Proposed ZFS change: allow physically reserving space for a file without writing its leaf blocks (adds a _FIO_RESERVE_SPACE ioctl implemented by a new zfs_zero_write() routine).

*** 72,81 **** --- 72,82 ---- #include <sys/sid.h> #include "fs/fs_subr.h" #include <sys/zfs_ctldir.h> #include <sys/zfs_fuid.h> #include <sys/zfs_sa.h> + #include <sys/zfeature.h> #include <sys/dnlc.h> #include <sys/zfs_rlock.h> #include <sys/extdirent.h> #include <sys/kidmap.h> #include <sys/cred.h>
*** 290,308 **** --- 291,314 ----
 		return (error);
 	*off = noff;
 	return (error);
 }
+
+ /* Forward declaration: the implementation lives after zfs_ioctl(). */
+ static int zfs_zero_write(vnode_t *vp, uint64_t size, cred_t *cr,
+     caller_context_t *ct);
+
 /* ARGSUSED */
 static int
 zfs_ioctl(vnode_t *vp, int com, intptr_t data, int flag, cred_t *cred,
     int *rvalp, caller_context_t *ct)
 {
 	offset_t off;
 	int error;
 	zfsvfs_t *zfsvfs;
 	znode_t *zp;
+	uint64_t size;	/* _FIO_RESERVE_SPACE: bytes to reserve (ddi_copyin) */
 
 	switch (com) {
 	case _FIOFFS:
 		return (zfs_sync(vp->v_vfsp, 0, cred));
*** 330,339 **** --- 336,350 ---- if (error) return (error); if (ddi_copyout(&off, (void *)data, sizeof (off), flag)) return (SET_ERROR(EFAULT)); return (0); + case _FIO_RESERVE_SPACE: + if (ddi_copyin((void *)data, &size, sizeof (size), flag)) + return (EFAULT); + error = zfs_zero_write(vp, size, cred, ct); + return (error); } return (SET_ERROR(ENOTTY)); } /*
*** 954,963 **** --- 965,1070 ---- ZFS_EXIT(zfsvfs); return (0); } + #define ZFS_RESERVE_CHUNK (2 * 1024 * 1024) + /* ARGSUSED */ + static int + zfs_zero_write(vnode_t *vp, uint64_t size, cred_t *cr, caller_context_t *ct) + { + znode_t *zp = VTOZ(vp); + zfsvfs_t *zfsvfs = zp->z_zfsvfs; + int count = 0; + sa_bulk_attr_t bulk[4]; + uint64_t mtime[2], ctime[2]; + rl_t *rl; + int error = 0; + dmu_tx_t *tx = NULL; + uint64_t end_size; + uint64_t pos = 0; + + if (zp->z_size > 0) + return (EFBIG); + if (size == 0) + return (0); + + ZFS_ENTER(zfsvfs); + ZFS_VERIFY_ZP(zp); + + if (!spa_feature_is_enabled(zfsvfs->z_os->os_spa, + SPA_FEATURE_SPACE_RESERVATION)) + { + ZFS_EXIT(zfsvfs); + return (ENOTSUP); + } + + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16); + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16); + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL, + &zp->z_size, 8); + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL, + &zp->z_pflags, 8); + + /* + * If immutable or not appending then return EPERM + */ + if ((zp->z_pflags & (ZFS_IMMUTABLE | ZFS_READONLY))) { + ZFS_EXIT(zfsvfs); + return (EPERM); + } + + rl = zfs_range_lock(zp, 0, size, RL_WRITER); + + if (zfs_owner_overquota(zfsvfs, zp, B_FALSE) || + zfs_owner_overquota(zfsvfs, zp, B_TRUE)) { + error = EDQUOT; + goto out; + } + + while (pos < size) { + uint64_t length = size - pos; + length = MIN(length, ZFS_RESERVE_CHUNK); + again: + tx = dmu_tx_create(zfsvfs->z_os); + dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE); + dmu_tx_hold_write(tx, zp->z_id, pos, length); + zfs_sa_upgrade_txholds(tx, zp); + error = dmu_tx_assign(tx, TXG_NOWAIT); + if (error) { + if (error == ERESTART) { + dmu_tx_wait(tx); + dmu_tx_abort(tx); + goto again; + } + dmu_tx_abort(tx); + goto out; + } + + if (pos == 0) + zfs_grow_blocksize(zp, MIN(size, zfsvfs->z_max_blksz), tx); + dmu_write_zero(zfsvfs->z_os, zp->z_id, pos, length, tx); + + zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, 
mtime, ctime, B_TRUE); + + pos += length; + while ((end_size = zp->z_size) < pos) + (void) atomic_cas_64(&zp->z_size, end_size, pos); + + error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx); + + dmu_tx_commit(tx); + if (error) + goto out; + } + out: + zfs_range_unlock(rl); + ZFS_EXIT(zfsvfs); + + return (error); + } + void zfs_get_done(zgd_t *zgd, int error) { znode_t *zp = zgd->zgd_private; objset_t *os = zp->z_zfsvfs->z_os;