1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 /*
  22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
  23  * Copyright (c) 2012 by Delphix. All rights reserved.
  24  */
  25 
  26 
  27 /* Portions Copyright 2007 Jeremy Teo */
  28 
  29 #ifdef _KERNEL
  30 #include <sys/types.h>
  31 #include <sys/param.h>
  32 #include <sys/time.h>
  33 #include <sys/systm.h>
  34 #include <sys/sysmacros.h>
  35 #include <sys/resource.h>
  36 #include <sys/mntent.h>
  37 #include <sys/mkdev.h>
  38 #include <sys/u8_textprep.h>
  39 #include <sys/dsl_dataset.h>
  40 #include <sys/vfs.h>
  41 #include <sys/vfs_opreg.h>
  42 #include <sys/vnode.h>
  43 #include <sys/file.h>
  44 #include <sys/kmem.h>
  45 #include <sys/errno.h>
  46 #include <sys/unistd.h>
  47 #include <sys/mode.h>
  48 #include <sys/atomic.h>
  49 #include <vm/pvn.h>
  50 #include "fs/fs_subr.h"
  51 #include <sys/zfs_dir.h>
  52 #include <sys/zfs_acl.h>
  53 #include <sys/zfs_ioctl.h>
  54 #include <sys/zfs_rlock.h>
  55 #include <sys/zfs_fuid.h>
  56 #include <sys/dnode.h>
  57 #include <sys/fs/zfs.h>
  58 #include <sys/kidmap.h>
  59 #endif /* _KERNEL */
  60 
  61 #include <sys/dmu.h>
  62 #include <sys/refcount.h>
  63 #include <sys/stat.h>
  64 #include <sys/zap.h>
  65 #include <sys/zfs_znode.h>
  66 #include <sys/sa.h>
  67 #include <sys/zfs_sa.h>
  68 #include <sys/zfs_stat.h>
  69 
  70 #include "zfs_prop.h"
  71 #include "zfs_comutil.h"
  72 
  73 /*
  74  * Define ZNODE_STATS to turn on statistic gathering. By default, it is only
  75  * turned on when DEBUG is also defined.
  76  */
  77 #ifdef  DEBUG
  78 #define ZNODE_STATS
  79 #endif  /* DEBUG */
  80 
  81 #ifdef  ZNODE_STATS
  82 #define ZNODE_STAT_ADD(stat)                    ((stat)++)
  83 #else
  84 #define ZNODE_STAT_ADD(stat)                    /* nothing */
  85 #endif  /* ZNODE_STATS */
  86 
  87 /*
  88  * Functions needed for userland (i.e. libzpool) are not put under
  89  * #ifdef _KERNEL; the rest of the functions have dependencies
  90  * (such as VFS logic) that will not compile easily in userland.
  91  */
  92 #ifdef _KERNEL
  93 /*
  94  * Needed to close a small window in zfs_znode_move() that allows the zfsvfs to
  95  * be freed before it can be safely accessed.
  96  */
  97 krwlock_t zfsvfs_lock;
  98 
  99 static kmem_cache_t *znode_cache = NULL;
 100 
 101 /*ARGSUSED*/
 102 static void
 103 znode_evict_error(dmu_buf_t *dbuf, void *user_ptr)
 104 {
 105         /*
 106          * We should never drop all dbuf refs without first clearing
 107          * the eviction callback.
 108          */
 109         panic("evicting znode %p\n", user_ptr);
 110 }
 111 
 112 /*ARGSUSED*/
 113 static int
 114 zfs_znode_cache_constructor(void *buf, void *arg, int kmflags)
 115 {
 116         znode_t *zp = buf;
 117 
 118         ASSERT(!POINTER_IS_VALID(zp->z_zfsvfs));
 119 
 120         zp->z_vnode = vn_alloc(kmflags);
 121         if (zp->z_vnode == NULL) {
 122                 return (-1);
 123         }
 124         ZTOV(zp)->v_data = zp;
 125 
 126         list_link_init(&zp->z_link_node);
 127 
 128         mutex_init(&zp->z_lock, NULL, MUTEX_DEFAULT, NULL);
 129         rw_init(&zp->z_parent_lock, NULL, RW_DEFAULT, NULL);
 130         rw_init(&zp->z_name_lock, NULL, RW_DEFAULT, NULL);
 131         mutex_init(&zp->z_acl_lock, NULL, MUTEX_DEFAULT, NULL);
 132 
 133         mutex_init(&zp->z_range_lock, NULL, MUTEX_DEFAULT, NULL);
 134         avl_create(&zp->z_range_avl, zfs_range_compare,
 135             sizeof (rl_t), offsetof(rl_t, r_node));
 136 
 137         zp->z_dirlocks = NULL;
 138         zp->z_acl_cached = NULL;
 139         zp->z_moved = 0;
 140         return (0);
 141 }
 142 
 143 /*ARGSUSED*/
 144 static void
 145 zfs_znode_cache_destructor(void *buf, void *arg)
 146 {
 147         znode_t *zp = buf;
 148 
 149         ASSERT(!POINTER_IS_VALID(zp->z_zfsvfs));
 150         ASSERT(ZTOV(zp)->v_data == zp);
 151         vn_free(ZTOV(zp));
 152         ASSERT(!list_link_active(&zp->z_link_node));
 153         mutex_destroy(&zp->z_lock);
 154         rw_destroy(&zp->z_parent_lock);
 155         rw_destroy(&zp->z_name_lock);
 156         mutex_destroy(&zp->z_acl_lock);
 157         avl_destroy(&zp->z_range_avl);
 158         mutex_destroy(&zp->z_range_lock);
 159 
 160         ASSERT(zp->z_dirlocks == NULL);
 161         ASSERT(zp->z_acl_cached == NULL);
 162 }
 163 
 164 #ifdef  ZNODE_STATS
 165 static struct {
 166         uint64_t zms_zfsvfs_invalid;
 167         uint64_t zms_zfsvfs_recheck1;
 168         uint64_t zms_zfsvfs_unmounted;
 169         uint64_t zms_zfsvfs_recheck2;
 170         uint64_t zms_obj_held;
 171         uint64_t zms_vnode_locked;
 172         uint64_t zms_not_only_dnlc;
 173 } znode_move_stats;
 174 #endif  /* ZNODE_STATS */
 175 
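     /*
      * Copy the contents of an existing znode (ozp) into a freshly
      * constructed cache buffer (nzp) on behalf of the kmem move callback
      * below, swapping the vnodes so the destructor frees the one being
      * replaced.  Callers must hold the locks taken in zfs_znode_move().
      */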
 176 static void
 177 zfs_znode_move_impl(znode_t *ozp, znode_t *nzp)
 178 {
 179         vnode_t *vp;
 180 
 181         /* Copy fields. */
 182         nzp->z_zfsvfs = ozp->z_zfsvfs;
 183 
 184         /* Swap vnodes. */
 185         vp = nzp->z_vnode;
 186         nzp->z_vnode = ozp->z_vnode;
 187         ozp->z_vnode = vp; /* let destructor free the overwritten vnode */
 188         ZTOV(ozp)->v_data = ozp;
 189         ZTOV(nzp)->v_data = nzp;
 190 
 191         nzp->z_id = ozp->z_id;
 192         ASSERT(ozp->z_dirlocks == NULL); /* znode not in use */
 193         ASSERT(avl_numnodes(&ozp->z_range_avl) == 0);
 194         nzp->z_unlinked = ozp->z_unlinked;
 195         nzp->z_atime_dirty = ozp->z_atime_dirty;
 196         nzp->z_zn_prefetch = ozp->z_zn_prefetch;
 197         nzp->z_blksz = ozp->z_blksz;
 198         nzp->z_seq = ozp->z_seq;
 199         nzp->z_mapcnt = ozp->z_mapcnt;
 200         nzp->z_gen = ozp->z_gen;
 201         nzp->z_sync_cnt = ozp->z_sync_cnt;
 202         nzp->z_is_sa = ozp->z_is_sa;
 203         nzp->z_sa_hdl = ozp->z_sa_hdl;
 204         bcopy(ozp->z_atime, nzp->z_atime, sizeof (uint64_t) * 2);
 205         nzp->z_links = ozp->z_links;
 206         nzp->z_size = ozp->z_size;
 207         nzp->z_pflags = ozp->z_pflags;
 208         nzp->z_uid = ozp->z_uid;
 209         nzp->z_gid = ozp->z_gid;
 210         nzp->z_mode = ozp->z_mode;
 211 
 212         /*
 213          * Since this is just an idle znode and kmem is already dealing with
 214          * memory pressure, release any cached ACL.
 215          */
 216         if (ozp->z_acl_cached) {
 217                 zfs_acl_free(ozp->z_acl_cached);
 218                 ozp->z_acl_cached = NULL;
 219         }
 220 
 221         sa_set_userp(nzp->z_sa_hdl, nzp);
 222 
 223         /*
 224          * Invalidate the original znode by clearing fields that provide a
 225          * pointer back to the znode. Set the low bit of the vfs pointer to
 226          * ensure that zfs_znode_move() recognizes the znode as invalid in any
 227          * subsequent callback.
 228          */
 229         ozp->z_sa_hdl = NULL;
 230         POINTER_INVALIDATE(&ozp->z_zfsvfs);
 231 
 232         /*
 233          * Mark the znode.
 234          */
 235         nzp->z_moved = 1;
 236         ozp->z_moved = (uint8_t)-1;
 237 }
 238 
 239 /*ARGSUSED*/
 240 static kmem_cbrc_t
 241 zfs_znode_move(void *buf, void *newbuf, size_t size, void *arg)
 242 {
 243         znode_t *ozp = buf, *nzp = newbuf;
 244         zfsvfs_t *zfsvfs;
 245         vnode_t *vp;
 246 
 247         /*
 248          * The znode is on the file system's list of known znodes if the vfs
 249          * pointer is valid. We set the low bit of the vfs pointer when freeing
 250          * the znode to invalidate it, and the memory patterns written by kmem
 251          * (baddcafe and deadbeef) set at least one of the two low bits. A newly
 252          * created znode sets the vfs pointer last of all to indicate that the
 253          * znode is known and in a valid state to be moved by this function.
 254          */
 255         zfsvfs = ozp->z_zfsvfs;
 256         if (!POINTER_IS_VALID(zfsvfs)) {
 257                 ZNODE_STAT_ADD(znode_move_stats.zms_zfsvfs_invalid);
 258                 return (KMEM_CBRC_DONT_KNOW);
 259         }
 260 
 261         /*
 262          * Close a small window in which it's possible that the filesystem could
 263          * be unmounted and freed, and zfsvfs, though valid in the previous
 264          * statement, could point to unrelated memory by the time we try to
 265          * prevent the filesystem from being unmounted.
 266          */
 267         rw_enter(&zfsvfs_lock, RW_WRITER);
 268         if (zfsvfs != ozp->z_zfsvfs) {
 269                 rw_exit(&zfsvfs_lock);
 270                 ZNODE_STAT_ADD(znode_move_stats.zms_zfsvfs_recheck1);
 271                 return (KMEM_CBRC_DONT_KNOW);
 272         }
 273 
 274         /*
 275          * If the znode is still valid, then so is the file system. We know that
 276          * no valid file system can be freed while we hold zfsvfs_lock, so we
 277          * can safely ensure that the filesystem is not and will not be
 278          * unmounted. The next statement is equivalent to ZFS_ENTER().
 279          */
 280         rrw_enter(&zfsvfs->z_teardown_lock, RW_READER, FTAG);
 281         if (zfsvfs->z_unmounted) {
 282                 ZFS_EXIT(zfsvfs);
 283                 rw_exit(&zfsvfs_lock);
 284                 ZNODE_STAT_ADD(znode_move_stats.zms_zfsvfs_unmounted);
 285                 return (KMEM_CBRC_DONT_KNOW);
 286         }
 287         rw_exit(&zfsvfs_lock);
 288 
 289         mutex_enter(&zfsvfs->z_znodes_lock);
 290         /*
 291          * Recheck the vfs pointer in case the znode was removed just before
 292          * acquiring the lock.
 293          */
 294         if (zfsvfs != ozp->z_zfsvfs) {
 295                 mutex_exit(&zfsvfs->z_znodes_lock);
 296                 ZFS_EXIT(zfsvfs);
 297                 ZNODE_STAT_ADD(znode_move_stats.zms_zfsvfs_recheck2);
 298                 return (KMEM_CBRC_DONT_KNOW);
 299         }
 300 
 301         /*
 302          * At this point we know that as long as we hold z_znodes_lock, the
 303          * znode cannot be freed and fields within the znode can be safely
 304          * accessed. Now, prevent a race with zfs_zget().
 305          */
 306         if (ZFS_OBJ_HOLD_TRYENTER(zfsvfs, ozp->z_id) == 0) {
 307                 mutex_exit(&zfsvfs->z_znodes_lock);
 308                 ZFS_EXIT(zfsvfs);
 309                 ZNODE_STAT_ADD(znode_move_stats.zms_obj_held);
 310                 return (KMEM_CBRC_LATER);
 311         }
 312 
 313         vp = ZTOV(ozp);
 314         if (mutex_tryenter(&vp->v_lock) == 0) {
 315                 ZFS_OBJ_HOLD_EXIT(zfsvfs, ozp->z_id);
 316                 mutex_exit(&zfsvfs->z_znodes_lock);
 317                 ZFS_EXIT(zfsvfs);
 318                 ZNODE_STAT_ADD(znode_move_stats.zms_vnode_locked);
 319                 return (KMEM_CBRC_LATER);
 320         }
 321 
 322         /* Only move znodes that are referenced _only_ by the DNLC. */
 323         if (vp->v_count != 1 || !vn_in_dnlc(vp)) {
 324                 mutex_exit(&vp->v_lock);
 325                 ZFS_OBJ_HOLD_EXIT(zfsvfs, ozp->z_id);
 326                 mutex_exit(&zfsvfs->z_znodes_lock);
 327                 ZFS_EXIT(zfsvfs);
 328                 ZNODE_STAT_ADD(znode_move_stats.zms_not_only_dnlc);
 329                 return (KMEM_CBRC_LATER);
 330         }
 331 
 332         /*
 333          * The znode is known and in a valid state to move. We're holding the
 334          * locks needed to execute the critical section.
 335          */
 336         zfs_znode_move_impl(ozp, nzp);
 337         mutex_exit(&vp->v_lock);
 338         ZFS_OBJ_HOLD_EXIT(zfsvfs, ozp->z_id);
 339 
 340         list_link_replace(&ozp->z_link_node, &nzp->z_link_node);
 341         mutex_exit(&zfsvfs->z_znodes_lock);
 342         ZFS_EXIT(zfsvfs);
 343 
 344         return (KMEM_CBRC_YES);
 345 }
 346 
 347 void
 348 zfs_znode_init(void)
 349 {
 350         /*
 351          * Initialize zcache
 352          */
 353         rw_init(&zfsvfs_lock, NULL, RW_DEFAULT, NULL);
 354         ASSERT(znode_cache == NULL);
 355         znode_cache = kmem_cache_create("zfs_znode_cache",
 356             sizeof (znode_t), 0, zfs_znode_cache_constructor,
 357             zfs_znode_cache_destructor, NULL, NULL, NULL, 0);
 358         kmem_cache_set_move(znode_cache, zfs_znode_move);
 359 }
 360 
 361 void
 362 zfs_znode_fini(void)
 363 {
 364         /*
 365          * Cleanup vfs & vnode ops
 366          */
 367         zfs_remove_op_tables();
 368 
 369         /*
 370          * Cleanup zcache
 371          */
 372         if (znode_cache)
 373                 kmem_cache_destroy(znode_cache);
 374         znode_cache = NULL;
 375         rw_destroy(&zfsvfs_lock);
 376 }
 377 
 378 struct vnodeops *zfs_dvnodeops;
 379 struct vnodeops *zfs_fvnodeops;
 380 struct vnodeops *zfs_symvnodeops;
 381 struct vnodeops *zfs_xdvnodeops;
 382 struct vnodeops *zfs_evnodeops;
 383 struct vnodeops *zfs_sharevnodeops;
 384 
 385 void
 386 zfs_remove_op_tables()
 387 {
 388         /*
 389          * Remove vfs ops
 390          */
 391         ASSERT(zfsfstype);
 392         (void) vfs_freevfsops_by_type(zfsfstype);
 393         zfsfstype = 0;
 394 
 395         /*
 396          * Remove vnode ops
 397          */
 398         if (zfs_dvnodeops)
 399                 vn_freevnodeops(zfs_dvnodeops);
 400         if (zfs_fvnodeops)
 401                 vn_freevnodeops(zfs_fvnodeops);
 402         if (zfs_symvnodeops)
 403                 vn_freevnodeops(zfs_symvnodeops);
 404         if (zfs_xdvnodeops)
 405                 vn_freevnodeops(zfs_xdvnodeops);
 406         if (zfs_evnodeops)
 407                 vn_freevnodeops(zfs_evnodeops);
 408         if (zfs_sharevnodeops)
 409                 vn_freevnodeops(zfs_sharevnodeops);
 410 
 411         zfs_dvnodeops = NULL;
 412         zfs_fvnodeops = NULL;
 413         zfs_symvnodeops = NULL;
 414         zfs_xdvnodeops = NULL;
 415         zfs_evnodeops = NULL;
 416         zfs_sharevnodeops = NULL;
 417 }
 418 
 419 extern const fs_operation_def_t zfs_dvnodeops_template[];
 420 extern const fs_operation_def_t zfs_fvnodeops_template[];
 421 extern const fs_operation_def_t zfs_xdvnodeops_template[];
 422 extern const fs_operation_def_t zfs_symvnodeops_template[];
 423 extern const fs_operation_def_t zfs_evnodeops_template[];
 424 extern const fs_operation_def_t zfs_sharevnodeops_template[];
 425 
 426 int
 427 zfs_create_op_tables()
 428 {
 429         int error;
 430 
 431         /*
 432          * zfs_dvnodeops can be set if mod_remove() calls mod_installfs()
 433  * due to a failure to remove the 2nd modlinkage (zfs_modldrv).
 434          * In this case we just return as the ops vectors are already set up.
 435          */
 436         if (zfs_dvnodeops)
 437                 return (0);
 438 
 439         error = vn_make_ops(MNTTYPE_ZFS, zfs_dvnodeops_template,
 440             &zfs_dvnodeops);
 441         if (error)
 442                 return (error);
 443 
 444         error = vn_make_ops(MNTTYPE_ZFS, zfs_fvnodeops_template,
 445             &zfs_fvnodeops);
 446         if (error)
 447                 return (error);
 448 
 449         error = vn_make_ops(MNTTYPE_ZFS, zfs_symvnodeops_template,
 450             &zfs_symvnodeops);
 451         if (error)
 452                 return (error);
 453 
 454         error = vn_make_ops(MNTTYPE_ZFS, zfs_xdvnodeops_template,
 455             &zfs_xdvnodeops);
 456         if (error)
 457                 return (error);
 458 
 459         error = vn_make_ops(MNTTYPE_ZFS, zfs_evnodeops_template,
 460             &zfs_evnodeops);
 461         if (error)
 462                 return (error);
 463 
 464         error = vn_make_ops(MNTTYPE_ZFS, zfs_sharevnodeops_template,
 465             &zfs_sharevnodeops);
 466 
 467         return (error);
 468 }
 469 
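     /*
      * Create the "shares" directory object for this file system.  A
      * throwaway znode is filled in by hand so that zfs_mknode() can be
      * used before the file system is fully set up; the resulting object
      * number is recorded under ZFS_SHARES_DIR in the master node and
      * cached in zfsvfs->z_shares_dir.
      */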
 470 int
 471 zfs_create_share_dir(zfsvfs_t *zfsvfs, dmu_tx_t *tx)
 472 {
 473         zfs_acl_ids_t acl_ids;
 474         vattr_t vattr;
 475         znode_t *sharezp;
 476         vnode_t *vp;
 477         znode_t *zp;
 478         int error;
 479 
 480         vattr.va_mask = AT_MODE|AT_UID|AT_GID|AT_TYPE;
 481         vattr.va_type = VDIR;
 482         vattr.va_mode = S_IFDIR|0555;
 483         vattr.va_uid = crgetuid(kcred);
 484         vattr.va_gid = crgetgid(kcred);
 485 
 486         sharezp = kmem_cache_alloc(znode_cache, KM_SLEEP);
 487         ASSERT(!POINTER_IS_VALID(sharezp->z_zfsvfs));
 488         sharezp->z_moved = 0;
 489         sharezp->z_unlinked = 0;
 490         sharezp->z_atime_dirty = 0;
 491         sharezp->z_zfsvfs = zfsvfs;
 492         sharezp->z_is_sa = zfsvfs->z_use_sa;
 493 
 494         vp = ZTOV(sharezp);
 495         vn_reinit(vp);
 496         vp->v_type = VDIR;
 497 
 498         VERIFY(0 == zfs_acl_ids_create(sharezp, IS_ROOT_NODE, &vattr,
 499             kcred, NULL, &acl_ids));
 500         zfs_mknode(sharezp, &vattr, tx, kcred, IS_ROOT_NODE, &zp, &acl_ids);
 501         ASSERT3P(zp, ==, sharezp);
 502         ASSERT(!vn_in_dnlc(ZTOV(sharezp))); /* not valid to move */
 503         POINTER_INVALIDATE(&sharezp->z_zfsvfs);
 504         error = zap_add(zfsvfs->z_os, MASTER_NODE_OBJ,
 505             ZFS_SHARES_DIR, 8, 1, &sharezp->z_id, tx);
 506         zfsvfs->z_shares_dir = sharezp->z_id;
 507 
 508         zfs_acl_ids_free(&acl_ids);
 509         ZTOV(sharezp)->v_count = 0;
 510         sa_handle_destroy(sharezp->z_sa_hdl);
 511         kmem_cache_free(znode_cache, sharezp);
 512 
 513         return (error);
 514 }
 515 
 516 /*
 517  * Define a couple of values we need available
 518  * for both 64- and 32-bit environments.
 519  */
 520 #ifndef NBITSMINOR64
 521 #define NBITSMINOR64    32
 522 #endif
 523 #ifndef MAXMAJ64
 524 #define MAXMAJ64        0xffffffffUL
 525 #endif
 526 #ifndef MAXMIN64
 527 #define MAXMIN64        0xffffffffUL
 528 #endif
 529 
 530 /*
 531  * Create special expldev for ZFS private use.
 532  * Can't use standard expldev since it doesn't do
 533  * what we want.  The standard expldev() takes a
 534  * dev32_t in LP64 and expands it to a long dev_t.
 535  * We need an interface that takes a dev32_t in ILP32
 536  * and expands it to a long dev_t.
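      * For example, assuming a device with major number 1 and minor
      * number 2, the 64-bit value produced here is 0x0000000100000002:
      * the major number in the upper 32 bits and the minor in the
      * lower 32 bits.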
 537  */
 538 static uint64_t
 539 zfs_expldev(dev_t dev)
 540 {
 541 #ifndef _LP64
 542         major_t major = (major_t)dev >> NBITSMINOR32 & MAXMAJ32;
 543         return (((uint64_t)major << NBITSMINOR64) |
 544             ((minor_t)dev & MAXMIN32));
 545 #else
 546         return (dev);
 547 #endif
 548 }
 549 
 550 /*
 551  * Special cmpldev for ZFS private use.
 552  * Can't use standard cmpldev since it takes
 553  * a long dev_t and compresses it to dev32_t in
 554  * LP64.  We need to do a compaction of a long dev_t
 555  * to a dev32_t in ILP32.
 556  */
 557 dev_t
 558 zfs_cmpldev(uint64_t dev)
 559 {
 560 #ifndef _LP64
 561         minor_t minor = (minor_t)dev & MAXMIN64;
 562         major_t major = (major_t)(dev >> NBITSMINOR64) & MAXMAJ64;
 563 
 564         if (major > MAXMAJ32 || minor > MAXMIN32)
 565                 return (NODEV32);
 566 
 567         return (((dev32_t)major << NBITSMINOR32) | minor);
 568 #else
 569         return (dev);
 570 #endif
 571 }
 572 
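     /*
      * Attach a system-attribute (SA) handle to a znode.  If no handle is
      * supplied, a shared handle is created from the given dmu buffer;
      * otherwise the caller's handle is adopted.  Also records whether the
      * object uses the DMU_OT_SA bonus layout and marks the root vnode
      * with VROOT.
      */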
 573 static void
 574 zfs_znode_sa_init(zfsvfs_t *zfsvfs, znode_t *zp,
 575     dmu_buf_t *db, dmu_object_type_t obj_type, sa_handle_t *sa_hdl)
 576 {
 577         ASSERT(!POINTER_IS_VALID(zp->z_zfsvfs) || (zfsvfs == zp->z_zfsvfs));
 578         ASSERT(MUTEX_HELD(ZFS_OBJ_MUTEX(zfsvfs, zp->z_id)));
 579 
 580         mutex_enter(&zp->z_lock);
 581 
 582         ASSERT(zp->z_sa_hdl == NULL);
 583         ASSERT(zp->z_acl_cached == NULL);
 584         if (sa_hdl == NULL) {
 585                 VERIFY(0 == sa_handle_get_from_db(zfsvfs->z_os, db, zp,
 586                     SA_HDL_SHARED, &zp->z_sa_hdl));
 587         } else {
 588                 zp->z_sa_hdl = sa_hdl;
 589                 sa_set_userp(sa_hdl, zp);
 590         }
 591 
 592         zp->z_is_sa = (obj_type == DMU_OT_SA) ? B_TRUE : B_FALSE;
 593 
 594         /*
 595          * Slap on VROOT if we are the root znode
 596          */
 597         if (zp->z_id == zfsvfs->z_root)
 598                 ZTOV(zp)->v_flag |= VROOT;
 599 
 600         mutex_exit(&zp->z_lock);
 601         vn_exists(ZTOV(zp));
 602 }
 603 
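     /*
      * Detach a znode from its DMU/SA state by destroying its SA handle.
      * Only safe when the object hold is taken, the znode is unlinked, or
      * the teardown-inactive lock is write held (see the ASSERT below).
      */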
 604 void
 605 zfs_znode_dmu_fini(znode_t *zp)
 606 {
 607         ASSERT(MUTEX_HELD(ZFS_OBJ_MUTEX(zp->z_zfsvfs, zp->z_id)) ||
 608             zp->z_unlinked ||
 609             RW_WRITE_HELD(&zp->z_zfsvfs->z_teardown_inactive_lock));
 610 
 611         sa_handle_destroy(zp->z_sa_hdl);
 612         zp->z_sa_hdl = NULL;
 613 }
 614 
 615 /*
 616  * Construct a new znode/vnode and initialize it.
 617  *
 618  * This does not do a call to dmu_set_user(); that is
 619  * up to the caller to do, in case you don't want to
 620  * return the znode.
 621  */
 622 static znode_t *
 623 zfs_znode_alloc(zfsvfs_t *zfsvfs, dmu_buf_t *db, int blksz,
 624     dmu_object_type_t obj_type, sa_handle_t *hdl)
 625 {
 626         znode_t *zp;
 627         vnode_t *vp;
 628         uint64_t mode;
 629         uint64_t parent;
 630         sa_bulk_attr_t bulk[9];
 631         int count = 0;
 632 
 633         zp = kmem_cache_alloc(znode_cache, KM_SLEEP);
 634 
 635         ASSERT(zp->z_dirlocks == NULL);
 636         ASSERT(!POINTER_IS_VALID(zp->z_zfsvfs));
 637         zp->z_moved = 0;
 638 
 639         /*
 640          * Defer setting z_zfsvfs until the znode is ready to be a candidate for
 641          * the zfs_znode_move() callback.
 642          */
 643         zp->z_sa_hdl = NULL;
 644         zp->z_unlinked = 0;
 645         zp->z_atime_dirty = 0;
 646         zp->z_mapcnt = 0;
 647         zp->z_id = db->db_object;
 648         zp->z_blksz = blksz;
 649         zp->z_seq = 0x7A4653;
 650         zp->z_sync_cnt = 0;
 651 
 652         vp = ZTOV(zp);
 653         vn_reinit(vp);
 654 
 655         zfs_znode_sa_init(zfsvfs, zp, db, obj_type, hdl);
 656 
 657         SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8);
 658         SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zfsvfs), NULL, &zp->z_gen, 8);
 659         SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
 660             &zp->z_size, 8);
 661         SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL,
 662             &zp->z_links, 8);
 663         SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
 664             &zp->z_pflags, 8);
 665         SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zfsvfs), NULL, &parent, 8);
 666         SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
 667             &zp->z_atime, 16);
 668         SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
 669             &zp->z_uid, 8);
 670         SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL,
 671             &zp->z_gid, 8);
 672 
 673         if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count) != 0 || zp->z_gen == 0) {
 674                 if (hdl == NULL)
 675                         sa_handle_destroy(zp->z_sa_hdl);
 676                 kmem_cache_free(znode_cache, zp);
 677                 return (NULL);
 678         }
 679 
 680         zp->z_mode = mode;
 681         vp->v_vfsp = zfsvfs->z_parent->z_vfs;
 682 
 683         vp->v_type = IFTOVT((mode_t)mode);
 684 
 685         switch (vp->v_type) {
 686         case VDIR:
 687                 if (zp->z_pflags & ZFS_XATTR) {
 688                         vn_setops(vp, zfs_xdvnodeops);
 689                         vp->v_flag |= V_XATTRDIR;
 690                 } else {
 691                         vn_setops(vp, zfs_dvnodeops);
 692                 }
 693                 zp->z_zn_prefetch = B_TRUE; /* z_prefetch default is enabled */
 694                 break;
 695         case VBLK:
 696         case VCHR:
 697                 {
 698                         uint64_t rdev;
 699                         VERIFY(sa_lookup(zp->z_sa_hdl, SA_ZPL_RDEV(zfsvfs),
 700                             &rdev, sizeof (rdev)) == 0);
 701 
 702                         vp->v_rdev = zfs_cmpldev(rdev);
 703                 }
 704                 /*FALLTHROUGH*/
 705         case VFIFO:
 706         case VSOCK:
 707         case VDOOR:
 708                 vn_setops(vp, zfs_fvnodeops);
 709                 break;
 710         case VREG:
 711                 vp->v_flag |= VMODSORT;
 712                 if (parent == zfsvfs->z_shares_dir) {
 713                         ASSERT(zp->z_uid == 0 && zp->z_gid == 0);
 714                         vn_setops(vp, zfs_sharevnodeops);
 715                 } else {
 716                         vn_setops(vp, zfs_fvnodeops);
 717                 }
 718                 break;
 719         case VLNK:
 720                 vn_setops(vp, zfs_symvnodeops);
 721                 break;
 722         default:
 723                 vn_setops(vp, zfs_evnodeops);
 724                 break;
 725         }
 726 
 727         mutex_enter(&zfsvfs->z_znodes_lock);
 728         list_insert_tail(&zfsvfs->z_all_znodes, zp);
 729         membar_producer();
 730         /*
 731          * Everything else must be valid before assigning z_zfsvfs makes the
 732          * znode eligible for zfs_znode_move().
 733          */
 734         zp->z_zfsvfs = zfsvfs;
 735         mutex_exit(&zfsvfs->z_znodes_lock);
 736 
 737         VFS_HOLD(zfsvfs->z_vfs);
 738         return (zp);
 739 }
 740 
 741 static uint64_t empty_xattr;
 742 static uint64_t pad[4];
 743 static zfs_acl_phys_t acl_phys;
 744 /*
 745  * Create a new DMU object to hold a zfs znode.
 746  *
 747  *      IN:     dzp     - parent directory for new znode
 748  *              vap     - file attributes for new znode
 749  *              tx      - open dmu transaction for zap operations
 750  *              cr      - credentials of caller
 751  *              flag    - flags:
 752  *                        IS_ROOT_NODE  - new object will be root
 753  *                        IS_XATTR      - new object is an attribute
 754  *              acl_ids - ACL and fuid information for the new znode.
 757  *
 758  *      OUT:    zpp     - allocated znode
 759  *
 760  */
 761 void
 762 zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
 763     uint_t flag, znode_t **zpp, zfs_acl_ids_t *acl_ids)
 764 {
 765         uint64_t        crtime[2], atime[2], mtime[2], ctime[2];
 766         uint64_t        mode, size, links, parent, pflags;
 767         uint64_t        dzp_pflags = 0;
 768         uint64_t        rdev = 0;
 769         zfsvfs_t        *zfsvfs = dzp->z_zfsvfs;
 770         dmu_buf_t       *db;
 771         timestruc_t     now;
 772         uint64_t        gen, obj;
 773         int             err;
 774         int             bonuslen;
 775         sa_handle_t     *sa_hdl;
 776         dmu_object_type_t obj_type;
 777         sa_bulk_attr_t  sa_attrs[ZPL_END];
 778         int             cnt = 0;
 779         zfs_acl_locator_cb_t locate = { 0 };
 780 
 781         ASSERT(vap && (vap->va_mask & (AT_TYPE|AT_MODE)) == (AT_TYPE|AT_MODE));
 782 
 783         if (zfsvfs->z_replay) {
 784                 obj = vap->va_nodeid;
 785                 now = vap->va_ctime;         /* see zfs_replay_create() */
 786                 gen = vap->va_nblocks;               /* ditto */
 787         } else {
 788                 obj = 0;
 789                 gethrestime(&now);
 790                 gen = dmu_tx_get_txg(tx);
 791         }
 792 
 793         obj_type = zfsvfs->z_use_sa ? DMU_OT_SA : DMU_OT_ZNODE;
 794         bonuslen = (obj_type == DMU_OT_SA) ?
 795             DN_MAX_BONUSLEN : ZFS_OLD_ZNODE_PHYS_SIZE;
 796 
 797         /*
 798          * Create a new DMU object.
 799          */
 800         /*
 801          * There's currently no mechanism for pre-reading the blocks that will
 802          * be needed to allocate a new object, so we accept the small chance
 803          * that there will be an i/o error and we will fail one of the
 804          * assertions below.
 805          */
 806         if (vap->va_type == VDIR) {
 807                 if (zfsvfs->z_replay) {
 808                         err = zap_create_claim_norm(zfsvfs->z_os, obj,
 809                             zfsvfs->z_norm, DMU_OT_DIRECTORY_CONTENTS,
 810                             obj_type, bonuslen, tx);
 811                         ASSERT0(err);
 812                 } else {
 813                         obj = zap_create_norm(zfsvfs->z_os,
 814                             zfsvfs->z_norm, DMU_OT_DIRECTORY_CONTENTS,
 815                             obj_type, bonuslen, tx);
 816                 }
 817         } else {
 818                 if (zfsvfs->z_replay) {
 819                         err = dmu_object_claim(zfsvfs->z_os, obj,
 820                             DMU_OT_PLAIN_FILE_CONTENTS, 0,
 821                             obj_type, bonuslen, tx);
 822                         ASSERT0(err);
 823                 } else {
 824                         obj = dmu_object_alloc(zfsvfs->z_os,
 825                             DMU_OT_PLAIN_FILE_CONTENTS, 0,
 826                             obj_type, bonuslen, tx);
 827                 }
 828         }
 829 
 830         ZFS_OBJ_HOLD_ENTER(zfsvfs, obj);
 831         VERIFY(0 == sa_buf_hold(zfsvfs->z_os, obj, NULL, &db));
 832 
 833         /*
 834          * If this is the root, fix up the half-initialized parent pointer
 835          * to reference the just-allocated physical data area.
 836          */
 837         if (flag & IS_ROOT_NODE) {
 838                 dzp->z_id = obj;
 839         } else {
 840                 dzp_pflags = dzp->z_pflags;
 841         }
 842 
 843         /*
 844          * If parent is an xattr, so am I.
 845          */
 846         if (dzp_pflags & ZFS_XATTR) {
 847                 flag |= IS_XATTR;
 848         }
 849 
 850         if (zfsvfs->z_use_fuids)
 851                 pflags = ZFS_ARCHIVE | ZFS_AV_MODIFIED;
 852         else
 853                 pflags = 0;
 854 
 855         if (vap->va_type == VDIR) {
 856                 size = 2;               /* contents ("." and "..") */
 857                 links = (flag & (IS_ROOT_NODE | IS_XATTR)) ? 2 : 1;
 858         } else {
 859                 size = links = 0;
 860         }
 861 
 862         if (vap->va_type == VBLK || vap->va_type == VCHR) {
 863                 rdev = zfs_expldev(vap->va_rdev);
 864         }
 865 
 866         parent = dzp->z_id;
 867         mode = acl_ids->z_mode;
 868         if (flag & IS_XATTR)
 869                 pflags |= ZFS_XATTR;
 870 
 871         /*
 872          * No execs denied will be determined when zfs_mode_compute() is called.
 873          */
 874         pflags |= acl_ids->z_aclp->z_hints &
 875             (ZFS_ACL_TRIVIAL|ZFS_INHERIT_ACE|ZFS_ACL_AUTO_INHERIT|
 876             ZFS_ACL_DEFAULTED|ZFS_ACL_PROTECTED);
 877 
 878         ZFS_TIME_ENCODE(&now, crtime);
 879         ZFS_TIME_ENCODE(&now, ctime);
 880 
 881         if (vap->va_mask & AT_ATIME) {
 882                 ZFS_TIME_ENCODE(&vap->va_atime, atime);
 883         } else {
 884                 ZFS_TIME_ENCODE(&now, atime);
 885         }
 886 
 887         if (vap->va_mask & AT_MTIME) {
 888                 ZFS_TIME_ENCODE(&vap->va_mtime, mtime);
 889         } else {
 890                 ZFS_TIME_ENCODE(&now, mtime);
 891         }
 892 
 893         /* Now add in all of the "SA" attributes */
 894         VERIFY(0 == sa_handle_get_from_db(zfsvfs->z_os, db, NULL, SA_HDL_SHARED,
 895             &sa_hdl));
 896 
 897         /*
 898          * Set up the array of attributes to be replaced/set on the new file.
 899          *
 900          * The order for DMU_OT_ZNODE is critical since it needs to be constructed
 901          * in the old znode_phys_t format.  Don't change this ordering.
 902          */
 903 
 904         if (obj_type == DMU_OT_ZNODE) {
 905                 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zfsvfs),
 906                     NULL, &atime, 16);
 907                 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MTIME(zfsvfs),
 908                     NULL, &mtime, 16);
 909                 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CTIME(zfsvfs),
 910                     NULL, &ctime, 16);
 911                 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CRTIME(zfsvfs),
 912                     NULL, &crtime, 16);
 913                 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GEN(zfsvfs),
 914                     NULL, &gen, 8);
 915                 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MODE(zfsvfs),
 916                     NULL, &mode, 8);
 917                 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_SIZE(zfsvfs),
 918                     NULL, &size, 8);
 919                 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PARENT(zfsvfs),
 920                     NULL, &parent, 8);
 921         } else {
 922                 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MODE(zfsvfs),
 923                     NULL, &mode, 8);
 924                 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_SIZE(zfsvfs),
 925                     NULL, &size, 8);
 926                 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GEN(zfsvfs),
 927                     NULL, &gen, 8);
 928                 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_UID(zfsvfs), NULL,
 929                     &acl_ids->z_fuid, 8);
 930                 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GID(zfsvfs), NULL,
 931                     &acl_ids->z_fgid, 8);
 932                 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PARENT(zfsvfs),
 933                     NULL, &parent, 8);
 934                 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_FLAGS(zfsvfs),
 935                     NULL, &pflags, 8);
 936                 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zfsvfs),
 937                     NULL, &atime, 16);
 938                 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MTIME(zfsvfs),
 939                     NULL, &mtime, 16);
 940                 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CTIME(zfsvfs),
 941                     NULL, &ctime, 16);
 942                 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CRTIME(zfsvfs),
 943                     NULL, &crtime, 16);
 944         }
 945 
 946         SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_LINKS(zfsvfs), NULL, &links, 8);
 947 
 948         if (obj_type == DMU_OT_ZNODE) {
 949                 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_XATTR(zfsvfs), NULL,
 950                     &empty_xattr, 8);
 951         }
 952         if (obj_type == DMU_OT_ZNODE ||
 953             (vap->va_type == VBLK || vap->va_type == VCHR)) {
 954                 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_RDEV(zfsvfs),
 955                     NULL, &rdev, 8);
 956 
 957         }
 958         if (obj_type == DMU_OT_ZNODE) {
 959                 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_FLAGS(zfsvfs),
 960                     NULL, &pflags, 8);
 961                 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_UID(zfsvfs), NULL,
 962                     &acl_ids->z_fuid, 8);
 963                 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GID(zfsvfs), NULL,
 964                     &acl_ids->z_fgid, 8);
 965                 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PAD(zfsvfs), NULL, pad,
 966                     sizeof (uint64_t) * 4);
 967                 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ZNODE_ACL(zfsvfs), NULL,
 968                     &acl_phys, sizeof (zfs_acl_phys_t));
 969         } else if (acl_ids->z_aclp->z_version >= ZFS_ACL_VERSION_FUID) {
 970                 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_DACL_COUNT(zfsvfs), NULL,
 971                     &acl_ids->z_aclp->z_acl_count, 8);
 972                 locate.cb_aclp = acl_ids->z_aclp;
 973                 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_DACL_ACES(zfsvfs),
 974                     zfs_acl_data_locator, &locate,
 975                     acl_ids->z_aclp->z_acl_bytes);
 976                 mode = zfs_mode_compute(mode, acl_ids->z_aclp, &pflags,
 977                     acl_ids->z_fuid, acl_ids->z_fgid);
 978         }
 979 
 980         VERIFY(sa_replace_all_by_template(sa_hdl, sa_attrs, cnt, tx) == 0);
 981 
 982         if (!(flag & IS_ROOT_NODE)) {
 983                 *zpp = zfs_znode_alloc(zfsvfs, db, 0, obj_type, sa_hdl);
 984                 ASSERT(*zpp != NULL);
 985         } else {
 986                 /*
 987                  * If we are creating the root node, the "parent" we
 988                  * passed in is the znode for the root.
 989                  */
 990                 *zpp = dzp;
 991 
 992                 (*zpp)->z_sa_hdl = sa_hdl;
 993         }
 994 
 995         (*zpp)->z_pflags = pflags;
 996         (*zpp)->z_mode = mode;
 997 
 998         if (vap->va_mask & AT_XVATTR)
 999                 zfs_xvattr_set(*zpp, (xvattr_t *)vap, tx);
1000 
1001         if (obj_type == DMU_OT_ZNODE ||
1002             acl_ids->z_aclp->z_version < ZFS_ACL_VERSION_FUID) {
1003                 err = zfs_aclset_common(*zpp, acl_ids->z_aclp, cr, tx);
1004                 ASSERT0(err);
1005         }
1006         ZFS_OBJ_HOLD_EXIT(zfsvfs, obj);
1007 }
1008 
1009 /*
1010  * zfs_xvattr_set() only updates the in-core attributes;
1011  * it is assumed the caller will be doing an sa_bulk_update
1012  * to push the changes out.
1013  */
1014 void
1015 zfs_xvattr_set(znode_t *zp, xvattr_t *xvap, dmu_tx_t *tx)
1016 {
1017         xoptattr_t *xoap;
1018 
1019         xoap = xva_getxoptattr(xvap);
1020         ASSERT(xoap);
1021 
1022         if (XVA_ISSET_REQ(xvap, XAT_CREATETIME)) {
1023                 uint64_t times[2];
1024                 ZFS_TIME_ENCODE(&xoap->xoa_createtime, times);
1025                 (void) sa_update(zp->z_sa_hdl, SA_ZPL_CRTIME(zp->z_zfsvfs),
1026                     &times, sizeof (times), tx);
1027                 XVA_SET_RTN(xvap, XAT_CREATETIME);
1028         }
1029         if (XVA_ISSET_REQ(xvap, XAT_READONLY)) {
1030                 ZFS_ATTR_SET(zp, ZFS_READONLY, xoap->xoa_readonly,
1031                     zp->z_pflags, tx);
1032                 XVA_SET_RTN(xvap, XAT_READONLY);
1033         }
1034         if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) {
1035                 ZFS_ATTR_SET(zp, ZFS_HIDDEN, xoap->xoa_hidden,
1036                     zp->z_pflags, tx);
1037                 XVA_SET_RTN(xvap, XAT_HIDDEN);
1038         }
1039         if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) {
1040                 ZFS_ATTR_SET(zp, ZFS_SYSTEM, xoap->xoa_system,
1041                     zp->z_pflags, tx);
1042                 XVA_SET_RTN(xvap, XAT_SYSTEM);
1043         }
1044         if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) {
1045                 ZFS_ATTR_SET(zp, ZFS_ARCHIVE, xoap->xoa_archive,
1046                     zp->z_pflags, tx);
1047                 XVA_SET_RTN(xvap, XAT_ARCHIVE);
1048         }
1049         if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
1050                 ZFS_ATTR_SET(zp, ZFS_IMMUTABLE, xoap->xoa_immutable,
1051                     zp->z_pflags, tx);
1052                 XVA_SET_RTN(xvap, XAT_IMMUTABLE);
1053         }
1054         if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
1055                 ZFS_ATTR_SET(zp, ZFS_NOUNLINK, xoap->xoa_nounlink,
1056                     zp->z_pflags, tx);
1057                 XVA_SET_RTN(xvap, XAT_NOUNLINK);
1058         }
1059         if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
1060                 ZFS_ATTR_SET(zp, ZFS_APPENDONLY, xoap->xoa_appendonly,
1061                     zp->z_pflags, tx);
1062                 XVA_SET_RTN(xvap, XAT_APPENDONLY);
1063         }
1064         if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
1065                 ZFS_ATTR_SET(zp, ZFS_NODUMP, xoap->xoa_nodump,
1066                     zp->z_pflags, tx);
1067                 XVA_SET_RTN(xvap, XAT_NODUMP);
1068         }
1069         if (XVA_ISSET_REQ(xvap, XAT_OPAQUE)) {
1070                 ZFS_ATTR_SET(zp, ZFS_OPAQUE, xoap->xoa_opaque,
1071                     zp->z_pflags, tx);
1072                 XVA_SET_RTN(xvap, XAT_OPAQUE);
1073         }
1074         if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
1075                 ZFS_ATTR_SET(zp, ZFS_AV_QUARANTINED,
1076                     xoap->xoa_av_quarantined, zp->z_pflags, tx);
1077                 XVA_SET_RTN(xvap, XAT_AV_QUARANTINED);
1078         }
1079         if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
1080                 ZFS_ATTR_SET(zp, ZFS_AV_MODIFIED, xoap->xoa_av_modified,
1081                     zp->z_pflags, tx);
1082                 XVA_SET_RTN(xvap, XAT_AV_MODIFIED);
1083         }
1084         if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) {
1085                 zfs_sa_set_scanstamp(zp, xvap, tx);
1086                 XVA_SET_RTN(xvap, XAT_AV_SCANSTAMP);
1087         }
1088         if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
1089                 ZFS_ATTR_SET(zp, ZFS_REPARSE, xoap->xoa_reparse,
1090                     zp->z_pflags, tx);
1091                 XVA_SET_RTN(xvap, XAT_REPARSE);
1092         }
1093         if (XVA_ISSET_REQ(xvap, XAT_OFFLINE)) {
1094                 ZFS_ATTR_SET(zp, ZFS_OFFLINE, xoap->xoa_offline,
1095                     zp->z_pflags, tx);
1096                 XVA_SET_RTN(xvap, XAT_OFFLINE);
1097         }
1098         if (XVA_ISSET_REQ(xvap, XAT_SPARSE)) {
1099                 ZFS_ATTR_SET(zp, ZFS_SPARSE, xoap->xoa_sparse,
1100                     zp->z_pflags, tx);
1101                 XVA_SET_RTN(xvap, XAT_SPARSE);
1102         }
1103 }
1104 
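     /*
      * Look up the znode for the given object number, constructing the
      * in-core znode and vnode if needed.  On success a held vnode is
      * returned in *zpp and the caller is responsible for releasing it.
      * A minimal usage sketch (names other than zfs_zget() and ZTOV()
      * are illustrative):
      *
      *         znode_t *zp;
      *         int error;
      *
      *         if ((error = zfs_zget(zfsvfs, object, &zp)) == 0) {
      *                 ... operate on ZTOV(zp) ...
      *                 VN_RELE(ZTOV(zp));
      *         }
      */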
1105 int
1106 zfs_zget(zfsvfs_t *zfsvfs, uint64_t obj_num, znode_t **zpp)
1107 {
1108         dmu_object_info_t doi;
1109         dmu_buf_t       *db;
1110         znode_t         *zp;
1111         int err;
1112         sa_handle_t     *hdl;
1113 
1114         *zpp = NULL;
1115 
1116         ZFS_OBJ_HOLD_ENTER(zfsvfs, obj_num);
1117 
1118         err = sa_buf_hold(zfsvfs->z_os, obj_num, NULL, &db);
1119         if (err) {
1120                 ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
1121                 return (err);
1122         }
1123 
1124         dmu_object_info_from_db(db, &doi);
1125         if (doi.doi_bonus_type != DMU_OT_SA &&
1126             (doi.doi_bonus_type != DMU_OT_ZNODE ||
1127             (doi.doi_bonus_type == DMU_OT_ZNODE &&
1128             doi.doi_bonus_size < sizeof (znode_phys_t)))) {
1129                 sa_buf_rele(db, NULL);
1130                 ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
1131                 return (EINVAL);
1132         }
1133 
1134         hdl = dmu_buf_get_user(db);
1135         if (hdl != NULL) {
1136                 zp  = sa_get_userdata(hdl);
1137 
1138 
1139                 /*
1140                  * Since "SA" does immediate eviction we
1141                  * should never find an sa handle that doesn't
1142                  * know about the znode.
1143                  */
1144 
1145                 ASSERT3P(zp, !=, NULL);
1146 
1147                 mutex_enter(&zp->z_lock);
1148                 ASSERT3U(zp->z_id, ==, obj_num);
1149                 if (zp->z_unlinked) {
1150                         err = ENOENT;
1151                 } else {
1152                         VN_HOLD(ZTOV(zp));
1153                         *zpp = zp;
1154                         err = 0;
1155                 }
1156                 sa_buf_rele(db, NULL);
1157                 mutex_exit(&zp->z_lock);
1158                 ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
1159                 return (err);
1160         }
1161 
1162         /*
1163          * Not found; create a new znode/vnode,
1164          * but only if the file exists.
1165          *
1166          * There is a small window where zfs_vget() could
1167          * find this object while a file create is still in
1168          * progress.  This is checked for in zfs_znode_alloc().
1169          *
1170          * If zfs_znode_alloc() fails it will drop the hold on the
1171          * bonus buffer.
1172          */
1173         zp = zfs_znode_alloc(zfsvfs, db, doi.doi_data_block_size,
1174             doi.doi_bonus_type, NULL);
1175         if (zp == NULL) {
1176                 err = ENOENT;
1177         } else {
1178                 *zpp = zp;
1179         }
1180         ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
1181         return (err);
1182 }
1183 
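     /*
      * Re-attach an in-core znode to its backing object, reloading the
      * cached attributes from the SA layer.  This is used when the znode's
      * DMU state was torn down (for example across a dataset rollback);
      * EIO is returned if the attributes cannot be read or the object's
      * generation number no longer matches.
      */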
1184 int
1185 zfs_rezget(znode_t *zp)
1186 {
1187         zfsvfs_t *zfsvfs = zp->z_zfsvfs;
1188         dmu_object_info_t doi;
1189         dmu_buf_t *db;
1190         uint64_t obj_num = zp->z_id;
1191         uint64_t mode;
1192         sa_bulk_attr_t bulk[8];
1193         int err;
1194         int count = 0;
1195         uint64_t gen;
1196 
1197         ZFS_OBJ_HOLD_ENTER(zfsvfs, obj_num);
1198 
1199         mutex_enter(&zp->z_acl_lock);
1200         if (zp->z_acl_cached) {
1201                 zfs_acl_free(zp->z_acl_cached);
1202                 zp->z_acl_cached = NULL;
1203         }
1204 
1205         mutex_exit(&zp->z_acl_lock);
1206         ASSERT(zp->z_sa_hdl == NULL);
1207         err = sa_buf_hold(zfsvfs->z_os, obj_num, NULL, &db);
1208         if (err) {
1209                 ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
1210                 return (err);
1211         }
1212 
1213         dmu_object_info_from_db(db, &doi);
1214         if (doi.doi_bonus_type != DMU_OT_SA &&
1215             (doi.doi_bonus_type != DMU_OT_ZNODE ||
1216             (doi.doi_bonus_type == DMU_OT_ZNODE &&
1217             doi.doi_bonus_size < sizeof (znode_phys_t)))) {
1218                 sa_buf_rele(db, NULL);
1219                 ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
1220                 return (EINVAL);
1221         }
1222 
1223         zfs_znode_sa_init(zfsvfs, zp, db, doi.doi_bonus_type, NULL);
1224 
1225         /* reload cached values */
1226         SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zfsvfs), NULL,
1227             &gen, sizeof (gen));
1228         SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
1229             &zp->z_size, sizeof (zp->z_size));
1230         SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL,
1231             &zp->z_links, sizeof (zp->z_links));
1232         SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
1233             &zp->z_pflags, sizeof (zp->z_pflags));
1234         SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
1235             &zp->z_atime, sizeof (zp->z_atime));
1236         SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
1237             &zp->z_uid, sizeof (zp->z_uid));
1238         SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL,
1239             &zp->z_gid, sizeof (zp->z_gid));
1240         SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
1241             &mode, sizeof (mode));
1242 
1243         if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) {
1244                 zfs_znode_dmu_fini(zp);
1245                 ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
1246                 return (EIO);
1247         }
1248 
1249         zp->z_mode = mode;
1250 
1251         if (gen != zp->z_gen) {
1252                 zfs_znode_dmu_fini(zp);
1253                 ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
1254                 return (EIO);
1255         }
1256 
1257         zp->z_unlinked = (zp->z_links == 0);
1258         zp->z_blksz = doi.doi_data_block_size;
1259 
1260         ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
1261 
1262         return (0);
1263 }
1264 
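     /*
      * Free the on-disk object backing a znode, along with any external
      * ACL object, then tear down the znode's DMU state and free the
      * in-core znode itself.
      */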
1265 void
1266 zfs_znode_delete(znode_t *zp, dmu_tx_t *tx)
1267 {
1268         zfsvfs_t *zfsvfs = zp->z_zfsvfs;
1269         objset_t *os = zfsvfs->z_os;
1270         uint64_t obj = zp->z_id;
1271         uint64_t acl_obj = zfs_external_acl(zp);
1272 
1273         ZFS_OBJ_HOLD_ENTER(zfsvfs, obj);
1274         if (acl_obj) {
1275                 VERIFY(!zp->z_is_sa);
1276                 VERIFY(0 == dmu_object_free(os, acl_obj, tx));
1277         }
1278         VERIFY(0 == dmu_object_free(os, obj, tx));
1279         zfs_znode_dmu_fini(zp);
1280         ZFS_OBJ_HOLD_EXIT(zfsvfs, obj);
1281         zfs_znode_free(zp);
1282 }
1283 
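     /*
      * Called from the inactive path when the last reference to the vnode
      * is being released.  Drops the hold count; if the vnode is still
      * referenced or has cached pages the znode is left alone, unlinked
      * files are handed to zfs_rmnode() for removal, and otherwise the
      * znode is detached from the DMU and freed.
      */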
1284 void
1285 zfs_zinactive(znode_t *zp)
1286 {
1287         vnode_t *vp = ZTOV(zp);
1288         zfsvfs_t *zfsvfs = zp->z_zfsvfs;
1289         uint64_t z_id = zp->z_id;
1290 
1291         ASSERT(zp->z_sa_hdl);
1292 
1293         /*
1294          * Don't allow a zfs_zget() while we're trying to release this znode.
1295          */
1296         ZFS_OBJ_HOLD_ENTER(zfsvfs, z_id);
1297 
1298         mutex_enter(&zp->z_lock);
1299         mutex_enter(&vp->v_lock);
1300         vp->v_count--;
1301         if (vp->v_count > 0 || vn_has_cached_data(vp)) {
1302                 /*
1303                  * If the hold count is greater than zero, somebody has
1304                  * obtained a new reference on this znode while we were
1305                  * processing it here, so we are done.  If we still have
1306                  * mapped pages then we are also done, since we don't
1307                  * want to inactivate the znode until the pages get pushed.
1308                  *
1309                  * XXX - if vn_has_cached_data(vp) is true, but count == 0,
1310                  * this seems like it would leave the znode hanging with
1311                  * no chance to go inactive...
1312                  */
1313                 mutex_exit(&vp->v_lock);
1314                 mutex_exit(&zp->z_lock);
1315                 ZFS_OBJ_HOLD_EXIT(zfsvfs, z_id);
1316                 return;
1317         }
1318         mutex_exit(&vp->v_lock);
1319 
1320         /*
1321          * If this was the last reference to a file with no links,
1322          * remove the file from the file system.
1323          */
1324         if (zp->z_unlinked) {
1325                 mutex_exit(&zp->z_lock);
1326                 ZFS_OBJ_HOLD_EXIT(zfsvfs, z_id);
1327                 zfs_rmnode(zp);
1328                 return;
1329         }
1330 
1331         mutex_exit(&zp->z_lock);
1332         zfs_znode_dmu_fini(zp);
1333         ZFS_OBJ_HOLD_EXIT(zfsvfs, z_id);
1334         zfs_znode_free(zp);
1335 }
1336 
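     /*
      * Release an in-core znode: remove it from the per-filesystem znode
      * list, free any cached ACL, return the buffer to the znode cache,
      * and drop the VFS hold taken when the znode was allocated.
      */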
1337 void
1338 zfs_znode_free(znode_t *zp)
1339 {
1340         zfsvfs_t *zfsvfs = zp->z_zfsvfs;
1341 
1342         vn_invalid(ZTOV(zp));
1343 
1344         ASSERT(ZTOV(zp)->v_count == 0);
1345 
1346         mutex_enter(&zfsvfs->z_znodes_lock);
1347         POINTER_INVALIDATE(&zp->z_zfsvfs);
1348         list_remove(&zfsvfs->z_all_znodes, zp);
1349         mutex_exit(&zfsvfs->z_znodes_lock);
1350 
1351         if (zp->z_acl_cached) {
1352                 zfs_acl_free(zp->z_acl_cached);
1353                 zp->z_acl_cached = NULL;
1354         }
1355 
1356         kmem_cache_free(znode_cache, zp);
1357 
1358         VFS_RELE(zfsvfs->z_vfs);
1359 }
1360 
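     /*
      * Prepare timestamp updates for a znode.  The atime is updated in the
      * znode itself, while new mtime/ctime values are encoded into the
      * caller-supplied arrays for a later sa_bulk_update(); when no
      * transaction is pending, the atime is marked dirty so it can be
      * pushed out later.
      */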
1361 void
1362 zfs_tstamp_update_setup(znode_t *zp, uint_t flag, uint64_t mtime[2],
1363     uint64_t ctime[2], boolean_t have_tx)
1364 {
1365         timestruc_t     now;
1366 
1367         gethrestime(&now);
1368 
1369         if (have_tx) {  /* will sa_bulk_update happen really soon? */
1370                 zp->z_atime_dirty = 0;
1371                 zp->z_seq++;
1372         } else {
1373                 zp->z_atime_dirty = 1;
1374         }
1375 
1376         if (flag & AT_ATIME) {
1377                 ZFS_TIME_ENCODE(&now, zp->z_atime);
1378         }
1379 
1380         if (flag & AT_MTIME) {
1381                 ZFS_TIME_ENCODE(&now, mtime);
1382                 if (zp->z_zfsvfs->z_use_fuids) {
1383                         zp->z_pflags |= (ZFS_ARCHIVE |
1384                             ZFS_AV_MODIFIED);
1385                 }
1386         }
1387 
1388         if (flag & AT_CTIME) {
1389                 ZFS_TIME_ENCODE(&now, ctime);
1390                 if (zp->z_zfsvfs->z_use_fuids)
1391                         zp->z_pflags |= ZFS_ARCHIVE;
1392         }
1393 }
1394 
1395 /*
1396  * Grow the block size for a file.
1397  *
1398  *      IN:     zp      - znode of file whose block size is to grow.
1399  *              size    - requested block size
1400  *              tx      - open transaction.
1401  *
1402  * NOTE: this function assumes that the znode is write locked.
1403  */
1404 void
1405 zfs_grow_blocksize(znode_t *zp, uint64_t size, dmu_tx_t *tx)
1406 {
1407         int             error;
1408         u_longlong_t    dummy;
1409 
1410         if (size <= zp->z_blksz)
1411                 return;
1412         /*
1413          * If the file size is already greater than the current blocksize,
1414          * we will not grow.  If there is more than one block in a file,
1415          * the blocksize cannot change.
1416          */
1417         if (zp->z_blksz && zp->z_size > zp->z_blksz)
1418                 return;
1419 
1420         error = dmu_object_set_blocksize(zp->z_zfsvfs->z_os, zp->z_id,
1421             size, 0, tx);
1422 
1423         if (error == ENOTSUP)
1424                 return;
1425         ASSERT0(error);
1426 
1427         /* What blocksize did we actually get? */
1428         dmu_object_size_from_db(sa_get_db(zp->z_sa_hdl), &zp->z_blksz, &dummy);
1429 }
1430 
1431 /*
1432  * This is a dummy interface used when pvn_vplist_dirty() should *not*
1433  * be calling back into the fs for a putpage().  E.g.: when truncating
1434  * a file, the pages being "thrown away" don't need to be written out.
1435  */
1436 /* ARGSUSED */
1437 static int
1438 zfs_no_putpage(vnode_t *vp, page_t *pp, u_offset_t *offp, size_t *lenp,
1439     int flags, cred_t *cr)
1440 {
1441         ASSERT(0);
1442         return (0);
1443 }
1444 
1445 /*
1446  * Increase the file length
1447  *
1448  *      IN:     zp      - znode of file to extend.
1449  *              end     - new end-of-file
1450  *
1451  *      RETURN: 0 if success
1452  *              error code if failure
1453  */
1454 static int
1455 zfs_extend(znode_t *zp, uint64_t end)
1456 {
1457         zfsvfs_t *zfsvfs = zp->z_zfsvfs;
1458         dmu_tx_t *tx;
1459         rl_t *rl;
1460         uint64_t newblksz;
1461         int error;
1462 
1463         /*
1464          * We will change z_size; lock the whole file.
1465          */
1466         rl = zfs_range_lock(zp, 0, UINT64_MAX, RL_WRITER);
1467 
1468         /*
1469          * Nothing to do if file already at desired length.
1470          */
1471         if (end <= zp->z_size) {
1472                 zfs_range_unlock(rl);
1473                 return (0);
1474         }
1475 top:
1476         tx = dmu_tx_create(zfsvfs->z_os);
1477         dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
1478         zfs_sa_upgrade_txholds(tx, zp);
1479         if (end > zp->z_blksz &&
1480             (!ISP2(zp->z_blksz) || zp->z_blksz < zfsvfs->z_max_blksz)) {
1481                 /*
1482                  * We are growing the file past the current block size.
1483                  */
1484                 if (zp->z_blksz > zp->z_zfsvfs->z_max_blksz) {
1485                         ASSERT(!ISP2(zp->z_blksz));
1486                         newblksz = MIN(end, SPA_MAXBLOCKSIZE);
1487                 } else {
1488                         newblksz = MIN(end, zp->z_zfsvfs->z_max_blksz);
1489                 }
1490                 dmu_tx_hold_write(tx, zp->z_id, 0, newblksz);
1491         } else {
1492                 newblksz = 0;
1493         }
1494 
1495         error = dmu_tx_assign(tx, TXG_NOWAIT);
1496         if (error) {
1497                 if (error == ERESTART) {
1498                         dmu_tx_wait(tx);
1499                         dmu_tx_abort(tx);
1500                         goto top;
1501                 }
1502                 dmu_tx_abort(tx);
1503                 zfs_range_unlock(rl);
1504                 return (error);
1505         }
1506 
1507         if (newblksz)
1508                 zfs_grow_blocksize(zp, newblksz, tx);
1509 
1510         zp->z_size = end;
1511 
1512         VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zp->z_zfsvfs),
1513             &zp->z_size, sizeof (zp->z_size), tx));
1514 
1515         zfs_range_unlock(rl);
1516 
1517         dmu_tx_commit(tx);
1518 
1519         return (0);
1520 }
1521 
1522 /*
1523  * Free space in a file.
1524  *
1525  *      IN:     zp      - znode of file to free data in.
1526  *              off     - start of section to free.
1527  *              len     - length of section to free.
1528  *
1529  *      RETURN: 0 if success
1530  *              error code if failure
1531  */
1532 static int
1533 zfs_free_range(znode_t *zp, uint64_t off, uint64_t len)
1534 {
1535         zfsvfs_t *zfsvfs = zp->z_zfsvfs;
1536         rl_t *rl;
1537         int error;
1538 
1539         /*
1540          * Lock the range being freed.
1541          */
1542         rl = zfs_range_lock(zp, off, len, RL_WRITER);
1543 
1544         /*
1545          * Nothing to do if the range being freed starts at or beyond EOF.
1546          */
1547         if (off >= zp->z_size) {
1548                 zfs_range_unlock(rl);
1549                 return (0);
1550         }
1551 
1552         if (off + len > zp->z_size)
1553                 len = zp->z_size - off;
1554 
1555         error = dmu_free_long_range(zfsvfs->z_os, zp->z_id, off, len);
1556 
1557         zfs_range_unlock(rl);
1558 
1559         return (error);
1560 }
1561 
1562 /*
1563  * Truncate a file
1564  *
1565  *      IN:     zp      - znode of file to free data in.
1566  *              end     - new end-of-file.
1567  *
1568  *      RETURN: 0 if success
1569  *              error code if failure
1570  */
1571 static int
1572 zfs_trunc(znode_t *zp, uint64_t end)
1573 {
1574         zfsvfs_t *zfsvfs = zp->z_zfsvfs;
1575         vnode_t *vp = ZTOV(zp);
1576         dmu_tx_t *tx;
1577         rl_t *rl;
1578         int error;
1579         sa_bulk_attr_t bulk[2];
1580         int count = 0;
1581 
1582         /*
1583          * We will change zp_size, lock the whole file.
1584          */
1585         rl = zfs_range_lock(zp, 0, UINT64_MAX, RL_WRITER);
1586 
1587         /*
1588          * Nothing to do if file already at desired length.
1589          */
1590         if (end >= zp->z_size) {
1591                 zfs_range_unlock(rl);
1592                 return (0);
1593         }
1594 
1595         error = dmu_free_long_range(zfsvfs->z_os, zp->z_id, end, -1);
1596         if (error) {
1597                 zfs_range_unlock(rl);
1598                 return (error);
1599         }
1600 top:
1601         tx = dmu_tx_create(zfsvfs->z_os);
1602         dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
1603         zfs_sa_upgrade_txholds(tx, zp);
1604         error = dmu_tx_assign(tx, TXG_NOWAIT);
1605         if (error) {
1606                 if (error == ERESTART) {
1607                         dmu_tx_wait(tx);
1608                         dmu_tx_abort(tx);
1609                         goto top;
1610                 }
1611                 dmu_tx_abort(tx);
1612                 zfs_range_unlock(rl);
1613                 return (error);
1614         }
1615 
1616         zp->z_size = end;
1617         SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs),
1618             NULL, &zp->z_size, sizeof (zp->z_size));
1619 
1620         if (end == 0) {
1621                 zp->z_pflags &= ~ZFS_SPARSE;
1622                 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
1623                     NULL, &zp->z_pflags, 8);
1624         }
1625         VERIFY(sa_bulk_update(zp->z_sa_hdl, bulk, count, tx) == 0);
1626 
1627         dmu_tx_commit(tx);
1628 
1629         /*
1630          * Clear any mapped pages in the truncated region.  This has to
1631          * happen outside of the transaction to avoid the possibility of
1632          * a deadlock with someone trying to push a page that we are
1633          * about to invalidate.
1634          */
1635         if (vn_has_cached_data(vp)) {
1636                 page_t *pp;
1637                 uint64_t start = end & PAGEMASK;
1638                 int poff = end & PAGEOFFSET;
1639 
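                     /*
                      * "start" is the page-aligned offset of the page that
                      * contains the new EOF.  If the new EOF is not page
                      * aligned, the tail of that page is zeroed in place and
                      * "start" is advanced past it; every whole page from
                      * "start" onward is then invalidated without being
                      * written back (zfs_no_putpage asserts nothing is
                      * pushed).
                      */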
1640                 if (poff != 0 && (pp = page_lookup(vp, start, SE_SHARED))) {
1641                         /*
1642                          * We need to zero a partial page.
1643                          */
1644                         pagezero(pp, poff, PAGESIZE - poff);
1645                         start += PAGESIZE;
1646                         page_unlock(pp);
1647                 }
1648                 error = pvn_vplist_dirty(vp, start, zfs_no_putpage,
1649                     B_INVAL | B_TRUNC, NULL);
1650                 ASSERT(error == 0);
1651         }
1652 
1653         zfs_range_unlock(rl);
1654 
1655         return (0);
1656 }
1657 
1658 /*
1659  * Free space in a file
1660  *
1661  *      IN:     zp      - znode of file to free data in.
1662  *              off     - start of range
1663  *              len     - length of range to free (0 => from off to EOF)
1664  *              flag    - current file open mode flags.
1665  *              log     - TRUE if this action should be logged
1666  *
1667  *      RETURN: 0 if success
1668  *              error code if failure
1669  */
1670 int
1671 zfs_freesp(znode_t *zp, uint64_t off, uint64_t len, int flag, boolean_t log)
1672 {
1673         vnode_t *vp = ZTOV(zp);
1674         dmu_tx_t *tx;
1675         zfsvfs_t *zfsvfs = zp->z_zfsvfs;
1676         zilog_t *zilog = zfsvfs->z_log;
1677         uint64_t mode;
1678         uint64_t mtime[2], ctime[2];
1679         sa_bulk_attr_t bulk[3];
1680         int count = 0;
1681         int error;
1682 
1683         if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs), &mode,
1684             sizeof (mode))) != 0)
1685                 return (error);
1686 
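             /*
              * If the range starts beyond the current end-of-file there is
              * nothing to free; the request reduces to extending the file
              * out to off + len.
              */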
1687         if (off > zp->z_size) {
1688                 error = zfs_extend(zp, off + len);
1689                 if (error == 0 && log)
1690                         goto log;
1691                 else
1692                         return (error);
1693         }
1694 
1695         /*
1696          * Check for any locks in the region to be freed.
1697          */
1698 
1699         if (MANDLOCK(vp, (mode_t)mode)) {
1700                 uint64_t length = (len ? len : zp->z_size - off);
1701                 if (error = chklock(vp, FWRITE, off, length, flag, NULL))
1702                         return (error);
1703         }
1704 
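             /*
              * A zero length means truncate the file at "off".  Otherwise
              * free (punch a hole in) the range [off, off + len) and, if
              * the range extends past the current end-of-file, grow the
              * file to cover it.
              */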
1705         if (len == 0) {
1706                 error = zfs_trunc(zp, off);
1707         } else {
1708                 if ((error = zfs_free_range(zp, off, len)) == 0 &&
1709                     off + len > zp->z_size)
1710                         error = zfs_extend(zp, off + len);
1711         }
1712         if (error || !log)
1713                 return (error);
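             /*
              * The space itself has already been freed or allocated by the
              * calls above, each of which used its own transaction(s).  All
              * that remains is to update mtime/ctime and log the operation,
              * which is done here in a small separate transaction.
              */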
1714 log:
1715         tx = dmu_tx_create(zfsvfs->z_os);
1716         dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
1717         zfs_sa_upgrade_txholds(tx, zp);
1718         error = dmu_tx_assign(tx, TXG_NOWAIT);
1719         if (error) {
1720                 if (error == ERESTART) {
1721                         dmu_tx_wait(tx);
1722                         dmu_tx_abort(tx);
1723                         goto log;
1724                 }
1725                 dmu_tx_abort(tx);
1726                 return (error);
1727         }
1728 
1729         SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, mtime, 16);
1730         SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, ctime, 16);
1731         SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
1732             NULL, &zp->z_pflags, 8);
1733         zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime, B_TRUE);
1734         error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
1735         ASSERT(error == 0);
1736 
1737         zfs_log_truncate(zilog, tx, TX_TRUNCATE, zp, off, len);
1738 
1739         dmu_tx_commit(tx);
1740         return (0);
1741 }
1742 
1743 void
1744 zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
1745 {
1746         zfsvfs_t        zfsvfs;
1747         uint64_t        moid, obj, sa_obj, version;
1748         uint64_t        sense = ZFS_CASE_SENSITIVE;
1749         uint64_t        norm = 0;
1750         nvpair_t        *elem;
1751         int             error;
1752         int             i;
1753         znode_t         *rootzp = NULL;
1754         vnode_t         *vp;
1755         vattr_t         vattr;
1756         znode_t         *zp;
1757         zfs_acl_ids_t   acl_ids;
1758 
1759         /*
1760          * First attempt to create master node.
1761          */
1762         /*
1763          * In an empty objset, there are no blocks to read and thus
1764          * there can be no i/o errors (which we assert below).
1765          */
1766         moid = MASTER_NODE_OBJ;
1767         error = zap_create_claim(os, moid, DMU_OT_MASTER_NODE,
1768             DMU_OT_NONE, 0, tx);
1769         ASSERT(error == 0);
1770 
1771         /*
1772          * Set starting attributes.
1773          */
1774         version = zfs_zpl_version_map(spa_version(dmu_objset_spa(os)));
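             /*
              * "version" starts at the newest ZPL version the pool can
              * support; an explicit "version" entry in zplprops may only
              * lower it.  Every other ZPL property is recorded directly in
              * the master node object.
              */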
1775         elem = NULL;
1776         while ((elem = nvlist_next_nvpair(zplprops, elem)) != NULL) {
1777                 /* For the moment we expect all zpl props to be uint64_ts */
1778                 uint64_t val;
1779                 char *name;
1780 
1781                 ASSERT(nvpair_type(elem) == DATA_TYPE_UINT64);
1782                 VERIFY(nvpair_value_uint64(elem, &val) == 0);
1783                 name = nvpair_name(elem);
1784                 if (strcmp(name, zfs_prop_to_name(ZFS_PROP_VERSION)) == 0) {
1785                         if (val < version)
1786                                 version = val;
1787                 } else {
1788                         error = zap_update(os, moid, name, 8, 1, &val, tx);
1789                 }
1790                 ASSERT(error == 0);
1791                 if (strcmp(name, zfs_prop_to_name(ZFS_PROP_NORMALIZE)) == 0)
1792                         norm = val;
1793                 else if (strcmp(name, zfs_prop_to_name(ZFS_PROP_CASE)) == 0)
1794                         sense = val;
1795         }
1796         ASSERT(version != 0);
1797         error = zap_update(os, moid, ZPL_VERSION_STR, 8, 1, &version, tx);
1798 
1799         /*
1800          * Create zap object used for SA attribute registration
1801          */
1802 
1803         if (version >= ZPL_VERSION_SA) {
1804                 sa_obj = zap_create(os, DMU_OT_SA_MASTER_NODE,
1805                     DMU_OT_NONE, 0, tx);
1806                 error = zap_add(os, moid, ZFS_SA_ATTRS, 8, 1, &sa_obj, tx);
1807                 ASSERT(error == 0);
1808         } else {
1809                 sa_obj = 0;
1810         }
1811         /*
1812          * Create a delete queue.
1813          */
1814         obj = zap_create(os, DMU_OT_UNLINKED_SET, DMU_OT_NONE, 0, tx);
1815 
1816         error = zap_add(os, moid, ZFS_UNLINKED_SET, 8, 1, &obj, tx);
1817         ASSERT(error == 0);
1818 
1819         /*
1820          * Create root znode.  Create minimal znode/vnode/zfsvfs
1821          * to allow zfs_mknode to work.
1822          */
1823         vattr.va_mask = AT_MODE|AT_UID|AT_GID|AT_TYPE;
1824         vattr.va_type = VDIR;
1825         vattr.va_mode = S_IFDIR|0755;
1826         vattr.va_uid = crgetuid(cr);
1827         vattr.va_gid = crgetgid(cr);
1828 
1829         rootzp = kmem_cache_alloc(znode_cache, KM_SLEEP);
1830         ASSERT(!POINTER_IS_VALID(rootzp->z_zfsvfs));
1831         rootzp->z_moved = 0;
1832         rootzp->z_unlinked = 0;
1833         rootzp->z_atime_dirty = 0;
1834         rootzp->z_is_sa = USE_SA(version, os);
1835 
1836         vp = ZTOV(rootzp);
1837         vn_reinit(vp);
1838         vp->v_type = VDIR;
1839 
1840         bzero(&zfsvfs, sizeof (zfsvfs_t));
1841 
1842         zfsvfs.z_os = os;
1843         zfsvfs.z_parent = &zfsvfs;
1844         zfsvfs.z_version = version;
1845         zfsvfs.z_use_fuids = USE_FUIDS(version, os);
1846         zfsvfs.z_use_sa = USE_SA(version, os);
1847         zfsvfs.z_norm = norm;
1848 
1849         error = sa_setup(os, sa_obj, zfs_attr_table, ZPL_END,
1850             &zfsvfs.z_attr_table);
1851 
1852         ASSERT(error == 0);
1853 
1854         /*
1855          * Fold case on file systems that are always or sometimes case
1856          * insensitive.
1857          */
1858         if (sense == ZFS_CASE_INSENSITIVE || sense == ZFS_CASE_MIXED)
1859                 zfsvfs.z_norm |= U8_TEXTPREP_TOUPPER;
1860 
1861         mutex_init(&zfsvfs.z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
1862         list_create(&zfsvfs.z_all_znodes, sizeof (znode_t),
1863             offsetof(znode_t, z_link_node));
1864 
1865         for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
1866                 mutex_init(&zfsvfs.z_hold_mtx[i], NULL, MUTEX_DEFAULT, NULL);
1867 
1868         rootzp->z_zfsvfs = &zfsvfs;
1869         VERIFY(0 == zfs_acl_ids_create(rootzp, IS_ROOT_NODE, &vattr,
1870             cr, NULL, &acl_ids));
1871         zfs_mknode(rootzp, &vattr, tx, cr, IS_ROOT_NODE, &zp, &acl_ids);
1872         ASSERT3P(zp, ==, rootzp);
1873         ASSERT(!vn_in_dnlc(ZTOV(rootzp))); /* not valid to move */
1874         error = zap_add(os, moid, ZFS_ROOT_OBJ, 8, 1, &rootzp->z_id, tx);
1875         ASSERT(error == 0);
1876         zfs_acl_ids_free(&acl_ids);
1877         POINTER_INVALIDATE(&rootzp->z_zfsvfs);
1878 
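             /*
              * The root znode was created against a throw-away stack zfsvfs
              * that will never be mounted, so it is torn down by hand here
              * instead of through the normal VN_RELE()/inactive path.
              */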
1879         ZTOV(rootzp)->v_count = 0;
1880         sa_handle_destroy(rootzp->z_sa_hdl);
1881         kmem_cache_free(znode_cache, rootzp);
1882 
1883         /*
1884          * Create shares directory
1885          */
1886 
1887         error = zfs_create_share_dir(&zfsvfs, tx);
1888 
1889         ASSERT(error == 0);
1890 
1891         for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
1892                 mutex_destroy(&zfsvfs.z_hold_mtx[i]);
1893 }
1894 
1895 #endif /* _KERNEL */
1896 
1897 static int
1898 zfs_sa_setup(objset_t *osp, sa_attr_type_t **sa_table)
1899 {
1900         uint64_t sa_obj = 0;
1901         int error;
1902 
1903         error = zap_lookup(osp, MASTER_NODE_OBJ, ZFS_SA_ATTRS, 8, 1, &sa_obj);
1904         if (error != 0 && error != ENOENT)
1905                 return (error);
1906 
1907         error = sa_setup(osp, sa_obj, zfs_attr_table, ZPL_END, sa_table);
1908         return (error);
1909 }
1910 
1911 static int
1912 zfs_grab_sa_handle(objset_t *osp, uint64_t obj, sa_handle_t **hdlp,
1913     dmu_buf_t **db, void *tag)
1914 {
1915         dmu_object_info_t doi;
1916         int error;
1917 
1918         if ((error = sa_buf_hold(osp, obj, tag, db)) != 0)
1919                 return (error);
1920 
1921         dmu_object_info_from_db(*db, &doi);
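             /*
              * Only objects whose bonus buffer holds either system
              * attributes (SA) or a full legacy znode_phys_t are ZPL
              * objects; anything else cannot be interpreted here.
              */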
1922         if ((doi.doi_bonus_type != DMU_OT_SA &&
1923             doi.doi_bonus_type != DMU_OT_ZNODE) ||
1924             (doi.doi_bonus_type == DMU_OT_ZNODE &&
1925             doi.doi_bonus_size < sizeof (znode_phys_t))) {
1926                 sa_buf_rele(*db, tag);
1927                 return (ENOTSUP);
1928         }
1929 
1930         error = sa_handle_get(osp, obj, NULL, SA_HDL_PRIVATE, hdlp);
1931         if (error != 0) {
1932                 sa_buf_rele(*db, tag);
1933                 return (error);
1934         }
1935 
1936         return (0);
1937 }
1938 
1939 void
1940 zfs_release_sa_handle(sa_handle_t *hdl, dmu_buf_t *db, void *tag)
1941 {
1942         sa_handle_destroy(hdl);
1943         sa_buf_rele(db, tag);
1944 }
1945 
1946 /*
1947  * Given an object number, return its parent object number and whether
1948  * or not the object is an extended attribute directory.
1949  */
1950 static int
1951 zfs_obj_to_pobj(sa_handle_t *hdl, sa_attr_type_t *sa_table, uint64_t *pobjp,
1952     int *is_xattrdir)
1953 {
1954         uint64_t parent;
1955         uint64_t pflags;
1956         uint64_t mode;
1957         sa_bulk_attr_t bulk[3];
1958         int count = 0;
1959         int error;
1960 
1961         SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_PARENT], NULL,
1962             &parent, sizeof (parent));
1963         SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_FLAGS], NULL,
1964             &pflags, sizeof (pflags));
1965         SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_MODE], NULL,
1966             &mode, sizeof (mode));
1967 
1968         if ((error = sa_bulk_lookup(hdl, bulk, count)) != 0)
1969                 return (error);
1970 
1971         *pobjp = parent;
1972         *is_xattrdir = ((pflags & ZFS_XATTR) != 0) && S_ISDIR(mode);
1973 
1974         return (0);
1975 }
1976 
1977 /*
1978  * Given an object number, return some zpl level statistics
1979  */
1980 static int
1981 zfs_obj_to_stats_impl(sa_handle_t *hdl, sa_attr_type_t *sa_table,
1982     zfs_stat_t *sb)
1983 {
1984         sa_bulk_attr_t bulk[4];
1985         int count = 0;
1986 
1987         SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_MODE], NULL,
1988             &sb->zs_mode, sizeof (sb->zs_mode));
1989         SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_GEN], NULL,
1990             &sb->zs_gen, sizeof (sb->zs_gen));
1991         SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_LINKS], NULL,
1992             &sb->zs_links, sizeof (sb->zs_links));
1993         SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_CTIME], NULL,
1994             &sb->zs_ctime, sizeof (sb->zs_ctime));
1995 
1996         return (sa_bulk_lookup(hdl, bulk, count));
1997 }
1998 
1999 static int
2000 zfs_obj_to_path_impl(objset_t *osp, uint64_t obj, sa_handle_t *hdl,
2001     sa_attr_type_t *sa_table, char *buf, int len)
2002 {
2003         sa_handle_t *sa_hdl;
2004         sa_handle_t *prevhdl = NULL;
2005         dmu_buf_t *prevdb = NULL;
2006         dmu_buf_t *sa_db = NULL;
2007         char *path = buf + len - 1;
2008         int error;
2009 
2010         *path = '\0';
2011         sa_hdl = hdl;
2012 
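             /*
              * Walk from "obj" up through its parents to the root of the
              * file system, prepending "/<name>" for each level into the
              * tail of "buf".  The walk ends when an object is its own
              * parent (the root).  Extended attribute directories are
              * represented by the literal component "<xattrdir>".
              */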
2013         for (;;) {
2014                 uint64_t pobj;
2015                 char component[MAXNAMELEN + 2];
2016                 size_t complen;
2017                 int is_xattrdir;
2018 
2019                 if (prevdb)
2020                         zfs_release_sa_handle(prevhdl, prevdb, FTAG);
2021 
2022                 if ((error = zfs_obj_to_pobj(sa_hdl, sa_table, &pobj,
2023                     &is_xattrdir)) != 0)
2024                         break;
2025 
2026                 if (pobj == obj) {
2027                         if (path[0] != '/')
2028                                 *--path = '/';
2029                         break;
2030                 }
2031 
2032                 component[0] = '/';
2033                 if (is_xattrdir) {
2034                         (void) sprintf(component + 1, "<xattrdir>");
2035                 } else {
2036                         error = zap_value_search(osp, pobj, obj,
2037                             ZFS_DIRENT_OBJ(-1ULL), component + 1);
2038                         if (error != 0)
2039                                 break;
2040                 }
2041 
2042                 complen = strlen(component);
2043                 path -= complen;
2044                 ASSERT(path >= buf);
2045                 bcopy(component, path, complen);
2046                 obj = pobj;
2047 
2048                 if (sa_hdl != hdl) {
2049                         prevhdl = sa_hdl;
2050                         prevdb = sa_db;
2051                 }
2052                 error = zfs_grab_sa_handle(osp, obj, &sa_hdl, &sa_db, FTAG);
2053                 if (error != 0) {
2054                         sa_hdl = prevhdl;
2055                         sa_db = prevdb;
2056                         break;
2057                 }
2058         }
2059 
2060         if (sa_hdl != NULL && sa_hdl != hdl) {
2061                 ASSERT(sa_db != NULL);
2062                 zfs_release_sa_handle(sa_hdl, sa_db, FTAG);
2063         }
2064 
2065         if (error == 0)
2066                 (void) memmove(buf, path, buf + len - path);
2067 
2068         return (error);
2069 }
2070 
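     /*
      * Given an object number, reconstruct its path name relative to the
      * root of the file system (e.g. "/dir1/dir2/file") and copy it into
      * "buf" of size "len".
      */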
2071 int
2072 zfs_obj_to_path(objset_t *osp, uint64_t obj, char *buf, int len)
2073 {
2074         sa_attr_type_t *sa_table;
2075         sa_handle_t *hdl;
2076         dmu_buf_t *db;
2077         int error;
2078 
2079         error = zfs_sa_setup(osp, &sa_table);
2080         if (error != 0)
2081                 return (error);
2082 
2083         error = zfs_grab_sa_handle(osp, obj, &hdl, &db, FTAG);
2084         if (error != 0)
2085                 return (error);
2086 
2087         error = zfs_obj_to_path_impl(osp, obj, hdl, sa_table, buf, len);
2088 
2089         zfs_release_sa_handle(hdl, db, FTAG);
2090         return (error);
2091 }
2092 
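     /*
      * Given an object number, return its zpl-level statistics and, using
      * the same SA handle, its path name relative to the root of the file
      * system.
      */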
2093 int
2094 zfs_obj_to_stats(objset_t *osp, uint64_t obj, zfs_stat_t *sb,
2095     char *buf, int len)
2096 {
2097         char *path = buf + len - 1;
2098         sa_attr_type_t *sa_table;
2099         sa_handle_t *hdl;
2100         dmu_buf_t *db;
2101         int error;
2102 
2103         *path = '\0';
2104 
2105         error = zfs_sa_setup(osp, &sa_table);
2106         if (error != 0)
2107                 return (error);
2108 
2109         error = zfs_grab_sa_handle(osp, obj, &hdl, &db, FTAG);
2110         if (error != 0)
2111                 return (error);
2112 
2113         error = zfs_obj_to_stats_impl(hdl, sa_table, sb);
2114         if (error != 0) {
2115                 zfs_release_sa_handle(hdl, db, FTAG);
2116                 return (error);
2117         }
2118 
2119         error = zfs_obj_to_path_impl(osp, obj, hdl, sa_table, buf, len);
2120 
2121         zfs_release_sa_handle(hdl, db, FTAG);
2122         return (error);
2123 }