3006 VERIFY[S,U,P] and ASSERT[S,U,P] frequently check if first argument is zero
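
The change below adopts a shorthand for zero checks: call sites that were
written as ASSERT3U(err, ==, 0) or ASSERT3P(err, ==, 0) now read ASSERT0(err).
A minimal sketch of the idea, assuming simplified macro definitions rather
than the actual <sys/debug.h> ones:

    /*
     * Illustrative sketch only: the real illumos macros expand through
     * the VERIFY3_IMPL()/assfail3() machinery so a failure also prints
     * the offending value. The point is simply that ASSERT0(x) reads
     * "assert x is zero" instead of a three-argument comparison.
     */
    #define VERIFY0(x)      VERIFY3U((x), ==, 0)
    #ifdef DEBUG
    #define ASSERT0(x)      VERIFY0(x)
    #else
    #define ASSERT0(x)      ((void)0)
    #endif

With that shorthand, a sequence such as "err = dmu_object_claim(...);
ASSERT3U(err, ==, 0);" becomes "ASSERT0(err);", as at the changed lines
inside zfs_mknode() below.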
    
      
          --- old/usr/src/uts/common/fs/zfs/zfs_znode.c
          +++ new/usr/src/uts/common/fs/zfs/zfs_znode.c
   1    1  /*
   2    2   * CDDL HEADER START
   3    3   *
   4    4   * The contents of this file are subject to the terms of the
   5    5   * Common Development and Distribution License (the "License").
   6    6   * You may not use this file except in compliance with the License.
   7    7   *
   8    8   * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9    9   * or http://www.opensolaris.org/os/licensing.
  10   10   * See the License for the specific language governing permissions
  11   11   * and limitations under the License.
  12   12   *
  
  13   13   * When distributing Covered Code, include this CDDL HEADER in each
  14   14   * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15   15   * If applicable, add the following below this CDDL HEADER, with the
  16   16   * fields enclosed by brackets "[]" replaced with your own identifying
  17   17   * information: Portions Copyright [yyyy] [name of copyright owner]
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  /*
  22   22   * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
       23 + * Copyright (c) 2012 by Delphix. All rights reserved.
  23   24   */
  24   25  
       26 +
  25   27  /* Portions Copyright 2007 Jeremy Teo */
  26   28  
  27   29  #ifdef _KERNEL
  28   30  #include <sys/types.h>
  29   31  #include <sys/param.h>
  30   32  #include <sys/time.h>
  31   33  #include <sys/systm.h>
  32   34  #include <sys/sysmacros.h>
  33   35  #include <sys/resource.h>
  34   36  #include <sys/mntent.h>
  35   37  #include <sys/mkdev.h>
  36   38  #include <sys/u8_textprep.h>
  37   39  #include <sys/dsl_dataset.h>
  38   40  #include <sys/vfs.h>
  39   41  #include <sys/vfs_opreg.h>
  40   42  #include <sys/vnode.h>
  41   43  #include <sys/file.h>
  42   44  #include <sys/kmem.h>
  43   45  #include <sys/errno.h>
  44   46  #include <sys/unistd.h>
  45   47  #include <sys/mode.h>
  46   48  #include <sys/atomic.h>
  47   49  #include <vm/pvn.h>
  48   50  #include "fs/fs_subr.h"
  49   51  #include <sys/zfs_dir.h>
  50   52  #include <sys/zfs_acl.h>
  51   53  #include <sys/zfs_ioctl.h>
  52   54  #include <sys/zfs_rlock.h>
  53   55  #include <sys/zfs_fuid.h>
  54   56  #include <sys/dnode.h>
  55   57  #include <sys/fs/zfs.h>
  56   58  #include <sys/kidmap.h>
  57   59  #endif /* _KERNEL */
  58   60  
  59   61  #include <sys/dmu.h>
  60   62  #include <sys/refcount.h>
  61   63  #include <sys/stat.h>
  62   64  #include <sys/zap.h>
  63   65  #include <sys/zfs_znode.h>
  64   66  #include <sys/sa.h>
  65   67  #include <sys/zfs_sa.h>
  66   68  #include <sys/zfs_stat.h>
  67   69  
  68   70  #include "zfs_prop.h"
  69   71  #include "zfs_comutil.h"
  70   72  
  71   73  /*
  72   74   * Define ZNODE_STATS to turn on statistic gathering. By default, it is only
  73   75   * turned on when DEBUG is also defined.
  74   76   */
  75   77  #ifdef  DEBUG
  76   78  #define ZNODE_STATS
  77   79  #endif  /* DEBUG */
  78   80  
  79   81  #ifdef  ZNODE_STATS
  80   82  #define ZNODE_STAT_ADD(stat)                    ((stat)++)
  81   83  #else
  82   84  #define ZNODE_STAT_ADD(stat)                    /* nothing */
  83   85  #endif  /* ZNODE_STATS */
  84   86  
  85   87  /*
  86   88   * Functions needed for userland (ie: libzpool) are not put under
  87   89   * #ifdef _KERNEL; the rest of the functions have dependencies
  88   90   * (such as VFS logic) that will not compile easily in userland.
  89   91   */
  90   92  #ifdef _KERNEL
  91   93  /*
  92   94   * Needed to close a small window in zfs_znode_move() that allows the zfsvfs to
  93   95   * be freed before it can be safely accessed.
  94   96   */
  95   97  krwlock_t zfsvfs_lock;
  96   98  
  97   99  static kmem_cache_t *znode_cache = NULL;
  98  100  
  99  101  /*ARGSUSED*/
 100  102  static void
 101  103  znode_evict_error(dmu_buf_t *dbuf, void *user_ptr)
 102  104  {
 103  105          /*
 104  106           * We should never drop all dbuf refs without first clearing
 105  107           * the eviction callback.
 106  108           */
 107  109          panic("evicting znode %p\n", user_ptr);
 108  110  }
 109  111  
 110  112  /*ARGSUSED*/
 111  113  static int
 112  114  zfs_znode_cache_constructor(void *buf, void *arg, int kmflags)
 113  115  {
 114  116          znode_t *zp = buf;
 115  117  
 116  118          ASSERT(!POINTER_IS_VALID(zp->z_zfsvfs));
 117  119  
 118  120          zp->z_vnode = vn_alloc(kmflags);
 119  121          if (zp->z_vnode == NULL) {
 120  122                  return (-1);
 121  123          }
 122  124          ZTOV(zp)->v_data = zp;
 123  125  
 124  126          list_link_init(&zp->z_link_node);
 125  127  
 126  128          mutex_init(&zp->z_lock, NULL, MUTEX_DEFAULT, NULL);
 127  129          rw_init(&zp->z_parent_lock, NULL, RW_DEFAULT, NULL);
 128  130          rw_init(&zp->z_name_lock, NULL, RW_DEFAULT, NULL);
 129  131          mutex_init(&zp->z_acl_lock, NULL, MUTEX_DEFAULT, NULL);
 130  132  
 131  133          mutex_init(&zp->z_range_lock, NULL, MUTEX_DEFAULT, NULL);
 132  134          avl_create(&zp->z_range_avl, zfs_range_compare,
 133  135              sizeof (rl_t), offsetof(rl_t, r_node));
 134  136  
 135  137          zp->z_dirlocks = NULL;
 136  138          zp->z_acl_cached = NULL;
 137  139          zp->z_moved = 0;
 138  140          return (0);
 139  141  }
 140  142  
 141  143  /*ARGSUSED*/
 142  144  static void
 143  145  zfs_znode_cache_destructor(void *buf, void *arg)
 144  146  {
 145  147          znode_t *zp = buf;
 146  148  
 147  149          ASSERT(!POINTER_IS_VALID(zp->z_zfsvfs));
 148  150          ASSERT(ZTOV(zp)->v_data == zp);
 149  151          vn_free(ZTOV(zp));
 150  152          ASSERT(!list_link_active(&zp->z_link_node));
 151  153          mutex_destroy(&zp->z_lock);
 152  154          rw_destroy(&zp->z_parent_lock);
 153  155          rw_destroy(&zp->z_name_lock);
 154  156          mutex_destroy(&zp->z_acl_lock);
 155  157          avl_destroy(&zp->z_range_avl);
 156  158          mutex_destroy(&zp->z_range_lock);
 157  159  
 158  160          ASSERT(zp->z_dirlocks == NULL);
 159  161          ASSERT(zp->z_acl_cached == NULL);
 160  162  }
 161  163  
 162  164  #ifdef  ZNODE_STATS
 163  165  static struct {
 164  166          uint64_t zms_zfsvfs_invalid;
 165  167          uint64_t zms_zfsvfs_recheck1;
 166  168          uint64_t zms_zfsvfs_unmounted;
 167  169          uint64_t zms_zfsvfs_recheck2;
 168  170          uint64_t zms_obj_held;
 169  171          uint64_t zms_vnode_locked;
 170  172          uint64_t zms_not_only_dnlc;
 171  173  } znode_move_stats;
 172  174  #endif  /* ZNODE_STATS */
 173  175  
 174  176  static void
 175  177  zfs_znode_move_impl(znode_t *ozp, znode_t *nzp)
 176  178  {
 177  179          vnode_t *vp;
 178  180  
 179  181          /* Copy fields. */
 180  182          nzp->z_zfsvfs = ozp->z_zfsvfs;
 181  183  
 182  184          /* Swap vnodes. */
 183  185          vp = nzp->z_vnode;
 184  186          nzp->z_vnode = ozp->z_vnode;
 185  187          ozp->z_vnode = vp; /* let destructor free the overwritten vnode */
 186  188          ZTOV(ozp)->v_data = ozp;
 187  189          ZTOV(nzp)->v_data = nzp;
 188  190  
 189  191          nzp->z_id = ozp->z_id;
 190  192          ASSERT(ozp->z_dirlocks == NULL); /* znode not in use */
 191  193          ASSERT(avl_numnodes(&ozp->z_range_avl) == 0);
 192  194          nzp->z_unlinked = ozp->z_unlinked;
 193  195          nzp->z_atime_dirty = ozp->z_atime_dirty;
 194  196          nzp->z_zn_prefetch = ozp->z_zn_prefetch;
 195  197          nzp->z_blksz = ozp->z_blksz;
 196  198          nzp->z_seq = ozp->z_seq;
 197  199          nzp->z_mapcnt = ozp->z_mapcnt;
 198  200          nzp->z_gen = ozp->z_gen;
 199  201          nzp->z_sync_cnt = ozp->z_sync_cnt;
 200  202          nzp->z_is_sa = ozp->z_is_sa;
 201  203          nzp->z_sa_hdl = ozp->z_sa_hdl;
 202  204          bcopy(ozp->z_atime, nzp->z_atime, sizeof (uint64_t) * 2);
 203  205          nzp->z_links = ozp->z_links;
 204  206          nzp->z_size = ozp->z_size;
 205  207          nzp->z_pflags = ozp->z_pflags;
 206  208          nzp->z_uid = ozp->z_uid;
 207  209          nzp->z_gid = ozp->z_gid;
 208  210          nzp->z_mode = ozp->z_mode;
 209  211  
 210  212          /*
 211  213           * Since this is just an idle znode and kmem is already dealing with
 212  214           * memory pressure, release any cached ACL.
 213  215           */
 214  216          if (ozp->z_acl_cached) {
 215  217                  zfs_acl_free(ozp->z_acl_cached);
 216  218                  ozp->z_acl_cached = NULL;
 217  219          }
 218  220  
 219  221          sa_set_userp(nzp->z_sa_hdl, nzp);
 220  222  
 221  223          /*
 222  224           * Invalidate the original znode by clearing fields that provide a
 223  225           * pointer back to the znode. Set the low bit of the vfs pointer to
 224  226           * ensure that zfs_znode_move() recognizes the znode as invalid in any
 225  227           * subsequent callback.
 226  228           */
 227  229          ozp->z_sa_hdl = NULL;
 228  230          POINTER_INVALIDATE(&ozp->z_zfsvfs);
 229  231  
 230  232          /*
 231  233           * Mark the znode.
 232  234           */
 233  235          nzp->z_moved = 1;
 234  236          ozp->z_moved = (uint8_t)-1;
 235  237  }
 236  238  
 237  239  /*ARGSUSED*/
 238  240  static kmem_cbrc_t
 239  241  zfs_znode_move(void *buf, void *newbuf, size_t size, void *arg)
 240  242  {
 241  243          znode_t *ozp = buf, *nzp = newbuf;
 242  244          zfsvfs_t *zfsvfs;
 243  245          vnode_t *vp;
 244  246  
 245  247          /*
 246  248           * The znode is on the file system's list of known znodes if the vfs
 247  249           * pointer is valid. We set the low bit of the vfs pointer when freeing
 248  250           * the znode to invalidate it, and the memory patterns written by kmem
 249  251           * (baddcafe and deadbeef) set at least one of the two low bits. A newly
 250  252           * created znode sets the vfs pointer last of all to indicate that the
 251  253           * znode is known and in a valid state to be moved by this function.
 252  254           */
 253  255          zfsvfs = ozp->z_zfsvfs;
 254  256          if (!POINTER_IS_VALID(zfsvfs)) {
 255  257                  ZNODE_STAT_ADD(znode_move_stats.zms_zfsvfs_invalid);
 256  258                  return (KMEM_CBRC_DONT_KNOW);
 257  259          }
 258  260  
 259  261          /*
 260  262           * Close a small window in which it's possible that the filesystem could
 261  263           * be unmounted and freed, and zfsvfs, though valid in the previous
 262  264           * statement, could point to unrelated memory by the time we try to
 263  265           * prevent the filesystem from being unmounted.
 264  266           */
 265  267          rw_enter(&zfsvfs_lock, RW_WRITER);
 266  268          if (zfsvfs != ozp->z_zfsvfs) {
 267  269                  rw_exit(&zfsvfs_lock);
 268  270                  ZNODE_STAT_ADD(znode_move_stats.zms_zfsvfs_recheck1);
 269  271                  return (KMEM_CBRC_DONT_KNOW);
 270  272          }
 271  273  
 272  274          /*
 273  275           * If the znode is still valid, then so is the file system. We know that
 274  276           * no valid file system can be freed while we hold zfsvfs_lock, so we
 275  277           * can safely ensure that the filesystem is not and will not be
 276  278           * unmounted. The next statement is equivalent to ZFS_ENTER().
 277  279           */
 278  280          rrw_enter(&zfsvfs->z_teardown_lock, RW_READER, FTAG);
 279  281          if (zfsvfs->z_unmounted) {
 280  282                  ZFS_EXIT(zfsvfs);
 281  283                  rw_exit(&zfsvfs_lock);
 282  284                  ZNODE_STAT_ADD(znode_move_stats.zms_zfsvfs_unmounted);
 283  285                  return (KMEM_CBRC_DONT_KNOW);
 284  286          }
 285  287          rw_exit(&zfsvfs_lock);
 286  288  
 287  289          mutex_enter(&zfsvfs->z_znodes_lock);
 288  290          /*
 289  291           * Recheck the vfs pointer in case the znode was removed just before
 290  292           * acquiring the lock.
 291  293           */
 292  294          if (zfsvfs != ozp->z_zfsvfs) {
 293  295                  mutex_exit(&zfsvfs->z_znodes_lock);
 294  296                  ZFS_EXIT(zfsvfs);
 295  297                  ZNODE_STAT_ADD(znode_move_stats.zms_zfsvfs_recheck2);
 296  298                  return (KMEM_CBRC_DONT_KNOW);
 297  299          }
 298  300  
 299  301          /*
 300  302           * At this point we know that as long as we hold z_znodes_lock, the
 301  303           * znode cannot be freed and fields within the znode can be safely
 302  304           * accessed. Now, prevent a race with zfs_zget().
 303  305           */
 304  306          if (ZFS_OBJ_HOLD_TRYENTER(zfsvfs, ozp->z_id) == 0) {
 305  307                  mutex_exit(&zfsvfs->z_znodes_lock);
 306  308                  ZFS_EXIT(zfsvfs);
 307  309                  ZNODE_STAT_ADD(znode_move_stats.zms_obj_held);
 308  310                  return (KMEM_CBRC_LATER);
 309  311          }
 310  312  
 311  313          vp = ZTOV(ozp);
 312  314          if (mutex_tryenter(&vp->v_lock) == 0) {
 313  315                  ZFS_OBJ_HOLD_EXIT(zfsvfs, ozp->z_id);
 314  316                  mutex_exit(&zfsvfs->z_znodes_lock);
 315  317                  ZFS_EXIT(zfsvfs);
 316  318                  ZNODE_STAT_ADD(znode_move_stats.zms_vnode_locked);
 317  319                  return (KMEM_CBRC_LATER);
 318  320          }
 319  321  
 320  322          /* Only move znodes that are referenced _only_ by the DNLC. */
 321  323          if (vp->v_count != 1 || !vn_in_dnlc(vp)) {
 322  324                  mutex_exit(&vp->v_lock);
 323  325                  ZFS_OBJ_HOLD_EXIT(zfsvfs, ozp->z_id);
 324  326                  mutex_exit(&zfsvfs->z_znodes_lock);
 325  327                  ZFS_EXIT(zfsvfs);
 326  328                  ZNODE_STAT_ADD(znode_move_stats.zms_not_only_dnlc);
 327  329                  return (KMEM_CBRC_LATER);
 328  330          }
 329  331  
 330  332          /*
 331  333           * The znode is known and in a valid state to move. We're holding the
 332  334           * locks needed to execute the critical section.
 333  335           */
 334  336          zfs_znode_move_impl(ozp, nzp);
 335  337          mutex_exit(&vp->v_lock);
 336  338          ZFS_OBJ_HOLD_EXIT(zfsvfs, ozp->z_id);
 337  339  
 338  340          list_link_replace(&ozp->z_link_node, &nzp->z_link_node);
 339  341          mutex_exit(&zfsvfs->z_znodes_lock);
 340  342          ZFS_EXIT(zfsvfs);
 341  343  
 342  344          return (KMEM_CBRC_YES);
 343  345  }
 344  346  
 345  347  void
 346  348  zfs_znode_init(void)
 347  349  {
 348  350          /*
 349  351           * Initialize zcache
 350  352           */
 351  353          rw_init(&zfsvfs_lock, NULL, RW_DEFAULT, NULL);
 352  354          ASSERT(znode_cache == NULL);
 353  355          znode_cache = kmem_cache_create("zfs_znode_cache",
 354  356              sizeof (znode_t), 0, zfs_znode_cache_constructor,
 355  357              zfs_znode_cache_destructor, NULL, NULL, NULL, 0);
 356  358          kmem_cache_set_move(znode_cache, zfs_znode_move);
 357  359  }
 358  360  
 359  361  void
 360  362  zfs_znode_fini(void)
 361  363  {
 362  364          /*
 363  365           * Cleanup vfs & vnode ops
 364  366           */
 365  367          zfs_remove_op_tables();
 366  368  
 367  369          /*
 368  370           * Cleanup zcache
 369  371           */
 370  372          if (znode_cache)
 371  373                  kmem_cache_destroy(znode_cache);
 372  374          znode_cache = NULL;
 373  375          rw_destroy(&zfsvfs_lock);
 374  376  }
 375  377  
 376  378  struct vnodeops *zfs_dvnodeops;
 377  379  struct vnodeops *zfs_fvnodeops;
 378  380  struct vnodeops *zfs_symvnodeops;
 379  381  struct vnodeops *zfs_xdvnodeops;
 380  382  struct vnodeops *zfs_evnodeops;
 381  383  struct vnodeops *zfs_sharevnodeops;
 382  384  
 383  385  void
 384  386  zfs_remove_op_tables()
 385  387  {
 386  388          /*
 387  389           * Remove vfs ops
 388  390           */
 389  391          ASSERT(zfsfstype);
 390  392          (void) vfs_freevfsops_by_type(zfsfstype);
 391  393          zfsfstype = 0;
 392  394  
 393  395          /*
 394  396           * Remove vnode ops
 395  397           */
 396  398          if (zfs_dvnodeops)
 397  399                  vn_freevnodeops(zfs_dvnodeops);
 398  400          if (zfs_fvnodeops)
 399  401                  vn_freevnodeops(zfs_fvnodeops);
 400  402          if (zfs_symvnodeops)
 401  403                  vn_freevnodeops(zfs_symvnodeops);
 402  404          if (zfs_xdvnodeops)
 403  405                  vn_freevnodeops(zfs_xdvnodeops);
 404  406          if (zfs_evnodeops)
 405  407                  vn_freevnodeops(zfs_evnodeops);
 406  408          if (zfs_sharevnodeops)
 407  409                  vn_freevnodeops(zfs_sharevnodeops);
 408  410  
 409  411          zfs_dvnodeops = NULL;
 410  412          zfs_fvnodeops = NULL;
 411  413          zfs_symvnodeops = NULL;
 412  414          zfs_xdvnodeops = NULL;
 413  415          zfs_evnodeops = NULL;
 414  416          zfs_sharevnodeops = NULL;
 415  417  }
 416  418  
 417  419  extern const fs_operation_def_t zfs_dvnodeops_template[];
 418  420  extern const fs_operation_def_t zfs_fvnodeops_template[];
 419  421  extern const fs_operation_def_t zfs_xdvnodeops_template[];
 420  422  extern const fs_operation_def_t zfs_symvnodeops_template[];
 421  423  extern const fs_operation_def_t zfs_evnodeops_template[];
 422  424  extern const fs_operation_def_t zfs_sharevnodeops_template[];
 423  425  
 424  426  int
 425  427  zfs_create_op_tables()
 426  428  {
 427  429          int error;
 428  430  
 429  431          /*
 430  432           * zfs_dvnodeops can be set if mod_remove() calls mod_installfs()
 431  433   * due to a failure to remove the 2nd modlinkage (zfs_modldrv).
 432  434           * In this case we just return as the ops vectors are already set up.
 433  435           */
 434  436          if (zfs_dvnodeops)
 435  437                  return (0);
 436  438  
 437  439          error = vn_make_ops(MNTTYPE_ZFS, zfs_dvnodeops_template,
 438  440              &zfs_dvnodeops);
 439  441          if (error)
 440  442                  return (error);
 441  443  
 442  444          error = vn_make_ops(MNTTYPE_ZFS, zfs_fvnodeops_template,
 443  445              &zfs_fvnodeops);
 444  446          if (error)
 445  447                  return (error);
 446  448  
 447  449          error = vn_make_ops(MNTTYPE_ZFS, zfs_symvnodeops_template,
 448  450              &zfs_symvnodeops);
 449  451          if (error)
 450  452                  return (error);
 451  453  
 452  454          error = vn_make_ops(MNTTYPE_ZFS, zfs_xdvnodeops_template,
 453  455              &zfs_xdvnodeops);
 454  456          if (error)
 455  457                  return (error);
 456  458  
 457  459          error = vn_make_ops(MNTTYPE_ZFS, zfs_evnodeops_template,
 458  460              &zfs_evnodeops);
 459  461          if (error)
 460  462                  return (error);
 461  463  
 462  464          error = vn_make_ops(MNTTYPE_ZFS, zfs_sharevnodeops_template,
 463  465              &zfs_sharevnodeops);
 464  466  
 465  467          return (error);
 466  468  }
 467  469  
 468  470  int
 469  471  zfs_create_share_dir(zfsvfs_t *zfsvfs, dmu_tx_t *tx)
 470  472  {
 471  473          zfs_acl_ids_t acl_ids;
 472  474          vattr_t vattr;
 473  475          znode_t *sharezp;
 474  476          vnode_t *vp;
 475  477          znode_t *zp;
 476  478          int error;
 477  479  
 478  480          vattr.va_mask = AT_MODE|AT_UID|AT_GID|AT_TYPE;
 479  481          vattr.va_type = VDIR;
 480  482          vattr.va_mode = S_IFDIR|0555;
 481  483          vattr.va_uid = crgetuid(kcred);
 482  484          vattr.va_gid = crgetgid(kcred);
 483  485  
 484  486          sharezp = kmem_cache_alloc(znode_cache, KM_SLEEP);
 485  487          ASSERT(!POINTER_IS_VALID(sharezp->z_zfsvfs));
 486  488          sharezp->z_moved = 0;
 487  489          sharezp->z_unlinked = 0;
 488  490          sharezp->z_atime_dirty = 0;
 489  491          sharezp->z_zfsvfs = zfsvfs;
 490  492          sharezp->z_is_sa = zfsvfs->z_use_sa;
 491  493  
 492  494          vp = ZTOV(sharezp);
 493  495          vn_reinit(vp);
 494  496          vp->v_type = VDIR;
 495  497  
 496  498          VERIFY(0 == zfs_acl_ids_create(sharezp, IS_ROOT_NODE, &vattr,
 497  499              kcred, NULL, &acl_ids));
 498  500          zfs_mknode(sharezp, &vattr, tx, kcred, IS_ROOT_NODE, &zp, &acl_ids);
 499  501          ASSERT3P(zp, ==, sharezp);
 500  502          ASSERT(!vn_in_dnlc(ZTOV(sharezp))); /* not valid to move */
 501  503          POINTER_INVALIDATE(&sharezp->z_zfsvfs);
 502  504          error = zap_add(zfsvfs->z_os, MASTER_NODE_OBJ,
 503  505              ZFS_SHARES_DIR, 8, 1, &sharezp->z_id, tx);
 504  506          zfsvfs->z_shares_dir = sharezp->z_id;
 505  507  
 506  508          zfs_acl_ids_free(&acl_ids);
 507  509          ZTOV(sharezp)->v_count = 0;
 508  510          sa_handle_destroy(sharezp->z_sa_hdl);
 509  511          kmem_cache_free(znode_cache, sharezp);
 510  512  
 511  513          return (error);
 512  514  }
 513  515  
 514  516  /*
 515  517   * define a couple of values we need available
 516  518   * for both 64 and 32 bit environments.
 517  519   */
 518  520  #ifndef NBITSMINOR64
 519  521  #define NBITSMINOR64    32
 520  522  #endif
 521  523  #ifndef MAXMAJ64
 522  524  #define MAXMAJ64        0xffffffffUL
 523  525  #endif
 524  526  #ifndef MAXMIN64
 525  527  #define MAXMIN64        0xffffffffUL
 526  528  #endif
 527  529  
 528  530  /*
 529  531   * Create special expldev for ZFS private use.
 530  532   * Can't use standard expldev since it doesn't do
 531  533   * what we want.  The standard expldev() takes a
 532  534   * dev32_t in LP64 and expands it to a long dev_t.
 533  535   * We need an interface that takes a dev32_t in ILP32
 534  536   * and expands it to a long dev_t.
 535  537   */
 536  538  static uint64_t
 537  539  zfs_expldev(dev_t dev)
 538  540  {
 539  541  #ifndef _LP64
 540  542          major_t major = (major_t)dev >> NBITSMINOR32 & MAXMAJ32;
 541  543          return (((uint64_t)major << NBITSMINOR64) |
 542  544              ((minor_t)dev & MAXMIN32));
 543  545  #else
 544  546          return (dev);
 545  547  #endif
 546  548  }
 547  549  
 548  550  /*
 549  551   * Special cmpldev for ZFS private use.
 550  552   * Can't use standard cmpldev since it takes
 551  553   * a long dev_t and compresses it to dev32_t in
 552  554   * LP64.  We need to do a compaction of a long dev_t
 553  555   * to a dev32_t in ILP32.
 554  556   */
 555  557  dev_t
 556  558  zfs_cmpldev(uint64_t dev)
 557  559  {
 558  560  #ifndef _LP64
 559  561          minor_t minor = (minor_t)dev & MAXMIN64;
 560  562          major_t major = (major_t)(dev >> NBITSMINOR64) & MAXMAJ64;
 561  563  
 562  564          if (major > MAXMAJ32 || minor > MAXMIN32)
 563  565                  return (NODEV32);
 564  566  
 565  567          return (((dev32_t)major << NBITSMINOR32) | minor);
 566  568  #else
 567  569          return (dev);
 568  570  #endif
 569  571  }
 570  572  
 571  573  static void
 572  574  zfs_znode_sa_init(zfsvfs_t *zfsvfs, znode_t *zp,
 573  575      dmu_buf_t *db, dmu_object_type_t obj_type, sa_handle_t *sa_hdl)
 574  576  {
 575  577          ASSERT(!POINTER_IS_VALID(zp->z_zfsvfs) || (zfsvfs == zp->z_zfsvfs));
 576  578          ASSERT(MUTEX_HELD(ZFS_OBJ_MUTEX(zfsvfs, zp->z_id)));
 577  579  
 578  580          mutex_enter(&zp->z_lock);
 579  581  
 580  582          ASSERT(zp->z_sa_hdl == NULL);
 581  583          ASSERT(zp->z_acl_cached == NULL);
 582  584          if (sa_hdl == NULL) {
 583  585                  VERIFY(0 == sa_handle_get_from_db(zfsvfs->z_os, db, zp,
 584  586                      SA_HDL_SHARED, &zp->z_sa_hdl));
 585  587          } else {
 586  588                  zp->z_sa_hdl = sa_hdl;
 587  589                  sa_set_userp(sa_hdl, zp);
 588  590          }
 589  591  
 590  592          zp->z_is_sa = (obj_type == DMU_OT_SA) ? B_TRUE : B_FALSE;
 591  593  
 592  594          /*
 593  595           * Slap on VROOT if we are the root znode
 594  596           */
 595  597          if (zp->z_id == zfsvfs->z_root)
 596  598                  ZTOV(zp)->v_flag |= VROOT;
 597  599  
 598  600          mutex_exit(&zp->z_lock);
 599  601          vn_exists(ZTOV(zp));
 600  602  }
 601  603  
 602  604  void
 603  605  zfs_znode_dmu_fini(znode_t *zp)
 604  606  {
 605  607          ASSERT(MUTEX_HELD(ZFS_OBJ_MUTEX(zp->z_zfsvfs, zp->z_id)) ||
 606  608              zp->z_unlinked ||
 607  609              RW_WRITE_HELD(&zp->z_zfsvfs->z_teardown_inactive_lock));
 608  610  
 609  611          sa_handle_destroy(zp->z_sa_hdl);
 610  612          zp->z_sa_hdl = NULL;
 611  613  }
 612  614  
 613  615  /*
 614  616   * Construct a new znode/vnode and initialize.
 615  617   *
 616  618   * This does not do a call to dmu_set_user() that is
 617  619   * up to the caller to do, in case you don't want to
 618  620   * return the znode
 619  621   */
 620  622  static znode_t *
 621  623  zfs_znode_alloc(zfsvfs_t *zfsvfs, dmu_buf_t *db, int blksz,
 622  624      dmu_object_type_t obj_type, sa_handle_t *hdl)
 623  625  {
 624  626          znode_t *zp;
 625  627          vnode_t *vp;
 626  628          uint64_t mode;
 627  629          uint64_t parent;
 628  630          sa_bulk_attr_t bulk[9];
 629  631          int count = 0;
 630  632  
 631  633          zp = kmem_cache_alloc(znode_cache, KM_SLEEP);
 632  634  
 633  635          ASSERT(zp->z_dirlocks == NULL);
 634  636          ASSERT(!POINTER_IS_VALID(zp->z_zfsvfs));
 635  637          zp->z_moved = 0;
 636  638  
 637  639          /*
 638  640           * Defer setting z_zfsvfs until the znode is ready to be a candidate for
 639  641           * the zfs_znode_move() callback.
 640  642           */
 641  643          zp->z_sa_hdl = NULL;
 642  644          zp->z_unlinked = 0;
 643  645          zp->z_atime_dirty = 0;
 644  646          zp->z_mapcnt = 0;
 645  647          zp->z_id = db->db_object;
 646  648          zp->z_blksz = blksz;
 647  649          zp->z_seq = 0x7A4653;
 648  650          zp->z_sync_cnt = 0;
 649  651  
 650  652          vp = ZTOV(zp);
 651  653          vn_reinit(vp);
 652  654  
 653  655          zfs_znode_sa_init(zfsvfs, zp, db, obj_type, hdl);
 654  656  
 655  657          SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8);
 656  658          SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zfsvfs), NULL, &zp->z_gen, 8);
 657  659          SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
 658  660              &zp->z_size, 8);
 659  661          SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL,
 660  662              &zp->z_links, 8);
 661  663          SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
 662  664              &zp->z_pflags, 8);
 663  665          SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zfsvfs), NULL, &parent, 8);
 664  666          SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
 665  667              &zp->z_atime, 16);
 666  668          SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
 667  669              &zp->z_uid, 8);
 668  670          SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL,
 669  671              &zp->z_gid, 8);
 670  672  
 671  673          if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count) != 0 || zp->z_gen == 0) {
 672  674                  if (hdl == NULL)
 673  675                          sa_handle_destroy(zp->z_sa_hdl);
 674  676                  kmem_cache_free(znode_cache, zp);
 675  677                  return (NULL);
 676  678          }
 677  679  
 678  680          zp->z_mode = mode;
 679  681          vp->v_vfsp = zfsvfs->z_parent->z_vfs;
 680  682  
 681  683          vp->v_type = IFTOVT((mode_t)mode);
 682  684  
 683  685          switch (vp->v_type) {
 684  686          case VDIR:
 685  687                  if (zp->z_pflags & ZFS_XATTR) {
 686  688                          vn_setops(vp, zfs_xdvnodeops);
 687  689                          vp->v_flag |= V_XATTRDIR;
 688  690                  } else {
 689  691                          vn_setops(vp, zfs_dvnodeops);
 690  692                  }
 691  693                  zp->z_zn_prefetch = B_TRUE; /* z_prefetch default is enabled */
 692  694                  break;
 693  695          case VBLK:
 694  696          case VCHR:
 695  697                  {
 696  698                          uint64_t rdev;
 697  699                          VERIFY(sa_lookup(zp->z_sa_hdl, SA_ZPL_RDEV(zfsvfs),
 698  700                              &rdev, sizeof (rdev)) == 0);
 699  701  
 700  702                          vp->v_rdev = zfs_cmpldev(rdev);
 701  703                  }
 702  704                  /*FALLTHROUGH*/
 703  705          case VFIFO:
 704  706          case VSOCK:
 705  707          case VDOOR:
 706  708                  vn_setops(vp, zfs_fvnodeops);
 707  709                  break;
 708  710          case VREG:
 709  711                  vp->v_flag |= VMODSORT;
 710  712                  if (parent == zfsvfs->z_shares_dir) {
 711  713                          ASSERT(zp->z_uid == 0 && zp->z_gid == 0);
 712  714                          vn_setops(vp, zfs_sharevnodeops);
 713  715                  } else {
 714  716                          vn_setops(vp, zfs_fvnodeops);
 715  717                  }
 716  718                  break;
 717  719          case VLNK:
 718  720                  vn_setops(vp, zfs_symvnodeops);
 719  721                  break;
 720  722          default:
 721  723                  vn_setops(vp, zfs_evnodeops);
 722  724                  break;
 723  725          }
 724  726  
 725  727          mutex_enter(&zfsvfs->z_znodes_lock);
 726  728          list_insert_tail(&zfsvfs->z_all_znodes, zp);
 727  729          membar_producer();
 728  730          /*
 729  731           * Everything else must be valid before assigning z_zfsvfs makes the
 730  732           * znode eligible for zfs_znode_move().
 731  733           */
 732  734          zp->z_zfsvfs = zfsvfs;
 733  735          mutex_exit(&zfsvfs->z_znodes_lock);
 734  736  
 735  737          VFS_HOLD(zfsvfs->z_vfs);
 736  738          return (zp);
 737  739  }
 738  740  
 739  741  static uint64_t empty_xattr;
 740  742  static uint64_t pad[4];
 741  743  static zfs_acl_phys_t acl_phys;
 742  744  /*
 743  745   * Create a new DMU object to hold a zfs znode.
 744  746   *
 745  747   *      IN:     dzp     - parent directory for new znode
 746  748   *              vap     - file attributes for new znode
 747  749   *              tx      - dmu transaction id for zap operations
 748  750   *              cr      - credentials of caller
 749  751   *              flag    - flags:
 750  752   *                        IS_ROOT_NODE  - new object will be root
 751  753   *                        IS_XATTR      - new object is an attribute
 752  754   *              bonuslen - length of bonus buffer
 753  755   *              setaclp  - File/Dir initial ACL
 754  756   *              fuidp    - Tracks fuid allocation.
 755  757   *
 756  758   *      OUT:    zpp     - allocated znode
 757  759   *
 758  760   */
 759  761  void
 760  762  zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
 761  763      uint_t flag, znode_t **zpp, zfs_acl_ids_t *acl_ids)
 762  764  {
 763  765          uint64_t        crtime[2], atime[2], mtime[2], ctime[2];
 764  766          uint64_t        mode, size, links, parent, pflags;
 765  767          uint64_t        dzp_pflags = 0;
 766  768          uint64_t        rdev = 0;
 767  769          zfsvfs_t        *zfsvfs = dzp->z_zfsvfs;
 768  770          dmu_buf_t       *db;
 769  771          timestruc_t     now;
 770  772          uint64_t        gen, obj;
 771  773          int             err;
 772  774          int             bonuslen;
 773  775          sa_handle_t     *sa_hdl;
 774  776          dmu_object_type_t obj_type;
 775  777          sa_bulk_attr_t  sa_attrs[ZPL_END];
 776  778          int             cnt = 0;
 777  779          zfs_acl_locator_cb_t locate = { 0 };
 778  780  
 779  781          ASSERT(vap && (vap->va_mask & (AT_TYPE|AT_MODE)) == (AT_TYPE|AT_MODE));
 780  782  
 781  783          if (zfsvfs->z_replay) {
 782  784                  obj = vap->va_nodeid;
 783  785                  now = vap->va_ctime;            /* see zfs_replay_create() */
 784  786                  gen = vap->va_nblocks;          /* ditto */
 785  787          } else {
 786  788                  obj = 0;
 787  789                  gethrestime(&now);
 788  790                  gen = dmu_tx_get_txg(tx);
 789  791          }
 790  792  
 791  793          obj_type = zfsvfs->z_use_sa ? DMU_OT_SA : DMU_OT_ZNODE;
 792  794          bonuslen = (obj_type == DMU_OT_SA) ?
 793  795              DN_MAX_BONUSLEN : ZFS_OLD_ZNODE_PHYS_SIZE;
 794  796  
 795  797          /*
 796  798           * Create a new DMU object.
 797  799           */
 798  800          /*
  
 799  801           * There's currently no mechanism for pre-reading the blocks that will
 800  802           * be needed to allocate a new object, so we accept the small chance
 801  803           * that there will be an i/o error and we will fail one of the
 802  804           * assertions below.
 803  805           */
 804  806          if (vap->va_type == VDIR) {
 805  807                  if (zfsvfs->z_replay) {
 806  808                          err = zap_create_claim_norm(zfsvfs->z_os, obj,
 807  809                              zfsvfs->z_norm, DMU_OT_DIRECTORY_CONTENTS,
 808  810                              obj_type, bonuslen, tx);
 809      -                        ASSERT3U(err, ==, 0);
      811 +                        ASSERT0(err);
 810  812                  } else {
 811  813                          obj = zap_create_norm(zfsvfs->z_os,
 812  814                              zfsvfs->z_norm, DMU_OT_DIRECTORY_CONTENTS,
 813  815                              obj_type, bonuslen, tx);
 814  816                  }
 815  817          } else {
 816  818                  if (zfsvfs->z_replay) {
 817  819                          err = dmu_object_claim(zfsvfs->z_os, obj,
 818  820                              DMU_OT_PLAIN_FILE_CONTENTS, 0,
 819  821                              obj_type, bonuslen, tx);
 820      -                        ASSERT3U(err, ==, 0);
      822 +                        ASSERT0(err);
 821  823                  } else {
 822  824                          obj = dmu_object_alloc(zfsvfs->z_os,
 823  825                              DMU_OT_PLAIN_FILE_CONTENTS, 0,
 824  826                              obj_type, bonuslen, tx);
 825  827                  }
 826  828          }
 827  829  
 828  830          ZFS_OBJ_HOLD_ENTER(zfsvfs, obj);
 829  831          VERIFY(0 == sa_buf_hold(zfsvfs->z_os, obj, NULL, &db));
 830  832  
 831  833          /*
 832  834           * If this is the root, fix up the half-initialized parent pointer
 833  835           * to reference the just-allocated physical data area.
 834  836           */
 835  837          if (flag & IS_ROOT_NODE) {
 836  838                  dzp->z_id = obj;
 837  839          } else {
 838  840                  dzp_pflags = dzp->z_pflags;
 839  841          }
 840  842  
 841  843          /*
 842  844           * If parent is an xattr, so am I.
 843  845           */
 844  846          if (dzp_pflags & ZFS_XATTR) {
 845  847                  flag |= IS_XATTR;
 846  848          }
 847  849  
 848  850          if (zfsvfs->z_use_fuids)
 849  851                  pflags = ZFS_ARCHIVE | ZFS_AV_MODIFIED;
 850  852          else
 851  853                  pflags = 0;
 852  854  
 853  855          if (vap->va_type == VDIR) {
 854  856                  size = 2;               /* contents ("." and "..") */
 855  857                  links = (flag & (IS_ROOT_NODE | IS_XATTR)) ? 2 : 1;
 856  858          } else {
 857  859                  size = links = 0;
 858  860          }
 859  861  
 860  862          if (vap->va_type == VBLK || vap->va_type == VCHR) {
 861  863                  rdev = zfs_expldev(vap->va_rdev);
 862  864          }
 863  865  
 864  866          parent = dzp->z_id;
 865  867          mode = acl_ids->z_mode;
 866  868          if (flag & IS_XATTR)
 867  869                  pflags |= ZFS_XATTR;
 868  870  
 869  871          /*
 870  872           * No execs denied will be deterimed when zfs_mode_compute() is called.
 871  873           */
 872  874          pflags |= acl_ids->z_aclp->z_hints &
 873  875              (ZFS_ACL_TRIVIAL|ZFS_INHERIT_ACE|ZFS_ACL_AUTO_INHERIT|
 874  876              ZFS_ACL_DEFAULTED|ZFS_ACL_PROTECTED);
 875  877  
 876  878          ZFS_TIME_ENCODE(&now, crtime);
 877  879          ZFS_TIME_ENCODE(&now, ctime);
 878  880  
 879  881          if (vap->va_mask & AT_ATIME) {
 880  882                  ZFS_TIME_ENCODE(&vap->va_atime, atime);
 881  883          } else {
 882  884                  ZFS_TIME_ENCODE(&now, atime);
 883  885          }
 884  886  
 885  887          if (vap->va_mask & AT_MTIME) {
 886  888                  ZFS_TIME_ENCODE(&vap->va_mtime, mtime);
 887  889          } else {
 888  890                  ZFS_TIME_ENCODE(&now, mtime);
 889  891          }
 890  892  
 891  893          /* Now add in all of the "SA" attributes */
 892  894          VERIFY(0 == sa_handle_get_from_db(zfsvfs->z_os, db, NULL, SA_HDL_SHARED,
 893  895              &sa_hdl));
 894  896  
 895  897          /*
 896  898           * Setup the array of attributes to be replaced/set on the new file
 897  899           *
 898  900           * order for  DMU_OT_ZNODE is critical since it needs to be constructed
 899  901           * in the old znode_phys_t format.  Don't change this ordering
 900  902           */
 901  903  
 902  904          if (obj_type == DMU_OT_ZNODE) {
 903  905                  SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zfsvfs),
 904  906                      NULL, &atime, 16);
 905  907                  SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MTIME(zfsvfs),
 906  908                      NULL, &mtime, 16);
 907  909                  SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CTIME(zfsvfs),
 908  910                      NULL, &ctime, 16);
 909  911                  SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CRTIME(zfsvfs),
 910  912                      NULL, &crtime, 16);
 911  913                  SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GEN(zfsvfs),
 912  914                      NULL, &gen, 8);
 913  915                  SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MODE(zfsvfs),
 914  916                      NULL, &mode, 8);
 915  917                  SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_SIZE(zfsvfs),
 916  918                      NULL, &size, 8);
 917  919                  SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PARENT(zfsvfs),
 918  920                      NULL, &parent, 8);
 919  921          } else {
 920  922                  SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MODE(zfsvfs),
 921  923                      NULL, &mode, 8);
 922  924                  SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_SIZE(zfsvfs),
 923  925                      NULL, &size, 8);
 924  926                  SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GEN(zfsvfs),
 925  927                      NULL, &gen, 8);
 926  928                  SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_UID(zfsvfs), NULL,
 927  929                      &acl_ids->z_fuid, 8);
 928  930                  SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GID(zfsvfs), NULL,
 929  931                      &acl_ids->z_fgid, 8);
 930  932                  SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PARENT(zfsvfs),
 931  933                      NULL, &parent, 8);
 932  934                  SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_FLAGS(zfsvfs),
 933  935                      NULL, &pflags, 8);
 934  936                  SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zfsvfs),
 935  937                      NULL, &atime, 16);
 936  938                  SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MTIME(zfsvfs),
 937  939                      NULL, &mtime, 16);
 938  940                  SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CTIME(zfsvfs),
 939  941                      NULL, &ctime, 16);
 940  942                  SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CRTIME(zfsvfs),
 941  943                      NULL, &crtime, 16);
 942  944          }
 943  945  
 944  946          SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_LINKS(zfsvfs), NULL, &links, 8);
 945  947  
 946  948          if (obj_type == DMU_OT_ZNODE) {
 947  949                  SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_XATTR(zfsvfs), NULL,
 948  950                      &empty_xattr, 8);
 949  951          }
 950  952          if (obj_type == DMU_OT_ZNODE ||
 951  953              (vap->va_type == VBLK || vap->va_type == VCHR)) {
 952  954                  SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_RDEV(zfsvfs),
 953  955                      NULL, &rdev, 8);
 954  956  
 955  957          }
 956  958          if (obj_type == DMU_OT_ZNODE) {
 957  959                  SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_FLAGS(zfsvfs),
 958  960                      NULL, &pflags, 8);
 959  961                  SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_UID(zfsvfs), NULL,
 960  962                      &acl_ids->z_fuid, 8);
 961  963                  SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GID(zfsvfs), NULL,
 962  964                      &acl_ids->z_fgid, 8);
 963  965                  SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PAD(zfsvfs), NULL, pad,
 964  966                      sizeof (uint64_t) * 4);
 965  967                  SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ZNODE_ACL(zfsvfs), NULL,
 966  968                      &acl_phys, sizeof (zfs_acl_phys_t));
 967  969          } else if (acl_ids->z_aclp->z_version >= ZFS_ACL_VERSION_FUID) {
 968  970                  SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_DACL_COUNT(zfsvfs), NULL,
 969  971                      &acl_ids->z_aclp->z_acl_count, 8);
 970  972                  locate.cb_aclp = acl_ids->z_aclp;
 971  973                  SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_DACL_ACES(zfsvfs),
 972  974                      zfs_acl_data_locator, &locate,
 973  975                      acl_ids->z_aclp->z_acl_bytes);
 974  976                  mode = zfs_mode_compute(mode, acl_ids->z_aclp, &pflags,
 975  977                      acl_ids->z_fuid, acl_ids->z_fgid);
 976  978          }
 977  979  
 978  980          VERIFY(sa_replace_all_by_template(sa_hdl, sa_attrs, cnt, tx) == 0);
 979  981  
 980  982          if (!(flag & IS_ROOT_NODE)) {
 981  983                  *zpp = zfs_znode_alloc(zfsvfs, db, 0, obj_type, sa_hdl);
 982  984                  ASSERT(*zpp != NULL);
 983  985          } else {
 984  986                  /*
 985  987                   * If we are creating the root node, the "parent" we
 986  988                   * passed in is the znode for the root.
 987  989                   */
 988  990                  *zpp = dzp;
 989  991  
 990  992                  (*zpp)->z_sa_hdl = sa_hdl;
 991  993          }
  
 992  994  
 993  995          (*zpp)->z_pflags = pflags;
 994  996          (*zpp)->z_mode = mode;
 995  997  
 996  998          if (vap->va_mask & AT_XVATTR)
 997  999                  zfs_xvattr_set(*zpp, (xvattr_t *)vap, tx);
 998 1000  
 999 1001          if (obj_type == DMU_OT_ZNODE ||
1000 1002              acl_ids->z_aclp->z_version < ZFS_ACL_VERSION_FUID) {
1001 1003                  err = zfs_aclset_common(*zpp, acl_ids->z_aclp, cr, tx);
1002      -                ASSERT3P(err, ==, 0);
     1004 +                ASSERT0(err);
1003 1005          }
1004 1006          ZFS_OBJ_HOLD_EXIT(zfsvfs, obj);
1005 1007  }
1006 1008  
1007 1009  /*
1008 1010   * zfs_xvattr_set only updates the in-core attributes
1009 1011   * it is assumed the caller will be doing an sa_bulk_update
1010 1012   * to push the changes out
1011 1013   */
1012 1014  void
1013 1015  zfs_xvattr_set(znode_t *zp, xvattr_t *xvap, dmu_tx_t *tx)
1014 1016  {
1015 1017          xoptattr_t *xoap;
1016 1018  
1017 1019          xoap = xva_getxoptattr(xvap);
1018 1020          ASSERT(xoap);
1019 1021  
1020 1022          if (XVA_ISSET_REQ(xvap, XAT_CREATETIME)) {
1021 1023                  uint64_t times[2];
1022 1024                  ZFS_TIME_ENCODE(&xoap->xoa_createtime, times);
1023 1025                  (void) sa_update(zp->z_sa_hdl, SA_ZPL_CRTIME(zp->z_zfsvfs),
1024 1026                      &times, sizeof (times), tx);
1025 1027                  XVA_SET_RTN(xvap, XAT_CREATETIME);
1026 1028          }
1027 1029          if (XVA_ISSET_REQ(xvap, XAT_READONLY)) {
1028 1030                  ZFS_ATTR_SET(zp, ZFS_READONLY, xoap->xoa_readonly,
1029 1031                      zp->z_pflags, tx);
1030 1032                  XVA_SET_RTN(xvap, XAT_READONLY);
1031 1033          }
1032 1034          if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) {
1033 1035                  ZFS_ATTR_SET(zp, ZFS_HIDDEN, xoap->xoa_hidden,
1034 1036                      zp->z_pflags, tx);
1035 1037                  XVA_SET_RTN(xvap, XAT_HIDDEN);
1036 1038          }
1037 1039          if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) {
1038 1040                  ZFS_ATTR_SET(zp, ZFS_SYSTEM, xoap->xoa_system,
1039 1041                      zp->z_pflags, tx);
1040 1042                  XVA_SET_RTN(xvap, XAT_SYSTEM);
1041 1043          }
1042 1044          if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) {
1043 1045                  ZFS_ATTR_SET(zp, ZFS_ARCHIVE, xoap->xoa_archive,
1044 1046                      zp->z_pflags, tx);
1045 1047                  XVA_SET_RTN(xvap, XAT_ARCHIVE);
1046 1048          }
1047 1049          if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
1048 1050                  ZFS_ATTR_SET(zp, ZFS_IMMUTABLE, xoap->xoa_immutable,
1049 1051                      zp->z_pflags, tx);
1050 1052                  XVA_SET_RTN(xvap, XAT_IMMUTABLE);
1051 1053          }
1052 1054          if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
1053 1055                  ZFS_ATTR_SET(zp, ZFS_NOUNLINK, xoap->xoa_nounlink,
1054 1056                      zp->z_pflags, tx);
1055 1057                  XVA_SET_RTN(xvap, XAT_NOUNLINK);
1056 1058          }
1057 1059          if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
1058 1060                  ZFS_ATTR_SET(zp, ZFS_APPENDONLY, xoap->xoa_appendonly,
1059 1061                      zp->z_pflags, tx);
1060 1062                  XVA_SET_RTN(xvap, XAT_APPENDONLY);
1061 1063          }
1062 1064          if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
1063 1065                  ZFS_ATTR_SET(zp, ZFS_NODUMP, xoap->xoa_nodump,
1064 1066                      zp->z_pflags, tx);
1065 1067                  XVA_SET_RTN(xvap, XAT_NODUMP);
1066 1068          }
1067 1069          if (XVA_ISSET_REQ(xvap, XAT_OPAQUE)) {
1068 1070                  ZFS_ATTR_SET(zp, ZFS_OPAQUE, xoap->xoa_opaque,
1069 1071                      zp->z_pflags, tx);
1070 1072                  XVA_SET_RTN(xvap, XAT_OPAQUE);
1071 1073          }
1072 1074          if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
1073 1075                  ZFS_ATTR_SET(zp, ZFS_AV_QUARANTINED,
1074 1076                      xoap->xoa_av_quarantined, zp->z_pflags, tx);
1075 1077                  XVA_SET_RTN(xvap, XAT_AV_QUARANTINED);
1076 1078          }
1077 1079          if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
1078 1080                  ZFS_ATTR_SET(zp, ZFS_AV_MODIFIED, xoap->xoa_av_modified,
1079 1081                      zp->z_pflags, tx);
1080 1082                  XVA_SET_RTN(xvap, XAT_AV_MODIFIED);
1081 1083          }
1082 1084          if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) {
1083 1085                  zfs_sa_set_scanstamp(zp, xvap, tx);
1084 1086                  XVA_SET_RTN(xvap, XAT_AV_SCANSTAMP);
1085 1087          }
1086 1088          if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
1087 1089                  ZFS_ATTR_SET(zp, ZFS_REPARSE, xoap->xoa_reparse,
1088 1090                      zp->z_pflags, tx);
1089 1091                  XVA_SET_RTN(xvap, XAT_REPARSE);
1090 1092          }
1091 1093          if (XVA_ISSET_REQ(xvap, XAT_OFFLINE)) {
1092 1094                  ZFS_ATTR_SET(zp, ZFS_OFFLINE, xoap->xoa_offline,
1093 1095                      zp->z_pflags, tx);
1094 1096                  XVA_SET_RTN(xvap, XAT_OFFLINE);
1095 1097          }
1096 1098          if (XVA_ISSET_REQ(xvap, XAT_SPARSE)) {
1097 1099                  ZFS_ATTR_SET(zp, ZFS_SPARSE, xoap->xoa_sparse,
1098 1100                      zp->z_pflags, tx);
1099 1101                  XVA_SET_RTN(xvap, XAT_SPARSE);
1100 1102          }
1101 1103  }
1102 1104  
1103 1105  int
1104 1106  zfs_zget(zfsvfs_t *zfsvfs, uint64_t obj_num, znode_t **zpp)
1105 1107  {
1106 1108          dmu_object_info_t doi;
1107 1109          dmu_buf_t       *db;
1108 1110          znode_t         *zp;
1109 1111          int err;
1110 1112          sa_handle_t     *hdl;
1111 1113  
1112 1114          *zpp = NULL;
1113 1115  
1114 1116          ZFS_OBJ_HOLD_ENTER(zfsvfs, obj_num);
1115 1117  
1116 1118          err = sa_buf_hold(zfsvfs->z_os, obj_num, NULL, &db);
1117 1119          if (err) {
1118 1120                  ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
1119 1121                  return (err);
1120 1122          }
1121 1123  
1122 1124          dmu_object_info_from_db(db, &doi);
1123 1125          if (doi.doi_bonus_type != DMU_OT_SA &&
1124 1126              (doi.doi_bonus_type != DMU_OT_ZNODE ||
1125 1127              (doi.doi_bonus_type == DMU_OT_ZNODE &&
1126 1128              doi.doi_bonus_size < sizeof (znode_phys_t)))) {
1127 1129                  sa_buf_rele(db, NULL);
1128 1130                  ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
1129 1131                  return (EINVAL);
1130 1132          }
1131 1133  
1132 1134          hdl = dmu_buf_get_user(db);
1133 1135          if (hdl != NULL) {
1134 1136                  zp  = sa_get_userdata(hdl);
1135 1137  
1136 1138  
1137 1139                  /*
1138 1140                   * Since "SA" does immediate eviction we
1139 1141                   * should never find a sa handle that doesn't
1140 1142                   * know about the znode.
1141 1143                   */
1142 1144  
1143 1145                  ASSERT3P(zp, !=, NULL);
1144 1146  
1145 1147                  mutex_enter(&zp->z_lock);
1146 1148                  ASSERT3U(zp->z_id, ==, obj_num);
1147 1149                  if (zp->z_unlinked) {
1148 1150                          err = ENOENT;
1149 1151                  } else {
1150 1152                          VN_HOLD(ZTOV(zp));
1151 1153                          *zpp = zp;
1152 1154                          err = 0;
1153 1155                  }
1154 1156                  sa_buf_rele(db, NULL);
1155 1157                  mutex_exit(&zp->z_lock);
1156 1158                  ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
1157 1159                  return (err);
1158 1160          }
1159 1161  
1160 1162          /*
1161 1163           * Not found create new znode/vnode
1162 1164           * but only if file exists.
1163 1165           *
1164 1166           * There is a small window where zfs_vget() could
1165 1167           * find this object while a file create is still in
1166 1168           * progress.  This is checked for in zfs_znode_alloc()
1167 1169           *
1168 1170           * if zfs_znode_alloc() fails it will drop the hold on the
1169 1171           * bonus buffer.
1170 1172           */
1171 1173          zp = zfs_znode_alloc(zfsvfs, db, doi.doi_data_block_size,
1172 1174              doi.doi_bonus_type, NULL);
1173 1175          if (zp == NULL) {
1174 1176                  err = ENOENT;
1175 1177          } else {
1176 1178                  *zpp = zp;
1177 1179          }
1178 1180          ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
1179 1181          return (err);
1180 1182  }
1181 1183  
1182 1184  int
1183 1185  zfs_rezget(znode_t *zp)
1184 1186  {
1185 1187          zfsvfs_t *zfsvfs = zp->z_zfsvfs;
1186 1188          dmu_object_info_t doi;
1187 1189          dmu_buf_t *db;
1188 1190          uint64_t obj_num = zp->z_id;
1189 1191          uint64_t mode;
1190 1192          sa_bulk_attr_t bulk[8];
1191 1193          int err;
1192 1194          int count = 0;
1193 1195          uint64_t gen;
1194 1196  
1195 1197          ZFS_OBJ_HOLD_ENTER(zfsvfs, obj_num);
1196 1198  
1197 1199          mutex_enter(&zp->z_acl_lock);
1198 1200          if (zp->z_acl_cached) {
1199 1201                  zfs_acl_free(zp->z_acl_cached);
1200 1202                  zp->z_acl_cached = NULL;
1201 1203          }
1202 1204  
1203 1205          mutex_exit(&zp->z_acl_lock);
1204 1206          ASSERT(zp->z_sa_hdl == NULL);
1205 1207          err = sa_buf_hold(zfsvfs->z_os, obj_num, NULL, &db);
1206 1208          if (err) {
1207 1209                  ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
1208 1210                  return (err);
1209 1211          }
1210 1212  
1211 1213          dmu_object_info_from_db(db, &doi);
1212 1214          if (doi.doi_bonus_type != DMU_OT_SA &&
1213 1215              (doi.doi_bonus_type != DMU_OT_ZNODE ||
1214 1216              (doi.doi_bonus_type == DMU_OT_ZNODE &&
1215 1217              doi.doi_bonus_size < sizeof (znode_phys_t)))) {
1216 1218                  sa_buf_rele(db, NULL);
1217 1219                  ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
1218 1220                  return (EINVAL);
1219 1221          }
1220 1222  
1221 1223          zfs_znode_sa_init(zfsvfs, zp, db, doi.doi_bonus_type, NULL);
1222 1224  
1223 1225          /* reload cached values */
1224 1226          SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zfsvfs), NULL,
1225 1227              &gen, sizeof (gen));
1226 1228          SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
1227 1229              &zp->z_size, sizeof (zp->z_size));
1228 1230          SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL,
1229 1231              &zp->z_links, sizeof (zp->z_links));
1230 1232          SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
1231 1233              &zp->z_pflags, sizeof (zp->z_pflags));
1232 1234          SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
1233 1235              &zp->z_atime, sizeof (zp->z_atime));
1234 1236          SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
1235 1237              &zp->z_uid, sizeof (zp->z_uid));
1236 1238          SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL,
1237 1239              &zp->z_gid, sizeof (zp->z_gid));
1238 1240          SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
1239 1241              &mode, sizeof (mode));
1240 1242  
1241 1243          if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) {
1242 1244                  zfs_znode_dmu_fini(zp);
1243 1245                  ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
1244 1246                  return (EIO);
1245 1247          }
1246 1248  
1247 1249          zp->z_mode = mode;
1248 1250  
1249 1251          if (gen != zp->z_gen) {
1250 1252                  zfs_znode_dmu_fini(zp);
1251 1253                  ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
1252 1254                  return (EIO);
1253 1255          }
1254 1256  
1255 1257          zp->z_unlinked = (zp->z_links == 0);
1256 1258          zp->z_blksz = doi.doi_data_block_size;
1257 1259  
1258 1260          ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
1259 1261  
1260 1262          return (0);
1261 1263  }
1262 1264  
1263 1265  void
1264 1266  zfs_znode_delete(znode_t *zp, dmu_tx_t *tx)
1265 1267  {
1266 1268          zfsvfs_t *zfsvfs = zp->z_zfsvfs;
1267 1269          objset_t *os = zfsvfs->z_os;
1268 1270          uint64_t obj = zp->z_id;
1269 1271          uint64_t acl_obj = zfs_external_acl(zp);
1270 1272  
1271 1273          ZFS_OBJ_HOLD_ENTER(zfsvfs, obj);
1272 1274          if (acl_obj) {
1273 1275                  VERIFY(!zp->z_is_sa);
1274 1276                  VERIFY(0 == dmu_object_free(os, acl_obj, tx));
1275 1277          }
1276 1278          VERIFY(0 == dmu_object_free(os, obj, tx));
1277 1279          zfs_znode_dmu_fini(zp);
1278 1280          ZFS_OBJ_HOLD_EXIT(zfsvfs, obj);
1279 1281          zfs_znode_free(zp);
1280 1282  }
1281 1283  
1282 1284  void
1283 1285  zfs_zinactive(znode_t *zp)
1284 1286  {
1285 1287          vnode_t *vp = ZTOV(zp);
1286 1288          zfsvfs_t *zfsvfs = zp->z_zfsvfs;
1287 1289          uint64_t z_id = zp->z_id;
1288 1290  
1289 1291          ASSERT(zp->z_sa_hdl);
1290 1292  
1291 1293          /*
1292 1294           * Don't allow a zfs_zget() while we're trying to release this znode
1293 1295           */
1294 1296          ZFS_OBJ_HOLD_ENTER(zfsvfs, z_id);
1295 1297  
1296 1298          mutex_enter(&zp->z_lock);
1297 1299          mutex_enter(&vp->v_lock);
1298 1300          vp->v_count--;
1299 1301          if (vp->v_count > 0 || vn_has_cached_data(vp)) {
1300 1302                  /*
1301 1303                   * If the hold count is greater than zero, somebody has
1302 1304                   * obtained a new reference on this znode while we were
1303 1305                   * processing it here, so we are done.  If we still have
1304 1306                   * mapped pages then we are also done, since we don't
1305 1307                   * want to inactivate the znode until the pages get pushed.
1306 1308                   *
1307 1309                   * XXX - if vn_has_cached_data(vp) is true, but count == 0,
1308 1310                   * this seems like it would leave the znode hanging with
1309 1311                   * no chance to go inactive...
1310 1312                   */
1311 1313                  mutex_exit(&vp->v_lock);
1312 1314                  mutex_exit(&zp->z_lock);
1313 1315                  ZFS_OBJ_HOLD_EXIT(zfsvfs, z_id);
1314 1316                  return;
1315 1317          }
1316 1318          mutex_exit(&vp->v_lock);
1317 1319  
1318 1320          /*
1319 1321           * If this was the last reference to a file with no links,
1320 1322           * remove the file from the file system.
1321 1323           */
1322 1324          if (zp->z_unlinked) {
1323 1325                  mutex_exit(&zp->z_lock);
1324 1326                  ZFS_OBJ_HOLD_EXIT(zfsvfs, z_id);
1325 1327                  zfs_rmnode(zp);
1326 1328                  return;
1327 1329          }
1328 1330  
1329 1331          mutex_exit(&zp->z_lock);
1330 1332          zfs_znode_dmu_fini(zp);
1331 1333          ZFS_OBJ_HOLD_EXIT(zfsvfs, z_id);
1332 1334          zfs_znode_free(zp);
1333 1335  }
1334 1336  
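The inactivation path above boils down to a three-way decision once the vnode hold has been dropped: defer if anything still references the znode or holds cached pages, remove the file if it was unlinked, otherwise just tear the znode down. The userland sketch below only illustrates that decision; zinactive_action() is an invented helper, not part of ZFS.

#include <stdio.h>

/*
 * Invented helper describing what zfs_zinactive() does after dropping
 * the vnode hold.  Illustration only.
 */
static const char *
zinactive_action(int v_count, int has_cached_data, int unlinked)
{
        if (v_count > 0 || has_cached_data)
                return ("defer: znode still referenced or has cached pages");
        if (unlinked)
                return ("zfs_rmnode: last reference to an unlinked file");
        return ("zfs_znode_dmu_fini + zfs_znode_free");
}

int
main(void)
{
        (void) printf("%s\n", zinactive_action(1, 0, 0));
        (void) printf("%s\n", zinactive_action(0, 0, 1));
        (void) printf("%s\n", zinactive_action(0, 0, 0));
        return (0);
}
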
1335 1337  void
1336 1338  zfs_znode_free(znode_t *zp)
1337 1339  {
1338 1340          zfsvfs_t *zfsvfs = zp->z_zfsvfs;
1339 1341  
1340 1342          vn_invalid(ZTOV(zp));
1341 1343  
1342 1344          ASSERT(ZTOV(zp)->v_count == 0);
1343 1345  
1344 1346          mutex_enter(&zfsvfs->z_znodes_lock);
1345 1347          POINTER_INVALIDATE(&zp->z_zfsvfs);
1346 1348          list_remove(&zfsvfs->z_all_znodes, zp);
1347 1349          mutex_exit(&zfsvfs->z_znodes_lock);
1348 1350  
1349 1351          if (zp->z_acl_cached) {
1350 1352                  zfs_acl_free(zp->z_acl_cached);
1351 1353                  zp->z_acl_cached = NULL;
1352 1354          }
1353 1355  
1354 1356          kmem_cache_free(znode_cache, zp);
1355 1357  
1356 1358          VFS_RELE(zfsvfs->z_vfs);
1357 1359  }
1358 1360  
1359 1361  void
1360 1362  zfs_tstamp_update_setup(znode_t *zp, uint_t flag, uint64_t mtime[2],
1361 1363      uint64_t ctime[2], boolean_t have_tx)
1362 1364  {
1363 1365          timestruc_t     now;
1364 1366  
1365 1367          gethrestime(&now);
1366 1368  
1367 1369          if (have_tx) {  /* will sa_bulk_update happen really soon? */
1368 1370                  zp->z_atime_dirty = 0;
1369 1371                  zp->z_seq++;
1370 1372          } else {
1371 1373                  zp->z_atime_dirty = 1;
1372 1374          }
1373 1375  
1374 1376          if (flag & AT_ATIME) {
1375 1377                  ZFS_TIME_ENCODE(&now, zp->z_atime);
1376 1378          }
1377 1379  
1378 1380          if (flag & AT_MTIME) {
1379 1381                  ZFS_TIME_ENCODE(&now, mtime);
1380 1382                  if (zp->z_zfsvfs->z_use_fuids) {
1381 1383                          zp->z_pflags |= (ZFS_ARCHIVE |
1382 1384                              ZFS_AV_MODIFIED);
1383 1385                  }
1384 1386          }
1385 1387  
1386 1388          if (flag & AT_CTIME) {
1387 1389                  ZFS_TIME_ENCODE(&now, ctime);
1388 1390                  if (zp->z_zfsvfs->z_use_fuids)
1389 1391                          zp->z_pflags |= ZFS_ARCHIVE;
1390 1392          }
1391 1393  }
1392 1394  
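zfs_tstamp_update_setup() writes the current time into the caller's mtime[2]/ctime[2] arrays via ZFS_TIME_ENCODE(). The sketch below assumes the macro simply stores seconds in word 0 and nanoseconds in word 1 of the two-element array (the authoritative definition lives in sys/zfs_znode.h); the userland stand-in uses struct timespec in place of the kernel's timestruc_t.

#include <stdio.h>
#include <stdint.h>
#include <time.h>

/*
 * Userland stand-in for ZFS_TIME_ENCODE(): seconds in word 0,
 * nanoseconds in word 1 (assumed layout; see sys/zfs_znode.h).
 */
#define ZFS_TIME_ENCODE(tp, stmp) {                     \
        (stmp)[0] = (uint64_t)(tp)->tv_sec;             \
        (stmp)[1] = (uint64_t)(tp)->tv_nsec;            \
}

int
main(void)
{
        struct timespec now;
        uint64_t mtime[2];

        (void) clock_gettime(CLOCK_REALTIME, &now);
        ZFS_TIME_ENCODE(&now, mtime);
        (void) printf("mtime = { %llu, %llu }\n",
            (unsigned long long)mtime[0], (unsigned long long)mtime[1]);
        return (0);
}
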
1393 1395  /*
1394 1396   * Grow the block size for a file.
1395 1397   *
1396 1398   *      IN:     zp      - znode of file to grow the block size for.
1397 1399   *              size    - requested block size
1398 1400   *              tx      - open transaction.
1399 1401   *
1400 1402   * NOTE: this function assumes that the znode is write locked.
1401 1403   */
1402 1404  void
1403 1405  zfs_grow_blocksize(znode_t *zp, uint64_t size, dmu_tx_t *tx)
1404 1406  {
1405 1407          int             error;
1406 1408          u_longlong_t    dummy;
1407 1409  
1408 1410          if (size <= zp->z_blksz)
1409 1411                  return;
1410 1412          /*
1411 1413           * If the file size is already greater than the current blocksize,
1412 1414           * we will not grow.  If there is more than one block in a file,
1413 1415           * the blocksize cannot change.
1414 1416           */
1415 1417          if (zp->z_blksz && zp->z_size > zp->z_blksz)
1416 1418                  return;
1417 1419  
1418 1420          error = dmu_object_set_blocksize(zp->z_zfsvfs->z_os, zp->z_id,
1419 1421              size, 0, tx);
1420 1422  
1421 1423          if (error == ENOTSUP)
1422 1424                  return;
1423      -        ASSERT3U(error, ==, 0);
     1425 +        ASSERT0(error);
1424 1426  
1425 1427          /* What blocksize did we actually get? */
1426 1428          dmu_object_size_from_db(sa_get_db(zp->z_sa_hdl), &zp->z_blksz, &dummy);
1427 1429  }
1428 1430  
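The only change in this hunk swaps ASSERT3U(error, ==, 0) for the ASSERT0() convenience macro that issue 3006 introduces for the common "first argument is zero" check. The self-contained sketch below uses a simplified stand-in for the kernel's VERIFY3U() from sys/debug.h; in the kernel the ASSERT* forms additionally compile away in non-DEBUG builds, which this illustration does not model.

#include <stdio.h>
#include <stdlib.h>

/*
 * Simplified stand-in for the kernel's three-argument VERIFY3U(), plus
 * the zero-check wrappers this issue introduces.  ASSERT0 is just an
 * alias here so the example stays runnable in userland.
 */
#define VERIFY3U(l, op, r) do {                                         \
        if (!((unsigned long long)(l) op (unsigned long long)(r))) {    \
                (void) fprintf(stderr, "%s %s %s failed\n", #l, #op, #r); \
                abort();                                                \
        }                                                               \
} while (0)

#define VERIFY0(x)      VERIFY3U((x), ==, 0)
#define ASSERT0(x)      VERIFY0(x)

int
main(void)
{
        int error = 0;

        ASSERT0(error);         /* replaces ASSERT3U(error, ==, 0) */
        (void) printf("error is zero\n");
        return (0);
}
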
1429 1431  /*
1430 1432   * This is a dummy interface used when pvn_vplist_dirty() should *not*
1431 1433   * be calling back into the fs for a putpage().  E.g.: when truncating
1432 1434   * a file, the pages being "thrown away" don't need to be written out.
1433 1435   */
1434 1436  /* ARGSUSED */
1435 1437  static int
1436 1438  zfs_no_putpage(vnode_t *vp, page_t *pp, u_offset_t *offp, size_t *lenp,
1437 1439      int flags, cred_t *cr)
1438 1440  {
1439 1441          ASSERT(0);
1440 1442          return (0);
1441 1443  }
1442 1444  
1443 1445  /*
1444 1446   * Increase the file length
1445 1447   *
1446 1448   *      IN:     zp      - znode of file to extend.
1447 1449   *              end     - new end-of-file
1448 1450   *
1449 1451   *      RETURN: 0 if success
1450 1452   *              error code if failure
1451 1453   */
1452 1454  static int
1453 1455  zfs_extend(znode_t *zp, uint64_t end)
1454 1456  {
1455 1457          zfsvfs_t *zfsvfs = zp->z_zfsvfs;
1456 1458          dmu_tx_t *tx;
1457 1459          rl_t *rl;
1458 1460          uint64_t newblksz;
1459 1461          int error;
1460 1462  
1461 1463          /*
1462 1464           * We will change zp_size, lock the whole file.
1463 1465           */
1464 1466          rl = zfs_range_lock(zp, 0, UINT64_MAX, RL_WRITER);
1465 1467  
1466 1468          /*
1467 1469           * Nothing to do if file already at desired length.
1468 1470           */
1469 1471          if (end <= zp->z_size) {
1470 1472                  zfs_range_unlock(rl);
1471 1473                  return (0);
1472 1474          }
1473 1475  top:
1474 1476          tx = dmu_tx_create(zfsvfs->z_os);
1475 1477          dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
1476 1478          zfs_sa_upgrade_txholds(tx, zp);
1477 1479          if (end > zp->z_blksz &&
1478 1480              (!ISP2(zp->z_blksz) || zp->z_blksz < zfsvfs->z_max_blksz)) {
1479 1481                  /*
1480 1482                   * We are growing the file past the current block size.
1481 1483                   */
1482 1484                  if (zp->z_blksz > zp->z_zfsvfs->z_max_blksz) {
1483 1485                          ASSERT(!ISP2(zp->z_blksz));
1484 1486                          newblksz = MIN(end, SPA_MAXBLOCKSIZE);
1485 1487                  } else {
1486 1488                          newblksz = MIN(end, zp->z_zfsvfs->z_max_blksz);
1487 1489                  }
1488 1490                  dmu_tx_hold_write(tx, zp->z_id, 0, newblksz);
1489 1491          } else {
1490 1492                  newblksz = 0;
1491 1493          }
1492 1494  
1493 1495          error = dmu_tx_assign(tx, TXG_NOWAIT);
1494 1496          if (error) {
1495 1497                  if (error == ERESTART) {
1496 1498                          dmu_tx_wait(tx);
1497 1499                          dmu_tx_abort(tx);
1498 1500                          goto top;
1499 1501                  }
1500 1502                  dmu_tx_abort(tx);
1501 1503                  zfs_range_unlock(rl);
1502 1504                  return (error);
1503 1505          }
1504 1506  
1505 1507          if (newblksz)
1506 1508                  zfs_grow_blocksize(zp, newblksz, tx);
1507 1509  
1508 1510          zp->z_size = end;
1509 1511  
1510 1512          VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zp->z_zfsvfs),
1511 1513              &zp->z_size, sizeof (zp->z_size), tx));
1512 1514  
1513 1515          zfs_range_unlock(rl);
1514 1516  
1515 1517          dmu_tx_commit(tx);
1516 1518  
1517 1519          return (0);
1518 1520  }
1519 1521  
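zfs_extend(), and zfs_trunc() and zfs_freesp() further down, all wrap dmu_tx_assign(tx, TXG_NOWAIT) in the same retry idiom: ERESTART means the open transaction group was full, so the caller waits for the next one and retries, while any other error aborts the operation. The sketch below reduces that control flow to userland; try_assign() is an invented stand-in for the DMU call, and ERESTART is given a fallback value in case the host errno.h lacks it.

#include <stdio.h>
#include <errno.h>

#ifndef ERESTART
#define ERESTART        EAGAIN          /* fallback value for this sketch */
#endif

/*
 * try_assign() stands in for dmu_tx_assign(tx, TXG_NOWAIT): it reports
 * two "full" transaction groups before succeeding.
 */
static int attempts;

static int
try_assign(void)
{
        return (attempts++ < 2 ? ERESTART : 0);
}

int
main(void)
{
        int error;

top:
        error = try_assign();
        if (error) {
                if (error == ERESTART) {
                        /* dmu_tx_wait(tx); dmu_tx_abort(tx); then retry */
                        goto top;
                }
                /* dmu_tx_abort(tx); and fail the operation */
                return (error);
        }
        /* assigned: do the work, then dmu_tx_commit(tx) */
        (void) printf("assigned after %d attempts\n", attempts);
        return (0);
}
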
1520 1522  /*
1521 1523   * Free space in a file.
1522 1524   *
1523 1525   *      IN:     zp      - znode of file to free data in.
1524 1526   *              off     - start of section to free.
1525 1527   *              len     - length of section to free.
1526 1528   *
1527 1529   *      RETURN: 0 if success
1528 1530   *              error code if failure
1529 1531   */
1530 1532  static int
1531 1533  zfs_free_range(znode_t *zp, uint64_t off, uint64_t len)
1532 1534  {
1533 1535          zfsvfs_t *zfsvfs = zp->z_zfsvfs;
1534 1536          rl_t *rl;
1535 1537          int error;
1536 1538  
1537 1539          /*
1538 1540           * Lock the range being freed.
1539 1541           */
1540 1542          rl = zfs_range_lock(zp, off, len, RL_WRITER);
1541 1543  
1542 1544          /*
1543 1545           * Nothing to do if file already at desired length.
1544 1546           */
1545 1547          if (off >= zp->z_size) {
1546 1548                  zfs_range_unlock(rl);
1547 1549                  return (0);
1548 1550          }
1549 1551  
1550 1552          if (off + len > zp->z_size)
1551 1553                  len = zp->z_size - off;
1552 1554  
1553 1555          error = dmu_free_long_range(zfsvfs->z_os, zp->z_id, off, len);
1554 1556  
1555 1557          zfs_range_unlock(rl);
1556 1558  
1557 1559          return (error);
1558 1560  }
1559 1561  
1560 1562  /*
1561 1563   * Truncate a file
1562 1564   *
1563 1565   *      IN:     zp      - znode of file to free data in.
1564 1566   *              end     - new end-of-file.
1565 1567   *
1566 1568   *      RETURN: 0 if success
1567 1569   *              error code if failure
1568 1570   */
1569 1571  static int
1570 1572  zfs_trunc(znode_t *zp, uint64_t end)
1571 1573  {
1572 1574          zfsvfs_t *zfsvfs = zp->z_zfsvfs;
1573 1575          vnode_t *vp = ZTOV(zp);
1574 1576          dmu_tx_t *tx;
1575 1577          rl_t *rl;
1576 1578          int error;
1577 1579          sa_bulk_attr_t bulk[2];
1578 1580          int count = 0;
1579 1581  
1580 1582          /*
1581 1583           * We will change zp_size, lock the whole file.
1582 1584           */
1583 1585          rl = zfs_range_lock(zp, 0, UINT64_MAX, RL_WRITER);
1584 1586  
1585 1587          /*
1586 1588           * Nothing to do if file already at desired length.
1587 1589           */
1588 1590          if (end >= zp->z_size) {
1589 1591                  zfs_range_unlock(rl);
1590 1592                  return (0);
1591 1593          }
1592 1594  
1593 1595          error = dmu_free_long_range(zfsvfs->z_os, zp->z_id, end,  -1);
1594 1596          if (error) {
1595 1597                  zfs_range_unlock(rl);
1596 1598                  return (error);
1597 1599          }
1598 1600  top:
1599 1601          tx = dmu_tx_create(zfsvfs->z_os);
1600 1602          dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
1601 1603          zfs_sa_upgrade_txholds(tx, zp);
1602 1604          error = dmu_tx_assign(tx, TXG_NOWAIT);
1603 1605          if (error) {
1604 1606                  if (error == ERESTART) {
1605 1607                          dmu_tx_wait(tx);
1606 1608                          dmu_tx_abort(tx);
1607 1609                          goto top;
1608 1610                  }
1609 1611                  dmu_tx_abort(tx);
1610 1612                  zfs_range_unlock(rl);
1611 1613                  return (error);
1612 1614          }
1613 1615  
1614 1616          zp->z_size = end;
1615 1617          SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs),
1616 1618              NULL, &zp->z_size, sizeof (zp->z_size));
1617 1619  
1618 1620          if (end == 0) {
1619 1621                  zp->z_pflags &= ~ZFS_SPARSE;
1620 1622                  SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
1621 1623                      NULL, &zp->z_pflags, 8);
1622 1624          }
1623 1625          VERIFY(sa_bulk_update(zp->z_sa_hdl, bulk, count, tx) == 0);
1624 1626  
1625 1627          dmu_tx_commit(tx);
1626 1628  
1627 1629          /*
1628 1630           * Clear any mapped pages in the truncated region.  This has to
1629 1631           * happen outside of the transaction to avoid the possibility of
1630 1632           * a deadlock with someone trying to push a page that we are
1631 1633           * about to invalidate.
1632 1634           */
1633 1635          if (vn_has_cached_data(vp)) {
1634 1636                  page_t *pp;
1635 1637                  uint64_t start = end & PAGEMASK;
1636 1638                  int poff = end & PAGEOFFSET;
1637 1639  
1638 1640                  if (poff != 0 && (pp = page_lookup(vp, start, SE_SHARED))) {
1639 1641                          /*
1640 1642                           * We need to zero a partial page.
1641 1643                           */
1642 1644                          pagezero(pp, poff, PAGESIZE - poff);
1643 1645                          start += PAGESIZE;
1644 1646                          page_unlock(pp);
1645 1647                  }
1646 1648                  error = pvn_vplist_dirty(vp, start, zfs_no_putpage,
1647 1649                      B_INVAL | B_TRUNC, NULL);
1648 1650                  ASSERT(error == 0);
1649 1651          }
1650 1652  
1651 1653          zfs_range_unlock(rl);
1652 1654  
1653 1655          return (0);
1654 1656  }
1655 1657  
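When the new end-of-file is not page aligned, zfs_trunc() zeroes the tail of the page containing EOF and then invalidates every cached page from the next page boundary onward. The arithmetic is just masking: start = end & PAGEMASK is the base of the page holding EOF, and poff = end & PAGEOFFSET is EOF's offset inside it. A worked userland example, assuming a 4K page size purely for illustration:

#include <stdio.h>
#include <stdint.h>

#define PAGESIZE        4096ULL                 /* assumed for illustration */
#define PAGEOFFSET      (PAGESIZE - 1)
#define PAGEMASK        (~PAGEOFFSET)

int
main(void)
{
        uint64_t end = 10000;                   /* new end-of-file */
        uint64_t start = end & PAGEMASK;        /* 8192: page holding EOF */
        uint64_t poff = end & PAGEOFFSET;       /* 1808: EOF offset in page */

        if (poff != 0) {
                /* zero bytes [poff, PAGESIZE) of the page at "start" */
                (void) printf("zero %llu bytes in page at %llu\n",
                    (unsigned long long)(PAGESIZE - poff),
                    (unsigned long long)start);
                start += PAGESIZE;              /* 12288 */
        }
        /* invalidate all cached pages from "start" onward */
        (void) printf("invalidate pages from offset %llu\n",
            (unsigned long long)start);
        return (0);
}
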
1656 1658  /*
1657 1659   * Free space in a file
1658 1660   *
1659 1661   *      IN:     zp      - znode of file to free data in.
1660 1662   *              off     - start of range
1661 1663   *              len     - end of range (0 => EOF)
1662 1664   *              flag    - current file open mode flags.
1663 1665   *              log     - TRUE if this action should be logged
1664 1666   *
1665 1667   *      RETURN: 0 if success
1666 1668   *              error code if failure
1667 1669   */
1668 1670  int
1669 1671  zfs_freesp(znode_t *zp, uint64_t off, uint64_t len, int flag, boolean_t log)
1670 1672  {
1671 1673          vnode_t *vp = ZTOV(zp);
1672 1674          dmu_tx_t *tx;
1673 1675          zfsvfs_t *zfsvfs = zp->z_zfsvfs;
1674 1676          zilog_t *zilog = zfsvfs->z_log;
1675 1677          uint64_t mode;
1676 1678          uint64_t mtime[2], ctime[2];
1677 1679          sa_bulk_attr_t bulk[3];
1678 1680          int count = 0;
1679 1681          int error;
1680 1682  
1681 1683          if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs), &mode,
1682 1684              sizeof (mode))) != 0)
1683 1685                  return (error);
1684 1686  
1685 1687          if (off > zp->z_size) {
1686 1688                  error =  zfs_extend(zp, off+len);
1687 1689                  if (error == 0 && log)
1688 1690                          goto log;
1689 1691                  else
1690 1692                          return (error);
1691 1693          }
1692 1694  
1693 1695          /*
1694 1696           * Check for any locks in the region to be freed.
1695 1697           */
1696 1698  
1697 1699          if (MANDLOCK(vp, (mode_t)mode)) {
1698 1700                  uint64_t length = (len ? len : zp->z_size - off);
1699 1701                  if (error = chklock(vp, FWRITE, off, length, flag, NULL))
1700 1702                          return (error);
1701 1703          }
1702 1704  
1703 1705          if (len == 0) {
1704 1706                  error = zfs_trunc(zp, off);
1705 1707          } else {
1706 1708                  if ((error = zfs_free_range(zp, off, len)) == 0 &&
1707 1709                      off + len > zp->z_size)
1708 1710                          error = zfs_extend(zp, off+len);
1709 1711          }
1710 1712          if (error || !log)
1711 1713                  return (error);
1712 1714  log:
1713 1715          tx = dmu_tx_create(zfsvfs->z_os);
1714 1716          dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
1715 1717          zfs_sa_upgrade_txholds(tx, zp);
1716 1718          error = dmu_tx_assign(tx, TXG_NOWAIT);
1717 1719          if (error) {
1718 1720                  if (error == ERESTART) {
1719 1721                          dmu_tx_wait(tx);
1720 1722                          dmu_tx_abort(tx);
1721 1723                          goto log;
1722 1724                  }
1723 1725                  dmu_tx_abort(tx);
1724 1726                  return (error);
1725 1727          }
1726 1728  
1727 1729          SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, mtime, 16);
1728 1730          SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, ctime, 16);
1729 1731          SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
1730 1732              NULL, &zp->z_pflags, 8);
1731 1733          zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime, B_TRUE);
1732 1734          error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
1733 1735          ASSERT(error == 0);
1734 1736  
1735 1737          zfs_log_truncate(zilog, tx, TX_TRUNCATE, zp, off, len);
1736 1738  
1737 1739          dmu_tx_commit(tx);
1738 1740          return (0);
1739 1741  }
1740 1742  
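zfs_freesp() is the dispatcher for the three helpers above: a start offset beyond EOF extends the file, a zero length truncates at off, and anything else punches a hole (extending afterwards if the hole runs past EOF). The small sketch below only describes that dispatch; freesp_action() is invented for illustration and performs no I/O.

#include <stdio.h>
#include <stdint.h>

/*
 * Invented helper describing which path zfs_freesp() takes for a given
 * (off, len) request against the current file size.  Illustration only.
 */
static const char *
freesp_action(uint64_t off, uint64_t len, uint64_t size)
{
        if (off > size)
                return ("zfs_extend to off+len");
        if (len == 0)
                return ("zfs_trunc at off");
        if (off + len > size)
                return ("zfs_free_range, then zfs_extend to off+len");
        return ("zfs_free_range");
}

int
main(void)
{
        (void) printf("%s\n", freesp_action(200, 50, 100));    /* extend */
        (void) printf("%s\n", freesp_action(10, 0, 100));      /* truncate */
        (void) printf("%s\n", freesp_action(90, 20, 100));     /* hole + extend */
        (void) printf("%s\n", freesp_action(10, 20, 100));     /* hole */
        return (0);
}
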
1741 1743  void
1742 1744  zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
1743 1745  {
1744 1746          zfsvfs_t        zfsvfs;
1745 1747          uint64_t        moid, obj, sa_obj, version;
1746 1748          uint64_t        sense = ZFS_CASE_SENSITIVE;
1747 1749          uint64_t        norm = 0;
1748 1750          nvpair_t        *elem;
1749 1751          int             error;
1750 1752          int             i;
1751 1753          znode_t         *rootzp = NULL;
1752 1754          vnode_t         *vp;
1753 1755          vattr_t         vattr;
1754 1756          znode_t         *zp;
1755 1757          zfs_acl_ids_t   acl_ids;
1756 1758  
1757 1759          /*
1758 1760           * First attempt to create master node.
1759 1761           */
1760 1762          /*
1761 1763           * In an empty objset, there are no blocks to read and thus
1762 1764           * there can be no i/o errors (which we assert below).
1763 1765           */
1764 1766          moid = MASTER_NODE_OBJ;
1765 1767          error = zap_create_claim(os, moid, DMU_OT_MASTER_NODE,
1766 1768              DMU_OT_NONE, 0, tx);
1767 1769          ASSERT(error == 0);
1768 1770  
1769 1771          /*
1770 1772           * Set starting attributes.
1771 1773           */
1772 1774          version = zfs_zpl_version_map(spa_version(dmu_objset_spa(os)));
1773 1775          elem = NULL;
1774 1776          while ((elem = nvlist_next_nvpair(zplprops, elem)) != NULL) {
1775 1777                  /* For the moment we expect all zpl props to be uint64_ts */
1776 1778                  uint64_t val;
1777 1779                  char *name;
1778 1780  
1779 1781                  ASSERT(nvpair_type(elem) == DATA_TYPE_UINT64);
1780 1782                  VERIFY(nvpair_value_uint64(elem, &val) == 0);
1781 1783                  name = nvpair_name(elem);
1782 1784                  if (strcmp(name, zfs_prop_to_name(ZFS_PROP_VERSION)) == 0) {
1783 1785                          if (val < version)
1784 1786                                  version = val;
1785 1787                  } else {
1786 1788                          error = zap_update(os, moid, name, 8, 1, &val, tx);
1787 1789                  }
1788 1790                  ASSERT(error == 0);
1789 1791                  if (strcmp(name, zfs_prop_to_name(ZFS_PROP_NORMALIZE)) == 0)
1790 1792                          norm = val;
1791 1793                  else if (strcmp(name, zfs_prop_to_name(ZFS_PROP_CASE)) == 0)
1792 1794                          sense = val;
1793 1795          }
1794 1796          ASSERT(version != 0);
1795 1797          error = zap_update(os, moid, ZPL_VERSION_STR, 8, 1, &version, tx);
1796 1798  
1797 1799          /*
1798 1800           * Create zap object used for SA attribute registration
1799 1801           */
1800 1802  
1801 1803          if (version >= ZPL_VERSION_SA) {
1802 1804                  sa_obj = zap_create(os, DMU_OT_SA_MASTER_NODE,
1803 1805                      DMU_OT_NONE, 0, tx);
1804 1806                  error = zap_add(os, moid, ZFS_SA_ATTRS, 8, 1, &sa_obj, tx);
1805 1807                  ASSERT(error == 0);
1806 1808          } else {
1807 1809                  sa_obj = 0;
1808 1810          }
1809 1811          /*
1810 1812           * Create a delete queue.
1811 1813           */
1812 1814          obj = zap_create(os, DMU_OT_UNLINKED_SET, DMU_OT_NONE, 0, tx);
1813 1815  
1814 1816          error = zap_add(os, moid, ZFS_UNLINKED_SET, 8, 1, &obj, tx);
1815 1817          ASSERT(error == 0);
1816 1818  
1817 1819          /*
1818 1820           * Create root znode.  Create minimal znode/vnode/zfsvfs
1819 1821           * to allow zfs_mknode to work.
1820 1822           */
1821 1823          vattr.va_mask = AT_MODE|AT_UID|AT_GID|AT_TYPE;
1822 1824          vattr.va_type = VDIR;
1823 1825          vattr.va_mode = S_IFDIR|0755;
1824 1826          vattr.va_uid = crgetuid(cr);
1825 1827          vattr.va_gid = crgetgid(cr);
1826 1828  
1827 1829          rootzp = kmem_cache_alloc(znode_cache, KM_SLEEP);
1828 1830          ASSERT(!POINTER_IS_VALID(rootzp->z_zfsvfs));
1829 1831          rootzp->z_moved = 0;
1830 1832          rootzp->z_unlinked = 0;
1831 1833          rootzp->z_atime_dirty = 0;
1832 1834          rootzp->z_is_sa = USE_SA(version, os);
1833 1835  
1834 1836          vp = ZTOV(rootzp);
1835 1837          vn_reinit(vp);
1836 1838          vp->v_type = VDIR;
1837 1839  
1838 1840          bzero(&zfsvfs, sizeof (zfsvfs_t));
1839 1841  
1840 1842          zfsvfs.z_os = os;
1841 1843          zfsvfs.z_parent = &zfsvfs;
1842 1844          zfsvfs.z_version = version;
1843 1845          zfsvfs.z_use_fuids = USE_FUIDS(version, os);
1844 1846          zfsvfs.z_use_sa = USE_SA(version, os);
1845 1847          zfsvfs.z_norm = norm;
1846 1848  
1847 1849          error = sa_setup(os, sa_obj, zfs_attr_table, ZPL_END,
1848 1850              &zfsvfs.z_attr_table);
1849 1851  
1850 1852          ASSERT(error == 0);
1851 1853  
1852 1854          /*
1853 1855           * Fold case on file systems that are always or sometimes case
1854 1856           * insensitive.
1855 1857           */
1856 1858          if (sense == ZFS_CASE_INSENSITIVE || sense == ZFS_CASE_MIXED)
1857 1859                  zfsvfs.z_norm |= U8_TEXTPREP_TOUPPER;
1858 1860  
1859 1861          mutex_init(&zfsvfs.z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
1860 1862          list_create(&zfsvfs.z_all_znodes, sizeof (znode_t),
1861 1863              offsetof(znode_t, z_link_node));
1862 1864  
1863 1865          for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
1864 1866                  mutex_init(&zfsvfs.z_hold_mtx[i], NULL, MUTEX_DEFAULT, NULL);
1865 1867  
1866 1868          rootzp->z_zfsvfs = &zfsvfs;
1867 1869          VERIFY(0 == zfs_acl_ids_create(rootzp, IS_ROOT_NODE, &vattr,
1868 1870              cr, NULL, &acl_ids));
1869 1871          zfs_mknode(rootzp, &vattr, tx, cr, IS_ROOT_NODE, &zp, &acl_ids);
1870 1872          ASSERT3P(zp, ==, rootzp);
1871 1873          ASSERT(!vn_in_dnlc(ZTOV(rootzp))); /* not valid to move */
1872 1874          error = zap_add(os, moid, ZFS_ROOT_OBJ, 8, 1, &rootzp->z_id, tx);
1873 1875          ASSERT(error == 0);
1874 1876          zfs_acl_ids_free(&acl_ids);
1875 1877          POINTER_INVALIDATE(&rootzp->z_zfsvfs);
1876 1878  
1877 1879          ZTOV(rootzp)->v_count = 0;
1878 1880          sa_handle_destroy(rootzp->z_sa_hdl);
1879 1881          kmem_cache_free(znode_cache, rootzp);
1880 1882  
1881 1883          /*
1882 1884           * Create shares directory
1883 1885           */
1884 1886  
1885 1887          error = zfs_create_share_dir(&zfsvfs, tx);
1886 1888  
1887 1889          ASSERT(error == 0);
1888 1890  
1889 1891          for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
1890 1892                  mutex_destroy(&zfsvfs.z_hold_mtx[i]);
1891 1893  }
1892 1894  
1893 1895  #endif /* _KERNEL */
1894 1896  
1895 1897  static int
1896 1898  zfs_sa_setup(objset_t *osp, sa_attr_type_t **sa_table)
1897 1899  {
1898 1900          uint64_t sa_obj = 0;
1899 1901          int error;
1900 1902  
1901 1903          error = zap_lookup(osp, MASTER_NODE_OBJ, ZFS_SA_ATTRS, 8, 1, &sa_obj);
1902 1904          if (error != 0 && error != ENOENT)
1903 1905                  return (error);
1904 1906  
1905 1907          error = sa_setup(osp, sa_obj, zfs_attr_table, ZPL_END, sa_table);
1906 1908          return (error);
1907 1909  }
1908 1910  
1909 1911  static int
1910 1912  zfs_grab_sa_handle(objset_t *osp, uint64_t obj, sa_handle_t **hdlp,
1911 1913      dmu_buf_t **db, void *tag)
1912 1914  {
1913 1915          dmu_object_info_t doi;
1914 1916          int error;
1915 1917  
1916 1918          if ((error = sa_buf_hold(osp, obj, tag, db)) != 0)
1917 1919                  return (error);
1918 1920  
1919 1921          dmu_object_info_from_db(*db, &doi);
1920 1922          if ((doi.doi_bonus_type != DMU_OT_SA &&
1921 1923              doi.doi_bonus_type != DMU_OT_ZNODE) ||
1922 1924              doi.doi_bonus_type == DMU_OT_ZNODE &&
1923 1925              doi.doi_bonus_size < sizeof (znode_phys_t)) {
1924 1926                  sa_buf_rele(*db, tag);
1925 1927                  return (ENOTSUP);
1926 1928          }
1927 1929  
1928 1930          error = sa_handle_get(osp, obj, NULL, SA_HDL_PRIVATE, hdlp);
1929 1931          if (error != 0) {
1930 1932                  sa_buf_rele(*db, tag);
1931 1933                  return (error);
1932 1934          }
1933 1935  
1934 1936          return (0);
1935 1937  }
1936 1938  
1937 1939  void
1938 1940  zfs_release_sa_handle(sa_handle_t *hdl, dmu_buf_t *db, void *tag)
1939 1941  {
1940 1942          sa_handle_destroy(hdl);
1941 1943          sa_buf_rele(db, tag);
1942 1944  }
1943 1945  
1944 1946  /*
1945 1947   * Given an object number, return its parent object number and whether
1946 1948   * or not the object is an extended attribute directory.
1947 1949   */
1948 1950  static int
1949 1951  zfs_obj_to_pobj(sa_handle_t *hdl, sa_attr_type_t *sa_table, uint64_t *pobjp,
1950 1952      int *is_xattrdir)
1951 1953  {
1952 1954          uint64_t parent;
1953 1955          uint64_t pflags;
1954 1956          uint64_t mode;
1955 1957          sa_bulk_attr_t bulk[3];
1956 1958          int count = 0;
1957 1959          int error;
1958 1960  
1959 1961          SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_PARENT], NULL,
1960 1962              &parent, sizeof (parent));
1961 1963          SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_FLAGS], NULL,
1962 1964              &pflags, sizeof (pflags));
1963 1965          SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_MODE], NULL,
1964 1966              &mode, sizeof (mode));
1965 1967  
1966 1968          if ((error = sa_bulk_lookup(hdl, bulk, count)) != 0)
1967 1969                  return (error);
1968 1970  
1969 1971          *pobjp = parent;
1970 1972          *is_xattrdir = ((pflags & ZFS_XATTR) != 0) && S_ISDIR(mode);
1971 1973  
1972 1974          return (0);
1973 1975  }
1974 1976  
1975 1977  /*
1976 1978   * Given an object number, return some zpl level statistics
1977 1979   */
1978 1980  static int
1979 1981  zfs_obj_to_stats_impl(sa_handle_t *hdl, sa_attr_type_t *sa_table,
1980 1982      zfs_stat_t *sb)
1981 1983  {
1982 1984          sa_bulk_attr_t bulk[4];
1983 1985          int count = 0;
1984 1986  
1985 1987          SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_MODE], NULL,
1986 1988              &sb->zs_mode, sizeof (sb->zs_mode));
1987 1989          SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_GEN], NULL,
1988 1990              &sb->zs_gen, sizeof (sb->zs_gen));
1989 1991          SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_LINKS], NULL,
1990 1992              &sb->zs_links, sizeof (sb->zs_links));
1991 1993          SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_CTIME], NULL,
1992 1994              &sb->zs_ctime, sizeof (sb->zs_ctime));
1993 1995  
1994 1996          return (sa_bulk_lookup(hdl, bulk, count));
1995 1997  }
1996 1998  
1997 1999  static int
1998 2000  zfs_obj_to_path_impl(objset_t *osp, uint64_t obj, sa_handle_t *hdl,
1999 2001      sa_attr_type_t *sa_table, char *buf, int len)
2000 2002  {
2001 2003          sa_handle_t *sa_hdl;
2002 2004          sa_handle_t *prevhdl = NULL;
2003 2005          dmu_buf_t *prevdb = NULL;
2004 2006          dmu_buf_t *sa_db = NULL;
2005 2007          char *path = buf + len - 1;
2006 2008          int error;
2007 2009  
2008 2010          *path = '\0';
2009 2011          sa_hdl = hdl;
2010 2012  
2011 2013          for (;;) {
2012 2014                  uint64_t pobj;
2013 2015                  char component[MAXNAMELEN + 2];
2014 2016                  size_t complen;
2015 2017                  int is_xattrdir;
2016 2018  
2017 2019                  if (prevdb)
2018 2020                          zfs_release_sa_handle(prevhdl, prevdb, FTAG);
2019 2021  
2020 2022                  if ((error = zfs_obj_to_pobj(sa_hdl, sa_table, &pobj,
2021 2023                      &is_xattrdir)) != 0)
2022 2024                          break;
2023 2025  
2024 2026                  if (pobj == obj) {
2025 2027                          if (path[0] != '/')
2026 2028                                  *--path = '/';
2027 2029                          break;
2028 2030                  }
2029 2031  
2030 2032                  component[0] = '/';
2031 2033                  if (is_xattrdir) {
2032 2034                          (void) sprintf(component + 1, "<xattrdir>");
2033 2035                  } else {
2034 2036                          error = zap_value_search(osp, pobj, obj,
2035 2037                              ZFS_DIRENT_OBJ(-1ULL), component + 1);
2036 2038                          if (error != 0)
2037 2039                                  break;
2038 2040                  }
2039 2041  
2040 2042                  complen = strlen(component);
2041 2043                  path -= complen;
2042 2044                  ASSERT(path >= buf);
2043 2045                  bcopy(component, path, complen);
2044 2046                  obj = pobj;
2045 2047  
2046 2048                  if (sa_hdl != hdl) {
2047 2049                          prevhdl = sa_hdl;
2048 2050                          prevdb = sa_db;
2049 2051                  }
2050 2052                  error = zfs_grab_sa_handle(osp, obj, &sa_hdl, &sa_db, FTAG);
2051 2053                  if (error != 0) {
2052 2054                          sa_hdl = prevhdl;
2053 2055                          sa_db = prevdb;
2054 2056                          break;
2055 2057                  }
2056 2058          }
2057 2059  
2058 2060          if (sa_hdl != NULL && sa_hdl != hdl) {
2059 2061                  ASSERT(sa_db != NULL);
2060 2062                  zfs_release_sa_handle(sa_hdl, sa_db, FTAG);
2061 2063          }
2062 2064  
2063 2065          if (error == 0)
2064 2066                  (void) memmove(buf, path, buf + len - path);
2065 2067  
2066 2068          return (error);
2067 2069  }
2068 2070  
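zfs_obj_to_path_impl() assembles the path leaf-first: each component is prepended in front of a cursor that starts at the last byte of the caller's buffer, and the finished string is finally slid to the front with memmove(). A minimal userland sketch of that technique; the component names are made up, and memcpy() stands in for the kernel's bcopy().

#include <stdio.h>
#include <string.h>

int
main(void)
{
        char buf[64];
        char *path = buf + sizeof (buf) - 1;    /* cursor at last byte */
        const char *components[] = { "/file.txt", "/dir" };    /* leaf first */
        size_t i;

        *path = '\0';
        for (i = 0; i < sizeof (components) / sizeof (components[0]); i++) {
                size_t complen = strlen(components[i]);

                path -= complen;
                (void) memcpy(path, components[i], complen);
        }
        /* the root adds a leading '/' only if one is not already there */
        (void) memmove(buf, path, buf + sizeof (buf) - path);
        (void) printf("%s\n", buf);             /* "/dir/file.txt" */
        return (0);
}
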
2069 2071  int
2070 2072  zfs_obj_to_path(objset_t *osp, uint64_t obj, char *buf, int len)
2071 2073  {
2072 2074          sa_attr_type_t *sa_table;
2073 2075          sa_handle_t *hdl;
2074 2076          dmu_buf_t *db;
2075 2077          int error;
2076 2078  
2077 2079          error = zfs_sa_setup(osp, &sa_table);
2078 2080          if (error != 0)
2079 2081                  return (error);
2080 2082  
2081 2083          error = zfs_grab_sa_handle(osp, obj, &hdl, &db, FTAG);
2082 2084          if (error != 0)
2083 2085                  return (error);
2084 2086  
2085 2087          error = zfs_obj_to_path_impl(osp, obj, hdl, sa_table, buf, len);
2086 2088  
2087 2089          zfs_release_sa_handle(hdl, db, FTAG);
2088 2090          return (error);
2089 2091  }
2090 2092  
2091 2093  int
2092 2094  zfs_obj_to_stats(objset_t *osp, uint64_t obj, zfs_stat_t *sb,
2093 2095      char *buf, int len)
2094 2096  {
2095 2097          char *path = buf + len - 1;
2096 2098          sa_attr_type_t *sa_table;
2097 2099          sa_handle_t *hdl;
2098 2100          dmu_buf_t *db;
2099 2101          int error;
2100 2102  
2101 2103          *path = '\0';
2102 2104  
2103 2105          error = zfs_sa_setup(osp, &sa_table);
2104 2106          if (error != 0)
2105 2107                  return (error);
2106 2108  
2107 2109          error = zfs_grab_sa_handle(osp, obj, &hdl, &db, FTAG);
2108 2110          if (error != 0)
2109 2111                  return (error);
2110 2112  
2111 2113          error = zfs_obj_to_stats_impl(hdl, sa_table, sb);
2112 2114          if (error != 0) {
2113 2115                  zfs_release_sa_handle(hdl, db, FTAG);
2114 2116                  return (error);
2115 2117          }
2116 2118  
2117 2119          error = zfs_obj_to_path_impl(osp, obj, hdl, sa_table, buf, len);
2118 2120  
2119 2121          zfs_release_sa_handle(hdl, db, FTAG);
2120 2122          return (error);
2121 2123  }