Use the LZ4 algorithm to compress metadata when the corresponding feature is enabled
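
The excerpt below covers the header and helper-function portion of dmu.c; the hunks that actually switch the metadata write policy to LZ4 (presumably in dmu_write_policy()) are not shown in this excerpt, but the new #include <sys/zfeature.h> added below exists to support that feature check. As a rough sketch only, not the literal hunk from this webrev, the selection logic implied by the description could look like the following. It uses identifiers that do exist in illumos ZFS (spa_feature_is_active(), SPA_FEATURE_LZ4_COMPRESS, the zfs_mdcomp_disable tunable, the ZIO_COMPRESS_* constants, and os as the objset_t being written); whether the real change tests "enabled" or "active" is an assumption here:

        /* Sketch of the metadata compression choice; not the literal diff hunk. */
        enum zio_compress compress;

        if (zfs_mdcomp_disable) {
                /* Metadata compression turned off via the tunable. */
                compress = ZIO_COMPRESS_EMPTY;
        } else if (spa_feature_is_active(os->os_spa, SPA_FEATURE_LZ4_COMPRESS)) {
                /* The pool's lz4_compress feature is in use: prefer LZ4. */
                compress = ZIO_COMPRESS_LZ4;
        } else {
                /* Fall back to the historical metadata default. */
                compress = ZIO_COMPRESS_LZJB;
        }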
    
      
          --- old/usr/src/uts/common/fs/zfs/dmu.c
          +++ new/usr/src/uts/common/fs/zfs/dmu.c
   1    1  /*
   2    2   * CDDL HEADER START
   3    3   *
   4    4   * The contents of this file are subject to the terms of the
   5    5   * Common Development and Distribution License (the "License").
   6    6   * You may not use this file except in compliance with the License.
   7    7   *
   8    8   * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9    9   * or http://www.opensolaris.org/os/licensing.
  10   10   * See the License for the specific language governing permissions
  11   11   * and limitations under the License.
  12   12   *
  13   13   * When distributing Covered Code, include this CDDL HEADER in each
  14   14   * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15   15   * If applicable, add the following below this CDDL HEADER, with the
  16   16   * fields enclosed by brackets "[]" replaced with your own identifying
  
  17   17   * information: Portions Copyright [yyyy] [name of copyright owner]
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  /*
  22   22   * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
  23   23   * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
  24   24   */
  25   25  /* Copyright (c) 2013 by Saso Kiselkov. All rights reserved. */
  26   26  /* Copyright (c) 2013, Joyent, Inc. All rights reserved. */
       27 +/* Copyright (c) 2014, Nexenta Systems, Inc. All rights reserved. */
  27   28  
  28   29  #include <sys/dmu.h>
  29   30  #include <sys/dmu_impl.h>
  30   31  #include <sys/dmu_tx.h>
  31   32  #include <sys/dbuf.h>
  32   33  #include <sys/dnode.h>
  33   34  #include <sys/zfs_context.h>
  34   35  #include <sys/dmu_objset.h>
  35   36  #include <sys/dmu_traverse.h>
  36   37  #include <sys/dsl_dataset.h>
  37   38  #include <sys/dsl_dir.h>
  38   39  #include <sys/dsl_pool.h>
  39   40  #include <sys/dsl_synctask.h>
  40   41  #include <sys/dsl_prop.h>
  41   42  #include <sys/dmu_zfetch.h>
  42   43  #include <sys/zfs_ioctl.h>
  43   44  #include <sys/zap.h>
  44   45  #include <sys/zio_checksum.h>
  45   46  #include <sys/zio_compress.h>
  46   47  #include <sys/sa.h>
       48 +#include <sys/zfeature.h>
  47   49  #ifdef _KERNEL
  48   50  #include <sys/vmsystm.h>
  49   51  #include <sys/zfs_znode.h>
  50   52  #endif
  51   53  
  52   54  /*
  53   55   * Enable/disable nopwrite feature.
  54   56   */
  55   57  int zfs_nopwrite_enabled = 1;
  56   58  
  57   59  const dmu_object_type_info_t dmu_ot[DMU_OT_NUMTYPES] = {
  58   60          {       DMU_BSWAP_UINT8,        TRUE,   "unallocated"           },
  59   61          {       DMU_BSWAP_ZAP,          TRUE,   "object directory"      },
  60   62          {       DMU_BSWAP_UINT64,       TRUE,   "object array"          },
  61   63          {       DMU_BSWAP_UINT8,        TRUE,   "packed nvlist"         },
  62   64          {       DMU_BSWAP_UINT64,       TRUE,   "packed nvlist size"    },
  63   65          {       DMU_BSWAP_UINT64,       TRUE,   "bpobj"                 },
  64   66          {       DMU_BSWAP_UINT64,       TRUE,   "bpobj header"          },
  65   67          {       DMU_BSWAP_UINT64,       TRUE,   "SPA space map header"  },
  66   68          {       DMU_BSWAP_UINT64,       TRUE,   "SPA space map"         },
  67   69          {       DMU_BSWAP_UINT64,       TRUE,   "ZIL intent log"        },
  68   70          {       DMU_BSWAP_DNODE,        TRUE,   "DMU dnode"             },
  69   71          {       DMU_BSWAP_OBJSET,       TRUE,   "DMU objset"            },
  70   72          {       DMU_BSWAP_UINT64,       TRUE,   "DSL directory"         },
  71   73          {       DMU_BSWAP_ZAP,          TRUE,   "DSL directory child map"},
  72   74          {       DMU_BSWAP_ZAP,          TRUE,   "DSL dataset snap map"  },
  73   75          {       DMU_BSWAP_ZAP,          TRUE,   "DSL props"             },
  74   76          {       DMU_BSWAP_UINT64,       TRUE,   "DSL dataset"           },
  75   77          {       DMU_BSWAP_ZNODE,        TRUE,   "ZFS znode"             },
  76   78          {       DMU_BSWAP_OLDACL,       TRUE,   "ZFS V0 ACL"            },
  77   79          {       DMU_BSWAP_UINT8,        FALSE,  "ZFS plain file"        },
  78   80          {       DMU_BSWAP_ZAP,          TRUE,   "ZFS directory"         },
  79   81          {       DMU_BSWAP_ZAP,          TRUE,   "ZFS master node"       },
  80   82          {       DMU_BSWAP_ZAP,          TRUE,   "ZFS delete queue"      },
  81   83          {       DMU_BSWAP_UINT8,        FALSE,  "zvol object"           },
  82   84          {       DMU_BSWAP_ZAP,          TRUE,   "zvol prop"             },
  83   85          {       DMU_BSWAP_UINT8,        FALSE,  "other uint8[]"         },
  84   86          {       DMU_BSWAP_UINT64,       FALSE,  "other uint64[]"        },
  85   87          {       DMU_BSWAP_ZAP,          TRUE,   "other ZAP"             },
  86   88          {       DMU_BSWAP_ZAP,          TRUE,   "persistent error log"  },
  87   89          {       DMU_BSWAP_UINT8,        TRUE,   "SPA history"           },
  88   90          {       DMU_BSWAP_UINT64,       TRUE,   "SPA history offsets"   },
  89   91          {       DMU_BSWAP_ZAP,          TRUE,   "Pool properties"       },
  90   92          {       DMU_BSWAP_ZAP,          TRUE,   "DSL permissions"       },
  91   93          {       DMU_BSWAP_ACL,          TRUE,   "ZFS ACL"               },
  92   94          {       DMU_BSWAP_UINT8,        TRUE,   "ZFS SYSACL"            },
  93   95          {       DMU_BSWAP_UINT8,        TRUE,   "FUID table"            },
  94   96          {       DMU_BSWAP_UINT64,       TRUE,   "FUID table size"       },
  95   97          {       DMU_BSWAP_ZAP,          TRUE,   "DSL dataset next clones"},
  96   98          {       DMU_BSWAP_ZAP,          TRUE,   "scan work queue"       },
  97   99          {       DMU_BSWAP_ZAP,          TRUE,   "ZFS user/group used"   },
  98  100          {       DMU_BSWAP_ZAP,          TRUE,   "ZFS user/group quota"  },
  99  101          {       DMU_BSWAP_ZAP,          TRUE,   "snapshot refcount tags"},
 100  102          {       DMU_BSWAP_ZAP,          TRUE,   "DDT ZAP algorithm"     },
 101  103          {       DMU_BSWAP_ZAP,          TRUE,   "DDT statistics"        },
 102  104          {       DMU_BSWAP_UINT8,        TRUE,   "System attributes"     },
 103  105          {       DMU_BSWAP_ZAP,          TRUE,   "SA master node"        },
 104  106          {       DMU_BSWAP_ZAP,          TRUE,   "SA attr registration"  },
 105  107          {       DMU_BSWAP_ZAP,          TRUE,   "SA attr layouts"       },
 106  108          {       DMU_BSWAP_ZAP,          TRUE,   "scan translations"     },
 107  109          {       DMU_BSWAP_UINT8,        FALSE,  "deduplicated block"    },
 108  110          {       DMU_BSWAP_ZAP,          TRUE,   "DSL deadlist map"      },
 109  111          {       DMU_BSWAP_UINT64,       TRUE,   "DSL deadlist map hdr"  },
 110  112          {       DMU_BSWAP_ZAP,          TRUE,   "DSL dir clones"        },
 111  113          {       DMU_BSWAP_UINT64,       TRUE,   "bpobj subobj"          }
 112  114  };
 113  115  
 114  116  const dmu_object_byteswap_info_t dmu_ot_byteswap[DMU_BSWAP_NUMFUNCS] = {
 115  117          {       byteswap_uint8_array,   "uint8"         },
 116  118          {       byteswap_uint16_array,  "uint16"        },
 117  119          {       byteswap_uint32_array,  "uint32"        },
 118  120          {       byteswap_uint64_array,  "uint64"        },
 119  121          {       zap_byteswap,           "zap"           },
 120  122          {       dnode_buf_byteswap,     "dnode"         },
 121  123          {       dmu_objset_byteswap,    "objset"        },
 122  124          {       zfs_znode_byteswap,     "znode"         },
 123  125          {       zfs_oldacl_byteswap,    "oldacl"        },
 124  126          {       zfs_acl_byteswap,       "acl"           }
 125  127  };
 126  128  
 127  129  int
 128  130  dmu_buf_hold(objset_t *os, uint64_t object, uint64_t offset,
 129  131      void *tag, dmu_buf_t **dbp, int flags)
 130  132  {
 131  133          dnode_t *dn;
 132  134          uint64_t blkid;
 133  135          dmu_buf_impl_t *db;
 134  136          int err;
 135  137          int db_flags = DB_RF_CANFAIL;
 136  138  
 137  139          if (flags & DMU_READ_NO_PREFETCH)
 138  140                  db_flags |= DB_RF_NOPREFETCH;
 139  141  
 140  142          err = dnode_hold(os, object, FTAG, &dn);
 141  143          if (err)
 142  144                  return (err);
 143  145          blkid = dbuf_whichblock(dn, offset);
 144  146          rw_enter(&dn->dn_struct_rwlock, RW_READER);
 145  147          db = dbuf_hold(dn, blkid, tag);
 146  148          rw_exit(&dn->dn_struct_rwlock);
 147  149          if (db == NULL) {
 148  150                  err = SET_ERROR(EIO);
 149  151          } else {
 150  152                  err = dbuf_read(db, NULL, db_flags);
 151  153                  if (err) {
 152  154                          dbuf_rele(db, tag);
 153  155                          db = NULL;
 154  156                  }
 155  157          }
 156  158  
 157  159          dnode_rele(dn, FTAG);
 158  160          *dbp = &db->db; /* NULL db plus first field offset is NULL */
 159  161          return (err);
 160  162  }
 161  163  
 162  164  int
 163  165  dmu_bonus_max(void)
 164  166  {
 165  167          return (DN_MAX_BONUSLEN);
 166  168  }
 167  169  
 168  170  int
 169  171  dmu_set_bonus(dmu_buf_t *db_fake, int newsize, dmu_tx_t *tx)
 170  172  {
 171  173          dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
 172  174          dnode_t *dn;
 173  175          int error;
 174  176  
 175  177          DB_DNODE_ENTER(db);
 176  178          dn = DB_DNODE(db);
 177  179  
 178  180          if (dn->dn_bonus != db) {
 179  181                  error = SET_ERROR(EINVAL);
 180  182          } else if (newsize < 0 || newsize > db_fake->db_size) {
 181  183                  error = SET_ERROR(EINVAL);
 182  184          } else {
 183  185                  dnode_setbonuslen(dn, newsize, tx);
 184  186                  error = 0;
 185  187          }
 186  188  
 187  189          DB_DNODE_EXIT(db);
 188  190          return (error);
 189  191  }
 190  192  
 191  193  int
 192  194  dmu_set_bonustype(dmu_buf_t *db_fake, dmu_object_type_t type, dmu_tx_t *tx)
 193  195  {
 194  196          dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
 195  197          dnode_t *dn;
 196  198          int error;
 197  199  
 198  200          DB_DNODE_ENTER(db);
 199  201          dn = DB_DNODE(db);
 200  202  
 201  203          if (!DMU_OT_IS_VALID(type)) {
 202  204                  error = SET_ERROR(EINVAL);
 203  205          } else if (dn->dn_bonus != db) {
 204  206                  error = SET_ERROR(EINVAL);
 205  207          } else {
 206  208                  dnode_setbonus_type(dn, type, tx);
 207  209                  error = 0;
 208  210          }
 209  211  
 210  212          DB_DNODE_EXIT(db);
 211  213          return (error);
 212  214  }
 213  215  
 214  216  dmu_object_type_t
 215  217  dmu_get_bonustype(dmu_buf_t *db_fake)
 216  218  {
 217  219          dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
 218  220          dnode_t *dn;
 219  221          dmu_object_type_t type;
 220  222  
 221  223          DB_DNODE_ENTER(db);
 222  224          dn = DB_DNODE(db);
 223  225          type = dn->dn_bonustype;
 224  226          DB_DNODE_EXIT(db);
 225  227  
 226  228          return (type);
 227  229  }
 228  230  
 229  231  int
 230  232  dmu_rm_spill(objset_t *os, uint64_t object, dmu_tx_t *tx)
 231  233  {
 232  234          dnode_t *dn;
 233  235          int error;
 234  236  
 235  237          error = dnode_hold(os, object, FTAG, &dn);
 236  238          dbuf_rm_spill(dn, tx);
 237  239          rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
 238  240          dnode_rm_spill(dn, tx);
 239  241          rw_exit(&dn->dn_struct_rwlock);
 240  242          dnode_rele(dn, FTAG);
 241  243          return (error);
 242  244  }
 243  245  
 244  246  /*
 245  247   * returns ENOENT, EIO, or 0.
 246  248   */
 247  249  int
 248  250  dmu_bonus_hold(objset_t *os, uint64_t object, void *tag, dmu_buf_t **dbp)
 249  251  {
 250  252          dnode_t *dn;
 251  253          dmu_buf_impl_t *db;
 252  254          int error;
 253  255  
 254  256          error = dnode_hold(os, object, FTAG, &dn);
 255  257          if (error)
 256  258                  return (error);
 257  259  
 258  260          rw_enter(&dn->dn_struct_rwlock, RW_READER);
 259  261          if (dn->dn_bonus == NULL) {
 260  262                  rw_exit(&dn->dn_struct_rwlock);
 261  263                  rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
 262  264                  if (dn->dn_bonus == NULL)
 263  265                          dbuf_create_bonus(dn);
 264  266          }
 265  267          db = dn->dn_bonus;
 266  268  
 267  269          /* as long as the bonus buf is held, the dnode will be held */
 268  270          if (refcount_add(&db->db_holds, tag) == 1) {
 269  271                  VERIFY(dnode_add_ref(dn, db));
 270  272                  (void) atomic_inc_32_nv(&dn->dn_dbufs_count);
 271  273          }
 272  274  
 273  275          /*
 274  276           * Wait to drop dn_struct_rwlock until after adding the bonus dbuf's
 275  277           * hold and incrementing the dbuf count to ensure that dnode_move() sees
 276  278           * a dnode hold for every dbuf.
 277  279           */
 278  280          rw_exit(&dn->dn_struct_rwlock);
 279  281  
 280  282          dnode_rele(dn, FTAG);
 281  283  
 282  284          VERIFY(0 == dbuf_read(db, NULL, DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH));
 283  285  
 284  286          *dbp = &db->db;
 285  287          return (0);
 286  288  }
 287  289  
 288  290  /*
 289  291   * returns ENOENT, EIO, or 0.
 290  292   *
 291  293   * This interface will allocate a blank spill dbuf when a spill blk
 292  294   * doesn't already exist on the dnode.
 293  295   *
 294  296   * If you only want to find an already existing spill db, then
 295  297   * dmu_spill_hold_existing() should be used.
 296  298   */
 297  299  int
 298  300  dmu_spill_hold_by_dnode(dnode_t *dn, uint32_t flags, void *tag, dmu_buf_t **dbp)
 299  301  {
 300  302          dmu_buf_impl_t *db = NULL;
 301  303          int err;
 302  304  
 303  305          if ((flags & DB_RF_HAVESTRUCT) == 0)
 304  306                  rw_enter(&dn->dn_struct_rwlock, RW_READER);
 305  307  
 306  308          db = dbuf_hold(dn, DMU_SPILL_BLKID, tag);
 307  309  
 308  310          if ((flags & DB_RF_HAVESTRUCT) == 0)
 309  311                  rw_exit(&dn->dn_struct_rwlock);
 310  312  
 311  313          ASSERT(db != NULL);
 312  314          err = dbuf_read(db, NULL, flags);
 313  315          if (err == 0)
 314  316                  *dbp = &db->db;
 315  317          else
 316  318                  dbuf_rele(db, tag);
 317  319          return (err);
 318  320  }
 319  321  
 320  322  int
 321  323  dmu_spill_hold_existing(dmu_buf_t *bonus, void *tag, dmu_buf_t **dbp)
 322  324  {
 323  325          dmu_buf_impl_t *db = (dmu_buf_impl_t *)bonus;
 324  326          dnode_t *dn;
 325  327          int err;
 326  328  
 327  329          DB_DNODE_ENTER(db);
 328  330          dn = DB_DNODE(db);
 329  331  
 330  332          if (spa_version(dn->dn_objset->os_spa) < SPA_VERSION_SA) {
 331  333                  err = SET_ERROR(EINVAL);
 332  334          } else {
 333  335                  rw_enter(&dn->dn_struct_rwlock, RW_READER);
 334  336  
 335  337                  if (!dn->dn_have_spill) {
 336  338                          err = SET_ERROR(ENOENT);
 337  339                  } else {
 338  340                          err = dmu_spill_hold_by_dnode(dn,
 339  341                              DB_RF_HAVESTRUCT | DB_RF_CANFAIL, tag, dbp);
 340  342                  }
 341  343  
 342  344                  rw_exit(&dn->dn_struct_rwlock);
 343  345          }
 344  346  
 345  347          DB_DNODE_EXIT(db);
 346  348          return (err);
 347  349  }
 348  350  
 349  351  int
 350  352  dmu_spill_hold_by_bonus(dmu_buf_t *bonus, void *tag, dmu_buf_t **dbp)
 351  353  {
 352  354          dmu_buf_impl_t *db = (dmu_buf_impl_t *)bonus;
 353  355          dnode_t *dn;
 354  356          int err;
 355  357  
 356  358          DB_DNODE_ENTER(db);
 357  359          dn = DB_DNODE(db);
 358  360          err = dmu_spill_hold_by_dnode(dn, DB_RF_CANFAIL, tag, dbp);
 359  361          DB_DNODE_EXIT(db);
 360  362  
 361  363          return (err);
 362  364  }
 363  365  
 364  366  /*
 365  367   * Note: longer-term, we should modify all of the dmu_buf_*() interfaces
 366  368   * to take a held dnode rather than <os, object> -- the lookup is wasteful,
 367  369   * and can induce severe lock contention when writing to several files
 368  370   * whose dnodes are in the same block.
 369  371   */
 370  372  static int
 371  373  dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset, uint64_t length,
 372  374      int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp, uint32_t flags)
 373  375  {
 374  376          dmu_buf_t **dbp;
 375  377          uint64_t blkid, nblks, i;
 376  378          uint32_t dbuf_flags;
 377  379          int err;
 378  380          zio_t *zio;
 379  381  
 380  382          ASSERT(length <= DMU_MAX_ACCESS);
 381  383  
 382  384          dbuf_flags = DB_RF_CANFAIL | DB_RF_NEVERWAIT | DB_RF_HAVESTRUCT;
 383  385          if (flags & DMU_READ_NO_PREFETCH || length > zfetch_array_rd_sz)
 384  386                  dbuf_flags |= DB_RF_NOPREFETCH;
 385  387  
 386  388          rw_enter(&dn->dn_struct_rwlock, RW_READER);
 387  389          if (dn->dn_datablkshift) {
 388  390                  int blkshift = dn->dn_datablkshift;
 389  391                  nblks = (P2ROUNDUP(offset+length, 1ULL<<blkshift) -
 390  392                      P2ALIGN(offset, 1ULL<<blkshift)) >> blkshift;
 391  393          } else {
 392  394                  if (offset + length > dn->dn_datablksz) {
 393  395                          zfs_panic_recover("zfs: accessing past end of object "
 394  396                              "%llx/%llx (size=%u access=%llu+%llu)",
 395  397                              (longlong_t)dn->dn_objset->
 396  398                              os_dsl_dataset->ds_object,
 397  399                              (longlong_t)dn->dn_object, dn->dn_datablksz,
 398  400                              (longlong_t)offset, (longlong_t)length);
 399  401                          rw_exit(&dn->dn_struct_rwlock);
 400  402                          return (SET_ERROR(EIO));
 401  403                  }
 402  404                  nblks = 1;
 403  405          }
 404  406          dbp = kmem_zalloc(sizeof (dmu_buf_t *) * nblks, KM_SLEEP);
 405  407  
 406  408          zio = zio_root(dn->dn_objset->os_spa, NULL, NULL, ZIO_FLAG_CANFAIL);
 407  409          blkid = dbuf_whichblock(dn, offset);
 408  410          for (i = 0; i < nblks; i++) {
 409  411                  dmu_buf_impl_t *db = dbuf_hold(dn, blkid+i, tag);
 410  412                  if (db == NULL) {
 411  413                          rw_exit(&dn->dn_struct_rwlock);
 412  414                          dmu_buf_rele_array(dbp, nblks, tag);
 413  415                          zio_nowait(zio);
 414  416                          return (SET_ERROR(EIO));
 415  417                  }
 416  418                  /* initiate async i/o */
 417  419                  if (read) {
 418  420                          (void) dbuf_read(db, zio, dbuf_flags);
 419  421                  }
 420  422                  dbp[i] = &db->db;
 421  423          }
 422  424          rw_exit(&dn->dn_struct_rwlock);
 423  425  
 424  426          /* wait for async i/o */
 425  427          err = zio_wait(zio);
 426  428          if (err) {
 427  429                  dmu_buf_rele_array(dbp, nblks, tag);
 428  430                  return (err);
 429  431          }
 430  432  
 431  433          /* wait for other io to complete */
 432  434          if (read) {
 433  435                  for (i = 0; i < nblks; i++) {
 434  436                          dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbp[i];
 435  437                          mutex_enter(&db->db_mtx);
 436  438                          while (db->db_state == DB_READ ||
 437  439                              db->db_state == DB_FILL)
 438  440                                  cv_wait(&db->db_changed, &db->db_mtx);
 439  441                          if (db->db_state == DB_UNCACHED)
 440  442                                  err = SET_ERROR(EIO);
 441  443                          mutex_exit(&db->db_mtx);
 442  444                          if (err) {
 443  445                                  dmu_buf_rele_array(dbp, nblks, tag);
 444  446                                  return (err);
 445  447                          }
 446  448                  }
 447  449          }
 448  450  
 449  451          *numbufsp = nblks;
 450  452          *dbpp = dbp;
 451  453          return (0);
 452  454  }
 453  455  
 454  456  static int
 455  457  dmu_buf_hold_array(objset_t *os, uint64_t object, uint64_t offset,
 456  458      uint64_t length, int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp)
 457  459  {
 458  460          dnode_t *dn;
 459  461          int err;
 460  462  
 461  463          err = dnode_hold(os, object, FTAG, &dn);
 462  464          if (err)
 463  465                  return (err);
 464  466  
 465  467          err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
 466  468              numbufsp, dbpp, DMU_READ_PREFETCH);
 467  469  
 468  470          dnode_rele(dn, FTAG);
 469  471  
 470  472          return (err);
 471  473  }
 472  474  
 473  475  int
 474  476  dmu_buf_hold_array_by_bonus(dmu_buf_t *db_fake, uint64_t offset,
 475  477      uint64_t length, int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp)
 476  478  {
 477  479          dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
 478  480          dnode_t *dn;
 479  481          int err;
 480  482  
 481  483          DB_DNODE_ENTER(db);
 482  484          dn = DB_DNODE(db);
 483  485          err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
 484  486              numbufsp, dbpp, DMU_READ_PREFETCH);
 485  487          DB_DNODE_EXIT(db);
 486  488  
 487  489          return (err);
 488  490  }
 489  491  
 490  492  void
 491  493  dmu_buf_rele_array(dmu_buf_t **dbp_fake, int numbufs, void *tag)
 492  494  {
 493  495          int i;
 494  496          dmu_buf_impl_t **dbp = (dmu_buf_impl_t **)dbp_fake;
 495  497  
 496  498          if (numbufs == 0)
 497  499                  return;
 498  500  
 499  501          for (i = 0; i < numbufs; i++) {
 500  502                  if (dbp[i])
 501  503                          dbuf_rele(dbp[i], tag);
 502  504          }
 503  505  
 504  506          kmem_free(dbp, sizeof (dmu_buf_t *) * numbufs);
 505  507  }
 506  508  
 507  509  /*
 508  510   * Issue prefetch i/os for the given blocks.
 509  511   *
 510  512   * Note: The assumption is that we *know* these blocks will be needed
 511  513   * almost immediately.  Therefore, the prefetch i/os will be issued at
 512  514   * ZIO_PRIORITY_SYNC_READ.
 513  515   *
 514  516   * Note: indirect blocks and other metadata will be read synchronously,
 515  517   * causing this function to block if they are not already cached.
 516  518   */
 517  519  void
 518  520  dmu_prefetch(objset_t *os, uint64_t object, uint64_t offset, uint64_t len)
 519  521  {
 520  522          dnode_t *dn;
 521  523          uint64_t blkid;
 522  524          int nblks, err;
 523  525  
 524  526          if (zfs_prefetch_disable)
 525  527                  return;
 526  528  
 527  529          if (len == 0) {  /* they're interested in the bonus buffer */
 528  530                  dn = DMU_META_DNODE(os);
 529  531  
 530  532                  if (object == 0 || object >= DN_MAX_OBJECT)
 531  533                          return;
 532  534  
 533  535                  rw_enter(&dn->dn_struct_rwlock, RW_READER);
 534  536                  blkid = dbuf_whichblock(dn, object * sizeof (dnode_phys_t));
 535  537                  dbuf_prefetch(dn, blkid, ZIO_PRIORITY_SYNC_READ);
 536  538                  rw_exit(&dn->dn_struct_rwlock);
 537  539                  return;
 538  540          }
 539  541  
 540  542          /*
 541  543           * XXX - Note, if the dnode for the requested object is not
 542  544           * already cached, we will do a *synchronous* read in the
 543  545           * dnode_hold() call.  The same is true for any indirects.
 544  546           */
 545  547          err = dnode_hold(os, object, FTAG, &dn);
 546  548          if (err != 0)
 547  549                  return;
 548  550  
 549  551          rw_enter(&dn->dn_struct_rwlock, RW_READER);
 550  552          if (dn->dn_datablkshift) {
 551  553                  int blkshift = dn->dn_datablkshift;
 552  554                  nblks = (P2ROUNDUP(offset + len, 1 << blkshift) -
 553  555                      P2ALIGN(offset, 1 << blkshift)) >> blkshift;
 554  556          } else {
 555  557                  nblks = (offset < dn->dn_datablksz);
 556  558          }
 557  559  
 558  560          if (nblks != 0) {
 559  561                  blkid = dbuf_whichblock(dn, offset);
 560  562                  for (int i = 0; i < nblks; i++)
 561  563                          dbuf_prefetch(dn, blkid + i, ZIO_PRIORITY_SYNC_READ);
 562  564          }
 563  565  
 564  566          rw_exit(&dn->dn_struct_rwlock);
 565  567  
 566  568          dnode_rele(dn, FTAG);
 567  569  }
 568  570  
 569  571  /*
 570  572   * Get the next "chunk" of file data to free.  We traverse the file from
 571  573   * the end so that the file gets shorter over time (if we crash in the
 572  574   * middle, this will leave us in a better state).  We find allocated file
 573  575   * data by simply searching the allocated level 1 indirects.
 574  576   *
 575  577   * On input, *start should be the first offset that does not need to be
 576  578   * freed (e.g. "offset + length").  On return, *start will be the first
 577  579   * offset that should be freed.
 578  580   */
 579  581  static int
 580  582  get_next_chunk(dnode_t *dn, uint64_t *start, uint64_t minimum)
 581  583  {
 582  584          uint64_t maxblks = DMU_MAX_ACCESS >> (dn->dn_indblkshift + 1);
 583  585          /* bytes of data covered by a level-1 indirect block */
 584  586          uint64_t iblkrange =
 585  587              dn->dn_datablksz * EPB(dn->dn_indblkshift, SPA_BLKPTRSHIFT);
 586  588  
 587  589          ASSERT3U(minimum, <=, *start);
 588  590  
 589  591          if (*start - minimum <= iblkrange * maxblks) {
 590  592                  *start = minimum;
 591  593                  return (0);
 592  594          }
 593  595          ASSERT(ISP2(iblkrange));
 594  596  
 595  597          for (uint64_t blks = 0; *start > minimum && blks < maxblks; blks++) {
 596  598                  int err;
 597  599  
 598  600                  /*
 599  601                   * dnode_next_offset(BACKWARDS) will find an allocated L1
 600  602                   * indirect block at or before the input offset.  We must
 601  603                   * decrement *start so that it is at the end of the region
 602  604                   * to search.
 603  605                   */
 604  606                  (*start)--;
 605  607                  err = dnode_next_offset(dn,
 606  608                      DNODE_FIND_BACKWARDS, start, 2, 1, 0);
 607  609  
 608  610                  /* if there are no indirect blocks before start, we are done */
 609  611                  if (err == ESRCH) {
 610  612                          *start = minimum;
 611  613                          break;
 612  614                  } else if (err != 0) {
 613  615                          return (err);
 614  616                  }
 615  617  
 616  618                  /* set start to the beginning of this L1 indirect */
 617  619                  *start = P2ALIGN(*start, iblkrange);
 618  620          }
 619  621          if (*start < minimum)
 620  622                  *start = minimum;
 621  623          return (0);
 622  624  }
 623  625  
 624  626  static int
 625  627  dmu_free_long_range_impl(objset_t *os, dnode_t *dn, uint64_t offset,
 626  628      uint64_t length)
 627  629  {
 628  630          uint64_t object_size = (dn->dn_maxblkid + 1) * dn->dn_datablksz;
 629  631          int err;
 630  632  
 631  633          if (offset >= object_size)
 632  634                  return (0);
 633  635  
 634  636          if (length == DMU_OBJECT_END || offset + length > object_size)
 635  637                  length = object_size - offset;
 636  638  
 637  639          while (length != 0) {
 638  640                  uint64_t chunk_end, chunk_begin;
 639  641  
 640  642                  chunk_end = chunk_begin = offset + length;
 641  643  
 642  644                  /* move chunk_begin backwards to the beginning of this chunk */
 643  645                  err = get_next_chunk(dn, &chunk_begin, offset);
 644  646                  if (err)
 645  647                          return (err);
 646  648                  ASSERT3U(chunk_begin, >=, offset);
 647  649                  ASSERT3U(chunk_begin, <=, chunk_end);
 648  650  
 649  651                  dmu_tx_t *tx = dmu_tx_create(os);
 650  652                  dmu_tx_hold_free(tx, dn->dn_object,
 651  653                      chunk_begin, chunk_end - chunk_begin);
 652  654                  err = dmu_tx_assign(tx, TXG_WAIT);
 653  655                  if (err) {
 654  656                          dmu_tx_abort(tx);
 655  657                          return (err);
 656  658                  }
 657  659                  dnode_free_range(dn, chunk_begin, chunk_end - chunk_begin, tx);
 658  660                  dmu_tx_commit(tx);
 659  661  
 660  662                  length -= chunk_end - chunk_begin;
 661  663          }
 662  664          return (0);
 663  665  }
 664  666  
 665  667  int
 666  668  dmu_free_long_range(objset_t *os, uint64_t object,
 667  669      uint64_t offset, uint64_t length)
 668  670  {
 669  671          dnode_t *dn;
 670  672          int err;
 671  673  
 672  674          err = dnode_hold(os, object, FTAG, &dn);
 673  675          if (err != 0)
 674  676                  return (err);
 675  677          err = dmu_free_long_range_impl(os, dn, offset, length);
 676  678  
 677  679          /*
 678  680           * It is important to zero out the maxblkid when freeing the entire
 679  681           * file, so that (a) subsequent calls to dmu_free_long_range_impl()
 680  682           * will take the fast path, and (b) dnode_reallocate() can verify
 681  683           * that the entire file has been freed.
 682  684           */
 683  685          if (err == 0 && offset == 0 && length == DMU_OBJECT_END)
 684  686                  dn->dn_maxblkid = 0;
 685  687  
 686  688          dnode_rele(dn, FTAG);
 687  689          return (err);
 688  690  }
 689  691  
 690  692  int
 691  693  dmu_free_long_object(objset_t *os, uint64_t object)
 692  694  {
 693  695          dmu_tx_t *tx;
 694  696          int err;
 695  697  
 696  698          err = dmu_free_long_range(os, object, 0, DMU_OBJECT_END);
 697  699          if (err != 0)
 698  700                  return (err);
 699  701  
 700  702          tx = dmu_tx_create(os);
 701  703          dmu_tx_hold_bonus(tx, object);
 702  704          dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
 703  705          err = dmu_tx_assign(tx, TXG_WAIT);
 704  706          if (err == 0) {
 705  707                  err = dmu_object_free(os, object, tx);
 706  708                  dmu_tx_commit(tx);
 707  709          } else {
 708  710                  dmu_tx_abort(tx);
 709  711          }
 710  712  
 711  713          return (err);
 712  714  }
 713  715  
 714  716  int
 715  717  dmu_free_range(objset_t *os, uint64_t object, uint64_t offset,
 716  718      uint64_t size, dmu_tx_t *tx)
 717  719  {
 718  720          dnode_t *dn;
 719  721          int err = dnode_hold(os, object, FTAG, &dn);
 720  722          if (err)
 721  723                  return (err);
 722  724          ASSERT(offset < UINT64_MAX);
 723  725          ASSERT(size == -1ULL || size <= UINT64_MAX - offset);
 724  726          dnode_free_range(dn, offset, size, tx);
 725  727          dnode_rele(dn, FTAG);
 726  728          return (0);
 727  729  }
 728  730  
 729  731  int
 730  732  dmu_read(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
 731  733      void *buf, uint32_t flags)
 732  734  {
 733  735          dnode_t *dn;
 734  736          dmu_buf_t **dbp;
 735  737          int numbufs, err;
 736  738  
 737  739          err = dnode_hold(os, object, FTAG, &dn);
 738  740          if (err)
 739  741                  return (err);
 740  742  
 741  743          /*
 742  744           * Deal with odd block sizes, where there can't be data past the first
 743  745           * block.  If we ever do the tail block optimization, we will need to
 744  746           * handle that here as well.
 745  747           */
 746  748          if (dn->dn_maxblkid == 0) {
 747  749                  int newsz = offset > dn->dn_datablksz ? 0 :
 748  750                      MIN(size, dn->dn_datablksz - offset);
 749  751                  bzero((char *)buf + newsz, size - newsz);
 750  752                  size = newsz;
 751  753          }
 752  754  
 753  755          while (size > 0) {
 754  756                  uint64_t mylen = MIN(size, DMU_MAX_ACCESS / 2);
 755  757                  int i;
 756  758  
 757  759                  /*
 758  760                   * NB: we could do this block-at-a-time, but it's nice
 759  761                   * to be reading in parallel.
 760  762                   */
 761  763                  err = dmu_buf_hold_array_by_dnode(dn, offset, mylen,
 762  764                      TRUE, FTAG, &numbufs, &dbp, flags);
 763  765                  if (err)
 764  766                          break;
 765  767  
 766  768                  for (i = 0; i < numbufs; i++) {
 767  769                          int tocpy;
 768  770                          int bufoff;
 769  771                          dmu_buf_t *db = dbp[i];
 770  772  
 771  773                          ASSERT(size > 0);
 772  774  
 773  775                          bufoff = offset - db->db_offset;
 774  776                          tocpy = (int)MIN(db->db_size - bufoff, size);
 775  777  
 776  778                          bcopy((char *)db->db_data + bufoff, buf, tocpy);
 777  779  
 778  780                          offset += tocpy;
 779  781                          size -= tocpy;
 780  782                          buf = (char *)buf + tocpy;
 781  783                  }
 782  784                  dmu_buf_rele_array(dbp, numbufs, FTAG);
 783  785          }
 784  786          dnode_rele(dn, FTAG);
 785  787          return (err);
 786  788  }
 787  789  
 788  790  void
 789  791  dmu_write(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
 790  792      const void *buf, dmu_tx_t *tx)
 791  793  {
 792  794          dmu_buf_t **dbp;
 793  795          int numbufs, i;
 794  796  
 795  797          if (size == 0)
 796  798                  return;
 797  799  
 798  800          VERIFY(0 == dmu_buf_hold_array(os, object, offset, size,
 799  801              FALSE, FTAG, &numbufs, &dbp));
 800  802  
 801  803          for (i = 0; i < numbufs; i++) {
 802  804                  int tocpy;
 803  805                  int bufoff;
 804  806                  dmu_buf_t *db = dbp[i];
 805  807  
 806  808                  ASSERT(size > 0);
 807  809  
 808  810                  bufoff = offset - db->db_offset;
 809  811                  tocpy = (int)MIN(db->db_size - bufoff, size);
 810  812  
 811  813                  ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);
 812  814  
 813  815                  if (tocpy == db->db_size)
 814  816                          dmu_buf_will_fill(db, tx);
 815  817                  else
 816  818                          dmu_buf_will_dirty(db, tx);
 817  819  
 818  820                  bcopy(buf, (char *)db->db_data + bufoff, tocpy);
 819  821  
 820  822                  if (tocpy == db->db_size)
 821  823                          dmu_buf_fill_done(db, tx);
 822  824  
 823  825                  offset += tocpy;
 824  826                  size -= tocpy;
 825  827                  buf = (char *)buf + tocpy;
 826  828          }
 827  829          dmu_buf_rele_array(dbp, numbufs, FTAG);
 828  830  }
 829  831  
 830  832  void
 831  833  dmu_prealloc(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
 832  834      dmu_tx_t *tx)
 833  835  {
 834  836          dmu_buf_t **dbp;
 835  837          int numbufs, i;
 836  838  
 837  839          if (size == 0)
 838  840                  return;
 839  841  
 840  842          VERIFY(0 == dmu_buf_hold_array(os, object, offset, size,
 841  843              FALSE, FTAG, &numbufs, &dbp));
 842  844  
 843  845          for (i = 0; i < numbufs; i++) {
 844  846                  dmu_buf_t *db = dbp[i];
 845  847  
 846  848                  dmu_buf_will_not_fill(db, tx);
 847  849          }
 848  850          dmu_buf_rele_array(dbp, numbufs, FTAG);
 849  851  }
 850  852  
 851  853  /*
 852  854   * DMU support for xuio
 853  855   */
 854  856  kstat_t *xuio_ksp = NULL;
 855  857  
 856  858  int
 857  859  dmu_xuio_init(xuio_t *xuio, int nblk)
 858  860  {
 859  861          dmu_xuio_t *priv;
 860  862          uio_t *uio = &xuio->xu_uio;
 861  863  
 862  864          uio->uio_iovcnt = nblk;
 863  865          uio->uio_iov = kmem_zalloc(nblk * sizeof (iovec_t), KM_SLEEP);
 864  866  
 865  867          priv = kmem_zalloc(sizeof (dmu_xuio_t), KM_SLEEP);
 866  868          priv->cnt = nblk;
 867  869          priv->bufs = kmem_zalloc(nblk * sizeof (arc_buf_t *), KM_SLEEP);
 868  870          priv->iovp = uio->uio_iov;
 869  871          XUIO_XUZC_PRIV(xuio) = priv;
 870  872  
 871  873          if (XUIO_XUZC_RW(xuio) == UIO_READ)
 872  874                  XUIOSTAT_INCR(xuiostat_onloan_rbuf, nblk);
 873  875          else
 874  876                  XUIOSTAT_INCR(xuiostat_onloan_wbuf, nblk);
 875  877  
 876  878          return (0);
 877  879  }
 878  880  
 879  881  void
 880  882  dmu_xuio_fini(xuio_t *xuio)
 881  883  {
 882  884          dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio);
 883  885          int nblk = priv->cnt;
 884  886  
 885  887          kmem_free(priv->iovp, nblk * sizeof (iovec_t));
 886  888          kmem_free(priv->bufs, nblk * sizeof (arc_buf_t *));
 887  889          kmem_free(priv, sizeof (dmu_xuio_t));
 888  890  
 889  891          if (XUIO_XUZC_RW(xuio) == UIO_READ)
 890  892                  XUIOSTAT_INCR(xuiostat_onloan_rbuf, -nblk);
 891  893          else
 892  894                  XUIOSTAT_INCR(xuiostat_onloan_wbuf, -nblk);
 893  895  }
 894  896  
 895  897  /*
 896  898   * Initialize iov[priv->next] and priv->bufs[priv->next] with { off, n, abuf }
 897  899   * and increase priv->next by 1.
 898  900   */
 899  901  int
 900  902  dmu_xuio_add(xuio_t *xuio, arc_buf_t *abuf, offset_t off, size_t n)
 901  903  {
 902  904          struct iovec *iov;
 903  905          uio_t *uio = &xuio->xu_uio;
 904  906          dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio);
 905  907          int i = priv->next++;
 906  908  
 907  909          ASSERT(i < priv->cnt);
 908  910          ASSERT(off + n <= arc_buf_size(abuf));
 909  911          iov = uio->uio_iov + i;
 910  912          iov->iov_base = (char *)abuf->b_data + off;
 911  913          iov->iov_len = n;
 912  914          priv->bufs[i] = abuf;
 913  915          return (0);
 914  916  }
 915  917  
 916  918  int
 917  919  dmu_xuio_cnt(xuio_t *xuio)
 918  920  {
 919  921          dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio);
 920  922          return (priv->cnt);
 921  923  }
 922  924  
 923  925  arc_buf_t *
 924  926  dmu_xuio_arcbuf(xuio_t *xuio, int i)
 925  927  {
 926  928          dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio);
 927  929  
 928  930          ASSERT(i < priv->cnt);
 929  931          return (priv->bufs[i]);
 930  932  }
 931  933  
 932  934  void
 933  935  dmu_xuio_clear(xuio_t *xuio, int i)
 934  936  {
 935  937          dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio);
 936  938  
 937  939          ASSERT(i < priv->cnt);
 938  940          priv->bufs[i] = NULL;
 939  941  }
 940  942  
 941  943  static void
 942  944  xuio_stat_init(void)
 943  945  {
 944  946          xuio_ksp = kstat_create("zfs", 0, "xuio_stats", "misc",
 945  947              KSTAT_TYPE_NAMED, sizeof (xuio_stats) / sizeof (kstat_named_t),
 946  948              KSTAT_FLAG_VIRTUAL);
 947  949          if (xuio_ksp != NULL) {
 948  950                  xuio_ksp->ks_data = &xuio_stats;
 949  951                  kstat_install(xuio_ksp);
 950  952          }
 951  953  }
 952  954  
 953  955  static void
 954  956  xuio_stat_fini(void)
 955  957  {
 956  958          if (xuio_ksp != NULL) {
 957  959                  kstat_delete(xuio_ksp);
 958  960                  xuio_ksp = NULL;
 959  961          }
 960  962  }
 961  963  
 962  964  void
 963  965  xuio_stat_wbuf_copied()
 964  966  {
 965  967          XUIOSTAT_BUMP(xuiostat_wbuf_copied);
 966  968  }
 967  969  
 968  970  void
 969  971  xuio_stat_wbuf_nocopy()
 970  972  {
 971  973          XUIOSTAT_BUMP(xuiostat_wbuf_nocopy);
 972  974  }
 973  975  
 974  976  #ifdef _KERNEL
 975  977  int
 976  978  dmu_read_uio(objset_t *os, uint64_t object, uio_t *uio, uint64_t size)
 977  979  {
 978  980          dmu_buf_t **dbp;
 979  981          int numbufs, i, err;
 980  982          xuio_t *xuio = NULL;
 981  983  
 982  984          /*
 983  985           * NB: we could do this block-at-a-time, but it's nice
 984  986           * to be reading in parallel.
 985  987           */
 986  988          err = dmu_buf_hold_array(os, object, uio->uio_loffset, size, TRUE, FTAG,
 987  989              &numbufs, &dbp);
 988  990          if (err)
 989  991                  return (err);
 990  992  
 991  993          if (uio->uio_extflg == UIO_XUIO)
 992  994                  xuio = (xuio_t *)uio;
 993  995  
 994  996          for (i = 0; i < numbufs; i++) {
 995  997                  int tocpy;
 996  998                  int bufoff;
 997  999                  dmu_buf_t *db = dbp[i];
 998 1000  
 999 1001                  ASSERT(size > 0);
1000 1002  
1001 1003                  bufoff = uio->uio_loffset - db->db_offset;
1002 1004                  tocpy = (int)MIN(db->db_size - bufoff, size);
1003 1005  
1004 1006                  if (xuio) {
1005 1007                          dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
1006 1008                          arc_buf_t *dbuf_abuf = dbi->db_buf;
1007 1009                          arc_buf_t *abuf = dbuf_loan_arcbuf(dbi);
1008 1010                          err = dmu_xuio_add(xuio, abuf, bufoff, tocpy);
1009 1011                          if (!err) {
1010 1012                                  uio->uio_resid -= tocpy;
1011 1013                                  uio->uio_loffset += tocpy;
1012 1014                          }
1013 1015  
1014 1016                          if (abuf == dbuf_abuf)
1015 1017                                  XUIOSTAT_BUMP(xuiostat_rbuf_nocopy);
1016 1018                          else
1017 1019                                  XUIOSTAT_BUMP(xuiostat_rbuf_copied);
1018 1020                  } else {
1019 1021                          err = uiomove((char *)db->db_data + bufoff, tocpy,
1020 1022                              UIO_READ, uio);
1021 1023                  }
1022 1024                  if (err)
1023 1025                          break;
1024 1026  
1025 1027                  size -= tocpy;
1026 1028          }
1027 1029          dmu_buf_rele_array(dbp, numbufs, FTAG);
1028 1030  
1029 1031          return (err);
1030 1032  }
1031 1033  
1032 1034  static int
1033 1035  dmu_write_uio_dnode(dnode_t *dn, uio_t *uio, uint64_t size, dmu_tx_t *tx)
1034 1036  {
1035 1037          dmu_buf_t **dbp;
1036 1038          int numbufs;
1037 1039          int err = 0;
1038 1040          int i;
1039 1041  
1040 1042          err = dmu_buf_hold_array_by_dnode(dn, uio->uio_loffset, size,
1041 1043              FALSE, FTAG, &numbufs, &dbp, DMU_READ_PREFETCH);
1042 1044          if (err)
1043 1045                  return (err);
1044 1046  
1045 1047          for (i = 0; i < numbufs; i++) {
1046 1048                  int tocpy;
1047 1049                  int bufoff;
1048 1050                  dmu_buf_t *db = dbp[i];
1049 1051  
1050 1052                  ASSERT(size > 0);
1051 1053  
1052 1054                  bufoff = uio->uio_loffset - db->db_offset;
1053 1055                  tocpy = (int)MIN(db->db_size - bufoff, size);
1054 1056  
1055 1057                  ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);
1056 1058  
1057 1059                  if (tocpy == db->db_size)
1058 1060                          dmu_buf_will_fill(db, tx);
1059 1061                  else
1060 1062                          dmu_buf_will_dirty(db, tx);
1061 1063  
1062 1064                  /*
1063 1065                   * XXX uiomove could block forever (e.g. nfs-backed
1064 1066                   * pages).  There needs to be a uiolockdown() function
1065 1067                   * to lock the pages in memory, so that uiomove won't
1066 1068                   * block.
1067 1069                   */
1068 1070                  err = uiomove((char *)db->db_data + bufoff, tocpy,
1069 1071                      UIO_WRITE, uio);
1070 1072  
1071 1073                  if (tocpy == db->db_size)
1072 1074                          dmu_buf_fill_done(db, tx);
1073 1075  
1074 1076                  if (err)
1075 1077                          break;
1076 1078  
1077 1079                  size -= tocpy;
1078 1080          }
1079 1081  
1080 1082          dmu_buf_rele_array(dbp, numbufs, FTAG);
1081 1083          return (err);
1082 1084  }
1083 1085  
1084 1086  int
1085 1087  dmu_write_uio_dbuf(dmu_buf_t *zdb, uio_t *uio, uint64_t size,
1086 1088      dmu_tx_t *tx)
1087 1089  {
1088 1090          dmu_buf_impl_t *db = (dmu_buf_impl_t *)zdb;
1089 1091          dnode_t *dn;
1090 1092          int err;
1091 1093  
1092 1094          if (size == 0)
1093 1095                  return (0);
1094 1096  
1095 1097          DB_DNODE_ENTER(db);
1096 1098          dn = DB_DNODE(db);
1097 1099          err = dmu_write_uio_dnode(dn, uio, size, tx);
1098 1100          DB_DNODE_EXIT(db);
1099 1101  
1100 1102          return (err);
1101 1103  }
1102 1104  
1103 1105  int
1104 1106  dmu_write_uio(objset_t *os, uint64_t object, uio_t *uio, uint64_t size,
1105 1107      dmu_tx_t *tx)
1106 1108  {
1107 1109          dnode_t *dn;
1108 1110          int err;
1109 1111  
1110 1112          if (size == 0)
1111 1113                  return (0);
1112 1114  
1113 1115          err = dnode_hold(os, object, FTAG, &dn);
1114 1116          if (err)
1115 1117                  return (err);
1116 1118  
1117 1119          err = dmu_write_uio_dnode(dn, uio, size, tx);
1118 1120  
1119 1121          dnode_rele(dn, FTAG);
1120 1122  
1121 1123          return (err);
1122 1124  }
1123 1125  
1124 1126  int
1125 1127  dmu_write_pages(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
1126 1128      page_t *pp, dmu_tx_t *tx)
1127 1129  {
1128 1130          dmu_buf_t **dbp;
1129 1131          int numbufs, i;
1130 1132          int err;
1131 1133  
1132 1134          if (size == 0)
1133 1135                  return (0);
1134 1136  
1135 1137          err = dmu_buf_hold_array(os, object, offset, size,
1136 1138              FALSE, FTAG, &numbufs, &dbp);
1137 1139          if (err)
1138 1140                  return (err);
1139 1141  
1140 1142          for (i = 0; i < numbufs; i++) {
1141 1143                  int tocpy, copied, thiscpy;
1142 1144                  int bufoff;
1143 1145                  dmu_buf_t *db = dbp[i];
1144 1146                  caddr_t va;
1145 1147  
1146 1148                  ASSERT(size > 0);
1147 1149                  ASSERT3U(db->db_size, >=, PAGESIZE);
1148 1150  
1149 1151                  bufoff = offset - db->db_offset;
1150 1152                  tocpy = (int)MIN(db->db_size - bufoff, size);
1151 1153  
1152 1154                  ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);
1153 1155  
1154 1156                  if (tocpy == db->db_size)
1155 1157                          dmu_buf_will_fill(db, tx);
1156 1158                  else
1157 1159                          dmu_buf_will_dirty(db, tx);
1158 1160  
1159 1161                  for (copied = 0; copied < tocpy; copied += PAGESIZE) {
1160 1162                          ASSERT3U(pp->p_offset, ==, db->db_offset + bufoff);
1161 1163                          thiscpy = MIN(PAGESIZE, tocpy - copied);
1162 1164                          va = zfs_map_page(pp, S_READ);
1163 1165                          bcopy(va, (char *)db->db_data + bufoff, thiscpy);
1164 1166                          zfs_unmap_page(pp, va);
1165 1167                          pp = pp->p_next;
1166 1168                          bufoff += PAGESIZE;
1167 1169                  }
1168 1170  
1169 1171                  if (tocpy == db->db_size)
1170 1172                          dmu_buf_fill_done(db, tx);
1171 1173  
1172 1174                  offset += tocpy;
1173 1175                  size -= tocpy;
1174 1176          }
1175 1177          dmu_buf_rele_array(dbp, numbufs, FTAG);
1176 1178          return (err);
1177 1179  }
1178 1180  #endif
1179 1181  
1180 1182  /*
1181 1183   * Allocate a loaned anonymous arc buffer.
1182 1184   */
1183 1185  arc_buf_t *
1184 1186  dmu_request_arcbuf(dmu_buf_t *handle, int size)
1185 1187  {
1186 1188          dmu_buf_impl_t *db = (dmu_buf_impl_t *)handle;
1187 1189  
1188 1190          return (arc_loan_buf(db->db_objset->os_spa, size));
1189 1191  }
1190 1192  
1191 1193  /*
1192 1194   * Free a loaned arc buffer.
1193 1195   */
1194 1196  void
1195 1197  dmu_return_arcbuf(arc_buf_t *buf)
1196 1198  {
1197 1199          arc_return_buf(buf, FTAG);
1198 1200          VERIFY(arc_buf_remove_ref(buf, FTAG));
1199 1201  }
1200 1202  
1201 1203  /*
1202 1204   * When possible directly assign passed loaned arc buffer to a dbuf.
1203 1205   * If this is not possible copy the contents of passed arc buf via
1204 1206   * dmu_write().
1205 1207   */
1206 1208  void
1207 1209  dmu_assign_arcbuf(dmu_buf_t *handle, uint64_t offset, arc_buf_t *buf,
1208 1210      dmu_tx_t *tx)
1209 1211  {
1210 1212          dmu_buf_impl_t *dbuf = (dmu_buf_impl_t *)handle;
1211 1213          dnode_t *dn;
1212 1214          dmu_buf_impl_t *db;
1213 1215          uint32_t blksz = (uint32_t)arc_buf_size(buf);
1214 1216          uint64_t blkid;
1215 1217  
1216 1218          DB_DNODE_ENTER(dbuf);
1217 1219          dn = DB_DNODE(dbuf);
1218 1220          rw_enter(&dn->dn_struct_rwlock, RW_READER);
1219 1221          blkid = dbuf_whichblock(dn, offset);
1220 1222          VERIFY((db = dbuf_hold(dn, blkid, FTAG)) != NULL);
1221 1223          rw_exit(&dn->dn_struct_rwlock);
1222 1224          DB_DNODE_EXIT(dbuf);
1223 1225  
1224 1226          if (offset == db->db.db_offset && blksz == db->db.db_size) {
1225 1227                  dbuf_assign_arcbuf(db, buf, tx);
1226 1228                  dbuf_rele(db, FTAG);
1227 1229          } else {
1228 1230                  objset_t *os;
1229 1231                  uint64_t object;
1230 1232  
1231 1233                  DB_DNODE_ENTER(dbuf);
1232 1234                  dn = DB_DNODE(dbuf);
1233 1235                  os = dn->dn_objset;
1234 1236                  object = dn->dn_object;
1235 1237                  DB_DNODE_EXIT(dbuf);
1236 1238  
1237 1239                  dbuf_rele(db, FTAG);
1238 1240                  dmu_write(os, object, offset, blksz, buf->b_data, tx);
1239 1241                  dmu_return_arcbuf(buf);
1240 1242                  XUIOSTAT_BUMP(xuiostat_wbuf_copied);
1241 1243          }
1242 1244  }
1243 1245  
1244 1246  typedef struct {
1245 1247          dbuf_dirty_record_t     *dsa_dr;
1246 1248          dmu_sync_cb_t           *dsa_done;
1247 1249          zgd_t                   *dsa_zgd;
1248 1250          dmu_tx_t                *dsa_tx;
1249 1251  } dmu_sync_arg_t;
1250 1252  
1251 1253  /* ARGSUSED */
1252 1254  static void
1253 1255  dmu_sync_ready(zio_t *zio, arc_buf_t *buf, void *varg)
1254 1256  {
1255 1257          dmu_sync_arg_t *dsa = varg;
1256 1258          dmu_buf_t *db = dsa->dsa_zgd->zgd_db;
1257 1259          blkptr_t *bp = zio->io_bp;
1258 1260  
1259 1261          if (zio->io_error == 0) {
1260 1262                  if (BP_IS_HOLE(bp)) {
1261 1263                          /*
1262 1264                           * A block of zeros may compress to a hole, but the
1263 1265                           * block size still needs to be known for replay.
1264 1266                           */
1265 1267                          BP_SET_LSIZE(bp, db->db_size);
1266 1268                  } else {
1267 1269                          ASSERT(BP_GET_LEVEL(bp) == 0);
1268 1270                          bp->blk_fill = 1;
1269 1271                  }
1270 1272          }
1271 1273  }
1272 1274  
1273 1275  static void
1274 1276  dmu_sync_late_arrival_ready(zio_t *zio)
1275 1277  {
1276 1278          dmu_sync_ready(zio, NULL, zio->io_private);
1277 1279  }
1278 1280  
1279 1281  /* ARGSUSED */
1280 1282  static void
1281 1283  dmu_sync_done(zio_t *zio, arc_buf_t *buf, void *varg)
1282 1284  {
1283 1285          dmu_sync_arg_t *dsa = varg;
1284 1286          dbuf_dirty_record_t *dr = dsa->dsa_dr;
1285 1287          dmu_buf_impl_t *db = dr->dr_dbuf;
1286 1288  
1287 1289          mutex_enter(&db->db_mtx);
1288 1290          ASSERT(dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC);
1289 1291          if (zio->io_error == 0) {
1290 1292                  dr->dt.dl.dr_nopwrite = !!(zio->io_flags & ZIO_FLAG_NOPWRITE);
1291 1293                  if (dr->dt.dl.dr_nopwrite) {
1292 1294                          blkptr_t *bp = zio->io_bp;
1293 1295                          blkptr_t *bp_orig = &zio->io_bp_orig;
1294 1296                          uint8_t chksum = BP_GET_CHECKSUM(bp_orig);
1295 1297  
1296 1298                          ASSERT(BP_EQUAL(bp, bp_orig));
1297 1299                          ASSERT(zio->io_prop.zp_compress != ZIO_COMPRESS_OFF);
1298 1300                          ASSERT(zio_checksum_table[chksum].ci_dedup);
1299 1301                  }
1300 1302                  dr->dt.dl.dr_overridden_by = *zio->io_bp;
1301 1303                  dr->dt.dl.dr_override_state = DR_OVERRIDDEN;
1302 1304                  dr->dt.dl.dr_copies = zio->io_prop.zp_copies;
1303 1305                  if (BP_IS_HOLE(&dr->dt.dl.dr_overridden_by))
1304 1306                          BP_ZERO(&dr->dt.dl.dr_overridden_by);
1305 1307          } else {
1306 1308                  dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
1307 1309          }
1308 1310          cv_broadcast(&db->db_changed);
1309 1311          mutex_exit(&db->db_mtx);
1310 1312  
1311 1313          dsa->dsa_done(dsa->dsa_zgd, zio->io_error);
1312 1314  
1313 1315          kmem_free(dsa, sizeof (*dsa));
1314 1316  }
1315 1317  
1316 1318  static void
1317 1319  dmu_sync_late_arrival_done(zio_t *zio)
1318 1320  {
1319 1321          blkptr_t *bp = zio->io_bp;
1320 1322          dmu_sync_arg_t *dsa = zio->io_private;
1321 1323          blkptr_t *bp_orig = &zio->io_bp_orig;
1322 1324  
1323 1325          if (zio->io_error == 0 && !BP_IS_HOLE(bp)) {
1324 1326                  /*
1325 1327                   * If we didn't allocate a new block (i.e. ZIO_FLAG_NOPWRITE)
1326 1328                   * then there is nothing to do here. Otherwise, free the
1327 1329                   * newly allocated block in this txg.
1328 1330                   */
1329 1331                  if (zio->io_flags & ZIO_FLAG_NOPWRITE) {
1330 1332                          ASSERT(BP_EQUAL(bp, bp_orig));
1331 1333                  } else {
1332 1334                          ASSERT(BP_IS_HOLE(bp_orig) || !BP_EQUAL(bp, bp_orig));
1333 1335                          ASSERT(zio->io_bp->blk_birth == zio->io_txg);
1334 1336                          ASSERT(zio->io_txg > spa_syncing_txg(zio->io_spa));
1335 1337                          zio_free(zio->io_spa, zio->io_txg, zio->io_bp);
1336 1338                  }
1337 1339          }
1338 1340  
1339 1341          dmu_tx_commit(dsa->dsa_tx);
1340 1342  
1341 1343          dsa->dsa_done(dsa->dsa_zgd, zio->io_error);
1342 1344  
1343 1345          kmem_free(dsa, sizeof (*dsa));
1344 1346  }
1345 1347  
1346 1348  static int
1347 1349  dmu_sync_late_arrival(zio_t *pio, objset_t *os, dmu_sync_cb_t *done, zgd_t *zgd,
1348 1350      zio_prop_t *zp, zbookmark_t *zb)
1349 1351  {
1350 1352          dmu_sync_arg_t *dsa;
1351 1353          dmu_tx_t *tx;
1352 1354  
1353 1355          tx = dmu_tx_create(os);
1354 1356          dmu_tx_hold_space(tx, zgd->zgd_db->db_size);
1355 1357          if (dmu_tx_assign(tx, TXG_WAIT) != 0) {
1356 1358                  dmu_tx_abort(tx);
1357 1359                  /* Make zl_get_data do txg_wait_synced() */
1358 1360                  return (SET_ERROR(EIO));
1359 1361          }
1360 1362  
1361 1363          dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP);
1362 1364          dsa->dsa_dr = NULL;
1363 1365          dsa->dsa_done = done;
1364 1366          dsa->dsa_zgd = zgd;
1365 1367          dsa->dsa_tx = tx;
1366 1368  
1367 1369          zio_nowait(zio_write(pio, os->os_spa, dmu_tx_get_txg(tx), zgd->zgd_bp,
1368 1370              zgd->zgd_db->db_data, zgd->zgd_db->db_size, zp,
1369 1371              dmu_sync_late_arrival_ready, NULL, dmu_sync_late_arrival_done, dsa,
1370 1372              ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_CANFAIL, zb));
1371 1373  
1372 1374          return (0);
1373 1375  }
1374 1376  
1375 1377  /*
1376 1378   * Intent log support: sync the block associated with db to disk.
1377 1379   * N.B. and XXX: the caller is responsible for making sure that the
1378 1380   * data isn't changing while dmu_sync() is writing it.
1379 1381   *
1380 1382   * Return values:
1381 1383   *
1382 1384   *      EEXIST: this txg has already been synced, so there's nothing to do.
1383 1385   *              The caller should not log the write.
1384 1386   *
1385 1387   *      ENOENT: the block was dbuf_free_range()'d, so there's nothing to do.
1386 1388   *              The caller should not log the write.
1387 1389   *
1388 1390   *      EALREADY: this block is already in the process of being synced.
1389 1391   *              The caller should track its progress (somehow).
1390 1392   *
1391 1393   *      EIO: could not do the I/O.
1392 1394   *              The caller should do a txg_wait_synced().
1393 1395   *
1394 1396   *      0: the I/O has been initiated.
1395 1397   *              The caller should log this blkptr in the done callback.
1396 1398   *              It is possible that the I/O will fail, in which case
1397 1399   *              the error will be reported to the done callback and
1398 1400   *              propagated to pio from zio_done().
1399 1401   */
1400 1402  int
1401 1403  dmu_sync(zio_t *pio, uint64_t txg, dmu_sync_cb_t *done, zgd_t *zgd)
1402 1404  {
1403 1405          blkptr_t *bp = zgd->zgd_bp;
1404 1406          dmu_buf_impl_t *db = (dmu_buf_impl_t *)zgd->zgd_db;
1405 1407          objset_t *os = db->db_objset;
1406 1408          dsl_dataset_t *ds = os->os_dsl_dataset;
1407 1409          dbuf_dirty_record_t *dr;
1408 1410          dmu_sync_arg_t *dsa;
1409 1411          zbookmark_t zb;
1410 1412          zio_prop_t zp;
1411 1413          dnode_t *dn;
1412 1414  
1413 1415          ASSERT(pio != NULL);
1414 1416          ASSERT(txg != 0);
1415 1417  
1416 1418          SET_BOOKMARK(&zb, ds->ds_object,
1417 1419              db->db.db_object, db->db_level, db->db_blkid);
1418 1420  
1419 1421          DB_DNODE_ENTER(db);
1420 1422          dn = DB_DNODE(db);
1421 1423          dmu_write_policy(os, dn, db->db_level, WP_DMU_SYNC, &zp);
1422 1424          DB_DNODE_EXIT(db);
1423 1425  
1424 1426          /*
1425 1427           * If we're frozen (running ziltest), we always need to generate a bp.
1426 1428           */
1427 1429          if (txg > spa_freeze_txg(os->os_spa))
1428 1430                  return (dmu_sync_late_arrival(pio, os, done, zgd, &zp, &zb));
1429 1431  
1430 1432          /*
1431 1433           * Grabbing db_mtx now provides a barrier between dbuf_sync_leaf()
1432 1434           * and us.  If we determine that this txg is not yet syncing,
1433 1435           * but it begins to sync a moment later, that's OK because the
1434 1436           * sync thread will block in dbuf_sync_leaf() until we drop db_mtx.
1435 1437           */
1436 1438          mutex_enter(&db->db_mtx);
1437 1439  
1438 1440          if (txg <= spa_last_synced_txg(os->os_spa)) {
1439 1441                  /*
1440 1442                   * This txg has already synced.  There's nothing to do.
1441 1443                   */
1442 1444                  mutex_exit(&db->db_mtx);
1443 1445                  return (SET_ERROR(EEXIST));
1444 1446          }
1445 1447  
1446 1448          if (txg <= spa_syncing_txg(os->os_spa)) {
1447 1449                  /*
1448 1450                   * This txg is currently syncing, so we can't mess with
1449 1451                   * the dirty record anymore; just write a new log block.
1450 1452                   */
1451 1453                  mutex_exit(&db->db_mtx);
1452 1454                  return (dmu_sync_late_arrival(pio, os, done, zgd, &zp, &zb));
1453 1455          }
1454 1456  
1455 1457          dr = db->db_last_dirty;
1456 1458          while (dr && dr->dr_txg != txg)
1457 1459                  dr = dr->dr_next;
1458 1460  
1459 1461          if (dr == NULL) {
1460 1462                  /*
1461 1463                   * There's no dr for this dbuf, so it must have been freed.
1462 1464                   * There's no need to log writes to freed blocks, so we're done.
1463 1465                   */
1464 1466                  mutex_exit(&db->db_mtx);
1465 1467                  return (SET_ERROR(ENOENT));
1466 1468          }
1467 1469  
1468 1470          ASSERT(dr->dr_next == NULL || dr->dr_next->dr_txg < txg);
1469 1471  
1470 1472          /*
1471 1473           * Assume the on-disk data is X, the current syncing data is Y,
1472 1474           * and the current in-memory data is Z (currently in dmu_sync).
1473 1475           * X and Z are identical but Y has been modified.  Normally,
1474 1476           * when X and Z are the same we will perform a nopwrite but if Y
1475 1477           * is different we must disable nopwrite since the resulting write
1476 1478           * of Y to disk can free the block containing X. If we allowed a
1477 1479           * nopwrite to occur the block pointing to Z would reference a freed
1478 1480           * block. Since this is a rare case we simplify this by disabling
1479 1481           * nopwrite if the current dmu_sync-ing dbuf has been modified in
1480 1482           * a previous transaction.
1481 1483           */
1482 1484          if (dr->dr_next)
1483 1485                  zp.zp_nopwrite = B_FALSE;
1484 1486  
1485 1487          ASSERT(dr->dr_txg == txg);
1486 1488          if (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC ||
1487 1489              dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
1488 1490                  /*
1489 1491                   * We have already issued a sync write for this buffer,
1490 1492                   * or this buffer has already been synced.  It could not
1491 1493                   * have been dirtied since, or we would have cleared the state.
1492 1494                   */
1493 1495                  mutex_exit(&db->db_mtx);
1494 1496                  return (SET_ERROR(EALREADY));
1495 1497          }
1496 1498  
1497 1499          ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
1498 1500          dr->dt.dl.dr_override_state = DR_IN_DMU_SYNC;
1499 1501          mutex_exit(&db->db_mtx);
1500 1502  
1501 1503          dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP);
1502 1504          dsa->dsa_dr = dr;
1503 1505          dsa->dsa_done = done;
1504 1506          dsa->dsa_zgd = zgd;
1505 1507          dsa->dsa_tx = NULL;
1506 1508  
1507 1509          zio_nowait(arc_write(pio, os->os_spa, txg,
1508 1510              bp, dr->dt.dl.dr_data, DBUF_IS_L2CACHEABLE(db),
1509 1511              DBUF_IS_L2COMPRESSIBLE(db), &zp, dmu_sync_ready,
1510 1512              NULL, dmu_sync_done, dsa, ZIO_PRIORITY_SYNC_WRITE,
1511 1513              ZIO_FLAG_CANFAIL, &zb));
1512 1514  
1513 1515          return (0);
1514 1516  }
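
A caller-side sketch may help make the return-value contract documented above dmu_sync()
concrete. It is illustrative only and not part of this change; the real consumer is the
ZIL's get-data path, and the names my_log_done(), log_this_block() and lwb_zio are
hypothetical.

	/*
	 * Illustrative only -- a hypothetical dmu_sync_cb_t.  The real done
	 * callback logs the resulting block pointer in the intent log.
	 */
	static void
	my_log_done(zgd_t *zgd, int error)
	{
		/* if error == 0, zgd->zgd_bp now holds the block pointer to log */
	}

	/* Hypothetical caller showing how each dmu_sync() return value is handled. */
	static int
	log_this_block(zio_t *lwb_zio, uint64_t txg, zgd_t *zgd)
	{
		int error = dmu_sync(lwb_zio, txg, my_log_done, zgd);

		switch (error) {
		case 0:		/* I/O initiated; blkptr is logged from my_log_done() */
		case EEXIST:	/* txg already synced: do not log the write */
		case ENOENT:	/* block was freed: do not log the write */
		case EALREADY:	/* a sync is already in flight: track its progress */
			return (0);
		case EIO:	/* I/O could not be issued: txg_wait_synced() and retry */
		default:
			return (error);
		}
	}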
1515 1517  
1516 1518  int
1517 1519  dmu_object_set_blocksize(objset_t *os, uint64_t object, uint64_t size, int ibs,
1518 1520          dmu_tx_t *tx)
1519 1521  {
1520 1522          dnode_t *dn;
1521 1523          int err;
1522 1524  
1523 1525          err = dnode_hold(os, object, FTAG, &dn);
1524 1526          if (err)
1525 1527                  return (err);
1526 1528          err = dnode_set_blksz(dn, size, ibs, tx);
1527 1529          dnode_rele(dn, FTAG);
1528 1530          return (err);
1529 1531  }
1530 1532  
1531 1533  void
1532 1534  dmu_object_set_checksum(objset_t *os, uint64_t object, uint8_t checksum,
1533 1535          dmu_tx_t *tx)
1534 1536  {
1535 1537          dnode_t *dn;
1536 1538  
1537 1539          /* XXX assumes dnode_hold will not get an i/o error */
1538 1540          (void) dnode_hold(os, object, FTAG, &dn);
1539 1541          ASSERT(checksum < ZIO_CHECKSUM_FUNCTIONS);
1540 1542          dn->dn_checksum = checksum;
1541 1543          dnode_setdirty(dn, tx);
1542 1544          dnode_rele(dn, FTAG);
1543 1545  }
1544 1546  
1545 1547  void
1546 1548  dmu_object_set_compress(objset_t *os, uint64_t object, uint8_t compress,
1547 1549          dmu_tx_t *tx)
1548 1550  {
1549 1551          dnode_t *dn;
1550 1552  
1551 1553          /* XXX assumes dnode_hold will not get an i/o error */
1552 1554          (void) dnode_hold(os, object, FTAG, &dn);
1553 1555          ASSERT(compress < ZIO_COMPRESS_FUNCTIONS);
1554 1556          dn->dn_compress = compress;
1555 1557          dnode_setdirty(dn, tx);
1556 1558          dnode_rele(dn, FTAG);
1557 1559  }
1558 1560  
1559 1561  int zfs_mdcomp_disable = 0;
1560 1562  
1561 1563  /*
1562 1564   * When the "redundant_metadata" property is set to "most", only indirect
1563 1565   * blocks of this level and higher will have an additional ditto block.
1564 1566   */
1565 1567  int zfs_redundant_metadata_most_ditto_level = 2;
1566 1568  
1567 1569  void
1568 1570  dmu_write_policy(objset_t *os, dnode_t *dn, int level, int wp, zio_prop_t *zp)
1569 1571  {
1570 1572          dmu_object_type_t type = dn ? dn->dn_type : DMU_OT_OBJSET;
1571 1573          boolean_t ismd = (level > 0 || DMU_OT_IS_METADATA(type) ||
1572 1574              (wp & WP_SPILL));
1573 1575          enum zio_checksum checksum = os->os_checksum;
1574 1576          enum zio_compress compress = os->os_compress;
1575 1577          enum zio_checksum dedup_checksum = os->os_dedup_checksum;
1576 1578          boolean_t dedup = B_FALSE;
1577 1579          boolean_t nopwrite = B_FALSE;
1578 1580          boolean_t dedup_verify = os->os_dedup_verify;
1579 1581          int copies = os->os_copies;
1580 1582  
1581 1583          /*
1582 1584           * We maintain different write policies for each of the following
1583 1585           * types of data:
1584 1586           *       1. metadata
1585 1587           *       2. preallocated blocks (i.e. level-0 blocks of a dump device)
1586 1588           *       3. all other level 0 blocks
1587 1589           */
1588 1590          if (ismd) {
1589 1591                  /*
1590 1592                   * XXX -- we should design a compression algorithm
1591 1593                   * that specializes in arrays of bps.
1592 1594                   */
1593      -                compress = zfs_mdcomp_disable ? ZIO_COMPRESS_EMPTY :
1594      -                    ZIO_COMPRESS_LZJB;
     1595 +                boolean_t lz4_ac = spa_feature_is_active(os->os_spa,
     1596 +                    SPA_FEATURE_LZ4_COMPRESS);
1595 1597  
     1598 +                if (zfs_mdcomp_disable) {
     1599 +                        compress = ZIO_COMPRESS_EMPTY;
     1600 +                } else if (lz4_ac) {
     1601 +                        compress = ZIO_COMPRESS_LZ4;
     1602 +                } else {
     1603 +                        compress = ZIO_COMPRESS_LZJB;
     1604 +                }
     1605 +
1596 1606                  /*
1597 1607                   * Metadata always gets checksummed.  If the data
1598 1608                   * checksum is multi-bit correctable, and it's not a
1599 1609                   * ZBT-style checksum, then it's suitable for metadata
1600 1610                   * as well.  Otherwise, the metadata checksum defaults
1601 1611                   * to fletcher4.
1602 1612                   */
1603 1613                  if (zio_checksum_table[checksum].ci_correctable < 1 ||
1604 1614                      zio_checksum_table[checksum].ci_eck)
1605 1615                          checksum = ZIO_CHECKSUM_FLETCHER_4;
1606 1616  
1607 1617                  if (os->os_redundant_metadata == ZFS_REDUNDANT_METADATA_ALL ||
1608 1618                      (os->os_redundant_metadata ==
1609 1619                      ZFS_REDUNDANT_METADATA_MOST &&
1610 1620                      (level >= zfs_redundant_metadata_most_ditto_level ||
1611 1621                      DMU_OT_IS_METADATA(type) || (wp & WP_SPILL))))
1612 1622                          copies++;
1613 1623          } else if (wp & WP_NOFILL) {
1614 1624                  ASSERT(level == 0);
1615 1625  
1616 1626                  /*
1617 1627                   * If we're writing preallocated blocks, we aren't actually
1618 1628                   * writing them so don't set any policy properties.  These
1619 1629                   * blocks are currently only used by an external subsystem
1620 1630                   * outside of zfs (i.e. dump) and not written by the zio
1621 1631                   * pipeline.
1622 1632                   */
1623 1633                  compress = ZIO_COMPRESS_OFF;
1624 1634                  checksum = ZIO_CHECKSUM_NOPARITY;
1625 1635          } else {
1626 1636                  compress = zio_compress_select(dn->dn_compress, compress);
1627 1637  
1628 1638                  checksum = (dedup_checksum == ZIO_CHECKSUM_OFF) ?
1629 1639                      zio_checksum_select(dn->dn_checksum, checksum) :
1630 1640                      dedup_checksum;
1631 1641  
1632 1642                  /*
1633 1643                   * Determine dedup setting.  If we are in dmu_sync(),
1634 1644                   * we won't actually dedup now because that's all
1635 1645                   * done in syncing context; but we do want to use the
1636 1646                   * dedup checksum.  If the checksum is not strong
1637 1647                   * enough to ensure unique signatures, force
1638 1648                   * dedup_verify.
1639 1649                   */
1640 1650                  if (dedup_checksum != ZIO_CHECKSUM_OFF) {
1641 1651                          dedup = (wp & WP_DMU_SYNC) ? B_FALSE : B_TRUE;
1642 1652                          if (!zio_checksum_table[checksum].ci_dedup)
1643 1653                                  dedup_verify = B_TRUE;
1644 1654                  }
1645 1655  
1646 1656                  /*
1647 1657                   * Enable nopwrite if we have a cryptographically secure
1648 1658                   * checksum that has no known collisions (i.e. SHA-256)
1649 1659                   * and compression is enabled.  We don't enable nopwrite if
1650 1660                   * dedup is enabled as the two features are mutually exclusive.
1651 1661                   */
1652 1662                  nopwrite = (!dedup && zio_checksum_table[checksum].ci_dedup &&
1653 1663                      compress != ZIO_COMPRESS_OFF && zfs_nopwrite_enabled);
1654 1664          }
1655 1665  
1656 1666          zp->zp_checksum = checksum;
1657 1667          zp->zp_compress = compress;
1658 1668          zp->zp_type = (wp & WP_SPILL) ? dn->dn_bonustype : type;
1659 1669          zp->zp_level = level;
1660 1670          zp->zp_copies = MIN(copies, spa_max_replication(os->os_spa));
1661 1671          zp->zp_dedup = dedup;
1662 1672          zp->zp_dedup_verify = dedup && dedup_verify;
1663 1673          zp->zp_nopwrite = nopwrite;
1664 1674  }
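
The effect of this change on metadata compression can be restated in isolation. The
helper below is a hypothetical restatement of the selection logic added above, not code
from the diff; zfs_mdcomp_disable, spa_feature_is_active(), SPA_FEATURE_LZ4_COMPRESS and
the ZIO_COMPRESS_* constants are used exactly as in the change.

	/*
	 * Hypothetical restatement of the metadata compression choice made in
	 * dmu_write_policy() above:
	 *   - zfs_mdcomp_disable set:       ZIO_COMPRESS_EMPTY (no compression)
	 *   - lz4_compress feature active:  ZIO_COMPRESS_LZ4
	 *   - otherwise (legacy pools):     ZIO_COMPRESS_LZJB
	 */
	static enum zio_compress
	dmu_metadata_compress(spa_t *spa)
	{
		if (zfs_mdcomp_disable)
			return (ZIO_COMPRESS_EMPTY);
		if (spa_feature_is_active(spa, SPA_FEATURE_LZ4_COMPRESS))
			return (ZIO_COMPRESS_LZ4);
		return (ZIO_COMPRESS_LZJB);
	}

Keeping LZJB as the fallback means metadata written on pools where the lz4_compress
feature is not active remains readable by software that predates LZ4 support.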
1665 1675  
1666 1676  int
1667 1677  dmu_offset_next(objset_t *os, uint64_t object, boolean_t hole, uint64_t *off)
1668 1678  {
1669 1679          dnode_t *dn;
1670 1680          int i, err;
1671 1681  
1672 1682          err = dnode_hold(os, object, FTAG, &dn);
1673 1683          if (err)
1674 1684                  return (err);
1675 1685          /*
1676 1686           * Sync any current changes before
1677 1687           * we go trundling through the block pointers.
1678 1688           */
1679 1689          for (i = 0; i < TXG_SIZE; i++) {
1680 1690                  if (list_link_active(&dn->dn_dirty_link[i]))
1681 1691                          break;
1682 1692          }
1683 1693          if (i != TXG_SIZE) {
1684 1694                  dnode_rele(dn, FTAG);
1685 1695                  txg_wait_synced(dmu_objset_pool(os), 0);
1686 1696                  err = dnode_hold(os, object, FTAG, &dn);
1687 1697                  if (err)
1688 1698                          return (err);
1689 1699          }
1690 1700  
1691 1701          err = dnode_next_offset(dn, (hole ? DNODE_FIND_HOLE : 0), off, 1, 1, 0);
1692 1702          dnode_rele(dn, FTAG);
1693 1703  
1694 1704          return (err);
1695 1705  }
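
As a usage illustration (not part of the change), a SEEK_HOLE/SEEK_DATA style lookup
would drive dmu_offset_next() roughly as follows; the wrapper name my_holey() and the
ESRCH-to-ENXIO mapping are hypothetical.

	/* Hypothetical hole/data lookup built on dmu_offset_next(). */
	static int
	my_holey(objset_t *os, uint64_t object, boolean_t find_hole, uint64_t *off)
	{
		int error = dmu_offset_next(os, object, find_hole, off);

		if (error == ESRCH) {
			/* dnode_next_offset() found no matching hole/data region */
			return (ENXIO);
		}
		return (error);	/* 0: *off advanced to the next hole or data block */
	}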
1696 1706  
1697 1707  void
1698 1708  dmu_object_info_from_dnode(dnode_t *dn, dmu_object_info_t *doi)
1699 1709  {
1700 1710          dnode_phys_t *dnp;
1701 1711  
1702 1712          rw_enter(&dn->dn_struct_rwlock, RW_READER);
1703 1713          mutex_enter(&dn->dn_mtx);
1704 1714  
1705 1715          dnp = dn->dn_phys;
1706 1716  
1707 1717          doi->doi_data_block_size = dn->dn_datablksz;
1708 1718          doi->doi_metadata_block_size = dn->dn_indblkshift ?
1709 1719              1ULL << dn->dn_indblkshift : 0;
1710 1720          doi->doi_type = dn->dn_type;
1711 1721          doi->doi_bonus_type = dn->dn_bonustype;
1712 1722          doi->doi_bonus_size = dn->dn_bonuslen;
1713 1723          doi->doi_indirection = dn->dn_nlevels;
1714 1724          doi->doi_checksum = dn->dn_checksum;
1715 1725          doi->doi_compress = dn->dn_compress;
1716 1726          doi->doi_physical_blocks_512 = (DN_USED_BYTES(dnp) + 256) >> 9;
1717 1727          doi->doi_max_offset = (dn->dn_maxblkid + 1) * dn->dn_datablksz;
1718 1728          doi->doi_fill_count = 0;
1719 1729          for (int i = 0; i < dnp->dn_nblkptr; i++)
1720 1730                  doi->doi_fill_count += dnp->dn_blkptr[i].blk_fill;
1721 1731  
1722 1732          mutex_exit(&dn->dn_mtx);
1723 1733          rw_exit(&dn->dn_struct_rwlock);
1724 1734  }
1725 1735  
1726 1736  /*
1727 1737   * Get information on a DMU object.
1728 1738   * If doi is NULL, just indicates whether the object exists.
1729 1739   */
1730 1740  int
1731 1741  dmu_object_info(objset_t *os, uint64_t object, dmu_object_info_t *doi)
1732 1742  {
1733 1743          dnode_t *dn;
1734 1744          int err = dnode_hold(os, object, FTAG, &dn);
1735 1745  
1736 1746          if (err)
1737 1747                  return (err);
1738 1748  
1739 1749          if (doi != NULL)
1740 1750                  dmu_object_info_from_dnode(dn, doi);
1741 1751  
1742 1752          dnode_rele(dn, FTAG);
1743 1753          return (0);
1744 1754  }
1745 1755  
1746 1756  /*
1747 1757   * As above, but faster; can be used when you have a held dbuf in hand.
1748 1758   */
1749 1759  void
1750 1760  dmu_object_info_from_db(dmu_buf_t *db_fake, dmu_object_info_t *doi)
1751 1761  {
1752 1762          dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
1753 1763  
1754 1764          DB_DNODE_ENTER(db);
1755 1765          dmu_object_info_from_dnode(DB_DNODE(db), doi);
1756 1766          DB_DNODE_EXIT(db);
1757 1767  }
1758 1768  
1759 1769  /*
1760 1770   * Faster still when you only care about the size.
1761 1771   * This is specifically optimized for zfs_getattr().
1762 1772   */
1763 1773  void
1764 1774  dmu_object_size_from_db(dmu_buf_t *db_fake, uint32_t *blksize,
1765 1775      u_longlong_t *nblk512)
1766 1776  {
1767 1777          dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
1768 1778          dnode_t *dn;
1769 1779  
1770 1780          DB_DNODE_ENTER(db);
1771 1781          dn = DB_DNODE(db);
1772 1782  
1773 1783          *blksize = dn->dn_datablksz;
1774 1784          /* add 1 for dnode space */
1775 1785          *nblk512 = ((DN_USED_BYTES(dn->dn_phys) + SPA_MINBLOCKSIZE/2) >>
1776 1786              SPA_MINBLOCKSHIFT) + 1;
1777 1787          DB_DNODE_EXIT(db);
1778 1788  }
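
The accounting above rounds the dnode's space usage to the nearest SPA_MINBLOCKSIZE
(512-byte) unit and adds one unit for the dnode itself; the numbers in the worked
example below are illustrative only.

	/*
	 * Illustrative arithmetic for the computation above, with
	 * DN_USED_BYTES(dn->dn_phys) == 3000 and SPA_MINBLOCKSIZE == 512:
	 *
	 *   (3000 + 512/2) >> SPA_MINBLOCKSHIFT  =  3256 >> 9  =  6
	 *   + 1 for the dnode itself             =  7 blocks of 512 bytes
	 */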
1779 1789  
1780 1790  void
1781 1791  byteswap_uint64_array(void *vbuf, size_t size)
1782 1792  {
1783 1793          uint64_t *buf = vbuf;
1784 1794          size_t count = size >> 3;
1785 1795          int i;
1786 1796  
1787 1797          ASSERT((size & 7) == 0);
1788 1798  
1789 1799          for (i = 0; i < count; i++)
1790 1800                  buf[i] = BSWAP_64(buf[i]);
1791 1801  }
1792 1802  
1793 1803  void
1794 1804  byteswap_uint32_array(void *vbuf, size_t size)
1795 1805  {
1796 1806          uint32_t *buf = vbuf;
1797 1807          size_t count = size >> 2;
1798 1808          int i;
1799 1809  
1800 1810          ASSERT((size & 3) == 0);
1801 1811  
1802 1812          for (i = 0; i < count; i++)
1803 1813                  buf[i] = BSWAP_32(buf[i]);
1804 1814  }
1805 1815  
1806 1816  void
1807 1817  byteswap_uint16_array(void *vbuf, size_t size)
1808 1818  {
1809 1819          uint16_t *buf = vbuf;
1810 1820          size_t count = size >> 1;
1811 1821          int i;
1812 1822  
1813 1823          ASSERT((size & 1) == 0);
1814 1824  
1815 1825          for (i = 0; i < count; i++)
1816 1826                  buf[i] = BSWAP_16(buf[i]);
1817 1827  }
1818 1828  
1819 1829  /* ARGSUSED */
1820 1830  void
1821 1831  byteswap_uint8_array(void *vbuf, size_t size)
1822 1832  {
1823 1833  }
1824 1834  
1825 1835  void
1826 1836  dmu_init(void)
1827 1837  {
1828 1838          zfs_dbgmsg_init();
1829 1839          sa_cache_init();
1830 1840          xuio_stat_init();
1831 1841          dmu_objset_init();
1832 1842          dnode_init();
1833 1843          dbuf_init();
1834 1844          zfetch_init();
1835 1845          l2arc_init();
1836 1846          arc_init();
1837 1847  }
1838 1848  
1839 1849  void
1840 1850  dmu_fini(void)
1841 1851  {
1842 1852          arc_fini(); /* arc depends on l2arc, so arc must go first */
1843 1853          l2arc_fini();
1844 1854          zfetch_fini();
1845 1855          dbuf_fini();
1846 1856          dnode_fini();
1847 1857          dmu_objset_fini();
1848 1858          xuio_stat_fini();
1849 1859          sa_cache_fini();
1850 1860          zfs_dbgmsg_fini();
1851 1861  }