Possibility to physically reserve space without writing leaf blocks

/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
 */
/* Copyright (c) 2013 by Saso Kiselkov. All rights reserved. */
/* Copyright (c) 2013, Joyent, Inc. All rights reserved. */
/* Copyright (c) 2014, Nexenta Systems, Inc. All rights reserved. */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_prop.h>
#include <sys/dmu_zfetch.h>
#include <sys/zfs_ioctl.h>
#include <sys/zap.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/sa.h>
#include <sys/zfeature.h>
#ifdef _KERNEL
#include <sys/vmsystm.h>
#include <sys/zfs_znode.h>
#endif

/*
 * Enable/disable nopwrite feature.
 */
int zfs_nopwrite_enabled = 1;

const dmu_object_type_info_t dmu_ot[DMU_OT_NUMTYPES] = {
        {       DMU_BSWAP_UINT8,        TRUE,   "unallocated"           },
        {       DMU_BSWAP_ZAP,          TRUE,   "object directory"      },
        {       DMU_BSWAP_UINT64,       TRUE,   "object array"          },
        {       DMU_BSWAP_UINT8,        TRUE,   "packed nvlist"         },
        {       DMU_BSWAP_UINT64,       TRUE,   "packed nvlist size"    },
        {       DMU_BSWAP_UINT64,       TRUE,   "bpobj"                 },
        {       DMU_BSWAP_UINT64,       TRUE,   "bpobj header"          },
        {       DMU_BSWAP_UINT64,       TRUE,   "SPA space map header"  },
        {       DMU_BSWAP_UINT64,       TRUE,   "SPA space map"         },
        {       DMU_BSWAP_UINT64,       TRUE,   "ZIL intent log"        },
        {       DMU_BSWAP_DNODE,        TRUE,   "DMU dnode"             },
        {       DMU_BSWAP_OBJSET,       TRUE,   "DMU objset"            },
        {       DMU_BSWAP_UINT64,       TRUE,   "DSL directory"         },
        {       DMU_BSWAP_ZAP,          TRUE,   "DSL directory child map"},
        {       DMU_BSWAP_ZAP,          TRUE,   "DSL dataset snap map"  },
        {       DMU_BSWAP_ZAP,          TRUE,   "DSL props"             },
        {       DMU_BSWAP_UINT64,       TRUE,   "DSL dataset"           },
        {       DMU_BSWAP_ZNODE,        TRUE,   "ZFS znode"             },
        {       DMU_BSWAP_OLDACL,       TRUE,   "ZFS V0 ACL"            },
        {       DMU_BSWAP_UINT8,        FALSE,  "ZFS plain file"        },
        {       DMU_BSWAP_ZAP,          TRUE,   "ZFS directory"         },
        {       DMU_BSWAP_ZAP,          TRUE,   "ZFS master node"       },
        {       DMU_BSWAP_ZAP,          TRUE,   "ZFS delete queue"      },
        {       DMU_BSWAP_UINT8,        FALSE,  "zvol object"           },
        {       DMU_BSWAP_ZAP,          TRUE,   "zvol prop"             },
        {       DMU_BSWAP_UINT8,        FALSE,  "other uint8[]"         },
        {       DMU_BSWAP_UINT64,       FALSE,  "other uint64[]"        },
        {       DMU_BSWAP_ZAP,          TRUE,   "other ZAP"             },
        {       DMU_BSWAP_ZAP,          TRUE,   "persistent error log"  },
        {       DMU_BSWAP_UINT8,        TRUE,   "SPA history"           },
        {       DMU_BSWAP_UINT64,       TRUE,   "SPA history offsets"   },
        {       DMU_BSWAP_ZAP,          TRUE,   "Pool properties"       },
        {       DMU_BSWAP_ZAP,          TRUE,   "DSL permissions"       },
        {       DMU_BSWAP_ACL,          TRUE,   "ZFS ACL"               },
        {       DMU_BSWAP_UINT8,        TRUE,   "ZFS SYSACL"            },
        {       DMU_BSWAP_UINT8,        TRUE,   "FUID table"            },
        {       DMU_BSWAP_UINT64,       TRUE,   "FUID table size"       },
        {       DMU_BSWAP_ZAP,          TRUE,   "DSL dataset next clones"},
        {       DMU_BSWAP_ZAP,          TRUE,   "scan work queue"       },
        {       DMU_BSWAP_ZAP,          TRUE,   "ZFS user/group used"   },
        {       DMU_BSWAP_ZAP,          TRUE,   "ZFS user/group quota"  },
        {       DMU_BSWAP_ZAP,          TRUE,   "snapshot refcount tags"},
        {       DMU_BSWAP_ZAP,          TRUE,   "DDT ZAP algorithm"     },
        {       DMU_BSWAP_ZAP,          TRUE,   "DDT statistics"        },
        {       DMU_BSWAP_UINT8,        TRUE,   "System attributes"     },
        {       DMU_BSWAP_ZAP,          TRUE,   "SA master node"        },
        {       DMU_BSWAP_ZAP,          TRUE,   "SA attr registration"  },
        {       DMU_BSWAP_ZAP,          TRUE,   "SA attr layouts"       },
        {       DMU_BSWAP_ZAP,          TRUE,   "scan translations"     },
        {       DMU_BSWAP_UINT8,        FALSE,  "deduplicated block"    },
        {       DMU_BSWAP_ZAP,          TRUE,   "DSL deadlist map"      },
        {       DMU_BSWAP_UINT64,       TRUE,   "DSL deadlist map hdr"  },
        {       DMU_BSWAP_ZAP,          TRUE,   "DSL dir clones"        },
        {       DMU_BSWAP_UINT64,       TRUE,   "bpobj subobj"          }
};

const dmu_object_byteswap_info_t dmu_ot_byteswap[DMU_BSWAP_NUMFUNCS] = {
        {       byteswap_uint8_array,   "uint8"         },
        {       byteswap_uint16_array,  "uint16"        },
        {       byteswap_uint32_array,  "uint32"        },
        {       byteswap_uint64_array,  "uint64"        },
        {       zap_byteswap,           "zap"           },
        {       dnode_buf_byteswap,     "dnode"         },
        {       dmu_objset_byteswap,    "objset"        },
        {       zfs_znode_byteswap,     "znode"         },
        {       zfs_oldacl_byteswap,    "oldacl"        },
        {       zfs_acl_byteswap,       "acl"           }
};

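/*
 * Hold the dbuf that covers the given offset of the object without
 * triggering a read of its contents.  The caller is expected to read
 * (or deliberately skip reading) the dbuf before using the data, and
 * must eventually release the hold with dmu_buf_rele().
 */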
int
dmu_buf_hold_noread(objset_t *os, uint64_t object, uint64_t offset,
    void *tag, dmu_buf_t **dbp)
{
        dnode_t *dn;
        uint64_t blkid;
        dmu_buf_impl_t *db;
        int err;

        err = dnode_hold(os, object, FTAG, &dn);
        if (err)
                return (err);
        blkid = dbuf_whichblock(dn, offset);
        rw_enter(&dn->dn_struct_rwlock, RW_READER);
        db = dbuf_hold(dn, blkid, tag);
        rw_exit(&dn->dn_struct_rwlock);
        dnode_rele(dn, FTAG);

        if (db == NULL) {
                *dbp = NULL;
                return (SET_ERROR(EIO));
        }

        *dbp = &db->db;
        return (err);
}

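/*
 * Hold the dbuf that covers the given offset and read its contents.
 * DMU_READ_NO_PREFETCH in 'flags' suppresses the prefetch that the
 * read would otherwise issue.  On success, *dbp must eventually be
 * released with dmu_buf_rele().
 */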
int
dmu_buf_hold(objset_t *os, uint64_t object, uint64_t offset,
    void *tag, dmu_buf_t **dbp, int flags)
{
        int err;
        int db_flags = DB_RF_CANFAIL;

        if (flags & DMU_READ_NO_PREFETCH)
                db_flags |= DB_RF_NOPREFETCH;

        err = dmu_buf_hold_noread(os, object, offset, tag, dbp);
        if (err == 0) {
                dmu_buf_impl_t *db = (dmu_buf_impl_t *)(*dbp);
                err = dbuf_read(db, NULL, db_flags);
                if (err != 0) {
                        dbuf_rele(db, tag);
                        *dbp = NULL;
                }
        }

        return (err);
}

int
dmu_bonus_max(void)
{
        return (DN_MAX_BONUSLEN);
}

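/*
 * Change the length of an object's bonus buffer, within the limits of
 * the space currently allocated to it.  'db_fake' must be the bonus
 * dbuf of the object, held by the caller.
 */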
int
dmu_set_bonus(dmu_buf_t *db_fake, int newsize, dmu_tx_t *tx)
{
        dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
        dnode_t *dn;
        int error;

        DB_DNODE_ENTER(db);
        dn = DB_DNODE(db);

        if (dn->dn_bonus != db) {
                error = SET_ERROR(EINVAL);
        } else if (newsize < 0 || newsize > db_fake->db_size) {
                error = SET_ERROR(EINVAL);
        } else {
                dnode_setbonuslen(dn, newsize, tx);
                error = 0;
        }

        DB_DNODE_EXIT(db);
        return (error);
}

int
dmu_set_bonustype(dmu_buf_t *db_fake, dmu_object_type_t type, dmu_tx_t *tx)
{
        dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
        dnode_t *dn;
        int error;

        DB_DNODE_ENTER(db);
        dn = DB_DNODE(db);

        if (!DMU_OT_IS_VALID(type)) {
                error = SET_ERROR(EINVAL);
        } else if (dn->dn_bonus != db) {
                error = SET_ERROR(EINVAL);
        } else {
                dnode_setbonus_type(dn, type, tx);
                error = 0;
        }

        DB_DNODE_EXIT(db);
        return (error);
}

dmu_object_type_t
dmu_get_bonustype(dmu_buf_t *db_fake)
{
        dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
        dnode_t *dn;
        dmu_object_type_t type;

        DB_DNODE_ENTER(db);
        dn = DB_DNODE(db);
        type = dn->dn_bonustype;
        DB_DNODE_EXIT(db);

        return (type);
}

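/*
 * Remove the spill block from an object: free its dbuf and clear the
 * spill blkptr in the dnode.
 */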
int
dmu_rm_spill(objset_t *os, uint64_t object, dmu_tx_t *tx)
{
        dnode_t *dn;
        int error;

        error = dnode_hold(os, object, FTAG, &dn);
        if (error)
                return (error);
        dbuf_rm_spill(dn, tx);
        rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
        dnode_rm_spill(dn, tx);
        rw_exit(&dn->dn_struct_rwlock);
        dnode_rele(dn, FTAG);
        return (error);
}

/*
 * returns ENOENT, EIO, or 0.
 */
int
dmu_bonus_hold(objset_t *os, uint64_t object, void *tag, dmu_buf_t **dbp)
{
        dnode_t *dn;
        dmu_buf_impl_t *db;
        int error;

        error = dnode_hold(os, object, FTAG, &dn);
        if (error)
                return (error);

        rw_enter(&dn->dn_struct_rwlock, RW_READER);
        if (dn->dn_bonus == NULL) {
                rw_exit(&dn->dn_struct_rwlock);
                rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
                if (dn->dn_bonus == NULL)
                        dbuf_create_bonus(dn);
        }
        db = dn->dn_bonus;

        /* as long as the bonus buf is held, the dnode will be held */
        if (refcount_add(&db->db_holds, tag) == 1) {
                VERIFY(dnode_add_ref(dn, db));
                atomic_inc_32(&dn->dn_dbufs_count);
        }

        /*
         * Wait to drop dn_struct_rwlock until after adding the bonus dbuf's
         * hold and incrementing the dbuf count to ensure that dnode_move() sees
         * a dnode hold for every dbuf.
         */
        rw_exit(&dn->dn_struct_rwlock);

        dnode_rele(dn, FTAG);

        VERIFY(0 == dbuf_read(db, NULL, DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH));

        *dbp = &db->db;
        return (0);
}

/*
 * returns ENOENT, EIO, or 0.
 *
 * This interface will allocate a blank spill dbuf when a spill blk
 * doesn't already exist on the dnode.
 *
 * if you only want to find an already existing spill db, then
 * dmu_spill_hold_existing() should be used.
 */
int
dmu_spill_hold_by_dnode(dnode_t *dn, uint32_t flags, void *tag, dmu_buf_t **dbp)
{
        dmu_buf_impl_t *db = NULL;
        int err;

        if ((flags & DB_RF_HAVESTRUCT) == 0)
                rw_enter(&dn->dn_struct_rwlock, RW_READER);

        db = dbuf_hold(dn, DMU_SPILL_BLKID, tag);

        if ((flags & DB_RF_HAVESTRUCT) == 0)
                rw_exit(&dn->dn_struct_rwlock);

        ASSERT(db != NULL);
        err = dbuf_read(db, NULL, flags);
        if (err == 0)
                *dbp = &db->db;
        else
                dbuf_rele(db, tag);
        return (err);
}

int
dmu_spill_hold_existing(dmu_buf_t *bonus, void *tag, dmu_buf_t **dbp)
{
        dmu_buf_impl_t *db = (dmu_buf_impl_t *)bonus;
        dnode_t *dn;
        int err;

        DB_DNODE_ENTER(db);
        dn = DB_DNODE(db);

        if (spa_version(dn->dn_objset->os_spa) < SPA_VERSION_SA) {
                err = SET_ERROR(EINVAL);
        } else {
                rw_enter(&dn->dn_struct_rwlock, RW_READER);

                if (!dn->dn_have_spill) {
                        err = SET_ERROR(ENOENT);
                } else {
                        err = dmu_spill_hold_by_dnode(dn,
                            DB_RF_HAVESTRUCT | DB_RF_CANFAIL, tag, dbp);
                }

                rw_exit(&dn->dn_struct_rwlock);
        }

        DB_DNODE_EXIT(db);
        return (err);
}

int
dmu_spill_hold_by_bonus(dmu_buf_t *bonus, void *tag, dmu_buf_t **dbp)
{
        dmu_buf_impl_t *db = (dmu_buf_impl_t *)bonus;
        dnode_t *dn;
        int err;

        DB_DNODE_ENTER(db);
        dn = DB_DNODE(db);
        err = dmu_spill_hold_by_dnode(dn, DB_RF_CANFAIL, tag, dbp);
        DB_DNODE_EXIT(db);

        return (err);
}

/*
 * Note: longer-term, we should modify all of the dmu_buf_*() interfaces
 * to take a held dnode rather than <os, object> -- the lookup is wasteful,
 * and can induce severe lock contention when writing to several files
 * whose dnodes are in the same block.
 */
static int
dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset, uint64_t length,
    int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp, uint32_t flags)
{
        dmu_buf_t **dbp;
        uint64_t blkid, nblks, i;
        uint32_t dbuf_flags;
        int err;
        zio_t *zio;

        ASSERT(length <= DMU_MAX_ACCESS);

        dbuf_flags = DB_RF_CANFAIL | DB_RF_NEVERWAIT | DB_RF_HAVESTRUCT;
        if (flags & DMU_READ_NO_PREFETCH || length > zfetch_array_rd_sz)
                dbuf_flags |= DB_RF_NOPREFETCH;

        rw_enter(&dn->dn_struct_rwlock, RW_READER);
        if (dn->dn_datablkshift) {
                int blkshift = dn->dn_datablkshift;
                nblks = (P2ROUNDUP(offset+length, 1ULL<<blkshift) -
                    P2ALIGN(offset, 1ULL<<blkshift)) >> blkshift;
        } else {
                if (offset + length > dn->dn_datablksz) {
                        zfs_panic_recover("zfs: accessing past end of object "
                            "%llx/%llx (size=%u access=%llu+%llu)",
                            (longlong_t)dn->dn_objset->
                            os_dsl_dataset->ds_object,
                            (longlong_t)dn->dn_object, dn->dn_datablksz,
                            (longlong_t)offset, (longlong_t)length);
                        rw_exit(&dn->dn_struct_rwlock);
                        return (SET_ERROR(EIO));
                }
                nblks = 1;
        }
        dbp = kmem_zalloc(sizeof (dmu_buf_t *) * nblks, KM_SLEEP);

        zio = zio_root(dn->dn_objset->os_spa, NULL, NULL, ZIO_FLAG_CANFAIL);
        blkid = dbuf_whichblock(dn, offset);
        for (i = 0; i < nblks; i++) {
                dmu_buf_impl_t *db = dbuf_hold(dn, blkid+i, tag);
                if (db == NULL) {
                        rw_exit(&dn->dn_struct_rwlock);
                        dmu_buf_rele_array(dbp, nblks, tag);
                        zio_nowait(zio);
                        return (SET_ERROR(EIO));
                }
                /* initiate async i/o */
                if (read) {
                        (void) dbuf_read(db, zio, dbuf_flags);
                }
                dbp[i] = &db->db;
        }
        rw_exit(&dn->dn_struct_rwlock);

        /* wait for async i/o */
        err = zio_wait(zio);
        if (err) {
                dmu_buf_rele_array(dbp, nblks, tag);
                return (err);
        }

        /* wait for other io to complete */
        if (read) {
                for (i = 0; i < nblks; i++) {
                        dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbp[i];
                        mutex_enter(&db->db_mtx);
                        while (db->db_state == DB_READ ||
                            db->db_state == DB_FILL)
                                cv_wait(&db->db_changed, &db->db_mtx);
                        if (db->db_state == DB_UNCACHED)
                                err = SET_ERROR(EIO);
                        mutex_exit(&db->db_mtx);
                        if (err) {
                                dmu_buf_rele_array(dbp, nblks, tag);
                                return (err);
                        }
                }
        }

        *numbufsp = nblks;
        *dbpp = dbp;
        return (0);
}

static int
dmu_buf_hold_array(objset_t *os, uint64_t object, uint64_t offset,
    uint64_t length, int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp)
{
        dnode_t *dn;
        int err;

        err = dnode_hold(os, object, FTAG, &dn);
        if (err)
                return (err);

        err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
            numbufsp, dbpp, DMU_READ_PREFETCH);

        dnode_rele(dn, FTAG);

        return (err);
}

int
dmu_buf_hold_array_by_bonus(dmu_buf_t *db_fake, uint64_t offset,
    uint64_t length, int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp)
{
        dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
        dnode_t *dn;
        int err;

        DB_DNODE_ENTER(db);
        dn = DB_DNODE(db);
        err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
            numbufsp, dbpp, DMU_READ_PREFETCH);
        DB_DNODE_EXIT(db);

        return (err);
}

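/*
 * Release an array of dbuf holds obtained from one of the
 * dmu_buf_hold_array*() functions, and free the array itself.
 */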
void
dmu_buf_rele_array(dmu_buf_t **dbp_fake, int numbufs, void *tag)
{
        int i;
        dmu_buf_impl_t **dbp = (dmu_buf_impl_t **)dbp_fake;

        if (numbufs == 0)
                return;

        for (i = 0; i < numbufs; i++) {
                if (dbp[i])
                        dbuf_rele(dbp[i], tag);
        }

        kmem_free(dbp, sizeof (dmu_buf_t *) * numbufs);
}

/*
 * Issue prefetch i/os for the given blocks.
 *
 * Note: The assumption is that we *know* these blocks will be needed
 * almost immediately.  Therefore, the prefetch i/os will be issued at
 * ZIO_PRIORITY_SYNC_READ
 *
 * Note: indirect blocks and other metadata will be read synchronously,
 * causing this function to block if they are not already cached.
 */
void
dmu_prefetch(objset_t *os, uint64_t object, uint64_t offset, uint64_t len)
{
        dnode_t *dn;
        uint64_t blkid;
        int nblks, err;

        if (zfs_prefetch_disable)
                return;

        if (len == 0) {  /* they're interested in the bonus buffer */
                dn = DMU_META_DNODE(os);

                if (object == 0 || object >= DN_MAX_OBJECT)
                        return;

                rw_enter(&dn->dn_struct_rwlock, RW_READER);
                blkid = dbuf_whichblock(dn, object * sizeof (dnode_phys_t));
                dbuf_prefetch(dn, blkid, ZIO_PRIORITY_SYNC_READ);
                rw_exit(&dn->dn_struct_rwlock);
                return;
        }

        /*
         * XXX - Note, if the dnode for the requested object is not
         * already cached, we will do a *synchronous* read in the
         * dnode_hold() call.  The same is true for any indirects.
         */
        err = dnode_hold(os, object, FTAG, &dn);
        if (err != 0)
                return;

        rw_enter(&dn->dn_struct_rwlock, RW_READER);
        if (dn->dn_datablkshift) {
                int blkshift = dn->dn_datablkshift;
                nblks = (P2ROUNDUP(offset + len, 1 << blkshift) -
                    P2ALIGN(offset, 1 << blkshift)) >> blkshift;
        } else {
                nblks = (offset < dn->dn_datablksz);
        }

        if (nblks != 0) {
                blkid = dbuf_whichblock(dn, offset);
                for (int i = 0; i < nblks; i++)
                        dbuf_prefetch(dn, blkid + i, ZIO_PRIORITY_SYNC_READ);
        }

        rw_exit(&dn->dn_struct_rwlock);

        dnode_rele(dn, FTAG);
}

/*
 * Get the next "chunk" of file data to free.  We traverse the file from
 * the end so that the file gets shorter over time (if we crash in the
 * middle, this will leave us in a better state).  We find allocated file
 * data by simply searching the allocated level 1 indirects.
 *
 * On input, *start should be the first offset that does not need to be
 * freed (e.g. "offset + length").  On return, *start will be the first
 * offset that should be freed.
 */
static int
get_next_chunk(dnode_t *dn, uint64_t *start, uint64_t minimum)
{
        uint64_t maxblks = DMU_MAX_ACCESS >> (dn->dn_indblkshift + 1);
        /* bytes of data covered by a level-1 indirect block */
        uint64_t iblkrange =
            dn->dn_datablksz * EPB(dn->dn_indblkshift, SPA_BLKPTRSHIFT);

        ASSERT3U(minimum, <=, *start);

        if (*start - minimum <= iblkrange * maxblks) {
                *start = minimum;
                return (0);
        }
        ASSERT(ISP2(iblkrange));

        for (uint64_t blks = 0; *start > minimum && blks < maxblks; blks++) {
                int err;

                /*
                 * dnode_next_offset(BACKWARDS) will find an allocated L1
                 * indirect block at or before the input offset.  We must
                 * decrement *start so that it is at the end of the region
                 * to search.
                 */
                (*start)--;
                err = dnode_next_offset(dn,
                    DNODE_FIND_BACKWARDS, start, 2, 1, 0);

                /* if there are no indirect blocks before start, we are done */
                if (err == ESRCH) {
                        *start = minimum;
                        break;
                } else if (err != 0) {
                        return (err);
                }

                /* set start to the beginning of this L1 indirect */
                *start = P2ALIGN(*start, iblkrange);
        }
        if (*start < minimum)
                *start = minimum;
        return (0);
}

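/*
 * Free a range of a file in chunks, assigning each chunk to its own
 * transaction so that an arbitrarily large free does not pin an
 * unbounded amount of dirty state in a single txg.
 */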
static int
dmu_free_long_range_impl(objset_t *os, dnode_t *dn, uint64_t offset,
    uint64_t length)
{
        uint64_t object_size = (dn->dn_maxblkid + 1) * dn->dn_datablksz;
        int err;

        if (offset >= object_size)
                return (0);

        if (length == DMU_OBJECT_END || offset + length > object_size)
                length = object_size - offset;

        while (length != 0) {
                uint64_t chunk_end, chunk_begin;

                chunk_end = chunk_begin = offset + length;

                /* move chunk_begin backwards to the beginning of this chunk */
                err = get_next_chunk(dn, &chunk_begin, offset);
                if (err)
                        return (err);
                ASSERT3U(chunk_begin, >=, offset);
                ASSERT3U(chunk_begin, <=, chunk_end);

                dmu_tx_t *tx = dmu_tx_create(os);
                dmu_tx_hold_free(tx, dn->dn_object,
                    chunk_begin, chunk_end - chunk_begin);

                /*
                 * Mark this transaction as typically resulting in a net
                 * reduction in space used.
                 */
                dmu_tx_mark_netfree(tx);
                err = dmu_tx_assign(tx, TXG_WAIT);
                if (err) {
                        dmu_tx_abort(tx);
                        return (err);
                }
                dnode_free_range(dn, chunk_begin, chunk_end - chunk_begin, tx);
                dmu_tx_commit(tx);

                length -= chunk_end - chunk_begin;
        }
        return (0);
}

int
dmu_free_long_range(objset_t *os, uint64_t object,
    uint64_t offset, uint64_t length)
{
        dnode_t *dn;
        int err;

        err = dnode_hold(os, object, FTAG, &dn);
        if (err != 0)
                return (err);
        err = dmu_free_long_range_impl(os, dn, offset, length);

        /*
         * It is important to zero out the maxblkid when freeing the entire
         * file, so that (a) subsequent calls to dmu_free_long_range_impl()
         * will take the fast path, and (b) dnode_reallocate() can verify
         * that the entire file has been freed.
         */
        if (err == 0 && offset == 0 && length == DMU_OBJECT_END)
                dn->dn_maxblkid = 0;

        dnode_rele(dn, FTAG);
        return (err);
}

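/*
 * Free all of an object's data, then free the object itself in a
 * separate transaction.
 */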
int
dmu_free_long_object(objset_t *os, uint64_t object)
{
        dmu_tx_t *tx;
        int err;

        err = dmu_free_long_range(os, object, 0, DMU_OBJECT_END);
        if (err != 0)
                return (err);

        tx = dmu_tx_create(os);
        dmu_tx_hold_bonus(tx, object);
        dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
        dmu_tx_mark_netfree(tx);
        err = dmu_tx_assign(tx, TXG_WAIT);
        if (err == 0) {
                err = dmu_object_free(os, object, tx);
                dmu_tx_commit(tx);
        } else {
                dmu_tx_abort(tx);
        }

        return (err);
}

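/*
 * Free a range of an object within the caller's transaction.  Unlike
 * dmu_free_long_range(), the caller is responsible for sizing the
 * transaction to cover the free.
 */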
int
dmu_free_range(objset_t *os, uint64_t object, uint64_t offset,
    uint64_t size, dmu_tx_t *tx)
{
        dnode_t *dn;
        int err = dnode_hold(os, object, FTAG, &dn);
        if (err)
                return (err);
        ASSERT(offset < UINT64_MAX);
        ASSERT(size == -1ULL || size <= UINT64_MAX - offset);
        dnode_free_range(dn, offset, size, tx);
        dnode_rele(dn, FTAG);
        return (0);
}

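/*
 * Read 'size' bytes from the given object into 'buf', starting at
 * 'offset'.  Reads for all covered blocks are issued in parallel
 * before the data is copied out.
 */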
int
dmu_read(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    void *buf, uint32_t flags)
{
        dnode_t *dn;
        dmu_buf_t **dbp;
        int numbufs, err;

        err = dnode_hold(os, object, FTAG, &dn);
        if (err)
                return (err);

        /*
         * Deal with odd block sizes, where there can't be data past the first
         * block.  If we ever do the tail block optimization, we will need to
         * handle that here as well.
         */
        if (dn->dn_maxblkid == 0) {
                int newsz = offset > dn->dn_datablksz ? 0 :
                    MIN(size, dn->dn_datablksz - offset);
                bzero((char *)buf + newsz, size - newsz);
                size = newsz;
        }

        while (size > 0) {
                uint64_t mylen = MIN(size, DMU_MAX_ACCESS / 2);
                int i;

                /*
                 * NB: we could do this block-at-a-time, but it's nice
                 * to be reading in parallel.
                 */
                err = dmu_buf_hold_array_by_dnode(dn, offset, mylen,
                    TRUE, FTAG, &numbufs, &dbp, flags);
                if (err)
                        break;

                for (i = 0; i < numbufs; i++) {
                        int tocpy;
                        int bufoff;
                        dmu_buf_t *db = dbp[i];

                        ASSERT(size > 0);

                        bufoff = offset - db->db_offset;
                        tocpy = (int)MIN(db->db_size - bufoff, size);

                        bcopy((char *)db->db_data + bufoff, buf, tocpy);

                        offset += tocpy;
                        size -= tocpy;
                        buf = (char *)buf + tocpy;
                }
                dmu_buf_rele_array(dbp, numbufs, FTAG);
        }
        dnode_rele(dn, FTAG);
        return (err);
}

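/*
 * Write 'size' bytes from 'buf' to the given object, starting at
 * 'offset'.  Whole-block writes avoid reading in the existing block;
 * partial blocks are dirtied (and read in if necessary) before the
 * copy.
 */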
void
dmu_write(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    const void *buf, dmu_tx_t *tx)
{
        dmu_buf_t **dbp;
        int numbufs, i;

        if (size == 0)
                return;

        VERIFY(0 == dmu_buf_hold_array(os, object, offset, size,
            FALSE, FTAG, &numbufs, &dbp));

        for (i = 0; i < numbufs; i++) {
                int tocpy;
                int bufoff;
                dmu_buf_t *db = dbp[i];

                ASSERT(size > 0);

                bufoff = offset - db->db_offset;
                tocpy = (int)MIN(db->db_size - bufoff, size);

                ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);

                if (tocpy == db->db_size)
                        dmu_buf_will_fill(db, tx);
                else
                        dmu_buf_will_dirty(db, tx);

                bcopy(buf, (char *)db->db_data + bufoff, tocpy);

                if (tocpy == db->db_size)
                        dmu_buf_fill_done(db, tx);

                offset += tocpy;
                size -= tocpy;
                buf = (char *)buf + tocpy;
        }
        dmu_buf_rele_array(dbp, numbufs, FTAG);
}

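/*
 * Mark the blocks covering the given range as "not to be filled",
 * allowing space to be reserved for them without writing any
 * leaf-block data.
 */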
void
dmu_prealloc(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    dmu_tx_t *tx)
{
        dmu_buf_t **dbp;
        int numbufs, i;

        if (size == 0)
                return;

        VERIFY(0 == dmu_buf_hold_array(os, object, offset, size,
            FALSE, FTAG, &numbufs, &dbp));

        for (i = 0; i < numbufs; i++) {
                dmu_buf_t *db = dbp[i];

                dmu_buf_will_not_fill(db, tx);
        }
        dmu_buf_rele_array(dbp, numbufs, FTAG);
}

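/*
 * Store small amounts of (possibly compressed) data directly in the
 * block pointer, using an embedded bp rather than allocating a block.
 */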
void
dmu_write_embedded(objset_t *os, uint64_t object, uint64_t offset,
    void *data, uint8_t etype, uint8_t comp, int uncompressed_size,
    int compressed_size, int byteorder, dmu_tx_t *tx)
{
        dmu_buf_t *db;

        ASSERT3U(etype, <, NUM_BP_EMBEDDED_TYPES);
        ASSERT3U(comp, <, ZIO_COMPRESS_FUNCTIONS);
        VERIFY0(dmu_buf_hold_noread(os, object, offset,
            FTAG, &db));

        dmu_buf_write_embedded(db,
            data, (bp_embedded_type_t)etype, (enum zio_compress)comp,
            uncompressed_size, compressed_size, byteorder, tx);

        dmu_buf_rele(db, FTAG);
}

/*
 * DMU support for xuio
 */
kstat_t *xuio_ksp = NULL;

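/*
 * Set up an xuio for zero-copy I/O: allocate the iovec array and the
 * parallel array used to track the loaned arc buffers.
 */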
int
dmu_xuio_init(xuio_t *xuio, int nblk)
{
        dmu_xuio_t *priv;
        uio_t *uio = &xuio->xu_uio;

        uio->uio_iovcnt = nblk;
        uio->uio_iov = kmem_zalloc(nblk * sizeof (iovec_t), KM_SLEEP);

        priv = kmem_zalloc(sizeof (dmu_xuio_t), KM_SLEEP);
        priv->cnt = nblk;
        priv->bufs = kmem_zalloc(nblk * sizeof (arc_buf_t *), KM_SLEEP);
        priv->iovp = uio->uio_iov;
        XUIO_XUZC_PRIV(xuio) = priv;

        if (XUIO_XUZC_RW(xuio) == UIO_READ)
                XUIOSTAT_INCR(xuiostat_onloan_rbuf, nblk);
        else
                XUIOSTAT_INCR(xuiostat_onloan_wbuf, nblk);

        return (0);
}

void
dmu_xuio_fini(xuio_t *xuio)
{
        dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio);
        int nblk = priv->cnt;

        kmem_free(priv->iovp, nblk * sizeof (iovec_t));
        kmem_free(priv->bufs, nblk * sizeof (arc_buf_t *));
        kmem_free(priv, sizeof (dmu_xuio_t));

        if (XUIO_XUZC_RW(xuio) == UIO_READ)
                XUIOSTAT_INCR(xuiostat_onloan_rbuf, -nblk);
        else
                XUIOSTAT_INCR(xuiostat_onloan_wbuf, -nblk);
}

/*
 * Initialize iov[priv->next] and priv->bufs[priv->next] with { off, n, abuf }
 * and increase priv->next by 1.
 */
int
dmu_xuio_add(xuio_t *xuio, arc_buf_t *abuf, offset_t off, size_t n)
{
        struct iovec *iov;
        uio_t *uio = &xuio->xu_uio;
        dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio);
        int i = priv->next++;

        ASSERT(i < priv->cnt);
        ASSERT(off + n <= arc_buf_size(abuf));
        iov = uio->uio_iov + i;
        iov->iov_base = (char *)abuf->b_data + off;
        iov->iov_len = n;
        priv->bufs[i] = abuf;
        return (0);
}

int
dmu_xuio_cnt(xuio_t *xuio)
{
        dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio);
        return (priv->cnt);
}

arc_buf_t *
dmu_xuio_arcbuf(xuio_t *xuio, int i)
{
        dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio);

        ASSERT(i < priv->cnt);
        return (priv->bufs[i]);
}

void
dmu_xuio_clear(xuio_t *xuio, int i)
{
        dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio);

        ASSERT(i < priv->cnt);
        priv->bufs[i] = NULL;
}

static void
xuio_stat_init(void)
{
        xuio_ksp = kstat_create("zfs", 0, "xuio_stats", "misc",
            KSTAT_TYPE_NAMED, sizeof (xuio_stats) / sizeof (kstat_named_t),
            KSTAT_FLAG_VIRTUAL);
        if (xuio_ksp != NULL) {
                xuio_ksp->ks_data = &xuio_stats;
                kstat_install(xuio_ksp);
        }
}

static void
xuio_stat_fini(void)
{
        if (xuio_ksp != NULL) {
                kstat_delete(xuio_ksp);
                xuio_ksp = NULL;
        }
}

void
xuio_stat_wbuf_copied()
{
        XUIOSTAT_BUMP(xuiostat_wbuf_copied);
}

void
xuio_stat_wbuf_nocopy()
{
        XUIOSTAT_BUMP(xuiostat_wbuf_nocopy);
}

#ifdef _KERNEL
static int
dmu_read_uio_dnode(dnode_t *dn, uio_t *uio, uint64_t size)
{
        dmu_buf_t **dbp;
        int numbufs, i, err;
        xuio_t *xuio = NULL;

        /*
         * NB: we could do this block-at-a-time, but it's nice
         * to be reading in parallel.
         */
        err = dmu_buf_hold_array_by_dnode(dn, uio->uio_loffset, size,
            TRUE, FTAG, &numbufs, &dbp, 0);
        if (err)
                return (err);

        if (uio->uio_extflg == UIO_XUIO)
                xuio = (xuio_t *)uio;

        for (i = 0; i < numbufs; i++) {
                int tocpy;
                int bufoff;
                dmu_buf_t *db = dbp[i];

                ASSERT(size > 0);

                bufoff = uio->uio_loffset - db->db_offset;
                tocpy = (int)MIN(db->db_size - bufoff, size);

                if (xuio) {
                        dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
                        arc_buf_t *dbuf_abuf = dbi->db_buf;
                        arc_buf_t *abuf = dbuf_loan_arcbuf(dbi);
                        err = dmu_xuio_add(xuio, abuf, bufoff, tocpy);
                        if (!err) {
                                uio->uio_resid -= tocpy;
                                uio->uio_loffset += tocpy;
                        }

                        if (abuf == dbuf_abuf)
                                XUIOSTAT_BUMP(xuiostat_rbuf_nocopy);
                        else
                                XUIOSTAT_BUMP(xuiostat_rbuf_copied);
                } else {
                        err = uiomove((char *)db->db_data + bufoff, tocpy,
                            UIO_READ, uio);
                }
                if (err)
                        break;

                size -= tocpy;
        }
        dmu_buf_rele_array(dbp, numbufs, FTAG);

        return (err);
}

/*
 * Read 'size' bytes into the uio buffer.
 * From object zdb->db_object.
 * Starting at offset uio->uio_loffset.
 *
 * If the caller already has a dbuf in the target object
 * (e.g. its bonus buffer), this routine is faster than dmu_read_uio(),
 * because we don't have to find the dnode_t for the object.
 */
int
dmu_read_uio_dbuf(dmu_buf_t *zdb, uio_t *uio, uint64_t size)
{
        dmu_buf_impl_t *db = (dmu_buf_impl_t *)zdb;
        dnode_t *dn;
        int err;

        if (size == 0)
                return (0);

        DB_DNODE_ENTER(db);
        dn = DB_DNODE(db);
        err = dmu_read_uio_dnode(dn, uio, size);
        DB_DNODE_EXIT(db);

        return (err);
}

/*
 * Read 'size' bytes into the uio buffer.
 * From the specified object
 * Starting at offset uio->uio_loffset.
 */
int
dmu_read_uio(objset_t *os, uint64_t object, uio_t *uio, uint64_t size)
{
        dnode_t *dn;
        int err;

        if (size == 0)
                return (0);

        err = dnode_hold(os, object, FTAG, &dn);
        if (err)
                return (err);

        err = dmu_read_uio_dnode(dn, uio, size);

        dnode_rele(dn, FTAG);

        return (err);
}

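/*
 * Write 'size' bytes from the uio buffer to the object the dnode
 * refers to, starting at offset uio->uio_loffset.
 */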
static int
dmu_write_uio_dnode(dnode_t *dn, uio_t *uio, uint64_t size, dmu_tx_t *tx)
{
        dmu_buf_t **dbp;
        int numbufs;
        int err = 0;
        int i;

        err = dmu_buf_hold_array_by_dnode(dn, uio->uio_loffset, size,
            FALSE, FTAG, &numbufs, &dbp, DMU_READ_PREFETCH);
        if (err)
                return (err);

        for (i = 0; i < numbufs; i++) {
                int tocpy;
                int bufoff;
                dmu_buf_t *db = dbp[i];

                ASSERT(size > 0);

                bufoff = uio->uio_loffset - db->db_offset;
                tocpy = (int)MIN(db->db_size - bufoff, size);

                ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);

                if (tocpy == db->db_size)
                        dmu_buf_will_fill(db, tx);
                else
                        dmu_buf_will_dirty(db, tx);

                /*
                 * XXX uiomove could block forever (eg. nfs-backed
                 * pages).  There needs to be a uiolockdown() function
                 * to lock the pages in memory, so that uiomove won't
                 * block.
                 */
                err = uiomove((char *)db->db_data + bufoff, tocpy,
                    UIO_WRITE, uio);

                if (tocpy == db->db_size)
                        dmu_buf_fill_done(db, tx);

                if (err)
                        break;

                size -= tocpy;
        }

        dmu_buf_rele_array(dbp, numbufs, FTAG);
        return (err);
}

/*
 * Write 'size' bytes from the uio buffer.
 * To object zdb->db_object.
 * Starting at offset uio->uio_loffset.
 *
 * If the caller already has a dbuf in the target object
 * (e.g. its bonus buffer), this routine is faster than dmu_write_uio(),
 * because we don't have to find the dnode_t for the object.
 */
int
dmu_write_uio_dbuf(dmu_buf_t *zdb, uio_t *uio, uint64_t size,
    dmu_tx_t *tx)
{
        dmu_buf_impl_t *db = (dmu_buf_impl_t *)zdb;
        dnode_t *dn;
        int err;

        if (size == 0)
                return (0);

        DB_DNODE_ENTER(db);
        dn = DB_DNODE(db);
        err = dmu_write_uio_dnode(dn, uio, size, tx);
        DB_DNODE_EXIT(db);

        return (err);
}

/*
 * Write 'size' bytes from the uio buffer.
 * To the specified object.
 * Starting at offset uio->uio_loffset.
 */
int
dmu_write_uio(objset_t *os, uint64_t object, uio_t *uio, uint64_t size,
    dmu_tx_t *tx)
{
        dnode_t *dn;
        int err;

        if (size == 0)
                return (0);

        err = dnode_hold(os, object, FTAG, &dn);
        if (err)
                return (err);

        err = dmu_write_uio_dnode(dn, uio, size, tx);

        dnode_rele(dn, FTAG);

        return (err);
}

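/*
 * Write 'size' bytes to the given object from a list of pages,
 * mapping and copying each page into the covering dbufs.
 */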
int
dmu_write_pages(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    page_t *pp, dmu_tx_t *tx)
{
        dmu_buf_t **dbp;
        int numbufs, i;
        int err;

        if (size == 0)
                return (0);

        err = dmu_buf_hold_array(os, object, offset, size,
            FALSE, FTAG, &numbufs, &dbp);
        if (err)
                return (err);

        for (i = 0; i < numbufs; i++) {
                int tocpy, copied, thiscpy;
                int bufoff;
                dmu_buf_t *db = dbp[i];
                caddr_t va;

                ASSERT(size > 0);
                ASSERT3U(db->db_size, >=, PAGESIZE);

                bufoff = offset - db->db_offset;
                tocpy = (int)MIN(db->db_size - bufoff, size);

                ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);

                if (tocpy == db->db_size)
                        dmu_buf_will_fill(db, tx);
                else
                        dmu_buf_will_dirty(db, tx);

                for (copied = 0; copied < tocpy; copied += PAGESIZE) {
                        ASSERT3U(pp->p_offset, ==, db->db_offset + bufoff);
                        thiscpy = MIN(PAGESIZE, tocpy - copied);
                        va = zfs_map_page(pp, S_READ);
                        bcopy(va, (char *)db->db_data + bufoff, thiscpy);
                        zfs_unmap_page(pp, va);
                        pp = pp->p_next;
                        bufoff += PAGESIZE;
                }

                if (tocpy == db->db_size)
                        dmu_buf_fill_done(db, tx);

                offset += tocpy;
                size -= tocpy;
        }
        dmu_buf_rele_array(dbp, numbufs, FTAG);
        return (err);
}
#endif

/*
 * Allocate a loaned anonymous arc buffer.
 */
arc_buf_t *
dmu_request_arcbuf(dmu_buf_t *handle, int size)
{
        dmu_buf_impl_t *db = (dmu_buf_impl_t *)handle;

        return (arc_loan_buf(db->db_objset->os_spa, size));
}

/*
 * Free a loaned arc buffer.
 */
void
dmu_return_arcbuf(arc_buf_t *buf)
{
        arc_return_buf(buf, FTAG);
        VERIFY(arc_buf_remove_ref(buf, FTAG));
}

/*
 * When possible directly assign passed loaned arc buffer to a dbuf.
 * If this is not possible copy the contents of passed arc buf via
 * dmu_write().
 */
void
dmu_assign_arcbuf(dmu_buf_t *handle, uint64_t offset, arc_buf_t *buf,
    dmu_tx_t *tx)
{
        dmu_buf_impl_t *dbuf = (dmu_buf_impl_t *)handle;
        dnode_t *dn;
        dmu_buf_impl_t *db;
        uint32_t blksz = (uint32_t)arc_buf_size(buf);
        uint64_t blkid;

        DB_DNODE_ENTER(dbuf);
        dn = DB_DNODE(dbuf);
        rw_enter(&dn->dn_struct_rwlock, RW_READER);
        blkid = dbuf_whichblock(dn, offset);
        VERIFY((db = dbuf_hold(dn, blkid, FTAG)) != NULL);
        rw_exit(&dn->dn_struct_rwlock);
        DB_DNODE_EXIT(dbuf);

        /*
         * We can only assign if the offset is aligned, the arc buf is the
         * same size as the dbuf, and the dbuf is not metadata.  It
         * can't be metadata because the loaned arc buf comes from the
         * user-data kmem arena.
         */
        if (offset == db->db.db_offset && blksz == db->db.db_size &&
            DBUF_GET_BUFC_TYPE(db) == ARC_BUFC_DATA) {
                dbuf_assign_arcbuf(db, buf, tx);
                dbuf_rele(db, FTAG);
        } else {
                objset_t *os;
                uint64_t object;

                DB_DNODE_ENTER(dbuf);
                dn = DB_DNODE(dbuf);
                os = dn->dn_objset;
                object = dn->dn_object;
                DB_DNODE_EXIT(dbuf);

                dbuf_rele(db, FTAG);
                dmu_write(os, object, offset, blksz, buf->b_data, tx);
                dmu_return_arcbuf(buf);
                XUIOSTAT_BUMP(xuiostat_wbuf_copied);
        }
}

typedef struct {
        dbuf_dirty_record_t     *dsa_dr;
        dmu_sync_cb_t           *dsa_done;
        zgd_t                   *dsa_zgd;
        dmu_tx_t                *dsa_tx;
} dmu_sync_arg_t;

/* ARGSUSED */
static void
dmu_sync_ready(zio_t *zio, arc_buf_t *buf, void *varg)
{
        dmu_sync_arg_t *dsa = varg;
        dmu_buf_t *db = dsa->dsa_zgd->zgd_db;
        blkptr_t *bp = zio->io_bp;

        if (zio->io_error == 0) {
                if (BP_IS_HOLE(bp)) {
                        /*
                         * A block of zeros may compress to a hole, but the
                         * block size still needs to be known for replay.
                         */
                        BP_SET_LSIZE(bp, db->db_size);
                } else if (!BP_IS_EMBEDDED(bp)) {
                        ASSERT(BP_GET_LEVEL(bp) == 0);
                        bp->blk_fill = 1;
                }
        }
}

static void
dmu_sync_late_arrival_ready(zio_t *zio)
{
        dmu_sync_ready(zio, NULL, zio->io_private);
}

/* ARGSUSED */
static void
dmu_sync_done(zio_t *zio, arc_buf_t *buf, void *varg)
{
        dmu_sync_arg_t *dsa = varg;
        dbuf_dirty_record_t *dr = dsa->dsa_dr;
        dmu_buf_impl_t *db = dr->dr_dbuf;

        mutex_enter(&db->db_mtx);
        ASSERT(dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC);
        if (zio->io_error == 0) {
                dr->dt.dl.dr_nopwrite = !!(zio->io_flags & ZIO_FLAG_NOPWRITE);
                if (dr->dt.dl.dr_nopwrite) {
                        blkptr_t *bp = zio->io_bp;
                        blkptr_t *bp_orig = &zio->io_bp_orig;
                        uint8_t chksum = BP_GET_CHECKSUM(bp_orig);

                        ASSERT(BP_EQUAL(bp, bp_orig));
                        ASSERT(zio->io_prop.zp_compress != ZIO_COMPRESS_OFF);
                        ASSERT(zio_checksum_table[chksum].ci_dedup);
                }
                dr->dt.dl.dr_overridden_by = *zio->io_bp;
                dr->dt.dl.dr_override_state = DR_OVERRIDDEN;
                dr->dt.dl.dr_copies = zio->io_prop.zp_copies;
                if (BP_IS_HOLE(&dr->dt.dl.dr_overridden_by))
                        BP_ZERO(&dr->dt.dl.dr_overridden_by);
        } else {
                dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
        }
        cv_broadcast(&db->db_changed);
        mutex_exit(&db->db_mtx);

        dsa->dsa_done(dsa->dsa_zgd, zio->io_error);

        kmem_free(dsa, sizeof (*dsa));
}

static void
dmu_sync_late_arrival_done(zio_t *zio)
{
        blkptr_t *bp = zio->io_bp;
        dmu_sync_arg_t *dsa = zio->io_private;
        blkptr_t *bp_orig = &zio->io_bp_orig;

        if (zio->io_error == 0 && !BP_IS_HOLE(bp)) {
                /*
                 * If we didn't allocate a new block (i.e. ZIO_FLAG_NOPWRITE)
                 * then there is nothing to do here. Otherwise, free the
                 * newly allocated block in this txg.
                 */
                if (zio->io_flags & ZIO_FLAG_NOPWRITE) {
                        ASSERT(BP_EQUAL(bp, bp_orig));
                } else {
                        ASSERT(BP_IS_HOLE(bp_orig) || !BP_EQUAL(bp, bp_orig));
                        ASSERT(zio->io_bp->blk_birth == zio->io_txg);
                        ASSERT(zio->io_txg > spa_syncing_txg(zio->io_spa));
                        zio_free(zio->io_spa, zio->io_txg, zio->io_bp);
                }
        }

        dmu_tx_commit(dsa->dsa_tx);

        dsa->dsa_done(dsa->dsa_zgd, zio->io_error);

        kmem_free(dsa, sizeof (*dsa));
}

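/*
 * Handle a dmu_sync() request for a txg that is already syncing (or
 * for a frozen pool): write the data as a new block in its own
 * transaction rather than touching the dirty record.
 */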
static int
dmu_sync_late_arrival(zio_t *pio, objset_t *os, dmu_sync_cb_t *done, zgd_t *zgd,
    zio_prop_t *zp, zbookmark_phys_t *zb)
{
        dmu_sync_arg_t *dsa;
        dmu_tx_t *tx;

        tx = dmu_tx_create(os);
        dmu_tx_hold_space(tx, zgd->zgd_db->db_size);
        if (dmu_tx_assign(tx, TXG_WAIT) != 0) {
                dmu_tx_abort(tx);
                /* Make zl_get_data do txg_wait_synced() */
                return (SET_ERROR(EIO));
        }

        dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP);
        dsa->dsa_dr = NULL;
        dsa->dsa_done = done;
        dsa->dsa_zgd = zgd;
        dsa->dsa_tx = tx;

        zio_nowait(zio_write(pio, os->os_spa, dmu_tx_get_txg(tx), zgd->zgd_bp,
            zgd->zgd_db->db_data, zgd->zgd_db->db_size, zp,
            dmu_sync_late_arrival_ready, NULL, dmu_sync_late_arrival_done, dsa,
            ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_CANFAIL, zb));

        return (0);
}

/*
 * Intent log support: sync the block associated with db to disk.
 * N.B. and XXX: the caller is responsible for making sure that the
 * data isn't changing while dmu_sync() is writing it.
 *
 * Return values:
 *
 *      EEXIST: this txg has already been synced, so there's nothing to do.
 *              The caller should not log the write.
 *
 *      ENOENT: the block was dbuf_free_range()'d, so there's nothing to do.
 *              The caller should not log the write.
 *
 *      EALREADY: this block is already in the process of being synced.
 *              The caller should track its progress (somehow).
 *
 *      EIO: could not do the I/O.
 *              The caller should do a txg_wait_synced().
 *
 *      0: the I/O has been initiated.
 *              The caller should log this blkptr in the done callback.
 *              It is possible that the I/O will fail, in which case
 *              the error will be reported to the done callback and
 *              propagated to pio from zio_done().
 */
int
dmu_sync(zio_t *pio, uint64_t txg, dmu_sync_cb_t *done, zgd_t *zgd)
{
        blkptr_t *bp = zgd->zgd_bp;
        dmu_buf_impl_t *db = (dmu_buf_impl_t *)zgd->zgd_db;
        objset_t *os = db->db_objset;
        dsl_dataset_t *ds = os->os_dsl_dataset;
        dbuf_dirty_record_t *dr;
        dmu_sync_arg_t *dsa;
        zbookmark_phys_t zb;
        zio_prop_t zp;
        dnode_t *dn;

        ASSERT(pio != NULL);
        ASSERT(txg != 0);

        SET_BOOKMARK(&zb, ds->ds_object,
            db->db.db_object, db->db_level, db->db_blkid);

        DB_DNODE_ENTER(db);
        dn = DB_DNODE(db);
        dmu_write_policy(os, dn, db->db_level, WP_DMU_SYNC, &zp);
        DB_DNODE_EXIT(db);

        /*
         * If we're frozen (running ziltest), we always need to generate a bp.
         */
        if (txg > spa_freeze_txg(os->os_spa))
                return (dmu_sync_late_arrival(pio, os, done, zgd, &zp, &zb));

        /*
         * Grabbing db_mtx now provides a barrier between dbuf_sync_leaf()
         * and us.  If we determine that this txg is not yet syncing,
         * but it begins to sync a moment later, that's OK because the
         * sync thread will block in dbuf_sync_leaf() until we drop db_mtx.
         */
        mutex_enter(&db->db_mtx);

        if (txg <= spa_last_synced_txg(os->os_spa)) {
                /*
                 * This txg has already synced.  There's nothing to do.
                 */
                mutex_exit(&db->db_mtx);
                return (SET_ERROR(EEXIST));
        }

        if (txg <= spa_syncing_txg(os->os_spa)) {
                /*
                 * This txg is currently syncing, so we can't mess with
                 * the dirty record anymore; just write a new log block.
                 */
                mutex_exit(&db->db_mtx);
                return (dmu_sync_late_arrival(pio, os, done, zgd, &zp, &zb));
        }

        dr = db->db_last_dirty;
        while (dr && dr->dr_txg != txg)
                dr = dr->dr_next;

        if (dr == NULL) {
                /*
                 * There's no dr for this dbuf, so it must have been freed.
                 * There's no need to log writes to freed blocks, so we're done.
                 */
                mutex_exit(&db->db_mtx);
                return (SET_ERROR(ENOENT));
        }

        ASSERT(dr->dr_next == NULL || dr->dr_next->dr_txg < txg);

1586         /*
1587          * Assume the on-disk data is X, the current syncing data is Y,
1588          * and the current in-memory data is Z (currently in dmu_sync).
1589  * X and Z are identical but Y has been modified. Normally,
1590          * when X and Z are the same we will perform a nopwrite but if Y
1591          * is different we must disable nopwrite since the resulting write
1592          * of Y to disk can free the block containing X. If we allowed a
1593          * nopwrite to occur the block pointing to Z would reference a freed
1594          * block. Since this is a rare case we simplify this by disabling
1595          * nopwrite if the current dmu_sync-ing dbuf has been modified in
1596          * a previous transaction.
1597          */
1598         if (dr->dr_next)
1599                 zp.zp_nopwrite = B_FALSE;
1600 
1601         ASSERT(dr->dr_txg == txg);
1602         if (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC ||
1603             dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
1604                 /*
1605                  * We have already issued a sync write for this buffer,
1606                  * or this buffer has already been synced.  It could not
1607                  * have been dirtied since, or we would have cleared the state.
1608                  */
1609                 mutex_exit(&db->db_mtx);
1610                 return (SET_ERROR(EALREADY));
1611         }
1612 
1613         ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
1614         dr->dt.dl.dr_override_state = DR_IN_DMU_SYNC;
1615         mutex_exit(&db->db_mtx);
1616 
1617         dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP);
1618         dsa->dsa_dr = dr;
1619         dsa->dsa_done = done;
1620         dsa->dsa_zgd = zgd;
1621         dsa->dsa_tx = NULL;
1622 
1623         zio_nowait(arc_write(pio, os->os_spa, txg,
1624             bp, dr->dt.dl.dr_data, DBUF_IS_L2CACHEABLE(db),
1625             DBUF_IS_L2COMPRESSIBLE(db), &zp, dmu_sync_ready,
1626             NULL, dmu_sync_done, dsa, ZIO_PRIORITY_SYNC_WRITE,
1627             ZIO_FLAG_CANFAIL, &zb));
1628 
1629         return (0);
1630 }
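/*
 * Illustrative caller sketch (not part of this file; the callback name
 * zfs_get_done is an assumption): a ZIL get-data path might dispatch
 * dmu_sync() and map the return values documented above like so:
 *
 *	error = dmu_sync(zio, txg, zfs_get_done, zgd);
 *	if (error == EEXIST || error == ENOENT)
 *		return (0);			(don't log the write)
 *	else if (error == EALREADY)
 *		(track the in-flight sync)
 *	else if (error == EIO)
 *		txg_wait_synced(dmu_objset_pool(os), txg);
 *	(on 0, log the blkptr from the done callback)
 */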
1631 
1632 int
1633 dmu_object_set_blocksize(objset_t *os, uint64_t object, uint64_t size, int ibs,
1634         dmu_tx_t *tx)
1635 {
1636         dnode_t *dn;
1637         int err;
1638 
1639         err = dnode_hold(os, object, FTAG, &dn);
1640         if (err)
1641                 return (err);
1642         err = dnode_set_blksz(dn, size, ibs, tx);
1643         dnode_rele(dn, FTAG);
1644         return (err);
1645 }
1646 
1647 void
1648 dmu_object_set_checksum(objset_t *os, uint64_t object, uint8_t checksum,
1649         dmu_tx_t *tx)
1650 {
1651         dnode_t *dn;
1652 
1653         /*
1654          * Send streams include each object's checksum function.  This
1655          * check ensures that the receiving system can understand the
1656          * checksum function transmitted.
1657          */
1658         ASSERT3U(checksum, <, ZIO_CHECKSUM_LEGACY_FUNCTIONS);
1659 
1660         VERIFY0(dnode_hold(os, object, FTAG, &dn));
1661         ASSERT3U(checksum, <, ZIO_CHECKSUM_FUNCTIONS);
1662         dn->dn_checksum = checksum;
1663         dnode_setdirty(dn, tx);
1664         dnode_rele(dn, FTAG);
1665 }
1666 
1667 void
1668 dmu_object_set_compress(objset_t *os, uint64_t object, uint8_t compress,
1669         dmu_tx_t *tx)
1670 {
1671         dnode_t *dn;
1672 
1673         /*
1674          * Send streams include each object's compression function.  This
1675          * check ensures that the receiving system can understand the
1676          * compression function transmitted.
1677          */
1678         ASSERT3U(compress, <, ZIO_COMPRESS_LEGACY_FUNCTIONS);
1679 
1680         VERIFY0(dnode_hold(os, object, FTAG, &dn));
1681         dn->dn_compress = compress;
1682         dnode_setdirty(dn, tx);
1683         dnode_rele(dn, FTAG);
1684 }
1685 
1686 int zfs_mdcomp_disable = 0;
1687 
1688 /*
1689  * When the "redundant_metadata" property is set to "most", only indirect
1690  * blocks of this level and higher will have an additional ditto block.
1691  */
1692 int zfs_redundant_metadata_most_ditto_level = 2;
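/*
 * Worked example with the default of 2: under redundant_metadata=most,
 * a level-1 indirect block of an ordinary file gets no extra ditto
 * copy, while level-2 and higher indirects, metadata-type objects, and
 * spill blocks each get one additional copy (see the copies++ logic in
 * dmu_write_policy() below).
 */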
1693 
1694 void
1695 dmu_write_policy(objset_t *os, dnode_t *dn, int level, int wp, zio_prop_t *zp)
1696 {
1697         dmu_object_type_t type = dn ? dn->dn_type : DMU_OT_OBJSET;
1698         boolean_t ismd = (level > 0 || DMU_OT_IS_METADATA(type) ||
1699             (wp & WP_SPILL));
1700         enum zio_checksum checksum = os->os_checksum;
1701         enum zio_compress compress = os->os_compress;
1702         enum zio_checksum dedup_checksum = os->os_dedup_checksum;
1703         boolean_t dedup = B_FALSE;
1704         boolean_t nopwrite = B_FALSE;
1705         boolean_t dedup_verify = os->os_dedup_verify;
1706         int copies = os->os_copies;
1707 
1708         /*
1709          * We maintain different write policies for each of the following
1710          * types of data:
1711          *       1. metadata
1712          *       2. preallocated blocks (i.e. level-0 blocks of a dump device)
1713          *       3. all other level 0 blocks
1714          */
1715         if (ismd) {
1716                 /*
1717                  * XXX -- we should design a compression algorithm
1718                  * that specializes in arrays of bps.
1719                  */
1720                 boolean_t lz4_ac = spa_feature_is_active(os->os_spa,
1721                     SPA_FEATURE_LZ4_COMPRESS);
1722 
1723                 if (zfs_mdcomp_disable) {
1724                         compress = ZIO_COMPRESS_EMPTY;
1725                 } else if (lz4_ac) {
1726                         compress = ZIO_COMPRESS_LZ4;
1727                 } else {
1728                         compress = ZIO_COMPRESS_LZJB;
1729                 }
1730 
1731                 /*
1732                  * Metadata always gets checksummed.  If the data
1733                  * checksum is multi-bit correctable, and it's not a
1734                  * ZBT-style checksum, then it's suitable for metadata
1735                  * as well.  Otherwise, the metadata checksum defaults
1736                  * to fletcher4.
1737                  */
1738                 if (zio_checksum_table[checksum].ci_correctable < 1 ||
1739                     zio_checksum_table[checksum].ci_eck)
1740                         checksum = ZIO_CHECKSUM_FLETCHER_4;
1741 
1742                 if (os->os_redundant_metadata == ZFS_REDUNDANT_METADATA_ALL ||
1743                     (os->os_redundant_metadata ==
1744                     ZFS_REDUNDANT_METADATA_MOST &&
1745                     (level >= zfs_redundant_metadata_most_ditto_level ||
1746                     DMU_OT_IS_METADATA(type) || (wp & WP_SPILL))))
1747                         copies++;
1748         } else if (wp & WP_NOFILL) {
1749                 ASSERT(level == 0);
1750 
1751                 /*
1752                  * If we're writing preallocated blocks, we aren't actually
1753                  * writing them so don't set any policy properties.  These
1754                  * blocks are currently only used by an external subsystem
1755                  * outside of zfs (i.e. dump) and not written by the zio
1756                  * pipeline.
1757                  */
1758                 compress = ZIO_COMPRESS_OFF;
1759                 checksum = ZIO_CHECKSUM_NOPARITY;
1760         } else {
1761                 compress = zio_compress_select(dn->dn_compress, compress);
1762 
1763                 checksum = (dedup_checksum == ZIO_CHECKSUM_OFF) ?
1764                     zio_checksum_select(dn->dn_checksum, checksum) :
1765                     dedup_checksum;
1766 
1767                 /*
1768                  * Determine dedup setting.  If we are in dmu_sync(),
1769                  * we won't actually dedup now because that's all
1770                  * done in syncing context; but we do want to use the
1771  * dedup checksum.  If the checksum is not strong
1772                  * enough to ensure unique signatures, force
1773                  * dedup_verify.
1774                  */
1775                 if (dedup_checksum != ZIO_CHECKSUM_OFF) {
1776                         dedup = (wp & WP_DMU_SYNC) ? B_FALSE : B_TRUE;
1777                         if (!zio_checksum_table[checksum].ci_dedup)
1778                                 dedup_verify = B_TRUE;
1779                 }
1780 
1781                 /*
1782                  * Enable nopwrite if we have a cryptographically secure
1783                  * checksum that has no known collisions (i.e. SHA-256)
1784                  * and compression is enabled.  We don't enable nopwrite if
1785                  * dedup is enabled as the two features are mutually exclusive.
1786                  */
1787                 nopwrite = (!dedup && zio_checksum_table[checksum].ci_dedup &&
1788                     compress != ZIO_COMPRESS_OFF && zfs_nopwrite_enabled);
1789         }
1790 
1791         zp->zp_checksum = checksum;
1792         zp->zp_compress = compress;
1793         zp->zp_type = (wp & WP_SPILL) ? dn->dn_bonustype : type;
1794         zp->zp_level = level;
1795         zp->zp_copies = MIN(copies, spa_max_replication(os->os_spa));
1796         zp->zp_dedup = dedup;
1797         zp->zp_dedup_verify = dedup && dedup_verify;
1798         zp->zp_nopwrite = nopwrite;
1799 }
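/*
 * Worked example (illustrative, assuming objset defaults): for a
 * level-0 data block of a plain file with compression=lz4, dedup=off
 * and checksum=fletcher4, the policy above resolves to
 * zp_compress=lz4, zp_checksum=fletcher4, zp_dedup=B_FALSE and
 * zp_nopwrite=B_FALSE, the last because fletcher4 is not a
 * dedup-capable (collision-resistant) checksum.
 */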
1800 
1801 int
1802 dmu_offset_next(objset_t *os, uint64_t object, boolean_t hole, uint64_t *off)
1803 {
1804         dnode_t *dn;
1805         int i, err;
1806 
1807         err = dnode_hold(os, object, FTAG, &dn);
1808         if (err)
1809                 return (err);
1810         /*
1811          * Sync any current changes before
1812          * we go trundling through the block pointers.
1813          */
1814         for (i = 0; i < TXG_SIZE; i++) {
1815                 if (list_link_active(&dn->dn_dirty_link[i]))
1816                         break;
1817         }
1818         if (i != TXG_SIZE) {
1819                 dnode_rele(dn, FTAG);
1820                 txg_wait_synced(dmu_objset_pool(os), 0);
1821                 err = dnode_hold(os, object, FTAG, &dn);
1822                 if (err)
1823                         return (err);
1824         }
1825 
1826         err = dnode_next_offset(dn, (hole ? DNODE_FIND_HOLE : 0), off, 1, 1, 0);
1827         dnode_rele(dn, FTAG);
1828 
1829         return (err);
1830 }
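/*
 * Usage note: dmu_offset_next() backs hole/data iteration such as
 * lseek(2) SEEK_HOLE/SEEK_DATA in the ZPL.  A hypothetical scan of the
 * data regions of an object (sketch only; the caller must advance
 * *off past each region before the next call):
 *
 *	uint64_t off = 0;
 *	while (dmu_offset_next(os, object, B_FALSE, &off) == 0) {
 *		(off now points at the start of the next data region)
 *	}
 *	(ESRCH from dnode_next_offset() means nothing further was found)
 */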
1831 
1832 void
1833 dmu_object_info_from_dnode(dnode_t *dn, dmu_object_info_t *doi)
1834 {
1835         dnode_phys_t *dnp;
1836 
1837         rw_enter(&dn->dn_struct_rwlock, RW_READER);
1838         mutex_enter(&dn->dn_mtx);
1839 
1840         dnp = dn->dn_phys;
1841 
1842         doi->doi_data_block_size = dn->dn_datablksz;
1843         doi->doi_metadata_block_size = dn->dn_indblkshift ?
1844             1ULL << dn->dn_indblkshift : 0;
1845         doi->doi_type = dn->dn_type;
1846         doi->doi_bonus_type = dn->dn_bonustype;
1847         doi->doi_bonus_size = dn->dn_bonuslen;
1848         doi->doi_indirection = dn->dn_nlevels;
1849         doi->doi_checksum = dn->dn_checksum;
1850         doi->doi_compress = dn->dn_compress;
1851         doi->doi_nblkptr = dn->dn_nblkptr;
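	/* bytes used on disk, rounded to the nearest 512-byte unit */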
1852         doi->doi_physical_blocks_512 = (DN_USED_BYTES(dnp) + 256) >> 9;
1853         doi->doi_max_offset = (dn->dn_maxblkid + 1) * dn->dn_datablksz;
1854         doi->doi_fill_count = 0;
1855         for (int i = 0; i < dnp->dn_nblkptr; i++)
1856                 doi->doi_fill_count += BP_GET_FILL(&dnp->dn_blkptr[i]);
1857 
1858         mutex_exit(&dn->dn_mtx);
1859         rw_exit(&dn->dn_struct_rwlock);
1860 }
1861 
1862 /*
1863  * Get information on a DMU object.
1864  * If doi is NULL, just indicates whether the object exists.
1865  */
1866 int
1867 dmu_object_info(objset_t *os, uint64_t object, dmu_object_info_t *doi)
1868 {
1869         dnode_t *dn;
1870         int err = dnode_hold(os, object, FTAG, &dn);
1871 
1872         if (err)
1873                 return (err);
1874 
1875         if (doi != NULL)
1876                 dmu_object_info_from_dnode(dn, doi);
1877 
1878         dnode_rele(dn, FTAG);
1879         return (0);
1880 }
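/*
 * E.g. a pure existence check (illustrative):
 *
 *	if (dmu_object_info(os, object, NULL) == 0)
 *		(the object exists)
 */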
1881 
1882 /*
1883  * As above, but faster; can be used when you have a held dbuf in hand.
1884  */
1885 void
1886 dmu_object_info_from_db(dmu_buf_t *db_fake, dmu_object_info_t *doi)
1887 {
1888         dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
1889 
1890         DB_DNODE_ENTER(db);
1891         dmu_object_info_from_dnode(DB_DNODE(db), doi);
1892         DB_DNODE_EXIT(db);
1893 }
1894 
1895 /*
1896  * Faster still when you only care about the size.
1897  * This is specifically optimized for zfs_getattr().
1898  */
1899 void
1900 dmu_object_size_from_db(dmu_buf_t *db_fake, uint32_t *blksize,
1901     u_longlong_t *nblk512)
1902 {
1903         dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
1904         dnode_t *dn;
1905 
1906         DB_DNODE_ENTER(db);
1907         dn = DB_DNODE(db);
1908 
1909         *blksize = dn->dn_datablksz;
1910         /* add 1 for dnode space */
1911         *nblk512 = ((DN_USED_BYTES(dn->dn_phys) + SPA_MINBLOCKSIZE/2) >>
1912             SPA_MINBLOCKSHIFT) + 1;
1913         DB_DNODE_EXIT(db);
1914 }
1915 
1916 void
1917 byteswap_uint64_array(void *vbuf, size_t size)
1918 {
1919         uint64_t *buf = vbuf;
1920         size_t count = size >> 3;
1921         int i;
1922 
1923         ASSERT((size & 7) == 0);
1924 
1925         for (i = 0; i < count; i++)
1926                 buf[i] = BSWAP_64(buf[i]);
1927 }
1928 
1929 void
1930 byteswap_uint32_array(void *vbuf, size_t size)
1931 {
1932         uint32_t *buf = vbuf;
1933         size_t count = size >> 2;
1934         int i;
1935 
1936         ASSERT((size & 3) == 0);
1937 
1938         for (i = 0; i < count; i++)
1939                 buf[i] = BSWAP_32(buf[i]);
1940 }
1941 
1942 void
1943 byteswap_uint16_array(void *vbuf, size_t size)
1944 {
1945         uint16_t *buf = vbuf;
1946         size_t count = size >> 1;
1947         int i;
1948 
1949         ASSERT((size & 1) == 0);
1950 
1951         for (i = 0; i < count; i++)
1952                 buf[i] = BSWAP_16(buf[i]);
1953 }
1954 
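/* Single bytes have no byte order, so this swap is deliberately a no-op. */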
1955 /* ARGSUSED */
1956 void
1957 byteswap_uint8_array(void *vbuf, size_t size)
1958 {
1959 }
1960 
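/*
 * Bring the DMU's subsystems up in dependency order (e.g. l2arc before
 * arc); dmu_fini() below tears them down in exactly the reverse order.
 */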
1961 void
1962 dmu_init(void)
1963 {
1964         zfs_dbgmsg_init();
1965         sa_cache_init();
1966         xuio_stat_init();
1967         dmu_objset_init();
1968         dnode_init();
1969         dbuf_init();
1970         zfetch_init();
1971         l2arc_init();
1972         arc_init();
1973 }
1974 
1975 void
1976 dmu_fini(void)
1977 {
1978         arc_fini(); /* arc depends on l2arc, so arc must go first */
1979         l2arc_fini();
1980         zfetch_fini();
1981         dbuf_fini();
1982         dnode_fini();
1983         dmu_objset_fini();
1984         xuio_stat_fini();
1985         sa_cache_fini();
1986         zfs_dbgmsg_fini();
1987 }
--- EOF ---