/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dmu_tx.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_zfetch.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>

static void dbuf_destroy(dmu_buf_impl_t *db);
static boolean_t dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx);

/*
 * Global data structures and functions for the dbuf cache.
 */
static kmem_cache_t *dbuf_cache;

/* ARGSUSED */
static int
dbuf_cons(void *vdb, void *unused, int kmflag)
{
        dmu_buf_impl_t *db = vdb;
        bzero(db, sizeof (dmu_buf_impl_t));

        mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
        cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
        refcount_create(&db->db_holds);
        return (0);
}

/* ARGSUSED */
static void
dbuf_dest(void *vdb, void *unused)
{
        dmu_buf_impl_t *db = vdb;
        mutex_destroy(&db->db_mtx);
        cv_destroy(&db->db_changed);
        refcount_destroy(&db->db_holds);
}

/*
 * dbuf hash table routines
 */
static dbuf_hash_table_t dbuf_hash_table;

static uint64_t dbuf_hash_count;

static uint64_t
dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid)
{
        uintptr_t osv = (uintptr_t)os;
        uint64_t crc = -1ULL;

        ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
        crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (lvl)) & 0xFF];
        crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (osv >> 6)) & 0xFF];
        crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 0)) & 0xFF];
        crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 8)) & 0xFF];
        crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 0)) & 0xFF];
        crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 8)) & 0xFF];

        crc ^= (osv >> 14) ^ (obj >> 16) ^ (blkid >> 16);

        return (crc);
}
#define DBUF_HASH(os, obj, level, blkid) dbuf_hash(os, obj, level, blkid)

#define DBUF_EQUAL(dbuf, os, obj, level, blkid)                 \
        ((dbuf)->db.db_object == (obj) &&                       \
        (dbuf)->db_objset == (os) &&                            \
        (dbuf)->db_level == (level) &&                          \
        (dbuf)->db_blkid == (blkid))

dmu_buf_impl_t *
dbuf_find(dnode_t *dn, uint8_t level, uint64_t blkid)
{
        dbuf_hash_table_t *h = &dbuf_hash_table;
        objset_t *os = dn->dn_objset;
        uint64_t obj = dn->dn_object;
        uint64_t hv = DBUF_HASH(os, obj, level, blkid);
        uint64_t idx = hv & h->hash_table_mask;
        dmu_buf_impl_t *db;

        mutex_enter(DBUF_HASH_MUTEX(h, idx));
        for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
                if (DBUF_EQUAL(db, os, obj, level, blkid)) {
                        mutex_enter(&db->db_mtx);
                        if (db->db_state != DB_EVICTING) {
                                mutex_exit(DBUF_HASH_MUTEX(h, idx));
                                return (db);
                        }
                        mutex_exit(&db->db_mtx);
                }
        }
        mutex_exit(DBUF_HASH_MUTEX(h, idx));
        return (NULL);
}

/*
 * Insert an entry into the hash table.  If there is already an element
 * equal to elem in the hash table, then the already existing element
 * will be returned and the new element will not be inserted.
 * Otherwise returns NULL.
 */
static dmu_buf_impl_t *
dbuf_hash_insert(dmu_buf_impl_t *db)
{
        dbuf_hash_table_t *h = &dbuf_hash_table;
        objset_t *os = db->db_objset;
        uint64_t obj = db->db.db_object;
        int level = db->db_level;
        uint64_t blkid = db->db_blkid;
        uint64_t hv = DBUF_HASH(os, obj, level, blkid);
        uint64_t idx = hv & h->hash_table_mask;
        dmu_buf_impl_t *dbf;

        mutex_enter(DBUF_HASH_MUTEX(h, idx));
        for (dbf = h->hash_table[idx]; dbf != NULL; dbf = dbf->db_hash_next) {
                if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
                        mutex_enter(&dbf->db_mtx);
                        if (dbf->db_state != DB_EVICTING) {
                                mutex_exit(DBUF_HASH_MUTEX(h, idx));
                                return (dbf);
                        }
                        mutex_exit(&dbf->db_mtx);
                }
        }

        mutex_enter(&db->db_mtx);
        db->db_hash_next = h->hash_table[idx];
        h->hash_table[idx] = db;
        mutex_exit(DBUF_HASH_MUTEX(h, idx));
        atomic_add_64(&dbuf_hash_count, 1);

        return (NULL);
}
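
/*
 * A sketch of how a caller handles a lost insertion race (compare the
 * actual dbuf_create() code, which is not shown in this file excerpt):
 * free the dbuf we just built and use the winner's instead.
 *
 *      dmu_buf_impl_t *odb;
 *
 *      if ((odb = dbuf_hash_insert(db)) != NULL) {
 *              kmem_cache_free(dbuf_cache, db);        // lost the race
 *              db = odb;
 *      }
 */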

/*
 * Remove an entry from the hash table.  This operation will
 * fail if there are any existing holds on the db.
 */
static void
dbuf_hash_remove(dmu_buf_impl_t *db)
{
        dbuf_hash_table_t *h = &dbuf_hash_table;
        uint64_t hv = DBUF_HASH(db->db_objset, db->db.db_object,
            db->db_level, db->db_blkid);
        uint64_t idx = hv & h->hash_table_mask;
        dmu_buf_impl_t *dbf, **dbp;

        /*
         * We mustn't hold db_mtx to maintain lock ordering:
         * DBUF_HASH_MUTEX > db_mtx.
         */
        ASSERT(refcount_is_zero(&db->db_holds));
        ASSERT(db->db_state == DB_EVICTING);
        ASSERT(!MUTEX_HELD(&db->db_mtx));

        mutex_enter(DBUF_HASH_MUTEX(h, idx));
        dbp = &h->hash_table[idx];
        while ((dbf = *dbp) != db) {
                dbp = &dbf->db_hash_next;
                ASSERT(dbf != NULL);
        }
        *dbp = db->db_hash_next;
        db->db_hash_next = NULL;
        mutex_exit(DBUF_HASH_MUTEX(h, idx));
        atomic_add_64(&dbuf_hash_count, -1);
}

static arc_evict_func_t dbuf_do_evict;

static void
dbuf_evict_user(dmu_buf_impl_t *db)
{
        ASSERT(MUTEX_HELD(&db->db_mtx));

        if (db->db_level != 0 || db->db_evict_func == NULL)
                return;

        if (db->db_user_data_ptr_ptr)
                *db->db_user_data_ptr_ptr = db->db.db_data;
        db->db_evict_func(&db->db, db->db_user_ptr);
        db->db_user_ptr = NULL;
        db->db_user_data_ptr_ptr = NULL;
        db->db_evict_func = NULL;
}

boolean_t
dbuf_is_metadata(dmu_buf_impl_t *db)
{
        if (db->db_level > 0) {
                return (B_TRUE);
        } else {
                boolean_t is_metadata;

                DB_DNODE_ENTER(db);
                is_metadata = DMU_OT_IS_METADATA(DB_DNODE(db)->dn_type);
                DB_DNODE_EXIT(db);

                return (is_metadata);
        }
}

void
dbuf_evict(dmu_buf_impl_t *db)
{
        ASSERT(MUTEX_HELD(&db->db_mtx));
        ASSERT(db->db_buf == NULL);
        ASSERT(db->db_data_pending == NULL);

        dbuf_clear(db);
        dbuf_destroy(db);
}

void
dbuf_init(void)
{
        uint64_t hsize = 1ULL << 16;
        dbuf_hash_table_t *h = &dbuf_hash_table;
        int i;

        /*
         * The hash table is big enough to fill all of physical memory
         * with an average 4K block size.  The table will take up
         * totalmem*sizeof(void*)/4K (i.e. 2MB/GB with 8-byte pointers).
         */
        while (hsize * 4096 < physmem * PAGESIZE)
                hsize <<= 1;
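
        /*
         * For example, with 8GB of physical memory the loop above
         * settles on 2^21 buckets: at 2MB of pointers per GB (per the
         * comment above), that is 16MB of hash-table pointers.
         */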

retry:
        h->hash_table_mask = hsize - 1;
        h->hash_table = kmem_zalloc(hsize * sizeof (void *), KM_NOSLEEP);
        if (h->hash_table == NULL) {
                /* XXX - we should really return an error instead of assert */
                ASSERT(hsize > (1ULL << 10));
                hsize >>= 1;
                goto retry;
        }

        dbuf_cache = kmem_cache_create("dmu_buf_impl_t",
            sizeof (dmu_buf_impl_t),
            0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0);

        for (i = 0; i < DBUF_MUTEXES; i++)
                mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL);
}

void
dbuf_fini(void)
{
        dbuf_hash_table_t *h = &dbuf_hash_table;
        int i;

        for (i = 0; i < DBUF_MUTEXES; i++)
                mutex_destroy(&h->hash_mutexes[i]);
        kmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
        kmem_cache_destroy(dbuf_cache);
}

/*
 * Other stuff.
 */

#ifdef ZFS_DEBUG
static void
dbuf_verify(dmu_buf_impl_t *db)
{
        dnode_t *dn;
        dbuf_dirty_record_t *dr;

        ASSERT(MUTEX_HELD(&db->db_mtx));

        if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY))
                return;

        ASSERT(db->db_objset != NULL);
        DB_DNODE_ENTER(db);
        dn = DB_DNODE(db);
        if (dn == NULL) {
                ASSERT(db->db_parent == NULL);
                ASSERT(db->db_blkptr == NULL);
        } else {
                ASSERT3U(db->db.db_object, ==, dn->dn_object);
                ASSERT3P(db->db_objset, ==, dn->dn_objset);
                ASSERT3U(db->db_level, <, dn->dn_nlevels);
                ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
                    db->db_blkid == DMU_SPILL_BLKID ||
                    !list_is_empty(&dn->dn_dbufs));
        }
        if (db->db_blkid == DMU_BONUS_BLKID) {
                ASSERT(dn != NULL);
                ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
                ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID);
        } else if (db->db_blkid == DMU_SPILL_BLKID) {
                ASSERT(dn != NULL);
                ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
                ASSERT0(db->db.db_offset);
        } else {
                ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size);
        }

        for (dr = db->db_data_pending; dr != NULL; dr = dr->dr_next)
                ASSERT(dr->dr_dbuf == db);

        for (dr = db->db_last_dirty; dr != NULL; dr = dr->dr_next)
                ASSERT(dr->dr_dbuf == db);

        /*
         * We can't assert that db_size matches dn_datablksz because it
         * can be momentarily different when another thread is doing
         * dnode_set_blksz().
         */
        if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) {
                dr = db->db_data_pending;
                /*
                 * It should only be modified in syncing context, so
                 * make sure we only have one copy of the data.
                 */
                ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf);
        }

        /* verify db->db_blkptr */
        if (db->db_blkptr) {
                if (db->db_parent == dn->dn_dbuf) {
                        /* db is pointed to by the dnode */
                        /* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */
                        if (DMU_OBJECT_IS_SPECIAL(db->db.db_object))
                                ASSERT(db->db_parent == NULL);
                        else
                                ASSERT(db->db_parent != NULL);
                        if (db->db_blkid != DMU_SPILL_BLKID)
                                ASSERT3P(db->db_blkptr, ==,
                                    &dn->dn_phys->dn_blkptr[db->db_blkid]);
                } else {
                        /* db is pointed to by an indirect block */
                        int epb = db->db_parent->db.db_size >> SPA_BLKPTRSHIFT;
                        ASSERT3U(db->db_parent->db_level, ==, db->db_level+1);
                        ASSERT3U(db->db_parent->db.db_object, ==,
                            db->db.db_object);
                        /*
                         * dnode_grow_indblksz() can make this fail if we don't
                         * have the struct_rwlock.  XXX indblksz no longer
                         * grows.  safe to do this now?
                         */
                        if (RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
                                ASSERT3P(db->db_blkptr, ==,
                                    ((blkptr_t *)db->db_parent->db.db_data +
                                    db->db_blkid % epb));
                        }
                }
        }
        if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) &&
            (db->db_buf == NULL || db->db_buf->b_data) &&
            db->db.db_data && db->db_blkid != DMU_BONUS_BLKID &&
            db->db_state != DB_FILL && !dn->dn_free_txg) {
                /*
                 * If the blkptr isn't set but they have nonzero data,
                 * it had better be dirty, otherwise we'll lose that
                 * data when we evict this buffer.
                 */
                if (db->db_dirtycnt == 0) {
                        uint64_t *buf = db->db.db_data;
                        int i;

                        for (i = 0; i < db->db.db_size >> 3; i++) {
                                ASSERT(buf[i] == 0);
                        }
                }
        }
        DB_DNODE_EXIT(db);
}
#endif

static void
dbuf_update_data(dmu_buf_impl_t *db)
{
        ASSERT(MUTEX_HELD(&db->db_mtx));
        if (db->db_level == 0 && db->db_user_data_ptr_ptr) {
                ASSERT(!refcount_is_zero(&db->db_holds));
                *db->db_user_data_ptr_ptr = db->db.db_data;
        }
}

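/*
 * Attach an arc buffer to the dbuf (or detach the current one when
 * buf is NULL), keeping db->db.db_data and any registered user data
 * pointer in sync with the buffer's storage.
 */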
static void
dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf)
{
        ASSERT(MUTEX_HELD(&db->db_mtx));
        ASSERT(db->db_buf == NULL || !arc_has_callback(db->db_buf));
        db->db_buf = buf;
        if (buf != NULL) {
                ASSERT(buf->b_data != NULL);
                db->db.db_data = buf->b_data;
                if (!arc_released(buf))
                        arc_set_callback(buf, dbuf_do_evict, db);
                dbuf_update_data(db);
        } else {
                dbuf_evict_user(db);
                db->db.db_data = NULL;
                if (db->db_state != DB_NOFILL)
                        db->db_state = DB_UNCACHED;
        }
}

/*
 * Loan out an arc_buf for read.  Return the loaned arc_buf.
 */
arc_buf_t *
dbuf_loan_arcbuf(dmu_buf_impl_t *db)
{
        arc_buf_t *abuf;

        mutex_enter(&db->db_mtx);
        if (arc_released(db->db_buf) || refcount_count(&db->db_holds) > 1) {
                int blksz = db->db.db_size;
                spa_t *spa;

                mutex_exit(&db->db_mtx);
                DB_GET_SPA(&spa, db);
                abuf = arc_loan_buf(spa, blksz);
                bcopy(db->db.db_data, abuf->b_data, blksz);
        } else {
                abuf = db->db_buf;
                arc_loan_inuse_buf(abuf, db);
                dbuf_set_data(db, NULL);
                mutex_exit(&db->db_mtx);
        }
        return (abuf);
}

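/*
 * Map a byte offset within a dnode to the corresponding block number.
 * For example, with 128K data blocks (dn_datablkshift == 17), offset
 * 300000 falls in block 2, since 300000 >> 17 == 2.
 */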
uint64_t
dbuf_whichblock(dnode_t *dn, uint64_t offset)
{
        if (dn->dn_datablkshift) {
                return (offset >> dn->dn_datablkshift);
        } else {
                ASSERT3U(offset, <, dn->dn_datablksz);
                return (0);
        }
}

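/*
 * ARC read-completion callback: on success (or after a free caught in
 * flight) install the buffer and move the dbuf to DB_CACHED; on failure
 * drop the buffer and fall back to DB_UNCACHED.  Waiters are woken in
 * either case.
 */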
static void
dbuf_read_done(zio_t *zio, arc_buf_t *buf, void *vdb)
{
        dmu_buf_impl_t *db = vdb;

        mutex_enter(&db->db_mtx);
        ASSERT3U(db->db_state, ==, DB_READ);
        /*
         * All reads are synchronous, so we must have a hold on the dbuf
         */
        ASSERT(refcount_count(&db->db_holds) > 0);
        ASSERT(db->db_buf == NULL);
        ASSERT(db->db.db_data == NULL);
        if (db->db_level == 0 && db->db_freed_in_flight) {
                /* we were freed in flight; disregard any error */
                arc_release(buf, db);
                bzero(buf->b_data, db->db.db_size);
                arc_buf_freeze(buf);
                db->db_freed_in_flight = FALSE;
                dbuf_set_data(db, buf);
                db->db_state = DB_CACHED;
        } else if (zio == NULL || zio->io_error == 0) {
                dbuf_set_data(db, buf);
                db->db_state = DB_CACHED;
        } else {
                ASSERT(db->db_blkid != DMU_BONUS_BLKID);
                ASSERT3P(db->db_buf, ==, NULL);
                VERIFY(arc_buf_remove_ref(buf, db));
                db->db_state = DB_UNCACHED;
        }
        cv_broadcast(&db->db_changed);
        dbuf_rele_and_unlock(db, NULL);
}

static void
dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t *flags)
{
        dnode_t *dn;
        spa_t *spa;
        zbookmark_t zb;
        uint32_t aflags = ARC_NOWAIT;

        DB_DNODE_ENTER(db);
        dn = DB_DNODE(db);
        ASSERT(!refcount_is_zero(&db->db_holds));
        /* We need the struct_rwlock to prevent db_blkptr from changing. */
        ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
        ASSERT(MUTEX_HELD(&db->db_mtx));
        ASSERT(db->db_state == DB_UNCACHED);
        ASSERT(db->db_buf == NULL);

        if (db->db_blkid == DMU_BONUS_BLKID) {
                int bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen);

                ASSERT3U(bonuslen, <=, db->db.db_size);
                db->db.db_data = zio_buf_alloc(DN_MAX_BONUSLEN);
                arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
                if (bonuslen < DN_MAX_BONUSLEN)
                        bzero(db->db.db_data, DN_MAX_BONUSLEN);
                if (bonuslen)
                        bcopy(DN_BONUS(dn->dn_phys), db->db.db_data, bonuslen);
                DB_DNODE_EXIT(db);
                dbuf_update_data(db);
                db->db_state = DB_CACHED;
                mutex_exit(&db->db_mtx);
                return;
        }

        /*
         * Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync()
         * processes the delete record and clears the bp while we are waiting
         * for the dn_mtx (resulting in a "no" from block_freed).
         */
        if (db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr) ||
            (db->db_level == 0 && (dnode_block_freed(dn, db->db_blkid) ||
            BP_IS_HOLE(db->db_blkptr)))) {
                arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);

                dbuf_set_data(db, arc_buf_alloc(dn->dn_objset->os_spa,
                    db->db.db_size, db, type));
                DB_DNODE_EXIT(db);
                bzero(db->db.db_data, db->db.db_size);
                db->db_state = DB_CACHED;
                *flags |= DB_RF_CACHED;
                mutex_exit(&db->db_mtx);
                return;
        }

        spa = dn->dn_objset->os_spa;
        DB_DNODE_EXIT(db);

        db->db_state = DB_READ;
        mutex_exit(&db->db_mtx);

        if (DBUF_IS_L2CACHEABLE(db))
                aflags |= ARC_L2CACHE;

        SET_BOOKMARK(&zb, db->db_objset->os_dsl_dataset ?
            db->db_objset->os_dsl_dataset->ds_object : DMU_META_OBJSET,
            db->db.db_object, db->db_level, db->db_blkid);

        dbuf_add_ref(db, NULL);

        (void) arc_read(zio, spa, db->db_blkptr,
            dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ,
            (*flags & DB_RF_CANFAIL) ? ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED,
            &aflags, &zb);
        if (aflags & ARC_CACHED)
                *flags |= DB_RF_CACHED;
}

int
dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
{
        int err = 0;
        int havepzio = (zio != NULL);
        int prefetch;
        dnode_t *dn;

        /*
         * We don't have to hold the mutex to check db_state because it
         * can't be freed while we have a hold on the buffer.
         */
        ASSERT(!refcount_is_zero(&db->db_holds));

        if (db->db_state == DB_NOFILL)
                return (SET_ERROR(EIO));

        DB_DNODE_ENTER(db);
        dn = DB_DNODE(db);
        if ((flags & DB_RF_HAVESTRUCT) == 0)
                rw_enter(&dn->dn_struct_rwlock, RW_READER);

        prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
            (flags & DB_RF_NOPREFETCH) == 0 && dn != NULL &&
            DBUF_IS_CACHEABLE(db);

        mutex_enter(&db->db_mtx);
        if (db->db_state == DB_CACHED) {
                mutex_exit(&db->db_mtx);
                if (prefetch)
                        dmu_zfetch(&dn->dn_zfetch, db->db.db_offset,
                            db->db.db_size, TRUE);
                if ((flags & DB_RF_HAVESTRUCT) == 0)
                        rw_exit(&dn->dn_struct_rwlock);
                DB_DNODE_EXIT(db);
        } else if (db->db_state == DB_UNCACHED) {
                spa_t *spa = dn->dn_objset->os_spa;

                if (zio == NULL)
                        zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
                dbuf_read_impl(db, zio, &flags);

                /* dbuf_read_impl has dropped db_mtx for us */

                if (prefetch)
                        dmu_zfetch(&dn->dn_zfetch, db->db.db_offset,
                            db->db.db_size, flags & DB_RF_CACHED);

                if ((flags & DB_RF_HAVESTRUCT) == 0)
                        rw_exit(&dn->dn_struct_rwlock);
                DB_DNODE_EXIT(db);

                if (!havepzio)
                        err = zio_wait(zio);
        } else {
                /*
                 * Another reader came in while the dbuf was in flight
                 * between UNCACHED and CACHED.  Either a writer will finish
                 * writing the buffer (sending the dbuf to CACHED) or the
                 * first reader's request will reach the read_done callback
                 * and send the dbuf to CACHED.  Otherwise, a failure
                 * occurred and the dbuf went to UNCACHED.
                 */
                mutex_exit(&db->db_mtx);
                if (prefetch)
                        dmu_zfetch(&dn->dn_zfetch, db->db.db_offset,
                            db->db.db_size, TRUE);
                if ((flags & DB_RF_HAVESTRUCT) == 0)
                        rw_exit(&dn->dn_struct_rwlock);
                DB_DNODE_EXIT(db);

                /* Skip the wait per the caller's request. */
                mutex_enter(&db->db_mtx);
                if ((flags & DB_RF_NEVERWAIT) == 0) {
                        while (db->db_state == DB_READ ||
                            db->db_state == DB_FILL) {
                                ASSERT(db->db_state == DB_READ ||
                                    (flags & DB_RF_HAVESTRUCT) == 0);
                                cv_wait(&db->db_changed, &db->db_mtx);
                        }
                        if (db->db_state == DB_UNCACHED)
                                err = SET_ERROR(EIO);
                }
                mutex_exit(&db->db_mtx);
        }

        ASSERT(err || havepzio || db->db_state == DB_CACHED);
        return (err);
}
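
/*
 * A minimal synchronous read through this interface (a sketch; the
 * exact flag combination is up to the caller) passes no parent zio,
 * so dbuf_read() creates a root zio internally and waits on it:
 *
 *      err = dbuf_read(db, NULL, DB_RF_CANFAIL | DB_RF_NOPREFETCH);
 */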
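/*
 * Arrange for the dbuf to have a writable buffer without reading the
 * old block contents: used when the caller intends to overwrite the
 * entire block, so any on-disk data would be thrown away anyway.
 */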
static void
dbuf_noread(dmu_buf_impl_t *db)
{
        ASSERT(!refcount_is_zero(&db->db_holds));
        ASSERT(db->db_blkid != DMU_BONUS_BLKID);
        mutex_enter(&db->db_mtx);
        while (db->db_state == DB_READ || db->db_state == DB_FILL)
                cv_wait(&db->db_changed, &db->db_mtx);
        if (db->db_state == DB_UNCACHED) {
                arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
                spa_t *spa;

                ASSERT(db->db_buf == NULL);
                ASSERT(db->db.db_data == NULL);
                DB_GET_SPA(&spa, db);
                dbuf_set_data(db, arc_buf_alloc(spa, db->db.db_size, db, type));
                db->db_state = DB_FILL;
        } else if (db->db_state == DB_NOFILL) {
                dbuf_set_data(db, NULL);
        } else {
                ASSERT3U(db->db_state, ==, DB_CACHED);
        }
        mutex_exit(&db->db_mtx);
}

/*
 * This is our just-in-time copy function.  It makes a copy of buffers
 * that have been modified in a previous transaction group before we
 * modify them in the current active group.
 *
 * This function is used in two places: when we are dirtying a
 * buffer for the first time in a txg, and when we are freeing
 * a range in a dnode that includes this buffer.
 *
 * Note that when we are called from dbuf_free_range() we do
 * not put a hold on the buffer; we just traverse the active
 * dbuf list for the dnode.
 */
static void
dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
{
        dbuf_dirty_record_t *dr = db->db_last_dirty;

        ASSERT(MUTEX_HELD(&db->db_mtx));
        ASSERT(db->db.db_data != NULL);
        ASSERT(db->db_level == 0);
        ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);

        if (dr == NULL ||
            (dr->dt.dl.dr_data !=
            ((db->db_blkid == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf)))
                return;

        /*
         * If the last dirty record for this dbuf has not yet synced
         * and it's referencing the dbuf data, either:
         *      reset the reference to point to a new copy,
         * or (if there are no active holders)
         *      just null out the current db_data pointer.
         */
        ASSERT(dr->dr_txg >= txg - 2);
        if (db->db_blkid == DMU_BONUS_BLKID) {
                /* Note that the data bufs here are zio_bufs */
                dr->dt.dl.dr_data = zio_buf_alloc(DN_MAX_BONUSLEN);
                arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
                bcopy(db->db.db_data, dr->dt.dl.dr_data, DN_MAX_BONUSLEN);
        } else if (refcount_count(&db->db_holds) > db->db_dirtycnt) {
                int size = db->db.db_size;
                arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
                spa_t *spa;

                DB_GET_SPA(&spa, db);
                dr->dt.dl.dr_data = arc_buf_alloc(spa, size, db, type);
                bcopy(db->db.db_data, dr->dt.dl.dr_data->b_data, size);
        } else {
                dbuf_set_data(db, NULL);
        }
}

void
dbuf_unoverride(dbuf_dirty_record_t *dr)
{
        dmu_buf_impl_t *db = dr->dr_dbuf;
        blkptr_t *bp = &dr->dt.dl.dr_overridden_by;
        uint64_t txg = dr->dr_txg;

        ASSERT(MUTEX_HELD(&db->db_mtx));
        ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC);
        ASSERT(db->db_level == 0);

        if (db->db_blkid == DMU_BONUS_BLKID ||
            dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN)
                return;

        ASSERT(db->db_data_pending != dr);

        /* free this block */
        if (!BP_IS_HOLE(bp) && !dr->dt.dl.dr_nopwrite) {
                spa_t *spa;

                DB_GET_SPA(&spa, db);
                zio_free(spa, txg, bp);
        }
        dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
        dr->dt.dl.dr_nopwrite = B_FALSE;

        /*
         * Release the already-written buffer, so we leave it in
         * a consistent dirty state.  Note that all callers are
         * modifying the buffer, so they will immediately do
         * another (redundant) arc_release().  Therefore, leave
         * the buf thawed to save the effort of freezing &
         * immediately re-thawing it.
         */
        arc_release(dr->dt.dl.dr_data, db);
}

/*
 * Evict (if it's unreferenced) or clear (if it's referenced) any level-0
 * data blocks in the free range, so that any future readers will find
 * empty blocks.  Also, if we happen across any level-1 dbufs in the
 * range that have not already been marked dirty, mark them dirty so
 * they stay in memory.
 */
void
dbuf_free_range(dnode_t *dn, uint64_t start, uint64_t end, dmu_tx_t *tx)
{
        dmu_buf_impl_t *db, *db_next;
        uint64_t txg = tx->tx_txg;
        int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
        uint64_t first_l1 = start >> epbs;
        uint64_t last_l1 = end >> epbs;

        if (end > dn->dn_maxblkid && (end != DMU_SPILL_BLKID)) {
                end = dn->dn_maxblkid;
                last_l1 = end >> epbs;
        }
        dprintf_dnode(dn, "start=%llu end=%llu\n", start, end);
        mutex_enter(&dn->dn_dbufs_mtx);
        for (db = list_head(&dn->dn_dbufs); db; db = db_next) {
                db_next = list_next(&dn->dn_dbufs, db);
                ASSERT(db->db_blkid != DMU_BONUS_BLKID);

                if (db->db_level == 1 &&
                    db->db_blkid >= first_l1 && db->db_blkid <= last_l1) {
                        mutex_enter(&db->db_mtx);
                        if (db->db_last_dirty &&
                            db->db_last_dirty->dr_txg < txg) {
                                dbuf_add_ref(db, FTAG);
                                mutex_exit(&db->db_mtx);
                                dbuf_will_dirty(db, tx);
                                dbuf_rele(db, FTAG);
                        } else {
                                mutex_exit(&db->db_mtx);
                        }
                }

                if (db->db_level != 0)
                        continue;
                dprintf_dbuf(db, "found buf %s\n", "");
                if (db->db_blkid < start || db->db_blkid > end)
                        continue;

                /* found a level 0 buffer in the range */
                mutex_enter(&db->db_mtx);
                if (dbuf_undirty(db, tx)) {
                        /* mutex has been dropped and dbuf destroyed */
                        continue;
                }

                if (db->db_state == DB_UNCACHED ||
                    db->db_state == DB_NOFILL ||
                    db->db_state == DB_EVICTING) {
                        ASSERT(db->db.db_data == NULL);
                        mutex_exit(&db->db_mtx);
                        continue;
                }
                if (db->db_state == DB_READ || db->db_state == DB_FILL) {
                        /* will be handled in dbuf_read_done or dbuf_rele */
                        db->db_freed_in_flight = TRUE;
                        mutex_exit(&db->db_mtx);
                        continue;
                }
                if (refcount_count(&db->db_holds) == 0) {
                        ASSERT(db->db_buf);
                        dbuf_clear(db);
                        continue;
                }
                /* The dbuf is referenced */

                if (db->db_last_dirty != NULL) {
                        dbuf_dirty_record_t *dr = db->db_last_dirty;

                        if (dr->dr_txg == txg) {
                                /*
                                 * This buffer is "in-use", re-adjust the file
                                 * size to reflect that this buffer may
                                 * contain new data when we sync.
                                 */
                                if (db->db_blkid != DMU_SPILL_BLKID &&
                                    db->db_blkid > dn->dn_maxblkid)
                                        dn->dn_maxblkid = db->db_blkid;
                                dbuf_unoverride(dr);
                        } else {
                                /*
                                 * This dbuf is not dirty in the open context.
                                 * Either uncache it (if it's not referenced in
                                 * the open context) or reset its contents to
                                 * empty.
                                 */
                                dbuf_fix_old_data(db, txg);
                        }
                }
                /* clear the contents if it's cached */
                if (db->db_state == DB_CACHED) {
                        ASSERT(db->db.db_data != NULL);
                        arc_release(db->db_buf, db);
                        bzero(db->db.db_data, db->db.db_size);
                        arc_buf_freeze(db->db_buf);
                }

                mutex_exit(&db->db_mtx);
        }
        mutex_exit(&dn->dn_dbufs_mtx);
}

static int
dbuf_block_freeable(dmu_buf_impl_t *db)
{
        dsl_dataset_t *ds = db->db_objset->os_dsl_dataset;
        uint64_t birth_txg = 0;

        /*
         * We don't need any locking to protect db_blkptr:
         * If it's syncing, then db_last_dirty will be set
         * so we'll ignore db_blkptr.
         */
        ASSERT(MUTEX_HELD(&db->db_mtx));
        if (db->db_last_dirty)
                birth_txg = db->db_last_dirty->dr_txg;
        else if (db->db_blkptr)
                birth_txg = db->db_blkptr->blk_birth;

        /*
         * If we don't exist or are in a snapshot, we can't be freed.
         * Don't pass the bp to dsl_dataset_block_freeable() since we
         * are holding the db_mtx lock and might deadlock if we are
         * prefetching a dedup-ed block.
         */
        if (birth_txg)
                return (ds == NULL ||
                    dsl_dataset_block_freeable(ds, NULL, birth_txg));
        else
                return (FALSE);
}

void
dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx)
{
        arc_buf_t *buf, *obuf;
        int osize = db->db.db_size;
        arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
        dnode_t *dn;

        ASSERT(db->db_blkid != DMU_BONUS_BLKID);

        DB_DNODE_ENTER(db);
        dn = DB_DNODE(db);

        /* XXX does *this* func really need the lock? */
        ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));

        /*
         * This call to dbuf_will_dirty() with the dn_struct_rwlock held
         * is OK, because there can be no other references to the db
         * when we are changing its size, so no concurrent DB_FILL can
         * be happening.
         */
        /*
         * XXX we should be doing a dbuf_read, checking the return
         * value and returning that up to our callers
         */
        dbuf_will_dirty(db, tx);

        /* create the data buffer for the new block */
        buf = arc_buf_alloc(dn->dn_objset->os_spa, size, db, type);

        /* copy old block data to the new block */
        obuf = db->db_buf;
        bcopy(obuf->b_data, buf->b_data, MIN(osize, size));
        /* zero the remainder */
        if (size > osize)
                bzero((uint8_t *)buf->b_data + osize, size - osize);

        mutex_enter(&db->db_mtx);
        dbuf_set_data(db, buf);
        VERIFY(arc_buf_remove_ref(obuf, db));
        db->db.db_size = size;

        if (db->db_level == 0) {
                ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg);
                db->db_last_dirty->dt.dl.dr_data = buf;
        }
        mutex_exit(&db->db_mtx);

        dnode_willuse_space(dn, size-osize, tx);
        DB_DNODE_EXIT(db);
}

void
dbuf_release_bp(dmu_buf_impl_t *db)
{
        objset_t *os;

        DB_GET_OBJSET(&os, db);
        ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
        ASSERT(arc_released(os->os_phys_buf) ||
            list_link_active(&os->os_dsl_dataset->ds_synced_link));
        ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf));

        (void) arc_release(db->db_buf, db);
}

dbuf_dirty_record_t *
dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
        dnode_t *dn;
        objset_t *os;
        dbuf_dirty_record_t **drp, *dr;
        int drop_struct_lock = FALSE;
        boolean_t do_free_accounting = B_FALSE;
        int txgoff = tx->tx_txg & TXG_MASK;

        ASSERT(tx->tx_txg != 0);
        ASSERT(!refcount_is_zero(&db->db_holds));
        DMU_TX_DIRTY_BUF(tx, db);

        DB_DNODE_ENTER(db);
        dn = DB_DNODE(db);
        /*
         * Shouldn't dirty a regular buffer in syncing context.  Private
         * objects may be dirtied in syncing context, but only if they
         * were already pre-dirtied in open context.
         */
        ASSERT(!dmu_tx_is_syncing(tx) ||
            BP_IS_HOLE(dn->dn_objset->os_rootbp) ||
            DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
            dn->dn_objset->os_dsl_dataset == NULL);
        /*
         * We make this assert for private objects as well, but after we
         * check if we're already dirty.  They are allowed to re-dirty
         * in syncing context.
         */
        ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
            dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
            (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));

        mutex_enter(&db->db_mtx);
        /*
         * XXX make this true for indirects too?  The problem is that
         * transactions created with dmu_tx_create_assigned() from
         * syncing context don't bother holding ahead.
         */
        ASSERT(db->db_level != 0 ||
            db->db_state == DB_CACHED || db->db_state == DB_FILL ||
            db->db_state == DB_NOFILL);

        mutex_enter(&dn->dn_mtx);
        /*
         * Don't set dirtyctx to SYNC if we're just modifying this as we
         * initialize the objset.
         */
        if (dn->dn_dirtyctx == DN_UNDIRTIED &&
            !BP_IS_HOLE(dn->dn_objset->os_rootbp)) {
                dn->dn_dirtyctx =
                    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN);
                ASSERT(dn->dn_dirtyctx_firstset == NULL);
                dn->dn_dirtyctx_firstset = kmem_alloc(1, KM_SLEEP);
        }
        mutex_exit(&dn->dn_mtx);

        if (db->db_blkid == DMU_SPILL_BLKID)
                dn->dn_have_spill = B_TRUE;

        /*
         * If this buffer is already dirty, we're done.
         */
        drp = &db->db_last_dirty;
        ASSERT(*drp == NULL || (*drp)->dr_txg <= tx->tx_txg ||
            db->db.db_object == DMU_META_DNODE_OBJECT);
        while ((dr = *drp) != NULL && dr->dr_txg > tx->tx_txg)
                drp = &dr->dr_next;
        if (dr && dr->dr_txg == tx->tx_txg) {
                DB_DNODE_EXIT(db);

                if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) {
                        /*
                         * If this buffer has already been written out,
                         * we now need to reset its state.
                         */
                        dbuf_unoverride(dr);
                        if (db->db.db_object != DMU_META_DNODE_OBJECT &&
                            db->db_state != DB_NOFILL)
                                arc_buf_thaw(db->db_buf);
                }
                mutex_exit(&db->db_mtx);
                return (dr);
        }

        /*
         * Only valid if not already dirty.
         */
        ASSERT(dn->dn_object == 0 ||
            dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
            (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));

        ASSERT3U(dn->dn_nlevels, >, db->db_level);
        ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) ||
            dn->dn_phys->dn_nlevels > db->db_level ||
            dn->dn_next_nlevels[txgoff] > db->db_level ||
            dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level ||
            dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level);

        /*
         * We should only be dirtying in syncing context if it's the
         * mos or we're initializing the os or it's a special object.
         * However, we are allowed to dirty in syncing context provided
         * we already dirtied it in open context.  Hence we must make
         * this assertion only if we're not already dirty.
         */
        os = dn->dn_objset;
        ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
            os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp));
        ASSERT(db->db.db_size != 0);

        dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);

        if (db->db_blkid != DMU_BONUS_BLKID) {
                /*
                 * Update the accounting.
                 * Note: we delay "free accounting" until after we drop
                 * the db_mtx.  This keeps us from grabbing other locks
                 * (and possibly deadlocking) in bp_get_dsize() while
                 * also holding the db_mtx.
                 */
                dnode_willuse_space(dn, db->db.db_size, tx);
                do_free_accounting = dbuf_block_freeable(db);
        }

        /*
         * If this buffer is dirty in an old transaction group we need
         * to make a copy of it so that the changes we make in this
         * transaction group won't leak out when we sync the older txg.
         */
        dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP);
        if (db->db_level == 0) {
                void *data_old = db->db_buf;

                if (db->db_state != DB_NOFILL) {
                        if (db->db_blkid == DMU_BONUS_BLKID) {
                                dbuf_fix_old_data(db, tx->tx_txg);
                                data_old = db->db.db_data;
                        } else if (db->db.db_object != DMU_META_DNODE_OBJECT) {
                                /*
                                 * Release the data buffer from the cache so
                                 * that we can modify it without impacting
                                 * possible other users of this cached data
                                 * block.  Note that indirect blocks and
                                 * private objects are not released until the
                                 * syncing state (since they are only modified
                                 * then).
                                 */
                                arc_release(db->db_buf, db);
                                dbuf_fix_old_data(db, tx->tx_txg);
                                data_old = db->db_buf;
                        }
                        ASSERT(data_old != NULL);
                }
                dr->dt.dl.dr_data = data_old;
        } else {
                mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_DEFAULT, NULL);
                list_create(&dr->dt.di.dr_children,
                    sizeof (dbuf_dirty_record_t),
                    offsetof(dbuf_dirty_record_t, dr_dirty_node));
        }
        dr->dr_dbuf = db;
        dr->dr_txg = tx->tx_txg;
        dr->dr_next = *drp;
        *drp = dr;

        /*
         * We could have been freed_in_flight between the dbuf_noread
         * and dbuf_dirty.  We win, as though the dbuf_noread() had
         * happened after the free.
         */
        if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
            db->db_blkid != DMU_SPILL_BLKID) {
                mutex_enter(&dn->dn_mtx);
                dnode_clear_range(dn, db->db_blkid, 1, tx);
                mutex_exit(&dn->dn_mtx);
                db->db_freed_in_flight = FALSE;
        }

        /*
         * This buffer is now part of this txg
         */
        dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg);
        db->db_dirtycnt += 1;
        ASSERT3U(db->db_dirtycnt, <=, 3);

        mutex_exit(&db->db_mtx);

        if (db->db_blkid == DMU_BONUS_BLKID ||
            db->db_blkid == DMU_SPILL_BLKID) {
                mutex_enter(&dn->dn_mtx);
                ASSERT(!list_link_active(&dr->dr_dirty_node));
                list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
                mutex_exit(&dn->dn_mtx);
                dnode_setdirty(dn, tx);
                DB_DNODE_EXIT(db);
                return (dr);
        } else if (do_free_accounting) {
                blkptr_t *bp = db->db_blkptr;
                int64_t willfree = (bp && !BP_IS_HOLE(bp)) ?
                    bp_get_dsize(os->os_spa, bp) : db->db.db_size;
                /*
                 * This is only a guess -- if the dbuf is dirty
                 * in a previous txg, we don't know how much
                 * space it will use on disk yet.  We should
                 * really have the struct_rwlock to access
                 * db_blkptr, but since this is just a guess,
                 * it's OK if we get an odd answer.
                 */
                ddt_prefetch(os->os_spa, bp);
                dnode_willuse_space(dn, -willfree, tx);
        }

        if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
                rw_enter(&dn->dn_struct_rwlock, RW_READER);
                drop_struct_lock = TRUE;
        }

        if (db->db_level == 0) {
                dnode_new_blkid(dn, db->db_blkid, tx, drop_struct_lock);
                ASSERT(dn->dn_maxblkid >= db->db_blkid);
        }

        if (db->db_level+1 < dn->dn_nlevels) {
                dmu_buf_impl_t *parent = db->db_parent;
                dbuf_dirty_record_t *di;
                int parent_held = FALSE;

                if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) {
                        int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;

                        parent = dbuf_hold_level(dn, db->db_level+1,
                            db->db_blkid >> epbs, FTAG);
                        ASSERT(parent != NULL);
                        parent_held = TRUE;
                }
                if (drop_struct_lock)
                        rw_exit(&dn->dn_struct_rwlock);
                ASSERT3U(db->db_level+1, ==, parent->db_level);
                di = dbuf_dirty(parent, tx);
                if (parent_held)
                        dbuf_rele(parent, FTAG);

                mutex_enter(&db->db_mtx);
                /* possible race with dbuf_undirty() */
                if (db->db_last_dirty == dr ||
                    dn->dn_object == DMU_META_DNODE_OBJECT) {
                        mutex_enter(&di->dt.di.dr_mtx);
                        ASSERT3U(di->dr_txg, ==, tx->tx_txg);
                        ASSERT(!list_link_active(&dr->dr_dirty_node));
                        list_insert_tail(&di->dt.di.dr_children, dr);
                        mutex_exit(&di->dt.di.dr_mtx);
                        dr->dr_parent = di;
                }
                mutex_exit(&db->db_mtx);
        } else {
                ASSERT(db->db_level+1 == dn->dn_nlevels);
                ASSERT(db->db_blkid < dn->dn_nblkptr);
                ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf);
                mutex_enter(&dn->dn_mtx);
                ASSERT(!list_link_active(&dr->dr_dirty_node));
                list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
                mutex_exit(&dn->dn_mtx);
                if (drop_struct_lock)
                        rw_exit(&dn->dn_struct_rwlock);
        }

        dnode_setdirty(dn, tx);
        DB_DNODE_EXIT(db);
        return (dr);
}

/*
 * Undirty a buffer in the transaction group referenced by the given
 * transaction.  Return whether this evicted the dbuf.
 */
static boolean_t
dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
        dnode_t *dn;
        uint64_t txg = tx->tx_txg;
        dbuf_dirty_record_t *dr, **drp;

        ASSERT(txg != 0);
        ASSERT(db->db_blkid != DMU_BONUS_BLKID);
        ASSERT0(db->db_level);
        ASSERT(MUTEX_HELD(&db->db_mtx));

        /*
         * If this buffer is not dirty, we're done.
         */
        for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next)
                if (dr->dr_txg <= txg)
                        break;
        if (dr == NULL || dr->dr_txg < txg)
                return (B_FALSE);
        ASSERT(dr->dr_txg == txg);
        ASSERT(dr->dr_dbuf == db);

        DB_DNODE_ENTER(db);
        dn = DB_DNODE(db);

        /*
         * Note:  This code will probably work even if there are concurrent
         * holders, but it is untested in that scenario, as the ZPL and
         * ztest have additional locking (the range locks) that prevents
         * that type of concurrent access.
         */
1308         ASSERT3U(refcount_count(&db->db_holds), ==, db->db_dirtycnt);
1309 
1310         dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
1311 
1312         ASSERT(db->db.db_size != 0);
1313 
1314         /* XXX would be nice to fix up dn_towrite_space[] */
1315 
1316         *drp = dr->dr_next;
1317 
1318         /*
1319          * Note that there are three places in dbuf_dirty()
1320          * where this dirty record may be put on a list.
1321          * Make sure to do a list_remove corresponding to
1322          * every one of those list_insert calls.
1323          */
1324         if (dr->dr_parent) {
1325                 mutex_enter(&dr->dr_parent->dt.di.dr_mtx);
1326                 list_remove(&dr->dr_parent->dt.di.dr_children, dr);
1327                 mutex_exit(&dr->dr_parent->dt.di.dr_mtx);
1328         } else if (db->db_blkid == DMU_SPILL_BLKID ||
1329             db->db_level+1 == dn->dn_nlevels) {
1330                 ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf);
1331                 mutex_enter(&dn->dn_mtx);
1332                 list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr);
1333                 mutex_exit(&dn->dn_mtx);
1334         }
1335         DB_DNODE_EXIT(db);
1336 
1337         if (db->db_state != DB_NOFILL) {
1338                 dbuf_unoverride(dr);
1339 
1340                 ASSERT(db->db_buf != NULL);
1341                 ASSERT(dr->dt.dl.dr_data != NULL);
1342                 if (dr->dt.dl.dr_data != db->db_buf)
1343                         VERIFY(arc_buf_remove_ref(dr->dt.dl.dr_data, db));
1344         }
1345         kmem_free(dr, sizeof (dbuf_dirty_record_t));
1346 
1347         ASSERT(db->db_dirtycnt > 0);
1348         db->db_dirtycnt -= 1;
1349 
1350         if (refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) {
1351                 arc_buf_t *buf = db->db_buf;
1352 
1353                 ASSERT(db->db_state == DB_NOFILL || arc_released(buf));
1354                 dbuf_set_data(db, NULL);
1355                 VERIFY(arc_buf_remove_ref(buf, db));
1356                 dbuf_evict(db);
1357                 return (B_TRUE);
1358         }
1359 
1360         return (B_FALSE);
1361 }
1362 
1363 #pragma weak dmu_buf_will_dirty = dbuf_will_dirty
1364 void
1365 dbuf_will_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
1366 {
1367         int rf = DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH;
1368 
1369         ASSERT(tx->tx_txg != 0);
1370         ASSERT(!refcount_is_zero(&db->db_holds));
1371 
1372         DB_DNODE_ENTER(db);
1373         if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock))
1374                 rf |= DB_RF_HAVESTRUCT;
1375         DB_DNODE_EXIT(db);
1376         (void) dbuf_read(db, NULL, rf);
1377         (void) dbuf_dirty(db, tx);
1378 }
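
/*
 * Illustrative sketch (not part of the build): the usual pattern for an
 * in-place modification through the DMU, assuming `db' is a held dbuf
 * and `tx' has already been assigned:
 *
 *	dmu_buf_will_dirty(&db->db, tx);
 *	... modify db->db.db_data in place ...
 *
 * The dbuf_read() in dbuf_will_dirty() guarantees db_data is valid
 * before the caller scribbles on it.
 */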
1379 
1380 void
1381 dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
1382 {
1383         dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
1384 
1385         db->db_state = DB_NOFILL;
1386 
1387         dmu_buf_will_fill(db_fake, tx);
1388 }
1389 
1390 void
1391 dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
1392 {
1393         dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
1394 
1395         ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1396         ASSERT(tx->tx_txg != 0);
1397         ASSERT(db->db_level == 0);
1398         ASSERT(!refcount_is_zero(&db->db_holds));
1399 
1400         ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT ||
1401             dmu_tx_private_ok(tx));
1402 
1403         dbuf_noread(db);
1404         (void) dbuf_dirty(db, tx);
1405 }
1406 
1407 #pragma weak dmu_buf_fill_done = dbuf_fill_done
1408 /* ARGSUSED */
1409 void
1410 dbuf_fill_done(dmu_buf_impl_t *db, dmu_tx_t *tx)
1411 {
1412         mutex_enter(&db->db_mtx);
1413         DBUF_VERIFY(db);
1414 
1415         if (db->db_state == DB_FILL) {
1416                 if (db->db_level == 0 && db->db_freed_in_flight) {
1417                         ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1418                         /* we were freed while filling */
1419                         /* XXX dbuf_undirty? */
1420                         bzero(db->db.db_data, db->db.db_size);
1421                         db->db_freed_in_flight = FALSE;
1422                 }
1423                 db->db_state = DB_CACHED;
1424                 cv_broadcast(&db->db_changed);
1425         }
1426         mutex_exit(&db->db_mtx);
1427 }
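
/*
 * Illustrative sketch (not part of the build): the fill protocol skips
 * the read-before-write when the whole buffer will be overwritten,
 * assuming `db' is held and `tx' has been assigned:
 *
 *	dmu_buf_will_fill(&db->db, tx);		-- state -> DB_FILL
 *	bcopy(src, db->db.db_data, db->db.db_size);
 *	dmu_buf_fill_done(&db->db, tx);		-- state -> DB_CACHED
 *
 * dbuf_fill_done() also wakes any threads blocked on db_changed.
 */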
1428 
1429 /*
1430  * Directly assign a provided arc buf to a given dbuf if it's not referenced
1431  * by anybody except our caller. Otherwise copy arcbuf's contents to dbuf.
1432  */
1433 void
1434 dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
1435 {
1436         ASSERT(!refcount_is_zero(&db->db_holds));
1437         ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1438         ASSERT(db->db_level == 0);
1439         ASSERT(DBUF_GET_BUFC_TYPE(db) == ARC_BUFC_DATA);
1440         ASSERT(buf != NULL);
1441         ASSERT(arc_buf_size(buf) == db->db.db_size);
1442         ASSERT(tx->tx_txg != 0);
1443 
1444         arc_return_buf(buf, db);
1445         ASSERT(arc_released(buf));
1446 
1447         mutex_enter(&db->db_mtx);
1448 
1449         while (db->db_state == DB_READ || db->db_state == DB_FILL)
1450                 cv_wait(&db->db_changed, &db->db_mtx);
1451 
1452         ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED);
1453 
1454         if (db->db_state == DB_CACHED &&
1455             refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) {
1456                 mutex_exit(&db->db_mtx);
1457                 (void) dbuf_dirty(db, tx);
1458                 bcopy(buf->b_data, db->db.db_data, db->db.db_size);
1459                 VERIFY(arc_buf_remove_ref(buf, db));
1460                 xuio_stat_wbuf_copied();
1461                 return;
1462         }
1463 
1464         xuio_stat_wbuf_nocopy();
1465         if (db->db_state == DB_CACHED) {
1466                 dbuf_dirty_record_t *dr = db->db_last_dirty;
1467 
1468                 ASSERT(db->db_buf != NULL);
1469                 if (dr != NULL && dr->dr_txg == tx->tx_txg) {
1470                         ASSERT(dr->dt.dl.dr_data == db->db_buf);
1471                         if (!arc_released(db->db_buf)) {
1472                                 ASSERT(dr->dt.dl.dr_override_state ==
1473                                     DR_OVERRIDDEN);
1474                                 arc_release(db->db_buf, db);
1475                         }
1476                         dr->dt.dl.dr_data = buf;
1477                         VERIFY(arc_buf_remove_ref(db->db_buf, db));
1478                 } else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) {
1479                         arc_release(db->db_buf, db);
1480                         VERIFY(arc_buf_remove_ref(db->db_buf, db));
1481                 }
1482                 db->db_buf = NULL;
1483         }
1484         ASSERT(db->db_buf == NULL);
1485         dbuf_set_data(db, buf);
1486         db->db_state = DB_FILL;
1487         mutex_exit(&db->db_mtx);
1488         (void) dbuf_dirty(db, tx);
1489         dbuf_fill_done(db, tx);
1490 }
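
/*
 * Illustrative sketch (not part of the build): the zero-copy write path
 * that lands here, assuming the dmu_request_arcbuf()/dmu_assign_arcbuf()
 * wrappers in dmu.c:
 *
 *	arc_buf_t *abuf = dmu_request_arcbuf(bonus_db, size);
 *	... fill abuf->b_data ...
 *	dmu_assign_arcbuf(bonus_db, offset, abuf, tx);
 *
 * If the target dbuf still has extra holds, the contents are bcopy()ed
 * and the loaned buf is returned to the ARC; otherwise the buf is
 * adopted in place, avoiding the copy.
 */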
1491 
1492 /*
1493  * "Clear" the contents of this dbuf.  This will mark the dbuf
1494  * EVICTING and clear *most* of its references.  Unfortunately,
1495  * when we are not holding the dn_dbufs_mtx, we can't clear the
1496  * entry in the dn_dbufs list.  We have to wait until dbuf_destroy()
1497  * in this case.  For callers from the DMU we will usually see:
1498  *      dbuf_clear()->arc_buf_evict()->dbuf_do_evict()->dbuf_destroy()
1499  * For the arc callback, we will usually see:
1500  *      dbuf_do_evict()->dbuf_clear();dbuf_destroy()
1501  * Sometimes, though, we will get a mix of these two:
1502  *      DMU: dbuf_clear()->arc_buf_evict()
1503  *      ARC: dbuf_do_evict()->dbuf_destroy()
1504  */
1505 void
1506 dbuf_clear(dmu_buf_impl_t *db)
1507 {
1508         dnode_t *dn;
1509         dmu_buf_impl_t *parent = db->db_parent;
1510         dmu_buf_impl_t *dndb;
1511         int dbuf_gone = FALSE;
1512 
1513         ASSERT(MUTEX_HELD(&db->db_mtx));
1514         ASSERT(refcount_is_zero(&db->db_holds));
1515 
1516         dbuf_evict_user(db);
1517 
1518         if (db->db_state == DB_CACHED) {
1519                 ASSERT(db->db.db_data != NULL);
1520                 if (db->db_blkid == DMU_BONUS_BLKID) {
1521                         zio_buf_free(db->db.db_data, DN_MAX_BONUSLEN);
1522                         arc_space_return(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
1523                 }
1524                 db->db.db_data = NULL;
1525                 db->db_state = DB_UNCACHED;
1526         }
1527 
1528         ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
1529         ASSERT(db->db_data_pending == NULL);
1530 
1531         db->db_state = DB_EVICTING;
1532         db->db_blkptr = NULL;
1533 
1534         DB_DNODE_ENTER(db);
1535         dn = DB_DNODE(db);
1536         dndb = dn->dn_dbuf;
1537         if (db->db_blkid != DMU_BONUS_BLKID && MUTEX_HELD(&dn->dn_dbufs_mtx)) {
1538                 list_remove(&dn->dn_dbufs, db);
1539                 (void) atomic_dec_32_nv(&dn->dn_dbufs_count);
1540                 membar_producer();
1541                 DB_DNODE_EXIT(db);
1542                 /*
1543                  * Decrementing the dbuf count means that the hold corresponding
1544                  * to the removed dbuf is no longer discounted in dnode_move(),
1545                  * so the dnode cannot be moved until after we release the hold.
1546                  * The membar_producer() ensures visibility of the decremented
1547                  * value in dnode_move(), since DB_DNODE_EXIT doesn't actually
1548                  * release any lock.
1549                  */
1550                 dnode_rele(dn, db);
1551                 db->db_dnode_handle = NULL;
1552         } else {
1553                 DB_DNODE_EXIT(db);
1554         }
1555 
1556         if (db->db_buf)
1557                 dbuf_gone = arc_buf_evict(db->db_buf);
1558 
1559         if (!dbuf_gone)
1560                 mutex_exit(&db->db_mtx);
1561 
1562         /*
1563          * If this dbuf is referenced from an indirect dbuf,
1564          * decrement the ref count on the indirect dbuf.
1565          */
1566         if (parent && parent != dndb)
1567                 dbuf_rele(parent, db);
1568 }
1569 
1570 static int
1571 dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse,
1572     dmu_buf_impl_t **parentp, blkptr_t **bpp)
1573 {
1574         int nlevels, epbs;
1575 
1576         *parentp = NULL;
1577         *bpp = NULL;
1578 
1579         ASSERT(blkid != DMU_BONUS_BLKID);
1580 
1581         if (blkid == DMU_SPILL_BLKID) {
1582                 mutex_enter(&dn->dn_mtx);
1583                 if (dn->dn_have_spill &&
1584                     (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
1585                         *bpp = &dn->dn_phys->dn_spill;
1586                 else
1587                         *bpp = NULL;
1588                 dbuf_add_ref(dn->dn_dbuf, NULL);
1589                 *parentp = dn->dn_dbuf;
1590                 mutex_exit(&dn->dn_mtx);
1591                 return (0);
1592         }
1593 
1594         if (dn->dn_phys->dn_nlevels == 0)
1595                 nlevels = 1;
1596         else
1597                 nlevels = dn->dn_phys->dn_nlevels;
1598 
1599         epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
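        /*
         * For example, with the default 16K indirect block size
         * (dn_indblkshift == 14) and SPA_BLKPTRSHIFT == 7, epbs == 7:
         * each indirect block holds 128 blkptrs, so the parent of
         * block `blkid' lives at blkid >> epbs one level up, in slot
         * blkid & ((1ULL << epbs) - 1).
         */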
1600 
1601         ASSERT3U(level * epbs, <, 64);
1602         ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
1603         if (level >= nlevels ||
1604             (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) {
1605                 /* the buffer has no parent yet */
1606                 return (SET_ERROR(ENOENT));
1607         } else if (level < nlevels-1) {
1608                 /* this block is referenced from an indirect block */
1609                 int err = dbuf_hold_impl(dn, level+1,
1610                     blkid >> epbs, fail_sparse, NULL, parentp);
1611                 if (err)
1612                         return (err);
1613                 err = dbuf_read(*parentp, NULL,
1614                     (DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL));
1615                 if (err) {
1616                         dbuf_rele(*parentp, NULL);
1617                         *parentp = NULL;
1618                         return (err);
1619                 }
1620                 *bpp = ((blkptr_t *)(*parentp)->db.db_data) +
1621                     (blkid & ((1ULL << epbs) - 1));
1622                 return (0);
1623         } else {
1624                 /* the block is referenced from the dnode */
1625                 ASSERT3U(level, ==, nlevels-1);
1626                 ASSERT(dn->dn_phys->dn_nblkptr == 0 ||
1627                     blkid < dn->dn_phys->dn_nblkptr);
1628                 if (dn->dn_dbuf) {
1629                         dbuf_add_ref(dn->dn_dbuf, NULL);
1630                         *parentp = dn->dn_dbuf;
1631                 }
1632                 *bpp = &dn->dn_phys->dn_blkptr[blkid];
1633                 return (0);
1634         }
1635 }
1636 
1637 static dmu_buf_impl_t *
1638 dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
1639     dmu_buf_impl_t *parent, blkptr_t *blkptr)
1640 {
1641         objset_t *os = dn->dn_objset;
1642         dmu_buf_impl_t *db, *odb;
1643 
1644         ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
1645         ASSERT(dn->dn_type != DMU_OT_NONE);
1646 
1647         db = kmem_cache_alloc(dbuf_cache, KM_SLEEP);
1648 
1649         db->db_objset = os;
1650         db->db.db_object = dn->dn_object;
1651         db->db_level = level;
1652         db->db_blkid = blkid;
1653         db->db_last_dirty = NULL;
1654         db->db_dirtycnt = 0;
1655         db->db_dnode_handle = dn->dn_handle;
1656         db->db_parent = parent;
1657         db->db_blkptr = blkptr;
1658 
1659         db->db_user_ptr = NULL;
1660         db->db_user_data_ptr_ptr = NULL;
1661         db->db_evict_func = NULL;
1662         db->db_immediate_evict = 0;
1663         db->db_freed_in_flight = 0;
1664 
1665         if (blkid == DMU_BONUS_BLKID) {
1666                 ASSERT3P(parent, ==, dn->dn_dbuf);
1667                 db->db.db_size = DN_MAX_BONUSLEN -
1668                     (dn->dn_nblkptr-1) * sizeof (blkptr_t);
1669                 ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
1670                 db->db.db_offset = DMU_BONUS_BLKID;
1671                 db->db_state = DB_UNCACHED;
1672                 /* the bonus dbuf is not placed in the hash table */
1673                 arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);
1674                 return (db);
1675         } else if (blkid == DMU_SPILL_BLKID) {
1676                 db->db.db_size = (blkptr != NULL) ?
1677                     BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE;
1678                 db->db.db_offset = 0;
1679         } else {
1680                 int blocksize =
1681                     db->db_level ? 1<<dn->dn_indblkshift : dn->dn_datablksz;
1682                 db->db.db_size = blocksize;
1683                 db->db.db_offset = db->db_blkid * blocksize;
1684         }
1685 
1686         /*
1687          * Hold the dn_dbufs_mtx while we insert the new dbuf
1688          * into the hash table *and* add it to the dn_dbufs list.
1689          * This prevents a possible deadlock with someone
1690          * trying to look up this dbuf before it's added to the
1691          * dn_dbufs list.
1692          */
1693         mutex_enter(&dn->dn_dbufs_mtx);
1694         db->db_state = DB_EVICTING;
1695         if ((odb = dbuf_hash_insert(db)) != NULL) {
1696                 /* someone else inserted it first */
1697                 kmem_cache_free(dbuf_cache, db);
1698                 mutex_exit(&dn->dn_dbufs_mtx);
1699                 return (odb);
1700         }
1701         list_insert_head(&dn->dn_dbufs, db);
1702         db->db_state = DB_UNCACHED;
1703         mutex_exit(&dn->dn_dbufs_mtx);
1704         arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);
1705 
1706         if (parent && parent != dn->dn_dbuf)
1707                 dbuf_add_ref(parent, db);
1708 
1709         ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
1710             refcount_count(&dn->dn_holds) > 0);
1711         (void) refcount_add(&dn->dn_holds, db);
1712         (void) atomic_inc_32_nv(&dn->dn_dbufs_count);
1713 
1714         dprintf_dbuf(db, "db=%p\n", db);
1715 
1716         return (db);
1717 }
1718 
1719 static int
1720 dbuf_do_evict(void *private)
1721 {
1722         arc_buf_t *buf = private;
1723         dmu_buf_impl_t *db = buf->b_private;
1724 
1725         if (!MUTEX_HELD(&db->db_mtx))
1726                 mutex_enter(&db->db_mtx);
1727 
1728         ASSERT(refcount_is_zero(&db->db_holds));
1729 
1730         if (db->db_state != DB_EVICTING) {
1731                 ASSERT(db->db_state == DB_CACHED);
1732                 DBUF_VERIFY(db);
1733                 db->db_buf = NULL;
1734                 dbuf_evict(db);
1735         } else {
1736                 mutex_exit(&db->db_mtx);
1737                 dbuf_destroy(db);
1738         }
1739         return (0);
1740 }
1741 
1742 static void
1743 dbuf_destroy(dmu_buf_impl_t *db)
1744 {
1745         ASSERT(refcount_is_zero(&db->db_holds));
1746 
1747         if (db->db_blkid != DMU_BONUS_BLKID) {
1748                 /*
1749                  * If this dbuf is still on the dn_dbufs list,
1750                  * remove it from that list.
1751                  */
1752                 if (db->db_dnode_handle != NULL) {
1753                         dnode_t *dn;
1754 
1755                         DB_DNODE_ENTER(db);
1756                         dn = DB_DNODE(db);
1757                         mutex_enter(&dn->dn_dbufs_mtx);
1758                         list_remove(&dn->dn_dbufs, db);
1759                         (void) atomic_dec_32_nv(&dn->dn_dbufs_count);
1760                         mutex_exit(&dn->dn_dbufs_mtx);
1761                         DB_DNODE_EXIT(db);
1762                         /*
1763                          * Decrementing the dbuf count means that the hold
1764                          * corresponding to the removed dbuf is no longer
1765                          * discounted in dnode_move(), so the dnode cannot be
1766                          * moved until after we release the hold.
1767                          */
1768                         dnode_rele(dn, db);
1769                         db->db_dnode_handle = NULL;
1770                 }
1771                 dbuf_hash_remove(db);
1772         }
1773         db->db_parent = NULL;
1774         db->db_buf = NULL;
1775 
1776         ASSERT(!list_link_active(&db->db_link));
1777         ASSERT(db->db.db_data == NULL);
1778         ASSERT(db->db_hash_next == NULL);
1779         ASSERT(db->db_blkptr == NULL);
1780         ASSERT(db->db_data_pending == NULL);
1781 
1782         kmem_cache_free(dbuf_cache, db);
1783         arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);
1784 }
1785 
1786 void
1787 dbuf_prefetch(dnode_t *dn, uint64_t blkid)
1788 {
1789         dmu_buf_impl_t *db = NULL;
1790         blkptr_t *bp = NULL;
1791 
1792         ASSERT(blkid != DMU_BONUS_BLKID);
1793         ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
1794 
1795         if (dnode_block_freed(dn, blkid))
1796                 return;
1797 
1798         /* dbuf_find() returns with db_mtx held */
1799         if ((db = dbuf_find(dn, 0, blkid)) != NULL) {
1800                 /*
1801                  * This dbuf is already in the cache.  We assume that
1802                  * it is already CACHED, or else about to be either
1803                  * read or filled.
1804                  */
1805                 mutex_exit(&db->db_mtx);
1806                 return;
1807         }
1808 
1809         if (dbuf_findbp(dn, 0, blkid, TRUE, &db, &bp) == 0) {
1810                 if (bp && !BP_IS_HOLE(bp)) {
1811                         int priority = dn->dn_type == DMU_OT_DDT_ZAP ?
1812                             ZIO_PRIORITY_DDT_PREFETCH : ZIO_PRIORITY_ASYNC_READ;
1813                         dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
1814                         uint32_t aflags = ARC_NOWAIT | ARC_PREFETCH;
1815                         zbookmark_t zb;
1816 
1817                         SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
1818                             dn->dn_object, 0, blkid);
1819 
1820                         (void) arc_read(NULL, dn->dn_objset->os_spa,
1821                             bp, NULL, NULL, priority,
1822                             ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
1823                             &aflags, &zb);
1824                 }
1825                 if (db)
1826                         dbuf_rele(db, NULL);
1827         }
1828 }
1829 
1830 /*
1831  * Returns with db_holds incremented, and db_mtx not held.
1832  * Note: dn_struct_rwlock must be held.
1833  */
1834 int
1835 dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid, int fail_sparse,
1836     void *tag, dmu_buf_impl_t **dbp)
1837 {
1838         dmu_buf_impl_t *db, *parent = NULL;
1839 
1840         ASSERT(blkid != DMU_BONUS_BLKID);
1841         ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
1842         ASSERT3U(dn->dn_nlevels, >, level);
1843 
1844         *dbp = NULL;
1845 top:
1846         /* dbuf_find() returns with db_mtx held */
1847         db = dbuf_find(dn, level, blkid);
1848 
1849         if (db == NULL) {
1850                 blkptr_t *bp = NULL;
1851                 int err;
1852 
1853                 ASSERT3P(parent, ==, NULL);
1854                 err = dbuf_findbp(dn, level, blkid, fail_sparse, &parent, &bp);
1855                 if (fail_sparse) {
1856                         if (err == 0 && bp && BP_IS_HOLE(bp))
1857                                 err = SET_ERROR(ENOENT);
1858                         if (err) {
1859                                 if (parent)
1860                                         dbuf_rele(parent, NULL);
1861                                 return (err);
1862                         }
1863                 }
1864                 if (err && err != ENOENT)
1865                         return (err);
1866                 db = dbuf_create(dn, level, blkid, parent, bp);
1867         }
1868 
1869         if (db->db_buf && refcount_is_zero(&db->db_holds)) {
1870                 arc_buf_add_ref(db->db_buf, db);
1871                 if (db->db_buf->b_data == NULL) {
1872                         dbuf_clear(db);
1873                         if (parent) {
1874                                 dbuf_rele(parent, NULL);
1875                                 parent = NULL;
1876                         }
1877                         goto top;
1878                 }
1879                 ASSERT3P(db->db.db_data, ==, db->db_buf->b_data);
1880         }
1881 
1882         ASSERT(db->db_buf == NULL || arc_referenced(db->db_buf));
1883 
1884         /*
1885          * If this buffer is currently syncing out, and we are
1886          * still referencing it from db_data, we need to make a copy
1887          * of it in case we decide we want to dirty it again in this txg.
1888          */
1889         if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
1890             dn->dn_object != DMU_META_DNODE_OBJECT &&
1891             db->db_state == DB_CACHED && db->db_data_pending) {
1892                 dbuf_dirty_record_t *dr = db->db_data_pending;
1893 
1894                 if (dr->dt.dl.dr_data == db->db_buf) {
1895                         arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
1896 
1897                         dbuf_set_data(db,
1898                             arc_buf_alloc(dn->dn_objset->os_spa,
1899                             db->db.db_size, db, type));
1900                         bcopy(dr->dt.dl.dr_data->b_data, db->db.db_data,
1901                             db->db.db_size);
1902                 }
1903         }
1904 
1905         (void) refcount_add(&db->db_holds, tag);
1906         dbuf_update_data(db);
1907         DBUF_VERIFY(db);
1908         mutex_exit(&db->db_mtx);
1909 
1910         /* NOTE: we can't rele the parent until after we drop the db_mtx */
1911         if (parent)
1912                 dbuf_rele(parent, NULL);
1913 
1914         ASSERT3P(DB_DNODE(db), ==, dn);
1915         ASSERT3U(db->db_blkid, ==, blkid);
1916         ASSERT3U(db->db_level, ==, level);
1917         *dbp = db;
1918 
1919         return (0);
1920 }
1921 
1922 dmu_buf_impl_t *
1923 dbuf_hold(dnode_t *dn, uint64_t blkid, void *tag)
1924 {
1925         dmu_buf_impl_t *db;
1926         int err = dbuf_hold_impl(dn, 0, blkid, FALSE, tag, &db);
1927         return (err ? NULL : db);
1928 }
1929 
1930 dmu_buf_impl_t *
1931 dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, void *tag)
1932 {
1933         dmu_buf_impl_t *db;
1934         int err = dbuf_hold_impl(dn, level, blkid, FALSE, tag, &db);
1935         return (err ? NULL : db);
1936 }
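
/*
 * Illustrative sketch (not part of the build): the canonical hold/release
 * pairing, assuming the caller already holds the dnode:
 *
 *	rw_enter(&dn->dn_struct_rwlock, RW_READER);
 *	db = dbuf_hold(dn, blkid, FTAG);
 *	rw_exit(&dn->dn_struct_rwlock);
 *	if (db != NULL) {
 *		... use db->db.db_data ...
 *		dbuf_rele(db, FTAG);
 *	}
 */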
1937 
1938 void
1939 dbuf_create_bonus(dnode_t *dn)
1940 {
1941         ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
1942 
1943         ASSERT(dn->dn_bonus == NULL);
1944         dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL);
1945 }
1946 
1947 int
1948 dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx)
1949 {
1950         dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
1951         dnode_t *dn;
1952 
1953         if (db->db_blkid != DMU_SPILL_BLKID)
1954                 return (SET_ERROR(ENOTSUP));
1955         if (blksz == 0)
1956                 blksz = SPA_MINBLOCKSIZE;
1957         if (blksz > SPA_MAXBLOCKSIZE)
1958                 blksz = SPA_MAXBLOCKSIZE;
1959         else
1960                 blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE);
1961 
1962         DB_DNODE_ENTER(db);
1963         dn = DB_DNODE(db);
1964         rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
1965         dbuf_new_size(db, blksz, tx);
1966         rw_exit(&dn->dn_struct_rwlock);
1967         DB_DNODE_EXIT(db);
1968 
1969         return (0);
1970 }
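
/*
 * Illustrative example (not part of the build): a requested blksz of 1000
 * bytes is rounded up to P2ROUNDUP(1000, SPA_MINBLOCKSIZE) == 1024, a
 * blksz of 0 is treated as SPA_MINBLOCKSIZE, and anything larger than
 * SPA_MAXBLOCKSIZE is clamped to SPA_MAXBLOCKSIZE.
 */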
1971 
1972 void
1973 dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx)
1974 {
1975         dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx);
1976 }
1977 
1978 #pragma weak dmu_buf_add_ref = dbuf_add_ref
1979 void
1980 dbuf_add_ref(dmu_buf_impl_t *db, void *tag)
1981 {
1982         int64_t holds = refcount_add(&db->db_holds, tag);
1983         ASSERT(holds > 1);
1984 }
1985 
1986 /*
1987  * If you call dbuf_rele() you had better not be referencing the dnode handle
1988  * unless you have some other direct or indirect hold on the dnode. (An indirect
1989  * hold is a hold on one of the dnode's dbufs, including the bonus buffer.)
1990  * Without that, the dbuf_rele() could lead to a dnode_rele() followed by the
1991  * dnode's parent dbuf evicting its dnode handles.
1992  */
1993 #pragma weak dmu_buf_rele = dbuf_rele
1994 void
1995 dbuf_rele(dmu_buf_impl_t *db, void *tag)
1996 {
1997         mutex_enter(&db->db_mtx);
1998         dbuf_rele_and_unlock(db, tag);
1999 }
2000 
2001 /*
2002  * dbuf_rele() for an already-locked dbuf.  This is necessary to allow
2003  * db_dirtycnt and db_holds to be updated atomically.
2004  */
2005 void
2006 dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag)
2007 {
2008         int64_t holds;
2009 
2010         ASSERT(MUTEX_HELD(&db->db_mtx));
2011         DBUF_VERIFY(db);
2012 
2013         /*
2014          * Remove the reference to the dbuf before removing its hold on the
2015          * dnode so we can guarantee in dnode_move() that a referenced bonus
2016          * buffer has a corresponding dnode hold.
2017          */
2018         holds = refcount_remove(&db->db_holds, tag);
2019         ASSERT(holds >= 0);
2020 
2021         /*
2022          * We can't freeze indirects if there is a possibility that they
2023          * may be modified in the current syncing context.
2024          */
2025         if (db->db_buf && holds == (db->db_level == 0 ? db->db_dirtycnt : 0))
2026                 arc_buf_freeze(db->db_buf);
2027 
2028         if (holds == db->db_dirtycnt &&
2029             db->db_level == 0 && db->db_immediate_evict)
2030                 dbuf_evict_user(db);
2031 
2032         if (holds == 0) {
2033                 if (db->db_blkid == DMU_BONUS_BLKID) {
2034                         mutex_exit(&db->db_mtx);
2035 
2036                         /*
2037                          * If the dnode moves here, we cannot cross this barrier
2038                          * until the move completes.
2039                          */
2040                         DB_DNODE_ENTER(db);
2041                         (void) atomic_dec_32_nv(&DB_DNODE(db)->dn_dbufs_count);
2042                         DB_DNODE_EXIT(db);
2043                         /*
2044                          * The bonus buffer's dnode hold is no longer discounted
2045                          * in dnode_move(). The dnode cannot move until after
2046                          * the dnode_rele().
2047                          */
2048                         dnode_rele(DB_DNODE(db), db);
2049                 } else if (db->db_buf == NULL) {
2050                         /*
2051                          * This is a special case: we never associated this
2052                          * dbuf with any data allocated from the ARC.
2053                          */
2054                         ASSERT(db->db_state == DB_UNCACHED ||
2055                             db->db_state == DB_NOFILL);
2056                         dbuf_evict(db);
2057                 } else if (arc_released(db->db_buf)) {
2058                         arc_buf_t *buf = db->db_buf;
2059                         /*
2060                          * This dbuf has anonymous data associated with it.
2061                          */
2062                         dbuf_set_data(db, NULL);
2063                         VERIFY(arc_buf_remove_ref(buf, db));
2064                         dbuf_evict(db);
2065                 } else {
2066                         VERIFY(!arc_buf_remove_ref(db->db_buf, db));
2067 
2068                         /*
2069                          * A dbuf will be eligible for eviction if either the
2070                          * 'primarycache' property excludes it or a duplicate
2071                          * copy of this buffer is already cached in the arc.
2072                          *
2073                          * In the case of the 'primarycache' property, a
2074                          * buffer is evicted if it does not match the
2075                          * criteria set by the property.
2076                          *
2077                          * To decide if our buffer is considered a
2078                          * duplicate, we must call into the arc to determine
2079                          * if multiple buffers are referencing the same
2080                          * block on-disk. If so, then we simply evict
2081                          * ourselves.
2082                          */
2083                         if (!DBUF_IS_CACHEABLE(db) ||
2084                             arc_buf_eviction_needed(db->db_buf))
2085                                 dbuf_clear(db);
2086                         else
2087                                 mutex_exit(&db->db_mtx);
2088                 }
2089         } else {
2090                 mutex_exit(&db->db_mtx);
2091         }
2092 }
2093 
2094 #pragma weak dmu_buf_refcount = dbuf_refcount
2095 uint64_t
2096 dbuf_refcount(dmu_buf_impl_t *db)
2097 {
2098         return (refcount_count(&db->db_holds));
2099 }
2100 
2101 void *
2102 dmu_buf_set_user(dmu_buf_t *db_fake, void *user_ptr, void *user_data_ptr_ptr,
2103     dmu_buf_evict_func_t *evict_func)
2104 {
2105         return (dmu_buf_update_user(db_fake, NULL, user_ptr,
2106             user_data_ptr_ptr, evict_func));
2107 }
2108 
2109 void *
2110 dmu_buf_set_user_ie(dmu_buf_t *db_fake, void *user_ptr, void *user_data_ptr_ptr,
2111     dmu_buf_evict_func_t *evict_func)
2112 {
2113         dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2114 
2115         db->db_immediate_evict = TRUE;
2116         return (dmu_buf_update_user(db_fake, NULL, user_ptr,
2117             user_data_ptr_ptr, evict_func));
2118 }
2119 
2120 void *
2121 dmu_buf_update_user(dmu_buf_t *db_fake, void *old_user_ptr, void *user_ptr,
2122     void *user_data_ptr_ptr, dmu_buf_evict_func_t *evict_func)
2123 {
2124         dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2125         ASSERT(db->db_level == 0);
2126 
2127         ASSERT((user_ptr == NULL) == (evict_func == NULL));
2128 
2129         mutex_enter(&db->db_mtx);
2130 
2131         if (db->db_user_ptr == old_user_ptr) {
2132                 db->db_user_ptr = user_ptr;
2133                 db->db_user_data_ptr_ptr = user_data_ptr_ptr;
2134                 db->db_evict_func = evict_func;
2135 
2136                 dbuf_update_data(db);
2137         } else {
2138                 old_user_ptr = db->db_user_ptr;
2139         }
2140 
2141         mutex_exit(&db->db_mtx);
2142         return (old_user_ptr);
2143 }
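
/*
 * Illustrative sketch (not part of the build): attaching private state to
 * a dbuf, assuming a caller-defined my_state_t and evict callback:
 *
 *	static void
 *	my_evict(dmu_buf_t *db, void *ptr)
 *	{
 *		kmem_free(ptr, sizeof (my_state_t));
 *	}
 *	...
 *	my_state_t *ms = kmem_zalloc(sizeof (*ms), KM_SLEEP);
 *	my_state_t *winner = dmu_buf_set_user(db, ms, NULL, my_evict);
 *	if (winner != NULL) {
 *		kmem_free(ms, sizeof (*ms));	-- lost the set race
 *		ms = winner;
 *	}
 */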
2144 
2145 void *
2146 dmu_buf_get_user(dmu_buf_t *db_fake)
2147 {
2148         dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2149         ASSERT(!refcount_is_zero(&db->db_holds));
2150 
2151         return (db->db_user_ptr);
2152 }
2153 
2154 boolean_t
2155 dmu_buf_freeable(dmu_buf_t *dbuf)
2156 {
2157         boolean_t res = B_FALSE;
2158         dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
2159 
2160         if (db->db_blkptr)
2161                 res = dsl_dataset_block_freeable(db->db_objset->os_dsl_dataset,
2162                     db->db_blkptr, db->db_blkptr->blk_birth);
2163 
2164         return (res);
2165 }
2166 
2167 blkptr_t *
2168 dmu_buf_get_blkptr(dmu_buf_t *db)
2169 {
2170         dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
2171         return (dbi->db_blkptr);
2172 }
2173 
2174 static void
2175 dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db)
2176 {
2177         /* ASSERT(dmu_tx_is_syncing(tx)) */
2178         ASSERT(MUTEX_HELD(&db->db_mtx));
2179 
2180         if (db->db_blkptr != NULL)
2181                 return;
2182 
2183         if (db->db_blkid == DMU_SPILL_BLKID) {
2184                 db->db_blkptr = &dn->dn_phys->dn_spill;
2185                 BP_ZERO(db->db_blkptr);
2186                 return;
2187         }
2188         if (db->db_level == dn->dn_phys->dn_nlevels-1) {
2189                 /*
2190                  * This buffer was allocated at a time when there were
2191                  * no blkptrs available in the dnode, or it was
2192                  * inappropriate to hook it in (i.e., an nlevels mismatch).
2193                  */
2194                 ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr);
2195                 ASSERT(db->db_parent == NULL);
2196                 db->db_parent = dn->dn_dbuf;
2197                 db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid];
2198                 DBUF_VERIFY(db);
2199         } else {
2200                 dmu_buf_impl_t *parent = db->db_parent;
2201                 int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
2202 
2203                 ASSERT(dn->dn_phys->dn_nlevels > 1);
2204                 if (parent == NULL) {
2205                         mutex_exit(&db->db_mtx);
2206                         rw_enter(&dn->dn_struct_rwlock, RW_READER);
2207                         (void) dbuf_hold_impl(dn, db->db_level+1,
2208                             db->db_blkid >> epbs, FALSE, db, &parent);
2209                         rw_exit(&dn->dn_struct_rwlock);
2210                         mutex_enter(&db->db_mtx);
2211                         db->db_parent = parent;
2212                 }
2213                 db->db_blkptr = (blkptr_t *)parent->db.db_data +
2214                     (db->db_blkid & ((1ULL << epbs) - 1));
2215                 DBUF_VERIFY(db);
2216         }
2217 }
2218 
2219 static void
2220 dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
2221 {
2222         dmu_buf_impl_t *db = dr->dr_dbuf;
2223         dnode_t *dn;
2224         zio_t *zio;
2225 
2226         ASSERT(dmu_tx_is_syncing(tx));
2227 
2228         dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
2229 
2230         mutex_enter(&db->db_mtx);
2231 
2232         ASSERT(db->db_level > 0);
2233         DBUF_VERIFY(db);
2234 
2235         /* Read the block if it hasn't been read yet. */
2236         if (db->db_buf == NULL) {
2237                 mutex_exit(&db->db_mtx);
2238                 (void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);
2239                 mutex_enter(&db->db_mtx);
2240         }
2241         ASSERT3U(db->db_state, ==, DB_CACHED);
2242         ASSERT(db->db_buf != NULL);
2243 
2244         DB_DNODE_ENTER(db);
2245         dn = DB_DNODE(db);
2246         /* Indirect block size must match what the dnode thinks it is. */
2247         ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
2248         dbuf_check_blkptr(dn, db);
2249         DB_DNODE_EXIT(db);
2250 
2251         /* Provide the pending dirty record to child dbufs */
2252         db->db_data_pending = dr;
2253 
2254         mutex_exit(&db->db_mtx);
2255         dbuf_write(dr, db->db_buf, tx);
2256 
2257         zio = dr->dr_zio;
2258         mutex_enter(&dr->dt.di.dr_mtx);
2259         dbuf_sync_list(&dr->dt.di.dr_children, tx);
2260         ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
2261         mutex_exit(&dr->dt.di.dr_mtx);
2262         zio_nowait(zio);
2263 }
2264 
2265 static void
2266 dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
2267 {
2268         arc_buf_t **datap = &dr->dt.dl.dr_data;
2269         dmu_buf_impl_t *db = dr->dr_dbuf;
2270         dnode_t *dn;
2271         objset_t *os;
2272         uint64_t txg = tx->tx_txg;
2273 
2274         ASSERT(dmu_tx_is_syncing(tx));
2275 
2276         dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
2277 
2278         mutex_enter(&db->db_mtx);
2279         /*
2280          * To be synced, we must be dirtied.  But we
2281          * might have been freed after the dirty.
2282          */
2283         if (db->db_state == DB_UNCACHED) {
2284                 /* This buffer has been freed since it was dirtied */
2285                 ASSERT(db->db.db_data == NULL);
2286         } else if (db->db_state == DB_FILL) {
2287                 /* This buffer was freed and is now being re-filled */
2288                 ASSERT(db->db.db_data != dr->dt.dl.dr_data);
2289         } else {
2290                 ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL);
2291         }
2292         DBUF_VERIFY(db);
2293 
2294         DB_DNODE_ENTER(db);
2295         dn = DB_DNODE(db);
2296 
2297         if (db->db_blkid == DMU_SPILL_BLKID) {
2298                 mutex_enter(&dn->dn_mtx);
2299                 dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR;
2300                 mutex_exit(&dn->dn_mtx);
2301         }
2302 
2303         /*
2304          * If this is a bonus buffer, simply copy the bonus data into the
2305          * dnode.  It will be written out when the dnode is synced (and it
2306          * will be synced, since it must have been dirty for dbuf_sync to
2307          * be called).
2308          */
2309         if (db->db_blkid == DMU_BONUS_BLKID) {
2310                 dbuf_dirty_record_t **drp;
2311 
2312                 ASSERT(*datap != NULL);
2313                 ASSERT0(db->db_level);
2314                 ASSERT3U(dn->dn_phys->dn_bonuslen, <=, DN_MAX_BONUSLEN);
2315                 bcopy(*datap, DN_BONUS(dn->dn_phys), dn->dn_phys->dn_bonuslen);
2316                 DB_DNODE_EXIT(db);
2317 
2318                 if (*datap != db->db.db_data) {
2319                         zio_buf_free(*datap, DN_MAX_BONUSLEN);
2320                         arc_space_return(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
2321                 }
2322                 db->db_data_pending = NULL;
2323                 drp = &db->db_last_dirty;
2324                 while (*drp != dr)
2325                         drp = &(*drp)->dr_next;
2326                 ASSERT(dr->dr_next == NULL);
2327                 ASSERT(dr->dr_dbuf == db);
2328                 *drp = dr->dr_next;
2329                 kmem_free(dr, sizeof (dbuf_dirty_record_t));
2330                 ASSERT(db->db_dirtycnt > 0);
2331                 db->db_dirtycnt -= 1;
2332                 dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg);
2333                 return;
2334         }
2335 
2336         os = dn->dn_objset;
2337 
2338         /*
2339          * This function may have dropped the db_mtx lock allowing a dmu_sync
2340          * operation to sneak in. As a result, we need to ensure that we
2341          * don't check the dr_override_state until we have returned from
2342          * dbuf_check_blkptr.
2343          */
2344         dbuf_check_blkptr(dn, db);
2345 
2346         /*
2347          * If this buffer is in the middle of an immediate write,
2348          * wait for the synchronous IO to complete.
2349          */
2350         while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) {
2351                 ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
2352                 cv_wait(&db->db_changed, &db->db_mtx);
2353                 ASSERT(dr->dt.dl.dr_override_state != DR_NOT_OVERRIDDEN);
2354         }
2355 
2356         if (db->db_state != DB_NOFILL &&
2357             dn->dn_object != DMU_META_DNODE_OBJECT &&
2358             refcount_count(&db->db_holds) > 1 &&
2359             dr->dt.dl.dr_override_state != DR_OVERRIDDEN &&
2360             *datap == db->db_buf) {
2361                 /*
2362                  * If this buffer is currently "in use" (i.e., there
2363                  * are active holds and db_data still references it),
2364                  * then make a copy before we start the write so that
2365                  * any modifications from the open txg will not leak
2366                  * into this write.
2367                  *
2368                  * NOTE: this copy does not need to be made for
2369                  * objects only modified in the syncing context (e.g.
2370                  * DMU_OT_DNODE blocks).
2371                  */
2372                 int blksz = arc_buf_size(*datap);
2373                 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
2374                 *datap = arc_buf_alloc(os->os_spa, blksz, db, type);
2375                 bcopy(db->db.db_data, (*datap)->b_data, blksz);
2376         }
2377         db->db_data_pending = dr;
2378 
2379         mutex_exit(&db->db_mtx);
2380 
2381         dbuf_write(dr, *datap, tx);
2382 
2383         ASSERT(!list_link_active(&dr->dr_dirty_node));
2384         if (dn->dn_object == DMU_META_DNODE_OBJECT) {
2385                 list_insert_tail(&dn->dn_dirty_records[txg&TXG_MASK], dr);
2386                 DB_DNODE_EXIT(db);
2387         } else {
2388                 /*
2389                  * Although zio_nowait() does not "wait for an IO", it does
2390                  * initiate the IO. If this is an empty write it seems plausible
2391                  * that the IO could actually be completed before the nowait
2392                  * returns. We need to DB_DNODE_EXIT() first in case
2393                  * zio_nowait() invalidates the dbuf.
2394                  */
2395                 DB_DNODE_EXIT(db);
2396                 zio_nowait(dr->dr_zio);
2397         }
2398 }
2399 
2400 void
2401 dbuf_sync_list(list_t *list, dmu_tx_t *tx)
2402 {
2403         dbuf_dirty_record_t *dr;
2404 
2405         while ((dr = list_head(list)) != NULL) {
2406                 if (dr->dr_zio != NULL) {
2407                         /*
2408                          * If we find an already initialized zio then we
2409                          * are processing the meta-dnode, and we have finished.
2410                          * The dbufs for all dnodes are put back on the list
2411                          * during processing, so that we can zio_wait()
2412                          * these IOs after initiating all child IOs.
2413                          */
2414                         ASSERT3U(dr->dr_dbuf->db.db_object, ==,
2415                             DMU_META_DNODE_OBJECT);
2416                         break;
2417                 }
2418                 list_remove(list, dr);
2419                 if (dr->dr_dbuf->db_level > 0)
2420                         dbuf_sync_indirect(dr, tx);
2421                 else
2422                         dbuf_sync_leaf(dr, tx);
2423         }
2424 }
2425 
2426 /* ARGSUSED */
2427 static void
2428 dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
2429 {
2430         dmu_buf_impl_t *db = vdb;
2431         dnode_t *dn;
2432         blkptr_t *bp = zio->io_bp;
2433         blkptr_t *bp_orig = &zio->io_bp_orig;
2434         spa_t *spa = zio->io_spa;
2435         int64_t delta;
2436         uint64_t fill = 0;
2437         int i;
2438 
2439         ASSERT(db->db_blkptr == bp);
2440 
2441         DB_DNODE_ENTER(db);
2442         dn = DB_DNODE(db);
2443         delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig);
2444         dnode_diduse_space(dn, delta - zio->io_prev_space_delta);
2445         zio->io_prev_space_delta = delta;
2446 
2447         if (BP_IS_HOLE(bp)) {
2448                 ASSERT(bp->blk_fill == 0);
2449                 DB_DNODE_EXIT(db);
2450                 return;
2451         }
2452 
2453         ASSERT((db->db_blkid != DMU_SPILL_BLKID &&
2454             BP_GET_TYPE(bp) == dn->dn_type) ||
2455             (db->db_blkid == DMU_SPILL_BLKID &&
2456             BP_GET_TYPE(bp) == dn->dn_bonustype));
2457         ASSERT(BP_GET_LEVEL(bp) == db->db_level);
2458 
2459         mutex_enter(&db->db_mtx);
2460 
2461 #ifdef ZFS_DEBUG
2462         if (db->db_blkid == DMU_SPILL_BLKID) {
2463                 ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
2464                 ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
2465                     db->db_blkptr == &dn->dn_phys->dn_spill);
2466         }
2467 #endif
2468 
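        /*
         * Recompute this block's fill count: a level-0 dnode block
         * contributes the number of allocated dnodes it holds, any
         * other level-0 block counts as 1, and an indirect block sums
         * the fill counts of its non-hole children.
         */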
2469         if (db->db_level == 0) {
2470                 mutex_enter(&dn->dn_mtx);
2471                 if (db->db_blkid > dn->dn_phys->dn_maxblkid &&
2472                     db->db_blkid != DMU_SPILL_BLKID)
2473                         dn->dn_phys->dn_maxblkid = db->db_blkid;
2474                 mutex_exit(&dn->dn_mtx);
2475 
2476                 if (dn->dn_type == DMU_OT_DNODE) {
2477                         dnode_phys_t *dnp = db->db.db_data;
2478                         for (i = db->db.db_size >> DNODE_SHIFT; i > 0;
2479                             i--, dnp++) {
2480                                 if (dnp->dn_type != DMU_OT_NONE)
2481                                         fill++;
2482                         }
2483                 } else {
2484                         fill = 1;
2485                 }
2486         } else {
2487                 blkptr_t *ibp = db->db.db_data;
2488                 ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
2489                 for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) {
2490                         if (BP_IS_HOLE(ibp))
2491                                 continue;
2492                         fill += ibp->blk_fill;
2493                 }
2494         }
2495         DB_DNODE_EXIT(db);
2496 
2497         bp->blk_fill = fill;
2498 
2499         mutex_exit(&db->db_mtx);
2500 }
2501 
2502 /* ARGSUSED */
2503 static void
2504 dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
2505 {
2506         dmu_buf_impl_t *db = vdb;
2507         blkptr_t *bp = zio->io_bp;
2508         blkptr_t *bp_orig = &zio->io_bp_orig;
2509         uint64_t txg = zio->io_txg;
2510         dbuf_dirty_record_t **drp, *dr;
2511 
2512         ASSERT0(zio->io_error);
2513         ASSERT(db->db_blkptr == bp);
2514 
2515         /*
2516          * For nopwrites and rewrites we ensure that the bp matches our
2517          * original and bypass all the accounting.
2518          */
2519         if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) {
2520                 ASSERT(BP_EQUAL(bp, bp_orig));
2521         } else {
2522                 objset_t *os;
2523                 dsl_dataset_t *ds;
2524                 dmu_tx_t *tx;
2525 
2526                 DB_GET_OBJSET(&os, db);
2527                 ds = os->os_dsl_dataset;
2528                 tx = os->os_synctx;
2529 
2530                 (void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
2531                 dsl_dataset_block_born(ds, bp, tx);
2532         }
2533 
2534         mutex_enter(&db->db_mtx);
2535 
2536         DBUF_VERIFY(db);
2537 
2538         drp = &db->db_last_dirty;
2539         while ((dr = *drp) != db->db_data_pending)
2540                 drp = &dr->dr_next;
2541         ASSERT(!list_link_active(&dr->dr_dirty_node));
2542         ASSERT(dr->dr_txg == txg);
2543         ASSERT(dr->dr_dbuf == db);
2544         ASSERT(dr->dr_next == NULL);
2545         *drp = dr->dr_next;
2546 
2547 #ifdef ZFS_DEBUG
2548         if (db->db_blkid == DMU_SPILL_BLKID) {
2549                 dnode_t *dn;
2550 
2551                 DB_DNODE_ENTER(db);
2552                 dn = DB_DNODE(db);
2553                 ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
2554                 ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
2555                     db->db_blkptr == &dn->dn_phys->dn_spill);
2556                 DB_DNODE_EXIT(db);
2557         }
2558 #endif
2559 
2560         if (db->db_level == 0) {
2561                 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2562                 ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
2563                 if (db->db_state != DB_NOFILL) {
2564                         if (dr->dt.dl.dr_data != db->db_buf)
2565                                 VERIFY(arc_buf_remove_ref(dr->dt.dl.dr_data,
2566                                     db));
2567                         else if (!arc_released(db->db_buf))
2568                                 arc_set_callback(db->db_buf, dbuf_do_evict, db);
2569                 }
2570         } else {
2571                 dnode_t *dn;
2572 
2573                 DB_DNODE_ENTER(db);
2574                 dn = DB_DNODE(db);
2575                 ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
2576                 ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
2577                 if (!BP_IS_HOLE(db->db_blkptr)) {
2578                         int epbs =
2579                             dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
2580                         ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
2581                             db->db.db_size);
2582                         ASSERT3U(dn->dn_phys->dn_maxblkid
2583                             >> (db->db_level * epbs), >=, db->db_blkid);
2584                         arc_set_callback(db->db_buf, dbuf_do_evict, db);
2585                 }
2586                 DB_DNODE_EXIT(db);
2587                 mutex_destroy(&dr->dt.di.dr_mtx);
2588                 list_destroy(&dr->dt.di.dr_children);
2589         }
2590         kmem_free(dr, sizeof (dbuf_dirty_record_t));
2591 
2592         cv_broadcast(&db->db_changed);
2593         ASSERT(db->db_dirtycnt > 0);
2594         db->db_dirtycnt -= 1;
2595         db->db_data_pending = NULL;
2596         dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg);
2597 }
2598 
2599 static void
2600 dbuf_write_nofill_ready(zio_t *zio)
2601 {
2602         dbuf_write_ready(zio, NULL, zio->io_private);
2603 }
2604 
2605 static void
2606 dbuf_write_nofill_done(zio_t *zio)
2607 {
2608         dbuf_write_done(zio, NULL, zio->io_private);
2609 }
2610 
2611 static void
2612 dbuf_write_override_ready(zio_t *zio)
2613 {
2614         dbuf_dirty_record_t *dr = zio->io_private;
2615         dmu_buf_impl_t *db = dr->dr_dbuf;
2616 
2617         dbuf_write_ready(zio, NULL, db);
2618 }
2619 
2620 static void
2621 dbuf_write_override_done(zio_t *zio)
2622 {
2623         dbuf_dirty_record_t *dr = zio->io_private;
2624         dmu_buf_impl_t *db = dr->dr_dbuf;
2625         blkptr_t *obp = &dr->dt.dl.dr_overridden_by;
2626 
2627         mutex_enter(&db->db_mtx);
2628         if (!BP_EQUAL(zio->io_bp, obp)) {
2629                 if (!BP_IS_HOLE(obp))
2630                         dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp);
2631                 arc_release(dr->dt.dl.dr_data, db);
2632         }
2633         mutex_exit(&db->db_mtx);
2634 
2635         dbuf_write_done(zio, NULL, db);
2636 }
2637 
2638 /* Issue I/O to commit a dirty buffer to disk. */
2639 static void
2640 dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
2641 {
2642         dmu_buf_impl_t *db = dr->dr_dbuf;
2643         dnode_t *dn;
2644         objset_t *os;
2645         dmu_buf_impl_t *parent = db->db_parent;
2646         uint64_t txg = tx->tx_txg;
2647         zbookmark_t zb;
2648         zio_prop_t zp;
2649         zio_t *zio;
2650         int wp_flag = 0;
2651 
2652         DB_DNODE_ENTER(db);
2653         dn = DB_DNODE(db);
2654         os = dn->dn_objset;
2655 
2656         if (db->db_state != DB_NOFILL) {
2657                 if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) {
2658                         /*
2659                          * Private object buffers are released here rather
2660                          * than in dbuf_dirty() since they are only modified
2661                          * in the syncing context and we don't want the
2662                          * overhead of making multiple copies of the data.
2663                          */
2664                         if (BP_IS_HOLE(db->db_blkptr)) {
2665                                 arc_buf_thaw(data);
2666                         } else {
2667                                 dbuf_release_bp(db);
2668                         }
2669                 }
2670         }
2671 
2672         if (parent != dn->dn_dbuf) {
2673                 /* Our parent is an indirect block. */
2674                 /* We have a dirty parent that has been scheduled for write. */
2675                 ASSERT(parent && parent->db_data_pending);
2676                 /* Our parent's buffer is one level closer to the dnode. */
2677                 ASSERT(db->db_level == parent->db_level-1);
2678                 /*
2679                  * We're about to modify our parent's db_data by modifying
2680                  * our block pointer, so the parent must be released.
2681                  */
2682                 ASSERT(arc_released(parent->db_buf));
2683                 zio = parent->db_data_pending->dr_zio;
2684         } else {
2685                 /* Our parent is the dnode itself. */
2686                 ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 &&
2687                     db->db_blkid != DMU_SPILL_BLKID) ||
2688                     (db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0));
2689                 if (db->db_blkid != DMU_SPILL_BLKID)
2690                         ASSERT3P(db->db_blkptr, ==,
2691                             &dn->dn_phys->dn_blkptr[db->db_blkid]);
2692                 zio = dn->dn_zio;
2693         }
2694 
2695         ASSERT(db->db_level == 0 || data == db->db_buf);
2696         ASSERT3U(db->db_blkptr->blk_birth, <=, txg);
2697         ASSERT(zio);
2698 
2699         SET_BOOKMARK(&zb, os->os_dsl_dataset ?
2700             os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
2701             db->db.db_object, db->db_level, db->db_blkid);
2702 
2703         if (db->db_blkid == DMU_SPILL_BLKID)
2704                 wp_flag = WP_SPILL;
2705         wp_flag |= (db->db_state == DB_NOFILL) ? WP_NOFILL : 0;
2706 
2707         dmu_write_policy(os, dn, db->db_level, wp_flag, &zp);
2708         DB_DNODE_EXIT(db);
2709 
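        /*
         * Three ways to issue the write: an override write whose block
         * pointer was produced elsewhere (e.g. by dmu_sync()), a NOFILL
         * write that allocates a block without writing its contents, or
         * the common arc_write() path.
         */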
2710         if (db->db_level == 0 && dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
2711                 ASSERT(db->db_state != DB_NOFILL);
2712                 dr->dr_zio = zio_write(zio, os->os_spa, txg,
2713                     db->db_blkptr, data->b_data, arc_buf_size(data), &zp,
2714                     dbuf_write_override_ready, dbuf_write_override_done, dr,
2715                     ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
2716                 mutex_enter(&db->db_mtx);
2717                 dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
2718                 zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by,
2719                     dr->dt.dl.dr_copies, dr->dt.dl.dr_nopwrite);
2720                 mutex_exit(&db->db_mtx);
2721         } else if (db->db_state == DB_NOFILL) {
2722                 ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF);
2723                 dr->dr_zio = zio_write(zio, os->os_spa, txg,
2724                     db->db_blkptr, NULL, db->db.db_size, &zp,
2725                     dbuf_write_nofill_ready, dbuf_write_nofill_done, db,
2726                     ZIO_PRIORITY_ASYNC_WRITE,
2727                     ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb);
2728         } else {
2729                 ASSERT(arc_released(data));
2730                 dr->dr_zio = arc_write(zio, os->os_spa, txg,
2731                     db->db_blkptr, data, DBUF_IS_L2CACHEABLE(db), &zp,
2732                     dbuf_write_ready, dbuf_write_done, db,
2733                     ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
2734         }
2735 }