/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_dataset.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_zfetch.h>

static int free_range_compar(const void *node1, const void *node2);

static kmem_cache_t *dnode_cache;
/*
 * Define DNODE_STATS to turn on statistic gathering. By default, it is only
 * turned on when DEBUG is also defined.
 */
#ifdef  DEBUG
#define DNODE_STATS
#endif  /* DEBUG */

#ifdef  DNODE_STATS
#define DNODE_STAT_ADD(stat)                    ((stat)++)
#else
#define DNODE_STAT_ADD(stat)                    /* nothing */
#endif  /* DNODE_STATS */

static dnode_phys_t dnode_phys_zero;

int zfs_default_bs = SPA_MINBLOCKSHIFT;
int zfs_default_ibs = DN_MAX_INDBLKSHIFT;

static kmem_cbrc_t dnode_move(void *, void *, size_t, void *);

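/*
 * kmem cache constructor: initialize the locks, lists, and per-txg state
 * of a dnode_t so that a freshly allocated (or moved) dnode is always in
 * a known clean state.
 */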
/* ARGSUSED */
static int
dnode_cons(void *arg, void *unused, int kmflag)
{
        dnode_t *dn = arg;
        int i;

        rw_init(&dn->dn_struct_rwlock, NULL, RW_DEFAULT, NULL);
        mutex_init(&dn->dn_mtx, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&dn->dn_dbufs_mtx, NULL, MUTEX_DEFAULT, NULL);
        cv_init(&dn->dn_notxholds, NULL, CV_DEFAULT, NULL);

        /*
         * Every dbuf has a reference, and dropping a tracked reference is
         * O(number of references), so don't track dn_holds.
         */
        refcount_create_untracked(&dn->dn_holds);
        refcount_create(&dn->dn_tx_holds);
        list_link_init(&dn->dn_link);

        bzero(&dn->dn_next_nblkptr[0], sizeof (dn->dn_next_nblkptr));
        bzero(&dn->dn_next_nlevels[0], sizeof (dn->dn_next_nlevels));
        bzero(&dn->dn_next_indblkshift[0], sizeof (dn->dn_next_indblkshift));
        bzero(&dn->dn_next_bonustype[0], sizeof (dn->dn_next_bonustype));
        bzero(&dn->dn_rm_spillblk[0], sizeof (dn->dn_rm_spillblk));
        bzero(&dn->dn_next_bonuslen[0], sizeof (dn->dn_next_bonuslen));
        bzero(&dn->dn_next_blksz[0], sizeof (dn->dn_next_blksz));

        for (i = 0; i < TXG_SIZE; i++) {
                list_link_init(&dn->dn_dirty_link[i]);
                avl_create(&dn->dn_ranges[i], free_range_compar,
                    sizeof (free_range_t),
                    offsetof(struct free_range, fr_node));
                list_create(&dn->dn_dirty_records[i],
                    sizeof (dbuf_dirty_record_t),
                    offsetof(dbuf_dirty_record_t, dr_dirty_node));
        }

        dn->dn_allocated_txg = 0;
        dn->dn_free_txg = 0;
        dn->dn_assigned_txg = 0;
        dn->dn_dirtyctx = 0;
        dn->dn_dirtyctx_firstset = NULL;
        dn->dn_bonus = NULL;
        dn->dn_have_spill = B_FALSE;
        dn->dn_zio = NULL;
        dn->dn_oldused = 0;
        dn->dn_oldflags = 0;
        dn->dn_olduid = 0;
        dn->dn_oldgid = 0;
        dn->dn_newuid = 0;
        dn->dn_newgid = 0;
        dn->dn_id_flags = 0;

        dn->dn_dbufs_count = 0;
        list_create(&dn->dn_dbufs, sizeof (dmu_buf_impl_t),
            offsetof(dmu_buf_impl_t, db_link));

        dn->dn_moved = 0;
        return (0);
}

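/*
 * kmem cache destructor: verify that the dnode is back in its pristine
 * constructed state before its memory is returned to the cache.
 */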
/* ARGSUSED */
static void
dnode_dest(void *arg, void *unused)
{
        int i;
        dnode_t *dn = arg;

        rw_destroy(&dn->dn_struct_rwlock);
        mutex_destroy(&dn->dn_mtx);
        mutex_destroy(&dn->dn_dbufs_mtx);
        cv_destroy(&dn->dn_notxholds);
        refcount_destroy(&dn->dn_holds);
        refcount_destroy(&dn->dn_tx_holds);
        ASSERT(!list_link_active(&dn->dn_link));

        for (i = 0; i < TXG_SIZE; i++) {
                ASSERT(!list_link_active(&dn->dn_dirty_link[i]));
                avl_destroy(&dn->dn_ranges[i]);
                list_destroy(&dn->dn_dirty_records[i]);
                ASSERT0(dn->dn_next_nblkptr[i]);
                ASSERT0(dn->dn_next_nlevels[i]);
                ASSERT0(dn->dn_next_indblkshift[i]);
                ASSERT0(dn->dn_next_bonustype[i]);
                ASSERT0(dn->dn_rm_spillblk[i]);
                ASSERT0(dn->dn_next_bonuslen[i]);
                ASSERT0(dn->dn_next_blksz[i]);
        }

        ASSERT0(dn->dn_allocated_txg);
        ASSERT0(dn->dn_free_txg);
        ASSERT0(dn->dn_assigned_txg);
        ASSERT0(dn->dn_dirtyctx);
        ASSERT3P(dn->dn_dirtyctx_firstset, ==, NULL);
        ASSERT3P(dn->dn_bonus, ==, NULL);
        ASSERT(!dn->dn_have_spill);
        ASSERT3P(dn->dn_zio, ==, NULL);
        ASSERT0(dn->dn_oldused);
        ASSERT0(dn->dn_oldflags);
        ASSERT0(dn->dn_olduid);
        ASSERT0(dn->dn_oldgid);
        ASSERT0(dn->dn_newuid);
        ASSERT0(dn->dn_newgid);
        ASSERT0(dn->dn_id_flags);

        ASSERT0(dn->dn_dbufs_count);
        list_destroy(&dn->dn_dbufs);
}

void
dnode_init(void)
{
        ASSERT(dnode_cache == NULL);
        dnode_cache = kmem_cache_create("dnode_t",
            sizeof (dnode_t),
            0, dnode_cons, dnode_dest, NULL, NULL, NULL, 0);
        kmem_cache_set_move(dnode_cache, dnode_move);
}

void
dnode_fini(void)
{
        kmem_cache_destroy(dnode_cache);
        dnode_cache = NULL;
}

#ifdef ZFS_DEBUG
void
dnode_verify(dnode_t *dn)
{
        int drop_struct_lock = FALSE;

        ASSERT(dn->dn_phys);
        ASSERT(dn->dn_objset);
        ASSERT(dn->dn_handle->dnh_dnode == dn);

        ASSERT(DMU_OT_IS_VALID(dn->dn_phys->dn_type));

        if (!(zfs_flags & ZFS_DEBUG_DNODE_VERIFY))
                return;

        if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
                rw_enter(&dn->dn_struct_rwlock, RW_READER);
                drop_struct_lock = TRUE;
        }
        if (dn->dn_phys->dn_type != DMU_OT_NONE || dn->dn_allocated_txg != 0) {
                int i;
                ASSERT3U(dn->dn_indblkshift, >=, 0);
                ASSERT3U(dn->dn_indblkshift, <=, SPA_MAXBLOCKSHIFT);
                if (dn->dn_datablkshift) {
                        ASSERT3U(dn->dn_datablkshift, >=, SPA_MINBLOCKSHIFT);
                        ASSERT3U(dn->dn_datablkshift, <=, SPA_MAXBLOCKSHIFT);
                        ASSERT3U(1<<dn->dn_datablkshift, ==, dn->dn_datablksz);
                }
                ASSERT3U(dn->dn_nlevels, <=, 30);
                ASSERT(DMU_OT_IS_VALID(dn->dn_type));
                ASSERT3U(dn->dn_nblkptr, >=, 1);
                ASSERT3U(dn->dn_nblkptr, <=, DN_MAX_NBLKPTR);
                ASSERT3U(dn->dn_bonuslen, <=, DN_MAX_BONUSLEN);
                ASSERT3U(dn->dn_datablksz, ==,
                    dn->dn_datablkszsec << SPA_MINBLOCKSHIFT);
                ASSERT3U(ISP2(dn->dn_datablksz), ==, dn->dn_datablkshift != 0);
                ASSERT3U((dn->dn_nblkptr - 1) * sizeof (blkptr_t) +
                    dn->dn_bonuslen, <=, DN_MAX_BONUSLEN);
                for (i = 0; i < TXG_SIZE; i++) {
                        ASSERT3U(dn->dn_next_nlevels[i], <=, dn->dn_nlevels);
                }
        }
        if (dn->dn_phys->dn_type != DMU_OT_NONE)
                ASSERT3U(dn->dn_phys->dn_nlevels, <=, dn->dn_nlevels);
        ASSERT(DMU_OBJECT_IS_SPECIAL(dn->dn_object) || dn->dn_dbuf != NULL);
        if (dn->dn_dbuf != NULL) {
                ASSERT3P(dn->dn_phys, ==,
                    (dnode_phys_t *)dn->dn_dbuf->db.db_data +
                    (dn->dn_object % (dn->dn_dbuf->db.db_size >> DNODE_SHIFT)));
        }
        if (drop_struct_lock)
                rw_exit(&dn->dn_struct_rwlock);
}
#endif

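/*
 * Byteswap a single on-disk dnode, including its block pointers, bonus
 * buffer, and (if present) spill block pointer.
 */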
void
dnode_byteswap(dnode_phys_t *dnp)
{
        uint64_t *buf64 = (void*)&dnp->dn_blkptr;
        int i;

        if (dnp->dn_type == DMU_OT_NONE) {
                bzero(dnp, sizeof (dnode_phys_t));
                return;
        }

        dnp->dn_datablkszsec = BSWAP_16(dnp->dn_datablkszsec);
        dnp->dn_bonuslen = BSWAP_16(dnp->dn_bonuslen);
        dnp->dn_maxblkid = BSWAP_64(dnp->dn_maxblkid);
        dnp->dn_used = BSWAP_64(dnp->dn_used);

        /*
         * dn_nblkptr is only one byte, so it's OK to read it in either
         * byte order.  We can't read dn_bonuslen.
         */
        ASSERT(dnp->dn_indblkshift <= SPA_MAXBLOCKSHIFT);
        ASSERT(dnp->dn_nblkptr <= DN_MAX_NBLKPTR);
        for (i = 0; i < dnp->dn_nblkptr * sizeof (blkptr_t)/8; i++)
                buf64[i] = BSWAP_64(buf64[i]);

        /*
         * OK to check dn_bonuslen for zero, because it won't matter if
         * we have the wrong byte order.  This is necessary because the
         * dnode dnode is smaller than a regular dnode.
         */
        if (dnp->dn_bonuslen != 0) {
                /*
                 * Note that the bonus length calculated here may be
                 * longer than the actual bonus buffer.  This is because
                 * we always put the bonus buffer after the last block
                 * pointer (instead of packing it against the end of the
                 * dnode buffer).
                 */
                int off = (dnp->dn_nblkptr-1) * sizeof (blkptr_t);
                size_t len = DN_MAX_BONUSLEN - off;
                ASSERT(DMU_OT_IS_VALID(dnp->dn_bonustype));
                dmu_object_byteswap_t byteswap =
                    DMU_OT_BYTESWAP(dnp->dn_bonustype);
                dmu_ot_byteswap[byteswap].ob_func(dnp->dn_bonus + off, len);
        }

        /* Swap SPILL block if we have one */
        if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)
                byteswap_uint64_array(&dnp->dn_spill, sizeof (blkptr_t));
}

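/*
 * Byteswap a buffer full of on-disk dnodes, e.g. one block of the meta
 * dnode's object array.
 */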
void
dnode_buf_byteswap(void *vbuf, size_t size)
{
        dnode_phys_t *buf = vbuf;
        int i;

        ASSERT3U(sizeof (dnode_phys_t), ==, (1<<DNODE_SHIFT));
        ASSERT((size & (sizeof (dnode_phys_t)-1)) == 0);

        size >>= DNODE_SHIFT;
        for (i = 0; i < size; i++) {
                dnode_byteswap(buf);
                buf++;
        }
}

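/*
 * AVL comparator for free_range_t entries, ordered by starting block id.
 */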
static int
free_range_compar(const void *node1, const void *node2)
{
        const free_range_t *rp1 = node1;
        const free_range_t *rp2 = node2;

        if (rp1->fr_blkid < rp2->fr_blkid)
                return (-1);
        else if (rp1->fr_blkid > rp2->fr_blkid)
                return (1);
        else return (0);
}

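/*
 * Change the dnode's bonus buffer length, recording the new length for
 * the open txg so it can be synced out.
 */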
void
dnode_setbonuslen(dnode_t *dn, int newsize, dmu_tx_t *tx)
{
        ASSERT3U(refcount_count(&dn->dn_holds), >=, 1);

        dnode_setdirty(dn, tx);
        rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
        ASSERT3U(newsize, <=, DN_MAX_BONUSLEN -
            (dn->dn_nblkptr-1) * sizeof (blkptr_t));
        dn->dn_bonuslen = newsize;
        if (newsize == 0)
                dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = DN_ZERO_BONUSLEN;
        else
                dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = dn->dn_bonuslen;
        rw_exit(&dn->dn_struct_rwlock);
}

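/*
 * Change the type of the dnode's bonus buffer, recording the change for
 * the open txg.
 */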
void
dnode_setbonus_type(dnode_t *dn, dmu_object_type_t newtype, dmu_tx_t *tx)
{
        ASSERT3U(refcount_count(&dn->dn_holds), >=, 1);
        dnode_setdirty(dn, tx);
        rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
        dn->dn_bonustype = newtype;
        dn->dn_next_bonustype[tx->tx_txg & TXG_MASK] = dn->dn_bonustype;
        rw_exit(&dn->dn_struct_rwlock);
}

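/*
 * Mark the dnode's spill block for removal in the open txg.
 */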
void
dnode_rm_spill(dnode_t *dn, dmu_tx_t *tx)
{
        ASSERT3U(refcount_count(&dn->dn_holds), >=, 1);
        ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
        dnode_setdirty(dn, tx);
        dn->dn_rm_spillblk[tx->tx_txg&TXG_MASK] = DN_KILL_SPILLBLK;
        dn->dn_have_spill = B_FALSE;
}

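/*
 * Set the in-core data block size and the fields derived from it: the
 * size in 512-byte sectors and, for power-of-two sizes, the block shift.
 */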
static void
dnode_setdblksz(dnode_t *dn, int size)
{
        ASSERT0(P2PHASE(size, SPA_MINBLOCKSIZE));
        ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);
        ASSERT3U(size, >=, SPA_MINBLOCKSIZE);
        ASSERT3U(size >> SPA_MINBLOCKSHIFT, <,
            1<<(sizeof (dn->dn_phys->dn_datablkszsec) * 8));
        dn->dn_datablksz = size;
        dn->dn_datablkszsec = size >> SPA_MINBLOCKSHIFT;
        dn->dn_datablkshift = ISP2(size) ? highbit(size - 1) : 0;
}

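/*
 * Instantiate an in-core dnode from its on-disk image and link it into
 * the objset's list of known dnodes.  dn_objset is assigned last, since
 * a valid objset pointer is what makes the dnode eligible for
 * dnode_move().
 */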
static dnode_t *
dnode_create(objset_t *os, dnode_phys_t *dnp, dmu_buf_impl_t *db,
    uint64_t object, dnode_handle_t *dnh)
{
        dnode_t *dn = kmem_cache_alloc(dnode_cache, KM_SLEEP);

        ASSERT(!POINTER_IS_VALID(dn->dn_objset));
        dn->dn_moved = 0;

        /*
         * Defer setting dn_objset until the dnode is ready to be a candidate
         * for the dnode_move() callback.
         */
        dn->dn_object = object;
        dn->dn_dbuf = db;
        dn->dn_handle = dnh;
        dn->dn_phys = dnp;

        if (dnp->dn_datablkszsec) {
                dnode_setdblksz(dn, dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
        } else {
                dn->dn_datablksz = 0;
                dn->dn_datablkszsec = 0;
                dn->dn_datablkshift = 0;
        }
        dn->dn_indblkshift = dnp->dn_indblkshift;
        dn->dn_nlevels = dnp->dn_nlevels;
        dn->dn_type = dnp->dn_type;
        dn->dn_nblkptr = dnp->dn_nblkptr;
        dn->dn_checksum = dnp->dn_checksum;
        dn->dn_compress = dnp->dn_compress;
        dn->dn_bonustype = dnp->dn_bonustype;
        dn->dn_bonuslen = dnp->dn_bonuslen;
        dn->dn_maxblkid = dnp->dn_maxblkid;
        dn->dn_have_spill = ((dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) != 0);
        dn->dn_id_flags = 0;

        dmu_zfetch_init(&dn->dn_zfetch, dn);

        ASSERT(DMU_OT_IS_VALID(dn->dn_phys->dn_type));

        mutex_enter(&os->os_lock);
        list_insert_head(&os->os_dnodes, dn);
        membar_producer();
        /*
         * Everything else must be valid before assigning dn_objset makes the
         * dnode eligible for dnode_move().
         */
        dn->dn_objset = os;
        mutex_exit(&os->os_lock);

        arc_space_consume(sizeof (dnode_t), ARC_SPACE_OTHER);
        return (dn);
}

/*
 * Caller must be holding the dnode handle, which is released upon return.
 */
static void
dnode_destroy(dnode_t *dn)
{
        objset_t *os = dn->dn_objset;

        ASSERT((dn->dn_id_flags & DN_ID_NEW_EXIST) == 0);

        mutex_enter(&os->os_lock);
        POINTER_INVALIDATE(&dn->dn_objset);
        list_remove(&os->os_dnodes, dn);
        mutex_exit(&os->os_lock);

        /* the dnode can no longer move, so we can release the handle */
        zrl_remove(&dn->dn_handle->dnh_zrlock);

        dn->dn_allocated_txg = 0;
        dn->dn_free_txg = 0;
        dn->dn_assigned_txg = 0;

        dn->dn_dirtyctx = 0;
        if (dn->dn_dirtyctx_firstset != NULL) {
                kmem_free(dn->dn_dirtyctx_firstset, 1);
                dn->dn_dirtyctx_firstset = NULL;
        }
        if (dn->dn_bonus != NULL) {
                list_t evict_list;

                dmu_buf_create_user_evict_list(&evict_list);
                mutex_enter(&dn->dn_bonus->db_mtx);
                dbuf_evict(dn->dn_bonus, &evict_list);
                dmu_buf_destroy_user_evict_list(&evict_list);
                dn->dn_bonus = NULL;
        }
        dn->dn_zio = NULL;

        dn->dn_have_spill = B_FALSE;
        dn->dn_oldused = 0;
        dn->dn_oldflags = 0;
        dn->dn_olduid = 0;
        dn->dn_oldgid = 0;
        dn->dn_newuid = 0;
        dn->dn_newgid = 0;
        dn->dn_id_flags = 0;

        dmu_zfetch_rele(&dn->dn_zfetch);
        kmem_cache_free(dnode_cache, dn);
        arc_space_return(sizeof (dnode_t), ARC_SPACE_OTHER);
}

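/*
 * Allocate a new object: initialize the in-core dnode for the given
 * type, block size, indirect block shift, and bonus buffer, and dirty
 * it in the current transaction.
 */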
void
dnode_allocate(dnode_t *dn, dmu_object_type_t ot, int blocksize, int ibs,
    dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
        int i;

        if (blocksize == 0)
                blocksize = 1 << zfs_default_bs;
        else if (blocksize > SPA_MAXBLOCKSIZE)
                blocksize = SPA_MAXBLOCKSIZE;
        else
                blocksize = P2ROUNDUP(blocksize, SPA_MINBLOCKSIZE);

        if (ibs == 0)
                ibs = zfs_default_ibs;

        ibs = MIN(MAX(ibs, DN_MIN_INDBLKSHIFT), DN_MAX_INDBLKSHIFT);

        dprintf("os=%p obj=%llu txg=%llu blocksize=%d ibs=%d\n", dn->dn_objset,
            dn->dn_object, tx->tx_txg, blocksize, ibs);

        ASSERT(dn->dn_type == DMU_OT_NONE);
        ASSERT(bcmp(dn->dn_phys, &dnode_phys_zero, sizeof (dnode_phys_t)) == 0);
        ASSERT(dn->dn_phys->dn_type == DMU_OT_NONE);
        ASSERT(ot != DMU_OT_NONE);
        ASSERT(DMU_OT_IS_VALID(ot));
        ASSERT((bonustype == DMU_OT_NONE && bonuslen == 0) ||
            (bonustype == DMU_OT_SA && bonuslen == 0) ||
            (bonustype != DMU_OT_NONE && bonuslen != 0));
        ASSERT(DMU_OT_IS_VALID(bonustype));
        ASSERT3U(bonuslen, <=, DN_MAX_BONUSLEN);
        ASSERT(dn->dn_type == DMU_OT_NONE);
        ASSERT0(dn->dn_maxblkid);
        ASSERT0(dn->dn_allocated_txg);
        ASSERT0(dn->dn_assigned_txg);
        ASSERT(refcount_is_zero(&dn->dn_tx_holds));
        ASSERT3U(refcount_count(&dn->dn_holds), <=, 1);
        ASSERT3P(list_head(&dn->dn_dbufs), ==, NULL);

        for (i = 0; i < TXG_SIZE; i++) {
                ASSERT0(dn->dn_next_nblkptr[i]);
                ASSERT0(dn->dn_next_nlevels[i]);
                ASSERT0(dn->dn_next_indblkshift[i]);
                ASSERT0(dn->dn_next_bonuslen[i]);
                ASSERT0(dn->dn_next_bonustype[i]);
                ASSERT0(dn->dn_rm_spillblk[i]);
                ASSERT0(dn->dn_next_blksz[i]);
                ASSERT(!list_link_active(&dn->dn_dirty_link[i]));
                ASSERT3P(list_head(&dn->dn_dirty_records[i]), ==, NULL);
                ASSERT0(avl_numnodes(&dn->dn_ranges[i]));
        }

        dn->dn_type = ot;
        dnode_setdblksz(dn, blocksize);
        dn->dn_indblkshift = ibs;
        dn->dn_nlevels = 1;
        if (bonustype == DMU_OT_SA) /* Maximize bonus space for SA */
                dn->dn_nblkptr = 1;
        else
                dn->dn_nblkptr = 1 +
                    ((DN_MAX_BONUSLEN - bonuslen) >> SPA_BLKPTRSHIFT);
        dn->dn_bonustype = bonustype;
        dn->dn_bonuslen = bonuslen;
        dn->dn_checksum = ZIO_CHECKSUM_INHERIT;
        dn->dn_compress = ZIO_COMPRESS_INHERIT;
        dn->dn_dirtyctx = 0;

        dn->dn_free_txg = 0;
        if (dn->dn_dirtyctx_firstset) {
                kmem_free(dn->dn_dirtyctx_firstset, 1);
                dn->dn_dirtyctx_firstset = NULL;
        }

        dn->dn_allocated_txg = tx->tx_txg;
        dn->dn_id_flags = 0;

        dnode_setdirty(dn, tx);
        dn->dn_next_indblkshift[tx->tx_txg & TXG_MASK] = ibs;
        dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = dn->dn_bonuslen;
        dn->dn_next_bonustype[tx->tx_txg & TXG_MASK] = dn->dn_bonustype;
        dn->dn_next_blksz[tx->tx_txg & TXG_MASK] = dn->dn_datablksz;
}

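/*
 * Reallocate an existing object in place with a new type, block size,
 * and bonus buffer, evicting any unreferenced dbufs first.
 */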
void
dnode_reallocate(dnode_t *dn, dmu_object_type_t ot, int blocksize,
    dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
        int nblkptr;

        ASSERT3U(blocksize, >=, SPA_MINBLOCKSIZE);
        ASSERT3U(blocksize, <=, SPA_MAXBLOCKSIZE);
        ASSERT0(blocksize % SPA_MINBLOCKSIZE);
        ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT || dmu_tx_private_ok(tx));
        ASSERT(tx->tx_txg != 0);
        ASSERT((bonustype == DMU_OT_NONE && bonuslen == 0) ||
            (bonustype != DMU_OT_NONE && bonuslen != 0) ||
            (bonustype == DMU_OT_SA && bonuslen == 0));
        ASSERT(DMU_OT_IS_VALID(bonustype));
        ASSERT3U(bonuslen, <=, DN_MAX_BONUSLEN);

        /* clean up any unreferenced dbufs */
        dnode_evict_dbufs(dn);

        dn->dn_id_flags = 0;

        rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
        dnode_setdirty(dn, tx);
        if (dn->dn_datablksz != blocksize) {
                /* change blocksize */
                ASSERT(dn->dn_maxblkid == 0 &&
                    (BP_IS_HOLE(&dn->dn_phys->dn_blkptr[0]) ||
                    dnode_block_freed(dn, 0)));
                dnode_setdblksz(dn, blocksize);
                dn->dn_next_blksz[tx->tx_txg&TXG_MASK] = blocksize;
        }
        if (dn->dn_bonuslen != bonuslen)
                dn->dn_next_bonuslen[tx->tx_txg&TXG_MASK] = bonuslen;

        if (bonustype == DMU_OT_SA) /* Maximize bonus space for SA */
                nblkptr = 1;
        else
                nblkptr = 1 + ((DN_MAX_BONUSLEN - bonuslen) >> SPA_BLKPTRSHIFT);
        if (dn->dn_bonustype != bonustype)
                dn->dn_next_bonustype[tx->tx_txg&TXG_MASK] = bonustype;
        if (dn->dn_nblkptr != nblkptr)
                dn->dn_next_nblkptr[tx->tx_txg&TXG_MASK] = nblkptr;
        if (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
                dbuf_rm_spill(dn, tx);
                dnode_rm_spill(dn, tx);
        }
        rw_exit(&dn->dn_struct_rwlock);

        /* change type */
        dn->dn_type = ot;

        /* change bonus size and type */
        mutex_enter(&dn->dn_mtx);
        dn->dn_bonustype = bonustype;
        dn->dn_bonuslen = bonuslen;
        dn->dn_nblkptr = nblkptr;
        dn->dn_checksum = ZIO_CHECKSUM_INHERIT;
        dn->dn_compress = ZIO_COMPRESS_INHERIT;
        ASSERT3U(dn->dn_nblkptr, <=, DN_MAX_NBLKPTR);

        /* fix up the bonus db_size */
        if (dn->dn_bonus) {
                dn->dn_bonus->db.db_size =
                    DN_MAX_BONUSLEN - (dn->dn_nblkptr-1) * sizeof (blkptr_t);
                ASSERT(dn->dn_bonuslen <= dn->dn_bonus->db.db_size);
        }

        dn->dn_allocated_txg = tx->tx_txg;
        mutex_exit(&dn->dn_mtx);
}

#ifdef  DNODE_STATS
static struct {
        uint64_t dms_dnode_invalid;
        uint64_t dms_dnode_recheck1;
        uint64_t dms_dnode_recheck2;
        uint64_t dms_dnode_special;
        uint64_t dms_dnode_handle;
        uint64_t dms_dnode_rwlock;
        uint64_t dms_dnode_active;
} dnode_move_stats;
#endif  /* DNODE_STATS */

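/*
 * Move the state of a dnode from one memory location to another as part
 * of a kmem cache move callback: copy every field into the new dnode,
 * fix up back pointers, and leave the old dnode invalidated but in a
 * state that satisfies the cache destructor.
 */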
static void
dnode_move_impl(dnode_t *odn, dnode_t *ndn)
{
        int i;

        ASSERT(!RW_LOCK_HELD(&odn->dn_struct_rwlock));
        ASSERT(MUTEX_NOT_HELD(&odn->dn_mtx));
        ASSERT(MUTEX_NOT_HELD(&odn->dn_dbufs_mtx));
        ASSERT(!RW_LOCK_HELD(&odn->dn_zfetch.zf_rwlock));

        /* Copy fields. */
        ndn->dn_objset = odn->dn_objset;
        ndn->dn_object = odn->dn_object;
        ndn->dn_dbuf = odn->dn_dbuf;
        ndn->dn_handle = odn->dn_handle;
        ndn->dn_phys = odn->dn_phys;
        ndn->dn_type = odn->dn_type;
        ndn->dn_bonuslen = odn->dn_bonuslen;
        ndn->dn_bonustype = odn->dn_bonustype;
        ndn->dn_nblkptr = odn->dn_nblkptr;
        ndn->dn_checksum = odn->dn_checksum;
        ndn->dn_compress = odn->dn_compress;
        ndn->dn_nlevels = odn->dn_nlevels;
        ndn->dn_indblkshift = odn->dn_indblkshift;
        ndn->dn_datablkshift = odn->dn_datablkshift;
        ndn->dn_datablkszsec = odn->dn_datablkszsec;
        ndn->dn_datablksz = odn->dn_datablksz;
        ndn->dn_maxblkid = odn->dn_maxblkid;
        bcopy(&odn->dn_next_nblkptr[0], &ndn->dn_next_nblkptr[0],
            sizeof (odn->dn_next_nblkptr));
        bcopy(&odn->dn_next_nlevels[0], &ndn->dn_next_nlevels[0],
            sizeof (odn->dn_next_nlevels));
        bcopy(&odn->dn_next_indblkshift[0], &ndn->dn_next_indblkshift[0],
            sizeof (odn->dn_next_indblkshift));
        bcopy(&odn->dn_next_bonustype[0], &ndn->dn_next_bonustype[0],
            sizeof (odn->dn_next_bonustype));
        bcopy(&odn->dn_rm_spillblk[0], &ndn->dn_rm_spillblk[0],
            sizeof (odn->dn_rm_spillblk));
        bcopy(&odn->dn_next_bonuslen[0], &ndn->dn_next_bonuslen[0],
            sizeof (odn->dn_next_bonuslen));
        bcopy(&odn->dn_next_blksz[0], &ndn->dn_next_blksz[0],
            sizeof (odn->dn_next_blksz));
        for (i = 0; i < TXG_SIZE; i++) {
                list_move_tail(&ndn->dn_dirty_records[i],
                    &odn->dn_dirty_records[i]);
        }
        bcopy(&odn->dn_ranges[0], &ndn->dn_ranges[0], sizeof (odn->dn_ranges));
        ndn->dn_allocated_txg = odn->dn_allocated_txg;
        ndn->dn_free_txg = odn->dn_free_txg;
        ndn->dn_assigned_txg = odn->dn_assigned_txg;
        ndn->dn_dirtyctx = odn->dn_dirtyctx;
        ndn->dn_dirtyctx_firstset = odn->dn_dirtyctx_firstset;
        ASSERT(refcount_count(&odn->dn_tx_holds) == 0);
        refcount_transfer(&ndn->dn_holds, &odn->dn_holds);
        ASSERT(list_is_empty(&ndn->dn_dbufs));
        list_move_tail(&ndn->dn_dbufs, &odn->dn_dbufs);
        ndn->dn_dbufs_count = odn->dn_dbufs_count;
        ndn->dn_bonus = odn->dn_bonus;
        ndn->dn_have_spill = odn->dn_have_spill;
        ndn->dn_zio = odn->dn_zio;
        ndn->dn_oldused = odn->dn_oldused;
        ndn->dn_oldflags = odn->dn_oldflags;
        ndn->dn_olduid = odn->dn_olduid;
        ndn->dn_oldgid = odn->dn_oldgid;
        ndn->dn_newuid = odn->dn_newuid;
        ndn->dn_newgid = odn->dn_newgid;
        ndn->dn_id_flags = odn->dn_id_flags;
        dmu_zfetch_init(&ndn->dn_zfetch, NULL);
        list_move_tail(&ndn->dn_zfetch.zf_stream, &odn->dn_zfetch.zf_stream);
        ndn->dn_zfetch.zf_dnode = odn->dn_zfetch.zf_dnode;
        ndn->dn_zfetch.zf_stream_cnt = odn->dn_zfetch.zf_stream_cnt;
        ndn->dn_zfetch.zf_alloc_fail = odn->dn_zfetch.zf_alloc_fail;

        /*
         * Update back pointers. Updating the handle fixes the back pointer of
         * every descendant dbuf as well as the bonus dbuf.
         */
        ASSERT(ndn->dn_handle->dnh_dnode == odn);
        ndn->dn_handle->dnh_dnode = ndn;
        if (ndn->dn_zfetch.zf_dnode == odn) {
                ndn->dn_zfetch.zf_dnode = ndn;
        }

        /*
         * Invalidate the original dnode by clearing all of its back pointers.
         */
        odn->dn_dbuf = NULL;
        odn->dn_handle = NULL;
        list_create(&odn->dn_dbufs, sizeof (dmu_buf_impl_t),
            offsetof(dmu_buf_impl_t, db_link));
        odn->dn_dbufs_count = 0;
        odn->dn_bonus = NULL;
        odn->dn_zfetch.zf_dnode = NULL;

        /*
         * Set the low bit of the objset pointer to ensure that dnode_move()
         * recognizes the dnode as invalid in any subsequent callback.
         */
        POINTER_INVALIDATE(&odn->dn_objset);

        /*
         * Satisfy the destructor.
         */
        for (i = 0; i < TXG_SIZE; i++) {
                list_create(&odn->dn_dirty_records[i],
                    sizeof (dbuf_dirty_record_t),
                    offsetof(dbuf_dirty_record_t, dr_dirty_node));
                odn->dn_ranges[i].avl_root = NULL;
                odn->dn_ranges[i].avl_numnodes = 0;
                odn->dn_next_nlevels[i] = 0;
                odn->dn_next_indblkshift[i] = 0;
                odn->dn_next_bonustype[i] = 0;
                odn->dn_rm_spillblk[i] = 0;
                odn->dn_next_bonuslen[i] = 0;
                odn->dn_next_blksz[i] = 0;
        }
        odn->dn_allocated_txg = 0;
        odn->dn_free_txg = 0;
        odn->dn_assigned_txg = 0;
        odn->dn_dirtyctx = 0;
        odn->dn_dirtyctx_firstset = NULL;
        odn->dn_have_spill = B_FALSE;
        odn->dn_zio = NULL;
        odn->dn_oldused = 0;
        odn->dn_oldflags = 0;
        odn->dn_olduid = 0;
        odn->dn_oldgid = 0;
        odn->dn_newuid = 0;
        odn->dn_newgid = 0;
        odn->dn_id_flags = 0;

        /*
         * Mark the dnode.
         */
        ndn->dn_moved = 1;
        odn->dn_moved = (uint8_t)-1;
}

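/*
 * The kmem move callback for dnode_cache.  A dnode may only be moved if
 * we can account for every hold on it (i.e. each hold is matched by a
 * dbuf), so that no thread can be actively dereferencing the old
 * location.
 */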
#ifdef  _KERNEL
/*ARGSUSED*/
static kmem_cbrc_t
dnode_move(void *buf, void *newbuf, size_t size, void *arg)
{
        dnode_t *odn = buf, *ndn = newbuf;
        objset_t *os;
        int64_t refcount;
        uint32_t dbufs;

        /*
         * The dnode is on the objset's list of known dnodes if the objset
         * pointer is valid. We set the low bit of the objset pointer when
         * freeing the dnode to invalidate it, and the memory patterns written
         * by kmem (baddcafe and deadbeef) set at least one of the two low bits.
         * A newly created dnode sets the objset pointer last of all to indicate
         * that the dnode is known and in a valid state to be moved by this
         * function.
         */
        os = odn->dn_objset;
        if (!POINTER_IS_VALID(os)) {
                DNODE_STAT_ADD(dnode_move_stats.dms_dnode_invalid);
                return (KMEM_CBRC_DONT_KNOW);
        }

        /*
         * Ensure that the objset does not go away during the move.
         */
        rw_enter(&os_lock, RW_WRITER);
        if (os != odn->dn_objset) {
                rw_exit(&os_lock);
                DNODE_STAT_ADD(dnode_move_stats.dms_dnode_recheck1);
                return (KMEM_CBRC_DONT_KNOW);
        }

        /*
         * If the dnode is still valid, then so is the objset. We know that no
         * valid objset can be freed while we hold os_lock, so we can safely
         * ensure that the objset remains in use.
         */
        mutex_enter(&os->os_lock);

        /*
         * Recheck the objset pointer in case the dnode was removed just before
         * acquiring the lock.
         */
        if (os != odn->dn_objset) {
                mutex_exit(&os->os_lock);
                rw_exit(&os_lock);
                DNODE_STAT_ADD(dnode_move_stats.dms_dnode_recheck2);
                return (KMEM_CBRC_DONT_KNOW);
        }

        /*
         * At this point we know that as long as we hold os->os_lock, the dnode
         * cannot be freed and fields within the dnode can be safely accessed.
         * The objset listing this dnode cannot go away as long as this dnode is
         * on its list.
         */
        rw_exit(&os_lock);
        if (DMU_OBJECT_IS_SPECIAL(odn->dn_object)) {
                mutex_exit(&os->os_lock);
                DNODE_STAT_ADD(dnode_move_stats.dms_dnode_special);
                return (KMEM_CBRC_NO);
        }
        ASSERT(odn->dn_dbuf != NULL); /* only "special" dnodes have no parent */

        /*
         * Lock the dnode handle to prevent the dnode from obtaining any new
         * holds. This also prevents the descendant dbufs and the bonus dbuf
         * from accessing the dnode, so that we can discount their holds. The
         * handle is safe to access because we know that while the dnode cannot
         * go away, neither can its handle. Once we hold dnh_zrlock, we can
         * safely move any dnode referenced only by dbufs.
         */
        if (!zrl_tryenter(&odn->dn_handle->dnh_zrlock)) {
                mutex_exit(&os->os_lock);
                DNODE_STAT_ADD(dnode_move_stats.dms_dnode_handle);
                return (KMEM_CBRC_LATER);
        }

        /*
         * Ensure a consistent view of the dnode's holds and the dnode's dbufs.
         * We need to guarantee that there is a hold for every dbuf in order to
         * determine whether the dnode is actively referenced. Falsely matching
         * a dbuf to an active hold would lead to an unsafe move. It's possible
         * that a thread already having an active dnode hold is about to add a
         * dbuf, and we can't compare hold and dbuf counts while the add is in
         * progress.
         */
        if (!rw_tryenter(&odn->dn_struct_rwlock, RW_WRITER)) {
                zrl_exit(&odn->dn_handle->dnh_zrlock);
                mutex_exit(&os->os_lock);
                DNODE_STAT_ADD(dnode_move_stats.dms_dnode_rwlock);
                return (KMEM_CBRC_LATER);
        }

        /*
         * A dbuf may be removed (evicted) without an active dnode hold. In that
         * case, the dbuf count is decremented under the handle lock before the
         * dbuf's hold is released. This order ensures that if we count the hold
         * after the dbuf is removed but before its hold is released, we will
         * treat the unmatched hold as active and exit safely. If we count the
         * hold before the dbuf is removed, the hold is discounted, and the
         * removal is blocked until the move completes.
         */
        refcount = refcount_count(&odn->dn_holds);
        ASSERT(refcount >= 0);
        dbufs = odn->dn_dbufs_count;

        /* We can't have more dbufs than dnode holds. */
        ASSERT3U(dbufs, <=, refcount);
        DTRACE_PROBE3(dnode__move, dnode_t *, odn, int64_t, refcount,
            uint32_t, dbufs);

        if (refcount > dbufs) {
                rw_exit(&odn->dn_struct_rwlock);
                zrl_exit(&odn->dn_handle->dnh_zrlock);
                mutex_exit(&os->os_lock);
                DNODE_STAT_ADD(dnode_move_stats.dms_dnode_active);
                return (KMEM_CBRC_LATER);
        }

        rw_exit(&odn->dn_struct_rwlock);

        /*
         * At this point we know that anyone with a hold on the dnode is not
         * actively referencing it. The dnode is known and in a valid state to
         * move. We're holding the locks needed to execute the critical section.
         */
        dnode_move_impl(odn, ndn);

        list_link_replace(&odn->dn_link, &ndn->dn_link);
        /* If the dnode was safe to move, the refcount cannot have changed. */
        ASSERT(refcount == refcount_count(&ndn->dn_holds));
        ASSERT(dbufs == ndn->dn_dbufs_count);
        zrl_exit(&ndn->dn_handle->dnh_zrlock); /* handle has moved */
        mutex_exit(&os->os_lock);

        return (KMEM_CBRC_YES);
}
#endif  /* _KERNEL */

void
dnode_special_close(dnode_handle_t *dnh)
{
        dnode_t *dn = dnh->dnh_dnode;

        /*
         * Wait for final references to the dnode to clear.  This can
         * only happen if the arc is asynchronously evicting state that
         * has a hold on this dnode while we are trying to evict this
         * dnode.
         */
        while (refcount_count(&dn->dn_holds) > 0)
                delay(1);
        zrl_add(&dnh->dnh_zrlock);
        dnode_destroy(dn); /* implicit zrl_remove() */
        zrl_destroy(&dnh->dnh_zrlock);
        dnh->dnh_dnode = NULL;
}

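/*
 * Open a "special" dnode (e.g. the meta dnode), which is embedded in the
 * objset rather than backed by a dbuf.
 */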
dnode_t *
dnode_special_open(objset_t *os, dnode_phys_t *dnp, uint64_t object,
    dnode_handle_t *dnh)
{
        dnode_t *dn = dnode_create(os, dnp, NULL, object, dnh);
        dnh->dnh_dnode = dn;
        zrl_init(&dnh->dnh_zrlock);
        DNODE_VERIFY(dn);
        return (dn);
}

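/*
 * Eviction callback for a dbuf full of dnodes: destroy each child dnode
 * and free the array of dnode handles.
 */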
static void
dnode_buf_pageout(dmu_buf_user_t *dbu)
{
        dnode_children_t *children_dnodes = (dnode_children_t *)dbu;
        int i;

        for (i = 0; i < children_dnodes->dnc_count; i++) {
                dnode_handle_t *dnh = &children_dnodes->dnc_children[i];
                dnode_t *dn;

                /*
                 * The dnode handle lock guards against the dnode moving to
                 * another valid address, so there is no need here to guard
                 * against changes to or from NULL.
                 */
                if (dnh->dnh_dnode == NULL) {
                        zrl_destroy(&dnh->dnh_zrlock);
                        continue;
                }

                zrl_add(&dnh->dnh_zrlock);
                dn = dnh->dnh_dnode;
                /*
                 * If there are holds on this dnode, then there should
                 * be holds on the dnode's containing dbuf as well; thus
                 * it wouldn't be eligible for eviction and this function
                 * would not have been called.
                 */
                ASSERT(refcount_is_zero(&dn->dn_holds));
                ASSERT(refcount_is_zero(&dn->dn_tx_holds));

                dnode_destroy(dn); /* implicit zrl_remove() */
                zrl_destroy(&dnh->dnh_zrlock);
                dnh->dnh_dnode = NULL;
        }
        kmem_free(children_dnodes, sizeof (dnode_children_t) +
            (children_dnodes->dnc_count - 1) * sizeof (dnode_handle_t));
}

/*
 * errors:
 * EINVAL - invalid object number.
 * EIO - i/o error.
 * succeeds even for free dnodes.
 */
int
dnode_hold_impl(objset_t *os, uint64_t object, int flag,
    void *tag, dnode_t **dnp)
{
        int epb, idx, err;
        int drop_struct_lock = FALSE;
        int type;
        uint64_t blk;
        dnode_t *mdn, *dn;
        dmu_buf_impl_t *db;
        dnode_children_t *children_dnodes;
        dnode_handle_t *dnh;

        /*
         * If you are holding the spa config lock as writer, you shouldn't
         * be asking the DMU to do *anything* unless it's the root pool
         * which may require us to read from the root filesystem while
         * holding some (not all) of the locks as writer.
         */
        ASSERT(spa_config_held(os->os_spa, SCL_ALL, RW_WRITER) == 0 ||
            (spa_is_root(os->os_spa) &&
            spa_config_held(os->os_spa, SCL_STATE, RW_WRITER)));

        if (object == DMU_USERUSED_OBJECT || object == DMU_GROUPUSED_OBJECT) {
                dn = (object == DMU_USERUSED_OBJECT) ?
                    DMU_USERUSED_DNODE(os) : DMU_GROUPUSED_DNODE(os);
                if (dn == NULL)
                        return (SET_ERROR(ENOENT));
                type = dn->dn_type;
                if ((flag & DNODE_MUST_BE_ALLOCATED) && type == DMU_OT_NONE)
                        return (SET_ERROR(ENOENT));
                if ((flag & DNODE_MUST_BE_FREE) && type != DMU_OT_NONE)
                        return (SET_ERROR(EEXIST));
                DNODE_VERIFY(dn);
                (void) refcount_add(&dn->dn_holds, tag);
                *dnp = dn;
                return (0);
        }

        if (object == 0 || object >= DN_MAX_OBJECT)
                return (SET_ERROR(EINVAL));

        mdn = DMU_META_DNODE(os);
        ASSERT(mdn->dn_object == DMU_META_DNODE_OBJECT);

        DNODE_VERIFY(mdn);

        if (!RW_WRITE_HELD(&mdn->dn_struct_rwlock)) {
                rw_enter(&mdn->dn_struct_rwlock, RW_READER);
                drop_struct_lock = TRUE;
        }

        blk = dbuf_whichblock(mdn, object * sizeof (dnode_phys_t));

        db = dbuf_hold(mdn, blk, FTAG);
        if (drop_struct_lock)
                rw_exit(&mdn->dn_struct_rwlock);
        if (db == NULL)
                return (SET_ERROR(EIO));
        err = dbuf_read(db, NULL, DB_RF_CANFAIL);
        if (err) {
                dbuf_rele(db, FTAG);
                return (err);
        }

        ASSERT3U(db->db.db_size, >=, 1<<DNODE_SHIFT);
        epb = db->db.db_size >> DNODE_SHIFT;

        idx = object & (epb-1);

        ASSERT(DB_DNODE(db)->dn_type == DMU_OT_DNODE);
        children_dnodes = (dnode_children_t *)dmu_buf_get_user(&db->db);
        if (children_dnodes == NULL) {
                int i;
                dnode_children_t *winner;
                children_dnodes = kmem_alloc(sizeof (dnode_children_t) +
                    (epb - 1) * sizeof (dnode_handle_t), KM_SLEEP);
                children_dnodes->dnc_count = epb;
                dnh = &children_dnodes->dnc_children[0];
                for (i = 0; i < epb; i++) {
                        zrl_init(&dnh[i].dnh_zrlock);
                        dnh[i].dnh_dnode = NULL;
                }
                dmu_buf_init_user(&children_dnodes->db_evict,
                    dnode_buf_pageout);
                winner = (dnode_children_t *)
                    dmu_buf_set_user(&db->db, &children_dnodes->db_evict);
                if (winner) {
                        kmem_free(children_dnodes, sizeof (dnode_children_t) +
                            (epb - 1) * sizeof (dnode_handle_t));
                        children_dnodes = winner;
                }
        }
        ASSERT(children_dnodes->dnc_count == epb);

        dnh = &children_dnodes->dnc_children[idx];
        zrl_add(&dnh->dnh_zrlock);
        if ((dn = dnh->dnh_dnode) == NULL) {
                dnode_phys_t *phys = (dnode_phys_t *)db->db.db_data+idx;
                dnode_t *winner;

                dn = dnode_create(os, phys, db, object, dnh);
                winner = atomic_cas_ptr(&dnh->dnh_dnode, NULL, dn);
                if (winner != NULL) {
                        zrl_add(&dnh->dnh_zrlock);
                        dnode_destroy(dn); /* implicit zrl_remove() */
                        dn = winner;
                }
        }

        mutex_enter(&dn->dn_mtx);
        type = dn->dn_type;
        if (dn->dn_free_txg ||
            ((flag & DNODE_MUST_BE_ALLOCATED) && type == DMU_OT_NONE) ||
            ((flag & DNODE_MUST_BE_FREE) &&
            (type != DMU_OT_NONE || !refcount_is_zero(&dn->dn_holds)))) {
                mutex_exit(&dn->dn_mtx);
                zrl_remove(&dnh->dnh_zrlock);
                dbuf_rele(db, FTAG);
                return (type == DMU_OT_NONE ? ENOENT : EEXIST);
        }
        mutex_exit(&dn->dn_mtx);

        if (refcount_add(&dn->dn_holds, tag) == 1)
                dbuf_add_ref(db, dnh);
        /* Now we can rely on the hold to prevent the dnode from moving. */
        zrl_remove(&dnh->dnh_zrlock);

        DNODE_VERIFY(dn);
        ASSERT3P(dn->dn_dbuf, ==, db);
        ASSERT3U(dn->dn_object, ==, object);
        dbuf_rele(db, FTAG);

        *dnp = dn;
        return (0);
}

/*
 * Return held dnode if the object is allocated, NULL if not.
 */
int
dnode_hold(objset_t *os, uint64_t object, void *tag, dnode_t **dnp)
{
        return (dnode_hold_impl(os, object, DNODE_MUST_BE_ALLOCATED, tag, dnp));
}

/*
 * Can only add a reference if there is already at least one
 * reference on the dnode.  Returns FALSE if unable to add a
 * new reference.
 */
boolean_t
dnode_add_ref(dnode_t *dn, void *tag)
{
        mutex_enter(&dn->dn_mtx);
        if (refcount_is_zero(&dn->dn_holds)) {
                mutex_exit(&dn->dn_mtx);
                return (FALSE);
        }
        VERIFY(1 < refcount_add(&dn->dn_holds, tag));
        mutex_exit(&dn->dn_mtx);
        return (TRUE);
}

void
dnode_rele(dnode_t *dn, void *tag)
{
        uint64_t refs;
        /* Get while the hold prevents the dnode from moving. */
        dmu_buf_impl_t *db = dn->dn_dbuf;
        dnode_handle_t *dnh = dn->dn_handle;

        mutex_enter(&dn->dn_mtx);
        refs = refcount_remove(&dn->dn_holds, tag);
        mutex_exit(&dn->dn_mtx);

        /*
         * It's unsafe to release the last hold on a dnode by dnode_rele() or
         * indirectly by dbuf_rele() while relying on the dnode handle to
         * prevent the dnode from moving, since releasing the last hold could
         * result in the dnode's parent dbuf evicting its dnode handles. For
         * that reason anyone calling dnode_rele() or dbuf_rele() without some
         * other direct or indirect hold on the dnode must first drop the dnode
         * handle.
         */
        ASSERT(refs > 0 || dnh->dnh_zrlock.zr_owner != curthread);

        /* NOTE: the DNODE_DNODE does not have a dn_dbuf */
        if (refs == 0 && db != NULL) {
                /*
                 * Another thread could add a hold to the dnode handle in
                 * dnode_hold_impl() while holding the parent dbuf. Since the
                 * hold on the parent dbuf prevents the handle from being
                 * destroyed, the hold on the handle is OK. We can't yet assert
                 * that the handle has zero references, but that will be
                 * asserted anyway when the handle gets destroyed.
                 */
                dbuf_rele(db, dnh);
        }
}

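/*
 * Mark the dnode dirty in the given transaction, adding it to the
 * objset's dirty (or free) list for the txg and taking a "dirty hold"
 * so the dnode persists until its children have been processed.
 */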
void
dnode_setdirty(dnode_t *dn, dmu_tx_t *tx)
{
        objset_t *os = dn->dn_objset;
        uint64_t txg = tx->tx_txg;

        if (DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
                dsl_dataset_dirty(os->os_dsl_dataset, tx);
                return;
        }

        DNODE_VERIFY(dn);

#ifdef ZFS_DEBUG
        mutex_enter(&dn->dn_mtx);
        ASSERT(dn->dn_phys->dn_type || dn->dn_allocated_txg);
        ASSERT(dn->dn_free_txg == 0 || dn->dn_free_txg >= txg);
        mutex_exit(&dn->dn_mtx);
#endif

        /*
         * Determine old uid/gid when necessary
         */
        dmu_objset_userquota_get_ids(dn, B_TRUE, tx);

        mutex_enter(&os->os_lock);

        /*
         * If we are already marked dirty, we're done.
         */
        if (list_link_active(&dn->dn_dirty_link[txg & TXG_MASK])) {
                mutex_exit(&os->os_lock);
                return;
        }

        ASSERT(!refcount_is_zero(&dn->dn_holds) || list_head(&dn->dn_dbufs));
        ASSERT(dn->dn_datablksz != 0);
        ASSERT0(dn->dn_next_bonuslen[txg&TXG_MASK]);
        ASSERT0(dn->dn_next_blksz[txg&TXG_MASK]);
        ASSERT0(dn->dn_next_bonustype[txg&TXG_MASK]);

        dprintf_ds(os->os_dsl_dataset, "obj=%llu txg=%llu\n",
            dn->dn_object, txg);

        if (dn->dn_free_txg > 0 && dn->dn_free_txg <= txg) {
                list_insert_tail(&os->os_free_dnodes[txg&TXG_MASK], dn);
        } else {
                list_insert_tail(&os->os_dirty_dnodes[txg&TXG_MASK], dn);
        }

        mutex_exit(&os->os_lock);

        /*
         * The dnode maintains a hold on its containing dbuf as
         * long as there are holds on it.  Each instantiated child
         * dbuf maintains a hold on the dnode.  When the last child
         * drops its hold, the dnode will drop its hold on the
         * containing dbuf. We add a "dirty hold" here so that the
         * dnode will hang around after we finish processing its
         * children.
         */
        VERIFY(dnode_add_ref(dn, (void *)(uintptr_t)tx->tx_txg));

        (void) dbuf_dirty(dn->dn_dbuf, tx);

        dsl_dataset_dirty(os->os_dsl_dataset, tx);
}

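/*
 * Mark the object for deletion by setting dn_free_txg, moving the dnode
 * from the objset's dirty list to its free list for the txg.
 */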
1277 void
1278 dnode_free(dnode_t *dn, dmu_tx_t *tx)
1279 {
1280         int txgoff = tx->tx_txg & TXG_MASK;
1281 
1282         dprintf("dn=%p txg=%llu\n", dn, tx->tx_txg);
1283 
1284         /* we should be the only holder... hopefully */
1285         /* ASSERT3U(refcount_count(&dn->dn_holds), ==, 1); */
1286 
1287         mutex_enter(&dn->dn_mtx);
1288         if (dn->dn_type == DMU_OT_NONE || dn->dn_free_txg) {
1289                 mutex_exit(&dn->dn_mtx);
1290                 return;
1291         }
1292         dn->dn_free_txg = tx->tx_txg;
1293         mutex_exit(&dn->dn_mtx);
1294 
1295         /*
1296          * If the dnode is already dirty, it needs to be moved from
1297          * the dirty list to the free list.
1298          */
1299         mutex_enter(&dn->dn_objset->os_lock);
1300         if (list_link_active(&dn->dn_dirty_link[txgoff])) {
1301                 list_remove(&dn->dn_objset->os_dirty_dnodes[txgoff], dn);
1302                 list_insert_tail(&dn->dn_objset->os_free_dnodes[txgoff], dn);
1303                 mutex_exit(&dn->dn_objset->os_lock);
1304         } else {
1305                 mutex_exit(&dn->dn_objset->os_lock);
1306                 dnode_setdirty(dn, tx);
1307         }
1308 }
1309 
1310 /*
1311  * Try to change the block size for the indicated dnode.  This can only
1312  * succeed if there are no blocks allocated or dirty beyond first block
1313  */
1314 int
1315 dnode_set_blksz(dnode_t *dn, uint64_t size, int ibs, dmu_tx_t *tx)
1316 {
1317         dmu_buf_impl_t *db, *db_next;
1318         int err;
1319 
1320         if (size == 0)
1321                 size = SPA_MINBLOCKSIZE;
1322         if (size > SPA_MAXBLOCKSIZE)
1323                 size = SPA_MAXBLOCKSIZE;
1324         else
1325                 size = P2ROUNDUP(size, SPA_MINBLOCKSIZE);
1326 
1327         if (ibs == dn->dn_indblkshift)
1328                 ibs = 0;
1329 
1330         if (size >> SPA_MINBLOCKSHIFT == dn->dn_datablkszsec && ibs == 0)
1331                 return (0);
1332 
1333         rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
1334 
1335         /* Check for any allocated blocks beyond the first */
1336         if (dn->dn_phys->dn_maxblkid != 0)
1337                 goto fail;
1338 
1339         mutex_enter(&dn->dn_dbufs_mtx);
1340         for (db = list_head(&dn->dn_dbufs); db; db = db_next) {
1341                 db_next = list_next(&dn->dn_dbufs, db);
1342 
1343                 if (db->db_blkid != 0 && db->db_blkid != DMU_BONUS_BLKID &&
1344                     db->db_blkid != DMU_SPILL_BLKID) {
1345                         mutex_exit(&dn->dn_dbufs_mtx);
1346                         goto fail;
1347                 }
1348         }
1349         mutex_exit(&dn->dn_dbufs_mtx);
1350 
1351         if (ibs && dn->dn_nlevels != 1)
1352                 goto fail;
1353 
1354         /* resize the old block */
1355         err = dbuf_hold_impl(dn, 0, 0, TRUE, FTAG, &db);
1356         if (err == 0)
1357                 dbuf_new_size(db, size, tx);
1358         else if (err != ENOENT)
1359                 goto fail;
1360 
1361         dnode_setdblksz(dn, size);
1362         dnode_setdirty(dn, tx);
1363         dn->dn_next_blksz[tx->tx_txg&TXG_MASK] = size;
1364         if (ibs) {
1365                 dn->dn_indblkshift = ibs;
1366                 dn->dn_next_indblkshift[tx->tx_txg&TXG_MASK] = ibs;
1367         }
1368         /* rele after we have fixed the blocksize in the dnode */
1369         if (db)
1370                 dbuf_rele(db, FTAG);
1371 
1372         rw_exit(&dn->dn_struct_rwlock);
1373         return (0);
1374 
1375 fail:
1376         rw_exit(&dn->dn_struct_rwlock);
1377         return (SET_ERROR(ENOTSUP));
1378 }
1379 
1380 /* read-holding callers must not rely on the lock being continuously held */
1381 void
1382 dnode_new_blkid(dnode_t *dn, uint64_t blkid, dmu_tx_t *tx, boolean_t have_read)
1383 {
1384         uint64_t txgoff = tx->tx_txg & TXG_MASK;
1385         int epbs, new_nlevels;
1386         uint64_t sz;
1387 
1388         ASSERT(blkid != DMU_BONUS_BLKID);
1389 
1390         ASSERT(have_read ?
1391             RW_READ_HELD(&dn->dn_struct_rwlock) :
1392             RW_WRITE_HELD(&dn->dn_struct_rwlock));
1393 
1394         /*
1395          * if we have a read-lock, check to see if we need to do any work
1396          * before upgrading to a write-lock.
1397          */
1398         if (have_read) {
1399                 if (blkid <= dn->dn_maxblkid)
1400                         return;
1401 
1402                 if (!rw_tryupgrade(&dn->dn_struct_rwlock)) {
1403                         rw_exit(&dn->dn_struct_rwlock);
1404                         rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
1405                 }
1406         }
1407 
1408         if (blkid <= dn->dn_maxblkid)
1409                 goto out;
1410 
1411         dn->dn_maxblkid = blkid;
1412 
1413         /*
1414          * Compute the number of levels necessary to support the new maxblkid.
1415          */
1416         new_nlevels = 1;
1417         epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
1418         for (sz = dn->dn_nblkptr;
1419             sz <= blkid && sz >= dn->dn_nblkptr; sz <<= epbs)
1420                 new_nlevels++;
1421 
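        /*
         * For example, with dn_nblkptr == 3 and epbs == 7 (128 block
         * pointers per indirect block): one level suffices for blkids
         * 0-2, two levels cover blkids up to 3*128 - 1 == 383, and
         * three levels cover up to 3*128^2 - 1 == 49151.  The
         * "sz >= dn->dn_nblkptr" clause terminates the loop if
         * "sz <<= epbs" ever wraps past 2^64.
         */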
1422         if (new_nlevels > dn->dn_nlevels) {
1423                 int old_nlevels = dn->dn_nlevels;
1424                 dmu_buf_impl_t *db;
1425                 list_t *list;
1426                 dbuf_dirty_record_t *new, *dr, *dr_next;
1427 
1428                 dn->dn_nlevels = new_nlevels;
1429 
1430                 ASSERT3U(new_nlevels, >, dn->dn_next_nlevels[txgoff]);
1431                 dn->dn_next_nlevels[txgoff] = new_nlevels;
1432 
1433                 /* dirty the left indirects */
1434                 db = dbuf_hold_level(dn, old_nlevels, 0, FTAG);
1435                 ASSERT(db != NULL);
1436                 new = dbuf_dirty(db, tx);
1437                 dbuf_rele(db, FTAG);
1438 
1439                 /* transfer the dirty records to the new indirect */
1440                 mutex_enter(&dn->dn_mtx);
1441                 mutex_enter(&new->dt.di.dr_mtx);
1442                 list = &dn->dn_dirty_records[txgoff];
                for (dr = list_head(list); dr; dr = dr_next) {
                        dr_next = list_next(list, dr);
                        if (dr->dr_dbuf->db_level != new_nlevels-1 &&
                            dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID &&
                            dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID) {
                                ASSERT(dr->dr_dbuf->db_level == old_nlevels-1);
                                list_remove(list, dr);
                                list_insert_tail(&new->dt.di.dr_children, dr);
                                dr->dr_parent = new;
                        }
                }
1454                 mutex_exit(&new->dt.di.dr_mtx);
1455                 mutex_exit(&dn->dn_mtx);
1456         }
1457 
1458 out:
1459         if (have_read)
1460                 rw_downgrade(&dn->dn_struct_rwlock);
1461 }
1462 
1463 void
1464 dnode_clear_range(dnode_t *dn, uint64_t blkid, uint64_t nblks, dmu_tx_t *tx)
1465 {
1466         avl_tree_t *tree = &dn->dn_ranges[tx->tx_txg&TXG_MASK];
1467         avl_index_t where;
1468         free_range_t *rp;
1469         free_range_t rp_tofind;
1470         uint64_t endblk = blkid + nblks;
1471 
1472         ASSERT(MUTEX_HELD(&dn->dn_mtx));
1473         ASSERT(nblks <= UINT64_MAX - blkid); /* no overflow */
1474 
1475         dprintf_dnode(dn, "blkid=%llu nblks=%llu txg=%llu\n",
1476             blkid, nblks, tx->tx_txg);
1477         rp_tofind.fr_blkid = blkid;
1478         rp = avl_find(tree, &rp_tofind, &where);
1479         if (rp == NULL)
1480                 rp = avl_nearest(tree, where, AVL_BEFORE);
1481         if (rp == NULL)
1482                 rp = avl_nearest(tree, where, AVL_AFTER);
1483 
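        /*
         * Each pass below handles one of four possible overlaps between
         * the range being cleared, C = [blkid, endblk), and an existing
         * entry, E = [fr_blkid, fr_endblk):
         *
         *      CCCCCC          C covers E: remove the entry
         *        EE
         *
         *      CCC             C covers the front of E: advance fr_blkid
         *        EEEE
         *
         *         CCC          C covers the back of E: shrink fr_nblks
         *      EEEE
         *
         *       CC             C is interior to E: split E in two
         *      EEEEEE
         */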
        while (rp && (rp->fr_blkid <= endblk)) {
1485                 uint64_t fr_endblk = rp->fr_blkid + rp->fr_nblks;
1486                 free_range_t *nrp = AVL_NEXT(tree, rp);
1487 
1488                 if (blkid <= rp->fr_blkid && endblk >= fr_endblk) {
1489                         /* clear this entire range */
1490                         avl_remove(tree, rp);
1491                         kmem_free(rp, sizeof (free_range_t));
1492                 } else if (blkid <= rp->fr_blkid &&
1493                     endblk > rp->fr_blkid && endblk < fr_endblk) {
1494                         /* clear the beginning of this range */
1495                         rp->fr_blkid = endblk;
1496                         rp->fr_nblks = fr_endblk - endblk;
1497                 } else if (blkid > rp->fr_blkid && blkid < fr_endblk &&
1498                     endblk >= fr_endblk) {
1499                         /* clear the end of this range */
1500                         rp->fr_nblks = blkid - rp->fr_blkid;
1501                 } else if (blkid > rp->fr_blkid && endblk < fr_endblk) {
1502                         /* clear a chunk out of this range */
1503                         free_range_t *new_rp =
1504                             kmem_alloc(sizeof (free_range_t), KM_SLEEP);
1505 
1506                         new_rp->fr_blkid = endblk;
1507                         new_rp->fr_nblks = fr_endblk - endblk;
1508                         avl_insert_here(tree, new_rp, rp, AVL_AFTER);
1509                         rp->fr_nblks = blkid - rp->fr_blkid;
1510                 }
1511                 /* there may be no overlap */
1512                 rp = nrp;
1513         }
1514 }
1515 
1516 void
1517 dnode_free_range(dnode_t *dn, uint64_t off, uint64_t len, dmu_tx_t *tx)
1518 {
1519         dmu_buf_impl_t *db;
1520         uint64_t blkoff, blkid, nblks;
1521         int blksz, blkshift, head, tail;
1522         int trunc = FALSE;
1523         int epbs;
1524 
1525         rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
1526         blksz = dn->dn_datablksz;
1527         blkshift = dn->dn_datablkshift;
1528         epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
1529 
1530         if (len == -1ULL) {
1531                 len = UINT64_MAX - off;
1532                 trunc = TRUE;
1533         }
1534 
1535         /*
1536          * First, block align the region to free:
1537          */
1538         if (ISP2(blksz)) {
1539                 head = P2NPHASE(off, blksz);
1540                 blkoff = P2PHASE(off, blksz);
1541                 if ((off >> blkshift) > dn->dn_maxblkid)
1542                         goto out;
1543         } else {
1544                 ASSERT(dn->dn_maxblkid == 0);
1545                 if (off == 0 && len >= blksz) {
1546                         /* Freeing the whole block; fast-track this request */
1547                         blkid = 0;
1548                         nblks = 1;
1549                         goto done;
1550                 } else if (off >= blksz) {
1551                         /* Freeing past end-of-data */
1552                         goto out;
1553                 } else {
1554                         /* Freeing part of the block. */
1555                         head = blksz - off;
1556                         ASSERT3U(head, >, 0);
1557                 }
1558                 blkoff = off;
1559         }
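        /*
         * Worked example (illustrative numbers): with a power-of-2
         * blksz == 8192, off == 10000 and len == 20000 give
         * blkoff == P2PHASE(10000, 8192) == 1808 and
         * head == P2NPHASE(10000, 8192) == 6384, i.e. the bytes that
         * pad the start of the range out to the block boundary at 16384.
         */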
1560         /* zero out any partial block data at the start of the range */
1561         if (head) {
1562                 ASSERT3U(blkoff + head, ==, blksz);
1563                 if (len < head)
1564                         head = len;
1565                 if (dbuf_hold_impl(dn, 0, dbuf_whichblock(dn, off), TRUE,
1566                     FTAG, &db) == 0) {
1567                         caddr_t data;
1568 
1569                         /* don't dirty if it isn't on disk and isn't dirty */
1570                         if (db->db_last_dirty ||
1571                             (db->db_blkptr && !BP_IS_HOLE(db->db_blkptr))) {
1572                                 rw_exit(&dn->dn_struct_rwlock);
1573                                 dbuf_will_dirty(db, tx);
1574                                 rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
1575                                 data = db->db.db_data;
1576                                 bzero(data + blkoff, head);
1577                         }
1578                         dbuf_rele(db, FTAG);
1579                 }
1580                 off += head;
1581                 len -= head;
1582         }
1583 
1584         /* If the range was less than one block, we're done */
1585         if (len == 0)
1586                 goto out;
1587 
1588         /* If the remaining range is past end of file, we're done */
1589         if ((off >> blkshift) > dn->dn_maxblkid)
1590                 goto out;
1591 
1592         ASSERT(ISP2(blksz));
1593         if (trunc)
1594                 tail = 0;
1595         else
1596                 tail = P2PHASE(len, blksz);
1597 
1598         ASSERT0(P2PHASE(off, blksz));
1599         /* zero out any partial block data at the end of the range */
1600         if (tail) {
1601                 if (len < tail)
1602                         tail = len;
1603                 if (dbuf_hold_impl(dn, 0, dbuf_whichblock(dn, off+len),
1604                     TRUE, FTAG, &db) == 0) {
1605                         /* don't dirty if not on disk and not dirty */
1606                         if (db->db_last_dirty ||
1607                             (db->db_blkptr && !BP_IS_HOLE(db->db_blkptr))) {
1608                                 rw_exit(&dn->dn_struct_rwlock);
1609                                 dbuf_will_dirty(db, tx);
1610                                 rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
1611                                 bzero(db->db.db_data, tail);
1612                         }
1613                         dbuf_rele(db, FTAG);
1614                 }
1615                 len -= tail;
1616         }
1617 
1618         /* If the range did not include a full block, we are done */
1619         if (len == 0)
1620                 goto out;
1621 
1622         ASSERT(IS_P2ALIGNED(off, blksz));
1623         ASSERT(trunc || IS_P2ALIGNED(len, blksz));
1624         blkid = off >> blkshift;
1625         nblks = len >> blkshift;
1626         if (trunc)
1627                 nblks += 1;
1628 
1629         /*
1630          * Read in and mark all the level-1 indirects dirty,
1631          * so that they will stay in memory until syncing phase.
1632          * Always dirty the first and last indirect to make sure
1633          * we dirty all the partial indirects.
1634          */
1635         if (dn->dn_nlevels > 1) {
1636                 uint64_t i, first, last;
1637                 int shift = epbs + dn->dn_datablkshift;
1638 
1639                 first = blkid >> epbs;
                if ((db = dbuf_hold_level(dn, 1, first, FTAG)) != NULL) {
1641                         dbuf_will_dirty(db, tx);
1642                         dbuf_rele(db, FTAG);
1643                 }
1644                 if (trunc)
1645                         last = dn->dn_maxblkid >> epbs;
1646                 else
1647                         last = (blkid + nblks - 1) >> epbs;
1648                 if (last > first && (db = dbuf_hold_level(dn, 1, last, FTAG))) {
1649                         dbuf_will_dirty(db, tx);
1650                         dbuf_rele(db, FTAG);
1651                 }
1652                 for (i = first + 1; i < last; i++) {
1653                         uint64_t ibyte = i << shift;
1654                         int err;
1655 
1656                         err = dnode_next_offset(dn,
1657                             DNODE_FIND_HAVELOCK, &ibyte, 1, 1, 0);
1658                         i = ibyte >> shift;
1659                         if (err == ESRCH || i >= last)
1660                                 break;
1661                         ASSERT(err == 0);
1662                         db = dbuf_hold_level(dn, 1, i, FTAG);
1663                         if (db) {
1664                                 dbuf_will_dirty(db, tx);
1665                                 dbuf_rele(db, FTAG);
1666                         }
1667                 }
1668         }
1669 done:
1670         /*
1671          * Add this range to the dnode range list.
1672          * We will finish up this free operation in the syncing phase.
1673          */
1674         mutex_enter(&dn->dn_mtx);
1675         dnode_clear_range(dn, blkid, nblks, tx);
1676         {
1677                 free_range_t *rp, *found;
1678                 avl_index_t where;
1679                 avl_tree_t *tree = &dn->dn_ranges[tx->tx_txg&TXG_MASK];
1680 
1681                 /* Add new range to dn_ranges */
1682                 rp = kmem_alloc(sizeof (free_range_t), KM_SLEEP);
1683                 rp->fr_blkid = blkid;
1684                 rp->fr_nblks = nblks;
1685                 found = avl_find(tree, rp, &where);
1686                 ASSERT(found == NULL);
1687                 avl_insert(tree, rp, where);
1688                 dprintf_dnode(dn, "blkid=%llu nblks=%llu txg=%llu\n",
1689                     blkid, nblks, tx->tx_txg);
1690         }
1691         mutex_exit(&dn->dn_mtx);
1692 
1693         dbuf_free_range(dn, blkid, blkid + nblks - 1, tx);
1694         dnode_setdirty(dn, tx);
1695 out:
1696         if (trunc && dn->dn_maxblkid >= (off >> blkshift))
1697                 dn->dn_maxblkid = (off >> blkshift ? (off >> blkshift) - 1 : 0);
1698 
1699         rw_exit(&dn->dn_struct_rwlock);
1700 }
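
/*
 * Illustrative sketches (hypothetical callers, not from this file):
 *
 * dnode_free_range(dn, newsize, -1ULL, tx);
 *      Truncation: a len of -1ULL frees everything from newsize
 *      through end-of-object and pulls dn_maxblkid back.
 *
 * dnode_free_range(dn, off, len, tx);
 *      Hole punching: frees whole blocks in the interior of the range
 *      and zeroes the partial head and tail blocks in place.
 */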
1701 
1702 static boolean_t
1703 dnode_spill_freed(dnode_t *dn)
1704 {
1705         int i;
1706 
1707         mutex_enter(&dn->dn_mtx);
1708         for (i = 0; i < TXG_SIZE; i++) {
1709                 if (dn->dn_rm_spillblk[i] == DN_KILL_SPILLBLK)
1710                         break;
1711         }
1712         mutex_exit(&dn->dn_mtx);
1713         return (i < TXG_SIZE);
1714 }
1715 
1716 /* return TRUE if this blkid was freed in a recent txg, or FALSE if it wasn't */
1717 uint64_t
1718 dnode_block_freed(dnode_t *dn, uint64_t blkid)
1719 {
1720         free_range_t range_tofind;
1721         void *dp = spa_get_dsl(dn->dn_objset->os_spa);
1722         int i;
1723 
1724         if (blkid == DMU_BONUS_BLKID)
1725                 return (FALSE);
1726 
1727         /*
1728          * If we're in the process of opening the pool, dp will not be
1729          * set yet, but there shouldn't be anything dirty.
1730          */
1731         if (dp == NULL)
1732                 return (FALSE);
1733 
1734         if (dn->dn_free_txg)
1735                 return (TRUE);
1736 
1737         if (blkid == DMU_SPILL_BLKID)
1738                 return (dnode_spill_freed(dn));
1739 
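        /*
         * Check each open txg's free-range tree.  An exact match on
         * fr_blkid hits directly; otherwise the nearest entry starting
         * before blkid covers it iff fr_blkid + fr_nblks > blkid.  For
         * example, an entry {fr_blkid = 10, fr_nblks = 5} covers blkids
         * 10 through 14.
         */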
1740         range_tofind.fr_blkid = blkid;
1741         mutex_enter(&dn->dn_mtx);
1742         for (i = 0; i < TXG_SIZE; i++) {
1743                 free_range_t *range_found;
1744                 avl_index_t idx;
1745 
1746                 range_found = avl_find(&dn->dn_ranges[i], &range_tofind, &idx);
1747                 if (range_found) {
1748                         ASSERT(range_found->fr_nblks > 0);
1749                         break;
1750                 }
1751                 range_found = avl_nearest(&dn->dn_ranges[i], idx, AVL_BEFORE);
1752                 if (range_found &&
1753                     range_found->fr_blkid + range_found->fr_nblks > blkid)
1754                         break;
1755         }
1756         mutex_exit(&dn->dn_mtx);
1757         return (i < TXG_SIZE);
1758 }
1759 
1760 /* call from syncing context when we actually write/free space for this dnode */
1761 void
1762 dnode_diduse_space(dnode_t *dn, int64_t delta)
1763 {
1764         uint64_t space;
1765         dprintf_dnode(dn, "dn=%p dnp=%p used=%llu delta=%lld\n",
1766             dn, dn->dn_phys,
1767             (u_longlong_t)dn->dn_phys->dn_used,
1768             (longlong_t)delta);
1769 
1770         mutex_enter(&dn->dn_mtx);
1771         space = DN_USED_BYTES(dn->dn_phys);
1772         if (delta > 0) {
1773                 ASSERT3U(space + delta, >=, space); /* no overflow */
1774         } else {
1775                 ASSERT3U(space, >=, -delta); /* no underflow */
1776         }
1777         space += delta;
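        /*
         * Pools older than SPA_VERSION_DNODE_BYTES store dn_used as a
         * count of DEV_BSIZE (512-byte) sectors; newer pools store a
         * byte count and set DNODE_FLAG_USED_BYTES to distinguish the
         * two encodings.
         */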
1778         if (spa_version(dn->dn_objset->os_spa) < SPA_VERSION_DNODE_BYTES) {
1779                 ASSERT((dn->dn_phys->dn_flags & DNODE_FLAG_USED_BYTES) == 0);
1780                 ASSERT0(P2PHASE(space, 1<<DEV_BSHIFT));
1781                 dn->dn_phys->dn_used = space >> DEV_BSHIFT;
1782         } else {
1783                 dn->dn_phys->dn_used = space;
1784                 dn->dn_phys->dn_flags |= DNODE_FLAG_USED_BYTES;
1785         }
1786         mutex_exit(&dn->dn_mtx);
1787 }
1788 
/*
 * Call when we think we're going to write/free space in open context.
 * Be conservative (i.e., it is OK to write less than this or free more
 * than this, but don't write more or free less).
 */
1794 void
1795 dnode_willuse_space(dnode_t *dn, int64_t space, dmu_tx_t *tx)
1796 {
1797         objset_t *os = dn->dn_objset;
1798         dsl_dataset_t *ds = os->os_dsl_dataset;
1799 
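        /*
         * Inflate the logical size to a conservative worst-case
         * allocated size (accounting for, e.g., raidz parity and ditto
         * copies), so that we reserve at least as much as we could end
         * up allocating.
         */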
1800         if (space > 0)
1801                 space = spa_get_asize(os->os_spa, space);
1802 
1803         if (ds)
1804                 dsl_dir_willuse_space(ds->ds_dir, space, tx);
1805 
1806         dmu_tx_willuse_space(tx, space);
1807 }
1808 
1809 /*
1810  * This function scans a block at the indicated "level" looking for
1811  * a hole or data (depending on 'flags').  If level > 0, then we are
1812  * scanning an indirect block looking at its pointers.  If level == 0,
1813  * then we are looking at a block of dnodes.  If we don't find what we
1814  * are looking for in the block, we return ESRCH.  Otherwise, return
1815  * with *offset pointing to the beginning (if searching forwards) or
1816  * end (if searching backwards) of the range covered by the block
1817  * pointer we matched on (or dnode).
1818  *
1819  * The basic search algorithm used below by dnode_next_offset() is to
1820  * use this function to search up the block tree (widen the search) until
1821  * we find something (i.e., we don't return ESRCH) and then search back
1822  * down the tree (narrow the search) until we reach our original search
1823  * level.
1824  */
1825 static int
1826 dnode_next_offset_level(dnode_t *dn, int flags, uint64_t *offset,
1827         int lvl, uint64_t blkfill, uint64_t txg)
1828 {
1829         dmu_buf_impl_t *db = NULL;
1830         void *data = NULL;
1831         uint64_t epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
1832         uint64_t epb = 1ULL << epbs;
1833         uint64_t minfill, maxfill;
1834         boolean_t hole;
1835         int i, inc, error, span;
1836 
1837         dprintf("probing object %llu offset %llx level %d of %u\n",
1838             dn->dn_object, *offset, lvl, dn->dn_phys->dn_nlevels);
1839 
1840         hole = ((flags & DNODE_FIND_HOLE) != 0);
1841         inc = (flags & DNODE_FIND_BACKWARDS) ? -1 : 1;
1842         ASSERT(txg == 0 || !hole);
1843 
1844         if (lvl == dn->dn_phys->dn_nlevels) {
1845                 error = 0;
1846                 epb = dn->dn_phys->dn_nblkptr;
1847                 data = dn->dn_phys->dn_blkptr;
1848         } else {
1849                 uint64_t blkid = dbuf_whichblock(dn, *offset) >> (epbs * lvl);
1850                 error = dbuf_hold_impl(dn, lvl, blkid, TRUE, FTAG, &db);
1851                 if (error) {
1852                         if (error != ENOENT)
1853                                 return (error);
1854                         if (hole)
1855                                 return (0);
                        /*
                         * This can only happen when we are searching up
                         * the block tree for data.  We don't really need to
                         * adjust the offset, as we will just end up looking
                         * at the pointer to this block in its parent, and it's
                         * going to be unallocated, so we will skip over it.
                         */
1863                         return (SET_ERROR(ESRCH));
1864                 }
1865                 error = dbuf_read(db, NULL, DB_RF_CANFAIL | DB_RF_HAVESTRUCT);
1866                 if (error) {
1867                         dbuf_rele(db, FTAG);
1868                         return (error);
1869                 }
1870                 data = db->db.db_data;
1871         }
1872 
1873         if (db && txg &&
1874             (db->db_blkptr == NULL || db->db_blkptr->blk_birth <= txg)) {
1875                 /*
1876                  * This can only happen when we are searching up the tree
1877                  * and these conditions mean that we need to keep climbing.
1878                  */
1879                 error = SET_ERROR(ESRCH);
1880         } else if (lvl == 0) {
1881                 dnode_phys_t *dnp = data;
1882                 span = DNODE_SHIFT;
1883                 ASSERT(dn->dn_type == DMU_OT_DNODE);
1884 
1885                 for (i = (*offset >> span) & (blkfill - 1);
1886                     i >= 0 && i < blkfill; i += inc) {
1887                         if ((dnp[i].dn_type == DMU_OT_NONE) == hole)
1888                                 break;
1889                         *offset += (1ULL << span) * inc;
1890                 }
1891                 if (i < 0 || i == blkfill)
1892                         error = SET_ERROR(ESRCH);
1893         } else {
1894                 blkptr_t *bp = data;
1895                 uint64_t start = *offset;
1896                 span = (lvl - 1) * epbs + dn->dn_datablkshift;
1897                 minfill = 0;
1898                 maxfill = blkfill << ((lvl - 1) * epbs);
1899 
1900                 if (hole)
1901                         maxfill--;
1902                 else
1903                         minfill++;
1904 
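                /*
                 * Example (illustrative numbers): at lvl == 1 with
                 * epbs == 7 and dn_datablkshift == 17, span == 17 and
                 * each block pointer covers one 128K data block.  With
                 * blkfill == 1, searching for data requires
                 * blk_fill >= 1, and searching for a hole requires
                 * blk_fill <= 0.
                 */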
1905                 *offset = *offset >> span;
1906                 for (i = BF64_GET(*offset, 0, epbs);
1907                     i >= 0 && i < epb; i += inc) {
1908                         if (bp[i].blk_fill >= minfill &&
1909                             bp[i].blk_fill <= maxfill &&
1910                             (hole || bp[i].blk_birth > txg))
1911                                 break;
1912                         if (inc > 0 || *offset > 0)
1913                                 *offset += inc;
1914                 }
1915                 *offset = *offset << span;
1916                 if (inc < 0) {
1917                         /* traversing backwards; position offset at the end */
1918                         ASSERT3U(*offset, <=, start);
1919                         *offset = MIN(*offset + (1ULL << span) - 1, start);
1920                 } else if (*offset < start) {
1921                         *offset = start;
1922                 }
1923                 if (i < 0 || i >= epb)
1924                         error = SET_ERROR(ESRCH);
1925         }
1926 
1927         if (db)
1928                 dbuf_rele(db, FTAG);
1929 
1930         return (error);
1931 }
1932 
1933 /*
1934  * Find the next hole, data, or sparse region at or after *offset.
1935  * The value 'blkfill' tells us how many items we expect to find
1936  * in an L0 data block; this value is 1 for normal objects,
1937  * DNODES_PER_BLOCK for the meta dnode, and some fraction of
1938  * DNODES_PER_BLOCK when searching for sparse regions thereof.
1939  *
1940  * Examples:
1941  *
1942  * dnode_next_offset(dn, flags, offset, 1, 1, 0);
1943  *      Finds the next/previous hole/data in a file.
1944  *      Used in dmu_offset_next().
1945  *
1946  * dnode_next_offset(mdn, flags, offset, 0, DNODES_PER_BLOCK, txg);
 *      Finds the next free/allocated dnode in an objset's meta-dnode.
 *      Only finds objects that have new contents since txg (i.e.,
1949  *      bonus buffer changes and content removal are ignored).
1950  *      Used in dmu_object_next().
1951  *
1952  * dnode_next_offset(mdn, DNODE_FIND_HOLE, offset, 2, DNODES_PER_BLOCK >> 2, 0);
1953  *      Finds the next L2 meta-dnode bp that's at most 1/4 full.
1954  *      Used in dmu_object_alloc().
1955  */
1956 int
1957 dnode_next_offset(dnode_t *dn, int flags, uint64_t *offset,
1958     int minlvl, uint64_t blkfill, uint64_t txg)
1959 {
1960         uint64_t initial_offset = *offset;
1961         int lvl, maxlvl;
1962         int error = 0;
1963 
1964         if (!(flags & DNODE_FIND_HAVELOCK))
1965                 rw_enter(&dn->dn_struct_rwlock, RW_READER);
1966 
1967         if (dn->dn_phys->dn_nlevels == 0) {
1968                 error = SET_ERROR(ESRCH);
1969                 goto out;
1970         }
1971 
1972         if (dn->dn_datablkshift == 0) {
1973                 if (*offset < dn->dn_datablksz) {
1974                         if (flags & DNODE_FIND_HOLE)
1975                                 *offset = dn->dn_datablksz;
1976                 } else {
1977                         error = SET_ERROR(ESRCH);
1978                 }
1979                 goto out;
1980         }
1981 
1982         maxlvl = dn->dn_phys->dn_nlevels;
1983 
1984         for (lvl = minlvl; lvl <= maxlvl; lvl++) {
1985                 error = dnode_next_offset_level(dn,
1986                     flags, offset, lvl, blkfill, txg);
1987                 if (error != ESRCH)
1988                         break;
1989         }
1990 
1991         while (error == 0 && --lvl >= minlvl) {
1992                 error = dnode_next_offset_level(dn,
1993                     flags, offset, lvl, blkfill, txg);
1994         }
1995 
1996         if (error == 0 && (flags & DNODE_FIND_BACKWARDS ?
1997             initial_offset < *offset : initial_offset > *offset))
1998                 error = SET_ERROR(ESRCH);
1999 out:
2000         if (!(flags & DNODE_FIND_HAVELOCK))
2001                 rw_exit(&dn->dn_struct_rwlock);
2002 
2003         return (error);
2004 }
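
/*
 * Illustrative sketch (hypothetical caller, not from this file): an
 * lseek(SEEK_HOLE)-style consumer might do
 *
 *      uint64_t off = start;
 *      int err = dnode_next_offset(dn, DNODE_FIND_HOLE, &off, 1, 1, 0);
 *
 * after which err == 0 means off is the start of the first hole at or
 * after start, and err == ESRCH means no hole was found before
 * end-of-object.
 */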