3006 VERIFY[S,U,P] and ASSERT[S,U,P] frequently check if first argument is zero
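
The two listings below show the same ZFS dnode-sync routines before and after the change: the first copy spells the zero checks out as ASSERT3U(x, ==, 0), the second uses the ASSERT0(x) shorthand that this issue introduces. As a rough sketch of the idea (the real definitions live in sys/debug.h; the exact macro bodies shown here are an assumption, not a quote of that header):

        /* Hypothetical sketch: shorthand for the common "is zero" checks. */
        #define ASSERT0(x)      ASSERT3U((x), ==, 0)
        #define VERIFY0(x)      VERIFY3U((x), ==, 0)

With the shorthand in place, a call such as ASSERT3U(err, ==, 0) in the first listing becomes ASSERT0(err) in the second, with no change in behavior.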


 257         if (dbend <= end)
 258                 end = dbend;
 259         else if (all)
 260                 all = trunc;
 261         ASSERT3U(start, <=, end);
 262 
 263         if (db->db_level == 1) {
 264                 FREE_VERIFY(db, start, end, tx);
 265                 blocks_freed = free_blocks(dn, bp, end-start+1, tx);
 266                 arc_buf_freeze(db->db_buf);
 267                 ASSERT(all || blocks_freed == 0 || db->db_last_dirty);
 268                 DB_DNODE_EXIT(db);
 269                 return (all ? ALL : blocks_freed);
 270         }
 271 
 272         for (i = start; i <= end; i++, bp++) {
 273                 if (BP_IS_HOLE(bp))
 274                         continue;
 275                 rw_enter(&dn->dn_struct_rwlock, RW_READER);
 276                 err = dbuf_hold_impl(dn, db->db_level-1, i, TRUE, FTAG, &subdb);
 277                 ASSERT3U(err, ==, 0);
 278                 rw_exit(&dn->dn_struct_rwlock);
 279 
 280                 if (free_children(subdb, blkid, nblks, trunc, tx) == ALL) {
 281                         ASSERT3P(subdb->db_blkptr, ==, bp);
 282                         blocks_freed += free_blocks(dn, bp, 1, tx);
 283                 } else {
 284                         all = FALSE;
 285                 }
 286                 dbuf_rele(subdb, FTAG);
 287         }
 288         DB_DNODE_EXIT(db);
 289         arc_buf_freeze(db->db_buf);
 290 #ifdef ZFS_DEBUG
 291         bp -= (end-start)+1;
 292         for (i = start; i <= end; i++, bp++) {
 293                 if (i == start && blkid != 0)
 294                         continue;
 295                 else if (i == end && !trunc)
 296                         continue;
 297                 ASSERT3U(bp->blk_birth, ==, 0);
 298         }
 299 #endif
 300         ASSERT(all || blocks_freed == 0 || db->db_last_dirty);
 301         return (all ? ALL : blocks_freed);
 302 }
 303 
 304 /*
 305  * free_range: Traverse the indicated range of the provided file
 306  * and "free" all the blocks contained there.
 307  */
 308 static void
 309 dnode_sync_free_range(dnode_t *dn, uint64_t blkid, uint64_t nblks, dmu_tx_t *tx)
 310 {
 311         blkptr_t *bp = dn->dn_phys->dn_blkptr;
 312         dmu_buf_impl_t *db;
 313         int trunc, start, end, shift, i, err;
 314         int dnlevel = dn->dn_phys->dn_nlevels;
 315 
 316         if (blkid > dn->dn_phys->dn_maxblkid)
 317                 return;


 333                         uint64_t off = (dn->dn_phys->dn_maxblkid + 1) *
 334                             (dn->dn_phys->dn_datablkszsec << SPA_MINBLOCKSHIFT);
 335                         dn->dn_phys->dn_maxblkid = (blkid ? blkid - 1 : 0);
 336                         ASSERT(off < dn->dn_phys->dn_maxblkid ||
 337                             dn->dn_phys->dn_maxblkid == 0 ||
 338                             dnode_next_offset(dn, 0, &off, 1, 1, 0) != 0);
 339                 }
 340                 return;
 341         }
 342 
 343         shift = (dnlevel - 1) * (dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT);
 344         start = blkid >> shift;
 345         ASSERT(start < dn->dn_phys->dn_nblkptr);
 346         end = (blkid + nblks - 1) >> shift;
 347         bp += start;
 348         for (i = start; i <= end; i++, bp++) {
 349                 if (BP_IS_HOLE(bp))
 350                         continue;
 351                 rw_enter(&dn->dn_struct_rwlock, RW_READER);
 352                 err = dbuf_hold_impl(dn, dnlevel-1, i, TRUE, FTAG, &db);
 353                 ASSERT3U(err, ==, 0);
 354                 rw_exit(&dn->dn_struct_rwlock);
 355 
 356                 if (free_children(db, blkid, nblks, trunc, tx) == ALL) {
 357                         ASSERT3P(db->db_blkptr, ==, bp);
 358                         (void) free_blocks(dn, bp, 1, tx);
 359                 }
 360                 dbuf_rele(db, FTAG);
 361         }
 362         if (trunc) {
 363                 uint64_t off = (dn->dn_phys->dn_maxblkid + 1) *
 364                     (dn->dn_phys->dn_datablkszsec << SPA_MINBLOCKSHIFT);
 365                 dn->dn_phys->dn_maxblkid = (blkid ? blkid - 1 : 0);
 366                 ASSERT(off < dn->dn_phys->dn_maxblkid ||
 367                     dn->dn_phys->dn_maxblkid == 0 ||
 368                     dnode_next_offset(dn, 0, &off, 1, 1, 0) != 0);
 369         }
 370 }
 371 
 372 /*
 373  * Try to kick all the dnode's dbufs out of the cache...


 454                         ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
 455                             dr->dt.dl.dr_data == db->db_buf);
 456                         dbuf_unoverride(dr);
 457                 }
 458                 kmem_free(dr, sizeof (dbuf_dirty_record_t));
 459                 dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg);
 460         }
 461 }
 462 
 463 static void
 464 dnode_sync_free(dnode_t *dn, dmu_tx_t *tx)
 465 {
 466         int txgoff = tx->tx_txg & TXG_MASK;
 467 
 468         ASSERT(dmu_tx_is_syncing(tx));
 469 
 470         /*
 471          * Our contents should have been freed in dnode_sync() by the
 472          * free range record inserted by the caller of dnode_free().
 473          */
 474         ASSERT3U(DN_USED_BYTES(dn->dn_phys), ==, 0);
 475         ASSERT(BP_IS_HOLE(dn->dn_phys->dn_blkptr));
 476 
 477         dnode_undirty_dbufs(&dn->dn_dirty_records[txgoff]);
 478         dnode_evict_dbufs(dn);
 479         ASSERT3P(list_head(&dn->dn_dbufs), ==, NULL);
 480 
 481         /*
 482          * XXX - It would be nice to assert this, but we may still
 483          * have residual holds from async evictions from the arc...
 484          *
 485          * zfs_obj_to_path() also depends on this being
 486          * commented out.
 487          *
 488          * ASSERT3U(refcount_count(&dn->dn_holds), ==, 1);
 489          */
 490 
 491         /* Undirty next bits */
 492         dn->dn_next_nlevels[txgoff] = 0;
 493         dn->dn_next_indblkshift[txgoff] = 0;
 494         dn->dn_next_blksz[txgoff] = 0;




 257         if (dbend <= end)
 258                 end = dbend;
 259         else if (all)
 260                 all = trunc;
 261         ASSERT3U(start, <=, end);
 262 
 263         if (db->db_level == 1) {
 264                 FREE_VERIFY(db, start, end, tx);
 265                 blocks_freed = free_blocks(dn, bp, end-start+1, tx);
 266                 arc_buf_freeze(db->db_buf);
 267                 ASSERT(all || blocks_freed == 0 || db->db_last_dirty);
 268                 DB_DNODE_EXIT(db);
 269                 return (all ? ALL : blocks_freed);
 270         }
 271 
 272         for (i = start; i <= end; i++, bp++) {
 273                 if (BP_IS_HOLE(bp))
 274                         continue;
 275                 rw_enter(&dn->dn_struct_rwlock, RW_READER);
 276                 err = dbuf_hold_impl(dn, db->db_level-1, i, TRUE, FTAG, &subdb);
 277                 ASSERT0(err);
 278                 rw_exit(&dn->dn_struct_rwlock);
 279 
 280                 if (free_children(subdb, blkid, nblks, trunc, tx) == ALL) {
 281                         ASSERT3P(subdb->db_blkptr, ==, bp);
 282                         blocks_freed += free_blocks(dn, bp, 1, tx);
 283                 } else {
 284                         all = FALSE;
 285                 }
 286                 dbuf_rele(subdb, FTAG);
 287         }
 288         DB_DNODE_EXIT(db);
 289         arc_buf_freeze(db->db_buf);
 290 #ifdef ZFS_DEBUG
 291         bp -= (end-start)+1;
 292         for (i = start; i <= end; i++, bp++) {
 293                 if (i == start && blkid != 0)
 294                         continue;
 295                 else if (i == end && !trunc)
 296                         continue;
 297                 ASSERT0(bp->blk_birth);
 298         }
 299 #endif
 300         ASSERT(all || blocks_freed == 0 || db->db_last_dirty);
 301         return (all ? ALL : blocks_freed);
 302 }
 303 
 304 /*
 305  * free_range: Traverse the indicated range of the provided file
 306  * and "free" all the blocks contained there.
 307  */
 308 static void
 309 dnode_sync_free_range(dnode_t *dn, uint64_t blkid, uint64_t nblks, dmu_tx_t *tx)
 310 {
 311         blkptr_t *bp = dn->dn_phys->dn_blkptr;
 312         dmu_buf_impl_t *db;
 313         int trunc, start, end, shift, i, err;
 314         int dnlevel = dn->dn_phys->dn_nlevels;
 315 
 316         if (blkid > dn->dn_phys->dn_maxblkid)
 317                 return;


 333                         uint64_t off = (dn->dn_phys->dn_maxblkid + 1) *
 334                             (dn->dn_phys->dn_datablkszsec << SPA_MINBLOCKSHIFT);
 335                         dn->dn_phys->dn_maxblkid = (blkid ? blkid - 1 : 0);
 336                         ASSERT(off < dn->dn_phys->dn_maxblkid ||
 337                             dn->dn_phys->dn_maxblkid == 0 ||
 338                             dnode_next_offset(dn, 0, &off, 1, 1, 0) != 0);
 339                 }
 340                 return;
 341         }
 342 
 343         shift = (dnlevel - 1) * (dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT);
 344         start = blkid >> shift;
 345         ASSERT(start < dn->dn_phys->dn_nblkptr);
 346         end = (blkid + nblks - 1) >> shift;
 347         bp += start;
 348         for (i = start; i <= end; i++, bp++) {
 349                 if (BP_IS_HOLE(bp))
 350                         continue;
 351                 rw_enter(&dn->dn_struct_rwlock, RW_READER);
 352                 err = dbuf_hold_impl(dn, dnlevel-1, i, TRUE, FTAG, &db);
 353                 ASSERT0(err);
 354                 rw_exit(&dn->dn_struct_rwlock);
 355 
 356                 if (free_children(db, blkid, nblks, trunc, tx) == ALL) {
 357                         ASSERT3P(db->db_blkptr, ==, bp);
 358                         (void) free_blocks(dn, bp, 1, tx);
 359                 }
 360                 dbuf_rele(db, FTAG);
 361         }
 362         if (trunc) {
 363                 uint64_t off = (dn->dn_phys->dn_maxblkid + 1) *
 364                     (dn->dn_phys->dn_datablkszsec << SPA_MINBLOCKSHIFT);
 365                 dn->dn_phys->dn_maxblkid = (blkid ? blkid - 1 : 0);
 366                 ASSERT(off < dn->dn_phys->dn_maxblkid ||
 367                     dn->dn_phys->dn_maxblkid == 0 ||
 368                     dnode_next_offset(dn, 0, &off, 1, 1, 0) != 0);
 369         }
 370 }
 371 
 372 /*
 373  * Try to kick all the dnode's dbufs out of the cache...


 454                         ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
 455                             dr->dt.dl.dr_data == db->db_buf);
 456                         dbuf_unoverride(dr);
 457                 }
 458                 kmem_free(dr, sizeof (dbuf_dirty_record_t));
 459                 dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg);
 460         }
 461 }
 462 
 463 static void
 464 dnode_sync_free(dnode_t *dn, dmu_tx_t *tx)
 465 {
 466         int txgoff = tx->tx_txg & TXG_MASK;
 467 
 468         ASSERT(dmu_tx_is_syncing(tx));
 469 
 470         /*
 471          * Our contents should have been freed in dnode_sync() by the
 472          * free range record inserted by the caller of dnode_free().
 473          */
 474         ASSERT0(DN_USED_BYTES(dn->dn_phys));
 475         ASSERT(BP_IS_HOLE(dn->dn_phys->dn_blkptr));
 476 
 477         dnode_undirty_dbufs(&dn->dn_dirty_records[txgoff]);
 478         dnode_evict_dbufs(dn);
 479         ASSERT3P(list_head(&dn->dn_dbufs), ==, NULL);
 480 
 481         /*
 482          * XXX - It would be nice to assert this, but we may still
 483          * have residual holds from async evictions from the arc...
 484          *
 485          * zfs_obj_to_path() also depends on this being
 486          * commented out.
 487          *
 488          * ASSERT3U(refcount_count(&dn->dn_holds), ==, 1);
 489          */
 490 
 491         /* Undirty next bits */
 492         dn->dn_next_nlevels[txgoff] = 0;
 493         dn->dn_next_indblkshift[txgoff] = 0;
 494         dn->dn_next_blksz[txgoff] = 0;