Code review (captured diff page): Possibility to physically reserve space without writing leaf blocks


1282 
1283         if (dn->dn_free_txg > 0 && dn->dn_free_txg <= txg) {
1284                 list_insert_tail(&os->os_free_dnodes[txg&TXG_MASK], dn);
1285         } else {
1286                 list_insert_tail(&os->os_dirty_dnodes[txg&TXG_MASK], dn);
1287         }
1288 
1289         mutex_exit(&os->os_lock);
1290 
1291         /*
1292          * The dnode maintains a hold on its containing dbuf as
1293          * long as there are holds on it.  Each instantiated child
1294          * dbuf maintains a hold on the dnode.  When the last child
1295          * drops its hold, the dnode will drop its hold on the
1296          * containing dbuf. We add a "dirty hold" here so that the
1297          * dnode will hang around after we finish processing its
1298          * children.
1299          */
1300         VERIFY(dnode_add_ref(dn, (void *)(uintptr_t)tx->tx_txg));
1301 
1302         (void) dbuf_dirty(dn->dn_dbuf, tx);
1303 
1304         dsl_dataset_dirty(os->os_dsl_dataset, tx);
1305 }
1306 
1307 void
1308 dnode_free(dnode_t *dn, dmu_tx_t *tx)
1309 {
1310         int txgoff = tx->tx_txg & TXG_MASK;
1311 
1312         dprintf("dn=%p txg=%llu\n", dn, tx->tx_txg);
1313 
1314         /* we should be the only holder... hopefully */
1315         /* ASSERT3U(refcount_count(&dn->dn_holds), ==, 1); */
1316 
1317         mutex_enter(&dn->dn_mtx);
1318         if (dn->dn_type == DMU_OT_NONE || dn->dn_free_txg) {
1319                 mutex_exit(&dn->dn_mtx);
1320                 return;
1321         }
1322         dn->dn_free_txg = tx->tx_txg;


1445         new_nlevels = 1;
1446         epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
1447         for (sz = dn->dn_nblkptr;
1448             sz <= blkid && sz >= dn->dn_nblkptr; sz <<= epbs)
1449                 new_nlevels++;
1450 
1451         if (new_nlevels > dn->dn_nlevels) {
1452                 int old_nlevels = dn->dn_nlevels;
1453                 dmu_buf_impl_t *db;
1454                 list_t *list;
1455                 dbuf_dirty_record_t *new, *dr, *dr_next;
1456 
1457                 dn->dn_nlevels = new_nlevels;
1458 
1459                 ASSERT3U(new_nlevels, >, dn->dn_next_nlevels[txgoff]);
1460                 dn->dn_next_nlevels[txgoff] = new_nlevels;
1461 
1462                 /* dirty the left indirects */
1463                 db = dbuf_hold_level(dn, old_nlevels, 0, FTAG);
1464                 ASSERT(db != NULL);
1465                 new = dbuf_dirty(db, tx);
1466                 dbuf_rele(db, FTAG);
1467 
1468                 /* transfer the dirty records to the new indirect */
1469                 mutex_enter(&dn->dn_mtx);
1470                 mutex_enter(&new->dt.di.dr_mtx);
1471                 list = &dn->dn_dirty_records[txgoff];
1472                 for (dr = list_head(list); dr; dr = dr_next) {
1473                         dr_next = list_next(&dn->dn_dirty_records[txgoff], dr);
1474                         if (dr->dr_dbuf->db_level != new_nlevels-1 &&
1475                             dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID &&
1476                             dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID) {
1477                                 ASSERT(dr->dr_dbuf->db_level == old_nlevels-1);
1478                                 list_remove(&dn->dn_dirty_records[txgoff], dr);
1479                                 list_insert_tail(&new->dt.di.dr_children, dr);
1480                                 dr->dr_parent = new;
1481                         }
1482                 }
1483                 mutex_exit(&new->dt.di.dr_mtx);
1484                 mutex_exit(&dn->dn_mtx);
1485         }




1282 
1283         if (dn->dn_free_txg > 0 && dn->dn_free_txg <= txg) {
1284                 list_insert_tail(&os->os_free_dnodes[txg&TXG_MASK], dn);
1285         } else {
1286                 list_insert_tail(&os->os_dirty_dnodes[txg&TXG_MASK], dn);
1287         }
1288 
1289         mutex_exit(&os->os_lock);
1290 
1291         /*
1292          * The dnode maintains a hold on its containing dbuf as
1293          * long as there are holds on it.  Each instantiated child
1294          * dbuf maintains a hold on the dnode.  When the last child
1295          * drops its hold, the dnode will drop its hold on the
1296          * containing dbuf. We add a "dirty hold" here so that the
1297          * dnode will hang around after we finish processing its
1298          * children.
1299          */
1300         VERIFY(dnode_add_ref(dn, (void *)(uintptr_t)tx->tx_txg));
1301 
1302         (void) dbuf_dirty(dn->dn_dbuf, tx, B_FALSE);
1303 
1304         dsl_dataset_dirty(os->os_dsl_dataset, tx);
1305 }
1306 
1307 void
1308 dnode_free(dnode_t *dn, dmu_tx_t *tx)
1309 {
1310         int txgoff = tx->tx_txg & TXG_MASK;
1311 
1312         dprintf("dn=%p txg=%llu\n", dn, tx->tx_txg);
1313 
1314         /* we should be the only holder... hopefully */
1315         /* ASSERT3U(refcount_count(&dn->dn_holds), ==, 1); */
1316 
1317         mutex_enter(&dn->dn_mtx);
1318         if (dn->dn_type == DMU_OT_NONE || dn->dn_free_txg) {
1319                 mutex_exit(&dn->dn_mtx);
1320                 return;
1321         }
1322         dn->dn_free_txg = tx->tx_txg;


1445         new_nlevels = 1;
1446         epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
1447         for (sz = dn->dn_nblkptr;
1448             sz <= blkid && sz >= dn->dn_nblkptr; sz <<= epbs)
1449                 new_nlevels++;
1450 
1451         if (new_nlevels > dn->dn_nlevels) {
1452                 int old_nlevels = dn->dn_nlevels;
1453                 dmu_buf_impl_t *db;
1454                 list_t *list;
1455                 dbuf_dirty_record_t *new, *dr, *dr_next;
1456 
1457                 dn->dn_nlevels = new_nlevels;
1458 
1459                 ASSERT3U(new_nlevels, >, dn->dn_next_nlevels[txgoff]);
1460                 dn->dn_next_nlevels[txgoff] = new_nlevels;
1461 
1462                 /* dirty the left indirects */
1463                 db = dbuf_hold_level(dn, old_nlevels, 0, FTAG);
1464                 ASSERT(db != NULL);
1465                 new = dbuf_dirty(db, tx, B_FALSE);
1466                 dbuf_rele(db, FTAG);
1467 
1468                 /* transfer the dirty records to the new indirect */
1469                 mutex_enter(&dn->dn_mtx);
1470                 mutex_enter(&new->dt.di.dr_mtx);
1471                 list = &dn->dn_dirty_records[txgoff];
1472                 for (dr = list_head(list); dr; dr = dr_next) {
1473                         dr_next = list_next(&dn->dn_dirty_records[txgoff], dr);
1474                         if (dr->dr_dbuf->db_level != new_nlevels-1 &&
1475                             dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID &&
1476                             dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID) {
1477                                 ASSERT(dr->dr_dbuf->db_level == old_nlevels-1);
1478                                 list_remove(&dn->dn_dirty_records[txgoff], dr);
1479                                 list_insert_tail(&new->dt.di.dr_children, dr);
1480                                 dr->dr_parent = new;
1481                         }
1482                 }
1483                 mutex_exit(&new->dt.di.dr_mtx);
1484                 mutex_exit(&dn->dn_mtx);
1485         }