};

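/*
 * Set up the in-core scan state when the pool is opened.  If an
 * old-style scrub was in progress on disk, arrange for a new-style
 * scrub to be restarted from the beginning in the given txg.
 */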
int
dsl_scan_init(dsl_pool_t *dp, uint64_t txg)
{
	int err;
	dsl_scan_t *scn;
	spa_t *spa = dp->dp_spa;
	uint64_t f;

	scn = dp->dp_scan = kmem_zalloc(sizeof (dsl_scan_t), KM_SLEEP);
	scn->scn_dp = dp;

	/*
	 * It's possible that we're resuming a scan after a reboot so
	 * make sure that the scan_async_destroying flag is initialized
	 * appropriately.
	 */
	ASSERT(!scn->scn_async_destroying);
	scn->scn_async_destroying = spa_feature_is_active(dp->dp_spa,
	    &spa_feature_table[SPA_FEATURE_ASYNC_DESTROY]);

	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    "scrub_func", sizeof (uint64_t), 1, &f);
	if (err == 0) {
		/*
		 * There was an old-style scrub in progress.  Restart a
		 * new-style scrub from the beginning.
		 */
		scn->scn_restart_txg = txg;
		zfs_dbgmsg("old-style scrub was in progress; "
		    "restarting new-style scrub in txg %llu",
		    scn->scn_restart_txg);

		/*
		 * Load the queue obj from the old location so that it
		 * can be freed by dsl_scan_done().
		 */
		(void) zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    "scrub_queue", sizeof (uint64_t), 1,
		    &scn->scn_phys.scn_queue_obj);

/* ... */

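	/*
	 * End of dsl_scan_free_block_cb(): issue the free for this block
	 * and deduct its space from dp_free_dir's ($FREE's) accounting.
	 */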
	zio_nowait(zio_free_sync(scn->scn_zio_root, scn->scn_dp->dp_spa,
	    dmu_tx_get_txg(tx), bp, 0));
	dsl_dir_diduse_space(tx->tx_pool->dp_free_dir, DD_USED_HEAD,
	    -bp_get_dsize_sync(scn->scn_dp->dp_spa, bp),
	    -BP_GET_PSIZE(bp), -BP_GET_UCSIZE(bp), tx);
	scn->scn_visited_this_txg++;
	return (0);
}

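/*
 * Report whether any scan-related work is pending: an in-progress
 * scrub/resilver, an async destroy, or deferred frees remaining on
 * the free bpobj.
 */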
boolean_t
dsl_scan_active(dsl_scan_t *scn)
{
	spa_t *spa = scn->scn_dp->dp_spa;
	uint64_t used = 0, comp, uncomp;

	if (spa->spa_load_state != SPA_LOAD_NONE)
		return (B_FALSE);
	if (spa_shutting_down(spa))
		return (B_FALSE);

	if (scn->scn_phys.scn_state == DSS_SCANNING ||
	    scn->scn_async_destroying)
		return (B_TRUE);

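	/*
	 * Even when no scan is in progress, there is still work to do
	 * if any blocks remain on the pool's free bpobj.
	 */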
	if (spa_version(scn->scn_dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
		(void) bpobj_space(&scn->scn_dp->dp_free_bpobj,
		    &used, &comp, &uncomp);
	}
	return (used != 0);
}

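/*
 * Called in syncing context each txg: process any deferred frees
 * (and async destroys) first, then continue the scan work.
 */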
void
dsl_scan_sync(dsl_pool_t *dp, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dp->dp_scan;
	spa_t *spa = dp->dp_spa;
	int err;

	/*
	 * Check for scn_restart_txg before checking spa_load_state, so
	 * that we can restart an old-style scan while the pool is being
	 * imported (see dsl_scan_init).
	 */
	if (scn->scn_restart_txg != 0 &&
	    scn->scn_restart_txg <= tx->tx_txg) {
		pool_scan_func_t func = POOL_SCAN_SCRUB;
		dsl_scan_done(scn, B_FALSE, tx);
		if (vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL))
			func = POOL_SCAN_RESILVER;
		zfs_dbgmsg("restarting scan func=%u txg=%llu",
		    func, tx->tx_txg);
		dsl_scan_setup_sync(&func, tx);
	}

	if (!dsl_scan_active(scn) ||
	    spa_sync_pass(dp->dp_spa) > 1)
		return;
	scn->scn_visited_this_txg = 0;
	scn->scn_pausing = B_FALSE;
	scn->scn_sync_start_time = gethrtime();
	spa->spa_scrub_active = B_TRUE;

	/*
	 * First process the free list.  If we pause the free, don't do
	 * any scanning.  This ensures that there is no free list when
	 * we are scanning, so the scan code doesn't have to worry about
	 * traversing it.
	 */
	if (spa_version(dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
		scn->scn_is_bptree = B_FALSE;
		scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
		    NULL, ZIO_FLAG_MUSTSUCCEED);
		err = bpobj_iterate(&dp->dp_free_bpobj,
		    dsl_scan_free_block_cb, scn, tx);
		VERIFY3U(0, ==, zio_wait(scn->scn_zio_root));

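		/*
		 * If the async_destroy feature is active, blocks from
		 * destroyed datasets are also queued in the pool's
		 * bptree; free those as well.
		 */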
		if (err == 0 && spa_feature_is_active(spa,
		    &spa_feature_table[SPA_FEATURE_ASYNC_DESTROY])) {
			ASSERT(scn->scn_async_destroying);
			scn->scn_is_bptree = B_TRUE;
			scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
			    NULL, ZIO_FLAG_MUSTSUCCEED);
			err = bptree_iterate(dp->dp_meta_objset,
			    dp->dp_bptree_obj, B_TRUE, dsl_scan_free_block_cb,
			    scn, tx);
			VERIFY0(zio_wait(scn->scn_zio_root));

			if (err == 0) {
				zfeature_info_t *feat = &spa_feature_table
				    [SPA_FEATURE_ASYNC_DESTROY];
				/* finished; deactivate async destroy feature */
				spa_feature_decr(spa, feat, tx);
				ASSERT(!spa_feature_is_active(spa, feat));
				VERIFY0(zap_remove(dp->dp_meta_objset,
				    DMU_POOL_DIRECTORY_OBJECT,
				    DMU_POOL_BPTREE_OBJ, tx));
				VERIFY0(bptree_free(dp->dp_meta_objset,
				    dp->dp_bptree_obj, tx));
				dp->dp_bptree_obj = 0;
				scn->scn_async_destroying = B_FALSE;
			}
		}
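		/* Log this txg's batch of frees before resetting the count. */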
		if (scn->scn_visited_this_txg) {
			zfs_dbgmsg("freed %llu blocks in %llums from "
			    "free_bpobj/bptree txg %llu",
			    (longlong_t)scn->scn_visited_this_txg,
			    (longlong_t)
			    NSEC2MSEC(gethrtime() - scn->scn_sync_start_time),
			    (longlong_t)tx->tx_txg);
			scn->scn_visited_this_txg = 0;
			/*
			 * Re-sync the ddt so that we can further modify
			 * it when doing bprewrite.
			 */