typedef int (scan_cb_t)(dsl_pool_t *, const blkptr_t *, const zbookmark_t *);

static scan_cb_t dsl_scan_defrag_cb;
static scan_cb_t dsl_scan_scrub_cb;
static scan_cb_t dsl_scan_remove_cb;
static void dsl_scan_cancel_sync(void *, dmu_tx_t *);
static void dsl_scan_sync_state(dsl_scan_t *, dmu_tx_t *tx);

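/* Tunables that pace scrub, resilver, and free (async destroy) work. */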
int zfs_top_maxinflight = 32;		/* maximum I/Os per top-level vdev */
int zfs_resilver_delay = 2;		/* number of ticks to delay resilver */
int zfs_scrub_delay = 4;		/* number of ticks to delay scrub */
int zfs_scan_idle = 50;			/* idle window in clock ticks */

int zfs_scan_min_time_ms = 1000;	/* min millisecs to scrub per txg */
int zfs_free_min_time_ms = 1000;	/* min millisecs to free per txg */
int zfs_resilver_min_time_ms = 3000;	/* min millisecs to resilver per txg */
boolean_t zfs_no_scrub_io = B_FALSE;	/* set to disable scrub i/o */
boolean_t zfs_no_scrub_prefetch = B_FALSE; /* set to disable scrub prefetching */
enum ddt_class zfs_scrub_ddt_class_max = DDT_CLASS_DUPLICATE;
int dsl_scan_delay_completion = B_FALSE; /* set to delay scan completion */
/* max number of blocks to free in a single TXG */
uint64_t zfs_free_max_blocks = UINT64_MAX;

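/* True when the in-progress scan is a scrub or a resilver. */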
#define	DSL_SCAN_IS_SCRUB_RESILVER(scn) \
	((scn)->scn_phys.scn_func == POOL_SCAN_SCRUB || \
	(scn)->scn_phys.scn_func == POOL_SCAN_RESILVER)

extern int zfs_txg_timeout;

/* the order has to match pool_scan_type */
static scan_cb_t *scan_funcs[POOL_SCAN_FUNCS] = {
	NULL,
	dsl_scan_scrub_cb,	/* POOL_SCAN_SCRUB */
	dsl_scan_scrub_cb,	/* POOL_SCAN_RESILVER */
};

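/*
 * Set up the scan state for this pool, restoring the state of any scan
 * that was in progress from the on-disk pool data.
 */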
int
dsl_scan_init(dsl_pool_t *dp, uint64_t txg)
{
	int err;
	dsl_scan_t *scn;
	spa_t *spa = dp->dp_spa;

/* ... ~1,200 lines elided; the next excerpt is the dataset-queue walk ... */

			scn->scn_phys.scn_cur_min_txg =
			    MAX(scn->scn_phys.scn_min_txg,
			    ds->ds_phys->ds_prev_snap_txg);
		}
		scn->scn_phys.scn_cur_max_txg = dsl_scan_ds_maxtxg(ds);
		dsl_dataset_rele(ds, FTAG);

		dsl_scan_visitds(scn, dsobj, tx);
		zap_cursor_fini(&zc);
		if (scn->scn_pausing)
			return;
	}
	zap_cursor_fini(&zc);
}

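/*
 * Decide whether block freeing should yield for this txg: stop once
 * zfs_free_max_blocks blocks have been visited, once we have been
 * running longer than zfs_txg_timeout, once we have exceeded the
 * minimum free time while another txg sync is waiting, or when the
 * pool is shutting down.
 */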
static boolean_t
dsl_scan_free_should_pause(dsl_scan_t *scn)
{
	uint64_t elapsed_nanosecs;

	if (scn->scn_visited_this_txg >= zfs_free_max_blocks)
		return (B_TRUE);

	elapsed_nanosecs = gethrtime() - scn->scn_sync_start_time;
	return (elapsed_nanosecs / NANOSEC > zfs_txg_timeout ||
	    (NSEC2MSEC(elapsed_nanosecs) > zfs_free_min_time_ms &&
	    txg_sync_waiting(scn->scn_dp)) ||
	    spa_shutting_down(scn->scn_dp->dp_spa));
}

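/*
 * Callback invoked for each block pointer being freed.  When walking a
 * bptree (async destroy), the pause check is only applied at level-zero,
 * non-objset blocks, points at which the traversal can be suspended and
 * later resumed.
 */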
static int
dsl_scan_free_block_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	dsl_scan_t *scn = arg;

	if (!scn->scn_is_bptree ||
	    (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_OBJSET)) {
		if (dsl_scan_free_should_pause(scn))
			return (SET_ERROR(ERESTART));
	}

	zio_nowait(zio_free_sync(scn->scn_zio_root, scn->scn_dp->dp_spa,
	    dmu_tx_get_txg(tx), bp, 0));
	/* ... */