825 if (scn->scn_phys.scn_bookmark.zb_objset == ds->ds_object) {
826 if (dsl_dataset_is_snapshot(ds)) {
827 /* Note, scn_cur_{min,max}_txg stays the same. */
828 scn->scn_phys.scn_bookmark.zb_objset =
829 ds->ds_phys->ds_next_snap_obj;
830 zfs_dbgmsg("destroying ds %llu; currently traversing; "
831 "reset zb_objset to %llu",
832 (u_longlong_t)ds->ds_object,
833 (u_longlong_t)ds->ds_phys->ds_next_snap_obj);
834 scn->scn_phys.scn_flags |= DSF_VISIT_DS_AGAIN;
835 } else {
836 SET_BOOKMARK(&scn->scn_phys.scn_bookmark,
837 ZB_DESTROYED_OBJSET, 0, 0, 0);
838 zfs_dbgmsg("destroying ds %llu; currently traversing; "
839 "reset bookmark to -1,0,0,0",
840 (u_longlong_t)ds->ds_object);
841 }
842 } else if (zap_lookup_int_key(dp->dp_meta_objset,
843 scn->scn_phys.scn_queue_obj, ds->ds_object, &mintxg) == 0) {
844 ASSERT3U(ds->ds_phys->ds_num_children, <=, 1);
845 VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
846 scn->scn_phys.scn_queue_obj, ds->ds_object, tx));
847 if (dsl_dataset_is_snapshot(ds)) {
848 /*
849 * We keep the same mintxg; it could be >
850 * ds_creation_txg if the previous snapshot was
851 * deleted too.
852 */
853 VERIFY(zap_add_int_key(dp->dp_meta_objset,
854 scn->scn_phys.scn_queue_obj,
855 ds->ds_phys->ds_next_snap_obj, mintxg, tx) == 0);
856 zfs_dbgmsg("destroying ds %llu; in queue; "
857 "replacing with %llu",
858 (u_longlong_t)ds->ds_object,
859 (u_longlong_t)ds->ds_phys->ds_next_snap_obj);
860 } else {
861 zfs_dbgmsg("destroying ds %llu; in queue; removing",
862 (u_longlong_t)ds->ds_object);
863 }
864 } else {
865 zfs_dbgmsg("destroying ds %llu; ignoring",
/*
 * Snapshot hook: dataset "ds" has just been snapshotted while a scan may
 * be active.  The scan tracks its position by objset number, so any
 * bookmark or scan-queue entry referring to "ds" is re-pointed at
 * ds->ds_phys->ds_prev_snap_obj (presumably the just-created snapshot,
 * which now holds the data being traversed — confirm against the
 * snapshot sync code).
 * NOTE(review): the "void" return-type line precedes this excerpt.
 */
877 dsl_scan_ds_snapshotted(dsl_dataset_t *ds, dmu_tx_t *tx)
878 {
879 dsl_pool_t *dp = ds->ds_dir->dd_pool;
880 dsl_scan_t *scn = dp->dp_scan;
881 uint64_t mintxg;
882
/* Nothing to fix up unless a scan is in progress. */
883 if (scn->scn_phys.scn_state != DSS_SCANNING)
884 return;
885
/* A snapshot was just taken, so a previous snapshot must exist. */
886 ASSERT(ds->ds_phys->ds_prev_snap_obj != 0);
887
/*
 * Case 1: the scan is currently traversing this dataset — redirect the
 * bookmark's objset to ds_prev_snap_obj.
 */
888 if (scn->scn_phys.scn_bookmark.zb_objset == ds->ds_object) {
889 scn->scn_phys.scn_bookmark.zb_objset =
890 ds->ds_phys->ds_prev_snap_obj;
891 zfs_dbgmsg("snapshotting ds %llu; currently traversing; "
892 "reset zb_objset to %llu",
893 (u_longlong_t)ds->ds_object,
894 (u_longlong_t)ds->ds_phys->ds_prev_snap_obj);
/*
 * Case 2: this dataset is waiting in the scan queue — re-key its queue
 * entry to ds_prev_snap_obj, preserving the recorded mintxg.
 */
895 } else if (zap_lookup_int_key(dp->dp_meta_objset,
896 scn->scn_phys.scn_queue_obj, ds->ds_object, &mintxg) == 0) {
897 VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
898 scn->scn_phys.scn_queue_obj, ds->ds_object, tx));
899 VERIFY(zap_add_int_key(dp->dp_meta_objset,
900 scn->scn_phys.scn_queue_obj,
901 ds->ds_phys->ds_prev_snap_obj, mintxg, tx) == 0);
902 zfs_dbgmsg("snapshotting ds %llu; in queue; "
903 "replacing with %llu",
904 (u_longlong_t)ds->ds_object,
905 (u_longlong_t)ds->ds_phys->ds_prev_snap_obj);
906 }
/* Persist the updated scan state in this txg. */
907 dsl_scan_sync_state(scn, tx);
908 }
909
/*
 * Clone-swap hook: datasets ds1 and ds2 have just exchanged contents.
 * Any scan bookmark or scan-queue entry referring to one of them is
 * re-pointed at the other, so the scan keeps following the data rather
 * than the dataset object number.
 */
910 void
911 dsl_scan_ds_clone_swapped(dsl_dataset_t *ds1, dsl_dataset_t *ds2, dmu_tx_t *tx)
912 {
913 dsl_pool_t *dp = ds1->ds_dir->dd_pool;
914 dsl_scan_t *scn = dp->dp_scan;
915 uint64_t mintxg;
916
917 if (scn->scn_phys.scn_state != DSS_SCANNING)
/*
 * NOTE(review): original lines 918-919 are missing from this excerpt —
 * presumably the "return;" body of the guard above (matching
 * dsl_scan_ds_snapshotted); confirm against the full source.
 */
/* If the scan is currently traversing either dataset, follow the swap. */
920 if (scn->scn_phys.scn_bookmark.zb_objset == ds1->ds_object) {
921 scn->scn_phys.scn_bookmark.zb_objset = ds2->ds_object;
922 zfs_dbgmsg("clone_swap ds %llu; currently traversing; "
923 "reset zb_objset to %llu",
924 (u_longlong_t)ds1->ds_object,
925 (u_longlong_t)ds2->ds_object);
926 } else if (scn->scn_phys.scn_bookmark.zb_objset == ds2->ds_object) {
927 scn->scn_phys.scn_bookmark.zb_objset = ds1->ds_object;
928 zfs_dbgmsg("clone_swap ds %llu; currently traversing; "
929 "reset zb_objset to %llu",
930 (u_longlong_t)ds2->ds_object,
931 (u_longlong_t)ds1->ds_object);
932 }
933
/*
 * Swap any scan-queue entries.  If ds1 is queued, move its entry to
 * ds2; EEXIST from the add means both datasets were already queued, in
 * which case ds1's entry (just removed) is re-added so both remain.
 */
934 if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj,
935 ds1->ds_object, &mintxg) == 0) {
936 int err;
937
/* Both datasets record the same prev-snapshot txg after the swap. */
938 ASSERT3U(mintxg, ==, ds1->ds_phys->ds_prev_snap_txg);
939 ASSERT3U(mintxg, ==, ds2->ds_phys->ds_prev_snap_txg);
940 VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
941 scn->scn_phys.scn_queue_obj, ds1->ds_object, tx));
942 err = zap_add_int_key(dp->dp_meta_objset,
943 scn->scn_phys.scn_queue_obj, ds2->ds_object, mintxg, tx);
944 VERIFY(err == 0 || err == EEXIST);
945 if (err == EEXIST) {
946 /* Both were there to begin with */
947 VERIFY(0 == zap_add_int_key(dp->dp_meta_objset,
948 scn->scn_phys.scn_queue_obj,
949 ds1->ds_object, mintxg, tx));
950 }
951 zfs_dbgmsg("clone_swap ds %llu; in queue; "
952 "replacing with %llu",
953 (u_longlong_t)ds1->ds_object,
954 (u_longlong_t)ds2->ds_object);
/* Only ds2 was queued: move its entry to ds1. */
955 } else if (zap_lookup_int_key(dp->dp_meta_objset,
956 scn->scn_phys.scn_queue_obj, ds2->ds_object, &mintxg) == 0) {
957 ASSERT3U(mintxg, ==, ds1->ds_phys->ds_prev_snap_txg);
958 ASSERT3U(mintxg, ==, ds2->ds_phys->ds_prev_snap_txg);
959 VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
960 scn->scn_phys.scn_queue_obj, ds2->ds_object, tx));
961 VERIFY(0 == zap_add_int_key(dp->dp_meta_objset,
962 scn->scn_phys.scn_queue_obj, ds1->ds_object, mintxg, tx));
963 zfs_dbgmsg("clone_swap ds %llu; in queue; "
964 "replacing with %llu",
965 (u_longlong_t)ds2->ds_object,
966 (u_longlong_t)ds1->ds_object);
967 }
968
/* Persist the updated scan state in this txg. */
969 dsl_scan_sync_state(scn, tx);
970 }
971
/*
 * Argument bundle passed (via void *arg) to enqueue_clones_cb.
 */
972 struct enqueue_clones_arg {
/* Open transaction used for the callback's scan-queue ZAP updates. */
973 dmu_tx_t *tx;
/*
 * NOTE(review): presumably the object number of the origin snapshot
 * whose clones are being enqueued — confirm against the (partially
 * elided) enqueue_clones_cb body.
 */
974 uint64_t originobj;
975 };
976
977 /* ARGSUSED */
978 static int
979 enqueue_clones_cb(spa_t *spa, uint64_t dsobj, const char *dsname, void *arg)
997 dsl_dataset_rele(ds, FTAG);
998 if (err)
999 return (err);
1000 ds = prev;
1001 }
1002 VERIFY(zap_add_int_key(dp->dp_meta_objset,
1003 scn->scn_phys.scn_queue_obj, ds->ds_object,
1004 ds->ds_phys->ds_prev_snap_txg, eca->tx) == 0);
1005 }
1006 dsl_dataset_rele(ds, FTAG);
1007 return (0);
1008 }
1009
1010 static void
1011 dsl_scan_visitds(dsl_scan_t *scn, uint64_t dsobj, dmu_tx_t *tx)
1012 {
1013 dsl_pool_t *dp = scn->scn_dp;
1014 dsl_dataset_t *ds;
1015 objset_t *os;
1016
1017 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
1018
1019 if (dmu_objset_from_ds(ds, &os))
1020 goto out;
1021
1022 /*
1023 * Only the ZIL in the head (non-snapshot) is valid. Even though
1024 * snapshots can have ZIL block pointers (which may be the same
1025 * BP as in the head), they must be ignored. So we traverse the
1026 * ZIL here, rather than in scan_recurse(), because the regular
1027 * snapshot block-sharing rules don't apply to it.
1028 */
1029 if (DSL_SCAN_IS_SCRUB_RESILVER(scn) && !dsl_dataset_is_snapshot(ds))
1030 dsl_scan_zil(dp, &os->os_zil_header);
1031
1032 /*
1033 * Iterate over the bps in this ds.
1034 */
1035 dmu_buf_will_dirty(ds->ds_dbuf, tx);
1036 dsl_scan_visit_rootbp(scn, ds, &ds->ds_phys->ds_bp, tx);
1037
1291 */
1292 dsl_scan_visitds(scn, scn->scn_phys.scn_bookmark.zb_objset, tx);
1293 if (scn->scn_pausing)
1294 return;
1295 }
1296
1297 /*
1298 * In case we were paused right at the end of the ds, zero the
1299 * bookmark so we don't think that we're still trying to resume.
1300 */
1301 bzero(&scn->scn_phys.scn_bookmark, sizeof (zbookmark_t));
1302
1303 /* keep pulling things out of the zap-object-as-queue */
1304 while (zap_cursor_init(&zc, dp->dp_meta_objset,
1305 scn->scn_phys.scn_queue_obj),
1306 zap_cursor_retrieve(&zc, &za) == 0) {
1307 dsl_dataset_t *ds;
1308 uint64_t dsobj;
1309
1310 dsobj = strtonum(za.za_name, NULL);
1311 VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
1312 scn->scn_phys.scn_queue_obj, dsobj, tx));
1313
1314 /* Set up min/max txg */
1315 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
1316 if (za.za_first_integer != 0) {
1317 scn->scn_phys.scn_cur_min_txg =
1318 MAX(scn->scn_phys.scn_min_txg,
1319 za.za_first_integer);
1320 } else {
1321 scn->scn_phys.scn_cur_min_txg =
1322 MAX(scn->scn_phys.scn_min_txg,
1323 ds->ds_phys->ds_prev_snap_txg);
1324 }
1325 scn->scn_phys.scn_cur_max_txg = dsl_scan_ds_maxtxg(ds);
1326 dsl_dataset_rele(ds, FTAG);
1327
1328 dsl_scan_visitds(scn, dsobj, tx);
1329 zap_cursor_fini(&zc);
1330 if (scn->scn_pausing)
1331 return;
1332 }
1333 zap_cursor_fini(&zc);
1334 }
1335
1417 spa_sync_pass(dp->dp_spa) > 1)
1418 return;
1419
1420 scn->scn_visited_this_txg = 0;
1421 scn->scn_pausing = B_FALSE;
1422 scn->scn_sync_start_time = gethrtime();
1423 spa->spa_scrub_active = B_TRUE;
1424
1425 /*
1426 * First process the free list. If we pause the free, don't do
1427 * any scanning. This ensures that there is no free list when
1428 * we are scanning, so the scan code doesn't have to worry about
1429 * traversing it.
1430 */
1431 if (spa_version(dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
1432 scn->scn_is_bptree = B_FALSE;
1433 scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
1434 NULL, ZIO_FLAG_MUSTSUCCEED);
1435 err = bpobj_iterate(&dp->dp_free_bpobj,
1436 dsl_scan_free_block_cb, scn, tx);
1437 VERIFY3U(0, ==, zio_wait(scn->scn_zio_root));
1438
1439 if (err == 0 && spa_feature_is_active(spa,
1440 &spa_feature_table[SPA_FEATURE_ASYNC_DESTROY])) {
1441 scn->scn_is_bptree = B_TRUE;
1442 scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
1443 NULL, ZIO_FLAG_MUSTSUCCEED);
1444 err = bptree_iterate(dp->dp_meta_objset,
1445 dp->dp_bptree_obj, B_TRUE, dsl_scan_free_block_cb,
1446 scn, tx);
1447 VERIFY3U(0, ==, zio_wait(scn->scn_zio_root));
1448 if (err != 0)
1449 return;
1450
1451 /* disable async destroy feature */
1452 spa_feature_decr(spa,
1453 &spa_feature_table[SPA_FEATURE_ASYNC_DESTROY], tx);
1454 ASSERT(!spa_feature_is_active(spa,
1455 &spa_feature_table[SPA_FEATURE_ASYNC_DESTROY]));
1456 VERIFY3U(0, ==, zap_remove(dp->dp_meta_objset,
1457 DMU_POOL_DIRECTORY_OBJECT,
1458 DMU_POOL_BPTREE_OBJ, tx));
1459 VERIFY3U(0, ==, bptree_free(dp->dp_meta_objset,
1460 dp->dp_bptree_obj, tx));
1461 dp->dp_bptree_obj = 0;
1462 }
1463 if (scn->scn_visited_this_txg) {
1464 zfs_dbgmsg("freed %llu blocks in %llums from "
1465 "free_bpobj/bptree txg %llu",
1466 (longlong_t)scn->scn_visited_this_txg,
1467 (longlong_t)
1468 (gethrtime() - scn->scn_sync_start_time) / MICROSEC,
1469 (longlong_t)tx->tx_txg);
1470 scn->scn_visited_this_txg = 0;
1471 /*
1472 * Re-sync the ddt so that we can further modify
1473 * it when doing bprewrite.
1474 */
1475 ddt_sync(spa, tx->tx_txg);
1476 }
1477 if (err == ERESTART)
1478 return;
1479 }
|
825 if (scn->scn_phys.scn_bookmark.zb_objset == ds->ds_object) {
826 if (dsl_dataset_is_snapshot(ds)) {
827 /* Note, scn_cur_{min,max}_txg stays the same. */
828 scn->scn_phys.scn_bookmark.zb_objset =
829 ds->ds_phys->ds_next_snap_obj;
830 zfs_dbgmsg("destroying ds %llu; currently traversing; "
831 "reset zb_objset to %llu",
832 (u_longlong_t)ds->ds_object,
833 (u_longlong_t)ds->ds_phys->ds_next_snap_obj);
834 scn->scn_phys.scn_flags |= DSF_VISIT_DS_AGAIN;
835 } else {
836 SET_BOOKMARK(&scn->scn_phys.scn_bookmark,
837 ZB_DESTROYED_OBJSET, 0, 0, 0);
838 zfs_dbgmsg("destroying ds %llu; currently traversing; "
839 "reset bookmark to -1,0,0,0",
840 (u_longlong_t)ds->ds_object);
841 }
842 } else if (zap_lookup_int_key(dp->dp_meta_objset,
843 scn->scn_phys.scn_queue_obj, ds->ds_object, &mintxg) == 0) {
844 ASSERT3U(ds->ds_phys->ds_num_children, <=, 1);
845 VERIFY0(zap_remove_int(dp->dp_meta_objset,
846 scn->scn_phys.scn_queue_obj, ds->ds_object, tx));
847 if (dsl_dataset_is_snapshot(ds)) {
848 /*
849 * We keep the same mintxg; it could be >
850 * ds_creation_txg if the previous snapshot was
851 * deleted too.
852 */
853 VERIFY(zap_add_int_key(dp->dp_meta_objset,
854 scn->scn_phys.scn_queue_obj,
855 ds->ds_phys->ds_next_snap_obj, mintxg, tx) == 0);
856 zfs_dbgmsg("destroying ds %llu; in queue; "
857 "replacing with %llu",
858 (u_longlong_t)ds->ds_object,
859 (u_longlong_t)ds->ds_phys->ds_next_snap_obj);
860 } else {
861 zfs_dbgmsg("destroying ds %llu; in queue; removing",
862 (u_longlong_t)ds->ds_object);
863 }
864 } else {
865 zfs_dbgmsg("destroying ds %llu; ignoring",
/*
 * Snapshot hook: dataset "ds" has just been snapshotted while a scan may
 * be active.  The scan tracks its position by objset number, so any
 * bookmark or scan-queue entry referring to "ds" is re-pointed at
 * ds->ds_phys->ds_prev_snap_obj (presumably the just-created snapshot,
 * which now holds the data being traversed — confirm against the
 * snapshot sync code).
 * NOTE(review): the "void" return-type line precedes this excerpt.
 */
877 dsl_scan_ds_snapshotted(dsl_dataset_t *ds, dmu_tx_t *tx)
878 {
879 dsl_pool_t *dp = ds->ds_dir->dd_pool;
880 dsl_scan_t *scn = dp->dp_scan;
881 uint64_t mintxg;
882
/* Nothing to fix up unless a scan is in progress. */
883 if (scn->scn_phys.scn_state != DSS_SCANNING)
884 return;
885
/* A snapshot was just taken, so a previous snapshot must exist. */
886 ASSERT(ds->ds_phys->ds_prev_snap_obj != 0);
887
/*
 * Case 1: the scan is currently traversing this dataset — redirect the
 * bookmark's objset to ds_prev_snap_obj.
 */
888 if (scn->scn_phys.scn_bookmark.zb_objset == ds->ds_object) {
889 scn->scn_phys.scn_bookmark.zb_objset =
890 ds->ds_phys->ds_prev_snap_obj;
891 zfs_dbgmsg("snapshotting ds %llu; currently traversing; "
892 "reset zb_objset to %llu",
893 (u_longlong_t)ds->ds_object,
894 (u_longlong_t)ds->ds_phys->ds_prev_snap_obj);
/*
 * Case 2: this dataset is waiting in the scan queue — re-key its queue
 * entry to ds_prev_snap_obj, preserving the recorded mintxg.
 */
895 } else if (zap_lookup_int_key(dp->dp_meta_objset,
896 scn->scn_phys.scn_queue_obj, ds->ds_object, &mintxg) == 0) {
897 VERIFY0(zap_remove_int(dp->dp_meta_objset,
898 scn->scn_phys.scn_queue_obj, ds->ds_object, tx));
899 VERIFY(zap_add_int_key(dp->dp_meta_objset,
900 scn->scn_phys.scn_queue_obj,
901 ds->ds_phys->ds_prev_snap_obj, mintxg, tx) == 0);
902 zfs_dbgmsg("snapshotting ds %llu; in queue; "
903 "replacing with %llu",
904 (u_longlong_t)ds->ds_object,
905 (u_longlong_t)ds->ds_phys->ds_prev_snap_obj);
906 }
/* Persist the updated scan state in this txg. */
907 dsl_scan_sync_state(scn, tx);
908 }
909
/*
 * Clone-swap hook: datasets ds1 and ds2 have just exchanged contents.
 * Any scan bookmark or scan-queue entry referring to one of them is
 * re-pointed at the other, so the scan keeps following the data rather
 * than the dataset object number.
 */
910 void
911 dsl_scan_ds_clone_swapped(dsl_dataset_t *ds1, dsl_dataset_t *ds2, dmu_tx_t *tx)
912 {
913 dsl_pool_t *dp = ds1->ds_dir->dd_pool;
914 dsl_scan_t *scn = dp->dp_scan;
915 uint64_t mintxg;
916
917 if (scn->scn_phys.scn_state != DSS_SCANNING)
/*
 * NOTE(review): original lines 918-919 are missing from this excerpt —
 * presumably the "return;" body of the guard above (matching
 * dsl_scan_ds_snapshotted); confirm against the full source.
 */
/* If the scan is currently traversing either dataset, follow the swap. */
920 if (scn->scn_phys.scn_bookmark.zb_objset == ds1->ds_object) {
921 scn->scn_phys.scn_bookmark.zb_objset = ds2->ds_object;
922 zfs_dbgmsg("clone_swap ds %llu; currently traversing; "
923 "reset zb_objset to %llu",
924 (u_longlong_t)ds1->ds_object,
925 (u_longlong_t)ds2->ds_object);
926 } else if (scn->scn_phys.scn_bookmark.zb_objset == ds2->ds_object) {
927 scn->scn_phys.scn_bookmark.zb_objset = ds1->ds_object;
928 zfs_dbgmsg("clone_swap ds %llu; currently traversing; "
929 "reset zb_objset to %llu",
930 (u_longlong_t)ds2->ds_object,
931 (u_longlong_t)ds1->ds_object);
932 }
933
/*
 * Swap any scan-queue entries.  If ds1 is queued, move its entry to
 * ds2; EEXIST from the add means both datasets were already queued, in
 * which case ds1's entry (just removed) is re-added so both remain.
 */
934 if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj,
935 ds1->ds_object, &mintxg) == 0) {
936 int err;
937
/* Both datasets record the same prev-snapshot txg after the swap. */
938 ASSERT3U(mintxg, ==, ds1->ds_phys->ds_prev_snap_txg);
939 ASSERT3U(mintxg, ==, ds2->ds_phys->ds_prev_snap_txg);
940 VERIFY0(zap_remove_int(dp->dp_meta_objset,
941 scn->scn_phys.scn_queue_obj, ds1->ds_object, tx));
942 err = zap_add_int_key(dp->dp_meta_objset,
943 scn->scn_phys.scn_queue_obj, ds2->ds_object, mintxg, tx);
944 VERIFY(err == 0 || err == EEXIST);
945 if (err == EEXIST) {
946 /* Both were there to begin with */
947 VERIFY(0 == zap_add_int_key(dp->dp_meta_objset,
948 scn->scn_phys.scn_queue_obj,
949 ds1->ds_object, mintxg, tx));
950 }
951 zfs_dbgmsg("clone_swap ds %llu; in queue; "
952 "replacing with %llu",
953 (u_longlong_t)ds1->ds_object,
954 (u_longlong_t)ds2->ds_object);
/* Only ds2 was queued: move its entry to ds1. */
955 } else if (zap_lookup_int_key(dp->dp_meta_objset,
956 scn->scn_phys.scn_queue_obj, ds2->ds_object, &mintxg) == 0) {
957 ASSERT3U(mintxg, ==, ds1->ds_phys->ds_prev_snap_txg);
958 ASSERT3U(mintxg, ==, ds2->ds_phys->ds_prev_snap_txg);
959 VERIFY0(zap_remove_int(dp->dp_meta_objset,
960 scn->scn_phys.scn_queue_obj, ds2->ds_object, tx));
961 VERIFY(0 == zap_add_int_key(dp->dp_meta_objset,
962 scn->scn_phys.scn_queue_obj, ds1->ds_object, mintxg, tx));
963 zfs_dbgmsg("clone_swap ds %llu; in queue; "
964 "replacing with %llu",
965 (u_longlong_t)ds2->ds_object,
966 (u_longlong_t)ds1->ds_object);
967 }
968
/* Persist the updated scan state in this txg. */
969 dsl_scan_sync_state(scn, tx);
970 }
971
/*
 * Argument bundle passed (via void *arg) to enqueue_clones_cb.
 */
972 struct enqueue_clones_arg {
/* Open transaction used for the callback's scan-queue ZAP updates. */
973 dmu_tx_t *tx;
/*
 * NOTE(review): presumably the object number of the origin snapshot
 * whose clones are being enqueued — confirm against the (partially
 * elided) enqueue_clones_cb body.
 */
974 uint64_t originobj;
975 };
976
977 /* ARGSUSED */
978 static int
979 enqueue_clones_cb(spa_t *spa, uint64_t dsobj, const char *dsname, void *arg)
997 dsl_dataset_rele(ds, FTAG);
998 if (err)
999 return (err);
1000 ds = prev;
1001 }
1002 VERIFY(zap_add_int_key(dp->dp_meta_objset,
1003 scn->scn_phys.scn_queue_obj, ds->ds_object,
1004 ds->ds_phys->ds_prev_snap_txg, eca->tx) == 0);
1005 }
1006 dsl_dataset_rele(ds, FTAG);
1007 return (0);
1008 }
1009
1010 static void
1011 dsl_scan_visitds(dsl_scan_t *scn, uint64_t dsobj, dmu_tx_t *tx)
1012 {
1013 dsl_pool_t *dp = scn->scn_dp;
1014 dsl_dataset_t *ds;
1015 objset_t *os;
1016
1017 VERIFY0(dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
1018
1019 if (dmu_objset_from_ds(ds, &os))
1020 goto out;
1021
1022 /*
1023 * Only the ZIL in the head (non-snapshot) is valid. Even though
1024 * snapshots can have ZIL block pointers (which may be the same
1025 * BP as in the head), they must be ignored. So we traverse the
1026 * ZIL here, rather than in scan_recurse(), because the regular
1027 * snapshot block-sharing rules don't apply to it.
1028 */
1029 if (DSL_SCAN_IS_SCRUB_RESILVER(scn) && !dsl_dataset_is_snapshot(ds))
1030 dsl_scan_zil(dp, &os->os_zil_header);
1031
1032 /*
1033 * Iterate over the bps in this ds.
1034 */
1035 dmu_buf_will_dirty(ds->ds_dbuf, tx);
1036 dsl_scan_visit_rootbp(scn, ds, &ds->ds_phys->ds_bp, tx);
1037
1291 */
1292 dsl_scan_visitds(scn, scn->scn_phys.scn_bookmark.zb_objset, tx);
1293 if (scn->scn_pausing)
1294 return;
1295 }
1296
1297 /*
1298 * In case we were paused right at the end of the ds, zero the
1299 * bookmark so we don't think that we're still trying to resume.
1300 */
1301 bzero(&scn->scn_phys.scn_bookmark, sizeof (zbookmark_t));
1302
1303 /* keep pulling things out of the zap-object-as-queue */
1304 while (zap_cursor_init(&zc, dp->dp_meta_objset,
1305 scn->scn_phys.scn_queue_obj),
1306 zap_cursor_retrieve(&zc, &za) == 0) {
1307 dsl_dataset_t *ds;
1308 uint64_t dsobj;
1309
1310 dsobj = strtonum(za.za_name, NULL);
1311 VERIFY0(zap_remove_int(dp->dp_meta_objset,
1312 scn->scn_phys.scn_queue_obj, dsobj, tx));
1313
1314 /* Set up min/max txg */
1315 VERIFY0(dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
1316 if (za.za_first_integer != 0) {
1317 scn->scn_phys.scn_cur_min_txg =
1318 MAX(scn->scn_phys.scn_min_txg,
1319 za.za_first_integer);
1320 } else {
1321 scn->scn_phys.scn_cur_min_txg =
1322 MAX(scn->scn_phys.scn_min_txg,
1323 ds->ds_phys->ds_prev_snap_txg);
1324 }
1325 scn->scn_phys.scn_cur_max_txg = dsl_scan_ds_maxtxg(ds);
1326 dsl_dataset_rele(ds, FTAG);
1327
1328 dsl_scan_visitds(scn, dsobj, tx);
1329 zap_cursor_fini(&zc);
1330 if (scn->scn_pausing)
1331 return;
1332 }
1333 zap_cursor_fini(&zc);
1334 }
1335
1417 spa_sync_pass(dp->dp_spa) > 1)
1418 return;
1419
1420 scn->scn_visited_this_txg = 0;
1421 scn->scn_pausing = B_FALSE;
1422 scn->scn_sync_start_time = gethrtime();
1423 spa->spa_scrub_active = B_TRUE;
1424
1425 /*
1426 * First process the free list. If we pause the free, don't do
1427 * any scanning. This ensures that there is no free list when
1428 * we are scanning, so the scan code doesn't have to worry about
1429 * traversing it.
1430 */
1431 if (spa_version(dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
1432 scn->scn_is_bptree = B_FALSE;
1433 scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
1434 NULL, ZIO_FLAG_MUSTSUCCEED);
1435 err = bpobj_iterate(&dp->dp_free_bpobj,
1436 dsl_scan_free_block_cb, scn, tx);
1437 VERIFY0(zio_wait(scn->scn_zio_root));
1438
1439 if (err == 0 && spa_feature_is_active(spa,
1440 &spa_feature_table[SPA_FEATURE_ASYNC_DESTROY])) {
1441 scn->scn_is_bptree = B_TRUE;
1442 scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
1443 NULL, ZIO_FLAG_MUSTSUCCEED);
1444 err = bptree_iterate(dp->dp_meta_objset,
1445 dp->dp_bptree_obj, B_TRUE, dsl_scan_free_block_cb,
1446 scn, tx);
1447 VERIFY0(zio_wait(scn->scn_zio_root));
1448 if (err != 0)
1449 return;
1450
1451 /* disable async destroy feature */
1452 spa_feature_decr(spa,
1453 &spa_feature_table[SPA_FEATURE_ASYNC_DESTROY], tx);
1454 ASSERT(!spa_feature_is_active(spa,
1455 &spa_feature_table[SPA_FEATURE_ASYNC_DESTROY]));
1456 VERIFY0(zap_remove(dp->dp_meta_objset,
1457 DMU_POOL_DIRECTORY_OBJECT,
1458 DMU_POOL_BPTREE_OBJ, tx));
1459 VERIFY0(bptree_free(dp->dp_meta_objset,
1460 dp->dp_bptree_obj, tx));
1461 dp->dp_bptree_obj = 0;
1462 }
1463 if (scn->scn_visited_this_txg) {
1464 zfs_dbgmsg("freed %llu blocks in %llums from "
1465 "free_bpobj/bptree txg %llu",
1466 (longlong_t)scn->scn_visited_this_txg,
1467 (longlong_t)
1468 (gethrtime() - scn->scn_sync_start_time) / MICROSEC,
1469 (longlong_t)tx->tx_txg);
1470 scn->scn_visited_this_txg = 0;
1471 /*
1472 * Re-sync the ddt so that we can further modify
1473 * it when doing bprewrite.
1474 */
1475 ddt_sync(spa, tx->tx_txg);
1476 }
1477 if (err == ERESTART)
1478 return;
1479 }
|