331 }
332
333 static int
334 dsl_dataset_snap_remove(dsl_dataset_t *ds, char *name, dmu_tx_t *tx)
335 {
336 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
337 uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
338 matchtype_t mt;
339 int err;
340
341 dsl_dir_snap_cmtime_update(ds->ds_dir);
342
343 if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
344 mt = MT_FIRST;
345 else
346 mt = MT_EXACT;
347
348 err = zap_remove_norm(mos, snapobj, name, mt, tx);
349 if (err == ENOTSUP && mt == MT_FIRST)
350 err = zap_remove(mos, snapobj, name, tx);
351 return (err);
352 }
353
354 static int
355 dsl_dataset_get_ref(dsl_pool_t *dp, uint64_t dsobj, void *tag,
356 dsl_dataset_t **dsp)
357 {
358 objset_t *mos = dp->dp_meta_objset;
359 dmu_buf_t *dbuf;
360 dsl_dataset_t *ds;
361 int err;
362 dmu_object_info_t doi;
363
364 ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
365 dsl_pool_sync_context(dp));
366
367 err = dmu_bonus_hold(mos, dsobj, tag, &dbuf);
368 if (err)
369 return (err);
370
1119 */
1120 dsl_dataset_make_exclusive(ds, tag);
1121 /*
1122 * If we're removing a clone, we might also need to remove its
1123 * origin.
1124 */
1125 do {
1126 dsda.need_prep = B_FALSE;
1127 if (dsl_dir_is_clone(dd)) {
1128 err = dsl_dataset_origin_rm_prep(&dsda, tag);
1129 if (err) {
1130 dsl_dir_close(dd, FTAG);
1131 goto out;
1132 }
1133 }
1134
1135 dstg = dsl_sync_task_group_create(ds->ds_dir->dd_pool);
1136 dsl_sync_task_create(dstg, dsl_dataset_destroy_check,
1137 dsl_dataset_destroy_sync, &dsda, tag, 0);
1138 dsl_sync_task_create(dstg, dsl_dir_destroy_check,
1139 dsl_dir_destroy_sync, dd, FTAG, 0);
1140 err = dsl_sync_task_group_wait(dstg);
1141 dsl_sync_task_group_destroy(dstg);
1142
1143 /*
1144 * We could be racing against 'zfs release' or 'zfs destroy -d'
1145 * on the origin snap, in which case we can get EBUSY if we
1146 * needed to destroy the origin snap but were not ready to
1147 * do so.
1148 */
1149 if (dsda.need_prep) {
1150 ASSERT(err == EBUSY);
1151 ASSERT(dsl_dir_is_clone(dd));
1152 ASSERT(dsda.rm_origin == NULL);
1153 }
1154 } while (dsda.need_prep);
1155
1156 if (dsda.rm_origin != NULL)
1157 dsl_dataset_disown(dsda.rm_origin, tag);
1158
1159 /* if it is successful, dsl_dir_destroy_sync will close the dd */
1995 /*
1996 * If there's an fs-only reservation, any blocks that might become
1997 * owned by the snapshot dataset must be accommodated by space
1998 * outside of the reservation.
1999 */
2000 ASSERT(ds->ds_reserved == 0 || DS_UNIQUE_IS_ACCURATE(ds));
2001 asize = MIN(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
2002 if (asize > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE))
2003 return (ENOSPC);
2004
2005 /*
2006 * Propagate any reserved space for this snapshot to other
2007 * snapshot checks in this sync group.
2008 */
2009 if (asize > 0)
2010 dsl_dir_willuse_space(ds->ds_dir, asize, tx);
2011
2012 return (0);
2013 }
2014
2015 int
2016 dsl_dataset_snapshot_check(dsl_dataset_t *ds, const char *snapname,
2017 dmu_tx_t *tx)
2018 {
2019 int err;
2020 uint64_t value;
2021
2022 /*
2023 * We don't allow multiple snapshots of the same txg. If there
2024 * is already one, try again.
2025 */
2026 if (ds->ds_phys->ds_prev_snap_txg >= tx->tx_txg)
2027 return (EAGAIN);
2028
2029 /*
2030 * Check for conflicting snapshot name.
2031 */
2032 err = dsl_dataset_snap_lookup(ds, snapname, &value);
2033 if (err == 0)
2034 return (EEXIST);
2035 if (err != ENOENT)
2036 return (err);
2037
2038 /*
2039 * Check that the dataset's name is not too long.  The full name is
2040 * the dataset name's length + 1 for the @-sign + the snapshot name's length.
2041 */
2042 if (dsl_dataset_namelen(ds) + 1 + strlen(snapname) >= MAXNAMELEN)
2043 return (ENAMETOOLONG);
2044
2045 err = dsl_dataset_snapshot_reserve_space(ds, tx);
2046 if (err)
2047 return (err);
2048
2049 ds->ds_trysnap_txg = tx->tx_txg;
2050 return (0);
2051 }
2052
2053 void
2054 dsl_dataset_snapshot_sync(dsl_dataset_t *ds, const char *snapname,
2055 dmu_tx_t *tx)
2056 {
2057 dsl_pool_t *dp = ds->ds_dir->dd_pool;
2058 dmu_buf_t *dbuf;
2059 dsl_dataset_phys_t *dsphys;
2060 uint64_t dsobj, crtxg;
2061 objset_t *mos = dp->dp_meta_objset;
2062 int err;
2063
2064 ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock));
2065
2066 /*
2067 * The origin's ds_creation_txg has to be < TXG_INITIAL
2068 */
2069 if (strcmp(snapname, ORIGIN_DIR_NAME) == 0)
2070 crtxg = 1;
2071 else
2072 crtxg = tx->tx_txg;
2073
2074 dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
2075 DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
2076 VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
2077 dmu_buf_will_dirty(dbuf, tx);
2078 dsphys = dbuf->db_data;
2079 bzero(dsphys, sizeof (dsl_dataset_phys_t));
2080 dsphys->ds_dir_obj = ds->ds_dir->dd_object;
2081 dsphys->ds_fsid_guid = unique_create();
2082 (void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
2083 sizeof (dsphys->ds_guid));
2084 dsphys->ds_prev_snap_obj = ds->ds_phys->ds_prev_snap_obj;
2085 dsphys->ds_prev_snap_txg = ds->ds_phys->ds_prev_snap_txg;
2701 if (ds->ds_phys->ds_prev_snap_obj == 0)
2702 continue;
2703
2704 dsl_deadlist_space(&ds->ds_deadlist,
2705 &dlused, &dlcomp, &dluncomp);
2706 pa->used += dlused;
2707 pa->comp += dlcomp;
2708 pa->uncomp += dluncomp;
2709 }
2710
2711 /*
2712 * If we are a clone of a clone then we never reached ORIGIN,
2713 * so we need to subtract out the clone origin's used space.
2714 */
2715 if (pa->origin_origin) {
2716 pa->used -= pa->origin_origin->ds_phys->ds_referenced_bytes;
2717 pa->comp -= pa->origin_origin->ds_phys->ds_compressed_bytes;
2718 pa->uncomp -= pa->origin_origin->ds_phys->ds_uncompressed_bytes;
2719 }
2720
2721 /* Check that there is enough space here */
2722 err = dsl_dir_transfer_possible(origin_ds->ds_dir, hds->ds_dir,
2723 pa->used);
2724 if (err)
2725 return (err);
2726
2727 /*
2728 * Compute the amounts of space that will be used by snapshots
2729 * after the promotion (for both origin and clone). For each,
2730 * it is the amount of space that will be on all of their
2731 * deadlists (that was not born before their new origin).
2732 */
2733 if (hds->ds_dir->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
2734 uint64_t space;
2735
2736 /*
2737 * Note, typically this will not be a clone of a clone,
2738 * so dd_origin_txg will be < TXG_INITIAL, so
2739 * these snaplist_space() -> dsl_deadlist_space_range()
2740 * calls will be fast because they do not have to
2741 * iterate over all bps.
2742 */
2743 snap = list_head(&pa->origin_snaps);
2836
2837 }
2838
2839 /* move snapshots to this dir */
2840 for (snap = list_head(&pa->shared_snaps); snap;
2841 snap = list_next(&pa->shared_snaps, snap)) {
2842 dsl_dataset_t *ds = snap->ds;
2843
2844 /* unregister props as dsl_dir is changing */
2845 if (ds->ds_objset) {
2846 dmu_objset_evict(ds->ds_objset);
2847 ds->ds_objset = NULL;
2848 }
2849 /* move snap name entry */
2850 VERIFY(0 == dsl_dataset_get_snapname(ds));
2851 VERIFY(0 == dsl_dataset_snap_remove(origin_head,
2852 ds->ds_snapname, tx));
2853 VERIFY(0 == zap_add(dp->dp_meta_objset,
2854 hds->ds_phys->ds_snapnames_zapobj, ds->ds_snapname,
2855 8, 1, &ds->ds_object, tx));
2856
2857 /* change containing dsl_dir */
2858 dmu_buf_will_dirty(ds->ds_dbuf, tx);
2859 ASSERT3U(ds->ds_phys->ds_dir_obj, ==, odd->dd_object);
2860 ds->ds_phys->ds_dir_obj = dd->dd_object;
2861 ASSERT3P(ds->ds_dir, ==, odd);
2862 dsl_dir_close(ds->ds_dir, ds);
2863 VERIFY(0 == dsl_dir_open_obj(dp, dd->dd_object,
2864 NULL, ds, &ds->ds_dir));
2865
2866 /* move any clone references */
2867 if (ds->ds_phys->ds_next_clones_obj &&
2868 spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
2869 zap_cursor_t zc;
2870 zap_attribute_t za;
2871
2872 for (zap_cursor_init(&zc, dp->dp_meta_objset,
2873 ds->ds_phys->ds_next_clones_obj);
2874 zap_cursor_retrieve(&zc, &za) == 0;
2875 zap_cursor_advance(&zc)) {
|
331 }
332
333 static int
334 dsl_dataset_snap_remove(dsl_dataset_t *ds, char *name, dmu_tx_t *tx)
335 {
336 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
337 uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
338 matchtype_t mt;
339 int err;
340
341 dsl_dir_snap_cmtime_update(ds->ds_dir);
342
343 if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
344 mt = MT_FIRST;
345 else
346 mt = MT_EXACT;
347
348 err = zap_remove_norm(mos, snapobj, name, mt, tx);
349 if (err == ENOTSUP && mt == MT_FIRST)
350 err = zap_remove(mos, snapobj, name, tx);
351
352 if (err == 0)
353 dsl_snapcount_adjust(ds->ds_dir, tx, -1, B_TRUE);
354
355 return (err);
356 }
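/*
 * Editorial sketch, not part of this change: the ENOTSUP fallback above is
 * the general pattern for removing an entry from a ZAP that may or may not
 * have been created with normalization support.  A hypothetical generic
 * helper (name invented here), assuming only the zap_remove_norm() and
 * zap_remove() interfaces used above, could look like this:
 */
static int
zap_remove_maybe_norm(objset_t *os, uint64_t zapobj, const char *name,
    boolean_t ci, dmu_tx_t *tx)
{
	matchtype_t mt = ci ? MT_FIRST : MT_EXACT;
	int err;

	/* Try a normalization-aware removal first. */
	err = zap_remove_norm(os, zapobj, name, mt, tx);

	/* A plain ZAP rejects match types; retry with an exact match. */
	if (err == ENOTSUP && mt == MT_FIRST)
		err = zap_remove(os, zapobj, name, tx);
	return (err);
}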
357
358 static int
359 dsl_dataset_get_ref(dsl_pool_t *dp, uint64_t dsobj, void *tag,
360 dsl_dataset_t **dsp)
361 {
362 objset_t *mos = dp->dp_meta_objset;
363 dmu_buf_t *dbuf;
364 dsl_dataset_t *ds;
365 int err;
366 dmu_object_info_t doi;
367
368 ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
369 dsl_pool_sync_context(dp));
370
371 err = dmu_bonus_hold(mos, dsobj, tag, &dbuf);
372 if (err)
373 return (err);
374
1123 */
1124 dsl_dataset_make_exclusive(ds, tag);
1125 /*
1126 * If we're removing a clone, we might also need to remove its
1127 * origin.
1128 */
1129 do {
1130 dsda.need_prep = B_FALSE;
1131 if (dsl_dir_is_clone(dd)) {
1132 err = dsl_dataset_origin_rm_prep(&dsda, tag);
1133 if (err) {
1134 dsl_dir_close(dd, FTAG);
1135 goto out;
1136 }
1137 }
1138
1139 dstg = dsl_sync_task_group_create(ds->ds_dir->dd_pool);
1140 dsl_sync_task_create(dstg, dsl_dataset_destroy_check,
1141 dsl_dataset_destroy_sync, &dsda, tag, 0);
1142 dsl_sync_task_create(dstg, dsl_dir_destroy_check,
1143 dsl_dir_destroy_sync, dd, FTAG, 0);
1144 err = dsl_sync_task_group_wait(dstg);
1145 dsl_sync_task_group_destroy(dstg);
1146
1147 /*
1148 * We could be racing against 'zfs release' or 'zfs destroy -d'
1149 * on the origin snap, in which case we can get EBUSY if we
1150 * needed to destroy the origin snap but were not ready to
1151 * do so.
1152 */
1153 if (dsda.need_prep) {
1154 ASSERT(err == EBUSY);
1155 ASSERT(dsl_dir_is_clone(dd));
1156 ASSERT(dsda.rm_origin == NULL);
1157 }
1158 } while (dsda.need_prep);
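/*
 * Editorial note, not part of this change: the loop above is a two-phase
 * protocol.  dsl_dataset_origin_rm_prep() decides from the current state
 * whether the origin snapshot must be destroyed along with the clone; if a
 * racing 'zfs release' or 'zfs destroy -d' changes that state before the
 * sync task runs, the check fails with EBUSY while dsda.rm_origin is still
 * NULL, dsda.need_prep is set, and the prep/retry cycle runs again.
 */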
1159
1160 if (dsda.rm_origin != NULL)
1161 dsl_dataset_disown(dsda.rm_origin, tag);
1162
1163 /* if it is successful, dsl_dir_destroy_sync will close the dd */
1999 /*
2000 * If there's an fs-only reservation, any blocks that might become
2001 * owned by the snapshot dataset must be accommodated by space
2002 * outside of the reservation.
2003 */
2004 ASSERT(ds->ds_reserved == 0 || DS_UNIQUE_IS_ACCURATE(ds));
2005 asize = MIN(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
2006 if (asize > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE))
2007 return (ENOSPC);
2008
2009 /*
2010 * Propagate any reserved space for this snapshot to other
2011 * snapshot checks in this sync group.
2012 */
2013 if (asize > 0)
2014 dsl_dir_willuse_space(ds->ds_dir, asize, tx);
2015
2016 return (0);
2017 }
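/*
 * Editorial worked example, not part of this change: with a 10G
 * refreservation and ds_unique_bytes == 3G, asize above is MIN(3G, 10G) ==
 * 3G.  Those 3G would become owned by the new snapshot while the full 10G
 * reservation must remain backed, so 3G of unreserved space has to be
 * available or the snapshot fails with ENOSPC.
 */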
2018
2019 /*
2020 * Check if adding additional snapshot(s) would exceed any snapshot quotas.
2021 * All snapshot quotas up to the root dataset (i.e., the pool itself) or the
2022 * given ancestor must be satisfied.  Note that it is valid for the count to
2023 * already exceed the quota; this can happen when a recursive snapshot is
2024 * taken from a dataset above this one.
2025 */
2026 int
2027 dsl_snapcount_check(dsl_dir_t *dd, dmu_tx_t *tx, uint64_t cnt,
2028 dsl_dir_t *ancestor)
2029 {
2030 uint64_t quota;
2031 int err = 0;
2032
2033 /*
2034 * As with dsl_dataset_set_reservation_check(), don't run this check in
2035 * open context.
2036 */
2037 if (!dmu_tx_is_syncing(tx))
2038 return (0);
2039
2040 /*
2041 * If renaming a dataset with no snapshots, count adjustment is 0.
2042 * Likewise when taking a recursive snapshot below the top-level (see
2043 * the comment in snapshot_check() for more details).
2044 */
2045 if (cnt == 0)
2046 return (0);
2047
2048 /*
2049 * If an ancestor has been provided, stop checking the quota once we
2050 * hit that dir.  We need this during rename so that we don't check the
2051 * same dirs twice once we recurse up to the common ancestor.
2052 */
2053 if (ancestor == dd)
2054 return (0);
2055
2056 /*
2057 * If there's no value for this property, there's no need to enforce a
2058 * snapshot quota.
2059 */
2060 err = dsl_prop_get_dd(dd, zfs_prop_to_name(ZFS_PROP_SNAPSHOT_QUOTA),
2061 8, 1, &quota, NULL, B_FALSE);
2062 if (err == ENOENT)
2063 return (0);
2064 else if (err != 0)
2065 return (err);
2066
2067 #ifdef _KERNEL
2068 extern void __dtrace_probe_zfs__ss__quota(uint64_t, uint64_t, char *);
2069 __dtrace_probe_zfs__ss__quota(
2070 (uint64_t)dd->dd_phys->dd_snapshot_count, (uint64_t)quota,
2071 dd->dd_myname);
2072 #endif
2073
2074 if (quota > 0 && (dd->dd_phys->dd_snapshot_count + cnt) > quota)
2075 return (EDQUOT);
2076
2077 if (dd->dd_parent != NULL)
2078 err = dsl_snapcount_check(dd->dd_parent, tx, cnt, ancestor);
2079
2080 return (err);
2081 }
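/*
 * Editorial usage sketch with hypothetical names, not part of this change:
 * when "tank/a/fs" (carrying nr_snaps snapshots) is renamed to "tank/b/fs",
 * the counts at and above the common ancestor "tank" do not change, so the
 * rename path only needs to validate the new parent chain up to (but not
 * including) that ancestor:
 *
 *	err = dsl_snapcount_check(new_parent_dd, tx, nr_snaps,
 *	    common_ancestor_dd);
 */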
2082
2083 /*
2084 * Adjust the snapshot count for the specified dsl_dir_t and all parents.
2085 * When a new snapshot is created, increment the count on all parents, and when
2086 * a snapshot is destroyed, decrement the count.
2087 */
2088 void
2089 dsl_snapcount_adjust(dsl_dir_t *dd, dmu_tx_t *tx, int64_t delta,
2090 boolean_t first)
2091 {
2092 /*
2093 * On initial entry we need to check if this feature is active, but
2094 * we don't want to re-check this on each recursive call. Note: the
2095 * feature cannot be active if it's not enabled.  If the feature is not
2096 * active, don't touch the on-disk count fields.
2097 */
2098 if (first) {
2099 dsl_dataset_t *ds = NULL;
2100 spa_t *spa;
2101 zfeature_info_t *quota_feat =
2102 &spa_feature_table[SPA_FEATURE_DS_SS_QUOTA];
2103
2104 VERIFY(0 == dsl_dataset_hold_obj(dd->dd_pool,
2105 dd->dd_phys->dd_head_dataset_obj, FTAG, &ds));
2106 spa = dsl_dataset_get_spa(ds);
2107 dsl_dataset_rele(ds, FTAG);
2108 if (!spa_feature_is_active(spa, quota_feat))
2109 return;
2110 }
2111
2112 /*
2113 * As with dsl_dataset_set_reservation_check(), we don't want to run
2114 * this check in open context.
2115 */
2116 if (!dmu_tx_is_syncing(tx))
2117 return;
2118
2119 /* if renaming a dataset with no snapshots, count adjustment is 0 */
2120 if (delta == 0)
2121 return;
2122
2123 /* Apply the delta to this dir's count */
2124 dmu_buf_will_dirty(dd->dd_dbuf, tx);
2125
2126 mutex_enter(&dd->dd_lock);
2127
2128 /*
2129 * Counts may be incorrect if dealing with an existing pool and
2130 * there has never been a quota set in the dataset hierarchy.
2131 * This is not an error.
2132 */
2133 if (delta < 0 && dd->dd_phys->dd_snapshot_count < (delta * -1)) {
2134 #ifdef _KERNEL
2135 extern void __dtrace_probe_zfs__sscnt__adj__neg(char *);
2136 __dtrace_probe_zfs__sscnt__adj__neg(dd->dd_myname);
2137 #endif
2138 mutex_exit(&dd->dd_lock);
2139 return;
2140 }
2141
2142 dd->dd_phys->dd_snapshot_count += delta;
2143
2144 /* Roll up this delta into our ancestors */
2145
2146 if (dd->dd_parent != NULL)
2147 dsl_snapcount_adjust(dd->dd_parent, tx, delta, B_FALSE);
2148
2149 mutex_exit(&dd->dd_lock);
2150 }
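/*
 * Editorial worked example, not part of this change: snapshotting
 * "tank/a/b" calls dsl_snapcount_adjust(b_dd, tx, 1, B_TRUE), which bumps
 * dd_snapshot_count on "tank/a/b" and then recurses through "tank/a" and
 * "tank":
 *
 *	tank      dd_snapshot_count: 7 -> 8
 *	tank/a    dd_snapshot_count: 4 -> 5
 *	tank/a/b  dd_snapshot_count: 1 -> 2
 *
 * Destroying that snapshot later performs the same walk with delta == -1.
 */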
2151
2152 int
2153 dsl_dataset_snapshot_check(dsl_dataset_t *ds, const char *snapname,
2154 uint64_t cnt, dmu_tx_t *tx)
2155 {
2156 int err;
2157 uint64_t value;
2158
2159 /*
2160 * We don't allow multiple snapshots of the same txg. If there
2161 * is already one, try again.
2162 */
2163 if (ds->ds_phys->ds_prev_snap_txg >= tx->tx_txg)
2164 return (EAGAIN);
2165
2166 /*
2167 * Check for conflicting snapshot name.
2168 */
2169 err = dsl_dataset_snap_lookup(ds, snapname, &value);
2170 if (err == 0)
2171 return (EEXIST);
2172 if (err != ENOENT)
2173 return (err);
2174
2175 /*
2176 * Check that the dataset's name is not too long.  The full name is
2177 * the dataset name's length + 1 for the @-sign + the snapshot name's length.
2178 */
2179 if (dsl_dataset_namelen(ds) + 1 + strlen(snapname) >= MAXNAMELEN)
2180 return (ENAMETOOLONG);
2181
2182 err = dsl_snapcount_check(ds->ds_dir, tx, cnt, NULL);
2183 if (err)
2184 return (err);
2185
2186 err = dsl_dataset_snapshot_reserve_space(ds, tx);
2187 if (err)
2188 return (err);
2189
2190 ds->ds_trysnap_txg = tx->tx_txg;
2191 return (0);
2192 }
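/*
 * Editorial example, not part of this change: for a request to create
 * "tank/fs@today", dsl_dataset_namelen() is strlen("tank/fs") == 7, the
 * @-sign adds 1, and strlen("today") == 5, so 13 < MAXNAMELEN (256 on
 * illumos) and the length check passes.
 */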
2193
2194 void
2195 dsl_dataset_snapshot_sync(dsl_dataset_t *ds, const char *snapname,
2196 dmu_tx_t *tx)
2197 {
2198 dsl_pool_t *dp = ds->ds_dir->dd_pool;
2199 dmu_buf_t *dbuf;
2200 dsl_dataset_phys_t *dsphys;
2201 uint64_t dsobj, crtxg;
2202 objset_t *mos = dp->dp_meta_objset;
2203 int err;
2204
2205 ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock));
2206
2207 dsl_snapcount_adjust(ds->ds_dir, tx, 1, B_TRUE);
2208
2209 /*
2210 * The origin's ds_creation_txg has to be < TXG_INITIAL
2211 */
2212 if (strcmp(snapname, ORIGIN_DIR_NAME) == 0)
2213 crtxg = 1;
2214 else
2215 crtxg = tx->tx_txg;
2216
2217 dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
2218 DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
2219 VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
2220 dmu_buf_will_dirty(dbuf, tx);
2221 dsphys = dbuf->db_data;
2222 bzero(dsphys, sizeof (dsl_dataset_phys_t));
2223 dsphys->ds_dir_obj = ds->ds_dir->dd_object;
2224 dsphys->ds_fsid_guid = unique_create();
2225 (void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
2226 sizeof (dsphys->ds_guid));
2227 dsphys->ds_prev_snap_obj = ds->ds_phys->ds_prev_snap_obj;
2228 dsphys->ds_prev_snap_txg = ds->ds_phys->ds_prev_snap_txg;
2844 if (ds->ds_phys->ds_prev_snap_obj == 0)
2845 continue;
2846
2847 dsl_deadlist_space(&ds->ds_deadlist,
2848 &dlused, &dlcomp, &dluncomp);
2849 pa->used += dlused;
2850 pa->comp += dlcomp;
2851 pa->uncomp += dluncomp;
2852 }
2853
2854 /*
2855 * If we are a clone of a clone then we never reached ORIGIN,
2856 * so we need to subtract out the clone origin's used space.
2857 */
2858 if (pa->origin_origin) {
2859 pa->used -= pa->origin_origin->ds_phys->ds_referenced_bytes;
2860 pa->comp -= pa->origin_origin->ds_phys->ds_compressed_bytes;
2861 pa->uncomp -= pa->origin_origin->ds_phys->ds_uncompressed_bytes;
2862 }
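/*
 * Editorial note, not part of this change: e.g. with a clone chain
 * origin -> c1 -> c2, promoting c2 walks its snapshots back only as far as
 * c1's origin snapshot rather than all the way to $ORIGIN, so the space
 * still referenced by that origin (pa->origin_origin) was folded into the
 * totals and is subtracted back out above.
 */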
2863
2864 /* Check that there is enough space and quota headroom here */
2865 err = dsl_dir_transfer_possible(origin_ds->ds_dir, hds->ds_dir,
2866 origin_ds->ds_dir, pa->used, tx);
2867 if (err)
2868 return (err);
2869
2870 /*
2871 * Compute the amounts of space that will be used by snapshots
2872 * after the promotion (for both origin and clone). For each,
2873 * it is the amount of space that will be on all of their
2874 * deadlists (that was not born before their new origin).
2875 */
2876 if (hds->ds_dir->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
2877 uint64_t space;
2878
2879 /*
2880 * Note, typically this will not be a clone of a clone,
2881 * so dd_origin_txg will be < TXG_INITIAL, so
2882 * these snaplist_space() -> dsl_deadlist_space_range()
2883 * calls will be fast because they do not have to
2884 * iterate over all bps.
2885 */
2886 snap = list_head(&pa->origin_snaps);
2979
2980 }
2981
2982 /* move snapshots to this dir */
2983 for (snap = list_head(&pa->shared_snaps); snap;
2984 snap = list_next(&pa->shared_snaps, snap)) {
2985 dsl_dataset_t *ds = snap->ds;
2986
2987 /* unregister props as dsl_dir is changing */
2988 if (ds->ds_objset) {
2989 dmu_objset_evict(ds->ds_objset);
2990 ds->ds_objset = NULL;
2991 }
2992 /* move snap name entry */
2993 VERIFY(0 == dsl_dataset_get_snapname(ds));
2994 VERIFY(0 == dsl_dataset_snap_remove(origin_head,
2995 ds->ds_snapname, tx));
2996 VERIFY(0 == zap_add(dp->dp_meta_objset,
2997 hds->ds_phys->ds_snapnames_zapobj, ds->ds_snapname,
2998 8, 1, &ds->ds_object, tx));
2999 dsl_snapcount_adjust(hds->ds_dir, tx, 1, B_TRUE);
3000
3001 /* change containing dsl_dir */
3002 dmu_buf_will_dirty(ds->ds_dbuf, tx);
3003 ASSERT3U(ds->ds_phys->ds_dir_obj, ==, odd->dd_object);
3004 ds->ds_phys->ds_dir_obj = dd->dd_object;
3005 ASSERT3P(ds->ds_dir, ==, odd);
3006 dsl_dir_close(ds->ds_dir, ds);
3007 VERIFY(0 == dsl_dir_open_obj(dp, dd->dd_object,
3008 NULL, ds, &ds->ds_dir));
3009
3010 /* move any clone references */
3011 if (ds->ds_phys->ds_next_clones_obj &&
3012 spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
3013 zap_cursor_t zc;
3014 zap_attribute_t za;
3015
3016 for (zap_cursor_init(&zc, dp->dp_meta_objset,
3017 ds->ds_phys->ds_next_clones_obj);
3018 zap_cursor_retrieve(&zc, &za) == 0;
3019 zap_cursor_advance(&zc)) {
|