3740 Poor ZFS send / receive performance due to snapshot hold / release processing
Submitted by: Steven Hartland <steven.hartland@multiplay.co.uk>
--- old/usr/src/uts/common/fs/zfs/dsl_destroy.c
+++ new/usr/src/uts/common/fs/zfs/dsl_destroy.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 * Copyright (c) 2013 by Delphix. All rights reserved.
24 + * Copyright (c) 2013 Steven Hartland. All rights reserved.
24 25 */
25 26
26 27 #include <sys/zfs_context.h>
27 28 #include <sys/dsl_userhold.h>
28 29 #include <sys/dsl_dataset.h>
29 30 #include <sys/dsl_synctask.h>
30 31 #include <sys/dmu_tx.h>
31 32 #include <sys/dsl_pool.h>
32 33 #include <sys/dsl_dir.h>
33 34 #include <sys/dmu_traverse.h>
34 35 #include <sys/dsl_scan.h>
35 36 #include <sys/dmu_objset.h>
36 37 #include <sys/zap.h>
37 38 #include <sys/zfeature.h>
38 39 #include <sys/zfs_ioctl.h>
39 40 #include <sys/dsl_deleg.h>
40 41
41 42 typedef struct dmu_snapshots_destroy_arg {
42 43 nvlist_t *dsda_snaps;
43 44 nvlist_t *dsda_successful_snaps;
44 45 boolean_t dsda_defer;
45 46 nvlist_t *dsda_errlist;
46 47 } dmu_snapshots_destroy_arg_t;
47 48
48 49 /*
49 50 * ds must be owned.
50 51 */
51 52 static int
52 53 dsl_destroy_snapshot_check_impl(dsl_dataset_t *ds, boolean_t defer)
53 54 {
54 55 if (!dsl_dataset_is_snapshot(ds))
55 56 return (SET_ERROR(EINVAL));
56 57
57 58 if (dsl_dataset_long_held(ds))
58 59 return (SET_ERROR(EBUSY));
59 60
60 61 /*
61 62 * Only allow deferred destroy on pools that support it.
62 63 * NOTE: deferred destroy is only supported on snapshots.
63 64 */
64 65 if (defer) {
65 66 if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
66 67 SPA_VERSION_USERREFS)
67 68 return (SET_ERROR(ENOTSUP));
68 69 return (0);
69 70 }
70 71
71 72 /*
72 73 * If this snapshot has an elevated user reference count,
73 74 * we can't destroy it yet.
74 75 */
75 76 if (ds->ds_userrefs > 0)
76 77 return (SET_ERROR(EBUSY));
77 78
78 79 /*
79 80 * Can't delete a branch point.
80 81 */
81 82 if (ds->ds_phys->ds_num_children > 1)
82 83 return (SET_ERROR(EEXIST));
83 84
84 85 return (0);
85 86 }
86 87
87 88 static int
88 89 dsl_destroy_snapshot_check(void *arg, dmu_tx_t *tx)
89 90 {
90 91 dmu_snapshots_destroy_arg_t *dsda = arg;
91 92 dsl_pool_t *dp = dmu_tx_pool(tx);
92 93 nvpair_t *pair;
93 94 int error = 0;
94 95
95 96 if (!dmu_tx_is_syncing(tx))
96 97 return (0);
97 98
98 99 for (pair = nvlist_next_nvpair(dsda->dsda_snaps, NULL);
99 100 pair != NULL; pair = nvlist_next_nvpair(dsda->dsda_snaps, pair)) {
100 101 dsl_dataset_t *ds;
101 102
102 103 error = dsl_dataset_hold(dp, nvpair_name(pair),
103 104 FTAG, &ds);
104 105
105 106 /*
106 107 * If the snapshot does not exist, silently ignore it
107 108 * (it's "already destroyed").
108 109 */
109 110 if (error == ENOENT)
110 111 continue;
111 112
112 113 if (error == 0) {
113 114 error = dsl_destroy_snapshot_check_impl(ds,
114 115 dsda->dsda_defer);
115 116 dsl_dataset_rele(ds, FTAG);
116 117 }
117 118
118 119 if (error == 0) {
119 120 fnvlist_add_boolean(dsda->dsda_successful_snaps,
120 121 nvpair_name(pair));
121 122 } else {
122 123 fnvlist_add_int32(dsda->dsda_errlist,
123 124 nvpair_name(pair), error);
124 125 }
125 126 }
126 127
127 128 pair = nvlist_next_nvpair(dsda->dsda_errlist, NULL);
128 129 if (pair != NULL)
129 130 return (fnvpair_value_int32(pair));
131 +
132 + if (nvlist_empty(dsda->dsda_successful_snaps))
133 + return (SET_ERROR(ENOENT));
134 +
130 135 return (0);
131 136 }
132 137
133 138 struct process_old_arg {
134 139 dsl_dataset_t *ds;
135 140 dsl_dataset_t *ds_prev;
136 141 boolean_t after_branch_point;
137 142 zio_t *pio;
138 143 uint64_t used, comp, uncomp;
139 144 };
140 145
141 146 static int
142 147 process_old_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
143 148 {
144 149 struct process_old_arg *poa = arg;
145 150 dsl_pool_t *dp = poa->ds->ds_dir->dd_pool;
146 151
147 152 if (bp->blk_birth <= poa->ds->ds_phys->ds_prev_snap_txg) {
148 153 dsl_deadlist_insert(&poa->ds->ds_deadlist, bp, tx);
149 154 if (poa->ds_prev && !poa->after_branch_point &&
150 155 bp->blk_birth >
151 156 poa->ds_prev->ds_phys->ds_prev_snap_txg) {
152 157 poa->ds_prev->ds_phys->ds_unique_bytes +=
153 158 bp_get_dsize_sync(dp->dp_spa, bp);
154 159 }
155 160 } else {
156 161 poa->used += bp_get_dsize_sync(dp->dp_spa, bp);
157 162 poa->comp += BP_GET_PSIZE(bp);
158 163 poa->uncomp += BP_GET_UCSIZE(bp);
159 164 dsl_free_sync(poa->pio, dp, tx->tx_txg, bp);
160 165 }
161 166 return (0);
162 167 }
163 168
164 169 static void
165 170 process_old_deadlist(dsl_dataset_t *ds, dsl_dataset_t *ds_prev,
166 171 dsl_dataset_t *ds_next, boolean_t after_branch_point, dmu_tx_t *tx)
167 172 {
168 173 struct process_old_arg poa = { 0 };
169 174 dsl_pool_t *dp = ds->ds_dir->dd_pool;
170 175 objset_t *mos = dp->dp_meta_objset;
171 176 uint64_t deadlist_obj;
172 177
173 178 ASSERT(ds->ds_deadlist.dl_oldfmt);
174 179 ASSERT(ds_next->ds_deadlist.dl_oldfmt);
175 180
176 181 poa.ds = ds;
177 182 poa.ds_prev = ds_prev;
178 183 poa.after_branch_point = after_branch_point;
179 184 poa.pio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
180 185 VERIFY0(bpobj_iterate(&ds_next->ds_deadlist.dl_bpobj,
181 186 process_old_cb, &poa, tx));
182 187 VERIFY0(zio_wait(poa.pio));
183 188 ASSERT3U(poa.used, ==, ds->ds_phys->ds_unique_bytes);
184 189
185 190 /* change snapused */
186 191 dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
187 192 -poa.used, -poa.comp, -poa.uncomp, tx);
188 193
189 194 /* swap next's deadlist to our deadlist */
190 195 dsl_deadlist_close(&ds->ds_deadlist);
191 196 dsl_deadlist_close(&ds_next->ds_deadlist);
192 197 deadlist_obj = ds->ds_phys->ds_deadlist_obj;
193 198 ds->ds_phys->ds_deadlist_obj = ds_next->ds_phys->ds_deadlist_obj;
194 199 ds_next->ds_phys->ds_deadlist_obj = deadlist_obj;
195 200 dsl_deadlist_open(&ds->ds_deadlist, mos, ds->ds_phys->ds_deadlist_obj);
196 201 dsl_deadlist_open(&ds_next->ds_deadlist, mos,
197 202 ds_next->ds_phys->ds_deadlist_obj);
198 203 }
199 204
200 205 static void
201 206 dsl_dataset_remove_clones_key(dsl_dataset_t *ds, uint64_t mintxg, dmu_tx_t *tx)
202 207 {
203 208 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
204 209 zap_cursor_t zc;
205 210 zap_attribute_t za;
206 211
207 212 /*
208 213 * If it is the old version, dd_clones doesn't exist so we can't
209 214 * find the clones, but dsl_deadlist_remove_key() is a no-op so it
210 215 * doesn't matter.
211 216 */
212 217 if (ds->ds_dir->dd_phys->dd_clones == 0)
213 218 return;
214 219
215 220 for (zap_cursor_init(&zc, mos, ds->ds_dir->dd_phys->dd_clones);
216 221 zap_cursor_retrieve(&zc, &za) == 0;
217 222 zap_cursor_advance(&zc)) {
218 223 dsl_dataset_t *clone;
219 224
220 225 VERIFY0(dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
221 226 za.za_first_integer, FTAG, &clone));
222 227 if (clone->ds_dir->dd_origin_txg > mintxg) {
223 228 dsl_deadlist_remove_key(&clone->ds_deadlist,
224 229 mintxg, tx);
225 230 dsl_dataset_remove_clones_key(clone, mintxg, tx);
226 231 }
227 232 dsl_dataset_rele(clone, FTAG);
228 233 }
229 234 zap_cursor_fini(&zc);
230 235 }
231 236
232 237 void
233 238 dsl_destroy_snapshot_sync_impl(dsl_dataset_t *ds, boolean_t defer, dmu_tx_t *tx)
234 239 {
235 240 int err;
236 241 int after_branch_point = FALSE;
237 242 dsl_pool_t *dp = ds->ds_dir->dd_pool;
238 243 objset_t *mos = dp->dp_meta_objset;
239 244 dsl_dataset_t *ds_prev = NULL;
240 245 uint64_t obj;
241 246
242 247 ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));
243 248 ASSERT3U(ds->ds_phys->ds_bp.blk_birth, <=, tx->tx_txg);
244 249 ASSERT(refcount_is_zero(&ds->ds_longholds));
245 250
246 251 if (defer &&
247 252 (ds->ds_userrefs > 0 || ds->ds_phys->ds_num_children > 1)) {
248 253 ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
249 254 dmu_buf_will_dirty(ds->ds_dbuf, tx);
250 255 ds->ds_phys->ds_flags |= DS_FLAG_DEFER_DESTROY;
251 256 spa_history_log_internal_ds(ds, "defer_destroy", tx, "");
252 257 return;
253 258 }
254 259
255 260 ASSERT3U(ds->ds_phys->ds_num_children, <=, 1);
256 261
257 262 /* We need to log before removing it from the namespace. */
258 263 spa_history_log_internal_ds(ds, "destroy", tx, "");
259 264
260 265 dsl_scan_ds_destroyed(ds, tx);
261 266
262 267 obj = ds->ds_object;
263 268
264 269 if (ds->ds_phys->ds_prev_snap_obj != 0) {
265 270 ASSERT3P(ds->ds_prev, ==, NULL);
266 271 VERIFY0(dsl_dataset_hold_obj(dp,
267 272 ds->ds_phys->ds_prev_snap_obj, FTAG, &ds_prev));
268 273 after_branch_point =
269 274 (ds_prev->ds_phys->ds_next_snap_obj != obj);
270 275
271 276 dmu_buf_will_dirty(ds_prev->ds_dbuf, tx);
272 277 if (after_branch_point &&
273 278 ds_prev->ds_phys->ds_next_clones_obj != 0) {
274 279 dsl_dataset_remove_from_next_clones(ds_prev, obj, tx);
275 280 if (ds->ds_phys->ds_next_snap_obj != 0) {
276 281 VERIFY0(zap_add_int(mos,
277 282 ds_prev->ds_phys->ds_next_clones_obj,
278 283 ds->ds_phys->ds_next_snap_obj, tx));
279 284 }
280 285 }
281 286 if (!after_branch_point) {
282 287 ds_prev->ds_phys->ds_next_snap_obj =
283 288 ds->ds_phys->ds_next_snap_obj;
284 289 }
285 290 }
286 291
287 292 dsl_dataset_t *ds_next;
288 293 uint64_t old_unique;
289 294 uint64_t used = 0, comp = 0, uncomp = 0;
290 295
291 296 VERIFY0(dsl_dataset_hold_obj(dp,
292 297 ds->ds_phys->ds_next_snap_obj, FTAG, &ds_next));
293 298 ASSERT3U(ds_next->ds_phys->ds_prev_snap_obj, ==, obj);
294 299
295 300 old_unique = ds_next->ds_phys->ds_unique_bytes;
296 301
297 302 dmu_buf_will_dirty(ds_next->ds_dbuf, tx);
298 303 ds_next->ds_phys->ds_prev_snap_obj =
299 304 ds->ds_phys->ds_prev_snap_obj;
300 305 ds_next->ds_phys->ds_prev_snap_txg =
301 306 ds->ds_phys->ds_prev_snap_txg;
302 307 ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==,
303 308 ds_prev ? ds_prev->ds_phys->ds_creation_txg : 0);
304 309
305 310 if (ds_next->ds_deadlist.dl_oldfmt) {
306 311 process_old_deadlist(ds, ds_prev, ds_next,
307 312 after_branch_point, tx);
308 313 } else {
309 314 /* Adjust prev's unique space. */
310 315 if (ds_prev && !after_branch_point) {
311 316 dsl_deadlist_space_range(&ds_next->ds_deadlist,
312 317 ds_prev->ds_phys->ds_prev_snap_txg,
313 318 ds->ds_phys->ds_prev_snap_txg,
314 319 &used, &comp, &uncomp);
315 320 ds_prev->ds_phys->ds_unique_bytes += used;
316 321 }
317 322
318 323 /* Adjust snapused. */
319 324 dsl_deadlist_space_range(&ds_next->ds_deadlist,
320 325 ds->ds_phys->ds_prev_snap_txg, UINT64_MAX,
321 326 &used, &comp, &uncomp);
322 327 dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
323 328 -used, -comp, -uncomp, tx);
324 329
325 330 /* Move blocks to be freed to pool's free list. */
326 331 dsl_deadlist_move_bpobj(&ds_next->ds_deadlist,
327 332 &dp->dp_free_bpobj, ds->ds_phys->ds_prev_snap_txg,
328 333 tx);
329 334 dsl_dir_diduse_space(tx->tx_pool->dp_free_dir,
330 335 DD_USED_HEAD, used, comp, uncomp, tx);
331 336
332 337 /* Merge our deadlist into next's and free it. */
333 338 dsl_deadlist_merge(&ds_next->ds_deadlist,
334 339 ds->ds_phys->ds_deadlist_obj, tx);
335 340 }
336 341 dsl_deadlist_close(&ds->ds_deadlist);
337 342 dsl_deadlist_free(mos, ds->ds_phys->ds_deadlist_obj, tx);
338 343 dmu_buf_will_dirty(ds->ds_dbuf, tx);
339 344 ds->ds_phys->ds_deadlist_obj = 0;
340 345
341 346 /* Collapse range in clone heads */
342 347 dsl_dataset_remove_clones_key(ds,
343 348 ds->ds_phys->ds_creation_txg, tx);
344 349
345 350 if (dsl_dataset_is_snapshot(ds_next)) {
346 351 dsl_dataset_t *ds_nextnext;
347 352
348 353 /*
349 354 * Update next's unique to include blocks which
350 355 * were previously shared by only this snapshot
351 356 * and it. Those blocks will be born after the
352 357 * prev snap and before this snap, and will have
353 358 * died after the next snap and before the one
354 359 * after that (ie. be on the snap after next's
355 360 * deadlist).
356 361 */
357 362 VERIFY0(dsl_dataset_hold_obj(dp,
358 363 ds_next->ds_phys->ds_next_snap_obj, FTAG, &ds_nextnext));
359 364 dsl_deadlist_space_range(&ds_nextnext->ds_deadlist,
360 365 ds->ds_phys->ds_prev_snap_txg,
361 366 ds->ds_phys->ds_creation_txg,
362 367 &used, &comp, &uncomp);
363 368 ds_next->ds_phys->ds_unique_bytes += used;
364 369 dsl_dataset_rele(ds_nextnext, FTAG);
365 370 ASSERT3P(ds_next->ds_prev, ==, NULL);
366 371
367 372 /* Collapse range in this head. */
368 373 dsl_dataset_t *hds;
369 374 VERIFY0(dsl_dataset_hold_obj(dp,
370 375 ds->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &hds));
371 376 dsl_deadlist_remove_key(&hds->ds_deadlist,
372 377 ds->ds_phys->ds_creation_txg, tx);
373 378 dsl_dataset_rele(hds, FTAG);
374 379
375 380 } else {
376 381 ASSERT3P(ds_next->ds_prev, ==, ds);
377 382 dsl_dataset_rele(ds_next->ds_prev, ds_next);
378 383 ds_next->ds_prev = NULL;
379 384 if (ds_prev) {
380 385 VERIFY0(dsl_dataset_hold_obj(dp,
381 386 ds->ds_phys->ds_prev_snap_obj,
382 387 ds_next, &ds_next->ds_prev));
383 388 }
384 389
385 390 dsl_dataset_recalc_head_uniq(ds_next);
386 391
387 392 /*
388 393 * Reduce the amount of our unconsumed refreservation
389 394 * being charged to our parent by the amount of
390 395 * new unique data we have gained.
391 396 */
392 397 if (old_unique < ds_next->ds_reserved) {
393 398 int64_t mrsdelta;
394 399 uint64_t new_unique =
395 400 ds_next->ds_phys->ds_unique_bytes;
396 401
397 402 ASSERT(old_unique <= new_unique);
398 403 mrsdelta = MIN(new_unique - old_unique,
399 404 ds_next->ds_reserved - old_unique);
400 405 dsl_dir_diduse_space(ds->ds_dir,
401 406 DD_USED_REFRSRV, -mrsdelta, 0, 0, tx);
402 407 }
403 408 }
404 409 dsl_dataset_rele(ds_next, FTAG);
405 410
406 411 /*
407 412 * This must be done after the dsl_traverse(), because it will
408 413 * re-open the objset.
409 414 */
410 415 if (ds->ds_objset) {
411 416 dmu_objset_evict(ds->ds_objset);
412 417 ds->ds_objset = NULL;
413 418 }
414 419
415 420 /* remove from snapshot namespace */
416 421 dsl_dataset_t *ds_head;
417 422 ASSERT(ds->ds_phys->ds_snapnames_zapobj == 0);
418 423 VERIFY0(dsl_dataset_hold_obj(dp,
419 424 ds->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &ds_head));
420 425 VERIFY0(dsl_dataset_get_snapname(ds));
421 426 #ifdef ZFS_DEBUG
422 427 {
423 428 uint64_t val;
424 429
425 430 err = dsl_dataset_snap_lookup(ds_head,
426 431 ds->ds_snapname, &val);
427 432 ASSERT0(err);
428 433 ASSERT3U(val, ==, obj);
429 434 }
430 435 #endif
431 436 VERIFY0(dsl_dataset_snap_remove(ds_head, ds->ds_snapname, tx));
432 437 dsl_dataset_rele(ds_head, FTAG);
433 438
434 439 if (ds_prev != NULL)
435 440 dsl_dataset_rele(ds_prev, FTAG);
436 441
437 442 spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);
438 443
439 444 if (ds->ds_phys->ds_next_clones_obj != 0) {
440 445 uint64_t count;
441 446 ASSERT0(zap_count(mos,
442 447 ds->ds_phys->ds_next_clones_obj, &count) && count == 0);
443 448 VERIFY0(dmu_object_free(mos,
444 449 ds->ds_phys->ds_next_clones_obj, tx));
445 450 }
446 451 if (ds->ds_phys->ds_props_obj != 0)
447 452 VERIFY0(zap_destroy(mos, ds->ds_phys->ds_props_obj, tx));
448 453 if (ds->ds_phys->ds_userrefs_obj != 0)
449 454 VERIFY0(zap_destroy(mos, ds->ds_phys->ds_userrefs_obj, tx));
450 455 dsl_dir_rele(ds->ds_dir, ds);
451 456 ds->ds_dir = NULL;
452 457 VERIFY0(dmu_object_free(mos, obj, tx));
453 458 }
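
To make the DD_USED_REFRSRV adjustment at the end of dsl_destroy_snapshot_sync_impl() concrete, here is a small worked example in the style of the code above (all figures illustrative):

	/*
	 * Illustrative numbers only: suppose ds_next has a 50M
	 * refreservation, and its unique space grows from 10M
	 * (old_unique) to 30M (new_unique) because it inherited the
	 * destroyed snapshot's blocks.  20M of the reservation is now
	 * backed by real unique data, so that much no longer needs to
	 * be charged to the parent:
	 *
	 *	mrsdelta = MIN(new_unique - old_unique,    30M - 10M = 20M
	 *	    ds_next->ds_reserved - old_unique);    50M - 10M = 40M
	 *
	 * giving dsl_dir_diduse_space(..., DD_USED_REFRSRV, -20M, ...).
	 */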
454 459
455 460 static void
456 461 dsl_destroy_snapshot_sync(void *arg, dmu_tx_t *tx)
457 462 {
458 463 dmu_snapshots_destroy_arg_t *dsda = arg;
459 464 dsl_pool_t *dp = dmu_tx_pool(tx);
460 465 nvpair_t *pair;
461 466
462 467 for (pair = nvlist_next_nvpair(dsda->dsda_successful_snaps, NULL);
463 468 pair != NULL;
464 469 pair = nvlist_next_nvpair(dsda->dsda_successful_snaps, pair)) {
465 470 dsl_dataset_t *ds;
466 471
467 472 VERIFY0(dsl_dataset_hold(dp, nvpair_name(pair), FTAG, &ds));
468 473
469 474 dsl_destroy_snapshot_sync_impl(ds, dsda->dsda_defer, tx);
470 475 dsl_dataset_rele(ds, FTAG);
471 476 }
472 477 }
473 478
474 479 /*
475 480 * The semantics of this function are described in the comment above
476 481 * lzc_destroy_snaps(). To summarize:
477 482 *
478 483 * The snapshots must all be in the same pool.
479 484 *
480 485 * Snapshots that don't exist will be silently ignored (considered to be
481 486 * "already deleted").
482 487 *
483 488 * On success, all snaps will be destroyed and this will return 0.
484 489 * On failure, no snaps will be destroyed, the errlist will be filled in,
485 490 * and this will return an errno.
486 491 */
487 492 int
488 493 dsl_destroy_snapshots_nvl(nvlist_t *snaps, boolean_t defer,
489 494 nvlist_t *errlist)
490 495 {
491 496 dmu_snapshots_destroy_arg_t dsda;
492 497 int error;
493 498 nvpair_t *pair;
494 499
495 500 pair = nvlist_next_nvpair(snaps, NULL);
496 501 if (pair == NULL)
497 502 return (0);
498 503
499 504 dsda.dsda_snaps = snaps;
500 505 dsda.dsda_successful_snaps = fnvlist_alloc();
501 506 dsda.dsda_defer = defer;
502 507 dsda.dsda_errlist = errlist;
503 508
504 509 error = dsl_sync_task(nvpair_name(pair),
505 510 dsl_destroy_snapshot_check, dsl_destroy_snapshot_sync,
506 511 &dsda, 0);
507 512 fnvlist_free(dsda.dsda_successful_snaps);
508 513
509 514 return (error);
510 515 }
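
The contract described in the block comment above is what userland sees through lzc_destroy_snaps(). A minimal userland sketch of a caller honoring that contract (pool and snapshot names are illustrative; assumes linking against libzfs_core and libnvpair):

	#include <libzfs_core.h>
	#include <libnvpair.h>
	#include <stdio.h>

	int
	main(void)
	{
		nvlist_t *snaps = fnvlist_alloc();
		nvlist_t *errlist = NULL;
		nvpair_t *pair;
		int error;

		if (libzfs_core_init() != 0)
			return (1);

		/* All snapshots must be in the same pool. */
		fnvlist_add_boolean(snaps, "tank/fs@monday");
		fnvlist_add_boolean(snaps, "tank/fs@tuesday");

		error = lzc_destroy_snaps(snaps, B_FALSE, &errlist);
		if (error != 0) {
			/* On failure the errlist maps snapshot -> errno. */
			for (pair = nvlist_next_nvpair(errlist, NULL);
			    pair != NULL;
			    pair = nvlist_next_nvpair(errlist, pair)) {
				(void) fprintf(stderr, "%s: error %d\n",
				    nvpair_name(pair),
				    fnvpair_value_int32(pair));
			}
		}

		fnvlist_free(errlist);
		fnvlist_free(snaps);
		libzfs_core_fini();
		return (error != 0);
	}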
511 516
512 517 int
513 518 dsl_destroy_snapshot(const char *name, boolean_t defer)
514 519 {
515 520 int error;
516 521 nvlist_t *nvl = fnvlist_alloc();
517 522 nvlist_t *errlist = fnvlist_alloc();
518 523
519 524 fnvlist_add_boolean(nvl, name);
520 525 error = dsl_destroy_snapshots_nvl(nvl, defer, errlist);
521 526 fnvlist_free(errlist);
522 527 fnvlist_free(nvl);
523 528 return (error);
524 529 }
525 530
526 531 struct killarg {
527 532 dsl_dataset_t *ds;
528 533 dmu_tx_t *tx;
529 534 };
530 535
531 536 /* ARGSUSED */
532 537 static int
533 538 kill_blkptr(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
534 539 const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
535 540 {
536 541 struct killarg *ka = arg;
537 542 dmu_tx_t *tx = ka->tx;
538 543
539 544 if (bp == NULL)
540 545 return (0);
541 546
542 547 if (zb->zb_level == ZB_ZIL_LEVEL) {
543 548 ASSERT(zilog != NULL);
544 549 /*
545 550 * It's a block in the intent log. It has no
546 551 * accounting, so just free it.
547 552 */
548 553 dsl_free(ka->tx->tx_pool, ka->tx->tx_txg, bp);
549 554 } else {
550 555 ASSERT(zilog == NULL);
551 556 ASSERT3U(bp->blk_birth, >, ka->ds->ds_phys->ds_prev_snap_txg);
552 557 (void) dsl_dataset_block_kill(ka->ds, bp, tx, B_FALSE);
553 558 }
554 559
555 560 return (0);
556 561 }
557 562
558 563 static void
559 564 old_synchronous_dataset_destroy(dsl_dataset_t *ds, dmu_tx_t *tx)
560 565 {
561 566 struct killarg ka;
562 567
563 568 /*
564 569 * Free everything that we point to (that's born after
565 570 * the previous snapshot, if we are a clone)
566 571 *
567 572 * NB: this should be very quick, because we already
568 573 * freed all the objects in open context.
569 574 */
570 575 ka.ds = ds;
571 576 ka.tx = tx;
572 577 VERIFY0(traverse_dataset(ds,
573 578 ds->ds_phys->ds_prev_snap_txg, TRAVERSE_POST,
574 579 kill_blkptr, &ka));
575 580 ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) || ds->ds_phys->ds_unique_bytes == 0);
576 581 }
577 582
578 583 typedef struct dsl_destroy_head_arg {
579 584 const char *ddha_name;
580 585 } dsl_destroy_head_arg_t;
581 586
582 587 int
583 588 dsl_destroy_head_check_impl(dsl_dataset_t *ds, int expected_holds)
584 589 {
585 590 int error;
586 591 uint64_t count;
587 592 objset_t *mos;
588 593
589 594 if (dsl_dataset_is_snapshot(ds))
590 595 return (SET_ERROR(EINVAL));
591 596
592 597 if (refcount_count(&ds->ds_longholds) != expected_holds)
593 598 return (SET_ERROR(EBUSY));
594 599
595 600 mos = ds->ds_dir->dd_pool->dp_meta_objset;
596 601
597 602 /*
598 603 * Can't delete a head dataset if there are snapshots of it.
599 604 * (Except if the only snapshots are from the branch we cloned
600 605 * from.)
601 606 */
602 607 if (ds->ds_prev != NULL &&
603 608 ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object)
604 609 return (SET_ERROR(EBUSY));
605 610
606 611 /*
607 612 * Can't delete if there are children of this fs.
608 613 */
609 614 error = zap_count(mos,
610 615 ds->ds_dir->dd_phys->dd_child_dir_zapobj, &count);
611 616 if (error != 0)
612 617 return (error);
613 618 if (count != 0)
614 619 return (SET_ERROR(EEXIST));
615 620
616 621 if (dsl_dir_is_clone(ds->ds_dir) && DS_IS_DEFER_DESTROY(ds->ds_prev) &&
617 622 ds->ds_prev->ds_phys->ds_num_children == 2 &&
618 623 ds->ds_prev->ds_userrefs == 0) {
619 624 /* We need to remove the origin snapshot as well. */
620 625 if (!refcount_is_zero(&ds->ds_prev->ds_longholds))
621 626 return (SET_ERROR(EBUSY));
622 627 }
623 628 return (0);
624 629 }
625 630
626 631 static int
627 632 dsl_destroy_head_check(void *arg, dmu_tx_t *tx)
628 633 {
629 634 dsl_destroy_head_arg_t *ddha = arg;
630 635 dsl_pool_t *dp = dmu_tx_pool(tx);
631 636 dsl_dataset_t *ds;
632 637 int error;
633 638
634 639 error = dsl_dataset_hold(dp, ddha->ddha_name, FTAG, &ds);
635 640 if (error != 0)
636 641 return (error);
637 642
638 643 error = dsl_destroy_head_check_impl(ds, 0);
639 644 dsl_dataset_rele(ds, FTAG);
640 645 return (error);
641 646 }
642 647
643 648 static void
644 649 dsl_dir_destroy_sync(uint64_t ddobj, dmu_tx_t *tx)
645 650 {
646 651 dsl_dir_t *dd;
647 652 dsl_pool_t *dp = dmu_tx_pool(tx);
648 653 objset_t *mos = dp->dp_meta_objset;
649 654 dd_used_t t;
650 655
651 656 ASSERT(RRW_WRITE_HELD(&dmu_tx_pool(tx)->dp_config_rwlock));
652 657
653 658 VERIFY0(dsl_dir_hold_obj(dp, ddobj, NULL, FTAG, &dd));
654 659
655 660 ASSERT0(dd->dd_phys->dd_head_dataset_obj);
656 661
657 662 /*
658 663 * Remove our reservation. The impl() routine avoids setting the
659 664 * actual property, which would require the (already destroyed) ds.
660 665 */
661 666 dsl_dir_set_reservation_sync_impl(dd, 0, tx);
662 667
663 668 ASSERT0(dd->dd_phys->dd_used_bytes);
664 669 ASSERT0(dd->dd_phys->dd_reserved);
665 670 for (t = 0; t < DD_USED_NUM; t++)
666 671 ASSERT0(dd->dd_phys->dd_used_breakdown[t]);
667 672
668 673 VERIFY0(zap_destroy(mos, dd->dd_phys->dd_child_dir_zapobj, tx));
669 674 VERIFY0(zap_destroy(mos, dd->dd_phys->dd_props_zapobj, tx));
670 675 VERIFY0(dsl_deleg_destroy(mos, dd->dd_phys->dd_deleg_zapobj, tx));
671 676 VERIFY0(zap_remove(mos,
672 677 dd->dd_parent->dd_phys->dd_child_dir_zapobj, dd->dd_myname, tx));
673 678
674 679 dsl_dir_rele(dd, FTAG);
675 680 VERIFY0(dmu_object_free(mos, ddobj, tx));
676 681 }
677 682
678 683 void
679 684 dsl_destroy_head_sync_impl(dsl_dataset_t *ds, dmu_tx_t *tx)
680 685 {
681 686 dsl_pool_t *dp = dmu_tx_pool(tx);
682 687 objset_t *mos = dp->dp_meta_objset;
683 688 uint64_t obj, ddobj, prevobj = 0;
684 689 boolean_t rmorigin;
685 690
686 691 ASSERT3U(ds->ds_phys->ds_num_children, <=, 1);
687 692 ASSERT(ds->ds_prev == NULL ||
688 693 ds->ds_prev->ds_phys->ds_next_snap_obj != ds->ds_object);
689 694 ASSERT3U(ds->ds_phys->ds_bp.blk_birth, <=, tx->tx_txg);
690 695 ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));
691 696
692 697 /* We need to log before removing it from the namespace. */
693 698 spa_history_log_internal_ds(ds, "destroy", tx, "");
694 699
695 700 rmorigin = (dsl_dir_is_clone(ds->ds_dir) &&
696 701 DS_IS_DEFER_DESTROY(ds->ds_prev) &&
697 702 ds->ds_prev->ds_phys->ds_num_children == 2 &&
698 703 ds->ds_prev->ds_userrefs == 0);
699 704
700 705 /* Remove our reservation */
701 706 if (ds->ds_reserved != 0) {
702 707 dsl_dataset_set_refreservation_sync_impl(ds,
703 708 (ZPROP_SRC_NONE | ZPROP_SRC_LOCAL | ZPROP_SRC_RECEIVED),
704 709 0, tx);
705 710 ASSERT0(ds->ds_reserved);
706 711 }
707 712
708 713 dsl_scan_ds_destroyed(ds, tx);
709 714
710 715 obj = ds->ds_object;
711 716
712 717 if (ds->ds_phys->ds_prev_snap_obj != 0) {
713 718 /* This is a clone */
714 719 ASSERT(ds->ds_prev != NULL);
715 720 ASSERT3U(ds->ds_prev->ds_phys->ds_next_snap_obj, !=, obj);
716 721 ASSERT0(ds->ds_phys->ds_next_snap_obj);
717 722
718 723 dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
719 724 if (ds->ds_prev->ds_phys->ds_next_clones_obj != 0) {
720 725 dsl_dataset_remove_from_next_clones(ds->ds_prev,
721 726 obj, tx);
722 727 }
723 728
724 729 ASSERT3U(ds->ds_prev->ds_phys->ds_num_children, >, 1);
725 730 ds->ds_prev->ds_phys->ds_num_children--;
726 731 }
727 732
728 733 zfeature_info_t *async_destroy =
729 734 &spa_feature_table[SPA_FEATURE_ASYNC_DESTROY];
730 735 objset_t *os;
731 736
732 737 /*
733 738 * Destroy the deadlist. Unless it's a clone, the
734 739 * deadlist should be empty. (If it's a clone, it's
735 740 * safe to ignore the deadlist contents.)
736 741 */
737 742 dsl_deadlist_close(&ds->ds_deadlist);
738 743 dsl_deadlist_free(mos, ds->ds_phys->ds_deadlist_obj, tx);
739 744 dmu_buf_will_dirty(ds->ds_dbuf, tx);
740 745 ds->ds_phys->ds_deadlist_obj = 0;
741 746
742 747 VERIFY0(dmu_objset_from_ds(ds, &os));
743 748
744 749 if (!spa_feature_is_enabled(dp->dp_spa, async_destroy)) {
745 750 old_synchronous_dataset_destroy(ds, tx);
746 751 } else {
747 752 /*
748 753 * Move the bptree into the pool's list of trees to
749 754 * clean up and update space accounting information.
750 755 */
751 756 uint64_t used, comp, uncomp;
752 757
753 758 zil_destroy_sync(dmu_objset_zil(os), tx);
754 759
755 760 if (!spa_feature_is_active(dp->dp_spa, async_destroy)) {
756 761 dsl_scan_t *scn = dp->dp_scan;
757 762
758 763 spa_feature_incr(dp->dp_spa, async_destroy, tx);
759 764 dp->dp_bptree_obj = bptree_alloc(mos, tx);
760 765 VERIFY0(zap_add(mos,
761 766 DMU_POOL_DIRECTORY_OBJECT,
762 767 DMU_POOL_BPTREE_OBJ, sizeof (uint64_t), 1,
763 768 &dp->dp_bptree_obj, tx));
764 769 ASSERT(!scn->scn_async_destroying);
765 770 scn->scn_async_destroying = B_TRUE;
766 771 }
767 772
768 773 used = ds->ds_dir->dd_phys->dd_used_bytes;
769 774 comp = ds->ds_dir->dd_phys->dd_compressed_bytes;
770 775 uncomp = ds->ds_dir->dd_phys->dd_uncompressed_bytes;
771 776
772 777 ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
773 778 ds->ds_phys->ds_unique_bytes == used);
774 779
775 780 bptree_add(mos, dp->dp_bptree_obj,
776 781 &ds->ds_phys->ds_bp, ds->ds_phys->ds_prev_snap_txg,
777 782 used, comp, uncomp, tx);
778 783 dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD,
779 784 -used, -comp, -uncomp, tx);
780 785 dsl_dir_diduse_space(dp->dp_free_dir, DD_USED_HEAD,
781 786 used, comp, uncomp, tx);
782 787 }
783 788
784 789 if (ds->ds_prev != NULL) {
785 790 if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
786 791 VERIFY0(zap_remove_int(mos,
787 792 ds->ds_prev->ds_dir->dd_phys->dd_clones,
788 793 ds->ds_object, tx));
789 794 }
790 795 prevobj = ds->ds_prev->ds_object;
791 796 dsl_dataset_rele(ds->ds_prev, ds);
792 797 ds->ds_prev = NULL;
793 798 }
794 799
795 800 /*
796 801 * This must be done after the dsl_traverse(), because it will
797 802 * re-open the objset.
798 803 */
799 804 if (ds->ds_objset) {
800 805 dmu_objset_evict(ds->ds_objset);
801 806 ds->ds_objset = NULL;
802 807 }
803 808
804 809 /* Erase the link in the dir */
805 810 dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
806 811 ds->ds_dir->dd_phys->dd_head_dataset_obj = 0;
807 812 ddobj = ds->ds_dir->dd_object;
808 813 ASSERT(ds->ds_phys->ds_snapnames_zapobj != 0);
809 814 VERIFY0(zap_destroy(mos, ds->ds_phys->ds_snapnames_zapobj, tx));
810 815
811 816 spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);
812 817
813 818 ASSERT0(ds->ds_phys->ds_next_clones_obj);
814 819 ASSERT0(ds->ds_phys->ds_props_obj);
815 820 ASSERT0(ds->ds_phys->ds_userrefs_obj);
816 821 dsl_dir_rele(ds->ds_dir, ds);
817 822 ds->ds_dir = NULL;
818 823 VERIFY0(dmu_object_free(mos, obj, tx));
819 824
820 825 dsl_dir_destroy_sync(ddobj, tx);
821 826
822 827 if (rmorigin) {
823 828 dsl_dataset_t *prev;
824 829 VERIFY0(dsl_dataset_hold_obj(dp, prevobj, FTAG, &prev));
825 830 dsl_destroy_snapshot_sync_impl(prev, B_FALSE, tx);
826 831 dsl_dataset_rele(prev, FTAG);
827 832 }
828 833 }
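
For orientation, the bptree filled in by the async path above is drained in later txgs by the pool scan. A hedged sketch of that consumer, modeled loosely on dsl_scan_sync() (suspend/ERESTART handling elided):

	/*
	 * Sketch only: walk dp_bptree_obj, letting bptree_iterate()
	 * free each entry (B_TRUE) as dsl_scan_free_block_cb()
	 * processes it; once the tree empties, the async_destroy
	 * feature refcount taken in dsl_destroy_head_sync_impl()
	 * is decremented again.
	 */
	error = bptree_iterate(dp->dp_meta_objset, dp->dp_bptree_obj,
	    B_TRUE, dsl_scan_free_block_cb, scn, tx);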
829 834
830 835 static void
831 836 dsl_destroy_head_sync(void *arg, dmu_tx_t *tx)
832 837 {
833 838 dsl_destroy_head_arg_t *ddha = arg;
834 839 dsl_pool_t *dp = dmu_tx_pool(tx);
835 840 dsl_dataset_t *ds;
836 841
837 842 VERIFY0(dsl_dataset_hold(dp, ddha->ddha_name, FTAG, &ds));
838 843 dsl_destroy_head_sync_impl(ds, tx);
839 844 dsl_dataset_rele(ds, FTAG);
840 845 }
841 846
842 847 static void
843 848 dsl_destroy_head_begin_sync(void *arg, dmu_tx_t *tx)
844 849 {
845 850 dsl_destroy_head_arg_t *ddha = arg;
846 851 dsl_pool_t *dp = dmu_tx_pool(tx);
847 852 dsl_dataset_t *ds;
848 853
849 854 VERIFY0(dsl_dataset_hold(dp, ddha->ddha_name, FTAG, &ds));
850 855
851 856 /* Mark it as inconsistent on-disk, in case we crash */
852 857 dmu_buf_will_dirty(ds->ds_dbuf, tx);
853 858 ds->ds_phys->ds_flags |= DS_FLAG_INCONSISTENT;
854 859
855 860 spa_history_log_internal_ds(ds, "destroy begin", tx, "");
856 861 dsl_dataset_rele(ds, FTAG);
857 862 }
858 863
859 864 int
860 865 dsl_destroy_head(const char *name)
861 866 {
862 867 dsl_destroy_head_arg_t ddha;
863 868 int error;
864 869 spa_t *spa;
865 870 boolean_t isenabled;
866 871
867 872 #ifdef _KERNEL
868 873 zfs_destroy_unmount_origin(name);
869 874 #endif
870 875
871 876 error = spa_open(name, &spa, FTAG);
872 877 if (error != 0)
873 878 return (error);
874 879 isenabled = spa_feature_is_enabled(spa,
875 880 &spa_feature_table[SPA_FEATURE_ASYNC_DESTROY]);
876 881 spa_close(spa, FTAG);
877 882
878 883 ddha.ddha_name = name;
879 884
880 885 if (!isenabled) {
881 886 objset_t *os;
882 887
883 888 error = dsl_sync_task(name, dsl_destroy_head_check,
884 889 dsl_destroy_head_begin_sync, &ddha, 0);
885 890 if (error != 0)
886 891 return (error);
887 892
888 893 /*
889 894 * Head deletion is processed in one txg on old pools;
890 895 * remove the objects from open context so that the txg sync
891 896 * is not too long.
892 897 */
893 898 error = dmu_objset_own(name, DMU_OST_ANY, B_FALSE, FTAG, &os);
894 899 if (error == 0) {
895 900 uint64_t prev_snap_txg =
896 901 dmu_objset_ds(os)->ds_phys->ds_prev_snap_txg;
897 902 for (uint64_t obj = 0; error == 0;
898 903 error = dmu_object_next(os, &obj, FALSE,
899 904 prev_snap_txg))
900 905 (void) dmu_free_object(os, obj);
901 906 /* sync out all frees */
902 907 txg_wait_synced(dmu_objset_pool(os), 0);
903 908 dmu_objset_disown(os, FTAG);
904 909 }
905 910 }
906 911
907 912 return (dsl_sync_task(name, dsl_destroy_head_check,
908 913 dsl_destroy_head_sync, &ddha, 0));
909 914 }
910 915
911 916 /*
912 917 * Note, this function is used as the callback for dmu_objset_find(). We
913 918 * always return 0 so that we will continue to find and process
914 919 * inconsistent datasets, even if we encounter an error trying to
915 920 * process one of them.
916 921 */
917 922 /* ARGSUSED */
918 923 int
919 924 dsl_destroy_inconsistent(const char *dsname, void *arg)
920 925 {
921 926 objset_t *os;
922 927
923 928 if (dmu_objset_hold(dsname, FTAG, &os) == 0) {
924 929 boolean_t inconsistent = DS_IS_INCONSISTENT(dmu_objset_ds(os));
925 930 dmu_objset_rele(os, FTAG);
926 931 if (inconsistent)
927 932 (void) dsl_destroy_head(dsname);
928 933 }
929 934 return (0);
930 935 }
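
As the comment above notes, this function is a dmu_objset_find() callback. A hedged sketch of how a caller might sweep a pool for datasets left DS_FLAG_INCONSISTENT (for example by an interrupted receive); the call site is illustrative:

	/*
	 * Sketch only: visit every dataset under the pool and destroy
	 * any inconsistent ones.  Per the comment above, the callback
	 * always returns 0, so the walk is never cut short by a
	 * failure to destroy one dataset.
	 */
	(void) dmu_objset_find(spa_name(spa), dsl_destroy_inconsistent,
	    NULL, DS_FIND_CHILDREN);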