--- old/usr/src/uts/common/fs/zfs/dsl_dataset.c
+++ new/usr/src/uts/common/fs/zfs/dsl_dataset.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 * Copyright (c) 2012 by Delphix. All rights reserved.
24 24 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
25 25 */
26 26
27 27 #include <sys/dmu_objset.h>
28 28 #include <sys/dsl_dataset.h>
29 29 #include <sys/dsl_dir.h>
30 30 #include <sys/dsl_prop.h>
31 31 #include <sys/dsl_synctask.h>
32 32 #include <sys/dmu_traverse.h>
33 33 #include <sys/dmu_impl.h>
34 34 #include <sys/dmu_tx.h>
35 35 #include <sys/arc.h>
36 36 #include <sys/zio.h>
37 37 #include <sys/zap.h>
38 38 #include <sys/zfeature.h>
39 39 #include <sys/unique.h>
40 40 #include <sys/zfs_context.h>
41 41 #include <sys/zfs_ioctl.h>
42 42 #include <sys/spa.h>
43 43 #include <sys/zfs_znode.h>
44 44 #include <sys/zfs_onexit.h>
45 45 #include <sys/zvol.h>
46 46 #include <sys/dsl_scan.h>
47 47 #include <sys/dsl_deadlist.h>
48 48
49 49 static char *dsl_reaper = "the grim reaper";
50 50
51 51 static dsl_checkfunc_t dsl_dataset_destroy_begin_check;
52 52 static dsl_syncfunc_t dsl_dataset_destroy_begin_sync;
53 53 static dsl_syncfunc_t dsl_dataset_set_reservation_sync;
54 54
55 55 #define SWITCH64(x, y) \
56 56 { \
57 57 uint64_t __tmp = (x); \
58 58 (x) = (y); \
59 59 (y) = __tmp; \
60 60 }
61 61
62 62 #define DS_REF_MAX (1ULL << 62)
63 63
64 64 #define DSL_DEADLIST_BLOCKSIZE SPA_MAXBLOCKSIZE
65 65
66 66 #define DSL_DATASET_IS_DESTROYED(ds) ((ds)->ds_owner == dsl_reaper)
67 67
68 68
69 69 /*
  70  70  * Figure out how much of this delta should be propagated to the dsl_dir
71 71 * layer. If there's a refreservation, that space has already been
72 72 * partially accounted for in our ancestors.
73 73 */
74 74 static int64_t
75 75 parent_delta(dsl_dataset_t *ds, int64_t delta)
76 76 {
77 77 uint64_t old_bytes, new_bytes;
78 78
79 79 if (ds->ds_reserved == 0)
80 80 return (delta);
81 81
82 82 old_bytes = MAX(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
83 83 new_bytes = MAX(ds->ds_phys->ds_unique_bytes + delta, ds->ds_reserved);
84 84
85 85 ASSERT3U(ABS((int64_t)(new_bytes - old_bytes)), <=, ABS(delta));
86 86 return (new_bytes - old_bytes);
87 87 }
88 88
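The clamping that parent_delta() performs is easiest to see with numbers. Below is a user-level sketch (not part of this file) with hypothetical sizes; example_parent_delta() mirrors the function above:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define	MAX(a, b) ((a) > (b) ? (a) : (b))

static int64_t
example_parent_delta(uint64_t unique, uint64_t reserved, int64_t delta)
{
	uint64_t old_bytes, new_bytes;

	if (reserved == 0)
		return (delta);
	old_bytes = MAX(unique, reserved);
	new_bytes = MAX(unique + delta, reserved);
	return ((int64_t)(new_bytes - old_bytes));
}

int
main(void)
{
	uint64_t G = 1ULL << 30;

	/*
	 * 8G unique under a 10G refreservation: a 1G write is already
	 * charged to the ancestors via the reservation, so delta is 0.
	 */
	printf("%" PRId64 "\n", example_parent_delta(8 * G, 10 * G, 1 * G));

	/*
	 * 9.5G unique: only the 0.5G that exceeds the reservation
	 * propagates to the dsl_dir layer.
	 */
	printf("%" PRId64 "\n",
	    example_parent_delta(9 * G + G / 2, 10 * G, 1 * G));
	return (0);
}
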
89 89 void
90 90 dsl_dataset_block_born(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx)
91 91 {
92 92 int used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);
93 93 int compressed = BP_GET_PSIZE(bp);
94 94 int uncompressed = BP_GET_UCSIZE(bp);
95 95 int64_t delta;
96 96
97 97 dprintf_bp(bp, "ds=%p", ds);
98 98
99 99 ASSERT(dmu_tx_is_syncing(tx));
100 100 /* It could have been compressed away to nothing */
101 101 if (BP_IS_HOLE(bp))
102 102 return;
103 103 ASSERT(BP_GET_TYPE(bp) != DMU_OT_NONE);
104 104 ASSERT(DMU_OT_IS_VALID(BP_GET_TYPE(bp)));
105 105 if (ds == NULL) {
106 106 /*
107 107 * Account for the meta-objset space in its placeholder
108 108 * dsl_dir.
109 109 */
110 110 ASSERT3U(compressed, ==, uncompressed); /* it's all metadata */
111 111 dsl_dir_diduse_space(tx->tx_pool->dp_mos_dir, DD_USED_HEAD,
112 112 used, compressed, uncompressed, tx);
113 113 dsl_dir_dirty(tx->tx_pool->dp_mos_dir, tx);
114 114 return;
115 115 }
116 116 dmu_buf_will_dirty(ds->ds_dbuf, tx);
117 117
118 118 mutex_enter(&ds->ds_dir->dd_lock);
119 119 mutex_enter(&ds->ds_lock);
120 120 delta = parent_delta(ds, used);
121 121 ds->ds_phys->ds_referenced_bytes += used;
122 122 ds->ds_phys->ds_compressed_bytes += compressed;
123 123 ds->ds_phys->ds_uncompressed_bytes += uncompressed;
124 124 ds->ds_phys->ds_unique_bytes += used;
125 125 mutex_exit(&ds->ds_lock);
126 126 dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD, delta,
127 127 compressed, uncompressed, tx);
128 128 dsl_dir_transfer_space(ds->ds_dir, used - delta,
129 129 DD_USED_REFRSRV, DD_USED_HEAD, tx);
130 130 mutex_exit(&ds->ds_dir->dd_lock);
131 131 }
132 132
133 133 int
134 134 dsl_dataset_block_kill(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx,
135 135 boolean_t async)
136 136 {
137 137 if (BP_IS_HOLE(bp))
138 138 return (0);
139 139
140 140 ASSERT(dmu_tx_is_syncing(tx));
141 141 ASSERT(bp->blk_birth <= tx->tx_txg);
142 142
143 143 int used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);
144 144 int compressed = BP_GET_PSIZE(bp);
145 145 int uncompressed = BP_GET_UCSIZE(bp);
146 146
147 147 ASSERT(used > 0);
148 148 if (ds == NULL) {
149 149 /*
150 150 * Account for the meta-objset space in its placeholder
151 151 * dataset.
152 152 */
153 153 dsl_free(tx->tx_pool, tx->tx_txg, bp);
154 154
155 155 dsl_dir_diduse_space(tx->tx_pool->dp_mos_dir, DD_USED_HEAD,
156 156 -used, -compressed, -uncompressed, tx);
157 157 dsl_dir_dirty(tx->tx_pool->dp_mos_dir, tx);
158 158 return (used);
159 159 }
160 160 ASSERT3P(tx->tx_pool, ==, ds->ds_dir->dd_pool);
161 161
162 162 ASSERT(!dsl_dataset_is_snapshot(ds));
163 163 dmu_buf_will_dirty(ds->ds_dbuf, tx);
164 164
165 165 if (bp->blk_birth > ds->ds_phys->ds_prev_snap_txg) {
166 166 int64_t delta;
167 167
168 168 dprintf_bp(bp, "freeing ds=%llu", ds->ds_object);
169 169 dsl_free(tx->tx_pool, tx->tx_txg, bp);
170 170
171 171 mutex_enter(&ds->ds_dir->dd_lock);
172 172 mutex_enter(&ds->ds_lock);
173 173 ASSERT(ds->ds_phys->ds_unique_bytes >= used ||
174 174 !DS_UNIQUE_IS_ACCURATE(ds));
175 175 delta = parent_delta(ds, -used);
176 176 ds->ds_phys->ds_unique_bytes -= used;
177 177 mutex_exit(&ds->ds_lock);
178 178 dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD,
179 179 delta, -compressed, -uncompressed, tx);
180 180 dsl_dir_transfer_space(ds->ds_dir, -used - delta,
181 181 DD_USED_REFRSRV, DD_USED_HEAD, tx);
182 182 mutex_exit(&ds->ds_dir->dd_lock);
183 183 } else {
184 184 dprintf_bp(bp, "putting on dead list: %s", "");
185 185 if (async) {
186 186 /*
187 187 * We are here as part of zio's write done callback,
188 188 * which means we're a zio interrupt thread. We can't
189 189 * call dsl_deadlist_insert() now because it may block
190 190 * waiting for I/O. Instead, put bp on the deferred
191 191 * queue and let dsl_pool_sync() finish the job.
192 192 */
193 193 bplist_append(&ds->ds_pending_deadlist, bp);
194 194 } else {
195 195 dsl_deadlist_insert(&ds->ds_deadlist, bp, tx);
196 196 }
197 197 ASSERT3U(ds->ds_prev->ds_object, ==,
198 198 ds->ds_phys->ds_prev_snap_obj);
199 199 ASSERT(ds->ds_prev->ds_phys->ds_num_children > 0);
200 200 /* if (bp->blk_birth > prev prev snap txg) prev unique += bs */
201 201 if (ds->ds_prev->ds_phys->ds_next_snap_obj ==
202 202 ds->ds_object && bp->blk_birth >
203 203 ds->ds_prev->ds_phys->ds_prev_snap_txg) {
204 204 dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
205 205 mutex_enter(&ds->ds_prev->ds_lock);
206 206 ds->ds_prev->ds_phys->ds_unique_bytes += used;
207 207 mutex_exit(&ds->ds_prev->ds_lock);
208 208 }
209 209 if (bp->blk_birth > ds->ds_dir->dd_origin_txg) {
210 210 dsl_dir_transfer_space(ds->ds_dir, used,
211 211 DD_USED_HEAD, DD_USED_SNAP, tx);
212 212 }
213 213 }
214 214 mutex_enter(&ds->ds_lock);
215 215 ASSERT3U(ds->ds_phys->ds_referenced_bytes, >=, used);
216 216 ds->ds_phys->ds_referenced_bytes -= used;
217 217 ASSERT3U(ds->ds_phys->ds_compressed_bytes, >=, compressed);
218 218 ds->ds_phys->ds_compressed_bytes -= compressed;
219 219 ASSERT3U(ds->ds_phys->ds_uncompressed_bytes, >=, uncompressed);
220 220 ds->ds_phys->ds_uncompressed_bytes -= uncompressed;
221 221 mutex_exit(&ds->ds_lock);
222 222
223 223 return (used);
224 224 }
225 225
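Because the async path above runs on a zio interrupt thread, it must not block on I/O; it queues the block pointer and lets dsl_pool_sync() drain the list later. The following is a stripped-down, user-level sketch of that defer-then-drain pattern; the names and payload type are hypothetical, and pthreads stand in for kernel mutexes:

#include <pthread.h>
#include <stdlib.h>

struct pending {
	struct pending *next;
	long payload;			/* stands in for a blkptr_t */
};

static pthread_mutex_t plock = PTHREAD_MUTEX_INITIALIZER;
static struct pending *phead;

/* "Interrupt-context" side: constant-time append, never blocks on I/O. */
static void
pending_append(long payload)
{
	struct pending *p = malloc(sizeof (*p));

	p->payload = payload;
	pthread_mutex_lock(&plock);
	p->next = phead;
	phead = p;
	pthread_mutex_unlock(&plock);
}

/* Sync-thread side: may block; drains everything queued so far. */
static void
pending_drain(void (*process)(long))
{
	struct pending *list, *next;

	pthread_mutex_lock(&plock);
	list = phead;
	phead = NULL;
	pthread_mutex_unlock(&plock);
	for (; list != NULL; list = next) {
		next = list->next;
		process(list->payload);	/* cf. dsl_deadlist_insert() */
		free(list);
	}
}
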
226 226 uint64_t
227 227 dsl_dataset_prev_snap_txg(dsl_dataset_t *ds)
228 228 {
229 229 uint64_t trysnap = 0;
230 230
231 231 if (ds == NULL)
232 232 return (0);
233 233 /*
234 234 * The snapshot creation could fail, but that would cause an
235 235 * incorrect FALSE return, which would only result in an
236 236 * overestimation of the amount of space that an operation would
237 237 * consume, which is OK.
238 238 *
239 239 * There's also a small window where we could miss a pending
240 240 * snapshot, because we could set the sync task in the quiescing
241 241 * phase. So this should only be used as a guess.
242 242 */
243 243 if (ds->ds_trysnap_txg >
244 244 spa_last_synced_txg(ds->ds_dir->dd_pool->dp_spa))
245 245 trysnap = ds->ds_trysnap_txg;
246 246 return (MAX(ds->ds_phys->ds_prev_snap_txg, trysnap));
247 247 }
248 248
249 249 boolean_t
250 250 dsl_dataset_block_freeable(dsl_dataset_t *ds, const blkptr_t *bp,
251 251 uint64_t blk_birth)
252 252 {
253 253 if (blk_birth <= dsl_dataset_prev_snap_txg(ds))
254 254 return (B_FALSE);
255 255
256 256 ddt_prefetch(dsl_dataset_get_spa(ds), bp);
257 257
258 258 return (B_TRUE);
259 259 }
260 260
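With hypothetical txgs, the test above reads as follows:

/*
 * Most recent snapshot taken at txg 120 (hypothetical):
 *   block born at txg  90 -> 90 <= 120, the snapshot still references
 *                            it, so freeing it cannot reclaim space
 *   block born at txg 150 -> 150 > 120, only the head references it,
 *                            so it is freeable (and worth a DDT prefetch)
 */
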
261 261 /* ARGSUSED */
262 262 static void
263 263 dsl_dataset_evict(dmu_buf_t *db, void *dsv)
264 264 {
265 265 dsl_dataset_t *ds = dsv;
266 266
267 267 ASSERT(ds->ds_owner == NULL || DSL_DATASET_IS_DESTROYED(ds));
268 268
269 269 unique_remove(ds->ds_fsid_guid);
270 270
271 271 if (ds->ds_objset != NULL)
272 272 dmu_objset_evict(ds->ds_objset);
273 273
274 274 if (ds->ds_prev) {
275 275 dsl_dataset_drop_ref(ds->ds_prev, ds);
276 276 ds->ds_prev = NULL;
277 277 }
278 278
279 279 bplist_destroy(&ds->ds_pending_deadlist);
280 280 if (db != NULL) {
281 281 dsl_deadlist_close(&ds->ds_deadlist);
282 282 } else {
283 283 ASSERT(ds->ds_deadlist.dl_dbuf == NULL);
284 284 ASSERT(!ds->ds_deadlist.dl_oldfmt);
285 285 }
286 286 if (ds->ds_dir)
287 287 dsl_dir_close(ds->ds_dir, ds);
288 288
289 289 ASSERT(!list_link_active(&ds->ds_synced_link));
290 290
291 291 mutex_destroy(&ds->ds_lock);
292 - mutex_destroy(&ds->ds_recvlock);
293 292 mutex_destroy(&ds->ds_opening_lock);
294 293 rw_destroy(&ds->ds_rwlock);
295 294 cv_destroy(&ds->ds_exclusive_cv);
296 295
297 296 kmem_free(ds, sizeof (dsl_dataset_t));
298 297 }
299 298
300 299 static int
301 300 dsl_dataset_get_snapname(dsl_dataset_t *ds)
302 301 {
303 302 dsl_dataset_phys_t *headphys;
304 303 int err;
305 304 dmu_buf_t *headdbuf;
306 305 dsl_pool_t *dp = ds->ds_dir->dd_pool;
307 306 objset_t *mos = dp->dp_meta_objset;
308 307
309 308 if (ds->ds_snapname[0])
310 309 return (0);
311 310 if (ds->ds_phys->ds_next_snap_obj == 0)
312 311 return (0);
313 312
314 313 err = dmu_bonus_hold(mos, ds->ds_dir->dd_phys->dd_head_dataset_obj,
315 314 FTAG, &headdbuf);
316 315 if (err)
317 316 return (err);
318 317 headphys = headdbuf->db_data;
319 318 err = zap_value_search(dp->dp_meta_objset,
320 319 headphys->ds_snapnames_zapobj, ds->ds_object, 0, ds->ds_snapname);
321 320 dmu_buf_rele(headdbuf, FTAG);
322 321 return (err);
323 322 }
324 323
325 324 static int
326 325 dsl_dataset_snap_lookup(dsl_dataset_t *ds, const char *name, uint64_t *value)
327 326 {
328 327 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
329 328 uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
330 329 matchtype_t mt;
331 330 int err;
332 331
333 332 if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
334 333 mt = MT_FIRST;
335 334 else
336 335 mt = MT_EXACT;
337 336
338 337 err = zap_lookup_norm(mos, snapobj, name, 8, 1,
339 338 value, mt, NULL, 0, NULL);
340 339 if (err == ENOTSUP && mt == MT_FIRST)
341 340 err = zap_lookup(mos, snapobj, name, 8, 1, value);
342 341 return (err);
343 342 }
344 343
345 344 static int
346 345 dsl_dataset_snap_remove(dsl_dataset_t *ds, char *name, dmu_tx_t *tx)
347 346 {
348 347 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
349 348 uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
350 349 matchtype_t mt;
351 350 int err;
352 351
353 352 dsl_dir_snap_cmtime_update(ds->ds_dir);
354 353
355 354 if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
356 355 mt = MT_FIRST;
357 356 else
358 357 mt = MT_EXACT;
359 358
360 359 err = zap_remove_norm(mos, snapobj, name, mt, tx);
361 360 if (err == ENOTSUP && mt == MT_FIRST)
362 361 err = zap_remove(mos, snapobj, name, tx);
363 362 return (err);
364 363 }
365 364
366 365 static int
367 366 dsl_dataset_get_ref(dsl_pool_t *dp, uint64_t dsobj, void *tag,
368 367 dsl_dataset_t **dsp)
369 368 {
370 369 objset_t *mos = dp->dp_meta_objset;
371 370 dmu_buf_t *dbuf;
372 371 dsl_dataset_t *ds;
373 372 int err;
374 373 dmu_object_info_t doi;
375 374
376 375 ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
377 376 dsl_pool_sync_context(dp));
378 377
379 378 err = dmu_bonus_hold(mos, dsobj, tag, &dbuf);
380 379 if (err)
381 380 return (err);
382 381
383 382 /* Make sure dsobj has the correct object type. */
384 383 dmu_object_info_from_db(dbuf, &doi);
385 384 if (doi.doi_type != DMU_OT_DSL_DATASET)
386 385 return (EINVAL);
387 386
388 387 ds = dmu_buf_get_user(dbuf);
389 388 if (ds == NULL) {
390 389 dsl_dataset_t *winner;
391 390
392 391 ds = kmem_zalloc(sizeof (dsl_dataset_t), KM_SLEEP);
393 392 ds->ds_dbuf = dbuf;
394 393 ds->ds_object = dsobj;
395 394 ds->ds_phys = dbuf->db_data;
396 395
397 396 mutex_init(&ds->ds_lock, NULL, MUTEX_DEFAULT, NULL);
398 - mutex_init(&ds->ds_recvlock, NULL, MUTEX_DEFAULT, NULL);
399 397 mutex_init(&ds->ds_opening_lock, NULL, MUTEX_DEFAULT, NULL);
400 398 mutex_init(&ds->ds_sendstream_lock, NULL, MUTEX_DEFAULT, NULL);
401 399
402 400 rw_init(&ds->ds_rwlock, 0, 0, 0);
403 401 cv_init(&ds->ds_exclusive_cv, NULL, CV_DEFAULT, NULL);
404 402
405 403 bplist_create(&ds->ds_pending_deadlist);
406 404 dsl_deadlist_open(&ds->ds_deadlist,
407 405 mos, ds->ds_phys->ds_deadlist_obj);
408 406
409 407 list_create(&ds->ds_sendstreams, sizeof (dmu_sendarg_t),
410 408 offsetof(dmu_sendarg_t, dsa_link));
411 409
412 410 if (err == 0) {
413 411 err = dsl_dir_open_obj(dp,
414 412 ds->ds_phys->ds_dir_obj, NULL, ds, &ds->ds_dir);
415 413 }
416 414 if (err) {
417 415 mutex_destroy(&ds->ds_lock);
418 - mutex_destroy(&ds->ds_recvlock);
419 416 mutex_destroy(&ds->ds_opening_lock);
420 417 rw_destroy(&ds->ds_rwlock);
421 418 cv_destroy(&ds->ds_exclusive_cv);
422 419 bplist_destroy(&ds->ds_pending_deadlist);
423 420 dsl_deadlist_close(&ds->ds_deadlist);
424 421 kmem_free(ds, sizeof (dsl_dataset_t));
425 422 dmu_buf_rele(dbuf, tag);
426 423 return (err);
427 424 }
428 425
429 426 if (!dsl_dataset_is_snapshot(ds)) {
430 427 ds->ds_snapname[0] = '\0';
431 428 if (ds->ds_phys->ds_prev_snap_obj) {
432 429 err = dsl_dataset_get_ref(dp,
433 430 ds->ds_phys->ds_prev_snap_obj,
434 431 ds, &ds->ds_prev);
435 432 }
436 433 } else {
437 434 if (zfs_flags & ZFS_DEBUG_SNAPNAMES)
438 435 err = dsl_dataset_get_snapname(ds);
439 436 if (err == 0 && ds->ds_phys->ds_userrefs_obj != 0) {
440 437 err = zap_count(
441 438 ds->ds_dir->dd_pool->dp_meta_objset,
442 439 ds->ds_phys->ds_userrefs_obj,
443 440 &ds->ds_userrefs);
444 441 }
445 442 }
446 443
447 444 if (err == 0 && !dsl_dataset_is_snapshot(ds)) {
448 445 /*
449 446 * In sync context, we're called with either no lock
450 447 * or with the write lock. If we're not syncing,
451 448 * we're always called with the read lock held.
452 449 */
453 450 boolean_t need_lock =
454 451 !RW_WRITE_HELD(&dp->dp_config_rwlock) &&
455 452 dsl_pool_sync_context(dp);
456 453
457 454 if (need_lock)
458 455 rw_enter(&dp->dp_config_rwlock, RW_READER);
459 456
460 457 err = dsl_prop_get_ds(ds,
461 458 "refreservation", sizeof (uint64_t), 1,
462 459 &ds->ds_reserved, NULL);
463 460 if (err == 0) {
464 461 err = dsl_prop_get_ds(ds,
465 462 "refquota", sizeof (uint64_t), 1,
466 463 &ds->ds_quota, NULL);
467 464 }
468 465
469 466 if (need_lock)
470 467 rw_exit(&dp->dp_config_rwlock);
471 468 } else {
472 469 ds->ds_reserved = ds->ds_quota = 0;
473 470 }
474 471
475 472 if (err == 0) {
476 473 winner = dmu_buf_set_user_ie(dbuf, ds, &ds->ds_phys,
477 474 dsl_dataset_evict);
478 475 }
479 476 if (err || winner) {
480 477 bplist_destroy(&ds->ds_pending_deadlist);
481 478 dsl_deadlist_close(&ds->ds_deadlist);
482 479 if (ds->ds_prev)
483 480 dsl_dataset_drop_ref(ds->ds_prev, ds);
484 481 dsl_dir_close(ds->ds_dir, ds);
485 482 mutex_destroy(&ds->ds_lock);
486 - mutex_destroy(&ds->ds_recvlock);
487 483 mutex_destroy(&ds->ds_opening_lock);
488 484 rw_destroy(&ds->ds_rwlock);
489 485 cv_destroy(&ds->ds_exclusive_cv);
490 486 kmem_free(ds, sizeof (dsl_dataset_t));
491 487 if (err) {
492 488 dmu_buf_rele(dbuf, tag);
493 489 return (err);
494 490 }
495 491 ds = winner;
496 492 } else {
497 493 ds->ds_fsid_guid =
498 494 unique_insert(ds->ds_phys->ds_fsid_guid);
499 495 }
500 496 }
501 497 ASSERT3P(ds->ds_dbuf, ==, dbuf);
502 498 ASSERT3P(ds->ds_phys, ==, dbuf->db_data);
503 499 ASSERT(ds->ds_phys->ds_prev_snap_obj != 0 ||
504 500 spa_version(dp->dp_spa) < SPA_VERSION_ORIGIN ||
505 501 dp->dp_origin_snap == NULL || ds == dp->dp_origin_snap);
506 502 mutex_enter(&ds->ds_lock);
507 503 if (!dsl_pool_sync_context(dp) && DSL_DATASET_IS_DESTROYED(ds)) {
508 504 mutex_exit(&ds->ds_lock);
509 505 dmu_buf_rele(ds->ds_dbuf, tag);
510 506 return (ENOENT);
511 507 }
512 508 mutex_exit(&ds->ds_lock);
513 509 *dsp = ds;
514 510 return (0);
515 511 }
516 512
517 513 static int
518 514 dsl_dataset_hold_ref(dsl_dataset_t *ds, void *tag)
519 515 {
520 516 dsl_pool_t *dp = ds->ds_dir->dd_pool;
521 517
522 518 /*
 523  519  * In syncing context we don't want the rwlock: there
524 520 * may be an existing writer waiting for sync phase to
525 521 * finish. We don't need to worry about such writers, since
526 522 * sync phase is single-threaded, so the writer can't be
527 523 * doing anything while we are active.
528 524 */
529 525 if (dsl_pool_sync_context(dp)) {
530 526 ASSERT(!DSL_DATASET_IS_DESTROYED(ds));
531 527 return (0);
532 528 }
533 529
534 530 /*
535 531 * Normal users will hold the ds_rwlock as a READER until they
536 532 * are finished (i.e., call dsl_dataset_rele()). "Owners" will
537 533 * drop their READER lock after they set the ds_owner field.
538 534 *
539 535 * If the dataset is being destroyed, the destroy thread will
540 536 * obtain a WRITER lock for exclusive access after it's done its
541 537 * open-context work and then change the ds_owner to
542 538 * dsl_reaper once destruction is assured. So threads
543 539 * may block here temporarily, until the "destructability" of
544 540 * the dataset is determined.
545 541 */
546 542 ASSERT(!RW_WRITE_HELD(&dp->dp_config_rwlock));
547 543 mutex_enter(&ds->ds_lock);
548 544 while (!rw_tryenter(&ds->ds_rwlock, RW_READER)) {
545 + int rc;
546 +
549 547 rw_exit(&dp->dp_config_rwlock);
550 - cv_wait(&ds->ds_exclusive_cv, &ds->ds_lock);
551 - if (DSL_DATASET_IS_DESTROYED(ds)) {
548 + rc = cv_wait_sig(&ds->ds_exclusive_cv, &ds->ds_lock);
549 + if (!rc || DSL_DATASET_IS_DESTROYED(ds)) {
552 550 mutex_exit(&ds->ds_lock);
553 551 dsl_dataset_drop_ref(ds, tag);
554 552 rw_enter(&dp->dp_config_rwlock, RW_READER);
555 - return (ENOENT);
553 + return (rc ? ENOENT : EINTR);
556 554 }
557 555 /*
558 556 * The dp_config_rwlock lives above the ds_lock. And
559 557 * we need to check DSL_DATASET_IS_DESTROYED() while
560 558 * holding the ds_lock, so we have to drop and reacquire
561 559 * the ds_lock here.
562 560 */
563 561 mutex_exit(&ds->ds_lock);
564 562 rw_enter(&dp->dp_config_rwlock, RW_READER);
565 563 mutex_enter(&ds->ds_lock);
566 564 }
567 565 mutex_exit(&ds->ds_lock);
568 566 return (0);
569 567 }
570 568
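The switch to cv_wait_sig() in the hunk above makes the wait interruptible: in illumos, cv_wait_sig() returns 0 when the sleep is broken by a signal and nonzero when the thread is woken normally, hence the mapping of a zero return to EINTR. A condensed, kernel-style sketch of the pattern (wait_until_ready is a hypothetical helper):

#include <sys/types.h>
#include <sys/condvar.h>
#include <sys/mutex.h>
#include <sys/errno.h>

static int
wait_until_ready(kcondvar_t *cv, kmutex_t *lock, boolean_t *ready)
{
	mutex_enter(lock);
	while (!*ready) {
		if (cv_wait_sig(cv, lock) == 0) {
			/* Interrupted by a signal: back out with EINTR. */
			mutex_exit(lock);
			return (EINTR);
		}
	}
	mutex_exit(lock);
	return (0);
}
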
571 569 int
572 570 dsl_dataset_hold_obj(dsl_pool_t *dp, uint64_t dsobj, void *tag,
573 571 dsl_dataset_t **dsp)
574 572 {
575 573 int err = dsl_dataset_get_ref(dp, dsobj, tag, dsp);
576 574
577 575 if (err)
578 576 return (err);
579 577 return (dsl_dataset_hold_ref(*dsp, tag));
580 578 }
581 579
582 580 int
583 581 dsl_dataset_own_obj(dsl_pool_t *dp, uint64_t dsobj, boolean_t inconsistentok,
584 582 void *tag, dsl_dataset_t **dsp)
585 583 {
586 584 int err = dsl_dataset_hold_obj(dp, dsobj, tag, dsp);
587 585 if (err)
588 586 return (err);
589 587 if (!dsl_dataset_tryown(*dsp, inconsistentok, tag)) {
590 588 dsl_dataset_rele(*dsp, tag);
591 589 *dsp = NULL;
592 590 return (EBUSY);
593 591 }
594 592 return (0);
595 593 }
596 594
597 595 int
598 596 dsl_dataset_hold(const char *name, void *tag, dsl_dataset_t **dsp)
599 597 {
600 598 dsl_dir_t *dd;
601 599 dsl_pool_t *dp;
602 600 const char *snapname;
603 601 uint64_t obj;
604 602 int err = 0;
605 603
606 604 err = dsl_dir_open_spa(NULL, name, FTAG, &dd, &snapname);
607 605 if (err)
608 606 return (err);
609 607
610 608 dp = dd->dd_pool;
611 609 obj = dd->dd_phys->dd_head_dataset_obj;
612 610 rw_enter(&dp->dp_config_rwlock, RW_READER);
613 611 if (obj)
614 612 err = dsl_dataset_get_ref(dp, obj, tag, dsp);
615 613 else
616 614 err = ENOENT;
617 615 if (err)
618 616 goto out;
619 617
620 618 err = dsl_dataset_hold_ref(*dsp, tag);
621 619
622 620 /* we may be looking for a snapshot */
623 621 if (err == 0 && snapname != NULL) {
624 622 dsl_dataset_t *ds = NULL;
625 623
626 624 if (*snapname++ != '@') {
627 625 dsl_dataset_rele(*dsp, tag);
628 626 err = ENOENT;
629 627 goto out;
630 628 }
631 629
632 630 dprintf("looking for snapshot '%s'\n", snapname);
633 631 err = dsl_dataset_snap_lookup(*dsp, snapname, &obj);
634 632 if (err == 0)
635 633 err = dsl_dataset_get_ref(dp, obj, tag, &ds);
636 634 dsl_dataset_rele(*dsp, tag);
637 635
638 636 ASSERT3U((err == 0), ==, (ds != NULL));
639 637
640 638 if (ds) {
641 639 mutex_enter(&ds->ds_lock);
642 640 if (ds->ds_snapname[0] == 0)
643 641 (void) strlcpy(ds->ds_snapname, snapname,
644 642 sizeof (ds->ds_snapname));
645 643 mutex_exit(&ds->ds_lock);
646 644 err = dsl_dataset_hold_ref(ds, tag);
647 645 *dsp = err ? NULL : ds;
648 646 }
649 647 }
650 648 out:
651 649 rw_exit(&dp->dp_config_rwlock);
652 650 dsl_dir_close(dd, FTAG);
653 651 return (err);
654 652 }
655 653
656 654 int
657 655 dsl_dataset_own(const char *name, boolean_t inconsistentok,
658 656 void *tag, dsl_dataset_t **dsp)
659 657 {
660 658 int err = dsl_dataset_hold(name, tag, dsp);
661 659 if (err)
662 660 return (err);
663 661 if (!dsl_dataset_tryown(*dsp, inconsistentok, tag)) {
664 662 dsl_dataset_rele(*dsp, tag);
665 663 return (EBUSY);
666 664 }
667 665 return (0);
668 666 }
669 667
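For orientation, the hold/own pairs above are used roughly like this (a hypothetical caller; the dataset name is made up, and FTAG is the usual illumos self-referential tag):

	dsl_dataset_t *ds;
	int err;

	/* Read-only access: hold + rele. */
	err = dsl_dataset_hold("tank/fs", FTAG, &ds);
	if (err == 0) {
		/* ... inspect the dataset ... */
		dsl_dataset_rele(ds, FTAG);
	}

	/* Exclusive access (e.g., for destroy): own + disown. */
	err = dsl_dataset_own("tank/fs", B_FALSE, FTAG, &ds);
	if (err == 0) {
		/* ... no other owner can appear until we disown ... */
		dsl_dataset_disown(ds, FTAG);
	}
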
670 668 void
671 669 dsl_dataset_name(dsl_dataset_t *ds, char *name)
672 670 {
673 671 if (ds == NULL) {
674 672 (void) strcpy(name, "mos");
675 673 } else {
676 674 dsl_dir_name(ds->ds_dir, name);
677 675 VERIFY(0 == dsl_dataset_get_snapname(ds));
678 676 if (ds->ds_snapname[0]) {
679 677 (void) strcat(name, "@");
680 678 /*
681 679 * We use a "recursive" mutex so that we
682 680 * can call dprintf_ds() with ds_lock held.
683 681 */
684 682 if (!MUTEX_HELD(&ds->ds_lock)) {
685 683 mutex_enter(&ds->ds_lock);
686 684 (void) strcat(name, ds->ds_snapname);
687 685 mutex_exit(&ds->ds_lock);
688 686 } else {
689 687 (void) strcat(name, ds->ds_snapname);
690 688 }
691 689 }
692 690 }
693 691 }
694 692
695 693 static int
696 694 dsl_dataset_namelen(dsl_dataset_t *ds)
697 695 {
698 696 int result;
699 697
700 698 if (ds == NULL) {
701 699 result = 3; /* "mos" */
702 700 } else {
703 701 result = dsl_dir_namelen(ds->ds_dir);
704 702 VERIFY(0 == dsl_dataset_get_snapname(ds));
705 703 if (ds->ds_snapname[0]) {
706 704 ++result; /* adding one for the @-sign */
707 705 if (!MUTEX_HELD(&ds->ds_lock)) {
708 706 mutex_enter(&ds->ds_lock);
709 707 result += strlen(ds->ds_snapname);
710 708 mutex_exit(&ds->ds_lock);
711 709 } else {
712 710 result += strlen(ds->ds_snapname);
713 711 }
714 712 }
715 713 }
716 714
717 715 return (result);
718 716 }
719 717
720 718 void
721 719 dsl_dataset_drop_ref(dsl_dataset_t *ds, void *tag)
722 720 {
723 721 dmu_buf_rele(ds->ds_dbuf, tag);
724 722 }
725 723
726 724 void
727 725 dsl_dataset_rele(dsl_dataset_t *ds, void *tag)
728 726 {
729 727 if (!dsl_pool_sync_context(ds->ds_dir->dd_pool)) {
730 728 rw_exit(&ds->ds_rwlock);
731 729 }
732 730 dsl_dataset_drop_ref(ds, tag);
733 731 }
734 732
735 733 void
736 734 dsl_dataset_disown(dsl_dataset_t *ds, void *tag)
737 735 {
738 736 ASSERT((ds->ds_owner == tag && ds->ds_dbuf) ||
739 737 (DSL_DATASET_IS_DESTROYED(ds) && ds->ds_dbuf == NULL));
740 738
741 739 mutex_enter(&ds->ds_lock);
742 740 ds->ds_owner = NULL;
743 741 if (RW_WRITE_HELD(&ds->ds_rwlock)) {
744 742 rw_exit(&ds->ds_rwlock);
745 743 cv_broadcast(&ds->ds_exclusive_cv);
746 744 }
747 745 mutex_exit(&ds->ds_lock);
748 746 if (ds->ds_dbuf)
749 747 dsl_dataset_drop_ref(ds, tag);
750 748 else
751 749 dsl_dataset_evict(NULL, ds);
752 750 }
753 751
754 752 boolean_t
755 753 dsl_dataset_tryown(dsl_dataset_t *ds, boolean_t inconsistentok, void *tag)
756 754 {
757 755 boolean_t gotit = FALSE;
758 756
759 757 mutex_enter(&ds->ds_lock);
760 758 if (ds->ds_owner == NULL &&
761 759 (!DS_IS_INCONSISTENT(ds) || inconsistentok)) {
762 760 ds->ds_owner = tag;
763 761 if (!dsl_pool_sync_context(ds->ds_dir->dd_pool))
764 762 rw_exit(&ds->ds_rwlock);
765 763 gotit = TRUE;
766 764 }
767 765 mutex_exit(&ds->ds_lock);
768 766 return (gotit);
769 767 }
770 768
771 769 void
772 770 dsl_dataset_make_exclusive(dsl_dataset_t *ds, void *owner)
773 771 {
774 772 ASSERT3P(owner, ==, ds->ds_owner);
775 773 if (!RW_WRITE_HELD(&ds->ds_rwlock))
776 774 rw_enter(&ds->ds_rwlock, RW_WRITER);
777 775 }
778 776
779 777 uint64_t
780 778 dsl_dataset_create_sync_dd(dsl_dir_t *dd, dsl_dataset_t *origin,
781 779 uint64_t flags, dmu_tx_t *tx)
782 780 {
783 781 dsl_pool_t *dp = dd->dd_pool;
784 782 dmu_buf_t *dbuf;
785 783 dsl_dataset_phys_t *dsphys;
786 784 uint64_t dsobj;
787 785 objset_t *mos = dp->dp_meta_objset;
788 786
789 787 if (origin == NULL)
790 788 origin = dp->dp_origin_snap;
791 789
792 790 ASSERT(origin == NULL || origin->ds_dir->dd_pool == dp);
793 791 ASSERT(origin == NULL || origin->ds_phys->ds_num_children > 0);
794 792 ASSERT(dmu_tx_is_syncing(tx));
795 793 ASSERT(dd->dd_phys->dd_head_dataset_obj == 0);
796 794
797 795 dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
798 796 DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
799 797 VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
800 798 dmu_buf_will_dirty(dbuf, tx);
801 799 dsphys = dbuf->db_data;
802 800 bzero(dsphys, sizeof (dsl_dataset_phys_t));
803 801 dsphys->ds_dir_obj = dd->dd_object;
804 802 dsphys->ds_flags = flags;
805 803 dsphys->ds_fsid_guid = unique_create();
806 804 (void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
807 805 sizeof (dsphys->ds_guid));
808 806 dsphys->ds_snapnames_zapobj =
809 807 zap_create_norm(mos, U8_TEXTPREP_TOUPPER, DMU_OT_DSL_DS_SNAP_MAP,
810 808 DMU_OT_NONE, 0, tx);
811 809 dsphys->ds_creation_time = gethrestime_sec();
812 810 dsphys->ds_creation_txg = tx->tx_txg == TXG_INITIAL ? 1 : tx->tx_txg;
813 811
814 812 if (origin == NULL) {
815 813 dsphys->ds_deadlist_obj = dsl_deadlist_alloc(mos, tx);
816 814 } else {
817 815 dsl_dataset_t *ohds;
818 816
819 817 dsphys->ds_prev_snap_obj = origin->ds_object;
820 818 dsphys->ds_prev_snap_txg =
821 819 origin->ds_phys->ds_creation_txg;
822 820 dsphys->ds_referenced_bytes =
823 821 origin->ds_phys->ds_referenced_bytes;
824 822 dsphys->ds_compressed_bytes =
825 823 origin->ds_phys->ds_compressed_bytes;
826 824 dsphys->ds_uncompressed_bytes =
827 825 origin->ds_phys->ds_uncompressed_bytes;
828 826 dsphys->ds_bp = origin->ds_phys->ds_bp;
829 827 dsphys->ds_flags |= origin->ds_phys->ds_flags;
830 828
831 829 dmu_buf_will_dirty(origin->ds_dbuf, tx);
832 830 origin->ds_phys->ds_num_children++;
833 831
834 832 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
835 833 origin->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &ohds));
836 834 dsphys->ds_deadlist_obj = dsl_deadlist_clone(&ohds->ds_deadlist,
837 835 dsphys->ds_prev_snap_txg, dsphys->ds_prev_snap_obj, tx);
838 836 dsl_dataset_rele(ohds, FTAG);
839 837
840 838 if (spa_version(dp->dp_spa) >= SPA_VERSION_NEXT_CLONES) {
841 839 if (origin->ds_phys->ds_next_clones_obj == 0) {
842 840 origin->ds_phys->ds_next_clones_obj =
843 841 zap_create(mos,
844 842 DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
845 843 }
846 844 VERIFY(0 == zap_add_int(mos,
847 845 origin->ds_phys->ds_next_clones_obj,
848 846 dsobj, tx));
849 847 }
850 848
851 849 dmu_buf_will_dirty(dd->dd_dbuf, tx);
852 850 dd->dd_phys->dd_origin_obj = origin->ds_object;
853 851 if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
854 852 if (origin->ds_dir->dd_phys->dd_clones == 0) {
855 853 dmu_buf_will_dirty(origin->ds_dir->dd_dbuf, tx);
856 854 origin->ds_dir->dd_phys->dd_clones =
857 855 zap_create(mos,
858 856 DMU_OT_DSL_CLONES, DMU_OT_NONE, 0, tx);
859 857 }
860 858 VERIFY3U(0, ==, zap_add_int(mos,
861 859 origin->ds_dir->dd_phys->dd_clones, dsobj, tx));
862 860 }
863 861 }
864 862
865 863 if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
866 864 dsphys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
867 865
868 866 dmu_buf_rele(dbuf, FTAG);
869 867
870 868 dmu_buf_will_dirty(dd->dd_dbuf, tx);
871 869 dd->dd_phys->dd_head_dataset_obj = dsobj;
872 870
873 871 return (dsobj);
874 872 }
875 873
876 874 uint64_t
877 875 dsl_dataset_create_sync(dsl_dir_t *pdd, const char *lastname,
878 876 dsl_dataset_t *origin, uint64_t flags, cred_t *cr, dmu_tx_t *tx)
879 877 {
880 878 dsl_pool_t *dp = pdd->dd_pool;
881 879 uint64_t dsobj, ddobj;
882 880 dsl_dir_t *dd;
883 881
884 882 ASSERT(lastname[0] != '@');
885 883
886 884 ddobj = dsl_dir_create_sync(dp, pdd, lastname, tx);
887 885 VERIFY(0 == dsl_dir_open_obj(dp, ddobj, lastname, FTAG, &dd));
888 886
889 887 dsobj = dsl_dataset_create_sync_dd(dd, origin, flags, tx);
890 888
891 889 dsl_deleg_set_create_perms(dd, tx, cr);
892 890
893 891 dsl_dir_close(dd, FTAG);
894 892
895 893 /*
896 894 * If we are creating a clone, make sure we zero out any stale
 897  895  * data from the origin snapshot's zil header.
898 896 */
899 897 if (origin != NULL) {
900 898 dsl_dataset_t *ds;
901 899 objset_t *os;
902 900
903 901 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
904 902 VERIFY3U(0, ==, dmu_objset_from_ds(ds, &os));
905 903 bzero(&os->os_zil_header, sizeof (os->os_zil_header));
906 904 dsl_dataset_dirty(ds, tx);
907 905 dsl_dataset_rele(ds, FTAG);
908 906 }
909 907
910 908 return (dsobj);
911 909 }
912 910
913 911 /*
914 912 * The snapshots must all be in the same pool.
915 913 */
916 914 int
917 915 dmu_snapshots_destroy_nvl(nvlist_t *snaps, boolean_t defer, char *failed)
918 916 {
919 917 int err;
920 918 dsl_sync_task_t *dst;
921 919 spa_t *spa;
922 920 nvpair_t *pair;
923 921 dsl_sync_task_group_t *dstg;
924 922
925 923 pair = nvlist_next_nvpair(snaps, NULL);
926 924 if (pair == NULL)
927 925 return (0);
928 926
929 927 err = spa_open(nvpair_name(pair), &spa, FTAG);
930 928 if (err)
931 929 return (err);
932 930 dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
933 931
934 932 for (pair = nvlist_next_nvpair(snaps, NULL); pair != NULL;
935 933 pair = nvlist_next_nvpair(snaps, pair)) {
936 934 dsl_dataset_t *ds;
937 935
938 936 err = dsl_dataset_own(nvpair_name(pair), B_TRUE, dstg, &ds);
939 937 if (err == 0) {
940 938 struct dsl_ds_destroyarg *dsda;
941 939
942 940 dsl_dataset_make_exclusive(ds, dstg);
943 941 dsda = kmem_zalloc(sizeof (struct dsl_ds_destroyarg),
944 942 KM_SLEEP);
945 943 dsda->ds = ds;
946 944 dsda->defer = defer;
947 945 dsl_sync_task_create(dstg, dsl_dataset_destroy_check,
948 946 dsl_dataset_destroy_sync, dsda, dstg, 0);
949 947 } else if (err == ENOENT) {
950 948 err = 0;
951 949 } else {
952 950 (void) strcpy(failed, nvpair_name(pair));
953 951 break;
954 952 }
955 953 }
956 954
957 955 if (err == 0)
958 956 err = dsl_sync_task_group_wait(dstg);
959 957
960 958 for (dst = list_head(&dstg->dstg_tasks); dst;
961 959 dst = list_next(&dstg->dstg_tasks, dst)) {
962 960 struct dsl_ds_destroyarg *dsda = dst->dst_arg1;
963 961 dsl_dataset_t *ds = dsda->ds;
964 962
965 963 /*
966 964 * Return the file system name that triggered the error
967 965 */
968 966 if (dst->dst_err) {
969 967 dsl_dataset_name(ds, failed);
970 968 }
971 969 ASSERT3P(dsda->rm_origin, ==, NULL);
972 970 dsl_dataset_disown(ds, dstg);
973 971 kmem_free(dsda, sizeof (struct dsl_ds_destroyarg));
974 972 }
975 973
976 974 dsl_sync_task_group_destroy(dstg);
977 975 spa_close(spa, FTAG);
978 976 return (err);
979 977
980 978 }
981 979
982 980 static boolean_t
983 981 dsl_dataset_might_destroy_origin(dsl_dataset_t *ds)
984 982 {
985 983 boolean_t might_destroy = B_FALSE;
986 984
987 985 mutex_enter(&ds->ds_lock);
988 986 if (ds->ds_phys->ds_num_children == 2 && ds->ds_userrefs == 0 &&
989 987 DS_IS_DEFER_DESTROY(ds))
990 988 might_destroy = B_TRUE;
991 989 mutex_exit(&ds->ds_lock);
992 990
993 991 return (might_destroy);
994 992 }
995 993
996 994 /*
997 995 * If we're removing a clone, and these three conditions are true:
998 996 * 1) the clone's origin has no other children
999 997 * 2) the clone's origin has no user references
1000 998 * 3) the clone's origin has been marked for deferred destruction
1001 999 * Then, prepare to remove the origin as part of this sync task group.
1002 1000 */
1003 1001 static int
1004 1002 dsl_dataset_origin_rm_prep(struct dsl_ds_destroyarg *dsda, void *tag)
1005 1003 {
1006 1004 dsl_dataset_t *ds = dsda->ds;
1007 1005 dsl_dataset_t *origin = ds->ds_prev;
1008 1006
1009 1007 if (dsl_dataset_might_destroy_origin(origin)) {
1010 1008 char *name;
1011 1009 int namelen;
1012 1010 int error;
1013 1011
1014 1012 namelen = dsl_dataset_namelen(origin) + 1;
1015 1013 name = kmem_alloc(namelen, KM_SLEEP);
1016 1014 dsl_dataset_name(origin, name);
1017 1015 #ifdef _KERNEL
1018 1016 error = zfs_unmount_snap(name, NULL);
1019 1017 if (error) {
1020 1018 kmem_free(name, namelen);
1021 1019 return (error);
1022 1020 }
1023 1021 #endif
1024 1022 error = dsl_dataset_own(name, B_TRUE, tag, &origin);
1025 1023 kmem_free(name, namelen);
1026 1024 if (error)
1027 1025 return (error);
1028 1026 dsda->rm_origin = origin;
1029 1027 dsl_dataset_make_exclusive(origin, tag);
1030 1028 }
1031 1029
1032 1030 return (0);
1033 1031 }
1034 1032
1035 1033 /*
1036 1034 * ds must be opened as OWNER. On return (whether successful or not),
1037 1035 * ds will be closed and caller can no longer dereference it.
1038 1036 */
1039 1037 int
1040 1038 dsl_dataset_destroy(dsl_dataset_t *ds, void *tag, boolean_t defer)
1041 1039 {
1042 1040 int err;
1043 1041 dsl_sync_task_group_t *dstg;
1044 1042 objset_t *os;
1045 1043 dsl_dir_t *dd;
1046 1044 uint64_t obj;
1047 1045 struct dsl_ds_destroyarg dsda = { 0 };
1048 1046 dsl_dataset_t dummy_ds = { 0 };
1049 1047
1050 1048 dsda.ds = ds;
1051 1049
1052 1050 if (dsl_dataset_is_snapshot(ds)) {
1053 1051 /* Destroying a snapshot is simpler */
1054 1052 dsl_dataset_make_exclusive(ds, tag);
1055 1053
1056 1054 dsda.defer = defer;
1057 1055 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
1058 1056 dsl_dataset_destroy_check, dsl_dataset_destroy_sync,
1059 1057 &dsda, tag, 0);
1060 1058 ASSERT3P(dsda.rm_origin, ==, NULL);
1061 1059 goto out;
1062 1060 } else if (defer) {
1063 1061 err = EINVAL;
1064 1062 goto out;
1065 1063 }
1066 1064
1067 1065 dd = ds->ds_dir;
1068 1066 dummy_ds.ds_dir = dd;
1069 1067 dummy_ds.ds_object = ds->ds_object;
1070 1068
1071 1069 /*
1072 1070 * Check for errors and mark this ds as inconsistent, in
1073 1071 * case we crash while freeing the objects.
1074 1072 */
1075 1073 err = dsl_sync_task_do(dd->dd_pool, dsl_dataset_destroy_begin_check,
1076 1074 dsl_dataset_destroy_begin_sync, ds, NULL, 0);
1077 1075 if (err)
1078 1076 goto out;
1079 1077
1080 1078 err = dmu_objset_from_ds(ds, &os);
1081 1079 if (err)
1082 1080 goto out;
1083 1081
1084 1082 /*
1085 1083 * If async destruction is not enabled try to remove all objects
1086 1084 * while in the open context so that there is less work to do in
1087 1085 * the syncing context.
1088 1086 */
1089 1087 if (!spa_feature_is_enabled(dsl_dataset_get_spa(ds),
1090 1088 &spa_feature_table[SPA_FEATURE_ASYNC_DESTROY])) {
1091 1089 for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE,
1092 1090 ds->ds_phys->ds_prev_snap_txg)) {
1093 1091 /*
1094 1092  * Ignore errors; if there is not enough disk space
1095 1093 * we will deal with it in dsl_dataset_destroy_sync().
1096 1094 */
1097 1095 (void) dmu_free_object(os, obj);
1098 1096 }
1099 1097 if (err != ESRCH)
1100 1098 goto out;
1101 1099 }
1102 1100
1103 1101 /*
1104 1102 * Only the ZIL knows how to free log blocks.
1105 1103 */
1106 1104 zil_destroy(dmu_objset_zil(os), B_FALSE);
1107 1105
1108 1106 /*
1109 1107 * Sync out all in-flight IO.
1110 1108 */
1111 1109 txg_wait_synced(dd->dd_pool, 0);
1112 1110
1113 1111 /*
1114 1112 * If we managed to free all the objects in open
1115 1113 * context, the user space accounting should be zero.
1116 1114 */
1117 1115 if (ds->ds_phys->ds_bp.blk_fill == 0 &&
1118 1116 dmu_objset_userused_enabled(os)) {
1119 1117 uint64_t count;
1120 1118
1121 1119 ASSERT(zap_count(os, DMU_USERUSED_OBJECT, &count) != 0 ||
1122 1120 count == 0);
1123 1121 ASSERT(zap_count(os, DMU_GROUPUSED_OBJECT, &count) != 0 ||
1124 1122 count == 0);
1125 1123 }
1126 1124
1127 1125 rw_enter(&dd->dd_pool->dp_config_rwlock, RW_READER);
1128 1126 err = dsl_dir_open_obj(dd->dd_pool, dd->dd_object, NULL, FTAG, &dd);
1129 1127 rw_exit(&dd->dd_pool->dp_config_rwlock);
1130 1128
1131 1129 if (err)
1132 1130 goto out;
1133 1131
1134 1132 /*
1135 1133 * Blow away the dsl_dir + head dataset.
1136 1134 */
1137 1135 dsl_dataset_make_exclusive(ds, tag);
1138 1136 /*
1139 1137 * If we're removing a clone, we might also need to remove its
1140 1138 * origin.
1141 1139 */
1142 1140 do {
1143 1141 dsda.need_prep = B_FALSE;
1144 1142 if (dsl_dir_is_clone(dd)) {
1145 1143 err = dsl_dataset_origin_rm_prep(&dsda, tag);
1146 1144 if (err) {
1147 1145 dsl_dir_close(dd, FTAG);
1148 1146 goto out;
1149 1147 }
1150 1148 }
1151 1149
1152 1150 dstg = dsl_sync_task_group_create(ds->ds_dir->dd_pool);
1153 1151 dsl_sync_task_create(dstg, dsl_dataset_destroy_check,
1154 1152 dsl_dataset_destroy_sync, &dsda, tag, 0);
1155 1153 dsl_sync_task_create(dstg, dsl_dir_destroy_check,
1156 1154 dsl_dir_destroy_sync, &dummy_ds, FTAG, 0);
1157 1155 err = dsl_sync_task_group_wait(dstg);
1158 1156 dsl_sync_task_group_destroy(dstg);
1159 1157
1160 1158 /*
1161 1159 * We could be racing against 'zfs release' or 'zfs destroy -d'
1162 1160 * on the origin snap, in which case we can get EBUSY if we
1163 1161 * needed to destroy the origin snap but were not ready to
1164 1162 * do so.
1165 1163 */
1166 1164 if (dsda.need_prep) {
1167 1165 ASSERT(err == EBUSY);
1168 1166 ASSERT(dsl_dir_is_clone(dd));
1169 1167 ASSERT(dsda.rm_origin == NULL);
1170 1168 }
1171 1169 } while (dsda.need_prep);
1172 1170
1173 1171 if (dsda.rm_origin != NULL)
1174 1172 dsl_dataset_disown(dsda.rm_origin, tag);
1175 1173
1176 1174 /* if it is successful, dsl_dir_destroy_sync will close the dd */
1177 1175 if (err)
1178 1176 dsl_dir_close(dd, FTAG);
1179 1177 out:
1180 1178 dsl_dataset_disown(ds, tag);
1181 1179 return (err);
1182 1180 }
1183 1181
1184 1182 blkptr_t *
1185 1183 dsl_dataset_get_blkptr(dsl_dataset_t *ds)
1186 1184 {
1187 1185 return (&ds->ds_phys->ds_bp);
1188 1186 }
1189 1187
1190 1188 void
1191 1189 dsl_dataset_set_blkptr(dsl_dataset_t *ds, blkptr_t *bp, dmu_tx_t *tx)
1192 1190 {
1193 1191 ASSERT(dmu_tx_is_syncing(tx));
1194 1192 /* If it's the meta-objset, set dp_meta_rootbp */
1195 1193 if (ds == NULL) {
1196 1194 tx->tx_pool->dp_meta_rootbp = *bp;
1197 1195 } else {
1198 1196 dmu_buf_will_dirty(ds->ds_dbuf, tx);
1199 1197 ds->ds_phys->ds_bp = *bp;
1200 1198 }
1201 1199 }
1202 1200
1203 1201 spa_t *
1204 1202 dsl_dataset_get_spa(dsl_dataset_t *ds)
1205 1203 {
1206 1204 return (ds->ds_dir->dd_pool->dp_spa);
1207 1205 }
1208 1206
1209 1207 void
1210 1208 dsl_dataset_dirty(dsl_dataset_t *ds, dmu_tx_t *tx)
1211 1209 {
1212 1210 dsl_pool_t *dp;
1213 1211
1214 1212 if (ds == NULL) /* this is the meta-objset */
1215 1213 return;
1216 1214
1217 1215 ASSERT(ds->ds_objset != NULL);
1218 1216
1219 1217 if (ds->ds_phys->ds_next_snap_obj != 0)
1220 1218 panic("dirtying snapshot!");
1221 1219
1222 1220 dp = ds->ds_dir->dd_pool;
1223 1221
1224 1222 if (txg_list_add(&dp->dp_dirty_datasets, ds, tx->tx_txg) == 0) {
1225 1223 /* up the hold count until we can be written out */
1226 1224 dmu_buf_add_ref(ds->ds_dbuf, ds);
1227 1225 }
1228 1226 }
1229 1227
1230 1228 /*
1231 1229 * The unique space in the head dataset can be calculated by subtracting
1232 1230  * the space used in the most recent snapshot that is still being used
1233 1231 * in this file system, from the space currently in use. To figure out
1234 1232 * the space in the most recent snapshot still in use, we need to take
1235 1233 * the total space used in the snapshot and subtract out the space that
1236 1234 * has been freed up since the snapshot was taken.
1237 1235 */
1238 1236 static void
1239 1237 dsl_dataset_recalc_head_uniq(dsl_dataset_t *ds)
1240 1238 {
1241 1239 uint64_t mrs_used;
1242 1240 uint64_t dlused, dlcomp, dluncomp;
1243 1241
1244 1242 ASSERT(!dsl_dataset_is_snapshot(ds));
1245 1243
1246 1244 if (ds->ds_phys->ds_prev_snap_obj != 0)
1247 1245 mrs_used = ds->ds_prev->ds_phys->ds_referenced_bytes;
1248 1246 else
1249 1247 mrs_used = 0;
1250 1248
1251 1249 dsl_deadlist_space(&ds->ds_deadlist, &dlused, &dlcomp, &dluncomp);
1252 1250
1253 1251 ASSERT3U(dlused, <=, mrs_used);
1254 1252 ds->ds_phys->ds_unique_bytes =
1255 1253 ds->ds_phys->ds_referenced_bytes - (mrs_used - dlused);
1256 1254
1257 1255 if (spa_version(ds->ds_dir->dd_pool->dp_spa) >=
1258 1256 SPA_VERSION_UNIQUE_ACCURATE)
1259 1257 ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
1260 1258 }
1261 1259
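Worked through with hypothetical sizes, the recalculation above is:

/*
 * referenced_bytes (head)                   = 100G
 * mrs_used  (snapshot's referenced_bytes)   =  80G
 * dlused    (freed since the snapshot)      =  30G
 *
 * space still shared with the snapshot = mrs_used - dlused = 50G
 * unique_bytes = referenced - shared   = 100G - 50G        = 50G
 */
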
1262 1260 struct killarg {
1263 1261 dsl_dataset_t *ds;
1264 1262 dmu_tx_t *tx;
1265 1263 };
1266 1264
1267 1265 /* ARGSUSED */
1268 1266 static int
1269 1267 kill_blkptr(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, arc_buf_t *pbuf,
1270 1268 const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
1271 1269 {
1272 1270 struct killarg *ka = arg;
1273 1271 dmu_tx_t *tx = ka->tx;
1274 1272
1275 1273 if (bp == NULL)
1276 1274 return (0);
1277 1275
1278 1276 if (zb->zb_level == ZB_ZIL_LEVEL) {
1279 1277 ASSERT(zilog != NULL);
1280 1278 /*
1281 1279 * It's a block in the intent log. It has no
1282 1280 * accounting, so just free it.
1283 1281 */
1284 1282 dsl_free(ka->tx->tx_pool, ka->tx->tx_txg, bp);
1285 1283 } else {
1286 1284 ASSERT(zilog == NULL);
1287 1285 ASSERT3U(bp->blk_birth, >, ka->ds->ds_phys->ds_prev_snap_txg);
1288 1286 (void) dsl_dataset_block_kill(ka->ds, bp, tx, B_FALSE);
1289 1287 }
1290 1288
1291 1289 return (0);
1292 1290 }
1293 1291
1294 1292 /* ARGSUSED */
1295 1293 static int
1296 1294 dsl_dataset_destroy_begin_check(void *arg1, void *arg2, dmu_tx_t *tx)
1297 1295 {
1298 1296 dsl_dataset_t *ds = arg1;
1299 1297 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
1300 1298 uint64_t count;
1301 1299 int err;
1302 1300
1303 1301 /*
1304 1302 * Can't delete a head dataset if there are snapshots of it.
1305 1303 * (Except if the only snapshots are from the branch we cloned
1306 1304 * from.)
1307 1305 */
1308 1306 if (ds->ds_prev != NULL &&
1309 1307 ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object)
1310 1308 return (EBUSY);
1311 1309
1312 1310 /*
1313 1311 * This is really a dsl_dir thing, but check it here so that
1314 1312 * we'll be less likely to leave this dataset inconsistent &
1315 1313 * nearly destroyed.
1316 1314 */
1317 1315 err = zap_count(mos, ds->ds_dir->dd_phys->dd_child_dir_zapobj, &count);
1318 1316 if (err)
1319 1317 return (err);
1320 1318 if (count != 0)
1321 1319 return (EEXIST);
1322 1320
1323 1321 return (0);
1324 1322 }
1325 1323
1326 1324 /* ARGSUSED */
1327 1325 static void
1328 1326 dsl_dataset_destroy_begin_sync(void *arg1, void *arg2, dmu_tx_t *tx)
1329 1327 {
1330 1328 dsl_dataset_t *ds = arg1;
1331 1329 dsl_pool_t *dp = ds->ds_dir->dd_pool;
1332 1330
1333 1331 /* Mark it as inconsistent on-disk, in case we crash */
1334 1332 dmu_buf_will_dirty(ds->ds_dbuf, tx);
1335 1333 ds->ds_phys->ds_flags |= DS_FLAG_INCONSISTENT;
1336 1334
1337 1335 spa_history_log_internal(LOG_DS_DESTROY_BEGIN, dp->dp_spa, tx,
1338 1336 "dataset = %llu", ds->ds_object);
1339 1337 }
1340 1338
1341 1339 static int
1342 1340 dsl_dataset_origin_check(struct dsl_ds_destroyarg *dsda, void *tag,
1343 1341 dmu_tx_t *tx)
1344 1342 {
1345 1343 dsl_dataset_t *ds = dsda->ds;
1346 1344 dsl_dataset_t *ds_prev = ds->ds_prev;
1347 1345
1348 1346 if (dsl_dataset_might_destroy_origin(ds_prev)) {
1349 1347 struct dsl_ds_destroyarg ndsda = {0};
1350 1348
1351 1349 /*
1352 1350 * If we're not prepared to remove the origin, don't remove
1353 1351 * the clone either.
1354 1352 */
1355 1353 if (dsda->rm_origin == NULL) {
1356 1354 dsda->need_prep = B_TRUE;
1357 1355 return (EBUSY);
1358 1356 }
1359 1357
1360 1358 ndsda.ds = ds_prev;
1361 1359 ndsda.is_origin_rm = B_TRUE;
1362 1360 return (dsl_dataset_destroy_check(&ndsda, tag, tx));
1363 1361 }
1364 1362
1365 1363 /*
1366 1364 * If we're not going to remove the origin after all,
1367 1365 * undo the open context setup.
1368 1366 */
1369 1367 if (dsda->rm_origin != NULL) {
1370 1368 dsl_dataset_disown(dsda->rm_origin, tag);
1371 1369 dsda->rm_origin = NULL;
1372 1370 }
1373 1371
1374 1372 return (0);
1375 1373 }
1376 1374
1377 1375 /*
1378 1376 * If you add new checks here, you may need to add
1379 1377 * additional checks to the "temporary" case in
1380 1378 * snapshot_check() in dmu_objset.c.
1381 1379 */
1382 1380 /* ARGSUSED */
1383 1381 int
1384 1382 dsl_dataset_destroy_check(void *arg1, void *arg2, dmu_tx_t *tx)
1385 1383 {
1386 1384 struct dsl_ds_destroyarg *dsda = arg1;
1387 1385 dsl_dataset_t *ds = dsda->ds;
1388 1386
1389 1387  /* we have an owner hold, so no one else can destroy us */
1390 1388 ASSERT(!DSL_DATASET_IS_DESTROYED(ds));
1391 1389
1392 1390 /*
1393 1391 * Only allow deferred destroy on pools that support it.
1394 1392 * NOTE: deferred destroy is only supported on snapshots.
1395 1393 */
1396 1394 if (dsda->defer) {
1397 1395 if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
1398 1396 SPA_VERSION_USERREFS)
1399 1397 return (ENOTSUP);
1400 1398 ASSERT(dsl_dataset_is_snapshot(ds));
1401 1399 return (0);
1402 1400 }
1403 1401
1404 1402 /*
1405 1403 * Can't delete a head dataset if there are snapshots of it.
1406 1404 * (Except if the only snapshots are from the branch we cloned
1407 1405 * from.)
1408 1406 */
1409 1407 if (ds->ds_prev != NULL &&
1410 1408 ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object)
1411 1409 return (EBUSY);
1412 1410
1413 1411 /*
1414 1412  * If we made changes this txg, traverse_dataset won't find
1415 1413 * them. Try again.
1416 1414 */
1417 1415 if (ds->ds_phys->ds_bp.blk_birth >= tx->tx_txg)
1418 1416 return (EAGAIN);
1419 1417
1420 1418 if (dsl_dataset_is_snapshot(ds)) {
1421 1419 /*
1422 1420 * If this snapshot has an elevated user reference count,
1423 1421 * we can't destroy it yet.
1424 1422 */
1425 1423 if (ds->ds_userrefs > 0 && !dsda->releasing)
1426 1424 return (EBUSY);
1427 1425
1428 1426 mutex_enter(&ds->ds_lock);
1429 1427 /*
1430 1428 * Can't delete a branch point. However, if we're destroying
1431 1429 * a clone and removing its origin due to it having a user
1432 1430 * hold count of 0 and having been marked for deferred destroy,
1433 1431 * it's OK for the origin to have a single clone.
1434 1432 */
1435 1433 if (ds->ds_phys->ds_num_children >
1436 1434 (dsda->is_origin_rm ? 2 : 1)) {
1437 1435 mutex_exit(&ds->ds_lock);
1438 1436 return (EEXIST);
1439 1437 }
1440 1438 mutex_exit(&ds->ds_lock);
1441 1439 } else if (dsl_dir_is_clone(ds->ds_dir)) {
1442 1440 return (dsl_dataset_origin_check(dsda, arg2, tx));
1443 1441 }
1444 1442
1445 1443 /* XXX we should do some i/o error checking... */
1446 1444 return (0);
1447 1445 }
1448 1446
1449 1447 struct refsarg {
1450 1448 kmutex_t lock;
1451 1449 boolean_t gone;
1452 1450 kcondvar_t cv;
1453 1451 };
1454 1452
1455 1453 /* ARGSUSED */
1456 1454 static void
1457 1455 dsl_dataset_refs_gone(dmu_buf_t *db, void *argv)
1458 1456 {
1459 1457 struct refsarg *arg = argv;
1460 1458
1461 1459 mutex_enter(&arg->lock);
1462 1460 arg->gone = TRUE;
1463 1461 cv_signal(&arg->cv);
1464 1462 mutex_exit(&arg->lock);
1465 1463 }
1466 1464
1467 1465 static void
1468 1466 dsl_dataset_drain_refs(dsl_dataset_t *ds, void *tag)
1469 1467 {
1470 1468 struct refsarg arg;
1471 1469
1472 1470 mutex_init(&arg.lock, NULL, MUTEX_DEFAULT, NULL);
1473 1471 cv_init(&arg.cv, NULL, CV_DEFAULT, NULL);
1474 1472 arg.gone = FALSE;
1475 1473 (void) dmu_buf_update_user(ds->ds_dbuf, ds, &arg, &ds->ds_phys,
1476 1474 dsl_dataset_refs_gone);
1477 1475 dmu_buf_rele(ds->ds_dbuf, tag);
1478 1476 mutex_enter(&arg.lock);
1479 1477 while (!arg.gone)
1480 1478 cv_wait(&arg.cv, &arg.lock);
1481 1479 ASSERT(arg.gone);
1482 1480 mutex_exit(&arg.lock);
1483 1481 ds->ds_dbuf = NULL;
1484 1482 ds->ds_phys = NULL;
1485 1483 mutex_destroy(&arg.lock);
1486 1484 cv_destroy(&arg.cv);
1487 1485 }
1488 1486
1489 1487 static void
1490 1488 remove_from_next_clones(dsl_dataset_t *ds, uint64_t obj, dmu_tx_t *tx)
1491 1489 {
1492 1490 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
1493 1491 uint64_t count;
1494 1492 int err;
1495 1493
1496 1494 ASSERT(ds->ds_phys->ds_num_children >= 2);
1497 1495 err = zap_remove_int(mos, ds->ds_phys->ds_next_clones_obj, obj, tx);
1498 1496 /*
1499 1497 * The err should not be ENOENT, but a bug in a previous version
1500 1498 * of the code could cause upgrade_clones_cb() to not set
1501 1499 * ds_next_snap_obj when it should, leading to a missing entry.
1502 1500 * If we knew that the pool was created after
1503 1501 * SPA_VERSION_NEXT_CLONES, we could assert that it isn't
1504 1502 * ENOENT. However, at least we can check that we don't have
1505 1503 * too many entries in the next_clones_obj even after failing to
1506 1504 * remove this one.
1507 1505 */
1508 1506 if (err != ENOENT) {
1509 1507 VERIFY3U(err, ==, 0);
1510 1508 }
1511 1509 ASSERT3U(0, ==, zap_count(mos, ds->ds_phys->ds_next_clones_obj,
1512 1510 &count));
1513 1511 ASSERT3U(count, <=, ds->ds_phys->ds_num_children - 2);
1514 1512 }
1515 1513
1516 1514 static void
1517 1515 dsl_dataset_remove_clones_key(dsl_dataset_t *ds, uint64_t mintxg, dmu_tx_t *tx)
1518 1516 {
1519 1517 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
1520 1518 zap_cursor_t zc;
1521 1519 zap_attribute_t za;
1522 1520
1523 1521 /*
1524 1522 * If it is the old version, dd_clones doesn't exist so we can't
1525 1523 * find the clones, but deadlist_remove_key() is a no-op so it
1526 1524 * doesn't matter.
1527 1525 */
1528 1526 if (ds->ds_dir->dd_phys->dd_clones == 0)
1529 1527 return;
1530 1528
1531 1529 for (zap_cursor_init(&zc, mos, ds->ds_dir->dd_phys->dd_clones);
1532 1530 zap_cursor_retrieve(&zc, &za) == 0;
1533 1531 zap_cursor_advance(&zc)) {
1534 1532 dsl_dataset_t *clone;
1535 1533
1536 1534 VERIFY3U(0, ==, dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
1537 1535 za.za_first_integer, FTAG, &clone));
1538 1536 if (clone->ds_dir->dd_origin_txg > mintxg) {
1539 1537 dsl_deadlist_remove_key(&clone->ds_deadlist,
1540 1538 mintxg, tx);
1541 1539 dsl_dataset_remove_clones_key(clone, mintxg, tx);
1542 1540 }
1543 1541 dsl_dataset_rele(clone, FTAG);
1544 1542 }
1545 1543 zap_cursor_fini(&zc);
1546 1544 }
1547 1545
1548 1546 struct process_old_arg {
1549 1547 dsl_dataset_t *ds;
1550 1548 dsl_dataset_t *ds_prev;
1551 1549 boolean_t after_branch_point;
1552 1550 zio_t *pio;
1553 1551 uint64_t used, comp, uncomp;
1554 1552 };
1555 1553
1556 1554 static int
1557 1555 process_old_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
1558 1556 {
1559 1557 struct process_old_arg *poa = arg;
1560 1558 dsl_pool_t *dp = poa->ds->ds_dir->dd_pool;
1561 1559
1562 1560 if (bp->blk_birth <= poa->ds->ds_phys->ds_prev_snap_txg) {
1563 1561 dsl_deadlist_insert(&poa->ds->ds_deadlist, bp, tx);
1564 1562 if (poa->ds_prev && !poa->after_branch_point &&
1565 1563 bp->blk_birth >
1566 1564 poa->ds_prev->ds_phys->ds_prev_snap_txg) {
1567 1565 poa->ds_prev->ds_phys->ds_unique_bytes +=
1568 1566 bp_get_dsize_sync(dp->dp_spa, bp);
1569 1567 }
1570 1568 } else {
1571 1569 poa->used += bp_get_dsize_sync(dp->dp_spa, bp);
1572 1570 poa->comp += BP_GET_PSIZE(bp);
1573 1571 poa->uncomp += BP_GET_UCSIZE(bp);
1574 1572 dsl_free_sync(poa->pio, dp, tx->tx_txg, bp);
1575 1573 }
1576 1574 return (0);
1577 1575 }
1578 1576
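Concretely, process_old_cb() partitions the dying snapshot's successor deadlist by birth time; with hypothetical txgs:

/*
 * ds_prev_snap_txg == 100 for the snapshot being destroyed (hypothetical):
 *   blk_birth  90 -> also referenced by an older snapshot, so it stays
 *                    dead-listed (re-inserted into ds's deadlist, and
 *                    credited to prev's unique_bytes where applicable)
 *   blk_birth 130 -> born after the older snapshot, so the dying
 *                    snapshot was its last referencer: free it now and
 *                    tally used/comp/uncomp against snapused
 */
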
1579 1577 static void
1580 1578 process_old_deadlist(dsl_dataset_t *ds, dsl_dataset_t *ds_prev,
1581 1579 dsl_dataset_t *ds_next, boolean_t after_branch_point, dmu_tx_t *tx)
1582 1580 {
1583 1581 struct process_old_arg poa = { 0 };
1584 1582 dsl_pool_t *dp = ds->ds_dir->dd_pool;
1585 1583 objset_t *mos = dp->dp_meta_objset;
1586 1584
1587 1585 ASSERT(ds->ds_deadlist.dl_oldfmt);
1588 1586 ASSERT(ds_next->ds_deadlist.dl_oldfmt);
1589 1587
1590 1588 poa.ds = ds;
1591 1589 poa.ds_prev = ds_prev;
1592 1590 poa.after_branch_point = after_branch_point;
1593 1591 poa.pio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
1594 1592 VERIFY3U(0, ==, bpobj_iterate(&ds_next->ds_deadlist.dl_bpobj,
1595 1593 process_old_cb, &poa, tx));
1596 1594 VERIFY3U(zio_wait(poa.pio), ==, 0);
1597 1595 ASSERT3U(poa.used, ==, ds->ds_phys->ds_unique_bytes);
1598 1596
1599 1597 /* change snapused */
1600 1598 dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
1601 1599 -poa.used, -poa.comp, -poa.uncomp, tx);
1602 1600
1603 1601 /* swap next's deadlist to our deadlist */
1604 1602 dsl_deadlist_close(&ds->ds_deadlist);
1605 1603 dsl_deadlist_close(&ds_next->ds_deadlist);
1606 1604 SWITCH64(ds_next->ds_phys->ds_deadlist_obj,
1607 1605 ds->ds_phys->ds_deadlist_obj);
1608 1606 dsl_deadlist_open(&ds->ds_deadlist, mos, ds->ds_phys->ds_deadlist_obj);
1609 1607 dsl_deadlist_open(&ds_next->ds_deadlist, mos,
1610 1608 ds_next->ds_phys->ds_deadlist_obj);
1611 1609 }
1612 1610
1613 1611 static int
1614 1612 old_synchronous_dataset_destroy(dsl_dataset_t *ds, dmu_tx_t *tx)
1615 1613 {
1616 1614 int err;
1617 1615 struct killarg ka;
1618 1616
1619 1617 /*
1620 1618 * Free everything that we point to (that's born after
1621 1619 * the previous snapshot, if we are a clone)
1622 1620 *
1623 1621 * NB: this should be very quick, because we already
1624 1622 * freed all the objects in open context.
1625 1623 */
1626 1624 ka.ds = ds;
1627 1625 ka.tx = tx;
1628 1626 err = traverse_dataset(ds,
1629 1627 ds->ds_phys->ds_prev_snap_txg, TRAVERSE_POST,
1630 1628 kill_blkptr, &ka);
1631 1629 ASSERT3U(err, ==, 0);
1632 1630 ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) || ds->ds_phys->ds_unique_bytes == 0);
1633 1631
1634 1632 return (err);
1635 1633 }
1636 1634
1637 1635 void
1638 1636 dsl_dataset_destroy_sync(void *arg1, void *tag, dmu_tx_t *tx)
1639 1637 {
1640 1638 struct dsl_ds_destroyarg *dsda = arg1;
1641 1639 dsl_dataset_t *ds = dsda->ds;
1642 1640 int err;
1643 1641 int after_branch_point = FALSE;
1644 1642 dsl_pool_t *dp = ds->ds_dir->dd_pool;
1645 1643 objset_t *mos = dp->dp_meta_objset;
1646 1644 dsl_dataset_t *ds_prev = NULL;
1647 1645 boolean_t wont_destroy;
1648 1646 uint64_t obj;
1649 1647
1650 1648 wont_destroy = (dsda->defer &&
1651 1649 (ds->ds_userrefs > 0 || ds->ds_phys->ds_num_children > 1));
1652 1650
1653 1651 ASSERT(ds->ds_owner || wont_destroy);
1654 1652 ASSERT(dsda->defer || ds->ds_phys->ds_num_children <= 1);
1655 1653 ASSERT(ds->ds_prev == NULL ||
1656 1654 ds->ds_prev->ds_phys->ds_next_snap_obj != ds->ds_object);
1657 1655 ASSERT3U(ds->ds_phys->ds_bp.blk_birth, <=, tx->tx_txg);
1658 1656
1659 1657 if (wont_destroy) {
1660 1658 ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
1661 1659 dmu_buf_will_dirty(ds->ds_dbuf, tx);
1662 1660 ds->ds_phys->ds_flags |= DS_FLAG_DEFER_DESTROY;
1663 1661 return;
1664 1662 }
1665 1663
1666 1664 /* signal any waiters that this dataset is going away */
1667 1665 mutex_enter(&ds->ds_lock);
1668 1666 ds->ds_owner = dsl_reaper;
1669 1667 cv_broadcast(&ds->ds_exclusive_cv);
1670 1668 mutex_exit(&ds->ds_lock);
1671 1669
1672 1670 /* Remove our reservation */
1673 1671 if (ds->ds_reserved != 0) {
1674 1672 dsl_prop_setarg_t psa;
1675 1673 uint64_t value = 0;
1676 1674
1677 1675 dsl_prop_setarg_init_uint64(&psa, "refreservation",
1678 1676 (ZPROP_SRC_NONE | ZPROP_SRC_LOCAL | ZPROP_SRC_RECEIVED),
1679 1677 &value);
1680 1678 psa.psa_effective_value = 0; /* predict default value */
1681 1679
1682 1680 dsl_dataset_set_reservation_sync(ds, &psa, tx);
1683 1681 ASSERT3U(ds->ds_reserved, ==, 0);
1684 1682 }
1685 1683
1686 1684 ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock));
1687 1685
1688 1686 dsl_scan_ds_destroyed(ds, tx);
1689 1687
1690 1688 obj = ds->ds_object;
1691 1689
1692 1690 if (ds->ds_phys->ds_prev_snap_obj != 0) {
1693 1691 if (ds->ds_prev) {
1694 1692 ds_prev = ds->ds_prev;
1695 1693 } else {
1696 1694 VERIFY(0 == dsl_dataset_hold_obj(dp,
1697 1695 ds->ds_phys->ds_prev_snap_obj, FTAG, &ds_prev));
1698 1696 }
1699 1697 after_branch_point =
1700 1698 (ds_prev->ds_phys->ds_next_snap_obj != obj);
1701 1699
1702 1700 dmu_buf_will_dirty(ds_prev->ds_dbuf, tx);
1703 1701 if (after_branch_point &&
1704 1702 ds_prev->ds_phys->ds_next_clones_obj != 0) {
1705 1703 remove_from_next_clones(ds_prev, obj, tx);
1706 1704 if (ds->ds_phys->ds_next_snap_obj != 0) {
1707 1705 VERIFY(0 == zap_add_int(mos,
1708 1706 ds_prev->ds_phys->ds_next_clones_obj,
1709 1707 ds->ds_phys->ds_next_snap_obj, tx));
1710 1708 }
1711 1709 }
1712 1710 if (after_branch_point &&
1713 1711 ds->ds_phys->ds_next_snap_obj == 0) {
1714 1712 /* This clone is toast. */
1715 1713 ASSERT(ds_prev->ds_phys->ds_num_children > 1);
1716 1714 ds_prev->ds_phys->ds_num_children--;
1717 1715
1718 1716 /*
1719 1717 * If the clone's origin has no other clones and no
1720 1718 * user holds, and has been marked for deferred
1721 1719 * deletion, then we should have done the necessary
1722 1720 * destroy setup for it.
1723 1721 */
1724 1722 if (ds_prev->ds_phys->ds_num_children == 1 &&
1725 1723 ds_prev->ds_userrefs == 0 &&
1726 1724 DS_IS_DEFER_DESTROY(ds_prev)) {
1727 1725 ASSERT3P(dsda->rm_origin, !=, NULL);
1728 1726 } else {
1729 1727 ASSERT3P(dsda->rm_origin, ==, NULL);
1730 1728 }
1731 1729 } else if (!after_branch_point) {
1732 1730 ds_prev->ds_phys->ds_next_snap_obj =
1733 1731 ds->ds_phys->ds_next_snap_obj;
1734 1732 }
1735 1733 }
1736 1734
1737 1735 if (dsl_dataset_is_snapshot(ds)) {
1738 1736 dsl_dataset_t *ds_next;
1739 1737 uint64_t old_unique;
1740 1738 uint64_t used = 0, comp = 0, uncomp = 0;
1741 1739
1742 1740 VERIFY(0 == dsl_dataset_hold_obj(dp,
1743 1741 ds->ds_phys->ds_next_snap_obj, FTAG, &ds_next));
1744 1742 ASSERT3U(ds_next->ds_phys->ds_prev_snap_obj, ==, obj);
1745 1743
1746 1744 old_unique = ds_next->ds_phys->ds_unique_bytes;
1747 1745
1748 1746 dmu_buf_will_dirty(ds_next->ds_dbuf, tx);
1749 1747 ds_next->ds_phys->ds_prev_snap_obj =
1750 1748 ds->ds_phys->ds_prev_snap_obj;
1751 1749 ds_next->ds_phys->ds_prev_snap_txg =
1752 1750 ds->ds_phys->ds_prev_snap_txg;
1753 1751 ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==,
1754 1752 ds_prev ? ds_prev->ds_phys->ds_creation_txg : 0);
1755 1753
1756 1754
1757 1755 if (ds_next->ds_deadlist.dl_oldfmt) {
1758 1756 process_old_deadlist(ds, ds_prev, ds_next,
1759 1757 after_branch_point, tx);
1760 1758 } else {
1761 1759 /* Adjust prev's unique space. */
1762 1760 if (ds_prev && !after_branch_point) {
1763 1761 dsl_deadlist_space_range(&ds_next->ds_deadlist,
1764 1762 ds_prev->ds_phys->ds_prev_snap_txg,
1765 1763 ds->ds_phys->ds_prev_snap_txg,
1766 1764 &used, &comp, &uncomp);
1767 1765 ds_prev->ds_phys->ds_unique_bytes += used;
1768 1766 }
1769 1767
1770 1768 /* Adjust snapused. */
1771 1769 dsl_deadlist_space_range(&ds_next->ds_deadlist,
1772 1770 ds->ds_phys->ds_prev_snap_txg, UINT64_MAX,
1773 1771 &used, &comp, &uncomp);
1774 1772 dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
1775 1773 -used, -comp, -uncomp, tx);
1776 1774
1777 1775 /* Move blocks to be freed to pool's free list. */
1778 1776 dsl_deadlist_move_bpobj(&ds_next->ds_deadlist,
1779 1777 &dp->dp_free_bpobj, ds->ds_phys->ds_prev_snap_txg,
1780 1778 tx);
1781 1779 dsl_dir_diduse_space(tx->tx_pool->dp_free_dir,
1782 1780 DD_USED_HEAD, used, comp, uncomp, tx);
1783 1781
1784 1782 /* Merge our deadlist into next's and free it. */
1785 1783 dsl_deadlist_merge(&ds_next->ds_deadlist,
1786 1784 ds->ds_phys->ds_deadlist_obj, tx);
1787 1785 }
1788 1786 dsl_deadlist_close(&ds->ds_deadlist);
1789 1787 dsl_deadlist_free(mos, ds->ds_phys->ds_deadlist_obj, tx);
1790 1788
1791 1789 /* Collapse range in clone heads */
1792 1790 dsl_dataset_remove_clones_key(ds,
1793 1791 ds->ds_phys->ds_creation_txg, tx);
1794 1792
1795 1793 if (dsl_dataset_is_snapshot(ds_next)) {
1796 1794 dsl_dataset_t *ds_nextnext;
1797 1795
1798 1796 /*
1799 1797 * Update next's unique to include blocks which
1800 1798 * were previously shared by only this snapshot
1801 1799 * and it. Those blocks will be born after the
1802 1800 * prev snap and before this snap, and will have
1803 1801 * died after the next snap and before the one
1804 1802 * after that (i.e. be on the snap-after-next's
1805 1803 * deadlist).
1806 1804 */
1807 1805 VERIFY(0 == dsl_dataset_hold_obj(dp,
1808 1806 ds_next->ds_phys->ds_next_snap_obj,
1809 1807 FTAG, &ds_nextnext));
1810 1808 dsl_deadlist_space_range(&ds_nextnext->ds_deadlist,
1811 1809 ds->ds_phys->ds_prev_snap_txg,
1812 1810 ds->ds_phys->ds_creation_txg,
1813 1811 &used, &comp, &uncomp);
1814 1812 ds_next->ds_phys->ds_unique_bytes += used;
1815 1813 dsl_dataset_rele(ds_nextnext, FTAG);
1816 1814 ASSERT3P(ds_next->ds_prev, ==, NULL);
1817 1815
1818 1816 /* Collapse range in this head. */
1819 1817 dsl_dataset_t *hds;
1820 1818 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
1821 1819 ds->ds_dir->dd_phys->dd_head_dataset_obj,
1822 1820 FTAG, &hds));
1823 1821 dsl_deadlist_remove_key(&hds->ds_deadlist,
1824 1822 ds->ds_phys->ds_creation_txg, tx);
1825 1823 dsl_dataset_rele(hds, FTAG);
1826 1824
1827 1825 } else {
1828 1826 ASSERT3P(ds_next->ds_prev, ==, ds);
1829 1827 dsl_dataset_drop_ref(ds_next->ds_prev, ds_next);
1830 1828 ds_next->ds_prev = NULL;
1831 1829 if (ds_prev) {
1832 1830 VERIFY(0 == dsl_dataset_get_ref(dp,
1833 1831 ds->ds_phys->ds_prev_snap_obj,
1834 1832 ds_next, &ds_next->ds_prev));
1835 1833 }
1836 1834
1837 1835 dsl_dataset_recalc_head_uniq(ds_next);
1838 1836
1839 1837 /*
1840 1838 * Reduce the amount of our unconsumed refreservation
1841 1839 * being charged to our parent by the amount of
1842 1840 * new unique data we have gained.
1843 1841 */
1844 1842 if (old_unique < ds_next->ds_reserved) {
1845 1843 int64_t mrsdelta;
1846 1844 uint64_t new_unique =
1847 1845 ds_next->ds_phys->ds_unique_bytes;
1848 1846
1849 1847 ASSERT(old_unique <= new_unique);
1850 1848 mrsdelta = MIN(new_unique - old_unique,
1851 1849 ds_next->ds_reserved - old_unique);
1852 1850 dsl_dir_diduse_space(ds->ds_dir,
1853 1851 DD_USED_REFRSRV, -mrsdelta, 0, 0, tx);
1854 1852 }
1855 1853 }
1856 1854 dsl_dataset_rele(ds_next, FTAG);
1857 1855 } else {
1858 1856 zfeature_info_t *async_destroy =
1859 1857 &spa_feature_table[SPA_FEATURE_ASYNC_DESTROY];
1860 1858
1861 1859 /*
1862 1860 * There's no next snapshot, so this is a head dataset.
1863 1861 * Destroy the deadlist. Unless it's a clone, the
1864 1862 * deadlist should be empty. (If it's a clone, it's
1865 1863 * safe to ignore the deadlist contents.)
1866 1864 */
1867 1865 dsl_deadlist_close(&ds->ds_deadlist);
1868 1866 dsl_deadlist_free(mos, ds->ds_phys->ds_deadlist_obj, tx);
1869 1867 ds->ds_phys->ds_deadlist_obj = 0;
1870 1868
1871 1869 if (!spa_feature_is_enabled(dp->dp_spa, async_destroy)) {
1872 1870 err = old_synchronous_dataset_destroy(ds, tx);
1873 1871 } else {
1874 1872 /*
1875 1873 * Move the bptree into the pool's list of trees to
1876 1874 * clean up and update space accounting information.
1877 1875 */
1878 1876 uint64_t used, comp, uncomp;
1879 1877
1880 1878 ASSERT(err == 0 || err == EBUSY);
1881 1879 if (!spa_feature_is_active(dp->dp_spa, async_destroy)) {
1882 1880 spa_feature_incr(dp->dp_spa, async_destroy, tx);
1883 1881 dp->dp_bptree_obj = bptree_alloc(
1884 1882 dp->dp_meta_objset, tx);
1885 1883 VERIFY(zap_add(dp->dp_meta_objset,
1886 1884 DMU_POOL_DIRECTORY_OBJECT,
1887 1885 DMU_POOL_BPTREE_OBJ, sizeof (uint64_t), 1,
1888 1886 &dp->dp_bptree_obj, tx) == 0);
1889 1887 }
1890 1888
1891 1889 used = ds->ds_dir->dd_phys->dd_used_bytes;
1892 1890 comp = ds->ds_dir->dd_phys->dd_compressed_bytes;
1893 1891 uncomp = ds->ds_dir->dd_phys->dd_uncompressed_bytes;
1894 1892
1895 1893 ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
1896 1894 ds->ds_phys->ds_unique_bytes == used);
1897 1895
1898 1896 bptree_add(dp->dp_meta_objset, dp->dp_bptree_obj,
1899 1897 &ds->ds_phys->ds_bp, ds->ds_phys->ds_prev_snap_txg,
1900 1898 used, comp, uncomp, tx);
1901 1899 dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD,
1902 1900 -used, -comp, -uncomp, tx);
1903 1901 dsl_dir_diduse_space(dp->dp_free_dir, DD_USED_HEAD,
1904 1902 used, comp, uncomp, tx);
1905 1903 }
1906 1904
1907 1905 if (ds->ds_prev != NULL) {
1908 1906 if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
1909 1907 VERIFY3U(0, ==, zap_remove_int(mos,
1910 1908 ds->ds_prev->ds_dir->dd_phys->dd_clones,
1911 1909 ds->ds_object, tx));
1912 1910 }
1913 1911 dsl_dataset_rele(ds->ds_prev, ds);
1914 1912 ds->ds_prev = ds_prev = NULL;
1915 1913 }
1916 1914 }
1917 1915
1918 1916 /*
1919 1917 * This must be done after the traverse_dataset(), because it will
1920 1918 * re-open the objset.
1921 1919 */
1922 1920 if (ds->ds_objset) {
1923 1921 dmu_objset_evict(ds->ds_objset);
1924 1922 ds->ds_objset = NULL;
1925 1923 }
1926 1924
1927 1925 if (ds->ds_dir->dd_phys->dd_head_dataset_obj == ds->ds_object) {
1928 1926 /* Erase the link in the dir */
1929 1927 dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
1930 1928 ds->ds_dir->dd_phys->dd_head_dataset_obj = 0;
1931 1929 ASSERT(ds->ds_phys->ds_snapnames_zapobj != 0);
1932 1930 err = zap_destroy(mos, ds->ds_phys->ds_snapnames_zapobj, tx);
1933 1931 ASSERT(err == 0);
1934 1932 } else {
1935 1933 /* remove from snapshot namespace */
1936 1934 dsl_dataset_t *ds_head;
1937 1935 ASSERT(ds->ds_phys->ds_snapnames_zapobj == 0);
1938 1936 VERIFY(0 == dsl_dataset_hold_obj(dp,
1939 1937 ds->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &ds_head));
1940 1938 VERIFY(0 == dsl_dataset_get_snapname(ds));
1941 1939 #ifdef ZFS_DEBUG
1942 1940 {
1943 1941 uint64_t val;
1944 1942
1945 1943 err = dsl_dataset_snap_lookup(ds_head,
1946 1944 ds->ds_snapname, &val);
1947 1945 ASSERT3U(err, ==, 0);
1948 1946 ASSERT3U(val, ==, obj);
1949 1947 }
1950 1948 #endif
1951 1949 err = dsl_dataset_snap_remove(ds_head, ds->ds_snapname, tx);
1952 1950 ASSERT(err == 0);
1953 1951 dsl_dataset_rele(ds_head, FTAG);
1954 1952 }
1955 1953
1956 1954 if (ds_prev && ds->ds_prev != ds_prev)
1957 1955 dsl_dataset_rele(ds_prev, FTAG);
1958 1956
1959 1957 spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);
1960 1958 spa_history_log_internal(LOG_DS_DESTROY, dp->dp_spa, tx,
1961 1959 "dataset = %llu", ds->ds_object);
1962 1960
1963 1961 if (ds->ds_phys->ds_next_clones_obj != 0) {
1964 1962 uint64_t count;
1965 1963 ASSERT(0 == zap_count(mos,
1966 1964 ds->ds_phys->ds_next_clones_obj, &count) && count == 0);
1967 1965 VERIFY(0 == dmu_object_free(mos,
1968 1966 ds->ds_phys->ds_next_clones_obj, tx));
1969 1967 }
1970 1968 if (ds->ds_phys->ds_props_obj != 0)
1971 1969 VERIFY(0 == zap_destroy(mos, ds->ds_phys->ds_props_obj, tx));
1972 1970 if (ds->ds_phys->ds_userrefs_obj != 0)
1973 1971 VERIFY(0 == zap_destroy(mos, ds->ds_phys->ds_userrefs_obj, tx));
1974 1972 dsl_dir_close(ds->ds_dir, ds);
1975 1973 ds->ds_dir = NULL;
1976 1974 dsl_dataset_drain_refs(ds, tag);
1977 1975 VERIFY(0 == dmu_object_free(mos, obj, tx));
1978 1976
1979 1977 if (dsda->rm_origin) {
1980 1978 /*
1981 1979 * Remove the origin of the clone we just destroyed.
1982 1980 */
1983 1981 struct dsl_ds_destroyarg ndsda = {0};
1984 1982
1985 1983 ndsda.ds = dsda->rm_origin;
1986 1984 dsl_dataset_destroy_sync(&ndsda, tag, tx);
1987 1985 }
1988 1986 }
1989 1987
1990 1988 static int
1991 1989 dsl_dataset_snapshot_reserve_space(dsl_dataset_t *ds, dmu_tx_t *tx)
1992 1990 {
1993 1991 uint64_t asize;
1994 1992
1995 1993 if (!dmu_tx_is_syncing(tx))
1996 1994 return (0);
1997 1995
1998 1996 /*
1999 1997 * If there's an fs-only reservation, any blocks that might become
2000 1998 * owned by the snapshot dataset must be accommodated by space
2001 1999 * outside of the reservation.
2002 2000 */
2003 2001 ASSERT(ds->ds_reserved == 0 || DS_UNIQUE_IS_ACCURATE(ds));
2004 2002 asize = MIN(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
2005 2003 if (asize > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE))
2006 2004 return (ENOSPC);
2007 2005
2008 2006 /*
2009 2007 * Propagate any reserved space for this snapshot to other
2010 2008 * snapshot checks in this sync group.
2011 2009 */
2012 2010 if (asize > 0)
2013 2011 dsl_dir_willuse_space(ds->ds_dir, asize, tx);
2014 2012
2015 2013 return (0);
2016 2014 }
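
The asize computed above is the slice of the refreservation the pending snapshot will consume: the snapshot takes ownership of the dataset's unique blocks, capped by the reservation, and that much space must exist outside the reservation. A standalone arithmetic sketch with made-up sizes:

#include <stdio.h>
#include <stdint.h>

#define	MIN(a, b)	((a) < (b) ? (a) : (b))

int
main(void)
{
	uint64_t reserved = 10ULL << 30;	/* 10 GB refreservation */
	uint64_t unique = 3ULL << 30;		/* 3 GB ds_unique_bytes */
	uint64_t asize = MIN(unique, reserved);

	/* The snapshot will absorb 3 GB of the reservation. */
	printf("need %llu bytes outside the reservation\n",
	    (unsigned long long)asize);
	return (0);
}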
2017 2015
2018 2016 int
2019 2017 dsl_dataset_snapshot_check(void *arg1, void *arg2, dmu_tx_t *tx)
2020 2018 {
2021 2019 dsl_dataset_t *ds = arg1;
2022 2020 const char *snapname = arg2;
2023 2021 int err;
2024 2022 uint64_t value;
2025 2023
2026 2024 /*
2027 2025 * We don't allow multiple snapshots of the same txg. If there
2028 2026 * is already one, try again.
2029 2027 */
2030 2028 if (ds->ds_phys->ds_prev_snap_txg >= tx->tx_txg)
2031 2029 return (EAGAIN);
2032 2030
2033 2031 /*
2034 2032 * Check for a conflicting snapshot name.
2035 2033 */
2036 2034 err = dsl_dataset_snap_lookup(ds, snapname, &value);
2037 2035 if (err == 0)
2038 2036 return (EEXIST);
2039 2037 if (err != ENOENT)
2040 2038 return (err);
2041 2039
2042 2040 /*
2043 2041 * Check that the resulting name is not too long: it consists of
2044 2042 * the dataset name's length + 1 for the @ sign + the snapshot name's length.
2045 2043 */
2046 2044 if (dsl_dataset_namelen(ds) + 1 + strlen(snapname) >= MAXNAMELEN)
2047 2045 return (ENAMETOOLONG);
2048 2046
2049 2047 err = dsl_dataset_snapshot_reserve_space(ds, tx);
2050 2048 if (err)
2051 2049 return (err);
2052 2050
2053 2051 ds->ds_trysnap_txg = tx->tx_txg;
2054 2052 return (0);
2055 2053 }
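
dsl_dataset_snapshot_check() and dsl_dataset_snapshot_sync() form a check/sync pair for the sync-task framework: the check runs as a preliminary pass in open context and again in syncing context, and must only validate; the sync applies the change. A sketch of how such a pair is dispatched through dsl_sync_task_do(), mirroring the call this file makes for snapshot rename; the wrapper itself is hypothetical (the in-tree snapshot path dispatches via dmu_objset_snapshot()):

/*
 * Hypothetical wrapper; "ds" is a held dataset.  The final argument
 * estimates how many blocks the sync func will dirty, which the
 * framework uses when reserving space.
 */
static int
take_one_snapshot(dsl_dataset_t *ds, char *snapname)
{
	return (dsl_sync_task_do(ds->ds_dir->dd_pool,
	    dsl_dataset_snapshot_check, dsl_dataset_snapshot_sync,
	    ds, snapname, 1));
}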
2056 2054
2057 2055 void
2058 2056 dsl_dataset_snapshot_sync(void *arg1, void *arg2, dmu_tx_t *tx)
2059 2057 {
2060 2058 dsl_dataset_t *ds = arg1;
2061 2059 const char *snapname = arg2;
2062 2060 dsl_pool_t *dp = ds->ds_dir->dd_pool;
2063 2061 dmu_buf_t *dbuf;
2064 2062 dsl_dataset_phys_t *dsphys;
2065 2063 uint64_t dsobj, crtxg;
2066 2064 objset_t *mos = dp->dp_meta_objset;
2067 2065 int err;
2068 2066
2069 2067 ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock));
2070 2068
2071 2069 /*
2072 2070 * The origin's ds_creation_txg has to be < TXG_INITIAL
2073 2071 */
2074 2072 if (strcmp(snapname, ORIGIN_DIR_NAME) == 0)
2075 2073 crtxg = 1;
2076 2074 else
2077 2075 crtxg = tx->tx_txg;
2078 2076
2079 2077 dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
2080 2078 DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
2081 2079 VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
2082 2080 dmu_buf_will_dirty(dbuf, tx);
2083 2081 dsphys = dbuf->db_data;
2084 2082 bzero(dsphys, sizeof (dsl_dataset_phys_t));
2085 2083 dsphys->ds_dir_obj = ds->ds_dir->dd_object;
2086 2084 dsphys->ds_fsid_guid = unique_create();
2087 2085 (void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
2088 2086 sizeof (dsphys->ds_guid));
2089 2087 dsphys->ds_prev_snap_obj = ds->ds_phys->ds_prev_snap_obj;
2090 2088 dsphys->ds_prev_snap_txg = ds->ds_phys->ds_prev_snap_txg;
2091 2089 dsphys->ds_next_snap_obj = ds->ds_object;
2092 2090 dsphys->ds_num_children = 1;
2093 2091 dsphys->ds_creation_time = gethrestime_sec();
2094 2092 dsphys->ds_creation_txg = crtxg;
2095 2093 dsphys->ds_deadlist_obj = ds->ds_phys->ds_deadlist_obj;
2096 2094 dsphys->ds_referenced_bytes = ds->ds_phys->ds_referenced_bytes;
2097 2095 dsphys->ds_compressed_bytes = ds->ds_phys->ds_compressed_bytes;
2098 2096 dsphys->ds_uncompressed_bytes = ds->ds_phys->ds_uncompressed_bytes;
2099 2097 dsphys->ds_flags = ds->ds_phys->ds_flags;
2100 2098 dsphys->ds_bp = ds->ds_phys->ds_bp;
2101 2099 dmu_buf_rele(dbuf, FTAG);
2102 2100
2103 2101 ASSERT3U(ds->ds_prev != 0, ==, ds->ds_phys->ds_prev_snap_obj != 0);
2104 2102 if (ds->ds_prev) {
2105 2103 uint64_t next_clones_obj =
2106 2104 ds->ds_prev->ds_phys->ds_next_clones_obj;
2107 2105 ASSERT(ds->ds_prev->ds_phys->ds_next_snap_obj ==
2108 2106 ds->ds_object ||
2109 2107 ds->ds_prev->ds_phys->ds_num_children > 1);
2110 2108 if (ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object) {
2111 2109 dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
2112 2110 ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==,
2113 2111 ds->ds_prev->ds_phys->ds_creation_txg);
2114 2112 ds->ds_prev->ds_phys->ds_next_snap_obj = dsobj;
2115 2113 } else if (next_clones_obj != 0) {
2116 2114 remove_from_next_clones(ds->ds_prev,
2117 2115 dsphys->ds_next_snap_obj, tx);
2118 2116 VERIFY3U(0, ==, zap_add_int(mos,
2119 2117 next_clones_obj, dsobj, tx));
2120 2118 }
2121 2119 }
2122 2120
2123 2121 /*
2124 2122 * If we have a reference-reservation on this dataset, we will
2125 2123 * need to increase the amount of refreservation being charged
2126 2124 * since our unique space is going to zero.
2127 2125 */
2128 2126 if (ds->ds_reserved) {
2129 2127 int64_t delta;
2130 2128 ASSERT(DS_UNIQUE_IS_ACCURATE(ds));
2131 2129 delta = MIN(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
2132 2130 dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV,
2133 2131 delta, 0, 0, tx);
2134 2132 }
2135 2133
2136 2134 dmu_buf_will_dirty(ds->ds_dbuf, tx);
2137 2135 zfs_dbgmsg("taking snapshot %s@%s/%llu; newkey=%llu",
2138 2136 ds->ds_dir->dd_myname, snapname, dsobj,
2139 2137 ds->ds_phys->ds_prev_snap_txg);
2140 2138 ds->ds_phys->ds_deadlist_obj = dsl_deadlist_clone(&ds->ds_deadlist,
2141 2139 UINT64_MAX, ds->ds_phys->ds_prev_snap_obj, tx);
2142 2140 dsl_deadlist_close(&ds->ds_deadlist);
2143 2141 dsl_deadlist_open(&ds->ds_deadlist, mos, ds->ds_phys->ds_deadlist_obj);
2144 2142 dsl_deadlist_add_key(&ds->ds_deadlist,
2145 2143 ds->ds_phys->ds_prev_snap_txg, tx);
2146 2144
2147 2145 ASSERT3U(ds->ds_phys->ds_prev_snap_txg, <, tx->tx_txg);
2148 2146 ds->ds_phys->ds_prev_snap_obj = dsobj;
2149 2147 ds->ds_phys->ds_prev_snap_txg = crtxg;
2150 2148 ds->ds_phys->ds_unique_bytes = 0;
2151 2149 if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
2152 2150 ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
2153 2151
2154 2152 err = zap_add(mos, ds->ds_phys->ds_snapnames_zapobj,
2155 2153 snapname, 8, 1, &dsobj, tx);
2156 2154 ASSERT(err == 0);
2157 2155
2158 2156 if (ds->ds_prev)
2159 2157 dsl_dataset_drop_ref(ds->ds_prev, ds);
2160 2158 VERIFY(0 == dsl_dataset_get_ref(dp,
2161 2159 ds->ds_phys->ds_prev_snap_obj, ds, &ds->ds_prev));
2162 2160
2163 2161 dsl_scan_ds_snapshotted(ds, tx);
2164 2162
2165 2163 dsl_dir_snap_cmtime_update(ds->ds_dir);
2166 2164
2167 2165 spa_history_log_internal(LOG_DS_SNAPSHOT, dp->dp_spa, tx,
2168 2166 "dataset = %llu", dsobj);
2169 2167 }
2170 2168
2171 2169 void
2172 2170 dsl_dataset_sync(dsl_dataset_t *ds, zio_t *zio, dmu_tx_t *tx)
2173 2171 {
2174 2172 ASSERT(dmu_tx_is_syncing(tx));
2175 2173 ASSERT(ds->ds_objset != NULL);
2176 2174 ASSERT(ds->ds_phys->ds_next_snap_obj == 0);
2177 2175
2178 2176 /*
2179 2177 * in case we had to change ds_fsid_guid when we opened it,
2180 2178 * sync it out now.
2181 2179 */
2182 2180 dmu_buf_will_dirty(ds->ds_dbuf, tx);
2183 2181 ds->ds_phys->ds_fsid_guid = ds->ds_fsid_guid;
2184 2182
2185 2183 dsl_dir_dirty(ds->ds_dir, tx);
2186 2184 dmu_objset_sync(ds->ds_objset, zio, tx);
2187 2185 }
2188 2186
2189 2187 static void
2190 2188 get_clones_stat(dsl_dataset_t *ds, nvlist_t *nv)
2191 2189 {
2192 2190 uint64_t count = 0;
2193 2191 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
2194 2192 zap_cursor_t zc;
2195 2193 zap_attribute_t za;
2196 2194 nvlist_t *propval;
2197 2195 nvlist_t *val;
2198 2196
2199 2197 rw_enter(&ds->ds_dir->dd_pool->dp_config_rwlock, RW_READER);
2200 2198 VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2201 2199 VERIFY(nvlist_alloc(&val, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2202 2200
2203 2201 /*
2204 2202 * There may be missing entries in ds_next_clones_obj
2205 2203 * due to a bug in a previous version of the code.
2206 2204 * Only trust it if it has the right number of entries.
2207 2205 */
2208 2206 if (ds->ds_phys->ds_next_clones_obj != 0) {
2209 2207 ASSERT3U(0, ==, zap_count(mos, ds->ds_phys->ds_next_clones_obj,
2210 2208 &count));
2211 2209 }
2212 2210 if (count != ds->ds_phys->ds_num_children - 1) {
2213 2211 goto fail;
2214 2212 }
2215 2213 for (zap_cursor_init(&zc, mos, ds->ds_phys->ds_next_clones_obj);
2216 2214 zap_cursor_retrieve(&zc, &za) == 0;
2217 2215 zap_cursor_advance(&zc)) {
2218 2216 dsl_dataset_t *clone;
2219 2217 char buf[ZFS_MAXNAMELEN];
2220 2218 /*
2221 2219 * Even though we hold the dp_config_rwlock, the dataset
2222 2220 * may fail to open, returning ENOENT. If there is a
2223 2221 * thread concurrently attempting to destroy this
2224 2222 * dataset, it will have the ds_rwlock held for
2225 2223 * RW_WRITER. Our call to dsl_dataset_hold_obj() ->
2226 2224 * dsl_dataset_hold_ref() will fail its
2227 2225 * rw_tryenter(&ds->ds_rwlock, RW_READER), drop the
2228 2226 * dp_config_rwlock, and wait for the destroy to progress
2229 2227 * and signal ds_exclusive_cv. If the destroy was
2230 2228 * successful, we will see that
2231 2229 * DSL_DATASET_IS_DESTROYED(), and return ENOENT.
2232 2230 */
2233 2231 if (dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
2234 2232 za.za_first_integer, FTAG, &clone) != 0)
2235 2233 continue;
2236 2234 dsl_dir_name(clone->ds_dir, buf);
2237 2235 VERIFY(nvlist_add_boolean(val, buf) == 0);
2238 2236 dsl_dataset_rele(clone, FTAG);
2239 2237 }
2240 2238 zap_cursor_fini(&zc);
2241 2239 VERIFY(nvlist_add_nvlist(propval, ZPROP_VALUE, val) == 0);
2242 2240 VERIFY(nvlist_add_nvlist(nv, zfs_prop_to_name(ZFS_PROP_CLONES),
2243 2241 propval) == 0);
2244 2242 fail:
2245 2243 nvlist_free(val);
2246 2244 nvlist_free(propval);
2247 2245 rw_exit(&ds->ds_dir->dd_pool->dp_config_rwlock);
2248 2246 }
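
The nvlist built by get_clones_stat() is two levels deep: the clones property maps to an nvlist whose ZPROP_VALUE entry holds one boolean-flag pair per clone name. A sketch of walking the result, assuming the standard libnvpair API; "clones" and "value" are the literal strings behind zfs_prop_to_name(ZFS_PROP_CLONES) and ZPROP_VALUE:

#include <libnvpair.h>
#include <stdio.h>

static void
print_clones(nvlist_t *nv)
{
	nvlist_t *propval, *val;
	nvpair_t *pair;

	if (nvlist_lookup_nvlist(nv, "clones", &propval) != 0 ||
	    nvlist_lookup_nvlist(propval, "value", &val) != 0)
		return;
	/* Each pair's name is a clone dataset name; the value is unused. */
	for (pair = nvlist_next_nvpair(val, NULL); pair != NULL;
	    pair = nvlist_next_nvpair(val, pair))
		(void) printf("clone: %s\n", nvpair_name(pair));
}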
2249 2247
2250 2248 void
2251 2249 dsl_dataset_stats(dsl_dataset_t *ds, nvlist_t *nv)
2252 2250 {
2253 2251 uint64_t refd, avail, uobjs, aobjs, ratio;
2254 2252
2255 2253 dsl_dir_stats(ds->ds_dir, nv);
2256 2254
2257 2255 dsl_dataset_space(ds, &refd, &avail, &uobjs, &aobjs);
2258 2256 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_AVAILABLE, avail);
2259 2257 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFERENCED, refd);
2260 2258
2261 2259 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATION,
2262 2260 ds->ds_phys->ds_creation_time);
2263 2261 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATETXG,
2264 2262 ds->ds_phys->ds_creation_txg);
2265 2263 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFQUOTA,
2266 2264 ds->ds_quota);
2267 2265 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRESERVATION,
2268 2266 ds->ds_reserved);
2269 2267 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_GUID,
2270 2268 ds->ds_phys->ds_guid);
2271 2269 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_UNIQUE,
2272 2270 ds->ds_phys->ds_unique_bytes);
2273 2271 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_OBJSETID,
2274 2272 ds->ds_object);
2275 2273 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USERREFS,
2276 2274 ds->ds_userrefs);
2277 2275 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_DEFER_DESTROY,
2278 2276 DS_IS_DEFER_DESTROY(ds) ? 1 : 0);
2279 2277
2280 2278 if (ds->ds_phys->ds_prev_snap_obj != 0) {
2281 2279 uint64_t written, comp, uncomp;
2282 2280 dsl_pool_t *dp = ds->ds_dir->dd_pool;
2283 2281 dsl_dataset_t *prev;
2284 2282
2285 2283 rw_enter(&dp->dp_config_rwlock, RW_READER);
2286 2284 int err = dsl_dataset_hold_obj(dp,
2287 2285 ds->ds_phys->ds_prev_snap_obj, FTAG, &prev);
2288 2286 rw_exit(&dp->dp_config_rwlock);
2289 2287 if (err == 0) {
2290 2288 err = dsl_dataset_space_written(prev, ds, &written,
2291 2289 &comp, &uncomp);
2292 2290 dsl_dataset_rele(prev, FTAG);
2293 2291 if (err == 0) {
2294 2292 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_WRITTEN,
2295 2293 written);
2296 2294 }
2297 2295 }
2298 2296 }
2299 2297
2300 2298 ratio = ds->ds_phys->ds_compressed_bytes == 0 ? 100 :
2301 2299 (ds->ds_phys->ds_uncompressed_bytes * 100 /
2302 2300 ds->ds_phys->ds_compressed_bytes);
2303 2301 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRATIO, ratio);
2304 2302
2305 2303 if (ds->ds_phys->ds_next_snap_obj) {
2306 2304 /*
2307 2305 * This is a snapshot; override the dd's space used with
2308 2306 * our unique space and compression ratio.
2309 2307 */
2310 2308 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USED,
2311 2309 ds->ds_phys->ds_unique_bytes);
2312 2310 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_COMPRESSRATIO, ratio);
2313 2311
2314 2312 get_clones_stat(ds, nv);
2315 2313 }
2316 2314 }
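
The refratio in dsl_dataset_stats() is the uncompressed-to-compressed quotient scaled by 100, defaulting to 100 (a 1.00x ratio) when ds_compressed_bytes is zero. A worked example with made-up sizes:

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint64_t uncompressed = 12ULL << 30;	/* 12 GB referenced */
	uint64_t compressed = 4ULL << 30;	/* 4 GB on disk */
	uint64_t ratio = compressed == 0 ? 100 :
	    uncompressed * 100 / compressed;

	/* Prints "refratio = 300 (3.00x)". */
	printf("refratio = %llu (%.2fx)\n",
	    (unsigned long long)ratio, ratio / 100.0);
	return (0);
}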
2317 2315
2318 2316 void
2319 2317 dsl_dataset_fast_stat(dsl_dataset_t *ds, dmu_objset_stats_t *stat)
2320 2318 {
2321 2319 stat->dds_creation_txg = ds->ds_phys->ds_creation_txg;
2322 2320 stat->dds_inconsistent = ds->ds_phys->ds_flags & DS_FLAG_INCONSISTENT;
2323 2321 stat->dds_guid = ds->ds_phys->ds_guid;
2324 2322 if (ds->ds_phys->ds_next_snap_obj) {
2325 2323 stat->dds_is_snapshot = B_TRUE;
2326 2324 stat->dds_num_clones = ds->ds_phys->ds_num_children - 1;
2327 2325 } else {
2328 2326 stat->dds_is_snapshot = B_FALSE;
2329 2327 stat->dds_num_clones = 0;
2330 2328 }
2331 2329
2332 2330 /* clone origin is really a dsl_dir thing... */
2333 2331 rw_enter(&ds->ds_dir->dd_pool->dp_config_rwlock, RW_READER);
2334 2332 if (dsl_dir_is_clone(ds->ds_dir)) {
2335 2333 dsl_dataset_t *ods;
2336 2334
2337 2335 VERIFY(0 == dsl_dataset_get_ref(ds->ds_dir->dd_pool,
2338 2336 ds->ds_dir->dd_phys->dd_origin_obj, FTAG, &ods));
2339 2337 dsl_dataset_name(ods, stat->dds_origin);
2340 2338 dsl_dataset_drop_ref(ods, FTAG);
2341 2339 } else {
2342 2340 stat->dds_origin[0] = '\0';
2343 2341 }
2344 2342 rw_exit(&ds->ds_dir->dd_pool->dp_config_rwlock);
2345 2343 }
2346 2344
2347 2345 uint64_t
2348 2346 dsl_dataset_fsid_guid(dsl_dataset_t *ds)
2349 2347 {
2350 2348 return (ds->ds_fsid_guid);
2351 2349 }
2352 2350
2353 2351 void
2354 2352 dsl_dataset_space(dsl_dataset_t *ds,
2355 2353 uint64_t *refdbytesp, uint64_t *availbytesp,
2356 2354 uint64_t *usedobjsp, uint64_t *availobjsp)
2357 2355 {
2358 2356 *refdbytesp = ds->ds_phys->ds_referenced_bytes;
2359 2357 *availbytesp = dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE);
2360 2358 if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes)
2361 2359 *availbytesp += ds->ds_reserved - ds->ds_phys->ds_unique_bytes;
2362 2360 if (ds->ds_quota != 0) {
2363 2361 /*
2364 2362 * Adjust available bytes according to refquota
2365 2363 */
2366 2364 if (*refdbytesp < ds->ds_quota)
2367 2365 *availbytesp = MIN(*availbytesp,
2368 2366 ds->ds_quota - *refdbytesp);
2369 2367 else
2370 2368 *availbytesp = 0;
2371 2369 }
2372 2370 *usedobjsp = ds->ds_phys->ds_bp.blk_fill;
2373 2371 *availobjsp = DN_MAX_OBJECT - *usedobjsp;
2374 2372 }
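
Per the refquota clamp in dsl_dataset_space(), the space available to a dataset is the smaller of the pool's free space and the headroom left under its refquota, and zero once the quota is reached. A worked example with made-up numbers:

#include <stdio.h>
#include <stdint.h>

#define	MIN(a, b)	((a) < (b) ? (a) : (b))

int
main(void)
{
	uint64_t refd = 7ULL << 30;	/* referenced bytes */
	uint64_t avail = 50ULL << 30;	/* pool free space */
	uint64_t quota = 10ULL << 30;	/* refquota */

	if (quota != 0)
		avail = (refd < quota) ? MIN(avail, quota - refd) : 0;
	/* Prints "available = 3 GB": quota headroom, not pool space. */
	printf("available = %llu GB\n", (unsigned long long)(avail >> 30));
	return (0);
}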
2375 2373
2376 2374 boolean_t
2377 2375 dsl_dataset_modified_since_lastsnap(dsl_dataset_t *ds)
2378 2376 {
2379 2377 dsl_pool_t *dp = ds->ds_dir->dd_pool;
2380 2378
2381 2379 ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
2382 2380 dsl_pool_sync_context(dp));
2383 2381 if (ds->ds_prev == NULL)
2384 2382 return (B_FALSE);
2385 2383 if (ds->ds_phys->ds_bp.blk_birth >
2386 2384 ds->ds_prev->ds_phys->ds_creation_txg) {
2387 2385 objset_t *os, *os_prev;
2388 2386 /*
2389 2387 * It may be that only the ZIL differs, because it was
2390 2388 * reset in the head. Don't count that as being
2391 2389 * modified.
2392 2390 */
2393 2391 if (dmu_objset_from_ds(ds, &os) != 0)
2394 2392 return (B_TRUE);
2395 2393 if (dmu_objset_from_ds(ds->ds_prev, &os_prev) != 0)
2396 2394 return (B_TRUE);
2397 2395 return (bcmp(&os->os_phys->os_meta_dnode,
2398 2396 &os_prev->os_phys->os_meta_dnode,
2399 2397 sizeof (os->os_phys->os_meta_dnode)) != 0);
2400 2398 }
2401 2399 return (B_FALSE);
2402 2400 }
2403 2401
2404 2402 /* ARGSUSED */
2405 2403 static int
2406 2404 dsl_dataset_snapshot_rename_check(void *arg1, void *arg2, dmu_tx_t *tx)
2407 2405 {
2408 2406 dsl_dataset_t *ds = arg1;
2409 2407 char *newsnapname = arg2;
2410 2408 dsl_dir_t *dd = ds->ds_dir;
2411 2409 dsl_dataset_t *hds;
2412 2410 uint64_t val;
2413 2411 int err;
2414 2412
2415 2413 err = dsl_dataset_hold_obj(dd->dd_pool,
2416 2414 dd->dd_phys->dd_head_dataset_obj, FTAG, &hds);
2417 2415 if (err)
2418 2416 return (err);
2419 2417
2420 2418 /* new name better not be in use */
2421 2419 err = dsl_dataset_snap_lookup(hds, newsnapname, &val);
2422 2420 dsl_dataset_rele(hds, FTAG);
2423 2421
2424 2422 if (err == 0)
2425 2423 err = EEXIST;
2426 2424 else if (err == ENOENT)
2427 2425 err = 0;
2428 2426
2429 2427 /* dataset name + 1 for the "@" + the new snapshot name must fit */
2430 2428 if (dsl_dir_namelen(ds->ds_dir) + 1 + strlen(newsnapname) >= MAXNAMELEN)
2431 2429 err = ENAMETOOLONG;
2432 2430
2433 2431 return (err);
2434 2432 }
2435 2433
2436 2434 static void
2437 2435 dsl_dataset_snapshot_rename_sync(void *arg1, void *arg2, dmu_tx_t *tx)
2438 2436 {
2439 2437 dsl_dataset_t *ds = arg1;
2440 2438 const char *newsnapname = arg2;
2441 2439 dsl_dir_t *dd = ds->ds_dir;
2442 2440 objset_t *mos = dd->dd_pool->dp_meta_objset;
2443 2441 dsl_dataset_t *hds;
2444 2442 int err;
2445 2443
2446 2444 ASSERT(ds->ds_phys->ds_next_snap_obj != 0);
2447 2445
2448 2446 VERIFY(0 == dsl_dataset_hold_obj(dd->dd_pool,
2449 2447 dd->dd_phys->dd_head_dataset_obj, FTAG, &hds));
2450 2448
2451 2449 VERIFY(0 == dsl_dataset_get_snapname(ds));
2452 2450 err = dsl_dataset_snap_remove(hds, ds->ds_snapname, tx);
2453 2451 ASSERT3U(err, ==, 0);
2454 2452 mutex_enter(&ds->ds_lock);
2455 2453 (void) strcpy(ds->ds_snapname, newsnapname);
2456 2454 mutex_exit(&ds->ds_lock);
2457 2455 err = zap_add(mos, hds->ds_phys->ds_snapnames_zapobj,
2458 2456 ds->ds_snapname, 8, 1, &ds->ds_object, tx);
2459 2457 ASSERT3U(err, ==, 0);
2460 2458
2461 2459 spa_history_log_internal(LOG_DS_RENAME, dd->dd_pool->dp_spa, tx,
2462 2460 "dataset = %llu", ds->ds_object);
2463 2461 dsl_dataset_rele(hds, FTAG);
2464 2462 }
2465 2463
2466 2464 struct renamesnaparg {
2467 2465 dsl_sync_task_group_t *dstg;
2468 2466 char failed[MAXPATHLEN];
2469 2467 char *oldsnap;
2470 2468 char *newsnap;
2471 2469 };
2472 2470
2473 2471 static int
2474 2472 dsl_snapshot_rename_one(const char *name, void *arg)
2475 2473 {
2476 2474 struct renamesnaparg *ra = arg;
2477 2475 dsl_dataset_t *ds = NULL;
2478 2476 char *snapname;
2479 2477 int err;
2480 2478
2481 2479 snapname = kmem_asprintf("%s@%s", name, ra->oldsnap);
2482 2480 (void) strlcpy(ra->failed, snapname, sizeof (ra->failed));
2483 2481
2484 2482 /*
2485 2483 * For recursive snapshot renames the parent won't be changing
2486 2484 * so we just pass name for both the to/from argument.
2487 2485 * so we just pass name for both the to/from arguments.
2488 2486 err = zfs_secpolicy_rename_perms(snapname, snapname, CRED());
2489 2487 if (err != 0) {
2490 2488 strfree(snapname);
2491 2489 return (err == ENOENT ? 0 : err);
2492 2490 }
2493 2491
2494 2492 #ifdef _KERNEL
2495 2493 /*
2496 2494 * For each filesystem undergoing rename, we'll need to unmount it.
2497 2495 */
2498 2496 (void) zfs_unmount_snap(snapname, NULL);
2499 2497 #endif
2500 2498 err = dsl_dataset_hold(snapname, ra->dstg, &ds);
2501 2499 strfree(snapname);
2502 2500 if (err != 0)
2503 2501 return (err == ENOENT ? 0 : err);
2504 2502
2505 2503 dsl_sync_task_create(ra->dstg, dsl_dataset_snapshot_rename_check,
2506 2504 dsl_dataset_snapshot_rename_sync, ds, ra->newsnap, 0);
2507 2505
2508 2506 return (0);
2509 2507 }
2510 2508
2511 2509 static int
2512 2510 dsl_recursive_rename(char *oldname, const char *newname)
2513 2511 {
2514 2512 int err;
2515 2513 struct renamesnaparg *ra;
2516 2514 dsl_sync_task_t *dst;
2517 2515 spa_t *spa;
2518 2516 char *cp, *fsname = spa_strdup(oldname);
2519 2517 int len = strlen(oldname) + 1;
2520 2518
2521 2519 /* truncate the snapshot name to get the fsname */
2522 2520 cp = strchr(fsname, '@');
2523 2521 *cp = '\0';
2524 2522
2525 2523 err = spa_open(fsname, &spa, FTAG);
2526 2524 if (err) {
2527 2525 kmem_free(fsname, len);
2528 2526 return (err);
2529 2527 }
2530 2528 ra = kmem_alloc(sizeof (struct renamesnaparg), KM_SLEEP);
2531 2529 ra->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
2532 2530
2533 2531 ra->oldsnap = strchr(oldname, '@') + 1;
2534 2532 ra->newsnap = strchr(newname, '@') + 1;
2535 2533 *ra->failed = '\0';
2536 2534
2537 2535 err = dmu_objset_find(fsname, dsl_snapshot_rename_one, ra,
2538 2536 DS_FIND_CHILDREN);
2539 2537 kmem_free(fsname, len);
2540 2538
2541 2539 if (err == 0) {
2542 2540 err = dsl_sync_task_group_wait(ra->dstg);
2543 2541 }
2544 2542
2545 2543 for (dst = list_head(&ra->dstg->dstg_tasks); dst;
2546 2544 dst = list_next(&ra->dstg->dstg_tasks, dst)) {
2547 2545 dsl_dataset_t *ds = dst->dst_arg1;
2548 2546 if (dst->dst_err) {
2549 2547 dsl_dir_name(ds->ds_dir, ra->failed);
2550 2548 (void) strlcat(ra->failed, "@", sizeof (ra->failed));
2551 2549 (void) strlcat(ra->failed, ra->newsnap,
2552 2550 sizeof (ra->failed));
2553 2551 }
2554 2552 dsl_dataset_rele(ds, ra->dstg);
2555 2553 }
2556 2554
2557 2555 if (err)
2558 2556 (void) strlcpy(oldname, ra->failed, sizeof (ra->failed));
2559 2557
2560 2558 dsl_sync_task_group_destroy(ra->dstg);
2561 2559 kmem_free(ra, sizeof (struct renamesnaparg));
2562 2560 spa_close(spa, FTAG);
2563 2561 return (err);
2564 2562 }
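
dsl_recursive_rename() has a gather-then-commit shape: dmu_objset_find() queues one check/sync task per child into a single group, the group runs as one unit, and the loop over dstg_tasks copies a failing snapshot's name into ra->failed for the caller to report. A user-space analogue of that error-collection loop (all names here are hypothetical):

#include <stdio.h>

struct task {
	const char *t_name;
	int t_err;
};

/*
 * Walk completed tasks; remember an error and the name of a task that
 * hit it, the way the loop above fills ra->failed.  As in the
 * original, a later failure overwrites an earlier one.
 */
static int
collect_errors(const struct task *tasks, int n, char *failed, size_t flen)
{
	int err = 0;

	for (int i = 0; i < n; i++) {
		if (tasks[i].t_err != 0) {
			err = tasks[i].t_err;
			(void) snprintf(failed, flen, "%s", tasks[i].t_name);
		}
	}
	return (err);
}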
2565 2563
2566 2564 static int
2567 2565 dsl_valid_rename(const char *oldname, void *arg)
2568 2566 {
2569 2567 int delta = *(int *)arg;
2570 2568
2571 2569 if (strlen(oldname) + delta >= MAXNAMELEN)
2572 2570 return (ENAMETOOLONG);
2573 2571
2574 2572 return (0);
2575 2573 }
2576 2574
2577 2575 #pragma weak dmu_objset_rename = dsl_dataset_rename
2578 2576 int
2579 2577 dsl_dataset_rename(char *oldname, const char *newname, boolean_t recursive)
2580 2578 {
2581 2579 dsl_dir_t *dd;
2582 2580 dsl_dataset_t *ds;
2583 2581 const char *tail;
2584 2582 int err;
2585 2583
2586 2584 err = dsl_dir_open(oldname, FTAG, &dd, &tail);
2587 2585 if (err)
2588 2586 return (err);
2589 2587
2590 2588 if (tail == NULL) {
2591 2589 int delta = strlen(newname) - strlen(oldname);
2592 2590
2593 2591 /* if we're growing, validate child name lengths */
2594 2592 if (delta > 0)
2595 2593 err = dmu_objset_find(oldname, dsl_valid_rename,
2596 2594 &delta, DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
2597 2595
2598 2596 if (err == 0)
2599 2597 err = dsl_dir_rename(dd, newname);
2600 2598 dsl_dir_close(dd, FTAG);
2601 2599 return (err);
2602 2600 }
2603 2601
2604 2602 if (tail[0] != '@') {
2605 2603 /* the name ended in a nonexistent component */
2606 2604 dsl_dir_close(dd, FTAG);
2607 2605 return (ENOENT);
2608 2606 }
2609 2607
2610 2608 dsl_dir_close(dd, FTAG);
2611 2609
2612 2610 /* new name must be snapshot in same filesystem */
2613 2611 tail = strchr(newname, '@');
2614 2612 if (tail == NULL)
2615 2613 return (EINVAL);
2616 2614 tail++;
2617 2615 if (strncmp(oldname, newname, tail - newname) != 0)
2618 2616 return (EXDEV);
2619 2617
2620 2618 if (recursive) {
2621 2619 err = dsl_recursive_rename(oldname, newname);
2622 2620 } else {
2623 2621 err = dsl_dataset_hold(oldname, FTAG, &ds);
2624 2622 if (err)
2625 2623 return (err);
2626 2624
2627 2625 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
2628 2626 dsl_dataset_snapshot_rename_check,
2629 2627 dsl_dataset_snapshot_rename_sync, ds, (char *)tail, 1);
2630 2628
2631 2629 dsl_dataset_rele(ds, FTAG);
2632 2630 }
2633 2631
2634 2632 return (err);
2635 2633 }
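
The snapshot-rename validation in dsl_dataset_rename() reduces to two string checks: the new name must contain '@', and everything up to and including the '@' must match the old name, otherwise the rename would cross filesystems. A standalone restatement:

#include <errno.h>
#include <stdio.h>
#include <string.h>

static int
check_snap_rename(const char *oldname, const char *newname)
{
	const char *tail = strchr(newname, '@');

	if (tail == NULL)
		return (EINVAL);	/* new name is not a snapshot */
	tail++;
	/* Compare the filesystem prefix, '@' included. */
	if (strncmp(oldname, newname, tail - newname) != 0)
		return (EXDEV);		/* would cross filesystems */
	return (0);
}

int
main(void)
{
	printf("%d\n", check_snap_rename("pool/fs@a", "pool/fs@b"));	/* 0 */
	printf("%d\n", check_snap_rename("pool/fs@a", "pool/o@b"));	/* EXDEV */
	return (0);
}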
2636 2634
2637 2635 struct promotenode {
2638 2636 list_node_t link;
2639 2637 dsl_dataset_t *ds;
2640 2638 };
2641 2639
2642 2640 struct promotearg {
2643 2641 list_t shared_snaps, origin_snaps, clone_snaps;
2644 2642 dsl_dataset_t *origin_origin;
2645 2643 uint64_t used, comp, uncomp, unique, cloneusedsnap, originusedsnap;
2646 2644 char *err_ds;
2647 2645 };
2648 2646
2649 2647 static int snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep);
2650 2648 static boolean_t snaplist_unstable(list_t *l);
2651 2649
2652 2650 static int
2653 2651 dsl_dataset_promote_check(void *arg1, void *arg2, dmu_tx_t *tx)
2654 2652 {
2655 2653 dsl_dataset_t *hds = arg1;
2656 2654 struct promotearg *pa = arg2;
2657 2655 struct promotenode *snap = list_head(&pa->shared_snaps);
2658 2656 dsl_dataset_t *origin_ds = snap->ds;
2659 2657 int err;
2660 2658 uint64_t unused;
2661 2659
2662 2660 /* Check that it is a real clone */
2663 2661 if (!dsl_dir_is_clone(hds->ds_dir))
2664 2662 return (EINVAL);
2665 2663
2666 2664 /* Since this is so expensive, don't do the preliminary check */
2667 2665 if (!dmu_tx_is_syncing(tx))
2668 2666 return (0);
2669 2667
2670 2668 if (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE)
2671 2669 return (EXDEV);
2672 2670
2673 2671 /* compute origin's new unique space */
2674 2672 snap = list_tail(&pa->clone_snaps);
2675 2673 ASSERT3U(snap->ds->ds_phys->ds_prev_snap_obj, ==, origin_ds->ds_object);
2676 2674 dsl_deadlist_space_range(&snap->ds->ds_deadlist,
2677 2675 origin_ds->ds_phys->ds_prev_snap_txg, UINT64_MAX,
2678 2676 &pa->unique, &unused, &unused);
2679 2677
2680 2678 /*
2681 2679 * Walk the snapshots that we are moving
2682 2680 *
2683 2681 * Compute space to transfer. Consider the incremental changes
2684 2682 * to used for each snapshot:
2685 2683 * (my used) = (prev's used) + (blocks born) - (blocks killed)
2686 2684 * So each snapshot gave birth to:
2687 2685 * (blocks born) = (my used) - (prev's used) + (blocks killed)
2688 2686 * So a sequence would look like:
2689 2687 * (uN - u(N-1) + kN) + ... + (u1 - u0 + k1) + (u0 - 0 + k0)
2690 2688 * Which simplifies to:
2691 2689 * uN + kN + k(N-1) + ... + k1 + k0
2692 2690 * Note however, if we stop before we reach the ORIGIN we get:
2693 2691 * uN + kN + k(N-1) + ... + kM - u(M-1)
2694 2692 */
2695 2693 pa->used = origin_ds->ds_phys->ds_referenced_bytes;
2696 2694 pa->comp = origin_ds->ds_phys->ds_compressed_bytes;
2697 2695 pa->uncomp = origin_ds->ds_phys->ds_uncompressed_bytes;
2698 2696 for (snap = list_head(&pa->shared_snaps); snap;
2699 2697 snap = list_next(&pa->shared_snaps, snap)) {
2700 2698 uint64_t val, dlused, dlcomp, dluncomp;
2701 2699 dsl_dataset_t *ds = snap->ds;
2702 2700
2703 2701 /* Check that the snapshot name does not conflict */
2704 2702 VERIFY(0 == dsl_dataset_get_snapname(ds));
2705 2703 err = dsl_dataset_snap_lookup(hds, ds->ds_snapname, &val);
2706 2704 if (err == 0) {
2707 2705 err = EEXIST;
2708 2706 goto out;
2709 2707 }
2710 2708 if (err != ENOENT)
2711 2709 goto out;
2712 2710
2713 2711 /* The very first snapshot does not have a deadlist */
2714 2712 if (ds->ds_phys->ds_prev_snap_obj == 0)
2715 2713 continue;
2716 2714
2717 2715 dsl_deadlist_space(&ds->ds_deadlist,
2718 2716 &dlused, &dlcomp, &dluncomp);
2719 2717 pa->used += dlused;
2720 2718 pa->comp += dlcomp;
2721 2719 pa->uncomp += dluncomp;
2722 2720 }
2723 2721
2724 2722 /*
2725 2723 * If we are a clone of a clone then we never reached ORIGIN,
2726 2724 * so we need to subtract out the clone origin's used space.
2727 2725 */
2728 2726 if (pa->origin_origin) {
2729 2727 pa->used -= pa->origin_origin->ds_phys->ds_referenced_bytes;
2730 2728 pa->comp -= pa->origin_origin->ds_phys->ds_compressed_bytes;
2731 2729 pa->uncomp -= pa->origin_origin->ds_phys->ds_uncompressed_bytes;
2732 2730 }
2733 2731
2734 2732 /* Check that there is enough space here */
2735 2733 err = dsl_dir_transfer_possible(origin_ds->ds_dir, hds->ds_dir,
2736 2734 pa->used);
2737 2735 if (err)
2738 2736 return (err);
2739 2737
2740 2738 /*
2741 2739 * Compute the amounts of space that will be used by snapshots
2742 2740 * after the promotion (for both origin and clone). For each,
2743 2741 * it is the amount of space that will be on all of their
2744 2742 * deadlists (that was not born before their new origin).
2745 2743 */
2746 2744 if (hds->ds_dir->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
2747 2745 uint64_t space;
2748 2746
2749 2747 /*
2750 2748 * Note, typically this will not be a clone of a clone,
2751 2749 * so dd_origin_txg will be < TXG_INITIAL, so
2752 2750 * these snaplist_space() -> dsl_deadlist_space_range()
2753 2751 * calls will be fast because they do not have to
2754 2752 * iterate over all bps.
2755 2753 */
2756 2754 snap = list_head(&pa->origin_snaps);
2757 2755 err = snaplist_space(&pa->shared_snaps,
2758 2756 snap->ds->ds_dir->dd_origin_txg, &pa->cloneusedsnap);
2759 2757 if (err)
2760 2758 return (err);
2761 2759
2762 2760 err = snaplist_space(&pa->clone_snaps,
2763 2761 snap->ds->ds_dir->dd_origin_txg, &space);
2764 2762 if (err)
2765 2763 return (err);
2766 2764 pa->cloneusedsnap += space;
2767 2765 }
2768 2766 if (origin_ds->ds_dir->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
2769 2767 err = snaplist_space(&pa->origin_snaps,
2770 2768 origin_ds->ds_phys->ds_creation_txg, &pa->originusedsnap);
2771 2769 if (err)
2772 2770 return (err);
2773 2771 }
2774 2772
2775 2773 return (0);
2776 2774 out:
2777 2775 pa->err_ds = snap->ds->ds_snapname;
2778 2776 return (err);
2779 2777 }
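
The derivation in the comment above telescopes: summing (u[i] - u[i-1] + k[i]) over every snapshot back to the origin leaves only u[N] plus all the k[i] terms, since each u[i-1] cancels. A numeric check with made-up per-snapshot figures:

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint64_t u[] = { 100, 140, 130, 180 };	/* used at snapshot i */
	uint64_t k[] = { 5, 10, 40, 15 };	/* blocks killed by snapshot i */
	int n = 4;
	int64_t term_by_term = 0;
	uint64_t collapsed = u[n - 1];

	for (int i = 0; i < n; i++)
		term_by_term += (int64_t)u[i] -
		    (i > 0 ? (int64_t)u[i - 1] : 0) + (int64_t)k[i];
	for (int i = 0; i < n; i++)
		collapsed += k[i];

	/* Both print 250. */
	printf("%lld == %llu\n", (long long)term_by_term,
	    (unsigned long long)collapsed);
	return (0);
}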
2780 2778
2781 2779 static void
2782 2780 dsl_dataset_promote_sync(void *arg1, void *arg2, dmu_tx_t *tx)
2783 2781 {
2784 2782 dsl_dataset_t *hds = arg1;
2785 2783 struct promotearg *pa = arg2;
2786 2784 struct promotenode *snap = list_head(&pa->shared_snaps);
2787 2785 dsl_dataset_t *origin_ds = snap->ds;
2788 2786 dsl_dataset_t *origin_head;
2789 2787 dsl_dir_t *dd = hds->ds_dir;
2790 2788 dsl_pool_t *dp = hds->ds_dir->dd_pool;
2791 2789 dsl_dir_t *odd = NULL;
2792 2790 uint64_t oldnext_obj;
2793 2791 int64_t delta;
2794 2792
2795 2793 ASSERT(0 == (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE));
2796 2794
2797 2795 snap = list_head(&pa->origin_snaps);
2798 2796 origin_head = snap->ds;
2799 2797
2800 2798 /*
2801 2799 * We need to explicitly open odd, since origin_ds's dd will be
2802 2800 * changing.
2803 2801 */
2804 2802 VERIFY(0 == dsl_dir_open_obj(dp, origin_ds->ds_dir->dd_object,
2805 2803 NULL, FTAG, &odd));
2806 2804
2807 2805 /* change origin's next snap */
2808 2806 dmu_buf_will_dirty(origin_ds->ds_dbuf, tx);
2809 2807 oldnext_obj = origin_ds->ds_phys->ds_next_snap_obj;
2810 2808 snap = list_tail(&pa->clone_snaps);
2811 2809 ASSERT3U(snap->ds->ds_phys->ds_prev_snap_obj, ==, origin_ds->ds_object);
2812 2810 origin_ds->ds_phys->ds_next_snap_obj = snap->ds->ds_object;
2813 2811
2814 2812 /* change the origin's next clone */
2815 2813 if (origin_ds->ds_phys->ds_next_clones_obj) {
2816 2814 remove_from_next_clones(origin_ds, snap->ds->ds_object, tx);
2817 2815 VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
2818 2816 origin_ds->ds_phys->ds_next_clones_obj,
2819 2817 oldnext_obj, tx));
2820 2818 }
2821 2819
2822 2820 /* change origin */
2823 2821 dmu_buf_will_dirty(dd->dd_dbuf, tx);
2824 2822 ASSERT3U(dd->dd_phys->dd_origin_obj, ==, origin_ds->ds_object);
2825 2823 dd->dd_phys->dd_origin_obj = odd->dd_phys->dd_origin_obj;
2826 2824 dd->dd_origin_txg = origin_head->ds_dir->dd_origin_txg;
2827 2825 dmu_buf_will_dirty(odd->dd_dbuf, tx);
2828 2826 odd->dd_phys->dd_origin_obj = origin_ds->ds_object;
2829 2827 origin_head->ds_dir->dd_origin_txg =
2830 2828 origin_ds->ds_phys->ds_creation_txg;
2831 2829
2832 2830 /* change dd_clone entries */
2833 2831 if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
2834 2832 VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
2835 2833 odd->dd_phys->dd_clones, hds->ds_object, tx));
2836 2834 VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
2837 2835 pa->origin_origin->ds_dir->dd_phys->dd_clones,
2838 2836 hds->ds_object, tx));
2839 2837
2840 2838 VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
2841 2839 pa->origin_origin->ds_dir->dd_phys->dd_clones,
2842 2840 origin_head->ds_object, tx));
2843 2841 if (dd->dd_phys->dd_clones == 0) {
2844 2842 dd->dd_phys->dd_clones = zap_create(dp->dp_meta_objset,
2845 2843 DMU_OT_DSL_CLONES, DMU_OT_NONE, 0, tx);
2846 2844 }
2847 2845 VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
2848 2846 dd->dd_phys->dd_clones, origin_head->ds_object, tx));
2849 2847
2850 2848 }
2851 2849
2852 2850 /* move snapshots to this dir */
2853 2851 for (snap = list_head(&pa->shared_snaps); snap;
2854 2852 snap = list_next(&pa->shared_snaps, snap)) {
2855 2853 dsl_dataset_t *ds = snap->ds;
2856 2854
2857 2855 /* unregister props as dsl_dir is changing */
2858 2856 if (ds->ds_objset) {
2859 2857 dmu_objset_evict(ds->ds_objset);
2860 2858 ds->ds_objset = NULL;
2861 2859 }
2862 2860 /* move snap name entry */
2863 2861 VERIFY(0 == dsl_dataset_get_snapname(ds));
2864 2862 VERIFY(0 == dsl_dataset_snap_remove(origin_head,
2865 2863 ds->ds_snapname, tx));
2866 2864 VERIFY(0 == zap_add(dp->dp_meta_objset,
2867 2865 hds->ds_phys->ds_snapnames_zapobj, ds->ds_snapname,
2868 2866 8, 1, &ds->ds_object, tx));
2869 2867
2870 2868 /* change containing dsl_dir */
2871 2869 dmu_buf_will_dirty(ds->ds_dbuf, tx);
2872 2870 ASSERT3U(ds->ds_phys->ds_dir_obj, ==, odd->dd_object);
2873 2871 ds->ds_phys->ds_dir_obj = dd->dd_object;
2874 2872 ASSERT3P(ds->ds_dir, ==, odd);
2875 2873 dsl_dir_close(ds->ds_dir, ds);
2876 2874 VERIFY(0 == dsl_dir_open_obj(dp, dd->dd_object,
2877 2875 NULL, ds, &ds->ds_dir));
2878 2876
2879 2877 /* move any clone references */
2880 2878 if (ds->ds_phys->ds_next_clones_obj &&
2881 2879 spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
2882 2880 zap_cursor_t zc;
2883 2881 zap_attribute_t za;
2884 2882
2885 2883 for (zap_cursor_init(&zc, dp->dp_meta_objset,
2886 2884 ds->ds_phys->ds_next_clones_obj);
2887 2885 zap_cursor_retrieve(&zc, &za) == 0;
2888 2886 zap_cursor_advance(&zc)) {
2889 2887 dsl_dataset_t *cnds;
2890 2888 uint64_t o;
2891 2889
2892 2890 if (za.za_first_integer == oldnext_obj) {
2893 2891 /*
2894 2892 * We've already moved the
2895 2893 * origin's reference.
2896 2894 */
2897 2895 continue;
2898 2896 }
2899 2897
2900 2898 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
2901 2899 za.za_first_integer, FTAG, &cnds));
2902 2900 o = cnds->ds_dir->dd_phys->dd_head_dataset_obj;
2903 2901
2904 2902 VERIFY3U(zap_remove_int(dp->dp_meta_objset,
2905 2903 odd->dd_phys->dd_clones, o, tx), ==, 0);
2906 2904 VERIFY3U(zap_add_int(dp->dp_meta_objset,
2907 2905 dd->dd_phys->dd_clones, o, tx), ==, 0);
2908 2906 dsl_dataset_rele(cnds, FTAG);
2909 2907 }
2910 2908 zap_cursor_fini(&zc);
2911 2909 }
2912 2910
2913 2911 ASSERT3U(dsl_prop_numcb(ds), ==, 0);
2914 2912 }
2915 2913
2916 2914 /*
2917 2915 * Change space accounting.
2918 2916 * Note, pa->*usedsnap and dd_used_breakdown[SNAP] will either
2919 2917 * both be valid, or both be 0 (resulting in delta == 0). This
2920 2918 * is true for each of {clone,origin} independently.
2921 2919 */
2922 2920
2923 2921 delta = pa->cloneusedsnap -
2924 2922 dd->dd_phys->dd_used_breakdown[DD_USED_SNAP];
2925 2923 ASSERT3S(delta, >=, 0);
2926 2924 ASSERT3U(pa->used, >=, delta);
2927 2925 dsl_dir_diduse_space(dd, DD_USED_SNAP, delta, 0, 0, tx);
2928 2926 dsl_dir_diduse_space(dd, DD_USED_HEAD,
2929 2927 pa->used - delta, pa->comp, pa->uncomp, tx);
2930 2928
2931 2929 delta = pa->originusedsnap -
2932 2930 odd->dd_phys->dd_used_breakdown[DD_USED_SNAP];
2933 2931 ASSERT3S(delta, <=, 0);
2934 2932 ASSERT3U(pa->used, >=, -delta);
2935 2933 dsl_dir_diduse_space(odd, DD_USED_SNAP, delta, 0, 0, tx);
2936 2934 dsl_dir_diduse_space(odd, DD_USED_HEAD,
2937 2935 -pa->used - delta, -pa->comp, -pa->uncomp, tx);
2938 2936
2939 2937 origin_ds->ds_phys->ds_unique_bytes = pa->unique;
2940 2938
2941 2939 /* log history record */
2942 2940 spa_history_log_internal(LOG_DS_PROMOTE, dd->dd_pool->dp_spa, tx,
2943 2941 "dataset = %llu", hds->ds_object);
2944 2942
2945 2943 dsl_dir_close(odd, FTAG);
2946 2944 }
2947 2945
2948 2946 static char *snaplist_tag = "snaplist";
2949 2947 /*
2950 2948 * Make a list of dsl_dataset_t's for the snapshots between first_obj
2951 2949 * (exclusive) and last_obj (inclusive). The list will be in reverse
2952 2950 * order (last_obj will be the list_head()). If first_obj == 0, do all
2953 2951 * snapshots back to this dataset's origin.
2954 2952 */
2955 2953 static int
2956 2954 snaplist_make(dsl_pool_t *dp, boolean_t own,
2957 2955 uint64_t first_obj, uint64_t last_obj, list_t *l)
2958 2956 {
2959 2957 uint64_t obj = last_obj;
2960 2958
2961 2959 ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock));
2962 2960
2963 2961 list_create(l, sizeof (struct promotenode),
2964 2962 offsetof(struct promotenode, link));
2965 2963
2966 2964 while (obj != first_obj) {
2967 2965 dsl_dataset_t *ds;
2968 2966 struct promotenode *snap;
2969 2967 int err;
2970 2968
2971 2969 if (own) {
2972 2970 err = dsl_dataset_own_obj(dp, obj,
2973 2971 0, snaplist_tag, &ds);
2974 2972 if (err == 0)
2975 2973 dsl_dataset_make_exclusive(ds, snaplist_tag);
2976 2974 } else {
2977 2975 err = dsl_dataset_hold_obj(dp, obj, snaplist_tag, &ds);
2978 2976 }
2979 2977 if (err == ENOENT) {
2980 2978 /* lost race with snapshot destroy */
2981 2979 struct promotenode *last = list_tail(l);
2982 2980 ASSERT(obj != last->ds->ds_phys->ds_prev_snap_obj);
2983 2981 obj = last->ds->ds_phys->ds_prev_snap_obj;
2984 2982 continue;
2985 2983 } else if (err) {
2986 2984 return (err);
2987 2985 }
2988 2986
2989 2987 if (first_obj == 0)
2990 2988 first_obj = ds->ds_dir->dd_phys->dd_origin_obj;
2991 2989
2992 2990 snap = kmem_alloc(sizeof (struct promotenode), KM_SLEEP);
2993 2991 snap->ds = ds;
2994 2992 list_insert_tail(l, snap);
2995 2993 obj = ds->ds_phys->ds_prev_snap_obj;
2996 2994 }
2997 2995
2998 2996 return (0);
2999 2997 }
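
snaplist_make() walks the snapshot chain backwards: starting from last_obj it follows each dataset's ds_prev_snap_obj until first_obj, inserting at the tail so the newest snapshot stays at the head of the list, and restarting from the list's tail if a snapshot vanishes mid-walk. A user-space analogue of the walk (the record type and lookup are hypothetical stand-ins for dsl_dataset_hold_obj() and ds_phys):

#include <stdio.h>
#include <stdint.h>

typedef struct snap_rec {
	uint64_t sr_obj;
	uint64_t sr_prev_obj;	/* like ds_prev_snap_obj */
} snap_rec_t;

static void
walk_chain(const snap_rec_t *tbl, int n, uint64_t first, uint64_t last)
{
	uint64_t obj = last;

	while (obj != first) {
		const snap_rec_t *r = NULL;

		for (int i = 0; i < n; i++)
			if (tbl[i].sr_obj == obj)
				r = &tbl[i];
		if (r == NULL)
			return;		/* analogous to the ENOENT race */
		printf("visit %llu\n", (unsigned long long)r->sr_obj);
		obj = r->sr_prev_obj;	/* step toward the origin */
	}
}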
3000 2998
3001 2999 static int
3002 3000 snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep)
3003 3001 {
3004 3002 struct promotenode *snap;
3005 3003
3006 3004 *spacep = 0;
3007 3005 for (snap = list_head(l); snap; snap = list_next(l, snap)) {
3008 3006 uint64_t used, comp, uncomp;
3009 3007 dsl_deadlist_space_range(&snap->ds->ds_deadlist,
3010 3008 mintxg, UINT64_MAX, &used, &comp, &uncomp);
3011 3009 *spacep += used;
3012 3010 }
3013 3011 return (0);
3014 3012 }
3015 3013
3016 3014 static void
3017 3015 snaplist_destroy(list_t *l, boolean_t own)
3018 3016 {
3019 3017 struct promotenode *snap;
3020 3018
3021 3019 if (!l || !list_link_active(&l->list_head))
3022 3020 return;
3023 3021
3024 3022 while ((snap = list_tail(l)) != NULL) {
3025 3023 list_remove(l, snap);
3026 3024 if (own)
3027 3025 dsl_dataset_disown(snap->ds, snaplist_tag);
3028 3026 else
3029 3027 dsl_dataset_rele(snap->ds, snaplist_tag);
3030 3028 kmem_free(snap, sizeof (struct promotenode));
3031 3029 }
3032 3030 list_destroy(l);
3033 3031 }
3034 3032
3035 3033 /*
3036 3034 * Promote a clone. Nomenclature note:
3037 3035 * "clone" or "cds": the original clone which is being promoted
3038 3036 * "origin" or "ods": the snapshot which is originally the clone's origin
3039 3037 * "origin head" or "ohds": the dataset which is the head
3040 3038 * (filesystem/volume) for the origin
3041 3039 * "origin origin": the origin of the origin's filesystem (typically
3042 3040 * NULL, indicating that the clone is not a clone of a clone).
3043 3041 */
3044 3042 int
3045 3043 dsl_dataset_promote(const char *name, char *conflsnap)
3046 3044 {
3047 3045 dsl_dataset_t *ds;
3048 3046 dsl_dir_t *dd;
3049 3047 dsl_pool_t *dp;
3050 3048 dmu_object_info_t doi;
3051 3049 struct promotearg pa = { 0 };
3052 3050 struct promotenode *snap;
3053 3051 int err;
3054 3052
3055 3053 err = dsl_dataset_hold(name, FTAG, &ds);
3056 3054 if (err)
3057 3055 return (err);
3058 3056 dd = ds->ds_dir;
3059 3057 dp = dd->dd_pool;
3060 3058
3061 3059 err = dmu_object_info(dp->dp_meta_objset,
3062 3060 ds->ds_phys->ds_snapnames_zapobj, &doi);
3063 3061 if (err) {
3064 3062 dsl_dataset_rele(ds, FTAG);
3065 3063 return (err);
3066 3064 }
3067 3065
3068 3066 if (dsl_dataset_is_snapshot(ds) || dd->dd_phys->dd_origin_obj == 0) {
3069 3067 dsl_dataset_rele(ds, FTAG);
3070 3068 return (EINVAL);
3071 3069 }
3072 3070
3073 3071 /*
3074 3072 * We are going to inherit all the snapshots taken before our
3075 3073 * origin (i.e., our new origin will be our parent's origin).
3076 3074 * Take ownership of them so that we can rename them into our
3077 3075 * namespace.
3078 3076 */
3079 3077 rw_enter(&dp->dp_config_rwlock, RW_READER);
3080 3078
3081 3079 err = snaplist_make(dp, B_TRUE, 0, dd->dd_phys->dd_origin_obj,
3082 3080 &pa.shared_snaps);
3083 3081 if (err != 0)
3084 3082 goto out;
3085 3083
3086 3084 err = snaplist_make(dp, B_FALSE, 0, ds->ds_object, &pa.clone_snaps);
3087 3085 if (err != 0)
3088 3086 goto out;
3089 3087
3090 3088 snap = list_head(&pa.shared_snaps);
3091 3089 ASSERT3U(snap->ds->ds_object, ==, dd->dd_phys->dd_origin_obj);
3092 3090 err = snaplist_make(dp, B_FALSE, dd->dd_phys->dd_origin_obj,
3093 3091 snap->ds->ds_dir->dd_phys->dd_head_dataset_obj, &pa.origin_snaps);
3094 3092 if (err != 0)
3095 3093 goto out;
3096 3094
3097 3095 if (snap->ds->ds_dir->dd_phys->dd_origin_obj != 0) {
3098 3096 err = dsl_dataset_hold_obj(dp,
3099 3097 snap->ds->ds_dir->dd_phys->dd_origin_obj,
3100 3098 FTAG, &pa.origin_origin);
3101 3099 if (err != 0)
3102 3100 goto out;
3103 3101 }
3104 3102
3105 3103 out:
3106 3104 rw_exit(&dp->dp_config_rwlock);
3107 3105
3108 3106 /*
3109 3107 * Add in 128x the snapnames zapobj size, since we will be moving
3110 3108 * a bunch of snapnames to the promoted ds, and dirtying their
3111 3109 * bonus buffers.
3112 3110 */
3113 3111 if (err == 0) {
3114 3112 err = dsl_sync_task_do(dp, dsl_dataset_promote_check,
3115 3113 dsl_dataset_promote_sync, ds, &pa,
3116 3114 2 + 2 * doi.doi_physical_blocks_512);
3117 3115 if (err && pa.err_ds && conflsnap)
3118 3116 (void) strncpy(conflsnap, pa.err_ds, MAXNAMELEN);
3119 3117 }
3120 3118
3121 3119 snaplist_destroy(&pa.shared_snaps, B_TRUE);
3122 3120 snaplist_destroy(&pa.clone_snaps, B_FALSE);
3123 3121 snaplist_destroy(&pa.origin_snaps, B_FALSE);
3124 3122 if (pa.origin_origin)
3125 3123 dsl_dataset_rele(pa.origin_origin, FTAG);
3126 3124 dsl_dataset_rele(ds, FTAG);
3127 3125 return (err);
3128 3126 }
3129 3127
3130 3128 struct cloneswaparg {
3131 3129 dsl_dataset_t *cds; /* clone dataset */
3132 3130 dsl_dataset_t *ohds; /* origin's head dataset */
3133 3131 boolean_t force;
3134 3132 int64_t unused_refres_delta; /* change in unconsumed refreservation */
3135 3133 };
3136 3134
3137 3135 /* ARGSUSED */
3138 3136 static int
3139 3137 dsl_dataset_clone_swap_check(void *arg1, void *arg2, dmu_tx_t *tx)
3140 3138 {
3141 3139 struct cloneswaparg *csa = arg1;
3142 3140
3143 3141 /* they should both be heads */
3144 3142 if (dsl_dataset_is_snapshot(csa->cds) ||
3145 3143 dsl_dataset_is_snapshot(csa->ohds))
3146 3144 return (EINVAL);
3147 3145
3148 3146 /* the branch point should be just before them */
3149 3147 if (csa->cds->ds_prev != csa->ohds->ds_prev)
3150 3148 return (EINVAL);
3151 3149
3152 3150 /* cds should be the clone (unless they are unrelated) */
3153 3151 if (csa->cds->ds_prev != NULL &&
3154 3152 csa->cds->ds_prev != csa->cds->ds_dir->dd_pool->dp_origin_snap &&
3155 3153 csa->ohds->ds_object !=
3156 3154 csa->cds->ds_prev->ds_phys->ds_next_snap_obj)
3157 3155 return (EINVAL);
3158 3156
3159 3157 /* the clone should be a child of the origin */
3160 3158 if (csa->cds->ds_dir->dd_parent != csa->ohds->ds_dir)
3161 3159 return (EINVAL);
3162 3160
3163 3161 /* ohds shouldn't be modified unless 'force' */
3164 3162 if (!csa->force && dsl_dataset_modified_since_lastsnap(csa->ohds))
3165 3163 return (ETXTBSY);
3166 3164
3167 3165 /* adjust amount of any unconsumed refreservation */
3168 3166 csa->unused_refres_delta =
3169 3167 (int64_t)MIN(csa->ohds->ds_reserved,
3170 3168 csa->ohds->ds_phys->ds_unique_bytes) -
3171 3169 (int64_t)MIN(csa->ohds->ds_reserved,
3172 3170 csa->cds->ds_phys->ds_unique_bytes);
3173 3171
3174 3172 if (csa->unused_refres_delta > 0 &&
3175 3173 csa->unused_refres_delta >
3176 3174 dsl_dir_space_available(csa->ohds->ds_dir, NULL, 0, TRUE))
3177 3175 return (ENOSPC);
3178 3176
3179 3177 if (csa->ohds->ds_quota != 0 &&
3180 3178 csa->cds->ds_phys->ds_unique_bytes > csa->ohds->ds_quota)
3181 3179 return (EDQUOT);
3182 3180
3183 3181 return (0);
3184 3182 }
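
unused_refres_delta measures how much more (or less) of the head's refreservation will sit unconsumed once the clone's unique bytes are swapped in; a positive delta must be covered by space available to the parent. A worked example with made-up numbers:

#include <stdio.h>
#include <stdint.h>

#define	MIN(a, b)	((a) < (b) ? (a) : (b))

int
main(void)
{
	uint64_t reserved = 10ULL << 30;	/* head's refreservation */
	uint64_t head_unique = 8ULL << 30;
	uint64_t clone_unique = 2ULL << 30;
	int64_t delta = (int64_t)MIN(reserved, head_unique) -
	    (int64_t)MIN(reserved, clone_unique);

	/* Prints 6: 6 GB more of the reservation goes unconsumed. */
	printf("unused_refres_delta = %lld GB\n", (long long)(delta >> 30));
	return (0);
}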
3185 3183
3186 3184 /* ARGSUSED */
3187 3185 static void
3188 3186 dsl_dataset_clone_swap_sync(void *arg1, void *arg2, dmu_tx_t *tx)
3189 3187 {
3190 3188 struct cloneswaparg *csa = arg1;
3191 3189 dsl_pool_t *dp = csa->cds->ds_dir->dd_pool;
3192 3190
3193 3191 ASSERT(csa->cds->ds_reserved == 0);
3194 3192 ASSERT(csa->ohds->ds_quota == 0 ||
3195 3193 csa->cds->ds_phys->ds_unique_bytes <= csa->ohds->ds_quota);
3196 3194
3197 3195 dmu_buf_will_dirty(csa->cds->ds_dbuf, tx);
3198 3196 dmu_buf_will_dirty(csa->ohds->ds_dbuf, tx);
3199 3197
3200 3198 if (csa->cds->ds_objset != NULL) {
3201 3199 dmu_objset_evict(csa->cds->ds_objset);
3202 3200 csa->cds->ds_objset = NULL;
3203 3201 }
3204 3202
3205 3203 if (csa->ohds->ds_objset != NULL) {
3206 3204 dmu_objset_evict(csa->ohds->ds_objset);
3207 3205 csa->ohds->ds_objset = NULL;
3208 3206 }
3209 3207
3210 3208 /*
3211 3209 * Reset the origin's unique bytes, if an origin exists.
3212 3210 */
3213 3211 if (csa->cds->ds_prev) {
3214 3212 dsl_dataset_t *origin = csa->cds->ds_prev;
3215 3213 uint64_t comp, uncomp;
3216 3214
3217 3215 dmu_buf_will_dirty(origin->ds_dbuf, tx);
3218 3216 dsl_deadlist_space_range(&csa->cds->ds_deadlist,
3219 3217 origin->ds_phys->ds_prev_snap_txg, UINT64_MAX,
3220 3218 &origin->ds_phys->ds_unique_bytes, &comp, &uncomp);
3221 3219 }
3222 3220
3223 3221 /* swap blkptrs */
3224 3222 {
3225 3223 blkptr_t tmp;
3226 3224 tmp = csa->ohds->ds_phys->ds_bp;
3227 3225 csa->ohds->ds_phys->ds_bp = csa->cds->ds_phys->ds_bp;
3228 3226 csa->cds->ds_phys->ds_bp = tmp;
3229 3227 }
3230 3228
3231 3229 /* set dd_*_bytes */
3232 3230 {
3233 3231 int64_t dused, dcomp, duncomp;
3234 3232 uint64_t cdl_used, cdl_comp, cdl_uncomp;
3235 3233 uint64_t odl_used, odl_comp, odl_uncomp;
3236 3234
3237 3235 ASSERT3U(csa->cds->ds_dir->dd_phys->
3238 3236 dd_used_breakdown[DD_USED_SNAP], ==, 0);
3239 3237
3240 3238 dsl_deadlist_space(&csa->cds->ds_deadlist,
3241 3239 &cdl_used, &cdl_comp, &cdl_uncomp);
3242 3240 dsl_deadlist_space(&csa->ohds->ds_deadlist,
3243 3241 &odl_used, &odl_comp, &odl_uncomp);
3244 3242
3245 3243 dused = csa->cds->ds_phys->ds_referenced_bytes + cdl_used -
3246 3244 (csa->ohds->ds_phys->ds_referenced_bytes + odl_used);
3247 3245 dcomp = csa->cds->ds_phys->ds_compressed_bytes + cdl_comp -
3248 3246 (csa->ohds->ds_phys->ds_compressed_bytes + odl_comp);
3249 3247 duncomp = csa->cds->ds_phys->ds_uncompressed_bytes +
3250 3248 cdl_uncomp -
3251 3249 (csa->ohds->ds_phys->ds_uncompressed_bytes + odl_uncomp);
3252 3250
3253 3251 dsl_dir_diduse_space(csa->ohds->ds_dir, DD_USED_HEAD,
3254 3252 dused, dcomp, duncomp, tx);
3255 3253 dsl_dir_diduse_space(csa->cds->ds_dir, DD_USED_HEAD,
3256 3254 -dused, -dcomp, -duncomp, tx);
3257 3255
3258 3256 /*
3259 3257 * The difference in the space used by snapshots is the
3260 3258 * difference in snapshot space due to the head's
3261 3259 * deadlist (since that's the only thing that's
3262 3260 * changing that affects the snapused).
3263 3261 */
3264 3262 dsl_deadlist_space_range(&csa->cds->ds_deadlist,
3265 3263 csa->ohds->ds_dir->dd_origin_txg, UINT64_MAX,
3266 3264 &cdl_used, &cdl_comp, &cdl_uncomp);
3267 3265 dsl_deadlist_space_range(&csa->ohds->ds_deadlist,
3268 3266 csa->ohds->ds_dir->dd_origin_txg, UINT64_MAX,
3269 3267 &odl_used, &odl_comp, &odl_uncomp);
3270 3268 dsl_dir_transfer_space(csa->ohds->ds_dir, cdl_used - odl_used,
3271 3269 DD_USED_HEAD, DD_USED_SNAP, tx);
3272 3270 }
3273 3271
3274 3272 /* swap ds_*_bytes */
3275 3273 SWITCH64(csa->ohds->ds_phys->ds_referenced_bytes,
3276 3274 csa->cds->ds_phys->ds_referenced_bytes);
3277 3275 SWITCH64(csa->ohds->ds_phys->ds_compressed_bytes,
3278 3276 csa->cds->ds_phys->ds_compressed_bytes);
3279 3277 SWITCH64(csa->ohds->ds_phys->ds_uncompressed_bytes,
3280 3278 csa->cds->ds_phys->ds_uncompressed_bytes);
3281 3279 SWITCH64(csa->ohds->ds_phys->ds_unique_bytes,
3282 3280 csa->cds->ds_phys->ds_unique_bytes);
3283 3281
3284 3282 /* apply any parent delta for change in unconsumed refreservation */
3285 3283 dsl_dir_diduse_space(csa->ohds->ds_dir, DD_USED_REFRSRV,
3286 3284 csa->unused_refres_delta, 0, 0, tx);
3287 3285
3288 3286 /*
3289 3287 * Swap deadlists.
3290 3288 */
3291 3289 dsl_deadlist_close(&csa->cds->ds_deadlist);
3292 3290 dsl_deadlist_close(&csa->ohds->ds_deadlist);
3293 3291 SWITCH64(csa->ohds->ds_phys->ds_deadlist_obj,
3294 3292 csa->cds->ds_phys->ds_deadlist_obj);
3295 3293 dsl_deadlist_open(&csa->cds->ds_deadlist, dp->dp_meta_objset,
3296 3294 csa->cds->ds_phys->ds_deadlist_obj);
3297 3295 dsl_deadlist_open(&csa->ohds->ds_deadlist, dp->dp_meta_objset,
3298 3296 csa->ohds->ds_phys->ds_deadlist_obj);
3299 3297
3300 3298 dsl_scan_ds_clone_swapped(csa->ohds, csa->cds, tx);
3301 3299 }
3302 3300
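/*
 * [editorial note] A user-space sketch of the DD_USED_HEAD transfer in
 * dsl_dataset_clone_swap_sync() above, with made-up byte counts.  The
 * swap moves the clone's blocks under the head's dsl_dir (and vice
 * versa), so the difference in (referenced + deadlist) space is
 * charged to one dir and debited from the other, leaving pool-wide
 * accounting unchanged.
 */
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	/* hypothetical byte counts, for illustration only */
	uint64_t cds_referenced = 500, cdl_used = 40;	/* clone, its deadlist */
	uint64_t ohds_referenced = 300, odl_used = 10;	/* head, its deadlist */

	/* same shape as the dused computation in the sync function */
	int64_t dused = (int64_t)(cds_referenced + cdl_used) -
	    (int64_t)(ohds_referenced + odl_used);

	/* prints 230: +230 to the head's dir, -230 to the clone's dir */
	printf("dused = %lld\n", (long long)dused);
	return (0);
}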
3303 3301 /*
3304 3302  * Swap 'clone' with its origin head dataset.  Used at the end of "zfs
3305 3303 * recv" into an existing fs to swizzle the file system to the new
3306 3304 * version, and by "zfs rollback". Can also be used to swap two
3307 3305 * independent head datasets if neither has any snapshots.
3308 3306 */
3309 3307 int
3310 3308 dsl_dataset_clone_swap(dsl_dataset_t *clone, dsl_dataset_t *origin_head,
3311 3309 boolean_t force)
3312 3310 {
3313 3311 struct cloneswaparg csa;
3314 3312 int error;
3315 3313
3316 3314 ASSERT(clone->ds_owner);
3317 3315 ASSERT(origin_head->ds_owner);
3318 3316 retry:
3319 3317 /*
3320 3318 * Need exclusive access for the swap. If we're swapping these
3321 3319 * datasets back after an error, we already hold the locks.
3322 3320 */
3323 3321 if (!RW_WRITE_HELD(&clone->ds_rwlock))
3324 3322 rw_enter(&clone->ds_rwlock, RW_WRITER);
3325 3323 if (!RW_WRITE_HELD(&origin_head->ds_rwlock) &&
3326 3324 !rw_tryenter(&origin_head->ds_rwlock, RW_WRITER)) {
3327 3325 rw_exit(&clone->ds_rwlock);
3328 3326 rw_enter(&origin_head->ds_rwlock, RW_WRITER);
3329 3327 if (!rw_tryenter(&clone->ds_rwlock, RW_WRITER)) {
3330 3328 rw_exit(&origin_head->ds_rwlock);
3331 3329 goto retry;
3332 3330 }
3333 3331 }
3334 3332 csa.cds = clone;
3335 3333 csa.ohds = origin_head;
3336 3334 csa.force = force;
3337 3335 error = dsl_sync_task_do(clone->ds_dir->dd_pool,
3338 3336 dsl_dataset_clone_swap_check,
3339 3337 dsl_dataset_clone_swap_sync, &csa, NULL, 9);
3340 3338 return (error);
3341 3339 }
3342 3340
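/*
 * [editorial note] The retry loop above is a classic two-lock
 * deadlock-avoidance pattern: take one lock, try the other, and if
 * that fails, back off and reacquire in the opposite order.  A
 * user-space analogue using POSIX rwlocks (not the kernel rw_enter/
 * rw_tryenter API) might look like the sketch below; both locks are
 * assumed to be initialized by the caller.
 */
#include <pthread.h>

static void
two_wrlock(pthread_rwlock_t *a, pthread_rwlock_t *b)
{
	for (;;) {
		(void) pthread_rwlock_wrlock(a);
		if (pthread_rwlock_trywrlock(b) == 0)
			return;		/* got both */
		/* b is busy: drop a, block on b, then try a */
		(void) pthread_rwlock_unlock(a);
		(void) pthread_rwlock_wrlock(b);
		if (pthread_rwlock_trywrlock(a) == 0)
			return;		/* got both */
		(void) pthread_rwlock_unlock(b);
		/* lost the race in both directions; start over */
	}
}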
3343 3341 /*
3344 3342 * Given a pool name and a dataset object number in that pool,
3345 3343 * return the name of that dataset.
3346 3344 */
3347 3345 int
3348 3346 dsl_dsobj_to_dsname(char *pname, uint64_t obj, char *buf)
3349 3347 {
3350 3348 spa_t *spa;
3351 3349 dsl_pool_t *dp;
3352 3350 dsl_dataset_t *ds;
3353 3351 int error;
3354 3352
3355 3353 if ((error = spa_open(pname, &spa, FTAG)) != 0)
3356 3354 return (error);
3357 3355 dp = spa_get_dsl(spa);
3358 3356 rw_enter(&dp->dp_config_rwlock, RW_READER);
3359 3357 if ((error = dsl_dataset_hold_obj(dp, obj, FTAG, &ds)) == 0) {
3360 3358 dsl_dataset_name(ds, buf);
3361 3359 dsl_dataset_rele(ds, FTAG);
3362 3360 }
3363 3361 rw_exit(&dp->dp_config_rwlock);
3364 3362 spa_close(spa, FTAG);
3365 3363
3366 3364 return (error);
3367 3365 }
3368 3366
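/*
 * [editorial note] A hedged usage sketch for dsl_dsobj_to_dsname():
 * the caller supplies a buffer of at least MAXNAMELEN bytes.  The
 * pool name "tank" and object number 21 below are hypothetical.
 */
#if 0	/* illustrative only; requires kernel context */
	char name[MAXNAMELEN];
	int err;

	err = dsl_dsobj_to_dsname("tank", 21, name);
	if (err == 0)
		dprintf("dataset object 21 is named %s\n", name);
#endif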
3369 3367 int
3370 3368 dsl_dataset_check_quota(dsl_dataset_t *ds, boolean_t check_quota,
3371 3369 uint64_t asize, uint64_t inflight, uint64_t *used, uint64_t *ref_rsrv)
3372 3370 {
3373 3371 int error = 0;
3374 3372
3375 3373 ASSERT3S(asize, >, 0);
3376 3374
3377 3375 /*
3378 3376 * *ref_rsrv is the portion of asize that will come from any
3379 3377 * unconsumed refreservation space.
3380 3378 */
3381 3379 *ref_rsrv = 0;
3382 3380
3383 3381 mutex_enter(&ds->ds_lock);
3384 3382 /*
3385 3383 * Make a space adjustment for reserved bytes.
3386 3384 */
3387 3385 if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes) {
3388 3386 ASSERT3U(*used, >=,
3389 3387 ds->ds_reserved - ds->ds_phys->ds_unique_bytes);
3390 3388 *used -= (ds->ds_reserved - ds->ds_phys->ds_unique_bytes);
3391 3389 *ref_rsrv =
3392 3390 asize - MIN(asize, parent_delta(ds, asize + inflight));
3393 3391 }
3394 3392
3395 3393 if (!check_quota || ds->ds_quota == 0) {
3396 3394 mutex_exit(&ds->ds_lock);
3397 3395 return (0);
3398 3396 }
3399 3397 /*
3400 3398 * If they are requesting more space, and our current estimate
3401 3399 * is over quota, they get to try again unless the actual
3402 3400 * on-disk usage is over quota and there are no pending changes (which
3403 3401 * may free up space for us).
3404 3402 */
3405 3403 if (ds->ds_phys->ds_referenced_bytes + inflight >= ds->ds_quota) {
3406 3404 if (inflight > 0 ||
3407 3405 ds->ds_phys->ds_referenced_bytes < ds->ds_quota)
3408 3406 error = ERESTART;
3409 3407 else
3410 3408 error = EDQUOT;
3411 3409 }
3412 3410 mutex_exit(&ds->ds_lock);
3413 3411
3414 3412 return (error);
3415 3413 }
3416 3414
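/*
 * [editorial note] A small user-space sketch of the quota decision at
 * the end of dsl_dataset_check_quota(): an estimate that is over
 * quota returns ERESTART (retry, since pending frees or shrinking
 * inflight writes may make room), and only a hard on-disk overage
 * with nothing in flight returns EDQUOT.  Numbers in main() are
 * made up.
 */
#include <stdio.h>
#include <stdint.h>
#include <errno.h>

#ifndef ERESTART
#define	ERESTART	85	/* not exposed by every user-space errno.h */
#endif

static int
quota_decision(uint64_t referenced, uint64_t inflight, uint64_t quota)
{
	if (quota == 0)
		return (0);
	if (referenced + inflight >= quota) {
		if (inflight > 0 || referenced < quota)
			return (ERESTART);	/* estimate only; retry */
		return (EDQUOT);		/* hard over quota */
	}
	return (0);
}

int
main(void)
{
	printf("%d\n", quota_decision(90, 20, 100));	/* ERESTART */
	printf("%d\n", quota_decision(100, 0, 100));	/* EDQUOT */
	printf("%d\n", quota_decision(50, 10, 100));	/* 0: fits */
	return (0);
}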
3417 3415 /* ARGSUSED */
3418 3416 static int
3419 3417 dsl_dataset_set_quota_check(void *arg1, void *arg2, dmu_tx_t *tx)
3420 3418 {
3421 3419 dsl_dataset_t *ds = arg1;
3422 3420 dsl_prop_setarg_t *psa = arg2;
3423 3421 int err;
3424 3422
3425 3423 if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_REFQUOTA)
3426 3424 return (ENOTSUP);
3427 3425
3428 3426 if ((err = dsl_prop_predict_sync(ds->ds_dir, psa)) != 0)
3429 3427 return (err);
3430 3428
3431 3429 if (psa->psa_effective_value == 0)
3432 3430 return (0);
3433 3431
3434 3432 if (psa->psa_effective_value < ds->ds_phys->ds_referenced_bytes ||
3435 3433 psa->psa_effective_value < ds->ds_reserved)
3436 3434 return (ENOSPC);
3437 3435
3438 3436 return (0);
3439 3437 }
3440 3438
3441 3439 extern void dsl_prop_set_sync(void *, void *, dmu_tx_t *);
3442 3440
3443 3441 void
3444 3442 dsl_dataset_set_quota_sync(void *arg1, void *arg2, dmu_tx_t *tx)
3445 3443 {
3446 3444 dsl_dataset_t *ds = arg1;
3447 3445 dsl_prop_setarg_t *psa = arg2;
3448 3446 uint64_t effective_value = psa->psa_effective_value;
3449 3447
3450 3448 dsl_prop_set_sync(ds, psa, tx);
3451 3449 DSL_PROP_CHECK_PREDICTION(ds->ds_dir, psa);
3452 3450
3453 3451 if (ds->ds_quota != effective_value) {
3454 3452 dmu_buf_will_dirty(ds->ds_dbuf, tx);
3455 3453 ds->ds_quota = effective_value;
3456 3454
3457 3455 spa_history_log_internal(LOG_DS_REFQUOTA,
3458 3456 ds->ds_dir->dd_pool->dp_spa, tx, "%lld dataset = %llu ",
3459 3457 (longlong_t)ds->ds_quota, ds->ds_object);
3460 3458 }
3461 3459 }
3462 3460
3463 3461 int
3464 3462 dsl_dataset_set_quota(const char *dsname, zprop_source_t source, uint64_t quota)
3465 3463 {
3466 3464 dsl_dataset_t *ds;
3467 3465 dsl_prop_setarg_t psa;
3468 3466 int err;
3469 3467
3470 3468 	dsl_prop_setarg_init_uint64(&psa, "refquota", source, &quota);
3471 3469
3472 3470 err = dsl_dataset_hold(dsname, FTAG, &ds);
3473 3471 if (err)
3474 3472 return (err);
3475 3473
3476 3474 /*
3477 3475 * If someone removes a file, then tries to set the quota, we
3478 3476 * want to make sure the file freeing takes effect.
3479 3477 */
3480 3478 txg_wait_open(ds->ds_dir->dd_pool, 0);
3481 3479
3482 3480 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
3483 3481 dsl_dataset_set_quota_check, dsl_dataset_set_quota_sync,
3484 3482 ds, &psa, 0);
3485 3483
3486 3484 dsl_dataset_rele(ds, FTAG);
3487 3485 return (err);
3488 3486 }
3489 3487
3490 3488 static int
3491 3489 dsl_dataset_set_reservation_check(void *arg1, void *arg2, dmu_tx_t *tx)
3492 3490 {
3493 3491 dsl_dataset_t *ds = arg1;
3494 3492 dsl_prop_setarg_t *psa = arg2;
3495 3493 uint64_t effective_value;
3496 3494 uint64_t unique;
3497 3495 int err;
3498 3496
3499 3497 if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
3500 3498 SPA_VERSION_REFRESERVATION)
3501 3499 return (ENOTSUP);
3502 3500
3503 3501 if (dsl_dataset_is_snapshot(ds))
3504 3502 return (EINVAL);
3505 3503
3506 3504 if ((err = dsl_prop_predict_sync(ds->ds_dir, psa)) != 0)
3507 3505 return (err);
3508 3506
3509 3507 effective_value = psa->psa_effective_value;
3510 3508
3511 3509 /*
3512 3510 * If we are doing the preliminary check in open context, the
3513 3511 * space estimates may be inaccurate.
3514 3512 */
3515 3513 if (!dmu_tx_is_syncing(tx))
3516 3514 return (0);
3517 3515
3518 3516 mutex_enter(&ds->ds_lock);
3519 3517 if (!DS_UNIQUE_IS_ACCURATE(ds))
3520 3518 dsl_dataset_recalc_head_uniq(ds);
3521 3519 unique = ds->ds_phys->ds_unique_bytes;
3522 3520 mutex_exit(&ds->ds_lock);
3523 3521
3524 3522 if (MAX(unique, effective_value) > MAX(unique, ds->ds_reserved)) {
3525 3523 uint64_t delta = MAX(unique, effective_value) -
3526 3524 MAX(unique, ds->ds_reserved);
3527 3525
3528 3526 if (delta > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE))
3529 3527 return (ENOSPC);
3530 3528 if (ds->ds_quota > 0 &&
3531 3529 effective_value > ds->ds_quota)
3532 3530 return (ENOSPC);
3533 3531 }
3534 3532
3535 3533 return (0);
3536 3534 }
3537 3535
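/*
 * [editorial note] A worked example of the space check in
 * dsl_dataset_set_reservation_check() above, with hypothetical byte
 * counts.  The MAX() terms account for the unique bytes that already
 * consume part of both the old and the new reservation.
 */
#include <stdio.h>
#include <stdint.h>

#define	EX_MAX(a, b)	((a) > (b) ? (a) : (b))	/* stands in for MAX() */

int
main(void)
{
	uint64_t unique = 40;		/* ds_unique_bytes */
	uint64_t reserved = 60;		/* current refreservation */
	uint64_t effective = 150;	/* proposed refreservation */

	uint64_t delta = EX_MAX(unique, effective) -
	    EX_MAX(unique, reserved);

	/*
	 * Prints 90: raising the reservation from 60 to 150 must set
	 * aside 150 - 60 = 90 additional bytes, and that amount is
	 * what gets checked against dsl_dir_space_available().
	 */
	printf("reservation delta = %llu\n", (unsigned long long)delta);
	return (0);
}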
3538 3536 static void
3539 3537 dsl_dataset_set_reservation_sync(void *arg1, void *arg2, dmu_tx_t *tx)
3540 3538 {
3541 3539 dsl_dataset_t *ds = arg1;
3542 3540 dsl_prop_setarg_t *psa = arg2;
3543 3541 uint64_t effective_value = psa->psa_effective_value;
3544 3542 uint64_t unique;
3545 3543 int64_t delta;
3546 3544
3547 3545 dsl_prop_set_sync(ds, psa, tx);
3548 3546 DSL_PROP_CHECK_PREDICTION(ds->ds_dir, psa);
3549 3547
3550 3548 dmu_buf_will_dirty(ds->ds_dbuf, tx);
3551 3549
3552 3550 mutex_enter(&ds->ds_dir->dd_lock);
3553 3551 mutex_enter(&ds->ds_lock);
3554 3552 ASSERT(DS_UNIQUE_IS_ACCURATE(ds));
3555 3553 unique = ds->ds_phys->ds_unique_bytes;
3556 3554 delta = MAX(0, (int64_t)(effective_value - unique)) -
3557 3555 MAX(0, (int64_t)(ds->ds_reserved - unique));
3558 3556 ds->ds_reserved = effective_value;
3559 3557 mutex_exit(&ds->ds_lock);
3560 3558
3561 3559 dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV, delta, 0, 0, tx);
3562 3560 mutex_exit(&ds->ds_dir->dd_lock);
3563 3561
3564 3562 spa_history_log_internal(LOG_DS_REFRESERV,
3565 3563 ds->ds_dir->dd_pool->dp_spa, tx, "%lld dataset = %llu",
3566 3564 (longlong_t)effective_value, ds->ds_object);
3567 3565 }
3568 3566
3569 3567 int
3570 3568 dsl_dataset_set_reservation(const char *dsname, zprop_source_t source,
3571 3569 uint64_t reservation)
3572 3570 {
3573 3571 dsl_dataset_t *ds;
3574 3572 dsl_prop_setarg_t psa;
3575 3573 int err;
3576 3574
3577 3575 dsl_prop_setarg_init_uint64(&psa, "refreservation", source,
3578 3576 &reservation);
3579 3577
3580 3578 err = dsl_dataset_hold(dsname, FTAG, &ds);
3581 3579 if (err)
3582 3580 return (err);
3583 3581
3584 3582 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
3585 3583 dsl_dataset_set_reservation_check,
3586 3584 dsl_dataset_set_reservation_sync, ds, &psa, 0);
3587 3585
3588 3586 dsl_dataset_rele(ds, FTAG);
3589 3587 return (err);
3590 3588 }
3591 3589
3592 3590 typedef struct zfs_hold_cleanup_arg {
3593 3591 dsl_pool_t *dp;
3594 3592 uint64_t dsobj;
3595 3593 char htag[MAXNAMELEN];
3596 3594 } zfs_hold_cleanup_arg_t;
3597 3595
3598 3596 static void
3599 3597 dsl_dataset_user_release_onexit(void *arg)
3600 3598 {
3601 3599 zfs_hold_cleanup_arg_t *ca = arg;
3602 3600
3603 3601 (void) dsl_dataset_user_release_tmp(ca->dp, ca->dsobj, ca->htag,
3604 3602 B_TRUE);
3605 3603 kmem_free(ca, sizeof (zfs_hold_cleanup_arg_t));
3606 3604 }
3607 3605
3608 3606 void
3609 3607 dsl_register_onexit_hold_cleanup(dsl_dataset_t *ds, const char *htag,
3610 3608 minor_t minor)
3611 3609 {
3612 3610 zfs_hold_cleanup_arg_t *ca;
3613 3611
3614 3612 ca = kmem_alloc(sizeof (zfs_hold_cleanup_arg_t), KM_SLEEP);
3615 3613 ca->dp = ds->ds_dir->dd_pool;
3616 3614 ca->dsobj = ds->ds_object;
3617 3615 (void) strlcpy(ca->htag, htag, sizeof (ca->htag));
3618 3616 VERIFY3U(0, ==, zfs_onexit_add_cb(minor,
3619 3617 dsl_dataset_user_release_onexit, ca, NULL));
3620 3618 }
3621 3619
3622 3620 /*
3623 3621 * If you add new checks here, you may need to add
3624 3622 * additional checks to the "temporary" case in
3625 3623 * snapshot_check() in dmu_objset.c.
3626 3624 */
3627 3625 static int
3628 3626 dsl_dataset_user_hold_check(void *arg1, void *arg2, dmu_tx_t *tx)
3629 3627 {
3630 3628 dsl_dataset_t *ds = arg1;
3631 3629 struct dsl_ds_holdarg *ha = arg2;
3632 3630 char *htag = ha->htag;
3633 3631 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
3634 3632 int error = 0;
3635 3633
3636 3634 if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_USERREFS)
3637 3635 return (ENOTSUP);
3638 3636
3639 3637 if (!dsl_dataset_is_snapshot(ds))
3640 3638 return (EINVAL);
3641 3639
3642 3640 /* tags must be unique */
3643 3641 mutex_enter(&ds->ds_lock);
3644 3642 if (ds->ds_phys->ds_userrefs_obj) {
3645 3643 error = zap_lookup(mos, ds->ds_phys->ds_userrefs_obj, htag,
3646 3644 8, 1, tx);
3647 3645 if (error == 0)
3648 3646 error = EEXIST;
3649 3647 else if (error == ENOENT)
3650 3648 error = 0;
3651 3649 }
3652 3650 mutex_exit(&ds->ds_lock);
3653 3651
3654 3652 if (error == 0 && ha->temphold &&
3655 3653 strlen(htag) + MAX_TAG_PREFIX_LEN >= MAXNAMELEN)
3656 3654 error = E2BIG;
3657 3655
3658 3656 return (error);
3659 3657 }
3660 3658
3661 3659 void
3662 3660 dsl_dataset_user_hold_sync(void *arg1, void *arg2, dmu_tx_t *tx)
3663 3661 {
3664 3662 dsl_dataset_t *ds = arg1;
3665 3663 struct dsl_ds_holdarg *ha = arg2;
3666 3664 char *htag = ha->htag;
3667 3665 dsl_pool_t *dp = ds->ds_dir->dd_pool;
3668 3666 objset_t *mos = dp->dp_meta_objset;
3669 3667 uint64_t now = gethrestime_sec();
3670 3668 uint64_t zapobj;
3671 3669
3672 3670 mutex_enter(&ds->ds_lock);
3673 3671 if (ds->ds_phys->ds_userrefs_obj == 0) {
3674 3672 /*
3675 3673 * This is the first user hold for this dataset. Create
3676 3674 * the userrefs zap object.
3677 3675 */
3678 3676 dmu_buf_will_dirty(ds->ds_dbuf, tx);
3679 3677 zapobj = ds->ds_phys->ds_userrefs_obj =
3680 3678 zap_create(mos, DMU_OT_USERREFS, DMU_OT_NONE, 0, tx);
3681 3679 } else {
3682 3680 zapobj = ds->ds_phys->ds_userrefs_obj;
3683 3681 }
3684 3682 ds->ds_userrefs++;
3685 3683 mutex_exit(&ds->ds_lock);
3686 3684
3687 3685 VERIFY(0 == zap_add(mos, zapobj, htag, 8, 1, &now, tx));
3688 3686
3689 3687 if (ha->temphold) {
3690 3688 VERIFY(0 == dsl_pool_user_hold(dp, ds->ds_object,
3691 3689 htag, &now, tx));
3692 3690 }
3693 3691
3694 3692 spa_history_log_internal(LOG_DS_USER_HOLD,
3695 3693 dp->dp_spa, tx, "<%s> temp = %d dataset = %llu", htag,
3696 3694 (int)ha->temphold, ds->ds_object);
3697 3695 }
3698 3696
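/*
 * [editorial note] dsl_dataset_user_hold_sync() above creates the
 * userrefs ZAP object lazily, under ds_lock, the first time a hold is
 * taken.  A minimal user-space analogue of that create-on-first-use
 * idiom (a pthread mutex and calloc standing in for ds_lock and
 * zap_create) is sketched below; the mutex is assumed to be
 * initialized by the caller.
 */
#include <pthread.h>
#include <stdlib.h>

struct holds {
	pthread_mutex_t lock;	/* plays the role of ds_lock */
	void *table;		/* plays the userrefs ZAP object */
	unsigned refs;		/* plays ds_userrefs */
};

static void *
hold_take(struct holds *h)
{
	void *t;

	pthread_mutex_lock(&h->lock);
	if (h->table == NULL)
		h->table = calloc(1, 64);	/* "zap_create" on first hold */
	t = h->table;
	h->refs++;
	pthread_mutex_unlock(&h->lock);
	return (t);
}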
3699 3697 static int
3700 3698 dsl_dataset_user_hold_one(const char *dsname, void *arg)
3701 3699 {
3702 3700 struct dsl_ds_holdarg *ha = arg;
3703 3701 dsl_dataset_t *ds;
3704 3702 int error;
3705 3703 char *name;
3706 3704
3707 3705 	/* alloc a buffer to hold dsname@snapname plus terminating NUL */
3708 3706 name = kmem_asprintf("%s@%s", dsname, ha->snapname);
3709 3707 error = dsl_dataset_hold(name, ha->dstg, &ds);
3710 3708 strfree(name);
3711 3709 if (error == 0) {
3712 3710 ha->gotone = B_TRUE;
3713 3711 dsl_sync_task_create(ha->dstg, dsl_dataset_user_hold_check,
3714 3712 dsl_dataset_user_hold_sync, ds, ha, 0);
3715 3713 } else if (error == ENOENT && ha->recursive) {
3716 3714 error = 0;
3717 3715 } else {
3718 3716 (void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3719 3717 }
3720 3718 return (error);
3721 3719 }
3722 3720
3723 3721 int
3724 3722 dsl_dataset_user_hold_for_send(dsl_dataset_t *ds, char *htag,
3725 3723 boolean_t temphold)
3726 3724 {
3727 3725 struct dsl_ds_holdarg *ha;
3728 3726 int error;
3729 3727
3730 3728 ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);
3731 3729 ha->htag = htag;
3732 3730 ha->temphold = temphold;
3733 3731 error = dsl_sync_task_do(ds->ds_dir->dd_pool,
3734 3732 dsl_dataset_user_hold_check, dsl_dataset_user_hold_sync,
3735 3733 ds, ha, 0);
3736 3734 kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3737 3735
3738 3736 return (error);
3739 3737 }
3740 3738
3741 3739 int
3742 3740 dsl_dataset_user_hold(char *dsname, char *snapname, char *htag,
3743 3741 boolean_t recursive, boolean_t temphold, int cleanup_fd)
3744 3742 {
3745 3743 struct dsl_ds_holdarg *ha;
3746 3744 dsl_sync_task_t *dst;
3747 3745 spa_t *spa;
3748 3746 int error;
3749 3747 minor_t minor = 0;
3750 3748
3751 3749 if (cleanup_fd != -1) {
3752 3750 /* Currently we only support cleanup-on-exit of tempholds. */
3753 3751 if (!temphold)
3754 3752 return (EINVAL);
3755 3753 error = zfs_onexit_fd_hold(cleanup_fd, &minor);
3756 3754 if (error)
3757 3755 return (error);
3758 3756 }
3759 3757
3760 3758 ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);
3761 3759
3762 3760 (void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3763 3761
3764 3762 error = spa_open(dsname, &spa, FTAG);
3765 3763 if (error) {
3766 3764 kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3767 3765 if (cleanup_fd != -1)
3768 3766 zfs_onexit_fd_rele(cleanup_fd);
3769 3767 return (error);
3770 3768 }
3771 3769
3772 3770 ha->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
3773 3771 ha->htag = htag;
3774 3772 ha->snapname = snapname;
3775 3773 ha->recursive = recursive;
3776 3774 ha->temphold = temphold;
3777 3775
3778 3776 if (recursive) {
3779 3777 error = dmu_objset_find(dsname, dsl_dataset_user_hold_one,
3780 3778 ha, DS_FIND_CHILDREN);
3781 3779 } else {
3782 3780 error = dsl_dataset_user_hold_one(dsname, ha);
3783 3781 }
3784 3782 if (error == 0)
3785 3783 error = dsl_sync_task_group_wait(ha->dstg);
3786 3784
3787 3785 for (dst = list_head(&ha->dstg->dstg_tasks); dst;
3788 3786 dst = list_next(&ha->dstg->dstg_tasks, dst)) {
3789 3787 dsl_dataset_t *ds = dst->dst_arg1;
3790 3788
3791 3789 if (dst->dst_err) {
3792 3790 dsl_dataset_name(ds, ha->failed);
3793 3791 *strchr(ha->failed, '@') = '\0';
3794 3792 } else if (error == 0 && minor != 0 && temphold) {
3795 3793 /*
3796 3794 * If this hold is to be released upon process exit,
3797 3795 * register that action now.
3798 3796 */
3799 3797 dsl_register_onexit_hold_cleanup(ds, htag, minor);
3800 3798 }
3801 3799 dsl_dataset_rele(ds, ha->dstg);
3802 3800 }
3803 3801
3804 3802 if (error == 0 && recursive && !ha->gotone)
3805 3803 error = ENOENT;
3806 3804
3807 3805 if (error)
3808 3806 (void) strlcpy(dsname, ha->failed, sizeof (ha->failed));
3809 3807
3810 3808 dsl_sync_task_group_destroy(ha->dstg);
3811 3809
3812 3810 kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3813 3811 spa_close(spa, FTAG);
3814 3812 if (cleanup_fd != -1)
3815 3813 zfs_onexit_fd_rele(cleanup_fd);
3816 3814 return (error);
3817 3815 }
3818 3816
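/*
 * [editorial note] A hedged usage sketch for dsl_dataset_user_hold():
 * take a temporary, recursive hold on every @today snapshot under
 * tank/fs and have the holds released automatically when cleanup_fd
 * (a descriptor for the ZFS control device, as in the ioctl path) is
 * closed.  All names and values here are hypothetical.
 */
#if 0	/* illustrative only; requires kernel context */
	int error;

	error = dsl_dataset_user_hold("tank/fs", "today", ".send-1234",
	    B_TRUE /* recursive */, B_TRUE /* temphold */, cleanup_fd);
	if (error == ENOENT)
		;	/* no dataset under tank/fs has an @today snapshot */
#endif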
3819 3817 struct dsl_ds_releasearg {
3820 3818 dsl_dataset_t *ds;
3821 3819 const char *htag;
3822 3820 boolean_t own; /* do we own or just hold ds? */
3823 3821 };
3824 3822
3825 3823 static int
3826 3824 dsl_dataset_release_might_destroy(dsl_dataset_t *ds, const char *htag,
3827 3825 boolean_t *might_destroy)
3828 3826 {
3829 3827 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
3830 3828 uint64_t zapobj;
3831 3829 uint64_t tmp;
3832 3830 int error;
3833 3831
3834 3832 *might_destroy = B_FALSE;
3835 3833
3836 3834 mutex_enter(&ds->ds_lock);
3837 3835 zapobj = ds->ds_phys->ds_userrefs_obj;
3838 3836 if (zapobj == 0) {
3839 3837 /* The tag can't possibly exist */
3840 3838 mutex_exit(&ds->ds_lock);
3841 3839 return (ESRCH);
3842 3840 }
3843 3841
3844 3842 /* Make sure the tag exists */
3845 3843 error = zap_lookup(mos, zapobj, htag, 8, 1, &tmp);
3846 3844 if (error) {
3847 3845 mutex_exit(&ds->ds_lock);
3848 3846 if (error == ENOENT)
3849 3847 error = ESRCH;
3850 3848 return (error);
3851 3849 }
3852 3850
3853 3851 if (ds->ds_userrefs == 1 && ds->ds_phys->ds_num_children == 1 &&
3854 3852 DS_IS_DEFER_DESTROY(ds))
3855 3853 *might_destroy = B_TRUE;
3856 3854
3857 3855 mutex_exit(&ds->ds_lock);
3858 3856 return (0);
3859 3857 }
3860 3858
3861 3859 static int
3862 3860 dsl_dataset_user_release_check(void *arg1, void *tag, dmu_tx_t *tx)
3863 3861 {
3864 3862 struct dsl_ds_releasearg *ra = arg1;
3865 3863 dsl_dataset_t *ds = ra->ds;
3866 3864 boolean_t might_destroy;
3867 3865 int error;
3868 3866
3869 3867 if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_USERREFS)
3870 3868 return (ENOTSUP);
3871 3869
3872 3870 error = dsl_dataset_release_might_destroy(ds, ra->htag, &might_destroy);
3873 3871 if (error)
3874 3872 return (error);
3875 3873
3876 3874 if (might_destroy) {
3877 3875 struct dsl_ds_destroyarg dsda = {0};
3878 3876
3879 3877 if (dmu_tx_is_syncing(tx)) {
3880 3878 /*
3881 3879 * If we're not prepared to remove the snapshot,
3882 3880 * we can't allow the release to happen right now.
3883 3881 */
3884 3882 if (!ra->own)
3885 3883 return (EBUSY);
3886 3884 }
3887 3885 dsda.ds = ds;
3888 3886 dsda.releasing = B_TRUE;
3889 3887 return (dsl_dataset_destroy_check(&dsda, tag, tx));
3890 3888 }
3891 3889
3892 3890 return (0);
3893 3891 }
3894 3892
3895 3893 static void
3896 3894 dsl_dataset_user_release_sync(void *arg1, void *tag, dmu_tx_t *tx)
3897 3895 {
3898 3896 struct dsl_ds_releasearg *ra = arg1;
3899 3897 dsl_dataset_t *ds = ra->ds;
3900 3898 dsl_pool_t *dp = ds->ds_dir->dd_pool;
3901 3899 objset_t *mos = dp->dp_meta_objset;
3902 3900 uint64_t zapobj;
3903 3901 uint64_t dsobj = ds->ds_object;
3904 3902 uint64_t refs;
3905 3903 int error;
3906 3904
3907 3905 mutex_enter(&ds->ds_lock);
3908 3906 ds->ds_userrefs--;
3909 3907 refs = ds->ds_userrefs;
3910 3908 mutex_exit(&ds->ds_lock);
3911 3909 error = dsl_pool_user_release(dp, ds->ds_object, ra->htag, tx);
3912 3910 VERIFY(error == 0 || error == ENOENT);
3913 3911 zapobj = ds->ds_phys->ds_userrefs_obj;
3914 3912 VERIFY(0 == zap_remove(mos, zapobj, ra->htag, tx));
3915 3913 if (ds->ds_userrefs == 0 && ds->ds_phys->ds_num_children == 1 &&
3916 3914 DS_IS_DEFER_DESTROY(ds)) {
3917 3915 struct dsl_ds_destroyarg dsda = {0};
3918 3916
3919 3917 ASSERT(ra->own);
3920 3918 dsda.ds = ds;
3921 3919 dsda.releasing = B_TRUE;
3922 3920 /* We already did the destroy_check */
3923 3921 dsl_dataset_destroy_sync(&dsda, tag, tx);
3924 3922 }
3925 3923
3926 3924 spa_history_log_internal(LOG_DS_USER_RELEASE,
3927 3925 dp->dp_spa, tx, "<%s> %lld dataset = %llu",
3928 3926 ra->htag, (longlong_t)refs, dsobj);
3929 3927 }
3930 3928
3931 3929 static int
3932 3930 dsl_dataset_user_release_one(const char *dsname, void *arg)
3933 3931 {
3934 3932 struct dsl_ds_holdarg *ha = arg;
3935 3933 struct dsl_ds_releasearg *ra;
3936 3934 dsl_dataset_t *ds;
3937 3935 int error;
3938 3936 void *dtag = ha->dstg;
3939 3937 char *name;
3940 3938 boolean_t own = B_FALSE;
3941 3939 boolean_t might_destroy;
3942 3940
3943 3941 	/* alloc a buffer to hold dsname@snapname, plus the terminating NUL */
3944 3942 name = kmem_asprintf("%s@%s", dsname, ha->snapname);
3945 3943 error = dsl_dataset_hold(name, dtag, &ds);
3946 3944 strfree(name);
3947 3945 if (error == ENOENT && ha->recursive)
3948 3946 return (0);
3949 3947 (void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3950 3948 if (error)
3951 3949 return (error);
3952 3950
3953 3951 ha->gotone = B_TRUE;
3954 3952
3955 3953 ASSERT(dsl_dataset_is_snapshot(ds));
3956 3954
3957 3955 error = dsl_dataset_release_might_destroy(ds, ha->htag, &might_destroy);
3958 3956 if (error) {
3959 3957 dsl_dataset_rele(ds, dtag);
3960 3958 return (error);
3961 3959 }
3962 3960
3963 3961 if (might_destroy) {
3964 3962 #ifdef _KERNEL
3965 3963 name = kmem_asprintf("%s@%s", dsname, ha->snapname);
3966 3964 error = zfs_unmount_snap(name, NULL);
3967 3965 strfree(name);
3968 3966 if (error) {
3969 3967 dsl_dataset_rele(ds, dtag);
3970 3968 return (error);
3971 3969 }
3972 3970 #endif
3973 3971 if (!dsl_dataset_tryown(ds, B_TRUE, dtag)) {
3974 3972 dsl_dataset_rele(ds, dtag);
3975 3973 return (EBUSY);
3976 3974 } else {
3977 3975 own = B_TRUE;
3978 3976 dsl_dataset_make_exclusive(ds, dtag);
3979 3977 }
3980 3978 }
3981 3979
3982 3980 ra = kmem_alloc(sizeof (struct dsl_ds_releasearg), KM_SLEEP);
3983 3981 ra->ds = ds;
3984 3982 ra->htag = ha->htag;
3985 3983 ra->own = own;
3986 3984 dsl_sync_task_create(ha->dstg, dsl_dataset_user_release_check,
3987 3985 dsl_dataset_user_release_sync, ra, dtag, 0);
3988 3986
3989 3987 return (0);
3990 3988 }
3991 3989
3992 3990 int
3993 3991 dsl_dataset_user_release(char *dsname, char *snapname, char *htag,
3994 3992 boolean_t recursive)
3995 3993 {
3996 3994 struct dsl_ds_holdarg *ha;
3997 3995 dsl_sync_task_t *dst;
3998 3996 spa_t *spa;
3999 3997 int error;
4000 3998
4001 3999 top:
4002 4000 ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);
4003 4001
4004 4002 (void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
4005 4003
4006 4004 error = spa_open(dsname, &spa, FTAG);
4007 4005 if (error) {
4008 4006 kmem_free(ha, sizeof (struct dsl_ds_holdarg));
4009 4007 return (error);
4010 4008 }
4011 4009
4012 4010 ha->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
4013 4011 ha->htag = htag;
4014 4012 ha->snapname = snapname;
4015 4013 ha->recursive = recursive;
4016 4014 if (recursive) {
4017 4015 error = dmu_objset_find(dsname, dsl_dataset_user_release_one,
4018 4016 ha, DS_FIND_CHILDREN);
4019 4017 } else {
4020 4018 error = dsl_dataset_user_release_one(dsname, ha);
4021 4019 }
4022 4020 if (error == 0)
4023 4021 error = dsl_sync_task_group_wait(ha->dstg);
4024 4022
4025 4023 for (dst = list_head(&ha->dstg->dstg_tasks); dst;
4026 4024 dst = list_next(&ha->dstg->dstg_tasks, dst)) {
4027 4025 struct dsl_ds_releasearg *ra = dst->dst_arg1;
4028 4026 dsl_dataset_t *ds = ra->ds;
4029 4027
4030 4028 if (dst->dst_err)
4031 4029 dsl_dataset_name(ds, ha->failed);
4032 4030
4033 4031 if (ra->own)
4034 4032 dsl_dataset_disown(ds, ha->dstg);
4035 4033 else
4036 4034 dsl_dataset_rele(ds, ha->dstg);
4037 4035
4038 4036 kmem_free(ra, sizeof (struct dsl_ds_releasearg));
4039 4037 }
4040 4038
4041 4039 if (error == 0 && recursive && !ha->gotone)
4042 4040 error = ENOENT;
4043 4041
4044 4042 if (error && error != EBUSY)
4045 4043 (void) strlcpy(dsname, ha->failed, sizeof (ha->failed));
4046 4044
4047 4045 dsl_sync_task_group_destroy(ha->dstg);
4048 4046 kmem_free(ha, sizeof (struct dsl_ds_holdarg));
4049 4047 spa_close(spa, FTAG);
4050 4048
4051 4049 /*
4052 4050 * We can get EBUSY if we were racing with deferred destroy and
4053 4051 * dsl_dataset_user_release_check() hadn't done the necessary
4054 4052 * open context setup. We can also get EBUSY if we're racing
4055 4053 * with destroy and that thread is the ds_owner. Either way
4056 4054 * the busy condition should be transient, and we should retry
4057 4055 * the release operation.
4058 4056 */
4059 4057 if (error == EBUSY)
4060 4058 goto top;
4061 4059
4062 4060 return (error);
4063 4061 }
4064 4062
4065 4063 /*
4066 4064 * Called at spa_load time (with retry == B_FALSE) to release a stale
4067 4065 * temporary user hold. Also called by the onexit code (with retry == B_TRUE).
4068 4066 */
4069 4067 int
4070 4068 dsl_dataset_user_release_tmp(dsl_pool_t *dp, uint64_t dsobj, char *htag,
4071 4069 boolean_t retry)
4072 4070 {
4073 4071 dsl_dataset_t *ds;
4074 4072 char *snap;
4075 4073 char *name;
4076 4074 int namelen;
4077 4075 int error;
4078 4076
4079 4077 do {
4080 4078 rw_enter(&dp->dp_config_rwlock, RW_READER);
4081 4079 error = dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds);
4082 4080 rw_exit(&dp->dp_config_rwlock);
4083 4081 if (error)
4084 4082 return (error);
4085 4083 	namelen = dsl_dataset_namelen(ds) + 1;
4086 4084 name = kmem_alloc(namelen, KM_SLEEP);
4087 4085 dsl_dataset_name(ds, name);
4088 4086 dsl_dataset_rele(ds, FTAG);
4089 4087
4090 4088 snap = strchr(name, '@');
4091 4089 *snap = '\0';
4092 4090 ++snap;
4093 4091 error = dsl_dataset_user_release(name, snap, htag, B_FALSE);
4094 4092 kmem_free(name, namelen);
4095 4093
4096 4094 /*
4097 4095 * The object can't have been destroyed because we have a hold,
4098 4096 * but it might have been renamed, resulting in ENOENT. Retry
4099 4097 * if we've been requested to do so.
4100 4098 *
4101 4099 * It would be nice if we could use the dsobj all the way
4102 4100 * through and avoid ENOENT entirely. But we might need to
4103 4101 * unmount the snapshot, and there's currently no way to lookup
4104 4102 * a vfsp using a ZFS object id.
4105 4103 */
4106 4104 } while ((error == ENOENT) && retry);
4107 4105
4108 4106 return (error);
4109 4107 }
4110 4108
4111 4109 int
4112 4110 dsl_dataset_get_holds(const char *dsname, nvlist_t **nvp)
4113 4111 {
4114 4112 dsl_dataset_t *ds;
4115 4113 int err;
4116 4114
4117 4115 err = dsl_dataset_hold(dsname, FTAG, &ds);
4118 4116 if (err)
4119 4117 return (err);
4120 4118
4121 4119 VERIFY(0 == nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP));
4122 4120 if (ds->ds_phys->ds_userrefs_obj != 0) {
4123 4121 zap_attribute_t *za;
4124 4122 zap_cursor_t zc;
4125 4123
4126 4124 za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
4127 4125 for (zap_cursor_init(&zc, ds->ds_dir->dd_pool->dp_meta_objset,
4128 4126 ds->ds_phys->ds_userrefs_obj);
4129 4127 zap_cursor_retrieve(&zc, za) == 0;
4130 4128 zap_cursor_advance(&zc)) {
4131 4129 VERIFY(0 == nvlist_add_uint64(*nvp, za->za_name,
4132 4130 za->za_first_integer));
4133 4131 }
4134 4132 zap_cursor_fini(&zc);
4135 4133 kmem_free(za, sizeof (zap_attribute_t));
4136 4134 }
4137 4135 dsl_dataset_rele(ds, FTAG);
4138 4136 return (0);
4139 4137 }
4140 4138
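/*
 * [editorial note] A hedged sketch of consuming the nvlist returned
 * by dsl_dataset_get_holds(): each pair maps a hold tag to the time
 * the hold was taken (seconds since the epoch, as stored by
 * dsl_dataset_user_hold_sync()).  The dataset name is hypothetical.
 */
#if 0	/* illustrative only; requires kernel context */
	nvlist_t *holds;
	nvpair_t *pair;

	if (dsl_dataset_get_holds("tank/fs@today", &holds) == 0) {
		for (pair = nvlist_next_nvpair(holds, NULL); pair != NULL;
		    pair = nvlist_next_nvpair(holds, pair)) {
			uint64_t when;

			VERIFY(0 == nvpair_value_uint64(pair, &when));
			dprintf("tag %s held at %llu\n",
			    nvpair_name(pair), (u_longlong_t)when);
		}
		nvlist_free(holds);
	}
#endif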
4141 4139 /*
4142 4140 * Note, this function is used as the callback for dmu_objset_find(). We
4143 4141 * always return 0 so that we will continue to find and process
4144 4142 * inconsistent datasets, even if we encounter an error trying to
4145 4143 * process one of them.
4146 4144 */
4147 4145 /* ARGSUSED */
4148 4146 int
4149 4147 dsl_destroy_inconsistent(const char *dsname, void *arg)
4150 4148 {
4151 4149 dsl_dataset_t *ds;
4152 4150
4153 4151 if (dsl_dataset_own(dsname, B_TRUE, FTAG, &ds) == 0) {
4154 4152 if (DS_IS_INCONSISTENT(ds))
4155 4153 (void) dsl_dataset_destroy(ds, FTAG, B_FALSE);
4156 4154 else
4157 4155 dsl_dataset_disown(ds, FTAG);
4158 4156 }
4159 4157 return (0);
4160 4158 }
4161 4159
4162 4160 /*
4163 4161 * Return (in *usedp) the amount of space written in new that is not
4164 4162 * present in oldsnap. New may be a snapshot or the head. Old must be
4165 4163 * a snapshot before new, in new's filesystem (or its origin). If not then
4166 4164 * a snapshot before new, in new's filesystem (or its origin).  If not,
4167 4165 * fail and return EINVAL.
4168 4166 * The written space is calculated by considering two components: First, we
4169 4167 * ignore any freed space, and calculate the written as new's used space
4170 4168 * minus old's used space. Next, we add in the amount of space that was freed
4171 4169 * between the two snapshots, thus reducing new's used space relative to old's.
4172 4170 * Specifically, this is the space that was born before old->ds_creation_txg,
4173 4171 * and freed before new (i.e., on new's deadlist or a previous deadlist).
4174 4172 *
4175 4173 * space freed [---------------------]
4176 4174 * snapshots ---O-------O--------O-------O------
4177 4175 * oldsnap new
4178 4176 */
4179 4177 int
4180 4178 dsl_dataset_space_written(dsl_dataset_t *oldsnap, dsl_dataset_t *new,
4181 4179 uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
4182 4180 {
4183 4181 int err = 0;
4184 4182 uint64_t snapobj;
4185 4183 dsl_pool_t *dp = new->ds_dir->dd_pool;
4186 4184
4187 4185 *usedp = 0;
4188 4186 *usedp += new->ds_phys->ds_referenced_bytes;
4189 4187 *usedp -= oldsnap->ds_phys->ds_referenced_bytes;
4190 4188
4191 4189 *compp = 0;
4192 4190 *compp += new->ds_phys->ds_compressed_bytes;
4193 4191 *compp -= oldsnap->ds_phys->ds_compressed_bytes;
4194 4192
4195 4193 *uncompp = 0;
4196 4194 *uncompp += new->ds_phys->ds_uncompressed_bytes;
4197 4195 *uncompp -= oldsnap->ds_phys->ds_uncompressed_bytes;
4198 4196
4199 4197 rw_enter(&dp->dp_config_rwlock, RW_READER);
4200 4198 snapobj = new->ds_object;
4201 4199 while (snapobj != oldsnap->ds_object) {
4202 4200 dsl_dataset_t *snap;
4203 4201 uint64_t used, comp, uncomp;
4204 4202
4205 4203 if (snapobj == new->ds_object) {
4206 4204 snap = new;
4207 4205 } else {
4208 4206 err = dsl_dataset_hold_obj(dp, snapobj, FTAG, &snap);
4209 4207 if (err != 0)
4210 4208 break;
4211 4209 }
4212 4210
4213 4211 if (snap->ds_phys->ds_prev_snap_txg ==
4214 4212 oldsnap->ds_phys->ds_creation_txg) {
4215 4213 /*
4216 4214 * The blocks in the deadlist cannot be born after
4217 4215 * ds_prev_snap_txg, so get the whole deadlist space,
4218 4216 * which is more efficient (especially for old-format
4219 4217 * deadlists). Unfortunately the deadlist code
4220 4218 * doesn't have enough information to make this
4221 4219 * optimization itself.
4222 4220 */
4223 4221 dsl_deadlist_space(&snap->ds_deadlist,
4224 4222 &used, &comp, &uncomp);
4225 4223 } else {
4226 4224 dsl_deadlist_space_range(&snap->ds_deadlist,
4227 4225 0, oldsnap->ds_phys->ds_creation_txg,
4228 4226 &used, &comp, &uncomp);
4229 4227 }
4230 4228 *usedp += used;
4231 4229 *compp += comp;
4232 4230 *uncompp += uncomp;
4233 4231
4234 4232 /*
4235 4233 * If we get to the beginning of the chain of snapshots
4236 4234 * (ds_prev_snap_obj == 0) before oldsnap, then oldsnap
4237 4235 * was not a snapshot of/before new.
4238 4236 */
4239 4237 snapobj = snap->ds_phys->ds_prev_snap_obj;
4240 4238 if (snap != new)
4241 4239 dsl_dataset_rele(snap, FTAG);
4242 4240 if (snapobj == 0) {
4243 4241 err = EINVAL;
4244 4242 break;
4245 4243 }
4246 4244
4247 4245 }
4248 4246 rw_exit(&dp->dp_config_rwlock);
4249 4247 return (err);
4250 4248 }
4251 4249
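/*
 * [editorial note] A worked example of the "space written" formula
 * above, with hypothetical byte counts: written = (new's referenced -
 * oldsnap's referenced) + (space born by oldsnap's creation txg but
 * freed between oldsnap and new, summed over the intervening
 * deadlists in the loop above).
 */
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint64_t old_referenced = 1000;	/* oldsnap's ds_referenced_bytes */
	uint64_t new_referenced = 1200;	/* new's ds_referenced_bytes */
	uint64_t freed_since_old = 300;	/* summed deadlist range space */

	uint64_t written = new_referenced - old_referenced +
	    freed_since_old;

	/*
	 * Prints 500: new references only 200 more bytes than oldsnap,
	 * but 300 of oldsnap's bytes were freed in between, so 500
	 * bytes must have been newly written.
	 */
	printf("written = %llu\n", (unsigned long long)written);
	return (0);
}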
4252 4250 /*
4253 4251 * Return (in *usedp) the amount of space that will be reclaimed if firstsnap,
4254 4252 * lastsnap, and all snapshots in between are deleted.
4255 4253 *
4256 4254 * blocks that would be freed [---------------------------]
4257 4255 * snapshots ---O-------O--------O-------O--------O
4258 4256 * firstsnap lastsnap
4259 4257 *
4260 4258 * This is the set of blocks that were born after the snap before firstsnap,
4261 4259 * (birth > firstsnap->prev_snap_txg) and died before the snap after the
4262 4260 * last snap (i.e., is on lastsnap->ds_next->ds_deadlist or an earlier deadlist).
4263 4261 * We calculate this by iterating over the relevant deadlists (from the snap
4264 4262 * after lastsnap, backward to the snap after firstsnap), summing up the
4265 4263 * space on the deadlist that was born after the snap before firstsnap.
4266 4264 */
4267 4265 int
4268 4266 dsl_dataset_space_wouldfree(dsl_dataset_t *firstsnap,
4269 4267 dsl_dataset_t *lastsnap,
4270 4268 uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
4271 4269 {
4272 4270 int err = 0;
4273 4271 uint64_t snapobj;
4274 4272 dsl_pool_t *dp = firstsnap->ds_dir->dd_pool;
4275 4273
4276 4274 ASSERT(dsl_dataset_is_snapshot(firstsnap));
4277 4275 ASSERT(dsl_dataset_is_snapshot(lastsnap));
4278 4276
4279 4277 /*
4280 4278 * Check that the snapshots are in the same dsl_dir, and firstsnap
4281 4279 * is before lastsnap.
4282 4280 */
4283 4281 if (firstsnap->ds_dir != lastsnap->ds_dir ||
4284 4282 firstsnap->ds_phys->ds_creation_txg >
4285 4283 lastsnap->ds_phys->ds_creation_txg)
4286 4284 return (EINVAL);
4287 4285
4288 4286 *usedp = *compp = *uncompp = 0;
4289 4287
4290 4288 rw_enter(&dp->dp_config_rwlock, RW_READER);
4291 4289 snapobj = lastsnap->ds_phys->ds_next_snap_obj;
4292 4290 while (snapobj != firstsnap->ds_object) {
4293 4291 dsl_dataset_t *ds;
4294 4292 uint64_t used, comp, uncomp;
4295 4293
4296 4294 err = dsl_dataset_hold_obj(dp, snapobj, FTAG, &ds);
4297 4295 if (err != 0)
4298 4296 break;
4299 4297
4300 4298 dsl_deadlist_space_range(&ds->ds_deadlist,
4301 4299 firstsnap->ds_phys->ds_prev_snap_txg, UINT64_MAX,
4302 4300 &used, &comp, &uncomp);
4303 4301 *usedp += used;
4304 4302 *compp += comp;
4305 4303 *uncompp += uncomp;
4306 4304
4307 4305 snapobj = ds->ds_phys->ds_prev_snap_obj;
4308 4306 ASSERT3U(snapobj, !=, 0);
4309 4307 dsl_dataset_rele(ds, FTAG);
4310 4308 }
4311 4309 rw_exit(&dp->dp_config_rwlock);
4312 4310 return (err);
4313 4311 }
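/*
 * [editorial note] A worked example of the "would free" computation
 * above: one dsl_deadlist_space_range() total per snapshot, from the
 * snap after lastsnap back to the snap after firstsnap, summed.  The
 * per-deadlist totals below are hypothetical.
 */
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	/* bytes on each deadlist born after the snap before firstsnap */
	uint64_t range_used[] = { 120, 0, 75 };
	uint64_t wouldfree = 0;
	size_t i;

	for (i = 0; i < sizeof (range_used) / sizeof (range_used[0]); i++)
		wouldfree += range_used[i];

	/* prints 195: the space reclaimed if that snapshot range dies */
	printf("wouldfree = %llu\n", (unsigned long long)wouldfree);
	return (0);
}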