2882 implement libzfs_core
2883 changing "canmount" property to "on" should not always remount dataset
2900 "zfs snapshot" should be able to create multiple, arbitrary snapshots at once
Reviewed by: George Wilson <george.wilson@delphix.com>
Reviewed by: Chris Siden <christopher.siden@delphix.com>
Reviewed by: Garrett D'Amore <garrett@damore.org>
Reviewed by: Bill Pijewski <wdp@joyent.com>
Reviewed by: Dan Kruchinin <dan.kruchinin@gmail.com>
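
Context for issue 2900 (a minimal sketch, not part of the webrev): the diff below replaces the old single-name dmu_objset_snapshot() entry point with one that takes an nvlist of full snapshot names plus an optional per-snapshot error nvlist, so that many snapshots can be created in one sync task group. The sketch shows how a userland consumer might exercise this through the libzfs_core library from issue 2882; the pool and dataset names are made-up examples, and the error handling is deliberately minimal.

/*
 * Sketch: create two snapshots in one request via libzfs_core.
 * Build with: cc snap.c -lzfs_core -lnvpair
 * Assumes a pool "tank" with filesystems "tank/a" and "tank/b".
 */
#include <stdio.h>
#include <libnvpair.h>
#include <libzfs_core.h>

int
main(void)
{
	nvlist_t *snaps, *errlist = NULL;
	nvpair_t *pair;
	int err;

	if (libzfs_core_init() != 0)
		return (1);

	/* One boolean nvpair per snapshot; all must be in the same pool. */
	snaps = fnvlist_alloc();
	fnvlist_add_boolean(snaps, "tank/a@backup");
	fnvlist_add_boolean(snaps, "tank/b@backup");

	err = lzc_snapshot(snaps, NULL, &errlist);
	if (err != 0 && errlist != NULL) {
		/* errlist maps each failed snapshot name to an errno. */
		for (pair = nvlist_next_nvpair(errlist, NULL); pair != NULL;
		    pair = nvlist_next_nvpair(errlist, pair)) {
			(void) fprintf(stderr, "%s: error %d\n",
			    nvpair_name(pair), fnvpair_value_int32(pair));
		}
	}

	fnvlist_free(snaps);
	if (errlist != NULL)
		fnvlist_free(errlist);
	libzfs_core_fini();
	return (err == 0 ? 0 : 1);
}
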
--- old/usr/src/uts/common/fs/zfs/dmu_objset.c
+++ new/usr/src/uts/common/fs/zfs/dmu_objset.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 + * Copyright (c) 2012 by Delphix. All rights reserved.
23 24 */
24 25
25 26 /* Portions Copyright 2010 Robert Milkowski */
26 27
27 28 #include <sys/cred.h>
28 29 #include <sys/zfs_context.h>
29 30 #include <sys/dmu_objset.h>
30 31 #include <sys/dsl_dir.h>
31 32 #include <sys/dsl_dataset.h>
32 33 #include <sys/dsl_prop.h>
33 34 #include <sys/dsl_pool.h>
34 35 #include <sys/dsl_synctask.h>
35 36 #include <sys/dsl_deleg.h>
36 37 #include <sys/dnode.h>
37 38 #include <sys/dbuf.h>
38 39 #include <sys/zvol.h>
39 40 #include <sys/dmu_tx.h>
40 41 #include <sys/zap.h>
41 42 #include <sys/zil.h>
42 43 #include <sys/dmu_impl.h>
43 44 #include <sys/zfs_ioctl.h>
44 45 #include <sys/sa.h>
45 46 #include <sys/zfs_onexit.h>
46 47
47 48 /*
48 49 * Needed to close a window in dnode_move() that allows the objset to be freed
49 50 * before it can be safely accessed.
50 51 */
51 52 krwlock_t os_lock;
52 53
53 54 void
54 55 dmu_objset_init(void)
55 56 {
56 57 rw_init(&os_lock, NULL, RW_DEFAULT, NULL);
57 58 }
58 59
59 60 void
60 61 dmu_objset_fini(void)
61 62 {
62 63 rw_destroy(&os_lock);
63 64 }
64 65
65 66 spa_t *
66 67 dmu_objset_spa(objset_t *os)
67 68 {
68 69 return (os->os_spa);
69 70 }
70 71
71 72 zilog_t *
72 73 dmu_objset_zil(objset_t *os)
73 74 {
74 75 return (os->os_zil);
75 76 }
76 77
77 78 dsl_pool_t *
78 79 dmu_objset_pool(objset_t *os)
79 80 {
80 81 dsl_dataset_t *ds;
81 82
82 83 if ((ds = os->os_dsl_dataset) != NULL && ds->ds_dir)
83 84 return (ds->ds_dir->dd_pool);
84 85 else
85 86 return (spa_get_dsl(os->os_spa));
86 87 }
87 88
88 89 dsl_dataset_t *
89 90 dmu_objset_ds(objset_t *os)
90 91 {
91 92 return (os->os_dsl_dataset);
92 93 }
93 94
94 95 dmu_objset_type_t
95 96 dmu_objset_type(objset_t *os)
96 97 {
97 98 return (os->os_phys->os_type);
98 99 }
99 100
100 101 void
101 102 dmu_objset_name(objset_t *os, char *buf)
102 103 {
103 104 dsl_dataset_name(os->os_dsl_dataset, buf);
104 105 }
105 106
106 107 uint64_t
107 108 dmu_objset_id(objset_t *os)
108 109 {
109 110 dsl_dataset_t *ds = os->os_dsl_dataset;
110 111
111 112 return (ds ? ds->ds_object : 0);
112 113 }
113 114
114 115 uint64_t
115 116 dmu_objset_syncprop(objset_t *os)
116 117 {
117 118 return (os->os_sync);
118 119 }
119 120
120 121 uint64_t
121 122 dmu_objset_logbias(objset_t *os)
122 123 {
123 124 return (os->os_logbias);
124 125 }
125 126
126 127 static void
127 128 checksum_changed_cb(void *arg, uint64_t newval)
128 129 {
129 130 objset_t *os = arg;
130 131
131 132 /*
132 133 * Inheritance should have been done by now.
133 134 */
134 135 ASSERT(newval != ZIO_CHECKSUM_INHERIT);
135 136
136 137 os->os_checksum = zio_checksum_select(newval, ZIO_CHECKSUM_ON_VALUE);
137 138 }
138 139
139 140 static void
140 141 compression_changed_cb(void *arg, uint64_t newval)
141 142 {
142 143 objset_t *os = arg;
143 144
144 145 /*
145 146 * Inheritance and range checking should have been done by now.
146 147 */
147 148 ASSERT(newval != ZIO_COMPRESS_INHERIT);
148 149
149 150 os->os_compress = zio_compress_select(newval, ZIO_COMPRESS_ON_VALUE);
150 151 }
151 152
152 153 static void
153 154 copies_changed_cb(void *arg, uint64_t newval)
154 155 {
155 156 objset_t *os = arg;
156 157
157 158 /*
158 159 * Inheritance and range checking should have been done by now.
159 160 */
160 161 ASSERT(newval > 0);
161 162 ASSERT(newval <= spa_max_replication(os->os_spa));
162 163
163 164 os->os_copies = newval;
164 165 }
165 166
166 167 static void
167 168 dedup_changed_cb(void *arg, uint64_t newval)
168 169 {
169 170 objset_t *os = arg;
170 171 spa_t *spa = os->os_spa;
171 172 enum zio_checksum checksum;
172 173
173 174 /*
174 175 * Inheritance should have been done by now.
175 176 */
176 177 ASSERT(newval != ZIO_CHECKSUM_INHERIT);
177 178
178 179 checksum = zio_checksum_dedup_select(spa, newval, ZIO_CHECKSUM_OFF);
179 180
180 181 os->os_dedup_checksum = checksum & ZIO_CHECKSUM_MASK;
181 182 os->os_dedup_verify = !!(checksum & ZIO_CHECKSUM_VERIFY);
182 183 }
183 184
184 185 static void
185 186 primary_cache_changed_cb(void *arg, uint64_t newval)
186 187 {
187 188 objset_t *os = arg;
188 189
189 190 /*
190 191 * Inheritance and range checking should have been done by now.
191 192 */
192 193 ASSERT(newval == ZFS_CACHE_ALL || newval == ZFS_CACHE_NONE ||
193 194 newval == ZFS_CACHE_METADATA);
194 195
195 196 os->os_primary_cache = newval;
196 197 }
197 198
198 199 static void
199 200 secondary_cache_changed_cb(void *arg, uint64_t newval)
200 201 {
201 202 objset_t *os = arg;
202 203
203 204 /*
204 205 * Inheritance and range checking should have been done by now.
205 206 */
206 207 ASSERT(newval == ZFS_CACHE_ALL || newval == ZFS_CACHE_NONE ||
207 208 newval == ZFS_CACHE_METADATA);
208 209
209 210 os->os_secondary_cache = newval;
210 211 }
211 212
212 213 static void
213 214 sync_changed_cb(void *arg, uint64_t newval)
214 215 {
215 216 objset_t *os = arg;
216 217
217 218 /*
218 219 * Inheritance and range checking should have been done by now.
219 220 */
220 221 ASSERT(newval == ZFS_SYNC_STANDARD || newval == ZFS_SYNC_ALWAYS ||
221 222 newval == ZFS_SYNC_DISABLED);
222 223
223 224 os->os_sync = newval;
224 225 if (os->os_zil)
225 226 zil_set_sync(os->os_zil, newval);
226 227 }
227 228
228 229 static void
229 230 logbias_changed_cb(void *arg, uint64_t newval)
230 231 {
231 232 objset_t *os = arg;
232 233
233 234 ASSERT(newval == ZFS_LOGBIAS_LATENCY ||
234 235 newval == ZFS_LOGBIAS_THROUGHPUT);
235 236 os->os_logbias = newval;
236 237 if (os->os_zil)
237 238 zil_set_logbias(os->os_zil, newval);
238 239 }
239 240
240 241 void
241 242 dmu_objset_byteswap(void *buf, size_t size)
242 243 {
243 244 objset_phys_t *osp = buf;
244 245
245 246 ASSERT(size == OBJSET_OLD_PHYS_SIZE || size == sizeof (objset_phys_t));
246 247 dnode_byteswap(&osp->os_meta_dnode);
247 248 byteswap_uint64_array(&osp->os_zil_header, sizeof (zil_header_t));
248 249 osp->os_type = BSWAP_64(osp->os_type);
249 250 osp->os_flags = BSWAP_64(osp->os_flags);
250 251 if (size == sizeof (objset_phys_t)) {
251 252 dnode_byteswap(&osp->os_userused_dnode);
252 253 dnode_byteswap(&osp->os_groupused_dnode);
253 254 }
254 255 }
255 256
256 257 int
257 258 dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
258 259 objset_t **osp)
259 260 {
260 261 objset_t *os;
261 262 int i, err;
262 263
263 264 ASSERT(ds == NULL || MUTEX_HELD(&ds->ds_opening_lock));
264 265
265 266 os = kmem_zalloc(sizeof (objset_t), KM_SLEEP);
266 267 os->os_dsl_dataset = ds;
267 268 os->os_spa = spa;
268 269 os->os_rootbp = bp;
269 270 if (!BP_IS_HOLE(os->os_rootbp)) {
270 271 uint32_t aflags = ARC_WAIT;
271 272 zbookmark_t zb;
272 273 SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
273 274 ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
274 275
275 276 if (DMU_OS_IS_L2CACHEABLE(os))
276 277 aflags |= ARC_L2CACHE;
277 278
278 279 dprintf_bp(os->os_rootbp, "reading %s", "");
279 280 /*
280 281 * XXX when bprewrite scrub can change the bp,
281 282 * and this is called from dmu_objset_open_ds_os, the bp
282 283 * could change, and we'll need a lock.
283 284 */
284 285 err = dsl_read_nolock(NULL, spa, os->os_rootbp,
285 286 arc_getbuf_func, &os->os_phys_buf,
286 287 ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, &zb);
287 288 if (err) {
288 289 kmem_free(os, sizeof (objset_t));
289 290 /* convert checksum errors into IO errors */
290 291 if (err == ECKSUM)
291 292 err = EIO;
292 293 return (err);
293 294 }
294 295
295 296 /* Increase the blocksize if we are permitted. */
296 297 if (spa_version(spa) >= SPA_VERSION_USERSPACE &&
297 298 arc_buf_size(os->os_phys_buf) < sizeof (objset_phys_t)) {
298 299 arc_buf_t *buf = arc_buf_alloc(spa,
299 300 sizeof (objset_phys_t), &os->os_phys_buf,
300 301 ARC_BUFC_METADATA);
301 302 bzero(buf->b_data, sizeof (objset_phys_t));
302 303 bcopy(os->os_phys_buf->b_data, buf->b_data,
303 304 arc_buf_size(os->os_phys_buf));
304 305 (void) arc_buf_remove_ref(os->os_phys_buf,
305 306 &os->os_phys_buf);
306 307 os->os_phys_buf = buf;
307 308 }
308 309
309 310 os->os_phys = os->os_phys_buf->b_data;
310 311 os->os_flags = os->os_phys->os_flags;
311 312 } else {
312 313 int size = spa_version(spa) >= SPA_VERSION_USERSPACE ?
313 314 sizeof (objset_phys_t) : OBJSET_OLD_PHYS_SIZE;
314 315 os->os_phys_buf = arc_buf_alloc(spa, size,
315 316 &os->os_phys_buf, ARC_BUFC_METADATA);
316 317 os->os_phys = os->os_phys_buf->b_data;
317 318 bzero(os->os_phys, size);
318 319 }
319 320
320 321 /*
321 322 * Note: the changed_cb will be called once before the register
322 323 * func returns, thus changing the checksum/compression from the
323 324 * default (fletcher2/off). Snapshots don't need to know about
324 325 * checksum/compression/copies.
325 326 */
326 327 if (ds) {
327 328 err = dsl_prop_register(ds, "primarycache",
328 329 primary_cache_changed_cb, os);
329 330 if (err == 0)
330 331 err = dsl_prop_register(ds, "secondarycache",
331 332 secondary_cache_changed_cb, os);
332 333 if (!dsl_dataset_is_snapshot(ds)) {
333 334 if (err == 0)
334 335 err = dsl_prop_register(ds, "checksum",
335 336 checksum_changed_cb, os);
336 337 if (err == 0)
337 338 err = dsl_prop_register(ds, "compression",
338 339 compression_changed_cb, os);
339 340 if (err == 0)
340 341 err = dsl_prop_register(ds, "copies",
341 342 copies_changed_cb, os);
342 343 if (err == 0)
343 344 err = dsl_prop_register(ds, "dedup",
344 345 dedup_changed_cb, os);
345 346 if (err == 0)
346 347 err = dsl_prop_register(ds, "logbias",
347 348 logbias_changed_cb, os);
348 349 if (err == 0)
349 350 err = dsl_prop_register(ds, "sync",
350 351 sync_changed_cb, os);
351 352 }
352 353 if (err) {
353 354 VERIFY(arc_buf_remove_ref(os->os_phys_buf,
354 355 &os->os_phys_buf) == 1);
355 356 kmem_free(os, sizeof (objset_t));
356 357 return (err);
357 358 }
358 359 } else if (ds == NULL) {
359 360 /* It's the meta-objset. */
360 361 os->os_checksum = ZIO_CHECKSUM_FLETCHER_4;
361 362 os->os_compress = ZIO_COMPRESS_LZJB;
362 363 os->os_copies = spa_max_replication(spa);
363 364 os->os_dedup_checksum = ZIO_CHECKSUM_OFF;
364 365 os->os_dedup_verify = 0;
365 366 os->os_logbias = 0;
366 367 os->os_sync = 0;
367 368 os->os_primary_cache = ZFS_CACHE_ALL;
368 369 os->os_secondary_cache = ZFS_CACHE_ALL;
369 370 }
370 371
371 372 if (ds == NULL || !dsl_dataset_is_snapshot(ds))
372 373 os->os_zil_header = os->os_phys->os_zil_header;
373 374 os->os_zil = zil_alloc(os, &os->os_zil_header);
374 375
375 376 for (i = 0; i < TXG_SIZE; i++) {
376 377 list_create(&os->os_dirty_dnodes[i], sizeof (dnode_t),
377 378 offsetof(dnode_t, dn_dirty_link[i]));
378 379 list_create(&os->os_free_dnodes[i], sizeof (dnode_t),
379 380 offsetof(dnode_t, dn_dirty_link[i]));
380 381 }
381 382 list_create(&os->os_dnodes, sizeof (dnode_t),
382 383 offsetof(dnode_t, dn_link));
383 384 list_create(&os->os_downgraded_dbufs, sizeof (dmu_buf_impl_t),
384 385 offsetof(dmu_buf_impl_t, db_link));
385 386
386 387 mutex_init(&os->os_lock, NULL, MUTEX_DEFAULT, NULL);
387 388 mutex_init(&os->os_obj_lock, NULL, MUTEX_DEFAULT, NULL);
388 389 mutex_init(&os->os_user_ptr_lock, NULL, MUTEX_DEFAULT, NULL);
389 390
390 391 DMU_META_DNODE(os) = dnode_special_open(os,
391 392 &os->os_phys->os_meta_dnode, DMU_META_DNODE_OBJECT,
392 393 &os->os_meta_dnode);
393 394 if (arc_buf_size(os->os_phys_buf) >= sizeof (objset_phys_t)) {
394 395 DMU_USERUSED_DNODE(os) = dnode_special_open(os,
395 396 &os->os_phys->os_userused_dnode, DMU_USERUSED_OBJECT,
396 397 &os->os_userused_dnode);
397 398 DMU_GROUPUSED_DNODE(os) = dnode_special_open(os,
398 399 &os->os_phys->os_groupused_dnode, DMU_GROUPUSED_OBJECT,
399 400 &os->os_groupused_dnode);
400 401 }
401 402
402 403 /*
403 404 * We should be the only thread trying to do this because we
404 405 * have ds_opening_lock
405 406 */
406 407 if (ds) {
407 408 mutex_enter(&ds->ds_lock);
408 409 ASSERT(ds->ds_objset == NULL);
409 410 ds->ds_objset = os;
410 411 mutex_exit(&ds->ds_lock);
411 412 }
412 413
413 414 *osp = os;
414 415 return (0);
415 416 }
416 417
417 418 int
418 419 dmu_objset_from_ds(dsl_dataset_t *ds, objset_t **osp)
419 420 {
420 421 int err = 0;
421 422
422 423 mutex_enter(&ds->ds_opening_lock);
423 424 *osp = ds->ds_objset;
424 425 if (*osp == NULL) {
425 426 err = dmu_objset_open_impl(dsl_dataset_get_spa(ds),
426 427 ds, dsl_dataset_get_blkptr(ds), osp);
427 428 }
428 429 mutex_exit(&ds->ds_opening_lock);
429 430 return (err);
430 431 }
431 432
432 433 /* called from zpl */
433 434 int
434 435 dmu_objset_hold(const char *name, void *tag, objset_t **osp)
435 436 {
436 437 dsl_dataset_t *ds;
437 438 int err;
438 439
439 440 err = dsl_dataset_hold(name, tag, &ds);
440 441 if (err)
441 442 return (err);
442 443
443 444 err = dmu_objset_from_ds(ds, osp);
444 445 if (err)
445 446 dsl_dataset_rele(ds, tag);
446 447
447 448 return (err);
448 449 }
449 450
450 451 /* called from zpl */
451 452 int
452 453 dmu_objset_own(const char *name, dmu_objset_type_t type,
453 454 boolean_t readonly, void *tag, objset_t **osp)
454 455 {
455 456 dsl_dataset_t *ds;
456 457 int err;
457 458
458 459 err = dsl_dataset_own(name, B_FALSE, tag, &ds);
459 460 if (err)
460 461 return (err);
461 462
462 463 err = dmu_objset_from_ds(ds, osp);
463 464 if (err) {
464 465 dsl_dataset_disown(ds, tag);
465 466 } else if (type != DMU_OST_ANY && type != (*osp)->os_phys->os_type) {
466 467 dmu_objset_disown(*osp, tag);
467 468 return (EINVAL);
468 469 } else if (!readonly && dsl_dataset_is_snapshot(ds)) {
469 470 dmu_objset_disown(*osp, tag);
470 471 return (EROFS);
471 472 }
472 473 return (err);
473 474 }
474 475
475 476 void
476 477 dmu_objset_rele(objset_t *os, void *tag)
477 478 {
478 479 dsl_dataset_rele(os->os_dsl_dataset, tag);
479 480 }
480 481
481 482 void
482 483 dmu_objset_disown(objset_t *os, void *tag)
483 484 {
484 485 dsl_dataset_disown(os->os_dsl_dataset, tag);
485 486 }
486 487
487 488 int
488 489 dmu_objset_evict_dbufs(objset_t *os)
489 490 {
490 491 dnode_t *dn;
491 492
492 493 mutex_enter(&os->os_lock);
493 494
494 495 /* process the mdn last, since the other dnodes have holds on it */
495 496 list_remove(&os->os_dnodes, DMU_META_DNODE(os));
496 497 list_insert_tail(&os->os_dnodes, DMU_META_DNODE(os));
497 498
498 499 /*
499 500 * Find the first dnode with holds. We have to do this dance
500 501 * because dnode_add_ref() only works if you already have a
501 502 * hold. If there are no holds then it has no dbufs so OK to
502 503 * skip.
503 504 */
504 505 for (dn = list_head(&os->os_dnodes);
505 506 dn && !dnode_add_ref(dn, FTAG);
506 507 dn = list_next(&os->os_dnodes, dn))
507 508 continue;
508 509
509 510 while (dn) {
510 511 dnode_t *next_dn = dn;
511 512
512 513 do {
513 514 next_dn = list_next(&os->os_dnodes, next_dn);
514 515 } while (next_dn && !dnode_add_ref(next_dn, FTAG));
515 516
516 517 mutex_exit(&os->os_lock);
517 518 dnode_evict_dbufs(dn);
518 519 dnode_rele(dn, FTAG);
519 520 mutex_enter(&os->os_lock);
520 521 dn = next_dn;
521 522 }
522 523 dn = list_head(&os->os_dnodes);
523 524 mutex_exit(&os->os_lock);
524 525 return (dn != DMU_META_DNODE(os));
525 526 }
526 527
527 528 void
528 529 dmu_objset_evict(objset_t *os)
529 530 {
530 531 dsl_dataset_t *ds = os->os_dsl_dataset;
531 532
532 533 for (int t = 0; t < TXG_SIZE; t++)
533 534 ASSERT(!dmu_objset_is_dirty(os, t));
534 535
535 536 if (ds) {
536 537 if (!dsl_dataset_is_snapshot(ds)) {
537 538 VERIFY(0 == dsl_prop_unregister(ds, "checksum",
538 539 checksum_changed_cb, os));
539 540 VERIFY(0 == dsl_prop_unregister(ds, "compression",
540 541 compression_changed_cb, os));
541 542 VERIFY(0 == dsl_prop_unregister(ds, "copies",
542 543 copies_changed_cb, os));
543 544 VERIFY(0 == dsl_prop_unregister(ds, "dedup",
544 545 dedup_changed_cb, os));
545 546 VERIFY(0 == dsl_prop_unregister(ds, "logbias",
546 547 logbias_changed_cb, os));
547 548 VERIFY(0 == dsl_prop_unregister(ds, "sync",
548 549 sync_changed_cb, os));
549 550 }
550 551 VERIFY(0 == dsl_prop_unregister(ds, "primarycache",
551 552 primary_cache_changed_cb, os));
552 553 VERIFY(0 == dsl_prop_unregister(ds, "secondarycache",
553 554 secondary_cache_changed_cb, os));
554 555 }
555 556
556 557 if (os->os_sa)
557 558 sa_tear_down(os);
558 559
559 560 /*
560 561 * We should need only a single pass over the dnode list, since
561 562 * nothing can be added to the list at this point.
562 563 */
563 564 (void) dmu_objset_evict_dbufs(os);
564 565
565 566 dnode_special_close(&os->os_meta_dnode);
566 567 if (DMU_USERUSED_DNODE(os)) {
567 568 dnode_special_close(&os->os_userused_dnode);
568 569 dnode_special_close(&os->os_groupused_dnode);
569 570 }
570 571 zil_free(os->os_zil);
571 572
572 573 ASSERT3P(list_head(&os->os_dnodes), ==, NULL);
573 574
574 575 VERIFY(arc_buf_remove_ref(os->os_phys_buf, &os->os_phys_buf) == 1);
575 576
576 577 /*
577 578 * This is a barrier to prevent the objset from going away in
578 579 * dnode_move() until we can safely ensure that the objset is still in
579 580 * use. We consider the objset valid before the barrier and invalid
580 581 * after the barrier.
581 582 */
582 583 rw_enter(&os_lock, RW_READER);
583 584 rw_exit(&os_lock);
584 585
585 586 mutex_destroy(&os->os_lock);
586 587 mutex_destroy(&os->os_obj_lock);
587 588 mutex_destroy(&os->os_user_ptr_lock);
588 589 kmem_free(os, sizeof (objset_t));
589 590 }
590 591
591 592 timestruc_t
592 593 dmu_objset_snap_cmtime(objset_t *os)
593 594 {
594 595 return (dsl_dir_snap_cmtime(os->os_dsl_dataset->ds_dir));
595 596 }
596 597
597 598 /* called from dsl for meta-objset */
598 599 objset_t *
599 600 dmu_objset_create_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
600 601 dmu_objset_type_t type, dmu_tx_t *tx)
601 602 {
602 603 objset_t *os;
603 604 dnode_t *mdn;
604 605
605 606 ASSERT(dmu_tx_is_syncing(tx));
606 607 if (ds != NULL)
607 608 VERIFY(0 == dmu_objset_from_ds(ds, &os));
608 609 else
609 610 VERIFY(0 == dmu_objset_open_impl(spa, NULL, bp, &os));
610 611
611 612 mdn = DMU_META_DNODE(os);
612 613
613 614 dnode_allocate(mdn, DMU_OT_DNODE, 1 << DNODE_BLOCK_SHIFT,
614 615 DN_MAX_INDBLKSHIFT, DMU_OT_NONE, 0, tx);
615 616
616 617 /*
617 618 * We don't want to have to increase the meta-dnode's nlevels
618 619 	 * later, because then we could do it in quiescing context while
619 620 * we are also accessing it in open context.
620 621 *
621 622 * This precaution is not necessary for the MOS (ds == NULL),
622 623 * because the MOS is only updated in syncing context.
623 624 * This is most fortunate: the MOS is the only objset that
624 625 * needs to be synced multiple times as spa_sync() iterates
625 626 * to convergence, so minimizing its dn_nlevels matters.
626 627 */
627 628 if (ds != NULL) {
628 629 int levels = 1;
629 630
630 631 /*
631 632 * Determine the number of levels necessary for the meta-dnode
632 633 * to contain DN_MAX_OBJECT dnodes.
633 634 */
634 635 while ((uint64_t)mdn->dn_nblkptr << (mdn->dn_datablkshift +
635 636 (levels - 1) * (mdn->dn_indblkshift - SPA_BLKPTRSHIFT)) <
636 637 DN_MAX_OBJECT * sizeof (dnode_phys_t))
637 638 levels++;
638 639
639 640 mdn->dn_next_nlevels[tx->tx_txg & TXG_MASK] =
640 641 mdn->dn_nlevels = levels;
641 642 }
642 643
643 644 ASSERT(type != DMU_OST_NONE);
644 645 ASSERT(type != DMU_OST_ANY);
645 646 ASSERT(type < DMU_OST_NUMTYPES);
646 647 os->os_phys->os_type = type;
647 648 if (dmu_objset_userused_enabled(os)) {
648 649 os->os_phys->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE;
649 650 os->os_flags = os->os_phys->os_flags;
650 651 }
651 652
652 653 dsl_dataset_dirty(ds, tx);
653 654
654 655 return (os);
655 656 }
656 657
657 658 struct oscarg {
658 659 void (*userfunc)(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx);
659 660 void *userarg;
660 661 dsl_dataset_t *clone_origin;
661 662 const char *lastname;
662 663 dmu_objset_type_t type;
663 664 uint64_t flags;
664 665 cred_t *cr;
665 666 };
666 667
667 668 /*ARGSUSED*/
668 669 static int
669 670 dmu_objset_create_check(void *arg1, void *arg2, dmu_tx_t *tx)
670 671 {
671 672 dsl_dir_t *dd = arg1;
672 673 struct oscarg *oa = arg2;
673 674 objset_t *mos = dd->dd_pool->dp_meta_objset;
674 675 int err;
675 676 uint64_t ddobj;
676 677
677 678 err = zap_lookup(mos, dd->dd_phys->dd_child_dir_zapobj,
678 679 oa->lastname, sizeof (uint64_t), 1, &ddobj);
679 680 if (err != ENOENT)
680 681 return (err ? err : EEXIST);
681 682
682 683 if (oa->clone_origin != NULL) {
683 684 /* You can't clone across pools. */
684 685 if (oa->clone_origin->ds_dir->dd_pool != dd->dd_pool)
685 686 return (EXDEV);
686 687
687 688 /* You can only clone snapshots, not the head datasets. */
688 689 if (!dsl_dataset_is_snapshot(oa->clone_origin))
689 690 return (EINVAL);
690 691 }
691 692
692 693 return (0);
693 694 }
694 695
695 696 static void
696 697 dmu_objset_create_sync(void *arg1, void *arg2, dmu_tx_t *tx)
697 698 {
698 699 dsl_dir_t *dd = arg1;
699 700 spa_t *spa = dd->dd_pool->dp_spa;
700 701 struct oscarg *oa = arg2;
701 702 uint64_t obj;
703 + dsl_dataset_t *ds;
704 + blkptr_t *bp;
702 705
703 706 ASSERT(dmu_tx_is_syncing(tx));
704 707
705 708 obj = dsl_dataset_create_sync(dd, oa->lastname,
706 709 oa->clone_origin, oa->flags, oa->cr, tx);
707 710
708 - if (oa->clone_origin == NULL) {
709 - dsl_pool_t *dp = dd->dd_pool;
710 - dsl_dataset_t *ds;
711 - blkptr_t *bp;
712 - objset_t *os;
713 -
714 - VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, obj, FTAG, &ds));
715 - bp = dsl_dataset_get_blkptr(ds);
716 - ASSERT(BP_IS_HOLE(bp));
717 -
718 - os = dmu_objset_create_impl(spa, ds, bp, oa->type, tx);
711 + VERIFY3U(0, ==, dsl_dataset_hold_obj(dd->dd_pool, obj, FTAG, &ds));
712 + bp = dsl_dataset_get_blkptr(ds);
713 + if (BP_IS_HOLE(bp)) {
714 + objset_t *os =
715 + dmu_objset_create_impl(spa, ds, bp, oa->type, tx);
719 716
720 717 if (oa->userfunc)
721 718 oa->userfunc(os, oa->userarg, oa->cr, tx);
722 - dsl_dataset_rele(ds, FTAG);
723 719 }
724 720
725 - spa_history_log_internal(LOG_DS_CREATE, spa, tx, "dataset = %llu", obj);
721 + if (oa->clone_origin == NULL) {
722 + spa_history_log_internal_ds(ds, "create", tx, "");
723 + } else {
724 + char namebuf[MAXNAMELEN];
725 + dsl_dataset_name(oa->clone_origin, namebuf);
726 + spa_history_log_internal_ds(ds, "clone", tx,
727 + "origin=%s (%llu)", namebuf, oa->clone_origin->ds_object);
728 + }
729 + dsl_dataset_rele(ds, FTAG);
726 730 }
727 731
728 732 int
729 733 dmu_objset_create(const char *name, dmu_objset_type_t type, uint64_t flags,
730 734 void (*func)(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx), void *arg)
731 735 {
732 736 dsl_dir_t *pdd;
733 737 const char *tail;
734 738 int err = 0;
735 739 struct oscarg oa = { 0 };
736 740
737 741 ASSERT(strchr(name, '@') == NULL);
738 742 err = dsl_dir_open(name, FTAG, &pdd, &tail);
739 743 if (err)
740 744 return (err);
741 745 if (tail == NULL) {
742 746 dsl_dir_close(pdd, FTAG);
743 747 return (EEXIST);
744 748 }
745 749
746 750 oa.userfunc = func;
747 751 oa.userarg = arg;
748 752 oa.lastname = tail;
749 753 oa.type = type;
750 754 oa.flags = flags;
751 755 oa.cr = CRED();
752 756
753 757 err = dsl_sync_task_do(pdd->dd_pool, dmu_objset_create_check,
754 758 dmu_objset_create_sync, pdd, &oa, 5);
755 759 dsl_dir_close(pdd, FTAG);
756 760 return (err);
757 761 }
758 762
759 763 int
760 764 dmu_objset_clone(const char *name, dsl_dataset_t *clone_origin, uint64_t flags)
761 765 {
762 766 dsl_dir_t *pdd;
763 767 const char *tail;
764 768 int err = 0;
765 769 struct oscarg oa = { 0 };
766 770
767 771 ASSERT(strchr(name, '@') == NULL);
768 772 err = dsl_dir_open(name, FTAG, &pdd, &tail);
769 773 if (err)
770 774 return (err);
771 775 if (tail == NULL) {
772 776 dsl_dir_close(pdd, FTAG);
773 777 return (EEXIST);
774 778 }
775 779
776 780 oa.lastname = tail;
777 781 oa.clone_origin = clone_origin;
778 782 oa.flags = flags;
779 783 oa.cr = CRED();
780 784
781 785 err = dsl_sync_task_do(pdd->dd_pool, dmu_objset_create_check,
782 786 dmu_objset_create_sync, pdd, &oa, 5);
783 787 dsl_dir_close(pdd, FTAG);
784 788 return (err);
785 789 }
786 790
787 791 int
788 792 dmu_objset_destroy(const char *name, boolean_t defer)
789 793 {
790 794 dsl_dataset_t *ds;
791 795 int error;
792 796
793 797 error = dsl_dataset_own(name, B_TRUE, FTAG, &ds);
794 798 if (error == 0) {
795 799 error = dsl_dataset_destroy(ds, FTAG, defer);
796 800 /* dsl_dataset_destroy() closes the ds. */
797 801 }
798 802
799 803 return (error);
800 804 }
801 805
802 -struct snaparg {
803 - dsl_sync_task_group_t *dstg;
804 - char *snapname;
805 - char *htag;
806 - char failed[MAXPATHLEN];
807 - boolean_t recursive;
808 - boolean_t needsuspend;
809 - boolean_t temporary;
810 - nvlist_t *props;
811 - struct dsl_ds_holdarg *ha; /* only needed in the temporary case */
812 - dsl_dataset_t *newds;
813 -};
806 +typedef struct snapallarg {
807 + dsl_sync_task_group_t *saa_dstg;
808 + boolean_t saa_needsuspend;
809 + nvlist_t *saa_props;
810 +
811 + /* the following are used only if 'temporary' is set: */
812 + boolean_t saa_temporary;
813 + const char *saa_htag;
814 + struct dsl_ds_holdarg *saa_ha;
815 + dsl_dataset_t *saa_newds;
816 +} snapallarg_t;
817 +
818 +typedef struct snaponearg {
819 + const char *soa_longname; /* long snap name */
820 + const char *soa_snapname; /* short snap name */
821 + snapallarg_t *soa_saa;
822 +} snaponearg_t;
814 823
815 824 static int
816 825 snapshot_check(void *arg1, void *arg2, dmu_tx_t *tx)
817 826 {
818 827 objset_t *os = arg1;
819 - struct snaparg *sn = arg2;
828 + snaponearg_t *soa = arg2;
829 + snapallarg_t *saa = soa->soa_saa;
820 830 int error;
821 831
822 832 /* The props have already been checked by zfs_check_userprops(). */
823 833
824 834 error = dsl_dataset_snapshot_check(os->os_dsl_dataset,
825 - sn->snapname, tx);
835 + soa->soa_snapname, tx);
826 836 if (error)
827 837 return (error);
828 838
829 - if (sn->temporary) {
839 + if (saa->saa_temporary) {
830 840 /*
831 841 * Ideally we would just call
832 842 * dsl_dataset_user_hold_check() and
833 843 * dsl_dataset_destroy_check() here. However the
834 844 * dataset we want to hold and destroy is the snapshot
835 845 * that we just confirmed we can create, but it won't
836 846 * exist until after these checks are run. Do any
837 847 * checks we can here and if more checks are added to
838 848 * those routines in the future, similar checks may be
839 849 * necessary here.
840 850 */
841 851 if (spa_version(os->os_spa) < SPA_VERSION_USERREFS)
842 852 return (ENOTSUP);
843 853 /*
844 854 * Not checking number of tags because the tag will be
845 855 * unique, as it will be the only tag.
846 856 */
847 - if (strlen(sn->htag) + MAX_TAG_PREFIX_LEN >= MAXNAMELEN)
857 + if (strlen(saa->saa_htag) + MAX_TAG_PREFIX_LEN >= MAXNAMELEN)
848 858 return (E2BIG);
849 859
850 - sn->ha = kmem_alloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);
851 - sn->ha->temphold = B_TRUE;
852 - sn->ha->htag = sn->htag;
860 + saa->saa_ha = kmem_alloc(sizeof (struct dsl_ds_holdarg),
861 + KM_SLEEP);
862 + saa->saa_ha->temphold = B_TRUE;
863 + saa->saa_ha->htag = saa->saa_htag;
853 864 }
854 865 return (error);
855 866 }
856 867
857 868 static void
858 869 snapshot_sync(void *arg1, void *arg2, dmu_tx_t *tx)
859 870 {
860 871 objset_t *os = arg1;
861 872 dsl_dataset_t *ds = os->os_dsl_dataset;
862 - struct snaparg *sn = arg2;
873 + snaponearg_t *soa = arg2;
874 + snapallarg_t *saa = soa->soa_saa;
863 875
864 - dsl_dataset_snapshot_sync(ds, sn->snapname, tx);
876 + dsl_dataset_snapshot_sync(ds, soa->soa_snapname, tx);
865 877
866 - if (sn->props) {
878 + if (saa->saa_props != NULL) {
867 879 dsl_props_arg_t pa;
868 - pa.pa_props = sn->props;
880 + pa.pa_props = saa->saa_props;
869 881 pa.pa_source = ZPROP_SRC_LOCAL;
870 882 dsl_props_set_sync(ds->ds_prev, &pa, tx);
871 883 }
872 884
873 - if (sn->temporary) {
885 + if (saa->saa_temporary) {
874 886 struct dsl_ds_destroyarg da;
875 887
876 - dsl_dataset_user_hold_sync(ds->ds_prev, sn->ha, tx);
877 - kmem_free(sn->ha, sizeof (struct dsl_ds_holdarg));
878 - sn->ha = NULL;
879 - sn->newds = ds->ds_prev;
888 + dsl_dataset_user_hold_sync(ds->ds_prev, saa->saa_ha, tx);
889 + kmem_free(saa->saa_ha, sizeof (struct dsl_ds_holdarg));
890 + saa->saa_ha = NULL;
891 + saa->saa_newds = ds->ds_prev;
880 892
881 893 da.ds = ds->ds_prev;
882 894 da.defer = B_TRUE;
883 895 dsl_dataset_destroy_sync(&da, FTAG, tx);
884 896 }
885 897 }
886 898
887 899 static int
888 -dmu_objset_snapshot_one(const char *name, void *arg)
900 +snapshot_one_impl(const char *snapname, void *arg)
889 901 {
890 - struct snaparg *sn = arg;
902 + char fsname[MAXPATHLEN];
903 + snapallarg_t *saa = arg;
904 + snaponearg_t *soa;
891 905 objset_t *os;
892 906 int err;
893 - char *cp;
894 907
895 - /*
896 - * If the objset starts with a '%', then ignore it unless it was
897 - * explicitly named (ie, not recursive). These hidden datasets
898 - * are always inconsistent, and by not opening them here, we can
899 - * avoid a race with dsl_dir_destroy_check().
900 - */
901 - cp = strrchr(name, '/');
902 - if (cp && cp[1] == '%' && sn->recursive)
903 - return (0);
904 -
905 - (void) strcpy(sn->failed, name);
906 -
907 - /*
908 - * Check permissions if we are doing a recursive snapshot. The
909 - * permission checks for the starting dataset have already been
910 - * performed in zfs_secpolicy_snapshot()
911 - */
912 - if (sn->recursive && (err = zfs_secpolicy_snapshot_perms(name, CRED())))
913 - return (err);
908 + (void) strlcpy(fsname, snapname, sizeof (fsname));
909 + strchr(fsname, '@')[0] = '\0';
914 910
915 - err = dmu_objset_hold(name, sn, &os);
911 + err = dmu_objset_hold(fsname, saa, &os);
916 912 if (err != 0)
917 913 return (err);
918 914
919 915 /*
920 916 * If the objset is in an inconsistent state (eg, in the process
921 - * of being destroyed), don't snapshot it. As with %hidden
922 - * datasets, we return EBUSY if this name was explicitly
923 - * requested (ie, not recursive), and otherwise ignore it.
917 + * of being destroyed), don't snapshot it.
924 918 */
925 919 if (os->os_dsl_dataset->ds_phys->ds_flags & DS_FLAG_INCONSISTENT) {
926 - dmu_objset_rele(os, sn);
927 - return (sn->recursive ? 0 : EBUSY);
920 + dmu_objset_rele(os, saa);
921 + return (EBUSY);
928 922 }
929 923
930 - if (sn->needsuspend) {
924 + if (saa->saa_needsuspend) {
931 925 err = zil_suspend(dmu_objset_zil(os));
932 926 if (err) {
933 - dmu_objset_rele(os, sn);
927 + dmu_objset_rele(os, saa);
934 928 return (err);
935 929 }
936 930 }
937 - dsl_sync_task_create(sn->dstg, snapshot_check, snapshot_sync,
938 - os, sn, 3);
931 +
932 + soa = kmem_zalloc(sizeof (*soa), KM_SLEEP);
933 + soa->soa_saa = saa;
934 + soa->soa_longname = snapname;
935 + soa->soa_snapname = strchr(snapname, '@') + 1;
936 +
937 + dsl_sync_task_create(saa->saa_dstg, snapshot_check, snapshot_sync,
938 + os, soa, 3);
939 939
940 940 return (0);
941 941 }
942 942
943 +/*
944 + * The snapshots must all be in the same pool.
945 + */
943 946 int
944 -dmu_objset_snapshot(char *fsname, char *snapname, char *tag,
945 - nvlist_t *props, boolean_t recursive, boolean_t temporary, int cleanup_fd)
947 +dmu_objset_snapshot(nvlist_t *snaps, nvlist_t *props, nvlist_t *errors)
946 948 {
947 949 dsl_sync_task_t *dst;
948 - struct snaparg sn;
950 + snapallarg_t saa = { 0 };
949 951 spa_t *spa;
950 - minor_t minor;
952 + int rv = 0;
951 953 int err;
954 + nvpair_t *pair;
952 955
953 - (void) strcpy(sn.failed, fsname);
956 + pair = nvlist_next_nvpair(snaps, NULL);
957 + if (pair == NULL)
958 + return (0);
954 959
955 - err = spa_open(fsname, &spa, FTAG);
960 + err = spa_open(nvpair_name(pair), &spa, FTAG);
956 961 if (err)
957 962 return (err);
958 -
959 - if (temporary) {
960 - if (cleanup_fd < 0) {
961 - spa_close(spa, FTAG);
962 - return (EINVAL);
963 + saa.saa_dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
964 + saa.saa_props = props;
965 + saa.saa_needsuspend = (spa_version(spa) < SPA_VERSION_FAST_SNAP);
966 +
967 + for (pair = nvlist_next_nvpair(snaps, NULL); pair != NULL;
968 + pair = nvlist_next_nvpair(snaps, pair)) {
969 + err = snapshot_one_impl(nvpair_name(pair), &saa);
970 + if (err != 0) {
971 + if (errors != NULL) {
972 + fnvlist_add_int32(errors,
973 + nvpair_name(pair), err);
974 + }
975 + rv = err;
963 976 }
964 - if ((err = zfs_onexit_fd_hold(cleanup_fd, &minor)) != 0) {
965 - spa_close(spa, FTAG);
966 - return (err);
977 + }
978 +
979 + /*
980 + * If any call to snapshot_one_impl() failed, don't execute the
981 + * sync task. The error handling code below will clean up the
982 + * snaponearg_t from any successful calls to
983 + * snapshot_one_impl().
984 + */
985 + if (rv == 0)
986 + err = dsl_sync_task_group_wait(saa.saa_dstg);
987 + if (err != 0)
988 + rv = err;
989 +
990 + for (dst = list_head(&saa.saa_dstg->dstg_tasks); dst;
991 + dst = list_next(&saa.saa_dstg->dstg_tasks, dst)) {
992 + objset_t *os = dst->dst_arg1;
993 + snaponearg_t *soa = dst->dst_arg2;
994 + if (dst->dst_err != 0) {
995 + if (errors != NULL) {
996 + fnvlist_add_int32(errors,
997 + soa->soa_longname, dst->dst_err);
998 + }
999 + rv = dst->dst_err;
967 1000 }
1001 +
1002 + if (saa.saa_needsuspend)
1003 + zil_resume(dmu_objset_zil(os));
1004 + dmu_objset_rele(os, &saa);
1005 + kmem_free(soa, sizeof (*soa));
968 1006 }
969 1007
970 - sn.dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
971 - sn.snapname = snapname;
972 - sn.htag = tag;
973 - sn.props = props;
974 - sn.recursive = recursive;
975 - sn.needsuspend = (spa_version(spa) < SPA_VERSION_FAST_SNAP);
976 - sn.temporary = temporary;
977 - sn.ha = NULL;
978 - sn.newds = NULL;
979 -
980 - if (recursive) {
981 - err = dmu_objset_find(fsname,
982 - dmu_objset_snapshot_one, &sn, DS_FIND_CHILDREN);
983 - } else {
984 - err = dmu_objset_snapshot_one(fsname, &sn);
1008 + dsl_sync_task_group_destroy(saa.saa_dstg);
1009 + spa_close(spa, FTAG);
1010 + return (rv);
1011 +}
1012 +
1013 +int
1014 +dmu_objset_snapshot_one(const char *fsname, const char *snapname)
1015 +{
1016 + int err;
1017 + char *longsnap = kmem_asprintf("%s@%s", fsname, snapname);
1018 + nvlist_t *snaps = fnvlist_alloc();
1019 +
1020 + fnvlist_add_boolean(snaps, longsnap);
1021 + err = dmu_objset_snapshot(snaps, NULL, NULL);
1022 + fnvlist_free(snaps);
1023 + strfree(longsnap);
1024 + return (err);
1025 +}
1026 +
1027 +int
1028 +dmu_objset_snapshot_tmp(const char *snapname, const char *tag, int cleanup_fd)
1029 +{
1030 + dsl_sync_task_t *dst;
1031 + snapallarg_t saa = { 0 };
1032 + spa_t *spa;
1033 + minor_t minor;
1034 + int err;
1035 +
1036 + err = spa_open(snapname, &spa, FTAG);
1037 + if (err)
1038 + return (err);
1039 + saa.saa_dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
1040 + saa.saa_htag = tag;
1041 + saa.saa_needsuspend = (spa_version(spa) < SPA_VERSION_FAST_SNAP);
1042 + saa.saa_temporary = B_TRUE;
1043 +
1044 + if (cleanup_fd < 0) {
1045 + spa_close(spa, FTAG);
1046 + return (EINVAL);
1047 + }
1048 + if ((err = zfs_onexit_fd_hold(cleanup_fd, &minor)) != 0) {
1049 + spa_close(spa, FTAG);
1050 + return (err);
985 1051 }
986 1052
1053 + err = snapshot_one_impl(snapname, &saa);
1054 +
987 1055 if (err == 0)
988 - err = dsl_sync_task_group_wait(sn.dstg);
1056 + err = dsl_sync_task_group_wait(saa.saa_dstg);
989 1057
990 - for (dst = list_head(&sn.dstg->dstg_tasks); dst;
991 - dst = list_next(&sn.dstg->dstg_tasks, dst)) {
1058 + for (dst = list_head(&saa.saa_dstg->dstg_tasks); dst;
1059 + dst = list_next(&saa.saa_dstg->dstg_tasks, dst)) {
992 1060 objset_t *os = dst->dst_arg1;
993 - dsl_dataset_t *ds = os->os_dsl_dataset;
994 - if (dst->dst_err) {
995 - dsl_dataset_name(ds, sn.failed);
996 - } else if (temporary) {
997 - dsl_register_onexit_hold_cleanup(sn.newds, tag, minor);
998 - }
999 - if (sn.needsuspend)
1061 + dsl_register_onexit_hold_cleanup(saa.saa_newds, tag, minor);
1062 + if (saa.saa_needsuspend)
1000 1063 zil_resume(dmu_objset_zil(os));
1001 - dmu_objset_rele(os, &sn);
1064 + dmu_objset_rele(os, &saa);
1002 1065 }
1003 1066
1004 - if (err)
1005 - (void) strcpy(fsname, sn.failed);
1006 - if (temporary)
1007 - zfs_onexit_fd_rele(cleanup_fd);
1008 - dsl_sync_task_group_destroy(sn.dstg);
1067 + zfs_onexit_fd_rele(cleanup_fd);
1068 + dsl_sync_task_group_destroy(saa.saa_dstg);
1009 1069 spa_close(spa, FTAG);
1010 1070 return (err);
1011 1071 }
1012 1072
1073 +
1013 1074 static void
1014 1075 dmu_objset_sync_dnodes(list_t *list, list_t *newlist, dmu_tx_t *tx)
1015 1076 {
1016 1077 dnode_t *dn;
1017 1078
1018 1079 while (dn = list_head(list)) {
1019 1080 ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
1020 1081 ASSERT(dn->dn_dbuf->db_data_pending);
1021 1082 /*
1022 1083 * Initialize dn_zio outside dnode_sync() because the
1023 1084 	 * meta-dnode needs to set it outside dnode_sync().
1024 1085 */
1025 1086 dn->dn_zio = dn->dn_dbuf->db_data_pending->dr_zio;
1026 1087 ASSERT(dn->dn_zio);
1027 1088
1028 1089 ASSERT3U(dn->dn_nlevels, <=, DN_MAX_LEVELS);
1029 1090 list_remove(list, dn);
1030 1091
1031 1092 if (newlist) {
1032 1093 (void) dnode_add_ref(dn, newlist);
1033 1094 list_insert_tail(newlist, dn);
1034 1095 }
1035 1096
1036 1097 dnode_sync(dn, tx);
1037 1098 }
1038 1099 }
1039 1100
1040 1101 /* ARGSUSED */
1041 1102 static void
1042 1103 dmu_objset_write_ready(zio_t *zio, arc_buf_t *abuf, void *arg)
1043 1104 {
1044 1105 blkptr_t *bp = zio->io_bp;
1045 1106 objset_t *os = arg;
1046 1107 dnode_phys_t *dnp = &os->os_phys->os_meta_dnode;
1047 1108
1048 1109 ASSERT(bp == os->os_rootbp);
1049 1110 ASSERT(BP_GET_TYPE(bp) == DMU_OT_OBJSET);
1050 1111 ASSERT(BP_GET_LEVEL(bp) == 0);
1051 1112
1052 1113 /*
1053 1114 * Update rootbp fill count: it should be the number of objects
1054 1115 * allocated in the object set (not counting the "special"
1055 1116 * objects that are stored in the objset_phys_t -- the meta
1056 1117 * dnode and user/group accounting objects).
1057 1118 */
1058 1119 bp->blk_fill = 0;
1059 1120 for (int i = 0; i < dnp->dn_nblkptr; i++)
1060 1121 bp->blk_fill += dnp->dn_blkptr[i].blk_fill;
1061 1122 }
1062 1123
1063 1124 /* ARGSUSED */
1064 1125 static void
1065 1126 dmu_objset_write_done(zio_t *zio, arc_buf_t *abuf, void *arg)
1066 1127 {
1067 1128 blkptr_t *bp = zio->io_bp;
1068 1129 blkptr_t *bp_orig = &zio->io_bp_orig;
1069 1130 objset_t *os = arg;
1070 1131
1071 1132 if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
1072 1133 ASSERT(BP_EQUAL(bp, bp_orig));
1073 1134 } else {
1074 1135 dsl_dataset_t *ds = os->os_dsl_dataset;
1075 1136 dmu_tx_t *tx = os->os_synctx;
1076 1137
1077 1138 (void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
1078 1139 dsl_dataset_block_born(ds, bp, tx);
1079 1140 }
1080 1141 }
1081 1142
1082 1143 /* called from dsl */
1083 1144 void
1084 1145 dmu_objset_sync(objset_t *os, zio_t *pio, dmu_tx_t *tx)
1085 1146 {
1086 1147 int txgoff;
1087 1148 zbookmark_t zb;
1088 1149 zio_prop_t zp;
1089 1150 zio_t *zio;
1090 1151 list_t *list;
1091 1152 list_t *newlist = NULL;
1092 1153 dbuf_dirty_record_t *dr;
1093 1154
1094 1155 dprintf_ds(os->os_dsl_dataset, "txg=%llu\n", tx->tx_txg);
1095 1156
1096 1157 ASSERT(dmu_tx_is_syncing(tx));
1097 1158 /* XXX the write_done callback should really give us the tx... */
1098 1159 os->os_synctx = tx;
1099 1160
1100 1161 if (os->os_dsl_dataset == NULL) {
1101 1162 /*
1102 1163 * This is the MOS. If we have upgraded,
1103 1164 * spa_max_replication() could change, so reset
1104 1165 * os_copies here.
1105 1166 */
1106 1167 os->os_copies = spa_max_replication(os->os_spa);
1107 1168 }
1108 1169
1109 1170 /*
1110 1171 * Create the root block IO
1111 1172 */
1112 1173 SET_BOOKMARK(&zb, os->os_dsl_dataset ?
1113 1174 os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
1114 1175 ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
1115 1176 VERIFY3U(0, ==, arc_release_bp(os->os_phys_buf, &os->os_phys_buf,
1116 1177 os->os_rootbp, os->os_spa, &zb));
1117 1178
1118 1179 dmu_write_policy(os, NULL, 0, 0, &zp);
1119 1180
1120 1181 zio = arc_write(pio, os->os_spa, tx->tx_txg,
1121 1182 os->os_rootbp, os->os_phys_buf, DMU_OS_IS_L2CACHEABLE(os), &zp,
1122 1183 dmu_objset_write_ready, dmu_objset_write_done, os,
1123 1184 ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
1124 1185
1125 1186 /*
1126 1187 * Sync special dnodes - the parent IO for the sync is the root block
1127 1188 */
1128 1189 DMU_META_DNODE(os)->dn_zio = zio;
1129 1190 dnode_sync(DMU_META_DNODE(os), tx);
1130 1191
1131 1192 os->os_phys->os_flags = os->os_flags;
1132 1193
1133 1194 if (DMU_USERUSED_DNODE(os) &&
1134 1195 DMU_USERUSED_DNODE(os)->dn_type != DMU_OT_NONE) {
1135 1196 DMU_USERUSED_DNODE(os)->dn_zio = zio;
1136 1197 dnode_sync(DMU_USERUSED_DNODE(os), tx);
1137 1198 DMU_GROUPUSED_DNODE(os)->dn_zio = zio;
1138 1199 dnode_sync(DMU_GROUPUSED_DNODE(os), tx);
1139 1200 }
1140 1201
1141 1202 txgoff = tx->tx_txg & TXG_MASK;
1142 1203
1143 1204 if (dmu_objset_userused_enabled(os)) {
1144 1205 newlist = &os->os_synced_dnodes;
1145 1206 /*
1146 1207 * We must create the list here because it uses the
1147 1208 * dn_dirty_link[] of this txg.
1148 1209 */
1149 1210 list_create(newlist, sizeof (dnode_t),
1150 1211 offsetof(dnode_t, dn_dirty_link[txgoff]));
1151 1212 }
1152 1213
1153 1214 dmu_objset_sync_dnodes(&os->os_free_dnodes[txgoff], newlist, tx);
1154 1215 dmu_objset_sync_dnodes(&os->os_dirty_dnodes[txgoff], newlist, tx);
1155 1216
1156 1217 list = &DMU_META_DNODE(os)->dn_dirty_records[txgoff];
1157 1218 while (dr = list_head(list)) {
1158 1219 ASSERT(dr->dr_dbuf->db_level == 0);
1159 1220 list_remove(list, dr);
1160 1221 if (dr->dr_zio)
1161 1222 zio_nowait(dr->dr_zio);
1162 1223 }
1163 1224 /*
1164 1225 * Free intent log blocks up to this tx.
1165 1226 */
1166 1227 zil_sync(os->os_zil, tx);
1167 1228 os->os_phys->os_zil_header = os->os_zil_header;
1168 1229 zio_nowait(zio);
1169 1230 }
1170 1231
1171 1232 boolean_t
1172 1233 dmu_objset_is_dirty(objset_t *os, uint64_t txg)
1173 1234 {
1174 1235 return (!list_is_empty(&os->os_dirty_dnodes[txg & TXG_MASK]) ||
1175 1236 !list_is_empty(&os->os_free_dnodes[txg & TXG_MASK]));
1176 1237 }
1177 1238
1178 1239 boolean_t
1179 1240 dmu_objset_is_dirty_anywhere(objset_t *os)
1180 1241 {
1181 1242 for (int t = 0; t < TXG_SIZE; t++)
1182 1243 if (dmu_objset_is_dirty(os, t))
1183 1244 return (B_TRUE);
1184 1245 return (B_FALSE);
1185 1246 }
1186 1247
1187 1248 static objset_used_cb_t *used_cbs[DMU_OST_NUMTYPES];
1188 1249
1189 1250 void
1190 1251 dmu_objset_register_type(dmu_objset_type_t ost, objset_used_cb_t *cb)
1191 1252 {
1192 1253 used_cbs[ost] = cb;
1193 1254 }
1194 1255
1195 1256 boolean_t
1196 1257 dmu_objset_userused_enabled(objset_t *os)
1197 1258 {
1198 1259 return (spa_version(os->os_spa) >= SPA_VERSION_USERSPACE &&
1199 1260 used_cbs[os->os_phys->os_type] != NULL &&
1200 1261 DMU_USERUSED_DNODE(os) != NULL);
1201 1262 }
1202 1263
1203 1264 static void
1204 1265 do_userquota_update(objset_t *os, uint64_t used, uint64_t flags,
1205 1266 uint64_t user, uint64_t group, boolean_t subtract, dmu_tx_t *tx)
1206 1267 {
1207 1268 if ((flags & DNODE_FLAG_USERUSED_ACCOUNTED)) {
1208 1269 int64_t delta = DNODE_SIZE + used;
1209 1270 if (subtract)
1210 1271 delta = -delta;
1211 1272 VERIFY3U(0, ==, zap_increment_int(os, DMU_USERUSED_OBJECT,
1212 1273 user, delta, tx));
1213 1274 VERIFY3U(0, ==, zap_increment_int(os, DMU_GROUPUSED_OBJECT,
1214 1275 group, delta, tx));
1215 1276 }
1216 1277 }
1217 1278
1218 1279 void
1219 1280 dmu_objset_do_userquota_updates(objset_t *os, dmu_tx_t *tx)
1220 1281 {
1221 1282 dnode_t *dn;
1222 1283 list_t *list = &os->os_synced_dnodes;
1223 1284
1224 1285 ASSERT(list_head(list) == NULL || dmu_objset_userused_enabled(os));
1225 1286
1226 1287 while (dn = list_head(list)) {
1227 1288 int flags;
1228 1289 ASSERT(!DMU_OBJECT_IS_SPECIAL(dn->dn_object));
1229 1290 ASSERT(dn->dn_phys->dn_type == DMU_OT_NONE ||
1230 1291 dn->dn_phys->dn_flags &
1231 1292 DNODE_FLAG_USERUSED_ACCOUNTED);
1232 1293
1233 1294 /* Allocate the user/groupused objects if necessary. */
1234 1295 if (DMU_USERUSED_DNODE(os)->dn_type == DMU_OT_NONE) {
1235 1296 VERIFY(0 == zap_create_claim(os,
1236 1297 DMU_USERUSED_OBJECT,
1237 1298 DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx));
1238 1299 VERIFY(0 == zap_create_claim(os,
1239 1300 DMU_GROUPUSED_OBJECT,
1240 1301 DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx));
1241 1302 }
1242 1303
1243 1304 /*
1244 1305 * We intentionally modify the zap object even if the
1245 1306 * net delta is zero. Otherwise
1246 1307 * the block of the zap obj could be shared between
1247 1308 * datasets but need to be different between them after
1248 1309 * a bprewrite.
1249 1310 */
1250 1311
1251 1312 flags = dn->dn_id_flags;
1252 1313 ASSERT(flags);
1253 1314 if (flags & DN_ID_OLD_EXIST) {
1254 1315 do_userquota_update(os, dn->dn_oldused, dn->dn_oldflags,
1255 1316 dn->dn_olduid, dn->dn_oldgid, B_TRUE, tx);
1256 1317 }
1257 1318 if (flags & DN_ID_NEW_EXIST) {
1258 1319 do_userquota_update(os, DN_USED_BYTES(dn->dn_phys),
1259 1320 dn->dn_phys->dn_flags, dn->dn_newuid,
1260 1321 dn->dn_newgid, B_FALSE, tx);
1261 1322 }
1262 1323
1263 1324 mutex_enter(&dn->dn_mtx);
1264 1325 dn->dn_oldused = 0;
1265 1326 dn->dn_oldflags = 0;
1266 1327 if (dn->dn_id_flags & DN_ID_NEW_EXIST) {
1267 1328 dn->dn_olduid = dn->dn_newuid;
1268 1329 dn->dn_oldgid = dn->dn_newgid;
1269 1330 dn->dn_id_flags |= DN_ID_OLD_EXIST;
1270 1331 if (dn->dn_bonuslen == 0)
1271 1332 dn->dn_id_flags |= DN_ID_CHKED_SPILL;
1272 1333 else
1273 1334 dn->dn_id_flags |= DN_ID_CHKED_BONUS;
1274 1335 }
1275 1336 dn->dn_id_flags &= ~(DN_ID_NEW_EXIST);
1276 1337 mutex_exit(&dn->dn_mtx);
1277 1338
1278 1339 list_remove(list, dn);
1279 1340 dnode_rele(dn, list);
1280 1341 }
1281 1342 }
1282 1343
1283 1344 /*
1284 1345 * Returns a pointer to data to find uid/gid from
1285 1346 *
1286 1347 	 * If a dirty record for the transaction group that is syncing can't
1287 1348 * be found then NULL is returned. In the NULL case it is assumed
1288 1349 * the uid/gid aren't changing.
1289 1350 */
1290 1351 static void *
1291 1352 dmu_objset_userquota_find_data(dmu_buf_impl_t *db, dmu_tx_t *tx)
1292 1353 {
1293 1354 dbuf_dirty_record_t *dr, **drp;
1294 1355 void *data;
1295 1356
1296 1357 if (db->db_dirtycnt == 0)
1297 1358 return (db->db.db_data); /* Nothing is changing */
1298 1359
1299 1360 for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next)
1300 1361 if (dr->dr_txg == tx->tx_txg)
1301 1362 break;
1302 1363
1303 1364 if (dr == NULL) {
1304 1365 data = NULL;
1305 1366 } else {
1306 1367 dnode_t *dn;
1307 1368
1308 1369 DB_DNODE_ENTER(dr->dr_dbuf);
1309 1370 dn = DB_DNODE(dr->dr_dbuf);
1310 1371
1311 1372 if (dn->dn_bonuslen == 0 &&
1312 1373 dr->dr_dbuf->db_blkid == DMU_SPILL_BLKID)
1313 1374 data = dr->dt.dl.dr_data->b_data;
1314 1375 else
1315 1376 data = dr->dt.dl.dr_data;
1316 1377
1317 1378 DB_DNODE_EXIT(dr->dr_dbuf);
1318 1379 }
1319 1380
1320 1381 return (data);
1321 1382 }
1322 1383
1323 1384 void
1324 1385 dmu_objset_userquota_get_ids(dnode_t *dn, boolean_t before, dmu_tx_t *tx)
1325 1386 {
1326 1387 objset_t *os = dn->dn_objset;
1327 1388 void *data = NULL;
1328 1389 dmu_buf_impl_t *db = NULL;
1329 1390 uint64_t *user, *group;
1330 1391 int flags = dn->dn_id_flags;
1331 1392 int error;
1332 1393 boolean_t have_spill = B_FALSE;
1333 1394
1334 1395 if (!dmu_objset_userused_enabled(dn->dn_objset))
1335 1396 return;
1336 1397
1337 1398 if (before && (flags & (DN_ID_CHKED_BONUS|DN_ID_OLD_EXIST|
1338 1399 DN_ID_CHKED_SPILL)))
1339 1400 return;
1340 1401
1341 1402 if (before && dn->dn_bonuslen != 0)
1342 1403 data = DN_BONUS(dn->dn_phys);
1343 1404 else if (!before && dn->dn_bonuslen != 0) {
1344 1405 if (dn->dn_bonus) {
1345 1406 db = dn->dn_bonus;
1346 1407 mutex_enter(&db->db_mtx);
1347 1408 data = dmu_objset_userquota_find_data(db, tx);
1348 1409 } else {
1349 1410 data = DN_BONUS(dn->dn_phys);
1350 1411 }
1351 1412 } else if (dn->dn_bonuslen == 0 && dn->dn_bonustype == DMU_OT_SA) {
1352 1413 int rf = 0;
1353 1414
1354 1415 if (RW_WRITE_HELD(&dn->dn_struct_rwlock))
1355 1416 rf |= DB_RF_HAVESTRUCT;
1356 1417 error = dmu_spill_hold_by_dnode(dn,
1357 1418 rf | DB_RF_MUST_SUCCEED,
1358 1419 FTAG, (dmu_buf_t **)&db);
1359 1420 ASSERT(error == 0);
1360 1421 mutex_enter(&db->db_mtx);
1361 1422 data = (before) ? db->db.db_data :
1362 1423 dmu_objset_userquota_find_data(db, tx);
1363 1424 have_spill = B_TRUE;
1364 1425 } else {
1365 1426 mutex_enter(&dn->dn_mtx);
1366 1427 dn->dn_id_flags |= DN_ID_CHKED_BONUS;
1367 1428 mutex_exit(&dn->dn_mtx);
1368 1429 return;
1369 1430 }
1370 1431
1371 1432 if (before) {
1372 1433 ASSERT(data);
1373 1434 user = &dn->dn_olduid;
1374 1435 group = &dn->dn_oldgid;
1375 1436 } else if (data) {
1376 1437 user = &dn->dn_newuid;
1377 1438 group = &dn->dn_newgid;
1378 1439 }
1379 1440
1380 1441 /*
1381 1442 * Must always call the callback in case the object
1382 1443 * type has changed and that type isn't an object type to track
1383 1444 */
1384 1445 error = used_cbs[os->os_phys->os_type](dn->dn_bonustype, data,
1385 1446 user, group);
1386 1447
1387 1448 /*
1388 1449 * Preserve existing uid/gid when the callback can't determine
1389 1450 * what the new uid/gid are and the callback returned EEXIST.
1390 1451 * The EEXIST error tells us to just use the existing uid/gid.
1391 1452 * If we don't know what the old values are then just assign
1392 1453 * them to 0, since that is a new file being created.
1393 1454 */
1394 1455 if (!before && data == NULL && error == EEXIST) {
1395 1456 if (flags & DN_ID_OLD_EXIST) {
1396 1457 dn->dn_newuid = dn->dn_olduid;
1397 1458 dn->dn_newgid = dn->dn_oldgid;
1398 1459 } else {
1399 1460 dn->dn_newuid = 0;
1400 1461 dn->dn_newgid = 0;
1401 1462 }
1402 1463 error = 0;
1403 1464 }
1404 1465
1405 1466 if (db)
1406 1467 mutex_exit(&db->db_mtx);
1407 1468
1408 1469 mutex_enter(&dn->dn_mtx);
1409 1470 if (error == 0 && before)
1410 1471 dn->dn_id_flags |= DN_ID_OLD_EXIST;
1411 1472 if (error == 0 && !before)
1412 1473 dn->dn_id_flags |= DN_ID_NEW_EXIST;
1413 1474
1414 1475 if (have_spill) {
1415 1476 dn->dn_id_flags |= DN_ID_CHKED_SPILL;
1416 1477 } else {
1417 1478 dn->dn_id_flags |= DN_ID_CHKED_BONUS;
1418 1479 }
1419 1480 mutex_exit(&dn->dn_mtx);
1420 1481 if (have_spill)
1421 1482 dmu_buf_rele((dmu_buf_t *)db, FTAG);
1422 1483 }
1423 1484
1424 1485 boolean_t
1425 1486 dmu_objset_userspace_present(objset_t *os)
1426 1487 {
1427 1488 return (os->os_phys->os_flags &
1428 1489 OBJSET_FLAG_USERACCOUNTING_COMPLETE);
1429 1490 }
1430 1491
1431 1492 int
1432 1493 dmu_objset_userspace_upgrade(objset_t *os)
1433 1494 {
1434 1495 uint64_t obj;
1435 1496 int err = 0;
1436 1497
1437 1498 if (dmu_objset_userspace_present(os))
1438 1499 return (0);
1439 1500 if (!dmu_objset_userused_enabled(os))
1440 1501 return (ENOTSUP);
1441 1502 if (dmu_objset_is_snapshot(os))
1442 1503 return (EINVAL);
1443 1504
1444 1505 /*
1445 1506 * We simply need to mark every object dirty, so that it will be
1446 1507 * synced out and now accounted. If this is called
1447 1508 * concurrently, or if we already did some work before crashing,
1448 1509 * that's fine, since we track each object's accounted state
1449 1510 * independently.
1450 1511 */
1451 1512
1452 1513 for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE, 0)) {
1453 1514 dmu_tx_t *tx;
1454 1515 dmu_buf_t *db;
1455 1516 int objerr;
1456 1517
1457 1518 if (issig(JUSTLOOKING) && issig(FORREAL))
1458 1519 return (EINTR);
1459 1520
1460 1521 objerr = dmu_bonus_hold(os, obj, FTAG, &db);
1461 1522 if (objerr)
1462 1523 continue;
1463 1524 tx = dmu_tx_create(os);
1464 1525 dmu_tx_hold_bonus(tx, obj);
1465 1526 objerr = dmu_tx_assign(tx, TXG_WAIT);
1466 1527 if (objerr) {
1467 1528 dmu_tx_abort(tx);
1468 1529 continue;
1469 1530 }
1470 1531 dmu_buf_will_dirty(db, tx);
1471 1532 dmu_buf_rele(db, FTAG);
1472 1533 dmu_tx_commit(tx);
1473 1534 }
1474 1535
1475 1536 os->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE;
1476 1537 txg_wait_synced(dmu_objset_pool(os), 0);
1477 1538 return (0);
1478 1539 }
1479 1540
1480 1541 void
1481 1542 dmu_objset_space(objset_t *os, uint64_t *refdbytesp, uint64_t *availbytesp,
1482 1543 uint64_t *usedobjsp, uint64_t *availobjsp)
1483 1544 {
1484 1545 dsl_dataset_space(os->os_dsl_dataset, refdbytesp, availbytesp,
1485 1546 usedobjsp, availobjsp);
1486 1547 }
1487 1548
1488 1549 uint64_t
1489 1550 dmu_objset_fsid_guid(objset_t *os)
1490 1551 {
1491 1552 return (dsl_dataset_fsid_guid(os->os_dsl_dataset));
1492 1553 }
1493 1554
1494 1555 void
1495 1556 dmu_objset_fast_stat(objset_t *os, dmu_objset_stats_t *stat)
1496 1557 {
1497 1558 stat->dds_type = os->os_phys->os_type;
1498 1559 if (os->os_dsl_dataset)
1499 1560 dsl_dataset_fast_stat(os->os_dsl_dataset, stat);
1500 1561 }
1501 1562
1502 1563 void
1503 1564 dmu_objset_stats(objset_t *os, nvlist_t *nv)
1504 1565 {
1505 1566 ASSERT(os->os_dsl_dataset ||
1506 1567 os->os_phys->os_type == DMU_OST_META);
1507 1568
1508 1569 if (os->os_dsl_dataset != NULL)
1509 1570 dsl_dataset_stats(os->os_dsl_dataset, nv);
1510 1571
1511 1572 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_TYPE,
1512 1573 os->os_phys->os_type);
1513 1574 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USERACCOUNTING,
1514 1575 dmu_objset_userspace_present(os));
1515 1576 }
1516 1577
1517 1578 int
1518 1579 dmu_objset_is_snapshot(objset_t *os)
1519 1580 {
1520 1581 if (os->os_dsl_dataset != NULL)
1521 1582 return (dsl_dataset_is_snapshot(os->os_dsl_dataset));
1522 1583 else
1523 1584 return (B_FALSE);
1524 1585 }
1525 1586
1526 1587 int
1527 1588 dmu_snapshot_realname(objset_t *os, char *name, char *real, int maxlen,
1528 1589 boolean_t *conflict)
1529 1590 {
1530 1591 dsl_dataset_t *ds = os->os_dsl_dataset;
1531 1592 uint64_t ignored;
1532 1593
1533 1594 if (ds->ds_phys->ds_snapnames_zapobj == 0)
1534 1595 return (ENOENT);
1535 1596
1536 1597 return (zap_lookup_norm(ds->ds_dir->dd_pool->dp_meta_objset,
1537 1598 ds->ds_phys->ds_snapnames_zapobj, name, 8, 1, &ignored, MT_FIRST,
1538 1599 real, maxlen, conflict));
1539 1600 }
1540 1601
1541 1602 int
1542 1603 dmu_snapshot_list_next(objset_t *os, int namelen, char *name,
1543 1604 uint64_t *idp, uint64_t *offp, boolean_t *case_conflict)
1544 1605 {
1545 1606 dsl_dataset_t *ds = os->os_dsl_dataset;
1546 1607 zap_cursor_t cursor;
1547 1608 zap_attribute_t attr;
1548 1609
1549 1610 if (ds->ds_phys->ds_snapnames_zapobj == 0)
1550 1611 return (ENOENT);
1551 1612
1552 1613 zap_cursor_init_serialized(&cursor,
1553 1614 ds->ds_dir->dd_pool->dp_meta_objset,
1554 1615 ds->ds_phys->ds_snapnames_zapobj, *offp);
1555 1616
1556 1617 if (zap_cursor_retrieve(&cursor, &attr) != 0) {
1557 1618 zap_cursor_fini(&cursor);
1558 1619 return (ENOENT);
1559 1620 }
1560 1621
1561 1622 if (strlen(attr.za_name) + 1 > namelen) {
1562 1623 zap_cursor_fini(&cursor);
1563 1624 return (ENAMETOOLONG);
1564 1625 }
1565 1626
1566 1627 (void) strcpy(name, attr.za_name);
1567 1628 if (idp)
1568 1629 *idp = attr.za_first_integer;
1569 1630 if (case_conflict)
1570 1631 *case_conflict = attr.za_normalization_conflict;
1571 1632 zap_cursor_advance(&cursor);
1572 1633 *offp = zap_cursor_serialize(&cursor);
1573 1634 zap_cursor_fini(&cursor);
1574 1635
1575 1636 return (0);
1576 1637 }
1577 1638
1578 1639 int
1579 1640 dmu_dir_list_next(objset_t *os, int namelen, char *name,
1580 1641 uint64_t *idp, uint64_t *offp)
1581 1642 {
1582 1643 dsl_dir_t *dd = os->os_dsl_dataset->ds_dir;
1583 1644 zap_cursor_t cursor;
1584 1645 zap_attribute_t attr;
1585 1646
1586 1647 /* there is no next dir on a snapshot! */
1587 1648 if (os->os_dsl_dataset->ds_object !=
1588 1649 dd->dd_phys->dd_head_dataset_obj)
1589 1650 return (ENOENT);
1590 1651
1591 1652 zap_cursor_init_serialized(&cursor,
1592 1653 dd->dd_pool->dp_meta_objset,
1593 1654 dd->dd_phys->dd_child_dir_zapobj, *offp);
1594 1655
1595 1656 if (zap_cursor_retrieve(&cursor, &attr) != 0) {
1596 1657 zap_cursor_fini(&cursor);
1597 1658 return (ENOENT);
1598 1659 }
1599 1660
1600 1661 if (strlen(attr.za_name) + 1 > namelen) {
1601 1662 zap_cursor_fini(&cursor);
1602 1663 return (ENAMETOOLONG);
1603 1664 }
1604 1665
1605 1666 (void) strcpy(name, attr.za_name);
1606 1667 if (idp)
1607 1668 *idp = attr.za_first_integer;
1608 1669 zap_cursor_advance(&cursor);
1609 1670 *offp = zap_cursor_serialize(&cursor);
1610 1671 zap_cursor_fini(&cursor);
1611 1672
1612 1673 return (0);
1613 1674 }
1614 1675
1615 1676 struct findarg {
1616 1677 int (*func)(const char *, void *);
1617 1678 void *arg;
1618 1679 };
1619 1680
1620 1681 /* ARGSUSED */
1621 1682 static int
1622 1683 findfunc(spa_t *spa, uint64_t dsobj, const char *dsname, void *arg)
1623 1684 {
1624 1685 struct findarg *fa = arg;
1625 1686 return (fa->func(dsname, fa->arg));
1626 1687 }
1627 1688
1628 1689 /*
1629 1690 * Find all objsets under name, and for each, call 'func(child_name, arg)'.
1630 1691 * Perhaps change all callers to use dmu_objset_find_spa()?
1631 1692 */
1632 1693 int
1633 1694 dmu_objset_find(char *name, int func(const char *, void *), void *arg,
1634 1695 int flags)
1635 1696 {
1636 1697 struct findarg fa;
1637 1698 fa.func = func;
1638 1699 fa.arg = arg;
1639 1700 return (dmu_objset_find_spa(NULL, name, findfunc, &fa, flags));
1640 1701 }
1641 1702
1642 1703 /*
1643 1704 * Find all objsets under name, call func on each
1644 1705 */
1645 1706 int
1646 1707 dmu_objset_find_spa(spa_t *spa, const char *name,
1647 1708 int func(spa_t *, uint64_t, const char *, void *), void *arg, int flags)
1648 1709 {
1649 1710 dsl_dir_t *dd;
1650 1711 dsl_pool_t *dp;
1651 1712 dsl_dataset_t *ds;
1652 1713 zap_cursor_t zc;
1653 1714 zap_attribute_t *attr;
1654 1715 char *child;
1655 1716 uint64_t thisobj;
1656 1717 int err;
1657 1718
1658 1719 if (name == NULL)
1659 1720 name = spa_name(spa);
1660 1721 err = dsl_dir_open_spa(spa, name, FTAG, &dd, NULL);
1661 1722 if (err)
1662 1723 return (err);
1663 1724
1664 1725 /* Don't visit hidden ($MOS & $ORIGIN) objsets. */
1665 1726 if (dd->dd_myname[0] == '$') {
1666 1727 dsl_dir_close(dd, FTAG);
1667 1728 return (0);
1668 1729 }
1669 1730
1670 1731 thisobj = dd->dd_phys->dd_head_dataset_obj;
1671 1732 attr = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
1672 1733 dp = dd->dd_pool;
1673 1734
1674 1735 /*
1675 1736 * Iterate over all children.
1676 1737 */
1677 1738 if (flags & DS_FIND_CHILDREN) {
1678 1739 for (zap_cursor_init(&zc, dp->dp_meta_objset,
1679 1740 dd->dd_phys->dd_child_dir_zapobj);
1680 1741 zap_cursor_retrieve(&zc, attr) == 0;
1681 1742 (void) zap_cursor_advance(&zc)) {
1682 1743 ASSERT(attr->za_integer_length == sizeof (uint64_t));
1683 1744 ASSERT(attr->za_num_integers == 1);
1684 1745
1685 1746 child = kmem_asprintf("%s/%s", name, attr->za_name);
1686 1747 err = dmu_objset_find_spa(spa, child, func, arg, flags);
1687 1748 strfree(child);
1688 1749 if (err)
1689 1750 break;
1690 1751 }
1691 1752 zap_cursor_fini(&zc);
1692 1753
1693 1754 if (err) {
1694 1755 dsl_dir_close(dd, FTAG);
1695 1756 kmem_free(attr, sizeof (zap_attribute_t));
1696 1757 return (err);
1697 1758 }
1698 1759 }
1699 1760
1700 1761 /*
1701 1762 * Iterate over all snapshots.
1702 1763 */
1703 1764 if (flags & DS_FIND_SNAPSHOTS) {
1704 1765 if (!dsl_pool_sync_context(dp))
1705 1766 rw_enter(&dp->dp_config_rwlock, RW_READER);
1706 1767 err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);
1707 1768 if (!dsl_pool_sync_context(dp))
1708 1769 rw_exit(&dp->dp_config_rwlock);
1709 1770
1710 1771 if (err == 0) {
1711 1772 uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
1712 1773 dsl_dataset_rele(ds, FTAG);
1713 1774
1714 1775 for (zap_cursor_init(&zc, dp->dp_meta_objset, snapobj);
1715 1776 zap_cursor_retrieve(&zc, attr) == 0;
1716 1777 (void) zap_cursor_advance(&zc)) {
1717 1778 ASSERT(attr->za_integer_length ==
1718 1779 sizeof (uint64_t));
1719 1780 ASSERT(attr->za_num_integers == 1);
1720 1781
1721 1782 child = kmem_asprintf("%s@%s",
1722 1783 name, attr->za_name);
1723 1784 err = func(spa, attr->za_first_integer,
1724 1785 child, arg);
1725 1786 strfree(child);
1726 1787 if (err)
1727 1788 break;
1728 1789 }
1729 1790 zap_cursor_fini(&zc);
1730 1791 }
1731 1792 }
1732 1793
1733 1794 dsl_dir_close(dd, FTAG);
1734 1795 kmem_free(attr, sizeof (zap_attribute_t));
1735 1796
1736 1797 if (err)
1737 1798 return (err);
1738 1799
1739 1800 /*
1740 1801 * Apply to self if appropriate.
1741 1802 */
1742 1803 err = func(spa, thisobj, name, arg);
1743 1804 return (err);
1744 1805 }
1745 1806
1746 1807 /* ARGSUSED */
1747 1808 int
1748 1809 dmu_objset_prefetch(const char *name, void *arg)
1749 1810 {
1750 1811 dsl_dataset_t *ds;
1751 1812
1752 1813 if (dsl_dataset_hold(name, FTAG, &ds))
1753 1814 return (0);
1754 1815
1755 1816 if (!BP_IS_HOLE(&ds->ds_phys->ds_bp)) {
1756 1817 mutex_enter(&ds->ds_opening_lock);
1757 1818 if (ds->ds_objset == NULL) {
1758 1819 uint32_t aflags = ARC_NOWAIT | ARC_PREFETCH;
1759 1820 zbookmark_t zb;
1760 1821
1761 1822 SET_BOOKMARK(&zb, ds->ds_object, ZB_ROOT_OBJECT,
1762 1823 ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
1763 1824
1764 1825 (void) dsl_read_nolock(NULL, dsl_dataset_get_spa(ds),
1765 1826 &ds->ds_phys->ds_bp, NULL, NULL,
1766 1827 ZIO_PRIORITY_ASYNC_READ,
1767 1828 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
1768 1829 &aflags, &zb);
1769 1830 }
1770 1831 mutex_exit(&ds->ds_opening_lock);
1771 1832 }
1772 1833
1773 1834 dsl_dataset_rele(ds, FTAG);
1774 1835 return (0);
1775 1836 }
1776 1837
1777 1838 void
1778 1839 dmu_objset_set_user(objset_t *os, void *user_ptr)
1779 1840 {
1780 1841 ASSERT(MUTEX_HELD(&os->os_user_ptr_lock));
1781 1842 os->os_user_ptr = user_ptr;
1782 1843 }
1783 1844
1784 1845 void *
1785 1846 dmu_objset_get_user(objset_t *os)
1786 1847 {
1787 1848 ASSERT(MUTEX_HELD(&os->os_user_ptr_lock));
1788 1849 return (os->os_user_ptr);
1789 1850 }
767 lines elided