/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 */

/* Portions Copyright 2010 Robert Milkowski */

#include <sys/cred.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_deleg.h>
#include <sys/dnode.h>
#include <sys/dbuf.h>
#include <sys/zvol.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/dmu_impl.h>
#include <sys/zfs_ioctl.h>
#include <sys/sa.h>
#include <sys/zfs_onexit.h>
#include <sys/dsl_destroy.h>

/*
 * Needed to close a window in dnode_move() that allows the objset to be freed
 * before it can be safely accessed.
 */
krwlock_t os_lock;

void
dmu_objset_init(void)
{
	rw_init(&os_lock, NULL, RW_DEFAULT, NULL);
}

void
dmu_objset_fini(void)
{
	rw_destroy(&os_lock);
}

spa_t *
dmu_objset_spa(objset_t *os)
{
	return (os->os_spa);
}

zilog_t *
dmu_objset_zil(objset_t *os)
{
	return (os->os_zil);
}

dsl_pool_t *
dmu_objset_pool(objset_t *os)
{
	dsl_dataset_t *ds;

	if ((ds = os->os_dsl_dataset) != NULL && ds->ds_dir)
		return (ds->ds_dir->dd_pool);
	else
		return (spa_get_dsl(os->os_spa));
}

dsl_dataset_t *
dmu_objset_ds(objset_t *os)
{
	return (os->os_dsl_dataset);
}

dmu_objset_type_t
dmu_objset_type(objset_t *os)
{
	return (os->os_phys->os_type);
}

void
dmu_objset_name(objset_t *os, char *buf)
{
	dsl_dataset_name(os->os_dsl_dataset, buf);
}

uint64_t
dmu_objset_id(objset_t *os)
{
	dsl_dataset_t *ds = os->os_dsl_dataset;

	return (ds ? ds->ds_object : 0);
}

zfs_sync_type_t
dmu_objset_syncprop(objset_t *os)
{
	return (os->os_sync);
}

zfs_logbias_op_t
dmu_objset_logbias(objset_t *os)
{
	return (os->os_logbias);
}

static void
checksum_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance should have been done by now.
	 */
	ASSERT(newval != ZIO_CHECKSUM_INHERIT);

	os->os_checksum = zio_checksum_select(newval, ZIO_CHECKSUM_ON_VALUE);
}

static void
compression_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval != ZIO_COMPRESS_INHERIT);

	os->os_compress = zio_compress_select(os->os_spa, newval,
	    ZIO_COMPRESS_ON);
}

static void
copies_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval > 0);
	ASSERT(newval <= spa_max_replication(os->os_spa));

	os->os_copies = newval;
}

static void
dedup_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;
	spa_t *spa = os->os_spa;
	enum zio_checksum checksum;

	/*
	 * Inheritance should have been done by now.
	 */
	ASSERT(newval != ZIO_CHECKSUM_INHERIT);

	checksum = zio_checksum_dedup_select(spa, newval, ZIO_CHECKSUM_OFF);

	os->os_dedup_checksum = checksum & ZIO_CHECKSUM_MASK;
	os->os_dedup_verify = !!(checksum & ZIO_CHECKSUM_VERIFY);
}

static void
primary_cache_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval == ZFS_CACHE_ALL || newval == ZFS_CACHE_NONE ||
	    newval == ZFS_CACHE_METADATA);

	os->os_primary_cache = newval;
}

static void
secondary_cache_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval == ZFS_CACHE_ALL || newval == ZFS_CACHE_NONE ||
	    newval == ZFS_CACHE_METADATA);

	os->os_secondary_cache = newval;
}

static void
sync_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval == ZFS_SYNC_STANDARD || newval == ZFS_SYNC_ALWAYS ||
	    newval == ZFS_SYNC_DISABLED);

	os->os_sync = newval;
	if (os->os_zil)
		zil_set_sync(os->os_zil, newval);
}
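
/*
 * Illustrative sketch (added commentary, not part of the original file):
 * every property callback above follows the same contract -- it receives
 * the objset as its registration argument and caches an already-validated
 * value in the in-core objset.  A hypothetical callback for some new
 * per-objset property, shown only to document the pattern, would be:
 */
#if 0
static void
example_prop_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/* Inheritance and range checking should have been done by now. */
	os->os_example_prop = newval;	/* hypothetical field */
}
#endif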

static void
redundant_metadata_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval == ZFS_REDUNDANT_METADATA_ALL ||
	    newval == ZFS_REDUNDANT_METADATA_MOST);

	os->os_redundant_metadata = newval;
}

static void
logbias_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	ASSERT(newval == ZFS_LOGBIAS_LATENCY ||
	    newval == ZFS_LOGBIAS_THROUGHPUT);
	os->os_logbias = newval;
	if (os->os_zil)
		zil_set_logbias(os->os_zil, newval);
}

static void
recordsize_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	os->os_recordsize = newval;
}

void
dmu_objset_byteswap(void *buf, size_t size)
{
	objset_phys_t *osp = buf;

	ASSERT(size == OBJSET_OLD_PHYS_SIZE || size == sizeof (objset_phys_t));
	dnode_byteswap(&osp->os_meta_dnode);
	byteswap_uint64_array(&osp->os_zil_header, sizeof (zil_header_t));
	osp->os_type = BSWAP_64(osp->os_type);
	osp->os_flags = BSWAP_64(osp->os_flags);
	if (size == sizeof (objset_phys_t)) {
		dnode_byteswap(&osp->os_userused_dnode);
		dnode_byteswap(&osp->os_groupused_dnode);
	}
}
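
/*
 * Note (added commentary, not part of the original file): the two sizes
 * accepted above correspond to objsets written before and after
 * SPA_VERSION_USERSPACE.  Older objset_phys_t blocks lack the user/group
 * accounting dnodes, so for OBJSET_OLD_PHYS_SIZE buffers only the fields
 * that exist in the old layout are byteswapped.
 */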

int
dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
    objset_t **osp)
{
	objset_t *os;
	int i, err;

	ASSERT(ds == NULL || MUTEX_HELD(&ds->ds_opening_lock));

	os = kmem_zalloc(sizeof (objset_t), KM_SLEEP);
	os->os_dsl_dataset = ds;
	os->os_spa = spa;
	os->os_rootbp = bp;
	if (!BP_IS_HOLE(os->os_rootbp)) {
		arc_flags_t aflags = ARC_FLAG_WAIT;
		zbookmark_phys_t zb;
		SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
		    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);

		if (DMU_OS_IS_L2CACHEABLE(os))
			aflags |= ARC_FLAG_L2CACHE;
		if (DMU_OS_IS_L2COMPRESSIBLE(os))
			aflags |= ARC_FLAG_L2COMPRESS;

		dprintf_bp(os->os_rootbp, "reading %s", "");
		err = arc_read(NULL, spa, os->os_rootbp,
		    arc_getbuf_func, &os->os_phys_buf,
		    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, &zb);
		if (err != 0) {
			kmem_free(os, sizeof (objset_t));
			/* convert checksum errors into IO errors */
			if (err == ECKSUM)
				err = SET_ERROR(EIO);
			return (err);
		}

		/* Increase the blocksize if we are permitted. */
		if (spa_version(spa) >= SPA_VERSION_USERSPACE &&
		    arc_buf_size(os->os_phys_buf) < sizeof (objset_phys_t)) {
			arc_buf_t *buf = arc_buf_alloc(spa,
			    sizeof (objset_phys_t), &os->os_phys_buf,
			    ARC_BUFC_METADATA);
			bzero(buf->b_data, sizeof (objset_phys_t));
			bcopy(os->os_phys_buf->b_data, buf->b_data,
			    arc_buf_size(os->os_phys_buf));
			(void) arc_buf_remove_ref(os->os_phys_buf,
			    &os->os_phys_buf);
			os->os_phys_buf = buf;
		}

		os->os_phys = os->os_phys_buf->b_data;
		os->os_flags = os->os_phys->os_flags;
	} else {
		int size = spa_version(spa) >= SPA_VERSION_USERSPACE ?
		    sizeof (objset_phys_t) : OBJSET_OLD_PHYS_SIZE;
		os->os_phys_buf = arc_buf_alloc(spa, size,
		    &os->os_phys_buf, ARC_BUFC_METADATA);
		os->os_phys = os->os_phys_buf->b_data;
		bzero(os->os_phys, size);
	}

	/*
	 * Note: the changed_cb will be called once before the register
	 * func returns, thus changing the checksum/compression from the
	 * default (fletcher2/off).  Snapshots don't need to know about
	 * checksum/compression/copies.
	 */
	if (ds != NULL) {
		err = dsl_prop_register(ds,
		    zfs_prop_to_name(ZFS_PROP_PRIMARYCACHE),
		    primary_cache_changed_cb, os);
		if (err == 0) {
			err = dsl_prop_register(ds,
			    zfs_prop_to_name(ZFS_PROP_SECONDARYCACHE),
			    secondary_cache_changed_cb, os);
		}
		if (!ds->ds_is_snapshot) {
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_CHECKSUM),
				    checksum_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
				    compression_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_COPIES),
				    copies_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_DEDUP),
				    dedup_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_LOGBIAS),
				    logbias_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_SYNC),
				    sync_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(
				    ZFS_PROP_REDUNDANT_METADATA),
				    redundant_metadata_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_RECORDSIZE),
				    recordsize_changed_cb, os);
			}
		}
		if (err != 0) {
			VERIFY(arc_buf_remove_ref(os->os_phys_buf,
			    &os->os_phys_buf));
			kmem_free(os, sizeof (objset_t));
			return (err);
		}
	} else {
		/* It's the meta-objset. */
		os->os_checksum = ZIO_CHECKSUM_FLETCHER_4;
		os->os_compress = ZIO_COMPRESS_ON;
		os->os_copies = spa_max_replication(spa);
		os->os_dedup_checksum = ZIO_CHECKSUM_OFF;
		os->os_dedup_verify = B_FALSE;
		os->os_logbias = ZFS_LOGBIAS_LATENCY;
		os->os_sync = ZFS_SYNC_STANDARD;
		os->os_primary_cache = ZFS_CACHE_ALL;
		os->os_secondary_cache = ZFS_CACHE_ALL;
	}

	if (ds == NULL || !ds->ds_is_snapshot)
		os->os_zil_header = os->os_phys->os_zil_header;
	os->os_zil = zil_alloc(os, &os->os_zil_header);

	for (i = 0; i < TXG_SIZE; i++) {
		list_create(&os->os_dirty_dnodes[i], sizeof (dnode_t),
		    offsetof(dnode_t, dn_dirty_link[i]));
		list_create(&os->os_free_dnodes[i], sizeof (dnode_t),
		    offsetof(dnode_t, dn_dirty_link[i]));
	}
	list_create(&os->os_dnodes, sizeof (dnode_t),
	    offsetof(dnode_t, dn_link));
	list_create(&os->os_downgraded_dbufs, sizeof (dmu_buf_impl_t),
	    offsetof(dmu_buf_impl_t, db_link));

	mutex_init(&os->os_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&os->os_obj_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&os->os_user_ptr_lock, NULL, MUTEX_DEFAULT, NULL);

	dnode_special_open(os, &os->os_phys->os_meta_dnode,
	    DMU_META_DNODE_OBJECT, &os->os_meta_dnode);
	if (arc_buf_size(os->os_phys_buf) >= sizeof (objset_phys_t)) {
		dnode_special_open(os, &os->os_phys->os_userused_dnode,
		    DMU_USERUSED_OBJECT, &os->os_userused_dnode);
		dnode_special_open(os, &os->os_phys->os_groupused_dnode,
		    DMU_GROUPUSED_OBJECT, &os->os_groupused_dnode);
	}

	*osp = os;
	return (0);
}

int
dmu_objset_from_ds(dsl_dataset_t *ds, objset_t **osp)
{
	int err = 0;

	mutex_enter(&ds->ds_opening_lock);
	if (ds->ds_objset == NULL) {
		objset_t *os;
		err = dmu_objset_open_impl(dsl_dataset_get_spa(ds),
		    ds, dsl_dataset_get_blkptr(ds), &os);

		if (err == 0) {
			mutex_enter(&ds->ds_lock);
			ASSERT(ds->ds_objset == NULL);
			ds->ds_objset = os;
			mutex_exit(&ds->ds_lock);
		}
	}
	*osp = ds->ds_objset;
	mutex_exit(&ds->ds_opening_lock);
	return (err);
}

/*
 * Holds the pool while the objset is held.  Therefore only one objset
 * can be held at a time.
 */
int
dmu_objset_hold(const char *name, void *tag, objset_t **osp)
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	int err;

	err = dsl_pool_hold(name, tag, &dp);
	if (err != 0)
		return (err);
	err = dsl_dataset_hold(dp, name, tag, &ds);
	if (err != 0) {
		dsl_pool_rele(dp, tag);
		return (err);
	}

	err = dmu_objset_from_ds(ds, osp);
	if (err != 0) {
		dsl_dataset_rele(ds, tag);
		dsl_pool_rele(dp, tag);
	}

	return (err);
}

/*
 * dsl_pool must not be held when this is called.
 * Upon successful return, there will be a longhold on the dataset,
 * and the dsl_pool will not be held.
 */
int
dmu_objset_own(const char *name, dmu_objset_type_t type,
    boolean_t readonly, void *tag, objset_t **osp)
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	int err;

	err = dsl_pool_hold(name, FTAG, &dp);
	if (err != 0)
		return (err);
	err = dsl_dataset_own(dp, name, tag, &ds);
	if (err != 0) {
		dsl_pool_rele(dp, FTAG);
		return (err);
	}

	err = dmu_objset_from_ds(ds, osp);
	dsl_pool_rele(dp, FTAG);
	if (err != 0) {
		dsl_dataset_disown(ds, tag);
	} else if (type != DMU_OST_ANY && type != (*osp)->os_phys->os_type) {
		dsl_dataset_disown(ds, tag);
		return (SET_ERROR(EINVAL));
	} else if (!readonly && ds->ds_is_snapshot) {
		dsl_dataset_disown(ds, tag);
		return (SET_ERROR(EROFS));
	}
	return (err);
}

void
dmu_objset_rele(objset_t *os, void *tag)
{
	dsl_pool_t *dp = dmu_objset_pool(os);
	dsl_dataset_rele(os->os_dsl_dataset, tag);
	dsl_pool_rele(dp, tag);
}

/*
 * When we are called, os MUST refer to an objset associated with a dataset
 * that is owned by 'tag'; that is, is held and long held by 'tag' and ds_owner
 * == tag.  We will then release and reacquire ownership of the dataset while
 * holding the pool config_rwlock so that no intervening namespace or
 * ownership changes can occur.
 *
 * This exists solely to accommodate zfs_ioc_userspace_upgrade()'s desire to
 * release the hold on its dataset and acquire a new one on the dataset of the
 * same name so that it can be partially torn down and reconstructed.
 */
void
dmu_objset_refresh_ownership(objset_t *os, void *tag)
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds, *newds;
	char name[MAXNAMELEN];

	ds = os->os_dsl_dataset;
	VERIFY3P(ds, !=, NULL);
	VERIFY3P(ds->ds_owner, ==, tag);
	VERIFY(dsl_dataset_long_held(ds));

	dsl_dataset_name(ds, name);
	dp = dmu_objset_pool(os);
	dsl_pool_config_enter(dp, FTAG);
	dmu_objset_disown(os, tag);
	VERIFY0(dsl_dataset_own(dp, name, tag, &newds));
	VERIFY3P(newds, ==, os->os_dsl_dataset);
	dsl_pool_config_exit(dp, FTAG);
}

void
dmu_objset_disown(objset_t *os, void *tag)
{
	dsl_dataset_disown(os->os_dsl_dataset, tag);
}
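
/*
 * Illustrative sketch (added commentary, not part of the original file):
 * the two access patterns above pair up as hold/rele for short-lived,
 * read-mostly access and own/disown for exclusive, long-lived access.
 * A hypothetical caller might look like:
 */
#if 0
static int
example_objset_user(const char *name)
{
	objset_t *os;
	int err;

	/* Short-lived access: pins the pool for the duration. */
	err = dmu_objset_hold(name, FTAG, &os);
	if (err != 0)
		return (err);
	/* ... inspect os ... */
	dmu_objset_rele(os, FTAG);

	/* Exclusive access: longholds the dataset; the pool is not held. */
	err = dmu_objset_own(name, DMU_OST_ZFS, B_FALSE, FTAG, &os);
	if (err != 0)
		return (err);
	/* ... read and write via os ... */
	dmu_objset_disown(os, FTAG);
	return (0);
}
#endif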

void
dmu_objset_evict_dbufs(objset_t *os)
{
	dnode_t dn_marker;
	dnode_t *dn;

	mutex_enter(&os->os_lock);
	dn = list_head(&os->os_dnodes);
	while (dn != NULL) {
		/*
		 * Skip dnodes without holds.  We have to do this dance
		 * because dnode_add_ref() only works if there is already a
		 * hold.  If the dnode has no holds, then it has no dbufs.
		 */
		if (dnode_add_ref(dn, FTAG)) {
			list_insert_after(&os->os_dnodes, dn, &dn_marker);
			mutex_exit(&os->os_lock);

			dnode_evict_dbufs(dn);
			dnode_rele(dn, FTAG);

			mutex_enter(&os->os_lock);
			dn = list_next(&os->os_dnodes, &dn_marker);
			list_remove(&os->os_dnodes, &dn_marker);
		} else {
			dn = list_next(&os->os_dnodes, dn);
		}
	}
	mutex_exit(&os->os_lock);

	if (DMU_USERUSED_DNODE(os) != NULL) {
		dnode_evict_dbufs(DMU_GROUPUSED_DNODE(os));
		dnode_evict_dbufs(DMU_USERUSED_DNODE(os));
	}
	dnode_evict_dbufs(DMU_META_DNODE(os));
}

/*
 * Objset eviction processing is split into two pieces.
 * The first marks the objset as evicting, evicts any dbufs that
 * have a refcount of zero, and then queues up the objset for the
 * second phase of eviction.  Once os->os_dnodes has been cleared by
 * dnode_buf_pageout()->dnode_destroy(), the second phase is executed.
 * The second phase closes the special dnodes, dequeues the objset from
 * the list of those undergoing eviction, and finally frees the objset.
 *
 * NOTE: Due to asynchronous eviction processing (invocation of
 * dnode_buf_pageout()), it is possible for the meta dnode for the
 * objset to have no holds even though os->os_dnodes is not empty.
 */
void
dmu_objset_evict(objset_t *os)
{
	dsl_dataset_t *ds = os->os_dsl_dataset;

	for (int t = 0; t < TXG_SIZE; t++)
		ASSERT(!dmu_objset_is_dirty(os, t));

	if (ds) {
		if (!ds->ds_is_snapshot) {
			VERIFY0(dsl_prop_unregister(ds,
			    zfs_prop_to_name(ZFS_PROP_CHECKSUM),
			    checksum_changed_cb, os));
			VERIFY0(dsl_prop_unregister(ds,
			    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
			    compression_changed_cb, os));
			VERIFY0(dsl_prop_unregister(ds,
			    zfs_prop_to_name(ZFS_PROP_COPIES),
			    copies_changed_cb, os));
			VERIFY0(dsl_prop_unregister(ds,
			    zfs_prop_to_name(ZFS_PROP_DEDUP),
			    dedup_changed_cb, os));
			VERIFY0(dsl_prop_unregister(ds,
			    zfs_prop_to_name(ZFS_PROP_LOGBIAS),
			    logbias_changed_cb, os));
			VERIFY0(dsl_prop_unregister(ds,
			    zfs_prop_to_name(ZFS_PROP_SYNC),
			    sync_changed_cb, os));
			VERIFY0(dsl_prop_unregister(ds,
			    zfs_prop_to_name(ZFS_PROP_REDUNDANT_METADATA),
			    redundant_metadata_changed_cb, os));
			VERIFY0(dsl_prop_unregister(ds,
			    zfs_prop_to_name(ZFS_PROP_RECORDSIZE),
			    recordsize_changed_cb, os));
		}
		VERIFY0(dsl_prop_unregister(ds,
		    zfs_prop_to_name(ZFS_PROP_PRIMARYCACHE),
		    primary_cache_changed_cb, os));
		VERIFY0(dsl_prop_unregister(ds,
		    zfs_prop_to_name(ZFS_PROP_SECONDARYCACHE),
		    secondary_cache_changed_cb, os));
	}

	if (os->os_sa)
		sa_tear_down(os);

	os->os_evicting = B_TRUE;
	dmu_objset_evict_dbufs(os);

	mutex_enter(&os->os_lock);
	spa_evicting_os_register(os->os_spa, os);
	if (list_is_empty(&os->os_dnodes)) {
		mutex_exit(&os->os_lock);
		dmu_objset_evict_done(os);
	} else {
		mutex_exit(&os->os_lock);
	}
}

void
dmu_objset_evict_done(objset_t *os)
{
	ASSERT3P(list_head(&os->os_dnodes), ==, NULL);

	dnode_special_close(&os->os_meta_dnode);
	if (DMU_USERUSED_DNODE(os)) {
		dnode_special_close(&os->os_userused_dnode);
		dnode_special_close(&os->os_groupused_dnode);
	}
	zil_free(os->os_zil);

	VERIFY(arc_buf_remove_ref(os->os_phys_buf, &os->os_phys_buf));

	/*
	 * This is a barrier to prevent the objset from going away in
	 * dnode_move() until we can safely ensure that the objset is still in
	 * use.  We consider the objset valid before the barrier and invalid
	 * after the barrier.
	 */
	rw_enter(&os_lock, RW_READER);
	rw_exit(&os_lock);

	mutex_destroy(&os->os_lock);
	mutex_destroy(&os->os_obj_lock);
	mutex_destroy(&os->os_user_ptr_lock);
	spa_evicting_os_deregister(os->os_spa, os);
	kmem_free(os, sizeof (objset_t));
}

timestruc_t
dmu_objset_snap_cmtime(objset_t *os)
{
	return (dsl_dir_snap_cmtime(os->os_dsl_dataset->ds_dir));
}

/* called from dsl for meta-objset */
objset_t *
dmu_objset_create_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
    dmu_objset_type_t type, dmu_tx_t *tx)
{
	objset_t *os;
	dnode_t *mdn;

	ASSERT(dmu_tx_is_syncing(tx));

	if (ds != NULL)
		VERIFY0(dmu_objset_from_ds(ds, &os));
	else
		VERIFY0(dmu_objset_open_impl(spa, NULL, bp, &os));

	mdn = DMU_META_DNODE(os);

	dnode_allocate(mdn, DMU_OT_DNODE, 1 << DNODE_BLOCK_SHIFT,
	    DN_MAX_INDBLKSHIFT, DMU_OT_NONE, 0, tx);

	/*
	 * We don't want to have to increase the meta-dnode's nlevels
	 * later, because then we could do it in quiescing context while
	 * we are also accessing it in open context.
	 *
	 * This precaution is not necessary for the MOS (ds == NULL),
	 * because the MOS is only updated in syncing context.
	 * This is most fortunate: the MOS is the only objset that
	 * needs to be synced multiple times as spa_sync() iterates
	 * to convergence, so minimizing its dn_nlevels matters.
	 */
	if (ds != NULL) {
		int levels = 1;

		/*
		 * Determine the number of levels necessary for the meta-dnode
		 * to contain DN_MAX_OBJECT dnodes.
		 */
		while ((uint64_t)mdn->dn_nblkptr << (mdn->dn_datablkshift +
		    (levels - 1) * (mdn->dn_indblkshift - SPA_BLKPTRSHIFT)) <
		    DN_MAX_OBJECT * sizeof (dnode_phys_t))
			levels++;

		mdn->dn_next_nlevels[tx->tx_txg & TXG_MASK] =
		    mdn->dn_nlevels = levels;
	}

	ASSERT(type != DMU_OST_NONE);
	ASSERT(type != DMU_OST_ANY);
	ASSERT(type < DMU_OST_NUMTYPES);
	os->os_phys->os_type = type;
	if (dmu_objset_userused_enabled(os)) {
		os->os_phys->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE;
		os->os_flags = os->os_phys->os_flags;
	}

	dsl_dataset_dirty(ds, tx);

	return (os);
}

typedef struct dmu_objset_create_arg {
	const char *doca_name;
	cred_t *doca_cred;
	void (*doca_userfunc)(objset_t *os, void *arg,
	    cred_t *cr, dmu_tx_t *tx);
	void *doca_userarg;
	dmu_objset_type_t doca_type;
	uint64_t doca_flags;
} dmu_objset_create_arg_t;

/*ARGSUSED*/
static int
dmu_objset_create_check(void *arg, dmu_tx_t *tx)
{
	dmu_objset_create_arg_t *doca = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dir_t *pdd;
	const char *tail;
	int error;

	if (strchr(doca->doca_name, '@') != NULL)
		return (SET_ERROR(EINVAL));

	error = dsl_dir_hold(dp, doca->doca_name, FTAG, &pdd, &tail);
	if (error != 0)
		return (error);
	if (tail == NULL) {
		dsl_dir_rele(pdd, FTAG);
		return (SET_ERROR(EEXIST));
	}
	error = dsl_fs_ss_limit_check(pdd, 1, ZFS_PROP_FILESYSTEM_LIMIT, NULL,
	    doca->doca_cred);
	dsl_dir_rele(pdd, FTAG);

	return (error);
}
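
/*
 * Illustrative sketch (added commentary, not part of the original file):
 * dmu_objset_create_check() above and dmu_objset_create_sync() below form
 * the usual dsl_sync_task pair -- the check validates the operation and
 * may run more than once, while the sync applies it exactly once in
 * syncing context.  A hypothetical minimal task of the same shape:
 */
#if 0
static int
example_task_check(void *arg, dmu_tx_t *tx)
{
	/* Validate against the pool; may be retried, must not mutate. */
	return (0);
}

static void
example_task_sync(void *arg, dmu_tx_t *tx)
{
	/* Apply the change; runs once, in syncing context. */
}

/* Dispatch: blocks until the task's txg has synced. */
(void) dsl_sync_task("pool/fs", example_task_check, example_task_sync,
    NULL, 5, ZFS_SPACE_CHECK_NORMAL);
#endif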

static void
dmu_objset_create_sync(void *arg, dmu_tx_t *tx)
{
	dmu_objset_create_arg_t *doca = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dir_t *pdd;
	const char *tail;
	dsl_dataset_t *ds;
	uint64_t obj;
	blkptr_t *bp;
	objset_t *os;

	VERIFY0(dsl_dir_hold(dp, doca->doca_name, FTAG, &pdd, &tail));

	obj = dsl_dataset_create_sync(pdd, tail, NULL, doca->doca_flags,
	    doca->doca_cred, tx);

	VERIFY0(dsl_dataset_hold_obj(pdd->dd_pool, obj, FTAG, &ds));
	bp = dsl_dataset_get_blkptr(ds);
	os = dmu_objset_create_impl(pdd->dd_pool->dp_spa,
	    ds, bp, doca->doca_type, tx);

	if (doca->doca_userfunc != NULL) {
		doca->doca_userfunc(os, doca->doca_userarg,
		    doca->doca_cred, tx);
	}

	spa_history_log_internal_ds(ds, "create", tx, "");
	dsl_dataset_rele(ds, FTAG);
	dsl_dir_rele(pdd, FTAG);
}

int
dmu_objset_create(const char *name, dmu_objset_type_t type, uint64_t flags,
    void (*func)(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx), void *arg)
{
	dmu_objset_create_arg_t doca;

	doca.doca_name = name;
	doca.doca_cred = CRED();
	doca.doca_flags = flags;
	doca.doca_userfunc = func;
	doca.doca_userarg = arg;
	doca.doca_type = type;

	return (dsl_sync_task(name,
	    dmu_objset_create_check, dmu_objset_create_sync, &doca,
	    5, ZFS_SPACE_CHECK_NORMAL));
}
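
/*
 * Illustrative sketch (added commentary, not part of the original file):
 * callers typically pass a callback so the new objset can be populated
 * within the same txg that creates it.  Hypothetical example:
 */
#if 0
static void
example_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
{
	/* Fill in the new objset's initial objects here. */
}

int err = dmu_objset_create("pool/newfs", DMU_OST_ZFS, 0,
    example_create_cb, NULL);
#endif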

typedef struct dmu_objset_clone_arg {
	const char *doca_clone;
	const char *doca_origin;
	cred_t *doca_cred;
} dmu_objset_clone_arg_t;

/*ARGSUSED*/
static int
dmu_objset_clone_check(void *arg, dmu_tx_t *tx)
{
	dmu_objset_clone_arg_t *doca = arg;
	dsl_dir_t *pdd;
	const char *tail;
	int error;
	dsl_dataset_t *origin;
	dsl_pool_t *dp = dmu_tx_pool(tx);

	if (strchr(doca->doca_clone, '@') != NULL)
		return (SET_ERROR(EINVAL));

	error = dsl_dir_hold(dp, doca->doca_clone, FTAG, &pdd, &tail);
	if (error != 0)
		return (error);
	if (tail == NULL) {
		dsl_dir_rele(pdd, FTAG);
		return (SET_ERROR(EEXIST));
	}
	/* You can't clone across pools. */
	if (pdd->dd_pool != dp) {
		dsl_dir_rele(pdd, FTAG);
		return (SET_ERROR(EXDEV));
	}
	error = dsl_fs_ss_limit_check(pdd, 1, ZFS_PROP_FILESYSTEM_LIMIT, NULL,
	    doca->doca_cred);
	if (error != 0) {
		dsl_dir_rele(pdd, FTAG);
		return (SET_ERROR(EDQUOT));
	}
	dsl_dir_rele(pdd, FTAG);

	error = dsl_dataset_hold(dp, doca->doca_origin, FTAG, &origin);
	if (error != 0)
		return (error);

	/* You can't clone across pools. */
	if (origin->ds_dir->dd_pool != dp) {
		dsl_dataset_rele(origin, FTAG);
		return (SET_ERROR(EXDEV));
	}

	/* You can only clone snapshots, not the head datasets. */
	if (!origin->ds_is_snapshot) {
		dsl_dataset_rele(origin, FTAG);
		return (SET_ERROR(EINVAL));
	}
	dsl_dataset_rele(origin, FTAG);

	return (0);
}

static void
dmu_objset_clone_sync(void *arg, dmu_tx_t *tx)
{
	dmu_objset_clone_arg_t *doca = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dir_t *pdd;
	const char *tail;
	dsl_dataset_t *origin, *ds;
	uint64_t obj;
	char namebuf[MAXNAMELEN];

	VERIFY0(dsl_dir_hold(dp, doca->doca_clone, FTAG, &pdd, &tail));
	VERIFY0(dsl_dataset_hold(dp, doca->doca_origin, FTAG, &origin));

	obj = dsl_dataset_create_sync(pdd, tail, origin, 0,
	    doca->doca_cred, tx);

	VERIFY0(dsl_dataset_hold_obj(pdd->dd_pool, obj, FTAG, &ds));
	dsl_dataset_name(origin, namebuf);
	spa_history_log_internal_ds(ds, "clone", tx,
	    "origin=%s (%llu)", namebuf, origin->ds_object);
	dsl_dataset_rele(ds, FTAG);
	dsl_dataset_rele(origin, FTAG);
	dsl_dir_rele(pdd, FTAG);
}

int
dmu_objset_clone(const char *clone, const char *origin)
{
	dmu_objset_clone_arg_t doca;

	doca.doca_clone = clone;
	doca.doca_origin = origin;
	doca.doca_cred = CRED();

	return (dsl_sync_task(clone,
	    dmu_objset_clone_check, dmu_objset_clone_sync, &doca,
	    5, ZFS_SPACE_CHECK_NORMAL));
}

int
dmu_objset_snapshot_one(const char *fsname, const char *snapname)
{
	int err;
	char *longsnap = kmem_asprintf("%s@%s", fsname, snapname);
	nvlist_t *snaps = fnvlist_alloc();

	fnvlist_add_boolean(snaps, longsnap);
	strfree(longsnap);
	err = dsl_dataset_snapshot(snaps, NULL, NULL);
	fnvlist_free(snaps);
	return (err);
}
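
/*
 * Illustrative sketch (added commentary, not part of the original file):
 * dsl_dataset_snapshot() takes an nvlist of full snapshot names, so a
 * caller can snapshot several datasets atomically in one sync task;
 * dmu_objset_snapshot_one() is just the single-name convenience wrapper.
 * Hypothetical multi-snapshot example:
 */
#if 0
nvlist_t *snaps = fnvlist_alloc();
fnvlist_add_boolean(snaps, "pool/fs1@backup");
fnvlist_add_boolean(snaps, "pool/fs2@backup");
int err = dsl_dataset_snapshot(snaps, NULL, NULL);
fnvlist_free(snaps);
#endif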

static void
dmu_objset_sync_dnodes(list_t *list, list_t *newlist, dmu_tx_t *tx)
{
	dnode_t *dn;

	while ((dn = list_head(list)) != NULL) {
		ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
		ASSERT(dn->dn_dbuf->db_data_pending);
		/*
		 * Initialize dn_zio outside dnode_sync() because the
		 * meta-dnode needs to set it outside dnode_sync().
		 */
		dn->dn_zio = dn->dn_dbuf->db_data_pending->dr_zio;
		ASSERT(dn->dn_zio);

		ASSERT3U(dn->dn_nlevels, <=, DN_MAX_LEVELS);
		list_remove(list, dn);

		if (newlist) {
			(void) dnode_add_ref(dn, newlist);
			list_insert_tail(newlist, dn);
		}

		dnode_sync(dn, tx);
	}
}

/* ARGSUSED */
static void
dmu_objset_write_ready(zio_t *zio, arc_buf_t *abuf, void *arg)
{
	blkptr_t *bp = zio->io_bp;
	objset_t *os = arg;
	dnode_phys_t *dnp = &os->os_phys->os_meta_dnode;

	ASSERT(!BP_IS_EMBEDDED(bp));
	ASSERT3P(bp, ==, os->os_rootbp);
	ASSERT3U(BP_GET_TYPE(bp), ==, DMU_OT_OBJSET);
	ASSERT0(BP_GET_LEVEL(bp));

	/*
	 * Update rootbp fill count: it should be the number of objects
	 * allocated in the object set (not counting the "special"
	 * objects that are stored in the objset_phys_t -- the meta
	 * dnode and user/group accounting objects).
	 */
	bp->blk_fill = 0;
	for (int i = 0; i < dnp->dn_nblkptr; i++)
		bp->blk_fill += BP_GET_FILL(&dnp->dn_blkptr[i]);
}

/* ARGSUSED */
static void
dmu_objset_write_done(zio_t *zio, arc_buf_t *abuf, void *arg)
{
	blkptr_t *bp = zio->io_bp;
	blkptr_t *bp_orig = &zio->io_bp_orig;
	objset_t *os = arg;

	if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
		ASSERT(BP_EQUAL(bp, bp_orig));
	} else {
		dsl_dataset_t *ds = os->os_dsl_dataset;
		dmu_tx_t *tx = os->os_synctx;

		(void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
		dsl_dataset_block_born(ds, bp, tx);
	}
}
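
/*
 * Note (added commentary, not part of the original file): arc_write()
 * invokes the "ready" callback above once the buffer's contents are
 * final but before the block is written, which is why the fill count
 * can still be patched into the root blkptr there; the "done" callback
 * runs after the I/O completes, when it is safe to account the old
 * block as killed and the new one as born.
 */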

/* called from dsl */
void
dmu_objset_sync(objset_t *os, zio_t *pio, dmu_tx_t *tx)
{
	int txgoff;
	zbookmark_phys_t zb;
	zio_prop_t zp;
	zio_t *zio;
	list_t *list;
	list_t *newlist = NULL;
	dbuf_dirty_record_t *dr;

	dprintf_ds(os->os_dsl_dataset, "txg=%llu\n", tx->tx_txg);

	ASSERT(dmu_tx_is_syncing(tx));
	/* XXX the write_done callback should really give us the tx... */
	os->os_synctx = tx;

	if (os->os_dsl_dataset == NULL) {
		/*
		 * This is the MOS.  If we have upgraded,
		 * spa_max_replication() could change, so reset
		 * os_copies here.
		 */
		os->os_copies = spa_max_replication(os->os_spa);
	}

	/*
	 * Create the root block IO
	 */
	SET_BOOKMARK(&zb, os->os_dsl_dataset ?
	    os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
	    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
	arc_release(os->os_phys_buf, &os->os_phys_buf);

	dmu_write_policy(os, NULL, 0, 0, &zp);

	zio = arc_write(pio, os->os_spa, tx->tx_txg,
	    os->os_rootbp, os->os_phys_buf, DMU_OS_IS_L2CACHEABLE(os),
	    DMU_OS_IS_L2COMPRESSIBLE(os), &zp, dmu_objset_write_ready,
	    NULL, dmu_objset_write_done, os, ZIO_PRIORITY_ASYNC_WRITE,
	    ZIO_FLAG_MUSTSUCCEED, &zb);

	/*
	 * Sync special dnodes - the parent IO for the sync is the root block
	 */
	DMU_META_DNODE(os)->dn_zio = zio;
	dnode_sync(DMU_META_DNODE(os), tx);

	os->os_phys->os_flags = os->os_flags;

	if (DMU_USERUSED_DNODE(os) &&
	    DMU_USERUSED_DNODE(os)->dn_type != DMU_OT_NONE) {
		DMU_USERUSED_DNODE(os)->dn_zio = zio;
		dnode_sync(DMU_USERUSED_DNODE(os), tx);
		DMU_GROUPUSED_DNODE(os)->dn_zio = zio;
		dnode_sync(DMU_GROUPUSED_DNODE(os), tx);
	}

	txgoff = tx->tx_txg & TXG_MASK;

	if (dmu_objset_userused_enabled(os)) {
		newlist = &os->os_synced_dnodes;
		/*
		 * We must create the list here because it uses the
		 * dn_dirty_link[] of this txg.
		 */
		list_create(newlist, sizeof (dnode_t),
		    offsetof(dnode_t, dn_dirty_link[txgoff]));
	}

	dmu_objset_sync_dnodes(&os->os_free_dnodes[txgoff], newlist, tx);
	dmu_objset_sync_dnodes(&os->os_dirty_dnodes[txgoff], newlist, tx);

	list = &DMU_META_DNODE(os)->dn_dirty_records[txgoff];
	while ((dr = list_head(list)) != NULL) {
		ASSERT0(dr->dr_dbuf->db_level);
		list_remove(list, dr);
		if (dr->dr_zio)
			zio_nowait(dr->dr_zio);
	}
	/*
	 * Free intent log blocks up to this tx.
	 */
	zil_sync(os->os_zil, tx);
	os->os_phys->os_zil_header = os->os_zil_header;
	zio_nowait(zio);
}

boolean_t
dmu_objset_is_dirty(objset_t *os, uint64_t txg)
{
	return (!list_is_empty(&os->os_dirty_dnodes[txg & TXG_MASK]) ||
	    !list_is_empty(&os->os_free_dnodes[txg & TXG_MASK]));
}

static objset_used_cb_t *used_cbs[DMU_OST_NUMTYPES];

void
dmu_objset_register_type(dmu_objset_type_t ost, objset_used_cb_t *cb)
{
	used_cbs[ost] = cb;
}

boolean_t
dmu_objset_userused_enabled(objset_t *os)
{
	return (spa_version(os->os_spa) >= SPA_VERSION_USERSPACE &&
	    used_cbs[os->os_phys->os_type] != NULL &&
	    DMU_USERUSED_DNODE(os) != NULL);
}

static void
do_userquota_update(objset_t *os, uint64_t used, uint64_t flags,
    uint64_t user, uint64_t group, boolean_t subtract, dmu_tx_t *tx)
{
	if ((flags & DNODE_FLAG_USERUSED_ACCOUNTED)) {
		int64_t delta = DNODE_SIZE + used;
		if (subtract)
			delta = -delta;
		VERIFY3U(0, ==, zap_increment_int(os, DMU_USERUSED_OBJECT,
		    user, delta, tx));
		VERIFY3U(0, ==, zap_increment_int(os, DMU_GROUPUSED_OBJECT,
		    group, delta, tx));
	}
}

void
dmu_objset_do_userquota_updates(objset_t *os, dmu_tx_t *tx)
{
	dnode_t *dn;
	list_t *list = &os->os_synced_dnodes;

	ASSERT(list_head(list) == NULL || dmu_objset_userused_enabled(os));

	while ((dn = list_head(list)) != NULL) {
		int flags;
		ASSERT(!DMU_OBJECT_IS_SPECIAL(dn->dn_object));
		ASSERT(dn->dn_phys->dn_type == DMU_OT_NONE ||
		    dn->dn_phys->dn_flags &
		    DNODE_FLAG_USERUSED_ACCOUNTED);

		/* Allocate the user/groupused objects if necessary. */
		if (DMU_USERUSED_DNODE(os)->dn_type == DMU_OT_NONE) {
			VERIFY(0 == zap_create_claim(os,
			    DMU_USERUSED_OBJECT,
			    DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx));
			VERIFY(0 == zap_create_claim(os,
			    DMU_GROUPUSED_OBJECT,
			    DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx));
		}

		/*
		 * We intentionally modify the zap object even if the
		 * net delta is zero.  Otherwise the block of the zap
		 * object could be shared between datasets, but it needs
		 * to differ between them after a bprewrite.
		 */

		flags = dn->dn_id_flags;
		ASSERT(flags);
		if (flags & DN_ID_OLD_EXIST) {
			do_userquota_update(os, dn->dn_oldused, dn->dn_oldflags,
			    dn->dn_olduid, dn->dn_oldgid, B_TRUE, tx);
		}
		if (flags & DN_ID_NEW_EXIST) {
			do_userquota_update(os, DN_USED_BYTES(dn->dn_phys),
			    dn->dn_phys->dn_flags, dn->dn_newuid,
			    dn->dn_newgid, B_FALSE, tx);
		}

		mutex_enter(&dn->dn_mtx);
		dn->dn_oldused = 0;
		dn->dn_oldflags = 0;
		if (dn->dn_id_flags & DN_ID_NEW_EXIST) {
			dn->dn_olduid = dn->dn_newuid;
			dn->dn_oldgid = dn->dn_newgid;
			dn->dn_id_flags |= DN_ID_OLD_EXIST;
			if (dn->dn_bonuslen == 0)
				dn->dn_id_flags |= DN_ID_CHKED_SPILL;
			else
				dn->dn_id_flags |= DN_ID_CHKED_BONUS;
		}
		dn->dn_id_flags &= ~(DN_ID_NEW_EXIST);
		mutex_exit(&dn->dn_mtx);

		list_remove(list, dn);
		dnode_rele(dn, list);
	}
}
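
/*
 * Illustrative note (added commentary, not part of the original file):
 * each objset consumer registers its accounting callback at module init
 * time; e.g. the ZPL registers a callback for DMU_OST_ZFS roughly like:
 *
 *	dmu_objset_register_type(DMU_OST_ZFS, zfs_space_delta_cb);
 *
 * The callback maps an object's bonus/spill data to its owning uid/gid
 * for the userquota updates above.
 */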

/*
 * Returns a pointer to the data from which to extract the uid/gid.
 *
 * If a dirty record for the transaction group that is syncing can't
 * be found, then NULL is returned.  In the NULL case it is assumed
 * the uid/gid aren't changing.
 */
static void *
dmu_objset_userquota_find_data(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
	dbuf_dirty_record_t *dr, **drp;
	void *data;

	if (db->db_dirtycnt == 0)
		return (db->db.db_data);  /* Nothing is changing */

	for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next)
		if (dr->dr_txg == tx->tx_txg)
			break;

	if (dr == NULL) {
		data = NULL;
	} else {
		dnode_t *dn;

		DB_DNODE_ENTER(dr->dr_dbuf);
		dn = DB_DNODE(dr->dr_dbuf);

		if (dn->dn_bonuslen == 0 &&
		    dr->dr_dbuf->db_blkid == DMU_SPILL_BLKID)
			data = dr->dt.dl.dr_data->b_data;
		else
			data = dr->dt.dl.dr_data;

		DB_DNODE_EXIT(dr->dr_dbuf);
	}

	return (data);
}

void
dmu_objset_userquota_get_ids(dnode_t *dn, boolean_t before, dmu_tx_t *tx)
{
	objset_t *os = dn->dn_objset;
	void *data = NULL;
	dmu_buf_impl_t *db = NULL;
	uint64_t *user = NULL;
	uint64_t *group = NULL;
	int flags = dn->dn_id_flags;
	int error;
	boolean_t have_spill = B_FALSE;

	if (!dmu_objset_userused_enabled(dn->dn_objset))
		return;

	if (before && (flags & (DN_ID_CHKED_BONUS|DN_ID_OLD_EXIST|
	    DN_ID_CHKED_SPILL)))
		return;

	if (before && dn->dn_bonuslen != 0)
		data = DN_BONUS(dn->dn_phys);
	else if (!before && dn->dn_bonuslen != 0) {
		if (dn->dn_bonus) {
			db = dn->dn_bonus;
			mutex_enter(&db->db_mtx);
			data = dmu_objset_userquota_find_data(db, tx);
		} else {
			data = DN_BONUS(dn->dn_phys);
		}
	} else if (dn->dn_bonuslen == 0 && dn->dn_bonustype == DMU_OT_SA) {
		int rf = 0;

		if (RW_WRITE_HELD(&dn->dn_struct_rwlock))
			rf |= DB_RF_HAVESTRUCT;
		error = dmu_spill_hold_by_dnode(dn,
		    rf | DB_RF_MUST_SUCCEED,
		    FTAG, (dmu_buf_t **)&db);
		ASSERT(error == 0);
		mutex_enter(&db->db_mtx);
		data = (before) ? db->db.db_data :
		    dmu_objset_userquota_find_data(db, tx);
		have_spill = B_TRUE;
	} else {
		mutex_enter(&dn->dn_mtx);
		dn->dn_id_flags |= DN_ID_CHKED_BONUS;
		mutex_exit(&dn->dn_mtx);
		return;
	}

	if (before) {
		ASSERT(data);
		user = &dn->dn_olduid;
		group = &dn->dn_oldgid;
	} else if (data) {
		user = &dn->dn_newuid;
		group = &dn->dn_newgid;
	}

	/*
	 * Must always call the callback in case the object
	 * type has changed and that type isn't an object type to track.
	 */
	error = used_cbs[os->os_phys->os_type](dn->dn_bonustype, data,
	    user, group);

	/*
	 * Preserve existing uid/gid when the callback can't determine
	 * what the new uid/gid are and the callback returned EEXIST.
	 * The EEXIST error tells us to just use the existing uid/gid.
	 * If we don't know what the old values are then just assign
	 * them to 0, since that is a new file being created.
	 */
	if (!before && data == NULL && error == EEXIST) {
		if (flags & DN_ID_OLD_EXIST) {
			dn->dn_newuid = dn->dn_olduid;
			dn->dn_newgid = dn->dn_oldgid;
		} else {
			dn->dn_newuid = 0;
			dn->dn_newgid = 0;
		}
		error = 0;
	}

	if (db)
		mutex_exit(&db->db_mtx);

	mutex_enter(&dn->dn_mtx);
	if (error == 0 && before)
		dn->dn_id_flags |= DN_ID_OLD_EXIST;
	if (error == 0 && !before)
		dn->dn_id_flags |= DN_ID_NEW_EXIST;

	if (have_spill) {
		dn->dn_id_flags |= DN_ID_CHKED_SPILL;
	} else {
		dn->dn_id_flags |= DN_ID_CHKED_BONUS;
	}
	mutex_exit(&dn->dn_mtx);
	if (have_spill)
		dmu_buf_rele((dmu_buf_t *)db, FTAG);
}

boolean_t
dmu_objset_userspace_present(objset_t *os)
{
	return (os->os_phys->os_flags &
	    OBJSET_FLAG_USERACCOUNTING_COMPLETE);
}

int
dmu_objset_userspace_upgrade(objset_t *os)
{
	uint64_t obj;
	int err = 0;

	if (dmu_objset_userspace_present(os))
		return (0);
	if (!dmu_objset_userused_enabled(os))
		return (SET_ERROR(ENOTSUP));
	if (dmu_objset_is_snapshot(os))
		return (SET_ERROR(EINVAL));

	/*
	 * We simply need to mark every object dirty, so that it will be
	 * synced out and now accounted.  If this is called
	 * concurrently, or if we already did some work before crashing,
	 * that's fine, since we track each object's accounted state
	 * independently.
	 */

	for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE, 0)) {
		dmu_tx_t *tx;
		dmu_buf_t *db;
		int objerr;

		if (issig(JUSTLOOKING) && issig(FORREAL))
			return (SET_ERROR(EINTR));

		objerr = dmu_bonus_hold(os, obj, FTAG, &db);
		if (objerr != 0)
			continue;
		tx = dmu_tx_create(os);
		dmu_tx_hold_bonus(tx, obj);
		objerr = dmu_tx_assign(tx, TXG_WAIT);
		if (objerr != 0) {
			dmu_tx_abort(tx);
			continue;
		}
		dmu_buf_will_dirty(db, tx);
		dmu_buf_rele(db, FTAG);
		dmu_tx_commit(tx);
	}

	os->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE;
	txg_wait_synced(dmu_objset_pool(os), 0);
	return (0);
}

void
dmu_objset_space(objset_t *os, uint64_t *refdbytesp, uint64_t *availbytesp,
    uint64_t *usedobjsp, uint64_t *availobjsp)
{
	dsl_dataset_space(os->os_dsl_dataset, refdbytesp, availbytesp,
	    usedobjsp, availobjsp);
}

uint64_t
dmu_objset_fsid_guid(objset_t *os)
{
	return (dsl_dataset_fsid_guid(os->os_dsl_dataset));
}

void
dmu_objset_fast_stat(objset_t *os, dmu_objset_stats_t *stat)
{
	stat->dds_type = os->os_phys->os_type;
	if (os->os_dsl_dataset)
		dsl_dataset_fast_stat(os->os_dsl_dataset, stat);
}

void
dmu_objset_stats(objset_t *os, nvlist_t *nv)
{
	ASSERT(os->os_dsl_dataset ||
	    os->os_phys->os_type == DMU_OST_META);

	if (os->os_dsl_dataset != NULL)
		dsl_dataset_stats(os->os_dsl_dataset, nv);

	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_TYPE,
	    os->os_phys->os_type);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USERACCOUNTING,
	    dmu_objset_userspace_present(os));
}

int
dmu_objset_is_snapshot(objset_t *os)
{
	if (os->os_dsl_dataset != NULL)
		return (os->os_dsl_dataset->ds_is_snapshot);
	else
		return (B_FALSE);
}
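
/*
 * Illustrative note (added commentary, not part of the original file):
 * dmu_objset_userspace_upgrade() is restartable by design -- each
 * object's accounted state lives in its own dnode flags, so a crash or
 * signal mid-walk simply means the next invocation redirties whatever
 * remains.  It is driven from user context, e.g. via
 * zfs_ioc_userspace_upgrade() when user/group accounting is first
 * requested on an old filesystem.
 */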

int
dmu_snapshot_realname(objset_t *os, char *name, char *real, int maxlen,
    boolean_t *conflict)
{
	dsl_dataset_t *ds = os->os_dsl_dataset;
	uint64_t ignored;

	if (dsl_dataset_phys(ds)->ds_snapnames_zapobj == 0)
		return (SET_ERROR(ENOENT));

	return (zap_lookup_norm(ds->ds_dir->dd_pool->dp_meta_objset,
	    dsl_dataset_phys(ds)->ds_snapnames_zapobj, name, 8, 1, &ignored,
	    MT_FIRST, real, maxlen, conflict));
}

int
dmu_snapshot_list_next(objset_t *os, int namelen, char *name,
    uint64_t *idp, uint64_t *offp, boolean_t *case_conflict)
{
	dsl_dataset_t *ds = os->os_dsl_dataset;
	zap_cursor_t cursor;
	zap_attribute_t attr;

	ASSERT(dsl_pool_config_held(dmu_objset_pool(os)));

	if (dsl_dataset_phys(ds)->ds_snapnames_zapobj == 0)
		return (SET_ERROR(ENOENT));

	zap_cursor_init_serialized(&cursor,
	    ds->ds_dir->dd_pool->dp_meta_objset,
	    dsl_dataset_phys(ds)->ds_snapnames_zapobj, *offp);

	if (zap_cursor_retrieve(&cursor, &attr) != 0) {
		zap_cursor_fini(&cursor);
		return (SET_ERROR(ENOENT));
	}

	if (strlen(attr.za_name) + 1 > namelen) {
		zap_cursor_fini(&cursor);
		return (SET_ERROR(ENAMETOOLONG));
	}

	(void) strcpy(name, attr.za_name);
	if (idp)
		*idp = attr.za_first_integer;
	if (case_conflict)
		*case_conflict = attr.za_normalization_conflict;
	zap_cursor_advance(&cursor);
	*offp = zap_cursor_serialize(&cursor);
	zap_cursor_fini(&cursor);

	return (0);
}

int
dmu_dir_list_next(objset_t *os, int namelen, char *name,
    uint64_t *idp, uint64_t *offp)
{
	dsl_dir_t *dd = os->os_dsl_dataset->ds_dir;
	zap_cursor_t cursor;
	zap_attribute_t attr;

	/* there is no next dir on a snapshot! */
	if (os->os_dsl_dataset->ds_object !=
	    dsl_dir_phys(dd)->dd_head_dataset_obj)
		return (SET_ERROR(ENOENT));

	zap_cursor_init_serialized(&cursor,
	    dd->dd_pool->dp_meta_objset,
	    dsl_dir_phys(dd)->dd_child_dir_zapobj, *offp);

	if (zap_cursor_retrieve(&cursor, &attr) != 0) {
		zap_cursor_fini(&cursor);
		return (SET_ERROR(ENOENT));
	}

	if (strlen(attr.za_name) + 1 > namelen) {
		zap_cursor_fini(&cursor);
		return (SET_ERROR(ENAMETOOLONG));
	}

	(void) strcpy(name, attr.za_name);
	if (idp)
		*idp = attr.za_first_integer;
	zap_cursor_advance(&cursor);
	*offp = zap_cursor_serialize(&cursor);
	zap_cursor_fini(&cursor);

	return (0);
}
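
/*
 * Illustrative sketch (added commentary, not part of the original file):
 * both list_next functions above are resumable iterators -- *offp is a
 * serialized ZAP cursor, so a caller can walk all entries one at a time
 * across separate calls.  Hypothetical loop:
 */
#if 0
char name[MAXNAMELEN];
uint64_t id, off = 0;
boolean_t conflict;

while (dmu_snapshot_list_next(os, sizeof (name), name,
    &id, &off, &conflict) == 0) {
	/* process one snapshot name per iteration */
}
#endif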

/*
 * Find objsets under and including ddobj, call func(ds) on each.
 */
int
dmu_objset_find_dp(dsl_pool_t *dp, uint64_t ddobj,
    int func(dsl_pool_t *, dsl_dataset_t *, void *), void *arg, int flags)
{
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	zap_cursor_t zc;
	zap_attribute_t *attr;
	uint64_t thisobj;
	int err;

	ASSERT(dsl_pool_config_held(dp));

	err = dsl_dir_hold_obj(dp, ddobj, NULL, FTAG, &dd);
	if (err != 0)
		return (err);

	/* Don't visit hidden ($MOS & $ORIGIN) objsets. */
	if (dd->dd_myname[0] == '$') {
		dsl_dir_rele(dd, FTAG);
		return (0);
	}

	thisobj = dsl_dir_phys(dd)->dd_head_dataset_obj;
	attr = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);

	/*
	 * Iterate over all children.
	 */
	if (flags & DS_FIND_CHILDREN) {
		for (zap_cursor_init(&zc, dp->dp_meta_objset,
		    dsl_dir_phys(dd)->dd_child_dir_zapobj);
		    zap_cursor_retrieve(&zc, attr) == 0;
		    (void) zap_cursor_advance(&zc)) {
			ASSERT3U(attr->za_integer_length, ==,
			    sizeof (uint64_t));
			ASSERT3U(attr->za_num_integers, ==, 1);

			err = dmu_objset_find_dp(dp, attr->za_first_integer,
			    func, arg, flags);
			if (err != 0)
				break;
		}
		zap_cursor_fini(&zc);

		if (err != 0) {
			dsl_dir_rele(dd, FTAG);
			kmem_free(attr, sizeof (zap_attribute_t));
			return (err);
		}
	}

	/*
	 * Iterate over all snapshots.
	 */
	if (flags & DS_FIND_SNAPSHOTS) {
		dsl_dataset_t *ds;
		err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);

		if (err == 0) {
			uint64_t snapobj;

			snapobj = dsl_dataset_phys(ds)->ds_snapnames_zapobj;
			dsl_dataset_rele(ds, FTAG);

			for (zap_cursor_init(&zc, dp->dp_meta_objset, snapobj);
			    zap_cursor_retrieve(&zc, attr) == 0;
			    (void) zap_cursor_advance(&zc)) {
				ASSERT3U(attr->za_integer_length, ==,
				    sizeof (uint64_t));
				ASSERT3U(attr->za_num_integers, ==, 1);

				err = dsl_dataset_hold_obj(dp,
				    attr->za_first_integer, FTAG, &ds);
				if (err != 0)
					break;
				err = func(dp, ds, arg);
				dsl_dataset_rele(ds, FTAG);
				if (err != 0)
					break;
			}
			zap_cursor_fini(&zc);
		}
	}

	dsl_dir_rele(dd, FTAG);
	kmem_free(attr, sizeof (zap_attribute_t));

	if (err != 0)
		return (err);

	/*
	 * Apply to self.
	 */
	err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);
	if (err != 0)
		return (err);
	err = func(dp, ds, arg);
	dsl_dataset_rele(ds, FTAG);
	return (err);
}
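
/*
 * Illustrative sketch (added commentary, not part of the original file):
 * a hypothetical caller visiting every dataset in a pool, including
 * snapshots, while holding the pool config lock as required:
 */
#if 0
static int
example_visit_cb(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
{
	/* examine ds; return nonzero to abort the walk */
	return (0);
}

dsl_pool_config_enter(dp, FTAG);
err = dmu_objset_find_dp(dp, dp->dp_root_dir_obj, example_visit_cb,
    NULL, DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
dsl_pool_config_exit(dp, FTAG);
#endif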

/*
 * Find all objsets under name, and for each, call 'func(child_name, arg)'.
 * The dp_config_rwlock must not be held when this is called, and it
 * will not be held when the callback is called.
 * Therefore this function should only be used when the pool is not changing
 * (e.g. in syncing context), or the callback can deal with the possible races.
 */
static int
dmu_objset_find_impl(spa_t *spa, const char *name,
    int func(const char *, void *), void *arg, int flags)
{
	dsl_dir_t *dd;
	dsl_pool_t *dp = spa_get_dsl(spa);
	dsl_dataset_t *ds;
	zap_cursor_t zc;
	zap_attribute_t *attr;
	char *child;
	uint64_t thisobj;
	int err;

	dsl_pool_config_enter(dp, FTAG);

	err = dsl_dir_hold(dp, name, FTAG, &dd, NULL);
	if (err != 0) {
		dsl_pool_config_exit(dp, FTAG);
		return (err);
	}

	/* Don't visit hidden ($MOS & $ORIGIN) objsets. */
	if (dd->dd_myname[0] == '$') {
		dsl_dir_rele(dd, FTAG);
		dsl_pool_config_exit(dp, FTAG);
		return (0);
	}

	thisobj = dsl_dir_phys(dd)->dd_head_dataset_obj;
	attr = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);

	/*
	 * Iterate over all children.
	 */
	if (flags & DS_FIND_CHILDREN) {
		for (zap_cursor_init(&zc, dp->dp_meta_objset,
		    dsl_dir_phys(dd)->dd_child_dir_zapobj);
		    zap_cursor_retrieve(&zc, attr) == 0;
		    (void) zap_cursor_advance(&zc)) {
			ASSERT3U(attr->za_integer_length, ==,
			    sizeof (uint64_t));
			ASSERT3U(attr->za_num_integers, ==, 1);

			child = kmem_asprintf("%s/%s", name, attr->za_name);
			dsl_pool_config_exit(dp, FTAG);
			err = dmu_objset_find_impl(spa, child,
			    func, arg, flags);
			dsl_pool_config_enter(dp, FTAG);
			strfree(child);
			if (err != 0)
				break;
		}
		zap_cursor_fini(&zc);

		if (err != 0) {
			dsl_dir_rele(dd, FTAG);
			dsl_pool_config_exit(dp, FTAG);
			kmem_free(attr, sizeof (zap_attribute_t));
			return (err);
		}
	}

	/*
	 * Iterate over all snapshots.
	 */
	if (flags & DS_FIND_SNAPSHOTS) {
		err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);

		if (err == 0) {
			uint64_t snapobj;

			snapobj = dsl_dataset_phys(ds)->ds_snapnames_zapobj;
			dsl_dataset_rele(ds, FTAG);

			for (zap_cursor_init(&zc, dp->dp_meta_objset, snapobj);
			    zap_cursor_retrieve(&zc, attr) == 0;
			    (void) zap_cursor_advance(&zc)) {
				ASSERT3U(attr->za_integer_length, ==,
				    sizeof (uint64_t));
				ASSERT3U(attr->za_num_integers, ==, 1);

				child = kmem_asprintf("%s@%s",
				    name, attr->za_name);
				dsl_pool_config_exit(dp, FTAG);
				err = func(child, arg);
				dsl_pool_config_enter(dp, FTAG);
				strfree(child);
				if (err != 0)
					break;
			}
			zap_cursor_fini(&zc);
		}
	}

	dsl_dir_rele(dd, FTAG);
	kmem_free(attr, sizeof (zap_attribute_t));
	dsl_pool_config_exit(dp, FTAG);

	if (err != 0)
		return (err);

	/* Apply to self. */
	return (func(name, arg));
}

/*
 * See comment above dmu_objset_find_impl().
 */
int
dmu_objset_find(char *name, int func(const char *, void *), void *arg,
    int flags)
{
	spa_t *spa;
	int error;

	error = spa_open(name, &spa, FTAG);
	if (error != 0)
		return (error);
	error = dmu_objset_find_impl(spa, name, func, arg, flags);
	spa_close(spa, FTAG);
	return (error);
}

void
dmu_objset_set_user(objset_t *os, void *user_ptr)
{
	ASSERT(MUTEX_HELD(&os->os_user_ptr_lock));
	os->os_user_ptr = user_ptr;
}

void *
dmu_objset_get_user(objset_t *os)
{
	ASSERT(MUTEX_HELD(&os->os_user_ptr_lock));
	return (os->os_user_ptr);
}

/*
 * Determine name of filesystem, given name of snapshot.
 * buf must be at least MAXNAMELEN bytes
 */
int
dmu_fsname(const char *snapname, char *buf)
{
	char *atp = strchr(snapname, '@');
	if (atp == NULL)
		return (SET_ERROR(EINVAL));
	if (atp - snapname >= MAXNAMELEN)
		return (SET_ERROR(ENAMETOOLONG));
	(void) strlcpy(buf, snapname, atp - snapname + 1);
	return (0);
}
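
/*
 * Illustrative note (added commentary, not part of the original file):
 * dmu_fsname() copies only the filesystem component, so for
 * snapname = "pool/fs@snap" it fills buf with "pool/fs".  The strlcpy()
 * size of (atp - snapname + 1) admits exactly the characters before the
 * '@' plus the terminating NUL.
 */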