/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright 2013 Saso Kiselkov. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/spa_boot.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_scan.h>
#include <sys/fs/zfs.h>
#include <sys/metaslab_impl.h>
#include <sys/arc.h>
#include <sys/ddt.h>
#include "zfs_prop.h"
#include <sys/zfeature.h>

/*
 * SPA locking
 *
 * There are four basic locks for managing spa_t structures:
 *
 * spa_namespace_lock (global mutex)
 *
 *	This lock must be acquired to do any of the following:
 *
 *		- Lookup a spa_t by name
 *		- Add or remove a spa_t from the namespace
 *		- Increase spa_refcount from non-zero
 *		- Check if spa_refcount is zero
 *		- Rename a spa_t
 *		- add/remove/attach/detach devices
 *		- Held for the duration of create/destroy/import/export
 *
 *	It does not need to handle recursion.  A create or destroy may
 *	reference objects (files or zvols) in other pools, but by
 *	definition they must have an existing reference, and will never need
 *	to lookup a spa_t by name.
 *
 * spa_refcount (per-spa refcount_t protected by mutex)
 *
 *	This reference count keeps track of any active users of the spa_t.
 *	The spa_t cannot be destroyed or freed while this is non-zero.
 *	Internally, the refcount is never really 'zero' - opening a pool
 *	implicitly keeps some references in the DMU.  Internally we check
 *	against spa_minref, but present the image of a zero/non-zero value
 *	to consumers.
 *
 * spa_config_lock[] (per-spa array of rwlocks)
 *
 *	This protects the spa_t from config changes, and must be held in
 *	the following circumstances:
 *
 *		- RW_READER to perform I/O to the spa
 *		- RW_WRITER to change the vdev config
 *
 * The locking order is fairly straightforward:
 *
 *		spa_namespace_lock	->	spa_refcount
 *
 *	The namespace lock must be acquired to increase the refcount from 0
 *	or to check if it is zero.
 *
 *		spa_refcount		->	spa_config_lock[]
 *
 *	There must be at least one valid reference on the spa_t to acquire
 *	the config lock.
 *
 *		spa_namespace_lock	->	spa_config_lock[]
 *
 *	The namespace lock must always be taken before the config lock.
 *
 *
 * The spa_namespace_lock can be acquired directly and is globally visible.
 *
 * The namespace is manipulated using the following functions, all of which
 * require the spa_namespace_lock to be held.
 *
 *	spa_lookup()		Lookup a spa_t by name.
 *
 *	spa_add()		Create a new spa_t in the namespace.
 *
 *	spa_remove()		Remove a spa_t from the namespace.  This also
 *				frees up any memory associated with the spa_t.
 *
 *	spa_next()		Returns the next spa_t in the system, or the
 *				first if NULL is passed.
 *
 *	spa_evict_all()		Shutdown and remove all spa_t structures in
 *				the system.
 *
 *	spa_guid_exists()	Determine whether a pool/device guid exists.
 *
 * The spa_refcount is manipulated using the following functions:
 *
 *	spa_open_ref()		Adds a reference to the given spa_t.  Must be
 *				called with spa_namespace_lock held if the
 *				refcount is currently zero.
 *
 *	spa_close()		Remove a reference from the spa_t.  This will
 *				not free the spa_t or remove it from the
 *				namespace.  No locking is required.
 *
 *	spa_refcount_zero()	Returns true if the refcount is currently
 *				zero.  Must be called with spa_namespace_lock
 *				held.
 *
 * The spa_config_lock[] is an array of rwlocks, ordered as follows:
 * SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV.
 * spa_config_lock[] is manipulated with spa_config_{enter,exit,held}().
 *
 * To read the configuration, it suffices to hold one of these locks as reader.
 * To modify the configuration, you must hold all locks as writer.  To modify
 * vdev state without altering the vdev tree's topology (e.g. online/offline),
 * you must hold SCL_STATE and SCL_ZIO as writer.
 *
 * We use these distinct config locks to avoid recursive lock entry.
 * For example, spa_sync() (which holds SCL_CONFIG as reader) induces
 * block allocations (SCL_ALLOC), which may require reading space maps
 * from disk (dmu_read() -> zio_read() -> SCL_ZIO).
 *
 * The spa config locks cannot be normal rwlocks because we need the
 * ability to hand off ownership.  For example, SCL_ZIO is acquired
 * by the issuing thread and later released by an interrupt thread.
 * They do, however, obey the usual write-wanted semantics to prevent
 * writer (i.e. system administrator) starvation.
 *
 * The lock acquisition rules are as follows:
 *
 * SCL_CONFIG
 *	Protects changes to the vdev tree topology, such as vdev
 *	add/remove/attach/detach.  Protects the dirty config list
 *	(spa_config_dirty_list) and the set of spares and l2arc devices.
 *
 * SCL_STATE
 *	Protects changes to pool state and vdev state, such as vdev
 *	online/offline/fault/degrade/clear.  Protects the dirty state list
 *	(spa_state_dirty_list) and global pool state (spa_state).
 *
 * SCL_ALLOC
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_alloc() and metaslab_claim().
 *
 * SCL_ZIO
 *	Held by bp-level zios (those which have no io_vd upon entry)
 *	to prevent changes to the vdev tree.  The bp-level zio implicitly
 *	protects all of its vdev child zios, which do not hold SCL_ZIO.
 *
 * SCL_FREE
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_free().  SCL_FREE is distinct from
 *	SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free
 *	blocks in zio_done() while another i/o that holds either
 *	SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete.
 *
 * SCL_VDEV
 *	Held as reader to prevent changes to the vdev tree during trivial
 *	inquiries such as bp_get_dsize().  SCL_VDEV is distinct from the
 *	other locks, and lower than all of them, to ensure that it's safe
 *	to acquire regardless of caller context.
 *
 * In addition, the following rules apply:
 *
 * (a)	spa_props_lock protects pool properties, spa_config and spa_config_list.
 *	The lock ordering is SCL_CONFIG > spa_props_lock.
 *
 * (b)	I/O operations on leaf vdevs.  For any zio operation that takes
 *	an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(),
 *	or zio_write_phys() -- the caller must ensure that the config cannot
 *	change in the interim, and that the vdev cannot be reopened.
 *	SCL_STATE as reader suffices for both.
 *
 * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
 *
 *	spa_vdev_enter()	Acquire the namespace lock and the config lock
 *				for writing.
 *
 *	spa_vdev_exit()		Release the config lock, wait for all I/O
 *				to complete, sync the updated configs to the
 *				cache, and release the namespace lock.
 *
 * vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit().
 * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
 * locking is, always, based on spa_namespace_lock and spa_config_lock[].
 *
 * spa_rename() is also implemented within this file since it requires
 * manipulation of the namespace.
 */

static avl_tree_t spa_namespace_avl;
kmutex_t spa_namespace_lock;
static kcondvar_t spa_namespace_cv;
static int spa_active_count;
int spa_max_replication_override = SPA_DVAS_PER_BP;

static kmutex_t spa_spare_lock;
static avl_tree_t spa_spare_avl;
static kmutex_t spa_l2cache_lock;
static avl_tree_t spa_l2cache_avl;

kmem_cache_t *spa_buffer_pool;
int spa_mode_global;

#ifdef ZFS_DEBUG
/* Everything except dprintf and spa is on by default in debug builds */
int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SPA);
#else
int zfs_flags = 0;
#endif

/*
 * zfs_recover can be set to nonzero to attempt to recover from
 * otherwise-fatal errors, typically caused by on-disk corruption.  When
 * set, calls to zfs_panic_recover() will turn into warning messages.
 */
int zfs_recover = 0;

/*
 * Expiration time in milliseconds. This value has two meanings. First it is
 * used to determine when the spa_deadman() logic should fire. By default the
 * spa_deadman() will fire if spa_sync() has not completed in 1000 seconds.
 * Secondly, the value determines if an I/O is considered "hung". Any I/O that
 * has not completed in zfs_deadman_synctime_ms is considered "hung" resulting
 * in a system panic.
 */
uint64_t zfs_deadman_synctime_ms = 1000000ULL;

/*
 * Check time in milliseconds. This defines the frequency at which we check
 * for hung I/O.
 */
uint64_t zfs_deadman_checktime_ms = 5000ULL;

/*
 * Override the zfs deadman behavior via /etc/system.  By default the
 * deadman is enabled except on VMware and sparc deployments.
 */
int zfs_deadman_enabled = -1;

/*
 * The worst case is single-sector max-parity RAID-Z blocks, in which
 * case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1)
 * times the size; so just assume that.  Add to this the fact that
 * we can have up to 3 DVAs per bp, and one more factor of 2 because
 * the block may be dittoed with up to 3 DVAs by ddt_sync().  All together,
 * the worst case is:
 *	(VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24
 * (with VDEV_RAIDZ_MAXPARITY == 3 and SPA_DVAS_PER_BP == 3, that is
 * 4 * 3 * 2 == 24).
 */
int spa_asize_inflation = 24;

/*
 * ==========================================================================
 * SPA config locking
 * ==========================================================================
 */
static void
spa_config_lock_init(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
		cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
		refcount_create_untracked(&scl->scl_count);
		scl->scl_writer = NULL;
		scl->scl_write_wanted = 0;
	}
}

static void
spa_config_lock_destroy(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_destroy(&scl->scl_lock);
		cv_destroy(&scl->scl_cv);
		refcount_destroy(&scl->scl_count);
		ASSERT(scl->scl_writer == NULL);
		ASSERT(scl->scl_write_wanted == 0);
	}
}

int
spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			if (scl->scl_writer || scl->scl_write_wanted) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks ^ (1 << i), tag);
				return (0);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			if (!refcount_is_zero(&scl->scl_count)) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks ^ (1 << i), tag);
				return (0);
			}
			scl->scl_writer = curthread;
		}
		(void) refcount_add(&scl->scl_count, tag);
		mutex_exit(&scl->scl_lock);
	}
	return (1);
}

void
spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw)
{
	int wlocks_held = 0;

	ASSERT3U(SCL_LOCKS, <, sizeof (wlocks_held) * NBBY);

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (scl->scl_writer == curthread)
			wlocks_held |= (1 << i);
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			while (scl->scl_writer || scl->scl_write_wanted) {
				cv_wait(&scl->scl_cv, &scl->scl_lock);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			while (!refcount_is_zero(&scl->scl_count)) {
				scl->scl_write_wanted++;
				cv_wait(&scl->scl_cv, &scl->scl_lock);
				scl->scl_write_wanted--;
			}
			scl->scl_writer = curthread;
		}
		(void) refcount_add(&scl->scl_count, tag);
		mutex_exit(&scl->scl_lock);
	}
	ASSERT(wlocks_held <= locks);
}
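
/*
 * Example (illustrative sketch only, not part of this file): a typical
 * reader takes one of the config locks around its inspection of the vdev
 * tree and releases it with the same tag.  The function name and local
 * variable below are hypothetical.
 *
 *	static uint64_t
 *	example_root_children(spa_t *spa)
 *	{
 *		uint64_t children;
 *
 *		spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
 *		children = spa->spa_root_vdev->vdev_children;
 *		spa_config_exit(spa, SCL_VDEV, FTAG);
 *
 *		return (children);
 *	}
 */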

void
spa_config_exit(spa_t *spa, int locks, void *tag)
{
	for (int i = SCL_LOCKS - 1; i >= 0; i--) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		ASSERT(!refcount_is_zero(&scl->scl_count));
		if (refcount_remove(&scl->scl_count, tag) == 0) {
			ASSERT(scl->scl_writer == NULL ||
			    scl->scl_writer == curthread);
			scl->scl_writer = NULL;	/* OK in either case */
			cv_broadcast(&scl->scl_cv);
		}
		mutex_exit(&scl->scl_lock);
	}
}

int
spa_config_held(spa_t *spa, int locks, krw_t rw)
{
	int locks_held = 0;

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		if ((rw == RW_READER && !refcount_is_zero(&scl->scl_count)) ||
		    (rw == RW_WRITER && scl->scl_writer == curthread))
			locks_held |= 1 << i;
	}

	return (locks_held);
}

/*
 * ==========================================================================
 * SPA namespace functions
 * ==========================================================================
 */

/*
 * Lookup the named spa_t in the AVL tree.  The spa_namespace_lock must be held.
 * Returns NULL if no matching spa_t is found.
 */
spa_t *
spa_lookup(const char *name)
{
	static spa_t search;	/* spa_t is large; don't allocate on stack */
	spa_t *spa;
	avl_index_t where;
	char *cp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	(void) strlcpy(search.spa_name, name, sizeof (search.spa_name));

	/*
	 * If it's a full dataset name, figure out the pool name and
	 * just use that.
	 */
	cp = strpbrk(search.spa_name, "/@");
	if (cp != NULL)
		*cp = '\0';

	spa = avl_find(&spa_namespace_avl, &search, &where);

	return (spa);
}

/*
 * Fires when spa_sync has not completed within zfs_deadman_synctime_ms.
 * If the zfs_deadman_enabled flag is set then it inspects all vdev queues
 * looking for potentially hung I/Os.
 */
void
spa_deadman(void *arg)
{
	spa_t *spa = arg;

	/*
	 * Disable the deadman timer if the pool is suspended.
	 */
	if (spa_suspended(spa)) {
		VERIFY(cyclic_reprogram(spa->spa_deadman_cycid, CY_INFINITY));
		return;
	}

	zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu",
	    (gethrtime() - spa->spa_sync_starttime) / NANOSEC,
	    ++spa->spa_deadman_calls);
	if (zfs_deadman_enabled)
		vdev_deadman(spa->spa_root_vdev);
}
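
/*
 * Example (illustrative sketch only, not part of this file): callers look
 * up a pool by name while holding spa_namespace_lock; a full dataset name
 * such as "tank/home@snap" resolves to the pool "tank".  The function name
 * below is hypothetical.
 *
 *	static boolean_t
 *	example_pool_exists(const char *name)
 *	{
 *		boolean_t exists;
 *
 *		mutex_enter(&spa_namespace_lock);
 *		exists = (spa_lookup(name) != NULL);
 *		mutex_exit(&spa_namespace_lock);
 *
 *		return (exists);
 *	}
 */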

/*
 * Create an uninitialized spa_t with the given name.  Requires
 * spa_namespace_lock.  The caller must ensure that the spa_t doesn't already
 * exist by calling spa_lookup() first.
 */
spa_t *
spa_add(const char *name, nvlist_t *config, const char *altroot)
{
	spa_t *spa;
	spa_config_dirent_t *dp;
	cyc_handler_t hdlr;
	cyc_time_t when;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);

	mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_cksum_tmpls_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_iokstat_lock, NULL, MUTEX_DEFAULT, NULL);

	cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);

	for (int t = 0; t < TXG_SIZE; t++)
		bplist_create(&spa->spa_free_bplist[t]);

	(void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name));
	spa->spa_state = POOL_STATE_UNINITIALIZED;
	spa->spa_freeze_txg = UINT64_MAX;
	spa->spa_final_txg = UINT64_MAX;
	spa->spa_load_max_txg = UINT64_MAX;
	spa->spa_proc = &p0;
	spa->spa_proc_state = SPA_PROC_NONE;

	hdlr.cyh_func = spa_deadman;
	hdlr.cyh_arg = spa;
	hdlr.cyh_level = CY_LOW_LEVEL;

	spa->spa_deadman_synctime = MSEC2NSEC(zfs_deadman_synctime_ms);

	/*
	 * This determines how often we need to check for hung I/Os after
	 * the cyclic has already fired.  Since checking for hung I/Os is
	 * an expensive operation we don't want to check too frequently.
	 * Instead wait for 5 seconds before checking again.
	 */
	when.cyt_interval = MSEC2NSEC(zfs_deadman_checktime_ms);
	when.cyt_when = CY_INFINITY;
	mutex_enter(&cpu_lock);
	spa->spa_deadman_cycid = cyclic_add(&hdlr, &when);
	mutex_exit(&cpu_lock);

	refcount_create(&spa->spa_refcount);
	spa_config_lock_init(spa);

	avl_add(&spa_namespace_avl, spa);

	/*
	 * Set the alternate root, if there is one.
	 */
	if (altroot) {
		spa->spa_root = spa_strdup(altroot);
		spa_active_count++;
	}

	/*
	 * Every pool starts with the default cachefile
	 */
	list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t),
	    offsetof(spa_config_dirent_t, scd_link));

	dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP);
	dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path);
	list_insert_head(&spa->spa_config_list, dp);

	VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME,
	    KM_SLEEP) == 0);

	if (config != NULL) {
		nvlist_t *features;

		if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
		    &features) == 0) {
			VERIFY(nvlist_dup(features, &spa->spa_label_features,
			    0) == 0);
		}

		VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
	}

	if (spa->spa_label_features == NULL) {
		VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME,
		    KM_SLEEP) == 0);
	}

	spa->spa_iokstat = kstat_create("zfs", 0, name,
	    "disk", KSTAT_TYPE_IO, 1, 0);
	if (spa->spa_iokstat) {
		spa->spa_iokstat->ks_lock = &spa->spa_iokstat_lock;
		kstat_install(spa->spa_iokstat);
	}

	spa->spa_debug = ((zfs_flags & ZFS_DEBUG_SPA) != 0);

	return (spa);
}

/*
 * Removes a spa_t from the namespace, freeing up any memory used.  Requires
 * spa_namespace_lock.  This is called only after the spa_t has been closed and
 * deactivated.
 */
void
spa_remove(spa_t *spa)
{
	spa_config_dirent_t *dp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);

	nvlist_free(spa->spa_config_splitting);

	avl_remove(&spa_namespace_avl, spa);
	cv_broadcast(&spa_namespace_cv);

	if (spa->spa_root) {
		spa_strfree(spa->spa_root);
		spa_active_count--;
	}

	while ((dp = list_head(&spa->spa_config_list)) != NULL) {
		list_remove(&spa->spa_config_list, dp);
		if (dp->scd_path != NULL)
			spa_strfree(dp->scd_path);
		kmem_free(dp, sizeof (spa_config_dirent_t));
	}

	list_destroy(&spa->spa_config_list);

	nvlist_free(spa->spa_label_features);
	nvlist_free(spa->spa_load_info);
	spa_config_set(spa, NULL);

	mutex_enter(&cpu_lock);
	if (spa->spa_deadman_cycid != CYCLIC_NONE)
		cyclic_remove(spa->spa_deadman_cycid);
	mutex_exit(&cpu_lock);
	spa->spa_deadman_cycid = CYCLIC_NONE;

	refcount_destroy(&spa->spa_refcount);

	spa_config_lock_destroy(spa);

	kstat_delete(spa->spa_iokstat);
	spa->spa_iokstat = NULL;

	for (int t = 0; t < TXG_SIZE; t++)
		bplist_destroy(&spa->spa_free_bplist[t]);

	zio_checksum_templates_free(spa);

	cv_destroy(&spa->spa_async_cv);
	cv_destroy(&spa->spa_proc_cv);
	cv_destroy(&spa->spa_scrub_io_cv);
	cv_destroy(&spa->spa_suspend_cv);

	mutex_destroy(&spa->spa_async_lock);
	mutex_destroy(&spa->spa_errlist_lock);
	mutex_destroy(&spa->spa_errlog_lock);
	mutex_destroy(&spa->spa_history_lock);
	mutex_destroy(&spa->spa_proc_lock);
	mutex_destroy(&spa->spa_props_lock);
	mutex_destroy(&spa->spa_cksum_tmpls_lock);
	mutex_destroy(&spa->spa_scrub_lock);
	mutex_destroy(&spa->spa_suspend_lock);
	mutex_destroy(&spa->spa_vdev_top_lock);
	mutex_destroy(&spa->spa_iokstat_lock);

	kmem_free(spa, sizeof (spa_t));
}
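
/*
 * Example (illustrative sketch only, not part of this file): iterating over
 * every pool in the namespace with spa_next() while holding
 * spa_namespace_lock.  The function name below is hypothetical.
 *
 *	static uint64_t
 *	example_count_pools(void)
 *	{
 *		spa_t *spa = NULL;
 *		uint64_t count = 0;
 *
 *		mutex_enter(&spa_namespace_lock);
 *		while ((spa = spa_next(spa)) != NULL)
 *			count++;
 *		mutex_exit(&spa_namespace_lock);
 *
 *		return (count);
 *	}
 */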

/*
 * Given a pool, return the next pool in the namespace, or NULL if there is
 * none.  If 'prev' is NULL, return the first pool.
 */
spa_t *
spa_next(spa_t *prev)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	if (prev)
		return (AVL_NEXT(&spa_namespace_avl, prev));
	else
		return (avl_first(&spa_namespace_avl));
}

/*
 * ==========================================================================
 * SPA refcount functions
 * ==========================================================================
 */

/*
 * Add a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_open_ref(spa_t *spa, void *tag)
{
	ASSERT(refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) refcount_add(&spa->spa_refcount, tag);
}

/*
 * Remove a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_close(spa_t *spa, void *tag)
{
	ASSERT(refcount_count(&spa->spa_refcount) > spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) refcount_remove(&spa->spa_refcount, tag);
}

/*
 * Check to see if the spa refcount is zero.  Must be called with
 * spa_namespace_lock held.  We really compare against spa_minref, which is
 * the number of references acquired when opening a pool.
 */
boolean_t
spa_refcount_zero(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	return (refcount_count(&spa->spa_refcount) == spa->spa_minref);
}
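
/*
 * Example (illustrative sketch only, not part of this file): code that needs
 * to keep an already-open spa_t alive across an operation adds a reference
 * with spa_open_ref() and drops it with spa_close(), using the same tag for
 * both.  The function name below is hypothetical.
 *
 *	static void
 *	example_hold_and_use(spa_t *spa)
 *	{
 *		spa_open_ref(spa, FTAG);
 *		... use the pool; the spa_t cannot be freed while held ...
 *		spa_close(spa, FTAG);
 *	}
 */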

/*
 * ==========================================================================
 * SPA spare and l2cache tracking
 * ==========================================================================
 */

/*
 * Hot spares and cache devices are tracked using the same code below,
 * for 'auxiliary' devices.
 */

typedef struct spa_aux {
	uint64_t	aux_guid;
	uint64_t	aux_pool;
	avl_node_t	aux_avl;
	int		aux_count;
} spa_aux_t;

static int
spa_aux_compare(const void *a, const void *b)
{
	const spa_aux_t *sa = a;
	const spa_aux_t *sb = b;

	if (sa->aux_guid < sb->aux_guid)
		return (-1);
	else if (sa->aux_guid > sb->aux_guid)
		return (1);
	else
		return (0);
}

void
spa_aux_add(vdev_t *vd, avl_tree_t *avl)
{
	avl_index_t where;
	spa_aux_t search;
	spa_aux_t *aux;

	search.aux_guid = vd->vdev_guid;
	if ((aux = avl_find(avl, &search, &where)) != NULL) {
		aux->aux_count++;
	} else {
		aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP);
		aux->aux_guid = vd->vdev_guid;
		aux->aux_count = 1;
		avl_insert(avl, aux, where);
	}
}

void
spa_aux_remove(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search;
	spa_aux_t *aux;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	aux = avl_find(avl, &search, &where);

	ASSERT(aux != NULL);

	if (--aux->aux_count == 0) {
		avl_remove(avl, aux);
		kmem_free(aux, sizeof (spa_aux_t));
	} else if (aux->aux_pool == spa_guid(vd->vdev_spa)) {
		aux->aux_pool = 0ULL;
	}
}

boolean_t
spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl)
{
	spa_aux_t search, *found;

	search.aux_guid = guid;
	found = avl_find(avl, &search, NULL);

	if (pool) {
		if (found)
			*pool = found->aux_pool;
		else
			*pool = 0ULL;
	}

	if (refcnt) {
		if (found)
			*refcnt = found->aux_count;
		else
			*refcnt = 0;
	}

	return (found != NULL);
}

void
spa_aux_activate(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search, *found;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	found = avl_find(avl, &search, &where);
	ASSERT(found != NULL);
	ASSERT(found->aux_pool == 0ULL);

	found->aux_pool = spa_guid(vd->vdev_spa);
}
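
/*
 * Example (illustrative sketch only, not part of this file): the aux trees
 * are reference counted by guid, so registering the same device twice (e.g.
 * the same spare appearing in two pools) leaves one node with aux_count == 2.
 * The sequence below is hypothetical and elides the spa_spare_lock that the
 * wrappers normally take.
 *
 *	int refcnt;
 *
 *	spa_aux_add(vd, &spa_spare_avl);	(first pool)
 *	spa_aux_add(vd, &spa_spare_avl);	(second pool)
 *	(void) spa_aux_exists(vd->vdev_guid, NULL, &refcnt, &spa_spare_avl);
 *	ASSERT3S(refcnt, ==, 2);
 */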

/*
 * Spares are tracked globally due to the following constraints:
 *
 *	- A spare may be part of multiple pools.
 *	- A spare may be added to a pool even if it's actively in use within
 *	  another pool.
 *	- A spare in use in any pool can only be the source of a replacement if
 *	  the target is a spare in the same pool.
 *
 * We keep track of all spares on the system through the use of a reference
 * counted AVL tree.  When a vdev is added as a spare, or used as a replacement
 * spare, then we bump the reference count in the AVL tree.  In addition, we set
 * the 'vdev_isspare' member to indicate that the device is a spare (active or
 * inactive).  When a spare is made active (used to replace a device in the
 * pool), we also keep track of which pool it's been made a part of.
 *
 * The 'spa_spare_lock' protects the AVL tree.  These functions are normally
 * called under the spa_namespace lock as part of vdev reconfiguration.  The
 * separate spare lock exists for the status query path, which does not need to
 * be completely consistent with respect to other vdev configuration changes.
 */

static int
spa_spare_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_spare_add(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(!vd->vdev_isspare);
	spa_aux_add(vd, &spa_spare_avl);
	vd->vdev_isspare = B_TRUE;
	mutex_exit(&spa_spare_lock);
}

void
spa_spare_remove(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_remove(vd, &spa_spare_avl);
	vd->vdev_isspare = B_FALSE;
	mutex_exit(&spa_spare_lock);
}

boolean_t
spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt)
{
	boolean_t found;

	mutex_enter(&spa_spare_lock);
	found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);

	return (found);
}

void
spa_spare_activate(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_activate(vd, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);
}

/*
 * Level 2 ARC devices are tracked globally for the same reasons as spares.
 * Cache devices currently only support one pool per cache device, and so
 * for these devices the aux reference count is currently unused beyond 1.
 */

static int
spa_l2cache_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_l2cache_add(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(!vd->vdev_isl2cache);
	spa_aux_add(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_TRUE;
	mutex_exit(&spa_l2cache_lock);
}

void
spa_l2cache_remove(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_remove(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_FALSE;
	mutex_exit(&spa_l2cache_lock);
}

boolean_t
spa_l2cache_exists(uint64_t guid, uint64_t *pool)
{
	boolean_t found;

	mutex_enter(&spa_l2cache_lock);
	found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);

	return (found);
}

void
spa_l2cache_activate(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_activate(vd, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);
}

/*
 * ==========================================================================
 * SPA vdev locking
 * ==========================================================================
 */

/*
 * Lock the given spa_t for the purpose of adding or removing a vdev.
 * Grabs the global spa_namespace_lock plus the spa config lock for writing.
 * It returns the next transaction group for the spa_t.
 */
uint64_t
spa_vdev_enter(spa_t *spa)
{
	mutex_enter(&spa->spa_vdev_top_lock);
	mutex_enter(&spa_namespace_lock);
	return (spa_vdev_config_enter(spa));
}

/*
 * Internal implementation for spa_vdev_enter().  Used when a vdev
 * operation requires multiple syncs (i.e. removing a device) while
 * keeping the spa_namespace_lock held.
 */
uint64_t
spa_vdev_config_enter(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);

	return (spa_last_synced_txg(spa) + 1);
}
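
/*
 * Example (illustrative sketch only, not part of this file): vdev add/remove
 * operations bracket their work with spa_vdev_enter() and spa_vdev_exit(),
 * returning the latter's result so the config is synced before the caller
 * sees the error code.  The function name is hypothetical and the actual
 * work is elided.
 *
 *	static int
 *	example_vdev_op(spa_t *spa)
 *	{
 *		uint64_t txg;
 *		int error = 0;
 *
 *		txg = spa_vdev_enter(spa);
 *
 *		... modify the vdev tree for transaction group 'txg' ...
 *
 *		return (spa_vdev_exit(spa, NULL, txg, error));
 *	}
 */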

/*
 * Used in combination with spa_vdev_config_enter() to allow the syncing
 * of multiple transactions without releasing the spa_namespace_lock.
 */
void
spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error, char *tag)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	int config_changed = B_FALSE;

	ASSERT(txg > spa_last_synced_txg(spa));

	spa->spa_pending_vdev = NULL;

	/*
	 * Reassess the DTLs.
	 */
	vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE);

	if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) {
		config_changed = B_TRUE;
		spa->spa_config_generation++;
	}

	/*
	 * Verify the metaslab classes.
	 */
	ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0);
	ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0);

	spa_config_exit(spa, SCL_ALL, spa);

	/*
	 * Panic the system if the specified tag requires it.  This
	 * is useful for ensuring that configurations are updated
	 * transactionally.
	 */
	if (zio_injection_enabled)
		zio_handle_panic_injection(spa, tag, 0);

	/*
	 * Note: this txg_wait_synced() is important because it ensures
	 * that there won't be more than one config change per txg.
	 * This allows us to use the txg as the generation number.
	 */
	if (error == 0)
		txg_wait_synced(spa->spa_dsl_pool, txg);

	if (vd != NULL) {
		ASSERT(!vd->vdev_detached || vd->vdev_dtl_sm == NULL);
		spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
		vdev_free(vd);
		spa_config_exit(spa, SCL_ALL, spa);
	}

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed)
		spa_config_sync(spa, B_FALSE, B_TRUE);
}

/*
 * Unlock the spa_t after adding or removing a vdev.  Besides undoing the
 * locking of spa_vdev_enter(), we also want to make sure the transactions
 * have synced to disk, and then update the global configuration cache with
 * the new information.
 */
int
spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
{
	spa_vdev_config_exit(spa, vd, txg, error, FTAG);
	mutex_exit(&spa_namespace_lock);
	mutex_exit(&spa->spa_vdev_top_lock);

	return (error);
}

/*
 * Lock the given spa_t for the purpose of changing vdev state.
 */
void
spa_vdev_state_enter(spa_t *spa, int oplocks)
{
	int locks = SCL_STATE_ALL | oplocks;

	/*
	 * Root pools may need to read from the underlying devfs filesystem
	 * when opening up a vdev.  Unfortunately if we're holding the
	 * SCL_ZIO lock it will result in a deadlock when we try to issue
	 * the read from the root filesystem.  Instead we "prefetch"
	 * the associated vnodes that we need prior to opening the
	 * underlying devices and cache them so that we can prevent
	 * any I/O when we are doing the actual open.
	 */
	if (spa_is_root(spa)) {
		int low = locks & ~(SCL_ZIO - 1);
		int high = locks & ~low;

		spa_config_enter(spa, high, spa, RW_WRITER);
		vdev_hold(spa->spa_root_vdev);
		spa_config_enter(spa, low, spa, RW_WRITER);
	} else {
		spa_config_enter(spa, locks, spa, RW_WRITER);
	}
	spa->spa_vdev_locks = locks;
}
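
/*
 * Example (illustrative sketch only, not part of this file): vdev state
 * changes (e.g. online/offline) bracket their work with
 * spa_vdev_state_enter() and spa_vdev_state_exit(), which updates the config
 * cache and waits for the change to sync.  The function name is hypothetical
 * and the actual work is elided.
 *
 *	static int
 *	example_vdev_state_op(spa_t *spa, vdev_t *vd)
 *	{
 *		int error = 0;
 *
 *		spa_vdev_state_enter(spa, SCL_NONE);
 *
 *		... change the state of 'vd' ...
 *
 *		return (spa_vdev_state_exit(spa, vd, error));
 *	}
 */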

int
spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error)
{
	boolean_t config_changed = B_FALSE;

	if (vd != NULL || error == 0)
		vdev_dtl_reassess(vd ? vd->vdev_top : spa->spa_root_vdev,
		    0, 0, B_FALSE);

	if (vd != NULL) {
		vdev_state_dirty(vd->vdev_top);
		config_changed = B_TRUE;
		spa->spa_config_generation++;
	}

	if (spa_is_root(spa))
		vdev_rele(spa->spa_root_vdev);

	ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL);
	spa_config_exit(spa, spa->spa_vdev_locks, spa);

	/*
	 * If anything changed, wait for it to sync.  This ensures that,
	 * from the system administrator's perspective, zpool(1M) commands
	 * are synchronous.  This is important for things like zpool offline:
	 * when the command completes, you expect no further I/O from ZFS.
	 */
	if (vd != NULL)
		txg_wait_synced(spa->spa_dsl_pool, 0);

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed) {
		mutex_enter(&spa_namespace_lock);
		spa_config_sync(spa, B_FALSE, B_TRUE);
		mutex_exit(&spa_namespace_lock);
	}

	return (error);
}

/*
 * ==========================================================================
 * Miscellaneous functions
 * ==========================================================================
 */

void
spa_activate_mos_feature(spa_t *spa, const char *feature)
{
	(void) nvlist_add_boolean(spa->spa_label_features, feature);
	vdev_config_dirty(spa->spa_root_vdev);
}

void
spa_deactivate_mos_feature(spa_t *spa, const char *feature)
{
	(void) nvlist_remove_all(spa->spa_label_features, feature);
	vdev_config_dirty(spa->spa_root_vdev);
}

/*
 * Rename a spa_t.
 */
int
spa_rename(const char *name, const char *newname)
{
	spa_t *spa;
	int err;

	/*
	 * Lookup the spa_t and grab the config lock for writing.  We need to
	 * actually open the pool so that we can sync out the necessary labels.
	 * It's OK to call spa_open() with the namespace lock held because we
	 * allow recursive calls for other reasons.
	 */
	mutex_enter(&spa_namespace_lock);
	if ((err = spa_open(name, &spa, FTAG)) != 0) {
		mutex_exit(&spa_namespace_lock);
		return (err);
	}

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

	avl_remove(&spa_namespace_avl, spa);
	(void) strlcpy(spa->spa_name, newname, sizeof (spa->spa_name));
	avl_add(&spa_namespace_avl, spa);

	/*
	 * Sync all labels to disk with the new names by marking the root vdev
	 * dirty and waiting for it to sync.  It will pick up the new pool name
	 * during the sync.
	 */
	vdev_config_dirty(spa->spa_root_vdev);

	spa_config_exit(spa, SCL_ALL, FTAG);

	txg_wait_synced(spa->spa_dsl_pool, 0);

	/*
	 * Sync the updated config cache.
	 */
	spa_config_sync(spa, B_FALSE, B_TRUE);

	spa_close(spa, FTAG);

	mutex_exit(&spa_namespace_lock);

	return (0);
}

/*
 * Return the spa_t associated with given pool_guid, if it exists.  If
 * device_guid is non-zero, determine whether the pool exists *and* contains
 * a device with the specified device_guid.
 */
spa_t *
spa_by_guid(uint64_t pool_guid, uint64_t device_guid)
{
	spa_t *spa;
	avl_tree_t *t = &spa_namespace_avl;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
		if (spa->spa_state == POOL_STATE_UNINITIALIZED)
			continue;
		if (spa->spa_root_vdev == NULL)
			continue;
		if (spa_guid(spa) == pool_guid) {
			if (device_guid == 0)
				break;

			if (vdev_lookup_by_guid(spa->spa_root_vdev,
			    device_guid) != NULL)
				break;

			/*
			 * Check any devices we may be in the process of adding.
			 */
			if (spa->spa_pending_vdev) {
				if (vdev_lookup_by_guid(spa->spa_pending_vdev,
				    device_guid) != NULL)
					break;
			}
		}
	}

	return (spa);
}

/*
 * Determine whether a pool with the given pool_guid exists.
 */
boolean_t
spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
{
	return (spa_by_guid(pool_guid, device_guid) != NULL);
}

char *
spa_strdup(const char *s)
{
	size_t len;
	char *new;

	len = strlen(s);
	new = kmem_alloc(len + 1, KM_SLEEP);
	bcopy(s, new, len);
	new[len] = '\0';

	return (new);
}

void
spa_strfree(char *s)
{
	kmem_free(s, strlen(s) + 1);
}

uint64_t
spa_get_random(uint64_t range)
{
	uint64_t r;

	ASSERT(range != 0);

	(void) random_get_pseudo_bytes((void *)&r, sizeof (uint64_t));

	return (r % range);
}

uint64_t
spa_generate_guid(spa_t *spa)
{
	uint64_t guid = spa_get_random(-1ULL);

	if (spa != NULL) {
		while (guid == 0 || spa_guid_exists(spa_guid(spa), guid))
			guid = spa_get_random(-1ULL);
	} else {
		while (guid == 0 || spa_guid_exists(guid, 0))
			guid = spa_get_random(-1ULL);
	}

	return (guid);
}

void
sprintf_blkptr(char *buf, const blkptr_t *bp)
{
	char type[256];
	char *checksum = NULL;
	char *compress = NULL;

	if (bp != NULL) {
		if (BP_GET_TYPE(bp) & DMU_OT_NEWTYPE) {
			dmu_object_byteswap_t bswap =
			    DMU_OT_BYTESWAP(BP_GET_TYPE(bp));
			(void) snprintf(type, sizeof (type), "bswap %s %s",
			    DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) ?
			    "metadata" : "data",
			    dmu_ot_byteswap[bswap].ob_name);
		} else {
			(void) strlcpy(type, dmu_ot[BP_GET_TYPE(bp)].ot_name,
			    sizeof (type));
		}
		checksum = zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name;
		compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name;
	}

	SPRINTF_BLKPTR(snprintf, ' ', buf, bp, type, checksum, compress);
}

void
spa_freeze(spa_t *spa)
{
	uint64_t freeze_txg = 0;

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	if (spa->spa_freeze_txg == UINT64_MAX) {
		freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE;
		spa->spa_freeze_txg = freeze_txg;
	}
	spa_config_exit(spa, SCL_ALL, FTAG);
	if (freeze_txg != 0)
		txg_wait_synced(spa_get_dsl(spa), freeze_txg);
}

void
zfs_panic_recover(const char *fmt, ...)
{
	va_list adx;

	va_start(adx, fmt);
	vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx);
	va_end(adx);
}
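
/*
 * Example (illustrative sketch only, not part of this file): callers use
 * zfs_panic_recover() for consistency checks that are fatal by default but
 * only warn when the zfs_recover tunable is set.  The check below is
 * hypothetical.
 *
 *	if (offset + size > vd->vdev_asize) {
 *		zfs_panic_recover("example check: offset %llu beyond "
 *		    "end of vdev", (u_longlong_t)offset);
 *	}
 */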

/*
 * This is a stripped-down version of strtoull, suitable only for converting
 * lowercase hexadecimal numbers that don't overflow.
 */
uint64_t
strtonum(const char *str, char **nptr)
{
	uint64_t val = 0;
	char c;
	int digit;

	while ((c = *str) != '\0') {
		if (c >= '0' && c <= '9')
			digit = c - '0';
		else if (c >= 'a' && c <= 'f')
			digit = 10 + c - 'a';
		else
			break;

		val *= 16;
		val += digit;

		str++;
	}

	if (nptr)
		*nptr = (char *)str;

	return (val);
}

/*
 * ==========================================================================
 * Accessor functions
 * ==========================================================================
 */

boolean_t
spa_shutting_down(spa_t *spa)
{
	return (spa->spa_async_suspended);
}

dsl_pool_t *
spa_get_dsl(spa_t *spa)
{
	return (spa->spa_dsl_pool);
}

boolean_t
spa_is_initializing(spa_t *spa)
{
	return (spa->spa_is_initializing);
}

blkptr_t *
spa_get_rootblkptr(spa_t *spa)
{
	return (&spa->spa_ubsync.ub_rootbp);
}

void
spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp)
{
	spa->spa_uberblock.ub_rootbp = *bp;
}

void
spa_altroot(spa_t *spa, char *buf, size_t buflen)
{
	if (spa->spa_root == NULL)
		buf[0] = '\0';
	else
		(void) strncpy(buf, spa->spa_root, buflen);
}

int
spa_sync_pass(spa_t *spa)
{
	return (spa->spa_sync_pass);
}

char *
spa_name(spa_t *spa)
{
	return (spa->spa_name);
}

uint64_t
spa_guid(spa_t *spa)
{
	dsl_pool_t *dp = spa_get_dsl(spa);
	uint64_t guid;

	/*
	 * If we fail to parse the config during spa_load(), we can go through
	 * the error path (which posts an ereport) and end up here with no root
	 * vdev.  We stash the original pool guid in 'spa_config_guid' to
	 * handle this case.
	 */
	if (spa->spa_root_vdev == NULL)
		return (spa->spa_config_guid);

	guid = spa->spa_last_synced_guid != 0 ?
	    spa->spa_last_synced_guid : spa->spa_root_vdev->vdev_guid;

	/*
	 * Return the most recently synced out guid unless we're
	 * in syncing context.
	 */
	if (dp && dsl_pool_sync_context(dp))
		return (spa->spa_root_vdev->vdev_guid);
	else
		return (guid);
}

uint64_t
spa_load_guid(spa_t *spa)
{
	/*
	 * This is a GUID that exists solely as a reference for the
	 * purposes of the arc.  It is generated at load time, and
	 * is never written to persistent storage.
	 */
	return (spa->spa_load_guid);
}

uint64_t
spa_last_synced_txg(spa_t *spa)
{
	return (spa->spa_ubsync.ub_txg);
}

uint64_t
spa_first_txg(spa_t *spa)
{
	return (spa->spa_first_txg);
}

uint64_t
spa_syncing_txg(spa_t *spa)
{
	return (spa->spa_syncing_txg);
}

pool_state_t
spa_state(spa_t *spa)
{
	return (spa->spa_state);
}

spa_load_state_t
spa_load_state(spa_t *spa)
{
	return (spa->spa_load_state);
}

uint64_t
spa_freeze_txg(spa_t *spa)
{
	return (spa->spa_freeze_txg);
}

/* ARGSUSED */
uint64_t
spa_get_asize(spa_t *spa, uint64_t lsize)
{
	return (lsize * spa_asize_inflation);
}

uint64_t
spa_get_dspace(spa_t *spa)
{
	return (spa->spa_dspace);
}

void
spa_update_dspace(spa_t *spa)
{
	spa->spa_dspace = metaslab_class_get_dspace(spa_normal_class(spa)) +
	    ddt_get_dedup_dspace(spa);
}

/*
 * Return the failure mode that has been set to this pool.  The default
 * behavior will be to block all I/Os when a complete failure occurs.
 */
uint8_t
spa_get_failmode(spa_t *spa)
{
	return (spa->spa_failmode);
}

boolean_t
spa_suspended(spa_t *spa)
{
	return (spa->spa_suspended);
}

uint64_t
spa_version(spa_t *spa)
{
	return (spa->spa_ubsync.ub_version);
}

boolean_t
spa_deflate(spa_t *spa)
{
	return (spa->spa_deflate);
}

metaslab_class_t *
spa_normal_class(spa_t *spa)
{
	return (spa->spa_normal_class);
}

metaslab_class_t *
spa_log_class(spa_t *spa)
{
	return (spa->spa_log_class);
}

int
spa_max_replication(spa_t *spa)
{
	/*
	 * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to
	 * handle BPs with more than one DVA allocated.  Set our max
	 * replication level accordingly.
	 */
	if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS)
		return (1);
	return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override));
}

int
spa_prev_software_version(spa_t *spa)
{
	return (spa->spa_prev_software_version);
}

uint64_t
spa_deadman_synctime(spa_t *spa)
{
	return (spa->spa_deadman_synctime);
}

uint64_t
dva_get_dsize_sync(spa_t *spa, const dva_t *dva)
{
	uint64_t asize = DVA_GET_ASIZE(dva);
	uint64_t dsize = asize;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

	if (asize != 0 && spa->spa_deflate) {
		vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
		dsize = (asize >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio;
	}

	return (dsize);
}

uint64_t
bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp)
{
	uint64_t dsize = 0;

	for (int d = 0; d < SPA_DVAS_PER_BP; d++)
		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);

	return (dsize);
}

uint64_t
bp_get_dsize(spa_t *spa, const blkptr_t *bp)
{
	uint64_t dsize = 0;

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

	for (int d = 0; d < SPA_DVAS_PER_BP; d++)
		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);

	spa_config_exit(spa, SCL_VDEV, FTAG);

	return (dsize);
}

/*
 * ==========================================================================
 * Initialization and Termination
 * ==========================================================================
 */

static int
spa_name_compare(const void *a1, const void *a2)
{
	const spa_t *s1 = a1;
	const spa_t *s2 = a2;
	int s;

	s = strcmp(s1->spa_name, s2->spa_name);
	if (s > 0)
		return (1);
	if (s < 0)
		return (-1);
	return (0);
}

int
spa_busy(void)
{
	return (spa_active_count);
}

void
spa_boot_init()
{
	spa_config_load();
}

void
spa_init(int mode)
{
	mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL);

	avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t),
	    offsetof(spa_t, spa_avl));

	avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t),
	    offsetof(spa_aux_t, aux_avl));

	avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t),
	    offsetof(spa_aux_t, aux_avl));

	spa_mode_global = mode;

#ifdef _KERNEL
	spa_arch_init();
#else
	if (spa_mode_global != FREAD && dprintf_find_string("watch")) {
		arc_procfd = open("/proc/self/ctl", O_WRONLY);
		if (arc_procfd == -1) {
			perror("could not enable watchpoints: "
			    "opening /proc/self/ctl failed: ");
		} else {
			arc_watch = B_TRUE;
		}
	}
#endif

	refcount_init();
	unique_init();
	range_tree_init();
	zio_init();
	dmu_init();
	zil_init();
	vdev_cache_stat_init();
	zfs_prop_init();
	zpool_prop_init();
	zpool_feature_init();
	spa_config_load();
	l2arc_start();
}
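
/*
 * Example (illustrative sketch only, not part of this file): a userland
 * consumer of this code (e.g. via libzpool) brackets all SPA usage with
 * spa_init() and spa_fini().  Whether FREAD or FREAD | FWRITE is passed
 * depends on whether pools will be written; the sequence below is a
 * hypothetical sketch.
 *
 *	spa_init(FREAD | FWRITE);
 *	... import or create pools, do work ...
 *	spa_fini();
 */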

void
spa_fini(void)
{
	l2arc_stop();

	spa_evict_all();

	vdev_cache_stat_fini();
	zil_fini();
	dmu_fini();
	zio_fini();
	range_tree_fini();
	unique_fini();
	refcount_fini();

	avl_destroy(&spa_namespace_avl);
	avl_destroy(&spa_spare_avl);
	avl_destroy(&spa_l2cache_avl);

	cv_destroy(&spa_namespace_cv);
	mutex_destroy(&spa_namespace_lock);
	mutex_destroy(&spa_spare_lock);
	mutex_destroy(&spa_l2cache_lock);
}

/*
 * Return whether this pool has slogs.  No locking needed.
 * It's not a problem if the wrong answer is returned as it's only for
 * performance and not correctness.
 */
boolean_t
spa_has_slogs(spa_t *spa)
{
	return (spa->spa_log_class->mc_rotor != NULL);
}

spa_log_state_t
spa_get_log_state(spa_t *spa)
{
	return (spa->spa_log_state);
}

void
spa_set_log_state(spa_t *spa, spa_log_state_t state)
{
	spa->spa_log_state = state;
}

boolean_t
spa_is_root(spa_t *spa)
{
	return (spa->spa_is_root);
}

boolean_t
spa_writeable(spa_t *spa)
{
	return (!!(spa->spa_mode & FWRITE));
}

static int
activate_salted_cksum_check(zfeature_info_t *feature, dmu_tx_t *tx)
{
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;

	if (!spa_feature_is_active(spa, feature))
		return (0);
	else
		return (SET_ERROR(EBUSY));
}

static void
activate_salted_cksum_sync(zfeature_info_t *feature, dmu_tx_t *tx)
{
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;

	spa_feature_incr(spa, feature, tx);
	/*
	 * This is the first salted checksum that's been activated, so
	 * create the persistent checksum salt object now.
	 */
	if (spa->spa_cksum_salt_obj == 0) {
		spa->spa_cksum_salt_obj = zap_create_link(spa->spa_meta_objset,
		    DMU_OTN_ZAP_METADATA, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_CHECKSUM_SALT, tx);
		VERIFY3U(zap_add(spa->spa_meta_objset,
		    spa->spa_cksum_salt_obj, DMU_POOL_CHECKSUM_SALT, 1,
		    sizeof (spa->spa_cksum_salt.zcs_bytes),
		    spa->spa_cksum_salt.zcs_bytes, tx), ==, 0);
	}
}

/*
 * Activates a feature associated with a salted checksum.  You must call this
 * function instead of calling spa_feature_incr() directly, because we may
 * also need to sync the MOS object holding the checksum salt.
 * Arguments:
 *	spa	Pool on which to activate the salted checksum feature.
 *	feature	Salted checksum algorithm feature to activate (see
 *		spa_feature_table).
 */
int
spa_activate_salted_cksum(spa_t *spa, struct zfeature_info *feature)
{
	int err;

	/* EBUSY here indicates that the feature is already active */
	err = dsl_sync_task(spa_name(spa),
	    (dsl_checkfunc_t *)activate_salted_cksum_check,
	    (dsl_syncfunc_t *)activate_salted_cksum_sync, feature, 2);

	if (err != 0 && err != EBUSY)
		return (err);
	else
		return (0);
}

int
spa_mode(spa_t *spa)
{
	return (spa->spa_mode);
}

uint64_t
spa_bootfs(spa_t *spa)
{
	return (spa->spa_bootfs);
}

uint64_t
spa_delegation(spa_t *spa)
{
	return (spa->spa_delegation);
}

objset_t *
spa_meta_objset(spa_t *spa)
{
	return (spa->spa_meta_objset);
}

enum zio_checksum
spa_dedup_checksum(spa_t *spa)
{
	return (spa->spa_dedup_checksum);
}

/*
 * Reset pool scan stat per scan pass (or reboot).
 */
void
spa_scan_stat_init(spa_t *spa)
{
	/* data not stored on disk */
	spa->spa_scan_pass_start = gethrestime_sec();
	spa->spa_scan_pass_exam = 0;
	vdev_scan_stat_init(spa->spa_root_vdev);
}

/*
 * Get scan stats for zpool status reports.
 */
int
spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps)
{
	dsl_scan_t *scn = spa->spa_dsl_pool ? spa->spa_dsl_pool->dp_scan : NULL;

	if (scn == NULL || scn->scn_phys.scn_func == POOL_SCAN_NONE)
		return (SET_ERROR(ENOENT));
	bzero(ps, sizeof (pool_scan_stat_t));

	/* data stored on disk */
	ps->pss_func = scn->scn_phys.scn_func;
	ps->pss_start_time = scn->scn_phys.scn_start_time;
	ps->pss_end_time = scn->scn_phys.scn_end_time;
	ps->pss_to_examine = scn->scn_phys.scn_to_examine;
	ps->pss_examined = scn->scn_phys.scn_examined;
	ps->pss_to_process = scn->scn_phys.scn_to_process;
	ps->pss_processed = scn->scn_phys.scn_processed;
	ps->pss_errors = scn->scn_phys.scn_errors;
	ps->pss_state = scn->scn_phys.scn_state;

	/* data not stored on disk */
	ps->pss_pass_start = spa->spa_scan_pass_start;
	ps->pss_pass_exam = spa->spa_scan_pass_exam;

	return (0);
}

boolean_t
spa_debug_enabled(spa_t *spa)
{
	return (spa->spa_debug);
}
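
/*
 * Example (illustrative sketch only, not part of this file): status
 * reporting code copies the scan statistics out with spa_scan_get_stats();
 * ENOENT simply means no scan has been requested on this pool.
 *
 *	pool_scan_stat_t ps;
 *
 *	if (spa_scan_get_stats(spa, &ps) == 0)
 *		zfs_dbgmsg("scan examined %llu bytes",
 *		    (u_longlong_t)ps.pss_examined);
 */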