/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/spa_boot.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_scan.h>
#include <sys/fs/zfs.h>
#include <sys/metaslab_impl.h>
#include <sys/arc.h>
#include <sys/ddt.h>
#include "zfs_prop.h"
#include "zfeature_common.h"

/*
 * SPA locking
 *
 * There are three basic locks for managing spa_t structures:
 *
 * spa_namespace_lock (global mutex)
 *
 *      This lock must be acquired to do any of the following:
 *
 *              - Lookup a spa_t by name
 *              - Add or remove a spa_t from the namespace
 *              - Increase spa_refcount from zero
 *              - Check if spa_refcount is zero
 *              - Rename a spa_t
 *              - Add/remove/attach/detach devices
 *              - Held for the duration of create/destroy/import/export
 *
 *      It does not need to handle recursion.  A create or destroy may
 *      reference objects (files or zvols) in other pools, but by
 *      definition they must have an existing reference, and will never need
 *      to lookup a spa_t by name.
 *
 * spa_refcount (per-spa refcount_t protected by mutex)
 *
 *      This reference count keeps track of any active users of the spa_t.  The
 *      spa_t cannot be destroyed or freed while this is non-zero.  Internally,
 *      the refcount is never really 'zero' - opening a pool implicitly keeps
 *      some references in the DMU.  Internally we check against spa_minref, but
 *      present the image of a zero/non-zero value to consumers.
 *
 * spa_config_lock[] (per-spa array of rwlocks)
 *
 *      This protects the spa_t from config changes, and must be held in
 *      the following circumstances:
 *
 *              - RW_READER to perform I/O to the spa
 *              - RW_WRITER to change the vdev config
 *
 * The locking order is fairly straightforward:
 *
 *              spa_namespace_lock      ->   spa_refcount
 *
 *      The namespace lock must be acquired to increase the refcount from 0
 *      or to check if it is zero.
 *
 *              spa_refcount            ->   spa_config_lock[]
 *
 *      There must be at least one valid reference on the spa_t to acquire
 *      the config lock.
 *
 *              spa_namespace_lock      ->   spa_config_lock[]
 *
 *      The namespace lock must always be taken before the config lock.
 *
 *
 * The spa_namespace_lock can be acquired directly and is globally visible.
 *
 * The namespace is manipulated using the following functions, all of which
 * require the spa_namespace_lock to be held.
 *
 *      spa_lookup()            Lookup a spa_t by name.
 *
 *      spa_add()               Create a new spa_t in the namespace.
 *
 *      spa_remove()            Remove a spa_t from the namespace.  This also
 *                              frees up any memory associated with the spa_t.
 *
 *      spa_next()              Returns the next spa_t in the system, or the
 *                              first if NULL is passed.
 *
 *      spa_evict_all()         Shutdown and remove all spa_t structures in
 *                              the system.
 *
 *      spa_guid_exists()       Determine whether a pool/device guid exists.
 *
 * The spa_refcount is manipulated using the following functions:
 *
 *      spa_open_ref()          Adds a reference to the given spa_t.  Must be
 *                              called with spa_namespace_lock held if the
 *                              refcount is currently zero.
 *
 *      spa_close()             Remove a reference from the spa_t.  This will
 *                              not free the spa_t or remove it from the
 *                              namespace.  No locking is required.
 *
 *      spa_refcount_zero()     Returns true if the refcount is currently
 *                              zero.  Must be called with spa_namespace_lock
 *                              held.
 *
 * The spa_config_lock[] is an array of rwlocks, ordered as follows:
 * SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV.
 * spa_config_lock[] is manipulated with spa_config_{enter,exit,held}().
 *
 * To read the configuration, it suffices to hold one of these locks as reader.
 * To modify the configuration, you must hold all locks as writer.  To modify
 * vdev state without altering the vdev tree's topology (e.g. online/offline),
 * you must hold SCL_STATE and SCL_ZIO as writer.
 *
 * We use these distinct config locks to avoid recursive lock entry.
 * For example, spa_sync() (which holds SCL_CONFIG as reader) induces
 * block allocations (SCL_ALLOC), which may require reading space maps
 * from disk (dmu_read() -> zio_read() -> SCL_ZIO).
 *
 * The spa config locks cannot be normal rwlocks because we need the
 * ability to hand off ownership.  For example, SCL_ZIO is acquired
 * by the issuing thread and later released by an interrupt thread.
 * They do, however, obey the usual write-wanted semantics to prevent
 * writer (i.e. system administrator) starvation.
 *
 * The lock acquisition rules are as follows:
 *
 * SCL_CONFIG
 *      Protects changes to the vdev tree topology, such as vdev
 *      add/remove/attach/detach.  Protects the dirty config list
 *      (spa_config_dirty_list) and the set of spares and l2arc devices.
 *
 * SCL_STATE
 *      Protects changes to pool state and vdev state, such as vdev
 *      online/offline/fault/degrade/clear.  Protects the dirty state list
 *      (spa_state_dirty_list) and global pool state (spa_state).
 *
 * SCL_ALLOC
 *      Protects changes to metaslab groups and classes.
 *      Held as reader by metaslab_alloc() and metaslab_claim().
 *
 * SCL_ZIO
 *      Held by bp-level zios (those which have no io_vd upon entry)
 *      to prevent changes to the vdev tree.  The bp-level zio implicitly
 *      protects all of its vdev child zios, which do not hold SCL_ZIO.
 *
 * SCL_FREE
 *      Protects changes to metaslab groups and classes.
 *      Held as reader by metaslab_free().  SCL_FREE is distinct from
 *      SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free
 *      blocks in zio_done() while another i/o that holds either
 *      SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete.
 *
 * SCL_VDEV
 *      Held as reader to prevent changes to the vdev tree during trivial
 *      inquiries such as bp_get_dsize().  SCL_VDEV is distinct from the
 *      other locks, and lower than all of them, to ensure that it's safe
 *      to acquire regardless of caller context.
 *
 * In addition, the following rules apply:
 *
 * (a)  spa_props_lock protects pool properties, spa_config and spa_config_list.
 *      The lock ordering is SCL_CONFIG > spa_props_lock.
 *
 * (b)  I/O operations on leaf vdevs.  For any zio operation that takes
 *      an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(),
 *      or zio_write_phys() -- the caller must ensure that the config cannot
 *      change in the interim, and that the vdev cannot be reopened.
 *      SCL_STATE as reader suffices for both.
 *
 * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
 *
 *      spa_vdev_enter()        Acquire the namespace lock and the config lock
 *                              for writing.
 *
 *      spa_vdev_exit()         Release the config lock, wait for all I/O
 *                              to complete, sync the updated configs to the
 *                              cache, and release the namespace lock.
 *
 * vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit().
 * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
 * locking is, always, based on spa_namespace_lock and spa_config_lock[].
 *
 * spa_rename() is also implemented within this file since it requires
 * manipulation of the namespace.
 */
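
/*
 * Illustrative sketch (not part of the implementation): a typical reader
 * of the configuration brackets its work with spa_config_enter() and
 * spa_config_exit(); 'id' here is just some top-level vdev index:
 *
 *      spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
 *      vd = vdev_lookup_top(spa, id);
 *      ... inspect the vdev tree ...
 *      spa_config_exit(spa, SCL_VDEV, FTAG);
 *
 * Writers that change the topology take all locks as writer; the
 * spa_vdev_enter()/spa_vdev_exit() wrappers below do this on their behalf.
 */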

static avl_tree_t spa_namespace_avl;
kmutex_t spa_namespace_lock;
static kcondvar_t spa_namespace_cv;
static int spa_active_count;
int spa_max_replication_override = SPA_DVAS_PER_BP;

static kmutex_t spa_spare_lock;
static avl_tree_t spa_spare_avl;
static kmutex_t spa_l2cache_lock;
static avl_tree_t spa_l2cache_avl;

kmem_cache_t *spa_buffer_pool;
int spa_mode_global;

#ifdef ZFS_DEBUG
/* Everything except dprintf and spa is on by default in debug builds */
int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SPA);
#else
int zfs_flags = 0;
#endif

/*
 * zfs_recover can be set to nonzero to attempt to recover from
 * otherwise-fatal errors, typically caused by on-disk corruption.  When
 * set, calls to zfs_panic_recover() will turn into warning messages.
 */
int zfs_recover = 0;

/*
 * Expiration time in milliseconds. This value has two meanings. First, it is
 * used to determine when the spa_deadman() logic should fire. By default the
 * spa_deadman() will fire if spa_sync() has not completed in 1000 seconds.
 * Second, the value determines whether an I/O is considered "hung". Any I/O
 * that has not completed in zfs_deadman_synctime_ms is considered "hung",
 * resulting in a system panic.
 */
uint64_t zfs_deadman_synctime_ms = 1000000ULL;

/*
 * Check time in milliseconds. This defines the frequency at which we check
 * for hung I/O.
 */
uint64_t zfs_deadman_checktime_ms = 5000ULL;

/*
 * Override the zfs deadman behavior via /etc/system. By default the
 * deadman is enabled except on VMware and sparc deployments.
 */
int zfs_deadman_enabled = -1;
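
/*
 * For example (assuming the stock /etc/system tunable syntax for the zfs
 * module; a sketch, not a tested configuration), the deadman can be
 * disabled outright with:
 *
 *      set zfs:zfs_deadman_enabled = 0
 */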

/*
 * The worst case is single-sector max-parity RAID-Z blocks, in which
 * case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1)
 * times the size; so just assume that.  Add to this the fact that
 * we can have up to 3 DVAs per bp, and one more factor of 2 because
 * the block may be dittoed with up to 3 DVAs by ddt_sync().  All together,
 * the worst case is:
 *     (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24
 */
int spa_asize_inflation = 24;
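
/*
 * Worked example: for a 128K logical write, spa_get_asize() (defined later
 * in this file) therefore reserves 128K * 24 = 3M of worst-case space.
 */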

/*
 * ==========================================================================
 * SPA config locking
 * ==========================================================================
 */
static void
spa_config_lock_init(spa_t *spa)
{
        for (int i = 0; i < SCL_LOCKS; i++) {
                spa_config_lock_t *scl = &spa->spa_config_lock[i];
                mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
                cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
                refcount_create_untracked(&scl->scl_count);
                scl->scl_writer = NULL;
                scl->scl_write_wanted = 0;
        }
}

static void
spa_config_lock_destroy(spa_t *spa)
{
        for (int i = 0; i < SCL_LOCKS; i++) {
                spa_config_lock_t *scl = &spa->spa_config_lock[i];
                mutex_destroy(&scl->scl_lock);
                cv_destroy(&scl->scl_cv);
                refcount_destroy(&scl->scl_count);
                ASSERT(scl->scl_writer == NULL);
                ASSERT(scl->scl_write_wanted == 0);
        }
}

int
spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw)
{
        for (int i = 0; i < SCL_LOCKS; i++) {
                spa_config_lock_t *scl = &spa->spa_config_lock[i];
                if (!(locks & (1 << i)))
                        continue;
                mutex_enter(&scl->scl_lock);
                if (rw == RW_READER) {
                        if (scl->scl_writer || scl->scl_write_wanted) {
                                mutex_exit(&scl->scl_lock);
                                spa_config_exit(spa, locks ^ (1 << i), tag);
                                return (0);
                        }
                } else {
                        ASSERT(scl->scl_writer != curthread);
                        if (!refcount_is_zero(&scl->scl_count)) {
                                mutex_exit(&scl->scl_lock);
                                spa_config_exit(spa, locks ^ (1 << i), tag);
                                return (0);
                        }
                        scl->scl_writer = curthread;
                }
                (void) refcount_add(&scl->scl_count, tag);
                mutex_exit(&scl->scl_lock);
        }
        return (1);
}

void
spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw)
{
        int wlocks_held = 0;

        ASSERT3U(SCL_LOCKS, <, sizeof (wlocks_held) * NBBY);

        for (int i = 0; i < SCL_LOCKS; i++) {
                spa_config_lock_t *scl = &spa->spa_config_lock[i];
                if (scl->scl_writer == curthread)
                        wlocks_held |= (1 << i);
                if (!(locks & (1 << i)))
                        continue;
                mutex_enter(&scl->scl_lock);
                if (rw == RW_READER) {
                        while (scl->scl_writer || scl->scl_write_wanted) {
                                cv_wait(&scl->scl_cv, &scl->scl_lock);
                        }
                } else {
                        ASSERT(scl->scl_writer != curthread);
                        while (!refcount_is_zero(&scl->scl_count)) {
                                scl->scl_write_wanted++;
                                cv_wait(&scl->scl_cv, &scl->scl_lock);
                                scl->scl_write_wanted--;
                        }
                        scl->scl_writer = curthread;
                }
                (void) refcount_add(&scl->scl_count, tag);
                mutex_exit(&scl->scl_lock);
        }
        ASSERT(wlocks_held <= locks);
}

void
spa_config_exit(spa_t *spa, int locks, void *tag)
{
        for (int i = SCL_LOCKS - 1; i >= 0; i--) {
                spa_config_lock_t *scl = &spa->spa_config_lock[i];
                if (!(locks & (1 << i)))
                        continue;
                mutex_enter(&scl->scl_lock);
                ASSERT(!refcount_is_zero(&scl->scl_count));
                if (refcount_remove(&scl->scl_count, tag) == 0) {
                        ASSERT(scl->scl_writer == NULL ||
                            scl->scl_writer == curthread);
                        scl->scl_writer = NULL;      /* OK in either case */
                        cv_broadcast(&scl->scl_cv);
                }
                mutex_exit(&scl->scl_lock);
        }
}

int
spa_config_held(spa_t *spa, int locks, krw_t rw)
{
        int locks_held = 0;

        for (int i = 0; i < SCL_LOCKS; i++) {
                spa_config_lock_t *scl = &spa->spa_config_lock[i];
                if (!(locks & (1 << i)))
                        continue;
                if ((rw == RW_READER && !refcount_is_zero(&scl->scl_count)) ||
                    (rw == RW_WRITER && scl->scl_writer == curthread))
                        locks_held |= 1 << i;
        }

        return (locks_held);
}
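
/*
 * Note: spa_config_held() is primarily an ASSERT helper rather than a lock
 * operation; for example, a function requiring the config lock as reader
 * would typically verify:
 *
 *      ASSERT(spa_config_held(spa, SCL_VDEV, RW_READER));
 */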

/*
 * ==========================================================================
 * SPA namespace functions
 * ==========================================================================
 */

/*
 * Lookup the named spa_t in the AVL tree.  The spa_namespace_lock must be held.
 * Returns NULL if no matching spa_t is found.
 */
spa_t *
spa_lookup(const char *name)
{
        static spa_t search;    /* spa_t is large; don't allocate on stack */
        spa_t *spa;
        avl_index_t where;
        char *cp;

        ASSERT(MUTEX_HELD(&spa_namespace_lock));

        (void) strlcpy(search.spa_name, name, sizeof (search.spa_name));

        /*
         * If it's a full dataset name, figure out the pool name and
         * just use that.
         */
        cp = strpbrk(search.spa_name, "/@");
        if (cp != NULL)
                *cp = '\0';

        spa = avl_find(&spa_namespace_avl, &search, &where);

        return (spa);
}

/*
 * Fires when spa_sync has not completed within zfs_deadman_synctime_ms.
 * If the zfs_deadman_enabled flag is set then it inspects all vdev queues
 * looking for potentially hung I/Os.
 */
void
spa_deadman(void *arg)
{
        spa_t *spa = arg;

        zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu",
            (gethrtime() - spa->spa_sync_starttime) / NANOSEC,
            ++spa->spa_deadman_calls);
        if (zfs_deadman_enabled)
                vdev_deadman(spa->spa_root_vdev);
}

/*
 * Create an uninitialized spa_t with the given name.  Requires
 * spa_namespace_lock.  The caller must ensure that the spa_t doesn't already
 * exist by calling spa_lookup() first.
 */
spa_t *
spa_add(const char *name, nvlist_t *config, const char *altroot)
{
        spa_t *spa;
        spa_config_dirent_t *dp;
        cyc_handler_t hdlr;
        cyc_time_t when;

        ASSERT(MUTEX_HELD(&spa_namespace_lock));

        spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);

        mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spa->spa_iokstat_lock, NULL, MUTEX_DEFAULT, NULL);

        cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
        cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL);
        cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
        cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);

        for (int t = 0; t < TXG_SIZE; t++)
                bplist_create(&spa->spa_free_bplist[t]);

        (void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name));
        spa->spa_state = POOL_STATE_UNINITIALIZED;
        spa->spa_freeze_txg = UINT64_MAX;
        spa->spa_final_txg = UINT64_MAX;
        spa->spa_load_max_txg = UINT64_MAX;
        spa->spa_proc = &p0;
        spa->spa_proc_state = SPA_PROC_NONE;

        hdlr.cyh_func = spa_deadman;
        hdlr.cyh_arg = spa;
        hdlr.cyh_level = CY_LOW_LEVEL;

        spa->spa_deadman_synctime = MSEC2NSEC(zfs_deadman_synctime_ms);

        /*
         * This determines how often we need to check for hung I/Os after
         * the cyclic has already fired. Since checking for hung I/Os is
         * an expensive operation we don't want to check too frequently.
         * Instead wait for 5 seconds before checking again.
         */
        when.cyt_interval = MSEC2NSEC(zfs_deadman_checktime_ms);
        when.cyt_when = CY_INFINITY;
        mutex_enter(&cpu_lock);
        spa->spa_deadman_cycid = cyclic_add(&hdlr, &when);
        mutex_exit(&cpu_lock);

        refcount_create(&spa->spa_refcount);
        spa_config_lock_init(spa);

        avl_add(&spa_namespace_avl, spa);

        /*
         * Set the alternate root, if there is one.
         */
        if (altroot) {
                spa->spa_root = spa_strdup(altroot);
                spa_active_count++;
        }

        /*
         * Every pool starts with the default cachefile
         */
        list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t),
            offsetof(spa_config_dirent_t, scd_link));

        dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP);
        dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path);
        list_insert_head(&spa->spa_config_list, dp);

        VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME,
            KM_SLEEP) == 0);

        if (config != NULL) {
                nvlist_t *features;

                if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
                    &features) == 0) {
                        VERIFY(nvlist_dup(features, &spa->spa_label_features,
                            0) == 0);
                }

                VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
        }

        if (spa->spa_label_features == NULL) {
                VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME,
                    KM_SLEEP) == 0);
        }

        spa->spa_iokstat = kstat_create("zfs", 0, name,
            "disk", KSTAT_TYPE_IO, 1, 0);
        if (spa->spa_iokstat) {
                spa->spa_iokstat->ks_lock = &spa->spa_iokstat_lock;
                kstat_install(spa->spa_iokstat);
        }

        spa->spa_debug = ((zfs_flags & ZFS_DEBUG_SPA) != 0);

        return (spa);
}

/*
 * Removes a spa_t from the namespace, freeing up any memory used.  Requires
 * spa_namespace_lock.  This is called only after the spa_t has been closed and
 * deactivated.
 */
void
spa_remove(spa_t *spa)
{
        spa_config_dirent_t *dp;

        ASSERT(MUTEX_HELD(&spa_namespace_lock));
        ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);

        nvlist_free(spa->spa_config_splitting);

        avl_remove(&spa_namespace_avl, spa);
        cv_broadcast(&spa_namespace_cv);

        if (spa->spa_root) {
                spa_strfree(spa->spa_root);
                spa_active_count--;
        }

        while ((dp = list_head(&spa->spa_config_list)) != NULL) {
                list_remove(&spa->spa_config_list, dp);
                if (dp->scd_path != NULL)
                        spa_strfree(dp->scd_path);
                kmem_free(dp, sizeof (spa_config_dirent_t));
        }

        list_destroy(&spa->spa_config_list);

        nvlist_free(spa->spa_label_features);
        nvlist_free(spa->spa_load_info);
        spa_config_set(spa, NULL);

        mutex_enter(&cpu_lock);
        if (spa->spa_deadman_cycid != CYCLIC_NONE)
                cyclic_remove(spa->spa_deadman_cycid);
        mutex_exit(&cpu_lock);
        spa->spa_deadman_cycid = CYCLIC_NONE;

        refcount_destroy(&spa->spa_refcount);

        spa_config_lock_destroy(spa);

        kstat_delete(spa->spa_iokstat);
        spa->spa_iokstat = NULL;

        for (int t = 0; t < TXG_SIZE; t++)
                bplist_destroy(&spa->spa_free_bplist[t]);

        cv_destroy(&spa->spa_async_cv);
        cv_destroy(&spa->spa_proc_cv);
        cv_destroy(&spa->spa_scrub_io_cv);
        cv_destroy(&spa->spa_suspend_cv);

        mutex_destroy(&spa->spa_async_lock);
        mutex_destroy(&spa->spa_errlist_lock);
        mutex_destroy(&spa->spa_errlog_lock);
        mutex_destroy(&spa->spa_history_lock);
        mutex_destroy(&spa->spa_proc_lock);
        mutex_destroy(&spa->spa_props_lock);
        mutex_destroy(&spa->spa_scrub_lock);
        mutex_destroy(&spa->spa_suspend_lock);
        mutex_destroy(&spa->spa_vdev_top_lock);
        mutex_destroy(&spa->spa_iokstat_lock);

        kmem_free(spa, sizeof (spa_t));
}

/*
 * Given a pool, return the next pool in the namespace, or NULL if there is
 * none.  If 'prev' is NULL, return the first pool.
 */
spa_t *
spa_next(spa_t *prev)
{
        ASSERT(MUTEX_HELD(&spa_namespace_lock));

        if (prev)
                return (AVL_NEXT(&spa_namespace_avl, prev));
        else
                return (avl_first(&spa_namespace_avl));
}

/*
 * ==========================================================================
 * SPA refcount functions
 * ==========================================================================
 */

/*
 * Add a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_open_ref(spa_t *spa, void *tag)
{
        ASSERT(refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
            MUTEX_HELD(&spa_namespace_lock));
        (void) refcount_add(&spa->spa_refcount, tag);
}

/*
 * Remove a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_close(spa_t *spa, void *tag)
{
        ASSERT(refcount_count(&spa->spa_refcount) > spa->spa_minref ||
            MUTEX_HELD(&spa_namespace_lock));
        (void) refcount_remove(&spa->spa_refcount, tag);
}

/*
 * Check to see if the spa refcount is zero.  Must be called with
 * spa_namespace_lock held.  We really compare against spa_minref, which is the
 * number of references acquired when opening a pool.
 */
boolean_t
spa_refcount_zero(spa_t *spa)
{
        ASSERT(MUTEX_HELD(&spa_namespace_lock));

        return (refcount_count(&spa->spa_refcount) == spa->spa_minref);
}

/*
 * ==========================================================================
 * SPA spare and l2cache tracking
 * ==========================================================================
 */

/*
 * Hot spares and cache devices are tracked using the same code below,
 * for 'auxiliary' devices.
 */

typedef struct spa_aux {
        uint64_t        aux_guid;
        uint64_t        aux_pool;
        avl_node_t      aux_avl;
        int             aux_count;
} spa_aux_t;

static int
spa_aux_compare(const void *a, const void *b)
{
        const spa_aux_t *sa = a;
        const spa_aux_t *sb = b;

        if (sa->aux_guid < sb->aux_guid)
                return (-1);
        else if (sa->aux_guid > sb->aux_guid)
                return (1);
        else
                return (0);
}

void
spa_aux_add(vdev_t *vd, avl_tree_t *avl)
{
        avl_index_t where;
        spa_aux_t search;
        spa_aux_t *aux;

        search.aux_guid = vd->vdev_guid;
        if ((aux = avl_find(avl, &search, &where)) != NULL) {
                aux->aux_count++;
        } else {
                aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP);
                aux->aux_guid = vd->vdev_guid;
                aux->aux_count = 1;
                avl_insert(avl, aux, where);
        }
}

void
spa_aux_remove(vdev_t *vd, avl_tree_t *avl)
{
        spa_aux_t search;
        spa_aux_t *aux;
        avl_index_t where;

        search.aux_guid = vd->vdev_guid;
        aux = avl_find(avl, &search, &where);

        ASSERT(aux != NULL);

        if (--aux->aux_count == 0) {
                avl_remove(avl, aux);
                kmem_free(aux, sizeof (spa_aux_t));
        } else if (aux->aux_pool == spa_guid(vd->vdev_spa)) {
                aux->aux_pool = 0ULL;
        }
}

boolean_t
spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl)
{
        spa_aux_t search, *found;

        search.aux_guid = guid;
        found = avl_find(avl, &search, NULL);

        if (pool) {
                if (found)
                        *pool = found->aux_pool;
                else
                        *pool = 0ULL;
        }

        if (refcnt) {
                if (found)
                        *refcnt = found->aux_count;
                else
                        *refcnt = 0;
        }

        return (found != NULL);
}

void
spa_aux_activate(vdev_t *vd, avl_tree_t *avl)
{
        spa_aux_t search, *found;
        avl_index_t where;

        search.aux_guid = vd->vdev_guid;
        found = avl_find(avl, &search, &where);
        ASSERT(found != NULL);
        ASSERT(found->aux_pool == 0ULL);

        found->aux_pool = spa_guid(vd->vdev_spa);
}

/*
 * Spares are tracked globally due to the following constraints:
 *
 *      - A spare may be part of multiple pools.
 *      - A spare may be added to a pool even if it's actively in use within
 *        another pool.
 *      - A spare in use in any pool can only be the source of a replacement if
 *        the target is a spare in the same pool.
 *
 * We keep track of all spares on the system through the use of a reference
 * counted AVL tree.  When a vdev is added as a spare, or used as a replacement
 * spare, we bump the reference count in the AVL tree.  In addition, we set
 * the 'vdev_isspare' member to indicate that the device is a spare (active or
 * inactive).  When a spare is made active (used to replace a device in the
 * pool), we also keep track of which pool it's been made a part of.
 *
 * The 'spa_spare_lock' protects the AVL tree.  These functions are normally
 * called under the spa_namespace lock as part of vdev reconfiguration.  The
 * separate spare lock exists for the status query path, which does not need to
 * be completely consistent with respect to other vdev configuration changes.
 */

static int
spa_spare_compare(const void *a, const void *b)
{
        return (spa_aux_compare(a, b));
}

void
spa_spare_add(vdev_t *vd)
{
        mutex_enter(&spa_spare_lock);
        ASSERT(!vd->vdev_isspare);
        spa_aux_add(vd, &spa_spare_avl);
        vd->vdev_isspare = B_TRUE;
        mutex_exit(&spa_spare_lock);
}

void
spa_spare_remove(vdev_t *vd)
{
        mutex_enter(&spa_spare_lock);
        ASSERT(vd->vdev_isspare);
        spa_aux_remove(vd, &spa_spare_avl);
        vd->vdev_isspare = B_FALSE;
        mutex_exit(&spa_spare_lock);
}

boolean_t
spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt)
{
        boolean_t found;

        mutex_enter(&spa_spare_lock);
        found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl);
        mutex_exit(&spa_spare_lock);

        return (found);
}

void
spa_spare_activate(vdev_t *vd)
{
        mutex_enter(&spa_spare_lock);
        ASSERT(vd->vdev_isspare);
        spa_aux_activate(vd, &spa_spare_avl);
        mutex_exit(&spa_spare_lock);
}

/*
 * Level 2 ARC devices are tracked globally for the same reasons as spares.
 * Cache devices currently only support one pool per cache device, and so
 * for these devices the aux reference count is currently unused beyond 1.
 */

static int
spa_l2cache_compare(const void *a, const void *b)
{
        return (spa_aux_compare(a, b));
}

void
spa_l2cache_add(vdev_t *vd)
{
        mutex_enter(&spa_l2cache_lock);
        ASSERT(!vd->vdev_isl2cache);
        spa_aux_add(vd, &spa_l2cache_avl);
        vd->vdev_isl2cache = B_TRUE;
        mutex_exit(&spa_l2cache_lock);
}

void
spa_l2cache_remove(vdev_t *vd)
{
        mutex_enter(&spa_l2cache_lock);
        ASSERT(vd->vdev_isl2cache);
        spa_aux_remove(vd, &spa_l2cache_avl);
        vd->vdev_isl2cache = B_FALSE;
        mutex_exit(&spa_l2cache_lock);
}

boolean_t
spa_l2cache_exists(uint64_t guid, uint64_t *pool)
{
        boolean_t found;

        mutex_enter(&spa_l2cache_lock);
        found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl);
        mutex_exit(&spa_l2cache_lock);

        return (found);
}

void
spa_l2cache_activate(vdev_t *vd)
{
        mutex_enter(&spa_l2cache_lock);
        ASSERT(vd->vdev_isl2cache);
        spa_aux_activate(vd, &spa_l2cache_avl);
        mutex_exit(&spa_l2cache_lock);
}

/*
 * ==========================================================================
 * SPA vdev locking
 * ==========================================================================
 */

/*
 * Lock the given spa_t for the purpose of adding or removing a vdev.
 * Grabs the global spa_namespace_lock plus the spa config lock for writing.
 * It returns the next transaction group for the spa_t.
 */
uint64_t
spa_vdev_enter(spa_t *spa)
{
        mutex_enter(&spa->spa_vdev_top_lock);
        mutex_enter(&spa_namespace_lock);
        return (spa_vdev_config_enter(spa));
}
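
/*
 * Sketch of the usual calling pattern (details elided; 'error' is whatever
 * the vdev operation itself returns):
 *
 *      uint64_t txg = spa_vdev_enter(spa);
 *      ... add or remove the vdev ...
 *      return (spa_vdev_exit(spa, vd, txg, error));
 */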

/*
 * Internal implementation for spa_vdev_enter().  Used when a vdev
 * operation requires multiple syncs (i.e. removing a device) while
 * keeping the spa_namespace_lock held.
 */
uint64_t
spa_vdev_config_enter(spa_t *spa)
{
        ASSERT(MUTEX_HELD(&spa_namespace_lock));

        spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);

        return (spa_last_synced_txg(spa) + 1);
}

/*
 * Used in combination with spa_vdev_config_enter() to allow the syncing
 * of multiple transactions without releasing the spa_namespace_lock.
 */
void
spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error, char *tag)
{
        ASSERT(MUTEX_HELD(&spa_namespace_lock));

        int config_changed = B_FALSE;

        ASSERT(txg > spa_last_synced_txg(spa));

        spa->spa_pending_vdev = NULL;

        /*
         * Reassess the DTLs.
         */
        vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE);

        if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) {
                config_changed = B_TRUE;
                spa->spa_config_generation++;
        }

        /*
         * Verify the metaslab classes.
         */
        ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0);
        ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0);

        spa_config_exit(spa, SCL_ALL, spa);

        /*
         * Panic the system if the specified tag requires it.  This
         * is useful for ensuring that configurations are updated
         * transactionally.
         */
        if (zio_injection_enabled)
                zio_handle_panic_injection(spa, tag, 0);

        /*
         * Note: this txg_wait_synced() is important because it ensures
         * that there won't be more than one config change per txg.
         * This allows us to use the txg as the generation number.
         */
        if (error == 0)
                txg_wait_synced(spa->spa_dsl_pool, txg);

        if (vd != NULL) {
                ASSERT(!vd->vdev_detached || vd->vdev_dtl_smo.smo_object == 0);
                spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
                vdev_free(vd);
                spa_config_exit(spa, SCL_ALL, spa);
        }

        /*
         * If the config changed, update the config cache.
         */
        if (config_changed)
                spa_config_sync(spa, B_FALSE, B_TRUE);
}

/*
 * Unlock the spa_t after adding or removing a vdev.  Besides undoing the
 * locking of spa_vdev_enter(), we also want to make sure the transactions have
 * synced to disk, and then update the global configuration cache with the new
 * information.
 */
int
spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
{
        spa_vdev_config_exit(spa, vd, txg, error, FTAG);
        mutex_exit(&spa_namespace_lock);
        mutex_exit(&spa->spa_vdev_top_lock);

        return (error);
}

/*
 * Lock the given spa_t for the purpose of changing vdev state.
 */
void
spa_vdev_state_enter(spa_t *spa, int oplocks)
{
        int locks = SCL_STATE_ALL | oplocks;

        /*
         * Root pools may need to read from the underlying devfs filesystem
         * when opening up a vdev.  Unfortunately if we're holding the
         * SCL_ZIO lock it will result in a deadlock when we try to issue
         * the read from the root filesystem.  Instead we "prefetch"
         * the associated vnodes that we need prior to opening the
         * underlying devices and cache them so that we can prevent
         * any I/O when we are doing the actual open.
         */
        if (spa_is_root(spa)) {
                int low = locks & ~(SCL_ZIO - 1);
                int high = locks & ~low;

                spa_config_enter(spa, high, spa, RW_WRITER);
                vdev_hold(spa->spa_root_vdev);
                spa_config_enter(spa, low, spa, RW_WRITER);
        } else {
                spa_config_enter(spa, locks, spa, RW_WRITER);
        }
        spa->spa_vdev_locks = locks;
}

int
spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error)
{
        boolean_t config_changed = B_FALSE;

        if (vd != NULL || error == 0)
                vdev_dtl_reassess(vd ? vd->vdev_top : spa->spa_root_vdev,
                    0, 0, B_FALSE);

        if (vd != NULL) {
                vdev_state_dirty(vd->vdev_top);
                config_changed = B_TRUE;
                spa->spa_config_generation++;
        }

        if (spa_is_root(spa))
                vdev_rele(spa->spa_root_vdev);

        ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL);
        spa_config_exit(spa, spa->spa_vdev_locks, spa);

        /*
         * If anything changed, wait for it to sync.  This ensures that,
         * from the system administrator's perspective, zpool(1M) commands
         * are synchronous.  This is important for things like zpool offline:
         * when the command completes, you expect no further I/O from ZFS.
         */
        if (vd != NULL)
                txg_wait_synced(spa->spa_dsl_pool, 0);

        /*
         * If the config changed, update the config cache.
         */
        if (config_changed) {
                mutex_enter(&spa_namespace_lock);
                spa_config_sync(spa, B_FALSE, B_TRUE);
                mutex_exit(&spa_namespace_lock);
        }

        return (error);
}
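
/*
 * Sketch of the state-change pattern (SCL_NONE means no extra locks beyond
 * SCL_STATE_ALL; 'error' is the result of the state change itself):
 *
 *      spa_vdev_state_enter(spa, SCL_NONE);
 *      ... online/offline/fault/clear the vdev ...
 *      return (spa_vdev_state_exit(spa, vd, error));
 */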

/*
 * ==========================================================================
 * Miscellaneous functions
 * ==========================================================================
 */

void
spa_activate_mos_feature(spa_t *spa, const char *feature)
{
        (void) nvlist_add_boolean(spa->spa_label_features, feature);
        vdev_config_dirty(spa->spa_root_vdev);
}

void
spa_deactivate_mos_feature(spa_t *spa, const char *feature)
{
        (void) nvlist_remove_all(spa->spa_label_features, feature);
        vdev_config_dirty(spa->spa_root_vdev);
}

/*
 * Rename a spa_t.
 */
int
spa_rename(const char *name, const char *newname)
{
        spa_t *spa;
        int err;

        /*
         * Lookup the spa_t and grab the config lock for writing.  We need to
         * actually open the pool so that we can sync out the necessary labels.
         * It's OK to call spa_open() with the namespace lock held because we
         * allow recursive calls for other reasons.
         */
        mutex_enter(&spa_namespace_lock);
        if ((err = spa_open(name, &spa, FTAG)) != 0) {
                mutex_exit(&spa_namespace_lock);
                return (err);
        }

        spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

        avl_remove(&spa_namespace_avl, spa);
        (void) strlcpy(spa->spa_name, newname, sizeof (spa->spa_name));
        avl_add(&spa_namespace_avl, spa);

        /*
         * Sync all labels to disk with the new names by marking the root vdev
         * dirty and waiting for it to sync.  It will pick up the new pool name
         * during the sync.
         */
        vdev_config_dirty(spa->spa_root_vdev);

        spa_config_exit(spa, SCL_ALL, FTAG);

        txg_wait_synced(spa->spa_dsl_pool, 0);

        /*
         * Sync the updated config cache.
         */
        spa_config_sync(spa, B_FALSE, B_TRUE);

        spa_close(spa, FTAG);

        mutex_exit(&spa_namespace_lock);

        return (0);
}

/*
 * Return the spa_t associated with given pool_guid, if it exists.  If
 * device_guid is non-zero, determine whether the pool exists *and* contains
 * a device with the specified device_guid.
 */
spa_t *
spa_by_guid(uint64_t pool_guid, uint64_t device_guid)
{
        spa_t *spa;
        avl_tree_t *t = &spa_namespace_avl;

        ASSERT(MUTEX_HELD(&spa_namespace_lock));

        for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
                if (spa->spa_state == POOL_STATE_UNINITIALIZED)
                        continue;
                if (spa->spa_root_vdev == NULL)
                        continue;
                if (spa_guid(spa) == pool_guid) {
                        if (device_guid == 0)
                                break;

                        if (vdev_lookup_by_guid(spa->spa_root_vdev,
                            device_guid) != NULL)
                                break;

                        /*
                         * Check any devices we may be in the process of adding.
                         */
                        if (spa->spa_pending_vdev) {
                                if (vdev_lookup_by_guid(spa->spa_pending_vdev,
                                    device_guid) != NULL)
                                        break;
                        }
                }
        }

        return (spa);
}

/*
 * Determine whether a pool with the given pool_guid exists.
 */
boolean_t
spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
{
        return (spa_by_guid(pool_guid, device_guid) != NULL);
}

char *
spa_strdup(const char *s)
{
        size_t len;
        char *new;

        len = strlen(s);
        new = kmem_alloc(len + 1, KM_SLEEP);
        bcopy(s, new, len);
        new[len] = '\0';

        return (new);
}

void
spa_strfree(char *s)
{
        kmem_free(s, strlen(s) + 1);
}

uint64_t
spa_get_random(uint64_t range)
{
        uint64_t r;

        ASSERT(range != 0);

        (void) random_get_pseudo_bytes((void *)&r, sizeof (uint64_t));

        return (r % range);
}

uint64_t
spa_generate_guid(spa_t *spa)
{
        uint64_t guid = spa_get_random(-1ULL);

        if (spa != NULL) {
                while (guid == 0 || spa_guid_exists(spa_guid(spa), guid))
                        guid = spa_get_random(-1ULL);
        } else {
                while (guid == 0 || spa_guid_exists(guid, 0))
                        guid = spa_get_random(-1ULL);
        }

        return (guid);
}

void
sprintf_blkptr(char *buf, const blkptr_t *bp)
{
        char type[256];
        char *checksum = NULL;
        char *compress = NULL;

        if (bp != NULL) {
                if (BP_GET_TYPE(bp) & DMU_OT_NEWTYPE) {
                        dmu_object_byteswap_t bswap =
                            DMU_OT_BYTESWAP(BP_GET_TYPE(bp));
                        (void) snprintf(type, sizeof (type), "bswap %s %s",
                            DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) ?
                            "metadata" : "data",
                            dmu_ot_byteswap[bswap].ob_name);
                } else {
                        (void) strlcpy(type, dmu_ot[BP_GET_TYPE(bp)].ot_name,
                            sizeof (type));
                }
                checksum = zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name;
                compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name;
        }

        SPRINTF_BLKPTR(snprintf, ' ', buf, bp, type, checksum, compress);
}

void
spa_freeze(spa_t *spa)
{
        uint64_t freeze_txg = 0;

        spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
        if (spa->spa_freeze_txg == UINT64_MAX) {
                freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE;
                spa->spa_freeze_txg = freeze_txg;
        }
        spa_config_exit(spa, SCL_ALL, FTAG);
        if (freeze_txg != 0)
                txg_wait_synced(spa_get_dsl(spa), freeze_txg);
}

void
zfs_panic_recover(const char *fmt, ...)
{
        va_list adx;

        va_start(adx, fmt);
        vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx);
        va_end(adx);
}

/*
 * This is a stripped-down version of strtoull, suitable only for converting
 * lowercase hexadecimal numbers that don't overflow.
 */
uint64_t
strtonum(const char *str, char **nptr)
{
        uint64_t val = 0;
        char c;
        int digit;

        while ((c = *str) != '\0') {
                if (c >= '0' && c <= '9')
                        digit = c - '0';
                else if (c >= 'a' && c <= 'f')
                        digit = 10 + c - 'a';
                else
                        break;

                val *= 16;
                val += digit;

                str++;
        }

        if (nptr)
                *nptr = (char *)str;

        return (val);
}
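
/*
 * For example, strtonum("1a2b", &end) returns 0x1a2b (6699) with 'end'
 * pointing at the terminating NUL; parsing stops at the first character
 * outside [0-9a-f], so strtonum("ffg", &end) returns 0xff with 'end' at 'g'.
 */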

/*
 * ==========================================================================
 * Accessor functions
 * ==========================================================================
 */

boolean_t
spa_shutting_down(spa_t *spa)
{
        return (spa->spa_async_suspended);
}

dsl_pool_t *
spa_get_dsl(spa_t *spa)
{
        return (spa->spa_dsl_pool);
}

boolean_t
spa_is_initializing(spa_t *spa)
{
        return (spa->spa_is_initializing);
}

blkptr_t *
spa_get_rootblkptr(spa_t *spa)
{
        return (&spa->spa_ubsync.ub_rootbp);
}

void
spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp)
{
        spa->spa_uberblock.ub_rootbp = *bp;
}

void
spa_altroot(spa_t *spa, char *buf, size_t buflen)
{
        if (spa->spa_root == NULL)
                buf[0] = '\0';
        else
                (void) strncpy(buf, spa->spa_root, buflen);
}

int
spa_sync_pass(spa_t *spa)
{
        return (spa->spa_sync_pass);
}

char *
spa_name(spa_t *spa)
{
        return (spa->spa_name);
}

uint64_t
spa_guid(spa_t *spa)
{
        dsl_pool_t *dp = spa_get_dsl(spa);
        uint64_t guid;

        /*
         * If we fail to parse the config during spa_load(), we can go through
         * the error path (which posts an ereport) and end up here with no root
         * vdev.  We stash the original pool guid in 'spa_config_guid' to handle
         * this case.
         */
        if (spa->spa_root_vdev == NULL)
                return (spa->spa_config_guid);

        guid = spa->spa_last_synced_guid != 0 ?
            spa->spa_last_synced_guid : spa->spa_root_vdev->vdev_guid;

        /*
         * Return the most recently synced out guid unless we're
         * in syncing context.
         */
        if (dp && dsl_pool_sync_context(dp))
                return (spa->spa_root_vdev->vdev_guid);
        else
                return (guid);
}

uint64_t
spa_load_guid(spa_t *spa)
{
        /*
         * This is a GUID that exists solely as a reference for the
         * purposes of the arc.  It is generated at load time, and
         * is never written to persistent storage.
         */
        return (spa->spa_load_guid);
}

uint64_t
spa_last_synced_txg(spa_t *spa)
{
        return (spa->spa_ubsync.ub_txg);
}

uint64_t
spa_first_txg(spa_t *spa)
{
        return (spa->spa_first_txg);
}

uint64_t
spa_syncing_txg(spa_t *spa)
{
        return (spa->spa_syncing_txg);
}

pool_state_t
spa_state(spa_t *spa)
{
        return (spa->spa_state);
}

spa_load_state_t
spa_load_state(spa_t *spa)
{
        return (spa->spa_load_state);
}

uint64_t
spa_freeze_txg(spa_t *spa)
{
        return (spa->spa_freeze_txg);
}

/* ARGSUSED */
uint64_t
spa_get_asize(spa_t *spa, uint64_t lsize)
{
        return (lsize * spa_asize_inflation);
}

uint64_t
spa_get_dspace(spa_t *spa)
{
        return (spa->spa_dspace);
}

void
spa_update_dspace(spa_t *spa)
{
        spa->spa_dspace = metaslab_class_get_dspace(spa_normal_class(spa)) +
            ddt_get_dedup_dspace(spa);
}

/*
 * Return the failure mode that has been set for this pool. The default
 * behavior is to block all I/Os when a complete failure occurs.
 */
uint8_t
spa_get_failmode(spa_t *spa)
{
        return (spa->spa_failmode);
}

boolean_t
spa_suspended(spa_t *spa)
{
        return (spa->spa_suspended);
}

uint64_t
spa_version(spa_t *spa)
{
        return (spa->spa_ubsync.ub_version);
}

boolean_t
spa_deflate(spa_t *spa)
{
        return (spa->spa_deflate);
}

metaslab_class_t *
spa_normal_class(spa_t *spa)
{
        return (spa->spa_normal_class);
}

metaslab_class_t *
spa_log_class(spa_t *spa)
{
        return (spa->spa_log_class);
}

int
spa_max_replication(spa_t *spa)
{
        /*
         * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to
         * handle BPs with more than one DVA allocated.  Set our max
         * replication level accordingly.
         */
        if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS)
                return (1);
        return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override));
}

int
spa_prev_software_version(spa_t *spa)
{
        return (spa->spa_prev_software_version);
}

uint64_t
spa_deadman_synctime(spa_t *spa)
{
        return (spa->spa_deadman_synctime);
}

uint64_t
dva_get_dsize_sync(spa_t *spa, const dva_t *dva)
{
        uint64_t asize = DVA_GET_ASIZE(dva);
        uint64_t dsize = asize;

        ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

        if (asize != 0 && spa->spa_deflate) {
                vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
                dsize = (asize >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio;
        }

        return (dsize);
}
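
/*
 * Sketch with hypothetical numbers: given asize = 8K on a deflated pool,
 * dsize = (8192 >> SPA_MINBLOCKSHIFT) * vdev_deflate_ratio =
 * 16 * vdev_deflate_ratio for that top-level vdev; with spa_deflate off,
 * dsize is simply the 8K asize.
 */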

uint64_t
bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp)
{
        uint64_t dsize = 0;

        for (int d = 0; d < SPA_DVAS_PER_BP; d++)
                dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);

        return (dsize);
}

uint64_t
bp_get_dsize(spa_t *spa, const blkptr_t *bp)
{
        uint64_t dsize = 0;

        spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

        for (int d = 0; d < SPA_DVAS_PER_BP; d++)
                dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);

        spa_config_exit(spa, SCL_VDEV, FTAG);

        return (dsize);
}

/*
 * ==========================================================================
 * Initialization and Termination
 * ==========================================================================
 */

static int
spa_name_compare(const void *a1, const void *a2)
{
        const spa_t *s1 = a1;
        const spa_t *s2 = a2;
        int s;

        s = strcmp(s1->spa_name, s2->spa_name);
        if (s > 0)
                return (1);
        if (s < 0)
                return (-1);
        return (0);
}

int
spa_busy(void)
{
        return (spa_active_count);
}

void
spa_boot_init()
{
        spa_config_load();
}

void
spa_init(int mode)
{
        mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL);
        cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL);

        avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t),
            offsetof(spa_t, spa_avl));

        avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t),
            offsetof(spa_aux_t, aux_avl));

        avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t),
            offsetof(spa_aux_t, aux_avl));

        spa_mode_global = mode;

#ifdef _KERNEL
        spa_arch_init();
#else
        if (spa_mode_global != FREAD && dprintf_find_string("watch")) {
                arc_procfd = open("/proc/self/ctl", O_WRONLY);
                if (arc_procfd == -1) {
                        perror("could not enable watchpoints: "
                            "opening /proc/self/ctl failed: ");
                } else {
                        arc_watch = B_TRUE;
                }
        }
#endif

        refcount_init();
        unique_init();
        space_map_init();
        zio_init();
        dmu_init();
        zil_init();
        vdev_cache_stat_init();
        zfs_prop_init();
        zpool_prop_init();
        zpool_feature_init();
        spa_config_load();
        l2arc_start();
}

void
spa_fini(void)
{
        l2arc_stop();

        spa_evict_all();

        vdev_cache_stat_fini();
        zil_fini();
        dmu_fini();
        zio_fini();
        space_map_fini();
        unique_fini();
        refcount_fini();

        avl_destroy(&spa_namespace_avl);
        avl_destroy(&spa_spare_avl);
        avl_destroy(&spa_l2cache_avl);

        cv_destroy(&spa_namespace_cv);
        mutex_destroy(&spa_namespace_lock);
        mutex_destroy(&spa_spare_lock);
        mutex_destroy(&spa_l2cache_lock);
}

/*
 * Return whether this pool has slogs. No locking needed.
 * It's not a problem if the wrong answer is returned, as it's only for
 * performance and not correctness.
 */
boolean_t
spa_has_slogs(spa_t *spa)
{
        return (spa->spa_log_class->mc_rotor != NULL);
}

spa_log_state_t
spa_get_log_state(spa_t *spa)
{
        return (spa->spa_log_state);
}

void
spa_set_log_state(spa_t *spa, spa_log_state_t state)
{
        spa->spa_log_state = state;
}

boolean_t
spa_is_root(spa_t *spa)
{
        return (spa->spa_is_root);
}

boolean_t
spa_writeable(spa_t *spa)
{
        return (!!(spa->spa_mode & FWRITE));
}

int
spa_mode(spa_t *spa)
{
        return (spa->spa_mode);
}

uint64_t
spa_bootfs(spa_t *spa)
{
        return (spa->spa_bootfs);
}

uint64_t
spa_delegation(spa_t *spa)
{
        return (spa->spa_delegation);
}

objset_t *
spa_meta_objset(spa_t *spa)
{
        return (spa->spa_meta_objset);
}

enum zio_checksum
spa_dedup_checksum(spa_t *spa)
{
        return (spa->spa_dedup_checksum);
}

/*
 * Reset pool scan stat per scan pass (or reboot).
 */
void
spa_scan_stat_init(spa_t *spa)
{
        /* data not stored on disk */
        spa->spa_scan_pass_start = gethrestime_sec();
        spa->spa_scan_pass_exam = 0;
        vdev_scan_stat_init(spa->spa_root_vdev);
}

/*
 * Get scan stats for zpool status reports
 */
int
spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps)
{
        dsl_scan_t *scn = spa->spa_dsl_pool ? spa->spa_dsl_pool->dp_scan : NULL;

        if (scn == NULL || scn->scn_phys.scn_func == POOL_SCAN_NONE)
                return (SET_ERROR(ENOENT));
        bzero(ps, sizeof (pool_scan_stat_t));

        /* data stored on disk */
        ps->pss_func = scn->scn_phys.scn_func;
        ps->pss_start_time = scn->scn_phys.scn_start_time;
        ps->pss_end_time = scn->scn_phys.scn_end_time;
        ps->pss_to_examine = scn->scn_phys.scn_to_examine;
        ps->pss_examined = scn->scn_phys.scn_examined;
        ps->pss_to_process = scn->scn_phys.scn_to_process;
        ps->pss_processed = scn->scn_phys.scn_processed;
        ps->pss_errors = scn->scn_phys.scn_errors;
        ps->pss_state = scn->scn_phys.scn_state;

        /* data not stored on disk */
        ps->pss_pass_start = spa->spa_scan_pass_start;
        ps->pss_pass_exam = spa->spa_scan_pass_exam;

        return (0);
}

boolean_t
spa_debug_enabled(spa_t *spa)
{
        return (spa->spa_debug);
}