1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2013 by Delphix. All rights reserved.
24 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
25 */
26
27 #include <sys/zfs_context.h>
28 #include <sys/spa_impl.h>
29 #include <sys/spa_boot.h>
30 #include <sys/zio.h>
31 #include <sys/zio_checksum.h>
32 #include <sys/zio_compress.h>
33 #include <sys/dmu.h>
34 #include <sys/dmu_tx.h>
35 #include <sys/zap.h>
36 #include <sys/zil.h>
37 #include <sys/vdev_impl.h>
38 #include <sys/metaslab.h>
39 #include <sys/uberblock_impl.h>
40 #include <sys/txg.h>
41 #include <sys/avl.h>
42 #include <sys/unique.h>
43 #include <sys/dsl_pool.h>
44 #include <sys/dsl_dir.h>
45 #include <sys/dsl_prop.h>
46 #include <sys/dsl_scan.h>
47 #include <sys/fs/zfs.h>
48 #include <sys/metaslab_impl.h>
49 #include <sys/arc.h>
50 #include <sys/ddt.h>
51 #include "zfs_prop.h"
52 #include "zfeature_common.h"
53
54 /*
55 * SPA locking
56 *
57 * There are four basic locks for managing spa_t structures:
58 *
59 * spa_namespace_lock (global mutex)
60 *
61 * This lock must be acquired to do any of the following:
62 *
63 * - Lookup a spa_t by name
64 * - Add or remove a spa_t from the namespace
65 * - Increase spa_refcount from zero
66 * - Check if spa_refcount is zero
67 * - Rename a spa_t
68 * - add/remove/attach/detach devices
69 * - Held for the duration of create/destroy/import/export
70 *
71 * It does not need to handle recursion. A create or destroy may
72 * reference objects (files or zvols) in other pools, but by
73 * definition they must have an existing reference, and will never need
74 * to lookup a spa_t by name.
75 *
76 * spa_refcount (per-spa refcount_t protected by mutex)
77 *
78 * This reference count keeps track of any active users of the spa_t. The
79 * spa_t cannot be destroyed or freed while this is non-zero. Internally,
80 * the refcount is never really 'zero' - opening a pool implicitly keeps
81 * some references in the DMU. Internally we check against spa_minref, but
82 * present the image of a zero/non-zero value to consumers.
83 *
84 * spa_config_lock[] (per-spa array of rwlocks)
85 *
86 * This protects the spa_t from config changes, and must be held in
87 * the following circumstances:
88 *
89 * - RW_READER to perform I/O to the spa
90 * - RW_WRITER to change the vdev config
91 *
92 * The locking order is fairly straightforward:
93 *
94 * spa_namespace_lock -> spa_refcount
95 *
96 * The namespace lock must be acquired to increase the refcount from 0
97 * or to check if it is zero.
98 *
99 * spa_refcount -> spa_config_lock[]
100 *
101 * There must be at least one valid reference on the spa_t to acquire
102 * the config lock.
103 *
104 * spa_namespace_lock -> spa_config_lock[]
105 *
106 * The namespace lock must always be taken before the config lock.
107 *
108 *
109 * The spa_namespace_lock can be acquired directly and is globally visible.
110 *
111 * The namespace is manipulated using the following functions, all of which
112 * require the spa_namespace_lock to be held.
113 *
114 * spa_lookup() Lookup a spa_t by name.
115 *
116 * spa_add() Create a new spa_t in the namespace.
117 *
118 * spa_remove() Remove a spa_t from the namespace. This also
119 * frees up any memory associated with the spa_t.
120 *
121 * spa_next() Returns the next spa_t in the system, or the
122 * first if NULL is passed.
123 *
124 * spa_evict_all() Shutdown and remove all spa_t structures in
125 * the system.
126 *
127 * spa_guid_exists() Determine whether a pool/device guid exists.
128 *
129 * The spa_refcount is manipulated using the following functions:
130 *
131 * spa_open_ref() Adds a reference to the given spa_t. Must be
132 * called with spa_namespace_lock held if the
133 * refcount is currently zero.
134 *
135 * spa_close() Remove a reference from the spa_t. This will
136 * not free the spa_t or remove it from the
137 * namespace. No locking is required.
138 *
139 * spa_refcount_zero() Returns true if the refcount is currently
140 * zero. Must be called with spa_namespace_lock
141 * held.
142 *
143 * The spa_config_lock[] is an array of rwlocks, ordered as follows:
144 * SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV.
145 * spa_config_lock[] is manipulated with spa_config_{enter,exit,held}().
146 *
147 * To read the configuration, it suffices to hold one of these locks as reader.
148 * To modify the configuration, you must hold all locks as writer. To modify
149 * vdev state without altering the vdev tree's topology (e.g. online/offline),
150 * you must hold SCL_STATE and SCL_ZIO as writer.
151 *
152 * We use these distinct config locks to avoid recursive lock entry.
153 * For example, spa_sync() (which holds SCL_CONFIG as reader) induces
154 * block allocations (SCL_ALLOC), which may require reading space maps
155 * from disk (dmu_read() -> zio_read() -> SCL_ZIO).
156 *
157 * The spa config locks cannot be normal rwlocks because we need the
158 * ability to hand off ownership. For example, SCL_ZIO is acquired
159 * by the issuing thread and later released by an interrupt thread.
160 * They do, however, obey the usual write-wanted semantics to prevent
161 * writer (i.e. system administrator) starvation.
162 *
163 * The lock acquisition rules are as follows:
164 *
165 * SCL_CONFIG
166 * Protects changes to the vdev tree topology, such as vdev
167 * add/remove/attach/detach. Protects the dirty config list
168 * (spa_config_dirty_list) and the set of spares and l2arc devices.
169 *
170 * SCL_STATE
171 * Protects changes to pool state and vdev state, such as vdev
172 * online/offline/fault/degrade/clear. Protects the dirty state list
173 * (spa_state_dirty_list) and global pool state (spa_state).
174 *
175 * SCL_ALLOC
176 * Protects changes to metaslab groups and classes.
177 * Held as reader by metaslab_alloc() and metaslab_claim().
178 *
179 * SCL_ZIO
180 * Held by bp-level zios (those which have no io_vd upon entry)
181 * to prevent changes to the vdev tree. The bp-level zio implicitly
182 * protects all of its vdev child zios, which do not hold SCL_ZIO.
183 *
184 * SCL_FREE
185 * Protects changes to metaslab groups and classes.
186 * Held as reader by metaslab_free(). SCL_FREE is distinct from
187 * SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free
188 * blocks in zio_done() while another i/o that holds either
189 * SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete.
190 *
191 * SCL_VDEV
192 * Held as reader to prevent changes to the vdev tree during trivial
193 * inquiries such as bp_get_dsize(). SCL_VDEV is distinct from the
194 * other locks, and lower than all of them, to ensure that it's safe
195 * to acquire regardless of caller context.
196 *
197 * In addition, the following rules apply:
198 *
199 * (a) spa_props_lock protects pool properties, spa_config and spa_config_list.
200 * The lock ordering is SCL_CONFIG > spa_props_lock.
201 *
202 * (b) I/O operations on leaf vdevs. For any zio operation that takes
203 * an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(),
204 * or zio_write_phys() -- the caller must ensure that the config cannot
205 * change in the interim, and that the vdev cannot be reopened.
206 * SCL_STATE as reader suffices for both.
207 *
208 * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
209 *
210 * spa_vdev_enter() Acquire the namespace lock and the config lock
211 * for writing.
212 *
213 * spa_vdev_exit() Release the config lock, wait for all I/O
214 * to complete, sync the updated configs to the
215 * cache, and release the namespace lock.
216 *
217 * vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit().
218 * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
219 * locking is always based on spa_namespace_lock and spa_config_lock[].
220 *
221 * spa_rename() is also implemented within this file since it requires
222 * manipulation of the namespace.
223 */
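
/*
 * To make the rules above concrete, here is a minimal sketch of the two
 * common usage patterns (hypothetical callers; FTAG is the tag convention
 * used throughout this code):
 *
 *	As reader, to get a stable view of the vdev tree for an inquiry:
 *
 *		spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
 *		vd = vdev_lookup_top(spa, vdev_id);
 *		spa_config_exit(spa, SCL_VDEV, FTAG);
 *
 *	As writer, for a topology change (normally via spa_vdev_enter()):
 *
 *		spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
 *		...modify the vdev tree...
 *		spa_config_exit(spa, SCL_ALL, spa);
 */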
224
225 static avl_tree_t spa_namespace_avl;
226 kmutex_t spa_namespace_lock;
227 static kcondvar_t spa_namespace_cv;
228 static int spa_active_count;
229 int spa_max_replication_override = SPA_DVAS_PER_BP;
230
231 static kmutex_t spa_spare_lock;
232 static avl_tree_t spa_spare_avl;
233 static kmutex_t spa_l2cache_lock;
234 static avl_tree_t spa_l2cache_avl;
235
236 kmem_cache_t *spa_buffer_pool;
237 int spa_mode_global;
238
239 #ifdef ZFS_DEBUG
240 /* Everything except dprintf and spa is on by default in debug builds */
241 int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SPA);
242 #else
243 int zfs_flags = 0;
244 #endif
245
246 /*
247 * zfs_recover can be set to nonzero to attempt to recover from
248 * otherwise-fatal errors, typically caused by on-disk corruption. When
249 * set, calls to zfs_panic_recover() will turn into warning messages.
250 * This should only be used as a last resort, as it typically results
251 * in leaked space, or worse.
252 */
253 boolean_t zfs_recover = B_FALSE;
254
255 /*
256 * If destroy encounters an EIO while reading metadata (e.g. indirect
257 * blocks), space referenced by the missing metadata can not be freed.
258 * Normally this causes the background destroy to become "stalled", as
259 * it is unable to make forward progress. While in this stalled state,
260 * all remaining space to free from the error-encountering filesystem is
261 * "temporarily leaked". Set this flag to cause it to ignore the EIO,
262 * permanently leak the space from indirect blocks that can not be read,
263 * and continue to free everything else that it can.
264 *
265 * The default, "stalling" behavior is useful if the storage partially
266 * fails (i.e. some but not all i/os fail), and then later recovers. In
267 * this case, we will be able to continue pool operations while it is
268 * partially failed, and when it recovers, we can continue to free the
269 * space, with no leaks. However, note that this case is actually
270 * fairly rare.
271 *
272 * Typically pools either (a) fail completely (but perhaps temporarily,
273 * e.g. a top-level vdev going offline), or (b) have localized,
274 * permanent errors (e.g. disk returns the wrong data due to bit flip or
275 * firmware bug). In case (a), this setting does not matter because the
276 * pool will be suspended and the sync thread will not be able to make
277 * forward progress regardless. In case (b), because the error is
278 * permanent, the best we can do is leak the minimum amount of space,
279 * which is what setting this flag will do. Therefore, it is reasonable
280 * for this flag to normally be set, but we chose the more conservative
281 * approach of not setting it, so that there is no possibility of
282 * leaking space in the "partial temporary" failure case.
283 */
284 boolean_t zfs_free_leak_on_eio = B_FALSE;
285
286 /*
287 * Expiration time in milliseconds. This value has two meanings. First, it is
288 * used to determine when the spa_deadman() logic should fire. By default the
289 * spa_deadman() fires if spa_sync() has not completed in 1000 seconds
290 * (the default value of zfs_deadman_synctime_ms). Second, the value determines
291 * if an I/O is considered "hung"; any I/O that has not completed within
292 * zfs_deadman_synctime_ms is considered "hung", resulting in a system panic.
293 */
294 uint64_t zfs_deadman_synctime_ms = 1000000ULL;
295
296 /*
297 * Check time in milliseconds. This defines the frequency at which we check
298 * for hung I/O.
299 */
300 uint64_t zfs_deadman_checktime_ms = 5000ULL;
301
302 /*
303 * Override the zfs deadman behavior via /etc/system. By default the
304 * deadman is enabled except on VMware and sparc deployments.
305 */
306 int zfs_deadman_enabled = -1;
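
/*
 * For example, a hypothetical /etc/system line to force the deadman on,
 * regardless of platform detection, would be:
 *
 *	set zfs:zfs_deadman_enabled = 1
 */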
307
308 /*
309 * The worst case is single-sector max-parity RAID-Z blocks, in which
310 * case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1)
311 * times the size; so just assume that. Add to this the fact that
312 * we can have up to 3 DVAs per bp, and one more factor of 2 because
313 * the block may be dittoed with up to 3 DVAs by ddt_sync(). All together,
314 * the worst case is:
315 * (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24
316 */
317 int spa_asize_inflation = 24;
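
/*
 * As a worked example of the bound above: for a 128K logical write, the
 * worst-case reservation is 128K * 24 = 3M of allocated space (see
 * spa_get_asize() below, which simply returns lsize * spa_asize_inflation).
 */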
318
319 /*
320 * ==========================================================================
321 * SPA config locking
322 * ==========================================================================
323 */
324 static void
325 spa_config_lock_init(spa_t *spa)
326 {
327 for (int i = 0; i < SCL_LOCKS; i++) {
328 spa_config_lock_t *scl = &spa->spa_config_lock[i];
329 mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
330 cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
331 refcount_create_untracked(&scl->scl_count);
332 scl->scl_writer = NULL;
333 scl->scl_write_wanted = 0;
334 }
335 }
336
337 static void
338 spa_config_lock_destroy(spa_t *spa)
339 {
340 for (int i = 0; i < SCL_LOCKS; i++) {
341 spa_config_lock_t *scl = &spa->spa_config_lock[i];
342 mutex_destroy(&scl->scl_lock);
343 cv_destroy(&scl->scl_cv);
344 refcount_destroy(&scl->scl_count);
345 ASSERT(scl->scl_writer == NULL);
346 ASSERT(scl->scl_write_wanted == 0);
347 }
348 }
349
350 int
351 spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw)
352 {
353 for (int i = 0; i < SCL_LOCKS; i++) {
354 spa_config_lock_t *scl = &spa->spa_config_lock[i];
355 if (!(locks & (1 << i)))
356 continue;
357 mutex_enter(&scl->scl_lock);
358 if (rw == RW_READER) {
359 if (scl->scl_writer || scl->scl_write_wanted) {
360 mutex_exit(&scl->scl_lock);
361 spa_config_exit(spa, locks & ((1 << i) - 1), tag);
362 return (0);
363 }
364 } else {
365 ASSERT(scl->scl_writer != curthread);
366 if (!refcount_is_zero(&scl->scl_count)) {
367 mutex_exit(&scl->scl_lock);
368 spa_config_exit(spa, locks & ((1 << i) - 1), tag);
369 return (0);
370 }
371 scl->scl_writer = curthread;
372 }
373 (void) refcount_add(&scl->scl_count, tag);
374 mutex_exit(&scl->scl_lock);
375 }
376 return (1);
377 }
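
/*
 * A sketch of the try-lock pattern (hypothetical caller): on failure,
 * spa_config_tryenter() has already dropped any locks it acquired, so the
 * caller can simply back off:
 *
 *	if (!spa_config_tryenter(spa, SCL_ZIO, FTAG, RW_READER))
 *		return (SET_ERROR(EBUSY));
 *	...perform the I/O...
 *	spa_config_exit(spa, SCL_ZIO, FTAG);
 */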
378
379 void
380 spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw)
381 {
382 int wlocks_held = 0;
383
384 ASSERT3U(SCL_LOCKS, <, sizeof (wlocks_held) * NBBY);
385
386 for (int i = 0; i < SCL_LOCKS; i++) {
387 spa_config_lock_t *scl = &spa->spa_config_lock[i];
388 if (scl->scl_writer == curthread)
389 wlocks_held |= (1 << i);
390 if (!(locks & (1 << i)))
391 continue;
392 mutex_enter(&scl->scl_lock);
393 if (rw == RW_READER) {
394 while (scl->scl_writer || scl->scl_write_wanted) {
395 cv_wait(&scl->scl_cv, &scl->scl_lock);
396 }
397 } else {
398 ASSERT(scl->scl_writer != curthread);
399 while (!refcount_is_zero(&scl->scl_count)) {
400 scl->scl_write_wanted++;
401 cv_wait(&scl->scl_cv, &scl->scl_lock);
402 scl->scl_write_wanted--;
403 }
404 scl->scl_writer = curthread;
405 }
406 (void) refcount_add(&scl->scl_count, tag);
407 mutex_exit(&scl->scl_lock);
408 }
409 ASSERT(wlocks_held <= locks);
410 }
411
412 void
413 spa_config_exit(spa_t *spa, int locks, void *tag)
414 {
415 for (int i = SCL_LOCKS - 1; i >= 0; i--) {
416 spa_config_lock_t *scl = &spa->spa_config_lock[i];
417 if (!(locks & (1 << i)))
418 continue;
419 mutex_enter(&scl->scl_lock);
420 ASSERT(!refcount_is_zero(&scl->scl_count));
421 if (refcount_remove(&scl->scl_count, tag) == 0) {
422 ASSERT(scl->scl_writer == NULL ||
423 scl->scl_writer == curthread);
424 scl->scl_writer = NULL; /* OK in either case */
425 cv_broadcast(&scl->scl_cv);
426 }
427 mutex_exit(&scl->scl_lock);
428 }
429 }
430
431 int
432 spa_config_held(spa_t *spa, int locks, krw_t rw)
433 {
434 int locks_held = 0;
435
436 for (int i = 0; i < SCL_LOCKS; i++) {
437 spa_config_lock_t *scl = &spa->spa_config_lock[i];
438 if (!(locks & (1 << i)))
439 continue;
440 if ((rw == RW_READER && !refcount_is_zero(&scl->scl_count)) ||
441 (rw == RW_WRITER && scl->scl_writer == curthread))
442 locks_held |= 1 << i;
443 }
444
445 return (locks_held);
446 }
447
448 /*
449 * ==========================================================================
450 * SPA namespace functions
451 * ==========================================================================
452 */
453
454 /*
455 * Lookup the named spa_t in the AVL tree. The spa_namespace_lock must be held.
456 * Returns NULL if no matching spa_t is found.
457 */
458 spa_t *
459 spa_lookup(const char *name)
460 {
461 spa_t *search;
462 spa_t *spa;
463 avl_index_t where;
464 char *cp;
465
466 search = kmem_alloc(sizeof (*search), KM_SLEEP);
467
468 (void) strlcpy(search->spa_name, name, sizeof (search->spa_name));
469
470 /*
471 * If it's a full dataset name, figure out the pool name and
472 * just use that.
473 */
474 cp = strpbrk(search->spa_name, "/@#");
475 if (cp != NULL)
476 *cp = '\0';
477
478 spa = avl_find(&spa_namespace_avl, search, &where);
479 kmem_free(search, sizeof (*search));
480
481 return (spa);
482 }
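
/*
 * For example, a hypothetical lookup of "tank/home@yesterday" truncates
 * its argument at the first '/', '@' or '#' and searches the namespace
 * for the pool "tank":
 *
 *	ASSERT(MUTEX_HELD(&spa_namespace_lock));
 *	spa = spa_lookup("tank/home@yesterday");
 */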
483
484 /*
485 * Fires when spa_sync has not completed within zfs_deadman_synctime_ms.
486 * If the zfs_deadman_enabled flag is set then it inspects all vdev queues
487 * looking for potentially hung I/Os.
488 */
489 void
490 spa_deadman(void *arg)
491 {
492 spa_t *spa = arg;
493
494 /*
495 * Disable the deadman timer if the pool is suspended.
496 */
497 if (spa_suspended(spa)) {
498 VERIFY(cyclic_reprogram(spa->spa_deadman_cycid, CY_INFINITY));
499 return;
500 }
501
502 zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu",
503 (gethrtime() - spa->spa_sync_starttime) / NANOSEC,
504 ++spa->spa_deadman_calls);
505 if (zfs_deadman_enabled)
506 vdev_deadman(spa->spa_root_vdev);
507 }
508
509 /*
510 * Create an uninitialized spa_t with the given name. Requires
511 * spa_namespace_lock. The caller must ensure that the spa_t doesn't already
512 * exist by calling spa_lookup() first.
513 */
514 spa_t *
515 spa_add(const char *name, nvlist_t *config, const char *altroot)
516 {
517 spa_t *spa;
518 spa_config_dirent_t *dp;
519 cyc_handler_t hdlr;
520 cyc_time_t when;
521
522 ASSERT(MUTEX_HELD(&spa_namespace_lock));
523
524 spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);
525
526 mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
527 mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
528 mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
529 mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
530 mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL);
531 mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);
532 mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
533 mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);
534 mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL);
535 mutex_init(&spa->spa_iokstat_lock, NULL, MUTEX_DEFAULT, NULL);
536
537 cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
538 cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL);
539 cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
540 cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);
541
542 for (int t = 0; t < TXG_SIZE; t++)
543 bplist_create(&spa->spa_free_bplist[t]);
544
545 (void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name));
546 spa->spa_state = POOL_STATE_UNINITIALIZED;
547 spa->spa_freeze_txg = UINT64_MAX;
548 spa->spa_final_txg = UINT64_MAX;
549 spa->spa_load_max_txg = UINT64_MAX;
550 spa->spa_proc = &p0;
551 spa->spa_proc_state = SPA_PROC_NONE;
552
553 hdlr.cyh_func = spa_deadman;
554 hdlr.cyh_arg = spa;
555 hdlr.cyh_level = CY_LOW_LEVEL;
556
557 spa->spa_deadman_synctime = MSEC2NSEC(zfs_deadman_synctime_ms);
558
559 /*
560 * This determines how often we need to check for hung I/Os after
561 * the cyclic has already fired. Since checking for hung I/Os is
562 * an expensive operation, we don't want to check too frequently.
563 * Instead we wait zfs_deadman_checktime_ms (5 seconds by default) between checks.
564 */
565 when.cyt_interval = MSEC2NSEC(zfs_deadman_checktime_ms);
566 when.cyt_when = CY_INFINITY;
567 mutex_enter(&cpu_lock);
568 spa->spa_deadman_cycid = cyclic_add(&hdlr, &when);
569 mutex_exit(&cpu_lock);
570
571 refcount_create(&spa->spa_refcount);
572 spa_config_lock_init(spa);
573
574 avl_add(&spa_namespace_avl, spa);
575
576 /*
577 * Set the alternate root, if there is one.
578 */
579 if (altroot) {
580 spa->spa_root = spa_strdup(altroot);
581 spa_active_count++;
582 }
583
584 /*
585 * Every pool starts with the default cachefile
586 */
587 list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t),
588 offsetof(spa_config_dirent_t, scd_link));
589
590 dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP);
591 dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path);
592 list_insert_head(&spa->spa_config_list, dp);
593
594 VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME,
595 KM_SLEEP) == 0);
596
597 if (config != NULL) {
598 nvlist_t *features;
599
600 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
601 &features) == 0) {
602 VERIFY(nvlist_dup(features, &spa->spa_label_features,
603 0) == 0);
604 }
605
606 VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
607 }
608
609 if (spa->spa_label_features == NULL) {
610 VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME,
611 KM_SLEEP) == 0);
612 }
613
614 spa->spa_iokstat = kstat_create("zfs", 0, name,
615 "disk", KSTAT_TYPE_IO, 1, 0);
616 if (spa->spa_iokstat) {
617 spa->spa_iokstat->ks_lock = &spa->spa_iokstat_lock;
618 kstat_install(spa->spa_iokstat);
619 }
620
621 spa->spa_debug = ((zfs_flags & ZFS_DEBUG_SPA) != 0);
622
623 /*
624 * As a pool is being created, treat all features as disabled by
625 * setting SPA_FEATURE_DISABLED for all entries in the feature
626 * refcount cache.
627 */
628 for (int i = 0; i < SPA_FEATURES; i++) {
629 spa->spa_feat_refcount_cache[i] = SPA_FEATURE_DISABLED;
630 }
631
632 return (spa);
633 }
634
635 /*
636 * Removes a spa_t from the namespace, freeing up any memory used. Requires
637 * spa_namespace_lock. This is called only after the spa_t has been closed and
638 * deactivated.
639 */
640 void
641 spa_remove(spa_t *spa)
642 {
643 spa_config_dirent_t *dp;
644
645 ASSERT(MUTEX_HELD(&spa_namespace_lock));
646 ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
647
648 nvlist_free(spa->spa_config_splitting);
649
650 avl_remove(&spa_namespace_avl, spa);
651 cv_broadcast(&spa_namespace_cv);
652
653 if (spa->spa_root) {
654 spa_strfree(spa->spa_root);
655 spa_active_count--;
656 }
657
658 while ((dp = list_head(&spa->spa_config_list)) != NULL) {
659 list_remove(&spa->spa_config_list, dp);
660 if (dp->scd_path != NULL)
661 spa_strfree(dp->scd_path);
662 kmem_free(dp, sizeof (spa_config_dirent_t));
663 }
664
665 list_destroy(&spa->spa_config_list);
666
667 nvlist_free(spa->spa_label_features);
668 nvlist_free(spa->spa_load_info);
669 spa_config_set(spa, NULL);
670
671 mutex_enter(&cpu_lock);
672 if (spa->spa_deadman_cycid != CYCLIC_NONE)
673 cyclic_remove(spa->spa_deadman_cycid);
674 mutex_exit(&cpu_lock);
675 spa->spa_deadman_cycid = CYCLIC_NONE;
676
677 refcount_destroy(&spa->spa_refcount);
678
679 spa_config_lock_destroy(spa);
680
681 kstat_delete(spa->spa_iokstat);
682 spa->spa_iokstat = NULL;
683
684 for (int t = 0; t < TXG_SIZE; t++)
685 bplist_destroy(&spa->spa_free_bplist[t]);
686
687 cv_destroy(&spa->spa_async_cv);
688 cv_destroy(&spa->spa_proc_cv);
689 cv_destroy(&spa->spa_scrub_io_cv);
690 cv_destroy(&spa->spa_suspend_cv);
691
692 mutex_destroy(&spa->spa_async_lock);
693 mutex_destroy(&spa->spa_errlist_lock);
694 mutex_destroy(&spa->spa_errlog_lock);
695 mutex_destroy(&spa->spa_history_lock);
696 mutex_destroy(&spa->spa_proc_lock);
697 mutex_destroy(&spa->spa_props_lock);
698 mutex_destroy(&spa->spa_scrub_lock);
699 mutex_destroy(&spa->spa_suspend_lock);
700 mutex_destroy(&spa->spa_vdev_top_lock);
701 mutex_destroy(&spa->spa_iokstat_lock);
702
703 kmem_free(spa, sizeof (spa_t));
704 }
705
706 /*
707 * Given a pool, return the next pool in the namespace, or NULL if there is
708 * none. If 'prev' is NULL, return the first pool.
709 */
710 spa_t *
711 spa_next(spa_t *prev)
712 {
713 ASSERT(MUTEX_HELD(&spa_namespace_lock));
714
715 if (prev)
716 return (AVL_NEXT(&spa_namespace_avl, prev));
717 else
718 return (avl_first(&spa_namespace_avl));
719 }
720
721 /*
722 * ==========================================================================
723 * SPA refcount functions
724 * ==========================================================================
725 */
726
727 /*
728 * Add a reference to the given spa_t. Must have at least one reference, or
729 * have the namespace lock held.
730 */
731 void
732 spa_open_ref(spa_t *spa, void *tag)
733 {
734 ASSERT(refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
735 MUTEX_HELD(&spa_namespace_lock));
736 (void) refcount_add(&spa->spa_refcount, tag);
737 }
738
739 /*
740 * Remove a reference to the given spa_t. Must have at least one reference, or
741 * have the namespace lock held.
742 */
743 void
744 spa_close(spa_t *spa, void *tag)
745 {
746 ASSERT(refcount_count(&spa->spa_refcount) > spa->spa_minref ||
747 MUTEX_HELD(&spa_namespace_lock));
748 (void) refcount_remove(&spa->spa_refcount, tag);
749 }
750
751 /*
752 * Check to see if the spa refcount is zero. Must be called with
753 * spa_namespace_lock held. We really compare against spa_minref, which is the
754 * number of references acquired when opening a pool.
755 */
756 boolean_t
757 spa_refcount_zero(spa_t *spa)
758 {
759 ASSERT(MUTEX_HELD(&spa_namespace_lock));
760
761 return (refcount_count(&spa->spa_refcount) == spa->spa_minref);
762 }
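
/*
 * A hypothetical consumer holds a pool across an operation as follows;
 * the initial reference must be taken with the namespace lock held, after
 * which the lock may be dropped:
 *
 *	mutex_enter(&spa_namespace_lock);
 *	spa_open_ref(spa, FTAG);
 *	mutex_exit(&spa_namespace_lock);
 *	...use the pool...
 *	spa_close(spa, FTAG);
 */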
763
764 /*
765 * ==========================================================================
766 * SPA spare and l2cache tracking
767 * ==========================================================================
768 */
769
770 /*
771 * Hot spares and cache devices are tracked using the same code below,
772 * for 'auxiliary' devices.
773 */
774
775 typedef struct spa_aux {
776 uint64_t aux_guid;
777 uint64_t aux_pool;
778 avl_node_t aux_avl;
779 int aux_count;
780 } spa_aux_t;
781
782 static int
783 spa_aux_compare(const void *a, const void *b)
784 {
785 const spa_aux_t *sa = a;
786 const spa_aux_t *sb = b;
787
788 if (sa->aux_guid < sb->aux_guid)
789 return (-1);
790 else if (sa->aux_guid > sb->aux_guid)
791 return (1);
792 else
793 return (0);
794 }
795
796 void
797 spa_aux_add(vdev_t *vd, avl_tree_t *avl)
798 {
799 avl_index_t where;
800 spa_aux_t search;
801 spa_aux_t *aux;
802
803 search.aux_guid = vd->vdev_guid;
804 if ((aux = avl_find(avl, &search, &where)) != NULL) {
805 aux->aux_count++;
806 } else {
807 aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP);
808 aux->aux_guid = vd->vdev_guid;
809 aux->aux_count = 1;
810 avl_insert(avl, aux, where);
811 }
812 }
813
814 void
815 spa_aux_remove(vdev_t *vd, avl_tree_t *avl)
816 {
817 spa_aux_t search;
818 spa_aux_t *aux;
819 avl_index_t where;
820
821 search.aux_guid = vd->vdev_guid;
822 aux = avl_find(avl, &search, &where);
823
824 ASSERT(aux != NULL);
825
826 if (--aux->aux_count == 0) {
827 avl_remove(avl, aux);
828 kmem_free(aux, sizeof (spa_aux_t));
829 } else if (aux->aux_pool == spa_guid(vd->vdev_spa)) {
830 aux->aux_pool = 0ULL;
831 }
832 }
833
834 boolean_t
835 spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl)
836 {
837 spa_aux_t search, *found;
838
839 search.aux_guid = guid;
840 found = avl_find(avl, &search, NULL);
841
842 if (pool) {
843 if (found)
844 *pool = found->aux_pool;
845 else
846 *pool = 0ULL;
847 }
848
849 if (refcnt) {
850 if (found)
851 *refcnt = found->aux_count;
852 else
853 *refcnt = 0;
854 }
855
856 return (found != NULL);
857 }
858
859 void
860 spa_aux_activate(vdev_t *vd, avl_tree_t *avl)
861 {
862 spa_aux_t search, *found;
863 avl_index_t where;
864
865 search.aux_guid = vd->vdev_guid;
866 found = avl_find(avl, &search, &where);
867 ASSERT(found != NULL);
868 ASSERT(found->aux_pool == 0ULL);
869
870 found->aux_pool = spa_guid(vd->vdev_spa);
871 }
872
873 /*
874 * Spares are tracked globally due to the following constraints:
875 *
876 * - A spare may be part of multiple pools.
877 * - A spare may be added to a pool even if it's actively in use within
878 * another pool.
879 * - A spare in use in any pool can only be the source of a replacement if
880 * the target is a spare in the same pool.
881 *
882 * We keep track of all spares on the system through the use of a reference
883 * counted AVL tree. When a vdev is added as a spare, or used as a replacement
884 * spare, then we bump the reference count in the AVL tree. In addition, we set
885 * the 'vdev_isspare' member to indicate that the device is a spare (active or
886 * inactive). When a spare is made active (used to replace a device in the
887 * pool), we also keep track of which pool it's been made a part of.
888 *
889 * The 'spa_spare_lock' protects the AVL tree. These functions are normally
890 * called under the spa_namespace lock as part of vdev reconfiguration. The
891 * separate spare lock exists for the status query path, which does not need to
892 * be completely consistent with respect to other vdev configuration changes.
893 */
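
/*
 * For example, a hypothetical replacement-validation path applies the
 * third rule above by checking which pool, if any, a candidate spare is
 * already active in:
 *
 *	uint64_t pool;
 *	if (spa_spare_exists(vd->vdev_guid, &pool, NULL) &&
 *	    pool != 0ULL && pool != spa_guid(spa))
 *		...reject: the spare is active in another pool...
 */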
894
895 static int
896 spa_spare_compare(const void *a, const void *b)
897 {
898 return (spa_aux_compare(a, b));
899 }
900
901 void
902 spa_spare_add(vdev_t *vd)
903 {
904 mutex_enter(&spa_spare_lock);
905 ASSERT(!vd->vdev_isspare);
906 spa_aux_add(vd, &spa_spare_avl);
907 vd->vdev_isspare = B_TRUE;
908 mutex_exit(&spa_spare_lock);
909 }
910
911 void
912 spa_spare_remove(vdev_t *vd)
913 {
914 mutex_enter(&spa_spare_lock);
915 ASSERT(vd->vdev_isspare);
916 spa_aux_remove(vd, &spa_spare_avl);
917 vd->vdev_isspare = B_FALSE;
918 mutex_exit(&spa_spare_lock);
919 }
920
921 boolean_t
922 spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt)
923 {
924 boolean_t found;
925
926 mutex_enter(&spa_spare_lock);
927 found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl);
928 mutex_exit(&spa_spare_lock);
929
930 return (found);
931 }
932
933 void
934 spa_spare_activate(vdev_t *vd)
935 {
936 mutex_enter(&spa_spare_lock);
937 ASSERT(vd->vdev_isspare);
938 spa_aux_activate(vd, &spa_spare_avl);
939 mutex_exit(&spa_spare_lock);
940 }
941
942 /*
943 * Level 2 ARC devices are tracked globally for the same reasons as spares.
944 * Cache devices currently only support one pool per cache device, and so
945 * for these devices the aux reference count is currently unused beyond 1.
946 */
947
948 static int
949 spa_l2cache_compare(const void *a, const void *b)
950 {
951 return (spa_aux_compare(a, b));
952 }
953
954 void
955 spa_l2cache_add(vdev_t *vd)
956 {
957 mutex_enter(&spa_l2cache_lock);
958 ASSERT(!vd->vdev_isl2cache);
959 spa_aux_add(vd, &spa_l2cache_avl);
960 vd->vdev_isl2cache = B_TRUE;
961 mutex_exit(&spa_l2cache_lock);
962 }
963
964 void
965 spa_l2cache_remove(vdev_t *vd)
966 {
967 mutex_enter(&spa_l2cache_lock);
968 ASSERT(vd->vdev_isl2cache);
969 spa_aux_remove(vd, &spa_l2cache_avl);
970 vd->vdev_isl2cache = B_FALSE;
971 mutex_exit(&spa_l2cache_lock);
972 }
973
974 boolean_t
975 spa_l2cache_exists(uint64_t guid, uint64_t *pool)
976 {
977 boolean_t found;
978
979 mutex_enter(&spa_l2cache_lock);
980 found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl);
981 mutex_exit(&spa_l2cache_lock);
982
983 return (found);
984 }
985
986 void
987 spa_l2cache_activate(vdev_t *vd)
988 {
989 mutex_enter(&spa_l2cache_lock);
990 ASSERT(vd->vdev_isl2cache);
991 spa_aux_activate(vd, &spa_l2cache_avl);
992 mutex_exit(&spa_l2cache_lock);
993 }
994
995 /*
996 * ==========================================================================
997 * SPA vdev locking
998 * ==========================================================================
999 */
1000
1001 /*
1002 * Lock the given spa_t for the purpose of adding or removing a vdev.
1003 * Grabs the global spa_namespace_lock plus the spa config lock for writing.
1004 * It returns the next transaction group for the spa_t.
1005 */
1006 uint64_t
1007 spa_vdev_enter(spa_t *spa)
1008 {
1009 mutex_enter(&spa->spa_vdev_top_lock);
1010 mutex_enter(&spa_namespace_lock);
1011 return (spa_vdev_config_enter(spa));
1012 }
1013
1014 /*
1015 * Internal implementation for spa_vdev_enter(). Used when a vdev
1016 * operation requires multiple syncs (i.e. removing a device) while
1017 * keeping the spa_namespace_lock held.
1018 */
1019 uint64_t
1020 spa_vdev_config_enter(spa_t *spa)
1021 {
1022 ASSERT(MUTEX_HELD(&spa_namespace_lock));
1023
1024 spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
1025
1026 return (spa_last_synced_txg(spa) + 1);
1027 }
1028
1029 /*
1030 * Used in combination with spa_vdev_config_enter() to allow the syncing
1031 * of multiple transactions without releasing the spa_namespace_lock.
1032 */
1033 void
1034 spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error, char *tag)
1035 {
1036 ASSERT(MUTEX_HELD(&spa_namespace_lock));
1037
1038 boolean_t config_changed = B_FALSE;
1039
1040 ASSERT(txg > spa_last_synced_txg(spa));
1041
1042 spa->spa_pending_vdev = NULL;
1043
1044 /*
1045 * Reassess the DTLs.
1046 */
1047 vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE);
1048
1049 if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) {
1050 config_changed = B_TRUE;
1051 spa->spa_config_generation++;
1052 }
1053
1054 /*
1055 * Verify the metaslab classes.
1056 */
1057 ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0);
1058 ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0);
1059
1060 spa_config_exit(spa, SCL_ALL, spa);
1061
1062 /*
1063 * Panic the system if the specified tag requires it. This
1064 * is useful for ensuring that configurations are updated
1065 * transactionally.
1066 */
1067 if (zio_injection_enabled)
1068 zio_handle_panic_injection(spa, tag, 0);
1069
1070 /*
1071 * Note: this txg_wait_synced() is important because it ensures
1072 * that there won't be more than one config change per txg.
1073 * This allows us to use the txg as the generation number.
1074 */
1075 if (error == 0)
1076 txg_wait_synced(spa->spa_dsl_pool, txg);
1077
1078 if (vd != NULL) {
1079 ASSERT(!vd->vdev_detached || vd->vdev_dtl_sm == NULL);
1080 spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
1081 vdev_free(vd);
1082 spa_config_exit(spa, SCL_ALL, spa);
1083 }
1084
1085 /*
1086 * If the config changed, update the config cache.
1087 */
1088 if (config_changed)
1089 spa_config_sync(spa, B_FALSE, B_TRUE);
1090 }
1091
1092 /*
1093 * Unlock the spa_t after adding or removing a vdev. Besides undoing the
1094 * locking of spa_vdev_enter(), we also want to make sure the transactions have
1095 * synced to disk, and then update the global configuration cache with the new
1096 * information.
1097 */
1098 int
1099 spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
1100 {
1101 spa_vdev_config_exit(spa, vd, txg, error, FTAG);
1102 mutex_exit(&spa_namespace_lock);
1103 mutex_exit(&spa->spa_vdev_top_lock);
1104
1105 return (error);
1106 }
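
/*
 * A typical (hypothetical) vdev operation is bracketed as follows; 'vd'
 * may be NULL, or a vdev to be freed once the txg has synced:
 *
 *	uint64_t txg = spa_vdev_enter(spa);
 *	...modify the vdev tree, dirty the config...
 *	return (spa_vdev_exit(spa, vd, txg, error));
 */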
1107
1108 /*
1109 * Lock the given spa_t for the purpose of changing vdev state.
1110 */
1111 void
1112 spa_vdev_state_enter(spa_t *spa, int oplocks)
1113 {
1114 int locks = SCL_STATE_ALL | oplocks;
1115
1116 /*
1117 * Root pools may need to read from the underlying devfs filesystem
1118 * when opening up a vdev. Unfortunately, if we're holding the
1119 * SCL_ZIO lock it will result in a deadlock when we try to issue
1120 * the read from the root filesystem. Instead we "prefetch"
1121 * the associated vnodes that we need prior to opening the
1122 * underlying devices and cache them so that we can prevent
1123 * any I/O when we are doing the actual open.
1124 */
1125 if (spa_is_root(spa)) {
1126 int low = locks & ~(SCL_ZIO - 1);
1127 int high = locks & ~low;
1128
1129 spa_config_enter(spa, high, spa, RW_WRITER);
1130 vdev_hold(spa->spa_root_vdev);
1131 spa_config_enter(spa, low, spa, RW_WRITER);
1132 } else {
1133 spa_config_enter(spa, locks, spa, RW_WRITER);
1134 }
1135 spa->spa_vdev_locks = locks;
1136 }
1137
1138 int
1139 spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error)
1140 {
1141 boolean_t config_changed = B_FALSE;
1142
1143 if (vd != NULL || error == 0)
1144 vdev_dtl_reassess(vd ? vd->vdev_top : spa->spa_root_vdev,
1145 0, 0, B_FALSE);
1146
1147 if (vd != NULL) {
1148 vdev_state_dirty(vd->vdev_top);
1149 config_changed = B_TRUE;
1150 spa->spa_config_generation++;
1151 }
1152
1153 if (spa_is_root(spa))
1154 vdev_rele(spa->spa_root_vdev);
1155
1156 ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL);
1157 spa_config_exit(spa, spa->spa_vdev_locks, spa);
1158
1159 /*
1160 * If anything changed, wait for it to sync. This ensures that,
1161 * from the system administrator's perspective, zpool(1M) commands
1162 * are synchronous. This is important for things like zpool offline:
1163 * when the command completes, you expect no further I/O from ZFS.
1164 */
1165 if (vd != NULL)
1166 txg_wait_synced(spa->spa_dsl_pool, 0);
1167
1168 /*
1169 * If the config changed, update the config cache.
1170 */
1171 if (config_changed) {
1172 mutex_enter(&spa_namespace_lock);
1173 spa_config_sync(spa, B_FALSE, B_TRUE);
1174 mutex_exit(&spa_namespace_lock);
1175 }
1176
1177 return (error);
1178 }
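
/*
 * A hypothetical state-change path (e.g. online/offline) is bracketed the
 * same way; SCL_NONE requests no locks beyond the implied SCL_STATE_ALL:
 *
 *	spa_vdev_state_enter(spa, SCL_NONE);
 *	...locate the vdev and change its state...
 *	return (spa_vdev_state_exit(spa, vd, error));
 */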
1179
1180 /*
1181 * ==========================================================================
1182 * Miscellaneous functions
1183 * ==========================================================================
1184 */
1185
1186 void
1187 spa_activate_mos_feature(spa_t *spa, const char *feature, dmu_tx_t *tx)
1188 {
1189 if (!nvlist_exists(spa->spa_label_features, feature)) {
1190 fnvlist_add_boolean(spa->spa_label_features, feature);
1191 /*
1192 * When we are creating the pool (tx_txg==TXG_INITIAL), we can't
1193 * dirty the vdev config because the SCL_CONFIG lock is not held.
1194 * Thankfully, in this case we don't need to dirty the config
1195 * because it will be written out anyway when we finish
1196 * creating the pool.
1197 */
1198 if (tx->tx_txg != TXG_INITIAL)
1199 vdev_config_dirty(spa->spa_root_vdev);
1200 }
1201 }
1202
1203 void
1204 spa_deactivate_mos_feature(spa_t *spa, const char *feature)
1205 {
1206 if (nvlist_remove_all(spa->spa_label_features, feature) == 0)
1207 vdev_config_dirty(spa->spa_root_vdev);
1208 }
1209
1210 /*
1211 * Rename a spa_t.
1212 */
1213 int
1214 spa_rename(const char *name, const char *newname)
1215 {
1216 spa_t *spa;
1217 int err;
1218
1219 /*
1220 * Lookup the spa_t and grab the config lock for writing. We need to
1221 * actually open the pool so that we can sync out the necessary labels.
1222 * It's OK to call spa_open() with the namespace lock held because we
1223 * allow recursive calls for other reasons.
1224 */
1225 mutex_enter(&spa_namespace_lock);
1226 if ((err = spa_open(name, &spa, FTAG)) != 0) {
1227 mutex_exit(&spa_namespace_lock);
1228 return (err);
1229 }
1230
1231 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1232
1233 avl_remove(&spa_namespace_avl, spa);
1234 (void) strlcpy(spa->spa_name, newname, sizeof (spa->spa_name));
1235 avl_add(&spa_namespace_avl, spa);
1236
1237 /*
1238 * Sync all labels to disk with the new names by marking the root vdev
1239 * dirty and waiting for it to sync. It will pick up the new pool name
1240 * during the sync.
1241 */
1242 vdev_config_dirty(spa->spa_root_vdev);
1243
1244 spa_config_exit(spa, SCL_ALL, FTAG);
1245
1246 txg_wait_synced(spa->spa_dsl_pool, 0);
1247
1248 /*
1249 * Sync the updated config cache.
1250 */
1251 spa_config_sync(spa, B_FALSE, B_TRUE);
1252
1253 spa_close(spa, FTAG);
1254
1255 mutex_exit(&spa_namespace_lock);
1256
1257 return (0);
1258 }
1259
1260 /*
1261 * Return the spa_t associated with given pool_guid, if it exists. If
1262 * device_guid is non-zero, determine whether the pool exists *and* contains
1263 * a device with the specified device_guid.
1264 */
1265 spa_t *
1266 spa_by_guid(uint64_t pool_guid, uint64_t device_guid)
1267 {
1268 spa_t *spa;
1269 avl_tree_t *t = &spa_namespace_avl;
1270
1271 ASSERT(MUTEX_HELD(&spa_namespace_lock));
1272
1273 for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
1274 if (spa->spa_state == POOL_STATE_UNINITIALIZED)
1275 continue;
1276 if (spa->spa_root_vdev == NULL)
1277 continue;
1278 if (spa_guid(spa) == pool_guid) {
1279 if (device_guid == 0)
1280 break;
1281
1282 if (vdev_lookup_by_guid(spa->spa_root_vdev,
1283 device_guid) != NULL)
1284 break;
1285
1286 /*
1287 * Check any devices we may be in the process of adding.
1288 */
1289 if (spa->spa_pending_vdev) {
1290 if (vdev_lookup_by_guid(spa->spa_pending_vdev,
1291 device_guid) != NULL)
1292 break;
1293 }
1294 }
1295 }
1296
1297 return (spa);
1298 }
1299
1300 /*
1301 * Determine whether a pool with the given pool_guid exists.
1302 */
1303 boolean_t
1304 spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
1305 {
1306 return (spa_by_guid(pool_guid, device_guid) != NULL);
1307 }
1308
1309 char *
1310 spa_strdup(const char *s)
1311 {
1312 size_t len;
1313 char *new;
1314
1315 len = strlen(s);
1316 new = kmem_alloc(len + 1, KM_SLEEP);
1317 bcopy(s, new, len);
1318 new[len] = '\0';
1319
1320 return (new);
1321 }
1322
1323 void
1324 spa_strfree(char *s)
1325 {
1326 kmem_free(s, strlen(s) + 1);
1327 }
1328
1329 uint64_t
1330 spa_get_random(uint64_t range)
1331 {
1332 uint64_t r;
1333
1334 ASSERT(range != 0);
1335
1336 (void) random_get_pseudo_bytes((void *)&r, sizeof (uint64_t));
1337
1338 return (r % range);
1339 }
1340
1341 uint64_t
1342 spa_generate_guid(spa_t *spa)
1343 {
1344 uint64_t guid = spa_get_random(-1ULL);
1345
1346 if (spa != NULL) {
1347 while (guid == 0 || spa_guid_exists(spa_guid(spa), guid))
1348 guid = spa_get_random(-1ULL);
1349 } else {
1350 while (guid == 0 || spa_guid_exists(guid, 0))
1351 guid = spa_get_random(-1ULL);
1352 }
1353
1354 return (guid);
1355 }
1356
1357 void
1358 snprintf_blkptr(char *buf, size_t buflen, const blkptr_t *bp)
1359 {
1360 char type[256];
1361 char *checksum = NULL;
1362 char *compress = NULL;
1363
1364 if (bp != NULL) {
1365 if (BP_GET_TYPE(bp) & DMU_OT_NEWTYPE) {
1366 dmu_object_byteswap_t bswap =
1367 DMU_OT_BYTESWAP(BP_GET_TYPE(bp));
1368 (void) snprintf(type, sizeof (type), "bswap %s %s",
1369 DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) ?
1370 "metadata" : "data",
1371 dmu_ot_byteswap[bswap].ob_name);
1372 } else {
1373 (void) strlcpy(type, dmu_ot[BP_GET_TYPE(bp)].ot_name,
1374 sizeof (type));
1375 }
1376 if (!BP_IS_EMBEDDED(bp)) {
1377 checksum =
1378 zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name;
1379 }
1380 compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name;
1381 }
1382
1383 SNPRINTF_BLKPTR(snprintf, ' ', buf, buflen, bp, type, checksum,
1384 compress);
1385 }
1386
1387 void
1388 spa_freeze(spa_t *spa)
1389 {
1390 uint64_t freeze_txg = 0;
1391
1392 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1393 if (spa->spa_freeze_txg == UINT64_MAX) {
1394 freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE;
1395 spa->spa_freeze_txg = freeze_txg;
1396 }
1397 spa_config_exit(spa, SCL_ALL, FTAG);
1398 if (freeze_txg != 0)
1399 txg_wait_synced(spa_get_dsl(spa), freeze_txg);
1400 }
1401
1402 void
1403 zfs_panic_recover(const char *fmt, ...)
1404 {
1405 va_list adx;
1406
1407 va_start(adx, fmt);
1408 vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx);
1409 va_end(adx);
1410 }
1411
1412 /*
1413 * This is a stripped-down version of strtoull, suitable only for converting
1414 * lowercase hexadecimal numbers that don't overflow.
1415 */
1416 uint64_t
1417 strtonum(const char *str, char **nptr)
1418 {
1419 uint64_t val = 0;
1420 char c;
1421 int digit;
1422
1423 while ((c = *str) != '\0') {
1424 if (c >= '0' && c <= '9')
1425 digit = c - '0';
1426 else if (c >= 'a' && c <= 'f')
1427 digit = 10 + c - 'a';
1428 else
1429 break;
1430
1431 val *= 16;
1432 val += digit;
1433
1434 str++;
1435 }
1436
1437 if (nptr)
1438 *nptr = (char *)str;
1439
1440 return (val);
1441 }
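
/*
 * For example, strtonum("1a2b", &end) returns 0x1a2b (6699) and leaves
 * end pointing at the terminating NUL, while strtonum("ff!", &end)
 * returns 0xff and leaves end pointing at the '!'.
 */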
1442
1443 /*
1444 * ==========================================================================
1445 * Accessor functions
1446 * ==========================================================================
1447 */
1448
1449 boolean_t
1450 spa_shutting_down(spa_t *spa)
1451 {
1452 return (spa->spa_async_suspended);
1453 }
1454
1455 dsl_pool_t *
1456 spa_get_dsl(spa_t *spa)
1457 {
1458 return (spa->spa_dsl_pool);
1459 }
1460
1461 boolean_t
1462 spa_is_initializing(spa_t *spa)
1463 {
1464 return (spa->spa_is_initializing);
1465 }
1466
1467 blkptr_t *
1468 spa_get_rootblkptr(spa_t *spa)
1469 {
1470 return (&spa->spa_ubsync.ub_rootbp);
1471 }
1472
1473 void
1474 spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp)
1475 {
1476 spa->spa_uberblock.ub_rootbp = *bp;
1477 }
1478
1479 void
1480 spa_altroot(spa_t *spa, char *buf, size_t buflen)
1481 {
1482 if (spa->spa_root == NULL)
1483 buf[0] = '\0';
1484 else
1485 (void) strlcpy(buf, spa->spa_root, buflen);
1486 }
1487
1488 int
1489 spa_sync_pass(spa_t *spa)
1490 {
1491 return (spa->spa_sync_pass);
1492 }
1493
1494 char *
1495 spa_name(spa_t *spa)
1496 {
1497 return (spa->spa_name);
1498 }
1499
1500 uint64_t
1501 spa_guid(spa_t *spa)
1502 {
1503 dsl_pool_t *dp = spa_get_dsl(spa);
1504 uint64_t guid;
1505
1506 /*
1507 * If we fail to parse the config during spa_load(), we can go through
1508 * the error path (which posts an ereport) and end up here with no root
1509 * vdev. We stash the original pool guid in 'spa_config_guid' to handle
1510 * this case.
1511 */
1512 if (spa->spa_root_vdev == NULL)
1513 return (spa->spa_config_guid);
1514
1515 guid = spa->spa_last_synced_guid != 0 ?
1516 spa->spa_last_synced_guid : spa->spa_root_vdev->vdev_guid;
1517
1518 /*
1519 * Return the most recently synced out guid unless we're
1520 * in syncing context.
1521 */
1522 if (dp && dsl_pool_sync_context(dp))
1523 return (spa->spa_root_vdev->vdev_guid);
1524 else
1525 return (guid);
1526 }
1527
1528 uint64_t
1529 spa_load_guid(spa_t *spa)
1530 {
1531 /*
1532 * This is a GUID that exists solely as a reference for the
1533 * purposes of the ARC. It is generated at load time, and
1534 * is never written to persistent storage.
1535 */
1536 return (spa->spa_load_guid);
1537 }
1538
1539 uint64_t
1540 spa_last_synced_txg(spa_t *spa)
1541 {
1542 return (spa->spa_ubsync.ub_txg);
1543 }
1544
1545 uint64_t
1546 spa_first_txg(spa_t *spa)
1547 {
1548 return (spa->spa_first_txg);
1549 }
1550
1551 uint64_t
1552 spa_syncing_txg(spa_t *spa)
1553 {
1554 return (spa->spa_syncing_txg);
1555 }
1556
1557 pool_state_t
1558 spa_state(spa_t *spa)
1559 {
1560 return (spa->spa_state);
1561 }
1562
1563 spa_load_state_t
1564 spa_load_state(spa_t *spa)
1565 {
1566 return (spa->spa_load_state);
1567 }
1568
1569 uint64_t
1570 spa_freeze_txg(spa_t *spa)
1571 {
1572 return (spa->spa_freeze_txg);
1573 }
1574
1575 /* ARGSUSED */
1576 uint64_t
1577 spa_get_asize(spa_t *spa, uint64_t lsize)
1578 {
1579 return (lsize * spa_asize_inflation);
1580 }
1581
1582 uint64_t
1583 spa_get_dspace(spa_t *spa)
1584 {
1585 return (spa->spa_dspace);
1586 }
1587
1588 void
1589 spa_update_dspace(spa_t *spa)
1590 {
1591 spa->spa_dspace = metaslab_class_get_dspace(spa_normal_class(spa)) +
1592 ddt_get_dedup_dspace(spa);
1593 }
1594
1595 /*
1596 * Return the failure mode that has been set for this pool. The default
1597 * behavior will be to block all I/Os when a complete failure occurs.
1598 */
1599 uint8_t
1600 spa_get_failmode(spa_t *spa)
1601 {
1602 return (spa->spa_failmode);
1603 }
1604
1605 boolean_t
1606 spa_suspended(spa_t *spa)
1607 {
1608 return (spa->spa_suspended);
1609 }
1610
1611 uint64_t
1612 spa_version(spa_t *spa)
1613 {
1614 return (spa->spa_ubsync.ub_version);
1615 }
1616
1617 boolean_t
1618 spa_deflate(spa_t *spa)
1619 {
1620 return (spa->spa_deflate);
1621 }
1622
1623 metaslab_class_t *
1624 spa_normal_class(spa_t *spa)
1625 {
1626 return (spa->spa_normal_class);
1627 }
1628
1629 metaslab_class_t *
1630 spa_log_class(spa_t *spa)
1631 {
1632 return (spa->spa_log_class);
1633 }
1634
1635 int
1636 spa_max_replication(spa_t *spa)
1637 {
1638 /*
1639 * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to
1640 * handle BPs with more than one DVA allocated. Set our max
1641 * replication level accordingly.
1642 */
1643 if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS)
1644 return (1);
1645 return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override));
1646 }
1647
1648 int
1649 spa_prev_software_version(spa_t *spa)
1650 {
1651 return (spa->spa_prev_software_version);
1652 }
1653
1654 uint64_t
1655 spa_deadman_synctime(spa_t *spa)
1656 {
1657 return (spa->spa_deadman_synctime);
1658 }
1659
1660 uint64_t
1661 dva_get_dsize_sync(spa_t *spa, const dva_t *dva)
1662 {
1663 uint64_t asize = DVA_GET_ASIZE(dva);
1664 uint64_t dsize = asize;
1665
1666 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
1667
1668 if (asize != 0 && spa->spa_deflate) {
1669 vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
1670 dsize = (asize >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio;
1671 }
1672
1673 return (dsize);
1674 }
1675
1676 uint64_t
1677 bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp)
1678 {
1679 uint64_t dsize = 0;
1680
1681 for (int d = 0; d < BP_GET_NDVAS(bp); d++)
1682 dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);
1683
1684 return (dsize);
1685 }
1686
1687 uint64_t
1688 bp_get_dsize(spa_t *spa, const blkptr_t *bp)
1689 {
1690 uint64_t dsize = 0;
1691
1692 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
1693
1694 for (int d = 0; d < BP_GET_NDVAS(bp); d++)
1695 dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);
1696
1697 spa_config_exit(spa, SCL_VDEV, FTAG);
1698
1699 return (dsize);
1700 }
1701
1702 /*
1703 * ==========================================================================
1704 * Initialization and Termination
1705 * ==========================================================================
1706 */
1707
1708 static int
1709 spa_name_compare(const void *a1, const void *a2)
1710 {
1711 const spa_t *s1 = a1;
1712 const spa_t *s2 = a2;
1713 int s;
1714
1715 s = strcmp(s1->spa_name, s2->spa_name);
1716 if (s > 0)
1717 return (1);
1718 if (s < 0)
1719 return (-1);
1720 return (0);
1721 }
1722
1723 int
1724 spa_busy(void)
1725 {
1726 return (spa_active_count);
1727 }
1728
1729 void
1730 spa_boot_init(void)
1731 {
1732 spa_config_load();
1733 }
1734
1735 void
1736 spa_init(int mode)
1737 {
1738 mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL);
1739 mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL);
1740 mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL);
1741 cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL);
1742
1743 avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t),
1744 offsetof(spa_t, spa_avl));
1745
1746 avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t),
1747 offsetof(spa_aux_t, aux_avl));
1748
1749 avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t),
1750 offsetof(spa_aux_t, aux_avl));
1751
1752 spa_mode_global = mode;
1753
1754 #ifdef _KERNEL
1755 spa_arch_init();
1756 #else
1757 if (spa_mode_global != FREAD && dprintf_find_string("watch")) {
1758 arc_procfd = open("/proc/self/ctl", O_WRONLY);
1759 if (arc_procfd == -1) {
1760 perror("could not enable watchpoints: "
1761 "opening /proc/self/ctl failed: ");
1762 } else {
1763 arc_watch = B_TRUE;
1764 }
1765 }
1766 #endif
1767
1768 refcount_init();
1769 unique_init();
1770 range_tree_init();
1771 zio_init();
1772 dmu_init();
1773 zil_init();
1774 vdev_cache_stat_init();
1775 zfs_prop_init();
1776 zpool_prop_init();
1777 zpool_feature_init();
1778 spa_config_load();
1779 l2arc_start();
1780 }
1781
1782 void
1783 spa_fini(void)
1784 {
1785 l2arc_stop();
1786
1787 spa_evict_all();
1788
1789 vdev_cache_stat_fini();
1790 zil_fini();
1791 dmu_fini();
1792 zio_fini();
1793 range_tree_fini();
1794 unique_fini();
1795 refcount_fini();
1796
1797 avl_destroy(&spa_namespace_avl);
1798 avl_destroy(&spa_spare_avl);
1799 avl_destroy(&spa_l2cache_avl);
1800
1801 cv_destroy(&spa_namespace_cv);
1802 mutex_destroy(&spa_namespace_lock);
1803 mutex_destroy(&spa_spare_lock);
1804 mutex_destroy(&spa_l2cache_lock);
1805 }
1806
1807 /*
1808 * Return whether this pool has slogs. No locking needed.
1809 * It's not a problem if the wrong answer is returned, as it's only for
1810 * performance and not correctness.
1811 */
1812 boolean_t
1813 spa_has_slogs(spa_t *spa)
1814 {
1815 return (spa->spa_log_class->mc_rotor != NULL);
1816 }
1817
1818 spa_log_state_t
1819 spa_get_log_state(spa_t *spa)
1820 {
1821 return (spa->spa_log_state);
1822 }
1823
1824 void
1825 spa_set_log_state(spa_t *spa, spa_log_state_t state)
1826 {
1827 spa->spa_log_state = state;
1828 }
1829
1830 boolean_t
1831 spa_is_root(spa_t *spa)
1832 {
1833 return (spa->spa_is_root);
1834 }
1835
1836 boolean_t
1837 spa_writeable(spa_t *spa)
1838 {
1839 return (!!(spa->spa_mode & FWRITE));
1840 }
1841
1842 int
1843 spa_mode(spa_t *spa)
1844 {
1845 return (spa->spa_mode);
1846 }
1847
1848 uint64_t
1849 spa_bootfs(spa_t *spa)
1850 {
1851 return (spa->spa_bootfs);
1852 }
1853
1854 uint64_t
1855 spa_delegation(spa_t *spa)
1856 {
1857 return (spa->spa_delegation);
1858 }
1859
1860 objset_t *
1861 spa_meta_objset(spa_t *spa)
1862 {
1863 return (spa->spa_meta_objset);
1864 }
1865
1866 enum zio_checksum
1867 spa_dedup_checksum(spa_t *spa)
1868 {
1869 return (spa->spa_dedup_checksum);
1870 }
1871
1872 /*
1873 * Reset pool scan stat per scan pass (or reboot).
1874 */
1875 void
1876 spa_scan_stat_init(spa_t *spa)
1877 {
1878 /* data not stored on disk */
1879 spa->spa_scan_pass_start = gethrestime_sec();
1880 spa->spa_scan_pass_exam = 0;
1881 vdev_scan_stat_init(spa->spa_root_vdev);
1882 }
1883
1884 /*
1885 * Get scan stats for zpool status reports
1886 */
1887 int
1888 spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps)
1889 {
1890 dsl_scan_t *scn = spa->spa_dsl_pool ? spa->spa_dsl_pool->dp_scan : NULL;
1891
1892 if (scn == NULL || scn->scn_phys.scn_func == POOL_SCAN_NONE)
1893 return (SET_ERROR(ENOENT));
1894 bzero(ps, sizeof (pool_scan_stat_t));
1895
1896 /* data stored on disk */
1897 ps->pss_func = scn->scn_phys.scn_func;
1898 ps->pss_start_time = scn->scn_phys.scn_start_time;
1899 ps->pss_end_time = scn->scn_phys.scn_end_time;
1900 ps->pss_to_examine = scn->scn_phys.scn_to_examine;
1901 ps->pss_examined = scn->scn_phys.scn_examined;
1902 ps->pss_to_process = scn->scn_phys.scn_to_process;
1903 ps->pss_processed = scn->scn_phys.scn_processed;
1904 ps->pss_errors = scn->scn_phys.scn_errors;
1905 ps->pss_state = scn->scn_phys.scn_state;
1906
1907 /* data not stored on disk */
1908 ps->pss_pass_start = spa->spa_scan_pass_start;
1909 ps->pss_pass_exam = spa->spa_scan_pass_exam;
1910
1911 return (0);
1912 }
1913
1914 boolean_t
1915 spa_debug_enabled(spa_t *spa)
1916 {
1917 return (spa->spa_debug);
1918 }