/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/spa_boot.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_scan.h>
#include <sys/fs/zfs.h>
#include <sys/metaslab_impl.h>
#include <sys/arc.h>
#include <sys/ddt.h>
#include "zfs_prop.h"
#include "zfeature_common.h"

/*
 * SPA locking
 *
 * There are three basic locks for managing spa_t structures:
 *
 * spa_namespace_lock (global mutex)
 *
 *	This lock must be acquired to do any of the following:
 *
 *	- Lookup a spa_t by name
 *	- Add or remove a spa_t from the namespace
 *	- Increase spa_refcount from zero
 *	- Check if spa_refcount is zero
 *	- Rename a spa_t
 *	- Add/remove/attach/detach devices
 *	- Held for the duration of create/destroy/import/export
 *
 *	It does not need to handle recursion.  A create or destroy may
 *	reference objects (files or zvols) in other pools, but by
 *	definition they must have an existing reference, and will never need
 *	to lookup a spa_t by name.
 *
 * spa_refcount (per-spa refcount_t protected by mutex)
 *
 *	This reference count keeps track of any active users of the spa_t.
 *	The spa_t cannot be destroyed or freed while this is non-zero.
 *	Internally, the refcount is never really 'zero' - opening a pool
 *	implicitly keeps some references in the DMU.  Internally we check
 *	against spa_minref, but present the image of a zero/non-zero value
 *	to consumers.
 *
 * spa_config_lock[] (per-spa array of rwlocks)
 *
 *	This protects the spa_t from config changes, and must be held in
 *	the following circumstances:
 *
 *	- RW_READER to perform I/O to the spa
 *	- RW_WRITER to change the vdev config
 *
 * The locking order is fairly straightforward:
 *
 *		spa_namespace_lock	->	spa_refcount
 *
 *	The namespace lock must be acquired to increase the refcount from 0
 *	or to check if it is zero.
 *
 *		spa_refcount		->	spa_config_lock[]
 *
 *	There must be at least one valid reference on the spa_t to acquire
 *	the config lock.
 *
 *		spa_namespace_lock	->	spa_config_lock[]
 *
 *	The namespace lock must always be taken before the config lock.
 *
 *
 * The spa_namespace_lock can be acquired directly and is globally visible.
 *
 * The namespace is manipulated using the following functions, all of which
 * require the spa_namespace_lock to be held.
 *
 *	spa_lookup()		Lookup a spa_t by name.
 *
 *	spa_add()		Create a new spa_t in the namespace.
 *
 *	spa_remove()		Remove a spa_t from the namespace.  This also
 *				frees up any memory associated with the spa_t.
 *
 *	spa_next()		Returns the next spa_t in the system, or the
 *				first if NULL is passed.
 *
 *	spa_evict_all()		Shutdown and remove all spa_t structures in
 *				the system.
 *
 *	spa_guid_exists()	Determine whether a pool/device guid exists.
 *
 * The spa_refcount is manipulated using the following functions:
 *
 *	spa_open_ref()		Adds a reference to the given spa_t.  Must be
 *				called with spa_namespace_lock held if the
 *				refcount is currently zero.
 *
 *	spa_close()		Remove a reference from the spa_t.  This will
 *				not free the spa_t or remove it from the
 *				namespace.  No locking is required.
 *
 *	spa_refcount_zero()	Returns true if the refcount is currently
 *				zero.  Must be called with spa_namespace_lock
 *				held.
 *
 * The spa_config_lock[] is an array of rwlocks, ordered as follows:
 * SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV.
 * spa_config_lock[] is manipulated with spa_config_{enter,exit,held}().
 *
 * To read the configuration, it suffices to hold one of these locks as reader.
 * To modify the configuration, you must hold all locks as writer.  To modify
 * vdev state without altering the vdev tree's topology (e.g. online/offline),
 * you must hold SCL_STATE and SCL_ZIO as writer.
 *
 * We use these distinct config locks to avoid recursive lock entry.
 * For example, spa_sync() (which holds SCL_CONFIG as reader) induces
 * block allocations (SCL_ALLOC), which may require reading space maps
 * from disk (dmu_read() -> zio_read() -> SCL_ZIO).
 *
 * The spa config locks cannot be normal rwlocks because we need the
 * ability to hand off ownership.  For example, SCL_ZIO is acquired
 * by the issuing thread and later released by an interrupt thread.
 * They do, however, obey the usual write-wanted semantics to prevent
 * writer (i.e. system administrator) starvation.
 *
 * The lock acquisition rules are as follows:
 *
 * SCL_CONFIG
 *	Protects changes to the vdev tree topology, such as vdev
 *	add/remove/attach/detach.  Protects the dirty config list
 *	(spa_config_dirty_list) and the set of spares and l2arc devices.
 *
 * SCL_STATE
 *	Protects changes to pool state and vdev state, such as vdev
 *	online/offline/fault/degrade/clear.  Protects the dirty state list
 *	(spa_state_dirty_list) and global pool state (spa_state).
 *
 * SCL_ALLOC
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_alloc() and metaslab_claim().
 *
 * SCL_ZIO
 *	Held by bp-level zios (those which have no io_vd upon entry)
 *	to prevent changes to the vdev tree.  The bp-level zio implicitly
 *	protects all of its vdev child zios, which do not hold SCL_ZIO.
 *
 * SCL_FREE
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_free().  SCL_FREE is distinct from
 *	SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free
 *	blocks in zio_done() while another i/o that holds either
 *	SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete.
 *
 * SCL_VDEV
 *	Held as reader to prevent changes to the vdev tree during trivial
 *	inquiries such as bp_get_dsize().  SCL_VDEV is distinct from the
 *	other locks, and lower than all of them, to ensure that it's safe
 *	to acquire regardless of caller context.
 *
 * In addition, the following rules apply:
 *
 * (a)	spa_props_lock protects pool properties, spa_config and
 *	spa_config_list.  The lock ordering is SCL_CONFIG > spa_props_lock.
 *
 * (b)	I/O operations on leaf vdevs.  For any zio operation that takes
 *	an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(),
 *	or zio_write_phys() -- the caller must ensure that the config cannot
 *	change in the interim, and that the vdev cannot be reopened.
 *	SCL_STATE as reader suffices for both.
 *
 * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
 *
 *	spa_vdev_enter()	Acquire the namespace lock and the config lock
 *				for writing.
 *
 *	spa_vdev_exit()		Release the config lock, wait for all I/O
 *				to complete, sync the updated configs to the
 *				cache, and release the namespace lock.
 *
 * vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit().
 * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
 * locking is always based on spa_namespace_lock and spa_config_lock[].
 *
 * spa_rename() is also implemented within this file since it requires
 * manipulation of the namespace.
 */
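
/*
 * For illustration, the canonical consumer pattern takes one of the config
 * locks as reader around the work being performed.  A minimal sketch (not a
 * verbatim excerpt from any caller):
 *
 *	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
 *	dsize = bp_get_dsize_sync(spa, bp);
 *	spa_config_exit(spa, SCL_VDEV, FTAG);
 *
 * bp_get_dsize(), defined later in this file, wraps bp_get_dsize_sync()
 * in exactly this pattern.
 */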

static avl_tree_t spa_namespace_avl;
kmutex_t spa_namespace_lock;
static kcondvar_t spa_namespace_cv;
static int spa_active_count;
int spa_max_replication_override = SPA_DVAS_PER_BP;

static kmutex_t spa_spare_lock;
static avl_tree_t spa_spare_avl;
static kmutex_t spa_l2cache_lock;
static avl_tree_t spa_l2cache_avl;

kmem_cache_t *spa_buffer_pool;
int spa_mode_global;

#ifdef ZFS_DEBUG
/* Everything except dprintf and spa is on by default in debug builds */
int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SPA);
#else
int zfs_flags = 0;
#endif

/*
 * zfs_recover can be set to nonzero to attempt to recover from
 * otherwise-fatal errors, typically caused by on-disk corruption.  When
 * set, calls to zfs_panic_recover() will turn into warning messages.
 */
int zfs_recover = 0;

/*
 * Expiration time in milliseconds.  This value has two meanings.  First it is
 * used to determine when the spa_deadman() logic should fire.  By default the
 * spa_deadman() will fire if spa_sync() has not completed in 1000 seconds.
 * Secondly, the value determines if an I/O is considered "hung".  Any I/O that
 * has not completed in zfs_deadman_synctime_ms is considered "hung" resulting
 * in a system panic.
 */
uint64_t zfs_deadman_synctime_ms = 1000000ULL;

/*
 * Check time in milliseconds.  This defines the frequency at which we check
 * for hung I/O.
 */
uint64_t zfs_deadman_checktime_ms = 5000ULL;

/*
 * Override the zfs deadman behavior via /etc/system.  By default the
 * deadman is enabled except on VMware and sparc deployments.
 */
int zfs_deadman_enabled = -1;

/*
 * The worst case is single-sector max-parity RAID-Z blocks, in which
 * case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1)
 * times the size; so just assume that.  Add to this the fact that
 * we can have up to 3 DVAs per bp, and one more factor of 2 because
 * the block may be dittoed with up to 3 DVAs by ddt_sync().  All together,
 * the worst case is:
 *     (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24
 */
int spa_asize_inflation = 24;
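
/*
 * Working the arithmetic above: VDEV_RAIDZ_MAXPARITY is 3 and
 * SPA_DVAS_PER_BP is 3, so the worst case is (3 + 1) * 3 * 2 == 24.
 * spa_get_asize() below applies this factor to a logical size.
 */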

/*
 * ==========================================================================
 * SPA config locking
 * ==========================================================================
 */
static void
spa_config_lock_init(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
		cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
		refcount_create_untracked(&scl->scl_count);
		scl->scl_writer = NULL;
		scl->scl_write_wanted = 0;
	}
}

static void
spa_config_lock_destroy(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_destroy(&scl->scl_lock);
		cv_destroy(&scl->scl_cv);
		refcount_destroy(&scl->scl_count);
		ASSERT(scl->scl_writer == NULL);
		ASSERT(scl->scl_write_wanted == 0);
	}
}

int
spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			if (scl->scl_writer || scl->scl_write_wanted) {
				mutex_exit(&scl->scl_lock);
				/* Back out only the locks acquired so far. */
				spa_config_exit(spa, locks & ((1 << i) - 1),
				    tag);
				return (0);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			if (!refcount_is_zero(&scl->scl_count)) {
				mutex_exit(&scl->scl_lock);
				/* Back out only the locks acquired so far. */
				spa_config_exit(spa, locks & ((1 << i) - 1),
				    tag);
				return (0);
			}
			scl->scl_writer = curthread;
		}
		(void) refcount_add(&scl->scl_count, tag);
		mutex_exit(&scl->scl_lock);
	}
	return (1);
}
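
/*
 * Unlike spa_config_enter(), spa_config_tryenter() never blocks: it returns
 * nonzero only if every requested lock was acquired, and acquires nothing
 * otherwise.  A hedged usage sketch (the fallback is hypothetical, not drawn
 * from any actual caller):
 *
 *	if (!spa_config_tryenter(spa, SCL_ZIO, FTAG, RW_READER))
 *		return;		(retry or fail without blocking)
 *	...
 *	spa_config_exit(spa, SCL_ZIO, FTAG);
 */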

void
spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw)
{
	int wlocks_held = 0;

	ASSERT3U(SCL_LOCKS, <, sizeof (wlocks_held) * NBBY);

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (scl->scl_writer == curthread)
			wlocks_held |= (1 << i);
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			while (scl->scl_writer || scl->scl_write_wanted) {
				cv_wait(&scl->scl_cv, &scl->scl_lock);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			while (!refcount_is_zero(&scl->scl_count)) {
				scl->scl_write_wanted++;
				cv_wait(&scl->scl_cv, &scl->scl_lock);
				scl->scl_write_wanted--;
			}
			scl->scl_writer = curthread;
		}
		(void) refcount_add(&scl->scl_count, tag);
		mutex_exit(&scl->scl_lock);
	}
	ASSERT(wlocks_held <= locks);
}

void
spa_config_exit(spa_t *spa, int locks, void *tag)
{
	for (int i = SCL_LOCKS - 1; i >= 0; i--) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		ASSERT(!refcount_is_zero(&scl->scl_count));
		if (refcount_remove(&scl->scl_count, tag) == 0) {
			ASSERT(scl->scl_writer == NULL ||
			    scl->scl_writer == curthread);
			scl->scl_writer = NULL;	/* OK in either case */
			cv_broadcast(&scl->scl_cv);
		}
		mutex_exit(&scl->scl_lock);
	}
}

int
spa_config_held(spa_t *spa, int locks, krw_t rw)
{
	int locks_held = 0;

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		if ((rw == RW_READER && !refcount_is_zero(&scl->scl_count)) ||
		    (rw == RW_WRITER && scl->scl_writer == curthread))
			locks_held |= 1 << i;
	}

	return (locks_held);
}
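
/*
 * spa_config_held() is primarily used in assertions to verify a caller's
 * locking contract; for example, dva_get_dsize_sync() below checks:
 *
 *	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
 */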

/*
 * ==========================================================================
 * SPA namespace functions
 * ==========================================================================
 */

/*
 * Lookup the named spa_t in the AVL tree.  The spa_namespace_lock must be held.
 * Returns NULL if no matching spa_t is found.
 */
spa_t *
spa_lookup(const char *name)
{
	static spa_t search;	/* spa_t is large; don't allocate on stack */
	spa_t *spa;
	avl_index_t where;
	char *cp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	(void) strlcpy(search.spa_name, name, sizeof (search.spa_name));

	/*
	 * If it's a full dataset name, figure out the pool name and
	 * just use that.
	 */
	cp = strpbrk(search.spa_name, "/@");
	if (cp != NULL)
		*cp = '\0';

	spa = avl_find(&spa_namespace_avl, &search, &where);

	return (spa);
}
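
/*
 * For example, spa_lookup("tank/home@yesterday") truncates its search key
 * at the first '/' or '@' and returns the spa_t for the pool "tank", if
 * that pool is in the namespace.
 */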

/*
 * Fires when spa_sync has not completed within zfs_deadman_synctime_ms.
 * If the zfs_deadman_enabled flag is set then it inspects all vdev queues
 * looking for potentially hung I/Os.
 */
void
spa_deadman(void *arg)
{
	spa_t *spa = arg;

	/*
	 * Disable the deadman timer if the pool is suspended.
	 */
	if (spa_suspended(spa)) {
		VERIFY(cyclic_reprogram(spa->spa_deadman_cycid, CY_INFINITY));
		return;
	}

	zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu",
	    (gethrtime() - spa->spa_sync_starttime) / NANOSEC,
	    ++spa->spa_deadman_calls);
	if (zfs_deadman_enabled)
		vdev_deadman(spa->spa_root_vdev);
}

/*
 * Create an uninitialized spa_t with the given name.  Requires
 * spa_namespace_lock.  The caller must ensure that the spa_t doesn't already
 * exist by calling spa_lookup() first.
 */
spa_t *
spa_add(const char *name, nvlist_t *config, const char *altroot)
{
	spa_t *spa;
	spa_config_dirent_t *dp;
	cyc_handler_t hdlr;
	cyc_time_t when;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);

	mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_iokstat_lock, NULL, MUTEX_DEFAULT, NULL);

	cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);

	for (int t = 0; t < TXG_SIZE; t++)
		bplist_create(&spa->spa_free_bplist[t]);

	(void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name));
	spa->spa_state = POOL_STATE_UNINITIALIZED;
	spa->spa_freeze_txg = UINT64_MAX;
	spa->spa_final_txg = UINT64_MAX;
	spa->spa_load_max_txg = UINT64_MAX;
	spa->spa_proc = &p0;
	spa->spa_proc_state = SPA_PROC_NONE;

	hdlr.cyh_func = spa_deadman;
	hdlr.cyh_arg = spa;
	hdlr.cyh_level = CY_LOW_LEVEL;

	spa->spa_deadman_synctime = MSEC2NSEC(zfs_deadman_synctime_ms);

	/*
	 * This determines how often we need to check for hung I/Os after
	 * the cyclic has already fired.  Since checking for hung I/Os is
	 * an expensive operation, we don't want to check too frequently.
	 * Instead, we wait for 5 seconds before checking again.
	 */
	when.cyt_interval = MSEC2NSEC(zfs_deadman_checktime_ms);
	when.cyt_when = CY_INFINITY;
	mutex_enter(&cpu_lock);
	spa->spa_deadman_cycid = cyclic_add(&hdlr, &when);
	mutex_exit(&cpu_lock);

	refcount_create(&spa->spa_refcount);
	spa_config_lock_init(spa);

	avl_add(&spa_namespace_avl, spa);

	/*
	 * Set the alternate root, if there is one.
	 */
	if (altroot) {
		spa->spa_root = spa_strdup(altroot);
		spa_active_count++;
	}

	/*
	 * Every pool starts with the default cachefile.
	 */
	list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t),
	    offsetof(spa_config_dirent_t, scd_link));

	dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP);
	dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path);
	list_insert_head(&spa->spa_config_list, dp);

	VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME,
	    KM_SLEEP) == 0);

	if (config != NULL) {
		nvlist_t *features;

		if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
		    &features) == 0) {
			VERIFY(nvlist_dup(features, &spa->spa_label_features,
			    0) == 0);
		}

		VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
	}

	if (spa->spa_label_features == NULL) {
		VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME,
		    KM_SLEEP) == 0);
	}

	spa->spa_iokstat = kstat_create("zfs", 0, name,
	    "disk", KSTAT_TYPE_IO, 1, 0);
	if (spa->spa_iokstat) {
		spa->spa_iokstat->ks_lock = &spa->spa_iokstat_lock;
		kstat_install(spa->spa_iokstat);
	}

	spa->spa_debug = ((zfs_flags & ZFS_DEBUG_SPA) != 0);

	return (spa);
}

/*
 * Removes a spa_t from the namespace, freeing up any memory used.  Requires
 * spa_namespace_lock.  This is called only after the spa_t has been closed and
 * deactivated.
 */
void
spa_remove(spa_t *spa)
{
	spa_config_dirent_t *dp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);

	nvlist_free(spa->spa_config_splitting);

	avl_remove(&spa_namespace_avl, spa);
	cv_broadcast(&spa_namespace_cv);

	if (spa->spa_root) {
		spa_strfree(spa->spa_root);
		spa_active_count--;
	}

	while ((dp = list_head(&spa->spa_config_list)) != NULL) {
		list_remove(&spa->spa_config_list, dp);
		if (dp->scd_path != NULL)
			spa_strfree(dp->scd_path);
		kmem_free(dp, sizeof (spa_config_dirent_t));
	}

	list_destroy(&spa->spa_config_list);

	nvlist_free(spa->spa_label_features);
	nvlist_free(spa->spa_load_info);
	spa_config_set(spa, NULL);

	mutex_enter(&cpu_lock);
	if (spa->spa_deadman_cycid != CYCLIC_NONE)
		cyclic_remove(spa->spa_deadman_cycid);
	mutex_exit(&cpu_lock);
	spa->spa_deadman_cycid = CYCLIC_NONE;

	refcount_destroy(&spa->spa_refcount);

	spa_config_lock_destroy(spa);

	kstat_delete(spa->spa_iokstat);
	spa->spa_iokstat = NULL;

	for (int t = 0; t < TXG_SIZE; t++)
		bplist_destroy(&spa->spa_free_bplist[t]);

	cv_destroy(&spa->spa_async_cv);
	cv_destroy(&spa->spa_proc_cv);
	cv_destroy(&spa->spa_scrub_io_cv);
	cv_destroy(&spa->spa_suspend_cv);

	mutex_destroy(&spa->spa_async_lock);
	mutex_destroy(&spa->spa_errlist_lock);
	mutex_destroy(&spa->spa_errlog_lock);
	mutex_destroy(&spa->spa_history_lock);
	mutex_destroy(&spa->spa_proc_lock);
	mutex_destroy(&spa->spa_props_lock);
	mutex_destroy(&spa->spa_scrub_lock);
	mutex_destroy(&spa->spa_suspend_lock);
	mutex_destroy(&spa->spa_vdev_top_lock);
	mutex_destroy(&spa->spa_iokstat_lock);

	kmem_free(spa, sizeof (spa_t));
}

/*
 * Given a pool, return the next pool in the namespace, or NULL if there is
 * none.  If 'prev' is NULL, return the first pool.
 */
spa_t *
spa_next(spa_t *prev)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	if (prev)
		return (AVL_NEXT(&spa_namespace_avl, prev));
	else
		return (avl_first(&spa_namespace_avl));
}

/*
 * ==========================================================================
 * SPA refcount functions
 * ==========================================================================
 */

/*
 * Add a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_open_ref(spa_t *spa, void *tag)
{
	ASSERT(refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) refcount_add(&spa->spa_refcount, tag);
}

/*
 * Remove a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_close(spa_t *spa, void *tag)
{
	ASSERT(refcount_count(&spa->spa_refcount) > spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) refcount_remove(&spa->spa_refcount, tag);
}

/*
 * Check to see if the spa refcount is zero.  Must be called with
 * spa_namespace_lock held.  We really compare against spa_minref, which is
 * the number of references acquired when opening a pool.
 */
boolean_t
spa_refcount_zero(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	return (refcount_count(&spa->spa_refcount) == spa->spa_minref);
}

/*
 * ==========================================================================
 * SPA spare and l2cache tracking
 * ==========================================================================
 */

/*
 * Hot spares and cache devices are tracked using the same code below,
 * for 'auxiliary' devices.
 */

typedef struct spa_aux {
	uint64_t	aux_guid;
	uint64_t	aux_pool;
	avl_node_t	aux_avl;
	int		aux_count;
} spa_aux_t;

static int
spa_aux_compare(const void *a, const void *b)
{
	const spa_aux_t *sa = a;
	const spa_aux_t *sb = b;

	if (sa->aux_guid < sb->aux_guid)
		return (-1);
	else if (sa->aux_guid > sb->aux_guid)
		return (1);
	else
		return (0);
}

void
spa_aux_add(vdev_t *vd, avl_tree_t *avl)
{
	avl_index_t where;
	spa_aux_t search;
	spa_aux_t *aux;

	search.aux_guid = vd->vdev_guid;
	if ((aux = avl_find(avl, &search, &where)) != NULL) {
		aux->aux_count++;
	} else {
		aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP);
		aux->aux_guid = vd->vdev_guid;
		aux->aux_count = 1;
		avl_insert(avl, aux, where);
	}
}

void
spa_aux_remove(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search;
	spa_aux_t *aux;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	aux = avl_find(avl, &search, &where);

	ASSERT(aux != NULL);

	if (--aux->aux_count == 0) {
		avl_remove(avl, aux);
		kmem_free(aux, sizeof (spa_aux_t));
	} else if (aux->aux_pool == spa_guid(vd->vdev_spa)) {
		aux->aux_pool = 0ULL;
	}
}

boolean_t
spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl)
{
	spa_aux_t search, *found;

	search.aux_guid = guid;
	found = avl_find(avl, &search, NULL);

	if (pool) {
		if (found)
			*pool = found->aux_pool;
		else
			*pool = 0ULL;
	}

	if (refcnt) {
		if (found)
			*refcnt = found->aux_count;
		else
			*refcnt = 0;
	}

	return (found != NULL);
}

void
spa_aux_activate(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search, *found;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	found = avl_find(avl, &search, &where);
	ASSERT(found != NULL);
	ASSERT(found->aux_pool == 0ULL);

	found->aux_pool = spa_guid(vd->vdev_spa);
}
/*
 * Spares are tracked globally due to the following constraints:
 *
 *	- A spare may be part of multiple pools.
 *	- A spare may be added to a pool even if it's actively in use within
 *	  another pool.
 *	- A spare in use in any pool can only be the source of a replacement if
 *	  the target is a spare in the same pool.
 *
 * We keep track of all spares on the system through the use of a reference
 * counted AVL tree.  When a vdev is added as a spare, or used as a replacement
 * spare, then we bump the reference count in the AVL tree.  In addition, we set
 * the 'vdev_isspare' member to indicate that the device is a spare (active or
 * inactive).  When a spare is made active (used to replace a device in the
 * pool), we also keep track of which pool it's been made a part of.
 *
 * The 'spa_spare_lock' protects the AVL tree.  These functions are normally
 * called under the spa_namespace lock as part of vdev reconfiguration.  The
 * separate spare lock exists for the status query path, which does not need to
 * be completely consistent with respect to other vdev configuration changes.
 */
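
/*
 * For example, if one disk serves as a spare for both pool A and pool B,
 * its entry in the AVL tree has aux_count == 2.  Once it actively replaces
 * a device in pool A, aux_pool records pool A's guid, which
 * spa_spare_exists() then reports to callers.
 */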

static int
spa_spare_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_spare_add(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(!vd->vdev_isspare);
	spa_aux_add(vd, &spa_spare_avl);
	vd->vdev_isspare = B_TRUE;
	mutex_exit(&spa_spare_lock);
}

void
spa_spare_remove(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_remove(vd, &spa_spare_avl);
	vd->vdev_isspare = B_FALSE;
	mutex_exit(&spa_spare_lock);
}

boolean_t
spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt)
{
	boolean_t found;

	mutex_enter(&spa_spare_lock);
	found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);

	return (found);
}

void
spa_spare_activate(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_activate(vd, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);
}

/*
 * Level 2 ARC devices are tracked globally for the same reasons as spares.
 * Cache devices currently only support one pool per cache device, and so
 * for these devices the aux reference count is currently unused beyond 1.
 */

static int
spa_l2cache_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_l2cache_add(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(!vd->vdev_isl2cache);
	spa_aux_add(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_TRUE;
	mutex_exit(&spa_l2cache_lock);
}

void
spa_l2cache_remove(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_remove(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_FALSE;
	mutex_exit(&spa_l2cache_lock);
}

boolean_t
spa_l2cache_exists(uint64_t guid, uint64_t *pool)
{
	boolean_t found;

	mutex_enter(&spa_l2cache_lock);
	found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);

	return (found);
}

void
spa_l2cache_activate(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_activate(vd, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);
}

/*
 * ==========================================================================
 * SPA vdev locking
 * ==========================================================================
 */

/*
 * Lock the given spa_t for the purpose of adding or removing a vdev.
 * Grabs the global spa_namespace_lock plus the spa config lock for writing.
 * It returns the next transaction group for the spa_t.
 */
uint64_t
spa_vdev_enter(spa_t *spa)
{
	mutex_enter(&spa->spa_vdev_top_lock);
	mutex_enter(&spa_namespace_lock);
	return (spa_vdev_config_enter(spa));
}

/*
 * Internal implementation for spa_vdev_enter().  Used when a vdev
 * operation requires multiple syncs (i.e. removing a device) while
 * keeping the spa_namespace_lock held.
 */
uint64_t
spa_vdev_config_enter(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);

	return (spa_last_synced_txg(spa) + 1);
}

/*
 * Used in combination with spa_vdev_config_enter() to allow the syncing
 * of multiple transactions without releasing the spa_namespace_lock.
 */
void
spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error, char *tag)
{
	boolean_t config_changed = B_FALSE;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(txg > spa_last_synced_txg(spa));

	spa->spa_pending_vdev = NULL;

	/*
	 * Reassess the DTLs.
	 */
	vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE);

	if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) {
		config_changed = B_TRUE;
		spa->spa_config_generation++;
	}

	/*
	 * Verify the metaslab classes.
	 */
	ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0);
	ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0);

	spa_config_exit(spa, SCL_ALL, spa);

	/*
	 * Panic the system if the specified tag requires it.  This
	 * is useful for ensuring that configurations are updated
	 * transactionally.
	 */
	if (zio_injection_enabled)
		zio_handle_panic_injection(spa, tag, 0);

	/*
	 * Note: this txg_wait_synced() is important because it ensures
	 * that there won't be more than one config change per txg.
	 * This allows us to use the txg as the generation number.
	 */
	if (error == 0)
		txg_wait_synced(spa->spa_dsl_pool, txg);

	if (vd != NULL) {
		ASSERT(!vd->vdev_detached || vd->vdev_dtl_sm == NULL);
		spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
		vdev_free(vd);
		spa_config_exit(spa, SCL_ALL, spa);
	}

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed)
		spa_config_sync(spa, B_FALSE, B_TRUE);
}

/*
 * Unlock the spa_t after adding or removing a vdev.  Besides undoing the
 * locking of spa_vdev_enter(), we also want to make sure the transactions
 * have synced to disk, and then update the global configuration cache with
 * the new information.
 */
int
spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
{
	spa_vdev_config_exit(spa, vd, txg, error, FTAG);
	mutex_exit(&spa_namespace_lock);
	mutex_exit(&spa->spa_vdev_top_lock);

	return (error);
}
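
/*
 * A sketch of the canonical caller shape, modeled on vdev operations such
 * as spa_vdev_add():
 *
 *	uint64_t txg = spa_vdev_enter(spa);
 *	... modify the vdev tree ...
 *	return (spa_vdev_exit(spa, vd, txg, error));
 */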

/*
 * Lock the given spa_t for the purpose of changing vdev state.
 */
void
spa_vdev_state_enter(spa_t *spa, int oplocks)
{
	int locks = SCL_STATE_ALL | oplocks;

	/*
	 * Root pools may need to read from the underlying devfs filesystem
	 * when opening up a vdev.  Unfortunately if we're holding the
	 * SCL_ZIO lock it will result in a deadlock when we try to issue
	 * the read from the root filesystem.  Instead we "prefetch"
	 * the associated vnodes that we need prior to opening the
	 * underlying devices and cache them so that we can prevent
	 * any I/O when we are doing the actual open.
	 */
	if (spa_is_root(spa)) {
		int low = locks & ~(SCL_ZIO - 1);
		int high = locks & ~low;

		spa_config_enter(spa, high, spa, RW_WRITER);
		vdev_hold(spa->spa_root_vdev);
		spa_config_enter(spa, low, spa, RW_WRITER);
	} else {
		spa_config_enter(spa, locks, spa, RW_WRITER);
	}
	spa->spa_vdev_locks = locks;
}

int
spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error)
{
	boolean_t config_changed = B_FALSE;

	if (vd != NULL || error == 0)
		vdev_dtl_reassess(vd ? vd->vdev_top : spa->spa_root_vdev,
		    0, 0, B_FALSE);

	if (vd != NULL) {
		vdev_state_dirty(vd->vdev_top);
		config_changed = B_TRUE;
		spa->spa_config_generation++;
	}

	if (spa_is_root(spa))
		vdev_rele(spa->spa_root_vdev);

	ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL);
	spa_config_exit(spa, spa->spa_vdev_locks, spa);

	/*
	 * If anything changed, wait for it to sync.  This ensures that,
	 * from the system administrator's perspective, zpool(1M) commands
	 * are synchronous.  This is important for things like zpool offline:
	 * when the command completes, you expect no further I/O from ZFS.
	 */
	if (vd != NULL)
		txg_wait_synced(spa->spa_dsl_pool, 0);

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed) {
		mutex_enter(&spa_namespace_lock);
		spa_config_sync(spa, B_FALSE, B_TRUE);
		mutex_exit(&spa_namespace_lock);
	}

	return (error);
}
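
/*
 * Similarly, a sketch of the caller shape for vdev state changes, modeled
 * on operations such as vdev_online() and vdev_offline():
 *
 *	spa_vdev_state_enter(spa, SCL_NONE);
 *	... change vdev state ...
 *	return (spa_vdev_state_exit(spa, vd, error));
 */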

/*
 * ==========================================================================
 * Miscellaneous functions
 * ==========================================================================
 */

void
spa_activate_mos_feature(spa_t *spa, const char *feature)
{
	(void) nvlist_add_boolean(spa->spa_label_features, feature);
	vdev_config_dirty(spa->spa_root_vdev);
}

void
spa_deactivate_mos_feature(spa_t *spa, const char *feature)
{
	(void) nvlist_remove_all(spa->spa_label_features, feature);
	vdev_config_dirty(spa->spa_root_vdev);
}

/*
 * Rename a spa_t.
 */
int
spa_rename(const char *name, const char *newname)
{
	spa_t *spa;
	int err;

	/*
	 * Lookup the spa_t and grab the config lock for writing.  We need to
	 * actually open the pool so that we can sync out the necessary labels.
	 * It's OK to call spa_open() with the namespace lock held because we
	 * allow recursive calls for other reasons.
	 */
	mutex_enter(&spa_namespace_lock);
	if ((err = spa_open(name, &spa, FTAG)) != 0) {
		mutex_exit(&spa_namespace_lock);
		return (err);
	}

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

	avl_remove(&spa_namespace_avl, spa);
	(void) strlcpy(spa->spa_name, newname, sizeof (spa->spa_name));
	avl_add(&spa_namespace_avl, spa);

	/*
	 * Sync all labels to disk with the new names by marking the root vdev
	 * dirty and waiting for it to sync.  It will pick up the new pool name
	 * during the sync.
	 */
	vdev_config_dirty(spa->spa_root_vdev);

	spa_config_exit(spa, SCL_ALL, FTAG);

	txg_wait_synced(spa->spa_dsl_pool, 0);

	/*
	 * Sync the updated config cache.
	 */
	spa_config_sync(spa, B_FALSE, B_TRUE);

	spa_close(spa, FTAG);

	mutex_exit(&spa_namespace_lock);

	return (0);
}

/*
 * Return the spa_t associated with the given pool_guid, if it exists.  If
 * device_guid is non-zero, determine whether the pool exists *and* contains
 * a device with the specified device_guid.
 */
spa_t *
spa_by_guid(uint64_t pool_guid, uint64_t device_guid)
{
	spa_t *spa;
	avl_tree_t *t = &spa_namespace_avl;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
		if (spa->spa_state == POOL_STATE_UNINITIALIZED)
			continue;
		if (spa->spa_root_vdev == NULL)
			continue;
		if (spa_guid(spa) == pool_guid) {
			if (device_guid == 0)
				break;

			if (vdev_lookup_by_guid(spa->spa_root_vdev,
			    device_guid) != NULL)
				break;

			/*
			 * Check any devices we may be in the process of
			 * adding.
			 */
			if (spa->spa_pending_vdev) {
				if (vdev_lookup_by_guid(spa->spa_pending_vdev,
				    device_guid) != NULL)
					break;
			}
		}
	}

	return (spa);
}

/*
 * Determine whether a pool with the given pool_guid exists.
 */
boolean_t
spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
{
	return (spa_by_guid(pool_guid, device_guid) != NULL);
}

char *
spa_strdup(const char *s)
{
	size_t len;
	char *new;

	len = strlen(s);
	new = kmem_alloc(len + 1, KM_SLEEP);
	bcopy(s, new, len);
	new[len] = '\0';

	return (new);
}

void
spa_strfree(char *s)
{
	kmem_free(s, strlen(s) + 1);
}

uint64_t
spa_get_random(uint64_t range)
{
	uint64_t r;

	ASSERT(range != 0);

	(void) random_get_pseudo_bytes((void *)&r, sizeof (uint64_t));

	return (r % range);
}

uint64_t
spa_generate_guid(spa_t *spa)
{
	uint64_t guid = spa_get_random(-1ULL);

	if (spa != NULL) {
		while (guid == 0 || spa_guid_exists(spa_guid(spa), guid))
			guid = spa_get_random(-1ULL);
	} else {
		while (guid == 0 || spa_guid_exists(guid, 0))
			guid = spa_get_random(-1ULL);
	}

	return (guid);
}

void
sprintf_blkptr(char *buf, const blkptr_t *bp)
{
	char type[256];
	char *checksum = NULL;
	char *compress = NULL;

	if (bp != NULL) {
		if (BP_GET_TYPE(bp) & DMU_OT_NEWTYPE) {
			dmu_object_byteswap_t bswap =
			    DMU_OT_BYTESWAP(BP_GET_TYPE(bp));
			(void) snprintf(type, sizeof (type), "bswap %s %s",
			    DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) ?
			    "metadata" : "data",
			    dmu_ot_byteswap[bswap].ob_name);
		} else {
			(void) strlcpy(type, dmu_ot[BP_GET_TYPE(bp)].ot_name,
			    sizeof (type));
		}
		checksum = zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name;
		compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name;
	}

	SPRINTF_BLKPTR(snprintf, ' ', buf, bp, type, checksum, compress);
}

void
spa_freeze(spa_t *spa)
{
	uint64_t freeze_txg = 0;

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	if (spa->spa_freeze_txg == UINT64_MAX) {
		freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE;
		spa->spa_freeze_txg = freeze_txg;
	}
	spa_config_exit(spa, SCL_ALL, FTAG);
	if (freeze_txg != 0)
		txg_wait_synced(spa_get_dsl(spa), freeze_txg);
}

void
zfs_panic_recover(const char *fmt, ...)
{
	va_list adx;

	va_start(adx, fmt);
	vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx);
	va_end(adx);
}

/*
 * This is a stripped-down version of strtoull, suitable only for converting
 * lowercase hexadecimal numbers that don't overflow.
 */
uint64_t
strtonum(const char *str, char **nptr)
{
	uint64_t val = 0;
	char c;
	int digit;

	while ((c = *str) != '\0') {
		if (c >= '0' && c <= '9')
			digit = c - '0';
		else if (c >= 'a' && c <= 'f')
			digit = 10 + c - 'a';
		else
			break;

		val *= 16;
		val += digit;

		str++;
	}

	if (nptr)
		*nptr = (char *)str;

	return (val);
}
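
/*
 * For example, strtonum("1a2b", &end) returns 0x1a2b and leaves end
 * pointing at the terminating '\0', while strtonum("10q", &end) returns
 * 0x10 and leaves end pointing at the 'q'.
 */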

/*
 * ==========================================================================
 * Accessor functions
 * ==========================================================================
 */

boolean_t
spa_shutting_down(spa_t *spa)
{
	return (spa->spa_async_suspended);
}

dsl_pool_t *
spa_get_dsl(spa_t *spa)
{
	return (spa->spa_dsl_pool);
}

boolean_t
spa_is_initializing(spa_t *spa)
{
	return (spa->spa_is_initializing);
}

blkptr_t *
spa_get_rootblkptr(spa_t *spa)
{
	return (&spa->spa_ubsync.ub_rootbp);
}

void
spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp)
{
	spa->spa_uberblock.ub_rootbp = *bp;
}

void
spa_altroot(spa_t *spa, char *buf, size_t buflen)
{
	if (spa->spa_root == NULL)
		buf[0] = '\0';
	else
		(void) strncpy(buf, spa->spa_root, buflen);
}

int
spa_sync_pass(spa_t *spa)
{
	return (spa->spa_sync_pass);
}

char *
spa_name(spa_t *spa)
{
	return (spa->spa_name);
}

uint64_t
spa_guid(spa_t *spa)
{
	dsl_pool_t *dp = spa_get_dsl(spa);
	uint64_t guid;

	/*
	 * If we fail to parse the config during spa_load(), we can go through
	 * the error path (which posts an ereport) and end up here with no root
	 * vdev.  We stash the original pool guid in 'spa_config_guid' to handle
	 * this case.
	 */
	if (spa->spa_root_vdev == NULL)
		return (spa->spa_config_guid);

	guid = spa->spa_last_synced_guid != 0 ?
	    spa->spa_last_synced_guid : spa->spa_root_vdev->vdev_guid;

	/*
	 * Return the most recently synced out guid unless we're
	 * in syncing context.
	 */
	if (dp && dsl_pool_sync_context(dp))
		return (spa->spa_root_vdev->vdev_guid);
	else
		return (guid);
}

uint64_t
spa_load_guid(spa_t *spa)
{
	/*
	 * This is a GUID that exists solely as a reference for the
	 * purposes of the arc.  It is generated at load time, and
	 * is never written to persistent storage.
	 */
	return (spa->spa_load_guid);
}

uint64_t
spa_last_synced_txg(spa_t *spa)
{
	return (spa->spa_ubsync.ub_txg);
}

uint64_t
spa_first_txg(spa_t *spa)
{
	return (spa->spa_first_txg);
}

uint64_t
spa_syncing_txg(spa_t *spa)
{
	return (spa->spa_syncing_txg);
}

pool_state_t
spa_state(spa_t *spa)
{
	return (spa->spa_state);
}

spa_load_state_t
spa_load_state(spa_t *spa)
{
	return (spa->spa_load_state);
}

uint64_t
spa_freeze_txg(spa_t *spa)
{
	return (spa->spa_freeze_txg);
}

/* ARGSUSED */
uint64_t
spa_get_asize(spa_t *spa, uint64_t lsize)
{
	return (lsize * spa_asize_inflation);
}
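
/*
 * For example, with the default spa_asize_inflation of 24, the worst-case
 * allocated-size estimate for a 4K logical write is
 * spa_get_asize(spa, 4096) == 98304 bytes (96K).
 */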

uint64_t
spa_get_dspace(spa_t *spa)
{
	return (spa->spa_dspace);
}

void
spa_update_dspace(spa_t *spa)
{
	spa->spa_dspace = metaslab_class_get_dspace(spa_normal_class(spa)) +
	    ddt_get_dedup_dspace(spa);
}

/*
 * Return the failure mode that has been set to this pool.  The default
 * behavior will be to block all I/Os when a complete failure occurs.
 */
uint8_t
spa_get_failmode(spa_t *spa)
{
	return (spa->spa_failmode);
}

boolean_t
spa_suspended(spa_t *spa)
{
	return (spa->spa_suspended);
}

uint64_t
spa_version(spa_t *spa)
{
	return (spa->spa_ubsync.ub_version);
}

boolean_t
spa_deflate(spa_t *spa)
{
	return (spa->spa_deflate);
}

metaslab_class_t *
spa_normal_class(spa_t *spa)
{
	return (spa->spa_normal_class);
}

metaslab_class_t *
spa_log_class(spa_t *spa)
{
	return (spa->spa_log_class);
}

int
spa_max_replication(spa_t *spa)
{
	/*
	 * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to
	 * handle BPs with more than one DVA allocated.  Set our max
	 * replication level accordingly.
	 */
	if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS)
		return (1);
	return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override));
}

int
spa_prev_software_version(spa_t *spa)
{
	return (spa->spa_prev_software_version);
}

uint64_t
spa_deadman_synctime(spa_t *spa)
{
	return (spa->spa_deadman_synctime);
}

uint64_t
dva_get_dsize_sync(spa_t *spa, const dva_t *dva)
{
	uint64_t asize = DVA_GET_ASIZE(dva);
	uint64_t dsize = asize;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

	if (asize != 0 && spa->spa_deflate) {
		vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
		dsize = (asize >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio;
	}

	return (dsize);
}

uint64_t
bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp)
{
	uint64_t dsize = 0;

	for (int d = 0; d < SPA_DVAS_PER_BP; d++)
		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);

	return (dsize);
}

uint64_t
bp_get_dsize(spa_t *spa, const blkptr_t *bp)
{
	uint64_t dsize = 0;

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

	for (int d = 0; d < SPA_DVAS_PER_BP; d++)
		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);

	spa_config_exit(spa, SCL_VDEV, FTAG);

	return (dsize);
}

/*
 * ==========================================================================
 * Initialization and Termination
 * ==========================================================================
 */

static int
spa_name_compare(const void *a1, const void *a2)
{
	const spa_t *s1 = a1;
	const spa_t *s2 = a2;
	int s;

	s = strcmp(s1->spa_name, s2->spa_name);
	if (s > 0)
		return (1);
	if (s < 0)
		return (-1);
	return (0);
}

int
spa_busy(void)
{
	return (spa_active_count);
}

void
spa_boot_init()
{
	spa_config_load();
}

void
spa_init(int mode)
{
	mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL);

	avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t),
	    offsetof(spa_t, spa_avl));

	avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t),
	    offsetof(spa_aux_t, aux_avl));

	avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t),
	    offsetof(spa_aux_t, aux_avl));

	spa_mode_global = mode;

#ifdef _KERNEL
	spa_arch_init();
#else
	if (spa_mode_global != FREAD && dprintf_find_string("watch")) {
		arc_procfd = open("/proc/self/ctl", O_WRONLY);
		if (arc_procfd == -1) {
			perror("could not enable watchpoints: "
			    "opening /proc/self/ctl failed: ");
		} else {
			arc_watch = B_TRUE;
		}
	}
#endif

	refcount_init();
	unique_init();
	range_tree_init();
	zio_init();
	dmu_init();
	zil_init();
	vdev_cache_stat_init();
	zfs_prop_init();
	zpool_prop_init();
	zpool_feature_init();
	spa_config_load();
	l2arc_start();
}

void
spa_fini(void)
{
	l2arc_stop();

	spa_evict_all();

	vdev_cache_stat_fini();
	zil_fini();
	dmu_fini();
	zio_fini();
	range_tree_fini();
	unique_fini();
	refcount_fini();

	avl_destroy(&spa_namespace_avl);
	avl_destroy(&spa_spare_avl);
	avl_destroy(&spa_l2cache_avl);

	cv_destroy(&spa_namespace_cv);
	mutex_destroy(&spa_namespace_lock);
	mutex_destroy(&spa_spare_lock);
	mutex_destroy(&spa_l2cache_lock);
}

/*
 * Return whether this pool has slogs.  No locking needed.
 * It's not a problem if the wrong answer is returned, as it's only for
 * performance and not correctness.
 */
boolean_t
spa_has_slogs(spa_t *spa)
{
	return (spa->spa_log_class->mc_rotor != NULL);
}

spa_log_state_t
spa_get_log_state(spa_t *spa)
{
	return (spa->spa_log_state);
}

void
spa_set_log_state(spa_t *spa, spa_log_state_t state)
{
	spa->spa_log_state = state;
}

boolean_t
spa_is_root(spa_t *spa)
{
	return (spa->spa_is_root);
}

boolean_t
spa_writeable(spa_t *spa)
{
	return (!!(spa->spa_mode & FWRITE));
}

int
spa_mode(spa_t *spa)
{
	return (spa->spa_mode);
}

uint64_t
spa_bootfs(spa_t *spa)
{
	return (spa->spa_bootfs);
}

uint64_t
spa_delegation(spa_t *spa)
{
	return (spa->spa_delegation);
}

objset_t *
spa_meta_objset(spa_t *spa)
{
	return (spa->spa_meta_objset);
}

enum zio_checksum
spa_dedup_checksum(spa_t *spa)
{
	return (spa->spa_dedup_checksum);
}

/*
 * Reset pool scan stat per scan pass (or reboot).
 */
void
spa_scan_stat_init(spa_t *spa)
{
	/* data not stored on disk */
	spa->spa_scan_pass_start = gethrestime_sec();
	spa->spa_scan_pass_exam = 0;
	vdev_scan_stat_init(spa->spa_root_vdev);
}

/*
 * Get scan stats for zpool status reports.
 */
int
spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps)
{
	dsl_scan_t *scn = spa->spa_dsl_pool ? spa->spa_dsl_pool->dp_scan : NULL;

	if (scn == NULL || scn->scn_phys.scn_func == POOL_SCAN_NONE)
		return (SET_ERROR(ENOENT));
	bzero(ps, sizeof (pool_scan_stat_t));

	/* data stored on disk */
	ps->pss_func = scn->scn_phys.scn_func;
	ps->pss_start_time = scn->scn_phys.scn_start_time;
	ps->pss_end_time = scn->scn_phys.scn_end_time;
	ps->pss_to_examine = scn->scn_phys.scn_to_examine;
	ps->pss_examined = scn->scn_phys.scn_examined;
	ps->pss_to_process = scn->scn_phys.scn_to_process;
	ps->pss_processed = scn->scn_phys.scn_processed;
	ps->pss_errors = scn->scn_phys.scn_errors;
	ps->pss_state = scn->scn_phys.scn_state;

	/* data not stored on disk */
	ps->pss_pass_start = spa->spa_scan_pass_start;
	ps->pss_pass_exam = spa->spa_scan_pass_exam;

	return (0);
}

boolean_t
spa_debug_enabled(spa_t *spa)
{
	return (spa->spa_debug);
}