/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
 * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
 */

/*
 * SPA: Storage Pool Allocator
 *
 * This file contains all the routines used when modifying on-disk SPA state.
 * This includes opening, importing, destroying, exporting a pool, and syncing a
 * pool.
 */

#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/ddt.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_objset.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/callb.h>
#include <sys/systeminfo.h>
#include <sys/spa_boot.h>
#include <sys/zfs_ioctl.h>
#include <sys/dsl_scan.h>
#include <sys/zfeature.h>
#include <sys/dsl_destroy.h>

#ifdef	_KERNEL
#include <sys/bootprops.h>
#include <sys/callb.h>
#include <sys/cpupart.h>
#include <sys/pool.h>
#include <sys/sysdc.h>
#include <sys/zone.h>
#endif	/* _KERNEL */

#include "zfs_prop.h"
#include "zfs_comutil.h"

/*
 * The interval, in seconds, at which failed configuration cache file writes
 * should be retried.
 */
static int zfs_ccw_retry_interval = 300;

typedef enum zti_modes {
	ZTI_MODE_FIXED,			/* value is # of threads (min 1) */
	ZTI_MODE_BATCH,			/* cpu-intensive; value is ignored */
	ZTI_MODE_NULL,			/* don't create a taskq */
	ZTI_NMODES
} zti_modes_t;

#define	ZTI_P(n, q)	{ ZTI_MODE_FIXED, (n), (q) }
#define	ZTI_BATCH	{ ZTI_MODE_BATCH, 0, 1 }
#define	ZTI_NULL	{ ZTI_MODE_NULL, 0, 0 }

#define	ZTI_N(n)	ZTI_P(n, 1)
#define	ZTI_ONE		ZTI_N(1)

typedef struct zio_taskq_info {
	zti_modes_t zti_mode;
	uint_t zti_value;
	uint_t zti_count;
} zio_taskq_info_t;

static const char *const zio_taskq_types[ZIO_TASKQ_TYPES] = {
	"issue", "issue_high", "intr", "intr_high"
};

/*
 * This table defines the taskq settings for each ZFS I/O type. When
 * initializing a pool, we use this table to create an appropriately sized
 * taskq. Some operations are low volume and therefore have a small, static
 * number of threads assigned to their taskqs using the ZTI_N(#) or ZTI_ONE
 * macros. Other operations process a large amount of data; the ZTI_BATCH
 * macro causes us to create a taskq oriented for throughput. Some operations
 * are so high frequency and short-lived that the taskq itself can become a
 * point of lock contention. The ZTI_P(#, #) macro indicates that we need an
 * additional degree of parallelism specified by the number of threads per
 * taskq and the number of taskqs; when dispatching an event in this case, the
 * particular taskq is chosen at random.
 *
 * The different taskq priorities are to handle the different contexts (issue
 * and interrupt) and then to reserve threads for ZIO_PRIORITY_NOW I/Os that
 * need to be handled with minimum delay.
 */
const zio_taskq_info_t zio_taskqs[ZIO_TYPES][ZIO_TASKQ_TYPES] = {
	/* ISSUE	ISSUE_HIGH	INTR		INTR_HIGH */
	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* NULL */
	{ ZTI_N(8),	ZTI_NULL,	ZTI_P(12, 8),	ZTI_NULL }, /* READ */
	{ ZTI_BATCH,	ZTI_N(5),	ZTI_N(8),	ZTI_N(5) }, /* WRITE */
	{ ZTI_P(12, 8),	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* FREE */
	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* CLAIM */
	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* IOCTL */
};
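
/*
 * Illustrative reading of the table above (not compiled; the values come
 * straight from the initializers): the READ row's INTR slot uses
 * ZTI_P(12, 8), which expands to
 *
 *	{ ZTI_MODE_FIXED, 12, 8 }
 *
 * i.e. eight discrete taskqs of twelve threads each, and
 * spa_taskq_dispatch_ent() below picks one of the eight at random for
 * every dispatch.
 */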

static void spa_sync_version(void *arg, dmu_tx_t *tx);
static void spa_sync_props(void *arg, dmu_tx_t *tx);
static boolean_t spa_has_active_shared_spare(spa_t *spa);
static int spa_load_impl(spa_t *spa, uint64_t, nvlist_t *config,
    spa_load_state_t state, spa_import_type_t type, boolean_t mosconfig,
    char **ereport);
static void spa_vdev_resilver_done(spa_t *spa);

uint_t		zio_taskq_batch_pct = 75;	/* 1 thread per cpu in pset */
id_t		zio_taskq_psrset_bind = PS_NONE;
boolean_t	zio_taskq_sysdc = B_TRUE;	/* use SDC scheduling class */
uint_t		zio_taskq_basedc = 80;		/* base duty cycle */

boolean_t	spa_create_process = B_TRUE;	/* no process ==> no sysdc */
extern int	zfs_sync_pass_deferred_free;

/*
 * This (illegal) pool name is used when temporarily importing a spa_t in order
 * to get the vdev stats associated with the imported devices.
 */
#define	TRYIMPORT_NAME	"$import"

/*
 * ==========================================================================
 * SPA properties routines
 * ==========================================================================
 */

/*
 * Add a (source=src, propname=propval) list to an nvlist.
 */
static void
spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, char *strval,
    uint64_t intval, zprop_source_t src)
{
	const char *propname = zpool_prop_to_name(prop);
	nvlist_t *propval;

	VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0);

	if (strval != NULL)
		VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0);
	else
		VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, intval) == 0);

	VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0);
	nvlist_free(propval);
}
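
/*
 * Example (illustrative, mirroring the calls in spa_prop_get_config()
 * below): adding the pool's capacity as
 *
 *	spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src);
 *
 * yields a nested nvlist of the form
 *
 *	"capacity" -> { ZPROP_SOURCE = src, ZPROP_VALUE = cap }
 */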

/*
 * Get property values from the spa configuration.
 */
static void
spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
{
	vdev_t *rvd = spa->spa_root_vdev;
	dsl_pool_t *pool = spa->spa_dsl_pool;
	uint64_t size;
	uint64_t alloc;
	uint64_t space;
	uint64_t cap, version;
	zprop_source_t src = ZPROP_SRC_NONE;
	spa_config_dirent_t *dp;

	ASSERT(MUTEX_HELD(&spa->spa_props_lock));

	if (rvd != NULL) {
		alloc = metaslab_class_get_alloc(spa_normal_class(spa));
		size = metaslab_class_get_space(spa_normal_class(spa));
		spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa_name(spa), 0, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_ALLOCATED, NULL, alloc, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_FREE, NULL,
		    size - alloc, src);

		space = 0;
		for (int c = 0; c < rvd->vdev_children; c++) {
			vdev_t *tvd = rvd->vdev_child[c];
			space += tvd->vdev_max_asize - tvd->vdev_asize;
		}
		spa_prop_add_list(*nvp, ZPOOL_PROP_EXPANDSZ, NULL, space,
		    src);

		spa_prop_add_list(*nvp, ZPOOL_PROP_READONLY, NULL,
		    (spa_mode(spa) == FREAD), src);

		cap = (size == 0) ? 0 : (alloc * 100 / size);
		spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src);

		spa_prop_add_list(*nvp, ZPOOL_PROP_DEDUPRATIO, NULL,
		    ddt_get_pool_dedup_ratio(spa), src);

		spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL,
		    rvd->vdev_state, src);

		version = spa_version(spa);
		if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION))
			src = ZPROP_SRC_DEFAULT;
		else
			src = ZPROP_SRC_LOCAL;
		spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL, version, src);
	}

	if (pool != NULL) {
		/*
		 * The $FREE directory was introduced in
		 * SPA_VERSION_DEADLISTS; when opening pools created before
		 * this version, freedir will be NULL.
		 */
		if (pool->dp_free_dir != NULL) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING, NULL,
			    pool->dp_free_dir->dd_phys->dd_used_bytes, src);
		} else {
			spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING,
			    NULL, 0, src);
		}

		if (pool->dp_leak_dir != NULL) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED, NULL,
			    pool->dp_leak_dir->dd_phys->dd_used_bytes, src);
		} else {
			spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED,
			    NULL, 0, src);
		}
	}

	spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);

	if (spa->spa_comment != NULL) {
		spa_prop_add_list(*nvp, ZPOOL_PROP_COMMENT, spa->spa_comment,
		    0, ZPROP_SRC_LOCAL);
	}

	if (spa->spa_root != NULL)
		spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root,
		    0, ZPROP_SRC_LOCAL);

	if ((dp = list_head(&spa->spa_config_list)) != NULL) {
		if (dp->scd_path == NULL) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
			    "none", 0, ZPROP_SRC_LOCAL);
		} else if (strcmp(dp->scd_path, spa_config_path) != 0) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
			    dp->scd_path, 0, ZPROP_SRC_LOCAL);
		}
	}
}

/*
 * Get zpool property values.
 */
int
spa_prop_get(spa_t *spa, nvlist_t **nvp)
{
	objset_t *mos = spa->spa_meta_objset;
	zap_cursor_t zc;
	zap_attribute_t za;
	int err;

	VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	mutex_enter(&spa->spa_props_lock);

	/*
	 * Get properties from the spa config.
	 */
	spa_prop_get_config(spa, nvp);

	/* If no pool property object, there are no more properties to get. */
	if (mos == NULL || spa->spa_pool_props_object == 0) {
		mutex_exit(&spa->spa_props_lock);
		return (0);
	}

	/*
	 * Get properties from the MOS pool property object.
	 */
	for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object);
	    (err = zap_cursor_retrieve(&zc, &za)) == 0;
	    zap_cursor_advance(&zc)) {
		uint64_t intval = 0;
		char *strval = NULL;
		zprop_source_t src = ZPROP_SRC_DEFAULT;
		zpool_prop_t prop;

		if ((prop = zpool_name_to_prop(za.za_name)) == ZPROP_INVAL)
			continue;

		switch (za.za_integer_length) {
		case 8:
			/* integer property */
			if (za.za_first_integer !=
			    zpool_prop_default_numeric(prop))
				src = ZPROP_SRC_LOCAL;

			if (prop == ZPOOL_PROP_BOOTFS) {
				dsl_pool_t *dp;
				dsl_dataset_t *ds = NULL;

				dp = spa_get_dsl(spa);
				dsl_pool_config_enter(dp, FTAG);
				if (err = dsl_dataset_hold_obj(dp,
				    za.za_first_integer, FTAG, &ds)) {
					dsl_pool_config_exit(dp, FTAG);
					break;
				}

				strval = kmem_alloc(
				    MAXNAMELEN + strlen(MOS_DIR_NAME) + 1,
				    KM_SLEEP);
				dsl_dataset_name(ds, strval);
				dsl_dataset_rele(ds, FTAG);
				dsl_pool_config_exit(dp, FTAG);
			} else {
				strval = NULL;
				intval = za.za_first_integer;
			}

			spa_prop_add_list(*nvp, prop, strval, intval, src);

			if (strval != NULL)
				kmem_free(strval,
				    MAXNAMELEN + strlen(MOS_DIR_NAME) + 1);

			break;

		case 1:
			/* string property */
			strval = kmem_alloc(za.za_num_integers, KM_SLEEP);
			err = zap_lookup(mos, spa->spa_pool_props_object,
			    za.za_name, 1, za.za_num_integers, strval);
			if (err) {
				kmem_free(strval, za.za_num_integers);
				break;
			}
			spa_prop_add_list(*nvp, prop, strval, 0, src);
			kmem_free(strval, za.za_num_integers);
			break;

		default:
			break;
		}
	}
	zap_cursor_fini(&zc);
	mutex_exit(&spa->spa_props_lock);
out:
	if (err && err != ENOENT) {
		nvlist_free(*nvp);
		*nvp = NULL;
		return (err);
	}

	return (0);
}
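
/*
 * Sketch of a caller (hypothetical; the real consumers live in the zfs
 * ioctl path): fetch the property nvlist, use it, and free it on success.
 *
 *	nvlist_t *nvp = NULL;
 *	if (spa_prop_get(spa, &nvp) == 0) {
 *		... walk nvp with nvlist_next_nvpair() ...
 *		nvlist_free(nvp);
 *	}
 */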

/*
 * Validate the given pool properties nvlist and modify the list
 * for the property values to be set.
 */
static int
spa_prop_validate(spa_t *spa, nvlist_t *props)
{
	nvpair_t *elem;
	int error = 0, reset_bootfs = 0;
	uint64_t objnum = 0;
	boolean_t has_feature = B_FALSE;

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		uint64_t intval;
		char *strval, *slash, *check, *fname;
		const char *propname = nvpair_name(elem);
		zpool_prop_t prop = zpool_name_to_prop(propname);

		switch (prop) {
		case ZPROP_INVAL:
			if (!zpool_prop_feature(propname)) {
				error = SET_ERROR(EINVAL);
				break;
			}

			/*
			 * Sanitize the input.
			 */
			if (nvpair_type(elem) != DATA_TYPE_UINT64) {
				error = SET_ERROR(EINVAL);
				break;
			}

			if (nvpair_value_uint64(elem, &intval) != 0) {
				error = SET_ERROR(EINVAL);
				break;
			}

			if (intval != 0) {
				error = SET_ERROR(EINVAL);
				break;
			}

			fname = strchr(propname, '@') + 1;
			if (zfeature_lookup_name(fname, NULL) != 0) {
				error = SET_ERROR(EINVAL);
				break;
			}

			has_feature = B_TRUE;
			break;

		case ZPOOL_PROP_VERSION:
			error = nvpair_value_uint64(elem, &intval);
			if (!error &&
			    (intval < spa_version(spa) ||
			    intval > SPA_VERSION_BEFORE_FEATURES ||
			    has_feature))
				error = SET_ERROR(EINVAL);
			break;

		case ZPOOL_PROP_DELEGATION:
		case ZPOOL_PROP_AUTOREPLACE:
		case ZPOOL_PROP_LISTSNAPS:
		case ZPOOL_PROP_AUTOEXPAND:
			error = nvpair_value_uint64(elem, &intval);
			if (!error && intval > 1)
				error = SET_ERROR(EINVAL);
			break;

		case ZPOOL_PROP_BOOTFS:
			/*
			 * If the pool version is less than SPA_VERSION_BOOTFS,
			 * or the pool is still being created (version == 0),
			 * the bootfs property cannot be set.
			 */
			if (spa_version(spa) < SPA_VERSION_BOOTFS) {
				error = SET_ERROR(ENOTSUP);
				break;
			}

			/*
			 * Make sure the vdev config is bootable
			 */
			if (!vdev_is_bootable(spa->spa_root_vdev)) {
				error = SET_ERROR(ENOTSUP);
				break;
			}

			reset_bootfs = 1;

			error = nvpair_value_string(elem, &strval);

			if (!error) {
				objset_t *os;
				uint64_t compress;

				if (strval == NULL || strval[0] == '\0') {
					objnum = zpool_prop_default_numeric(
					    ZPOOL_PROP_BOOTFS);
					break;
				}

				if (error = dmu_objset_hold(strval, FTAG, &os))
					break;

				/* Must be ZPL and not gzip compressed. */

				if (dmu_objset_type(os) != DMU_OST_ZFS) {
					error = SET_ERROR(ENOTSUP);
				} else if ((error =
				    dsl_prop_get_int_ds(dmu_objset_ds(os),
				    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
				    &compress)) == 0 &&
				    !BOOTFS_COMPRESS_VALID(compress)) {
					error = SET_ERROR(ENOTSUP);
				} else {
					objnum = dmu_objset_id(os);
				}
				dmu_objset_rele(os, FTAG);
			}
			break;

		case ZPOOL_PROP_FAILUREMODE:
			error = nvpair_value_uint64(elem, &intval);
			if (!error && (intval < ZIO_FAILURE_MODE_WAIT ||
			    intval > ZIO_FAILURE_MODE_PANIC))
				error = SET_ERROR(EINVAL);

			/*
			 * This is a special case which only occurs when
			 * the pool has completely failed. This allows
			 * the user to change the in-core failmode property
			 * without syncing it out to disk (I/Os might
			 * currently be blocked). We do this by returning
			 * EIO to the caller (spa_prop_set) to trick it
			 * into thinking we encountered a property validation
			 * error.
			 */
			if (!error && spa_suspended(spa)) {
				spa->spa_failmode = intval;
				error = SET_ERROR(EIO);
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if ((error = nvpair_value_string(elem, &strval)) != 0)
				break;

			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				error = SET_ERROR(EINVAL);
				break;
			}

			slash = strrchr(strval, '/');
			ASSERT(slash != NULL);

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0)
				error = SET_ERROR(EINVAL);
			break;

		case ZPOOL_PROP_COMMENT:
			if ((error = nvpair_value_string(elem, &strval)) != 0)
				break;
			for (check = strval; *check != '\0'; check++) {
				/*
				 * The kernel doesn't have an easy isprint()
				 * check. For this kernel check, we merely
				 * check ASCII apart from DEL. Fix this if
				 * there is an easy-to-use kernel isprint().
				 */
				if (*check >= 0x7f) {
					error = SET_ERROR(EINVAL);
					break;
				}
			}
			if (strlen(strval) > ZPROP_MAX_COMMENT)
				error = E2BIG;
			break;

		case ZPOOL_PROP_DEDUPDITTO:
			if (spa_version(spa) < SPA_VERSION_DEDUP)
				error = SET_ERROR(ENOTSUP);
			else
				error = nvpair_value_uint64(elem, &intval);
			if (error == 0 &&
			    intval != 0 && intval < ZIO_DEDUPDITTO_MIN)
				error = SET_ERROR(EINVAL);
			break;
		}

		if (error)
			break;
	}

	if (!error && reset_bootfs) {
		error = nvlist_remove(props,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING);

		if (!error) {
			error = nvlist_add_uint64(props,
			    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum);
		}
	}

	return (error);
}
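
/*
 * Worked example (hypothetical dataset name): validating a props nvlist of
 *
 *	{ "bootfs" = "tank/ROOT/be" }
 *
 * holds the named objset, checks that it is ZPL and not gzip-compressed,
 * and rewrites the pair in place as
 *
 *	{ "bootfs" = <object number of tank/ROOT/be> }
 *
 * so that the sync task can store the numeric value in the MOS.
 */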

void
spa_configfile_set(spa_t *spa, nvlist_t *nvp, boolean_t need_sync)
{
	char *cachefile;
	spa_config_dirent_t *dp;

	if (nvlist_lookup_string(nvp, zpool_prop_to_name(ZPOOL_PROP_CACHEFILE),
	    &cachefile) != 0)
		return;

	dp = kmem_alloc(sizeof (spa_config_dirent_t),
	    KM_SLEEP);

	if (cachefile[0] == '\0')
		dp->scd_path = spa_strdup(spa_config_path);
	else if (strcmp(cachefile, "none") == 0)
		dp->scd_path = NULL;
	else
		dp->scd_path = spa_strdup(cachefile);

	list_insert_head(&spa->spa_config_list, dp);
	if (need_sync)
		spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
}

int
spa_prop_set(spa_t *spa, nvlist_t *nvp)
{
	int error;
	nvpair_t *elem = NULL;
	boolean_t need_sync = B_FALSE;

	if ((error = spa_prop_validate(spa, nvp)) != 0)
		return (error);

	while ((elem = nvlist_next_nvpair(nvp, elem)) != NULL) {
		zpool_prop_t prop = zpool_name_to_prop(nvpair_name(elem));

		if (prop == ZPOOL_PROP_CACHEFILE ||
		    prop == ZPOOL_PROP_ALTROOT ||
		    prop == ZPOOL_PROP_READONLY)
			continue;

		if (prop == ZPOOL_PROP_VERSION || prop == ZPROP_INVAL) {
			uint64_t ver;

			if (prop == ZPOOL_PROP_VERSION) {
				VERIFY(nvpair_value_uint64(elem, &ver) == 0);
			} else {
				ASSERT(zpool_prop_feature(nvpair_name(elem)));
				ver = SPA_VERSION_FEATURES;
				need_sync = B_TRUE;
			}

			/* Save time if the version is already set. */
			if (ver == spa_version(spa))
				continue;

			/*
			 * In addition to the pool directory object, we might
			 * create the pool properties object, the features for
			 * read object, the features for write object, or the
			 * feature descriptions object.
			 */
			error = dsl_sync_task(spa->spa_name, NULL,
			    spa_sync_version, &ver, 6);
			if (error)
				return (error);
			continue;
		}

		need_sync = B_TRUE;
		break;
	}

	if (need_sync) {
		return (dsl_sync_task(spa->spa_name, NULL, spa_sync_props,
		    nvp, 6));
	}

	return (0);
}

/*
 * If the bootfs property value is dsobj, clear it.
 */
void
spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
{
	if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
		VERIFY(zap_remove(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0);
		spa->spa_bootfs = 0;
	}
}

/*ARGSUSED*/
static int
spa_change_guid_check(void *arg, dmu_tx_t *tx)
{
	uint64_t *newguid = arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	vdev_t *rvd = spa->spa_root_vdev;
	uint64_t vdev_state;

	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
	vdev_state = rvd->vdev_state;
	spa_config_exit(spa, SCL_STATE, FTAG);

	if (vdev_state != VDEV_STATE_HEALTHY)
		return (SET_ERROR(ENXIO));

	ASSERT3U(spa_guid(spa), !=, *newguid);

	return (0);
}

static void
spa_change_guid_sync(void *arg, dmu_tx_t *tx)
{
	uint64_t *newguid = arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	uint64_t oldguid;
	vdev_t *rvd = spa->spa_root_vdev;

	oldguid = spa_guid(spa);

	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
	rvd->vdev_guid = *newguid;
	rvd->vdev_guid_sum += (*newguid - oldguid);
	vdev_config_dirty(rvd);
	spa_config_exit(spa, SCL_STATE, FTAG);

	spa_history_log_internal(spa, "guid change", tx, "old=%llu new=%llu",
	    oldguid, *newguid);
}

/*
 * Change the GUID for the pool.  This is done so that we can later
 * re-import a pool built from a clone of our own vdevs.  We will modify
 * the root vdev's guid, our own pool guid, and then mark all of our
 * vdevs dirty.  Note that we must make sure that all our vdevs are
 * online when we do this, or else any vdevs that weren't present
 * would be orphaned from our pool.  We are also going to issue a
 * sysevent to update any watchers.
 */
int
spa_change_guid(spa_t *spa)
{
	int error;
	uint64_t guid;

	mutex_enter(&spa->spa_vdev_top_lock);
	mutex_enter(&spa_namespace_lock);
	guid = spa_generate_guid(NULL);

	error = dsl_sync_task(spa->spa_name, spa_change_guid_check,
	    spa_change_guid_sync, &guid, 5);

	if (error == 0) {
		spa_config_sync(spa, B_FALSE, B_TRUE);
		spa_event_notify(spa, NULL, ESC_ZFS_POOL_REGUID);
	}

	mutex_exit(&spa_namespace_lock);
	mutex_exit(&spa->spa_vdev_top_lock);

	return (error);
}
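
/*
 * Usage sketch (assumed caller; in practice this is reached from the
 * "zpool reguid" ioctl path):
 *
 *	if ((error = spa_change_guid(spa)) == 0)
 *		... the pool now reports the freshly generated guid ...
 *
 * The check/sync pair above runs as a single dsl_sync_task, so the guid
 * change is atomic with respect to the txg in which it syncs.
 */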

/*
 * ==========================================================================
 * SPA state manipulation (open/create/destroy/import/export)
 * ==========================================================================
 */

static int
spa_error_entry_compare(const void *a, const void *b)
{
	spa_error_entry_t *sa = (spa_error_entry_t *)a;
	spa_error_entry_t *sb = (spa_error_entry_t *)b;
	int ret;

	ret = bcmp(&sa->se_bookmark, &sb->se_bookmark,
	    sizeof (zbookmark_t));

	if (ret < 0)
		return (-1);
	else if (ret > 0)
		return (1);
	else
		return (0);
}

/*
 * Utility function which retrieves copies of the current logs and
 * re-initializes them in the process.
 */
void
spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
{
	ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));

	bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t));
	bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t));

	avl_create(&spa->spa_errlist_scrub,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
	avl_create(&spa->spa_errlist_last,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
}

static void
spa_taskqs_init(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
{
	const zio_taskq_info_t *ztip = &zio_taskqs[t][q];
	enum zti_modes mode = ztip->zti_mode;
	uint_t value = ztip->zti_value;
	uint_t count = ztip->zti_count;
	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
	char name[32];
	uint_t flags = 0;
	boolean_t batch = B_FALSE;

	if (mode == ZTI_MODE_NULL) {
		tqs->stqs_count = 0;
		tqs->stqs_taskq = NULL;
		return;
	}

	ASSERT3U(count, >, 0);

	tqs->stqs_count = count;
	tqs->stqs_taskq = kmem_alloc(count * sizeof (taskq_t *), KM_SLEEP);

	switch (mode) {
	case ZTI_MODE_FIXED:
		ASSERT3U(value, >=, 1);
		value = MAX(value, 1);
		break;

	case ZTI_MODE_BATCH:
		batch = B_TRUE;
		flags |= TASKQ_THREADS_CPU_PCT;
		value = zio_taskq_batch_pct;
		break;

	default:
		panic("unrecognized mode for %s_%s taskq (%u:%u) in "
		    "spa_activate()",
		    zio_type_name[t], zio_taskq_types[q], mode, value);
		break;
	}

	for (uint_t i = 0; i < count; i++) {
		taskq_t *tq;

		if (count > 1) {
			(void) snprintf(name, sizeof (name), "%s_%s_%u",
			    zio_type_name[t], zio_taskq_types[q], i);
		} else {
			(void) snprintf(name, sizeof (name), "%s_%s",
			    zio_type_name[t], zio_taskq_types[q]);
		}

		if (zio_taskq_sysdc && spa->spa_proc != &p0) {
			if (batch)
				flags |= TASKQ_DC_BATCH;

			tq = taskq_create_sysdc(name, value, 50, INT_MAX,
			    spa->spa_proc, zio_taskq_basedc, flags);
		} else {
			pri_t pri = maxclsyspri;
			/*
			 * The write issue taskq can be extremely CPU
			 * intensive.  Run it at slightly lower priority
			 * than the other taskqs.
			 */
			if (t == ZIO_TYPE_WRITE && q == ZIO_TASKQ_ISSUE)
				pri--;

			tq = taskq_create_proc(name, value, pri, 50,
			    INT_MAX, spa->spa_proc, flags);
		}

		tqs->stqs_taskq[i] = tq;
	}
}

static void
spa_taskqs_fini(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
{
	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];

	if (tqs->stqs_taskq == NULL) {
		ASSERT0(tqs->stqs_count);
		return;
	}

	for (uint_t i = 0; i < tqs->stqs_count; i++) {
		ASSERT3P(tqs->stqs_taskq[i], !=, NULL);
		taskq_destroy(tqs->stqs_taskq[i]);
	}

	kmem_free(tqs->stqs_taskq, tqs->stqs_count * sizeof (taskq_t *));
	tqs->stqs_taskq = NULL;
}

/*
 * Dispatch a task to the appropriate taskq for the ZFS I/O type and priority.
 * Note that a type may have multiple discrete taskqs to avoid lock contention
 * on the taskq itself. In that case we choose which taskq at random by using
 * the low bits of gethrtime().
 */
void
spa_taskq_dispatch_ent(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
    task_func_t *func, void *arg, uint_t flags, taskq_ent_t *ent)
{
	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
	taskq_t *tq;

	ASSERT3P(tqs->stqs_taskq, !=, NULL);
	ASSERT3U(tqs->stqs_count, !=, 0);

	if (tqs->stqs_count == 1) {
		tq = tqs->stqs_taskq[0];
	} else {
		tq = tqs->stqs_taskq[gethrtime() % tqs->stqs_count];
	}

	taskq_dispatch_ent(tq, func, arg, flags, ent);
}
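
/*
 * Illustrative dispatch (a sketch modeled on the zio pipeline; the actual
 * caller lives in zio.c): queueing a write on its issue taskq might look
 * like
 *
 *	spa_taskq_dispatch_ent(spa, ZIO_TYPE_WRITE, ZIO_TASKQ_ISSUE,
 *	    (task_func_t *)zio_execute, zio, 0, &zio->io_tqent);
 */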

static void
spa_create_zio_taskqs(spa_t *spa)
{
	for (int t = 0; t < ZIO_TYPES; t++) {
		for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
			spa_taskqs_init(spa, t, q);
		}
	}
}

#ifdef _KERNEL
static void
spa_thread(void *arg)
{
	callb_cpr_t cprinfo;

	spa_t *spa = arg;
	user_t *pu = PTOU(curproc);

	CALLB_CPR_INIT(&cprinfo, &spa->spa_proc_lock, callb_generic_cpr,
	    spa->spa_name);

	ASSERT(curproc != &p0);
	(void) snprintf(pu->u_psargs, sizeof (pu->u_psargs),
	    "zpool-%s", spa->spa_name);
	(void) strlcpy(pu->u_comm, pu->u_psargs, sizeof (pu->u_comm));

	/* bind this thread to the requested psrset */
	if (zio_taskq_psrset_bind != PS_NONE) {
		pool_lock();
		mutex_enter(&cpu_lock);
		mutex_enter(&pidlock);
		mutex_enter(&curproc->p_lock);

		if (cpupart_bind_thread(curthread, zio_taskq_psrset_bind,
		    0, NULL, NULL) == 0) {
			curthread->t_bind_pset = zio_taskq_psrset_bind;
		} else {
			cmn_err(CE_WARN,
			    "Couldn't bind process for zfs pool \"%s\" to "
			    "pset %d\n", spa->spa_name, zio_taskq_psrset_bind);
		}

		mutex_exit(&curproc->p_lock);
		mutex_exit(&pidlock);
		mutex_exit(&cpu_lock);
		pool_unlock();
	}

	if (zio_taskq_sysdc) {
		sysdc_thread_enter(curthread, 100, 0);
	}

	spa->spa_proc = curproc;
	spa->spa_did = curthread->t_did;

	spa_create_zio_taskqs(spa);

	mutex_enter(&spa->spa_proc_lock);
	ASSERT(spa->spa_proc_state == SPA_PROC_CREATED);

	spa->spa_proc_state = SPA_PROC_ACTIVE;
	cv_broadcast(&spa->spa_proc_cv);

	CALLB_CPR_SAFE_BEGIN(&cprinfo);
	while (spa->spa_proc_state == SPA_PROC_ACTIVE)
		cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
	CALLB_CPR_SAFE_END(&cprinfo, &spa->spa_proc_lock);

	ASSERT(spa->spa_proc_state == SPA_PROC_DEACTIVATE);
	spa->spa_proc_state = SPA_PROC_GONE;
	spa->spa_proc = &p0;
	cv_broadcast(&spa->spa_proc_cv);
	CALLB_CPR_EXIT(&cprinfo);	/* drops spa_proc_lock */

	mutex_enter(&curproc->p_lock);
	lwp_exit();
}
#endif

/*
 * Activate an uninitialized pool.
 */
static void
spa_activate(spa_t *spa, int mode)
{
	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);

	spa->spa_state = POOL_STATE_ACTIVE;
	spa->spa_mode = mode;

	spa->spa_normal_class = metaslab_class_create(spa, zfs_metaslab_ops);
	spa->spa_log_class = metaslab_class_create(spa, zfs_metaslab_ops);

	/* Try to create a covering process */
	mutex_enter(&spa->spa_proc_lock);
	ASSERT(spa->spa_proc_state == SPA_PROC_NONE);
	ASSERT(spa->spa_proc == &p0);
	spa->spa_did = 0;

	/* Only create a process if we're going to be around a while. */
	if (spa_create_process && strcmp(spa->spa_name, TRYIMPORT_NAME) != 0) {
		if (newproc(spa_thread, (caddr_t)spa, syscid, maxclsyspri,
		    NULL, 0) == 0) {
			spa->spa_proc_state = SPA_PROC_CREATED;
			while (spa->spa_proc_state == SPA_PROC_CREATED) {
				cv_wait(&spa->spa_proc_cv,
				    &spa->spa_proc_lock);
			}
			ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
			ASSERT(spa->spa_proc != &p0);
			ASSERT(spa->spa_did != 0);
		} else {
#ifdef _KERNEL
			cmn_err(CE_WARN,
			    "Couldn't create process for zfs pool \"%s\"\n",
			    spa->spa_name);
#endif
		}
	}
	mutex_exit(&spa->spa_proc_lock);

	/* If we didn't create a process, we need to create our taskqs. */
	if (spa->spa_proc == &p0) {
		spa_create_zio_taskqs(spa);
	}

	list_create(&spa->spa_config_dirty_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_config_dirty_node));
	list_create(&spa->spa_state_dirty_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_state_dirty_node));

	txg_list_create(&spa->spa_vdev_txg_list,
	    offsetof(struct vdev, vdev_txg_node));

	avl_create(&spa->spa_errlist_scrub,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
	avl_create(&spa->spa_errlist_last,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
}

/*
 * Opposite of spa_activate().
 */
static void
spa_deactivate(spa_t *spa)
{
	ASSERT(spa->spa_sync_on == B_FALSE);
	ASSERT(spa->spa_dsl_pool == NULL);
	ASSERT(spa->spa_root_vdev == NULL);
	ASSERT(spa->spa_async_zio_root == NULL);
	ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);

	txg_list_destroy(&spa->spa_vdev_txg_list);

	list_destroy(&spa->spa_config_dirty_list);
	list_destroy(&spa->spa_state_dirty_list);

	for (int t = 0; t < ZIO_TYPES; t++) {
		for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
			spa_taskqs_fini(spa, t, q);
		}
	}

	metaslab_class_destroy(spa->spa_normal_class);
	spa->spa_normal_class = NULL;

	metaslab_class_destroy(spa->spa_log_class);
	spa->spa_log_class = NULL;

	/*
	 * If this was part of an import or the open otherwise failed, we may
	 * still have errors left in the queues.  Empty them just in case.
	 */
	spa_errlog_drain(spa);

	avl_destroy(&spa->spa_errlist_scrub);
	avl_destroy(&spa->spa_errlist_last);

	spa->spa_state = POOL_STATE_UNINITIALIZED;

	mutex_enter(&spa->spa_proc_lock);
	if (spa->spa_proc_state != SPA_PROC_NONE) {
		ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
		spa->spa_proc_state = SPA_PROC_DEACTIVATE;
		cv_broadcast(&spa->spa_proc_cv);
		while (spa->spa_proc_state == SPA_PROC_DEACTIVATE) {
			ASSERT(spa->spa_proc != &p0);
			cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
		}
		ASSERT(spa->spa_proc_state == SPA_PROC_GONE);
		spa->spa_proc_state = SPA_PROC_NONE;
	}
	ASSERT(spa->spa_proc == &p0);
	mutex_exit(&spa->spa_proc_lock);

	/*
	 * We want to make sure spa_thread() has actually exited the ZFS
	 * module, so that the module can't be unloaded out from underneath
	 * it.
	 */
	if (spa->spa_did != 0) {
		thread_join(spa->spa_did);
		spa->spa_did = 0;
	}
}

/*
 * Verify a pool configuration, and construct the vdev tree appropriately. This
 * will create all the necessary vdevs in the appropriate layout, with each vdev
 * in the CLOSED state. This will prep the pool before open/creation/import.
 * All vdev validation is done by the vdev_alloc() routine.
 */
static int
spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
    uint_t id, int atype)
{
	nvlist_t **child;
	uint_t children;
	int error;

	if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
		return (error);

	if ((*vdp)->vdev_ops->vdev_op_leaf)
		return (0);

	error = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children);

	if (error == ENOENT)
		return (0);

	if (error) {
		vdev_free(*vdp);
		*vdp = NULL;
		return (SET_ERROR(EINVAL));
	}

	for (int c = 0; c < children; c++) {
		vdev_t *vd;
		if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
		    atype)) != 0) {
			vdev_free(*vdp);
			*vdp = NULL;
			return (error);
		}
	}

	ASSERT(*vdp != NULL);

	return (0);
}
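
/*
 * Example config shape (illustrative): for a two-way mirror, the
 * ZPOOL_CONFIG_VDEV_TREE nvlist parses into
 *
 *	root (id 0)
 *	    mirror (top-level vdev, id 0)
 *		disk
 *		disk
 *
 * where each level's ZPOOL_CONFIG_CHILDREN array drives the recursion in
 * spa_config_parse() above; leaf vdevs have no children array and return
 * immediately.
 */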

/*
 * Opposite of spa_load().
 */
static void
spa_unload(spa_t *spa)
{
	int i;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	/*
	 * Stop async tasks.
	 */
	spa_async_suspend(spa);

	/*
	 * Stop syncing.
	 */
	if (spa->spa_sync_on) {
		txg_sync_stop(spa->spa_dsl_pool);
		spa->spa_sync_on = B_FALSE;
	}

	/*
	 * Wait for any outstanding async I/O to complete.
	 */
	if (spa->spa_async_zio_root != NULL) {
		(void) zio_wait(spa->spa_async_zio_root);
		spa->spa_async_zio_root = NULL;
	}

	bpobj_close(&spa->spa_deferred_bpobj);

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

	/*
	 * Close all vdevs.
	 */
	if (spa->spa_root_vdev)
		vdev_free(spa->spa_root_vdev);
	ASSERT(spa->spa_root_vdev == NULL);

	/*
	 * Close the dsl pool.
	 */
	if (spa->spa_dsl_pool) {
		dsl_pool_close(spa->spa_dsl_pool);
		spa->spa_dsl_pool = NULL;
		spa->spa_meta_objset = NULL;
	}

	ddt_unload(spa);

	/*
	 * Drop and purge level 2 cache
	 */
	spa_l2cache_drop(spa);

	for (i = 0; i < spa->spa_spares.sav_count; i++)
		vdev_free(spa->spa_spares.sav_vdevs[i]);
	if (spa->spa_spares.sav_vdevs) {
		kmem_free(spa->spa_spares.sav_vdevs,
		    spa->spa_spares.sav_count * sizeof (void *));
		spa->spa_spares.sav_vdevs = NULL;
	}
	if (spa->spa_spares.sav_config) {
		nvlist_free(spa->spa_spares.sav_config);
		spa->spa_spares.sav_config = NULL;
	}
	spa->spa_spares.sav_count = 0;

	for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
		vdev_clear_stats(spa->spa_l2cache.sav_vdevs[i]);
		vdev_free(spa->spa_l2cache.sav_vdevs[i]);
	}
	if (spa->spa_l2cache.sav_vdevs) {
		kmem_free(spa->spa_l2cache.sav_vdevs,
		    spa->spa_l2cache.sav_count * sizeof (void *));
		spa->spa_l2cache.sav_vdevs = NULL;
	}
	if (spa->spa_l2cache.sav_config) {
		nvlist_free(spa->spa_l2cache.sav_config);
		spa->spa_l2cache.sav_config = NULL;
	}
	spa->spa_l2cache.sav_count = 0;

	spa->spa_async_suspended = 0;

	if (spa->spa_comment != NULL) {
		spa_strfree(spa->spa_comment);
		spa->spa_comment = NULL;
	}

	spa_config_exit(spa, SCL_ALL, FTAG);
}

/*
 * Load (or re-load) the current list of vdevs describing the active spares for
 * this pool.  When this is called, we have some form of basic information in
 * 'spa_spares.sav_config'.  We parse this into vdevs, try to open them, and
 * then re-generate a more complete list including status information.
 */
static void
spa_load_spares(spa_t *spa)
{
	nvlist_t **spares;
	uint_t nspares;
	int i;
	vdev_t *vd, *tvd;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	/*
	 * First, close and free any existing spare vdevs.
	 */
	for (i = 0; i < spa->spa_spares.sav_count; i++) {
		vd = spa->spa_spares.sav_vdevs[i];

		/* Undo the call to spa_activate() below */
		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
		    B_FALSE)) != NULL && tvd->vdev_isspare)
			spa_spare_remove(tvd);
		vdev_close(vd);
		vdev_free(vd);
	}

	if (spa->spa_spares.sav_vdevs)
		kmem_free(spa->spa_spares.sav_vdevs,
		    spa->spa_spares.sav_count * sizeof (void *));

	if (spa->spa_spares.sav_config == NULL)
		nspares = 0;
	else
		VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);

	spa->spa_spares.sav_count = (int)nspares;
	spa->spa_spares.sav_vdevs = NULL;

	if (nspares == 0)
		return;

	/*
	 * Construct the array of vdevs, opening them to get status in the
	 * process.  For each spare, there are potentially two different
	 * vdev_t structures associated with it: one in the list of spares
	 * (used only for basic validation purposes) and one in the active
	 * vdev configuration (if it's spared in).  During this phase we open
	 * and validate each vdev on the spare list.  If the vdev also exists
	 * in the active configuration, then we also mark this vdev as an
	 * active spare.
	 */
	spa->spa_spares.sav_vdevs = kmem_alloc(nspares * sizeof (void *),
	    KM_SLEEP);
	for (i = 0; i < spa->spa_spares.sav_count; i++) {
		VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
		    VDEV_ALLOC_SPARE) == 0);
		ASSERT(vd != NULL);

		spa->spa_spares.sav_vdevs[i] = vd;

		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
		    B_FALSE)) != NULL) {
			if (!tvd->vdev_isspare)
				spa_spare_add(tvd);

			/*
			 * We only mark the spare active if we were
			 * successfully able to load the vdev.  Otherwise,
			 * importing a pool with a bad active spare would
			 * result in strange behavior, because multiple pools
			 * would think the spare is actively in use.
			 *
			 * There is a vulnerability here to an equally bizarre
			 * circumstance, where a dead active spare is later
			 * brought back to life (onlined or otherwise).  Given
			 * the rarity of this scenario, and the extra
			 * complexity it adds, we ignore the possibility.
			 */
			if (!vdev_is_dead(tvd))
				spa_spare_activate(tvd);
		}

		vd->vdev_top = vd;
		vd->vdev_aux = &spa->spa_spares;

		if (vdev_open(vd) != 0)
			continue;

		if (vdev_validate_aux(vd) == 0)
			spa_spare_add(vd);
	}

	/*
	 * Recompute the stashed list of spares, with status information
	 * this time.
	 */
	VERIFY(nvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES,
	    DATA_TYPE_NVLIST_ARRAY) == 0);

	spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *),
	    KM_SLEEP);
	for (i = 0; i < spa->spa_spares.sav_count; i++)
		spares[i] = vdev_config_generate(spa,
		    spa->spa_spares.sav_vdevs[i], B_TRUE, VDEV_CONFIG_SPARE);
	VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
	    ZPOOL_CONFIG_SPARES, spares, spa->spa_spares.sav_count) == 0);
	for (i = 0; i < spa->spa_spares.sav_count; i++)
		nvlist_free(spares[i]);
	kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *));
}

/*
 * Load (or re-load) the current list of vdevs describing the active l2cache
 * for this pool.  When this is called, we have some form of basic information
 * in 'spa_l2cache.sav_config'.  We parse this into vdevs, try to open them,
 * and then re-generate a more complete list including status information.
 * Devices which are already active have their details maintained, and are
 * not re-opened.
 */
static void
spa_load_l2cache(spa_t *spa)
{
	nvlist_t **l2cache;
	uint_t nl2cache;
	int i, j, oldnvdevs;
	uint64_t guid;
	vdev_t *vd, **oldvdevs, **newvdevs;
	spa_aux_vdev_t *sav = &spa->spa_l2cache;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	if (sav->sav_config != NULL) {
		VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
		    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
		newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP);
	} else {
		nl2cache = 0;
		newvdevs = NULL;
	}

	oldvdevs = sav->sav_vdevs;
	oldnvdevs = sav->sav_count;
	sav->sav_vdevs = NULL;
	sav->sav_count = 0;

	/*
	 * Process new nvlist of vdevs.
	 */
	for (i = 0; i < nl2cache; i++) {
		VERIFY(nvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID,
		    &guid) == 0);

		newvdevs[i] = NULL;
		for (j = 0; j < oldnvdevs; j++) {
			vd = oldvdevs[j];
			if (vd != NULL && guid == vd->vdev_guid) {
				/*
				 * Retain previous vdev for add/remove ops.
				 */
				newvdevs[i] = vd;
				oldvdevs[j] = NULL;
				break;
			}
		}

		if (newvdevs[i] == NULL) {
			/*
			 * Create new vdev
			 */
			VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
			    VDEV_ALLOC_L2CACHE) == 0);
			ASSERT(vd != NULL);
			newvdevs[i] = vd;

			/*
			 * Commit this vdev as an l2cache device,
			 * even if it fails to open.
			 */
			spa_l2cache_add(vd);

			vd->vdev_top = vd;
			vd->vdev_aux = sav;

			spa_l2cache_activate(vd);

			if (vdev_open(vd) != 0)
				continue;

			(void) vdev_validate_aux(vd);

			if (!vdev_is_dead(vd))
				l2arc_add_vdev(spa, vd);
		}
	}

	/*
	 * Purge vdevs that were dropped
	 */
	for (i = 0; i < oldnvdevs; i++) {
		uint64_t pool;

		vd = oldvdevs[i];
		if (vd != NULL) {
			ASSERT(vd->vdev_isl2cache);

			if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
			    pool != 0ULL && l2arc_vdev_present(vd))
				l2arc_remove_vdev(vd);
			vdev_clear_stats(vd);
			vdev_free(vd);
		}
	}

	if (oldvdevs)
		kmem_free(oldvdevs, oldnvdevs * sizeof (void *));

	if (sav->sav_config == NULL)
		goto out;

	sav->sav_vdevs = newvdevs;
	sav->sav_count = (int)nl2cache;

	/*
	 * Recompute the stashed list of l2cache devices, with status
	 * information this time.
	 */
	VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE,
	    DATA_TYPE_NVLIST_ARRAY) == 0);

	l2cache = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
	for (i = 0; i < sav->sav_count; i++)
		l2cache[i] = vdev_config_generate(spa,
		    sav->sav_vdevs[i], B_TRUE, VDEV_CONFIG_L2CACHE);
	VERIFY(nvlist_add_nvlist_array(sav->sav_config,
	    ZPOOL_CONFIG_L2CACHE, l2cache, sav->sav_count) == 0);
out:
	for (i = 0; i < sav->sav_count; i++)
		nvlist_free(l2cache[i]);
	if (sav->sav_count)
		kmem_free(l2cache, sav->sav_count * sizeof (void *));
}

static int
load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
{
	dmu_buf_t *db;
	char *packed = NULL;
	size_t nvsize = 0;
	int error;
	*value = NULL;

	VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
	nvsize = *(uint64_t *)db->db_data;
	dmu_buf_rele(db, FTAG);

	packed = kmem_alloc(nvsize, KM_SLEEP);
	error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed,
	    DMU_READ_PREFETCH);
	if (error == 0)
		error = nvlist_unpack(packed, nvsize, value, 0);
	kmem_free(packed, nvsize);

	return (error);
}
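
/*
 * On-disk layout assumed by load_nvlist() above (illustrative):
 *
 *	bonus buffer:	uint64_t	packed nvlist size (nvsize)
 *	object data:	nvsize bytes	packed nvlist
 *
 * The packed bytes are read in a single dmu_read() and decoded with
 * nvlist_unpack() into the caller's *value.
 */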

/*
 * Checks to see if the given vdev could not be opened, in which case we post a
 * sysevent to notify the autoreplace code that the device has been removed.
 */
static void
spa_check_removed(vdev_t *vd)
{
	for (int c = 0; c < vd->vdev_children; c++)
		spa_check_removed(vd->vdev_child[c]);

	if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd) &&
	    !vd->vdev_ishole) {
		zfs_post_autoreplace(vd->vdev_spa, vd);
		spa_event_notify(vd->vdev_spa, vd, ESC_ZFS_VDEV_CHECK);
	}
}

/*
 * Validate the current config against the MOS config
 */
static boolean_t
spa_config_valid(spa_t *spa, nvlist_t *config)
{
	vdev_t *mrvd, *rvd = spa->spa_root_vdev;
	nvlist_t *nv;

	VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nv) == 0);

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	VERIFY(spa_config_parse(spa, &mrvd, nv, NULL, 0, VDEV_ALLOC_LOAD) == 0);

	ASSERT3U(rvd->vdev_children, ==, mrvd->vdev_children);

	/*
	 * If we're doing a normal import, then build up any additional
	 * diagnostic information about missing devices in this config.
	 * We'll pass this up to the user for further processing.
	 */
	if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG)) {
		nvlist_t **child, *nv;
		uint64_t idx = 0;

		child = kmem_alloc(rvd->vdev_children * sizeof (nvlist_t **),
		    KM_SLEEP);
		VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);

		for (int c = 0; c < rvd->vdev_children; c++) {
			vdev_t *tvd = rvd->vdev_child[c];
			vdev_t *mtvd = mrvd->vdev_child[c];

			if (tvd->vdev_ops == &vdev_missing_ops &&
			    mtvd->vdev_ops != &vdev_missing_ops &&
			    mtvd->vdev_islog)
				child[idx++] = vdev_config_generate(spa, mtvd,
				    B_FALSE, 0);
		}

		if (idx) {
			VERIFY(nvlist_add_nvlist_array(nv,
			    ZPOOL_CONFIG_CHILDREN, child, idx) == 0);
			VERIFY(nvlist_add_nvlist(spa->spa_load_info,
			    ZPOOL_CONFIG_MISSING_DEVICES, nv) == 0);

			for (int i = 0; i < idx; i++)
				nvlist_free(child[i]);
		}
		nvlist_free(nv);
		kmem_free(child, rvd->vdev_children * sizeof (char **));
	}

	/*
	 * Compare the root vdev tree with the information we have
	 * from the MOS config (mrvd). Check each top-level vdev
	 * with the corresponding MOS config top-level (mtvd).
	 */
	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		vdev_t *mtvd = mrvd->vdev_child[c];

		/*
		 * Resolve any "missing" vdevs in the current configuration.
		 * If we find that the MOS config has more accurate information
		 * about the top-level vdev then use that vdev instead.
		 */
		if (tvd->vdev_ops == &vdev_missing_ops &&
		    mtvd->vdev_ops != &vdev_missing_ops) {

			if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG))
				continue;

			/*
			 * Device specific actions.
			 */
			if (mtvd->vdev_islog) {
				spa_set_log_state(spa, SPA_LOG_CLEAR);
			} else {
				/*
				 * XXX - once we have 'readonly' pool
				 * support we should be able to handle
				 * missing data devices by transitioning
				 * the pool to readonly.
				 */
				continue;
			}

			/*
			 * Swap the missing vdev with the data we were
			 * able to obtain from the MOS config.
			 */
			vdev_remove_child(rvd, tvd);
			vdev_remove_child(mrvd, mtvd);

			vdev_add_child(rvd, mtvd);
			vdev_add_child(mrvd, tvd);

			spa_config_exit(spa, SCL_ALL, FTAG);
			vdev_load(mtvd);
			spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

			vdev_reopen(rvd);
		} else if (mtvd->vdev_islog) {
			/*
			 * Load the slog device's state from the MOS config
			 * since it's possible that the label does not
			 * contain the most up-to-date information.
			 */
			vdev_load_log_state(tvd, mtvd);
			vdev_reopen(tvd);
		}
	}
	vdev_free(mrvd);
	spa_config_exit(spa, SCL_ALL, FTAG);

	/*
	 * Ensure we were able to validate the config.
	 */
	return (rvd->vdev_guid_sum == spa->spa_uberblock.ub_guid_sum);
}

/*
 * Check for missing log devices
 */
static boolean_t
spa_check_logs(spa_t *spa)
{
	boolean_t rv = B_FALSE;

	switch (spa->spa_log_state) {
	case SPA_LOG_MISSING:
		/* need to recheck in case slog has been restored */
	case SPA_LOG_UNKNOWN:
		rv = (dmu_objset_find_parallel(spa->spa_name,
		    zil_check_log_chain, NULL, DS_FIND_CHILDREN) != 0);
		if (rv)
			spa_set_log_state(spa, SPA_LOG_MISSING);
		break;
	}
	return (rv);
}

static boolean_t
spa_passivate_log(spa_t *spa)
{
	vdev_t *rvd = spa->spa_root_vdev;
	boolean_t slog_found = B_FALSE;

	ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));

	if (!spa_has_slogs(spa))
		return (B_FALSE);

	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		if (tvd->vdev_islog) {
			metaslab_group_passivate(mg);
			slog_found = B_TRUE;
		}
	}

	return (slog_found);
}

static void
spa_activate_log(spa_t *spa)
{
	vdev_t *rvd = spa->spa_root_vdev;

	ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));

	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		if (tvd->vdev_islog)
			metaslab_group_activate(mg);
	}
}

int
spa_offline_log(spa_t *spa)
{
	int error;

	error = dmu_objset_find(spa_name(spa), zil_vdev_offline,
	    NULL, DS_FIND_CHILDREN);
	if (error == 0) {
		/*
		 * We successfully offlined the log device, sync out the
		 * current txg so that the "stubby" block can be removed
		 * by zil_sync().
		 */
		txg_wait_synced(spa->spa_dsl_pool, 0);
	}
	return (error);
}

static void
spa_aux_check_removed(spa_aux_vdev_t *sav)
{
	for (int i = 0; i < sav->sav_count; i++)
		spa_check_removed(sav->sav_vdevs[i]);
}

void
spa_claim_notify(zio_t *zio)
{
	spa_t *spa = zio->io_spa;

	if (zio->io_error)
		return;

	mutex_enter(&spa->spa_props_lock);	/* any mutex will do */
	if (spa->spa_claim_max_txg < zio->io_bp->blk_birth)
		spa->spa_claim_max_txg = zio->io_bp->blk_birth;
	mutex_exit(&spa->spa_props_lock);
}

typedef struct spa_load_error {
	uint64_t	sle_meta_count;
	uint64_t	sle_data_count;
} spa_load_error_t;

static void
spa_load_verify_done(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	spa_load_error_t *sle = zio->io_private;
	dmu_object_type_t type = BP_GET_TYPE(bp);
	int error = zio->io_error;

	if (error) {
		if ((BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type)) &&
		    type != DMU_OT_INTENT_LOG)
			atomic_add_64(&sle->sle_meta_count, 1);
		else
			atomic_add_64(&sle->sle_data_count, 1);
	}
	zio_data_buf_free(zio->io_data, zio->io_size);
}

/*ARGSUSED*/
static int
spa_load_verify_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
{
	if (!BP_IS_HOLE(bp) && !BP_IS_EMBEDDED(bp)) {
		zio_t *rio = arg;
		size_t size = BP_GET_PSIZE(bp);
		void *data = zio_data_buf_alloc(size);

		zio_nowait(zio_read(rio, spa, bp, data, size,
		    spa_load_verify_done, rio->io_private, ZIO_PRIORITY_SCRUB,
		    ZIO_FLAG_SPECULATIVE | ZIO_FLAG_CANFAIL |
		    ZIO_FLAG_SCRUB | ZIO_FLAG_RAW, zb));
	}
	return (0);
}

static int
spa_load_verify(spa_t *spa)
{
	zio_t *rio;
	spa_load_error_t sle = { 0 };
	zpool_rewind_policy_t policy;
	boolean_t verify_ok = B_FALSE;
	int error;

	zpool_get_rewind_policy(spa->spa_config, &policy);

	if (policy.zrp_request & ZPOOL_NEVER_REWIND)
		return (0);

	rio = zio_root(spa, NULL, &sle,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE);

	error = traverse_pool(spa, spa->spa_verify_min_txg,
	    TRAVERSE_PRE | TRAVERSE_PREFETCH, spa_load_verify_cb, rio);

	(void) zio_wait(rio);

	spa->spa_load_meta_errors = sle.sle_meta_count;
	spa->spa_load_data_errors = sle.sle_data_count;

	if (!error && sle.sle_meta_count <= policy.zrp_maxmeta &&
	    sle.sle_data_count <= policy.zrp_maxdata) {
		int64_t loss = 0;

		verify_ok = B_TRUE;
		spa->spa_load_txg = spa->spa_uberblock.ub_txg;
		spa->spa_load_txg_ts = spa->spa_uberblock.ub_timestamp;

		loss = spa->spa_last_ubsync_txg_ts - spa->spa_load_txg_ts;
		VERIFY(nvlist_add_uint64(spa->spa_load_info,
		    ZPOOL_CONFIG_LOAD_TIME, spa->spa_load_txg_ts) == 0);
		VERIFY(nvlist_add_int64(spa->spa_load_info,
		    ZPOOL_CONFIG_REWIND_TIME, loss) == 0);
		VERIFY(nvlist_add_uint64(spa->spa_load_info,
		    ZPOOL_CONFIG_LOAD_DATA_ERRORS, sle.sle_data_count) == 0);
	} else {
		spa->spa_load_max_txg = spa->spa_uberblock.ub_txg;
	}

	if (error) {
		if (error != ENXIO && error != EIO)
			error = SET_ERROR(EIO);
		return (error);
	}

	return (verify_ok ? 0 : EIO);
}
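
/*
 * Policy interaction in spa_load_verify() above, in brief: a rewind policy
 * that requests ZPOOL_NEVER_REWIND skips the traversal entirely, while any
 * other policy walks the pool and declares the load "verify ok" only if
 * sle_meta_count <= zrp_maxmeta and sle_data_count <= zrp_maxdata;
 * otherwise spa_load_max_txg is primed so a later rewind attempt knows
 * where to start.
 */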

/*
 * Find a value in the pool props object.
 */
static void
spa_prop_find(spa_t *spa, zpool_prop_t prop, uint64_t *val)
{
	(void) zap_lookup(spa->spa_meta_objset, spa->spa_pool_props_object,
	    zpool_prop_to_name(prop), sizeof (uint64_t), 1, val);
}

/*
 * Find a value in the pool directory object.
 */
static int
spa_dir_prop(spa_t *spa, const char *name, uint64_t *val)
{
	return (zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    name, sizeof (uint64_t), 1, val));
}

static int
spa_vdev_err(vdev_t *vdev, vdev_aux_t aux, int err)
{
	vdev_set_state(vdev, B_TRUE, VDEV_STATE_CANT_OPEN, aux);
	return (err);
}
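
/*
 * Example use of the helpers above (an illustrative sketch matching how
 * the load path consumes them): look up the config object in the pool
 * directory and flag the root vdev as corrupt if it is missing:
 *
 *	if (spa_dir_prop(spa, DMU_POOL_CONFIG, &spa->spa_config_object) != 0)
 *		return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
 */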
2000 */ 2001 if (!attempt_reopen || gcount == extracted) { 2002 for (i = 0; i < gcount; i++) 2003 if (vd[i] != NULL) 2004 vdev_split(vd[i]); 2005 vdev_reopen(spa->spa_root_vdev); 2006 } 2007 2008 kmem_free(vd, gcount * sizeof (vdev_t *)); 2009 } 2010 2011 static int 2012 spa_load(spa_t *spa, spa_load_state_t state, spa_import_type_t type, 2013 boolean_t mosconfig) 2014 { 2015 nvlist_t *config = spa->spa_config; 2016 char *ereport = FM_EREPORT_ZFS_POOL; 2017 char *comment; 2018 int error; 2019 uint64_t pool_guid; 2020 nvlist_t *nvl; 2021 2022 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid)) 2023 return (SET_ERROR(EINVAL)); 2024 2025 ASSERT(spa->spa_comment == NULL); 2026 if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0) 2027 spa->spa_comment = spa_strdup(comment); 2028 2029 /* 2030 * Versioning wasn't explicitly added to the label until later, so if 2031 * it's not present treat it as the initial version. 2032 */ 2033 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, 2034 &spa->spa_ubsync.ub_version) != 0) 2035 spa->spa_ubsync.ub_version = SPA_VERSION_INITIAL; 2036 2037 (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, 2038 &spa->spa_config_txg); 2039 2040 if ((state == SPA_LOAD_IMPORT || state == SPA_LOAD_TRYIMPORT) && 2041 spa_guid_exists(pool_guid, 0)) { 2042 error = SET_ERROR(EEXIST); 2043 } else { 2044 spa->spa_config_guid = pool_guid; 2045 2046 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, 2047 &nvl) == 0) { 2048 VERIFY(nvlist_dup(nvl, &spa->spa_config_splitting, 2049 KM_SLEEP) == 0); 2050 } 2051 2052 nvlist_free(spa->spa_load_info); 2053 spa->spa_load_info = fnvlist_alloc(); 2054 2055 gethrestime(&spa->spa_loaded_ts); 2056 error = spa_load_impl(spa, pool_guid, config, state, type, 2057 mosconfig, &ereport); 2058 } 2059 2060 spa->spa_minref = refcount_count(&spa->spa_refcount); 2061 if (error) { 2062 if (error != EEXIST) { 2063 spa->spa_loaded_ts.tv_sec = 0; 2064 spa->spa_loaded_ts.tv_nsec = 0; 2065 } 2066 if (error != EBADF) { 2067 zfs_ereport_post(ereport, spa, NULL, NULL, 0, 0); 2068 } 2069 } 2070 spa->spa_load_state = error ? SPA_LOAD_ERROR : SPA_LOAD_NONE; 2071 spa->spa_ena = 0; 2072 2073 return (error); 2074 } 2075 2076 /* 2077 * Load an existing storage pool, using the pool's builtin spa_config as a 2078 * source of configuration information. 2079 */ 2080 static int 2081 spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config, 2082 spa_load_state_t state, spa_import_type_t type, boolean_t mosconfig, 2083 char **ereport) 2084 { 2085 int error = 0; 2086 nvlist_t *nvroot = NULL; 2087 nvlist_t *label; 2088 vdev_t *rvd; 2089 uberblock_t *ub = &spa->spa_uberblock; 2090 uint64_t children, config_cache_txg = spa->spa_config_txg; 2091 int orig_mode = spa->spa_mode; 2092 int parse; 2093 uint64_t obj; 2094 boolean_t missing_feat_write = B_FALSE; 2095 2096 /* 2097 * If this is an untrusted config, access the pool in read-only mode. 2098 * This prevents things like resilvering recently removed devices. 2099 */ 2100 if (!mosconfig) 2101 spa->spa_mode = FREAD; 2102 2103 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 2104 2105 spa->spa_load_state = state; 2106 2107 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot)) 2108 return (SET_ERROR(EINVAL)); 2109 2110 parse = (type == SPA_IMPORT_EXISTING ? 
2111 VDEV_ALLOC_LOAD : VDEV_ALLOC_SPLIT); 2112 2113 /* 2114 * Create "The Godfather" zio to hold all async IOs 2115 */ 2116 spa->spa_async_zio_root = zio_root(spa, NULL, NULL, 2117 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_GODFATHER); 2118 2119 /* 2120 * Parse the configuration into a vdev tree. We explicitly set the 2121 * value that will be returned by spa_version() since parsing the 2122 * configuration requires knowing the version number. 2123 */ 2124 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2125 error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, parse); 2126 spa_config_exit(spa, SCL_ALL, FTAG); 2127 2128 if (error != 0) 2129 return (error); 2130 2131 ASSERT(spa->spa_root_vdev == rvd); 2132 2133 if (type != SPA_IMPORT_ASSEMBLE) { 2134 ASSERT(spa_guid(spa) == pool_guid); 2135 } 2136 2137 /* 2138 * Try to open all vdevs, loading each label in the process. 2139 */ 2140 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2141 error = vdev_open(rvd); 2142 spa_config_exit(spa, SCL_ALL, FTAG); 2143 if (error != 0) 2144 return (error); 2145 2146 /* 2147 * We need to validate the vdev labels against the configuration that 2148 * we have in hand, which is dependent on the setting of mosconfig. If 2149 * mosconfig is true then we're validating the vdev labels based on 2150 * that config. Otherwise, we're validating against the cached config 2151 * (zpool.cache) that was read when we loaded the zfs module, and then 2152 * later we will recursively call spa_load() and validate against 2153 * the vdev config. 2154 * 2155 * If we're assembling a new pool that's been split off from an 2156 * existing pool, the labels haven't yet been updated so we skip 2157 * validation for now. 2158 */ 2159 if (type != SPA_IMPORT_ASSEMBLE) { 2160 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2161 error = vdev_validate(rvd, mosconfig); 2162 spa_config_exit(spa, SCL_ALL, FTAG); 2163 2164 if (error != 0) 2165 return (error); 2166 2167 if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) 2168 return (SET_ERROR(ENXIO)); 2169 } 2170 2171 /* 2172 * Find the best uberblock. 2173 */ 2174 vdev_uberblock_load(rvd, ub, &label); 2175 2176 /* 2177 * If we weren't able to find a single valid uberblock, return failure. 2178 */ 2179 if (ub->ub_txg == 0) { 2180 nvlist_free(label); 2181 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, ENXIO)); 2182 } 2183 2184 /* 2185 * If the pool has an unsupported version we can't open it. 2186 */ 2187 if (!SPA_VERSION_IS_SUPPORTED(ub->ub_version)) { 2188 nvlist_free(label); 2189 return (spa_vdev_err(rvd, VDEV_AUX_VERSION_NEWER, ENOTSUP)); 2190 } 2191 2192 if (ub->ub_version >= SPA_VERSION_FEATURES) { 2193 nvlist_t *features; 2194 2195 /* 2196 * If we weren't able to find what's necessary for reading the 2197 * MOS in the label, return failure. 2198 */ 2199 if (label == NULL || nvlist_lookup_nvlist(label, 2200 ZPOOL_CONFIG_FEATURES_FOR_READ, &features) != 0) { 2201 nvlist_free(label); 2202 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, 2203 ENXIO)); 2204 } 2205 2206 /* 2207 * Update our in-core representation with the definitive values 2208 * from the label. 2209 */ 2210 nvlist_free(spa->spa_label_features); 2211 VERIFY(nvlist_dup(features, &spa->spa_label_features, 0) == 0); 2212 } 2213 2214 nvlist_free(label); 2215 2216 /* 2217 * Look through entries in the label nvlist's features_for_read. If 2218 * there is a feature listed there which we don't understand then we 2219 * cannot open a pool. 
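 *
 * (Editorial sketch, not part of the original source.)  Conceptually the
 * gate implemented below is: the pool is readable only if every feature
 * guid recorded in features_for_read passes zfeature_is_supported().
 * With a hypothetical helper, and the feature names flattened into an
 * array purely for illustration:
 */
#if 0
static boolean_t
label_features_readable(char **feat_guids, uint_t count)
{
	for (uint_t i = 0; i < count; i++) {
		/* e.g. an unknown "com.example:future_feature" fails here */
		if (!zfeature_is_supported(feat_guids[i]))
			return (B_FALSE);
	}
	return (B_TRUE);
}
#endif
/*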
2220 */ 2221 if (ub->ub_version >= SPA_VERSION_FEATURES) { 2222 nvlist_t *unsup_feat; 2223 2224 VERIFY(nvlist_alloc(&unsup_feat, NV_UNIQUE_NAME, KM_SLEEP) == 2225 0); 2226 2227 for (nvpair_t *nvp = nvlist_next_nvpair(spa->spa_label_features, 2228 NULL); nvp != NULL; 2229 nvp = nvlist_next_nvpair(spa->spa_label_features, nvp)) { 2230 if (!zfeature_is_supported(nvpair_name(nvp))) { 2231 VERIFY(nvlist_add_string(unsup_feat, 2232 nvpair_name(nvp), "") == 0); 2233 } 2234 } 2235 2236 if (!nvlist_empty(unsup_feat)) { 2237 VERIFY(nvlist_add_nvlist(spa->spa_load_info, 2238 ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat) == 0); 2239 nvlist_free(unsup_feat); 2240 return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT, 2241 ENOTSUP)); 2242 } 2243 2244 nvlist_free(unsup_feat); 2245 } 2246 2247 /* 2248 * If the vdev guid sum doesn't match the uberblock, we have an 2249 * incomplete configuration. We first check to see if the pool 2250 * is aware of the complete config (i.e. ZPOOL_CONFIG_VDEV_CHILDREN). 2251 * If it is, defer the vdev_guid_sum check until later so we 2252 * can handle missing vdevs. 2253 */ 2254 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VDEV_CHILDREN, 2255 &children) != 0 && mosconfig && type != SPA_IMPORT_ASSEMBLE && 2256 rvd->vdev_guid_sum != ub->ub_guid_sum) 2257 return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM, ENXIO)); 2258 2259 if (type != SPA_IMPORT_ASSEMBLE && spa->spa_config_splitting) { 2260 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2261 spa_try_repair(spa, config); 2262 spa_config_exit(spa, SCL_ALL, FTAG); 2263 nvlist_free(spa->spa_config_splitting); 2264 spa->spa_config_splitting = NULL; 2265 } 2266 2267 /* 2268 * Initialize internal SPA structures. 2269 */ 2270 spa->spa_state = POOL_STATE_ACTIVE; 2271 spa->spa_ubsync = spa->spa_uberblock; 2272 spa->spa_verify_min_txg = spa->spa_extreme_rewind ? 2273 TXG_INITIAL - 1 : spa_last_synced_txg(spa) - TXG_DEFER_SIZE - 1; 2274 spa->spa_first_txg = spa->spa_last_ubsync_txg ?
2275 spa->spa_last_ubsync_txg : spa_last_synced_txg(spa) + 1; 2276 spa->spa_claim_max_txg = spa->spa_first_txg; 2277 spa->spa_prev_software_version = ub->ub_software_version; 2278 2279 error = dsl_pool_init(spa, spa->spa_first_txg, &spa->spa_dsl_pool); 2280 if (error) 2281 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2282 spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset; 2283 2284 if (spa_dir_prop(spa, DMU_POOL_CONFIG, &spa->spa_config_object) != 0) 2285 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2286 2287 if (spa_version(spa) >= SPA_VERSION_FEATURES) { 2288 boolean_t missing_feat_read = B_FALSE; 2289 nvlist_t *unsup_feat, *enabled_feat; 2290 2291 if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_READ, 2292 &spa->spa_feat_for_read_obj) != 0) { 2293 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2294 } 2295 2296 if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_WRITE, 2297 &spa->spa_feat_for_write_obj) != 0) { 2298 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2299 } 2300 2301 if (spa_dir_prop(spa, DMU_POOL_FEATURE_DESCRIPTIONS, 2302 &spa->spa_feat_desc_obj) != 0) { 2303 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2304 } 2305 2306 enabled_feat = fnvlist_alloc(); 2307 unsup_feat = fnvlist_alloc(); 2308 2309 if (!spa_features_check(spa, B_FALSE, 2310 unsup_feat, enabled_feat)) 2311 missing_feat_read = B_TRUE; 2312 2313 if (spa_writeable(spa) || state == SPA_LOAD_TRYIMPORT) { 2314 if (!spa_features_check(spa, B_TRUE, 2315 unsup_feat, enabled_feat)) { 2316 missing_feat_write = B_TRUE; 2317 } 2318 } 2319 2320 fnvlist_add_nvlist(spa->spa_load_info, 2321 ZPOOL_CONFIG_ENABLED_FEAT, enabled_feat); 2322 2323 if (!nvlist_empty(unsup_feat)) { 2324 fnvlist_add_nvlist(spa->spa_load_info, 2325 ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat); 2326 } 2327 2328 fnvlist_free(enabled_feat); 2329 fnvlist_free(unsup_feat); 2330 2331 if (!missing_feat_read) { 2332 fnvlist_add_boolean(spa->spa_load_info, 2333 ZPOOL_CONFIG_CAN_RDONLY); 2334 } 2335 2336 /* 2337 * If the state is SPA_LOAD_TRYIMPORT, our objective is 2338 * twofold: to determine whether the pool is available for 2339 * import in read-write mode and (if it is not) whether the 2340 * pool is available for import in read-only mode. If the pool 2341 * is available for import in read-write mode, it is displayed 2342 * as available in userland; if it is not available for import 2343 * in read-only mode, it is displayed as unavailable in 2344 * userland. If the pool is available for import in read-only 2345 * mode but not read-write mode, it is displayed as unavailable 2346 * in userland with a special note that the pool is actually 2347 * available for open in read-only mode. 2348 * 2349 * As a result, if the state is SPA_LOAD_TRYIMPORT and we are 2350 * missing a feature for write, we must first determine whether 2351 * the pool can be opened read-only before returning to 2352 * userland in order to know whether to display the 2353 * abovementioned note. 2354 */ 2355 if (missing_feat_read || (missing_feat_write && 2356 spa_writeable(spa))) { 2357 return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT, 2358 ENOTSUP)); 2359 } 2360 2361 /* 2362 * Load refcounts for ZFS features from disk into an in-memory 2363 * cache during SPA initialization. 
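 *
 * (Editorial sketch, not part of the original source.)  Once the loop
 * below has filled spa_feat_refcount_cache, questions such as "is this
 * feature in use?" can be answered without further disk I/O.  Roughly,
 * with a hypothetical helper standing in for the real consumers such as
 * spa_feature_is_active():
 */
#if 0
static boolean_t
feat_cache_is_active(spa_t *spa, spa_feature_t f)
{
	uint64_t rc = spa->spa_feat_refcount_cache[f];

	/* SPA_FEATURE_DISABLED is the "no ZAP entry on disk" sentinel */
	return (rc != SPA_FEATURE_DISABLED && rc > 0);
}
#endif
/*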
2364 */ 2365 for (spa_feature_t i = 0; i < SPA_FEATURES; i++) { 2366 uint64_t refcount; 2367 2368 error = feature_get_refcount_from_disk(spa, 2369 &spa_feature_table[i], &refcount); 2370 if (error == 0) { 2371 spa->spa_feat_refcount_cache[i] = refcount; 2372 } else if (error == ENOTSUP) { 2373 spa->spa_feat_refcount_cache[i] = 2374 SPA_FEATURE_DISABLED; 2375 } else { 2376 return (spa_vdev_err(rvd, 2377 VDEV_AUX_CORRUPT_DATA, EIO)); 2378 } 2379 } 2380 } 2381 2382 if (spa_feature_is_active(spa, SPA_FEATURE_ENABLED_TXG)) { 2383 if (spa_dir_prop(spa, DMU_POOL_FEATURE_ENABLED_TXG, 2384 &spa->spa_feat_enabled_txg_obj) != 0) 2385 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2386 } 2387 2388 spa->spa_is_initializing = B_TRUE; 2389 error = dsl_pool_open(spa->spa_dsl_pool); 2390 spa->spa_is_initializing = B_FALSE; 2391 if (error != 0) 2392 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2393 2394 if (!mosconfig) { 2395 uint64_t hostid; 2396 nvlist_t *policy = NULL, *nvconfig; 2397 2398 if (load_nvlist(spa, spa->spa_config_object, &nvconfig) != 0) 2399 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2400 2401 if (!spa_is_root(spa) && nvlist_lookup_uint64(nvconfig, 2402 ZPOOL_CONFIG_HOSTID, &hostid) == 0) { 2403 char *hostname; 2404 unsigned long myhostid = 0; 2405 2406 VERIFY(nvlist_lookup_string(nvconfig, 2407 ZPOOL_CONFIG_HOSTNAME, &hostname) == 0); 2408 2409 #ifdef _KERNEL 2410 myhostid = zone_get_hostid(NULL); 2411 #else /* _KERNEL */ 2412 /* 2413 * We're emulating the system's hostid in userland, so 2414 * we can't use zone_get_hostid(). 2415 */ 2416 (void) ddi_strtoul(hw_serial, NULL, 10, &myhostid); 2417 #endif /* _KERNEL */ 2418 if (hostid != 0 && myhostid != 0 && 2419 hostid != myhostid) { 2420 nvlist_free(nvconfig); 2421 cmn_err(CE_WARN, "pool '%s' could not be " 2422 "loaded as it was last accessed by " 2423 "another system (host: %s hostid: 0x%lx). " 2424 "See: http://illumos.org/msg/ZFS-8000-EY", 2425 spa_name(spa), hostname, 2426 (unsigned long)hostid); 2427 return (SET_ERROR(EBADF)); 2428 } 2429 } 2430 if (nvlist_lookup_nvlist(spa->spa_config, 2431 ZPOOL_REWIND_POLICY, &policy) == 0) 2432 VERIFY(nvlist_add_nvlist(nvconfig, 2433 ZPOOL_REWIND_POLICY, policy) == 0); 2434 2435 spa_config_set(spa, nvconfig); 2436 spa_unload(spa); 2437 spa_deactivate(spa); 2438 spa_activate(spa, orig_mode); 2439 2440 return (spa_load(spa, state, SPA_IMPORT_EXISTING, B_TRUE)); 2441 } 2442 2443 if (spa_dir_prop(spa, DMU_POOL_SYNC_BPOBJ, &obj) != 0) 2444 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2445 error = bpobj_open(&spa->spa_deferred_bpobj, spa->spa_meta_objset, obj); 2446 if (error != 0) 2447 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2448 2449 /* 2450 * Load the bit that tells us to use the new accounting function 2451 * (raid-z deflation). If we have an older pool, this will not 2452 * be present. 2453 */ 2454 error = spa_dir_prop(spa, DMU_POOL_DEFLATE, &spa->spa_deflate); 2455 if (error != 0 && error != ENOENT) 2456 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2457 2458 error = spa_dir_prop(spa, DMU_POOL_CREATION_VERSION, 2459 &spa->spa_creation_version); 2460 if (error != 0 && error != ENOENT) 2461 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2462 2463 /* 2464 * Load the persistent error log. If we have an older pool, this will 2465 * not be present. 
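 *
 * (Editorial sketch, not part of the original source.)  This and the
 * following lookups all follow the same backward-compatibility pattern:
 * ENOENT means the entry simply predates this pool's version and is
 * benign, while any other error is treated as corruption.  Distilled,
 * with a hypothetical wrapper:
 */
#if 0
static int
spa_dir_prop_optional(spa_t *spa, const char *name, uint64_t *val)
{
	int error = spa_dir_prop(spa, name, val);

	/* older pools simply lack the entry; only real failures matter */
	return (error == ENOENT ? 0 : error);
}
#endif
/*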
2466 */ 2467 error = spa_dir_prop(spa, DMU_POOL_ERRLOG_LAST, &spa->spa_errlog_last); 2468 if (error != 0 && error != ENOENT) 2469 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2470 2471 error = spa_dir_prop(spa, DMU_POOL_ERRLOG_SCRUB, 2472 &spa->spa_errlog_scrub); 2473 if (error != 0 && error != ENOENT) 2474 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2475 2476 /* 2477 * Load the history object. If we have an older pool, this 2478 * will not be present. 2479 */ 2480 error = spa_dir_prop(spa, DMU_POOL_HISTORY, &spa->spa_history); 2481 if (error != 0 && error != ENOENT) 2482 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2483 2484 /* 2485 * If we're assembling the pool from the split-off vdevs of 2486 * an existing pool, we don't want to attach the spares & cache 2487 * devices. 2488 */ 2489 2490 /* 2491 * Load any hot spares for this pool. 2492 */ 2493 error = spa_dir_prop(spa, DMU_POOL_SPARES, &spa->spa_spares.sav_object); 2494 if (error != 0 && error != ENOENT) 2495 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2496 if (error == 0 && type != SPA_IMPORT_ASSEMBLE) { 2497 ASSERT(spa_version(spa) >= SPA_VERSION_SPARES); 2498 if (load_nvlist(spa, spa->spa_spares.sav_object, 2499 &spa->spa_spares.sav_config) != 0) 2500 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2501 2502 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2503 spa_load_spares(spa); 2504 spa_config_exit(spa, SCL_ALL, FTAG); 2505 } else if (error == 0) { 2506 spa->spa_spares.sav_sync = B_TRUE; 2507 } 2508 2509 /* 2510 * Load any level 2 ARC devices for this pool. 2511 */ 2512 error = spa_dir_prop(spa, DMU_POOL_L2CACHE, 2513 &spa->spa_l2cache.sav_object); 2514 if (error != 0 && error != ENOENT) 2515 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2516 if (error == 0 && type != SPA_IMPORT_ASSEMBLE) { 2517 ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE); 2518 if (load_nvlist(spa, spa->spa_l2cache.sav_object, 2519 &spa->spa_l2cache.sav_config) != 0) 2520 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2521 2522 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2523 spa_load_l2cache(spa); 2524 spa_config_exit(spa, SCL_ALL, FTAG); 2525 } else if (error == 0) { 2526 spa->spa_l2cache.sav_sync = B_TRUE; 2527 } 2528 2529 spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION); 2530 2531 error = spa_dir_prop(spa, DMU_POOL_PROPS, &spa->spa_pool_props_object); 2532 if (error && error != ENOENT) 2533 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2534 2535 if (error == 0) { 2536 uint64_t autoreplace; 2537 2538 spa_prop_find(spa, ZPOOL_PROP_BOOTFS, &spa->spa_bootfs); 2539 spa_prop_find(spa, ZPOOL_PROP_AUTOREPLACE, &autoreplace); 2540 spa_prop_find(spa, ZPOOL_PROP_DELEGATION, &spa->spa_delegation); 2541 spa_prop_find(spa, ZPOOL_PROP_FAILUREMODE, &spa->spa_failmode); 2542 spa_prop_find(spa, ZPOOL_PROP_AUTOEXPAND, &spa->spa_autoexpand); 2543 spa_prop_find(spa, ZPOOL_PROP_DEDUPDITTO, 2544 &spa->spa_dedup_ditto); 2545 2546 spa->spa_autoreplace = (autoreplace != 0); 2547 } 2548 2549 /* 2550 * If the 'autoreplace' property is set, then post a resource notifying 2551 * the ZFS DE that it should not issue any faults for unopenable 2552 * devices. We also iterate over the vdevs, and post a sysevent for any 2553 * unopenable vdevs so that the normal autoreplace handler can take 2554 * over. 
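 *
 * (Editorial sketch, not part of the original source.)  spa_check_removed()
 * is not shown in this excerpt; the traversal it performs has the same
 * shape as the other whole-tree walks in this file -- recurse over the
 * children, then act on leaves.  Schematically, with a hypothetical
 * callback walker:
 */
#if 0
static void
vdev_walk_leaves(vdev_t *vd, void (*func)(vdev_t *))
{
	for (int c = 0; c < vd->vdev_children; c++)
		vdev_walk_leaves(vd->vdev_child[c], func);

	if (vd->vdev_ops->vdev_op_leaf)
		func(vd);
}
#endif
/*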
2555 */ 2556 if (spa->spa_autoreplace && state != SPA_LOAD_TRYIMPORT) { 2557 spa_check_removed(spa->spa_root_vdev); 2558 /* 2559 * For the import case, this is done in spa_import(), because 2560 * at this point we're using the spare definitions from 2561 * the MOS config, not necessarily from the userland config. 2562 */ 2563 if (state != SPA_LOAD_IMPORT) { 2564 spa_aux_check_removed(&spa->spa_spares); 2565 spa_aux_check_removed(&spa->spa_l2cache); 2566 } 2567 } 2568 2569 /* 2570 * Load the vdev state for all toplevel vdevs. 2571 */ 2572 vdev_load(rvd); 2573 2574 /* 2575 * Propagate the leaf DTLs we just loaded all the way up the tree. 2576 */ 2577 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2578 vdev_dtl_reassess(rvd, 0, 0, B_FALSE); 2579 spa_config_exit(spa, SCL_ALL, FTAG); 2580 2581 /* 2582 * Load the DDTs (dedup tables). 2583 */ 2584 error = ddt_load(spa); 2585 if (error != 0) 2586 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2587 2588 spa_update_dspace(spa); 2589 2590 /* 2591 * Validate the config, using the MOS config to fill in any 2592 * information which might be missing. If we fail to validate 2593 * the config then declare the pool unfit for use. If we're 2594 * assembling a pool from a split, the log is not transferred 2595 * over. 2596 */ 2597 if (type != SPA_IMPORT_ASSEMBLE) { 2598 nvlist_t *nvconfig; 2599 2600 if (load_nvlist(spa, spa->spa_config_object, &nvconfig) != 0) 2601 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2602 2603 if (!spa_config_valid(spa, nvconfig)) { 2604 nvlist_free(nvconfig); 2605 return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM, 2606 ENXIO)); 2607 } 2608 nvlist_free(nvconfig); 2609 2610 /* 2611 * Now that we've validated the config, check the state of the 2612 * root vdev. If it can't be opened, it indicates one or 2613 * more toplevel vdevs are faulted. 2614 */ 2615 if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) 2616 return (SET_ERROR(ENXIO)); 2617 2618 if (spa_check_logs(spa)) { 2619 *ereport = FM_EREPORT_ZFS_LOG_REPLAY; 2620 return (spa_vdev_err(rvd, VDEV_AUX_BAD_LOG, ENXIO)); 2621 } 2622 } 2623 2624 if (missing_feat_write) { 2625 ASSERT(state == SPA_LOAD_TRYIMPORT); 2626 2627 /* 2628 * At this point, we know that we can open the pool in 2629 * read-only mode but not read-write mode. We now have enough 2630 * information and can return to userland. 2631 */ 2632 return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT, ENOTSUP)); 2633 } 2634 2635 /* 2636 * We've successfully opened the pool, verify that we're ready 2637 * to start pushing transactions. 2638 */ 2639 if (state != SPA_LOAD_TRYIMPORT) { 2640 if (error = spa_load_verify(spa)) 2641 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, 2642 error)); 2643 } 2644 2645 if (spa_writeable(spa) && (state == SPA_LOAD_RECOVER || 2646 spa->spa_load_max_txg == UINT64_MAX)) { 2647 dmu_tx_t *tx; 2648 int need_update = B_FALSE; 2649 2650 ASSERT(state != SPA_LOAD_TRYIMPORT); 2651 2652 /* 2653 * Claim log blocks that haven't been committed yet. 2654 * This must all happen in a single txg. 2655 * Note: spa_claim_max_txg is updated by spa_claim_notify(), 2656 * invoked from zil_claim_log_block()'s i/o done callback. 2657 * Price of rollback is that we abandon the log. 
2658 */ 2659 spa->spa_claiming = B_TRUE; 2660 2661 tx = dmu_tx_create_assigned(spa_get_dsl(spa), 2662 spa_first_txg(spa)); 2663 (void) dmu_objset_find_parallel(spa_name(spa), 2664 zil_claim, tx, DS_FIND_CHILDREN); 2665 dmu_tx_commit(tx); 2666 2667 spa->spa_claiming = B_FALSE; 2668 2669 spa_set_log_state(spa, SPA_LOG_GOOD); 2670 spa->spa_sync_on = B_TRUE; 2671 txg_sync_start(spa->spa_dsl_pool); 2672 2673 /* 2674 * Wait for all claims to sync. We sync up to the highest 2675 * claimed log block birth time so that claimed log blocks 2676 * don't appear to be from the future. spa_claim_max_txg 2677 * will have been set for us by either zil_check_log_chain() 2678 * (invoked from spa_check_logs()) or zil_claim() above. 2679 */ 2680 txg_wait_synced(spa->spa_dsl_pool, spa->spa_claim_max_txg); 2681 2682 /* 2683 * If the config cache is stale, or we have uninitialized 2684 * metaslabs (see spa_vdev_add()), then update the config. 2685 * 2686 * If this is a verbatim import, trust the current 2687 * in-core spa_config and update the disk labels. 2688 */ 2689 if (config_cache_txg != spa->spa_config_txg || 2690 state == SPA_LOAD_IMPORT || 2691 state == SPA_LOAD_RECOVER || 2692 (spa->spa_import_flags & ZFS_IMPORT_VERBATIM)) 2693 need_update = B_TRUE; 2694 2695 for (int c = 0; c < rvd->vdev_children; c++) 2696 if (rvd->vdev_child[c]->vdev_ms_array == 0) 2697 need_update = B_TRUE; 2698 2699 /* 2700 * Update the config cache asynchronously in case we're the 2701 * root pool, in which case the config cache isn't writable yet. 2702 */ 2703 if (need_update) 2704 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE); 2705 2706 /* 2707 * Check all DTLs to see if anything needs resilvering. 2708 */ 2709 if (!dsl_scan_resilvering(spa->spa_dsl_pool) && 2710 vdev_resilver_needed(rvd, NULL, NULL)) 2711 spa_async_request(spa, SPA_ASYNC_RESILVER); 2712 2713 /* 2714 * Log the fact that we booted up (so that we can detect if 2715 * we rebooted in the middle of an operation). 2716 */ 2717 spa_history_log_version(spa, "open"); 2718 2719 /* 2720 * Delete any inconsistent datasets. 2721 */ 2722 (void) dmu_objset_find(spa_name(spa), 2723 dsl_destroy_inconsistent, NULL, DS_FIND_CHILDREN); 2724 2725 /* 2726 * Clean up any stale temporary dataset userrefs. 2727 */ 2728 dsl_pool_clean_tmp_userrefs(spa->spa_dsl_pool); 2729 } 2730 2731 return (0); 2732 } 2733 2734 static int 2735 spa_load_retry(spa_t *spa, spa_load_state_t state, int mosconfig) 2736 { 2737 int mode = spa->spa_mode; 2738 2739 spa_unload(spa); 2740 spa_deactivate(spa); 2741 2742 spa->spa_load_max_txg--; 2743 2744 spa_activate(spa, mode); 2745 spa_async_suspend(spa); 2746 2747 return (spa_load(spa, state, SPA_IMPORT_EXISTING, mosconfig)); 2748 } 2749 2750 /* 2751 * If spa_load() fails, this function will try loading prior txgs. If 2752 * 'state' is SPA_LOAD_RECOVER and one of these loads succeeds the pool 2753 * will be rewound to that txg. If 'state' is not SPA_LOAD_RECOVER this 2754 * function will not rewind the pool and will return the same error as 2755 * spa_load().
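 *
 * (Editorial sketch, not part of the original source.)  The rewind window
 * computed below follows from the fact that frees are deferred for
 * TXG_DEFER_SIZE txgs: uberblocks no older than that are safe to roll
 * back to, while anything older requires the "extreme" flag.  Roughly,
 * with a hypothetical helper:
 */
#if 0
static uint64_t
rewind_min_txg(uint64_t last_ubsync_txg, int rewind_flags)
{
	uint64_t safe_rewind_txg = last_ubsync_txg - TXG_DEFER_SIZE;

	/* extreme rewind may walk all the way back to pool creation */
	return ((rewind_flags & ZPOOL_EXTREME_REWIND) ?
	    TXG_INITIAL : safe_rewind_txg);
}
#endif
/*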
2756 */ 2757 static int 2758 spa_load_best(spa_t *spa, spa_load_state_t state, int mosconfig, 2759 uint64_t max_request, int rewind_flags) 2760 { 2761 nvlist_t *loadinfo = NULL; 2762 nvlist_t *config = NULL; 2763 int load_error, rewind_error; 2764 uint64_t safe_rewind_txg; 2765 uint64_t min_txg; 2766 2767 if (spa->spa_load_txg && state == SPA_LOAD_RECOVER) { 2768 spa->spa_load_max_txg = spa->spa_load_txg; 2769 spa_set_log_state(spa, SPA_LOG_CLEAR); 2770 } else { 2771 spa->spa_load_max_txg = max_request; 2772 } 2773 2774 load_error = rewind_error = spa_load(spa, state, SPA_IMPORT_EXISTING, 2775 mosconfig); 2776 if (load_error == 0) 2777 return (0); 2778 2779 if (spa->spa_root_vdev != NULL) 2780 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE); 2781 2782 spa->spa_last_ubsync_txg = spa->spa_uberblock.ub_txg; 2783 spa->spa_last_ubsync_txg_ts = spa->spa_uberblock.ub_timestamp; 2784 2785 if (rewind_flags & ZPOOL_NEVER_REWIND) { 2786 nvlist_free(config); 2787 return (load_error); 2788 } 2789 2790 if (state == SPA_LOAD_RECOVER) { 2791 /* Price of rolling back is discarding txgs, including log */ 2792 spa_set_log_state(spa, SPA_LOG_CLEAR); 2793 } else { 2794 /* 2795 * If we aren't rolling back, save the load info from our first 2796 * import attempt so that we can restore it after attempting 2797 * to rewind. 2798 */ 2799 loadinfo = spa->spa_load_info; 2800 spa->spa_load_info = fnvlist_alloc(); 2801 } 2802 2803 spa->spa_load_max_txg = spa->spa_last_ubsync_txg; 2804 safe_rewind_txg = spa->spa_last_ubsync_txg - TXG_DEFER_SIZE; 2805 min_txg = (rewind_flags & ZPOOL_EXTREME_REWIND) ? 2806 TXG_INITIAL : safe_rewind_txg; 2807 2808 /* 2809 * Continue as long as we're finding errors, we're still within 2810 * the acceptable rewind range, and we're still finding uberblocks. 2811 */ 2812 while (rewind_error && spa->spa_uberblock.ub_txg >= min_txg && 2813 spa->spa_uberblock.ub_txg <= spa->spa_load_max_txg) { 2814 if (spa->spa_load_max_txg < safe_rewind_txg) 2815 spa->spa_extreme_rewind = B_TRUE; 2816 rewind_error = spa_load_retry(spa, state, mosconfig); 2817 } 2818 2819 spa->spa_extreme_rewind = B_FALSE; 2820 spa->spa_load_max_txg = UINT64_MAX; 2821 2822 if (config && (rewind_error || state != SPA_LOAD_RECOVER)) 2823 spa_config_set(spa, config); 2824 2825 if (state == SPA_LOAD_RECOVER) { 2826 ASSERT3P(loadinfo, ==, NULL); 2827 return (rewind_error); 2828 } else { 2829 /* Store the rewind info as part of the initial load info */ 2830 fnvlist_add_nvlist(loadinfo, ZPOOL_CONFIG_REWIND_INFO, 2831 spa->spa_load_info); 2832 2833 /* Restore the initial load info */ 2834 fnvlist_free(spa->spa_load_info); 2835 spa->spa_load_info = loadinfo; 2836 2837 return (load_error); 2838 } 2839 } 2840 2841 /* 2842 * Pool Open/Import 2843 * 2844 * The import case is identical to an open except that the configuration is sent 2845 * down from userland, instead of grabbed from the configuration cache. For the 2846 * case of an open, the pool configuration will exist in the 2847 * POOL_STATE_UNINITIALIZED state. 2848 * 2849 * The stats information (gen/count/ustats) is used to gather vdev statistics at 2850 * the same time we open the pool, without having to keep around the spa_t in some 2851 * ambiguous state.
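 *
 * (Editorial sketch, not part of the original source.)  The recursive-entry
 * guard at the top of spa_open_common() below can be distilled to the
 * following pattern, with a hypothetical helper name:
 */
#if 0
static boolean_t
spa_ns_enter_if_needed(void)
{
	/* recursive callers (e.g. via dsl_dir_open()) already hold it */
	if (mutex_owner(&spa_namespace_lock) == curthread)
		return (B_FALSE);

	mutex_enter(&spa_namespace_lock);
	return (B_TRUE);	/* caller is responsible for the exit */
}
#endif
/*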
2852 */ 2853 static int 2854 spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t *nvpolicy, 2855 nvlist_t **config, int lock) 2856 { 2857 spa_t *spa; 2858 spa_load_state_t state = SPA_LOAD_OPEN; 2859 int error; 2860 int locked = B_FALSE; 2861 2862 *spapp = NULL; 2863 2864 /* 2865 * As disgusting as this is, we need to support recursive calls to this 2866 * function because dsl_dir_open() is called during spa_load(), and ends 2867 * up calling spa_open() again. The real fix is to figure out how to 2868 * avoid dsl_dir_open() calling this in the first place. 2869 */ 2870 if (lock && (mutex_owner(&spa_namespace_lock) != curthread)) { 2871 mutex_enter(&spa_namespace_lock); 2872 locked = B_TRUE; 2873 } 2874 2875 if ((spa = spa_lookup(pool)) == NULL) { 2876 if (locked) 2877 mutex_exit(&spa_namespace_lock); 2878 return (SET_ERROR(ENOENT)); 2879 } 2880 2881 if (spa->spa_state == POOL_STATE_UNINITIALIZED) { 2882 zpool_rewind_policy_t policy; 2883 2884 zpool_get_rewind_policy(nvpolicy ? nvpolicy : spa->spa_config, 2885 &policy); 2886 if (policy.zrp_request & ZPOOL_DO_REWIND) 2887 state = SPA_LOAD_RECOVER; 2888 2889 spa_activate(spa, spa_mode_global); 2890 2891 if (state != SPA_LOAD_RECOVER) 2892 spa->spa_last_ubsync_txg = spa->spa_load_txg = 0; 2893 2894 error = spa_load_best(spa, state, B_FALSE, policy.zrp_txg, 2895 policy.zrp_request); 2896 2897 if (error == EBADF) { 2898 /* 2899 * If vdev_validate() returns failure (indicated by 2900 * EBADF), it means that one of the vdevs indicates 2901 * that the pool has been exported or destroyed. If 2902 * this is the case, the config cache is out of sync and 2903 * we should remove the pool from the namespace. 2904 */ 2905 spa_unload(spa); 2906 spa_deactivate(spa); 2907 spa_config_sync(spa, B_TRUE, B_TRUE); 2908 spa_remove(spa); 2909 if (locked) 2910 mutex_exit(&spa_namespace_lock); 2911 return (SET_ERROR(ENOENT)); 2912 } 2913 2914 if (error) { 2915 /* 2916 * We can't open the pool, but we still have useful 2917 * information: the state of each vdev after the 2918 * attempted vdev_open(). Return this to the user. 2919 */ 2920 if (config != NULL && spa->spa_config) { 2921 VERIFY(nvlist_dup(spa->spa_config, config, 2922 KM_SLEEP) == 0); 2923 VERIFY(nvlist_add_nvlist(*config, 2924 ZPOOL_CONFIG_LOAD_INFO, 2925 spa->spa_load_info) == 0); 2926 } 2927 spa_unload(spa); 2928 spa_deactivate(spa); 2929 spa->spa_last_open_failed = error; 2930 if (locked) 2931 mutex_exit(&spa_namespace_lock); 2932 *spapp = NULL; 2933 return (error); 2934 } 2935 } 2936 2937 spa_open_ref(spa, tag); 2938 2939 if (config != NULL) 2940 *config = spa_config_generate(spa, NULL, -1ULL, B_TRUE); 2941 2942 /* 2943 * If we've recovered the pool, pass back any information we 2944 * gathered while doing the load.
2945 */ 2946 if (state == SPA_LOAD_RECOVER) { 2947 VERIFY(nvlist_add_nvlist(*config, ZPOOL_CONFIG_LOAD_INFO, 2948 spa->spa_load_info) == 0); 2949 } 2950 2951 if (locked) { 2952 spa->spa_last_open_failed = 0; 2953 spa->spa_last_ubsync_txg = 0; 2954 spa->spa_load_txg = 0; 2955 mutex_exit(&spa_namespace_lock); 2956 } 2957 2958 *spapp = spa; 2959 2960 return (0); 2961 } 2962 2963 int 2964 spa_open_rewind(const char *name, spa_t **spapp, void *tag, nvlist_t *policy, 2965 nvlist_t **config) 2966 { 2967 return (spa_open_common(name, spapp, tag, policy, config, 1)); 2968 } 2969 2970 int 2971 spa_open(const char *name, spa_t **spapp, void *tag) 2972 { 2973 return (spa_open_common(name, spapp, tag, NULL, NULL, 1)); 2974 } 2975 2976 int 2977 spa_open_lock(const char *name, spa_t **spapp, void *tag, int lock) 2978 { 2979 return (spa_open_common(name, spapp, tag, NULL, NULL, lock)); 2980 } 2981 2982 /* 2983 * Lookup the given spa_t, incrementing the inject count in the process, 2984 * preventing it from being exported or destroyed. 2985 */ 2986 spa_t * 2987 spa_inject_addref(char *name) 2988 { 2989 spa_t *spa; 2990 2991 mutex_enter(&spa_namespace_lock); 2992 if ((spa = spa_lookup(name)) == NULL) { 2993 mutex_exit(&spa_namespace_lock); 2994 return (NULL); 2995 } 2996 spa->spa_inject_ref++; 2997 mutex_exit(&spa_namespace_lock); 2998 2999 return (spa); 3000 } 3001 3002 void 3003 spa_inject_delref(spa_t *spa) 3004 { 3005 mutex_enter(&spa_namespace_lock); 3006 spa->spa_inject_ref--; 3007 mutex_exit(&spa_namespace_lock); 3008 } 3009 3010 /* 3011 * Add spares device information to the nvlist. 3012 */ 3013 static void 3014 spa_add_spares(spa_t *spa, nvlist_t *config) 3015 { 3016 nvlist_t **spares; 3017 uint_t i, nspares; 3018 nvlist_t *nvroot; 3019 uint64_t guid; 3020 vdev_stat_t *vs; 3021 uint_t vsc; 3022 uint64_t pool; 3023 3024 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER)); 3025 3026 if (spa->spa_spares.sav_count == 0) 3027 return; 3028 3029 VERIFY(nvlist_lookup_nvlist(config, 3030 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 3031 VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config, 3032 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0); 3033 if (nspares != 0) { 3034 VERIFY(nvlist_add_nvlist_array(nvroot, 3035 ZPOOL_CONFIG_SPARES, spares, nspares) == 0); 3036 VERIFY(nvlist_lookup_nvlist_array(nvroot, 3037 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0); 3038 3039 /* 3040 * Go through and find any spares which have since been 3041 * repurposed as an active spare. If this is the case, update 3042 * their status appropriately. 3043 */ 3044 for (i = 0; i < nspares; i++) { 3045 VERIFY(nvlist_lookup_uint64(spares[i], 3046 ZPOOL_CONFIG_GUID, &guid) == 0); 3047 if (spa_spare_exists(guid, &pool, NULL) && 3048 pool != 0ULL) { 3049 VERIFY(nvlist_lookup_uint64_array( 3050 spares[i], ZPOOL_CONFIG_VDEV_STATS, 3051 (uint64_t **)&vs, &vsc) == 0); 3052 vs->vs_state = VDEV_STATE_CANT_OPEN; 3053 vs->vs_aux = VDEV_AUX_SPARED; 3054 } 3055 } 3056 } 3057 } 3058 3059 /* 3060 * Add l2cache device information to the nvlist, including vdev stats. 
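 *
 * (Editorial sketch, not part of the original source.)  The test used by
 * spa_add_spares() above -- spa_spare_exists() with a nonzero pool guid --
 * is what distinguishes a shared spare that some pool has actually
 * deployed from one that is merely configured.  As a hypothetical
 * predicate:
 */
#if 0
static boolean_t
spare_is_active_elsewhere(uint64_t guid)
{
	uint64_t pool;

	/* pool == 0 means the spare is known but not in active use */
	return (spa_spare_exists(guid, &pool, NULL) && pool != 0ULL);
}
#endif
/*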
3061 */ 3062 static void 3063 spa_add_l2cache(spa_t *spa, nvlist_t *config) 3064 { 3065 nvlist_t **l2cache; 3066 uint_t i, j, nl2cache; 3067 nvlist_t *nvroot; 3068 uint64_t guid; 3069 vdev_t *vd; 3070 vdev_stat_t *vs; 3071 uint_t vsc; 3072 3073 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER)); 3074 3075 if (spa->spa_l2cache.sav_count == 0) 3076 return; 3077 3078 VERIFY(nvlist_lookup_nvlist(config, 3079 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 3080 VERIFY(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config, 3081 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0); 3082 if (nl2cache != 0) { 3083 VERIFY(nvlist_add_nvlist_array(nvroot, 3084 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0); 3085 VERIFY(nvlist_lookup_nvlist_array(nvroot, 3086 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0); 3087 3088 /* 3089 * Update level 2 cache device stats. 3090 */ 3091 3092 for (i = 0; i < nl2cache; i++) { 3093 VERIFY(nvlist_lookup_uint64(l2cache[i], 3094 ZPOOL_CONFIG_GUID, &guid) == 0); 3095 3096 vd = NULL; 3097 for (j = 0; j < spa->spa_l2cache.sav_count; j++) { 3098 if (guid == 3099 spa->spa_l2cache.sav_vdevs[j]->vdev_guid) { 3100 vd = spa->spa_l2cache.sav_vdevs[j]; 3101 break; 3102 } 3103 } 3104 ASSERT(vd != NULL); 3105 3106 VERIFY(nvlist_lookup_uint64_array(l2cache[i], 3107 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc) 3108 == 0); 3109 vdev_get_stats(vd, vs); 3110 } 3111 } 3112 } 3113 3114 static void 3115 spa_add_feature_stats(spa_t *spa, nvlist_t *config) 3116 { 3117 nvlist_t *features; 3118 zap_cursor_t zc; 3119 zap_attribute_t za; 3120 3121 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER)); 3122 VERIFY(nvlist_alloc(&features, NV_UNIQUE_NAME, KM_SLEEP) == 0); 3123 3124 if (spa->spa_feat_for_read_obj != 0) { 3125 for (zap_cursor_init(&zc, spa->spa_meta_objset, 3126 spa->spa_feat_for_read_obj); 3127 zap_cursor_retrieve(&zc, &za) == 0; 3128 zap_cursor_advance(&zc)) { 3129 ASSERT(za.za_integer_length == sizeof (uint64_t) && 3130 za.za_num_integers == 1); 3131 VERIFY3U(0, ==, nvlist_add_uint64(features, za.za_name, 3132 za.za_first_integer)); 3133 } 3134 zap_cursor_fini(&zc); 3135 } 3136 3137 if (spa->spa_feat_for_write_obj != 0) { 3138 for (zap_cursor_init(&zc, spa->spa_meta_objset, 3139 spa->spa_feat_for_write_obj); 3140 zap_cursor_retrieve(&zc, &za) == 0; 3141 zap_cursor_advance(&zc)) { 3142 ASSERT(za.za_integer_length == sizeof (uint64_t) && 3143 za.za_num_integers == 1); 3144 VERIFY3U(0, ==, nvlist_add_uint64(features, za.za_name, 3145 za.za_first_integer)); 3146 } 3147 zap_cursor_fini(&zc); 3148 } 3149 3150 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_FEATURE_STATS, 3151 features) == 0); 3152 nvlist_free(features); 3153 } 3154 3155 int 3156 spa_get_stats(const char *name, nvlist_t **config, 3157 char *altroot, size_t buflen) 3158 { 3159 int error; 3160 spa_t *spa; 3161 3162 *config = NULL; 3163 error = spa_open_common(name, &spa, FTAG, NULL, config, 1); 3164 3165 if (spa != NULL) { 3166 /* 3167 * This still leaves a window of inconsistency where the spares 3168 * or l2cache devices could change and the config would be 3169 * self-inconsistent. 
3170 */ 3171 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 3172 3173 if (*config != NULL) { 3174 uint64_t loadtimes[2]; 3175 3176 loadtimes[0] = spa->spa_loaded_ts.tv_sec; 3177 loadtimes[1] = spa->spa_loaded_ts.tv_nsec; 3178 VERIFY(nvlist_add_uint64_array(*config, 3179 ZPOOL_CONFIG_LOADED_TIME, loadtimes, 2) == 0); 3180 3181 VERIFY(nvlist_add_uint64(*config, 3182 ZPOOL_CONFIG_ERRCOUNT, 3183 spa_get_errlog_size(spa)) == 0); 3184 3185 if (spa_suspended(spa)) 3186 VERIFY(nvlist_add_uint64(*config, 3187 ZPOOL_CONFIG_SUSPENDED, 3188 spa->spa_failmode) == 0); 3189 3190 spa_add_spares(spa, *config); 3191 spa_add_l2cache(spa, *config); 3192 spa_add_feature_stats(spa, *config); 3193 } 3194 } 3195 3196 /* 3197 * We want to get the alternate root even for faulted pools, so we cheat 3198 * and call spa_lookup() directly. 3199 */ 3200 if (altroot) { 3201 if (spa == NULL) { 3202 mutex_enter(&spa_namespace_lock); 3203 spa = spa_lookup(name); 3204 if (spa) 3205 spa_altroot(spa, altroot, buflen); 3206 else 3207 altroot[0] = '\0'; 3208 spa = NULL; 3209 mutex_exit(&spa_namespace_lock); 3210 } else { 3211 spa_altroot(spa, altroot, buflen); 3212 } 3213 } 3214 3215 if (spa != NULL) { 3216 spa_config_exit(spa, SCL_CONFIG, FTAG); 3217 spa_close(spa, FTAG); 3218 } 3219 3220 return (error); 3221 } 3222 3223 /* 3224 * Validate that the auxiliary device array is well formed. We must have an 3225 * array of nvlists, each of which describes a valid leaf vdev. If this is an 3226 * import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be 3227 * specified, as long as they are well-formed. 3228 */ 3229 static int 3230 spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode, 3231 spa_aux_vdev_t *sav, const char *config, uint64_t version, 3232 vdev_labeltype_t label) 3233 { 3234 nvlist_t **dev; 3235 uint_t i, ndev; 3236 vdev_t *vd; 3237 int error; 3238 3239 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 3240 3241 /* 3242 * It's acceptable to have no devs specified. 3243 */ 3244 if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0) 3245 return (0); 3246 3247 if (ndev == 0) 3248 return (SET_ERROR(EINVAL)); 3249 3250 /* 3251 * Make sure the pool is formatted with a version that supports this 3252 * device type. 3253 */ 3254 if (spa_version(spa) < version) 3255 return (SET_ERROR(ENOTSUP)); 3256 3257 /* 3258 * Set the pending device list so we correctly handle device in-use 3259 * checking. 3260 */ 3261 sav->sav_pending = dev; 3262 sav->sav_npending = ndev; 3263 3264 for (i = 0; i < ndev; i++) { 3265 if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0, 3266 mode)) != 0) 3267 goto out; 3268 3269 if (!vd->vdev_ops->vdev_op_leaf) { 3270 vdev_free(vd); 3271 error = SET_ERROR(EINVAL); 3272 goto out; 3273 } 3274 3275 /* 3276 * The L2ARC currently only supports disk devices in 3277 * kernel context. For user-level testing, we allow it.
3278 */ 3279 #ifdef _KERNEL 3280 if ((strcmp(config, ZPOOL_CONFIG_L2CACHE) == 0) && 3281 strcmp(vd->vdev_ops->vdev_op_type, VDEV_TYPE_DISK) != 0) { 3282 error = SET_ERROR(ENOTBLK); 3283 vdev_free(vd); 3284 goto out; 3285 } 3286 #endif 3287 vd->vdev_top = vd; 3288 3289 if ((error = vdev_open(vd)) == 0 && 3290 (error = vdev_label_init(vd, crtxg, label)) == 0) { 3291 VERIFY(nvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID, 3292 vd->vdev_guid) == 0); 3293 } 3294 3295 vdev_free(vd); 3296 3297 if (error && 3298 (mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE)) 3299 goto out; 3300 else 3301 error = 0; 3302 } 3303 3304 out: 3305 sav->sav_pending = NULL; 3306 sav->sav_npending = 0; 3307 return (error); 3308 } 3309 3310 static int 3311 spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode) 3312 { 3313 int error; 3314 3315 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 3316 3317 if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode, 3318 &spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES, 3319 VDEV_LABEL_SPARE)) != 0) { 3320 return (error); 3321 } 3322 3323 return (spa_validate_aux_devs(spa, nvroot, crtxg, mode, 3324 &spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE, 3325 VDEV_LABEL_L2CACHE)); 3326 } 3327 3328 static void 3329 spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs, 3330 const char *config) 3331 { 3332 int i; 3333 3334 if (sav->sav_config != NULL) { 3335 nvlist_t **olddevs; 3336 uint_t oldndevs; 3337 nvlist_t **newdevs; 3338 3339 /* 3340 * Generate new dev list by concatenating with the 3341 * current dev list. 3342 */ 3343 VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, config, 3344 &olddevs, &oldndevs) == 0); 3345 3346 newdevs = kmem_alloc(sizeof (void *) * 3347 (ndevs + oldndevs), KM_SLEEP); 3348 for (i = 0; i < oldndevs; i++) 3349 VERIFY(nvlist_dup(olddevs[i], &newdevs[i], 3350 KM_SLEEP) == 0); 3351 for (i = 0; i < ndevs; i++) 3352 VERIFY(nvlist_dup(devs[i], &newdevs[i + oldndevs], 3353 KM_SLEEP) == 0); 3354 3355 VERIFY(nvlist_remove(sav->sav_config, config, 3356 DATA_TYPE_NVLIST_ARRAY) == 0); 3357 3358 VERIFY(nvlist_add_nvlist_array(sav->sav_config, 3359 config, newdevs, ndevs + oldndevs) == 0); 3360 for (i = 0; i < oldndevs + ndevs; i++) 3361 nvlist_free(newdevs[i]); 3362 kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *)); 3363 } else { 3364 /* 3365 * Generate a new dev list. 3366 */ 3367 VERIFY(nvlist_alloc(&sav->sav_config, NV_UNIQUE_NAME, 3368 KM_SLEEP) == 0); 3369 VERIFY(nvlist_add_nvlist_array(sav->sav_config, config, 3370 devs, ndevs) == 0); 3371 } 3372 } 3373 3374 /* 3375 * Stop and drop level 2 ARC devices. 3376 */ 3377 void 3378 spa_l2cache_drop(spa_t *spa) 3379 { 3380 vdev_t *vd; 3381 int i; 3382 spa_aux_vdev_t *sav = &spa->spa_l2cache; 3383 3384 for (i = 0; i < sav->sav_count; i++) { 3385 uint64_t pool; 3386 3387 vd = sav->sav_vdevs[i]; 3388 ASSERT(vd != NULL); 3389 3390 if (spa_l2cache_exists(vd->vdev_guid, &pool) && 3391 pool != 0ULL && l2arc_vdev_present(vd)) 3392 l2arc_remove_vdev(vd); 3393 } 3394 } 3395 3396 /* 3397 * Pool Creation 3398 */ 3399 int 3400 spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props, 3401 nvlist_t *zplprops) 3402 { 3403 spa_t *spa; 3404 char *altroot = NULL; 3405 vdev_t *rvd; 3406 dsl_pool_t *dp; 3407 dmu_tx_t *tx; 3408 int error = 0; 3409 uint64_t txg = TXG_INITIAL; 3410 nvlist_t **spares, **l2cache; 3411 uint_t nspares, nl2cache; 3412 uint64_t version, obj; 3413 boolean_t has_features; 3414 3415 /* 3416 * If this pool already exists, return failure.
3417 */ 3418 mutex_enter(&spa_namespace_lock); 3419 if (spa_lookup(pool) != NULL) { 3420 mutex_exit(&spa_namespace_lock); 3421 return (SET_ERROR(EEXIST)); 3422 } 3423 3424 /* 3425 * Allocate a new spa_t structure. 3426 */ 3427 (void) nvlist_lookup_string(props, 3428 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot); 3429 spa = spa_add(pool, NULL, altroot); 3430 spa_activate(spa, spa_mode_global); 3431 3432 if (props && (error = spa_prop_validate(spa, props))) { 3433 spa_deactivate(spa); 3434 spa_remove(spa); 3435 mutex_exit(&spa_namespace_lock); 3436 return (error); 3437 } 3438 3439 has_features = B_FALSE; 3440 for (nvpair_t *elem = nvlist_next_nvpair(props, NULL); 3441 elem != NULL; elem = nvlist_next_nvpair(props, elem)) { 3442 if (zpool_prop_feature(nvpair_name(elem))) 3443 has_features = B_TRUE; 3444 } 3445 3446 if (has_features || nvlist_lookup_uint64(props, 3447 zpool_prop_to_name(ZPOOL_PROP_VERSION), &version) != 0) { 3448 version = SPA_VERSION; 3449 } 3450 ASSERT(SPA_VERSION_IS_SUPPORTED(version)); 3451 3452 spa->spa_first_txg = txg; 3453 spa->spa_uberblock.ub_txg = txg - 1; 3454 spa->spa_uberblock.ub_version = version; 3455 spa->spa_ubsync = spa->spa_uberblock; 3456 3457 /* 3458 * Create "The Godfather" zio to hold all async IOs 3459 */ 3460 spa->spa_async_zio_root = zio_root(spa, NULL, NULL, 3461 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_GODFATHER); 3462 3463 /* 3464 * Create the root vdev. 3465 */ 3466 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3467 3468 error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD); 3469 3470 ASSERT(error != 0 || rvd != NULL); 3471 ASSERT(error != 0 || spa->spa_root_vdev == rvd); 3472 3473 if (error == 0 && !zfs_allocatable_devs(nvroot)) 3474 error = SET_ERROR(EINVAL); 3475 3476 if (error == 0 && 3477 (error = vdev_create(rvd, txg, B_FALSE)) == 0 && 3478 (error = spa_validate_aux(spa, nvroot, txg, 3479 VDEV_ALLOC_ADD)) == 0) { 3480 for (int c = 0; c < rvd->vdev_children; c++) { 3481 vdev_metaslab_set_size(rvd->vdev_child[c]); 3482 vdev_expand(rvd->vdev_child[c], txg); 3483 } 3484 } 3485 3486 spa_config_exit(spa, SCL_ALL, FTAG); 3487 3488 if (error != 0) { 3489 spa_unload(spa); 3490 spa_deactivate(spa); 3491 spa_remove(spa); 3492 mutex_exit(&spa_namespace_lock); 3493 return (error); 3494 } 3495 3496 /* 3497 * Get the list of spares, if specified. 3498 */ 3499 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 3500 &spares, &nspares) == 0) { 3501 VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, NV_UNIQUE_NAME, 3502 KM_SLEEP) == 0); 3503 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config, 3504 ZPOOL_CONFIG_SPARES, spares, nspares) == 0); 3505 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3506 spa_load_spares(spa); 3507 spa_config_exit(spa, SCL_ALL, FTAG); 3508 spa->spa_spares.sav_sync = B_TRUE; 3509 } 3510 3511 /* 3512 * Get the list of level 2 cache devices, if specified. 
3513 */ 3514 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 3515 &l2cache, &nl2cache) == 0) { 3516 VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config, 3517 NV_UNIQUE_NAME, KM_SLEEP) == 0); 3518 VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config, 3519 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0); 3520 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3521 spa_load_l2cache(spa); 3522 spa_config_exit(spa, SCL_ALL, FTAG); 3523 spa->spa_l2cache.sav_sync = B_TRUE; 3524 } 3525 3526 spa->spa_is_initializing = B_TRUE; 3527 spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, txg); 3528 spa->spa_meta_objset = dp->dp_meta_objset; 3529 spa->spa_is_initializing = B_FALSE; 3530 3531 /* 3532 * Create DDTs (dedup tables). 3533 */ 3534 ddt_create(spa); 3535 3536 spa_update_dspace(spa); 3537 3538 tx = dmu_tx_create_assigned(dp, txg); 3539 3540 /* 3541 * Create the pool config object. 3542 */ 3543 spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset, 3544 DMU_OT_PACKED_NVLIST, SPA_CONFIG_BLOCKSIZE, 3545 DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx); 3546 3547 if (zap_add(spa->spa_meta_objset, 3548 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG, 3549 sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) { 3550 cmn_err(CE_PANIC, "failed to add pool config"); 3551 } 3552 3553 if (spa_version(spa) >= SPA_VERSION_FEATURES) 3554 spa_feature_create_zap_objects(spa, tx); 3555 3556 if (zap_add(spa->spa_meta_objset, 3557 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CREATION_VERSION, 3558 sizeof (uint64_t), 1, &version, tx) != 0) { 3559 cmn_err(CE_PANIC, "failed to add pool version"); 3560 } 3561 3562 /* Newly created pools with the right version are always deflated. */ 3563 if (version >= SPA_VERSION_RAIDZ_DEFLATE) { 3564 spa->spa_deflate = TRUE; 3565 if (zap_add(spa->spa_meta_objset, 3566 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE, 3567 sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) { 3568 cmn_err(CE_PANIC, "failed to add deflate"); 3569 } 3570 } 3571 3572 /* 3573 * Create the deferred-free bpobj. Turn off compression 3574 * because sync-to-convergence takes longer if the blocksize 3575 * keeps changing. 3576 */ 3577 obj = bpobj_alloc(spa->spa_meta_objset, 1 << 14, tx); 3578 dmu_object_set_compress(spa->spa_meta_objset, obj, 3579 ZIO_COMPRESS_OFF, tx); 3580 if (zap_add(spa->spa_meta_objset, 3581 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPOBJ, 3582 sizeof (uint64_t), 1, &obj, tx) != 0) { 3583 cmn_err(CE_PANIC, "failed to add bpobj"); 3584 } 3585 VERIFY3U(0, ==, bpobj_open(&spa->spa_deferred_bpobj, 3586 spa->spa_meta_objset, obj)); 3587 3588 /* 3589 * Create the pool's history object. 3590 */ 3591 if (version >= SPA_VERSION_ZPOOL_HISTORY) 3592 spa_history_create_obj(spa, tx); 3593 3594 /* 3595 * Set pool properties. 3596 */ 3597 spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS); 3598 spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION); 3599 spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE); 3600 spa->spa_autoexpand = zpool_prop_default_numeric(ZPOOL_PROP_AUTOEXPAND); 3601 3602 if (props != NULL) { 3603 spa_configfile_set(spa, props, B_FALSE); 3604 spa_sync_props(props, tx); 3605 } 3606 3607 dmu_tx_commit(tx); 3608 3609 spa->spa_sync_on = B_TRUE; 3610 txg_sync_start(spa->spa_dsl_pool); 3611 3612 /* 3613 * We explicitly wait for the first transaction to complete so that our 3614 * bean counters are appropriately updated. 
3615 */ 3616 txg_wait_synced(spa->spa_dsl_pool, txg); 3617 3618 spa_config_sync(spa, B_FALSE, B_TRUE); 3619 3620 spa_history_log_version(spa, "create"); 3621 3622 spa->spa_minref = refcount_count(&spa->spa_refcount); 3623 3624 mutex_exit(&spa_namespace_lock); 3625 3626 return (0); 3627 } 3628 3629 #ifdef _KERNEL 3630 /* 3631 * Get the root pool information from the root disk, then import the root pool 3632 * during system boot. 3633 */ 3634 extern int vdev_disk_read_rootlabel(char *, char *, nvlist_t **); 3635 3636 static nvlist_t * 3637 spa_generate_rootconf(char *devpath, char *devid, uint64_t *guid) 3638 { 3639 nvlist_t *config; 3640 nvlist_t *nvtop, *nvroot; 3641 uint64_t pgid; 3642 3643 if (vdev_disk_read_rootlabel(devpath, devid, &config) != 0) 3644 return (NULL); 3645 3646 /* 3647 * Add this top-level vdev to the child array. 3648 */ 3649 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 3650 &nvtop) == 0); 3651 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, 3652 &pgid) == 0); 3653 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, guid) == 0); 3654 3655 /* 3656 * Put this pool's top-level vdevs into a root vdev. 3657 */ 3658 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0); 3659 VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, 3660 VDEV_TYPE_ROOT) == 0); 3661 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0); 3662 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, pgid) == 0); 3663 VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 3664 &nvtop, 1) == 0); 3665 3666 /* 3667 * Replace the existing vdev_tree with the new root vdev in 3668 * this pool's configuration (remove the old, add the new). 3669 */ 3670 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0); 3671 nvlist_free(nvroot); 3672 return (config); 3673 } 3674 3675 /* 3676 * Walk the vdev tree and see if we can find a device with "better" 3677 * configuration. A configuration is "better" if the label on that 3678 * device has a more recent txg. 3679 */ 3680 static void 3681 spa_alt_rootvdev(vdev_t *vd, vdev_t **avd, uint64_t *txg) 3682 { 3683 for (int c = 0; c < vd->vdev_children; c++) 3684 spa_alt_rootvdev(vd->vdev_child[c], avd, txg); 3685 3686 if (vd->vdev_ops->vdev_op_leaf) { 3687 nvlist_t *label; 3688 uint64_t label_txg; 3689 3690 if (vdev_disk_read_rootlabel(vd->vdev_physpath, vd->vdev_devid, 3691 &label) != 0) 3692 return; 3693 3694 VERIFY(nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_TXG, 3695 &label_txg) == 0); 3696 3697 /* 3698 * Do we have a better boot device? 3699 */ 3700 if (label_txg > *txg) { 3701 *txg = label_txg; 3702 *avd = vd; 3703 } 3704 nvlist_free(label); 3705 } 3706 } 3707 3708 /* 3709 * Import a root pool. 3710 * 3711 * For x86, devpath_list will consist of devid and/or physpath name of 3712 * the vdev (e.g. "id1,sd@SSEAGATE..." or "/pci@1f,0/ide@d/disk@0,0:a"). 3713 * The GRUB "findroot" command will return the vdev we should boot. 3714 * 3715 * For SPARC, devpath_list consists of the physpath name of the booting device, 3716 * no matter whether the root pool is a single-device pool or a mirrored pool, 3717 * e.g. 3718 * "/pci@1f,0/ide@d/disk@0,0:a" 3719 */ 3720 int 3721 spa_import_rootpool(char *devpath, char *devid) 3722 { 3723 spa_t *spa; 3724 vdev_t *rvd, *bvd, *avd = NULL; 3725 nvlist_t *config, *nvtop; 3726 uint64_t guid, txg; 3727 char *pname; 3728 int error; 3729 3730 /* 3731 * Read the label from the boot device and generate a configuration.
3732 */ 3733 config = spa_generate_rootconf(devpath, devid, &guid); 3734 #if defined(_OBP) && defined(_KERNEL) 3735 if (config == NULL) { 3736 if (strstr(devpath, "/iscsi/ssd") != NULL) { 3737 /* iscsi boot */ 3738 get_iscsi_bootpath_phy(devpath); 3739 config = spa_generate_rootconf(devpath, devid, &guid); 3740 } 3741 } 3742 #endif 3743 if (config == NULL) { 3744 cmn_err(CE_NOTE, "Cannot read the pool label from '%s'", 3745 devpath); 3746 return (SET_ERROR(EIO)); 3747 } 3748 3749 VERIFY(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME, 3750 &pname) == 0); 3751 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, &txg) == 0); 3752 3753 mutex_enter(&spa_namespace_lock); 3754 if ((spa = spa_lookup(pname)) != NULL) { 3755 /* 3756 * Remove the existing root pool from the namespace so that we 3757 * can replace it with the correct config we just read in. 3758 */ 3759 spa_remove(spa); 3760 } 3761 3762 spa = spa_add(pname, config, NULL); 3763 spa->spa_is_root = B_TRUE; 3764 spa->spa_import_flags = ZFS_IMPORT_VERBATIM; 3765 3766 /* 3767 * Build up a vdev tree based on the boot device's label config. 3768 */ 3769 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 3770 &nvtop) == 0); 3771 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3772 error = spa_config_parse(spa, &rvd, nvtop, NULL, 0, 3773 VDEV_ALLOC_ROOTPOOL); 3774 spa_config_exit(spa, SCL_ALL, FTAG); 3775 if (error) { 3776 mutex_exit(&spa_namespace_lock); 3777 nvlist_free(config); 3778 cmn_err(CE_NOTE, "Cannot parse the config for pool '%s'", 3779 pname); 3780 return (error); 3781 } 3782 3783 /* 3784 * Get the boot vdev. 3785 */ 3786 if ((bvd = vdev_lookup_by_guid(rvd, guid)) == NULL) { 3787 cmn_err(CE_NOTE, "Cannot find the boot vdev for guid %llu", 3788 (u_longlong_t)guid); 3789 error = SET_ERROR(ENOENT); 3790 goto out; 3791 } 3792 3793 /* 3794 * Determine if there is a better boot device. 3795 */ 3796 avd = bvd; 3797 spa_alt_rootvdev(rvd, &avd, &txg); 3798 if (avd != bvd) { 3799 cmn_err(CE_NOTE, "The boot device is 'degraded'. Please " 3800 "try booting from '%s'", avd->vdev_path); 3801 error = SET_ERROR(EINVAL); 3802 goto out; 3803 } 3804 3805 /* 3806 * If the boot device is part of a spare vdev then ensure that 3807 * we're booting off the active spare. 3808 */ 3809 if (bvd->vdev_parent->vdev_ops == &vdev_spare_ops && 3810 !bvd->vdev_isspare) { 3811 cmn_err(CE_NOTE, "The boot device is currently spared. Please " 3812 "try booting from '%s'", 3813 bvd->vdev_parent-> 3814 vdev_child[bvd->vdev_parent->vdev_children - 1]->vdev_path); 3815 error = SET_ERROR(EINVAL); 3816 goto out; 3817 } 3818 3819 error = 0; 3820 out: 3821 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3822 vdev_free(rvd); 3823 spa_config_exit(spa, SCL_ALL, FTAG); 3824 mutex_exit(&spa_namespace_lock); 3825 3826 nvlist_free(config); 3827 return (error); 3828 } 3829 3830 #endif 3831 3832 /* 3833 * Import a non-root pool into the system. 3834 */ 3835 int 3836 spa_import(const char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags) 3837 { 3838 spa_t *spa; 3839 char *altroot = NULL; 3840 spa_load_state_t state = SPA_LOAD_IMPORT; 3841 zpool_rewind_policy_t policy; 3842 uint64_t mode = spa_mode_global; 3843 uint64_t readonly = B_FALSE; 3844 int error; 3845 nvlist_t *nvroot; 3846 nvlist_t **spares, **l2cache; 3847 uint_t nspares, nl2cache; 3848 3849 /* 3850 * If a pool with this name exists, return failure.
3851 */ 3852 mutex_enter(&spa_namespace_lock); 3853 if (spa_lookup(pool) != NULL) { 3854 mutex_exit(&spa_namespace_lock); 3855 return (SET_ERROR(EEXIST)); 3856 } 3857 3858 /* 3859 * Create and initialize the spa structure. 3860 */ 3861 (void) nvlist_lookup_string(props, 3862 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot); 3863 (void) nvlist_lookup_uint64(props, 3864 zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly); 3865 if (readonly) 3866 mode = FREAD; 3867 spa = spa_add(pool, config, altroot); 3868 spa->spa_import_flags = flags; 3869 3870 /* 3871 * Verbatim import - Take a pool and insert it into the namespace 3872 * as if it had been loaded at boot. 3873 */ 3874 if (spa->spa_import_flags & ZFS_IMPORT_VERBATIM) { 3875 if (props != NULL) 3876 spa_configfile_set(spa, props, B_FALSE); 3877 3878 spa_config_sync(spa, B_FALSE, B_TRUE); 3879 3880 mutex_exit(&spa_namespace_lock); 3881 return (0); 3882 } 3883 3884 spa_activate(spa, mode); 3885 3886 /* 3887 * Don't start async tasks until we know everything is healthy. 3888 */ 3889 spa_async_suspend(spa); 3890 3891 zpool_get_rewind_policy(config, &policy); 3892 if (policy.zrp_request & ZPOOL_DO_REWIND) 3893 state = SPA_LOAD_RECOVER; 3894 3895 /* 3896 * Pass off the heavy lifting to spa_load(). Pass TRUE for mosconfig 3897 * because the user-supplied config is actually the one to trust when 3898 * doing an import. 3899 */ 3900 if (state != SPA_LOAD_RECOVER) 3901 spa->spa_last_ubsync_txg = spa->spa_load_txg = 0; 3902 3903 error = spa_load_best(spa, state, B_TRUE, policy.zrp_txg, 3904 policy.zrp_request); 3905 3906 /* 3907 * Propagate anything learned while loading the pool and pass it 3908 * back to caller (i.e. rewind info, missing devices, etc). 3909 */ 3910 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, 3911 spa->spa_load_info) == 0); 3912 3913 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3914 /* 3915 * Toss any existing sparelist, as it doesn't have any validity 3916 * anymore, and conflicts with spa_has_spare(). 3917 */ 3918 if (spa->spa_spares.sav_config) { 3919 nvlist_free(spa->spa_spares.sav_config); 3920 spa->spa_spares.sav_config = NULL; 3921 spa_load_spares(spa); 3922 } 3923 if (spa->spa_l2cache.sav_config) { 3924 nvlist_free(spa->spa_l2cache.sav_config); 3925 spa->spa_l2cache.sav_config = NULL; 3926 spa_load_l2cache(spa); 3927 } 3928 3929 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 3930 &nvroot) == 0); 3931 if (error == 0) 3932 error = spa_validate_aux(spa, nvroot, -1ULL, 3933 VDEV_ALLOC_SPARE); 3934 if (error == 0) 3935 error = spa_validate_aux(spa, nvroot, -1ULL, 3936 VDEV_ALLOC_L2CACHE); 3937 spa_config_exit(spa, SCL_ALL, FTAG); 3938 3939 if (props != NULL) 3940 spa_configfile_set(spa, props, B_FALSE); 3941 3942 if (error != 0 || (props && spa_writeable(spa) && 3943 (error = spa_prop_set(spa, props)))) { 3944 spa_unload(spa); 3945 spa_deactivate(spa); 3946 spa_remove(spa); 3947 mutex_exit(&spa_namespace_lock); 3948 return (error); 3949 } 3950 3951 spa_async_resume(spa); 3952 3953 /* 3954 * Override any spares and level 2 cache devices as specified by 3955 * the user, as these may have correct device names/devids, etc. 
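	 * For example, if the exported config remembers a spare as
	 * "/dev/dsk/c1t2d0s0" but the import supplied it as
	 * "/dev/dsk/c3t2d0s0" (hypothetical names), the user-supplied
	 * nvlists installed below win, and spa_load_spares() /
	 * spa_load_l2cache() reopen the devices from the new paths.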
3956 */ 3957 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 3958 &spares, &nspares) == 0) { 3959 if (spa->spa_spares.sav_config) 3960 VERIFY(nvlist_remove(spa->spa_spares.sav_config, 3961 ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0); 3962 else 3963 VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, 3964 NV_UNIQUE_NAME, KM_SLEEP) == 0); 3965 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config, 3966 ZPOOL_CONFIG_SPARES, spares, nspares) == 0); 3967 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3968 spa_load_spares(spa); 3969 spa_config_exit(spa, SCL_ALL, FTAG); 3970 spa->spa_spares.sav_sync = B_TRUE; 3971 } 3972 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 3973 &l2cache, &nl2cache) == 0) { 3974 if (spa->spa_l2cache.sav_config) 3975 VERIFY(nvlist_remove(spa->spa_l2cache.sav_config, 3976 ZPOOL_CONFIG_L2CACHE, DATA_TYPE_NVLIST_ARRAY) == 0); 3977 else 3978 VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config, 3979 NV_UNIQUE_NAME, KM_SLEEP) == 0); 3980 VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config, 3981 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0); 3982 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3983 spa_load_l2cache(spa); 3984 spa_config_exit(spa, SCL_ALL, FTAG); 3985 spa->spa_l2cache.sav_sync = B_TRUE; 3986 } 3987 3988 /* 3989 * Check for any removed devices. 3990 */ 3991 if (spa->spa_autoreplace) { 3992 spa_aux_check_removed(&spa->spa_spares); 3993 spa_aux_check_removed(&spa->spa_l2cache); 3994 } 3995 3996 if (spa_writeable(spa)) { 3997 /* 3998 * Update the config cache to include the newly-imported pool. 3999 */ 4000 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 4001 } 4002 4003 /* 4004 * It's possible that the pool was expanded while it was exported. 4005 * We kick off an async task to handle this for us. 4006 */ 4007 spa_async_request(spa, SPA_ASYNC_AUTOEXPAND); 4008 4009 mutex_exit(&spa_namespace_lock); 4010 spa_history_log_version(spa, "import"); 4011 4012 return (0); 4013 } 4014 4015 nvlist_t * 4016 spa_tryimport(nvlist_t *tryconfig) 4017 { 4018 nvlist_t *config = NULL; 4019 char *poolname; 4020 spa_t *spa; 4021 uint64_t state; 4022 int error; 4023 4024 if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname)) 4025 return (NULL); 4026 4027 if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state)) 4028 return (NULL); 4029 4030 /* 4031 * Create and initialize the spa structure. 4032 */ 4033 mutex_enter(&spa_namespace_lock); 4034 spa = spa_add(TRYIMPORT_NAME, tryconfig, NULL); 4035 spa_activate(spa, FREAD); 4036 4037 /* 4038 * Pass off the heavy lifting to spa_load(). 4039 * Pass TRUE for mosconfig because the user-supplied config 4040 * is actually the one to trust when doing an import. 4041 */ 4042 error = spa_load(spa, SPA_LOAD_TRYIMPORT, SPA_IMPORT_EXISTING, B_TRUE); 4043 4044 /* 4045 * If 'tryconfig' was at least parsable, return the current config. 4046 */ 4047 if (spa->spa_root_vdev != NULL) { 4048 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE); 4049 VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, 4050 poolname) == 0); 4051 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE, 4052 state) == 0); 4053 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP, 4054 spa->spa_uberblock.ub_timestamp) == 0); 4055 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, 4056 spa->spa_load_info) == 0); 4057 4058 /* 4059 * If the bootfs property exists on this pool then we 4060 * copy it out so that external consumers can tell which 4061 * pools are bootable. 
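		 * For example (illustrative): the pool is open here as
		 * TRYIMPORT_NAME ("$import"), so a bootfs dataset that
		 * resolves to "$import/ROOT/be" is rewritten below to
		 * "<poolname>/ROOT/be" using the name from 'tryconfig'.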
4062 		 */
4063 		if ((!error || error == EEXIST) && spa->spa_bootfs) {
4064 			char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
4065 
4066 			/*
4067 			 * We have to play games with the name since the
4068 			 * pool was opened as TRYIMPORT_NAME.
4069 			 */
4070 			if (dsl_dsobj_to_dsname(spa_name(spa),
4071 			    spa->spa_bootfs, tmpname) == 0) {
4072 				char *cp;
4073 				char *dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
4074 
4075 				cp = strchr(tmpname, '/');
4076 				if (cp == NULL) {
4077 					(void) strlcpy(dsname, tmpname,
4078 					    MAXPATHLEN);
4079 				} else {
4080 					(void) snprintf(dsname, MAXPATHLEN,
4081 					    "%s/%s", poolname, ++cp);
4082 				}
4083 				VERIFY(nvlist_add_string(config,
4084 				    ZPOOL_CONFIG_BOOTFS, dsname) == 0);
4085 				kmem_free(dsname, MAXPATHLEN);
4086 			}
4087 			kmem_free(tmpname, MAXPATHLEN);
4088 		}
4089 
4090 		/*
4091 		 * Add the list of hot spares and level 2 cache devices.
4092 		 */
4093 		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
4094 		spa_add_spares(spa, config);
4095 		spa_add_l2cache(spa, config);
4096 		spa_config_exit(spa, SCL_CONFIG, FTAG);
4097 	}
4098 
4099 	spa_unload(spa);
4100 	spa_deactivate(spa);
4101 	spa_remove(spa);
4102 	mutex_exit(&spa_namespace_lock);
4103 
4104 	return (config);
4105 }
4106 
4107 /*
4108  * Pool export/destroy
4109  *
4110  * The act of destroying or exporting a pool is very simple. We make sure there
4111  * is no more pending I/O and any references to the pool are gone. Then, we
4112  * update the pool state and sync all the labels to disk, removing the
4113  * configuration from the cache afterwards. If the 'hardforce' flag is set, then
4114  * we don't sync the labels or remove the configuration cache.
4115  */
4116 static int
4117 spa_export_common(char *pool, int new_state, nvlist_t **oldconfig,
4118     boolean_t force, boolean_t hardforce)
4119 {
4120 	spa_t *spa;
4121 
4122 	if (oldconfig)
4123 		*oldconfig = NULL;
4124 
4125 	if (!(spa_mode_global & FWRITE))
4126 		return (SET_ERROR(EROFS));
4127 
4128 	mutex_enter(&spa_namespace_lock);
4129 	if ((spa = spa_lookup(pool)) == NULL) {
4130 		mutex_exit(&spa_namespace_lock);
4131 		return (SET_ERROR(ENOENT));
4132 	}
4133 
4134 	/*
4135 	 * Put a hold on the pool, drop the namespace lock, stop async tasks,
4136 	 * reacquire the namespace lock, and see if we can export.
4137 	 */
4138 	spa_open_ref(spa, FTAG);
4139 	mutex_exit(&spa_namespace_lock);
4140 	spa_async_suspend(spa);
4141 	mutex_enter(&spa_namespace_lock);
4142 	spa_close(spa, FTAG);
4143 
4144 	/*
4145 	 * The pool will be in core if it's openable,
4146 	 * in which case we can modify its state.
4147 	 */
4148 	if (spa->spa_state != POOL_STATE_UNINITIALIZED && spa->spa_sync_on) {
4149 		/*
4150 		 * Objsets may be open only because they're dirty, so we
4151 		 * have to force the pool to sync before checking spa_refcnt.
4152 		 */
4153 		txg_wait_synced(spa->spa_dsl_pool, 0);
4154 
4155 		/*
4156 		 * A pool cannot be exported or destroyed if there are active
4157 		 * references. If we are resetting a pool, allow references by
4158 		 * fault injection handlers.
4159 		 */
4160 		if (!spa_refcount_zero(spa) ||
4161 		    (spa->spa_inject_ref != 0 &&
4162 		    new_state != POOL_STATE_UNINITIALIZED)) {
4163 			spa_async_resume(spa);
4164 			mutex_exit(&spa_namespace_lock);
4165 			return (SET_ERROR(EBUSY));
4166 		}
4167 
4168 		/*
4169 		 * A pool cannot be exported if it has an active shared spare.
4170 		 * This is to prevent other pools from stealing the active
4171 		 * spare from an exported pool. The user can, however,
4172 		 * forcibly export such a pool.
4173 */ 4174 if (!force && new_state == POOL_STATE_EXPORTED && 4175 spa_has_active_shared_spare(spa)) { 4176 spa_async_resume(spa); 4177 mutex_exit(&spa_namespace_lock); 4178 return (SET_ERROR(EXDEV)); 4179 } 4180 4181 /* 4182 * We want this to be reflected on every label, 4183 * so mark them all dirty. spa_unload() will do the 4184 * final sync that pushes these changes out. 4185 */ 4186 if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) { 4187 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4188 spa->spa_state = new_state; 4189 spa->spa_final_txg = spa_last_synced_txg(spa) + 4190 TXG_DEFER_SIZE + 1; 4191 vdev_config_dirty(spa->spa_root_vdev); 4192 spa_config_exit(spa, SCL_ALL, FTAG); 4193 } 4194 } 4195 4196 spa_event_notify(spa, NULL, ESC_ZFS_POOL_DESTROY); 4197 4198 if (spa->spa_state != POOL_STATE_UNINITIALIZED) { 4199 spa_unload(spa); 4200 spa_deactivate(spa); 4201 } 4202 4203 if (oldconfig && spa->spa_config) 4204 VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0); 4205 4206 if (new_state != POOL_STATE_UNINITIALIZED) { 4207 if (!hardforce) 4208 spa_config_sync(spa, B_TRUE, B_TRUE); 4209 spa_remove(spa); 4210 } 4211 mutex_exit(&spa_namespace_lock); 4212 4213 return (0); 4214 } 4215 4216 /* 4217 * Destroy a storage pool. 4218 */ 4219 int 4220 spa_destroy(char *pool) 4221 { 4222 return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL, 4223 B_FALSE, B_FALSE)); 4224 } 4225 4226 /* 4227 * Export a storage pool. 4228 */ 4229 int 4230 spa_export(char *pool, nvlist_t **oldconfig, boolean_t force, 4231 boolean_t hardforce) 4232 { 4233 return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig, 4234 force, hardforce)); 4235 } 4236 4237 /* 4238 * Similar to spa_export(), this unloads the spa_t without actually removing it 4239 * from the namespace in any way. 4240 */ 4241 int 4242 spa_reset(char *pool) 4243 { 4244 return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL, 4245 B_FALSE, B_FALSE)); 4246 } 4247 4248 /* 4249 * ========================================================================== 4250 * Device manipulation 4251 * ========================================================================== 4252 */ 4253 4254 /* 4255 * Add a device to a storage pool. 4256 */ 4257 int 4258 spa_vdev_add(spa_t *spa, nvlist_t *nvroot) 4259 { 4260 uint64_t txg, id; 4261 int error; 4262 vdev_t *rvd = spa->spa_root_vdev; 4263 vdev_t *vd, *tvd; 4264 nvlist_t **spares, **l2cache; 4265 uint_t nspares, nl2cache; 4266 4267 ASSERT(spa_writeable(spa)); 4268 4269 txg = spa_vdev_enter(spa); 4270 4271 if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0, 4272 VDEV_ALLOC_ADD)) != 0) 4273 return (spa_vdev_exit(spa, NULL, txg, error)); 4274 4275 spa->spa_pending_vdev = vd; /* spa_vdev_exit() will clear this */ 4276 4277 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares, 4278 &nspares) != 0) 4279 nspares = 0; 4280 4281 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache, 4282 &nl2cache) != 0) 4283 nl2cache = 0; 4284 4285 if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0) 4286 return (spa_vdev_exit(spa, vd, txg, EINVAL)); 4287 4288 if (vd->vdev_children != 0 && 4289 (error = vdev_create(vd, txg, B_FALSE)) != 0) 4290 return (spa_vdev_exit(spa, vd, txg, error)); 4291 4292 /* 4293 * We must validate the spares and l2cache devices after checking the 4294 * children. Otherwise, vdev_inuse() will blindly overwrite the spare. 
4295 */ 4296 if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0) 4297 return (spa_vdev_exit(spa, vd, txg, error)); 4298 4299 /* 4300 * Transfer each new top-level vdev from vd to rvd. 4301 */ 4302 for (int c = 0; c < vd->vdev_children; c++) { 4303 4304 /* 4305 * Set the vdev id to the first hole, if one exists. 4306 */ 4307 for (id = 0; id < rvd->vdev_children; id++) { 4308 if (rvd->vdev_child[id]->vdev_ishole) { 4309 vdev_free(rvd->vdev_child[id]); 4310 break; 4311 } 4312 } 4313 tvd = vd->vdev_child[c]; 4314 vdev_remove_child(vd, tvd); 4315 tvd->vdev_id = id; 4316 vdev_add_child(rvd, tvd); 4317 vdev_config_dirty(tvd); 4318 } 4319 4320 if (nspares != 0) { 4321 spa_set_aux_vdevs(&spa->spa_spares, spares, nspares, 4322 ZPOOL_CONFIG_SPARES); 4323 spa_load_spares(spa); 4324 spa->spa_spares.sav_sync = B_TRUE; 4325 } 4326 4327 if (nl2cache != 0) { 4328 spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache, 4329 ZPOOL_CONFIG_L2CACHE); 4330 spa_load_l2cache(spa); 4331 spa->spa_l2cache.sav_sync = B_TRUE; 4332 } 4333 4334 /* 4335 * We have to be careful when adding new vdevs to an existing pool. 4336 * If other threads start allocating from these vdevs before we 4337 * sync the config cache, and we lose power, then upon reboot we may 4338 * fail to open the pool because there are DVAs that the config cache 4339 * can't translate. Therefore, we first add the vdevs without 4340 * initializing metaslabs; sync the config cache (via spa_vdev_exit()); 4341 * and then let spa_config_update() initialize the new metaslabs. 4342 * 4343 * spa_load() checks for added-but-not-initialized vdevs, so that 4344 * if we lose power at any point in this sequence, the remaining 4345 * steps will be completed the next time we load the pool. 4346 */ 4347 (void) spa_vdev_exit(spa, vd, txg, 0); 4348 4349 mutex_enter(&spa_namespace_lock); 4350 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 4351 mutex_exit(&spa_namespace_lock); 4352 4353 return (0); 4354 } 4355 4356 /* 4357 * Attach a device to a mirror. The arguments are the path to any device 4358 * in the mirror, and the nvroot for the new device. If the path specifies 4359 * a device that is not mirrored, we automatically insert the mirror vdev. 4360 * 4361 * If 'replacing' is specified, the new device is intended to replace the 4362 * existing device; in this case the two devices are made into their own 4363 * mirror using the 'replacing' vdev, which is functionally identical to 4364 * the mirror vdev (it actually reuses all the same ops) but has a few 4365 * extra rules: you can't attach to it after it's been created, and upon 4366 * completion of resilvering, the first disk (the one being replaced) 4367 * is automatically detached. 
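 *
 * Illustrative calls (hypothetical guids and nvlists; the real callers
 * live in the zfs ioctl paths):
 *
 *	attach disk B to plain disk A, forming mirror(A,B):
 *		error = spa_vdev_attach(spa, guid_of_A, nvroot_for_B, B_FALSE);
 *	replace disk B with disk C:
 *		error = spa_vdev_attach(spa, guid_of_B, nvroot_for_C, B_TRUE);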
4368 */ 4369 int 4370 spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing) 4371 { 4372 uint64_t txg, dtl_max_txg; 4373 vdev_t *rvd = spa->spa_root_vdev; 4374 vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd; 4375 vdev_ops_t *pvops; 4376 char *oldvdpath, *newvdpath; 4377 int newvd_isspare; 4378 int error; 4379 4380 ASSERT(spa_writeable(spa)); 4381 4382 txg = spa_vdev_enter(spa); 4383 4384 oldvd = spa_lookup_by_guid(spa, guid, B_FALSE); 4385 4386 if (oldvd == NULL) 4387 return (spa_vdev_exit(spa, NULL, txg, ENODEV)); 4388 4389 if (!oldvd->vdev_ops->vdev_op_leaf) 4390 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 4391 4392 pvd = oldvd->vdev_parent; 4393 4394 if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0, 4395 VDEV_ALLOC_ATTACH)) != 0) 4396 return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 4397 4398 if (newrootvd->vdev_children != 1) 4399 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL)); 4400 4401 newvd = newrootvd->vdev_child[0]; 4402 4403 if (!newvd->vdev_ops->vdev_op_leaf) 4404 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL)); 4405 4406 if ((error = vdev_create(newrootvd, txg, replacing)) != 0) 4407 return (spa_vdev_exit(spa, newrootvd, txg, error)); 4408 4409 /* 4410 * Spares can't replace logs 4411 */ 4412 if (oldvd->vdev_top->vdev_islog && newvd->vdev_isspare) 4413 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 4414 4415 if (!replacing) { 4416 /* 4417 * For attach, the only allowable parent is a mirror or the root 4418 * vdev. 4419 */ 4420 if (pvd->vdev_ops != &vdev_mirror_ops && 4421 pvd->vdev_ops != &vdev_root_ops) 4422 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 4423 4424 pvops = &vdev_mirror_ops; 4425 } else { 4426 /* 4427 * Active hot spares can only be replaced by inactive hot 4428 * spares. 4429 */ 4430 if (pvd->vdev_ops == &vdev_spare_ops && 4431 oldvd->vdev_isspare && 4432 !spa_has_spare(spa, newvd->vdev_guid)) 4433 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 4434 4435 /* 4436 * If the source is a hot spare, and the parent isn't already a 4437 * spare, then we want to create a new hot spare. Otherwise, we 4438 * want to create a replacing vdev. The user is not allowed to 4439 * attach to a spared vdev child unless the 'isspare' state is 4440 * the same (spare replaces spare, non-spare replaces 4441 * non-spare). 4442 */ 4443 if (pvd->vdev_ops == &vdev_replacing_ops && 4444 spa_version(spa) < SPA_VERSION_MULTI_REPLACE) { 4445 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 4446 } else if (pvd->vdev_ops == &vdev_spare_ops && 4447 newvd->vdev_isspare != oldvd->vdev_isspare) { 4448 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 4449 } 4450 4451 if (newvd->vdev_isspare) 4452 pvops = &vdev_spare_ops; 4453 else 4454 pvops = &vdev_replacing_ops; 4455 } 4456 4457 /* 4458 * Make sure the new device is big enough. 4459 */ 4460 if (newvd->vdev_asize < vdev_get_min_asize(oldvd)) 4461 return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW)); 4462 4463 /* 4464 * The new device cannot have a higher alignment requirement 4465 * than the top-level vdev. 4466 */ 4467 if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift) 4468 return (spa_vdev_exit(spa, newrootvd, txg, EDOM)); 4469 4470 /* 4471 * If this is an in-place replacement, update oldvd's path and devid 4472 * to make it distinguishable from newvd, and unopenable from now on. 
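	 * For example, replacing "/dev/dsk/c1t0d0s0" in place (hypothetical
	 * device name) leaves oldvd's path as "/dev/dsk/c1t0d0s0/old" and
	 * clears its devid, so only newvd will open the real device from
	 * now on.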
4473 */ 4474 if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) { 4475 spa_strfree(oldvd->vdev_path); 4476 oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5, 4477 KM_SLEEP); 4478 (void) sprintf(oldvd->vdev_path, "%s/%s", 4479 newvd->vdev_path, "old"); 4480 if (oldvd->vdev_devid != NULL) { 4481 spa_strfree(oldvd->vdev_devid); 4482 oldvd->vdev_devid = NULL; 4483 } 4484 } 4485 4486 /* mark the device being resilvered */ 4487 newvd->vdev_resilver_txg = txg; 4488 4489 /* 4490 * If the parent is not a mirror, or if we're replacing, insert the new 4491 * mirror/replacing/spare vdev above oldvd. 4492 */ 4493 if (pvd->vdev_ops != pvops) 4494 pvd = vdev_add_parent(oldvd, pvops); 4495 4496 ASSERT(pvd->vdev_top->vdev_parent == rvd); 4497 ASSERT(pvd->vdev_ops == pvops); 4498 ASSERT(oldvd->vdev_parent == pvd); 4499 4500 /* 4501 * Extract the new device from its root and add it to pvd. 4502 */ 4503 vdev_remove_child(newrootvd, newvd); 4504 newvd->vdev_id = pvd->vdev_children; 4505 newvd->vdev_crtxg = oldvd->vdev_crtxg; 4506 vdev_add_child(pvd, newvd); 4507 4508 tvd = newvd->vdev_top; 4509 ASSERT(pvd->vdev_top == tvd); 4510 ASSERT(tvd->vdev_parent == rvd); 4511 4512 vdev_config_dirty(tvd); 4513 4514 /* 4515 * Set newvd's DTL to [TXG_INITIAL, dtl_max_txg) so that we account 4516 * for any dmu_sync-ed blocks. It will propagate upward when 4517 * spa_vdev_exit() calls vdev_dtl_reassess(). 4518 */ 4519 dtl_max_txg = txg + TXG_CONCURRENT_STATES; 4520 4521 vdev_dtl_dirty(newvd, DTL_MISSING, TXG_INITIAL, 4522 dtl_max_txg - TXG_INITIAL); 4523 4524 if (newvd->vdev_isspare) { 4525 spa_spare_activate(newvd); 4526 spa_event_notify(spa, newvd, ESC_ZFS_VDEV_SPARE); 4527 } 4528 4529 oldvdpath = spa_strdup(oldvd->vdev_path); 4530 newvdpath = spa_strdup(newvd->vdev_path); 4531 newvd_isspare = newvd->vdev_isspare; 4532 4533 /* 4534 * Mark newvd's DTL dirty in this txg. 4535 */ 4536 vdev_dirty(tvd, VDD_DTL, newvd, txg); 4537 4538 /* 4539 * Schedule the resilver to restart in the future. We do this to 4540 * ensure that dmu_sync-ed blocks have been stitched into the 4541 * respective datasets. 4542 */ 4543 dsl_resilver_restart(spa->spa_dsl_pool, dtl_max_txg); 4544 4545 /* 4546 * Commit the config 4547 */ 4548 (void) spa_vdev_exit(spa, newrootvd, dtl_max_txg, 0); 4549 4550 spa_history_log_internal(spa, "vdev attach", NULL, 4551 "%s vdev=%s %s vdev=%s", 4552 replacing && newvd_isspare ? "spare in" : 4553 replacing ? "replace" : "attach", newvdpath, 4554 replacing ? "for" : "to", oldvdpath); 4555 4556 spa_strfree(oldvdpath); 4557 spa_strfree(newvdpath); 4558 4559 if (spa->spa_bootfs) 4560 spa_event_notify(spa, newvd, ESC_ZFS_BOOTFS_VDEV_ATTACH); 4561 4562 return (0); 4563 } 4564 4565 /* 4566 * Detach a device from a mirror or replacing vdev. 4567 * 4568 * If 'replace_done' is specified, only detach if the parent 4569 * is a replacing vdev. 
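 *
 * Illustrative call, matching how spa_vdev_resilver_done() (below) uses
 * this function:
 *
 *	(void) spa_vdev_detach(spa, guid, pguid, B_TRUE);
 *
 * Passing the expected parent guid ('pguid') lets a racing topology
 * change fail the request with EBUSY instead of detaching the wrong vdev.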
4570 */ 4571 int 4572 spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done) 4573 { 4574 uint64_t txg; 4575 int error; 4576 vdev_t *rvd = spa->spa_root_vdev; 4577 vdev_t *vd, *pvd, *cvd, *tvd; 4578 boolean_t unspare = B_FALSE; 4579 uint64_t unspare_guid = 0; 4580 char *vdpath; 4581 4582 ASSERT(spa_writeable(spa)); 4583 4584 txg = spa_vdev_enter(spa); 4585 4586 vd = spa_lookup_by_guid(spa, guid, B_FALSE); 4587 4588 if (vd == NULL) 4589 return (spa_vdev_exit(spa, NULL, txg, ENODEV)); 4590 4591 if (!vd->vdev_ops->vdev_op_leaf) 4592 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 4593 4594 pvd = vd->vdev_parent; 4595 4596 /* 4597 * If the parent/child relationship is not as expected, don't do it. 4598 * Consider M(A,R(B,C)) -- that is, a mirror of A with a replacing 4599 * vdev that's replacing B with C. The user's intent in replacing 4600 * is to go from M(A,B) to M(A,C). If the user decides to cancel 4601 * the replace by detaching C, the expected behavior is to end up 4602 * M(A,B). But suppose that right after deciding to detach C, 4603 * the replacement of B completes. We would have M(A,C), and then 4604 * ask to detach C, which would leave us with just A -- not what 4605 * the user wanted. To prevent this, we make sure that the 4606 * parent/child relationship hasn't changed -- in this example, 4607 * that C's parent is still the replacing vdev R. 4608 */ 4609 if (pvd->vdev_guid != pguid && pguid != 0) 4610 return (spa_vdev_exit(spa, NULL, txg, EBUSY)); 4611 4612 /* 4613 * Only 'replacing' or 'spare' vdevs can be replaced. 4614 */ 4615 if (replace_done && pvd->vdev_ops != &vdev_replacing_ops && 4616 pvd->vdev_ops != &vdev_spare_ops) 4617 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 4618 4619 ASSERT(pvd->vdev_ops != &vdev_spare_ops || 4620 spa_version(spa) >= SPA_VERSION_SPARES); 4621 4622 /* 4623 * Only mirror, replacing, and spare vdevs support detach. 4624 */ 4625 if (pvd->vdev_ops != &vdev_replacing_ops && 4626 pvd->vdev_ops != &vdev_mirror_ops && 4627 pvd->vdev_ops != &vdev_spare_ops) 4628 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 4629 4630 /* 4631 * If this device has the only valid copy of some data, 4632 * we cannot safely detach it. 4633 */ 4634 if (vdev_dtl_required(vd)) 4635 return (spa_vdev_exit(spa, NULL, txg, EBUSY)); 4636 4637 ASSERT(pvd->vdev_children >= 2); 4638 4639 /* 4640 * If we are detaching the second disk from a replacing vdev, then 4641 * check to see if we changed the original vdev's path to have "/old" 4642 * at the end in spa_vdev_attach(). If so, undo that change now. 4643 */ 4644 if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id > 0 && 4645 vd->vdev_path != NULL) { 4646 size_t len = strlen(vd->vdev_path); 4647 4648 for (int c = 0; c < pvd->vdev_children; c++) { 4649 cvd = pvd->vdev_child[c]; 4650 4651 if (cvd == vd || cvd->vdev_path == NULL) 4652 continue; 4653 4654 if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 && 4655 strcmp(cvd->vdev_path + len, "/old") == 0) { 4656 spa_strfree(cvd->vdev_path); 4657 cvd->vdev_path = spa_strdup(vd->vdev_path); 4658 break; 4659 } 4660 } 4661 } 4662 4663 /* 4664 * If we are detaching the original disk from a spare, then it implies 4665 * that the spare should become a real disk, and be removed from the 4666 * active spare list for the pool. 4667 */ 4668 if (pvd->vdev_ops == &vdev_spare_ops && 4669 vd->vdev_id == 0 && 4670 pvd->vdev_child[pvd->vdev_children - 1]->vdev_isspare) 4671 unspare = B_TRUE; 4672 4673 /* 4674 * Erase the disk labels so the disk can be used for other things. 
4675 * This must be done after all other error cases are handled, 4676 * but before we disembowel vd (so we can still do I/O to it). 4677 * But if we can't do it, don't treat the error as fatal -- 4678 * it may be that the unwritability of the disk is the reason 4679 * it's being detached! 4680 */ 4681 error = vdev_label_init(vd, 0, VDEV_LABEL_REMOVE); 4682 4683 /* 4684 * Remove vd from its parent and compact the parent's children. 4685 */ 4686 vdev_remove_child(pvd, vd); 4687 vdev_compact_children(pvd); 4688 4689 /* 4690 * Remember one of the remaining children so we can get tvd below. 4691 */ 4692 cvd = pvd->vdev_child[pvd->vdev_children - 1]; 4693 4694 /* 4695 * If we need to remove the remaining child from the list of hot spares, 4696 * do it now, marking the vdev as no longer a spare in the process. 4697 * We must do this before vdev_remove_parent(), because that can 4698 * change the GUID if it creates a new toplevel GUID. For a similar 4699 * reason, we must remove the spare now, in the same txg as the detach; 4700 * otherwise someone could attach a new sibling, change the GUID, and 4701 * the subsequent attempt to spa_vdev_remove(unspare_guid) would fail. 4702 */ 4703 if (unspare) { 4704 ASSERT(cvd->vdev_isspare); 4705 spa_spare_remove(cvd); 4706 unspare_guid = cvd->vdev_guid; 4707 (void) spa_vdev_remove(spa, unspare_guid, B_TRUE); 4708 cvd->vdev_unspare = B_TRUE; 4709 } 4710 4711 /* 4712 * If the parent mirror/replacing vdev only has one child, 4713 * the parent is no longer needed. Remove it from the tree. 4714 */ 4715 if (pvd->vdev_children == 1) { 4716 if (pvd->vdev_ops == &vdev_spare_ops) 4717 cvd->vdev_unspare = B_FALSE; 4718 vdev_remove_parent(cvd); 4719 } 4720 4721 4722 /* 4723 * We don't set tvd until now because the parent we just removed 4724 * may have been the previous top-level vdev. 4725 */ 4726 tvd = cvd->vdev_top; 4727 ASSERT(tvd->vdev_parent == rvd); 4728 4729 /* 4730 * Reevaluate the parent vdev state. 4731 */ 4732 vdev_propagate_state(cvd); 4733 4734 /* 4735 * If the 'autoexpand' property is set on the pool then automatically 4736 * try to expand the size of the pool. For example if the device we 4737 * just detached was smaller than the others, it may be possible to 4738 * add metaslabs (i.e. grow the pool). We need to reopen the vdev 4739 * first so that we can obtain the updated sizes of the leaf vdevs. 4740 */ 4741 if (spa->spa_autoexpand) { 4742 vdev_reopen(tvd); 4743 vdev_expand(tvd, txg); 4744 } 4745 4746 vdev_config_dirty(tvd); 4747 4748 /* 4749 * Mark vd's DTL as dirty in this txg. vdev_dtl_sync() will see that 4750 * vd->vdev_detached is set and free vd's DTL object in syncing context. 4751 * But first make sure we're not on any *other* txg's DTL list, to 4752 * prevent vd from being accessed after it's freed. 4753 */ 4754 vdpath = spa_strdup(vd->vdev_path); 4755 for (int t = 0; t < TXG_SIZE; t++) 4756 (void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t); 4757 vd->vdev_detached = B_TRUE; 4758 vdev_dirty(tvd, VDD_DTL, vd, txg); 4759 4760 spa_event_notify(spa, vd, ESC_ZFS_VDEV_REMOVE); 4761 4762 /* hang on to the spa before we release the lock */ 4763 spa_open_ref(spa, FTAG); 4764 4765 error = spa_vdev_exit(spa, vd, txg, 0); 4766 4767 spa_history_log_internal(spa, "detach", NULL, 4768 "vdev=%s", vdpath); 4769 spa_strfree(vdpath); 4770 4771 /* 4772 * If this was the removal of the original device in a hot spare vdev, 4773 * then we want to go through and remove the device from the hot spare 4774 * list of every other pool. 
4775 */ 4776 if (unspare) { 4777 spa_t *altspa = NULL; 4778 4779 mutex_enter(&spa_namespace_lock); 4780 while ((altspa = spa_next(altspa)) != NULL) { 4781 if (altspa->spa_state != POOL_STATE_ACTIVE || 4782 altspa == spa) 4783 continue; 4784 4785 spa_open_ref(altspa, FTAG); 4786 mutex_exit(&spa_namespace_lock); 4787 (void) spa_vdev_remove(altspa, unspare_guid, B_TRUE); 4788 mutex_enter(&spa_namespace_lock); 4789 spa_close(altspa, FTAG); 4790 } 4791 mutex_exit(&spa_namespace_lock); 4792 4793 /* search the rest of the vdevs for spares to remove */ 4794 spa_vdev_resilver_done(spa); 4795 } 4796 4797 /* all done with the spa; OK to release */ 4798 mutex_enter(&spa_namespace_lock); 4799 spa_close(spa, FTAG); 4800 mutex_exit(&spa_namespace_lock); 4801 4802 return (error); 4803 } 4804 4805 /* 4806 * Split a set of devices from their mirrors, and create a new pool from them. 4807 */ 4808 int 4809 spa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config, 4810 nvlist_t *props, boolean_t exp) 4811 { 4812 int error = 0; 4813 uint64_t txg, *glist; 4814 spa_t *newspa; 4815 uint_t c, children, lastlog; 4816 nvlist_t **child, *nvl, *tmp; 4817 dmu_tx_t *tx; 4818 char *altroot = NULL; 4819 vdev_t *rvd, **vml = NULL; /* vdev modify list */ 4820 boolean_t activate_slog; 4821 4822 ASSERT(spa_writeable(spa)); 4823 4824 txg = spa_vdev_enter(spa); 4825 4826 /* clear the log and flush everything up to now */ 4827 activate_slog = spa_passivate_log(spa); 4828 (void) spa_vdev_config_exit(spa, NULL, txg, 0, FTAG); 4829 error = spa_offline_log(spa); 4830 txg = spa_vdev_config_enter(spa); 4831 4832 if (activate_slog) 4833 spa_activate_log(spa); 4834 4835 if (error != 0) 4836 return (spa_vdev_exit(spa, NULL, txg, error)); 4837 4838 /* check new spa name before going any further */ 4839 if (spa_lookup(newname) != NULL) 4840 return (spa_vdev_exit(spa, NULL, txg, EEXIST)); 4841 4842 /* 4843 * scan through all the children to ensure they're all mirrors 4844 */ 4845 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvl) != 0 || 4846 nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN, &child, 4847 &children) != 0) 4848 return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 4849 4850 /* first, check to ensure we've got the right child count */ 4851 rvd = spa->spa_root_vdev; 4852 lastlog = 0; 4853 for (c = 0; c < rvd->vdev_children; c++) { 4854 vdev_t *vd = rvd->vdev_child[c]; 4855 4856 /* don't count the holes & logs as children */ 4857 if (vd->vdev_islog || vd->vdev_ishole) { 4858 if (lastlog == 0) 4859 lastlog = c; 4860 continue; 4861 } 4862 4863 lastlog = 0; 4864 } 4865 if (children != (lastlog != 0 ? 
lastlog : rvd->vdev_children)) 4866 return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 4867 4868 /* next, ensure no spare or cache devices are part of the split */ 4869 if (nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_SPARES, &tmp) == 0 || 4870 nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_L2CACHE, &tmp) == 0) 4871 return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 4872 4873 vml = kmem_zalloc(children * sizeof (vdev_t *), KM_SLEEP); 4874 glist = kmem_zalloc(children * sizeof (uint64_t), KM_SLEEP); 4875 4876 /* then, loop over each vdev and validate it */ 4877 for (c = 0; c < children; c++) { 4878 uint64_t is_hole = 0; 4879 4880 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE, 4881 &is_hole); 4882 4883 if (is_hole != 0) { 4884 if (spa->spa_root_vdev->vdev_child[c]->vdev_ishole || 4885 spa->spa_root_vdev->vdev_child[c]->vdev_islog) { 4886 continue; 4887 } else { 4888 error = SET_ERROR(EINVAL); 4889 break; 4890 } 4891 } 4892 4893 /* which disk is going to be split? */ 4894 if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_GUID, 4895 &glist[c]) != 0) { 4896 error = SET_ERROR(EINVAL); 4897 break; 4898 } 4899 4900 /* look it up in the spa */ 4901 vml[c] = spa_lookup_by_guid(spa, glist[c], B_FALSE); 4902 if (vml[c] == NULL) { 4903 error = SET_ERROR(ENODEV); 4904 break; 4905 } 4906 4907 /* make sure there's nothing stopping the split */ 4908 if (vml[c]->vdev_parent->vdev_ops != &vdev_mirror_ops || 4909 vml[c]->vdev_islog || 4910 vml[c]->vdev_ishole || 4911 vml[c]->vdev_isspare || 4912 vml[c]->vdev_isl2cache || 4913 !vdev_writeable(vml[c]) || 4914 vml[c]->vdev_children != 0 || 4915 vml[c]->vdev_state != VDEV_STATE_HEALTHY || 4916 c != spa->spa_root_vdev->vdev_child[c]->vdev_id) { 4917 error = SET_ERROR(EINVAL); 4918 break; 4919 } 4920 4921 if (vdev_dtl_required(vml[c])) { 4922 error = SET_ERROR(EBUSY); 4923 break; 4924 } 4925 4926 /* we need certain info from the top level */ 4927 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_ARRAY, 4928 vml[c]->vdev_top->vdev_ms_array) == 0); 4929 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_SHIFT, 4930 vml[c]->vdev_top->vdev_ms_shift) == 0); 4931 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASIZE, 4932 vml[c]->vdev_top->vdev_asize) == 0); 4933 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASHIFT, 4934 vml[c]->vdev_top->vdev_ashift) == 0); 4935 } 4936 4937 if (error != 0) { 4938 kmem_free(vml, children * sizeof (vdev_t *)); 4939 kmem_free(glist, children * sizeof (uint64_t)); 4940 return (spa_vdev_exit(spa, NULL, txg, error)); 4941 } 4942 4943 /* stop writers from using the disks */ 4944 for (c = 0; c < children; c++) { 4945 if (vml[c] != NULL) 4946 vml[c]->vdev_offline = B_TRUE; 4947 } 4948 vdev_reopen(spa->spa_root_vdev); 4949 4950 /* 4951 * Temporarily record the splitting vdevs in the spa config. This 4952 * will disappear once the config is regenerated. 4953 */ 4954 VERIFY(nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP) == 0); 4955 VERIFY(nvlist_add_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST, 4956 glist, children) == 0); 4957 kmem_free(glist, children * sizeof (uint64_t)); 4958 4959 mutex_enter(&spa->spa_props_lock); 4960 VERIFY(nvlist_add_nvlist(spa->spa_config, ZPOOL_CONFIG_SPLIT, 4961 nvl) == 0); 4962 mutex_exit(&spa->spa_props_lock); 4963 spa->spa_config_splitting = nvl; 4964 vdev_config_dirty(spa->spa_root_vdev); 4965 4966 /* configure and create the new pool */ 4967 VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, newname) == 0); 4968 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE, 4969 exp ? 
POOL_STATE_EXPORTED : POOL_STATE_ACTIVE) == 0); 4970 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_VERSION, 4971 spa_version(spa)) == 0); 4972 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_TXG, 4973 spa->spa_config_txg) == 0); 4974 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_GUID, 4975 spa_generate_guid(NULL)) == 0); 4976 (void) nvlist_lookup_string(props, 4977 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot); 4978 4979 /* add the new pool to the namespace */ 4980 newspa = spa_add(newname, config, altroot); 4981 newspa->spa_config_txg = spa->spa_config_txg; 4982 spa_set_log_state(newspa, SPA_LOG_CLEAR); 4983 4984 /* release the spa config lock, retaining the namespace lock */ 4985 spa_vdev_config_exit(spa, NULL, txg, 0, FTAG); 4986 4987 if (zio_injection_enabled) 4988 zio_handle_panic_injection(spa, FTAG, 1); 4989 4990 spa_activate(newspa, spa_mode_global); 4991 spa_async_suspend(newspa); 4992 4993 /* create the new pool from the disks of the original pool */ 4994 error = spa_load(newspa, SPA_LOAD_IMPORT, SPA_IMPORT_ASSEMBLE, B_TRUE); 4995 if (error) 4996 goto out; 4997 4998 /* if that worked, generate a real config for the new pool */ 4999 if (newspa->spa_root_vdev != NULL) { 5000 VERIFY(nvlist_alloc(&newspa->spa_config_splitting, 5001 NV_UNIQUE_NAME, KM_SLEEP) == 0); 5002 VERIFY(nvlist_add_uint64(newspa->spa_config_splitting, 5003 ZPOOL_CONFIG_SPLIT_GUID, spa_guid(spa)) == 0); 5004 spa_config_set(newspa, spa_config_generate(newspa, NULL, -1ULL, 5005 B_TRUE)); 5006 } 5007 5008 /* set the props */ 5009 if (props != NULL) { 5010 spa_configfile_set(newspa, props, B_FALSE); 5011 error = spa_prop_set(newspa, props); 5012 if (error) 5013 goto out; 5014 } 5015 5016 /* flush everything */ 5017 txg = spa_vdev_config_enter(newspa); 5018 vdev_config_dirty(newspa->spa_root_vdev); 5019 (void) spa_vdev_config_exit(newspa, NULL, txg, 0, FTAG); 5020 5021 if (zio_injection_enabled) 5022 zio_handle_panic_injection(spa, FTAG, 2); 5023 5024 spa_async_resume(newspa); 5025 5026 /* finally, update the original pool's config */ 5027 txg = spa_vdev_config_enter(spa); 5028 tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir); 5029 error = dmu_tx_assign(tx, TXG_WAIT); 5030 if (error != 0) 5031 dmu_tx_abort(tx); 5032 for (c = 0; c < children; c++) { 5033 if (vml[c] != NULL) { 5034 vdev_split(vml[c]); 5035 if (error == 0) 5036 spa_history_log_internal(spa, "detach", tx, 5037 "vdev=%s", vml[c]->vdev_path); 5038 vdev_free(vml[c]); 5039 } 5040 } 5041 vdev_config_dirty(spa->spa_root_vdev); 5042 spa->spa_config_splitting = NULL; 5043 nvlist_free(nvl); 5044 if (error == 0) 5045 dmu_tx_commit(tx); 5046 (void) spa_vdev_exit(spa, NULL, txg, 0); 5047 5048 if (zio_injection_enabled) 5049 zio_handle_panic_injection(spa, FTAG, 3); 5050 5051 /* split is complete; log a history record */ 5052 spa_history_log_internal(newspa, "split", NULL, 5053 "from pool %s", spa_name(spa)); 5054 5055 kmem_free(vml, children * sizeof (vdev_t *)); 5056 5057 /* if we're not going to mount the filesystems in userland, export */ 5058 if (exp) 5059 error = spa_export_common(newname, POOL_STATE_EXPORTED, NULL, 5060 B_FALSE, B_FALSE); 5061 5062 return (error); 5063 5064 out: 5065 spa_unload(newspa); 5066 spa_deactivate(newspa); 5067 spa_remove(newspa); 5068 5069 txg = spa_vdev_config_enter(spa); 5070 5071 /* re-online all offlined disks */ 5072 for (c = 0; c < children; c++) { 5073 if (vml[c] != NULL) 5074 vml[c]->vdev_offline = B_FALSE; 5075 } 5076 vdev_reopen(spa->spa_root_vdev); 5077 5078 nvlist_free(spa->spa_config_splitting); 5079 
spa->spa_config_splitting = NULL; 5080 (void) spa_vdev_exit(spa, NULL, txg, error); 5081 5082 kmem_free(vml, children * sizeof (vdev_t *)); 5083 return (error); 5084 } 5085 5086 static nvlist_t * 5087 spa_nvlist_lookup_by_guid(nvlist_t **nvpp, int count, uint64_t target_guid) 5088 { 5089 for (int i = 0; i < count; i++) { 5090 uint64_t guid; 5091 5092 VERIFY(nvlist_lookup_uint64(nvpp[i], ZPOOL_CONFIG_GUID, 5093 &guid) == 0); 5094 5095 if (guid == target_guid) 5096 return (nvpp[i]); 5097 } 5098 5099 return (NULL); 5100 } 5101 5102 static void 5103 spa_vdev_remove_aux(nvlist_t *config, char *name, nvlist_t **dev, int count, 5104 nvlist_t *dev_to_remove) 5105 { 5106 nvlist_t **newdev = NULL; 5107 5108 if (count > 1) 5109 newdev = kmem_alloc((count - 1) * sizeof (void *), KM_SLEEP); 5110 5111 for (int i = 0, j = 0; i < count; i++) { 5112 if (dev[i] == dev_to_remove) 5113 continue; 5114 VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP) == 0); 5115 } 5116 5117 VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0); 5118 VERIFY(nvlist_add_nvlist_array(config, name, newdev, count - 1) == 0); 5119 5120 for (int i = 0; i < count - 1; i++) 5121 nvlist_free(newdev[i]); 5122 5123 if (count > 1) 5124 kmem_free(newdev, (count - 1) * sizeof (void *)); 5125 } 5126 5127 /* 5128 * Evacuate the device. 5129 */ 5130 static int 5131 spa_vdev_remove_evacuate(spa_t *spa, vdev_t *vd) 5132 { 5133 uint64_t txg; 5134 int error = 0; 5135 5136 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 5137 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0); 5138 ASSERT(vd == vd->vdev_top); 5139 5140 /* 5141 * Evacuate the device. We don't hold the config lock as writer 5142 * since we need to do I/O but we do keep the 5143 * spa_namespace_lock held. Once this completes the device 5144 * should no longer have any blocks allocated on it. 5145 */ 5146 if (vd->vdev_islog) { 5147 if (vd->vdev_stat.vs_alloc != 0) 5148 error = spa_offline_log(spa); 5149 } else { 5150 error = SET_ERROR(ENOTSUP); 5151 } 5152 5153 if (error) 5154 return (error); 5155 5156 /* 5157 * The evacuation succeeded. Remove any remaining MOS metadata 5158 * associated with this vdev, and wait for these changes to sync. 5159 */ 5160 ASSERT0(vd->vdev_stat.vs_alloc); 5161 txg = spa_vdev_config_enter(spa); 5162 vd->vdev_removing = B_TRUE; 5163 vdev_dirty_leaves(vd, VDD_DTL, txg); 5164 vdev_config_dirty(vd); 5165 spa_vdev_config_exit(spa, NULL, txg, 0, FTAG); 5166 5167 return (0); 5168 } 5169 5170 /* 5171 * Complete the removal by cleaning up the namespace. 5172 */ 5173 static void 5174 spa_vdev_remove_from_namespace(spa_t *spa, vdev_t *vd) 5175 { 5176 vdev_t *rvd = spa->spa_root_vdev; 5177 uint64_t id = vd->vdev_id; 5178 boolean_t last_vdev = (id == (rvd->vdev_children - 1)); 5179 5180 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 5181 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 5182 ASSERT(vd == vd->vdev_top); 5183 5184 /* 5185 * Only remove any devices which are empty. 5186 */ 5187 if (vd->vdev_stat.vs_alloc != 0) 5188 return; 5189 5190 (void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE); 5191 5192 if (list_link_active(&vd->vdev_state_dirty_node)) 5193 vdev_state_clean(vd); 5194 if (list_link_active(&vd->vdev_config_dirty_node)) 5195 vdev_config_clean(vd); 5196 5197 vdev_free(vd); 5198 5199 if (last_vdev) { 5200 vdev_compact_children(rvd); 5201 } else { 5202 vd = vdev_alloc_common(spa, id, 0, &vdev_hole_ops); 5203 vdev_add_child(rvd, vd); 5204 } 5205 vdev_config_dirty(rvd); 5206 5207 /* 5208 * Reassess the health of our root vdev. 
5209 */ 5210 vdev_reopen(rvd); 5211 } 5212 5213 /* 5214 * Remove a device from the pool - 5215 * 5216 * Removing a device from the vdev namespace requires several steps 5217 * and can take a significant amount of time. As a result we use 5218 * the spa_vdev_config_[enter/exit] functions which allow us to 5219 * grab and release the spa_config_lock while still holding the namespace 5220 * lock. During each step the configuration is synced out. 5221 * 5222 * Currently, this supports removing only hot spares, slogs, and level 2 ARC 5223 * devices. 5224 */ 5225 int 5226 spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare) 5227 { 5228 vdev_t *vd; 5229 metaslab_group_t *mg; 5230 nvlist_t **spares, **l2cache, *nv; 5231 uint64_t txg = 0; 5232 uint_t nspares, nl2cache; 5233 int error = 0; 5234 boolean_t locked = MUTEX_HELD(&spa_namespace_lock); 5235 5236 ASSERT(spa_writeable(spa)); 5237 5238 if (!locked) 5239 txg = spa_vdev_enter(spa); 5240 5241 vd = spa_lookup_by_guid(spa, guid, B_FALSE); 5242 5243 if (spa->spa_spares.sav_vdevs != NULL && 5244 nvlist_lookup_nvlist_array(spa->spa_spares.sav_config, 5245 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0 && 5246 (nv = spa_nvlist_lookup_by_guid(spares, nspares, guid)) != NULL) { 5247 /* 5248 * Only remove the hot spare if it's not currently in use 5249 * in this pool. 5250 */ 5251 if (vd == NULL || unspare) { 5252 spa_vdev_remove_aux(spa->spa_spares.sav_config, 5253 ZPOOL_CONFIG_SPARES, spares, nspares, nv); 5254 spa_load_spares(spa); 5255 spa->spa_spares.sav_sync = B_TRUE; 5256 } else { 5257 error = SET_ERROR(EBUSY); 5258 } 5259 } else if (spa->spa_l2cache.sav_vdevs != NULL && 5260 nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config, 5261 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0 && 5262 (nv = spa_nvlist_lookup_by_guid(l2cache, nl2cache, guid)) != NULL) { 5263 /* 5264 * Cache devices can always be removed. 5265 */ 5266 spa_vdev_remove_aux(spa->spa_l2cache.sav_config, 5267 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache, nv); 5268 spa_load_l2cache(spa); 5269 spa->spa_l2cache.sav_sync = B_TRUE; 5270 } else if (vd != NULL && vd->vdev_islog) { 5271 ASSERT(!locked); 5272 ASSERT(vd == vd->vdev_top); 5273 5274 mg = vd->vdev_mg; 5275 5276 /* 5277 * Stop allocating from this vdev. 5278 */ 5279 metaslab_group_passivate(mg); 5280 5281 /* 5282 * Wait for the youngest allocations and frees to sync, 5283 * and then wait for the deferral of those frees to finish. 5284 */ 5285 spa_vdev_config_exit(spa, NULL, 5286 txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG); 5287 5288 /* 5289 * Attempt to evacuate the vdev. 5290 */ 5291 error = spa_vdev_remove_evacuate(spa, vd); 5292 5293 txg = spa_vdev_config_enter(spa); 5294 5295 /* 5296 * If we couldn't evacuate the vdev, unwind. 5297 */ 5298 if (error) { 5299 metaslab_group_activate(mg); 5300 return (spa_vdev_exit(spa, NULL, txg, error)); 5301 } 5302 5303 /* 5304 * Clean up the vdev namespace. 5305 */ 5306 spa_vdev_remove_from_namespace(spa, vd); 5307 5308 } else if (vd != NULL) { 5309 /* 5310 * Normal vdevs cannot be removed (yet). 5311 */ 5312 error = SET_ERROR(ENOTSUP); 5313 } else { 5314 /* 5315 * There is no vdev of any kind with the specified guid. 5316 */ 5317 error = SET_ERROR(ENOENT); 5318 } 5319 5320 if (!locked) 5321 return (spa_vdev_exit(spa, NULL, txg, error)); 5322 5323 return (error); 5324 } 5325 5326 /* 5327 * Find any device that's done replacing, or a vdev marked 'unspare' that's 5328 * currently spared, so we can detach it. 
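 *
 * For example (sketch): given the tree M(A, R(B,C)) where C has finished
 * resilvering B, the hunt returns B; the caller detaches it and the tree
 * collapses back to M(A,C).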
5329 */ 5330 static vdev_t * 5331 spa_vdev_resilver_done_hunt(vdev_t *vd) 5332 { 5333 vdev_t *newvd, *oldvd; 5334 5335 for (int c = 0; c < vd->vdev_children; c++) { 5336 oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]); 5337 if (oldvd != NULL) 5338 return (oldvd); 5339 } 5340 5341 /* 5342 * Check for a completed replacement. We always consider the first 5343 * vdev in the list to be the oldest vdev, and the last one to be 5344 * the newest (see spa_vdev_attach() for how that works). In 5345 * the case where the newest vdev is faulted, we will not automatically 5346 * remove it after a resilver completes. This is OK as it will require 5347 * user intervention to determine which disk the admin wishes to keep. 5348 */ 5349 if (vd->vdev_ops == &vdev_replacing_ops) { 5350 ASSERT(vd->vdev_children > 1); 5351 5352 newvd = vd->vdev_child[vd->vdev_children - 1]; 5353 oldvd = vd->vdev_child[0]; 5354 5355 if (vdev_dtl_empty(newvd, DTL_MISSING) && 5356 vdev_dtl_empty(newvd, DTL_OUTAGE) && 5357 !vdev_dtl_required(oldvd)) 5358 return (oldvd); 5359 } 5360 5361 /* 5362 * Check for a completed resilver with the 'unspare' flag set. 5363 */ 5364 if (vd->vdev_ops == &vdev_spare_ops) { 5365 vdev_t *first = vd->vdev_child[0]; 5366 vdev_t *last = vd->vdev_child[vd->vdev_children - 1]; 5367 5368 if (last->vdev_unspare) { 5369 oldvd = first; 5370 newvd = last; 5371 } else if (first->vdev_unspare) { 5372 oldvd = last; 5373 newvd = first; 5374 } else { 5375 oldvd = NULL; 5376 } 5377 5378 if (oldvd != NULL && 5379 vdev_dtl_empty(newvd, DTL_MISSING) && 5380 vdev_dtl_empty(newvd, DTL_OUTAGE) && 5381 !vdev_dtl_required(oldvd)) 5382 return (oldvd); 5383 5384 /* 5385 * If there are more than two spares attached to a disk, 5386 * and those spares are not required, then we want to 5387 * attempt to free them up now so that they can be used 5388 * by other pools. Once we're back down to a single 5389 * disk+spare, we stop removing them. 5390 */ 5391 if (vd->vdev_children > 2) { 5392 newvd = vd->vdev_child[1]; 5393 5394 if (newvd->vdev_isspare && last->vdev_isspare && 5395 vdev_dtl_empty(last, DTL_MISSING) && 5396 vdev_dtl_empty(last, DTL_OUTAGE) && 5397 !vdev_dtl_required(newvd)) 5398 return (newvd); 5399 } 5400 } 5401 5402 return (NULL); 5403 } 5404 5405 static void 5406 spa_vdev_resilver_done(spa_t *spa) 5407 { 5408 vdev_t *vd, *pvd, *ppvd; 5409 uint64_t guid, sguid, pguid, ppguid; 5410 5411 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 5412 5413 while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) { 5414 pvd = vd->vdev_parent; 5415 ppvd = pvd->vdev_parent; 5416 guid = vd->vdev_guid; 5417 pguid = pvd->vdev_guid; 5418 ppguid = ppvd->vdev_guid; 5419 sguid = 0; 5420 /* 5421 * If we have just finished replacing a hot spared device, then 5422 * we need to detach the parent's first child (the original hot 5423 * spare) as well. 5424 */ 5425 if (ppvd->vdev_ops == &vdev_spare_ops && pvd->vdev_id == 0 && 5426 ppvd->vdev_children == 2) { 5427 ASSERT(pvd->vdev_ops == &vdev_replacing_ops); 5428 sguid = ppvd->vdev_child[1]->vdev_guid; 5429 } 5430 ASSERT(vd->vdev_resilver_txg == 0 || !vdev_dtl_required(vd)); 5431 5432 spa_config_exit(spa, SCL_ALL, FTAG); 5433 if (spa_vdev_detach(spa, guid, pguid, B_TRUE) != 0) 5434 return; 5435 if (sguid && spa_vdev_detach(spa, sguid, ppguid, B_TRUE) != 0) 5436 return; 5437 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 5438 } 5439 5440 spa_config_exit(spa, SCL_ALL, FTAG); 5441 } 5442 5443 /* 5444 * Update the stored path or FRU for this vdev. 
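 *
 * Callers normally use the two thin wrappers below, e.g. (hypothetical
 * guid and values):
 *
 *	(void) spa_vdev_setpath(spa, guid, "/dev/dsk/c2t1d0s0");
 *	(void) spa_vdev_setfru(spa, guid, "hc://:chassis=0/bay=4");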
5445 */ 5446 int 5447 spa_vdev_set_common(spa_t *spa, uint64_t guid, const char *value, 5448 boolean_t ispath) 5449 { 5450 vdev_t *vd; 5451 boolean_t sync = B_FALSE; 5452 5453 ASSERT(spa_writeable(spa)); 5454 5455 spa_vdev_state_enter(spa, SCL_ALL); 5456 5457 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 5458 return (spa_vdev_state_exit(spa, NULL, ENOENT)); 5459 5460 if (!vd->vdev_ops->vdev_op_leaf) 5461 return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 5462 5463 if (ispath) { 5464 if (strcmp(value, vd->vdev_path) != 0) { 5465 spa_strfree(vd->vdev_path); 5466 vd->vdev_path = spa_strdup(value); 5467 sync = B_TRUE; 5468 } 5469 } else { 5470 if (vd->vdev_fru == NULL) { 5471 vd->vdev_fru = spa_strdup(value); 5472 sync = B_TRUE; 5473 } else if (strcmp(value, vd->vdev_fru) != 0) { 5474 spa_strfree(vd->vdev_fru); 5475 vd->vdev_fru = spa_strdup(value); 5476 sync = B_TRUE; 5477 } 5478 } 5479 5480 return (spa_vdev_state_exit(spa, sync ? vd : NULL, 0)); 5481 } 5482 5483 int 5484 spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath) 5485 { 5486 return (spa_vdev_set_common(spa, guid, newpath, B_TRUE)); 5487 } 5488 5489 int 5490 spa_vdev_setfru(spa_t *spa, uint64_t guid, const char *newfru) 5491 { 5492 return (spa_vdev_set_common(spa, guid, newfru, B_FALSE)); 5493 } 5494 5495 /* 5496 * ========================================================================== 5497 * SPA Scanning 5498 * ========================================================================== 5499 */ 5500 5501 int 5502 spa_scan_stop(spa_t *spa) 5503 { 5504 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0); 5505 if (dsl_scan_resilvering(spa->spa_dsl_pool)) 5506 return (SET_ERROR(EBUSY)); 5507 return (dsl_scan_cancel(spa->spa_dsl_pool)); 5508 } 5509 5510 int 5511 spa_scan(spa_t *spa, pool_scan_func_t func) 5512 { 5513 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0); 5514 5515 if (func >= POOL_SCAN_FUNCS || func == POOL_SCAN_NONE) 5516 return (SET_ERROR(ENOTSUP)); 5517 5518 /* 5519 * If a resilver was requested, but there is no DTL on a 5520 * writeable leaf device, we have nothing to do. 5521 */ 5522 if (func == POOL_SCAN_RESILVER && 5523 !vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) { 5524 spa_async_request(spa, SPA_ASYNC_RESILVER_DONE); 5525 return (0); 5526 } 5527 5528 return (dsl_scan(spa->spa_dsl_pool, func)); 5529 } 5530 5531 /* 5532 * ========================================================================== 5533 * SPA async task processing 5534 * ========================================================================== 5535 */ 5536 5537 static void 5538 spa_async_remove(spa_t *spa, vdev_t *vd) 5539 { 5540 if (vd->vdev_remove_wanted) { 5541 vd->vdev_remove_wanted = B_FALSE; 5542 vd->vdev_delayed_close = B_FALSE; 5543 vdev_set_state(vd, B_FALSE, VDEV_STATE_REMOVED, VDEV_AUX_NONE); 5544 5545 /* 5546 * We want to clear the stats, but we don't want to do a full 5547 * vdev_clear() as that will cause us to throw away 5548 * degraded/faulted state as well as attempt to reopen the 5549 * device, all of which is a waste. 
5550 */ 5551 vd->vdev_stat.vs_read_errors = 0; 5552 vd->vdev_stat.vs_write_errors = 0; 5553 vd->vdev_stat.vs_checksum_errors = 0; 5554 5555 vdev_state_dirty(vd->vdev_top); 5556 } 5557 5558 for (int c = 0; c < vd->vdev_children; c++) 5559 spa_async_remove(spa, vd->vdev_child[c]); 5560 } 5561 5562 static void 5563 spa_async_probe(spa_t *spa, vdev_t *vd) 5564 { 5565 if (vd->vdev_probe_wanted) { 5566 vd->vdev_probe_wanted = B_FALSE; 5567 vdev_reopen(vd); /* vdev_open() does the actual probe */ 5568 } 5569 5570 for (int c = 0; c < vd->vdev_children; c++) 5571 spa_async_probe(spa, vd->vdev_child[c]); 5572 } 5573 5574 static void 5575 spa_async_autoexpand(spa_t *spa, vdev_t *vd) 5576 { 5577 sysevent_id_t eid; 5578 nvlist_t *attr; 5579 char *physpath; 5580 5581 if (!spa->spa_autoexpand) 5582 return; 5583 5584 for (int c = 0; c < vd->vdev_children; c++) { 5585 vdev_t *cvd = vd->vdev_child[c]; 5586 spa_async_autoexpand(spa, cvd); 5587 } 5588 5589 if (!vd->vdev_ops->vdev_op_leaf || vd->vdev_physpath == NULL) 5590 return; 5591 5592 physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 5593 (void) snprintf(physpath, MAXPATHLEN, "/devices%s", vd->vdev_physpath); 5594 5595 VERIFY(nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP) == 0); 5596 VERIFY(nvlist_add_string(attr, DEV_PHYS_PATH, physpath) == 0); 5597 5598 (void) ddi_log_sysevent(zfs_dip, SUNW_VENDOR, EC_DEV_STATUS, 5599 ESC_DEV_DLE, attr, &eid, DDI_SLEEP); 5600 5601 nvlist_free(attr); 5602 kmem_free(physpath, MAXPATHLEN); 5603 } 5604 5605 static void 5606 spa_async_thread(spa_t *spa) 5607 { 5608 int tasks; 5609 5610 ASSERT(spa->spa_sync_on); 5611 5612 mutex_enter(&spa->spa_async_lock); 5613 tasks = spa->spa_async_tasks; 5614 spa->spa_async_tasks = 0; 5615 mutex_exit(&spa->spa_async_lock); 5616 5617 /* 5618 * See if the config needs to be updated. 5619 */ 5620 if (tasks & SPA_ASYNC_CONFIG_UPDATE) { 5621 uint64_t old_space, new_space; 5622 5623 mutex_enter(&spa_namespace_lock); 5624 old_space = metaslab_class_get_space(spa_normal_class(spa)); 5625 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 5626 new_space = metaslab_class_get_space(spa_normal_class(spa)); 5627 mutex_exit(&spa_namespace_lock); 5628 5629 /* 5630 * If the pool grew as a result of the config update, 5631 * then log an internal history event. 5632 */ 5633 if (new_space != old_space) { 5634 spa_history_log_internal(spa, "vdev online", NULL, 5635 "pool '%s' size: %llu(+%llu)", 5636 spa_name(spa), new_space, new_space - old_space); 5637 } 5638 } 5639 5640 /* 5641 * See if any devices need to be marked REMOVED. 5642 */ 5643 if (tasks & SPA_ASYNC_REMOVE) { 5644 spa_vdev_state_enter(spa, SCL_NONE); 5645 spa_async_remove(spa, spa->spa_root_vdev); 5646 for (int i = 0; i < spa->spa_l2cache.sav_count; i++) 5647 spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i]); 5648 for (int i = 0; i < spa->spa_spares.sav_count; i++) 5649 spa_async_remove(spa, spa->spa_spares.sav_vdevs[i]); 5650 (void) spa_vdev_state_exit(spa, NULL, 0); 5651 } 5652 5653 if ((tasks & SPA_ASYNC_AUTOEXPAND) && !spa_suspended(spa)) { 5654 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 5655 spa_async_autoexpand(spa, spa->spa_root_vdev); 5656 spa_config_exit(spa, SCL_CONFIG, FTAG); 5657 } 5658 5659 /* 5660 * See if any devices need to be probed. 5661 */ 5662 if (tasks & SPA_ASYNC_PROBE) { 5663 spa_vdev_state_enter(spa, SCL_NONE); 5664 spa_async_probe(spa, spa->spa_root_vdev); 5665 (void) spa_vdev_state_exit(spa, NULL, 0); 5666 } 5667 5668 /* 5669 * If any devices are done replacing, detach them. 
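	 * Work arrives here via spa_async_request() (below); for example,
	 * spa_scan() requests SPA_ASYNC_RESILVER_DONE when a requested
	 * resilver has nothing to do, and spa_async_dispatch() then spawns
	 * this thread to service the pending bits.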
5670 */ 5671 if (tasks & SPA_ASYNC_RESILVER_DONE) 5672 spa_vdev_resilver_done(spa); 5673 5674 /* 5675 * Kick off a resilver. 5676 */ 5677 if (tasks & SPA_ASYNC_RESILVER) 5678 dsl_resilver_restart(spa->spa_dsl_pool, 0); 5679 5680 /* 5681 * Let the world know that we're done. 5682 */ 5683 mutex_enter(&spa->spa_async_lock); 5684 spa->spa_async_thread = NULL; 5685 cv_broadcast(&spa->spa_async_cv); 5686 mutex_exit(&spa->spa_async_lock); 5687 thread_exit(); 5688 } 5689 5690 void 5691 spa_async_suspend(spa_t *spa) 5692 { 5693 mutex_enter(&spa->spa_async_lock); 5694 spa->spa_async_suspended++; 5695 while (spa->spa_async_thread != NULL) 5696 cv_wait(&spa->spa_async_cv, &spa->spa_async_lock); 5697 mutex_exit(&spa->spa_async_lock); 5698 } 5699 5700 void 5701 spa_async_resume(spa_t *spa) 5702 { 5703 mutex_enter(&spa->spa_async_lock); 5704 ASSERT(spa->spa_async_suspended != 0); 5705 spa->spa_async_suspended--; 5706 mutex_exit(&spa->spa_async_lock); 5707 } 5708 5709 static boolean_t 5710 spa_async_tasks_pending(spa_t *spa) 5711 { 5712 uint_t non_config_tasks; 5713 uint_t config_task; 5714 boolean_t config_task_suspended; 5715 5716 non_config_tasks = spa->spa_async_tasks & ~SPA_ASYNC_CONFIG_UPDATE; 5717 config_task = spa->spa_async_tasks & SPA_ASYNC_CONFIG_UPDATE; 5718 if (spa->spa_ccw_fail_time == 0) { 5719 config_task_suspended = B_FALSE; 5720 } else { 5721 config_task_suspended = 5722 (gethrtime() - spa->spa_ccw_fail_time) < 5723 (zfs_ccw_retry_interval * NANOSEC); 5724 } 5725 5726 return (non_config_tasks || (config_task && !config_task_suspended)); 5727 } 5728 5729 static void 5730 spa_async_dispatch(spa_t *spa) 5731 { 5732 mutex_enter(&spa->spa_async_lock); 5733 if (spa_async_tasks_pending(spa) && 5734 !spa->spa_async_suspended && 5735 spa->spa_async_thread == NULL && 5736 rootdir != NULL) 5737 spa->spa_async_thread = thread_create(NULL, 0, 5738 spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri); 5739 mutex_exit(&spa->spa_async_lock); 5740 } 5741 5742 void 5743 spa_async_request(spa_t *spa, int task) 5744 { 5745 zfs_dbgmsg("spa=%s async request task=%u", spa->spa_name, task); 5746 mutex_enter(&spa->spa_async_lock); 5747 spa->spa_async_tasks |= task; 5748 mutex_exit(&spa->spa_async_lock); 5749 } 5750 5751 /* 5752 * ========================================================================== 5753 * SPA syncing routines 5754 * ========================================================================== 5755 */ 5756 5757 static int 5758 bpobj_enqueue_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) 5759 { 5760 bpobj_t *bpo = arg; 5761 bpobj_enqueue(bpo, bp, tx); 5762 return (0); 5763 } 5764 5765 static int 5766 spa_free_sync_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) 5767 { 5768 zio_t *zio = arg; 5769 5770 zio_nowait(zio_free_sync(zio, zio->io_spa, dmu_tx_get_txg(tx), bp, 5771 zio->io_flags)); 5772 return (0); 5773 } 5774 5775 /* 5776 * Note: this simple function is not inlined to make it easier to dtrace the 5777 * amount of time spent syncing frees. 5778 */ 5779 static void 5780 spa_sync_frees(spa_t *spa, bplist_t *bpl, dmu_tx_t *tx) 5781 { 5782 zio_t *zio = zio_root(spa, NULL, NULL, 0); 5783 bplist_iterate(bpl, spa_free_sync_cb, zio, tx); 5784 VERIFY(zio_wait(zio) == 0); 5785 } 5786 5787 /* 5788 * Note: this simple function is not inlined to make it easier to dtrace the 5789 * amount of time spent syncing deferred frees. 
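 *
 * For example, one plausible way to measure it (illustrative dtrace(1M)
 * invocation; fbt can bind these probes precisely because the function
 * is not inlined):
 *
 *	dtrace -n 'fbt::spa_sync_deferred_frees:entry
 *	    { self->ts = timestamp; }
 *	    fbt::spa_sync_deferred_frees:return /self->ts/
 *	    { @ = quantize(timestamp - self->ts); self->ts = 0; }'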
5790 */ 5791 static void 5792 spa_sync_deferred_frees(spa_t *spa, dmu_tx_t *tx) 5793 { 5794 zio_t *zio = zio_root(spa, NULL, NULL, 0); 5795 VERIFY3U(bpobj_iterate(&spa->spa_deferred_bpobj, 5796 spa_free_sync_cb, zio, tx), ==, 0); 5797 VERIFY0(zio_wait(zio)); 5798 } 5799 5800 5801 static void 5802 spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx) 5803 { 5804 char *packed = NULL; 5805 size_t bufsize; 5806 size_t nvsize = 0; 5807 dmu_buf_t *db; 5808 5809 VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0); 5810 5811 /* 5812 * Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration 5813 * information. This avoids the dmu_buf_will_dirty() path and 5814 * saves us a pre-read to get data we don't actually care about. 5815 */ 5816 bufsize = P2ROUNDUP((uint64_t)nvsize, SPA_CONFIG_BLOCKSIZE); 5817 packed = kmem_alloc(bufsize, KM_SLEEP); 5818 5819 VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR, 5820 KM_SLEEP) == 0); 5821 bzero(packed + nvsize, bufsize - nvsize); 5822 5823 dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx); 5824 5825 kmem_free(packed, bufsize); 5826 5827 VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db)); 5828 dmu_buf_will_dirty(db, tx); 5829 *(uint64_t *)db->db_data = nvsize; 5830 dmu_buf_rele(db, FTAG); 5831 } 5832 5833 static void 5834 spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx, 5835 const char *config, const char *entry) 5836 { 5837 nvlist_t *nvroot; 5838 nvlist_t **list; 5839 int i; 5840 5841 if (!sav->sav_sync) 5842 return; 5843 5844 /* 5845 * Update the MOS nvlist describing the list of available devices. 5846 * spa_validate_aux() will have already made sure this nvlist is 5847 * valid and the vdevs are labeled appropriately. 5848 */ 5849 if (sav->sav_object == 0) { 5850 sav->sav_object = dmu_object_alloc(spa->spa_meta_objset, 5851 DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE, 5852 sizeof (uint64_t), tx); 5853 VERIFY(zap_update(spa->spa_meta_objset, 5854 DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1, 5855 &sav->sav_object, tx) == 0); 5856 } 5857 5858 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0); 5859 if (sav->sav_count == 0) { 5860 VERIFY(nvlist_add_nvlist_array(nvroot, config, NULL, 0) == 0); 5861 } else { 5862 list = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP); 5863 for (i = 0; i < sav->sav_count; i++) 5864 list[i] = vdev_config_generate(spa, sav->sav_vdevs[i], 5865 B_FALSE, VDEV_CONFIG_L2CACHE); 5866 VERIFY(nvlist_add_nvlist_array(nvroot, config, list, 5867 sav->sav_count) == 0); 5868 for (i = 0; i < sav->sav_count; i++) 5869 nvlist_free(list[i]); 5870 kmem_free(list, sav->sav_count * sizeof (void *)); 5871 } 5872 5873 spa_sync_nvlist(spa, sav->sav_object, nvroot, tx); 5874 nvlist_free(nvroot); 5875 5876 sav->sav_sync = B_FALSE; 5877 } 5878 5879 static void 5880 spa_sync_config_object(spa_t *spa, dmu_tx_t *tx) 5881 { 5882 nvlist_t *config; 5883 5884 if (list_is_empty(&spa->spa_config_dirty_list)) 5885 return; 5886 5887 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 5888 5889 config = spa_config_generate(spa, spa->spa_root_vdev, 5890 dmu_tx_get_txg(tx), B_FALSE); 5891 5892 /* 5893 * If we're upgrading the spa version then make sure that 5894 * the config object gets updated with the correct version. 
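 * spa_config_generate() reports spa_version(), which is the last
 * *synced* version, so during an upgrade txg we patch the nvlist
 * by hand to keep the config object consistent with the uberblock
 * that is about to be written.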
5895 */
5896 if (spa->spa_ubsync.ub_version < spa->spa_uberblock.ub_version)
5897 fnvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
5898 spa->spa_uberblock.ub_version);
5899
5900 spa_config_exit(spa, SCL_STATE, FTAG);
5901
5902 if (spa->spa_config_syncing)
5903 nvlist_free(spa->spa_config_syncing);
5904 spa->spa_config_syncing = config;
5905
5906 spa_sync_nvlist(spa, spa->spa_config_object, config, tx);
5907 }
5908
5909 static void
5910 spa_sync_version(void *arg, dmu_tx_t *tx)
5911 {
5912 uint64_t *versionp = arg;
5913 uint64_t version = *versionp;
5914 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
5915
5916 /*
5917 * Setting the version is special cased when first creating the pool.
5918 */
5919 ASSERT(tx->tx_txg != TXG_INITIAL);
5920
5921 ASSERT(SPA_VERSION_IS_SUPPORTED(version));
5922 ASSERT(version >= spa_version(spa));
5923
5924 spa->spa_uberblock.ub_version = version;
5925 vdev_config_dirty(spa->spa_root_vdev);
5926 spa_history_log_internal(spa, "set", tx, "version=%lld", version);
5927 }
5928
5929 /*
5930 * Set zpool properties.
5931 */
5932 static void
5933 spa_sync_props(void *arg, dmu_tx_t *tx)
5934 {
5935 nvlist_t *nvp = arg;
5936 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
5937 objset_t *mos = spa->spa_meta_objset;
5938 nvpair_t *elem = NULL;
5939
5940 mutex_enter(&spa->spa_props_lock);
5941
5942 while ((elem = nvlist_next_nvpair(nvp, elem))) {
5943 uint64_t intval;
5944 char *strval, *fname;
5945 zpool_prop_t prop;
5946 const char *propname;
5947 zprop_type_t proptype;
5948 spa_feature_t fid;
5949
5950 switch (prop = zpool_name_to_prop(nvpair_name(elem))) {
5951 case ZPROP_INVAL:
5952 /*
5953 * We checked this earlier in spa_prop_validate().
5954 */
5955 ASSERT(zpool_prop_feature(nvpair_name(elem)));
5956
5957 fname = strchr(nvpair_name(elem), '@') + 1;
5958 VERIFY0(zfeature_lookup_name(fname, &fid));
5959
5960 spa_feature_enable(spa, fid, tx);
5961 spa_history_log_internal(spa, "set", tx,
5962 "%s=enabled", nvpair_name(elem));
5963 break;
5964
5965 case ZPOOL_PROP_VERSION:
5966 intval = fnvpair_value_uint64(elem);
5967 /*
5968 * The version is synced separately before other
5969 * properties and should be correct by now.
5970 */
5971 ASSERT3U(spa_version(spa), >=, intval);
5972 break;
5973
5974 case ZPOOL_PROP_ALTROOT:
5975 /*
5976 * 'altroot' is a non-persistent property. It should
5977 * have been set temporarily at creation or import time.
5978 */
5979 ASSERT(spa->spa_root != NULL);
5980 break;
5981
5982 case ZPOOL_PROP_READONLY:
5983 case ZPOOL_PROP_CACHEFILE:
5984 /*
5985 * 'readonly' and 'cachefile' are also non-persistent
5986 * properties.
5987 */
5988 break;
5989 case ZPOOL_PROP_COMMENT:
5990 strval = fnvpair_value_string(elem);
5991 if (spa->spa_comment != NULL)
5992 spa_strfree(spa->spa_comment);
5993 spa->spa_comment = spa_strdup(strval);
5994 /*
5995 * We need to dirty the configuration on all the vdevs
5996 * so that their labels get updated. It's unnecessary
5997 * to do this for pool creation since the vdev's
5998 * configuration has already been dirtied.
5999 */
6000 if (tx->tx_txg != TXG_INITIAL)
6001 vdev_config_dirty(spa->spa_root_vdev);
6002 spa_history_log_internal(spa, "set", tx,
6003 "%s=%s", nvpair_name(elem), strval);
6004 break;
6005 default:
6006 /*
6007 * Set pool property values in the poolprops mos object.
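 * The properties live in a ZAP object that is created on first
 * use and linked from the MOS directory as DMU_POOL_PROPS.
 * String properties are stored as strings; numeric and index
 * properties are stored as a single uint64_t.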
6008 */ 6009 if (spa->spa_pool_props_object == 0) { 6010 spa->spa_pool_props_object = 6011 zap_create_link(mos, DMU_OT_POOL_PROPS, 6012 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS, 6013 tx); 6014 } 6015 6016 /* normalize the property name */ 6017 propname = zpool_prop_to_name(prop); 6018 proptype = zpool_prop_get_type(prop); 6019 6020 if (nvpair_type(elem) == DATA_TYPE_STRING) { 6021 ASSERT(proptype == PROP_TYPE_STRING); 6022 strval = fnvpair_value_string(elem); 6023 VERIFY0(zap_update(mos, 6024 spa->spa_pool_props_object, propname, 6025 1, strlen(strval) + 1, strval, tx)); 6026 spa_history_log_internal(spa, "set", tx, 6027 "%s=%s", nvpair_name(elem), strval); 6028 } else if (nvpair_type(elem) == DATA_TYPE_UINT64) { 6029 intval = fnvpair_value_uint64(elem); 6030 6031 if (proptype == PROP_TYPE_INDEX) { 6032 const char *unused; 6033 VERIFY0(zpool_prop_index_to_string( 6034 prop, intval, &unused)); 6035 } 6036 VERIFY0(zap_update(mos, 6037 spa->spa_pool_props_object, propname, 6038 8, 1, &intval, tx)); 6039 spa_history_log_internal(spa, "set", tx, 6040 "%s=%lld", nvpair_name(elem), intval); 6041 } else { 6042 ASSERT(0); /* not allowed */ 6043 } 6044 6045 switch (prop) { 6046 case ZPOOL_PROP_DELEGATION: 6047 spa->spa_delegation = intval; 6048 break; 6049 case ZPOOL_PROP_BOOTFS: 6050 spa->spa_bootfs = intval; 6051 break; 6052 case ZPOOL_PROP_FAILUREMODE: 6053 spa->spa_failmode = intval; 6054 break; 6055 case ZPOOL_PROP_AUTOEXPAND: 6056 spa->spa_autoexpand = intval; 6057 if (tx->tx_txg != TXG_INITIAL) 6058 spa_async_request(spa, 6059 SPA_ASYNC_AUTOEXPAND); 6060 break; 6061 case ZPOOL_PROP_DEDUPDITTO: 6062 spa->spa_dedup_ditto = intval; 6063 break; 6064 default: 6065 break; 6066 } 6067 } 6068 6069 } 6070 6071 mutex_exit(&spa->spa_props_lock); 6072 } 6073 6074 /* 6075 * Perform one-time upgrade on-disk changes. spa_version() does not 6076 * reflect the new version this txg, so there must be no changes this 6077 * txg to anything that the upgrade code depends on after it executes. 6078 * Therefore this must be called after dsl_pool_sync() does the sync 6079 * tasks. 6080 */ 6081 static void 6082 spa_sync_upgrades(spa_t *spa, dmu_tx_t *tx) 6083 { 6084 dsl_pool_t *dp = spa->spa_dsl_pool; 6085 6086 ASSERT(spa->spa_sync_pass == 1); 6087 6088 rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG); 6089 6090 if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN && 6091 spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) { 6092 dsl_pool_create_origin(dp, tx); 6093 6094 /* Keeping the origin open increases spa_minref */ 6095 spa->spa_minref += 3; 6096 } 6097 6098 if (spa->spa_ubsync.ub_version < SPA_VERSION_NEXT_CLONES && 6099 spa->spa_uberblock.ub_version >= SPA_VERSION_NEXT_CLONES) { 6100 dsl_pool_upgrade_clones(dp, tx); 6101 } 6102 6103 if (spa->spa_ubsync.ub_version < SPA_VERSION_DIR_CLONES && 6104 spa->spa_uberblock.ub_version >= SPA_VERSION_DIR_CLONES) { 6105 dsl_pool_upgrade_dir_clones(dp, tx); 6106 6107 /* Keeping the freedir open increases spa_minref */ 6108 spa->spa_minref += 3; 6109 } 6110 6111 if (spa->spa_ubsync.ub_version < SPA_VERSION_FEATURES && 6112 spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) { 6113 spa_feature_create_zap_objects(spa, tx); 6114 } 6115 rrw_exit(&dp->dp_config_rwlock, FTAG); 6116 } 6117 6118 /* 6119 * Sync the specified transaction group. New blocks may be dirtied as 6120 * part of the process, so we iterate until it converges. 
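 * Each pass syncs the config and aux vdev objects, the error log,
 * the DSL pool, frees (deferred in late passes), the DDT, and any
 * dirty vdevs; once a pass leaves the MOS clean we rewrite the
 * vdev labels and uberblock to commit the txg.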
6121 */
6122 void
6123 spa_sync(spa_t *spa, uint64_t txg)
6124 {
6125 dsl_pool_t *dp = spa->spa_dsl_pool;
6126 objset_t *mos = spa->spa_meta_objset;
6127 bplist_t *free_bpl = &spa->spa_free_bplist[txg & TXG_MASK];
6128 vdev_t *rvd = spa->spa_root_vdev;
6129 vdev_t *vd;
6130 dmu_tx_t *tx;
6131 int error;
6132
6133 VERIFY(spa_writeable(spa));
6134
6135 /*
6136 * Lock out configuration changes.
6137 */
6138 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
6139
6140 spa->spa_syncing_txg = txg;
6141 spa->spa_sync_pass = 0;
6142
6143 /*
6144 * If there are any pending vdev state changes, convert them
6145 * into config changes that go out with this transaction group.
6146 */
6147 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
6148 while (list_head(&spa->spa_state_dirty_list) != NULL) {
6149 /*
6150 * We need the write lock here because, for aux vdevs,
6151 * calling vdev_config_dirty() modifies sav_config.
6152 * This is ugly and will become unnecessary when we
6153 * eliminate the aux vdev wart by integrating all vdevs
6154 * into the root vdev tree.
6155 */
6156 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
6157 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_WRITER);
6158 while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) {
6159 vdev_state_clean(vd);
6160 vdev_config_dirty(vd);
6161 }
6162 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
6163 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
6164 }
6165 spa_config_exit(spa, SCL_STATE, FTAG);
6166
6167 tx = dmu_tx_create_assigned(dp, txg);
6168
6169 spa->spa_sync_starttime = gethrtime();
6170 VERIFY(cyclic_reprogram(spa->spa_deadman_cycid,
6171 spa->spa_sync_starttime + spa->spa_deadman_synctime));
6172
6173 /*
6174 * If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg,
6175 * set spa_deflate if we have no raid-z vdevs.
6176 */
6177 if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE &&
6178 spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) {
6179 int i;
6180
6181 for (i = 0; i < rvd->vdev_children; i++) {
6182 vd = rvd->vdev_child[i];
6183 if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE)
6184 break;
6185 }
6186 if (i == rvd->vdev_children) {
6187 spa->spa_deflate = TRUE;
6188 VERIFY(0 == zap_add(spa->spa_meta_objset,
6189 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
6190 sizeof (uint64_t), 1, &spa->spa_deflate, tx));
6191 }
6192 }
6193
6194 /*
6195 * If anything has changed in this txg, or if someone is waiting
6196 * for this txg to sync (e.g., spa_vdev_remove()), push the
6197 * deferred frees from the previous txg. If not, leave them
6198 * alone so that we don't generate work on an otherwise idle
6199 * system.
6200 */
6201 if (!txg_list_empty(&dp->dp_dirty_datasets, txg) ||
6202 !txg_list_empty(&dp->dp_dirty_dirs, txg) ||
6203 !txg_list_empty(&dp->dp_sync_tasks, txg) ||
6204 ((dsl_scan_active(dp->dp_scan) ||
6205 txg_sync_waiting(dp)) && !spa_shutting_down(spa))) {
6206 spa_sync_deferred_frees(spa, tx);
6207 }
6208
6209 /*
6210 * Iterate to convergence.
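 * Syncing dirty user data dirties MOS metadata (space maps, the
 * DDT, the config objects), which must itself be synced, so we
 * loop until dmu_objset_is_dirty() reports that the MOS is clean
 * for this txg.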
6211 */
6212 do {
6213 int pass = ++spa->spa_sync_pass;
6214
6215 spa_sync_config_object(spa, tx);
6216 spa_sync_aux_dev(spa, &spa->spa_spares, tx,
6217 ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES);
6218 spa_sync_aux_dev(spa, &spa->spa_l2cache, tx,
6219 ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE);
6220 spa_errlog_sync(spa, txg);
6221 dsl_pool_sync(dp, txg);
6222
6223 if (pass < zfs_sync_pass_deferred_free) {
6224 spa_sync_frees(spa, free_bpl, tx);
6225 } else {
6226 bplist_iterate(free_bpl, bpobj_enqueue_cb,
6227 &spa->spa_deferred_bpobj, tx);
6228 }
6229
6230 ddt_sync(spa, txg);
6231 dsl_scan_sync(dp, tx);
6232
6233 while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, txg)) != NULL)
6234 vdev_sync(vd, txg);
6235
6236 if (pass == 1)
6237 spa_sync_upgrades(spa, tx);
6238
6239 } while (dmu_objset_is_dirty(mos, txg));
6240
6241 /*
6242 * Rewrite the vdev configuration (which includes the uberblock)
6243 * to commit the transaction group.
6244 *
6245 * If there are no dirty vdevs, we sync the uberblock to a few
6246 * random top-level vdevs that are known to be visible in the
6247 * config cache (see spa_vdev_add() for a complete description).
6248 * If there *are* dirty vdevs, sync the uberblock to all vdevs.
6249 */
6250 for (;;) {
6251 /*
6252 * We hold SCL_STATE to prevent vdev open/close/etc.
6253 * while we're attempting to write the vdev labels.
6254 */
6255 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
6256
6257 if (list_is_empty(&spa->spa_config_dirty_list)) {
6258 vdev_t *svd[SPA_DVAS_PER_BP];
6259 int svdcount = 0;
6260 int children = rvd->vdev_children;
6261 int c0 = spa_get_random(children);
6262
6263 for (int c = 0; c < children; c++) {
6264 vd = rvd->vdev_child[(c0 + c) % children];
6265 if (vd->vdev_ms_array == 0 || vd->vdev_islog)
6266 continue;
6267 svd[svdcount++] = vd;
6268 if (svdcount == SPA_DVAS_PER_BP)
6269 break;
6270 }
6271 error = vdev_config_sync(svd, svdcount, txg, B_FALSE);
6272 if (error != 0)
6273 error = vdev_config_sync(svd, svdcount, txg,
6274 B_TRUE);
6275 } else {
6276 error = vdev_config_sync(rvd->vdev_child,
6277 rvd->vdev_children, txg, B_FALSE);
6278 if (error != 0)
6279 error = vdev_config_sync(rvd->vdev_child,
6280 rvd->vdev_children, txg, B_TRUE);
6281 }
6282
6283 if (error == 0)
6284 spa->spa_last_synced_guid = rvd->vdev_guid;
6285
6286 spa_config_exit(spa, SCL_STATE, FTAG);
6287
6288 if (error == 0)
6289 break;
6290 zio_suspend(spa, NULL);
6291 zio_resume_wait(spa);
6292 }
6293 dmu_tx_commit(tx);
6294
6295 VERIFY(cyclic_reprogram(spa->spa_deadman_cycid, CY_INFINITY));
6296
6297 /*
6298 * Clear the dirty config list.
6299 */
6300 while ((vd = list_head(&spa->spa_config_dirty_list)) != NULL)
6301 vdev_config_clean(vd);
6302
6303 /*
6304 * Now that the new config has synced transactionally,
6305 * let it become visible to the config cache.
6306 */
6307 if (spa->spa_config_syncing != NULL) {
6308 spa_config_set(spa, spa->spa_config_syncing);
6309 spa->spa_config_txg = txg;
6310 spa->spa_config_syncing = NULL;
6311 }
6312
6313 spa->spa_ubsync = spa->spa_uberblock;
6314
6315 dsl_pool_sync_done(dp, txg);
6316
6317 /*
6318 * Update usable space statistics.
6319 */
6320 while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg))) != NULL)
6321 vdev_sync_done(vd, txg);
6322
6323 spa_update_dspace(spa);
6324
6325 /*
6326 * It had better be the case that we didn't dirty anything
6327 * since vdev_config_sync().
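 * Anything dirtied after the labels and uberblock have been
 * rewritten would never be written out for this txg, so the
 * ASSERTs below check that the dirty lists are empty.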
6328 */ 6329 ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg)); 6330 ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg)); 6331 ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg)); 6332 6333 spa->spa_sync_pass = 0; 6334 6335 spa_config_exit(spa, SCL_CONFIG, FTAG); 6336 6337 spa_handle_ignored_writes(spa); 6338 6339 /* 6340 * If any async tasks have been requested, kick them off. 6341 */ 6342 spa_async_dispatch(spa); 6343 } 6344 6345 /* 6346 * Sync all pools. We don't want to hold the namespace lock across these 6347 * operations, so we take a reference on the spa_t and drop the lock during the 6348 * sync. 6349 */ 6350 void 6351 spa_sync_allpools(void) 6352 { 6353 spa_t *spa = NULL; 6354 mutex_enter(&spa_namespace_lock); 6355 while ((spa = spa_next(spa)) != NULL) { 6356 if (spa_state(spa) != POOL_STATE_ACTIVE || 6357 !spa_writeable(spa) || spa_suspended(spa)) 6358 continue; 6359 spa_open_ref(spa, FTAG); 6360 mutex_exit(&spa_namespace_lock); 6361 txg_wait_synced(spa_get_dsl(spa), 0); 6362 mutex_enter(&spa_namespace_lock); 6363 spa_close(spa, FTAG); 6364 } 6365 mutex_exit(&spa_namespace_lock); 6366 } 6367 6368 /* 6369 * ========================================================================== 6370 * Miscellaneous routines 6371 * ========================================================================== 6372 */ 6373 6374 /* 6375 * Remove all pools in the system. 6376 */ 6377 void 6378 spa_evict_all(void) 6379 { 6380 spa_t *spa; 6381 6382 /* 6383 * Remove all cached state. All pools should be closed now, 6384 * so every spa in the AVL tree should be unreferenced. 6385 */ 6386 mutex_enter(&spa_namespace_lock); 6387 while ((spa = spa_next(NULL)) != NULL) { 6388 /* 6389 * Stop async tasks. The async thread may need to detach 6390 * a device that's been replaced, which requires grabbing 6391 * spa_namespace_lock, so we must drop it here. 6392 */ 6393 spa_open_ref(spa, FTAG); 6394 mutex_exit(&spa_namespace_lock); 6395 spa_async_suspend(spa); 6396 mutex_enter(&spa_namespace_lock); 6397 spa_close(spa, FTAG); 6398 6399 if (spa->spa_state != POOL_STATE_UNINITIALIZED) { 6400 spa_unload(spa); 6401 spa_deactivate(spa); 6402 } 6403 spa_remove(spa); 6404 } 6405 mutex_exit(&spa_namespace_lock); 6406 } 6407 6408 vdev_t * 6409 spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t aux) 6410 { 6411 vdev_t *vd; 6412 int i; 6413 6414 if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL) 6415 return (vd); 6416 6417 if (aux) { 6418 for (i = 0; i < spa->spa_l2cache.sav_count; i++) { 6419 vd = spa->spa_l2cache.sav_vdevs[i]; 6420 if (vd->vdev_guid == guid) 6421 return (vd); 6422 } 6423 6424 for (i = 0; i < spa->spa_spares.sav_count; i++) { 6425 vd = spa->spa_spares.sav_vdevs[i]; 6426 if (vd->vdev_guid == guid) 6427 return (vd); 6428 } 6429 } 6430 6431 return (NULL); 6432 } 6433 6434 void 6435 spa_upgrade(spa_t *spa, uint64_t version) 6436 { 6437 ASSERT(spa_writeable(spa)); 6438 6439 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 6440 6441 /* 6442 * This should only be called for a non-faulted pool, and since a 6443 * future version would result in an unopenable pool, this shouldn't be 6444 * possible. 
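 * (A pool whose on-disk version were from the future could never
 * have been opened in the first place, so the version we see here
 * must be supported, and an upgrade can only move it forward.)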
6445 */
6446 ASSERT(SPA_VERSION_IS_SUPPORTED(spa->spa_uberblock.ub_version));
6447 ASSERT3U(version, >=, spa->spa_uberblock.ub_version);
6448
6449 spa->spa_uberblock.ub_version = version;
6450 vdev_config_dirty(spa->spa_root_vdev);
6451
6452 spa_config_exit(spa, SCL_ALL, FTAG);
6453
6454 txg_wait_synced(spa_get_dsl(spa), 0);
6455 }
6456
6457 boolean_t
6458 spa_has_spare(spa_t *spa, uint64_t guid)
6459 {
6460 int i;
6461 uint64_t spareguid;
6462 spa_aux_vdev_t *sav = &spa->spa_spares;
6463
6464 for (i = 0; i < sav->sav_count; i++)
6465 if (sav->sav_vdevs[i]->vdev_guid == guid)
6466 return (B_TRUE);
6467
6468 for (i = 0; i < sav->sav_npending; i++) {
6469 if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID,
6470 &spareguid) == 0 && spareguid == guid)
6471 return (B_TRUE);
6472 }
6473
6474 return (B_FALSE);
6475 }
6476
6477 /*
6478 * Check if a pool has an active shared spare device.
6479 * Note: the reference count of an active spare is 2, once as a spare and
6480 * once as a replacing vdev.
6481 */
6482 static boolean_t
6483 spa_has_active_shared_spare(spa_t *spa)
6484 {
6485 int i, refcnt;
6486 uint64_t pool;
6487 spa_aux_vdev_t *sav = &spa->spa_spares;
6488
6489 for (i = 0; i < sav->sav_count; i++) {
6490 if (spa_spare_exists(sav->sav_vdevs[i]->vdev_guid, &pool,
6491 &refcnt) && pool != 0ULL && pool == spa_guid(spa) &&
6492 refcnt > 2)
6493 return (B_TRUE);
6494 }
6495
6496 return (B_FALSE);
6497 }
6498
6499 /*
6500 * Post a sysevent corresponding to the given event. The 'name' must be one of
6501 * the event definitions in sys/sysevent/eventdefs.h. The payload will be
6502 * filled in from the spa and (optionally) the vdev. This doesn't do anything
6503 * in the userland libzpool, as we don't want consumers to misinterpret ztest
6504 * or zdb as real changes.
6505 */
6506 void
6507 spa_event_notify(spa_t *spa, vdev_t *vd, const char *name)
6508 {
6509 #ifdef _KERNEL
6510 sysevent_t *ev;
6511 sysevent_attr_list_t *attr = NULL;
6512 sysevent_value_t value;
6513 sysevent_id_t eid;
6514
6515 ev = sysevent_alloc(EC_ZFS, (char *)name, SUNW_KERN_PUB "zfs",
6516 SE_SLEEP);
6517
6518 value.value_type = SE_DATA_TYPE_STRING;
6519 value.value.sv_string = spa_name(spa);
6520 if (sysevent_add_attr(&attr, ZFS_EV_POOL_NAME, &value, SE_SLEEP) != 0)
6521 goto done;
6522
6523 value.value_type = SE_DATA_TYPE_UINT64;
6524 value.value.sv_uint64 = spa_guid(spa);
6525 if (sysevent_add_attr(&attr, ZFS_EV_POOL_GUID, &value, SE_SLEEP) != 0)
6526 goto done;
6527
6528 if (vd) {
6529 value.value_type = SE_DATA_TYPE_UINT64;
6530 value.value.sv_uint64 = vd->vdev_guid;
6531 if (sysevent_add_attr(&attr, ZFS_EV_VDEV_GUID, &value,
6532 SE_SLEEP) != 0)
6533 goto done;
6534
6535 if (vd->vdev_path) {
6536 value.value_type = SE_DATA_TYPE_STRING;
6537 value.value.sv_string = vd->vdev_path;
6538 if (sysevent_add_attr(&attr, ZFS_EV_VDEV_PATH,
6539 &value, SE_SLEEP) != 0)
6540 goto done;
6541 }
6542 }
6543
6544 if (sysevent_attach_attributes(ev, attr) != 0)
6545 goto done;
6546 attr = NULL;
6547
6548 (void) log_sysevent(ev, SE_SLEEP, &eid);
6549
6550 done:
6551 if (attr)
6552 sysevent_free_attr(attr);
6553 sysevent_free(ev);
6554 #endif
6555 }
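
/*
 * Example (illustrative; actual call sites vary): a caller that has
 * detected a vdev state change might post
 *
 *	spa_event_notify(spa, vd, ESC_ZFS_VDEV_CHECK);
 *
 * with the event name taken from sys/sysevent/eventdefs.h.
 */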