1210 spa_async_suspend(spa);
1211
1212 /*
1213 * Stop syncing.
1214 */
1215 if (spa->spa_sync_on) {
1216 txg_sync_stop(spa->spa_dsl_pool);
1217 spa->spa_sync_on = B_FALSE;
1218 }
1219
1220 /*
1221 * Wait for any outstanding async I/O to complete.
1222 */
1223 if (spa->spa_async_zio_root != NULL) {
1224 (void) zio_wait(spa->spa_async_zio_root);
1225 spa->spa_async_zio_root = NULL;
1226 }
1227
1228 bpobj_close(&spa->spa_deferred_bpobj);
1229
1230 /*
1231 * Close the dsl pool.
1232 */
1233 if (spa->spa_dsl_pool) {
1234 dsl_pool_close(spa->spa_dsl_pool);
1235 spa->spa_dsl_pool = NULL;
1236 spa->spa_meta_objset = NULL;
1237 }
1238
1239 ddt_unload(spa);
1240
1241 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1242
1243 /*
1244 * Drop and purge level 2 cache
1245 */
1246 spa_l2cache_drop(spa);
1247
1248 /*
1249 * Close all vdevs.
1250 */
1251 if (spa->spa_root_vdev)
1252 vdev_free(spa->spa_root_vdev);
1253 ASSERT(spa->spa_root_vdev == NULL);
1254
1255 for (i = 0; i < spa->spa_spares.sav_count; i++)
1256 vdev_free(spa->spa_spares.sav_vdevs[i]);
1257 if (spa->spa_spares.sav_vdevs) {
1258 kmem_free(spa->spa_spares.sav_vdevs,
1259 spa->spa_spares.sav_count * sizeof (void *));
1260 spa->spa_spares.sav_vdevs = NULL;
1261 }
1262 if (spa->spa_spares.sav_config) {
1263 nvlist_free(spa->spa_spares.sav_config);
1264 spa->spa_spares.sav_config = NULL;
1265 }
1266 spa->spa_spares.sav_count = 0;
1267
1268 for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
1269 vdev_clear_stats(spa->spa_l2cache.sav_vdevs[i]);
1270 vdev_free(spa->spa_l2cache.sav_vdevs[i]);
1271 }
1272 if (spa->spa_l2cache.sav_vdevs) {
1273 kmem_free(spa->spa_l2cache.sav_vdevs,
1274 spa->spa_l2cache.sav_count * sizeof (void *));
4484 dtl_max_txg = txg + TXG_CONCURRENT_STATES;
4485
4486 vdev_dtl_dirty(newvd, DTL_MISSING, TXG_INITIAL,
4487 dtl_max_txg - TXG_INITIAL);
4488
4489 if (newvd->vdev_isspare) {
4490 spa_spare_activate(newvd);
4491 spa_event_notify(spa, newvd, ESC_ZFS_VDEV_SPARE);
4492 }
4493
4494 oldvdpath = spa_strdup(oldvd->vdev_path);
4495 newvdpath = spa_strdup(newvd->vdev_path);
4496 newvd_isspare = newvd->vdev_isspare;
4497
4498 /*
4499 * Mark newvd's DTL dirty in this txg.
4500 */
4501 vdev_dirty(tvd, VDD_DTL, newvd, txg);
4502
4503 /*
4504 * Restart the resilver
4505 */
4506 dsl_resilver_restart(spa->spa_dsl_pool, dtl_max_txg);
4507
4508 /*
4509 * Commit the config
4510 */
4511 (void) spa_vdev_exit(spa, newrootvd, dtl_max_txg, 0);
4512
4513 spa_history_log_internal(spa, "vdev attach", NULL,
4514 "%s vdev=%s %s vdev=%s",
4515 replacing && newvd_isspare ? "spare in" :
4516 replacing ? "replace" : "attach", newvdpath,
4517 replacing ? "for" : "to", oldvdpath);
4518
4519 spa_strfree(oldvdpath);
4520 spa_strfree(newvdpath);
4521
4522 if (spa->spa_bootfs)
4523 spa_event_notify(spa, newvd, ESC_ZFS_BOOTFS_VDEV_ATTACH);
4524
5106 * spa_namespace_lock held. Once this completes the device
5107 * should no longer have any blocks allocated on it.
5108 */
5109 if (vd->vdev_islog) {
5110 if (vd->vdev_stat.vs_alloc != 0)
5111 error = spa_offline_log(spa);
5112 } else {
5113 error = SET_ERROR(ENOTSUP);
5114 }
5115
5116 if (error)
5117 return (error);
5118
5119 /*
5120 * The evacuation succeeded. Remove any remaining MOS metadata
5121 * associated with this vdev, and wait for these changes to sync.
5122 */
5123 ASSERT0(vd->vdev_stat.vs_alloc);
5124 txg = spa_vdev_config_enter(spa);
5125 vd->vdev_removing = B_TRUE;
5126 vdev_dirty(vd, 0, NULL, txg);
5127 vdev_config_dirty(vd);
5128 spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
5129
5130 return (0);
5131 }
5132
5133 /*
5134 * Complete the removal by cleaning up the namespace.
5135 */
5136 static void
5137 spa_vdev_remove_from_namespace(spa_t *spa, vdev_t *vd)
5138 {
5139 vdev_t *rvd = spa->spa_root_vdev;
5140 uint64_t id = vd->vdev_id;
5141 boolean_t last_vdev = (id == (rvd->vdev_children - 1));
5142
5143 ASSERT(MUTEX_HELD(&spa_namespace_lock));
5144 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
5145 ASSERT(vd == vd->vdev_top);
5146
5906 nvpair_t *elem = NULL;
5907
5908 mutex_enter(&spa->spa_props_lock);
5909
5910 while ((elem = nvlist_next_nvpair(nvp, elem))) {
5911 uint64_t intval;
5912 char *strval, *fname;
5913 zpool_prop_t prop;
5914 const char *propname;
5915 zprop_type_t proptype;
5916 zfeature_info_t *feature;
5917
5918 switch (prop = zpool_name_to_prop(nvpair_name(elem))) {
5919 case ZPROP_INVAL:
5920 /*
5921 * We checked this earlier in spa_prop_validate().
5922 */
5923 ASSERT(zpool_prop_feature(nvpair_name(elem)));
5924
5925 fname = strchr(nvpair_name(elem), '@') + 1;
5926 VERIFY3U(0, ==, zfeature_lookup_name(fname, &feature));
5927
5928 spa_feature_enable(spa, feature, tx);
5929 spa_history_log_internal(spa, "set", tx,
5930 "%s=enabled", nvpair_name(elem));
5931 break;
5932
5933 case ZPOOL_PROP_VERSION:
5934 VERIFY(nvpair_value_uint64(elem, &intval) == 0);
5935 /*
5936 * The version is synced separately before other
5937 * properties and should be correct by now.
5938 */
5939 ASSERT3U(spa_version(spa), >=, intval);
5940 break;
5941
5942 case ZPOOL_PROP_ALTROOT:
5943 /*
5944 * 'altroot' is a non-persistent property. It should
5945 * have been set temporarily at creation or import time.
5946 */
5947 ASSERT(spa->spa_root != NULL);
5948 break;
5949
5950 case ZPOOL_PROP_READONLY:
5951 case ZPOOL_PROP_CACHEFILE:
5952 /*
5953 * 'readonly' and 'cachefile' are also non-persistent
5954 * properties.
5955 */
5956 break;
5957 case ZPOOL_PROP_COMMENT:
5958 VERIFY(nvpair_value_string(elem, &strval) == 0);
5959 if (spa->spa_comment != NULL)
5960 spa_strfree(spa->spa_comment);
5961 spa->spa_comment = spa_strdup(strval);
5962 /*
5963 * We need to dirty the configuration on all the vdevs
5964 * so that their labels get updated. It's unnecessary
5965 * to do this for pool creation since the vdev's
5966 * configuration has already been dirtied.
5967 */
5968 if (tx->tx_txg != TXG_INITIAL)
5969 vdev_config_dirty(spa->spa_root_vdev);
5970 spa_history_log_internal(spa, "set", tx,
5971 "%s=%s", nvpair_name(elem), strval);
5972 break;
5973 default:
5974 /*
5975 * Set pool property values in the poolprops mos object.
5976 */
5977 if (spa->spa_pool_props_object == 0) {
5978 spa->spa_pool_props_object =
5979 zap_create_link(mos, DMU_OT_POOL_PROPS,
5980 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS,
5981 tx);
5982 }
5983
5984 /* normalize the property name */
5985 propname = zpool_prop_to_name(prop);
5986 proptype = zpool_prop_get_type(prop);
5987
5988 if (nvpair_type(elem) == DATA_TYPE_STRING) {
5989 ASSERT(proptype == PROP_TYPE_STRING);
5990 VERIFY(nvpair_value_string(elem, &strval) == 0);
5991 VERIFY(zap_update(mos,
5992 spa->spa_pool_props_object, propname,
5993 1, strlen(strval) + 1, strval, tx) == 0);
5994 spa_history_log_internal(spa, "set", tx,
5995 "%s=%s", nvpair_name(elem), strval);
5996 } else if (nvpair_type(elem) == DATA_TYPE_UINT64) {
5997 VERIFY(nvpair_value_uint64(elem, &intval) == 0);
5998
5999 if (proptype == PROP_TYPE_INDEX) {
6000 const char *unused;
6001 VERIFY(zpool_prop_index_to_string(
6002 prop, intval, &unused) == 0);
6003 }
6004 VERIFY(zap_update(mos,
6005 spa->spa_pool_props_object, propname,
6006 8, 1, &intval, tx) == 0);
6007 spa_history_log_internal(spa, "set", tx,
6008 "%s=%lld", nvpair_name(elem), intval);
6009 } else {
6010 ASSERT(0); /* not allowed */
6011 }
6012
6013 switch (prop) {
6014 case ZPOOL_PROP_DELEGATION:
6015 spa->spa_delegation = intval;
6016 break;
6017 case ZPOOL_PROP_BOOTFS:
6018 spa->spa_bootfs = intval;
6019 break;
6020 case ZPOOL_PROP_FAILUREMODE:
6021 spa->spa_failmode = intval;
6022 break;
6023 case ZPOOL_PROP_AUTOEXPAND:
6024 spa->spa_autoexpand = intval;
6025 if (tx->tx_txg != TXG_INITIAL)
6026 spa_async_request(spa,
|
1210 spa_async_suspend(spa);
1211
1212 /*
1213 * Stop syncing.
1214 */
1215 if (spa->spa_sync_on) {
1216 txg_sync_stop(spa->spa_dsl_pool);
1217 spa->spa_sync_on = B_FALSE;
1218 }
1219
1220 /*
1221 * Wait for any outstanding async I/O to complete.
1222 */
1223 if (spa->spa_async_zio_root != NULL) {
1224 (void) zio_wait(spa->spa_async_zio_root);
1225 spa->spa_async_zio_root = NULL;
1226 }
1227
1228 bpobj_close(&spa->spa_deferred_bpobj);
1229
1230 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1231
1232 /*
1233 * Close all vdevs.
1234 */
1235 if (spa->spa_root_vdev)
1236 vdev_free(spa->spa_root_vdev);
1237 ASSERT(spa->spa_root_vdev == NULL);
1238
1239 /*
1240 * Close the dsl pool.
1241 */
1242 if (spa->spa_dsl_pool) {
1243 dsl_pool_close(spa->spa_dsl_pool);
1244 spa->spa_dsl_pool = NULL;
1245 spa->spa_meta_objset = NULL;
1246 }
1247
1248 ddt_unload(spa);
1249
1250
1251 /*
1252 * Drop and purge level 2 cache
1253 */
1254 spa_l2cache_drop(spa);
1255
1256 for (i = 0; i < spa->spa_spares.sav_count; i++)
1257 vdev_free(spa->spa_spares.sav_vdevs[i]);
1258 if (spa->spa_spares.sav_vdevs) {
1259 kmem_free(spa->spa_spares.sav_vdevs,
1260 spa->spa_spares.sav_count * sizeof (void *));
1261 spa->spa_spares.sav_vdevs = NULL;
1262 }
1263 if (spa->spa_spares.sav_config) {
1264 nvlist_free(spa->spa_spares.sav_config);
1265 spa->spa_spares.sav_config = NULL;
1266 }
1267 spa->spa_spares.sav_count = 0;
1268
1269 for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
1270 vdev_clear_stats(spa->spa_l2cache.sav_vdevs[i]);
1271 vdev_free(spa->spa_l2cache.sav_vdevs[i]);
1272 }
1273 if (spa->spa_l2cache.sav_vdevs) {
1274 kmem_free(spa->spa_l2cache.sav_vdevs,
1275 spa->spa_l2cache.sav_count * sizeof (void *));
4485 dtl_max_txg = txg + TXG_CONCURRENT_STATES;
4486
4487 vdev_dtl_dirty(newvd, DTL_MISSING, TXG_INITIAL,
4488 dtl_max_txg - TXG_INITIAL);
4489
4490 if (newvd->vdev_isspare) {
4491 spa_spare_activate(newvd);
4492 spa_event_notify(spa, newvd, ESC_ZFS_VDEV_SPARE);
4493 }
4494
4495 oldvdpath = spa_strdup(oldvd->vdev_path);
4496 newvdpath = spa_strdup(newvd->vdev_path);
4497 newvd_isspare = newvd->vdev_isspare;
4498
4499 /*
4500 * Mark newvd's DTL dirty in this txg.
4501 */
4502 vdev_dirty(tvd, VDD_DTL, newvd, txg);
4503
4504 /*
4505 * Schedule the resilver to restart in the future. We do this to
4506 * ensure that dmu_sync-ed blocks have been stitched into the
4507 * respective datasets.
4508 */
4509 dsl_resilver_restart(spa->spa_dsl_pool, dtl_max_txg);
4510
4511 /*
4512 * Commit the config
4513 */
4514 (void) spa_vdev_exit(spa, newrootvd, dtl_max_txg, 0);
4515
4516 spa_history_log_internal(spa, "vdev attach", NULL,
4517 "%s vdev=%s %s vdev=%s",
4518 replacing && newvd_isspare ? "spare in" :
4519 replacing ? "replace" : "attach", newvdpath,
4520 replacing ? "for" : "to", oldvdpath);
4521
4522 spa_strfree(oldvdpath);
4523 spa_strfree(newvdpath);
4524
4525 if (spa->spa_bootfs)
4526 spa_event_notify(spa, newvd, ESC_ZFS_BOOTFS_VDEV_ATTACH);
4527
5109 * spa_namespace_lock held. Once this completes the device
5110 * should no longer have any blocks allocated on it.
5111 */
5112 if (vd->vdev_islog) {
5113 if (vd->vdev_stat.vs_alloc != 0)
5114 error = spa_offline_log(spa);
5115 } else {
5116 error = SET_ERROR(ENOTSUP);
5117 }
5118
5119 if (error)
5120 return (error);
5121
5122 /*
5123 * The evacuation succeeded. Remove any remaining MOS metadata
5124 * associated with this vdev, and wait for these changes to sync.
5125 */
5126 ASSERT0(vd->vdev_stat.vs_alloc);
5127 txg = spa_vdev_config_enter(spa);
5128 vd->vdev_removing = B_TRUE;
5129 vdev_dirty_leaves(vd, VDD_DTL, txg);
5130 vdev_config_dirty(vd);
5131 spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
5132
5133 return (0);
5134 }
5135
5136 /*
5137 * Complete the removal by cleaning up the namespace.
5138 */
5139 static void
5140 spa_vdev_remove_from_namespace(spa_t *spa, vdev_t *vd)
5141 {
5142 vdev_t *rvd = spa->spa_root_vdev;
5143 uint64_t id = vd->vdev_id;
5144 boolean_t last_vdev = (id == (rvd->vdev_children - 1));
5145
5146 ASSERT(MUTEX_HELD(&spa_namespace_lock));
5147 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
5148 ASSERT(vd == vd->vdev_top);
5149
5909 nvpair_t *elem = NULL;
5910
5911 mutex_enter(&spa->spa_props_lock);
5912
5913 while ((elem = nvlist_next_nvpair(nvp, elem))) {
5914 uint64_t intval;
5915 char *strval, *fname;
5916 zpool_prop_t prop;
5917 const char *propname;
5918 zprop_type_t proptype;
5919 zfeature_info_t *feature;
5920
5921 switch (prop = zpool_name_to_prop(nvpair_name(elem))) {
5922 case ZPROP_INVAL:
5923 /*
5924 * We checked this earlier in spa_prop_validate().
5925 */
5926 ASSERT(zpool_prop_feature(nvpair_name(elem)));
5927
5928 fname = strchr(nvpair_name(elem), '@') + 1;
5929 VERIFY0(zfeature_lookup_name(fname, &feature));
5930
5931 spa_feature_enable(spa, feature, tx);
5932 spa_history_log_internal(spa, "set", tx,
5933 "%s=enabled", nvpair_name(elem));
5934 break;
5935
5936 case ZPOOL_PROP_VERSION:
5937 intval = fnvpair_value_uint64(elem);
5938 /*
5939 * The version is synced separately before other
5940 * properties and should be correct by now.
5941 */
5942 ASSERT3U(spa_version(spa), >=, intval);
5943 break;
5944
5945 case ZPOOL_PROP_ALTROOT:
5946 /*
5947 * 'altroot' is a non-persistent property. It should
5948 * have been set temporarily at creation or import time.
5949 */
5950 ASSERT(spa->spa_root != NULL);
5951 break;
5952
5953 case ZPOOL_PROP_READONLY:
5954 case ZPOOL_PROP_CACHEFILE:
5955 /*
5956 * 'readonly' and 'cachefile' are also non-persistent
5957 * properties.
5958 */
5959 break;
5960 case ZPOOL_PROP_COMMENT:
5961 strval = fnvpair_value_string(elem);
5962 if (spa->spa_comment != NULL)
5963 spa_strfree(spa->spa_comment);
5964 spa->spa_comment = spa_strdup(strval);
5965 /*
5966 * We need to dirty the configuration on all the vdevs
5967 * so that their labels get updated. It's unnecessary
5968 * to do this for pool creation since the vdev's
5969 * configuration has already been dirtied.
5970 */
5971 if (tx->tx_txg != TXG_INITIAL)
5972 vdev_config_dirty(spa->spa_root_vdev);
5973 spa_history_log_internal(spa, "set", tx,
5974 "%s=%s", nvpair_name(elem), strval);
5975 break;
5976 default:
5977 /*
5978 * Set pool property values in the poolprops mos object.
5979 */
5980 if (spa->spa_pool_props_object == 0) {
5981 spa->spa_pool_props_object =
5982 zap_create_link(mos, DMU_OT_POOL_PROPS,
5983 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS,
5984 tx);
5985 }
5986
5987 /* normalize the property name */
5988 propname = zpool_prop_to_name(prop);
5989 proptype = zpool_prop_get_type(prop);
5990
5991 if (nvpair_type(elem) == DATA_TYPE_STRING) {
5992 ASSERT(proptype == PROP_TYPE_STRING);
5993 strval = fnvpair_value_string(elem);
5994 VERIFY0(zap_update(mos,
5995 spa->spa_pool_props_object, propname,
5996 1, strlen(strval) + 1, strval, tx));
5997 spa_history_log_internal(spa, "set", tx,
5998 "%s=%s", nvpair_name(elem), strval);
5999 } else if (nvpair_type(elem) == DATA_TYPE_UINT64) {
6000 intval = fnvpair_value_uint64(elem);
6001
6002 if (proptype == PROP_TYPE_INDEX) {
6003 const char *unused;
6004 VERIFY0(zpool_prop_index_to_string(
6005 prop, intval, &unused));
6006 }
6007 VERIFY0(zap_update(mos,
6008 spa->spa_pool_props_object, propname,
6009 8, 1, &intval, tx));
6010 spa_history_log_internal(spa, "set", tx,
6011 "%s=%lld", nvpair_name(elem), intval);
6012 } else {
6013 ASSERT(0); /* not allowed */
6014 }
6015
6016 switch (prop) {
6017 case ZPOOL_PROP_DELEGATION:
6018 spa->spa_delegation = intval;
6019 break;
6020 case ZPOOL_PROP_BOOTFS:
6021 spa->spa_bootfs = intval;
6022 break;
6023 case ZPOOL_PROP_FAILUREMODE:
6024 spa->spa_failmode = intval;
6025 break;
6026 case ZPOOL_PROP_AUTOEXPAND:
6027 spa->spa_autoexpand = intval;
6028 if (tx->tx_txg != TXG_INITIAL)
6029 spa_async_request(spa,
|