	}
}

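/*
 * Add a ZPOOL_CONFIG_FEATURE_STATS nvlist to the pool config, mapping
 * each enabled feature's GUID to its reference count.
 */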
static void
spa_add_feature_stats(spa_t *spa, nvlist_t *config)
{
	nvlist_t *features;
	zap_cursor_t zc;
	zap_attribute_t za;

	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
	VERIFY(nvlist_alloc(&features, NV_UNIQUE_NAME, KM_SLEEP) == 0);

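	/*
	 * Walk the ZAP of features for read; each entry maps a feature
	 * GUID to its reference count.
	 */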
	if (spa->spa_feat_for_read_obj != 0) {
		for (zap_cursor_init(&zc, spa->spa_meta_objset,
		    spa->spa_feat_for_read_obj);
		    zap_cursor_retrieve(&zc, &za) == 0;
		    zap_cursor_advance(&zc)) {
			ASSERT(za.za_integer_length == sizeof (uint64_t) &&
			    za.za_num_integers == 1);
			VERIFY0(nvlist_add_uint64(features, za.za_name,
			    za.za_first_integer));
		}
		zap_cursor_fini(&zc);
	}

	if (spa->spa_feat_for_write_obj != 0) {
		for (zap_cursor_init(&zc, spa->spa_meta_objset,
		    spa->spa_feat_for_write_obj);
		    zap_cursor_retrieve(&zc, &za) == 0;
		    zap_cursor_advance(&zc)) {
			ASSERT(za.za_integer_length == sizeof (uint64_t) &&
			    za.za_num_integers == 1);
			VERIFY0(nvlist_add_uint64(features, za.za_name,
			    za.za_first_integer));
		}
		zap_cursor_fini(&zc);
	}

	VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_FEATURE_STATS,
	    features) == 0);
	nvlist_free(features);
}

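/*
 * Retrieve the configuration and statistics for the pool 'name'. The
 * config nvlist is returned via 'config'; the pool's alternate root,
 * if any, is copied into the 'altroot' buffer of length 'buflen'.
 */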
int
spa_get_stats(const char *name, nvlist_t **config,
    char *altroot, size_t buflen)
{
	int error;
	spa_t *spa;

	*config = NULL;
	error = spa_open_common(name, &spa, FTAG, NULL, config);

/* ... */

		if (zap_add(spa->spa_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
		    sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) {
			cmn_err(CE_PANIC, "failed to add deflate");
		}
	}

	/*
	 * Create the deferred-free bpobj. Turn off compression
	 * because sync-to-convergence takes longer if the blocksize
	 * keeps changing.
	 */
	obj = bpobj_alloc(spa->spa_meta_objset, 1 << 14, tx);
	dmu_object_set_compress(spa->spa_meta_objset, obj,
	    ZIO_COMPRESS_OFF, tx);
	if (zap_add(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPOBJ,
	    sizeof (uint64_t), 1, &obj, tx) != 0) {
		cmn_err(CE_PANIC, "failed to add bpobj");
	}
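	/*
	 * Keep the deferred-free bpobj open; spa_sync() appends frees
	 * to it and drains it in later txgs.
	 */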
	VERIFY0(bpobj_open(&spa->spa_deferred_bpobj,
	    spa->spa_meta_objset, obj));

	/*
	 * Create the pool's history object.
	 */
	if (version >= SPA_VERSION_ZPOOL_HISTORY)
		spa_history_create_obj(spa, tx);

	/*
	 * Set pool properties.
	 */
	spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS);
	spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
	spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE);
	spa->spa_autoexpand = zpool_prop_default_numeric(ZPOOL_PROP_AUTOEXPAND);

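	/*
	 * Apply any properties passed in at creation time; the config
	 * file setting is handled in-core by spa_configfile_set(), and
	 * the remaining properties are synced out by spa_sync_props().
	 */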
	if (props != NULL) {
		spa_configfile_set(spa, props, B_FALSE);
		spa_sync_props(spa, props, tx);
	}

/* ... */

	/*
	 * Evacuate the device. We don't hold the config lock as writer
	 * since we need to do I/O, but we do keep the spa_namespace_lock
	 * held. Once this completes, the device should no longer have
	 * any blocks allocated on it.
	 */
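	/* Removal is currently supported only for log devices. */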
	if (vd->vdev_islog) {
		if (vd->vdev_stat.vs_alloc != 0)
			error = spa_offline_log(spa);
	} else {
		error = ENOTSUP;
	}

	if (error)
		return (error);

	/*
	 * The evacuation succeeded. Remove any remaining MOS metadata
	 * associated with this vdev, and wait for these changes to sync.
	 */
	ASSERT0(vd->vdev_stat.vs_alloc);
	txg = spa_vdev_config_enter(spa);
	vd->vdev_removing = B_TRUE;
	vdev_dirty(vd, 0, NULL, txg);
	vdev_config_dirty(vd);
	spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);

	return (0);
}

/*
 * Complete the removal by cleaning up the namespace.
 */
static void
spa_vdev_remove_from_namespace(spa_t *spa, vdev_t *vd)
{
	vdev_t *rvd = spa->spa_root_vdev;
	uint64_t id = vd->vdev_id;
	boolean_t last_vdev = (id == (rvd->vdev_children - 1));

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

/* ... */

	nvpair_t *elem = NULL;

	mutex_enter(&spa->spa_props_lock);

	while ((elem = nvlist_next_nvpair(nvp, elem))) {
		uint64_t intval;
		char *strval, *fname;
		zpool_prop_t prop;
		const char *propname;
		zprop_type_t proptype;
		zfeature_info_t *feature;

		switch (prop = zpool_name_to_prop(nvpair_name(elem))) {
		case ZPROP_INVAL:
			/*
			 * We checked this earlier in spa_prop_validate().
			 */
			ASSERT(zpool_prop_feature(nvpair_name(elem)));

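			/*
			 * Feature properties are named "feature@<name>";
			 * skip past the '@' to get the feature name for
			 * the lookup.
			 */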
			fname = strchr(nvpair_name(elem), '@') + 1;
			VERIFY0(zfeature_lookup_name(fname, &feature));

			spa_feature_enable(spa, feature, tx);
			spa_history_log_internal(spa, "set", tx,
			    "%s=enabled", nvpair_name(elem));
			break;

		case ZPOOL_PROP_VERSION:
			VERIFY(nvpair_value_uint64(elem, &intval) == 0);
			/*
			 * The version is synced separately before other
			 * properties and should be correct by now.
			 */
			ASSERT3U(spa_version(spa), >=, intval);
			break;

		case ZPOOL_PROP_ALTROOT:
			/*
			 * 'altroot' is a non-persistent property. It should
			 * have been set temporarily at creation or import time.
			 */

/* ... */

			    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
			    sizeof (uint64_t), 1, &spa->spa_deflate, tx));
		}
	}

	/*
	 * If anything has changed in this txg, or if someone is waiting
	 * for this txg to sync (e.g., spa_vdev_remove()), push the
	 * deferred frees from the previous txg. If not, leave them
	 * alone so that we don't generate work on an otherwise idle
	 * system.
	 */
	if (!txg_list_empty(&dp->dp_dirty_datasets, txg) ||
	    !txg_list_empty(&dp->dp_dirty_dirs, txg) ||
	    !txg_list_empty(&dp->dp_sync_tasks, txg) ||
	    ((dsl_scan_active(dp->dp_scan) ||
	    txg_sync_waiting(dp)) && !spa_shutting_down(spa))) {
		zio_t *zio = zio_root(spa, NULL, NULL, 0);
		VERIFY3U(bpobj_iterate(defer_bpo,
		    spa_free_sync_cb, zio, tx), ==, 0);
		VERIFY0(zio_wait(zio));
	}

	/*
	 * Iterate to convergence.
	 */
	do {
		int pass = ++spa->spa_sync_pass;

		spa_sync_config_object(spa, tx);
		spa_sync_aux_dev(spa, &spa->spa_spares, tx,
		    ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES);
		spa_sync_aux_dev(spa, &spa->spa_l2cache, tx,
		    ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE);
		spa_errlog_sync(spa, txg);
		dsl_pool_sync(dp, txg);

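		/*
		 * During the first SYNC_PASS_DEFERRED_FREE passes, issue
		 * this txg's frees immediately; later passes instead
		 * defer them to the bpobj drained above, so that sync
		 * can converge.
		 */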
		if (pass <= SYNC_PASS_DEFERRED_FREE) {
			zio_t *zio = zio_root(spa, NULL, NULL, 0);
			bplist_iterate(free_bpl, spa_free_sync_cb,
			    zio, tx);