1700 spa_config_exit(spa, SCL_ALL, FTAG);
1701
1702 /*
1703 * Ensure we were able to validate the config.
1704 */
1705 return (rvd->vdev_guid_sum == spa->spa_uberblock.ub_guid_sum);
1706 }
1707
1708 /*
1709 * Check for missing log devices
1710 */
1711 static boolean_t
1712 spa_check_logs(spa_t *spa)
1713 {
1714 boolean_t rv = B_FALSE;
1715
1716 switch (spa->spa_log_state) {
1717 case SPA_LOG_MISSING:
1718 /* need to recheck in case slog has been restored */
1719 case SPA_LOG_UNKNOWN:
1720 rv = (dmu_objset_find(spa->spa_name, zil_check_log_chain,
1721 NULL, DS_FIND_CHILDREN) != 0);
1722 if (rv)
1723 spa_set_log_state(spa, SPA_LOG_MISSING);
1724 break;
1725 }
1726 return (rv);
1727 }
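/*
 * Usage sketch (illustrative only, not a verbatim caller from this excerpt):
 * spa_load_impl() typically treats a B_TRUE return from spa_check_logs() as
 * a fatal log problem, roughly as below; rvd and ereport are assumed to be
 * the caller's root vdev and ereport pointer.
 */
#if 0
	if (spa_check_logs(spa)) {
		*ereport = FM_EREPORT_ZFS_LOG_REPLAY;
		return (spa_vdev_err(rvd, VDEV_AUX_BAD_LOG, ENXIO));
	}
#endif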
1728
1729 static boolean_t
1730 spa_passivate_log(spa_t *spa)
1731 {
1732 vdev_t *rvd = spa->spa_root_vdev;
1733 boolean_t slog_found = B_FALSE;
1734
1735 ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));
1736
1737 if (!spa_has_slogs(spa))
1738 return (B_FALSE);
1739
1740 for (int c = 0; c < rvd->vdev_children; c++) {
1741 vdev_t *tvd = rvd->vdev_child[c];
2643 }
2644
2645 if (spa_writeable(spa) && (state == SPA_LOAD_RECOVER ||
2646 spa->spa_load_max_txg == UINT64_MAX)) {
2647 dmu_tx_t *tx;
2648 int need_update = B_FALSE;
2649
2650 ASSERT(state != SPA_LOAD_TRYIMPORT);
2651
2652 /*
2653 * Claim log blocks that haven't been committed yet.
2654 * This must all happen in a single txg.
2655 * Note: spa_claim_max_txg is updated by spa_claim_notify(),
2656 * invoked from zil_claim_log_block()'s i/o done callback.
2657 * Price of rollback is that we abandon the log.
2658 */
2659 spa->spa_claiming = B_TRUE;
2660
2661 tx = dmu_tx_create_assigned(spa_get_dsl(spa),
2662 spa_first_txg(spa));
2663 (void) dmu_objset_find(spa_name(spa),
2664 zil_claim, tx, DS_FIND_CHILDREN);
2665 dmu_tx_commit(tx);
2666
2667 spa->spa_claiming = B_FALSE;
2668
2669 spa_set_log_state(spa, SPA_LOG_GOOD);
2670 spa->spa_sync_on = B_TRUE;
2671 txg_sync_start(spa->spa_dsl_pool);
2672
2673 /*
2674 * Wait for all claims to sync. We sync up to the highest
2675 * claimed log block birth time so that claimed log blocks
2676 * don't appear to be from the future. spa_claim_max_txg
2677 * will have been set for us by either zil_check_log_chain()
2678 * (invoked from spa_check_logs()) or zil_claim() above.
2679 */
2680 txg_wait_synced(spa->spa_dsl_pool, spa->spa_claim_max_txg);
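/*
 * For reference (a sketch, not code belonging to this function):
 * spa_claim_notify() in spa_misc.c records the highest claimed birth txg
 * roughly as follows, assuming the usual trick of borrowing spa_props_lock:
 *
 *	if (zio->io_error == 0) {
 *		mutex_enter(&spa->spa_props_lock);
 *		if (spa->spa_claim_max_txg < zio->io_bp->blk_birth)
 *			spa->spa_claim_max_txg = zio->io_bp->blk_birth;
 *		mutex_exit(&spa->spa_props_lock);
 *	}
 */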
2681
2682 /*
2683 * If the config cache is stale, or we have uninitialized
2835 spa->spa_load_info = loadinfo;
2836
2837 return (load_error);
2838 }
2839 }
2840
2841 /*
2842 * Pool Open/Import
2843 *
2844 * The import case is identical to an open except that the configuration is sent
2845 * down from userland, instead of grabbed from the configuration cache. For the
2846 * case of an open, the pool configuration will exist in the
2847 * POOL_STATE_UNINITIALIZED state.
2848 *
2849 * The stats information (gen/count/ustats) is used to gather vdev statistics at
2850 * the same time as opening the pool, without having to keep around the spa_t in some
2851 * ambiguous state.
2852 */
2853 static int
2854 spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t *nvpolicy,
2855 nvlist_t **config)
2856 {
2857 spa_t *spa;
2858 spa_load_state_t state = SPA_LOAD_OPEN;
2859 int error;
2860 int locked = B_FALSE;
2861
2862 *spapp = NULL;
2863
2864 /*
2865 * As disgusting as this is, we need to support recursive calls to this
2866 * function because dsl_dir_open() is called during spa_load(), and ends
2867 * up calling spa_open() again. The real fix is to figure out how to
2868 * avoid dsl_dir_open() calling this in the first place.
2869 */
2870 if (mutex_owner(&spa_namespace_lock) != curthread) {
2871 mutex_enter(&spa_namespace_lock);
2872 locked = B_TRUE;
2873 }
2874
2875 if ((spa = spa_lookup(pool)) == NULL) {
2876 if (locked)
2877 mutex_exit(&spa_namespace_lock);
2878 return (SET_ERROR(ENOENT));
2879 }
2880
2881 if (spa->spa_state == POOL_STATE_UNINITIALIZED) {
2882 zpool_rewind_policy_t policy;
2883
2884 zpool_get_rewind_policy(nvpolicy ? nvpolicy : spa->spa_config,
2885 &policy);
2886 if (policy.zrp_request & ZPOOL_DO_REWIND)
2887 state = SPA_LOAD_RECOVER;
2888
2889 spa_activate(spa, spa_mode_global);
2890
2947 VERIFY(nvlist_add_nvlist(*config, ZPOOL_CONFIG_LOAD_INFO,
2948 spa->spa_load_info) == 0);
2949 }
2950
2951 if (locked) {
2952 spa->spa_last_open_failed = 0;
2953 spa->spa_last_ubsync_txg = 0;
2954 spa->spa_load_txg = 0;
2955 mutex_exit(&spa_namespace_lock);
2956 }
2957
2958 *spapp = spa;
2959
2960 return (0);
2961 }
2962
2963 int
2964 spa_open_rewind(const char *name, spa_t **spapp, void *tag, nvlist_t *policy,
2965 nvlist_t **config)
2966 {
2967 return (spa_open_common(name, spapp, tag, policy, config));
2968 }
2969
2970 int
2971 spa_open(const char *name, spa_t **spapp, void *tag)
2972 {
2973 return (spa_open_common(name, spapp, tag, NULL, NULL));
2974 }
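/*
 * Minimal usage sketch (an assumption about a typical consumer, not a caller
 * taken from this file; the pool name "tank" is hypothetical): every
 * successful spa_open() is balanced by spa_close() with the same tag.
 */
#if 0
	spa_t *spa;
	int err;

	if ((err = spa_open("tank", &spa, FTAG)) != 0)
		return (err);
	/* ... examine the pool while the open hold is in place ... */
	spa_close(spa, FTAG);
#endif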
2975
2976 /*
2977 * Look up the given spa_t, incrementing the inject count in the process,
2978 * preventing it from being exported or destroyed.
2979 */
2980 spa_t *
2981 spa_inject_addref(char *name)
2982 {
2983 spa_t *spa;
2984
2985 mutex_enter(&spa_namespace_lock);
2986 if ((spa = spa_lookup(name)) == NULL) {
2987 mutex_exit(&spa_namespace_lock);
2988 return (NULL);
2989 }
2990 spa->spa_inject_ref++;
2991 mutex_exit(&spa_namespace_lock);
2992
2993 return (spa);
3137 za.za_num_integers == 1);
3138 VERIFY3U(0, ==, nvlist_add_uint64(features, za.za_name,
3139 za.za_first_integer));
3140 }
3141 zap_cursor_fini(&zc);
3142 }
3143
3144 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_FEATURE_STATS,
3145 features) == 0);
3146 nvlist_free(features);
3147 }
3148
3149 int
3150 spa_get_stats(const char *name, nvlist_t **config,
3151 char *altroot, size_t buflen)
3152 {
3153 int error;
3154 spa_t *spa;
3155
3156 *config = NULL;
3157 error = spa_open_common(name, &spa, FTAG, NULL, config);
3158
3159 if (spa != NULL) {
3160 /*
3161 * This still leaves a window of inconsistency where the spares
3162 * or l2cache devices could change and the config would be
3163 * self-inconsistent.
3164 */
3165 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
3166
3167 if (*config != NULL) {
3168 uint64_t loadtimes[2];
3169
3170 loadtimes[0] = spa->spa_loaded_ts.tv_sec;
3171 loadtimes[1] = spa->spa_loaded_ts.tv_nsec;
3172 VERIFY(nvlist_add_uint64_array(*config,
3173 ZPOOL_CONFIG_LOADED_TIME, loadtimes, 2) == 0);
3174
3175 VERIFY(nvlist_add_uint64(*config,
3176 ZPOOL_CONFIG_ERRCOUNT,
3177 spa_get_errlog_size(spa)) == 0);
1700 spa_config_exit(spa, SCL_ALL, FTAG);
1701
1702 /*
1703 * Ensure we were able to validate the config.
1704 */
1705 return (rvd->vdev_guid_sum == spa->spa_uberblock.ub_guid_sum);
1706 }
1707
1708 /*
1709 * Check for missing log devices
1710 */
1711 static boolean_t
1712 spa_check_logs(spa_t *spa)
1713 {
1714 boolean_t rv = B_FALSE;
1715
1716 switch (spa->spa_log_state) {
1717 case SPA_LOG_MISSING:
1718 /* need to recheck in case slog has been restored */
1719 case SPA_LOG_UNKNOWN:
1720 rv = (dmu_objset_find_parallel(spa->spa_name,
1721 zil_check_log_chain, NULL, DS_FIND_CHILDREN) != 0);
1722 if (rv)
1723 spa_set_log_state(spa, SPA_LOG_MISSING);
1724 break;
1725 }
1726 return (rv);
1727 }
1728
1729 static boolean_t
1730 spa_passivate_log(spa_t *spa)
1731 {
1732 vdev_t *rvd = spa->spa_root_vdev;
1733 boolean_t slog_found = B_FALSE;
1734
1735 ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));
1736
1737 if (!spa_has_slogs(spa))
1738 return (B_FALSE);
1739
1740 for (int c = 0; c < rvd->vdev_children; c++) {
1741 vdev_t *tvd = rvd->vdev_child[c];
2643 }
2644
2645 if (spa_writeable(spa) && (state == SPA_LOAD_RECOVER ||
2646 spa->spa_load_max_txg == UINT64_MAX)) {
2647 dmu_tx_t *tx;
2648 int need_update = B_FALSE;
2649
2650 ASSERT(state != SPA_LOAD_TRYIMPORT);
2651
2652 /*
2653 * Claim log blocks that haven't been committed yet.
2654 * This must all happen in a single txg.
2655 * Note: spa_claim_max_txg is updated by spa_claim_notify(),
2656 * invoked from zil_claim_log_block()'s i/o done callback.
2657 * Price of rollback is that we abandon the log.
2658 */
2659 spa->spa_claiming = B_TRUE;
2660
2661 tx = dmu_tx_create_assigned(spa_get_dsl(spa),
2662 spa_first_txg(spa));
2663 (void) dmu_objset_find_parallel(spa_name(spa),
2664 zil_claim, tx, DS_FIND_CHILDREN);
2665 dmu_tx_commit(tx);
2666
2667 spa->spa_claiming = B_FALSE;
2668
2669 spa_set_log_state(spa, SPA_LOG_GOOD);
2670 spa->spa_sync_on = B_TRUE;
2671 txg_sync_start(spa->spa_dsl_pool);
2672
2673 /*
2674 * Wait for all claims to sync. We sync up to the highest
2675 * claimed log block birth time so that claimed log blocks
2676 * don't appear to be from the future. spa_claim_max_txg
2677 * will have been set for us by either zil_check_log_chain()
2678 * (invoked from spa_check_logs()) or zil_claim() above.
2679 */
2680 txg_wait_synced(spa->spa_dsl_pool, spa->spa_claim_max_txg);
2681
2682 /*
2683 * If the config cache is stale, or we have uninitialized
2835 spa->spa_load_info = loadinfo;
2836
2837 return (load_error);
2838 }
2839 }
2840
2841 /*
2842 * Pool Open/Import
2843 *
2844 * The import case is identical to an open except that the configuration is sent
2845 * down from userland, instead of grabbed from the configuration cache. For the
2846 * case of an open, the pool configuration will exist in the
2847 * POOL_STATE_UNINITIALIZED state.
2848 *
2849 * The stats information (gen/count/ustats) is used to gather vdev statistics at
2850 * the same time as opening the pool, without having to keep around the spa_t in some
2851 * ambiguous state.
2852 */
2853 static int
2854 spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t *nvpolicy,
2855 nvlist_t **config, int lock)
2856 {
2857 spa_t *spa;
2858 spa_load_state_t state = SPA_LOAD_OPEN;
2859 int error;
2860 int locked = B_FALSE;
2861
2862 *spapp = NULL;
2863
2864 /*
2865 * As disgusting as this is, we need to support recursive calls to this
2866 * function because dsl_dir_open() is called during spa_load(), and ends
2867 * up calling spa_open() again. The real fix is to figure out how to
2868 * avoid dsl_dir_open() calling this in the first place.
2869 */
2870 if (lock && (mutex_owner(&spa_namespace_lock) != curthread)) {
2871 mutex_enter(&spa_namespace_lock);
2872 locked = B_TRUE;
2873 }
2874
2875 if ((spa = spa_lookup(pool)) == NULL) {
2876 if (locked)
2877 mutex_exit(&spa_namespace_lock);
2878 return (SET_ERROR(ENOENT));
2879 }
2880
2881 if (spa->spa_state == POOL_STATE_UNINITIALIZED) {
2882 zpool_rewind_policy_t policy;
2883
2884 zpool_get_rewind_policy(nvpolicy ? nvpolicy : spa->spa_config,
2885 &policy);
2886 if (policy.zrp_request & ZPOOL_DO_REWIND)
2887 state = SPA_LOAD_RECOVER;
2888
2889 spa_activate(spa, spa_mode_global);
2890
2947 VERIFY(nvlist_add_nvlist(*config, ZPOOL_CONFIG_LOAD_INFO,
2948 spa->spa_load_info) == 0);
2949 }
2950
2951 if (locked) {
2952 spa->spa_last_open_failed = 0;
2953 spa->spa_last_ubsync_txg = 0;
2954 spa->spa_load_txg = 0;
2955 mutex_exit(&spa_namespace_lock);
2956 }
2957
2958 *spapp = spa;
2959
2960 return (0);
2961 }
2962
2963 int
2964 spa_open_rewind(const char *name, spa_t **spapp, void *tag, nvlist_t *policy,
2965 nvlist_t **config)
2966 {
2967 return (spa_open_common(name, spapp, tag, policy, config, 1));
2968 }
2969
2970 int
2971 spa_open(const char *name, spa_t **spapp, void *tag)
2972 {
2973 return (spa_open_common(name, spapp, tag, NULL, NULL, 1));
2974 }
2975
2976 int
2977 spa_open_lock(const char *name, spa_t **spapp, void *tag, int lock)
2978 {
2979 return (spa_open_common(name, spapp, tag, NULL, NULL, lock));
2980 }
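/*
 * Sketch of how the new lock argument appears intended to be used (an
 * inference from the mutex_owner() test in spa_open_common(), not from a
 * caller shown in this excerpt; name, spa and err are assumed caller
 * locals): a thread that already owns spa_namespace_lock can pass
 * lock == 0 so the open neither re-enters nor drops the namespace lock.
 */
#if 0
	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	if ((err = spa_open_lock(name, &spa, FTAG, 0)) == 0) {
		/* ... spa_namespace_lock is still held by this thread ... */
		spa_close(spa, FTAG);
	}
#endif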
2981
2982 /*
2983 * Look up the given spa_t, incrementing the inject count in the process,
2984 * preventing it from being exported or destroyed.
2985 */
2986 spa_t *
2987 spa_inject_addref(char *name)
2988 {
2989 spa_t *spa;
2990
2991 mutex_enter(&spa_namespace_lock);
2992 if ((spa = spa_lookup(name)) == NULL) {
2993 mutex_exit(&spa_namespace_lock);
2994 return (NULL);
2995 }
2996 spa->spa_inject_ref++;
2997 mutex_exit(&spa_namespace_lock);
2998
2999 return (spa);
3143 za.za_num_integers == 1);
3144 VERIFY3U(0, ==, nvlist_add_uint64(features, za.za_name,
3145 za.za_first_integer));
3146 }
3147 zap_cursor_fini(&zc);
3148 }
3149
3150 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_FEATURE_STATS,
3151 features) == 0);
3152 nvlist_free(features);
3153 }
3154
3155 int
3156 spa_get_stats(const char *name, nvlist_t **config,
3157 char *altroot, size_t buflen)
3158 {
3159 int error;
3160 spa_t *spa;
3161
3162 *config = NULL;
3163 error = spa_open_common(name, &spa, FTAG, NULL, config, 1);
3164
3165 if (spa != NULL) {
3166 /*
3167 * This still leaves a window of inconsistency where the spares
3168 * or l2cache devices could change and the config would be
3169 * self-inconsistent.
3170 */
3171 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
3172
3173 if (*config != NULL) {
3174 uint64_t loadtimes[2];
3175
3176 loadtimes[0] = spa->spa_loaded_ts.tv_sec;
3177 loadtimes[1] = spa->spa_loaded_ts.tv_nsec;
3178 VERIFY(nvlist_add_uint64_array(*config,
3179 ZPOOL_CONFIG_LOADED_TIME, loadtimes, 2) == 0);
3180
3181 VERIFY(nvlist_add_uint64(*config,
3182 ZPOOL_CONFIG_ERRCOUNT,
3183 spa_get_errlog_size(spa)) == 0);