zpool import speedup
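
Two things change here, judging from the hunks below. First, the ZIL log-chain check (zil_check_log_chain) and the log claim (zil_claim) that run while a pool is loaded now walk the pool's child datasets through dmu_objset_find_parallel() rather than the serial dmu_objset_find(), so the per-dataset work can proceed concurrently. Second, spa_open_common() grows a lock argument that controls whether it acquires spa_namespace_lock itself; spa_open() and spa_open_rewind() keep the old behavior by passing 1, and a new spa_open_lock() entry point lets a caller make that choice explicitly.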
*** 1715,1726 ****
          switch (spa->spa_log_state) {
          case SPA_LOG_MISSING:
                  /* need to recheck in case slog has been restored */
          case SPA_LOG_UNKNOWN:
!                 rv = (dmu_objset_find(spa->spa_name, zil_check_log_chain,
!                     NULL, DS_FIND_CHILDREN) != 0);
                  if (rv)
                          spa_set_log_state(spa, SPA_LOG_MISSING);
                  break;
          }

          return (rv);
--- 1715,1726 ----
          switch (spa->spa_log_state) {
          case SPA_LOG_MISSING:
                  /* need to recheck in case slog has been restored */
          case SPA_LOG_UNKNOWN:
!                 rv = (dmu_objset_find_parallel(spa->spa_name,
!                     zil_check_log_chain, NULL, DS_FIND_CHILDREN) != 0);
                  if (rv)
                          spa_set_log_state(spa, SPA_LOG_MISSING);
                  break;
          }

          return (rv);
*** 2658,2668 ****
                   */
                  spa->spa_claiming = B_TRUE;

                  tx = dmu_tx_create_assigned(spa_get_dsl(spa),
                      spa_first_txg(spa));
!                 (void) dmu_objset_find(spa_name(spa),
                      zil_claim, tx, DS_FIND_CHILDREN);
                  dmu_tx_commit(tx);

                  spa->spa_claiming = B_FALSE;
--- 2658,2668 ----
                   */
                  spa->spa_claiming = B_TRUE;

                  tx = dmu_tx_create_assigned(spa_get_dsl(spa),
                      spa_first_txg(spa));
!                 (void) dmu_objset_find_parallel(spa_name(spa),
                      zil_claim, tx, DS_FIND_CHILDREN);
                  dmu_tx_commit(tx);

                  spa->spa_claiming = B_FALSE;
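
The two hunks above replace a serial walk of every child dataset with a parallel one. The kernel implementation of dmu_objset_find_parallel() is not part of this diff; the fragment below is only a deliberately simplified user-space sketch of the pattern it implies (dispatch the per-dataset callback for every dataset, then wait and fold the results), with made-up dataset names and a stand-in for zil_check_log_chain().

/*
 * Simplified illustration only -- not the kernel code.  Each "dataset"
 * gets its own thread running the check callback; the main thread then
 * waits for all of them and folds the results into a single pass/fail,
 * the way rv is used in the serial dmu_objset_find() call above.
 */
#include <pthread.h>
#include <stdio.h>

static const char *datasets[] = {       /* stand-ins for the child objsets */
        "tank", "tank/home", "tank/home/alice", "tank/var", "tank/var/log",
};
#define NDATASETS       (sizeof (datasets) / sizeof (datasets[0]))

static int results[NDATASETS];          /* one callback result per dataset */

/* Stand-in for zil_check_log_chain(): pretend to verify one log chain. */
static int
check_one(const char *name)
{
        (void) printf("checking %s\n", name);
        return (0);                     /* 0 == log chain looks intact */
}

static void *
check_task(void *arg)
{
        size_t i = (const char **)arg - datasets;

        results[i] = check_one(datasets[i]);
        return (NULL);
}

int
main(void)
{
        pthread_t tid[NDATASETS];
        int rv = 0;

        /* Dispatch every per-dataset check, then wait for all of them. */
        for (size_t i = 0; i < NDATASETS; i++)
                (void) pthread_create(&tid[i], NULL, check_task,
                    (void *)&datasets[i]);
        for (size_t i = 0; i < NDATASETS; i++) {
                (void) pthread_join(tid[i], NULL);
                if (results[i] != 0)
                        rv = 1;         /* any failure marks the log missing */
        }
        return (rv);
}

The serial form cannot start dataset N+1 until dataset N has finished; with many datasets, and callbacks that presumably spend most of their time waiting on I/O, fanning them out is where the import speedup would come from.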
*** 2850,2860 ****
   * the same time open the pool, without having to keep around the spa_t in some
   * ambiguous state.
   */
  static int
  spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t *nvpolicy,
!     nvlist_t **config)
  {
          spa_t *spa;
          spa_load_state_t state = SPA_LOAD_OPEN;
          int error;
          int locked = B_FALSE;
--- 2850,2860 ----
   * the same time open the pool, without having to keep around the spa_t in some
   * ambiguous state.
   */
  static int
  spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t *nvpolicy,
!     nvlist_t **config, int lock)
  {
          spa_t *spa;
          spa_load_state_t state = SPA_LOAD_OPEN;
          int error;
          int locked = B_FALSE;
*** 2865,2875 ****
           * As disgusting as this is, we need to support recursive calls to this
           * function because dsl_dir_open() is called during spa_load(), and ends
           * up calling spa_open() again. The real fix is to figure out how to
           * avoid dsl_dir_open() calling this in the first place.
           */
!         if (mutex_owner(&spa_namespace_lock) != curthread) {
                  mutex_enter(&spa_namespace_lock);
                  locked = B_TRUE;
          }

          if ((spa = spa_lookup(pool)) == NULL) {
--- 2865,2875 ----
           * As disgusting as this is, we need to support recursive calls to this
           * function because dsl_dir_open() is called during spa_load(), and ends
           * up calling spa_open() again. The real fix is to figure out how to
           * avoid dsl_dir_open() calling this in the first place.
           */
!         if (lock && (mutex_owner(&spa_namespace_lock) != curthread)) {
                  mutex_enter(&spa_namespace_lock);
                  locked = B_TRUE;
          }

          if ((spa = spa_lookup(pool)) == NULL) {
*** 2962,2978 ****
  int
  spa_open_rewind(const char *name, spa_t **spapp, void *tag, nvlist_t *policy,
      nvlist_t **config)
  {
!         return (spa_open_common(name, spapp, tag, policy, config));
  }

  int
  spa_open(const char *name, spa_t **spapp, void *tag)
  {
!         return (spa_open_common(name, spapp, tag, NULL, NULL));
  }

  /*
   * Lookup the given spa_t, incrementing the inject count in the process,
   * preventing it from being exported or destroyed.
--- 2962,2984 ----
  int
  spa_open_rewind(const char *name, spa_t **spapp, void *tag, nvlist_t *policy,
      nvlist_t **config)
  {
!         return (spa_open_common(name, spapp, tag, policy, config, 1));
  }

  int
  spa_open(const char *name, spa_t **spapp, void *tag)
  {
!         return (spa_open_common(name, spapp, tag, NULL, NULL, 1));
! }
!
! int
! spa_open_lock(const char *name, spa_t **spapp, void *tag, int lock)
! {
!         return (spa_open_common(name, spapp, tag, NULL, NULL, lock));
  }

  /*
   * Lookup the given spa_t, incrementing the inject count in the process,
   * preventing it from being exported or destroyed.
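
The new lock argument feeds the mutex_owner() check shown earlier: with lock == 0, spa_open_common() does not try to take spa_namespace_lock at all, which only makes sense when the caller has arranged that protection some other way, for example a dispatching thread that already holds the lock on behalf of its workers (an assumption on my part, not something this diff states). A hypothetical caller-side fragment, not code from this change:

        /*
         * Hypothetical use of the new entry point (pool name and context
         * are made up): open the pool without spa_open_common() acquiring
         * spa_namespace_lock, because the caller is already covered.
         */
        spa_t *spa;
        int error;

        error = spa_open_lock("tank", &spa, FTAG, 0);   /* 0: don't lock */
        if (error == 0) {
                /* ... examine the opened pool ... */
                spa_close(spa, FTAG);
        }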
*** 3152,3162 ****
  {
          int error;
          spa_t *spa;

          *config = NULL;
!         error = spa_open_common(name, &spa, FTAG, NULL, config);

          if (spa != NULL) {
                  /*
                   * This still leaves a window of inconsistency where the spares
                   * or l2cache devices could change and the config would be
--- 3158,3168 ----
  {
          int error;
          spa_t *spa;

          *config = NULL;
!         error = spa_open_common(name, &spa, FTAG, NULL, config, 1);

          if (spa != NULL) {
                  /*
                   * This still leaves a window of inconsistency where the spares
                   * or l2cache devices could change and the config would be