8115 parallel zfs mount
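This webrev converts ztest's synchronization from the libc and UI-thread primitives to the kernel-style primitives that libzpool supplies to userland consumers. As a reading aid for the hunks below, the substitutions applied throughout the file (drawn from the diff itself):

/*
 * Primitive substitutions applied throughout ztest.c:
 *
 *   mutex_t, _mutex_init(), mutex_lock(), mutex_unlock(), _mutex_destroy()
 *       -> kmutex_t, mutex_init(), mutex_enter(), mutex_exit(),
 *          mutex_destroy()
 *   cond_t, cond_init(), cond_wait(), cond_broadcast(), cond_destroy()
 *       -> kcondvar_t, cv_init(), cv_wait(), cv_broadcast(), cv_destroy()
 *   rwlock_t, rwlock_init(), rw_rdlock(), rw_wrlock(), rw_unlock(),
 *   rwlock_destroy()
 *       -> krwlock_t, rw_init(), rw_enter(RW_READER), rw_enter(RW_WRITER),
 *          rw_exit(), rw_destroy()
 *   _mutex_held() -> MUTEX_HELD()
 *   thread_t, thr_create(), thr_join()
 *       -> pthread_t, pthread_create(), pthread_join()
 */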
*** 23,32 ****
--- 23,33 ----
* Copyright (c) 2011, 2016 by Delphix. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2013 Steven Hartland. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
* Copyright 2017 Joyent, Inc.
+ * Copyright 2017 RackTop Systems.
*/
/*
* The objective of this program is to provide a DMU/ZAP/SPA stress test
* that runs entirely in userland, is easy to use, and easy to extend.
*** 243,254 ****
} rl_type_t;
typedef struct rll {
void *rll_writer;
int rll_readers;
! mutex_t rll_lock;
! cond_t rll_cv;
} rll_t;
typedef struct rl {
uint64_t rl_object;
uint64_t rl_offset;
--- 244,255 ----
} rl_type_t;
typedef struct rll {
void *rll_writer;
int rll_readers;
! kmutex_t rll_lock;
! kcondvar_t rll_cv;
} rll_t;
typedef struct rl {
uint64_t rl_object;
uint64_t rl_offset;
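The kmutex_t and kcondvar_t types adopted above come from libzpool's userland zfs_context. A minimal sketch of how kernel-style wrappers of this shape can be emulated over POSIX threads follows; the field names, the owner bookkeeping, and the integral-pthread_t assumption are illustrative, not the actual libzpool definitions.

#include <pthread.h>

/*
 * Illustrative emulation of kernel-style mutex/condvar interfaces
 * over POSIX threads.  Assumes an integral pthread_t, as on illumos.
 */
typedef struct kmutex {
	pthread_mutex_t	m_lock;
	pthread_t	m_owner;	/* backs MUTEX_HELD()-style asserts */
} kmutex_t;

typedef pthread_cond_t kcondvar_t;

#define	MUTEX_HELD(mp)	(pthread_equal((mp)->m_owner, pthread_self()))

void
mutex_enter(kmutex_t *mp)
{
	(void) pthread_mutex_lock(&mp->m_lock);
	mp->m_owner = pthread_self();
}

void
mutex_exit(kmutex_t *mp)
{
	mp->m_owner = (pthread_t)0;
	(void) pthread_mutex_unlock(&mp->m_lock);
}

void
cv_wait(kcondvar_t *cv, kmutex_t *mp)
{
	mp->m_owner = (pthread_t)0;	/* the mutex is dropped while waiting */
	(void) pthread_cond_wait(cv, &mp->m_lock);
	mp->m_owner = pthread_self();
}

void
cv_broadcast(kcondvar_t *cv)
{
	(void) pthread_cond_broadcast(cv);
}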
*** 278,292 ****
* Per-dataset state.
*/
typedef struct ztest_ds {
ztest_shared_ds_t *zd_shared;
objset_t *zd_os;
! rwlock_t zd_zilog_lock;
zilog_t *zd_zilog;
ztest_od_t *zd_od; /* debugging aid */
char zd_name[ZFS_MAX_DATASET_NAME_LEN];
! mutex_t zd_dirobj_lock;
rll_t zd_object_lock[ZTEST_OBJECT_LOCKS];
rll_t zd_range_lock[ZTEST_RANGE_LOCKS];
} ztest_ds_t;
/*
--- 279,293 ----
* Per-dataset state.
*/
typedef struct ztest_ds {
ztest_shared_ds_t *zd_shared;
objset_t *zd_os;
! krwlock_t zd_zilog_lock;
zilog_t *zd_zilog;
ztest_od_t *zd_od; /* debugging aid */
char zd_name[ZFS_MAX_DATASET_NAME_LEN];
! kmutex_t zd_dirobj_lock;
rll_t zd_object_lock[ZTEST_OBJECT_LOCKS];
rll_t zd_range_lock[ZTEST_RANGE_LOCKS];
} ztest_ds_t;
/*
*** 389,399 ****
/*
* The following struct is used to hold a list of uncalled commit callbacks.
* The callbacks are ordered by txg number.
*/
typedef struct ztest_cb_list {
! mutex_t zcl_callbacks_lock;
list_t zcl_callbacks;
} ztest_cb_list_t;
/*
* Stuff we need to share writably between parent and child.
--- 390,400 ----
/*
* The following struct is used to hold a list of uncalled commit callbacks.
* The callbacks are ordered by txg number.
*/
typedef struct ztest_cb_list {
! kmutex_t zcl_callbacks_lock;
list_t zcl_callbacks;
} ztest_cb_list_t;
/*
* Stuff we need to share writably between parent and child.
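zcl_callbacks_lock, now a kmutex_t, serializes that txg-ordered list. For context, a condensed sketch of how a callback gets onto it: dmu_tx_callback_register() is the real DMU entry point, while the example_* helpers and the tail insertion are simplifications of ztest's own bookkeeping.

/* Runs once the txg commits (error == 0) or the tx is aborted. */
static void
example_commit_cb(void *arg, int error)
{
	ztest_cb_data_t *data = arg;

	data->zcd_called = B_TRUE;
}

static void
example_register_commit_cb(dmu_tx_t *tx, ztest_cb_data_t *data)
{
	dmu_tx_callback_register(tx, example_commit_cb, data);

	mutex_enter(&zcl.zcl_callbacks_lock);
	data->zcd_txg = dmu_tx_get_txg(tx);	/* tx must already be assigned */
	list_insert_tail(&zcl.zcl_callbacks, data);
	mutex_exit(&zcl.zcl_callbacks_lock);
}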
*** 424,442 ****
ztest_shared_t *ztest_shared;
static spa_t *ztest_spa = NULL;
static ztest_ds_t *ztest_ds;
! static mutex_t ztest_vdev_lock;
/*
* The ztest_name_lock protects the pool and dataset namespace used by
* the individual tests. To modify the namespace, consumers must grab
* this lock as writer. Grabbing the lock as reader will ensure that the
* namespace does not change while the lock is held.
*/
! static rwlock_t ztest_name_lock;
static boolean_t ztest_dump_core = B_TRUE;
static boolean_t ztest_exiting;
/* Global commit callback list */
--- 425,443 ----
ztest_shared_t *ztest_shared;
static spa_t *ztest_spa = NULL;
static ztest_ds_t *ztest_ds;
! static kmutex_t ztest_vdev_lock;
/*
* The ztest_name_lock protects the pool and dataset namespace used by
* the individual tests. To modify the namespace, consumers must grab
* this lock as writer. Grabbing the lock as reader will ensure that the
* namespace does not change while the lock is held.
*/
! static krwlock_t ztest_name_lock;
static boolean_t ztest_dump_core = B_TRUE;
static boolean_t ztest_exiting;
/* Global commit callback list */
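The comment above defines the reader/writer protocol for ztest_name_lock; a short hypothetical sketch of both sides of it, using the converted rw_enter()/rw_exit() interface:

/* Hypothetical reader: pin the namespace while a dataset name is in use. */
static void
example_name_reader(ztest_ds_t *zd)
{
	rw_enter(&ztest_name_lock, RW_READER);
	/* zd->zd_name cannot be renamed or destroyed while this is held. */
	(void) printf("using dataset %s\n", zd->zd_name);
	rw_exit(&ztest_name_lock);
}

/* Hypothetical writer: namespace changes require exclusive access. */
static void
example_name_writer(spa_t *spa)
{
	rw_enter(&ztest_name_lock, RW_WRITER);
	(void) spa_change_guid(spa);	/* one example of a namespace change */
	rw_exit(&ztest_name_lock);
}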
*** 1088,1132 ****
static void
ztest_rll_init(rll_t *rll)
{
rll->rll_writer = NULL;
rll->rll_readers = 0;
! VERIFY(_mutex_init(&rll->rll_lock, USYNC_THREAD, NULL) == 0);
! VERIFY(cond_init(&rll->rll_cv, USYNC_THREAD, NULL) == 0);
}
static void
ztest_rll_destroy(rll_t *rll)
{
ASSERT(rll->rll_writer == NULL);
ASSERT(rll->rll_readers == 0);
! VERIFY(_mutex_destroy(&rll->rll_lock) == 0);
! VERIFY(cond_destroy(&rll->rll_cv) == 0);
}
static void
ztest_rll_lock(rll_t *rll, rl_type_t type)
{
! VERIFY(mutex_lock(&rll->rll_lock) == 0);
if (type == RL_READER) {
while (rll->rll_writer != NULL)
! (void) cond_wait(&rll->rll_cv, &rll->rll_lock);
rll->rll_readers++;
} else {
while (rll->rll_writer != NULL || rll->rll_readers)
! (void) cond_wait(&rll->rll_cv, &rll->rll_lock);
rll->rll_writer = curthread;
}
! VERIFY(mutex_unlock(&rll->rll_lock) == 0);
}
static void
ztest_rll_unlock(rll_t *rll)
{
! VERIFY(mutex_lock(&rll->rll_lock) == 0);
if (rll->rll_writer) {
ASSERT(rll->rll_readers == 0);
rll->rll_writer = NULL;
} else {
--- 1089,1133 ----
static void
ztest_rll_init(rll_t *rll)
{
rll->rll_writer = NULL;
rll->rll_readers = 0;
! mutex_init(&rll->rll_lock, NULL, USYNC_THREAD, NULL);
! cv_init(&rll->rll_cv, NULL, USYNC_THREAD, NULL);
}
static void
ztest_rll_destroy(rll_t *rll)
{
ASSERT(rll->rll_writer == NULL);
ASSERT(rll->rll_readers == 0);
! mutex_destroy(&rll->rll_lock);
! cv_destroy(&rll->rll_cv);
}
static void
ztest_rll_lock(rll_t *rll, rl_type_t type)
{
! mutex_enter(&rll->rll_lock);
if (type == RL_READER) {
while (rll->rll_writer != NULL)
! cv_wait(&rll->rll_cv, &rll->rll_lock);
rll->rll_readers++;
} else {
while (rll->rll_writer != NULL || rll->rll_readers)
! cv_wait(&rll->rll_cv, &rll->rll_lock);
rll->rll_writer = curthread;
}
! mutex_exit(&rll->rll_lock);
}
static void
ztest_rll_unlock(rll_t *rll)
{
! mutex_enter(&rll->rll_lock);
if (rll->rll_writer) {
ASSERT(rll->rll_readers == 0);
rll->rll_writer = NULL;
} else {
*** 1134,1146 ****
ASSERT(rll->rll_writer == NULL);
rll->rll_readers--;
}
if (rll->rll_writer == NULL && rll->rll_readers == 0)
! VERIFY(cond_broadcast(&rll->rll_cv) == 0);
! VERIFY(mutex_unlock(&rll->rll_lock) == 0);
}
static void
ztest_object_lock(ztest_ds_t *zd, uint64_t object, rl_type_t type)
{
--- 1135,1147 ----
ASSERT(rll->rll_writer == NULL);
rll->rll_readers--;
}
if (rll->rll_writer == NULL && rll->rll_readers == 0)
! cv_broadcast(&rll->rll_cv);
! mutex_exit(&rll->rll_lock);
}
static void
ztest_object_lock(ztest_ds_t *zd, uint64_t object, rl_type_t type)
{
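ztest_rll_lock() and ztest_rll_unlock() above implement a small reader/writer lock from one kmutex_t and one kcondvar_t. A hypothetical caller, showing the pairing that the ASSERTs depend on:

static void
example_rll_usage(rll_t *rll)
{
	ztest_rll_lock(rll, RL_READER);		/* shared; readers may overlap */
	/* ... inspect the object ... */
	ztest_rll_unlock(rll);

	ztest_rll_lock(rll, RL_WRITER);		/* exclusive; records curthread */
	/* ... modify the object ... */
	ztest_rll_unlock(rll);
}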
*** 1195,1206 ****
dmu_objset_name(os, zd->zd_name);
if (zd->zd_shared != NULL)
zd->zd_shared->zd_seq = 0;
! VERIFY(rwlock_init(&zd->zd_zilog_lock, USYNC_THREAD, NULL) == 0);
! VERIFY(_mutex_init(&zd->zd_dirobj_lock, USYNC_THREAD, NULL) == 0);
for (int l = 0; l < ZTEST_OBJECT_LOCKS; l++)
ztest_rll_init(&zd->zd_object_lock[l]);
for (int l = 0; l < ZTEST_RANGE_LOCKS; l++)
--- 1196,1207 ----
dmu_objset_name(os, zd->zd_name);
if (zd->zd_shared != NULL)
zd->zd_shared->zd_seq = 0;
! rw_init(&zd->zd_zilog_lock, NULL, USYNC_THREAD, NULL);
! mutex_init(&zd->zd_dirobj_lock, NULL, USYNC_THREAD, NULL);
for (int l = 0; l < ZTEST_OBJECT_LOCKS; l++)
ztest_rll_init(&zd->zd_object_lock[l]);
for (int l = 0; l < ZTEST_RANGE_LOCKS; l++)
*** 1208,1218 ****
}
static void
ztest_zd_fini(ztest_ds_t *zd)
{
! VERIFY(_mutex_destroy(&zd->zd_dirobj_lock) == 0);
for (int l = 0; l < ZTEST_OBJECT_LOCKS; l++)
ztest_rll_destroy(&zd->zd_object_lock[l]);
for (int l = 0; l < ZTEST_RANGE_LOCKS; l++)
--- 1209,1219 ----
}
static void
ztest_zd_fini(ztest_ds_t *zd)
{
! mutex_destroy(&zd->zd_dirobj_lock);
for (int l = 0; l < ZTEST_OBJECT_LOCKS; l++)
ztest_rll_destroy(&zd->zd_object_lock[l]);
for (int l = 0; l < ZTEST_RANGE_LOCKS; l++)
*** 1963,1973 ****
ztest_lookup(ztest_ds_t *zd, ztest_od_t *od, int count)
{
int missing = 0;
int error;
! ASSERT(_mutex_held(&zd->zd_dirobj_lock));
for (int i = 0; i < count; i++, od++) {
od->od_object = 0;
error = zap_lookup(zd->zd_os, od->od_dir, od->od_name,
sizeof (uint64_t), 1, &od->od_object);
--- 1964,1974 ----
ztest_lookup(ztest_ds_t *zd, ztest_od_t *od, int count)
{
int missing = 0;
int error;
! ASSERT(MUTEX_HELD(&zd->zd_dirobj_lock));
for (int i = 0; i < count; i++, od++) {
od->od_object = 0;
error = zap_lookup(zd->zd_os, od->od_dir, od->od_name,
sizeof (uint64_t), 1, &od->od_object);
*** 2003,2013 ****
static int
ztest_create(ztest_ds_t *zd, ztest_od_t *od, int count)
{
int missing = 0;
! ASSERT(_mutex_held(&zd->zd_dirobj_lock));
for (int i = 0; i < count; i++, od++) {
if (missing) {
od->od_object = 0;
missing++;
--- 2004,2014 ----
static int
ztest_create(ztest_ds_t *zd, ztest_od_t *od, int count)
{
int missing = 0;
! ASSERT(MUTEX_HELD(&zd->zd_dirobj_lock));
for (int i = 0; i < count; i++, od++) {
if (missing) {
od->od_object = 0;
missing++;
*** 2048,2058 ****
ztest_remove(ztest_ds_t *zd, ztest_od_t *od, int count)
{
int missing = 0;
int error;
! ASSERT(_mutex_held(&zd->zd_dirobj_lock));
od += count - 1;
for (int i = count - 1; i >= 0; i--, od--) {
if (missing) {
--- 2049,2059 ----
ztest_remove(ztest_ds_t *zd, ztest_od_t *od, int count)
{
int missing = 0;
int error;
! ASSERT(MUTEX_HELD(&zd->zd_dirobj_lock));
od += count - 1;
for (int i = count - 1; i >= 0; i--, od--) {
if (missing) {
*** 2194,2204 ****
*/
io_type = ztest_random(ZTEST_IO_TYPES);
if (ztest_random(2) == 0)
io_type = ZTEST_IO_WRITE_TAG;
! (void) rw_rdlock(&zd->zd_zilog_lock);
switch (io_type) {
case ZTEST_IO_WRITE_TAG:
ztest_bt_generate(&wbt, zd->zd_os, object, offset, 0, 0, 0);
--- 2195,2205 ----
*/
io_type = ztest_random(ZTEST_IO_TYPES);
if (ztest_random(2) == 0)
io_type = ZTEST_IO_WRITE_TAG;
! rw_enter(&zd->zd_zilog_lock, RW_READER);
switch (io_type) {
case ZTEST_IO_WRITE_TAG:
ztest_bt_generate(&wbt, zd->zd_os, object, offset, 0, 0, 0);
*** 2231,2260 ****
case ZTEST_IO_SETATTR:
(void) ztest_setattr(zd, object);
break;
case ZTEST_IO_REWRITE:
! (void) rw_rdlock(&ztest_name_lock);
err = ztest_dsl_prop_set_uint64(zd->zd_name,
ZFS_PROP_CHECKSUM, spa_dedup_checksum(ztest_spa),
B_FALSE);
VERIFY(err == 0 || err == ENOSPC);
err = ztest_dsl_prop_set_uint64(zd->zd_name,
ZFS_PROP_COMPRESSION,
ztest_random_dsl_prop(ZFS_PROP_COMPRESSION),
B_FALSE);
VERIFY(err == 0 || err == ENOSPC);
! (void) rw_unlock(&ztest_name_lock);
VERIFY0(dmu_read(zd->zd_os, object, offset, blocksize, data,
DMU_READ_NO_PREFETCH));
(void) ztest_write(zd, object, offset, blocksize, data);
break;
}
! (void) rw_unlock(&zd->zd_zilog_lock);
umem_free(data, blocksize);
}
/*
--- 2232,2261 ----
case ZTEST_IO_SETATTR:
(void) ztest_setattr(zd, object);
break;
case ZTEST_IO_REWRITE:
! rw_enter(&ztest_name_lock, RW_READER);
err = ztest_dsl_prop_set_uint64(zd->zd_name,
ZFS_PROP_CHECKSUM, spa_dedup_checksum(ztest_spa),
B_FALSE);
VERIFY(err == 0 || err == ENOSPC);
err = ztest_dsl_prop_set_uint64(zd->zd_name,
ZFS_PROP_COMPRESSION,
ztest_random_dsl_prop(ZFS_PROP_COMPRESSION),
B_FALSE);
VERIFY(err == 0 || err == ENOSPC);
! rw_exit(&ztest_name_lock);
VERIFY0(dmu_read(zd->zd_os, object, offset, blocksize, data,
DMU_READ_NO_PREFETCH));
(void) ztest_write(zd, object, offset, blocksize, data);
break;
}
! rw_exit(&zd->zd_zilog_lock);
umem_free(data, blocksize);
}
/*
*** 2289,2305 ****
ztest_object_init(ztest_ds_t *zd, ztest_od_t *od, size_t size, boolean_t remove)
{
int count = size / sizeof (*od);
int rv = 0;
! VERIFY(mutex_lock(&zd->zd_dirobj_lock) == 0);
if ((ztest_lookup(zd, od, count) != 0 || remove) &&
(ztest_remove(zd, od, count) != 0 ||
ztest_create(zd, od, count) != 0))
rv = -1;
zd->zd_od = od;
! VERIFY(mutex_unlock(&zd->zd_dirobj_lock) == 0);
return (rv);
}
/* ARGSUSED */
--- 2290,2306 ----
ztest_object_init(ztest_ds_t *zd, ztest_od_t *od, size_t size, boolean_t remove)
{
int count = size / sizeof (*od);
int rv = 0;
! mutex_enter(&zd->zd_dirobj_lock);
if ((ztest_lookup(zd, od, count) != 0 || remove) &&
(ztest_remove(zd, od, count) != 0 ||
ztest_create(zd, od, count) != 0))
rv = -1;
zd->zd_od = od;
! mutex_exit(&zd->zd_dirobj_lock);
return (rv);
}
/* ARGSUSED */
*** 2306,2316 ****
void
ztest_zil_commit(ztest_ds_t *zd, uint64_t id)
{
zilog_t *zilog = zd->zd_zilog;
! (void) rw_rdlock(&zd->zd_zilog_lock);
zil_commit(zilog, ztest_random(ZTEST_OBJECTS));
/*
* Remember the committed values in zd, which is in parent/child
--- 2307,2317 ----
void
ztest_zil_commit(ztest_ds_t *zd, uint64_t id)
{
zilog_t *zilog = zd->zd_zilog;
! rw_enter(&zd->zd_zilog_lock, RW_READER);
zil_commit(zilog, ztest_random(ZTEST_OBJECTS));
/*
* Remember the committed values in zd, which is in parent/child
*** 2321,2331 ****
ASSERT(zd->zd_shared != NULL);
ASSERT3U(zd->zd_shared->zd_seq, <=, zilog->zl_commit_lr_seq);
zd->zd_shared->zd_seq = zilog->zl_commit_lr_seq;
mutex_exit(&zilog->zl_lock);
! (void) rw_unlock(&zd->zd_zilog_lock);
}
/*
* This function is designed to simulate the operations that occur during a
* mount/unmount operation. We hold the dataset across these operations in an
--- 2322,2332 ----
ASSERT(zd->zd_shared != NULL);
ASSERT3U(zd->zd_shared->zd_seq, <=, zilog->zl_commit_lr_seq);
zd->zd_shared->zd_seq = zilog->zl_commit_lr_seq;
mutex_exit(&zilog->zl_lock);
! rw_exit(&zd->zd_zilog_lock);
}
/*
* This function is designed to simulate the operations that occur during a
* mount/unmount operation. We hold the dataset across these operations in an
*** 2340,2361 ****
/*
* We grab the zd_dirobj_lock to ensure that no other thread is
* updating the zil (i.e. adding in-memory log records) and the
* zd_zilog_lock to block any I/O.
*/
! VERIFY0(mutex_lock(&zd->zd_dirobj_lock));
! (void) rw_wrlock(&zd->zd_zilog_lock);
/* zfsvfs_teardown() */
zil_close(zd->zd_zilog);
/* zfsvfs_setup() */
VERIFY(zil_open(os, ztest_get_data) == zd->zd_zilog);
zil_replay(os, zd, ztest_replay_vector);
! (void) rw_unlock(&zd->zd_zilog_lock);
! VERIFY(mutex_unlock(&zd->zd_dirobj_lock) == 0);
}
/*
* Verify that we can't destroy an active pool, create an existing pool,
* or create a pool with a bad vdev spec.
--- 2341,2362 ----
/*
* We grab the zd_dirobj_lock to ensure that no other thread is
* updating the zil (i.e. adding in-memory log records) and the
* zd_zilog_lock to block any I/O.
*/
! mutex_enter(&zd->zd_dirobj_lock);
! rw_enter(&zd->zd_zilog_lock, RW_WRITER);
/* zfsvfs_teardown() */
zil_close(zd->zd_zilog);
/* zfsvfs_setup() */
VERIFY(zil_open(os, ztest_get_data) == zd->zd_zilog);
zil_replay(os, zd, ztest_replay_vector);
! rw_exit(&zd->zd_zilog_lock);
! mutex_exit(&zd->zd_dirobj_lock);
}
/*
* Verify that we can't destroy an active pool, create an existing pool,
* or create a pool with a bad vdev spec.
*** 2386,2404 ****
/*
* Attempt to create an existing pool. It shouldn't matter
* what's in the nvroot; we should fail with EEXIST.
*/
! (void) rw_rdlock(&ztest_name_lock);
nvroot = make_vdev_root("/dev/bogus", NULL, NULL, 0, 0, 0, 0, 0, 1);
VERIFY3U(EEXIST, ==, spa_create(zo->zo_pool, nvroot, NULL, NULL));
nvlist_free(nvroot);
VERIFY3U(0, ==, spa_open(zo->zo_pool, &spa, FTAG));
VERIFY3U(EBUSY, ==, spa_destroy(zo->zo_pool));
spa_close(spa, FTAG);
! (void) rw_unlock(&ztest_name_lock);
}
/* ARGSUSED */
void
ztest_spa_upgrade(ztest_ds_t *zd, uint64_t id)
--- 2387,2405 ----
/*
* Attempt to create an existing pool. It shouldn't matter
* what's in the nvroot; we should fail with EEXIST.
*/
! rw_enter(&ztest_name_lock, RW_READER);
nvroot = make_vdev_root("/dev/bogus", NULL, NULL, 0, 0, 0, 0, 0, 1);
VERIFY3U(EEXIST, ==, spa_create(zo->zo_pool, nvroot, NULL, NULL));
nvlist_free(nvroot);
VERIFY3U(0, ==, spa_open(zo->zo_pool, &spa, FTAG));
VERIFY3U(EBUSY, ==, spa_destroy(zo->zo_pool));
spa_close(spa, FTAG);
! rw_exit(&ztest_name_lock);
}
/* ARGSUSED */
void
ztest_spa_upgrade(ztest_ds_t *zd, uint64_t id)
*** 2407,2417 ****
uint64_t initial_version = SPA_VERSION_INITIAL;
uint64_t version, newversion;
nvlist_t *nvroot, *props;
char *name;
! VERIFY0(mutex_lock(&ztest_vdev_lock));
name = kmem_asprintf("%s_upgrade", ztest_opts.zo_pool);
/*
* Clean up from previous runs.
*/
--- 2408,2418 ----
uint64_t initial_version = SPA_VERSION_INITIAL;
uint64_t version, newversion;
nvlist_t *nvroot, *props;
char *name;
! mutex_enter(&ztest_vdev_lock);
name = kmem_asprintf("%s_upgrade", ztest_opts.zo_pool);
/*
* Clean up from previous runs.
*/
*** 2466,2476 ****
VERIFY3U(spa_version(spa), ==, fnvlist_lookup_uint64(spa->spa_config,
zpool_prop_to_name(ZPOOL_PROP_VERSION)));
spa_close(spa, FTAG);
strfree(name);
! VERIFY0(mutex_unlock(&ztest_vdev_lock));
}
static vdev_t *
vdev_lookup_by_path(vdev_t *vd, const char *path)
{
--- 2467,2477 ----
VERIFY3U(spa_version(spa), ==, fnvlist_lookup_uint64(spa->spa_config,
zpool_prop_to_name(ZPOOL_PROP_VERSION)));
spa_close(spa, FTAG);
strfree(name);
! mutex_exit(&ztest_vdev_lock);
}
static vdev_t *
vdev_lookup_by_path(vdev_t *vd, const char *path)
{
*** 2519,2529 ****
uint64_t leaves;
uint64_t guid;
nvlist_t *nvroot;
int error;
! VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
leaves = MAX(zs->zs_mirrors + zs->zs_splits, 1) * ztest_opts.zo_raidz;
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
ztest_shared->zs_vdev_next_leaf = find_vdev_hole(spa) * leaves;
--- 2520,2530 ----
uint64_t leaves;
uint64_t guid;
nvlist_t *nvroot;
int error;
! mutex_enter(&ztest_vdev_lock);
leaves = MAX(zs->zs_mirrors + zs->zs_splits, 1) * ztest_opts.zo_raidz;
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
ztest_shared->zs_vdev_next_leaf = find_vdev_hole(spa) * leaves;
*** 2545,2557 ****
* and destroying a dataset. Removing the slog will
* grab a reference on the dataset which may cause
* dmu_objset_destroy() to fail with EBUSY thus
* leaving the dataset in an inconsistent state.
*/
! VERIFY(rw_wrlock(&ztest_name_lock) == 0);
error = spa_vdev_remove(spa, guid, B_FALSE);
! VERIFY(rw_unlock(&ztest_name_lock) == 0);
if (error && error != EEXIST)
fatal(0, "spa_vdev_remove() = %d", error);
} else {
spa_config_exit(spa, SCL_VDEV, FTAG);
--- 2546,2558 ----
* and destroying a dataset. Removing the slog will
* grab a reference on the dataset which may cause
* dmu_objset_destroy() to fail with EBUSY thus
* leaving the dataset in an inconsistent state.
*/
! rw_enter(&ztest_name_lock, RW_WRITER);
error = spa_vdev_remove(spa, guid, B_FALSE);
! rw_exit(&ztest_name_lock);
if (error && error != EEXIST)
fatal(0, "spa_vdev_remove() = %d", error);
} else {
spa_config_exit(spa, SCL_VDEV, FTAG);
*** 2571,2581 ****
ztest_record_enospc("spa_vdev_add");
else if (error != 0)
fatal(0, "spa_vdev_add() = %d", error);
}
! VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
}
/*
* Verify that adding/removing aux devices (l2arc, hot spare) works as expected.
*/
--- 2572,2582 ----
ztest_record_enospc("spa_vdev_add");
else if (error != 0)
fatal(0, "spa_vdev_add() = %d", error);
}
! mutex_exit(&ztest_vdev_lock);
}
/*
* Verify that adding/removing aux devices (l2arc, hot spare) works as expected.
*/
*** 2597,2607 ****
} else {
sav = &spa->spa_l2cache;
aux = ZPOOL_CONFIG_L2CACHE;
}
! VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
if (sav->sav_count != 0 && ztest_random(4) == 0) {
/*
--- 2598,2608 ----
} else {
sav = &spa->spa_l2cache;
aux = ZPOOL_CONFIG_L2CACHE;
}
! mutex_enter(&ztest_vdev_lock);
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
if (sav->sav_count != 0 && ztest_random(4) == 0) {
/*
*** 2654,2664 ****
error = spa_vdev_remove(spa, guid, B_FALSE);
if (error != 0 && error != EBUSY)
fatal(0, "spa_vdev_remove(%llu) = %d", guid, error);
}
! VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
}
/*
* split a pool if it has mirror tlvdevs
*/
--- 2655,2665 ----
error = spa_vdev_remove(spa, guid, B_FALSE);
if (error != 0 && error != EBUSY)
fatal(0, "spa_vdev_remove(%llu) = %d", guid, error);
}
! mutex_exit(&ztest_vdev_lock);
}
/*
* split a pool if it has mirror tlvdevs
*/
*** 2671,2685 ****
vdev_t *rvd = spa->spa_root_vdev;
nvlist_t *tree, **child, *config, *split, **schild;
uint_t c, children, schildren = 0, lastlogid = 0;
int error = 0;
! VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
/* ensure we have a usable config; mirrors of raidz aren't supported */
if (zs->zs_mirrors < 3 || ztest_opts.zo_raidz > 1) {
! VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
return;
}
/* clean up the old pool, if any */
(void) spa_destroy("splitp");
--- 2672,2686 ----
vdev_t *rvd = spa->spa_root_vdev;
nvlist_t *tree, **child, *config, *split, **schild;
uint_t c, children, schildren = 0, lastlogid = 0;
int error = 0;
! mutex_enter(&ztest_vdev_lock);
/* ensure we have a usable config; mirrors of raidz aren't supported */
if (zs->zs_mirrors < 3 || ztest_opts.zo_raidz > 1) {
! mutex_exit(&ztest_vdev_lock);
return;
}
/* clean up the old pool, if any */
(void) spa_destroy("splitp");
*** 2734,2746 ****
free(schild);
nvlist_free(split);
spa_config_exit(spa, SCL_VDEV, FTAG);
! (void) rw_wrlock(&ztest_name_lock);
error = spa_vdev_split_mirror(spa, "splitp", config, NULL, B_FALSE);
! (void) rw_unlock(&ztest_name_lock);
nvlist_free(config);
if (error == 0) {
(void) printf("successful split - results:\n");
--- 2735,2747 ----
free(schild);
nvlist_free(split);
spa_config_exit(spa, SCL_VDEV, FTAG);
! rw_enter(&ztest_name_lock, RW_WRITER);
error = spa_vdev_split_mirror(spa, "splitp", config, NULL, B_FALSE);
! rw_exit(&ztest_name_lock);
nvlist_free(config);
if (error == 0) {
(void) printf("successful split - results:\n");
*** 2749,2759 ****
show_pool_stats(spa_lookup("splitp"));
mutex_exit(&spa_namespace_lock);
++zs->zs_splits;
--zs->zs_mirrors;
}
! VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
}
/*
* Verify that we can attach and detach devices.
--- 2750,2760 ----
show_pool_stats(spa_lookup("splitp"));
mutex_exit(&spa_namespace_lock);
++zs->zs_splits;
--zs->zs_mirrors;
}
! mutex_exit(&ztest_vdev_lock);
}
/*
* Verify that we can attach and detach devices.
*** 2778,2788 ****
int oldvd_has_siblings = B_FALSE;
int newvd_is_spare = B_FALSE;
int oldvd_is_log;
int error, expected_error;
! VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
leaves = MAX(zs->zs_mirrors, 1) * ztest_opts.zo_raidz;
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
/*
--- 2779,2789 ----
int oldvd_has_siblings = B_FALSE;
int newvd_is_spare = B_FALSE;
int oldvd_is_log;
int error, expected_error;
! mutex_enter(&ztest_vdev_lock);
leaves = MAX(zs->zs_mirrors, 1) * ztest_opts.zo_raidz;
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
/*
*** 2839,2849 ****
spa_config_exit(spa, SCL_VDEV, FTAG);
error = spa_vdev_detach(spa, oldguid, pguid, B_FALSE);
if (error != 0 && error != ENODEV && error != EBUSY &&
error != ENOTSUP)
fatal(0, "detach (%s) returned %d", oldpath, error);
! VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
return;
}
/*
* For the new vdev, choose with equal probability between the two
--- 2840,2850 ----
spa_config_exit(spa, SCL_VDEV, FTAG);
error = spa_vdev_detach(spa, oldguid, pguid, B_FALSE);
if (error != 0 && error != ENODEV && error != EBUSY &&
error != ENOTSUP)
fatal(0, "detach (%s) returned %d", oldpath, error);
! mutex_exit(&ztest_vdev_lock);
return;
}
/*
* For the new vdev, choose with equal probability between the two
*** 2933,2943 ****
"returned %d, expected %d",
oldpath, oldsize, newpath,
newsize, replacing, error, expected_error);
}
! VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
}
/*
* Callback function which expands the physical size of the vdev.
*/
--- 2934,2944 ----
"returned %d, expected %d",
oldpath, oldsize, newpath,
newsize, replacing, error, expected_error);
}
! mutex_exit(&ztest_vdev_lock);
}
/*
* Callback function which expands the physical size of the vdev.
*/
*** 3061,3071 ****
metaslab_group_t *mg;
size_t psize, newsize;
uint64_t top;
uint64_t old_class_space, new_class_space, old_ms_count, new_ms_count;
! VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
spa_config_enter(spa, SCL_STATE, spa, RW_READER);
top = ztest_random_vdev_top(spa, B_TRUE);
tvd = spa->spa_root_vdev->vdev_child[top];
--- 3062,3072 ----
metaslab_group_t *mg;
size_t psize, newsize;
uint64_t top;
uint64_t old_class_space, new_class_space, old_ms_count, new_ms_count;
! mutex_enter(&ztest_vdev_lock);
spa_config_enter(spa, SCL_STATE, spa, RW_READER);
top = ztest_random_vdev_top(spa, B_TRUE);
tvd = spa->spa_root_vdev->vdev_child[top];
*** 3089,3099 ****
* original size, and it has a valid psize.
*/
if (tvd->vdev_state != VDEV_STATE_HEALTHY ||
psize == 0 || psize >= 4 * ztest_opts.zo_vdev_size) {
spa_config_exit(spa, SCL_STATE, spa);
! VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
return;
}
ASSERT(psize > 0);
newsize = psize + psize / 8;
ASSERT3U(newsize, >, psize);
--- 3090,3100 ----
* original size, and it has a valid psize.
*/
if (tvd->vdev_state != VDEV_STATE_HEALTHY ||
psize == 0 || psize >= 4 * ztest_opts.zo_vdev_size) {
spa_config_exit(spa, SCL_STATE, spa);
! mutex_exit(&ztest_vdev_lock);
return;
}
ASSERT(psize > 0);
newsize = psize + psize / 8;
ASSERT3U(newsize, >, psize);
*** 3114,3124 ****
if (ztest_opts.zo_verbose >= 5) {
(void) printf("Could not expand LUN because "
"the vdev configuration changed.\n");
}
spa_config_exit(spa, SCL_STATE, spa);
! VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
return;
}
spa_config_exit(spa, SCL_STATE, spa);
--- 3115,3125 ----
if (ztest_opts.zo_verbose >= 5) {
(void) printf("Could not expand LUN because "
"the vdev configuration changed.\n");
}
spa_config_exit(spa, SCL_STATE, spa);
! mutex_exit(&ztest_vdev_lock);
return;
}
spa_config_exit(spa, SCL_STATE, spa);
*** 3148,3158 ****
if (ztest_opts.zo_verbose >= 5) {
(void) printf("Could not verify LUN expansion due to "
"intervening vdev offline or remove.\n");
}
spa_config_exit(spa, SCL_STATE, spa);
! VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
return;
}
/*
* Make sure we were able to grow the vdev.
--- 3149,3159 ----
if (ztest_opts.zo_verbose >= 5) {
(void) printf("Could not verify LUN expansion due to "
"intervening vdev offline or remove.\n");
}
spa_config_exit(spa, SCL_STATE, spa);
! mutex_exit(&ztest_vdev_lock);
return;
}
/*
* Make sure we were able to grow the vdev.
*** 3176,3186 ****
(void) printf("%s grew from %s to %s\n",
spa->spa_name, oldnumbuf, newnumbuf);
}
spa_config_exit(spa, SCL_STATE, spa);
! VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
}
/*
* Verify that dmu_objset_{create,destroy,open,close} work as expected.
*/
--- 3177,3187 ----
(void) printf("%s grew from %s to %s\n",
spa->spa_name, oldnumbuf, newnumbuf);
}
spa_config_exit(spa, SCL_STATE, spa);
! mutex_exit(&ztest_vdev_lock);
}
/*
* Verify that dmu_objset_{create,destroy,open,close} work as expected.
*/
*** 3290,3300 ****
int error;
objset_t *os, *os2;
char name[ZFS_MAX_DATASET_NAME_LEN];
zilog_t *zilog;
! (void) rw_rdlock(&ztest_name_lock);
(void) snprintf(name, sizeof (name), "%s/temp_%llu",
ztest_opts.zo_pool, (u_longlong_t)id);
/*
--- 3291,3301 ----
int error;
objset_t *os, *os2;
char name[ZFS_MAX_DATASET_NAME_LEN];
zilog_t *zilog;
! rw_enter(&ztest_name_lock, RW_READER);
(void) snprintf(name, sizeof (name), "%s/temp_%llu",
ztest_opts.zo_pool, (u_longlong_t)id);
/*
*** 3329,3339 ****
*/
error = ztest_dataset_create(name);
if (error) {
if (error == ENOSPC) {
ztest_record_enospc(FTAG);
! (void) rw_unlock(&ztest_name_lock);
return;
}
fatal(0, "dmu_objset_create(%s) = %d", name, error);
}
--- 3330,3340 ----
*/
error = ztest_dataset_create(name);
if (error) {
if (error == ENOSPC) {
ztest_record_enospc(FTAG);
! rw_exit(&ztest_name_lock);
return;
}
fatal(0, "dmu_objset_create(%s) = %d", name, error);
}
*** 3377,3399 ****
zil_close(zilog);
dmu_objset_disown(os, FTAG);
ztest_zd_fini(&zdtmp);
! (void) rw_unlock(&ztest_name_lock);
}
/*
* Verify that dmu_snapshot_{create,destroy,open,close} work as expected.
*/
void
ztest_dmu_snapshot_create_destroy(ztest_ds_t *zd, uint64_t id)
{
! (void) rw_rdlock(&ztest_name_lock);
(void) ztest_snapshot_destroy(zd->zd_name, id);
(void) ztest_snapshot_create(zd->zd_name, id);
! (void) rw_unlock(&ztest_name_lock);
}
/*
* Cleanup non-standard snapshots and clones.
*/
--- 3378,3400 ----
zil_close(zilog);
dmu_objset_disown(os, FTAG);
ztest_zd_fini(&zdtmp);
! rw_exit(&ztest_name_lock);
}
/*
* Verify that dmu_snapshot_{create,destroy,open,close} work as expected.
*/
void
ztest_dmu_snapshot_create_destroy(ztest_ds_t *zd, uint64_t id)
{
! rw_enter(&ztest_name_lock, RW_READER);
(void) ztest_snapshot_destroy(zd->zd_name, id);
(void) ztest_snapshot_create(zd->zd_name, id);
! rw_exit(&ztest_name_lock);
}
/*
* Cleanup non-standard snapshots and clones.
*/
*** 3448,3458 ****
char clone2name[ZFS_MAX_DATASET_NAME_LEN];
char snap3name[ZFS_MAX_DATASET_NAME_LEN];
char *osname = zd->zd_name;
int error;
! (void) rw_rdlock(&ztest_name_lock);
ztest_dsl_dataset_cleanup(osname, id);
(void) snprintf(snap1name, sizeof (snap1name),
"%s@s1_%llu", osname, id);
--- 3449,3459 ----
char clone2name[ZFS_MAX_DATASET_NAME_LEN];
char snap3name[ZFS_MAX_DATASET_NAME_LEN];
char *osname = zd->zd_name;
int error;
! rw_enter(&ztest_name_lock, RW_READER);
ztest_dsl_dataset_cleanup(osname, id);
(void) snprintf(snap1name, sizeof (snap1name),
"%s@s1_%llu", osname, id);
*** 3525,3535 ****
dmu_objset_disown(os, FTAG);
out:
ztest_dsl_dataset_cleanup(osname, id);
! (void) rw_unlock(&ztest_name_lock);
}
/*
* Verify that dmu_object_{alloc,free} work as expected.
*/
--- 3526,3536 ----
dmu_objset_disown(os, FTAG);
out:
ztest_dsl_dataset_cleanup(osname, id);
! rw_exit(&ztest_name_lock);
}
/*
* Verify that dmu_object_{alloc,free} work as expected.
*/
*** 4459,4471 ****
goto out;
ASSERT3U(data->zcd_txg, !=, 0);
/* Remove our callback from the list */
! (void) mutex_lock(&zcl.zcl_callbacks_lock);
list_remove(&zcl.zcl_callbacks, data);
! (void) mutex_unlock(&zcl.zcl_callbacks_lock);
out:
umem_free(data, sizeof (ztest_cb_data_t));
}
--- 4460,4472 ----
goto out;
ASSERT3U(data->zcd_txg, !=, 0);
/* Remove our callback from the list */
! mutex_enter(&zcl.zcl_callbacks_lock);
list_remove(&zcl.zcl_callbacks, data);
! mutex_exit(&zcl.zcl_callbacks_lock);
out:
umem_free(data, sizeof (ztest_cb_data_t));
}
*** 4563,4573 ****
fatal(0, "future leak: got %" PRIu64 ", open txg is %" PRIu64,
old_txg, txg);
dmu_write(os, od[0].od_object, 0, sizeof (uint64_t), &txg, tx);
! (void) mutex_lock(&zcl.zcl_callbacks_lock);
/*
* Since commit callbacks don't have any ordering requirement and since
* it is theoretically possible for a commit callback to be called
* after an arbitrary amount of time has elapsed since its txg has been
--- 4564,4574 ----
fatal(0, "future leak: got %" PRIu64 ", open txg is %" PRIu64,
old_txg, txg);
dmu_write(os, od[0].od_object, 0, sizeof (uint64_t), &txg, tx);
! mutex_enter(&zcl.zcl_callbacks_lock);
/*
* Since commit callbacks don't have any ordering requirement and since
* it is theoretically possible for a commit callback to be called
* after an arbitrary amount of time has elapsed since its txg has been
*** 4610,4620 ****
VERIFY(!cb_data[i]->zcd_called);
tmp_cb = cb_data[i];
}
! (void) mutex_unlock(&zcl.zcl_callbacks_lock);
dmu_tx_commit(tx);
}
/* ARGSUSED */
--- 4611,4621 ----
VERIFY(!cb_data[i]->zcd_called);
tmp_cb = cb_data[i];
}
! mutex_exit(&zcl.zcl_callbacks_lock);
dmu_tx_commit(tx);
}
/* ARGSUSED */
*** 4626,4651 ****
ZFS_PROP_COMPRESSION,
ZFS_PROP_COPIES,
ZFS_PROP_DEDUP
};
! (void) rw_rdlock(&ztest_name_lock);
for (int p = 0; p < sizeof (proplist) / sizeof (proplist[0]); p++)
(void) ztest_dsl_prop_set_uint64(zd->zd_name, proplist[p],
ztest_random_dsl_prop(proplist[p]), (int)ztest_random(2));
! (void) rw_unlock(&ztest_name_lock);
}
/* ARGSUSED */
void
ztest_spa_prop_get_set(ztest_ds_t *zd, uint64_t id)
{
nvlist_t *props = NULL;
! (void) rw_rdlock(&ztest_name_lock);
(void) ztest_spa_prop_set_uint64(ZPOOL_PROP_DEDUPDITTO,
ZIO_DEDUPDITTO_MIN + ztest_random(ZIO_DEDUPDITTO_MIN));
VERIFY0(spa_prop_get(ztest_spa, &props));
--- 4627,4652 ----
ZFS_PROP_COMPRESSION,
ZFS_PROP_COPIES,
ZFS_PROP_DEDUP
};
! rw_enter(&ztest_name_lock, RW_READER);
for (int p = 0; p < sizeof (proplist) / sizeof (proplist[0]); p++)
(void) ztest_dsl_prop_set_uint64(zd->zd_name, proplist[p],
ztest_random_dsl_prop(proplist[p]), (int)ztest_random(2));
! rw_exit(&ztest_name_lock);
}
/* ARGSUSED */
void
ztest_spa_prop_get_set(ztest_ds_t *zd, uint64_t id)
{
nvlist_t *props = NULL;
! rw_enter(&ztest_name_lock, RW_READER);
(void) ztest_spa_prop_set_uint64(ZPOOL_PROP_DEDUPDITTO,
ZIO_DEDUPDITTO_MIN + ztest_random(ZIO_DEDUPDITTO_MIN));
VERIFY0(spa_prop_get(ztest_spa, &props));
*** 4653,4663 ****
if (ztest_opts.zo_verbose >= 6)
dump_nvlist(props, 4);
nvlist_free(props);
! (void) rw_unlock(&ztest_name_lock);
}
static int
user_release_one(const char *snapname, const char *holdname)
{
--- 4654,4664 ----
if (ztest_opts.zo_verbose >= 6)
dump_nvlist(props, 4);
nvlist_free(props);
! rw_exit(&ztest_name_lock);
}
static int
user_release_one(const char *snapname, const char *holdname)
{
*** 4688,4698 ****
char clonename[100];
char tag[100];
char osname[ZFS_MAX_DATASET_NAME_LEN];
nvlist_t *holds;
! (void) rw_rdlock(&ztest_name_lock);
dmu_objset_name(os, osname);
(void) snprintf(snapname, sizeof (snapname), "sh1_%llu", id);
(void) snprintf(fullname, sizeof (fullname), "%s@%s", osname, snapname);
--- 4689,4699 ----
char clonename[100];
char tag[100];
char osname[ZFS_MAX_DATASET_NAME_LEN];
nvlist_t *holds;
! rw_enter(&ztest_name_lock, RW_READER);
dmu_objset_name(os, osname);
(void) snprintf(snapname, sizeof (snapname), "sh1_%llu", id);
(void) snprintf(fullname, sizeof (fullname), "%s@%s", osname, snapname);
*** 4793,4803 ****
fatal(0, "user_release_one(%s, %s) = %d", fullname, tag, error);
VERIFY3U(dmu_objset_hold(fullname, FTAG, &origin), ==, ENOENT);
out:
! (void) rw_unlock(&ztest_name_lock);
}
/*
* Inject random faults into the on-disk data.
*/
--- 4794,4804 ----
fatal(0, "user_release_one(%s, %s) = %d", fullname, tag, error);
VERIFY3U(dmu_objset_hold(fullname, FTAG, &origin), ==, ENOENT);
out:
! rw_exit(&ztest_name_lock);
}
/*
* Inject random faults into the on-disk data.
*/
*** 4821,4845 ****
int mirror_save;
vdev_t *vd0 = NULL;
uint64_t guid0 = 0;
boolean_t islog = B_FALSE;
! VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
maxfaults = MAXFAULTS();
leaves = MAX(zs->zs_mirrors, 1) * ztest_opts.zo_raidz;
mirror_save = zs->zs_mirrors;
! VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
ASSERT(leaves >= 1);
/*
* Grab the name lock as reader. There are some operations
* which don't like to have their vdevs changed while
* they are in progress (e.g. spa_change_guid). Those
* operations will have grabbed the name lock as writer.
*/
! (void) rw_rdlock(&ztest_name_lock);
/*
* We need SCL_STATE here because we're going to look at vd0->vdev_tsd.
*/
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
--- 4822,4846 ----
int mirror_save;
vdev_t *vd0 = NULL;
uint64_t guid0 = 0;
boolean_t islog = B_FALSE;
! mutex_enter(&ztest_vdev_lock);
maxfaults = MAXFAULTS();
leaves = MAX(zs->zs_mirrors, 1) * ztest_opts.zo_raidz;
mirror_save = zs->zs_mirrors;
! mutex_exit(&ztest_vdev_lock);
ASSERT(leaves >= 1);
/*
* Grab the name lock as reader. There are some operations
* which don't like to have their vdevs changed while
* they are in progress (e.g. spa_change_guid). Those
* operations will have grabbed the name lock as writer.
*/
! rw_enter(&ztest_name_lock, RW_READER);
/*
* We need SCL_STATE here because we're going to look at vd0->vdev_tsd.
*/
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
*** 4904,4914 ****
*/
spa_aux_vdev_t *sav = &spa->spa_l2cache;
if (sav->sav_count == 0) {
spa_config_exit(spa, SCL_STATE, FTAG);
! (void) rw_unlock(&ztest_name_lock);
return;
}
vd0 = sav->sav_vdevs[ztest_random(sav->sav_count)];
guid0 = vd0->vdev_guid;
(void) strcpy(path0, vd0->vdev_path);
--- 4905,4915 ----
*/
spa_aux_vdev_t *sav = &spa->spa_l2cache;
if (sav->sav_count == 0) {
spa_config_exit(spa, SCL_STATE, FTAG);
! rw_exit(&ztest_name_lock);
return;
}
vd0 = sav->sav_vdevs[ztest_random(sav->sav_count)];
guid0 = vd0->vdev_guid;
(void) strcpy(path0, vd0->vdev_path);
*** 4918,4928 ****
leaves = 1;
maxfaults = INT_MAX; /* no limit on cache devices */
}
spa_config_exit(spa, SCL_STATE, FTAG);
! (void) rw_unlock(&ztest_name_lock);
/*
* If we can tolerate two or more faults, or we're dealing
* with a slog, randomly online/offline vd0.
*/
--- 4919,4929 ----
leaves = 1;
maxfaults = INT_MAX; /* no limit on cache devices */
}
spa_config_exit(spa, SCL_STATE, FTAG);
! rw_exit(&ztest_name_lock);
/*
* If we can tolerate two or more faults, or we're dealing
* with a slog, randomly online/offline vd0.
*/
*** 4938,4953 ****
* grab a reference on the dataset which may cause
* dmu_objset_destroy() to fail with EBUSY thus
* leaving the dataset in an inconsistent state.
*/
if (islog)
! (void) rw_wrlock(&ztest_name_lock);
VERIFY(vdev_offline(spa, guid0, flags) != EBUSY);
if (islog)
! (void) rw_unlock(&ztest_name_lock);
} else {
/*
* Ideally we would like to be able to randomly
* call vdev_[on|off]line without holding locks
* to force unpredictable failures but the side
--- 4939,4954 ----
* grab a reference on the dataset which may cause
* dmu_objset_destroy() to fail with EBUSY thus
* leaving the dataset in an inconsistent state.
*/
if (islog)
! rw_enter(&ztest_name_lock, RW_WRITER);
VERIFY(vdev_offline(spa, guid0, flags) != EBUSY);
if (islog)
! rw_exit(&ztest_name_lock);
} else {
/*
* Ideally we would like to be able to randomly
* call vdev_[on|off]line without holding locks
* to force unpredictable failures but the side
*** 4954,4966 ****
* effects of vdev_[on|off]line prevent us from
* doing so. We grab the ztest_vdev_lock here to
* prevent a race between injection testing and
* aux_vdev removal.
*/
! VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
(void) vdev_online(spa, guid0, 0, NULL);
! VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
}
}
if (maxfaults == 0)
return;
--- 4955,4967 ----
* effects of vdev_[on|off]line prevent us from
* doing so. We grab the ztest_vdev_lock here to
* prevent a race between injection testing and
* aux_vdev removal.
*/
! mutex_enter(&ztest_vdev_lock);
(void) vdev_online(spa, guid0, 0, NULL);
! mutex_exit(&ztest_vdev_lock);
}
}
if (maxfaults == 0)
return;
*** 5028,5049 ****
uint64_t psize = P2ALIGN(fsize, sizeof (vdev_label_t));
if ((leaf & 1) == 1 &&
offset + sizeof (bad) > psize - VDEV_LABEL_END_SIZE)
continue;
! VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
if (mirror_save != zs->zs_mirrors) {
! VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
(void) close(fd);
return;
}
if (pwrite(fd, &bad, sizeof (bad), offset) != sizeof (bad))
fatal(1, "can't inject bad word at 0x%llx in %s",
offset, pathrand);
! VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
if (ztest_opts.zo_verbose >= 7)
(void) printf("injected bad word into %s,"
" offset 0x%llx\n", pathrand, (u_longlong_t)offset);
}
--- 5029,5050 ----
uint64_t psize = P2ALIGN(fsize, sizeof (vdev_label_t));
if ((leaf & 1) == 1 &&
offset + sizeof (bad) > psize - VDEV_LABEL_END_SIZE)
continue;
! mutex_enter(&ztest_vdev_lock);
if (mirror_save != zs->zs_mirrors) {
! mutex_exit(&ztest_vdev_lock);
(void) close(fd);
return;
}
if (pwrite(fd, &bad, sizeof (bad), offset) != sizeof (bad))
fatal(1, "can't inject bad word at 0x%llx in %s",
offset, pathrand);
! mutex_exit(&ztest_vdev_lock);
if (ztest_opts.zo_verbose >= 7)
(void) printf("injected bad word into %s,"
" offset 0x%llx\n", pathrand, (u_longlong_t)offset);
}
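The mirror_save re-check above, taken under ztest_vdev_lock, protects the raw pwrite() from a concurrent pool split changing the mirror layout after the count was sampled earlier in this function. The same sample-then-revalidate idiom as a hypothetical stand-alone helper; the destructive step stays under the lock so the check cannot go stale:

static boolean_t
example_inject_if_unchanged(ztest_shared_t *zs, int saved_mirrors)
{
	mutex_enter(&ztest_vdev_lock);
	if (saved_mirrors != zs->zs_mirrors) {
		mutex_exit(&ztest_vdev_lock);
		return (B_FALSE);	/* layout changed; abandon the work */
	}
	/* ... destructive step (e.g. the pwrite() above) ... */
	mutex_exit(&ztest_vdev_lock);
	return (B_TRUE);
}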
*** 5079,5095 ****
/*
* Take the name lock as writer to prevent anyone else from changing
* the pool and dataset properties we need to maintain during this test.
*/
! (void) rw_wrlock(&ztest_name_lock);
if (ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_DEDUP, checksum,
B_FALSE) != 0 ||
ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_COPIES, 1,
B_FALSE) != 0) {
! (void) rw_unlock(&ztest_name_lock);
return;
}
dmu_objset_stats_t dds;
dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
--- 5080,5096 ----
/*
* Take the name lock as writer to prevent anyone else from changing
* the pool and dataset properties we need to maintain during this test.
*/
! rw_enter(&ztest_name_lock, RW_WRITER);
if (ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_DEDUP, checksum,
B_FALSE) != 0 ||
ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_COPIES, 1,
B_FALSE) != 0) {
! rw_exit(&ztest_name_lock);
return;
}
dmu_objset_stats_t dds;
dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
*** 5104,5114 ****
tx = dmu_tx_create(os);
dmu_tx_hold_write(tx, object, 0, copies * blocksize);
txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
if (txg == 0) {
! (void) rw_unlock(&ztest_name_lock);
return;
}
/*
* Write all the copies of our block.
--- 5105,5115 ----
tx = dmu_tx_create(os);
dmu_tx_hold_write(tx, object, 0, copies * blocksize);
txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
if (txg == 0) {
! rw_exit(&ztest_name_lock);
return;
}
/*
* Write all the copies of our block.
*** 5152,5162 ****
abd, psize, NULL, NULL, ZIO_PRIORITY_SYNC_WRITE,
ZIO_FLAG_CANFAIL | ZIO_FLAG_INDUCE_DAMAGE, NULL));
abd_free(abd);
! (void) rw_unlock(&ztest_name_lock);
}
/*
* Scrub the pool.
*/
--- 5153,5163 ----
abd, psize, NULL, NULL, ZIO_PRIORITY_SYNC_WRITE,
ZIO_FLAG_CANFAIL | ZIO_FLAG_INDUCE_DAMAGE, NULL));
abd_free(abd);
! rw_exit(&ztest_name_lock);
}
/*
* Scrub the pool.
*/
*** 5183,5195 ****
int error;
orig = spa_guid(spa);
load = spa_load_guid(spa);
! (void) rw_wrlock(&ztest_name_lock);
error = spa_change_guid(spa);
! (void) rw_unlock(&ztest_name_lock);
if (error != 0)
return;
if (ztest_opts.zo_verbose >= 4) {
--- 5184,5196 ----
int error;
orig = spa_guid(spa);
load = spa_load_guid(spa);
! rw_enter(&ztest_name_lock, RW_WRITER);
error = spa_change_guid(spa);
! rw_exit(&ztest_name_lock);
if (error != 0)
return;
if (ztest_opts.zo_verbose >= 4) {
*** 5209,5219 ****
ztest_spa_rename(ztest_ds_t *zd, uint64_t id)
{
char *oldname, *newname;
spa_t *spa;
! (void) rw_wrlock(&ztest_name_lock);
oldname = ztest_opts.zo_pool;
newname = umem_alloc(strlen(oldname) + 5, UMEM_NOFAIL);
(void) strcpy(newname, oldname);
(void) strcat(newname, "_tmp");
--- 5210,5220 ----
ztest_spa_rename(ztest_ds_t *zd, uint64_t id)
{
char *oldname, *newname;
spa_t *spa;
! rw_enter(&ztest_name_lock, RW_WRITER);
oldname = ztest_opts.zo_pool;
newname = umem_alloc(strlen(oldname) + 5, UMEM_NOFAIL);
(void) strcpy(newname, oldname);
(void) strcat(newname, "_tmp");
*** 5249,5259 ****
ASSERT(spa == ztest_spa);
spa_close(spa, FTAG);
umem_free(newname, strlen(newname) + 1);
! (void) rw_unlock(&ztest_name_lock);
}
/*
* Verify pool integrity by running zdb.
*/
--- 5250,5260 ----
ASSERT(spa == ztest_spa);
spa_close(spa, FTAG);
umem_free(newname, strlen(newname) + 1);
! rw_exit(&ztest_name_lock);
}
/*
* Verify pool integrity by running zdb.
*/
*** 5604,5625 ****
char name[ZFS_MAX_DATASET_NAME_LEN];
int error;
ztest_dataset_name(name, ztest_opts.zo_pool, d);
! (void) rw_rdlock(&ztest_name_lock);
error = ztest_dataset_create(name);
if (error == ENOSPC) {
! (void) rw_unlock(&ztest_name_lock);
ztest_record_enospc(FTAG);
return (error);
}
ASSERT(error == 0 || error == EEXIST);
VERIFY0(dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, zd, &os));
! (void) rw_unlock(&ztest_name_lock);
ztest_zd_init(zd, ZTEST_GET_SHARED_DS(d), os);
zilog = zd->zd_zilog;
--- 5605,5626 ----
char name[ZFS_MAX_DATASET_NAME_LEN];
int error;
ztest_dataset_name(name, ztest_opts.zo_pool, d);
! rw_enter(&ztest_name_lock, RW_READER);
error = ztest_dataset_create(name);
if (error == ENOSPC) {
! rw_exit(&ztest_name_lock);
ztest_record_enospc(FTAG);
return (error);
}
ASSERT(error == 0 || error == EEXIST);
VERIFY0(dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, zd, &os));
! rw_exit(&ztest_name_lock);
ztest_zd_init(zd, ZTEST_GET_SHARED_DS(d), os);
zilog = zd->zd_zilog;
*** 5666,5688 ****
* Kick off threads to run tests on all datasets in parallel.
*/
static void
ztest_run(ztest_shared_t *zs)
{
! thread_t *tid;
spa_t *spa;
objset_t *os;
! thread_t resume_tid;
int error;
ztest_exiting = B_FALSE;
/*
* Initialize parent/child shared state.
*/
! VERIFY(_mutex_init(&ztest_vdev_lock, USYNC_THREAD, NULL) == 0);
! VERIFY(rwlock_init(&ztest_name_lock, USYNC_THREAD, NULL) == 0);
zs->zs_thread_start = gethrtime();
zs->zs_thread_stop =
zs->zs_thread_start + ztest_opts.zo_passtime * NANOSEC;
zs->zs_thread_stop = MIN(zs->zs_thread_stop, zs->zs_proc_stop);
--- 5667,5689 ----
* Kick off threads to run tests on all datasets in parallel.
*/
static void
ztest_run(ztest_shared_t *zs)
{
! pthread_t *tid;
spa_t *spa;
objset_t *os;
! pthread_t resume_tid, deadman_tid;
int error;
ztest_exiting = B_FALSE;
/*
* Initialize parent/child shared state.
*/
! mutex_init(&ztest_vdev_lock, NULL, USYNC_THREAD, NULL);
! rw_init(&ztest_name_lock, NULL, USYNC_THREAD, NULL);
zs->zs_thread_start = gethrtime();
zs->zs_thread_stop =
zs->zs_thread_start + ztest_opts.zo_passtime * NANOSEC;
zs->zs_thread_stop = MIN(zs->zs_thread_stop, zs->zs_proc_stop);
*** 5690,5700 ****
if (ztest_random(100) < ztest_opts.zo_killrate) {
zs->zs_thread_kill -=
ztest_random(ztest_opts.zo_passtime * NANOSEC);
}
! (void) _mutex_init(&zcl.zcl_callbacks_lock, USYNC_THREAD, NULL);
list_create(&zcl.zcl_callbacks, sizeof (ztest_cb_data_t),
offsetof(ztest_cb_data_t, zcd_node));
/*
--- 5691,5701 ----
if (ztest_random(100) < ztest_opts.zo_killrate) {
zs->zs_thread_kill -=
ztest_random(ztest_opts.zo_passtime * NANOSEC);
}
! mutex_init(&zcl.zcl_callbacks_lock, NULL, USYNC_THREAD, NULL);
list_create(&zcl.zcl_callbacks, sizeof (ztest_cb_data_t),
offsetof(ztest_cb_data_t, zcd_node));
/*
*** 5728,5745 ****
spa->spa_failmode = ZIO_FAILURE_MODE_PANIC;
/*
* Create a thread to periodically resume suspended I/O.
*/
! VERIFY(thr_create(0, 0, ztest_resume_thread, spa, THR_BOUND,
! &resume_tid) == 0);
/*
* Create a deadman thread to abort() if we hang.
*/
! VERIFY(thr_create(0, 0, ztest_deadman_thread, zs, THR_BOUND,
! NULL) == 0);
/*
* Verify that we can safely inquire about any object,
* whether it's allocated or not. To make it interesting,
* we probe a 5-wide window around each power of two.
--- 5729,5746 ----
spa->spa_failmode = ZIO_FAILURE_MODE_PANIC;
/*
* Create a thread to periodically resume suspended I/O.
*/
! VERIFY(pthread_create(&resume_tid, NULL, ztest_resume_thread,
! spa) == 0);
/*
* Create a deadman thread to abort() if we hang.
*/
! VERIFY(pthread_create(&deadman_tid, NULL, ztest_deadman_thread,
! zs) == 0);
/*
* Verify that we can safely inquire about any object,
* whether it's allocated or not. To make it interesting,
* we probe a 5-wide window around each power of two.
*** 5761,5771 ****
int d = ztest_random(ztest_opts.zo_datasets);
ztest_dataset_destroy(d);
}
zs->zs_enospc_count = 0;
! tid = umem_zalloc(ztest_opts.zo_threads * sizeof (thread_t),
UMEM_NOFAIL);
if (ztest_opts.zo_verbose >= 4)
(void) printf("starting main threads...\n");
--- 5762,5772 ----
int d = ztest_random(ztest_opts.zo_datasets);
ztest_dataset_destroy(d);
}
zs->zs_enospc_count = 0;
! tid = umem_zalloc(ztest_opts.zo_threads * sizeof (pthread_t),
UMEM_NOFAIL);
if (ztest_opts.zo_verbose >= 4)
(void) printf("starting main threads...\n");
*** 5774,5793 ****
*/
for (int t = 0; t < ztest_opts.zo_threads; t++) {
if (t < ztest_opts.zo_datasets &&
ztest_dataset_open(t) != 0)
return;
! VERIFY(thr_create(0, 0, ztest_thread, (void *)(uintptr_t)t,
! THR_BOUND, &tid[t]) == 0);
}
/*
* Wait for all of the tests to complete. We go in reverse order
* so we don't close datasets while threads are still using them.
*/
for (int t = ztest_opts.zo_threads - 1; t >= 0; t--) {
! VERIFY(thr_join(tid[t], NULL, NULL) == 0);
if (t < ztest_opts.zo_datasets)
ztest_dataset_close(t);
}
txg_wait_synced(spa_get_dsl(spa), 0);
--- 5775,5794 ----
*/
for (int t = 0; t < ztest_opts.zo_threads; t++) {
if (t < ztest_opts.zo_datasets &&
ztest_dataset_open(t) != 0)
return;
! VERIFY(pthread_create(&tid[t], NULL, ztest_thread,
! (void *)(uintptr_t)t) == 0);
}
/*
* Wait for all of the tests to complete. We go in reverse order
* so we don't close datasets while threads are still using them.
*/
for (int t = ztest_opts.zo_threads - 1; t >= 0; t--) {
! VERIFY(pthread_join(tid[t], NULL) == 0);
if (t < ztest_opts.zo_datasets)
ztest_dataset_close(t);
}
txg_wait_synced(spa_get_dsl(spa), 0);
*** 5794,5808 ****
zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(spa));
zs->zs_space = metaslab_class_get_space(spa_normal_class(spa));
zfs_dbgmsg_print(FTAG);
! umem_free(tid, ztest_opts.zo_threads * sizeof (thread_t));
/* Kill the resume thread */
ztest_exiting = B_TRUE;
! VERIFY(thr_join(resume_tid, NULL, NULL) == 0);
ztest_resume(spa);
/*
* Right before closing the pool, kick off a bunch of async I/O;
* spa_close() should wait for it to complete.
--- 5795,5809 ----
zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(spa));
zs->zs_space = metaslab_class_get_space(spa_normal_class(spa));
zfs_dbgmsg_print(FTAG);
! umem_free(tid, ztest_opts.zo_threads * sizeof (pthread_t));
/* Kill the resume thread */
ztest_exiting = B_TRUE;
! VERIFY(pthread_join(resume_tid, NULL) == 0);
ztest_resume(spa);
/*
* Right before closing the pool, kick off a bunch of async I/O;
* spa_close() should wait for it to complete.
*** 5837,5850 ****
kernel_fini();
list_destroy(&zcl.zcl_callbacks);
! (void) _mutex_destroy(&zcl.zcl_callbacks_lock);
! (void) rwlock_destroy(&ztest_name_lock);
! (void) _mutex_destroy(&ztest_vdev_lock);
}
static void
ztest_freeze(void)
{
--- 5838,5851 ----
kernel_fini();
list_destroy(&zcl.zcl_callbacks);
! mutex_destroy(&zcl.zcl_callbacks_lock);
! rw_destroy(&ztest_name_lock);
! mutex_destroy(&ztest_vdev_lock);
}
static void
ztest_freeze(void)
{
*** 5984,5995 ****
ztest_init(ztest_shared_t *zs)
{
spa_t *spa;
nvlist_t *nvroot, *props;
! VERIFY(_mutex_init(&ztest_vdev_lock, USYNC_THREAD, NULL) == 0);
! VERIFY(rwlock_init(&ztest_name_lock, USYNC_THREAD, NULL) == 0);
kernel_init(FREAD | FWRITE);
/*
* Create the storage pool.
--- 5985,5996 ----
ztest_init(ztest_shared_t *zs)
{
spa_t *spa;
nvlist_t *nvroot, *props;
! mutex_init(&ztest_vdev_lock, NULL, USYNC_THREAD, NULL);
! rw_init(&ztest_name_lock, NULL, USYNC_THREAD, NULL);
kernel_init(FREAD | FWRITE);
/*
* Create the storage pool.
*** 6023,6034 ****
ztest_freeze();
ztest_run_zdb(ztest_opts.zo_pool);
! (void) rwlock_destroy(&ztest_name_lock);
! (void) _mutex_destroy(&ztest_vdev_lock);
}
static void
setup_data_fd(void)
{
--- 6024,6035 ----
ztest_freeze();
ztest_run_zdb(ztest_opts.zo_pool);
! rw_destroy(&ztest_name_lock);
! mutex_destroy(&ztest_vdev_lock);
}
static void
setup_data_fd(void)
{