8115 parallel zfs mount
@@ -23,10 +23,11 @@
* Copyright (c) 2011, 2016 by Delphix. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2013 Steven Hartland. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
* Copyright 2017 Joyent, Inc.
+ * Copyright 2017 RackTop Systems.
*/
/*
* The objective of this program is to provide a DMU/ZAP/SPA stress test
* that runs entirely in userland, is easy to use, and easy to extend.
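/*
 * The bulk of this change converts libc synchronization primitives to
 * the kernel-style wrappers that libzpool emulates, which also lets the
 * return-value VERIFYs go away (the wrappers are assumed to assert
 * internally, as libzpool's kernel.c shims do). A minimal before/after
 * sketch of the mapping:
 *
 *	mutex_t m;					(before)
 *	VERIFY(_mutex_init(&m, USYNC_THREAD, NULL) == 0);
 *	VERIFY(mutex_lock(&m) == 0);
 *	VERIFY(mutex_unlock(&m) == 0);
 *	VERIFY(_mutex_destroy(&m) == 0);
 *
 *	kmutex_t m;					(after)
 *	mutex_init(&m, NULL, USYNC_THREAD, NULL);
 *	mutex_enter(&m);
 *	mutex_exit(&m);
 *	mutex_destroy(&m);
 *
 * rwlock_t and cond_t convert the same way: rw_rdlock()/rw_wrlock()
 * become rw_enter(..., RW_READER/RW_WRITER), rw_unlock() becomes
 * rw_exit(), and cond_wait()/cond_broadcast() become cv_wait() and
 * cv_broadcast().
 */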
@@ -243,12 +244,12 @@
} rl_type_t;
typedef struct rll {
void *rll_writer;
int rll_readers;
- mutex_t rll_lock;
- cond_t rll_cv;
+ kmutex_t rll_lock;
+ kcondvar_t rll_cv;
} rll_t;
typedef struct rl {
uint64_t rl_object;
uint64_t rl_offset;
@@ -278,15 +279,15 @@
* Per-dataset state.
*/
typedef struct ztest_ds {
ztest_shared_ds_t *zd_shared;
objset_t *zd_os;
- rwlock_t zd_zilog_lock;
+ krwlock_t zd_zilog_lock;
zilog_t *zd_zilog;
ztest_od_t *zd_od; /* debugging aid */
char zd_name[ZFS_MAX_DATASET_NAME_LEN];
- mutex_t zd_dirobj_lock;
+ kmutex_t zd_dirobj_lock;
rll_t zd_object_lock[ZTEST_OBJECT_LOCKS];
rll_t zd_range_lock[ZTEST_RANGE_LOCKS];
} ztest_ds_t;
/*
@@ -389,11 +390,11 @@
/*
* The following struct is used to hold a list of uncalled commit callbacks.
* The callbacks are ordered by txg number.
*/
typedef struct ztest_cb_list {
- mutex_t zcl_callbacks_lock;
+ kmutex_t zcl_callbacks_lock;
list_t zcl_callbacks;
} ztest_cb_list_t;
/*
* Stuff we need to share writably between parent and child.
@@ -424,19 +425,19 @@
ztest_shared_t *ztest_shared;
static spa_t *ztest_spa = NULL;
static ztest_ds_t *ztest_ds;
-static mutex_t ztest_vdev_lock;
+static kmutex_t ztest_vdev_lock;
/*
* The ztest_name_lock protects the pool and dataset namespace used by
* the individual tests. To modify the namespace, consumers must grab
* this lock as writer. Grabbing the lock as reader will ensure that the
* namespace does not change while the lock is held.
*/
-static rwlock_t ztest_name_lock;
+static krwlock_t ztest_name_lock;
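/*
 * With the conversion, the canonical pattern for pinning the namespace
 * becomes the sketch below (a reader-side example; namespace-changing
 * consumers such as the rename and split tests take RW_WRITER instead):
 *
 *	rw_enter(&ztest_name_lock, RW_READER);
 *	... operate on pool/dataset names that must not change ...
 *	rw_exit(&ztest_name_lock);
 */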
static boolean_t ztest_dump_core = B_TRUE;
static boolean_t ztest_exiting;
/* Global commit callback list */
@@ -1088,45 +1089,45 @@
static void
ztest_rll_init(rll_t *rll)
{
rll->rll_writer = NULL;
rll->rll_readers = 0;
- VERIFY(_mutex_init(&rll->rll_lock, USYNC_THREAD, NULL) == 0);
- VERIFY(cond_init(&rll->rll_cv, USYNC_THREAD, NULL) == 0);
+ mutex_init(&rll->rll_lock, NULL, USYNC_THREAD, NULL);
+ cv_init(&rll->rll_cv, NULL, USYNC_THREAD, NULL);
}
static void
ztest_rll_destroy(rll_t *rll)
{
ASSERT(rll->rll_writer == NULL);
ASSERT(rll->rll_readers == 0);
- VERIFY(_mutex_destroy(&rll->rll_lock) == 0);
- VERIFY(cond_destroy(&rll->rll_cv) == 0);
+ mutex_destroy(&rll->rll_lock);
+ cv_destroy(&rll->rll_cv);
}
static void
ztest_rll_lock(rll_t *rll, rl_type_t type)
{
- VERIFY(mutex_lock(&rll->rll_lock) == 0);
+ mutex_enter(&rll->rll_lock);
if (type == RL_READER) {
while (rll->rll_writer != NULL)
- (void) cond_wait(&rll->rll_cv, &rll->rll_lock);
+ cv_wait(&rll->rll_cv, &rll->rll_lock);
rll->rll_readers++;
} else {
while (rll->rll_writer != NULL || rll->rll_readers)
- (void) cond_wait(&rll->rll_cv, &rll->rll_lock);
+ cv_wait(&rll->rll_cv, &rll->rll_lock);
rll->rll_writer = curthread;
}
- VERIFY(mutex_unlock(&rll->rll_lock) == 0);
+ mutex_exit(&rll->rll_lock);
}
static void
ztest_rll_unlock(rll_t *rll)
{
- VERIFY(mutex_lock(&rll->rll_lock) == 0);
+ mutex_enter(&rll->rll_lock);
if (rll->rll_writer) {
ASSERT(rll->rll_readers == 0);
rll->rll_writer = NULL;
} else {
@@ -1134,13 +1135,13 @@
ASSERT(rll->rll_writer == NULL);
rll->rll_readers--;
}
if (rll->rll_writer == NULL && rll->rll_readers == 0)
- VERIFY(cond_broadcast(&rll->rll_cv) == 0);
+ cv_broadcast(&rll->rll_cv);
- VERIFY(mutex_unlock(&rll->rll_lock) == 0);
+ mutex_exit(&rll->rll_lock);
}
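/*
 * rll_t is a classic mutex-plus-condvar reader/writer lock: readers
 * wait only while a writer holds the lock, a writer waits for both
 * readers and writers, and the last holder out broadcasts. A minimal
 * caller-side sketch (ztest_object_lock() below and ztest_range_lock()
 * are the real consumers):
 *
 *	ztest_rll_lock(rll, RL_READER);		shared critical section
 *	ztest_rll_unlock(rll);
 *
 *	ztest_rll_lock(rll, RL_WRITER);		exclusive critical section
 *	ztest_rll_unlock(rll);
 */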
static void
ztest_object_lock(ztest_ds_t *zd, uint64_t object, rl_type_t type)
{
@@ -1195,12 +1196,12 @@
dmu_objset_name(os, zd->zd_name);
if (zd->zd_shared != NULL)
zd->zd_shared->zd_seq = 0;
- VERIFY(rwlock_init(&zd->zd_zilog_lock, USYNC_THREAD, NULL) == 0);
- VERIFY(_mutex_init(&zd->zd_dirobj_lock, USYNC_THREAD, NULL) == 0);
+ rw_init(&zd->zd_zilog_lock, NULL, USYNC_THREAD, NULL);
+ mutex_init(&zd->zd_dirobj_lock, NULL, USYNC_THREAD, NULL);
for (int l = 0; l < ZTEST_OBJECT_LOCKS; l++)
ztest_rll_init(&zd->zd_object_lock[l]);
for (int l = 0; l < ZTEST_RANGE_LOCKS; l++)
@@ -1208,11 +1209,11 @@
}
static void
ztest_zd_fini(ztest_ds_t *zd)
{
- VERIFY(_mutex_destroy(&zd->zd_dirobj_lock) == 0);
+ mutex_destroy(&zd->zd_dirobj_lock);
for (int l = 0; l < ZTEST_OBJECT_LOCKS; l++)
ztest_rll_destroy(&zd->zd_object_lock[l]);
for (int l = 0; l < ZTEST_RANGE_LOCKS; l++)
@@ -1963,11 +1964,11 @@
ztest_lookup(ztest_ds_t *zd, ztest_od_t *od, int count)
{
int missing = 0;
int error;
- ASSERT(_mutex_held(&zd->zd_dirobj_lock));
+ ASSERT(MUTEX_HELD(&zd->zd_dirobj_lock));
for (int i = 0; i < count; i++, od++) {
od->od_object = 0;
error = zap_lookup(zd->zd_os, od->od_dir, od->od_name,
sizeof (uint64_t), 1, &od->od_object);
@@ -2003,11 +2004,11 @@
static int
ztest_create(ztest_ds_t *zd, ztest_od_t *od, int count)
{
int missing = 0;
- ASSERT(_mutex_held(&zd->zd_dirobj_lock));
+ ASSERT(MUTEX_HELD(&zd->zd_dirobj_lock));
for (int i = 0; i < count; i++, od++) {
if (missing) {
od->od_object = 0;
missing++;
@@ -2048,11 +2049,11 @@
ztest_remove(ztest_ds_t *zd, ztest_od_t *od, int count)
{
int missing = 0;
int error;
- ASSERT(_mutex_held(&zd->zd_dirobj_lock));
+ ASSERT(MUTEX_HELD(&zd->zd_dirobj_lock));
od += count - 1;
for (int i = count - 1; i >= 0; i--, od--) {
if (missing) {
@@ -2194,11 +2195,11 @@
*/
io_type = ztest_random(ZTEST_IO_TYPES);
if (ztest_random(2) == 0)
io_type = ZTEST_IO_WRITE_TAG;
- (void) rw_rdlock(&zd->zd_zilog_lock);
+ rw_enter(&zd->zd_zilog_lock, RW_READER);
switch (io_type) {
case ZTEST_IO_WRITE_TAG:
ztest_bt_generate(&wbt, zd->zd_os, object, offset, 0, 0, 0);
@@ -2231,30 +2232,30 @@
case ZTEST_IO_SETATTR:
(void) ztest_setattr(zd, object);
break;
case ZTEST_IO_REWRITE:
- (void) rw_rdlock(&ztest_name_lock);
+ rw_enter(&ztest_name_lock, RW_READER);
err = ztest_dsl_prop_set_uint64(zd->zd_name,
ZFS_PROP_CHECKSUM, spa_dedup_checksum(ztest_spa),
B_FALSE);
VERIFY(err == 0 || err == ENOSPC);
err = ztest_dsl_prop_set_uint64(zd->zd_name,
ZFS_PROP_COMPRESSION,
ztest_random_dsl_prop(ZFS_PROP_COMPRESSION),
B_FALSE);
VERIFY(err == 0 || err == ENOSPC);
- (void) rw_unlock(&ztest_name_lock);
+ rw_exit(&ztest_name_lock);
VERIFY0(dmu_read(zd->zd_os, object, offset, blocksize, data,
DMU_READ_NO_PREFETCH));
(void) ztest_write(zd, object, offset, blocksize, data);
break;
}
- (void) rw_unlock(&zd->zd_zilog_lock);
+ rw_exit(&zd->zd_zilog_lock);
umem_free(data, blocksize);
}
/*
@@ -2289,17 +2290,17 @@
ztest_object_init(ztest_ds_t *zd, ztest_od_t *od, size_t size, boolean_t remove)
{
int count = size / sizeof (*od);
int rv = 0;
- VERIFY(mutex_lock(&zd->zd_dirobj_lock) == 0);
+ mutex_enter(&zd->zd_dirobj_lock);
if ((ztest_lookup(zd, od, count) != 0 || remove) &&
(ztest_remove(zd, od, count) != 0 ||
ztest_create(zd, od, count) != 0))
rv = -1;
zd->zd_od = od;
- VERIFY(mutex_unlock(&zd->zd_dirobj_lock) == 0);
+ mutex_exit(&zd->zd_dirobj_lock);
return (rv);
}
/* ARGSUSED */
@@ -2306,11 +2307,11 @@
void
ztest_zil_commit(ztest_ds_t *zd, uint64_t id)
{
zilog_t *zilog = zd->zd_zilog;
- (void) rw_rdlock(&zd->zd_zilog_lock);
+ rw_enter(&zd->zd_zilog_lock, RW_READER);
zil_commit(zilog, ztest_random(ZTEST_OBJECTS));
/*
* Remember the committed values in zd, which is in parent/child
@@ -2321,11 +2322,11 @@
ASSERT(zd->zd_shared != NULL);
ASSERT3U(zd->zd_shared->zd_seq, <=, zilog->zl_commit_lr_seq);
zd->zd_shared->zd_seq = zilog->zl_commit_lr_seq;
mutex_exit(&zilog->zl_lock);
- (void) rw_unlock(&zd->zd_zilog_lock);
+ rw_exit(&zd->zd_zilog_lock);
}
/*
* This function is designed to simulate the operations that occur during a
* mount/unmount operation. We hold the dataset across these operations in an
@@ -2340,22 +2341,22 @@
/*
* We grab the zd_dirobj_lock to ensure that no other thread is
* updating the zil (i.e. adding in-memory log records) and the
* zd_zilog_lock to block any I/O.
*/
- VERIFY0(mutex_lock(&zd->zd_dirobj_lock));
- (void) rw_wrlock(&zd->zd_zilog_lock);
+ mutex_enter(&zd->zd_dirobj_lock);
+ rw_enter(&zd->zd_zilog_lock, RW_WRITER);
/* zfsvfs_teardown() */
zil_close(zd->zd_zilog);
/* zfsvfs_setup() */
VERIFY(zil_open(os, ztest_get_data) == zd->zd_zilog);
zil_replay(os, zd, ztest_replay_vector);
- (void) rw_unlock(&zd->zd_zilog_lock);
- VERIFY(mutex_unlock(&zd->zd_dirobj_lock) == 0);
+ rw_exit(&zd->zd_zilog_lock);
+ mutex_exit(&zd->zd_dirobj_lock);
}
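/*
 * A sketch of the invariant this relies on: ztest_io() and
 * ztest_zil_commit() take zd_zilog_lock as RW_READER around each
 * operation, so the writer acquisition above both blocks new I/O and
 * waits out in-flight I/O before the zil is closed and reopened:
 *
 *	mutex_enter(&zd->zd_dirobj_lock);	  no new log records
 *	rw_enter(&zd->zd_zilog_lock, RW_WRITER);  no in-flight I/O
 *	... zil_close() / zil_open() / zil_replay() ...
 *	rw_exit(&zd->zd_zilog_lock);
 *	mutex_exit(&zd->zd_dirobj_lock);
 *
 * This is the only path in this change that holds both locks, and it
 * takes zd_dirobj_lock first.
 */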
/*
* Verify that we can't destroy an active pool, create an existing pool,
* or create a pool with a bad vdev spec.
@@ -2386,19 +2387,19 @@
/*
* Attempt to create an existing pool. It shouldn't matter
* what's in the nvroot; we should fail with EEXIST.
*/
- (void) rw_rdlock(&ztest_name_lock);
+ rw_enter(&ztest_name_lock, RW_READER);
nvroot = make_vdev_root("/dev/bogus", NULL, NULL, 0, 0, 0, 0, 0, 1);
VERIFY3U(EEXIST, ==, spa_create(zo->zo_pool, nvroot, NULL, NULL));
nvlist_free(nvroot);
VERIFY3U(0, ==, spa_open(zo->zo_pool, &spa, FTAG));
VERIFY3U(EBUSY, ==, spa_destroy(zo->zo_pool));
spa_close(spa, FTAG);
- (void) rw_unlock(&ztest_name_lock);
+ rw_exit(&ztest_name_lock);
}
/* ARGSUSED */
void
ztest_spa_upgrade(ztest_ds_t *zd, uint64_t id)
@@ -2407,11 +2408,11 @@
uint64_t initial_version = SPA_VERSION_INITIAL;
uint64_t version, newversion;
nvlist_t *nvroot, *props;
char *name;
- VERIFY0(mutex_lock(&ztest_vdev_lock));
+ mutex_enter(&ztest_vdev_lock);
name = kmem_asprintf("%s_upgrade", ztest_opts.zo_pool);
/*
* Clean up from previous runs.
*/
@@ -2466,11 +2467,11 @@
VERIFY3U(spa_version(spa), ==, fnvlist_lookup_uint64(spa->spa_config,
zpool_prop_to_name(ZPOOL_PROP_VERSION)));
spa_close(spa, FTAG);
strfree(name);
- VERIFY0(mutex_unlock(&ztest_vdev_lock));
+ mutex_exit(&ztest_vdev_lock);
}
static vdev_t *
vdev_lookup_by_path(vdev_t *vd, const char *path)
{
@@ -2519,11 +2520,11 @@
uint64_t leaves;
uint64_t guid;
nvlist_t *nvroot;
int error;
- VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
+ mutex_enter(&ztest_vdev_lock);
leaves = MAX(zs->zs_mirrors + zs->zs_splits, 1) * ztest_opts.zo_raidz;
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
ztest_shared->zs_vdev_next_leaf = find_vdev_hole(spa) * leaves;
@@ -2545,13 +2546,13 @@
* and destroying a dataset. Removing the slog will
* grab a reference on the dataset which may cause
* dmu_objset_destroy() to fail with EBUSY thus
* leaving the dataset in an inconsistent state.
*/
- VERIFY(rw_wrlock(&ztest_name_lock) == 0);
+ rw_enter(&ztest_name_lock, RW_WRITER);
error = spa_vdev_remove(spa, guid, B_FALSE);
- VERIFY(rw_unlock(&ztest_name_lock) == 0);
+ rw_exit(&ztest_name_lock);
if (error && error != EEXIST)
fatal(0, "spa_vdev_remove() = %d", error);
} else {
spa_config_exit(spa, SCL_VDEV, FTAG);
@@ -2571,11 +2572,11 @@
ztest_record_enospc("spa_vdev_add");
else if (error != 0)
fatal(0, "spa_vdev_add() = %d", error);
}
- VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
+ mutex_exit(&ztest_vdev_lock);
}
/*
* Verify that adding/removing aux devices (l2arc, hot spare) works as expected.
*/
@@ -2597,11 +2598,11 @@
} else {
sav = &spa->spa_l2cache;
aux = ZPOOL_CONFIG_L2CACHE;
}
- VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
+ mutex_enter(&ztest_vdev_lock);
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
if (sav->sav_count != 0 && ztest_random(4) == 0) {
/*
@@ -2654,11 +2655,11 @@
error = spa_vdev_remove(spa, guid, B_FALSE);
if (error != 0 && error != EBUSY)
fatal(0, "spa_vdev_remove(%llu) = %d", guid, error);
}
- VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
+ mutex_exit(&ztest_vdev_lock);
}
/*
* split a pool if it has mirror tlvdevs
*/
@@ -2671,15 +2672,15 @@
vdev_t *rvd = spa->spa_root_vdev;
nvlist_t *tree, **child, *config, *split, **schild;
uint_t c, children, schildren = 0, lastlogid = 0;
int error = 0;
- VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
+ mutex_enter(&ztest_vdev_lock);
/* ensure we have a usable config; mirrors of raidz aren't supported */
if (zs->zs_mirrors < 3 || ztest_opts.zo_raidz > 1) {
- VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
+ mutex_exit(&ztest_vdev_lock);
return;
}
/* clean up the old pool, if any */
(void) spa_destroy("splitp");
@@ -2734,13 +2735,13 @@
free(schild);
nvlist_free(split);
spa_config_exit(spa, SCL_VDEV, FTAG);
- (void) rw_wrlock(&ztest_name_lock);
+ rw_enter(&ztest_name_lock, RW_WRITER);
error = spa_vdev_split_mirror(spa, "splitp", config, NULL, B_FALSE);
- (void) rw_unlock(&ztest_name_lock);
+ rw_exit(&ztest_name_lock);
nvlist_free(config);
if (error == 0) {
(void) printf("successful split - results:\n");
@@ -2749,11 +2750,11 @@
show_pool_stats(spa_lookup("splitp"));
mutex_exit(&spa_namespace_lock);
++zs->zs_splits;
--zs->zs_mirrors;
}
- VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
+ mutex_exit(&ztest_vdev_lock);
}
/*
* Verify that we can attach and detach devices.
@@ -2778,11 +2779,11 @@
int oldvd_has_siblings = B_FALSE;
int newvd_is_spare = B_FALSE;
int oldvd_is_log;
int error, expected_error;
- VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
+ mutex_enter(&ztest_vdev_lock);
leaves = MAX(zs->zs_mirrors, 1) * ztest_opts.zo_raidz;
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
/*
@@ -2839,11 +2840,11 @@
spa_config_exit(spa, SCL_VDEV, FTAG);
error = spa_vdev_detach(spa, oldguid, pguid, B_FALSE);
if (error != 0 && error != ENODEV && error != EBUSY &&
error != ENOTSUP)
fatal(0, "detach (%s) returned %d", oldpath, error);
- VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
+ mutex_exit(&ztest_vdev_lock);
return;
}
/*
* For the new vdev, choose with equal probability between the two
@@ -2933,11 +2934,11 @@
"returned %d, expected %d",
oldpath, oldsize, newpath,
newsize, replacing, error, expected_error);
}
- VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
+ mutex_exit(&ztest_vdev_lock);
}
/*
* Callback function which expands the physical size of the vdev.
*/
@@ -3061,11 +3062,11 @@
metaslab_group_t *mg;
size_t psize, newsize;
uint64_t top;
uint64_t old_class_space, new_class_space, old_ms_count, new_ms_count;
- VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
+ mutex_enter(&ztest_vdev_lock);
spa_config_enter(spa, SCL_STATE, spa, RW_READER);
top = ztest_random_vdev_top(spa, B_TRUE);
tvd = spa->spa_root_vdev->vdev_child[top];
@@ -3089,11 +3090,11 @@
* original size, and it has a valid psize.
*/
if (tvd->vdev_state != VDEV_STATE_HEALTHY ||
psize == 0 || psize >= 4 * ztest_opts.zo_vdev_size) {
spa_config_exit(spa, SCL_STATE, spa);
- VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
+ mutex_exit(&ztest_vdev_lock);
return;
}
ASSERT(psize > 0);
newsize = psize + psize / 8;
ASSERT3U(newsize, >, psize);
@@ -3114,11 +3115,11 @@
if (ztest_opts.zo_verbose >= 5) {
(void) printf("Could not expand LUN because "
"the vdev configuration changed.\n");
}
spa_config_exit(spa, SCL_STATE, spa);
- VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
+ mutex_exit(&ztest_vdev_lock);
return;
}
spa_config_exit(spa, SCL_STATE, spa);
@@ -3148,11 +3149,11 @@
if (ztest_opts.zo_verbose >= 5) {
(void) printf("Could not verify LUN expansion due to "
"intervening vdev offline or remove.\n");
}
spa_config_exit(spa, SCL_STATE, spa);
- VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
+ mutex_exit(&ztest_vdev_lock);
return;
}
/*
* Make sure we were able to grow the vdev.
@@ -3176,11 +3177,11 @@
(void) printf("%s grew from %s to %s\n",
spa->spa_name, oldnumbuf, newnumbuf);
}
spa_config_exit(spa, SCL_STATE, spa);
- VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
+ mutex_exit(&ztest_vdev_lock);
}
/*
* Verify that dmu_objset_{create,destroy,open,close} work as expected.
*/
@@ -3290,11 +3291,11 @@
int error;
objset_t *os, *os2;
char name[ZFS_MAX_DATASET_NAME_LEN];
zilog_t *zilog;
- (void) rw_rdlock(&ztest_name_lock);
+ rw_enter(&ztest_name_lock, RW_READER);
(void) snprintf(name, sizeof (name), "%s/temp_%llu",
ztest_opts.zo_pool, (u_longlong_t)id);
/*
@@ -3329,11 +3330,11 @@
*/
error = ztest_dataset_create(name);
if (error) {
if (error == ENOSPC) {
ztest_record_enospc(FTAG);
- (void) rw_unlock(&ztest_name_lock);
+ rw_exit(&ztest_name_lock);
return;
}
fatal(0, "dmu_objset_create(%s) = %d", name, error);
}
@@ -3377,23 +3378,23 @@
zil_close(zilog);
dmu_objset_disown(os, FTAG);
ztest_zd_fini(&zdtmp);
- (void) rw_unlock(&ztest_name_lock);
+ rw_exit(&ztest_name_lock);
}
/*
* Verify that dmu_snapshot_{create,destroy,open,close} work as expected.
*/
void
ztest_dmu_snapshot_create_destroy(ztest_ds_t *zd, uint64_t id)
{
- (void) rw_rdlock(&ztest_name_lock);
+ rw_enter(&ztest_name_lock, RW_READER);
(void) ztest_snapshot_destroy(zd->zd_name, id);
(void) ztest_snapshot_create(zd->zd_name, id);
- (void) rw_unlock(&ztest_name_lock);
+ rw_exit(&ztest_name_lock);
}
/*
* Cleanup non-standard snapshots and clones.
*/
@@ -3448,11 +3449,11 @@
char clone2name[ZFS_MAX_DATASET_NAME_LEN];
char snap3name[ZFS_MAX_DATASET_NAME_LEN];
char *osname = zd->zd_name;
int error;
- (void) rw_rdlock(&ztest_name_lock);
+ rw_enter(&ztest_name_lock, RW_READER);
ztest_dsl_dataset_cleanup(osname, id);
(void) snprintf(snap1name, sizeof (snap1name),
"%s@s1_%llu", osname, id);
@@ -3525,11 +3526,11 @@
dmu_objset_disown(os, FTAG);
out:
ztest_dsl_dataset_cleanup(osname, id);
- (void) rw_unlock(&ztest_name_lock);
+ rw_exit(&ztest_name_lock);
}
/*
* Verify that dmu_object_{alloc,free} work as expected.
*/
@@ -4459,13 +4460,13 @@
goto out;
ASSERT3U(data->zcd_txg, !=, 0);
/* Remove our callback from the list */
- (void) mutex_lock(&zcl.zcl_callbacks_lock);
+ mutex_enter(&zcl.zcl_callbacks_lock);
list_remove(&zcl.zcl_callbacks, data);
- (void) mutex_unlock(&zcl.zcl_callbacks_lock);
+ mutex_exit(&zcl.zcl_callbacks_lock);
out:
umem_free(data, sizeof (ztest_cb_data_t));
}
@@ -4563,11 +4564,11 @@
fatal(0, "future leak: got %" PRIu64 ", open txg is %" PRIu64,
old_txg, txg);
dmu_write(os, od[0].od_object, 0, sizeof (uint64_t), &txg, tx);
- (void) mutex_lock(&zcl.zcl_callbacks_lock);
+ mutex_enter(&zcl.zcl_callbacks_lock);
/*
* Since commit callbacks don't have any ordering requirement and since
* it is theoretically possible for a commit callback to be called
* after an arbitrary amount of time has elapsed since its txg has been
@@ -4610,11 +4611,11 @@
VERIFY(!cb_data[i]->zcd_called);
tmp_cb = cb_data[i];
}
- (void) mutex_unlock(&zcl.zcl_callbacks_lock);
+ mutex_exit(&zcl.zcl_callbacks_lock);
dmu_tx_commit(tx);
}
/* ARGSUSED */
@@ -4626,26 +4627,26 @@
ZFS_PROP_COMPRESSION,
ZFS_PROP_COPIES,
ZFS_PROP_DEDUP
};
- (void) rw_rdlock(&ztest_name_lock);
+ rw_enter(&ztest_name_lock, RW_READER);
for (int p = 0; p < sizeof (proplist) / sizeof (proplist[0]); p++)
(void) ztest_dsl_prop_set_uint64(zd->zd_name, proplist[p],
ztest_random_dsl_prop(proplist[p]), (int)ztest_random(2));
- (void) rw_unlock(&ztest_name_lock);
+ rw_exit(&ztest_name_lock);
}
/* ARGSUSED */
void
ztest_spa_prop_get_set(ztest_ds_t *zd, uint64_t id)
{
nvlist_t *props = NULL;
- (void) rw_rdlock(&ztest_name_lock);
+ rw_enter(&ztest_name_lock, RW_READER);
(void) ztest_spa_prop_set_uint64(ZPOOL_PROP_DEDUPDITTO,
ZIO_DEDUPDITTO_MIN + ztest_random(ZIO_DEDUPDITTO_MIN));
VERIFY0(spa_prop_get(ztest_spa, &props));
@@ -4653,11 +4654,11 @@
if (ztest_opts.zo_verbose >= 6)
dump_nvlist(props, 4);
nvlist_free(props);
- (void) rw_unlock(&ztest_name_lock);
+ rw_exit(&ztest_name_lock);
}
static int
user_release_one(const char *snapname, const char *holdname)
{
@@ -4688,11 +4689,11 @@
char clonename[100];
char tag[100];
char osname[ZFS_MAX_DATASET_NAME_LEN];
nvlist_t *holds;
- (void) rw_rdlock(&ztest_name_lock);
+ rw_enter(&ztest_name_lock, RW_READER);
dmu_objset_name(os, osname);
(void) snprintf(snapname, sizeof (snapname), "sh1_%llu", id);
(void) snprintf(fullname, sizeof (fullname), "%s@%s", osname, snapname);
@@ -4793,11 +4794,11 @@
fatal(0, "user_release_one(%s, %s) = %d", fullname, tag, error);
VERIFY3U(dmu_objset_hold(fullname, FTAG, &origin), ==, ENOENT);
out:
- (void) rw_unlock(&ztest_name_lock);
+ rw_exit(&ztest_name_lock);
}
/*
* Inject random faults into the on-disk data.
*/
@@ -4821,25 +4822,25 @@
int mirror_save;
vdev_t *vd0 = NULL;
uint64_t guid0 = 0;
boolean_t islog = B_FALSE;
- VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
+ mutex_enter(&ztest_vdev_lock);
maxfaults = MAXFAULTS();
leaves = MAX(zs->zs_mirrors, 1) * ztest_opts.zo_raidz;
mirror_save = zs->zs_mirrors;
- VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
+ mutex_exit(&ztest_vdev_lock);
ASSERT(leaves >= 1);
/*
* Grab the name lock as reader. There are some operations
* which don't like to have their vdevs changed while
* they are in progress (i.e. spa_change_guid). Those
* operations will have grabbed the name lock as writer.
*/
- (void) rw_rdlock(&ztest_name_lock);
+ rw_enter(&ztest_name_lock, RW_READER);
/*
* We need SCL_STATE here because we're going to look at vd0->vdev_tsd.
*/
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
@@ -4904,11 +4905,11 @@
*/
spa_aux_vdev_t *sav = &spa->spa_l2cache;
if (sav->sav_count == 0) {
spa_config_exit(spa, SCL_STATE, FTAG);
- (void) rw_unlock(&ztest_name_lock);
+ rw_exit(&ztest_name_lock);
return;
}
vd0 = sav->sav_vdevs[ztest_random(sav->sav_count)];
guid0 = vd0->vdev_guid;
(void) strcpy(path0, vd0->vdev_path);
@@ -4918,11 +4919,11 @@
leaves = 1;
maxfaults = INT_MAX; /* no limit on cache devices */
}
spa_config_exit(spa, SCL_STATE, FTAG);
- (void) rw_unlock(&ztest_name_lock);
+ rw_exit(&ztest_name_lock);
/*
* If we can tolerate two or more faults, or we're dealing
* with a slog, randomly online/offline vd0.
*/
@@ -4938,16 +4939,16 @@
* grab a reference on the dataset which may cause
* dmu_objset_destroy() to fail with EBUSY thus
* leaving the dataset in an inconsistent state.
*/
if (islog)
- (void) rw_wrlock(&ztest_name_lock);
+ rw_enter(&ztest_name_lock, RW_WRITER);
VERIFY(vdev_offline(spa, guid0, flags) != EBUSY);
if (islog)
- (void) rw_unlock(&ztest_name_lock);
+ rw_exit(&ztest_name_lock);
} else {
/*
* Ideally we would like to be able to randomly
* call vdev_[on|off]line without holding locks
* to force unpredictable failures but the side
@@ -4954,13 +4955,13 @@
* effects of vdev_[on|off]line prevent us from
* doing so. We grab the ztest_vdev_lock here to
* prevent a race between injection testing and
* aux_vdev removal.
*/
- VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
+ mutex_enter(&ztest_vdev_lock);
(void) vdev_online(spa, guid0, 0, NULL);
- VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
+ mutex_exit(&ztest_vdev_lock);
}
}
if (maxfaults == 0)
return;
@@ -5028,22 +5029,22 @@
uint64_t psize = P2ALIGN(fsize, sizeof (vdev_label_t));
if ((leaf & 1) == 1 &&
offset + sizeof (bad) > psize - VDEV_LABEL_END_SIZE)
continue;
- VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
+ mutex_enter(&ztest_vdev_lock);
if (mirror_save != zs->zs_mirrors) {
- VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
+ mutex_exit(&ztest_vdev_lock);
(void) close(fd);
return;
}
if (pwrite(fd, &bad, sizeof (bad), offset) != sizeof (bad))
fatal(1, "can't inject bad word at 0x%llx in %s",
offset, pathrand);
- VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
+ mutex_exit(&ztest_vdev_lock);
if (ztest_opts.zo_verbose >= 7)
(void) printf("injected bad word into %s,"
" offset 0x%llx\n", pathrand, (u_longlong_t)offset);
}
@@ -5079,17 +5080,17 @@
/*
* Take the name lock as writer to prevent anyone else from changing
* the pool and dataset properties we need to maintain during this test.
*/
- (void) rw_wrlock(&ztest_name_lock);
+ rw_enter(&ztest_name_lock, RW_WRITER);
if (ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_DEDUP, checksum,
B_FALSE) != 0 ||
ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_COPIES, 1,
B_FALSE) != 0) {
- (void) rw_unlock(&ztest_name_lock);
+ rw_exit(&ztest_name_lock);
return;
}
dmu_objset_stats_t dds;
dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
@@ -5104,11 +5105,11 @@
tx = dmu_tx_create(os);
dmu_tx_hold_write(tx, object, 0, copies * blocksize);
txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
if (txg == 0) {
- (void) rw_unlock(&ztest_name_lock);
+ rw_exit(&ztest_name_lock);
return;
}
/*
* Write all the copies of our block.
@@ -5152,11 +5153,11 @@
abd, psize, NULL, NULL, ZIO_PRIORITY_SYNC_WRITE,
ZIO_FLAG_CANFAIL | ZIO_FLAG_INDUCE_DAMAGE, NULL));
abd_free(abd);
- (void) rw_unlock(&ztest_name_lock);
+ rw_exit(&ztest_name_lock);
}
/*
* Scrub the pool.
*/
@@ -5183,13 +5184,13 @@
int error;
orig = spa_guid(spa);
load = spa_load_guid(spa);
- (void) rw_wrlock(&ztest_name_lock);
+ rw_enter(&ztest_name_lock, RW_WRITER);
error = spa_change_guid(spa);
- (void) rw_unlock(&ztest_name_lock);
+ rw_exit(&ztest_name_lock);
if (error != 0)
return;
if (ztest_opts.zo_verbose >= 4) {
@@ -5209,11 +5210,11 @@
ztest_spa_rename(ztest_ds_t *zd, uint64_t id)
{
char *oldname, *newname;
spa_t *spa;
- (void) rw_wrlock(&ztest_name_lock);
+ rw_enter(&ztest_name_lock, RW_WRITER);
oldname = ztest_opts.zo_pool;
newname = umem_alloc(strlen(oldname) + 5, UMEM_NOFAIL);
(void) strcpy(newname, oldname);
(void) strcat(newname, "_tmp");
@@ -5249,11 +5250,11 @@
ASSERT(spa == ztest_spa);
spa_close(spa, FTAG);
umem_free(newname, strlen(newname) + 1);
- (void) rw_unlock(&ztest_name_lock);
+ rw_exit(&ztest_name_lock);
}
/*
* Verify pool integrity by running zdb.
*/
@@ -5604,22 +5605,22 @@
char name[ZFS_MAX_DATASET_NAME_LEN];
int error;
ztest_dataset_name(name, ztest_opts.zo_pool, d);
- (void) rw_rdlock(&ztest_name_lock);
+ rw_enter(&ztest_name_lock, RW_READER);
error = ztest_dataset_create(name);
if (error == ENOSPC) {
- (void) rw_unlock(&ztest_name_lock);
+ rw_exit(&ztest_name_lock);
ztest_record_enospc(FTAG);
return (error);
}
ASSERT(error == 0 || error == EEXIST);
VERIFY0(dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, zd, &os));
- (void) rw_unlock(&ztest_name_lock);
+ rw_exit(&ztest_name_lock);
ztest_zd_init(zd, ZTEST_GET_SHARED_DS(d), os);
zilog = zd->zd_zilog;
@@ -5666,23 +5667,23 @@
* Kick off threads to run tests on all datasets in parallel.
*/
static void
ztest_run(ztest_shared_t *zs)
{
- thread_t *tid;
+ pthread_t *tid;
spa_t *spa;
objset_t *os;
- thread_t resume_tid;
+ pthread_t resume_tid;
+ pthread_t dead_tid;
int error;
ztest_exiting = B_FALSE;
/*
* Initialize parent/child shared state.
*/
- VERIFY(_mutex_init(&ztest_vdev_lock, USYNC_THREAD, NULL) == 0);
- VERIFY(rwlock_init(&ztest_name_lock, USYNC_THREAD, NULL) == 0);
+ mutex_init(&ztest_vdev_lock, NULL, USYNC_THREAD, NULL);
+ rw_init(&ztest_name_lock, NULL, USYNC_THREAD, NULL);
zs->zs_thread_start = gethrtime();
zs->zs_thread_stop =
zs->zs_thread_start + ztest_opts.zo_passtime * NANOSEC;
zs->zs_thread_stop = MIN(zs->zs_thread_stop, zs->zs_proc_stop);
@@ -5690,11 +5691,11 @@
if (ztest_random(100) < ztest_opts.zo_killrate) {
zs->zs_thread_kill -=
ztest_random(ztest_opts.zo_passtime * NANOSEC);
}
- (void) _mutex_init(&zcl.zcl_callbacks_lock, USYNC_THREAD, NULL);
+ mutex_init(&zcl.zcl_callbacks_lock, NULL, USYNC_THREAD, NULL);
list_create(&zcl.zcl_callbacks, sizeof (ztest_cb_data_t),
offsetof(ztest_cb_data_t, zcd_node));
/*
@@ -5728,18 +5729,18 @@
spa->spa_failmode = ZIO_FAILURE_MODE_PANIC;
/*
* Create a thread to periodically resume suspended I/O.
*/
- VERIFY(thr_create(0, 0, ztest_resume_thread, spa, THR_BOUND,
- &resume_tid) == 0);
+ VERIFY(pthread_create(&resume_tid, NULL, ztest_resume_thread,
+ spa) == 0);
/*
* Create a deadman thread to abort() if we hang.
*/
- VERIFY(thr_create(0, 0, ztest_deadman_thread, zs, THR_BOUND,
- NULL) == 0);
+ VERIFY(pthread_create(&dead_tid, NULL, ztest_deadman_thread,
+ zs) == 0);
/*
* Verify that we can safely inquire about any object,
* whether it's allocated or not. To make it interesting,
* we probe a 5-wide window around each power of two.
@@ -5761,11 +5762,11 @@
int d = ztest_random(ztest_opts.zo_datasets);
ztest_dataset_destroy(d);
}
zs->zs_enospc_count = 0;
- tid = umem_zalloc(ztest_opts.zo_threads * sizeof (thread_t),
+ tid = umem_zalloc(ztest_opts.zo_threads * sizeof (pthread_t),
UMEM_NOFAIL);
if (ztest_opts.zo_verbose >= 4)
(void) printf("starting main threads...\n");
@@ -5774,20 +5775,20 @@
*/
for (int t = 0; t < ztest_opts.zo_threads; t++) {
if (t < ztest_opts.zo_datasets &&
ztest_dataset_open(t) != 0)
return;
- VERIFY(thr_create(0, 0, ztest_thread, (void *)(uintptr_t)t,
- THR_BOUND, &tid[t]) == 0);
+ VERIFY(pthread_create(&tid[t], NULL, ztest_thread,
+ (void *)(uintptr_t)t) == 0);
}
/*
* Wait for all of the tests to complete. We go in reverse order
* so we don't close datasets while threads are still using them.
*/
for (int t = ztest_opts.zo_threads - 1; t >= 0; t--) {
- VERIFY(thr_join(tid[t], NULL, NULL) == 0);
+ VERIFY(pthread_join(tid[t], NULL) == 0);
if (t < ztest_opts.zo_datasets)
ztest_dataset_close(t);
}
txg_wait_synced(spa_get_dsl(spa), 0);
@@ -5794,15 +5795,15 @@
zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(spa));
zs->zs_space = metaslab_class_get_space(spa_normal_class(spa));
zfs_dbgmsg_print(FTAG);
- umem_free(tid, ztest_opts.zo_threads * sizeof (thread_t));
+ umem_free(tid, ztest_opts.zo_threads * sizeof (pthread_t));
/* Kill the resume thread */
ztest_exiting = B_TRUE;
- VERIFY(thr_join(resume_tid, NULL, NULL) == 0);
+ VERIFY(pthread_join(resume_tid, NULL) == 0);
ztest_resume(spa);
/*
* Right before closing the pool, kick off a bunch of async I/O;
* spa_close() should wait for it to complete.
@@ -5837,14 +5838,14 @@
kernel_fini();
list_destroy(&zcl.zcl_callbacks);
- (void) _mutex_destroy(&zcl.zcl_callbacks_lock);
+ mutex_destroy(&zcl.zcl_callbacks_lock);
- (void) rwlock_destroy(&ztest_name_lock);
- (void) _mutex_destroy(&ztest_vdev_lock);
+ rw_destroy(&ztest_name_lock);
+ mutex_destroy(&ztest_vdev_lock);
}
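/*
 * The thread-management conversion in ztest_run() above follows the
 * same pattern as the lock conversion: bound libc threads become
 * pthreads. A minimal before/after sketch (func and arg are
 * placeholders):
 *
 *	thread_t tid;				(before)
 *	VERIFY(thr_create(0, 0, func, arg, THR_BOUND, &tid) == 0);
 *	VERIFY(thr_join(tid, NULL, NULL) == 0);
 *
 *	pthread_t tid;				(after)
 *	VERIFY(pthread_create(&tid, NULL, func, arg) == 0);
 *	VERIFY(pthread_join(tid, NULL) == 0);
 *
 * THR_BOUND has no explicit pthread equivalent here; illumos pthreads
 * are bound (1:1) by default, so the flag can simply be dropped.
 */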
static void
ztest_freeze(void)
{
@@ -5984,12 +5985,12 @@
ztest_init(ztest_shared_t *zs)
{
spa_t *spa;
nvlist_t *nvroot, *props;
- VERIFY(_mutex_init(&ztest_vdev_lock, USYNC_THREAD, NULL) == 0);
- VERIFY(rwlock_init(&ztest_name_lock, USYNC_THREAD, NULL) == 0);
+ mutex_init(&ztest_vdev_lock, NULL, USYNC_THREAD, NULL);
+ rw_init(&ztest_name_lock, NULL, USYNC_THREAD, NULL);
kernel_init(FREAD | FWRITE);
/*
* Create the storage pool.
@@ -6023,12 +6024,12 @@
ztest_freeze();
ztest_run_zdb(ztest_opts.zo_pool);
- (void) rwlock_destroy(&ztest_name_lock);
- (void) _mutex_destroy(&ztest_vdev_lock);
+ rw_destroy(&ztest_name_lock);
+ mutex_destroy(&ztest_vdev_lock);
}
static void
setup_data_fd(void)
{