	.zo_alt_libpath = { '\0' },
	.zo_vdevs = 5,
	.zo_ashift = SPA_MINBLOCKSHIFT,
	.zo_mirrors = 2,
	.zo_raidz = 4,
	.zo_raidz_parity = 1,
	.zo_vdev_size = SPA_MINDEVSIZE,
	.zo_datasets = 7,
	.zo_threads = 23,
	.zo_passtime = 60,		/* 60 seconds */
	.zo_killrate = 70,		/* 70% kill rate */
	.zo_verbose = 0,
	.zo_init = 1,
	.zo_time = 300,			/* 5 minutes */
	.zo_maxloops = 50,		/* max loops during spa_freeze() */
	.zo_metaslab_gang_bang = 32 << 10
};

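/*
 * Tunables defined in libzpool; ztest reaches in and overrides them
 * directly, e.g. to force gang-block allocation and to bound the
 * deadman timeout.
 */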
extern uint64_t metaslab_gang_bang;
extern uint64_t metaslab_df_alloc_threshold;
extern uint64_t zfs_deadman_synctime;

static ztest_shared_opts_t *ztest_shared_opts;
static ztest_shared_opts_t ztest_opts;

typedef struct ztest_shared_ds {
	uint64_t	zd_seq;
} ztest_shared_ds_t;

static ztest_shared_ds_t *ztest_shared_ds;
#define	ZTEST_GET_SHARED_DS(d)	(&ztest_shared_ds[d])

#define	BT_MAGIC	0x123456789abcdefULL
#define	MAXFAULTS() \
	(MAX(zs->zs_mirrors, 1) * (ztest_opts.zo_raidz_parity + 1) - 1)
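
/*
 * Worked example with the defaults above: zo_mirrors == 2 and
 * zo_raidz_parity == 1 give MAX(2, 1) * (1 + 1) - 1 == 3. By the
 * pigeonhole principle, any three leaf faults leave at least one
 * mirror half with no more than one fault, which single-parity raidz
 * absorbs; a fourth fault could exceed parity on both halves at once.
 * (The macro relies on a ztest_shared_t *zs in scope at the use site.)
 */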

enum ztest_io_type {
	ZTEST_IO_WRITE_TAG,
	ZTEST_IO_WRITE_PATTERN,
	ZTEST_IO_WRITE_ZEROES,
	ZTEST_IO_TRUNCATE,
/* ... */
	{ ztest_dmu_object_alloc_free, 1, &zopt_always },
	{ ztest_dmu_commit_callbacks, 1, &zopt_always },
	{ ztest_zap, 30, &zopt_always },
	{ ztest_zap_parallel, 100, &zopt_always },
	{ ztest_split_pool, 1, &zopt_always },
	{ ztest_zil_commit, 1, &zopt_incessant },
	{ ztest_zil_remount, 1, &zopt_sometimes },
	{ ztest_dmu_read_write_zcopy, 1, &zopt_often },
	{ ztest_dmu_objset_create_destroy, 1, &zopt_often },
	{ ztest_dsl_prop_get_set, 1, &zopt_often },
	{ ztest_spa_prop_get_set, 1, &zopt_sometimes },
#if 0
	{ ztest_dmu_prealloc, 1, &zopt_sometimes },
#endif
	{ ztest_fzap, 1, &zopt_sometimes },
	{ ztest_dmu_snapshot_create_destroy, 1, &zopt_sometimes },
	{ ztest_spa_create_destroy, 1, &zopt_sometimes },
	{ ztest_fault_inject, 1, &zopt_sometimes },
	{ ztest_ddt_repair, 1, &zopt_sometimes },
	{ ztest_dmu_snapshot_hold, 1, &zopt_sometimes },
	{ ztest_reguid, 1, &zopt_rarely },
	{ ztest_spa_rename, 1, &zopt_rarely },
	{ ztest_scrub, 1, &zopt_rarely },
	{ ztest_spa_upgrade, 1, &zopt_rarely },
	{ ztest_dsl_dataset_promote_busy, 1, &zopt_rarely },
	{ ztest_vdev_attach_detach, 1, &zopt_sometimes },
	{ ztest_vdev_LUN_growth, 1, &zopt_rarely },
	{ ztest_vdev_add_remove, 1, &ztest_opts.zo_vdevtime },
	{ ztest_vdev_aux_add_remove, 1, &ztest_opts.zo_vdevtime },
};

#define	ZTEST_FUNCS	(sizeof (ztest_info) / sizeof (ztest_info_t))

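/*
 * Illustrative sketch only, not the actual ztest scheduler: each worker
 * repeatedly picks a random table entry and runs it no more often than
 * the interval its third column names. `last_run' is a hypothetical
 * per-entry timestamp array, and the interval field name is assumed
 * from the initializers above.
 */
#if 0
static void
ztest_dispatch_sketch(uint64_t id)
{
	int f = ztest_random(ZTEST_FUNCS);
	ztest_info_t *zi = &ztest_info[f];

	if (gethrtime() - last_run[f] >= *zi->zi_interval) {
		last_run[f] = gethrtime();
		ztest_execute(f, zi, id);	/* runs zi_func zi_iters times */
	}
}
#endif
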
/*
 * The following struct is used to hold a list of uncalled commit callbacks.
 * The callbacks are ordered by txg number.
 */
typedef struct ztest_cb_list {
	mutex_t	zcl_callbacks_lock;
/* ... */
	char path0[MAXPATHLEN];
	char pathrand[MAXPATHLEN];
	size_t fsize;
	int bshift = SPA_MAXBLOCKSHIFT + 2;	/* don't scrog all labels */
	int iters = 1000;
	int maxfaults;
	int mirror_save;
	vdev_t *vd0 = NULL;
	uint64_t guid0 = 0;
	boolean_t islog = B_FALSE;

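	/*
	 * Read the fault budget and leaf topology under ztest_vdev_lock
	 * so concurrent tests that reshape the pool (e.g. splits or
	 * attach/detach) can't change them mid-calculation.
	 */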
	VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
	maxfaults = MAXFAULTS();
	leaves = MAX(zs->zs_mirrors, 1) * ztest_opts.zo_raidz;
	mirror_save = zs->zs_mirrors;
	VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);

	ASSERT(leaves >= 1);

	/*
	 * Grab the name lock as reader. There are some operations
	 * which don't like to have their vdevs changed while they
	 * are in progress (e.g. spa_change_guid). Those operations
	 * will have grabbed the name lock as writer.
	 */
	(void) rw_rdlock(&ztest_name_lock);

	/*
	 * We need SCL_STATE here because we're going to look at vd0->vdev_tsd.
	 */
	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);

	if (ztest_random(2) == 0) {
		/*
		 * Inject errors on a normal data device or slog device.
		 */
		top = ztest_random_vdev_top(spa, B_TRUE);
		leaf = ztest_random(leaves) + zs->zs_splits;

		/*
		 * Generate paths to the first leaf in this top-level vdev,
		 * and to the random leaf we selected. We'll induce transient
		 * write failures and random online/offline activity on leaf 0,
		 * and we'll write random garbage to the randomly chosen leaf.
		 */
		(void) snprintf(path0, sizeof (path0), ztest_dev_template,
		    ztest_opts.zo_dir, ztest_opts.zo_pool,
		    top * leaves + zs->zs_splits);
		(void) snprintf(pathrand, sizeof (pathrand), ztest_dev_template,
		    ztest_opts.zo_dir, ztest_opts.zo_pool,
		    top * leaves + leaf);

		vd0 = vdev_lookup_by_path(spa->spa_root_vdev, path0);
		if (vd0 != NULL && vd0->vdev_top->vdev_islog)
			islog = B_TRUE;

		/*
		 * If the top-level vdev needs to be resilvered
		 * then we only allow faults on the device that is
		 * resilvering.
		 */
		if (vd0 != NULL && maxfaults != 1 &&
		    (!vdev_resilver_needed(vd0->vdev_top, NULL, NULL) ||
		    vd0->vdev_resilvering)) {
			/*
			 * Make vd0 explicitly claim to be unreadable,
			 * or unwritable, or reach behind its back
			 * and close the underlying fd. We can do this if
			 * maxfaults == 0 because we'll fail and re-execute,
			 * and we can do it if maxfaults >= 2 because we'll
			 * have enough redundancy. If maxfaults == 1, the
			 * combination of this with injection of random data
			 * corruption below exceeds the pool's fault tolerance.
			 */
			vdev_file_t *vf = vd0->vdev_tsd;

			if (vf != NULL && ztest_random(3) == 0) {
				(void) close(vf->vf_vnode->v_fd);
				vf->vf_vnode->v_fd = -1;
			} else if (ztest_random(2) == 0) {
				vd0->vdev_cant_read = B_TRUE;
			} else {
				vd0->vdev_cant_write = B_TRUE;
			}
			guid0 = vd0->vdev_guid;
		}
	} else {
		/*
		 * Inject errors on an l2cache device.
		 */
		spa_aux_vdev_t *sav = &spa->spa_l2cache;

		if (sav->sav_count == 0) {
			spa_config_exit(spa, SCL_STATE, FTAG);
			(void) rw_unlock(&ztest_name_lock);
			return;
		}
		vd0 = sav->sav_vdevs[ztest_random(sav->sav_count)];
		guid0 = vd0->vdev_guid;
		(void) strcpy(path0, vd0->vdev_path);
		(void) strcpy(pathrand, vd0->vdev_path);

		leaf = 0;
		leaves = 1;
		maxfaults = INT_MAX;	/* no limit on cache devices */
	}

	spa_config_exit(spa, SCL_STATE, FTAG);
	(void) rw_unlock(&ztest_name_lock);

	/*
	 * If we can tolerate two or more faults, or we're dealing
	 * with a slog, randomly online/offline vd0.
	 */
	if ((maxfaults >= 2 || islog) && guid0 != 0) {
		if (ztest_random(10) < 6) {
			int flags = (ztest_random(2) == 0 ?
			    ZFS_OFFLINE_TEMPORARY : 0);

			/*
			 * We have to grab the ztest_name_lock as writer to
			 * prevent a race between offlining a slog and
			 * destroying a dataset. Offlining the slog will
			 * grab a reference on the dataset which may cause
			 * dmu_objset_destroy() to fail with EBUSY, thus
			 * leaving the dataset in an inconsistent state.
			 */
			if (islog)
				(void) rw_wrlock(&ztest_name_lock);
/* ... */
	(void) zio_resume(spa);
}

static void *
ztest_resume_thread(void *arg)
{
	spa_t *spa = arg;

	while (!ztest_exiting) {
		if (spa_suspended(spa))
			ztest_resume(spa);
		(void) poll(NULL, 0, 100);
	}
	return (NULL);
}

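/*
 * Usage sketch (assumed driver code, not shown in this excerpt): the
 * main process would start the resume thread once the pool is open and
 * stop it by setting ztest_exiting before joining, e.g.:
 */
#if 0
	thread_t resume_tid;

	VERIFY(thr_create(0, 0, ztest_resume_thread, spa, THR_BOUND,
	    &resume_tid) == 0);
	/* ... run the workload ... */
	ztest_exiting = B_TRUE;
	VERIFY(thr_join(resume_tid, NULL, NULL) == 0);
#endif
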
static void *
ztest_deadman_thread(void *arg)
{
	ztest_shared_t *zs = arg;
	spa_t *spa = ztest_spa;
	hrtime_t delta, total = 0;

	for (;;) {
		delta = (zs->zs_thread_stop - zs->zs_thread_start) /
		    NANOSEC + zfs_deadman_synctime;

		(void) poll(NULL, 0, (int)(1000 * delta));

		/*
		 * If the pool is suspended then fail immediately. Otherwise,
		 * check to see if the pool is making any progress. If
		 * vdev_deadman() discovers that there haven't been any
		 * recent I/Os then it will end up aborting the tests.
		 */
		if (spa_suspended(spa)) {
			fatal(0, "aborting test after %llu seconds because "
			    "pool has transitioned to a suspended state.",
			    zfs_deadman_synctime);
			return (NULL);
		}
		vdev_deadman(spa->spa_root_vdev);

		total += zfs_deadman_synctime;
		(void) printf("ztest has been running for %lld seconds\n",
		    total);
	}
}
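
/*
 * Timing sketch, assuming the 300-second zfs_deadman_synctime set in
 * main(): if the worker window (zs_thread_stop - zs_thread_start)
 * spans 60 seconds, the first check fires after
 * poll(NULL, 0, 1000 * (60 + 300)), i.e. six minutes in, and each
 * pass around the loop adds another zfs_deadman_synctime to the
 * total that gets printed.
 */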

static void
ztest_execute(int test, ztest_info_t *zi, uint64_t id)
{
	ztest_ds_t *zd = &ztest_ds[id % ztest_opts.zo_datasets];
	ztest_shared_callstate_t *zc = ZTEST_GET_SHARED_CALLSTATE(test);
	hrtime_t functime = gethrtime();

	for (int i = 0; i < zi->zi_iters; i++)
		zi->zi_func(zd, id);

	functime = gethrtime() - functime;

	atomic_add_64(&zc->zc_count, 1);
	atomic_add_64(&zc->zc_time, functime);

	if (ztest_opts.zo_verbose >= 4) {
		Dl_info dli;
		(void) dladdr((void *)zi->zi_func, &dli);
/* ... */

int
main(int argc, char **argv)
{
	int kills = 0;
	int iters = 0;
	int older = 0;
	int newer = 0;
	ztest_shared_t *zs;
	ztest_info_t *zi;
	ztest_shared_callstate_t *zc;
	char timebuf[100];
	char numbuf[6];
	spa_t *spa;
	char *cmd;
	boolean_t hasalt;
	char *fd_data_str = getenv("ZTEST_FD_DATA");

	(void) setvbuf(stdout, NULL, _IOLBF, 0);

	dprintf_setup(&argc, argv);
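	/* Deadman interval, in seconds (see ztest_deadman_thread()). */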
	zfs_deadman_synctime = 300;

	ztest_fd_rand = open("/dev/urandom", O_RDONLY);
	ASSERT3S(ztest_fd_rand, >=, 0);

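	/*
	 * ZTEST_FD_DATA is only present in the environment of a child
	 * ztest process: the parent parses the command line, then
	 * publishes the options through the shared data mapping; a
	 * child attaches the same mapping via the inherited descriptor
	 * and copies the options back out.
	 */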
	if (!fd_data_str) {
		process_options(argc, argv);

		setup_data_fd();
		setup_hdr();
		setup_data();
		bcopy(&ztest_opts, ztest_shared_opts,
		    sizeof (*ztest_shared_opts));
	} else {
		ztest_fd_data = atoi(fd_data_str);
		setup_data();
		bcopy(ztest_shared_opts, &ztest_opts, sizeof (ztest_opts));
	}
	ASSERT3U(ztest_opts.zo_datasets, ==, ztest_shared_hdr->zh_ds_count);

	/* Override location of zpool.cache */