955 }
956
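/*
 * Set a DSL property on a dataset to the given value, or revert it to
 * the inherited value when 'inherit' is set.  ENOSPC is tolerated and
 * recorded; the new value is read back (and printed when verbose).
 */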
957 static int
958 ztest_dsl_prop_set_uint64(char *osname, zfs_prop_t prop, uint64_t value,
959 boolean_t inherit)
960 {
961 const char *propname = zfs_prop_to_name(prop);
962 const char *valname;
963 char setpoint[MAXPATHLEN];
964 uint64_t curval;
965 int error;
966
967 error = dsl_prop_set(osname, propname,
968 (inherit ? ZPROP_SRC_NONE : ZPROP_SRC_LOCAL),
969 sizeof (value), 1, &value);
970
971 if (error == ENOSPC) {
972 ztest_record_enospc(FTAG);
973 return (error);
974 }
975 ASSERT0(error);
976
977 VERIFY3U(dsl_prop_get(osname, propname, sizeof (curval),
978 1, &curval, setpoint), ==, 0);
979
980 if (ztest_opts.zo_verbose >= 6) {
981 VERIFY(zfs_prop_index_to_string(prop, curval, &valname) == 0);
982 (void) printf("%s %s = %s at '%s'\n",
983 osname, propname, valname, setpoint);
984 }
985
986 return (error);
987 }
988
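/*
 * Set a single pool property via spa_prop_set(), tolerating ENOSPC.
 */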
989 static int
990 ztest_spa_prop_set_uint64(zpool_prop_t prop, uint64_t value)
991 {
992 spa_t *spa = ztest_spa;
993 nvlist_t *props = NULL;
994 int error;
995
996 VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0);
997 VERIFY(nvlist_add_uint64(props, zpool_prop_to_name(prop), value) == 0);
998
999 error = spa_prop_set(spa, props);
1000
1001 nvlist_free(props);
1002
1003 if (error == ENOSPC) {
1004 ztest_record_enospc(FTAG);
1005 return (error);
1006 }
1007 ASSERT0(error);
1008
1009 return (error);
1010 }
1011
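/*
 * Initialize and tear down an rll_t, the reader/writer lock that backs
 * ztest's object and range locks.
 */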
1012 static void
1013 ztest_rll_init(rll_t *rll)
1014 {
1015 rll->rll_writer = NULL;
1016 rll->rll_readers = 0;
1017 VERIFY(_mutex_init(&rll->rll_lock, USYNC_THREAD, NULL) == 0);
1018 VERIFY(cond_init(&rll->rll_cv, USYNC_THREAD, NULL) == 0);
1019 }
1020
1021 static void
1022 ztest_rll_destroy(rll_t *rll)
1023 {
1024 ASSERT(rll->rll_writer == NULL);
1025 ASSERT(rll->rll_readers == 0);
1026 VERIFY(_mutex_destroy(&rll->rll_lock) == 0);
1027 VERIFY(cond_destroy(&rll->rll_cv) == 0);
1395 lr->lr_foid = dmu_object_alloc(os,
1396 lr->lrz_type, 0, lr->lrz_bonustype,
1397 lr->lrz_bonuslen, tx);
1398 } else {
1399 error = dmu_object_claim(os, lr->lr_foid,
1400 lr->lrz_type, 0, lr->lrz_bonustype,
1401 lr->lrz_bonuslen, tx);
1402 }
1403 }
1404
1405 if (error) {
1406 ASSERT3U(error, ==, EEXIST);
1407 ASSERT(zd->zd_zilog->zl_replay);
1408 dmu_tx_commit(tx);
1409 return (error);
1410 }
1411
1412 ASSERT(lr->lr_foid != 0);
1413
1414 if (lr->lrz_type != DMU_OT_ZAP_OTHER)
1415 VERIFY0(dmu_object_set_blocksize(os, lr->lr_foid,
1416 lr->lrz_blocksize, lr->lrz_ibshift, tx));
1417
1418 VERIFY0(dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));
1419 bbt = ztest_bt_bonus(db);
1420 dmu_buf_will_dirty(db, tx);
1421 ztest_bt_generate(bbt, os, lr->lr_foid, -1ULL, lr->lr_gen, txg, txg);
1422 dmu_buf_rele(db, FTAG);
1423
1424 VERIFY0(zap_add(os, lr->lr_doid, name, sizeof (uint64_t), 1,
1425 &lr->lr_foid, tx));
1426
1427 (void) ztest_log_create(zd, tx, lr);
1428
1429 dmu_tx_commit(tx);
1430
1431 return (0);
1432 }
1433
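/*
 * ZIL replay handler for TX_REMOVE: look up the named object in the
 * directory ZAP, free it (zap_destroy() or dmu_object_free(), as
 * appropriate), and remove its directory entry.
 */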
1434 static int
1435 ztest_replay_remove(ztest_ds_t *zd, lr_remove_t *lr, boolean_t byteswap)
1436 {
1437 char *name = (void *)(lr + 1); /* name follows lr */
1438 objset_t *os = zd->zd_os;
1439 dmu_object_info_t doi;
1440 dmu_tx_t *tx;
1441 uint64_t object, txg;
1442
1443 if (byteswap)
1444 byteswap_uint64_array(lr, sizeof (*lr));
1445
1446 ASSERT(lr->lr_doid == ZTEST_DIROBJ);
1447 ASSERT(name[0] != '\0');
1448
1449 VERIFY0(
1450 zap_lookup(os, lr->lr_doid, name, sizeof (object), 1, &object));
1451 ASSERT(object != 0);
1452
1453 ztest_object_lock(zd, object, RL_WRITER);
1454
1455 VERIFY0(dmu_object_info(os, object, &doi));
1456
1457 tx = dmu_tx_create(os);
1458
1459 dmu_tx_hold_zap(tx, lr->lr_doid, B_FALSE, name);
1460 dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
1461
1462 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
1463 if (txg == 0) {
1464 ztest_object_unlock(zd, object);
1465 return (ENOSPC);
1466 }
1467
1468 if (doi.doi_type == DMU_OT_ZAP_OTHER) {
1469 VERIFY0(zap_destroy(os, object, tx));
1470 } else {
1471 VERIFY0(dmu_object_free(os, object, tx));
1472 }
1473
1474 VERIFY0(zap_remove(os, lr->lr_doid, name, tx));
1475
1476 (void) ztest_log_remove(zd, tx, lr, object);
1477
1478 dmu_tx_commit(tx);
1479
1480 ztest_object_unlock(zd, object);
1481
1482 return (0);
1483 }
1484
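/*
 * ZIL replay handler for TX_WRITE: validate the embedded block tag,
 * then rewrite the range under the object and range locks.  A record
 * with no trailing data describes a dmu_sync() block, in which case
 * the whole block is rewritten.
 */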
1485 static int
1486 ztest_replay_write(ztest_ds_t *zd, lr_write_t *lr, boolean_t byteswap)
1487 {
1488 objset_t *os = zd->zd_os;
1489 void *data = lr + 1; /* data follows lr */
1490 uint64_t offset, length;
1491 ztest_block_tag_t *bt = data;
1492 ztest_block_tag_t *bbt;
1493 uint64_t gen, txg, lrtxg, crtxg;
1494 dmu_object_info_t doi;
1504 length = lr->lr_length;
1505
1506 /* If it's a dmu_sync() block, write the whole block */
1507 if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
1508 uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
1509 if (length < blocksize) {
1510 offset -= offset % blocksize;
1511 length = blocksize;
1512 }
1513 }
1514
1515 if (bt->bt_magic == BSWAP_64(BT_MAGIC))
1516 byteswap_uint64_array(bt, sizeof (*bt));
1517
1518 if (bt->bt_magic != BT_MAGIC)
1519 bt = NULL;
1520
1521 ztest_object_lock(zd, lr->lr_foid, RL_READER);
1522 rl = ztest_range_lock(zd, lr->lr_foid, offset, length, RL_WRITER);
1523
1524 VERIFY0(dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));
1525
1526 dmu_object_info_from_db(db, &doi);
1527
1528 bbt = ztest_bt_bonus(db);
1529 ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
1530 gen = bbt->bt_gen;
1531 crtxg = bbt->bt_crtxg;
1532 lrtxg = lr->lr_common.lrc_txg;
1533
1534 tx = dmu_tx_create(os);
1535
1536 dmu_tx_hold_write(tx, lr->lr_foid, offset, length);
1537
1538 if (ztest_random(8) == 0 && length == doi.doi_data_block_size &&
1539 P2PHASE(offset, length) == 0)
1540 abuf = dmu_request_arcbuf(db, length);
1541
1542 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
1543 if (txg == 0) {
1544 if (abuf != NULL)
1643 ztest_range_unlock(rl);
1644 ztest_object_unlock(zd, lr->lr_foid);
1645
1646 return (0);
1647 }
1648
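/*
 * ZIL replay handler for TX_SETATTR: update the object's bonus buffer,
 * verifying its block tag against the replayed (or newly generated)
 * size, mode, and txg.
 */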
1649 static int
1650 ztest_replay_setattr(ztest_ds_t *zd, lr_setattr_t *lr, boolean_t byteswap)
1651 {
1652 objset_t *os = zd->zd_os;
1653 dmu_tx_t *tx;
1654 dmu_buf_t *db;
1655 ztest_block_tag_t *bbt;
1656 uint64_t txg, lrtxg, crtxg;
1657
1658 if (byteswap)
1659 byteswap_uint64_array(lr, sizeof (*lr));
1660
1661 ztest_object_lock(zd, lr->lr_foid, RL_WRITER);
1662
1663 VERIFY0(dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));
1664
1665 tx = dmu_tx_create(os);
1666 dmu_tx_hold_bonus(tx, lr->lr_foid);
1667
1668 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
1669 if (txg == 0) {
1670 dmu_buf_rele(db, FTAG);
1671 ztest_object_unlock(zd, lr->lr_foid);
1672 return (ENOSPC);
1673 }
1674
1675 bbt = ztest_bt_bonus(db);
1676 ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
1677 crtxg = bbt->bt_crtxg;
1678 lrtxg = lr->lr_common.lrc_txg;
1679
1680 if (zd->zd_zilog->zl_replay) {
1681 ASSERT(lr->lr_size != 0);
1682 ASSERT(lr->lr_mode != 0);
1683 ASSERT(lrtxg != 0);
1684 } else {
1685 /*
1686 * Randomly change the size and increment the generation.
1687 */
1688 lr->lr_size = (ztest_random(db->db_size / sizeof (*bbt)) + 1) *
1689 sizeof (*bbt);
1690 lr->lr_mode = bbt->bt_gen + 1;
1691 ASSERT(lrtxg == 0);
1692 }
1693
1694 /*
1695 * Verify that the current bonus buffer is not newer than our txg.
1696 */
1697 ztest_bt_verify(bbt, os, lr->lr_foid, -1ULL, lr->lr_mode,
1698 MAX(txg, lrtxg), crtxg);
1699
1700 dmu_buf_will_dirty(db, tx);
1701
1702 ASSERT3U(lr->lr_size, >=, sizeof (*bbt));
1703 ASSERT3U(lr->lr_size, <=, db->db_size);
1704 VERIFY0(dmu_set_bonus(db, lr->lr_size, tx));
1705 bbt = ztest_bt_bonus(db);
1706
1707 ztest_bt_generate(bbt, os, lr->lr_foid, -1ULL, lr->lr_mode, txg, crtxg);
1708
1709 dmu_buf_rele(db, FTAG);
1710
1711 (void) ztest_log_setattr(zd, tx, lr);
1712
1713 dmu_tx_commit(tx);
1714
1715 ztest_object_unlock(zd, lr->lr_foid);
1716
1717 return (0);
1718 }
1719
1720 zil_replay_func_t *ztest_replay_vector[TX_MAX_TYPE] = {
1721 NULL, /* 0 no such transaction type */
1722 ztest_replay_create, /* TX_CREATE */
1723 NULL, /* TX_MKDIR */
1724 NULL, /* TX_MKXATTR */
1876
1877 ASSERT(_mutex_held(&zd->zd_dirobj_lock));
1878
1879 for (int i = 0; i < count; i++, od++) {
1880 od->od_object = 0;
1881 error = zap_lookup(zd->zd_os, od->od_dir, od->od_name,
1882 sizeof (uint64_t), 1, &od->od_object);
1883 if (error) {
1884 ASSERT(error == ENOENT);
1885 ASSERT(od->od_object == 0);
1886 missing++;
1887 } else {
1888 dmu_buf_t *db;
1889 ztest_block_tag_t *bbt;
1890 dmu_object_info_t doi;
1891
1892 ASSERT(od->od_object != 0);
1893 ASSERT(missing == 0); /* there should be no gaps */
1894
1895 ztest_object_lock(zd, od->od_object, RL_READER);
1896 VERIFY0(dmu_bonus_hold(zd->zd_os,
1897 od->od_object, FTAG, &db));
1898 dmu_object_info_from_db(db, &doi);
1899 bbt = ztest_bt_bonus(db);
1900 ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
1901 od->od_type = doi.doi_type;
1902 od->od_blocksize = doi.doi_data_block_size;
1903 od->od_gen = bbt->bt_gen;
1904 dmu_buf_rele(db, FTAG);
1905 ztest_object_unlock(zd, od->od_object);
1906 }
1907 }
1908
1909 return (missing);
1910 }
1911
1912 static int
1913 ztest_create(ztest_ds_t *zd, ztest_od_t *od, int count)
1914 {
1915 int missing = 0;
1916
2254 VERIFY3U(ENOENT, ==,
2255 spa_create("ztest_bad_file", nvroot, NULL, NULL));
2256 nvlist_free(nvroot);
2257
2258 /*
2259 * Attempt to create using a bad mirror.
2260 */
2261 nvroot = make_vdev_root("/dev/bogus", NULL, 0, 0, 0, 0, 2, 1);
2262 VERIFY3U(ENOENT, ==,
2263 spa_create("ztest_bad_mirror", nvroot, NULL, NULL));
2264 nvlist_free(nvroot);
2265
2266 /*
2267 * Attempt to create an existing pool. It shouldn't matter
2268 * what's in the nvroot; we should fail with EEXIST.
2269 */
2270 (void) rw_rdlock(&ztest_name_lock);
2271 nvroot = make_vdev_root("/dev/bogus", NULL, 0, 0, 0, 0, 0, 1);
2272 VERIFY3U(EEXIST, ==, spa_create(zo->zo_pool, nvroot, NULL, NULL));
2273 nvlist_free(nvroot);
2274 VERIFY0(spa_open(zo->zo_pool, &spa, FTAG));
2275 VERIFY3U(EBUSY, ==, spa_destroy(zo->zo_pool));
2276 spa_close(spa, FTAG);
2277
2278 (void) rw_unlock(&ztest_name_lock);
2279 }
2280
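/*
 * Recursively search the vdev tree rooted at 'vd' for a vdev whose
 * vdev_path matches 'path'.
 */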
2281 static vdev_t *
2282 vdev_lookup_by_path(vdev_t *vd, const char *path)
2283 {
2284 vdev_t *mvd;
2285
2286 if (vd->vdev_path != NULL && strcmp(path, vd->vdev_path) == 0)
2287 return (vd);
2288
2289 for (int c = 0; c < vd->vdev_children; c++)
2290 if ((mvd = vdev_lookup_by_path(vd->vdev_child[c], path)) !=
2291 NULL)
2292 return (mvd);
2293
2294 return (NULL);
3013 if (err || zilset < 80)
3014 return (err);
3015
3016 if (ztest_opts.zo_verbose >= 6)
3017 (void) printf("Setting dataset %s to sync always\n", dsname);
3018 return (ztest_dsl_prop_set_uint64(dsname, ZFS_PROP_SYNC,
3019 ZFS_SYNC_ALWAYS, B_FALSE));
3020 }
3021
3022 /* ARGSUSED */
3023 static int
3024 ztest_objset_destroy_cb(const char *name, void *arg)
3025 {
3026 objset_t *os;
3027 dmu_object_info_t doi;
3028 int error;
3029
3030 /*
3031 * Verify that the dataset contains a directory object.
3032 */
3033 VERIFY0(dmu_objset_hold(name, FTAG, &os));
3034 error = dmu_object_info(os, ZTEST_DIROBJ, &doi);
3035 if (error != ENOENT) {
3036 /* We could have crashed in the middle of destroying it */
3037 ASSERT0(error);
3038 ASSERT3U(doi.doi_type, ==, DMU_OT_ZAP_OTHER);
3039 ASSERT3S(doi.doi_physical_blocks_512, >=, 0);
3040 }
3041 dmu_objset_rele(os, FTAG);
3042
3043 /*
3044 * Destroy the dataset.
3045 */
3046 VERIFY0(dmu_objset_destroy(name, B_FALSE));
3047 return (0);
3048 }
3049
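/*
 * Snapshot the given dataset as <osname>@<id>.  Returns B_FALSE on
 * ENOSPC; EEXIST is tolerated, and any other error is fatal.
 */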
3050 static boolean_t
3051 ztest_snapshot_create(char *osname, uint64_t id)
3052 {
3053 char snapname[MAXNAMELEN];
3054 int error;
3055
3056 (void) snprintf(snapname, MAXNAMELEN, "%s@%llu", osname,
3057 (u_longlong_t)id);
3058
3059 error = dmu_objset_snapshot_one(osname, strchr(snapname, '@') + 1);
3060 if (error == ENOSPC) {
3061 ztest_record_enospc(FTAG);
3062 return (B_FALSE);
3063 }
3064 if (error != 0 && error != EEXIST)
3065 fatal(0, "ztest_snapshot_create(%s) = %d", snapname, error);
3066 return (B_TRUE);
3119 DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
3120
3121 /*
3122 * Verify that the destroyed dataset is no longer in the namespace.
3123 */
3124 VERIFY3U(ENOENT, ==, dmu_objset_hold(name, FTAG, &os));
3125
3126 /*
3127 * Verify that we can create a new dataset.
3128 */
3129 error = ztest_dataset_create(name);
3130 if (error) {
3131 if (error == ENOSPC) {
3132 ztest_record_enospc(FTAG);
3133 (void) rw_unlock(&ztest_name_lock);
3134 return;
3135 }
3136 fatal(0, "dmu_objset_create(%s) = %d", name, error);
3137 }
3138
3139 VERIFY0(dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os));
3141
3142 ztest_zd_init(&zdtmp, NULL, os);
3143
3144 /*
3145 * Open the intent log for it.
3146 */
3147 zilog = zil_open(os, ztest_get_data);
3148
3149 /*
3150 * Put some objects in there, do a little I/O to them,
3151 * and randomly take a couple of snapshots along the way.
3152 */
3153 iters = ztest_random(5);
3154 for (int i = 0; i < iters; i++) {
3155 ztest_dmu_object_alloc_free(&zdtmp, id);
3156 if (ztest_random(iters) == 0)
3157 (void) ztest_snapshot_create(name, i);
3158 }
3159
3160 /*
3161 * Verify that we cannot create an existing dataset.
3162 */
3163 VERIFY3U(EEXIST, ==,
3164 dmu_objset_create(name, DMU_OST_OTHER, 0, NULL, NULL));
3165
3166 /*
3167 * Verify that we can hold an objset that is also owned.
3168 */
3169 VERIFY0(dmu_objset_hold(name, FTAG, &os2));
3170 dmu_objset_rele(os2, FTAG);
3171
3172 /*
3173 * Verify that we cannot own an objset that is already owned.
3174 */
3175 VERIFY3U(EBUSY, ==,
3176 dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os2));
3177
3178 zil_close(zilog);
3179 dmu_objset_disown(os, FTAG);
3180 ztest_zd_fini(&zdtmp);
3181
3182 (void) rw_unlock(&ztest_name_lock);
3183 }
3184
3185 /*
3186 * Verify that dmu_snapshot_{create,destroy,open,close} work as expected.
3187 */
3188 void
3189 ztest_dmu_snapshot_create_destroy(ztest_ds_t *zd, uint64_t id)
3426 packoff = n * sizeof (bufwad_t);
3427 packsize = s * sizeof (bufwad_t);
3428
3429 bigoff = n * chunksize;
3430 bigsize = s * chunksize;
3431
3432 packbuf = umem_alloc(packsize, UMEM_NOFAIL);
3433 bigbuf = umem_alloc(bigsize, UMEM_NOFAIL);
3434
3435 /*
3436 * free_percent of the time, free a range of bigobj rather than
3437 * overwriting it.
3438 */
3439 freeit = (ztest_random(100) < free_percent);
3440
3441 /*
3442 * Read the current contents of our objects.
3443 */
3444 error = dmu_read(os, packobj, packoff, packsize, packbuf,
3445 DMU_READ_PREFETCH);
3446 ASSERT0(error);
3447 error = dmu_read(os, bigobj, bigoff, bigsize, bigbuf,
3448 DMU_READ_PREFETCH);
3449 ASSERT0(error);
3450
3451 /*
3452 * Get a tx for the mods to both packobj and bigobj.
3453 */
3454 tx = dmu_tx_create(os);
3455
3456 dmu_tx_hold_write(tx, packobj, packoff, packsize);
3457
3458 if (freeit)
3459 dmu_tx_hold_free(tx, bigobj, bigoff, bigsize);
3460 else
3461 dmu_tx_hold_write(tx, bigobj, bigoff, bigsize);
3462
3463 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
3464 if (txg == 0) {
3465 umem_free(packbuf, packsize);
3466 umem_free(bigbuf, bigsize);
3467 return;
3468 }
3469
3669 VERIFY(dmu_object_info(os, bigobj, &doi) == 0);
3670 VERIFY(ISP2(doi.doi_data_block_size));
3671 VERIFY(chunksize == doi.doi_data_block_size);
3672 VERIFY(chunksize >= 2 * sizeof (bufwad_t));
3673
3674 /*
3675 * Pick a random index and compute the offsets into packobj and bigobj.
3676 */
3677 n = ztest_random(regions) * stride + ztest_random(width);
3678 s = 1 + ztest_random(width - 1);
3679
3680 packoff = n * sizeof (bufwad_t);
3681 packsize = s * sizeof (bufwad_t);
3682
3683 bigoff = n * chunksize;
3684 bigsize = s * chunksize;
3685
3686 packbuf = umem_zalloc(packsize, UMEM_NOFAIL);
3687 bigbuf = umem_zalloc(bigsize, UMEM_NOFAIL);
3688
3689 VERIFY0(dmu_bonus_hold(os, bigobj, FTAG, &bonus_db));
3690
3691 bigbuf_arcbufs = umem_zalloc(2 * s * sizeof (arc_buf_t *), UMEM_NOFAIL);
3692
3693 /*
3694 * Iteration 0 test zcopy for DB_UNCACHED dbufs.
3695 * Iteration 1 test zcopy to already referenced dbufs.
3696 * Iteration 2 test zcopy to dirty dbuf in the same txg.
3697 * Iteration 3 test zcopy to dbuf dirty in previous txg.
3698 * Iteration 4 test zcopy when dbuf is no longer dirty.
3699 * Iteration 5 test zcopy when it can't be done.
3700 * Iteration 6 one more zcopy write.
3701 */
3702 for (i = 0; i < 7; i++) {
3703 uint64_t j;
3704 uint64_t off;
3705
3706 /*
3707 * In iteration 5 (i == 5) use arcbufs
3708 * that don't match bigobj blksz to test
3709 * dmu_assign_arcbuf() when it can't directly
3739 } else {
3740 dmu_return_arcbuf(
3741 bigbuf_arcbufs[2 * j]);
3742 dmu_return_arcbuf(
3743 bigbuf_arcbufs[2 * j + 1]);
3744 }
3745 }
3746 umem_free(bigbuf_arcbufs, 2 * s * sizeof (arc_buf_t *));
3747 dmu_buf_rele(bonus_db, FTAG);
3748 return;
3749 }
3750
3751 /*
3752 * 50% of the time don't read objects in the 1st iteration to
3753 * test dmu_assign_arcbuf() for the case when there are no
3754 * existing dbufs for the specified offsets.
3755 */
3756 if (i != 0 || ztest_random(2) != 0) {
3757 error = dmu_read(os, packobj, packoff,
3758 packsize, packbuf, DMU_READ_PREFETCH);
3759 ASSERT0(error);
3760 error = dmu_read(os, bigobj, bigoff, bigsize,
3761 bigbuf, DMU_READ_PREFETCH);
3762 ASSERT0(error);
3763 }
3764 compare_and_update_pbbufs(s, packbuf, bigbuf, bigsize,
3765 n, chunksize, txg);
3766
3767 /*
3768 * We've verified all the old bufwads, and made new ones.
3769 * Now write them out.
3770 */
3771 dmu_write(os, packobj, packoff, packsize, packbuf, tx);
3772 if (ztest_opts.zo_verbose >= 7) {
3773 (void) printf("writing offset %llx size %llx"
3774 " txg %llx\n",
3775 (u_longlong_t)bigoff,
3776 (u_longlong_t)bigsize,
3777 (u_longlong_t)txg);
3778 }
3779 for (off = bigoff, j = 0; j < s; j++, off += chunksize) {
3780 dmu_buf_t *dbt;
3781 if (i != 5) {
3782 bcopy((caddr_t)bigbuf + (off - bigoff),
3921 char *hc[2] = { "s.acl.h", ".s.open.h.hyLZlg" };
3922
3923 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0);
3924
3925 if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0)
3926 return;
3927
3928 object = od[0].od_object;
3929
3930 /*
3931 * Generate a known hash collision, and verify that
3932 * we can lookup and remove both entries.
3933 */
3934 tx = dmu_tx_create(os);
3935 dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
3936 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
3937 if (txg == 0)
3938 return;
3939 for (i = 0; i < 2; i++) {
3940 value[i] = i;
3941 VERIFY0(zap_add(os, object, hc[i], sizeof (uint64_t),
3942 1, &value[i], tx));
3943 }
3944 for (i = 0; i < 2; i++) {
3945 VERIFY3U(EEXIST, ==, zap_add(os, object, hc[i],
3946 sizeof (uint64_t), 1, &value[i], tx));
3947 VERIFY0(zap_length(os, object, hc[i], &zl_intsize, &zl_ints));
3949 ASSERT3U(zl_intsize, ==, sizeof (uint64_t));
3950 ASSERT3U(zl_ints, ==, 1);
3951 }
3952 for (i = 0; i < 2; i++) {
3953 VERIFY0(zap_remove(os, object, hc[i], tx));
3954 }
3955 dmu_tx_commit(tx);
3956
3957 /*
3958 * Generate a bunch of random entries.
3959 */
3960 ints = MAX(ZTEST_ZAP_MIN_INTS, object % ZTEST_ZAP_MAX_INTS);
3961
3962 prop = ztest_random(ZTEST_ZAP_MAX_PROPS);
3963 (void) sprintf(propname, "prop_%llu", (u_longlong_t)prop);
3964 (void) sprintf(txgname, "txg_%llu", (u_longlong_t)prop);
3965 bzero(value, sizeof (value));
3966 last_txg = 0;
3967
3968 /*
3969 * If these zap entries already exist, validate their contents.
3970 */
3971 error = zap_length(os, object, txgname, &zl_intsize, &zl_ints);
3972 if (error == 0) {
3973 ASSERT3U(zl_intsize, ==, sizeof (uint64_t));
3994
3995 /*
3996 * Atomically update two entries in our zap object.
3997 * The first is named txg_%llu, and contains the txg
3998 * in which the property was last updated. The second
3999 * is named prop_%llu, and the nth element of its value
4000 * should be txg + object + n.
4001 */
4002 tx = dmu_tx_create(os);
4003 dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
4004 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
4005 if (txg == 0)
4006 return;
4007
4008 if (last_txg > txg)
4009 fatal(0, "zap future leak: old %llu new %llu", last_txg, txg);
4010
4011 for (i = 0; i < ints; i++)
4012 value[i] = txg + object + i;
4013
4014 VERIFY0(zap_update(os, object, txgname, sizeof (uint64_t),
4015 1, &txg, tx));
4016 VERIFY0(zap_update(os, object, propname, sizeof (uint64_t),
4017 ints, value, tx));
4018
4019 dmu_tx_commit(tx);
4020
4021 /*
4022 * Remove a random pair of entries.
4023 */
4024 prop = ztest_random(ZTEST_ZAP_MAX_PROPS);
4025 (void) sprintf(propname, "prop_%llu", (u_longlong_t)prop);
4026 (void) sprintf(txgname, "txg_%llu", (u_longlong_t)prop);
4027
4028 error = zap_length(os, object, txgname, &zl_intsize, &zl_ints);
4029
4030 if (error == ENOENT)
4031 return;
4032
4033 ASSERT0(error);
4034
4035 tx = dmu_tx_create(os);
4036 dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
4037 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
4038 if (txg == 0)
4039 return;
4040 VERIFY0(zap_remove(os, object, txgname, tx));
4041 VERIFY0(zap_remove(os, object, propname, tx));
4042 dmu_tx_commit(tx);
4043 }
4044
4045 /*
4046 * Testcase to test the upgrading of a microzap to fatzap.
4047 */
4048 void
4049 ztest_fzap(ztest_ds_t *zd, uint64_t id)
4050 {
4051 objset_t *os = zd->zd_os;
4052 ztest_od_t od[1];
4053 uint64_t object, txg;
4054
4055 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0);
4056
4057 if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0)
4058 return;
4059
4060 object = od[0].od_object;
4061
4209 /* This is the actual commit callback function */
4210 static void
4211 ztest_commit_callback(void *arg, int error)
4212 {
4213 ztest_cb_data_t *data = arg;
4214 uint64_t synced_txg;
4215
4216 VERIFY(data != NULL);
4217 VERIFY3S(data->zcd_expected_err, ==, error);
4218 VERIFY(!data->zcd_called);
4219
4220 synced_txg = spa_last_synced_txg(data->zcd_spa);
4221 if (data->zcd_txg > synced_txg)
4222 fatal(0, "commit callback of txg %" PRIu64 " called prematurely"
4223 ", last synced txg = %" PRIu64 "\n", data->zcd_txg,
4224 synced_txg);
4225
4226 data->zcd_called = B_TRUE;
4227
4228 if (error == ECANCELED) {
4229 ASSERT0(data->zcd_txg);
4230 ASSERT(!data->zcd_added);
4231
4232 /*
4233 * The private callback data should be destroyed here, but
4234 * since we are going to check the zcd_called field after
4235 * dmu_tx_abort(), we will destroy it there.
4236 */
4237 return;
4238 }
4239
4240 /* Was this callback added to the global callback list? */
4241 if (!data->zcd_added)
4242 goto out;
4243
4244 ASSERT3U(data->zcd_txg, !=, 0);
4245
4246 /* Remove our callback from the list */
4247 (void) mutex_lock(&zcl.zcl_callbacks_lock);
4248 list_remove(&zcl.zcl_callbacks, data);
4249 (void) mutex_unlock(&zcl.zcl_callbacks_lock);
4414 (void) rw_rdlock(&ztest_name_lock);
4415
4416 for (int p = 0; p < sizeof (proplist) / sizeof (proplist[0]); p++)
4417 (void) ztest_dsl_prop_set_uint64(zd->zd_name, proplist[p],
4418 ztest_random_dsl_prop(proplist[p]), (int)ztest_random(2));
4419
4420 (void) rw_unlock(&ztest_name_lock);
4421 }
4422
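/*
 * Exercise pool property get/set: nudge dedupditto, then fetch and
 * (optionally) dump the pool's properties.
 */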
4423 /* ARGSUSED */
4424 void
4425 ztest_spa_prop_get_set(ztest_ds_t *zd, uint64_t id)
4426 {
4427 nvlist_t *props = NULL;
4428
4429 (void) rw_rdlock(&ztest_name_lock);
4430
4431 (void) ztest_spa_prop_set_uint64(ZPOOL_PROP_DEDUPDITTO,
4432 ZIO_DEDUPDITTO_MIN + ztest_random(ZIO_DEDUPDITTO_MIN));
4433
4434 VERIFY0(spa_prop_get(ztest_spa, &props));
4435
4436 if (ztest_opts.zo_verbose >= 6)
4437 dump_nvlist(props, 4);
4438
4439 nvlist_free(props);
4440
4441 (void) rw_unlock(&ztest_name_lock);
4442 }
4443
4444 /*
4445 * Test snapshot hold/release and deferred destroy.
4446 */
4447 void
4448 ztest_dmu_snapshot_hold(ztest_ds_t *zd, uint64_t id)
4449 {
4450 int error;
4451 objset_t *os = zd->zd_os;
4452 objset_t *origin;
4453 char snapname[100];
4454 char fullname[100];
4870 /*
4871 * Rename the pool to a different name and then rename it back.
4872 */
4873 /* ARGSUSED */
4874 void
4875 ztest_spa_rename(ztest_ds_t *zd, uint64_t id)
4876 {
4877 char *oldname, *newname;
4878 spa_t *spa;
4879
4880 (void) rw_wrlock(&ztest_name_lock);
4881
4882 oldname = ztest_opts.zo_pool;
4883 newname = umem_alloc(strlen(oldname) + 5, UMEM_NOFAIL);
4884 (void) strcpy(newname, oldname);
4885 (void) strcat(newname, "_tmp");
4886
4887 /*
4888 * Do the rename
4889 */
4890 VERIFY0(spa_rename(oldname, newname));
4891
4892 /*
4893 * Try to open it under the old name, which shouldn't exist
4894 */
4895 VERIFY3U(ENOENT, ==, spa_open(oldname, &spa, FTAG));
4896
4897 /*
4898 * Open it under the new name and make sure it's still the same spa_t.
4899 */
4900 VERIFY0(spa_open(newname, &spa, FTAG));
4901
4902 ASSERT(spa == ztest_spa);
4903 spa_close(spa, FTAG);
4904
4905 /*
4906 * Rename it back to the original
4907 */
4908 VERIFY0(spa_rename(newname, oldname));
4909
4910 /*
4911 * Make sure it can still be opened
4912 */
4913 VERIFY0(spa_open(oldname, &spa, FTAG));
4914
4915 ASSERT(spa == ztest_spa);
4916 spa_close(spa, FTAG);
4917
4918 umem_free(newname, strlen(newname) + 1);
4919
4920 (void) rw_unlock(&ztest_name_lock);
4921 }
4922
4923 /*
4924 * Verify pool integrity by running zdb.
4925 */
4926 static void
4927 ztest_run_zdb(char *pool)
4928 {
4929 int status;
4930 char zdb[MAXPATHLEN + MAXNAMELEN + 20];
4931 char zbuf[1024];
4932 char *bin;
4933 char *ztest;
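/*
 * Export the pool and import it back under a new name, verifying that
 * duplicate imports fail with EEXIST and that the old name disappears.
 */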
4993 static void
4994 ztest_spa_import_export(char *oldname, char *newname)
4995 {
4996 nvlist_t *config, *newconfig;
4997 uint64_t pool_guid;
4998 spa_t *spa;
4999
5000 if (ztest_opts.zo_verbose >= 4) {
5001 (void) printf("import/export: old = %s, new = %s\n",
5002 oldname, newname);
5003 }
5004
5005 /*
5006 * Clean up from previous runs.
5007 */
5008 (void) spa_destroy(newname);
5009
5010 /*
5011 * Get the pool's configuration and guid.
5012 */
5013 VERIFY0(spa_open(oldname, &spa, FTAG));
5014
5015 /*
5016 * Kick off a scrub to tickle scrub/export races.
5017 */
5018 if (ztest_random(2) == 0)
5019 (void) spa_scan(spa, POOL_SCAN_SCRUB);
5020
5021 pool_guid = spa_guid(spa);
5022 spa_close(spa, FTAG);
5023
5024 ztest_walk_pool_directory("pools before export");
5025
5026 /*
5027 * Export it.
5028 */
5029 VERIFY0(spa_export(oldname, &config, B_FALSE, B_FALSE));
5030
5031 ztest_walk_pool_directory("pools after export");
5032
5033 /*
5034 * Try to import it.
5035 */
5036 newconfig = spa_tryimport(config);
5037 ASSERT(newconfig != NULL);
5038 nvlist_free(newconfig);
5039
5040 /*
5041 * Import it under the new name.
5042 */
5043 VERIFY0(spa_import(newname, config, NULL, 0));
5044
5045 ztest_walk_pool_directory("pools after import");
5046
5047 /*
5048 * Try to import it again -- should fail with EEXIST.
5049 */
5050 VERIFY3U(EEXIST, ==, spa_import(newname, config, NULL, 0));
5051
5052 /*
5053 * Try to import it under a different name -- should fail with EEXIST.
5054 */
5055 VERIFY3U(EEXIST, ==, spa_import(oldname, config, NULL, 0));
5056
5057 /*
5058 * Verify that the pool is no longer visible under the old name.
5059 */
5060 VERIFY3U(ENOENT, ==, spa_open(oldname, &spa, FTAG));
5061
5062 /*
5063 * Verify that we can open and close the pool using the new name.
5064 */
5065 VERIFY0(spa_open(newname, &spa, FTAG));
5066 ASSERT(pool_guid == spa_guid(spa));
5067 spa_close(spa, FTAG);
5068
5069 nvlist_free(config);
5070 }
5071
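/*
 * Clear any vdev errors and resume I/O on a suspended pool.
 */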
5072 static void
5073 ztest_resume(spa_t *spa)
5074 {
5075 if (spa_suspended(spa) && ztest_opts.zo_verbose >= 6)
5076 (void) printf("resuming from suspended state\n");
5077 spa_vdev_state_enter(spa, SCL_NONE);
5078 vdev_clear(spa, NULL);
5079 (void) spa_vdev_state_exit(spa, NULL, 0);
5080 (void) zio_resume(spa);
5081 }
5082
5083 static void *
5084 ztest_resume_thread(void *arg)
5085 {
5203 (void) dmu_objset_find(name, ztest_objset_destroy_cb, NULL,
5204 DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN);
5205 }
5206
5207 static void
5208 ztest_dataset_dirobj_verify(ztest_ds_t *zd)
5209 {
5210 uint64_t usedobjs, dirobjs, scratch;
5211
5212 /*
5213 * ZTEST_DIROBJ is the object directory for the entire dataset.
5214 * Therefore, the number of objects in use should equal the
5215 * number of ZTEST_DIROBJ entries, +1 for ZTEST_DIROBJ itself.
5216 * If not, we have an object leak.
5217 *
5218 * Note that we can only check this in ztest_dataset_open(),
5219 * when the open-context and syncing-context values agree.
5220 * That's because zap_count() returns the open-context value,
5221 * while dmu_objset_space() returns the rootbp fill count.
5222 */
5223 VERIFY0(zap_count(zd->zd_os, ZTEST_DIROBJ, &dirobjs));
5224 dmu_objset_space(zd->zd_os, &scratch, &scratch, &usedobjs, &scratch);
5225 ASSERT3U(dirobjs + 1, ==, usedobjs);
5226 }
5227
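/*
 * Open (creating it first if necessary) dataset 'd', replay its intent
 * log, and verify that no committed log records have been lost.
 */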
5228 static int
5229 ztest_dataset_open(int d)
5230 {
5231 ztest_ds_t *zd = &ztest_ds[d];
5232 uint64_t committed_seq = ZTEST_GET_SHARED_DS(d)->zd_seq;
5233 objset_t *os;
5234 zilog_t *zilog;
5235 char name[MAXNAMELEN];
5236 int error;
5237
5238 ztest_dataset_name(name, ztest_opts.zo_pool, d);
5239
5240 (void) rw_rdlock(&ztest_name_lock);
5241
5242 error = ztest_dataset_create(name);
5243 if (error == ENOSPC) {
5244 (void) rw_unlock(&ztest_name_lock);
5245 ztest_record_enospc(FTAG);
5246 return (error);
5247 }
5248 ASSERT(error == 0 || error == EEXIST);
5249
5250 VERIFY0(dmu_objset_hold(name, zd, &os));
5251 (void) rw_unlock(&ztest_name_lock);
5252
5253 ztest_zd_init(zd, ZTEST_GET_SHARED_DS(d), os);
5254
5255 zilog = zd->zd_zilog;
5256
5257 if (zilog->zl_header->zh_claim_lr_seq != 0 &&
5258 zilog->zl_header->zh_claim_lr_seq < committed_seq)
5259 fatal(0, "missing log records: claimed %llu < committed %llu",
5260 zilog->zl_header->zh_claim_lr_seq, committed_seq);
5261
5262 ztest_dataset_dirobj_verify(zd);
5263
5264 zil_replay(os, zd, ztest_replay_vector);
5265
5266 ztest_dataset_dirobj_verify(zd);
5267
5268 if (ztest_opts.zo_verbose >= 6)
5269 (void) printf("%s replay %llu blocks, %llu records, seq %llu\n",
5270 zd->zd_name,
5319 zs->zs_thread_stop = MIN(zs->zs_thread_stop, zs->zs_proc_stop);
5320 zs->zs_thread_kill = zs->zs_thread_stop;
5321 if (ztest_random(100) < ztest_opts.zo_killrate) {
5322 zs->zs_thread_kill -=
5323 ztest_random(ztest_opts.zo_passtime * NANOSEC);
5324 }
5325
5326 (void) _mutex_init(&zcl.zcl_callbacks_lock, USYNC_THREAD, NULL);
5327
5328 list_create(&zcl.zcl_callbacks, sizeof (ztest_cb_data_t),
5329 offsetof(ztest_cb_data_t, zcd_node));
5330
5331 /*
5332 * Open our pool.
5333 */
5334 kernel_init(FREAD | FWRITE);
5335 VERIFY(spa_open(ztest_opts.zo_pool, &spa, FTAG) == 0);
5336 spa->spa_debug = B_TRUE;
5337 ztest_spa = spa;
5338
5339 VERIFY0(dmu_objset_hold(ztest_opts.zo_pool, FTAG, &os));
5340 zs->zs_guid = dmu_objset_fsid_guid(os);
5341 dmu_objset_rele(os, FTAG);
5342
5343 spa->spa_dedup_ditto = 2 * ZIO_DEDUPDITTO_MIN;
5344
5345 /*
5346 * We don't expect the pool to suspend unless maxfaults == 0,
5347 * in which case ztest_fault_inject() temporarily takes away
5348 * the only valid replica.
5349 */
5350 if (MAXFAULTS() == 0)
5351 spa->spa_failmode = ZIO_FAILURE_MODE_WAIT;
5352 else
5353 spa->spa_failmode = ZIO_FAILURE_MODE_PANIC;
5354
5355 /*
5356 * Create a thread to periodically resume suspended I/O.
5357 */
5358 VERIFY(thr_create(0, 0, ztest_resume_thread, spa, THR_BOUND,
5359 &resume_tid) == 0);
5461
5462 list_destroy(&zcl.zcl_callbacks);
5463
5464 (void) _mutex_destroy(&zcl.zcl_callbacks_lock);
5465
5466 (void) rwlock_destroy(&ztest_name_lock);
5467 (void) _mutex_destroy(&ztest_vdev_lock);
5468 }
5469
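/*
 * Verify that the ZIL alone can carry a pool: freeze the pool so that
 * spa_sync() does nothing, generate changes through the ZIL, then
 * reopen the pool and dataset to induce log replay.
 */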
5470 static void
5471 ztest_freeze(void)
5472 {
5473 ztest_ds_t *zd = &ztest_ds[0];
5474 spa_t *spa;
5475 int numloops = 0;
5476
5477 if (ztest_opts.zo_verbose >= 3)
5478 (void) printf("testing spa_freeze()...\n");
5479
5480 kernel_init(FREAD | FWRITE);
5481 VERIFY0(spa_open(ztest_opts.zo_pool, &spa, FTAG));
5482 VERIFY0(ztest_dataset_open(0));
5483
5484 /*
5485 * Force the first log block to be transactionally allocated.
5486 * We have to do this before we freeze the pool -- otherwise
5487 * the log chain won't be anchored.
5488 */
5489 while (BP_IS_HOLE(&zd->zd_zilog->zl_header->zh_log)) {
5490 ztest_dmu_object_alloc_free(zd, 0);
5491 zil_commit(zd->zd_zilog, 0);
5492 }
5493
5494 txg_wait_synced(spa_get_dsl(spa), 0);
5495
5496 /*
5497 * Freeze the pool. This stops spa_sync() from doing anything,
5498 * so that the only way to record changes from now on is the ZIL.
5499 */
5500 spa_freeze(spa);
5501
5502 /*
5513 txg_wait_synced(spa_get_dsl(spa), 0);
5514 }
5515
5516 /*
5517 * Commit all of the changes we just generated.
5518 */
5519 zil_commit(zd->zd_zilog, 0);
5520 txg_wait_synced(spa_get_dsl(spa), 0);
5521
5522 /*
5523 * Close our dataset and close the pool.
5524 */
5525 ztest_dataset_close(0);
5526 spa_close(spa, FTAG);
5527 kernel_fini();
5528
5529 /*
5530 * Open and close the pool and dataset to induce log replay.
5531 */
5532 kernel_init(FREAD | FWRITE);
5533 VERIFY0(spa_open(ztest_opts.zo_pool, &spa, FTAG));
5534 VERIFY0(ztest_dataset_open(0));
5535 ztest_dataset_close(0);
5536 spa_close(spa, FTAG);
5537 kernel_fini();
5538 }
5539
5540 void
5541 print_time(hrtime_t t, char *timebuf)
5542 {
5543 hrtime_t s = t / NANOSEC;
5544 hrtime_t m = s / 60;
5545 hrtime_t h = m / 60;
5546 hrtime_t d = h / 24;
5547
5548 s -= m * 60;
5549 m -= h * 60;
5550 h -= d * 24;
5551
5552 timebuf[0] = '\0';
5553
5554 if (d)
5587
5588 VERIFY(_mutex_init(&ztest_vdev_lock, USYNC_THREAD, NULL) == 0);
5589 VERIFY(rwlock_init(&ztest_name_lock, USYNC_THREAD, NULL) == 0);
5590
5591 kernel_init(FREAD | FWRITE);
5592
5593 /*
5594 * Create the storage pool.
5595 */
5596 (void) spa_destroy(ztest_opts.zo_pool);
5597 ztest_shared->zs_vdev_next_leaf = 0;
5598 zs->zs_splits = 0;
5599 zs->zs_mirrors = ztest_opts.zo_mirrors;
5600 nvroot = make_vdev_root(NULL, NULL, ztest_opts.zo_vdev_size, 0,
5601 0, ztest_opts.zo_raidz, zs->zs_mirrors, 1);
5602 props = make_random_props();
5603 for (int i = 0; i < SPA_FEATURES; i++) {
5604 char buf[1024];
5605 (void) snprintf(buf, sizeof (buf), "feature@%s",
5606 spa_feature_table[i].fi_uname);
5607 VERIFY0(nvlist_add_uint64(props, buf, 0));
5608 }
5609 VERIFY0(spa_create(ztest_opts.zo_pool, nvroot, props, NULL));
5610 nvlist_free(nvroot);
5611
5612 VERIFY0(spa_open(ztest_opts.zo_pool, &spa, FTAG));
5613 zs->zs_metaslab_sz =
5614 1ULL << spa->spa_root_vdev->vdev_child[0]->vdev_ms_shift;
5615
5616 spa_close(spa, FTAG);
5617
5618 kernel_fini();
5619
5620 ztest_run_zdb(ztest_opts.zo_pool);
5621
5622 ztest_freeze();
5623
5624 ztest_run_zdb(ztest_opts.zo_pool);
5625
5626 (void) rwlock_destroy(&ztest_name_lock);
5627 (void) _mutex_destroy(&ztest_vdev_lock);
5628 }
5629
5630 static void
5631 setup_fds(void)
5632 {
5649
5650 size = hdr->zh_hdr_size;
5651 size += hdr->zh_opts_size;
5652 size += hdr->zh_size;
5653 size += hdr->zh_stats_size * hdr->zh_stats_count;
5654 size += hdr->zh_ds_size * hdr->zh_ds_count;
5655
5656 return (size);
5657 }
5658
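/*
 * Create the shared-memory header: map it, size the backing file to
 * hold all of the shared state, and record the section sizes.
 */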
5659 static void
5660 setup_hdr(void)
5661 {
5662 int size;
5663 ztest_shared_hdr_t *hdr;
5664
5665 hdr = (void *)mmap(0, P2ROUNDUP(sizeof (*hdr), getpagesize()),
5666 PROT_READ | PROT_WRITE, MAP_SHARED, ZTEST_FD_DATA, 0);
5667 ASSERT(hdr != MAP_FAILED);
5668
5669 VERIFY0(ftruncate(ZTEST_FD_DATA, sizeof (ztest_shared_hdr_t)));
5670
5671 hdr->zh_hdr_size = sizeof (ztest_shared_hdr_t);
5672 hdr->zh_opts_size = sizeof (ztest_shared_opts_t);
5673 hdr->zh_size = sizeof (ztest_shared_t);
5674 hdr->zh_stats_size = sizeof (ztest_shared_callstate_t);
5675 hdr->zh_stats_count = ZTEST_FUNCS;
5676 hdr->zh_ds_size = sizeof (ztest_shared_ds_t);
5677 hdr->zh_ds_count = ztest_opts.zo_datasets;
5678
5679 size = shared_data_size(hdr);
5680 VERIFY0(ftruncate(ZTEST_FD_DATA, size));
5681
5682 (void) munmap((caddr_t)hdr, P2ROUNDUP(sizeof (*hdr), getpagesize()));
5683 }
5684
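/*
 * Map the full shared data region described by the header and set up
 * the global pointers into it.
 */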
5685 static void
5686 setup_data(void)
5687 {
5688 int size, offset;
5689 ztest_shared_hdr_t *hdr;
5690 uint8_t *buf;
5691
5692 hdr = (void *)mmap(0, P2ROUNDUP(sizeof (*hdr), getpagesize()),
5693 PROT_READ, MAP_SHARED, ZTEST_FD_DATA, 0);
5694 ASSERT(hdr != MAP_FAILED);
5695
5696 size = shared_data_size(hdr);
5697
5698 (void) munmap((caddr_t)hdr, P2ROUNDUP(sizeof (*hdr), getpagesize()));
5699 hdr = ztest_shared_hdr = (void *)mmap(0, P2ROUNDUP(size, getpagesize()),
5700 PROT_READ | PROT_WRITE, MAP_SHARED, ZTEST_FD_DATA, 0);
|
955 }
956
957 static int
958 ztest_dsl_prop_set_uint64(char *osname, zfs_prop_t prop, uint64_t value,
959 boolean_t inherit)
960 {
961 const char *propname = zfs_prop_to_name(prop);
962 const char *valname;
963 char setpoint[MAXPATHLEN];
964 uint64_t curval;
965 int error;
966
967 error = dsl_prop_set(osname, propname,
968 (inherit ? ZPROP_SRC_NONE : ZPROP_SRC_LOCAL),
969 sizeof (value), 1, &value);
970
971 if (error == ENOSPC) {
972 ztest_record_enospc(FTAG);
973 return (error);
974 }
975 ASSERT0(error);
976
977 VERIFY3U(dsl_prop_get(osname, propname, sizeof (curval),
978 1, &curval, setpoint), ==, 0);
979
980 if (ztest_opts.zo_verbose >= 6) {
981 VERIFY(zfs_prop_index_to_string(prop, curval, &valname) == 0);
982 (void) printf("%s %s = %s at '%s'\n",
983 osname, propname, valname, setpoint);
984 }
985
986 return (error);
987 }
988
989 static int
990 ztest_spa_prop_set_uint64(zpool_prop_t prop, uint64_t value)
991 {
992 spa_t *spa = ztest_spa;
993 nvlist_t *props = NULL;
994 int error;
995
996 VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0);
997 VERIFY(nvlist_add_uint64(props, zpool_prop_to_name(prop), value) == 0);
998
999 error = spa_prop_set(spa, props);
1000
1001 nvlist_free(props);
1002
1003 if (error == ENOSPC) {
1004 ztest_record_enospc(FTAG);
1005 return (error);
1006 }
1007 ASSERT0(error);
1008
1009 return (error);
1010 }
1011
1012 static void
1013 ztest_rll_init(rll_t *rll)
1014 {
1015 rll->rll_writer = NULL;
1016 rll->rll_readers = 0;
1017 VERIFY(_mutex_init(&rll->rll_lock, USYNC_THREAD, NULL) == 0);
1018 VERIFY(cond_init(&rll->rll_cv, USYNC_THREAD, NULL) == 0);
1019 }
1020
1021 static void
1022 ztest_rll_destroy(rll_t *rll)
1023 {
1024 ASSERT(rll->rll_writer == NULL);
1025 ASSERT(rll->rll_readers == 0);
1026 VERIFY(_mutex_destroy(&rll->rll_lock) == 0);
1027 VERIFY(cond_destroy(&rll->rll_cv) == 0);
1395 lr->lr_foid = dmu_object_alloc(os,
1396 lr->lrz_type, 0, lr->lrz_bonustype,
1397 lr->lrz_bonuslen, tx);
1398 } else {
1399 error = dmu_object_claim(os, lr->lr_foid,
1400 lr->lrz_type, 0, lr->lrz_bonustype,
1401 lr->lrz_bonuslen, tx);
1402 }
1403 }
1404
1405 if (error) {
1406 ASSERT3U(error, ==, EEXIST);
1407 ASSERT(zd->zd_zilog->zl_replay);
1408 dmu_tx_commit(tx);
1409 return (error);
1410 }
1411
1412 ASSERT(lr->lr_foid != 0);
1413
1414 if (lr->lrz_type != DMU_OT_ZAP_OTHER)
1415 VERIFY0(dmu_object_set_blocksize(os, lr->lr_foid,
1416 lr->lrz_blocksize, lr->lrz_ibshift, tx));
1417
1418 VERIFY0(dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));
1419 bbt = ztest_bt_bonus(db);
1420 dmu_buf_will_dirty(db, tx);
1421 ztest_bt_generate(bbt, os, lr->lr_foid, -1ULL, lr->lr_gen, txg, txg);
1422 dmu_buf_rele(db, FTAG);
1423
1424 VERIFY0(zap_add(os, lr->lr_doid, name, sizeof (uint64_t), 1,
1425 &lr->lr_foid, tx));
1426
1427 (void) ztest_log_create(zd, tx, lr);
1428
1429 dmu_tx_commit(tx);
1430
1431 return (0);
1432 }
1433
1434 static int
1435 ztest_replay_remove(ztest_ds_t *zd, lr_remove_t *lr, boolean_t byteswap)
1436 {
1437 char *name = (void *)(lr + 1); /* name follows lr */
1438 objset_t *os = zd->zd_os;
1439 dmu_object_info_t doi;
1440 dmu_tx_t *tx;
1441 uint64_t object, txg;
1442
1443 if (byteswap)
1444 byteswap_uint64_array(lr, sizeof (*lr));
1445
1446 ASSERT(lr->lr_doid == ZTEST_DIROBJ);
1447 ASSERT(name[0] != '\0');
1448
1449 VERIFY0(
1450 zap_lookup(os, lr->lr_doid, name, sizeof (object), 1, &object));
1451 ASSERT(object != 0);
1452
1453 ztest_object_lock(zd, object, RL_WRITER);
1454
1455 VERIFY0(dmu_object_info(os, object, &doi));
1456
1457 tx = dmu_tx_create(os);
1458
1459 dmu_tx_hold_zap(tx, lr->lr_doid, B_FALSE, name);
1460 dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
1461
1462 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
1463 if (txg == 0) {
1464 ztest_object_unlock(zd, object);
1465 return (ENOSPC);
1466 }
1467
1468 if (doi.doi_type == DMU_OT_ZAP_OTHER) {
1469 VERIFY0(zap_destroy(os, object, tx));
1470 } else {
1471 VERIFY0(dmu_object_free(os, object, tx));
1472 }
1473
1474 VERIFY0(zap_remove(os, lr->lr_doid, name, tx));
1475
1476 (void) ztest_log_remove(zd, tx, lr, object);
1477
1478 dmu_tx_commit(tx);
1479
1480 ztest_object_unlock(zd, object);
1481
1482 return (0);
1483 }
1484
1485 static int
1486 ztest_replay_write(ztest_ds_t *zd, lr_write_t *lr, boolean_t byteswap)
1487 {
1488 objset_t *os = zd->zd_os;
1489 void *data = lr + 1; /* data follows lr */
1490 uint64_t offset, length;
1491 ztest_block_tag_t *bt = data;
1492 ztest_block_tag_t *bbt;
1493 uint64_t gen, txg, lrtxg, crtxg;
1494 dmu_object_info_t doi;
1504 length = lr->lr_length;
1505
1506 /* If it's a dmu_sync() block, write the whole block */
1507 if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
1508 uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
1509 if (length < blocksize) {
1510 offset -= offset % blocksize;
1511 length = blocksize;
1512 }
1513 }
1514
1515 if (bt->bt_magic == BSWAP_64(BT_MAGIC))
1516 byteswap_uint64_array(bt, sizeof (*bt));
1517
1518 if (bt->bt_magic != BT_MAGIC)
1519 bt = NULL;
1520
1521 ztest_object_lock(zd, lr->lr_foid, RL_READER);
1522 rl = ztest_range_lock(zd, lr->lr_foid, offset, length, RL_WRITER);
1523
1524 VERIFY0(dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));
1525
1526 dmu_object_info_from_db(db, &doi);
1527
1528 bbt = ztest_bt_bonus(db);
1529 ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
1530 gen = bbt->bt_gen;
1531 crtxg = bbt->bt_crtxg;
1532 lrtxg = lr->lr_common.lrc_txg;
1533
1534 tx = dmu_tx_create(os);
1535
1536 dmu_tx_hold_write(tx, lr->lr_foid, offset, length);
1537
1538 if (ztest_random(8) == 0 && length == doi.doi_data_block_size &&
1539 P2PHASE(offset, length) == 0)
1540 abuf = dmu_request_arcbuf(db, length);
1541
1542 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
1543 if (txg == 0) {
1544 if (abuf != NULL)
1643 ztest_range_unlock(rl);
1644 ztest_object_unlock(zd, lr->lr_foid);
1645
1646 return (0);
1647 }
1648
1649 static int
1650 ztest_replay_setattr(ztest_ds_t *zd, lr_setattr_t *lr, boolean_t byteswap)
1651 {
1652 objset_t *os = zd->zd_os;
1653 dmu_tx_t *tx;
1654 dmu_buf_t *db;
1655 ztest_block_tag_t *bbt;
1656 uint64_t txg, lrtxg, crtxg;
1657
1658 if (byteswap)
1659 byteswap_uint64_array(lr, sizeof (*lr));
1660
1661 ztest_object_lock(zd, lr->lr_foid, RL_WRITER);
1662
1663 VERIFY0(dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));
1664
1665 tx = dmu_tx_create(os);
1666 dmu_tx_hold_bonus(tx, lr->lr_foid);
1667
1668 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
1669 if (txg == 0) {
1670 dmu_buf_rele(db, FTAG);
1671 ztest_object_unlock(zd, lr->lr_foid);
1672 return (ENOSPC);
1673 }
1674
1675 bbt = ztest_bt_bonus(db);
1676 ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
1677 crtxg = bbt->bt_crtxg;
1678 lrtxg = lr->lr_common.lrc_txg;
1679
1680 if (zd->zd_zilog->zl_replay) {
1681 ASSERT(lr->lr_size != 0);
1682 ASSERT(lr->lr_mode != 0);
1683 ASSERT(lrtxg != 0);
1684 } else {
1685 /*
1686 * Randomly change the size and increment the generation.
1687 */
1688 lr->lr_size = (ztest_random(db->db_size / sizeof (*bbt)) + 1) *
1689 sizeof (*bbt);
1690 lr->lr_mode = bbt->bt_gen + 1;
1691 ASSERT(lrtxg == 0);
1692 }
1693
1694 /*
1695 * Verify that the current bonus buffer is not newer than our txg.
1696 */
1697 ztest_bt_verify(bbt, os, lr->lr_foid, -1ULL, lr->lr_mode,
1698 MAX(txg, lrtxg), crtxg);
1699
1700 dmu_buf_will_dirty(db, tx);
1701
1702 ASSERT3U(lr->lr_size, >=, sizeof (*bbt));
1703 ASSERT3U(lr->lr_size, <=, db->db_size);
1704 VERIFY0(dmu_set_bonus(db, lr->lr_size, tx));
1705 bbt = ztest_bt_bonus(db);
1706
1707 ztest_bt_generate(bbt, os, lr->lr_foid, -1ULL, lr->lr_mode, txg, crtxg);
1708
1709 dmu_buf_rele(db, FTAG);
1710
1711 (void) ztest_log_setattr(zd, tx, lr);
1712
1713 dmu_tx_commit(tx);
1714
1715 ztest_object_unlock(zd, lr->lr_foid);
1716
1717 return (0);
1718 }
1719
1720 zil_replay_func_t *ztest_replay_vector[TX_MAX_TYPE] = {
1721 NULL, /* 0 no such transaction type */
1722 ztest_replay_create, /* TX_CREATE */
1723 NULL, /* TX_MKDIR */
1724 NULL, /* TX_MKXATTR */
1876
1877 ASSERT(_mutex_held(&zd->zd_dirobj_lock));
1878
1879 for (int i = 0; i < count; i++, od++) {
1880 od->od_object = 0;
1881 error = zap_lookup(zd->zd_os, od->od_dir, od->od_name,
1882 sizeof (uint64_t), 1, &od->od_object);
1883 if (error) {
1884 ASSERT(error == ENOENT);
1885 ASSERT(od->od_object == 0);
1886 missing++;
1887 } else {
1888 dmu_buf_t *db;
1889 ztest_block_tag_t *bbt;
1890 dmu_object_info_t doi;
1891
1892 ASSERT(od->od_object != 0);
1893 ASSERT(missing == 0); /* there should be no gaps */
1894
1895 ztest_object_lock(zd, od->od_object, RL_READER);
1896 VERIFY0(dmu_bonus_hold(zd->zd_os,
1897 od->od_object, FTAG, &db));
1898 dmu_object_info_from_db(db, &doi);
1899 bbt = ztest_bt_bonus(db);
1900 ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
1901 od->od_type = doi.doi_type;
1902 od->od_blocksize = doi.doi_data_block_size;
1903 od->od_gen = bbt->bt_gen;
1904 dmu_buf_rele(db, FTAG);
1905 ztest_object_unlock(zd, od->od_object);
1906 }
1907 }
1908
1909 return (missing);
1910 }
1911
1912 static int
1913 ztest_create(ztest_ds_t *zd, ztest_od_t *od, int count)
1914 {
1915 int missing = 0;
1916
2254 VERIFY3U(ENOENT, ==,
2255 spa_create("ztest_bad_file", nvroot, NULL, NULL));
2256 nvlist_free(nvroot);
2257
2258 /*
2259 * Attempt to create using a bad mirror.
2260 */
2261 nvroot = make_vdev_root("/dev/bogus", NULL, 0, 0, 0, 0, 2, 1);
2262 VERIFY3U(ENOENT, ==,
2263 spa_create("ztest_bad_mirror", nvroot, NULL, NULL));
2264 nvlist_free(nvroot);
2265
2266 /*
2267 * Attempt to create an existing pool. It shouldn't matter
2268 * what's in the nvroot; we should fail with EEXIST.
2269 */
2270 (void) rw_rdlock(&ztest_name_lock);
2271 nvroot = make_vdev_root("/dev/bogus", NULL, 0, 0, 0, 0, 0, 1);
2272 VERIFY3U(EEXIST, ==, spa_create(zo->zo_pool, nvroot, NULL, NULL));
2273 nvlist_free(nvroot);
2274 VERIFY0(spa_open(zo->zo_pool, &spa, FTAG));
2275 VERIFY3U(EBUSY, ==, spa_destroy(zo->zo_pool));
2276 spa_close(spa, FTAG);
2277
2278 (void) rw_unlock(&ztest_name_lock);
2279 }
2280
2281 static vdev_t *
2282 vdev_lookup_by_path(vdev_t *vd, const char *path)
2283 {
2284 vdev_t *mvd;
2285
2286 if (vd->vdev_path != NULL && strcmp(path, vd->vdev_path) == 0)
2287 return (vd);
2288
2289 for (int c = 0; c < vd->vdev_children; c++)
2290 if ((mvd = vdev_lookup_by_path(vd->vdev_child[c], path)) !=
2291 NULL)
2292 return (mvd);
2293
2294 return (NULL);
3013 if (err || zilset < 80)
3014 return (err);
3015
3016 if (ztest_opts.zo_verbose >= 6)
3017 (void) printf("Setting dataset %s to sync always\n", dsname);
3018 return (ztest_dsl_prop_set_uint64(dsname, ZFS_PROP_SYNC,
3019 ZFS_SYNC_ALWAYS, B_FALSE));
3020 }
3021
3022 /* ARGSUSED */
3023 static int
3024 ztest_objset_destroy_cb(const char *name, void *arg)
3025 {
3026 objset_t *os;
3027 dmu_object_info_t doi;
3028 int error;
3029
3030 /*
3031 * Verify that the dataset contains a directory object.
3032 */
3033 VERIFY0(dmu_objset_hold(name, FTAG, &os));
3034 error = dmu_object_info(os, ZTEST_DIROBJ, &doi);
3035 if (error != ENOENT) {
3036 /* We could have crashed in the middle of destroying it */
3037 ASSERT0(error);
3038 ASSERT3U(doi.doi_type, ==, DMU_OT_ZAP_OTHER);
3039 ASSERT3S(doi.doi_physical_blocks_512, >=, 0);
3040 }
3041 dmu_objset_rele(os, FTAG);
3042
3043 /*
3044 * Destroy the dataset.
3045 */
3046 VERIFY0(dmu_objset_destroy(name, B_FALSE));
3047 return (0);
3048 }
3049
3050 static boolean_t
3051 ztest_snapshot_create(char *osname, uint64_t id)
3052 {
3053 char snapname[MAXNAMELEN];
3054 int error;
3055
3056 (void) snprintf(snapname, MAXNAMELEN, "%s@%llu", osname,
3057 (u_longlong_t)id);
3058
3059 error = dmu_objset_snapshot_one(osname, strchr(snapname, '@') + 1);
3060 if (error == ENOSPC) {
3061 ztest_record_enospc(FTAG);
3062 return (B_FALSE);
3063 }
3064 if (error != 0 && error != EEXIST)
3065 fatal(0, "ztest_snapshot_create(%s) = %d", snapname, error);
3066 return (B_TRUE);
3119 DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
3120
3121 /*
3122 * Verify that the destroyed dataset is no longer in the namespace.
3123 */
3124 VERIFY3U(ENOENT, ==, dmu_objset_hold(name, FTAG, &os));
3125
3126 /*
3127 * Verify that we can create a new dataset.
3128 */
3129 error = ztest_dataset_create(name);
3130 if (error) {
3131 if (error == ENOSPC) {
3132 ztest_record_enospc(FTAG);
3133 (void) rw_unlock(&ztest_name_lock);
3134 return;
3135 }
3136 fatal(0, "dmu_objset_create(%s) = %d", name, error);
3137 }
3138
3139 VERIFY0(dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os));
3140
3141 ztest_zd_init(&zdtmp, NULL, os);
3142
3143 /*
3144 * Open the intent log for it.
3145 */
3146 zilog = zil_open(os, ztest_get_data);
3147
3148 /*
3149 * Put some objects in there, do a little I/O to them,
3150 * and randomly take a couple of snapshots along the way.
3151 */
3152 iters = ztest_random(5);
3153 for (int i = 0; i < iters; i++) {
3154 ztest_dmu_object_alloc_free(&zdtmp, id);
3155 if (ztest_random(iters) == 0)
3156 (void) ztest_snapshot_create(name, i);
3157 }
3158
3159 /*
3160 * Verify that we cannot create an existing dataset.
3161 */
3162 VERIFY3U(EEXIST, ==,
3163 dmu_objset_create(name, DMU_OST_OTHER, 0, NULL, NULL));
3164
3165 /*
3166 * Verify that we can hold an objset that is also owned.
3167 */
3168 VERIFY0(dmu_objset_hold(name, FTAG, &os2));
3169 dmu_objset_rele(os2, FTAG);
3170
3171 /*
3172 * Verify that we cannot own an objset that is already owned.
3173 */
3174 VERIFY3U(EBUSY, ==,
3175 dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os2));
3176
3177 zil_close(zilog);
3178 dmu_objset_disown(os, FTAG);
3179 ztest_zd_fini(&zdtmp);
3180
3181 (void) rw_unlock(&ztest_name_lock);
3182 }
3183
3184 /*
3185 * Verify that dmu_snapshot_{create,destroy,open,close} work as expected.
3186 */
3187 void
3188 ztest_dmu_snapshot_create_destroy(ztest_ds_t *zd, uint64_t id)
3425 packoff = n * sizeof (bufwad_t);
3426 packsize = s * sizeof (bufwad_t);
3427
3428 bigoff = n * chunksize;
3429 bigsize = s * chunksize;
3430
3431 packbuf = umem_alloc(packsize, UMEM_NOFAIL);
3432 bigbuf = umem_alloc(bigsize, UMEM_NOFAIL);
3433
3434 /*
3435 * free_percent of the time, free a range of bigobj rather than
3436 * overwriting it.
3437 */
3438 freeit = (ztest_random(100) < free_percent);
3439
3440 /*
3441 * Read the current contents of our objects.
3442 */
3443 error = dmu_read(os, packobj, packoff, packsize, packbuf,
3444 DMU_READ_PREFETCH);
3445 ASSERT0(error);
3446 error = dmu_read(os, bigobj, bigoff, bigsize, bigbuf,
3447 DMU_READ_PREFETCH);
3448 ASSERT0(error);
3449
3450 /*
3451 * Get a tx for the mods to both packobj and bigobj.
3452 */
3453 tx = dmu_tx_create(os);
3454
3455 dmu_tx_hold_write(tx, packobj, packoff, packsize);
3456
3457 if (freeit)
3458 dmu_tx_hold_free(tx, bigobj, bigoff, bigsize);
3459 else
3460 dmu_tx_hold_write(tx, bigobj, bigoff, bigsize);
3461
3462 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
3463 if (txg == 0) {
3464 umem_free(packbuf, packsize);
3465 umem_free(bigbuf, bigsize);
3466 return;
3467 }
3468
3668 VERIFY(dmu_object_info(os, bigobj, &doi) == 0);
3669 VERIFY(ISP2(doi.doi_data_block_size));
3670 VERIFY(chunksize == doi.doi_data_block_size);
3671 VERIFY(chunksize >= 2 * sizeof (bufwad_t));
3672
3673 /*
3674 * Pick a random index and compute the offsets into packobj and bigobj.
3675 */
3676 n = ztest_random(regions) * stride + ztest_random(width);
3677 s = 1 + ztest_random(width - 1);
3678
3679 packoff = n * sizeof (bufwad_t);
3680 packsize = s * sizeof (bufwad_t);
3681
3682 bigoff = n * chunksize;
3683 bigsize = s * chunksize;
3684
3685 packbuf = umem_zalloc(packsize, UMEM_NOFAIL);
3686 bigbuf = umem_zalloc(bigsize, UMEM_NOFAIL);
3687
3688 VERIFY0(dmu_bonus_hold(os, bigobj, FTAG, &bonus_db));
3689
3690 bigbuf_arcbufs = umem_zalloc(2 * s * sizeof (arc_buf_t *), UMEM_NOFAIL);
3691
3692 /*
3693 * Iteration 0 test zcopy for DB_UNCACHED dbufs.
3694 * Iteration 1 test zcopy to already referenced dbufs.
3695 * Iteration 2 test zcopy to dirty dbuf in the same txg.
3696 * Iteration 3 test zcopy to dbuf dirty in previous txg.
3697 * Iteration 4 test zcopy when dbuf is no longer dirty.
3698 * Iteration 5 test zcopy when it can't be done.
3699 * Iteration 6 one more zcopy write.
3700 */
3701 for (i = 0; i < 7; i++) {
3702 uint64_t j;
3703 uint64_t off;
3704
3705 /*
3706 * In iteration 5 (i == 5) use arcbufs
3707 * that don't match bigobj blksz to test
3708 * dmu_assign_arcbuf() when it can't directly
3738 } else {
3739 dmu_return_arcbuf(
3740 bigbuf_arcbufs[2 * j]);
3741 dmu_return_arcbuf(
3742 bigbuf_arcbufs[2 * j + 1]);
3743 }
3744 }
3745 umem_free(bigbuf_arcbufs, 2 * s * sizeof (arc_buf_t *));
3746 dmu_buf_rele(bonus_db, FTAG);
3747 return;
3748 }
3749
3750 /*
3751 * 50% of the time don't read objects in the 1st iteration to
3752 * test dmu_assign_arcbuf() for the case when there're no
3753 * existing dbufs for the specified offsets.
3754 */
3755 if (i != 0 || ztest_random(2) != 0) {
3756 error = dmu_read(os, packobj, packoff,
3757 packsize, packbuf, DMU_READ_PREFETCH);
3758 ASSERT0(error);
3759 error = dmu_read(os, bigobj, bigoff, bigsize,
3760 bigbuf, DMU_READ_PREFETCH);
3761 ASSERT0(error);
3762 }
3763 compare_and_update_pbbufs(s, packbuf, bigbuf, bigsize,
3764 n, chunksize, txg);
3765
3766 /*
3767 * We've verified all the old bufwads, and made new ones.
3768 * Now write them out.
3769 */
3770 dmu_write(os, packobj, packoff, packsize, packbuf, tx);
3771 if (ztest_opts.zo_verbose >= 7) {
3772 (void) printf("writing offset %llx size %llx"
3773 " txg %llx\n",
3774 (u_longlong_t)bigoff,
3775 (u_longlong_t)bigsize,
3776 (u_longlong_t)txg);
3777 }
3778 for (off = bigoff, j = 0; j < s; j++, off += chunksize) {
3779 dmu_buf_t *dbt;
3780 if (i != 5) {
3781 bcopy((caddr_t)bigbuf + (off - bigoff),
3920 char *hc[2] = { "s.acl.h", ".s.open.h.hyLZlg" };
3921
3922 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0);
3923
3924 if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0)
3925 return;
3926
3927 object = od[0].od_object;
3928
3929 /*
3930 * Generate a known hash collision, and verify that
3931 * we can lookup and remove both entries.
3932 */
3933 tx = dmu_tx_create(os);
3934 dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
3935 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
3936 if (txg == 0)
3937 return;
3938 for (i = 0; i < 2; i++) {
3939 value[i] = i;
3940 VERIFY0(zap_add(os, object, hc[i], sizeof (uint64_t),
3941 1, &value[i], tx));
3942 }
3943 for (i = 0; i < 2; i++) {
3944 VERIFY3U(EEXIST, ==, zap_add(os, object, hc[i],
3945 sizeof (uint64_t), 1, &value[i], tx));
3946 VERIFY0(zap_length(os, object, hc[i], &zl_intsize, &zl_ints));
3947 ASSERT3U(zl_intsize, ==, sizeof (uint64_t));
3948 ASSERT3U(zl_ints, ==, 1);
3949 }
3950 for (i = 0; i < 2; i++) {
3951 VERIFY0(zap_remove(os, object, hc[i], tx));
3952 }
3953 dmu_tx_commit(tx);
3954
3955 /*
3956 * Generate a buch of random entries.
3957 */
3958 ints = MAX(ZTEST_ZAP_MIN_INTS, object % ZTEST_ZAP_MAX_INTS);
3959
3960 prop = ztest_random(ZTEST_ZAP_MAX_PROPS);
3961 (void) sprintf(propname, "prop_%llu", (u_longlong_t)prop);
3962 (void) sprintf(txgname, "txg_%llu", (u_longlong_t)prop);
3963 bzero(value, sizeof (value));
3964 last_txg = 0;
3965
3966 /*
3967 * If these zap entries already exist, validate their contents.
3968 */
3969 error = zap_length(os, object, txgname, &zl_intsize, &zl_ints);
3970 if (error == 0) {
3971 ASSERT3U(zl_intsize, ==, sizeof (uint64_t));
3992
3993 /*
3994 * Atomically update two entries in our zap object.
3995 * The first is named txg_%llu, and contains the txg
3996 * in which the property was last updated. The second
3997 * is named prop_%llu, and the nth element of its value
3998 * should be txg + object + n.
3999 */
4000 tx = dmu_tx_create(os);
4001 dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
4002 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
4003 if (txg == 0)
4004 return;
4005
4006 if (last_txg > txg)
4007 fatal(0, "zap future leak: old %llu new %llu", last_txg, txg);
4008
4009 for (i = 0; i < ints; i++)
4010 value[i] = txg + object + i;
4011
4012 VERIFY0(zap_update(os, object, txgname, sizeof (uint64_t),
4013 1, &txg, tx));
4014 VERIFY0(zap_update(os, object, propname, sizeof (uint64_t),
4015 ints, value, tx));
4016
4017 dmu_tx_commit(tx);
4018
4019 /*
4020 * Remove a random pair of entries.
4021 */
4022 prop = ztest_random(ZTEST_ZAP_MAX_PROPS);
4023 (void) sprintf(propname, "prop_%llu", (u_longlong_t)prop);
4024 (void) sprintf(txgname, "txg_%llu", (u_longlong_t)prop);
4025
4026 error = zap_length(os, object, txgname, &zl_intsize, &zl_ints);
4027
4028 if (error == ENOENT)
4029 return;
4030
4031 ASSERT0(error);
4032
4033 tx = dmu_tx_create(os);
4034 dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
4035 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
4036 if (txg == 0)
4037 return;
4038 VERIFY0(zap_remove(os, object, txgname, tx));
4039 VERIFY0(zap_remove(os, object, propname, tx));
4040 dmu_tx_commit(tx);
4041 }
4042
4043 /*
4044 * Testcase to test the upgrading of a microzap to fatzap.
4045 */
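/*
 * Background, as a rough sketch: a microzap packs its entries into a
 * single block, each entry holding a short name and a single uint64
 * value.  Adding enough entries to overflow that block (or an entry a
 * microzap cannot represent) forces the upgrade to a fatzap that this
 * test is after.
 */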
4046 void
4047 ztest_fzap(ztest_ds_t *zd, uint64_t id)
4048 {
4049 objset_t *os = zd->zd_os;
4050 ztest_od_t od[1];
4051 uint64_t object, txg;
4052
4053 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0);
4054
4055 if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0)
4056 return;
4057
4058 object = od[0].od_object;
4059
4207 /* This is the actual commit callback function */
4208 static void
4209 ztest_commit_callback(void *arg, int error)
4210 {
4211 ztest_cb_data_t *data = arg;
4212 uint64_t synced_txg;
4213
4214 VERIFY(data != NULL);
4215 VERIFY3S(data->zcd_expected_err, ==, error);
4216 VERIFY(!data->zcd_called);
4217
4218 synced_txg = spa_last_synced_txg(data->zcd_spa);
4219 if (data->zcd_txg > synced_txg)
4220 fatal(0, "commit callback of txg %" PRIu64 " called prematurely"
4221 ", last synced txg = %" PRIu64 "\n", data->zcd_txg,
4222 synced_txg);
4223
4224 data->zcd_called = B_TRUE;
4225
4226 if (error == ECANCELED) {
4227 ASSERT0(data->zcd_txg);
4228 ASSERT(!data->zcd_added);
4229
4230 /*
4231 * The private callback data should be destroyed here, but
4232 * since we are going to check the zcd_called field after
4233 * dmu_tx_abort(), we will destroy it there.
4234 */
4235 return;
4236 }
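/*
 * ECANCELED is the error a commit callback receives when its tx is
 * aborted before ever being assigned to a txg; in that case the
 * callback was never added to the global list and zcd_txg was never
 * filled in, which is exactly what the asserts above check.
 */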
4237
4238 /* Was this callback added to the global callback list? */
4239 if (!data->zcd_added)
4240 goto out;
4241
4242 ASSERT3U(data->zcd_txg, !=, 0);
4243
4244 /* Remove our callback from the list */
4245 (void) mutex_lock(&zcl.zcl_callbacks_lock);
4246 list_remove(&zcl.zcl_callbacks, data);
4247 (void) mutex_unlock(&zcl.zcl_callbacks_lock);
4412 (void) rw_rdlock(&ztest_name_lock);
4413
4414 for (int p = 0; p < sizeof (proplist) / sizeof (proplist[0]); p++)
4415 (void) ztest_dsl_prop_set_uint64(zd->zd_name, proplist[p],
4416 ztest_random_dsl_prop(proplist[p]), (int)ztest_random(2));
4417
4418 (void) rw_unlock(&ztest_name_lock);
4419 }
4420
4421 /* ARGSUSED */
4422 void
4423 ztest_spa_prop_get_set(ztest_ds_t *zd, uint64_t id)
4424 {
4425 nvlist_t *props = NULL;
4426
4427 (void) rw_rdlock(&ztest_name_lock);
4428
4429 (void) ztest_spa_prop_set_uint64(ZPOOL_PROP_DEDUPDITTO,
4430 ZIO_DEDUPDITTO_MIN + ztest_random(ZIO_DEDUPDITTO_MIN));
4431
4432 VERIFY0(spa_prop_get(ztest_spa, &props));
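/*
 * spa_prop_get() allocates the props nvlist on our behalf (props
 * starts out NULL), so we own it and must nvlist_free() it below.
 */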
4433
4434 if (ztest_opts.zo_verbose >= 6)
4435 dump_nvlist(props, 4);
4436
4437 nvlist_free(props);
4438
4439 (void) rw_unlock(&ztest_name_lock);
4440 }
4441
4442 /*
4443 * Test snapshot hold/release and deferred destroy.
4444 */
4445 void
4446 ztest_dmu_snapshot_hold(ztest_ds_t *zd, uint64_t id)
4447 {
4448 int error;
4449 objset_t *os = zd->zd_os;
4450 objset_t *origin;
4451 char snapname[100];
4452 char fullname[100];
4868 /*
4869 * Rename the pool to a different name and then rename it back.
4870 */
4871 /* ARGSUSED */
4872 void
4873 ztest_spa_rename(ztest_ds_t *zd, uint64_t id)
4874 {
4875 char *oldname, *newname;
4876 spa_t *spa;
4877
4878 (void) rw_wrlock(&ztest_name_lock);
4879
4880 oldname = ztest_opts.zo_pool;
4881 newname = umem_alloc(strlen(oldname) + 5, UMEM_NOFAIL);
4882 (void) strcpy(newname, oldname);
4883 (void) strcat(newname, "_tmp");
4884
4885 /*
4886 * Do the rename
4887 */
4888 VERIFY0(spa_rename(oldname, newname));
4889
4890 /*
4891 * Try to open it under the old name, which shouldn't exist
4892 */
4893 VERIFY3U(ENOENT, ==, spa_open(oldname, &spa, FTAG));
4894
4895 /*
4896 * Open it under the new name and make sure it's still the same spa_t.
4897 */
4898 VERIFY0(spa_open(newname, &spa, FTAG));
4899
4900 ASSERT(spa == ztest_spa);
4901 spa_close(spa, FTAG);
4902
4903 /*
4904 * Rename it back to the original
4905 */
4906 VERIFY0(spa_rename(newname, oldname));
4907
4908 /*
4909 * Make sure it can still be opened
4910 */
4911 VERIFY0(spa_open(oldname, &spa, FTAG));
4912
4913 ASSERT(spa == ztest_spa);
4914 spa_close(spa, FTAG);
4915
4916 umem_free(newname, strlen(newname) + 1);
4917
4918 (void) rw_unlock(&ztest_name_lock);
4919 }
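/*
 * Note that spa_rename() is a namespace-only operation: the same
 * spa_t simply becomes reachable under another name, which is why
 * both opens above must return the very pointer cached in ztest_spa.
 * Taking ztest_name_lock as writer keeps other threads from
 * constructing dataset names from the pool name mid-rename.
 */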
4920
4921 /*
4922 * Verify pool integrity by running zdb.
4923 */
4924 static void
4925 ztest_run_zdb(char *pool)
4926 {
4927 int status;
4928 char zdb[MAXPATHLEN + MAXNAMELEN + 20];
4929 char zbuf[1024];
4930 char *bin;
4931 char *ztest;
4991 static void
4992 ztest_spa_import_export(char *oldname, char *newname)
4993 {
4994 nvlist_t *config, *newconfig;
4995 uint64_t pool_guid;
4996 spa_t *spa;
4997
4998 if (ztest_opts.zo_verbose >= 4) {
4999 (void) printf("import/export: old = %s, new = %s\n",
5000 oldname, newname);
5001 }
5002
5003 /*
5004 * Clean up from previous runs.
5005 */
5006 (void) spa_destroy(newname);
5007
5008 /*
5009 * Get the pool's configuration and guid.
5010 */
5011 VERIFY0(spa_open(oldname, &spa, FTAG));
5012
5013 /*
5014 * Kick off a scrub to tickle scrub/export races.
5015 */
5016 if (ztest_random(2) == 0)
5017 (void) spa_scan(spa, POOL_SCAN_SCRUB);
5018
5019 pool_guid = spa_guid(spa);
5020 spa_close(spa, FTAG);
5021
5022 ztest_walk_pool_directory("pools before export");
5023
5024 /*
5025 * Export it.
5026 */
5027 VERIFY0(spa_export(oldname, &config, B_FALSE, B_FALSE));
5028
5029 ztest_walk_pool_directory("pools after export");
5030
5031 /*
5032 * Try to import it.
5033 */
5034 newconfig = spa_tryimport(config);
5035 ASSERT(newconfig != NULL);
5036 nvlist_free(newconfig);
5037
5038 /*
5039 * Import it under the new name.
5040 */
5041 VERIFY0(spa_import(newname, config, NULL, 0));
5042
5043 ztest_walk_pool_directory("pools after import");
5044
5045 /*
5046 * Try to import it again -- should fail with EEXIST.
5047 */
5048 VERIFY3U(EEXIST, ==, spa_import(newname, config, NULL, 0));
5049
5050 /*
5051 * Try to import it under a different name -- should fail with EEXIST.
5052 */
5053 VERIFY3U(EEXIST, ==, spa_import(oldname, config, NULL, 0));
5054
5055 /*
5056 * Verify that the pool is no longer visible under the old name.
5057 */
5058 VERIFY3U(ENOENT, ==, spa_open(oldname, &spa, FTAG));
5059
5060 /*
5061 * Verify that we can open and close the pool using the new name.
5062 */
5063 VERIFY0(spa_open(newname, &spa, FTAG));
5064 ASSERT(pool_guid == spa_guid(spa));
5065 spa_close(spa, FTAG);
5066
5067 nvlist_free(config);
5068 }
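/*
 * Ownership of the config nvlist above: spa_export() hands back the
 * pool config, spa_tryimport() returns an independent copy (freed
 * immediately), and spa_import() does not consume the caller's
 * config, so the function frees it last, once the guid check has
 * confirmed that the same pool came back under the new name.
 */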
5069
5070 static void
5071 ztest_resume(spa_t *spa)
5072 {
5073 if (spa_suspended(spa) && ztest_opts.zo_verbose >= 6)
5074 (void) printf("resuming from suspended state\n");
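/*
 * Resuming takes two steps: vdev_clear() below clears the vdev error
 * state that caused the suspension, then zio_resume() reissues the
 * I/Os that were parked while the pool was suspended.
 */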
5075 spa_vdev_state_enter(spa, SCL_NONE);
5076 vdev_clear(spa, NULL);
5077 (void) spa_vdev_state_exit(spa, NULL, 0);
5078 (void) zio_resume(spa);
5079 }
5080
5081 static void *
5082 ztest_resume_thread(void *arg)
5083 {
5201 (void) dmu_objset_find(name, ztest_objset_destroy_cb, NULL,
5202 DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN);
5203 }
5204
5205 static void
5206 ztest_dataset_dirobj_verify(ztest_ds_t *zd)
5207 {
5208 uint64_t usedobjs, dirobjs, scratch;
5209
5210 /*
5211 * ZTEST_DIROBJ is the object directory for the entire dataset.
5212 * Therefore, the number of objects in use should equal the
5213 * number of ZTEST_DIROBJ entries, +1 for ZTEST_DIROBJ itself.
5214 * If not, we have an object leak.
5215 *
5216 * Note that we can only check this in ztest_dataset_open(),
5217 * when the open-context and syncing-context values agree.
5218 * That's because zap_count() returns the open-context value,
5219 * while dmu_objset_space() returns the rootbp fill count.
5220 */
5221 VERIFY0(zap_count(zd->zd_os, ZTEST_DIROBJ, &dirobjs));
5222 dmu_objset_space(zd->zd_os, &scratch, &scratch, &usedobjs, &scratch);
5223 ASSERT3U(dirobjs + 1, ==, usedobjs);
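/*
 * For example, a dataset whose ZTEST_DIROBJ holds 7 name-to-object
 * entries must report usedobjs == 8: the 7 objects plus ZTEST_DIROBJ
 * itself.  Anything else means an object was leaked or lost.
 */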
5224 }
5225
5226 static int
5227 ztest_dataset_open(int d)
5228 {
5229 ztest_ds_t *zd = &ztest_ds[d];
5230 uint64_t committed_seq = ZTEST_GET_SHARED_DS(d)->zd_seq;
5231 objset_t *os;
5232 zilog_t *zilog;
5233 char name[MAXNAMELEN];
5234 int error;
5235
5236 ztest_dataset_name(name, ztest_opts.zo_pool, d);
5237
5238 (void) rw_rdlock(&ztest_name_lock);
5239
5240 error = ztest_dataset_create(name);
5241 if (error == ENOSPC) {
5242 (void) rw_unlock(&ztest_name_lock);
5243 ztest_record_enospc(FTAG);
5244 return (error);
5245 }
5246 ASSERT(error == 0 || error == EEXIST);
5247
5248 VERIFY0(dmu_objset_hold(name, zd, &os));
5249 (void) rw_unlock(&ztest_name_lock);
5250
5251 ztest_zd_init(zd, ZTEST_GET_SHARED_DS(d), os);
5252
5253 zilog = zd->zd_zilog;
5254
5255 if (zilog->zl_header->zh_claim_lr_seq != 0 &&
5256 zilog->zl_header->zh_claim_lr_seq < committed_seq)
5257 fatal(0, "missing log records: claimed %llu < committed %llu",
5258 zilog->zl_header->zh_claim_lr_seq, committed_seq);
5259
5260 ztest_dataset_dirobj_verify(zd);
5261
5262 zil_replay(os, zd, ztest_replay_vector);
5263
5264 ztest_dataset_dirobj_verify(zd);
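/*
 * The dirobj invariant is deliberately checked both before and after
 * replay: zil_replay() re-executes create and remove records, and it
 * must leave the object count consistent with the ZTEST_DIROBJ
 * entries just as the original execution did.
 */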
5265
5266 if (ztest_opts.zo_verbose >= 6)
5267 (void) printf("%s replay %llu blocks, %llu records, seq %llu\n",
5268 zd->zd_name,
5317 zs->zs_thread_stop = MIN(zs->zs_thread_stop, zs->zs_proc_stop);
5318 zs->zs_thread_kill = zs->zs_thread_stop;
5319 if (ztest_random(100) < ztest_opts.zo_killrate) {
5320 zs->zs_thread_kill -=
5321 ztest_random(ztest_opts.zo_passtime * NANOSEC);
5322 }
5323
5324 (void) _mutex_init(&zcl.zcl_callbacks_lock, USYNC_THREAD, NULL);
5325
5326 list_create(&zcl.zcl_callbacks, sizeof (ztest_cb_data_t),
5327 offsetof(ztest_cb_data_t, zcd_node));
5328
5329 /*
5330 * Open our pool.
5331 */
5332 kernel_init(FREAD | FWRITE);
5333 VERIFY(spa_open(ztest_opts.zo_pool, &spa, FTAG) == 0);
5334 spa->spa_debug = B_TRUE;
5335 ztest_spa = spa;
5336
5337 VERIFY0(dmu_objset_hold(ztest_opts.zo_pool, FTAG, &os));
5338 zs->zs_guid = dmu_objset_fsid_guid(os);
5339 dmu_objset_rele(os, FTAG);
5340
5341 spa->spa_dedup_ditto = 2 * ZIO_DEDUPDITTO_MIN;
5342
5343 /*
5344 * We don't expect the pool to suspend unless maxfaults == 0,
5345 * in which case ztest_fault_inject() temporarily takes away
5346 * the only valid replica.
5347 */
5348 if (MAXFAULTS() == 0)
5349 spa->spa_failmode = ZIO_FAILURE_MODE_WAIT;
5350 else
5351 spa->spa_failmode = ZIO_FAILURE_MODE_PANIC;
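/*
 * Put differently: with maxfaults == 0, ztest_fault_inject() can
 * suspend the pool legitimately, so wait for the resume thread
 * (created below) to clear it; with redundancy available, any
 * suspension is a real bug, so panic on the spot.
 */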
5352
5353 /*
5354 * Create a thread to periodically resume suspended I/O.
5355 */
5356 VERIFY(thr_create(0, 0, ztest_resume_thread, spa, THR_BOUND,
5357 &resume_tid) == 0);
5459
5460 list_destroy(&zcl.zcl_callbacks);
5461
5462 (void) _mutex_destroy(&zcl.zcl_callbacks_lock);
5463
5464 (void) rwlock_destroy(&ztest_name_lock);
5465 (void) _mutex_destroy(&ztest_vdev_lock);
5466 }
5467
5468 static void
5469 ztest_freeze(void)
5470 {
5471 ztest_ds_t *zd = &ztest_ds[0];
5472 spa_t *spa;
5473 int numloops = 0;
5474
5475 if (ztest_opts.zo_verbose >= 3)
5476 (void) printf("testing spa_freeze()...\n");
5477
5478 kernel_init(FREAD | FWRITE);
5479 VERIFY0(spa_open(ztest_opts.zo_pool, &spa, FTAG));
5480 VERIFY0(ztest_dataset_open(0));
5481
5482 /*
5483 * Force the first log block to be transactionally allocated.
5484 * We have to do this before we freeze the pool -- otherwise
5485 * the log chain won't be anchored.
5486 */
5487 while (BP_IS_HOLE(&zd->zd_zilog->zl_header->zh_log)) {
5488 ztest_dmu_object_alloc_free(zd, 0);
5489 zil_commit(zd->zd_zilog, 0);
5490 }
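/*
 * The anchoring matters because the ZIL header's log-chain blkptr is
 * only written out by spa_sync(); once the pool is frozen that never
 * runs again, so the first log block must already be referenced on
 * disk or replay would find an empty chain.
 */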
5491
5492 txg_wait_synced(spa_get_dsl(spa), 0);
5493
5494 /*
5495 * Freeze the pool. This stops spa_sync() from doing anything,
5496 * so that the only way to record changes from now on is the ZIL.
5497 */
5498 spa_freeze(spa);
5499
5500 /*
5511 txg_wait_synced(spa_get_dsl(spa), 0);
5512 }
5513
5514 /*
5515 * Commit all of the changes we just generated.
5516 */
5517 zil_commit(zd->zd_zilog, 0);
5518 txg_wait_synced(spa_get_dsl(spa), 0);
5519
5520 /*
5521 * Close our dataset and close the pool.
5522 */
5523 ztest_dataset_close(0);
5524 spa_close(spa, FTAG);
5525 kernel_fini();
5526
5527 /*
5528 * Open and close the pool and dataset to induce log replay.
5529 */
5530 kernel_init(FREAD | FWRITE);
5531 VERIFY0(spa_open(ztest_opts.zo_pool, &spa, FTAG));
5532 VERIFY0(ztest_dataset_open(0));
5533 ztest_dataset_close(0);
5534 spa_close(spa, FTAG);
5535 kernel_fini();
5536 }
5537
5538 void
5539 print_time(hrtime_t t, char *timebuf)
5540 {
5541 hrtime_t s = t / NANOSEC;
5542 hrtime_t m = s / 60;
5543 hrtime_t h = m / 60;
5544 hrtime_t d = h / 24;
5545
5546 s -= m * 60;
5547 m -= h * 60;
5548 h -= d * 24;
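/*
 * Worked example: t equivalent to 93784 seconds gives s = 93784,
 * m = 1563, h = 26, d = 1 before the adjustments, and d = 1, h = 2,
 * m = 3, s = 4 afterwards.
 */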
5549
5550 timebuf[0] = '\0';
5551
5552 if (d)
5585
5586 VERIFY(_mutex_init(&ztest_vdev_lock, USYNC_THREAD, NULL) == 0);
5587 VERIFY(rwlock_init(&ztest_name_lock, USYNC_THREAD, NULL) == 0);
5588
5589 kernel_init(FREAD | FWRITE);
5590
5591 /*
5592 * Create the storage pool.
5593 */
5594 (void) spa_destroy(ztest_opts.zo_pool);
5595 ztest_shared->zs_vdev_next_leaf = 0;
5596 zs->zs_splits = 0;
5597 zs->zs_mirrors = ztest_opts.zo_mirrors;
5598 nvroot = make_vdev_root(NULL, NULL, ztest_opts.zo_vdev_size, 0,
5599 0, ztest_opts.zo_raidz, zs->zs_mirrors, 1);
5600 props = make_random_props();
5601 for (int i = 0; i < SPA_FEATURES; i++) {
5602 char buf[1024];
5603 (void) snprintf(buf, sizeof (buf), "feature@%s",
5604 spa_feature_table[i].fi_uname);
5605 VERIFY0(nvlist_add_uint64(props, buf, 0));
5606 }
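/*
 * Presumably a uint64 value of 0 for a "feature@" property at pool
 * creation asks for the feature to be enabled (though not yet
 * active); the loop above requests that for every entry in
 * spa_feature_table so later tests can exercise all of them.
 */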
5607 VERIFY0(spa_create(ztest_opts.zo_pool, nvroot, props, NULL));
5608 nvlist_free(nvroot);
5609
5610 VERIFY0(spa_open(ztest_opts.zo_pool, &spa, FTAG));
5611 zs->zs_metaslab_sz =
5612 1ULL << spa->spa_root_vdev->vdev_child[0]->vdev_ms_shift;
5613
5614 spa_close(spa, FTAG);
5615
5616 kernel_fini();
5617
5618 ztest_run_zdb(ztest_opts.zo_pool);
5619
5620 ztest_freeze();
5621
5622 ztest_run_zdb(ztest_opts.zo_pool);
5623
5624 (void) rwlock_destroy(&ztest_name_lock);
5625 (void) _mutex_destroy(&ztest_vdev_lock);
5626 }
5627
5628 static void
5629 setup_fds(void)
5630 {
5647
5648 size = hdr->zh_hdr_size;
5649 size += hdr->zh_opts_size;
5650 size += hdr->zh_size;
5651 size += hdr->zh_stats_size * hdr->zh_stats_count;
5652 size += hdr->zh_ds_size * hdr->zh_ds_count;
5653
5654 return (size);
5655 }
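/*
 * The shared region is laid out as header, options, the
 * ztest_shared_t proper, one callstate slot per test function, and
 * one dataset slot per dataset, so its total size is
 *
 *	zh_hdr_size + zh_opts_size + zh_size +
 *	zh_stats_size * zh_stats_count + zh_ds_size * zh_ds_count,
 *
 * exactly the sum accumulated above.
 */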
5656
5657 static void
5658 setup_hdr(void)
5659 {
5660 int size;
5661 ztest_shared_hdr_t *hdr;
5662
5663 hdr = (void *)mmap(0, P2ROUNDUP(sizeof (*hdr), getpagesize()),
5664 PROT_READ | PROT_WRITE, MAP_SHARED, ZTEST_FD_DATA, 0);
5665 ASSERT(hdr != MAP_FAILED);
5666
5667 VERIFY0(ftruncate(ZTEST_FD_DATA, sizeof (ztest_shared_hdr_t)));
5668
5669 hdr->zh_hdr_size = sizeof (ztest_shared_hdr_t);
5670 hdr->zh_opts_size = sizeof (ztest_shared_opts_t);
5671 hdr->zh_size = sizeof (ztest_shared_t);
5672 hdr->zh_stats_size = sizeof (ztest_shared_callstate_t);
5673 hdr->zh_stats_count = ZTEST_FUNCS;
5674 hdr->zh_ds_size = sizeof (ztest_shared_ds_t);
5675 hdr->zh_ds_count = ztest_opts.zo_datasets;
5676
5677 size = shared_data_size(hdr);
5678 VERIFY0(ftruncate(ZTEST_FD_DATA, size));
5679
5680 (void) munmap((caddr_t)hdr, P2ROUNDUP(sizeof (*hdr), getpagesize()));
5681 }
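/*
 * Two ftruncate() calls are intentional: the file is first sized to
 * hold just the header so the counts above can be stored, and only
 * then grown to shared_data_size(), which depends on those counts,
 * to cover the entire shared region.
 */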
5682
5683 static void
5684 setup_data(void)
5685 {
5686 int size, offset;
5687 ztest_shared_hdr_t *hdr;
5688 uint8_t *buf;
5689
5690 hdr = (void *)mmap(0, P2ROUNDUP(sizeof (*hdr), getpagesize()),
5691 PROT_READ, MAP_SHARED, ZTEST_FD_DATA, 0);
5692 ASSERT(hdr != MAP_FAILED);
5693
5694 size = shared_data_size(hdr);
5695
5696 (void) munmap((caddr_t)hdr, P2ROUNDUP(sizeof (*hdr), getpagesize()));
5697 hdr = ztest_shared_hdr = (void *)mmap(0, P2ROUNDUP(size, getpagesize()),
5698 PROT_READ | PROT_WRITE, MAP_SHARED, ZTEST_FD_DATA, 0);