3747 txg commit callbacks don't work
Submitted by: Will Andrews <willa@spectralogic.com>
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
--- old/usr/src/cmd/ztest/ztest.c
+++ new/usr/src/cmd/ztest/ztest.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 * Copyright (c) 2012 by Delphix. All rights reserved.
24 24 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
25 25 */
26 26
27 27 /*
28 28 * The objective of this program is to provide a DMU/ZAP/SPA stress test
29 29 * that runs entirely in userland, is easy to use, and easy to extend.
30 30 *
31 31 * The overall design of the ztest program is as follows:
32 32 *
33 33 * (1) For each major functional area (e.g. adding vdevs to a pool,
34 34 * creating and destroying datasets, reading and writing objects, etc)
35 35 * we have a simple routine to test that functionality. These
36 36 * individual routines do not have to do anything "stressful".
37 37 *
38 38 * (2) We turn these simple functionality tests into a stress test by
39 39 * running them all in parallel, with as many threads as desired,
40 40 * and spread across as many datasets, objects, and vdevs as desired.
41 41 *
42 42 * (3) While all this is happening, we inject faults into the pool to
43 43 * verify that self-healing data really works.
44 44 *
45 45 * (4) Every time we open a dataset, we change its checksum and compression
46 46 * functions. Thus even individual objects vary from block to block
47 47 * in which checksum they use and whether they're compressed.
48 48 *
49 49 * (5) To verify that we never lose on-disk consistency after a crash,
50 50 * we run the entire test in a child of the main process.
51 51 * At random times, the child self-immolates with a SIGKILL.
52 52 * This is the software equivalent of pulling the power cord.
53 53 * The parent then runs the test again, using the existing
54 54 * storage pool, as many times as desired. If backwards compatibility
55 55 * testing is enabled ztest will sometimes run the "older" version
56 56 * of ztest after a SIGKILL.
57 57 *
58 58 * (6) To verify that we don't have future leaks or temporal incursions,
59 59 * many of the functional tests record the transaction group number
60 60 * as part of their data. When reading old data, they verify that
61 61 * the transaction group number is less than the current, open txg.
62 62 * If you add a new test, please do this if applicable.
63 63 *
64 64 * When run with no arguments, ztest runs for about five minutes and
65 65 * produces no output if successful. To get a little bit of information,
66 66 * specify -V. To get more information, specify -VV, and so on.
67 67 *
68 68 * To turn this into an overnight stress test, use -T to specify run time.
69 69 *
70 70 * You can ask for more vdevs [-v], datasets [-d], or threads [-t]
71 71 * to increase the pool capacity, fanout, and overall stress level.
72 72 *
73 73 * Use the -k option to set the desired frequency of kills.
74 74 *
75 75 * When ztest invokes itself it passes all relevant information through a
76 76 * temporary file which is mmap-ed in the child process. This allows shared
77 77 * memory to survive the exec syscall. The ztest_shared_hdr_t struct is always
78 78 * stored at offset 0 of this file and contains information on the size and
79 79 * number of shared structures in the file. The information stored in this file
80 80 * must remain backwards compatible with older versions of ztest so that
81 81 * ztest can invoke them during backwards compatibility testing (-B).
82 82 */
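[Reviewer note] As background for the mmap-over-exec scheme described in the comment above, here is a minimal standalone sketch -- not part of ztest, all names are illustrative -- of how state written through a MAP_SHARED mapping of a temporary file survives into a re-exec'd child, which simply re-opens and re-maps the file by name (error handling mostly elided):

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/mman.h>
	#include <sys/wait.h>
	#include <unistd.h>

	int
	main(void)
	{
		char path[] = "/tmp/shm_demo.XXXXXX";
		int fd = mkstemp(path);
		uint64_t *shared;

		if (fd == -1 || ftruncate(fd, sizeof (*shared)) != 0)
			return (1);
		shared = mmap(NULL, sizeof (*shared), PROT_READ | PROT_WRITE,
		    MAP_SHARED, fd, 0);
		if (shared == MAP_FAILED)
			return (1);
		*shared = 42;		/* parent writes the shared state */

		if (fork() == 0) {
			/*
			 * A real child would exec here; after exec it can
			 * re-open the file by name and re-map it, and the
			 * contents persist because the file is the backing
			 * store for the mapping.
			 */
			int cfd = open(path, O_RDWR);
			uint64_t *child_view = mmap(NULL,
			    sizeof (*child_view), PROT_READ | PROT_WRITE,
			    MAP_SHARED, cfd, 0);
			(void) printf("child sees %llu\n",
			    (unsigned long long)*child_view);
			_exit(0);
		}
		(void) wait(NULL);
		(void) unlink(path);
		return (0);
	}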
83 83
84 84 #include <sys/zfs_context.h>
85 85 #include <sys/spa.h>
86 86 #include <sys/dmu.h>
87 87 #include <sys/txg.h>
88 88 #include <sys/dbuf.h>
89 89 #include <sys/zap.h>
90 90 #include <sys/dmu_objset.h>
91 91 #include <sys/poll.h>
92 92 #include <sys/stat.h>
93 93 #include <sys/time.h>
94 94 #include <sys/wait.h>
95 95 #include <sys/mman.h>
96 96 #include <sys/resource.h>
97 97 #include <sys/zio.h>
98 98 #include <sys/zil.h>
99 99 #include <sys/zil_impl.h>
100 100 #include <sys/vdev_impl.h>
101 101 #include <sys/vdev_file.h>
102 102 #include <sys/spa_impl.h>
103 103 #include <sys/metaslab_impl.h>
104 104 #include <sys/dsl_prop.h>
105 105 #include <sys/dsl_dataset.h>
106 106 #include <sys/dsl_destroy.h>
107 107 #include <sys/dsl_scan.h>
108 108 #include <sys/zio_checksum.h>
109 109 #include <sys/refcount.h>
110 110 #include <sys/zfeature.h>
111 111 #include <sys/dsl_userhold.h>
112 112 #include <stdio.h>
113 113 #include <stdio_ext.h>
114 114 #include <stdlib.h>
115 115 #include <unistd.h>
116 116 #include <signal.h>
117 117 #include <umem.h>
118 118 #include <dlfcn.h>
119 119 #include <ctype.h>
120 120 #include <math.h>
121 121 #include <sys/fs/zfs.h>
122 122 #include <libnvpair.h>
123 123
124 124 static int ztest_fd_data = -1;
125 125 static int ztest_fd_rand = -1;
126 126
127 127 typedef struct ztest_shared_hdr {
128 128 uint64_t zh_hdr_size;
129 129 uint64_t zh_opts_size;
130 130 uint64_t zh_size;
131 131 uint64_t zh_stats_size;
132 132 uint64_t zh_stats_count;
133 133 uint64_t zh_ds_size;
134 134 uint64_t zh_ds_count;
135 135 } ztest_shared_hdr_t;
136 136
137 137 static ztest_shared_hdr_t *ztest_shared_hdr;
138 138
139 139 typedef struct ztest_shared_opts {
140 140 char zo_pool[MAXNAMELEN];
141 141 char zo_dir[MAXNAMELEN];
142 142 char zo_alt_ztest[MAXNAMELEN];
143 143 char zo_alt_libpath[MAXNAMELEN];
144 144 uint64_t zo_vdevs;
145 145 uint64_t zo_vdevtime;
146 146 size_t zo_vdev_size;
147 147 int zo_ashift;
148 148 int zo_mirrors;
149 149 int zo_raidz;
150 150 int zo_raidz_parity;
151 151 int zo_datasets;
152 152 int zo_threads;
153 153 uint64_t zo_passtime;
154 154 uint64_t zo_killrate;
155 155 int zo_verbose;
156 156 int zo_init;
157 157 uint64_t zo_time;
158 158 uint64_t zo_maxloops;
159 159 uint64_t zo_metaslab_gang_bang;
160 160 } ztest_shared_opts_t;
161 161
162 162 static const ztest_shared_opts_t ztest_opts_defaults = {
163 163 .zo_pool = { 'z', 't', 'e', 's', 't', '\0' },
164 164 .zo_dir = { '/', 't', 'm', 'p', '\0' },
165 165 .zo_alt_ztest = { '\0' },
166 166 .zo_alt_libpath = { '\0' },
167 167 .zo_vdevs = 5,
168 168 .zo_ashift = SPA_MINBLOCKSHIFT,
169 169 .zo_mirrors = 2,
170 170 .zo_raidz = 4,
171 171 .zo_raidz_parity = 1,
172 172 .zo_vdev_size = SPA_MINDEVSIZE,
173 173 .zo_datasets = 7,
174 174 .zo_threads = 23,
175 175 .zo_passtime = 60, /* 60 seconds */
176 176 .zo_killrate = 70, /* 70% kill rate */
177 177 .zo_verbose = 0,
178 178 .zo_init = 1,
179 179 .zo_time = 300, /* 5 minutes */
180 180 .zo_maxloops = 50, /* max loops during spa_freeze() */
181 181 .zo_metaslab_gang_bang = 32 << 10
182 182 };
183 183
184 184 extern uint64_t metaslab_gang_bang;
185 185 extern uint64_t metaslab_df_alloc_threshold;
186 186
187 187 static ztest_shared_opts_t *ztest_shared_opts;
188 188 static ztest_shared_opts_t ztest_opts;
189 189
190 190 typedef struct ztest_shared_ds {
191 191 uint64_t zd_seq;
192 192 } ztest_shared_ds_t;
193 193
194 194 static ztest_shared_ds_t *ztest_shared_ds;
195 195 #define ZTEST_GET_SHARED_DS(d) (&ztest_shared_ds[d])
196 196
197 197 #define BT_MAGIC 0x123456789abcdefULL
198 198 #define MAXFAULTS() \
199 199 (MAX(zs->zs_mirrors, 1) * (ztest_opts.zo_raidz_parity + 1) - 1)
200 200
201 201 enum ztest_io_type {
202 202 ZTEST_IO_WRITE_TAG,
203 203 ZTEST_IO_WRITE_PATTERN,
204 204 ZTEST_IO_WRITE_ZEROES,
205 205 ZTEST_IO_TRUNCATE,
206 206 ZTEST_IO_SETATTR,
207 207 ZTEST_IO_REWRITE,
208 208 ZTEST_IO_TYPES
209 209 };
210 210
211 211 typedef struct ztest_block_tag {
212 212 uint64_t bt_magic;
213 213 uint64_t bt_objset;
214 214 uint64_t bt_object;
215 215 uint64_t bt_offset;
216 216 uint64_t bt_gen;
217 217 uint64_t bt_txg;
218 218 uint64_t bt_crtxg;
219 219 } ztest_block_tag_t;
220 220
221 221 typedef struct bufwad {
222 222 uint64_t bw_index;
223 223 uint64_t bw_txg;
224 224 uint64_t bw_data;
225 225 } bufwad_t;
226 226
227 227 /*
228 228 * XXX -- fix zfs range locks to be generic so we can use them here.
229 229 */
230 230 typedef enum {
231 231 RL_READER,
232 232 RL_WRITER,
233 233 RL_APPEND
234 234 } rl_type_t;
235 235
236 236 typedef struct rll {
237 237 void *rll_writer;
238 238 int rll_readers;
239 239 mutex_t rll_lock;
240 240 cond_t rll_cv;
241 241 } rll_t;
242 242
243 243 typedef struct rl {
244 244 uint64_t rl_object;
245 245 uint64_t rl_offset;
246 246 uint64_t rl_size;
247 247 rll_t *rl_lock;
248 248 } rl_t;
249 249
250 250 #define ZTEST_RANGE_LOCKS 64
251 251 #define ZTEST_OBJECT_LOCKS 64
252 252
253 253 /*
254 254 * Object descriptor. Used as a template for object lookup/create/remove.
255 255 */
256 256 typedef struct ztest_od {
257 257 uint64_t od_dir;
258 258 uint64_t od_object;
259 259 dmu_object_type_t od_type;
260 260 dmu_object_type_t od_crtype;
261 261 uint64_t od_blocksize;
262 262 uint64_t od_crblocksize;
263 263 uint64_t od_gen;
264 264 uint64_t od_crgen;
265 265 char od_name[MAXNAMELEN];
266 266 } ztest_od_t;
267 267
268 268 /*
269 269 * Per-dataset state.
270 270 */
271 271 typedef struct ztest_ds {
272 272 ztest_shared_ds_t *zd_shared;
273 273 objset_t *zd_os;
274 274 rwlock_t zd_zilog_lock;
275 275 zilog_t *zd_zilog;
276 276 ztest_od_t *zd_od; /* debugging aid */
277 277 char zd_name[MAXNAMELEN];
278 278 mutex_t zd_dirobj_lock;
279 279 rll_t zd_object_lock[ZTEST_OBJECT_LOCKS];
280 280 rll_t zd_range_lock[ZTEST_RANGE_LOCKS];
281 281 } ztest_ds_t;
282 282
283 283 /*
284 284 * Per-iteration state.
285 285 */
286 286 typedef void ztest_func_t(ztest_ds_t *zd, uint64_t id);
287 287
288 288 typedef struct ztest_info {
289 289 ztest_func_t *zi_func; /* test function */
290 290 uint64_t zi_iters; /* iterations per execution */
291 291 uint64_t *zi_interval; /* execute every <interval> seconds */
292 292 } ztest_info_t;
293 293
294 294 typedef struct ztest_shared_callstate {
295 295 uint64_t zc_count; /* per-pass count */
296 296 uint64_t zc_time; /* per-pass time */
297 297 uint64_t zc_next; /* next time to call this function */
298 298 } ztest_shared_callstate_t;
299 299
300 300 static ztest_shared_callstate_t *ztest_shared_callstate;
301 301 #define ZTEST_GET_SHARED_CALLSTATE(c) (&ztest_shared_callstate[c])
302 302
303 303 /*
304 304 * Note: these aren't static because we want dladdr() to work.
305 305 */
306 306 ztest_func_t ztest_dmu_read_write;
307 307 ztest_func_t ztest_dmu_write_parallel;
308 308 ztest_func_t ztest_dmu_object_alloc_free;
309 309 ztest_func_t ztest_dmu_commit_callbacks;
310 310 ztest_func_t ztest_zap;
311 311 ztest_func_t ztest_zap_parallel;
312 312 ztest_func_t ztest_zil_commit;
313 313 ztest_func_t ztest_zil_remount;
314 314 ztest_func_t ztest_dmu_read_write_zcopy;
315 315 ztest_func_t ztest_dmu_objset_create_destroy;
316 316 ztest_func_t ztest_dmu_prealloc;
317 317 ztest_func_t ztest_fzap;
318 318 ztest_func_t ztest_dmu_snapshot_create_destroy;
319 319 ztest_func_t ztest_dsl_prop_get_set;
320 320 ztest_func_t ztest_spa_prop_get_set;
321 321 ztest_func_t ztest_spa_create_destroy;
322 322 ztest_func_t ztest_fault_inject;
323 323 ztest_func_t ztest_ddt_repair;
324 324 ztest_func_t ztest_dmu_snapshot_hold;
325 325 ztest_func_t ztest_spa_rename;
326 326 ztest_func_t ztest_scrub;
327 327 ztest_func_t ztest_dsl_dataset_promote_busy;
328 328 ztest_func_t ztest_vdev_attach_detach;
329 329 ztest_func_t ztest_vdev_LUN_growth;
330 330 ztest_func_t ztest_vdev_add_remove;
331 331 ztest_func_t ztest_vdev_aux_add_remove;
332 332 ztest_func_t ztest_split_pool;
333 333 ztest_func_t ztest_reguid;
334 334 ztest_func_t ztest_spa_upgrade;
335 335
336 336 uint64_t zopt_always = 0ULL * NANOSEC; /* all the time */
337 337 uint64_t zopt_incessant = 1ULL * NANOSEC / 10; /* every 1/10 second */
338 338 uint64_t zopt_often = 1ULL * NANOSEC; /* every second */
339 339 uint64_t zopt_sometimes = 10ULL * NANOSEC; /* every 10 seconds */
340 340 uint64_t zopt_rarely = 60ULL * NANOSEC; /* every 60 seconds */
341 341
342 342 ztest_info_t ztest_info[] = {
343 343 { ztest_dmu_read_write, 1, &zopt_always },
344 344 { ztest_dmu_write_parallel, 10, &zopt_always },
345 345 { ztest_dmu_object_alloc_free, 1, &zopt_always },
346 346 { ztest_dmu_commit_callbacks, 1, &zopt_always },
347 347 { ztest_zap, 30, &zopt_always },
348 348 { ztest_zap_parallel, 100, &zopt_always },
349 349 { ztest_split_pool, 1, &zopt_always },
350 350 { ztest_zil_commit, 1, &zopt_incessant },
351 351 { ztest_zil_remount, 1, &zopt_sometimes },
352 352 { ztest_dmu_read_write_zcopy, 1, &zopt_often },
353 353 { ztest_dmu_objset_create_destroy, 1, &zopt_often },
354 354 { ztest_dsl_prop_get_set, 1, &zopt_often },
355 355 { ztest_spa_prop_get_set, 1, &zopt_sometimes },
356 356 #if 0
357 357 { ztest_dmu_prealloc, 1, &zopt_sometimes },
358 358 #endif
359 359 { ztest_fzap, 1, &zopt_sometimes },
360 360 { ztest_dmu_snapshot_create_destroy, 1, &zopt_sometimes },
361 361 { ztest_spa_create_destroy, 1, &zopt_sometimes },
362 362 { ztest_fault_inject, 1, &zopt_sometimes },
363 363 { ztest_ddt_repair, 1, &zopt_sometimes },
364 364 { ztest_dmu_snapshot_hold, 1, &zopt_sometimes },
365 365 { ztest_reguid, 1, &zopt_sometimes },
366 366 { ztest_spa_rename, 1, &zopt_rarely },
367 367 { ztest_scrub, 1, &zopt_rarely },
368 368 { ztest_spa_upgrade, 1, &zopt_rarely },
369 369 { ztest_dsl_dataset_promote_busy, 1, &zopt_rarely },
370 370 { ztest_vdev_attach_detach, 1, &zopt_sometimes },
371 371 { ztest_vdev_LUN_growth, 1, &zopt_rarely },
372 372 { ztest_vdev_add_remove, 1,
373 373 &ztest_opts.zo_vdevtime },
374 374 { ztest_vdev_aux_add_remove, 1,
375 375 &ztest_opts.zo_vdevtime },
376 376 };
377 377
378 378 #define ZTEST_FUNCS (sizeof (ztest_info) / sizeof (ztest_info_t))
379 379
380 380 /*
381 381 * The following struct is used to hold a list of uncalled commit callbacks.
382 382 * The callbacks are ordered by txg number.
383 383 */
384 384 typedef struct ztest_cb_list {
385 385 mutex_t zcl_callbacks_lock;
386 386 list_t zcl_callbacks;
387 387 } ztest_cb_list_t;
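[Reviewer note] For context on the callbacks tracked by this list, below is a minimal hedged sketch -- not ztest code; the example_* names are invented for illustration, and it assumes the headers this file already includes -- of attaching a commit callback to a transaction with the DMU's dmu_tx_callback_register() interface, which ztest_dmu_commit_callbacks() exercises. The callback fires once the transaction's txg has committed, or with a non-zero error if the transaction did not commit:

	static void
	example_commit_cb(void *arg, int error)
	{
		uint64_t *txgp = arg;

		/* error != 0 indicates the transaction did not commit */
		if (error == 0)
			(void) printf("txg %llu committed\n",
			    (u_longlong_t)*txgp);
	}

	static void
	example_register_cb(objset_t *os, uint64_t object, uint64_t *txgp)
	{
		dmu_tx_t *tx = dmu_tx_create(os);

		dmu_tx_hold_write(tx, object, 0, sizeof (uint64_t));
		VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
		*txgp = dmu_tx_get_txg(tx);
		dmu_tx_callback_register(tx, example_commit_cb, txgp);
		dmu_tx_commit(tx);
	}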
388 388
389 389 /*
390 390 * Stuff we need to share writably between parent and child.
391 391 */
392 392 typedef struct ztest_shared {
393 393 boolean_t zs_do_init;
394 394 hrtime_t zs_proc_start;
395 395 hrtime_t zs_proc_stop;
396 396 hrtime_t zs_thread_start;
397 397 hrtime_t zs_thread_stop;
398 398 hrtime_t zs_thread_kill;
399 399 uint64_t zs_enospc_count;
400 400 uint64_t zs_vdev_next_leaf;
401 401 uint64_t zs_vdev_aux;
402 402 uint64_t zs_alloc;
403 403 uint64_t zs_space;
404 404 uint64_t zs_splits;
405 405 uint64_t zs_mirrors;
406 406 uint64_t zs_metaslab_sz;
407 407 uint64_t zs_metaslab_df_alloc_threshold;
408 408 uint64_t zs_guid;
409 409 } ztest_shared_t;
410 410
411 411 #define ID_PARALLEL -1ULL
412 412
413 413 static char ztest_dev_template[] = "%s/%s.%llua";
414 414 static char ztest_aux_template[] = "%s/%s.%s.%llu";
415 415 ztest_shared_t *ztest_shared;
416 416
417 417 static spa_t *ztest_spa = NULL;
418 418 static ztest_ds_t *ztest_ds;
419 419
420 420 static mutex_t ztest_vdev_lock;
421 421
422 422 /*
423 423 * The ztest_name_lock protects the pool and dataset namespace used by
424 424 * the individual tests. To modify the namespace, consumers must grab
425 425 * this lock as writer. Grabbing the lock as reader will ensure that the
426 426 * namespace does not change while the lock is held.
427 427 */
428 428 static rwlock_t ztest_name_lock;
429 429
430 430 static boolean_t ztest_dump_core = B_TRUE;
431 431 static boolean_t ztest_exiting;
432 432
433 433 /* Global commit callback list */
434 434 static ztest_cb_list_t zcl;
435 435
436 436 enum ztest_object {
437 437 ZTEST_META_DNODE = 0,
438 438 ZTEST_DIROBJ,
439 439 ZTEST_OBJECTS
440 440 };
441 441
442 442 static void usage(boolean_t) __NORETURN;
443 443
444 444 /*
445 445 * These libumem hooks provide a reasonable set of defaults for the allocator's
446 446 * debugging facilities.
447 447 */
448 448 const char *
449 449 _umem_debug_init()
450 450 {
451 451 return ("default,verbose"); /* $UMEM_DEBUG setting */
452 452 }
453 453
454 454 const char *
455 455 _umem_logging_init(void)
456 456 {
457 457 return ("fail,contents"); /* $UMEM_LOGGING setting */
458 458 }
459 459
460 460 #define FATAL_MSG_SZ 1024
461 461
462 462 char *fatal_msg;
463 463
464 464 static void
465 465 fatal(int do_perror, char *message, ...)
466 466 {
467 467 va_list args;
468 468 int save_errno = errno;
469 469 char buf[FATAL_MSG_SZ];
470 470
471 471 (void) fflush(stdout);
472 472
473 473 va_start(args, message);
474 474 (void) sprintf(buf, "ztest: ");
475 475 /* LINTED */
476 476 (void) vsprintf(buf + strlen(buf), message, args);
477 477 va_end(args);
478 478 if (do_perror) {
479 479 (void) snprintf(buf + strlen(buf), FATAL_MSG_SZ - strlen(buf),
480 480 ": %s", strerror(save_errno));
481 481 }
482 482 (void) fprintf(stderr, "%s\n", buf);
483 483 fatal_msg = buf; /* to ease debugging */
484 484 fflush(NULL);
485 485 if (ztest_dump_core)
486 486 abort();
487 487 exit(3);
488 488 }
489 489
490 490 static int
491 491 str2shift(const char *buf)
492 492 {
493 493 const char *ends = "BKMGTPEZ";
494 494 int i;
495 495
496 496 if (buf[0] == '\0')
497 497 return (0);
498 498 for (i = 0; i < strlen(ends); i++) {
499 499 if (toupper(buf[0]) == ends[i])
500 500 break;
501 501 }
502 502 if (i == strlen(ends)) {
503 503 (void) fprintf(stderr, "ztest: invalid bytes suffix: %s\n",
504 504 buf);
505 505 usage(B_FALSE);
506 506 }
507 507 if (buf[1] == '\0' || (toupper(buf[1]) == 'B' && buf[2] == '\0')) {
508 508 return (10*i);
509 509 }
510 510 (void) fprintf(stderr, "ztest: invalid bytes suffix: %s\n", buf);
511 511 usage(B_FALSE);
512 512 /* NOTREACHED */
513 513 }
514 514
515 515 static uint64_t
516 516 nicenumtoull(const char *buf)
517 517 {
518 518 char *end;
519 519 uint64_t val;
520 520
521 521 val = strtoull(buf, &end, 0);
522 522 if (end == buf) {
523 523 (void) fprintf(stderr, "ztest: bad numeric value: %s\n", buf);
524 524 usage(B_FALSE);
525 525 } else if (end[0] == '.') {
526 526 double fval = strtod(buf, &end);
527 527 fval *= pow(2, str2shift(end));
528 528 if (fval > UINT64_MAX) {
529 529 (void) fprintf(stderr, "ztest: value too large: %s\n",
530 530 buf);
531 531 usage(B_FALSE);
532 532 }
533 533 val = (uint64_t)fval;
534 534 } else {
535 535 int shift = str2shift(end);
536 536 if (shift >= 64 || (val << shift) >> shift != val) {
537 537 (void) fprintf(stderr, "ztest: value too large: %s\n",
538 538 buf);
539 539 usage(B_FALSE);
540 540 }
541 541 val <<= shift;
542 542 }
543 543 return (val);
544 544 }
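[Reviewer note] Worked examples of the parsing above: "128k" yields 128 << 10 = 131072; "1.5g" takes the fractional path and yields (uint64_t)(1.5 * 2^30) = 1610612736; a bare "8" (no suffix) yields 8.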
545 545
546 546 static void
547 547 usage(boolean_t requested)
548 548 {
549 549 const ztest_shared_opts_t *zo = &ztest_opts_defaults;
550 550
551 551 char nice_vdev_size[10];
552 552 char nice_gang_bang[10];
553 553 FILE *fp = requested ? stdout : stderr;
554 554
555 555 nicenum(zo->zo_vdev_size, nice_vdev_size);
556 556 nicenum(zo->zo_metaslab_gang_bang, nice_gang_bang);
557 557
558 558 (void) fprintf(fp, "Usage: %s\n"
559 559 "\t[-v vdevs (default: %llu)]\n"
560 560 "\t[-s size_of_each_vdev (default: %s)]\n"
561 561 "\t[-a alignment_shift (default: %d)] use 0 for random\n"
562 562 "\t[-m mirror_copies (default: %d)]\n"
563 563 "\t[-r raidz_disks (default: %d)]\n"
564 564 "\t[-R raidz_parity (default: %d)]\n"
565 565 "\t[-d datasets (default: %d)]\n"
566 566 "\t[-t threads (default: %d)]\n"
567 567 "\t[-g gang_block_threshold (default: %s)]\n"
568 568 "\t[-i init_count (default: %d)] initialize pool i times\n"
569 569 "\t[-k kill_percentage (default: %llu%%)]\n"
570 570 "\t[-p pool_name (default: %s)]\n"
571 571 "\t[-f dir (default: %s)] file directory for vdev files\n"
572 572 "\t[-V] verbose (use multiple times for ever more blather)\n"
573 573 "\t[-E] use existing pool instead of creating new one\n"
574 574 "\t[-T time (default: %llu sec)] total run time\n"
575 575 "\t[-F freezeloops (default: %llu)] max loops in spa_freeze()\n"
576 576 "\t[-P passtime (default: %llu sec)] time per pass\n"
577 577 "\t[-B alt_ztest (default: <none>)] alternate ztest path\n"
578 578 "\t[-h] (print help)\n"
579 579 "",
580 580 zo->zo_pool,
581 581 (u_longlong_t)zo->zo_vdevs, /* -v */
582 582 nice_vdev_size, /* -s */
583 583 zo->zo_ashift, /* -a */
584 584 zo->zo_mirrors, /* -m */
585 585 zo->zo_raidz, /* -r */
586 586 zo->zo_raidz_parity, /* -R */
587 587 zo->zo_datasets, /* -d */
588 588 zo->zo_threads, /* -t */
589 589 nice_gang_bang, /* -g */
590 590 zo->zo_init, /* -i */
591 591 (u_longlong_t)zo->zo_killrate, /* -k */
592 592 zo->zo_pool, /* -p */
593 593 zo->zo_dir, /* -f */
594 594 (u_longlong_t)zo->zo_time, /* -T */
595 595 (u_longlong_t)zo->zo_maxloops, /* -F */
596 596 (u_longlong_t)zo->zo_passtime);
597 597 exit(requested ? 0 : 1);
598 598 }
599 599
600 600 static void
601 601 process_options(int argc, char **argv)
602 602 {
603 603 char *path;
604 604 ztest_shared_opts_t *zo = &ztest_opts;
605 605
606 606 int opt;
607 607 uint64_t value;
608 608 char altdir[MAXNAMELEN] = { 0 };
609 609
610 610 bcopy(&ztest_opts_defaults, zo, sizeof (*zo));
611 611
612 612 while ((opt = getopt(argc, argv,
613 613 "v:s:a:m:r:R:d:t:g:i:k:p:f:VET:P:hF:B:")) != EOF) {
614 614 value = 0;
615 615 switch (opt) {
616 616 case 'v':
617 617 case 's':
618 618 case 'a':
619 619 case 'm':
620 620 case 'r':
621 621 case 'R':
622 622 case 'd':
623 623 case 't':
624 624 case 'g':
625 625 case 'i':
626 626 case 'k':
627 627 case 'T':
628 628 case 'P':
629 629 case 'F':
630 630 value = nicenumtoull(optarg);
631 631 }
632 632 switch (opt) {
633 633 case 'v':
634 634 zo->zo_vdevs = value;
635 635 break;
636 636 case 's':
637 637 zo->zo_vdev_size = MAX(SPA_MINDEVSIZE, value);
638 638 break;
639 639 case 'a':
640 640 zo->zo_ashift = value;
641 641 break;
642 642 case 'm':
643 643 zo->zo_mirrors = value;
644 644 break;
645 645 case 'r':
646 646 zo->zo_raidz = MAX(1, value);
647 647 break;
648 648 case 'R':
649 649 zo->zo_raidz_parity = MIN(MAX(value, 1), 3);
650 650 break;
651 651 case 'd':
652 652 zo->zo_datasets = MAX(1, value);
653 653 break;
654 654 case 't':
655 655 zo->zo_threads = MAX(1, value);
656 656 break;
657 657 case 'g':
658 658 zo->zo_metaslab_gang_bang = MAX(SPA_MINBLOCKSIZE << 1,
659 659 value);
660 660 break;
661 661 case 'i':
662 662 zo->zo_init = value;
663 663 break;
664 664 case 'k':
665 665 zo->zo_killrate = value;
666 666 break;
667 667 case 'p':
668 668 (void) strlcpy(zo->zo_pool, optarg,
669 669 sizeof (zo->zo_pool));
670 670 break;
671 671 case 'f':
672 672 path = realpath(optarg, NULL);
673 673 if (path == NULL) {
674 674 (void) fprintf(stderr, "error: %s: %s\n",
675 675 optarg, strerror(errno));
676 676 usage(B_FALSE);
677 677 } else {
678 678 (void) strlcpy(zo->zo_dir, path,
679 679 sizeof (zo->zo_dir));
680 680 }
681 681 break;
682 682 case 'V':
683 683 zo->zo_verbose++;
684 684 break;
685 685 case 'E':
686 686 zo->zo_init = 0;
687 687 break;
688 688 case 'T':
689 689 zo->zo_time = value;
690 690 break;
691 691 case 'P':
692 692 zo->zo_passtime = MAX(1, value);
693 693 break;
694 694 case 'F':
695 695 zo->zo_maxloops = MAX(1, value);
696 696 break;
697 697 case 'B':
698 698 (void) strlcpy(altdir, optarg, sizeof (altdir));
699 699 break;
700 700 case 'h':
701 701 usage(B_TRUE);
702 702 break;
703 703 case '?':
704 704 default:
705 705 usage(B_FALSE);
706 706 break;
707 707 }
708 708 }
709 709
710 710 zo->zo_raidz_parity = MIN(zo->zo_raidz_parity, zo->zo_raidz - 1);
711 711
712 712 zo->zo_vdevtime =
713 713 (zo->zo_vdevs > 0 ? zo->zo_time * NANOSEC / zo->zo_vdevs :
714 714 UINT64_MAX >> 2);
715 715
716 716 if (strlen(altdir) > 0) {
717 717 char *cmd;
718 718 char *realaltdir;
719 719 char *bin;
720 720 char *ztest;
721 721 char *isa;
722 722 int isalen;
723 723
724 724 cmd = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
725 725 realaltdir = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
726 726
727 727 VERIFY(NULL != realpath(getexecname(), cmd));
728 728 if (0 != access(altdir, F_OK)) {
729 729 ztest_dump_core = B_FALSE;
730 730 fatal(B_TRUE, "invalid alternate ztest path: %s",
731 731 altdir);
732 732 }
733 733 VERIFY(NULL != realpath(altdir, realaltdir));
734 734
735 735 /*
736 736 * 'cmd' should be of the form "<anything>/usr/bin/<isa>/ztest".
737 737 * We want to extract <isa> to determine if we should use
738 738 * 32 or 64 bit binaries.
739 739 */
740 740 bin = strstr(cmd, "/usr/bin/");
741 741 ztest = strstr(bin, "/ztest");
742 742 isa = bin + 9;
743 743 isalen = ztest - isa;
744 744 (void) snprintf(zo->zo_alt_ztest, sizeof (zo->zo_alt_ztest),
745 745 "%s/usr/bin/%.*s/ztest", realaltdir, isalen, isa);
746 746 (void) snprintf(zo->zo_alt_libpath, sizeof (zo->zo_alt_libpath),
747 747 "%s/usr/lib/%.*s", realaltdir, isalen, isa);
748 748
749 749 if (0 != access(zo->zo_alt_ztest, X_OK)) {
750 750 ztest_dump_core = B_FALSE;
751 751 fatal(B_TRUE, "invalid alternate ztest: %s",
752 752 zo->zo_alt_ztest);
753 753 } else if (0 != access(zo->zo_alt_libpath, X_OK)) {
754 754 ztest_dump_core = B_FALSE;
755 755 fatal(B_TRUE, "invalid alternate lib directory %s",
756 756 zo->zo_alt_libpath);
757 757 }
758 758
759 759 umem_free(cmd, MAXPATHLEN);
760 760 umem_free(realaltdir, MAXPATHLEN);
761 761 }
762 762 }
763 763
764 764 static void
765 765 ztest_kill(ztest_shared_t *zs)
766 766 {
767 767 zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(ztest_spa));
768 768 zs->zs_space = metaslab_class_get_space(spa_normal_class(ztest_spa));
769 769 (void) kill(getpid(), SIGKILL);
770 770 }
771 771
772 772 static uint64_t
773 773 ztest_random(uint64_t range)
774 774 {
775 775 uint64_t r;
776 776
777 777 ASSERT3S(ztest_fd_rand, >=, 0);
778 778
779 779 if (range == 0)
780 780 return (0);
781 781
782 782 if (read(ztest_fd_rand, &r, sizeof (r)) != sizeof (r))
783 783 fatal(1, "short read from /dev/urandom");
784 784
785 785 return (r % range);
786 786 }
787 787
788 788 /* ARGSUSED */
789 789 static void
790 790 ztest_record_enospc(const char *s)
791 791 {
792 792 ztest_shared->zs_enospc_count++;
793 793 }
794 794
795 795 static uint64_t
796 796 ztest_get_ashift(void)
797 797 {
798 798 if (ztest_opts.zo_ashift == 0)
799 799 return (SPA_MINBLOCKSHIFT + ztest_random(3));
800 800 return (ztest_opts.zo_ashift);
801 801 }
802 802
803 803 static nvlist_t *
804 804 make_vdev_file(char *path, char *aux, char *pool, size_t size, uint64_t ashift)
805 805 {
806 806 char pathbuf[MAXPATHLEN];
807 807 uint64_t vdev;
808 808 nvlist_t *file;
809 809
810 810 if (ashift == 0)
811 811 ashift = ztest_get_ashift();
812 812
813 813 if (path == NULL) {
814 814 path = pathbuf;
815 815
816 816 if (aux != NULL) {
817 817 vdev = ztest_shared->zs_vdev_aux;
818 818 (void) snprintf(path, sizeof (pathbuf),
819 819 ztest_aux_template, ztest_opts.zo_dir,
820 820 pool == NULL ? ztest_opts.zo_pool : pool,
821 821 aux, vdev);
822 822 } else {
823 823 vdev = ztest_shared->zs_vdev_next_leaf++;
824 824 (void) snprintf(path, sizeof (pathbuf),
825 825 ztest_dev_template, ztest_opts.zo_dir,
826 826 pool == NULL ? ztest_opts.zo_pool : pool, vdev);
827 827 }
828 828 }
829 829
830 830 if (size != 0) {
831 831 int fd = open(path, O_RDWR | O_CREAT | O_TRUNC, 0666);
832 832 if (fd == -1)
833 833 fatal(1, "can't open %s", path);
834 834 if (ftruncate(fd, size) != 0)
835 835 fatal(1, "can't ftruncate %s", path);
836 836 (void) close(fd);
837 837 }
838 838
839 839 VERIFY(nvlist_alloc(&file, NV_UNIQUE_NAME, 0) == 0);
840 840 VERIFY(nvlist_add_string(file, ZPOOL_CONFIG_TYPE, VDEV_TYPE_FILE) == 0);
841 841 VERIFY(nvlist_add_string(file, ZPOOL_CONFIG_PATH, path) == 0);
842 842 VERIFY(nvlist_add_uint64(file, ZPOOL_CONFIG_ASHIFT, ashift) == 0);
843 843
844 844 return (file);
845 845 }
846 846
847 847 static nvlist_t *
848 848 make_vdev_raidz(char *path, char *aux, char *pool, size_t size,
849 849 uint64_t ashift, int r)
850 850 {
851 851 nvlist_t *raidz, **child;
852 852 int c;
853 853
854 854 if (r < 2)
855 855 return (make_vdev_file(path, aux, pool, size, ashift));
856 856 child = umem_alloc(r * sizeof (nvlist_t *), UMEM_NOFAIL);
857 857
858 858 for (c = 0; c < r; c++)
859 859 child[c] = make_vdev_file(path, aux, pool, size, ashift);
860 860
861 861 VERIFY(nvlist_alloc(&raidz, NV_UNIQUE_NAME, 0) == 0);
862 862 VERIFY(nvlist_add_string(raidz, ZPOOL_CONFIG_TYPE,
863 863 VDEV_TYPE_RAIDZ) == 0);
864 864 VERIFY(nvlist_add_uint64(raidz, ZPOOL_CONFIG_NPARITY,
865 865 ztest_opts.zo_raidz_parity) == 0);
866 866 VERIFY(nvlist_add_nvlist_array(raidz, ZPOOL_CONFIG_CHILDREN,
867 867 child, r) == 0);
868 868
869 869 for (c = 0; c < r; c++)
870 870 nvlist_free(child[c]);
871 871
872 872 umem_free(child, r * sizeof (nvlist_t *));
873 873
874 874 return (raidz);
875 875 }
876 876
877 877 static nvlist_t *
878 878 make_vdev_mirror(char *path, char *aux, char *pool, size_t size,
879 879 uint64_t ashift, int r, int m)
880 880 {
881 881 nvlist_t *mirror, **child;
882 882 int c;
883 883
884 884 if (m < 1)
885 885 return (make_vdev_raidz(path, aux, pool, size, ashift, r));
886 886
887 887 child = umem_alloc(m * sizeof (nvlist_t *), UMEM_NOFAIL);
888 888
889 889 for (c = 0; c < m; c++)
890 890 child[c] = make_vdev_raidz(path, aux, pool, size, ashift, r);
891 891
892 892 VERIFY(nvlist_alloc(&mirror, NV_UNIQUE_NAME, 0) == 0);
893 893 VERIFY(nvlist_add_string(mirror, ZPOOL_CONFIG_TYPE,
894 894 VDEV_TYPE_MIRROR) == 0);
895 895 VERIFY(nvlist_add_nvlist_array(mirror, ZPOOL_CONFIG_CHILDREN,
896 896 child, m) == 0);
897 897
898 898 for (c = 0; c < m; c++)
899 899 nvlist_free(child[c]);
900 900
901 901 umem_free(child, m * sizeof (nvlist_t *));
902 902
903 903 return (mirror);
904 904 }
905 905
906 906 static nvlist_t *
907 907 make_vdev_root(char *path, char *aux, char *pool, size_t size, uint64_t ashift,
908 908 int log, int r, int m, int t)
909 909 {
910 910 nvlist_t *root, **child;
911 911 int c;
912 912
913 913 ASSERT(t > 0);
914 914
915 915 child = umem_alloc(t * sizeof (nvlist_t *), UMEM_NOFAIL);
916 916
917 917 for (c = 0; c < t; c++) {
918 918 child[c] = make_vdev_mirror(path, aux, pool, size, ashift,
919 919 r, m);
920 920 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
921 921 log) == 0);
922 922 }
923 923
924 924 VERIFY(nvlist_alloc(&root, NV_UNIQUE_NAME, 0) == 0);
925 925 VERIFY(nvlist_add_string(root, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) == 0);
926 926 VERIFY(nvlist_add_nvlist_array(root, aux ? aux : ZPOOL_CONFIG_CHILDREN,
927 927 child, t) == 0);
928 928
929 929 for (c = 0; c < t; c++)
930 930 nvlist_free(child[c]);
931 931
932 932 umem_free(child, t * sizeof (nvlist_t *));
933 933
934 934 return (root);
935 935 }
936 936
937 937 /*
938 938 * Find a random spa version. Returns a random spa version in the
939 939 * range [initial_version, SPA_VERSION_FEATURES].
940 940 */
941 941 static uint64_t
942 942 ztest_random_spa_version(uint64_t initial_version)
943 943 {
944 944 uint64_t version = initial_version;
945 945
946 946 if (version <= SPA_VERSION_BEFORE_FEATURES) {
947 947 version = version +
948 948 ztest_random(SPA_VERSION_BEFORE_FEATURES - version + 1);
949 949 }
950 950
951 951 if (version > SPA_VERSION_BEFORE_FEATURES)
952 952 version = SPA_VERSION_FEATURES;
953 953
954 954 ASSERT(SPA_VERSION_IS_SUPPORTED(version));
955 955 return (version);
956 956 }
957 957
958 958 static int
959 959 ztest_random_blocksize(void)
960 960 {
961 961 return (1 << (SPA_MINBLOCKSHIFT +
962 962 ztest_random(SPA_MAXBLOCKSHIFT - SPA_MINBLOCKSHIFT + 1)));
963 963 }
964 964
965 965 static int
966 966 ztest_random_ibshift(void)
967 967 {
968 968 return (DN_MIN_INDBLKSHIFT +
969 969 ztest_random(DN_MAX_INDBLKSHIFT - DN_MIN_INDBLKSHIFT + 1));
970 970 }
971 971
972 972 static uint64_t
973 973 ztest_random_vdev_top(spa_t *spa, boolean_t log_ok)
974 974 {
975 975 uint64_t top;
976 976 vdev_t *rvd = spa->spa_root_vdev;
977 977 vdev_t *tvd;
978 978
979 979 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
980 980
981 981 do {
982 982 top = ztest_random(rvd->vdev_children);
983 983 tvd = rvd->vdev_child[top];
984 984 } while (tvd->vdev_ishole || (tvd->vdev_islog && !log_ok) ||
985 985 tvd->vdev_mg == NULL || tvd->vdev_mg->mg_class == NULL);
986 986
987 987 return (top);
988 988 }
989 989
990 990 static uint64_t
991 991 ztest_random_dsl_prop(zfs_prop_t prop)
992 992 {
993 993 uint64_t value;
994 994
995 995 do {
996 996 value = zfs_prop_random_value(prop, ztest_random(-1ULL));
997 997 } while (prop == ZFS_PROP_CHECKSUM && value == ZIO_CHECKSUM_OFF);
998 998
999 999 return (value);
1000 1000 }
1001 1001
1002 1002 static int
1003 1003 ztest_dsl_prop_set_uint64(char *osname, zfs_prop_t prop, uint64_t value,
1004 1004 boolean_t inherit)
1005 1005 {
1006 1006 const char *propname = zfs_prop_to_name(prop);
1007 1007 const char *valname;
1008 1008 char setpoint[MAXPATHLEN];
1009 1009 uint64_t curval;
1010 1010 int error;
1011 1011
1012 1012 error = dsl_prop_set_int(osname, propname,
1013 1013 (inherit ? ZPROP_SRC_NONE : ZPROP_SRC_LOCAL), value);
1014 1014
1015 1015 if (error == ENOSPC) {
1016 1016 ztest_record_enospc(FTAG);
1017 1017 return (error);
1018 1018 }
1019 1019 ASSERT0(error);
1020 1020
1021 1021 VERIFY0(dsl_prop_get_integer(osname, propname, &curval, setpoint));
1022 1022
1023 1023 if (ztest_opts.zo_verbose >= 6) {
1024 1024 VERIFY(zfs_prop_index_to_string(prop, curval, &valname) == 0);
1025 1025 (void) printf("%s %s = %s at '%s'\n",
1026 1026 osname, propname, valname, setpoint);
1027 1027 }
1028 1028
1029 1029 return (error);
1030 1030 }
1031 1031
1032 1032 static int
1033 1033 ztest_spa_prop_set_uint64(zpool_prop_t prop, uint64_t value)
1034 1034 {
1035 1035 spa_t *spa = ztest_spa;
1036 1036 nvlist_t *props = NULL;
1037 1037 int error;
1038 1038
1039 1039 VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0);
1040 1040 VERIFY(nvlist_add_uint64(props, zpool_prop_to_name(prop), value) == 0);
1041 1041
1042 1042 error = spa_prop_set(spa, props);
1043 1043
1044 1044 nvlist_free(props);
1045 1045
1046 1046 if (error == ENOSPC) {
1047 1047 ztest_record_enospc(FTAG);
1048 1048 return (error);
1049 1049 }
1050 1050 ASSERT0(error);
1051 1051
1052 1052 return (error);
1053 1053 }
1054 1054
1055 1055 static void
1056 1056 ztest_rll_init(rll_t *rll)
1057 1057 {
1058 1058 rll->rll_writer = NULL;
1059 1059 rll->rll_readers = 0;
1060 1060 VERIFY(_mutex_init(&rll->rll_lock, USYNC_THREAD, NULL) == 0);
1061 1061 VERIFY(cond_init(&rll->rll_cv, USYNC_THREAD, NULL) == 0);
1062 1062 }
1063 1063
1064 1064 static void
1065 1065 ztest_rll_destroy(rll_t *rll)
1066 1066 {
1067 1067 ASSERT(rll->rll_writer == NULL);
1068 1068 ASSERT(rll->rll_readers == 0);
1069 1069 VERIFY(_mutex_destroy(&rll->rll_lock) == 0);
1070 1070 VERIFY(cond_destroy(&rll->rll_cv) == 0);
1071 1071 }
1072 1072
1073 1073 static void
1074 1074 ztest_rll_lock(rll_t *rll, rl_type_t type)
1075 1075 {
1076 1076 VERIFY(mutex_lock(&rll->rll_lock) == 0);
1077 1077
1078 1078 if (type == RL_READER) {
1079 1079 while (rll->rll_writer != NULL)
1080 1080 (void) cond_wait(&rll->rll_cv, &rll->rll_lock);
1081 1081 rll->rll_readers++;
1082 1082 } else {
1083 1083 while (rll->rll_writer != NULL || rll->rll_readers)
1084 1084 (void) cond_wait(&rll->rll_cv, &rll->rll_lock);
1085 1085 rll->rll_writer = curthread;
1086 1086 }
1087 1087
1088 1088 VERIFY(mutex_unlock(&rll->rll_lock) == 0);
1089 1089 }
1090 1090
1091 1091 static void
1092 1092 ztest_rll_unlock(rll_t *rll)
1093 1093 {
1094 1094 VERIFY(mutex_lock(&rll->rll_lock) == 0);
1095 1095
1096 1096 if (rll->rll_writer) {
1097 1097 ASSERT(rll->rll_readers == 0);
1098 1098 rll->rll_writer = NULL;
1099 1099 } else {
1100 1100 ASSERT(rll->rll_readers != 0);
1101 1101 ASSERT(rll->rll_writer == NULL);
1102 1102 rll->rll_readers--;
1103 1103 }
1104 1104
1105 1105 if (rll->rll_writer == NULL && rll->rll_readers == 0)
1106 1106 VERIFY(cond_broadcast(&rll->rll_cv) == 0);
1107 1107
1108 1108 VERIFY(mutex_unlock(&rll->rll_lock) == 0);
1109 1109 }
1110 1110
1111 1111 static void
1112 1112 ztest_object_lock(ztest_ds_t *zd, uint64_t object, rl_type_t type)
1113 1113 {
1114 1114 rll_t *rll = &zd->zd_object_lock[object & (ZTEST_OBJECT_LOCKS - 1)];
1115 1115
1116 1116 ztest_rll_lock(rll, type);
1117 1117 }
1118 1118
1119 1119 static void
1120 1120 ztest_object_unlock(ztest_ds_t *zd, uint64_t object)
1121 1121 {
1122 1122 rll_t *rll = &zd->zd_object_lock[object & (ZTEST_OBJECT_LOCKS - 1)];
1123 1123
1124 1124 ztest_rll_unlock(rll);
1125 1125 }
1126 1126
1127 1127 static rl_t *
1128 1128 ztest_range_lock(ztest_ds_t *zd, uint64_t object, uint64_t offset,
1129 1129 uint64_t size, rl_type_t type)
1130 1130 {
1131 1131 uint64_t hash = object ^ (offset % (ZTEST_RANGE_LOCKS + 1));
1132 1132 rll_t *rll = &zd->zd_range_lock[hash & (ZTEST_RANGE_LOCKS - 1)];
1133 1133 rl_t *rl;
1134 1134
1135 1135 rl = umem_alloc(sizeof (*rl), UMEM_NOFAIL);
1136 1136 rl->rl_object = object;
1137 1137 rl->rl_offset = offset;
1138 1138 rl->rl_size = size;
1139 1139 rl->rl_lock = rll;
1140 1140
1141 1141 ztest_rll_lock(rll, type);
1142 1142
1143 1143 return (rl);
1144 1144 }
1145 1145
1146 1146 static void
1147 1147 ztest_range_unlock(rl_t *rl)
1148 1148 {
1149 1149 rll_t *rll = rl->rl_lock;
1150 1150
1151 1151 ztest_rll_unlock(rll);
1152 1152
1153 1153 umem_free(rl, sizeof (*rl));
1154 1154 }
1155 1155
1156 1156 static void
1157 1157 ztest_zd_init(ztest_ds_t *zd, ztest_shared_ds_t *szd, objset_t *os)
1158 1158 {
1159 1159 zd->zd_os = os;
1160 1160 zd->zd_zilog = dmu_objset_zil(os);
1161 1161 zd->zd_shared = szd;
1162 1162 dmu_objset_name(os, zd->zd_name);
1163 1163
1164 1164 if (zd->zd_shared != NULL)
1165 1165 zd->zd_shared->zd_seq = 0;
1166 1166
1167 1167 VERIFY(rwlock_init(&zd->zd_zilog_lock, USYNC_THREAD, NULL) == 0);
1168 1168 VERIFY(_mutex_init(&zd->zd_dirobj_lock, USYNC_THREAD, NULL) == 0);
1169 1169
1170 1170 for (int l = 0; l < ZTEST_OBJECT_LOCKS; l++)
1171 1171 ztest_rll_init(&zd->zd_object_lock[l]);
1172 1172
1173 1173 for (int l = 0; l < ZTEST_RANGE_LOCKS; l++)
1174 1174 ztest_rll_init(&zd->zd_range_lock[l]);
1175 1175 }
1176 1176
1177 1177 static void
1178 1178 ztest_zd_fini(ztest_ds_t *zd)
1179 1179 {
1180 1180 VERIFY(_mutex_destroy(&zd->zd_dirobj_lock) == 0);
1181 1181
1182 1182 for (int l = 0; l < ZTEST_OBJECT_LOCKS; l++)
1183 1183 ztest_rll_destroy(&zd->zd_object_lock[l]);
1184 1184
1185 1185 for (int l = 0; l < ZTEST_RANGE_LOCKS; l++)
1186 1186 ztest_rll_destroy(&zd->zd_range_lock[l]);
1187 1187 }
1188 1188
1189 1189 #define TXG_MIGHTWAIT (ztest_random(10) == 0 ? TXG_NOWAIT : TXG_WAIT)
1190 1190
1191 1191 static uint64_t
1192 1192 ztest_tx_assign(dmu_tx_t *tx, uint64_t txg_how, const char *tag)
1193 1193 {
1194 1194 uint64_t txg;
1195 1195 int error;
1196 1196
1197 1197 /*
1198 1198 * Attempt to assign tx to some transaction group.
1199 1199 */
1200 1200 error = dmu_tx_assign(tx, txg_how);
1201 1201 if (error) {
1202 1202 if (error == ERESTART) {
1203 1203 ASSERT(txg_how == TXG_NOWAIT);
1204 1204 dmu_tx_wait(tx);
1205 1205 } else {
1206 1206 ASSERT3U(error, ==, ENOSPC);
1207 1207 ztest_record_enospc(tag);
1208 1208 }
1209 1209 dmu_tx_abort(tx);
1210 1210 return (0);
1211 1211 }
1212 1212 txg = dmu_tx_get_txg(tx);
1213 1213 ASSERT(txg != 0);
1214 1214 return (txg);
1215 1215 }
1216 1216
1217 1217 static void
1218 1218 ztest_pattern_set(void *buf, uint64_t size, uint64_t value)
1219 1219 {
1220 1220 uint64_t *ip = buf;
1221 1221 uint64_t *ip_end = (uint64_t *)((uintptr_t)buf + (uintptr_t)size);
1222 1222
1223 1223 while (ip < ip_end)
1224 1224 *ip++ = value;
1225 1225 }
1226 1226
1227 1227 static boolean_t
1228 1228 ztest_pattern_match(void *buf, uint64_t size, uint64_t value)
1229 1229 {
1230 1230 uint64_t *ip = buf;
1231 1231 uint64_t *ip_end = (uint64_t *)((uintptr_t)buf + (uintptr_t)size);
1232 1232 uint64_t diff = 0;
1233 1233
1234 1234 while (ip < ip_end)
1235 1235 diff |= (value - *ip++);
1236 1236
1237 1237 return (diff == 0);
1238 1238 }
1239 1239
1240 1240 static void
1241 1241 ztest_bt_generate(ztest_block_tag_t *bt, objset_t *os, uint64_t object,
1242 1242 uint64_t offset, uint64_t gen, uint64_t txg, uint64_t crtxg)
1243 1243 {
1244 1244 bt->bt_magic = BT_MAGIC;
1245 1245 bt->bt_objset = dmu_objset_id(os);
1246 1246 bt->bt_object = object;
1247 1247 bt->bt_offset = offset;
1248 1248 bt->bt_gen = gen;
1249 1249 bt->bt_txg = txg;
1250 1250 bt->bt_crtxg = crtxg;
1251 1251 }
1252 1252
1253 1253 static void
1254 1254 ztest_bt_verify(ztest_block_tag_t *bt, objset_t *os, uint64_t object,
1255 1255 uint64_t offset, uint64_t gen, uint64_t txg, uint64_t crtxg)
1256 1256 {
1257 1257 ASSERT(bt->bt_magic == BT_MAGIC);
1258 1258 ASSERT(bt->bt_objset == dmu_objset_id(os));
1259 1259 ASSERT(bt->bt_object == object);
1260 1260 ASSERT(bt->bt_offset == offset);
1261 1261 ASSERT(bt->bt_gen <= gen);
1262 1262 ASSERT(bt->bt_txg <= txg);
1263 1263 ASSERT(bt->bt_crtxg == crtxg);
1264 1264 }
1265 1265
1266 1266 static ztest_block_tag_t *
1267 1267 ztest_bt_bonus(dmu_buf_t *db)
1268 1268 {
1269 1269 dmu_object_info_t doi;
1270 1270 ztest_block_tag_t *bt;
1271 1271
1272 1272 dmu_object_info_from_db(db, &doi);
1273 1273 ASSERT3U(doi.doi_bonus_size, <=, db->db_size);
1274 1274 ASSERT3U(doi.doi_bonus_size, >=, sizeof (*bt));
1275 1275 bt = (void *)((char *)db->db_data + doi.doi_bonus_size - sizeof (*bt));
1276 1276
1277 1277 return (bt);
1278 1278 }
1279 1279
1280 1280 /*
1281 1281 * ZIL logging ops
1282 1282 */
1283 1283
1284 1284 #define lrz_type lr_mode
1285 1285 #define lrz_blocksize lr_uid
1286 1286 #define lrz_ibshift lr_gid
1287 1287 #define lrz_bonustype lr_rdev
1288 1288 #define lrz_bonuslen lr_crtime[1]
1289 1289
1290 1290 static void
1291 1291 ztest_log_create(ztest_ds_t *zd, dmu_tx_t *tx, lr_create_t *lr)
1292 1292 {
1293 1293 char *name = (void *)(lr + 1); /* name follows lr */
1294 1294 size_t namesize = strlen(name) + 1;
1295 1295 itx_t *itx;
1296 1296
1297 1297 if (zil_replaying(zd->zd_zilog, tx))
1298 1298 return;
1299 1299
1300 1300 itx = zil_itx_create(TX_CREATE, sizeof (*lr) + namesize);
1301 1301 bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
1302 1302 sizeof (*lr) + namesize - sizeof (lr_t));
1303 1303
1304 1304 zil_itx_assign(zd->zd_zilog, itx, tx);
1305 1305 }
1306 1306
1307 1307 static void
1308 1308 ztest_log_remove(ztest_ds_t *zd, dmu_tx_t *tx, lr_remove_t *lr, uint64_t object)
1309 1309 {
1310 1310 char *name = (void *)(lr + 1); /* name follows lr */
1311 1311 size_t namesize = strlen(name) + 1;
1312 1312 itx_t *itx;
1313 1313
1314 1314 if (zil_replaying(zd->zd_zilog, tx))
1315 1315 return;
1316 1316
1317 1317 itx = zil_itx_create(TX_REMOVE, sizeof (*lr) + namesize);
1318 1318 bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
1319 1319 sizeof (*lr) + namesize - sizeof (lr_t));
1320 1320
1321 1321 itx->itx_oid = object;
1322 1322 zil_itx_assign(zd->zd_zilog, itx, tx);
1323 1323 }
1324 1324
1325 1325 static void
1326 1326 ztest_log_write(ztest_ds_t *zd, dmu_tx_t *tx, lr_write_t *lr)
1327 1327 {
1328 1328 itx_t *itx;
1329 1329 itx_wr_state_t write_state = ztest_random(WR_NUM_STATES);
1330 1330
1331 1331 if (zil_replaying(zd->zd_zilog, tx))
1332 1332 return;
1333 1333
1334 1334 if (lr->lr_length > ZIL_MAX_LOG_DATA)
1335 1335 write_state = WR_INDIRECT;
1336 1336
1337 1337 itx = zil_itx_create(TX_WRITE,
1338 1338 sizeof (*lr) + (write_state == WR_COPIED ? lr->lr_length : 0));
1339 1339
1340 1340 if (write_state == WR_COPIED &&
1341 1341 dmu_read(zd->zd_os, lr->lr_foid, lr->lr_offset, lr->lr_length,
1342 1342 ((lr_write_t *)&itx->itx_lr) + 1, DMU_READ_NO_PREFETCH) != 0) {
1343 1343 zil_itx_destroy(itx);
1344 1344 itx = zil_itx_create(TX_WRITE, sizeof (*lr));
1345 1345 write_state = WR_NEED_COPY;
1346 1346 }
1347 1347 itx->itx_private = zd;
1348 1348 itx->itx_wr_state = write_state;
1349 1349 itx->itx_sync = (ztest_random(8) == 0);
1350 1350 itx->itx_sod += (write_state == WR_NEED_COPY ? lr->lr_length : 0);
1351 1351
1352 1352 bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
1353 1353 sizeof (*lr) - sizeof (lr_t));
1354 1354
1355 1355 zil_itx_assign(zd->zd_zilog, itx, tx);
1356 1356 }
1357 1357
1358 1358 static void
1359 1359 ztest_log_truncate(ztest_ds_t *zd, dmu_tx_t *tx, lr_truncate_t *lr)
1360 1360 {
1361 1361 itx_t *itx;
1362 1362
1363 1363 if (zil_replaying(zd->zd_zilog, tx))
1364 1364 return;
1365 1365
1366 1366 itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr));
1367 1367 bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
1368 1368 sizeof (*lr) - sizeof (lr_t));
1369 1369
1370 1370 itx->itx_sync = B_FALSE;
1371 1371 zil_itx_assign(zd->zd_zilog, itx, tx);
1372 1372 }
1373 1373
1374 1374 static void
1375 1375 ztest_log_setattr(ztest_ds_t *zd, dmu_tx_t *tx, lr_setattr_t *lr)
1376 1376 {
1377 1377 itx_t *itx;
1378 1378
1379 1379 if (zil_replaying(zd->zd_zilog, tx))
1380 1380 return;
1381 1381
1382 1382 itx = zil_itx_create(TX_SETATTR, sizeof (*lr));
1383 1383 bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
1384 1384 sizeof (*lr) - sizeof (lr_t));
1385 1385
1386 1386 itx->itx_sync = B_FALSE;
1387 1387 zil_itx_assign(zd->zd_zilog, itx, tx);
1388 1388 }
1389 1389
1390 1390 /*
1391 1391 * ZIL replay ops
1392 1392 */
1393 1393 static int
1394 1394 ztest_replay_create(ztest_ds_t *zd, lr_create_t *lr, boolean_t byteswap)
1395 1395 {
1396 1396 char *name = (void *)(lr + 1); /* name follows lr */
1397 1397 objset_t *os = zd->zd_os;
1398 1398 ztest_block_tag_t *bbt;
1399 1399 dmu_buf_t *db;
1400 1400 dmu_tx_t *tx;
1401 1401 uint64_t txg;
1402 1402 int error = 0;
1403 1403
1404 1404 if (byteswap)
1405 1405 byteswap_uint64_array(lr, sizeof (*lr));
1406 1406
1407 1407 ASSERT(lr->lr_doid == ZTEST_DIROBJ);
1408 1408 ASSERT(name[0] != '\0');
1409 1409
1410 1410 tx = dmu_tx_create(os);
1411 1411
1412 1412 dmu_tx_hold_zap(tx, lr->lr_doid, B_TRUE, name);
1413 1413
1414 1414 if (lr->lrz_type == DMU_OT_ZAP_OTHER) {
1415 1415 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
1416 1416 } else {
1417 1417 dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
1418 1418 }
1419 1419
1420 1420 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
1421 1421 if (txg == 0)
1422 1422 return (ENOSPC);
1423 1423
1424 1424 ASSERT(dmu_objset_zil(os)->zl_replay == !!lr->lr_foid);
1425 1425
1426 1426 if (lr->lrz_type == DMU_OT_ZAP_OTHER) {
1427 1427 if (lr->lr_foid == 0) {
1428 1428 lr->lr_foid = zap_create(os,
1429 1429 lr->lrz_type, lr->lrz_bonustype,
1430 1430 lr->lrz_bonuslen, tx);
1431 1431 } else {
1432 1432 error = zap_create_claim(os, lr->lr_foid,
1433 1433 lr->lrz_type, lr->lrz_bonustype,
1434 1434 lr->lrz_bonuslen, tx);
1435 1435 }
1436 1436 } else {
1437 1437 if (lr->lr_foid == 0) {
1438 1438 lr->lr_foid = dmu_object_alloc(os,
1439 1439 lr->lrz_type, 0, lr->lrz_bonustype,
1440 1440 lr->lrz_bonuslen, tx);
1441 1441 } else {
1442 1442 error = dmu_object_claim(os, lr->lr_foid,
1443 1443 lr->lrz_type, 0, lr->lrz_bonustype,
1444 1444 lr->lrz_bonuslen, tx);
1445 1445 }
1446 1446 }
1447 1447
1448 1448 if (error) {
1449 1449 ASSERT3U(error, ==, EEXIST);
1450 1450 ASSERT(zd->zd_zilog->zl_replay);
1451 1451 dmu_tx_commit(tx);
1452 1452 return (error);
1453 1453 }
1454 1454
1455 1455 ASSERT(lr->lr_foid != 0);
1456 1456
1457 1457 if (lr->lrz_type != DMU_OT_ZAP_OTHER)
1458 1458 VERIFY3U(0, ==, dmu_object_set_blocksize(os, lr->lr_foid,
1459 1459 lr->lrz_blocksize, lr->lrz_ibshift, tx));
1460 1460
1461 1461 VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));
1462 1462 bbt = ztest_bt_bonus(db);
1463 1463 dmu_buf_will_dirty(db, tx);
1464 1464 ztest_bt_generate(bbt, os, lr->lr_foid, -1ULL, lr->lr_gen, txg, txg);
1465 1465 dmu_buf_rele(db, FTAG);
1466 1466
1467 1467 VERIFY3U(0, ==, zap_add(os, lr->lr_doid, name, sizeof (uint64_t), 1,
1468 1468 &lr->lr_foid, tx));
1469 1469
1470 1470 (void) ztest_log_create(zd, tx, lr);
1471 1471
1472 1472 dmu_tx_commit(tx);
1473 1473
1474 1474 return (0);
1475 1475 }
1476 1476
1477 1477 static int
1478 1478 ztest_replay_remove(ztest_ds_t *zd, lr_remove_t *lr, boolean_t byteswap)
1479 1479 {
1480 1480 char *name = (void *)(lr + 1); /* name follows lr */
1481 1481 objset_t *os = zd->zd_os;
1482 1482 dmu_object_info_t doi;
1483 1483 dmu_tx_t *tx;
1484 1484 uint64_t object, txg;
1485 1485
1486 1486 if (byteswap)
1487 1487 byteswap_uint64_array(lr, sizeof (*lr));
1488 1488
1489 1489 ASSERT(lr->lr_doid == ZTEST_DIROBJ);
1490 1490 ASSERT(name[0] != '\0');
1491 1491
1492 1492 VERIFY3U(0, ==,
1493 1493 zap_lookup(os, lr->lr_doid, name, sizeof (object), 1, &object));
1494 1494 ASSERT(object != 0);
1495 1495
1496 1496 ztest_object_lock(zd, object, RL_WRITER);
1497 1497
1498 1498 VERIFY3U(0, ==, dmu_object_info(os, object, &doi));
1499 1499
1500 1500 tx = dmu_tx_create(os);
1501 1501
1502 1502 dmu_tx_hold_zap(tx, lr->lr_doid, B_FALSE, name);
1503 1503 dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
1504 1504
1505 1505 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
1506 1506 if (txg == 0) {
1507 1507 ztest_object_unlock(zd, object);
1508 1508 return (ENOSPC);
1509 1509 }
1510 1510
1511 1511 if (doi.doi_type == DMU_OT_ZAP_OTHER) {
1512 1512 VERIFY3U(0, ==, zap_destroy(os, object, tx));
1513 1513 } else {
1514 1514 VERIFY3U(0, ==, dmu_object_free(os, object, tx));
1515 1515 }
1516 1516
1517 1517 VERIFY3U(0, ==, zap_remove(os, lr->lr_doid, name, tx));
1518 1518
1519 1519 (void) ztest_log_remove(zd, tx, lr, object);
1520 1520
1521 1521 dmu_tx_commit(tx);
1522 1522
1523 1523 ztest_object_unlock(zd, object);
1524 1524
1525 1525 return (0);
1526 1526 }
1527 1527
1528 1528 static int
1529 1529 ztest_replay_write(ztest_ds_t *zd, lr_write_t *lr, boolean_t byteswap)
1530 1530 {
1531 1531 objset_t *os = zd->zd_os;
1532 1532 void *data = lr + 1; /* data follows lr */
1533 1533 uint64_t offset, length;
1534 1534 ztest_block_tag_t *bt = data;
1535 1535 ztest_block_tag_t *bbt;
1536 1536 uint64_t gen, txg, lrtxg, crtxg;
1537 1537 dmu_object_info_t doi;
1538 1538 dmu_tx_t *tx;
1539 1539 dmu_buf_t *db;
1540 1540 arc_buf_t *abuf = NULL;
1541 1541 rl_t *rl;
1542 1542
1543 1543 if (byteswap)
1544 1544 byteswap_uint64_array(lr, sizeof (*lr));
1545 1545
1546 1546 offset = lr->lr_offset;
1547 1547 length = lr->lr_length;
1548 1548
1549 1549 /* If it's a dmu_sync() block, write the whole block */
1550 1550 if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
1551 1551 uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
1552 1552 if (length < blocksize) {
1553 1553 offset -= offset % blocksize;
1554 1554 length = blocksize;
1555 1555 }
1556 1556 }
1557 1557
1558 1558 if (bt->bt_magic == BSWAP_64(BT_MAGIC))
1559 1559 byteswap_uint64_array(bt, sizeof (*bt));
1560 1560
1561 1561 if (bt->bt_magic != BT_MAGIC)
1562 1562 bt = NULL;
1563 1563
1564 1564 ztest_object_lock(zd, lr->lr_foid, RL_READER);
1565 1565 rl = ztest_range_lock(zd, lr->lr_foid, offset, length, RL_WRITER);
1566 1566
1567 1567 VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));
1568 1568
1569 1569 dmu_object_info_from_db(db, &doi);
1570 1570
1571 1571 bbt = ztest_bt_bonus(db);
1572 1572 ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
1573 1573 gen = bbt->bt_gen;
1574 1574 crtxg = bbt->bt_crtxg;
1575 1575 lrtxg = lr->lr_common.lrc_txg;
1576 1576
1577 1577 tx = dmu_tx_create(os);
1578 1578
1579 1579 dmu_tx_hold_write(tx, lr->lr_foid, offset, length);
1580 1580
1581 1581 if (ztest_random(8) == 0 && length == doi.doi_data_block_size &&
1582 1582 P2PHASE(offset, length) == 0)
1583 1583 abuf = dmu_request_arcbuf(db, length);
1584 1584
1585 1585 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
1586 1586 if (txg == 0) {
1587 1587 if (abuf != NULL)
1588 1588 dmu_return_arcbuf(abuf);
1589 1589 dmu_buf_rele(db, FTAG);
1590 1590 ztest_range_unlock(rl);
1591 1591 ztest_object_unlock(zd, lr->lr_foid);
1592 1592 return (ENOSPC);
1593 1593 }
1594 1594
1595 1595 if (bt != NULL) {
1596 1596 /*
1597 1597 * Usually, verify the old data before writing new data --
1598 1598 * but not always, because we also want to verify correct
1599 1599 * behavior when the data was not recently read into cache.
1600 1600 */
1601 1601 ASSERT(offset % doi.doi_data_block_size == 0);
1602 1602 if (ztest_random(4) != 0) {
1603 1603 int prefetch = ztest_random(2) ?
1604 1604 DMU_READ_PREFETCH : DMU_READ_NO_PREFETCH;
1605 1605 ztest_block_tag_t rbt;
1606 1606
1607 1607 VERIFY(dmu_read(os, lr->lr_foid, offset,
1608 1608 sizeof (rbt), &rbt, prefetch) == 0);
1609 1609 if (rbt.bt_magic == BT_MAGIC) {
1610 1610 ztest_bt_verify(&rbt, os, lr->lr_foid,
1611 1611 offset, gen, txg, crtxg);
1612 1612 }
1613 1613 }
1614 1614
1615 1615 /*
1616 1616 * Writes can appear to be newer than the bonus buffer because
1617 1617 * the ztest_get_data() callback does a dmu_read() of the
1618 1618 * open-context data, which may be different than the data
1619 1619 * as it was when the write was generated.
1620 1620 */
1621 1621 if (zd->zd_zilog->zl_replay) {
1622 1622 ztest_bt_verify(bt, os, lr->lr_foid, offset,
1623 1623 MAX(gen, bt->bt_gen), MAX(txg, lrtxg),
1624 1624 bt->bt_crtxg);
1625 1625 }
1626 1626
1627 1627 /*
1628 1628 * Set the bt's gen/txg to the bonus buffer's gen/txg
1629 1629 * so that all of the usual ASSERTs will work.
1630 1630 */
1631 1631 ztest_bt_generate(bt, os, lr->lr_foid, offset, gen, txg, crtxg);
1632 1632 }
1633 1633
1634 1634 if (abuf == NULL) {
1635 1635 dmu_write(os, lr->lr_foid, offset, length, data, tx);
1636 1636 } else {
1637 1637 bcopy(data, abuf->b_data, length);
1638 1638 dmu_assign_arcbuf(db, offset, abuf, tx);
1639 1639 }
1640 1640
1641 1641 (void) ztest_log_write(zd, tx, lr);
1642 1642
1643 1643 dmu_buf_rele(db, FTAG);
1644 1644
1645 1645 dmu_tx_commit(tx);
1646 1646
1647 1647 ztest_range_unlock(rl);
1648 1648 ztest_object_unlock(zd, lr->lr_foid);
1649 1649
1650 1650 return (0);
1651 1651 }
1652 1652
1653 1653 static int
1654 1654 ztest_replay_truncate(ztest_ds_t *zd, lr_truncate_t *lr, boolean_t byteswap)
1655 1655 {
1656 1656 objset_t *os = zd->zd_os;
1657 1657 dmu_tx_t *tx;
1658 1658 uint64_t txg;
1659 1659 rl_t *rl;
1660 1660
1661 1661 if (byteswap)
1662 1662 byteswap_uint64_array(lr, sizeof (*lr));
1663 1663
1664 1664 ztest_object_lock(zd, lr->lr_foid, RL_READER);
1665 1665 rl = ztest_range_lock(zd, lr->lr_foid, lr->lr_offset, lr->lr_length,
1666 1666 RL_WRITER);
1667 1667
1668 1668 tx = dmu_tx_create(os);
1669 1669
1670 1670 dmu_tx_hold_free(tx, lr->lr_foid, lr->lr_offset, lr->lr_length);
1671 1671
1672 1672 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
1673 1673 if (txg == 0) {
1674 1674 ztest_range_unlock(rl);
1675 1675 ztest_object_unlock(zd, lr->lr_foid);
1676 1676 return (ENOSPC);
1677 1677 }
1678 1678
1679 1679 VERIFY(dmu_free_range(os, lr->lr_foid, lr->lr_offset,
1680 1680 lr->lr_length, tx) == 0);
1681 1681
1682 1682 (void) ztest_log_truncate(zd, tx, lr);
1683 1683
1684 1684 dmu_tx_commit(tx);
1685 1685
1686 1686 ztest_range_unlock(rl);
1687 1687 ztest_object_unlock(zd, lr->lr_foid);
1688 1688
1689 1689 return (0);
1690 1690 }
1691 1691
1692 1692 static int
1693 1693 ztest_replay_setattr(ztest_ds_t *zd, lr_setattr_t *lr, boolean_t byteswap)
1694 1694 {
1695 1695 objset_t *os = zd->zd_os;
1696 1696 dmu_tx_t *tx;
1697 1697 dmu_buf_t *db;
1698 1698 ztest_block_tag_t *bbt;
1699 1699 uint64_t txg, lrtxg, crtxg;
1700 1700
1701 1701 if (byteswap)
1702 1702 byteswap_uint64_array(lr, sizeof (*lr));
1703 1703
1704 1704 ztest_object_lock(zd, lr->lr_foid, RL_WRITER);
1705 1705
1706 1706 VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));
1707 1707
1708 1708 tx = dmu_tx_create(os);
1709 1709 dmu_tx_hold_bonus(tx, lr->lr_foid);
1710 1710
1711 1711 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
1712 1712 if (txg == 0) {
1713 1713 dmu_buf_rele(db, FTAG);
1714 1714 ztest_object_unlock(zd, lr->lr_foid);
1715 1715 return (ENOSPC);
1716 1716 }
1717 1717
1718 1718 bbt = ztest_bt_bonus(db);
1719 1719 ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
1720 1720 crtxg = bbt->bt_crtxg;
1721 1721 lrtxg = lr->lr_common.lrc_txg;
1722 1722
1723 1723 if (zd->zd_zilog->zl_replay) {
1724 1724 ASSERT(lr->lr_size != 0);
1725 1725 ASSERT(lr->lr_mode != 0);
1726 1726 ASSERT(lrtxg != 0);
1727 1727 } else {
1728 1728 /*
1729 1729 * Randomly change the size and increment the generation.
1730 1730 */
1731 1731 lr->lr_size = (ztest_random(db->db_size / sizeof (*bbt)) + 1) *
1732 1732 sizeof (*bbt);
1733 1733 lr->lr_mode = bbt->bt_gen + 1;
1734 1734 ASSERT(lrtxg == 0);
1735 1735 }
1736 1736
1737 1737 /*
1738 1738 * Verify that the current bonus buffer is not newer than our txg.
1739 1739 */
1740 1740 ztest_bt_verify(bbt, os, lr->lr_foid, -1ULL, lr->lr_mode,
1741 1741 MAX(txg, lrtxg), crtxg);
1742 1742
1743 1743 dmu_buf_will_dirty(db, tx);
1744 1744
1745 1745 ASSERT3U(lr->lr_size, >=, sizeof (*bbt));
1746 1746 ASSERT3U(lr->lr_size, <=, db->db_size);
1747 1747 VERIFY0(dmu_set_bonus(db, lr->lr_size, tx));
1748 1748 bbt = ztest_bt_bonus(db);
1749 1749
1750 1750 ztest_bt_generate(bbt, os, lr->lr_foid, -1ULL, lr->lr_mode, txg, crtxg);
1751 1751
1752 1752 dmu_buf_rele(db, FTAG);
1753 1753
1754 1754 (void) ztest_log_setattr(zd, tx, lr);
1755 1755
1756 1756 dmu_tx_commit(tx);
1757 1757
1758 1758 ztest_object_unlock(zd, lr->lr_foid);
1759 1759
1760 1760 return (0);
1761 1761 }
1762 1762
1763 1763 zil_replay_func_t *ztest_replay_vector[TX_MAX_TYPE] = {
1764 1764 NULL, /* 0 no such transaction type */
1765 1765 ztest_replay_create, /* TX_CREATE */
1766 1766 NULL, /* TX_MKDIR */
1767 1767 NULL, /* TX_MKXATTR */
1768 1768 NULL, /* TX_SYMLINK */
1769 1769 ztest_replay_remove, /* TX_REMOVE */
1770 1770 NULL, /* TX_RMDIR */
1771 1771 NULL, /* TX_LINK */
1772 1772 NULL, /* TX_RENAME */
1773 1773 ztest_replay_write, /* TX_WRITE */
1774 1774 ztest_replay_truncate, /* TX_TRUNCATE */
1775 1775 ztest_replay_setattr, /* TX_SETATTR */
1776 1776 NULL, /* TX_ACL */
1777 1777 NULL, /* TX_CREATE_ACL */
1778 1778 NULL, /* TX_CREATE_ATTR */
1779 1779 NULL, /* TX_CREATE_ACL_ATTR */
1780 1780 NULL, /* TX_MKDIR_ACL */
1781 1781 NULL, /* TX_MKDIR_ATTR */
1782 1782 NULL, /* TX_MKDIR_ACL_ATTR */
1783 1783 NULL, /* TX_WRITE2 */
1784 1784 };
1785 1785
1786 1786 /*
1787 1787 * ZIL get_data callbacks
1788 1788 */
1789 1789
1790 1790 static void
1791 1791 ztest_get_done(zgd_t *zgd, int error)
1792 1792 {
1793 1793 ztest_ds_t *zd = zgd->zgd_private;
1794 1794 uint64_t object = zgd->zgd_rl->rl_object;
1795 1795
1796 1796 if (zgd->zgd_db)
1797 1797 dmu_buf_rele(zgd->zgd_db, zgd);
1798 1798
1799 1799 ztest_range_unlock(zgd->zgd_rl);
1800 1800 ztest_object_unlock(zd, object);
1801 1801
1802 1802 if (error == 0 && zgd->zgd_bp)
1803 1803 zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);
1804 1804
1805 1805 umem_free(zgd, sizeof (*zgd));
1806 1806 }
1807 1807
1808 1808 static int
1809 1809 ztest_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
1810 1810 {
1811 1811 ztest_ds_t *zd = arg;
1812 1812 objset_t *os = zd->zd_os;
1813 1813 uint64_t object = lr->lr_foid;
1814 1814 uint64_t offset = lr->lr_offset;
1815 1815 uint64_t size = lr->lr_length;
1816 1816 blkptr_t *bp = &lr->lr_blkptr;
1817 1817 uint64_t txg = lr->lr_common.lrc_txg;
1818 1818 uint64_t crtxg;
1819 1819 dmu_object_info_t doi;
1820 1820 dmu_buf_t *db;
1821 1821 zgd_t *zgd;
1822 1822 int error;
1823 1823
1824 1824 ztest_object_lock(zd, object, RL_READER);
1825 1825 error = dmu_bonus_hold(os, object, FTAG, &db);
1826 1826 if (error) {
1827 1827 ztest_object_unlock(zd, object);
1828 1828 return (error);
1829 1829 }
1830 1830
1831 1831 crtxg = ztest_bt_bonus(db)->bt_crtxg;
1832 1832
1833 1833 if (crtxg == 0 || crtxg > txg) {
1834 1834 dmu_buf_rele(db, FTAG);
1835 1835 ztest_object_unlock(zd, object);
1836 1836 return (ENOENT);
1837 1837 }
1838 1838
1839 1839 dmu_object_info_from_db(db, &doi);
1840 1840 dmu_buf_rele(db, FTAG);
1841 1841 db = NULL;
1842 1842
1843 1843 zgd = umem_zalloc(sizeof (*zgd), UMEM_NOFAIL);
1844 1844 zgd->zgd_zilog = zd->zd_zilog;
1845 1845 zgd->zgd_private = zd;
1846 1846
1847 1847 if (buf != NULL) { /* immediate write */
1848 1848 zgd->zgd_rl = ztest_range_lock(zd, object, offset, size,
1849 1849 RL_READER);
1850 1850
1851 1851 error = dmu_read(os, object, offset, size, buf,
1852 1852 DMU_READ_NO_PREFETCH);
1853 1853 ASSERT(error == 0);
1854 1854 } else {
1855 1855 size = doi.doi_data_block_size;
1856 1856 if (ISP2(size)) {
1857 1857 offset = P2ALIGN(offset, size);
1858 1858 } else {
1859 1859 ASSERT(offset < size);
1860 1860 offset = 0;
1861 1861 }
1862 1862
1863 1863 zgd->zgd_rl = ztest_range_lock(zd, object, offset, size,
1864 1864 RL_READER);
1865 1865
1866 1866 error = dmu_buf_hold(os, object, offset, zgd, &db,
1867 1867 DMU_READ_NO_PREFETCH);
1868 1868
1869 1869 if (error == 0) {
1870 1870 blkptr_t *obp = dmu_buf_get_blkptr(db);
1871 1871 if (obp) {
1872 1872 ASSERT(BP_IS_HOLE(bp));
1873 1873 *bp = *obp;
1874 1874 }
1875 1875
1876 1876 zgd->zgd_db = db;
1877 1877 zgd->zgd_bp = bp;
1878 1878
1879 1879 ASSERT(db->db_offset == offset);
1880 1880 ASSERT(db->db_size == size);
1881 1881
1882 1882 error = dmu_sync(zio, lr->lr_common.lrc_txg,
1883 1883 ztest_get_done, zgd);
1884 1884
1885 1885 if (error == 0)
1886 1886 return (0);
1887 1887 }
1888 1888 }
1889 1889
1890 1890 ztest_get_done(zgd, error);
1891 1891
1892 1892 return (error);
1893 1893 }
1894 1894
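/*
 * Allocate a log record of the given size; if a name is supplied it is
 * copied immediately after the record, where the replay functions that
 * need a name expect to find it.
 */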
1895 1895 static void *
1896 1896 ztest_lr_alloc(size_t lrsize, char *name)
1897 1897 {
1898 1898 char *lr;
1899 1899 size_t namesize = name ? strlen(name) + 1 : 0;
1900 1900
1901 1901 lr = umem_zalloc(lrsize + namesize, UMEM_NOFAIL);
1902 1902
1903 1903 if (name)
1904 1904 bcopy(name, lr + lrsize, namesize);
1905 1905
1906 1906 return (lr);
1907 1907 }
1908 1908
1909 1909 void
1910 1910 ztest_lr_free(void *lr, size_t lrsize, char *name)
1911 1911 {
1912 1912 size_t namesize = name ? strlen(name) + 1 : 0;
1913 1913
1914 1914 umem_free(lr, lrsize + namesize);
1915 1915 }
1916 1916
1917 1917 /*
1918 1918 * Lookup a bunch of objects. Returns the number of objects not found.
1919 1919 */
1920 1920 static int
1921 1921 ztest_lookup(ztest_ds_t *zd, ztest_od_t *od, int count)
1922 1922 {
1923 1923 int missing = 0;
1924 1924 int error;
1925 1925
1926 1926 ASSERT(_mutex_held(&zd->zd_dirobj_lock));
1927 1927
1928 1928 for (int i = 0; i < count; i++, od++) {
1929 1929 od->od_object = 0;
1930 1930 error = zap_lookup(zd->zd_os, od->od_dir, od->od_name,
1931 1931 sizeof (uint64_t), 1, &od->od_object);
1932 1932 if (error) {
1933 1933 ASSERT(error == ENOENT);
1934 1934 ASSERT(od->od_object == 0);
1935 1935 missing++;
1936 1936 } else {
1937 1937 dmu_buf_t *db;
1938 1938 ztest_block_tag_t *bbt;
1939 1939 dmu_object_info_t doi;
1940 1940
1941 1941 ASSERT(od->od_object != 0);
1942 1942 ASSERT(missing == 0); /* there should be no gaps */
1943 1943
1944 1944 ztest_object_lock(zd, od->od_object, RL_READER);
1945 1945 VERIFY3U(0, ==, dmu_bonus_hold(zd->zd_os,
1946 1946 od->od_object, FTAG, &db));
1947 1947 dmu_object_info_from_db(db, &doi);
1948 1948 bbt = ztest_bt_bonus(db);
1949 1949 ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
1950 1950 od->od_type = doi.doi_type;
1951 1951 od->od_blocksize = doi.doi_data_block_size;
1952 1952 od->od_gen = bbt->bt_gen;
1953 1953 dmu_buf_rele(db, FTAG);
1954 1954 ztest_object_unlock(zd, od->od_object);
1955 1955 }
1956 1956 }
1957 1957
1958 1958 return (missing);
1959 1959 }
1960 1960
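/*
 * Create a bunch of objects as described by the od templates.
 * Returns the number of objects not created.
 */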
1961 1961 static int
1962 1962 ztest_create(ztest_ds_t *zd, ztest_od_t *od, int count)
1963 1963 {
1964 1964 int missing = 0;
1965 1965
1966 1966 ASSERT(_mutex_held(&zd->zd_dirobj_lock));
1967 1967
1968 1968 for (int i = 0; i < count; i++, od++) {
1969 1969 if (missing) {
1970 1970 od->od_object = 0;
1971 1971 missing++;
1972 1972 continue;
1973 1973 }
1974 1974
1975 1975 lr_create_t *lr = ztest_lr_alloc(sizeof (*lr), od->od_name);
1976 1976
1977 1977 lr->lr_doid = od->od_dir;
1978 1978 lr->lr_foid = 0; /* 0 to allocate, > 0 to claim */
1979 1979 lr->lrz_type = od->od_crtype;
1980 1980 lr->lrz_blocksize = od->od_crblocksize;
1981 1981 lr->lrz_ibshift = ztest_random_ibshift();
1982 1982 lr->lrz_bonustype = DMU_OT_UINT64_OTHER;
1983 1983 lr->lrz_bonuslen = dmu_bonus_max();
1984 1984 lr->lr_gen = od->od_crgen;
1985 1985 lr->lr_crtime[0] = time(NULL);
1986 1986
1987 1987 if (ztest_replay_create(zd, lr, B_FALSE) != 0) {
1988 1988 ASSERT(missing == 0);
1989 1989 od->od_object = 0;
1990 1990 missing++;
1991 1991 } else {
1992 1992 od->od_object = lr->lr_foid;
1993 1993 od->od_type = od->od_crtype;
1994 1994 od->od_blocksize = od->od_crblocksize;
1995 1995 od->od_gen = od->od_crgen;
1996 1996 ASSERT(od->od_object != 0);
1997 1997 }
1998 1998
1999 1999 ztest_lr_free(lr, sizeof (*lr), od->od_name);
2000 2000 }
2001 2001
2002 2002 return (missing);
2003 2003 }
2004 2004
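/*
 * Remove the objects described by the od templates, in reverse order.
 * Returns the number of objects not removed.
 */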
2005 2005 static int
2006 2006 ztest_remove(ztest_ds_t *zd, ztest_od_t *od, int count)
2007 2007 {
2008 2008 int missing = 0;
2009 2009 int error;
2010 2010
2011 2011 ASSERT(_mutex_held(&zd->zd_dirobj_lock));
2012 2012
2013 2013 od += count - 1;
2014 2014
2015 2015 for (int i = count - 1; i >= 0; i--, od--) {
2016 2016 if (missing) {
2017 2017 missing++;
2018 2018 continue;
2019 2019 }
2020 2020
2021 2021 /*
2022 2022 * No object was found.
2023 2023 */
2024 2024 if (od->od_object == 0)
2025 2025 continue;
2026 2026
2027 2027 lr_remove_t *lr = ztest_lr_alloc(sizeof (*lr), od->od_name);
2028 2028
2029 2029 lr->lr_doid = od->od_dir;
2030 2030
2031 2031 if ((error = ztest_replay_remove(zd, lr, B_FALSE)) != 0) {
2032 2032 ASSERT3U(error, ==, ENOSPC);
2033 2033 missing++;
2034 2034 } else {
2035 2035 od->od_object = 0;
2036 2036 }
2037 2037 ztest_lr_free(lr, sizeof (*lr), od->od_name);
2038 2038 }
2039 2039
2040 2040 return (missing);
2041 2041 }
2042 2042
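/*
 * The following helpers build a log record for a single operation and
 * feed it to the corresponding replay function, so the same code paths
 * are exercised whether the operation is new or replayed from the ZIL.
 */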
2043 2043 static int
2044 2044 ztest_write(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size,
2045 2045 void *data)
2046 2046 {
2047 2047 lr_write_t *lr;
2048 2048 int error;
2049 2049
2050 2050 lr = ztest_lr_alloc(sizeof (*lr) + size, NULL);
2051 2051
2052 2052 lr->lr_foid = object;
2053 2053 lr->lr_offset = offset;
2054 2054 lr->lr_length = size;
2055 2055 lr->lr_blkoff = 0;
2056 2056 BP_ZERO(&lr->lr_blkptr);
2057 2057
2058 2058 bcopy(data, lr + 1, size);
2059 2059
2060 2060 error = ztest_replay_write(zd, lr, B_FALSE);
2061 2061
2062 2062 ztest_lr_free(lr, sizeof (*lr) + size, NULL);
2063 2063
2064 2064 return (error);
2065 2065 }
2066 2066
2067 2067 static int
2068 2068 ztest_truncate(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size)
2069 2069 {
2070 2070 lr_truncate_t *lr;
2071 2071 int error;
2072 2072
2073 2073 lr = ztest_lr_alloc(sizeof (*lr), NULL);
2074 2074
2075 2075 lr->lr_foid = object;
2076 2076 lr->lr_offset = offset;
2077 2077 lr->lr_length = size;
2078 2078
2079 2079 error = ztest_replay_truncate(zd, lr, B_FALSE);
2080 2080
2081 2081 ztest_lr_free(lr, sizeof (*lr), NULL);
2082 2082
2083 2083 return (error);
2084 2084 }
2085 2085
2086 2086 static int
2087 2087 ztest_setattr(ztest_ds_t *zd, uint64_t object)
2088 2088 {
2089 2089 lr_setattr_t *lr;
2090 2090 int error;
2091 2091
2092 2092 lr = ztest_lr_alloc(sizeof (*lr), NULL);
2093 2093
2094 2094 lr->lr_foid = object;
2095 2095 lr->lr_size = 0;
2096 2096 lr->lr_mode = 0;
2097 2097
2098 2098 error = ztest_replay_setattr(zd, lr, B_FALSE);
2099 2099
2100 2100 ztest_lr_free(lr, sizeof (*lr), NULL);
2101 2101
2102 2102 return (error);
2103 2103 }
2104 2104
2105 2105 static void
2106 2106 ztest_prealloc(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size)
2107 2107 {
2108 2108 objset_t *os = zd->zd_os;
2109 2109 dmu_tx_t *tx;
2110 2110 uint64_t txg;
2111 2111 rl_t *rl;
2112 2112
2113 2113 txg_wait_synced(dmu_objset_pool(os), 0);
2114 2114
2115 2115 ztest_object_lock(zd, object, RL_READER);
2116 2116 rl = ztest_range_lock(zd, object, offset, size, RL_WRITER);
2117 2117
2118 2118 tx = dmu_tx_create(os);
2119 2119
2120 2120 dmu_tx_hold_write(tx, object, offset, size);
2121 2121
2122 2122 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
2123 2123
2124 2124 if (txg != 0) {
2125 2125 dmu_prealloc(os, object, offset, size, tx);
2126 2126 dmu_tx_commit(tx);
2127 2127 txg_wait_synced(dmu_objset_pool(os), txg);
2128 2128 } else {
2129 2129 (void) dmu_free_long_range(os, object, offset, size);
2130 2130 }
2131 2131
2132 2132 ztest_range_unlock(rl);
2133 2133 ztest_object_unlock(zd, object);
2134 2134 }
2135 2135
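/*
 * Perform a randomly chosen I/O operation against the given object.
 */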
2136 2136 static void
2137 2137 ztest_io(ztest_ds_t *zd, uint64_t object, uint64_t offset)
2138 2138 {
2139 2139 int err;
2140 2140 ztest_block_tag_t wbt;
2141 2141 dmu_object_info_t doi;
2142 2142 enum ztest_io_type io_type;
2143 2143 uint64_t blocksize;
2144 2144 void *data;
2145 2145
2146 2146 VERIFY(dmu_object_info(zd->zd_os, object, &doi) == 0);
2147 2147 blocksize = doi.doi_data_block_size;
2148 2148 data = umem_alloc(blocksize, UMEM_NOFAIL);
2149 2149
2150 2150 /*
2151 2151 * Pick an i/o type at random, biased toward writing block tags.
2152 2152 */
2153 2153 io_type = ztest_random(ZTEST_IO_TYPES);
2154 2154 if (ztest_random(2) == 0)
2155 2155 io_type = ZTEST_IO_WRITE_TAG;
2156 2156
2157 2157 (void) rw_rdlock(&zd->zd_zilog_lock);
2158 2158
2159 2159 switch (io_type) {
2160 2160
2161 2161 case ZTEST_IO_WRITE_TAG:
2162 2162 ztest_bt_generate(&wbt, zd->zd_os, object, offset, 0, 0, 0);
2163 2163 (void) ztest_write(zd, object, offset, sizeof (wbt), &wbt);
2164 2164 break;
2165 2165
2166 2166 case ZTEST_IO_WRITE_PATTERN:
2167 2167 (void) memset(data, 'a' + (object + offset) % 5, blocksize);
2168 2168 if (ztest_random(2) == 0) {
2169 2169 /*
2170 2170 * Induce fletcher2 collisions to ensure that
2171 2171 * zio_ddt_collision() detects and resolves them
2172 2172 * when using fletcher2-verify for deduplication.
2173 2173 */
2174 2174 ((uint64_t *)data)[0] ^= 1ULL << 63;
2175 2175 ((uint64_t *)data)[4] ^= 1ULL << 63;
2176 2176 }
2177 2177 (void) ztest_write(zd, object, offset, blocksize, data);
2178 2178 break;
2179 2179
2180 2180 case ZTEST_IO_WRITE_ZEROES:
2181 2181 bzero(data, blocksize);
2182 2182 (void) ztest_write(zd, object, offset, blocksize, data);
2183 2183 break;
2184 2184
2185 2185 case ZTEST_IO_TRUNCATE:
2186 2186 (void) ztest_truncate(zd, object, offset, blocksize);
2187 2187 break;
2188 2188
2189 2189 case ZTEST_IO_SETATTR:
2190 2190 (void) ztest_setattr(zd, object);
2191 2191 break;
2192 2192
2193 2193 case ZTEST_IO_REWRITE:
2194 2194 (void) rw_rdlock(&ztest_name_lock);
2195 2195 err = ztest_dsl_prop_set_uint64(zd->zd_name,
2196 2196 ZFS_PROP_CHECKSUM, spa_dedup_checksum(ztest_spa),
2197 2197 B_FALSE);
2198 2198 VERIFY(err == 0 || err == ENOSPC);
2199 2199 err = ztest_dsl_prop_set_uint64(zd->zd_name,
2200 2200 ZFS_PROP_COMPRESSION,
2201 2201 ztest_random_dsl_prop(ZFS_PROP_COMPRESSION),
2202 2202 B_FALSE);
2203 2203 VERIFY(err == 0 || err == ENOSPC);
2204 2204 (void) rw_unlock(&ztest_name_lock);
2205 2205
2206 2206 VERIFY0(dmu_read(zd->zd_os, object, offset, blocksize, data,
2207 2207 DMU_READ_NO_PREFETCH));
2208 2208
2209 2209 (void) ztest_write(zd, object, offset, blocksize, data);
2210 2210 break;
2211 2211 }
2212 2212
2213 2213 (void) rw_unlock(&zd->zd_zilog_lock);
2214 2214
2215 2215 umem_free(data, blocksize);
2216 2216 }
2217 2217
2218 2218 /*
2219 2219 * Initialize an object description template.
2220 2220 */
2221 2221 static void
2222 2222 ztest_od_init(ztest_od_t *od, uint64_t id, char *tag, uint64_t index,
2223 2223 dmu_object_type_t type, uint64_t blocksize, uint64_t gen)
2224 2224 {
2225 2225 od->od_dir = ZTEST_DIROBJ;
2226 2226 od->od_object = 0;
2227 2227
2228 2228 od->od_crtype = type;
2229 2229 od->od_crblocksize = blocksize ? blocksize : ztest_random_blocksize();
2230 2230 od->od_crgen = gen;
2231 2231
2232 2232 od->od_type = DMU_OT_NONE;
2233 2233 od->od_blocksize = 0;
2234 2234 od->od_gen = 0;
2235 2235
2236 2236 (void) snprintf(od->od_name, sizeof (od->od_name), "%s(%lld)[%llu]",
2237 2237 tag, (int64_t)id, index);
2238 2238 }
2239 2239
2240 2240 /*
2241 2241 * Lookup or create the objects for a test using the od template.
2242 2242 * If the objects do not all exist, or if 'remove' is specified,
2243 2243 * remove any existing objects and create new ones. Otherwise,
2244 2244 * use the existing objects.
2245 2245 */
2246 2246 static int
2247 2247 ztest_object_init(ztest_ds_t *zd, ztest_od_t *od, size_t size, boolean_t remove)
2248 2248 {
2249 2249 int count = size / sizeof (*od);
2250 2250 int rv = 0;
2251 2251
2252 2252 VERIFY(mutex_lock(&zd->zd_dirobj_lock) == 0);
2253 2253 if ((ztest_lookup(zd, od, count) != 0 || remove) &&
2254 2254 (ztest_remove(zd, od, count) != 0 ||
2255 2255 ztest_create(zd, od, count) != 0))
2256 2256 rv = -1;
2257 2257 zd->zd_od = od;
2258 2258 VERIFY(mutex_unlock(&zd->zd_dirobj_lock) == 0);
2259 2259
2260 2260 return (rv);
2261 2261 }
2262 2262
2263 2263 /* ARGSUSED */
2264 2264 void
2265 2265 ztest_zil_commit(ztest_ds_t *zd, uint64_t id)
2266 2266 {
2267 2267 zilog_t *zilog = zd->zd_zilog;
2268 2268
2269 2269 (void) rw_rdlock(&zd->zd_zilog_lock);
2270 2270
2271 2271 zil_commit(zilog, ztest_random(ZTEST_OBJECTS));
2272 2272
2273 2273 /*
2274 2274 * Remember the committed values in zd, which is in parent/child
2275 2275 * shared memory. If we die, the next iteration of ztest_run()
2276 2276 * will verify that the log really does contain this record.
2277 2277 */
2278 2278 mutex_enter(&zilog->zl_lock);
2279 2279 ASSERT(zd->zd_shared != NULL);
2280 2280 ASSERT3U(zd->zd_shared->zd_seq, <=, zilog->zl_commit_lr_seq);
2281 2281 zd->zd_shared->zd_seq = zilog->zl_commit_lr_seq;
2282 2282 mutex_exit(&zilog->zl_lock);
2283 2283
2284 2284 (void) rw_unlock(&zd->zd_zilog_lock);
2285 2285 }
2286 2286
2287 2287 /*
2288 2288 * This function is designed to simulate the operations that occur during a
2289 2289 * mount/unmount operation. We hold the dataset across these operations in an
2290 2290 * attempt to expose any implicit assumptions about ZIL management.
2291 2291 */
2292 2292 /* ARGSUSED */
2293 2293 void
2294 2294 ztest_zil_remount(ztest_ds_t *zd, uint64_t id)
2295 2295 {
2296 2296 objset_t *os = zd->zd_os;
2297 2297
2298 2298 /*
2299 2299 * We grab the zd_dirobj_lock to ensure that no other thread is
2300 2300 * updating the zil (i.e. adding in-memory log records) and the
2301 2301 * zd_zilog_lock to block any I/O.
2302 2302 */
2303 2303 VERIFY0(mutex_lock(&zd->zd_dirobj_lock));
2304 2304 (void) rw_wrlock(&zd->zd_zilog_lock);
2305 2305
2306 2306 /* zfsvfs_teardown() */
2307 2307 zil_close(zd->zd_zilog);
2308 2308
2309 2309 /* zfsvfs_setup() */
2310 2310 VERIFY(zil_open(os, ztest_get_data) == zd->zd_zilog);
2311 2311 zil_replay(os, zd, ztest_replay_vector);
2312 2312
2313 2313 (void) rw_unlock(&zd->zd_zilog_lock);
2314 2314 VERIFY(mutex_unlock(&zd->zd_dirobj_lock) == 0);
2315 2315 }
2316 2316
2317 2317 /*
2318 2318 * Verify that we can't destroy an active pool, create an existing pool,
2319 2319 * or create a pool with a bad vdev spec.
2320 2320 */
2321 2321 /* ARGSUSED */
2322 2322 void
2323 2323 ztest_spa_create_destroy(ztest_ds_t *zd, uint64_t id)
2324 2324 {
2325 2325 ztest_shared_opts_t *zo = &ztest_opts;
2326 2326 spa_t *spa;
2327 2327 nvlist_t *nvroot;
2328 2328
2329 2329 /*
2330 2330 * Attempt to create using a bad file.
2331 2331 */
2332 2332 nvroot = make_vdev_root("/dev/bogus", NULL, NULL, 0, 0, 0, 0, 0, 1);
2333 2333 VERIFY3U(ENOENT, ==,
2334 2334 spa_create("ztest_bad_file", nvroot, NULL, NULL));
2335 2335 nvlist_free(nvroot);
2336 2336
2337 2337 /*
2338 2338 * Attempt to create using a bad mirror.
2339 2339 */
2340 2340 nvroot = make_vdev_root("/dev/bogus", NULL, NULL, 0, 0, 0, 0, 2, 1);
2341 2341 VERIFY3U(ENOENT, ==,
2342 2342 spa_create("ztest_bad_mirror", nvroot, NULL, NULL));
2343 2343 nvlist_free(nvroot);
2344 2344
2345 2345 /*
2346 2346 * Attempt to create an existing pool. It shouldn't matter
2347 2347 * what's in the nvroot; we should fail with EEXIST.
2348 2348 */
2349 2349 (void) rw_rdlock(&ztest_name_lock);
2350 2350 nvroot = make_vdev_root("/dev/bogus", NULL, NULL, 0, 0, 0, 0, 0, 1);
2351 2351 VERIFY3U(EEXIST, ==, spa_create(zo->zo_pool, nvroot, NULL, NULL));
2352 2352 nvlist_free(nvroot);
2353 2353 VERIFY3U(0, ==, spa_open(zo->zo_pool, &spa, FTAG));
2354 2354 VERIFY3U(EBUSY, ==, spa_destroy(zo->zo_pool));
2355 2355 spa_close(spa, FTAG);
2356 2356
2357 2357 (void) rw_unlock(&ztest_name_lock);
2358 2358 }
2359 2359
2360 2360 /* ARGSUSED */
2361 2361 void
2362 2362 ztest_spa_upgrade(ztest_ds_t *zd, uint64_t id)
2363 2363 {
2364 2364 spa_t *spa;
2365 2365 uint64_t initial_version = SPA_VERSION_INITIAL;
2366 2366 uint64_t version, newversion;
2367 2367 nvlist_t *nvroot, *props;
2368 2368 char *name;
2369 2369
2370 2370 VERIFY0(mutex_lock(&ztest_vdev_lock));
2371 2371 name = kmem_asprintf("%s_upgrade", ztest_opts.zo_pool);
2372 2372
2373 2373 /*
2374 2374 * Clean up from previous runs.
2375 2375 */
2376 2376 (void) spa_destroy(name);
2377 2377
2378 2378 nvroot = make_vdev_root(NULL, NULL, name, ztest_opts.zo_vdev_size, 0,
2379 2379 0, ztest_opts.zo_raidz, ztest_opts.zo_mirrors, 1);
2380 2380
2381 2381 /*
2382 2382 * If we're configuring a RAIDZ device then make sure that the
2383 2383 	 * initial version is capable of supporting that feature.
2384 2384 */
2385 2385 switch (ztest_opts.zo_raidz_parity) {
2386 2386 case 0:
2387 2387 case 1:
2388 2388 initial_version = SPA_VERSION_INITIAL;
2389 2389 break;
2390 2390 case 2:
2391 2391 initial_version = SPA_VERSION_RAIDZ2;
2392 2392 break;
2393 2393 case 3:
2394 2394 initial_version = SPA_VERSION_RAIDZ3;
2395 2395 break;
2396 2396 }
2397 2397
2398 2398 /*
2399 2399 * Create a pool with a spa version that can be upgraded. Pick
2400 2400 * a value between initial_version and SPA_VERSION_BEFORE_FEATURES.
2401 2401 */
2402 2402 do {
2403 2403 version = ztest_random_spa_version(initial_version);
2404 2404 } while (version > SPA_VERSION_BEFORE_FEATURES);
2405 2405
2406 2406 props = fnvlist_alloc();
2407 2407 fnvlist_add_uint64(props,
2408 2408 zpool_prop_to_name(ZPOOL_PROP_VERSION), version);
2409 2409 VERIFY0(spa_create(name, nvroot, props, NULL));
2410 2410 fnvlist_free(nvroot);
2411 2411 fnvlist_free(props);
2412 2412
2413 2413 VERIFY0(spa_open(name, &spa, FTAG));
2414 2414 VERIFY3U(spa_version(spa), ==, version);
2415 2415 newversion = ztest_random_spa_version(version + 1);
2416 2416
2417 2417 if (ztest_opts.zo_verbose >= 4) {
2418 2418 (void) printf("upgrading spa version from %llu to %llu\n",
2419 2419 (u_longlong_t)version, (u_longlong_t)newversion);
2420 2420 }
2421 2421
2422 2422 spa_upgrade(spa, newversion);
2423 2423 VERIFY3U(spa_version(spa), >, version);
2424 2424 VERIFY3U(spa_version(spa), ==, fnvlist_lookup_uint64(spa->spa_config,
2425 2425 zpool_prop_to_name(ZPOOL_PROP_VERSION)));
2426 2426 spa_close(spa, FTAG);
2427 2427
2428 2428 strfree(name);
2429 2429 VERIFY0(mutex_unlock(&ztest_vdev_lock));
2430 2430 }
2431 2431
2432 2432 static vdev_t *
2433 2433 vdev_lookup_by_path(vdev_t *vd, const char *path)
2434 2434 {
2435 2435 vdev_t *mvd;
2436 2436
2437 2437 if (vd->vdev_path != NULL && strcmp(path, vd->vdev_path) == 0)
2438 2438 return (vd);
2439 2439
2440 2440 for (int c = 0; c < vd->vdev_children; c++)
2441 2441 if ((mvd = vdev_lookup_by_path(vd->vdev_child[c], path)) !=
2442 2442 NULL)
2443 2443 return (mvd);
2444 2444
2445 2445 return (NULL);
2446 2446 }
2447 2447
2448 2448 /*
2449 2449 * Find the first available hole which can be used as a top-level.
2450 2450 */
2451 2451 int
2452 2452 find_vdev_hole(spa_t *spa)
2453 2453 {
2454 2454 vdev_t *rvd = spa->spa_root_vdev;
2455 2455 int c;
2456 2456
2457 2457 ASSERT(spa_config_held(spa, SCL_VDEV, RW_READER) == SCL_VDEV);
2458 2458
2459 2459 for (c = 0; c < rvd->vdev_children; c++) {
2460 2460 vdev_t *cvd = rvd->vdev_child[c];
2461 2461
2462 2462 if (cvd->vdev_ishole)
2463 2463 break;
2464 2464 }
2465 2465 return (c);
2466 2466 }
2467 2467
2468 2468 /*
2469 2469 * Verify that vdev_add() works as expected.
2470 2470 */
2471 2471 /* ARGSUSED */
2472 2472 void
2473 2473 ztest_vdev_add_remove(ztest_ds_t *zd, uint64_t id)
2474 2474 {
2475 2475 ztest_shared_t *zs = ztest_shared;
2476 2476 spa_t *spa = ztest_spa;
2477 2477 uint64_t leaves;
2478 2478 uint64_t guid;
2479 2479 nvlist_t *nvroot;
2480 2480 int error;
2481 2481
2482 2482 VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
2483 2483 leaves = MAX(zs->zs_mirrors + zs->zs_splits, 1) * ztest_opts.zo_raidz;
2484 2484
2485 2485 /*
2486 2486 * SCL_VDEV doesn't protect against spa_passivate_log(), which
2487 2487 * only asserts SCL_ALLOC, and can remove the metaslab class out
2488 2488 * from under this function.
2489 2489 */
2490 2490 spa_config_enter(spa, SCL_VDEV|SCL_ALLOC, FTAG, RW_READER);
2491 2491
2492 2492 ztest_shared->zs_vdev_next_leaf = find_vdev_hole(spa) * leaves;
2493 2493
2494 2494 /*
2495 2495 * If we have slogs then remove them 1/4 of the time.
2496 2496 */
2497 2497 if (spa_has_slogs(spa) && ztest_random(4) == 0) {
2498 2498 /*
2499 2499 * Grab the guid from the head of the log class rotor.
2500 2500 */
2501 2501 guid = spa_log_class(spa)->mc_rotor->mg_vd->vdev_guid;
2502 2502
2503 2503 spa_config_exit(spa, SCL_VDEV|SCL_ALLOC, FTAG);
2504 2504
2505 2505 /*
2506 2506 * We have to grab the zs_name_lock as writer to
2507 2507 * prevent a race between removing a slog (dmu_objset_find)
2508 2508 * and destroying a dataset. Removing the slog will
2509 2509 * grab a reference on the dataset which may cause
2510 2510 * dmu_objset_destroy() to fail with EBUSY thus
2511 2511 * leaving the dataset in an inconsistent state.
2512 2512 */
2513 2513 VERIFY(rw_wrlock(&ztest_name_lock) == 0);
2514 2514 error = spa_vdev_remove(spa, guid, B_FALSE);
2515 2515 VERIFY(rw_unlock(&ztest_name_lock) == 0);
2516 2516
2517 2517 if (error && error != EEXIST)
2518 2518 fatal(0, "spa_vdev_remove() = %d", error);
2519 2519 } else {
2520 2520 spa_config_exit(spa, SCL_VDEV|SCL_ALLOC, FTAG);
2521 2521
2522 2522 /*
2523 2523 * Make 1/4 of the devices be log devices.
2524 2524 */
2525 2525 nvroot = make_vdev_root(NULL, NULL, NULL,
2526 2526 ztest_opts.zo_vdev_size, 0,
2527 2527 ztest_random(4) == 0, ztest_opts.zo_raidz,
2528 2528 zs->zs_mirrors, 1);
2529 2529
2530 2530 error = spa_vdev_add(spa, nvroot);
2531 2531 nvlist_free(nvroot);
2532 2532
2533 2533 if (error == ENOSPC)
2534 2534 ztest_record_enospc("spa_vdev_add");
2535 2535 else if (error != 0)
2536 2536 fatal(0, "spa_vdev_add() = %d", error);
2537 2537 }
2538 2538
2539 2539 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
2540 2540 }
2541 2541
2542 2542 /*
2543 2543 * Verify that adding/removing aux devices (l2arc, hot spare) works as expected.
2544 2544 */
2545 2545 /* ARGSUSED */
2546 2546 void
2547 2547 ztest_vdev_aux_add_remove(ztest_ds_t *zd, uint64_t id)
2548 2548 {
2549 2549 ztest_shared_t *zs = ztest_shared;
2550 2550 spa_t *spa = ztest_spa;
2551 2551 vdev_t *rvd = spa->spa_root_vdev;
2552 2552 spa_aux_vdev_t *sav;
2553 2553 char *aux;
2554 2554 uint64_t guid = 0;
2555 2555 int error;
2556 2556
2557 2557 if (ztest_random(2) == 0) {
2558 2558 sav = &spa->spa_spares;
2559 2559 aux = ZPOOL_CONFIG_SPARES;
2560 2560 } else {
2561 2561 sav = &spa->spa_l2cache;
2562 2562 aux = ZPOOL_CONFIG_L2CACHE;
2563 2563 }
2564 2564
2565 2565 VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
2566 2566
2567 2567 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
2568 2568
2569 2569 if (sav->sav_count != 0 && ztest_random(4) == 0) {
2570 2570 /*
2571 2571 * Pick a random device to remove.
2572 2572 */
2573 2573 guid = sav->sav_vdevs[ztest_random(sav->sav_count)]->vdev_guid;
2574 2574 } else {
2575 2575 /*
2576 2576 * Find an unused device we can add.
2577 2577 */
2578 2578 zs->zs_vdev_aux = 0;
2579 2579 for (;;) {
2580 2580 char path[MAXPATHLEN];
2581 2581 int c;
2582 2582 (void) snprintf(path, sizeof (path), ztest_aux_template,
2583 2583 ztest_opts.zo_dir, ztest_opts.zo_pool, aux,
2584 2584 zs->zs_vdev_aux);
2585 2585 for (c = 0; c < sav->sav_count; c++)
2586 2586 if (strcmp(sav->sav_vdevs[c]->vdev_path,
2587 2587 path) == 0)
2588 2588 break;
2589 2589 if (c == sav->sav_count &&
2590 2590 vdev_lookup_by_path(rvd, path) == NULL)
2591 2591 break;
2592 2592 zs->zs_vdev_aux++;
2593 2593 }
2594 2594 }
2595 2595
2596 2596 spa_config_exit(spa, SCL_VDEV, FTAG);
2597 2597
2598 2598 if (guid == 0) {
2599 2599 /*
2600 2600 * Add a new device.
2601 2601 */
2602 2602 nvlist_t *nvroot = make_vdev_root(NULL, aux, NULL,
2603 2603 (ztest_opts.zo_vdev_size * 5) / 4, 0, 0, 0, 0, 1);
2604 2604 error = spa_vdev_add(spa, nvroot);
2605 2605 if (error != 0)
2606 2606 fatal(0, "spa_vdev_add(%p) = %d", nvroot, error);
2607 2607 nvlist_free(nvroot);
2608 2608 } else {
2609 2609 /*
2610 2610 * Remove an existing device. Sometimes, dirty its
2611 2611 * vdev state first to make sure we handle removal
2612 2612 * of devices that have pending state changes.
2613 2613 */
2614 2614 if (ztest_random(2) == 0)
2615 2615 (void) vdev_online(spa, guid, 0, NULL);
2616 2616
2617 2617 error = spa_vdev_remove(spa, guid, B_FALSE);
2618 2618 if (error != 0 && error != EBUSY)
2619 2619 fatal(0, "spa_vdev_remove(%llu) = %d", guid, error);
2620 2620 }
2621 2621
2622 2622 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
2623 2623 }
2624 2624
2625 2625 /*
2626 2626 * split a pool if it has mirror tlvdevs
2627 2627 */
2628 2628 /* ARGSUSED */
2629 2629 void
2630 2630 ztest_split_pool(ztest_ds_t *zd, uint64_t id)
2631 2631 {
2632 2632 ztest_shared_t *zs = ztest_shared;
2633 2633 spa_t *spa = ztest_spa;
2634 2634 vdev_t *rvd = spa->spa_root_vdev;
2635 2635 nvlist_t *tree, **child, *config, *split, **schild;
2636 2636 uint_t c, children, schildren = 0, lastlogid = 0;
2637 2637 int error = 0;
2638 2638
2639 2639 VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
2640 2640
2641 2641 	/* ensure we have a usable config; mirrors of raidz aren't supported */
2642 2642 if (zs->zs_mirrors < 3 || ztest_opts.zo_raidz > 1) {
2643 2643 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
2644 2644 return;
2645 2645 }
2646 2646
2647 2647 /* clean up the old pool, if any */
2648 2648 (void) spa_destroy("splitp");
2649 2649
2650 2650 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
2651 2651
2652 2652 /* generate a config from the existing config */
2653 2653 mutex_enter(&spa->spa_props_lock);
2654 2654 VERIFY(nvlist_lookup_nvlist(spa->spa_config, ZPOOL_CONFIG_VDEV_TREE,
2655 2655 &tree) == 0);
2656 2656 mutex_exit(&spa->spa_props_lock);
2657 2657
2658 2658 VERIFY(nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
2659 2659 &children) == 0);
2660 2660
2661 2661 schild = malloc(rvd->vdev_children * sizeof (nvlist_t *));
2662 2662 for (c = 0; c < children; c++) {
2663 2663 vdev_t *tvd = rvd->vdev_child[c];
2664 2664 nvlist_t **mchild;
2665 2665 uint_t mchildren;
2666 2666
2667 2667 if (tvd->vdev_islog || tvd->vdev_ops == &vdev_hole_ops) {
2668 2668 VERIFY(nvlist_alloc(&schild[schildren], NV_UNIQUE_NAME,
2669 2669 0) == 0);
2670 2670 VERIFY(nvlist_add_string(schild[schildren],
2671 2671 ZPOOL_CONFIG_TYPE, VDEV_TYPE_HOLE) == 0);
2672 2672 VERIFY(nvlist_add_uint64(schild[schildren],
2673 2673 ZPOOL_CONFIG_IS_HOLE, 1) == 0);
2674 2674 if (lastlogid == 0)
2675 2675 lastlogid = schildren;
2676 2676 ++schildren;
2677 2677 continue;
2678 2678 }
2679 2679 lastlogid = 0;
2680 2680 VERIFY(nvlist_lookup_nvlist_array(child[c],
2681 2681 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
2682 2682 VERIFY(nvlist_dup(mchild[0], &schild[schildren++], 0) == 0);
2683 2683 }
2684 2684
2685 2685 /* OK, create a config that can be used to split */
2686 2686 VERIFY(nvlist_alloc(&split, NV_UNIQUE_NAME, 0) == 0);
2687 2687 VERIFY(nvlist_add_string(split, ZPOOL_CONFIG_TYPE,
2688 2688 VDEV_TYPE_ROOT) == 0);
2689 2689 VERIFY(nvlist_add_nvlist_array(split, ZPOOL_CONFIG_CHILDREN, schild,
2690 2690 lastlogid != 0 ? lastlogid : schildren) == 0);
2691 2691
2692 2692 VERIFY(nvlist_alloc(&config, NV_UNIQUE_NAME, 0) == 0);
2693 2693 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, split) == 0);
2694 2694
2695 2695 for (c = 0; c < schildren; c++)
2696 2696 nvlist_free(schild[c]);
2697 2697 free(schild);
2698 2698 nvlist_free(split);
2699 2699
2700 2700 spa_config_exit(spa, SCL_VDEV, FTAG);
2701 2701
2702 2702 (void) rw_wrlock(&ztest_name_lock);
2703 2703 error = spa_vdev_split_mirror(spa, "splitp", config, NULL, B_FALSE);
2704 2704 (void) rw_unlock(&ztest_name_lock);
2705 2705
2706 2706 nvlist_free(config);
2707 2707
2708 2708 if (error == 0) {
2709 2709 (void) printf("successful split - results:\n");
2710 2710 mutex_enter(&spa_namespace_lock);
2711 2711 show_pool_stats(spa);
2712 2712 show_pool_stats(spa_lookup("splitp"));
2713 2713 mutex_exit(&spa_namespace_lock);
2714 2714 ++zs->zs_splits;
2715 2715 --zs->zs_mirrors;
2716 2716 }
2717 2717 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
2718 2718
2719 2719 }
2720 2720
2721 2721 /*
2722 2722 * Verify that we can attach and detach devices.
2723 2723 */
2724 2724 /* ARGSUSED */
2725 2725 void
2726 2726 ztest_vdev_attach_detach(ztest_ds_t *zd, uint64_t id)
2727 2727 {
2728 2728 ztest_shared_t *zs = ztest_shared;
2729 2729 spa_t *spa = ztest_spa;
2730 2730 spa_aux_vdev_t *sav = &spa->spa_spares;
2731 2731 vdev_t *rvd = spa->spa_root_vdev;
2732 2732 vdev_t *oldvd, *newvd, *pvd;
2733 2733 nvlist_t *root;
2734 2734 uint64_t leaves;
2735 2735 uint64_t leaf, top;
2736 2736 uint64_t ashift = ztest_get_ashift();
2737 2737 uint64_t oldguid, pguid;
2738 2738 size_t oldsize, newsize;
2739 2739 char oldpath[MAXPATHLEN], newpath[MAXPATHLEN];
2740 2740 int replacing;
2741 2741 int oldvd_has_siblings = B_FALSE;
2742 2742 int newvd_is_spare = B_FALSE;
2743 2743 int oldvd_is_log;
2744 2744 int error, expected_error;
2745 2745
2746 2746 VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
2747 2747 leaves = MAX(zs->zs_mirrors, 1) * ztest_opts.zo_raidz;
2748 2748
2749 2749 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
2750 2750
2751 2751 /*
2752 2752 * Decide whether to do an attach or a replace.
2753 2753 */
2754 2754 replacing = ztest_random(2);
2755 2755
2756 2756 /*
2757 2757 * Pick a random top-level vdev.
2758 2758 */
2759 2759 top = ztest_random_vdev_top(spa, B_TRUE);
2760 2760
2761 2761 /*
2762 2762 * Pick a random leaf within it.
2763 2763 */
2764 2764 leaf = ztest_random(leaves);
2765 2765
2766 2766 /*
2767 2767 * Locate this vdev.
2768 2768 */
2769 2769 oldvd = rvd->vdev_child[top];
2770 2770 if (zs->zs_mirrors >= 1) {
2771 2771 ASSERT(oldvd->vdev_ops == &vdev_mirror_ops);
2772 2772 ASSERT(oldvd->vdev_children >= zs->zs_mirrors);
2773 2773 oldvd = oldvd->vdev_child[leaf / ztest_opts.zo_raidz];
2774 2774 }
2775 2775 if (ztest_opts.zo_raidz > 1) {
2776 2776 ASSERT(oldvd->vdev_ops == &vdev_raidz_ops);
2777 2777 ASSERT(oldvd->vdev_children == ztest_opts.zo_raidz);
2778 2778 oldvd = oldvd->vdev_child[leaf % ztest_opts.zo_raidz];
2779 2779 }
2780 2780
2781 2781 /*
2782 2782 * If we're already doing an attach or replace, oldvd may be a
2783 2783 * mirror vdev -- in which case, pick a random child.
2784 2784 */
2785 2785 while (oldvd->vdev_children != 0) {
2786 2786 oldvd_has_siblings = B_TRUE;
2787 2787 ASSERT(oldvd->vdev_children >= 2);
2788 2788 oldvd = oldvd->vdev_child[ztest_random(oldvd->vdev_children)];
2789 2789 }
2790 2790
2791 2791 oldguid = oldvd->vdev_guid;
2792 2792 oldsize = vdev_get_min_asize(oldvd);
2793 2793 oldvd_is_log = oldvd->vdev_top->vdev_islog;
2794 2794 (void) strcpy(oldpath, oldvd->vdev_path);
2795 2795 pvd = oldvd->vdev_parent;
2796 2796 pguid = pvd->vdev_guid;
2797 2797
2798 2798 /*
2799 2799 * If oldvd has siblings, then half of the time, detach it.
2800 2800 */
2801 2801 if (oldvd_has_siblings && ztest_random(2) == 0) {
2802 2802 spa_config_exit(spa, SCL_VDEV, FTAG);
2803 2803 error = spa_vdev_detach(spa, oldguid, pguid, B_FALSE);
2804 2804 if (error != 0 && error != ENODEV && error != EBUSY &&
2805 2805 error != ENOTSUP)
2806 2806 fatal(0, "detach (%s) returned %d", oldpath, error);
2807 2807 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
2808 2808 return;
2809 2809 }
2810 2810
2811 2811 /*
2812 2812 * For the new vdev, choose with equal probability between the two
2813 2813 * standard paths (ending in either 'a' or 'b') or a random hot spare.
2814 2814 */
2815 2815 if (sav->sav_count != 0 && ztest_random(3) == 0) {
2816 2816 newvd = sav->sav_vdevs[ztest_random(sav->sav_count)];
2817 2817 newvd_is_spare = B_TRUE;
2818 2818 (void) strcpy(newpath, newvd->vdev_path);
2819 2819 } else {
2820 2820 (void) snprintf(newpath, sizeof (newpath), ztest_dev_template,
2821 2821 ztest_opts.zo_dir, ztest_opts.zo_pool,
2822 2822 top * leaves + leaf);
2823 2823 if (ztest_random(2) == 0)
2824 2824 newpath[strlen(newpath) - 1] = 'b';
2825 2825 newvd = vdev_lookup_by_path(rvd, newpath);
2826 2826 }
2827 2827
2828 2828 if (newvd) {
2829 2829 newsize = vdev_get_min_asize(newvd);
2830 2830 } else {
2831 2831 /*
2832 2832 * Make newsize a little bigger or smaller than oldsize.
2833 2833 * If it's smaller, the attach should fail.
2834 2834 * If it's larger, and we're doing a replace,
2835 2835 * we should get dynamic LUN growth when we're done.
2836 2836 */
2837 2837 newsize = 10 * oldsize / (9 + ztest_random(3));
2838 2838 }
2839 2839
2840 2840 /*
2841 2841 * If pvd is not a mirror or root, the attach should fail with ENOTSUP,
2842 2842 * unless it's a replace; in that case any non-replacing parent is OK.
2843 2843 *
2844 2844 * If newvd is already part of the pool, it should fail with EBUSY.
2845 2845 *
2846 2846 * If newvd is too small, it should fail with EOVERFLOW.
2847 2847 */
2848 2848 if (pvd->vdev_ops != &vdev_mirror_ops &&
2849 2849 pvd->vdev_ops != &vdev_root_ops && (!replacing ||
2850 2850 pvd->vdev_ops == &vdev_replacing_ops ||
2851 2851 pvd->vdev_ops == &vdev_spare_ops))
2852 2852 expected_error = ENOTSUP;
2853 2853 else if (newvd_is_spare && (!replacing || oldvd_is_log))
2854 2854 expected_error = ENOTSUP;
2855 2855 else if (newvd == oldvd)
2856 2856 expected_error = replacing ? 0 : EBUSY;
2857 2857 else if (vdev_lookup_by_path(rvd, newpath) != NULL)
2858 2858 expected_error = EBUSY;
2859 2859 else if (newsize < oldsize)
2860 2860 expected_error = EOVERFLOW;
2861 2861 else if (ashift > oldvd->vdev_top->vdev_ashift)
2862 2862 expected_error = EDOM;
2863 2863 else
2864 2864 expected_error = 0;
2865 2865
2866 2866 spa_config_exit(spa, SCL_VDEV, FTAG);
2867 2867
2868 2868 /*
2869 2869 * Build the nvlist describing newpath.
2870 2870 */
2871 2871 root = make_vdev_root(newpath, NULL, NULL, newvd == NULL ? newsize : 0,
2872 2872 ashift, 0, 0, 0, 1);
2873 2873
2874 2874 error = spa_vdev_attach(spa, oldguid, root, replacing);
2875 2875
2876 2876 nvlist_free(root);
2877 2877
2878 2878 /*
2879 2879 * If our parent was the replacing vdev, but the replace completed,
2880 2880 * then instead of failing with ENOTSUP we may either succeed,
2881 2881 * fail with ENODEV, or fail with EOVERFLOW.
2882 2882 */
2883 2883 if (expected_error == ENOTSUP &&
2884 2884 (error == 0 || error == ENODEV || error == EOVERFLOW))
2885 2885 expected_error = error;
2886 2886
2887 2887 /*
2888 2888 * If someone grew the LUN, the replacement may be too small.
2889 2889 */
2890 2890 if (error == EOVERFLOW || error == EBUSY)
2891 2891 expected_error = error;
2892 2892
2893 2893 /* XXX workaround 6690467 */
2894 2894 if (error != expected_error && expected_error != EBUSY) {
2895 2895 fatal(0, "attach (%s %llu, %s %llu, %d) "
2896 2896 "returned %d, expected %d",
2897 2897 oldpath, (longlong_t)oldsize, newpath,
2898 2898 (longlong_t)newsize, replacing, error, expected_error);
2899 2899 }
2900 2900
2901 2901 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
2902 2902 }
2903 2903
2904 2904 /*
2905 2905 * Callback function which expands the physical size of the vdev.
2906 2906 */
2907 2907 vdev_t *
2908 2908 grow_vdev(vdev_t *vd, void *arg)
2909 2909 {
2910 2910 spa_t *spa = vd->vdev_spa;
2911 2911 size_t *newsize = arg;
2912 2912 size_t fsize;
2913 2913 int fd;
2914 2914
2915 2915 ASSERT(spa_config_held(spa, SCL_STATE, RW_READER) == SCL_STATE);
2916 2916 ASSERT(vd->vdev_ops->vdev_op_leaf);
2917 2917
2918 2918 if ((fd = open(vd->vdev_path, O_RDWR)) == -1)
2919 2919 return (vd);
2920 2920
2921 2921 fsize = lseek(fd, 0, SEEK_END);
2922 2922 (void) ftruncate(fd, *newsize);
2923 2923
2924 2924 if (ztest_opts.zo_verbose >= 6) {
2925 2925 (void) printf("%s grew from %lu to %lu bytes\n",
2926 2926 vd->vdev_path, (ulong_t)fsize, (ulong_t)*newsize);
2927 2927 }
2928 2928 (void) close(fd);
2929 2929 return (NULL);
2930 2930 }
2931 2931
2932 2932 /*
2933 2933 * Callback function which expands a given vdev by calling vdev_online().
2934 2934 */
2935 2935 /* ARGSUSED */
2936 2936 vdev_t *
2937 2937 online_vdev(vdev_t *vd, void *arg)
2938 2938 {
2939 2939 spa_t *spa = vd->vdev_spa;
2940 2940 vdev_t *tvd = vd->vdev_top;
2941 2941 uint64_t guid = vd->vdev_guid;
2942 2942 uint64_t generation = spa->spa_config_generation + 1;
2943 2943 vdev_state_t newstate = VDEV_STATE_UNKNOWN;
2944 2944 int error;
2945 2945
2946 2946 ASSERT(spa_config_held(spa, SCL_STATE, RW_READER) == SCL_STATE);
2947 2947 ASSERT(vd->vdev_ops->vdev_op_leaf);
2948 2948
2949 2949 /* Calling vdev_online will initialize the new metaslabs */
2950 2950 spa_config_exit(spa, SCL_STATE, spa);
2951 2951 error = vdev_online(spa, guid, ZFS_ONLINE_EXPAND, &newstate);
2952 2952 spa_config_enter(spa, SCL_STATE, spa, RW_READER);
2953 2953
2954 2954 /*
2955 2955 * If vdev_online returned an error or the underlying vdev_open
2956 2956 * failed then we abort the expand. The only way to know that
2957 2957 * vdev_open fails is by checking the returned newstate.
2958 2958 */
2959 2959 if (error || newstate != VDEV_STATE_HEALTHY) {
2960 2960 if (ztest_opts.zo_verbose >= 5) {
2961 2961 (void) printf("Unable to expand vdev, state %llu, "
2962 2962 "error %d\n", (u_longlong_t)newstate, error);
2963 2963 }
2964 2964 return (vd);
2965 2965 }
2966 2966 ASSERT3U(newstate, ==, VDEV_STATE_HEALTHY);
2967 2967
2968 2968 /*
2969 2969 * Since we dropped the lock we need to ensure that we're
2970 2970 * still talking to the original vdev. It's possible this
2971 2971 * vdev may have been detached/replaced while we were
2972 2972 * trying to online it.
2973 2973 */
2974 2974 if (generation != spa->spa_config_generation) {
2975 2975 if (ztest_opts.zo_verbose >= 5) {
2976 2976 (void) printf("vdev configuration has changed, "
2977 2977 "guid %llu, state %llu, expected gen %llu, "
2978 2978 "got gen %llu\n",
2979 2979 (u_longlong_t)guid,
2980 2980 (u_longlong_t)tvd->vdev_state,
2981 2981 (u_longlong_t)generation,
2982 2982 (u_longlong_t)spa->spa_config_generation);
2983 2983 }
2984 2984 return (vd);
2985 2985 }
2986 2986 return (NULL);
2987 2987 }
2988 2988
2989 2989 /*
2990 2990 * Traverse the vdev tree calling the supplied function.
2991 2991 * We continue to walk the tree until we either have walked all
2992 2992 * children or we receive a non-NULL return from the callback.
2993 2993 * If a NULL callback is passed, then we just return back the first
2994 2994 * leaf vdev we encounter.
2995 2995 */
2996 2996 vdev_t *
2997 2997 vdev_walk_tree(vdev_t *vd, vdev_t *(*func)(vdev_t *, void *), void *arg)
2998 2998 {
2999 2999 if (vd->vdev_ops->vdev_op_leaf) {
3000 3000 if (func == NULL)
3001 3001 return (vd);
3002 3002 else
3003 3003 return (func(vd, arg));
3004 3004 }
3005 3005
3006 3006 for (uint_t c = 0; c < vd->vdev_children; c++) {
3007 3007 vdev_t *cvd = vd->vdev_child[c];
3008 3008 if ((cvd = vdev_walk_tree(cvd, func, arg)) != NULL)
3009 3009 return (cvd);
3010 3010 }
3011 3011 return (NULL);
3012 3012 }
3013 3013
3014 3014 /*
3015 3015 * Verify that dynamic LUN growth works as expected.
3016 3016 */
3017 3017 /* ARGSUSED */
3018 3018 void
3019 3019 ztest_vdev_LUN_growth(ztest_ds_t *zd, uint64_t id)
3020 3020 {
3021 3021 spa_t *spa = ztest_spa;
3022 3022 vdev_t *vd, *tvd;
3023 3023 metaslab_class_t *mc;
3024 3024 metaslab_group_t *mg;
3025 3025 size_t psize, newsize;
3026 3026 uint64_t top;
3027 3027 uint64_t old_class_space, new_class_space, old_ms_count, new_ms_count;
3028 3028
3029 3029 VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
3030 3030 spa_config_enter(spa, SCL_STATE, spa, RW_READER);
3031 3031
3032 3032 top = ztest_random_vdev_top(spa, B_TRUE);
3033 3033
3034 3034 tvd = spa->spa_root_vdev->vdev_child[top];
3035 3035 mg = tvd->vdev_mg;
3036 3036 mc = mg->mg_class;
3037 3037 old_ms_count = tvd->vdev_ms_count;
3038 3038 old_class_space = metaslab_class_get_space(mc);
3039 3039
3040 3040 /*
3041 3041 * Determine the size of the first leaf vdev associated with
3042 3042 * our top-level device.
3043 3043 */
3044 3044 vd = vdev_walk_tree(tvd, NULL, NULL);
3045 3045 ASSERT3P(vd, !=, NULL);
3046 3046 ASSERT(vd->vdev_ops->vdev_op_leaf);
3047 3047
3048 3048 psize = vd->vdev_psize;
3049 3049
3050 3050 /*
3051 3051 * We only try to expand the vdev if it's healthy, less than 4x its
3052 3052 * original size, and it has a valid psize.
3053 3053 */
3054 3054 if (tvd->vdev_state != VDEV_STATE_HEALTHY ||
3055 3055 psize == 0 || psize >= 4 * ztest_opts.zo_vdev_size) {
3056 3056 spa_config_exit(spa, SCL_STATE, spa);
3057 3057 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
3058 3058 return;
3059 3059 }
3060 3060 ASSERT(psize > 0);
3061 3061 newsize = psize + psize / 8;
3062 3062 ASSERT3U(newsize, >, psize);
3063 3063
3064 3064 if (ztest_opts.zo_verbose >= 6) {
3065 3065 (void) printf("Expanding LUN %s from %lu to %lu\n",
3066 3066 vd->vdev_path, (ulong_t)psize, (ulong_t)newsize);
3067 3067 }
3068 3068
3069 3069 /*
3070 3070 * Growing the vdev is a two step process:
3071 3071 * 1). expand the physical size (i.e. relabel)
3072 3072 * 2). online the vdev to create the new metaslabs
3073 3073 */
3074 3074 if (vdev_walk_tree(tvd, grow_vdev, &newsize) != NULL ||
3075 3075 vdev_walk_tree(tvd, online_vdev, NULL) != NULL ||
3076 3076 tvd->vdev_state != VDEV_STATE_HEALTHY) {
3077 3077 if (ztest_opts.zo_verbose >= 5) {
3078 3078 (void) printf("Could not expand LUN because "
3079 3079 "the vdev configuration changed.\n");
3080 3080 }
3081 3081 spa_config_exit(spa, SCL_STATE, spa);
3082 3082 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
3083 3083 return;
3084 3084 }
3085 3085
3086 3086 spa_config_exit(spa, SCL_STATE, spa);
3087 3087
3088 3088 /*
3089 3089 * Expanding the LUN will update the config asynchronously,
3090 3090 * thus we must wait for the async thread to complete any
3091 3091 * pending tasks before proceeding.
3092 3092 */
3093 3093 for (;;) {
3094 3094 boolean_t done;
3095 3095 mutex_enter(&spa->spa_async_lock);
3096 3096 done = (spa->spa_async_thread == NULL && !spa->spa_async_tasks);
3097 3097 mutex_exit(&spa->spa_async_lock);
3098 3098 if (done)
3099 3099 break;
3100 3100 txg_wait_synced(spa_get_dsl(spa), 0);
3101 3101 (void) poll(NULL, 0, 100);
3102 3102 }
3103 3103
3104 3104 spa_config_enter(spa, SCL_STATE, spa, RW_READER);
3105 3105
3106 3106 tvd = spa->spa_root_vdev->vdev_child[top];
3107 3107 new_ms_count = tvd->vdev_ms_count;
3108 3108 new_class_space = metaslab_class_get_space(mc);
3109 3109
3110 3110 if (tvd->vdev_mg != mg || mg->mg_class != mc) {
3111 3111 if (ztest_opts.zo_verbose >= 5) {
3112 3112 (void) printf("Could not verify LUN expansion due to "
3113 3113 "intervening vdev offline or remove.\n");
3114 3114 }
3115 3115 spa_config_exit(spa, SCL_STATE, spa);
3116 3116 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
3117 3117 return;
3118 3118 }
3119 3119
3120 3120 /*
3121 3121 * Make sure we were able to grow the vdev.
3122 3122 */
3123 3123 if (new_ms_count <= old_ms_count)
3124 3124 fatal(0, "LUN expansion failed: ms_count %llu <= %llu\n",
3125 3125 old_ms_count, new_ms_count);
3126 3126
3127 3127 /*
3128 3128 * Make sure we were able to grow the pool.
3129 3129 */
3130 3130 if (new_class_space <= old_class_space)
3131 3131 fatal(0, "LUN expansion failed: class_space %llu <= %llu\n",
3132 3132 old_class_space, new_class_space);
3133 3133
3134 3134 if (ztest_opts.zo_verbose >= 5) {
3135 3135 char oldnumbuf[6], newnumbuf[6];
3136 3136
3137 3137 nicenum(old_class_space, oldnumbuf);
3138 3138 nicenum(new_class_space, newnumbuf);
3139 3139 (void) printf("%s grew from %s to %s\n",
3140 3140 spa->spa_name, oldnumbuf, newnumbuf);
3141 3141 }
3142 3142
3143 3143 spa_config_exit(spa, SCL_STATE, spa);
3144 3144 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
3145 3145 }
3146 3146
3147 3147 /*
3148 3148 * Verify that dmu_objset_{create,destroy,open,close} work as expected.
3149 3149 */
3150 3150 /* ARGSUSED */
3151 3151 static void
3152 3152 ztest_objset_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
3153 3153 {
3154 3154 /*
3155 3155 * Create the objects common to all ztest datasets.
3156 3156 */
3157 3157 VERIFY(zap_create_claim(os, ZTEST_DIROBJ,
3158 3158 DMU_OT_ZAP_OTHER, DMU_OT_NONE, 0, tx) == 0);
3159 3159 }
3160 3160
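/*
 * Create a dataset; about one time in five, also set it to sync=always
 * so that the ZIL is exercised more heavily.
 */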
3161 3161 static int
3162 3162 ztest_dataset_create(char *dsname)
3163 3163 {
3164 3164 uint64_t zilset = ztest_random(100);
3165 3165 int err = dmu_objset_create(dsname, DMU_OST_OTHER, 0,
3166 3166 ztest_objset_create_cb, NULL);
3167 3167
3168 3168 if (err || zilset < 80)
3169 3169 return (err);
3170 3170
3171 3171 if (ztest_opts.zo_verbose >= 6)
3172 3172 (void) printf("Setting dataset %s to sync always\n", dsname);
3173 3173 return (ztest_dsl_prop_set_uint64(dsname, ZFS_PROP_SYNC,
3174 3174 ZFS_SYNC_ALWAYS, B_FALSE));
3175 3175 }
3176 3176
3177 3177 /* ARGSUSED */
3178 3178 static int
3179 3179 ztest_objset_destroy_cb(const char *name, void *arg)
3180 3180 {
3181 3181 objset_t *os;
3182 3182 dmu_object_info_t doi;
3183 3183 int error;
3184 3184
3185 3185 /*
3186 3186 * Verify that the dataset contains a directory object.
3187 3187 */
3188 3188 VERIFY0(dmu_objset_own(name, DMU_OST_OTHER, B_TRUE, FTAG, &os));
3189 3189 error = dmu_object_info(os, ZTEST_DIROBJ, &doi);
3190 3190 if (error != ENOENT) {
3191 3191 /* We could have crashed in the middle of destroying it */
3192 3192 ASSERT0(error);
3193 3193 ASSERT3U(doi.doi_type, ==, DMU_OT_ZAP_OTHER);
3194 3194 ASSERT3S(doi.doi_physical_blocks_512, >=, 0);
3195 3195 }
3196 3196 dmu_objset_disown(os, FTAG);
3197 3197
3198 3198 /*
3199 3199 * Destroy the dataset.
3200 3200 */
3201 3201 if (strchr(name, '@') != NULL) {
3202 3202 VERIFY0(dsl_destroy_snapshot(name, B_FALSE));
3203 3203 } else {
3204 3204 VERIFY0(dsl_destroy_head(name));
3205 3205 }
3206 3206 return (0);
3207 3207 }
3208 3208
3209 3209 static boolean_t
3210 3210 ztest_snapshot_create(char *osname, uint64_t id)
3211 3211 {
3212 3212 char snapname[MAXNAMELEN];
3213 3213 int error;
3214 3214
3215 3215 (void) snprintf(snapname, sizeof (snapname), "%llu", (u_longlong_t)id);
3216 3216
3217 3217 error = dmu_objset_snapshot_one(osname, snapname);
3218 3218 if (error == ENOSPC) {
3219 3219 ztest_record_enospc(FTAG);
3220 3220 return (B_FALSE);
3221 3221 }
3222 3222 if (error != 0 && error != EEXIST) {
3223 3223 fatal(0, "ztest_snapshot_create(%s@%s) = %d", osname,
3224 3224 snapname, error);
3225 3225 }
3226 3226 return (B_TRUE);
3227 3227 }
3228 3228
3229 3229 static boolean_t
3230 3230 ztest_snapshot_destroy(char *osname, uint64_t id)
3231 3231 {
3232 3232 char snapname[MAXNAMELEN];
3233 3233 int error;
3234 3234
3235 3235 (void) snprintf(snapname, MAXNAMELEN, "%s@%llu", osname,
3236 3236 (u_longlong_t)id);
3237 3237
3238 3238 error = dsl_destroy_snapshot(snapname, B_FALSE);
3239 3239 if (error != 0 && error != ENOENT)
3240 3240 fatal(0, "ztest_snapshot_destroy(%s) = %d", snapname, error);
3241 3241 return (B_TRUE);
3242 3242 }
3243 3243
3244 3244 /* ARGSUSED */
3245 3245 void
3246 3246 ztest_dmu_objset_create_destroy(ztest_ds_t *zd, uint64_t id)
3247 3247 {
3248 3248 ztest_ds_t zdtmp;
3249 3249 int iters;
3250 3250 int error;
3251 3251 objset_t *os, *os2;
3252 3252 char name[MAXNAMELEN];
3253 3253 zilog_t *zilog;
3254 3254
3255 3255 	(void) rw_rdlock(&ztest_name_lock);
3256 3256
3257 3257 (void) snprintf(name, MAXNAMELEN, "%s/temp_%llu",
3258 3258 ztest_opts.zo_pool, (u_longlong_t)id);
3259 3259
3260 3260 /*
3261 3261 * If this dataset exists from a previous run, process its replay log
3262 3262 * half of the time. If we don't replay it, then dmu_objset_destroy()
3263 3263 * (invoked from ztest_objset_destroy_cb()) should just throw it away.
3264 3264 */
3265 3265 if (ztest_random(2) == 0 &&
3266 3266 dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os) == 0) {
3267 3267 ztest_zd_init(&zdtmp, NULL, os);
3268 3268 zil_replay(os, &zdtmp, ztest_replay_vector);
3269 3269 ztest_zd_fini(&zdtmp);
3270 3270 dmu_objset_disown(os, FTAG);
3271 3271 }
3272 3272
3273 3273 /*
3274 3274 * There may be an old instance of the dataset we're about to
3275 3275 * create lying around from a previous run. If so, destroy it
3276 3276 * and all of its snapshots.
3277 3277 */
3278 3278 (void) dmu_objset_find(name, ztest_objset_destroy_cb, NULL,
3279 3279 DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
3280 3280
3281 3281 /*
3282 3282 * Verify that the destroyed dataset is no longer in the namespace.
3283 3283 */
3284 3284 VERIFY3U(ENOENT, ==, dmu_objset_own(name, DMU_OST_OTHER, B_TRUE,
3285 3285 FTAG, &os));
3286 3286
3287 3287 /*
3288 3288 * Verify that we can create a new dataset.
3289 3289 */
3290 3290 error = ztest_dataset_create(name);
3291 3291 if (error) {
3292 3292 if (error == ENOSPC) {
3293 3293 ztest_record_enospc(FTAG);
3294 3294 (void) rw_unlock(&ztest_name_lock);
3295 3295 return;
3296 3296 }
3297 3297 fatal(0, "dmu_objset_create(%s) = %d", name, error);
3298 3298 }
3299 3299
3300 3300 VERIFY0(dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os));
3301 3301
3302 3302 ztest_zd_init(&zdtmp, NULL, os);
3303 3303
3304 3304 /*
3305 3305 * Open the intent log for it.
3306 3306 */
3307 3307 zilog = zil_open(os, ztest_get_data);
3308 3308
3309 3309 /*
3310 3310 * Put some objects in there, do a little I/O to them,
3311 3311 * and randomly take a couple of snapshots along the way.
3312 3312 */
3313 3313 iters = ztest_random(5);
3314 3314 for (int i = 0; i < iters; i++) {
3315 3315 ztest_dmu_object_alloc_free(&zdtmp, id);
3316 3316 if (ztest_random(iters) == 0)
3317 3317 (void) ztest_snapshot_create(name, i);
3318 3318 }
3319 3319
3320 3320 /*
3321 3321 * Verify that we cannot create an existing dataset.
3322 3322 */
3323 3323 VERIFY3U(EEXIST, ==,
3324 3324 dmu_objset_create(name, DMU_OST_OTHER, 0, NULL, NULL));
3325 3325
3326 3326 /*
3327 3327 * Verify that we can hold an objset that is also owned.
3328 3328 */
3329 3329 VERIFY3U(0, ==, dmu_objset_hold(name, FTAG, &os2));
3330 3330 dmu_objset_rele(os2, FTAG);
3331 3331
3332 3332 /*
3333 3333 * Verify that we cannot own an objset that is already owned.
3334 3334 */
3335 3335 VERIFY3U(EBUSY, ==,
3336 3336 dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os2));
3337 3337
3338 3338 zil_close(zilog);
3339 3339 dmu_objset_disown(os, FTAG);
3340 3340 ztest_zd_fini(&zdtmp);
3341 3341
3342 3342 (void) rw_unlock(&ztest_name_lock);
3343 3343 }
3344 3344
3345 3345 /*
3346 3346 * Verify that dmu_snapshot_{create,destroy,open,close} work as expected.
3347 3347 */
3348 3348 void
3349 3349 ztest_dmu_snapshot_create_destroy(ztest_ds_t *zd, uint64_t id)
3350 3350 {
3351 3351 (void) rw_rdlock(&ztest_name_lock);
3352 3352 (void) ztest_snapshot_destroy(zd->zd_name, id);
3353 3353 (void) ztest_snapshot_create(zd->zd_name, id);
3354 3354 (void) rw_unlock(&ztest_name_lock);
3355 3355 }
3356 3356
3357 3357 /*
3358 3358 * Cleanup non-standard snapshots and clones.
3359 3359 */
3360 3360 void
3361 3361 ztest_dsl_dataset_cleanup(char *osname, uint64_t id)
3362 3362 {
3363 3363 char snap1name[MAXNAMELEN];
3364 3364 char clone1name[MAXNAMELEN];
3365 3365 char snap2name[MAXNAMELEN];
3366 3366 char clone2name[MAXNAMELEN];
3367 3367 char snap3name[MAXNAMELEN];
3368 3368 int error;
3369 3369
3370 3370 (void) snprintf(snap1name, MAXNAMELEN, "%s@s1_%llu", osname, id);
3371 3371 (void) snprintf(clone1name, MAXNAMELEN, "%s/c1_%llu", osname, id);
3372 3372 (void) snprintf(snap2name, MAXNAMELEN, "%s@s2_%llu", clone1name, id);
3373 3373 (void) snprintf(clone2name, MAXNAMELEN, "%s/c2_%llu", osname, id);
3374 3374 (void) snprintf(snap3name, MAXNAMELEN, "%s@s3_%llu", clone1name, id);
3375 3375
3376 3376 error = dsl_destroy_head(clone2name);
3377 3377 if (error && error != ENOENT)
3378 3378 fatal(0, "dsl_destroy_head(%s) = %d", clone2name, error);
3379 3379 error = dsl_destroy_snapshot(snap3name, B_FALSE);
3380 3380 if (error && error != ENOENT)
3381 3381 fatal(0, "dsl_destroy_snapshot(%s) = %d", snap3name, error);
3382 3382 error = dsl_destroy_snapshot(snap2name, B_FALSE);
3383 3383 if (error && error != ENOENT)
3384 3384 fatal(0, "dsl_destroy_snapshot(%s) = %d", snap2name, error);
3385 3385 error = dsl_destroy_head(clone1name);
3386 3386 if (error && error != ENOENT)
3387 3387 fatal(0, "dsl_destroy_head(%s) = %d", clone1name, error);
3388 3388 error = dsl_destroy_snapshot(snap1name, B_FALSE);
3389 3389 if (error && error != ENOENT)
3390 3390 fatal(0, "dsl_destroy_snapshot(%s) = %d", snap1name, error);
3391 3391 }
3392 3392
3393 3393 /*
3394 3394 * Verify dsl_dataset_promote handles EBUSY
3395 3395 */
3396 3396 void
3397 3397 ztest_dsl_dataset_promote_busy(ztest_ds_t *zd, uint64_t id)
3398 3398 {
3399 3399 objset_t *os;
3400 3400 char snap1name[MAXNAMELEN];
3401 3401 char clone1name[MAXNAMELEN];
3402 3402 char snap2name[MAXNAMELEN];
3403 3403 char clone2name[MAXNAMELEN];
3404 3404 char snap3name[MAXNAMELEN];
3405 3405 char *osname = zd->zd_name;
3406 3406 int error;
3407 3407
3408 3408 (void) rw_rdlock(&ztest_name_lock);
3409 3409
3410 3410 ztest_dsl_dataset_cleanup(osname, id);
3411 3411
3412 3412 (void) snprintf(snap1name, MAXNAMELEN, "%s@s1_%llu", osname, id);
3413 3413 (void) snprintf(clone1name, MAXNAMELEN, "%s/c1_%llu", osname, id);
3414 3414 (void) snprintf(snap2name, MAXNAMELEN, "%s@s2_%llu", clone1name, id);
3415 3415 (void) snprintf(clone2name, MAXNAMELEN, "%s/c2_%llu", osname, id);
3416 3416 (void) snprintf(snap3name, MAXNAMELEN, "%s@s3_%llu", clone1name, id);
3417 3417
3418 3418 error = dmu_objset_snapshot_one(osname, strchr(snap1name, '@') + 1);
3419 3419 if (error && error != EEXIST) {
3420 3420 if (error == ENOSPC) {
3421 3421 ztest_record_enospc(FTAG);
3422 3422 goto out;
3423 3423 }
3424 3424		fatal(0, "dmu_objset_snapshot_one(%s) = %d", snap1name, error);
3425 3425 }
3426 3426
3427 3427 error = dmu_objset_clone(clone1name, snap1name);
3428 3428 if (error) {
3429 3429 if (error == ENOSPC) {
3430 3430 ztest_record_enospc(FTAG);
3431 3431 goto out;
3432 3432 }
3433 3433		fatal(0, "dmu_objset_clone(%s) = %d", clone1name, error);
3434 3434 }
3435 3435
3436 3436 error = dmu_objset_snapshot_one(clone1name, strchr(snap2name, '@') + 1);
3437 3437 if (error && error != EEXIST) {
3438 3438 if (error == ENOSPC) {
3439 3439 ztest_record_enospc(FTAG);
3440 3440 goto out;
3441 3441 }
3442 3442		fatal(0, "dmu_objset_snapshot_one(%s) = %d", snap2name, error);
3443 3443 }
3444 3444
3445 3445 error = dmu_objset_snapshot_one(clone1name, strchr(snap3name, '@') + 1);
3446 3446 if (error && error != EEXIST) {
3447 3447 if (error == ENOSPC) {
3448 3448 ztest_record_enospc(FTAG);
3449 3449 goto out;
3450 3450 }
3451 3451		fatal(0, "dmu_objset_snapshot_one(%s) = %d", snap3name, error);
3452 3452 }
3453 3453
3454 3454 error = dmu_objset_clone(clone2name, snap3name);
3455 3455 if (error) {
3456 3456 if (error == ENOSPC) {
3457 3457 ztest_record_enospc(FTAG);
3458 3458 goto out;
3459 3459 }
3460 3460		fatal(0, "dmu_objset_clone(%s) = %d", clone2name, error);
3461 3461 }
3462 3462
3463 3463 error = dmu_objset_own(snap2name, DMU_OST_ANY, B_TRUE, FTAG, &os);
3464 3464 if (error)
3465 3465 fatal(0, "dmu_objset_own(%s) = %d", snap2name, error);
3466 3466 error = dsl_dataset_promote(clone2name, NULL);
3467 3467 if (error != EBUSY)
3468 3468 fatal(0, "dsl_dataset_promote(%s), %d, not EBUSY", clone2name,
3469 3469 error);
3470 3470 dmu_objset_disown(os, FTAG);
3471 3471
3472 3472 out:
3473 3473 ztest_dsl_dataset_cleanup(osname, id);
3474 3474
3475 3475 (void) rw_unlock(&ztest_name_lock);
3476 3476 }
3477 3477
3478 3478 /*
3479 3479 * Verify that dmu_object_{alloc,free} work as expected.
3480 3480 */
3481 3481 void
3482 3482 ztest_dmu_object_alloc_free(ztest_ds_t *zd, uint64_t id)
3483 3483 {
3484 3484 ztest_od_t od[4];
3485 3485 int batchsize = sizeof (od) / sizeof (od[0]);
3486 3486
3487 3487 for (int b = 0; b < batchsize; b++)
3488 3488 ztest_od_init(&od[b], id, FTAG, b, DMU_OT_UINT64_OTHER, 0, 0);
3489 3489
3490 3490 /*
3491 3491 * Destroy the previous batch of objects, create a new batch,
3492 3492 * and do some I/O on the new objects.
3493 3493 */
3494 3494 if (ztest_object_init(zd, od, sizeof (od), B_TRUE) != 0)
3495 3495 return;
3496 3496
3497 3497 while (ztest_random(4 * batchsize) != 0)
3498 3498 ztest_io(zd, od[ztest_random(batchsize)].od_object,
3499 3499 ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT);
3500 3500 }
3501 3501
3502 3502 /*
3503 3503 * Verify that dmu_{read,write} work as expected.
3504 3504 */
3505 3505 void
3506 3506 ztest_dmu_read_write(ztest_ds_t *zd, uint64_t id)
3507 3507 {
3508 3508 objset_t *os = zd->zd_os;
3509 3509 ztest_od_t od[2];
3510 3510 dmu_tx_t *tx;
3511 3511 int i, freeit, error;
3512 3512 uint64_t n, s, txg;
3513 3513 bufwad_t *packbuf, *bigbuf, *pack, *bigH, *bigT;
3514 3514 uint64_t packobj, packoff, packsize, bigobj, bigoff, bigsize;
3515 3515 uint64_t chunksize = (1000 + ztest_random(1000)) * sizeof (uint64_t);
3516 3516 uint64_t regions = 997;
3517 3517 uint64_t stride = 123456789ULL;
3518 3518 uint64_t width = 40;
3519 3519 int free_percent = 5;
3520 3520
3521 3521 /*
3522 3522 * This test uses two objects, packobj and bigobj, that are always
3523 3523 * updated together (i.e. in the same tx) so that their contents are
3524 3524 * in sync and can be compared. Their contents relate to each other
3525 3525 * in a simple way: packobj is a dense array of 'bufwad' structures,
3526 3526 * while bigobj is a sparse array of the same bufwads. Specifically,
3527 3527 * for any index n, there are three bufwads that should be identical:
3528 3528 *
3529 3529 * packobj, at offset n * sizeof (bufwad_t)
3530 3530 * bigobj, at the head of the nth chunk
3531 3531 * bigobj, at the tail of the nth chunk
3532 3532 *
3533 3533 * The chunk size is arbitrary. It doesn't have to be a power of two,
3534 3534 * and it doesn't have any relation to the object blocksize.
3535 3535 * The only requirement is that it can hold at least two bufwads.
3536 3536 *
3537 3537 * Normally, we write the bufwad to each of these locations.
3538 3538 * However, free_percent of the time we instead write zeroes to
3539 3539 * packobj and perform a dmu_free_range() on bigobj. By comparing
3540 3540 * bigobj to packobj, we can verify that the DMU is correctly
3541 3541 * tracking which parts of an object are allocated and free,
3542 3542 * and that the contents of the allocated blocks are correct.
3543 3543 */
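	/*
	 * (Editorial illustration, hypothetical values: with chunksize ==
	 * 16384 and n == 3, the three copies of bufwad 3 would live at
	 * packobj byte offset 3 * sizeof (bufwad_t), bigobj byte offset
	 * 3 * 16384 (head of chunk 3), and bigobj byte offset
	 * 4 * 16384 - sizeof (bufwad_t) (tail of chunk 3).)
	 */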
3544 3544
3545 3545 /*
3546 3546 * Read the directory info. If it's the first time, set things up.
3547 3547 */
3548 3548 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, chunksize);
3549 3549 ztest_od_init(&od[1], id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, chunksize);
3550 3550
3551 3551 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
3552 3552 return;
3553 3553
3554 3554 bigobj = od[0].od_object;
3555 3555 packobj = od[1].od_object;
3556 3556 chunksize = od[0].od_gen;
3557 3557 ASSERT(chunksize == od[1].od_gen);
3558 3558
3559 3559 /*
3560 3560 * Prefetch a random chunk of the big object.
3561 3561 * Our aim here is to get some async reads in flight
3562 3562 * for blocks that we may free below; the DMU should
3563 3563 * handle this race correctly.
3564 3564 */
3565 3565 n = ztest_random(regions) * stride + ztest_random(width);
3566 3566 s = 1 + ztest_random(2 * width - 1);
3567 3567 dmu_prefetch(os, bigobj, n * chunksize, s * chunksize);
3568 3568
3569 3569 /*
3570 3570 * Pick a random index and compute the offsets into packobj and bigobj.
3571 3571 */
3572 3572 n = ztest_random(regions) * stride + ztest_random(width);
3573 3573 s = 1 + ztest_random(width - 1);
3574 3574
3575 3575 packoff = n * sizeof (bufwad_t);
3576 3576 packsize = s * sizeof (bufwad_t);
3577 3577
3578 3578 bigoff = n * chunksize;
3579 3579 bigsize = s * chunksize;
3580 3580
3581 3581 packbuf = umem_alloc(packsize, UMEM_NOFAIL);
3582 3582 bigbuf = umem_alloc(bigsize, UMEM_NOFAIL);
3583 3583
3584 3584 /*
3585 3585 * free_percent of the time, free a range of bigobj rather than
3586 3586 * overwriting it.
3587 3587 */
3588 3588 freeit = (ztest_random(100) < free_percent);
3589 3589
3590 3590 /*
3591 3591 * Read the current contents of our objects.
3592 3592 */
3593 3593 error = dmu_read(os, packobj, packoff, packsize, packbuf,
3594 3594 DMU_READ_PREFETCH);
3595 3595 ASSERT0(error);
3596 3596 error = dmu_read(os, bigobj, bigoff, bigsize, bigbuf,
3597 3597 DMU_READ_PREFETCH);
3598 3598 ASSERT0(error);
3599 3599
3600 3600 /*
3601 3601 * Get a tx for the mods to both packobj and bigobj.
3602 3602 */
3603 3603 tx = dmu_tx_create(os);
3604 3604
3605 3605 dmu_tx_hold_write(tx, packobj, packoff, packsize);
3606 3606
3607 3607 if (freeit)
3608 3608 dmu_tx_hold_free(tx, bigobj, bigoff, bigsize);
3609 3609 else
3610 3610 dmu_tx_hold_write(tx, bigobj, bigoff, bigsize);
3611 3611
3612 3612 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
3613 3613 if (txg == 0) {
3614 3614 umem_free(packbuf, packsize);
3615 3615 umem_free(bigbuf, bigsize);
3616 3616 return;
3617 3617 }
3618 3618
3619 3619 dmu_object_set_checksum(os, bigobj,
3620 3620 (enum zio_checksum)ztest_random_dsl_prop(ZFS_PROP_CHECKSUM), tx);
3621 3621
3622 3622 dmu_object_set_compress(os, bigobj,
3623 3623 (enum zio_compress)ztest_random_dsl_prop(ZFS_PROP_COMPRESSION), tx);
3624 3624
3625 3625 /*
3626 3626 * For each index from n to n + s, verify that the existing bufwad
3627 3627 * in packobj matches the bufwads at the head and tail of the
3628 3628 * corresponding chunk in bigobj. Then update all three bufwads
3629 3629 * with the new values we want to write out.
3630 3630 */
3631 3631 for (i = 0; i < s; i++) {
3632 3632 /* LINTED */
3633 3633 pack = (bufwad_t *)((char *)packbuf + i * sizeof (bufwad_t));
3634 3634 /* LINTED */
3635 3635 bigH = (bufwad_t *)((char *)bigbuf + i * chunksize);
3636 3636 /* LINTED */
3637 3637 bigT = (bufwad_t *)((char *)bigH + chunksize) - 1;
3638 3638
3639 3639 ASSERT((uintptr_t)bigH - (uintptr_t)bigbuf < bigsize);
3640 3640 ASSERT((uintptr_t)bigT - (uintptr_t)bigbuf < bigsize);
3641 3641
3642 3642 if (pack->bw_txg > txg)
3643 3643 fatal(0, "future leak: got %llx, open txg is %llx",
3644 3644 pack->bw_txg, txg);
3645 3645
3646 3646 if (pack->bw_data != 0 && pack->bw_index != n + i)
3647 3647 fatal(0, "wrong index: got %llx, wanted %llx+%llx",
3648 3648 pack->bw_index, n, i);
3649 3649
3650 3650 if (bcmp(pack, bigH, sizeof (bufwad_t)) != 0)
3651 3651 fatal(0, "pack/bigH mismatch in %p/%p", pack, bigH);
3652 3652
3653 3653 if (bcmp(pack, bigT, sizeof (bufwad_t)) != 0)
3654 3654 fatal(0, "pack/bigT mismatch in %p/%p", pack, bigT);
3655 3655
3656 3656 if (freeit) {
3657 3657 bzero(pack, sizeof (bufwad_t));
3658 3658 } else {
3659 3659 pack->bw_index = n + i;
3660 3660 pack->bw_txg = txg;
3661 3661 pack->bw_data = 1 + ztest_random(-2ULL);
3662 3662 }
3663 3663 *bigH = *pack;
3664 3664 *bigT = *pack;
3665 3665 }
3666 3666
3667 3667 /*
3668 3668 * We've verified all the old bufwads, and made new ones.
3669 3669 * Now write them out.
3670 3670 */
3671 3671 dmu_write(os, packobj, packoff, packsize, packbuf, tx);
3672 3672
3673 3673 if (freeit) {
3674 3674 if (ztest_opts.zo_verbose >= 7) {
3675 3675 (void) printf("freeing offset %llx size %llx"
3676 3676 " txg %llx\n",
3677 3677 (u_longlong_t)bigoff,
3678 3678 (u_longlong_t)bigsize,
3679 3679 (u_longlong_t)txg);
3680 3680 }
3681 3681 VERIFY(0 == dmu_free_range(os, bigobj, bigoff, bigsize, tx));
3682 3682 } else {
3683 3683 if (ztest_opts.zo_verbose >= 7) {
3684 3684 (void) printf("writing offset %llx size %llx"
3685 3685 " txg %llx\n",
3686 3686 (u_longlong_t)bigoff,
3687 3687 (u_longlong_t)bigsize,
3688 3688 (u_longlong_t)txg);
3689 3689 }
3690 3690 dmu_write(os, bigobj, bigoff, bigsize, bigbuf, tx);
3691 3691 }
3692 3692
3693 3693 dmu_tx_commit(tx);
3694 3694
3695 3695 /*
3696 3696 * Sanity check the stuff we just wrote.
3697 3697 */
3698 3698 {
3699 3699 void *packcheck = umem_alloc(packsize, UMEM_NOFAIL);
3700 3700 void *bigcheck = umem_alloc(bigsize, UMEM_NOFAIL);
3701 3701
3702 3702 VERIFY(0 == dmu_read(os, packobj, packoff,
3703 3703 packsize, packcheck, DMU_READ_PREFETCH));
3704 3704 VERIFY(0 == dmu_read(os, bigobj, bigoff,
3705 3705 bigsize, bigcheck, DMU_READ_PREFETCH));
3706 3706
3707 3707 ASSERT(bcmp(packbuf, packcheck, packsize) == 0);
3708 3708 ASSERT(bcmp(bigbuf, bigcheck, bigsize) == 0);
3709 3709
3710 3710 umem_free(packcheck, packsize);
3711 3711 umem_free(bigcheck, bigsize);
3712 3712 }
3713 3713
3714 3714 umem_free(packbuf, packsize);
3715 3715 umem_free(bigbuf, bigsize);
3716 3716 }
3717 3717
3718 3718 void
3719 3719 compare_and_update_pbbufs(uint64_t s, bufwad_t *packbuf, bufwad_t *bigbuf,
3720 3720 uint64_t bigsize, uint64_t n, uint64_t chunksize, uint64_t txg)
3721 3721 {
3722 3722 uint64_t i;
3723 3723 bufwad_t *pack;
3724 3724 bufwad_t *bigH;
3725 3725 bufwad_t *bigT;
3726 3726
3727 3727 /*
3728 3728 * For each index from n to n + s, verify that the existing bufwad
3729 3729 * in packobj matches the bufwads at the head and tail of the
3730 3730 * corresponding chunk in bigobj. Then update all three bufwads
3731 3731 * with the new values we want to write out.
3732 3732 */
3733 3733 for (i = 0; i < s; i++) {
3734 3734 /* LINTED */
3735 3735 pack = (bufwad_t *)((char *)packbuf + i * sizeof (bufwad_t));
3736 3736 /* LINTED */
3737 3737 bigH = (bufwad_t *)((char *)bigbuf + i * chunksize);
3738 3738 /* LINTED */
3739 3739 bigT = (bufwad_t *)((char *)bigH + chunksize) - 1;
3740 3740
3741 3741 ASSERT((uintptr_t)bigH - (uintptr_t)bigbuf < bigsize);
3742 3742 ASSERT((uintptr_t)bigT - (uintptr_t)bigbuf < bigsize);
3743 3743
3744 3744 if (pack->bw_txg > txg)
3745 3745 fatal(0, "future leak: got %llx, open txg is %llx",
3746 3746 pack->bw_txg, txg);
3747 3747
3748 3748 if (pack->bw_data != 0 && pack->bw_index != n + i)
3749 3749 fatal(0, "wrong index: got %llx, wanted %llx+%llx",
3750 3750 pack->bw_index, n, i);
3751 3751
3752 3752 if (bcmp(pack, bigH, sizeof (bufwad_t)) != 0)
3753 3753 fatal(0, "pack/bigH mismatch in %p/%p", pack, bigH);
3754 3754
3755 3755 if (bcmp(pack, bigT, sizeof (bufwad_t)) != 0)
3756 3756 fatal(0, "pack/bigT mismatch in %p/%p", pack, bigT);
3757 3757
3758 3758 pack->bw_index = n + i;
3759 3759 pack->bw_txg = txg;
3760 3760 pack->bw_data = 1 + ztest_random(-2ULL);
3761 3761
3762 3762 *bigH = *pack;
3763 3763 *bigT = *pack;
3764 3764 }
3765 3765 }
3766 3766
3767 3767 void
3768 3768 ztest_dmu_read_write_zcopy(ztest_ds_t *zd, uint64_t id)
3769 3769 {
3770 3770 objset_t *os = zd->zd_os;
3771 3771 ztest_od_t od[2];
3772 3772 dmu_tx_t *tx;
3773 3773 uint64_t i;
3774 3774 int error;
3775 3775 uint64_t n, s, txg;
3776 3776 bufwad_t *packbuf, *bigbuf;
3777 3777 uint64_t packobj, packoff, packsize, bigobj, bigoff, bigsize;
3778 3778 uint64_t blocksize = ztest_random_blocksize();
3779 3779 uint64_t chunksize = blocksize;
3780 3780 uint64_t regions = 997;
3781 3781 uint64_t stride = 123456789ULL;
3782 3782 uint64_t width = 9;
3783 3783 dmu_buf_t *bonus_db;
3784 3784 arc_buf_t **bigbuf_arcbufs;
3785 3785 dmu_object_info_t doi;
3786 3786
3787 3787 /*
3788 3788 * This test uses two objects, packobj and bigobj, that are always
3789 3789 * updated together (i.e. in the same tx) so that their contents are
3790 3790 * in sync and can be compared. Their contents relate to each other
3791 3791 * in a simple way: packobj is a dense array of 'bufwad' structures,
3792 3792 * while bigobj is a sparse array of the same bufwads. Specifically,
3793 3793 * for any index n, there are three bufwads that should be identical:
3794 3794 *
3795 3795 * packobj, at offset n * sizeof (bufwad_t)
3796 3796 * bigobj, at the head of the nth chunk
3797 3797 * bigobj, at the tail of the nth chunk
3798 3798 *
3799 3799 * The chunk size is set equal to bigobj block size so that
3800 3800 * dmu_assign_arcbuf() can be tested for object updates.
3801 3801 */
3802 3802
3803 3803 /*
3804 3804 * Read the directory info. If it's the first time, set things up.
3805 3805 */
3806 3806 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0);
3807 3807 ztest_od_init(&od[1], id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, chunksize);
3808 3808
3809 3809 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
3810 3810 return;
3811 3811
3812 3812 bigobj = od[0].od_object;
3813 3813 packobj = od[1].od_object;
3814 3814 blocksize = od[0].od_blocksize;
3815 3815 chunksize = blocksize;
3816 3816 ASSERT(chunksize == od[1].od_gen);
3817 3817
3818 3818 VERIFY(dmu_object_info(os, bigobj, &doi) == 0);
3819 3819 VERIFY(ISP2(doi.doi_data_block_size));
3820 3820 VERIFY(chunksize == doi.doi_data_block_size);
3821 3821 VERIFY(chunksize >= 2 * sizeof (bufwad_t));
3822 3822
3823 3823 /*
3824 3824 * Pick a random index and compute the offsets into packobj and bigobj.
3825 3825 */
3826 3826 n = ztest_random(regions) * stride + ztest_random(width);
3827 3827 s = 1 + ztest_random(width - 1);
3828 3828
3829 3829 packoff = n * sizeof (bufwad_t);
3830 3830 packsize = s * sizeof (bufwad_t);
3831 3831
3832 3832 bigoff = n * chunksize;
3833 3833 bigsize = s * chunksize;
3834 3834
3835 3835 packbuf = umem_zalloc(packsize, UMEM_NOFAIL);
3836 3836 bigbuf = umem_zalloc(bigsize, UMEM_NOFAIL);
3837 3837
3838 3838 VERIFY3U(0, ==, dmu_bonus_hold(os, bigobj, FTAG, &bonus_db));
3839 3839
3840 3840 bigbuf_arcbufs = umem_zalloc(2 * s * sizeof (arc_buf_t *), UMEM_NOFAIL);
3841 3841
3842 3842 /*
3843 3843 * Iteration 0 test zcopy for DB_UNCACHED dbufs.
3844 3844 * Iteration 1 test zcopy to already referenced dbufs.
3845 3845 * Iteration 2 test zcopy to dirty dbuf in the same txg.
3846 3846 * Iteration 3 test zcopy to dbuf dirty in previous txg.
3847 3847 * Iteration 4 test zcopy when dbuf is no longer dirty.
3848 3848 * Iteration 5 test zcopy when it can't be done.
3849 3849 * Iteration 6 one more zcopy write.
3850 3850 */
3851 3851 for (i = 0; i < 7; i++) {
3852 3852 uint64_t j;
3853 3853 uint64_t off;
3854 3854
3855 3855 /*
3856 3856 * In iteration 5 (i == 5) use arcbufs
3857 3857 * that don't match bigobj blksz to test
3858 3858 * dmu_assign_arcbuf() when it can't directly
3859 3859 * assign an arcbuf to a dbuf.
3860 3860 */
3861 3861 for (j = 0; j < s; j++) {
3862 3862 if (i != 5) {
3863 3863 bigbuf_arcbufs[j] =
3864 3864 dmu_request_arcbuf(bonus_db, chunksize);
3865 3865 } else {
3866 3866 bigbuf_arcbufs[2 * j] =
3867 3867 dmu_request_arcbuf(bonus_db, chunksize / 2);
3868 3868 bigbuf_arcbufs[2 * j + 1] =
3869 3869 dmu_request_arcbuf(bonus_db, chunksize / 2);
3870 3870 }
3871 3871 }
3872 3872
3873 3873 /*
3874 3874 * Get a tx for the mods to both packobj and bigobj.
3875 3875 */
3876 3876 tx = dmu_tx_create(os);
3877 3877
3878 3878 dmu_tx_hold_write(tx, packobj, packoff, packsize);
3879 3879 dmu_tx_hold_write(tx, bigobj, bigoff, bigsize);
3880 3880
3881 3881 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
3882 3882 if (txg == 0) {
3883 3883 umem_free(packbuf, packsize);
3884 3884 umem_free(bigbuf, bigsize);
3885 3885 for (j = 0; j < s; j++) {
3886 3886 if (i != 5) {
3887 3887 dmu_return_arcbuf(bigbuf_arcbufs[j]);
3888 3888 } else {
3889 3889 dmu_return_arcbuf(
3890 3890 bigbuf_arcbufs[2 * j]);
3891 3891 dmu_return_arcbuf(
3892 3892 bigbuf_arcbufs[2 * j + 1]);
3893 3893 }
3894 3894 }
3895 3895 umem_free(bigbuf_arcbufs, 2 * s * sizeof (arc_buf_t *));
3896 3896 dmu_buf_rele(bonus_db, FTAG);
3897 3897 return;
3898 3898 }
3899 3899
3900 3900 /*
3901 3901 * 50% of the time don't read objects in the 1st iteration to
3902 3902		 * test dmu_assign_arcbuf() for the case when there are no
3903 3903 * existing dbufs for the specified offsets.
3904 3904 */
3905 3905 if (i != 0 || ztest_random(2) != 0) {
3906 3906 error = dmu_read(os, packobj, packoff,
3907 3907 packsize, packbuf, DMU_READ_PREFETCH);
3908 3908 ASSERT0(error);
3909 3909 error = dmu_read(os, bigobj, bigoff, bigsize,
3910 3910 bigbuf, DMU_READ_PREFETCH);
3911 3911 ASSERT0(error);
3912 3912 }
3913 3913 compare_and_update_pbbufs(s, packbuf, bigbuf, bigsize,
3914 3914 n, chunksize, txg);
3915 3915
3916 3916 /*
3917 3917 * We've verified all the old bufwads, and made new ones.
3918 3918 * Now write them out.
3919 3919 */
3920 3920 dmu_write(os, packobj, packoff, packsize, packbuf, tx);
3921 3921 if (ztest_opts.zo_verbose >= 7) {
3922 3922 (void) printf("writing offset %llx size %llx"
3923 3923 " txg %llx\n",
3924 3924 (u_longlong_t)bigoff,
3925 3925 (u_longlong_t)bigsize,
3926 3926 (u_longlong_t)txg);
3927 3927 }
3928 3928 for (off = bigoff, j = 0; j < s; j++, off += chunksize) {
3929 3929 dmu_buf_t *dbt;
3930 3930 if (i != 5) {
3931 3931 bcopy((caddr_t)bigbuf + (off - bigoff),
3932 3932 bigbuf_arcbufs[j]->b_data, chunksize);
3933 3933 } else {
3934 3934 bcopy((caddr_t)bigbuf + (off - bigoff),
3935 3935 bigbuf_arcbufs[2 * j]->b_data,
3936 3936 chunksize / 2);
3937 3937 bcopy((caddr_t)bigbuf + (off - bigoff) +
3938 3938 chunksize / 2,
3939 3939 bigbuf_arcbufs[2 * j + 1]->b_data,
3940 3940 chunksize / 2);
3941 3941 }
3942 3942
3943 3943 if (i == 1) {
3944 3944 VERIFY(dmu_buf_hold(os, bigobj, off,
3945 3945 FTAG, &dbt, DMU_READ_NO_PREFETCH) == 0);
3946 3946 }
3947 3947 if (i != 5) {
3948 3948 dmu_assign_arcbuf(bonus_db, off,
3949 3949 bigbuf_arcbufs[j], tx);
3950 3950 } else {
3951 3951 dmu_assign_arcbuf(bonus_db, off,
3952 3952 bigbuf_arcbufs[2 * j], tx);
3953 3953 dmu_assign_arcbuf(bonus_db,
3954 3954 off + chunksize / 2,
3955 3955 bigbuf_arcbufs[2 * j + 1], tx);
3956 3956 }
3957 3957 if (i == 1) {
3958 3958 dmu_buf_rele(dbt, FTAG);
3959 3959 }
3960 3960 }
3961 3961 dmu_tx_commit(tx);
3962 3962
3963 3963 /*
3964 3964 * Sanity check the stuff we just wrote.
3965 3965 */
3966 3966 {
3967 3967 void *packcheck = umem_alloc(packsize, UMEM_NOFAIL);
3968 3968 void *bigcheck = umem_alloc(bigsize, UMEM_NOFAIL);
3969 3969
3970 3970 VERIFY(0 == dmu_read(os, packobj, packoff,
3971 3971 packsize, packcheck, DMU_READ_PREFETCH));
3972 3972 VERIFY(0 == dmu_read(os, bigobj, bigoff,
3973 3973 bigsize, bigcheck, DMU_READ_PREFETCH));
3974 3974
3975 3975 ASSERT(bcmp(packbuf, packcheck, packsize) == 0);
3976 3976 ASSERT(bcmp(bigbuf, bigcheck, bigsize) == 0);
3977 3977
3978 3978 umem_free(packcheck, packsize);
3979 3979 umem_free(bigcheck, bigsize);
3980 3980 }
3981 3981 if (i == 2) {
3982 3982 txg_wait_open(dmu_objset_pool(os), 0);
3983 3983 } else if (i == 3) {
3984 3984 txg_wait_synced(dmu_objset_pool(os), 0);
3985 3985 }
3986 3986 }
3987 3987
3988 3988 dmu_buf_rele(bonus_db, FTAG);
3989 3989 umem_free(packbuf, packsize);
3990 3990 umem_free(bigbuf, bigsize);
3991 3991 umem_free(bigbuf_arcbufs, 2 * s * sizeof (arc_buf_t *));
3992 3992 }
3993 3993
3994 3994 /* ARGSUSED */
3995 3995 void
3996 3996 ztest_dmu_write_parallel(ztest_ds_t *zd, uint64_t id)
3997 3997 {
3998 3998 ztest_od_t od[1];
3999 3999 uint64_t offset = (1ULL << (ztest_random(20) + 43)) +
4000 4000 (ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT);
4001 4001
4002 4002 /*
4003 4003 * Have multiple threads write to large offsets in an object
4004 4004 * to verify that parallel writes to an object -- even to the
4005 4005	 * same blocks within the object -- don't cause any trouble.
4006 4006 */
4007 4007 ztest_od_init(&od[0], ID_PARALLEL, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0);
4008 4008
4009 4009 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
4010 4010 return;
4011 4011
4012 4012 while (ztest_random(10) != 0)
4013 4013 ztest_io(zd, od[0].od_object, offset);
4014 4014 }
4015 4015
4016 4016 void
4017 4017 ztest_dmu_prealloc(ztest_ds_t *zd, uint64_t id)
4018 4018 {
4019 4019 ztest_od_t od[1];
4020 4020 uint64_t offset = (1ULL << (ztest_random(4) + SPA_MAXBLOCKSHIFT)) +
4021 4021 (ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT);
4022 4022 uint64_t count = ztest_random(20) + 1;
4023 4023 uint64_t blocksize = ztest_random_blocksize();
4024 4024 void *data;
4025 4025
4026 4026 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0);
4027 4027
4028 4028 if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0)
4029 4029 return;
4030 4030
4031 4031 if (ztest_truncate(zd, od[0].od_object, offset, count * blocksize) != 0)
4032 4032 return;
4033 4033
4034 4034 ztest_prealloc(zd, od[0].od_object, offset, count * blocksize);
4035 4035
4036 4036 data = umem_zalloc(blocksize, UMEM_NOFAIL);
4037 4037
4038 4038 while (ztest_random(count) != 0) {
4039 4039 uint64_t randoff = offset + (ztest_random(count) * blocksize);
4040 4040 if (ztest_write(zd, od[0].od_object, randoff, blocksize,
4041 4041 data) != 0)
4042 4042 break;
4043 4043 while (ztest_random(4) != 0)
4044 4044 ztest_io(zd, od[0].od_object, randoff);
4045 4045 }
4046 4046
4047 4047 umem_free(data, blocksize);
4048 4048 }
4049 4049
4050 4050 /*
4051 4051 * Verify that zap_{create,destroy,add,remove,update} work as expected.
4052 4052 */
4053 4053 #define ZTEST_ZAP_MIN_INTS 1
4054 4054 #define ZTEST_ZAP_MAX_INTS 4
4055 4055 #define ZTEST_ZAP_MAX_PROPS 1000
4056 4056
4057 4057 void
4058 4058 ztest_zap(ztest_ds_t *zd, uint64_t id)
4059 4059 {
4060 4060 objset_t *os = zd->zd_os;
4061 4061 ztest_od_t od[1];
4062 4062 uint64_t object;
4063 4063 uint64_t txg, last_txg;
4064 4064 uint64_t value[ZTEST_ZAP_MAX_INTS];
4065 4065 uint64_t zl_ints, zl_intsize, prop;
4066 4066 int i, ints;
4067 4067 dmu_tx_t *tx;
4068 4068 char propname[100], txgname[100];
4069 4069 int error;
4070 4070 char *hc[2] = { "s.acl.h", ".s.open.h.hyLZlg" };
4071 4071
4072 4072 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0);
4073 4073
4074 4074 if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0)
4075 4075 return;
4076 4076
4077 4077 object = od[0].od_object;
4078 4078
4079 4079 /*
4080 4080 * Generate a known hash collision, and verify that
4081 4081 * we can lookup and remove both entries.
4082 4082 */
4083 4083 tx = dmu_tx_create(os);
4084 4084 dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
4085 4085 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
4086 4086 if (txg == 0)
4087 4087 return;
4088 4088 for (i = 0; i < 2; i++) {
4089 4089 value[i] = i;
4090 4090 VERIFY3U(0, ==, zap_add(os, object, hc[i], sizeof (uint64_t),
4091 4091 1, &value[i], tx));
4092 4092 }
4093 4093 for (i = 0; i < 2; i++) {
4094 4094 VERIFY3U(EEXIST, ==, zap_add(os, object, hc[i],
4095 4095 sizeof (uint64_t), 1, &value[i], tx));
4096 4096 VERIFY3U(0, ==,
4097 4097 zap_length(os, object, hc[i], &zl_intsize, &zl_ints));
4098 4098 ASSERT3U(zl_intsize, ==, sizeof (uint64_t));
4099 4099 ASSERT3U(zl_ints, ==, 1);
4100 4100 }
4101 4101 for (i = 0; i < 2; i++) {
4102 4102 VERIFY3U(0, ==, zap_remove(os, object, hc[i], tx));
4103 4103 }
4104 4104 dmu_tx_commit(tx);
4105 4105
4106 4106 /*
4107 4107	 * Generate a bunch of random entries.
4108 4108 */
4109 4109 ints = MAX(ZTEST_ZAP_MIN_INTS, object % ZTEST_ZAP_MAX_INTS);
4110 4110
4111 4111 prop = ztest_random(ZTEST_ZAP_MAX_PROPS);
4112 4112 (void) sprintf(propname, "prop_%llu", (u_longlong_t)prop);
4113 4113 (void) sprintf(txgname, "txg_%llu", (u_longlong_t)prop);
4114 4114 bzero(value, sizeof (value));
4115 4115 last_txg = 0;
4116 4116
4117 4117 /*
4118 4118 * If these zap entries already exist, validate their contents.
4119 4119 */
4120 4120 error = zap_length(os, object, txgname, &zl_intsize, &zl_ints);
4121 4121 if (error == 0) {
4122 4122 ASSERT3U(zl_intsize, ==, sizeof (uint64_t));
4123 4123 ASSERT3U(zl_ints, ==, 1);
4124 4124
4125 4125 VERIFY(zap_lookup(os, object, txgname, zl_intsize,
4126 4126 zl_ints, &last_txg) == 0);
4127 4127
4128 4128 VERIFY(zap_length(os, object, propname, &zl_intsize,
4129 4129 &zl_ints) == 0);
4130 4130
4131 4131 ASSERT3U(zl_intsize, ==, sizeof (uint64_t));
4132 4132 ASSERT3U(zl_ints, ==, ints);
4133 4133
4134 4134 VERIFY(zap_lookup(os, object, propname, zl_intsize,
4135 4135 zl_ints, value) == 0);
4136 4136
4137 4137 for (i = 0; i < ints; i++) {
4138 4138 ASSERT3U(value[i], ==, last_txg + object + i);
4139 4139 }
4140 4140 } else {
4141 4141 ASSERT3U(error, ==, ENOENT);
4142 4142 }
4143 4143
4144 4144 /*
4145 4145 * Atomically update two entries in our zap object.
4146 4146 * The first is named txg_%llu, and contains the txg
4147 4147 * in which the property was last updated. The second
4148 4148 * is named prop_%llu, and the nth element of its value
4149 4149 * should be txg + object + n.
4150 4150 */
4151 4151 tx = dmu_tx_create(os);
4152 4152 dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
4153 4153 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
4154 4154 if (txg == 0)
4155 4155 return;
4156 4156
4157 4157 if (last_txg > txg)
4158 4158 fatal(0, "zap future leak: old %llu new %llu", last_txg, txg);
4159 4159
4160 4160 for (i = 0; i < ints; i++)
4161 4161 value[i] = txg + object + i;
4162 4162
4163 4163 VERIFY3U(0, ==, zap_update(os, object, txgname, sizeof (uint64_t),
4164 4164 1, &txg, tx));
4165 4165 VERIFY3U(0, ==, zap_update(os, object, propname, sizeof (uint64_t),
4166 4166 ints, value, tx));
4167 4167
4168 4168 dmu_tx_commit(tx);
4169 4169
4170 4170 /*
4171 4171 * Remove a random pair of entries.
4172 4172 */
4173 4173 prop = ztest_random(ZTEST_ZAP_MAX_PROPS);
4174 4174 (void) sprintf(propname, "prop_%llu", (u_longlong_t)prop);
4175 4175 (void) sprintf(txgname, "txg_%llu", (u_longlong_t)prop);
4176 4176
4177 4177 error = zap_length(os, object, txgname, &zl_intsize, &zl_ints);
4178 4178
4179 4179 if (error == ENOENT)
4180 4180 return;
4181 4181
4182 4182 ASSERT0(error);
4183 4183
4184 4184 tx = dmu_tx_create(os);
4185 4185 dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
4186 4186 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
4187 4187 if (txg == 0)
4188 4188 return;
4189 4189 VERIFY3U(0, ==, zap_remove(os, object, txgname, tx));
4190 4190 VERIFY3U(0, ==, zap_remove(os, object, propname, tx));
4191 4191 dmu_tx_commit(tx);
4192 4192 }
4193 4193
4194 4194 /*
4195 4195	 * Test the upgrade of a microzap to a fatzap.
4196 4196 */
4197 4197 void
4198 4198 ztest_fzap(ztest_ds_t *zd, uint64_t id)
4199 4199 {
4200 4200 objset_t *os = zd->zd_os;
4201 4201 ztest_od_t od[1];
4202 4202 uint64_t object, txg;
4203 4203
4204 4204 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0);
4205 4205
4206 4206 if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0)
4207 4207 return;
4208 4208
4209 4209 object = od[0].od_object;
4210 4210
4211 4211 /*
4212 4212 * Add entries to this ZAP and make sure it spills over
4213 4213 * and gets upgraded to a fatzap. Also, since we are adding
4214 4214 * 2050 entries we should see ptrtbl growth and leaf-block split.
4215 4215 */
4216 4216 for (int i = 0; i < 2050; i++) {
4217 4217 char name[MAXNAMELEN];
4218 4218 uint64_t value = i;
4219 4219 dmu_tx_t *tx;
4220 4220 int error;
4221 4221
4222 4222 (void) snprintf(name, sizeof (name), "fzap-%llu-%llu",
4223 4223 id, value);
4224 4224
4225 4225 tx = dmu_tx_create(os);
4226 4226 dmu_tx_hold_zap(tx, object, B_TRUE, name);
4227 4227 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
4228 4228 if (txg == 0)
4229 4229 return;
4230 4230 error = zap_add(os, object, name, sizeof (uint64_t), 1,
4231 4231 &value, tx);
4232 4232 ASSERT(error == 0 || error == EEXIST);
4233 4233 dmu_tx_commit(tx);
4234 4234 }
4235 4235 }
4236 4236
4237 4237 /* ARGSUSED */
4238 4238 void
4239 4239 ztest_zap_parallel(ztest_ds_t *zd, uint64_t id)
4240 4240 {
4241 4241 objset_t *os = zd->zd_os;
4242 4242 ztest_od_t od[1];
4243 4243 uint64_t txg, object, count, wsize, wc, zl_wsize, zl_wc;
4244 4244 dmu_tx_t *tx;
4245 4245 int i, namelen, error;
4246 4246 int micro = ztest_random(2);
4247 4247 char name[20], string_value[20];
4248 4248 void *data;
4249 4249
4250 4250 ztest_od_init(&od[0], ID_PARALLEL, FTAG, micro, DMU_OT_ZAP_OTHER, 0, 0);
4251 4251
4252 4252 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
4253 4253 return;
4254 4254
4255 4255 object = od[0].od_object;
4256 4256
4257 4257 /*
4258 4258 * Generate a random name of the form 'xxx.....' where each
4259 4259 * x is a random printable character and the dots are dots.
4260 4260 * There are 94 such characters, and the name length goes from
4261 4261 * 6 to 20, so there are 94^3 * 15 = 12,458,760 possible names.
4262 4262 */
4263 4263 namelen = ztest_random(sizeof (name) - 5) + 5 + 1;
4264 4264
4265 4265 for (i = 0; i < 3; i++)
4266 4266 name[i] = '!' + ztest_random('~' - '!' + 1);
4267 4267 for (; i < namelen - 1; i++)
4268 4268 name[i] = '.';
4269 4269 name[i] = '\0';
4270 4270
4271 4271 if ((namelen & 1) || micro) {
4272 4272 wsize = sizeof (txg);
4273 4273 wc = 1;
4274 4274 data = &txg;
4275 4275 } else {
4276 4276 wsize = 1;
4277 4277 wc = namelen;
4278 4278 data = string_value;
4279 4279 }
4280 4280
4281 4281 count = -1ULL;
4282 4282 VERIFY0(zap_count(os, object, &count));
4283 4283 ASSERT(count != -1ULL);
4284 4284
4285 4285 /*
4286 4286 * Select an operation: length, lookup, add, update, remove.
4287 4287 */
4288 4288 i = ztest_random(5);
4289 4289
4290 4290 if (i >= 2) {
4291 4291 tx = dmu_tx_create(os);
4292 4292 dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
4293 4293 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
4294 4294 if (txg == 0)
4295 4295 return;
4296 4296 bcopy(name, string_value, namelen);
4297 4297 } else {
4298 4298 tx = NULL;
4299 4299 txg = 0;
4300 4300 bzero(string_value, namelen);
4301 4301 }
4302 4302
4303 4303 switch (i) {
4304 4304
4305 4305 case 0:
4306 4306 error = zap_length(os, object, name, &zl_wsize, &zl_wc);
4307 4307 if (error == 0) {
4308 4308 ASSERT3U(wsize, ==, zl_wsize);
4309 4309 ASSERT3U(wc, ==, zl_wc);
4310 4310 } else {
4311 4311 ASSERT3U(error, ==, ENOENT);
4312 4312 }
4313 4313 break;
4314 4314
4315 4315 case 1:
4316 4316 error = zap_lookup(os, object, name, wsize, wc, data);
4317 4317 if (error == 0) {
4318 4318 if (data == string_value &&
4319 4319 bcmp(name, data, namelen) != 0)
4320 4320 fatal(0, "name '%s' != val '%s' len %d",
4321 4321 name, data, namelen);
4322 4322 } else {
4323 4323 ASSERT3U(error, ==, ENOENT);
4324 4324 }
4325 4325 break;
4326 4326
4327 4327 case 2:
4328 4328 error = zap_add(os, object, name, wsize, wc, data, tx);
4329 4329 ASSERT(error == 0 || error == EEXIST);
4330 4330 break;
4331 4331
4332 4332 case 3:
4333 4333 VERIFY(zap_update(os, object, name, wsize, wc, data, tx) == 0);
4334 4334 break;
4335 4335
4336 4336 case 4:
4337 4337 error = zap_remove(os, object, name, tx);
4338 4338 ASSERT(error == 0 || error == ENOENT);
4339 4339 break;
4340 4340 }
4341 4341
4342 4342 if (tx != NULL)
4343 4343 dmu_tx_commit(tx);
4344 4344 }
4345 4345
4346 4346 /*
4347 4347 * Commit callback data.
4348 4348 */
4349 4349 typedef struct ztest_cb_data {
4350 4350 list_node_t zcd_node;
4351 4351 uint64_t zcd_txg;
4352 4352 int zcd_expected_err;
4353 4353 boolean_t zcd_added;
4354 4354 boolean_t zcd_called;
4355 4355 spa_t *zcd_spa;
4356 4356 } ztest_cb_data_t;
4357 4357
4358 4358 /* This is the actual commit callback function */
4359 4359 static void
4360 4360 ztest_commit_callback(void *arg, int error)
4361 4361 {
4362 4362 ztest_cb_data_t *data = arg;
4363 4363 uint64_t synced_txg;
4364 4364
4365 4365 VERIFY(data != NULL);
4366 4366 VERIFY3S(data->zcd_expected_err, ==, error);
4367 4367 VERIFY(!data->zcd_called);
4368 4368
4369 4369 synced_txg = spa_last_synced_txg(data->zcd_spa);
4370 4370 if (data->zcd_txg > synced_txg)
4371 4371 fatal(0, "commit callback of txg %" PRIu64 " called prematurely"
4372 4372 ", last synced txg = %" PRIu64 "\n", data->zcd_txg,
4373 4373 synced_txg);
4374 4374
4375 4375 data->zcd_called = B_TRUE;
4376 4376
4377 4377 if (error == ECANCELED) {
4378 4378 ASSERT0(data->zcd_txg);
4379 4379 ASSERT(!data->zcd_added);
4380 4380
4381 4381 /*
4382 4382 * The private callback data should be destroyed here, but
4383 4383 * since we are going to check the zcd_called field after
4384 4384 * dmu_tx_abort(), we will destroy it there.
4385 4385 */
4386 4386 return;
4387 4387 }
4388 4388
4389 4389 /* Was this callback added to the global callback list? */
4390 4390 if (!data->zcd_added)
4391 4391 goto out;
4392 4392
4393 4393 ASSERT3U(data->zcd_txg, !=, 0);
4394 4394
4395 4395 /* Remove our callback from the list */
4396 4396 (void) mutex_lock(&zcl.zcl_callbacks_lock);
4397 4397 list_remove(&zcl.zcl_callbacks, data);
4398 4398 (void) mutex_unlock(&zcl.zcl_callbacks_lock);
4399 4399
4400 4400 out:
4401 4401 umem_free(data, sizeof (ztest_cb_data_t));
4402 4402 }
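As an aside, a minimal sketch of the registration pattern this callback participates in; the names my_commit_cb and my_arg are hypothetical, while dmu_tx_callback_register() and the (void *arg, int error) callback signature are the ones exercised by ztest_dmu_commit_callbacks() below:

	static void
	my_commit_cb(void *my_arg, int error)
	{
		/* error == 0 once the callback's txg has synced to disk; */
		/* error == ECANCELED if the transaction was aborted.     */
	}

	dmu_tx_t *tx = dmu_tx_create(os);
	dmu_tx_callback_register(tx, my_commit_cb, my_arg);
	if (dmu_tx_assign(tx, TXG_NOWAIT) != 0)
		dmu_tx_abort(tx);	/* my_commit_cb fires with ECANCELED */
	else
		dmu_tx_commit(tx);	/* my_commit_cb fires after the txg syncs */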
4403 4403
4404 4404 /* Allocate and initialize callback data structure */
4405 4405 static ztest_cb_data_t *
4406 4406 ztest_create_cb_data(objset_t *os, uint64_t txg)
4407 4407 {
4408 4408 ztest_cb_data_t *cb_data;
4409 4409
4410 4410 cb_data = umem_zalloc(sizeof (ztest_cb_data_t), UMEM_NOFAIL);
4411 4411
4412 4412 cb_data->zcd_txg = txg;
4413 4413 cb_data->zcd_spa = dmu_objset_spa(os);
4414 4414
4415 4415 return (cb_data);
4416 4416 }
4417 4417
4418 4418 /*
4419 4419 * If a number of txgs equal to this threshold have been created after a commit
4420 4420 * callback has been registered but not called, then we assume there is an
4421 4421 * implementation bug.
4422 4422 */
4423 4423 #define ZTEST_COMMIT_CALLBACK_THRESH (TXG_CONCURRENT_STATES + 2)
4424 4424
4425 4425 /*
4426 4426 * Commit callback test.
4427 4427 */
4428 4428 void
4429 4429 ztest_dmu_commit_callbacks(ztest_ds_t *zd, uint64_t id)
4430 4430 {
4431 4431 objset_t *os = zd->zd_os;
4432 4432 ztest_od_t od[1];
4433 4433 dmu_tx_t *tx;
4434 4434 ztest_cb_data_t *cb_data[3], *tmp_cb;
4435 4435 uint64_t old_txg, txg;
4436 4436	int i, error = 0;
4437 4437
4438 4438 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0);
4439 4439
4440 4440 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
4441 4441 return;
4442 4442
4443 4443 tx = dmu_tx_create(os);
4444 4444
4445 4445 cb_data[0] = ztest_create_cb_data(os, 0);
4446 4446 dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[0]);
4447 4447
4448 4448 dmu_tx_hold_write(tx, od[0].od_object, 0, sizeof (uint64_t));
4449 4449
4450 4450 /* Every once in a while, abort the transaction on purpose */
4451 4451 if (ztest_random(100) == 0)
4452 4452 error = -1;
4453 4453
4454 4454 if (!error)
4455 4455 error = dmu_tx_assign(tx, TXG_NOWAIT);
4456 4456
4457 4457 txg = error ? 0 : dmu_tx_get_txg(tx);
4458 4458
4459 4459 cb_data[0]->zcd_txg = txg;
4460 4460 cb_data[1] = ztest_create_cb_data(os, txg);
4461 4461 dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[1]);
4462 4462
4463 4463 if (error) {
4464 4464 /*
4465 4465 * It's not a strict requirement to call the registered
4466 4466 * callbacks from inside dmu_tx_abort(), but that's what
4467 4467		 * is supposed to happen in the current implementation,
4468 4468 * so we will check for that.
4469 4469 */
4470 4470 for (i = 0; i < 2; i++) {
4471 4471 cb_data[i]->zcd_expected_err = ECANCELED;
4472 4472 VERIFY(!cb_data[i]->zcd_called);
4473 4473 }
4474 4474
4475 4475 dmu_tx_abort(tx);
4476 4476
4477 4477 for (i = 0; i < 2; i++) {
4478 4478 VERIFY(cb_data[i]->zcd_called);
4479 4479 umem_free(cb_data[i], sizeof (ztest_cb_data_t));
4480 4480 }
4481 4481
4482 4482 return;
4483 4483 }
4484 4484
4485 4485 cb_data[2] = ztest_create_cb_data(os, txg);
4486 4486 dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[2]);
4487 4487
4488 4488 /*
4489 4489 * Read existing data to make sure there isn't a future leak.
4490 4490 */
4491 4491 VERIFY(0 == dmu_read(os, od[0].od_object, 0, sizeof (uint64_t),
4492 4492 &old_txg, DMU_READ_PREFETCH));
4493 4493
4494 4494 if (old_txg > txg)
4495 4495 fatal(0, "future leak: got %" PRIu64 ", open txg is %" PRIu64,
4496 4496 old_txg, txg);
4497 4497
4498 4498 dmu_write(os, od[0].od_object, 0, sizeof (uint64_t), &txg, tx);
4499 4499
4500 4500 (void) mutex_lock(&zcl.zcl_callbacks_lock);
4501 4501
4502 4502 /*
4503 4503 * Since commit callbacks don't have any ordering requirement and since
4504 4504 * it is theoretically possible for a commit callback to be called
4505 4505 * after an arbitrary amount of time has elapsed since its txg has been
4506 4506 * synced, it is difficult to reliably determine whether a commit
4507 4507 * callback hasn't been called due to high load or due to a flawed
4508 4508 * implementation.
4509 4509 *
4510 4510 * In practice, we will assume that if after a certain number of txgs a
4511 4511 * commit callback hasn't been called, then most likely there's an
4512 4512	 * implementation bug.
4513 4513 */
4514 4514 tmp_cb = list_head(&zcl.zcl_callbacks);
4515 4515 if (tmp_cb != NULL &&
4516 - tmp_cb->zcd_txg > txg - ZTEST_COMMIT_CALLBACK_THRESH) {
4516 + (txg - ZTEST_COMMIT_CALLBACK_THRESH) > tmp_cb->zcd_txg) {
4517 4517 fatal(0, "Commit callback threshold exceeded, oldest txg: %"
4518 4518 PRIu64 ", open txg: %" PRIu64 "\n", tmp_cb->zcd_txg, txg);
4519 4519 }
4520 4520
4521 4521 /*
4522 4522 * Let's find the place to insert our callbacks.
4523 4523 *
4524 4524 * Even though the list is ordered by txg, it is possible for the
4525 4525 * insertion point to not be the end because our txg may already be
4526 4526 * quiescing at this point and other callbacks in the open txg
4527 4527 * (from other objsets) may have sneaked in.
4528 4528 */
4529 4529 tmp_cb = list_tail(&zcl.zcl_callbacks);
4530 4530 while (tmp_cb != NULL && tmp_cb->zcd_txg > txg)
4531 4531 tmp_cb = list_prev(&zcl.zcl_callbacks, tmp_cb);
4532 4532
4533 4533 /* Add the 3 callbacks to the list */
4534 4534 for (i = 0; i < 3; i++) {
4535 4535 if (tmp_cb == NULL)
4536 4536 list_insert_head(&zcl.zcl_callbacks, cb_data[i]);
4537 4537 else
4538 4538 list_insert_after(&zcl.zcl_callbacks, tmp_cb,
4539 4539 cb_data[i]);
4540 4540
4541 4541 cb_data[i]->zcd_added = B_TRUE;
4542 4542 VERIFY(!cb_data[i]->zcd_called);
4543 4543
4544 4544 tmp_cb = cb_data[i];
4545 4545 }
4546 4546
4547 4547 (void) mutex_unlock(&zcl.zcl_callbacks_lock);
4548 4548
4549 4549 dmu_tx_commit(tx);
4550 4550 }
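The rewritten comparison above (old line 4516) is the point of this hunk: the stale-callback check should trip only when the oldest registered callback has fallen more than ZTEST_COMMIT_CALLBACK_THRESH txgs behind the currently open txg. A rough numeric illustration, assuming TXG_CONCURRENT_STATES is 3 so the threshold works out to 5 (the txg values themselves are made up):

	open txg = 100, oldest zcd_txg = 90 (genuinely stale):
		old test:  90 > (100 - 5)  ->  false, never reported
		new test:  (100 - 5) > 90  ->  true,  fatal() fires
	open txg = 100, oldest zcd_txg = 98 (recently registered):
		old test:  98 > (100 - 5)  ->  true,  spurious fatal()
		new test:  (100 - 5) > 98  ->  false, correctly ignored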
4551 4551
4552 4552 /* ARGSUSED */
4553 4553 void
4554 4554 ztest_dsl_prop_get_set(ztest_ds_t *zd, uint64_t id)
4555 4555 {
4556 4556 zfs_prop_t proplist[] = {
4557 4557 ZFS_PROP_CHECKSUM,
4558 4558 ZFS_PROP_COMPRESSION,
4559 4559 ZFS_PROP_COPIES,
4560 4560 ZFS_PROP_DEDUP
4561 4561 };
4562 4562
4563 4563 (void) rw_rdlock(&ztest_name_lock);
4564 4564
4565 4565 for (int p = 0; p < sizeof (proplist) / sizeof (proplist[0]); p++)
4566 4566 (void) ztest_dsl_prop_set_uint64(zd->zd_name, proplist[p],
4567 4567 ztest_random_dsl_prop(proplist[p]), (int)ztest_random(2));
4568 4568
4569 4569 (void) rw_unlock(&ztest_name_lock);
4570 4570 }
4571 4571
4572 4572 /* ARGSUSED */
4573 4573 void
4574 4574 ztest_spa_prop_get_set(ztest_ds_t *zd, uint64_t id)
4575 4575 {
4576 4576 nvlist_t *props = NULL;
4577 4577
4578 4578 (void) rw_rdlock(&ztest_name_lock);
4579 4579
4580 4580 (void) ztest_spa_prop_set_uint64(ZPOOL_PROP_DEDUPDITTO,
4581 4581 ZIO_DEDUPDITTO_MIN + ztest_random(ZIO_DEDUPDITTO_MIN));
4582 4582
4583 4583 VERIFY0(spa_prop_get(ztest_spa, &props));
4584 4584
4585 4585 if (ztest_opts.zo_verbose >= 6)
4586 4586 dump_nvlist(props, 4);
4587 4587
4588 4588 nvlist_free(props);
4589 4589
4590 4590 (void) rw_unlock(&ztest_name_lock);
4591 4591 }
4592 4592
4593 4593 static int
4594 4594 user_release_one(const char *snapname, const char *holdname)
4595 4595 {
4596 4596 nvlist_t *snaps, *holds;
4597 4597 int error;
4598 4598
4599 4599 snaps = fnvlist_alloc();
4600 4600 holds = fnvlist_alloc();
4601 4601 fnvlist_add_boolean(holds, holdname);
4602 4602 fnvlist_add_nvlist(snaps, snapname, holds);
4603 4603 fnvlist_free(holds);
4604 4604 error = dsl_dataset_user_release(snaps, NULL);
4605 4605 fnvlist_free(snaps);
4606 4606 return (error);
4607 4607 }
4608 4608
4609 4609 /*
4610 4610 * Test snapshot hold/release and deferred destroy.
4611 4611 */
4612 4612 void
4613 4613 ztest_dmu_snapshot_hold(ztest_ds_t *zd, uint64_t id)
4614 4614 {
4615 4615 int error;
4616 4616 objset_t *os = zd->zd_os;
4617 4617 objset_t *origin;
4618 4618 char snapname[100];
4619 4619 char fullname[100];
4620 4620 char clonename[100];
4621 4621 char tag[100];
4622 4622 char osname[MAXNAMELEN];
4623 4623 nvlist_t *holds;
4624 4624
4625 4625 (void) rw_rdlock(&ztest_name_lock);
4626 4626
4627 4627 dmu_objset_name(os, osname);
4628 4628
4629 4629 (void) snprintf(snapname, sizeof (snapname), "sh1_%llu", id);
4630 4630 (void) snprintf(fullname, sizeof (fullname), "%s@%s", osname, snapname);
4631 4631 (void) snprintf(clonename, sizeof (clonename),
4632 4632 "%s/ch1_%llu", osname, id);
4633 4633 (void) snprintf(tag, sizeof (tag), "tag_%llu", id);
4634 4634
4635 4635 /*
4636 4636 * Clean up from any previous run.
4637 4637 */
4638 4638 error = dsl_destroy_head(clonename);
4639 4639 if (error != ENOENT)
4640 4640 ASSERT0(error);
4641 4641 error = user_release_one(fullname, tag);
4642 4642 if (error != ESRCH && error != ENOENT)
4643 4643 ASSERT0(error);
4644 4644 error = dsl_destroy_snapshot(fullname, B_FALSE);
4645 4645 if (error != ENOENT)
4646 4646 ASSERT0(error);
4647 4647
4648 4648 /*
4649 4649 * Create snapshot, clone it, mark snap for deferred destroy,
4650 4650 * destroy clone, verify snap was also destroyed.
4651 4651 */
4652 4652 error = dmu_objset_snapshot_one(osname, snapname);
4653 4653 if (error) {
4654 4654 if (error == ENOSPC) {
4655 4655 ztest_record_enospc("dmu_objset_snapshot");
4656 4656 goto out;
4657 4657 }
4658 4658 fatal(0, "dmu_objset_snapshot(%s) = %d", fullname, error);
4659 4659 }
4660 4660
4661 4661 error = dmu_objset_clone(clonename, fullname);
4662 4662 if (error) {
4663 4663 if (error == ENOSPC) {
4664 4664 ztest_record_enospc("dmu_objset_clone");
4665 4665 goto out;
4666 4666 }
4667 4667 fatal(0, "dmu_objset_clone(%s) = %d", clonename, error);
4668 4668 }
4669 4669
4670 4670 error = dsl_destroy_snapshot(fullname, B_TRUE);
4671 4671 if (error) {
4672 4672 fatal(0, "dsl_destroy_snapshot(%s, B_TRUE) = %d",
4673 4673 fullname, error);
4674 4674 }
4675 4675
4676 4676 error = dsl_destroy_head(clonename);
4677 4677 if (error)
4678 4678 fatal(0, "dsl_destroy_head(%s) = %d", clonename, error);
4679 4679
4680 4680 error = dmu_objset_hold(fullname, FTAG, &origin);
4681 4681 if (error != ENOENT)
4682 4682 fatal(0, "dmu_objset_hold(%s) = %d", fullname, error);
4683 4683
4684 4684 /*
4685 4685 * Create snapshot, add temporary hold, verify that we can't
4686 4686 * destroy a held snapshot, mark for deferred destroy,
4687 4687 * release hold, verify snapshot was destroyed.
4688 4688 */
4689 4689 error = dmu_objset_snapshot_one(osname, snapname);
4690 4690 if (error) {
4691 4691 if (error == ENOSPC) {
4692 4692 ztest_record_enospc("dmu_objset_snapshot");
4693 4693 goto out;
4694 4694 }
4695 4695 fatal(0, "dmu_objset_snapshot(%s) = %d", fullname, error);
4696 4696 }
4697 4697
4698 4698 holds = fnvlist_alloc();
4699 4699 fnvlist_add_string(holds, fullname, tag);
4700 4700 error = dsl_dataset_user_hold(holds, 0, NULL);
4701 4701 fnvlist_free(holds);
4702 4702
4703 4703 if (error)
4704 4704		fatal(0, "dsl_dataset_user_hold(%s, %s) = %d", fullname, tag, error);
4705 4705
4706 4706 error = dsl_destroy_snapshot(fullname, B_FALSE);
4707 4707 if (error != EBUSY) {
4708 4708 fatal(0, "dsl_destroy_snapshot(%s, B_FALSE) = %d",
4709 4709 fullname, error);
4710 4710 }
4711 4711
4712 4712 error = dsl_destroy_snapshot(fullname, B_TRUE);
4713 4713 if (error) {
4714 4714 fatal(0, "dsl_destroy_snapshot(%s, B_TRUE) = %d",
4715 4715 fullname, error);
4716 4716 }
4717 4717
4718 4718 error = user_release_one(fullname, tag);
4719 4719 if (error)
4720 4720		fatal(0, "user_release_one(%s, %s) = %d", fullname, tag, error);
4721 4721
4722 4722 VERIFY3U(dmu_objset_hold(fullname, FTAG, &origin), ==, ENOENT);
4723 4723
4724 4724 out:
4725 4725 (void) rw_unlock(&ztest_name_lock);
4726 4726 }
4727 4727
4728 4728 /*
4729 4729 * Inject random faults into the on-disk data.
4730 4730 */
4731 4731 /* ARGSUSED */
4732 4732 void
4733 4733 ztest_fault_inject(ztest_ds_t *zd, uint64_t id)
4734 4734 {
4735 4735 ztest_shared_t *zs = ztest_shared;
4736 4736 spa_t *spa = ztest_spa;
4737 4737 int fd;
4738 4738 uint64_t offset;
4739 4739 uint64_t leaves;
4740 4740 uint64_t bad = 0x1990c0ffeedecade;
4741 4741 uint64_t top, leaf;
4742 4742 char path0[MAXPATHLEN];
4743 4743 char pathrand[MAXPATHLEN];
4744 4744 size_t fsize;
4745 4745 int bshift = SPA_MAXBLOCKSHIFT + 2; /* don't scrog all labels */
4746 4746 int iters = 1000;
4747 4747 int maxfaults;
4748 4748 int mirror_save;
4749 4749 vdev_t *vd0 = NULL;
4750 4750 uint64_t guid0 = 0;
4751 4751 boolean_t islog = B_FALSE;
4752 4752
4753 4753 VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
4754 4754 maxfaults = MAXFAULTS();
4755 4755 leaves = MAX(zs->zs_mirrors, 1) * ztest_opts.zo_raidz;
4756 4756 mirror_save = zs->zs_mirrors;
4757 4757 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
4758 4758
4759 4759 ASSERT(leaves >= 1);
4760 4760
4761 4761 /*
4762 4762 * We need SCL_STATE here because we're going to look at vd0->vdev_tsd.
4763 4763 */
4764 4764 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
4765 4765
4766 4766 if (ztest_random(2) == 0) {
4767 4767 /*
4768 4768 * Inject errors on a normal data device or slog device.
4769 4769 */
4770 4770 top = ztest_random_vdev_top(spa, B_TRUE);
4771 4771 leaf = ztest_random(leaves) + zs->zs_splits;
4772 4772
4773 4773 /*
4774 4774 * Generate paths to the first leaf in this top-level vdev,
4775 4775 * and to the random leaf we selected. We'll induce transient
4776 4776 * write failures and random online/offline activity on leaf 0,
4777 4777 * and we'll write random garbage to the randomly chosen leaf.
4778 4778 */
4779 4779 (void) snprintf(path0, sizeof (path0), ztest_dev_template,
4780 4780 ztest_opts.zo_dir, ztest_opts.zo_pool,
4781 4781 top * leaves + zs->zs_splits);
4782 4782 (void) snprintf(pathrand, sizeof (pathrand), ztest_dev_template,
4783 4783 ztest_opts.zo_dir, ztest_opts.zo_pool,
4784 4784 top * leaves + leaf);
4785 4785
4786 4786 vd0 = vdev_lookup_by_path(spa->spa_root_vdev, path0);
4787 4787 if (vd0 != NULL && vd0->vdev_top->vdev_islog)
4788 4788 islog = B_TRUE;
4789 4789
4790 4790 if (vd0 != NULL && maxfaults != 1) {
4791 4791 /*
4792 4792 * Make vd0 explicitly claim to be unreadable,
4793 4793 * or unwriteable, or reach behind its back
4794 4794 * and close the underlying fd. We can do this if
4795 4795 * maxfaults == 0 because we'll fail and reexecute,
4796 4796 * and we can do it if maxfaults >= 2 because we'll
4797 4797 * have enough redundancy. If maxfaults == 1, the
4798 4798 * combination of this with injection of random data
4799 4799 * corruption below exceeds the pool's fault tolerance.
4800 4800 */
4801 4801 vdev_file_t *vf = vd0->vdev_tsd;
4802 4802
4803 4803 if (vf != NULL && ztest_random(3) == 0) {
4804 4804 (void) close(vf->vf_vnode->v_fd);
4805 4805 vf->vf_vnode->v_fd = -1;
4806 4806 } else if (ztest_random(2) == 0) {
4807 4807 vd0->vdev_cant_read = B_TRUE;
4808 4808 } else {
4809 4809 vd0->vdev_cant_write = B_TRUE;
4810 4810 }
4811 4811 guid0 = vd0->vdev_guid;
4812 4812 }
4813 4813 } else {
4814 4814 /*
4815 4815 * Inject errors on an l2cache device.
4816 4816 */
4817 4817 spa_aux_vdev_t *sav = &spa->spa_l2cache;
4818 4818
4819 4819 if (sav->sav_count == 0) {
4820 4820 spa_config_exit(spa, SCL_STATE, FTAG);
4821 4821 return;
4822 4822 }
4823 4823 vd0 = sav->sav_vdevs[ztest_random(sav->sav_count)];
4824 4824 guid0 = vd0->vdev_guid;
4825 4825 (void) strcpy(path0, vd0->vdev_path);
4826 4826 (void) strcpy(pathrand, vd0->vdev_path);
4827 4827
4828 4828 leaf = 0;
4829 4829 leaves = 1;
4830 4830 maxfaults = INT_MAX; /* no limit on cache devices */
4831 4831 }
4832 4832
4833 4833 spa_config_exit(spa, SCL_STATE, FTAG);
4834 4834
4835 4835 /*
4836 4836 * If we can tolerate two or more faults, or we're dealing
4837 4837 * with a slog, randomly online/offline vd0.
4838 4838 */
4839 4839 if ((maxfaults >= 2 || islog) && guid0 != 0) {
4840 4840 if (ztest_random(10) < 6) {
4841 4841 int flags = (ztest_random(2) == 0 ?
4842 4842 ZFS_OFFLINE_TEMPORARY : 0);
4843 4843
4844 4844 /*
4845 4845			 * We have to grab the ztest_name_lock as writer to
4846 4846 * prevent a race between offlining a slog and
4847 4847 * destroying a dataset. Offlining the slog will
4848 4848 * grab a reference on the dataset which may cause
4849 4849 * dmu_objset_destroy() to fail with EBUSY thus
4850 4850 * leaving the dataset in an inconsistent state.
4851 4851 */
4852 4852 if (islog)
4853 4853 (void) rw_wrlock(&ztest_name_lock);
4854 4854
4855 4855 VERIFY(vdev_offline(spa, guid0, flags) != EBUSY);
4856 4856
4857 4857 if (islog)
4858 4858 (void) rw_unlock(&ztest_name_lock);
4859 4859 } else {
4860 4860 /*
4861 4861 * Ideally we would like to be able to randomly
4862 4862 * call vdev_[on|off]line without holding locks
4863 4863 * to force unpredictable failures but the side
4864 4864 * effects of vdev_[on|off]line prevent us from
4865 4865 * doing so. We grab the ztest_vdev_lock here to
4866 4866 * prevent a race between injection testing and
4867 4867 * aux_vdev removal.
4868 4868 */
4869 4869 VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
4870 4870 (void) vdev_online(spa, guid0, 0, NULL);
4871 4871 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
4872 4872 }
4873 4873 }
4874 4874
4875 4875 if (maxfaults == 0)
4876 4876 return;
4877 4877
4878 4878 /*
4879 4879 * We have at least single-fault tolerance, so inject data corruption.
4880 4880 */
4881 4881 fd = open(pathrand, O_RDWR);
4882 4882
4883 4883 if (fd == -1) /* we hit a gap in the device namespace */
4884 4884 return;
4885 4885
4886 4886 fsize = lseek(fd, 0, SEEK_END);
4887 4887
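	/*
	 * (Editorial note, my reading of the offset arithmetic below: the
	 * file is treated as stripes of (leaves << bshift) bytes; each pass
	 * picks a random stripe, steps to the (leaf << bshift) slice within
	 * it, and then adds a random 8-byte-aligned offset confined to the
	 * lower half of that slice.)
	 */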
4888 4888 while (--iters != 0) {
4889 4889 offset = ztest_random(fsize / (leaves << bshift)) *
4890 4890 (leaves << bshift) + (leaf << bshift) +
4891 4891 (ztest_random(1ULL << (bshift - 1)) & -8ULL);
4892 4892
4893 4893 if (offset >= fsize)
4894 4894 continue;
4895 4895
4896 4896 VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
4897 4897 if (mirror_save != zs->zs_mirrors) {
4898 4898 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
4899 4899 (void) close(fd);
4900 4900 return;
4901 4901 }
4902 4902
4903 4903 if (pwrite(fd, &bad, sizeof (bad), offset) != sizeof (bad))
4904 4904 fatal(1, "can't inject bad word at 0x%llx in %s",
4905 4905 offset, pathrand);
4906 4906
4907 4907 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
4908 4908
4909 4909 if (ztest_opts.zo_verbose >= 7)
4910 4910 (void) printf("injected bad word into %s,"
4911 4911 " offset 0x%llx\n", pathrand, (u_longlong_t)offset);
4912 4912 }
4913 4913
4914 4914 (void) close(fd);
4915 4915 }
4916 4916
4917 4917 /*
4918 4918 * Verify that DDT repair works as expected.
4919 4919 */
4920 4920 void
4921 4921 ztest_ddt_repair(ztest_ds_t *zd, uint64_t id)
4922 4922 {
4923 4923 ztest_shared_t *zs = ztest_shared;
4924 4924 spa_t *spa = ztest_spa;
4925 4925 objset_t *os = zd->zd_os;
4926 4926 ztest_od_t od[1];
4927 4927 uint64_t object, blocksize, txg, pattern, psize;
4928 4928 enum zio_checksum checksum = spa_dedup_checksum(spa);
4929 4929 dmu_buf_t *db;
4930 4930 dmu_tx_t *tx;
4931 4931 void *buf;
4932 4932 blkptr_t blk;
4933 4933 int copies = 2 * ZIO_DEDUPDITTO_MIN;
4934 4934
4935 4935 blocksize = ztest_random_blocksize();
4936 4936 blocksize = MIN(blocksize, 2048); /* because we write so many */
4937 4937
4938 4938 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0);
4939 4939
4940 4940 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
4941 4941 return;
4942 4942
4943 4943 /*
4944 4944 * Take the name lock as writer to prevent anyone else from changing
4945 4945	 * the pool and dataset properties we need to maintain during this test.
4946 4946 */
4947 4947 (void) rw_wrlock(&ztest_name_lock);
4948 4948
4949 4949 if (ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_DEDUP, checksum,
4950 4950 B_FALSE) != 0 ||
4951 4951 ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_COPIES, 1,
4952 4952 B_FALSE) != 0) {
4953 4953 (void) rw_unlock(&ztest_name_lock);
4954 4954 return;
4955 4955 }
4956 4956
4957 4957 object = od[0].od_object;
4958 4958 blocksize = od[0].od_blocksize;
4959 4959 pattern = zs->zs_guid ^ dmu_objset_fsid_guid(os);
4960 4960
4961 4961 ASSERT(object != 0);
4962 4962
4963 4963 tx = dmu_tx_create(os);
4964 4964 dmu_tx_hold_write(tx, object, 0, copies * blocksize);
4965 4965 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
4966 4966 if (txg == 0) {
4967 4967 (void) rw_unlock(&ztest_name_lock);
4968 4968 return;
4969 4969 }
4970 4970
4971 4971 /*
4972 4972 * Write all the copies of our block.
4973 4973 */
4974 4974 for (int i = 0; i < copies; i++) {
4975 4975 uint64_t offset = i * blocksize;
4976 4976 int error = dmu_buf_hold(os, object, offset, FTAG, &db,
4977 4977 DMU_READ_NO_PREFETCH);
4978 4978 if (error != 0) {
4979 4979 fatal(B_FALSE, "dmu_buf_hold(%p, %llu, %llu) = %u",
4980 4980 os, (long long)object, (long long) offset, error);
4981 4981 }
4982 4982 ASSERT(db->db_offset == offset);
4983 4983 ASSERT(db->db_size == blocksize);
4984 4984 ASSERT(ztest_pattern_match(db->db_data, db->db_size, pattern) ||
4985 4985 ztest_pattern_match(db->db_data, db->db_size, 0ULL));
4986 4986 dmu_buf_will_fill(db, tx);
4987 4987 ztest_pattern_set(db->db_data, db->db_size, pattern);
4988 4988 dmu_buf_rele(db, FTAG);
4989 4989 }
4990 4990
4991 4991 dmu_tx_commit(tx);
4992 4992 txg_wait_synced(spa_get_dsl(spa), txg);
4993 4993
4994 4994 /*
4995 4995 * Find out what block we got.
4996 4996 */
4997 4997 VERIFY0(dmu_buf_hold(os, object, 0, FTAG, &db,
4998 4998 DMU_READ_NO_PREFETCH));
4999 4999 blk = *((dmu_buf_impl_t *)db)->db_blkptr;
5000 5000 dmu_buf_rele(db, FTAG);
5001 5001
5002 5002 /*
5003 5003 * Damage the block. Dedup-ditto will save us when we read it later.
5004 5004 */
5005 5005 psize = BP_GET_PSIZE(&blk);
5006 5006 buf = zio_buf_alloc(psize);
5007 5007 ztest_pattern_set(buf, psize, ~pattern);
5008 5008
5009 5009 (void) zio_wait(zio_rewrite(NULL, spa, 0, &blk,
5010 5010 buf, psize, NULL, NULL, ZIO_PRIORITY_SYNC_WRITE,
5011 5011 ZIO_FLAG_CANFAIL | ZIO_FLAG_INDUCE_DAMAGE, NULL));
5012 5012
5013 5013 zio_buf_free(buf, psize);
5014 5014
5015 5015 (void) rw_unlock(&ztest_name_lock);
5016 5016 }
5017 5017
5018 5018 /*
5019 5019 * Scrub the pool.
5020 5020 */
5021 5021 /* ARGSUSED */
5022 5022 void
5023 5023 ztest_scrub(ztest_ds_t *zd, uint64_t id)
5024 5024 {
5025 5025 spa_t *spa = ztest_spa;
5026 5026
5027 5027 (void) spa_scan(spa, POOL_SCAN_SCRUB);
5028 5028 (void) poll(NULL, 0, 100); /* wait a moment, then force a restart */
5029 5029 (void) spa_scan(spa, POOL_SCAN_SCRUB);
5030 5030 }
5031 5031
5032 5032 /*
5033 5033 * Change the guid for the pool.
5034 5034 */
5035 5035 /* ARGSUSED */
5036 5036 void
5037 5037 ztest_reguid(ztest_ds_t *zd, uint64_t id)
5038 5038 {
5039 5039 spa_t *spa = ztest_spa;
5040 5040 uint64_t orig, load;
5041 5041 int error;
5042 5042
5043 5043 orig = spa_guid(spa);
5044 5044 load = spa_load_guid(spa);
5045 5045
5046 5046 (void) rw_wrlock(&ztest_name_lock);
5047 5047 error = spa_change_guid(spa);
5048 5048 (void) rw_unlock(&ztest_name_lock);
5049 5049
5050 5050 if (error != 0)
5051 5051 return;
5052 5052
5053 5053 if (ztest_opts.zo_verbose >= 4) {
5054 5054 (void) printf("Changed guid old %llu -> %llu\n",
5055 5055 (u_longlong_t)orig, (u_longlong_t)spa_guid(spa));
5056 5056 }
5057 5057
5058 5058 VERIFY3U(orig, !=, spa_guid(spa));
5059 5059 VERIFY3U(load, ==, spa_load_guid(spa));
5060 5060 }
5061 5061
5062 5062 /*
5063 5063 * Rename the pool to a different name and then rename it back.
5064 5064 */
5065 5065 /* ARGSUSED */
5066 5066 void
5067 5067 ztest_spa_rename(ztest_ds_t *zd, uint64_t id)
5068 5068 {
5069 5069 char *oldname, *newname;
5070 5070 spa_t *spa;
5071 5071
5072 5072 (void) rw_wrlock(&ztest_name_lock);
5073 5073
5074 5074 oldname = ztest_opts.zo_pool;
5075 5075 newname = umem_alloc(strlen(oldname) + 5, UMEM_NOFAIL);
5076 5076 (void) strcpy(newname, oldname);
5077 5077 (void) strcat(newname, "_tmp");
5078 5078
5079 5079 /*
5080 5080 * Do the rename
5081 5081 */
5082 5082 VERIFY3U(0, ==, spa_rename(oldname, newname));
5083 5083
5084 5084 /*
5085 5085 * Try to open it under the old name, which shouldn't exist
5086 5086 */
5087 5087 VERIFY3U(ENOENT, ==, spa_open(oldname, &spa, FTAG));
5088 5088
5089 5089 /*
5090 5090 * Open it under the new name and make sure it's still the same spa_t.
5091 5091 */
5092 5092 VERIFY3U(0, ==, spa_open(newname, &spa, FTAG));
5093 5093
5094 5094 ASSERT(spa == ztest_spa);
5095 5095 spa_close(spa, FTAG);
5096 5096
5097 5097 /*
5098 5098 * Rename it back to the original
5099 5099 */
5100 5100 VERIFY3U(0, ==, spa_rename(newname, oldname));
5101 5101
5102 5102 /*
5103 5103 * Make sure it can still be opened
5104 5104 */
5105 5105 VERIFY3U(0, ==, spa_open(oldname, &spa, FTAG));
5106 5106
5107 5107 ASSERT(spa == ztest_spa);
5108 5108 spa_close(spa, FTAG);
5109 5109
5110 5110 umem_free(newname, strlen(newname) + 1);
5111 5111
5112 5112 (void) rw_unlock(&ztest_name_lock);
5113 5113 }
5114 5114
5115 5115 /*
5116 5116 * Verify pool integrity by running zdb.
5117 5117 */
5118 5118 static void
5119 5119 ztest_run_zdb(char *pool)
5120 5120 {
5121 5121 int status;
5122 5122 char zdb[MAXPATHLEN + MAXNAMELEN + 20];
5123 5123 char zbuf[1024];
5124 5124 FILE *fp;
5125 5125
5126 5126 /* LINTED */
5127 5127 (void) sprintf(zdb,
5128 5128 "zdb -bcc%s%s -U %s %s",
5129 5129 ztest_opts.zo_verbose >= 3 ? "s" : "",
5130 5130 ztest_opts.zo_verbose >= 4 ? "v" : "",
5131 5131 spa_config_path,
5132 5132 pool);
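	/*
	 * For example, with verbosity >= 4 this builds something like
	 * "zdb -bccsv -U /tmp/zpool.cache ztest"; the cache file path and
	 * pool name vary with the run's options.
	 */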
5133 5133
5134 5134 if (ztest_opts.zo_verbose >= 5)
5135 5135 (void) printf("Executing %s\n", strstr(zdb, "zdb "));
5136 5136
5137 5137 fp = popen(zdb, "r");
5138 5138
5139 5139 while (fgets(zbuf, sizeof (zbuf), fp) != NULL)
5140 5140 if (ztest_opts.zo_verbose >= 3)
5141 5141 (void) printf("%s", zbuf);
5142 5142
5143 5143 status = pclose(fp);
5144 5144
5145 5145 if (status == 0)
5146 5146 return;
5147 5147
5148 5148 ztest_dump_core = 0;
5149 5149 if (WIFEXITED(status))
5150 5150 fatal(0, "'%s' exit code %d", zdb, WEXITSTATUS(status));
5151 5151 else
5152 5152 fatal(0, "'%s' died with signal %d", zdb, WTERMSIG(status));
5153 5153 }
5154 5154
5155 5155 static void
5156 5156 ztest_walk_pool_directory(char *header)
5157 5157 {
5158 5158 spa_t *spa = NULL;
5159 5159
5160 5160 if (ztest_opts.zo_verbose >= 6)
5161 5161 (void) printf("%s\n", header);
5162 5162
5163 5163 mutex_enter(&spa_namespace_lock);
5164 5164 while ((spa = spa_next(spa)) != NULL)
5165 5165 if (ztest_opts.zo_verbose >= 6)
5166 5166 (void) printf("\t%s\n", spa_name(spa));
5167 5167 mutex_exit(&spa_namespace_lock);
5168 5168 }
5169 5169
5170 5170 static void
5171 5171 ztest_spa_import_export(char *oldname, char *newname)
5172 5172 {
5173 5173 nvlist_t *config, *newconfig;
5174 5174 uint64_t pool_guid;
5175 5175 spa_t *spa;
5176 5176 int error;
5177 5177
5178 5178 if (ztest_opts.zo_verbose >= 4) {
5179 5179 (void) printf("import/export: old = %s, new = %s\n",
5180 5180 oldname, newname);
5181 5181 }
5182 5182
5183 5183 /*
5184 5184 * Clean up from previous runs.
5185 5185 */
5186 5186 (void) spa_destroy(newname);
5187 5187
5188 5188 /*
5189 5189 * Get the pool's configuration and guid.
5190 5190 */
5191 5191 VERIFY3U(0, ==, spa_open(oldname, &spa, FTAG));
5192 5192
5193 5193 /*
5194 5194 * Kick off a scrub to tickle scrub/export races.
5195 5195 */
5196 5196 if (ztest_random(2) == 0)
5197 5197 (void) spa_scan(spa, POOL_SCAN_SCRUB);
5198 5198
5199 5199 pool_guid = spa_guid(spa);
5200 5200 spa_close(spa, FTAG);
5201 5201
5202 5202 ztest_walk_pool_directory("pools before export");
5203 5203
5204 5204 /*
5205 5205 * Export it.
5206 5206 */
5207 5207 VERIFY3U(0, ==, spa_export(oldname, &config, B_FALSE, B_FALSE));
5208 5208
5209 5209 ztest_walk_pool_directory("pools after export");
5210 5210
5211 5211 /*
5212 5212 * Try to import it.
5213 5213 */
5214 5214 newconfig = spa_tryimport(config);
5215 5215 ASSERT(newconfig != NULL);
5216 5216 nvlist_free(newconfig);
5217 5217
5218 5218 /*
5219 5219 * Import it under the new name.
5220 5220 */
5221 5221 error = spa_import(newname, config, NULL, 0);
5222 5222 if (error != 0) {
5223 5223 dump_nvlist(config, 0);
5224 5224 fatal(B_FALSE, "couldn't import pool %s as %s: error %u",
5225 5225 oldname, newname, error);
5226 5226 }
5227 5227
5228 5228 ztest_walk_pool_directory("pools after import");
5229 5229
5230 5230 /*
5231 5231 * Try to import it again -- should fail with EEXIST.
5232 5232 */
5233 5233 VERIFY3U(EEXIST, ==, spa_import(newname, config, NULL, 0));
5234 5234
5235 5235 /*
5236 5236 * Try to import it under a different name -- should fail with EEXIST.
5237 5237 */
5238 5238 VERIFY3U(EEXIST, ==, spa_import(oldname, config, NULL, 0));
5239 5239
5240 5240 /*
5241 5241 * Verify that the pool is no longer visible under the old name.
5242 5242 */
5243 5243 VERIFY3U(ENOENT, ==, spa_open(oldname, &spa, FTAG));
5244 5244
5245 5245 /*
5246 5246 * Verify that we can open and close the pool using the new name.
5247 5247 */
5248 5248 VERIFY3U(0, ==, spa_open(newname, &spa, FTAG));
5249 5249 ASSERT(pool_guid == spa_guid(spa));
5250 5250 spa_close(spa, FTAG);
5251 5251
5252 5252 nvlist_free(config);
5253 5253 }
5254 5254
5255 5255 static void
5256 5256 ztest_resume(spa_t *spa)
5257 5257 {
5258 5258 if (spa_suspended(spa) && ztest_opts.zo_verbose >= 6)
5259 5259 (void) printf("resuming from suspended state\n");
5260 5260 spa_vdev_state_enter(spa, SCL_NONE);
5261 5261 vdev_clear(spa, NULL);
5262 5262 (void) spa_vdev_state_exit(spa, NULL, 0);
5263 5263 (void) zio_resume(spa);
5264 5264 }
5265 5265
5266 5266 static void *
5267 5267 ztest_resume_thread(void *arg)
5268 5268 {
5269 5269 spa_t *spa = arg;
5270 5270
5271 5271 while (!ztest_exiting) {
5272 5272 if (spa_suspended(spa))
5273 5273 ztest_resume(spa);
5274 5274 (void) poll(NULL, 0, 100);
5275 5275 }
5276 5276 return (NULL);
5277 5277 }
5278 5278
5279 5279 static void *
5280 5280 ztest_deadman_thread(void *arg)
5281 5281 {
5282 5282 ztest_shared_t *zs = arg;
5283 5283 int grace = 300;
5284 5284 hrtime_t delta;
5285 5285
5286 5286 delta = (zs->zs_thread_stop - zs->zs_thread_start) / NANOSEC + grace;
5287 5287
5288 5288 (void) poll(NULL, 0, (int)(1000 * delta));
5289 5289
5290 5290 fatal(0, "failed to complete within %d seconds of deadline", grace);
5291 5291
5292 5292 return (NULL);
5293 5293 }
5294 5294
5295 5295 static void
5296 5296 ztest_execute(int test, ztest_info_t *zi, uint64_t id)
5297 5297 {
5298 5298 ztest_ds_t *zd = &ztest_ds[id % ztest_opts.zo_datasets];
5299 5299 ztest_shared_callstate_t *zc = ZTEST_GET_SHARED_CALLSTATE(test);
5300 5300 hrtime_t functime = gethrtime();
5301 5301
5302 5302 for (int i = 0; i < zi->zi_iters; i++)
5303 5303 zi->zi_func(zd, id);
5304 5304
5305 5305 functime = gethrtime() - functime;
5306 5306
5307 5307 atomic_add_64(&zc->zc_count, 1);
5308 5308 atomic_add_64(&zc->zc_time, functime);
5309 5309
5310 5310 if (ztest_opts.zo_verbose >= 4) {
5311 5311 Dl_info dli;
5312 5312 (void) dladdr((void *)zi->zi_func, &dli);
5313 5313 (void) printf("%6.2f sec in %s\n",
5314 5314 (double)functime / NANOSEC, dli.dli_sname);
5315 5315 }
5316 5316 }
5317 5317
5318 5318 static void *
5319 5319 ztest_thread(void *arg)
5320 5320 {
5321 5321 int rand;
5322 5322 uint64_t id = (uintptr_t)arg;
5323 5323 ztest_shared_t *zs = ztest_shared;
5324 5324 uint64_t call_next;
5325 5325 hrtime_t now;
5326 5326 ztest_info_t *zi;
5327 5327 ztest_shared_callstate_t *zc;
5328 5328
5329 5329 while ((now = gethrtime()) < zs->zs_thread_stop) {
5330 5330 /*
5331 5331 * See if it's time to force a crash.
5332 5332 */
5333 5333 if (now > zs->zs_thread_kill)
5334 5334 ztest_kill(zs);
5335 5335
5336 5336 /*
5337 5337 * If we're getting ENOSPC with some regularity, stop.
5338 5338 */
5339 5339 if (zs->zs_enospc_count > 10)
5340 5340 break;
5341 5341
5342 5342 /*
5343 5343 * Pick a random function to execute.
5344 5344 */
5345 5345 rand = ztest_random(ZTEST_FUNCS);
5346 5346 zi = &ztest_info[rand];
5347 5347 zc = ZTEST_GET_SHARED_CALLSTATE(rand);
5348 5348 call_next = zc->zc_next;
5349 5349
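		/*
		 * Claim this call slot with a compare-and-swap: of all the
		 * threads that read the same zc_next, only the one whose CAS
		 * succeeds runs the function.  The winner also pushes zc_next
		 * forward by a random amount that averages the function's
		 * nominal interval, zi_interval[0].
		 */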
5350 5350 if (now >= call_next &&
5351 5351 atomic_cas_64(&zc->zc_next, call_next, call_next +
5352 5352 ztest_random(2 * zi->zi_interval[0] + 1)) == call_next) {
5353 5353 ztest_execute(rand, zi, id);
5354 5354 }
5355 5355 }
5356 5356
5357 5357 return (NULL);
5358 5358 }
5359 5359
5360 5360 static void
5361 5361 ztest_dataset_name(char *dsname, char *pool, int d)
5362 5362 {
5363 5363 (void) snprintf(dsname, MAXNAMELEN, "%s/ds_%d", pool, d);
5364 5364 }
5365 5365
5366 5366 static void
5367 5367 ztest_dataset_destroy(int d)
5368 5368 {
5369 5369 char name[MAXNAMELEN];
5370 5370
5371 5371 ztest_dataset_name(name, ztest_opts.zo_pool, d);
5372 5372
5373 5373 if (ztest_opts.zo_verbose >= 3)
5374 5374 (void) printf("Destroying %s to free up space\n", name);
5375 5375
5376 5376 /*
5377 5377 * Clean up any non-standard clones and snapshots. In general,
5378 5378 * ztest thread t operates on dataset (t % zopt_datasets),
5379 5379 * so there may be more than one thing to clean up.
5380 5380 */
5381 5381 for (int t = d; t < ztest_opts.zo_threads;
5382 5382 t += ztest_opts.zo_datasets) {
5383 5383 ztest_dsl_dataset_cleanup(name, t);
5384 5384 }
5385 5385
5386 5386 (void) dmu_objset_find(name, ztest_objset_destroy_cb, NULL,
5387 5387 DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN);
5388 5388 }
5389 5389
5390 5390 static void
5391 5391 ztest_dataset_dirobj_verify(ztest_ds_t *zd)
5392 5392 {
5393 5393 uint64_t usedobjs, dirobjs, scratch;
5394 5394
5395 5395 /*
5396 5396 * ZTEST_DIROBJ is the object directory for the entire dataset.
5397 5397 * Therefore, the number of objects in use should equal the
5398 5398 * number of ZTEST_DIROBJ entries, +1 for ZTEST_DIROBJ itself.
5399 5399 * If not, we have an object leak.
5400 5400 *
5401 5401 * Note that we can only check this in ztest_dataset_open(),
5402 5402 * when the open-context and syncing-context values agree.
5403 5403 * That's because zap_count() returns the open-context value,
5404 5404 * while dmu_objset_space() returns the rootbp fill count.
5405 5405 */
5406 5406 VERIFY3U(0, ==, zap_count(zd->zd_os, ZTEST_DIROBJ, &dirobjs));
5407 5407 dmu_objset_space(zd->zd_os, &scratch, &scratch, &usedobjs, &scratch);
5408 5408 ASSERT3U(dirobjs + 1, ==, usedobjs);
5409 5409 }
5410 5410
5411 5411 static int
5412 5412 ztest_dataset_open(int d)
5413 5413 {
5414 5414 ztest_ds_t *zd = &ztest_ds[d];
5415 5415 uint64_t committed_seq = ZTEST_GET_SHARED_DS(d)->zd_seq;
5416 5416 objset_t *os;
5417 5417 zilog_t *zilog;
5418 5418 char name[MAXNAMELEN];
5419 5419 int error;
5420 5420
5421 5421 ztest_dataset_name(name, ztest_opts.zo_pool, d);
5422 5422
5423 5423 (void) rw_wrlock(&ztest_name_lock);
5424 5424
5425 5425 error = ztest_dataset_create(name);
5426 5426 if (error == ENOSPC) {
5427 5427 (void) rw_unlock(&ztest_name_lock);
5428 5428 ztest_record_enospc(FTAG);
5429 5429 return (error);
5430 5430 }
5431 5431 ASSERT(error == 0 || error == EEXIST);
5432 5432
5433 5433 VERIFY0(dmu_objset_hold(name, zd, &os));
5434 5434 ztest_zd_init(zd, ZTEST_GET_SHARED_DS(d), os);
5435 5435 (void) rw_unlock(&ztest_name_lock);
5436 5436
5437 5437 zilog = zd->zd_zilog;
5438 5438
5439 5439 if (zilog->zl_header->zh_claim_lr_seq != 0 &&
5440 5440 zilog->zl_header->zh_claim_lr_seq < committed_seq)
5441 5441 fatal(0, "missing log records: claimed %llu < committed %llu",
5442 5442 zilog->zl_header->zh_claim_lr_seq, committed_seq);
5443 5443
5444 5444 ztest_dataset_dirobj_verify(zd);
5445 5445
5446 5446 zil_replay(os, zd, ztest_replay_vector);
5447 5447
5448 5448 ztest_dataset_dirobj_verify(zd);
5449 5449
5450 5450 if (ztest_opts.zo_verbose >= 6)
5451 5451 (void) printf("%s replay %llu blocks, %llu records, seq %llu\n",
5452 5452 zd->zd_name,
5453 5453 (u_longlong_t)zilog->zl_parse_blk_count,
5454 5454 (u_longlong_t)zilog->zl_parse_lr_count,
5455 5455 (u_longlong_t)zilog->zl_replaying_seq);
5456 5456
5457 5457 zilog = zil_open(os, ztest_get_data);
5458 5458
5459 5459 if (zilog->zl_replaying_seq != 0 &&
5460 5460 zilog->zl_replaying_seq < committed_seq)
5461 5461 fatal(0, "missing log records: replayed %llu < committed %llu",
5462 5462 zilog->zl_replaying_seq, committed_seq);
5463 5463
5464 5464 return (0);
5465 5465 }
5466 5466
5467 5467 static void
5468 5468 ztest_dataset_close(int d)
5469 5469 {
5470 5470 ztest_ds_t *zd = &ztest_ds[d];
5471 5471
5472 5472 zil_close(zd->zd_zilog);
5473 5473 dmu_objset_disown(zd->zd_os, zd);
5474 5474
5475 5475 ztest_zd_fini(zd);
5476 5476 }
5477 5477
5478 5478 /*
5479 5479 * Kick off threads to run tests on all datasets in parallel.
5480 5480 */
5481 5481 static void
5482 5482 ztest_run(ztest_shared_t *zs)
5483 5483 {
5484 5484 thread_t *tid;
5485 5485 spa_t *spa;
5486 5486 objset_t *os;
5487 5487 thread_t resume_tid;
5488 5488 int error;
5489 5489
5490 5490 ztest_exiting = B_FALSE;
5491 5491
5492 5492 /*
5493 5493 * Initialize parent/child shared state.
5494 5494 */
5495 5495 VERIFY(_mutex_init(&ztest_vdev_lock, USYNC_THREAD, NULL) == 0);
5496 5496 VERIFY(rwlock_init(&ztest_name_lock, USYNC_THREAD, NULL) == 0);
5497 5497
5498 5498 zs->zs_thread_start = gethrtime();
5499 5499 zs->zs_thread_stop =
5500 5500 zs->zs_thread_start + ztest_opts.zo_passtime * NANOSEC;
5501 5501 zs->zs_thread_stop = MIN(zs->zs_thread_stop, zs->zs_proc_stop);
5502 5502 zs->zs_thread_kill = zs->zs_thread_stop;
5503 5503 if (ztest_random(100) < ztest_opts.zo_killrate) {
5504 5504 zs->zs_thread_kill -=
5505 5505 ztest_random(ztest_opts.zo_passtime * NANOSEC);
5506 5506 }
5507 5507
5508 5508 (void) _mutex_init(&zcl.zcl_callbacks_lock, USYNC_THREAD, NULL);
5509 5509
5510 5510 list_create(&zcl.zcl_callbacks, sizeof (ztest_cb_data_t),
5511 5511 offsetof(ztest_cb_data_t, zcd_node));
5512 5512
5513 5513 /*
5514 5514 * Open our pool.
5515 5515 */
5516 5516 kernel_init(FREAD | FWRITE);
5517 5517 VERIFY0(spa_open(ztest_opts.zo_pool, &spa, FTAG));
5518 5518 spa->spa_debug = B_TRUE;
5519 5519 ztest_spa = spa;
5520 5520
5521 5521 VERIFY0(dmu_objset_own(ztest_opts.zo_pool,
5522 5522 DMU_OST_ANY, B_TRUE, FTAG, &os));
5523 5523 zs->zs_guid = dmu_objset_fsid_guid(os);
5524 5524 dmu_objset_disown(os, FTAG);
5525 5525
5526 5526 spa->spa_dedup_ditto = 2 * ZIO_DEDUPDITTO_MIN;
5527 5527
5528 5528 /*
5529 5529 * We don't expect the pool to suspend unless maxfaults == 0,
5530 5530 * in which case ztest_fault_inject() temporarily takes away
5531 5531 * the only valid replica.
5532 5532 */
5533 5533 if (MAXFAULTS() == 0)
5534 5534 spa->spa_failmode = ZIO_FAILURE_MODE_WAIT;
5535 5535 else
5536 5536 spa->spa_failmode = ZIO_FAILURE_MODE_PANIC;
5537 5537
5538 5538 /*
5539 5539 * Create a thread to periodically resume suspended I/O.
5540 5540 */
5541 5541 VERIFY(thr_create(0, 0, ztest_resume_thread, spa, THR_BOUND,
5542 5542 &resume_tid) == 0);
5543 5543
5544 5544 /*
5545 5545 * Create a deadman thread to abort() if we hang.
5546 5546 */
5547 5547 VERIFY(thr_create(0, 0, ztest_deadman_thread, zs, THR_BOUND,
5548 5548 NULL) == 0);
5549 5549
5550 5550 /*
5551 5551 * Verify that we can safely inquire about any object,
5552 5552 * whether it's allocated or not. To make it interesting,
5553 5553 * we probe a 5-wide window around each power of two.
5554 5554 * This hits all edge cases, including zero and the max.
5555 5555 */
5556 5556 for (int t = 0; t < 64; t++) {
5557 5557 for (int d = -5; d <= 5; d++) {
5558 5558 error = dmu_object_info(spa->spa_meta_objset,
5559 5559 (1ULL << t) + d, NULL);
5560 5560 ASSERT(error == 0 || error == ENOENT ||
5561 5561 error == EINVAL);
5562 5562 }
5563 5563 }
5564 5564
5565 5565 /*
5566 5566 * If we got any ENOSPC errors on the previous run, destroy something.
5567 5567 */
5568 5568 if (zs->zs_enospc_count != 0) {
5569 5569 int d = ztest_random(ztest_opts.zo_datasets);
5570 5570 ztest_dataset_destroy(d);
5571 5571 }
5572 5572 zs->zs_enospc_count = 0;
5573 5573
5574 5574 tid = umem_zalloc(ztest_opts.zo_threads * sizeof (thread_t),
5575 5575 UMEM_NOFAIL);
5576 5576
5577 5577 if (ztest_opts.zo_verbose >= 4)
5578 5578 (void) printf("starting main threads...\n");
5579 5579
5580 5580 /*
5581 5581 * Kick off all the tests that run in parallel.
5582 5582 */
5583 5583 for (int t = 0; t < ztest_opts.zo_threads; t++) {
5584 5584 if (t < ztest_opts.zo_datasets &&
5585 5585 ztest_dataset_open(t) != 0)
5586 5586 return;
5587 5587 VERIFY(thr_create(0, 0, ztest_thread, (void *)(uintptr_t)t,
5588 5588 THR_BOUND, &tid[t]) == 0);
5589 5589 }
5590 5590
5591 5591 /*
5592 5592 * Wait for all of the tests to complete. We go in reverse order
5593 5593 * so we don't close datasets while threads are still using them.
5594 5594 */
5595 5595 for (int t = ztest_opts.zo_threads - 1; t >= 0; t--) {
5596 5596 VERIFY(thr_join(tid[t], NULL, NULL) == 0);
5597 5597 if (t < ztest_opts.zo_datasets)
5598 5598 ztest_dataset_close(t);
5599 5599 }
5600 5600
5601 5601 txg_wait_synced(spa_get_dsl(spa), 0);
5602 5602
5603 5603 zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(spa));
5604 5604 zs->zs_space = metaslab_class_get_space(spa_normal_class(spa));
5605 5605
5606 5606 umem_free(tid, ztest_opts.zo_threads * sizeof (thread_t));
5607 5607
5608 5608 /* Kill the resume thread */
5609 5609 ztest_exiting = B_TRUE;
5610 5610 VERIFY(thr_join(resume_tid, NULL, NULL) == 0);
5611 5611 ztest_resume(spa);
5612 5612
5613 5613 /*
5614 5614 * Right before closing the pool, kick off a bunch of async I/O;
5615 5615 * spa_close() should wait for it to complete.
5616 5616 */
5617 5617 for (uint64_t object = 1; object < 50; object++)
5618 5618 dmu_prefetch(spa->spa_meta_objset, object, 0, 1ULL << 20);
5619 5619
5620 5620 spa_close(spa, FTAG);
5621 5621
5622 5622 /*
5623 5623 * Verify that we can loop over all pools.
5624 5624 */
5625 5625 mutex_enter(&spa_namespace_lock);
5626 5626 for (spa = spa_next(NULL); spa != NULL; spa = spa_next(spa))
5627 5627 if (ztest_opts.zo_verbose > 3)
5628 5628 (void) printf("spa_next: found %s\n", spa_name(spa));
5629 5629 mutex_exit(&spa_namespace_lock);
5630 5630
5631 5631 /*
5632 5632 * Verify that we can export the pool and reimport it under a
5633 5633 * different name.
5634 5634 */
5635 5635 if (ztest_random(2) == 0) {
5636 5636 char name[MAXNAMELEN];
5637 5637 (void) snprintf(name, MAXNAMELEN, "%s_import",
5638 5638 ztest_opts.zo_pool);
5639 5639 ztest_spa_import_export(ztest_opts.zo_pool, name);
5640 5640 ztest_spa_import_export(name, ztest_opts.zo_pool);
5641 5641 }
5642 5642
5643 5643 kernel_fini();
5644 5644
5645 5645 list_destroy(&zcl.zcl_callbacks);
5646 5646
5647 5647 (void) _mutex_destroy(&zcl.zcl_callbacks_lock);
5648 5648
5649 5649 (void) rwlock_destroy(&ztest_name_lock);
5650 5650 (void) _mutex_destroy(&ztest_vdev_lock);
5651 5651 }
5652 5652
5653 5653 static void
5654 5654 ztest_freeze(void)
5655 5655 {
5656 5656 ztest_ds_t *zd = &ztest_ds[0];
5657 5657 spa_t *spa;
5658 5658 int numloops = 0;
5659 5659
5660 5660 if (ztest_opts.zo_verbose >= 3)
5661 5661 (void) printf("testing spa_freeze()...\n");
5662 5662
5663 5663 kernel_init(FREAD | FWRITE);
5664 5664 VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG));
5665 5665 VERIFY3U(0, ==, ztest_dataset_open(0));
5666 5666 spa->spa_debug = B_TRUE;
5667 5667 ztest_spa = spa;
5668 5668
5669 5669 /*
5670 5670 * Force the first log block to be transactionally allocated.
5671 5671 * We have to do this before we freeze the pool -- otherwise
5672 5672 * the log chain won't be anchored.
5673 5673 */
5674 5674 while (BP_IS_HOLE(&zd->zd_zilog->zl_header->zh_log)) {
5675 5675 ztest_dmu_object_alloc_free(zd, 0);
5676 5676 zil_commit(zd->zd_zilog, 0);
5677 5677 }
5678 5678
5679 5679 txg_wait_synced(spa_get_dsl(spa), 0);
5680 5680
5681 5681 /*
5682 5682 * Freeze the pool. This stops spa_sync() from doing anything,
5683 5683 * so that the only way to record changes from now on is the ZIL.
5684 5684 */
5685 5685 spa_freeze(spa);
5686 5686
5687 5687 /*
5688 5688 * Run tests that generate log records but don't alter the pool config
5689 5689 * or depend on DSL sync tasks (snapshots, objset create/destroy, etc).
5690 5690 * We do a txg_wait_synced() after each iteration to force the txg
5691 5691 * to increase well beyond the last synced value in the uberblock.
5692 5692 * The ZIL should be OK with that.
5693 5693 */
5694 5694 while (ztest_random(10) != 0 &&
5695 5695 numloops++ < ztest_opts.zo_maxloops) {
5696 5696 ztest_dmu_write_parallel(zd, 0);
5697 5697 ztest_dmu_object_alloc_free(zd, 0);
5698 5698 txg_wait_synced(spa_get_dsl(spa), 0);
5699 5699 }
5700 5700
5701 5701 /*
5702 5702 * Commit all of the changes we just generated.
5703 5703 */
5704 5704 zil_commit(zd->zd_zilog, 0);
5705 5705 txg_wait_synced(spa_get_dsl(spa), 0);
5706 5706
5707 5707 /*
5708 5708 * Close our dataset and close the pool.
5709 5709 */
5710 5710 ztest_dataset_close(0);
5711 5711 spa_close(spa, FTAG);
5712 5712 kernel_fini();
5713 5713
5714 5714 /*
5715 5715 * Open and close the pool and dataset to induce log replay.
5716 5716 */
5717 5717 kernel_init(FREAD | FWRITE);
5718 5718 VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG));
5719 5719 ASSERT(spa_freeze_txg(spa) == UINT64_MAX);
5720 5720 VERIFY3U(0, ==, ztest_dataset_open(0));
5721 5721 ztest_dataset_close(0);
5722 5722
5723 5723 spa->spa_debug = B_TRUE;
5724 5724 ztest_spa = spa;
5725 5725 txg_wait_synced(spa_get_dsl(spa), 0);
5726 5726 ztest_reguid(NULL, 0);
5727 5727
5728 5728 spa_close(spa, FTAG);
5729 5729 kernel_fini();
5730 5730 }
5731 5731
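/*
 * Render a high-resolution time delta as a compact string; for example,
 * print_time(90061LL * NANOSEC, buf) yields "1d01h01m01s" and
 * print_time(75LL * NANOSEC, buf) yields "1m15s".
 */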
5732 5732 void
5733 5733 print_time(hrtime_t t, char *timebuf)
5734 5734 {
5735 5735 hrtime_t s = t / NANOSEC;
5736 5736 hrtime_t m = s / 60;
5737 5737 hrtime_t h = m / 60;
5738 5738 hrtime_t d = h / 24;
5739 5739
5740 5740 s -= m * 60;
5741 5741 m -= h * 60;
5742 5742 h -= d * 24;
5743 5743
5744 5744 timebuf[0] = '\0';
5745 5745
5746 5746 if (d)
5747 5747 (void) sprintf(timebuf,
5748 5748 "%llud%02lluh%02llum%02llus", d, h, m, s);
5749 5749 else if (h)
5750 5750 (void) sprintf(timebuf, "%lluh%02llum%02llus", h, m, s);
5751 5751 else if (m)
5752 5752 (void) sprintf(timebuf, "%llum%02llus", m, s);
5753 5753 else
5754 5754 (void) sprintf(timebuf, "%llus", s);
5755 5755 }
5756 5756
5757 5757 static nvlist_t *
5758 5758 make_random_props()
5759 5759 {
5760 5760 nvlist_t *props;
5761 5761
5762 5762 VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0);
5763 5763 if (ztest_random(2) == 0)
5764 5764 return (props);
5765 5765 VERIFY(nvlist_add_uint64(props, "autoreplace", 1) == 0);
5766 5766
5767 5767 return (props);
5768 5768 }
5769 5769
5770 5770 /*
5771 5771 * Create a storage pool with the given name and initial vdev size.
5772 5772 * Then test spa_freeze() functionality.
5773 5773 */
5774 5774 static void
5775 5775 ztest_init(ztest_shared_t *zs)
5776 5776 {
5777 5777 spa_t *spa;
5778 5778 nvlist_t *nvroot, *props;
5779 5779
5780 5780 VERIFY(_mutex_init(&ztest_vdev_lock, USYNC_THREAD, NULL) == 0);
5781 5781 VERIFY(rwlock_init(&ztest_name_lock, USYNC_THREAD, NULL) == 0);
5782 5782
5783 5783 kernel_init(FREAD | FWRITE);
5784 5784
5785 5785 /*
5786 5786 * Create the storage pool.
5787 5787 */
5788 5788 (void) spa_destroy(ztest_opts.zo_pool);
5789 5789 ztest_shared->zs_vdev_next_leaf = 0;
5790 5790 zs->zs_splits = 0;
5791 5791 zs->zs_mirrors = ztest_opts.zo_mirrors;
5792 5792 nvroot = make_vdev_root(NULL, NULL, NULL, ztest_opts.zo_vdev_size, 0,
5793 5793 0, ztest_opts.zo_raidz, zs->zs_mirrors, 1);
5794 5794 props = make_random_props();
5795 5795 for (int i = 0; i < SPA_FEATURES; i++) {
5796 5796 char buf[1024];
5797 5797 (void) snprintf(buf, sizeof (buf), "feature@%s",
5798 5798 spa_feature_table[i].fi_uname);
5799 5799 VERIFY3U(0, ==, nvlist_add_uint64(props, buf, 0));
5800 5800 }
5801 5801 VERIFY3U(0, ==, spa_create(ztest_opts.zo_pool, nvroot, props, NULL));
5802 5802 nvlist_free(nvroot);
5803 5803
5804 5804 VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG));
5805 5805 zs->zs_metaslab_sz =
5806 5806 1ULL << spa->spa_root_vdev->vdev_child[0]->vdev_ms_shift;
5807 5807
5808 5808 spa_close(spa, FTAG);
5809 5809
5810 5810 kernel_fini();
5811 5811
5812 5812 ztest_run_zdb(ztest_opts.zo_pool);
5813 5813
5814 5814 ztest_freeze();
5815 5815
5816 5816 ztest_run_zdb(ztest_opts.zo_pool);
5817 5817
5818 5818 (void) rwlock_destroy(&ztest_name_lock);
5819 5819 (void) _mutex_destroy(&ztest_vdev_lock);
5820 5820 }
5821 5821
5822 5822 static void
5823 5823 setup_data_fd(void)
5824 5824 {
5825 5825 static char ztest_name_data[] = "/tmp/ztest.data.XXXXXX";
5826 5826
5827 5827 ztest_fd_data = mkstemp(ztest_name_data);
5828 5828 ASSERT3S(ztest_fd_data, >=, 0);
5829 5829 (void) unlink(ztest_name_data);
5830 5830 }
5831 5831
5832 5832
5833 5833 static int
5834 5834 shared_data_size(ztest_shared_hdr_t *hdr)
5835 5835 {
5836 5836 int size;
5837 5837
5838 5838 size = hdr->zh_hdr_size;
5839 5839 size += hdr->zh_opts_size;
5840 5840 size += hdr->zh_size;
5841 5841 size += hdr->zh_stats_size * hdr->zh_stats_count;
5842 5842 size += hdr->zh_ds_size * hdr->zh_ds_count;
5843 5843
5844 5844 return (size);
5845 5845 }
5846 5846
5847 5847 static void
5848 5848 setup_hdr(void)
5849 5849 {
5850 5850 int size;
5851 5851 ztest_shared_hdr_t *hdr;
5852 5852
5853 5853 hdr = (void *)mmap(0, P2ROUNDUP(sizeof (*hdr), getpagesize()),
5854 5854 PROT_READ | PROT_WRITE, MAP_SHARED, ztest_fd_data, 0);
5855 5855 ASSERT(hdr != MAP_FAILED);
5856 5856
5857 5857 VERIFY3U(0, ==, ftruncate(ztest_fd_data, sizeof (ztest_shared_hdr_t)));
5858 5858
5859 5859 hdr->zh_hdr_size = sizeof (ztest_shared_hdr_t);
5860 5860 hdr->zh_opts_size = sizeof (ztest_shared_opts_t);
5861 5861 hdr->zh_size = sizeof (ztest_shared_t);
5862 5862 hdr->zh_stats_size = sizeof (ztest_shared_callstate_t);
5863 5863 hdr->zh_stats_count = ZTEST_FUNCS;
5864 5864 hdr->zh_ds_size = sizeof (ztest_shared_ds_t);
5865 5865 hdr->zh_ds_count = ztest_opts.zo_datasets;
5866 5866
5867 5867 size = shared_data_size(hdr);
5868 5868 VERIFY3U(0, ==, ftruncate(ztest_fd_data, size));
5869 5869
5870 5870 (void) munmap((caddr_t)hdr, P2ROUNDUP(sizeof (*hdr), getpagesize()));
5871 5871 }
5872 5872
5873 5873 static void
5874 5874 setup_data(void)
5875 5875 {
5876 5876 int size, offset;
5877 5877 ztest_shared_hdr_t *hdr;
5878 5878 uint8_t *buf;
5879 5879
5880 5880 hdr = (void *)mmap(0, P2ROUNDUP(sizeof (*hdr), getpagesize()),
5881 5881 PROT_READ, MAP_SHARED, ztest_fd_data, 0);
5882 5882 ASSERT(hdr != MAP_FAILED);
5883 5883
5884 5884 size = shared_data_size(hdr);
5885 5885
5886 5886 (void) munmap((caddr_t)hdr, P2ROUNDUP(sizeof (*hdr), getpagesize()));
5887 5887 hdr = ztest_shared_hdr = (void *)mmap(0, P2ROUNDUP(size, getpagesize()),
5888 5888 PROT_READ | PROT_WRITE, MAP_SHARED, ztest_fd_data, 0);
5889 5889 ASSERT(hdr != MAP_FAILED);
5890 5890 buf = (uint8_t *)hdr;
5891 5891
5892 5892 offset = hdr->zh_hdr_size;
5893 5893 ztest_shared_opts = (void *)&buf[offset];
5894 5894 offset += hdr->zh_opts_size;
5895 5895 ztest_shared = (void *)&buf[offset];
5896 5896 offset += hdr->zh_size;
5897 5897 ztest_shared_callstate = (void *)&buf[offset];
5898 5898 offset += hdr->zh_stats_size * hdr->zh_stats_count;
5899 5899 ztest_shared_ds = (void *)&buf[offset];
5900 5900 }
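
/*
 * The shared data file mapped above is laid out as follows; all sizes and
 * counts come from the header written by setup_hdr():
 *
 *	ztest_shared_hdr_t		header
 *	ztest_shared_opts_t		options handed from parent to child
 *	ztest_shared_t			global run state
 *	ztest_shared_callstate_t	per-function call stats (ZTEST_FUNCS)
 *	ztest_shared_ds_t		per-dataset state (zo_datasets)
 */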
5901 5901
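/*
 * Fork and exec a child ztest (or the alternate binary), passing the shared
 * data file descriptor via the ZTEST_FD_DATA environment variable.  Returns
 * B_TRUE if the child was killed by SIGKILL (expected when ignorekill is
 * set), B_FALSE if it exited cleanly; any other outcome terminates the
 * parent.
 */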
5902 5902 static boolean_t
5903 5903 exec_child(char *cmd, char *libpath, boolean_t ignorekill, int *statusp)
5904 5904 {
5905 5905 pid_t pid;
5906 5906 int status;
5907 5907 char *cmdbuf = NULL;
5908 5908
5909 5909 pid = fork();
5910 5910
5911 5911 if (cmd == NULL) {
5912 5912 cmdbuf = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
5913 5913 (void) strlcpy(cmdbuf, getexecname(), MAXPATHLEN);
5914 5914 cmd = cmdbuf;
5915 5915 }
5916 5916
5917 5917 if (pid == -1)
5918 5918 fatal(1, "fork failed");
5919 5919
5920 5920 if (pid == 0) { /* child */
5921 5921 char *emptyargv[2] = { cmd, NULL };
5922 5922 char fd_data_str[12];
5923 5923
5924 5924 struct rlimit rl = { 1024, 1024 };
5925 5925 (void) setrlimit(RLIMIT_NOFILE, &rl);
5926 5926
5927 5927 (void) close(ztest_fd_rand);
5928 5928 VERIFY3U(11, >=,
5929 5929 snprintf(fd_data_str, 12, "%d", ztest_fd_data));
5930 5930 VERIFY0(setenv("ZTEST_FD_DATA", fd_data_str, 1));
5931 5931
5932 5932 (void) enable_extended_FILE_stdio(-1, -1);
5933 5933 if (libpath != NULL)
5934 5934 VERIFY(0 == setenv("LD_LIBRARY_PATH", libpath, 1));
5935 5935 (void) execv(cmd, emptyargv);
5936 5936 ztest_dump_core = B_FALSE;
5937 5937 fatal(B_TRUE, "exec failed: %s", cmd);
5938 5938 }
5939 5939
5940 5940 if (cmdbuf != NULL) {
5941 5941 umem_free(cmdbuf, MAXPATHLEN);
5942 5942 cmd = NULL;
5943 5943 }
5944 5944
5945 5945 while (waitpid(pid, &status, 0) != pid)
5946 5946 continue;
5947 5947 if (statusp != NULL)
5948 5948 *statusp = status;
5949 5949
5950 5950 if (WIFEXITED(status)) {
5951 5951 if (WEXITSTATUS(status) != 0) {
5952 5952 (void) fprintf(stderr, "child exited with code %d\n",
5953 5953 WEXITSTATUS(status));
5954 5954 exit(2);
5955 5955 }
5956 5956 return (B_FALSE);
5957 5957 } else if (WIFSIGNALED(status)) {
5958 5958 if (!ignorekill || WTERMSIG(status) != SIGKILL) {
5959 5959 (void) fprintf(stderr, "child died with signal %d\n",
5960 5960 WTERMSIG(status));
5961 5961 exit(3);
5962 5962 }
5963 5963 return (B_TRUE);
5964 5964 } else {
5965 5965 (void) fprintf(stderr, "something strange happened to child\n");
5966 5966 exit(4);
5967 5967 /* NOTREACHED */
5968 5968 }
5969 5969 }
5970 5970
5971 5971 static void
5972 5972 ztest_run_init(void)
5973 5973 {
5974 5974 ztest_shared_t *zs = ztest_shared;
5975 5975
5976 5976 ASSERT(ztest_opts.zo_init != 0);
5977 5977
5978 5978 /*
5979 5979 * Blow away any existing copy of zpool.cache
5980 5980 */
5981 5981 (void) remove(spa_config_path);
5982 5982
5983 5983 /*
5984 5984 * Create and initialize our storage pool.
5985 5985 */
5986 5986 for (int i = 1; i <= ztest_opts.zo_init; i++) {
5987 5987 bzero(zs, sizeof (ztest_shared_t));
5988 5988 if (ztest_opts.zo_verbose >= 3 &&
5989 5989 ztest_opts.zo_init != 1) {
5990 5990 (void) printf("ztest_init(), pass %d\n", i);
5991 5991 }
5992 5992 ztest_init(zs);
5993 5993 }
5994 5994 }
5995 5995
5996 5996 int
5997 5997 main(int argc, char **argv)
5998 5998 {
5999 5999 int kills = 0;
6000 6000 int iters = 0;
6001 6001 int older = 0;
6002 6002 int newer = 0;
6003 6003 ztest_shared_t *zs;
6004 6004 ztest_info_t *zi;
6005 6005 ztest_shared_callstate_t *zc;
6006 6006 char timebuf[100];
6007 6007 char numbuf[6];
6008 6008 spa_t *spa;
6009 6009 char *cmd;
6010 6010 boolean_t hasalt;
6011 6011 char *fd_data_str = getenv("ZTEST_FD_DATA");
6012 6012
6013 6013 (void) setvbuf(stdout, NULL, _IOLBF, 0);
6014 6014
6015 6015 dprintf_setup(&argc, argv);
6016 6016
6017 6017 ztest_fd_rand = open("/dev/urandom", O_RDONLY);
6018 6018 ASSERT3S(ztest_fd_rand, >=, 0);
6019 6019
6020 6020 if (!fd_data_str) {
6021 6021 process_options(argc, argv);
6022 6022
6023 6023 setup_data_fd();
6024 6024 setup_hdr();
6025 6025 setup_data();
6026 6026 bcopy(&ztest_opts, ztest_shared_opts,
6027 6027 sizeof (*ztest_shared_opts));
6028 6028 } else {
6029 6029 ztest_fd_data = atoi(fd_data_str);
6030 6030 setup_data();
6031 6031 bcopy(ztest_shared_opts, &ztest_opts, sizeof (ztest_opts));
6032 6032 }
6033 6033 ASSERT3U(ztest_opts.zo_datasets, ==, ztest_shared_hdr->zh_ds_count);
6034 6034
6035 6035 /* Override location of zpool.cache */
6036 6036 VERIFY3U(asprintf((char **)&spa_config_path, "%s/zpool.cache",
6037 6037 ztest_opts.zo_dir), !=, -1);
6038 6038
6039 6039 ztest_ds = umem_alloc(ztest_opts.zo_datasets * sizeof (ztest_ds_t),
6040 6040 UMEM_NOFAIL);
6041 6041 zs = ztest_shared;
6042 6042
6043 6043 if (fd_data_str) {
6044 6044 metaslab_gang_bang = ztest_opts.zo_metaslab_gang_bang;
6045 6045 metaslab_df_alloc_threshold =
6046 6046 zs->zs_metaslab_df_alloc_threshold;
6047 6047
6048 6048 if (zs->zs_do_init)
6049 6049 ztest_run_init();
6050 6050 else
6051 6051 ztest_run(zs);
6052 6052 exit(0);
6053 6053 }
6054 6054
6055 6055 hasalt = (strlen(ztest_opts.zo_alt_ztest) != 0);
6056 6056
6057 6057 if (ztest_opts.zo_verbose >= 1) {
6058 6058 (void) printf("%llu vdevs, %d datasets, %d threads,"
6059 6059 " %llu seconds...\n",
6060 6060 (u_longlong_t)ztest_opts.zo_vdevs,
6061 6061 ztest_opts.zo_datasets,
6062 6062 ztest_opts.zo_threads,
6063 6063 (u_longlong_t)ztest_opts.zo_time);
6064 6064 }
6065 6065
6066 6066 cmd = umem_alloc(MAXNAMELEN, UMEM_NOFAIL);
6067 6067 (void) strlcpy(cmd, getexecname(), MAXNAMELEN);
6068 6068
6069 6069 zs->zs_do_init = B_TRUE;
6070 6070 if (strlen(ztest_opts.zo_alt_ztest) != 0) {
6071 6071 if (ztest_opts.zo_verbose >= 1) {
6072 6072 (void) printf("Executing older ztest for "
6073 6073 "initialization: %s\n", ztest_opts.zo_alt_ztest);
6074 6074 }
6075 6075 VERIFY(!exec_child(ztest_opts.zo_alt_ztest,
6076 6076 ztest_opts.zo_alt_libpath, B_FALSE, NULL));
6077 6077 } else {
6078 6078 VERIFY(!exec_child(NULL, NULL, B_FALSE, NULL));
6079 6079 }
6080 6080 zs->zs_do_init = B_FALSE;
6081 6081
6082 6082 zs->zs_proc_start = gethrtime();
6083 6083 zs->zs_proc_stop = zs->zs_proc_start + ztest_opts.zo_time * NANOSEC;
6084 6084
6085 6085 for (int f = 0; f < ZTEST_FUNCS; f++) {
6086 6086 zi = &ztest_info[f];
6087 6087 zc = ZTEST_GET_SHARED_CALLSTATE(f);
6088 6088 if (zs->zs_proc_start + zi->zi_interval[0] > zs->zs_proc_stop)
6089 6089 zc->zc_next = UINT64_MAX;
6090 6090 else
6091 6091 zc->zc_next = zs->zs_proc_start +
6092 6092 ztest_random(2 * zi->zi_interval[0] + 1);
6093 6093 }
6094 6094
6095 6095 /*
6096 6096 * Run the tests in a loop. These tests include fault injection
6097 6097 * to verify that self-healing data works, and forced crashes
6098 6098 * to verify that we never lose on-disk consistency.
6099 6099 */
6100 6100 while (gethrtime() < zs->zs_proc_stop) {
6101 6101 int status;
6102 6102 boolean_t killed;
6103 6103
6104 6104 /*
6105 6105 * Initialize the workload counters for each function.
6106 6106 */
6107 6107 for (int f = 0; f < ZTEST_FUNCS; f++) {
6108 6108 zc = ZTEST_GET_SHARED_CALLSTATE(f);
6109 6109 zc->zc_count = 0;
6110 6110 zc->zc_time = 0;
6111 6111 }
6112 6112
6113 6113 /* Set the allocation switch size */
6114 6114 zs->zs_metaslab_df_alloc_threshold =
6115 6115 ztest_random(zs->zs_metaslab_sz / 4) + 1;
6116 6116
6117 6117 if (!hasalt || ztest_random(2) == 0) {
6118 6118 if (hasalt && ztest_opts.zo_verbose >= 1) {
6119 6119 (void) printf("Executing newer ztest: %s\n",
6120 6120 cmd);
6121 6121 }
6122 6122 newer++;
6123 6123 killed = exec_child(cmd, NULL, B_TRUE, &status);
6124 6124 } else {
6125 6125 if (hasalt && ztest_opts.zo_verbose >= 1) {
6126 6126 (void) printf("Executing older ztest: %s\n",
6127 6127 ztest_opts.zo_alt_ztest);
6128 6128 }
6129 6129 older++;
6130 6130 killed = exec_child(ztest_opts.zo_alt_ztest,
6131 6131 ztest_opts.zo_alt_libpath, B_TRUE, &status);
6132 6132 }
6133 6133
6134 6134 if (killed)
6135 6135 kills++;
6136 6136 iters++;
6137 6137
6138 6138 if (ztest_opts.zo_verbose >= 1) {
6139 6139 hrtime_t now = gethrtime();
6140 6140
6141 6141 now = MIN(now, zs->zs_proc_stop);
6142 6142 print_time(zs->zs_proc_stop - now, timebuf);
6143 6143 nicenum(zs->zs_space, numbuf);
6144 6144
6145 6145 (void) printf("Pass %3d, %8s, %3llu ENOSPC, "
6146 6146 "%4.1f%% of %5s used, %3.0f%% done, %8s to go\n",
6147 6147 iters,
6148 6148 WIFEXITED(status) ? "Complete" : "SIGKILL",
6149 6149 (u_longlong_t)zs->zs_enospc_count,
6150 6150 100.0 * zs->zs_alloc / zs->zs_space,
6151 6151 numbuf,
6152 6152 100.0 * (now - zs->zs_proc_start) /
6153 6153 (ztest_opts.zo_time * NANOSEC), timebuf);
6154 6154 }
6155 6155
6156 6156 if (ztest_opts.zo_verbose >= 2) {
6157 6157 (void) printf("\nWorkload summary:\n\n");
6158 6158 (void) printf("%7s %9s %s\n",
6159 6159 "Calls", "Time", "Function");
6160 6160 (void) printf("%7s %9s %s\n",
6161 6161 "-----", "----", "--------");
6162 6162 for (int f = 0; f < ZTEST_FUNCS; f++) {
6163 6163 Dl_info dli;
6164 6164
6165 6165 zi = &ztest_info[f];
6166 6166 zc = ZTEST_GET_SHARED_CALLSTATE(f);
6167 6167 print_time(zc->zc_time, timebuf);
6168 6168 (void) dladdr((void *)zi->zi_func, &dli);
6169 6169 (void) printf("%7llu %9s %s\n",
6170 6170 (u_longlong_t)zc->zc_count, timebuf,
6171 6171 dli.dli_sname);
6172 6172 }
6173 6173 (void) printf("\n");
6174 6174 }
6175 6175
6176 6176 /*
6177 6177 * It's possible that we killed a child during a rename test,
6178 6178 * in which case we'll have a 'ztest_tmp' pool lying around
6179 6179 * instead of 'ztest'. Do a blind rename in case this happened.
6180 6180 */
6181 6181 kernel_init(FREAD);
6182 6182 if (spa_open(ztest_opts.zo_pool, &spa, FTAG) == 0) {
6183 6183 spa_close(spa, FTAG);
6184 6184 } else {
6185 6185 char tmpname[MAXNAMELEN];
6186 6186 kernel_fini();
6187 6187 kernel_init(FREAD | FWRITE);
6188 6188 (void) snprintf(tmpname, sizeof (tmpname), "%s_tmp",
6189 6189 ztest_opts.zo_pool);
6190 6190 (void) spa_rename(tmpname, ztest_opts.zo_pool);
6191 6191 }
6192 6192 kernel_fini();
6193 6193
6194 6194 ztest_run_zdb(ztest_opts.zo_pool);
6195 6195 }
6196 6196
6197 6197 if (ztest_opts.zo_verbose >= 1) {
6198 6198 if (hasalt) {
6199 6199 (void) printf("%d runs of older ztest: %s\n", older,
6200 6200 ztest_opts.zo_alt_ztest);
6201 6201 (void) printf("%d runs of newer ztest: %s\n", newer,
6202 6202 cmd);
6203 6203 }
6204 6204 (void) printf("%d killed, %d completed, %.0f%% kill rate\n",
6205 6205 kills, iters - kills, (100.0 * kills) / MAX(1, iters));
6206 6206 }
6207 6207
6208 6208 umem_free(cmd, MAXNAMELEN);
6209 6209
6210 6210 return (0);
6211 6211 }