3740 Poor ZFS send / receive performance due to snapshot hold / release processing
Submitted by: Steven Hartland <steven.hartland@multiplay.co.uk>
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
--- old/usr/src/cmd/ztest/ztest.c
+++ new/usr/src/cmd/ztest/ztest.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 * Copyright (c) 2012 by Delphix. All rights reserved.
24 24 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
25 + * Copyright (c) 2013 Steven Hartland. All rights reserved.
25 26 */
26 27
27 28 /*
28 29 * The objective of this program is to provide a DMU/ZAP/SPA stress test
29 30 * that runs entirely in userland, is easy to use, and easy to extend.
30 31 *
31 32 * The overall design of the ztest program is as follows:
32 33 *
33 34 * (1) For each major functional area (e.g. adding vdevs to a pool,
34 35 * creating and destroying datasets, reading and writing objects, etc)
35 36 * we have a simple routine to test that functionality. These
36 37 * individual routines do not have to do anything "stressful".
37 38 *
38 39 * (2) We turn these simple functionality tests into a stress test by
39 40 * running them all in parallel, with as many threads as desired,
40 41 * and spread across as many datasets, objects, and vdevs as desired.
41 42 *
42 43 * (3) While all this is happening, we inject faults into the pool to
43 44 * verify that self-healing data really works.
44 45 *
45 46 * (4) Every time we open a dataset, we change its checksum and compression
46 47 * functions. Thus even individual objects vary from block to block
47 48 * in which checksum they use and whether they're compressed.
48 49 *
49 50 * (5) To verify that we never lose on-disk consistency after a crash,
50 51 * we run the entire test in a child of the main process.
51 52 * At random times, the child self-immolates with a SIGKILL.
52 53 * This is the software equivalent of pulling the power cord.
53 54 * The parent then runs the test again, using the existing
54 55 * storage pool, as many times as desired. If backwards compatability
55 56 * testing is enabled ztest will sometimes run the "older" version
56 57 * of ztest after a SIGKILL.
57 58 *
58 59 * (6) To verify that we don't have future leaks or temporal incursions,
59 60 * many of the functional tests record the transaction group number
60 61 * as part of their data. When reading old data, they verify that
61 62 * the transaction group number is less than the current, open txg.
62 63 * If you add a new test, please do this if applicable.
63 64 *
64 65 * When run with no arguments, ztest runs for about five minutes and
65 66 * produces no output if successful. To get a little bit of information,
66 67 * specify -V. To get more information, specify -VV, and so on.
67 68 *
68 69 * To turn this into an overnight stress test, use -T to specify run time.
69 70 *
70 71 * You can ask for more vdevs [-v], datasets [-d], or threads [-t]
71 72 * to increase the pool capacity, fanout, and overall stress level.
72 73 *
73 74 * Use the -k option to set the desired frequency of kills.
74 75 *
75 76 * When ztest invokes itself it passes all relevant information through a
76 77 * temporary file which is mmap-ed in the child process. This allows shared
77 78 * memory to survive the exec syscall. The ztest_shared_hdr_t struct is always
78 79 * stored at offset 0 of this file and contains information on the size and
79 80 * number of shared structures in the file. The information stored in this file
80 81 * must remain backwards compatible with older versions of ztest so that
81 82 * ztest can invoke them during backwards compatibility testing (-B).
82 83 */
83 84
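/*
 * A minimal sketch of the mmap-across-exec technique described above;
 * the names here are illustrative, not this file's actual setup code:
 *
 *	int fd = mkstemp(tmpname);
 *	VERIFY0(ftruncate(fd, size));
 *	buf = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	VERIFY3P(buf, !=, MAP_FAILED);
 *
 * Because the descriptor is not marked close-on-exec, the child still
 * holds it after exec() and can mmap() the same file to see the
 * parent's shared state.
 */
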
84 85 #include <sys/zfs_context.h>
85 86 #include <sys/spa.h>
86 87 #include <sys/dmu.h>
87 88 #include <sys/txg.h>
88 89 #include <sys/dbuf.h>
89 90 #include <sys/zap.h>
90 91 #include <sys/dmu_objset.h>
91 92 #include <sys/poll.h>
92 93 #include <sys/stat.h>
93 94 #include <sys/time.h>
94 95 #include <sys/wait.h>
95 96 #include <sys/mman.h>
96 97 #include <sys/resource.h>
97 98 #include <sys/zio.h>
98 99 #include <sys/zil.h>
99 100 #include <sys/zil_impl.h>
100 101 #include <sys/vdev_impl.h>
101 102 #include <sys/vdev_file.h>
102 103 #include <sys/spa_impl.h>
103 104 #include <sys/metaslab_impl.h>
104 105 #include <sys/dsl_prop.h>
105 106 #include <sys/dsl_dataset.h>
106 107 #include <sys/dsl_destroy.h>
107 108 #include <sys/dsl_scan.h>
108 109 #include <sys/zio_checksum.h>
109 110 #include <sys/refcount.h>
110 111 #include <sys/zfeature.h>
111 112 #include <sys/dsl_userhold.h>
112 113 #include <stdio.h>
113 114 #include <stdio_ext.h>
114 115 #include <stdlib.h>
115 116 #include <unistd.h>
116 117 #include <signal.h>
117 118 #include <umem.h>
118 119 #include <dlfcn.h>
119 120 #include <ctype.h>
120 121 #include <math.h>
121 122 #include <sys/fs/zfs.h>
122 123 #include <libnvpair.h>
123 124
124 125 static int ztest_fd_data = -1;
125 126 static int ztest_fd_rand = -1;
126 127
127 128 typedef struct ztest_shared_hdr {
128 129 uint64_t zh_hdr_size;
129 130 uint64_t zh_opts_size;
130 131 uint64_t zh_size;
131 132 uint64_t zh_stats_size;
132 133 uint64_t zh_stats_count;
133 134 uint64_t zh_ds_size;
134 135 uint64_t zh_ds_count;
135 136 } ztest_shared_hdr_t;
136 137
137 138 static ztest_shared_hdr_t *ztest_shared_hdr;
138 139
139 140 typedef struct ztest_shared_opts {
140 141 char zo_pool[MAXNAMELEN];
141 142 char zo_dir[MAXNAMELEN];
142 143 char zo_alt_ztest[MAXNAMELEN];
143 144 char zo_alt_libpath[MAXNAMELEN];
144 145 uint64_t zo_vdevs;
145 146 uint64_t zo_vdevtime;
146 147 size_t zo_vdev_size;
147 148 int zo_ashift;
148 149 int zo_mirrors;
149 150 int zo_raidz;
150 151 int zo_raidz_parity;
151 152 int zo_datasets;
152 153 int zo_threads;
153 154 uint64_t zo_passtime;
154 155 uint64_t zo_killrate;
155 156 int zo_verbose;
156 157 int zo_init;
157 158 uint64_t zo_time;
158 159 uint64_t zo_maxloops;
159 160 uint64_t zo_metaslab_gang_bang;
160 161 } ztest_shared_opts_t;
161 162
162 163 static const ztest_shared_opts_t ztest_opts_defaults = {
163 164 .zo_pool = { 'z', 't', 'e', 's', 't', '\0' },
164 165 .zo_dir = { '/', 't', 'm', 'p', '\0' },
165 166 .zo_alt_ztest = { '\0' },
166 167 .zo_alt_libpath = { '\0' },
167 168 .zo_vdevs = 5,
168 169 .zo_ashift = SPA_MINBLOCKSHIFT,
169 170 .zo_mirrors = 2,
170 171 .zo_raidz = 4,
171 172 .zo_raidz_parity = 1,
172 173 .zo_vdev_size = SPA_MINDEVSIZE,
173 174 .zo_datasets = 7,
174 175 .zo_threads = 23,
175 176 .zo_passtime = 60, /* 60 seconds */
176 177 .zo_killrate = 70, /* 70% kill rate */
177 178 .zo_verbose = 0,
178 179 .zo_init = 1,
179 180 .zo_time = 300, /* 5 minutes */
180 181 .zo_maxloops = 50, /* max loops during spa_freeze() */
181 182 .zo_metaslab_gang_bang = 32 << 10
182 183 };
183 184
184 185 extern uint64_t metaslab_gang_bang;
185 186 extern uint64_t metaslab_df_alloc_threshold;
186 187
187 188 static ztest_shared_opts_t *ztest_shared_opts;
188 189 static ztest_shared_opts_t ztest_opts;
189 190
190 191 typedef struct ztest_shared_ds {
191 192 uint64_t zd_seq;
192 193 } ztest_shared_ds_t;
193 194
194 195 static ztest_shared_ds_t *ztest_shared_ds;
195 196 #define ZTEST_GET_SHARED_DS(d) (&ztest_shared_ds[d])
196 197
197 198 #define BT_MAGIC 0x123456789abcdefULL
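/*
 * The most device faults the configured pool layout should survive:
 * (number of mirror copies) x (raidz parity + 1), less one.
 */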
198 199 #define MAXFAULTS() \
199 200 (MAX(zs->zs_mirrors, 1) * (ztest_opts.zo_raidz_parity + 1) - 1)
200 201
201 202 enum ztest_io_type {
202 203 ZTEST_IO_WRITE_TAG,
203 204 ZTEST_IO_WRITE_PATTERN,
204 205 ZTEST_IO_WRITE_ZEROES,
205 206 ZTEST_IO_TRUNCATE,
206 207 ZTEST_IO_SETATTR,
207 208 ZTEST_IO_REWRITE,
208 209 ZTEST_IO_TYPES
209 210 };
210 211
211 212 typedef struct ztest_block_tag {
212 213 uint64_t bt_magic;
213 214 uint64_t bt_objset;
214 215 uint64_t bt_object;
215 216 uint64_t bt_offset;
216 217 uint64_t bt_gen;
217 218 uint64_t bt_txg;
218 219 uint64_t bt_crtxg;
219 220 } ztest_block_tag_t;
220 221
221 222 typedef struct bufwad {
222 223 uint64_t bw_index;
223 224 uint64_t bw_txg;
224 225 uint64_t bw_data;
225 226 } bufwad_t;
226 227
227 228 /*
228 229 * XXX -- fix zfs range locks to be generic so we can use them here.
229 230 */
230 231 typedef enum {
231 232 RL_READER,
232 233 RL_WRITER,
233 234 RL_APPEND
234 235 } rl_type_t;
235 236
236 237 typedef struct rll {
237 238 void *rll_writer;
238 239 int rll_readers;
239 240 mutex_t rll_lock;
240 241 cond_t rll_cv;
241 242 } rll_t;
242 243
243 244 typedef struct rl {
244 245 uint64_t rl_object;
245 246 uint64_t rl_offset;
246 247 uint64_t rl_size;
247 248 rll_t *rl_lock;
248 249 } rl_t;
249 250
250 251 #define ZTEST_RANGE_LOCKS 64
251 252 #define ZTEST_OBJECT_LOCKS 64
252 253
253 254 /*
254 255 * Object descriptor. Used as a template for object lookup/create/remove.
255 256 */
256 257 typedef struct ztest_od {
257 258 uint64_t od_dir;
258 259 uint64_t od_object;
259 260 dmu_object_type_t od_type;
260 261 dmu_object_type_t od_crtype;
261 262 uint64_t od_blocksize;
262 263 uint64_t od_crblocksize;
263 264 uint64_t od_gen;
264 265 uint64_t od_crgen;
265 266 char od_name[MAXNAMELEN];
266 267 } ztest_od_t;
267 268
268 269 /*
269 270 * Per-dataset state.
270 271 */
271 272 typedef struct ztest_ds {
272 273 ztest_shared_ds_t *zd_shared;
273 274 objset_t *zd_os;
274 275 rwlock_t zd_zilog_lock;
275 276 zilog_t *zd_zilog;
276 277 ztest_od_t *zd_od; /* debugging aid */
277 278 char zd_name[MAXNAMELEN];
278 279 mutex_t zd_dirobj_lock;
279 280 rll_t zd_object_lock[ZTEST_OBJECT_LOCKS];
280 281 rll_t zd_range_lock[ZTEST_RANGE_LOCKS];
281 282 } ztest_ds_t;
282 283
283 284 /*
284 285 * Per-iteration state.
285 286 */
286 287 typedef void ztest_func_t(ztest_ds_t *zd, uint64_t id);
287 288
288 289 typedef struct ztest_info {
289 290 ztest_func_t *zi_func; /* test function */
290 291 uint64_t zi_iters; /* iterations per execution */
291 292 uint64_t *zi_interval; /* execute every <interval> seconds */
292 293 } ztest_info_t;
293 294
294 295 typedef struct ztest_shared_callstate {
295 296 uint64_t zc_count; /* per-pass count */
296 297 uint64_t zc_time; /* per-pass time */
297 298 uint64_t zc_next; /* next time to call this function */
298 299 } ztest_shared_callstate_t;
299 300
300 301 static ztest_shared_callstate_t *ztest_shared_callstate;
301 302 #define ZTEST_GET_SHARED_CALLSTATE(c) (&ztest_shared_callstate[c])
302 303
303 304 /*
304 305 * Note: these aren't static because we want dladdr() to work.
305 306 */
306 307 ztest_func_t ztest_dmu_read_write;
307 308 ztest_func_t ztest_dmu_write_parallel;
308 309 ztest_func_t ztest_dmu_object_alloc_free;
309 310 ztest_func_t ztest_dmu_commit_callbacks;
310 311 ztest_func_t ztest_zap;
311 312 ztest_func_t ztest_zap_parallel;
312 313 ztest_func_t ztest_zil_commit;
313 314 ztest_func_t ztest_zil_remount;
314 315 ztest_func_t ztest_dmu_read_write_zcopy;
315 316 ztest_func_t ztest_dmu_objset_create_destroy;
316 317 ztest_func_t ztest_dmu_prealloc;
317 318 ztest_func_t ztest_fzap;
318 319 ztest_func_t ztest_dmu_snapshot_create_destroy;
319 320 ztest_func_t ztest_dsl_prop_get_set;
320 321 ztest_func_t ztest_spa_prop_get_set;
321 322 ztest_func_t ztest_spa_create_destroy;
322 323 ztest_func_t ztest_fault_inject;
323 324 ztest_func_t ztest_ddt_repair;
324 325 ztest_func_t ztest_dmu_snapshot_hold;
325 326 ztest_func_t ztest_spa_rename;
326 327 ztest_func_t ztest_scrub;
327 328 ztest_func_t ztest_dsl_dataset_promote_busy;
328 329 ztest_func_t ztest_vdev_attach_detach;
329 330 ztest_func_t ztest_vdev_LUN_growth;
330 331 ztest_func_t ztest_vdev_add_remove;
331 332 ztest_func_t ztest_vdev_aux_add_remove;
332 333 ztest_func_t ztest_split_pool;
333 334 ztest_func_t ztest_reguid;
334 335 ztest_func_t ztest_spa_upgrade;
335 336
336 337 uint64_t zopt_always = 0ULL * NANOSEC; /* all the time */
337 338 uint64_t zopt_incessant = 1ULL * NANOSEC / 10; /* every 1/10 second */
338 339 uint64_t zopt_often = 1ULL * NANOSEC; /* every second */
339 340 uint64_t zopt_sometimes = 10ULL * NANOSEC; /* every 10 seconds */
340 341 uint64_t zopt_rarely = 60ULL * NANOSEC; /* every 60 seconds */
341 342
342 343 ztest_info_t ztest_info[] = {
343 344 { ztest_dmu_read_write, 1, &zopt_always },
344 345 { ztest_dmu_write_parallel, 10, &zopt_always },
345 346 { ztest_dmu_object_alloc_free, 1, &zopt_always },
346 347 { ztest_dmu_commit_callbacks, 1, &zopt_always },
347 348 { ztest_zap, 30, &zopt_always },
348 349 { ztest_zap_parallel, 100, &zopt_always },
349 350 { ztest_split_pool, 1, &zopt_always },
350 351 { ztest_zil_commit, 1, &zopt_incessant },
351 352 { ztest_zil_remount, 1, &zopt_sometimes },
352 353 { ztest_dmu_read_write_zcopy, 1, &zopt_often },
353 354 { ztest_dmu_objset_create_destroy, 1, &zopt_often },
354 355 { ztest_dsl_prop_get_set, 1, &zopt_often },
355 356 { ztest_spa_prop_get_set, 1, &zopt_sometimes },
356 357 #if 0
357 358 { ztest_dmu_prealloc, 1, &zopt_sometimes },
358 359 #endif
359 360 { ztest_fzap, 1, &zopt_sometimes },
360 361 { ztest_dmu_snapshot_create_destroy, 1, &zopt_sometimes },
361 362 { ztest_spa_create_destroy, 1, &zopt_sometimes },
362 363 { ztest_fault_inject, 1, &zopt_sometimes },
363 364 { ztest_ddt_repair, 1, &zopt_sometimes },
364 365 { ztest_dmu_snapshot_hold, 1, &zopt_sometimes },
365 366 { ztest_reguid, 1, &zopt_sometimes },
366 367 { ztest_spa_rename, 1, &zopt_rarely },
367 368 { ztest_scrub, 1, &zopt_rarely },
368 369 { ztest_spa_upgrade, 1, &zopt_rarely },
369 370 { ztest_dsl_dataset_promote_busy, 1, &zopt_rarely },
370 371 { ztest_vdev_attach_detach, 1, &zopt_sometimes },
371 372 { ztest_vdev_LUN_growth, 1, &zopt_rarely },
372 373 { ztest_vdev_add_remove, 1,
373 374 &ztest_opts.zo_vdevtime },
374 375 { ztest_vdev_aux_add_remove, 1,
375 376 &ztest_opts.zo_vdevtime },
376 377 };
377 378
378 379 #define ZTEST_FUNCS (sizeof (ztest_info) / sizeof (ztest_info_t))
379 380
380 381 /*
381 382 * The following struct is used to hold a list of uncalled commit callbacks.
382 383 * The callbacks are ordered by txg number.
383 384 */
384 385 typedef struct ztest_cb_list {
385 386 mutex_t zcl_callbacks_lock;
386 387 list_t zcl_callbacks;
387 388 } ztest_cb_list_t;
388 389
389 390 /*
390 391 * Stuff we need to share writably between parent and child.
391 392 */
392 393 typedef struct ztest_shared {
393 394 boolean_t zs_do_init;
394 395 hrtime_t zs_proc_start;
395 396 hrtime_t zs_proc_stop;
396 397 hrtime_t zs_thread_start;
397 398 hrtime_t zs_thread_stop;
398 399 hrtime_t zs_thread_kill;
399 400 uint64_t zs_enospc_count;
400 401 uint64_t zs_vdev_next_leaf;
401 402 uint64_t zs_vdev_aux;
402 403 uint64_t zs_alloc;
403 404 uint64_t zs_space;
404 405 uint64_t zs_splits;
405 406 uint64_t zs_mirrors;
406 407 uint64_t zs_metaslab_sz;
407 408 uint64_t zs_metaslab_df_alloc_threshold;
408 409 uint64_t zs_guid;
409 410 } ztest_shared_t;
410 411
411 412 #define ID_PARALLEL -1ULL
412 413
413 414 static char ztest_dev_template[] = "%s/%s.%llua";
414 415 static char ztest_aux_template[] = "%s/%s.%s.%llu";
415 416 ztest_shared_t *ztest_shared;
416 417
417 418 static spa_t *ztest_spa = NULL;
418 419 static ztest_ds_t *ztest_ds;
419 420
420 421 static mutex_t ztest_vdev_lock;
421 422
422 423 /*
423 424 * The ztest_name_lock protects the pool and dataset namespace used by
424 425 * the individual tests. To modify the namespace, consumers must grab
425 426 * this lock as writer. Grabbing the lock as reader will ensure that the
426 427 * namespace does not change while the lock is held.
427 428 */
428 429 static rwlock_t ztest_name_lock;
429 430
430 431 static boolean_t ztest_dump_core = B_TRUE;
431 432 static boolean_t ztest_exiting;
432 433
433 434 /* Global commit callback list */
434 435 static ztest_cb_list_t zcl;
435 436
436 437 enum ztest_object {
437 438 ZTEST_META_DNODE = 0,
438 439 ZTEST_DIROBJ,
439 440 ZTEST_OBJECTS
440 441 };
441 442
442 443 static void usage(boolean_t) __NORETURN;
443 444
444 445 /*
445 446 * These libumem hooks provide a reasonable set of defaults for the allocator's
446 447 * debugging facilities.
447 448 */
448 449 const char *
449 450 _umem_debug_init()
450 451 {
451 452 return ("default,verbose"); /* $UMEM_DEBUG setting */
452 453 }
453 454
454 455 const char *
455 456 _umem_logging_init(void)
456 457 {
457 458 return ("fail,contents"); /* $UMEM_LOGGING setting */
458 459 }
459 460
460 461 #define FATAL_MSG_SZ 1024
461 462
462 463 char *fatal_msg;
463 464
464 465 static void
465 466 fatal(int do_perror, char *message, ...)
466 467 {
467 468 va_list args;
468 469 int save_errno = errno;
469 470 char buf[FATAL_MSG_SZ];
470 471
471 472 (void) fflush(stdout);
472 473
473 474 va_start(args, message);
474 475 (void) sprintf(buf, "ztest: ");
475 476 /* LINTED */
476 477 (void) vsprintf(buf + strlen(buf), message, args);
477 478 va_end(args);
478 479 if (do_perror) {
479 480 (void) snprintf(buf + strlen(buf), FATAL_MSG_SZ - strlen(buf),
480 481 ": %s", strerror(save_errno));
481 482 }
482 483 (void) fprintf(stderr, "%s\n", buf);
483 484 fatal_msg = buf; /* to ease debugging */
484 485 if (ztest_dump_core)
485 486 abort();
486 487 exit(3);
487 488 }
488 489
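/*
 * Convert a size suffix to its power-of-two shift, e.g. "K" -> 10,
 * "M" -> 20. An empty suffix or a trailing 'B' (as in "KB") is allowed.
 */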
489 490 static int
490 491 str2shift(const char *buf)
491 492 {
492 493 const char *ends = "BKMGTPEZ";
493 494 int i;
494 495
495 496 if (buf[0] == '\0')
496 497 return (0);
497 498 for (i = 0; i < strlen(ends); i++) {
498 499 if (toupper(buf[0]) == ends[i])
499 500 break;
500 501 }
501 502 if (i == strlen(ends)) {
502 503 (void) fprintf(stderr, "ztest: invalid bytes suffix: %s\n",
503 504 buf);
504 505 usage(B_FALSE);
505 506 }
506 507 if (buf[1] == '\0' || (toupper(buf[1]) == 'B' && buf[2] == '\0')) {
507 508 return (10*i);
508 509 }
509 510 (void) fprintf(stderr, "ztest: invalid bytes suffix: %s\n", buf);
510 511 usage(B_FALSE);
511 512 /* NOTREACHED */
512 513 }
513 514
514 515 static uint64_t
515 516 nicenumtoull(const char *buf)
516 517 {
517 518 char *end;
518 519 uint64_t val;
519 520
520 521 val = strtoull(buf, &end, 0);
521 522 if (end == buf) {
522 523 (void) fprintf(stderr, "ztest: bad numeric value: %s\n", buf);
523 524 usage(B_FALSE);
524 525 } else if (end[0] == '.') {
525 526 double fval = strtod(buf, &end);
526 527 fval *= pow(2, str2shift(end));
527 528 if (fval > UINT64_MAX) {
528 529 (void) fprintf(stderr, "ztest: value too large: %s\n",
529 530 buf);
530 531 usage(B_FALSE);
531 532 }
532 533 val = (uint64_t)fval;
533 534 } else {
534 535 int shift = str2shift(end);
535 536 if (shift >= 64 || (val << shift) >> shift != val) {
536 537 (void) fprintf(stderr, "ztest: value too large: %s\n",
537 538 buf);
538 539 usage(B_FALSE);
539 540 }
540 541 val <<= shift;
541 542 }
542 543 return (val);
543 544 }
544 545
545 546 static void
546 547 usage(boolean_t requested)
547 548 {
548 549 const ztest_shared_opts_t *zo = &ztest_opts_defaults;
549 550
550 551 char nice_vdev_size[10];
551 552 char nice_gang_bang[10];
552 553 FILE *fp = requested ? stdout : stderr;
553 554
554 555 nicenum(zo->zo_vdev_size, nice_vdev_size);
555 556 nicenum(zo->zo_metaslab_gang_bang, nice_gang_bang);
556 557
557 558 (void) fprintf(fp, "Usage: %s\n"
558 559 "\t[-v vdevs (default: %llu)]\n"
559 560 "\t[-s size_of_each_vdev (default: %s)]\n"
560 561 "\t[-a alignment_shift (default: %d)] use 0 for random\n"
561 562 "\t[-m mirror_copies (default: %d)]\n"
562 563 "\t[-r raidz_disks (default: %d)]\n"
563 564 "\t[-R raidz_parity (default: %d)]\n"
564 565 "\t[-d datasets (default: %d)]\n"
565 566 "\t[-t threads (default: %d)]\n"
566 567 "\t[-g gang_block_threshold (default: %s)]\n"
567 568 "\t[-i init_count (default: %d)] initialize pool i times\n"
568 569 "\t[-k kill_percentage (default: %llu%%)]\n"
569 570 "\t[-p pool_name (default: %s)]\n"
570 571 "\t[-f dir (default: %s)] file directory for vdev files\n"
571 572 "\t[-V] verbose (use multiple times for ever more blather)\n"
572 573 "\t[-E] use existing pool instead of creating new one\n"
573 574 "\t[-T time (default: %llu sec)] total run time\n"
574 575 "\t[-F freezeloops (default: %llu)] max loops in spa_freeze()\n"
575 576 "\t[-P passtime (default: %llu sec)] time per pass\n"
576 577 "\t[-B alt_ztest (default: <none>)] alternate ztest path\n"
577 578 "\t[-h] (print help)\n"
578 579 "",
579 580 zo->zo_pool,
580 581 (u_longlong_t)zo->zo_vdevs, /* -v */
581 582 nice_vdev_size, /* -s */
582 583 zo->zo_ashift, /* -a */
583 584 zo->zo_mirrors, /* -m */
584 585 zo->zo_raidz, /* -r */
585 586 zo->zo_raidz_parity, /* -R */
586 587 zo->zo_datasets, /* -d */
587 588 zo->zo_threads, /* -t */
588 589 nice_gang_bang, /* -g */
589 590 zo->zo_init, /* -i */
590 591 (u_longlong_t)zo->zo_killrate, /* -k */
591 592 zo->zo_pool, /* -p */
592 593 zo->zo_dir, /* -f */
593 594 (u_longlong_t)zo->zo_time, /* -T */
594 595 (u_longlong_t)zo->zo_maxloops, /* -F */
595 596 (u_longlong_t)zo->zo_passtime);
596 597 exit(requested ? 0 : 1);
597 598 }
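/*
 * For example (illustrative, not from this change), an overnight run
 * with extra verbosity against vdev files in /var/tmp:
 *
 *	ztest -VV -T 57600 -f /var/tmp
 */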
598 599
599 600 static void
600 601 process_options(int argc, char **argv)
601 602 {
602 603 char *path;
603 604 ztest_shared_opts_t *zo = &ztest_opts;
604 605
605 606 int opt;
606 607 uint64_t value;
607 608 char altdir[MAXNAMELEN] = { 0 };
608 609
609 610 bcopy(&ztest_opts_defaults, zo, sizeof (*zo));
610 611
611 612 while ((opt = getopt(argc, argv,
612 613 "v:s:a:m:r:R:d:t:g:i:k:p:f:VET:P:hF:B:")) != EOF) {
613 614 value = 0;
614 615 switch (opt) {
615 616 case 'v':
616 617 case 's':
617 618 case 'a':
618 619 case 'm':
619 620 case 'r':
620 621 case 'R':
621 622 case 'd':
622 623 case 't':
623 624 case 'g':
624 625 case 'i':
625 626 case 'k':
626 627 case 'T':
627 628 case 'P':
628 629 case 'F':
629 630 value = nicenumtoull(optarg);
630 631 }
631 632 switch (opt) {
632 633 case 'v':
633 634 zo->zo_vdevs = value;
634 635 break;
635 636 case 's':
636 637 zo->zo_vdev_size = MAX(SPA_MINDEVSIZE, value);
637 638 break;
638 639 case 'a':
639 640 zo->zo_ashift = value;
640 641 break;
641 642 case 'm':
642 643 zo->zo_mirrors = value;
643 644 break;
644 645 case 'r':
645 646 zo->zo_raidz = MAX(1, value);
646 647 break;
647 648 case 'R':
648 649 zo->zo_raidz_parity = MIN(MAX(value, 1), 3);
649 650 break;
650 651 case 'd':
651 652 zo->zo_datasets = MAX(1, value);
652 653 break;
653 654 case 't':
654 655 zo->zo_threads = MAX(1, value);
655 656 break;
656 657 case 'g':
657 658 zo->zo_metaslab_gang_bang = MAX(SPA_MINBLOCKSIZE << 1,
658 659 value);
659 660 break;
660 661 case 'i':
661 662 zo->zo_init = value;
662 663 break;
663 664 case 'k':
664 665 zo->zo_killrate = value;
665 666 break;
666 667 case 'p':
667 668 (void) strlcpy(zo->zo_pool, optarg,
668 669 sizeof (zo->zo_pool));
669 670 break;
670 671 case 'f':
671 672 path = realpath(optarg, NULL);
672 673 if (path == NULL) {
673 674 (void) fprintf(stderr, "error: %s: %s\n",
674 675 optarg, strerror(errno));
675 676 usage(B_FALSE);
676 677 } else {
677 678 (void) strlcpy(zo->zo_dir, path,
678 679 sizeof (zo->zo_dir));
679 680 }
680 681 break;
681 682 case 'V':
682 683 zo->zo_verbose++;
683 684 break;
684 685 case 'E':
685 686 zo->zo_init = 0;
686 687 break;
687 688 case 'T':
688 689 zo->zo_time = value;
689 690 break;
690 691 case 'P':
691 692 zo->zo_passtime = MAX(1, value);
692 693 break;
693 694 case 'F':
694 695 zo->zo_maxloops = MAX(1, value);
695 696 break;
696 697 case 'B':
697 698 (void) strlcpy(altdir, optarg, sizeof (altdir));
698 699 break;
699 700 case 'h':
700 701 usage(B_TRUE);
701 702 break;
702 703 case '?':
703 704 default:
704 705 usage(B_FALSE);
705 706 break;
706 707 }
707 708 }
708 709
709 710 zo->zo_raidz_parity = MIN(zo->zo_raidz_parity, zo->zo_raidz - 1);
710 711
711 712 zo->zo_vdevtime =
712 713 (zo->zo_vdevs > 0 ? zo->zo_time * NANOSEC / zo->zo_vdevs :
713 714 UINT64_MAX >> 2);
714 715
715 716 if (strlen(altdir) > 0) {
716 717 char *cmd;
717 718 char *realaltdir;
718 719 char *bin;
719 720 char *ztest;
720 721 char *isa;
721 722 int isalen;
722 723
723 724 cmd = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
724 725 realaltdir = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
725 726
726 727 VERIFY(NULL != realpath(getexecname(), cmd));
727 728 if (0 != access(altdir, F_OK)) {
728 729 ztest_dump_core = B_FALSE;
729 730 fatal(B_TRUE, "invalid alternate ztest path: %s",
730 731 altdir);
731 732 }
732 733 VERIFY(NULL != realpath(altdir, realaltdir));
733 734
734 735 /*
735 736 * 'cmd' should be of the form "<anything>/usr/bin/<isa>/ztest".
736 737 * We want to extract <isa> to determine if we should use
737 738 * 32 or 64 bit binaries.
738 739 */
739 740 bin = strstr(cmd, "/usr/bin/");
740 741 ztest = strstr(bin, "/ztest");
741 742 isa = bin + 9;
742 743 isalen = ztest - isa;
743 744 (void) snprintf(zo->zo_alt_ztest, sizeof (zo->zo_alt_ztest),
744 745 "%s/usr/bin/%.*s/ztest", realaltdir, isalen, isa);
745 746 (void) snprintf(zo->zo_alt_libpath, sizeof (zo->zo_alt_libpath),
746 747 "%s/usr/lib/%.*s", realaltdir, isalen, isa);
747 748
748 749 if (0 != access(zo->zo_alt_ztest, X_OK)) {
749 750 ztest_dump_core = B_FALSE;
750 751 fatal(B_TRUE, "invalid alternate ztest: %s",
751 752 zo->zo_alt_ztest);
752 753 } else if (0 != access(zo->zo_alt_libpath, X_OK)) {
753 754 ztest_dump_core = B_FALSE;
754 755 fatal(B_TRUE, "invalid alternate lib directory %s",
755 756 zo->zo_alt_libpath);
756 757 }
757 758
758 759 umem_free(cmd, MAXPATHLEN);
759 760 umem_free(realaltdir, MAXPATHLEN);
760 761 }
761 762 }
762 763
763 764 static void
764 765 ztest_kill(ztest_shared_t *zs)
765 766 {
766 767 zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(ztest_spa));
767 768 zs->zs_space = metaslab_class_get_space(spa_normal_class(ztest_spa));
768 769 (void) kill(getpid(), SIGKILL);
769 770 }
770 771
771 772 static uint64_t
772 773 ztest_random(uint64_t range)
773 774 {
774 775 uint64_t r;
775 776
776 777 ASSERT3S(ztest_fd_rand, >=, 0);
777 778
778 779 if (range == 0)
779 780 return (0);
780 781
781 782 if (read(ztest_fd_rand, &r, sizeof (r)) != sizeof (r))
782 783 fatal(1, "short read from /dev/urandom");
783 784
784 785 return (r % range);
785 786 }
786 787
787 788 /* ARGSUSED */
788 789 static void
789 790 ztest_record_enospc(const char *s)
790 791 {
791 792 ztest_shared->zs_enospc_count++;
792 793 }
793 794
794 795 static uint64_t
795 796 ztest_get_ashift(void)
796 797 {
797 798 if (ztest_opts.zo_ashift == 0)
798 799 return (SPA_MINBLOCKSHIFT + ztest_random(3));
799 800 return (ztest_opts.zo_ashift);
800 801 }
801 802
802 803 static nvlist_t *
803 804 make_vdev_file(char *path, char *aux, char *pool, size_t size, uint64_t ashift)
804 805 {
805 806 char pathbuf[MAXPATHLEN];
806 807 uint64_t vdev;
807 808 nvlist_t *file;
808 809
809 810 if (ashift == 0)
810 811 ashift = ztest_get_ashift();
811 812
812 813 if (path == NULL) {
813 814 path = pathbuf;
814 815
815 816 if (aux != NULL) {
816 817 vdev = ztest_shared->zs_vdev_aux;
817 818 (void) snprintf(path, sizeof (pathbuf),
818 819 ztest_aux_template, ztest_opts.zo_dir,
819 820 pool == NULL ? ztest_opts.zo_pool : pool,
820 821 aux, vdev);
821 822 } else {
822 823 vdev = ztest_shared->zs_vdev_next_leaf++;
823 824 (void) snprintf(path, sizeof (pathbuf),
824 825 ztest_dev_template, ztest_opts.zo_dir,
825 826 pool == NULL ? ztest_opts.zo_pool : pool, vdev);
826 827 }
827 828 }
828 829
829 830 if (size != 0) {
830 831 int fd = open(path, O_RDWR | O_CREAT | O_TRUNC, 0666);
831 832 if (fd == -1)
832 833 fatal(1, "can't open %s", path);
833 834 if (ftruncate(fd, size) != 0)
834 835 fatal(1, "can't ftruncate %s", path);
835 836 (void) close(fd);
836 837 }
837 838
838 839 VERIFY(nvlist_alloc(&file, NV_UNIQUE_NAME, 0) == 0);
839 840 VERIFY(nvlist_add_string(file, ZPOOL_CONFIG_TYPE, VDEV_TYPE_FILE) == 0);
840 841 VERIFY(nvlist_add_string(file, ZPOOL_CONFIG_PATH, path) == 0);
841 842 VERIFY(nvlist_add_uint64(file, ZPOOL_CONFIG_ASHIFT, ashift) == 0);
842 843
843 844 return (file);
844 845 }
845 846
846 847 static nvlist_t *
847 848 make_vdev_raidz(char *path, char *aux, char *pool, size_t size,
848 849 uint64_t ashift, int r)
849 850 {
850 851 nvlist_t *raidz, **child;
851 852 int c;
852 853
853 854 if (r < 2)
854 855 return (make_vdev_file(path, aux, pool, size, ashift));
855 856 child = umem_alloc(r * sizeof (nvlist_t *), UMEM_NOFAIL);
856 857
857 858 for (c = 0; c < r; c++)
858 859 child[c] = make_vdev_file(path, aux, pool, size, ashift);
859 860
860 861 VERIFY(nvlist_alloc(&raidz, NV_UNIQUE_NAME, 0) == 0);
861 862 VERIFY(nvlist_add_string(raidz, ZPOOL_CONFIG_TYPE,
862 863 VDEV_TYPE_RAIDZ) == 0);
863 864 VERIFY(nvlist_add_uint64(raidz, ZPOOL_CONFIG_NPARITY,
864 865 ztest_opts.zo_raidz_parity) == 0);
865 866 VERIFY(nvlist_add_nvlist_array(raidz, ZPOOL_CONFIG_CHILDREN,
866 867 child, r) == 0);
867 868
868 869 for (c = 0; c < r; c++)
869 870 nvlist_free(child[c]);
870 871
871 872 umem_free(child, r * sizeof (nvlist_t *));
872 873
873 874 return (raidz);
874 875 }
875 876
876 877 static nvlist_t *
877 878 make_vdev_mirror(char *path, char *aux, char *pool, size_t size,
878 879 uint64_t ashift, int r, int m)
879 880 {
880 881 nvlist_t *mirror, **child;
881 882 int c;
882 883
883 884 if (m < 1)
884 885 return (make_vdev_raidz(path, aux, pool, size, ashift, r));
885 886
886 887 child = umem_alloc(m * sizeof (nvlist_t *), UMEM_NOFAIL);
887 888
888 889 for (c = 0; c < m; c++)
889 890 child[c] = make_vdev_raidz(path, aux, pool, size, ashift, r);
890 891
891 892 VERIFY(nvlist_alloc(&mirror, NV_UNIQUE_NAME, 0) == 0);
892 893 VERIFY(nvlist_add_string(mirror, ZPOOL_CONFIG_TYPE,
893 894 VDEV_TYPE_MIRROR) == 0);
894 895 VERIFY(nvlist_add_nvlist_array(mirror, ZPOOL_CONFIG_CHILDREN,
895 896 child, m) == 0);
896 897
897 898 for (c = 0; c < m; c++)
898 899 nvlist_free(child[c]);
899 900
900 901 umem_free(child, m * sizeof (nvlist_t *));
901 902
902 903 return (mirror);
903 904 }
904 905
905 906 static nvlist_t *
906 907 make_vdev_root(char *path, char *aux, char *pool, size_t size, uint64_t ashift,
907 908 int log, int r, int m, int t)
908 909 {
909 910 nvlist_t *root, **child;
910 911 int c;
911 912
912 913 ASSERT(t > 0);
913 914
914 915 child = umem_alloc(t * sizeof (nvlist_t *), UMEM_NOFAIL);
915 916
916 917 for (c = 0; c < t; c++) {
917 918 child[c] = make_vdev_mirror(path, aux, pool, size, ashift,
918 919 r, m);
919 920 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
920 921 log) == 0);
921 922 }
922 923
923 924 VERIFY(nvlist_alloc(&root, NV_UNIQUE_NAME, 0) == 0);
924 925 VERIFY(nvlist_add_string(root, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) == 0);
925 926 VERIFY(nvlist_add_nvlist_array(root, aux ? aux : ZPOOL_CONFIG_CHILDREN,
926 927 child, t) == 0);
927 928
928 929 for (c = 0; c < t; c++)
929 930 nvlist_free(child[c]);
930 931
931 932 umem_free(child, t * sizeof (nvlist_t *));
932 933
933 934 return (root);
934 935 }
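/*
 * Illustrative call shape (a sketch, not a caller from this change):
 * one top-level vdev, built as an m-way mirror of r-disk raidz groups:
 *
 *	nvroot = make_vdev_root(NULL, NULL, NULL, ztest_opts.zo_vdev_size,
 *	    0, 0, ztest_opts.zo_raidz, zs->zs_mirrors, 1);
 */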
935 936
936 937 /*
937 938 * Find a random spa version. Returns back a random spa version in the
938 939 * range [initial_version, SPA_VERSION_FEATURES].
939 940 */
940 941 static uint64_t
941 942 ztest_random_spa_version(uint64_t initial_version)
942 943 {
943 944 uint64_t version = initial_version;
944 945
945 946 if (version <= SPA_VERSION_BEFORE_FEATURES) {
946 947 version = version +
947 948 ztest_random(SPA_VERSION_BEFORE_FEATURES - version + 1);
948 949 }
949 950
950 951 if (version > SPA_VERSION_BEFORE_FEATURES)
951 952 version = SPA_VERSION_FEATURES;
952 953
953 954 ASSERT(SPA_VERSION_IS_SUPPORTED(version));
954 955 return (version);
955 956 }
956 957
957 958 static int
958 959 ztest_random_blocksize(void)
959 960 {
960 961 return (1 << (SPA_MINBLOCKSHIFT +
961 962 ztest_random(SPA_MAXBLOCKSHIFT - SPA_MINBLOCKSHIFT + 1)));
962 963 }
963 964
964 965 static int
965 966 ztest_random_ibshift(void)
966 967 {
967 968 return (DN_MIN_INDBLKSHIFT +
968 969 ztest_random(DN_MAX_INDBLKSHIFT - DN_MIN_INDBLKSHIFT + 1));
969 970 }
970 971
971 972 static uint64_t
972 973 ztest_random_vdev_top(spa_t *spa, boolean_t log_ok)
973 974 {
974 975 uint64_t top;
975 976 vdev_t *rvd = spa->spa_root_vdev;
976 977 vdev_t *tvd;
977 978
978 979 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
979 980
980 981 do {
981 982 top = ztest_random(rvd->vdev_children);
982 983 tvd = rvd->vdev_child[top];
983 984 } while (tvd->vdev_ishole || (tvd->vdev_islog && !log_ok) ||
984 985 tvd->vdev_mg == NULL || tvd->vdev_mg->mg_class == NULL);
985 986
986 987 return (top);
987 988 }
988 989
989 990 static uint64_t
990 991 ztest_random_dsl_prop(zfs_prop_t prop)
991 992 {
992 993 uint64_t value;
993 994
994 995 do {
995 996 value = zfs_prop_random_value(prop, ztest_random(-1ULL));
996 997 } while (prop == ZFS_PROP_CHECKSUM && value == ZIO_CHECKSUM_OFF);
997 998
998 999 return (value);
999 1000 }
1000 1001
1001 1002 static int
1002 1003 ztest_dsl_prop_set_uint64(char *osname, zfs_prop_t prop, uint64_t value,
1003 1004 boolean_t inherit)
1004 1005 {
1005 1006 const char *propname = zfs_prop_to_name(prop);
1006 1007 const char *valname;
1007 1008 char setpoint[MAXPATHLEN];
1008 1009 uint64_t curval;
1009 1010 int error;
1010 1011
1011 1012 error = dsl_prop_set_int(osname, propname,
1012 1013 (inherit ? ZPROP_SRC_NONE : ZPROP_SRC_LOCAL), value);
1013 1014
1014 1015 if (error == ENOSPC) {
1015 1016 ztest_record_enospc(FTAG);
1016 1017 return (error);
1017 1018 }
1018 1019 ASSERT0(error);
1019 1020
1020 1021 VERIFY0(dsl_prop_get_integer(osname, propname, &curval, setpoint));
1021 1022
1022 1023 if (ztest_opts.zo_verbose >= 6) {
1023 1024 VERIFY(zfs_prop_index_to_string(prop, curval, &valname) == 0);
1024 1025 (void) printf("%s %s = %s at '%s'\n",
1025 1026 osname, propname, valname, setpoint);
1026 1027 }
1027 1028
1028 1029 return (error);
1029 1030 }
1030 1031
1031 1032 static int
1032 1033 ztest_spa_prop_set_uint64(zpool_prop_t prop, uint64_t value)
1033 1034 {
1034 1035 spa_t *spa = ztest_spa;
1035 1036 nvlist_t *props = NULL;
1036 1037 int error;
1037 1038
1038 1039 VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0);
1039 1040 VERIFY(nvlist_add_uint64(props, zpool_prop_to_name(prop), value) == 0);
1040 1041
1041 1042 error = spa_prop_set(spa, props);
1042 1043
1043 1044 nvlist_free(props);
1044 1045
1045 1046 if (error == ENOSPC) {
1046 1047 ztest_record_enospc(FTAG);
1047 1048 return (error);
1048 1049 }
1049 1050 ASSERT0(error);
1050 1051
1051 1052 return (error);
1052 1053 }
1053 1054
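/*
 * The rll_t routines below implement a minimal reader/writer lock from
 * a mutex and a condition variable: readers wait for any writer to
 * leave, a writer waits for everyone, and the last holder broadcasts.
 */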
1054 1055 static void
1055 1056 ztest_rll_init(rll_t *rll)
1056 1057 {
1057 1058 rll->rll_writer = NULL;
1058 1059 rll->rll_readers = 0;
1059 1060 VERIFY(_mutex_init(&rll->rll_lock, USYNC_THREAD, NULL) == 0);
1060 1061 VERIFY(cond_init(&rll->rll_cv, USYNC_THREAD, NULL) == 0);
1061 1062 }
1062 1063
1063 1064 static void
1064 1065 ztest_rll_destroy(rll_t *rll)
1065 1066 {
1066 1067 ASSERT(rll->rll_writer == NULL);
1067 1068 ASSERT(rll->rll_readers == 0);
1068 1069 VERIFY(_mutex_destroy(&rll->rll_lock) == 0);
1069 1070 VERIFY(cond_destroy(&rll->rll_cv) == 0);
1070 1071 }
1071 1072
1072 1073 static void
1073 1074 ztest_rll_lock(rll_t *rll, rl_type_t type)
1074 1075 {
1075 1076 VERIFY(mutex_lock(&rll->rll_lock) == 0);
1076 1077
1077 1078 if (type == RL_READER) {
1078 1079 while (rll->rll_writer != NULL)
1079 1080 (void) cond_wait(&rll->rll_cv, &rll->rll_lock);
1080 1081 rll->rll_readers++;
1081 1082 } else {
1082 1083 while (rll->rll_writer != NULL || rll->rll_readers)
1083 1084 (void) cond_wait(&rll->rll_cv, &rll->rll_lock);
1084 1085 rll->rll_writer = curthread;
1085 1086 }
1086 1087
1087 1088 VERIFY(mutex_unlock(&rll->rll_lock) == 0);
1088 1089 }
1089 1090
1090 1091 static void
1091 1092 ztest_rll_unlock(rll_t *rll)
1092 1093 {
1093 1094 VERIFY(mutex_lock(&rll->rll_lock) == 0);
1094 1095
1095 1096 if (rll->rll_writer) {
1096 1097 ASSERT(rll->rll_readers == 0);
1097 1098 rll->rll_writer = NULL;
1098 1099 } else {
1099 1100 ASSERT(rll->rll_readers != 0);
1100 1101 ASSERT(rll->rll_writer == NULL);
1101 1102 rll->rll_readers--;
1102 1103 }
1103 1104
1104 1105 if (rll->rll_writer == NULL && rll->rll_readers == 0)
1105 1106 VERIFY(cond_broadcast(&rll->rll_cv) == 0);
1106 1107
1107 1108 VERIFY(mutex_unlock(&rll->rll_lock) == 0);
1108 1109 }
1109 1110
1110 1111 static void
1111 1112 ztest_object_lock(ztest_ds_t *zd, uint64_t object, rl_type_t type)
1112 1113 {
1113 1114 rll_t *rll = &zd->zd_object_lock[object & (ZTEST_OBJECT_LOCKS - 1)];
1114 1115
1115 1116 ztest_rll_lock(rll, type);
1116 1117 }
1117 1118
1118 1119 static void
1119 1120 ztest_object_unlock(ztest_ds_t *zd, uint64_t object)
1120 1121 {
1121 1122 rll_t *rll = &zd->zd_object_lock[object & (ZTEST_OBJECT_LOCKS - 1)];
1122 1123
1123 1124 ztest_rll_unlock(rll);
1124 1125 }
1125 1126
1126 1127 static rl_t *
1127 1128 ztest_range_lock(ztest_ds_t *zd, uint64_t object, uint64_t offset,
1128 1129 uint64_t size, rl_type_t type)
1129 1130 {
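	/*
	 * Hash (object, offset) onto one of the dataset's range locks.
	 * The modulus uses ZTEST_RANGE_LOCKS + 1, a non-power-of-two,
	 * presumably so block-aligned offsets don't all collapse into
	 * the same few buckets after the mask below.
	 */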
1130 1131 uint64_t hash = object ^ (offset % (ZTEST_RANGE_LOCKS + 1));
1131 1132 rll_t *rll = &zd->zd_range_lock[hash & (ZTEST_RANGE_LOCKS - 1)];
1132 1133 rl_t *rl;
1133 1134
1134 1135 rl = umem_alloc(sizeof (*rl), UMEM_NOFAIL);
1135 1136 rl->rl_object = object;
1136 1137 rl->rl_offset = offset;
1137 1138 rl->rl_size = size;
1138 1139 rl->rl_lock = rll;
1139 1140
1140 1141 ztest_rll_lock(rll, type);
1141 1142
1142 1143 return (rl);
1143 1144 }
1144 1145
1145 1146 static void
1146 1147 ztest_range_unlock(rl_t *rl)
1147 1148 {
1148 1149 rll_t *rll = rl->rl_lock;
1149 1150
1150 1151 ztest_rll_unlock(rll);
1151 1152
1152 1153 umem_free(rl, sizeof (*rl));
1153 1154 }
1154 1155
1155 1156 static void
1156 1157 ztest_zd_init(ztest_ds_t *zd, ztest_shared_ds_t *szd, objset_t *os)
1157 1158 {
1158 1159 zd->zd_os = os;
1159 1160 zd->zd_zilog = dmu_objset_zil(os);
1160 1161 zd->zd_shared = szd;
1161 1162 dmu_objset_name(os, zd->zd_name);
1162 1163
1163 1164 if (zd->zd_shared != NULL)
1164 1165 zd->zd_shared->zd_seq = 0;
1165 1166
1166 1167 VERIFY(rwlock_init(&zd->zd_zilog_lock, USYNC_THREAD, NULL) == 0);
1167 1168 VERIFY(_mutex_init(&zd->zd_dirobj_lock, USYNC_THREAD, NULL) == 0);
1168 1169
1169 1170 for (int l = 0; l < ZTEST_OBJECT_LOCKS; l++)
1170 1171 ztest_rll_init(&zd->zd_object_lock[l]);
1171 1172
1172 1173 for (int l = 0; l < ZTEST_RANGE_LOCKS; l++)
1173 1174 ztest_rll_init(&zd->zd_range_lock[l]);
1174 1175 }
1175 1176
1176 1177 static void
1177 1178 ztest_zd_fini(ztest_ds_t *zd)
1178 1179 {
1179 1180 VERIFY(_mutex_destroy(&zd->zd_dirobj_lock) == 0);
1180 1181
1181 1182 for (int l = 0; l < ZTEST_OBJECT_LOCKS; l++)
1182 1183 ztest_rll_destroy(&zd->zd_object_lock[l]);
1183 1184
1184 1185 for (int l = 0; l < ZTEST_RANGE_LOCKS; l++)
1185 1186 ztest_rll_destroy(&zd->zd_range_lock[l]);
1186 1187 }
1187 1188
1188 1189 #define TXG_MIGHTWAIT (ztest_random(10) == 0 ? TXG_NOWAIT : TXG_WAIT)
1189 1190
1190 1191 static uint64_t
1191 1192 ztest_tx_assign(dmu_tx_t *tx, uint64_t txg_how, const char *tag)
1192 1193 {
1193 1194 uint64_t txg;
1194 1195 int error;
1195 1196
1196 1197 /*
1197 1198 * Attempt to assign tx to some transaction group.
1198 1199 */
1199 1200 error = dmu_tx_assign(tx, txg_how);
1200 1201 if (error) {
1201 1202 if (error == ERESTART) {
1202 1203 ASSERT(txg_how == TXG_NOWAIT);
1203 1204 dmu_tx_wait(tx);
1204 1205 } else {
1205 1206 ASSERT3U(error, ==, ENOSPC);
1206 1207 ztest_record_enospc(tag);
1207 1208 }
1208 1209 dmu_tx_abort(tx);
1209 1210 return (0);
1210 1211 }
1211 1212 txg = dmu_tx_get_txg(tx);
1212 1213 ASSERT(txg != 0);
1213 1214 return (txg);
1214 1215 }
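/*
 * Typical caller pattern (as used by the replay functions below); note
 * that on failure ztest_tx_assign() has already aborted the tx:
 *
 *	tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, offset, length);
 *	txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
 *	if (txg == 0)
 *		return (ENOSPC);
 *	... modify the object ...
 *	dmu_tx_commit(tx);
 */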
1215 1216
1216 1217 static void
1217 1218 ztest_pattern_set(void *buf, uint64_t size, uint64_t value)
1218 1219 {
1219 1220 uint64_t *ip = buf;
1220 1221 uint64_t *ip_end = (uint64_t *)((uintptr_t)buf + (uintptr_t)size);
1221 1222
1222 1223 while (ip < ip_end)
1223 1224 *ip++ = value;
1224 1225 }
1225 1226
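/*
 * Return B_TRUE iff every 64-bit word in buf equals value; OR-ing the
 * differences together avoids a branch per word.
 */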
1226 1227 static boolean_t
1227 1228 ztest_pattern_match(void *buf, uint64_t size, uint64_t value)
1228 1229 {
1229 1230 uint64_t *ip = buf;
1230 1231 uint64_t *ip_end = (uint64_t *)((uintptr_t)buf + (uintptr_t)size);
1231 1232 uint64_t diff = 0;
1232 1233
1233 1234 while (ip < ip_end)
1234 1235 diff |= (value - *ip++);
1235 1236
1236 1237 return (diff == 0);
1237 1238 }
1238 1239
1239 1240 static void
1240 1241 ztest_bt_generate(ztest_block_tag_t *bt, objset_t *os, uint64_t object,
1241 1242 uint64_t offset, uint64_t gen, uint64_t txg, uint64_t crtxg)
1242 1243 {
1243 1244 bt->bt_magic = BT_MAGIC;
1244 1245 bt->bt_objset = dmu_objset_id(os);
1245 1246 bt->bt_object = object;
1246 1247 bt->bt_offset = offset;
1247 1248 bt->bt_gen = gen;
1248 1249 bt->bt_txg = txg;
1249 1250 bt->bt_crtxg = crtxg;
1250 1251 }
1251 1252
1252 1253 static void
1253 1254 ztest_bt_verify(ztest_block_tag_t *bt, objset_t *os, uint64_t object,
1254 1255 uint64_t offset, uint64_t gen, uint64_t txg, uint64_t crtxg)
1255 1256 {
1256 1257 ASSERT(bt->bt_magic == BT_MAGIC);
1257 1258 ASSERT(bt->bt_objset == dmu_objset_id(os));
1258 1259 ASSERT(bt->bt_object == object);
1259 1260 ASSERT(bt->bt_offset == offset);
1260 1261 ASSERT(bt->bt_gen <= gen);
1261 1262 ASSERT(bt->bt_txg <= txg);
1262 1263 ASSERT(bt->bt_crtxg == crtxg);
1263 1264 }
1264 1265
1265 1266 static ztest_block_tag_t *
1266 1267 ztest_bt_bonus(dmu_buf_t *db)
1267 1268 {
1268 1269 dmu_object_info_t doi;
1269 1270 ztest_block_tag_t *bt;
1270 1271
1271 1272 dmu_object_info_from_db(db, &doi);
1272 1273 ASSERT3U(doi.doi_bonus_size, <=, db->db_size);
1273 1274 ASSERT3U(doi.doi_bonus_size, >=, sizeof (*bt));
1274 1275 bt = (void *)((char *)db->db_data + doi.doi_bonus_size - sizeof (*bt));
1275 1276
1276 1277 return (bt);
1277 1278 }
1278 1279
1279 1280 /*
1280 1281 * ZIL logging ops
1281 1282 */
1282 1283
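/*
 * ztest repurposes lr_create_t fields it doesn't otherwise need to
 * carry object creation parameters (type, blocksize, indirect shift,
 * bonus type and length) through the ZIL.
 */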
1283 1284 #define lrz_type lr_mode
1284 1285 #define lrz_blocksize lr_uid
1285 1286 #define lrz_ibshift lr_gid
1286 1287 #define lrz_bonustype lr_rdev
1287 1288 #define lrz_bonuslen lr_crtime[1]
1288 1289
1289 1290 static void
1290 1291 ztest_log_create(ztest_ds_t *zd, dmu_tx_t *tx, lr_create_t *lr)
1291 1292 {
1292 1293 char *name = (void *)(lr + 1); /* name follows lr */
1293 1294 size_t namesize = strlen(name) + 1;
1294 1295 itx_t *itx;
1295 1296
1296 1297 if (zil_replaying(zd->zd_zilog, tx))
1297 1298 return;
1298 1299
1299 1300 itx = zil_itx_create(TX_CREATE, sizeof (*lr) + namesize);
1300 1301 bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
1301 1302 sizeof (*lr) + namesize - sizeof (lr_t));
1302 1303
1303 1304 zil_itx_assign(zd->zd_zilog, itx, tx);
1304 1305 }
1305 1306
1306 1307 static void
1307 1308 ztest_log_remove(ztest_ds_t *zd, dmu_tx_t *tx, lr_remove_t *lr, uint64_t object)
1308 1309 {
1309 1310 char *name = (void *)(lr + 1); /* name follows lr */
1310 1311 size_t namesize = strlen(name) + 1;
1311 1312 itx_t *itx;
1312 1313
1313 1314 if (zil_replaying(zd->zd_zilog, tx))
1314 1315 return;
1315 1316
1316 1317 itx = zil_itx_create(TX_REMOVE, sizeof (*lr) + namesize);
1317 1318 bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
1318 1319 sizeof (*lr) + namesize - sizeof (lr_t));
1319 1320
1320 1321 itx->itx_oid = object;
1321 1322 zil_itx_assign(zd->zd_zilog, itx, tx);
1322 1323 }
1323 1324
1324 1325 static void
1325 1326 ztest_log_write(ztest_ds_t *zd, dmu_tx_t *tx, lr_write_t *lr)
1326 1327 {
1327 1328 itx_t *itx;
1328 1329 itx_wr_state_t write_state = ztest_random(WR_NUM_STATES);
1329 1330
1330 1331 if (zil_replaying(zd->zd_zilog, tx))
1331 1332 return;
1332 1333
1333 1334 if (lr->lr_length > ZIL_MAX_LOG_DATA)
1334 1335 write_state = WR_INDIRECT;
1335 1336
1336 1337 itx = zil_itx_create(TX_WRITE,
1337 1338 sizeof (*lr) + (write_state == WR_COPIED ? lr->lr_length : 0));
1338 1339
1339 1340 if (write_state == WR_COPIED &&
1340 1341 dmu_read(zd->zd_os, lr->lr_foid, lr->lr_offset, lr->lr_length,
1341 1342 ((lr_write_t *)&itx->itx_lr) + 1, DMU_READ_NO_PREFETCH) != 0) {
1342 1343 zil_itx_destroy(itx);
1343 1344 itx = zil_itx_create(TX_WRITE, sizeof (*lr));
1344 1345 write_state = WR_NEED_COPY;
1345 1346 }
1346 1347 itx->itx_private = zd;
1347 1348 itx->itx_wr_state = write_state;
1348 1349 itx->itx_sync = (ztest_random(8) == 0);
1349 1350 itx->itx_sod += (write_state == WR_NEED_COPY ? lr->lr_length : 0);
1350 1351
1351 1352 bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
1352 1353 sizeof (*lr) - sizeof (lr_t));
1353 1354
1354 1355 zil_itx_assign(zd->zd_zilog, itx, tx);
1355 1356 }
1356 1357
1357 1358 static void
1358 1359 ztest_log_truncate(ztest_ds_t *zd, dmu_tx_t *tx, lr_truncate_t *lr)
1359 1360 {
1360 1361 itx_t *itx;
1361 1362
1362 1363 if (zil_replaying(zd->zd_zilog, tx))
1363 1364 return;
1364 1365
1365 1366 itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr));
1366 1367 bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
1367 1368 sizeof (*lr) - sizeof (lr_t));
1368 1369
1369 1370 itx->itx_sync = B_FALSE;
1370 1371 zil_itx_assign(zd->zd_zilog, itx, tx);
1371 1372 }
1372 1373
1373 1374 static void
1374 1375 ztest_log_setattr(ztest_ds_t *zd, dmu_tx_t *tx, lr_setattr_t *lr)
1375 1376 {
1376 1377 itx_t *itx;
1377 1378
1378 1379 if (zil_replaying(zd->zd_zilog, tx))
1379 1380 return;
1380 1381
1381 1382 itx = zil_itx_create(TX_SETATTR, sizeof (*lr));
1382 1383 bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
1383 1384 sizeof (*lr) - sizeof (lr_t));
1384 1385
1385 1386 itx->itx_sync = B_FALSE;
1386 1387 zil_itx_assign(zd->zd_zilog, itx, tx);
1387 1388 }
1388 1389
1389 1390 /*
1390 1391 * ZIL replay ops
1391 1392 */
1392 1393 static int
1393 1394 ztest_replay_create(ztest_ds_t *zd, lr_create_t *lr, boolean_t byteswap)
1394 1395 {
1395 1396 char *name = (void *)(lr + 1); /* name follows lr */
1396 1397 objset_t *os = zd->zd_os;
1397 1398 ztest_block_tag_t *bbt;
1398 1399 dmu_buf_t *db;
1399 1400 dmu_tx_t *tx;
1400 1401 uint64_t txg;
1401 1402 int error = 0;
1402 1403
1403 1404 if (byteswap)
1404 1405 byteswap_uint64_array(lr, sizeof (*lr));
1405 1406
1406 1407 ASSERT(lr->lr_doid == ZTEST_DIROBJ);
1407 1408 ASSERT(name[0] != '\0');
1408 1409
1409 1410 tx = dmu_tx_create(os);
1410 1411
1411 1412 dmu_tx_hold_zap(tx, lr->lr_doid, B_TRUE, name);
1412 1413
1413 1414 if (lr->lrz_type == DMU_OT_ZAP_OTHER) {
1414 1415 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
1415 1416 } else {
1416 1417 dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
1417 1418 }
1418 1419
1419 1420 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
1420 1421 if (txg == 0)
1421 1422 return (ENOSPC);
1422 1423
1423 1424 ASSERT(dmu_objset_zil(os)->zl_replay == !!lr->lr_foid);
1424 1425
1425 1426 if (lr->lrz_type == DMU_OT_ZAP_OTHER) {
1426 1427 if (lr->lr_foid == 0) {
1427 1428 lr->lr_foid = zap_create(os,
1428 1429 lr->lrz_type, lr->lrz_bonustype,
1429 1430 lr->lrz_bonuslen, tx);
1430 1431 } else {
1431 1432 error = zap_create_claim(os, lr->lr_foid,
1432 1433 lr->lrz_type, lr->lrz_bonustype,
1433 1434 lr->lrz_bonuslen, tx);
1434 1435 }
1435 1436 } else {
1436 1437 if (lr->lr_foid == 0) {
1437 1438 lr->lr_foid = dmu_object_alloc(os,
1438 1439 lr->lrz_type, 0, lr->lrz_bonustype,
1439 1440 lr->lrz_bonuslen, tx);
1440 1441 } else {
1441 1442 error = dmu_object_claim(os, lr->lr_foid,
1442 1443 lr->lrz_type, 0, lr->lrz_bonustype,
1443 1444 lr->lrz_bonuslen, tx);
1444 1445 }
1445 1446 }
1446 1447
1447 1448 if (error) {
1448 1449 ASSERT3U(error, ==, EEXIST);
1449 1450 ASSERT(zd->zd_zilog->zl_replay);
1450 1451 dmu_tx_commit(tx);
1451 1452 return (error);
1452 1453 }
1453 1454
1454 1455 ASSERT(lr->lr_foid != 0);
1455 1456
1456 1457 if (lr->lrz_type != DMU_OT_ZAP_OTHER)
1457 1458 VERIFY3U(0, ==, dmu_object_set_blocksize(os, lr->lr_foid,
1458 1459 lr->lrz_blocksize, lr->lrz_ibshift, tx));
1459 1460
1460 1461 VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));
1461 1462 bbt = ztest_bt_bonus(db);
1462 1463 dmu_buf_will_dirty(db, tx);
1463 1464 ztest_bt_generate(bbt, os, lr->lr_foid, -1ULL, lr->lr_gen, txg, txg);
1464 1465 dmu_buf_rele(db, FTAG);
1465 1466
1466 1467 VERIFY3U(0, ==, zap_add(os, lr->lr_doid, name, sizeof (uint64_t), 1,
1467 1468 &lr->lr_foid, tx));
1468 1469
1469 1470 (void) ztest_log_create(zd, tx, lr);
1470 1471
1471 1472 dmu_tx_commit(tx);
1472 1473
1473 1474 return (0);
1474 1475 }
1475 1476
1476 1477 static int
1477 1478 ztest_replay_remove(ztest_ds_t *zd, lr_remove_t *lr, boolean_t byteswap)
1478 1479 {
1479 1480 char *name = (void *)(lr + 1); /* name follows lr */
1480 1481 objset_t *os = zd->zd_os;
1481 1482 dmu_object_info_t doi;
1482 1483 dmu_tx_t *tx;
1483 1484 uint64_t object, txg;
1484 1485
1485 1486 if (byteswap)
1486 1487 byteswap_uint64_array(lr, sizeof (*lr));
1487 1488
1488 1489 ASSERT(lr->lr_doid == ZTEST_DIROBJ);
1489 1490 ASSERT(name[0] != '\0');
1490 1491
1491 1492 VERIFY3U(0, ==,
1492 1493 zap_lookup(os, lr->lr_doid, name, sizeof (object), 1, &object));
1493 1494 ASSERT(object != 0);
1494 1495
1495 1496 ztest_object_lock(zd, object, RL_WRITER);
1496 1497
1497 1498 VERIFY3U(0, ==, dmu_object_info(os, object, &doi));
1498 1499
1499 1500 tx = dmu_tx_create(os);
1500 1501
1501 1502 dmu_tx_hold_zap(tx, lr->lr_doid, B_FALSE, name);
1502 1503 dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
1503 1504
1504 1505 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
1505 1506 if (txg == 0) {
1506 1507 ztest_object_unlock(zd, object);
1507 1508 return (ENOSPC);
1508 1509 }
1509 1510
1510 1511 if (doi.doi_type == DMU_OT_ZAP_OTHER) {
1511 1512 VERIFY3U(0, ==, zap_destroy(os, object, tx));
1512 1513 } else {
1513 1514 VERIFY3U(0, ==, dmu_object_free(os, object, tx));
1514 1515 }
1515 1516
1516 1517 VERIFY3U(0, ==, zap_remove(os, lr->lr_doid, name, tx));
1517 1518
1518 1519 (void) ztest_log_remove(zd, tx, lr, object);
1519 1520
1520 1521 dmu_tx_commit(tx);
1521 1522
1522 1523 ztest_object_unlock(zd, object);
1523 1524
1524 1525 return (0);
1525 1526 }
1526 1527
1527 1528 static int
1528 1529 ztest_replay_write(ztest_ds_t *zd, lr_write_t *lr, boolean_t byteswap)
1529 1530 {
1530 1531 objset_t *os = zd->zd_os;
1531 1532 void *data = lr + 1; /* data follows lr */
1532 1533 uint64_t offset, length;
1533 1534 ztest_block_tag_t *bt = data;
1534 1535 ztest_block_tag_t *bbt;
1535 1536 uint64_t gen, txg, lrtxg, crtxg;
1536 1537 dmu_object_info_t doi;
1537 1538 dmu_tx_t *tx;
1538 1539 dmu_buf_t *db;
1539 1540 arc_buf_t *abuf = NULL;
1540 1541 rl_t *rl;
1541 1542
1542 1543 if (byteswap)
1543 1544 byteswap_uint64_array(lr, sizeof (*lr));
1544 1545
1545 1546 offset = lr->lr_offset;
1546 1547 length = lr->lr_length;
1547 1548
1548 1549 /* If it's a dmu_sync() block, write the whole block */
1549 1550 if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
1550 1551 uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
1551 1552 if (length < blocksize) {
1552 1553 offset -= offset % blocksize;
1553 1554 length = blocksize;
1554 1555 }
1555 1556 }
1556 1557
1557 1558 if (bt->bt_magic == BSWAP_64(BT_MAGIC))
1558 1559 byteswap_uint64_array(bt, sizeof (*bt));
1559 1560
1560 1561 if (bt->bt_magic != BT_MAGIC)
1561 1562 bt = NULL;
1562 1563
1563 1564 ztest_object_lock(zd, lr->lr_foid, RL_READER);
1564 1565 rl = ztest_range_lock(zd, lr->lr_foid, offset, length, RL_WRITER);
1565 1566
1566 1567 VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));
1567 1568
1568 1569 dmu_object_info_from_db(db, &doi);
1569 1570
1570 1571 bbt = ztest_bt_bonus(db);
1571 1572 ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
1572 1573 gen = bbt->bt_gen;
1573 1574 crtxg = bbt->bt_crtxg;
1574 1575 lrtxg = lr->lr_common.lrc_txg;
1575 1576
1576 1577 tx = dmu_tx_create(os);
1577 1578
1578 1579 dmu_tx_hold_write(tx, lr->lr_foid, offset, length);
1579 1580
1580 1581 if (ztest_random(8) == 0 && length == doi.doi_data_block_size &&
1581 1582 P2PHASE(offset, length) == 0)
1582 1583 abuf = dmu_request_arcbuf(db, length);
1583 1584
1584 1585 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
1585 1586 if (txg == 0) {
1586 1587 if (abuf != NULL)
1587 1588 dmu_return_arcbuf(abuf);
1588 1589 dmu_buf_rele(db, FTAG);
1589 1590 ztest_range_unlock(rl);
1590 1591 ztest_object_unlock(zd, lr->lr_foid);
1591 1592 return (ENOSPC);
1592 1593 }
1593 1594
1594 1595 if (bt != NULL) {
1595 1596 /*
1596 1597 * Usually, verify the old data before writing new data --
1597 1598 * but not always, because we also want to verify correct
1598 1599 * behavior when the data was not recently read into cache.
1599 1600 */
1600 1601 ASSERT(offset % doi.doi_data_block_size == 0);
1601 1602 if (ztest_random(4) != 0) {
1602 1603 int prefetch = ztest_random(2) ?
1603 1604 DMU_READ_PREFETCH : DMU_READ_NO_PREFETCH;
1604 1605 ztest_block_tag_t rbt;
1605 1606
1606 1607 VERIFY(dmu_read(os, lr->lr_foid, offset,
1607 1608 sizeof (rbt), &rbt, prefetch) == 0);
1608 1609 if (rbt.bt_magic == BT_MAGIC) {
1609 1610 ztest_bt_verify(&rbt, os, lr->lr_foid,
1610 1611 offset, gen, txg, crtxg);
1611 1612 }
1612 1613 }
1613 1614
1614 1615 /*
1615 1616 * Writes can appear to be newer than the bonus buffer because
1616 1617 * the ztest_get_data() callback does a dmu_read() of the
1617 1618 * open-context data, which may be different than the data
1618 1619 * as it was when the write was generated.
1619 1620 */
1620 1621 if (zd->zd_zilog->zl_replay) {
1621 1622 ztest_bt_verify(bt, os, lr->lr_foid, offset,
1622 1623 MAX(gen, bt->bt_gen), MAX(txg, lrtxg),
1623 1624 bt->bt_crtxg);
1624 1625 }
1625 1626
1626 1627 /*
1627 1628 * Set the bt's gen/txg to the bonus buffer's gen/txg
1628 1629 * so that all of the usual ASSERTs will work.
1629 1630 */
1630 1631 ztest_bt_generate(bt, os, lr->lr_foid, offset, gen, txg, crtxg);
1631 1632 }
1632 1633
1633 1634 if (abuf == NULL) {
1634 1635 dmu_write(os, lr->lr_foid, offset, length, data, tx);
1635 1636 } else {
1636 1637 bcopy(data, abuf->b_data, length);
1637 1638 dmu_assign_arcbuf(db, offset, abuf, tx);
1638 1639 }
1639 1640
1640 1641 (void) ztest_log_write(zd, tx, lr);
1641 1642
1642 1643 dmu_buf_rele(db, FTAG);
1643 1644
1644 1645 dmu_tx_commit(tx);
1645 1646
1646 1647 ztest_range_unlock(rl);
1647 1648 ztest_object_unlock(zd, lr->lr_foid);
1648 1649
1649 1650 return (0);
1650 1651 }
1651 1652
1652 1653 static int
1653 1654 ztest_replay_truncate(ztest_ds_t *zd, lr_truncate_t *lr, boolean_t byteswap)
1654 1655 {
1655 1656 objset_t *os = zd->zd_os;
1656 1657 dmu_tx_t *tx;
1657 1658 uint64_t txg;
1658 1659 rl_t *rl;
1659 1660
1660 1661 if (byteswap)
1661 1662 byteswap_uint64_array(lr, sizeof (*lr));
1662 1663
1663 1664 ztest_object_lock(zd, lr->lr_foid, RL_READER);
1664 1665 rl = ztest_range_lock(zd, lr->lr_foid, lr->lr_offset, lr->lr_length,
1665 1666 RL_WRITER);
1666 1667
1667 1668 tx = dmu_tx_create(os);
1668 1669
1669 1670 dmu_tx_hold_free(tx, lr->lr_foid, lr->lr_offset, lr->lr_length);
1670 1671
1671 1672 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
1672 1673 if (txg == 0) {
1673 1674 ztest_range_unlock(rl);
1674 1675 ztest_object_unlock(zd, lr->lr_foid);
1675 1676 return (ENOSPC);
1676 1677 }
1677 1678
1678 1679 VERIFY(dmu_free_range(os, lr->lr_foid, lr->lr_offset,
1679 1680 lr->lr_length, tx) == 0);
1680 1681
1681 1682 (void) ztest_log_truncate(zd, tx, lr);
1682 1683
1683 1684 dmu_tx_commit(tx);
1684 1685
1685 1686 ztest_range_unlock(rl);
1686 1687 ztest_object_unlock(zd, lr->lr_foid);
1687 1688
1688 1689 return (0);
1689 1690 }
1690 1691
1691 1692 static int
1692 1693 ztest_replay_setattr(ztest_ds_t *zd, lr_setattr_t *lr, boolean_t byteswap)
1693 1694 {
1694 1695 objset_t *os = zd->zd_os;
1695 1696 dmu_tx_t *tx;
1696 1697 dmu_buf_t *db;
1697 1698 ztest_block_tag_t *bbt;
1698 1699 uint64_t txg, lrtxg, crtxg;
1699 1700
1700 1701 if (byteswap)
1701 1702 byteswap_uint64_array(lr, sizeof (*lr));
1702 1703
1703 1704 ztest_object_lock(zd, lr->lr_foid, RL_WRITER);
1704 1705
1705 1706 VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));
1706 1707
1707 1708 tx = dmu_tx_create(os);
1708 1709 dmu_tx_hold_bonus(tx, lr->lr_foid);
1709 1710
1710 1711 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
1711 1712 if (txg == 0) {
1712 1713 dmu_buf_rele(db, FTAG);
1713 1714 ztest_object_unlock(zd, lr->lr_foid);
1714 1715 return (ENOSPC);
1715 1716 }
1716 1717
1717 1718 bbt = ztest_bt_bonus(db);
1718 1719 ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
1719 1720 crtxg = bbt->bt_crtxg;
1720 1721 lrtxg = lr->lr_common.lrc_txg;
1721 1722
1722 1723 if (zd->zd_zilog->zl_replay) {
1723 1724 ASSERT(lr->lr_size != 0);
1724 1725 ASSERT(lr->lr_mode != 0);
1725 1726 ASSERT(lrtxg != 0);
1726 1727 } else {
1727 1728 /*
1728 1729 * Randomly change the size and increment the generation.
1729 1730 */
1730 1731 lr->lr_size = (ztest_random(db->db_size / sizeof (*bbt)) + 1) *
1731 1732 sizeof (*bbt);
1732 1733 lr->lr_mode = bbt->bt_gen + 1;
1733 1734 ASSERT(lrtxg == 0);
1734 1735 }
1735 1736
1736 1737 /*
1737 1738 * Verify that the current bonus buffer is not newer than our txg.
1738 1739 */
1739 1740 ztest_bt_verify(bbt, os, lr->lr_foid, -1ULL, lr->lr_mode,
1740 1741 MAX(txg, lrtxg), crtxg);
1741 1742
1742 1743 dmu_buf_will_dirty(db, tx);
1743 1744
1744 1745 ASSERT3U(lr->lr_size, >=, sizeof (*bbt));
1745 1746 ASSERT3U(lr->lr_size, <=, db->db_size);
1746 1747 VERIFY0(dmu_set_bonus(db, lr->lr_size, tx));
1747 1748 bbt = ztest_bt_bonus(db);
1748 1749
1749 1750 ztest_bt_generate(bbt, os, lr->lr_foid, -1ULL, lr->lr_mode, txg, crtxg);
1750 1751
1751 1752 dmu_buf_rele(db, FTAG);
1752 1753
1753 1754 (void) ztest_log_setattr(zd, tx, lr);
1754 1755
1755 1756 dmu_tx_commit(tx);
1756 1757
1757 1758 ztest_object_unlock(zd, lr->lr_foid);
1758 1759
1759 1760 return (0);
1760 1761 }
1761 1762
1762 1763 zil_replay_func_t *ztest_replay_vector[TX_MAX_TYPE] = {
1763 1764 NULL, /* 0 no such transaction type */
1764 1765 ztest_replay_create, /* TX_CREATE */
1765 1766 NULL, /* TX_MKDIR */
1766 1767 NULL, /* TX_MKXATTR */
1767 1768 NULL, /* TX_SYMLINK */
1768 1769 ztest_replay_remove, /* TX_REMOVE */
1769 1770 NULL, /* TX_RMDIR */
1770 1771 NULL, /* TX_LINK */
1771 1772 NULL, /* TX_RENAME */
1772 1773 ztest_replay_write, /* TX_WRITE */
1773 1774 ztest_replay_truncate, /* TX_TRUNCATE */
1774 1775 ztest_replay_setattr, /* TX_SETATTR */
1775 1776 NULL, /* TX_ACL */
1776 1777 NULL, /* TX_CREATE_ACL */
1777 1778 NULL, /* TX_CREATE_ATTR */
1778 1779 NULL, /* TX_CREATE_ACL_ATTR */
1779 1780 NULL, /* TX_MKDIR_ACL */
1780 1781 NULL, /* TX_MKDIR_ATTR */
1781 1782 NULL, /* TX_MKDIR_ACL_ATTR */
1782 1783 NULL, /* TX_WRITE2 */
1783 1784 };
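/*
 * How the table above is consumed (see ztest_zil_remount() below):
 * zil_replay() walks the on-disk intent log and dispatches each record
 * through this vector, e.g.
 *
 *	zil_replay(os, zd, ztest_replay_vector);
 *
 * NULL entries are transaction types that ztest never generates, so they
 * need no replay handler.
 */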
1784 1785
1785 1786 /*
1786 1787 * ZIL get_data callbacks
1787 1788 */
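/*
 * A rough outline of the contract (our reading of the ZIL interface):
 * ztest_get_data() is the get_data callback passed to zil_open(). When
 * the ZIL commits a TX_WRITE record it either passes a non-NULL buf,
 * asking us to copy the write's data into the log record itself, or a
 * NULL buf, asking us to dmu_sync() the dirty block and store its block
 * pointer in the record instead.
 */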
1788 1789
1789 1790 static void
1790 1791 ztest_get_done(zgd_t *zgd, int error)
1791 1792 {
1792 1793 ztest_ds_t *zd = zgd->zgd_private;
1793 1794 uint64_t object = zgd->zgd_rl->rl_object;
1794 1795
1795 1796 if (zgd->zgd_db)
1796 1797 dmu_buf_rele(zgd->zgd_db, zgd);
1797 1798
1798 1799 ztest_range_unlock(zgd->zgd_rl);
1799 1800 ztest_object_unlock(zd, object);
1800 1801
1801 1802 if (error == 0 && zgd->zgd_bp)
1802 1803 zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);
1803 1804
1804 1805 umem_free(zgd, sizeof (*zgd));
1805 1806 }
1806 1807
1807 1808 static int
1808 1809 ztest_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
1809 1810 {
1810 1811 ztest_ds_t *zd = arg;
1811 1812 objset_t *os = zd->zd_os;
1812 1813 uint64_t object = lr->lr_foid;
1813 1814 uint64_t offset = lr->lr_offset;
1814 1815 uint64_t size = lr->lr_length;
1815 1816 blkptr_t *bp = &lr->lr_blkptr;
1816 1817 uint64_t txg = lr->lr_common.lrc_txg;
1817 1818 uint64_t crtxg;
1818 1819 dmu_object_info_t doi;
1819 1820 dmu_buf_t *db;
1820 1821 zgd_t *zgd;
1821 1822 int error;
1822 1823
1823 1824 ztest_object_lock(zd, object, RL_READER);
1824 1825 error = dmu_bonus_hold(os, object, FTAG, &db);
1825 1826 if (error) {
1826 1827 ztest_object_unlock(zd, object);
1827 1828 return (error);
1828 1829 }
1829 1830
1830 1831 crtxg = ztest_bt_bonus(db)->bt_crtxg;
1831 1832
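	/*
	 * If the object was created after this write was logged (or its
	 * bonus buffer was never initialized), the block the record refers
	 * to is gone, so report ENOENT rather than sync stale data.
	 */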
1832 1833 if (crtxg == 0 || crtxg > txg) {
1833 1834 dmu_buf_rele(db, FTAG);
1834 1835 ztest_object_unlock(zd, object);
1835 1836 return (ENOENT);
1836 1837 }
1837 1838
1838 1839 dmu_object_info_from_db(db, &doi);
1839 1840 dmu_buf_rele(db, FTAG);
1840 1841 db = NULL;
1841 1842
1842 1843 zgd = umem_zalloc(sizeof (*zgd), UMEM_NOFAIL);
1843 1844 zgd->zgd_zilog = zd->zd_zilog;
1844 1845 zgd->zgd_private = zd;
1845 1846
1846 1847 if (buf != NULL) { /* immediate write */
1847 1848 zgd->zgd_rl = ztest_range_lock(zd, object, offset, size,
1848 1849 RL_READER);
1849 1850
1850 1851 error = dmu_read(os, object, offset, size, buf,
1851 1852 DMU_READ_NO_PREFETCH);
1852 1853 ASSERT(error == 0);
1853 1854 } else {
1854 1855 size = doi.doi_data_block_size;
1855 1856 if (ISP2(size)) {
1856 1857 offset = P2ALIGN(offset, size);
1857 1858 } else {
1858 1859 ASSERT(offset < size);
1859 1860 offset = 0;
1860 1861 }
1861 1862
1862 1863 zgd->zgd_rl = ztest_range_lock(zd, object, offset, size,
1863 1864 RL_READER);
1864 1865
1865 1866 error = dmu_buf_hold(os, object, offset, zgd, &db,
1866 1867 DMU_READ_NO_PREFETCH);
1867 1868
1868 1869 if (error == 0) {
1869 1870 blkptr_t *obp = dmu_buf_get_blkptr(db);
1870 1871 if (obp) {
1871 1872 ASSERT(BP_IS_HOLE(bp));
1872 1873 *bp = *obp;
1873 1874 }
1874 1875
1875 1876 zgd->zgd_db = db;
1876 1877 zgd->zgd_bp = bp;
1877 1878
1878 1879 ASSERT(db->db_offset == offset);
1879 1880 ASSERT(db->db_size == size);
1880 1881
1881 1882 error = dmu_sync(zio, lr->lr_common.lrc_txg,
1882 1883 ztest_get_done, zgd);
1883 1884
1884 1885 if (error == 0)
1885 1886 return (0);
1886 1887 }
1887 1888 }
1888 1889
1889 1890 ztest_get_done(zgd, error);
1890 1891
1891 1892 return (error);
1892 1893 }
1893 1894
1894 1895 static void *
1895 1896 ztest_lr_alloc(size_t lrsize, char *name)
1896 1897 {
1897 1898 char *lr;
1898 1899 size_t namesize = name ? strlen(name) + 1 : 0;
1899 1900
1900 1901 lr = umem_zalloc(lrsize + namesize, UMEM_NOFAIL);
1901 1902
1902 1903 if (name)
1903 1904 bcopy(name, lr + lrsize, namesize);
1904 1905
1905 1906 return (lr);
1906 1907 }
1907 1908
1908 1909 void
1909 1910 ztest_lr_free(void *lr, size_t lrsize, char *name)
1910 1911 {
1911 1912 size_t namesize = name ? strlen(name) + 1 : 0;
1912 1913
1913 1914 umem_free(lr, lrsize + namesize);
1914 1915 }
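/*
 * Allocation and free must agree on the name so the sizes match; a
 * minimal usage sketch, mirroring ztest_create() below:
 *
 *	lr_create_t *lr = ztest_lr_alloc(sizeof (*lr), od->od_name);
 *	... fill in and replay the record ...
 *	ztest_lr_free(lr, sizeof (*lr), od->od_name);
 */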
1915 1916
1916 1917 /*
1917 1918 * Lookup a bunch of objects. Returns the number of objects not found.
1918 1919 */
1919 1920 static int
1920 1921 ztest_lookup(ztest_ds_t *zd, ztest_od_t *od, int count)
1921 1922 {
1922 1923 int missing = 0;
1923 1924 int error;
1924 1925
1925 1926 ASSERT(_mutex_held(&zd->zd_dirobj_lock));
1926 1927
1927 1928 for (int i = 0; i < count; i++, od++) {
1928 1929 od->od_object = 0;
1929 1930 error = zap_lookup(zd->zd_os, od->od_dir, od->od_name,
1930 1931 sizeof (uint64_t), 1, &od->od_object);
1931 1932 if (error) {
1932 1933 ASSERT(error == ENOENT);
1933 1934 ASSERT(od->od_object == 0);
1934 1935 missing++;
1935 1936 } else {
1936 1937 dmu_buf_t *db;
1937 1938 ztest_block_tag_t *bbt;
1938 1939 dmu_object_info_t doi;
1939 1940
1940 1941 ASSERT(od->od_object != 0);
1941 1942 ASSERT(missing == 0); /* there should be no gaps */
1942 1943
1943 1944 ztest_object_lock(zd, od->od_object, RL_READER);
1944 1945 VERIFY3U(0, ==, dmu_bonus_hold(zd->zd_os,
1945 1946 od->od_object, FTAG, &db));
1946 1947 dmu_object_info_from_db(db, &doi);
1947 1948 bbt = ztest_bt_bonus(db);
1948 1949 ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
1949 1950 od->od_type = doi.doi_type;
1950 1951 od->od_blocksize = doi.doi_data_block_size;
1951 1952 od->od_gen = bbt->bt_gen;
1952 1953 dmu_buf_rele(db, FTAG);
1953 1954 ztest_object_unlock(zd, od->od_object);
1954 1955 }
1955 1956 }
1956 1957
1957 1958 return (missing);
1958 1959 }
1959 1960
1960 1961 static int
1961 1962 ztest_create(ztest_ds_t *zd, ztest_od_t *od, int count)
1962 1963 {
1963 1964 int missing = 0;
1964 1965
1965 1966 ASSERT(_mutex_held(&zd->zd_dirobj_lock));
1966 1967
1967 1968 for (int i = 0; i < count; i++, od++) {
1968 1969 if (missing) {
1969 1970 od->od_object = 0;
1970 1971 missing++;
1971 1972 continue;
1972 1973 }
1973 1974
1974 1975 lr_create_t *lr = ztest_lr_alloc(sizeof (*lr), od->od_name);
1975 1976
1976 1977 lr->lr_doid = od->od_dir;
1977 1978 lr->lr_foid = 0; /* 0 to allocate, > 0 to claim */
1978 1979 lr->lrz_type = od->od_crtype;
1979 1980 lr->lrz_blocksize = od->od_crblocksize;
1980 1981 lr->lrz_ibshift = ztest_random_ibshift();
1981 1982 lr->lrz_bonustype = DMU_OT_UINT64_OTHER;
1982 1983 lr->lrz_bonuslen = dmu_bonus_max();
1983 1984 lr->lr_gen = od->od_crgen;
1984 1985 lr->lr_crtime[0] = time(NULL);
1985 1986
1986 1987 if (ztest_replay_create(zd, lr, B_FALSE) != 0) {
1987 1988 ASSERT(missing == 0);
1988 1989 od->od_object = 0;
1989 1990 missing++;
1990 1991 } else {
1991 1992 od->od_object = lr->lr_foid;
1992 1993 od->od_type = od->od_crtype;
1993 1994 od->od_blocksize = od->od_crblocksize;
1994 1995 od->od_gen = od->od_crgen;
1995 1996 ASSERT(od->od_object != 0);
1996 1997 }
1997 1998
1998 1999 ztest_lr_free(lr, sizeof (*lr), od->od_name);
1999 2000 }
2000 2001
2001 2002 return (missing);
2002 2003 }
2003 2004
2004 2005 static int
2005 2006 ztest_remove(ztest_ds_t *zd, ztest_od_t *od, int count)
2006 2007 {
2007 2008 int missing = 0;
2008 2009 int error;
2009 2010
2010 2011 ASSERT(_mutex_held(&zd->zd_dirobj_lock));
2011 2012
2012 2013 od += count - 1;
2013 2014
2014 2015 for (int i = count - 1; i >= 0; i--, od--) {
2015 2016 if (missing) {
2016 2017 missing++;
2017 2018 continue;
2018 2019 }
2019 2020
2020 2021 /*
2021 2022	 * No object was found by the lookup; nothing to remove.
2022 2023 */
2023 2024 if (od->od_object == 0)
2024 2025 continue;
2025 2026
2026 2027 lr_remove_t *lr = ztest_lr_alloc(sizeof (*lr), od->od_name);
2027 2028
2028 2029 lr->lr_doid = od->od_dir;
2029 2030
2030 2031 if ((error = ztest_replay_remove(zd, lr, B_FALSE)) != 0) {
2031 2032 ASSERT3U(error, ==, ENOSPC);
2032 2033 missing++;
2033 2034 } else {
2034 2035 od->od_object = 0;
2035 2036 }
2036 2037 ztest_lr_free(lr, sizeof (*lr), od->od_name);
2037 2038 }
2038 2039
2039 2040 return (missing);
2040 2041 }
2041 2042
2042 2043 static int
2043 2044 ztest_write(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size,
2044 2045 void *data)
2045 2046 {
2046 2047 lr_write_t *lr;
2047 2048 int error;
2048 2049
2049 2050 lr = ztest_lr_alloc(sizeof (*lr) + size, NULL);
2050 2051
2051 2052 lr->lr_foid = object;
2052 2053 lr->lr_offset = offset;
2053 2054 lr->lr_length = size;
2054 2055 lr->lr_blkoff = 0;
2055 2056 BP_ZERO(&lr->lr_blkptr);
2056 2057
2057 2058 bcopy(data, lr + 1, size);
2058 2059
2059 2060 error = ztest_replay_write(zd, lr, B_FALSE);
2060 2061
2061 2062 ztest_lr_free(lr, sizeof (*lr) + size, NULL);
2062 2063
2063 2064 return (error);
2064 2065 }
2065 2066
2066 2067 static int
2067 2068 ztest_truncate(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size)
2068 2069 {
2069 2070 lr_truncate_t *lr;
2070 2071 int error;
2071 2072
2072 2073 lr = ztest_lr_alloc(sizeof (*lr), NULL);
2073 2074
2074 2075 lr->lr_foid = object;
2075 2076 lr->lr_offset = offset;
2076 2077 lr->lr_length = size;
2077 2078
2078 2079 error = ztest_replay_truncate(zd, lr, B_FALSE);
2079 2080
2080 2081 ztest_lr_free(lr, sizeof (*lr), NULL);
2081 2082
2082 2083 return (error);
2083 2084 }
2084 2085
2085 2086 static int
2086 2087 ztest_setattr(ztest_ds_t *zd, uint64_t object)
2087 2088 {
2088 2089 lr_setattr_t *lr;
2089 2090 int error;
2090 2091
2091 2092 lr = ztest_lr_alloc(sizeof (*lr), NULL);
2092 2093
2093 2094 lr->lr_foid = object;
2094 2095 lr->lr_size = 0;
2095 2096 lr->lr_mode = 0;
2096 2097
2097 2098 error = ztest_replay_setattr(zd, lr, B_FALSE);
2098 2099
2099 2100 ztest_lr_free(lr, sizeof (*lr), NULL);
2100 2101
2101 2102 return (error);
2102 2103 }
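/*
 * Note that ztest_write(), ztest_truncate() and ztest_setattr() above all
 * drive the same ztest_replay_*() functions used during log replay (with
 * byteswap B_FALSE), so each operation is exercised through a single code
 * path whether it originates live or from replay.
 */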
2103 2104
2104 2105 static void
2105 2106 ztest_prealloc(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size)
2106 2107 {
2107 2108 objset_t *os = zd->zd_os;
2108 2109 dmu_tx_t *tx;
2109 2110 uint64_t txg;
2110 2111 rl_t *rl;
2111 2112
2112 2113 txg_wait_synced(dmu_objset_pool(os), 0);
2113 2114
2114 2115 ztest_object_lock(zd, object, RL_READER);
2115 2116 rl = ztest_range_lock(zd, object, offset, size, RL_WRITER);
2116 2117
2117 2118 tx = dmu_tx_create(os);
2118 2119
2119 2120 dmu_tx_hold_write(tx, object, offset, size);
2120 2121
2121 2122 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
2122 2123
2123 2124 if (txg != 0) {
2124 2125 dmu_prealloc(os, object, offset, size, tx);
2125 2126 dmu_tx_commit(tx);
2126 2127 txg_wait_synced(dmu_objset_pool(os), txg);
2127 2128 } else {
2128 2129 (void) dmu_free_long_range(os, object, offset, size);
2129 2130 }
2130 2131
2131 2132 ztest_range_unlock(rl);
2132 2133 ztest_object_unlock(zd, object);
2133 2134 }
2134 2135
2135 2136 static void
2136 2137 ztest_io(ztest_ds_t *zd, uint64_t object, uint64_t offset)
2137 2138 {
2138 2139 int err;
2139 2140 ztest_block_tag_t wbt;
2140 2141 dmu_object_info_t doi;
2141 2142 enum ztest_io_type io_type;
2142 2143 uint64_t blocksize;
2143 2144 void *data;
2144 2145
2145 2146 VERIFY(dmu_object_info(zd->zd_os, object, &doi) == 0);
2146 2147 blocksize = doi.doi_data_block_size;
2147 2148 data = umem_alloc(blocksize, UMEM_NOFAIL);
2148 2149
2149 2150 /*
2150 2151 * Pick an i/o type at random, biased toward writing block tags.
2151 2152 */
2152 2153 io_type = ztest_random(ZTEST_IO_TYPES);
2153 2154 if (ztest_random(2) == 0)
2154 2155 io_type = ZTEST_IO_WRITE_TAG;
2155 2156
2156 2157 (void) rw_rdlock(&zd->zd_zilog_lock);
2157 2158
2158 2159 switch (io_type) {
2159 2160
2160 2161 case ZTEST_IO_WRITE_TAG:
2161 2162 ztest_bt_generate(&wbt, zd->zd_os, object, offset, 0, 0, 0);
2162 2163 (void) ztest_write(zd, object, offset, sizeof (wbt), &wbt);
2163 2164 break;
2164 2165
2165 2166 case ZTEST_IO_WRITE_PATTERN:
2166 2167 (void) memset(data, 'a' + (object + offset) % 5, blocksize);
2167 2168 if (ztest_random(2) == 0) {
2168 2169 /*
2169 2170 * Induce fletcher2 collisions to ensure that
2170 2171 * zio_ddt_collision() detects and resolves them
2171 2172 * when using fletcher2-verify for deduplication.
2172 2173 */
2173 2174 ((uint64_t *)data)[0] ^= 1ULL << 63;
2174 2175 ((uint64_t *)data)[4] ^= 1ULL << 63;
2175 2176 }
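		/*
		 * Why this collides (a sketch, assuming the standard
		 * fletcher_2 recurrence over 64-bit words in two lanes):
		 * words 0 and 4 land in the same lane, so the simple sum
		 * changes by 2^63 + 2^63 == 0 (mod 2^64), and the running
		 * sum changes by an even multiple of 2^63, which is also
		 * 0 (mod 2^64). The checksum is therefore unchanged; only
		 * a verifying checksum catches the difference.
		 */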
2176 2177 (void) ztest_write(zd, object, offset, blocksize, data);
2177 2178 break;
2178 2179
2179 2180 case ZTEST_IO_WRITE_ZEROES:
2180 2181 bzero(data, blocksize);
2181 2182 (void) ztest_write(zd, object, offset, blocksize, data);
2182 2183 break;
2183 2184
2184 2185 case ZTEST_IO_TRUNCATE:
2185 2186 (void) ztest_truncate(zd, object, offset, blocksize);
2186 2187 break;
2187 2188
2188 2189 case ZTEST_IO_SETATTR:
2189 2190 (void) ztest_setattr(zd, object);
2190 2191 break;
2191 2192
2192 2193 case ZTEST_IO_REWRITE:
2193 2194 (void) rw_rdlock(&ztest_name_lock);
2194 2195 err = ztest_dsl_prop_set_uint64(zd->zd_name,
2195 2196 ZFS_PROP_CHECKSUM, spa_dedup_checksum(ztest_spa),
2196 2197 B_FALSE);
2197 2198 VERIFY(err == 0 || err == ENOSPC);
2198 2199 err = ztest_dsl_prop_set_uint64(zd->zd_name,
2199 2200 ZFS_PROP_COMPRESSION,
2200 2201 ztest_random_dsl_prop(ZFS_PROP_COMPRESSION),
2201 2202 B_FALSE);
2202 2203 VERIFY(err == 0 || err == ENOSPC);
2203 2204 (void) rw_unlock(&ztest_name_lock);
2204 2205
2205 2206 VERIFY0(dmu_read(zd->zd_os, object, offset, blocksize, data,
2206 2207 DMU_READ_NO_PREFETCH));
2207 2208
2208 2209 (void) ztest_write(zd, object, offset, blocksize, data);
2209 2210 break;
2210 2211 }
2211 2212
2212 2213 (void) rw_unlock(&zd->zd_zilog_lock);
2213 2214
2214 2215 umem_free(data, blocksize);
2215 2216 }
2216 2217
2217 2218 /*
2218 2219 * Initialize an object description template.
2219 2220 */
2220 2221 static void
2221 2222 ztest_od_init(ztest_od_t *od, uint64_t id, char *tag, uint64_t index,
2222 2223 dmu_object_type_t type, uint64_t blocksize, uint64_t gen)
2223 2224 {
2224 2225 od->od_dir = ZTEST_DIROBJ;
2225 2226 od->od_object = 0;
2226 2227
2227 2228 od->od_crtype = type;
2228 2229 od->od_crblocksize = blocksize ? blocksize : ztest_random_blocksize();
2229 2230 od->od_crgen = gen;
2230 2231
2231 2232 od->od_type = DMU_OT_NONE;
2232 2233 od->od_blocksize = 0;
2233 2234 od->od_gen = 0;
2234 2235
2235 2236 (void) snprintf(od->od_name, sizeof (od->od_name), "%s(%lld)[%llu]",
2236 2237 tag, (int64_t)id, index);
2237 2238 }
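/*
 * The od_name produced above has the form "tag(id)[index]" (FTAG expands
 * to the calling function's name), so a hypothetical
 * ztest_od_init(&od, 7, FTAG, 2, DMU_OT_UINT64_OTHER, 0, 0) called from
 * ztest_dmu_read_write() would yield "ztest_dmu_read_write(7)[2]".
 */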
2238 2239
2239 2240 /*
2240 2241 * Lookup or create the objects for a test using the od template.
2241 2242 * If the objects do not all exist, or if 'remove' is specified,
2242 2243 * remove any existing objects and create new ones. Otherwise,
2243 2244 * use the existing objects.
2244 2245 */
2245 2246 static int
2246 2247 ztest_object_init(ztest_ds_t *zd, ztest_od_t *od, size_t size, boolean_t remove)
2247 2248 {
2248 2249 int count = size / sizeof (*od);
2249 2250 int rv = 0;
2250 2251
2251 2252 VERIFY(mutex_lock(&zd->zd_dirobj_lock) == 0);
2252 2253 if ((ztest_lookup(zd, od, count) != 0 || remove) &&
2253 2254 (ztest_remove(zd, od, count) != 0 ||
2254 2255 ztest_create(zd, od, count) != 0))
2255 2256 rv = -1;
2256 2257 zd->zd_od = od;
2257 2258 VERIFY(mutex_unlock(&zd->zd_dirobj_lock) == 0);
2258 2259
2259 2260 return (rv);
2260 2261 }
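/*
 * A minimal caller sketch (this is the pattern ztest_dmu_object_alloc_free()
 * and friends use below):
 *
 *	ztest_od_t od[4];
 *
 *	for (int b = 0; b < 4; b++)
 *		ztest_od_init(&od[b], id, FTAG, b, DMU_OT_UINT64_OTHER, 0, 0);
 *	if (ztest_object_init(zd, od, sizeof (od), B_TRUE) != 0)
 *		return;
 */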
2261 2262
2262 2263 /* ARGSUSED */
2263 2264 void
2264 2265 ztest_zil_commit(ztest_ds_t *zd, uint64_t id)
2265 2266 {
2266 2267 zilog_t *zilog = zd->zd_zilog;
2267 2268
2268 2269 (void) rw_rdlock(&zd->zd_zilog_lock);
2269 2270
2270 2271 zil_commit(zilog, ztest_random(ZTEST_OBJECTS));
2271 2272
2272 2273 /*
2273 2274 * Remember the committed values in zd, which is in parent/child
2274 2275 * shared memory. If we die, the next iteration of ztest_run()
2275 2276 * will verify that the log really does contain this record.
2276 2277 */
2277 2278 mutex_enter(&zilog->zl_lock);
2278 2279 ASSERT(zd->zd_shared != NULL);
2279 2280 ASSERT3U(zd->zd_shared->zd_seq, <=, zilog->zl_commit_lr_seq);
2280 2281 zd->zd_shared->zd_seq = zilog->zl_commit_lr_seq;
2281 2282 mutex_exit(&zilog->zl_lock);
2282 2283
2283 2284 (void) rw_unlock(&zd->zd_zilog_lock);
2284 2285 }
2285 2286
2286 2287 /*
2287 2288 * This function is designed to simulate the operations that occur during a
2288 2289 * mount/unmount operation. We hold the dataset across these operations in an
2289 2290 * attempt to expose any implicit assumptions about ZIL management.
2290 2291 */
2291 2292 /* ARGSUSED */
2292 2293 void
2293 2294 ztest_zil_remount(ztest_ds_t *zd, uint64_t id)
2294 2295 {
2295 2296 objset_t *os = zd->zd_os;
2296 2297
2297 2298 /*
2298 2299 * We grab the zd_dirobj_lock to ensure that no other thread is
2299 2300 * updating the zil (i.e. adding in-memory log records) and the
2300 2301 * zd_zilog_lock to block any I/O.
2301 2302 */
2302 2303 VERIFY0(mutex_lock(&zd->zd_dirobj_lock));
2303 2304 (void) rw_wrlock(&zd->zd_zilog_lock);
2304 2305
2305 2306 /* zfsvfs_teardown() */
2306 2307 zil_close(zd->zd_zilog);
2307 2308
2308 2309 /* zfsvfs_setup() */
2309 2310 VERIFY(zil_open(os, ztest_get_data) == zd->zd_zilog);
2310 2311 zil_replay(os, zd, ztest_replay_vector);
2311 2312
2312 2313 (void) rw_unlock(&zd->zd_zilog_lock);
2313 2314 VERIFY(mutex_unlock(&zd->zd_dirobj_lock) == 0);
2314 2315 }
2315 2316
2316 2317 /*
2317 2318 * Verify that we can't destroy an active pool, create an existing pool,
2318 2319 * or create a pool with a bad vdev spec.
2319 2320 */
2320 2321 /* ARGSUSED */
2321 2322 void
2322 2323 ztest_spa_create_destroy(ztest_ds_t *zd, uint64_t id)
2323 2324 {
2324 2325 ztest_shared_opts_t *zo = &ztest_opts;
2325 2326 spa_t *spa;
2326 2327 nvlist_t *nvroot;
2327 2328
2328 2329 /*
2329 2330 * Attempt to create using a bad file.
2330 2331 */
2331 2332 nvroot = make_vdev_root("/dev/bogus", NULL, NULL, 0, 0, 0, 0, 0, 1);
2332 2333 VERIFY3U(ENOENT, ==,
2333 2334 spa_create("ztest_bad_file", nvroot, NULL, NULL));
2334 2335 nvlist_free(nvroot);
2335 2336
2336 2337 /*
2337 2338 * Attempt to create using a bad mirror.
2338 2339 */
2339 2340 nvroot = make_vdev_root("/dev/bogus", NULL, NULL, 0, 0, 0, 0, 2, 1);
2340 2341 VERIFY3U(ENOENT, ==,
2341 2342 spa_create("ztest_bad_mirror", nvroot, NULL, NULL));
2342 2343 nvlist_free(nvroot);
2343 2344
2344 2345 /*
2345 2346 * Attempt to create an existing pool. It shouldn't matter
2346 2347 * what's in the nvroot; we should fail with EEXIST.
2347 2348 */
2348 2349 (void) rw_rdlock(&ztest_name_lock);
2349 2350 nvroot = make_vdev_root("/dev/bogus", NULL, NULL, 0, 0, 0, 0, 0, 1);
2350 2351 VERIFY3U(EEXIST, ==, spa_create(zo->zo_pool, nvroot, NULL, NULL));
2351 2352 nvlist_free(nvroot);
2352 2353 VERIFY3U(0, ==, spa_open(zo->zo_pool, &spa, FTAG));
2353 2354 VERIFY3U(EBUSY, ==, spa_destroy(zo->zo_pool));
2354 2355 spa_close(spa, FTAG);
2355 2356
2356 2357 (void) rw_unlock(&ztest_name_lock);
2357 2358 }
2358 2359
2359 2360 /* ARGSUSED */
2360 2361 void
2361 2362 ztest_spa_upgrade(ztest_ds_t *zd, uint64_t id)
2362 2363 {
2363 2364 spa_t *spa;
2364 2365 uint64_t initial_version = SPA_VERSION_INITIAL;
2365 2366 uint64_t version, newversion;
2366 2367 nvlist_t *nvroot, *props;
2367 2368 char *name;
2368 2369
2369 2370 VERIFY0(mutex_lock(&ztest_vdev_lock));
2370 2371 name = kmem_asprintf("%s_upgrade", ztest_opts.zo_pool);
2371 2372
2372 2373 /*
2373 2374 * Clean up from previous runs.
2374 2375 */
2375 2376 (void) spa_destroy(name);
2376 2377
2377 2378 nvroot = make_vdev_root(NULL, NULL, name, ztest_opts.zo_vdev_size, 0,
2378 2379 0, ztest_opts.zo_raidz, ztest_opts.zo_mirrors, 1);
2379 2380
2380 2381 /*
2381 2382 * If we're configuring a RAIDZ device then make sure that the
2382 2383 	 * initial version is capable of supporting that feature.
2383 2384 */
2384 2385 switch (ztest_opts.zo_raidz_parity) {
2385 2386 case 0:
2386 2387 case 1:
2387 2388 initial_version = SPA_VERSION_INITIAL;
2388 2389 break;
2389 2390 case 2:
2390 2391 initial_version = SPA_VERSION_RAIDZ2;
2391 2392 break;
2392 2393 case 3:
2393 2394 initial_version = SPA_VERSION_RAIDZ3;
2394 2395 break;
2395 2396 }
2396 2397
2397 2398 /*
2398 2399 * Create a pool with a spa version that can be upgraded. Pick
2399 2400 * a value between initial_version and SPA_VERSION_BEFORE_FEATURES.
2400 2401 */
2401 2402 do {
2402 2403 version = ztest_random_spa_version(initial_version);
2403 2404 } while (version > SPA_VERSION_BEFORE_FEATURES);
2404 2405
2405 2406 props = fnvlist_alloc();
2406 2407 fnvlist_add_uint64(props,
2407 2408 zpool_prop_to_name(ZPOOL_PROP_VERSION), version);
2408 2409 VERIFY0(spa_create(name, nvroot, props, NULL));
2409 2410 fnvlist_free(nvroot);
2410 2411 fnvlist_free(props);
2411 2412
2412 2413 VERIFY0(spa_open(name, &spa, FTAG));
2413 2414 VERIFY3U(spa_version(spa), ==, version);
2414 2415 newversion = ztest_random_spa_version(version + 1);
2415 2416
2416 2417 if (ztest_opts.zo_verbose >= 4) {
2417 2418 (void) printf("upgrading spa version from %llu to %llu\n",
2418 2419 (u_longlong_t)version, (u_longlong_t)newversion);
2419 2420 }
2420 2421
2421 2422 spa_upgrade(spa, newversion);
2422 2423 VERIFY3U(spa_version(spa), >, version);
2423 2424 VERIFY3U(spa_version(spa), ==, fnvlist_lookup_uint64(spa->spa_config,
2424 2425 zpool_prop_to_name(ZPOOL_PROP_VERSION)));
2425 2426 spa_close(spa, FTAG);
2426 2427
2427 2428 strfree(name);
2428 2429 VERIFY0(mutex_unlock(&ztest_vdev_lock));
2429 2430 }
2430 2431
2431 2432 static vdev_t *
2432 2433 vdev_lookup_by_path(vdev_t *vd, const char *path)
2433 2434 {
2434 2435 vdev_t *mvd;
2435 2436
2436 2437 if (vd->vdev_path != NULL && strcmp(path, vd->vdev_path) == 0)
2437 2438 return (vd);
2438 2439
2439 2440 for (int c = 0; c < vd->vdev_children; c++)
2440 2441 if ((mvd = vdev_lookup_by_path(vd->vdev_child[c], path)) !=
2441 2442 NULL)
2442 2443 return (mvd);
2443 2444
2444 2445 return (NULL);
2445 2446 }
2446 2447
2447 2448 /*
2448 2449 * Find the first available hole which can be used as a top-level.
2449 2450 */
2450 2451 int
2451 2452 find_vdev_hole(spa_t *spa)
2452 2453 {
2453 2454 vdev_t *rvd = spa->spa_root_vdev;
2454 2455 int c;
2455 2456
2456 2457 ASSERT(spa_config_held(spa, SCL_VDEV, RW_READER) == SCL_VDEV);
2457 2458
2458 2459 for (c = 0; c < rvd->vdev_children; c++) {
2459 2460 vdev_t *cvd = rvd->vdev_child[c];
2460 2461
2461 2462 if (cvd->vdev_ishole)
2462 2463 break;
2463 2464 }
2464 2465 return (c);
2465 2466 }
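/*
 * Note that if no hole exists the loop above falls off the end and we
 * return rvd->vdev_children, i.e. new top-level vdevs are appended.
 */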
2466 2467
2467 2468 /*
2468 2469 * Verify that vdev_add() works as expected.
2469 2470 */
2470 2471 /* ARGSUSED */
2471 2472 void
2472 2473 ztest_vdev_add_remove(ztest_ds_t *zd, uint64_t id)
2473 2474 {
2474 2475 ztest_shared_t *zs = ztest_shared;
2475 2476 spa_t *spa = ztest_spa;
2476 2477 uint64_t leaves;
2477 2478 uint64_t guid;
2478 2479 nvlist_t *nvroot;
2479 2480 int error;
2480 2481
2481 2482 VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
2482 2483 leaves = MAX(zs->zs_mirrors + zs->zs_splits, 1) * ztest_opts.zo_raidz;
2483 2484
2484 2485 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
2485 2486
2486 2487 ztest_shared->zs_vdev_next_leaf = find_vdev_hole(spa) * leaves;
2487 2488
2488 2489 /*
2489 2490 	 * If we have slogs then remove one of them 1/4 of the time.
2490 2491 */
2491 2492 if (spa_has_slogs(spa) && ztest_random(4) == 0) {
2492 2493 /*
2493 2494 * Grab the guid from the head of the log class rotor.
2494 2495 */
2495 2496 guid = spa_log_class(spa)->mc_rotor->mg_vd->vdev_guid;
2496 2497
2497 2498 spa_config_exit(spa, SCL_VDEV, FTAG);
2498 2499
2499 2500 /*
2500 2501 * We have to grab the zs_name_lock as writer to
2501 2502 * prevent a race between removing a slog (dmu_objset_find)
2502 2503 * and destroying a dataset. Removing the slog will
2503 2504 * grab a reference on the dataset which may cause
2504 2505 * dmu_objset_destroy() to fail with EBUSY thus
2505 2506 * leaving the dataset in an inconsistent state.
2506 2507 */
2507 2508 VERIFY(rw_wrlock(&ztest_name_lock) == 0);
2508 2509 error = spa_vdev_remove(spa, guid, B_FALSE);
2509 2510 VERIFY(rw_unlock(&ztest_name_lock) == 0);
2510 2511
2511 2512 if (error && error != EEXIST)
2512 2513 fatal(0, "spa_vdev_remove() = %d", error);
2513 2514 } else {
2514 2515 spa_config_exit(spa, SCL_VDEV, FTAG);
2515 2516
2516 2517 /*
2517 2518 * Make 1/4 of the devices be log devices.
2518 2519 */
2519 2520 nvroot = make_vdev_root(NULL, NULL, NULL,
2520 2521 ztest_opts.zo_vdev_size, 0,
2521 2522 ztest_random(4) == 0, ztest_opts.zo_raidz,
2522 2523 zs->zs_mirrors, 1);
2523 2524
2524 2525 error = spa_vdev_add(spa, nvroot);
2525 2526 nvlist_free(nvroot);
2526 2527
2527 2528 if (error == ENOSPC)
2528 2529 ztest_record_enospc("spa_vdev_add");
2529 2530 else if (error != 0)
2530 2531 fatal(0, "spa_vdev_add() = %d", error);
2531 2532 }
2532 2533
2533 2534 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
2534 2535 }
2535 2536
2536 2537 /*
2537 2538 * Verify that adding/removing aux devices (l2arc, hot spare) works as expected.
2538 2539 */
2539 2540 /* ARGSUSED */
2540 2541 void
2541 2542 ztest_vdev_aux_add_remove(ztest_ds_t *zd, uint64_t id)
2542 2543 {
2543 2544 ztest_shared_t *zs = ztest_shared;
2544 2545 spa_t *spa = ztest_spa;
2545 2546 vdev_t *rvd = spa->spa_root_vdev;
2546 2547 spa_aux_vdev_t *sav;
2547 2548 char *aux;
2548 2549 uint64_t guid = 0;
2549 2550 int error;
2550 2551
2551 2552 if (ztest_random(2) == 0) {
2552 2553 sav = &spa->spa_spares;
2553 2554 aux = ZPOOL_CONFIG_SPARES;
2554 2555 } else {
2555 2556 sav = &spa->spa_l2cache;
2556 2557 aux = ZPOOL_CONFIG_L2CACHE;
2557 2558 }
2558 2559
2559 2560 VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
2560 2561
2561 2562 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
2562 2563
2563 2564 if (sav->sav_count != 0 && ztest_random(4) == 0) {
2564 2565 /*
2565 2566 * Pick a random device to remove.
2566 2567 */
2567 2568 guid = sav->sav_vdevs[ztest_random(sav->sav_count)]->vdev_guid;
2568 2569 } else {
2569 2570 /*
2570 2571 * Find an unused device we can add.
2571 2572 */
2572 2573 zs->zs_vdev_aux = 0;
2573 2574 for (;;) {
2574 2575 char path[MAXPATHLEN];
2575 2576 int c;
2576 2577 (void) snprintf(path, sizeof (path), ztest_aux_template,
2577 2578 ztest_opts.zo_dir, ztest_opts.zo_pool, aux,
2578 2579 zs->zs_vdev_aux);
2579 2580 for (c = 0; c < sav->sav_count; c++)
2580 2581 if (strcmp(sav->sav_vdevs[c]->vdev_path,
2581 2582 path) == 0)
2582 2583 break;
2583 2584 if (c == sav->sav_count &&
2584 2585 vdev_lookup_by_path(rvd, path) == NULL)
2585 2586 break;
2586 2587 zs->zs_vdev_aux++;
2587 2588 }
2588 2589 }
2589 2590
2590 2591 spa_config_exit(spa, SCL_VDEV, FTAG);
2591 2592
2592 2593 if (guid == 0) {
2593 2594 /*
2594 2595 * Add a new device.
2595 2596 */
2596 2597 nvlist_t *nvroot = make_vdev_root(NULL, aux, NULL,
2597 2598 (ztest_opts.zo_vdev_size * 5) / 4, 0, 0, 0, 0, 1);
2598 2599 error = spa_vdev_add(spa, nvroot);
2599 2600 if (error != 0)
2600 2601 fatal(0, "spa_vdev_add(%p) = %d", nvroot, error);
2601 2602 nvlist_free(nvroot);
2602 2603 } else {
2603 2604 /*
2604 2605 * Remove an existing device. Sometimes, dirty its
2605 2606 * vdev state first to make sure we handle removal
2606 2607 * of devices that have pending state changes.
2607 2608 */
2608 2609 if (ztest_random(2) == 0)
2609 2610 (void) vdev_online(spa, guid, 0, NULL);
2610 2611
2611 2612 error = spa_vdev_remove(spa, guid, B_FALSE);
2612 2613 if (error != 0 && error != EBUSY)
2613 2614 fatal(0, "spa_vdev_remove(%llu) = %d", guid, error);
2614 2615 }
2615 2616
2616 2617 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
2617 2618 }
2618 2619
2619 2620 /*
2620 2621  * Split a pool if it has mirror top-level vdevs.
2621 2622 */
2622 2623 /* ARGSUSED */
2623 2624 void
2624 2625 ztest_split_pool(ztest_ds_t *zd, uint64_t id)
2625 2626 {
2626 2627 ztest_shared_t *zs = ztest_shared;
2627 2628 spa_t *spa = ztest_spa;
2628 2629 vdev_t *rvd = spa->spa_root_vdev;
2629 2630 nvlist_t *tree, **child, *config, *split, **schild;
2630 2631 uint_t c, children, schildren = 0, lastlogid = 0;
2631 2632 int error = 0;
2632 2633
2633 2634 VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
2634 2635
2635 2636 	/* ensure we have a usable config; mirrors of raidz aren't supported */
2636 2637 if (zs->zs_mirrors < 3 || ztest_opts.zo_raidz > 1) {
2637 2638 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
2638 2639 return;
2639 2640 }
2640 2641
2641 2642 /* clean up the old pool, if any */
2642 2643 (void) spa_destroy("splitp");
2643 2644
2644 2645 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
2645 2646
2646 2647 /* generate a config from the existing config */
2647 2648 mutex_enter(&spa->spa_props_lock);
2648 2649 VERIFY(nvlist_lookup_nvlist(spa->spa_config, ZPOOL_CONFIG_VDEV_TREE,
2649 2650 &tree) == 0);
2650 2651 mutex_exit(&spa->spa_props_lock);
2651 2652
2652 2653 VERIFY(nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
2653 2654 &children) == 0);
2654 2655
2655 2656 schild = malloc(rvd->vdev_children * sizeof (nvlist_t *));
2656 2657 for (c = 0; c < children; c++) {
2657 2658 vdev_t *tvd = rvd->vdev_child[c];
2658 2659 nvlist_t **mchild;
2659 2660 uint_t mchildren;
2660 2661
2661 2662 if (tvd->vdev_islog || tvd->vdev_ops == &vdev_hole_ops) {
2662 2663 VERIFY(nvlist_alloc(&schild[schildren], NV_UNIQUE_NAME,
2663 2664 0) == 0);
2664 2665 VERIFY(nvlist_add_string(schild[schildren],
2665 2666 ZPOOL_CONFIG_TYPE, VDEV_TYPE_HOLE) == 0);
2666 2667 VERIFY(nvlist_add_uint64(schild[schildren],
2667 2668 ZPOOL_CONFIG_IS_HOLE, 1) == 0);
2668 2669 if (lastlogid == 0)
2669 2670 lastlogid = schildren;
2670 2671 ++schildren;
2671 2672 continue;
2672 2673 }
2673 2674 lastlogid = 0;
2674 2675 VERIFY(nvlist_lookup_nvlist_array(child[c],
2675 2676 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
2676 2677 VERIFY(nvlist_dup(mchild[0], &schild[schildren++], 0) == 0);
2677 2678 }
2678 2679
2679 2680 /* OK, create a config that can be used to split */
2680 2681 VERIFY(nvlist_alloc(&split, NV_UNIQUE_NAME, 0) == 0);
2681 2682 VERIFY(nvlist_add_string(split, ZPOOL_CONFIG_TYPE,
2682 2683 VDEV_TYPE_ROOT) == 0);
2683 2684 VERIFY(nvlist_add_nvlist_array(split, ZPOOL_CONFIG_CHILDREN, schild,
2684 2685 lastlogid != 0 ? lastlogid : schildren) == 0);
2685 2686
2686 2687 VERIFY(nvlist_alloc(&config, NV_UNIQUE_NAME, 0) == 0);
2687 2688 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, split) == 0);
2688 2689
2689 2690 for (c = 0; c < schildren; c++)
2690 2691 nvlist_free(schild[c]);
2691 2692 free(schild);
2692 2693 nvlist_free(split);
2693 2694
2694 2695 spa_config_exit(spa, SCL_VDEV, FTAG);
2695 2696
2696 2697 (void) rw_wrlock(&ztest_name_lock);
2697 2698 error = spa_vdev_split_mirror(spa, "splitp", config, NULL, B_FALSE);
2698 2699 (void) rw_unlock(&ztest_name_lock);
2699 2700
2700 2701 nvlist_free(config);
2701 2702
2702 2703 if (error == 0) {
2703 2704 (void) printf("successful split - results:\n");
2704 2705 mutex_enter(&spa_namespace_lock);
2705 2706 show_pool_stats(spa);
2706 2707 show_pool_stats(spa_lookup("splitp"));
2707 2708 mutex_exit(&spa_namespace_lock);
2708 2709 ++zs->zs_splits;
2709 2710 --zs->zs_mirrors;
2710 2711 }
2711 2712 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
2712 2713
2713 2714 }
2714 2715
2715 2716 /*
2716 2717 * Verify that we can attach and detach devices.
2717 2718 */
2718 2719 /* ARGSUSED */
2719 2720 void
2720 2721 ztest_vdev_attach_detach(ztest_ds_t *zd, uint64_t id)
2721 2722 {
2722 2723 ztest_shared_t *zs = ztest_shared;
2723 2724 spa_t *spa = ztest_spa;
2724 2725 spa_aux_vdev_t *sav = &spa->spa_spares;
2725 2726 vdev_t *rvd = spa->spa_root_vdev;
2726 2727 vdev_t *oldvd, *newvd, *pvd;
2727 2728 nvlist_t *root;
2728 2729 uint64_t leaves;
2729 2730 uint64_t leaf, top;
2730 2731 uint64_t ashift = ztest_get_ashift();
2731 2732 uint64_t oldguid, pguid;
2732 2733 size_t oldsize, newsize;
2733 2734 char oldpath[MAXPATHLEN], newpath[MAXPATHLEN];
2734 2735 int replacing;
2735 2736 int oldvd_has_siblings = B_FALSE;
2736 2737 int newvd_is_spare = B_FALSE;
2737 2738 int oldvd_is_log;
2738 2739 int error, expected_error;
2739 2740
2740 2741 VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
2741 2742 leaves = MAX(zs->zs_mirrors, 1) * ztest_opts.zo_raidz;
2742 2743
2743 2744 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
2744 2745
2745 2746 /*
2746 2747 * Decide whether to do an attach or a replace.
2747 2748 */
2748 2749 replacing = ztest_random(2);
2749 2750
2750 2751 /*
2751 2752 * Pick a random top-level vdev.
2752 2753 */
2753 2754 top = ztest_random_vdev_top(spa, B_TRUE);
2754 2755
2755 2756 /*
2756 2757 * Pick a random leaf within it.
2757 2758 */
2758 2759 leaf = ztest_random(leaves);
2759 2760
2760 2761 /*
2761 2762 * Locate this vdev.
2762 2763 */
2763 2764 oldvd = rvd->vdev_child[top];
2764 2765 if (zs->zs_mirrors >= 1) {
2765 2766 ASSERT(oldvd->vdev_ops == &vdev_mirror_ops);
2766 2767 ASSERT(oldvd->vdev_children >= zs->zs_mirrors);
2767 2768 oldvd = oldvd->vdev_child[leaf / ztest_opts.zo_raidz];
2768 2769 }
2769 2770 if (ztest_opts.zo_raidz > 1) {
2770 2771 ASSERT(oldvd->vdev_ops == &vdev_raidz_ops);
2771 2772 ASSERT(oldvd->vdev_children == ztest_opts.zo_raidz);
2772 2773 oldvd = oldvd->vdev_child[leaf % ztest_opts.zo_raidz];
2773 2774 }
2774 2775
2775 2776 /*
2776 2777 * If we're already doing an attach or replace, oldvd may be a
2777 2778 * mirror vdev -- in which case, pick a random child.
2778 2779 */
2779 2780 while (oldvd->vdev_children != 0) {
2780 2781 oldvd_has_siblings = B_TRUE;
2781 2782 ASSERT(oldvd->vdev_children >= 2);
2782 2783 oldvd = oldvd->vdev_child[ztest_random(oldvd->vdev_children)];
2783 2784 }
2784 2785
2785 2786 oldguid = oldvd->vdev_guid;
2786 2787 oldsize = vdev_get_min_asize(oldvd);
2787 2788 oldvd_is_log = oldvd->vdev_top->vdev_islog;
2788 2789 (void) strcpy(oldpath, oldvd->vdev_path);
2789 2790 pvd = oldvd->vdev_parent;
2790 2791 pguid = pvd->vdev_guid;
2791 2792
2792 2793 /*
2793 2794 * If oldvd has siblings, then half of the time, detach it.
2794 2795 */
2795 2796 if (oldvd_has_siblings && ztest_random(2) == 0) {
2796 2797 spa_config_exit(spa, SCL_VDEV, FTAG);
2797 2798 error = spa_vdev_detach(spa, oldguid, pguid, B_FALSE);
2798 2799 if (error != 0 && error != ENODEV && error != EBUSY &&
2799 2800 error != ENOTSUP)
2800 2801 fatal(0, "detach (%s) returned %d", oldpath, error);
2801 2802 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
2802 2803 return;
2803 2804 }
2804 2805
2805 2806 /*
2806 2807 	 * For the new vdev, pick a random hot spare 1/3 of the time (when any
2807 2808 	 * exist); otherwise choose either standard path ('a' or 'b') at random.
2808 2809 */
2809 2810 if (sav->sav_count != 0 && ztest_random(3) == 0) {
2810 2811 newvd = sav->sav_vdevs[ztest_random(sav->sav_count)];
2811 2812 newvd_is_spare = B_TRUE;
2812 2813 (void) strcpy(newpath, newvd->vdev_path);
2813 2814 } else {
2814 2815 (void) snprintf(newpath, sizeof (newpath), ztest_dev_template,
2815 2816 ztest_opts.zo_dir, ztest_opts.zo_pool,
2816 2817 top * leaves + leaf);
2817 2818 if (ztest_random(2) == 0)
2818 2819 newpath[strlen(newpath) - 1] = 'b';
2819 2820 newvd = vdev_lookup_by_path(rvd, newpath);
2820 2821 }
2821 2822
2822 2823 if (newvd) {
2823 2824 newsize = vdev_get_min_asize(newvd);
2824 2825 } else {
2825 2826 /*
2826 2827 * Make newsize a little bigger or smaller than oldsize.
2827 2828 * If it's smaller, the attach should fail.
2828 2829 * If it's larger, and we're doing a replace,
2829 2830 * we should get dynamic LUN growth when we're done.
2830 2831 */
2831 2832 newsize = 10 * oldsize / (9 + ztest_random(3));
2832 2833 }
2833 2834
2834 2835 /*
2835 2836 * If pvd is not a mirror or root, the attach should fail with ENOTSUP,
2836 2837 * unless it's a replace; in that case any non-replacing parent is OK.
2837 2838 *
2838 2839 * If newvd is already part of the pool, it should fail with EBUSY.
2839 2840 *
2840 2841 * If newvd is too small, it should fail with EOVERFLOW.
2841 2842 */
2842 2843 if (pvd->vdev_ops != &vdev_mirror_ops &&
2843 2844 pvd->vdev_ops != &vdev_root_ops && (!replacing ||
2844 2845 pvd->vdev_ops == &vdev_replacing_ops ||
2845 2846 pvd->vdev_ops == &vdev_spare_ops))
2846 2847 expected_error = ENOTSUP;
2847 2848 else if (newvd_is_spare && (!replacing || oldvd_is_log))
2848 2849 expected_error = ENOTSUP;
2849 2850 else if (newvd == oldvd)
2850 2851 expected_error = replacing ? 0 : EBUSY;
2851 2852 else if (vdev_lookup_by_path(rvd, newpath) != NULL)
2852 2853 expected_error = EBUSY;
2853 2854 else if (newsize < oldsize)
2854 2855 expected_error = EOVERFLOW;
2855 2856 else if (ashift > oldvd->vdev_top->vdev_ashift)
2856 2857 expected_error = EDOM;
2857 2858 else
2858 2859 expected_error = 0;
2859 2860
2860 2861 spa_config_exit(spa, SCL_VDEV, FTAG);
2861 2862
2862 2863 /*
2863 2864 * Build the nvlist describing newpath.
2864 2865 */
2865 2866 root = make_vdev_root(newpath, NULL, NULL, newvd == NULL ? newsize : 0,
2866 2867 ashift, 0, 0, 0, 1);
2867 2868
2868 2869 error = spa_vdev_attach(spa, oldguid, root, replacing);
2869 2870
2870 2871 nvlist_free(root);
2871 2872
2872 2873 /*
2873 2874 * If our parent was the replacing vdev, but the replace completed,
2874 2875 * then instead of failing with ENOTSUP we may either succeed,
2875 2876 * fail with ENODEV, or fail with EOVERFLOW.
2876 2877 */
2877 2878 if (expected_error == ENOTSUP &&
2878 2879 (error == 0 || error == ENODEV || error == EOVERFLOW))
2879 2880 expected_error = error;
2880 2881
2881 2882 /*
2882 2883 * If someone grew the LUN, the replacement may be too small.
2883 2884 */
2884 2885 if (error == EOVERFLOW || error == EBUSY)
2885 2886 expected_error = error;
2886 2887
2887 2888 /* XXX workaround 6690467 */
2888 2889 if (error != expected_error && expected_error != EBUSY) {
2889 2890 fatal(0, "attach (%s %llu, %s %llu, %d) "
2890 2891 "returned %d, expected %d",
2891 2892 oldpath, (longlong_t)oldsize, newpath,
2892 2893 (longlong_t)newsize, replacing, error, expected_error);
2893 2894 }
2894 2895
2895 2896 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
2896 2897 }
2897 2898
2898 2899 /*
2899 2900 * Callback function which expands the physical size of the vdev.
2900 2901 */
2901 2902 vdev_t *
2902 2903 grow_vdev(vdev_t *vd, void *arg)
2903 2904 {
2904 2905 spa_t *spa = vd->vdev_spa;
2905 2906 size_t *newsize = arg;
2906 2907 size_t fsize;
2907 2908 int fd;
2908 2909
2909 2910 ASSERT(spa_config_held(spa, SCL_STATE, RW_READER) == SCL_STATE);
2910 2911 ASSERT(vd->vdev_ops->vdev_op_leaf);
2911 2912
2912 2913 if ((fd = open(vd->vdev_path, O_RDWR)) == -1)
2913 2914 return (vd);
2914 2915
2915 2916 fsize = lseek(fd, 0, SEEK_END);
2916 2917 (void) ftruncate(fd, *newsize);
2917 2918
2918 2919 if (ztest_opts.zo_verbose >= 6) {
2919 2920 (void) printf("%s grew from %lu to %lu bytes\n",
2920 2921 vd->vdev_path, (ulong_t)fsize, (ulong_t)*newsize);
2921 2922 }
2922 2923 (void) close(fd);
2923 2924 return (NULL);
2924 2925 }
2925 2926
2926 2927 /*
2927 2928 * Callback function which expands a given vdev by calling vdev_online().
2928 2929 */
2929 2930 /* ARGSUSED */
2930 2931 vdev_t *
2931 2932 online_vdev(vdev_t *vd, void *arg)
2932 2933 {
2933 2934 spa_t *spa = vd->vdev_spa;
2934 2935 vdev_t *tvd = vd->vdev_top;
2935 2936 uint64_t guid = vd->vdev_guid;
2936 2937 uint64_t generation = spa->spa_config_generation + 1;
2937 2938 vdev_state_t newstate = VDEV_STATE_UNKNOWN;
2938 2939 int error;
2939 2940
2940 2941 ASSERT(spa_config_held(spa, SCL_STATE, RW_READER) == SCL_STATE);
2941 2942 ASSERT(vd->vdev_ops->vdev_op_leaf);
2942 2943
2943 2944 /* Calling vdev_online will initialize the new metaslabs */
2944 2945 spa_config_exit(spa, SCL_STATE, spa);
2945 2946 error = vdev_online(spa, guid, ZFS_ONLINE_EXPAND, &newstate);
2946 2947 spa_config_enter(spa, SCL_STATE, spa, RW_READER);
2947 2948
2948 2949 /*
2949 2950 * If vdev_online returned an error or the underlying vdev_open
2950 2951 * failed then we abort the expand. The only way to know that
2951 2952 * vdev_open fails is by checking the returned newstate.
2952 2953 */
2953 2954 if (error || newstate != VDEV_STATE_HEALTHY) {
2954 2955 if (ztest_opts.zo_verbose >= 5) {
2955 2956 (void) printf("Unable to expand vdev, state %llu, "
2956 2957 "error %d\n", (u_longlong_t)newstate, error);
2957 2958 }
2958 2959 return (vd);
2959 2960 }
2960 2961 ASSERT3U(newstate, ==, VDEV_STATE_HEALTHY);
2961 2962
2962 2963 /*
2963 2964 * Since we dropped the lock we need to ensure that we're
2964 2965 * still talking to the original vdev. It's possible this
2965 2966 * vdev may have been detached/replaced while we were
2966 2967 * trying to online it.
2967 2968 */
2968 2969 if (generation != spa->spa_config_generation) {
2969 2970 if (ztest_opts.zo_verbose >= 5) {
2970 2971 (void) printf("vdev configuration has changed, "
2971 2972 "guid %llu, state %llu, expected gen %llu, "
2972 2973 "got gen %llu\n",
2973 2974 (u_longlong_t)guid,
2974 2975 (u_longlong_t)tvd->vdev_state,
2975 2976 (u_longlong_t)generation,
2976 2977 (u_longlong_t)spa->spa_config_generation);
2977 2978 }
2978 2979 return (vd);
2979 2980 }
2980 2981 return (NULL);
2981 2982 }
2982 2983
2983 2984 /*
2984 2985 * Traverse the vdev tree calling the supplied function.
2985 2986 * We continue to walk the tree until we either have walked all
2986 2987 * children or we receive a non-NULL return from the callback.
2987 2988  * If a NULL callback is passed, we simply return the first leaf
2988 2989  * vdev we encounter.
2989 2990 */
2990 2991 vdev_t *
2991 2992 vdev_walk_tree(vdev_t *vd, vdev_t *(*func)(vdev_t *, void *), void *arg)
2992 2993 {
2993 2994 if (vd->vdev_ops->vdev_op_leaf) {
2994 2995 if (func == NULL)
2995 2996 return (vd);
2996 2997 else
2997 2998 return (func(vd, arg));
2998 2999 }
2999 3000
3000 3001 for (uint_t c = 0; c < vd->vdev_children; c++) {
3001 3002 vdev_t *cvd = vd->vdev_child[c];
3002 3003 if ((cvd = vdev_walk_tree(cvd, func, arg)) != NULL)
3003 3004 return (cvd);
3004 3005 }
3005 3006 return (NULL);
3006 3007 }
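/*
 * Typical uses, as in ztest_vdev_LUN_growth() below:
 *
 *	vd = vdev_walk_tree(tvd, NULL, NULL);			(first leaf)
 *	(void) vdev_walk_tree(tvd, grow_vdev, &newsize);
 *	(void) vdev_walk_tree(tvd, online_vdev, NULL);
 *
 * A callback returning non-NULL stops the walk and propagates that vdev.
 */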
3007 3008
3008 3009 /*
3009 3010 * Verify that dynamic LUN growth works as expected.
3010 3011 */
3011 3012 /* ARGSUSED */
3012 3013 void
3013 3014 ztest_vdev_LUN_growth(ztest_ds_t *zd, uint64_t id)
3014 3015 {
3015 3016 spa_t *spa = ztest_spa;
3016 3017 vdev_t *vd, *tvd;
3017 3018 metaslab_class_t *mc;
3018 3019 metaslab_group_t *mg;
3019 3020 size_t psize, newsize;
3020 3021 uint64_t top;
3021 3022 uint64_t old_class_space, new_class_space, old_ms_count, new_ms_count;
3022 3023
3023 3024 VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
3024 3025 spa_config_enter(spa, SCL_STATE, spa, RW_READER);
3025 3026
3026 3027 top = ztest_random_vdev_top(spa, B_TRUE);
3027 3028
3028 3029 tvd = spa->spa_root_vdev->vdev_child[top];
3029 3030 mg = tvd->vdev_mg;
3030 3031 mc = mg->mg_class;
3031 3032 old_ms_count = tvd->vdev_ms_count;
3032 3033 old_class_space = metaslab_class_get_space(mc);
3033 3034
3034 3035 /*
3035 3036 * Determine the size of the first leaf vdev associated with
3036 3037 * our top-level device.
3037 3038 */
3038 3039 vd = vdev_walk_tree(tvd, NULL, NULL);
3039 3040 ASSERT3P(vd, !=, NULL);
3040 3041 ASSERT(vd->vdev_ops->vdev_op_leaf);
3041 3042
3042 3043 psize = vd->vdev_psize;
3043 3044
3044 3045 /*
3045 3046 * We only try to expand the vdev if it's healthy, less than 4x its
3046 3047 * original size, and it has a valid psize.
3047 3048 */
3048 3049 if (tvd->vdev_state != VDEV_STATE_HEALTHY ||
3049 3050 psize == 0 || psize >= 4 * ztest_opts.zo_vdev_size) {
3050 3051 spa_config_exit(spa, SCL_STATE, spa);
3051 3052 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
3052 3053 return;
3053 3054 }
3054 3055 ASSERT(psize > 0);
3055 3056 newsize = psize + psize / 8;
3056 3057 ASSERT3U(newsize, >, psize);
3057 3058
3058 3059 if (ztest_opts.zo_verbose >= 6) {
3059 3060 (void) printf("Expanding LUN %s from %lu to %lu\n",
3060 3061 vd->vdev_path, (ulong_t)psize, (ulong_t)newsize);
3061 3062 }
3062 3063
3063 3064 /*
3064 3065 	 * Growing the vdev is a two-step process:
3065 3066 	 * (1) expand the physical size (i.e. relabel), then
3066 3067 	 * (2) online the vdev to create the new metaslabs.
3067 3068 */
3068 3069 if (vdev_walk_tree(tvd, grow_vdev, &newsize) != NULL ||
3069 3070 vdev_walk_tree(tvd, online_vdev, NULL) != NULL ||
3070 3071 tvd->vdev_state != VDEV_STATE_HEALTHY) {
3071 3072 if (ztest_opts.zo_verbose >= 5) {
3072 3073 (void) printf("Could not expand LUN because "
3073 3074 "the vdev configuration changed.\n");
3074 3075 }
3075 3076 spa_config_exit(spa, SCL_STATE, spa);
3076 3077 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
3077 3078 return;
3078 3079 }
3079 3080
3080 3081 spa_config_exit(spa, SCL_STATE, spa);
3081 3082
3082 3083 /*
3083 3084 * Expanding the LUN will update the config asynchronously,
3084 3085 * thus we must wait for the async thread to complete any
3085 3086 * pending tasks before proceeding.
3086 3087 */
3087 3088 for (;;) {
3088 3089 boolean_t done;
3089 3090 mutex_enter(&spa->spa_async_lock);
3090 3091 done = (spa->spa_async_thread == NULL && !spa->spa_async_tasks);
3091 3092 mutex_exit(&spa->spa_async_lock);
3092 3093 if (done)
3093 3094 break;
3094 3095 txg_wait_synced(spa_get_dsl(spa), 0);
3095 3096 (void) poll(NULL, 0, 100);
3096 3097 }
3097 3098
3098 3099 spa_config_enter(spa, SCL_STATE, spa, RW_READER);
3099 3100
3100 3101 tvd = spa->spa_root_vdev->vdev_child[top];
3101 3102 new_ms_count = tvd->vdev_ms_count;
3102 3103 new_class_space = metaslab_class_get_space(mc);
3103 3104
3104 3105 if (tvd->vdev_mg != mg || mg->mg_class != mc) {
3105 3106 if (ztest_opts.zo_verbose >= 5) {
3106 3107 (void) printf("Could not verify LUN expansion due to "
3107 3108 "intervening vdev offline or remove.\n");
3108 3109 }
3109 3110 spa_config_exit(spa, SCL_STATE, spa);
3110 3111 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
3111 3112 return;
3112 3113 }
3113 3114
3114 3115 /*
3115 3116 * Make sure we were able to grow the vdev.
3116 3117 */
3117 3118 if (new_ms_count <= old_ms_count)
3118 3119 fatal(0, "LUN expansion failed: ms_count %llu <= %llu\n",
3119 3120 old_ms_count, new_ms_count);
3120 3121
3121 3122 /*
3122 3123 * Make sure we were able to grow the pool.
3123 3124 */
3124 3125 if (new_class_space <= old_class_space)
3125 3126 fatal(0, "LUN expansion failed: class_space %llu <= %llu\n",
3126 3127 old_class_space, new_class_space);
3127 3128
3128 3129 if (ztest_opts.zo_verbose >= 5) {
3129 3130 char oldnumbuf[6], newnumbuf[6];
3130 3131
3131 3132 nicenum(old_class_space, oldnumbuf);
3132 3133 nicenum(new_class_space, newnumbuf);
3133 3134 (void) printf("%s grew from %s to %s\n",
3134 3135 spa->spa_name, oldnumbuf, newnumbuf);
3135 3136 }
3136 3137
3137 3138 spa_config_exit(spa, SCL_STATE, spa);
3138 3139 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
3139 3140 }
3140 3141
3141 3142 /*
3142 3143 * Verify that dmu_objset_{create,destroy,open,close} work as expected.
3143 3144 */
3144 3145 /* ARGSUSED */
3145 3146 static void
3146 3147 ztest_objset_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
3147 3148 {
3148 3149 /*
3149 3150 * Create the objects common to all ztest datasets.
3150 3151 */
3151 3152 VERIFY(zap_create_claim(os, ZTEST_DIROBJ,
3152 3153 DMU_OT_ZAP_OTHER, DMU_OT_NONE, 0, tx) == 0);
3153 3154 }
3154 3155
3155 3156 static int
3156 3157 ztest_dataset_create(char *dsname)
3157 3158 {
3158 3159 uint64_t zilset = ztest_random(100);
3159 3160 int err = dmu_objset_create(dsname, DMU_OST_OTHER, 0,
3160 3161 ztest_objset_create_cb, NULL);
3161 3162
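	/*
	 * Leave the ZIL at its default behavior on ~80% of datasets and
	 * force sync=always on the rest, so some datasets always push
	 * their writes through the intent log.
	 */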
3162 3163 if (err || zilset < 80)
3163 3164 return (err);
3164 3165
3165 3166 if (ztest_opts.zo_verbose >= 6)
3166 3167 (void) printf("Setting dataset %s to sync always\n", dsname);
3167 3168 return (ztest_dsl_prop_set_uint64(dsname, ZFS_PROP_SYNC,
3168 3169 ZFS_SYNC_ALWAYS, B_FALSE));
3169 3170 }
3170 3171
3171 3172 /* ARGSUSED */
3172 3173 static int
3173 3174 ztest_objset_destroy_cb(const char *name, void *arg)
3174 3175 {
3175 3176 objset_t *os;
3176 3177 dmu_object_info_t doi;
3177 3178 int error;
3178 3179
3179 3180 /*
3180 3181 * Verify that the dataset contains a directory object.
3181 3182 */
3182 3183 VERIFY0(dmu_objset_own(name, DMU_OST_OTHER, B_TRUE, FTAG, &os));
3183 3184 error = dmu_object_info(os, ZTEST_DIROBJ, &doi);
3184 3185 if (error != ENOENT) {
3185 3186 /* We could have crashed in the middle of destroying it */
3186 3187 ASSERT0(error);
3187 3188 ASSERT3U(doi.doi_type, ==, DMU_OT_ZAP_OTHER);
3188 3189 ASSERT3S(doi.doi_physical_blocks_512, >=, 0);
3189 3190 }
3190 3191 dmu_objset_disown(os, FTAG);
3191 3192
3192 3193 /*
3193 3194 * Destroy the dataset.
3194 3195 */
3195 3196 if (strchr(name, '@') != NULL) {
3196 3197 VERIFY0(dsl_destroy_snapshot(name, B_FALSE));
3197 3198 } else {
3198 3199 VERIFY0(dsl_destroy_head(name));
3199 3200 }
3200 3201 return (0);
3201 3202 }
3202 3203
3203 3204 static boolean_t
3204 3205 ztest_snapshot_create(char *osname, uint64_t id)
3205 3206 {
3206 3207 char snapname[MAXNAMELEN];
3207 3208 int error;
3208 3209
3209 3210 (void) snprintf(snapname, sizeof (snapname), "%llu", (u_longlong_t)id);
3210 3211
3211 3212 error = dmu_objset_snapshot_one(osname, snapname);
3212 3213 if (error == ENOSPC) {
3213 3214 ztest_record_enospc(FTAG);
3214 3215 return (B_FALSE);
3215 3216 }
3216 3217 if (error != 0 && error != EEXIST) {
3217 3218 fatal(0, "ztest_snapshot_create(%s@%s) = %d", osname,
3218 3219 snapname, error);
3219 3220 }
3220 3221 return (B_TRUE);
3221 3222 }
3222 3223
3223 3224 static boolean_t
3224 3225 ztest_snapshot_destroy(char *osname, uint64_t id)
3225 3226 {
3226 3227 char snapname[MAXNAMELEN];
3227 3228 int error;
3228 3229
3229 3230 (void) snprintf(snapname, MAXNAMELEN, "%s@%llu", osname,
3230 3231 (u_longlong_t)id);
3231 3232
3232 3233 error = dsl_destroy_snapshot(snapname, B_FALSE);
3233 3234 if (error != 0 && error != ENOENT)
3234 3235 fatal(0, "ztest_snapshot_destroy(%s) = %d", snapname, error);
3235 3236 return (B_TRUE);
3236 3237 }
3237 3238
3238 3239 /* ARGSUSED */
3239 3240 void
3240 3241 ztest_dmu_objset_create_destroy(ztest_ds_t *zd, uint64_t id)
3241 3242 {
3242 3243 ztest_ds_t zdtmp;
3243 3244 int iters;
3244 3245 int error;
3245 3246 objset_t *os, *os2;
3246 3247 char name[MAXNAMELEN];
3247 3248 zilog_t *zilog;
3248 3249
3249 3250 (void) rw_rdlock(&ztest_name_lock);
3250 3251
3251 3252 (void) snprintf(name, MAXNAMELEN, "%s/temp_%llu",
3252 3253 ztest_opts.zo_pool, (u_longlong_t)id);
3253 3254
3254 3255 /*
3255 3256 * If this dataset exists from a previous run, process its replay log
3256 3257 	 * half of the time. If we don't replay it, then dsl_destroy_head()
3257 3258 * (invoked from ztest_objset_destroy_cb()) should just throw it away.
3258 3259 */
3259 3260 if (ztest_random(2) == 0 &&
3260 3261 dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os) == 0) {
3261 3262 ztest_zd_init(&zdtmp, NULL, os);
3262 3263 zil_replay(os, &zdtmp, ztest_replay_vector);
3263 3264 ztest_zd_fini(&zdtmp);
3264 3265 dmu_objset_disown(os, FTAG);
3265 3266 }
3266 3267
3267 3268 /*
3268 3269 * There may be an old instance of the dataset we're about to
3269 3270 * create lying around from a previous run. If so, destroy it
3270 3271 * and all of its snapshots.
3271 3272 */
3272 3273 (void) dmu_objset_find(name, ztest_objset_destroy_cb, NULL,
3273 3274 DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
3274 3275
3275 3276 /*
3276 3277 * Verify that the destroyed dataset is no longer in the namespace.
3277 3278 */
3278 3279 VERIFY3U(ENOENT, ==, dmu_objset_own(name, DMU_OST_OTHER, B_TRUE,
3279 3280 FTAG, &os));
3280 3281
3281 3282 /*
3282 3283 * Verify that we can create a new dataset.
3283 3284 */
3284 3285 error = ztest_dataset_create(name);
3285 3286 if (error) {
3286 3287 if (error == ENOSPC) {
3287 3288 ztest_record_enospc(FTAG);
3288 3289 (void) rw_unlock(&ztest_name_lock);
3289 3290 return;
3290 3291 }
3291 3292 fatal(0, "dmu_objset_create(%s) = %d", name, error);
3292 3293 }
3293 3294
3294 3295 VERIFY0(dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os));
3295 3296
3296 3297 ztest_zd_init(&zdtmp, NULL, os);
3297 3298
3298 3299 /*
3299 3300 * Open the intent log for it.
3300 3301 */
3301 3302 zilog = zil_open(os, ztest_get_data);
3302 3303
3303 3304 /*
3304 3305 * Put some objects in there, do a little I/O to them,
3305 3306 * and randomly take a couple of snapshots along the way.
3306 3307 */
3307 3308 iters = ztest_random(5);
3308 3309 for (int i = 0; i < iters; i++) {
3309 3310 ztest_dmu_object_alloc_free(&zdtmp, id);
3310 3311 if (ztest_random(iters) == 0)
3311 3312 (void) ztest_snapshot_create(name, i);
3312 3313 }
3313 3314
3314 3315 /*
3315 3316 * Verify that we cannot create an existing dataset.
3316 3317 */
3317 3318 VERIFY3U(EEXIST, ==,
3318 3319 dmu_objset_create(name, DMU_OST_OTHER, 0, NULL, NULL));
3319 3320
3320 3321 /*
3321 3322 * Verify that we can hold an objset that is also owned.
3322 3323 */
3323 3324 VERIFY3U(0, ==, dmu_objset_hold(name, FTAG, &os2));
3324 3325 dmu_objset_rele(os2, FTAG);
3325 3326
3326 3327 /*
3327 3328 * Verify that we cannot own an objset that is already owned.
3328 3329 */
3329 3330 VERIFY3U(EBUSY, ==,
3330 3331 dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os2));
3331 3332
3332 3333 zil_close(zilog);
3333 3334 dmu_objset_disown(os, FTAG);
3334 3335 ztest_zd_fini(&zdtmp);
3335 3336
3336 3337 (void) rw_unlock(&ztest_name_lock);
3337 3338 }
3338 3339
3339 3340 /*
3340 3341 * Verify that dmu_snapshot_{create,destroy,open,close} work as expected.
3341 3342 */
3342 3343 void
3343 3344 ztest_dmu_snapshot_create_destroy(ztest_ds_t *zd, uint64_t id)
3344 3345 {
3345 3346 (void) rw_rdlock(&ztest_name_lock);
3346 3347 (void) ztest_snapshot_destroy(zd->zd_name, id);
3347 3348 (void) ztest_snapshot_create(zd->zd_name, id);
3348 3349 (void) rw_unlock(&ztest_name_lock);
3349 3350 }
3350 3351
3351 3352 /*
3352 3353 * Cleanup non-standard snapshots and clones.
3353 3354 */
3354 3355 void
3355 3356 ztest_dsl_dataset_cleanup(char *osname, uint64_t id)
3356 3357 {
3357 3358 char snap1name[MAXNAMELEN];
3358 3359 char clone1name[MAXNAMELEN];
3359 3360 char snap2name[MAXNAMELEN];
3360 3361 char clone2name[MAXNAMELEN];
3361 3362 char snap3name[MAXNAMELEN];
3362 3363 int error;
3363 3364
3364 3365 (void) snprintf(snap1name, MAXNAMELEN, "%s@s1_%llu", osname, id);
3365 3366 (void) snprintf(clone1name, MAXNAMELEN, "%s/c1_%llu", osname, id);
3366 3367 (void) snprintf(snap2name, MAXNAMELEN, "%s@s2_%llu", clone1name, id);
3367 3368 (void) snprintf(clone2name, MAXNAMELEN, "%s/c2_%llu", osname, id);
3368 3369 (void) snprintf(snap3name, MAXNAMELEN, "%s@s3_%llu", clone1name, id);
3369 3370
3370 3371 error = dsl_destroy_head(clone2name);
3371 3372 if (error && error != ENOENT)
3372 3373 fatal(0, "dsl_destroy_head(%s) = %d", clone2name, error);
3373 3374 error = dsl_destroy_snapshot(snap3name, B_FALSE);
3374 3375 if (error && error != ENOENT)
3375 3376 fatal(0, "dsl_destroy_snapshot(%s) = %d", snap3name, error);
3376 3377 error = dsl_destroy_snapshot(snap2name, B_FALSE);
3377 3378 if (error && error != ENOENT)
3378 3379 fatal(0, "dsl_destroy_snapshot(%s) = %d", snap2name, error);
3379 3380 error = dsl_destroy_head(clone1name);
3380 3381 if (error && error != ENOENT)
3381 3382 fatal(0, "dsl_destroy_head(%s) = %d", clone1name, error);
3382 3383 error = dsl_destroy_snapshot(snap1name, B_FALSE);
3383 3384 if (error && error != ENOENT)
3384 3385 fatal(0, "dsl_destroy_snapshot(%s) = %d", snap1name, error);
3385 3386 }
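/*
 * The layout these names describe (built by ztest_dsl_dataset_promote_busy()
 * below) is:
 *
 *	<osname>@s1_<id>  --clone-->  <osname>/c1_<id>
 *	<osname>/c1_<id>@s2_<id>
 *	<osname>/c1_<id>@s3_<id>  --clone-->  <osname>/c2_<id>
 *
 * Destruction proceeds leaves-first so that no clone still depends on a
 * snapshot at the moment the snapshot is destroyed.
 */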
3386 3387
3387 3388 /*
3388 3389  * Verify that dsl_dataset_promote() handles EBUSY.
3389 3390 */
3390 3391 void
3391 3392 ztest_dsl_dataset_promote_busy(ztest_ds_t *zd, uint64_t id)
3392 3393 {
3393 3394 objset_t *os;
3394 3395 char snap1name[MAXNAMELEN];
3395 3396 char clone1name[MAXNAMELEN];
3396 3397 char snap2name[MAXNAMELEN];
3397 3398 char clone2name[MAXNAMELEN];
3398 3399 char snap3name[MAXNAMELEN];
3399 3400 char *osname = zd->zd_name;
3400 3401 int error;
3401 3402
3402 3403 (void) rw_rdlock(&ztest_name_lock);
3403 3404
3404 3405 ztest_dsl_dataset_cleanup(osname, id);
3405 3406
3406 3407 (void) snprintf(snap1name, MAXNAMELEN, "%s@s1_%llu", osname, id);
3407 3408 (void) snprintf(clone1name, MAXNAMELEN, "%s/c1_%llu", osname, id);
3408 3409 (void) snprintf(snap2name, MAXNAMELEN, "%s@s2_%llu", clone1name, id);
3409 3410 (void) snprintf(clone2name, MAXNAMELEN, "%s/c2_%llu", osname, id);
3410 3411 (void) snprintf(snap3name, MAXNAMELEN, "%s@s3_%llu", clone1name, id);
3411 3412
3412 3413 error = dmu_objset_snapshot_one(osname, strchr(snap1name, '@') + 1);
3413 3414 if (error && error != EEXIST) {
3414 3415 if (error == ENOSPC) {
3415 3416 ztest_record_enospc(FTAG);
3416 3417 goto out;
3417 3418 }
3418 3419 fatal(0, "dmu_objset_snapshot_one(%s) = %d", snap1name, error);
3419 3420 }
3420 3421
3421 3422 error = dmu_objset_clone(clone1name, snap1name);
3422 3423 if (error) {
3423 3424 if (error == ENOSPC) {
3424 3425 ztest_record_enospc(FTAG);
3425 3426 goto out;
3426 3427 }
3427 3428 fatal(0, "dmu_objset_clone(%s) = %d", clone1name, error);
3428 3429 }
3429 3430
3430 3431 error = dmu_objset_snapshot_one(clone1name, strchr(snap2name, '@') + 1);
3431 3432 if (error && error != EEXIST) {
3432 3433 if (error == ENOSPC) {
3433 3434 ztest_record_enospc(FTAG);
3434 3435 goto out;
3435 3436 }
3436 3437 fatal(0, "dmu_objset_snapshot_one(%s) = %d", snap2name, error);
3437 3438 }
3438 3439
3439 3440 error = dmu_objset_snapshot_one(clone1name, strchr(snap3name, '@') + 1);
3440 3441 if (error && error != EEXIST) {
3441 3442 if (error == ENOSPC) {
3442 3443 ztest_record_enospc(FTAG);
3443 3444 goto out;
3444 3445 }
3445 3446 fatal(0, "dmu_objset_snapshot_one(%s) = %d", snap3name, error);
3446 3447 }
3447 3448
3448 3449 error = dmu_objset_clone(clone2name, snap3name);
3449 3450 if (error) {
3450 3451 if (error == ENOSPC) {
3451 3452 ztest_record_enospc(FTAG);
3452 3453 goto out;
3453 3454 }
3454 3455 fatal(0, "dmu_objset_clone(%s) = %d", clone2name, error);
3455 3456 }
3456 3457
3457 3458 error = dmu_objset_own(snap2name, DMU_OST_ANY, B_TRUE, FTAG, &os);
3458 3459 if (error)
3459 3460 fatal(0, "dmu_objset_own(%s) = %d", snap2name, error);
3460 3461 error = dsl_dataset_promote(clone2name, NULL);
3461 3462 if (error != EBUSY)
3462 3463 fatal(0, "dsl_dataset_promote(%s), %d, not EBUSY", clone2name,
3463 3464 error);
3464 3465 dmu_objset_disown(os, FTAG);
3465 3466
3466 3467 out:
3467 3468 ztest_dsl_dataset_cleanup(osname, id);
3468 3469
3469 3470 (void) rw_unlock(&ztest_name_lock);
3470 3471 }
3471 3472
3472 3473 /*
3473 3474 * Verify that dmu_object_{alloc,free} work as expected.
3474 3475 */
3475 3476 void
3476 3477 ztest_dmu_object_alloc_free(ztest_ds_t *zd, uint64_t id)
3477 3478 {
3478 3479 ztest_od_t od[4];
3479 3480 int batchsize = sizeof (od) / sizeof (od[0]);
3480 3481
3481 3482 for (int b = 0; b < batchsize; b++)
3482 3483 ztest_od_init(&od[b], id, FTAG, b, DMU_OT_UINT64_OTHER, 0, 0);
3483 3484
3484 3485 /*
3485 3486 * Destroy the previous batch of objects, create a new batch,
3486 3487 * and do some I/O on the new objects.
3487 3488 */
3488 3489 if (ztest_object_init(zd, od, sizeof (od), B_TRUE) != 0)
3489 3490 return;
3490 3491
3491 3492 while (ztest_random(4 * batchsize) != 0)
3492 3493 ztest_io(zd, od[ztest_random(batchsize)].od_object,
3493 3494 ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT);
3494 3495 }
3495 3496
3496 3497 /*
3497 3498 * Verify that dmu_{read,write} work as expected.
3498 3499 */
3499 3500 void
3500 3501 ztest_dmu_read_write(ztest_ds_t *zd, uint64_t id)
3501 3502 {
3502 3503 objset_t *os = zd->zd_os;
3503 3504 ztest_od_t od[2];
3504 3505 dmu_tx_t *tx;
3505 3506 int i, freeit, error;
3506 3507 uint64_t n, s, txg;
3507 3508 bufwad_t *packbuf, *bigbuf, *pack, *bigH, *bigT;
3508 3509 uint64_t packobj, packoff, packsize, bigobj, bigoff, bigsize;
3509 3510 uint64_t chunksize = (1000 + ztest_random(1000)) * sizeof (uint64_t);
3510 3511 uint64_t regions = 997;
3511 3512 uint64_t stride = 123456789ULL;
3512 3513 uint64_t width = 40;
3513 3514 int free_percent = 5;
3514 3515
3515 3516 /*
3516 3517 * This test uses two objects, packobj and bigobj, that are always
3517 3518 * updated together (i.e. in the same tx) so that their contents are
3518 3519 * in sync and can be compared. Their contents relate to each other
3519 3520 * in a simple way: packobj is a dense array of 'bufwad' structures,
3520 3521 * while bigobj is a sparse array of the same bufwads. Specifically,
3521 3522 * for any index n, there are three bufwads that should be identical:
3522 3523 *
3523 3524 * packobj, at offset n * sizeof (bufwad_t)
3524 3525 * bigobj, at the head of the nth chunk
3525 3526 * bigobj, at the tail of the nth chunk
3526 3527 *
3527 3528 * The chunk size is arbitrary. It doesn't have to be a power of two,
3528 3529 * and it doesn't have any relation to the object blocksize.
3529 3530 * The only requirement is that it can hold at least two bufwads.
3530 3531 *
3531 3532 * Normally, we write the bufwad to each of these locations.
3532 3533 * However, free_percent of the time we instead write zeroes to
3533 3534 * packobj and perform a dmu_free_range() on bigobj. By comparing
3534 3535 * bigobj to packobj, we can verify that the DMU is correctly
3535 3536 * tracking which parts of an object are allocated and free,
3536 3537 * and that the contents of the allocated blocks are correct.
3537 3538 */
3538 3539
3539 3540 /*
3540 3541 * Read the directory info. If it's the first time, set things up.
3541 3542 */
3542 3543 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, chunksize);
3543 3544 ztest_od_init(&od[1], id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, chunksize);
3544 3545
3545 3546 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
3546 3547 return;
3547 3548
3548 3549 bigobj = od[0].od_object;
3549 3550 packobj = od[1].od_object;
3550 3551 chunksize = od[0].od_gen;
3551 3552 ASSERT(chunksize == od[1].od_gen);
3552 3553
3553 3554 /*
3554 3555 * Prefetch a random chunk of the big object.
3555 3556 * Our aim here is to get some async reads in flight
3556 3557 * for blocks that we may free below; the DMU should
3557 3558 * handle this race correctly.
3558 3559 */
3559 3560 n = ztest_random(regions) * stride + ztest_random(width);
3560 3561 s = 1 + ztest_random(2 * width - 1);
3561 3562 dmu_prefetch(os, bigobj, n * chunksize, s * chunksize);
3562 3563
3563 3564 /*
3564 3565 * Pick a random index and compute the offsets into packobj and bigobj.
3565 3566 */
3566 3567 n = ztest_random(regions) * stride + ztest_random(width);
3567 3568 s = 1 + ztest_random(width - 1);
3568 3569
3569 3570 packoff = n * sizeof (bufwad_t);
3570 3571 packsize = s * sizeof (bufwad_t);
3571 3572
3572 3573 bigoff = n * chunksize;
3573 3574 bigsize = s * chunksize;
3574 3575
3575 3576 packbuf = umem_alloc(packsize, UMEM_NOFAIL);
3576 3577 bigbuf = umem_alloc(bigsize, UMEM_NOFAIL);
3577 3578
3578 3579 /*
3579 3580 * free_percent of the time, free a range of bigobj rather than
3580 3581 * overwriting it.
3581 3582 */
3582 3583 freeit = (ztest_random(100) < free_percent);
3583 3584
3584 3585 /*
3585 3586 * Read the current contents of our objects.
3586 3587 */
3587 3588 error = dmu_read(os, packobj, packoff, packsize, packbuf,
3588 3589 DMU_READ_PREFETCH);
3589 3590 ASSERT0(error);
3590 3591 error = dmu_read(os, bigobj, bigoff, bigsize, bigbuf,
3591 3592 DMU_READ_PREFETCH);
3592 3593 ASSERT0(error);
3593 3594
3594 3595 /*
3595 3596 * Get a tx for the mods to both packobj and bigobj.
3596 3597 */
3597 3598 tx = dmu_tx_create(os);
3598 3599
3599 3600 dmu_tx_hold_write(tx, packobj, packoff, packsize);
3600 3601
3601 3602 if (freeit)
3602 3603 dmu_tx_hold_free(tx, bigobj, bigoff, bigsize);
3603 3604 else
3604 3605 dmu_tx_hold_write(tx, bigobj, bigoff, bigsize);
3605 3606
3606 3607 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
3607 3608 if (txg == 0) {
3608 3609 umem_free(packbuf, packsize);
3609 3610 umem_free(bigbuf, bigsize);
3610 3611 return;
3611 3612 }
3612 3613
3613 3614 dmu_object_set_checksum(os, bigobj,
3614 3615 (enum zio_checksum)ztest_random_dsl_prop(ZFS_PROP_CHECKSUM), tx);
3615 3616
3616 3617 dmu_object_set_compress(os, bigobj,
3617 3618 (enum zio_compress)ztest_random_dsl_prop(ZFS_PROP_COMPRESSION), tx);
3618 3619
3619 3620 /*
3620 3621 * For each index from n to n + s, verify that the existing bufwad
3621 3622 * in packobj matches the bufwads at the head and tail of the
3622 3623 * corresponding chunk in bigobj. Then update all three bufwads
3623 3624 * with the new values we want to write out.
3624 3625 */
3625 3626 for (i = 0; i < s; i++) {
3626 3627 /* LINTED */
3627 3628 pack = (bufwad_t *)((char *)packbuf + i * sizeof (bufwad_t));
3628 3629 /* LINTED */
3629 3630 bigH = (bufwad_t *)((char *)bigbuf + i * chunksize);
3630 3631 /* LINTED */
3631 3632 bigT = (bufwad_t *)((char *)bigH + chunksize) - 1;
3632 3633
3633 3634 ASSERT((uintptr_t)bigH - (uintptr_t)bigbuf < bigsize);
3634 3635 ASSERT((uintptr_t)bigT - (uintptr_t)bigbuf < bigsize);
3635 3636
3636 3637 if (pack->bw_txg > txg)
3637 3638 fatal(0, "future leak: got %llx, open txg is %llx",
3638 3639 pack->bw_txg, txg);
3639 3640
3640 3641 if (pack->bw_data != 0 && pack->bw_index != n + i)
3641 3642 fatal(0, "wrong index: got %llx, wanted %llx+%llx",
3642 3643 pack->bw_index, n, i);
3643 3644
3644 3645 if (bcmp(pack, bigH, sizeof (bufwad_t)) != 0)
3645 3646 fatal(0, "pack/bigH mismatch in %p/%p", pack, bigH);
3646 3647
3647 3648 if (bcmp(pack, bigT, sizeof (bufwad_t)) != 0)
3648 3649 fatal(0, "pack/bigT mismatch in %p/%p", pack, bigT);
3649 3650
3650 3651 if (freeit) {
3651 3652 bzero(pack, sizeof (bufwad_t));
3652 3653 } else {
3653 3654 pack->bw_index = n + i;
3654 3655 pack->bw_txg = txg;
3655 3656 pack->bw_data = 1 + ztest_random(-2ULL);
3656 3657 }
3657 3658 *bigH = *pack;
3658 3659 *bigT = *pack;
3659 3660 }
3660 3661
3661 3662 /*
3662 3663 * We've verified all the old bufwads, and made new ones.
3663 3664 * Now write them out.
3664 3665 */
3665 3666 dmu_write(os, packobj, packoff, packsize, packbuf, tx);
3666 3667
3667 3668 if (freeit) {
3668 3669 if (ztest_opts.zo_verbose >= 7) {
3669 3670 (void) printf("freeing offset %llx size %llx"
3670 3671 " txg %llx\n",
3671 3672 (u_longlong_t)bigoff,
3672 3673 (u_longlong_t)bigsize,
3673 3674 (u_longlong_t)txg);
3674 3675 }
3675 3676 VERIFY(0 == dmu_free_range(os, bigobj, bigoff, bigsize, tx));
3676 3677 } else {
3677 3678 if (ztest_opts.zo_verbose >= 7) {
3678 3679 (void) printf("writing offset %llx size %llx"
3679 3680 " txg %llx\n",
3680 3681 (u_longlong_t)bigoff,
3681 3682 (u_longlong_t)bigsize,
3682 3683 (u_longlong_t)txg);
3683 3684 }
3684 3685 dmu_write(os, bigobj, bigoff, bigsize, bigbuf, tx);
3685 3686 }
3686 3687
3687 3688 dmu_tx_commit(tx);
3688 3689
3689 3690 /*
3690 3691 * Sanity check the stuff we just wrote.
3691 3692 */
3692 3693 {
3693 3694 void *packcheck = umem_alloc(packsize, UMEM_NOFAIL);
3694 3695 void *bigcheck = umem_alloc(bigsize, UMEM_NOFAIL);
3695 3696
3696 3697 VERIFY(0 == dmu_read(os, packobj, packoff,
3697 3698 packsize, packcheck, DMU_READ_PREFETCH));
3698 3699 VERIFY(0 == dmu_read(os, bigobj, bigoff,
3699 3700 bigsize, bigcheck, DMU_READ_PREFETCH));
3700 3701
3701 3702 ASSERT(bcmp(packbuf, packcheck, packsize) == 0);
3702 3703 ASSERT(bcmp(bigbuf, bigcheck, bigsize) == 0);
3703 3704
3704 3705 umem_free(packcheck, packsize);
3705 3706 umem_free(bigcheck, bigsize);
3706 3707 }
3707 3708
3708 3709 umem_free(packbuf, packsize);
3709 3710 umem_free(bigbuf, bigsize);
3710 3711 }
3711 3712
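[Editorial note] The layout comment above implies three offsets that must hold identical bufwads for a given index n. A small sketch of that arithmetic, with illustrative helper names and the bufwad fields mirroring those the test reads and writes:

/*
 * Editorial sketch: the three offsets that must hold identical
 * bufwads for index n. Helper names are illustrative only.
 */
#include <stdint.h>

typedef struct bufwad {
	uint64_t	bw_index;
	uint64_t	bw_txg;
	uint64_t	bw_data;
} bufwad_t;

/* Dense array in packobj. */
static uint64_t
pack_offset(uint64_t n)
{
	return (n * sizeof (bufwad_t));
}

/* First bufwad of the nth chunk in bigobj. */
static uint64_t
big_head_offset(uint64_t n, uint64_t chunksize)
{
	return (n * chunksize);
}

/* Last bufwad of the nth chunk in bigobj. */
static uint64_t
big_tail_offset(uint64_t n, uint64_t chunksize)
{
	return ((n + 1) * chunksize - sizeof (bufwad_t));
}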
3712 3713 void
3713 3714 compare_and_update_pbbufs(uint64_t s, bufwad_t *packbuf, bufwad_t *bigbuf,
3714 3715 uint64_t bigsize, uint64_t n, uint64_t chunksize, uint64_t txg)
3715 3716 {
3716 3717 uint64_t i;
3717 3718 bufwad_t *pack;
3718 3719 bufwad_t *bigH;
3719 3720 bufwad_t *bigT;
3720 3721
3721 3722 /*
3722 3723 * For each index from n to n + s, verify that the existing bufwad
3723 3724 * in packobj matches the bufwads at the head and tail of the
3724 3725 * corresponding chunk in bigobj. Then update all three bufwads
3725 3726 * with the new values we want to write out.
3726 3727 */
3727 3728 for (i = 0; i < s; i++) {
3728 3729 /* LINTED */
3729 3730 pack = (bufwad_t *)((char *)packbuf + i * sizeof (bufwad_t));
3730 3731 /* LINTED */
3731 3732 bigH = (bufwad_t *)((char *)bigbuf + i * chunksize);
3732 3733 /* LINTED */
3733 3734 bigT = (bufwad_t *)((char *)bigH + chunksize) - 1;
3734 3735
3735 3736 ASSERT((uintptr_t)bigH - (uintptr_t)bigbuf < bigsize);
3736 3737 ASSERT((uintptr_t)bigT - (uintptr_t)bigbuf < bigsize);
3737 3738
3738 3739 if (pack->bw_txg > txg)
3739 3740 fatal(0, "future leak: got %llx, open txg is %llx",
3740 3741 pack->bw_txg, txg);
3741 3742
3742 3743 if (pack->bw_data != 0 && pack->bw_index != n + i)
3743 3744 fatal(0, "wrong index: got %llx, wanted %llx+%llx",
3744 3745 pack->bw_index, n, i);
3745 3746
3746 3747 if (bcmp(pack, bigH, sizeof (bufwad_t)) != 0)
3747 3748 fatal(0, "pack/bigH mismatch in %p/%p", pack, bigH);
3748 3749
3749 3750 if (bcmp(pack, bigT, sizeof (bufwad_t)) != 0)
3750 3751 fatal(0, "pack/bigT mismatch in %p/%p", pack, bigT);
3751 3752
3752 3753 pack->bw_index = n + i;
3753 3754 pack->bw_txg = txg;
3754 3755 pack->bw_data = 1 + ztest_random(-2ULL);
3755 3756
3756 3757 *bigH = *pack;
3757 3758 *bigT = *pack;
3758 3759 }
3759 3760 }
3760 3761
3761 3762 void
3762 3763 ztest_dmu_read_write_zcopy(ztest_ds_t *zd, uint64_t id)
3763 3764 {
3764 3765 objset_t *os = zd->zd_os;
3765 3766 ztest_od_t od[2];
3766 3767 dmu_tx_t *tx;
3767 3768 uint64_t i;
3768 3769 int error;
3769 3770 uint64_t n, s, txg;
3770 3771 bufwad_t *packbuf, *bigbuf;
3771 3772 uint64_t packobj, packoff, packsize, bigobj, bigoff, bigsize;
3772 3773 uint64_t blocksize = ztest_random_blocksize();
3773 3774 uint64_t chunksize = blocksize;
3774 3775 uint64_t regions = 997;
3775 3776 uint64_t stride = 123456789ULL;
3776 3777 uint64_t width = 9;
3777 3778 dmu_buf_t *bonus_db;
3778 3779 arc_buf_t **bigbuf_arcbufs;
3779 3780 dmu_object_info_t doi;
3780 3781
3781 3782 /*
3782 3783 * This test uses two objects, packobj and bigobj, that are always
3783 3784 * updated together (i.e. in the same tx) so that their contents are
3784 3785 * in sync and can be compared. Their contents relate to each other
3785 3786 * in a simple way: packobj is a dense array of 'bufwad' structures,
3786 3787 * while bigobj is a sparse array of the same bufwads. Specifically,
3787 3788 * for any index n, there are three bufwads that should be identical:
3788 3789 *
3789 3790 * packobj, at offset n * sizeof (bufwad_t)
3790 3791 * bigobj, at the head of the nth chunk
3791 3792 * bigobj, at the tail of the nth chunk
3792 3793 *
3793 3794 * The chunk size is set equal to the bigobj block size so that
3794 3795 * dmu_assign_arcbuf() can be tested for object updates.
3795 3796 */
3796 3797
3797 3798 /*
3798 3799 * Read the directory info. If it's the first time, set things up.
3799 3800 */
3800 3801 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0);
3801 3802 ztest_od_init(&od[1], id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, chunksize);
3802 3803
3803 3804 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
3804 3805 return;
3805 3806
3806 3807 bigobj = od[0].od_object;
3807 3808 packobj = od[1].od_object;
3808 3809 blocksize = od[0].od_blocksize;
3809 3810 chunksize = blocksize;
3810 3811 ASSERT(chunksize == od[1].od_gen);
3811 3812
3812 3813 VERIFY(dmu_object_info(os, bigobj, &doi) == 0);
3813 3814 VERIFY(ISP2(doi.doi_data_block_size));
3814 3815 VERIFY(chunksize == doi.doi_data_block_size);
3815 3816 VERIFY(chunksize >= 2 * sizeof (bufwad_t));
3816 3817
3817 3818 /*
3818 3819 * Pick a random index and compute the offsets into packobj and bigobj.
3819 3820 */
3820 3821 n = ztest_random(regions) * stride + ztest_random(width);
3821 3822 s = 1 + ztest_random(width - 1);
3822 3823
3823 3824 packoff = n * sizeof (bufwad_t);
3824 3825 packsize = s * sizeof (bufwad_t);
3825 3826
3826 3827 bigoff = n * chunksize;
3827 3828 bigsize = s * chunksize;
3828 3829
3829 3830 packbuf = umem_zalloc(packsize, UMEM_NOFAIL);
3830 3831 bigbuf = umem_zalloc(bigsize, UMEM_NOFAIL);
3831 3832
3832 3833 VERIFY3U(0, ==, dmu_bonus_hold(os, bigobj, FTAG, &bonus_db));
3833 3834
3834 3835 bigbuf_arcbufs = umem_zalloc(2 * s * sizeof (arc_buf_t *), UMEM_NOFAIL);
3835 3836
3836 3837 /*
3837 3838 * Iteration 0 tests zcopy for DB_UNCACHED dbufs.
3838 3839 * Iteration 1 tests zcopy to already referenced dbufs.
3839 3840 * Iteration 2 tests zcopy to a dirty dbuf in the same txg.
3840 3841 * Iteration 3 tests zcopy to a dbuf dirtied in the previous txg.
3841 3842 * Iteration 4 tests zcopy when the dbuf is no longer dirty.
3842 3843 * Iteration 5 tests zcopy when it can't be done.
3843 3844 * Iteration 6 does one more zcopy write.
3844 3845 */
3845 3846 for (i = 0; i < 7; i++) {
3846 3847 uint64_t j;
3847 3848 uint64_t off;
3848 3849
3849 3850 /*
3850 3851 * In iteration 5 (i == 5) use arcbufs
3851 3852 * that don't match bigobj blksz to test
3852 3853 * dmu_assign_arcbuf() when it can't directly
3853 3854 * assign an arcbuf to a dbuf.
3854 3855 */
3855 3856 for (j = 0; j < s; j++) {
3856 3857 if (i != 5) {
3857 3858 bigbuf_arcbufs[j] =
3858 3859 dmu_request_arcbuf(bonus_db, chunksize);
3859 3860 } else {
3860 3861 bigbuf_arcbufs[2 * j] =
3861 3862 dmu_request_arcbuf(bonus_db, chunksize / 2);
3862 3863 bigbuf_arcbufs[2 * j + 1] =
3863 3864 dmu_request_arcbuf(bonus_db, chunksize / 2);
3864 3865 }
3865 3866 }
3866 3867
3867 3868 /*
3868 3869 * Get a tx for the mods to both packobj and bigobj.
3869 3870 */
3870 3871 tx = dmu_tx_create(os);
3871 3872
3872 3873 dmu_tx_hold_write(tx, packobj, packoff, packsize);
3873 3874 dmu_tx_hold_write(tx, bigobj, bigoff, bigsize);
3874 3875
3875 3876 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
3876 3877 if (txg == 0) {
3877 3878 umem_free(packbuf, packsize);
3878 3879 umem_free(bigbuf, bigsize);
3879 3880 for (j = 0; j < s; j++) {
3880 3881 if (i != 5) {
3881 3882 dmu_return_arcbuf(bigbuf_arcbufs[j]);
3882 3883 } else {
3883 3884 dmu_return_arcbuf(
3884 3885 bigbuf_arcbufs[2 * j]);
3885 3886 dmu_return_arcbuf(
3886 3887 bigbuf_arcbufs[2 * j + 1]);
3887 3888 }
3888 3889 }
3889 3890 umem_free(bigbuf_arcbufs, 2 * s * sizeof (arc_buf_t *));
3890 3891 dmu_buf_rele(bonus_db, FTAG);
3891 3892 return;
3892 3893 }
3893 3894
3894 3895 /*
3895 3896 * 50% of the time don't read objects in the 1st iteration to
3896 3897 * test dmu_assign_arcbuf() for the case when there are no
3897 3898 * existing dbufs for the specified offsets.
3898 3899 */
3899 3900 if (i != 0 || ztest_random(2) != 0) {
3900 3901 error = dmu_read(os, packobj, packoff,
3901 3902 packsize, packbuf, DMU_READ_PREFETCH);
3902 3903 ASSERT0(error);
3903 3904 error = dmu_read(os, bigobj, bigoff, bigsize,
3904 3905 bigbuf, DMU_READ_PREFETCH);
3905 3906 ASSERT0(error);
3906 3907 }
3907 3908 compare_and_update_pbbufs(s, packbuf, bigbuf, bigsize,
3908 3909 n, chunksize, txg);
3909 3910
3910 3911 /*
3911 3912 * We've verified all the old bufwads, and made new ones.
3912 3913 * Now write them out.
3913 3914 */
3914 3915 dmu_write(os, packobj, packoff, packsize, packbuf, tx);
3915 3916 if (ztest_opts.zo_verbose >= 7) {
3916 3917 (void) printf("writing offset %llx size %llx"
3917 3918 " txg %llx\n",
3918 3919 (u_longlong_t)bigoff,
3919 3920 (u_longlong_t)bigsize,
3920 3921 (u_longlong_t)txg);
3921 3922 }
3922 3923 for (off = bigoff, j = 0; j < s; j++, off += chunksize) {
3923 3924 dmu_buf_t *dbt;
3924 3925 if (i != 5) {
3925 3926 bcopy((caddr_t)bigbuf + (off - bigoff),
3926 3927 bigbuf_arcbufs[j]->b_data, chunksize);
3927 3928 } else {
3928 3929 bcopy((caddr_t)bigbuf + (off - bigoff),
3929 3930 bigbuf_arcbufs[2 * j]->b_data,
3930 3931 chunksize / 2);
3931 3932 bcopy((caddr_t)bigbuf + (off - bigoff) +
3932 3933 chunksize / 2,
3933 3934 bigbuf_arcbufs[2 * j + 1]->b_data,
3934 3935 chunksize / 2);
3935 3936 }
3936 3937
3937 3938 if (i == 1) {
3938 3939 VERIFY(dmu_buf_hold(os, bigobj, off,
3939 3940 FTAG, &dbt, DMU_READ_NO_PREFETCH) == 0);
3940 3941 }
3941 3942 if (i != 5) {
3942 3943 dmu_assign_arcbuf(bonus_db, off,
3943 3944 bigbuf_arcbufs[j], tx);
3944 3945 } else {
3945 3946 dmu_assign_arcbuf(bonus_db, off,
3946 3947 bigbuf_arcbufs[2 * j], tx);
3947 3948 dmu_assign_arcbuf(bonus_db,
3948 3949 off + chunksize / 2,
3949 3950 bigbuf_arcbufs[2 * j + 1], tx);
3950 3951 }
3951 3952 if (i == 1) {
3952 3953 dmu_buf_rele(dbt, FTAG);
3953 3954 }
3954 3955 }
3955 3956 dmu_tx_commit(tx);
3956 3957
3957 3958 /*
3958 3959 * Sanity check the stuff we just wrote.
3959 3960 */
3960 3961 {
3961 3962 void *packcheck = umem_alloc(packsize, UMEM_NOFAIL);
3962 3963 void *bigcheck = umem_alloc(bigsize, UMEM_NOFAIL);
3963 3964
3964 3965 VERIFY(0 == dmu_read(os, packobj, packoff,
3965 3966 packsize, packcheck, DMU_READ_PREFETCH));
3966 3967 VERIFY(0 == dmu_read(os, bigobj, bigoff,
3967 3968 bigsize, bigcheck, DMU_READ_PREFETCH));
3968 3969
3969 3970 ASSERT(bcmp(packbuf, packcheck, packsize) == 0);
3970 3971 ASSERT(bcmp(bigbuf, bigcheck, bigsize) == 0);
3971 3972
3972 3973 umem_free(packcheck, packsize);
3973 3974 umem_free(bigcheck, bigsize);
3974 3975 }
3975 3976 if (i == 2) {
3976 3977 txg_wait_open(dmu_objset_pool(os), 0);
3977 3978 } else if (i == 3) {
3978 3979 txg_wait_synced(dmu_objset_pool(os), 0);
3979 3980 }
3980 3981 }
3981 3982
3982 3983 dmu_buf_rele(bonus_db, FTAG);
3983 3984 umem_free(packbuf, packsize);
3984 3985 umem_free(bigbuf, bigsize);
3985 3986 umem_free(bigbuf_arcbufs, 2 * s * sizeof (arc_buf_t *));
3986 3987 }
3987 3988
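[Editorial note] The zero-copy path exercised above reduces to a request/fill/assign pattern. A hedged fragment showing it in isolation; it assumes ztest's context (ZFS headers, a held bonus dbuf, an assigned tx, and an arcbuf size matching the object block size, as in iterations other than 5), and the helper name is illustrative:

/*
 * Editorial sketch of the zero-copy write pattern above. On this
 * path the DMU takes ownership of the arcbuf, so dmu_return_arcbuf()
 * is only needed on paths that never reach dmu_assign_arcbuf(), as
 * in the txg == 0 cleanup in the function above.
 */
static void
zcopy_write_one(dmu_buf_t *bonus_db, uint64_t off, uint64_t blksz,
    const void *src, dmu_tx_t *tx)
{
	/* Borrow an ARC buffer sized to the object's block size. */
	arc_buf_t *abuf = dmu_request_arcbuf(bonus_db, blksz);

	/* Fill it, then hand it to the DMU under the assigned tx. */
	bcopy(src, abuf->b_data, blksz);
	dmu_assign_arcbuf(bonus_db, off, abuf, tx);
}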
3988 3989 /* ARGSUSED */
3989 3990 void
3990 3991 ztest_dmu_write_parallel(ztest_ds_t *zd, uint64_t id)
3991 3992 {
3992 3993 ztest_od_t od[1];
3993 3994 uint64_t offset = (1ULL << (ztest_random(20) + 43)) +
3994 3995 (ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT);
3995 3996
3996 3997 /*
3997 3998 * Have multiple threads write to large offsets in an object
3998 3999 * to verify that parallel writes to an object -- even to the
3999 4000 * same blocks within the object -- don't cause any trouble.
4000 4001 */
4001 4002 ztest_od_init(&od[0], ID_PARALLEL, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0);
4002 4003
4003 4004 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
4004 4005 return;
4005 4006
4006 4007 while (ztest_random(10) != 0)
4007 4008 ztest_io(zd, od[0].od_object, offset);
4008 4009 }
4009 4010
4010 4011 void
4011 4012 ztest_dmu_prealloc(ztest_ds_t *zd, uint64_t id)
4012 4013 {
4013 4014 ztest_od_t od[1];
4014 4015 uint64_t offset = (1ULL << (ztest_random(4) + SPA_MAXBLOCKSHIFT)) +
4015 4016 (ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT);
4016 4017 uint64_t count = ztest_random(20) + 1;
4017 4018 uint64_t blocksize = ztest_random_blocksize();
4018 4019 void *data;
4019 4020
4020 4021 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0);
4021 4022
4022 4023 if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0)
4023 4024 return;
4024 4025
4025 4026 if (ztest_truncate(zd, od[0].od_object, offset, count * blocksize) != 0)
4026 4027 return;
4027 4028
4028 4029 ztest_prealloc(zd, od[0].od_object, offset, count * blocksize);
4029 4030
4030 4031 data = umem_zalloc(blocksize, UMEM_NOFAIL);
4031 4032
4032 4033 while (ztest_random(count) != 0) {
4033 4034 uint64_t randoff = offset + (ztest_random(count) * blocksize);
4034 4035 if (ztest_write(zd, od[0].od_object, randoff, blocksize,
4035 4036 data) != 0)
4036 4037 break;
4037 4038 while (ztest_random(4) != 0)
4038 4039 ztest_io(zd, od[0].od_object, randoff);
4039 4040 }
4040 4041
4041 4042 umem_free(data, blocksize);
4042 4043 }
4043 4044
4044 4045 /*
4045 4046 * Verify that zap_{create,destroy,add,remove,update} work as expected.
4046 4047 */
4047 4048 #define ZTEST_ZAP_MIN_INTS 1
4048 4049 #define ZTEST_ZAP_MAX_INTS 4
4049 4050 #define ZTEST_ZAP_MAX_PROPS 1000
4050 4051
4051 4052 void
4052 4053 ztest_zap(ztest_ds_t *zd, uint64_t id)
4053 4054 {
4054 4055 objset_t *os = zd->zd_os;
4055 4056 ztest_od_t od[1];
4056 4057 uint64_t object;
4057 4058 uint64_t txg, last_txg;
4058 4059 uint64_t value[ZTEST_ZAP_MAX_INTS];
4059 4060 uint64_t zl_ints, zl_intsize, prop;
4060 4061 int i, ints;
4061 4062 dmu_tx_t *tx;
4062 4063 char propname[100], txgname[100];
4063 4064 int error;
4064 4065 char *hc[2] = { "s.acl.h", ".s.open.h.hyLZlg" };
4065 4066
4066 4067 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0);
4067 4068
4068 4069 if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0)
4069 4070 return;
4070 4071
4071 4072 object = od[0].od_object;
4072 4073
4073 4074 /*
4074 4075 * Generate a known hash collision, and verify that
4075 4076 * we can lookup and remove both entries.
4076 4077 */
4077 4078 tx = dmu_tx_create(os);
4078 4079 dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
4079 4080 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
4080 4081 if (txg == 0)
4081 4082 return;
4082 4083 for (i = 0; i < 2; i++) {
4083 4084 value[i] = i;
4084 4085 VERIFY3U(0, ==, zap_add(os, object, hc[i], sizeof (uint64_t),
4085 4086 1, &value[i], tx));
4086 4087 }
4087 4088 for (i = 0; i < 2; i++) {
4088 4089 VERIFY3U(EEXIST, ==, zap_add(os, object, hc[i],
4089 4090 sizeof (uint64_t), 1, &value[i], tx));
4090 4091 VERIFY3U(0, ==,
4091 4092 zap_length(os, object, hc[i], &zl_intsize, &zl_ints));
4092 4093 ASSERT3U(zl_intsize, ==, sizeof (uint64_t));
4093 4094 ASSERT3U(zl_ints, ==, 1);
4094 4095 }
4095 4096 for (i = 0; i < 2; i++) {
4096 4097 VERIFY3U(0, ==, zap_remove(os, object, hc[i], tx));
4097 4098 }
4098 4099 dmu_tx_commit(tx);
4099 4100
4100 4101 /*
4101 4102 * Generate a bunch of random entries.
4102 4103 */
4103 4104 ints = MAX(ZTEST_ZAP_MIN_INTS, object % ZTEST_ZAP_MAX_INTS);
4104 4105
4105 4106 prop = ztest_random(ZTEST_ZAP_MAX_PROPS);
4106 4107 (void) sprintf(propname, "prop_%llu", (u_longlong_t)prop);
4107 4108 (void) sprintf(txgname, "txg_%llu", (u_longlong_t)prop);
4108 4109 bzero(value, sizeof (value));
4109 4110 last_txg = 0;
4110 4111
4111 4112 /*
4112 4113 * If these zap entries already exist, validate their contents.
4113 4114 */
4114 4115 error = zap_length(os, object, txgname, &zl_intsize, &zl_ints);
4115 4116 if (error == 0) {
4116 4117 ASSERT3U(zl_intsize, ==, sizeof (uint64_t));
4117 4118 ASSERT3U(zl_ints, ==, 1);
4118 4119
4119 4120 VERIFY(zap_lookup(os, object, txgname, zl_intsize,
4120 4121 zl_ints, &last_txg) == 0);
4121 4122
4122 4123 VERIFY(zap_length(os, object, propname, &zl_intsize,
4123 4124 &zl_ints) == 0);
4124 4125
4125 4126 ASSERT3U(zl_intsize, ==, sizeof (uint64_t));
4126 4127 ASSERT3U(zl_ints, ==, ints);
4127 4128
4128 4129 VERIFY(zap_lookup(os, object, propname, zl_intsize,
4129 4130 zl_ints, value) == 0);
4130 4131
4131 4132 for (i = 0; i < ints; i++) {
4132 4133 ASSERT3U(value[i], ==, last_txg + object + i);
4133 4134 }
4134 4135 } else {
4135 4136 ASSERT3U(error, ==, ENOENT);
4136 4137 }
4137 4138
4138 4139 /*
4139 4140 * Atomically update two entries in our zap object.
4140 4141 * The first is named txg_%llu, and contains the txg
4141 4142 * in which the property was last updated. The second
4142 4143 * is named prop_%llu, and the nth element of its value
4143 4144 * should be txg + object + n.
4144 4145 */
4145 4146 tx = dmu_tx_create(os);
4146 4147 dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
4147 4148 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
4148 4149 if (txg == 0)
4149 4150 return;
4150 4151
4151 4152 if (last_txg > txg)
4152 4153 fatal(0, "zap future leak: old %llu new %llu", last_txg, txg);
4153 4154
4154 4155 for (i = 0; i < ints; i++)
4155 4156 value[i] = txg + object + i;
4156 4157
4157 4158 VERIFY3U(0, ==, zap_update(os, object, txgname, sizeof (uint64_t),
4158 4159 1, &txg, tx));
4159 4160 VERIFY3U(0, ==, zap_update(os, object, propname, sizeof (uint64_t),
4160 4161 ints, value, tx));
4161 4162
4162 4163 dmu_tx_commit(tx);
4163 4164
4164 4165 /*
4165 4166 * Remove a random pair of entries.
4166 4167 */
4167 4168 prop = ztest_random(ZTEST_ZAP_MAX_PROPS);
4168 4169 (void) sprintf(propname, "prop_%llu", (u_longlong_t)prop);
4169 4170 (void) sprintf(txgname, "txg_%llu", (u_longlong_t)prop);
4170 4171
4171 4172 error = zap_length(os, object, txgname, &zl_intsize, &zl_ints);
4172 4173
4173 4174 if (error == ENOENT)
4174 4175 return;
4175 4176
4176 4177 ASSERT0(error);
4177 4178
4178 4179 tx = dmu_tx_create(os);
4179 4180 dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
4180 4181 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
4181 4182 if (txg == 0)
4182 4183 return;
4183 4184 VERIFY3U(0, ==, zap_remove(os, object, txgname, tx));
4184 4185 VERIFY3U(0, ==, zap_remove(os, object, propname, tx));
4185 4186 dmu_tx_commit(tx);
4186 4187 }
4187 4188
4188 4189 /*
4189 4190 * Testcase to test the upgrading of a microzap to fatzap.
4190 4191 */
4191 4192 void
4192 4193 ztest_fzap(ztest_ds_t *zd, uint64_t id)
4193 4194 {
4194 4195 objset_t *os = zd->zd_os;
4195 4196 ztest_od_t od[1];
4196 4197 uint64_t object, txg;
4197 4198
4198 4199 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0);
4199 4200
4200 4201 if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0)
4201 4202 return;
4202 4203
4203 4204 object = od[0].od_object;
4204 4205
4205 4206 /*
4206 4207 * Add entries to this ZAP and make sure it spills over
4207 4208 * and gets upgraded to a fatzap. Also, since we are adding
4208 4209 * 2050 entries we should see ptrtbl growth and leaf-block split.
4209 4210 */
4210 4211 for (int i = 0; i < 2050; i++) {
4211 4212 char name[MAXNAMELEN];
4212 4213 uint64_t value = i;
4213 4214 dmu_tx_t *tx;
4214 4215 int error;
4215 4216
4216 4217 (void) snprintf(name, sizeof (name), "fzap-%llu-%llu",
4217 4218 id, value);
4218 4219
4219 4220 tx = dmu_tx_create(os);
4220 4221 dmu_tx_hold_zap(tx, object, B_TRUE, name);
4221 4222 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
4222 4223 if (txg == 0)
4223 4224 return;
4224 4225 error = zap_add(os, object, name, sizeof (uint64_t), 1,
4225 4226 &value, tx);
4226 4227 ASSERT(error == 0 || error == EEXIST);
4227 4228 dmu_tx_commit(tx);
4228 4229 }
4229 4230 }
4230 4231
4231 4232 /* ARGSUSED */
4232 4233 void
4233 4234 ztest_zap_parallel(ztest_ds_t *zd, uint64_t id)
4234 4235 {
4235 4236 objset_t *os = zd->zd_os;
4236 4237 ztest_od_t od[1];
4237 4238 uint64_t txg, object, count, wsize, wc, zl_wsize, zl_wc;
4238 4239 dmu_tx_t *tx;
4239 4240 int i, namelen, error;
4240 4241 int micro = ztest_random(2);
4241 4242 char name[20], string_value[20];
4242 4243 void *data;
4243 4244
4244 4245 ztest_od_init(&od[0], ID_PARALLEL, FTAG, micro, DMU_OT_ZAP_OTHER, 0, 0);
4245 4246
4246 4247 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
4247 4248 return;
4248 4249
4249 4250 object = od[0].od_object;
4250 4251
4251 4252 /*
4252 4253 * Generate a random name of the form 'xxx.....' where each
4253 4254 * x is a random printable character and the dots are dots.
4254 4255 * There are 94 such characters, and the name length goes from
4255 4256 * 6 to 20, so there are 94^3 * 15 = 12,458,760 possible names.
4256 4257 */
4257 4258 namelen = ztest_random(sizeof (name) - 5) + 5 + 1;
4258 4259
4259 4260 for (i = 0; i < 3; i++)
4260 4261 name[i] = '!' + ztest_random('~' - '!' + 1);
4261 4262 for (; i < namelen - 1; i++)
4262 4263 name[i] = '.';
4263 4264 name[i] = '\0';
4264 4265
4265 4266 if ((namelen & 1) || micro) {
4266 4267 wsize = sizeof (txg);
4267 4268 wc = 1;
4268 4269 data = &txg;
4269 4270 } else {
4270 4271 wsize = 1;
4271 4272 wc = namelen;
4272 4273 data = string_value;
4273 4274 }
4274 4275
4275 4276 count = -1ULL;
4276 4277 VERIFY0(zap_count(os, object, &count));
4277 4278 ASSERT(count != -1ULL);
4278 4279
4279 4280 /*
4280 4281 * Select an operation: length, lookup, add, update, remove.
4281 4282 */
4282 4283 i = ztest_random(5);
4283 4284
4284 4285 if (i >= 2) {
4285 4286 tx = dmu_tx_create(os);
4286 4287 dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
4287 4288 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
4288 4289 if (txg == 0)
4289 4290 return;
4290 4291 bcopy(name, string_value, namelen);
4291 4292 } else {
4292 4293 tx = NULL;
4293 4294 txg = 0;
4294 4295 bzero(string_value, namelen);
4295 4296 }
4296 4297
4297 4298 switch (i) {
4298 4299
4299 4300 case 0:
4300 4301 error = zap_length(os, object, name, &zl_wsize, &zl_wc);
4301 4302 if (error == 0) {
4302 4303 ASSERT3U(wsize, ==, zl_wsize);
4303 4304 ASSERT3U(wc, ==, zl_wc);
4304 4305 } else {
4305 4306 ASSERT3U(error, ==, ENOENT);
4306 4307 }
4307 4308 break;
4308 4309
4309 4310 case 1:
4310 4311 error = zap_lookup(os, object, name, wsize, wc, data);
4311 4312 if (error == 0) {
4312 4313 if (data == string_value &&
4313 4314 bcmp(name, data, namelen) != 0)
4314 4315 fatal(0, "name '%s' != val '%s' len %d",
4315 4316 name, data, namelen);
4316 4317 } else {
4317 4318 ASSERT3U(error, ==, ENOENT);
4318 4319 }
4319 4320 break;
4320 4321
4321 4322 case 2:
4322 4323 error = zap_add(os, object, name, wsize, wc, data, tx);
4323 4324 ASSERT(error == 0 || error == EEXIST);
4324 4325 break;
4325 4326
4326 4327 case 3:
4327 4328 VERIFY(zap_update(os, object, name, wsize, wc, data, tx) == 0);
4328 4329 break;
4329 4330
4330 4331 case 4:
4331 4332 error = zap_remove(os, object, name, tx);
4332 4333 ASSERT(error == 0 || error == ENOENT);
4333 4334 break;
4334 4335 }
4335 4336
4336 4337 if (tx != NULL)
4337 4338 dmu_tx_commit(tx);
4338 4339 }
4339 4340
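[Editorial note] A standalone sketch of the name scheme described in the function above ('xxx.....'), with rand() standing in for ztest_random() so it compiles outside ztest:

/*
 * Editorial sketch: generate a name of the form 'xxx.....' as in
 * ztest_zap_parallel(); rand() stands in for ztest_random().
 */
#include <stdlib.h>

static void
make_zap_name(char name[20])
{
	int namelen = rand() % (20 - 5) + 5 + 1;	/* 6..20 */
	int i;

	/* Three random printable characters ('!' through '~' is 94). */
	for (i = 0; i < 3; i++)
		name[i] = '!' + rand() % ('~' - '!' + 1);
	/* The dots are dots. */
	for (; i < namelen - 1; i++)
		name[i] = '.';
	name[i] = '\0';
}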
4340 4341 /*
4341 4342 * Commit callback data.
4342 4343 */
4343 4344 typedef struct ztest_cb_data {
4344 4345 list_node_t zcd_node;
4345 4346 uint64_t zcd_txg;
4346 4347 int zcd_expected_err;
4347 4348 boolean_t zcd_added;
4348 4349 boolean_t zcd_called;
4349 4350 spa_t *zcd_spa;
4350 4351 } ztest_cb_data_t;
4351 4352
4352 4353 /* This is the actual commit callback function */
4353 4354 static void
4354 4355 ztest_commit_callback(void *arg, int error)
4355 4356 {
4356 4357 ztest_cb_data_t *data = arg;
4357 4358 uint64_t synced_txg;
4358 4359
4359 4360 VERIFY(data != NULL);
4360 4361 VERIFY3S(data->zcd_expected_err, ==, error);
4361 4362 VERIFY(!data->zcd_called);
4362 4363
4363 4364 synced_txg = spa_last_synced_txg(data->zcd_spa);
4364 4365 if (data->zcd_txg > synced_txg)
4365 4366 fatal(0, "commit callback of txg %" PRIu64 " called prematurely"
4366 4367 ", last synced txg = %" PRIu64 "\n", data->zcd_txg,
4367 4368 synced_txg);
4368 4369
4369 4370 data->zcd_called = B_TRUE;
4370 4371
4371 4372 if (error == ECANCELED) {
4372 4373 ASSERT0(data->zcd_txg);
4373 4374 ASSERT(!data->zcd_added);
4374 4375
4375 4376 /*
4376 4377 * The private callback data should be destroyed here, but
4377 4378 * since we are going to check the zcd_called field after
4378 4379 * dmu_tx_abort(), we will destroy it there.
4379 4380 */
4380 4381 return;
4381 4382 }
4382 4383
4383 4384 /* Was this callback added to the global callback list? */
4384 4385 if (!data->zcd_added)
4385 4386 goto out;
4386 4387
4387 4388 ASSERT3U(data->zcd_txg, !=, 0);
4388 4389
4389 4390 /* Remove our callback from the list */
4390 4391 (void) mutex_lock(&zcl.zcl_callbacks_lock);
4391 4392 list_remove(&zcl.zcl_callbacks, data);
4392 4393 (void) mutex_unlock(&zcl.zcl_callbacks_lock);
4393 4394
4394 4395 out:
4395 4396 umem_free(data, sizeof (ztest_cb_data_t));
4396 4397 }
4397 4398
4398 4399 /* Allocate and initialize callback data structure */
4399 4400 static ztest_cb_data_t *
4400 4401 ztest_create_cb_data(objset_t *os, uint64_t txg)
4401 4402 {
4402 4403 ztest_cb_data_t *cb_data;
4403 4404
4404 4405 cb_data = umem_zalloc(sizeof (ztest_cb_data_t), UMEM_NOFAIL);
4405 4406
4406 4407 cb_data->zcd_txg = txg;
4407 4408 cb_data->zcd_spa = dmu_objset_spa(os);
4408 4409
4409 4410 return (cb_data);
4410 4411 }
4411 4412
4412 4413 /*
4413 4414 * If a number of txgs equal to this threshold have been created after a commit
4414 4415 * callback has been registered but not called, then we assume there is an
4415 4416 * implementation bug.
4416 4417 */
4417 4418 #define ZTEST_COMMIT_CALLBACK_THRESH (TXG_CONCURRENT_STATES + 2)
4418 4419
4419 4420 /*
4420 4421 * Commit callback test.
4421 4422 */
4422 4423 void
4423 4424 ztest_dmu_commit_callbacks(ztest_ds_t *zd, uint64_t id)
4424 4425 {
4425 4426 objset_t *os = zd->zd_os;
4426 4427 ztest_od_t od[1];
4427 4428 dmu_tx_t *tx;
4428 4429 ztest_cb_data_t *cb_data[3], *tmp_cb;
4429 4430 uint64_t old_txg, txg;
4430 4431 int i, error = 0; /* read at "if (!error)" below */
4431 4432
4432 4433 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0);
4433 4434
4434 4435 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
4435 4436 return;
4436 4437
4437 4438 tx = dmu_tx_create(os);
4438 4439
4439 4440 cb_data[0] = ztest_create_cb_data(os, 0);
4440 4441 dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[0]);
4441 4442
4442 4443 dmu_tx_hold_write(tx, od[0].od_object, 0, sizeof (uint64_t));
4443 4444
4444 4445 /* Every once in a while, abort the transaction on purpose */
4445 4446 if (ztest_random(100) == 0)
4446 4447 error = -1;
4447 4448
4448 4449 if (!error)
4449 4450 error = dmu_tx_assign(tx, TXG_NOWAIT);
4450 4451
4451 4452 txg = error ? 0 : dmu_tx_get_txg(tx);
4452 4453
4453 4454 cb_data[0]->zcd_txg = txg;
4454 4455 cb_data[1] = ztest_create_cb_data(os, txg);
4455 4456 dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[1]);
4456 4457
4457 4458 if (error) {
4458 4459 /*
4459 4460 * It's not a strict requirement to call the registered
4460 4461 * callbacks from inside dmu_tx_abort(), but that's what
4461 4462 * happens in the current implementation, so we check
4462 4463 * for that.
4463 4464 */
4464 4465 for (i = 0; i < 2; i++) {
4465 4466 cb_data[i]->zcd_expected_err = ECANCELED;
4466 4467 VERIFY(!cb_data[i]->zcd_called);
4467 4468 }
4468 4469
4469 4470 dmu_tx_abort(tx);
4470 4471
4471 4472 for (i = 0; i < 2; i++) {
4472 4473 VERIFY(cb_data[i]->zcd_called);
4473 4474 umem_free(cb_data[i], sizeof (ztest_cb_data_t));
4474 4475 }
4475 4476
4476 4477 return;
4477 4478 }
4478 4479
4479 4480 cb_data[2] = ztest_create_cb_data(os, txg);
4480 4481 dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[2]);
4481 4482
4482 4483 /*
4483 4484 * Read existing data to make sure there isn't a future leak.
4484 4485 */
4485 4486 VERIFY(0 == dmu_read(os, od[0].od_object, 0, sizeof (uint64_t),
4486 4487 &old_txg, DMU_READ_PREFETCH));
4487 4488
4488 4489 if (old_txg > txg)
4489 4490 fatal(0, "future leak: got %" PRIu64 ", open txg is %" PRIu64,
4490 4491 old_txg, txg);
4491 4492
4492 4493 dmu_write(os, od[0].od_object, 0, sizeof (uint64_t), &txg, tx);
4493 4494
4494 4495 (void) mutex_lock(&zcl.zcl_callbacks_lock);
4495 4496
4496 4497 /*
4497 4498 * Since commit callbacks don't have any ordering requirement and since
4498 4499 * it is theoretically possible for a commit callback to be called
4499 4500 * after an arbitrary amount of time has elapsed since its txg has been
4500 4501 * synced, it is difficult to reliably determine whether a commit
4501 4502 * callback hasn't been called due to high load or due to a flawed
4502 4503 * implementation.
4503 4504 *
4504 4505 * In practice, we will assume that if after a certain number of txgs a
4505 4506 * commit callback hasn't been called, then most likely there's an
4506 4507 * implementation bug.
4507 4508 */
4508 4509 tmp_cb = list_head(&zcl.zcl_callbacks);
4509 4510 if (tmp_cb != NULL &&
4510 4511 tmp_cb->zcd_txg > txg - ZTEST_COMMIT_CALLBACK_THRESH) {
4511 4512 fatal(0, "Commit callback threshold exceeded, oldest txg: %"
4512 4513 PRIu64 ", open txg: %" PRIu64 "\n", tmp_cb->zcd_txg, txg);
4513 4514 }
4514 4515
4515 4516 /*
4516 4517 * Let's find the place to insert our callbacks.
4517 4518 *
4518 4519 * Even though the list is ordered by txg, it is possible for the
4519 4520 * insertion point to not be the end because our txg may already be
4520 4521 * quiescing at this point and other callbacks in the open txg
4521 4522 * (from other objsets) may have sneaked in.
4522 4523 */
4523 4524 tmp_cb = list_tail(&zcl.zcl_callbacks);
4524 4525 while (tmp_cb != NULL && tmp_cb->zcd_txg > txg)
4525 4526 tmp_cb = list_prev(&zcl.zcl_callbacks, tmp_cb);
4526 4527
4527 4528 /* Add the 3 callbacks to the list */
4528 4529 for (i = 0; i < 3; i++) {
4529 4530 if (tmp_cb == NULL)
4530 4531 list_insert_head(&zcl.zcl_callbacks, cb_data[i]);
4531 4532 else
4532 4533 list_insert_after(&zcl.zcl_callbacks, tmp_cb,
4533 4534 cb_data[i]);
4534 4535
4535 4536 cb_data[i]->zcd_added = B_TRUE;
4536 4537 VERIFY(!cb_data[i]->zcd_called);
4537 4538
4538 4539 tmp_cb = cb_data[i];
4539 4540 }
4540 4541
4541 4542 (void) mutex_unlock(&zcl.zcl_callbacks_lock);
4542 4543
4543 4544 dmu_tx_commit(tx);
4544 4545 }
4545 4546
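[Editorial note] Distilled from the function above, a hedged sketch of the minimal commit-callback usage pattern: register before assigning, then the callback fires with 0 once the txg syncs, or with ECANCELED if the tx is aborted. It assumes ztest's ZFS headers; my_cb and write_with_callback are illustrative names.

/* Editorial sketch; my_cb/write_with_callback are illustrative. */
static void
my_cb(void *arg, int error)
{
	/* error is 0 on commit, ECANCELED via dmu_tx_abort(). */
}

static void
write_with_callback(objset_t *os, uint64_t object)
{
	dmu_tx_t *tx = dmu_tx_create(os);
	uint64_t val = 0;

	dmu_tx_hold_write(tx, object, 0, sizeof (val));
	dmu_tx_callback_register(tx, my_cb, NULL);

	if (dmu_tx_assign(tx, TXG_NOWAIT) != 0) {
		dmu_tx_abort(tx);	/* my_cb called with ECANCELED */
		return;
	}
	dmu_write(os, object, 0, sizeof (val), &val, tx);
	dmu_tx_commit(tx);		/* my_cb called after txg sync */
}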
4546 4547 /* ARGSUSED */
4547 4548 void
4548 4549 ztest_dsl_prop_get_set(ztest_ds_t *zd, uint64_t id)
4549 4550 {
4550 4551 zfs_prop_t proplist[] = {
4551 4552 ZFS_PROP_CHECKSUM,
4552 4553 ZFS_PROP_COMPRESSION,
4553 4554 ZFS_PROP_COPIES,
4554 4555 ZFS_PROP_DEDUP
4555 4556 };
4556 4557
4557 4558 (void) rw_rdlock(&ztest_name_lock);
4558 4559
4559 4560 for (int p = 0; p < sizeof (proplist) / sizeof (proplist[0]); p++)
4560 4561 (void) ztest_dsl_prop_set_uint64(zd->zd_name, proplist[p],
4561 4562 ztest_random_dsl_prop(proplist[p]), (int)ztest_random(2));
4562 4563
4563 4564 (void) rw_unlock(&ztest_name_lock);
4564 4565 }
4565 4566
4566 4567 /* ARGSUSED */
4567 4568 void
4568 4569 ztest_spa_prop_get_set(ztest_ds_t *zd, uint64_t id)
4569 4570 {
4570 4571 nvlist_t *props = NULL;
4571 4572
4572 4573 (void) rw_rdlock(&ztest_name_lock);
4573 4574
4574 4575 (void) ztest_spa_prop_set_uint64(ZPOOL_PROP_DEDUPDITTO,
4575 4576 ZIO_DEDUPDITTO_MIN + ztest_random(ZIO_DEDUPDITTO_MIN));
4576 4577
4577 4578 VERIFY0(spa_prop_get(ztest_spa, &props));
4578 4579
4579 4580 if (ztest_opts.zo_verbose >= 6)
4580 4581 dump_nvlist(props, 4);
4581 4582
4582 4583 nvlist_free(props);
4583 4584
4584 4585 (void) rw_unlock(&ztest_name_lock);
4585 4586 }
4586 4587
4587 4588 static int
4588 4589 user_release_one(const char *snapname, const char *holdname)
4589 4590 {
4590 4591 nvlist_t *snaps, *holds;
4591 4592 int error;
4592 4593
4593 4594 snaps = fnvlist_alloc();
4594 4595 holds = fnvlist_alloc();
4595 4596 fnvlist_add_boolean(holds, holdname);
4596 4597 fnvlist_add_nvlist(snaps, snapname, holds);
4597 4598 fnvlist_free(holds);
4598 4599 error = dsl_dataset_user_release(snaps, NULL);
4599 4600 fnvlist_free(snaps);
4600 4601 return (error);
4601 4602 }
4602 4603
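[Editorial note] For symmetry with user_release_one() above, a hedged sketch of the hold-side counterpart, assembled from the same fnvlist calls the test itself uses below (fnvlist_add_string() plus dsl_dataset_user_hold()); the name user_hold_one is illustrative.

/* Editorial sketch; user_hold_one is an illustrative name. */
static int
user_hold_one(const char *snapname, const char *holdname)
{
	nvlist_t *holds;
	int error;

	holds = fnvlist_alloc();
	fnvlist_add_string(holds, snapname, holdname);
	error = dsl_dataset_user_hold(holds, 0, NULL);
	fnvlist_free(holds);
	return (error);
}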
4603 4604 /*
4604 4605 * Test snapshot hold/release and deferred destroy.
4605 4606 */
4606 4607 void
4607 4608 ztest_dmu_snapshot_hold(ztest_ds_t *zd, uint64_t id)
4608 4609 {
4609 4610 int error;
4610 4611 objset_t *os = zd->zd_os;
4611 4612 objset_t *origin;
4612 4613 char snapname[100];
4613 4614 char fullname[100];
4614 4615 char clonename[100];
4615 4616 char tag[100];
4616 4617 char osname[MAXNAMELEN];
4617 4618 nvlist_t *holds;
4618 4619
4619 4620 (void) rw_rdlock(&ztest_name_lock);
4620 4621
4621 4622 dmu_objset_name(os, osname);
4622 4623
4623 4624 (void) snprintf(snapname, sizeof (snapname), "sh1_%llu", id);
4624 4625 (void) snprintf(fullname, sizeof (fullname), "%s@%s", osname, snapname);
4625 4626 (void) snprintf(clonename, sizeof (clonename),
4626 4627 "%s/ch1_%llu", osname, id);
4627 4628 (void) snprintf(tag, sizeof (tag), "tag_%llu", id);
4628 4629
4629 4630 /*
4630 4631 * Clean up from any previous run.
4631 4632 */
4632 4633 error = dsl_destroy_head(clonename);
4633 4634 if (error != ENOENT)
4634 4635 ASSERT0(error);
4635 4636 error = user_release_one(fullname, tag);
4636 4637 if (error != ESRCH && error != ENOENT)
4637 4638 ASSERT0(error);
4638 4639 error = dsl_destroy_snapshot(fullname, B_FALSE);
4639 4640 if (error != ENOENT)
4640 4641 ASSERT0(error);
4641 4642
4642 4643 /*
4643 4644 * Create snapshot, clone it, mark snap for deferred destroy,
4644 4645 * destroy clone, verify snap was also destroyed.
4645 4646 */
4646 4647 error = dmu_objset_snapshot_one(osname, snapname);
4647 4648 if (error) {
4648 4649 if (error == ENOSPC) {
4649 4650 ztest_record_enospc("dmu_objset_snapshot");
4650 4651 goto out;
4651 4652 }
4652 4653 fatal(0, "dmu_objset_snapshot(%s) = %d", fullname, error);
4653 4654 }
4654 4655
4655 4656 error = dmu_objset_clone(clonename, fullname);
4656 4657 if (error) {
4657 4658 if (error == ENOSPC) {
4658 4659 ztest_record_enospc("dmu_objset_clone");
4659 4660 goto out;
4660 4661 }
4661 4662 fatal(0, "dmu_objset_clone(%s) = %d", clonename, error);
4662 4663 }
4663 4664
4664 4665 error = dsl_destroy_snapshot(fullname, B_TRUE);
4665 4666 if (error) {
4666 4667 fatal(0, "dsl_destroy_snapshot(%s, B_TRUE) = %d",
4667 4668 fullname, error);
4668 4669 }
4669 4670
4670 4671 error = dsl_destroy_head(clonename);
4671 4672 if (error)
4672 4673 fatal(0, "dsl_destroy_head(%s) = %d", clonename, error);
4673 4674
4674 4675 error = dmu_objset_hold(fullname, FTAG, &origin);
4675 4676 if (error != ENOENT)
4676 4677 fatal(0, "dmu_objset_hold(%s) = %d", fullname, error);
4677 4678
4678 4679 /*
4679 4680 * Create snapshot, add temporary hold, verify that we can't
4680 4681 * destroy a held snapshot, mark for deferred destroy,
4681 4682 * release hold, verify snapshot was destroyed.
4682 4683 */
4683 4684 error = dmu_objset_snapshot_one(osname, snapname);
4684 4685 if (error) {
4685 4686 if (error == ENOSPC) {
4686 4687 ztest_record_enospc("dmu_objset_snapshot");
4687 4688 goto out;
4688 4689 }
4689 4690 fatal(0, "dmu_objset_snapshot(%s) = %d", fullname, error);
4690 4691 }
4691 4692
4692 4693 holds = fnvlist_alloc();
4693 4694 fnvlist_add_string(holds, fullname, tag);
4694 4695 error = dsl_dataset_user_hold(holds, 0, NULL);
4695 4696 fnvlist_free(holds);
4696 4697
4697 4698 if (error)
4698 4699 fatal(0, "dsl_dataset_user_hold(%s, %s) = %d", fullname, tag, error);
4699 4700
4700 4701 error = dsl_destroy_snapshot(fullname, B_FALSE);
4701 4702 if (error != EBUSY) {
4702 4703 fatal(0, "dsl_destroy_snapshot(%s, B_FALSE) = %d",
4703 4704 fullname, error);
4704 4705 }
4705 4706
4706 4707 error = dsl_destroy_snapshot(fullname, B_TRUE);
4707 4708 if (error) {
4708 4709 fatal(0, "dsl_destroy_snapshot(%s, B_TRUE) = %d",
4709 4710 fullname, error);
4710 4711 }
4711 4712
4712 4713 error = user_release_one(fullname, tag);
4713 4714 if (error)
4714 - fatal(0, "user_release_one(%s)", fullname, tag);
4715 + fatal(0, "user_release_one(%s, %s) = %d", fullname, tag, error);
4715 4716
4716 4717 VERIFY3U(dmu_objset_hold(fullname, FTAG, &origin), ==, ENOENT);
4717 4718
4718 4719 out:
4719 4720 (void) rw_unlock(&ztest_name_lock);
4720 4721 }
4721 4722
4722 4723 /*
4723 4724 * Inject random faults into the on-disk data.
4724 4725 */
4725 4726 /* ARGSUSED */
4726 4727 void
4727 4728 ztest_fault_inject(ztest_ds_t *zd, uint64_t id)
4728 4729 {
4729 4730 ztest_shared_t *zs = ztest_shared;
4730 4731 spa_t *spa = ztest_spa;
4731 4732 int fd;
4732 4733 uint64_t offset;
4733 4734 uint64_t leaves;
4734 4735 uint64_t bad = 0x1990c0ffeedecade;
4735 4736 uint64_t top, leaf;
4736 4737 char path0[MAXPATHLEN];
4737 4738 char pathrand[MAXPATHLEN];
4738 4739 size_t fsize;
4739 4740 int bshift = SPA_MAXBLOCKSHIFT + 2; /* don't scrog all labels */
4740 4741 int iters = 1000;
4741 4742 int maxfaults;
4742 4743 int mirror_save;
4743 4744 vdev_t *vd0 = NULL;
4744 4745 uint64_t guid0 = 0;
4745 4746 boolean_t islog = B_FALSE;
4746 4747
4747 4748 VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
4748 4749 maxfaults = MAXFAULTS();
4749 4750 leaves = MAX(zs->zs_mirrors, 1) * ztest_opts.zo_raidz;
4750 4751 mirror_save = zs->zs_mirrors;
4751 4752 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
4752 4753
4753 4754 ASSERT(leaves >= 1);
4754 4755
4755 4756 /*
4756 4757 * We need SCL_STATE here because we're going to look at vd0->vdev_tsd.
4757 4758 */
4758 4759 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
4759 4760
4760 4761 if (ztest_random(2) == 0) {
4761 4762 /*
4762 4763 * Inject errors on a normal data device or slog device.
4763 4764 */
4764 4765 top = ztest_random_vdev_top(spa, B_TRUE);
4765 4766 leaf = ztest_random(leaves) + zs->zs_splits;
4766 4767
4767 4768 /*
4768 4769 * Generate paths to the first leaf in this top-level vdev,
4769 4770 * and to the random leaf we selected. We'll induce transient
4770 4771 * write failures and random online/offline activity on leaf 0,
4771 4772 * and we'll write random garbage to the randomly chosen leaf.
4772 4773 */
4773 4774 (void) snprintf(path0, sizeof (path0), ztest_dev_template,
4774 4775 ztest_opts.zo_dir, ztest_opts.zo_pool,
4775 4776 top * leaves + zs->zs_splits);
4776 4777 (void) snprintf(pathrand, sizeof (pathrand), ztest_dev_template,
4777 4778 ztest_opts.zo_dir, ztest_opts.zo_pool,
4778 4779 top * leaves + leaf);
4779 4780
4780 4781 vd0 = vdev_lookup_by_path(spa->spa_root_vdev, path0);
4781 4782 if (vd0 != NULL && vd0->vdev_top->vdev_islog)
4782 4783 islog = B_TRUE;
4783 4784
4784 4785 if (vd0 != NULL && maxfaults != 1) {
4785 4786 /*
4786 4787 * Make vd0 explicitly claim to be unreadable,
4787 4788 * or unwriteable, or reach behind its back
4788 4789 * and close the underlying fd. We can do this if
4789 4790 * maxfaults == 0 because we'll fail and reexecute,
4790 4791 * and we can do it if maxfaults >= 2 because we'll
4791 4792 * have enough redundancy. If maxfaults == 1, the
4792 4793 * combination of this with injection of random data
4793 4794 * corruption below exceeds the pool's fault tolerance.
4794 4795 */
4795 4796 vdev_file_t *vf = vd0->vdev_tsd;
4796 4797
4797 4798 if (vf != NULL && ztest_random(3) == 0) {
4798 4799 (void) close(vf->vf_vnode->v_fd);
4799 4800 vf->vf_vnode->v_fd = -1;
4800 4801 } else if (ztest_random(2) == 0) {
4801 4802 vd0->vdev_cant_read = B_TRUE;
4802 4803 } else {
4803 4804 vd0->vdev_cant_write = B_TRUE;
4804 4805 }
4805 4806 guid0 = vd0->vdev_guid;
4806 4807 }
4807 4808 } else {
4808 4809 /*
4809 4810 * Inject errors on an l2cache device.
4810 4811 */
4811 4812 spa_aux_vdev_t *sav = &spa->spa_l2cache;
4812 4813
4813 4814 if (sav->sav_count == 0) {
4814 4815 spa_config_exit(spa, SCL_STATE, FTAG);
4815 4816 return;
4816 4817 }
4817 4818 vd0 = sav->sav_vdevs[ztest_random(sav->sav_count)];
4818 4819 guid0 = vd0->vdev_guid;
4819 4820 (void) strcpy(path0, vd0->vdev_path);
4820 4821 (void) strcpy(pathrand, vd0->vdev_path);
4821 4822
4822 4823 leaf = 0;
4823 4824 leaves = 1;
4824 4825 maxfaults = INT_MAX; /* no limit on cache devices */
4825 4826 }
4826 4827
4827 4828 spa_config_exit(spa, SCL_STATE, FTAG);
4828 4829
4829 4830 /*
4830 4831 * If we can tolerate two or more faults, or we're dealing
4831 4832 * with a slog, randomly online/offline vd0.
4832 4833 */
4833 4834 if ((maxfaults >= 2 || islog) && guid0 != 0) {
4834 4835 if (ztest_random(10) < 6) {
4835 4836 int flags = (ztest_random(2) == 0 ?
4836 4837 ZFS_OFFLINE_TEMPORARY : 0);
4837 4838
4838 4839 /*
4839 4840 * We have to grab the zs_name_lock as writer to
4840 4841 * prevent a race between offlining a slog and
4841 4842 * destroying a dataset. Offlining the slog will
4842 4843 * grab a reference on the dataset which may cause
4843 4844 * dmu_objset_destroy() to fail with EBUSY, thus
4844 4845 * leaving the dataset in an inconsistent state.
4845 4846 */
4846 4847 if (islog)
4847 4848 (void) rw_wrlock(&ztest_name_lock);
4848 4849
4849 4850 VERIFY(vdev_offline(spa, guid0, flags) != EBUSY);
4850 4851
4851 4852 if (islog)
4852 4853 (void) rw_unlock(&ztest_name_lock);
4853 4854 } else {
4854 4855 /*
4855 4856 * Ideally we would like to be able to randomly
4856 4857 * call vdev_[on|off]line without holding locks
4857 4858 * to force unpredictable failures but the side
4858 4859 * effects of vdev_[on|off]line prevent us from
4859 4860 * doing so. We grab the ztest_vdev_lock here to
4860 4861 * prevent a race between injection testing and
4861 4862 * aux_vdev removal.
4862 4863 */
4863 4864 VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
4864 4865 (void) vdev_online(spa, guid0, 0, NULL);
4865 4866 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
4866 4867 }
4867 4868 }
4868 4869
4869 4870 if (maxfaults == 0)
4870 4871 return;
4871 4872
4872 4873 /*
4873 4874 * We have at least single-fault tolerance, so inject data corruption.
4874 4875 */
4875 4876 fd = open(pathrand, O_RDWR);
4876 4877
4877 4878 if (fd == -1) /* we hit a gap in the device namespace */
4878 4879 return;
4879 4880
4880 4881 fsize = lseek(fd, 0, SEEK_END);
4881 4882
4882 4883 while (--iters != 0) {
4883 4884 offset = ztest_random(fsize / (leaves << bshift)) *
4884 4885 (leaves << bshift) + (leaf << bshift) +
4885 4886 (ztest_random(1ULL << (bshift - 1)) & -8ULL);
4886 4887
4887 4888 if (offset >= fsize)
4888 4889 continue;
4889 4890
4890 4891 VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
4891 4892 if (mirror_save != zs->zs_mirrors) {
4892 4893 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
4893 4894 (void) close(fd);
4894 4895 return;
4895 4896 }
4896 4897
4897 4898 if (pwrite(fd, &bad, sizeof (bad), offset) != sizeof (bad))
4898 4899 fatal(1, "can't inject bad word at 0x%llx in %s",
4899 4900 offset, pathrand);
4900 4901
4901 4902 VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
4902 4903
4903 4904 if (ztest_opts.zo_verbose >= 7)
4904 4905 (void) printf("injected bad word into %s,"
4905 4906 " offset 0x%llx\n", pathrand, (u_longlong_t)offset);
4906 4907 }
4907 4908
4908 4909 (void) close(fd);
4909 4910 }
4910 4911
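[Editorial note] The corruption offset computed in the loop above packs three choices into one expression. A hedged restatement with the parts named (pick_bad_offset is illustrative; ztest_random, fsize, leaves, leaf and bshift as in the function):

/*
 * Editorial restatement of the corruption-offset arithmetic above.
 * The device file is treated as consecutive groups of
 * (leaves << bshift) bytes; within a group, leaf k owns the k-th
 * (1 << bshift)-byte slice, and we damage a random 8-byte-aligned
 * word in the first half of that slice.
 */
static uint64_t
pick_bad_offset(uint64_t fsize, uint64_t leaves, uint64_t leaf, int bshift)
{
	uint64_t group = ztest_random(fsize / (leaves << bshift));
	uint64_t word = ztest_random(1ULL << (bshift - 1)) & -8ULL;

	return (group * (leaves << bshift) +	/* random group */
	    (leaf << bshift) +			/* this leaf's slice */
	    word);				/* aligned word within */
}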
4911 4912 /*
4912 4913 * Verify that DDT repair works as expected.
4913 4914 */
4914 4915 void
4915 4916 ztest_ddt_repair(ztest_ds_t *zd, uint64_t id)
4916 4917 {
4917 4918 ztest_shared_t *zs = ztest_shared;
4918 4919 spa_t *spa = ztest_spa;
4919 4920 objset_t *os = zd->zd_os;
4920 4921 ztest_od_t od[1];
4921 4922 uint64_t object, blocksize, txg, pattern, psize;
4922 4923 enum zio_checksum checksum = spa_dedup_checksum(spa);
4923 4924 dmu_buf_t *db;
4924 4925 dmu_tx_t *tx;
4925 4926 void *buf;
4926 4927 blkptr_t blk;
4927 4928 int copies = 2 * ZIO_DEDUPDITTO_MIN;
4928 4929
4929 4930 blocksize = ztest_random_blocksize();
4930 4931 blocksize = MIN(blocksize, 2048); /* because we write so many */
4931 4932
4932 4933 ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0);
4933 4934
4934 4935 if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
4935 4936 return;
4936 4937
4937 4938 /*
4938 4939 * Take the name lock as writer to prevent anyone else from changing
4939 4940 * the pool and dataset properties we need to maintain during this test.
4940 4941 */
4941 4942 (void) rw_wrlock(&ztest_name_lock);
4942 4943
4943 4944 if (ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_DEDUP, checksum,
4944 4945 B_FALSE) != 0 ||
4945 4946 ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_COPIES, 1,
4946 4947 B_FALSE) != 0) {
4947 4948 (void) rw_unlock(&ztest_name_lock);
4948 4949 return;
4949 4950 }
4950 4951
4951 4952 object = od[0].od_object;
4952 4953 blocksize = od[0].od_blocksize;
4953 4954 pattern = zs->zs_guid ^ dmu_objset_fsid_guid(os);
4954 4955
4955 4956 ASSERT(object != 0);
4956 4957
4957 4958 tx = dmu_tx_create(os);
4958 4959 dmu_tx_hold_write(tx, object, 0, copies * blocksize);
4959 4960 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
4960 4961 if (txg == 0) {
4961 4962 (void) rw_unlock(&ztest_name_lock);
4962 4963 return;
4963 4964 }
4964 4965
4965 4966 /*
4966 4967 * Write all the copies of our block.
4967 4968 */
4968 4969 for (int i = 0; i < copies; i++) {
4969 4970 uint64_t offset = i * blocksize;
4970 4971 int error = dmu_buf_hold(os, object, offset, FTAG, &db,
4971 4972 DMU_READ_NO_PREFETCH);
4972 4973 if (error != 0) {
4973 4974 fatal(B_FALSE, "dmu_buf_hold(%p, %llu, %llu) = %u",
4974 4975 os, (long long)object, (long long)offset, error);
4975 4976 }
4976 4977 ASSERT(db->db_offset == offset);
4977 4978 ASSERT(db->db_size == blocksize);
4978 4979 ASSERT(ztest_pattern_match(db->db_data, db->db_size, pattern) ||
4979 4980 ztest_pattern_match(db->db_data, db->db_size, 0ULL));
4980 4981 dmu_buf_will_fill(db, tx);
4981 4982 ztest_pattern_set(db->db_data, db->db_size, pattern);
4982 4983 dmu_buf_rele(db, FTAG);
4983 4984 }
4984 4985
4985 4986 dmu_tx_commit(tx);
4986 4987 txg_wait_synced(spa_get_dsl(spa), txg);
4987 4988
4988 4989 /*
4989 4990 * Find out what block we got.
4990 4991 */
4991 4992 VERIFY0(dmu_buf_hold(os, object, 0, FTAG, &db,
4992 4993 DMU_READ_NO_PREFETCH));
4993 4994 blk = *((dmu_buf_impl_t *)db)->db_blkptr;
4994 4995 dmu_buf_rele(db, FTAG);
4995 4996
4996 4997 /*
4997 4998 * Damage the block. Dedup-ditto will save us when we read it later.
4998 4999 */
4999 5000 psize = BP_GET_PSIZE(&blk);
5000 5001 buf = zio_buf_alloc(psize);
5001 5002 ztest_pattern_set(buf, psize, ~pattern);
5002 5003
5003 5004 (void) zio_wait(zio_rewrite(NULL, spa, 0, &blk,
5004 5005 buf, psize, NULL, NULL, ZIO_PRIORITY_SYNC_WRITE,
5005 5006 ZIO_FLAG_CANFAIL | ZIO_FLAG_INDUCE_DAMAGE, NULL));
5006 5007
5007 5008 zio_buf_free(buf, psize);
5008 5009
5009 5010 (void) rw_unlock(&ztest_name_lock);
5010 5011 }
5011 5012
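[Editorial note] The damage step above relies on ztest's pattern helpers. Their assumed semantics (fill a buffer with a repeating 64-bit word, verify every word matches) can be sketched as follows; this is an editorial sketch, not the actual ztest implementation.

/*
 * Editorial sketch of the assumed semantics of the pattern helpers
 * used above: fill with a repeating 64-bit word / check all words.
 */
#include <stdint.h>
#include <stddef.h>

static void
pattern_set(void *buf, size_t size, uint64_t value)
{
	uint64_t *ip = buf;
	uint64_t *ep = (uint64_t *)((char *)buf + size);

	while (ip < ep)
		*ip++ = value;
}

static int
pattern_match(const void *buf, size_t size, uint64_t value)
{
	const uint64_t *ip = buf;
	const uint64_t *ep = (const uint64_t *)((const char *)buf + size);

	while (ip < ep)
		if (*ip++ != value)
			return (0);
	return (1);
}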
5012 5013 /*
5013 5014 * Scrub the pool.
5014 5015 */
5015 5016 /* ARGSUSED */
5016 5017 void
5017 5018 ztest_scrub(ztest_ds_t *zd, uint64_t id)
5018 5019 {
5019 5020 spa_t *spa = ztest_spa;
5020 5021
5021 5022 (void) spa_scan(spa, POOL_SCAN_SCRUB);
5022 5023 (void) poll(NULL, 0, 100); /* wait a moment, then force a restart */
5023 5024 (void) spa_scan(spa, POOL_SCAN_SCRUB);
5024 5025 }
5025 5026
5026 5027 /*
5027 5028 * Change the guid for the pool.
5028 5029 */
5029 5030 /* ARGSUSED */
5030 5031 void
5031 5032 ztest_reguid(ztest_ds_t *zd, uint64_t id)
5032 5033 {
5033 5034 spa_t *spa = ztest_spa;
5034 5035 uint64_t orig, load;
5035 5036 int error;
5036 5037
5037 5038 orig = spa_guid(spa);
5038 5039 load = spa_load_guid(spa);
5039 5040
5040 5041 (void) rw_wrlock(&ztest_name_lock);
5041 5042 error = spa_change_guid(spa);
5042 5043 (void) rw_unlock(&ztest_name_lock);
5043 5044
5044 5045 if (error != 0)
5045 5046 return;
5046 5047
5047 5048 if (ztest_opts.zo_verbose >= 4) {
5048 5049 (void) printf("Changed guid old %llu -> %llu\n",
5049 5050 (u_longlong_t)orig, (u_longlong_t)spa_guid(spa));
5050 5051 }
5051 5052
5052 5053 VERIFY3U(orig, !=, spa_guid(spa));
5053 5054 VERIFY3U(load, ==, spa_load_guid(spa));
5054 5055 }
5055 5056
5056 5057 /*
5057 5058 * Rename the pool to a different name and then rename it back.
5058 5059 */
5059 5060 /* ARGSUSED */
5060 5061 void
5061 5062 ztest_spa_rename(ztest_ds_t *zd, uint64_t id)
5062 5063 {
5063 5064 char *oldname, *newname;
5064 5065 spa_t *spa;
5065 5066
5066 5067 (void) rw_wrlock(&ztest_name_lock);
5067 5068
5068 5069 oldname = ztest_opts.zo_pool;
5069 5070 newname = umem_alloc(strlen(oldname) + 5, UMEM_NOFAIL);
5070 5071 (void) strcpy(newname, oldname);
5071 5072 (void) strcat(newname, "_tmp");
5072 5073
5073 5074 /*
5074 5075 * Do the rename
5075 5076 */
5076 5077 VERIFY3U(0, ==, spa_rename(oldname, newname));
5077 5078
5078 5079 /*
5079 5080 * Try to open it under the old name, which shouldn't exist
5080 5081 */
5081 5082 VERIFY3U(ENOENT, ==, spa_open(oldname, &spa, FTAG));
5082 5083
5083 5084 /*
5084 5085 * Open it under the new name and make sure it's still the same spa_t.
5085 5086 */
5086 5087 VERIFY3U(0, ==, spa_open(newname, &spa, FTAG));
5087 5088
5088 5089 ASSERT(spa == ztest_spa);
5089 5090 spa_close(spa, FTAG);
5090 5091
5091 5092 /*
5092 5093 * Rename it back to the original
5093 5094 */
5094 5095 VERIFY3U(0, ==, spa_rename(newname, oldname));
5095 5096
5096 5097 /*
5097 5098 * Make sure it can still be opened
5098 5099 */
5099 5100 VERIFY3U(0, ==, spa_open(oldname, &spa, FTAG));
5100 5101
5101 5102 ASSERT(spa == ztest_spa);
5102 5103 spa_close(spa, FTAG);
5103 5104
5104 5105 umem_free(newname, strlen(newname) + 1);
5105 5106
5106 5107 (void) rw_unlock(&ztest_name_lock);
5107 5108 }
5108 5109
5109 5110 /*
5110 5111 * Verify pool integrity by running zdb.
5111 5112 */
5112 5113 static void
5113 5114 ztest_run_zdb(char *pool)
5114 5115 {
5115 5116 int status;
5116 5117 char zdb[MAXPATHLEN + MAXNAMELEN + 20];
5117 5118 char zbuf[1024];
5118 5119 char *bin;
5119 5120 char *ztest;
5120 5121 char *isa;
5121 5122 int isalen;
5122 5123 FILE *fp;
5123 5124
5124 5125 (void) realpath(getexecname(), zdb);
5125 5126
5126 5127 /* zdb lives in /usr/sbin, while ztest lives in /usr/bin */
5127 5128 bin = strstr(zdb, "/usr/bin/");
5128 5129 ztest = strstr(bin, "/ztest");
5129 5130 isa = bin + 8;
5130 5131 isalen = ztest - isa;
5131 5132 isa = strdup(isa);
5132 5133 /* LINTED */
5133 5134 (void) sprintf(bin,
5134 5135 "/usr/sbin%.*s/zdb -bcc%s%s -U %s %s",
5135 5136 isalen,
5136 5137 isa,
5137 5138 ztest_opts.zo_verbose >= 3 ? "s" : "",
5138 5139 ztest_opts.zo_verbose >= 4 ? "v" : "",
5139 5140 spa_config_path,
5140 5141 pool);
5141 5142 free(isa);
5142 5143
5143 5144 if (ztest_opts.zo_verbose >= 5)
5144 5145 (void) printf("Executing %s\n", strstr(zdb, "zdb "));
5145 5146
5146 5147 fp = popen(zdb, "r");
5147 5148
5148 5149 while (fgets(zbuf, sizeof (zbuf), fp) != NULL)
5149 5150 if (ztest_opts.zo_verbose >= 3)
5150 5151 (void) printf("%s", zbuf);
5151 5152
5152 5153 status = pclose(fp);
5153 5154
5154 5155 if (status == 0)
5155 5156 return;
5156 5157
5157 5158 ztest_dump_core = 0;
5158 5159 if (WIFEXITED(status))
5159 5160 fatal(0, "'%s' exit code %d", zdb, WEXITSTATUS(status));
5160 5161 else
5161 5162 fatal(0, "'%s' died with signal %d", zdb, WTERMSIG(status));
5162 5163 }
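
The strstr()/sprintf() surgery above rewrites the running ztest path into the matching zdb path, preserving any ISA subdirectory (e.g. amd64) between /usr/sbin and /zdb. A self-contained sketch of the same derivation, assuming a hypothetical execname and made-up cache and pool names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(void)
{
	char zdb[256] = "/usr/bin/amd64/ztest";	/* assumed execname */
	char *bin = strstr(zdb, "/usr/bin/");
	char *ztest = strstr(bin, "/ztest");
	char *isa = bin + 8;			/* skip "/usr/bin" */
	int isalen = ztest - isa;		/* "/amd64" -> 6; 0 for 32-bit */

	isa = strdup(isa);
	(void) sprintf(bin, "/usr/sbin%.*s/zdb -bccsv -U %s %s",
	    isalen, isa, "/tmp/zpool.cache", "tank");
	free(isa);

	/* Prints: /usr/sbin/amd64/zdb -bccsv -U /tmp/zpool.cache tank */
	(void) printf("%s\n", zdb);
	return (0);
}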
5163 5164
5164 5165 static void
5165 5166 ztest_walk_pool_directory(char *header)
5166 5167 {
5167 5168 spa_t *spa = NULL;
5168 5169
5169 5170 if (ztest_opts.zo_verbose >= 6)
5170 5171 (void) printf("%s\n", header);
5171 5172
5172 5173 mutex_enter(&spa_namespace_lock);
5173 5174 while ((spa = spa_next(spa)) != NULL)
5174 5175 if (ztest_opts.zo_verbose >= 6)
5175 5176 (void) printf("\t%s\n", spa_name(spa));
5176 5177 mutex_exit(&spa_namespace_lock);
5177 5178 }
5178 5179
5179 5180 static void
5180 5181 ztest_spa_import_export(char *oldname, char *newname)
5181 5182 {
5182 5183 nvlist_t *config, *newconfig;
5183 5184 uint64_t pool_guid;
5184 5185 spa_t *spa;
5185 5186 int error;
5186 5187
5187 5188 if (ztest_opts.zo_verbose >= 4) {
5188 5189 (void) printf("import/export: old = %s, new = %s\n",
5189 5190 oldname, newname);
5190 5191 }
5191 5192
5192 5193 /*
5193 5194 * Clean up from previous runs.
5194 5195 */
5195 5196 (void) spa_destroy(newname);
5196 5197
5197 5198 /*
5198 5199 * Get the pool's configuration and guid.
5199 5200 */
5200 5201 VERIFY3U(0, ==, spa_open(oldname, &spa, FTAG));
5201 5202
5202 5203 /*
5203 5204 * Kick off a scrub to tickle scrub/export races.
5204 5205 */
5205 5206 if (ztest_random(2) == 0)
5206 5207 (void) spa_scan(spa, POOL_SCAN_SCRUB);
5207 5208
5208 5209 pool_guid = spa_guid(spa);
5209 5210 spa_close(spa, FTAG);
5210 5211
5211 5212 ztest_walk_pool_directory("pools before export");
5212 5213
5213 5214 /*
5214 5215 * Export it.
5215 5216 */
5216 5217 VERIFY3U(0, ==, spa_export(oldname, &config, B_FALSE, B_FALSE));
5217 5218
5218 5219 ztest_walk_pool_directory("pools after export");
5219 5220
5220 5221 /*
5221 5222 * Try to import it.
5222 5223 */
5223 5224 newconfig = spa_tryimport(config);
5224 5225 ASSERT(newconfig != NULL);
5225 5226 nvlist_free(newconfig);
5226 5227
5227 5228 /*
5228 5229 * Import it under the new name.
5229 5230 */
5230 5231 error = spa_import(newname, config, NULL, 0);
5231 5232 if (error != 0) {
5232 5233 dump_nvlist(config, 0);
5233 5234 fatal(B_FALSE, "couldn't import pool %s as %s: error %u",
5234 5235 oldname, newname, error);
5235 5236 }
5236 5237
5237 5238 ztest_walk_pool_directory("pools after import");
5238 5239
5239 5240 /*
5240 5241 * Try to import it again -- should fail with EEXIST.
5241 5242 */
5242 5243 VERIFY3U(EEXIST, ==, spa_import(newname, config, NULL, 0));
5243 5244
5244 5245 /*
5245 5246 * Try to import it under a different name -- should fail with EEXIST.
5246 5247 */
5247 5248 VERIFY3U(EEXIST, ==, spa_import(oldname, config, NULL, 0));
5248 5249
5249 5250 /*
5250 5251 * Verify that the pool is no longer visible under the old name.
5251 5252 */
5252 5253 VERIFY3U(ENOENT, ==, spa_open(oldname, &spa, FTAG));
5253 5254
5254 5255 /*
5255 5256 * Verify that we can open and close the pool using the new name.
5256 5257 */
5257 5258 VERIFY3U(0, ==, spa_open(newname, &spa, FTAG));
5258 5259 ASSERT(pool_guid == spa_guid(spa));
5259 5260 spa_close(spa, FTAG);
5260 5261
5261 5262 nvlist_free(config);
5262 5263 }
5263 5264
5264 5265 static void
5265 5266 ztest_resume(spa_t *spa)
5266 5267 {
5267 5268 if (spa_suspended(spa) && ztest_opts.zo_verbose >= 6)
5268 5269 (void) printf("resuming from suspended state\n");
5269 5270 spa_vdev_state_enter(spa, SCL_NONE);
5270 5271 vdev_clear(spa, NULL);
5271 5272 (void) spa_vdev_state_exit(spa, NULL, 0);
5272 5273 (void) zio_resume(spa);
5273 5274 }
5274 5275
5275 5276 static void *
5276 5277 ztest_resume_thread(void *arg)
5277 5278 {
5278 5279 spa_t *spa = arg;
5279 5280
5280 5281 while (!ztest_exiting) {
5281 5282 if (spa_suspended(spa))
5282 5283 ztest_resume(spa);
5283 5284 (void) poll(NULL, 0, 100);
5284 5285 }
5285 5286 return (NULL);
5286 5287 }
5287 5288
5288 5289 static void *
5289 5290 ztest_deadman_thread(void *arg)
5290 5291 {
5291 5292 ztest_shared_t *zs = arg;
5292 5293 int grace = 300;
5293 5294 hrtime_t delta;
5294 5295
5295 5296 delta = (zs->zs_thread_stop - zs->zs_thread_start) / NANOSEC + grace;
5296 5297
5297 5298 (void) poll(NULL, 0, (int)(1000 * delta));
5298 5299
5299 5300 fatal(0, "failed to complete within %d seconds of deadline", grace);
5300 5301
5301 5302 return (NULL);
5302 5303 }
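
The deadline above is the workload's own scheduled runtime plus a fixed grace period, so a 60-second pass sleeps 60 + 300 = 360 seconds before declaring a hang. A minimal sketch of the same idiom, assuming POSIX threads rather than the thr_create() interface used in this file:

#include <poll.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define	GRACE_SEC	300

static void *
deadman(void *arg)
{
	int run_sec = *(int *)arg;

	/* Sleep through the whole run plus the grace period... */
	(void) poll(NULL, 0, 1000 * (run_sec + GRACE_SEC));

	/* ...and if the process is still alive, it must be hung. */
	(void) fprintf(stderr,
	    "failed to complete within %d seconds of deadline\n", GRACE_SEC);
	abort();
}

int
main(void)
{
	pthread_t tid;
	int run_sec = 2;	/* hypothetical 2-second workload */

	(void) pthread_create(&tid, NULL, deadman, &run_sec);

	/*
	 * The real workload runs here; finishing before the deadman
	 * fires lets the process exit normally.
	 */
	(void) poll(NULL, 0, 1000);
	return (0);
}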
5303 5304
5304 5305 static void
5305 5306 ztest_execute(int test, ztest_info_t *zi, uint64_t id)
5306 5307 {
5307 5308 ztest_ds_t *zd = &ztest_ds[id % ztest_opts.zo_datasets];
5308 5309 ztest_shared_callstate_t *zc = ZTEST_GET_SHARED_CALLSTATE(test);
5309 5310 hrtime_t functime = gethrtime();
5310 5311
5311 5312 for (int i = 0; i < zi->zi_iters; i++)
5312 5313 zi->zi_func(zd, id);
5313 5314
5314 5315 functime = gethrtime() - functime;
5315 5316
5316 5317 atomic_add_64(&zc->zc_count, 1);
5317 5318 atomic_add_64(&zc->zc_time, functime);
5318 5319
5319 5320 if (ztest_opts.zo_verbose >= 4) {
5320 5321 Dl_info dli;
5321 5322 (void) dladdr((void *)zi->zi_func, &dli);
5322 5323 (void) printf("%6.2f sec in %s\n",
5323 5324 (double)functime / NANOSEC, dli.dli_sname);
5324 5325 }
5325 5326 }
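
The verbose branch above uses dladdr() to map the test function's address back to its symbol name, so the timing report can print names instead of raw pointers. A minimal sketch of that lookup; note that on glibc systems it needs _GNU_SOURCE and -ldl, and resolving symbols in the main executable may additionally require -rdynamic:

#define	_GNU_SOURCE	/* for dladdr() on glibc; not needed on illumos */
#include <dlfcn.h>
#include <stdio.h>

void
sample_func(void)
{
}

int
main(void)
{
	Dl_info dli;

	if (dladdr((void *)sample_func, &dli) != 0 && dli.dli_sname != NULL)
		(void) printf("%s lives at %p\n", dli.dli_sname,
		    dli.dli_saddr);
	return (0);
}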
5326 5327
5327 5328 static void *
5328 5329 ztest_thread(void *arg)
5329 5330 {
5330 5331 int rand;
5331 5332 uint64_t id = (uintptr_t)arg;
5332 5333 ztest_shared_t *zs = ztest_shared;
5333 5334 uint64_t call_next;
5334 5335 hrtime_t now;
5335 5336 ztest_info_t *zi;
5336 5337 ztest_shared_callstate_t *zc;
5337 5338
5338 5339 while ((now = gethrtime()) < zs->zs_thread_stop) {
5339 5340 /*
5340 5341 * See if it's time to force a crash.
5341 5342 */
5342 5343 if (now > zs->zs_thread_kill)
5343 5344 ztest_kill(zs);
5344 5345
5345 5346 /*
5346 5347 * If we're getting ENOSPC with some regularity, stop.
5347 5348 */
5348 5349 if (zs->zs_enospc_count > 10)
5349 5350 break;
5350 5351
5351 5352 /*
5352 5353 * Pick a random function to execute.
5353 5354 */
5354 5355 rand = ztest_random(ZTEST_FUNCS);
5355 5356 zi = &ztest_info[rand];
5356 5357 zc = ZTEST_GET_SHARED_CALLSTATE(rand);
5357 5358 call_next = zc->zc_next;
5358 5359
5359 5360 if (now >= call_next &&
5360 5361 atomic_cas_64(&zc->zc_next, call_next, call_next +
5361 5362 ztest_random(2 * zi->zi_interval[0] + 1)) == call_next) {
5362 5363 ztest_execute(rand, zi, id);
5363 5364 }
5364 5365 }
5365 5366
5366 5367 return (NULL);
5367 5368 }
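
Scheduling in the loop above is lock-free: each test function has a shared next-run timestamp, and a thread may only execute it after winning the compare-and-swap that advances the deadline by a random amount up to twice the function's interval, which keeps the average call spacing at one interval. A minimal sketch of that claim step, using C11 atomics in place of atomic_cas_64():

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
	_Atomic uint64_t zc_next;	/* next time this function may run */
} callstate_t;

static int
try_claim(callstate_t *zc, uint64_t now, uint64_t interval)
{
	uint64_t call_next = atomic_load(&zc->zc_next);

	if (now < call_next)
		return (0);

	/*
	 * A random increment in [0, 2*interval] keeps the mean call
	 * spacing at 'interval'; losing the CAS just means another
	 * thread claimed this slot, and nobody ever blocks.
	 */
	uint64_t next = call_next +
	    (uint64_t)(rand() % (2 * (int)interval + 1));

	return (atomic_compare_exchange_strong(&zc->zc_next, &call_next,
	    next));
}

int
main(void)
{
	callstate_t zc = { 0 };

	if (try_claim(&zc, 100, 10))
		(void) printf("claimed; next deadline is %llu\n",
		    (unsigned long long)atomic_load(&zc.zc_next));
	return (0);
}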
5368 5369
5369 5370 static void
5370 5371 ztest_dataset_name(char *dsname, char *pool, int d)
5371 5372 {
5372 5373 (void) snprintf(dsname, MAXNAMELEN, "%s/ds_%d", pool, d);
5373 5374 }
5374 5375
5375 5376 static void
5376 5377 ztest_dataset_destroy(int d)
5377 5378 {
5378 5379 char name[MAXNAMELEN];
5379 5380
5380 5381 ztest_dataset_name(name, ztest_opts.zo_pool, d);
5381 5382
5382 5383 if (ztest_opts.zo_verbose >= 3)
5383 5384 (void) printf("Destroying %s to free up space\n", name);
5384 5385
5385 5386 /*
5386 5387 	 * Clean up any non-standard clones and snapshots.  In general,
5387 5388 * ztest thread t operates on dataset (t % zopt_datasets),
5388 5389 * so there may be more than one thing to clean up.
5389 5390 */
5390 5391 for (int t = d; t < ztest_opts.zo_threads;
5391 5392 t += ztest_opts.zo_datasets) {
5392 5393 ztest_dsl_dataset_cleanup(name, t);
5393 5394 }
5394 5395
5395 5396 (void) dmu_objset_find(name, ztest_objset_destroy_cb, NULL,
5396 5397 DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN);
5397 5398 }
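
The stride in the loop above comes from the thread-to-dataset mapping: thread t works on dataset (t % zo_datasets), so destroying dataset d first has to clean up after every thread congruent to d. A worked example with the hypothetical values zo_datasets = 7 and zo_threads = 20, where dataset 3 belongs to threads 3, 10, and 17:

#include <stdio.h>

int
main(void)
{
	int zo_datasets = 7, zo_threads = 20, d = 3;

	/* Visits t = 3, 10, 17: every thread that maps to dataset 3. */
	for (int t = d; t < zo_threads; t += zo_datasets)
		(void) printf("cleanup for thread %d on dataset %d\n", t, d);
	return (0);
}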
5398 5399
5399 5400 static void
5400 5401 ztest_dataset_dirobj_verify(ztest_ds_t *zd)
5401 5402 {
5402 5403 uint64_t usedobjs, dirobjs, scratch;
5403 5404
5404 5405 /*
5405 5406 * ZTEST_DIROBJ is the object directory for the entire dataset.
5406 5407 * Therefore, the number of objects in use should equal the
5407 5408 * number of ZTEST_DIROBJ entries, +1 for ZTEST_DIROBJ itself.
5408 5409 * If not, we have an object leak.
5409 5410 *
5410 5411 * Note that we can only check this in ztest_dataset_open(),
5411 5412 * when the open-context and syncing-context values agree.
5412 5413 * That's because zap_count() returns the open-context value,
5413 5414 * while dmu_objset_space() returns the rootbp fill count.
5414 5415 */
5415 5416 VERIFY3U(0, ==, zap_count(zd->zd_os, ZTEST_DIROBJ, &dirobjs));
5416 5417 dmu_objset_space(zd->zd_os, &scratch, &scratch, &usedobjs, &scratch);
5417 5418 ASSERT3U(dirobjs + 1, ==, usedobjs);
5418 5419 }
5419 5420
5420 5421 static int
5421 5422 ztest_dataset_open(int d)
5422 5423 {
5423 5424 ztest_ds_t *zd = &ztest_ds[d];
5424 5425 uint64_t committed_seq = ZTEST_GET_SHARED_DS(d)->zd_seq;
5425 5426 objset_t *os;
5426 5427 zilog_t *zilog;
5427 5428 char name[MAXNAMELEN];
5428 5429 int error;
5429 5430
5430 5431 ztest_dataset_name(name, ztest_opts.zo_pool, d);
5431 5432
5432 5433 (void) rw_rdlock(&ztest_name_lock);
5433 5434
5434 5435 error = ztest_dataset_create(name);
5435 5436 if (error == ENOSPC) {
5436 5437 (void) rw_unlock(&ztest_name_lock);
5437 5438 ztest_record_enospc(FTAG);
5438 5439 return (error);
5439 5440 }
5440 5441 ASSERT(error == 0 || error == EEXIST);
5441 5442
5442 5443 VERIFY0(dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, zd, &os));
5443 5444 (void) rw_unlock(&ztest_name_lock);
5444 5445
5445 5446 ztest_zd_init(zd, ZTEST_GET_SHARED_DS(d), os);
5446 5447
5447 5448 zilog = zd->zd_zilog;
5448 5449
5449 5450 if (zilog->zl_header->zh_claim_lr_seq != 0 &&
5450 5451 zilog->zl_header->zh_claim_lr_seq < committed_seq)
5451 5452 fatal(0, "missing log records: claimed %llu < committed %llu",
5452 5453 zilog->zl_header->zh_claim_lr_seq, committed_seq);
5453 5454
5454 5455 ztest_dataset_dirobj_verify(zd);
5455 5456
5456 5457 zil_replay(os, zd, ztest_replay_vector);
5457 5458
5458 5459 ztest_dataset_dirobj_verify(zd);
5459 5460
5460 5461 if (ztest_opts.zo_verbose >= 6)
5461 5462 (void) printf("%s replay %llu blocks, %llu records, seq %llu\n",
5462 5463 zd->zd_name,
5463 5464 (u_longlong_t)zilog->zl_parse_blk_count,
5464 5465 (u_longlong_t)zilog->zl_parse_lr_count,
5465 5466 (u_longlong_t)zilog->zl_replaying_seq);
5466 5467
5467 5468 zilog = zil_open(os, ztest_get_data);
5468 5469
5469 5470 if (zilog->zl_replaying_seq != 0 &&
5470 5471 zilog->zl_replaying_seq < committed_seq)
5471 5472 fatal(0, "missing log records: replayed %llu < committed %llu",
5472 5473 zilog->zl_replaying_seq, committed_seq);
5473 5474
5474 5475 return (0);
5475 5476 }
5476 5477
5477 5478 static void
5478 5479 ztest_dataset_close(int d)
5479 5480 {
5480 5481 ztest_ds_t *zd = &ztest_ds[d];
5481 5482
5482 5483 zil_close(zd->zd_zilog);
5483 5484 dmu_objset_disown(zd->zd_os, zd);
5484 5485
5485 5486 ztest_zd_fini(zd);
5486 5487 }
5487 5488
5488 5489 /*
5489 5490 * Kick off threads to run tests on all datasets in parallel.
5490 5491 */
5491 5492 static void
5492 5493 ztest_run(ztest_shared_t *zs)
5493 5494 {
5494 5495 thread_t *tid;
5495 5496 spa_t *spa;
5496 5497 objset_t *os;
5497 5498 thread_t resume_tid;
5498 5499 int error;
5499 5500
5500 5501 ztest_exiting = B_FALSE;
5501 5502
5502 5503 /*
5503 5504 * Initialize parent/child shared state.
5504 5505 */
5505 5506 VERIFY(_mutex_init(&ztest_vdev_lock, USYNC_THREAD, NULL) == 0);
5506 5507 VERIFY(rwlock_init(&ztest_name_lock, USYNC_THREAD, NULL) == 0);
5507 5508
5508 5509 zs->zs_thread_start = gethrtime();
5509 5510 zs->zs_thread_stop =
5510 5511 zs->zs_thread_start + ztest_opts.zo_passtime * NANOSEC;
5511 5512 zs->zs_thread_stop = MIN(zs->zs_thread_stop, zs->zs_proc_stop);
5512 5513 zs->zs_thread_kill = zs->zs_thread_stop;
5513 5514 if (ztest_random(100) < ztest_opts.zo_killrate) {
5514 5515 zs->zs_thread_kill -=
5515 5516 ztest_random(ztest_opts.zo_passtime * NANOSEC);
5516 5517 }
5517 5518
5518 5519 (void) _mutex_init(&zcl.zcl_callbacks_lock, USYNC_THREAD, NULL);
5519 5520
5520 5521 list_create(&zcl.zcl_callbacks, sizeof (ztest_cb_data_t),
5521 5522 offsetof(ztest_cb_data_t, zcd_node));
5522 5523
5523 5524 /*
5524 5525 * Open our pool.
5525 5526 */
5526 5527 kernel_init(FREAD | FWRITE);
5527 5528 VERIFY0(spa_open(ztest_opts.zo_pool, &spa, FTAG));
5528 5529 spa->spa_debug = B_TRUE;
5529 5530 ztest_spa = spa;
5530 5531
5531 5532 VERIFY0(dmu_objset_own(ztest_opts.zo_pool,
5532 5533 DMU_OST_ANY, B_TRUE, FTAG, &os));
5533 5534 zs->zs_guid = dmu_objset_fsid_guid(os);
5534 5535 dmu_objset_disown(os, FTAG);
5535 5536
5536 5537 spa->spa_dedup_ditto = 2 * ZIO_DEDUPDITTO_MIN;
5537 5538
5538 5539 /*
5539 5540 * We don't expect the pool to suspend unless maxfaults == 0,
5540 5541 * in which case ztest_fault_inject() temporarily takes away
5541 5542 * the only valid replica.
5542 5543 */
5543 5544 if (MAXFAULTS() == 0)
5544 5545 spa->spa_failmode = ZIO_FAILURE_MODE_WAIT;
5545 5546 else
5546 5547 spa->spa_failmode = ZIO_FAILURE_MODE_PANIC;
5547 5548
5548 5549 /*
5549 5550 * Create a thread to periodically resume suspended I/O.
5550 5551 */
5551 5552 VERIFY(thr_create(0, 0, ztest_resume_thread, spa, THR_BOUND,
5552 5553 &resume_tid) == 0);
5553 5554
5554 5555 /*
5555 5556 * Create a deadman thread to abort() if we hang.
5556 5557 */
5557 5558 VERIFY(thr_create(0, 0, ztest_deadman_thread, zs, THR_BOUND,
5558 5559 NULL) == 0);
5559 5560
5560 5561 /*
5561 5562 	 * Verify that we can safely inquire about any object,
5562 5563 * whether it's allocated or not. To make it interesting,
5563 5564 * we probe a 5-wide window around each power of two.
5564 5565 	 * This hits all edge cases, including zero and the max
5565 5566 	 * (see the sketch after this function).
5565 5566 */
5566 5567 for (int t = 0; t < 64; t++) {
5567 5568 for (int d = -5; d <= 5; d++) {
5568 5569 error = dmu_object_info(spa->spa_meta_objset,
5569 5570 (1ULL << t) + d, NULL);
5570 5571 ASSERT(error == 0 || error == ENOENT ||
5571 5572 error == EINVAL);
5572 5573 }
5573 5574 }
5574 5575
5575 5576 /*
5576 5577 * If we got any ENOSPC errors on the previous run, destroy something.
5577 5578 */
5578 5579 if (zs->zs_enospc_count != 0) {
5579 5580 int d = ztest_random(ztest_opts.zo_datasets);
5580 5581 ztest_dataset_destroy(d);
5581 5582 }
5582 5583 zs->zs_enospc_count = 0;
5583 5584
5584 5585 tid = umem_zalloc(ztest_opts.zo_threads * sizeof (thread_t),
5585 5586 UMEM_NOFAIL);
5586 5587
5587 5588 if (ztest_opts.zo_verbose >= 4)
5588 5589 (void) printf("starting main threads...\n");
5589 5590
5590 5591 /*
5591 5592 * Kick off all the tests that run in parallel.
5592 5593 */
5593 5594 for (int t = 0; t < ztest_opts.zo_threads; t++) {
5594 5595 if (t < ztest_opts.zo_datasets &&
5595 5596 ztest_dataset_open(t) != 0)
5596 5597 return;
5597 5598 VERIFY(thr_create(0, 0, ztest_thread, (void *)(uintptr_t)t,
5598 5599 THR_BOUND, &tid[t]) == 0);
5599 5600 }
5600 5601
5601 5602 /*
5602 5603 * Wait for all of the tests to complete. We go in reverse order
5603 5604 * so we don't close datasets while threads are still using them.
5604 5605 */
5605 5606 for (int t = ztest_opts.zo_threads - 1; t >= 0; t--) {
5606 5607 VERIFY(thr_join(tid[t], NULL, NULL) == 0);
5607 5608 if (t < ztest_opts.zo_datasets)
5608 5609 ztest_dataset_close(t);
5609 5610 }
5610 5611
5611 5612 txg_wait_synced(spa_get_dsl(spa), 0);
5612 5613
5613 5614 zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(spa));
5614 5615 zs->zs_space = metaslab_class_get_space(spa_normal_class(spa));
5615 5616
5616 5617 umem_free(tid, ztest_opts.zo_threads * sizeof (thread_t));
5617 5618
5618 5619 /* Kill the resume thread */
5619 5620 ztest_exiting = B_TRUE;
5620 5621 VERIFY(thr_join(resume_tid, NULL, NULL) == 0);
5621 5622 ztest_resume(spa);
5622 5623
5623 5624 /*
5624 5625 * Right before closing the pool, kick off a bunch of async I/O;
5625 5626 * spa_close() should wait for it to complete.
5626 5627 */
5627 5628 for (uint64_t object = 1; object < 50; object++)
5628 5629 dmu_prefetch(spa->spa_meta_objset, object, 0, 1ULL << 20);
5629 5630
5630 5631 spa_close(spa, FTAG);
5631 5632
5632 5633 /*
5633 5634 * Verify that we can loop over all pools.
5634 5635 */
5635 5636 mutex_enter(&spa_namespace_lock);
5636 5637 for (spa = spa_next(NULL); spa != NULL; spa = spa_next(spa))
5637 5638 if (ztest_opts.zo_verbose > 3)
5638 5639 (void) printf("spa_next: found %s\n", spa_name(spa));
5639 5640 mutex_exit(&spa_namespace_lock);
5640 5641
5641 5642 /*
5642 5643 * Verify that we can export the pool and reimport it under a
5643 5644 * different name.
5644 5645 */
5645 5646 if (ztest_random(2) == 0) {
5646 5647 char name[MAXNAMELEN];
5647 5648 (void) snprintf(name, MAXNAMELEN, "%s_import",
5648 5649 ztest_opts.zo_pool);
5649 5650 ztest_spa_import_export(ztest_opts.zo_pool, name);
5650 5651 ztest_spa_import_export(name, ztest_opts.zo_pool);
5651 5652 }
5652 5653
5653 5654 kernel_fini();
5654 5655
5655 5656 list_destroy(&zcl.zcl_callbacks);
5656 5657
5657 5658 (void) _mutex_destroy(&zcl.zcl_callbacks_lock);
5658 5659
5659 5660 (void) rwlock_destroy(&ztest_name_lock);
5660 5661 (void) _mutex_destroy(&ztest_vdev_lock);
5661 5662 }
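
The object-probe loop near the top of ztest_run() deserves a closer look: because the ids are unsigned 64-bit values, d = -5..-1 at t = 0 wraps below zero, which is how both 0 and ids near UINT64_MAX end up covered. A standalone sketch that simply enumerates the probed ids:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/*
	 * For each power of two 2^t, probe the eleven ids 2^t-5 .. 2^t+5.
	 * Unsigned wrap-around at t = 0 produces 0 as well as values
	 * near UINT64_MAX, so the extremes of the id space are hit.
	 */
	for (int t = 0; t < 64; t++)
		for (int d = -5; d <= 5; d++)
			(void) printf("t=%2d d=%+2d -> %llu\n", t, d,
			    (unsigned long long)((1ULL << t) + d));
	return (0);
}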
5662 5663
5663 5664 static void
5664 5665 ztest_freeze(void)
5665 5666 {
5666 5667 ztest_ds_t *zd = &ztest_ds[0];
5667 5668 spa_t *spa;
5668 5669 int numloops = 0;
5669 5670
5670 5671 if (ztest_opts.zo_verbose >= 3)
5671 5672 (void) printf("testing spa_freeze()...\n");
5672 5673
5673 5674 kernel_init(FREAD | FWRITE);
5674 5675 VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG));
5675 5676 VERIFY3U(0, ==, ztest_dataset_open(0));
5676 5677 spa->spa_debug = B_TRUE;
5677 5678 ztest_spa = spa;
5678 5679
5679 5680 /*
5680 5681 * Force the first log block to be transactionally allocated.
5681 5682 * We have to do this before we freeze the pool -- otherwise
5682 5683 * the log chain won't be anchored.
5683 5684 */
5684 5685 while (BP_IS_HOLE(&zd->zd_zilog->zl_header->zh_log)) {
5685 5686 ztest_dmu_object_alloc_free(zd, 0);
5686 5687 zil_commit(zd->zd_zilog, 0);
5687 5688 }
5688 5689
5689 5690 txg_wait_synced(spa_get_dsl(spa), 0);
5690 5691
5691 5692 /*
5692 5693 * Freeze the pool. This stops spa_sync() from doing anything,
5693 5694 * so that the only way to record changes from now on is the ZIL.
5694 5695 */
5695 5696 spa_freeze(spa);
5696 5697
5697 5698 /*
5698 5699 * Run tests that generate log records but don't alter the pool config
5699 5700 * or depend on DSL sync tasks (snapshots, objset create/destroy, etc).
5700 5701 * We do a txg_wait_synced() after each iteration to force the txg
5701 5702 * to increase well beyond the last synced value in the uberblock.
5702 5703 * The ZIL should be OK with that.
5703 5704 */
5704 5705 while (ztest_random(10) != 0 &&
5705 5706 numloops++ < ztest_opts.zo_maxloops) {
5706 5707 ztest_dmu_write_parallel(zd, 0);
5707 5708 ztest_dmu_object_alloc_free(zd, 0);
5708 5709 txg_wait_synced(spa_get_dsl(spa), 0);
5709 5710 }
5710 5711
5711 5712 /*
5712 5713 * Commit all of the changes we just generated.
5713 5714 */
5714 5715 zil_commit(zd->zd_zilog, 0);
5715 5716 txg_wait_synced(spa_get_dsl(spa), 0);
5716 5717
5717 5718 /*
5718 5719 * Close our dataset and close the pool.
5719 5720 */
5720 5721 ztest_dataset_close(0);
5721 5722 spa_close(spa, FTAG);
5722 5723 kernel_fini();
5723 5724
5724 5725 /*
5725 5726 * Open and close the pool and dataset to induce log replay.
5726 5727 */
5727 5728 kernel_init(FREAD | FWRITE);
5728 5729 VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG));
5729 5730 ASSERT(spa_freeze_txg(spa) == UINT64_MAX);
5730 5731 VERIFY3U(0, ==, ztest_dataset_open(0));
5731 5732 ztest_dataset_close(0);
5732 5733
5733 5734 spa->spa_debug = B_TRUE;
5734 5735 ztest_spa = spa;
5735 5736 txg_wait_synced(spa_get_dsl(spa), 0);
5736 5737 ztest_reguid(NULL, 0);
5737 5738
5738 5739 spa_close(spa, FTAG);
5739 5740 kernel_fini();
5740 5741 }
5741 5742
5742 5743 void
5743 5744 print_time(hrtime_t t, char *timebuf)
5744 5745 {
5745 5746 hrtime_t s = t / NANOSEC;
5746 5747 hrtime_t m = s / 60;
5747 5748 hrtime_t h = m / 60;
5748 5749 hrtime_t d = h / 24;
5749 5750
5750 5751 s -= m * 60;
5751 5752 m -= h * 60;
5752 5753 h -= d * 24;
5753 5754
5754 5755 timebuf[0] = '\0';
5755 5756
5756 5757 if (d)
5757 5758 (void) sprintf(timebuf,
5758 5759 "%llud%02lluh%02llum%02llus", d, h, m, s);
5759 5760 else if (h)
5760 5761 (void) sprintf(timebuf, "%lluh%02llum%02llus", h, m, s);
5761 5762 else if (m)
5762 5763 (void) sprintf(timebuf, "%llum%02llus", m, s);
5763 5764 else
5764 5765 (void) sprintf(timebuf, "%llus", s);
5765 5766 }
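
print_time() peels days, hours, minutes, and seconds off the nanosecond count by successive division and subtraction. For example, t = 90061 seconds gives s = 90061, m = 1501, h = 25, d = 1; after the subtractions s, m, and h are each 1, so the result is "1d01h01m01s". A self-contained check of that arithmetic:

#include <assert.h>
#include <stdio.h>
#include <string.h>

#define	NANOSEC	1000000000LL

int
main(void)
{
	long long t = 90061LL * NANOSEC;
	long long s = t / NANOSEC;	/* 90061 */
	long long m = s / 60;		/* 1501 */
	long long h = m / 60;		/* 25 */
	long long d = h / 24;		/* 1 */
	char buf[32];

	s -= m * 60;			/* 1 */
	m -= h * 60;			/* 1 */
	h -= d * 24;			/* 1 */

	(void) snprintf(buf, sizeof (buf), "%lldd%02lldh%02lldm%02llds",
	    d, h, m, s);
	assert(strcmp(buf, "1d01h01m01s") == 0);
	(void) puts(buf);
	return (0);
}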
5766 5767
5767 5768 static nvlist_t *
5768 5769 make_random_props(void)
5769 5770 {
5770 5771 nvlist_t *props;
5771 5772
5772 5773 VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0);
5773 5774 if (ztest_random(2) == 0)
5774 5775 return (props);
5775 5776 VERIFY(nvlist_add_uint64(props, "autoreplace", 1) == 0);
5776 5777
5777 5778 return (props);
5778 5779 }
5779 5780
5780 5781 /*
5781 5782 * Create a storage pool with the given name and initial vdev size.
5782 5783 * Then test spa_freeze() functionality.
5783 5784 */
5784 5785 static void
5785 5786 ztest_init(ztest_shared_t *zs)
5786 5787 {
5787 5788 spa_t *spa;
5788 5789 nvlist_t *nvroot, *props;
5789 5790
5790 5791 VERIFY(_mutex_init(&ztest_vdev_lock, USYNC_THREAD, NULL) == 0);
5791 5792 VERIFY(rwlock_init(&ztest_name_lock, USYNC_THREAD, NULL) == 0);
5792 5793
5793 5794 kernel_init(FREAD | FWRITE);
5794 5795
5795 5796 /*
5796 5797 * Create the storage pool.
5797 5798 */
5798 5799 (void) spa_destroy(ztest_opts.zo_pool);
5799 5800 ztest_shared->zs_vdev_next_leaf = 0;
5800 5801 zs->zs_splits = 0;
5801 5802 zs->zs_mirrors = ztest_opts.zo_mirrors;
5802 5803 nvroot = make_vdev_root(NULL, NULL, NULL, ztest_opts.zo_vdev_size, 0,
5803 5804 0, ztest_opts.zo_raidz, zs->zs_mirrors, 1);
5804 5805 props = make_random_props();
5805 5806 for (int i = 0; i < SPA_FEATURES; i++) {
5806 5807 char buf[1024];
5807 5808 (void) snprintf(buf, sizeof (buf), "feature@%s",
5808 5809 spa_feature_table[i].fi_uname);
5809 5810 VERIFY3U(0, ==, nvlist_add_uint64(props, buf, 0));
5810 5811 }
5811 5812 VERIFY3U(0, ==, spa_create(ztest_opts.zo_pool, nvroot, props, NULL));
5812 5813 nvlist_free(nvroot);
5813 5814
5814 5815 VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG));
5815 5816 zs->zs_metaslab_sz =
5816 5817 1ULL << spa->spa_root_vdev->vdev_child[0]->vdev_ms_shift;
5817 5818
5818 5819 spa_close(spa, FTAG);
5819 5820
5820 5821 kernel_fini();
5821 5822
5822 5823 ztest_run_zdb(ztest_opts.zo_pool);
5823 5824
5824 5825 ztest_freeze();
5825 5826
5826 5827 ztest_run_zdb(ztest_opts.zo_pool);
5827 5828
5828 5829 (void) rwlock_destroy(&ztest_name_lock);
5829 5830 (void) _mutex_destroy(&ztest_vdev_lock);
5830 5831 }
5831 5832
5832 5833 static void
5833 5834 setup_data_fd(void)
5834 5835 {
5835 5836 static char ztest_name_data[] = "/tmp/ztest.data.XXXXXX";
5836 5837
5837 5838 ztest_fd_data = mkstemp(ztest_name_data);
5838 5839 ASSERT3S(ztest_fd_data, >=, 0);
5839 5840 (void) unlink(ztest_name_data);
5840 5841 }
5841 5842
5842 5843
5843 5844 static int
5844 5845 shared_data_size(ztest_shared_hdr_t *hdr)
5845 5846 {
5846 5847 int size;
5847 5848
5848 5849 size = hdr->zh_hdr_size;
5849 5850 size += hdr->zh_opts_size;
5850 5851 size += hdr->zh_size;
5851 5852 size += hdr->zh_stats_size * hdr->zh_stats_count;
5852 5853 size += hdr->zh_ds_size * hdr->zh_ds_count;
5853 5854
5854 5855 return (size);
5855 5856 }
5856 5857
5857 5858 static void
5858 5859 setup_hdr(void)
5859 5860 {
5860 5861 int size;
5861 5862 ztest_shared_hdr_t *hdr;
5862 5863
5863 5864 hdr = (void *)mmap(0, P2ROUNDUP(sizeof (*hdr), getpagesize()),
5864 5865 PROT_READ | PROT_WRITE, MAP_SHARED, ztest_fd_data, 0);
5865 5866 ASSERT(hdr != MAP_FAILED);
5866 5867
5867 5868 VERIFY3U(0, ==, ftruncate(ztest_fd_data, sizeof (ztest_shared_hdr_t)));
5868 5869
5869 5870 hdr->zh_hdr_size = sizeof (ztest_shared_hdr_t);
5870 5871 hdr->zh_opts_size = sizeof (ztest_shared_opts_t);
5871 5872 hdr->zh_size = sizeof (ztest_shared_t);
5872 5873 hdr->zh_stats_size = sizeof (ztest_shared_callstate_t);
5873 5874 hdr->zh_stats_count = ZTEST_FUNCS;
5874 5875 hdr->zh_ds_size = sizeof (ztest_shared_ds_t);
5875 5876 hdr->zh_ds_count = ztest_opts.zo_datasets;
5876 5877
5877 5878 size = shared_data_size(hdr);
5878 5879 VERIFY3U(0, ==, ftruncate(ztest_fd_data, size));
5879 5880
5880 5881 (void) munmap((caddr_t)hdr, P2ROUNDUP(sizeof (*hdr), getpagesize()));
5881 5882 }
5882 5883
5883 5884 static void
5884 5885 setup_data(void)
5885 5886 {
5886 5887 int size, offset;
5887 5888 ztest_shared_hdr_t *hdr;
5888 5889 uint8_t *buf;
5889 5890
5890 5891 hdr = (void *)mmap(0, P2ROUNDUP(sizeof (*hdr), getpagesize()),
5891 5892 PROT_READ, MAP_SHARED, ztest_fd_data, 0);
5892 5893 ASSERT(hdr != MAP_FAILED);
5893 5894
5894 5895 size = shared_data_size(hdr);
5895 5896
5896 5897 (void) munmap((caddr_t)hdr, P2ROUNDUP(sizeof (*hdr), getpagesize()));
5897 5898 hdr = ztest_shared_hdr = (void *)mmap(0, P2ROUNDUP(size, getpagesize()),
5898 5899 PROT_READ | PROT_WRITE, MAP_SHARED, ztest_fd_data, 0);
5899 5900 ASSERT(hdr != MAP_FAILED);
5900 5901 buf = (uint8_t *)hdr;
5901 5902
5902 5903 offset = hdr->zh_hdr_size;
5903 5904 ztest_shared_opts = (void *)&buf[offset];
5904 5905 offset += hdr->zh_opts_size;
5905 5906 ztest_shared = (void *)&buf[offset];
5906 5907 offset += hdr->zh_size;
5907 5908 ztest_shared_callstate = (void *)&buf[offset];
5908 5909 offset += hdr->zh_stats_size * hdr->zh_stats_count;
5909 5910 ztest_shared_ds = (void *)&buf[offset];
5910 5911 }
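
Together, setup_hdr() and setup_data() implement the parent/child shared-memory protocol: the parent sizes a file-backed mapping from the header's section sizes, and both sides then carve the mapping into sections by running offsets in a fixed order (header, options, shared state, per-function call states, per-dataset slots). A minimal sketch of the same carving, with hypothetical sizes standing in for the header fields:

#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int
main(void)
{
	size_t hdr_size = 64, opts_size = 256, shared_size = 512;
	size_t total = hdr_size + opts_size + shared_size;
	FILE *tmp = tmpfile();
	uint8_t *buf;

	if (tmp == NULL || ftruncate(fileno(tmp), total) != 0)
		return (1);

	buf = mmap(NULL, total, PROT_READ | PROT_WRITE, MAP_SHARED,
	    fileno(tmp), 0);
	if (buf == MAP_FAILED)
		return (1);

	/* Each section begins where the previous one ends. */
	void *hdr = buf;
	void *opts = buf + hdr_size;
	void *shared = buf + hdr_size + opts_size;

	(void) printf("hdr=%p opts=%p shared=%p\n", hdr, opts, shared);
	(void) munmap(buf, total);
	return (0);
}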
5911 5912
5912 5913 static boolean_t
5913 5914 exec_child(char *cmd, char *libpath, boolean_t ignorekill, int *statusp)
5914 5915 {
5915 5916 pid_t pid;
5916 5917 int status;
5917 5918 char *cmdbuf = NULL;
5918 5919
5919 5920 pid = fork();
5920 5921
5921 5922 if (cmd == NULL) {
5922 5923 cmdbuf = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
5923 5924 (void) strlcpy(cmdbuf, getexecname(), MAXPATHLEN);
5924 5925 cmd = cmdbuf;
5925 5926 }
5926 5927
5927 5928 if (pid == -1)
5928 5929 fatal(1, "fork failed");
5929 5930
5930 5931 if (pid == 0) { /* child */
5931 5932 char *emptyargv[2] = { cmd, NULL };
5932 5933 char fd_data_str[12];
5933 5934
5934 5935 struct rlimit rl = { 1024, 1024 };
5935 5936 (void) setrlimit(RLIMIT_NOFILE, &rl);
5936 5937
5937 5938 (void) close(ztest_fd_rand);
5938 5939 VERIFY3U(11, >=,
5939 5940 snprintf(fd_data_str, 12, "%d", ztest_fd_data));
5940 5941 VERIFY0(setenv("ZTEST_FD_DATA", fd_data_str, 1));
5941 5942
5942 5943 (void) enable_extended_FILE_stdio(-1, -1);
5943 5944 if (libpath != NULL)
5944 5945 VERIFY(0 == setenv("LD_LIBRARY_PATH", libpath, 1));
5945 5946 (void) execv(cmd, emptyargv);
5946 5947 ztest_dump_core = B_FALSE;
5947 5948 fatal(B_TRUE, "exec failed: %s", cmd);
5948 5949 }
5949 5950
5950 5951 if (cmdbuf != NULL) {
5951 5952 umem_free(cmdbuf, MAXPATHLEN);
5952 5953 cmd = NULL;
5953 5954 }
5954 5955
5955 5956 while (waitpid(pid, &status, 0) != pid)
5956 5957 continue;
5957 5958 if (statusp != NULL)
5958 5959 *statusp = status;
5959 5960
5960 5961 if (WIFEXITED(status)) {
5961 5962 if (WEXITSTATUS(status) != 0) {
5962 5963 (void) fprintf(stderr, "child exited with code %d\n",
5963 5964 WEXITSTATUS(status));
5964 5965 exit(2);
5965 5966 }
5966 5967 return (B_FALSE);
5967 5968 } else if (WIFSIGNALED(status)) {
5968 5969 if (!ignorekill || WTERMSIG(status) != SIGKILL) {
5969 5970 (void) fprintf(stderr, "child died with signal %d\n",
5970 5971 WTERMSIG(status));
5971 5972 exit(3);
5972 5973 }
5973 5974 return (B_TRUE);
5974 5975 } else {
5975 5976 (void) fprintf(stderr, "something strange happened to child\n");
5976 5977 exit(4);
5977 5978 /* NOTREACHED */
5978 5979 }
5979 5980 }
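
exec_child() depends on two inherited-state guarantees: file descriptors without FD_CLOEXEC survive execv(), and so does the environment, which is how the re-executed child recovers the shared-data fd from ZTEST_FD_DATA. A minimal self-contained sketch of that fd-passing idiom, using a pipe and a hypothetical DEMO_FD_DATA variable (it assumes argv[0] names a path the program can re-exec):

#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>

int
main(int argc, char **argv)
{
	char *fdstr = getenv("DEMO_FD_DATA");	/* hypothetical name */

	(void) argc;

	if (fdstr != NULL) {
		/* Re-executed child: recover the inherited fd by number. */
		int fd = atoi(fdstr);
		(void) write(fd, "hello from child\n", 17);
		return (0);
	}

	int fds[2];
	if (pipe(fds) != 0)
		return (1);

	pid_t pid = fork();
	if (pid == 0) {
		/* Child: publish the fd number, then re-exec ourselves. */
		char buf[12];
		(void) snprintf(buf, sizeof (buf), "%d", fds[1]);
		(void) setenv("DEMO_FD_DATA", buf, 1);
		(void) execv(argv[0], argv);
		_exit(127);		/* exec failed */
	}

	(void) close(fds[1]);
	char msg[64];
	ssize_t n = read(fds[0], msg, sizeof (msg) - 1);
	if (n > 0) {
		msg[n] = '\0';
		(void) fputs(msg, stdout);
	}
	(void) waitpid(pid, NULL, 0);
	return (0);
}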
5980 5981
5981 5982 static void
5982 5983 ztest_run_init(void)
5983 5984 {
5984 5985 ztest_shared_t *zs = ztest_shared;
5985 5986
5986 5987 ASSERT(ztest_opts.zo_init != 0);
5987 5988
5988 5989 /*
5989 5990 * Blow away any existing copy of zpool.cache
5990 5991 */
5991 5992 (void) remove(spa_config_path);
5992 5993
5993 5994 /*
5994 5995 * Create and initialize our storage pool.
5995 5996 */
5996 5997 for (int i = 1; i <= ztest_opts.zo_init; i++) {
5997 5998 bzero(zs, sizeof (ztest_shared_t));
5998 5999 if (ztest_opts.zo_verbose >= 3 &&
5999 6000 ztest_opts.zo_init != 1) {
6000 6001 (void) printf("ztest_init(), pass %d\n", i);
6001 6002 }
6002 6003 ztest_init(zs);
6003 6004 }
6004 6005 }
6005 6006
6006 6007 int
6007 6008 main(int argc, char **argv)
6008 6009 {
6009 6010 int kills = 0;
6010 6011 int iters = 0;
6011 6012 int older = 0;
6012 6013 int newer = 0;
6013 6014 ztest_shared_t *zs;
6014 6015 ztest_info_t *zi;
6015 6016 ztest_shared_callstate_t *zc;
6016 6017 char timebuf[100];
6017 6018 char numbuf[6];
6018 6019 spa_t *spa;
6019 6020 char *cmd;
6020 6021 boolean_t hasalt;
6021 6022 char *fd_data_str = getenv("ZTEST_FD_DATA");
6022 6023
6023 6024 (void) setvbuf(stdout, NULL, _IOLBF, 0);
6024 6025
6025 6026 dprintf_setup(&argc, argv);
6026 6027
6027 6028 ztest_fd_rand = open("/dev/urandom", O_RDONLY);
6028 6029 ASSERT3S(ztest_fd_rand, >=, 0);
6029 6030
6030 6031 if (!fd_data_str) {
6031 6032 process_options(argc, argv);
6032 6033
6033 6034 setup_data_fd();
6034 6035 setup_hdr();
6035 6036 setup_data();
6036 6037 bcopy(&ztest_opts, ztest_shared_opts,
6037 6038 sizeof (*ztest_shared_opts));
6038 6039 } else {
6039 6040 ztest_fd_data = atoi(fd_data_str);
6040 6041 setup_data();
6041 6042 bcopy(ztest_shared_opts, &ztest_opts, sizeof (ztest_opts));
6042 6043 }
6043 6044 ASSERT3U(ztest_opts.zo_datasets, ==, ztest_shared_hdr->zh_ds_count);
6044 6045
6045 6046 /* Override location of zpool.cache */
6046 6047 VERIFY3U(asprintf((char **)&spa_config_path, "%s/zpool.cache",
6047 6048 ztest_opts.zo_dir), !=, -1);
6048 6049
6049 6050 ztest_ds = umem_alloc(ztest_opts.zo_datasets * sizeof (ztest_ds_t),
6050 6051 UMEM_NOFAIL);
6051 6052 zs = ztest_shared;
6052 6053
6053 6054 if (fd_data_str) {
6054 6055 metaslab_gang_bang = ztest_opts.zo_metaslab_gang_bang;
6055 6056 metaslab_df_alloc_threshold =
6056 6057 zs->zs_metaslab_df_alloc_threshold;
6057 6058
6058 6059 if (zs->zs_do_init)
6059 6060 ztest_run_init();
6060 6061 else
6061 6062 ztest_run(zs);
6062 6063 exit(0);
6063 6064 }
6064 6065
6065 6066 hasalt = (strlen(ztest_opts.zo_alt_ztest) != 0);
6066 6067
6067 6068 if (ztest_opts.zo_verbose >= 1) {
6068 6069 (void) printf("%llu vdevs, %d datasets, %d threads,"
6069 6070 " %llu seconds...\n",
6070 6071 (u_longlong_t)ztest_opts.zo_vdevs,
6071 6072 ztest_opts.zo_datasets,
6072 6073 ztest_opts.zo_threads,
6073 6074 (u_longlong_t)ztest_opts.zo_time);
6074 6075 }
6075 6076
6076 6077 cmd = umem_alloc(MAXNAMELEN, UMEM_NOFAIL);
6077 6078 (void) strlcpy(cmd, getexecname(), MAXNAMELEN);
6078 6079
6079 6080 zs->zs_do_init = B_TRUE;
6080 6081 if (strlen(ztest_opts.zo_alt_ztest) != 0) {
6081 6082 if (ztest_opts.zo_verbose >= 1) {
6082 6083 (void) printf("Executing older ztest for "
6083 6084 "initialization: %s\n", ztest_opts.zo_alt_ztest);
6084 6085 }
6085 6086 VERIFY(!exec_child(ztest_opts.zo_alt_ztest,
6086 6087 ztest_opts.zo_alt_libpath, B_FALSE, NULL));
6087 6088 } else {
6088 6089 VERIFY(!exec_child(NULL, NULL, B_FALSE, NULL));
6089 6090 }
6090 6091 zs->zs_do_init = B_FALSE;
6091 6092
6092 6093 zs->zs_proc_start = gethrtime();
6093 6094 zs->zs_proc_stop = zs->zs_proc_start + ztest_opts.zo_time * NANOSEC;
6094 6095
6095 6096 for (int f = 0; f < ZTEST_FUNCS; f++) {
6096 6097 zi = &ztest_info[f];
6097 6098 zc = ZTEST_GET_SHARED_CALLSTATE(f);
6098 6099 if (zs->zs_proc_start + zi->zi_interval[0] > zs->zs_proc_stop)
6099 6100 zc->zc_next = UINT64_MAX;
6100 6101 else
6101 6102 zc->zc_next = zs->zs_proc_start +
6102 6103 ztest_random(2 * zi->zi_interval[0] + 1);
6103 6104 }
6104 6105
6105 6106 /*
6106 6107 * Run the tests in a loop. These tests include fault injection
6107 6108 * to verify that self-healing data works, and forced crashes
6108 6109 * to verify that we never lose on-disk consistency.
6109 6110 */
6110 6111 while (gethrtime() < zs->zs_proc_stop) {
6111 6112 int status;
6112 6113 boolean_t killed;
6113 6114
6114 6115 /*
6115 6116 * Initialize the workload counters for each function.
6116 6117 */
6117 6118 for (int f = 0; f < ZTEST_FUNCS; f++) {
6118 6119 zc = ZTEST_GET_SHARED_CALLSTATE(f);
6119 6120 zc->zc_count = 0;
6120 6121 zc->zc_time = 0;
6121 6122 }
6122 6123
6123 6124 /* Set the allocation switch size */
6124 6125 zs->zs_metaslab_df_alloc_threshold =
6125 6126 ztest_random(zs->zs_metaslab_sz / 4) + 1;
6126 6127
6127 6128 if (!hasalt || ztest_random(2) == 0) {
6128 6129 if (hasalt && ztest_opts.zo_verbose >= 1) {
6129 6130 (void) printf("Executing newer ztest: %s\n",
6130 6131 cmd);
6131 6132 }
6132 6133 newer++;
6133 6134 killed = exec_child(cmd, NULL, B_TRUE, &status);
6134 6135 } else {
6135 6136 if (hasalt && ztest_opts.zo_verbose >= 1) {
6136 6137 (void) printf("Executing older ztest: %s\n",
6137 6138 ztest_opts.zo_alt_ztest);
6138 6139 }
6139 6140 older++;
6140 6141 killed = exec_child(ztest_opts.zo_alt_ztest,
6141 6142 ztest_opts.zo_alt_libpath, B_TRUE, &status);
6142 6143 }
6143 6144
6144 6145 if (killed)
6145 6146 kills++;
6146 6147 iters++;
6147 6148
6148 6149 if (ztest_opts.zo_verbose >= 1) {
6149 6150 hrtime_t now = gethrtime();
6150 6151
6151 6152 now = MIN(now, zs->zs_proc_stop);
6152 6153 print_time(zs->zs_proc_stop - now, timebuf);
6153 6154 nicenum(zs->zs_space, numbuf);
6154 6155
6155 6156 (void) printf("Pass %3d, %8s, %3llu ENOSPC, "
6156 6157 "%4.1f%% of %5s used, %3.0f%% done, %8s to go\n",
6157 6158 iters,
6158 6159 WIFEXITED(status) ? "Complete" : "SIGKILL",
6159 6160 (u_longlong_t)zs->zs_enospc_count,
6160 6161 100.0 * zs->zs_alloc / zs->zs_space,
6161 6162 numbuf,
6162 6163 100.0 * (now - zs->zs_proc_start) /
6163 6164 (ztest_opts.zo_time * NANOSEC), timebuf);
6164 6165 }
6165 6166
6166 6167 if (ztest_opts.zo_verbose >= 2) {
6167 6168 (void) printf("\nWorkload summary:\n\n");
6168 6169 (void) printf("%7s %9s %s\n",
6169 6170 "Calls", "Time", "Function");
6170 6171 (void) printf("%7s %9s %s\n",
6171 6172 "-----", "----", "--------");
6172 6173 for (int f = 0; f < ZTEST_FUNCS; f++) {
6173 6174 Dl_info dli;
6174 6175
6175 6176 zi = &ztest_info[f];
6176 6177 zc = ZTEST_GET_SHARED_CALLSTATE(f);
6177 6178 print_time(zc->zc_time, timebuf);
6178 6179 (void) dladdr((void *)zi->zi_func, &dli);
6179 6180 (void) printf("%7llu %9s %s\n",
6180 6181 (u_longlong_t)zc->zc_count, timebuf,
6181 6182 dli.dli_sname);
6182 6183 }
6183 6184 (void) printf("\n");
6184 6185 }
6185 6186
6186 6187 /*
6187 6188 * It's possible that we killed a child during a rename test,
6188 6189 * in which case we'll have a 'ztest_tmp' pool lying around
6189 6190 * instead of 'ztest'. Do a blind rename in case this happened.
6190 6191 */
6191 6192 kernel_init(FREAD);
6192 6193 if (spa_open(ztest_opts.zo_pool, &spa, FTAG) == 0) {
6193 6194 spa_close(spa, FTAG);
6194 6195 } else {
6195 6196 char tmpname[MAXNAMELEN];
6196 6197 kernel_fini();
6197 6198 kernel_init(FREAD | FWRITE);
6198 6199 (void) snprintf(tmpname, sizeof (tmpname), "%s_tmp",
6199 6200 ztest_opts.zo_pool);
6200 6201 (void) spa_rename(tmpname, ztest_opts.zo_pool);
6201 6202 }
6202 6203 kernel_fini();
6203 6204
6204 6205 ztest_run_zdb(ztest_opts.zo_pool);
6205 6206 }
6206 6207
6207 6208 if (ztest_opts.zo_verbose >= 1) {
6208 6209 if (hasalt) {
6209 6210 (void) printf("%d runs of older ztest: %s\n", older,
6210 6211 ztest_opts.zo_alt_ztest);
6211 6212 (void) printf("%d runs of newer ztest: %s\n", newer,
6212 6213 cmd);
6213 6214 }
6214 6215 (void) printf("%d killed, %d completed, %.0f%% kill rate\n",
6215 6216 kills, iters - kills, (100.0 * kills) / MAX(1, iters));
6216 6217 }
6217 6218
6218 6219 umem_free(cmd, MAXNAMELEN);
6219 6220
6220 6221 return (0);
6221 6222 }