Webrev code review: 8115 parallel zfs mount
(illumos review page for the diff below; UI controls — Print, Split, Close, Expand all, Collapse all — omitted)
          --- old/usr/src/cmd/ztest/ztest.c
          +++ new/usr/src/cmd/ztest/ztest.c
↓ open down ↓ 17 lines elided ↑ open up ↑
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  /*
  22   22   * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
  23   23   * Copyright (c) 2011, 2016 by Delphix. All rights reserved.
  24   24   * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
  25   25   * Copyright (c) 2013 Steven Hartland. All rights reserved.
  26   26   * Copyright (c) 2014 Integros [integros.com]
  27   27   * Copyright 2017 Joyent, Inc.
       28 + * Copyright 2017 RackTop Systems.
  28   29   */
  29   30  
  30   31  /*
  31   32   * The objective of this program is to provide a DMU/ZAP/SPA stress test
  32   33   * that runs entirely in userland, is easy to use, and easy to extend.
  33   34   *
  34   35   * The overall design of the ztest program is as follows:
  35   36   *
  36   37   * (1) For each major functional area (e.g. adding vdevs to a pool,
  37   38   *     creating and destroying datasets, reading and writing objects, etc)
↓ open down ↓ 200 lines elided ↑ open up ↑
 238  239   */
 239  240  typedef enum {
 240  241          RL_READER,
 241  242          RL_WRITER,
 242  243          RL_APPEND
 243  244  } rl_type_t;
 244  245  
 245  246  typedef struct rll {
 246  247          void            *rll_writer;
 247  248          int             rll_readers;
 248      -        mutex_t         rll_lock;
 249      -        cond_t          rll_cv;
      249 +        kmutex_t        rll_lock;
      250 +        kcondvar_t      rll_cv;
 250  251  } rll_t;
 251  252  
 252  253  typedef struct rl {
 253  254          uint64_t        rl_object;
 254  255          uint64_t        rl_offset;
 255  256          uint64_t        rl_size;
 256  257          rll_t           *rl_lock;
 257  258  } rl_t;
 258  259  
 259  260  #define ZTEST_RANGE_LOCKS       64
↓ open down ↓ 13 lines elided ↑ open up ↑
 273  274          uint64_t        od_crgen;
 274  275          char            od_name[ZFS_MAX_DATASET_NAME_LEN];
 275  276  } ztest_od_t;
 276  277  
 277  278  /*
 278  279   * Per-dataset state.
 279  280   */
 280  281  typedef struct ztest_ds {
 281  282          ztest_shared_ds_t *zd_shared;
 282  283          objset_t        *zd_os;
 283      -        rwlock_t        zd_zilog_lock;
      284 +        krwlock_t       zd_zilog_lock;
 284  285          zilog_t         *zd_zilog;
 285  286          ztest_od_t      *zd_od;         /* debugging aid */
 286  287          char            zd_name[ZFS_MAX_DATASET_NAME_LEN];
 287      -        mutex_t         zd_dirobj_lock;
      288 +        kmutex_t        zd_dirobj_lock;
 288  289          rll_t           zd_object_lock[ZTEST_OBJECT_LOCKS];
 289  290          rll_t           zd_range_lock[ZTEST_RANGE_LOCKS];
 290  291  } ztest_ds_t;
 291  292  
 292  293  /*
 293  294   * Per-iteration state.
 294  295   */
 295  296  typedef void ztest_func_t(ztest_ds_t *zd, uint64_t id);
 296  297  
 297  298  typedef struct ztest_info {
↓ open down ↓ 86 lines elided ↑ open up ↑
 384  385              &ztest_opts.zo_vdevtime                             },
 385  386  };
 386  387  
 387  388  #define ZTEST_FUNCS     (sizeof (ztest_info) / sizeof (ztest_info_t))
 388  389  
 389  390  /*
 390  391   * The following struct is used to hold a list of uncalled commit callbacks.
 391  392   * The callbacks are ordered by txg number.
 392  393   */
 393  394  typedef struct ztest_cb_list {
 394      -        mutex_t zcl_callbacks_lock;
      395 +        kmutex_t zcl_callbacks_lock;
 395  396          list_t  zcl_callbacks;
 396  397  } ztest_cb_list_t;
 397  398  
 398  399  /*
 399  400   * Stuff we need to share writably between parent and child.
 400  401   */
 401  402  typedef struct ztest_shared {
 402  403          boolean_t       zs_do_init;
 403  404          hrtime_t        zs_proc_start;
 404  405          hrtime_t        zs_proc_stop;
↓ open down ↓ 14 lines elided ↑ open up ↑
 419  420  
 420  421  #define ID_PARALLEL     -1ULL
 421  422  
 422  423  static char ztest_dev_template[] = "%s/%s.%llua";
 423  424  static char ztest_aux_template[] = "%s/%s.%s.%llu";
 424  425  ztest_shared_t *ztest_shared;
 425  426  
 426  427  static spa_t *ztest_spa = NULL;
 427  428  static ztest_ds_t *ztest_ds;
 428  429  
 429      -static mutex_t ztest_vdev_lock;
      430 +static kmutex_t ztest_vdev_lock;
 430  431  
 431  432  /*
 432  433   * The ztest_name_lock protects the pool and dataset namespace used by
 433  434   * the individual tests. To modify the namespace, consumers must grab
 434  435   * this lock as writer. Grabbing the lock as reader will ensure that the
 435  436   * namespace does not change while the lock is held.
 436  437   */
 437      -static rwlock_t ztest_name_lock;
      438 +static krwlock_t ztest_name_lock;
 438  439  
 439  440  static boolean_t ztest_dump_core = B_TRUE;
 440  441  static boolean_t ztest_exiting;
 441  442  
 442  443  /* Global commit callback list */
 443  444  static ztest_cb_list_t zcl;
 444  445  
 445  446  enum ztest_object {
 446  447          ZTEST_META_DNODE = 0,
 447  448          ZTEST_DIROBJ,
↓ open down ↓ 635 lines elided ↑ open up ↑
1083 1084          ASSERT0(error);
1084 1085  
1085 1086          return (error);
1086 1087  }
1087 1088  
1088 1089  static void
1089 1090  ztest_rll_init(rll_t *rll)
1090 1091  {
1091 1092          rll->rll_writer = NULL;
1092 1093          rll->rll_readers = 0;
1093      -        VERIFY(_mutex_init(&rll->rll_lock, USYNC_THREAD, NULL) == 0);
1094      -        VERIFY(cond_init(&rll->rll_cv, USYNC_THREAD, NULL) == 0);
     1094 +        mutex_init(&rll->rll_lock, NULL, USYNC_THREAD, NULL);
     1095 +        cv_init(&rll->rll_cv, NULL, USYNC_THREAD, NULL);
1095 1096  }
1096 1097  
1097 1098  static void
1098 1099  ztest_rll_destroy(rll_t *rll)
1099 1100  {
1100 1101          ASSERT(rll->rll_writer == NULL);
1101 1102          ASSERT(rll->rll_readers == 0);
1102      -        VERIFY(_mutex_destroy(&rll->rll_lock) == 0);
1103      -        VERIFY(cond_destroy(&rll->rll_cv) == 0);
     1103 +        mutex_destroy(&rll->rll_lock);
     1104 +        cv_destroy(&rll->rll_cv);
1104 1105  }
1105 1106  
1106 1107  static void
1107 1108  ztest_rll_lock(rll_t *rll, rl_type_t type)
1108 1109  {
1109      -        VERIFY(mutex_lock(&rll->rll_lock) == 0);
     1110 +        mutex_enter(&rll->rll_lock);
1110 1111  
1111 1112          if (type == RL_READER) {
1112 1113                  while (rll->rll_writer != NULL)
1113      -                        (void) cond_wait(&rll->rll_cv, &rll->rll_lock);
     1114 +                        cv_wait(&rll->rll_cv, &rll->rll_lock);
1114 1115                  rll->rll_readers++;
1115 1116          } else {
1116 1117                  while (rll->rll_writer != NULL || rll->rll_readers)
1117      -                        (void) cond_wait(&rll->rll_cv, &rll->rll_lock);
     1118 +                        cv_wait(&rll->rll_cv, &rll->rll_lock);
1118 1119                  rll->rll_writer = curthread;
1119 1120          }
1120 1121  
1121      -        VERIFY(mutex_unlock(&rll->rll_lock) == 0);
     1122 +        mutex_exit(&rll->rll_lock);
1122 1123  }
1123 1124  
1124 1125  static void
1125 1126  ztest_rll_unlock(rll_t *rll)
1126 1127  {
1127      -        VERIFY(mutex_lock(&rll->rll_lock) == 0);
     1128 +        mutex_enter(&rll->rll_lock);
1128 1129  
1129 1130          if (rll->rll_writer) {
1130 1131                  ASSERT(rll->rll_readers == 0);
1131 1132                  rll->rll_writer = NULL;
1132 1133          } else {
1133 1134                  ASSERT(rll->rll_readers != 0);
1134 1135                  ASSERT(rll->rll_writer == NULL);
1135 1136                  rll->rll_readers--;
1136 1137          }
1137 1138  
1138 1139          if (rll->rll_writer == NULL && rll->rll_readers == 0)
1139      -                VERIFY(cond_broadcast(&rll->rll_cv) == 0);
     1140 +                cv_broadcast(&rll->rll_cv);
1140 1141  
1141      -        VERIFY(mutex_unlock(&rll->rll_lock) == 0);
     1142 +        mutex_exit(&rll->rll_lock);
1142 1143  }
1143 1144  
1144 1145  static void
1145 1146  ztest_object_lock(ztest_ds_t *zd, uint64_t object, rl_type_t type)
1146 1147  {
1147 1148          rll_t *rll = &zd->zd_object_lock[object & (ZTEST_OBJECT_LOCKS - 1)];
1148 1149  
1149 1150          ztest_rll_lock(rll, type);
1150 1151  }
1151 1152  
↓ open down ↓ 38 lines elided ↑ open up ↑
1190 1191  ztest_zd_init(ztest_ds_t *zd, ztest_shared_ds_t *szd, objset_t *os)
1191 1192  {
1192 1193          zd->zd_os = os;
1193 1194          zd->zd_zilog = dmu_objset_zil(os);
1194 1195          zd->zd_shared = szd;
1195 1196          dmu_objset_name(os, zd->zd_name);
1196 1197  
1197 1198          if (zd->zd_shared != NULL)
1198 1199                  zd->zd_shared->zd_seq = 0;
1199 1200  
1200      -        VERIFY(rwlock_init(&zd->zd_zilog_lock, USYNC_THREAD, NULL) == 0);
1201      -        VERIFY(_mutex_init(&zd->zd_dirobj_lock, USYNC_THREAD, NULL) == 0);
     1201 +        rw_init(&zd->zd_zilog_lock, NULL, USYNC_THREAD, NULL);
     1202 +        mutex_init(&zd->zd_dirobj_lock, NULL, USYNC_THREAD, NULL);
1202 1203  
1203 1204          for (int l = 0; l < ZTEST_OBJECT_LOCKS; l++)
1204 1205                  ztest_rll_init(&zd->zd_object_lock[l]);
1205 1206  
1206 1207          for (int l = 0; l < ZTEST_RANGE_LOCKS; l++)
1207 1208                  ztest_rll_init(&zd->zd_range_lock[l]);
1208 1209  }
1209 1210  
1210 1211  static void
1211 1212  ztest_zd_fini(ztest_ds_t *zd)
1212 1213  {
1213      -        VERIFY(_mutex_destroy(&zd->zd_dirobj_lock) == 0);
     1214 +        mutex_destroy(&zd->zd_dirobj_lock);
1214 1215  
1215 1216          for (int l = 0; l < ZTEST_OBJECT_LOCKS; l++)
1216 1217                  ztest_rll_destroy(&zd->zd_object_lock[l]);
1217 1218  
1218 1219          for (int l = 0; l < ZTEST_RANGE_LOCKS; l++)
1219 1220                  ztest_rll_destroy(&zd->zd_range_lock[l]);
1220 1221  }
1221 1222  
1222 1223  #define TXG_MIGHTWAIT   (ztest_random(10) == 0 ? TXG_NOWAIT : TXG_WAIT)
1223 1224  
↓ open down ↓ 734 lines elided ↑ open up ↑
1958 1959  
1959 1960  /*
1960 1961   * Lookup a bunch of objects.  Returns the number of objects not found.
1961 1962   */
1962 1963  static int
1963 1964  ztest_lookup(ztest_ds_t *zd, ztest_od_t *od, int count)
1964 1965  {
1965 1966          int missing = 0;
1966 1967          int error;
1967 1968  
1968      -        ASSERT(_mutex_held(&zd->zd_dirobj_lock));
     1969 +        ASSERT(MUTEX_HELD(&zd->zd_dirobj_lock));
1969 1970  
1970 1971          for (int i = 0; i < count; i++, od++) {
1971 1972                  od->od_object = 0;
1972 1973                  error = zap_lookup(zd->zd_os, od->od_dir, od->od_name,
1973 1974                      sizeof (uint64_t), 1, &od->od_object);
1974 1975                  if (error) {
1975 1976                          ASSERT(error == ENOENT);
1976 1977                          ASSERT(od->od_object == 0);
1977 1978                          missing++;
1978 1979                  } else {
↓ open down ↓ 19 lines elided ↑ open up ↑
1998 1999          }
1999 2000  
2000 2001          return (missing);
2001 2002  }
2002 2003  
2003 2004  static int
2004 2005  ztest_create(ztest_ds_t *zd, ztest_od_t *od, int count)
2005 2006  {
2006 2007          int missing = 0;
2007 2008  
2008      -        ASSERT(_mutex_held(&zd->zd_dirobj_lock));
     2009 +        ASSERT(MUTEX_HELD(&zd->zd_dirobj_lock));
2009 2010  
2010 2011          for (int i = 0; i < count; i++, od++) {
2011 2012                  if (missing) {
2012 2013                          od->od_object = 0;
2013 2014                          missing++;
2014 2015                          continue;
2015 2016                  }
2016 2017  
2017 2018                  lr_create_t *lr = ztest_lr_alloc(sizeof (*lr), od->od_name);
2018 2019  
↓ open down ↓ 24 lines elided ↑ open up ↑
2043 2044  
2044 2045          return (missing);
2045 2046  }
2046 2047  
2047 2048  static int
2048 2049  ztest_remove(ztest_ds_t *zd, ztest_od_t *od, int count)
2049 2050  {
2050 2051          int missing = 0;
2051 2052          int error;
2052 2053  
2053      -        ASSERT(_mutex_held(&zd->zd_dirobj_lock));
     2054 +        ASSERT(MUTEX_HELD(&zd->zd_dirobj_lock));
2054 2055  
2055 2056          od += count - 1;
2056 2057  
2057 2058          for (int i = count - 1; i >= 0; i--, od--) {
2058 2059                  if (missing) {
2059 2060                          missing++;
2060 2061                          continue;
2061 2062                  }
2062 2063  
2063 2064                  /*
↓ open down ↓ 125 lines elided ↑ open up ↑
2189 2190          blocksize = doi.doi_data_block_size;
2190 2191          data = umem_alloc(blocksize, UMEM_NOFAIL);
2191 2192  
2192 2193          /*
2193 2194           * Pick an i/o type at random, biased toward writing block tags.
2194 2195           */
2195 2196          io_type = ztest_random(ZTEST_IO_TYPES);
2196 2197          if (ztest_random(2) == 0)
2197 2198                  io_type = ZTEST_IO_WRITE_TAG;
2198 2199  
2199      -        (void) rw_rdlock(&zd->zd_zilog_lock);
     2200 +        rw_enter(&zd->zd_zilog_lock, RW_READER);
2200 2201  
2201 2202          switch (io_type) {
2202 2203  
2203 2204          case ZTEST_IO_WRITE_TAG:
2204 2205                  ztest_bt_generate(&wbt, zd->zd_os, object, offset, 0, 0, 0);
2205 2206                  (void) ztest_write(zd, object, offset, sizeof (wbt), &wbt);
2206 2207                  break;
2207 2208  
2208 2209          case ZTEST_IO_WRITE_PATTERN:
2209 2210                  (void) memset(data, 'a' + (object + offset) % 5, blocksize);
↓ open down ↓ 16 lines elided ↑ open up ↑
2226 2227  
2227 2228          case ZTEST_IO_TRUNCATE:
2228 2229                  (void) ztest_truncate(zd, object, offset, blocksize);
2229 2230                  break;
2230 2231  
2231 2232          case ZTEST_IO_SETATTR:
2232 2233                  (void) ztest_setattr(zd, object);
2233 2234                  break;
2234 2235  
2235 2236          case ZTEST_IO_REWRITE:
2236      -                (void) rw_rdlock(&ztest_name_lock);
     2237 +                rw_enter(&ztest_name_lock, RW_READER);
2237 2238                  err = ztest_dsl_prop_set_uint64(zd->zd_name,
2238 2239                      ZFS_PROP_CHECKSUM, spa_dedup_checksum(ztest_spa),
2239 2240                      B_FALSE);
2240 2241                  VERIFY(err == 0 || err == ENOSPC);
2241 2242                  err = ztest_dsl_prop_set_uint64(zd->zd_name,
2242 2243                      ZFS_PROP_COMPRESSION,
2243 2244                      ztest_random_dsl_prop(ZFS_PROP_COMPRESSION),
2244 2245                      B_FALSE);
2245 2246                  VERIFY(err == 0 || err == ENOSPC);
2246      -                (void) rw_unlock(&ztest_name_lock);
     2247 +                rw_exit(&ztest_name_lock);
2247 2248  
2248 2249                  VERIFY0(dmu_read(zd->zd_os, object, offset, blocksize, data,
2249 2250                      DMU_READ_NO_PREFETCH));
2250 2251  
2251 2252                  (void) ztest_write(zd, object, offset, blocksize, data);
2252 2253                  break;
2253 2254          }
2254 2255  
2255      -        (void) rw_unlock(&zd->zd_zilog_lock);
     2256 +        rw_exit(&zd->zd_zilog_lock);
2256 2257  
2257 2258          umem_free(data, blocksize);
2258 2259  }
2259 2260  
2260 2261  /*
2261 2262   * Initialize an object description template.
2262 2263   */
2263 2264  static void
2264 2265  ztest_od_init(ztest_od_t *od, uint64_t id, char *tag, uint64_t index,
2265 2266      dmu_object_type_t type, uint64_t blocksize, uint64_t gen)
↓ open down ↓ 18 lines elided ↑ open up ↑
2284 2285   * If the objects do not all exist, or if 'remove' is specified,
2285 2286   * remove any existing objects and create new ones.  Otherwise,
2286 2287   * use the existing objects.
2287 2288   */
2288 2289  static int
2289 2290  ztest_object_init(ztest_ds_t *zd, ztest_od_t *od, size_t size, boolean_t remove)
2290 2291  {
2291 2292          int count = size / sizeof (*od);
2292 2293          int rv = 0;
2293 2294  
2294      -        VERIFY(mutex_lock(&zd->zd_dirobj_lock) == 0);
     2295 +        mutex_enter(&zd->zd_dirobj_lock);
2295 2296          if ((ztest_lookup(zd, od, count) != 0 || remove) &&
2296 2297              (ztest_remove(zd, od, count) != 0 ||
2297 2298              ztest_create(zd, od, count) != 0))
2298 2299                  rv = -1;
2299 2300          zd->zd_od = od;
2300      -        VERIFY(mutex_unlock(&zd->zd_dirobj_lock) == 0);
     2301 +        mutex_exit(&zd->zd_dirobj_lock);
2301 2302  
2302 2303          return (rv);
2303 2304  }
2304 2305  
2305 2306  /* ARGSUSED */
2306 2307  void
2307 2308  ztest_zil_commit(ztest_ds_t *zd, uint64_t id)
2308 2309  {
2309 2310          zilog_t *zilog = zd->zd_zilog;
2310 2311  
2311      -        (void) rw_rdlock(&zd->zd_zilog_lock);
     2312 +        rw_enter(&zd->zd_zilog_lock, RW_READER);
2312 2313  
2313 2314          zil_commit(zilog, ztest_random(ZTEST_OBJECTS));
2314 2315  
2315 2316          /*
2316 2317           * Remember the committed values in zd, which is in parent/child
2317 2318           * shared memory.  If we die, the next iteration of ztest_run()
2318 2319           * will verify that the log really does contain this record.
2319 2320           */
2320 2321          mutex_enter(&zilog->zl_lock);
2321 2322          ASSERT(zd->zd_shared != NULL);
2322 2323          ASSERT3U(zd->zd_shared->zd_seq, <=, zilog->zl_commit_lr_seq);
2323 2324          zd->zd_shared->zd_seq = zilog->zl_commit_lr_seq;
2324 2325          mutex_exit(&zilog->zl_lock);
2325 2326  
2326      -        (void) rw_unlock(&zd->zd_zilog_lock);
     2327 +        rw_exit(&zd->zd_zilog_lock);
2327 2328  }
2328 2329  
2329 2330  /*
2330 2331   * This function is designed to simulate the operations that occur during a
2331 2332   * mount/unmount operation.  We hold the dataset across these operations in an
2332 2333   * attempt to expose any implicit assumptions about ZIL management.
2333 2334   */
2334 2335  /* ARGSUSED */
2335 2336  void
2336 2337  ztest_zil_remount(ztest_ds_t *zd, uint64_t id)
2337 2338  {
2338 2339          objset_t *os = zd->zd_os;
2339 2340  
2340 2341          /*
2341 2342           * We grab the zd_dirobj_lock to ensure that no other thread is
2342 2343           * updating the zil (i.e. adding in-memory log records) and the
2343 2344           * zd_zilog_lock to block any I/O.
2344 2345           */
2345      -        VERIFY0(mutex_lock(&zd->zd_dirobj_lock));
2346      -        (void) rw_wrlock(&zd->zd_zilog_lock);
     2346 +        mutex_enter(&zd->zd_dirobj_lock);
     2347 +        rw_enter(&zd->zd_zilog_lock, RW_WRITER);
2347 2348  
2348 2349          /* zfsvfs_teardown() */
2349 2350          zil_close(zd->zd_zilog);
2350 2351  
2351 2352          /* zfsvfs_setup() */
2352 2353          VERIFY(zil_open(os, ztest_get_data) == zd->zd_zilog);
2353 2354          zil_replay(os, zd, ztest_replay_vector);
2354 2355  
2355      -        (void) rw_unlock(&zd->zd_zilog_lock);
2356      -        VERIFY(mutex_unlock(&zd->zd_dirobj_lock) == 0);
     2356 +        rw_exit(&zd->zd_zilog_lock);
     2357 +        mutex_exit(&zd->zd_dirobj_lock);
2357 2358  }
2358 2359  
2359 2360  /*
2360 2361   * Verify that we can't destroy an active pool, create an existing pool,
2361 2362   * or create a pool with a bad vdev spec.
2362 2363   */
2363 2364  /* ARGSUSED */
2364 2365  void
2365 2366  ztest_spa_create_destroy(ztest_ds_t *zd, uint64_t id)
2366 2367  {
↓ open down ↓ 14 lines elided ↑ open up ↑
2381 2382           */
2382 2383          nvroot = make_vdev_root("/dev/bogus", NULL, NULL, 0, 0, 0, 0, 2, 1);
2383 2384          VERIFY3U(ENOENT, ==,
2384 2385              spa_create("ztest_bad_mirror", nvroot, NULL, NULL));
2385 2386          nvlist_free(nvroot);
2386 2387  
2387 2388          /*
2388 2389           * Attempt to create an existing pool.  It shouldn't matter
2389 2390           * what's in the nvroot; we should fail with EEXIST.
2390 2391           */
2391      -        (void) rw_rdlock(&ztest_name_lock);
     2392 +        rw_enter(&ztest_name_lock, RW_READER);
2392 2393          nvroot = make_vdev_root("/dev/bogus", NULL, NULL, 0, 0, 0, 0, 0, 1);
2393 2394          VERIFY3U(EEXIST, ==, spa_create(zo->zo_pool, nvroot, NULL, NULL));
2394 2395          nvlist_free(nvroot);
2395 2396          VERIFY3U(0, ==, spa_open(zo->zo_pool, &spa, FTAG));
2396 2397          VERIFY3U(EBUSY, ==, spa_destroy(zo->zo_pool));
2397 2398          spa_close(spa, FTAG);
2398 2399  
2399      -        (void) rw_unlock(&ztest_name_lock);
     2400 +        rw_exit(&ztest_name_lock);
2400 2401  }
2401 2402  
2402 2403  /* ARGSUSED */
2403 2404  void
2404 2405  ztest_spa_upgrade(ztest_ds_t *zd, uint64_t id)
2405 2406  {
2406 2407          spa_t *spa;
2407 2408          uint64_t initial_version = SPA_VERSION_INITIAL;
2408 2409          uint64_t version, newversion;
2409 2410          nvlist_t *nvroot, *props;
2410 2411          char *name;
2411 2412  
2412      -        VERIFY0(mutex_lock(&ztest_vdev_lock));
     2413 +        mutex_enter(&ztest_vdev_lock);
2413 2414          name = kmem_asprintf("%s_upgrade", ztest_opts.zo_pool);
2414 2415  
2415 2416          /*
2416 2417           * Clean up from previous runs.
2417 2418           */
2418 2419          (void) spa_destroy(name);
2419 2420  
2420 2421          nvroot = make_vdev_root(NULL, NULL, name, ztest_opts.zo_vdev_size, 0,
2421 2422              0, ztest_opts.zo_raidz, ztest_opts.zo_mirrors, 1);
2422 2423  
↓ open down ↓ 38 lines elided ↑ open up ↑
2461 2462                      (u_longlong_t)version, (u_longlong_t)newversion);
2462 2463          }
2463 2464  
2464 2465          spa_upgrade(spa, newversion);
2465 2466          VERIFY3U(spa_version(spa), >, version);
2466 2467          VERIFY3U(spa_version(spa), ==, fnvlist_lookup_uint64(spa->spa_config,
2467 2468              zpool_prop_to_name(ZPOOL_PROP_VERSION)));
2468 2469          spa_close(spa, FTAG);
2469 2470  
2470 2471          strfree(name);
2471      -        VERIFY0(mutex_unlock(&ztest_vdev_lock));
     2472 +        mutex_exit(&ztest_vdev_lock);
2472 2473  }
2473 2474  
2474 2475  static vdev_t *
2475 2476  vdev_lookup_by_path(vdev_t *vd, const char *path)
2476 2477  {
2477 2478          vdev_t *mvd;
2478 2479  
2479 2480          if (vd->vdev_path != NULL && strcmp(path, vd->vdev_path) == 0)
2480 2481                  return (vd);
2481 2482  
↓ open down ↓ 32 lines elided ↑ open up ↑
2514 2515  void
2515 2516  ztest_vdev_add_remove(ztest_ds_t *zd, uint64_t id)
2516 2517  {
2517 2518          ztest_shared_t *zs = ztest_shared;
2518 2519          spa_t *spa = ztest_spa;
2519 2520          uint64_t leaves;
2520 2521          uint64_t guid;
2521 2522          nvlist_t *nvroot;
2522 2523          int error;
2523 2524  
2524      -        VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
     2525 +        mutex_enter(&ztest_vdev_lock);
2525 2526          leaves = MAX(zs->zs_mirrors + zs->zs_splits, 1) * ztest_opts.zo_raidz;
2526 2527  
2527 2528          spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
2528 2529  
2529 2530          ztest_shared->zs_vdev_next_leaf = find_vdev_hole(spa) * leaves;
2530 2531  
2531 2532          /*
2532 2533           * If we have slogs then remove them 1/4 of the time.
2533 2534           */
2534 2535          if (spa_has_slogs(spa) && ztest_random(4) == 0) {
↓ open down ↓ 5 lines elided ↑ open up ↑
2540 2541                  spa_config_exit(spa, SCL_VDEV, FTAG);
2541 2542  
2542 2543                  /*
2543 2544                   * We have to grab the zs_name_lock as writer to
2544 2545                   * prevent a race between removing a slog (dmu_objset_find)
2545 2546                   * and destroying a dataset. Removing the slog will
2546 2547                   * grab a reference on the dataset which may cause
2547 2548                   * dmu_objset_destroy() to fail with EBUSY thus
2548 2549                   * leaving the dataset in an inconsistent state.
2549 2550                   */
2550      -                VERIFY(rw_wrlock(&ztest_name_lock) == 0);
     2551 +                rw_enter(&ztest_name_lock, RW_WRITER);
2551 2552                  error = spa_vdev_remove(spa, guid, B_FALSE);
2552      -                VERIFY(rw_unlock(&ztest_name_lock) == 0);
     2553 +                rw_exit(&ztest_name_lock);
2553 2554  
2554 2555                  if (error && error != EEXIST)
2555 2556                          fatal(0, "spa_vdev_remove() = %d", error);
2556 2557          } else {
2557 2558                  spa_config_exit(spa, SCL_VDEV, FTAG);
2558 2559  
2559 2560                  /*
2560 2561                   * Make 1/4 of the devices be log devices.
2561 2562                   */
2562 2563                  nvroot = make_vdev_root(NULL, NULL, NULL,
↓ open down ↓ 3 lines elided ↑ open up ↑
2566 2567  
2567 2568                  error = spa_vdev_add(spa, nvroot);
2568 2569                  nvlist_free(nvroot);
2569 2570  
2570 2571                  if (error == ENOSPC)
2571 2572                          ztest_record_enospc("spa_vdev_add");
2572 2573                  else if (error != 0)
2573 2574                          fatal(0, "spa_vdev_add() = %d", error);
2574 2575          }
2575 2576  
2576      -        VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
     2577 +        mutex_exit(&ztest_vdev_lock);
2577 2578  }
2578 2579  
2579 2580  /*
2580 2581   * Verify that adding/removing aux devices (l2arc, hot spare) works as expected.
2581 2582   */
2582 2583  /* ARGSUSED */
2583 2584  void
2584 2585  ztest_vdev_aux_add_remove(ztest_ds_t *zd, uint64_t id)
2585 2586  {
2586 2587          ztest_shared_t *zs = ztest_shared;
↓ open down ↓ 5 lines elided ↑ open up ↑
2592 2593          int error;
2593 2594  
2594 2595          if (ztest_random(2) == 0) {
2595 2596                  sav = &spa->spa_spares;
2596 2597                  aux = ZPOOL_CONFIG_SPARES;
2597 2598          } else {
2598 2599                  sav = &spa->spa_l2cache;
2599 2600                  aux = ZPOOL_CONFIG_L2CACHE;
2600 2601          }
2601 2602  
2602      -        VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
     2603 +        mutex_enter(&ztest_vdev_lock);
2603 2604  
2604 2605          spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
2605 2606  
2606 2607          if (sav->sav_count != 0 && ztest_random(4) == 0) {
2607 2608                  /*
2608 2609                   * Pick a random device to remove.
2609 2610                   */
2610 2611                  guid = sav->sav_vdevs[ztest_random(sav->sav_count)]->vdev_guid;
2611 2612          } else {
2612 2613                  /*
↓ open down ↓ 36 lines elided ↑ open up ↑
2649 2650                   * of devices that have pending state changes.
2650 2651                   */
2651 2652                  if (ztest_random(2) == 0)
2652 2653                          (void) vdev_online(spa, guid, 0, NULL);
2653 2654  
2654 2655                  error = spa_vdev_remove(spa, guid, B_FALSE);
2655 2656                  if (error != 0 && error != EBUSY)
2656 2657                          fatal(0, "spa_vdev_remove(%llu) = %d", guid, error);
2657 2658          }
2658 2659  
2659      -        VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
     2660 +        mutex_exit(&ztest_vdev_lock);
2660 2661  }
2661 2662  
2662 2663  /*
2663 2664   * split a pool if it has mirror tlvdevs
2664 2665   */
2665 2666  /* ARGSUSED */
2666 2667  void
2667 2668  ztest_split_pool(ztest_ds_t *zd, uint64_t id)
2668 2669  {
2669 2670          ztest_shared_t *zs = ztest_shared;
2670 2671          spa_t *spa = ztest_spa;
2671 2672          vdev_t *rvd = spa->spa_root_vdev;
2672 2673          nvlist_t *tree, **child, *config, *split, **schild;
2673 2674          uint_t c, children, schildren = 0, lastlogid = 0;
2674 2675          int error = 0;
2675 2676  
2676      -        VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
     2677 +        mutex_enter(&ztest_vdev_lock);
2677 2678  
2678 2679          /* ensure we have a useable config; mirrors of raidz aren't supported */
2679 2680          if (zs->zs_mirrors < 3 || ztest_opts.zo_raidz > 1) {
2680      -                VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
     2681 +                mutex_exit(&ztest_vdev_lock);
2681 2682                  return;
2682 2683          }
2683 2684  
2684 2685          /* clean up the old pool, if any */
2685 2686          (void) spa_destroy("splitp");
2686 2687  
2687 2688          spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
2688 2689  
2689 2690          /* generate a config from the existing config */
2690 2691          mutex_enter(&spa->spa_props_lock);
↓ open down ↓ 38 lines elided ↑ open up ↑
2729 2730          VERIFY(nvlist_alloc(&config, NV_UNIQUE_NAME, 0) == 0);
2730 2731          VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, split) == 0);
2731 2732  
2732 2733          for (c = 0; c < schildren; c++)
2733 2734                  nvlist_free(schild[c]);
2734 2735          free(schild);
2735 2736          nvlist_free(split);
2736 2737  
2737 2738          spa_config_exit(spa, SCL_VDEV, FTAG);
2738 2739  
2739      -        (void) rw_wrlock(&ztest_name_lock);
     2740 +        rw_enter(&ztest_name_lock, RW_WRITER);
2740 2741          error = spa_vdev_split_mirror(spa, "splitp", config, NULL, B_FALSE);
2741      -        (void) rw_unlock(&ztest_name_lock);
     2742 +        rw_exit(&ztest_name_lock);
2742 2743  
2743 2744          nvlist_free(config);
2744 2745  
2745 2746          if (error == 0) {
2746 2747                  (void) printf("successful split - results:\n");
2747 2748                  mutex_enter(&spa_namespace_lock);
2748 2749                  show_pool_stats(spa);
2749 2750                  show_pool_stats(spa_lookup("splitp"));
2750 2751                  mutex_exit(&spa_namespace_lock);
2751 2752                  ++zs->zs_splits;
2752 2753                  --zs->zs_mirrors;
2753 2754          }
2754      -        VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
     2755 +        mutex_exit(&ztest_vdev_lock);
2755 2756  
2756 2757  }
2757 2758  
2758 2759  /*
2759 2760   * Verify that we can attach and detach devices.
2760 2761   */
2761 2762  /* ARGSUSED */
2762 2763  void
2763 2764  ztest_vdev_attach_detach(ztest_ds_t *zd, uint64_t id)
2764 2765  {
↓ open down ↓ 8 lines elided ↑ open up ↑
2773 2774          uint64_t ashift = ztest_get_ashift();
2774 2775          uint64_t oldguid, pguid;
2775 2776          uint64_t oldsize, newsize;
2776 2777          char oldpath[MAXPATHLEN], newpath[MAXPATHLEN];
2777 2778          int replacing;
2778 2779          int oldvd_has_siblings = B_FALSE;
2779 2780          int newvd_is_spare = B_FALSE;
2780 2781          int oldvd_is_log;
2781 2782          int error, expected_error;
2782 2783  
2783      -        VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
     2784 +        mutex_enter(&ztest_vdev_lock);
2784 2785          leaves = MAX(zs->zs_mirrors, 1) * ztest_opts.zo_raidz;
2785 2786  
2786 2787          spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
2787 2788  
2788 2789          /*
2789 2790           * Decide whether to do an attach or a replace.
2790 2791           */
2791 2792          replacing = ztest_random(2);
2792 2793  
2793 2794          /*
↓ open down ↓ 40 lines elided ↑ open up ↑
2834 2835  
2835 2836          /*
2836 2837           * If oldvd has siblings, then half of the time, detach it.
2837 2838           */
2838 2839          if (oldvd_has_siblings && ztest_random(2) == 0) {
2839 2840                  spa_config_exit(spa, SCL_VDEV, FTAG);
2840 2841                  error = spa_vdev_detach(spa, oldguid, pguid, B_FALSE);
2841 2842                  if (error != 0 && error != ENODEV && error != EBUSY &&
2842 2843                      error != ENOTSUP)
2843 2844                          fatal(0, "detach (%s) returned %d", oldpath, error);
2844      -                VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
     2845 +                mutex_exit(&ztest_vdev_lock);
2845 2846                  return;
2846 2847          }
2847 2848  
2848 2849          /*
2849 2850           * For the new vdev, choose with equal probability between the two
2850 2851           * standard paths (ending in either 'a' or 'b') or a random hot spare.
2851 2852           */
2852 2853          if (sav->sav_count != 0 && ztest_random(3) == 0) {
2853 2854                  newvd = sav->sav_vdevs[ztest_random(sav->sav_count)];
2854 2855                  newvd_is_spare = B_TRUE;
↓ open down ↓ 73 lines elided ↑ open up ↑
2928 2929                  expected_error = error;
2929 2930  
2930 2931          /* XXX workaround 6690467 */
2931 2932          if (error != expected_error && expected_error != EBUSY) {
2932 2933                  fatal(0, "attach (%s %llu, %s %llu, %d) "
2933 2934                      "returned %d, expected %d",
2934 2935                      oldpath, oldsize, newpath,
2935 2936                      newsize, replacing, error, expected_error);
2936 2937          }
2937 2938  
2938      -        VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
     2939 +        mutex_exit(&ztest_vdev_lock);
2939 2940  }
2940 2941  
2941 2942  /*
2942 2943   * Callback function which expands the physical size of the vdev.
2943 2944   */
2944 2945  vdev_t *
2945 2946  grow_vdev(vdev_t *vd, void *arg)
2946 2947  {
2947 2948          spa_t *spa = vd->vdev_spa;
2948 2949          size_t *newsize = arg;
↓ open down ↓ 107 lines elided ↑ open up ↑
3056 3057  ztest_vdev_LUN_growth(ztest_ds_t *zd, uint64_t id)
3057 3058  {
3058 3059          spa_t *spa = ztest_spa;
3059 3060          vdev_t *vd, *tvd;
3060 3061          metaslab_class_t *mc;
3061 3062          metaslab_group_t *mg;
3062 3063          size_t psize, newsize;
3063 3064          uint64_t top;
3064 3065          uint64_t old_class_space, new_class_space, old_ms_count, new_ms_count;
3065 3066  
3066      -        VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
     3067 +        mutex_enter(&ztest_vdev_lock);
3067 3068          spa_config_enter(spa, SCL_STATE, spa, RW_READER);
3068 3069  
3069 3070          top = ztest_random_vdev_top(spa, B_TRUE);
3070 3071  
3071 3072          tvd = spa->spa_root_vdev->vdev_child[top];
3072 3073          mg = tvd->vdev_mg;
3073 3074          mc = mg->mg_class;
3074 3075          old_ms_count = tvd->vdev_ms_count;
3075 3076          old_class_space = metaslab_class_get_space(mc);
3076 3077  
↓ open down ↓ 7 lines elided ↑ open up ↑
3084 3085  
3085 3086          psize = vd->vdev_psize;
3086 3087  
3087 3088          /*
3088 3089           * We only try to expand the vdev if it's healthy, less than 4x its
3089 3090           * original size, and it has a valid psize.
3090 3091           */
3091 3092          if (tvd->vdev_state != VDEV_STATE_HEALTHY ||
3092 3093              psize == 0 || psize >= 4 * ztest_opts.zo_vdev_size) {
3093 3094                  spa_config_exit(spa, SCL_STATE, spa);
3094      -                VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
     3095 +                mutex_exit(&ztest_vdev_lock);
3095 3096                  return;
3096 3097          }
3097 3098          ASSERT(psize > 0);
3098 3099          newsize = psize + psize / 8;
3099 3100          ASSERT3U(newsize, >, psize);
3100 3101  
3101 3102          if (ztest_opts.zo_verbose >= 6) {
3102 3103                  (void) printf("Expanding LUN %s from %lu to %lu\n",
3103 3104                      vd->vdev_path, (ulong_t)psize, (ulong_t)newsize);
3104 3105          }
↓ open down ↓ 4 lines elided ↑ open up ↑
3109 3110           *      2). online the vdev to create the new metaslabs
3110 3111           */
3111 3112          if (vdev_walk_tree(tvd, grow_vdev, &newsize) != NULL ||
3112 3113              vdev_walk_tree(tvd, online_vdev, NULL) != NULL ||
3113 3114              tvd->vdev_state != VDEV_STATE_HEALTHY) {
3114 3115                  if (ztest_opts.zo_verbose >= 5) {
3115 3116                          (void) printf("Could not expand LUN because "
3116 3117                              "the vdev configuration changed.\n");
3117 3118                  }
3118 3119                  spa_config_exit(spa, SCL_STATE, spa);
3119      -                VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
     3120 +                mutex_exit(&ztest_vdev_lock);
3120 3121                  return;
3121 3122          }
3122 3123  
3123 3124          spa_config_exit(spa, SCL_STATE, spa);
3124 3125  
3125 3126          /*
3126 3127           * Expanding the LUN will update the config asynchronously,
3127 3128           * thus we must wait for the async thread to complete any
3128 3129           * pending tasks before proceeding.
3129 3130           */
↓ open down ↓ 13 lines elided ↑ open up ↑
3143 3144          tvd = spa->spa_root_vdev->vdev_child[top];
3144 3145          new_ms_count = tvd->vdev_ms_count;
3145 3146          new_class_space = metaslab_class_get_space(mc);
3146 3147  
3147 3148          if (tvd->vdev_mg != mg || mg->mg_class != mc) {
3148 3149                  if (ztest_opts.zo_verbose >= 5) {
3149 3150                          (void) printf("Could not verify LUN expansion due to "
3150 3151                              "intervening vdev offline or remove.\n");
3151 3152                  }
3152 3153                  spa_config_exit(spa, SCL_STATE, spa);
3153      -                VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
     3154 +                mutex_exit(&ztest_vdev_lock);
3154 3155                  return;
3155 3156          }
3156 3157  
3157 3158          /*
3158 3159           * Make sure we were able to grow the vdev.
3159 3160           */
3160 3161          if (new_ms_count <= old_ms_count)
3161 3162                  fatal(0, "LUN expansion failed: ms_count %llu <= %llu\n",
3162 3163                      old_ms_count, new_ms_count);
3163 3164  
↓ open down ↓ 7 lines elided ↑ open up ↑
3171 3172          if (ztest_opts.zo_verbose >= 5) {
3172 3173                  char oldnumbuf[NN_NUMBUF_SZ], newnumbuf[NN_NUMBUF_SZ];
3173 3174  
3174 3175                  nicenum(old_class_space, oldnumbuf, sizeof (oldnumbuf));
3175 3176                  nicenum(new_class_space, newnumbuf, sizeof (newnumbuf));
3176 3177                  (void) printf("%s grew from %s to %s\n",
3177 3178                      spa->spa_name, oldnumbuf, newnumbuf);
3178 3179          }
3179 3180  
3180 3181          spa_config_exit(spa, SCL_STATE, spa);
3181      -        VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
     3182 +        mutex_exit(&ztest_vdev_lock);
3182 3183  }
3183 3184  
3184 3185  /*
3185 3186   * Verify that dmu_objset_{create,destroy,open,close} work as expected.
3186 3187   */
3187 3188  /* ARGSUSED */
3188 3189  static void
3189 3190  ztest_objset_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
3190 3191  {
3191 3192          /*
↓ open down ↓ 93 lines elided ↑ open up ↑
3285 3286  void
3286 3287  ztest_dmu_objset_create_destroy(ztest_ds_t *zd, uint64_t id)
3287 3288  {
3288 3289          ztest_ds_t zdtmp;
3289 3290          int iters;
3290 3291          int error;
3291 3292          objset_t *os, *os2;
3292 3293          char name[ZFS_MAX_DATASET_NAME_LEN];
3293 3294          zilog_t *zilog;
3294 3295  
3295      -        (void) rw_rdlock(&ztest_name_lock);
     3296 +        rw_enter(&ztest_name_lock, RW_READER);
3296 3297  
3297 3298          (void) snprintf(name, sizeof (name), "%s/temp_%llu",
3298 3299              ztest_opts.zo_pool, (u_longlong_t)id);
3299 3300  
3300 3301          /*
3301 3302           * If this dataset exists from a previous run, process its replay log
3302 3303           * half of the time.  If we don't replay it, then dmu_objset_destroy()
3303 3304           * (invoked from ztest_objset_destroy_cb()) should just throw it away.
3304 3305           */
3305 3306          if (ztest_random(2) == 0 &&
↓ open down ↓ 18 lines elided ↑ open up ↑
3324 3325          VERIFY3U(ENOENT, ==, dmu_objset_own(name, DMU_OST_OTHER, B_TRUE,
3325 3326              FTAG, &os));
3326 3327  
3327 3328          /*
3328 3329           * Verify that we can create a new dataset.
3329 3330           */
3330 3331          error = ztest_dataset_create(name);
3331 3332          if (error) {
3332 3333                  if (error == ENOSPC) {
3333 3334                          ztest_record_enospc(FTAG);
3334      -                        (void) rw_unlock(&ztest_name_lock);
     3335 +                        rw_exit(&ztest_name_lock);
3335 3336                          return;
3336 3337                  }
3337 3338                  fatal(0, "dmu_objset_create(%s) = %d", name, error);
3338 3339          }
3339 3340  
3340 3341          VERIFY0(dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os));
3341 3342  
3342 3343          ztest_zd_init(&zdtmp, NULL, os);
3343 3344  
3344 3345          /*
↓ open down ↓ 27 lines elided ↑ open up ↑
3372 3373          /*
3373 3374           * Verify that we cannot own an objset that is already owned.
3374 3375           */
3375 3376          VERIFY3U(EBUSY, ==,
3376 3377              dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os2));
3377 3378  
3378 3379          zil_close(zilog);
3379 3380          dmu_objset_disown(os, FTAG);
3380 3381          ztest_zd_fini(&zdtmp);
3381 3382  
3382      -        (void) rw_unlock(&ztest_name_lock);
     3383 +        rw_exit(&ztest_name_lock);
3383 3384  }
3384 3385  
3385 3386  /*
3386 3387   * Verify that dmu_snapshot_{create,destroy,open,close} work as expected.
3387 3388   */
3388 3389  void
3389 3390  ztest_dmu_snapshot_create_destroy(ztest_ds_t *zd, uint64_t id)
3390 3391  {
3391      -        (void) rw_rdlock(&ztest_name_lock);
     3392 +        rw_enter(&ztest_name_lock, RW_READER);
3392 3393          (void) ztest_snapshot_destroy(zd->zd_name, id);
3393 3394          (void) ztest_snapshot_create(zd->zd_name, id);
3394      -        (void) rw_unlock(&ztest_name_lock);
     3395 +        rw_exit(&ztest_name_lock);
3395 3396  }
3396 3397  
3397 3398  /*
3398 3399   * Cleanup non-standard snapshots and clones.
3399 3400   */
3400 3401  void
3401 3402  ztest_dsl_dataset_cleanup(char *osname, uint64_t id)
3402 3403  {
3403 3404          char snap1name[ZFS_MAX_DATASET_NAME_LEN];
3404 3405          char clone1name[ZFS_MAX_DATASET_NAME_LEN];
↓ open down ↓ 38 lines elided ↑ open up ↑
3443 3444  {
3444 3445          objset_t *os;
3445 3446          char snap1name[ZFS_MAX_DATASET_NAME_LEN];
3446 3447          char clone1name[ZFS_MAX_DATASET_NAME_LEN];
3447 3448          char snap2name[ZFS_MAX_DATASET_NAME_LEN];
3448 3449          char clone2name[ZFS_MAX_DATASET_NAME_LEN];
3449 3450          char snap3name[ZFS_MAX_DATASET_NAME_LEN];
3450 3451          char *osname = zd->zd_name;
3451 3452          int error;
3452 3453  
3453      -        (void) rw_rdlock(&ztest_name_lock);
     3454 +        rw_enter(&ztest_name_lock, RW_READER);
3454 3455  
3455 3456          ztest_dsl_dataset_cleanup(osname, id);
3456 3457  
3457 3458          (void) snprintf(snap1name, sizeof (snap1name),
3458 3459              "%s@s1_%llu", osname, id);
3459 3460          (void) snprintf(clone1name, sizeof (clone1name),
3460 3461              "%s/c1_%llu", osname, id);
3461 3462          (void) snprintf(snap2name, sizeof (snap2name),
3462 3463              "%s@s2_%llu", clone1name, id);
3463 3464          (void) snprintf(clone2name, sizeof (clone2name),
↓ open down ↓ 56 lines elided ↑ open up ↑
3520 3521                  goto out;
3521 3522          }
3522 3523          if (error != EBUSY)
3523 3524                  fatal(0, "dsl_dataset_promote(%s), %d, not EBUSY", clone2name,
3524 3525                      error);
3525 3526          dmu_objset_disown(os, FTAG);
3526 3527  
3527 3528  out:
3528 3529          ztest_dsl_dataset_cleanup(osname, id);
3529 3530  
3530      -        (void) rw_unlock(&ztest_name_lock);
     3531 +        rw_exit(&ztest_name_lock);
3531 3532  }
3532 3533  
3533 3534  /*
3534 3535   * Verify that dmu_object_{alloc,free} work as expected.
3535 3536   */
3536 3537  void
3537 3538  ztest_dmu_object_alloc_free(ztest_ds_t *zd, uint64_t id)
3538 3539  {
3539 3540          ztest_od_t od[4];
3540 3541          int batchsize = sizeof (od) / sizeof (od[0]);
↓ open down ↓ 913 lines elided ↑ open up ↑
4454 4455                  return;
4455 4456          }
4456 4457  
4457 4458          /* Was this callback added to the global callback list? */
4458 4459          if (!data->zcd_added)
4459 4460                  goto out;
4460 4461  
4461 4462          ASSERT3U(data->zcd_txg, !=, 0);
4462 4463  
4463 4464          /* Remove our callback from the list */
4464      -        (void) mutex_lock(&zcl.zcl_callbacks_lock);
     4465 +        mutex_enter(&zcl.zcl_callbacks_lock);
4465 4466          list_remove(&zcl.zcl_callbacks, data);
4466      -        (void) mutex_unlock(&zcl.zcl_callbacks_lock);
     4467 +        mutex_exit(&zcl.zcl_callbacks_lock);
4467 4468  
4468 4469  out:
4469 4470          umem_free(data, sizeof (ztest_cb_data_t));
4470 4471  }
4471 4472  
4472 4473  /* Allocate and initialize callback data structure */
4473 4474  static ztest_cb_data_t *
4474 4475  ztest_create_cb_data(objset_t *os, uint64_t txg)
4475 4476  {
4476 4477          ztest_cb_data_t *cb_data;
↓ open down ↓ 81 lines elided ↑ open up ↑
4558 4559           */
4559 4560          VERIFY(0 == dmu_read(os, od[0].od_object, 0, sizeof (uint64_t),
4560 4561              &old_txg, DMU_READ_PREFETCH));
4561 4562  
4562 4563          if (old_txg > txg)
4563 4564                  fatal(0, "future leak: got %" PRIu64 ", open txg is %" PRIu64,
4564 4565                      old_txg, txg);
4565 4566  
4566 4567          dmu_write(os, od[0].od_object, 0, sizeof (uint64_t), &txg, tx);
4567 4568  
4568      -        (void) mutex_lock(&zcl.zcl_callbacks_lock);
     4569 +        mutex_enter(&zcl.zcl_callbacks_lock);
4569 4570  
4570 4571          /*
4571 4572           * Since commit callbacks don't have any ordering requirement and since
4572 4573           * it is theoretically possible for a commit callback to be called
4573 4574           * after an arbitrary amount of time has elapsed since its txg has been
4574 4575           * synced, it is difficult to reliably determine whether a commit
4575 4576           * callback hasn't been called due to high load or due to a flawed
4576 4577           * implementation.
4577 4578           *
4578 4579           * In practice, we will assume that if after a certain number of txgs a
↓ open down ↓ 26 lines elided ↑ open up ↑
4605 4606                  else
4606 4607                          list_insert_after(&zcl.zcl_callbacks, tmp_cb,
4607 4608                              cb_data[i]);
4608 4609  
4609 4610                  cb_data[i]->zcd_added = B_TRUE;
4610 4611                  VERIFY(!cb_data[i]->zcd_called);
4611 4612  
4612 4613                  tmp_cb = cb_data[i];
4613 4614          }
4614 4615  
4615      -        (void) mutex_unlock(&zcl.zcl_callbacks_lock);
     4616 +        mutex_exit(&zcl.zcl_callbacks_lock);
4616 4617  
4617 4618          dmu_tx_commit(tx);
4618 4619  }
4619 4620  
4620 4621  /* ARGSUSED */
4621 4622  void
4622 4623  ztest_dsl_prop_get_set(ztest_ds_t *zd, uint64_t id)
4623 4624  {
4624 4625          zfs_prop_t proplist[] = {
4625 4626                  ZFS_PROP_CHECKSUM,
4626 4627                  ZFS_PROP_COMPRESSION,
4627 4628                  ZFS_PROP_COPIES,
4628 4629                  ZFS_PROP_DEDUP
4629 4630          };
4630 4631  
4631      -        (void) rw_rdlock(&ztest_name_lock);
     4632 +        rw_enter(&ztest_name_lock, RW_READER);
4632 4633  
4633 4634          for (int p = 0; p < sizeof (proplist) / sizeof (proplist[0]); p++)
4634 4635                  (void) ztest_dsl_prop_set_uint64(zd->zd_name, proplist[p],
4635 4636                      ztest_random_dsl_prop(proplist[p]), (int)ztest_random(2));
4636 4637  
4637      -        (void) rw_unlock(&ztest_name_lock);
     4638 +        rw_exit(&ztest_name_lock);
4638 4639  }
4639 4640  
4640 4641  /* ARGSUSED */
4641 4642  void
4642 4643  ztest_spa_prop_get_set(ztest_ds_t *zd, uint64_t id)
4643 4644  {
4644 4645          nvlist_t *props = NULL;
4645 4646  
4646      -        (void) rw_rdlock(&ztest_name_lock);
     4647 +        rw_enter(&ztest_name_lock, RW_READER);
4647 4648  
4648 4649          (void) ztest_spa_prop_set_uint64(ZPOOL_PROP_DEDUPDITTO,
4649 4650              ZIO_DEDUPDITTO_MIN + ztest_random(ZIO_DEDUPDITTO_MIN));
4650 4651  
4651 4652          VERIFY0(spa_prop_get(ztest_spa, &props));
4652 4653  
4653 4654          if (ztest_opts.zo_verbose >= 6)
4654 4655                  dump_nvlist(props, 4);
4655 4656  
4656 4657          nvlist_free(props);
4657 4658  
4658      -        (void) rw_unlock(&ztest_name_lock);
     4659 +        rw_exit(&ztest_name_lock);
4659 4660  }
4660 4661  
4661 4662  static int
4662 4663  user_release_one(const char *snapname, const char *holdname)
4663 4664  {
4664 4665          nvlist_t *snaps, *holds;
4665 4666          int error;
4666 4667  
4667 4668          snaps = fnvlist_alloc();
4668 4669          holds = fnvlist_alloc();
↓ open down ↓ 14 lines elided ↑ open up ↑
4683 4684          int error;
4684 4685          objset_t *os = zd->zd_os;
4685 4686          objset_t *origin;
4686 4687          char snapname[100];
4687 4688          char fullname[100];
4688 4689          char clonename[100];
4689 4690          char tag[100];
4690 4691          char osname[ZFS_MAX_DATASET_NAME_LEN];
4691 4692          nvlist_t *holds;
4692 4693  
4693      -        (void) rw_rdlock(&ztest_name_lock);
     4694 +        rw_enter(&ztest_name_lock, RW_READER);
4694 4695  
4695 4696          dmu_objset_name(os, osname);
4696 4697  
4697 4698          (void) snprintf(snapname, sizeof (snapname), "sh1_%llu", id);
4698 4699          (void) snprintf(fullname, sizeof (fullname), "%s@%s", osname, snapname);
4699 4700          (void) snprintf(clonename, sizeof (clonename),
4700 4701              "%s/ch1_%llu", osname, id);
4701 4702          (void) snprintf(tag, sizeof (tag), "tag_%llu", id);
4702 4703  
4703 4704          /*
↓ open down ↓ 84 lines elided ↑ open up ↑
4788 4789                      fullname, error);
4789 4790          }
4790 4791  
4791 4792          error = user_release_one(fullname, tag);
4792 4793          if (error)
4793 4794                  fatal(0, "user_release_one(%s, %s) = %d", fullname, tag, error);
4794 4795  
4795 4796          VERIFY3U(dmu_objset_hold(fullname, FTAG, &origin), ==, ENOENT);
4796 4797  
4797 4798  out:
4798      -        (void) rw_unlock(&ztest_name_lock);
     4799 +        rw_exit(&ztest_name_lock);
4799 4800  }
4800 4801  
4801 4802  /*
4802 4803   * Inject random faults into the on-disk data.
4803 4804   */
4804 4805  /* ARGSUSED */
4805 4806  void
4806 4807  ztest_fault_inject(ztest_ds_t *zd, uint64_t id)
4807 4808  {
4808 4809          ztest_shared_t *zs = ztest_shared;
↓ open down ↓ 7 lines elided ↑ open up ↑
4816 4817          char pathrand[MAXPATHLEN];
4817 4818          size_t fsize;
4818 4819          int bshift = SPA_MAXBLOCKSHIFT + 2;
4819 4820          int iters = 1000;
4820 4821          int maxfaults;
4821 4822          int mirror_save;
4822 4823          vdev_t *vd0 = NULL;
4823 4824          uint64_t guid0 = 0;
4824 4825          boolean_t islog = B_FALSE;
4825 4826  
4826      -        VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
     4827 +        mutex_enter(&ztest_vdev_lock);
4827 4828          maxfaults = MAXFAULTS();
4828 4829          leaves = MAX(zs->zs_mirrors, 1) * ztest_opts.zo_raidz;
4829 4830          mirror_save = zs->zs_mirrors;
4830      -        VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
     4831 +        mutex_exit(&ztest_vdev_lock);
4831 4832  
4832 4833          ASSERT(leaves >= 1);
4833 4834  
4834 4835          /*
4835 4836           * Grab the name lock as reader. There are some operations
4836 4837           * which don't like to have their vdevs changed while
4837 4838           * they are in progress (i.e. spa_change_guid). Those
4838 4839           * operations will have grabbed the name lock as writer.
4839 4840           */
4840      -        (void) rw_rdlock(&ztest_name_lock);
     4841 +        rw_enter(&ztest_name_lock, RW_READER);
4841 4842  
4842 4843          /*
4843 4844           * We need SCL_STATE here because we're going to look at vd0->vdev_tsd.
4844 4845           */
4845 4846          spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
4846 4847  
4847 4848          if (ztest_random(2) == 0) {
4848 4849                  /*
4849 4850                   * Inject errors on a normal data device or slog device.
4850 4851                   */
↓ open down ↓ 48 lines elided ↑ open up ↑
4899 4900                          guid0 = vd0->vdev_guid;
4900 4901                  }
4901 4902          } else {
4902 4903                  /*
4903 4904                   * Inject errors on an l2cache device.
4904 4905                   */
4905 4906                  spa_aux_vdev_t *sav = &spa->spa_l2cache;
4906 4907  
4907 4908                  if (sav->sav_count == 0) {
4908 4909                          spa_config_exit(spa, SCL_STATE, FTAG);
4909      -                        (void) rw_unlock(&ztest_name_lock);
     4910 +                        rw_exit(&ztest_name_lock);
4910 4911                          return;
4911 4912                  }
4912 4913                  vd0 = sav->sav_vdevs[ztest_random(sav->sav_count)];
4913 4914                  guid0 = vd0->vdev_guid;
4914 4915                  (void) strcpy(path0, vd0->vdev_path);
4915 4916                  (void) strcpy(pathrand, vd0->vdev_path);
4916 4917  
4917 4918                  leaf = 0;
4918 4919                  leaves = 1;
4919 4920                  maxfaults = INT_MAX;    /* no limit on cache devices */
4920 4921          }
4921 4922  
4922 4923          spa_config_exit(spa, SCL_STATE, FTAG);
4923      -        (void) rw_unlock(&ztest_name_lock);
     4924 +        rw_exit(&ztest_name_lock);
4924 4925  
4925 4926          /*
4926 4927           * If we can tolerate two or more faults, or we're dealing
4927 4928           * with a slog, randomly online/offline vd0.
4928 4929           */
4929 4930          if ((maxfaults >= 2 || islog) && guid0 != 0) {
4930 4931                  if (ztest_random(10) < 6) {
4931 4932                          int flags = (ztest_random(2) == 0 ?
4932 4933                              ZFS_OFFLINE_TEMPORARY : 0);
4933 4934  
4934 4935                          /*
4935 4936                           * We have to grab the zs_name_lock as writer to
4936 4937                           * prevent a race between offlining a slog and
4937 4938                           * destroying a dataset. Offlining the slog will
4938 4939                           * grab a reference on the dataset which may cause
4939 4940                           * dmu_objset_destroy() to fail with EBUSY thus
4940 4941                           * leaving the dataset in an inconsistent state.
4941 4942                           */
4942 4943                          if (islog)
4943      -                                (void) rw_wrlock(&ztest_name_lock);
     4944 +                                rw_enter(&ztest_name_lock, RW_WRITER);
4944 4945  
4945 4946                          VERIFY(vdev_offline(spa, guid0, flags) != EBUSY);
4946 4947  
4947 4948                          if (islog)
4948      -                                (void) rw_unlock(&ztest_name_lock);
     4949 +                                rw_exit(&ztest_name_lock);
4949 4950                  } else {
4950 4951                          /*
4951 4952                           * Ideally we would like to be able to randomly
4952 4953                           * call vdev_[on|off]line without holding locks
4953 4954                           * to force unpredictable failures but the side
4954 4955                           * effects of vdev_[on|off]line prevent us from
4955 4956                           * doing so. We grab the ztest_vdev_lock here to
4956 4957                           * prevent a race between injection testing and
4957 4958                           * aux_vdev removal.
4958 4959                           */
4959      -                        VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
     4960 +                        mutex_enter(&ztest_vdev_lock);
4960 4961                          (void) vdev_online(spa, guid0, 0, NULL);
4961      -                        VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
     4962 +                        mutex_exit(&ztest_vdev_lock);
4962 4963                  }
4963 4964          }
4964 4965  
4965 4966          if (maxfaults == 0)
4966 4967                  return;
4967 4968  
4968 4969          /*
4969 4970           * We have at least single-fault tolerance, so inject data corruption.
4970 4971           */
4971 4972          fd = open(pathrand, O_RDWR);
↓ open down ↓ 51 lines elided ↑ open up ↑
5023 5024                  /*
5024 5025                   * The two end labels are stored at the "end" of the disk, but
5025 5026                   * the end of the disk (vdev_psize) is aligned to
5026 5027                   * sizeof (vdev_label_t).
5027 5028                   */
5028 5029                  uint64_t psize = P2ALIGN(fsize, sizeof (vdev_label_t));
5029 5030                  if ((leaf & 1) == 1 &&
5030 5031                      offset + sizeof (bad) > psize - VDEV_LABEL_END_SIZE)
5031 5032                          continue;
5032 5033  
5033      -                VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
     5034 +                mutex_enter(&ztest_vdev_lock);
5034 5035                  if (mirror_save != zs->zs_mirrors) {
5035      -                        VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
     5036 +                        mutex_exit(&ztest_vdev_lock);
5036 5037                          (void) close(fd);
5037 5038                          return;
5038 5039                  }
5039 5040  
5040 5041                  if (pwrite(fd, &bad, sizeof (bad), offset) != sizeof (bad))
5041 5042                          fatal(1, "can't inject bad word at 0x%llx in %s",
5042 5043                              offset, pathrand);
5043 5044  
5044      -                VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
     5045 +                mutex_exit(&ztest_vdev_lock);
5045 5046  
5046 5047                  if (ztest_opts.zo_verbose >= 7)
5047 5048                          (void) printf("injected bad word into %s,"
5048 5049                              " offset 0x%llx\n", pathrand, (u_longlong_t)offset);
5049 5050          }
5050 5051  
5051 5052          (void) close(fd);
5052 5053  }
5053 5054  
5054 5055  /*
↓ open down ↓ 19 lines elided ↑ open up ↑
5074 5075  
5075 5076          ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0);
5076 5077  
5077 5078          if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
5078 5079                  return;
5079 5080  
5080 5081          /*
5081 5082           * Take the name lock as writer to prevent anyone else from changing
5082 5083           * the pool and dataset properies we need to maintain during this test.
5083 5084           */
5084      -        (void) rw_wrlock(&ztest_name_lock);
     5085 +        rw_enter(&ztest_name_lock, RW_WRITER);
5085 5086  
5086 5087          if (ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_DEDUP, checksum,
5087 5088              B_FALSE) != 0 ||
5088 5089              ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_COPIES, 1,
5089 5090              B_FALSE) != 0) {
5090      -                (void) rw_unlock(&ztest_name_lock);
     5091 +                rw_exit(&ztest_name_lock);
5091 5092                  return;
5092 5093          }
5093 5094  
5094 5095          dmu_objset_stats_t dds;
5095 5096          dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
5096 5097          dmu_objset_fast_stat(os, &dds);
5097 5098          dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
5098 5099  
5099 5100          object = od[0].od_object;
5100 5101          blocksize = od[0].od_blocksize;
5101 5102          pattern = zs->zs_guid ^ dds.dds_guid;
5102 5103  
5103 5104          ASSERT(object != 0);
5104 5105  
5105 5106          tx = dmu_tx_create(os);
5106 5107          dmu_tx_hold_write(tx, object, 0, copies * blocksize);
5107 5108          txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
5108 5109          if (txg == 0) {
5109      -                (void) rw_unlock(&ztest_name_lock);
     5110 +                rw_exit(&ztest_name_lock);
5110 5111                  return;
5111 5112          }
5112 5113  
5113 5114          /*
5114 5115           * Write all the copies of our block.
5115 5116           */
5116 5117          for (int i = 0; i < copies; i++) {
5117 5118                  uint64_t offset = i * blocksize;
5118 5119                  int error = dmu_buf_hold(os, object, offset, FTAG, &db,
5119 5120                      DMU_READ_NO_PREFETCH);
↓ open down ↓ 27 lines elided ↑ open up ↑
5147 5148          psize = BP_GET_PSIZE(&blk);
5148 5149          abd = abd_alloc_linear(psize, B_TRUE);
5149 5150          ztest_pattern_set(abd_to_buf(abd), psize, ~pattern);
5150 5151  
5151 5152          (void) zio_wait(zio_rewrite(NULL, spa, 0, &blk,
5152 5153              abd, psize, NULL, NULL, ZIO_PRIORITY_SYNC_WRITE,
5153 5154              ZIO_FLAG_CANFAIL | ZIO_FLAG_INDUCE_DAMAGE, NULL));
5154 5155  
5155 5156          abd_free(abd);
5156 5157  
5157      -        (void) rw_unlock(&ztest_name_lock);
     5158 +        rw_exit(&ztest_name_lock);
5158 5159  }
5159 5160  
5160 5161  /*
5161 5162   * Scrub the pool.
5162 5163   */
5163 5164  /* ARGSUSED */
5164 5165  void
5165 5166  ztest_scrub(ztest_ds_t *zd, uint64_t id)
5166 5167  {
5167 5168          spa_t *spa = ztest_spa;
↓ open down ↓ 10 lines elided ↑ open up ↑
5178 5179  void
5179 5180  ztest_reguid(ztest_ds_t *zd, uint64_t id)
5180 5181  {
5181 5182          spa_t *spa = ztest_spa;
5182 5183          uint64_t orig, load;
5183 5184          int error;
5184 5185  
5185 5186          orig = spa_guid(spa);
5186 5187          load = spa_load_guid(spa);
5187 5188  
5188      -        (void) rw_wrlock(&ztest_name_lock);
     5189 +        rw_enter(&ztest_name_lock, RW_WRITER);
5189 5190          error = spa_change_guid(spa);
5190      -        (void) rw_unlock(&ztest_name_lock);
     5191 +        rw_exit(&ztest_name_lock);
5191 5192  
5192 5193          if (error != 0)
5193 5194                  return;
5194 5195  
5195 5196          if (ztest_opts.zo_verbose >= 4) {
5196 5197                  (void) printf("Changed guid old %llu -> %llu\n",
5197 5198                      (u_longlong_t)orig, (u_longlong_t)spa_guid(spa));
5198 5199          }
5199 5200  
5200 5201          VERIFY3U(orig, !=, spa_guid(spa));
↓ open down ↓ 3 lines elided ↑ open up ↑
5204 5205  /*
5205 5206   * Rename the pool to a different name and then rename it back.
5206 5207   */
5207 5208  /* ARGSUSED */
5208 5209  void
5209 5210  ztest_spa_rename(ztest_ds_t *zd, uint64_t id)
5210 5211  {
5211 5212          char *oldname, *newname;
5212 5213          spa_t *spa;
5213 5214  
5214      -        (void) rw_wrlock(&ztest_name_lock);
     5215 +        rw_enter(&ztest_name_lock, RW_WRITER);
5215 5216  
5216 5217          oldname = ztest_opts.zo_pool;
5217 5218          newname = umem_alloc(strlen(oldname) + 5, UMEM_NOFAIL);
5218 5219          (void) strcpy(newname, oldname);
5219 5220          (void) strcat(newname, "_tmp");
5220 5221  
5221 5222          /*
5222 5223           * Do the rename
5223 5224           */
5224 5225          VERIFY3U(0, ==, spa_rename(oldname, newname));
↓ open down ↓ 19 lines elided ↑ open up ↑
5244 5245          /*
5245 5246           * Make sure it can still be opened
5246 5247           */
5247 5248          VERIFY3U(0, ==, spa_open(oldname, &spa, FTAG));
5248 5249  
5249 5250          ASSERT(spa == ztest_spa);
5250 5251          spa_close(spa, FTAG);
5251 5252  
5252 5253          umem_free(newname, strlen(newname) + 1);
5253 5254  
5254      -        (void) rw_unlock(&ztest_name_lock);
     5255 +        rw_exit(&ztest_name_lock);
5255 5256  }
5256 5257  
5257 5258  /*
5258 5259   * Verify pool integrity by running zdb.
5259 5260   */
5260 5261  static void
5261 5262  ztest_run_zdb(char *pool)
5262 5263  {
5263 5264          int status;
5264 5265          char zdb[MAXPATHLEN + MAXNAMELEN + 20];
↓ open down ↓ 334 lines elided ↑ open up ↑
5599 5600  {
5600 5601          ztest_ds_t *zd = &ztest_ds[d];
5601 5602          uint64_t committed_seq = ZTEST_GET_SHARED_DS(d)->zd_seq;
5602 5603          objset_t *os;
5603 5604          zilog_t *zilog;
5604 5605          char name[ZFS_MAX_DATASET_NAME_LEN];
5605 5606          int error;
5606 5607  
5607 5608          ztest_dataset_name(name, ztest_opts.zo_pool, d);
5608 5609  
5609      -        (void) rw_rdlock(&ztest_name_lock);
     5610 +        rw_enter(&ztest_name_lock, RW_READER);
5610 5611  
5611 5612          error = ztest_dataset_create(name);
5612 5613          if (error == ENOSPC) {
5613      -                (void) rw_unlock(&ztest_name_lock);
     5614 +                rw_exit(&ztest_name_lock);
5614 5615                  ztest_record_enospc(FTAG);
5615 5616                  return (error);
5616 5617          }
5617 5618          ASSERT(error == 0 || error == EEXIST);
5618 5619  
5619 5620          VERIFY0(dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, zd, &os));
5620      -        (void) rw_unlock(&ztest_name_lock);
     5621 +        rw_exit(&ztest_name_lock);
5621 5622  
5622 5623          ztest_zd_init(zd, ZTEST_GET_SHARED_DS(d), os);
5623 5624  
5624 5625          zilog = zd->zd_zilog;
5625 5626  
5626 5627          if (zilog->zl_header->zh_claim_lr_seq != 0 &&
5627 5628              zilog->zl_header->zh_claim_lr_seq < committed_seq)
5628 5629                  fatal(0, "missing log records: claimed %llu < committed %llu",
5629 5630                      zilog->zl_header->zh_claim_lr_seq, committed_seq);
5630 5631  
↓ open down ↓ 30 lines elided ↑ open up ↑
5661 5662  
5662 5663          ztest_zd_fini(zd);
5663 5664  }
5664 5665  
5665 5666  /*
5666 5667   * Kick off threads to run tests on all datasets in parallel.
5667 5668   */
5668 5669  static void
5669 5670  ztest_run(ztest_shared_t *zs)
5670 5671  {
5671      -        thread_t *tid;
     5672 +        pthread_t *tid;
5672 5673          spa_t *spa;
5673 5674          objset_t *os;
5674      -        thread_t resume_tid;
     5675 +        pthread_t resume_tid;
5675 5676          int error;
5676 5677  
5677 5678          ztest_exiting = B_FALSE;
5678 5679  
5679 5680          /*
5680 5681           * Initialize parent/child shared state.
5681 5682           */
5682      -        VERIFY(_mutex_init(&ztest_vdev_lock, USYNC_THREAD, NULL) == 0);
5683      -        VERIFY(rwlock_init(&ztest_name_lock, USYNC_THREAD, NULL) == 0);
     5683 +        mutex_init(&ztest_vdev_lock, NULL, USYNC_THREAD, NULL);
     5684 +        rw_init(&ztest_name_lock, NULL, USYNC_THREAD, NULL);
5684 5685  
5685 5686          zs->zs_thread_start = gethrtime();
5686 5687          zs->zs_thread_stop =
5687 5688              zs->zs_thread_start + ztest_opts.zo_passtime * NANOSEC;
5688 5689          zs->zs_thread_stop = MIN(zs->zs_thread_stop, zs->zs_proc_stop);
5689 5690          zs->zs_thread_kill = zs->zs_thread_stop;
5690 5691          if (ztest_random(100) < ztest_opts.zo_killrate) {
5691 5692                  zs->zs_thread_kill -=
5692 5693                      ztest_random(ztest_opts.zo_passtime * NANOSEC);
5693 5694          }
5694 5695  
5695      -        (void) _mutex_init(&zcl.zcl_callbacks_lock, USYNC_THREAD, NULL);
     5696 +        mutex_init(&zcl.zcl_callbacks_lock, NULL, USYNC_THREAD, NULL);
5696 5697  
5697 5698          list_create(&zcl.zcl_callbacks, sizeof (ztest_cb_data_t),
5698 5699              offsetof(ztest_cb_data_t, zcd_node));
5699 5700  
5700 5701          /*
5701 5702           * Open our pool.
5702 5703           */
5703 5704          kernel_init(FREAD | FWRITE);
5704 5705          VERIFY0(spa_open(ztest_opts.zo_pool, &spa, FTAG));
5705 5706          spa->spa_debug = B_TRUE;
↓ open down ↓ 17 lines elided ↑ open up ↑
5723 5724           * the only valid replica.
5724 5725           */
5725 5726          if (MAXFAULTS() == 0)
5726 5727                  spa->spa_failmode = ZIO_FAILURE_MODE_WAIT;
5727 5728          else
5728 5729                  spa->spa_failmode = ZIO_FAILURE_MODE_PANIC;
5729 5730  
5730 5731          /*
5731 5732           * Create a thread to periodically resume suspended I/O.
5732 5733           */
5733      -        VERIFY(thr_create(0, 0, ztest_resume_thread, spa, THR_BOUND,
5734      -            &resume_tid) == 0);
     5734 +        VERIFY(pthread_create(&resume_tid, NULL, ztest_resume_thread,
     5735 +            spa) == 0);
5735 5736  
5736 5737          /*
5737 5738           * Create a deadman thread to abort() if we hang.
5738 5739           */
5739      -        VERIFY(thr_create(0, 0, ztest_deadman_thread, zs, THR_BOUND,
5740      -            NULL) == 0);
     5740 +        pthread_t deadman_tid;
     5741 +        VERIFY(pthread_create(&deadman_tid, NULL, ztest_deadman_thread,
     5742 +            zs) == 0);
5741 5742  
5742 5743          /*
5743 5744           * Verify that we can safely inquire about about any object,
5744 5745           * whether it's allocated or not.  To make it interesting,
5745 5746           * we probe a 5-wide window around each power of two.
5746 5747           * This hits all edge cases, including zero and the max.
5747 5748           */
5748 5749          for (int t = 0; t < 64; t++) {
5749 5750                  for (int d = -5; d <= 5; d++) {
5750 5751                          error = dmu_object_info(spa->spa_meta_objset,
↓ open down ↓ 5 lines elided ↑ open up ↑
5756 5757  
5757 5758          /*
5758 5759           * If we got any ENOSPC errors on the previous run, destroy something.
5759 5760           */
5760 5761          if (zs->zs_enospc_count != 0) {
5761 5762                  int d = ztest_random(ztest_opts.zo_datasets);
5762 5763                  ztest_dataset_destroy(d);
5763 5764          }
5764 5765          zs->zs_enospc_count = 0;
5765 5766  
5766      -        tid = umem_zalloc(ztest_opts.zo_threads * sizeof (thread_t),
     5767 +        tid = umem_zalloc(ztest_opts.zo_threads * sizeof (pthread_t),
5767 5768              UMEM_NOFAIL);
5768 5769  
5769 5770          if (ztest_opts.zo_verbose >= 4)
5770 5771                  (void) printf("starting main threads...\n");
5771 5772  
5772 5773          /*
5773 5774           * Kick off all the tests that run in parallel.
5774 5775           */
5775 5776          for (int t = 0; t < ztest_opts.zo_threads; t++) {
5776 5777                  if (t < ztest_opts.zo_datasets &&
5777 5778                      ztest_dataset_open(t) != 0)
5778 5779                          return;
5779      -                VERIFY(thr_create(0, 0, ztest_thread, (void *)(uintptr_t)t,
5780      -                    THR_BOUND, &tid[t]) == 0);
     5780 +                VERIFY(pthread_create(&tid[t], NULL, ztest_thread,
     5781 +                    (void *)(uintptr_t)t) == 0);
5781 5782          }
5782 5783  
5783 5784          /*
5784 5785           * Wait for all of the tests to complete.  We go in reverse order
5785 5786           * so we don't close datasets while threads are still using them.
5786 5787           */
5787 5788          for (int t = ztest_opts.zo_threads - 1; t >= 0; t--) {
5788      -                VERIFY(thr_join(tid[t], NULL, NULL) == 0);
     5789 +                VERIFY(pthread_join(tid[t], NULL) == 0);
5789 5790                  if (t < ztest_opts.zo_datasets)
5790 5791                          ztest_dataset_close(t);
5791 5792          }
5792 5793  
5793 5794          txg_wait_synced(spa_get_dsl(spa), 0);
5794 5795  
5795 5796          zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(spa));
5796 5797          zs->zs_space = metaslab_class_get_space(spa_normal_class(spa));
5797 5798          zfs_dbgmsg_print(FTAG);
5798 5799  
5799      -        umem_free(tid, ztest_opts.zo_threads * sizeof (thread_t));
     5800 +        umem_free(tid, ztest_opts.zo_threads * sizeof (pthread_t));
5800 5801  
5801 5802          /* Kill the resume thread */
5802 5803          ztest_exiting = B_TRUE;
5803      -        VERIFY(thr_join(resume_tid, NULL, NULL) == 0);
     5804 +        VERIFY(pthread_join(resume_tid, NULL) == 0);
5804 5805          ztest_resume(spa);
5805 5806  
5806 5807          /*
5807 5808           * Right before closing the pool, kick off a bunch of async I/O;
5808 5809           * spa_close() should wait for it to complete.
5809 5810           */
5810 5811          for (uint64_t object = 1; object < 50; object++) {
5811 5812                  dmu_prefetch(spa->spa_meta_objset, object, 0, 0, 1ULL << 20,
5812 5813                      ZIO_PRIORITY_SYNC_READ);
5813 5814          }
↓ open down ↓ 18 lines elided ↑ open up ↑
5832 5833                  (void) snprintf(name, sizeof (name), "%s_import",
5833 5834                      ztest_opts.zo_pool);
5834 5835                  ztest_spa_import_export(ztest_opts.zo_pool, name);
5835 5836                  ztest_spa_import_export(name, ztest_opts.zo_pool);
5836 5837          }
5837 5838  
5838 5839          kernel_fini();
5839 5840  
5840 5841          list_destroy(&zcl.zcl_callbacks);
5841 5842  
5842      -        (void) _mutex_destroy(&zcl.zcl_callbacks_lock);
     5843 +        mutex_destroy(&zcl.zcl_callbacks_lock);
5843 5844  
5844      -        (void) rwlock_destroy(&ztest_name_lock);
5845      -        (void) _mutex_destroy(&ztest_vdev_lock);
     5845 +        rw_destroy(&ztest_name_lock);
     5846 +        mutex_destroy(&ztest_vdev_lock);
5846 5847  }
5847 5848  
5848 5849  static void
5849 5850  ztest_freeze(void)
5850 5851  {
5851 5852          ztest_ds_t *zd = &ztest_ds[0];
5852 5853          spa_t *spa;
5853 5854          int numloops = 0;
5854 5855  
5855 5856          if (ztest_opts.zo_verbose >= 3)
↓ open down ↓ 123 lines elided ↑ open up ↑
5979 5980  /*
5980 5981   * Create a storage pool with the given name and initial vdev size.
5981 5982   * Then test spa_freeze() functionality.
5982 5983   */
5983 5984  static void
5984 5985  ztest_init(ztest_shared_t *zs)
5985 5986  {
5986 5987          spa_t *spa;
5987 5988          nvlist_t *nvroot, *props;
5988 5989  
5989      -        VERIFY(_mutex_init(&ztest_vdev_lock, USYNC_THREAD, NULL) == 0);
5990      -        VERIFY(rwlock_init(&ztest_name_lock, USYNC_THREAD, NULL) == 0);
     5990 +        mutex_init(&ztest_vdev_lock, NULL, USYNC_THREAD, NULL);
     5991 +        rw_init(&ztest_name_lock, NULL, USYNC_THREAD, NULL);
5991 5992  
5992 5993          kernel_init(FREAD | FWRITE);
5993 5994  
5994 5995          /*
5995 5996           * Create the storage pool.
5996 5997           */
5997 5998          (void) spa_destroy(ztest_opts.zo_pool);
5998 5999          ztest_shared->zs_vdev_next_leaf = 0;
5999 6000          zs->zs_splits = 0;
6000 6001          zs->zs_mirrors = ztest_opts.zo_mirrors;
↓ open down ↓ 17 lines elided ↑ open up ↑
6018 6019          spa_close(spa, FTAG);
6019 6020  
6020 6021          kernel_fini();
6021 6022  
6022 6023          ztest_run_zdb(ztest_opts.zo_pool);
6023 6024  
6024 6025          ztest_freeze();
6025 6026  
6026 6027          ztest_run_zdb(ztest_opts.zo_pool);
6027 6028  
6028      -        (void) rwlock_destroy(&ztest_name_lock);
6029      -        (void) _mutex_destroy(&ztest_vdev_lock);
     6029 +        rw_destroy(&ztest_name_lock);
     6030 +        mutex_destroy(&ztest_vdev_lock);
6030 6031  }
6031 6032  
6032 6033  static void
6033 6034  setup_data_fd(void)
6034 6035  {
6035 6036          static char ztest_name_data[] = "/tmp/ztest.data.XXXXXX";
6036 6037  
6037 6038          ztest_fd_data = mkstemp(ztest_name_data);
6038 6039          ASSERT3S(ztest_fd_data, >=, 0);
6039 6040          (void) unlink(ztest_name_data);
↓ open down ↓ 383 lines elided ↑ open up ↑
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX