/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2013 by Delphix. All rights reserved.
 * Copyright (c) 2013 Steven Hartland. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/fs/zfs.h>

/*
 * Virtual device vector for mirroring.
 */

typedef struct mirror_child {
	vdev_t		*mc_vd;		/* child vdev */
	uint64_t	mc_offset;	/* offset of the I/O on mc_vd */
	int		mc_error;	/* result of the I/O to this child */
	int		mc_load;	/* load calculated for this child */
	uint8_t		mc_tried;	/* an I/O was issued to this child */
	uint8_t		mc_skipped;	/* child was skipped for this I/O */
	uint8_t		mc_speculative;	/* skipped only due to its DTL */
} mirror_child_t;

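/*
 * Per-I/O mirror map.  mm_preferred records the indexes of the children
 * which currently share the lowest load; it is carved out of the same
 * allocation as the map itself (see vdev_mirror_map_size()).
 */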
typedef struct mirror_map {
	int		*mm_preferred;	/* children with the lowest load */
	int		mm_preferred_cnt; /* number of preferred children */
	int		mm_children;	/* number of children in the map */
	boolean_t	mm_replacing;	/* replacing or spare vdev */
	boolean_t	mm_root;	/* zio is to the root vdev (no io_vd) */
	mirror_child_t	mm_child[];	/* per-child I/O state */
} mirror_map_t;

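/*
 * When several children share the lowest load, the child used for a read
 * is rotated based on the I/O offset.  zfs_vdev_mirror_shift sets the
 * size of the rotation region: with the default of 21, all reads within
 * a given 2MB (1 << 21) region are directed to the same child.
 */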
int zfs_vdev_mirror_shift = 21;

/*
 * The load configuration settings below are tuned by default for
 * the case where all devices are of the same rotational type.
 *
 * If there is a mixture of rotating and non-rotating media, setting
 * zfs_vdev_mirror_non_rotating_seek_inc to 0 may well provide better
 * results as it will direct more reads to the non-rotating vdevs,
 * which are likely to deliver higher performance.
 */
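/*
 * A sketch of such tuning, assuming an illumos kernel where these
 * globals are settable like other zfs module variables via /etc/system
 * (names match the variables declared below):
 *
 *	set zfs:zfs_vdev_mirror_non_rotating_seek_inc = 0
 */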

/* Rotating media load calculation configuration. */
/* Rotating media load increment for non-seeking I/O's. */
int zfs_vdev_mirror_rotating_inc = 0;

/* Rotating media load increment for seeking I/O's. */
int zfs_vdev_mirror_rotating_seek_inc = 5;

/*
 * Offset in bytes from the last I/O which triggers a reduced rotating media
 * seek increment.
 */
int zfs_vdev_mirror_rotating_seek_offset = 1 * 1024 * 1024;

/* Non-rotating media load calculation configuration. */
/* Non-rotating media load increment for non-seeking I/O's. */
int zfs_vdev_mirror_non_rotating_inc = 0;

/* Non-rotating media load increment for seeking I/O's. */
int zfs_vdev_mirror_non_rotating_seek_inc = 1;

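/*
 * The mirror map is a single allocation: the mm_child flexible array
 * member is followed immediately by the mm_preferred array of ints, so
 * one kmem_zalloc()/kmem_free() pair covers both.
 */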
static inline size_t
vdev_mirror_map_size(int children)
{
	return (offsetof(mirror_map_t, mm_child[children]) +
	    sizeof (int) * children);
}

static inline mirror_map_t *
vdev_mirror_map_alloc(int children, boolean_t replacing, boolean_t root)
{
	mirror_map_t *mm;

	mm = kmem_zalloc(vdev_mirror_map_size(children), KM_SLEEP);
	mm->mm_children = children;
	mm->mm_replacing = replacing;
	mm->mm_root = root;
	mm->mm_preferred = (int *)((uintptr_t)mm +
	    offsetof(mirror_map_t, mm_child[children]));

	return (mm);
}

static void
vdev_mirror_map_free(zio_t *zio)
{
	mirror_map_t *mm = zio->io_vsd;

	kmem_free(mm, vdev_mirror_map_size(mm->mm_children));
}

static const zio_vsd_ops_t vdev_mirror_vsd_ops = {
	vdev_mirror_map_free,
	zio_vsd_default_cksum_report
};

/*
 * Calculate and return the load of the specified vdev, adjusted for a zio
 * at the given offset.
 *
 * The calculation takes into account the vdev's:
 * 1. Rotation rate
 * 2. Distance of zio_offset from the last queued request
 */
static int
vdev_mirror_load(mirror_map_t *mm, vdev_t *vd, uint64_t zio_offset)
{
	uint64_t lastoffset;
	int load;

	/* All DVAs have equal weight at the root. */
	if (mm->mm_root)
		return (INT_MAX);

	/*
	 * We don't return INT_MAX if the device is resilvering (i.e.
	 * vdev_resilver_txg != 0): when tested, overall performance was
	 * slightly worse with that penalty applied than without it.
	 */

	/* Standard load based on pending queue length. */
	load = vdev_queue_length(vd);
	lastoffset = vdev_queue_last_queued_offset(vd);

	if (vd->vdev_rotation_rate == VDEV_RATE_NON_ROTATING) {
		/* Non-rotating media. */
		if (lastoffset == zio_offset)
			return (load + zfs_vdev_mirror_non_rotating_inc);

		/*
		 * Apply a seek penalty even for non-rotating devices as
		 * sequential I/O's can be aggregated into fewer operations
		 * on the device, thus avoiding unnecessary per-command
		 * overhead and boosting performance.
		 */
		return (load + zfs_vdev_mirror_non_rotating_seek_inc);
	}

	/* Rotating media I/O's which directly follow the last I/O. */
	if (lastoffset == zio_offset)
		return (load + zfs_vdev_mirror_rotating_inc);

	/*
	 * Apply half the seek increment to I/O's within seek offset
	 * of the last I/O queued to this vdev as they should incur less
	 * of a seek penalty.
	 */
	if (ABS(lastoffset - zio_offset) <
	    zfs_vdev_mirror_rotating_seek_offset)
		return (load + (zfs_vdev_mirror_rotating_seek_inc / 2));

	/* Apply the full seek increment to all other I/O's. */
	return (load + zfs_vdev_mirror_rotating_seek_inc);
}
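
/*
 * Build the mirror map for a zio.  For I/O's to the root vdev (ditto
 * blocks, zio->io_vd == NULL) a child is created for each DVA in the
 * block pointer; otherwise a child is created for each child of the
 * mirror, replacing or spare vdev.
 */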
static mirror_map_t *
vdev_mirror_map_init(zio_t *zio)
{
	mirror_map_t *mm = NULL;
	mirror_child_t *mc;
	vdev_t *vd = zio->io_vd;
	int c;

	if (vd == NULL) {
		dva_t *dva = zio->io_bp->blk_dva;
		spa_t *spa = zio->io_spa;

		mm = vdev_mirror_map_alloc(BP_GET_NDVAS(zio->io_bp), B_FALSE,
		    B_TRUE);
		for (c = 0; c < mm->mm_children; c++) {
			mc = &mm->mm_child[c];
			mc->mc_vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[c]));
			mc->mc_offset = DVA_GET_OFFSET(&dva[c]);
		}
	} else {
		mm = vdev_mirror_map_alloc(vd->vdev_children,
		    (vd->vdev_ops == &vdev_replacing_ops ||
		    vd->vdev_ops == &vdev_spare_ops), B_FALSE);
		for (c = 0; c < mm->mm_children; c++) {
			mc = &mm->mm_child[c];
			mc->mc_vd = vd->vdev_child[c];
			mc->mc_offset = zio->io_offset;
		}
	}

	zio->io_vsd = mm;
	zio->io_vsd_ops = &vdev_mirror_vsd_ops;
	return (mm);
}

static int
vdev_mirror_open(vdev_t *vd, uint64_t *asize, uint64_t *max_asize,
    uint64_t *ashift)
{
	int numerrors = 0;
	int lasterror = 0;

	if (vd->vdev_children == 0) {
		vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
		return (SET_ERROR(EINVAL));
	}

	vdev_open_children(vd);

	for (int c = 0; c < vd->vdev_children; c++) {
		vdev_t *cvd = vd->vdev_child[c];

		if (cvd->vdev_open_error) {
			lasterror = cvd->vdev_open_error;
			numerrors++;
			continue;
		}

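		/*
		 * The mirror is only as large as its smallest child.  The
		 * MIN(x - 1, y - 1) + 1 form lets a caller-supplied value
		 * of 0 behave as "no limit", since 0 - 1 wraps to
		 * UINT64_MAX.
		 */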
		*asize = MIN(*asize - 1, cvd->vdev_asize - 1) + 1;
		*max_asize = MIN(*max_asize - 1, cvd->vdev_max_asize - 1) + 1;
		*ashift = MAX(*ashift, cvd->vdev_ashift);
	}

	if (numerrors == vd->vdev_children) {
		vd->vdev_stat.vs_aux = VDEV_AUX_NO_REPLICAS;
		return (lasterror);
	}

	return (0);
}

static void
vdev_mirror_close(vdev_t *vd)
{
	for (int c = 0; c < vd->vdev_children; c++)
		vdev_close(vd->vdev_child[c]);
}

static void
vdev_mirror_child_done(zio_t *zio)
{
	mirror_child_t *mc = zio->io_private;

	mc->mc_error = zio->io_error;
	mc->mc_tried = 1;
	mc->mc_skipped = 0;
}

static void
vdev_mirror_scrub_done(zio_t *zio)
{
	mirror_child_t *mc = zio->io_private;

	if (zio->io_error == 0) {
		zio_t *pio;

		mutex_enter(&zio->io_lock);
		while ((pio = zio_walk_parents(zio)) != NULL) {
			mutex_enter(&pio->io_lock);
			ASSERT3U(zio->io_size, >=, pio->io_size);
			bcopy(zio->io_data, pio->io_data, pio->io_size);
			mutex_exit(&pio->io_lock);
		}
		mutex_exit(&zio->io_lock);
	}

	zio_buf_free(zio->io_data, zio->io_size);

	mc->mc_error = zio->io_error;
	mc->mc_tried = 1;
	mc->mc_skipped = 0;
}

/*
 * Check the other, lower-index DVAs to see if they're on the same
 * vdev as the child we picked.  If they are, use them since they
 * are likely to have been allocated from the primary metaslab in
 * use at the time, and hence are more likely to have locality with
 * single-copy data.
 */
static int
vdev_mirror_dva_select(zio_t *zio, int preferred)
{
	dva_t *dva = zio->io_bp->blk_dva;
	int c;

	for (c = preferred - 1; c >= 0; c--) {
		if (DVA_GET_VDEV(&dva[c]) == DVA_GET_VDEV(&dva[preferred]))
			preferred = c;
	}
	return (preferred);
}

static int
vdev_mirror_preferred_child_randomize(zio_t *zio)
{
	mirror_map_t *mm = zio->io_vsd;
	int p;

	if (mm->mm_root) {
		p = spa_get_random(mm->mm_preferred_cnt);
		return (vdev_mirror_dva_select(zio, mm->mm_preferred[p]));
	}

	/*
	 * To ensure we don't always favour the first matching vdev,
	 * which could lead to wear leveling issues on SSDs, we
	 * use the I/O offset as a pseudo-random seed into the vdevs
	 * which have the lowest load.
	 */
	p = (zio->io_offset >> zfs_vdev_mirror_shift) % mm->mm_preferred_cnt;
	return (mm->mm_preferred[p]);
}

/*
 * Try to find a vdev whose DTL doesn't contain the block we want to read,
 * preferring vdevs based on their calculated load.
 *
 * If we can't, try the read on any vdev we haven't already tried.
 */
static int
vdev_mirror_child_select(zio_t *zio)
{
	mirror_map_t *mm = zio->io_vsd;
	uint64_t txg = zio->io_txg;
	int c, lowest_load;

	ASSERT(zio->io_bp == NULL || BP_PHYSICAL_BIRTH(zio->io_bp) == txg);

	lowest_load = INT_MAX;
	mm->mm_preferred_cnt = 0;
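	/*
	 * Walk the untried, readable children whose DTLs don't contain
	 * this txg, computing each one's load and collecting the set of
	 * children which share the lowest load in mm_preferred.
	 */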
	for (c = 0; c < mm->mm_children; c++) {
		mirror_child_t *mc;

		mc = &mm->mm_child[c];
		if (mc->mc_tried || mc->mc_skipped)
			continue;

		if (!vdev_readable(mc->mc_vd)) {
			mc->mc_error = SET_ERROR(ENXIO);
			mc->mc_tried = 1;	/* don't even try */
			mc->mc_skipped = 1;
			continue;
		}

		if (vdev_dtl_contains(mc->mc_vd, DTL_MISSING, txg, 1)) {
			mc->mc_error = SET_ERROR(ESTALE);
			mc->mc_skipped = 1;
			mc->mc_speculative = 1;
			continue;
		}

		mc->mc_load = vdev_mirror_load(mm, mc->mc_vd, mc->mc_offset);
		if (mc->mc_load > lowest_load)
			continue;

		if (mc->mc_load < lowest_load) {
			lowest_load = mc->mc_load;
			mm->mm_preferred_cnt = 0;
		}
		mm->mm_preferred[mm->mm_preferred_cnt] = c;
		mm->mm_preferred_cnt++;
	}

	if (mm->mm_preferred_cnt == 1) {
		vdev_queue_register_last_queued_offset(
		    mm->mm_child[mm->mm_preferred[0]].mc_vd, zio);
		return (mm->mm_preferred[0]);
	}

	if (mm->mm_preferred_cnt > 1) {
		int c = vdev_mirror_preferred_child_randomize(zio);

		vdev_queue_register_last_queued_offset(mm->mm_child[c].mc_vd,
		    zio);
		return (c);
	}

	/*
	 * Every device is either missing or has this txg in its DTL.
	 * Look for any child we haven't already tried before giving up.
	 */
	for (c = 0; c < mm->mm_children; c++) {
		if (!mm->mm_child[c].mc_tried) {
			vdev_queue_register_last_queued_offset(
			    mm->mm_child[c].mc_vd, zio);
			return (c);
		}
	}

	/*
	 * Every child failed.  There's no place left to look.
	 */
	return (-1);
}

static int
vdev_mirror_io_start(zio_t *zio)
{
	mirror_map_t *mm;
	mirror_child_t *mc;
	int c, children;

	mm = vdev_mirror_map_init(zio);

	if (zio->io_type == ZIO_TYPE_READ) {
		if ((zio->io_flags & ZIO_FLAG_SCRUB) && !mm->mm_replacing) {
			/*
			 * For scrubbing reads we need to allocate a read
			 * buffer for each child and issue reads to all
			 * children.  If any child succeeds, it will copy its
			 * data into zio->io_data in vdev_mirror_scrub_done.
			 */
			for (c = 0; c < mm->mm_children; c++) {
				mc = &mm->mm_child[c];
				zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
				    mc->mc_vd, mc->mc_offset,
				    zio_buf_alloc(zio->io_size), zio->io_size,
				    zio->io_type, zio->io_priority, 0,
				    vdev_mirror_scrub_done, mc));
			}
			return (ZIO_PIPELINE_CONTINUE);
		}
		/*
		 * For normal reads just pick one child.
		 */
		c = vdev_mirror_child_select(zio);
		children = (c >= 0);	/* one read, or none if all failed */
	} else {
		ASSERT(zio->io_type == ZIO_TYPE_WRITE);

		/*
		 * Writes go to all children.
		 */
		c = 0;
		children = mm->mm_children;
	}

	while (children--) {
		mc = &mm->mm_child[c];
		zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
		    mc->mc_vd, mc->mc_offset, zio->io_data, zio->io_size,
		    zio->io_type, zio->io_priority, 0,
		    vdev_mirror_child_done, mc));
		c++;
	}

	return (ZIO_PIPELINE_CONTINUE);
}

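/*
 * Return the worst error seen across the children, preferring errors
 * from children we actually expected to succeed over speculative ones
 * (children skipped because their DTL contained the txg).
 */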
static int
vdev_mirror_worst_error(mirror_map_t *mm)
{
	int error[2] = { 0, 0 };

	for (int c = 0; c < mm->mm_children; c++) {
		mirror_child_t *mc = &mm->mm_child[c];
		int s = mc->mc_speculative;
		error[s] = zio_worst_error(error[s], mc->mc_error);
	}

	return (error[0] ? error[0] : error[1]);
}

static void
vdev_mirror_io_done(zio_t *zio)
{
	mirror_map_t *mm = zio->io_vsd;
	mirror_child_t *mc;
	int c;
	int good_copies = 0;
	int unexpected_errors = 0;

	for (c = 0; c < mm->mm_children; c++) {
		mc = &mm->mm_child[c];

		if (mc->mc_error) {
			if (!mc->mc_skipped)
				unexpected_errors++;
		} else if (mc->mc_tried) {
			good_copies++;
		}
	}

	if (zio->io_type == ZIO_TYPE_WRITE) {
		/*
		 * XXX -- for now, treat partial writes as success.
		 *
		 * Now that we support write reallocation, it would be better
		 * to treat partial failure as real failure unless there are
		 * no non-degraded top-level vdevs left, and not update DTLs
		 * if we intend to reallocate.
		 */
		/* XXPOLICY */
		if (good_copies != mm->mm_children) {
			/*
			 * Always require at least one good copy.
			 *
			 * For ditto blocks (io_vd == NULL), require
			 * all copies to be good.
			 *
			 * XXX -- for replacing vdevs, there's no great answer.
			 * If the old device is really dead, we may not even
			 * be able to access it -- so we only want to
			 * require good writes to the new device.  But if
			 * the new device turns out to be flaky, we want
			 * to be able to detach it -- which requires all
			 * writes to the old device to have succeeded.
			 */
			if (good_copies == 0 || zio->io_vd == NULL)
				zio->io_error = vdev_mirror_worst_error(mm);
		}
		return;
	}

	ASSERT(zio->io_type == ZIO_TYPE_READ);

	/*
	 * If we don't have a good copy yet, keep trying other children.
	 */
	/* XXPOLICY */
	if (good_copies == 0 && (c = vdev_mirror_child_select(zio)) != -1) {
		ASSERT(c >= 0 && c < mm->mm_children);
		mc = &mm->mm_child[c];
		zio_vdev_io_redone(zio);
		zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
		    mc->mc_vd, mc->mc_offset, zio->io_data, zio->io_size,
		    ZIO_TYPE_READ, zio->io_priority, 0,
		    vdev_mirror_child_done, mc));
		return;
	}

	/* XXPOLICY */
	if (good_copies == 0) {
		zio->io_error = vdev_mirror_worst_error(mm);
		ASSERT(zio->io_error != 0);
	}

	if (good_copies && spa_writeable(zio->io_spa) &&
	    (unexpected_errors ||
	    (zio->io_flags & ZIO_FLAG_RESILVER) ||
	    ((zio->io_flags & ZIO_FLAG_SCRUB) && mm->mm_replacing))) {
		/*
		 * Use the good data we have in hand to repair damaged children.
		 */
		for (c = 0; c < mm->mm_children; c++) {
			/*
			 * Don't rewrite known good children.
			 * Not only is it unnecessary, it could
			 * actually be harmful: if the system lost
			 * power while rewriting the only good copy,
			 * there would be no good copies left!
			 */
			mc = &mm->mm_child[c];

			if (mc->mc_error == 0) {
				if (mc->mc_tried)
					continue;
				if (!(zio->io_flags & ZIO_FLAG_SCRUB) &&
				    !vdev_dtl_contains(mc->mc_vd, DTL_PARTIAL,
				    zio->io_txg, 1))
					continue;
				mc->mc_error = SET_ERROR(ESTALE);
			}

			zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
			    mc->mc_vd, mc->mc_offset,
			    zio->io_data, zio->io_size,
			    ZIO_TYPE_WRITE, ZIO_PRIORITY_ASYNC_WRITE,
			    ZIO_FLAG_IO_REPAIR | (unexpected_errors ?
			    ZIO_FLAG_SELF_HEAL : 0), NULL, NULL));
		}
	}
}

static void
vdev_mirror_state_change(vdev_t *vd, int faulted, int degraded)
{
	if (faulted == vd->vdev_children)
		vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_NO_REPLICAS);
	else if (degraded + faulted != 0)
		vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, VDEV_AUX_NONE);
	else
		vdev_set_state(vd, B_FALSE, VDEV_STATE_HEALTHY, VDEV_AUX_NONE);
}

vdev_ops_t vdev_mirror_ops = {
	vdev_mirror_open,
	vdev_mirror_close,
	vdev_default_asize,
	vdev_mirror_io_start,
	vdev_mirror_io_done,
	vdev_mirror_state_change,
	NULL,
	NULL,
	VDEV_TYPE_MIRROR,	/* name of this vdev type */
	B_FALSE			/* not a leaf vdev */
};

vdev_ops_t vdev_replacing_ops = {
	vdev_mirror_open,
	vdev_mirror_close,
	vdev_default_asize,
	vdev_mirror_io_start,
	vdev_mirror_io_done,
	vdev_mirror_state_change,
	NULL,
	NULL,
	VDEV_TYPE_REPLACING,	/* name of this vdev type */
	B_FALSE			/* not a leaf vdev */
};

vdev_ops_t vdev_spare_ops = {
	vdev_mirror_open,
	vdev_mirror_close,
	vdev_default_asize,
	vdev_mirror_io_start,
	vdev_mirror_io_done,
	vdev_mirror_state_change,
	NULL,
	NULL,
	VDEV_TYPE_SPARE,	/* name of this vdev type */
	B_FALSE			/* not a leaf vdev */
};