4334 Improve ZFS N-way mirror read performance

          --- old/usr/src/uts/common/fs/zfs/vdev_mirror.c
          +++ new/usr/src/uts/common/fs/zfs/vdev_mirror.c
[ 17 lines elided ]
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  /*
  22   22   * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
  23   23   * Use is subject to license terms.
  24   24   */
  25   25  
  26   26  /*
  27   27   * Copyright (c) 2013 by Delphix. All rights reserved.
       28 + * Copyright (c) 2013 Steven Hartland. All rights reserved.
  28   29   */
  29   30  
  30   31  #include <sys/zfs_context.h>
  31   32  #include <sys/spa.h>
  32   33  #include <sys/vdev_impl.h>
  33   34  #include <sys/zio.h>
  34   35  #include <sys/fs/zfs.h>
  35   36  
  36   37  /*
  37   38   * Virtual device vector for mirroring.
  38   39   */
  39   40  
  40   41  typedef struct mirror_child {
  41   42          vdev_t          *mc_vd;
  42   43          uint64_t        mc_offset;
  43   44          int             mc_error;
       45 +        int             mc_load;
  44   46          uint8_t         mc_tried;
  45   47          uint8_t         mc_skipped;
  46   48          uint8_t         mc_speculative;
  47   49  } mirror_child_t;
  48   50  
  49   51  typedef struct mirror_map {
       52 +        int             *mm_preferred;
       53 +        int             mm_preferred_cnt;
  50   54          int             mm_children;
  51      -        int             mm_replacing;
  52      -        int             mm_preferred;
  53      -        int             mm_root;
  54      -        mirror_child_t  mm_child[1];
       55 +        boolean_t       mm_replacing;
       56 +        boolean_t       mm_root;
       57 +        mirror_child_t  mm_child[];
  55   58  } mirror_map_t;
  56   59  
  57      -int vdev_mirror_shift = 21;
       60 +int zfs_vdev_mirror_shift = 21;
       61 +
       62 +/*
       63 + * The load configuration settings below are tuned by default for
       64 + * the case where all devices are of the same rotational type.
       65 + *
       66 + * If there is a mixture of rotating and non-rotating media, setting
        67 + * non_rotating_seek_inc to 0 may well provide better results, as it
        68 + * will direct more reads to the non-rotating vdevs, which are
        69 + * likely to deliver higher performance.
       70 + */
       71 +
       72 +/* Rotating media load calculation configuration. */
       73 +/* Rotating media load increment for non-seeking I/O's. */
       74 +int zfs_vdev_mirror_rotating_inc = 0;
       75 +
       76 +/* Rotating media load increment for seeking I/O's. */
       77 +int zfs_vdev_mirror_rotating_seek_inc = 5;
       78 +
       79 +/*
       80 + * Offset in bytes from the last I/O which triggers a reduced rotating media
       81 + * seek increment.
       82 + */
       83 +int zfs_vdev_mirror_rotating_seek_offset = 1 * 1024 * 1024;
       84 +
       85 +/* Non-rotating media load calculation configuration. */
       86 +/* Non-rotating media load increment for non-seeking I/O's. */
       87 +int zfs_vdev_mirror_non_rotating_inc = 0;
       88 +
       89 +/* Non-rotating media load increment for seeking I/O's. */
       90 +int zfs_vdev_mirror_non_rotating_seek_inc = 1;
       91 +
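
Aside (not part of the webrev): on illumos these are kernel tunables, so the mixed-media tuning suggested in the block comment above could be applied persistently via /etc/system, for example:

        * prefer the non-rotating side of a mixed HDD/SSD mirror
        set zfs:zfs_vdev_mirror_non_rotating_seek_inc = 0

or changed on a running system with mdb -kw (e.g. zfs_vdev_mirror_non_rotating_seek_inc/W 0). The variable name comes from this change; the delivery mechanism is the usual one for ZFS tunables and is mentioned here only as a sketch.
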
       92 +static inline size_t
       93 +vdev_mirror_map_size(int children)
       94 +{
       95 +        return (offsetof(mirror_map_t, mm_child[children]) +
       96 +            sizeof (int) * children);
       97 +}
       98 +
       99 +static inline mirror_map_t *
      100 +vdev_mirror_map_alloc(int children, boolean_t replacing, boolean_t root)
      101 +{
      102 +        mirror_map_t *mm;
      103 +
      104 +        mm = kmem_zalloc(vdev_mirror_map_size(children), KM_SLEEP);
      105 +        mm->mm_children = children;
      106 +        mm->mm_replacing = replacing;
      107 +        mm->mm_root = root;
      108 +        mm->mm_preferred = (int *)((uintptr_t)mm +
      109 +            offsetof(mirror_map_t, mm_child[children]));
      110 +
      111 +        return (mm);
      112 +}
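
As a reading aid (not from the webrev), the single allocation built by vdev_mirror_map_size() and vdev_mirror_map_alloc() for a 3-way mirror lays out as:

        +---------------------+------------------+--------------------+
        | mirror_map_t header | mm_child[0..2]   | mm_preferred[0..2] |
        +---------------------+------------------+--------------------+
                                                   ^ mm_preferred

i.e. the preferred-child index array is carved out of the same kmem allocation, immediately after the flexible mm_child[] array, which is why vdev_mirror_map_free() needs only a single kmem_free().
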
  58  113  
  59  114  static void
  60  115  vdev_mirror_map_free(zio_t *zio)
  61  116  {
  62  117          mirror_map_t *mm = zio->io_vsd;
  63  118  
  64      -        kmem_free(mm, offsetof(mirror_map_t, mm_child[mm->mm_children]));
      119 +        kmem_free(mm, vdev_mirror_map_size(mm->mm_children));
  65  120  }
  66  121  
  67  122  static const zio_vsd_ops_t vdev_mirror_vsd_ops = {
  68  123          vdev_mirror_map_free,
  69  124          zio_vsd_default_cksum_report
  70  125  };
  71  126  
      127 +/*
      128 + * Calculate and return the load of the specified vdev adjusted for a zio at
      129 + * the given offset.
      130 + *
       131 + * The calculation takes into account the vdev's:
      132 + * 1. Rotation rate
      133 + * 2. The distance of zio_offset from the last queued request
      134 + */
      135 +static int
      136 +vdev_mirror_load(mirror_map_t *mm, vdev_t *vd, uint64_t zio_offset)
      137 +{
      138 +        uint64_t lastoffset;
      139 +        int load;
      140 +
      141 +        /* All DVAs have equal weight at the root. */
      142 +        if (mm->mm_root)
      143 +                return (INT_MAX);
      144 +
      145 +        /*
       146 +         * We don't return INT_MAX if the device is resilvering (i.e.
       147 +         * vdev_resilver_txg != 0), as in testing overall performance during
       148 +         * resilver was slightly worse with that behaviour than without it.
      149 +         */
      150 +
      151 +        /* Standard load based on pending queue length. */
      152 +        load = vdev_queue_length(vd);
      153 +        lastoffset = vdev_queue_last_queued_offset(vd);
      154 +
      155 +        if (vd->vdev_rotation_rate == VDEV_RATE_NON_ROTATING) {
      156 +                /* Non-rotating media. */
      157 +                if (lastoffset == zio_offset)
      158 +                        return (load + zfs_vdev_mirror_non_rotating_inc);
      159 +
      160 +                /*
      161 +                 * Apply a seek penalty even for non-rotating devices as
       162 +                 * sequential I/O's can be aggregated into fewer operations
      163 +                 * on the device, thus avoiding unnecessary per-command
      164 +                 * overhead and boosting performance.
      165 +                 */
      166 +                return (load + zfs_vdev_mirror_non_rotating_seek_inc);
      167 +        }
      168 +
      169 +        /* Rotating media I/O's which directly follow the last I/O. */
      170 +        if (lastoffset == zio_offset)
      171 +                return (load + zfs_vdev_mirror_rotating_inc);
      172 +
      173 +        /*
       174 +         * Apply half the seek increment to I/O's within the seek offset
       175 +         * of the last I/O queued to this vdev, as they should incur less
       176 +         * of a seek.
      177 +         */
      178 +        if (ABS(lastoffset - zio_offset) <
      179 +            zfs_vdev_mirror_rotating_seek_offset)
      180 +                return (load + (zfs_vdev_mirror_rotating_seek_inc / 2));
      181 +
      182 +        /* Apply the full seek increment to all other I/O's. */
      183 +        return (load + zfs_vdev_mirror_rotating_seek_inc);
      184 +}
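
To make the default weighting concrete, here is a minimal standalone sketch (hypothetical queue depths and offsets, default tunable values; not part of the change) of how two mirror children would score under vdev_mirror_load():

        #include <stdio.h>
        #include <stdlib.h>

        #define ROTATING_SEEK_INC       5       /* zfs_vdev_mirror_rotating_seek_inc */
        #define ROTATING_SEEK_OFFSET    (1 * 1024 * 1024)
        #define NON_ROTATING_SEEK_INC   1       /* zfs_vdev_mirror_non_rotating_seek_inc */

        /* Load of a rotating child: pending queue length plus a seek penalty. */
        static int
        load_rotating(int pending, long long seek_distance)
        {
                if (seek_distance == 0)
                        return (pending);               /* sequential, no increment */
                if (llabs(seek_distance) < ROTATING_SEEK_OFFSET)
                        return (pending + ROTATING_SEEK_INC / 2);  /* short seek */
                return (pending + ROTATING_SEEK_INC);              /* full seek */
        }

        int
        main(void)
        {
                /* HDD child: 4 pending I/Os, new request 512 KB from the last one. */
                printf("hdd load: %d\n", load_rotating(4, 512LL * 1024));   /* 6 */
                /* SSD child: 6 pending I/Os, non-sequential. */
                printf("ssd load: %d\n", 6 + NON_ROTATING_SEEK_INC);        /* 7 */
                return (0);
        }

With the defaults the shorter HDD queue still wins here (6 < 7); setting zfs_vdev_mirror_non_rotating_seek_inc to 0 would bring the SSD child down to 6, and the offset-based tie-breaker further down would then pick between the two.
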
      185 +
      186 +
  72  187  static mirror_map_t *
  73      -vdev_mirror_map_alloc(zio_t *zio)
      188 +vdev_mirror_map_init(zio_t *zio)
  74  189  {
  75  190          mirror_map_t *mm = NULL;
  76  191          mirror_child_t *mc;
  77  192          vdev_t *vd = zio->io_vd;
  78      -        int c, d;
      193 +        int c;
  79  194  
  80  195          if (vd == NULL) {
  81  196                  dva_t *dva = zio->io_bp->blk_dva;
  82  197                  spa_t *spa = zio->io_spa;
  83  198  
  84      -                c = BP_GET_NDVAS(zio->io_bp);
  85      -
  86      -                mm = kmem_zalloc(offsetof(mirror_map_t, mm_child[c]), KM_SLEEP);
  87      -                mm->mm_children = c;
  88      -                mm->mm_replacing = B_FALSE;
  89      -                mm->mm_preferred = spa_get_random(c);
  90      -                mm->mm_root = B_TRUE;
  91      -
  92      -                /*
  93      -                 * Check the other, lower-index DVAs to see if they're on
  94      -                 * the same vdev as the child we picked.  If they are, use
  95      -                 * them since they are likely to have been allocated from
  96      -                 * the primary metaslab in use at the time, and hence are
  97      -                 * more likely to have locality with single-copy data.
  98      -                 */
  99      -                for (c = mm->mm_preferred, d = c - 1; d >= 0; d--) {
 100      -                        if (DVA_GET_VDEV(&dva[d]) == DVA_GET_VDEV(&dva[c]))
 101      -                                mm->mm_preferred = d;
 102      -                }
 103      -
      199 +                mm = vdev_mirror_map_alloc(BP_GET_NDVAS(zio->io_bp), B_FALSE,
      200 +                    B_TRUE);
 104  201                  for (c = 0; c < mm->mm_children; c++) {
 105  202                          mc = &mm->mm_child[c];
 106      -
 107  203                          mc->mc_vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[c]));
 108  204                          mc->mc_offset = DVA_GET_OFFSET(&dva[c]);
 109  205                  }
 110  206          } else {
 111      -                c = vd->vdev_children;
 112      -
 113      -                mm = kmem_zalloc(offsetof(mirror_map_t, mm_child[c]), KM_SLEEP);
 114      -                mm->mm_children = c;
 115      -                mm->mm_replacing = (vd->vdev_ops == &vdev_replacing_ops ||
 116      -                    vd->vdev_ops == &vdev_spare_ops);
 117      -                mm->mm_preferred = mm->mm_replacing ? 0 :
 118      -                    (zio->io_offset >> vdev_mirror_shift) % c;
 119      -                mm->mm_root = B_FALSE;
 120      -
      207 +                mm = vdev_mirror_map_alloc(vd->vdev_children,
      208 +                    (vd->vdev_ops == &vdev_replacing_ops ||
      209 +                    vd->vdev_ops == &vdev_spare_ops), B_FALSE);
 121  210                  for (c = 0; c < mm->mm_children; c++) {
 122  211                          mc = &mm->mm_child[c];
 123  212                          mc->mc_vd = vd->vdev_child[c];
 124  213                          mc->mc_offset = zio->io_offset;
 125  214                  }
 126  215          }
 127  216  
 128  217          zio->io_vsd = mm;
 129  218          zio->io_vsd_ops = &vdev_mirror_vsd_ops;
 130  219          return (mm);
[ 71 lines elided ]
 202  291          }
 203  292  
 204  293          zio_buf_free(zio->io_data, zio->io_size);
 205  294  
 206  295          mc->mc_error = zio->io_error;
 207  296          mc->mc_tried = 1;
 208  297          mc->mc_skipped = 0;
 209  298  }
 210  299  
 211  300  /*
 212      - * Try to find a child whose DTL doesn't contain the block we want to read.
      301 + * Check the other, lower-index DVAs to see if they're on the same
      302 + * vdev as the child we picked.  If they are, use them since they
      303 + * are likely to have been allocated from the primary metaslab in
      304 + * use at the time, and hence are more likely to have locality with
      305 + * single-copy data.
      306 + */
      307 +static int
      308 +vdev_mirror_dva_select(zio_t *zio, int preferred)
      309 +{
      310 +        dva_t *dva = zio->io_bp->blk_dva;
      311 +        int c;
      312 +
      313 +        for (c = preferred - 1; c >= 0; c--) {
      314 +                if (DVA_GET_VDEV(&dva[c]) == DVA_GET_VDEV(&dva[preferred]))
      315 +                        preferred = c;
      316 +        }
      317 +        return (preferred);
      318 +}
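
For example (illustrative, not from the webrev): if a block has three DVAs, DVAs 0 and 2 sit on the same top-level vdev, and the random pick lands on 2, vdev_mirror_dva_select() remaps the choice to DVA 0, the copy most likely allocated from the then-primary metaslab and hence most likely to share locality with single-copy data.
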
      319 +
      320 +static int
      321 +vdev_mirror_preferred_child_randomize(zio_t *zio)
      322 +{
      323 +        mirror_map_t *mm = zio->io_vsd;
      324 +        int p;
      325 +
      326 +        if (mm->mm_root) {
      327 +                p = spa_get_random(mm->mm_preferred_cnt);
      328 +                return (vdev_mirror_dva_select(zio, mm->mm_preferred[p]));
      329 +        }
      330 +
      331 +        /*
      332 +         * To ensure we don't always favour the first matching vdev,
       333 +         * which could lead to wear-leveling issues on SSDs, we
       334 +         * use the I/O offset as a pseudo-random seed into the vdevs
      335 +         * which have the lowest load.
      336 +         */
      337 +        p = (zio->io_offset >> zfs_vdev_mirror_shift) % mm->mm_preferred_cnt;
      338 +        return (mm->mm_preferred[p]);
      339 +}
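
A quick sketch (hypothetical offsets, default zfs_vdev_mirror_shift of 21 i.e. 2 MB regions; not part of the change) showing how the offset-based tie-breaker spreads reads across two equally loaded children:

        #include <stdio.h>
        #include <stdint.h>

        int
        main(void)
        {
                int shift = 21;         /* default zfs_vdev_mirror_shift: 2 MB regions */
                int ties = 2;           /* two children share the lowest load */
                uint64_t off;

                for (off = 0; off < (8ULL << 20); off += (2ULL << 20))
                        printf("offset %2lluMB -> child %d\n",
                            (unsigned long long)(off >> 20),
                            (int)((off >> shift) % ties));
                return (0);
        }

Successive 2 MB regions alternate between the tied children, so a stream of reads at increasing offsets rotates across the mirror instead of always landing on the first match.
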
      340 +
      341 +/*
       342 + * Try to find a vdev whose DTL doesn't contain the block we want to read,
       343 + * preferring vdevs based on their determined load.
      344 + *
 213  345   * If we can't, try the read on any vdev we haven't already tried.
 214  346   */
 215  347  static int
 216  348  vdev_mirror_child_select(zio_t *zio)
 217  349  {
 218  350          mirror_map_t *mm = zio->io_vsd;
 219      -        mirror_child_t *mc;
 220  351          uint64_t txg = zio->io_txg;
 221      -        int i, c;
      352 +        int c, lowest_load;
 222  353  
 223  354          ASSERT(zio->io_bp == NULL || BP_PHYSICAL_BIRTH(zio->io_bp) == txg);
 224  355  
 225      -        /*
 226      -         * Try to find a child whose DTL doesn't contain the block to read.
 227      -         * If a child is known to be completely inaccessible (indicated by
 228      -         * vdev_readable() returning B_FALSE), don't even try.
 229      -         */
 230      -        for (i = 0, c = mm->mm_preferred; i < mm->mm_children; i++, c++) {
 231      -                if (c >= mm->mm_children)
 232      -                        c = 0;
      356 +        lowest_load = INT_MAX;
      357 +        mm->mm_preferred_cnt = 0;
      358 +        for (c = 0; c < mm->mm_children; c++) {
      359 +                mirror_child_t *mc;
      360 +
 233  361                  mc = &mm->mm_child[c];
 234  362                  if (mc->mc_tried || mc->mc_skipped)
 235  363                          continue;
      364 +
 236  365                  if (!vdev_readable(mc->mc_vd)) {
 237  366                          mc->mc_error = SET_ERROR(ENXIO);
 238  367                          mc->mc_tried = 1;       /* don't even try */
 239  368                          mc->mc_skipped = 1;
 240  369                          continue;
 241  370                  }
 242      -                if (!vdev_dtl_contains(mc->mc_vd, DTL_MISSING, txg, 1))
 243      -                        return (c);
 244      -                mc->mc_error = SET_ERROR(ESTALE);
 245      -                mc->mc_skipped = 1;
 246      -                mc->mc_speculative = 1;
      371 +
      372 +                if (vdev_dtl_contains(mc->mc_vd, DTL_MISSING, txg, 1)) {
      373 +                        mc->mc_error = SET_ERROR(ESTALE);
      374 +                        mc->mc_skipped = 1;
      375 +                        mc->mc_speculative = 1;
      376 +                        continue;
      377 +                }
      378 +
      379 +                mc->mc_load = vdev_mirror_load(mm, mc->mc_vd, mc->mc_offset);
      380 +                if (mc->mc_load > lowest_load)
      381 +                        continue;
      382 +
      383 +                if (mc->mc_load < lowest_load) {
      384 +                        lowest_load = mc->mc_load;
      385 +                        mm->mm_preferred_cnt = 0;
      386 +                }
      387 +                mm->mm_preferred[mm->mm_preferred_cnt] = c;
      388 +                mm->mm_preferred_cnt++;
      389 +        }
      390 +
      391 +        if (mm->mm_preferred_cnt == 1) {
      392 +                vdev_queue_register_last_queued_offset(
      393 +                    mm->mm_child[mm->mm_preferred[0]].mc_vd, zio);
      394 +                return (mm->mm_preferred[0]);
      395 +        }
      396 +
      397 +        if (mm->mm_preferred_cnt > 1) {
      398 +                int c = vdev_mirror_preferred_child_randomize(zio);
      399 +
      400 +                vdev_queue_register_last_queued_offset(mm->mm_child[c].mc_vd,
      401 +                    zio);
      402 +                return (c);
 247  403          }
 248  404  
 249  405          /*
 250  406           * Every device is either missing or has this txg in its DTL.
 251  407           * Look for any child we haven't already tried before giving up.
 252  408           */
 253      -        for (c = 0; c < mm->mm_children; c++)
 254      -                if (!mm->mm_child[c].mc_tried)
      409 +        for (c = 0; c < mm->mm_children; c++) {
      410 +                if (!mm->mm_child[c].mc_tried) {
      411 +                        vdev_queue_register_last_queued_offset(
      412 +                            mm->mm_child[c].mc_vd, zio);
 255  413                          return (c);
      414 +                }
      415 +        }
 256  416  
 257  417          /*
 258  418           * Every child failed.  There's no place left to look.
 259  419           */
 260  420          return (-1);
 261  421  }
 262  422  
 263  423  static int
 264  424  vdev_mirror_io_start(zio_t *zio)
 265  425  {
 266  426          mirror_map_t *mm;
 267  427          mirror_child_t *mc;
 268  428          int c, children;
 269  429  
 270      -        mm = vdev_mirror_map_alloc(zio);
      430 +        mm = vdev_mirror_map_init(zio);
 271  431  
 272  432          if (zio->io_type == ZIO_TYPE_READ) {
 273  433                  if ((zio->io_flags & ZIO_FLAG_SCRUB) && !mm->mm_replacing) {
 274  434                          /*
 275  435                           * For scrubbing reads we need to allocate a read
 276  436                           * buffer for each child and issue reads to all
 277  437                           * children.  If any child succeeds, it will copy its
 278  438                           * data into zio->io_data in vdev_mirror_scrub_done.
 279  439                           */
 280  440                          for (c = 0; c < mm->mm_children; c++) {
[ 211 lines elided ]