/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/vdev_impl.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/avl.h>

/*
 * These tunables are for performance analysis.
 */
/*
 * zfs_vdev_max_pending is the maximum number of i/os concurrently
 * pending to each device.  zfs_vdev_min_pending is the initial number
 * of i/os pending to each device (before it starts ramping up to
 * max_pending).
 */
int zfs_vdev_max_pending = 10;
int zfs_vdev_min_pending = 4;

/*
 * The deadlines are grouped into buckets based on zfs_vdev_time_shift:
 * deadline = pri + (gethrtime() >> time_shift)
 */
int zfs_vdev_time_shift = 29; /* each bucket is 0.537 seconds */

/* exponential I/O issue ramp-up rate */
int zfs_vdev_ramp_rate = 2;

/*
 * To reduce IOPS, we aggregate small adjacent I/Os into one large I/O.
 * For read I/Os, we also aggregate across small adjacency gaps; for writes
 * we include spans of optional I/Os to aid aggregation at the disk even when
 * they aren't able to help us aggregate at this level.
 */
int zfs_vdev_aggregation_limit = SPA_MAXBLOCKSIZE;
int zfs_vdev_read_gap_limit = 32 << 10;
int zfs_vdev_write_gap_limit = 4 << 10;

/*
 * Virtual device vector for disk I/O scheduling.
 */
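
/*
 * AVL comparator for vq_deadline_tree: order zios first by deadline,
 * then by offset, and finally by address so that no two entries compare
 * equal.
 */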
int
vdev_queue_deadline_compare(const void *x1, const void *x2)
{
	const zio_t *z1 = x1;
	const zio_t *z2 = x2;

	if (z1->io_deadline < z2->io_deadline)
		return (-1);
	if (z1->io_deadline > z2->io_deadline)
		return (1);

	if (z1->io_offset < z2->io_offset)
		return (-1);
	if (z1->io_offset > z2->io_offset)
		return (1);

	if (z1 < z2)
		return (-1);
	if (z1 > z2)
		return (1);

	return (0);
}

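/*
 * AVL comparator for the offset-sorted trees (vq_read_tree, vq_write_tree
 * and vq_pending_tree): order zios by offset, breaking ties by address.
 */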
int
vdev_queue_offset_compare(const void *x1, const void *x2)
{
	const zio_t *z1 = x1;
	const zio_t *z2 = x2;

	if (z1->io_offset < z2->io_offset)
		return (-1);
	if (z1->io_offset > z2->io_offset)
		return (1);

	if (z1 < z2)
		return (-1);
	if (z1 > z2)
		return (1);

	return (0);
}

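/*
 * Initialize the per-vdev I/O queue: its lock, the deadline tree, the
 * per-type offset trees, and the tree of I/Os currently pending at the
 * device.
 */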
void
vdev_queue_init(vdev_t *vd)
{
	vdev_queue_t *vq = &vd->vdev_queue;

	mutex_init(&vq->vq_lock, NULL, MUTEX_DEFAULT, NULL);

	avl_create(&vq->vq_deadline_tree, vdev_queue_deadline_compare,
	    sizeof (zio_t), offsetof(struct zio, io_deadline_node));

	avl_create(&vq->vq_read_tree, vdev_queue_offset_compare,
	    sizeof (zio_t), offsetof(struct zio, io_offset_node));

	avl_create(&vq->vq_write_tree, vdev_queue_offset_compare,
	    sizeof (zio_t), offsetof(struct zio, io_offset_node));

	avl_create(&vq->vq_pending_tree, vdev_queue_offset_compare,
	    sizeof (zio_t), offsetof(struct zio, io_offset_node));
}

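/*
 * Tear down the queue created by vdev_queue_init().
 */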
void
vdev_queue_fini(vdev_t *vd)
{
	vdev_queue_t *vq = &vd->vdev_queue;

	avl_destroy(&vq->vq_deadline_tree);
	avl_destroy(&vq->vq_read_tree);
	avl_destroy(&vq->vq_write_tree);
	avl_destroy(&vq->vq_pending_tree);

	mutex_destroy(&vq->vq_lock);
}

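/*
 * Add a zio to both the deadline tree and its per-type offset tree, and
 * account for it in the pool-wide wait-queue kstat.
 */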
static void
vdev_queue_io_add(vdev_queue_t *vq, zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	avl_add(&vq->vq_deadline_tree, zio);
	avl_add(zio->io_vdev_tree, zio);

	if (spa->spa_iokstat != NULL) {
		mutex_enter(&spa->spa_iokstat_lock);
		kstat_waitq_enter(spa->spa_iokstat->ks_data);
		mutex_exit(&spa->spa_iokstat_lock);
	}
}

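/*
 * Remove a zio from the deadline and offset trees and update the
 * wait-queue kstat accordingly.
 */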
static void
vdev_queue_io_remove(vdev_queue_t *vq, zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	avl_remove(&vq->vq_deadline_tree, zio);
	avl_remove(zio->io_vdev_tree, zio);

	if (spa->spa_iokstat != NULL) {
		mutex_enter(&spa->spa_iokstat_lock);
		kstat_waitq_exit(spa->spa_iokstat->ks_data);
		mutex_exit(&spa->spa_iokstat_lock);
	}
}

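/*
 * Add a zio to the tree of I/Os outstanding at the device and account
 * for it in the run-queue kstat.
 */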
static void
vdev_queue_pending_add(vdev_queue_t *vq, zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	avl_add(&vq->vq_pending_tree, zio);
	if (spa->spa_iokstat != NULL) {
		mutex_enter(&spa->spa_iokstat_lock);
		kstat_runq_enter(spa->spa_iokstat->ks_data);
		mutex_exit(&spa->spa_iokstat_lock);
	}
}

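/*
 * Remove a completed zio from the pending tree and update the run-queue
 * and per-type throughput kstats.
 */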
static void
vdev_queue_pending_remove(vdev_queue_t *vq, zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	avl_remove(&vq->vq_pending_tree, zio);
	if (spa->spa_iokstat != NULL) {
		kstat_io_t *ksio = spa->spa_iokstat->ks_data;

		mutex_enter(&spa->spa_iokstat_lock);
		kstat_runq_exit(spa->spa_iokstat->ks_data);
		if (zio->io_type == ZIO_TYPE_READ) {
			ksio->reads++;
			ksio->nread += zio->io_size;
		} else if (zio->io_type == ZIO_TYPE_WRITE) {
			ksio->writes++;
			ksio->nwritten += zio->io_size;
		}
		mutex_exit(&spa->spa_iokstat_lock);
	}
}

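/*
 * Done callback for an aggregated I/O: for reads, copy each parent's
 * portion of the aggregate buffer back into that parent, then free the
 * temporary buffer.
 */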
static void
vdev_queue_agg_io_done(zio_t *aio)
{
	zio_t *pio;

	while ((pio = zio_walk_parents(aio)) != NULL)
		if (aio->io_type == ZIO_TYPE_READ)
			bcopy((char *)aio->io_data + (pio->io_offset -
			    aio->io_offset), pio->io_data, pio->io_size);

	zio_buf_free(aio->io_data, aio->io_size);
}

/*
 * Compute the range spanned by two i/os, which is the endpoint of the last
 * (lio->io_offset + lio->io_size) minus start of the first (fio->io_offset).
 * Conveniently, the gap between fio and lio is given by -IO_SPAN(lio, fio);
 * thus fio and lio are adjacent if and only if IO_SPAN(lio, fio) == 0.
 */
#define IO_SPAN(fio, lio) ((lio)->io_offset + (lio)->io_size - (fio)->io_offset)
#define IO_GAP(fio, lio) (-IO_SPAN(lio, fio))

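/*
 * Pick the next I/O to issue to the device, aggregating sufficiently
 * adjacent I/Os of the same flavor into a single larger I/O where possible.
 * Returns NULL if the device already has pending_limit I/Os outstanding or
 * the queue is empty.  Called with vq_lock held.
 */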
static zio_t *
vdev_queue_io_to_issue(vdev_queue_t *vq, uint64_t pending_limit)
{
	zio_t *fio, *lio, *aio, *dio, *nio, *mio;
	avl_tree_t *t;
	int flags;
	uint64_t maxspan = zfs_vdev_aggregation_limit;
	uint64_t maxgap;
	int stretch;

again:
	ASSERT(MUTEX_HELD(&vq->vq_lock));

	if (avl_numnodes(&vq->vq_pending_tree) >= pending_limit ||
	    avl_numnodes(&vq->vq_deadline_tree) == 0)
		return (NULL);

	fio = lio = avl_first(&vq->vq_deadline_tree);

	t = fio->io_vdev_tree;
	flags = fio->io_flags & ZIO_FLAG_AGG_INHERIT;
	maxgap = (t == &vq->vq_read_tree) ? zfs_vdev_read_gap_limit : 0;

	if (!(flags & ZIO_FLAG_DONT_AGGREGATE)) {
		/*
		 * We can aggregate I/Os that are sufficiently adjacent and of
		 * the same flavor, as expressed by the AGG_INHERIT flags.
		 * The latter requirement is necessary so that certain
		 * attributes of the I/O, such as whether it's a normal I/O
		 * or a scrub/resilver, can be preserved in the aggregate.
		 * We can include optional I/Os, but don't allow them
		 * to begin a range as they add no benefit in that situation.
		 */

		/*
		 * We keep track of the last non-optional I/O.
		 */
		mio = (fio->io_flags & ZIO_FLAG_OPTIONAL) ? NULL : fio;

		/*
		 * Walk backwards through sufficiently contiguous I/Os,
		 * recording the last non-optional I/O.
		 */
		while ((dio = AVL_PREV(t, fio)) != NULL &&
		    (dio->io_flags & ZIO_FLAG_AGG_INHERIT) == flags &&
		    IO_SPAN(dio, lio) <= maxspan &&
		    IO_GAP(dio, fio) <= maxgap) {
			fio = dio;
			if (mio == NULL && !(fio->io_flags & ZIO_FLAG_OPTIONAL))
				mio = fio;
		}

		/*
		 * Skip any initial optional I/Os.
		 */
		while ((fio->io_flags & ZIO_FLAG_OPTIONAL) && fio != lio) {
			fio = AVL_NEXT(t, fio);
			ASSERT(fio != NULL);
		}

		/*
		 * Walk forward through sufficiently contiguous I/Os.
		 */
		while ((dio = AVL_NEXT(t, lio)) != NULL &&
		    (dio->io_flags & ZIO_FLAG_AGG_INHERIT) == flags &&
		    IO_SPAN(fio, dio) <= maxspan &&
		    IO_GAP(lio, dio) <= maxgap) {
			lio = dio;
			if (!(lio->io_flags & ZIO_FLAG_OPTIONAL))
				mio = lio;
		}

		/*
		 * Now that we've established the range of the I/O aggregation
		 * we must decide what to do with trailing optional I/Os.
		 * For reads, there's nothing to do.  For writes, while we are
		 * unable to aggregate further ourselves, it's possible that a
		 * trailing optional I/O would allow the underlying device to
		 * aggregate with subsequent I/Os.  We must therefore determine
		 * if the next non-optional I/O is close enough to make
		 * aggregation worthwhile.
		 */
		stretch = B_FALSE;
		if (t != &vq->vq_read_tree && mio != NULL) {
			nio = lio;
			while ((dio = AVL_NEXT(t, nio)) != NULL &&
			    IO_GAP(nio, dio) == 0 &&
			    IO_GAP(mio, dio) <= zfs_vdev_write_gap_limit) {
				nio = dio;
				if (!(nio->io_flags & ZIO_FLAG_OPTIONAL)) {
					stretch = B_TRUE;
					break;
				}
			}
		}

		if (stretch) {
			/* This may be a no-op. */
			VERIFY((dio = AVL_NEXT(t, lio)) != NULL);
			dio->io_flags &= ~ZIO_FLAG_OPTIONAL;
		} else {
			while (lio != mio && lio != fio) {
				ASSERT(lio->io_flags & ZIO_FLAG_OPTIONAL);
				lio = AVL_PREV(t, lio);
				ASSERT(lio != NULL);
			}
		}
	}

	if (fio != lio) {
		uint64_t size = IO_SPAN(fio, lio);
		ASSERT(size <= zfs_vdev_aggregation_limit);

		aio = zio_vdev_delegated_io(fio->io_vd, fio->io_offset,
		    zio_buf_alloc(size), size, fio->io_type, ZIO_PRIORITY_AGG,
		    flags | ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE,
		    vdev_queue_agg_io_done, NULL);
		aio->io_timestamp = fio->io_timestamp;

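		/*
		 * For each constituent I/O: for writes, copy (or zero, for
		 * NODATA I/Os) its data into the aggregate buffer; reads are
		 * copied back out in vdev_queue_agg_io_done().  The aggregate
		 * is then added as a child of the constituent, which is
		 * removed from the queue, marked as bypassed and executed,
		 * so it will not complete until the aggregate does.
		 */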
		nio = fio;
		do {
			dio = nio;
			nio = AVL_NEXT(t, dio);
			ASSERT(dio->io_type == aio->io_type);
			ASSERT(dio->io_vdev_tree == t);

			if (dio->io_flags & ZIO_FLAG_NODATA) {
				ASSERT(dio->io_type == ZIO_TYPE_WRITE);
				bzero((char *)aio->io_data + (dio->io_offset -
				    aio->io_offset), dio->io_size);
			} else if (dio->io_type == ZIO_TYPE_WRITE) {
				bcopy(dio->io_data, (char *)aio->io_data +
				    (dio->io_offset - aio->io_offset),
				    dio->io_size);
			}

			zio_add_child(dio, aio);
			vdev_queue_io_remove(vq, dio);
			zio_vdev_io_bypass(dio);
			zio_execute(dio);
		} while (dio != lio);

		vdev_queue_pending_add(vq, aio);

		return (aio);
	}

	ASSERT(fio->io_vdev_tree == t);
	vdev_queue_io_remove(vq, fio);

	/*
	 * If the I/O is or was optional and therefore has no data, we need to
	 * simply discard it. We need to drop the vdev queue's lock to avoid a
	 * deadlock that we could encounter since this I/O will complete
	 * immediately.
	 */
	if (fio->io_flags & ZIO_FLAG_NODATA) {
		mutex_exit(&vq->vq_lock);
		zio_vdev_io_bypass(fio);
		zio_execute(fio);
		mutex_enter(&vq->vq_lock);
		goto again;
	}

	vdev_queue_pending_add(vq, fio);

	return (fio);
}

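/*
 * Queue an incoming read or write zio.  The zio is stamped with a deadline,
 * added to the queue, and the next I/O to issue (if the device has fewer
 * than zfs_vdev_min_pending I/Os outstanding) is returned to the caller;
 * aggregate I/Os are issued directly via zio_nowait() and NULL is returned
 * instead.
 */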
zio_t *
vdev_queue_io(zio_t *zio)
{
	vdev_queue_t *vq = &zio->io_vd->vdev_queue;
	zio_t *nio;

	ASSERT(zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE);

	if (zio->io_flags & ZIO_FLAG_DONT_QUEUE)
		return (zio);

	zio->io_flags |= ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE;

	if (zio->io_type == ZIO_TYPE_READ)
		zio->io_vdev_tree = &vq->vq_read_tree;
	else
		zio->io_vdev_tree = &vq->vq_write_tree;

	mutex_enter(&vq->vq_lock);

	zio->io_timestamp = gethrtime();
	zio->io_deadline = (zio->io_timestamp >> zfs_vdev_time_shift) +
	    zio->io_priority;

	vdev_queue_io_add(vq, zio);

	nio = vdev_queue_io_to_issue(vq, zfs_vdev_min_pending);

	mutex_exit(&vq->vq_lock);

	if (nio == NULL)
		return (NULL);

	if (nio->io_done == vdev_queue_agg_io_done) {
		zio_nowait(nio);
		return (NULL);
	}

	return (nio);
}

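/*
 * Called when an I/O previously issued to the device completes: remove it
 * from the pending tree, record the completion time, and issue more queued
 * I/Os to keep the device busy.
 */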
void
vdev_queue_io_done(zio_t *zio)
{
	vdev_queue_t *vq = &zio->io_vd->vdev_queue;

	if (zio_injection_enabled)
		delay(SEC_TO_TICK(zio_handle_io_delay(zio)));

	mutex_enter(&vq->vq_lock);

	vdev_queue_pending_remove(vq, zio);

	vq->vq_io_complete_ts = gethrtime();

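	/*
	 * Ramp up: issue up to zfs_vdev_ramp_rate additional I/Os, now
	 * allowing as many as zfs_vdev_max_pending I/Os to be outstanding
	 * at the device.  vq_lock is dropped while each I/O is handed to
	 * the ZIO pipeline.
	 */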
	for (int i = 0; i < zfs_vdev_ramp_rate; i++) {
		zio_t *nio = vdev_queue_io_to_issue(vq, zfs_vdev_max_pending);
		if (nio == NULL)
			break;
		mutex_exit(&vq->vq_lock);
		if (nio->io_done == vdev_queue_agg_io_done) {
			zio_nowait(nio);
		} else {
			zio_vdev_io_reissue(nio);
			zio_execute(nio);
		}
		mutex_enter(&vq->vq_lock);
	}

	mutex_exit(&vq->vq_lock);
}