/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 */

#include <sys/zfs_context.h>
#include <sys/vdev_impl.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/avl.h>
#include <sys/dsl_pool.h>
#include <sys/metaslab_impl.h>
#include <sys/abd.h>

/*
 * ZFS I/O Scheduler
 * -----------------
 *
 * ZFS issues I/O operations to leaf vdevs to satisfy and complete zios.  The
 * I/O scheduler determines when and in what order those operations are
 * issued.  The I/O scheduler divides operations into seven I/O classes,
 * prioritized in the following order: sync read, sync write, async read,
 * async write, scrub/resilver, removal, and initializing.  Each queue
 * defines the minimum and
 * maximum number of concurrent operations that may be issued to the device.
 * In addition, the device has an aggregate maximum. Note that the sum of the
 * per-queue minimums must not exceed the aggregate maximum, and if the
 * aggregate maximum is equal to or greater than the sum of the per-queue
 * maximums, the per-queue minimum has no effect.
 *
 * For many physical devices, throughput increases with the number of
 * concurrent operations, but latency typically suffers. Further, physical
 * devices typically have a limit at which more concurrent operations have no
 * effect on throughput or can actually cause it to decrease.
 *
 * The scheduler selects the next operation to issue by first looking for an
 * I/O class whose minimum has not been satisfied. Once all are satisfied and
 * the aggregate maximum has not been hit, the scheduler looks for classes
 * whose maximum has not been satisfied. Iteration through the I/O classes is
 * done in the order specified above. No further operations are issued if the
 * aggregate maximum number of concurrent operations has been hit or if there
 * are no operations queued for an I/O class that has not hit its maximum.
 * Every time an i/o is queued or an operation completes, the I/O scheduler
 * looks for new operations to issue.
 *
 * All I/O classes have a fixed maximum number of outstanding operations
 * except for the async write class. Asynchronous writes represent the data
 * that is committed to stable storage during the syncing stage for
 * transaction groups (see txg.c). Transaction groups enter the syncing state
 * periodically so the number of queued async writes will quickly burst up and
 * then bleed down to zero. Rather than servicing them as quickly as possible,
 * the I/O scheduler changes the maximum number of active async write i/os
 * according to the amount of dirty data in the pool (see dsl_pool.c). Since
 * both throughput and latency typically increase with the number of
 * concurrent operations issued to physical devices, reducing the burstiness
 * in the number of concurrent operations also stabilizes the response time of
 * operations from other -- and in particular synchronous -- queues. In broad
 * strokes, the I/O scheduler will issue more concurrent operations from the
 * async write queue as there's more dirty data in the pool.
 *
 * Async Writes
 *
 * The number of concurrent operations issued for the async write I/O class
 * follows a piece-wise linear function defined by a few adjustable points.
 *
 *        |                   o---------| <-- zfs_vdev_async_write_max_active
 *   ^    |                  /^         |
 *   |    |                 / |         |
 * active |                /  |         |
 *  I/O   |               /   |         |
 * count  |              /    |         |
 *        |             /     |         |
 *        |------------o      |         | <-- zfs_vdev_async_write_min_active
 *       0|____________^______|_________|
 *        0%           |      |       100% of zfs_dirty_data_max
 *                     |      |
 *                     |      `-- zfs_vdev_async_write_active_max_dirty_percent
 *                     `--------- zfs_vdev_async_write_active_min_dirty_percent
 *
 * Until the amount of dirty data exceeds a minimum percentage of the dirty
 * data allowed in the pool, the I/O scheduler will limit the number of
 * concurrent operations to the minimum. As that threshold is crossed, the
 * number of concurrent operations issued increases linearly to the maximum at
 * the specified maximum percentage of the dirty data allowed in the pool.
 *
 * Ideally, the amount of dirty data on a busy pool will stay in the sloped
 * part of the function between zfs_vdev_async_write_active_min_dirty_percent
 * and zfs_vdev_async_write_active_max_dirty_percent. If it exceeds the
 * maximum percentage, this indicates that the rate of incoming data is
 * greater than the rate that the backend storage can handle. In this case, we
 * must further throttle incoming writes (see dmu_tx_delay() for details).
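 *
 * For example, with the default tunables below (min_active = 1,
 * max_active = 10, and thresholds of 30% and 60%), a pool sitting at 45% of
 * zfs_dirty_data_max would be allowed
 * (45 - 30) * (10 - 1) / (60 - 30) + 1 = 5 concurrent async writes, using
 * the integer arithmetic of vdev_queue_max_async_writes().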
 */

/*
 * The maximum number of i/os active to each device.  Ideally, this will be >=
 * the sum of each queue's max_active.  It must be at least the sum of each
 * queue's min_active.
 */
uint32_t zfs_vdev_max_active = 1000;

/*
 * Per-queue limits on the number of i/os active to each device.  If the
 * sum of the queues' max_active is < zfs_vdev_max_active, then the
 * min_active comes into play.  We will send min_active from each queue,
 * and then select from queues in the order defined by zio_priority_t.
 *
 * In general, smaller max_active values will lead to lower latency of
 * synchronous operations.  Larger max_active values may lead to higher
 * overall throughput, depending on underlying storage.
 *
 * The ratio of the queues' max_active values determines the balance of
 * performance between reads, writes, and scrubs.  E.g., increasing
 * zfs_vdev_scrub_max_active will cause the scrub or resilver to complete
 * more quickly, but reads and writes to have higher latency and lower
 * throughput.
 */
uint32_t zfs_vdev_sync_read_min_active = 10;
uint32_t zfs_vdev_sync_read_max_active = 10;
uint32_t zfs_vdev_sync_write_min_active = 10;
uint32_t zfs_vdev_sync_write_max_active = 10;
uint32_t zfs_vdev_async_read_min_active = 1;
uint32_t zfs_vdev_async_read_max_active = 3;
uint32_t zfs_vdev_async_write_min_active = 1;
uint32_t zfs_vdev_async_write_max_active = 10;
uint32_t zfs_vdev_scrub_min_active = 1;
uint32_t zfs_vdev_scrub_max_active = 2;
uint32_t zfs_vdev_removal_min_active = 1;
uint32_t zfs_vdev_removal_max_active = 2;
uint32_t zfs_vdev_initializing_min_active = 1;
uint32_t zfs_vdev_initializing_max_active = 1;

/*
 * When the pool has less than zfs_vdev_async_write_active_min_dirty_percent
 * dirty data, use zfs_vdev_async_write_min_active.  When it has more than
 * zfs_vdev_async_write_active_max_dirty_percent, use
 * zfs_vdev_async_write_max_active. The value is linearly interpolated
 * between min and max.
 */
int zfs_vdev_async_write_active_min_dirty_percent = 30;
int zfs_vdev_async_write_active_max_dirty_percent = 60;

/*
 * To reduce IOPs, we aggregate small adjacent I/Os into one large I/O.
 * For read I/Os, we also aggregate across small adjacency gaps; for writes
 * we include spans of optional I/Os to aid aggregation at the disk even when
 * they aren't able to help us aggregate at this level.
 */
int zfs_vdev_aggregation_limit = 1 << 20;
int zfs_vdev_read_gap_limit = 32 << 10;
int zfs_vdev_write_gap_limit = 4 << 10;

/*
 * Define the queue depth percentage for each top-level vdev. This percentage
 * is used in conjunction with zfs_vdev_async_write_max_active to determine
 * how many allocations a specific top-level vdev should handle. Once the
 * queue depth reaches
 * zfs_vdev_queue_depth_pct * zfs_vdev_async_write_max_active / 100, the
 * allocator will stop allocating blocks on that top-level device.
 * The default kernel setting is 1000% which will yield 100 allocations per
 * device. For userland testing, the default setting is 300% which equates
 * to 30 allocations per device.
 */
#ifdef _KERNEL
int zfs_vdev_queue_depth_pct = 1000;
#else
int zfs_vdev_queue_depth_pct = 300;
#endif

/*
 * When performing allocations for a given metaslab, we want to make sure that
 * there are enough IOs to aggregate together to improve throughput. We want to
 * ensure that there are at least 128k worth of IOs that can be aggregated, and
 * we assume that the average allocation size is 4k, so we need the queue depth
 * to be 32 per allocator to get good aggregation of sequential writes.
 */
int zfs_vdev_def_queue_depth = 32;


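/*
 * AVL comparator that sorts zios by device offset, falling back to the zio
 * pointers as a tie-breaker so that i/os at the same offset still occupy
 * distinct positions in the tree.
 */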
int
vdev_queue_offset_compare(const void *x1, const void *x2)
{
	const zio_t *z1 = (const zio_t *)x1;
	const zio_t *z2 = (const zio_t *)x2;

	int cmp = AVL_CMP(z1->io_offset, z2->io_offset);

	if (likely(cmp))
		return (cmp);

	return (AVL_PCMP(z1, z2));
}

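/*
 * Convenience accessors for the per-class queued trees and the per-type
 * (read/write) offset-sorted trees embedded in the vdev queue.
 */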
static inline avl_tree_t *
vdev_queue_class_tree(vdev_queue_t *vq, zio_priority_t p)
{
	return (&vq->vq_class[p].vqc_queued_tree);
}

static inline avl_tree_t *
vdev_queue_type_tree(vdev_queue_t *vq, zio_type_t t)
{
	ASSERT(t == ZIO_TYPE_READ || t == ZIO_TYPE_WRITE);
	if (t == ZIO_TYPE_READ)
		return (&vq->vq_read_offset_tree);
	else
		return (&vq->vq_write_offset_tree);
}

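/*
 * AVL comparator used by the FIFO (sync) class trees: zios are sorted by the
 * time they were queued, again with the zio pointers as a tie-breaker.
 */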
int
vdev_queue_timestamp_compare(const void *x1, const void *x2)
{
	const zio_t *z1 = (const zio_t *)x1;
	const zio_t *z2 = (const zio_t *)x2;

	int cmp = AVL_CMP(z1->io_timestamp, z2->io_timestamp);

	if (likely(cmp))
		return (cmp);

	return (AVL_PCMP(z1, z2));
}

void
vdev_queue_init(vdev_t *vd)
{
	vdev_queue_t *vq = &vd->vdev_queue;

	mutex_init(&vq->vq_lock, NULL, MUTEX_DEFAULT, NULL);
	vq->vq_vdev = vd;

	avl_create(&vq->vq_active_tree, vdev_queue_offset_compare,
	    sizeof (zio_t), offsetof(struct zio, io_queue_node));
	avl_create(vdev_queue_type_tree(vq, ZIO_TYPE_READ),
	    vdev_queue_offset_compare, sizeof (zio_t),
	    offsetof(struct zio, io_offset_node));
	avl_create(vdev_queue_type_tree(vq, ZIO_TYPE_WRITE),
	    vdev_queue_offset_compare, sizeof (zio_t),
	    offsetof(struct zio, io_offset_node));

	for (zio_priority_t p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++) {
		int (*compfn) (const void *, const void *);

		/*
		 * The synchronous i/o queues are dispatched in FIFO rather
		 * than LBA order.  This provides more consistent latency for
		 * these i/os.
		 */
		if (p == ZIO_PRIORITY_SYNC_READ || p == ZIO_PRIORITY_SYNC_WRITE)
			compfn = vdev_queue_timestamp_compare;
		else
			compfn = vdev_queue_offset_compare;

		avl_create(vdev_queue_class_tree(vq, p), compfn,
		    sizeof (zio_t), offsetof(struct zio, io_queue_node));
	}

	vq->vq_last_offset = 0;
}

void
vdev_queue_fini(vdev_t *vd)
{
	vdev_queue_t *vq = &vd->vdev_queue;

	for (zio_priority_t p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++)
		avl_destroy(vdev_queue_class_tree(vq, p));
	avl_destroy(&vq->vq_active_tree);
	avl_destroy(vdev_queue_type_tree(vq, ZIO_TYPE_READ));
	avl_destroy(vdev_queue_type_tree(vq, ZIO_TYPE_WRITE));

	mutex_destroy(&vq->vq_lock);
}

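/*
 * Add a queued i/o to both its class tree and the per-type offset tree, and
 * account for it in the spa's queue (wait-queue) kstats.
 */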
static void
vdev_queue_io_add(vdev_queue_t *vq, zio_t *zio)
{
	spa_t *spa = zio->io_spa;

	ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
	avl_add(vdev_queue_class_tree(vq, zio->io_priority), zio);
	avl_add(vdev_queue_type_tree(vq, zio->io_type), zio);

	mutex_enter(&spa->spa_iokstat_lock);
	spa->spa_queue_stats[zio->io_priority].spa_queued++;
	if (spa->spa_iokstat != NULL)
		kstat_waitq_enter(spa->spa_iokstat->ks_data);
	mutex_exit(&spa->spa_iokstat_lock);
}

static void
vdev_queue_io_remove(vdev_queue_t *vq, zio_t *zio)
{
	spa_t *spa = zio->io_spa;

	ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
	avl_remove(vdev_queue_class_tree(vq, zio->io_priority), zio);
	avl_remove(vdev_queue_type_tree(vq, zio->io_type), zio);

	mutex_enter(&spa->spa_iokstat_lock);
	ASSERT3U(spa->spa_queue_stats[zio->io_priority].spa_queued, >, 0);
	spa->spa_queue_stats[zio->io_priority].spa_queued--;
	if (spa->spa_iokstat != NULL)
		kstat_waitq_exit(spa->spa_iokstat->ks_data);
	mutex_exit(&spa->spa_iokstat_lock);
}

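/*
 * Track an i/o that is being issued to the device: bump the class's active
 * count, add the zio to the active tree, and update the run-queue kstats.
 */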
static void
vdev_queue_pending_add(vdev_queue_t *vq, zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	ASSERT(MUTEX_HELD(&vq->vq_lock));
	ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
	vq->vq_class[zio->io_priority].vqc_active++;
	avl_add(&vq->vq_active_tree, zio);

	mutex_enter(&spa->spa_iokstat_lock);
	spa->spa_queue_stats[zio->io_priority].spa_active++;
	if (spa->spa_iokstat != NULL)
		kstat_runq_enter(spa->spa_iokstat->ks_data);
	mutex_exit(&spa->spa_iokstat_lock);
}

static void
vdev_queue_pending_remove(vdev_queue_t *vq, zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	ASSERT(MUTEX_HELD(&vq->vq_lock));
	ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
	vq->vq_class[zio->io_priority].vqc_active--;
	avl_remove(&vq->vq_active_tree, zio);

	mutex_enter(&spa->spa_iokstat_lock);
	ASSERT3U(spa->spa_queue_stats[zio->io_priority].spa_active, >, 0);
	spa->spa_queue_stats[zio->io_priority].spa_active--;
	if (spa->spa_iokstat != NULL) {
		kstat_io_t *ksio = spa->spa_iokstat->ks_data;

		kstat_runq_exit(spa->spa_iokstat->ks_data);
		if (zio->io_type == ZIO_TYPE_READ) {
			ksio->reads++;
			ksio->nread += zio->io_size;
		} else if (zio->io_type == ZIO_TYPE_WRITE) {
			ksio->writes++;
			ksio->nwritten += zio->io_size;
		}
	}
	mutex_exit(&spa->spa_iokstat_lock);
}

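/*
 * Completion callback for aggregated i/os.  For reads, copy each parent's
 * portion of the aggregate buffer back into that parent's own abd, then free
 * the temporary aggregate buffer.
 */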
static void
vdev_queue_agg_io_done(zio_t *aio)
{
	if (aio->io_type == ZIO_TYPE_READ) {
		zio_t *pio;
		zio_link_t *zl = NULL;
		while ((pio = zio_walk_parents(aio, &zl)) != NULL) {
			abd_copy_off(pio->io_abd, aio->io_abd,
			    0, pio->io_offset - aio->io_offset, pio->io_size);
		}
	}

	abd_free(aio->io_abd);
}

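/*
 * Map an i/o class to its minimum number of concurrently active operations,
 * as set by the per-class tunables above.
 */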
static int
vdev_queue_class_min_active(zio_priority_t p)
{
	switch (p) {
	case ZIO_PRIORITY_SYNC_READ:
		return (zfs_vdev_sync_read_min_active);
	case ZIO_PRIORITY_SYNC_WRITE:
		return (zfs_vdev_sync_write_min_active);
	case ZIO_PRIORITY_ASYNC_READ:
		return (zfs_vdev_async_read_min_active);
	case ZIO_PRIORITY_ASYNC_WRITE:
		return (zfs_vdev_async_write_min_active);
	case ZIO_PRIORITY_SCRUB:
		return (zfs_vdev_scrub_min_active);
	case ZIO_PRIORITY_REMOVAL:
		return (zfs_vdev_removal_min_active);
	case ZIO_PRIORITY_INITIALIZING:
		return (zfs_vdev_initializing_min_active);
	default:
		panic("invalid priority %u", p);
		return (0);
	}
}

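/*
 * Compute the current limit on active async writes by linearly interpolating
 * between zfs_vdev_async_write_min_active and zfs_vdev_async_write_max_active
 * based on the amount of dirty data in the pool (see the diagram above), or
 * by jumping straight to the maximum when a synctask is pending.
 */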
static int
vdev_queue_max_async_writes(spa_t *spa)
{
	int writes;
	uint64_t dirty = spa->spa_dsl_pool->dp_dirty_total;
	uint64_t min_bytes = zfs_dirty_data_max *
	    zfs_vdev_async_write_active_min_dirty_percent / 100;
	uint64_t max_bytes = zfs_dirty_data_max *
	    zfs_vdev_async_write_active_max_dirty_percent / 100;

	/*
	 * Sync tasks correspond to interactive user actions. To reduce the
	 * execution time of those actions we push data out as fast as possible.
	 */
	if (spa_has_pending_synctask(spa)) {
		return (zfs_vdev_async_write_max_active);
	}

	if (dirty < min_bytes)
		return (zfs_vdev_async_write_min_active);
	if (dirty > max_bytes)
		return (zfs_vdev_async_write_max_active);

	/*
	 * linear interpolation:
	 * slope = (max_writes - min_writes) / (max_bytes - min_bytes)
	 * move right by min_bytes
	 * move up by min_writes
	 */
	writes = (dirty - min_bytes) *
	    (zfs_vdev_async_write_max_active -
	    zfs_vdev_async_write_min_active) /
	    (max_bytes - min_bytes) +
	    zfs_vdev_async_write_min_active;
	ASSERT3U(writes, >=, zfs_vdev_async_write_min_active);
	ASSERT3U(writes, <=, zfs_vdev_async_write_max_active);
	return (writes);
}

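/*
 * Map an i/o class to its maximum number of concurrently active operations.
 * All classes use a fixed tunable except async writes, whose limit varies
 * with the amount of dirty data in the pool.
 */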
static int
vdev_queue_class_max_active(spa_t *spa, zio_priority_t p)
{
	switch (p) {
	case ZIO_PRIORITY_SYNC_READ:
		return (zfs_vdev_sync_read_max_active);
	case ZIO_PRIORITY_SYNC_WRITE:
		return (zfs_vdev_sync_write_max_active);
	case ZIO_PRIORITY_ASYNC_READ:
		return (zfs_vdev_async_read_max_active);
	case ZIO_PRIORITY_ASYNC_WRITE:
		return (vdev_queue_max_async_writes(spa));
	case ZIO_PRIORITY_SCRUB:
		return (zfs_vdev_scrub_max_active);
	case ZIO_PRIORITY_REMOVAL:
		return (zfs_vdev_removal_max_active);
	case ZIO_PRIORITY_INITIALIZING:
		return (zfs_vdev_initializing_max_active);
	default:
		panic("invalid priority %u", p);
		return (0);
	}
}

/*
 * Return the i/o class to issue from, or ZIO_PRIORITY_NUM_QUEUEABLE if
 * there is no eligible class.
 */
static zio_priority_t
vdev_queue_class_to_issue(vdev_queue_t *vq)
{
	spa_t *spa = vq->vq_vdev->vdev_spa;
	zio_priority_t p;

	if (avl_numnodes(&vq->vq_active_tree) >= zfs_vdev_max_active)
		return (ZIO_PRIORITY_NUM_QUEUEABLE);

	/* find a queue that has not reached its minimum # outstanding i/os */
	for (p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++) {
		if (avl_numnodes(vdev_queue_class_tree(vq, p)) > 0 &&
		    vq->vq_class[p].vqc_active <
		    vdev_queue_class_min_active(p))
			return (p);
	}

	/*
	 * If we haven't found a queue, look for one that hasn't reached its
	 * maximum # outstanding i/os.
	 */
	for (p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++) {
		if (avl_numnodes(vdev_queue_class_tree(vq, p)) > 0 &&
		    vq->vq_class[p].vqc_active <
		    vdev_queue_class_max_active(spa, p))
			return (p);
	}

	/* No eligible queued i/os */
	return (ZIO_PRIORITY_NUM_QUEUEABLE);
}

/*
 * Compute the range spanned by two i/os, which is the endpoint of the last
 * (lio->io_offset + lio->io_size) minus start of the first (fio->io_offset).
 * Conveniently, the gap between fio and lio is given by -IO_SPAN(lio, fio);
 * thus fio and lio are adjacent if and only if IO_SPAN(lio, fio) == 0.
 */
#define IO_SPAN(fio, lio) ((lio)->io_offset + (lio)->io_size - (fio)->io_offset)
#define IO_GAP(fio, lio) (-IO_SPAN(lio, fio))

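/*
 * Try to build an aggregate i/o around "zio": walk the offset-sorted tree of
 * queued i/os of the same type looking for sufficiently adjacent neighbors,
 * and if any are found, allocate a single delegated i/o covering the whole
 * span; the original i/os become parents of the aggregate and are bypassed.
 * Returns the aggregate zio, or NULL if no aggregation was possible.
 */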
static zio_t *
vdev_queue_aggregate(vdev_queue_t *vq, zio_t *zio)
{
	zio_t *first, *last, *aio, *dio, *mandatory, *nio;
	zio_link_t *zl = NULL;
	uint64_t maxgap = 0;
	uint64_t size;
	boolean_t stretch = B_FALSE;
	avl_tree_t *t = vdev_queue_type_tree(vq, zio->io_type);
	enum zio_flag flags = zio->io_flags & ZIO_FLAG_AGG_INHERIT;

	if (zio->io_flags & ZIO_FLAG_DONT_AGGREGATE)
		return (NULL);

	first = last = zio;

	if (zio->io_type == ZIO_TYPE_READ)
		maxgap = zfs_vdev_read_gap_limit;

	/*
	 * We can aggregate I/Os that are sufficiently adjacent and of
	 * the same flavor, as expressed by the AGG_INHERIT flags.
	 * The latter requirement is necessary so that certain
	 * attributes of the I/O, such as whether it's a normal I/O
	 * or a scrub/resilver, can be preserved in the aggregate.
	 * We can include optional I/Os, but don't allow them
	 * to begin a range as they add no benefit in that situation.
	 */

	/*
	 * We keep track of the last non-optional I/O.
	 */
	mandatory = (first->io_flags & ZIO_FLAG_OPTIONAL) ? NULL : first;

	/*
	 * Walk backwards through sufficiently contiguous I/Os
	 * recording the last non-optional I/O.
	 */
	while ((dio = AVL_PREV(t, first)) != NULL &&
	    (dio->io_flags & ZIO_FLAG_AGG_INHERIT) == flags &&
	    IO_SPAN(dio, last) <= zfs_vdev_aggregation_limit &&
	    IO_GAP(dio, first) <= maxgap &&
	    dio->io_type == zio->io_type) {
		first = dio;
		if (mandatory == NULL && !(first->io_flags & ZIO_FLAG_OPTIONAL))
			mandatory = first;
	}

	/*
	 * Skip any initial optional I/Os.
	 */
	while ((first->io_flags & ZIO_FLAG_OPTIONAL) && first != last) {
		first = AVL_NEXT(t, first);
		ASSERT(first != NULL);
	}

	/*
	 * Walk forward through sufficiently contiguous I/Os.
	 * The aggregation limit does not apply to optional i/os, so that
	 * we can issue contiguous writes even if they are larger than the
	 * aggregation limit.
	 */
	while ((dio = AVL_NEXT(t, last)) != NULL &&
	    (dio->io_flags & ZIO_FLAG_AGG_INHERIT) == flags &&
	    (IO_SPAN(first, dio) <= zfs_vdev_aggregation_limit ||
	    (dio->io_flags & ZIO_FLAG_OPTIONAL)) &&
	    IO_GAP(last, dio) <= maxgap &&
	    dio->io_type == zio->io_type) {
		last = dio;
		if (!(last->io_flags & ZIO_FLAG_OPTIONAL))
			mandatory = last;
	}

	/*
	 * Now that we've established the range of the I/O aggregation
	 * we must decide what to do with trailing optional I/Os.
	 * For reads, there's nothing to do. While we are unable to
	 * aggregate further, it's possible that a trailing optional
	 * I/O would allow the underlying device to aggregate with
	 * subsequent I/Os. We must therefore determine if the next
	 * non-optional I/O is close enough to make aggregation
	 * worthwhile.
	 */
	if (zio->io_type == ZIO_TYPE_WRITE && mandatory != NULL) {
		zio_t *nio = last;
		while ((dio = AVL_NEXT(t, nio)) != NULL &&
		    IO_GAP(nio, dio) == 0 &&
		    IO_GAP(mandatory, dio) <= zfs_vdev_write_gap_limit) {
			nio = dio;
			if (!(nio->io_flags & ZIO_FLAG_OPTIONAL)) {
				stretch = B_TRUE;
				break;
			}
		}
	}

	if (stretch) {
		/*
		 * We are going to include an optional io in our aggregated
		 * span, thus closing the write gap.  Only mandatory i/os can
		 * start aggregated spans, so make sure that the next i/o
		 * after our span is mandatory.
		 */
		dio = AVL_NEXT(t, last);
		dio->io_flags &= ~ZIO_FLAG_OPTIONAL;
	} else {
		/* do not include the optional i/o */
		while (last != mandatory && last != first) {
			ASSERT(last->io_flags & ZIO_FLAG_OPTIONAL);
			last = AVL_PREV(t, last);
			ASSERT(last != NULL);
		}
	}

	if (first == last)
		return (NULL);

	size = IO_SPAN(first, last);
	ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);

	aio = zio_vdev_delegated_io(first->io_vd, first->io_offset,
	    abd_alloc_for_io(size, B_TRUE), size, first->io_type,
	    zio->io_priority, flags | ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE,
	    vdev_queue_agg_io_done, NULL);
	aio->io_timestamp = first->io_timestamp;

	nio = first;
	do {
		dio = nio;
		nio = AVL_NEXT(t, dio);
		ASSERT3U(dio->io_type, ==, aio->io_type);

		if (dio->io_flags & ZIO_FLAG_NODATA) {
			ASSERT3U(dio->io_type, ==, ZIO_TYPE_WRITE);
			abd_zero_off(aio->io_abd,
			    dio->io_offset - aio->io_offset, dio->io_size);
		} else if (dio->io_type == ZIO_TYPE_WRITE) {
			abd_copy_off(aio->io_abd, dio->io_abd,
			    dio->io_offset - aio->io_offset, 0, dio->io_size);
		}

		zio_add_child(dio, aio);
		vdev_queue_io_remove(vq, dio);
	} while (dio != last);

	/*
	 * We need to drop the vdev queue's lock to avoid a deadlock that we
	 * could encounter since this I/O will complete immediately.
	 */
	mutex_exit(&vq->vq_lock);
	while ((dio = zio_walk_parents(aio, &zl)) != NULL) {
		zio_vdev_io_bypass(dio);
		zio_execute(dio);
	}
	mutex_enter(&vq->vq_lock);

	return (aio);
}

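/*
 * Select the next i/o to issue to the device: pick an eligible class, take
 * the appropriate i/o from that class's tree, and attempt to aggregate it
 * with its neighbors.  Returns NULL if nothing can be issued right now.
 */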
static zio_t *
vdev_queue_io_to_issue(vdev_queue_t *vq)
{
	zio_t *zio, *aio;
	zio_priority_t p;
	avl_index_t idx;
	avl_tree_t *tree;
	zio_t search;

again:
	ASSERT(MUTEX_HELD(&vq->vq_lock));

	p = vdev_queue_class_to_issue(vq);

	if (p == ZIO_PRIORITY_NUM_QUEUEABLE) {
		/* No eligible queued i/os */
		return (NULL);
	}

	/*
	 * For LBA-ordered queues (async / scrub / initializing), issue the
	 * i/o which follows the most recently issued i/o in LBA (offset) order.
	 *
	 * For FIFO queues (sync), issue the i/o with the lowest timestamp.
	 */
	tree = vdev_queue_class_tree(vq, p);
	search.io_timestamp = 0;
	search.io_offset = vq->vq_last_offset - 1;
	VERIFY3P(avl_find(tree, &search, &idx), ==, NULL);
	zio = avl_nearest(tree, idx, AVL_AFTER);
	if (zio == NULL)
		zio = avl_first(tree);
	ASSERT3U(zio->io_priority, ==, p);

	aio = vdev_queue_aggregate(vq, zio);
	if (aio != NULL)
		zio = aio;
	else
		vdev_queue_io_remove(vq, zio);

	/*
	 * If the I/O is or was optional and therefore has no data, we need to
	 * simply discard it. We need to drop the vdev queue's lock to avoid a
	 * deadlock that we could encounter since this I/O will complete
	 * immediately.
	 */
	if (zio->io_flags & ZIO_FLAG_NODATA) {
		mutex_exit(&vq->vq_lock);
		zio_vdev_io_bypass(zio);
		zio_execute(zio);
		mutex_enter(&vq->vq_lock);
		goto again;
	}

	vdev_queue_pending_add(vq, zio);
	vq->vq_last_offset = zio->io_offset + zio->io_size;

	return (zio);
}

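/*
 * Queue an incoming i/o (unless it is marked ZIO_FLAG_DONT_QUEUE) and return
 * the next i/o, if any, that the caller should issue to the device;
 * aggregate i/os are issued here directly via zio_nowait().
 */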
zio_t *
vdev_queue_io(zio_t *zio)
{
	vdev_queue_t *vq = &zio->io_vd->vdev_queue;
	zio_t *nio;

	if (zio->io_flags & ZIO_FLAG_DONT_QUEUE)
		return (zio);

	/*
	 * Children i/os inherit their parent's priority, which might
	 * not match the child's i/o type.  Fix it up here.
	 */
	if (zio->io_type == ZIO_TYPE_READ) {
		if (zio->io_priority != ZIO_PRIORITY_SYNC_READ &&
		    zio->io_priority != ZIO_PRIORITY_ASYNC_READ &&
		    zio->io_priority != ZIO_PRIORITY_SCRUB &&
		    zio->io_priority != ZIO_PRIORITY_REMOVAL &&
		    zio->io_priority != ZIO_PRIORITY_INITIALIZING)
			zio->io_priority = ZIO_PRIORITY_ASYNC_READ;
	} else {
		ASSERT(zio->io_type == ZIO_TYPE_WRITE);
		if (zio->io_priority != ZIO_PRIORITY_SYNC_WRITE &&
		    zio->io_priority != ZIO_PRIORITY_ASYNC_WRITE &&
		    zio->io_priority != ZIO_PRIORITY_REMOVAL &&
		    zio->io_priority != ZIO_PRIORITY_INITIALIZING)
			zio->io_priority = ZIO_PRIORITY_ASYNC_WRITE;
	}

	zio->io_flags |= ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE;

	mutex_enter(&vq->vq_lock);
	zio->io_timestamp = gethrtime();
	vdev_queue_io_add(vq, zio);
	nio = vdev_queue_io_to_issue(vq);
	mutex_exit(&vq->vq_lock);

	if (nio == NULL)
		return (NULL);

	if (nio->io_done == vdev_queue_agg_io_done) {
		zio_nowait(nio);
		return (NULL);
	}

	return (nio);
}

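/*
 * Note the completion of an issued i/o: remove it from the active tree and
 * keep issuing any further i/os that have become eligible.
 */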
void
vdev_queue_io_done(zio_t *zio)
{
	vdev_queue_t *vq = &zio->io_vd->vdev_queue;
	zio_t *nio;

	mutex_enter(&vq->vq_lock);

	vdev_queue_pending_remove(vq, zio);

	vq->vq_io_complete_ts = gethrtime();

	while ((nio = vdev_queue_io_to_issue(vq)) != NULL) {
		mutex_exit(&vq->vq_lock);
		if (nio->io_done == vdev_queue_agg_io_done) {
			zio_nowait(nio);
		} else {
			zio_vdev_io_reissue(nio);
			zio_execute(nio);
		}
		mutex_enter(&vq->vq_lock);
	}

	mutex_exit(&vq->vq_lock);
}

void
vdev_queue_change_io_priority(zio_t *zio, zio_priority_t priority)
{
	vdev_queue_t *vq = &zio->io_vd->vdev_queue;
	avl_tree_t *tree;

	/*
	 * ZIO_PRIORITY_NOW is used by the vdev cache code and the aggregate zio
	 * code to issue IOs without adding them to the vdev queue. In this
	 * case, the zio is already going to be issued as quickly as possible
	 * and so it doesn't need any reprioritization to help.
	 */
	if (zio->io_priority == ZIO_PRIORITY_NOW)
		return;

	ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
	ASSERT3U(priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);

	if (zio->io_type == ZIO_TYPE_READ) {
		if (priority != ZIO_PRIORITY_SYNC_READ &&
		    priority != ZIO_PRIORITY_ASYNC_READ &&
		    priority != ZIO_PRIORITY_SCRUB)
			priority = ZIO_PRIORITY_ASYNC_READ;
	} else {
		ASSERT(zio->io_type == ZIO_TYPE_WRITE);
		if (priority != ZIO_PRIORITY_SYNC_WRITE &&
		    priority != ZIO_PRIORITY_ASYNC_WRITE)
			priority = ZIO_PRIORITY_ASYNC_WRITE;
	}

	mutex_enter(&vq->vq_lock);

	/*
	 * If the zio is in none of the queues we can simply change
	 * the priority. If the zio is waiting to be submitted we must
	 * remove it from the queue and re-insert it with the new priority.
	 * Otherwise, the zio is currently active and we cannot change its
	 * priority.
	 */
	tree = vdev_queue_class_tree(vq, zio->io_priority);
	if (avl_find(tree, zio, NULL) == zio) {
		spa_t *spa = zio->io_spa;
		zio_priority_t oldpri = zio->io_priority;

		avl_remove(vdev_queue_class_tree(vq, zio->io_priority), zio);
		zio->io_priority = priority;
		avl_add(vdev_queue_class_tree(vq, zio->io_priority), zio);

		mutex_enter(&spa->spa_iokstat_lock);
		ASSERT3U(spa->spa_queue_stats[oldpri].spa_queued, >, 0);
		spa->spa_queue_stats[oldpri].spa_queued--;
		spa->spa_queue_stats[zio->io_priority].spa_queued++;
		mutex_exit(&spa->spa_iokstat_lock);
	} else if (avl_find(&vq->vq_active_tree, zio, NULL) != zio) {
		zio->io_priority = priority;
	}

	mutex_exit(&vq->vq_lock);
}

/*
 * These two methods are only used for load calculations, so we are not
 * concerned about getting an incorrect value on 32-bit platforms due to the
 * lack of vq_lock protection here; we prefer to keep them lock-free for
 * performance.
 */
int
vdev_queue_length(vdev_t *vd)
{
	return (avl_numnodes(&vd->vdev_queue.vq_active_tree));
}

uint64_t
vdev_queue_last_offset(vdev_t *vd)
{
	return (vd->vdev_queue.vq_last_offset);
}