/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright 2019 Joyent, Inc.
 */

#include <sys/zfs_context.h>
#include <sys/vdev_impl.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/avl.h>
#include <sys/dsl_pool.h>
#include <sys/metaslab_impl.h>
#include <sys/abd.h>

/*
 * ZFS I/O Scheduler
 * -----------------
 *
 * ZFS issues I/O operations to leaf vdevs to satisfy and complete zios.  The
 * I/O scheduler determines when and in what order those operations are
 * issued.  The I/O scheduler divides operations into five I/O classes
 * prioritized in the following order: sync read, sync write, async read,
 * async write, and scrub/resilver.  Each queue defines the minimum and
 * maximum number of concurrent operations that may be issued to the device.
 * In addition, the device has an aggregate maximum.  Note that the sum of the
 * per-queue minimums must not exceed the aggregate maximum, and if the
 * aggregate maximum is equal to or greater than the sum of the per-queue
 * maximums, the per-queue minimum has no effect.
 *
 * For many physical devices, throughput increases with the number of
 * concurrent operations, but latency typically suffers.  Further, physical
 * devices typically have a limit at which more concurrent operations have no
 * effect on throughput or can actually cause it to decrease.
 *
 * The scheduler selects the next operation to issue by first looking for an
 * I/O class whose minimum has not been satisfied.  Once all are satisfied and
 * the aggregate maximum has not been hit, the scheduler looks for classes
 * whose maximum has not been satisfied.  Iteration through the I/O classes is
 * done in the order specified above.  No further operations are issued if the
 * aggregate maximum number of concurrent operations has been hit or if there
 * are no operations queued for an I/O class that has not hit its maximum.
 * Every time an i/o is queued or an operation completes, the I/O scheduler
 * looks for new operations to issue.
 *
 * All I/O classes have a fixed maximum number of outstanding operations
 * except for the async write class.  Asynchronous writes represent the data
 * that is committed to stable storage during the syncing stage for
 * transaction groups (see txg.c).  Transaction groups enter the syncing state
 * periodically so the number of queued async writes will quickly burst up and
 * then bleed down to zero.
 * Rather than servicing them as quickly as possible, the I/O scheduler
 * changes the maximum number of active async write i/os according to the
 * amount of dirty data in the pool (see dsl_pool.c).  Since both throughput
 * and latency typically increase with the number of concurrent operations
 * issued to physical devices, reducing the burstiness in the number of
 * concurrent operations also stabilizes the response time of operations
 * from other -- and in particular synchronous -- queues.  In broad strokes,
 * the I/O scheduler will issue more concurrent operations from the async
 * write queue as there's more dirty data in the pool.
 *
 * Async Writes
 *
 * The number of concurrent operations issued for the async write I/O class
 * follows a piece-wise linear function defined by a few adjustable points.
 *
 *        |                   o---------| <-- zfs_vdev_async_write_max_active
 *   ^    |                  /^         |
 *   |    |                 / |         |
 * active |                /  |         |
 *  I/O   |               /   |         |
 * count  |              /    |         |
 *        |             /     |         |
 *        |------------o      |         | <-- zfs_vdev_async_write_min_active
 *       0|____________^______|_________|
 *        0%           |      |       100% of zfs_dirty_data_max
 *                     |      |
 *                     |      `-- zfs_vdev_async_write_active_max_dirty_percent
 *                     `--------- zfs_vdev_async_write_active_min_dirty_percent
 *
 * Until the amount of dirty data exceeds a minimum percentage of the dirty
 * data allowed in the pool, the I/O scheduler will limit the number of
 * concurrent operations to the minimum.  As that threshold is crossed, the
 * number of concurrent operations issued increases linearly to the maximum at
 * the specified maximum percentage of the dirty data allowed in the pool.
 *
 * Ideally, the amount of dirty data on a busy pool will stay in the sloped
 * part of the function between zfs_vdev_async_write_active_min_dirty_percent
 * and zfs_vdev_async_write_active_max_dirty_percent.  If it exceeds the
 * maximum percentage, this indicates that the rate of incoming data is
 * greater than the rate that the backend storage can handle.  In this case,
 * we must further throttle incoming writes (see dmu_tx_delay() for details).
 */

/*
 * The maximum number of i/os active to each device.  Ideally, this will be >=
 * the sum of each queue's max_active.  It must be at least the sum of each
 * queue's min_active.
 */
uint32_t zfs_vdev_max_active = 1000;

/*
 * Per-queue limits on the number of i/os active to each device.  If the
 * sum of the queue's max_active is < zfs_vdev_max_active, then the
 * min_active comes into play.  We will send min_active from each queue,
 * and then select from queues in the order defined by zio_priority_t.
 *
 * In general, smaller max_active's will lead to lower latency of synchronous
 * operations.  Larger max_active's may lead to higher overall throughput,
 * depending on underlying storage.
 *
 * The ratio of the queues' max_actives determines the balance of performance
 * between reads, writes, and scrubs.  E.g., increasing
 * zfs_vdev_scrub_max_active will cause a scrub or resilver to complete
 * more quickly, but reads and writes will have higher latency and lower
 * throughput.
 */
uint32_t zfs_vdev_sync_read_min_active = 10;
uint32_t zfs_vdev_sync_read_max_active = 10;
uint32_t zfs_vdev_sync_write_min_active = 10;
uint32_t zfs_vdev_sync_write_max_active = 10;
uint32_t zfs_vdev_async_read_min_active = 1;
uint32_t zfs_vdev_async_read_max_active = 3;
uint32_t zfs_vdev_async_write_min_active = 1;
uint32_t zfs_vdev_async_write_max_active = 10;
uint32_t zfs_vdev_scrub_min_active = 1;
uint32_t zfs_vdev_scrub_max_active = 2;
uint32_t zfs_vdev_removal_min_active = 1;
uint32_t zfs_vdev_removal_max_active = 2;
uint32_t zfs_vdev_initializing_min_active = 1;
uint32_t zfs_vdev_initializing_max_active = 1;

/*
 * When the pool has less than zfs_vdev_async_write_active_min_dirty_percent
 * dirty data, use zfs_vdev_async_write_min_active.  When it has more than
 * zfs_vdev_async_write_active_max_dirty_percent, use
 * zfs_vdev_async_write_max_active.  The value is linearly interpolated
 * between min and max.
 */
int zfs_vdev_async_write_active_min_dirty_percent = 30;
int zfs_vdev_async_write_active_max_dirty_percent = 60;
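
/*
 * For illustration only (this example is not used by the code): with the
 * default tunables above (min_active = 1, max_active = 10, min dirty
 * percent = 30, max dirty percent = 60), a pool whose dirty data sits at
 * 45% of zfs_dirty_data_max is halfway up the slope, so
 * vdev_queue_max_async_writes() below evaluates to
 *
 *	(45 - 30) * (10 - 1) / (60 - 30) + 1 = 5
 *
 * concurrent async writes (integer division truncates the exact value of
 * 5.5 down to 5).
 */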

/*
 * To reduce IOPS, we aggregate small adjacent I/Os into one large I/O.
 * For read I/Os, we also aggregate across small adjacency gaps; for writes
 * we include spans of optional I/Os to aid aggregation at the disk even when
 * they aren't able to help us aggregate at this level.
 */
int zfs_vdev_aggregation_limit = 1 << 20;
int zfs_vdev_read_gap_limit = 32 << 10;
int zfs_vdev_write_gap_limit = 4 << 10;

/*
 * Define the queue depth percentage for each top-level vdev.  This percentage
 * is used in conjunction with zfs_vdev_async_write_max_active to determine
 * how many allocations a specific top-level vdev should handle.  Once the
 * queue depth reaches zfs_vdev_queue_depth_pct * zfs_vdev_async_write_max_active
 * / 100, the allocator will stop allocating blocks on that top-level device.
 * The default kernel setting is 1000%, which will yield 100 allocations per
 * device.  For userland testing, the default setting is 300%, which equates
 * to 30 allocations per device.
 */
#ifdef _KERNEL
int zfs_vdev_queue_depth_pct = 1000;
#else
int zfs_vdev_queue_depth_pct = 300;
#endif

/*
 * When performing allocations for a given metaslab, we want to make sure that
 * there are enough IOs to aggregate together to improve throughput.  We want
 * to ensure that there are at least 128k worth of IOs that can be aggregated,
 * and we assume that the average allocation size is 4k, so we need the queue
 * depth to be 32 per allocator to get good aggregation of sequential writes.
 */
int zfs_vdev_def_queue_depth = 32;


int
vdev_queue_offset_compare(const void *x1, const void *x2)
{
	const zio_t *z1 = (const zio_t *)x1;
	const zio_t *z2 = (const zio_t *)x2;

	int cmp = AVL_CMP(z1->io_offset, z2->io_offset);

	if (likely(cmp))
		return (cmp);

	return (AVL_PCMP(z1, z2));
}

static inline avl_tree_t *
vdev_queue_class_tree(vdev_queue_t *vq, zio_priority_t p)
{
	return (&vq->vq_class[p].vqc_queued_tree);
}

static inline avl_tree_t *
vdev_queue_type_tree(vdev_queue_t *vq, zio_type_t t)
{
	ASSERT(t == ZIO_TYPE_READ || t == ZIO_TYPE_WRITE);
	if (t == ZIO_TYPE_READ)
		return (&vq->vq_read_offset_tree);
	else
		return (&vq->vq_write_offset_tree);
}

int
vdev_queue_timestamp_compare(const void *x1, const void *x2)
{
	const zio_t *z1 = (const zio_t *)x1;
	const zio_t *z2 = (const zio_t *)x2;

	int cmp = AVL_CMP(z1->io_timestamp, z2->io_timestamp);

	if (likely(cmp))
		return (cmp);

	return (AVL_PCMP(z1, z2));
}

void
vdev_queue_init(vdev_t *vd)
{
	vdev_queue_t *vq = &vd->vdev_queue;

	mutex_init(&vq->vq_lock, NULL, MUTEX_DEFAULT, NULL);
	vq->vq_vdev = vd;

	avl_create(&vq->vq_active_tree, vdev_queue_offset_compare,
	    sizeof (zio_t), offsetof(struct zio, io_queue_node));
	avl_create(vdev_queue_type_tree(vq, ZIO_TYPE_READ),
	    vdev_queue_offset_compare, sizeof (zio_t),
	    offsetof(struct zio, io_offset_node));
	avl_create(vdev_queue_type_tree(vq, ZIO_TYPE_WRITE),
	    vdev_queue_offset_compare, sizeof (zio_t),
	    offsetof(struct zio, io_offset_node));

	for (zio_priority_t p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++) {
		int (*compfn) (const void *, const void *);

		/*
		 * The synchronous i/o queues are dispatched in FIFO rather
		 * than LBA order.  This provides more consistent latency for
		 * these i/os.
		 */
		if (p == ZIO_PRIORITY_SYNC_READ ||
		    p == ZIO_PRIORITY_SYNC_WRITE)
			compfn = vdev_queue_timestamp_compare;
		else
			compfn = vdev_queue_offset_compare;

		avl_create(vdev_queue_class_tree(vq, p), compfn,
		    sizeof (zio_t), offsetof(struct zio, io_queue_node));
	}

	vq->vq_last_offset = 0;
}

void
vdev_queue_fini(vdev_t *vd)
{
	vdev_queue_t *vq = &vd->vdev_queue;

	for (zio_priority_t p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++)
		avl_destroy(vdev_queue_class_tree(vq, p));
	avl_destroy(&vq->vq_active_tree);
	avl_destroy(vdev_queue_type_tree(vq, ZIO_TYPE_READ));
	avl_destroy(vdev_queue_type_tree(vq, ZIO_TYPE_WRITE));

	mutex_destroy(&vq->vq_lock);
}

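/*
 * vdev_queue_io_add() and vdev_queue_io_remove() track a zio while it waits
 * in the queue: they add it to (and remove it from) the per-priority-class
 * tree and the per-type offset tree, and keep the spa's queued-i/o counts
 * and wait-queue kstats in sync.  They are called with vq_lock held.
 */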
static void
vdev_queue_io_add(vdev_queue_t *vq, zio_t *zio)
{
	spa_t *spa = zio->io_spa;

	ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
	avl_add(vdev_queue_class_tree(vq, zio->io_priority), zio);
	avl_add(vdev_queue_type_tree(vq, zio->io_type), zio);

	mutex_enter(&spa->spa_iokstat_lock);
	spa->spa_queue_stats[zio->io_priority].spa_queued++;
	if (spa->spa_iokstat != NULL)
		kstat_waitq_enter(spa->spa_iokstat->ks_data);
	mutex_exit(&spa->spa_iokstat_lock);
}

static void
vdev_queue_io_remove(vdev_queue_t *vq, zio_t *zio)
{
	spa_t *spa = zio->io_spa;

	ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
	avl_remove(vdev_queue_class_tree(vq, zio->io_priority), zio);
	avl_remove(vdev_queue_type_tree(vq, zio->io_type), zio);

	mutex_enter(&spa->spa_iokstat_lock);
	ASSERT3U(spa->spa_queue_stats[zio->io_priority].spa_queued, >, 0);
	spa->spa_queue_stats[zio->io_priority].spa_queued--;
	if (spa->spa_iokstat != NULL)
		kstat_waitq_exit(spa->spa_iokstat->ks_data);
	mutex_exit(&spa->spa_iokstat_lock);
}

static void
vdev_queue_pending_add(vdev_queue_t *vq, zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	ASSERT(MUTEX_HELD(&vq->vq_lock));
	ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
	vq->vq_class[zio->io_priority].vqc_active++;
	avl_add(&vq->vq_active_tree, zio);

	mutex_enter(&spa->spa_iokstat_lock);
	spa->spa_queue_stats[zio->io_priority].spa_active++;
	if (spa->spa_iokstat != NULL)
		kstat_runq_enter(spa->spa_iokstat->ks_data);
	mutex_exit(&spa->spa_iokstat_lock);
}

static void
vdev_queue_pending_remove(vdev_queue_t *vq, zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	ASSERT(MUTEX_HELD(&vq->vq_lock));
	ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
	vq->vq_class[zio->io_priority].vqc_active--;
	avl_remove(&vq->vq_active_tree, zio);

	mutex_enter(&spa->spa_iokstat_lock);
	ASSERT3U(spa->spa_queue_stats[zio->io_priority].spa_active, >, 0);
	spa->spa_queue_stats[zio->io_priority].spa_active--;
	if (spa->spa_iokstat != NULL) {
		kstat_io_t *ksio = spa->spa_iokstat->ks_data;

		kstat_runq_exit(spa->spa_iokstat->ks_data);
		if (zio->io_type == ZIO_TYPE_READ) {
			ksio->reads++;
			ksio->nread += zio->io_size;
		} else if (zio->io_type == ZIO_TYPE_WRITE) {
			ksio->writes++;
			ksio->nwritten += zio->io_size;
		}
	}
	mutex_exit(&spa->spa_iokstat_lock);
}

static void
vdev_queue_agg_io_done(zio_t *aio)
{
	if (aio->io_type == ZIO_TYPE_READ) {
		zio_t *pio;
		zio_link_t *zl = NULL;
		while ((pio = zio_walk_parents(aio, &zl)) != NULL) {
			abd_copy_off(pio->io_abd, aio->io_abd,
			    0, pio->io_offset - aio->io_offset, pio->io_size);
		}
	}

	abd_free(aio->io_abd);
}

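/*
 * Return the configured minimum (and, via vdev_queue_class_max_active()
 * below, maximum) number of concurrently active i/os for the given priority
 * class.  The async write maximum is the only limit that is computed
 * dynamically, from the amount of dirty data in the pool; see
 * vdev_queue_max_async_writes().
 */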
static int
vdev_queue_class_min_active(zio_priority_t p)
{
	switch (p) {
	case ZIO_PRIORITY_SYNC_READ:
		return (zfs_vdev_sync_read_min_active);
	case ZIO_PRIORITY_SYNC_WRITE:
		return (zfs_vdev_sync_write_min_active);
	case ZIO_PRIORITY_ASYNC_READ:
		return (zfs_vdev_async_read_min_active);
	case ZIO_PRIORITY_ASYNC_WRITE:
		return (zfs_vdev_async_write_min_active);
	case ZIO_PRIORITY_SCRUB:
		return (zfs_vdev_scrub_min_active);
	case ZIO_PRIORITY_REMOVAL:
		return (zfs_vdev_removal_min_active);
	case ZIO_PRIORITY_INITIALIZING:
		return (zfs_vdev_initializing_min_active);
	default:
		panic("invalid priority %u", p);
	}
}

static int
vdev_queue_max_async_writes(spa_t *spa)
{
	int writes;
	uint64_t dirty = spa->spa_dsl_pool->dp_dirty_total;
	uint64_t min_bytes = zfs_dirty_data_max *
	    zfs_vdev_async_write_active_min_dirty_percent / 100;
	uint64_t max_bytes = zfs_dirty_data_max *
	    zfs_vdev_async_write_active_max_dirty_percent / 100;

	/*
	 * Sync tasks correspond to interactive user actions.  To reduce the
	 * execution time of those actions, we push data out as fast as
	 * possible.
	 */
	if (spa_has_pending_synctask(spa)) {
		return (zfs_vdev_async_write_max_active);
	}

	if (dirty < min_bytes)
		return (zfs_vdev_async_write_min_active);
	if (dirty > max_bytes)
		return (zfs_vdev_async_write_max_active);

	/*
	 * linear interpolation:
	 * slope = (max_writes - min_writes) / (max_bytes - min_bytes)
	 * move right by min_bytes
	 * move up by min_writes
	 */
	writes = (dirty - min_bytes) *
	    (zfs_vdev_async_write_max_active -
	    zfs_vdev_async_write_min_active) /
	    (max_bytes - min_bytes) +
	    zfs_vdev_async_write_min_active;
	ASSERT3U(writes, >=, zfs_vdev_async_write_min_active);
	ASSERT3U(writes, <=, zfs_vdev_async_write_max_active);
	return (writes);
}

static int
vdev_queue_class_max_active(spa_t *spa, zio_priority_t p)
{
	switch (p) {
	case ZIO_PRIORITY_SYNC_READ:
		return (zfs_vdev_sync_read_max_active);
	case ZIO_PRIORITY_SYNC_WRITE:
		return (zfs_vdev_sync_write_max_active);
	case ZIO_PRIORITY_ASYNC_READ:
		return (zfs_vdev_async_read_max_active);
	case ZIO_PRIORITY_ASYNC_WRITE:
		return (vdev_queue_max_async_writes(spa));
	case ZIO_PRIORITY_SCRUB:
		return (zfs_vdev_scrub_max_active);
	case ZIO_PRIORITY_REMOVAL:
		return (zfs_vdev_removal_max_active);
	case ZIO_PRIORITY_INITIALIZING:
		return (zfs_vdev_initializing_max_active);
	default:
		panic("invalid priority %u", p);
	}
}

/*
 * Return the i/o class to issue from, or ZIO_PRIORITY_NUM_QUEUEABLE if
 * there is no eligible class.
 */
static zio_priority_t
vdev_queue_class_to_issue(vdev_queue_t *vq)
{
	spa_t *spa = vq->vq_vdev->vdev_spa;
	zio_priority_t p;

	if (avl_numnodes(&vq->vq_active_tree) >= zfs_vdev_max_active)
		return (ZIO_PRIORITY_NUM_QUEUEABLE);

	/* find a queue that has not reached its minimum # outstanding i/os */
	for (p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++) {
		if (avl_numnodes(vdev_queue_class_tree(vq, p)) > 0 &&
		    vq->vq_class[p].vqc_active <
		    vdev_queue_class_min_active(p))
			return (p);
	}

	/*
	 * If we haven't found a queue, look for one that hasn't reached its
	 * maximum # outstanding i/os.
	 */
	for (p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++) {
		if (avl_numnodes(vdev_queue_class_tree(vq, p)) > 0 &&
		    vq->vq_class[p].vqc_active <
		    vdev_queue_class_max_active(spa, p))
			return (p);
	}

	/* No eligible queued i/os */
	return (ZIO_PRIORITY_NUM_QUEUEABLE);
}

/*
 * Compute the range spanned by two i/os, which is the endpoint of the last
 * (lio->io_offset + lio->io_size) minus start of the first (fio->io_offset).
 * Conveniently, the gap between fio and lio is given by -IO_SPAN(lio, fio);
 * thus fio and lio are adjacent if and only if IO_SPAN(lio, fio) == 0.
 */
#define	IO_SPAN(fio, lio) ((lio)->io_offset + (lio)->io_size - (fio)->io_offset)
#define	IO_GAP(fio, lio) (-IO_SPAN(lio, fio))
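
/*
 * For example (illustrative values only): for an 8K i/o "fio" at offset 0
 * and an 8K i/o "lio" at offset 12K, IO_SPAN(fio, lio) is 12K + 8K - 0 = 20K,
 * and IO_GAP(fio, lio) is -(0 + 8K - 12K) = 4K, i.e. the distance between
 * the end of fio and the start of lio.  With the default
 * zfs_vdev_read_gap_limit of 32K, a gap of this size would not by itself
 * prevent the two reads from being aggregated below.
 */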

static zio_t *
vdev_queue_aggregate(vdev_queue_t *vq, zio_t *zio)
{
	zio_t *first, *last, *aio, *dio, *mandatory, *nio;
	zio_link_t *zl = NULL;
	uint64_t maxgap = 0;
	uint64_t size;
	boolean_t stretch = B_FALSE;
	avl_tree_t *t = vdev_queue_type_tree(vq, zio->io_type);
	enum zio_flag flags = zio->io_flags & ZIO_FLAG_AGG_INHERIT;

	if (zio->io_flags & ZIO_FLAG_DONT_AGGREGATE)
		return (NULL);

	first = last = zio;

	if (zio->io_type == ZIO_TYPE_READ)
		maxgap = zfs_vdev_read_gap_limit;

	/*
	 * We can aggregate I/Os that are sufficiently adjacent and of
	 * the same flavor, as expressed by the AGG_INHERIT flags.
	 * The latter requirement is necessary so that certain
	 * attributes of the I/O, such as whether it's a normal I/O
	 * or a scrub/resilver, can be preserved in the aggregate.
	 * We can include optional I/Os, but don't allow them
	 * to begin a range as they add no benefit in that situation.
	 */

	/*
	 * We keep track of the last non-optional I/O.
	 */
	mandatory = (first->io_flags & ZIO_FLAG_OPTIONAL) ? NULL : first;

	/*
	 * Walk backwards through sufficiently contiguous I/Os
	 * recording the last non-optional I/O.
	 */
	while ((dio = AVL_PREV(t, first)) != NULL &&
	    (dio->io_flags & ZIO_FLAG_AGG_INHERIT) == flags &&
	    IO_SPAN(dio, last) <= zfs_vdev_aggregation_limit &&
	    IO_GAP(dio, first) <= maxgap &&
	    dio->io_type == zio->io_type) {
		first = dio;
		if (mandatory == NULL && !(first->io_flags & ZIO_FLAG_OPTIONAL))
			mandatory = first;
	}

	/*
	 * Skip any initial optional I/Os.
	 */
	while ((first->io_flags & ZIO_FLAG_OPTIONAL) && first != last) {
		first = AVL_NEXT(t, first);
		ASSERT(first != NULL);
	}

	/*
	 * Walk forward through sufficiently contiguous I/Os.
	 * The aggregation limit does not apply to optional i/os, so that
	 * we can issue contiguous writes even if they are larger than the
	 * aggregation limit.
	 */
	while ((dio = AVL_NEXT(t, last)) != NULL &&
	    (dio->io_flags & ZIO_FLAG_AGG_INHERIT) == flags &&
	    (IO_SPAN(first, dio) <= zfs_vdev_aggregation_limit ||
	    (dio->io_flags & ZIO_FLAG_OPTIONAL)) &&
	    IO_GAP(last, dio) <= maxgap &&
	    dio->io_type == zio->io_type) {
		last = dio;
		if (!(last->io_flags & ZIO_FLAG_OPTIONAL))
			mandatory = last;
	}

	/*
	 * Now that we've established the range of the I/O aggregation
	 * we must decide what to do with trailing optional I/Os.
	 * For reads, there's nothing to do.  While we are unable to
	 * aggregate further, it's possible that a trailing optional
	 * I/O would allow the underlying device to aggregate with
	 * subsequent I/Os.  We must therefore determine if the next
	 * non-optional I/O is close enough to make aggregation
	 * worthwhile.
	 */
	if (zio->io_type == ZIO_TYPE_WRITE && mandatory != NULL) {
		zio_t *nio = last;
		while ((dio = AVL_NEXT(t, nio)) != NULL &&
		    IO_GAP(nio, dio) == 0 &&
		    IO_GAP(mandatory, dio) <= zfs_vdev_write_gap_limit) {
			nio = dio;
			if (!(nio->io_flags & ZIO_FLAG_OPTIONAL)) {
				stretch = B_TRUE;
				break;
			}
		}
	}

	if (stretch) {
		/*
		 * We are going to include an optional io in our aggregated
		 * span, thus closing the write gap.  Only mandatory i/os can
		 * start aggregated spans, so make sure that the next i/o
		 * after our span is mandatory.
		 */
		dio = AVL_NEXT(t, last);
		dio->io_flags &= ~ZIO_FLAG_OPTIONAL;
	} else {
		/* do not include the optional i/o */
		while (last != mandatory && last != first) {
			ASSERT(last->io_flags & ZIO_FLAG_OPTIONAL);
			last = AVL_PREV(t, last);
			ASSERT(last != NULL);
		}
	}

	if (first == last)
		return (NULL);

	size = IO_SPAN(first, last);
	ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);

	aio = zio_vdev_delegated_io(first->io_vd, first->io_offset,
	    abd_alloc_for_io(size, B_TRUE), size, first->io_type,
	    zio->io_priority, flags | ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE,
	    vdev_queue_agg_io_done, NULL);
	aio->io_timestamp = first->io_timestamp;

	nio = first;
	do {
		dio = nio;
		nio = AVL_NEXT(t, dio);
		ASSERT3U(dio->io_type, ==, aio->io_type);

		if (dio->io_flags & ZIO_FLAG_NODATA) {
			ASSERT3U(dio->io_type, ==, ZIO_TYPE_WRITE);
			abd_zero_off(aio->io_abd,
			    dio->io_offset - aio->io_offset, dio->io_size);
		} else if (dio->io_type == ZIO_TYPE_WRITE) {
			abd_copy_off(aio->io_abd, dio->io_abd,
			    dio->io_offset - aio->io_offset, 0, dio->io_size);
		}

		zio_add_child(dio, aio);
		vdev_queue_io_remove(vq, dio);
	} while (dio != last);

	/*
	 * We need to drop the vdev queue's lock to avoid a deadlock that we
	 * could encounter since this I/O will complete immediately.
	 */
	mutex_exit(&vq->vq_lock);
	while ((dio = zio_walk_parents(aio, &zl)) != NULL) {
		zio_vdev_io_bypass(dio);
		zio_execute(dio);
	}
	mutex_enter(&vq->vq_lock);

	return (aio);
}

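/*
 * Pick the next i/o to hand to the vdev, or return NULL if nothing is
 * eligible.  Where possible, adjacent queued i/os are combined into a single
 * larger i/o via vdev_queue_aggregate().  Called with vq_lock held; the lock
 * may be dropped and re-acquired internally while bypassed or optional i/os
 * are executed.
 */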
static zio_t *
vdev_queue_io_to_issue(vdev_queue_t *vq)
{
	zio_t *zio, *aio;
	zio_priority_t p;
	avl_index_t idx;
	avl_tree_t *tree;
	zio_t search;

again:
	ASSERT(MUTEX_HELD(&vq->vq_lock));

	p = vdev_queue_class_to_issue(vq);

	if (p == ZIO_PRIORITY_NUM_QUEUEABLE) {
		/* No eligible queued i/os */
		return (NULL);
	}

	/*
	 * For LBA-ordered queues (async / scrub / initializing), issue the
	 * i/o which follows the most recently issued i/o in LBA (offset)
	 * order.
	 *
	 * For FIFO queues (sync), issue the i/o with the lowest timestamp.
	 */
	tree = vdev_queue_class_tree(vq, p);
	search.io_timestamp = 0;
	search.io_offset = vq->vq_last_offset - 1;
	VERIFY3P(avl_find(tree, &search, &idx), ==, NULL);
	zio = avl_nearest(tree, idx, AVL_AFTER);
	if (zio == NULL)
		zio = avl_first(tree);
	ASSERT3U(zio->io_priority, ==, p);

	aio = vdev_queue_aggregate(vq, zio);
	if (aio != NULL)
		zio = aio;
	else
		vdev_queue_io_remove(vq, zio);

	/*
	 * If the I/O is or was optional and therefore has no data, we need to
	 * simply discard it.  We need to drop the vdev queue's lock to avoid
	 * a deadlock that we could encounter since this I/O will complete
	 * immediately.
	 */
	if (zio->io_flags & ZIO_FLAG_NODATA) {
		mutex_exit(&vq->vq_lock);
		zio_vdev_io_bypass(zio);
		zio_execute(zio);
		mutex_enter(&vq->vq_lock);
		goto again;
	}

	vdev_queue_pending_add(vq, zio);
	vq->vq_last_offset = zio->io_offset + zio->io_size;

	return (zio);
}

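/*
 * Queue an incoming zio and return the next i/o, if any, that the caller
 * should issue to the device right now.  The returned zio is not necessarily
 * the one that was passed in; a NULL return means the caller has nothing to
 * issue (the zio has been queued, or the winner was an aggregate i/o that has
 * already been handed off via zio_nowait()).  I/Os flagged
 * ZIO_FLAG_DONT_QUEUE bypass the queue entirely and are returned as-is.
 */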
zio_t *
vdev_queue_io(zio_t *zio)
{
	vdev_queue_t *vq = &zio->io_vd->vdev_queue;
	zio_t *nio;

	if (zio->io_flags & ZIO_FLAG_DONT_QUEUE)
		return (zio);

	/*
	 * Children i/os inherit their parent's priority, which might
	 * not match the child's i/o type.  Fix it up here.
	 */
	if (zio->io_type == ZIO_TYPE_READ) {
		if (zio->io_priority != ZIO_PRIORITY_SYNC_READ &&
		    zio->io_priority != ZIO_PRIORITY_ASYNC_READ &&
		    zio->io_priority != ZIO_PRIORITY_SCRUB &&
		    zio->io_priority != ZIO_PRIORITY_REMOVAL &&
		    zio->io_priority != ZIO_PRIORITY_INITIALIZING)
			zio->io_priority = ZIO_PRIORITY_ASYNC_READ;
	} else {
		ASSERT(zio->io_type == ZIO_TYPE_WRITE);
		if (zio->io_priority != ZIO_PRIORITY_SYNC_WRITE &&
		    zio->io_priority != ZIO_PRIORITY_ASYNC_WRITE &&
		    zio->io_priority != ZIO_PRIORITY_REMOVAL &&
		    zio->io_priority != ZIO_PRIORITY_INITIALIZING)
			zio->io_priority = ZIO_PRIORITY_ASYNC_WRITE;
	}

	zio->io_flags |= ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE;

	mutex_enter(&vq->vq_lock);
	zio->io_timestamp = gethrtime();
	vdev_queue_io_add(vq, zio);
	nio = vdev_queue_io_to_issue(vq);
	mutex_exit(&vq->vq_lock);

	if (nio == NULL)
		return (NULL);

	if (nio->io_done == vdev_queue_agg_io_done) {
		zio_nowait(nio);
		return (NULL);
	}

	return (nio);
}

void
vdev_queue_io_done(zio_t *zio)
{
	vdev_queue_t *vq = &zio->io_vd->vdev_queue;
	zio_t *nio;

	mutex_enter(&vq->vq_lock);

	vdev_queue_pending_remove(vq, zio);

	vq->vq_io_complete_ts = gethrtime();

	while ((nio = vdev_queue_io_to_issue(vq)) != NULL) {
		mutex_exit(&vq->vq_lock);
		if (nio->io_done == vdev_queue_agg_io_done) {
			zio_nowait(nio);
		} else {
			zio_vdev_io_reissue(nio);
			zio_execute(nio);
		}
		mutex_enter(&vq->vq_lock);
	}

	mutex_exit(&vq->vq_lock);
}

void
vdev_queue_change_io_priority(zio_t *zio, zio_priority_t priority)
{
	vdev_queue_t *vq = &zio->io_vd->vdev_queue;
	avl_tree_t *tree;

	/*
	 * ZIO_PRIORITY_NOW is used by the vdev cache code and the aggregate
	 * zio code to issue IOs without adding them to the vdev queue.  In
	 * this case, the zio is already going to be issued as quickly as
	 * possible and so it doesn't need any reprioritization to help.
	 */
	if (zio->io_priority == ZIO_PRIORITY_NOW)
		return;

	ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
	ASSERT3U(priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);

	if (zio->io_type == ZIO_TYPE_READ) {
		if (priority != ZIO_PRIORITY_SYNC_READ &&
		    priority != ZIO_PRIORITY_ASYNC_READ &&
		    priority != ZIO_PRIORITY_SCRUB)
			priority = ZIO_PRIORITY_ASYNC_READ;
	} else {
		ASSERT(zio->io_type == ZIO_TYPE_WRITE);
		if (priority != ZIO_PRIORITY_SYNC_WRITE &&
		    priority != ZIO_PRIORITY_ASYNC_WRITE)
			priority = ZIO_PRIORITY_ASYNC_WRITE;
	}

	mutex_enter(&vq->vq_lock);

	/*
	 * If the zio is in none of the queues we can simply change
	 * the priority.
	 * If the zio is waiting to be submitted we must remove it from
	 * the queue and re-insert it with the new priority.  Otherwise,
	 * the zio is currently active and we cannot change its priority.
	 */
	tree = vdev_queue_class_tree(vq, zio->io_priority);
	if (avl_find(tree, zio, NULL) == zio) {
		spa_t *spa = zio->io_spa;
		zio_priority_t oldpri = zio->io_priority;

		avl_remove(vdev_queue_class_tree(vq, zio->io_priority), zio);
		zio->io_priority = priority;
		avl_add(vdev_queue_class_tree(vq, zio->io_priority), zio);

		mutex_enter(&spa->spa_iokstat_lock);
		ASSERT3U(spa->spa_queue_stats[oldpri].spa_queued, >, 0);
		spa->spa_queue_stats[oldpri].spa_queued--;
		spa->spa_queue_stats[zio->io_priority].spa_queued++;
		mutex_exit(&spa->spa_iokstat_lock);
	} else if (avl_find(&vq->vq_active_tree, zio, NULL) != zio) {
		zio->io_priority = priority;
	}

	mutex_exit(&vq->vq_lock);
}

/*
 * As these two methods are only used for load calculations, we're not
 * concerned if we get an incorrect value on 32-bit platforms due to the lack
 * of vq_lock mutex use here; instead, we prefer to keep it lock free for
 * performance.
 */
int
vdev_queue_length(vdev_t *vd)
{
	return (avl_numnodes(&vd->vdev_queue.vq_active_tree));
}

uint64_t
vdev_queue_last_offset(vdev_t *vd)
{
	return (vd->vdev_queue.vq_last_offset);
}