4045 zfs write throttle & i/o scheduler performance work
Reviewed by: George Wilson <george.wilson@delphix.com>
Reviewed by: Adam Leventhal <ahl@delphix.com>
Reviewed by: Christopher Siden <christopher.siden@delphix.com>
--- old/usr/src/uts/common/fs/zfs/vdev_queue.c
+++ new/usr/src/uts/common/fs/zfs/vdev_queue.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 */
25 25
26 26 /*
27 - * Copyright (c) 2012 by Delphix. All rights reserved.
27 + * Copyright (c) 2013 by Delphix. All rights reserved.
28 28 */
29 29
30 30 #include <sys/zfs_context.h>
31 31 #include <sys/vdev_impl.h>
32 32 #include <sys/spa_impl.h>
33 33 #include <sys/zio.h>
34 34 #include <sys/avl.h>
35 +#include <sys/dsl_pool.h>
35 36
36 37 /*
37 - * These tunables are for performance analysis.
38 + * ZFS I/O Scheduler
39 + * -----------------
40 + *
41 + * ZFS issues I/O operations to leaf vdevs to satisfy and complete zios. The
42 + * I/O scheduler determines when and in what order those operations are
43 + * issued. The I/O scheduler divides operations into five I/O classes
44 + * prioritized in the following order: sync read, sync write, async read,
45 + * async write, and scrub/resilver. Each queue defines the minimum and
46 + * maximum number of concurrent operations that may be issued to the device.
47 + * In addition, the device has an aggregate maximum. Note that the sum of the
48 + * per-queue minimums must not exceed the aggregate maximum, and if the
49 + * aggregate maximum is equal to or greater than the sum of the per-queue
50 + * maximums, the per-queue minimum has no effect.
51 + *
52 + * For many physical devices, throughput increases with the number of
53 + * concurrent operations, but latency typically suffers. Further, physical
54 + * devices typically have a limit at which more concurrent operations have no
55 + * effect on throughput or can actually cause it to decrease.
56 + *
57 + * The scheduler selects the next operation to issue by first looking for an
58 + * I/O class whose minimum has not been satisfied. Once all are satisfied and
59 + * the aggregate maximum has not been hit, the scheduler looks for classes
60 + * whose maximum has not been satisfied. Iteration through the I/O classes is
61 + * done in the order specified above. No further operations are issued if the
62 + * aggregate maximum number of concurrent operations has been hit or if there
63 + * are no operations queued for an I/O class that has not hit its maximum.
64 + * Every time an i/o is queued or an operation completes, the I/O scheduler
65 + * looks for new operations to issue.
66 + *
67 + * All I/O classes have a fixed maximum number of outstanding operations
68 + * except for the async write class. Asynchronous writes represent the data
69 + * that is committed to stable storage during the syncing stage for
70 + * transaction groups (see txg.c). Transaction groups enter the syncing state
71 + * periodically so the number of queued async writes will quickly burst up and
72 + * then bleed down to zero. Rather than servicing them as quickly as possible,
73 + * the I/O scheduler changes the maximum number of active async write i/os
74 + * according to the amount of dirty data in the pool (see dsl_pool.c). Since
75 + * both throughput and latency typically increase with the number of
76 + * concurrent operations issued to physical devices, reducing the burstiness
77 + * in the number of concurrent operations also stabilizes the response time of
78 + * operations from other -- and in particular synchronous -- queues. In broad
79 + * strokes, the I/O scheduler will issue more concurrent operations from the
80 + * async write queue as there's more dirty data in the pool.
81 + *
82 + * Async Writes
83 + *
84 + * The number of concurrent operations issued for the async write I/O class
85 + * follows a piece-wise linear function defined by a few adjustable points.
86 + *
87 + * | o---------| <-- zfs_vdev_async_write_max_active
88 + * ^ | /^ |
89 + * | | / | |
90 + * active | / | |
91 + * I/O | / | |
92 + * count | / | |
93 + * | / | |
94 + * |------------o | | <-- zfs_vdev_async_write_min_active
95 + * 0|____________^______|_________|
96 + * 0% | | 100% of zfs_dirty_data_max
97 + * | |
98 + * | `-- zfs_vdev_async_write_active_max_dirty_percent
99 + * `--------- zfs_vdev_async_write_active_min_dirty_percent
100 + *
101 + * Until the amount of dirty data exceeds a minimum percentage of the dirty
102 + * data allowed in the pool, the I/O scheduler will limit the number of
103 + * concurrent operations to the minimum. As that threshold is crossed, the
104 + * number of concurrent operations issued increases linearly to the maximum at
105 + * the specified maximum percentage of the dirty data allowed in the pool.
106 + *
107 + * Ideally, the amount of dirty data on a busy pool will stay in the sloped
108 + * part of the function between zfs_vdev_async_write_active_min_dirty_percent
109 + * and zfs_vdev_async_write_active_max_dirty_percent. If it exceeds the
110 + * maximum percentage, this indicates that the rate of incoming data is
111 + * greater than the rate that the backend storage can handle. In this case, we
112 + * must further throttle incoming writes (see dmu_tx_delay() for details).
38 113 */
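
To make the slope above concrete, the small editorial sketch below (not part of the webrev) evaluates the piece-wise linear function at a single point. The 4 GiB zfs_dirty_data_max is a hypothetical value; the other constants are the defaults this patch introduces, and the truncating integer division mirrors vdev_queue_max_async_writes() further down.

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
        uint64_t dirty_max = 4ULL << 30;        /* hypothetical 4 GiB */
        uint64_t dirty = dirty_max * 45 / 100;  /* 45% of the limit */
        uint64_t lo = dirty_max * 30 / 100;     /* ..._active_min_dirty_percent */
        uint64_t hi = dirty_max * 60 / 100;     /* ..._active_max_dirty_percent */
        int min_w = 1;                          /* ..._write_min_active */
        int max_w = 10;                         /* ..._write_max_active */
        int writes;

        if (dirty < lo)
                writes = min_w;
        else if (dirty > hi)
                writes = max_w;
        else
                writes = (int)((dirty - lo) * (max_w - min_w) /
                    (hi - lo)) + min_w;

        /* 45% is midway up the slope: prints "async write max_active = 5" */
        (void) printf("async write max_active = %d\n", writes);
        return (0);
}
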
39 114
40 -/* The maximum number of I/Os concurrently pending to each device. */
41 -int zfs_vdev_max_pending = 10;
115 +/*
116 + * The maximum number of i/os active to each device. Ideally, this will be >=
117 + * the sum of each queue's max_active. It must be at least the sum of each
118 + * queue's min_active.
119 + */
120 +uint32_t zfs_vdev_max_active = 1000;
42 121
43 122 /*
44 - * The initial number of I/Os pending to each device, before it starts ramping
45 - * up to zfs_vdev_max_pending.
123 + * Per-queue limits on the number of i/os active to each device. If the
124 + * sum of the queues' max_active is > zfs_vdev_max_active, then the
125 + * min_active comes into play. We will send min_active from each queue,
126 + * and then select from queues in the order defined by zio_priority_t.
127 + *
128 + * In general, smaller max_active's will lead to lower latency of synchronous
129 + * operations. Larger max_active's may lead to higher overall throughput,
130 + * depending on underlying storage.
131 + *
132 + * The ratio of the queues' max_actives determines the balance of performance
133 + * between reads, writes, and scrubs. E.g., increasing
134 + * zfs_vdev_scrub_max_active will cause the scrub or resilver to complete
135 + * more quickly, but reads and writes to have higher latency and lower
136 + * throughput.
46 137 */
47 -int zfs_vdev_min_pending = 4;
138 +uint32_t zfs_vdev_sync_read_min_active = 10;
139 +uint32_t zfs_vdev_sync_read_max_active = 10;
140 +uint32_t zfs_vdev_sync_write_min_active = 10;
141 +uint32_t zfs_vdev_sync_write_max_active = 10;
142 +uint32_t zfs_vdev_async_read_min_active = 1;
143 +uint32_t zfs_vdev_async_read_max_active = 3;
144 +uint32_t zfs_vdev_async_write_min_active = 1;
145 +uint32_t zfs_vdev_async_write_max_active = 10;
146 +uint32_t zfs_vdev_scrub_min_active = 1;
147 +uint32_t zfs_vdev_scrub_max_active = 2;
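
An editorial cross-check of these defaults against the constraints documented above (not part of the webrev):

/*
 * With the defaults above, the per-queue minimums sum to
 * 10 + 10 + 1 + 1 + 1 = 23 and the maximums to 10 + 10 + 3 + 10 + 2 = 35.
 * Both are well below zfs_vdev_max_active (1000), so the requirement that
 * the sum of the minimums not exceed the aggregate maximum holds, and
 * because the aggregate maximum exceeds the sum of the max_actives, the
 * per-queue minimums have no effect until these tunables are changed.
 */
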
48 148
49 149 /*
50 - * The deadlines are grouped into buckets based on zfs_vdev_time_shift:
51 - * deadline = pri + gethrtime() >> time_shift)
150 + * When the pool has less than zfs_vdev_async_write_active_min_dirty_percent
151 + * dirty data, use zfs_vdev_async_write_min_active. When it has more than
152 + * zfs_vdev_async_write_active_max_dirty_percent, use
153 + * zfs_vdev_async_write_max_active. The value is linearly interpolated
154 + * between min and max.
52 155 */
53 -int zfs_vdev_time_shift = 29; /* each bucket is 0.537 seconds */
156 +int zfs_vdev_async_write_active_min_dirty_percent = 30;
157 +int zfs_vdev_async_write_active_max_dirty_percent = 60;
54 158
55 -/* exponential I/O issue ramp-up rate */
56 -int zfs_vdev_ramp_rate = 2;
57 -
58 159 /*
59 160 * To reduce IOPs, we aggregate small adjacent I/Os into one large I/O.
60 161 * For read I/Os, we also aggregate across small adjacency gaps; for writes
61 162 * we include spans of optional I/Os to aid aggregation at the disk even when
62 163 * they aren't able to help us aggregate at this level.
63 164 */
64 165 int zfs_vdev_aggregation_limit = SPA_MAXBLOCKSIZE;
65 166 int zfs_vdev_read_gap_limit = 32 << 10;
66 167 int zfs_vdev_write_gap_limit = 4 << 10;
67 168
68 -/*
69 - * Virtual device vector for disk I/O scheduling.
70 - */
71 169 int
72 -vdev_queue_deadline_compare(const void *x1, const void *x2)
170 +vdev_queue_offset_compare(const void *x1, const void *x2)
73 171 {
74 172 const zio_t *z1 = x1;
75 173 const zio_t *z2 = x2;
76 174
77 - if (z1->io_deadline < z2->io_deadline)
78 - return (-1);
79 - if (z1->io_deadline > z2->io_deadline)
80 - return (1);
81 -
82 175 if (z1->io_offset < z2->io_offset)
83 176 return (-1);
84 177 if (z1->io_offset > z2->io_offset)
85 178 return (1);
86 179
87 180 if (z1 < z2)
88 181 return (-1);
89 182 if (z1 > z2)
90 183 return (1);
91 184
92 185 return (0);
93 186 }
94 187
95 188 int
96 -vdev_queue_offset_compare(const void *x1, const void *x2)
189 +vdev_queue_timestamp_compare(const void *x1, const void *x2)
97 190 {
98 191 const zio_t *z1 = x1;
99 192 const zio_t *z2 = x2;
100 193
101 - if (z1->io_offset < z2->io_offset)
194 + if (z1->io_timestamp < z2->io_timestamp)
102 195 return (-1);
103 - if (z1->io_offset > z2->io_offset)
196 + if (z1->io_timestamp > z2->io_timestamp)
104 197 return (1);
105 198
106 199 if (z1 < z2)
107 200 return (-1);
108 201 if (z1 > z2)
109 202 return (1);
110 203
111 204 return (0);
112 205 }
113 206
114 207 void
115 208 vdev_queue_init(vdev_t *vd)
116 209 {
117 210 vdev_queue_t *vq = &vd->vdev_queue;
118 211
119 212 mutex_init(&vq->vq_lock, NULL, MUTEX_DEFAULT, NULL);
213 + vq->vq_vdev = vd;
120 214
121 - avl_create(&vq->vq_deadline_tree, vdev_queue_deadline_compare,
122 - sizeof (zio_t), offsetof(struct zio, io_deadline_node));
215 + avl_create(&vq->vq_active_tree, vdev_queue_offset_compare,
216 + sizeof (zio_t), offsetof(struct zio, io_queue_node));
123 217
124 - avl_create(&vq->vq_read_tree, vdev_queue_offset_compare,
125 - sizeof (zio_t), offsetof(struct zio, io_offset_node));
126 -
127 - avl_create(&vq->vq_write_tree, vdev_queue_offset_compare,
128 - sizeof (zio_t), offsetof(struct zio, io_offset_node));
129 -
130 - avl_create(&vq->vq_pending_tree, vdev_queue_offset_compare,
131 - sizeof (zio_t), offsetof(struct zio, io_offset_node));
218 + for (zio_priority_t p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++) {
219 + /*
220 + * The synchronous i/o queues are FIFO rather than LBA ordered.
221 + * This provides more consistent latency for these i/os, and
222 + * they tend to not be tightly clustered anyway so there is
223 + * little to no throughput loss.
224 + */
225 + boolean_t fifo = (p == ZIO_PRIORITY_SYNC_READ ||
226 + p == ZIO_PRIORITY_SYNC_WRITE);
227 + avl_create(&vq->vq_class[p].vqc_queued_tree,
228 + fifo ? vdev_queue_timestamp_compare :
229 + vdev_queue_offset_compare,
230 + sizeof (zio_t), offsetof(struct zio, io_queue_node));
231 + }
132 232 }
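
An editorial note on the comparator choice, not from the webrev:

/*
 * Example: if a sync read at offset 1G is queued before one at offset 0,
 * the timestamp comparator still issues the older i/o first, so newer
 * low-offset arrivals can never starve it. The offset comparator used
 * for the async and scrub queues instead sorts by LBA, which is what
 * lets vdev_queue_aggregate() find adjacent i/os and lets the issue
 * path walk forward from vq_last_offset in elevator fashion.
 */
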
133 233
134 234 void
135 235 vdev_queue_fini(vdev_t *vd)
136 236 {
137 237 vdev_queue_t *vq = &vd->vdev_queue;
138 238
139 - avl_destroy(&vq->vq_deadline_tree);
140 - avl_destroy(&vq->vq_read_tree);
141 - avl_destroy(&vq->vq_write_tree);
142 - avl_destroy(&vq->vq_pending_tree);
239 + for (zio_priority_t p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++)
240 + avl_destroy(&vq->vq_class[p].vqc_queued_tree);
241 + avl_destroy(&vq->vq_active_tree);
143 242
144 243 mutex_destroy(&vq->vq_lock);
145 244 }
146 245
147 246 static void
148 247 vdev_queue_io_add(vdev_queue_t *vq, zio_t *zio)
149 248 {
150 249 spa_t *spa = zio->io_spa;
151 - avl_add(&vq->vq_deadline_tree, zio);
152 - avl_add(zio->io_vdev_tree, zio);
250 + ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
251 + avl_add(&vq->vq_class[zio->io_priority].vqc_queued_tree, zio);
153 252
154 - if (spa->spa_iokstat != NULL) {
155 - mutex_enter(&spa->spa_iokstat_lock);
253 + mutex_enter(&spa->spa_iokstat_lock);
254 + spa->spa_queue_stats[zio->io_priority].spa_queued++;
255 + if (spa->spa_iokstat != NULL)
156 256 kstat_waitq_enter(spa->spa_iokstat->ks_data);
157 - mutex_exit(&spa->spa_iokstat_lock);
158 - }
257 + mutex_exit(&spa->spa_iokstat_lock);
159 258 }
160 259
161 260 static void
162 261 vdev_queue_io_remove(vdev_queue_t *vq, zio_t *zio)
163 262 {
164 263 spa_t *spa = zio->io_spa;
165 - avl_remove(&vq->vq_deadline_tree, zio);
166 - avl_remove(zio->io_vdev_tree, zio);
264 + ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
265 + avl_remove(&vq->vq_class[zio->io_priority].vqc_queued_tree, zio);
167 266
168 - if (spa->spa_iokstat != NULL) {
169 - mutex_enter(&spa->spa_iokstat_lock);
267 + mutex_enter(&spa->spa_iokstat_lock);
268 + ASSERT3U(spa->spa_queue_stats[zio->io_priority].spa_queued, >, 0);
269 + spa->spa_queue_stats[zio->io_priority].spa_queued--;
270 + if (spa->spa_iokstat != NULL)
170 271 kstat_waitq_exit(spa->spa_iokstat->ks_data);
171 - mutex_exit(&spa->spa_iokstat_lock);
172 - }
272 + mutex_exit(&spa->spa_iokstat_lock);
173 273 }
174 274
175 275 static void
176 276 vdev_queue_pending_add(vdev_queue_t *vq, zio_t *zio)
177 277 {
178 278 spa_t *spa = zio->io_spa;
179 - avl_add(&vq->vq_pending_tree, zio);
180 - if (spa->spa_iokstat != NULL) {
181 - mutex_enter(&spa->spa_iokstat_lock);
279 + ASSERT(MUTEX_HELD(&vq->vq_lock));
280 + ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
281 + vq->vq_class[zio->io_priority].vqc_active++;
282 + avl_add(&vq->vq_active_tree, zio);
283 +
284 + mutex_enter(&spa->spa_iokstat_lock);
285 + spa->spa_queue_stats[zio->io_priority].spa_active++;
286 + if (spa->spa_iokstat != NULL)
182 287 kstat_runq_enter(spa->spa_iokstat->ks_data);
183 - mutex_exit(&spa->spa_iokstat_lock);
184 - }
288 + mutex_exit(&spa->spa_iokstat_lock);
185 289 }
186 290
187 291 static void
188 292 vdev_queue_pending_remove(vdev_queue_t *vq, zio_t *zio)
189 293 {
190 294 spa_t *spa = zio->io_spa;
191 - avl_remove(&vq->vq_pending_tree, zio);
295 + ASSERT(MUTEX_HELD(&vq->vq_lock));
296 + ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
297 + vq->vq_class[zio->io_priority].vqc_active--;
298 + avl_remove(&vq->vq_active_tree, zio);
299 +
300 + mutex_enter(&spa->spa_iokstat_lock);
301 + ASSERT3U(spa->spa_queue_stats[zio->io_priority].spa_active, >, 0);
302 + spa->spa_queue_stats[zio->io_priority].spa_active--;
192 303 if (spa->spa_iokstat != NULL) {
193 304 kstat_io_t *ksio = spa->spa_iokstat->ks_data;
194 305
195 - mutex_enter(&spa->spa_iokstat_lock);
196 306 kstat_runq_exit(spa->spa_iokstat->ks_data);
197 307 if (zio->io_type == ZIO_TYPE_READ) {
198 308 ksio->reads++;
199 309 ksio->nread += zio->io_size;
200 310 } else if (zio->io_type == ZIO_TYPE_WRITE) {
201 311 ksio->writes++;
202 312 ksio->nwritten += zio->io_size;
203 313 }
204 - mutex_exit(&spa->spa_iokstat_lock);
205 314 }
315 + mutex_exit(&spa->spa_iokstat_lock);
206 316 }
207 317
208 318 static void
209 319 vdev_queue_agg_io_done(zio_t *aio)
210 320 {
211 - zio_t *pio;
212 -
213 - while ((pio = zio_walk_parents(aio)) != NULL)
214 - if (aio->io_type == ZIO_TYPE_READ)
321 + if (aio->io_type == ZIO_TYPE_READ) {
322 + zio_t *pio;
323 + while ((pio = zio_walk_parents(aio)) != NULL) {
215 324 bcopy((char *)aio->io_data + (pio->io_offset -
216 325 aio->io_offset), pio->io_data, pio->io_size);
326 + }
327 + }
217 328
218 329 zio_buf_free(aio->io_data, aio->io_size);
219 330 }
220 331
332 +static int
333 +vdev_queue_class_min_active(zio_priority_t p)
334 +{
335 + switch (p) {
336 + case ZIO_PRIORITY_SYNC_READ:
337 + return (zfs_vdev_sync_read_min_active);
338 + case ZIO_PRIORITY_SYNC_WRITE:
339 + return (zfs_vdev_sync_write_min_active);
340 + case ZIO_PRIORITY_ASYNC_READ:
341 + return (zfs_vdev_async_read_min_active);
342 + case ZIO_PRIORITY_ASYNC_WRITE:
343 + return (zfs_vdev_async_write_min_active);
344 + case ZIO_PRIORITY_SCRUB:
345 + return (zfs_vdev_scrub_min_active);
346 + default:
347 + panic("invalid priority %u", p);
348 + return (0);
349 + }
350 +}
351 +
352 +static int
353 +vdev_queue_max_async_writes(uint64_t dirty)
354 +{
355 + int writes;
356 + uint64_t min_bytes = zfs_dirty_data_max *
357 + zfs_vdev_async_write_active_min_dirty_percent / 100;
358 + uint64_t max_bytes = zfs_dirty_data_max *
359 + zfs_vdev_async_write_active_max_dirty_percent / 100;
360 +
361 + if (dirty < min_bytes)
362 + return (zfs_vdev_async_write_min_active);
363 + if (dirty > max_bytes)
364 + return (zfs_vdev_async_write_max_active);
365 +
366 + /*
367 + * linear interpolation:
368 + * slope = (max_writes - min_writes) / (max_bytes - min_bytes)
369 + * move right by min_bytes
370 + * move up by min_writes
371 + */
372 + writes = (dirty - min_bytes) *
373 + (zfs_vdev_async_write_max_active -
374 + zfs_vdev_async_write_min_active) /
375 + (max_bytes - min_bytes) +
376 + zfs_vdev_async_write_min_active;
377 + ASSERT3U(writes, >=, zfs_vdev_async_write_min_active);
378 + ASSERT3U(writes, <=, zfs_vdev_async_write_max_active);
379 + return (writes);
380 +}
381 +
382 +static int
383 +vdev_queue_class_max_active(spa_t *spa, zio_priority_t p)
384 +{
385 + switch (p) {
386 + case ZIO_PRIORITY_SYNC_READ:
387 + return (zfs_vdev_sync_read_max_active);
388 + case ZIO_PRIORITY_SYNC_WRITE:
389 + return (zfs_vdev_sync_write_max_active);
390 + case ZIO_PRIORITY_ASYNC_READ:
391 + return (zfs_vdev_async_read_max_active);
392 + case ZIO_PRIORITY_ASYNC_WRITE:
393 + return (vdev_queue_max_async_writes(
394 + spa->spa_dsl_pool->dp_dirty_total));
395 + case ZIO_PRIORITY_SCRUB:
396 + return (zfs_vdev_scrub_max_active);
397 + default:
398 + panic("invalid priority %u", p);
399 + return (0);
400 + }
401 +}
402 +
221 403 /*
404 + * Return the i/o class to issue from, or ZIO_PRIORITY_NUM_QUEUEABLE if
405 + * there is no eligible class.
406 + */
407 +static zio_priority_t
408 +vdev_queue_class_to_issue(vdev_queue_t *vq)
409 +{
410 + spa_t *spa = vq->vq_vdev->vdev_spa;
411 + zio_priority_t p;
412 +
413 + if (avl_numnodes(&vq->vq_active_tree) >= zfs_vdev_max_active)
414 + return (ZIO_PRIORITY_NUM_QUEUEABLE);
415 +
416 + /* find a queue that has not reached its minimum # outstanding i/os */
417 + for (p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++) {
418 + if (avl_numnodes(&vq->vq_class[p].vqc_queued_tree) > 0 &&
419 + vq->vq_class[p].vqc_active <
420 + vdev_queue_class_min_active(p))
421 + return (p);
422 + }
423 +
424 + /*
425 + * If we haven't found a queue, look for one that hasn't reached its
426 + * maximum # outstanding i/os.
427 + */
428 + for (p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++) {
429 + if (avl_numnodes(&vq->vq_class[p].vqc_queued_tree) > 0 &&
430 + vq->vq_class[p].vqc_active <
431 + vdev_queue_class_max_active(spa, p))
432 + return (p);
433 + }
434 +
435 + /* No eligible queued i/os */
436 + return (ZIO_PRIORITY_NUM_QUEUEABLE);
437 +}
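
A worked editorial example of the two scans above, not from the webrev:

/*
 * Suppose sync reads have 10 i/os active (their minimum and maximum)
 * with more queued, async writes have 2 active with more queued, and a
 * scrub has 0 active and work queued. The first loop returns
 * ZIO_PRIORITY_SCRUB: the scrub class is below its minimum (1) while
 * the others have met theirs. Only once every class has its minimum
 * does the second loop hand out remaining slots in strict priority
 * order, up to each class's maximum and the aggregate limit.
 */
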
438 +
439 +/*
222 440 * Compute the range spanned by two i/os, which is the endpoint of the last
223 441 * (lio->io_offset + lio->io_size) minus start of the first (fio->io_offset).
224 442 * Conveniently, the gap between fio and lio is given by -IO_SPAN(lio, fio);
225 443 * thus fio and lio are adjacent if and only if IO_SPAN(lio, fio) == 0.
226 444 */
227 445 #define IO_SPAN(fio, lio) ((lio)->io_offset + (lio)->io_size - (fio)->io_offset)
228 446 #define IO_GAP(fio, lio) (-IO_SPAN(lio, fio))
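
A worked editorial example of these macros, not from the webrev:

/*
 * For fio covering [0, 4K) and lio covering [8K, 12K):
 * IO_SPAN(fio, lio) = 8K + 4K - 0 = 12K, and
 * IO_GAP(fio, lio) = -IO_SPAN(lio, fio) = -(0 + 4K - 8K) = 4K,
 * the hole between them. If lio instead began at 4K, the gap would be
 * 0 and the two i/os would be adjacent.
 */
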
229 447
230 448 static zio_t *
231 -vdev_queue_io_to_issue(vdev_queue_t *vq, uint64_t pending_limit)
449 +vdev_queue_aggregate(vdev_queue_t *vq, zio_t *zio)
232 450 {
233 - zio_t *fio, *lio, *aio, *dio, *nio, *mio;
234 - avl_tree_t *t;
235 - int flags;
236 - uint64_t maxspan = zfs_vdev_aggregation_limit;
237 - uint64_t maxgap;
238 - int stretch;
451 + zio_t *first, *last, *aio, *dio, *mandatory, *nio;
452 + uint64_t maxgap = 0;
453 + uint64_t size;
454 + boolean_t stretch = B_FALSE;
455 + vdev_queue_class_t *vqc = &vq->vq_class[zio->io_priority];
456 + avl_tree_t *t = &vqc->vqc_queued_tree;
457 + enum zio_flag flags = zio->io_flags & ZIO_FLAG_AGG_INHERIT;
239 458
240 -again:
241 - ASSERT(MUTEX_HELD(&vq->vq_lock));
459 + if (zio->io_flags & ZIO_FLAG_DONT_AGGREGATE)
460 + return (NULL);
242 461
243 - if (avl_numnodes(&vq->vq_pending_tree) >= pending_limit ||
244 - avl_numnodes(&vq->vq_deadline_tree) == 0)
462 + /*
463 + * The synchronous i/o queues are not sorted by LBA, so we can't
464 + * find adjacent i/os. These i/os tend to not be tightly clustered,
465 + * or too large to aggregate, so this has little impact on performance.
466 + */
467 + if (zio->io_priority == ZIO_PRIORITY_SYNC_READ ||
468 + zio->io_priority == ZIO_PRIORITY_SYNC_WRITE)
245 469 return (NULL);
246 470
247 - fio = lio = avl_first(&vq->vq_deadline_tree);
471 + first = last = zio;
248 472
249 - t = fio->io_vdev_tree;
250 - flags = fio->io_flags & ZIO_FLAG_AGG_INHERIT;
251 - maxgap = (t == &vq->vq_read_tree) ? zfs_vdev_read_gap_limit : 0;
473 + if (zio->io_type == ZIO_TYPE_READ)
474 + maxgap = zfs_vdev_read_gap_limit;
252 475
253 - if (!(flags & ZIO_FLAG_DONT_AGGREGATE)) {
254 - /*
255 - * We can aggregate I/Os that are sufficiently adjacent and of
256 - * the same flavor, as expressed by the AGG_INHERIT flags.
257 - * The latter requirement is necessary so that certain
258 - * attributes of the I/O, such as whether it's a normal I/O
259 - * or a scrub/resilver, can be preserved in the aggregate.
260 - * We can include optional I/Os, but don't allow them
261 - * to begin a range as they add no benefit in that situation.
262 - */
476 + /*
477 + * We can aggregate I/Os that are sufficiently adjacent and of
478 + * the same flavor, as expressed by the AGG_INHERIT flags.
479 + * The latter requirement is necessary so that certain
480 + * attributes of the I/O, such as whether it's a normal I/O
481 + * or a scrub/resilver, can be preserved in the aggregate.
482 + * We can include optional I/Os, but don't allow them
483 + * to begin a range as they add no benefit in that situation.
484 + */
263 485
264 - /*
265 - * We keep track of the last non-optional I/O.
266 - */
267 - mio = (fio->io_flags & ZIO_FLAG_OPTIONAL) ? NULL : fio;
486 + /*
487 + * We keep track of the last non-optional I/O.
488 + */
489 + mandatory = (first->io_flags & ZIO_FLAG_OPTIONAL) ? NULL : first;
268 490
269 - /*
270 - * Walk backwards through sufficiently contiguous I/Os
271 - * recording the last non-optional I/O.
272 - */
273 - while ((dio = AVL_PREV(t, fio)) != NULL &&
274 - (dio->io_flags & ZIO_FLAG_AGG_INHERIT) == flags &&
275 - IO_SPAN(dio, lio) <= maxspan &&
276 - IO_GAP(dio, fio) <= maxgap) {
277 - fio = dio;
278 - if (mio == NULL && !(fio->io_flags & ZIO_FLAG_OPTIONAL))
279 - mio = fio;
280 - }
491 + /*
492 + * Walk backwards through sufficiently contiguous I/Os
493 + * recording the last non-optional I/O.
494 + */
495 + while ((dio = AVL_PREV(t, first)) != NULL &&
496 + (dio->io_flags & ZIO_FLAG_AGG_INHERIT) == flags &&
497 + IO_SPAN(dio, last) <= zfs_vdev_aggregation_limit &&
498 + IO_GAP(dio, first) <= maxgap) {
499 + first = dio;
500 + if (mandatory == NULL && !(first->io_flags & ZIO_FLAG_OPTIONAL))
501 + mandatory = first;
502 + }
281 503
282 - /*
283 - * Skip any initial optional I/Os.
284 - */
285 - while ((fio->io_flags & ZIO_FLAG_OPTIONAL) && fio != lio) {
286 - fio = AVL_NEXT(t, fio);
287 - ASSERT(fio != NULL);
288 - }
504 + /*
505 + * Skip any initial optional I/Os.
506 + */
507 + while ((first->io_flags & ZIO_FLAG_OPTIONAL) && first != last) {
508 + first = AVL_NEXT(t, first);
509 + ASSERT(first != NULL);
510 + }
289 511
290 - /*
291 - * Walk forward through sufficiently contiguous I/Os.
292 - */
293 - while ((dio = AVL_NEXT(t, lio)) != NULL &&
294 - (dio->io_flags & ZIO_FLAG_AGG_INHERIT) == flags &&
295 - IO_SPAN(fio, dio) <= maxspan &&
296 - IO_GAP(lio, dio) <= maxgap) {
297 - lio = dio;
298 - if (!(lio->io_flags & ZIO_FLAG_OPTIONAL))
299 - mio = lio;
300 - }
512 + /*
513 + * Walk forward through sufficiently contiguous I/Os.
514 + */
515 + while ((dio = AVL_NEXT(t, last)) != NULL &&
516 + (dio->io_flags & ZIO_FLAG_AGG_INHERIT) == flags &&
517 + IO_SPAN(first, dio) <= zfs_vdev_aggregation_limit &&
518 + IO_GAP(last, dio) <= maxgap) {
519 + last = dio;
520 + if (!(last->io_flags & ZIO_FLAG_OPTIONAL))
521 + mandatory = last;
522 + }
301 523
302 - /*
303 - * Now that we've established the range of the I/O aggregation
304 - * we must decide what to do with trailing optional I/Os.
305 - * For reads, there's nothing to do. While we are unable to
306 - * aggregate further, it's possible that a trailing optional
307 - * I/O would allow the underlying device to aggregate with
308 - * subsequent I/Os. We must therefore determine if the next
309 - * non-optional I/O is close enough to make aggregation
310 - * worthwhile.
311 - */
312 - stretch = B_FALSE;
313 - if (t != &vq->vq_read_tree && mio != NULL) {
314 - nio = lio;
315 - while ((dio = AVL_NEXT(t, nio)) != NULL &&
316 - IO_GAP(nio, dio) == 0 &&
317 - IO_GAP(mio, dio) <= zfs_vdev_write_gap_limit) {
318 - nio = dio;
319 - if (!(nio->io_flags & ZIO_FLAG_OPTIONAL)) {
320 - stretch = B_TRUE;
321 - break;
322 - }
524 + /*
525 + * Now that we've established the range of the I/O aggregation
526 + * we must decide what to do with trailing optional I/Os.
527 + * For reads, there's nothing to do. While we are unable to
528 + * aggregate further, it's possible that a trailing optional
529 + * I/O would allow the underlying device to aggregate with
530 + * subsequent I/Os. We must therefore determine if the next
531 + * non-optional I/O is close enough to make aggregation
532 + * worthwhile.
533 + */
534 + if (zio->io_type == ZIO_TYPE_WRITE && mandatory != NULL) {
535 + zio_t *nio = last;
536 + while ((dio = AVL_NEXT(t, nio)) != NULL &&
537 + IO_GAP(nio, dio) == 0 &&
538 + IO_GAP(mandatory, dio) <= zfs_vdev_write_gap_limit) {
539 + nio = dio;
540 + if (!(nio->io_flags & ZIO_FLAG_OPTIONAL)) {
541 + stretch = B_TRUE;
542 + break;
323 543 }
324 544 }
545 + }
325 546
326 - if (stretch) {
327 - /* This may be a no-op. */
328 - VERIFY((dio = AVL_NEXT(t, lio)) != NULL);
329 - dio->io_flags &= ~ZIO_FLAG_OPTIONAL;
330 - } else {
331 - while (lio != mio && lio != fio) {
332 - ASSERT(lio->io_flags & ZIO_FLAG_OPTIONAL);
333 - lio = AVL_PREV(t, lio);
334 - ASSERT(lio != NULL);
335 - }
547 + if (stretch) {
548 + /* This may be a no-op. */
549 + dio = AVL_NEXT(t, last);
550 + dio->io_flags &= ~ZIO_FLAG_OPTIONAL;
551 + } else {
552 + while (last != mandatory && last != first) {
553 + ASSERT(last->io_flags & ZIO_FLAG_OPTIONAL);
554 + last = AVL_PREV(t, last);
555 + ASSERT(last != NULL);
336 556 }
337 557 }
338 558
339 - if (fio != lio) {
340 - uint64_t size = IO_SPAN(fio, lio);
341 - ASSERT(size <= zfs_vdev_aggregation_limit);
559 + if (first == last)
560 + return (NULL);
342 561
343 - aio = zio_vdev_delegated_io(fio->io_vd, fio->io_offset,
344 - zio_buf_alloc(size), size, fio->io_type, ZIO_PRIORITY_AGG,
345 - flags | ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE,
346 - vdev_queue_agg_io_done, NULL);
347 - aio->io_timestamp = fio->io_timestamp;
562 + size = IO_SPAN(first, last);
563 + ASSERT3U(size, <=, zfs_vdev_aggregation_limit);
348 564
349 - nio = fio;
350 - do {
351 - dio = nio;
352 - nio = AVL_NEXT(t, dio);
353 - ASSERT(dio->io_type == aio->io_type);
354 - ASSERT(dio->io_vdev_tree == t);
565 + aio = zio_vdev_delegated_io(first->io_vd, first->io_offset,
566 + zio_buf_alloc(size), size, first->io_type, zio->io_priority,
567 + flags | ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE,
568 + vdev_queue_agg_io_done, NULL);
569 + aio->io_timestamp = first->io_timestamp;
355 570
356 - if (dio->io_flags & ZIO_FLAG_NODATA) {
357 - ASSERT(dio->io_type == ZIO_TYPE_WRITE);
358 - bzero((char *)aio->io_data + (dio->io_offset -
359 - aio->io_offset), dio->io_size);
360 - } else if (dio->io_type == ZIO_TYPE_WRITE) {
361 - bcopy(dio->io_data, (char *)aio->io_data +
362 - (dio->io_offset - aio->io_offset),
363 - dio->io_size);
364 - }
571 + nio = first;
572 + do {
573 + dio = nio;
574 + nio = AVL_NEXT(t, dio);
575 + ASSERT3U(dio->io_type, ==, aio->io_type);
365 576
366 - zio_add_child(dio, aio);
367 - vdev_queue_io_remove(vq, dio);
368 - zio_vdev_io_bypass(dio);
369 - zio_execute(dio);
370 - } while (dio != lio);
577 + if (dio->io_flags & ZIO_FLAG_NODATA) {
578 + ASSERT3U(dio->io_type, ==, ZIO_TYPE_WRITE);
579 + bzero((char *)aio->io_data + (dio->io_offset -
580 + aio->io_offset), dio->io_size);
581 + } else if (dio->io_type == ZIO_TYPE_WRITE) {
582 + bcopy(dio->io_data, (char *)aio->io_data +
583 + (dio->io_offset - aio->io_offset),
584 + dio->io_size);
585 + }
371 586
372 - vdev_queue_pending_add(vq, aio);
587 + zio_add_child(dio, aio);
588 + vdev_queue_io_remove(vq, dio);
589 + zio_vdev_io_bypass(dio);
590 + zio_execute(dio);
591 + } while (dio != last);
373 592
374 - return (aio);
593 + return (aio);
594 +}
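
A worked editorial walk-through of the aggregation path, not from the webrev:

/*
 * Suppose the async write queue holds mandatory writes at [0, 8K) and
 * [16K, 24K) with an optional gap-fill write at [8K, 16K) between them.
 * Starting from any of the three, the backward and forward walks
 * collect all of them (each gap is 0 and the 24K span is well under
 * zfs_vdev_aggregation_limit), so a single 24K aggregate is issued.
 * An optional i/o flagged ZIO_FLAG_NODATA contributes zeros to the
 * aggregate's buffer, and each original zio completes as a bypassed
 * child of the aggregate.
 */
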
595 +
596 +static zio_t *
597 +vdev_queue_io_to_issue(vdev_queue_t *vq)
598 +{
599 + zio_t *zio, *aio;
600 + zio_priority_t p;
601 + avl_index_t idx;
602 + vdev_queue_class_t *vqc;
603 + zio_t search;
604 +
605 +again:
606 + ASSERT(MUTEX_HELD(&vq->vq_lock));
607 +
608 + p = vdev_queue_class_to_issue(vq);
609 +
610 + if (p == ZIO_PRIORITY_NUM_QUEUEABLE) {
611 + /* No eligible queued i/os */
612 + return (NULL);
375 613 }
376 614
377 - ASSERT(fio->io_vdev_tree == t);
378 - vdev_queue_io_remove(vq, fio);
615 + /*
616 + * For LBA-ordered queues (async / scrub), issue the i/o which follows
617 + * the most recently issued i/o in LBA (offset) order.
618 + *
619 + * For FIFO queues (sync), issue the i/o with the lowest timestamp.
620 + */
621 + vqc = &vq->vq_class[p];
622 + search.io_timestamp = 0;
623 + search.io_offset = vq->vq_last_offset + 1;
624 + VERIFY3P(avl_find(&vqc->vqc_queued_tree, &search, &idx), ==, NULL);
625 + zio = avl_nearest(&vqc->vqc_queued_tree, idx, AVL_AFTER);
626 + if (zio == NULL)
627 + zio = avl_first(&vqc->vqc_queued_tree);
628 + ASSERT3U(zio->io_priority, ==, p);
379 629
630 + aio = vdev_queue_aggregate(vq, zio);
631 + if (aio != NULL)
632 + zio = aio;
633 + else
634 + vdev_queue_io_remove(vq, zio);
635 +
380 636 /*
381 637 * If the I/O is or was optional and therefore has no data, we need to
382 638 * simply discard it. We need to drop the vdev queue's lock to avoid a
383 639 * deadlock that we could encounter since this I/O will complete
384 640 * immediately.
385 641 */
386 - if (fio->io_flags & ZIO_FLAG_NODATA) {
642 + if (zio->io_flags & ZIO_FLAG_NODATA) {
387 643 mutex_exit(&vq->vq_lock);
388 - zio_vdev_io_bypass(fio);
389 - zio_execute(fio);
644 + zio_vdev_io_bypass(zio);
645 + zio_execute(zio);
390 646 mutex_enter(&vq->vq_lock);
391 647 goto again;
392 648 }
393 649
394 - vdev_queue_pending_add(vq, fio);
650 + vdev_queue_pending_add(vq, zio);
651 + vq->vq_last_offset = zio->io_offset;
395 652
396 - return (fio);
653 + return (zio);
397 654 }
398 655
399 656 zio_t *
400 657 vdev_queue_io(zio_t *zio)
401 658 {
402 659 vdev_queue_t *vq = &zio->io_vd->vdev_queue;
403 660 zio_t *nio;
404 661
405 - ASSERT(zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE);
406 -
407 662 if (zio->io_flags & ZIO_FLAG_DONT_QUEUE)
408 663 return (zio);
409 664
665 + /*
666 + * Children i/os inherit their parent's priority, which might
667 + * not match the child's i/o type. Fix it up here.
668 + */
669 + if (zio->io_type == ZIO_TYPE_READ) {
670 + if (zio->io_priority != ZIO_PRIORITY_SYNC_READ &&
671 + zio->io_priority != ZIO_PRIORITY_ASYNC_READ &&
672 + zio->io_priority != ZIO_PRIORITY_SCRUB)
673 + zio->io_priority = ZIO_PRIORITY_ASYNC_READ;
674 + } else {
675 + ASSERT(zio->io_type == ZIO_TYPE_WRITE);
676 + if (zio->io_priority != ZIO_PRIORITY_SYNC_WRITE &&
677 + zio->io_priority != ZIO_PRIORITY_ASYNC_WRITE)
678 + zio->io_priority = ZIO_PRIORITY_ASYNC_WRITE;
679 + }
680 +
410 681 zio->io_flags |= ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE;
411 682
412 - if (zio->io_type == ZIO_TYPE_READ)
413 - zio->io_vdev_tree = &vq->vq_read_tree;
414 - else
415 - zio->io_vdev_tree = &vq->vq_write_tree;
416 -
417 683 mutex_enter(&vq->vq_lock);
418 -
419 684 zio->io_timestamp = gethrtime();
420 - zio->io_deadline = (zio->io_timestamp >> zfs_vdev_time_shift) +
421 - zio->io_priority;
422 -
423 685 vdev_queue_io_add(vq, zio);
424 -
425 - nio = vdev_queue_io_to_issue(vq, zfs_vdev_min_pending);
426 -
686 + nio = vdev_queue_io_to_issue(vq);
427 687 mutex_exit(&vq->vq_lock);
428 688
429 689 if (nio == NULL)
430 690 return (NULL);
431 691
432 692 if (nio->io_done == vdev_queue_agg_io_done) {
433 693 zio_nowait(nio);
434 694 return (NULL);
435 695 }
436 696
437 697 return (nio);
438 698 }
439 699
440 700 void
441 701 vdev_queue_io_done(zio_t *zio)
442 702 {
443 703 vdev_queue_t *vq = &zio->io_vd->vdev_queue;
704 + zio_t *nio;
444 705
445 706 if (zio_injection_enabled)
446 707 delay(SEC_TO_TICK(zio_handle_io_delay(zio)));
447 708
448 709 mutex_enter(&vq->vq_lock);
449 710
450 711 vdev_queue_pending_remove(vq, zio);
451 712
452 713 vq->vq_io_complete_ts = gethrtime();
453 714
454 - for (int i = 0; i < zfs_vdev_ramp_rate; i++) {
455 - zio_t *nio = vdev_queue_io_to_issue(vq, zfs_vdev_max_pending);
456 - if (nio == NULL)
457 - break;
715 + while ((nio = vdev_queue_io_to_issue(vq)) != NULL) {
458 716 mutex_exit(&vq->vq_lock);
459 717 if (nio->io_done == vdev_queue_agg_io_done) {
460 718 zio_nowait(nio);
461 719 } else {
462 720 zio_vdev_io_reissue(nio);
463 721 zio_execute(nio);
464 722 }
465 723 mutex_enter(&vq->vq_lock);
466 724 }
467 725
468 726 mutex_exit(&vq->vq_lock);
469 727 }