3742 zfs comments need cleaner, more consistent style
Submitted by: Will Andrews <willa@spectralogic.com>
Submitted by: Alan Somers <alans@spectralogic.com>
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed by: George Wilson <george.wilson@delphix.com>
Reviewed by: Eric Schrock <eric.schrock@delphix.com>
--- old/usr/src/uts/common/fs/zfs/vdev_queue.c
+++ new/usr/src/uts/common/fs/zfs/vdev_queue.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 */
25 25
26 26 /*
27 27 * Copyright (c) 2012 by Delphix. All rights reserved.
28 28 */
29 29
30 30 #include <sys/zfs_context.h>
31 31 #include <sys/vdev_impl.h>
32 32 #include <sys/spa_impl.h>
33 33 #include <sys/zio.h>
34 34 #include <sys/avl.h>
35 35
36 36 /*
37 37 * These tunables are for performance analysis.
38 38 */
39 +
40 +/* The maximum number of I/Os concurrently pending to each device. */
41 +int zfs_vdev_max_pending = 10;
42 +
39 43 /*
40 - * zfs_vdev_max_pending is the maximum number of i/os concurrently
41 - * pending to each device. zfs_vdev_min_pending is the initial number
42 - * of i/os pending to each device (before it starts ramping up to
43 - * max_pending).
44 + * The initial number of I/Os pending to each device, before it starts ramping
45 + * up to zfs_vdev_max_pending.
44 46 */
45 -int zfs_vdev_max_pending = 10;
46 47 int zfs_vdev_min_pending = 4;
47 48
48 49 /*
49 50 * The deadlines are grouped into buckets based on zfs_vdev_time_shift:
50 51 * deadline = pri + gethrtime() >> time_shift)
51 52  * deadline = pri + (gethrtime() >> time_shift)
52 53 int zfs_vdev_time_shift = 29; /* each bucket is 0.537 seconds */
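/*
 * (Illustration, using the defaults above: with time_shift = 29, a
 * bucket spans 2^29 ns, about 0.537 s. Two I/Os queued within the same
 * bucket at the same priority get equal deadlines, so
 * vdev_queue_deadline_compare() falls through to ordering them by
 * offset, elevator-style.)
 */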
53 54
54 55 /* exponential I/O issue ramp-up rate */
55 56 int zfs_vdev_ramp_rate = 2;
56 57
57 58 /*
58 59  * To reduce IOPS, we aggregate small adjacent I/Os into one large I/O.
59 60 * For read I/Os, we also aggregate across small adjacency gaps; for writes
60 61 * we include spans of optional I/Os to aid aggregation at the disk even when
61 62 * they aren't able to help us aggregate at this level.
62 63 */
63 64 int zfs_vdev_aggregation_limit = SPA_MAXBLOCKSIZE;
64 65 int zfs_vdev_read_gap_limit = 32 << 10;
65 66 int zfs_vdev_write_gap_limit = 4 << 10;
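/*
 * (Worked example with the defaults above: two 16K reads at offsets 0
 * and 48K leave a 32K gap, within zfs_vdev_read_gap_limit, so they may
 * be issued as one 64K read whose middle 32K is read and discarded.
 * Writes get no read-style gap allowance; instead, optional writes
 * within zfs_vdev_write_gap_limit of a mandatory write may be pulled
 * in to help the disk aggregate.)
 */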
66 67
67 68 /*
68 69 * Virtual device vector for disk I/O scheduling.
69 70 */
70 71 int
71 72 vdev_queue_deadline_compare(const void *x1, const void *x2)
72 73 {
73 74 const zio_t *z1 = x1;
74 75 const zio_t *z2 = x2;
75 76
76 77 if (z1->io_deadline < z2->io_deadline)
77 78 return (-1);
78 79 if (z1->io_deadline > z2->io_deadline)
79 80 return (1);
80 81
81 82 if (z1->io_offset < z2->io_offset)
82 83 return (-1);
83 84 if (z1->io_offset > z2->io_offset)
84 85 return (1);
85 86
86 87 if (z1 < z2)
87 88 return (-1);
88 89 if (z1 > z2)
89 90 return (1);
90 91
91 92 return (0);
92 93 }
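/*
 * (Note: the final pointer comparison above is only a tie-breaker, so
 * that distinct zios with equal deadline and offset still compare
 * unequal; the AVL tree requires a total order over its nodes.)
 */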
93 94
94 95 int
95 96 vdev_queue_offset_compare(const void *x1, const void *x2)
96 97 {
97 98 const zio_t *z1 = x1;
98 99 const zio_t *z2 = x2;
99 100
100 101 if (z1->io_offset < z2->io_offset)
101 102 return (-1);
102 103 if (z1->io_offset > z2->io_offset)
103 104 return (1);
104 105
105 106 if (z1 < z2)
106 107 return (-1);
107 108 if (z1 > z2)
108 109 return (1);
109 110
110 111 return (0);
111 112 }
112 113
113 114 void
114 115 vdev_queue_init(vdev_t *vd)
115 116 {
116 117 vdev_queue_t *vq = &vd->vdev_queue;
117 118
118 119 mutex_init(&vq->vq_lock, NULL, MUTEX_DEFAULT, NULL);
119 120
120 121 avl_create(&vq->vq_deadline_tree, vdev_queue_deadline_compare,
121 122 sizeof (zio_t), offsetof(struct zio, io_deadline_node));
122 123
123 124 avl_create(&vq->vq_read_tree, vdev_queue_offset_compare,
124 125 sizeof (zio_t), offsetof(struct zio, io_offset_node));
125 126
126 127 avl_create(&vq->vq_write_tree, vdev_queue_offset_compare,
127 128 sizeof (zio_t), offsetof(struct zio, io_offset_node));
128 129
129 130 avl_create(&vq->vq_pending_tree, vdev_queue_offset_compare,
130 131 sizeof (zio_t), offsetof(struct zio, io_offset_node));
131 132 }
132 133
133 134 void
134 135 vdev_queue_fini(vdev_t *vd)
135 136 {
136 137 vdev_queue_t *vq = &vd->vdev_queue;
137 138
138 139 avl_destroy(&vq->vq_deadline_tree);
139 140 avl_destroy(&vq->vq_read_tree);
140 141 avl_destroy(&vq->vq_write_tree);
141 142 avl_destroy(&vq->vq_pending_tree);
142 143
143 144 mutex_destroy(&vq->vq_lock);
144 145 }
145 146
146 147 static void
147 148 vdev_queue_io_add(vdev_queue_t *vq, zio_t *zio)
148 149 {
149 150 spa_t *spa = zio->io_spa;
150 151 avl_add(&vq->vq_deadline_tree, zio);
151 152 avl_add(zio->io_vdev_tree, zio);
152 153
153 154 if (spa->spa_iokstat != NULL) {
154 155 mutex_enter(&spa->spa_iokstat_lock);
155 156 kstat_waitq_enter(spa->spa_iokstat->ks_data);
156 157 mutex_exit(&spa->spa_iokstat_lock);
157 158 }
158 159 }
159 160
160 161 static void
161 162 vdev_queue_io_remove(vdev_queue_t *vq, zio_t *zio)
162 163 {
163 164 spa_t *spa = zio->io_spa;
164 165 avl_remove(&vq->vq_deadline_tree, zio);
165 166 avl_remove(zio->io_vdev_tree, zio);
166 167
167 168 if (spa->spa_iokstat != NULL) {
168 169 mutex_enter(&spa->spa_iokstat_lock);
169 170 kstat_waitq_exit(spa->spa_iokstat->ks_data);
170 171 mutex_exit(&spa->spa_iokstat_lock);
171 172 }
172 173 }
173 174
174 175 static void
175 176 vdev_queue_pending_add(vdev_queue_t *vq, zio_t *zio)
176 177 {
177 178 spa_t *spa = zio->io_spa;
178 179 avl_add(&vq->vq_pending_tree, zio);
179 180 if (spa->spa_iokstat != NULL) {
180 181 mutex_enter(&spa->spa_iokstat_lock);
181 182 kstat_runq_enter(spa->spa_iokstat->ks_data);
182 183 mutex_exit(&spa->spa_iokstat_lock);
183 184 }
184 185 }
185 186
186 187 static void
187 188 vdev_queue_pending_remove(vdev_queue_t *vq, zio_t *zio)
188 189 {
189 190 spa_t *spa = zio->io_spa;
190 191 avl_remove(&vq->vq_pending_tree, zio);
191 192 if (spa->spa_iokstat != NULL) {
192 193 kstat_io_t *ksio = spa->spa_iokstat->ks_data;
193 194
194 195 mutex_enter(&spa->spa_iokstat_lock);
195 196 kstat_runq_exit(spa->spa_iokstat->ks_data);
196 197 if (zio->io_type == ZIO_TYPE_READ) {
197 198 ksio->reads++;
198 199 ksio->nread += zio->io_size;
199 200 } else if (zio->io_type == ZIO_TYPE_WRITE) {
200 201 ksio->writes++;
201 202 ksio->nwritten += zio->io_size;
202 203 }
203 204 mutex_exit(&spa->spa_iokstat_lock);
204 205 }
205 206 }
206 207
207 208 static void
208 209 vdev_queue_agg_io_done(zio_t *aio)
209 210 {
210 211 zio_t *pio;
211 212
212 213 while ((pio = zio_walk_parents(aio)) != NULL)
213 214 if (aio->io_type == ZIO_TYPE_READ)
214 215 bcopy((char *)aio->io_data + (pio->io_offset -
215 216 aio->io_offset), pio->io_data, pio->io_size);
216 217
217 218 zio_buf_free(aio->io_data, aio->io_size);
218 219 }
219 220
220 221 /*
221 222  * Compute the range spanned by two I/Os, which is the endpoint of the last
222 223  * (lio->io_offset + lio->io_size) minus the start of the first (fio->io_offset).
223 224 * Conveniently, the gap between fio and lio is given by -IO_SPAN(lio, fio);
224 225 * thus fio and lio are adjacent if and only if IO_SPAN(lio, fio) == 0.
225 226 */
226 227 #define IO_SPAN(fio, lio) ((lio)->io_offset + (lio)->io_size - (fio)->io_offset)
227 228 #define IO_GAP(fio, lio) (-IO_SPAN(lio, fio))
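/*
 * (Worked example: if fio is 4K at offset 0 and lio is 4K at offset 8K,
 * then IO_SPAN(fio, lio) = 8K + 4K - 0 = 12K and
 * IO_GAP(fio, lio) = -IO_SPAN(lio, fio) = -(0 + 4K - 8K) = 4K.
 * Were lio to start at 4K instead, IO_SPAN(lio, fio) would be 0 and
 * the two I/Os would be adjacent.)
 */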
228 229
229 230 static zio_t *
230 231 vdev_queue_io_to_issue(vdev_queue_t *vq, uint64_t pending_limit)
231 232 {
232 233 zio_t *fio, *lio, *aio, *dio, *nio, *mio;
233 234 avl_tree_t *t;
234 235 int flags;
235 236 uint64_t maxspan = zfs_vdev_aggregation_limit;
236 237 uint64_t maxgap;
237 238 int stretch;
238 239
239 240 again:
240 241 ASSERT(MUTEX_HELD(&vq->vq_lock));
241 242
242 243 if (avl_numnodes(&vq->vq_pending_tree) >= pending_limit ||
243 244 avl_numnodes(&vq->vq_deadline_tree) == 0)
244 245 return (NULL);
245 246
246 247 fio = lio = avl_first(&vq->vq_deadline_tree);
247 248
248 249 t = fio->io_vdev_tree;
249 250 flags = fio->io_flags & ZIO_FLAG_AGG_INHERIT;
250 251 maxgap = (t == &vq->vq_read_tree) ? zfs_vdev_read_gap_limit : 0;
251 252
252 253 if (!(flags & ZIO_FLAG_DONT_AGGREGATE)) {
253 254 /*
254 255 * We can aggregate I/Os that are sufficiently adjacent and of
255 256 * the same flavor, as expressed by the AGG_INHERIT flags.
256 257 * The latter requirement is necessary so that certain
257 258 * attributes of the I/O, such as whether it's a normal I/O
258 259 * or a scrub/resilver, can be preserved in the aggregate.
259 260 * We can include optional I/Os, but don't allow them
260 261 * to begin a range as they add no benefit in that situation.
261 262 */
262 263
263 264 /*
264 265 * We keep track of the last non-optional I/O.
265 266 */
266 267 mio = (fio->io_flags & ZIO_FLAG_OPTIONAL) ? NULL : fio;
267 268
268 269 /*
269 270  * Walk backwards through sufficiently contiguous I/Os,
270 271  * recording the last non-optional I/O.
271 272 */
272 273 while ((dio = AVL_PREV(t, fio)) != NULL &&
273 274 (dio->io_flags & ZIO_FLAG_AGG_INHERIT) == flags &&
274 275 IO_SPAN(dio, lio) <= maxspan &&
275 276 IO_GAP(dio, fio) <= maxgap) {
276 277 fio = dio;
277 278 if (mio == NULL && !(fio->io_flags & ZIO_FLAG_OPTIONAL))
278 279 mio = fio;
279 280 }
280 281
281 282 /*
282 283 * Skip any initial optional I/Os.
283 284 */
284 285 while ((fio->io_flags & ZIO_FLAG_OPTIONAL) && fio != lio) {
285 286 fio = AVL_NEXT(t, fio);
286 287 ASSERT(fio != NULL);
287 288 }
288 289
289 290 /*
290 291 * Walk forward through sufficiently contiguous I/Os.
291 292 */
292 293 while ((dio = AVL_NEXT(t, lio)) != NULL &&
293 294 (dio->io_flags & ZIO_FLAG_AGG_INHERIT) == flags &&
294 295 IO_SPAN(fio, dio) <= maxspan &&
295 296 IO_GAP(lio, dio) <= maxgap) {
296 297 lio = dio;
297 298 if (!(lio->io_flags & ZIO_FLAG_OPTIONAL))
298 299 mio = lio;
299 300 }
300 301
301 302 /*
302 303  * Now that we've established the range of the I/O aggregation, we
303 304  * must decide what to do with trailing optional I/Os. For reads,
304 305  * there's nothing to do: we can't aggregate any further. For
305 306  * writes, although we can't aggregate any further ourselves, a
306 307  * trailing optional I/O might still allow the underlying device to
307 308  * aggregate with subsequent I/Os. We must therefore determine
308 309  * whether the next non-optional I/O is close enough to make
309 310  * aggregation worthwhile.
310 311 */
311 312 stretch = B_FALSE;
312 313 if (t != &vq->vq_read_tree && mio != NULL) {
313 314 nio = lio;
314 315 while ((dio = AVL_NEXT(t, nio)) != NULL &&
315 316 IO_GAP(nio, dio) == 0 &&
316 317 IO_GAP(mio, dio) <= zfs_vdev_write_gap_limit) {
317 318 nio = dio;
318 319 if (!(nio->io_flags & ZIO_FLAG_OPTIONAL)) {
319 320 stretch = B_TRUE;
320 321 break;
321 322 }
322 323 }
323 324 }
324 325
325 326 if (stretch) {
326 327 /* This may be a no-op. */
327 328 VERIFY((dio = AVL_NEXT(t, lio)) != NULL);
328 329 dio->io_flags &= ~ZIO_FLAG_OPTIONAL;
329 330 } else {
330 331 while (lio != mio && lio != fio) {
331 332 ASSERT(lio->io_flags & ZIO_FLAG_OPTIONAL);
332 333 lio = AVL_PREV(t, lio);
333 334 ASSERT(lio != NULL);
334 335 }
335 336 }
336 337 }
337 338
338 339 if (fio != lio) {
339 340 uint64_t size = IO_SPAN(fio, lio);
340 341 ASSERT(size <= zfs_vdev_aggregation_limit);
341 342
342 343 aio = zio_vdev_delegated_io(fio->io_vd, fio->io_offset,
343 344 zio_buf_alloc(size), size, fio->io_type, ZIO_PRIORITY_AGG,
344 345 flags | ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE,
345 346 vdev_queue_agg_io_done, NULL);
346 347 aio->io_timestamp = fio->io_timestamp;
347 348
348 349 nio = fio;
349 350 do {
350 351 dio = nio;
351 352 nio = AVL_NEXT(t, dio);
352 353 ASSERT(dio->io_type == aio->io_type);
353 354 ASSERT(dio->io_vdev_tree == t);
354 355
355 356 if (dio->io_flags & ZIO_FLAG_NODATA) {
356 357 ASSERT(dio->io_type == ZIO_TYPE_WRITE);
357 358 bzero((char *)aio->io_data + (dio->io_offset -
358 359 aio->io_offset), dio->io_size);
359 360 } else if (dio->io_type == ZIO_TYPE_WRITE) {
360 361 bcopy(dio->io_data, (char *)aio->io_data +
361 362 (dio->io_offset - aio->io_offset),
362 363 dio->io_size);
363 364 }
364 365
365 366 zio_add_child(dio, aio);
366 367 vdev_queue_io_remove(vq, dio);
367 368 zio_vdev_io_bypass(dio);
368 369 zio_execute(dio);
369 370 } while (dio != lio);
370 371
371 372 vdev_queue_pending_add(vq, aio);
372 373
373 374 return (aio);
374 375 }
375 376
376 377 ASSERT(fio->io_vdev_tree == t);
377 378 vdev_queue_io_remove(vq, fio);
378 379
379 380 /*
380 381 * If the I/O is or was optional and therefore has no data, we need to
381 382 * simply discard it. We need to drop the vdev queue's lock to avoid a
382 383 * deadlock that we could encounter since this I/O will complete
383 384 * immediately.
384 385 */
385 386 if (fio->io_flags & ZIO_FLAG_NODATA) {
386 387 mutex_exit(&vq->vq_lock);
387 388 zio_vdev_io_bypass(fio);
388 389 zio_execute(fio);
389 390 mutex_enter(&vq->vq_lock);
390 391 goto again;
391 392 }
392 393
393 394 vdev_queue_pending_add(vq, fio);
394 395
395 396 return (fio);
396 397 }
397 398
398 399 zio_t *
399 400 vdev_queue_io(zio_t *zio)
400 401 {
401 402 vdev_queue_t *vq = &zio->io_vd->vdev_queue;
402 403 zio_t *nio;
403 404
404 405 ASSERT(zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE);
405 406
406 407 if (zio->io_flags & ZIO_FLAG_DONT_QUEUE)
407 408 return (zio);
408 409
409 410 zio->io_flags |= ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE;
410 411
411 412 if (zio->io_type == ZIO_TYPE_READ)
412 413 zio->io_vdev_tree = &vq->vq_read_tree;
413 414 else
414 415 zio->io_vdev_tree = &vq->vq_write_tree;
415 416
416 417 mutex_enter(&vq->vq_lock);
417 418
418 419 zio->io_timestamp = gethrtime();
419 420 zio->io_deadline = (zio->io_timestamp >> zfs_vdev_time_shift) +
420 421 zio->io_priority;
421 422
422 423 vdev_queue_io_add(vq, zio);
423 424
424 425 nio = vdev_queue_io_to_issue(vq, zfs_vdev_min_pending);
425 426
426 427 mutex_exit(&vq->vq_lock);
427 428
428 429 if (nio == NULL)
429 430 return (NULL);
430 431
431 432 if (nio->io_done == vdev_queue_agg_io_done) {
432 433 zio_nowait(nio);
433 434 return (NULL);
434 435 }
435 436
436 437 return (nio);
437 438 }
438 439
439 440 void
440 441 vdev_queue_io_done(zio_t *zio)
441 442 {
442 443 vdev_queue_t *vq = &zio->io_vd->vdev_queue;
443 444
444 445 if (zio_injection_enabled)
445 446 delay(SEC_TO_TICK(zio_handle_io_delay(zio)));
446 447
447 448 mutex_enter(&vq->vq_lock);
448 449
449 450 vdev_queue_pending_remove(vq, zio);
450 451
451 452 vq->vq_io_complete_ts = gethrtime();
452 453
453 454 for (int i = 0; i < zfs_vdev_ramp_rate; i++) {
454 455 zio_t *nio = vdev_queue_io_to_issue(vq, zfs_vdev_max_pending);
455 456 if (nio == NULL)
456 457 break;
457 458 mutex_exit(&vq->vq_lock);
458 459 if (nio->io_done == vdev_queue_agg_io_done) {
459 460 zio_nowait(nio);
460 461 } else {
461 462 zio_vdev_io_reissue(nio);
462 463 zio_execute(nio);
463 464 }
464 465 mutex_enter(&vq->vq_lock);
465 466 }
466 467
467 468 mutex_exit(&vq->vq_lock);
468 469 }
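/*
 * (Sketch of the intended steady state with the defaults above, not a
 * guarantee: vdev_queue_io() issues while fewer than
 * zfs_vdev_min_pending (4) I/Os are in flight, and each completion in
 * vdev_queue_io_done() may issue up to zfs_vdev_ramp_rate (2) more, so
 * the in-flight count can grow by about one per completion until it
 * reaches zfs_vdev_max_pending (10).)
 */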