4045 zfs write throttle & i/o scheduler performance work
Reviewed by: George Wilson <george.wilson@delphix.com>
Reviewed by: Adam Leventhal <ahl@delphix.com>
Reviewed by: Christopher Siden <christopher.siden@delphix.com>
--- old/usr/src/uts/common/fs/zfs/dmu_zfetch.c
+++ new/usr/src/uts/common/fs/zfs/dmu_zfetch.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 */
25 25
26 +/*
27 + * Copyright (c) 2013 by Delphix. All rights reserved.
28 + */
29 +
26 30 #include <sys/zfs_context.h>
27 31 #include <sys/dnode.h>
28 32 #include <sys/dmu_objset.h>
29 33 #include <sys/dmu_zfetch.h>
30 34 #include <sys/dmu.h>
31 35 #include <sys/dbuf.h>
32 36 #include <sys/kstat.h>
33 37
34 38 /*
  35   39  * I'm against tunables, but these should probably exist as tweakable globals
36 40 * until we can get this working the way we want it to.
37 41 */
38 42
39 43 int zfs_prefetch_disable = 0;
40 44
41 45 /* max # of streams per zfetch */
42 46 uint32_t zfetch_max_streams = 8;
43 47 /* min time before stream reclaim */
44 48 uint32_t zfetch_min_sec_reap = 2;
45 49 /* max number of blocks to fetch at a time */
46 50 uint32_t zfetch_block_cap = 256;
  47   51 /* number of bytes in an array_read at which we stop prefetching (1MB) */
48 52 uint64_t zfetch_array_rd_sz = 1024 * 1024;
49 53
50 54 /* forward decls for static routines */
51 55 static boolean_t dmu_zfetch_colinear(zfetch_t *, zstream_t *);
52 56 static void dmu_zfetch_dofetch(zfetch_t *, zstream_t *);
53 57 static uint64_t dmu_zfetch_fetch(dnode_t *, uint64_t, uint64_t);
54 58 static uint64_t dmu_zfetch_fetchsz(dnode_t *, uint64_t, uint64_t);
55 59 static boolean_t dmu_zfetch_find(zfetch_t *, zstream_t *, int);
56 60 static int dmu_zfetch_stream_insert(zfetch_t *, zstream_t *);
57 61 static zstream_t *dmu_zfetch_stream_reclaim(zfetch_t *);
58 62 static void dmu_zfetch_stream_remove(zfetch_t *, zstream_t *);
59 63 static int dmu_zfetch_streams_equal(zstream_t *, zstream_t *);
60 64
61 65 typedef struct zfetch_stats {
62 66 kstat_named_t zfetchstat_hits;
63 67 kstat_named_t zfetchstat_misses;
64 68 kstat_named_t zfetchstat_colinear_hits;
65 69 kstat_named_t zfetchstat_colinear_misses;
66 70 kstat_named_t zfetchstat_stride_hits;
67 71 kstat_named_t zfetchstat_stride_misses;
68 72 kstat_named_t zfetchstat_reclaim_successes;
69 73 kstat_named_t zfetchstat_reclaim_failures;
70 74 kstat_named_t zfetchstat_stream_resets;
71 75 kstat_named_t zfetchstat_stream_noresets;
72 76 kstat_named_t zfetchstat_bogus_streams;
73 77 } zfetch_stats_t;
74 78
75 79 static zfetch_stats_t zfetch_stats = {
76 80 { "hits", KSTAT_DATA_UINT64 },
77 81 { "misses", KSTAT_DATA_UINT64 },
78 82 { "colinear_hits", KSTAT_DATA_UINT64 },
79 83 { "colinear_misses", KSTAT_DATA_UINT64 },
80 84 { "stride_hits", KSTAT_DATA_UINT64 },
81 85 { "stride_misses", KSTAT_DATA_UINT64 },
82 86 { "reclaim_successes", KSTAT_DATA_UINT64 },
83 87 { "reclaim_failures", KSTAT_DATA_UINT64 },
84 88 { "streams_resets", KSTAT_DATA_UINT64 },
85 89 { "streams_noresets", KSTAT_DATA_UINT64 },
86 90 { "bogus_streams", KSTAT_DATA_UINT64 },
87 91 };
88 92
89 93 #define ZFETCHSTAT_INCR(stat, val) \
90 94 atomic_add_64(&zfetch_stats.stat.value.ui64, (val));
91 95
92 96 #define ZFETCHSTAT_BUMP(stat) ZFETCHSTAT_INCR(stat, 1);
93 97
94 98 kstat_t *zfetch_ksp;
95 99
96 100 /*
97 101 * Given a zfetch structure and a zstream structure, determine whether the
98 102 * blocks to be read are part of a co-linear pair of existing prefetch
99 103 * streams. If a set is found, coalesce the streams, removing one, and
100 104 * configure the prefetch so it looks for a strided access pattern.
101 105 *
102 106 * In other words: if we find two sequential access streams that are
 103  107  * the same length and distance N apart, and this read is N from the
104 108 * last stream, then we are probably in a strided access pattern. So
105 109 * combine the two sequential streams into a single strided stream.
106 110 *
107 111 * Returns whether co-linear streams were found.
108 112 */
109 113 static boolean_t
110 114 dmu_zfetch_colinear(zfetch_t *zf, zstream_t *zh)
111 115 {
112 116 zstream_t *z_walk;
113 117 zstream_t *z_comp;
114 118
115 119 if (! rw_tryenter(&zf->zf_rwlock, RW_WRITER))
116 120 return (0);
117 121
118 122 if (zh == NULL) {
119 123 rw_exit(&zf->zf_rwlock);
120 124 return (0);
121 125 }
122 126
123 127 for (z_walk = list_head(&zf->zf_stream); z_walk;
124 128 z_walk = list_next(&zf->zf_stream, z_walk)) {
125 129 for (z_comp = list_next(&zf->zf_stream, z_walk); z_comp;
126 130 z_comp = list_next(&zf->zf_stream, z_comp)) {
127 131 int64_t diff;
128 132
129 133 if (z_walk->zst_len != z_walk->zst_stride ||
130 134 z_comp->zst_len != z_comp->zst_stride) {
131 135 continue;
132 136 }
133 137
134 138 diff = z_comp->zst_offset - z_walk->zst_offset;
135 139 if (z_comp->zst_offset + diff == zh->zst_offset) {
136 140 z_walk->zst_offset = zh->zst_offset;
137 141 z_walk->zst_direction = diff < 0 ? -1 : 1;
138 142 z_walk->zst_stride =
139 143 diff * z_walk->zst_direction;
140 144 z_walk->zst_ph_offset =
141 145 zh->zst_offset + z_walk->zst_stride;
142 146 dmu_zfetch_stream_remove(zf, z_comp);
143 147 mutex_destroy(&z_comp->zst_lock);
144 148 kmem_free(z_comp, sizeof (zstream_t));
145 149
146 150 dmu_zfetch_dofetch(zf, z_walk);
147 151
148 152 rw_exit(&zf->zf_rwlock);
149 153 return (1);
150 154 }
151 155
152 156 diff = z_walk->zst_offset - z_comp->zst_offset;
153 157 if (z_walk->zst_offset + diff == zh->zst_offset) {
154 158 z_walk->zst_offset = zh->zst_offset;
155 159 z_walk->zst_direction = diff < 0 ? -1 : 1;
156 160 z_walk->zst_stride =
157 161 diff * z_walk->zst_direction;
158 162 z_walk->zst_ph_offset =
159 163 zh->zst_offset + z_walk->zst_stride;
160 164 dmu_zfetch_stream_remove(zf, z_comp);
161 165 mutex_destroy(&z_comp->zst_lock);
162 166 kmem_free(z_comp, sizeof (zstream_t));
163 167
164 168 dmu_zfetch_dofetch(zf, z_walk);
165 169
166 170 rw_exit(&zf->zf_rwlock);
167 171 return (1);
168 172 }
169 173 }
170 174 }
171 175
172 176 rw_exit(&zf->zf_rwlock);
173 177 return (0);
174 178 }
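
The co-linear merge above is easier to follow with concrete numbers. Below is a minimal, user-level C sketch of the same arithmetic; the variable names and values are illustrative stand-ins rather than code from this change:

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	/*
	 * Two sequential streams, each with len == stride (the condition
	 * the nested walk above requires), sitting N = 100 blocks apart.
	 */
	int64_t walk_offset = 0, comp_offset = 100;
	int64_t diff = comp_offset - walk_offset;	/* N = 100 */

	/*
	 * A read landing another N blocks past the second stream suggests
	 * a strided access pattern, so the two streams merge into one.
	 */
	int64_t read_offset = comp_offset + diff;	/* 200 */
	int64_t direction = (diff < 0) ? -1 : 1;
	int64_t stride = diff * direction;		/* |N| = 100 */
	int64_t ph_offset = read_offset + stride;	/* next prefetch: 300 */

	(void) printf("merged: offset=%lld stride=%lld prefetch=%lld\n",
	    (long long)read_offset, (long long)stride, (long long)ph_offset);
	return (0);
}
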
175 179
176 180 /*
177 181 * Given a zstream_t, determine the bounds of the prefetch. Then call the
178 182 * routine that actually prefetches the individual blocks.
179 183 */
180 184 static void
181 185 dmu_zfetch_dofetch(zfetch_t *zf, zstream_t *zs)
182 186 {
183 187 uint64_t prefetch_tail;
184 188 uint64_t prefetch_limit;
185 189 uint64_t prefetch_ofst;
186 190 uint64_t prefetch_len;
187 191 uint64_t blocks_fetched;
188 192
189 193 zs->zst_stride = MAX((int64_t)zs->zst_stride, zs->zst_len);
190 194 zs->zst_cap = MIN(zfetch_block_cap, 2 * zs->zst_cap);
191 195
192 196 prefetch_tail = MAX((int64_t)zs->zst_ph_offset,
193 197 (int64_t)(zs->zst_offset + zs->zst_stride));
194 198 /*
195 199 * XXX: use a faster division method?
196 200 */
197 201 prefetch_limit = zs->zst_offset + zs->zst_len +
198 202 (zs->zst_cap * zs->zst_stride) / zs->zst_len;
199 203
200 204 while (prefetch_tail < prefetch_limit) {
201 205 prefetch_ofst = zs->zst_offset + zs->zst_direction *
202 206 (prefetch_tail - zs->zst_offset);
203 207
204 208 prefetch_len = zs->zst_len;
205 209
206 210 /*
 207  211  * Don't prefetch past the start of the file when working
208 212 * backwards.
209 213 */
210 214 if ((zs->zst_direction == ZFETCH_BACKWARD) &&
211 215 (prefetch_ofst > prefetch_tail)) {
212 216 prefetch_len += prefetch_ofst;
213 217 prefetch_ofst = 0;
214 218 }
215 219
216 220 /* don't prefetch more than we're supposed to */
217 221 if (prefetch_len > zs->zst_len)
218 222 break;
219 223
220 224 blocks_fetched = dmu_zfetch_fetch(zf->zf_dnode,
221 225 prefetch_ofst, zs->zst_len);
222 226
223 227 prefetch_tail += zs->zst_stride;
224 228 /* stop if we've run out of stuff to prefetch */
225 229 if (blocks_fetched < zs->zst_len)
226 230 break;
227 231 }
228 232 zs->zst_ph_offset = prefetch_tail;
229 233 zs->zst_last = ddi_get_lbolt();
230 234 }
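
To make the bounds arithmetic concrete, consider a plain sequential stream (so stride == len) with illustrative values offset = 10, len = 4, ph_offset = 14, and cap = 16 after the doubling above: prefetch_limit = 10 + 4 + (16 * 4) / 4 = 30, and prefetch_tail starts at MAX(14, 10 + 4) = 14, so the loop issues len-block fetches at blocks 14, 18, 22, and 26 before the tail reaches the limit.
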
231 235
232 236 void
233 237 zfetch_init(void)
234 238 {
235 239
236 240 zfetch_ksp = kstat_create("zfs", 0, "zfetchstats", "misc",
237 241 KSTAT_TYPE_NAMED, sizeof (zfetch_stats) / sizeof (kstat_named_t),
238 242 KSTAT_FLAG_VIRTUAL);
239 243
240 244 if (zfetch_ksp != NULL) {
241 245 zfetch_ksp->ks_data = &zfetch_stats;
242 246 kstat_install(zfetch_ksp);
243 247 }
244 248 }
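
Since these counters are exported as an ordinary named kstat, they can be inspected at runtime; on illumos something like "kstat -m zfs -n zfetchstats" should print the hit/miss statistics declared above (the invocation is a usage sketch, not part of this change).
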
245 249
246 250 void
247 251 zfetch_fini(void)
248 252 {
249 253 if (zfetch_ksp != NULL) {
250 254 kstat_delete(zfetch_ksp);
251 255 zfetch_ksp = NULL;
252 256 }
253 257 }
254 258
255 259 /*
256 260 * This takes a pointer to a zfetch structure and a dnode. It performs the
257 261 * necessary setup for the zfetch structure, grokking data from the
258 262 * associated dnode.
259 263 */
260 264 void
261 265 dmu_zfetch_init(zfetch_t *zf, dnode_t *dno)
262 266 {
263 267 if (zf == NULL) {
264 268 return;
265 269 }
266 270
267 271 zf->zf_dnode = dno;
268 272 zf->zf_stream_cnt = 0;
269 273 zf->zf_alloc_fail = 0;
270 274
271 275 list_create(&zf->zf_stream, sizeof (zstream_t),
272 276 offsetof(zstream_t, zst_node));
273 277
274 278 rw_init(&zf->zf_rwlock, NULL, RW_DEFAULT, NULL);
275 279 }
276 280
277 281 /*
278 282 * This function computes the actual size, in blocks, that can be prefetched,
279 283 * and fetches it.
280 284 */
281 285 static uint64_t
282 286 dmu_zfetch_fetch(dnode_t *dn, uint64_t blkid, uint64_t nblks)
283 287 {
284 288 uint64_t fetchsz;
285 289 uint64_t i;
286 290
287 291 fetchsz = dmu_zfetch_fetchsz(dn, blkid, nblks);
288 292
289 293 for (i = 0; i < fetchsz; i++) {
290 - dbuf_prefetch(dn, blkid + i);
294 + dbuf_prefetch(dn, blkid + i, ZIO_PRIORITY_ASYNC_READ);
291 295 }
292 296
293 297 return (fetchsz);
294 298 }
295 299
296 300 /*
 297  301  * This function returns the number of blocks that would be prefetched, based
298 302 * upon the supplied dnode, blockid, and nblks. This is used so that we can
299 303 * update streams in place, and then prefetch with their old value after the
300 304 * fact. This way, we can delay the prefetch, but subsequent accesses to the
301 305 * stream won't result in the same data being prefetched multiple times.
302 306 */
303 307 static uint64_t
304 308 dmu_zfetch_fetchsz(dnode_t *dn, uint64_t blkid, uint64_t nblks)
305 309 {
306 310 uint64_t fetchsz;
307 311
308 312 if (blkid > dn->dn_maxblkid) {
309 313 return (0);
310 314 }
311 315
312 316 /* compute fetch size */
313 317 if (blkid + nblks + 1 > dn->dn_maxblkid) {
314 318 fetchsz = (dn->dn_maxblkid - blkid) + 1;
315 319 ASSERT(blkid + fetchsz - 1 <= dn->dn_maxblkid);
316 320 } else {
317 321 fetchsz = nblks;
318 322 }
319 323
320 324
321 325 return (fetchsz);
322 326 }
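
A worked example of the clamp, with illustrative numbers: if dn_maxblkid = 100, a request at blkid = 98 for nblks = 8 satisfies 98 + 8 + 1 > 100, so fetchsz = (100 - 98) + 1 = 3 blocks; a request starting at blkid = 101 is past the last block and returns 0.
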
323 327
324 328 /*
 325  329  * Given a zfetch and a zstream structure, see if there is an associated zstream
 326  330  * for this block read. If so, it starts a prefetch for the stream it
 327  331  * located and returns true; otherwise it returns false.
328 332 */
329 333 static boolean_t
330 334 dmu_zfetch_find(zfetch_t *zf, zstream_t *zh, int prefetched)
331 335 {
332 336 zstream_t *zs;
333 337 int64_t diff;
334 338 int reset = !prefetched;
335 339 int rc = 0;
336 340
337 341 if (zh == NULL)
338 342 return (0);
339 343
340 344 /*
 341  345  * XXX: This locking strategy is a bit coarse; however, its impact has
342 346 * yet to be tested. If this turns out to be an issue, it can be
343 347 * modified in a number of different ways.
344 348 */
345 349
346 350 rw_enter(&zf->zf_rwlock, RW_READER);
347 351 top:
348 352
349 353 for (zs = list_head(&zf->zf_stream); zs;
350 354 zs = list_next(&zf->zf_stream, zs)) {
351 355
352 356 /*
353 357 * XXX - should this be an assert?
354 358 */
355 359 if (zs->zst_len == 0) {
356 360 /* bogus stream */
357 361 ZFETCHSTAT_BUMP(zfetchstat_bogus_streams);
358 362 continue;
359 363 }
360 364
361 365 /*
362 366 * We hit this case when we are in a strided prefetch stream:
363 367 * we will read "len" blocks before "striding".
364 368 */
365 369 if (zh->zst_offset >= zs->zst_offset &&
366 370 zh->zst_offset < zs->zst_offset + zs->zst_len) {
367 371 if (prefetched) {
368 372 /* already fetched */
369 373 ZFETCHSTAT_BUMP(zfetchstat_stride_hits);
370 374 rc = 1;
371 375 goto out;
372 376 } else {
373 377 ZFETCHSTAT_BUMP(zfetchstat_stride_misses);
374 378 }
375 379 }
376 380
377 381 /*
378 382 * This is the forward sequential read case: we increment
379 383 * len by one each time we hit here, so we will enter this
380 384 * case on every read.
381 385 */
382 386 if (zh->zst_offset == zs->zst_offset + zs->zst_len) {
383 387
384 388 reset = !prefetched && zs->zst_len > 1;
385 389
386 390 mutex_enter(&zs->zst_lock);
387 391
388 392 if (zh->zst_offset != zs->zst_offset + zs->zst_len) {
389 393 mutex_exit(&zs->zst_lock);
390 394 goto top;
391 395 }
392 396 zs->zst_len += zh->zst_len;
393 397 diff = zs->zst_len - zfetch_block_cap;
394 398 if (diff > 0) {
395 399 zs->zst_offset += diff;
396 400 zs->zst_len = zs->zst_len > diff ?
397 401 zs->zst_len - diff : 0;
398 402 }
399 403 zs->zst_direction = ZFETCH_FORWARD;
400 404
401 405 break;
402 406
403 407 /*
404 408 * Same as above, but reading backwards through the file.
405 409 */
406 410 } else if (zh->zst_offset == zs->zst_offset - zh->zst_len) {
407 411 /* backwards sequential access */
408 412
409 413 reset = !prefetched && zs->zst_len > 1;
410 414
411 415 mutex_enter(&zs->zst_lock);
412 416
413 417 if (zh->zst_offset != zs->zst_offset - zh->zst_len) {
414 418 mutex_exit(&zs->zst_lock);
415 419 goto top;
416 420 }
417 421
418 422 zs->zst_offset = zs->zst_offset > zh->zst_len ?
419 423 zs->zst_offset - zh->zst_len : 0;
420 424 zs->zst_ph_offset = zs->zst_ph_offset > zh->zst_len ?
421 425 zs->zst_ph_offset - zh->zst_len : 0;
422 426 zs->zst_len += zh->zst_len;
423 427
424 428 diff = zs->zst_len - zfetch_block_cap;
425 429 if (diff > 0) {
426 430 zs->zst_ph_offset = zs->zst_ph_offset > diff ?
427 431 zs->zst_ph_offset - diff : 0;
428 432 zs->zst_len = zs->zst_len > diff ?
429 433 zs->zst_len - diff : zs->zst_len;
430 434 }
431 435 zs->zst_direction = ZFETCH_BACKWARD;
432 436
433 437 break;
434 438
435 439 } else if ((zh->zst_offset - zs->zst_offset - zs->zst_stride <
436 440 zs->zst_len) && (zs->zst_len != zs->zst_stride)) {
437 441 /* strided forward access */
438 442
439 443 mutex_enter(&zs->zst_lock);
440 444
441 445 if ((zh->zst_offset - zs->zst_offset - zs->zst_stride >=
442 446 zs->zst_len) || (zs->zst_len == zs->zst_stride)) {
443 447 mutex_exit(&zs->zst_lock);
444 448 goto top;
445 449 }
446 450
447 451 zs->zst_offset += zs->zst_stride;
448 452 zs->zst_direction = ZFETCH_FORWARD;
449 453
450 454 break;
451 455
452 456 } else if ((zh->zst_offset - zs->zst_offset + zs->zst_stride <
453 457 zs->zst_len) && (zs->zst_len != zs->zst_stride)) {
454 458 /* strided reverse access */
455 459
456 460 mutex_enter(&zs->zst_lock);
457 461
458 462 if ((zh->zst_offset - zs->zst_offset + zs->zst_stride >=
459 463 zs->zst_len) || (zs->zst_len == zs->zst_stride)) {
460 464 mutex_exit(&zs->zst_lock);
461 465 goto top;
462 466 }
463 467
464 468 zs->zst_offset = zs->zst_offset > zs->zst_stride ?
465 469 zs->zst_offset - zs->zst_stride : 0;
466 470 zs->zst_ph_offset = (zs->zst_ph_offset >
467 471 (2 * zs->zst_stride)) ?
468 472 (zs->zst_ph_offset - (2 * zs->zst_stride)) : 0;
469 473 zs->zst_direction = ZFETCH_BACKWARD;
470 474
471 475 break;
472 476 }
473 477 }
474 478
475 479 if (zs) {
476 480 if (reset) {
477 481 zstream_t *remove = zs;
478 482
479 483 ZFETCHSTAT_BUMP(zfetchstat_stream_resets);
480 484 rc = 0;
481 485 mutex_exit(&zs->zst_lock);
482 486 rw_exit(&zf->zf_rwlock);
483 487 rw_enter(&zf->zf_rwlock, RW_WRITER);
484 488 /*
485 489 * Relocate the stream, in case someone removes
486 490 * it while we were acquiring the WRITER lock.
487 491 */
488 492 for (zs = list_head(&zf->zf_stream); zs;
489 493 zs = list_next(&zf->zf_stream, zs)) {
490 494 if (zs == remove) {
491 495 dmu_zfetch_stream_remove(zf, zs);
492 496 mutex_destroy(&zs->zst_lock);
493 497 kmem_free(zs, sizeof (zstream_t));
494 498 break;
495 499 }
496 500 }
497 501 } else {
498 502 ZFETCHSTAT_BUMP(zfetchstat_stream_noresets);
499 503 rc = 1;
500 504 dmu_zfetch_dofetch(zf, zs);
501 505 mutex_exit(&zs->zst_lock);
502 506 }
503 507 }
504 508 out:
505 509 rw_exit(&zf->zf_rwlock);
506 510 return (rc);
507 511 }
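
As a concrete trace of the forward sequential case (illustrative values): a stream with zst_offset = 0 and zst_len = 3, matched by a read header with zst_offset = 3 and zst_len = 1, extends the stream to len = 4; once len would exceed zfetch_block_cap (256 by default), diff goes positive and the window slides forward, bumping zst_offset and trimming zst_len so the stream never covers more than the cap.
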
508 512
509 513 /*
 510  514  * Clean up state associated with a zfetch structure. This frees allocated
 511  515  * structure members, empties the zf_stream list, and generally makes things
 512  516  * nice. This doesn't free the zfetch_t itself; that's left to the caller.
513 517 */
514 518 void
515 519 dmu_zfetch_rele(zfetch_t *zf)
516 520 {
517 521 zstream_t *zs;
518 522 zstream_t *zs_next;
519 523
520 524 ASSERT(!RW_LOCK_HELD(&zf->zf_rwlock));
521 525
522 526 for (zs = list_head(&zf->zf_stream); zs; zs = zs_next) {
523 527 zs_next = list_next(&zf->zf_stream, zs);
524 528
525 529 list_remove(&zf->zf_stream, zs);
526 530 mutex_destroy(&zs->zst_lock);
527 531 kmem_free(zs, sizeof (zstream_t));
528 532 }
529 533 list_destroy(&zf->zf_stream);
530 534 rw_destroy(&zf->zf_rwlock);
531 535
532 536 zf->zf_dnode = NULL;
533 537 }
534 538
535 539 /*
 536  540  * Given a zfetch and zstream structure, insert the zstream structure into the
 537  541  * stream list contained within the zfetch structure. Perform the appropriate
 538  542  * book-keeping. It is possible that another thread has inserted a stream which
 539  543  * matches one that we are about to insert, so we must be sure to check for this
 540  544  * case. If one is found, return failure, and let the caller clean up the
 541  545  * duplicates.
542 546 */
543 547 static int
544 548 dmu_zfetch_stream_insert(zfetch_t *zf, zstream_t *zs)
545 549 {
546 550 zstream_t *zs_walk;
547 551 zstream_t *zs_next;
548 552
549 553 ASSERT(RW_WRITE_HELD(&zf->zf_rwlock));
550 554
551 555 for (zs_walk = list_head(&zf->zf_stream); zs_walk; zs_walk = zs_next) {
552 556 zs_next = list_next(&zf->zf_stream, zs_walk);
553 557
554 558 if (dmu_zfetch_streams_equal(zs_walk, zs)) {
555 559 return (0);
556 560 }
557 561 }
558 562
559 563 list_insert_head(&zf->zf_stream, zs);
560 564 zf->zf_stream_cnt++;
561 565 return (1);
562 566 }
563 567
564 568
565 569 /*
566 570 * Walk the list of zstreams in the given zfetch, find an old one (by time), and
567 571 * reclaim it for use by the caller.
568 572 */
569 573 static zstream_t *
570 574 dmu_zfetch_stream_reclaim(zfetch_t *zf)
571 575 {
572 576 zstream_t *zs;
573 577
574 578 if (! rw_tryenter(&zf->zf_rwlock, RW_WRITER))
575 579 return (0);
576 580
577 581 for (zs = list_head(&zf->zf_stream); zs;
578 582 zs = list_next(&zf->zf_stream, zs)) {
579 583
580 584 if (((ddi_get_lbolt() - zs->zst_last)/hz) > zfetch_min_sec_reap)
581 585 break;
582 586 }
583 587
584 588 if (zs) {
585 589 dmu_zfetch_stream_remove(zf, zs);
586 590 mutex_destroy(&zs->zst_lock);
587 591 bzero(zs, sizeof (zstream_t));
588 592 } else {
589 593 zf->zf_alloc_fail++;
590 594 }
591 595 rw_exit(&zf->zf_rwlock);
592 596
593 597 return (zs);
594 598 }
595 599
596 600 /*
597 601 * Given a zfetch and zstream structure, remove the zstream structure from its
598 602 * container in the zfetch structure. Perform the appropriate book-keeping.
599 603 */
600 604 static void
601 605 dmu_zfetch_stream_remove(zfetch_t *zf, zstream_t *zs)
602 606 {
603 607 ASSERT(RW_WRITE_HELD(&zf->zf_rwlock));
604 608
605 609 list_remove(&zf->zf_stream, zs);
606 610 zf->zf_stream_cnt--;
607 611 }
608 612
609 613 static int
610 614 dmu_zfetch_streams_equal(zstream_t *zs1, zstream_t *zs2)
611 615 {
612 616 if (zs1->zst_offset != zs2->zst_offset)
613 617 return (0);
614 618
615 619 if (zs1->zst_len != zs2->zst_len)
616 620 return (0);
617 621
618 622 if (zs1->zst_stride != zs2->zst_stride)
619 623 return (0);
620 624
621 625 if (zs1->zst_ph_offset != zs2->zst_ph_offset)
622 626 return (0);
623 627
624 628 if (zs1->zst_cap != zs2->zst_cap)
625 629 return (0);
626 630
627 631 if (zs1->zst_direction != zs2->zst_direction)
628 632 return (0);
629 633
630 634 return (1);
631 635 }
632 636
633 637 /*
634 638 * This is the prefetch entry point. It calls all of the other dmu_zfetch
635 639 * routines to create, delete, find, or operate upon prefetch streams.
636 640 */
637 641 void
638 642 dmu_zfetch(zfetch_t *zf, uint64_t offset, uint64_t size, int prefetched)
639 643 {
640 644 zstream_t zst;
641 645 zstream_t *newstream;
642 646 boolean_t fetched;
643 647 int inserted;
644 648 unsigned int blkshft;
645 649 uint64_t blksz;
646 650
647 651 if (zfs_prefetch_disable)
648 652 return;
649 653
650 654 /* files that aren't ln2 blocksz are only one block -- nothing to do */
651 655 if (!zf->zf_dnode->dn_datablkshift)
652 656 return;
653 657
654 658 /* convert offset and size, into blockid and nblocks */
655 659 blkshft = zf->zf_dnode->dn_datablkshift;
656 660 blksz = (1 << blkshft);
657 661
658 662 bzero(&zst, sizeof (zstream_t));
659 663 zst.zst_offset = offset >> blkshft;
660 664 zst.zst_len = (P2ROUNDUP(offset + size, blksz) -
661 665 P2ALIGN(offset, blksz)) >> blkshft;
662 666
663 667 fetched = dmu_zfetch_find(zf, &zst, prefetched);
664 668 if (fetched) {
665 669 ZFETCHSTAT_BUMP(zfetchstat_hits);
666 670 } else {
667 671 ZFETCHSTAT_BUMP(zfetchstat_misses);
668 672 fetched = dmu_zfetch_colinear(zf, &zst);
669 673 if (fetched) {
670 674 ZFETCHSTAT_BUMP(zfetchstat_colinear_hits);
671 675 } else {
672 676 ZFETCHSTAT_BUMP(zfetchstat_colinear_misses);
673 677 }
674 678 }
675 679
676 680 if (!fetched) {
677 681 newstream = dmu_zfetch_stream_reclaim(zf);
678 682
679 683 /*
 680  684  * If we still couldn't find a stream, drop the lock and allocate
 681  685  * one if possible. Otherwise, give up and go home.
682 686 */
683 687 if (newstream) {
684 688 ZFETCHSTAT_BUMP(zfetchstat_reclaim_successes);
685 689 } else {
686 690 uint64_t maxblocks;
687 691 uint32_t max_streams;
688 692 uint32_t cur_streams;
689 693
690 694 ZFETCHSTAT_BUMP(zfetchstat_reclaim_failures);
691 695 cur_streams = zf->zf_stream_cnt;
692 696 maxblocks = zf->zf_dnode->dn_maxblkid;
693 697
694 698 max_streams = MIN(zfetch_max_streams,
695 699 (maxblocks / zfetch_block_cap));
696 700 if (max_streams == 0) {
697 701 max_streams++;
698 702 }
699 703
700 704 if (cur_streams >= max_streams) {
701 705 return;
702 706 }
703 707 newstream = kmem_zalloc(sizeof (zstream_t), KM_SLEEP);
704 708 }
705 709
706 710 newstream->zst_offset = zst.zst_offset;
707 711 newstream->zst_len = zst.zst_len;
708 712 newstream->zst_stride = zst.zst_len;
709 713 newstream->zst_ph_offset = zst.zst_len + zst.zst_offset;
710 714 newstream->zst_cap = zst.zst_len;
711 715 newstream->zst_direction = ZFETCH_FORWARD;
712 716 newstream->zst_last = ddi_get_lbolt();
713 717
714 718 mutex_init(&newstream->zst_lock, NULL, MUTEX_DEFAULT, NULL);
715 719
716 720 rw_enter(&zf->zf_rwlock, RW_WRITER);
717 721 inserted = dmu_zfetch_stream_insert(zf, newstream);
718 722 rw_exit(&zf->zf_rwlock);
719 723
720 724 if (!inserted) {
721 725 mutex_destroy(&newstream->zst_lock);
722 726 kmem_free(newstream, sizeof (zstream_t));
723 727 }
724 728 }
725 729 }
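
The offset-to-block conversion at the top of dmu_zfetch() also rewards a standalone look. A minimal user-level sketch, repeating the P2ALIGN/P2ROUNDUP definitions from sys/sysmacros.h so it compiles on its own; the offset and size values are illustrative:

#include <stdio.h>
#include <stdint.h>

/* As defined in sys/sysmacros.h; repeated so the sketch stands alone. */
#define	P2ALIGN(x, align)	((x) & -(align))
#define	P2ROUNDUP(x, align)	(-(-(x) & -(align)))

int
main(void)
{
	uint64_t offset = 6144, size = 10000;	/* illustrative access */
	unsigned int blkshft = 12;		/* 4K data blocks */
	uint64_t blksz = 1ULL << blkshft;

	/* First block touched by the access. */
	uint64_t blkid = offset >> blkshft;	/* 6144 >> 12 = 1 */

	/* Whole blocks spanned, rounding both edges outward. */
	uint64_t nblks = (P2ROUNDUP(offset + size, blksz) -
	    P2ALIGN(offset, blksz)) >> blkshft;	/* (16384 - 4096) >> 12 = 3 */

	(void) printf("blkid=%llu nblks=%llu\n",
	    (unsigned long long)blkid, (unsigned long long)nblks);
	return (0);
}
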