3742 zfs comments need cleaner, more consistent style
Submitted by: Will Andrews <willa@spectralogic.com>
Submitted by: Alan Somers <alans@spectralogic.com>
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed by: George Wilson <george.wilson@delphix.com>
Reviewed by: Eric Schrock <eric.schrock@delphix.com>
--- old/usr/src/uts/common/fs/zfs/zfs_rlock.c
+++ new/usr/src/uts/common/fs/zfs/zfs_rlock.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 */
25 25 /*
26 26 * Copyright (c) 2012 by Delphix. All rights reserved.
27 27 */
28 28
29 29 /*
30 30 * This file contains the code to implement file range locking in
31 - * ZFS, although there isn't much specific to ZFS (all that comes to mind
31 + * ZFS, although there isn't much specific to ZFS (all that comes to mind is
32 32 * support for growing the blocksize).
33 33 *
34 34 * Interface
35 35 * ---------
36 36 * Defined in zfs_rlock.h but essentially:
37 37 * rl = zfs_range_lock(zp, off, len, lock_type);
38 38 * zfs_range_unlock(rl);
39 39 * zfs_range_reduce(rl, off, len);
40 40 *
41 41 * AVL tree
42 42 * --------
43 43 * An AVL tree is used to maintain the state of the existing ranges
44 44 * that are locked for exclusive (writer) or shared (reader) use.
45 45 * The starting range offset is used for searching and sorting the tree.
46 46 *
47 47 * Common case
48 48 * -----------
49 49 * The (hopefully) usual case is of no overlaps or contention for
50 50 * locks. On entry to zfs_range_lock() a rl_t is allocated; the tree is
51 51 * searched and no overlap is found, so *this* rl_t is placed in the tree.
52 52 *
53 53 * Overlaps/Reference counting/Proxy locks
54 54 * ---------------------------------------
55 55 * The avl code only allows one node at a particular offset. Also it's very
56 56 * inefficient to search through all previous entries looking for overlaps
57 57 * (because the very 1st in the ordered list might be at offset 0 but
58 58 * cover the whole file).
59 59 * So this implementation uses reference counts and proxy range locks.
60 60 * Firstly, only reader locks use reference counts and proxy locks,
61 61 * because writer locks are exclusive.
62 62 * When a reader lock overlaps with another then a proxy lock is created
63 63 * for that range and replaces the original lock. If the overlap
64 64 * is exact then the reference count of the proxy is simply incremented.
65 65 * Otherwise, the proxy lock is split into smaller lock ranges and
66 66 * new proxy locks created for non overlapping ranges.
67 67 * The reference counts are adjusted accordingly.
68 68 * Meanwhile, the original lock is kept around (this is the caller's handle)
69 69 * and its offset and length are used when releasing the lock.
70 70 *
71 71 * Thread coordination
72 72 * -------------------
73 73 * In order to make wakeups efficient and to ensure multiple continuous
74 74 * readers on a range don't starve a writer for the same range lock,
75 75 * two condition variables are allocated in each rl_t.
76 76 * If a writer (or reader) can't get a range it initialises the writer
77 77 * (or reader) cv; sets a flag saying there's a writer (or reader) waiting;
78 78 * and waits on that cv. When a thread unlocks that range it wakes up all
79 79 * writers then all readers before destroying the lock.
80 80 *
81 81 * Append mode writes
82 82 * ------------------
83 83 * Append mode writes need to lock a range at the end of a file.
84 84 * The offset of the end of the file is determined under the
85 85 * range locking mutex, the lock type is converted from RL_APPEND to
86 86 * RL_WRITER, and the range is locked.
87 87 *
88 88 * Grow block handling
89 89 * -------------------
90 90 * ZFS supports multiple block sizes, currently up to 128K. The smallest
91 91 * block size is used for the file, which is grown as needed. During this
92 92 * growth all other writers and readers must be excluded.
93 93 * So if the block size needs to be grown then the whole file is
94 94 * exclusively locked, then later the caller will reduce the lock
95 95 * range to just the range to be written using zfs_range_reduce().
96 96 */
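/*
 * A minimal caller-side sketch of the interface above (an illustrative
 * example; zp, off and nbytes are assumed to come from the caller,
 * e.g. the ZPL read path):
 *
 *	rl_t *rl;
 *
 *	rl = zfs_range_lock(zp, off, nbytes, RL_READER);
 *	...read the file data for [off, off + nbytes)...
 *	zfs_range_unlock(rl);
 *
 * An append-mode write passes RL_APPEND instead; the actual offset is
 * then picked up from zp->z_size under z_range_lock and the lock is
 * converted to RL_WRITER before it is inserted into the tree.
 */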
97 97
98 98 #include <sys/zfs_rlock.h>
99 99
100 100 /*
101 101 * Check if a write lock can be grabbed, or wait and recheck until available.
102 102 */
103 103 static void
104 104 zfs_range_lock_writer(znode_t *zp, rl_t *new)
105 105 {
106 106 avl_tree_t *tree = &zp->z_range_avl;
107 107 rl_t *rl;
108 108 avl_index_t where;
109 109 uint64_t end_size;
110 110 uint64_t off = new->r_off;
111 111 uint64_t len = new->r_len;
112 112
113 113 for (;;) {
114 114 /*
115 115 * Range locking is also used by zvol and uses a
116 116 * dummied up znode. However, for zvol, we don't need to
117 117 * append or grow blocksize, and besides we don't have
118 118 * a "sa" data or z_zfsvfs - so skip that processing.
119 119 *
120 120 * Yes, this is ugly, and would be solved by not handling
121 121 * grow or append in range lock code. If that was done then
122 122 * we could make the range locking code generically available
123 123 * to other non-zfs consumers.
124 124 */
125 125 if (zp->z_vnode) { /* caller is ZPL */
126 126 /*
127 127 * If in append mode pick up the current end of file.
128 128 * This is done under z_range_lock to avoid races.
129 129 */
130 130 if (new->r_type == RL_APPEND)
131 131 new->r_off = zp->z_size;
132 132
133 133 /*
134 134 * If we need to grow the block size then grab the whole
135 135 * file range. This is also done under z_range_lock to
136 136 * avoid races.
137 137 */
138 138 end_size = MAX(zp->z_size, new->r_off + len);
139 139 if (end_size > zp->z_blksz && (!ISP2(zp->z_blksz) ||
140 140 zp->z_blksz < zp->z_zfsvfs->z_max_blksz)) {
141 141 new->r_off = 0;
142 142 new->r_len = UINT64_MAX;
143 143 }
144 144 }
145 145
146 146 /*
147 147 * First check for the usual case of no locks
148 148 */
149 149 if (avl_numnodes(tree) == 0) {
150 150 new->r_type = RL_WRITER; /* convert to writer */
151 151 avl_add(tree, new);
152 152 return;
153 153 }
154 154
155 155 /*
156 156 * Look for any locks in the range.
157 157 */
158 158 rl = avl_find(tree, new, &where);
159 159 if (rl)
160 160 goto wait; /* already locked at same offset */
161 161
162 162 rl = (rl_t *)avl_nearest(tree, where, AVL_AFTER);
163 163 if (rl && (rl->r_off < new->r_off + new->r_len))
164 164 goto wait;
165 165
166 166 rl = (rl_t *)avl_nearest(tree, where, AVL_BEFORE);
167 167 if (rl && rl->r_off + rl->r_len > new->r_off)
168 168 goto wait;
169 169
170 170 new->r_type = RL_WRITER; /* convert possible RL_APPEND */
171 171 avl_insert(tree, new, where);
172 172 return;
173 173 wait:
174 174 if (!rl->r_write_wanted) {
175 175 cv_init(&rl->r_wr_cv, NULL, CV_DEFAULT, NULL);
176 176 rl->r_write_wanted = B_TRUE;
177 177 }
178 178 cv_wait(&rl->r_wr_cv, &zp->z_range_lock);
179 179
180 180 /* reset to original */
181 181 new->r_off = off;
182 182 new->r_len = len;
183 183 }
184 184 }
185 185
186 186 /*
187 187 * If this is an original (non-proxy) lock then replace it by
188 188 * a proxy and return the proxy.
189 189 */
190 190 static rl_t *
191 191 zfs_range_proxify(avl_tree_t *tree, rl_t *rl)
192 192 {
193 193 rl_t *proxy;
194 194
195 195 if (rl->r_proxy)
196 196 return (rl); /* already a proxy */
197 197
198 198 ASSERT3U(rl->r_cnt, ==, 1);
199 199 ASSERT(rl->r_write_wanted == B_FALSE);
200 200 ASSERT(rl->r_read_wanted == B_FALSE);
201 201 avl_remove(tree, rl);
202 202 rl->r_cnt = 0;
203 203
204 204 /* create a proxy range lock */
205 205 proxy = kmem_alloc(sizeof (rl_t), KM_SLEEP);
206 206 proxy->r_off = rl->r_off;
207 207 proxy->r_len = rl->r_len;
208 208 proxy->r_cnt = 1;
209 209 proxy->r_type = RL_READER;
210 210 proxy->r_proxy = B_TRUE;
211 211 proxy->r_write_wanted = B_FALSE;
212 212 proxy->r_read_wanted = B_FALSE;
213 213 avl_add(tree, proxy);
214 214
215 215 return (proxy);
216 216 }
217 217
218 218 /*
219 219 * Split the range lock at the supplied offset
220 220 * returning the *front* proxy.
221 221 */
222 222 static rl_t *
223 223 zfs_range_split(avl_tree_t *tree, rl_t *rl, uint64_t off)
224 224 {
225 225 rl_t *front, *rear;
226 226
227 227 ASSERT3U(rl->r_len, >, 1);
228 228 ASSERT3U(off, >, rl->r_off);
229 229 ASSERT3U(off, <, rl->r_off + rl->r_len);
230 230 ASSERT(rl->r_write_wanted == B_FALSE);
231 231 ASSERT(rl->r_read_wanted == B_FALSE);
232 232
233 233 /* create the rear proxy range lock */
234 234 rear = kmem_alloc(sizeof (rl_t), KM_SLEEP);
235 235 rear->r_off = off;
236 236 rear->r_len = rl->r_off + rl->r_len - off;
237 237 rear->r_cnt = rl->r_cnt;
238 238 rear->r_type = RL_READER;
239 239 rear->r_proxy = B_TRUE;
240 240 rear->r_write_wanted = B_FALSE;
241 241 rear->r_read_wanted = B_FALSE;
242 242
243 243 front = zfs_range_proxify(tree, rl);
244 244 front->r_len = off - rl->r_off;
245 245
246 246 avl_insert_here(tree, rear, front, AVL_AFTER);
247 247 return (front);
248 248 }
249 249
250 250 /*
251 251 * Create and add a new proxy range lock for the supplied range.
252 252 */
253 253 static void
254 254 zfs_range_new_proxy(avl_tree_t *tree, uint64_t off, uint64_t len)
255 255 {
256 256 rl_t *rl;
257 257
258 258 ASSERT(len);
259 259 rl = kmem_alloc(sizeof (rl_t), KM_SLEEP);
260 260 rl->r_off = off;
261 261 rl->r_len = len;
262 262 rl->r_cnt = 1;
263 263 rl->r_type = RL_READER;
264 264 rl->r_proxy = B_TRUE;
265 265 rl->r_write_wanted = B_FALSE;
266 266 rl->r_read_wanted = B_FALSE;
267 267 avl_add(tree, rl);
268 268 }
269 269
270 270 static void
271 271 zfs_range_add_reader(avl_tree_t *tree, rl_t *new, rl_t *prev, avl_index_t where)
272 272 {
273 273 rl_t *next;
274 274 uint64_t off = new->r_off;
275 275 uint64_t len = new->r_len;
276 276
277 277 /*
278 278 * prev arrives either:
279 279 * - pointing to an entry at the same offset
280 280 * - pointing to the entry with the closest previous offset whose
281 281 * range may overlap with the new range
282 282 * - null, if there were no ranges starting before the new one
283 283 */
284 284 if (prev) {
285 285 if (prev->r_off + prev->r_len <= off) {
286 286 prev = NULL;
287 287 } else if (prev->r_off != off) {
288 288 /*
289 289 * convert to proxy if needed then
290 290 * split this entry and bump ref count
291 291 */
292 292 prev = zfs_range_split(tree, prev, off);
293 293 prev = AVL_NEXT(tree, prev); /* move to rear range */
294 294 }
295 295 }
296 296 ASSERT((prev == NULL) || (prev->r_off == off));
297 297
298 298 if (prev)
299 299 next = prev;
300 300 else
301 301 next = (rl_t *)avl_nearest(tree, where, AVL_AFTER);
302 302
303 303 if (next == NULL || off + len <= next->r_off) {
304 304 /* no overlaps, use the original new rl_t in the tree */
305 305 avl_insert(tree, new, where);
306 306 return;
307 307 }
308 308
309 309 if (off < next->r_off) {
310 310 /* Add a proxy for initial range before the overlap */
311 311 zfs_range_new_proxy(tree, off, next->r_off - off);
312 312 }
313 313
314 314 new->r_cnt = 0; /* will use proxies in tree */
315 315 /*
316 316 * We now search forward through the ranges, until we go past the end
317 317 * of the new range. For each entry we make it a proxy if it
318 318 * isn't already, then bump its reference count. If there are any
319 319 * gaps between the ranges then we create a new proxy range.
320 320 */
321 321 for (prev = NULL; next; prev = next, next = AVL_NEXT(tree, next)) {
322 322 if (off + len <= next->r_off)
323 323 break;
324 324 if (prev && prev->r_off + prev->r_len < next->r_off) {
325 325 /* there's a gap */
326 326 ASSERT3U(next->r_off, >, prev->r_off + prev->r_len);
327 327 zfs_range_new_proxy(tree, prev->r_off + prev->r_len,
328 328 next->r_off - (prev->r_off + prev->r_len));
329 329 }
330 330 if (off + len == next->r_off + next->r_len) {
331 331 /* exact overlap with end */
332 332 next = zfs_range_proxify(tree, next);
333 333 next->r_cnt++;
334 334 return;
335 335 }
336 336 if (off + len < next->r_off + next->r_len) {
337 337 /* new range ends in the middle of this block */
338 338 next = zfs_range_split(tree, next, off + len);
339 339 next->r_cnt++;
340 340 return;
341 341 }
342 342 ASSERT3U(off + len, >, next->r_off + next->r_len);
343 343 next = zfs_range_proxify(tree, next);
344 344 next->r_cnt++;
345 345 }
346 346
347 347 /* Add the remaining end range. */
348 348 zfs_range_new_proxy(tree, prev->r_off + prev->r_len,
349 349 (off + len) - (prev->r_off + prev->r_len));
350 350 }
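/*
 * Worked example of the proxy handling above (offsets are illustrative):
 * reader A already holds [0,10) as the only lock in the tree, then
 * reader B locks [5,15):
 *
 *	before:  [0,10)  cnt=1         (A's own rl_t in the tree)
 *	after:   [0,5)   cnt=1  proxy
 *	         [5,10)  cnt=2  proxy  (shared by A and B)
 *	         [10,15) cnt=1  proxy
 *
 * Both A's and B's original rl_t structures now have r_cnt == 0 and act
 * only as the callers' handles; their r_off/r_len are later used by
 * zfs_range_unlock_reader() to walk and release the matching proxies.
 */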
351 351
352 352 /*
353 353 * Check if a reader lock can be grabbed, or wait and recheck until available.
354 354 */
355 355 static void
356 356 zfs_range_lock_reader(znode_t *zp, rl_t *new)
357 357 {
358 358 avl_tree_t *tree = &zp->z_range_avl;
359 359 rl_t *prev, *next;
360 360 avl_index_t where;
361 361 uint64_t off = new->r_off;
362 362 uint64_t len = new->r_len;
363 363
364 364 /*
365 365 * Look for any writer locks in the range.
366 366 */
367 367 retry:
368 368 prev = avl_find(tree, new, &where);
369 369 if (prev == NULL)
370 370 prev = (rl_t *)avl_nearest(tree, where, AVL_BEFORE);
371 371
372 372 /*
373 373 * Check the previous range for a writer lock overlap.
374 374 */
375 375 if (prev && (off < prev->r_off + prev->r_len)) {
376 376 if ((prev->r_type == RL_WRITER) || (prev->r_write_wanted)) {
377 377 if (!prev->r_read_wanted) {
378 378 cv_init(&prev->r_rd_cv, NULL, CV_DEFAULT, NULL);
379 379 prev->r_read_wanted = B_TRUE;
380 380 }
381 381 cv_wait(&prev->r_rd_cv, &zp->z_range_lock);
382 382 goto retry;
383 383 }
384 384 if (off + len < prev->r_off + prev->r_len)
385 385 goto got_lock;
386 386 }
387 387
388 388 /*
389 389 * Search through the following ranges to see if there's
390 390 * any write lock overlap.
391 391 */
392 392 if (prev)
393 393 next = AVL_NEXT(tree, prev);
394 394 else
395 395 next = (rl_t *)avl_nearest(tree, where, AVL_AFTER);
396 396 for (; next; next = AVL_NEXT(tree, next)) {
397 397 if (off + len <= next->r_off)
398 398 goto got_lock;
399 399 if ((next->r_type == RL_WRITER) || (next->r_write_wanted)) {
400 400 if (!next->r_read_wanted) {
401 401 cv_init(&next->r_rd_cv, NULL, CV_DEFAULT, NULL);
402 402 next->r_read_wanted = B_TRUE;
403 403 }
404 404 cv_wait(&next->r_rd_cv, &zp->z_range_lock);
405 405 goto retry;
406 406 }
407 407 if (off + len <= next->r_off + next->r_len)
408 408 goto got_lock;
409 409 }
410 410
411 411 got_lock:
412 412 /*
413 413 * Add the read lock, which may involve splitting existing
414 414 * locks and bumping ref counts (r_cnt).
415 415 */
416 416 zfs_range_add_reader(tree, new, prev, where);
417 417 }
418 418
419 419 /*
420 420 * Lock a range (offset, length) as either shared (RL_READER)
421 421 * or exclusive (RL_WRITER). Returns the range lock structure
422 422 * for later unlocking or reducing the range (if the entire file was
423 423 * previously locked as RL_WRITER).
424 424 */
425 425 rl_t *
426 426 zfs_range_lock(znode_t *zp, uint64_t off, uint64_t len, rl_type_t type)
427 427 {
428 428 rl_t *new;
429 429
430 430 ASSERT(type == RL_READER || type == RL_WRITER || type == RL_APPEND);
431 431
432 432 new = kmem_alloc(sizeof (rl_t), KM_SLEEP);
433 433 new->r_zp = zp;
434 434 new->r_off = off;
435 435 if (len + off < off) /* overflow */
436 436 len = UINT64_MAX - off;
437 437 new->r_len = len;
438 438 new->r_cnt = 1; /* assume it's going to be in the tree */
439 439 new->r_type = type;
440 440 new->r_proxy = B_FALSE;
441 441 new->r_write_wanted = B_FALSE;
442 442 new->r_read_wanted = B_FALSE;
443 443
444 444 mutex_enter(&zp->z_range_lock);
445 445 if (type == RL_READER) {
446 446 /*
447 447 * First check for the usual case of no locks
448 448 */
449 449 if (avl_numnodes(&zp->z_range_avl) == 0)
450 450 avl_add(&zp->z_range_avl, new);
451 451 else
452 452 zfs_range_lock_reader(zp, new);
453 453 } else
454 454 zfs_range_lock_writer(zp, new); /* RL_WRITER or RL_APPEND */
455 455 mutex_exit(&zp->z_range_lock);
456 456 return (new);
457 457 }
458 458
459 459 /*
460 460 * Unlock a reader lock
461 461 */
462 462 static void
463 463 zfs_range_unlock_reader(znode_t *zp, rl_t *remove)
464 464 {
465 465 avl_tree_t *tree = &zp->z_range_avl;
466 466 rl_t *rl, *next = NULL;
467 467 uint64_t len;
468 468
469 469 /*
470 470 * The common case is when the remove entry is in the tree
471 471 * (cnt == 1), meaning there have been no other reader locks overlapping
472 472 * with this one. Otherwise the remove entry will have been
473 473 * removed from the tree and replaced by proxies (one or
474 474 * more ranges mapping to the entire range).
475 475 */
476 476 if (remove->r_cnt == 1) {
477 477 avl_remove(tree, remove);
478 478 if (remove->r_write_wanted) {
479 479 cv_broadcast(&remove->r_wr_cv);
480 480 cv_destroy(&remove->r_wr_cv);
481 481 }
482 482 if (remove->r_read_wanted) {
483 483 cv_broadcast(&remove->r_rd_cv);
484 484 cv_destroy(&remove->r_rd_cv);
485 485 }
486 486 } else {
487 487 ASSERT0(remove->r_cnt);
488 488 ASSERT0(remove->r_write_wanted);
489 489 ASSERT0(remove->r_read_wanted);
490 490 /*
491 491 * Find start proxy representing this reader lock,
492 492 * then decrement ref count on all proxies
493 493 * that make up this range, freeing them as needed.
494 494 */
495 495 rl = avl_find(tree, remove, NULL);
496 496 ASSERT(rl);
497 497 ASSERT(rl->r_cnt);
498 498 ASSERT(rl->r_type == RL_READER);
499 499 for (len = remove->r_len; len != 0; rl = next) {
500 500 len -= rl->r_len;
501 501 if (len) {
502 502 next = AVL_NEXT(tree, rl);
503 503 ASSERT(next);
504 504 ASSERT(rl->r_off + rl->r_len == next->r_off);
505 505 ASSERT(next->r_cnt);
506 506 ASSERT(next->r_type == RL_READER);
507 507 }
508 508 rl->r_cnt--;
509 509 if (rl->r_cnt == 0) {
510 510 avl_remove(tree, rl);
511 511 if (rl->r_write_wanted) {
512 512 cv_broadcast(&rl->r_wr_cv);
513 513 cv_destroy(&rl->r_wr_cv);
514 514 }
515 515 if (rl->r_read_wanted) {
516 516 cv_broadcast(&rl->r_rd_cv);
517 517 cv_destroy(&rl->r_rd_cv);
518 518 }
519 519 kmem_free(rl, sizeof (rl_t));
520 520 }
521 521 }
522 522 }
523 523 kmem_free(remove, sizeof (rl_t));
524 524 }
525 525
526 526 /*
527 527 * Unlock range and destroy range lock structure.
528 528 */
529 529 void
530 530 zfs_range_unlock(rl_t *rl)
531 531 {
532 532 znode_t *zp = rl->r_zp;
533 533
534 534 ASSERT(rl->r_type == RL_WRITER || rl->r_type == RL_READER);
535 535 ASSERT(rl->r_cnt == 1 || rl->r_cnt == 0);
536 536 ASSERT(!rl->r_proxy);
537 537
538 538 mutex_enter(&zp->z_range_lock);
539 539 if (rl->r_type == RL_WRITER) {
540 540 /* writer locks can't be shared or split */
541 541 avl_remove(&zp->z_range_avl, rl);
542 542 mutex_exit(&zp->z_range_lock);
543 543 if (rl->r_write_wanted) {
544 544 cv_broadcast(&rl->r_wr_cv);
545 545 cv_destroy(&rl->r_wr_cv);
546 546 }
547 547 if (rl->r_read_wanted) {
548 548 cv_broadcast(&rl->r_rd_cv);
549 549 cv_destroy(&rl->r_rd_cv);
550 550 }
551 551 kmem_free(rl, sizeof (rl_t));
552 552 } else {
553 553 /*
554 554 * lock may be shared, let zfs_range_unlock_reader()
555 555 * release the lock and free the rl_t
556 556 */
557 557 zfs_range_unlock_reader(zp, rl);
558 558 mutex_exit(&zp->z_range_lock);
559 559 }
560 560 }
561 561
562 562 /*
563 563 * Reduce range locked as RL_WRITER from whole file to specified range.
564 564 * Asserts the whole file is exclusively locked and so there's only one
565 565 * entry in the tree.
566 566 */
567 567 void
568 568 zfs_range_reduce(rl_t *rl, uint64_t off, uint64_t len)
569 569 {
570 570 znode_t *zp = rl->r_zp;
571 571
572 572 /* Ensure there are no other locks */
573 573 ASSERT(avl_numnodes(&zp->z_range_avl) == 1);
574 574 ASSERT(rl->r_off == 0);
575 575 ASSERT(rl->r_type == RL_WRITER);
576 576 ASSERT(!rl->r_proxy);
577 577 ASSERT3U(rl->r_len, ==, UINT64_MAX);
578 578 ASSERT3U(rl->r_cnt, ==, 1);
579 579
580 580 mutex_enter(&zp->z_range_lock);
581 581 rl->r_off = off;
582 582 rl->r_len = len;
583 583 mutex_exit(&zp->z_range_lock);
584 584 if (rl->r_write_wanted)
585 585 cv_broadcast(&rl->r_wr_cv);
586 586 if (rl->r_read_wanted)
587 587 cv_broadcast(&rl->r_rd_cv);
588 588 }
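/*
 * A sketch of the intended calling pattern for block size growth (the
 * variable names are illustrative, not taken from this file):
 *
 *	rl = zfs_range_lock(zp, woff, n, RL_WRITER);
 *	if (rl->r_len == UINT64_MAX) {
 *		...the lock was expanded to the whole file, so the
 *		   block size can be grown safely here...
 *		zfs_range_reduce(rl, woff, n);
 *	}
 *	...write the data for [woff, woff + n)...
 *	zfs_range_unlock(rl);
 */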
589 589
590 590 /*
591 591 * AVL comparison function used to order range locks
592 592 * Locks are ordered on the start offset of the range.
593 593 */
594 594 int
595 595 zfs_range_compare(const void *arg1, const void *arg2)
596 596 {
597 597 const rl_t *rl1 = arg1;
598 598 const rl_t *rl2 = arg2;
599 599
600 600 if (rl1->r_off > rl2->r_off)
601 601 return (1);
602 602 if (rl1->r_off < rl2->r_off)
603 603 return (-1);
604 604 return (0);
605 605 }
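/*
 * For reference, the per-znode state used throughout this file is
 * expected to be initialized by the znode code roughly as follows (a
 * sketch; it assumes rl_t embeds an avl_node_t named r_node, as
 * declared in zfs_rlock.h):
 *
 *	mutex_init(&zp->z_range_lock, NULL, MUTEX_DEFAULT, NULL);
 *	avl_create(&zp->z_range_avl, zfs_range_compare,
 *	    sizeof (rl_t), offsetof(rl_t, r_node));
 */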