/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

/*
 * This file contains the code to implement file range locking in
 * ZFS, although there isn't much specific to ZFS (all that comes to
 * mind is support for growing the blocksize).
 *
 * Interface
 * ---------
 * Defined in zfs_rlock.h but essentially:
 *	rl = zfs_range_lock(zp, off, len, lock_type);
 *	zfs_range_unlock(rl);
 *	zfs_range_reduce(rl, off, len);
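 *
 * A minimal usage sketch (only the three calls above are the real
 * interface; the read step here is assumed for illustration):
 *
 *	rl = zfs_range_lock(zp, off, len, RL_READER);
 *	... read the file data in [off, off + len) ...
 *	zfs_range_unlock(rl);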
 *
 * AVL tree
 * --------
 * An AVL tree is used to maintain the state of the existing ranges
 * that are locked for exclusive (writer) or shared (reader) use.
 * The starting range offset is used for searching and sorting the tree.
 *
 * Common case
 * -----------
 * The (hopefully) usual case is of no overlaps or contention for
 * locks. On entry to zfs_range_lock() a rl_t is allocated; the tree
 * is searched, no overlap is found, and *this* rl_t is placed in the
 * tree.
 *
 * Overlaps/Reference counting/Proxy locks
 * ---------------------------------------
 * The AVL code only allows one node at a particular offset. Also it's
 * very inefficient to search through all previous entries looking for
 * overlaps (because the very first entry in the ordered list might be
 * at offset 0 but cover the whole file).
 * So this implementation uses reference counts and proxy range locks.
 * Firstly, only reader locks use reference counts and proxy locks,
 * because writer locks are exclusive.
 * When a reader lock overlaps with another then a proxy lock is created
 * for that range and replaces the original lock. If the overlap
 * is exact then the reference count of the proxy is simply incremented.
 * Otherwise, the proxy lock is split into smaller lock ranges and
 * new proxy locks are created for the non-overlapping ranges.
 * The reference counts are adjusted accordingly.
 * Meanwhile, the original lock is kept around (this is the caller's
 * handle) and its offset and length are used when releasing the lock.
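 *
 * A worked example (a sketch; the offsets are arbitrary): reader A
 * locks [0, 100) first, so A's rl_t goes into the tree as-is. If
 * reader B then locks [50, 150), A's entry is replaced by proxies
 * [0, 50) with r_cnt 1 and [50, 100) with r_cnt 2, and a new proxy
 * [100, 150) with r_cnt 1 is added. A's and B's original rl_t's are
 * kept (with r_cnt 0) as the handles used to unlock.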
 *
 * Thread coordination
 * -------------------
 * In order to make wakeups efficient and to ensure multiple continuous
 * readers on a range don't starve a writer for the same range lock,
 * two condition variables are allocated in each rl_t.
 * If a writer (or reader) can't get a range it initializes the writer
 * (or reader) cv; sets a flag saying there's a writer (or reader)
 * waiting; and waits on that cv. When a thread unlocks that range it
 * wakes up all writers then all readers before destroying the lock.
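 *
 * For example (a sketch): if reader R holds [0, 10) and writer W
 * requests [5, 15), W initializes R's r_wr_cv, sets r_write_wanted,
 * and blocks. A later reader overlapping [0, 10) sees r_write_wanted
 * set and blocks on r_rd_cv, so R's unlock signals W before the
 * reader rather than letting readers starve W.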
 *
 * Append mode writes
 * ------------------
 * Append mode writes need to lock a range at the end of a file.
 * The offset of the end of the file is determined under the
 * range locking mutex, the lock type is converted from RL_APPEND to
 * RL_WRITER, and the range is locked.
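 *
 * A sketch of the pattern (roughly what zfs_write() does for FAPPEND;
 * "n" is the assumed byte count here):
 *
 *	rl = zfs_range_lock(zp, 0, n, RL_APPEND);
 *	woff = rl->r_off;	(the locked offset is the end of file)
 *	... write n bytes at woff ...
 *	zfs_range_unlock(rl);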
 *
 * Grow block handling
 * -------------------
 * ZFS supports multiple block sizes, currently up to 128K. The smallest
 * block size is used for the file, which is grown as needed. During
 * this growth all other writers and readers must be excluded.
 * So if the block size needs to be grown then the whole file is
 * exclusively locked, then later the caller will reduce the lock
 * range to just the range to be written using zfs_range_reduce().
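 *
 * A sketch of that sequence (roughly what zfs_write() does; the
 * block-size growing step is assumed and elided here):
 *
 *	rl = zfs_range_lock(zp, off, len, RL_WRITER);
 *	if (rl->r_len == UINT64_MAX) {
 *		... grow the block size under the whole-file lock ...
 *		zfs_range_reduce(rl, off, len);
 *	}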
 */

#include <sys/zfs_rlock.h>

/*
 * Check if a write lock can be grabbed, or wait and recheck until available.
 */
static void
zfs_range_lock_writer(znode_t *zp, rl_t *new)
{
	avl_tree_t *tree = &zp->z_range_avl;
	rl_t *rl;
	avl_index_t where;
	uint64_t end_size;
	uint64_t off = new->r_off;
	uint64_t len = new->r_len;

	for (;;) {
		/*
		 * Range locking is also used by zvol and uses a
		 * dummied-up znode. However, for zvol, we don't need to
		 * append or grow blocksize, and besides we don't have
		 * "sa" data or a z_zfsvfs - so skip that processing.
		 *
		 * Yes, this is ugly, and would be solved by not handling
		 * grow or append in range lock code. If that were done then
		 * we could make the range locking code generically available
		 * to other non-zfs consumers.
		 */
		if (zp->z_vnode) { /* caller is ZPL */
			/*
			 * If in append mode pick up the current end of file.
			 * This is done under z_range_lock to avoid races.
			 */
			if (new->r_type == RL_APPEND)
				new->r_off = zp->z_size;

			/*
			 * If we need to grow the block size then grab the
			 * whole file range. This is also done under
			 * z_range_lock to avoid races.
			 */
			end_size = MAX(zp->z_size, new->r_off + len);
			if (end_size > zp->z_blksz && (!ISP2(zp->z_blksz) ||
			    zp->z_blksz < zp->z_zfsvfs->z_max_blksz)) {
				new->r_off = 0;
				new->r_len = UINT64_MAX;
			}
		}

		/*
		 * First check for the usual case of no locks.
		 */
		if (avl_numnodes(tree) == 0) {
			new->r_type = RL_WRITER; /* convert to writer */
			avl_add(tree, new);
			return;
		}

		/*
		 * Look for any locks in the range.
		 */
		rl = avl_find(tree, new, &where);
		if (rl)
			goto wait; /* already locked at same offset */

		rl = (rl_t *)avl_nearest(tree, where, AVL_AFTER);
		if (rl && (rl->r_off < new->r_off + new->r_len))
			goto wait;

		rl = (rl_t *)avl_nearest(tree, where, AVL_BEFORE);
		if (rl && rl->r_off + rl->r_len > new->r_off)
			goto wait;

		new->r_type = RL_WRITER; /* convert possible RL_APPEND */
		avl_insert(tree, new, where);
		return;
wait:
		if (!rl->r_write_wanted) {
			cv_init(&rl->r_wr_cv, NULL, CV_DEFAULT, NULL);
			rl->r_write_wanted = B_TRUE;
		}
		cv_wait(&rl->r_wr_cv, &zp->z_range_lock);

		/* reset to original */
		new->r_off = off;
		new->r_len = len;
	}
}

/*
 * If this is an original (non-proxy) lock then replace it by
 * a proxy and return the proxy.
 */
static rl_t *
zfs_range_proxify(avl_tree_t *tree, rl_t *rl)
{
	rl_t *proxy;

	if (rl->r_proxy)
		return (rl); /* already a proxy */

	ASSERT3U(rl->r_cnt, ==, 1);
	ASSERT(rl->r_write_wanted == B_FALSE);
	ASSERT(rl->r_read_wanted == B_FALSE);
	avl_remove(tree, rl);
	rl->r_cnt = 0;

	/* create a proxy range lock */
	proxy = kmem_alloc(sizeof (rl_t), KM_SLEEP);
	proxy->r_off = rl->r_off;
	proxy->r_len = rl->r_len;
	proxy->r_cnt = 1;
	proxy->r_type = RL_READER;
	proxy->r_proxy = B_TRUE;
	proxy->r_write_wanted = B_FALSE;
	proxy->r_read_wanted = B_FALSE;
	avl_add(tree, proxy);

	return (proxy);
}

/*
 * Split the range lock at the supplied offset
 * returning the *front* proxy.
 */
static rl_t *
zfs_range_split(avl_tree_t *tree, rl_t *rl, uint64_t off)
{
	rl_t *front, *rear;

	ASSERT3U(rl->r_len, >, 1);
	ASSERT3U(off, >, rl->r_off);
	ASSERT3U(off, <, rl->r_off + rl->r_len);
	ASSERT(rl->r_write_wanted == B_FALSE);
	ASSERT(rl->r_read_wanted == B_FALSE);

	/* create the rear proxy range lock */
	rear = kmem_alloc(sizeof (rl_t), KM_SLEEP);
	rear->r_off = off;
	rear->r_len = rl->r_off + rl->r_len - off;
	rear->r_cnt = rl->r_cnt;
	rear->r_type = RL_READER;
	rear->r_proxy = B_TRUE;
	rear->r_write_wanted = B_FALSE;
	rear->r_read_wanted = B_FALSE;

	front = zfs_range_proxify(tree, rl);
	front->r_len = off - rl->r_off;

	avl_insert_here(tree, rear, front, AVL_AFTER);
	return (front);
}

/*
 * Create and add a new proxy range lock for the supplied range.
 */
static void
zfs_range_new_proxy(avl_tree_t *tree, uint64_t off, uint64_t len)
{
	rl_t *rl;

	ASSERT(len);
	rl = kmem_alloc(sizeof (rl_t), KM_SLEEP);
	rl->r_off = off;
	rl->r_len = len;
	rl->r_cnt = 1;
	rl->r_type = RL_READER;
	rl->r_proxy = B_TRUE;
	rl->r_write_wanted = B_FALSE;
	rl->r_read_wanted = B_FALSE;
	avl_add(tree, rl);
}

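/*
 * Add the new reader lock to the tree, using proxies as needed.
 * "prev" and "where" come from the caller's AVL search (see
 * zfs_range_lock_reader()). Overlapped entries are converted to
 * proxies and/or split and their reference counts bumped, and new
 * proxies are created for any uncovered gaps in [off, off + len).
 */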
static void
zfs_range_add_reader(avl_tree_t *tree, rl_t *new, rl_t *prev, avl_index_t where)
{
	rl_t *next;
	uint64_t off = new->r_off;
	uint64_t len = new->r_len;

	/*
	 * prev arrives either:
	 * - pointing to an entry at the same offset
	 * - pointing to the entry with the closest previous offset whose
	 *   range may overlap with the new range
	 * - null, if there were no ranges starting before the new one
	 */
	if (prev) {
		if (prev->r_off + prev->r_len <= off) {
			prev = NULL;
		} else if (prev->r_off != off) {
			/*
			 * convert to proxy if needed then
			 * split this entry and bump ref count
			 */
			prev = zfs_range_split(tree, prev, off);
			prev = AVL_NEXT(tree, prev); /* move to rear range */
		}
	}
	ASSERT((prev == NULL) || (prev->r_off == off));

	if (prev)
		next = prev;
	else
		next = (rl_t *)avl_nearest(tree, where, AVL_AFTER);

	if (next == NULL || off + len <= next->r_off) {
		/* no overlaps, use the original new rl_t in the tree */
		avl_insert(tree, new, where);
		return;
	}

	if (off < next->r_off) {
		/* Add a proxy for initial range before the overlap */
		zfs_range_new_proxy(tree, off, next->r_off - off);
	}

	new->r_cnt = 0; /* will use proxies in tree */
	/*
	 * We now search forward through the ranges, until we go past the end
	 * of the new range. For each entry we make it a proxy if it
	 * isn't already, then bump its reference count. If there are any
	 * gaps between the ranges then we create a new proxy range.
	 */
	for (prev = NULL; next; prev = next, next = AVL_NEXT(tree, next)) {
		if (off + len <= next->r_off)
			break;
		if (prev && prev->r_off + prev->r_len < next->r_off) {
			/* there's a gap */
			ASSERT3U(next->r_off, >, prev->r_off + prev->r_len);
			zfs_range_new_proxy(tree, prev->r_off + prev->r_len,
			    next->r_off - (prev->r_off + prev->r_len));
		}
		if (off + len == next->r_off + next->r_len) {
			/* exact overlap with end */
			next = zfs_range_proxify(tree, next);
			next->r_cnt++;
			return;
		}
		if (off + len < next->r_off + next->r_len) {
			/* new range ends in the middle of this block */
			next = zfs_range_split(tree, next, off + len);
			next->r_cnt++;
			return;
		}
		ASSERT3U(off + len, >, next->r_off + next->r_len);
		next = zfs_range_proxify(tree, next);
		next->r_cnt++;
	}

	/* Add the remaining end range. */
	zfs_range_new_proxy(tree, prev->r_off + prev->r_len,
	    (off + len) - (prev->r_off + prev->r_len));
}

/*
 * Check if a reader lock can be grabbed, or wait and recheck until available.
 */
static void
zfs_range_lock_reader(znode_t *zp, rl_t *new)
{
	avl_tree_t *tree = &zp->z_range_avl;
	rl_t *prev, *next;
	avl_index_t where;
	uint64_t off = new->r_off;
	uint64_t len = new->r_len;

	/*
	 * Look for any writer locks in the range.
	 */
retry:
	prev = avl_find(tree, new, &where);
	if (prev == NULL)
		prev = (rl_t *)avl_nearest(tree, where, AVL_BEFORE);

	/*
	 * Check the previous range for a writer lock overlap.
	 */
	if (prev && (off < prev->r_off + prev->r_len)) {
		if ((prev->r_type == RL_WRITER) || (prev->r_write_wanted)) {
			if (!prev->r_read_wanted) {
				cv_init(&prev->r_rd_cv, NULL, CV_DEFAULT, NULL);
				prev->r_read_wanted = B_TRUE;
			}
			cv_wait(&prev->r_rd_cv, &zp->z_range_lock);
			goto retry;
		}
		if (off + len < prev->r_off + prev->r_len)
			goto got_lock;
	}

	/*
	 * Search through the following ranges to see if there's
	 * an overlapping write lock.
	 */
	if (prev)
		next = AVL_NEXT(tree, prev);
	else
		next = (rl_t *)avl_nearest(tree, where, AVL_AFTER);
	for (; next; next = AVL_NEXT(tree, next)) {
		if (off + len <= next->r_off)
			goto got_lock;
		if ((next->r_type == RL_WRITER) || (next->r_write_wanted)) {
			if (!next->r_read_wanted) {
				cv_init(&next->r_rd_cv, NULL, CV_DEFAULT, NULL);
				next->r_read_wanted = B_TRUE;
			}
			cv_wait(&next->r_rd_cv, &zp->z_range_lock);
			goto retry;
		}
		if (off + len <= next->r_off + next->r_len)
			goto got_lock;
	}

got_lock:
	/*
	 * Add the read lock, which may involve splitting existing
	 * locks and bumping ref counts (r_cnt).
	 */
	zfs_range_add_reader(tree, new, prev, where);
}

/*
 * Lock a range (offset, length) as either shared (RL_READER)
 * or exclusive (RL_WRITER). Returns the range lock structure
 * for later unlocking or range reduction (if the entire file
 * was previously locked as RL_WRITER).
 */
rl_t *
zfs_range_lock(znode_t *zp, uint64_t off, uint64_t len, rl_type_t type)
{
	rl_t *new;

	ASSERT(type == RL_READER || type == RL_WRITER || type == RL_APPEND);

	new = kmem_alloc(sizeof (rl_t), KM_SLEEP);
	new->r_zp = zp;
	new->r_off = off;
	if (len + off < off) /* overflow */
		len = UINT64_MAX - off;
	new->r_len = len;
	new->r_cnt = 1; /* assume it's going to be in the tree */
	new->r_type = type;
	new->r_proxy = B_FALSE;
	new->r_write_wanted = B_FALSE;
	new->r_read_wanted = B_FALSE;

	mutex_enter(&zp->z_range_lock);
	if (type == RL_READER) {
		/*
		 * First check for the usual case of no locks.
		 */
		if (avl_numnodes(&zp->z_range_avl) == 0)
			avl_add(&zp->z_range_avl, new);
		else
			zfs_range_lock_reader(zp, new);
	} else
		zfs_range_lock_writer(zp, new); /* RL_WRITER or RL_APPEND */
	mutex_exit(&zp->z_range_lock);
	return (new);
}

/*
 * Unlock a reader lock.
 */
static void
zfs_range_unlock_reader(znode_t *zp, rl_t *remove)
{
	avl_tree_t *tree = &zp->z_range_avl;
	rl_t *rl, *next;
	uint64_t len;

	/*
	 * The common case is when the remove entry is in the tree
	 * (cnt == 1), meaning there have been no other reader locks
	 * overlapping with this one. Otherwise the remove entry will
	 * have been removed from the tree and replaced by proxies (one
	 * or more ranges mapping to the entire range).
	 */
	if (remove->r_cnt == 1) {
		avl_remove(tree, remove);
		if (remove->r_write_wanted) {
			cv_broadcast(&remove->r_wr_cv);
			cv_destroy(&remove->r_wr_cv);
		}
		if (remove->r_read_wanted) {
			cv_broadcast(&remove->r_rd_cv);
			cv_destroy(&remove->r_rd_cv);
		}
	} else {
		ASSERT0(remove->r_cnt);
		ASSERT0(remove->r_write_wanted);
		ASSERT0(remove->r_read_wanted);
		/*
		 * Find start proxy representing this reader lock,
		 * then decrement ref count on all proxies
		 * that make up this range, freeing them as needed.
		 */
		rl = avl_find(tree, remove, NULL);
		ASSERT(rl);
		ASSERT(rl->r_cnt);
		ASSERT(rl->r_type == RL_READER);
		for (len = remove->r_len; len != 0; rl = next) {
			len -= rl->r_len;
			if (len) {
				next = AVL_NEXT(tree, rl);
				ASSERT(next);
				ASSERT(rl->r_off + rl->r_len == next->r_off);
				ASSERT(next->r_cnt);
				ASSERT(next->r_type == RL_READER);
			}
			rl->r_cnt--;
			if (rl->r_cnt == 0) {
				avl_remove(tree, rl);
				if (rl->r_write_wanted) {
					cv_broadcast(&rl->r_wr_cv);
					cv_destroy(&rl->r_wr_cv);
				}
				if (rl->r_read_wanted) {
					cv_broadcast(&rl->r_rd_cv);
					cv_destroy(&rl->r_rd_cv);
				}
				kmem_free(rl, sizeof (rl_t));
			}
		}
	}
	kmem_free(remove, sizeof (rl_t));
}

/*
 * Unlock range and destroy range lock structure.
 */
void
zfs_range_unlock(rl_t *rl)
{
	znode_t *zp = rl->r_zp;

	ASSERT(rl->r_type == RL_WRITER || rl->r_type == RL_READER);
	ASSERT(rl->r_cnt == 1 || rl->r_cnt == 0);
	ASSERT(!rl->r_proxy);

	mutex_enter(&zp->z_range_lock);
	if (rl->r_type == RL_WRITER) {
		/* writer locks can't be shared or split */
		avl_remove(&zp->z_range_avl, rl);
		mutex_exit(&zp->z_range_lock);
		if (rl->r_write_wanted) {
			cv_broadcast(&rl->r_wr_cv);
			cv_destroy(&rl->r_wr_cv);
		}
		if (rl->r_read_wanted) {
			cv_broadcast(&rl->r_rd_cv);
			cv_destroy(&rl->r_rd_cv);
		}
		kmem_free(rl, sizeof (rl_t));
	} else {
		/*
		 * lock may be shared, let zfs_range_unlock_reader()
		 * release the lock and free the rl_t
		 */
		zfs_range_unlock_reader(zp, rl);
		mutex_exit(&zp->z_range_lock);
	}
}

/*
 * Reduce range locked as RL_WRITER from whole file to specified range.
 * Asserts the whole file is exclusively locked and so there's only one
 * entry in the tree.
 */
void
zfs_range_reduce(rl_t *rl, uint64_t off, uint64_t len)
{
	znode_t *zp = rl->r_zp;

	/* Ensure there are no other locks */
	ASSERT(avl_numnodes(&zp->z_range_avl) == 1);
	ASSERT(rl->r_off == 0);
	ASSERT(rl->r_type == RL_WRITER);
	ASSERT(!rl->r_proxy);
	ASSERT3U(rl->r_len, ==, UINT64_MAX);
	ASSERT3U(rl->r_cnt, ==, 1);

	mutex_enter(&zp->z_range_lock);
	rl->r_off = off;
	rl->r_len = len;
	mutex_exit(&zp->z_range_lock);
	if (rl->r_write_wanted)
		cv_broadcast(&rl->r_wr_cv);
	if (rl->r_read_wanted)
		cv_broadcast(&rl->r_rd_cv);
}

/*
 * AVL comparison function used to order range locks.
 * Locks are ordered on the start offset of the range.
 */
int
zfs_range_compare(const void *arg1, const void *arg2)
{
	const rl_t *rl1 = arg1;
	const rl_t *rl2 = arg2;

	if (rl1->r_off > rl2->r_off)
		return (1);
	if (rl1->r_off < rl2->r_off)
		return (-1);
	return (0);
}
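
/*
 * Consumer setup sketch (illustrative, not part of this file): the
 * znode code is expected to create the per-znode tree and mutex
 * roughly as below, assuming rl_t's AVL linkage field is r_node as
 * declared in zfs_rlock.h:
 *
 *	mutex_init(&zp->z_range_lock, NULL, MUTEX_DEFAULT, NULL);
 *	avl_create(&zp->z_range_avl, zfs_range_compare,
 *	    sizeof (rl_t), offsetof(rl_t, r_node));
 */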