Print this page
3746 ZRLs are racy
Submitted by: Justin Gibbs <justing@spectralogic.com>
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/fs/zfs/zrlock.c
+++ new/usr/src/uts/common/fs/zfs/zrlock.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24
25 25 /*
26 26 * A Zero Reference Lock (ZRL) is a reference count that can lock out new
27 27 * references only when the count is zero and only without waiting if the count
28 28 * is not already zero. It is similar to a read-write lock in that it allows
29 29 * multiple readers and only a single writer, but it does not allow a writer to
30 30 * block while waiting for readers to exit, and therefore the question of
31 31 * reader/writer priority is moot (no WRWANT bit). Since the equivalent of
32 32 * rw_enter(&lock, RW_WRITER) is disallowed and only tryenter() is allowed, it
33 33 * is perfectly safe for the same reader to acquire the same lock multiple
34 34 * times. The fact that a ZRL is reentrant for readers (through multiple calls
35 35 * to zrl_add()) makes it convenient for determining whether something is
36 36 * actively referenced without the fuss of flagging lock ownership across
37 37 * function calls.
38 38 */
39 39 #include <sys/zrlock.h>
40 40
41 41 /*
42 42 * A ZRL can be locked only while there are zero references, so ZRL_LOCKED is
43 43 * treated as zero references.
44 44 */
45 45 #define ZRL_LOCKED ((uint32_t)-1)
46 46 #define ZRL_DESTROYED -2
47 47
48 48 void
49 49 zrl_init(zrlock_t *zrl)
50 50 {
51 51 mutex_init(&zrl->zr_mtx, NULL, MUTEX_DEFAULT, NULL);
52 52 zrl->zr_refcount = 0;
53 53 cv_init(&zrl->zr_cv, NULL, CV_DEFAULT, NULL);
54 54 #ifdef ZFS_DEBUG
55 55 zrl->zr_owner = NULL;
56 56 zrl->zr_caller = NULL;
57 57 #endif
58 58 }
59 59
60 60 void
61 61 zrl_destroy(zrlock_t *zrl)
62 62 {
63 63 ASSERT(zrl->zr_refcount == 0);
64 64
65 65 mutex_destroy(&zrl->zr_mtx);
66 66 zrl->zr_refcount = ZRL_DESTROYED;
67 67 cv_destroy(&zrl->zr_cv);
68 68 }
↓ open down ↓ |
68 lines elided |
↑ open up ↑ |
69 69
70 70 void
71 71 #ifdef ZFS_DEBUG
72 72 zrl_add_debug(zrlock_t *zrl, const char *zc)
73 73 #else
74 74 zrl_add(zrlock_t *zrl)
75 75 #endif
76 76 {
77 77 uint32_t n = (uint32_t)zrl->zr_refcount;
78 78
79 - while (n != ZRL_LOCKED) {
80 - uint32_t cas = atomic_cas_32(
81 - (uint32_t *)&zrl->zr_refcount, n, n + 1);
82 - if (cas == n) {
83 - ASSERT((int32_t)n >= 0);
84 -#ifdef ZFS_DEBUG
85 - if (zrl->zr_owner == curthread) {
86 - DTRACE_PROBE2(zrlock__reentry,
87 - zrlock_t *, zrl, uint32_t, n);
88 - }
89 - zrl->zr_owner = curthread;
90 - zrl->zr_caller = zc;
79 + while (1) {
80 + while (n != ZRL_LOCKED) {
81 + uint32_t cas = atomic_cas_32(
82 + (uint32_t *)&zrl->zr_refcount, n, n + 1);
83 + if (cas == n) {
84 + ASSERT((int32_t)n >= 0);
85 +#ifdef ZFS_DEBUG
86 + if (zrl->zr_owner == curthread) {
87 + DTRACE_PROBE2(zrlock__reentry,
88 + zrlock_t *, zrl, uint32_t, n);
89 + }
90 + zrl->zr_owner = curthread;
91 + zrl->zr_caller = zc;
91 92 #endif
92 - return;
93 + return;
94 + }
95 + n = cas;
93 96 }
94 - n = cas;
95 - }
96 -
97 - mutex_enter(&zrl->zr_mtx);
98 - while (zrl->zr_refcount == ZRL_LOCKED) {
99 - cv_wait(&zrl->zr_cv, &zrl->zr_mtx);
97 + mutex_enter(&zrl->zr_mtx);
98 + while (zrl->zr_refcount == ZRL_LOCKED) {
99 + cv_wait(&zrl->zr_cv, &zrl->zr_mtx);
100 + }
101 + mutex_exit(&zrl->zr_mtx);
100 102 }
101 - ASSERT(zrl->zr_refcount >= 0);
102 - zrl->zr_refcount++;
103 -#ifdef ZFS_DEBUG
104 - zrl->zr_owner = curthread;
105 - zrl->zr_caller = zc;
106 -#endif
107 - mutex_exit(&zrl->zr_mtx);
108 103 }
109 104
110 105 void
111 106 zrl_remove(zrlock_t *zrl)
112 107 {
113 108 uint32_t n;
114 109
115 110 n = atomic_dec_32_nv((uint32_t *)&zrl->zr_refcount);
116 111 ASSERT((int32_t)n >= 0);
117 112 #ifdef ZFS_DEBUG
118 113 if (zrl->zr_owner == curthread) {
119 114 zrl->zr_owner = NULL;
120 115 zrl->zr_caller = NULL;
121 116 }
122 117 #endif
123 118 }
124 119
125 120 int
126 121 zrl_tryenter(zrlock_t *zrl)
127 122 {
128 123 uint32_t n = (uint32_t)zrl->zr_refcount;
129 124
130 125 if (n == 0) {
131 126 uint32_t cas = atomic_cas_32(
132 127 (uint32_t *)&zrl->zr_refcount, 0, ZRL_LOCKED);
133 128 if (cas == 0) {
134 129 #ifdef ZFS_DEBUG
135 130 ASSERT(zrl->zr_owner == NULL);
136 131 zrl->zr_owner = curthread;
137 132 #endif
138 133 return (1);
139 134 }
140 135 }
141 136
142 137 ASSERT((int32_t)n > ZRL_DESTROYED);
143 138
144 139 return (0);
145 140 }
146 141
/*
 * Release the lock taken by a successful zrl_tryenter() and wake any
 * threads blocked in zrl_add().  The refcount store and cv_broadcast()
 * are done under zr_mtx so a waiter cannot miss the wakeup between
 * testing zr_refcount and calling cv_wait().
 */
void
zrl_exit(zrlock_t *zrl)
{
	ASSERT(zrl->zr_refcount == ZRL_LOCKED);

	mutex_enter(&zrl->zr_mtx);
#ifdef ZFS_DEBUG
	ASSERT(zrl->zr_owner == curthread);
	zrl->zr_owner = NULL;
	membar_producer();	/* make sure the owner store happens first */
#endif
	zrl->zr_refcount = 0;
	cv_broadcast(&zrl->zr_cv);
	mutex_exit(&zrl->zr_mtx);
}
162 157
163 158 int
164 159 zrl_refcount(zrlock_t *zrl)
165 160 {
166 161 ASSERT(zrl->zr_refcount > ZRL_DESTROYED);
167 162
168 163 int n = (int)zrl->zr_refcount;
169 164 return (n <= 0 ? 0 : n);
170 165 }
171 166
172 167 int
173 168 zrl_is_zero(zrlock_t *zrl)
174 169 {
175 170 ASSERT(zrl->zr_refcount > ZRL_DESTROYED);
176 171
177 172 return (zrl->zr_refcount <= 0);
178 173 }
179 174
180 175 int
181 176 zrl_is_locked(zrlock_t *zrl)
182 177 {
183 178 ASSERT(zrl->zr_refcount > ZRL_DESTROYED);
184 179
185 180 return (zrl->zr_refcount == ZRL_LOCKED);
186 181 }
187 182
#ifdef ZFS_DEBUG
/*
 * Debug-only accessor: return the thread most recently recorded as
 * holding this ZRL (set by zrl_add()/zrl_tryenter(), cleared by
 * zrl_remove()/zrl_exit()).
 */
kthread_t *
zrl_owner(zrlock_t *zrl)
{
	return (zrl->zr_owner);
}
#endif
↓ open down ↓ |
77 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX