{
	mutex_init(&rrl->rr_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&rrl->rr_cv, NULL, CV_DEFAULT, NULL);
	rrl->rr_writer = NULL;
	refcount_create(&rrl->rr_anon_rcount);
	refcount_create(&rrl->rr_linked_rcount);
	rrl->rr_writer_wanted = B_FALSE;
	rrl->rr_track_all = track_all;
}

void
rrw_destroy(rrwlock_t *rrl)
{
	mutex_destroy(&rrl->rr_lock);
	cv_destroy(&rrl->rr_cv);
	ASSERT(rrl->rr_writer == NULL);
	refcount_destroy(&rrl->rr_anon_rcount);
	refcount_destroy(&rrl->rr_linked_rcount);
}
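/*
 * Typical lifecycle (illustrative sketch, not part of this file; assumes
 * the rrw_init(rrwlock_t *, boolean_t track_all) entry point whose body
 * appears above):
 *
 *	rrwlock_t lock;
 *
 *	rrw_init(&lock, B_FALSE);	// anonymous read counts are enough
 *	rrw_enter_read(&lock, FTAG);
 *	...				// read-side critical section
 *	rrw_exit(&lock, FTAG);
 *	rrw_destroy(&lock);		// asserts no writer still holds it
 */

/*
 * Common reader-entry path. With 'prio' set, a pending writer
 * (rr_writer_wanted) does not block this reader; see rrw_enter_read_prio()
 * below. Readers are tracked per-thread in rr_linked_rcount when a writer
 * is waiting or rr_track_all is set, and counted anonymously in
 * rr_anon_rcount otherwise.
 */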
static void
rrw_enter_read_impl(rrwlock_t *rrl, boolean_t prio, void *tag)
{
	mutex_enter(&rrl->rr_lock);
#if !defined(DEBUG) && defined(_KERNEL)
	if (rrl->rr_writer == NULL && !rrl->rr_writer_wanted &&
	    !rrl->rr_track_all) {
		rrl->rr_anon_rcount.rc_count++;
		mutex_exit(&rrl->rr_lock);
		return;
	}
	DTRACE_PROBE(zfs__rrwfastpath__rdmiss);
#endif
	ASSERT(rrl->rr_writer != curthread);
	ASSERT(refcount_count(&rrl->rr_anon_rcount) >= 0);

	while (rrl->rr_writer != NULL || (rrl->rr_writer_wanted &&
	    refcount_is_zero(&rrl->rr_anon_rcount) && !prio &&
	    rrn_find(rrl) == NULL))
		cv_wait(&rrl->rr_cv, &rrl->rr_lock);

	if (rrl->rr_writer_wanted || rrl->rr_track_all) {
		/* may or may not be a re-entrant enter */
		rrn_add(rrl, tag);
		(void) refcount_add(&rrl->rr_linked_rcount, tag);
	} else {
		(void) refcount_add(&rrl->rr_anon_rcount, tag);
	}
	ASSERT(rrl->rr_writer == NULL);
	mutex_exit(&rrl->rr_lock);
}

void
rrw_enter_read(rrwlock_t *rrl, void *tag)
{
	rrw_enter_read_impl(rrl, B_FALSE, tag);
}
/*
 * Take a read lock even when there are pending write lock requests. This is
 * needed to take the lock re-entrantly from a different (but related)
 * thread: the normal per-thread tracking that lets a re-entrant reader
 * overrule a pending writer cannot see the other thread's hold, so the
 * caller must pass an explicit hint instead.
 */
void
rrw_enter_read_prio(rrwlock_t *rrl, void *tag)
{
	rrw_enter_read_impl(rrl, B_TRUE, tag);
}
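/*
 * Illustrative sketch (not part of this file): thread A already holds the
 * lock and hands work to a related thread B. From B, rrn_find() cannot see
 * A's hold, so B uses rrw_enter_read_prio() to avoid blocking behind a
 * pending writer that is itself waiting on A:
 *
 *	rrw_enter_read(&lock, FTAG);		// thread A takes a read hold
 *	...					// A dispatches work to B, waits
 *	rrw_enter_read_prio(&lock, FTAG);	// B: ignore rr_writer_wanted
 *	...
 *	rrw_exit(&lock, FTAG);			// B releases its hold
 */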
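/*
 * Writers are exclusive: wait until any current writer and all readers,
 * anonymous and linked, have drained. Setting rr_writer_wanted stops new
 * non-re-entrant readers from entering, so the writer is not starved.
 */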
void
rrw_enter_write(rrwlock_t *rrl)
{
	mutex_enter(&rrl->rr_lock);
	ASSERT(rrl->rr_writer != curthread);

	while (refcount_count(&rrl->rr_anon_rcount) > 0 ||
	    refcount_count(&rrl->rr_linked_rcount) > 0 ||
	    rrl->rr_writer != NULL) {
		rrl->rr_writer_wanted = B_TRUE;
		cv_wait(&rrl->rr_cv, &rrl->rr_lock);
	}
	rrl->rr_writer_wanted = B_FALSE;
	rrl->rr_writer = curthread;
	mutex_exit(&rrl->rr_lock);
}

void
rrw_enter(rrwlock_t *rrl, krw_t rw, void *tag)
{
	if (rw == RW_READER)