11909 THREAD_KPRI_RELEASE does nothing of the sort
Reviewed by: Bryan Cantrill <bryan@joyent.com>
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
--- old/usr/src/uts/common/os/rwlock.c
+++ new/usr/src/uts/common/os/rwlock.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 */
25 25
26 26 /*
27 - * Copyright (c) 2013, Joyent, Inc. All rights reserved.
27 + * Copyright 2019 Joyent, Inc.
28 28 */
29 29
30 30 #include <sys/param.h>
31 31 #include <sys/thread.h>
32 32 #include <sys/cmn_err.h>
33 33 #include <sys/debug.h>
34 34 #include <sys/cpuvar.h>
35 35 #include <sys/sobject.h>
36 36 #include <sys/turnstile.h>
37 37 #include <sys/rwlock.h>
38 38 #include <sys/rwlock_impl.h>
39 39 #include <sys/atomic.h>
40 40 #include <sys/lockstat.h>
41 41
42 42 /*
43 43 * Big Theory Statement for readers/writer locking primitives.
44 44 *
45 45 * An rwlock provides exclusive access to a single thread ("writer") or
46 46 * concurrent access to multiple threads ("readers"). See rwlock(9F)
47 47 * for a full description of the interfaces and programming model.
48 48 * The rest of this comment describes the implementation.
49 49 *
50 50 * An rwlock is a single word with the following structure:
51 51 *
52 52 * ---------------------------------------------------------------------
53 53 * | OWNER (writer) or HOLD COUNT (readers) | WRLOCK | WRWANT | WAIT |
54 54 * ---------------------------------------------------------------------
55 55 * 63 / 31 .. 3 2 1 0
56 56 *
57 57 * The waiters bit (0) indicates whether any threads are blocked waiting
58 58 * for the lock. The write-wanted bit (1) indicates whether any threads
59 59 * are blocked waiting for write access. The write-locked bit (2) indicates
60 60 * whether the lock is held by a writer, which determines whether the upper
61 61 * bits (3..31 in ILP32, 3..63 in LP64) should be interpreted as the owner
62 62 * (thread pointer) or the hold count (number of readers).
63 63 *
64 64 * In the absence of any contention, a writer gets the lock by setting
65 65 * this word to (curthread | RW_WRITE_LOCKED); a reader gets the lock
66 66 * by incrementing the hold count (i.e. adding 8, aka RW_READ_LOCK).
67 67 *
68 68 * A writer will fail to acquire the lock if any other thread owns it.
69 69 * A reader will fail if the lock is either owned (in the RW_READER and
70 70 * RW_READER_STARVEWRITER cases) or wanted by a writer (in the RW_READER
71 71 * case). rw_tryenter() returns 0 in these cases; rw_enter() blocks until
72 72 * the lock becomes available.
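
	[Editor's note: to make the encoding concrete, here is a minimal
	user-level sketch of the RW_READER fast path against this word
	layout, using C11 atomics in place of the kernel's casip(). The
	flag values mirror rwlock_impl.h; try_read_enter() itself is
	hypothetical.

	    #include <stdatomic.h>
	    #include <stdint.h>

	    #define RW_HAS_WAITERS   0x01UL  /* bit 0: blocked threads exist */
	    #define RW_WRITE_WANTED  0x02UL  /* bit 1: a writer is blocked */
	    #define RW_WRITE_LOCKED  0x04UL  /* bit 2: held by a writer */
	    #define RW_READ_LOCK     0x08UL  /* one hold; count starts at bit 3 */
	    #define RW_WRITE_CLAIMED (RW_WRITE_LOCKED | RW_WRITE_WANTED)

	    /*
	     * RW_READER fast path: fail if the lock is owned or wanted
	     * by a writer, otherwise atomically bump the hold count.
	     */
	    static int
	    try_read_enter(_Atomic uintptr_t *wwwh)
	    {
	            uintptr_t old = atomic_load(wwwh);

	            if (old & RW_WRITE_CLAIMED)
	                    return (0);
	            return (atomic_compare_exchange_strong(wwwh, &old,
	                old + RW_READ_LOCK));
	    }
	]
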
73 73 *
74 74 * When a thread blocks it acquires the rwlock's hashed turnstile lock and
75 75 * attempts to set RW_HAS_WAITERS (and RW_WRITE_WANTED in the writer case)
76 76 * atomically *only if the lock still appears busy*. A thread must never
77 77 * accidentally block for an available lock since there would be no owner
78 78 * to awaken it. casip() provides the required atomicity. Once casip()
79 79 * succeeds, the decision to block becomes final and irreversible. The
80 80 * thread will not become runnable again until it has been granted ownership
81 81 * of the lock via direct handoff from a former owner as described below.
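
	[Editor's note: the same decision point, in the user-level style of
	the sketch above; the kernel's equivalent is the casip() loop
	visible in rw_enter_sleep() further down. mark_waiting() is
	hypothetical.

	    /*
	     * Set RW_HAS_WAITERS only while the lock still appears busy.
	     * Returns 0 if the lock freed up (caller retries acquisition
	     * rather than blocking); returns 1 once the decision to block
	     * has become final.
	     */
	    static int
	    mark_waiting(_Atomic uintptr_t *wwwh, uintptr_t lock_busy)
	    {
	            uintptr_t old, new;

	            do {
	                    old = atomic_load(wwwh);
	                    if ((old & lock_busy) == 0)
	                            return (0);
	                    new = old | RW_HAS_WAITERS;
	            } while (old != new &&
	                !atomic_compare_exchange_strong(wwwh, &old, new));
	            return (1);
	    }
	]
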
82 82 *
83 83 * In the absence of any waiters, rw_exit() just clears the lock (if it
84 84 * is write-locked) or decrements the hold count (if it is read-locked).
85 85 * Note that even if waiters are present, decrementing the hold count
86 86 * to a non-zero value requires no special action since the lock is still
87 87 * held by at least one other thread.
88 88 *
89 89 * On the "final exit" (transition to unheld state) of a lock with waiters,
90 90 * rw_exit_wakeup() grabs the turnstile lock and transfers ownership directly
91 91 * to the next writer or set of readers. There are several advantages to this
92 92 * approach: (1) it closes all windows for priority inversion (when a new
93 93 * writer has grabbed the lock but has not yet inherited from blocked readers);
94 94 * (2) it prevents starvation of equal-priority threads by granting the lock
95 95 * in FIFO order; (3) it eliminates the need for a write-wanted count -- a
96 96 * single bit suffices because the lock remains held until all waiting
97 97 * writers are gone; (4) when we awaken N readers we can perform a single
98 98 * "atomic_add(&x, N)" to set the total hold count rather than having all N
99 99 * threads fight for the cache to perform an "atomic_add(&x, 1)" upon wakeup.
100 100 *
101 101 * The most interesting policy decision in rw_exit_wakeup() is which thread
102 102 * to wake. Starvation is always possible with priority-based scheduling,
103 103 * but any sane wakeup policy should at least satisfy these requirements:
104 104 *
105 105 * (1) The highest-priority thread in the system should not starve.
106 106 * (2) The highest-priority writer should not starve.
107 107 * (3) No writer should starve due to lower-priority threads.
108 108 * (4) No reader should starve due to lower-priority writers.
109 109 * (5) If all threads have equal priority, none of them should starve.
110 110 *
111 111 * We used to employ a writers-always-win policy, which doesn't even
112 112 * satisfy (1): a steady stream of low-priority writers can starve out
113 113 * a real-time reader! This is clearly a broken policy -- it violates
114 114 * (1), (4), and (5) -- but it's how rwlocks always used to behave.
115 115 *
116 116 * A round-robin policy (exiting readers grant the lock to blocked writers
117 117 * and vice versa) satisfies all but (3): a single high-priority writer
118 118 * and many low-priority readers can starve out medium-priority writers.
119 119 *
120 120 * A strict priority policy (grant the lock to the highest priority blocked
121 121 * thread) satisfies everything but (2): a steady stream of high-priority
122 122 * readers can permanently starve the highest-priority writer.
123 123 *
124 124 * The reason we care about (2) is that it's important to process writers
125 125 * reasonably quickly -- even if they're low priority -- because their very
126 126 * presence causes all readers to take the slow (blocking) path through this
127 127 * code. There is also a general sense that writers deserve some degree of
128 128 * deference because they're updating the data upon which all readers act.
129 129 * Presumably this data should not be allowed to become arbitrarily stale
130 130 * due to writer starvation. Finally, it seems reasonable to level the
131 131 * playing field a bit to compensate for the fact that it's so much harder
132 132 * for a writer to get in when there are already many readers present.
133 133 *
134 134 * A hybrid of round-robin and strict priority can be made to satisfy
135 135 * all five criteria. In this "writer priority policy" exiting readers
136 136 * always grant the lock to waiting writers, but exiting writers only
137 137 * grant the lock to readers of the same or higher priority than the
138 138 * highest-priority blocked writer. Thus requirement (2) is satisfied,
139 139 * necessarily, by a willful act of priority inversion: an exiting reader
140 140 * will grant the lock to a blocked writer even if there are blocked
141 141 * readers of higher priority. The situation is mitigated by the fact
142 142 * that writers always inherit priority from blocked readers, and the
143 143 * writer will awaken those readers as soon as it exits the lock.
144 144 *
145 145 * Finally, note that this hybrid scheme -- and indeed, any scheme that
146 146 * satisfies requirement (2) -- has an important consequence: if a lock is
147 147 * held as reader and a writer subsequently becomes blocked, any further
148 148 * readers must be blocked to avoid writer starvation. This implementation
149 149 * detail has ramifications for the semantics of rwlocks, as it prohibits
150 150 * recursively acquiring an rwlock as reader: any writer that wishes to
151 151 * acquire the lock after the first but before the second acquisition as
152 152 * reader will block the second acquisition -- resulting in deadlock. This
153 153 * itself is not necessarily prohibitive, as it is often straightforward to
154 154 * prevent a single thread from recursively acquiring an rwlock as reader.
155 155 * However, a more subtle situation arises when both a traditional mutex and
156 156 * a reader lock are acquired by two different threads in opposite order.
157 157 * (That is, one thread first acquires the mutex and then the rwlock as
158 158 * reader; the other acquires the rwlock as reader and then the mutex.) As
159 159 * with the single threaded case, this is fine absent a blocked writer: the
160 160 * thread that acquires the mutex before acquiring the rwlock as reader will
161 161 * be able to successfully acquire the rwlock -- even as/if the other thread
162 162 * has the rwlock as reader and is blocked on the held mutex. However, if
163 163 * an unrelated writer (that is, a third thread) becomes blocked on the
164 164 * rwlock after the first thread acquires the rwlock as reader but before
165 165 * it's able to acquire the mutex, the second thread -- with the mutex held
166 166 * -- will not be able to acquire the rwlock as reader due to the waiting
167 167 * writer, deadlocking the three threads. Unlike the single-threaded
168 168 * (recursive) rwlock acquisition case, this case can be quite a bit
169 169 * thornier to fix, especially as there is nothing inherently wrong in the
170 170 * locking strategy: the deadlock is really induced by requirement (2), not
171 171 * the consumers of the rwlock. To permit such consumers, we allow rwlock
172 172 * acquirers to explicitly opt out of requirement (2) by specifying
173 173 * RW_READER_STARVEWRITER when acquiring the rwlock. This (obviously) means
 174  174  * that infinite readers can starve writers, but it also allows for
175 175 * multiple readers in the presence of other synchronization primitives
176 176 * without regard for lock-ordering. And while certainly odd (and perhaps
177 177 * unwise), RW_READER_STARVEWRITER can be safely used alongside RW_READER on
178 178 * the same lock -- RW_READER_STARVEWRITER describes only the act of lock
179 179 * acquisition with respect to waiting writers, not the lock itself.
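
	[Editor's note: the three-thread deadlock above, spelled out as a
	timeline (an illustrative sketch; the locks m and rw are
	hypothetical):

	    T1: rw_enter(&rw, RW_READER)   -- rw held as reader
	    T2: mutex_enter(&m)            -- m held
	    T3: rw_enter(&rw, RW_WRITER)   -- blocks; RW_WRITE_WANTED set
	    T1: mutex_enter(&m)            -- blocks on T2
	    T2: rw_enter(&rw, RW_READER)   -- blocks behind the waiting writer

	All three threads are now deadlocked. If T2 instead specifies
	RW_READER_STARVEWRITER in the final step, its acquisition ignores
	RW_WRITE_WANTED and succeeds, breaking the cycle.]
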
180 180 *
181 181 * rw_downgrade() follows the same wakeup policy as an exiting writer.
182 182 *
183 183 * rw_tryupgrade() has the same failure mode as rw_tryenter() for a
184 184 * write lock. Both honor the WRITE_WANTED bit by specification.
185 185 *
186 186 * The following rules apply to manipulation of rwlock internal state:
187 187 *
188 188 * (1) The rwlock is only modified via the atomic primitives casip()
189 189 * and atomic_add_ip().
190 190 *
191 191 * (2) The waiters bit and write-wanted bit are only modified under
192 192 * turnstile_lookup(). This ensures that the turnstile is consistent
193 193 * with the rwlock.
194 194 *
195 195 * (3) Waiters receive the lock by direct handoff from the previous
196 196 * owner. Therefore, waiters *always* wake up holding the lock.
197 197 */
198 198
199 199 /*
200 200 * The sobj_ops vector exports a set of functions needed when a thread
201 201 * is asleep on a synchronization object of a given type.
202 202 */
203 203 static sobj_ops_t rw_sobj_ops = {
204 204 SOBJ_RWLOCK, rw_owner, turnstile_stay_asleep, turnstile_change_pri
205 205 };
206 206
207 207 /*
208 208 * If the system panics on an rwlock, save the address of the offending
209 209 * rwlock in panic_rwlock_addr, and save the contents in panic_rwlock.
210 210 */
211 211 static rwlock_impl_t panic_rwlock;
212 212 static rwlock_impl_t *panic_rwlock_addr;
213 213
214 214 static void
215 215 rw_panic(char *msg, rwlock_impl_t *lp)
216 216 {
217 217 if (panicstr)
218 218 return;
219 219
220 220 if (atomic_cas_ptr(&panic_rwlock_addr, NULL, lp) == NULL)
221 221 panic_rwlock = *lp;
222 222
223 223 panic("%s, lp=%p wwwh=%lx thread=%p",
224 224 msg, (void *)lp, panic_rwlock.rw_wwwh, (void *)curthread);
225 225 }
226 226
227 227 /* ARGSUSED */
228 228 void
229 229 rw_init(krwlock_t *rwlp, char *name, krw_type_t type, void *arg)
230 230 {
231 231 ((rwlock_impl_t *)rwlp)->rw_wwwh = 0;
232 232 }
233 233
234 234 void
235 235 rw_destroy(krwlock_t *rwlp)
236 236 {
237 237 rwlock_impl_t *lp = (rwlock_impl_t *)rwlp;
238 238
239 239 if (lp->rw_wwwh != 0) {
240 240 if ((lp->rw_wwwh & RW_DOUBLE_LOCK) == RW_DOUBLE_LOCK)
241 241 rw_panic("rw_destroy: lock already destroyed", lp);
242 242 else
243 243 rw_panic("rw_destroy: lock still active", lp);
244 244 }
245 245
246 246 lp->rw_wwwh = RW_DOUBLE_LOCK;
247 247 }
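
	[Editor's note: a typical consumer lifecycle, per rwlock(9F) (a
	sketch; the foo structure and its functions are hypothetical):

	    #include <sys/ksynch.h>    /* krwlock_t, rw_init(), etc. */

	    typedef struct foo {
	            krwlock_t foo_lock;
	            int       foo_count;
	    } foo_t;

	    void
	    foo_init(foo_t *fp)
	    {
	            rw_init(&fp->foo_lock, NULL, RW_DEFAULT, NULL);
	    }

	    int
	    foo_count_get(foo_t *fp)
	    {
	            int count;

	            rw_enter(&fp->foo_lock, RW_READER);
	            count = fp->foo_count;
	            rw_exit(&fp->foo_lock);
	            return (count);
	    }

	    void
	    foo_fini(foo_t *fp)
	    {
	            rw_destroy(&fp->foo_lock); /* must be unheld, no waiters */
	    }
	]
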
248 248
249 249 /*
250 250 * Verify that an rwlock is held correctly.
251 251 */
252 252 static int
253 253 rw_locked(rwlock_impl_t *lp, krw_t rw)
254 254 {
255 255 uintptr_t old = lp->rw_wwwh;
256 256
257 257 if (rw == RW_READER || rw == RW_READER_STARVEWRITER)
258 258 return ((old & RW_LOCKED) && !(old & RW_WRITE_LOCKED));
259 259
260 260 if (rw == RW_WRITER)
261 261 return ((old & RW_OWNER) == (uintptr_t)curthread);
262 262
263 263 return (0);
264 264 }
265 265
266 266 uint_t (*rw_lock_backoff)(uint_t) = NULL;
267 267 void (*rw_lock_delay)(uint_t) = NULL;
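
	[Editor's note: both hooks default to NULL (no backoff), and the
	code below assumes they are installed or cleared together. A
	hypothetical pair, purely for illustration and not the actual
	platform implementation:

	    #include <sys/types.h>    /* uint_t */

	    /* Double the backoff, up to an arbitrary cap. */
	    static uint_t
	    example_lock_backoff(uint_t backoff)
	    {
	            if (backoff == 0)
	                    return (1);
	            return (backoff < 1024 ? backoff << 1 : backoff);
	    }

	    /* Spin off-lock for a period proportional to the backoff. */
	    static void
	    example_lock_delay(uint_t backoff)
	    {
	            volatile uint_t i;

	            for (i = backoff * 100; i != 0; i--)
	                    continue;
	    }
	]
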
268 268
269 269 /*
270 270 * Full-service implementation of rw_enter() to handle all the hard cases.
271 271 * Called from the assembly version if anything complicated is going on.
272 - * The only semantic difference between calling rw_enter() and calling
273 - * rw_enter_sleep() directly is that we assume the caller has already done
274 - * a THREAD_KPRI_REQUEST() in the RW_READER cases.
275 272 */
276 273 void
277 274 rw_enter_sleep(rwlock_impl_t *lp, krw_t rw)
278 275 {
279 276 uintptr_t old, new, lock_value, lock_busy, lock_wait;
280 277 hrtime_t sleep_time;
281 278 turnstile_t *ts;
282 279 uint_t backoff = 0;
283 280 int loop_count = 0;
284 281
285 282 if (rw == RW_READER) {
286 283 lock_value = RW_READ_LOCK;
287 284 lock_busy = RW_WRITE_CLAIMED;
288 285 lock_wait = RW_HAS_WAITERS;
289 286 } else if (rw == RW_READER_STARVEWRITER) {
290 287 lock_value = RW_READ_LOCK;
291 288 lock_busy = RW_WRITE_LOCKED;
292 289 lock_wait = RW_HAS_WAITERS;
293 290 } else {
294 291 lock_value = RW_WRITE_LOCK(curthread);
295 292 lock_busy = (uintptr_t)RW_LOCKED;
296 293 lock_wait = RW_HAS_WAITERS | RW_WRITE_WANTED;
297 294 }
298 295
299 296 for (;;) {
300 297 if (((old = lp->rw_wwwh) & lock_busy) == 0) {
301 298 if (casip(&lp->rw_wwwh, old, old + lock_value) != old) {
302 299 if (rw_lock_delay != NULL) {
303 300 backoff = rw_lock_backoff(backoff);
304 301 rw_lock_delay(backoff);
305 302 if (++loop_count == ncpus_online) {
306 303 backoff = 0;
307 304 loop_count = 0;
308 305 }
309 306 }
310 307 continue;
311 308 }
312 309 break;
313 310 }
314 311
315 312 if (panicstr)
316 313 return;
317 314
318 315 if ((old & RW_DOUBLE_LOCK) == RW_DOUBLE_LOCK) {
319 316 rw_panic("rw_enter: bad rwlock", lp);
320 317 return;
321 318 }
322 319
323 320 if ((old & RW_OWNER) == (uintptr_t)curthread) {
324 321 rw_panic("recursive rw_enter", lp);
325 322 return;
326 323 }
327 324
328 325 ts = turnstile_lookup(lp);
329 326
330 327 do {
331 328 if (((old = lp->rw_wwwh) & lock_busy) == 0)
332 329 break;
333 330 new = old | lock_wait;
334 331 } while (old != new && casip(&lp->rw_wwwh, old, new) != old);
335 332
336 333 if ((old & lock_busy) == 0) {
337 334 /*
338 335 * The lock appears free now; try the dance again
339 336 */
340 337 turnstile_exit(lp);
341 338 continue;
342 339 }
343 340
344 341 /*
345 - * We really are going to block. Bump the stats, and drop
346 - * kpri if we're a reader.
342 + * We really are going to block, so bump the stats.
347 343 */
348 344 ASSERT(lp->rw_wwwh & lock_wait);
349 345 ASSERT(lp->rw_wwwh & RW_LOCKED);
350 346
351 347 sleep_time = -gethrtime();
352 348 if (rw != RW_WRITER) {
353 - THREAD_KPRI_RELEASE();
354 349 CPU_STATS_ADDQ(CPU, sys, rw_rdfails, 1);
355 350 (void) turnstile_block(ts, TS_READER_Q, lp,
356 351 &rw_sobj_ops, NULL, NULL);
357 352 } else {
358 353 CPU_STATS_ADDQ(CPU, sys, rw_wrfails, 1);
359 354 (void) turnstile_block(ts, TS_WRITER_Q, lp,
360 355 &rw_sobj_ops, NULL, NULL);
361 356 }
362 357 sleep_time += gethrtime();
363 358
364 359 LOCKSTAT_RECORD4(LS_RW_ENTER_BLOCK, lp, sleep_time, rw,
365 360 (old & RW_WRITE_LOCKED) ? 1 : 0,
366 361 old >> RW_HOLD_COUNT_SHIFT);
367 362
368 363 /*
369 - * We wake up holding the lock (and having kpri if we're
370 - * a reader) via direct handoff from the previous owner.
364 + * We wake up holding the lock via direct handoff from the
365 + * previous owner.
371 366 */
372 367 break;
373 368 }
374 369
375 370 ASSERT(rw_locked(lp, rw));
376 371
377 372 membar_enter();
378 373
379 374 LOCKSTAT_RECORD(LS_RW_ENTER_ACQUIRE, lp, rw);
380 375 }
381 376
382 377 /*
383 378 * Return the number of readers to wake, or zero if we should wake a writer.
384 379 * Called only by exiting/downgrading writers (readers don't wake readers).
385 380 */
386 381 static int
387 382 rw_readers_to_wake(turnstile_t *ts)
388 383 {
389 384 kthread_t *next_writer = ts->ts_sleepq[TS_WRITER_Q].sq_first;
390 385 kthread_t *next_reader = ts->ts_sleepq[TS_READER_Q].sq_first;
391 386 pri_t wpri = (next_writer != NULL) ? DISP_PRIO(next_writer) : -1;
392 387 int count = 0;
393 388
394 389 while (next_reader != NULL) {
395 390 if (DISP_PRIO(next_reader) < wpri)
396 391 break;
397 - next_reader->t_kpri_req++;
398 392 next_reader = next_reader->t_link;
399 393 count++;
400 394 }
401 395 return (count);
402 396 }
403 397
404 398 /*
405 399 * Full-service implementation of rw_exit() to handle all the hard cases.
406 400 * Called from the assembly version if anything complicated is going on.
407 401 * There is no semantic difference between calling rw_exit() and calling
408 402 * rw_exit_wakeup() directly.
409 403 */
410 404 void
411 405 rw_exit_wakeup(rwlock_impl_t *lp)
412 406 {
413 407 turnstile_t *ts;
414 408 uintptr_t old, new, lock_value;
415 409 kthread_t *next_writer;
416 410 int nreaders;
417 411 uint_t backoff = 0;
418 412 int loop_count = 0;
419 413
420 414 membar_exit();
421 415
422 416 old = lp->rw_wwwh;
423 417 if (old & RW_WRITE_LOCKED) {
424 418 if ((old & RW_OWNER) != (uintptr_t)curthread) {
425 419 rw_panic("rw_exit: not owner", lp);
426 420 lp->rw_wwwh = 0;
427 421 return;
428 422 }
429 423 lock_value = RW_WRITE_LOCK(curthread);
430 424 } else {
431 425 if ((old & RW_LOCKED) == 0) {
432 426 rw_panic("rw_exit: lock not held", lp);
433 427 return;
434 428 }
435 429 lock_value = RW_READ_LOCK;
436 430 }
437 431
438 432 for (;;) {
439 433 /*
440 434 * If this is *not* the final exit of a lock with waiters,
441 435 * just drop the lock -- there's nothing tricky going on.
442 436 */
443 437 old = lp->rw_wwwh;
444 438 new = old - lock_value;
445 439 if ((new & (RW_LOCKED | RW_HAS_WAITERS)) != RW_HAS_WAITERS) {
446 440 if (casip(&lp->rw_wwwh, old, new) != old) {
447 441 if (rw_lock_delay != NULL) {
448 442 backoff = rw_lock_backoff(backoff);
449 443 rw_lock_delay(backoff);
450 444 if (++loop_count == ncpus_online) {
451 445 backoff = 0;
452 446 loop_count = 0;
453 447 }
454 448 }
455 449 continue;
456 450 }
457 451 break;
458 452 }
459 453
460 454 /*
461 455 * This appears to be the final exit of a lock with waiters.
462 456 * If we do not have the lock as writer (that is, if this is
463 457 * the last exit of a reader with waiting writers), we will
464 458 * grab the lock as writer to prevent additional readers.
465 459 * (This is required because a reader that is acquiring the
466 460 * lock via RW_READER_STARVEWRITER will not observe the
467 461 * RW_WRITE_WANTED bit -- and we could therefore be racing
468 462 * with such readers here.)
469 463 */
470 464 if (!(old & RW_WRITE_LOCKED)) {
471 465 new = RW_WRITE_LOCK(curthread) |
472 466 RW_HAS_WAITERS | RW_WRITE_WANTED;
473 467
474 468 if (casip(&lp->rw_wwwh, old, new) != old)
475 469 continue;
476 470 }
477 471
478 472 /*
479 473 * Perform the final exit of a lock that has waiters.
480 474 */
481 475 ts = turnstile_lookup(lp);
482 476
483 477 next_writer = ts->ts_sleepq[TS_WRITER_Q].sq_first;
484 478
485 479 if ((old & RW_WRITE_LOCKED) &&
486 480 (nreaders = rw_readers_to_wake(ts)) > 0) {
487 481 /*
488 482 * Don't drop the lock -- just set the hold count
489 483 * such that we grant the lock to all readers at once.
490 484 */
491 485 new = nreaders * RW_READ_LOCK;
492 486 if (ts->ts_waiters > nreaders)
493 487 new |= RW_HAS_WAITERS;
494 488 if (next_writer)
495 489 new |= RW_WRITE_WANTED;
496 490 lp->rw_wwwh = new;
497 491 membar_enter();
498 492 turnstile_wakeup(ts, TS_READER_Q, nreaders, NULL);
499 493 } else {
500 494 /*
501 495 * Don't drop the lock -- just transfer ownership
502 496 * directly to next_writer. Note that there must
503 497 * be at least one waiting writer, because we get
504 498 * here only if (A) the lock is read-locked or
505 499 * (B) there are no waiting readers. In case (A),
506 500 * since the lock is read-locked there would be no
507 501 * reason for other readers to have blocked unless
508 502 * the RW_WRITE_WANTED bit was set. In case (B),
509 503 * since there are waiters but no waiting readers,
510 504 * they must all be waiting writers.
511 505 */
512 506 ASSERT(lp->rw_wwwh & RW_WRITE_WANTED);
513 507 new = RW_WRITE_LOCK(next_writer);
514 508 if (ts->ts_waiters > 1)
515 509 new |= RW_HAS_WAITERS;
516 510 if (next_writer->t_link)
517 511 new |= RW_WRITE_WANTED;
518 512 lp->rw_wwwh = new;
519 513 membar_enter();
520 514 turnstile_wakeup(ts, TS_WRITER_Q, 1, next_writer);
521 515 }
522 516 break;
523 517 }
524 518
525 519 if (lock_value == RW_READ_LOCK) {
526 - THREAD_KPRI_RELEASE();
527 520 LOCKSTAT_RECORD(LS_RW_EXIT_RELEASE, lp, RW_READER);
528 521 } else {
529 522 LOCKSTAT_RECORD(LS_RW_EXIT_RELEASE, lp, RW_WRITER);
530 523 }
531 524 }
532 525
533 526 int
534 527 rw_tryenter(krwlock_t *rwlp, krw_t rw)
535 528 {
536 529 rwlock_impl_t *lp = (rwlock_impl_t *)rwlp;
537 530 uintptr_t old;
538 531
539 532 if (rw != RW_WRITER) {
540 533 uint_t backoff = 0;
541 534 int loop_count = 0;
542 - THREAD_KPRI_REQUEST();
543 535 for (;;) {
544 536 if ((old = lp->rw_wwwh) & (rw == RW_READER ?
545 537 RW_WRITE_CLAIMED : RW_WRITE_LOCKED)) {
546 - THREAD_KPRI_RELEASE();
547 538 return (0);
548 539 }
549 540 if (casip(&lp->rw_wwwh, old, old + RW_READ_LOCK) == old)
550 541 break;
551 542 if (rw_lock_delay != NULL) {
552 543 backoff = rw_lock_backoff(backoff);
553 544 rw_lock_delay(backoff);
554 545 if (++loop_count == ncpus_online) {
555 546 backoff = 0;
556 547 loop_count = 0;
557 548 }
558 549 }
559 550 }
560 551 LOCKSTAT_RECORD(LS_RW_TRYENTER_ACQUIRE, lp, rw);
561 552 } else {
562 553 if (casip(&lp->rw_wwwh, 0, RW_WRITE_LOCK(curthread)) != 0)
563 554 return (0);
564 555 LOCKSTAT_RECORD(LS_RW_TRYENTER_ACQUIRE, lp, rw);
565 556 }
566 557 ASSERT(rw_locked(lp, rw));
567 558 membar_enter();
568 559 return (1);
569 560 }
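
	[Editor's note: callers must be prepared for rw_tryenter() to
	fail. A common use is sidestepping a lock-ordering violation (a
	sketch; m, rw, and the ordering rule are hypothetical):

	    /*
	     * We already hold m, but the blessed order is rw before m.
	     * Try the out-of-order grab; on failure, back out and take
	     * both locks in the proper order, revalidating any state
	     * observed under m.
	     */
	    if (!rw_tryenter(&rw, RW_WRITER)) {
	            mutex_exit(&m);
	            rw_enter(&rw, RW_WRITER);
	            mutex_enter(&m);
	    }
	]
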
570 561
571 562 void
572 563 rw_downgrade(krwlock_t *rwlp)
573 564 {
574 565 rwlock_impl_t *lp = (rwlock_impl_t *)rwlp;
575 566
576 - THREAD_KPRI_REQUEST();
577 567 membar_exit();
578 568
579 569 if ((lp->rw_wwwh & RW_OWNER) != (uintptr_t)curthread) {
580 570 rw_panic("rw_downgrade: not owner", lp);
581 571 return;
582 572 }
583 573
584 574 if (atomic_add_ip_nv(&lp->rw_wwwh,
585 575 RW_READ_LOCK - RW_WRITE_LOCK(curthread)) & RW_HAS_WAITERS) {
586 576 turnstile_t *ts = turnstile_lookup(lp);
587 577 int nreaders = rw_readers_to_wake(ts);
588 578 if (nreaders > 0) {
589 579 uintptr_t delta = nreaders * RW_READ_LOCK;
590 580 if (ts->ts_waiters == nreaders)
591 581 delta -= RW_HAS_WAITERS;
592 582 atomic_add_ip(&lp->rw_wwwh, delta);
593 583 }
594 584 turnstile_wakeup(ts, TS_READER_Q, nreaders, NULL);
595 585 }
596 586 ASSERT(rw_locked(lp, RW_READER));
597 587 LOCKSTAT_RECORD0(LS_RW_DOWNGRADE_DOWNGRADE, lp);
598 588 }
599 589
600 590 int
601 591 rw_tryupgrade(krwlock_t *rwlp)
602 592 {
603 593 rwlock_impl_t *lp = (rwlock_impl_t *)rwlp;
604 594 uintptr_t old, new;
605 595
606 596 ASSERT(rw_locked(lp, RW_READER));
607 597
608 598 do {
609 599 if (((old = lp->rw_wwwh) & ~RW_HAS_WAITERS) != RW_READ_LOCK)
610 600 return (0);
611 601 new = old + RW_WRITE_LOCK(curthread) - RW_READ_LOCK;
612 602 } while (casip(&lp->rw_wwwh, old, new) != old);
613 603
614 604 membar_enter();
615 - THREAD_KPRI_RELEASE();
616 605 LOCKSTAT_RECORD0(LS_RW_TRYUPGRADE_UPGRADE, lp);
617 606 ASSERT(rw_locked(lp, RW_WRITER));
618 607 return (1);
619 608 }
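
	[Editor's note: rw_tryupgrade() fails whenever another reader
	holds the lock or a writer is already wanted, so callers typically
	fall back to a full reacquisition (a sketch; rw is hypothetical):

	    rw_enter(&rw, RW_READER);
	    if (!rw_tryupgrade(&rw)) {
	            /*
	             * Upgrade contested: drop the lock and reacquire as
	             * writer.  State observed under the read lock may
	             * have changed while the lock was dropped, so it
	             * must be revalidated.
	             */
	            rw_exit(&rw);
	            rw_enter(&rw, RW_WRITER);
	    }
	]
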
620 609
621 610 int
622 611 rw_read_held(krwlock_t *rwlp)
623 612 {
624 613 uintptr_t tmp;
625 614
626 615 return (_RW_READ_HELD(rwlp, tmp));
627 616 }
628 617
629 618 int
630 619 rw_write_held(krwlock_t *rwlp)
631 620 {
632 621 return (_RW_WRITE_HELD(rwlp));
633 622 }
634 623
635 624 int
636 625 rw_lock_held(krwlock_t *rwlp)
637 626 {
638 627 return (_RW_LOCK_HELD(rwlp));
639 628 }
640 629
641 630 /*
642 631 * Like rw_read_held(), but ASSERTs that the lock is currently held
643 632 */
644 633 int
645 634 rw_read_locked(krwlock_t *rwlp)
646 635 {
647 636 uintptr_t old = ((rwlock_impl_t *)rwlp)->rw_wwwh;
648 637
649 638 ASSERT(old & RW_LOCKED);
650 639 return ((old & RW_LOCKED) && !(old & RW_WRITE_LOCKED));
651 640 }
652 641
653 642 /*
654 643 * Returns non-zero if the lock is either held or desired by a writer
655 644 */
656 645 int
657 646 rw_iswriter(krwlock_t *rwlp)
658 647 {
659 648 return (_RW_ISWRITER(rwlp));
660 649 }
661 650
662 651 kthread_t *
663 652 rw_owner(krwlock_t *rwlp)
664 653 {
665 654 uintptr_t old = ((rwlock_impl_t *)rwlp)->rw_wwwh;
666 655
667 656 return ((old & RW_WRITE_LOCKED) ? (kthread_t *)(old & RW_OWNER) : NULL);
668 657 }