11909 THREAD_KPRI_RELEASE does nothing of the sort
Reviewed by: Bryan Cantrill <bryan@joyent.com>
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
--- old/usr/src/uts/sparc/v9/ml/lock_prim.s
+++ new/usr/src/uts/sparc/v9/ml/lock_prim.s
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 + * Copyright 2019 Joyent, Inc.
24 25 */
25 26
26 -#pragma ident "%Z%%M% %I% %E% SMI"
27 -
28 27 #if defined(lint)
29 28 #include <sys/types.h>
30 29 #include <sys/thread.h>
31 30 #include <sys/cpuvar.h>
32 31 #else /* lint */
33 32 #include "assym.h"
34 33 #endif /* lint */
35 34
36 35 #include <sys/t_lock.h>
37 36 #include <sys/mutex.h>
38 37 #include <sys/mutex_impl.h>
39 38 #include <sys/rwlock_impl.h>
40 39 #include <sys/asm_linkage.h>
41 40 #include <sys/machlock.h>
42 41 #include <sys/machthread.h>
43 42 #include <sys/lockstat.h>
44 43
45 44 /* #define DEBUG */
46 45
47 46 #ifdef DEBUG
48 47 #include <sys/machparam.h>
49 48 #endif /* DEBUG */
50 49
51 50 /************************************************************************
52 51 * ATOMIC OPERATIONS
53 52 */
54 53
55 54 /*
56 55 * uint8_t ldstub(uint8_t *cp)
57 56 *
58 57 * Store 0xFF at the specified location, and return its previous content.
59 58 */
60 59
61 60 #if defined(lint)
62 61 uint8_t
63 62 ldstub(uint8_t *cp)
64 63 {
65 64 uint8_t rv;
66 65 rv = *cp;
67 66 *cp = 0xFF;
68 67 return rv;
69 68 }
70 69 #else /* lint */
71 70
72 71 ENTRY(ldstub)
73 72 retl
74 73 ldstub [%o0], %o0
75 74 SET_SIZE(ldstub)
76 75
77 76 #endif /* lint */
78 77
79 78 /************************************************************************
80 79 * MEMORY BARRIERS -- see atomic.h for full descriptions.
81 80 */
82 81
83 82 #if defined(lint)
84 83
85 84 void
86 85 membar_enter(void)
87 86 {}
88 87
89 88 void
90 89 membar_exit(void)
91 90 {}
92 91
93 92 void
94 93 membar_producer(void)
95 94 {}
96 95
97 96 void
98 97 membar_consumer(void)
99 98 {}
100 99
101 100 #else /* lint */
102 101
103 102 #ifdef SF_ERRATA_51
104 103 .align 32
105 104 ENTRY(membar_return)
106 105 retl
107 106 nop
108 107 SET_SIZE(membar_return)
109 108 #define MEMBAR_RETURN ba,pt %icc, membar_return
110 109 #else
111 110 #define MEMBAR_RETURN retl
112 111 #endif
113 112
114 113 ENTRY(membar_enter)
115 114 MEMBAR_RETURN
116 115 membar #StoreLoad|#StoreStore
117 116 SET_SIZE(membar_enter)
118 117
119 118 ENTRY(membar_exit)
120 119 MEMBAR_RETURN
121 120 membar #LoadStore|#StoreStore
122 121 SET_SIZE(membar_exit)
123 122
124 123 ENTRY(membar_producer)
125 124 MEMBAR_RETURN
126 125 membar #StoreStore
127 126 SET_SIZE(membar_producer)
128 127
129 128 ENTRY(membar_consumer)
130 129 MEMBAR_RETURN
131 130 membar #LoadLoad
132 131 SET_SIZE(membar_consumer)
133 132
134 133 #endif /* lint */
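
A short illustrative pairing for the barriers above, since the lint stubs are empty: a producer orders its data store before the flag store with membar_producer(), and a consumer orders the flag load before the data load with membar_consumer(). This is only a sketch; the example_* names are made up, and the membar_*() semantics follow atomic.h as the section header says.

        #include <sys/types.h>
        #include <sys/atomic.h>         /* membar_producer(), membar_consumer() */

        static int example_data;
        static volatile int example_ready;

        static void
        example_publish(int v)
        {
                example_data = v;
                membar_producer();      /* data store visible before flag store */
                example_ready = 1;
        }

        static int
        example_consume(void)
        {
                while (example_ready == 0)
                        ;
                membar_consumer();      /* flag load ordered before data load */
                return (example_data);
        }
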
135 134
136 135 /************************************************************************
137 136 * MINIMUM LOCKS
138 137 */
139 138
140 139 #if defined(lint)
141 140
142 141 /*
143 142 * lock_try(lp), ulock_try(lp)
144 143 * - returns non-zero on success.
145 144 * - doesn't block interrupts so don't use this to spin on a lock.
146 145 * - uses "0xFF is busy, anything else is free" model.
147 146 *
148 147 * ulock_try() is for a lock in the user address space.
 149 148 * For all V7/V8 sparc systems they are the same, since the kernel and
 150 149 * user are mapped in the user's context.
 151 150 * For V9 platforms, lock_try and ulock_try have different implementations.
152 151 */
153 152
154 153 int
155 154 lock_try(lock_t *lp)
156 155 {
157 156 return (0xFF ^ ldstub(lp));
158 157 }
159 158
160 159 int
161 160 lock_spin_try(lock_t *lp)
162 161 {
163 162 return (0xFF ^ ldstub(lp));
164 163 }
165 164
166 165 void
167 166 lock_set(lock_t *lp)
168 167 {
169 168 extern void lock_set_spin(lock_t *);
170 169
171 170 if (!lock_try(lp))
172 171 lock_set_spin(lp);
173 172 membar_enter();
174 173 }
175 174
176 175 void
177 176 lock_clear(lock_t *lp)
178 177 {
179 178 membar_exit();
180 179 *lp = 0;
181 180 }
182 181
183 182 int
184 183 ulock_try(lock_t *lp)
185 184 {
186 185 return (0xFF ^ ldstub(lp));
187 186 }
188 187
189 188 void
190 189 ulock_clear(lock_t *lp)
191 190 {
192 191 membar_exit();
193 192 *lp = 0;
194 193 }
195 194
196 195 #else /* lint */
197 196
198 197 .align 32
199 198 ENTRY(lock_try)
200 199 ldstub [%o0], %o1 ! try to set lock, get value in %o1
201 200 brnz,pn %o1, 1f
202 201 membar #LoadLoad
203 202 .lock_try_lockstat_patch_point:
204 203 retl
205 204 or %o0, 1, %o0 ! ensure lo32 != 0
206 205 1:
207 206 retl
208 207 clr %o0
209 208 SET_SIZE(lock_try)
210 209
211 210 .align 32
212 211 ENTRY(lock_spin_try)
213 212 ldstub [%o0], %o1 ! try to set lock, get value in %o1
214 213 brnz,pn %o1, 1f
215 214 membar #LoadLoad
216 215 retl
217 216 or %o0, 1, %o0 ! ensure lo32 != 0
218 217 1:
219 218 retl
220 219 clr %o0
221 220 SET_SIZE(lock_spin_try)
222 221
223 222 .align 32
224 223 ENTRY(lock_set)
225 224 ldstub [%o0], %o1
226 225 brnz,pn %o1, 1f ! go to C for the hard case
227 226 membar #LoadLoad
228 227 .lock_set_lockstat_patch_point:
229 228 retl
230 229 nop
231 230 1:
232 231 sethi %hi(lock_set_spin), %o2 ! load up for jump to C
233 232 jmp %o2 + %lo(lock_set_spin)
234 233 nop ! delay: do nothing
235 234 SET_SIZE(lock_set)
236 235
237 236 ENTRY(lock_clear)
238 237 membar #LoadStore|#StoreStore
239 238 .lock_clear_lockstat_patch_point:
240 239 retl
241 240 clrb [%o0]
242 241 SET_SIZE(lock_clear)
243 242
244 243 .align 32
245 244 ENTRY(ulock_try)
246 245 ldstuba [%o0]ASI_USER, %o1 ! try to set lock, get value in %o1
247 246 xor %o1, 0xff, %o0 ! delay - return non-zero if success
248 247 retl
249 248 membar #LoadLoad
250 249 SET_SIZE(ulock_try)
251 250
252 251 ENTRY(ulock_clear)
253 252 membar #LoadStore|#StoreStore
254 253 retl
255 254 stba %g0, [%o0]ASI_USER ! clear lock
256 255 SET_SIZE(ulock_clear)
257 256
258 257 #endif /* lint */
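
For readers unfamiliar with these primitives, the lint models above double as a usage contract: lock_try() is a non-blocking attempt that returns non-zero on success, lock_set() spins (via lock_set_spin()) until it wins, and lock_clear() is an ordered release of the byte. A minimal caller-side sketch in C follows; example_lock, example_counter, and example_update() are illustrative names only, not part of this file.

        #include <sys/types.h>
        #include <sys/machlock.h>       /* lock_t and the lock_*() prototypes */

        static lock_t example_lock;     /* 0 is free, 0xFF is busy */
        static int example_counter;

        static void
        example_update(void)
        {
                if (lock_try(&example_lock)) {          /* non-blocking attempt */
                        example_counter++;
                        lock_clear(&example_lock);      /* membar + clear byte */
                        return;
                }
                lock_set(&example_lock);                /* spins until acquired */
                example_counter++;
                lock_clear(&example_lock);
        }
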
259 258
260 259
261 260 /*
262 261 * lock_set_spl(lp, new_pil, *old_pil_addr)
263 - * Sets pil to new_pil, grabs lp, stores old pil in *old_pil_addr.
262 + * Sets pil to new_pil, grabs lp, stores old pil in *old_pil_addr.
264 263 */
265 264
266 265 #if defined(lint)
267 266
268 267 /* ARGSUSED */
269 268 void
270 269 lock_set_spl(lock_t *lp, int new_pil, u_short *old_pil_addr)
271 270 {
272 271 extern int splr(int);
273 272 extern void lock_set_spl_spin(lock_t *, int, u_short *, int);
274 273 int old_pil;
275 274
276 275 old_pil = splr(new_pil);
277 276 if (!lock_try(lp)) {
278 277 lock_set_spl_spin(lp, new_pil, old_pil_addr, old_pil);
279 278 } else {
280 279 *old_pil_addr = (u_short)old_pil;
281 280 membar_enter();
282 281 }
283 282 }
284 283
285 284 #else /* lint */
286 285
287 286 ENTRY(lock_set_spl)
288 287 rdpr %pil, %o3 ! %o3 = current pil
289 288 cmp %o3, %o1 ! is current pil high enough?
290 289 bl,a,pt %icc, 1f ! if not, write %pil in delay
291 290 wrpr %g0, %o1, %pil
292 291 1:
293 292 ldstub [%o0], %o4 ! try the lock
294 293 brnz,pn %o4, 2f ! go to C for the miss case
295 294 membar #LoadLoad
296 295 .lock_set_spl_lockstat_patch_point:
297 296 retl
298 297 sth %o3, [%o2] ! delay - save original pil
299 298 2:
300 299 sethi %hi(lock_set_spl_spin), %o5 ! load up jmp to C
301 300 jmp %o5 + %lo(lock_set_spl_spin) ! jmp to lock_set_spl_spin
302 301 nop ! delay: do nothing
303 302 SET_SIZE(lock_set_spl)
304 303
305 304 #endif /* lint */
306 305
307 306 /*
308 307 * lock_clear_splx(lp, s)
309 308 */
310 309
311 310 #if defined(lint)
312 311
313 312 void
314 313 lock_clear_splx(lock_t *lp, int s)
315 314 {
316 315 extern void splx(int);
317 316
318 317 lock_clear(lp);
319 318 splx(s);
320 319 }
321 320
322 321 #else /* lint */
323 322
324 323 ENTRY(lock_clear_splx)
325 324 ldn [THREAD_REG + T_CPU], %o2 ! get CPU pointer
326 325 membar #LoadStore|#StoreStore
327 326 ld [%o2 + CPU_BASE_SPL], %o2
328 327 clrb [%o0] ! clear lock
329 328 cmp %o2, %o1 ! compare new to base
330 329 movl %xcc, %o1, %o2 ! use new pri if base is less
331 330 .lock_clear_splx_lockstat_patch_point:
332 331 retl
333 332 wrpr %g0, %o2, %pil
334 333 SET_SIZE(lock_clear_splx)
335 334
336 335 #endif /* lint */
337 336
338 337 /*
339 338 * mutex_enter() and mutex_exit().
340 - *
339 + *
341 340 * These routines handle the simple cases of mutex_enter() (adaptive
342 341 * lock, not held) and mutex_exit() (adaptive lock, held, no waiters).
343 342 * If anything complicated is going on we punt to mutex_vector_enter().
344 343 *
345 344 * mutex_tryenter() is similar to mutex_enter() but returns zero if
346 345 * the lock cannot be acquired, nonzero on success.
347 346 *
348 347 * If mutex_exit() gets preempted in the window between checking waiters
349 348 * and clearing the lock, we can miss wakeups. Disabling preemption
350 349 * in the mutex code is prohibitively expensive, so instead we detect
351 350 * mutex preemption by examining the trapped PC in the interrupt path.
352 351 * If we interrupt a thread in mutex_exit() that has not yet cleared
353 352 * the lock, pil_interrupt() resets its PC back to the beginning of
354 353 * mutex_exit() so it will check again for waiters when it resumes.
355 354 *
356 355 * The lockstat code below is activated when the lockstat driver
357 356 * calls lockstat_hot_patch() to hot-patch the kernel mutex code.
358 357 * Note that we don't need to test lockstat_event_mask here -- we won't
359 358 * patch this code in unless we're gathering ADAPTIVE_HOLD lockstats.
360 359 */
361 360
362 361 #if defined (lint)
363 362
364 363 /* ARGSUSED */
365 364 void
366 365 mutex_enter(kmutex_t *lp)
367 366 {}
368 367
369 368 /* ARGSUSED */
370 369 int
371 370 mutex_tryenter(kmutex_t *lp)
372 371 { return (0); }
373 372
374 373 /* ARGSUSED */
375 374 void
376 375 mutex_exit(kmutex_t *lp)
377 376 {}
378 377
379 378 /* ARGSUSED */
380 379 void *
381 380 mutex_owner_running(mutex_impl_t *lp)
382 381 { return (NULL); }
383 382
384 383 #else
385 384 .align 32
386 385 ENTRY(mutex_enter)
387 386 mov THREAD_REG, %o1
388 387 casx [%o0], %g0, %o1 ! try to acquire as adaptive
389 388 brnz,pn %o1, 1f ! locked or wrong type
390 389 membar #LoadLoad
391 390 .mutex_enter_lockstat_patch_point:
392 391 retl
393 392 nop
394 393 1:
395 394 sethi %hi(mutex_vector_enter), %o2 ! load up for jump to C
396 395 jmp %o2 + %lo(mutex_vector_enter)
397 396 nop
398 397 SET_SIZE(mutex_enter)
399 398
400 399 ENTRY(mutex_tryenter)
401 400 mov THREAD_REG, %o1
402 401 casx [%o0], %g0, %o1 ! try to acquire as adaptive
403 402 brnz,pn %o1, 1f ! locked or wrong type continue
404 403 membar #LoadLoad
405 404 .mutex_tryenter_lockstat_patch_point:
406 405 retl
407 406 or %o0, 1, %o0 ! ensure lo32 != 0
408 407 1:
409 408 sethi %hi(mutex_vector_tryenter), %o2 ! hi bits
410 409 jmp %o2 + %lo(mutex_vector_tryenter) ! go to C
411 410 nop
412 411 SET_SIZE(mutex_tryenter)
413 412
414 413 ENTRY(mutex_adaptive_tryenter)
415 414 mov THREAD_REG, %o1
416 415 casx [%o0], %g0, %o1 ! try to acquire as adaptive
417 416 brnz,pn %o1, 0f ! locked or wrong type
418 417 membar #LoadLoad
419 418 retl
420 419 or %o0, 1, %o0 ! ensure lo32 != 0
421 420 0:
422 421 retl
423 422 mov %g0, %o0
424 423 SET_SIZE(mutex_adaptive_tryenter)
425 424
426 425 ! these need to be together and cache aligned for performance.
427 426 .align 64
428 427 .global mutex_exit_critical_size
429 428 .global mutex_exit_critical_start
430 429 .global mutex_owner_running_critical_size
431 430 .global mutex_owner_running_critical_start
432 431
433 432 mutex_exit_critical_size = .mutex_exit_critical_end - mutex_exit_critical_start
434 433
435 434 .align 32
436 435
437 436 ENTRY(mutex_exit)
438 437 mutex_exit_critical_start: ! If we are interrupted, restart here
439 438 ldn [%o0], %o1 ! get the owner field
440 439 membar #LoadStore|#StoreStore
441 440 cmp THREAD_REG, %o1 ! do we own lock with no waiters?
442 441 be,a,pt %ncc, 1f ! if so, drive on ...
443 442 stn %g0, [%o0] ! delay: clear lock if we owned it
444 443 .mutex_exit_critical_end: ! for pil_interrupt() hook
445 444 ba,a,pt %xcc, mutex_vector_exit ! go to C for the hard cases
446 445 1:
447 446 .mutex_exit_lockstat_patch_point:
448 447 retl
449 448 nop
450 449 SET_SIZE(mutex_exit)
451 450
452 451 mutex_owner_running_critical_size = .mutex_owner_running_critical_end - mutex_owner_running_critical_start
453 452
454 453 .align 32
455 454
456 455 ENTRY(mutex_owner_running)
457 456 mutex_owner_running_critical_start: ! If interrupted restart here
458 457 ldn [%o0], %o1 ! get the owner field
459 458 and %o1, MUTEX_THREAD, %o1 ! remove the waiters bit if any
460 459 brz,pn %o1, 1f ! if so, drive on ...
461 460 nop
462 461 ldn [%o1+T_CPU], %o2 ! get owner->t_cpu
463 462 ldn [%o2+CPU_THREAD], %o3 ! get owner->t_cpu->cpu_thread
464 463 .mutex_owner_running_critical_end: ! for pil_interrupt() hook
465 464 cmp %o1, %o3 ! owner == running thread?
466 465 be,a,pt %xcc, 2f ! yes, go return cpu
467 466 nop
468 467 1:
469 468 retl
470 469 mov %g0, %o0 ! return 0 (owner not running)
471 470 2:
472 471 retl
473 472 mov %o2, %o0 ! owner running, return cpu
474 473 SET_SIZE(mutex_owner_running)
475 474
476 475 #endif /* lint */
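
The mutex_exit_critical_start/_size and mutex_owner_running_critical_start/_size symbols exported above support the preemption-window trick described in the block comment: if an interrupt lands while the trapped PC is inside one of these ranges, the interrupt path (pil_interrupt()) rolls the PC back to the start of the range so the sequence re-executes from the top. The sketch below shows the shape of that check only; the helper name is illustrative and the real test lives in the interrupt/dispatcher code, not in this file.

        #include <sys/types.h>

        /*
         * Illustrative only: roll a trapped PC back to the start of a
         * restartable critical sequence, as the comment above describes
         * for mutex_exit() and mutex_owner_running().
         */
        static void
        example_restart_critical(uintptr_t *trapped_pc, uintptr_t crit_start,
            size_t crit_size)
        {
                /* Interrupted inside [crit_start, crit_start + crit_size)? */
                if (*trapped_pc - crit_start < (uintptr_t)crit_size)
                        *trapped_pc = crit_start;       /* re-run from the top */
        }
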
477 476
478 477 /*
479 478 * rw_enter() and rw_exit().
480 - *
479 + *
481 480 * These routines handle the simple cases of rw_enter (write-locking an unheld
482 481 * lock or read-locking a lock that's neither write-locked nor write-wanted)
483 482 * and rw_exit (no waiters or not the last reader). If anything complicated
484 483 * is going on we punt to rw_enter_sleep() and rw_exit_wakeup(), respectively.
485 484 */
486 485 #if defined(lint)
487 486
488 487 /* ARGSUSED */
489 488 void
490 489 rw_enter(krwlock_t *lp, krw_t rw)
491 490 {}
492 491
493 492 /* ARGSUSED */
494 493 void
495 494 rw_exit(krwlock_t *lp)
496 495 {}
497 496
498 497 #else
499 498
500 499 .align 16
501 500 ENTRY(rw_enter)
502 501 cmp %o1, RW_WRITER ! entering as writer?
503 502 be,a,pn %icc, 2f ! if so, go do it ...
504 503 or THREAD_REG, RW_WRITE_LOCKED, %o5 ! delay: %o5 = owner
505 - ld [THREAD_REG + T_KPRI_REQ], %o3 ! begin THREAD_KPRI_REQUEST()
506 504 ldn [%o0], %o4 ! %o4 = old lock value
507 - inc %o3 ! bump kpri
508 - st %o3, [THREAD_REG + T_KPRI_REQ] ! store new kpri
509 505 1:
510 506 andcc %o4, RW_WRITE_CLAIMED, %g0 ! write-locked or write-wanted?
511 - bz,pt %xcc, 3f ! if so, prepare to block
507 + bz,pt %xcc, 3f ! if so, prepare to block
512 508 add %o4, RW_READ_LOCK, %o5 ! delay: increment hold count
513 509 sethi %hi(rw_enter_sleep), %o2 ! load up jump
514 510 jmp %o2 + %lo(rw_enter_sleep) ! jmp to rw_enter_sleep
515 511 nop ! delay: do nothing
516 512 3:
517 513 casx [%o0], %o4, %o5 ! try to grab read lock
518 514 cmp %o4, %o5 ! did we get it?
519 515 #ifdef sun4v
520 516 be,a,pt %xcc, 0f
521 517 membar #LoadLoad
522 518 sethi %hi(rw_enter_sleep), %o2 ! load up jump
523 519 jmp %o2 + %lo(rw_enter_sleep) ! jmp to rw_enter_sleep
524 520 nop ! delay: do nothing
525 521 0:
526 522 #else /* sun4v */
527 523 bne,pn %xcc, 1b ! if not, try again
528 524 mov %o5, %o4 ! delay: %o4 = old lock value
529 525 membar #LoadLoad
530 526 #endif /* sun4v */
531 527 .rw_read_enter_lockstat_patch_point:
532 528 retl
533 529 nop
534 530 2:
535 531 casx [%o0], %g0, %o5 ! try to grab write lock
536 532 brz,pt %o5, 4f ! branch around if we got it
537 533 membar #LoadLoad ! done regardless of where we go
538 534 sethi %hi(rw_enter_sleep), %o2
539 535 jmp %o2 + %lo(rw_enter_sleep) ! jump to rw_enter_sleep if not
540 536 nop ! delay: do nothing
541 537 4:
542 538 .rw_write_enter_lockstat_patch_point:
543 539 retl
544 540 nop
545 541 SET_SIZE(rw_enter)
546 542
547 543 .align 16
548 544 ENTRY(rw_exit)
549 545 ldn [%o0], %o4 ! %o4 = old lock value
550 546 membar #LoadStore|#StoreStore ! membar_exit()
551 547 subcc %o4, RW_READ_LOCK, %o5 ! %o5 = new lock value if reader
552 548 bnz,pn %xcc, 2f ! single reader, no waiters?
553 549 clr %o1
554 550 1:
555 - ld [THREAD_REG + T_KPRI_REQ], %g1 ! begin THREAD_KPRI_RELEASE()
556 551 srl %o4, RW_HOLD_COUNT_SHIFT, %o3 ! %o3 = hold count (lockstat)
557 552 casx [%o0], %o4, %o5 ! try to drop lock
558 553 cmp %o4, %o5 ! did we succeed?
559 554 bne,pn %xcc, rw_exit_wakeup ! if not, go to C
560 - dec %g1 ! delay: drop kpri
555 + nop ! delay: do nothing
561 556 .rw_read_exit_lockstat_patch_point:
562 557 retl
563 - st %g1, [THREAD_REG + T_KPRI_REQ] ! delay: store new kpri
558 + nop ! delay: do nothing
564 559 2:
565 560 andcc %o4, RW_WRITE_LOCKED, %g0 ! are we a writer?
566 561 bnz,a,pt %xcc, 3f
567 562 or THREAD_REG, RW_WRITE_LOCKED, %o4 ! delay: %o4 = owner
568 563 cmp %o5, RW_READ_LOCK ! would lock still be held?
569 564 bge,pt %xcc, 1b ! if so, go ahead and drop it
570 565 nop
571 566 ba,pt %xcc, rw_exit_wakeup ! otherwise, wake waiters
572 567 nop
573 568 3:
574 569 casx [%o0], %o4, %o1 ! try to drop write lock
575 570 cmp %o4, %o1 ! did we succeed?
576 571 bne,pn %xcc, rw_exit_wakeup ! if not, go to C
577 572 nop
578 573 .rw_write_exit_lockstat_patch_point:
579 574 retl
580 575 nop
581 576 SET_SIZE(rw_exit)
582 577
583 578 #endif
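
The deletions in rw_enter() and rw_exit() above are the substance of this change: the reader paths hand-inlined THREAD_KPRI_REQUEST() and THREAD_KPRI_RELEASE() by bumping and dropping the t_kpri_req counter; in rw_enter() those instructions are simply deleted, and in rw_exit() the delay slots that held them become plain nops. As a rough C rendering of what the removed instructions did (illustrative only; the t_kpri_req field and the THREAD_KPRI_* macros are what this change retires, so this does not compile against the post-change headers):

        #include <sys/thread.h>         /* pre-change kthread_t with t_kpri_req */

        static void
        example_kpri_request(kthread_t *t)      /* what rw_enter()'s reader path did */
        {
                t->t_kpri_req++;
        }

        static void
        example_kpri_release(kthread_t *t)      /* what rw_exit()'s reader path did */
        {
                t->t_kpri_req--;
        }
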
584 579
585 580 #if defined(lint)
586 581
587 582 void
588 583 lockstat_hot_patch(void)
589 584 {}
590 585
591 586 #else
592 587
593 588 #define RETL 0x81c3e008
594 589 #define NOP 0x01000000
595 590 #define BA 0x10800000
596 591
597 592 #define DISP22 ((1 << 22) - 1)
598 593 #define ANNUL 0x20000000
599 594
600 595 #define HOT_PATCH_COMMON(addr, event, normal_instr, annul, rs) \
601 596 ba 1f; \
602 597 rd %pc, %o0; \
603 598 save %sp, -SA(MINFRAME), %sp; \
604 599 set lockstat_probemap, %l1; \
605 600 ld [%l1 + (event * DTRACE_IDSIZE)], %o0; \
606 601 brz,pn %o0, 0f; \
607 602 ldub [THREAD_REG + T_LOCKSTAT], %l0; \
608 603 add %l0, 1, %l2; \
609 604 stub %l2, [THREAD_REG + T_LOCKSTAT]; \
610 605 set lockstat_probe, %g1; \
611 606 ld [%l1 + (event * DTRACE_IDSIZE)], %o0; \
612 607 brz,a,pn %o0, 0f; \
613 608 stub %l0, [THREAD_REG + T_LOCKSTAT]; \
614 609 ldn [%g1], %g2; \
615 610 mov rs, %o2; \
616 611 jmpl %g2, %o7; \
617 612 mov %i0, %o1; \
618 613 stub %l0, [THREAD_REG + T_LOCKSTAT]; \
619 614 0: ret; \
620 615 restore %g0, 1, %o0; /* for mutex_tryenter / lock_try */ \
621 616 1: set addr, %o1; \
622 617 sub %o0, %o1, %o0; \
623 618 srl %o0, 2, %o0; \
624 619 inc %o0; \
625 620 set DISP22, %o1; \
626 621 and %o1, %o0, %o0; \
627 622 set BA, %o1; \
628 623 or %o1, %o0, %o0; \
629 624 sethi %hi(annul), %o2; \
630 625 add %o0, %o2, %o2; \
631 626 set addr, %o0; \
632 627 set normal_instr, %o1; \
633 628 ld [%i0 + (event * DTRACE_IDSIZE)], %o3; \
634 629 tst %o3; \
635 630 movnz %icc, %o2, %o1; \
636 631 call hot_patch_kernel_text; \
637 632 mov 4, %o2; \
638 633 membar #Sync
639 634
640 635 #define HOT_PATCH(addr, event, normal_instr) \
641 636 HOT_PATCH_COMMON(addr, event, normal_instr, 0, %i1)
642 637
643 638 #define HOT_PATCH_ARG(addr, event, normal_instr, arg) \
644 639 HOT_PATCH_COMMON(addr, event, normal_instr, 0, arg)
645 640
646 641 #define HOT_PATCH_ANNULLED(addr, event, normal_instr) \
647 642 HOT_PATCH_COMMON(addr, event, normal_instr, ANNUL, %i1)
648 643
649 644 ENTRY(lockstat_hot_patch)
650 645 save %sp, -SA(MINFRAME), %sp
651 646 set lockstat_probemap, %i0
652 647 HOT_PATCH(.mutex_enter_lockstat_patch_point,
653 648 LS_MUTEX_ENTER_ACQUIRE, RETL)
654 649 HOT_PATCH_ANNULLED(.mutex_tryenter_lockstat_patch_point,
655 650 LS_MUTEX_TRYENTER_ACQUIRE, RETL)
656 651 HOT_PATCH(.mutex_exit_lockstat_patch_point,
657 652 LS_MUTEX_EXIT_RELEASE, RETL)
658 653 HOT_PATCH(.rw_write_enter_lockstat_patch_point,
659 654 LS_RW_ENTER_ACQUIRE, RETL)
660 655 HOT_PATCH(.rw_read_enter_lockstat_patch_point,
661 656 LS_RW_ENTER_ACQUIRE, RETL)
662 657 HOT_PATCH_ARG(.rw_write_exit_lockstat_patch_point,
663 658 LS_RW_EXIT_RELEASE, RETL, RW_WRITER)
664 659 HOT_PATCH_ARG(.rw_read_exit_lockstat_patch_point,
665 660 LS_RW_EXIT_RELEASE, RETL, RW_READER)
666 661 HOT_PATCH(.lock_set_lockstat_patch_point,
667 662 LS_LOCK_SET_ACQUIRE, RETL)
668 663 HOT_PATCH_ANNULLED(.lock_try_lockstat_patch_point,
669 664 LS_LOCK_TRY_ACQUIRE, RETL)
670 665 HOT_PATCH(.lock_clear_lockstat_patch_point,
671 666 LS_LOCK_CLEAR_RELEASE, RETL)
672 667 HOT_PATCH(.lock_set_spl_lockstat_patch_point,
673 668 LS_LOCK_SET_SPL_ACQUIRE, RETL)
674 669 HOT_PATCH(.lock_clear_splx_lockstat_patch_point,
675 670 LS_LOCK_CLEAR_SPLX_RELEASE, RETL)
676 671 ret
677 672 restore
678 673 SET_SIZE(lockstat_hot_patch)
679 674
680 675 #endif /* lint */
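
The patch points referenced above (.mutex_enter_lockstat_patch_point and friends) are ordinary retl instructions on the hot return paths. For each one, lockstat_hot_patch() builds either a ba (optionally annulled) into the per-event probe stub or the original retl, depending on whether the event is enabled in lockstat_probemap, and passes the chosen instruction to hot_patch_kernel_text(). A compact C sketch of the instruction selection done by HOT_PATCH_COMMON, with illustrative names (the constant values match the #defines above):

        #include <sys/types.h>

        #define BA_OP           0x10800000u     /* ba <disp22> */
        #define ANNUL_BIT       0x20000000u     /* ,a */
        #define RETL_INSTR      0x81c3e008u     /* retl */
        #define DISP22_MASK     ((1u << 22) - 1)

        static uint32_t
        example_patch_instr(uintptr_t patch_point, uintptr_t probe_stub,
            boolean_t enabled, boolean_t annulled)
        {
                /* Low 22 bits of the word displacement to the probe stub. */
                uint32_t disp = (uint32_t)((probe_stub - patch_point) >> 2) &
                    DISP22_MASK;
                uint32_t branch = BA_OP | disp | (annulled ? ANNUL_BIT : 0);

                /* Enabled events branch to the stub; disabled ones keep retl. */
                return (enabled ? branch : RETL_INSTR);
        }

The macro's final lines then do the actual write with hot_patch_kernel_text(addr, instr, 4) followed by membar #Sync.
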
681 676
682 677 /*
683 678 * asm_mutex_spin_enter(mutex_t *)
684 679 *
685 680 * For use by assembly interrupt handler only.
686 681 * Does not change spl, since the interrupt handler is assumed to be
687 682 * running at high level already.
688 683 * Traps may be off, so cannot panic.
689 684 * Does not keep statistics on the lock.
690 685 *
691 686 * Entry: %l6 - points to mutex
692 - * %l7 - address of call (returns to %l7+8)
687 + * %l7 - address of call (returns to %l7+8)
693 688 * Uses: %l6, %l5
694 689 */
695 690 #ifndef lint
696 691 .align 16
697 692 ENTRY_NP(asm_mutex_spin_enter)
698 693 ldstub [%l6 + M_SPINLOCK], %l5 ! try to set lock, get value in %l5
699 694 1:
700 695 tst %l5
701 696 bnz 3f ! lock already held - go spin
702 697 nop
703 -2:
698 +2:
704 699 jmp %l7 + 8 ! return
705 700 membar #LoadLoad
706 701 !
707 702 ! Spin on lock without using an atomic operation to prevent the caches
708 703 ! from unnecessarily moving ownership of the line around.
709 704 !
710 705 3:
711 706 ldub [%l6 + M_SPINLOCK], %l5
712 707 4:
713 708 tst %l5
714 709 bz,a 1b ! lock appears to be free, try again
715 710 ldstub [%l6 + M_SPINLOCK], %l5 ! delay slot - try to set lock
716 711
717 712 sethi %hi(panicstr) , %l5
718 713 ldn [%l5 + %lo(panicstr)], %l5
719 - tst %l5
714 + tst %l5
720 715 bnz 2b ! after panic, feign success
721 716 nop
722 717 b 4b
723 718 ldub [%l6 + M_SPINLOCK], %l5 ! delay - reload lock
724 719 SET_SIZE(asm_mutex_spin_enter)
725 720 #endif /* lint */
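
The spin loop above is a classic test-and-test-and-set: the atomic ldstub is retried only after a plain load has seen the lock byte go to zero, so waiting CPUs share the cache line read-only rather than bouncing ownership around, and a non-NULL panicstr makes the spin give up and feign success. A C sketch of the same shape; example_spin_enter() is an illustrative name, while ldstub() and panicstr are the symbols already used in this file.

        #include <sys/types.h>

        extern uint8_t ldstub(uint8_t *);       /* defined earlier in this file */
        extern char *panicstr;                  /* set once the system panics */

        static void
        example_spin_enter(volatile uint8_t *lp)
        {
                while (ldstub((uint8_t *)lp) != 0) {    /* 0xFF means held */
                        /* Spin with plain loads until the byte looks free. */
                        while (*lp != 0) {
                                if (panicstr != NULL)
                                        return;         /* after panic, feign success */
                        }
                }
        }
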
726 721
727 722 /*
728 723 * asm_mutex_spin_exit(mutex_t *)
729 724 *
730 725 * For use by assembly interrupt handler only.
731 726 * Does not change spl, since the interrupt handler is assumed to be
732 727 * running at high level already.
733 728 *
734 729 * Entry: %l6 - points to mutex
735 - * %l7 - address of call (returns to %l7+8)
730 + * %l7 - address of call (returns to %l7+8)
736 731 * Uses: none
737 732 */
738 733 #ifndef lint
739 734 ENTRY_NP(asm_mutex_spin_exit)
740 735 membar #LoadStore|#StoreStore
741 736 jmp %l7 + 8 ! return
742 737 clrb [%l6 + M_SPINLOCK] ! delay - clear lock
743 738 SET_SIZE(asm_mutex_spin_exit)
744 739 #endif /* lint */
745 740
746 741 /*
747 742 * thread_onproc()
748 743 * Set thread in onproc state for the specified CPU.
749 744 * Also set the thread lock pointer to the CPU's onproc lock.
750 745 * Since the new lock isn't held, the store ordering is important.
751 746 * If not done in assembler, the compiler could reorder the stores.
752 747 */
753 748 #if defined(lint)
754 749
755 750 void
756 751 thread_onproc(kthread_id_t t, cpu_t *cp)
757 752 {
758 753 t->t_state = TS_ONPROC;
759 754 t->t_lockp = &cp->cpu_thread_lock;
760 755 }
761 756
762 757 #else /* lint */
763 758
764 759 ENTRY(thread_onproc)
765 760 set TS_ONPROC, %o2 ! TS_ONPROC state
766 761 st %o2, [%o0 + T_STATE] ! store state
767 762 add %o1, CPU_THREAD_LOCK, %o3 ! pointer to disp_lock while running
768 763 retl ! return
769 764 stn %o3, [%o0 + T_LOCKP] ! delay - store new lock pointer
770 765 SET_SIZE(thread_onproc)
771 766
772 767 #endif /* lint */
773 768
774 769 /* delay function used in some mutex code - just do 3 nop cas ops */
775 770 #if defined(lint)
776 771
777 772 /* ARGSUSED */
778 773 void
779 774 cas_delay(void *addr)
780 775 {}
781 776 #else /* lint */
782 777 ENTRY(cas_delay)
783 778 casx [%o0], %g0, %g0
784 779 casx [%o0], %g0, %g0
785 780 retl
786 781 casx [%o0], %g0, %g0
787 782 SET_SIZE(cas_delay)
788 783 #endif /* lint */
789 784
790 785 #if defined(lint)
791 786
792 787 /*
 793 788 * Alternative delay function for some Niagara processors. The rd
 794 789 * instruction uses fewer resources than casx on those CPUs.
795 790 */
796 791 /* ARGSUSED */
797 792 void
798 793 rdccr_delay(void)
799 794 {}
800 795 #else /* lint */
801 796 ENTRY(rdccr_delay)
802 797 rd %ccr, %g0
803 798 rd %ccr, %g0
804 799 retl
805 800 rd %ccr, %g0
806 801 SET_SIZE(rdccr_delay)
807 802 #endif /* lint */
808 803
809 804 /*
810 805 * mutex_delay_default(void)
811 806 * Spins for approx a few hundred processor cycles and returns to caller.
812 807 */
813 808 #if defined(lint)
814 809
815 810 void
816 811 mutex_delay_default(void)
817 812 {}
818 813
819 814 #else /* lint */
820 815
821 816 ENTRY(mutex_delay_default)
822 817 mov 72,%o0
823 818 1: brgz %o0, 1b
824 819 dec %o0
825 820 retl
826 821 nop
827 822 SET_SIZE(mutex_delay_default)
828 823
829 824 #endif /* lint */