de-linting of .s files
--- old/usr/src/uts/sparc/v9/ml/lock_prim.s
+++ new/usr/src/uts/sparc/v9/ml/lock_prim.s
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
... 15 lines elided ...
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 */
25 25
26 -#pragma ident "%Z%%M% %I% %E% SMI"
27 -
28 -#if defined(lint)
29 -#include <sys/types.h>
30 -#include <sys/thread.h>
31 -#include <sys/cpuvar.h>
32 -#else /* lint */
33 26 #include "assym.h"
34 -#endif /* lint */
35 27
36 28 #include <sys/t_lock.h>
37 29 #include <sys/mutex.h>
38 30 #include <sys/mutex_impl.h>
39 31 #include <sys/rwlock_impl.h>
40 32 #include <sys/asm_linkage.h>
41 33 #include <sys/machlock.h>
42 34 #include <sys/machthread.h>
43 35 #include <sys/lockstat.h>
44 36
45 37 /* #define DEBUG */
46 38
47 39 #ifdef DEBUG
48 40 #include <sys/machparam.h>
49 41 #endif /* DEBUG */
50 42
... 6 lines elided ...
51 43 /************************************************************************
52 44 * ATOMIC OPERATIONS
53 45 */
54 46
55 47 /*
56 48 * uint8_t ldstub(uint8_t *cp)
57 49 *
58 50 * Store 0xFF at the specified location, and return its previous content.
59 51 */
60 52
61 -#if defined(lint)
62 -uint8_t
63 -ldstub(uint8_t *cp)
64 -{
65 - uint8_t rv;
66 - rv = *cp;
67 - *cp = 0xFF;
68 - return rv;
69 -}
70 -#else /* lint */
71 -
72 53 ENTRY(ldstub)
73 54 retl
74 55 ldstub [%o0], %o0
75 56 SET_SIZE(ldstub)
76 57
77 -#endif /* lint */
78 -
79 58 /************************************************************************
80 59 * MEMORY BARRIERS -- see atomic.h for full descriptions.
81 60 */
82 61
83 -#if defined(lint)
84 -
85 -void
86 -membar_enter(void)
87 -{}
88 -
89 -void
90 -membar_exit(void)
91 -{}
92 -
93 -void
94 -membar_producer(void)
95 -{}
96 -
97 -void
98 -membar_consumer(void)
99 -{}
100 -
101 -#else /* lint */
102 -
103 62 #ifdef SF_ERRATA_51
104 63 .align 32
105 64 ENTRY(membar_return)
106 65 retl
107 66 nop
108 67 SET_SIZE(membar_return)
109 68 #define MEMBAR_RETURN ba,pt %icc, membar_return
110 69 #else
111 70 #define MEMBAR_RETURN retl
112 71 #endif
113 72
114 73 ENTRY(membar_enter)
115 74 MEMBAR_RETURN
116 75 membar #StoreLoad|#StoreStore
117 76 SET_SIZE(membar_enter)
118 77
119 78 ENTRY(membar_exit)
120 79 MEMBAR_RETURN
121 80 membar #LoadStore|#StoreStore
122 81 SET_SIZE(membar_exit)
123 82
... 11 lines elided ...
124 83 ENTRY(membar_producer)
125 84 MEMBAR_RETURN
126 85 membar #StoreStore
127 86 SET_SIZE(membar_producer)
128 87
129 88 ENTRY(membar_consumer)
130 89 MEMBAR_RETURN
131 90 membar #LoadLoad
132 91 SET_SIZE(membar_consumer)
133 92
134 -#endif /* lint */
135 -
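Reviewer note: the four membar entry points above are the usual producer/consumer primitives. A minimal C sketch of how a caller pairs membar_producer() with membar_consumer() (the data/ready variables and function names are illustrative only, not from this change):

    #include <sys/atomic.h>         /* declares the membar_*() routines */

    static volatile int data;
    static volatile int ready;

    static void
    publish(int v)
    {
            data = v;
            membar_producer();      /* order the data store before the flag store */
            ready = 1;
    }

    static int
    consume(void)
    {
            while (ready == 0)
                    ;               /* a real consumer would block or bound this spin */
            membar_consumer();      /* order the flag load before the data load */
            return (data);
    }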
136 93 /************************************************************************
137 94 * MINIMUM LOCKS
138 95 */
139 96
140 -#if defined(lint)
141 -
142 -/*
143 - * lock_try(lp), ulock_try(lp)
144 - * - returns non-zero on success.
145 - * - doesn't block interrupts so don't use this to spin on a lock.
146 - * - uses "0xFF is busy, anything else is free" model.
147 - *
148 - * ulock_try() is for a lock in the user address space.
149 - * For all V7/V8 sparc systems they are same since the kernel and
150 - * user are mapped in a user' context.
151 - * For V9 platforms the lock_try and ulock_try are different impl.
152 - */
153 -
154 -int
155 -lock_try(lock_t *lp)
156 -{
157 - return (0xFF ^ ldstub(lp));
158 -}
159 -
160 -int
161 -lock_spin_try(lock_t *lp)
162 -{
163 - return (0xFF ^ ldstub(lp));
164 -}
165 -
166 -void
167 -lock_set(lock_t *lp)
168 -{
169 - extern void lock_set_spin(lock_t *);
170 -
171 - if (!lock_try(lp))
172 - lock_set_spin(lp);
173 - membar_enter();
174 -}
175 -
176 -void
177 -lock_clear(lock_t *lp)
178 -{
179 - membar_exit();
180 - *lp = 0;
181 -}
182 -
183 -int
184 -ulock_try(lock_t *lp)
185 -{
186 - return (0xFF ^ ldstub(lp));
187 -}
188 -
189 -void
190 -ulock_clear(lock_t *lp)
191 -{
192 - membar_exit();
193 - *lp = 0;
194 -}
195 -
196 -#else /* lint */
197 -
198 97 .align 32
199 98 ENTRY(lock_try)
200 99 ldstub [%o0], %o1 ! try to set lock, get value in %o1
201 100 brnz,pn %o1, 1f
202 101 membar #LoadLoad
203 102 .lock_try_lockstat_patch_point:
204 103 retl
205 104 or %o0, 1, %o0 ! ensure lo32 != 0
206 105 1:
207 106 retl
208 107 clr %o0
209 108 SET_SIZE(lock_try)
210 109
211 110 .align 32
212 111 ENTRY(lock_spin_try)
213 112 ldstub [%o0], %o1 ! try to set lock, get value in %o1
214 113 brnz,pn %o1, 1f
215 114 membar #LoadLoad
216 115 retl
217 116 or %o0, 1, %o0 ! ensure lo32 != 0
218 117 1:
219 118 retl
220 119 clr %o0
221 120 SET_SIZE(lock_spin_try)
222 121
223 122 .align 32
224 123 ENTRY(lock_set)
225 124 ldstub [%o0], %o1
226 125 brnz,pn %o1, 1f ! go to C for the hard case
227 126 membar #LoadLoad
228 127 .lock_set_lockstat_patch_point:
229 128 retl
230 129 nop
231 130 1:
232 131 sethi %hi(lock_set_spin), %o2 ! load up for jump to C
233 132 jmp %o2 + %lo(lock_set_spin)
234 133 nop ! delay: do nothing
235 134 SET_SIZE(lock_set)
236 135
237 136 ENTRY(lock_clear)
238 137 membar #LoadStore|#StoreStore
239 138 .lock_clear_lockstat_patch_point:
240 139 retl
241 140 clrb [%o0]
242 141 SET_SIZE(lock_clear)
243 142
244 143 .align 32
245 144 ENTRY(ulock_try)
246 145 ldstuba [%o0]ASI_USER, %o1 ! try to set lock, get value in %o1
247 146 xor %o1, 0xff, %o0 ! delay - return non-zero if success
... 40 lines elided ...
248 147 retl
249 148 membar #LoadLoad
250 149 SET_SIZE(ulock_try)
251 150
252 151 ENTRY(ulock_clear)
253 152 membar #LoadStore|#StoreStore
254 153 retl
255 154 stba %g0, [%o0]ASI_USER ! clear lock
256 155 SET_SIZE(ulock_clear)
257 156
258 -#endif /* lint */
259 157
260 -
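Reviewer note: the deleted lint bodies above already give the C equivalents of these routines; the sketch below only shows how a caller is expected to combine them under the "0xFF is busy" model. Illustrative only, and per the deleted comment lock_try() is not meant to be spun on, so the contended path falls back to lock_set():

    #include <sys/machlock.h>       /* lock_t */

    extern int  lock_try(lock_t *);
    extern void lock_set(lock_t *);
    extern void lock_clear(lock_t *);

    static void
    example_minimum_lock(lock_t *lp)
    {
            if (lock_try(lp)) {             /* nonzero return: lock acquired */
                    /* ... short critical section ... */
                    lock_clear(lp);         /* release: barrier, then store 0 */
            } else {
                    lock_set(lp);           /* unconditional acquire, spins in C if busy */
                    /* ... critical section ... */
                    lock_clear(lp);
            }
    }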
261 158 /*
262 159 * lock_set_spl(lp, new_pil, *old_pil_addr)
263 160 * Sets pil to new_pil, grabs lp, stores old pil in *old_pil_addr.
264 161 */
265 162
266 -#if defined(lint)
267 -
268 -/* ARGSUSED */
269 -void
270 -lock_set_spl(lock_t *lp, int new_pil, u_short *old_pil_addr)
271 -{
272 - extern int splr(int);
273 - extern void lock_set_spl_spin(lock_t *, int, u_short *, int);
274 - int old_pil;
275 -
276 - old_pil = splr(new_pil);
277 - if (!lock_try(lp)) {
278 - lock_set_spl_spin(lp, new_pil, old_pil_addr, old_pil);
279 - } else {
280 - *old_pil_addr = (u_short)old_pil;
281 - membar_enter();
282 - }
283 -}
284 -
285 -#else /* lint */
286 -
287 163 ENTRY(lock_set_spl)
288 164 rdpr %pil, %o3 ! %o3 = current pil
289 165 cmp %o3, %o1 ! is current pil high enough?
290 166 bl,a,pt %icc, 1f ! if not, write %pil in delay
291 167 wrpr %g0, %o1, %pil
292 168 1:
293 169 ldstub [%o0], %o4 ! try the lock
294 170 brnz,pn %o4, 2f ! go to C for the miss case
295 171 membar #LoadLoad
296 172 .lock_set_spl_lockstat_patch_point:
297 173 retl
298 174 sth %o3, [%o2] ! delay - save original pil
299 175 2:
300 176 sethi %hi(lock_set_spl_spin), %o5 ! load up jmp to C
301 177 jmp %o5 + %lo(lock_set_spl_spin) ! jmp to lock_set_spl_spin
302 178 nop ! delay: do nothing
303 179 SET_SIZE(lock_set_spl)
304 180
305 -#endif /* lint */
306 -
307 181 /*
308 182 * lock_clear_splx(lp, s)
309 183 */
310 184
311 -#if defined(lint)
312 -
313 -void
314 -lock_clear_splx(lock_t *lp, int s)
315 -{
316 - extern void splx(int);
317 -
318 - lock_clear(lp);
319 - splx(s);
320 -}
321 -
322 -#else /* lint */
323 -
324 185 ENTRY(lock_clear_splx)
325 186 ldn [THREAD_REG + T_CPU], %o2 ! get CPU pointer
326 187 membar #LoadStore|#StoreStore
327 188 ld [%o2 + CPU_BASE_SPL], %o2
328 189 clrb [%o0] ! clear lock
329 190 cmp %o2, %o1 ! compare new to base
330 191 movl %xcc, %o1, %o2 ! use new pri if base is less
331 192 .lock_clear_splx_lockstat_patch_point:
332 193 retl
333 194 wrpr %g0, %o2, %pil
334 195 SET_SIZE(lock_clear_splx)
335 196
336 -#endif /* lint */
337 -
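Reviewer note: lock_set_spl() and lock_clear_splx() are meant to be used as a pair, as the deleted lint bodies show; a typical caller looks roughly like this (sketch only; LOCK_LEVEL is used here just as a plausible pil argument, and the helper name is invented):

    #include <sys/types.h>
    #include <sys/machlock.h>

    extern void lock_set_spl(lock_t *, int, u_short *);
    extern void lock_clear_splx(lock_t *, int);

    static void
    example_spl_lock(lock_t *lp)
    {
            u_short saved_pil;

            lock_set_spl(lp, LOCK_LEVEL, &saved_pil);   /* raise pil, then take the lock */
            /* ... critical section at raised pil ... */
            lock_clear_splx(lp, saved_pil);             /* drop the lock, restore pil */
    }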
338 197 /*
339 198 * mutex_enter() and mutex_exit().
340 199 *
341 200 * These routines handle the simple cases of mutex_enter() (adaptive
342 201 * lock, not held) and mutex_exit() (adaptive lock, held, no waiters).
343 202 * If anything complicated is going on we punt to mutex_vector_enter().
344 203 *
345 204 * mutex_tryenter() is similar to mutex_enter() but returns zero if
346 205 * the lock cannot be acquired, nonzero on success.
347 206 *
348 207 * If mutex_exit() gets preempted in the window between checking waiters
349 208 * and clearing the lock, we can miss wakeups. Disabling preemption
350 209 * in the mutex code is prohibitively expensive, so instead we detect
351 210 * mutex preemption by examining the trapped PC in the interrupt path.
... 4 lines elided ...
352 211 * If we interrupt a thread in mutex_exit() that has not yet cleared
353 212 * the lock, pil_interrupt() resets its PC back to the beginning of
354 213 * mutex_exit() so it will check again for waiters when it resumes.
355 214 *
356 215 * The lockstat code below is activated when the lockstat driver
357 216 * calls lockstat_hot_patch() to hot-patch the kernel mutex code.
358 217 * Note that we don't need to test lockstat_event_mask here -- we won't
359 218 * patch this code in unless we're gathering ADAPTIVE_HOLD lockstats.
360 219 */
361 220
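Reviewer note: the fast paths that the comment above describes are easier to follow in C. This is a sketch for review only, not the generated code: the owner word is treated as a raw ulong_t, the prototypes are simplified, and atomic_cas_ulong() stands in for the casx instruction.

    #include <sys/types.h>
    #include <sys/atomic.h>
    #include <sys/thread.h>         /* curthread */
    #include <sys/mutex.h>

    extern void mutex_vector_enter(kmutex_t *);
    extern void mutex_vector_exit(kmutex_t *);

    static void
    mutex_enter_sketch(kmutex_t *lp)
    {
            volatile ulong_t *owner = (volatile ulong_t *)lp;   /* word 0: owner | waiter/type bits */

            if (atomic_cas_ulong(owner, 0, (ulong_t)curthread) != 0)
                    mutex_vector_enter(lp);     /* held, has waiters, or not adaptive */
    }

    static void
    mutex_exit_sketch(kmutex_t *lp)
    {
            volatile ulong_t *owner = (volatile ulong_t *)lp;

            /*
             * The real mutex_exit() below does this with a plain load and
             * store inside the restartable mutex_exit_critical window; the
             * cas here is only to keep the sketch obviously atomic.
             */
            if (atomic_cas_ulong(owner, (ulong_t)curthread, 0) != (ulong_t)curthread)
                    mutex_vector_exit(lp);      /* waiters present, or we are not the owner */
    }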
362 -#if defined (lint)
363 -
364 -/* ARGSUSED */
365 -void
366 -mutex_enter(kmutex_t *lp)
367 -{}
368 -
369 -/* ARGSUSED */
370 -int
371 -mutex_tryenter(kmutex_t *lp)
372 -{ return (0); }
373 -
374 -/* ARGSUSED */
375 -void
376 -mutex_exit(kmutex_t *lp)
377 -{}
378 -
379 -/* ARGSUSED */
380 -void *
381 -mutex_owner_running(mutex_impl_t *lp)
382 -{ return (NULL); }
383 -
384 -#else
385 221 .align 32
386 222 ENTRY(mutex_enter)
387 223 mov THREAD_REG, %o1
388 224 casx [%o0], %g0, %o1 ! try to acquire as adaptive
389 225 brnz,pn %o1, 1f ! locked or wrong type
390 226 membar #LoadLoad
391 227 .mutex_enter_lockstat_patch_point:
392 228 retl
393 229 nop
394 230 1:
395 231 sethi %hi(mutex_vector_enter), %o2 ! load up for jump to C
396 232 jmp %o2 + %lo(mutex_vector_enter)
397 233 nop
398 234 SET_SIZE(mutex_enter)
399 235
400 236 ENTRY(mutex_tryenter)
401 237 mov THREAD_REG, %o1
402 238 casx [%o0], %g0, %o1 ! try to acquire as adaptive
403 239 brnz,pn %o1, 1f ! locked or wrong type continue
404 240 membar #LoadLoad
405 241 .mutex_tryenter_lockstat_patch_point:
406 242 retl
407 243 or %o0, 1, %o0 ! ensure lo32 != 0
408 244 1:
409 245 sethi %hi(mutex_vector_tryenter), %o2 ! hi bits
410 246 jmp %o2 + %lo(mutex_vector_tryenter) ! go to C
411 247 nop
412 248 SET_SIZE(mutex_tryenter)
413 249
414 250 ENTRY(mutex_adaptive_tryenter)
415 251 mov THREAD_REG, %o1
416 252 casx [%o0], %g0, %o1 ! try to acquire as adaptive
417 253 brnz,pn %o1, 0f ! locked or wrong type
418 254 membar #LoadLoad
419 255 retl
420 256 or %o0, 1, %o0 ! ensure lo32 != 0
421 257 0:
422 258 retl
423 259 mov %g0, %o0
424 260 SET_SIZE(mutex_adaptive_tryenter)
425 261
426 262 ! these need to be together and cache aligned for performance.
427 263 .align 64
428 264 .global mutex_exit_critical_size
429 265 .global mutex_exit_critical_start
430 266 .global mutex_owner_running_critical_size
431 267 .global mutex_owner_running_critical_start
432 268
433 269 mutex_exit_critical_size = .mutex_exit_critical_end - mutex_exit_critical_start
434 270
435 271 .align 32
436 272
437 273 ENTRY(mutex_exit)
438 274 mutex_exit_critical_start: ! If we are interrupted, restart here
439 275 ldn [%o0], %o1 ! get the owner field
440 276 membar #LoadStore|#StoreStore
441 277 cmp THREAD_REG, %o1 ! do we own lock with no waiters?
442 278 be,a,pt %ncc, 1f ! if so, drive on ...
443 279 stn %g0, [%o0] ! delay: clear lock if we owned it
444 280 .mutex_exit_critical_end: ! for pil_interrupt() hook
445 281 ba,a,pt %xcc, mutex_vector_exit ! go to C for the hard cases
446 282 1:
447 283 .mutex_exit_lockstat_patch_point:
448 284 retl
449 285 nop
450 286 SET_SIZE(mutex_exit)
451 287
452 288 mutex_owner_running_critical_size = .mutex_owner_running_critical_end - mutex_owner_running_critical_start
453 289
454 290 .align 32
455 291
456 292 ENTRY(mutex_owner_running)
457 293 mutex_owner_running_critical_start: ! If interrupted restart here
458 294 ldn [%o0], %o1 ! get the owner field
459 295 and %o1, MUTEX_THREAD, %o1 ! remove the waiters bit if any
460 296 brz,pn %o1, 1f ! if so, drive on ...
461 297 nop
462 298 ldn [%o1+T_CPU], %o2 ! get owner->t_cpu
463 299 ldn [%o2+CPU_THREAD], %o3 ! get owner->t_cpu->cpu_thread
464 300 .mutex_owner_running_critical_end: ! for pil_interrupt() hook
465 301 cmp %o1, %o3 ! owner == running thread?
... 71 lines elided ...
466 302 be,a,pt %xcc, 2f ! yes, go return cpu
467 303 nop
468 304 1:
469 305 retl
470 306 mov %g0, %o0 ! return 0 (owner not running)
471 307 2:
472 308 retl
473 309 mov %o2, %o0 ! owner running, return cpu
474 310 SET_SIZE(mutex_owner_running)
475 311
476 -#endif /* lint */
477 -
478 312 /*
479 313 * rw_enter() and rw_exit().
480 314 *
481 315 * These routines handle the simple cases of rw_enter (write-locking an unheld
482 316 * lock or read-locking a lock that's neither write-locked nor write-wanted)
483 317 * and rw_exit (no waiters or not the last reader). If anything complicated
484 318 * is going on we punt to rw_enter_sleep() and rw_exit_wakeup(), respectively.
485 319 */
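Reviewer note: a C rendering of the read-lock fast path described above may help review of the assembly. Sketch only: the lock word is treated as a raw ulong_t, the prototype of rw_enter_sleep() is simplified, and the reader kpri bookkeeping (T_KPRI_REQ) done by the real code is omitted. The non-sun4v retry loop is shown; the sun4v variant below punts to rw_enter_sleep() on a failed cas instead of retrying.

    #include <sys/types.h>
    #include <sys/atomic.h>
    #include <sys/rwlock.h>
    #include <sys/rwlock_impl.h>    /* RW_WRITE_CLAIMED, RW_READ_LOCK */

    extern void rw_enter_sleep(krwlock_t *, krw_t);

    static void
    rw_enter_read_sketch(krwlock_t *lp)
    {
            volatile ulong_t *wp = (volatile ulong_t *)lp;  /* writer/wait bits + hold count */
            ulong_t old, new;

            do {
                    old = *wp;
                    if (old & RW_WRITE_CLAIMED) {           /* write-locked or write-wanted */
                            rw_enter_sleep(lp, RW_READER);
                            return;
                    }
                    new = old + RW_READ_LOCK;               /* bump the hold count */
            } while (atomic_cas_ulong(wp, old, new) != old);
    }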
486 -#if defined(lint)
487 320
488 -/* ARGSUSED */
489 -void
490 -rw_enter(krwlock_t *lp, krw_t rw)
491 -{}
492 -
493 -/* ARGSUSED */
494 -void
495 -rw_exit(krwlock_t *lp)
496 -{}
497 -
498 -#else
499 -
500 321 .align 16
501 322 ENTRY(rw_enter)
502 323 cmp %o1, RW_WRITER ! entering as writer?
503 324 be,a,pn %icc, 2f ! if so, go do it ...
504 325 or THREAD_REG, RW_WRITE_LOCKED, %o5 ! delay: %o5 = owner
505 326 ld [THREAD_REG + T_KPRI_REQ], %o3 ! begin THREAD_KPRI_REQUEST()
506 327 ldn [%o0], %o4 ! %o4 = old lock value
507 328 inc %o3 ! bump kpri
508 329 st %o3, [THREAD_REG + T_KPRI_REQ] ! store new kpri
509 330 1:
510 331 andcc %o4, RW_WRITE_CLAIMED, %g0 ! write-locked or write-wanted?
511 332 bz,pt %xcc, 3f ! if so, prepare to block
512 333 add %o4, RW_READ_LOCK, %o5 ! delay: increment hold count
513 334 sethi %hi(rw_enter_sleep), %o2 ! load up jump
514 335 jmp %o2 + %lo(rw_enter_sleep) ! jmp to rw_enter_sleep
515 336 nop ! delay: do nothing
516 337 3:
517 338 casx [%o0], %o4, %o5 ! try to grab read lock
518 339 cmp %o4, %o5 ! did we get it?
519 340 #ifdef sun4v
520 341 be,a,pt %xcc, 0f
521 342 membar #LoadLoad
522 343 sethi %hi(rw_enter_sleep), %o2 ! load up jump
523 344 jmp %o2 + %lo(rw_enter_sleep) ! jmp to rw_enter_sleep
524 345 nop ! delay: do nothing
525 346 0:
526 347 #else /* sun4v */
527 348 bne,pn %xcc, 1b ! if not, try again
528 349 mov %o5, %o4 ! delay: %o4 = old lock value
529 350 membar #LoadLoad
530 351 #endif /* sun4v */
531 352 .rw_read_enter_lockstat_patch_point:
532 353 retl
533 354 nop
534 355 2:
535 356 casx [%o0], %g0, %o5 ! try to grab write lock
536 357 brz,pt %o5, 4f ! branch around if we got it
537 358 membar #LoadLoad ! done regardless of where we go
538 359 sethi %hi(rw_enter_sleep), %o2
539 360 jmp %o2 + %lo(rw_enter_sleep) ! jump to rw_enter_sleep if not
540 361 nop ! delay: do nothing
541 362 4:
542 363 .rw_write_enter_lockstat_patch_point:
543 364 retl
544 365 nop
545 366 SET_SIZE(rw_enter)
546 367
547 368 .align 16
548 369 ENTRY(rw_exit)
549 370 ldn [%o0], %o4 ! %o4 = old lock value
550 371 membar #LoadStore|#StoreStore ! membar_exit()
551 372 subcc %o4, RW_READ_LOCK, %o5 ! %o5 = new lock value if reader
552 373 bnz,pn %xcc, 2f ! single reader, no waiters?
553 374 clr %o1
554 375 1:
555 376 ld [THREAD_REG + T_KPRI_REQ], %g1 ! begin THREAD_KPRI_RELEASE()
556 377 srl %o4, RW_HOLD_COUNT_SHIFT, %o3 ! %o3 = hold count (lockstat)
557 378 casx [%o0], %o4, %o5 ! try to drop lock
558 379 cmp %o4, %o5 ! did we succeed?
559 380 bne,pn %xcc, rw_exit_wakeup ! if not, go to C
560 381 dec %g1 ! delay: drop kpri
561 382 .rw_read_exit_lockstat_patch_point:
562 383 retl
563 384 st %g1, [THREAD_REG + T_KPRI_REQ] ! delay: store new kpri
564 385 2:
565 386 andcc %o4, RW_WRITE_LOCKED, %g0 ! are we a writer?
566 387 bnz,a,pt %xcc, 3f
567 388 or THREAD_REG, RW_WRITE_LOCKED, %o4 ! delay: %o4 = owner
568 389 cmp %o5, RW_READ_LOCK ! would lock still be held?
569 390 bge,pt %xcc, 1b ! if so, go ahead and drop it
570 391 nop
571 392 ba,pt %xcc, rw_exit_wakeup ! otherwise, wake waiters
572 393 nop
... 63 lines elided ...
573 394 3:
574 395 casx [%o0], %o4, %o1 ! try to drop write lock
575 396 cmp %o4, %o1 ! did we succeed?
576 397 bne,pn %xcc, rw_exit_wakeup ! if not, go to C
577 398 nop
578 399 .rw_write_exit_lockstat_patch_point:
579 400 retl
580 401 nop
581 402 SET_SIZE(rw_exit)
582 403
583 -#endif
584 -
585 -#if defined(lint)
586 -
587 -void
588 -lockstat_hot_patch(void)
589 -{}
590 -
591 -#else
592 -
593 404 #define RETL 0x81c3e008
594 405 #define NOP 0x01000000
595 406 #define BA 0x10800000
596 407
597 408 #define DISP22 ((1 << 22) - 1)
598 409 #define ANNUL 0x20000000
599 410
600 411 #define HOT_PATCH_COMMON(addr, event, normal_instr, annul, rs) \
601 412 ba 1f; \
602 413 rd %pc, %o0; \
603 414 save %sp, -SA(MINFRAME), %sp; \
604 415 set lockstat_probemap, %l1; \
605 416 ld [%l1 + (event * DTRACE_IDSIZE)], %o0; \
606 417 brz,pn %o0, 0f; \
607 418 ldub [THREAD_REG + T_LOCKSTAT], %l0; \
608 419 add %l0, 1, %l2; \
609 420 stub %l2, [THREAD_REG + T_LOCKSTAT]; \
610 421 set lockstat_probe, %g1; \
611 422 ld [%l1 + (event * DTRACE_IDSIZE)], %o0; \
612 423 brz,a,pn %o0, 0f; \
613 424 stub %l0, [THREAD_REG + T_LOCKSTAT]; \
614 425 ldn [%g1], %g2; \
615 426 mov rs, %o2; \
616 427 jmpl %g2, %o7; \
617 428 mov %i0, %o1; \
618 429 stub %l0, [THREAD_REG + T_LOCKSTAT]; \
619 430 0: ret; \
620 431 restore %g0, 1, %o0; /* for mutex_tryenter / lock_try */ \
621 432 1: set addr, %o1; \
622 433 sub %o0, %o1, %o0; \
623 434 srl %o0, 2, %o0; \
624 435 inc %o0; \
625 436 set DISP22, %o1; \
626 437 and %o1, %o0, %o0; \
627 438 set BA, %o1; \
628 439 or %o1, %o0, %o0; \
629 440 sethi %hi(annul), %o2; \
630 441 add %o0, %o2, %o2; \
631 442 set addr, %o0; \
632 443 set normal_instr, %o1; \
633 444 ld [%i0 + (event * DTRACE_IDSIZE)], %o3; \
634 445 tst %o3; \
635 446 movnz %icc, %o2, %o1; \
636 447 call hot_patch_kernel_text; \
637 448 mov 4, %o2; \
638 449 membar #Sync
639 450
640 451 #define HOT_PATCH(addr, event, normal_instr) \
641 452 HOT_PATCH_COMMON(addr, event, normal_instr, 0, %i1)
642 453
643 454 #define HOT_PATCH_ARG(addr, event, normal_instr, arg) \
644 455 HOT_PATCH_COMMON(addr, event, normal_instr, 0, arg)
645 456
646 457 #define HOT_PATCH_ANNULLED(addr, event, normal_instr) \
647 458 HOT_PATCH_COMMON(addr, event, normal_instr, ANNUL, %i1)
648 459
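Reviewer note: the tail of HOT_PATCH_COMMON hand-assembles the instruction that will be written over each patch point: either the routine's normal instruction or a branch-always (optionally annulled) to that point's lockstat trampoline, chosen by whether the event is live in lockstat_probemap, and then stored with hot_patch_kernel_text(). The encoding step, rendered in C as a sketch (the opcode constants are the ones #defined just above; the helper name is invented):

    #include <sys/types.h>

    #define BA      0x10800000      /* SPARC "ba" (branch always) opcode */
    #define ANNUL   0x20000000      /* annul bit for the delay slot */
    #define DISP22  ((1 << 22) - 1) /* mask for the 22-bit word displacement */

    /* Encode a ba[,a] from patch_point to trampoline (instruction addresses). */
    static uint32_t
    encode_ba(uintptr_t patch_point, uintptr_t trampoline, int annulled)
    {
            uint32_t disp = (uint32_t)((trampoline - patch_point) >> 2) & DISP22;

            return (BA | disp | (annulled ? ANNUL : 0));
    }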
649 460 ENTRY(lockstat_hot_patch)
650 461 save %sp, -SA(MINFRAME), %sp
651 462 set lockstat_probemap, %i0
652 463 HOT_PATCH(.mutex_enter_lockstat_patch_point,
653 464 LS_MUTEX_ENTER_ACQUIRE, RETL)
654 465 HOT_PATCH_ANNULLED(.mutex_tryenter_lockstat_patch_point,
655 466 LS_MUTEX_TRYENTER_ACQUIRE, RETL)
656 467 HOT_PATCH(.mutex_exit_lockstat_patch_point,
657 468 LS_MUTEX_EXIT_RELEASE, RETL)
658 469 HOT_PATCH(.rw_write_enter_lockstat_patch_point,
659 470 LS_RW_ENTER_ACQUIRE, RETL)
660 471 HOT_PATCH(.rw_read_enter_lockstat_patch_point,
661 472 LS_RW_ENTER_ACQUIRE, RETL)
662 473 HOT_PATCH_ARG(.rw_write_exit_lockstat_patch_point,
663 474 LS_RW_EXIT_RELEASE, RETL, RW_WRITER)
664 475 HOT_PATCH_ARG(.rw_read_exit_lockstat_patch_point,
665 476 LS_RW_EXIT_RELEASE, RETL, RW_READER)
666 477 HOT_PATCH(.lock_set_lockstat_patch_point,
667 478 LS_LOCK_SET_ACQUIRE, RETL)
668 479 HOT_PATCH_ANNULLED(.lock_try_lockstat_patch_point,
669 480 LS_LOCK_TRY_ACQUIRE, RETL)
... 67 lines elided ...
670 481 HOT_PATCH(.lock_clear_lockstat_patch_point,
671 482 LS_LOCK_CLEAR_RELEASE, RETL)
672 483 HOT_PATCH(.lock_set_spl_lockstat_patch_point,
673 484 LS_LOCK_SET_SPL_ACQUIRE, RETL)
674 485 HOT_PATCH(.lock_clear_splx_lockstat_patch_point,
675 486 LS_LOCK_CLEAR_SPLX_RELEASE, RETL)
676 487 ret
677 488 restore
678 489 SET_SIZE(lockstat_hot_patch)
679 490
680 -#endif /* lint */
681 -
682 491 /*
683 492 * asm_mutex_spin_enter(mutex_t *)
684 493 *
685 494 * For use by assembly interrupt handler only.
686 495 * Does not change spl, since the interrupt handler is assumed to be
687 496 * running at high level already.
688 497 * Traps may be off, so cannot panic.
689 498 * Does not keep statistics on the lock.
690 499 *
691 500 * Entry: %l6 - points to mutex
692 501 * %l7 - address of call (returns to %l7+8)
693 502 * Uses: %l6, %l5
694 503 */
695 -#ifndef lint
696 504 .align 16
697 505 ENTRY_NP(asm_mutex_spin_enter)
698 506 ldstub [%l6 + M_SPINLOCK], %l5 ! try to set lock, get value in %l5
699 507 1:
700 508 tst %l5
701 509 bnz 3f ! lock already held - go spin
702 510 nop
703 511 2:
704 512 jmp %l7 + 8 ! return
705 513 membar #LoadLoad
706 514 !
707 515 ! Spin on lock without using an atomic operation to prevent the caches
708 516 ! from unnecessarily moving ownership of the line around.
709 517 !
710 518 3:
711 519 ldub [%l6 + M_SPINLOCK], %l5
712 520 4:
713 521 tst %l5
714 522 bz,a 1b ! lock appears to be free, try again
... 9 lines elided ...
715 523 ldstub [%l6 + M_SPINLOCK], %l5 ! delay slot - try to set lock
716 524
717 525 sethi %hi(panicstr) , %l5
718 526 ldn [%l5 + %lo(panicstr)], %l5
719 527 tst %l5
720 528 bnz 2b ! after panic, feign success
721 529 nop
722 530 b 4b
723 531 ldub [%l6 + M_SPINLOCK], %l5 ! delay - reload lock
724 532 SET_SIZE(asm_mutex_spin_enter)
725 -#endif /* lint */
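Reviewer note: the spin above is the classic test-and-test-and-set pattern: ldstub is only issued for the initial attempt and for the re-acquire, and while the lock is held the loop re-reads the byte with a plain load so the cache line is not bounced between CPUs. The same logic in C, as a sketch using the ldstub() primitive defined at the top of this file (the panicstr check mirrors the assembly; the helper name is invented):

    #include <sys/types.h>

    extern uint8_t ldstub(uint8_t *);
    extern char *panicstr;

    static void
    spin_enter_sketch(volatile uint8_t *lp)
    {
            while (ldstub((uint8_t *)lp) != 0) {    /* 0xFF: already held */
                    while (*lp != 0) {              /* spin on plain loads, no atomics */
                            if (panicstr != NULL)
                                    return;         /* after panic, feign success */
                    }
            }
    }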
726 533
727 534 /*
728 535 * asm_mutex_spin_exit(mutex_t *)
729 536 *
730 537 * For use by assembly interrupt handler only.
731 538 * Does not change spl, since the interrupt handler is assumed to be
732 539 * running at high level already.
733 540 *
734 541 * Entry: %l6 - points to mutex
735 542 * %l7 - address of call (returns to %l7+8)
736 543 * Uses: none
737 544 */
738 -#ifndef lint
739 545 ENTRY_NP(asm_mutex_spin_exit)
740 546 membar #LoadStore|#StoreStore
741 547 jmp %l7 + 8 ! return
742 548 clrb [%l6 + M_SPINLOCK] ! delay - clear lock
743 549 SET_SIZE(asm_mutex_spin_exit)
744 -#endif /* lint */
745 550
746 551 /*
747 552 * thread_onproc()
748 553 * Set thread in onproc state for the specified CPU.
749 554 * Also set the thread lock pointer to the CPU's onproc lock.
750 555 * Since the new lock isn't held, the store ordering is important.
751 556 * If not done in assembler, the compiler could reorder the stores.
752 557 */
753 -#if defined(lint)
754 558
755 -void
756 -thread_onproc(kthread_id_t t, cpu_t *cp)
757 -{
758 - t->t_state = TS_ONPROC;
759 - t->t_lockp = &cp->cpu_thread_lock;
760 -}
761 -
762 -#else /* lint */
763 -
764 559 ENTRY(thread_onproc)
765 560 set TS_ONPROC, %o2 ! TS_ONPROC state
766 561 st %o2, [%o0 + T_STATE] ! store state
767 562 add %o1, CPU_THREAD_LOCK, %o3 ! pointer to disp_lock while running
768 563 retl ! return
769 564 stn %o3, [%o0 + T_LOCKP] ! delay - store new lock pointer
770 565 SET_SIZE(thread_onproc)
771 566
772 -#endif /* lint */
773 -
774 567 /* delay function used in some mutex code - just do 3 nop cas ops */
775 -#if defined(lint)
776 -
777 -/* ARGSUSED */
778 -void
779 -cas_delay(void *addr)
780 -{}
781 -#else /* lint */
782 568 ENTRY(cas_delay)
783 569 casx [%o0], %g0, %g0
784 570 casx [%o0], %g0, %g0
785 571 retl
786 572 casx [%o0], %g0, %g0
787 573 SET_SIZE(cas_delay)
788 -#endif /* lint */
789 574
790 -#if defined(lint)
791 -
792 -/*
793 - * alternative delay function for some niagara processors. The rd
794 - * instruction uses less resources than casx on those cpus.
795 - */
796 -/* ARGSUSED */
797 -void
798 -rdccr_delay(void)
799 -{}
800 -#else /* lint */
801 575 ENTRY(rdccr_delay)
802 576 rd %ccr, %g0
803 577 rd %ccr, %g0
804 578 retl
805 579 rd %ccr, %g0
806 580 SET_SIZE(rdccr_delay)
807 -#endif /* lint */
808 581
809 582 /*
810 583 * mutex_delay_default(void)
811 584 * Spins for approx a few hundred processor cycles and returns to caller.
812 585 */
813 -#if defined(lint)
814 586
815 -void
816 -mutex_delay_default(void)
817 -{}
818 -
819 -#else /* lint */
820 -
821 587 ENTRY(mutex_delay_default)
822 588 mov 72,%o0
823 589 1: brgz %o0, 1b
824 590 dec %o0
825 591 retl
826 592 nop
827 593 SET_SIZE(mutex_delay_default)
828 594
829 -#endif /* lint */
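Reviewer note: cas_delay(), rdccr_delay() and mutex_delay_default() are interchangeable delay bodies; my understanding (not shown in this diff) is that the generic mutex backoff code selects one of them per platform. A hedged sketch of the kind of backoff loop they exist to serve; the loop and its names are invented for illustration:

    #include <sys/types.h>

    extern uint8_t ldstub(uint8_t *);
    extern void mutex_delay_default(void);

    static void
    backoff_acquire_sketch(volatile uint8_t *lp)
    {
            int backoff = 1, i;

            while (ldstub((uint8_t *)lp) != 0) {
                    for (i = 0; i < backoff; i++)
                            mutex_delay_default();  /* burn a few hundred cycles */
                    if (backoff < 64)
                            backoff <<= 1;          /* exponential backoff between attempts */
            }
    }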