/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include "assym.h"

#include <sys/t_lock.h>
#include <sys/mutex.h>
#include <sys/mutex_impl.h>
#include <sys/rwlock_impl.h>
#include <sys/asm_linkage.h>
#include <sys/machlock.h>
#include <sys/machthread.h>
#include <sys/lockstat.h>

/* #define DEBUG */

#ifdef DEBUG
#include <sys/machparam.h>
#endif /* DEBUG */

/************************************************************************
 *              ATOMIC OPERATIONS
 */

/*
 * uint8_t      ldstub(uint8_t *cp)
 *
 * Store 0xFF at the specified location, and return its previous content.
 */

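/*
 * Roughly equivalent C, for illustration only (the load and the 0xFF
 * store are performed as a single atomic operation by the hardware):
 *
 *      uint8_t
 *      ldstub(uint8_t *cp)
 *      {
 *              uint8_t old = *cp;
 *              *cp = 0xFF;
 *              return (old);
 *      }
 */
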
        ENTRY(ldstub)
        retl
        ldstub  [%o0], %o0
        SET_SIZE(ldstub)

/************************************************************************
 *              MEMORY BARRIERS -- see atomic.h for full descriptions.
 */

#ifdef SF_ERRATA_51
        .align 32
        ENTRY(membar_return)
        retl
        nop
        SET_SIZE(membar_return)
#define MEMBAR_RETURN   ba,pt %icc, membar_return
#else
#define MEMBAR_RETURN   retl
#endif

        ENTRY(membar_enter)
        MEMBAR_RETURN
        membar  #StoreLoad|#StoreStore
        SET_SIZE(membar_enter)

        ENTRY(membar_exit)
        MEMBAR_RETURN
        membar  #LoadStore|#StoreStore
        SET_SIZE(membar_exit)

        ENTRY(membar_producer)
        MEMBAR_RETURN
        membar  #StoreStore
        SET_SIZE(membar_producer)

        ENTRY(membar_consumer)
        MEMBAR_RETURN
        membar  #LoadLoad
        SET_SIZE(membar_consumer)

/************************************************************************
 *              MINIMUM LOCKS
 */

        .align  32
        ENTRY(lock_try)
        ldstub  [%o0], %o1              ! try to set lock, get value in %o1
        brnz,pn %o1, 1f
        membar  #LoadLoad
.lock_try_lockstat_patch_point:
        retl
        or      %o0, 1, %o0             ! ensure lo32 != 0
1:
        retl
        clr     %o0
        SET_SIZE(lock_try)

        .align  32
        ENTRY(lock_spin_try)
        ldstub  [%o0], %o1              ! try to set lock, get value in %o1
        brnz,pn %o1, 1f
        membar  #LoadLoad
        retl
        or      %o0, 1, %o0             ! ensure lo32 != 0
1:
        retl
        clr     %o0
        SET_SIZE(lock_spin_try)

        .align  32
        ENTRY(lock_set)
        ldstub  [%o0], %o1
        brnz,pn %o1, 1f                 ! go to C for the hard case
        membar  #LoadLoad
.lock_set_lockstat_patch_point:
        retl
        nop
1:
        sethi   %hi(lock_set_spin), %o2 ! load up for jump to C
        jmp     %o2 + %lo(lock_set_spin)
        nop                             ! delay: do nothing
        SET_SIZE(lock_set)

        ENTRY(lock_clear)
        membar  #LoadStore|#StoreStore
.lock_clear_lockstat_patch_point:
        retl
        clrb    [%o0]
        SET_SIZE(lock_clear)

        .align  32
        ENTRY(ulock_try)
        ldstuba [%o0]ASI_USER, %o1      ! try to set lock, get value in %o1
        xor     %o1, 0xff, %o0          ! return non-zero if success
        retl
          membar        #LoadLoad
        SET_SIZE(ulock_try)

        ENTRY(ulock_clear)
        membar  #LoadStore|#StoreStore
        retl
          stba  %g0, [%o0]ASI_USER      ! clear lock
        SET_SIZE(ulock_clear)


/*
 * lock_set_spl(lp, new_pil, *old_pil_addr)
 *      Raises the PIL to new_pil (if new_pil is above the current PIL),
 *      grabs lp, and stores the original PIL in *old_pil_addr.
 */

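/*
 * In rough C pseudocode (fast path only; the contended case is handled
 * by lock_set_spl_spin() in C, with the original PIL still in %o3):
 *
 *      old_pil = <current %pil>;
 *      if (new_pil > old_pil)
 *              <raise %pil to new_pil>;
 *      if (ldstub((uint8_t *)lp) == 0)
 *              *old_pil_addr = (ushort_t)old_pil;
 *      else
 *              lock_set_spl_spin(...);
 */
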
        ENTRY(lock_set_spl)
        rdpr    %pil, %o3                       ! %o3 = current pil
        cmp     %o3, %o1                        ! is current pil high enough?
        bl,a,pt %icc, 1f                        ! if not, write %pil in delay
        wrpr    %g0, %o1, %pil
1:
        ldstub  [%o0], %o4                      ! try the lock
        brnz,pn %o4, 2f                         ! go to C for the miss case
        membar  #LoadLoad
.lock_set_spl_lockstat_patch_point:
        retl
        sth     %o3, [%o2]                      ! delay - save original pil
2:
        sethi   %hi(lock_set_spl_spin), %o5     ! load up jmp to C
        jmp     %o5 + %lo(lock_set_spl_spin)    ! jmp to lock_set_spl_spin
        nop                                     ! delay: do nothing
        SET_SIZE(lock_set_spl)

/*
 * lock_clear_splx(lp, s)
 */

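/*
 * Clears the lock and then drops the PIL to 's', except that the PIL is
 * never lowered below the CPU's current base SPL (cpu_base_spl).
 */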
        ENTRY(lock_clear_splx)
        ldn     [THREAD_REG + T_CPU], %o2       ! get CPU pointer
        membar  #LoadStore|#StoreStore
        ld      [%o2 + CPU_BASE_SPL], %o2
        clrb    [%o0]                           ! clear lock
        cmp     %o2, %o1                        ! compare new to base
        movl    %xcc, %o1, %o2                  ! use new pri if base is less
.lock_clear_splx_lockstat_patch_point:
        retl
        wrpr    %g0, %o2, %pil
        SET_SIZE(lock_clear_splx)

/*
 * mutex_enter() and mutex_exit().
 *
 * These routines handle the simple cases of mutex_enter() (adaptive
 * lock, not held) and mutex_exit() (adaptive lock, held, no waiters).
 * If anything complicated is going on we punt to mutex_vector_enter().
 *
 * mutex_tryenter() is similar to mutex_enter() but returns zero if
 * the lock cannot be acquired, nonzero on success.
 *
 * If mutex_exit() gets preempted in the window between checking waiters
 * and clearing the lock, we can miss wakeups.  Disabling preemption
 * in the mutex code is prohibitively expensive, so instead we detect
 * mutex preemption by examining the trapped PC in the interrupt path.
 * If we interrupt a thread in mutex_exit() that has not yet cleared
 * the lock, pil_interrupt() resets its PC back to the beginning of
 * mutex_exit() so it will check again for waiters when it resumes.
 *
 * The lockstat code below is activated when the lockstat driver
 * calls lockstat_hot_patch() to hot-patch the kernel mutex code.
 * Note that we don't need to test lockstat_event_mask here -- we won't
 * patch this code in unless we're gathering ADAPTIVE_HOLD lockstats.
 */

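/*
 * In rough C pseudocode the fast paths below are (illustration only;
 * the first word of an adaptive mutex holds the owner thread pointer,
 * possibly with a low-order waiters bit set):
 *
 *      mutex_enter:    if (casx(&word, 0, curthread) != 0)
 *                              mutex_vector_enter(lp);
 *      mutex_tryenter: acquired if casx(&word, 0, curthread) == 0,
 *                      otherwise mutex_vector_tryenter(lp)
 *      mutex_exit:     if (word == curthread)
 *                              word = 0;
 *                      else
 *                              mutex_vector_exit(lp);
 */
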
        .align  32
        ENTRY(mutex_enter)
        mov     THREAD_REG, %o1
        casx    [%o0], %g0, %o1                 ! try to acquire as adaptive
        brnz,pn %o1, 1f                         ! locked or wrong type
        membar  #LoadLoad
.mutex_enter_lockstat_patch_point:
        retl
        nop
1:
        sethi   %hi(mutex_vector_enter), %o2    ! load up for jump to C
        jmp     %o2 + %lo(mutex_vector_enter)
        nop
        SET_SIZE(mutex_enter)

        ENTRY(mutex_tryenter)
        mov     THREAD_REG, %o1
        casx    [%o0], %g0, %o1                 ! try to acquire as adaptive
        brnz,pn %o1, 1f                         ! locked or wrong type
        membar  #LoadLoad
.mutex_tryenter_lockstat_patch_point:
        retl
        or      %o0, 1, %o0                     ! ensure lo32 != 0
1:
        sethi   %hi(mutex_vector_tryenter), %o2         ! hi bits
        jmp     %o2 + %lo(mutex_vector_tryenter)        ! go to C
        nop
        SET_SIZE(mutex_tryenter)

        ENTRY(mutex_adaptive_tryenter)
        mov     THREAD_REG, %o1
        casx    [%o0], %g0, %o1                 ! try to acquire as adaptive
        brnz,pn %o1, 0f                         ! locked or wrong type
        membar  #LoadLoad
        retl
        or      %o0, 1, %o0                     ! ensure lo32 != 0
0:
        retl
        mov     %g0, %o0
        SET_SIZE(mutex_adaptive_tryenter)

        ! these need to be together and cache aligned for performance.
        .align 64
        .global mutex_exit_critical_size
        .global mutex_exit_critical_start
        .global mutex_owner_running_critical_size
        .global mutex_owner_running_critical_start

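        ! The _critical_start/_critical_size pairs below bracket the windows
        ! in mutex_exit() and mutex_owner_running() that pil_interrupt() must
        ! be able to recognize: if it interrupts a thread inside one of these
        ! windows it restarts the thread at the matching _critical_start.
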
mutex_exit_critical_size = .mutex_exit_critical_end - mutex_exit_critical_start

        .align  32

        ENTRY(mutex_exit)
mutex_exit_critical_start:              ! If we are interrupted, restart here
        ldn     [%o0], %o1              ! get the owner field
        membar  #LoadStore|#StoreStore
        cmp     THREAD_REG, %o1         ! do we own lock with no waiters?
        be,a,pt %ncc, 1f                ! if so, drive on ...
        stn     %g0, [%o0]              ! delay: clear lock if we owned it
.mutex_exit_critical_end:               ! for pil_interrupt() hook
        ba,a,pt %xcc, mutex_vector_exit ! go to C for the hard cases
1:
.mutex_exit_lockstat_patch_point:
        retl
        nop
        SET_SIZE(mutex_exit)

mutex_owner_running_critical_size = .mutex_owner_running_critical_end - mutex_owner_running_critical_start

        .align  32

        ENTRY(mutex_owner_running)
mutex_owner_running_critical_start:     ! If interrupted restart here
        ldn     [%o0], %o1              ! get the owner field
        and     %o1, MUTEX_THREAD, %o1  ! remove the waiters bit if any
        brz,pn  %o1, 1f                 ! if no owner, return 0
        nop
        ldn     [%o1+T_CPU], %o2        ! get owner->t_cpu
        ldn     [%o2+CPU_THREAD], %o3   ! get owner->t_cpu->cpu_thread
.mutex_owner_running_critical_end:      ! for pil_interrupt() hook
        cmp     %o1, %o3                ! owner == running thread?
        be,a,pt %xcc, 2f                ! yes, go return cpu
        nop
1:
        retl
        mov     %g0, %o0                ! return 0 (owner not running)
2:
        retl
        mov     %o2, %o0                ! owner running, return cpu
        SET_SIZE(mutex_owner_running)

/*
 * rw_enter() and rw_exit().
 *
 * These routines handle the simple cases of rw_enter (write-locking an unheld
 * lock or read-locking a lock that's neither write-locked nor write-wanted)
 * and rw_exit (no waiters or not the last reader).  If anything complicated
 * is going on we punt to rw_enter_sleep() and rw_exit_wakeup(), respectively.
 */

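/*
 * In rough C pseudocode the fast paths below are (illustration only;
 * readers are counted in the upper bits of the lock word, a writer stores
 * curthread | RW_WRITE_LOCKED):
 *
 *      read enter:     old = *lp;
 *                      if (!(old & RW_WRITE_CLAIMED) &&
 *                          casx(lp, old, old + RW_READ_LOCK) succeeds)
 *                              return;         otherwise rw_enter_sleep()
 *      write enter:    if (casx(lp, 0, curthread | RW_WRITE_LOCKED) succeeds)
 *                              return;         otherwise rw_enter_sleep()
 *      exit:           the matching decrement or store of 0, punting to
 *                      rw_exit_wakeup() for waiters or lost races
 */
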
        .align  16
        ENTRY(rw_enter)
        cmp     %o1, RW_WRITER                  ! entering as writer?
        be,a,pn %icc, 2f                        ! if so, go do it ...
        or      THREAD_REG, RW_WRITE_LOCKED, %o5 ! delay: %o5 = owner
        ld      [THREAD_REG + T_KPRI_REQ], %o3  ! begin THREAD_KPRI_REQUEST()
        ldn     [%o0], %o4                      ! %o4 = old lock value
        inc     %o3                             ! bump kpri
        st      %o3, [THREAD_REG + T_KPRI_REQ]  ! store new kpri
1:
        andcc   %o4, RW_WRITE_CLAIMED, %g0      ! write-locked or write-wanted?
        bz,pt   %xcc, 3f                        ! if not, try for the read lock
        add     %o4, RW_READ_LOCK, %o5          ! delay: increment hold count
        sethi   %hi(rw_enter_sleep), %o2        ! load up jump
        jmp     %o2 + %lo(rw_enter_sleep)       ! jmp to rw_enter_sleep
        nop                                     ! delay: do nothing
3:
        casx    [%o0], %o4, %o5                 ! try to grab read lock
        cmp     %o4, %o5                        ! did we get it?
#ifdef sun4v
        be,a,pt %xcc, 0f
        membar  #LoadLoad
        sethi   %hi(rw_enter_sleep), %o2        ! load up jump
        jmp     %o2 + %lo(rw_enter_sleep)       ! jmp to rw_enter_sleep
        nop                                     ! delay: do nothing
0:
#else /* sun4v */
        bne,pn  %xcc, 1b                        ! if not, try again
        mov     %o5, %o4                        ! delay: %o4 = old lock value
        membar  #LoadLoad
#endif /* sun4v */
.rw_read_enter_lockstat_patch_point:
        retl
        nop
2:
        casx    [%o0], %g0, %o5                 ! try to grab write lock
        brz,pt  %o5, 4f                         ! branch around if we got it
        membar  #LoadLoad                       ! done regardless of where we go
        sethi   %hi(rw_enter_sleep), %o2
        jmp     %o2 + %lo(rw_enter_sleep)       ! jump to rw_enter_sleep if not
        nop                                     ! delay: do nothing
4:
.rw_write_enter_lockstat_patch_point:
        retl
        nop
        SET_SIZE(rw_enter)

        .align  16
        ENTRY(rw_exit)
        ldn     [%o0], %o4                      ! %o4 = old lock value
        membar  #LoadStore|#StoreStore          ! membar_exit()
        subcc   %o4, RW_READ_LOCK, %o5          ! %o5 = new lock value if reader
        bnz,pn  %xcc, 2f                        ! single reader, no waiters?
        clr     %o1
1:
        ld      [THREAD_REG + T_KPRI_REQ], %g1  ! begin THREAD_KPRI_RELEASE()
        srl     %o4, RW_HOLD_COUNT_SHIFT, %o3   ! %o3 = hold count (lockstat)
        casx    [%o0], %o4, %o5                 ! try to drop lock
        cmp     %o4, %o5                        ! did we succeed?
        bne,pn  %xcc, rw_exit_wakeup            ! if not, go to C
        dec     %g1                             ! delay: drop kpri
.rw_read_exit_lockstat_patch_point:
        retl
        st      %g1, [THREAD_REG + T_KPRI_REQ]  ! delay: store new kpri
2:
        andcc   %o4, RW_WRITE_LOCKED, %g0       ! are we a writer?
        bnz,a,pt %xcc, 3f
        or      THREAD_REG, RW_WRITE_LOCKED, %o4 ! delay: %o4 = owner
        cmp     %o5, RW_READ_LOCK               ! would lock still be held?
        bge,pt  %xcc, 1b                        ! if so, go ahead and drop it
        nop
        ba,pt   %xcc, rw_exit_wakeup            ! otherwise, wake waiters
        nop
3:
        casx    [%o0], %o4, %o1                 ! try to drop write lock
        cmp     %o4, %o1                        ! did we succeed?
        bne,pn  %xcc, rw_exit_wakeup            ! if not, go to C
        nop
.rw_write_exit_lockstat_patch_point:
        retl
        nop
        SET_SIZE(rw_exit)

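/*
 * Raw SPARC instruction encodings used by lockstat_hot_patch() below.
 * When a lockstat probe is enabled, the retl at the corresponding
 * *_lockstat_patch_point is overwritten with a ba (annulled where the
 * delay slot must be skipped) into the probe stub generated by
 * HOT_PATCH_COMMON; when the probe is disabled the original instruction
 * is restored.
 */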
#define RETL                    0x81c3e008
#define NOP                     0x01000000
#define BA                      0x10800000

#define DISP22                  ((1 << 22) - 1)
#define ANNUL                   0x20000000

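/*
 * HOT_PATCH_COMMON expands to a probe stub plus the code that patches
 * 'addr' to branch to it:
 *      addr            - the *_lockstat_patch_point to patch
 *      event           - index into lockstat_probemap[]
 *      normal_instr    - instruction to install when the probe is disabled
 *      annul           - ANNUL bit to set in the patched-in branch when the
 *                        delay slot at the patch point must be skipped
 *      rs              - value passed to lockstat_probe() along with the lock
 */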
#define HOT_PATCH_COMMON(addr, event, normal_instr, annul, rs)          \
        ba      1f;                                                     \
        rd      %pc, %o0;                                               \
        save    %sp, -SA(MINFRAME), %sp;                                \
        set     lockstat_probemap, %l1;                                 \
        ld      [%l1 + (event * DTRACE_IDSIZE)], %o0;                   \
        brz,pn  %o0, 0f;                                                \
        ldub    [THREAD_REG + T_LOCKSTAT], %l0;                         \
        add     %l0, 1, %l2;                                            \
        stub    %l2, [THREAD_REG + T_LOCKSTAT];                         \
        set     lockstat_probe, %g1;                                    \
        ld      [%l1 + (event * DTRACE_IDSIZE)], %o0;                   \
        brz,a,pn %o0, 0f;                                               \
        stub    %l0, [THREAD_REG + T_LOCKSTAT];                         \
        ldn     [%g1], %g2;                                             \
        mov     rs, %o2;                                                \
        jmpl    %g2, %o7;                                               \
        mov     %i0, %o1;                                               \
        stub    %l0, [THREAD_REG + T_LOCKSTAT];                         \
0:      ret;                                                            \
        restore %g0, 1, %o0;    /* for mutex_tryenter / lock_try */     \
1:      set     addr, %o1;                                              \
        sub     %o0, %o1, %o0;                                          \
        srl     %o0, 2, %o0;                                            \
        inc     %o0;                                                    \
        set     DISP22, %o1;                                            \
        and     %o1, %o0, %o0;                                          \
        set     BA, %o1;                                                \
        or      %o1, %o0, %o0;                                          \
        sethi   %hi(annul), %o2;                                        \
        add     %o0, %o2, %o2;                                          \
        set     addr, %o0;                                              \
        set     normal_instr, %o1;                                      \
        ld      [%i0 + (event * DTRACE_IDSIZE)], %o3;                   \
        tst     %o3;                                                    \
        movnz   %icc, %o2, %o1;                                         \
        call    hot_patch_kernel_text;                                  \
        mov     4, %o2;                                                 \
        membar  #Sync

#define HOT_PATCH(addr, event, normal_instr)    \
        HOT_PATCH_COMMON(addr, event, normal_instr, 0, %i1)

#define HOT_PATCH_ARG(addr, event, normal_instr, arg)   \
        HOT_PATCH_COMMON(addr, event, normal_instr, 0, arg)

#define HOT_PATCH_ANNULLED(addr, event, normal_instr)   \
        HOT_PATCH_COMMON(addr, event, normal_instr, ANNUL, %i1)

        ENTRY(lockstat_hot_patch)
        save    %sp, -SA(MINFRAME), %sp
        set     lockstat_probemap, %i0
        HOT_PATCH(.mutex_enter_lockstat_patch_point,
                LS_MUTEX_ENTER_ACQUIRE, RETL)
        HOT_PATCH_ANNULLED(.mutex_tryenter_lockstat_patch_point,
                LS_MUTEX_TRYENTER_ACQUIRE, RETL)
        HOT_PATCH(.mutex_exit_lockstat_patch_point,
                LS_MUTEX_EXIT_RELEASE, RETL)
        HOT_PATCH(.rw_write_enter_lockstat_patch_point,
                LS_RW_ENTER_ACQUIRE, RETL)
        HOT_PATCH(.rw_read_enter_lockstat_patch_point,
                LS_RW_ENTER_ACQUIRE, RETL)
        HOT_PATCH_ARG(.rw_write_exit_lockstat_patch_point,
                LS_RW_EXIT_RELEASE, RETL, RW_WRITER)
        HOT_PATCH_ARG(.rw_read_exit_lockstat_patch_point,
                LS_RW_EXIT_RELEASE, RETL, RW_READER)
        HOT_PATCH(.lock_set_lockstat_patch_point,
                LS_LOCK_SET_ACQUIRE, RETL)
        HOT_PATCH_ANNULLED(.lock_try_lockstat_patch_point,
                LS_LOCK_TRY_ACQUIRE, RETL)
        HOT_PATCH(.lock_clear_lockstat_patch_point,
                LS_LOCK_CLEAR_RELEASE, RETL)
        HOT_PATCH(.lock_set_spl_lockstat_patch_point,
                LS_LOCK_SET_SPL_ACQUIRE, RETL)
        HOT_PATCH(.lock_clear_splx_lockstat_patch_point,
                LS_LOCK_CLEAR_SPLX_RELEASE, RETL)
        ret
        restore
        SET_SIZE(lockstat_hot_patch)

/*
 * asm_mutex_spin_enter(mutex_t *)
 *
 * For use by assembly interrupt handler only.
 * Does not change spl, since the interrupt handler is assumed to be
 * running at high level already.
 * Traps may be off, so cannot panic.
 * Does not keep statistics on the lock.
 *
 * Entry:       %l6 - points to mutex
 *              %l7 - address of call (returns to %l7+8)
 * Uses:        %l6, %l5
 */
        .align 16
        ENTRY_NP(asm_mutex_spin_enter)
        ldstub  [%l6 + M_SPINLOCK], %l5 ! try to set lock, get value in %l5
1:
        tst     %l5
        bnz     3f                      ! lock already held - go spin
        nop
2:
        jmp     %l7 + 8                 ! return
        membar  #LoadLoad
        !
        ! Spin on lock without using an atomic operation to prevent the caches
        ! from unnecessarily moving ownership of the line around.
        !
3:
        ldub    [%l6 + M_SPINLOCK], %l5
4:
        tst     %l5
        bz,a    1b                      ! lock appears to be free, try again
        ldstub  [%l6 + M_SPINLOCK], %l5 ! delay slot - try to set lock

        sethi   %hi(panicstr), %l5
        ldn     [%l5 + %lo(panicstr)], %l5
        tst     %l5
        bnz     2b                      ! after panic, feign success
        nop
        b       4b
        ldub    [%l6 + M_SPINLOCK], %l5 ! delay - reload lock
        SET_SIZE(asm_mutex_spin_enter)

/*
 * asm_mutex_spin_exit(mutex_t *)
 *
 * For use by assembly interrupt handler only.
 * Does not change spl, since the interrupt handler is assumed to be
 * running at high level already.
 *
 * Entry:       %l6 - points to mutex
 *              %l7 - address of call (returns to %l7+8)
 * Uses:        none
 */
        ENTRY_NP(asm_mutex_spin_exit)
        membar  #LoadStore|#StoreStore
        jmp     %l7 + 8                 ! return
        clrb    [%l6 + M_SPINLOCK]      ! delay - clear lock
        SET_SIZE(asm_mutex_spin_exit)

/*
 * thread_onproc()
 * Set thread in onproc state for the specified CPU.
 * Also set the thread lock pointer to the CPU's onproc lock.
 * Since the new lock isn't held, the store ordering is important.
 * If not done in assembler, the compiler could reorder the stores.
 */

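/*
 * Roughly equivalent C, with the store order guaranteed:
 *
 *      t->t_state = TS_ONPROC;
 *      t->t_lockp = &cp->cpu_thread_lock;
 */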
        ENTRY(thread_onproc)
        set     TS_ONPROC, %o2          ! TS_ONPROC state
        st      %o2, [%o0 + T_STATE]    ! store state
        add     %o1, CPU_THREAD_LOCK, %o3 ! pointer to disp_lock while running
        retl                            ! return
        stn     %o3, [%o0 + T_LOCKP]    ! delay - store new lock pointer
        SET_SIZE(thread_onproc)

/* delay function used in some mutex code - just do 3 nop cas ops */
        ENTRY(cas_delay)
        casx    [%o0], %g0, %g0
        casx    [%o0], %g0, %g0
        retl
        casx    [%o0], %g0, %g0
        SET_SIZE(cas_delay)

        ENTRY(rdccr_delay)
        rd      %ccr, %g0
        rd      %ccr, %g0
        retl
        rd      %ccr, %g0
        SET_SIZE(rdccr_delay)

/*
 * mutex_delay_default(void)
 * Spins for approximately a few hundred processor cycles, then returns
 * to the caller.
 */

        ENTRY(mutex_delay_default)
        mov     72, %o0
1:      brgz    %o0, 1b
        dec     %o0
        retl
        nop
        SET_SIZE(mutex_delay_default)