/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 * Copyright 2019 Joyent, Inc.
 */

#if defined(lint)
#include <sys/types.h>
#include <sys/thread.h>
#include <sys/cpuvar.h>
#else	/* lint */
#include "assym.h"
#endif	/* lint */

#include <sys/t_lock.h>
#include <sys/mutex.h>
#include <sys/mutex_impl.h>
#include <sys/rwlock_impl.h>
#include <sys/asm_linkage.h>
#include <sys/machlock.h>
#include <sys/machthread.h>
#include <sys/lockstat.h>

/* #define DEBUG */

#ifdef DEBUG
#include <sys/machparam.h>
#endif /* DEBUG */

/************************************************************************
 *		ATOMIC OPERATIONS
 */
/*
 * uint8_t	ldstub(uint8_t *cp)
 *
 * Store 0xFF at the specified location, and return its previous content.
 */

#if defined(lint)
uint8_t
ldstub(uint8_t *cp)
{
	uint8_t rv;
	rv = *cp;
	*cp = 0xFF;
	return rv;
}
#else	/* lint */

	ENTRY(ldstub)
	retl
	ldstub	[%o0], %o0
	SET_SIZE(ldstub)

#endif	/* lint */

/************************************************************************
 *		MEMORY BARRIERS -- see atomic.h for full descriptions.
 */
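
/*
 * Illustrative sketch (not from the original source): how the one-sided
 * barriers below pair up in C.  The variables "data" and "ready" are
 * hypothetical.
 *
 *	data = value;			// producer side
 *	membar_producer();		// #StoreStore: payload before flag
 *	ready = 1;
 *
 *	while (ready == 0)		// consumer side
 *		continue;
 *	membar_consumer();		// #LoadLoad: flag before payload
 *	use(data);
 */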

#if defined(lint)

void
membar_enter(void)
{}

void
membar_exit(void)
{}

void
membar_producer(void)
{}

void
membar_consumer(void)
{}

#else	/* lint */

#ifdef SF_ERRATA_51
	.align 32
	ENTRY(membar_return)
	retl
	nop
	SET_SIZE(membar_return)
#define	MEMBAR_RETURN	ba,pt %icc, membar_return
#else
#define	MEMBAR_RETURN	retl
#endif

	ENTRY(membar_enter)
	MEMBAR_RETURN
	membar	#StoreLoad|#StoreStore
	SET_SIZE(membar_enter)

	ENTRY(membar_exit)
	MEMBAR_RETURN
	membar	#LoadStore|#StoreStore
	SET_SIZE(membar_exit)

	ENTRY(membar_producer)
	MEMBAR_RETURN
	membar	#StoreStore
	SET_SIZE(membar_producer)

	ENTRY(membar_consumer)
	MEMBAR_RETURN
	membar	#LoadLoad
	SET_SIZE(membar_consumer)

#endif	/* lint */

/************************************************************************
 *		MINIMUM LOCKS
 */

#if defined(lint)

/*
 * lock_try(lp), ulock_try(lp)
 *	- returns non-zero on success.
 *	- doesn't block interrupts, so don't use this to spin on a lock.
 *	- uses the "0xFF is busy, anything else is free" model.
 *
 *	ulock_try() is for a lock in the user address space.
 *	On all V7/V8 SPARC systems the two are the same, since the kernel
 *	and user are mapped in the user's context.
 *	On V9 platforms, lock_try() and ulock_try() have different
 *	implementations.
 */

int
lock_try(lock_t *lp)
{
	return (0xFF ^ ldstub(lp));
}

int
lock_spin_try(lock_t *lp)
{
	return (0xFF ^ ldstub(lp));
}

void
lock_set(lock_t *lp)
{
	extern void lock_set_spin(lock_t *);

	if (!lock_try(lp))
		lock_set_spin(lp);
	membar_enter();
}

void
lock_clear(lock_t *lp)
{
	membar_exit();
	*lp = 0;
}

int
ulock_try(lock_t *lp)
{
	return (0xFF ^ ldstub(lp));
}

void
ulock_clear(lock_t *lp)
{
	membar_exit();
	*lp = 0;
}

#else	/* lint */

	.align	32
	ENTRY(lock_try)
	ldstub	[%o0], %o1		! try to set lock, get value in %o1
	brnz,pn	%o1, 1f
	membar	#LoadLoad
.lock_try_lockstat_patch_point:
	retl
	or	%o0, 1, %o0		! ensure lo32 != 0
1:
	retl
	clr	%o0
	SET_SIZE(lock_try)

	.align	32
	ENTRY(lock_spin_try)
	ldstub	[%o0], %o1		! try to set lock, get value in %o1
	brnz,pn	%o1, 1f
	membar	#LoadLoad
	retl
	or	%o0, 1, %o0		! ensure lo32 != 0
1:
	retl
	clr	%o0
	SET_SIZE(lock_spin_try)

	.align	32
	ENTRY(lock_set)
	ldstub	[%o0], %o1
	brnz,pn	%o1, 1f			! go to C for the hard case
	membar	#LoadLoad
.lock_set_lockstat_patch_point:
	retl
	nop
1:
	sethi	%hi(lock_set_spin), %o2	! load up for jump to C
	jmp	%o2 + %lo(lock_set_spin)
	nop				! delay: do nothing
	SET_SIZE(lock_set)

	ENTRY(lock_clear)
	membar	#LoadStore|#StoreStore
.lock_clear_lockstat_patch_point:
	retl
	clrb	[%o0]
	SET_SIZE(lock_clear)

	.align	32
	ENTRY(ulock_try)
	ldstuba	[%o0]ASI_USER, %o1	! try to set lock, get value in %o1
	xor	%o1, 0xff, %o0		! return non-zero if success
	retl
	  membar	#LoadLoad
	SET_SIZE(ulock_try)

	ENTRY(ulock_clear)
	membar	#LoadStore|#StoreStore
	retl
	  stba	%g0, [%o0]ASI_USER	! clear lock
	SET_SIZE(ulock_clear)

#endif	/* lint */


/*
 * lock_set_spl(lp, new_pil, *old_pil_addr)
 *	Sets pil to new_pil, grabs lp, stores old pil in *old_pil_addr.
 */

#if defined(lint)

/* ARGSUSED */
void
lock_set_spl(lock_t *lp, int new_pil, u_short *old_pil_addr)
{
	extern int splr(int);
	extern void lock_set_spl_spin(lock_t *, int, u_short *, int);
	int old_pil;

	old_pil = splr(new_pil);
	if (!lock_try(lp)) {
		lock_set_spl_spin(lp, new_pil, old_pil_addr, old_pil);
	} else {
		*old_pil_addr = (u_short)old_pil;
		membar_enter();
	}
}

#else	/* lint */

	ENTRY(lock_set_spl)
	rdpr	%pil, %o3			! %o3 = current pil
	cmp	%o3, %o1			! is current pil high enough?
	bl,a,pt	%icc, 1f			! if not, write %pil in delay
	wrpr	%g0, %o1, %pil
1:
	ldstub	[%o0], %o4			! try the lock
	brnz,pn	%o4, 2f				! go to C for the miss case
	membar	#LoadLoad
.lock_set_spl_lockstat_patch_point:
	retl
	sth	%o3, [%o2]			! delay - save original pil
2:
	sethi	%hi(lock_set_spl_spin), %o5	! load up jmp to C
	jmp	%o5 + %lo(lock_set_spl_spin)	! jmp to lock_set_spl_spin
	nop					! delay: do nothing
	SET_SIZE(lock_set_spl)

#endif	/* lint */

/*
 * lock_clear_splx(lp, s)
 */

#if defined(lint)

void
lock_clear_splx(lock_t *lp, int s)
{
	extern void splx(int);

	lock_clear(lp);
	splx(s);
}

#else	/* lint */

	ENTRY(lock_clear_splx)
	ldn	[THREAD_REG + T_CPU], %o2	! get CPU pointer
	membar	#LoadStore|#StoreStore
	ld	[%o2 + CPU_BASE_SPL], %o2
	clrb	[%o0]				! clear lock
	cmp	%o2, %o1			! compare new to base
	movl	%xcc, %o1, %o2			! use new pri if base is less
.lock_clear_splx_lockstat_patch_point:
	retl
	wrpr	%g0, %o2, %pil
	SET_SIZE(lock_clear_splx)

#endif	/* lint */

/*
 * mutex_enter() and mutex_exit().
 *
 * These routines handle the simple cases of mutex_enter() (adaptive
 * lock, not held) and mutex_exit() (adaptive lock, held, no waiters).
 * If anything complicated is going on we punt to mutex_vector_enter().
 *
 * mutex_tryenter() is similar to mutex_enter() but returns zero if
 * the lock cannot be acquired, nonzero on success.
 *
 * If mutex_exit() gets preempted in the window between checking waiters
 * and clearing the lock, we can miss wakeups.  Disabling preemption
 * in the mutex code is prohibitively expensive, so instead we detect
 * mutex preemption by examining the trapped PC in the interrupt path.
 * If we interrupt a thread in mutex_exit() that has not yet cleared
 * the lock, pil_interrupt() resets its PC back to the beginning of
 * mutex_exit() so it will check again for waiters when it resumes.
 *
 * The lockstat code below is activated when the lockstat driver
 * calls lockstat_hot_patch() to hot-patch the kernel mutex code.
 * Note that we don't need to test lockstat_event_mask here -- we won't
 * patch this code in unless we're gathering ADAPTIVE_HOLD lockstats.
 */
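
/*
 * For reference, the fast paths below do roughly the following C
 * (a sketch only, with casx() standing in for the casx instruction and
 * "owner" for the lock's owner word; the real code must live in assembly
 * so that pil_interrupt() can restart the critical section):
 *
 *	void
 *	mutex_enter(kmutex_t *lp)
 *	{
 *		if (casx(&lp->owner, 0, curthread) != 0)
 *			mutex_vector_enter(lp);	// held, or not adaptive
 *	}
 *
 *	void
 *	mutex_exit(kmutex_t *lp)
 *	{
 *		if (lp->owner == curthread)	// ours, no waiters bit
 *			lp->owner = 0;		// preemption window is here
 *		else
 *			mutex_vector_exit(lp);	// waiters, or not ours
 *	}
 */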

#if defined(lint)

/* ARGSUSED */
void
mutex_enter(kmutex_t *lp)
{}

/* ARGSUSED */
int
mutex_tryenter(kmutex_t *lp)
{ return (0); }

/* ARGSUSED */
void
mutex_exit(kmutex_t *lp)
{}

/* ARGSUSED */
void *
mutex_owner_running(mutex_impl_t *lp)
{ return (NULL); }

#else
	.align	32
	ENTRY(mutex_enter)
	mov	THREAD_REG, %o1
	casx	[%o0], %g0, %o1			! try to acquire as adaptive
	brnz,pn	%o1, 1f				! locked or wrong type
	membar	#LoadLoad
.mutex_enter_lockstat_patch_point:
	retl
	nop
1:
	sethi	%hi(mutex_vector_enter), %o2	! load up for jump to C
	jmp	%o2 + %lo(mutex_vector_enter)
	nop
	SET_SIZE(mutex_enter)

	ENTRY(mutex_tryenter)
	mov	THREAD_REG, %o1
	casx	[%o0], %g0, %o1			! try to acquire as adaptive
	brnz,pn	%o1, 1f				! locked or wrong type
	membar	#LoadLoad
.mutex_tryenter_lockstat_patch_point:
	retl
	or	%o0, 1, %o0			! ensure lo32 != 0
1:
	sethi	%hi(mutex_vector_tryenter), %o2		! hi bits
	jmp	%o2 + %lo(mutex_vector_tryenter)	! go to C
	nop
	SET_SIZE(mutex_tryenter)

	ENTRY(mutex_adaptive_tryenter)
	mov	THREAD_REG, %o1
	casx	[%o0], %g0, %o1			! try to acquire as adaptive
	brnz,pn	%o1, 0f				! locked or wrong type
	membar	#LoadLoad
	retl
	or	%o0, 1, %o0			! ensure lo32 != 0
0:
	retl
	mov	%g0, %o0
	SET_SIZE(mutex_adaptive_tryenter)

	! These must be kept together and cache-line aligned for performance.
	.align 64
	.global	mutex_exit_critical_size
	.global	mutex_exit_critical_start
	.global	mutex_owner_running_critical_size
	.global	mutex_owner_running_critical_start

mutex_exit_critical_size = .mutex_exit_critical_end - mutex_exit_critical_start

	.align	32

	ENTRY(mutex_exit)
mutex_exit_critical_start:		! If we are interrupted, restart here
	ldn	[%o0], %o1		! get the owner field
	membar	#LoadStore|#StoreStore
	cmp	THREAD_REG, %o1		! do we own lock with no waiters?
	be,a,pt	%ncc, 1f		! if so, drive on ...
	stn	%g0, [%o0]		! delay: clear lock if we owned it
.mutex_exit_critical_end:		! for pil_interrupt() hook
	ba,a,pt	%xcc, mutex_vector_exit	! go to C for the hard cases
1:
.mutex_exit_lockstat_patch_point:
	retl
	nop
	SET_SIZE(mutex_exit)

mutex_owner_running_critical_size = .mutex_owner_running_critical_end - mutex_owner_running_critical_start

	.align	32

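/*
 * mutex_owner_running(mutex_impl_t *lp)
 *
 * Returns the owner's CPU pointer if the owner is currently running on
 * a CPU, or 0 if the lock is unowned or the owner is not running.  The
 * owner->t_cpu->cpu_thread chain must be walked without an intervening
 * preemption, so pil_interrupt() restarts the critical section below if
 * it interrupts it.
 */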
	ENTRY(mutex_owner_running)
mutex_owner_running_critical_start:	! If interrupted restart here
	ldn	[%o0], %o1		! get the owner field
	and	%o1, MUTEX_THREAD, %o1	! remove the waiters bit if any
	brz,pn	%o1, 1f			! if no owner, return 0
	nop
	ldn	[%o1+T_CPU], %o2	! get owner->t_cpu
	ldn	[%o2+CPU_THREAD], %o3	! get owner->t_cpu->cpu_thread
.mutex_owner_running_critical_end:	! for pil_interrupt() hook
	cmp	%o1, %o3		! owner == running thread?
	be,a,pt	%xcc, 2f		! yes, go return cpu
	nop
1:
	retl
	mov	%g0, %o0		! return 0 (owner not running)
2:
	retl
	mov	%o2, %o0		! owner running, return cpu
	SET_SIZE(mutex_owner_running)

#endif	/* lint */

/*
 * rw_enter() and rw_exit().
 *
 * These routines handle the simple cases of rw_enter (write-locking an unheld
 * lock or read-locking a lock that's neither write-locked nor write-wanted)
 * and rw_exit (no waiters or not the last reader).  If anything complicated
 * is going on we punt to rw_enter_sleep() and rw_exit_wakeup(), respectively.
 */
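
/*
 * Sketch of the read-lock fast path in C (illustrative only; casx() is a
 * stand-in for the casx instruction, and the retry on cas failure is
 * elided -- see the sun4u/sun4v #ifdef below for how that differs):
 *
 *	void
 *	rw_enter_read(krwlock_t *lp)
 *	{
 *		uintptr_t old = lp->rw_wwwh;	// state bits + hold count
 *
 *		if ((old & RW_WRITE_CLAIMED) ||	// write-locked/write-wanted?
 *		    casx(&lp->rw_wwwh, old, old + RW_READ_LOCK) != old)
 *			rw_enter_sleep(lp, RW_READER);
 *	}
 */
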
#if defined(lint)

/* ARGSUSED */
void
rw_enter(krwlock_t *lp, krw_t rw)
{}

/* ARGSUSED */
void
rw_exit(krwlock_t *lp)
{}

#else

	.align	16
	ENTRY(rw_enter)
	cmp	%o1, RW_WRITER			! entering as writer?
	be,a,pn	%icc, 2f			! if so, go do it ...
	or	THREAD_REG, RW_WRITE_LOCKED, %o5 ! delay: %o5 = owner
	ldn	[%o0], %o4			! %o4 = old lock value
1:
	andcc	%o4, RW_WRITE_CLAIMED, %g0	! write-locked or write-wanted?
	bz,pt	%xcc, 3f			! if not, try to grab it
	add	%o4, RW_READ_LOCK, %o5		! delay: increment hold count
	sethi	%hi(rw_enter_sleep), %o2	! load up jump
	jmp	%o2 + %lo(rw_enter_sleep)	! jmp to rw_enter_sleep
	nop					! delay: do nothing
3:
	casx	[%o0], %o4, %o5			! try to grab read lock
	cmp	%o4, %o5			! did we get it?
#ifdef sun4v
	be,a,pt	%xcc, 0f
	membar	#LoadLoad
	sethi	%hi(rw_enter_sleep), %o2	! load up jump
	jmp	%o2 + %lo(rw_enter_sleep)	! jmp to rw_enter_sleep
	nop					! delay: do nothing
0:
#else /* sun4v */
	bne,pn	%xcc, 1b			! if not, try again
	mov	%o5, %o4			! delay: %o4 = old lock value
	membar	#LoadLoad
#endif /* sun4v */
.rw_read_enter_lockstat_patch_point:
	retl
	nop
2:
	casx	[%o0], %g0, %o5			! try to grab write lock
	brz,pt	%o5, 4f				! branch around if we got it
	membar	#LoadLoad			! done regardless of where we go
	sethi	%hi(rw_enter_sleep), %o2
	jmp	%o2 + %lo(rw_enter_sleep)	! jump to rw_enter_sleep if not
	nop					! delay: do nothing
4:
.rw_write_enter_lockstat_patch_point:
	retl
	nop
	SET_SIZE(rw_enter)

	.align	16
	ENTRY(rw_exit)
	ldn	[%o0], %o4			! %o4 = old lock value
	membar	#LoadStore|#StoreStore		! membar_exit()
	subcc	%o4, RW_READ_LOCK, %o5		! %o5 = new lock value if reader
	bnz,pn	%xcc, 2f			! branch if not lone reader
	clr	%o1
1:
	srl	%o4, RW_HOLD_COUNT_SHIFT, %o3	! %o3 = hold count (lockstat)
	casx	[%o0], %o4, %o5			! try to drop lock
	cmp	%o4, %o5			! did we succeed?
	bne,pn	%xcc, rw_exit_wakeup		! if not, go to C
	nop					! delay: do nothing
.rw_read_exit_lockstat_patch_point:
	retl
	nop					! delay: do nothing
2:
	andcc	%o4, RW_WRITE_LOCKED, %g0	! are we a writer?
	bnz,a,pt %xcc, 3f
	or	THREAD_REG, RW_WRITE_LOCKED, %o4 ! delay: %o4 = owner
	cmp	%o5, RW_READ_LOCK		! would lock still be held?
	bge,pt	%xcc, 1b			! if so, go ahead and drop it
	nop
	ba,pt	%xcc, rw_exit_wakeup		! otherwise, wake waiters
	nop
3:
	casx	[%o0], %o4, %o1			! try to drop write lock
	cmp	%o4, %o1			! did we succeed?
	bne,pn	%xcc, rw_exit_wakeup		! if not, go to C
	nop
.rw_write_exit_lockstat_patch_point:
	retl
	nop
	SET_SIZE(rw_exit)

#endif

#if defined(lint)

void
lockstat_hot_patch(void)
{}

#else

#define	RETL			0x81c3e008
#define	NOP			0x01000000
#define	BA			0x10800000

#define	DISP22			((1 << 22) - 1)
#define	ANNUL			0x20000000

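/*
 * A note on HOT_PATCH_COMMON: each expansion emits two pieces.  The code
 * between the "ba 1f; rd %pc" pair and label 1 is the out-of-line probe
 * body that the patched-in branch targets: it bumps T_LOCKSTAT, fires
 * lockstat_probe() if the event is still enabled, and returns to the
 * patched routine's caller with %o0 = 1 (the value mutex_tryenter() and
 * lock_try() must return).  The code at label 1 runs when
 * lockstat_hot_patch() falls through the macro: it hand-assembles the
 * branch to the probe body -- displacement ((body - addr) >> 2) & DISP22,
 * or'ed with the BA opcode and optionally the ANNUL bit -- then calls
 * hot_patch_kernel_text() to store either that branch or normal_instr at
 * the patch point, depending on whether the event is enabled.
 */
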
#define	HOT_PATCH_COMMON(addr, event, normal_instr, annul, rs)		\
	ba	1f;							\
	rd	%pc, %o0;						\
	save	%sp, -SA(MINFRAME), %sp;				\
	set	lockstat_probemap, %l1;					\
	ld	[%l1 + (event * DTRACE_IDSIZE)], %o0;			\
	brz,pn	%o0, 0f;						\
	ldub	[THREAD_REG + T_LOCKSTAT], %l0;				\
	add	%l0, 1, %l2;						\
	stub	%l2, [THREAD_REG + T_LOCKSTAT];				\
	set	lockstat_probe, %g1;					\
	ld	[%l1 + (event * DTRACE_IDSIZE)], %o0;			\
	brz,a,pn %o0, 0f;						\
	stub	%l0, [THREAD_REG + T_LOCKSTAT];				\
	ldn	[%g1], %g2;						\
	mov	rs, %o2;						\
	jmpl	%g2, %o7;						\
	mov	%i0, %o1;						\
	stub	%l0, [THREAD_REG + T_LOCKSTAT];				\
0:	ret;								\
	restore	%g0, 1, %o0;	/* for mutex_tryenter / lock_try */	\
1:	set	addr, %o1;						\
	sub	%o0, %o1, %o0;						\
	srl	%o0, 2, %o0;						\
	inc	%o0;							\
	set	DISP22, %o1;						\
	and	%o1, %o0, %o0;						\
	set	BA, %o1;						\
	or	%o1, %o0, %o0;						\
	sethi	%hi(annul), %o2;					\
	add	%o0, %o2, %o2;						\
	set	addr, %o0;						\
	set	normal_instr, %o1;					\
	ld	[%i0 + (event * DTRACE_IDSIZE)], %o3;			\
	tst	%o3;							\
	movnz	%icc, %o2, %o1;						\
	call	hot_patch_kernel_text;					\
	mov	4, %o2;							\
	membar	#Sync

#define	HOT_PATCH(addr, event, normal_instr)	\
	HOT_PATCH_COMMON(addr, event, normal_instr, 0, %i1)

#define	HOT_PATCH_ARG(addr, event, normal_instr, arg)	\
	HOT_PATCH_COMMON(addr, event, normal_instr, 0, arg)

#define	HOT_PATCH_ANNULLED(addr, event, normal_instr)	\
	HOT_PATCH_COMMON(addr, event, normal_instr, ANNUL, %i1)

	ENTRY(lockstat_hot_patch)
	save	%sp, -SA(MINFRAME), %sp
	set	lockstat_probemap, %i0
	HOT_PATCH(.mutex_enter_lockstat_patch_point,
		LS_MUTEX_ENTER_ACQUIRE, RETL)
	HOT_PATCH_ANNULLED(.mutex_tryenter_lockstat_patch_point,
		LS_MUTEX_TRYENTER_ACQUIRE, RETL)
	HOT_PATCH(.mutex_exit_lockstat_patch_point,
		LS_MUTEX_EXIT_RELEASE, RETL)
	HOT_PATCH(.rw_write_enter_lockstat_patch_point,
		LS_RW_ENTER_ACQUIRE, RETL)
	HOT_PATCH(.rw_read_enter_lockstat_patch_point,
		LS_RW_ENTER_ACQUIRE, RETL)
	HOT_PATCH_ARG(.rw_write_exit_lockstat_patch_point,
		LS_RW_EXIT_RELEASE, RETL, RW_WRITER)
	HOT_PATCH_ARG(.rw_read_exit_lockstat_patch_point,
		LS_RW_EXIT_RELEASE, RETL, RW_READER)
	HOT_PATCH(.lock_set_lockstat_patch_point,
		LS_LOCK_SET_ACQUIRE, RETL)
	HOT_PATCH_ANNULLED(.lock_try_lockstat_patch_point,
		LS_LOCK_TRY_ACQUIRE, RETL)
	HOT_PATCH(.lock_clear_lockstat_patch_point,
		LS_LOCK_CLEAR_RELEASE, RETL)
	HOT_PATCH(.lock_set_spl_lockstat_patch_point,
		LS_LOCK_SET_SPL_ACQUIRE, RETL)
	HOT_PATCH(.lock_clear_splx_lockstat_patch_point,
		LS_LOCK_CLEAR_SPLX_RELEASE, RETL)
	ret
	restore
	SET_SIZE(lockstat_hot_patch)

#endif	/* lint */

/*
 * asm_mutex_spin_enter(mutex_t *)
 *
 * For use by assembly interrupt handler only.
 * Does not change spl, since the interrupt handler is assumed to be
 * running at high level already.
 * Traps may be off, so cannot panic.
 * Does not keep statistics on the lock.
 *
 * Entry:	%l6 - points to mutex
 *		%l7 - address of call (returns to %l7+8)
 * Uses:	%l6, %l5
 */
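
/*
 * The spin loop below is the classic test-and-test-and-set pattern; a
 * rough C equivalent (sketch only, with ldstub() standing in for the
 * ldstub instruction):
 *
 *	while (ldstub(&m->m_spinlock) != 0) {	// atomic grab failed
 *		while (m->m_spinlock != 0) {	// spin on plain loads
 *			if (panicstr != NULL)
 *				return;		// after panic, feign success
 *		}
 *	}
 *
 * Spinning with ordinary loads lets the lock's cache line stay shared;
 * the atomic ldstub, which takes the line exclusive, is retried only
 * once the lock looks free.
 */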
#ifndef lint
	.align 16
	ENTRY_NP(asm_mutex_spin_enter)
	ldstub	[%l6 + M_SPINLOCK], %l5	! try to set lock, get value in %l5
1:
	tst	%l5
	bnz	3f			! lock already held - go spin
	nop
2:
	jmp	%l7 + 8			! return
	membar	#LoadLoad
	!
	! Spin on lock without using an atomic operation to prevent the caches
	! from unnecessarily moving ownership of the line around.
	!
3:
	ldub	[%l6 + M_SPINLOCK], %l5
4:
	tst	%l5
	bz,a	1b			! lock appears to be free, try again
	ldstub	[%l6 + M_SPINLOCK], %l5	! delay slot - try to set lock

	sethi	%hi(panicstr), %l5
	ldn	[%l5 + %lo(panicstr)], %l5
	tst	%l5
	bnz	2b			! after panic, feign success
	nop
	b	4b
	ldub	[%l6 + M_SPINLOCK], %l5	! delay - reload lock
	SET_SIZE(asm_mutex_spin_enter)
#endif /* lint */

/*
 * asm_mutex_spin_exit(mutex_t *)
 *
 * For use by assembly interrupt handler only.
 * Does not change spl, since the interrupt handler is assumed to be
 * running at high level already.
 *
 * Entry:	%l6 - points to mutex
 *		%l7 - address of call (returns to %l7+8)
 * Uses:	none
 */
#ifndef lint
	ENTRY_NP(asm_mutex_spin_exit)
	membar	#LoadStore|#StoreStore
	jmp	%l7 + 8			! return
	clrb	[%l6 + M_SPINLOCK]	! delay - clear lock
	SET_SIZE(asm_mutex_spin_exit)
#endif /* lint */

/*
 * thread_onproc()
 * Set thread in onproc state for the specified CPU.
 * Also set the thread lock pointer to the CPU's onproc lock.
 * Since the new lock isn't held, the store ordering is important.
 * If not done in assembler, the compiler could reorder the stores.
 */
#if defined(lint)

void
thread_onproc(kthread_id_t t, cpu_t *cp)
{
	t->t_state = TS_ONPROC;
	t->t_lockp = &cp->cpu_thread_lock;
}

#else	/* lint */

	ENTRY(thread_onproc)
	set	TS_ONPROC, %o2		! TS_ONPROC state
	st	%o2, [%o0 + T_STATE]	! store state
	add	%o1, CPU_THREAD_LOCK, %o3 ! pointer to disp_lock while running
	retl				! return
	stn	%o3, [%o0 + T_LOCKP]	! delay - store new lock pointer
	SET_SIZE(thread_onproc)

#endif	/* lint */

/* Delay function used in some mutex code - just does three no-op casx operations. */
#if defined(lint)

/* ARGSUSED */
void
cas_delay(void *addr)
{}
#else	/* lint */
	ENTRY(cas_delay)
	casx	[%o0], %g0, %g0
	casx	[%o0], %g0, %g0
	retl
	casx	[%o0], %g0, %g0
	SET_SIZE(cas_delay)
#endif	/* lint */

#if defined(lint)

/*
 * Alternative delay function for some Niagara processors.  The rd
 * instruction uses fewer resources than casx on those CPUs.
 */
/* ARGSUSED */
void
rdccr_delay(void)
{}
#else	/* lint */
	ENTRY(rdccr_delay)
	rd	%ccr, %g0
	rd	%ccr, %g0
	retl
	rd	%ccr, %g0
	SET_SIZE(rdccr_delay)
#endif	/* lint */

/*
 * mutex_delay_default(void)
 * Spins for approximately a few hundred processor cycles, then returns
 * to the caller.
 */
#if defined(lint)

void
mutex_delay_default(void)
{}

#else	/* lint */

	ENTRY(mutex_delay_default)
	mov	72, %o0
1:	brgz	%o0, 1b
	dec	%o0
	retl
	nop
	SET_SIZE(mutex_delay_default)

#endif	/* lint */