de-linting of .s files

   1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 /*
  22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
  23  * Use is subject to license terms.
  24  */
  25 
  26 #pragma ident   "%Z%%M% %I%     %E% SMI"
  27 
  28 #if defined(lint)
  29 #include <sys/types.h>
  30 #include <sys/thread.h>
  31 #include <sys/cpuvar.h>
  32 #else   /* lint */
  33 #include "assym.h"
  34 #endif  /* lint */
  35 
  36 #include <sys/t_lock.h>
  37 #include <sys/mutex.h>
  38 #include <sys/mutex_impl.h>
  39 #include <sys/rwlock_impl.h>
  40 #include <sys/asm_linkage.h>
  41 #include <sys/machlock.h>
  42 #include <sys/machthread.h>
  43 #include <sys/lockstat.h>
  44 
  45 /* #define DEBUG */
  46 
  47 #ifdef DEBUG
  48 #include <sys/machparam.h>
  49 #endif /* DEBUG */
  50 
  51 /************************************************************************
  52  *              ATOMIC OPERATIONS
  53  */
  54 
  55 /*
  56  * uint8_t      ldstub(uint8_t *cp)
  57  *
  58  * Store 0xFF at the specified location, and return its previous content.
  59  */
  60 
  61 #if defined(lint)
  62 uint8_t
  63 ldstub(uint8_t *cp)
  64 {
  65         uint8_t rv;
  66         rv = *cp;
  67         *cp = 0xFF;
  68         return rv;
  69 }
  70 #else   /* lint */
  71 
  72         ENTRY(ldstub)
  73         retl
  74         ldstub  [%o0], %o0
  75         SET_SIZE(ldstub)
  76 
  77 #endif  /* lint */
  78 
  79 /************************************************************************
  80  *              MEMORY BARRIERS -- see atomic.h for full descriptions.
  81  */
  82 
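
A reader's aid before the implementations: membar_producer() and
membar_consumer() form the usual publish/observe pair.  A minimal C sketch
(the ready/data variables are illustrative, not part of this file):

        static volatile int ready;      /* illustrative flag */
        static int data;                /* illustrative payload */

        void
        publish(void)
        {
                data = 42;              /* produce the data ... */
                membar_producer();      /* ... and drain it before the flag store */
                ready = 1;
        }

        void
        observe(void)
        {
                while (ready == 0)      /* wait for the flag ... */
                        continue;
                membar_consumer();      /* ... then it is safe to load the data */
                (void) data;
        }
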
  83 #if defined(lint)
  84 
  85 void
  86 membar_enter(void)
  87 {}
  88 
  89 void
  90 membar_exit(void)
  91 {}
  92 
  93 void
  94 membar_producer(void)
  95 {}
  96 
  97 void
  98 membar_consumer(void)
  99 {}
 100 
 101 #else   /* lint */
 102 
 103 #ifdef SF_ERRATA_51
 104         .align 32
 105         ENTRY(membar_return)
 106         retl
 107         nop
 108         SET_SIZE(membar_return)
 109 #define MEMBAR_RETURN   ba,pt %icc, membar_return
 110 #else
 111 #define MEMBAR_RETURN   retl
 112 #endif
 113 
 114         ENTRY(membar_enter)
 115         MEMBAR_RETURN
 116         membar  #StoreLoad|#StoreStore
 117         SET_SIZE(membar_enter)
 118 
 119         ENTRY(membar_exit)
 120         MEMBAR_RETURN
 121         membar  #LoadStore|#StoreStore
 122         SET_SIZE(membar_exit)
 123 
 124         ENTRY(membar_producer)
 125         MEMBAR_RETURN
 126         membar  #StoreStore
 127         SET_SIZE(membar_producer)
 128 
 129         ENTRY(membar_consumer)
 130         MEMBAR_RETURN
 131         membar  #LoadLoad
 132         SET_SIZE(membar_consumer)
 133 
 134 #endif  /* lint */
 135 
 136 /************************************************************************
 137  *              MINIMUM LOCKS
 138  */
 139 
 140 #if defined(lint)
 141 
 142 /*
 143  * lock_try(lp), ulock_try(lp)
 144  *      - returns non-zero on success.
 145  *      - doesn't block interrupts so don't use this to spin on a lock.
 146  *      - uses "0xFF is busy, anything else is free" model.
 147  *
 148  *      ulock_try() is for a lock in the user address space.
 149  *      For all V7/V8 sparc systems they are the same since the kernel and
 150  *      user are mapped in the user's context.
 151  *      For V9 platforms lock_try and ulock_try have different implementations.
 152  */
 153 
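
A short usage sketch of that model (mylock is illustrative and assumed
already initialized to the free state):

        lock_t mylock;

        if (lock_try(&mylock)) {        /* non-zero: we now hold the lock */
                /* short critical section; interrupts are NOT blocked here */
                lock_clear(&mylock);    /* release; lock_clear supplies the exit barrier */
        }
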
 154 int
 155 lock_try(lock_t *lp)
 156 {
 157         return (0xFF ^ ldstub(lp));
 158 }
 159 
 160 int
 161 lock_spin_try(lock_t *lp)
 162 {
 163         return (0xFF ^ ldstub(lp));
 164 }
 165 
 166 void
 167 lock_set(lock_t *lp)
 168 {
 169         extern void lock_set_spin(lock_t *);
 170 
 171         if (!lock_try(lp))
 172                 lock_set_spin(lp);
 173         membar_enter();
 174 }
 175 
 176 void
 177 lock_clear(lock_t *lp)
 178 {
 179         membar_exit();
 180         *lp = 0;
 181 }
 182 
 183 int
 184 ulock_try(lock_t *lp)
 185 {
 186         return (0xFF ^ ldstub(lp));
 187 }
 188 
 189 void
 190 ulock_clear(lock_t *lp)
 191 {
 192         membar_exit();
 193         *lp = 0;
 194 }
 195 
 196 #else   /* lint */
 197 
 198         .align  32
 199         ENTRY(lock_try)
 200         ldstub  [%o0], %o1              ! try to set lock, get value in %o1
 201         brnz,pn %o1, 1f
 202         membar  #LoadLoad
 203 .lock_try_lockstat_patch_point:
 204         retl
 205         or      %o0, 1, %o0             ! ensure lo32 != 0
 206 1:
 207         retl
 208         clr     %o0
 209         SET_SIZE(lock_try)
 210 
 211         .align  32
 212         ENTRY(lock_spin_try)
 213         ldstub  [%o0], %o1              ! try to set lock, get value in %o1
 214         brnz,pn %o1, 1f
 215         membar  #LoadLoad
 216         retl
 217         or      %o0, 1, %o0             ! ensure lo32 != 0


 238         membar  #LoadStore|#StoreStore
 239 .lock_clear_lockstat_patch_point:
 240         retl
 241         clrb    [%o0]
 242         SET_SIZE(lock_clear)
 243 
 244         .align  32
 245         ENTRY(ulock_try)
 246         ldstuba [%o0]ASI_USER, %o1      ! try to set lock, get value in %o1
 247         xor     %o1, 0xff, %o0          ! delay - return non-zero if success
 248         retl
 249           membar        #LoadLoad
 250         SET_SIZE(ulock_try)
 251 
 252         ENTRY(ulock_clear)
 253         membar  #LoadStore|#StoreStore
 254         retl
 255           stba  %g0, [%o0]ASI_USER      ! clear lock
 256         SET_SIZE(ulock_clear)
 257 
 258 #endif  /* lint */
 259 
 260 
 261 /*
 262  * lock_set_spl(lp, new_pil, *old_pil_addr)
 263  *      Sets pil to new_pil, grabs lp, stores old pil in *old_pil_addr.
 264  */
 265 
 266 #if defined(lint)
 267 
 268 /* ARGSUSED */
 269 void
 270 lock_set_spl(lock_t *lp, int new_pil, u_short *old_pil_addr)
 271 {
 272         extern int splr(int);
 273         extern void lock_set_spl_spin(lock_t *, int, u_short *, int);
 274         int old_pil;
 275 
 276         old_pil = splr(new_pil);
 277         if (!lock_try(lp)) {
 278                 lock_set_spl_spin(lp, new_pil, old_pil_addr, old_pil);
 279         } else {
 280                 *old_pil_addr = (u_short)old_pil;
 281                 membar_enter();
 282         }
 283 }
 284 
 285 #else   /* lint */
 286 
 287         ENTRY(lock_set_spl)
 288         rdpr    %pil, %o3                       ! %o3 = current pil
 289         cmp     %o3, %o1                        ! is current pil high enough?
 290         bl,a,pt %icc, 1f                        ! if not, write %pil in delay
 291         wrpr    %g0, %o1, %pil
 292 1:
 293         ldstub  [%o0], %o4                      ! try the lock
 294         brnz,pn %o4, 2f                         ! go to C for the miss case
 295         membar  #LoadLoad
 296 .lock_set_spl_lockstat_patch_point:
 297         retl
 298         sth     %o3, [%o2]                      ! delay - save original pil
 299 2:
 300         sethi   %hi(lock_set_spl_spin), %o5     ! load up jmp to C
 301         jmp     %o5 + %lo(lock_set_spl_spin)    ! jmp to lock_set_spl_spin
 302         nop                                     ! delay: do nothing
 303         SET_SIZE(lock_set_spl)
 304 
 305 #endif  /* lint */
 306 
 307 /*
 308  * lock_clear_splx(lp, s)
 309  */
 310 
 311 #if defined(lint)
 312 
 313 void
 314 lock_clear_splx(lock_t *lp, int s)
 315 {
 316         extern void splx(int);
 317 
 318         lock_clear(lp);
 319         splx(s);
 320 }
 321 
 322 #else   /* lint */
 323 
 324         ENTRY(lock_clear_splx)
 325         ldn     [THREAD_REG + T_CPU], %o2       ! get CPU pointer
 326         membar  #LoadStore|#StoreStore
 327         ld      [%o2 + CPU_BASE_SPL], %o2
 328         clrb    [%o0]                           ! clear lock
 329         cmp     %o2, %o1                        ! compare new to base
 330         movl    %xcc, %o1, %o2                  ! use new pri if base is less
 331 .lock_clear_splx_lockstat_patch_point:
 332         retl
 333         wrpr    %g0, %o2, %pil
 334         SET_SIZE(lock_clear_splx)
 335 
 336 #endif  /* lint */
 337 
 338 /*
 339  * mutex_enter() and mutex_exit().
 340  * 
 341  * These routines handle the simple cases of mutex_enter() (adaptive
 342  * lock, not held) and mutex_exit() (adaptive lock, held, no waiters).
 343  * If anything complicated is going on we punt to mutex_vector_enter().
 344  *
 345  * mutex_tryenter() is similar to mutex_enter() but returns zero if
 346  * the lock cannot be acquired, nonzero on success.
 347  *
 348  * If mutex_exit() gets preempted in the window between checking waiters
 349  * and clearing the lock, we can miss wakeups.  Disabling preemption
 350  * in the mutex code is prohibitively expensive, so instead we detect
 351  * mutex preemption by examining the trapped PC in the interrupt path.
 352  * If we interrupt a thread in mutex_exit() that has not yet cleared
 353  * the lock, pil_interrupt() resets its PC back to the beginning of
 354  * mutex_exit() so it will check again for waiters when it resumes.
 355  *
 356  * The lockstat code below is activated when the lockstat driver
 357  * calls lockstat_hot_patch() to hot-patch the kernel mutex code.
 358  * Note that we don't need to test lockstat_event_mask here -- we won't
 359  * patch this code in unless we're gathering ADAPTIVE_HOLD lockstats.
 360  */
 361 
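The lint stubs below are intentionally empty, so as a reader's aid here is a
rough C sketch of the two fast paths.  It assumes the adaptive owner word is
0 when free and carries the owner thread pointer (plus a waiters bit) when
held; the mutex_sketch type and m_owner name are illustrative, and the real
layout lives in mutex_impl.h.

        typedef struct mutex_sketch {
                volatile uintptr_t m_owner;     /* 0 == free */
        } mutex_sketch_t;

        void
        mutex_enter_sketch(mutex_sketch_t *lp)
        {
                extern void mutex_vector_enter(mutex_impl_t *);

                /* one casx attempt: swing the owner word from 0 to curthread */
                if (atomic_cas_ptr(&lp->m_owner, NULL, curthread) != NULL)
                        mutex_vector_enter((mutex_impl_t *)lp); /* punt to C */
        }

        void
        mutex_exit_sketch(mutex_sketch_t *lp)
        {
                extern void mutex_vector_exit(mutex_impl_t *);

                membar_exit();
                if (lp->m_owner == (uintptr_t)curthread) {
                        /*
                         * Preemption in this window is what pil_interrupt()
                         * catches: the PC is rewound so the waiters check is
                         * redone when the thread resumes.
                         */
                        lp->m_owner = 0;
                } else {
                        mutex_vector_exit((mutex_impl_t *)lp);  /* waiters, etc. */
                }
        }
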
 362 #if defined (lint)
 363 
 364 /* ARGSUSED */
 365 void
 366 mutex_enter(kmutex_t *lp)
 367 {}
 368 
 369 /* ARGSUSED */
 370 int
 371 mutex_tryenter(kmutex_t *lp)
 372 { return (0); }
 373 
 374 /* ARGSUSED */
 375 void
 376 mutex_exit(kmutex_t *lp)
 377 {}
 378 
 379 /* ARGSUSED */
 380 void *
 381 mutex_owner_running(mutex_impl_t *lp)
 382 { return (NULL); }
 383 
 384 #else
 385         .align  32
 386         ENTRY(mutex_enter)
 387         mov     THREAD_REG, %o1
 388         casx    [%o0], %g0, %o1                 ! try to acquire as adaptive
 389         brnz,pn %o1, 1f                         ! locked or wrong type
 390         membar  #LoadLoad
 391 .mutex_enter_lockstat_patch_point:
 392         retl
 393         nop
 394 1:
 395         sethi   %hi(mutex_vector_enter), %o2    ! load up for jump to C
 396         jmp     %o2 + %lo(mutex_vector_enter)
 397         nop
 398         SET_SIZE(mutex_enter)
 399 
 400         ENTRY(mutex_tryenter)
 401         mov     THREAD_REG, %o1
 402         casx    [%o0], %g0, %o1                 ! try to acquire as adaptive
 403         brnz,pn %o1, 1f                         ! locked or wrong type
 404         membar  #LoadLoad


 456         ENTRY(mutex_owner_running)
 457 mutex_owner_running_critical_start:     ! If interrupted restart here
 458         ldn     [%o0], %o1              ! get the owner field
 459         and     %o1, MUTEX_THREAD, %o1  ! remove the waiters bit if any
 460         brz,pn  %o1, 1f                 ! if no owner, drive on ...
 461         nop
 462         ldn     [%o1+T_CPU], %o2        ! get owner->t_cpu
 463         ldn     [%o2+CPU_THREAD], %o3   ! get owner->t_cpu->cpu_thread
 464 .mutex_owner_running_critical_end:      ! for pil_interrupt() hook
 465         cmp     %o1, %o3                ! owner == running thread?
 466         be,a,pt %xcc, 2f                ! yes, go return cpu
 467         nop
 468 1:
 469         retl
 470         mov     %g0, %o0                ! return 0 (owner not running)
 471 2:
 472         retl
 473         mov     %o2, %o0                ! owner running, return cpu
 474         SET_SIZE(mutex_owner_running)
 475 
 476 #endif  /* lint */
 477 
 478 /*
 479  * rw_enter() and rw_exit().
 480  * 
 481  * These routines handle the simple cases of rw_enter (write-locking an unheld
 482  * lock or read-locking a lock that's neither write-locked nor write-wanted)
 483  * and rw_exit (no waiters or not the last reader).  If anything complicated
 484  * is going on we punt to rw_enter_sleep() and rw_exit_wakeup(), respectively.
 485  */
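Likewise, a hedged C sketch of the read-lock fast path (the kpri bookkeeping
and the retry/punt details are omitted; RW_WRITE_CLAIMED and RW_READ_LOCK
come from rwlock_impl.h, with readers counted in RW_READ_LOCK increments):

        int
        rw_read_enter_sketch(volatile ulong_t *rwp)
        {
                ulong_t old = *rwp;

                if ((old & RW_WRITE_CLAIMED) != 0)      /* writer holds or wants it */
                        return (0);                     /* caller -> rw_enter_sleep() */
                /* one cas attempt at bumping the hold count */
                return (atomic_cas_ulong(rwp, old, old + RW_READ_LOCK) == old);
        }
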
 486 #if defined(lint)
 487 
 488 /* ARGSUSED */
 489 void
 490 rw_enter(krwlock_t *lp, krw_t rw)
 491 {}
 492 
 493 /* ARGSUSED */
 494 void
 495 rw_exit(krwlock_t *lp)
 496 {}
 497 
 498 #else
 499 
 500         .align  16
 501         ENTRY(rw_enter)
 502         cmp     %o1, RW_WRITER                  ! entering as writer?
 503         be,a,pn %icc, 2f                        ! if so, go do it ...
 504         or      THREAD_REG, RW_WRITE_LOCKED, %o5 ! delay: %o5 = owner
 505         ld      [THREAD_REG + T_KPRI_REQ], %o3  ! begin THREAD_KPRI_REQUEST()
 506         ldn     [%o0], %o4                      ! %o4 = old lock value
 507         inc     %o3                             ! bump kpri
 508         st      %o3, [THREAD_REG + T_KPRI_REQ]  ! store new kpri
 509 1:
 510         andcc   %o4, RW_WRITE_CLAIMED, %g0      ! write-locked or write-wanted?
 511         bz,pt   %xcc, 3f                        ! if not, go get read lock
 512         add     %o4, RW_READ_LOCK, %o5          ! delay: increment hold count
 513         sethi   %hi(rw_enter_sleep), %o2        ! load up jump
 514         jmp     %o2 + %lo(rw_enter_sleep)       ! jmp to rw_enter_sleep
 515         nop                                     ! delay: do nothing
 516 3:
 517         casx    [%o0], %o4, %o5                 ! try to grab read lock
 518         cmp     %o4, %o5                        ! did we get it?
 519 #ifdef sun4v


 563         st      %g1, [THREAD_REG + T_KPRI_REQ]  ! delay: store new kpri
 564 2:
 565         andcc   %o4, RW_WRITE_LOCKED, %g0       ! are we a writer?
 566         bnz,a,pt %xcc, 3f
 567         or      THREAD_REG, RW_WRITE_LOCKED, %o4 ! delay: %o4 = owner
 568         cmp     %o5, RW_READ_LOCK               ! would lock still be held?
 569         bge,pt  %xcc, 1b                        ! if so, go ahead and drop it
 570         nop
 571         ba,pt   %xcc, rw_exit_wakeup            ! otherwise, wake waiters
 572         nop
 573 3:
 574         casx    [%o0], %o4, %o1                 ! try to drop write lock
 575         cmp     %o4, %o1                        ! did we succeed?
 576         bne,pn  %xcc, rw_exit_wakeup            ! if not, go to C
 577         nop
 578 .rw_write_exit_lockstat_patch_point:
 579         retl
 580         nop
 581         SET_SIZE(rw_exit)
 582 
 583 #endif
 584 
 585 #if defined(lint)
 586 
 587 void
 588 lockstat_hot_patch(void)
 589 {}
 590 
 591 #else
 592 
 593 #define RETL                    0x81c3e008
 594 #define NOP                     0x01000000
 595 #define BA                      0x10800000
 596 
 597 #define DISP22                  ((1 << 22) - 1)
 598 #define ANNUL                   0x20000000
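/*
 * For reference: these are raw SPARC instruction words.  RETL encodes
 * "jmpl %o7 + 8, %g0", NOP encodes "sethi 0, %g0", and BA is a "ba" with a
 * zero displacement; DISP22 masks the 22-bit branch displacement field and
 * ANNUL is the annul bit of a branch instruction.
 */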
 599 
 600 #define HOT_PATCH_COMMON(addr, event, normal_instr, annul, rs)          \
 601         ba      1f;                                                     \
 602         rd      %pc, %o0;                                               \
 603         save    %sp, -SA(MINFRAME), %sp;                                \
 604         set     lockstat_probemap, %l1;                                 \
 605         ld      [%l1 + (event * DTRACE_IDSIZE)], %o0;                   \
 606         brz,pn  %o0, 0f;                                                \
 607         ldub    [THREAD_REG + T_LOCKSTAT], %l0;                         \
 608         add     %l0, 1, %l2;                                            \
 609         stub    %l2, [THREAD_REG + T_LOCKSTAT];                         \
 610         set     lockstat_probe, %g1;                                    \
 611         ld      [%l1 + (event * DTRACE_IDSIZE)], %o0;                   \
 612         brz,a,pn %o0, 0f;                                               \


 660         HOT_PATCH(.rw_read_enter_lockstat_patch_point,
 661                 LS_RW_ENTER_ACQUIRE, RETL)
 662         HOT_PATCH_ARG(.rw_write_exit_lockstat_patch_point,
 663                 LS_RW_EXIT_RELEASE, RETL, RW_WRITER)
 664         HOT_PATCH_ARG(.rw_read_exit_lockstat_patch_point,
 665                 LS_RW_EXIT_RELEASE, RETL, RW_READER)
 666         HOT_PATCH(.lock_set_lockstat_patch_point,
 667                 LS_LOCK_SET_ACQUIRE, RETL)
 668         HOT_PATCH_ANNULLED(.lock_try_lockstat_patch_point,
 669                 LS_LOCK_TRY_ACQUIRE, RETL)
 670         HOT_PATCH(.lock_clear_lockstat_patch_point,
 671                 LS_LOCK_CLEAR_RELEASE, RETL)
 672         HOT_PATCH(.lock_set_spl_lockstat_patch_point,
 673                 LS_LOCK_SET_SPL_ACQUIRE, RETL)
 674         HOT_PATCH(.lock_clear_splx_lockstat_patch_point,
 675                 LS_LOCK_CLEAR_SPLX_RELEASE, RETL)
 676         ret
 677         restore
 678         SET_SIZE(lockstat_hot_patch)
 679 
 680 #endif  /* lint */
 681 
 682 /*
 683  * asm_mutex_spin_enter(mutex_t *)
 684  *
 685  * For use by assembly interrupt handler only.
 686  * Does not change spl, since the interrupt handler is assumed to be
 687  * running at high level already.
 688  * Traps may be off, so cannot panic.
 689  * Does not keep statistics on the lock.
 690  *
 691  * Entry:       %l6 - points to mutex
 692  *              %l7 - address of call (returns to %l7+8)
 693  * Uses:        %l6, %l5
 694  */
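
The body below is a test-and-test-and-set loop; the same logic in C, using
this file's own ldstub() (a sketch only -- the panicstr escape hatch in the
real code is omitted):

        void
        spin_enter_sketch(volatile uint8_t *lp)
        {
                while (ldstub((uint8_t *)lp) != 0) {    /* atomic attempt failed */
                        /*
                         * Spin with plain loads so the cache line stays
                         * shared instead of bouncing between CPUs.
                         */
                        while (*lp != 0)
                                continue;
                }
        }
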
 695 #ifndef lint
 696         .align 16
 697         ENTRY_NP(asm_mutex_spin_enter)
 698         ldstub  [%l6 + M_SPINLOCK], %l5 ! try to set lock, get value in %l5
 699 1:
 700         tst     %l5
 701         bnz     3f                      ! lock already held - go spin
 702         nop
 703 2:      
 704         jmp     %l7 + 8                 ! return
 705         membar  #LoadLoad
 706         !
 707         ! Spin on lock without using an atomic operation to prevent the caches
 708         ! from unnecessarily moving ownership of the line around.
 709         !
 710 3:
 711         ldub    [%l6 + M_SPINLOCK], %l5
 712 4:
 713         tst     %l5
 714         bz,a    1b                      ! lock appears to be free, try again
 715         ldstub  [%l6 + M_SPINLOCK], %l5 ! delay slot - try to set lock
 716 
 717         sethi   %hi(panicstr) , %l5
 718         ldn     [%l5 + %lo(panicstr)], %l5
 719         tst     %l5
 720         bnz     2b                      ! after panic, feign success
 721         nop
 722         b       4b
 723         ldub    [%l6 + M_SPINLOCK], %l5 ! delay - reload lock
 724         SET_SIZE(asm_mutex_spin_enter)
 725 #endif /* lint */
 726 
 727 /*
 728  * asm_mutex_spin_exit(mutex_t *)
 729  *
 730  * For use by assembly interrupt handler only.
 731  * Does not change spl, since the interrupt handler is assumed to be
 732  * running at high level already.
 733  *
 734  * Entry:       %l6 - points to mutex
 735  *              %l7 - address of call (returns to %l7+8)
 736  * Uses:        none
 737  */
 738 #ifndef lint
 739         ENTRY_NP(asm_mutex_spin_exit)
 740         membar  #LoadStore|#StoreStore
 741         jmp     %l7 + 8                 ! return
 742         clrb    [%l6 + M_SPINLOCK]      ! delay - clear lock
 743         SET_SIZE(asm_mutex_spin_exit)
 744 #endif /* lint */
 745 
 746 /*
 747  * thread_onproc()
 748  * Set thread in onproc state for the specified CPU.
 749  * Also set the thread lock pointer to the CPU's onproc lock.
 750  * Since the new lock isn't held, the store ordering is important.
 751  * If not done in assembler, the compiler could reorder the stores.
 752  */
 753 #if defined(lint)
 754 
 755 void
 756 thread_onproc(kthread_id_t t, cpu_t *cp)
 757 {
 758         t->t_state = TS_ONPROC;
 759         t->t_lockp = &cp->cpu_thread_lock;
 760 }
 761 
 762 #else   /* lint */
 763 
 764         ENTRY(thread_onproc)
 765         set     TS_ONPROC, %o2          ! TS_ONPROC state
 766         st      %o2, [%o0 + T_STATE]    ! store state
 767         add     %o1, CPU_THREAD_LOCK, %o3 ! pointer to disp_lock while running
 768         retl                            ! return
 769         stn     %o3, [%o0 + T_LOCKP]    ! delay - store new lock pointer
 770         SET_SIZE(thread_onproc)
 771 
 772 #endif  /* lint */
 773 
 774 /* delay function used in some mutex code - just do 3 nop cas ops */
 775 #if defined(lint)
 776 
 777 /* ARGSUSED */
 778 void
 779 cas_delay(void *addr)
 780 {}
 781 #else   /* lint */
 782         ENTRY(cas_delay)
 783         casx [%o0], %g0, %g0
 784         casx [%o0], %g0, %g0
 785         retl
 786         casx [%o0], %g0, %g0
 787         SET_SIZE(cas_delay)
 788 #endif  /* lint */
 789 
 790 #if defined(lint)
 791 
 792 /*
 793  * Alternative delay function for some Niagara processors.  The rd
 794  * instruction uses fewer resources than casx on those CPUs.
 795  */
 796 /* ARGSUSED */
 797 void
 798 rdccr_delay(void)
 799 {}
 800 #else   /* lint */
 801         ENTRY(rdccr_delay)
 802         rd      %ccr, %g0
 803         rd      %ccr, %g0
 804         retl
 805         rd      %ccr, %g0
 806         SET_SIZE(rdccr_delay)
 807 #endif  /* lint */
 808 
 809 /*
 810  * mutex_delay_default(void)
 811  * Spins for a few hundred processor cycles and returns to the caller.
 812  */
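
In C terms the routine below is just a fixed countdown:

        void
        mutex_delay_default_sketch(void)
        {
                volatile int i;

                for (i = 72; i > 0; i--)        /* matches the "mov 72, %o0" below */
                        continue;
        }
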
 813 #if defined(lint)
 814 
 815 void
 816 mutex_delay_default(void)
 817 {}
 818 
 819 #else   /* lint */
 820 
 821         ENTRY(mutex_delay_default)
 822         mov     72,%o0
 823 1:      brgz    %o0, 1b
 824         dec     %o0
 825         retl
 826         nop
 827         SET_SIZE(mutex_delay_default)
 828 
 829 #endif  /* lint */

The de-linted version of the file follows: the #if defined(lint) stubs (and
the #pragma ident line) are gone, leaving only the assembly.

   1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 /*
  22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
  23  * Use is subject to license terms.
  24  */
  25 
  26 #include "assym.h"

  27 
  28 #include <sys/t_lock.h>
  29 #include <sys/mutex.h>
  30 #include <sys/mutex_impl.h>
  31 #include <sys/rwlock_impl.h>
  32 #include <sys/asm_linkage.h>
  33 #include <sys/machlock.h>
  34 #include <sys/machthread.h>
  35 #include <sys/lockstat.h>
  36 
  37 /* #define DEBUG */
  38 
  39 #ifdef DEBUG
  40 #include <sys/machparam.h>
  41 #endif /* DEBUG */
  42 
  43 /************************************************************************
  44  *              ATOMIC OPERATIONS
  45  */
  46 
  47 /*
  48  * uint8_t      ldstub(uint8_t *cp)
  49  *
  50  * Store 0xFF at the specified location, and return its previous content.
  51  */
  52 
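
The de-linted file drops the C lint stub that accompanied this routine; for
reference, the stub in the pre-change listing above reads:

        uint8_t
        ldstub(uint8_t *cp)
        {
                uint8_t rv;
                rv = *cp;
                *cp = 0xFF;     /* the ldstub instruction does this atomically */
                return rv;
        }
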
  53         ENTRY(ldstub)
  54         retl
  55         ldstub  [%o0], %o0
  56         SET_SIZE(ldstub)
  57 
  58 /************************************************************************
  59  *              MEMORY BARRIERS -- see atomic.h for full descriptions.
  60  */
  61 
  62 #ifdef SF_ERRATA_51
  63         .align 32
  64         ENTRY(membar_return)
  65         retl
  66         nop
  67         SET_SIZE(membar_return)
  68 #define MEMBAR_RETURN   ba,pt %icc, membar_return
  69 #else
  70 #define MEMBAR_RETURN   retl
  71 #endif
  72 
  73         ENTRY(membar_enter)
  74         MEMBAR_RETURN
  75         membar  #StoreLoad|#StoreStore
  76         SET_SIZE(membar_enter)
  77 
  78         ENTRY(membar_exit)
  79         MEMBAR_RETURN
  80         membar  #LoadStore|#StoreStore
  81         SET_SIZE(membar_exit)
  82 
  83         ENTRY(membar_producer)
  84         MEMBAR_RETURN
  85         membar  #StoreStore
  86         SET_SIZE(membar_producer)
  87 
  88         ENTRY(membar_consumer)
  89         MEMBAR_RETURN
  90         membar  #LoadLoad
  91         SET_SIZE(membar_consumer)
  92 
  93 /************************************************************************
  94  *              MINIMUM LOCKS
  95  */
  96 
  97         .align  32
  98         ENTRY(lock_try)
  99         ldstub  [%o0], %o1              ! try to set lock, get value in %o1
 100         brnz,pn %o1, 1f
 101         membar  #LoadLoad
 102 .lock_try_lockstat_patch_point:
 103         retl
 104         or      %o0, 1, %o0             ! ensure lo32 != 0
 105 1:
 106         retl
 107         clr     %o0
 108         SET_SIZE(lock_try)
 109 
 110         .align  32
 111         ENTRY(lock_spin_try)
 112         ldstub  [%o0], %o1              ! try to set lock, get value in %o1
 113         brnz,pn %o1, 1f
 114         membar  #LoadLoad
 115         retl
 116         or      %o0, 1, %o0             ! ensure lo32 != 0


 137         membar  #LoadStore|#StoreStore
 138 .lock_clear_lockstat_patch_point:
 139         retl
 140         clrb    [%o0]
 141         SET_SIZE(lock_clear)
 142 
 143         .align  32
 144         ENTRY(ulock_try)
 145         ldstuba [%o0]ASI_USER, %o1      ! try to set lock, get value in %o1
 146         xor     %o1, 0xff, %o0          ! delay - return non-zero if success
 147         retl
 148           membar        #LoadLoad
 149         SET_SIZE(ulock_try)
 150 
 151         ENTRY(ulock_clear)
 152         membar  #LoadStore|#StoreStore
 153         retl
 154           stba  %g0, [%o0]ASI_USER      ! clear lock
 155         SET_SIZE(ulock_clear)
 156 

 157 

 158 /*
 159  * lock_set_spl(lp, new_pil, *old_pil_addr)
 160  *      Sets pil to new_pil, grabs lp, stores old pil in *old_pil_addr.
 161  */
 162 
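
For reference, the removed lint stub (see the pre-change listing above)
spelled the semantics out in C:

        void
        lock_set_spl(lock_t *lp, int new_pil, u_short *old_pil_addr)
        {
                extern int splr(int);
                extern void lock_set_spl_spin(lock_t *, int, u_short *, int);
                int old_pil;

                old_pil = splr(new_pil);
                if (!lock_try(lp)) {
                        lock_set_spl_spin(lp, new_pil, old_pil_addr, old_pil);
                } else {
                        *old_pil_addr = (u_short)old_pil;
                        membar_enter();
                }
        }
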
 163         ENTRY(lock_set_spl)
 164         rdpr    %pil, %o3                       ! %o3 = current pil
 165         cmp     %o3, %o1                        ! is current pil high enough?
 166         bl,a,pt %icc, 1f                        ! if not, write %pil in delay
 167         wrpr    %g0, %o1, %pil
 168 1:
 169         ldstub  [%o0], %o4                      ! try the lock
 170         brnz,pn %o4, 2f                         ! go to C for the miss case
 171         membar  #LoadLoad
 172 .lock_set_spl_lockstat_patch_point:
 173         retl
 174         sth     %o3, [%o2]                      ! delay - save original pil
 175 2:
 176         sethi   %hi(lock_set_spl_spin), %o5     ! load up jmp to C
 177         jmp     %o5 + %lo(lock_set_spl_spin)    ! jmp to lock_set_spl_spin
 178         nop                                     ! delay: do nothing
 179         SET_SIZE(lock_set_spl)
 180 
 181 /*
 182  * lock_clear_splx(lp, s)
 183  */
 184 
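
Likewise, the removed lint stub for this routine was simply:

        void
        lock_clear_splx(lock_t *lp, int s)
        {
                extern void splx(int);

                lock_clear(lp);         /* drop the lock ... */
                splx(s);                /* ... then restore the saved spl */
        }
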
 185         ENTRY(lock_clear_splx)
 186         ldn     [THREAD_REG + T_CPU], %o2       ! get CPU pointer
 187         membar  #LoadStore|#StoreStore
 188         ld      [%o2 + CPU_BASE_SPL], %o2
 189         clrb    [%o0]                           ! clear lock
 190         cmp     %o2, %o1                        ! compare new to base
 191         movl    %xcc, %o1, %o2                  ! use new pri if base is less
 192 .lock_clear_splx_lockstat_patch_point:
 193         retl
 194         wrpr    %g0, %o2, %pil
 195         SET_SIZE(lock_clear_splx)
 196 
 197 /*
 198  * mutex_enter() and mutex_exit().
 199  * 
 200  * These routines handle the simple cases of mutex_enter() (adaptive
 201  * lock, not held) and mutex_exit() (adaptive lock, held, no waiters).
 202  * If anything complicated is going on we punt to mutex_vector_enter().
 203  *
 204  * mutex_tryenter() is similar to mutex_enter() but returns zero if
 205  * the lock cannot be acquired, nonzero on success.
 206  *
 207  * If mutex_exit() gets preempted in the window between checking waiters
 208  * and clearing the lock, we can miss wakeups.  Disabling preemption
 209  * in the mutex code is prohibitively expensive, so instead we detect
 210  * mutex preemption by examining the trapped PC in the interrupt path.
 211  * If we interrupt a thread in mutex_exit() that has not yet cleared
 212  * the lock, pil_interrupt() resets its PC back to the beginning of
 213  * mutex_exit() so it will check again for waiters when it resumes.
 214  *
 215  * The lockstat code below is activated when the lockstat driver
 216  * calls lockstat_hot_patch() to hot-patch the kernel mutex code.
 217  * Note that we don't need to test lockstat_event_mask here -- we won't
 218  * patch this code in unless we're gathering ADAPTIVE_HOLD lockstats.
 219  */
 220 
 221         .align  32
 222         ENTRY(mutex_enter)
 223         mov     THREAD_REG, %o1
 224         casx    [%o0], %g0, %o1                 ! try to acquire as adaptive
 225         brnz,pn %o1, 1f                         ! locked or wrong type
 226         membar  #LoadLoad
 227 .mutex_enter_lockstat_patch_point:
 228         retl
 229         nop
 230 1:
 231         sethi   %hi(mutex_vector_enter), %o2    ! load up for jump to C
 232         jmp     %o2 + %lo(mutex_vector_enter)
 233         nop
 234         SET_SIZE(mutex_enter)
 235 
 236         ENTRY(mutex_tryenter)
 237         mov     THREAD_REG, %o1
 238         casx    [%o0], %g0, %o1                 ! try to acquire as adaptive
 239         brnz,pn %o1, 1f                         ! locked or wrong type
 240         membar  #LoadLoad


 292         ENTRY(mutex_owner_running)
 293 mutex_owner_running_critical_start:     ! If interrupted restart here
 294         ldn     [%o0], %o1              ! get the owner field
 295         and     %o1, MUTEX_THREAD, %o1  ! remove the waiters bit if any
 296         brz,pn  %o1, 1f                 ! if no owner, drive on ...
 297         nop
 298         ldn     [%o1+T_CPU], %o2        ! get owner->t_cpu
 299         ldn     [%o2+CPU_THREAD], %o3   ! get owner->t_cpu->cpu_thread
 300 .mutex_owner_running_critical_end:      ! for pil_interrupt() hook
 301         cmp     %o1, %o3                ! owner == running thread?
 302         be,a,pt %xcc, 2f                ! yes, go return cpu
 303         nop
 304 1:
 305         retl
 306         mov     %g0, %o0                ! return 0 (owner not running)
 307 2:
 308         retl
 309         mov     %o2, %o0                ! owner running, return cpu
 310         SET_SIZE(mutex_owner_running)
 311 
 312 /*
 313  * rw_enter() and rw_exit().
 314  * 
 315  * These routines handle the simple cases of rw_enter (write-locking an unheld
 316  * lock or read-locking a lock that's neither write-locked nor write-wanted)
 317  * and rw_exit (no waiters or not the last reader).  If anything complicated
 318  * is going on we punt to rw_enter_sleep() and rw_exit_wakeup(), respectively.
 319  */

 320 
 321         .align  16
 322         ENTRY(rw_enter)
 323         cmp     %o1, RW_WRITER                  ! entering as writer?
 324         be,a,pn %icc, 2f                        ! if so, go do it ...
 325         or      THREAD_REG, RW_WRITE_LOCKED, %o5 ! delay: %o5 = owner
 326         ld      [THREAD_REG + T_KPRI_REQ], %o3  ! begin THREAD_KPRI_REQUEST()
 327         ldn     [%o0], %o4                      ! %o4 = old lock value
 328         inc     %o3                             ! bump kpri
 329         st      %o3, [THREAD_REG + T_KPRI_REQ]  ! store new kpri
 330 1:
 331         andcc   %o4, RW_WRITE_CLAIMED, %g0      ! write-locked or write-wanted?
 332         bz,pt   %xcc, 3f                        ! if not, go get read lock
 333         add     %o4, RW_READ_LOCK, %o5          ! delay: increment hold count
 334         sethi   %hi(rw_enter_sleep), %o2        ! load up jump
 335         jmp     %o2 + %lo(rw_enter_sleep)       ! jmp to rw_enter_sleep
 336         nop                                     ! delay: do nothing
 337 3:
 338         casx    [%o0], %o4, %o5                 ! try to grab read lock
 339         cmp     %o4, %o5                        ! did we get it?
 340 #ifdef sun4v


 384         st      %g1, [THREAD_REG + T_KPRI_REQ]  ! delay: store new kpri
 385 2:
 386         andcc   %o4, RW_WRITE_LOCKED, %g0       ! are we a writer?
 387         bnz,a,pt %xcc, 3f
 388         or      THREAD_REG, RW_WRITE_LOCKED, %o4 ! delay: %o4 = owner
 389         cmp     %o5, RW_READ_LOCK               ! would lock still be held?
 390         bge,pt  %xcc, 1b                        ! if so, go ahead and drop it
 391         nop
 392         ba,pt   %xcc, rw_exit_wakeup            ! otherwise, wake waiters
 393         nop
 394 3:
 395         casx    [%o0], %o4, %o1                 ! try to drop write lock
 396         cmp     %o4, %o1                        ! did we succeed?
 397         bne,pn  %xcc, rw_exit_wakeup            ! if not, go to C
 398         nop
 399 .rw_write_exit_lockstat_patch_point:
 400         retl
 401         nop
 402         SET_SIZE(rw_exit)
 403 
 404 #define RETL                    0x81c3e008
 405 #define NOP                     0x01000000
 406 #define BA                      0x10800000
 407 
 408 #define DISP22                  ((1 << 22) - 1)
 409 #define ANNUL                   0x20000000
 410 
 411 #define HOT_PATCH_COMMON(addr, event, normal_instr, annul, rs)          \
 412         ba      1f;                                                     \
 413         rd      %pc, %o0;                                               \
 414         save    %sp, -SA(MINFRAME), %sp;                                \
 415         set     lockstat_probemap, %l1;                                 \
 416         ld      [%l1 + (event * DTRACE_IDSIZE)], %o0;                   \
 417         brz,pn  %o0, 0f;                                                \
 418         ldub    [THREAD_REG + T_LOCKSTAT], %l0;                         \
 419         add     %l0, 1, %l2;                                            \
 420         stub    %l2, [THREAD_REG + T_LOCKSTAT];                         \
 421         set     lockstat_probe, %g1;                                    \
 422         ld      [%l1 + (event * DTRACE_IDSIZE)], %o0;                   \
 423         brz,a,pn %o0, 0f;                                               \


 471         HOT_PATCH(.rw_read_enter_lockstat_patch_point,
 472                 LS_RW_ENTER_ACQUIRE, RETL)
 473         HOT_PATCH_ARG(.rw_write_exit_lockstat_patch_point,
 474                 LS_RW_EXIT_RELEASE, RETL, RW_WRITER)
 475         HOT_PATCH_ARG(.rw_read_exit_lockstat_patch_point,
 476                 LS_RW_EXIT_RELEASE, RETL, RW_READER)
 477         HOT_PATCH(.lock_set_lockstat_patch_point,
 478                 LS_LOCK_SET_ACQUIRE, RETL)
 479         HOT_PATCH_ANNULLED(.lock_try_lockstat_patch_point,
 480                 LS_LOCK_TRY_ACQUIRE, RETL)
 481         HOT_PATCH(.lock_clear_lockstat_patch_point,
 482                 LS_LOCK_CLEAR_RELEASE, RETL)
 483         HOT_PATCH(.lock_set_spl_lockstat_patch_point,
 484                 LS_LOCK_SET_SPL_ACQUIRE, RETL)
 485         HOT_PATCH(.lock_clear_splx_lockstat_patch_point,
 486                 LS_LOCK_CLEAR_SPLX_RELEASE, RETL)
 487         ret
 488         restore
 489         SET_SIZE(lockstat_hot_patch)
 490 
 491 /*
 492  * asm_mutex_spin_enter(mutex_t *)
 493  *
 494  * For use by assembly interrupt handler only.
 495  * Does not change spl, since the interrupt handler is assumed to be
 496  * running at high level already.
 497  * Traps may be off, so cannot panic.
 498  * Does not keep statistics on the lock.
 499  *
 500  * Entry:       %l6 - points to mutex
 501  *              %l7 - address of call (returns to %l7+8)
 502  * Uses:        %l6, %l5
 503  */

 504         .align 16
 505         ENTRY_NP(asm_mutex_spin_enter)
 506         ldstub  [%l6 + M_SPINLOCK], %l5 ! try to set lock, get value in %l5
 507 1:
 508         tst     %l5
 509         bnz     3f                      ! lock already held - go spin
 510         nop
 511 2:      
 512         jmp     %l7 + 8                 ! return
 513         membar  #LoadLoad
 514         !
 515         ! Spin on lock without using an atomic operation to prevent the caches
 516         ! from unnecessarily moving ownership of the line around.
 517         !
 518 3:
 519         ldub    [%l6 + M_SPINLOCK], %l5
 520 4:
 521         tst     %l5
 522         bz,a    1b                      ! lock appears to be free, try again
 523         ldstub  [%l6 + M_SPINLOCK], %l5 ! delay slot - try to set lock
 524 
 525         sethi   %hi(panicstr) , %l5
 526         ldn     [%l5 + %lo(panicstr)], %l5
 527         tst     %l5
 528         bnz     2b                      ! after panic, feign success
 529         nop
 530         b       4b
 531         ldub    [%l6 + M_SPINLOCK], %l5 ! delay - reload lock
 532         SET_SIZE(asm_mutex_spin_enter)

 533 
 534 /*
 535  * asm_mutex_spin_exit(mutex_t *)
 536  *
 537  * For use by assembly interrupt handler only.
 538  * Does not change spl, since the interrupt handler is assumed to be
 539  * running at high level already.
 540  *
 541  * Entry:       %l6 - points to mutex
 542  *              %l7 - address of call (returns to %l7+8)
 543  * Uses:        none
 544  */

 545         ENTRY_NP(asm_mutex_spin_exit)
 546         membar  #LoadStore|#StoreStore
 547         jmp     %l7 + 8                 ! return
 548         clrb    [%l6 + M_SPINLOCK]      ! delay - clear lock
 549         SET_SIZE(asm_mutex_spin_exit)

 550 
 551 /*
 552  * thread_onproc()
 553  * Set thread in onproc state for the specified CPU.
 554  * Also set the thread lock pointer to the CPU's onproc lock.
 555  * Since the new lock isn't held, the store ordering is important.
 556  * If not done in assembler, the compiler could reorder the stores.
 557  */
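
For reference, the removed lint stub gave the C equivalent; the point of the
comment above is that a compiler could legally reorder these two stores,
while the assembly nails the order down:

        void
        thread_onproc(kthread_id_t t, cpu_t *cp)
        {
                t->t_state = TS_ONPROC;
                t->t_lockp = &cp->cpu_thread_lock;      /* must follow the state store */
        }
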

 558 
 559         ENTRY(thread_onproc)
 560         set     TS_ONPROC, %o2          ! TS_ONPROC state
 561         st      %o2, [%o0 + T_STATE]    ! store state
 562         add     %o1, CPU_THREAD_LOCK, %o3 ! pointer to disp_lock while running
 563         retl                            ! return
 564         stn     %o3, [%o0 + T_LOCKP]    ! delay - store new lock pointer
 565         SET_SIZE(thread_onproc)
 566 
 567 /* delay function used in some mutex code - just do 3 nop cas ops */
 568         ENTRY(cas_delay)
 569         casx [%o0], %g0, %g0
 570         casx [%o0], %g0, %g0
 571         retl
 572         casx [%o0], %g0, %g0
 573         SET_SIZE(cas_delay)

 574 
 575         ENTRY(rdccr_delay)
 576         rd      %ccr, %g0
 577         rd      %ccr, %g0
 578         retl
 579         rd      %ccr, %g0
 580         SET_SIZE(rdccr_delay)

 581 
 582 /*
 583  * mutex_delay_default(void)
 584  * Spins for a few hundred processor cycles and returns to the caller.
 585  */

 586 
 587         ENTRY(mutex_delay_default)
 588         mov     72,%o0
 589 1:      brgz    %o0, 1b
 590         dec     %o0
 591         retl
 592         nop
 593         SET_SIZE(mutex_delay_default)
 594