OS-7753 THREAD_KPRI_RELEASE does nothing of the sort
Reviewed by: Bryan Cantrill <bryan@joyent.com>
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>

          --- old/usr/src/uts/sparc/v9/ml/lock_prim.s
          +++ new/usr/src/uts/sparc/v9/ml/lock_prim.s
[... 13 lines elided ...]
  14   14   * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15   15   * If applicable, add the following below this CDDL HEADER, with the
  16   16   * fields enclosed by brackets "[]" replaced with your own identifying
  17   17   * information: Portions Copyright [yyyy] [name of copyright owner]
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  /*
  22   22   * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
  23   23   * Use is subject to license terms.
       24 + * Copyright 2019 Joyent, Inc.
  24   25   */
  25   26  
  26      -#pragma ident   "%Z%%M% %I%     %E% SMI"
  27      -
  28   27  #if defined(lint)
  29   28  #include <sys/types.h>
  30   29  #include <sys/thread.h>
  31   30  #include <sys/cpuvar.h>
  32   31  #else   /* lint */
  33   32  #include "assym.h"
  34   33  #endif  /* lint */
  35   34  
  36   35  #include <sys/t_lock.h>
  37   36  #include <sys/mutex.h>
[... 215 lines elided ...]
 253  252          membar  #LoadStore|#StoreStore
 254  253          retl
 255  254            stba  %g0, [%o0]ASI_USER      ! clear lock
 256  255          SET_SIZE(ulock_clear)
 257  256  
 258  257  #endif  /* lint */
 259  258  
 260  259  
 261  260  /*
 262  261   * lock_set_spl(lp, new_pil, *old_pil_addr)
 263      - *      Sets pil to new_pil, grabs lp, stores old pil in *old_pil_addr.
      262 + *      Sets pil to new_pil, grabs lp, stores old pil in *old_pil_addr.
 264  263   */
 265  264  
 266  265  #if defined(lint)
 267  266  
 268  267  /* ARGSUSED */
 269  268  void
 270  269  lock_set_spl(lock_t *lp, int new_pil, u_short *old_pil_addr)
 271  270  {
 272  271          extern int splr(int);
 273  272          extern void lock_set_spl_spin(lock_t *, int, u_short *, int);
[... 56 lines elided ...]
 330  329          movl    %xcc, %o1, %o2                  ! use new pri if base is less
 331  330  .lock_clear_splx_lockstat_patch_point:
 332  331          retl
 333  332          wrpr    %g0, %o2, %pil
 334  333          SET_SIZE(lock_clear_splx)
 335  334  
 336  335  #endif  /* lint */
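
For readers skimming the assembly, the lock_set_spl() contract described
above reduces to roughly the following C. This is a sketch only, built from
the lint prototypes in this file; lock_try() is the usual illumos primitive
and is an assumption here, and the contended path falls back to
lock_set_spl_spin() just as the assembly tail-calls it:

        /* Sketch of lock_set_spl() semantics, not the implementation. */
        void
        lock_set_spl_sketch(lock_t *lp, int new_pil, u_short *old_pil_addr)
        {
                int old_pil = splr(new_pil);    /* raise PIL to >= new_pil */

                if (!lock_try(lp)) {
                        /* contended: fall back to the C spin loop */
                        lock_set_spl_spin(lp, new_pil, old_pil_addr, old_pil);
                        return;
                }
                *old_pil_addr = (u_short)old_pil;
        }

lock_clear_splx() is the inverse: it clears the lock, then drops %pil to the
passed-in priority or the CPU's base priority, whichever is higher, per the
"use new pri if base is less" comment above.
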
 337  336  
 338  337  /*
 339  338   * mutex_enter() and mutex_exit().
 340      - * 
      339 + *
 341  340   * These routines handle the simple cases of mutex_enter() (adaptive
 342  341   * lock, not held) and mutex_exit() (adaptive lock, held, no waiters).
 343  342   * If anything complicated is going on we punt to mutex_vector_enter().
 344  343   *
 345  344   * mutex_tryenter() is similar to mutex_enter() but returns zero if
 346  345   * the lock cannot be acquired, nonzero on success.
 347  346   *
 348  347   * If mutex_exit() gets preempted in the window between checking waiters
 349  348   * and clearing the lock, we can miss wakeups.  Disabling preemption
 350  349   * in the mutex code is prohibitively expensive, so instead we detect
[... 119 lines elided ...]
 470  469          mov     %g0, %o0                ! return 0 (owner not running)
 471  470  2:
 472  471          retl
 473  472          mov     %o2, %o0                ! owner running, return cpu
 474  473          SET_SIZE(mutex_owner_running)
 475  474  
 476  475  #endif  /* lint */
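
The mutex block comment above compresses a lot; here is a hedged C sketch of
the two fast paths it describes. The owner-word layout (owner pointer plus a
waiters bit) and the mutex_vector_exit() slow path named here are assumptions
for illustration, not taken from this file:

        /*
         * Adaptive fast paths only; anything complicated punts to the
         * C slow paths, as the block comment above says.
         */
        void
        mutex_enter_sketch(mutex_t *mp)
        {
                /* adaptive, not held: atomically install ourselves */
                if (atomic_cas_ptr(&mp->m_owner, NULL, curthread) != NULL)
                        mutex_vector_enter(mp);
        }

        void
        mutex_exit_sketch(mutex_t *mp)
        {
                /* adaptive, held by us, no waiters: just clear owner */
                if (mp->m_owner == curthread)   /* no waiters bit set */
                        mp->m_owner = NULL;
                else
                        mutex_vector_exit(mp);
        }

The preemption window the comment mentions is exactly why mutex_exit()
cannot be written this naively; the sketch ignores it.
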
 477  476  
 478  477  /*
 479  478   * rw_enter() and rw_exit().
 480      - * 
       479 + *
 481  480   * These routines handle the simple cases of rw_enter (write-locking an unheld
 482  481   * lock or read-locking a lock that's neither write-locked nor write-wanted)
 483  482   * and rw_exit (no waiters or not the last reader).  If anything complicated
 484  483   * is going on we punt to rw_enter_sleep() and rw_exit_wakeup(), respectively.
 485  484   */
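
In C terms, the read-enter fast path implemented below does roughly this.
A sketch only: rw_wwwh is the conventional illumos lock-word name and an
assumption here, and the for-loop stands in for the branch back to label 1:

        /* Sketch of the rw_enter(lp, RW_READER) fast path below. */
        void
        rw_read_enter_sketch(krwlock_t *lp)
        {
                for (;;) {
                        ulong_t old = lp->rw_wwwh;      /* %o4 */

                        if (old & RW_WRITE_CLAIMED) {   /* locked/wanted? */
                                rw_enter_sleep(lp, RW_READER);  /* punt */
                                return;
                        }
                        /* casx: bump hold count; retry if word changed */
                        if (atomic_cas_ulong(&lp->rw_wwwh, old,
                            old + RW_READ_LOCK) == old)
                                return;
                }
        }

Write entry (RW_WRITER) instead attempts a single compare-and-swap of the
owner with the RW_WRITE_LOCKED bit or'ed in, as the first two instructions
of rw_enter below set up.
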
 486  485  #if defined(lint)
 487  486  
 488  487  /* ARGSUSED */
 489  488  void
 490  489  rw_enter(krwlock_t *lp, krw_t rw)
[... 4 lines elided ...]
 495  494  rw_exit(krwlock_t *lp)
 496  495  {}
 497  496  
 498  497  #else
 499  498  
 500  499          .align  16
 501  500          ENTRY(rw_enter)
 502  501          cmp     %o1, RW_WRITER                  ! entering as writer?
 503  502          be,a,pn %icc, 2f                        ! if so, go do it ...
 504  503          or      THREAD_REG, RW_WRITE_LOCKED, %o5 ! delay: %o5 = owner
 505      -        ld      [THREAD_REG + T_KPRI_REQ], %o3  ! begin THREAD_KPRI_REQUEST()
 506  504          ldn     [%o0], %o4                      ! %o4 = old lock value
 507      -        inc     %o3                             ! bump kpri
 508      -        st      %o3, [THREAD_REG + T_KPRI_REQ]  ! store new kpri
 509  505  1:
 510  506          andcc   %o4, RW_WRITE_CLAIMED, %g0      ! write-locked or write-wanted?
 511      -        bz,pt   %xcc, 3f                        ! if so, prepare to block
      507 +        bz,pt   %xcc, 3f                        ! if so, prepare to block
 512  508          add     %o4, RW_READ_LOCK, %o5          ! delay: increment hold count
 513  509          sethi   %hi(rw_enter_sleep), %o2        ! load up jump
 514  510          jmp     %o2 + %lo(rw_enter_sleep)       ! jmp to rw_enter_sleep
 515  511          nop                                     ! delay: do nothing
 516  512  3:
 517  513          casx    [%o0], %o4, %o5                 ! try to grab read lock
 518  514          cmp     %o4, %o5                        ! did we get it?
 519  515  #ifdef sun4v
 520  516          be,a,pt %xcc, 0f
 521  517          membar  #LoadLoad
[... 23 lines elided ...]
 545  541          SET_SIZE(rw_enter)
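
The deleted instructions are the substance of this change: rw_enter() used
to bump the caller's kernel-priority request count on the read-enter path,
and rw_exit() below dropped it again. In C, the removed bookkeeping amounted
to:

        /* removed from rw_enter():  THREAD_KPRI_REQUEST() */
        curthread->t_kpri_req++;

        /* removed from rw_exit():   THREAD_KPRI_RELEASE() */
        curthread->t_kpri_req--;

Per the synopsis, THREAD_KPRI_RELEASE no longer does what its name claims,
so this is dead weight on two hot paths and is removed.
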
 546  542  
 547  543          .align  16
 548  544          ENTRY(rw_exit)
 549  545          ldn     [%o0], %o4                      ! %o4 = old lock value
 550  546          membar  #LoadStore|#StoreStore          ! membar_exit()
 551  547          subcc   %o4, RW_READ_LOCK, %o5          ! %o5 = new lock value if reader
 552  548          bnz,pn  %xcc, 2f                        ! single reader, no waiters?
 553  549          clr     %o1
 554  550  1:
 555      -        ld      [THREAD_REG + T_KPRI_REQ], %g1  ! begin THREAD_KPRI_RELEASE()
 556  551          srl     %o4, RW_HOLD_COUNT_SHIFT, %o3   ! %o3 = hold count (lockstat)
 557  552          casx    [%o0], %o4, %o5                 ! try to drop lock
 558  553          cmp     %o4, %o5                        ! did we succeed?
 559  554          bne,pn  %xcc, rw_exit_wakeup            ! if not, go to C
 560      -        dec     %g1                             ! delay: drop kpri
      555 +        nop                                     ! delay: do nothing
 561  556  .rw_read_exit_lockstat_patch_point:
 562  557          retl
 563      -        st      %g1, [THREAD_REG + T_KPRI_REQ]  ! delay: store new kpri
      558 +        nop                                     ! delay: do nothing
 564  559  2:
 565  560          andcc   %o4, RW_WRITE_LOCKED, %g0       ! are we a writer?
 566  561          bnz,a,pt %xcc, 3f
 567  562          or      THREAD_REG, RW_WRITE_LOCKED, %o4 ! delay: %o4 = owner
 568  563          cmp     %o5, RW_READ_LOCK               ! would lock still be held?
 569  564          bge,pt  %xcc, 1b                        ! if so, go ahead and drop it
 570  565          nop
 571  566          ba,pt   %xcc, rw_exit_wakeup            ! otherwise, wake waiters
 572  567          nop
 573  568  3:
[... 108 lines elided ...]
 682  677  /*
 683  678   * asm_mutex_spin_enter(mutex_t *)
 684  679   *
 685  680   * For use by assembly interrupt handler only.
 686  681   * Does not change spl, since the interrupt handler is assumed to be
 687  682   * running at high level already.
 688  683   * Traps may be off, so cannot panic.
 689  684   * Does not keep statistics on the lock.
 690  685   *
 691  686   * Entry:       %l6 - points to mutex
 692      - *              %l7 - address of call (returns to %l7+8)
      687 + *              %l7 - address of call (returns to %l7+8)
 693  688   * Uses:        %l6, %l5
 694  689   */
 695  690  #ifndef lint
 696  691          .align 16
 697  692          ENTRY_NP(asm_mutex_spin_enter)
 698  693          ldstub  [%l6 + M_SPINLOCK], %l5 ! try to set lock, get value in %l5
 699  694  1:
 700  695          tst     %l5
 701  696          bnz     3f                      ! lock already held - go spin
 702  697          nop
 703      -2:      
      698 +2:
 704  699          jmp     %l7 + 8                 ! return
 705  700          membar  #LoadLoad
 706  701          !
 707  702          ! Spin on lock without using an atomic operation to prevent the caches
 708  703          ! from unnecessarily moving ownership of the line around.
 709  704          !
 710  705  3:
 711  706          ldub    [%l6 + M_SPINLOCK], %l5
 712  707  4:
 713  708          tst     %l5
 714  709          bz,a    1b                      ! lock appears to be free, try again
 715  710          ldstub  [%l6 + M_SPINLOCK], %l5 ! delay slot - try to set lock
 716  711  
 717  712          sethi   %hi(panicstr) , %l5
 718  713          ldn     [%l5 + %lo(panicstr)], %l5
 719      -        tst     %l5
      714 +        tst     %l5
 720  715          bnz     2b                      ! after panic, feign success
 721  716          nop
 722  717          b       4b
 723  718          ldub    [%l6 + M_SPINLOCK], %l5 ! delay - reload lock
 724  719          SET_SIZE(asm_mutex_spin_enter)
 725  720  #endif /* lint */
 726  721  
 727  722  /*
 728  723   * asm_mutex_spin_exit(mutex_t *)
 729  724   *
 730  725   * For use by assembly interrupt handler only.
 731  726   * Does not change spl, since the interrupt handler is assumed to be
 732  727   * running at high level already.
 733  728   *
 734  729   * Entry:       %l6 - points to mutex
 735      - *              %l7 - address of call (returns to %l7+8)
      730 + *              %l7 - address of call (returns to %l7+8)
 736  731   * Uses:        none
 737  732   */
 738  733  #ifndef lint
 739  734          ENTRY_NP(asm_mutex_spin_exit)
 740  735          membar  #LoadStore|#StoreStore
 741  736          jmp     %l7 + 8                 ! return
 742  737          clrb    [%l6 + M_SPINLOCK]      ! delay - clear lock
 743  738          SET_SIZE(asm_mutex_spin_exit)
 744  739  #endif /* lint */
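
Both spin primitives above use the classic test-and-test-and-set pattern the
inline comment describes: retry the atomic ldstub only once plain loads say
the lock looks free, so the cache line is not bounced between CPUs while
spinning. A hedged C equivalent, with atomic_swap_uchar() standing in for
ldstub, m_spinlock as an assumed name for the field the M_SPINLOCK offset
points at, and the after-panic feign-success path omitted:

        void
        asm_mutex_spin_enter_sketch(mutex_t *m)
        {
                /* atomic test-and-set; on failure, spin with plain loads */
                while (atomic_swap_uchar(&m->m_spinlock, 0xff) != 0) {
                        while (m->m_spinlock != 0)
                                ;
                }
                /* acquire-side barrier (the assembly issues membar #LoadLoad) */
                membar_enter();
        }

        void
        asm_mutex_spin_exit_sketch(mutex_t *m)
        {
                /* release-side barrier (membar #LoadStore|#StoreStore) */
                membar_exit();
                m->m_spinlock = 0;      /* clear lock */
        }
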
 745  740  
[... 84 lines elided ...]