de-linting of .s files

          --- old/usr/src/uts/sparc/v9/ml/lock_prim.s
          +++ new/usr/src/uts/sparc/v9/ml/lock_prim.s
          ... 15 lines elided ...
  16   16   * fields enclosed by brackets "[]" replaced with your own identifying
  17   17   * information: Portions Copyright [yyyy] [name of copyright owner]
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  /*
  22   22   * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
  23   23   * Use is subject to license terms.
  24   24   */
  25   25  
  26      -#pragma ident   "%Z%%M% %I%     %E% SMI"
  27      -
  28      -#if defined(lint)
  29      -#include <sys/types.h>
  30      -#include <sys/thread.h>
  31      -#include <sys/cpuvar.h>
  32      -#else   /* lint */
  33   26  #include "assym.h"
  34      -#endif  /* lint */
  35   27  
  36   28  #include <sys/t_lock.h>
  37   29  #include <sys/mutex.h>
  38   30  #include <sys/mutex_impl.h>
  39   31  #include <sys/rwlock_impl.h>
  40   32  #include <sys/asm_linkage.h>
  41   33  #include <sys/machlock.h>
  42   34  #include <sys/machthread.h>
  43   35  #include <sys/lockstat.h>
  44   36  
          ... 6 lines elided ...
  51   43  /************************************************************************
  52   44   *              ATOMIC OPERATIONS
  53   45   */
  54   46  
  55   47  /*
  56   48   * uint8_t      ldstub(uint8_t *cp)
  57   49   *
  58   50   * Store 0xFF at the specified location, and return its previous content.
  59   51   */
  60   52  
  61      -#if defined(lint)
  62      -uint8_t
  63      -ldstub(uint8_t *cp)
  64      -{
  65      -        uint8_t rv;
  66      -        rv = *cp;
  67      -        *cp = 0xFF;
  68      -        return rv;
  69      -}
  70      -#else   /* lint */
  71      -
  72   53          ENTRY(ldstub)
  73   54          retl
  74   55          ldstub  [%o0], %o0
  75   56          SET_SIZE(ldstub)
  76   57  
  77      -#endif  /* lint */
  78      -
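Note: ldstub is SPARC's native test-and-set; it atomically stores 0xFF and hands back the previous byte, exactly as the deleted lint stub described. As a hedged sketch (names are illustrative; the kernel's real contended path is lock_set_spin()), a spin acquire built on it looks like:

        static void
        spin_acquire(volatile uint8_t *cp)
        {
                while (ldstub((uint8_t *)cp) != 0)      /* 0xFF means busy */
                        while (*cp != 0)                /* spin on plain loads */
                                continue;               /* until it looks free */
        }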
  79   58  /************************************************************************
  80   59   *              MEMORY BARRIERS -- see atomic.h for full descriptions.
  81   60   */
  82   61  
  83      -#if defined(lint)
  84      -
  85      -void
  86      -membar_enter(void)
  87      -{}
  88      -
  89      -void
  90      -membar_exit(void)
  91      -{}
  92      -
  93      -void
  94      -membar_producer(void)
  95      -{}
  96      -
  97      -void
  98      -membar_consumer(void)
  99      -{}
 100      -
 101      -#else   /* lint */
 102      -
 103   62  #ifdef SF_ERRATA_51
 104   63          .align 32
 105   64          ENTRY(membar_return)
 106   65          retl
 107   66          nop
 108   67          SET_SIZE(membar_return)
 109   68  #define MEMBAR_RETURN   ba,pt %icc, membar_return
 110   69  #else
 111   70  #define MEMBAR_RETURN   retl
 112   71  #endif
          ... 11 lines elided ...
 124   83          ENTRY(membar_producer)
 125   84          MEMBAR_RETURN
 126   85          membar  #StoreStore
 127   86          SET_SIZE(membar_producer)
 128   87  
 129   88          ENTRY(membar_consumer)
 130   89          MEMBAR_RETURN
 131   90          membar  #LoadLoad
 132   91          SET_SIZE(membar_consumer)
 133   92  
 134      -#endif  /* lint */
 135      -
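Note: these barriers pair across CPUs; a minimal usage sketch of the producer/consumer pairing, assuming only the declarations in <sys/atomic.h> (variable names are illustrative):

        static int data;
        static volatile int ready;

        void
        produce(void)
        {
                data = 42;
                membar_producer();      /* drain the data store before the flag */
                ready = 1;
        }

        int
        consume(void)
        {
                while (!ready)
                        continue;
                membar_consumer();      /* complete the flag load before data */
                return (data);
        }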
 136   93  /************************************************************************
 137   94   *              MINIMUM LOCKS
 138   95   */
 139   96  
 140      -#if defined(lint)
 141      -
 142      -/*
 143      - * lock_try(lp), ulock_try(lp)
 144      - *      - returns non-zero on success.
 145      - *      - doesn't block interrupts so don't use this to spin on a lock.
 146      - *      - uses "0xFF is busy, anything else is free" model.
 147      - *
 148      - *      ulock_try() is for a lock in the user address space.
 149      - *      For all V7/V8 sparc systems they are same since the kernel and
 150      - *      user are mapped in a user' context.
 151      - *      For V9 platforms the lock_try and ulock_try are different impl.
 152      - */
 153      -
 154      -int
 155      -lock_try(lock_t *lp)
 156      -{
 157      -        return (0xFF ^ ldstub(lp));
 158      -}
 159      -
 160      -int
 161      -lock_spin_try(lock_t *lp)
 162      -{
 163      -        return (0xFF ^ ldstub(lp));
 164      -}
 165      -
 166      -void
 167      -lock_set(lock_t *lp)
 168      -{
 169      -        extern void lock_set_spin(lock_t *);
 170      -
 171      -        if (!lock_try(lp))
 172      -                lock_set_spin(lp);
 173      -        membar_enter();
 174      -}
 175      -
 176      -void
 177      -lock_clear(lock_t *lp)
 178      -{
 179      -        membar_exit();
 180      -        *lp = 0;
 181      -}
 182      -
 183      -int
 184      -ulock_try(lock_t *lp)
 185      -{
 186      -        return (0xFF ^ ldstub(lp));
 187      -}
 188      -
 189      -void
 190      -ulock_clear(lock_t *lp)
 191      -{
 192      -        membar_exit();
 193      -        *lp = 0;
 194      -}
 195      -
 196      -#else   /* lint */
 197      -
 198   97          .align  32
 199   98          ENTRY(lock_try)
 200   99          ldstub  [%o0], %o1              ! try to set lock, get value in %o1
 201  100          brnz,pn %o1, 1f
 202  101          membar  #LoadLoad
 203  102  .lock_try_lockstat_patch_point:
 204  103          retl
 205  104          or      %o0, 1, %o0             ! ensure lo32 != 0
 206  105  1:
 207  106          retl
          ... 40 lines elided ...
 248  147          retl
 249  148            membar        #LoadLoad
 250  149          SET_SIZE(ulock_try)
 251  150  
 252  151          ENTRY(ulock_clear)
 253  152          membar  #LoadStore|#StoreStore
 254  153          retl
 255  154            stba  %g0, [%o0]ASI_USER      ! clear lock
 256  155          SET_SIZE(ulock_clear)
 257  156  
 258      -#endif  /* lint */
 259  157  
 260      -
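Note: the deleted stubs above were the only C statement of these semantics; for reference, the intended calling pattern under the documented "0xFF is busy, anything else is free" model is (illustrative):

        lock_t lk = 0;                  /* 0 == free */

        if (lock_try(&lk)) {            /* nonzero on success; doesn't raise pil */
                /* ... critical section ... */
                lock_clear(&lk);
        }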
 261  158  /*
 262  159   * lock_set_spl(lp, new_pil, *old_pil_addr)
 263  160   *      Sets pil to new_pil, grabs lp, stores old pil in *old_pil_addr.
 264  161   */
 265  162  
 266      -#if defined(lint)
 267      -
 268      -/* ARGSUSED */
 269      -void
 270      -lock_set_spl(lock_t *lp, int new_pil, u_short *old_pil_addr)
 271      -{
 272      -        extern int splr(int);
 273      -        extern void lock_set_spl_spin(lock_t *, int, u_short *, int);
 274      -        int old_pil;
 275      -
 276      -        old_pil = splr(new_pil);
 277      -        if (!lock_try(lp)) {
 278      -                lock_set_spl_spin(lp, new_pil, old_pil_addr, old_pil);
 279      -        } else {
 280      -                *old_pil_addr = (u_short)old_pil;
 281      -                membar_enter();
 282      -        }
 283      -}
 284      -
 285      -#else   /* lint */
 286      -
 287  163          ENTRY(lock_set_spl)
 288  164          rdpr    %pil, %o3                       ! %o3 = current pil
 289  165          cmp     %o3, %o1                        ! is current pil high enough?
 290  166          bl,a,pt %icc, 1f                        ! if not, write %pil in delay
 291  167          wrpr    %g0, %o1, %pil
 292  168  1:
 293  169          ldstub  [%o0], %o4                      ! try the lock
 294  170          brnz,pn %o4, 2f                         ! go to C for the miss case
 295  171          membar  #LoadLoad
 296  172  .lock_set_spl_lockstat_patch_point:
 297  173          retl
 298  174          sth     %o3, [%o2]                      ! delay - save original pil
 299  175  2:
 300  176          sethi   %hi(lock_set_spl_spin), %o5     ! load up jmp to C
 301  177          jmp     %o5 + %lo(lock_set_spl_spin)    ! jmp to lock_set_spl_spin
 302  178          nop                                     ! delay: do nothing
 303  179          SET_SIZE(lock_set_spl)
 304  180  
 305      -#endif  /* lint */
 306      -
 307  181  /*
 308  182   * lock_clear_splx(lp, s)
 309  183   */
 310  184  
 311      -#if defined(lint)
 312      -
 313      -void
 314      -lock_clear_splx(lock_t *lp, int s)
 315      -{
 316      -        extern void splx(int);
 317      -
 318      -        lock_clear(lp);
 319      -        splx(s);
 320      -}
 321      -
 322      -#else   /* lint */
 323      -
 324  185          ENTRY(lock_clear_splx)
 325  186          ldn     [THREAD_REG + T_CPU], %o2       ! get CPU pointer
 326  187          membar  #LoadStore|#StoreStore
 327  188          ld      [%o2 + CPU_BASE_SPL], %o2
 328  189          clrb    [%o0]                           ! clear lock
 329  190          cmp     %o2, %o1                        ! compare new to base
 330  191          movl    %xcc, %o1, %o2                  ! use new pri if base is less
 331  192  .lock_clear_splx_lockstat_patch_point:
 332  193          retl
 333  194          wrpr    %g0, %o2, %pil
 334  195          SET_SIZE(lock_clear_splx)
 335  196  
 336      -#endif  /* lint */
 337      -
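Note: lock_set_spl() and lock_clear_splx() are used as a bracket pair; a hedged usage sketch (variable names are illustrative):

        lock_t lk = 0;
        ushort_t opil;

        lock_set_spl(&lk, LOCK_LEVEL, &opil);   /* raise pil, then grab lk */
        /* ... section that must run at or above LOCK_LEVEL ... */
        lock_clear_splx(&lk, opil);             /* drop lk, then restore pil */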
 338  197  /*
 339  198   * mutex_enter() and mutex_exit().
 340  199   * 
 341  200   * These routines handle the simple cases of mutex_enter() (adaptive
 342  201   * lock, not held) and mutex_exit() (adaptive lock, held, no waiters).
 343  202   * If anything complicated is going on we punt to mutex_vector_enter().
 344  203   *
 345  204   * mutex_tryenter() is similar to mutex_enter() but returns zero if
 346  205   * the lock cannot be acquired, nonzero on success.
 347  206   *
          ... 4 lines elided ...
 352  211   * If we interrupt a thread in mutex_exit() that has not yet cleared
 353  212   * the lock, pil_interrupt() resets its PC back to the beginning of
 354  213   * mutex_exit() so it will check again for waiters when it resumes.
 355  214   *
 356  215   * The lockstat code below is activated when the lockstat driver
 357  216   * calls lockstat_hot_patch() to hot-patch the kernel mutex code.
 358  217   * Note that we don't need to test lockstat_event_mask here -- we won't
 359  218   * patch this code in unless we're gathering ADAPTIVE_HOLD lockstats.
 360  219   */
 361  220  
 362      -#if defined (lint)
 363      -
 364      -/* ARGSUSED */
 365      -void
 366      -mutex_enter(kmutex_t *lp)
 367      -{}
 368      -
 369      -/* ARGSUSED */
 370      -int
 371      -mutex_tryenter(kmutex_t *lp)
 372      -{ return (0); }
 373      -
 374      -/* ARGSUSED */
 375      -void
 376      -mutex_exit(kmutex_t *lp)
 377      -{}
 378      -
 379      -/* ARGSUSED */
 380      -void *
 381      -mutex_owner_running(mutex_impl_t *lp)
 382      -{ return (NULL); }
 383      -
 384      -#else
 385  221          .align  32
 386  222          ENTRY(mutex_enter)
 387  223          mov     THREAD_REG, %o1
 388  224          casx    [%o0], %g0, %o1                 ! try to acquire as adaptive
 389  225          brnz,pn %o1, 1f                         ! locked or wrong type
 390  226          membar  #LoadLoad
 391  227  .mutex_enter_lockstat_patch_point:
 392  228          retl
 393  229          nop
 394  230  1:
          ... 71 lines elided ...
 466  302          be,a,pt %xcc, 2f                ! yes, go return cpu
 467  303          nop
 468  304  1:
 469  305          retl
 470  306          mov     %g0, %o0                ! return 0 (owner not running)
 471  307  2:
 472  308          retl
 473  309          mov     %o2, %o0                ! owner running, return cpu
 474  310          SET_SIZE(mutex_owner_running)
 475  311  
 476      -#endif  /* lint */
 477      -
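Note: the casx above is the whole adaptive fast path: the lock word of an unheld adaptive mutex is 0, so mutex_enter() is a single compare-and-swap of curthread into the owner word. A C rendering (struct layout is illustrative; the real one is in mutex_impl.h):

        typedef struct { volatile ulong_t m_owner; } sketch_mutex_t;

        void
        sketch_mutex_enter(sketch_mutex_t *lp)
        {
                /* install curthread as owner iff the word is currently 0 */
                if (atomic_cas_ulong(&lp->m_owner, 0, (ulong_t)curthread) != 0)
                        mutex_vector_enter((mutex_impl_t *)lp); /* punt to C */
        }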
 478  312  /*
 479  313   * rw_enter() and rw_exit().
 480  314   * 
 481  315   * These routines handle the simple cases of rw_enter (write-locking an unheld
 482  316   * lock or read-locking a lock that's neither write-locked nor write-wanted)
 483  317   * and rw_exit (no waiters or not the last reader).  If anything complicated
 484  318   * is going on we punt to rw_enter_sleep() and rw_exit_wakeup(), respectively.
 485  319   */
 486      -#if defined(lint)
 487  320  
 488      -/* ARGSUSED */
 489      -void
 490      -rw_enter(krwlock_t *lp, krw_t rw)
 491      -{}
 492      -
 493      -/* ARGSUSED */
 494      -void
 495      -rw_exit(krwlock_t *lp)
 496      -{}
 497      -
 498      -#else
 499      -
 500  321          .align  16
 501  322          ENTRY(rw_enter)
 502  323          cmp     %o1, RW_WRITER                  ! entering as writer?
 503  324          be,a,pn %icc, 2f                        ! if so, go do it ...
 504  325          or      THREAD_REG, RW_WRITE_LOCKED, %o5 ! delay: %o5 = owner
 505  326          ld      [THREAD_REG + T_KPRI_REQ], %o3  ! begin THREAD_KPRI_REQUEST()
 506  327          ldn     [%o0], %o4                      ! %o4 = old lock value
 507  328          inc     %o3                             ! bump kpri
 508  329          st      %o3, [THREAD_REG + T_KPRI_REQ]  ! store new kpri
 509  330  1:
          ... 63 lines elided ...
 573  394  3:
 574  395          casx    [%o0], %o4, %o1                 ! try to drop write lock
 575  396          cmp     %o4, %o1                        ! did we succeed?
 576  397          bne,pn  %xcc, rw_exit_wakeup            ! if not, go to C
 577  398          nop
 578  399  .rw_write_exit_lockstat_patch_point:
 579  400          retl
 580  401          nop
 581  402          SET_SIZE(rw_exit)
 582  403  
 583      -#endif
 584      -
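Note: the reader fast path amounts to one cas over the hold count; an illustrative C reading of the rule in the block comment (flag names from rwlock_impl.h, helper name hypothetical):

        int
        sketch_rw_read_try(volatile ulong_t *rw)
        {
                ulong_t old = *rw;

                /* write-locked or write-wanted: caller punts to rw_enter_sleep() */
                if (old & (RW_WRITE_LOCKED | RW_WRITE_WANTED))
                        return (0);
                return (atomic_cas_ulong(rw, old, old + RW_READ_LOCK) == old);
        }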
 585      -#if defined(lint)
 586      -
 587      -void
 588      -lockstat_hot_patch(void)
 589      -{}
 590      -
 591      -#else
 592      -
 593  404  #define RETL                    0x81c3e008
 594  405  #define NOP                     0x01000000
 595  406  #define BA                      0x10800000
 596  407  
 597  408  #define DISP22                  ((1 << 22) - 1)
 598  409  #define ANNUL                   0x20000000
 599  410  
 600  411  #define HOT_PATCH_COMMON(addr, event, normal_instr, annul, rs)          \
 601  412          ba      1f;                                                     \
 602  413          rd      %pc, %o0;                                               \
          ... 67 lines elided ...
 670  481          HOT_PATCH(.lock_clear_lockstat_patch_point,
 671  482                  LS_LOCK_CLEAR_RELEASE, RETL)
 672  483          HOT_PATCH(.lock_set_spl_lockstat_patch_point,
 673  484                  LS_LOCK_SET_SPL_ACQUIRE, RETL)
 674  485          HOT_PATCH(.lock_clear_splx_lockstat_patch_point,
 675  486                  LS_LOCK_CLEAR_SPLX_RELEASE, RETL)
 676  487          ret
 677  488          restore
 678  489          SET_SIZE(lockstat_hot_patch)
 679  490  
 680      -#endif  /* lint */
 681      -
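Note: the patch points are deliberately "retl; nop" (or equivalent) pairs so lockstat_hot_patch() can overwrite them in place. As a hedged illustration of what the BA/DISP22 macros above encode (helper name hypothetical, signed-displacement details elided):

        static uint32_t
        sketch_ba_encode(uintptr_t from, uintptr_t to)
        {
                /* SPARC ba: the BA opcode plus a 22-bit word displacement */
                return (BA | (((to - from) >> 2) & DISP22));
        }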
 682  491  /*
 683  492   * asm_mutex_spin_enter(mutex_t *)
 684  493   *
 685  494   * For use by assembly interrupt handler only.
 686  495   * Does not change spl, since the interrupt handler is assumed to be
 687  496   * running at high level already.
 688  497   * Traps may be off, so cannot panic.
 689  498   * Does not keep statistics on the lock.
 690  499   *
 691  500   * Entry:       %l6 - points to mutex
 692  501   *              %l7 - address of call (returns to %l7+8)
 693  502   * Uses:        %l6, %l5
 694  503   */
 695      -#ifndef lint
 696  504          .align 16
 697  505          ENTRY_NP(asm_mutex_spin_enter)
 698  506          ldstub  [%l6 + M_SPINLOCK], %l5 ! try to set lock, get value in %l5
 699  507  1:
 700  508          tst     %l5
 701  509          bnz     3f                      ! lock already held - go spin
 702  510          nop
 703  511  2:      
 704  512          jmp     %l7 + 8                 ! return
 705  513          membar  #LoadLoad
          ... 9 lines elided ...
 715  523          ldstub  [%l6 + M_SPINLOCK], %l5 ! delay slot - try to set lock
 716  524  
 717  525          sethi   %hi(panicstr) , %l5
 718  526          ldn     [%l5 + %lo(panicstr)], %l5
 719  527          tst     %l5
 720  528          bnz     2b                      ! after panic, feign success
 721  529          nop
 722  530          b       4b
 723  531          ldub    [%l6 + M_SPINLOCK], %l5 ! delay - reload lock
 724  532          SET_SIZE(asm_mutex_spin_enter)
 725      -#endif /* lint */
 726  533  
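Note: a C rendering of the control flow above (illustrative; the assembly cannot panic because traps may be off, so once panicstr is set it feigns success instead of spinning forever):

        void
        sketch_mutex_spin_enter(volatile uint8_t *slp)  /* &mp->m_spinlock */
        {
                while (ldstub((uint8_t *)slp) != 0) {   /* try to set the lock */
                        while (*slp != 0) {             /* held: spin on loads */
                                if (panicstr != NULL)
                                        return;         /* feign success */
                        }
                }
        }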
 727  534  /*
 728  535   * asm_mutex_spin_exit(mutex_t *)
 729  536   *
 730  537   * For use by assembly interrupt handler only.
 731  538   * Does not change spl, since the interrupt handler is assumed to be
 732  539   * running at high level already.
 733  540   *
 734  541   * Entry:       %l6 - points to mutex
 735  542   *              %l7 - address of call (returns to %l7+8)
 736  543   * Uses:        none
 737  544   */
 738      -#ifndef lint
 739  545          ENTRY_NP(asm_mutex_spin_exit)
 740  546          membar  #LoadStore|#StoreStore
 741  547          jmp     %l7 + 8                 ! return
 742  548          clrb    [%l6 + M_SPINLOCK]      ! delay - clear lock
 743  549          SET_SIZE(asm_mutex_spin_exit)
 744      -#endif /* lint */
 745  550  
 746  551  /*
 747  552   * thread_onproc()
 748  553   * Set thread in onproc state for the specified CPU.
 749  554   * Also set the thread lock pointer to the CPU's onproc lock.
 750  555   * Since the new lock isn't held, the store ordering is important.
 751  556   * If not done in assembler, the compiler could reorder the stores.
 752  557   */
 753      -#if defined(lint)
 754  558  
 755      -void
 756      -thread_onproc(kthread_id_t t, cpu_t *cp)
 757      -{
 758      -        t->t_state = TS_ONPROC;
 759      -        t->t_lockp = &cp->cpu_thread_lock;
 760      -}
 761      -
 762      -#else   /* lint */
 763      -
 764  559          ENTRY(thread_onproc)
 765  560          set     TS_ONPROC, %o2          ! TS_ONPROC state
 766  561          st      %o2, [%o0 + T_STATE]    ! store state
 767  562          add     %o1, CPU_THREAD_LOCK, %o3 ! pointer to disp_lock while running
 768  563          retl                            ! return
 769  564          stn     %o3, [%o0 + T_LOCKP]    ! delay - store new lock pointer
 770  565          SET_SIZE(thread_onproc)
 771  566  
 772      -#endif  /* lint */
 773      -
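Note: the block comment is the whole reason this routine is assembly: C guarantees no ordering between the two stores. A hedged C equivalent would need an explicit compiler barrier, e.g. with a GCC-style asm (illustrative):

        void
        sketch_thread_onproc(kthread_id_t t, cpu_t *cp)
        {
                t->t_state = TS_ONPROC;
                __asm__ __volatile__("" ::: "memory");  /* forbid reordering */
                t->t_lockp = &cp->cpu_thread_lock;
        }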
 774  567  /* delay function used in some mutex code - just do 3 nop cas ops */
 775      -#if defined(lint)
 776      -
 777      -/* ARGSUSED */
 778      -void
 779      -cas_delay(void *addr)
 780      -{}
 781      -#else   /* lint */
 782  568          ENTRY(cas_delay)
 783  569          casx [%o0], %g0, %g0
 784  570          casx [%o0], %g0, %g0
 785  571          retl
 786  572          casx [%o0], %g0, %g0
 787  573          SET_SIZE(cas_delay)
 788      -#endif  /* lint */
 789  574  
 790      -#if defined(lint)
 791      -
 792      -/*
 793      - * alternative delay function for some niagara processors.   The rd
 794      - * instruction uses less resources than casx on those cpus.
 795      - */
 796      -/* ARGSUSED */
 797      -void
 798      -rdccr_delay(void)
 799      -{}
 800      -#else   /* lint */
 801  575          ENTRY(rdccr_delay)
 802  576          rd      %ccr, %g0
 803  577          rd      %ccr, %g0
 804  578          retl
 805  579          rd      %ccr, %g0
 806  580          SET_SIZE(rdccr_delay)
 807      -#endif  /* lint */
 808  581  
 809  582  /*
 810  583   * mutex_delay_default(void)
 811  584   * Spins for approx a few hundred processor cycles and returns to caller.
 812  585   */
 813      -#if defined(lint)
 814  586  
 815      -void
 816      -mutex_delay_default(void)
 817      -{}
 818      -
 819      -#else   /* lint */
 820      -
 821  587          ENTRY(mutex_delay_default)
 822  588          mov     72,%o0
 823  589  1:      brgz    %o0, 1b
 824  590          dec     %o0
 825  591          retl
 826  592          nop
 827  593          SET_SIZE(mutex_delay_default)
 828  594  
 829      -#endif  /* lint */
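Note: the loop body is two instructions (brgz/dec) executed 72 times, which is where the "approx a few hundred processor cycles" estimate comes from. An equivalent C sketch (the volatile keeps the compiler from deleting the loop):

        void
        sketch_mutex_delay_default(void)
        {
                volatile int i;

                for (i = 72; i > 0; i--)
                        continue;
        }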
    