de-linting of .s files

--- old/usr/src/uts/intel/ia32/ml/lock_prim.s
+++ new/usr/src/uts/intel/ia32/ml/lock_prim.s
[ 15 lines elided ]
  16   16   * fields enclosed by brackets "[]" replaced with your own identifying
  17   17   * information: Portions Copyright [yyyy] [name of copyright owner]
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  /*
  22   22   * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
  23   23   * Use is subject to license terms.
  24   24   */
  25   25  
  26      -#pragma ident   "%Z%%M% %I%     %E% SMI"
       26 +/*
       27 + * Copyright 2019 Joyent, Inc.
       28 + */
  27   29  
  28      -#if defined(lint) || defined(__lint)
  29      -#include <sys/types.h>
  30      -#include <sys/thread.h>
  31      -#include <sys/cpuvar.h>
  32      -#include <vm/page.h>
  33      -#else   /* __lint */
  34   30  #include "assym.h"
  35      -#endif  /* __lint */
  36   31  
  37   32  #include <sys/mutex_impl.h>
  38   33  #include <sys/asm_linkage.h>
  39   34  #include <sys/asm_misc.h>
  40   35  #include <sys/regset.h>
  41   36  #include <sys/rwlock_impl.h>
  42   37  #include <sys/lockstat.h>
  43   38  
  44   39  /*
  45   40   * lock_try(lp), ulock_try(lp)
  46   41   *      - returns non-zero on success.
  47   42   *      - doesn't block interrupts so don't use this to spin on a lock.
  48   43   *
  49   44   * ulock_try() is for a lock in the user address space.
  50   45   */
  51   46  
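As a cross-check against the xchgb sequence below, here is a minimal
user-space model of lock_try()'s contract in C11 atomics (the model_
names are illustrative, not the kernel API):

        #include <stdatomic.h>

        typedef _Atomic unsigned char model_lock_t;     /* 0 free, 0xff held */

        /* One exchange, no spinning: non-zero return means acquired. */
        static int
        model_lock_try(model_lock_t *lp)
        {
                return (atomic_exchange(lp, 0xff) == 0);
        }

lock_spin_try() (elided below) performs the same exchange; it is a
separate entry point so that it carries no lockstat patch point.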
  52      -#if defined(lint) || defined(__lint)
  53      -
  54      -/* ARGSUSED */
  55      -int
  56      -lock_try(lock_t *lp)
  57      -{ return (0); }
  58      -
  59      -/* ARGSUSED */
  60      -int
  61      -lock_spin_try(lock_t *lp)
  62      -{ return (0); }
  63      -
  64      -/* ARGSUSED */
  65      -int
  66      -ulock_try(lock_t *lp)
  67      -{ return (0); }
  68      -
  69      -#else   /* __lint */
  70   47          .globl  kernelbase
  71   48  
  72      -#if defined(__amd64)
  73      -
  74   49          ENTRY(lock_try)
  75   50          movb    $-1, %dl
  76   51          movzbq  %dl, %rax
  77   52          xchgb   %dl, (%rdi)
  78   53          xorb    %dl, %al
  79   54  .lock_try_lockstat_patch_point:
  80   55          ret
  81   56          testb   %al, %al
  82   57          jnz     0f
  83   58          ret
[ 27 lines elided ]
 111   86  
 112   87  #endif /* DEBUG */
 113   88  
 114   89  ulock_pass:
 115   90          movl    $1, %eax
 116   91          xchgb   %al, (%rdi)
 117   92          xorb    $1, %al
 118   93          ret
 119   94          SET_SIZE(ulock_try)
 120   95  
 121      -#else
 122      -
 123      -        ENTRY(lock_try)
 124      -        movl    $1,%edx
 125      -        movl    4(%esp),%ecx            /* ecx = lock addr */
 126      -        xorl    %eax,%eax
 127      -        xchgb   %dl, (%ecx)             /* using dl will avoid partial */
 128      -        testb   %dl,%dl                 /* stalls on P6 ? */
 129      -        setz    %al
 130      -.lock_try_lockstat_patch_point:
 131      -        ret
 132      -        movl    %gs:CPU_THREAD, %edx    /* edx = thread addr */
 133      -        testl   %eax, %eax
 134      -        jz      0f
 135      -        movl    $LS_LOCK_TRY_ACQUIRE, %eax
 136      -        jmp     lockstat_wrapper
 137      -0:
 138      -        ret
 139      -        SET_SIZE(lock_try)
 140      -
 141      -        ENTRY(lock_spin_try)
 142      -        movl    $-1,%edx
 143      -        movl    4(%esp),%ecx            /* ecx = lock addr */
 144      -        xorl    %eax,%eax
 145      -        xchgb   %dl, (%ecx)             /* using dl will avoid partial */
 146      -        testb   %dl,%dl                 /* stalls on P6 ? */
 147      -        setz    %al
 148      -        ret
 149      -        SET_SIZE(lock_spin_try)
 150      -
 151      -        ENTRY(ulock_try)
 152   96  #ifdef DEBUG
 153      -        movl    kernelbase, %eax
 154      -        cmpl    %eax, 4(%esp)           /* test uaddr < kernelbase */
 155      -        jb      ulock_pass              /* uaddr < kernelbase, proceed */
 156      -
 157      -        pushl   $.ulock_panic_msg
 158      -        call    panic
 159      -
 160      -#endif /* DEBUG */
 161      -
 162      -ulock_pass:
 163      -        movl    $1,%eax
 164      -        movl    4(%esp),%ecx
 165      -        xchgb   %al, (%ecx)
 166      -        xorb    $1, %al
 167      -        ret
 168      -        SET_SIZE(ulock_try)
 169      -
 170      -#endif  /* !__amd64 */
 171      -
 172      -#ifdef DEBUG
 173   97          .data
 174   98  .ulock_panic_msg:
 175   99          .string "ulock_try: Argument is above kernelbase"
 176  100          .text
 177  101  #endif  /* DEBUG */
 178  102  
 179      -#endif  /* __lint */
 180      -
 181  103  /*
 182  104   * lock_clear(lp)
 183  105   *      - unlock lock without changing interrupt priority level.
 184  106   */
 185  107  
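The release side needs no fence: on x86 a plain byte store already has
release semantics, so lock_clear() is a single movb. The same operation
in C11 (a sketch, reusing model_lock_t from the earlier sketch):

        static void
        model_lock_clear(model_lock_t *lp)
        {
                /* movb $0, (%rdi): a plain store is a release on x86 */
                atomic_store_explicit(lp, 0, memory_order_release);
        }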
 186      -#if defined(lint) || defined(__lint)
 187      -
 188      -/* ARGSUSED */
 189      -void
 190      -lock_clear(lock_t *lp)
 191      -{}
 192      -
 193      -/* ARGSUSED */
 194      -void
 195      -ulock_clear(lock_t *lp)
 196      -{}
 197      -
 198      -#else   /* __lint */
 199      -
 200      -#if defined(__amd64)
 201      -
 202  108          ENTRY(lock_clear)
 203  109          movb    $0, (%rdi)
 204  110  .lock_clear_lockstat_patch_point:
 205  111          ret
 206  112          movq    %rdi, %rsi                      /* rsi = lock addr */
 207  113          movq    %gs:CPU_THREAD, %rdx            /* rdx = thread addr */
 208  114          movl    $LS_LOCK_CLEAR_RELEASE, %edi    /* edi = event */
 209  115          jmp     lockstat_wrapper
 210  116          SET_SIZE(lock_clear)
 211  117  
[ 8 lines elided ]
 220  126          movq    %rsp, %rbp
 221  127          xorl    %eax, %eax              /* clear for varargs */
 222  128          call    panic
 223  129  #endif
 224  130  
 225  131  ulock_clr:
 226  132          movb    $0, (%rdi)
 227  133          ret
 228  134          SET_SIZE(ulock_clear)
 229  135  
 230      -#else
 231      -
 232      -        ENTRY(lock_clear)
 233      -        movl    4(%esp), %eax
 234      -        movb    $0, (%eax)
 235      -.lock_clear_lockstat_patch_point:
 236      -        ret
 237      -        movl    %gs:CPU_THREAD, %edx            /* edx = thread addr */
 238      -        movl    %eax, %ecx                      /* ecx = lock pointer */
 239      -        movl    $LS_LOCK_CLEAR_RELEASE, %eax
 240      -        jmp     lockstat_wrapper
 241      -        SET_SIZE(lock_clear)
 242      -
 243      -        ENTRY(ulock_clear)
 244  136  #ifdef DEBUG
 245      -        movl    kernelbase, %ecx
 246      -        cmpl    %ecx, 4(%esp)           /* test uaddr < kernelbase */
 247      -        jb      ulock_clr               /* uaddr < kernelbase, proceed */
 248      -
 249      -        pushl   $.ulock_clear_msg
 250      -        call    panic
 251      -#endif
 252      -
 253      -ulock_clr:
 254      -        movl    4(%esp),%eax
 255      -        xorl    %ecx,%ecx
 256      -        movb    %cl, (%eax)
 257      -        ret
 258      -        SET_SIZE(ulock_clear)
 259      -
 260      -#endif  /* !__amd64 */
 261      -
 262      -#ifdef DEBUG
 263  137          .data
 264  138  .ulock_clear_msg:
 265  139          .string "ulock_clear: Argument is above kernelbase"
 266  140          .text
 267  141  #endif  /* DEBUG */
 268  142  
 269  143  
 270      -#endif  /* __lint */
 271      -
 272  144  /*
 273  145   * lock_set_spl(lock_t *lp, int new_pil, u_short *old_pil)
  274  146   * Grabs lp, sets pil to new_pil, stores old pil in *old_pil.
 275  147   */
 276  148  
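The fast path raises the PIL before touching the lock and punts to C
only on a missed exchange. A control-flow sketch, assuming the
lock_set_spl_spin(lp, new_pil, old_pil, old_pil_value) argument order
implied by the register setup at .lss_miss:

        extern int splr(int pil);
        extern void lock_set_spl_spin(model_lock_t *, int,
            unsigned short *, int);

        void
        model_lock_set_spl(model_lock_t *lp, int new_pil,
            unsigned short *old_pil)
        {
                int pil = splr(new_pil);                /* raise PIL first */
                if (atomic_exchange(lp, 0xff) != 0) {
                        lock_set_spl_spin(lp, new_pil, old_pil, pil);
                        return;                         /* hard case in C */
                }
                *old_pil = (unsigned short)pil;
        }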
 277      -#if defined(lint) || defined(__lint)
 278      -
 279      -/* ARGSUSED */
 280      -void
 281      -lock_set_spl(lock_t *lp, int new_pil, u_short *old_pil)
 282      -{}
 283      -
 284      -#else   /* __lint */
 285      -
 286      -#if defined(__amd64)
 287      -
 288  149          ENTRY(lock_set_spl)
 289  150          pushq   %rbp
 290  151          movq    %rsp, %rbp
 291  152          subq    $32, %rsp
 292  153          movl    %esi, 8(%rsp)           /* save priority level */
 293  154          movq    %rdx, 16(%rsp)          /* save old pil ptr */
 294  155          movq    %rdi, 24(%rsp)          /* save lock pointer */
 295  156          movl    %esi, %edi              /* pass priority level */
 296  157          call    splr                    /* raise priority level */
 297  158          movq    24(%rsp), %rdi          /* rdi = lock addr */
[ 11 lines elided ]
 309  170          movl    $LS_LOCK_SET_SPL_ACQUIRE, %edi
 310  171          jmp     lockstat_wrapper
 311  172  .lss_miss:
 312  173          movl    8(%rsp), %esi           /* new_pil */
 313  174          movq    16(%rsp), %rdx          /* old_pil_addr */
 314  175          movl    %eax, %ecx              /* original pil */
 315  176          leave                           /* unwind stack */
 316  177          jmp     lock_set_spl_spin
 317  178          SET_SIZE(lock_set_spl)
 318  179  
 319      -#else
 320      -
 321      -        ENTRY(lock_set_spl)
 322      -        movl    8(%esp), %eax           /* get priority level */
 323      -        pushl   %eax
 324      -        call    splr                    /* raise priority level */
 325      -        movl    8(%esp), %ecx           /* ecx = lock addr */
 326      -        movl    $-1, %edx
 327      -        addl    $4, %esp
 328      -        xchgb   %dl, (%ecx)             /* try to set lock */
 329      -        testb   %dl, %dl                /* did we get the lock? ... */
 330      -        movl    12(%esp), %edx          /* edx = olp pil addr (ZF unaffected) */
 331      -        jnz     .lss_miss               /* ... no, go to C for the hard case */
 332      -        movw    %ax, (%edx)             /* store old pil */
 333      -.lock_set_spl_lockstat_patch_point:
 334      -        ret
 335      -        movl    %gs:CPU_THREAD, %edx    /* edx = thread addr*/
 336      -        movl    $LS_LOCK_SET_SPL_ACQUIRE, %eax
 337      -        jmp     lockstat_wrapper
 338      -.lss_miss:
 339      -        pushl   %eax                    /* original pil */
 340      -        pushl   %edx                    /* old_pil addr */
 341      -        pushl   16(%esp)                /* new_pil */
 342      -        pushl   %ecx                    /* lock addr */
 343      -        call    lock_set_spl_spin
 344      -        addl    $16, %esp
 345      -        ret
 346      -        SET_SIZE(lock_set_spl)
 347      -
 348      -#endif  /* !__amd64 */
 349      -
 350      -#endif  /* __lint */
 351      -
 352  180  /*
 353  181   * void
 354  182   * lock_init(lp)
 355  183   */
 356  184  
 357      -#if defined(__lint)
 358      -
 359      -/* ARGSUSED */
 360      -void
 361      -lock_init(lock_t *lp)
 362      -{}
 363      -
 364      -#else   /* __lint */
 365      -
 366      -#if defined(__amd64)
 367      -
 368  185          ENTRY(lock_init)
 369  186          movb    $0, (%rdi)
 370  187          ret
 371  188          SET_SIZE(lock_init)
 372  189  
 373      -#else
 374      -
 375      -        ENTRY(lock_init)
 376      -        movl    4(%esp), %eax
 377      -        movb    $0, (%eax)
 378      -        ret
 379      -        SET_SIZE(lock_init)
 380      -
 381      -#endif  /* !__amd64 */
 382      -
 383      -#endif  /* __lint */
 384      -
 385  190  /*
 386  191   * void
 387  192   * lock_set(lp)
 388  193   */
 389  194  
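Unlike lock_try(), lock_set() must not fail, so a missed exchange falls
into lock_set_spin() instead of returning. Modeled in C (sketch):

        extern void lock_set_spin(model_lock_t *);

        void
        model_lock_set(model_lock_t *lp)
        {
                if (atomic_exchange(lp, 0xff) != 0)     /* xchgb */
                        lock_set_spin(lp);              /* contended path */
        }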
 390      -#if defined(lint) || defined(__lint)
 391      -
 392      -/* ARGSUSED */
 393      -void
 394      -lock_set(lock_t *lp)
 395      -{}
 396      -
 397      -#else   /* __lint */
 398      -
 399      -#if defined(__amd64)
 400      -
 401  195          ENTRY(lock_set)
 402  196          movb    $-1, %dl
 403  197          xchgb   %dl, (%rdi)             /* try to set lock */
 404  198          testb   %dl, %dl                /* did we get it? */
 405  199          jnz     lock_set_spin           /* no, go to C for the hard case */
 406  200  .lock_set_lockstat_patch_point:
 407  201          ret
 408  202          movq    %rdi, %rsi              /* rsi = lock addr */
 409  203          movq    %gs:CPU_THREAD, %rdx    /* rdx = thread addr */
 410  204          movl    $LS_LOCK_SET_ACQUIRE, %edi
 411  205          jmp     lockstat_wrapper
 412  206          SET_SIZE(lock_set)
 413  207  
 414      -#else
 415      -
 416      -        ENTRY(lock_set)
 417      -        movl    4(%esp), %ecx           /* ecx = lock addr */
 418      -        movl    $-1, %edx
 419      -        xchgb   %dl, (%ecx)             /* try to set lock */
 420      -        testb   %dl, %dl                /* did we get it? */
 421      -        jnz     lock_set_spin           /* no, go to C for the hard case */
 422      -.lock_set_lockstat_patch_point:
 423      -        ret
 424      -        movl    %gs:CPU_THREAD, %edx    /* edx = thread addr */
 425      -        movl    $LS_LOCK_SET_ACQUIRE, %eax
 426      -        jmp     lockstat_wrapper
 427      -        SET_SIZE(lock_set)
 428      -
 429      -#endif  /* !__amd64 */
 430      -
 431      -#endif  /* __lint */
 432      -
 433  208  /*
 434  209   * lock_clear_splx(lp, s)
 435  210   */
 436  211  
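Functionally this is the release store followed by an splx(); the
"jmp 0f" below is a patchable two-byte no-op that lockstat can redirect
to the probe tail. In C (sketch):

        extern void splx(int pil);

        void
        model_lock_clear_splx(model_lock_t *lp, int s)
        {
                atomic_store_explicit(lp, 0, memory_order_release);
                splx(s);                        /* restore saved PIL */
        }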
 437      -#if defined(lint) || defined(__lint)
 438      -
 439      -/* ARGSUSED */
 440      -void
 441      -lock_clear_splx(lock_t *lp, int s)
 442      -{}
 443      -
 444      -#else   /* __lint */
 445      -
 446      -#if defined(__amd64)
 447      -
 448  212          ENTRY(lock_clear_splx)
 449  213          movb    $0, (%rdi)              /* clear lock */
 450  214  .lock_clear_splx_lockstat_patch_point:
 451  215          jmp     0f
 452  216  0:
 453  217          movl    %esi, %edi              /* arg for splx */
 454  218          jmp     splx                    /* let splx do its thing */
 455  219  .lock_clear_splx_lockstat:
 456  220          pushq   %rbp                    /* align stack properly */
 457  221          movq    %rsp, %rbp
[ 1 line elided ]
 459  223          movq    %rdi, 8(%rsp)           /* save lock ptr across splx call */
 460  224          movl    %esi, %edi              /* arg for splx */
 461  225          call    splx                    /* lower the priority */
 462  226          movq    8(%rsp), %rsi           /* rsi = lock ptr */
 463  227          leave                           /* unwind stack */
 464  228          movq    %gs:CPU_THREAD, %rdx    /* rdx = thread addr */
 465  229          movl    $LS_LOCK_CLEAR_SPLX_RELEASE, %edi
 466  230          jmp     lockstat_wrapper
 467  231          SET_SIZE(lock_clear_splx)
 468  232  
 469      -#else
 470      -
 471      -        ENTRY(lock_clear_splx)
 472      -        movl    4(%esp), %eax           /* eax = lock addr */
 473      -        movb    $0, (%eax)              /* clear lock */
 474      -.lock_clear_splx_lockstat_patch_point:
 475      -        jmp     0f
 476      -0:
 477      -        movl    8(%esp), %edx           /* edx = desired pil */
 478      -        movl    %edx, 4(%esp)           /* set spl arg up for splx */
 479      -        jmp     splx                    /* let splx do it's thing */
 480      -.lock_clear_splx_lockstat:
 481      -        movl    8(%esp), %edx           /* edx = desired pil */
 482      -        pushl   %ebp                    /* set up stack frame */
 483      -        movl    %esp, %ebp
 484      -        pushl   %edx
 485      -        call    splx
 486      -        leave                           /* unwind stack */
 487      -        movl    4(%esp), %ecx           /* ecx = lock pointer */
 488      -        movl    %gs:CPU_THREAD, %edx    /* edx = thread addr */
 489      -        movl    $LS_LOCK_CLEAR_SPLX_RELEASE, %eax
 490      -        jmp     lockstat_wrapper
 491      -        SET_SIZE(lock_clear_splx)
 492      -
 493      -#endif  /* !__amd64 */
 494      -
 495  233  #if defined(__GNUC_AS__)
 496  234  #define LOCK_CLEAR_SPLX_LOCKSTAT_PATCH_VAL      \
 497  235          (.lock_clear_splx_lockstat - .lock_clear_splx_lockstat_patch_point - 2)
 498  236  
 499  237  #define LOCK_CLEAR_SPLX_LOCKSTAT_PATCH_POINT    \
 500  238          (.lock_clear_splx_lockstat_patch_point + 1)
 501  239  #else
 502  240  #define LOCK_CLEAR_SPLX_LOCKSTAT_PATCH_VAL      \
 503  241          [.lock_clear_splx_lockstat - .lock_clear_splx_lockstat_patch_point - 2]
 504  242  
 505  243  #define LOCK_CLEAR_SPLX_LOCKSTAT_PATCH_POINT    \
 506  244          [.lock_clear_splx_lockstat_patch_point + 1]
 507  245  #endif
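A note on the arithmetic above: .lock_clear_splx_lockstat_patch_point
labels the two-byte short jmp (an opcode byte followed by an 8-bit
displacement), so PATCH_POINT adds 1 to address the displacement byte,
and PATCH_VAL subtracts 2 because a rel8 displacement is measured from
the end of the instruction:
disp = .lock_clear_splx_lockstat - (.lock_clear_splx_lockstat_patch_point + 2).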
 508  246  
 509      -#endif  /* __lint */
 510      -
 511  247  /*
 512  248   * mutex_enter() and mutex_exit().
 513  249   *
 514  250   * These routines handle the simple cases of mutex_enter() (adaptive
 515  251   * lock, not held) and mutex_exit() (adaptive lock, held, no waiters).
 516  252   * If anything complicated is going on we punt to mutex_vector_enter().
 517  253   *
 518  254   * mutex_tryenter() is similar to mutex_enter() but returns zero if
 519  255   * the lock cannot be acquired, nonzero on success.
 520  256   *
[ 3 lines elided ]
 524  260   * mutex preemption by examining the trapped PC in the interrupt path.
 525  261   * If we interrupt a thread in mutex_exit() that has not yet cleared
 526  262   * the lock, cmnint() resets its PC back to the beginning of
 527  263   * mutex_exit() so it will check again for waiters when it resumes.
 528  264   *
 529  265   * The lockstat code below is activated when the lockstat driver
 530  266   * calls lockstat_hot_patch() to hot-patch the kernel mutex code.
 531  267   * Note that we don't need to test lockstat_event_mask here -- we won't
 532  268   * patch this code in unless we're gathering ADAPTIVE_HOLD lockstats.
 533  269   */
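Everything above compresses to one compare-and-swap of curthread into
the owner word. A user-space C11 model of the pair (curthread(),
mutex_vector_enter() and mutex_vector_exit() stand in for the kernel's;
note the real mutex_exit() avoids an atomic by using the restartable
critical region just described, so the CAS in the exit model is only
the portable equivalent):

        #include <stdatomic.h>
        #include <stdint.h>

        typedef _Atomic uintptr_t model_kmutex_t;       /* owner word */

        extern uintptr_t curthread(void);
        extern void mutex_vector_enter(model_kmutex_t *);
        extern void mutex_vector_exit(model_kmutex_t *);

        void
        model_mutex_enter(model_kmutex_t *lp)
        {
                uintptr_t unheld = 0;
                /* lock cmpxchg: 0 -> curthread, or punt to C */
                if (!atomic_compare_exchange_strong(lp, &unheld, curthread()))
                        mutex_vector_enter(lp);
        }

        void
        model_mutex_exit(model_kmutex_t *lp)
        {
                uintptr_t self = curthread();
                /* owner check and "clear owner AND lock" in one step */
                if (!atomic_compare_exchange_strong(lp, &self, 0))
                        mutex_vector_exit(lp);  /* waiters or wrong owner */
        }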
 534      -#if defined(lint) || defined(__lint)
 535  270  
 536      -/* ARGSUSED */
 537      -void
 538      -mutex_enter(kmutex_t *lp)
 539      -{}
 540      -
 541      -/* ARGSUSED */
 542      -int
 543      -mutex_tryenter(kmutex_t *lp)
 544      -{ return (0); }
 545      -
 546      -/* ARGSUSED */
 547      -int
 548      -mutex_adaptive_tryenter(mutex_impl_t *lp)
 549      -{ return (0); }
 550      -
 551      -/* ARGSUSED */
 552      -void
 553      -mutex_exit(kmutex_t *lp)
 554      -{}
 555      -
 556      -#else
 557      -
 558      -#if defined(__amd64)
 559      -
 560  271          ENTRY_NP(mutex_enter)
 561  272          movq    %gs:CPU_THREAD, %rdx            /* rdx = thread ptr */
 562  273          xorl    %eax, %eax                      /* rax = 0 (unheld adaptive) */
 563  274          lock
 564  275          cmpxchgq %rdx, (%rdi)
 565  276          jnz     mutex_vector_enter
 566  277  .mutex_enter_lockstat_patch_point:
 567  278  #if defined(OPTERON_WORKAROUND_6323525)
 568  279  .mutex_enter_6323525_patch_point:
 569  280          ret                                     /* nop space for lfence */
[ 141 lines elided ]
 711  422          jmp     lockstat_wrapper
 712  423          SET_SIZE(mutex_exit)
 713  424  
 714  425          .globl  mutex_exit_critical_size
 715  426          .type   mutex_exit_critical_size, @object
 716  427          .align  CPTRSIZE
 717  428  mutex_exit_critical_size:
 718  429          .quad   .mutex_exit_critical_end - mutex_exit_critical_start
 719  430          SET_SIZE(mutex_exit_critical_size)
 720  431  
 721      -#else
 722      -
 723      -        ENTRY_NP(mutex_enter)
 724      -        movl    %gs:CPU_THREAD, %edx            /* edx = thread ptr */
 725      -        movl    4(%esp), %ecx                   /* ecx = lock ptr */
 726      -        xorl    %eax, %eax                      /* eax = 0 (unheld adaptive) */
 727      -        lock
 728      -        cmpxchgl %edx, (%ecx)
 729      -        jnz     mutex_vector_enter
 730      -#if defined(OPTERON_WORKAROUND_6323525)
 731      -.mutex_enter_lockstat_patch_point:
 732      -.mutex_enter_6323525_patch_point:
 733      -        ret                                     /* nop space for lfence */
 734      -        nop
 735      -        nop
 736      -.mutex_enter_lockstat_6323525_patch_point:      /* new patch point if lfence */
 737      -        nop
 738      -#else   /* OPTERON_WORKAROUND_6323525 */
 739      -.mutex_enter_lockstat_patch_point:
 740      -        ret
 741      -#endif  /* OPTERON_WORKAROUND_6323525 */
 742      -        movl    $LS_MUTEX_ENTER_ACQUIRE, %eax
 743      -        ALTENTRY(lockstat_wrapper)      /* expects edx=thread, ecx=lock, */
 744      -                                        /*   eax=lockstat event */
 745      -        pushl   %ebp                            /* buy a frame */
 746      -        movl    %esp, %ebp
 747      -        incb    T_LOCKSTAT(%edx)                /* curthread->t_lockstat++ */
 748      -        pushl   %edx                            /* save thread pointer   */
 749      -        movl    $lockstat_probemap, %edx
 750      -        movl    (%edx, %eax, DTRACE_IDSIZE), %eax
 751      -        testl   %eax, %eax                      /* check for non-zero probe */
 752      -        jz      1f
 753      -        pushl   %ecx                            /* push lock */
 754      -        pushl   %eax                            /* push probe ID */
 755      -        call    *lockstat_probe
 756      -        addl    $8, %esp
 757      -1:
 758      -        popl    %edx                            /* restore thread pointer */
 759      -        decb    T_LOCKSTAT(%edx)                /* curthread->t_lockstat-- */
 760      -        movl    $1, %eax                        /* return success if tryenter */
 761      -        popl    %ebp                            /* pop off frame */
 762      -        ret
 763      -        SET_SIZE(lockstat_wrapper)
 764      -        SET_SIZE(mutex_enter)
 765      -
 766      -        ENTRY(lockstat_wrapper_arg)     /* expects edx=thread, ecx=lock, */
 767      -                                        /* eax=lockstat event, pushed arg */
 768      -        incb    T_LOCKSTAT(%edx)                /* curthread->t_lockstat++ */
 769      -        pushl   %edx                            /* save thread pointer   */
 770      -        movl    $lockstat_probemap, %edx
 771      -        movl    (%edx, %eax, DTRACE_IDSIZE), %eax
 772      -        testl   %eax, %eax                      /* check for non-zero probe */
 773      -        jz      1f
 774      -        pushl   %ebp                            /* save %ebp */
 775      -        pushl   8(%esp)                         /* push arg1 */
 776      -        movl    %ebp, 12(%esp)                  /* fake up the stack frame */
 777      -        movl    %esp, %ebp                      /* fake up base pointer */
 778      -        addl    $12, %ebp                       /* adjust faked base pointer */
 779      -        pushl   %ecx                            /* push lock */
 780      -        pushl   %eax                            /* push probe ID */
 781      -        call    *lockstat_probe
 782      -        addl    $12, %esp                       /* adjust for arguments */
 783      -        popl    %ebp                            /* pop frame */
 784      -1:
 785      -        popl    %edx                            /* restore thread pointer */
 786      -        decb    T_LOCKSTAT(%edx)                /* curthread->t_lockstat-- */
 787      -        movl    $1, %eax                        /* return success if tryenter */
 788      -        addl    $4, %esp                        /* pop argument */
 789      -        ret
 790      -        SET_SIZE(lockstat_wrapper_arg)
 791      -
 792      -
 793      -        ENTRY(mutex_tryenter)
 794      -        movl    %gs:CPU_THREAD, %edx            /* edx = thread ptr */
 795      -        movl    4(%esp), %ecx                   /* ecx = lock ptr */
 796      -        xorl    %eax, %eax                      /* eax = 0 (unheld adaptive) */
 797      -        lock
 798      -        cmpxchgl %edx, (%ecx)
 799      -        jnz     mutex_vector_tryenter
 800      -        movl    %ecx, %eax
 801      -#if defined(OPTERON_WORKAROUND_6323525)
 802      -.mutex_tryenter_lockstat_patch_point:
 803      -.mutex_tryenter_6323525_patch_point:
 804      -        ret                                     /* nop space for lfence */
 805      -        nop
 806      -        nop
 807      -.mutex_tryenter_lockstat_6323525_patch_point:   /* new patch point if lfence */
 808      -        nop
 809      -#else   /* OPTERON_WORKAROUND_6323525 */
 810      -.mutex_tryenter_lockstat_patch_point:
 811      -        ret
 812      -#endif  /* OPTERON_WORKAROUND_6323525 */
 813      -        movl    $LS_MUTEX_ENTER_ACQUIRE, %eax
 814      -        jmp     lockstat_wrapper
 815      -        SET_SIZE(mutex_tryenter)
 816      -
 817      -        ENTRY(mutex_adaptive_tryenter)
 818      -        movl    %gs:CPU_THREAD, %edx            /* edx = thread ptr */
 819      -        movl    4(%esp), %ecx                   /* ecx = lock ptr */
 820      -        xorl    %eax, %eax                      /* eax = 0 (unheld adaptive) */
 821      -        lock
 822      -        cmpxchgl %edx, (%ecx)
 823      -        jnz     0f
 824      -        movl    %ecx, %eax
 825      -#if defined(OPTERON_WORKAROUND_6323525)
 826      -.mutex_atryenter_6323525_patch_point:
 827      -        ret                                     /* nop space for lfence */
 828      -        nop
 829      -        nop
 830      -        nop
 831      -#else   /* OPTERON_WORKAROUND_6323525 */
 832      -        ret
 833      -#endif  /* OPTERON_WORKAROUND_6323525 */
 834      -0:
 835      -        xorl    %eax, %eax
 836      -        ret
 837      -        SET_SIZE(mutex_adaptive_tryenter)
 838      -
 839      -        .globl  mutex_owner_running_critical_start
 840      -
 841      -        ENTRY(mutex_owner_running)
 842      -mutex_owner_running_critical_start:
 843      -        movl    4(%esp), %eax           /* get owner field */
 844      -        movl    (%eax), %eax
 845      -        andl    $MUTEX_THREAD, %eax     /* remove waiters bit */
 846      -        cmpl    $0, %eax                /* if free, skip */
 847      -        je      1f                      /* go return 0 */
 848      -        movl    T_CPU(%eax), %ecx       /* get owner->t_cpu */
 849      -        movl    CPU_THREAD(%ecx), %edx  /* get t_cpu->cpu_thread */
 850      -.mutex_owner_running_critical_end:
 851      -        cmpl    %eax, %edx      /* owner == running thread? */
 852      -        je      2f              /* yes, go return cpu */
 853      -1:
 854      -        xorl    %eax, %eax      /* return 0 */
 855      -        ret
 856      -2:
 857      -        movl    %ecx, %eax      /* return cpu */
 858      -        ret
 859      -
 860      -        SET_SIZE(mutex_owner_running)
 861      -
 862      -        .globl  mutex_owner_running_critical_size
 863      -        .type   mutex_owner_running_critical_size, @object
 864      -        .align  CPTRSIZE
 865      -mutex_owner_running_critical_size:
 866      -        .long   .mutex_owner_running_critical_end - mutex_owner_running_critical_start
 867      -        SET_SIZE(mutex_owner_running_critical_size)
 868      -
 869      -        .globl  mutex_exit_critical_start
 870      -
 871      -        ENTRY(mutex_exit)
 872      -mutex_exit_critical_start:              /* If interrupted, restart here */
 873      -        movl    %gs:CPU_THREAD, %edx
 874      -        movl    4(%esp), %ecx
 875      -        cmpl    %edx, (%ecx)
 876      -        jne     mutex_vector_exit               /* wrong type or wrong owner */
 877      -        movl    $0, (%ecx)                      /* clear owner AND lock */
 878      -.mutex_exit_critical_end:
 879      -.mutex_exit_lockstat_patch_point:
 880      -        ret
 881      -        movl    $LS_MUTEX_EXIT_RELEASE, %eax
 882      -        jmp     lockstat_wrapper
 883      -        SET_SIZE(mutex_exit)
 884      -
 885      -        .globl  mutex_exit_critical_size
 886      -        .type   mutex_exit_critical_size, @object
 887      -        .align  CPTRSIZE
 888      -mutex_exit_critical_size:
 889      -        .long   .mutex_exit_critical_end - mutex_exit_critical_start
 890      -        SET_SIZE(mutex_exit_critical_size)
 891      -
 892      -#endif  /* !__amd64 */
 893      -
 894      -#endif  /* __lint */
 895      -
 896  432  /*
 897  433   * rw_enter() and rw_exit().
 898  434   *
 899  435   * These routines handle the simple cases of rw_enter (write-locking an unheld
 900  436   * lock or read-locking a lock that's neither write-locked nor write-wanted)
 901  437   * and rw_exit (no waiters or not the last reader).  If anything complicated
 902  438   * is going on we punt to rw_enter_sleep() and rw_exit_wakeup(), respectively.
 903  439   */
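Both fast paths are a single cmpxchg on the rw_wwwh word: a reader adds
RW_READ_LOCK provided neither write bit is set, and a writer swings the
word from 0 to (curthread | RW_WRITE_LOCKED). A C11 sketch (the RW_*
constants and curthread() are stand-ins for the kernel's definitions;
the reader-side T_KPRI_REQ bookkeeping is omitted):

        #include <stdatomic.h>
        #include <stdint.h>

        typedef _Atomic uintptr_t model_krwlock_t;

        extern uintptr_t curthread(void);
        extern void rw_enter_sleep(model_krwlock_t *, int);

        void
        model_rw_enter(model_krwlock_t *lp, int rw)
        {
                uintptr_t old;

                if (rw == RW_WRITER) {
                        old = 0;                /* must be fully unheld */
                        if (!atomic_compare_exchange_strong(lp, &old,
                            curthread() | RW_WRITE_LOCKED))
                                rw_enter_sleep(lp, rw);
                        return;
                }
                old = atomic_load(lp);          /* reader */
                if ((old & (RW_WRITE_LOCKED | RW_WRITE_WANTED)) != 0 ||
                    !atomic_compare_exchange_strong(lp, &old,
                    old + RW_READ_LOCK))
                        rw_enter_sleep(lp, rw);
        }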
 904      -#if defined(lint) || defined(__lint)
 905  440  
 906      -/* ARGSUSED */
 907      -void
 908      -rw_enter(krwlock_t *lp, krw_t rw)
 909      -{}
 910      -
 911      -/* ARGSUSED */
 912      -void
 913      -rw_exit(krwlock_t *lp)
 914      -{}
 915      -
 916      -#else   /* __lint */
 917      -
 918      -#if defined(__amd64)
 919      -
 920  441          ENTRY(rw_enter)
 921  442          movq    %gs:CPU_THREAD, %rdx            /* rdx = thread ptr */
 922  443          cmpl    $RW_WRITER, %esi
 923  444          je      .rw_write_enter
 924  445          incl    T_KPRI_REQ(%rdx)                /* THREAD_KPRI_REQUEST() */
 925  446          movq    (%rdi), %rax                    /* rax = old rw_wwwh value */
 926  447          testl   $RW_WRITE_LOCKED|RW_WRITE_WANTED, %eax
 927  448          jnz     rw_enter_sleep
 928  449          leaq    RW_READ_LOCK(%rax), %rdx        /* rdx = new rw_wwwh value */
 929  450          lock
[ 66 lines elided ]
 996  517          jnz     rw_exit_wakeup
 997  518  .rw_write_exit_lockstat_patch_point:
 998  519          ret
 999  520          movq    %gs:CPU_THREAD, %rcx            /* rcx = thread ptr */
1000  521          movq    %rdi, %rsi                      /* rsi - lock ptr */
1001  522          movl    $LS_RW_EXIT_RELEASE, %edi
1002  523          movl    $RW_WRITER, %edx
1003  524          jmp     lockstat_wrapper_arg
1004  525          SET_SIZE(rw_exit)
1005  526  
1006      -#else
1007      -
1008      -        ENTRY(rw_enter)
1009      -        movl    %gs:CPU_THREAD, %edx            /* edx = thread ptr */
1010      -        movl    4(%esp), %ecx                   /* ecx = lock ptr */
1011      -        cmpl    $RW_WRITER, 8(%esp)
1012      -        je      .rw_write_enter
1013      -        incl    T_KPRI_REQ(%edx)                /* THREAD_KPRI_REQUEST() */
1014      -        movl    (%ecx), %eax                    /* eax = old rw_wwwh value */
1015      -        testl   $RW_WRITE_LOCKED|RW_WRITE_WANTED, %eax
1016      -        jnz     rw_enter_sleep
1017      -        leal    RW_READ_LOCK(%eax), %edx        /* edx = new rw_wwwh value */
1018      -        lock
1019      -        cmpxchgl %edx, (%ecx)                   /* try to grab read lock */
1020      -        jnz     rw_enter_sleep
1021      -.rw_read_enter_lockstat_patch_point:
1022      -        ret
1023      -        movl    %gs:CPU_THREAD, %edx            /* edx = thread ptr */
1024      -        movl    $LS_RW_ENTER_ACQUIRE, %eax
1025      -        pushl   $RW_READER
1026      -        jmp     lockstat_wrapper_arg
1027      -.rw_write_enter:
1028      -        orl     $RW_WRITE_LOCKED, %edx          /* edx = write-locked value */
1029      -        xorl    %eax, %eax                      /* eax = unheld value */
1030      -        lock
1031      -        cmpxchgl %edx, (%ecx)                   /* try to grab write lock */
1032      -        jnz     rw_enter_sleep
1033      -
1034  527  #if defined(OPTERON_WORKAROUND_6323525)
1035      -.rw_write_enter_lockstat_patch_point:
1036      -.rw_write_enter_6323525_patch_point:
1037      -        ret
1038      -        nop
1039      -        nop
1040      -.rw_write_enter_lockstat_6323525_patch_point:
1041      -        nop
1042      -#else   /* OPTERON_WORKAROUND_6323525 */
1043      -.rw_write_enter_lockstat_patch_point:
1044      -        ret
1045      -#endif  /* OPTERON_WORKAROUND_6323525 */
1046  528  
1047      -        movl    %gs:CPU_THREAD, %edx            /* edx = thread ptr */
1048      -        movl    $LS_RW_ENTER_ACQUIRE, %eax
1049      -        pushl   $RW_WRITER
1050      -        jmp     lockstat_wrapper_arg
1051      -        SET_SIZE(rw_enter)
1052      -
1053      -        ENTRY(rw_exit)
1054      -        movl    4(%esp), %ecx                   /* ecx = lock ptr */
1055      -        movl    (%ecx), %eax                    /* eax = old rw_wwwh value */
1056      -        cmpl    $RW_READ_LOCK, %eax             /* single-reader, no waiters? */
1057      -        jne     .rw_not_single_reader
1058      -        xorl    %edx, %edx                      /* edx = new value (unheld) */
1059      -.rw_read_exit:
1060      -        lock
1061      -        cmpxchgl %edx, (%ecx)                   /* try to drop read lock */
1062      -        jnz     rw_exit_wakeup
1063      -        movl    %gs:CPU_THREAD, %edx            /* edx = thread ptr */
1064      -        decl    T_KPRI_REQ(%edx)                /* THREAD_KPRI_RELEASE() */
1065      -.rw_read_exit_lockstat_patch_point:
1066      -        ret
1067      -        movl    $LS_RW_EXIT_RELEASE, %eax
1068      -        pushl   $RW_READER
1069      -        jmp     lockstat_wrapper_arg
1070      -.rw_not_single_reader:
1071      -        testl   $RW_WRITE_LOCKED, %eax  /* write-locked or write-wanted? */
1072      -        jnz     .rw_write_exit
1073      -        leal    -RW_READ_LOCK(%eax), %edx       /* edx = new value */
1074      -        cmpl    $RW_READ_LOCK, %edx
1075      -        jge     .rw_read_exit           /* not last reader, safe to drop */
1076      -        jmp     rw_exit_wakeup                  /* last reader with waiters */
1077      -.rw_write_exit:
1078      -        movl    %gs:CPU_THREAD, %eax            /* eax = thread ptr */
1079      -        xorl    %edx, %edx                      /* edx = new value (unheld) */
1080      -        orl     $RW_WRITE_LOCKED, %eax          /* eax = write-locked value */
1081      -        lock
1082      -        cmpxchgl %edx, (%ecx)                   /* try to drop read lock */
1083      -        jnz     rw_exit_wakeup
1084      -.rw_write_exit_lockstat_patch_point:
1085      -        ret
1086      -        movl    %gs:CPU_THREAD, %edx            /* edx = thread ptr */
1087      -        movl    $LS_RW_EXIT_RELEASE, %eax
1088      -        pushl   $RW_WRITER
1089      -        jmp     lockstat_wrapper_arg
1090      -        SET_SIZE(rw_exit)
1091      -
1092      -#endif  /* !__amd64 */
1093      -
1094      -#endif  /* __lint */
1095      -
1096      -#if defined(OPTERON_WORKAROUND_6323525)
1097      -#if defined(lint) || defined(__lint)
1098      -
1099      -int     workaround_6323525_patched;
1100      -
1101      -void
1102      -patch_workaround_6323525(void)
1103      -{}
1104      -
1105      -#else   /* lint */
1106      -
1107  529  /*
1108  530   * If it is necessary to patch the lock enter routines with the lfence
1109  531   * workaround, workaround_6323525_patched is set to a non-zero value so that
 1110  532   * the lockstat_hot_patch routine can patch to the new location of the 'ret'
1111  533   * instruction.
1112  534   */
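Concretely, each affected enter path reserves four bytes (ret; nop;
nop; nop) so that HOT_MUTEX_PATCH below can copy the four bytes at
_lfence_insn (lfence; ret, i.e. 0F AE E8 C3) over them. The ret thereby
moves three bytes forward, onto the ..._lockstat_6323525_patch_point
label, which is why lockstat_hot_patch() consults
workaround_6323525_patched before choosing which patch point to rewrite.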
1113  535          DGDEF3(workaround_6323525_patched, 4, 4)
1114  536          .long   0
1115  537  
1116      -#if defined(__amd64)
1117      -
1118  538  #define HOT_MUTEX_PATCH(srcaddr, dstaddr, size) \
1119  539          movq    $size, %rbx;                    \
1120  540          movq    $dstaddr, %r13;                 \
1121  541          addq    %rbx, %r13;                     \
1122  542          movq    $srcaddr, %r12;                 \
1123  543          addq    %rbx, %r12;                     \
1124  544  0:                                              \
1125  545          decq    %r13;                           \
1126  546          decq    %r12;                           \
1127  547          movzbl  (%r12), %esi;                   \
[ 43 lines elided ]
1171  591          popq    %r12
1172  592          movq    %rbp, %rsp
1173  593          popq    %rbp
1174  594          ret
1175  595  _lfence_insn:
1176  596          lfence
1177  597          ret
1178  598          SET_SIZE(patch_workaround_6323525)
1179  599  
1180  600  
1181      -#else   /* __amd64 */
1182      -
1183      -#define HOT_MUTEX_PATCH(srcaddr, dstaddr, size) \
1184      -        movl    $size, %ebx;                    \
1185      -        movl    $srcaddr, %esi;                 \
1186      -        addl    %ebx, %esi;                     \
1187      -        movl    $dstaddr, %edi;                 \
1188      -        addl    %ebx, %edi;                     \
1189      -0:                                              \
1190      -        decl    %esi;                           \
1191      -        decl    %edi;                           \
1192      -        pushl   $1;                             \
1193      -        movzbl  (%esi), %eax;                   \
1194      -        pushl   %eax;                           \
1195      -        pushl   %edi;                           \
1196      -        call    hot_patch_kernel_text;          \
1197      -        addl    $12, %esp;                      \
1198      -        decl    %ebx;                           \
1199      -        testl   %ebx, %ebx;                     \
1200      -        jg      0b;
1201      -
1202      -
1203      -        /* see comments above */
1204      -        ENTRY_NP(patch_workaround_6323525)
1205      -        pushl   %ebp
1206      -        movl    %esp, %ebp
1207      -        pushl   %ebx
1208      -        pushl   %esi
1209      -        pushl   %edi
1210      -
1211      -        movl    $1, workaround_6323525_patched
1212      -
1213      -        HOT_MUTEX_PATCH(_lfence_insn, .mutex_enter_6323525_patch_point, 4)
1214      -        HOT_MUTEX_PATCH(_lfence_insn, .mutex_tryenter_6323525_patch_point, 4)
1215      -        HOT_MUTEX_PATCH(_lfence_insn, .mutex_atryenter_6323525_patch_point, 4)
1216      -        HOT_MUTEX_PATCH(_lfence_insn, .rw_write_enter_6323525_patch_point, 4)
1217      -
1218      -        popl    %edi
1219      -        popl    %esi
1220      -        popl    %ebx
1221      -        movl    %ebp, %esp
1222      -        popl    %ebp
1223      -        ret
1224      -_lfence_insn:
1225      -        .byte   0xf, 0xae, 0xe8         / [lfence instruction]
1226      -        ret
1227      -        SET_SIZE(patch_workaround_6323525)
1228      -
1229      -#endif  /* !__amd64 */
1230      -#endif  /* !lint */
1231  601  #endif  /* OPTERON_WORKAROUND_6323525 */
1232  602  
1233  603  
1234      -#if defined(lint) || defined(__lint)
1235      -
1236      -void
1237      -lockstat_hot_patch(void)
1238      -{}
1239      -
1240      -#else
1241      -
1242      -#if defined(__amd64)
1243      -
1244  604  #define HOT_PATCH(addr, event, active_instr, normal_instr, len) \
1245  605          movq    $normal_instr, %rsi;            \
1246  606          movq    $active_instr, %rdi;            \
1247  607          leaq    lockstat_probemap(%rip), %rax;  \
1248  608          movl    _MUL(event, DTRACE_IDSIZE)(%rax), %eax; \
1249  609          testl   %eax, %eax;                     \
1250  610          jz      9f;                             \
1251  611          movq    %rdi, %rsi;                     \
1252  612  9:                                              \
1253  613          movq    $len, %rdx;                     \
1254  614          movq    $addr, %rdi;                    \
1255  615          call    hot_patch_kernel_text
1256  616  
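Per event, the macro picks the active byte (a NOP, so execution falls
through into the lockstat tail behind the patch point) when the probe
is wired up, and the normal byte (a RET) otherwise, then hands it to
hot_patch_kernel_text(). As C, assuming the (addr, instr, size)
argument order the register setup above implies:

        extern unsigned int lockstat_probemap[];
        extern void hot_patch_kernel_text(char *, unsigned int,
            unsigned int);

        static void
        model_hot_patch(char *addr, int event, unsigned int active,
            unsigned int normal, unsigned int len)
        {
                unsigned int instr =
                    lockstat_probemap[event] != 0 ? active : normal;
                hot_patch_kernel_text(addr, instr, len);
        }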
1257      -#else
1258      -
1259      -#define HOT_PATCH(addr, event, active_instr, normal_instr, len) \
1260      -        movl    $normal_instr, %ecx;            \
1261      -        movl    $active_instr, %edx;            \
1262      -        movl    $lockstat_probemap, %eax;       \
1263      -        movl    _MUL(event, DTRACE_IDSIZE)(%eax), %eax; \
1264      -        testl   %eax, %eax;                     \
1265      -        jz      . + 4;                          \
1266      -        movl    %edx, %ecx;                     \
1267      -        pushl   $len;                           \
1268      -        pushl   %ecx;                           \
1269      -        pushl   $addr;                          \
1270      -        call    hot_patch_kernel_text;          \
1271      -        addl    $12, %esp;
1272      -
1273      -#endif  /* !__amd64 */
1274      -
1275  617          ENTRY(lockstat_hot_patch)
1276      -#if defined(__amd64)
1277  618          pushq   %rbp                    /* align stack properly */
1278  619          movq    %rsp, %rbp
1279      -#endif  /* __amd64 */
1280  620  
1281  621  #if defined(OPTERON_WORKAROUND_6323525)
1282  622          cmpl    $0, workaround_6323525_patched
1283  623          je      1f
1284  624          HOT_PATCH(.mutex_enter_lockstat_6323525_patch_point,
1285  625                  LS_MUTEX_ENTER_ACQUIRE, NOP_INSTR, RET_INSTR, 1)
1286  626          HOT_PATCH(.mutex_tryenter_lockstat_6323525_patch_point,
1287  627                  LS_MUTEX_ENTER_ACQUIRE, NOP_INSTR, RET_INSTR, 1)
1288  628          HOT_PATCH(.rw_write_enter_lockstat_6323525_patch_point,
1289  629                  LS_RW_ENTER_ACQUIRE, NOP_INSTR, RET_INSTR, 1)
[ 27 lines elided ]
1317  657          HOT_PATCH(.lock_try_lockstat_patch_point,
1318  658                  LS_LOCK_TRY_ACQUIRE, NOP_INSTR, RET_INSTR, 1)
1319  659          HOT_PATCH(.lock_clear_lockstat_patch_point,
1320  660                  LS_LOCK_CLEAR_RELEASE, NOP_INSTR, RET_INSTR, 1)
1321  661          HOT_PATCH(.lock_set_spl_lockstat_patch_point,
1322  662                  LS_LOCK_SET_SPL_ACQUIRE, NOP_INSTR, RET_INSTR, 1)
1323  663  
1324  664          HOT_PATCH(LOCK_CLEAR_SPLX_LOCKSTAT_PATCH_POINT,
1325  665                  LS_LOCK_CLEAR_SPLX_RELEASE,
1326  666                  LOCK_CLEAR_SPLX_LOCKSTAT_PATCH_VAL, 0, 1);
1327      -#if defined(__amd64)
1328  667          leave                   /* unwind stack */
1329      -#endif  /* __amd64 */
1330  668          ret
1331  669          SET_SIZE(lockstat_hot_patch)
1332  670  
1333      -#endif  /* __lint */
1334      -
1335      -#if defined(lint) || defined(__lint)
1336      -
1337      -/* XX64 membar_*() should be inlines */
1338      -
1339      -void
1340      -membar_sync(void)
1341      -{}
1342      -
1343      -void
1344      -membar_enter(void)
1345      -{}
1346      -
1347      -void
1348      -membar_exit(void)
1349      -{}
1350      -
1351      -void
1352      -membar_producer(void)
1353      -{}
1354      -
1355      -void
1356      -membar_consumer(void)
1357      -{}
1358      -
1359      -#else   /* __lint */
1360      -
1361      -#if defined(__amd64)
1362      -
1363  671          ENTRY(membar_enter)
1364  672          ALTENTRY(membar_exit)
1365  673          ALTENTRY(membar_sync)
1366  674          mfence                  /* lighter weight than lock; xorq $0,(%rsp) */
1367  675          ret
1368  676          SET_SIZE(membar_sync)
1369  677          SET_SIZE(membar_exit)
1370  678          SET_SIZE(membar_enter)
1371  679  
1372  680          ENTRY(membar_producer)
1373  681          sfence
1374  682          ret
1375  683          SET_SIZE(membar_producer)
1376  684  
1377  685          ENTRY(membar_consumer)
1378  686          lfence
1379  687          ret
1380  688          SET_SIZE(membar_consumer)
1381  689  
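The canonical pairing for the last two is flag-based publication; a
user-level sketch with stand-in shared variables:

        extern void membar_producer(void);
        extern void membar_consumer(void);

        static int model_data;
        static volatile int model_ready;

        void
        model_publish(int v)
        {
                model_data = v;         /* payload first */
                membar_producer();      /* sfence: payload before flag */
                model_ready = 1;
        }

        int
        model_consume(void)
        {
                while (model_ready == 0)
                        continue;       /* wait for the flag */
                membar_consumer();      /* lfence: flag before payload */
                return (model_data);
        }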
1382      -#else
1383      -
1384      -        ENTRY(membar_enter)
1385      -        ALTENTRY(membar_exit)
1386      -        ALTENTRY(membar_sync)
1387      -        lock
1388      -        xorl    $0, (%esp)
1389      -        ret
1390      -        SET_SIZE(membar_sync)
1391      -        SET_SIZE(membar_exit)
1392      -        SET_SIZE(membar_enter)
1393      -
1394  690  /*
1395      - * On machines that support sfence and lfence, these
1396      - * memory barriers can be more precisely implemented
1397      - * without causing the whole world to stop
1398      - */
1399      -        ENTRY(membar_producer)
1400      -        .globl  _patch_sfence_ret
1401      -_patch_sfence_ret:                      /* c.f. membar #StoreStore */
1402      -        lock
1403      -        xorl    $0, (%esp)
1404      -        ret
1405      -        SET_SIZE(membar_producer)
1406      -
1407      -        ENTRY(membar_consumer)
1408      -        .globl  _patch_lfence_ret
1409      -_patch_lfence_ret:                      /* c.f. membar #LoadLoad */
1410      -        lock
1411      -        xorl    $0, (%esp)
1412      -        ret
1413      -        SET_SIZE(membar_consumer)
1414      -
1415      -#endif  /* !__amd64 */
1416      -
1417      -#endif  /* __lint */
1418      -
1419      -/*
1420  691   * thread_onproc()
1421  692   * Set thread in onproc state for the specified CPU.
1422  693   * Also set the thread lock pointer to the CPU's onproc lock.
1423  694   * Since the new lock isn't held, the store ordering is important.
1424  695   * If not done in assembler, the compiler could reorder the stores.
1425  696   */
1426      -#if defined(lint) || defined(__lint)
1427  697  
1428      -void
1429      -thread_onproc(kthread_id_t t, cpu_t *cp)
1430      -{
1431      -        t->t_state = TS_ONPROC;
1432      -        t->t_lockp = &cp->cpu_thread_lock;
1433      -}
1434      -
1435      -#else   /* __lint */
1436      -
1437      -#if defined(__amd64)
1438      -
1439  698          ENTRY(thread_onproc)
1440  699          addq    $CPU_THREAD_LOCK, %rsi  /* pointer to disp_lock while running */
1441  700          movl    $ONPROC_THREAD, T_STATE(%rdi)   /* set state to TS_ONPROC */
1442  701          movq    %rsi, T_LOCKP(%rdi)     /* store new lock pointer */
1443  702          ret
1444  703          SET_SIZE(thread_onproc)
1445  704  
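The lint stub removed above states the intent; the assembler version
exists purely to pin the order of the two stores. The same constraint
with explicit ordering (illustrative types; TS_ONPROC is the kernel
constant):

        #include <stdatomic.h>

        typedef struct model_kthread {
                int t_state;
                void *_Atomic t_lockp;
        } model_kthread_t;

        void
        model_thread_onproc(model_kthread_t *t, void *onproc_lock)
        {
                t->t_state = TS_ONPROC;
                /* release: the state store must be visible before anyone
                 * can find (and take) the new, unheld lock */
                atomic_store_explicit(&t->t_lockp, onproc_lock,
                    memory_order_release);
        }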
1446      -#else
1447      -
1448      -        ENTRY(thread_onproc)
1449      -        movl    4(%esp), %eax
1450      -        movl    8(%esp), %ecx
1451      -        addl    $CPU_THREAD_LOCK, %ecx  /* pointer to disp_lock while running */
1452      -        movl    $ONPROC_THREAD, T_STATE(%eax)   /* set state to TS_ONPROC */
1453      -        movl    %ecx, T_LOCKP(%eax)     /* store new lock pointer */
1454      -        ret
1455      -        SET_SIZE(thread_onproc)
1456      -
1457      -#endif  /* !__amd64 */
1458      -
1459      -#endif  /* __lint */
1460      -
1461  705  /*
1462  706   * mutex_delay_default(void)
1463  707   * Spins for approx a few hundred processor cycles and returns to caller.
1464  708   */
1465  709  
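The body is just an empty counted loop; the only subtlety is keeping a
compiler from deleting it, which assembler gets for free. Equivalent C
(illustrative; 92 matches the amd64 count below):

        void
        model_mutex_delay_default(void)
        {
                volatile long n = 92;
                while (--n > 0)
                        continue;       /* burn ~a few hundred cycles */
        }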
1466      -#if defined(lint) || defined(__lint)
1467      -
1468      -void
1469      -mutex_delay_default(void)
1470      -{}
1471      -
1472      -#else   /* __lint */
1473      -
1474      -#if defined(__amd64)
1475      -
1476  710          ENTRY(mutex_delay_default)
1477  711          movq    $92,%r11
1478  712  0:      decq    %r11
1479  713          jg      0b
1480  714          ret
1481  715          SET_SIZE(mutex_delay_default)
1482  716  
1483      -#else
1484      -
1485      -        ENTRY(mutex_delay_default)
1486      -        push    %ebp
1487      -        movl    %esp,%ebp
1488      -        andl    $-16,%esp
1489      -        push    %ebx
1490      -        movl    $93,%ebx
1491      -0:      decl    %ebx
1492      -        jg      0b
1493      -        pop     %ebx
1494      -        leave
1495      -        ret
1496      -        SET_SIZE(mutex_delay_default)
1497      -
1498      -#endif  /* !__amd64 */
1499      -#endif  /* __lint */
    