10924 Need mitigation of L1TF (CVE-2018-3646)
Reviewed by: Robert Mustacchi <rm@joyent.com>
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
Reviewed by: Peter Tribble <peter.tribble@gmail.com>

          --- old/usr/src/uts/intel/ia32/ml/swtch.s
          +++ new/usr/src/uts/intel/ia32/ml/swtch.s
[ 16 lines elided ]
  17   17   * information: Portions Copyright [yyyy] [name of copyright owner]
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  /*
  22   22   * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
  23   23   * Use is subject to license terms.
  24   24   */
  25   25  
  26   26  /*
  27      - * Copyright (c) 2018 Joyent, Inc.
       27 + * Copyright 2019 Joyent, Inc.
  28   28   */
  29   29  
  30   30  /*
  31   31   * Process switching routines.
  32   32   */
  33   33  
  34      -#if defined(__lint)
  35      -#include <sys/thread.h>
  36      -#include <sys/systm.h>
  37      -#include <sys/time.h>
  38      -#else   /* __lint */
  39      -#include "assym.h"
  40      -#endif  /* __lint */
  41      -
  42   34  #include <sys/asm_linkage.h>
  43   35  #include <sys/asm_misc.h>
  44   36  #include <sys/regset.h>
  45   37  #include <sys/privregs.h>
  46   38  #include <sys/stack.h>
  47   39  #include <sys/segments.h>
  48   40  #include <sys/psw.h>
  49   41  
       42 +#include "assym.h"
       43 +
  50   44  /*
  51   45   * resume(thread_id_t t);
  52   46   *
  53   47   * a thread can only run on one processor at a time. there
  54   48   * exists a window on MPs where the current thread on one
  55   49   * processor is capable of being dispatched by another processor.
  56   50   * some overlap between outgoing and incoming threads can happen
  57   51   * when they are the same thread. in this case where the threads
  58   52   * are the same, resume() on one processor will spin on the incoming
  59   53   * thread until resume() on the other processor has finished with
[ 7 lines elided ]
  67   61   * resume_from_intr() is called when the thread being resumed was not
  68   62   * passivated by resume (e.g. was interrupted).  This means that the
  69   63   * resume lock is already held and that a restore context is not needed.
  70   64   * Also, the MMU context is not changed on the resume in this case.
  71   65   *
  72   66   * resume_from_zombie() is the same as resume except the calling thread
  73   67   * is a zombie and must be put on the deathrow list after the CPU is
  74   68   * off the stack.
  75   69   */
  76   70  
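
For orientation, the C-level view of the entry points implemented in this file (the prototypes below are taken from the __lint stubs this change deletes; a sketch only, the assembly routines that follow are the real implementations):

        void resume(kthread_t *t);              /* full switch to thread t */
        void resume_from_intr(kthread_t *t);    /* t was interrupted: lock already held, no ctx restore, no MMU switch */
        void resume_from_zombie(kthread_t *t);  /* caller is a zombie, goes on deathrow once off its stack */
        void thread_start(void);                /* first frame of a newly created thread */
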
  77      -#if !defined(__lint)
  78      -
  79   71  #if LWP_PCB_FPU != 0
  80   72  #error LWP_PCB_FPU MUST be defined as 0 for code in swtch.s to work
  81   73  #endif  /* LWP_PCB_FPU != 0 */
  82   74  
  83      -#endif  /* !__lint */
  84      -
  85      -#if defined(__amd64)
  86      -
  87   75  /*
  88   76   * Save non-volatile regs other than %rsp (%rbx, %rbp, and %r12 - %r15)
  89   77   *
  90   78   * The stack frame must be created before the save of %rsp so that tracebacks
  91   79   * of swtch()ed-out processes show the process as having last called swtch().
  92   80   */
  93   81  #define SAVE_REGS(thread_t, retaddr)                    \
  94   82          movq    %rbp, T_RBP(thread_t);                  \
  95   83          movq    %rbx, T_RBX(thread_t);                  \
  96   84          movq    %r12, T_R12(thread_t);                  \
[ 49 lines elided ]
 146  134  #define STORE_INTR_START(thread_t)                      \
 147  135          testw   $T_INTR_THREAD, T_FLAGS(thread_t);      \
 148  136          jz      1f;                                     \
 149  137  0:                                                      \
 150  138          TSC_READ();                                     \
 151  139          movq    T_INTR_START(thread_t), %rax;           \
 152  140          cmpxchgq %r14, T_INTR_START(thread_t);          \
 153  141          jnz     0b;                                     \
 154  142  1:
 155  143  
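
In C terms, STORE_INTR_START is the following compare-and-swap loop (a sketch assuming the usual illumos tsc_read() and atomic_cas_64() interfaces; the assembly above is authoritative). Any interrupt taken between the tsc_read() and the store leaves a newer timestamp in t_intr_start, which makes the cmpxchg fail and the loop retry:

        if (t->t_flags & T_INTR_THREAD) {
                hrtime_t new, old;

                do {
                        new = tsc_read();
                        old = t->t_intr_start;
                } while (atomic_cas_64((uint64_t *)&t->t_intr_start,
                    (uint64_t)old, (uint64_t)new) != (uint64_t)old);
        }
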
 156      -#elif defined (__i386)
 157      -
 158      -/*
 159      - * Save non-volatile registers (%ebp, %esi, %edi and %ebx)
 160      - *
 161      - * The stack frame must be created before the save of %esp so that tracebacks
 162      - * of swtch()ed-out processes show the process as having last called swtch().
 163      - */
 164      -#define SAVE_REGS(thread_t, retaddr)                    \
 165      -        movl    %ebp, T_EBP(thread_t);                  \
 166      -        movl    %ebx, T_EBX(thread_t);                  \
 167      -        movl    %esi, T_ESI(thread_t);                  \
 168      -        movl    %edi, T_EDI(thread_t);                  \
 169      -        pushl   %ebp;                                   \
 170      -        movl    %esp, %ebp;                             \
 171      -        movl    %esp, T_SP(thread_t);                   \
 172      -        movl    retaddr, T_PC(thread_t);                \
 173      -        movl    8(%ebp), %edi;                          \
 174      -        pushl   %edi;                                   \
 175      -        call    __dtrace_probe___sched_off__cpu;        \
 176      -        addl    $CLONGSIZE, %esp
 177      -
 178      -/*
 179      - * Restore non-volatile registers (%ebp, %esi, %edi and %ebx)
 180      - *
 181      - * We don't do a 'leave,' because reloading %rsp/%rbp from the label_t
 182      - * already has the effect of putting the stack back the way it was when
 183      - * we came in.
 184      - */
 185      -#define RESTORE_REGS(scratch_reg)                       \
 186      -        movl    %gs:CPU_THREAD, scratch_reg;            \
 187      -        movl    T_EBP(scratch_reg), %ebp;               \
 188      -        movl    T_EBX(scratch_reg), %ebx;               \
 189      -        movl    T_ESI(scratch_reg), %esi;               \
 190      -        movl    T_EDI(scratch_reg), %edi
 191      -
 192      -/*
 193      - * Get pointer to a thread's hat structure
 194      - */
 195      -#define GET_THREAD_HATP(hatp, thread_t, scratch_reg)    \
 196      -        movl    T_PROCP(thread_t), hatp;                \
 197      -        movl    P_AS(hatp), scratch_reg;                \
 198      -        movl    A_HAT(scratch_reg), hatp
 199      -
 200      -/*
 201      - * If we are resuming an interrupt thread, store a timestamp in the thread
 202      - * structure.  If an interrupt occurs between tsc_read() and its subsequent
 203      - * store, the timestamp will be stale by the time it is stored.  We can detect
 204      - * this by doing a compare-and-swap on the thread's timestamp, since any
 205      - * interrupt occurring in this window will put a new timestamp in the thread's
 206      - * t_intr_start field.
 207      - */
 208      -#define STORE_INTR_START(thread_t)                      \
 209      -        testw   $T_INTR_THREAD, T_FLAGS(thread_t);      \
 210      -        jz      1f;                                     \
 211      -        pushl   %ecx;                                   \
 212      -0:                                                      \
 213      -        pushl   T_INTR_START(thread_t);                 \
 214      -        pushl   T_INTR_START+4(thread_t);               \
 215      -        call    tsc_read;                               \
 216      -        movl    %eax, %ebx;                             \
 217      -        movl    %edx, %ecx;                             \
 218      -        popl    %edx;                                   \
 219      -        popl    %eax;                                   \
 220      -        cmpxchg8b T_INTR_START(thread_t);               \
 221      -        jnz     0b;                                     \
 222      -        popl    %ecx;                                   \
 223      -1:
 224      -
 225      -#endif  /* __amd64 */
 226      -
 227      -#if defined(__lint)
 228      -
 229      -/* ARGSUSED */
 230      -void
 231      -resume(kthread_t *t)
 232      -{}
 233      -
 234      -#else   /* __lint */
 235      -
 236      -#if defined(__amd64)
 237      -
 238  144          .global kpti_enable
 239  145  
 240  146          ENTRY(resume)
 241  147          movq    %gs:CPU_THREAD, %rax
 242  148          leaq    resume_return(%rip), %r11
 243  149  
 244  150          /*
 245  151           * Deal with SMAP here. A thread may be switched out at any point while
 246  152           * it is executing. The thread could be under on_fault() or it could be
 247  153           * pre-empted while performing a copy interruption. If this happens and
[ 181 lines elided ]
 429  335           * flag out of paranoia.
 430  336           */
 431  337          movq    T_USERACC(%r12), %rax   /* should we disable smap? */
 432  338          cmpq    $0, %rax                /* skip call when zero */
 433  339          jz      .nosmap
 434  340          xorq    %rax, %rax
 435  341          movq    %rax, T_USERACC(%r12)
 436  342          call    smap_disable
 437  343  .nosmap:
 438  344  
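
In C terms the T_USERACC check above is roughly the following, where t is the incoming thread (a sketch; the field and function names are those referenced by the assembly):

        if (t->t_useracc != 0) {        /* thread was switched out with SMAP disabled */
                t->t_useracc = 0;       /* clear the flag out of paranoia */
                smap_disable();
        }
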
      345 +        call    ht_mark
      346 +
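
The call to ht_mark above (a matching call is added in resume_from_intr below) is the hook this L1TF webrev adds to the switch-in path: it runs once the incoming thread has been installed on the CPU, presumably so the hyperthreading mitigation can track what each logical CPU is currently running. A hypothetical illustration of such a hook, not the real ht_mark (which is defined elsewhere in this change); the field name is invented:

        /* Hypothetical sketch only, not the actual implementation. */
        void
        example_ht_mark(void)
        {
                cpu_t *cp = CPU;                        /* current CPU */

                cp->cpu_example_ht_owner = curthread;   /* hypothetical field */
        }
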
 439  347          /*
 440  348           * Restore non-volatile registers, then have spl0 return to the
 441  349           * resuming thread's PC after first setting the priority as low as
 442  350           * possible and blocking all interrupt threads that may be active.
 443  351           */
 444  352          movq    %r13, %rax      /* save return address */
 445  353          RESTORE_REGS(%r11)
 446  354          pushq   %rax            /* push return address for spl0() */
 447  355          call    __dtrace_probe___sched_on__cpu
 448  356          jmp     spl0
 449  357  
 450  358  resume_return:
 451  359          /*
 452  360           * Remove stack frame created in SAVE_REGS()
 453  361           */
 454  362          addq    $CLONGSIZE, %rsp
 455  363          ret
 456  364          SET_SIZE(_resume_from_idle)
 457  365          SET_SIZE(resume)
 458  366  
 459      -#elif defined (__i386)
 460      -
 461      -        ENTRY(resume)
 462      -        movl    %gs:CPU_THREAD, %eax
 463      -        movl    $resume_return, %ecx
 464      -
 465      -        /*
 466      -         * Save non-volatile registers, and set return address for current
 467      -         * thread to resume_return.
 468      -         *
 469      -         * %edi = t (new thread) when done.
 470      -         */
 471      -        SAVE_REGS(%eax,  %ecx)
 472      -
 473      -        LOADCPU(%ebx)                   /* %ebx = CPU */
 474      -        movl    CPU_THREAD(%ebx), %esi  /* %esi = curthread */
 475      -
 476      -#ifdef DEBUG
 477      -        call    assert_ints_enabled     /* panics if we are cli'd */
 478      -#endif
 479      -        /*
 480      -         * Call savectx if thread has installed context ops.
 481      -         *
 482      -         * Note that if we have floating point context, the save op
 483      -         * (either fpsave_begin or fpxsave_begin) will issue the
 484      -         * async save instruction (fnsave or fxsave respectively)
 485      -         * that we fwait for below.
 486      -         */
 487      -        movl    T_CTX(%esi), %eax       /* should current thread savectx? */
 488      -        testl   %eax, %eax
 489      -        jz      .nosavectx              /* skip call when zero */
 490      -        pushl   %esi                    /* arg = thread pointer */
 491      -        call    savectx                 /* call ctx ops */
 492      -        addl    $4, %esp                /* restore stack pointer */
 493      -.nosavectx:
 494      -
 495      -        /*
 496      -         * Call savepctx if process has installed context ops.
 497      -         */
 498      -        movl    T_PROCP(%esi), %eax     /* %eax = proc */
 499      -        cmpl    $0, P_PCTX(%eax)        /* should current thread savectx? */
 500      -        je      .nosavepctx             /* skip call when zero */
 501      -        pushl   %eax                    /* arg = proc pointer */
 502      -        call    savepctx                /* call ctx ops */
 503      -        addl    $4, %esp
 504      -.nosavepctx:
 505      -
 506      -        /*
 507      -         * Temporarily switch to the idle thread's stack
 508      -         */
 509      -        movl    CPU_IDLE_THREAD(%ebx), %eax     /* idle thread pointer */
 510      -
 511      -        /*
 512      -         * Set the idle thread as the current thread
 513      -         */
 514      -        movl    T_SP(%eax), %esp        /* It is safe to set esp */
 515      -        movl    %eax, CPU_THREAD(%ebx)
 516      -
 517      -        /* switch in the hat context for the new thread */
 518      -        GET_THREAD_HATP(%ecx, %edi, %ecx)
 519      -        pushl   %ecx
 520      -        call    hat_switch
 521      -        addl    $4, %esp
 522      -
 523      -        /*
 524      -         * Clear and unlock previous thread's t_lock
 525      -         * to allow it to be dispatched by another processor.
 526      -         */
 527      -        movb    $0, T_LOCK(%esi)
 528      -
 529      -        /*
 530      -         * IMPORTANT: Registers at this point must be:
 531      -         *       %edi = new thread
 532      -         *
 533      -         * Here we are in the idle thread, have dropped the old thread.
 534      -         */
 535      -        ALTENTRY(_resume_from_idle)
 536      -        /*
 537      -         * spin until dispatched thread's mutex has
 538      -         * been unlocked. this mutex is unlocked when
 539      -         * it becomes safe for the thread to run.
 540      -         */
 541      -.L4:
 542      -        lock
 543      -        btsl    $0, T_LOCK(%edi) /* lock new thread's mutex */
 544      -        jc      .L4_2                   /* lock did not succeed */
 545      -
 546      -        /*
 547      -         * Fix CPU structure to indicate new running thread.
 548      -         * Set pointer in new thread to the CPU structure.
 549      -         */
 550      -        LOADCPU(%esi)                   /* load current CPU pointer */
 551      -        movl    T_STACK(%edi), %eax     /* here to use v pipeline of */
 552      -                                        /* Pentium. Used few lines below */
 553      -        cmpl    %esi, T_CPU(%edi)
 554      -        jne     .L5_2
 555      -.L5_1:
 556      -        /*
 557      -         * Setup esp0 (kernel stack) in TSS to curthread's stack.
 558      -         * (Note: Since we don't have saved 'regs' structure for all
 559      -         *        the threads we can't easily determine if we need to
 560      -         *        change esp0. So, we simply change the esp0 to bottom 
 561      -         *        of the thread stack and it will work for all cases.)
 562      -         */
 563      -        movl    CPU_TSS(%esi), %ecx
 564      -        addl    $REGSIZE+MINFRAME, %eax /* to the bottom of thread stack */
 565      -#if !defined(__xpv)
 566      -        movl    %eax, TSS_ESP0(%ecx)
 567      -#else
 568      -        pushl   %eax
 569      -        pushl   $KDS_SEL
 570      -        call    HYPERVISOR_stack_switch
 571      -        addl    $8, %esp
 572      -#endif  /* __xpv */
 573      -
 574      -        movl    %edi, CPU_THREAD(%esi)  /* set CPU's thread pointer */
 575      -        mfence                          /* synchronize with mutex_exit() */
 576      -        xorl    %ebp, %ebp              /* make $<threadlist behave better */
 577      -        movl    T_LWP(%edi), %eax       /* set associated lwp to  */
 578      -        movl    %eax, CPU_LWP(%esi)     /* CPU's lwp ptr */
 579      -
 580      -        movl    T_SP(%edi), %esp        /* switch to outgoing thread's stack */
 581      -        movl    T_PC(%edi), %esi        /* saved return addr */
 582      -
 583      -        /*
 584      -         * Call restorectx if context ops have been installed.
 585      -         */
 586      -        movl    T_CTX(%edi), %eax       /* should resumed thread restorectx? */
 587      -        testl   %eax, %eax
 588      -        jz      .norestorectx           /* skip call when zero */
 589      -        pushl   %edi                    /* arg = thread pointer */
 590      -        call    restorectx              /* call ctx ops */
 591      -        addl    $4, %esp                /* restore stack pointer */
 592      -.norestorectx:
 593      -
 594      -        /*
 595      -         * Call restorepctx if context ops have been installed for the proc.
 596      -         */
 597      -        movl    T_PROCP(%edi), %eax
 598      -        cmpl    $0, P_PCTX(%eax)
 599      -        je      .norestorepctx
 600      -        pushl   %eax                    /* arg = proc pointer */
 601      -        call    restorepctx
 602      -        addl    $4, %esp                /* restore stack pointer */
 603      -.norestorepctx:
 604      -
 605      -        STORE_INTR_START(%edi)
 606      -
 607      -        /*
 608      -         * Restore non-volatile registers, then have spl0 return to the
 609      -         * resuming thread's PC after first setting the priority as low as
 610      -         * possible and blocking all interrupt threads that may be active.
 611      -         */
 612      -        movl    %esi, %eax              /* save return address */
 613      -        RESTORE_REGS(%ecx)
 614      -        pushl   %eax                    /* push return address for spl0() */
 615      -        call    __dtrace_probe___sched_on__cpu
 616      -        jmp     spl0
 617      -
 618      -resume_return:
 619      -        /*
 620      -         * Remove stack frame created in SAVE_REGS()
 621      -         */
 622      -        addl    $CLONGSIZE, %esp
 623      -        ret
 624      -
 625      -.L4_2:
 626      -        pause
 627      -        cmpb    $0, T_LOCK(%edi)
 628      -        je      .L4
 629      -        jmp     .L4_2
 630      -
 631      -.L5_2:
 632      -        /* cp->cpu_stats.sys.cpumigrate++ */
 633      -        addl    $1, CPU_STATS_SYS_CPUMIGRATE(%esi)
 634      -        adcl    $0, CPU_STATS_SYS_CPUMIGRATE+4(%esi)
 635      -        movl    %esi, T_CPU(%edi)       /* set new thread's CPU pointer */
 636      -        jmp     .L5_1
 637      -
 638      -        SET_SIZE(_resume_from_idle)
 639      -        SET_SIZE(resume)
 640      -
 641      -#endif  /* __amd64 */
 642      -#endif  /* __lint */
 643      -
 644      -#if defined(__lint)
 645      -
 646      -/* ARGSUSED */
 647      -void
 648      -resume_from_zombie(kthread_t *t)
 649      -{}
 650      -
 651      -#else   /* __lint */
 652      -
 653      -#if defined(__amd64)
 654      -
 655  367          ENTRY(resume_from_zombie)
 656  368          movq    %gs:CPU_THREAD, %rax
 657  369          leaq    resume_from_zombie_return(%rip), %r11
 658  370  
 659  371          /*
 660  372           * Save non-volatile registers, and set return address for current
 661  373           * thread to resume_from_zombie_return.
 662  374           *
 663  375           * %r12 = t (new thread) when done
 664  376           */
[ 54 lines elided ]
 719  431          RESTORE_REGS(%r11)              /* restore non-volatile registers */
 720  432          call    __dtrace_probe___sched_on__cpu
 721  433  
 722  434          /*
 723  435           * Remove stack frame created in SAVE_REGS()
 724  436           */
 725  437          addq    $CLONGSIZE, %rsp
 726  438          ret
 727  439          SET_SIZE(resume_from_zombie)
 728  440  
 729      -#elif defined (__i386)
 730      -
 731      -        ENTRY(resume_from_zombie)
 732      -        movl    %gs:CPU_THREAD, %eax
 733      -        movl    $resume_from_zombie_return, %ecx
 734      -
 735      -        /*
 736      -         * Save non-volatile registers, and set return address for current
 737      -         * thread to resume_from_zombie_return.
 738      -         *
 739      -         * %edi = t (new thread) when done.
 740      -         */
 741      -        SAVE_REGS(%eax, %ecx)
 742      -
 743      -#ifdef DEBUG
 744      -        call    assert_ints_enabled     /* panics if we are cli'd */
 745      -#endif
 746      -        movl    %gs:CPU_THREAD, %esi    /* %esi = curthread */
 747      -
 748      -        /* clean up the fp unit. It might be left enabled */
 749      -
 750      -        movl    %cr0, %eax
 751      -        testl   $CR0_TS, %eax
 752      -        jnz     .zfpu_disabled          /* if TS already set, nothing to do */
 753      -        fninit                          /* init fpu & discard pending error */
 754      -        orl     $CR0_TS, %eax
 755      -        movl    %eax, %cr0
 756      -.zfpu_disabled:
 757      -
 758      -        /*
 759      -         * Temporarily switch to the idle thread's stack so that the zombie
 760      -         * thread's stack can be reclaimed by the reaper.
 761      -         */
 762      -        movl    %gs:CPU_IDLE_THREAD, %eax /* idle thread pointer */
 763      -        movl    T_SP(%eax), %esp        /* get onto idle thread stack */
 764      -
 765      -        /*
 766      -         * Set the idle thread as the current thread.
 767      -         */
 768      -        movl    %eax, %gs:CPU_THREAD
 769      -
 770      -        /*
 771      -         * switch in the hat context for the new thread
 772      -         */
 773      -        GET_THREAD_HATP(%ecx, %edi, %ecx)
 774      -        pushl   %ecx
 775      -        call    hat_switch
 776      -        addl    $4, %esp
 777      -
 778      -        /*
 779      -         * Put the zombie on death-row.
 780      -         */
 781      -        pushl   %esi
 782      -        call    reapq_add
 783      -        addl    $4, %esp
 784      -        jmp     _resume_from_idle       /* finish job of resume */
 785      -
 786      -resume_from_zombie_return:
 787      -        RESTORE_REGS(%ecx)              /* restore non-volatile registers */
 788      -        call    __dtrace_probe___sched_on__cpu
 789      -
 790      -        /*
 791      -         * Remove stack frame created in SAVE_REGS()
 792      -         */
 793      -        addl    $CLONGSIZE, %esp
 794      -        ret
 795      -        SET_SIZE(resume_from_zombie)
 796      -
 797      -#endif  /* __amd64 */
 798      -#endif  /* __lint */
 799      -
 800      -#if defined(__lint)
 801      -
 802      -/* ARGSUSED */
 803      -void
 804      -resume_from_intr(kthread_t *t)
 805      -{}
 806      -
 807      -#else   /* __lint */
 808      -
 809      -#if defined(__amd64)
 810      -
 811  441          ENTRY(resume_from_intr)
 812  442          movq    %gs:CPU_THREAD, %rax
 813  443          leaq    resume_from_intr_return(%rip), %r11
 814  444  
 815  445          /*
 816  446           * Save non-volatile registers, and set return address for current
 817  447           * thread to resume_from_intr_return.
 818  448           *
 819  449           * %r12 = t (new thread) when done
 820  450           */
[ 6 lines elided ]
 827  457          xorl    %ebp, %ebp              /* make $<threadlist behave better */
 828  458  
 829  459          /*
 830  460           * Unlock outgoing thread's mutex dispatched by another processor.
 831  461           */
 832  462          xorl    %eax, %eax
 833  463          xchgb   %al, T_LOCK(%r13)
 834  464  
 835  465          STORE_INTR_START(%r12)
 836  466  
      467 +        call    ht_mark
      468 +
 837  469          /*
 838  470           * Restore non-volatile registers, then have spl0 return to the
 839  471           * resuming thread's PC after first setting the priority as low as
 840  472           * possible and blocking all interrupt threads that may be active.
 841  473           */
 842  474          movq    T_PC(%r12), %rax        /* saved return addr */
 843  475          RESTORE_REGS(%r11);
 844  476          pushq   %rax                    /* push return address for spl0() */
 845  477          call    __dtrace_probe___sched_on__cpu
 846  478          jmp     spl0
 847  479  
 848  480  resume_from_intr_return:
 849  481          /*
 850  482           * Remove stack frame created in SAVE_REGS()
 851  483           */
 852  484          addq    $CLONGSIZE, %rsp
 853  485          ret
 854  486          SET_SIZE(resume_from_intr)
 855  487  
 856      -#elif defined (__i386)
 857      -
 858      -        ENTRY(resume_from_intr)
 859      -        movl    %gs:CPU_THREAD, %eax
 860      -        movl    $resume_from_intr_return, %ecx
 861      -
 862      -        /*
 863      -         * Save non-volatile registers, and set return address for current
 864      -         * thread to resume_return.
 865      -         *
 866      -         * %edi = t (new thread) when done.
 867      -         */
 868      -        SAVE_REGS(%eax, %ecx)
 869      -
 870      -#ifdef DEBUG
 871      -        call    assert_ints_enabled     /* panics if we are cli'd */
 872      -#endif
 873      -        movl    %gs:CPU_THREAD, %esi    /* %esi = curthread */
 874      -        movl    %edi, %gs:CPU_THREAD    /* set CPU's thread pointer */
 875      -        mfence                          /* synchronize with mutex_exit() */
 876      -        movl    T_SP(%edi), %esp        /* restore resuming thread's sp */
 877      -        xorl    %ebp, %ebp              /* make $<threadlist behave better */
 878      -
 879      -        /*
 880      -         * Unlock outgoing thread's mutex dispatched by another processor.
 881      -         */
 882      -        xorl    %eax,%eax
 883      -        xchgb   %al, T_LOCK(%esi)
 884      -
 885      -        STORE_INTR_START(%edi)
 886      -
 887      -        /*
 888      -         * Restore non-volatile registers, then have spl0 return to the
 889      -         * resuming thread's PC after first setting the priority as low as
 890      -         * possible and blocking all interrupt threads that may be active.
 891      -         */
 892      -        movl    T_PC(%edi), %eax        /* saved return addr */
 893      -        RESTORE_REGS(%ecx)
 894      -        pushl   %eax                    /* push return address for spl0() */
 895      -        call    __dtrace_probe___sched_on__cpu
 896      -        jmp     spl0
 897      -
 898      -resume_from_intr_return:
 899      -        /*
 900      -         * Remove stack frame created in SAVE_REGS()
 901      -         */
 902      -        addl    $CLONGSIZE, %esp
 903      -        ret
 904      -        SET_SIZE(resume_from_intr)
 905      -
 906      -#endif  /* __amd64 */
 907      -#endif /* __lint */
 908      -
 909      -#if defined(__lint)
 910      -
 911      -void
 912      -thread_start(void)
 913      -{}
 914      -
 915      -#else   /* __lint */
 916      -
 917      -#if defined(__amd64)
 918      -
 919  488          ENTRY(thread_start)
 920  489          popq    %rax            /* start() */
 921  490          popq    %rdi            /* arg */
 922  491          popq    %rsi            /* len */
 923  492          movq    %rsp, %rbp
 924  493          call    *%rax
 925  494          call    thread_exit     /* destroy thread if it returns. */
 926  495          /*NOTREACHED*/
 927  496          SET_SIZE(thread_start)
 928      -
 929      -#elif defined(__i386)
 930      -
 931      -        ENTRY(thread_start)
 932      -        popl    %eax
 933      -        movl    %esp, %ebp
 934      -        addl    $8, %ebp
 935      -        call    *%eax
 936      -        addl    $8, %esp
 937      -        call    thread_exit     /* destroy thread if it returns. */
 938      -        /*NOTREACHED*/
 939      -        SET_SIZE(thread_start)
 940      -
 941      -#endif  /* __i386 */
 942      -
 943      -#endif  /* __lint */
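
For reference, thread_start is the first frame of every newly created kernel thread: it pops the start routine and its two arguments off the new thread's stack, calls the routine, and destroys the thread if the routine returns. A C-level sketch of the equivalent logic (illustrative; the real entry point must be assembly because the arguments arrive on the stack, and the argument types shown are assumptions):

        /* Illustrative only: what thread_start() amounts to in C. */
        static void
        thread_start_sketch(void (*start)(void *, size_t), void *arg, size_t len)
        {
                start(arg, len);
                thread_exit();          /* destroy the thread if start() returns */
                /* NOTREACHED */
        }
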
    