11787 Kernel needs to be built with retpolines
11788 Kernel needs to generally use RSB stuffing
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
Reviewed by: John Levon <john.levon@joyent.com>

          --- old/usr/src/uts/intel/ia32/ml/swtch.s
          +++ new/usr/src/uts/intel/ia32/ml/swtch.s
... 163 lines elided ...
 164  164           * if PS_ACHK was present in rflags. See uts/intel/ia32/ml/copy.s for
 165  165           * more information on rflags and SMAP.
 166  166           */
 167  167          pushfq
 168  168          popq    %rsi
 169  169          andq    $PS_ACHK, %rsi
 170  170          movq    %rsi, T_USERACC(%rax)
 171  171          call    smap_enable
 172  172  
 173  173          /*
      174 +         * Take a moment to potentially clear the RSB buffer. This is done to
      175 +         * prevent various Spectre variant 2 and SpectreRSB attacks. This may
      176 +         * not be sufficient. Please see uts/intel/ia32/ml/retpoline.s for more
      177 +         * information about this.
      178 +         */
      179 +        call    x86_rsb_stuff
      180 +
      181 +        /*
 174  182           * Save non-volatile registers, and set return address for current
 175  183           * thread to resume_return.
 176  184           *
 177  185           * %r12 = t (new thread) when done
 178  186           */
 179  187          SAVE_REGS(%rax, %r11)
 180  188  
 181  189  
 182  190          LOADCPU(%r15)                           /* %r15 = CPU */
 183  191          movq    CPU_THREAD(%r15), %r13          /* %r13 = curthread */
... 20 lines elided ...
 204  212          cmpq    $0, P_PCTX(%r14)         /* should current thread savectx? */
 205  213          je      .nosavepctx              /* skip call when zero */
 206  214  
 207  215          movq    %r14, %rdi              /* arg = proc pointer */
 208  216          call    savepctx                 /* call ctx ops */
 209  217  .nosavepctx:
 210  218  
 211  219          /*
 212  220           * Temporarily switch to the idle thread's stack
 213  221           */
 214      -        movq    CPU_IDLE_THREAD(%r15), %rax     /* idle thread pointer */
      222 +        movq    CPU_IDLE_THREAD(%r15), %rax     /* idle thread pointer */
 215  223  
 216  224          /*
 217  225           * Set the idle thread as the current thread
 218  226           */
 219  227          movq    T_SP(%rax), %rsp        /* It is safe to set rsp */
 220  228          movq    %rax, CPU_THREAD(%r15)
 221  229  
 222  230          /*
 223  231           * Switch in the hat context for the new thread
 224  232           *
... 14 lines elided ...
 239  247           * Here we are in the idle thread, have dropped the old thread.
 240  248           */
 241  249          ALTENTRY(_resume_from_idle)
 242  250          /*
 243  251           * spin until dispatched thread's mutex has
 244  252           * been unlocked. this mutex is unlocked when
 245  253           * it becomes safe for the thread to run.
 246  254           */
 247  255  .lock_thread_mutex:
 248  256          lock
 249      -        btsl    $0, T_LOCK(%r12)        /* attempt to lock new thread's mutex */
      257 +        btsl    $0, T_LOCK(%r12)        /* attempt to lock new thread's mutex */
 250  258          jnc     .thread_mutex_locked    /* got it */
 251  259  
 252  260  .spin_thread_mutex:
 253  261          pause
 254  262          cmpb    $0, T_LOCK(%r12)        /* check mutex status */
 255  263          jz      .lock_thread_mutex      /* clear, retry lock */
 256  264          jmp     .spin_thread_mutex      /* still locked, spin... */
 257  265  
 258  266  .thread_mutex_locked:
 259  267          /*
... 35 lines elided ...
 295  303          movq    T_STACK(%r12), %rax
 296  304          addq    $REGSIZE+MINFRAME, %rax /* to the bottom of thread stack */
 297  305          movl    $KDS_SEL, %edi
 298  306          movq    %rax, %rsi
 299  307          call    HYPERVISOR_stack_switch
 300  308  #endif  /* __xpv */
 301  309  
 302  310          movq    %r12, CPU_THREAD(%r13)  /* set CPU's thread pointer */
 303  311          mfence                          /* synchronize with mutex_exit() */
 304  312          xorl    %ebp, %ebp              /* make $<threadlist behave better */
 305      -        movq    T_LWP(%r12), %rax       /* set associated lwp to  */
 306      -        movq    %rax, CPU_LWP(%r13)     /* CPU's lwp ptr */
      313 +        movq    T_LWP(%r12), %rax       /* set associated lwp to  */
      314 +        movq    %rax, CPU_LWP(%r13)     /* CPU's lwp ptr */
 307  315  
 308  316          movq    T_SP(%r12), %rsp        /* switch to outgoing thread's stack */
 309  317          movq    T_PC(%r12), %r13        /* saved return addr */
 310  318  
 311  319          /*
 312  320           * Call restorectx if context ops have been installed.
 313  321           */
 314  322          cmpq    $0, T_CTX(%r12)         /* should resumed thread restorectx? */
 315  323          jz      .norestorectx           /* skip call when zero */
 316  324          movq    %r12, %rdi              /* arg = thread pointer */
... 157 lines elided ...
 474  482          movq    T_PC(%r12), %rax        /* saved return addr */
 475  483          RESTORE_REGS(%r11);
 476  484          pushq   %rax                    /* push return address for spl0() */
 477  485          call    __dtrace_probe___sched_on__cpu
 478  486          jmp     spl0
 479  487  
 480  488  resume_from_intr_return:
 481  489          /*
 482  490           * Remove stack frame created in SAVE_REGS()
 483  491           */
 484      -        addq    $CLONGSIZE, %rsp
      492 +        addq    $CLONGSIZE, %rsp
 485  493          ret
 486  494          SET_SIZE(resume_from_intr)
 487  495  
 488  496          ENTRY(thread_start)
 489  497          popq    %rax            /* start() */
 490  498          popq    %rdi            /* arg */
 491  499          popq    %rsi            /* len */
 492  500          movq    %rsp, %rbp
 493      -        call    *%rax
      501 +        INDIRECT_CALL_REG(rax)
 494  502          call    thread_exit     /* destroy thread if it returns. */
 495  503          /*NOTREACHED*/
 496  504          SET_SIZE(thread_start)
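
The x86_rsb_stuff call added in the first hunk of resume() above stuffs the
CPU's Return Stack Buffer with benign entries before the outgoing thread's
state is saved, so a later mispredicted return cannot be steered to an
attacker-chosen target. The real routine lives in
uts/intel/ia32/ml/retpoline.s; the fragment below is only a sketch of the
generic RSB-stuffing pattern (a chain of calls whose return addresses all
point at a speculation trap), with a hypothetical entry name, and is not the
illumos implementation.

#include <sys/asm_linkage.h>

	/*
	 * Sketch only: fill all 32 RSB entries with benign targets.
	 * Each call pushes a return address that points at a
	 * pause/lfence speculation trap; those traps are never
	 * reached architecturally.
	 */
	ENTRY(rsb_stuff_sketch)
	movl	$16, %ecx		/* 16 iterations x 2 calls = 32 entries */
1:
	call	2f			/* fill one RSB entry */
3:
	pause				/* speculation trap */
	lfence
	jmp	3b
2:
	call	4f			/* fill a second RSB entry */
5:
	pause				/* speculation trap */
	lfence
	jmp	5b
4:
	decl	%ecx
	jnz	1b
	addq	$(32 * 8), %rsp		/* drop the 32 pushed return addresses */
	ret
	SET_SIZE(rsb_stuff_sketch)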
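The thread_start change replaces the raw indirect "call *%rax" with the
INDIRECT_CALL_REG(rax) macro so the branch can be routed through a retpoline
thunk when the kernel is built with retpolines. The thunk itself is not shown
in this file's diff; the fragment below is a sketch of the generic retpoline
pattern such a thunk uses (with a hypothetical name), not the exact code in
uts/intel/ia32/ml/retpoline.s.

#include <sys/asm_linkage.h>

	/*
	 * Sketch only: retpoline-style replacement for "call/jmp *%rax".
	 * The inner call trains the return predictor on the pause/lfence
	 * trap, so speculation spins harmlessly, while the architectural
	 * ret transfers to the real target loaded into %rax.
	 */
	ENTRY(indirect_thunk_rax_sketch)
	call	2f
1:
	pause				/* speculation lands here and spins */
	lfence
	jmp	1b
2:
	movq	%rax, (%rsp)		/* overwrite return address with target */
	ret				/* architecturally jumps to *%rax */
	SET_SIZE(indirect_thunk_rax_sketch)

Because the call site goes through a macro, only the macro's expansion (or the
thunk it targets) has to change when the mitigation is or is not wanted, which
is why the diff touches just this one instruction in thread_start.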
    