8956 Implement KPTI
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
Reviewed by: Robert Mustacchi <rm@joyent.com>

@@ -22,11 +22,11 @@
  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
 /*
- * Copyright (c) 2013, Joyent, Inc. All rights reserved.
+ * Copyright (c) 2018 Joyent, Inc.
  */
 
 /*
  * Process switching routines.
  */

@@ -233,10 +233,12 @@
 
 #else   /* __lint */
 
 #if defined(__amd64)
 
+        .global kpti_enable
+
         ENTRY(resume)
         movq    %gs:CPU_THREAD, %rax
         leaq    resume_return(%rip), %r11
 
         /*

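For context, kpti_enable is a C-level runtime flag that the new assembly below consults on every resume; the .global directive simply makes the symbol resolvable from this file. As a rough sketch only (the real definition, its default, and any tunable handling live elsewhere in the gate), the C side might look like:

    /*
     * Sketch only: the boot-time flag consulted by resume() below.
     * Only the symbol name is taken from the diff; the definition
     * site and default shown here are assumptions.
     */
    int kpti_enable = 1;    /* 1 = KPTI trampolines in use, 0 = disabled */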
@@ -360,24 +362,34 @@
         incq    CPU_STATS_SYS_CPUMIGRATE(%r13)
         movq    %r13, T_CPU(%r12)       /* set new thread's CPU pointer */
 
 .setup_cpu:
         /*
-         * Setup rsp0 (kernel stack) in TSS to curthread's stack.
-         * (Note: Since we don't have saved 'regs' structure for all
-         *        the threads we can't easily determine if we need to
-         *        change rsp0. So, we simply change the rsp0 to bottom 
-         *        of the thread stack and it will work for all cases.)
+         * Setup rsp0 (kernel stack) in TSS to curthread's saved regs
+         * structure.  If this thread doesn't have a regs structure above
+         * the stack -- that is, if lwp_stk_init() was never called for the
+         * thread -- this will set rsp0 to the wrong value, but it's harmless
+         * as it's a kernel thread, and it won't actually attempt to implicitly
+         * use the rsp0 via a privilege change.
          *
-         * XX64 - Is this correct?
+         * Note that when we have KPTI enabled on amd64, we never use this
+         * value at all (since all the interrupts have an IST set).
          */
         movq    CPU_TSS(%r13), %r14
+#if !defined(__xpv)
+        cmpq    $1, kpti_enable
+        jne     1f
+        leaq    CPU_KPTI_TR_RSP(%r13), %rax
+        jmp     2f
+1:
         movq    T_STACK(%r12), %rax
         addq    $REGSIZE+MINFRAME, %rax /* to the bottom of thread stack */
-#if !defined(__xpv)
+2:
         movq    %rax, TSS_RSP0(%r14)
 #else
+        movq    T_STACK(%r12), %rax
+        addq    $REGSIZE+MINFRAME, %rax /* to the bottom of thread stack */
         movl    $KDS_SEL, %edi
         movq    %rax, %rsi
         call    HYPERVISOR_stack_switch
 #endif  /* __xpv */
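To make the new control flow easier to follow, here is a rough C rendering of the non-Xen .setup_cpu sequence. The struct layouts are hypothetical stand-ins: CPU_TSS, CPU_KPTI_TR_RSP, T_STACK, REGSIZE, and MINFRAME are assembler offsets and constants in the real code, not fields with these exact C names.

    #include <stdint.h>

    /* Hypothetical stand-ins for the offsets used by the assembly. */
    struct tss { uint64_t tss_rsp0; };
    struct cpu {
            struct tss *cpu_tss;
            uint64_t cpu_kpti_tr_rsp;  /* per-CPU KPTI trampoline frame */
    };
    struct thread { uintptr_t t_stk; };

    extern int kpti_enable;

    /*
     * Equivalent of the .setup_cpu logic: with KPTI enabled, rsp0 points
     * into the per-CPU trampoline frame (the leaq takes the address of
     * the field); with it disabled, rsp0 is the bottom of the thread
     * stack, just below the saved regs structure.
     */
    static void
    setup_rsp0(struct cpu *cp, struct thread *t,
        uintptr_t regsize, uintptr_t minframe)
    {
            if (kpti_enable == 1)
                    cp->cpu_tss->tss_rsp0 =
                        (uint64_t)(uintptr_t)&cp->cpu_kpti_tr_rsp;
            else
                    cp->cpu_tss->tss_rsp0 = t->t_stk + regsize + minframe;
    }

Under Xen (the #else arm), the KPTI branch is not compiled in at all: the thread-stack value is always computed and handed to the hypervisor via HYPERVISOR_stack_switch rather than written into a TSS directly.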