Print this page
8956 Implement KPTI
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
Reviewed by: Robert Mustacchi <rm@joyent.com>

*** 22,32 ****
   * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
   * Use is subject to license terms.
   */
  
  /*
!  * Copyright (c) 2013, Joyent, Inc.  All rights reserved.
   */
  
  /*
   * Process switching routines.
   */
--- 22,32 ----
   * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
   * Use is subject to license terms.
   */
  
  /*
!  * Copyright (c) 2018 Joyent, Inc.
   */
  
  /*
   * Process switching routines.
   */
*** 233,242 ****
--- 233,244 ----
  
  #else	/* __lint */
  
  #if defined(__amd64)
  
+ 	.global	kpti_enable
+ 
  	ENTRY(resume)
  	movq	%gs:CPU_THREAD, %rax
  	leaq	resume_return(%rip), %r11
  
  	/*
*** 360,383 ****
  
  	incq	CPU_STATS_SYS_CPUMIGRATE(%r13)
  	movq	%r13, T_CPU(%r12)	/* set new thread's CPU pointer */
  
  .setup_cpu:
  	/*
! 	 * Setup rsp0 (kernel stack) in TSS to curthread's stack.
! 	 * (Note: Since we don't have saved 'regs' structure for all
! 	 * the threads we can't easily determine if we need to
! 	 * change rsp0. So, we simply change the rsp0 to bottom
! 	 * of the thread stack and it will work for all cases.)
  	 *
! 	 * XX64 - Is this correct?
  	 */
  	movq	CPU_TSS(%r13), %r14
  	movq	T_STACK(%r12), %rax
  	addq	$REGSIZE+MINFRAME, %rax	/* to the bottom of thread stack */
! #if !defined(__xpv)
  	movq	%rax, TSS_RSP0(%r14)
  #else
  	movl	$KDS_SEL, %edi
  	movq	%rax, %rsi
  	call	HYPERVISOR_stack_switch
  #endif	/* __xpv */
--- 362,395 ----
  
  	incq	CPU_STATS_SYS_CPUMIGRATE(%r13)
  	movq	%r13, T_CPU(%r12)	/* set new thread's CPU pointer */
  
  .setup_cpu:
  	/*
! 	 * Setup rsp0 (kernel stack) in TSS to curthread's saved regs
! 	 * structure. If this thread doesn't have a regs structure above
! 	 * the stack -- that is, if lwp_stk_init() was never called for the
! 	 * thread -- this will set rsp0 to the wrong value, but it's harmless
! 	 * as it's a kernel thread, and it won't actually attempt to implicitly
! 	 * use the rsp0 via a privilege change.
  	 *
! 	 * Note that when we have KPTI enabled on amd64, we never use this
! 	 * value at all (since all the interrupts have an IST set).
  	 */
  	movq	CPU_TSS(%r13), %r14
+ #if !defined(__xpv)
+ 	cmpq	$1, kpti_enable
+ 	jne	1f
+ 	leaq	CPU_KPTI_TR_RSP(%r13), %rax
+ 	jmp	2f
+ 1:
  	movq	T_STACK(%r12), %rax
  	addq	$REGSIZE+MINFRAME, %rax	/* to the bottom of thread stack */
! 2:
  	movq	%rax, TSS_RSP0(%r14)
  #else
+ 	movq	T_STACK(%r12), %rax
+ 	addq	$REGSIZE+MINFRAME, %rax	/* to the bottom of thread stack */
  	movl	$KDS_SEL, %edi
  	movq	%rax, %rsi
  	call	HYPERVISOR_stack_switch
  #endif	/* __xpv */