8956 Implement KPTI
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
Reviewed by: Robert Mustacchi <rm@joyent.com>

*** 1,9 ****
  /*
   * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
   * Copyright (c) 2013, 2014 by Delphix. All rights reserved.
!  * Copyright (c) 2017 Joyent, Inc.
   */
  
  /*
   * Copyright (c) 1989, 1990 William F. Jolitz.
   * Copyright (c) 1990 The Regents of the University of California.
--- 1,9 ----
  /*
   * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
   * Copyright (c) 2013, 2014 by Delphix. All rights reserved.
!  * Copyright (c) 2018 Joyent, Inc.
   */
  
  /*
   * Copyright (c) 1989, 1990 William F. Jolitz.
   * Copyright (c) 1990 The Regents of the University of California.
*** 110,119 ****
--- 110,133 ----
  #define	TRAP_ERR(trapno)	\
  	push	$trapno
  
  #endif	/* __xpv && __amd64 */
  
+ /*
+  * These are the stacks used on cpu0 for taking double faults,
+  * NMIs and MCEs (the latter two only on amd64 where we have IST).
+  *
+  * We define them here instead of in a C file so that we can page-align
+  * them (gcc won't do that in a .c file).
+  */
+ 	.data
+ 	DGDEF3(dblfault_stack0, DEFAULTSTKSZ, MMU_PAGESIZE)
+ 	.fill	DEFAULTSTKSZ, 1, 0
+ 	DGDEF3(nmi_stack0, DEFAULTSTKSZ, MMU_PAGESIZE)
+ 	.fill	DEFAULTSTKSZ, 1, 0
+ 	DGDEF3(mce_stack0, DEFAULTSTKSZ, MMU_PAGESIZE)
+ 	.fill	DEFAULTSTKSZ, 1, 0
  
  /*
   * #DE
   */
  	ENTRY_NP(div0trap)
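
For readers wondering what the DGDEF3 stanza buys over plain C: a rough sketch of
the C equivalent is below. The aligned attribute is the piece the hunk's comment
says gcc won't reliably honor for these stacks, which is why they live in the .s
file; the MMU_PAGESIZE and DEFAULTSTKSZ values shown are illustrative
assumptions, not the kernel's definitions.

	/*
	 * Hypothetical C rendering of the stacks defined above in .data.
	 * Values are illustrative, not the kernel's.
	 */
	#define	MMU_PAGESIZE	4096
	#define	DEFAULTSTKSZ	(4 * MMU_PAGESIZE)

	char dblfault_stack0[DEFAULTSTKSZ]
	    __attribute__((aligned(MMU_PAGESIZE)));
	char nmi_stack0[DEFAULTSTKSZ]
	    __attribute__((aligned(MMU_PAGESIZE)));
	char mce_stack0[DEFAULTSTKSZ]
	    __attribute__((aligned(MMU_PAGESIZE)));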
*** 161,170 ****
--- 175,190 ----
  	leaq	sys_sysenter(%rip), %r11
  	cmpq	%r11, 24(%rsp)	/* Compare to saved r_rip on the stack */
  	je	1f
  	leaq	brand_sys_sysenter(%rip), %r11
  	cmpq	%r11, 24(%rsp)	/* Compare to saved r_rip on the stack */
+ 	je	1f
+ 	leaq	tr_sys_sysenter(%rip), %r11
+ 	cmpq	%r11, 24(%rsp)
+ 	je	1f
+ 	leaq	tr_brand_sys_sysenter(%rip), %r11
+ 	cmpq	%r11, 24(%rsp)
  	jne	2f
  1:	SWAPGS
  2:	popq	%r11
  
  #endif	/* !__xpv */
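
The unrolled compare chain above reads more easily as C-like pseudocode; the
sketch below uses a hypothetical helper name and is not from the source. The
point of the change is that the saved r_rip may now also be one of the KPTI
trampoline entry points (the tr_* variants), and those cases need the same
SWAPGS treatment as the originals:

	#include <stdint.h>

	/* Entry-point labels, as they would be declared from C. */
	extern char sys_sysenter[], brand_sys_sysenter[];
	extern char tr_sys_sysenter[], tr_brand_sys_sysenter[];

	/*
	 * Hypothetical helper: SWAPGS only if we trapped at a sysenter
	 * entry point, i.e. before the handler itself swapped %gs.
	 */
	static int
	need_swapgs(uintptr_t saved_rip)
	{
		return (saved_rip == (uintptr_t)sys_sysenter ||
		    saved_rip == (uintptr_t)brand_sys_sysenter ||
		    saved_rip == (uintptr_t)tr_sys_sysenter ||
		    saved_rip == (uintptr_t)tr_brand_sys_sysenter);
	}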
*** 212,221 ****
--- 232,245 ----
   * for this processor. If we came from userland, set kgsbase else
   * set gsbase. We find the proper cpu struct by looping through
   * the cpu structs for all processors till we find a match for the gdt
   * of the trapping processor. The stack is expected to be pointing at
   * the standard regs pushed by hardware on a trap (plus error code and trapno).
+  *
+  * It's ok for us to clobber gsbase here (and possibly end up with both gsbase
+  * and kgsbase set to the same value) because we're not going back the normal
+  * way out of here (via IRET). Where we're going, we don't need no user %gs.
   */
  #define	SET_CPU_GSBASE	\
  	subq	$REGOFF_TRAPNO, %rsp;	/* save regs */		\
  	movq	%rax, REGOFF_RAX(%rsp);	\
  	movq	%rbx, REGOFF_RBX(%rsp);	\
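
For context, the cpu-struct walk that SET_CPU_GSBASE's comment describes looks
roughly like this in C. Every field and helper name here is an assumption made
for illustration; the real macro is open-coded assembly:

	#include <stddef.h>

	#define	NCPU	256			/* illustrative limit */

	typedef struct cpu {
		void	*cpu_gdt;		/* assumed field name */
	} cpu_t;

	extern cpu_t	*cpu[NCPU];		/* per-CPU structs */
	extern void	*current_gdt(void);	/* hypothetical: read GDTR base */
	extern void	wr_gsbase(cpu_t *);	/* hypothetical: write gsbase MSR */

	/*
	 * Walk the cpu structs until one's GDT matches the trapping
	 * CPU's, then point gsbase at that cpu_t. Clobbering gsbase is
	 * safe because this path never returns via a normal IRET.
	 */
	static void
	set_cpu_gsbase(void)
	{
		void	*gdt = current_gdt();
		int	i;

		for (i = 0; i < NCPU; i++) {
			if (cpu[i] != NULL && cpu[i]->cpu_gdt == gdt) {
				wr_gsbase(cpu[i]);
				return;
			}
		}
	}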
*** 292,302 ****
  	movq	%rbp, %rdi
  	call	av_dispatch_nmivect
  
  	INTR_POP
! 	IRET
  	/*NOTREACHED*/
  	SET_SIZE(nmiint)
  
  #elif defined(__i386)
--- 316,326 ----
  	movq	%rbp, %rdi
  	call	av_dispatch_nmivect
  
  	INTR_POP
! 	jmp	tr_iret_auto
  	/*NOTREACHED*/
  	SET_SIZE(nmiint)
  
  #elif defined(__i386)
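
This is the pattern repeated through the rest of the file: a bare IRET becomes
a jump into the KPTI trampoline. tr_iret_auto checks at runtime whether the
interrupted context was userland, in which case the user page tables must be
restored before the IRETQ executes, while tr_iret_kernel (used in the #UD
emulation paths below, which are only reachable from kernel context) skips the
check. A C-like sketch of the distinction, with hypothetical helper names:

	#include <stdint.h>

	struct iret_frame {
		uint64_t	rip, cs, rflags, rsp, ss;
	};

	extern void	load_user_cr3(void);	    /* hypothetical: user %cr3 */
	extern void	iretq(struct iret_frame *); /* hypothetical: the IRETQ */

	/*
	 * Sketch of tr_iret_auto: if the saved %cs has RPL 3 we are
	 * returning to userland, so the user page tables must be live
	 * first (which is why the real code runs from trampoline text
	 * mapped in both address spaces). tr_iret_kernel is the same
	 * minus the check and the %cr3 switch.
	 */
	static void
	tr_iret_auto_sketch(struct iret_frame *f)
	{
		if ((f->cs & 0x3) == 3)
			load_user_cr3();
		iretq(f);
	}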
*** 431,441 ****
  	movq	56(%rsp), %rax	/* load calling SS */
  	movq	%rax, 40(%rsp)	/* store calling SS */
  	movq	32(%rsp), %rax	/* reload calling RSP */
  	movq	%rbp, (%rax)	/* store %rbp there */
  	popq	%rax		/* pop off temp */
! 	IRET			/* return from interrupt */
  	/*NOTREACHED*/
  
  ud_leave:
  	/*
  	 * We must emulate a "leave", which is the same as a "movq %rbp, %rsp"
--- 455,465 ----
  	movq	56(%rsp), %rax	/* load calling SS */
  	movq	%rax, 40(%rsp)	/* store calling SS */
  	movq	32(%rsp), %rax	/* reload calling RSP */
  	movq	%rbp, (%rax)	/* store %rbp there */
  	popq	%rax		/* pop off temp */
! 	jmp	tr_iret_kernel	/* return from interrupt */
  	/*NOTREACHED*/
  
  ud_leave:
  	/*
  	 * We must emulate a "leave", which is the same as a "movq %rbp, %rsp"
*** 452,472 ****
  	movq	(%rbp), %rax	/* get new %rbp */
  	addq	$8, %rbp	/* adjust new %rsp */
  	movq	%rbp, 32(%rsp)	/* store new %rsp */
  	movq	%rax, %rbp	/* set new %rbp */
  	popq	%rax		/* pop off temp */
! 	IRET			/* return from interrupt */
  	/*NOTREACHED*/
  
  ud_nop:
  	/*
  	 * We must emulate a "nop". This is obviously not hard: we need only
  	 * advance the %rip by one.
  	 */
  	INTR_POP
  	incq	(%rsp)
! 	IRET
  	/*NOTREACHED*/
  
  ud_ret:
  	INTR_POP
  	pushq	%rax		/* push temp */
--- 476,496 ----
  	movq	(%rbp), %rax	/* get new %rbp */
  	addq	$8, %rbp	/* adjust new %rsp */
  	movq	%rbp, 32(%rsp)	/* store new %rsp */
  	movq	%rax, %rbp	/* set new %rbp */
  	popq	%rax		/* pop off temp */
! 	jmp	tr_iret_kernel	/* return from interrupt */
  	/*NOTREACHED*/
  
  ud_nop:
  	/*
  	 * We must emulate a "nop". This is obviously not hard: we need only
  	 * advance the %rip by one.
  	 */
  	INTR_POP
  	incq	(%rsp)
! 	jmp	tr_iret_kernel
  	/*NOTREACHED*/
  
  ud_ret:
  	INTR_POP
  	pushq	%rax		/* push temp */
*** 473,483 ****
  	movq	32(%rsp), %rax	/* load %rsp */
  	movq	(%rax), %rax	/* load calling RIP */
  	movq	%rax, 8(%rsp)	/* store calling RIP */
  	addq	$8, 32(%rsp)	/* adjust new %rsp */
  	popq	%rax		/* pop off temp */
! 	IRET			/* return from interrupt */
  	/*NOTREACHED*/
  
  ud_trap:
  	/*
  	 * We're going to let the kernel handle this as a normal #UD. If,
--- 497,507 ----
  	movq	32(%rsp), %rax	/* load %rsp */
  	movq	(%rax), %rax	/* load calling RIP */
  	movq	%rax, 8(%rsp)	/* store calling RIP */
  	addq	$8, 32(%rsp)	/* adjust new %rsp */
  	popq	%rax		/* pop off temp */
! 	jmp	tr_iret_kernel	/* return from interrupt */
  	/*NOTREACHED*/
  
  ud_trap:
  	/*
  	 * We're going to let the kernel handle this as a normal #UD. If,
*** 747,757 ****
  _patch_xrstorq_rbx:
  	fxrstorq	(%rbx)
  	popq	%rdx
  	popq	%rbx
  	popq	%rax
! 	IRET
  	/*NOTREACHED*/
  
  .handle_in_trap:
  	popq	%rdx
  	popq	%rbx
--- 771,781 ----
  _patch_xrstorq_rbx:
  	fxrstorq	(%rbx)
  	popq	%rdx
  	popq	%rbx
  	popq	%rax
! 	jmp	tr_iret_auto
  	/*NOTREACHED*/
  
  .handle_in_trap:
  	popq	%rdx
  	popq	%rbx
*** 1125,1135 ****
  	SET_SIZE(pentium_pftrap)
  
  #endif	/* !__amd64 */
  
  	ENTRY_NP(resvtrap)
! 	TRAP_NOERR(15)	/* (reserved) */
  	jmp	cmntrap
  	SET_SIZE(resvtrap)
  
  /*
   * #MF
--- 1149,1159 ----
  	SET_SIZE(pentium_pftrap)
  
  #endif	/* !__amd64 */
  
  	ENTRY_NP(resvtrap)
! 	TRAP_NOERR(T_RESVTRAP)	/* (reserved) */
  	jmp	cmntrap
  	SET_SIZE(resvtrap)
  
  /*
   * #MF
*** 1205,1223 ****
  	TRAP_NOERR(T_SIMDFPE)	/* $19 */
  	jmp	cmninttrap
  	SET_SIZE(xmtrap)
  
  	ENTRY_NP(invaltrap)
! 	TRAP_NOERR(30)	/* very invalid */
  	jmp	cmntrap
  	SET_SIZE(invaltrap)
  
- 	ENTRY_NP(invalint)
- 	TRAP_NOERR(31)	/* even more so */
- 	jmp	cmnint
- 	SET_SIZE(invalint)
- 
  	.globl	fasttable
  
  #if defined(__amd64)
  	ENTRY_NP(fasttrap)
--- 1229,1242 ----
  	TRAP_NOERR(T_SIMDFPE)	/* $19 */
  	jmp	cmninttrap
  	SET_SIZE(xmtrap)
  
  	ENTRY_NP(invaltrap)
! 	TRAP_NOERR(T_INVALTRAP)	/* very invalid */
  	jmp	cmntrap
  	SET_SIZE(invaltrap)
  
  	.globl	fasttable
  
  #if defined(__amd64)
  	ENTRY_NP(fasttrap)
*** 1284,1294 ****
  	 * XXX a constant would be nicer.
  	 */
  	ENTRY_NP(fast_null)
  	XPV_TRAP_POP
  	orq	$PS_C, 24(%rsp)	/* set carry bit in user flags */
! 	IRET
  	/*NOTREACHED*/
  	SET_SIZE(fast_null)
  
  #elif defined(__i386)
--- 1303,1313 ----
  	 * XXX a constant would be nicer.
  	 */
  	ENTRY_NP(fast_null)
  	XPV_TRAP_POP
  	orq	$PS_C, 24(%rsp)	/* set carry bit in user flags */
! 	jmp	tr_iret_auto
  	/*NOTREACHED*/
  	SET_SIZE(fast_null)
  
  #elif defined(__i386)