234 mov %r14, %rsp; \
235 pushq KPTI_SS(%r13); \
236 pushq KPTI_RSP(%r13); \
237 pushq KPTI_RFLAGS(%r13); \
238 pushq KPTI_CS(%r13); \
239 pushq KPTI_RIP(%r13); \
240 errpush; \
241 mov KPTI_R14(%r13), %r14; \
242 mov KPTI_R13(%r13), %r13
243
/*
 * Trampoline flavor for vectors where the CPU pushed no error code:
 * expands the common trampoline with an empty errpush argument, so
 * nothing extra is pushed after the iret frame is rebuilt.
 */
#define INTERRUPT_TRAMPOLINE_NOERR \
	INTERRUPT_TRAMPOLINE_P(/**/)
246
/*
 * Trampoline flavor for vectors where the CPU pushed an error code:
 * re-pushes the error code saved in KPTI_ERR(%r13) on top of the
 * rebuilt iret frame before handing off to the real handler.
 */
#define INTERRUPT_TRAMPOLINE \
	INTERRUPT_TRAMPOLINE_P(pushq KPTI_ERR(%r13))
249
250 /*
251 * This is used for all interrupts that can plausibly be taken inside another
252 * interrupt and are using a kpti_frame stack (so #BP, #DB, #GP, #PF, #SS).
253 *
254 * We check for whether we took the interrupt while in another trampoline, in
255 * which case we need to use the kthread stack.
256 */
257 #define DBG_INTERRUPT_TRAMPOLINE_P(errpush) \
258 pushq %r13; \
259 pushq %r14; \
260 subq $KPTI_R14, %rsp; \
261 /* Check for clobbering */ \
262 cmp $0, KPTI_FLAG(%rsp); \
263 je 1f; \
264 /* Don't worry, this totally works */ \
265 int $8; \
266 1: \
267 movq $1, KPTI_FLAG(%rsp); \
268 /* Save current %cr3. */ \
269 mov %cr3, %r14; \
270 mov %r14, KPTI_TR_CR3(%rsp); \
271 \
272 cmpw $KCS_SEL, KPTI_CS(%rsp); \
273 je 4f; \
632 jmp isr; \
633 SET_SIZE(tr_/**/isr)
634
/*
 * CPU didn't push an error code, and ISR doesn't want one.  Push a
 * dummy $0 so the frame layout matches the error-code case, run the
 * re-entrancy-safe debug trampoline, then tail-jump to the handler
 * (no call/ret: the handler consumes the iret frame directly).
 */
#define MK_DBG_INTR_TRAMPOLINE_NOERR(isr) \
	ENTRY_NP(tr_/**/isr); \
	push	$0; \
	DBG_INTERRUPT_TRAMPOLINE_NOERR; \
	jmp	isr; \
	SET_SIZE(tr_/**/isr)
642
643
/*
 * Generate the tr_* trampoline stub for each exception vector.
 *
 * DBG_ variants are used for vectors that can plausibly be taken while
 * already inside another trampoline (#DB, #BP, #SS, #GP, #PF) and so
 * need the re-entrancy/clobber checks.  #NP (segnptrap) also uses the
 * DBG_ variant: its extra %rsp checks catch the case where we take #NP
 * doing an iret to userspace with a bad %cs/%ss, which first appears
 * as a kernel trap.  The _NOERR forms are for vectors where the CPU
 * pushes no error code.
 */
MK_INTR_TRAMPOLINE_NOERR(div0trap)
MK_DBG_INTR_TRAMPOLINE_NOERR(dbgtrap)
MK_DBG_INTR_TRAMPOLINE_NOERR(brktrap)
MK_INTR_TRAMPOLINE_NOERR(ovflotrap)
MK_INTR_TRAMPOLINE_NOERR(boundstrap)
MK_INTR_TRAMPOLINE_NOERR(invoptrap)
MK_INTR_TRAMPOLINE_NOERR(ndptrap)
MK_INTR_TRAMPOLINE(invtsstrap)
MK_DBG_INTR_TRAMPOLINE(segnptrap)
MK_DBG_INTR_TRAMPOLINE(stktrap)
MK_DBG_INTR_TRAMPOLINE(gptrap)
MK_DBG_INTR_TRAMPOLINE(pftrap)
MK_INTR_TRAMPOLINE_NOERR(resvtrap)
MK_INTR_TRAMPOLINE_NOERR(ndperr)
MK_INTR_TRAMPOLINE(achktrap)
MK_INTR_TRAMPOLINE_NOERR(xmtrap)
MK_INTR_TRAMPOLINE_NOERR(invaltrap)
MK_INTR_TRAMPOLINE_NOERR(fasttrap)
MK_INTR_TRAMPOLINE_NOERR(dtrace_ret)
663
664 /*
665 * These are special because they can interrupt other traps, and
666 * each other. We don't need to pivot their stacks, because they have
667 * dedicated IST stack space, but we need to change %cr3.
668 */
669 ENTRY_NP(tr_nmiint)
670 pushq %r13
671 mov kpti_safe_cr3, %r13
672 mov %r13, %cr3
|
234 mov %r14, %rsp; \
235 pushq KPTI_SS(%r13); \
236 pushq KPTI_RSP(%r13); \
237 pushq KPTI_RFLAGS(%r13); \
238 pushq KPTI_CS(%r13); \
239 pushq KPTI_RIP(%r13); \
240 errpush; \
241 mov KPTI_R14(%r13), %r14; \
242 mov KPTI_R13(%r13), %r13
243
/*
 * Trampoline flavor for vectors where the CPU pushed no error code:
 * expands the common trampoline with an empty errpush argument, so
 * nothing extra is pushed after the iret frame is rebuilt.
 */
#define INTERRUPT_TRAMPOLINE_NOERR \
	INTERRUPT_TRAMPOLINE_P(/**/)
246
/*
 * Trampoline flavor for vectors where the CPU pushed an error code:
 * re-pushes the error code saved in KPTI_ERR(%r13) on top of the
 * rebuilt iret frame before handing off to the real handler.
 */
#define INTERRUPT_TRAMPOLINE \
	INTERRUPT_TRAMPOLINE_P(pushq KPTI_ERR(%r13))
249
250 /*
251 * This is used for all interrupts that can plausibly be taken inside another
252 * interrupt and are using a kpti_frame stack (so #BP, #DB, #GP, #PF, #SS).
253 *
254 * We also use this for #NP, even though it uses the standard IST: the
255 * additional %rsp checks below will catch when we get an exception doing an
256 * iret to userspace with a bad %cs/%ss. This appears as a kernel trap, and
257 * only later gets redirected via kern_gpfault().
258 *
259 * We check for whether we took the interrupt while in another trampoline, in
260 * which case we need to use the kthread stack.
261 */
262 #define DBG_INTERRUPT_TRAMPOLINE_P(errpush) \
263 pushq %r13; \
264 pushq %r14; \
265 subq $KPTI_R14, %rsp; \
266 /* Check for clobbering */ \
267 cmp $0, KPTI_FLAG(%rsp); \
268 je 1f; \
269 /* Don't worry, this totally works */ \
270 int $8; \
271 1: \
272 movq $1, KPTI_FLAG(%rsp); \
273 /* Save current %cr3. */ \
274 mov %cr3, %r14; \
275 mov %r14, KPTI_TR_CR3(%rsp); \
276 \
277 cmpw $KCS_SEL, KPTI_CS(%rsp); \
278 je 4f; \
637 jmp isr; \
638 SET_SIZE(tr_/**/isr)
639
/*
 * CPU didn't push an error code, and ISR doesn't want one.  Push a
 * dummy $0 so the frame layout matches the error-code case, run the
 * re-entrancy-safe debug trampoline, then tail-jump to the handler
 * (no call/ret: the handler consumes the iret frame directly).
 */
#define MK_DBG_INTR_TRAMPOLINE_NOERR(isr) \
	ENTRY_NP(tr_/**/isr); \
	push	$0; \
	DBG_INTERRUPT_TRAMPOLINE_NOERR; \
	jmp	isr; \
	SET_SIZE(tr_/**/isr)
647
648
/*
 * Generate the tr_* trampoline stub for each exception vector.
 *
 * DBG_ variants are used for vectors that may be taken while already
 * inside another trampoline (see DBG_INTERRUPT_TRAMPOLINE_P above);
 * #NP (segnptrap) uses the DBG_ variant so the extra %rsp checks catch
 * a #NP taken on iret to userspace with a bad %cs/%ss.  The _NOERR
 * forms are for vectors where the CPU pushes no error code.
 */
MK_INTR_TRAMPOLINE_NOERR(div0trap)
MK_DBG_INTR_TRAMPOLINE_NOERR(dbgtrap)
MK_DBG_INTR_TRAMPOLINE_NOERR(brktrap)
MK_INTR_TRAMPOLINE_NOERR(ovflotrap)
MK_INTR_TRAMPOLINE_NOERR(boundstrap)
MK_INTR_TRAMPOLINE_NOERR(invoptrap)
MK_INTR_TRAMPOLINE_NOERR(ndptrap)
MK_INTR_TRAMPOLINE(invtsstrap)
MK_DBG_INTR_TRAMPOLINE(segnptrap)
MK_DBG_INTR_TRAMPOLINE(stktrap)
MK_DBG_INTR_TRAMPOLINE(gptrap)
MK_DBG_INTR_TRAMPOLINE(pftrap)
MK_INTR_TRAMPOLINE_NOERR(resvtrap)
MK_INTR_TRAMPOLINE_NOERR(ndperr)
MK_INTR_TRAMPOLINE(achktrap)
MK_INTR_TRAMPOLINE_NOERR(xmtrap)
MK_INTR_TRAMPOLINE_NOERR(invaltrap)
MK_INTR_TRAMPOLINE_NOERR(fasttrap)
MK_INTR_TRAMPOLINE_NOERR(dtrace_ret)
668
669 /*
670 * These are special because they can interrupt other traps, and
671 * each other. We don't need to pivot their stacks, because they have
672 * dedicated IST stack space, but we need to change %cr3.
673 */
674 ENTRY_NP(tr_nmiint)
675 pushq %r13
676 mov kpti_safe_cr3, %r13
677 mov %r13, %cr3
|