34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 * SUCH DAMAGE.
39 *
40 * $FreeBSD: src/sys/amd64/amd64/exception.S,v 1.113 2003/10/15 02:04:52 peter Exp $
41 */
42
43 #include <sys/asm_linkage.h>
44 #include <sys/asm_misc.h>
45 #include <sys/trap.h>
46 #include <sys/psw.h>
47 #include <sys/regset.h>
48 #include <sys/privregs.h>
49 #include <sys/dtrace.h>
50 #include <sys/x86_archext.h>
51 #include <sys/traptrace.h>
52 #include <sys/machparam.h>
53
54 #if !defined(__lint)
55
56 #include "assym.h"
57
58 /*
59  * push $0 on stack for traps that do not
60  * generate an error code.  This is so the rest
61  * of the kernel can expect a consistent stack
62  * from any exception.
63  *
64  * Note that under the hypervisor all amd64 exceptions
65  * arrive with %r11 and %rcx on the stack; XPV_TRAP_POP
66  * pops them back into their registers so the frame gets
67  * saved just as it is when running native.
68 */
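/*
 * For illustration, the frame these macros are expected to leave
 * behind (native amd64 case) matches the tail of a struct regs:
 *
 *	(high address)	r_ss
 *			r_rsp
 *			r_rfl
 *			r_cs
 *			r_rip
 *			r_err		($0 pushed by the _NOERR forms)
 *	(low address)	r_trapno	(pushed by every variant)
 *
 * so cmntrap and friends can treat vectors with and without a
 * hardware-supplied error code identically.
 */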
69
70 #if defined(__xpv) && defined(__amd64)
71
72 #define NPTRAP_NOERR(trapno) \
73 pushq $0; \
74 pushq $trapno
75
76 #define TRAP_NOERR(trapno) \
77 XPV_TRAP_POP; \
78 NPTRAP_NOERR(trapno)
79
80 /*
81 * error code already pushed by hw
82 * onto stack.
83 */
84 #define TRAP_ERR(trapno) \
85 XPV_TRAP_POP; \
86 pushq $trapno
87
88 #else /* __xpv && __amd64 */
89
90 #define TRAP_NOERR(trapno) \
91 push $0; \
92 push $trapno
93
94 #define NPTRAP_NOERR(trapno) TRAP_NOERR(trapno)
95
96 /*
97 * error code already pushed by hw
98 * onto stack.
99 */
100 #define TRAP_ERR(trapno) \
101 push $trapno
102
103 #endif /* __xpv && __amd64 */
104
105 /*
106 * These are the stacks used on cpu0 for taking double faults,
107 * NMIs and MCEs (the latter two only on amd64 where we have IST).
108 *
109 * We define them here instead of in a C file so that we can page-align
110 * them (gcc won't do that in a .c file).
111 */
112 .data
113 DGDEF3(dblfault_stack0, DEFAULTSTKSZ, MMU_PAGESIZE)
114 .fill DEFAULTSTKSZ, 1, 0
115 DGDEF3(nmi_stack0, DEFAULTSTKSZ, MMU_PAGESIZE)
116 .fill DEFAULTSTKSZ, 1, 0
117 DGDEF3(mce_stack0, DEFAULTSTKSZ, MMU_PAGESIZE)
118 .fill DEFAULTSTKSZ, 1, 0
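/*
 * A sketch of what each definition above amounts to, assuming DGDEF3
 * is the usual global-data macro taking a size and an alignment:
 *
 *	.data
 *	.align	MMU_PAGESIZE
 *	.globl	nmi_stack0
 * nmi_stack0:
 *	.fill	DEFAULTSTKSZ, 1, 0
 *
 * i.e. DEFAULTSTKSZ zeroed bytes per stack, page-aligned as the
 * comment above requires.
 */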
119
120 /*
121 * #DE
122 */
123 ENTRY_NP(div0trap)
124 TRAP_NOERR(T_ZERODIV) /* $0 */
125 jmp cmntrap
126 SET_SIZE(div0trap)
127
128 /*
129 * #DB
130 *
131 * Fetch %dr6 and clear it, handing off the value to the
132 * cmntrap code in %r15/%esi
133 */
134 ENTRY_NP(dbgtrap)
135 TRAP_NOERR(T_SGLSTP) /* $1 */
136
137 #if defined(__amd64)
138 #if !defined(__xpv) /* no sysenter support yet */
139 /*
140 * If we get here as a result of single-stepping a sysenter
141 * instruction, we suddenly find ourselves taking a #db
142 * in kernel mode -before- we've swapgs'ed. So before we can
143 * take the trap, we do the swapgs here, and fix the return
144 * %rip in trap() so that we return immediately after the
145 * swapgs in the sysenter handler to avoid doing the swapgs again.
146 *
147 * Nobody said that the design of sysenter was particularly
148 * elegant, did they?
149 */
150
151 pushq %r11
152
153 /*
154 * At this point the stack looks like this:
155 *
156 * (high address) r_ss
157 * r_rsp
176 jne 2f
177 1: swapgs
178 2: lfence /* swapgs mitigation */
179 popq %r11
180 #endif /* !__xpv */
181
182 INTR_PUSH
183 #if defined(__xpv)
184 movl $6, %edi
185 call kdi_dreg_get
186 movq %rax, %r15 /* %db6 -> %r15 */
187 movl $6, %edi
188 movl $0, %esi
189 call kdi_dreg_set /* 0 -> %db6 */
190 #else
191 movq %db6, %r15
192 xorl %eax, %eax
193 movq %rax, %db6
194 #endif
195
196 #elif defined(__i386)
197
198 INTR_PUSH
199 #if defined(__xpv)
200 pushl $6
201 call kdi_dreg_get
202 addl $4, %esp
203 movl %eax, %esi /* %dr6 -> %esi */
204 pushl $0
205 pushl $6
206 call kdi_dreg_set /* 0 -> %dr6 */
207 addl $8, %esp
208 #else
209 movl %db6, %esi
210 xorl %eax, %eax
211 movl %eax, %db6
212 #endif
213 #endif /* __i386 */
214
215 jmp cmntrap_pushed
216 SET_SIZE(dbgtrap)
217
218 #if defined(__amd64)
219 #if !defined(__xpv)
220
221 /*
222 * Macro to set the gsbase or kgsbase to the address of the struct cpu
223 * for this processor. If we came from userland, set kgsbase else
224 * set gsbase. We find the proper cpu struct by looping through
225 * the cpu structs for all processors till we find a match for the gdt
226 * of the trapping processor. The stack is expected to be pointing at
227 * the standard regs pushed by hardware on a trap (plus error code and trapno).
228 *
229 * It's ok for us to clobber gsbase here (and possibly end up with both gsbase
230 * and kgsbase set to the same value) because we're not going back the normal
231 * way out of here (via IRET). Where we're going, we don't need no user %gs.
232 */
233 #define SET_CPU_GSBASE \
234 subq $REGOFF_TRAPNO, %rsp; /* save regs */ \
235 movq %rax, REGOFF_RAX(%rsp); \
236 movq %rbx, REGOFF_RBX(%rsp); \
237 movq %rcx, REGOFF_RCX(%rsp); \
238 movq %rdx, REGOFF_RDX(%rsp); \
260 jne 4f; /* no, go set KGSBASE */ \
261 movl $MSR_AMD_GSBASE, %ecx; /* yes, set GSBASE */ \
262 mfence; /* OPTERON_ERRATUM_88 */ \
263 4: \
264 movq %rax, %rdx; /* write base register */ \
265 shrq $32, %rdx; \
266 wrmsr; \
267 movq REGOFF_RDX(%rbp), %rdx; /* restore regs */ \
268 movq REGOFF_RCX(%rbp), %rcx; \
269 movq REGOFF_RBX(%rbp), %rbx; \
270 movq REGOFF_RAX(%rbp), %rax; \
271 movq %rbp, %rsp; \
272 movq REGOFF_RBP(%rsp), %rbp; \
273 addq $REGOFF_TRAPNO, %rsp /* pop stack */
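/*
 * A note on the tail of SET_CPU_GSBASE: wrmsr takes the MSR number
 * in %ecx and the 64-bit value split across %edx:%eax, which is why
 * the cpu address (left in %rax by the search described above) is
 * copied into %rdx and shifted right by 32 just before the write.
 * The same store sequence serves both cases; only the MSR number
 * loaded into %ecx -- GSBASE or KGSBASE -- differs.
 */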
274
275 #else /* __xpv */
276
277 #define SET_CPU_GSBASE /* noop on the hypervisor */
278
279 #endif /* __xpv */
280 #endif /* __amd64 */
281
282
283 #if defined(__amd64)
284
285 /*
286 * #NMI
287 *
288 * XXPV: See 6532669.
289 */
290 ENTRY_NP(nmiint)
291 TRAP_NOERR(T_NMIFLT) /* $2 */
292
293 SET_CPU_GSBASE
294
295 /*
296 * Save all registers and setup segment registers
297 * with kernel selectors.
298 */
299 INTR_PUSH
300 INTGATE_INIT_KERNEL_FLAGS
301
302 TRACE_PTR(%r12, %rax, %eax, %rdx, $TT_TRAP)
303 TRACE_REGS(%r12, %rsp, %rax, %rbx)
304 TRACE_STAMP(%r12)
305
306 movq %rsp, %rbp
307
308 movq %rbp, %rdi
309 call av_dispatch_nmivect
310
311 INTR_POP
312 call x86_md_clear
313 jmp tr_iret_auto
314 /*NOTREACHED*/
315 SET_SIZE(nmiint)
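/*
 * The INTR_POP / x86_md_clear / tr_iret_auto tail above is the
 * pattern used by several handlers in this file: x86_md_clear is
 * the (possibly no-op) microarchitectural-data clearing hook run
 * before heading back toward a less privileged context, and
 * tr_iret_auto is the return path that selects the kernel or user
 * iret flavor based on the saved frame.  The details of both live
 * outside this file and depend on the CPU and its mitigations.
 */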
316
317 #elif defined(__i386)
318
319 /*
320 * #NMI
321 */
322 ENTRY_NP(nmiint)
323 TRAP_NOERR(T_NMIFLT) /* $2 */
324
325 /*
326 * Save all registers and setup segment registers
327 * with kernel selectors.
328 */
329 INTR_PUSH
330 INTGATE_INIT_KERNEL_FLAGS
331
332 TRACE_PTR(%edi, %ebx, %ebx, %ecx, $TT_TRAP)
333 TRACE_REGS(%edi, %esp, %ebx, %ecx)
334 TRACE_STAMP(%edi)
335
336 movl %esp, %ebp
337
338 pushl %ebp
339 call av_dispatch_nmivect
340 addl $4, %esp
341
342 INTR_POP_USER
343 IRET
344 SET_SIZE(nmiint)
345
346 #endif /* __i386 */
347
348 /*
349 * #BP
350 */
351 ENTRY_NP(brktrap)
352
353 #if defined(__amd64)
354 XPV_TRAP_POP
355 cmpw $KCS_SEL, 8(%rsp)
356 jne bp_user
357
358 /*
359 * This is a breakpoint in the kernel -- it is very likely that this
360 * is DTrace-induced. To unify DTrace handling, we spoof this as an
361 * invalid opcode (#UD) fault. Note that #BP is a trap, not a fault --
362 * we must decrement the trapping %rip to make it appear as a fault.
363 * We then push a non-zero error code to indicate that this is coming
364 * from #BP.
365 */
366 decq (%rsp)
367 push $1 /* error code -- non-zero for #BP */
368 jmp ud_kernel
369
370 bp_user:
371 #endif /* __amd64 */
372
373 NPTRAP_NOERR(T_BPTFLT) /* $3 */
374 jmp dtrace_trap
375
376 SET_SIZE(brktrap)
377
378 /*
379 * #OF
380 */
381 ENTRY_NP(ovflotrap)
382 TRAP_NOERR(T_OVFLW) /* $4 */
383 jmp cmntrap
384 SET_SIZE(ovflotrap)
385
386 /*
387 * #BR
388 */
389 ENTRY_NP(boundstrap)
390 TRAP_NOERR(T_BOUNDFLT) /* $5 */
391 jmp cmntrap
392 SET_SIZE(boundstrap)
393
394 #if defined(__amd64)
395
396 ENTRY_NP(invoptrap)
397
398 XPV_TRAP_POP
399
400 cmpw $KCS_SEL, 8(%rsp)
401 jne ud_user
402
403 #if defined(__xpv)
404 movb $0, 12(%rsp) /* clear saved upcall_mask from %cs */
405 #endif
406 push $0 /* error code -- zero for #UD */
407 ud_kernel:
408 push $0xdddd /* a dummy trap number */
409 INTR_PUSH
410 movq REGOFF_RIP(%rsp), %rdi
411 movq REGOFF_RSP(%rsp), %rsi
412 movq REGOFF_RAX(%rsp), %rdx
413 pushq (%rsi)
414 movq %rsp, %rsi
415 subq $8, %rsp
437 movq 24(%rsp), %rax /* load calling RIP */
438 addq $1, %rax /* increment over trapping instr */
439 movq %rax, 8(%rsp) /* store calling RIP */
440 movq 32(%rsp), %rax /* load calling CS */
441 movq %rax, 16(%rsp) /* store calling CS */
442 movq 40(%rsp), %rax /* load calling RFLAGS */
443 movq %rax, 24(%rsp) /* store calling RFLAGS */
444 movq 48(%rsp), %rax /* load calling RSP */
445 subq $8, %rax /* make room for %rbp */
446 movq %rax, 32(%rsp) /* store calling RSP */
447 movq 56(%rsp), %rax /* load calling SS */
448 movq %rax, 40(%rsp) /* store calling SS */
449 movq 32(%rsp), %rax /* reload calling RSP */
450 movq %rbp, (%rax) /* store %rbp there */
451 popq %rax /* pop off temp */
452 jmp tr_iret_kernel /* return from interrupt */
453 /*NOTREACHED*/
454
455 ud_leave:
456 /*
457 * We must emulate a "leave", which is the same as a "movq %rbp, %rsp"
458 * followed by a "popq %rbp". This is quite a bit simpler on amd64
459 * than it is on i386 -- we can exploit the fact that the %rsp is
460 * explicitly saved to effect the pop without having to reshuffle
461 * the other data pushed for the trap.
462 */
463 INTR_POP
464 pushq %rax /* push temp */
465 movq 8(%rsp), %rax /* load calling RIP */
466 addq $1, %rax /* increment over trapping instr */
467 movq %rax, 8(%rsp) /* store calling RIP */
468 movq (%rbp), %rax /* get new %rbp */
469 addq $8, %rbp /* adjust new %rsp */
470 movq %rbp, 32(%rsp) /* store new %rsp */
471 movq %rax, %rbp /* set new %rbp */
472 popq %rax /* pop off temp */
473 jmp tr_iret_kernel /* return from interrupt */
474 /*NOTREACHED*/
475
476 ud_nop:
477 /*
478 * We must emulate a "nop". This is obviously not hard: we need only
479 * advance the %rip by one.
480 */
481 INTR_POP
482 incq (%rsp)
498 /*
499 * We're going to let the kernel handle this as a normal #UD. If,
500 * however, we came through #BP and are spoofing #UD (in this case,
501 * the stored error value will be non-zero), we need to de-spoof
502 * the trap by incrementing %rip and pushing T_BPTFLT.
503 */
504 cmpq $0, REGOFF_ERR(%rsp)
505 je ud_ud
506 incq REGOFF_RIP(%rsp)
507 addq $REGOFF_RIP, %rsp
508 NPTRAP_NOERR(T_BPTFLT) /* $3 */
509 jmp cmntrap
510
511 ud_ud:
512 addq $REGOFF_RIP, %rsp
513 ud_user:
514 NPTRAP_NOERR(T_ILLINST)
515 jmp cmntrap
516 SET_SIZE(invoptrap)
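/*
 * A rough picture of the kernel-#UD path above (its i386 analogue
 * appears in full further down): the fault is handed to
 * dtrace_invop(), which reports which single instruction an FBT
 * probe overwrote -- a push of the frame pointer, a pop of it, a
 * "leave", or a "nop".  Each case is emulated by hand-editing the
 * saved frame (the block feeding tr_iret_kernel above rebuilds it
 * as though "pushq %rbp" had executed) and then returning straight
 * to the interrupted code, so the probed function never notices the
 * detour.  Anything else falls through to the ordinary T_ILLINST
 * handling below.
 */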
517
518 #elif defined(__i386)
519
520 /*
521 * #UD
522 */
523 ENTRY_NP(invoptrap)
524 /*
525 * If we are taking an invalid opcode trap while in the kernel, this
526 * is likely an FBT probe point.
527 */
528 pushl %gs
529 cmpw $KGS_SEL, (%esp)
530 jne 8f
531
532 addl $4, %esp
533 #if defined(__xpv)
534 movb $0, 6(%esp) /* clear saved upcall_mask from %cs */
535 #endif /* __xpv */
536 pusha
537 pushl %eax /* push %eax -- may be return value */
538 pushl %esp /* push stack pointer */
539 addl $48, (%esp) /* adjust to incoming args */
540 pushl 40(%esp) /* push calling EIP */
541 call dtrace_invop
542 ALTENTRY(dtrace_invop_callsite)
543 addl $12, %esp
544 cmpl $DTRACE_INVOP_PUSHL_EBP, %eax
545 je 1f
546 cmpl $DTRACE_INVOP_POPL_EBP, %eax
547 je 2f
548 cmpl $DTRACE_INVOP_LEAVE, %eax
549 je 3f
550 cmpl $DTRACE_INVOP_NOP, %eax
551 je 4f
552 jmp 7f
553 1:
554 /*
555 * We must emulate a "pushl %ebp". To do this, we pull the stack
556 * down 4 bytes, and then store the base pointer.
557 */
558 popa
559 subl $4, %esp /* make room for %ebp */
560 pushl %eax /* push temp */
561 movl 8(%esp), %eax /* load calling EIP */
562 incl %eax /* increment over LOCK prefix */
563 movl %eax, 4(%esp) /* store calling EIP */
564 movl 12(%esp), %eax /* load calling CS */
565 movl %eax, 8(%esp) /* store calling CS */
566 movl 16(%esp), %eax /* load calling EFLAGS */
567 movl %eax, 12(%esp) /* store calling EFLAGS */
568 movl %ebp, 16(%esp) /* push %ebp */
569 popl %eax /* pop off temp */
570 jmp _emul_done
571 2:
572 /*
573 * We must emulate a "popl %ebp". To do this, we do the opposite of
574 * the above: we remove the %ebp from the stack, and squeeze up the
575 * saved state from the trap.
576 */
577 popa
578 pushl %eax /* push temp */
579 movl 16(%esp), %ebp /* pop %ebp */
580 movl 12(%esp), %eax /* load calling EFLAGS */
581 movl %eax, 16(%esp) /* store calling EFLAGS */
582 movl 8(%esp), %eax /* load calling CS */
583 movl %eax, 12(%esp) /* store calling CS */
584 movl 4(%esp), %eax /* load calling EIP */
585 incl %eax /* increment over LOCK prefix */
586 movl %eax, 8(%esp) /* store calling EIP */
587 popl %eax /* pop off temp */
588 addl $4, %esp /* adjust stack pointer */
589 jmp _emul_done
590 3:
591 /*
592 * We must emulate a "leave", which is the same as a "movl %ebp, %esp"
593 * followed by a "popl %ebp". This looks similar to the above, but
594 * requires two temporaries: one for the new base pointer, and one
595 * for the staging register.
596 */
597 popa
598 pushl %eax /* push temp */
599 pushl %ebx /* push temp */
600 movl %ebp, %ebx /* set temp to old %ebp */
601 movl (%ebx), %ebp /* pop %ebp */
602 movl 16(%esp), %eax /* load calling EFLAGS */
603 movl %eax, (%ebx) /* store calling EFLAGS */
604 movl 12(%esp), %eax /* load calling CS */
605 movl %eax, -4(%ebx) /* store calling CS */
606 movl 8(%esp), %eax /* load calling EIP */
607 incl %eax /* increment over LOCK prefix */
608 movl %eax, -8(%ebx) /* store calling EIP */
609 movl %ebx, -4(%esp) /* temporarily store new %esp */
610 popl %ebx /* pop off temp */
611 popl %eax /* pop off temp */
612 movl -12(%esp), %esp /* set stack pointer */
613 subl $8, %esp /* adjust for three pushes, one pop */
614 jmp _emul_done
615 4:
616 /*
617 * We must emulate a "nop". This is obviously not hard: we need only
618 * advance the %eip by one.
619 */
620 popa
621 incl (%esp)
622 _emul_done:
623 IRET /* return from interrupt */
624 7:
625 popa
626 pushl $0
627 pushl $T_ILLINST /* $6 */
628 jmp cmntrap
629 8:
630 addl $4, %esp
631 pushl $0
632 pushl $T_ILLINST /* $6 */
633 jmp cmntrap
634 SET_SIZE(invoptrap)
635
636 #endif /* __i386 */
637
638 /*
639 * #NM
640 */
641
642 ENTRY_NP(ndptrap)
643 TRAP_NOERR(T_NOEXTFLT) /* $0 */
644 SET_CPU_GSBASE
645 jmp cmntrap
646 SET_SIZE(ndptrap)
647
648 #if !defined(__xpv)
649 #if defined(__amd64)
650
651 /*
652 * #DF
653 */
654 ENTRY_NP(syserrtrap)
655 pushq $T_DBLFLT
656 SET_CPU_GSBASE
657
658 /*
659 * We share this handler with kmdb (if kmdb is loaded). As such, we
660 * may have reached this point after encountering a #df in kmdb. If
661 * that happens, we'll still be on kmdb's IDT. We need to switch back
662 * to this CPU's IDT before proceeding. Furthermore, if we did arrive
663 * here from kmdb, kmdb is probably in a very sickly state, and
664 * shouldn't be entered from the panic flow. We'll suppress that
665 * entry by setting nopanicdebug.
666 */
667 pushq %rax
668 subq $DESCTBR_SIZE, %rsp
669 sidt (%rsp)
682
683 DFTRAP_PUSH
684
685 /*
686 * freeze trap trace.
687 */
688 #ifdef TRAPTRACE
689 leaq trap_trace_freeze(%rip), %r11
690 incl (%r11)
691 #endif
692
693 ENABLE_INTR_FLAGS
694
695 	movq	%rsp, %rdi		/* &regs */
696 xorl %esi, %esi /* clear address */
697 xorl %edx, %edx /* cpuid = 0 */
698 call trap
699
700 SET_SIZE(syserrtrap)
701
702 #elif defined(__i386)
703
704 /*
705 * #DF
706 */
707 ENTRY_NP(syserrtrap)
708 cli /* disable interrupts */
709
710 /*
711 * We share this handler with kmdb (if kmdb is loaded). As such, we
712 * may have reached this point after encountering a #df in kmdb. If
713 * that happens, we'll still be on kmdb's IDT. We need to switch back
714 * to this CPU's IDT before proceeding. Furthermore, if we did arrive
715 * here from kmdb, kmdb is probably in a very sickly state, and
716 * shouldn't be entered from the panic flow. We'll suppress that
717 * entry by setting nopanicdebug.
718 */
719
720 subl $DESCTBR_SIZE, %esp
721 movl %gs:CPU_IDT, %eax
722 sidt (%esp)
723 cmpl DTR_BASE(%esp), %eax
724 je 1f
725
726 movl %eax, DTR_BASE(%esp)
727 movw $_MUL(NIDT, GATE_DESC_SIZE), DTR_LIMIT(%esp)
728 lidt (%esp)
729
730 movl $1, nopanicdebug
731
732 1: addl $DESCTBR_SIZE, %esp
733
734 /*
735 * Check the CPL in the TSS to see what mode
736 * (user or kernel) we took the fault in. At this
737 * point we are running in the context of the double
738 * fault task (dftss) but the CPU's task points to
739 * the previous task (ktss) where the process context
740 * has been saved as the result of the task switch.
741 */
742 movl %gs:CPU_TSS, %eax /* get the TSS */
743 movl TSS_SS(%eax), %ebx /* save the fault SS */
744 movl TSS_ESP(%eax), %edx /* save the fault ESP */
745 testw $CPL_MASK, TSS_CS(%eax) /* user mode ? */
746 jz make_frame
747 movw TSS_SS0(%eax), %ss /* get on the kernel stack */
748 movl TSS_ESP0(%eax), %esp
749
750 /*
751 * Clear the NT flag to avoid a task switch when the process
752 * finally pops the EFL off the stack via an iret. Clear
753 * the TF flag since that is what the processor does for
754 * a normal exception. Clear the IE flag so that interrupts
755 * remain disabled.
756 */
757 movl TSS_EFL(%eax), %ecx
758 andl $_BITNOT(PS_NT|PS_T|PS_IE), %ecx
759 pushl %ecx
760 popfl /* restore the EFL */
761 movw TSS_LDT(%eax), %cx /* restore the LDT */
762 lldt %cx
763
764 /*
765 * Restore process segment selectors.
766 */
767 movw TSS_DS(%eax), %ds
768 movw TSS_ES(%eax), %es
769 movw TSS_FS(%eax), %fs
770 movw TSS_GS(%eax), %gs
771
772 /*
773 * Restore task segment selectors.
774 */
775 movl $KDS_SEL, TSS_DS(%eax)
776 movl $KDS_SEL, TSS_ES(%eax)
777 movl $KDS_SEL, TSS_SS(%eax)
778 movl $KFS_SEL, TSS_FS(%eax)
779 movl $KGS_SEL, TSS_GS(%eax)
780
781 /*
782 * Clear the TS bit, the busy bits in both task
783 * descriptors, and switch tasks.
784 */
785 clts
786 leal gdt0, %ecx
787 movl DFTSS_SEL+4(%ecx), %esi
788 andl $_BITNOT(0x200), %esi
789 movl %esi, DFTSS_SEL+4(%ecx)
790 movl KTSS_SEL+4(%ecx), %esi
791 andl $_BITNOT(0x200), %esi
792 movl %esi, KTSS_SEL+4(%ecx)
793 movw $KTSS_SEL, %cx
794 ltr %cx
795
796 /*
797 * Restore part of the process registers.
798 */
799 movl TSS_EBP(%eax), %ebp
800 movl TSS_ECX(%eax), %ecx
801 movl TSS_ESI(%eax), %esi
802 movl TSS_EDI(%eax), %edi
803
804 make_frame:
805 /*
806 * Make a trap frame. Leave the error code (0) on
807 * the stack since the first word on a trap stack is
808 * unused anyway.
809 */
810 pushl %ebx / fault SS
811 pushl %edx / fault ESP
812 pushl TSS_EFL(%eax) / fault EFL
813 pushl TSS_CS(%eax) / fault CS
814 pushl TSS_EIP(%eax) / fault EIP
815 pushl $0 / error code
816 pushl $T_DBLFLT / trap number 8
817 movl TSS_EBX(%eax), %ebx / restore EBX
818 movl TSS_EDX(%eax), %edx / restore EDX
819 movl TSS_EAX(%eax), %eax / restore EAX
820 sti / enable interrupts
821 jmp cmntrap
822 SET_SIZE(syserrtrap)
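/*
 * The two #DF handlers differ because the delivery mechanisms do:
 * on amd64 the double fault arrives on its own pre-allocated stack
 * (dblfault_stack0, via IST), so the handler can simply build a
 * normal frame and call trap(); on i386 it arrives through a task
 * gate, so the handler above must dig the interrupted state back
 * out of the previous TSS, clear the NT flag and the busy bits by
 * hand, and fabricate a trap frame before rejoining cmntrap.
 */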
823
824 #endif /* __i386 */
825 #endif /* !__xpv */
826
827 /*
828 * #TS
829 */
830 ENTRY_NP(invtsstrap)
831 TRAP_ERR(T_TSSFLT) /* $10 already have error code on stack */
832 jmp cmntrap
833 SET_SIZE(invtsstrap)
834
835 /*
836 * #NP
837 */
838 ENTRY_NP(segnptrap)
839 TRAP_ERR(T_SEGFLT) /* $11 already have error code on stack */
840 #if defined(__amd64)
841 SET_CPU_GSBASE
842 #endif
843 jmp cmntrap
844 SET_SIZE(segnptrap)
845
846 /*
847 * #SS
848 */
849 ENTRY_NP(stktrap)
850 TRAP_ERR(T_STKFLT) /* $12 already have error code on stack */
851 #if defined(__amd64)
852 SET_CPU_GSBASE
853 #endif
854 jmp cmntrap
855 SET_SIZE(stktrap)
856
857 /*
858 * #GP
859 */
860 ENTRY_NP(gptrap)
861 TRAP_ERR(T_GPFLT) /* $13 already have error code on stack */
862 #if defined(__amd64)
863 SET_CPU_GSBASE
864 #endif
865 jmp cmntrap
866 SET_SIZE(gptrap)
867
868 /*
869 * #PF
870 */
871 ENTRY_NP(pftrap)
872 TRAP_ERR(T_PGFLT) /* $14 already have error code on stack */
873 INTR_PUSH
874 #if defined(__xpv)
875
876 #if defined(__amd64)
877 movq %gs:CPU_VCPU_INFO, %r15
878 movq VCPU_INFO_ARCH_CR2(%r15), %r15 /* vcpu[].arch.cr2 */
879 #elif defined(__i386)
880 movl %gs:CPU_VCPU_INFO, %esi
881 movl VCPU_INFO_ARCH_CR2(%esi), %esi /* vcpu[].arch.cr2 */
882 #endif /* __i386 */
883
884 #else /* __xpv */
885
886 #if defined(__amd64)
887 movq %cr2, %r15
888 #elif defined(__i386)
889 movl %cr2, %esi
890 #endif /* __i386 */
891
892 #endif /* __xpv */
893 jmp cmntrap_pushed
894 SET_SIZE(pftrap)
895
896 #if !defined(__amd64)
897
898 .globl idt0_default_r
899
900 /*
901 * #PF pentium bug workaround
902 */
903 ENTRY_NP(pentium_pftrap)
904 pushl %eax
905 movl %cr2, %eax
906 andl $MMU_STD_PAGEMASK, %eax
907
908 cmpl %eax, %cs:idt0_default_r+2 /* fixme */
909
910 je check_for_user_address
911 user_mode:
912 popl %eax
913 pushl $T_PGFLT /* $14 */
914 jmp cmntrap
915 check_for_user_address:
916 /*
917 * Before we assume that we have an unmapped trap on our hands,
918 * check to see if this is a fault from user mode. If it is,
919 * we'll kick back into the page fault handler.
920 */
921 movl 4(%esp), %eax /* error code */
922 andl $PF_ERR_USER, %eax
923 jnz user_mode
924
925 /*
926 * We now know that this is the invalid opcode trap.
927 */
928 popl %eax
929 addl $4, %esp /* pop error code */
930 jmp invoptrap
931 SET_SIZE(pentium_pftrap)
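/*
 * For context, this appears to be the classic Pentium "F00F"
 * workaround: the IDT is mapped so that the offending locked
 * cmpxchg8b takes a page fault rather than hanging the processor.
 * A kernel-mode #PF whose faulting address lands in the IDT page is
 * therefore assumed to be that erratum and rerouted to invoptrap,
 * so it is handled as the invalid opcode it really is; user-mode
 * faults on that page, and all faults elsewhere, still take the
 * normal T_PGFLT path.
 */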
932
933 #endif /* !__amd64 */
934
935 ENTRY_NP(resvtrap)
936 TRAP_NOERR(T_RESVTRAP) /* (reserved) */
937 jmp cmntrap
938 SET_SIZE(resvtrap)
939
940 /*
941 * #MF
942 */
943 ENTRY_NP(ndperr)
944 TRAP_NOERR(T_EXTERRFLT) /* $16 */
945 jmp cmninttrap
946 SET_SIZE(ndperr)
947
948 /*
949 * #AC
950 */
951 ENTRY_NP(achktrap)
952 TRAP_ERR(T_ALIGNMENT) /* $17 */
953 jmp cmntrap
954 SET_SIZE(achktrap)
955
956 /*
957 * #MC
958 */
959 .globl cmi_mca_trap /* see uts/i86pc/os/cmi.c */
960
961 #if defined(__amd64)
962
963 ENTRY_NP(mcetrap)
964 TRAP_NOERR(T_MCE) /* $18 */
965
966 SET_CPU_GSBASE
967
968 INTR_PUSH
969 INTGATE_INIT_KERNEL_FLAGS
970
971 TRACE_PTR(%rdi, %rbx, %ebx, %rcx, $TT_TRAP)
972 TRACE_REGS(%rdi, %rsp, %rbx, %rcx)
973 TRACE_STAMP(%rdi)
974
975 movq %rsp, %rbp
976
977 movq %rsp, %rdi /* arg0 = struct regs *rp */
978 call cmi_mca_trap /* cmi_mca_trap(rp); */
979
980 jmp _sys_rtt
981 SET_SIZE(mcetrap)
982
983 #else
984
985 ENTRY_NP(mcetrap)
986 TRAP_NOERR(T_MCE) /* $18 */
987
988 INTR_PUSH
989 INTGATE_INIT_KERNEL_FLAGS
990
991 TRACE_PTR(%edi, %ebx, %ebx, %ecx, $TT_TRAP)
992 TRACE_REGS(%edi, %esp, %ebx, %ecx)
993 TRACE_STAMP(%edi)
994
995 movl %esp, %ebp
996
997 movl %esp, %ecx
998 pushl %ecx /* arg0 = struct regs *rp */
999 call cmi_mca_trap /* cmi_mca_trap(rp) */
1000 addl $4, %esp /* pop arg0 */
1001
1002 jmp _sys_rtt
1003 SET_SIZE(mcetrap)
1004
1005 #endif
1006
1007 /*
1008 * #XF
1009 */
1010 ENTRY_NP(xmtrap)
1011 TRAP_NOERR(T_SIMDFPE) /* $19 */
1012 jmp cmninttrap
1013 SET_SIZE(xmtrap)
1014
1015 ENTRY_NP(invaltrap)
1016 TRAP_NOERR(T_INVALTRAP) /* very invalid */
1017 jmp cmntrap
1018 SET_SIZE(invaltrap)
1019
1020 .globl fasttable
1021
1022 #if defined(__amd64)
1023
1024 ENTRY_NP(fasttrap)
1025 cmpl $T_LASTFAST, %eax
1026 ja 1f
1027 orl %eax, %eax /* (zero extend top 32-bits) */
1028 leaq fasttable(%rip), %r11
1029 leaq (%r11, %rax, CLONGSIZE), %r11
1030 movq (%r11), %r11
1031 INDIRECT_JMP_REG(r11)
1032 1:
1033 /*
1034 * Fast syscall number was illegal. Make it look
1035 * as if the INT failed. Modify %rip to point before the
1036 * INT, push the expected error code and fake a GP fault.
1037 *
1038 * XXX Why make the error code be offset into idt + 1?
1039 * Instead we should push a real (soft?) error code
1040 * on the stack and #gp handler could know about fasttraps?
1041 */
1042 XPV_TRAP_POP
1043
1044 subq $2, (%rsp) /* XXX int insn 2-bytes */
1045 pushq $_CONST(_MUL(T_FASTTRAP, GATE_DESC_SIZE) + 2)
1046
1047 #if defined(__xpv)
1048 pushq %r11
1049 pushq %rcx
1050 #endif
1051 jmp gptrap
1052 SET_SIZE(fasttrap)
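/*
 * To spell out the fallback above: an "int $imm8" instruction is
 * two bytes, so backing the saved %rip up by 2 re-points it at the
 * INT itself.  The value pushed in place of an error code is the
 * byte offset of the T_FASTTRAP gate within the IDT plus 2, i.e.
 * roughly the IDT-flavored error code a #GP raised while delivering
 * that INT would carry, which is presumably what lets the #GP path
 * report this as a failed INT.  The XXX above questions whether
 * this encoding is the right interface.
 */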
1053
1054 #elif defined(__i386)
1055
1056 ENTRY_NP(fasttrap)
1057 cmpl $T_LASTFAST, %eax
1058 ja 1f
1059 jmp *%cs:fasttable(, %eax, CLONGSIZE)
1060 1:
1061 /*
1062 * Fast syscall number was illegal. Make it look
1063 * as if the INT failed. Modify %eip to point before the
1064 * INT, push the expected error code and fake a GP fault.
1065 *
1066 * XXX Why make the error code be offset into idt + 1?
1067 * Instead we should push a real (soft?) error code
1068 * on the stack and #gp handler could know about fasttraps?
1069 */
1070 subl $2, (%esp) /* XXX int insn 2-bytes */
1071 pushl $_CONST(_MUL(T_FASTTRAP, GATE_DESC_SIZE) + 2)
1072 jmp gptrap
1073 SET_SIZE(fasttrap)
1074
1075 #endif /* __i386 */
1076
1077 ENTRY_NP(dtrace_ret)
1078 TRAP_NOERR(T_DTRACE_RET)
1079 jmp dtrace_trap
1080 SET_SIZE(dtrace_ret)
1081
1082 #if defined(__amd64)
1083
1084 /*
1085 * RFLAGS 24 bytes up the stack from %rsp.
1086 * XXX a constant would be nicer.
1087 */
1088 ENTRY_NP(fast_null)
1089 XPV_TRAP_POP
1090 orq $PS_C, 24(%rsp) /* set carry bit in user flags */
1091 call x86_md_clear
1092 jmp tr_iret_auto
1093 /*NOTREACHED*/
1094 SET_SIZE(fast_null)
1095
1096 #elif defined(__i386)
1097
1098 ENTRY_NP(fast_null)
1099 orw $PS_C, 8(%esp) /* set carry bit in user flags */
1100 IRET
1101 SET_SIZE(fast_null)
1102
1103 #endif /* __i386 */
1104
1105 /*
1106 * Interrupts start at 32
1107 */
1108 #define MKIVCT(n) \
1109 ENTRY_NP(ivct/**/n) \
1110 push $0; \
1111 push $n - 0x20; \
1112 jmp cmnint; \
1113 SET_SIZE(ivct/**/n)
1114
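/*
 * MKIVCT(n) stamps out one small entry point per hardware interrupt
 * vector: a zero "error code", the vector rebased so that 0x20
 * becomes interrupt 0, and a jump to the common interrupt path.
 * The empty comment between "ivct" and "n" in the macro body is the
 * traditional cpp token-pasting idiom used in these .s files, and
 * yields the symbols ivct32 through ivct255.
 */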
1115 MKIVCT(32)
1116 MKIVCT(33)
1117 MKIVCT(34)
1118 MKIVCT(35)
1119 MKIVCT(36)
1120 MKIVCT(37)
1121 MKIVCT(38)
1122 MKIVCT(39)
1123 MKIVCT(40)
1124 MKIVCT(41)
1320 MKIVCT(237)
1321 MKIVCT(238)
1322 MKIVCT(239)
1323 MKIVCT(240)
1324 MKIVCT(241)
1325 MKIVCT(242)
1326 MKIVCT(243)
1327 MKIVCT(244)
1328 MKIVCT(245)
1329 MKIVCT(246)
1330 MKIVCT(247)
1331 MKIVCT(248)
1332 MKIVCT(249)
1333 MKIVCT(250)
1334 MKIVCT(251)
1335 MKIVCT(252)
1336 MKIVCT(253)
1337 MKIVCT(254)
1338 MKIVCT(255)
1339
1340 #endif /* __lint */
|
34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 * SUCH DAMAGE.
39 *
40 * $FreeBSD: src/sys/amd64/amd64/exception.S,v 1.113 2003/10/15 02:04:52 peter Exp $
41 */
42
43 #include <sys/asm_linkage.h>
44 #include <sys/asm_misc.h>
45 #include <sys/trap.h>
46 #include <sys/psw.h>
47 #include <sys/regset.h>
48 #include <sys/privregs.h>
49 #include <sys/dtrace.h>
50 #include <sys/x86_archext.h>
51 #include <sys/traptrace.h>
52 #include <sys/machparam.h>
53
54 #include "assym.h"
55
56 /*
57  * push $0 on stack for traps that do not
58  * generate an error code.  This is so the rest
59  * of the kernel can expect a consistent stack
60  * from any exception.
61  *
62  * Note that under the hypervisor all amd64 exceptions
63  * arrive with %r11 and %rcx on the stack; XPV_TRAP_POP
64  * pops them back into their registers so the frame gets
65  * saved just as it is when running native.
66 */
67
68 #if defined(__xpv)
69
70 #define NPTRAP_NOERR(trapno) \
71 pushq $0; \
72 pushq $trapno
73
74 #define TRAP_NOERR(trapno) \
75 XPV_TRAP_POP; \
76 NPTRAP_NOERR(trapno)
77
78 /*
79 * error code already pushed by hw
80 * onto stack.
81 */
82 #define TRAP_ERR(trapno) \
83 XPV_TRAP_POP; \
84 pushq $trapno
85
86 #else /* __xpv */
87
88 #define TRAP_NOERR(trapno) \
89 push $0; \
90 push $trapno
91
92 #define NPTRAP_NOERR(trapno) TRAP_NOERR(trapno)
93
94 /*
95 * error code already pushed by hw
96 * onto stack.
97 */
98 #define TRAP_ERR(trapno) \
99 push $trapno
100
101 #endif /* __xpv */
102
103 /*
104 * These are the stacks used on cpu0 for taking double faults,
105 * NMIs and MCEs.
106 *
107 * We define them here instead of in a C file so that we can page-align
108 * them (gcc won't do that in a .c file).
109 */
110 .data
111 DGDEF3(dblfault_stack0, DEFAULTSTKSZ, MMU_PAGESIZE)
112 .fill DEFAULTSTKSZ, 1, 0
113 DGDEF3(nmi_stack0, DEFAULTSTKSZ, MMU_PAGESIZE)
114 .fill DEFAULTSTKSZ, 1, 0
115 DGDEF3(mce_stack0, DEFAULTSTKSZ, MMU_PAGESIZE)
116 .fill DEFAULTSTKSZ, 1, 0
117
118 /*
119 * #DE
120 */
121 ENTRY_NP(div0trap)
122 TRAP_NOERR(T_ZERODIV) /* $0 */
123 jmp cmntrap
124 SET_SIZE(div0trap)
125
126 /*
127 * #DB
128 *
129 * Fetch %dr6 and clear it, handing off the value to the
130  * cmntrap code in %r15
131 */
132 ENTRY_NP(dbgtrap)
133 TRAP_NOERR(T_SGLSTP) /* $1 */
134
135 #if !defined(__xpv) /* no sysenter support yet */
136 /*
137 * If we get here as a result of single-stepping a sysenter
138 * instruction, we suddenly find ourselves taking a #db
139 * in kernel mode -before- we've swapgs'ed. So before we can
140 * take the trap, we do the swapgs here, and fix the return
141 * %rip in trap() so that we return immediately after the
142 * swapgs in the sysenter handler to avoid doing the swapgs again.
143 *
144 * Nobody said that the design of sysenter was particularly
145 * elegant, did they?
146 */
147
148 pushq %r11
149
150 /*
151 * At this point the stack looks like this:
152 *
153 * (high address) r_ss
154 * r_rsp
173 jne 2f
174 1: swapgs
175 2: lfence /* swapgs mitigation */
176 popq %r11
177 #endif /* !__xpv */
178
179 INTR_PUSH
180 #if defined(__xpv)
181 movl $6, %edi
182 call kdi_dreg_get
183 movq %rax, %r15 /* %db6 -> %r15 */
184 movl $6, %edi
185 movl $0, %esi
186 call kdi_dreg_set /* 0 -> %db6 */
187 #else
188 movq %db6, %r15
189 xorl %eax, %eax
190 movq %rax, %db6
191 #endif
192
193 jmp cmntrap_pushed
194 SET_SIZE(dbgtrap)
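/*
 * Why the fetch-and-clear above: the status bits in %dr6 are sticky
 * -- the processor sets them on a debug exception but never clears
 * them -- so the handler snapshots the value for cmntrap (in %r15)
 * and zeroes the register itself.  A domain running on the
 * hypervisor does not poke the debug registers directly, which is
 * presumably why the same dance is done through kdi_dreg_get() and
 * kdi_dreg_set() there.
 */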
195
196 #if !defined(__xpv)
197
198 /*
199 * Macro to set the gsbase or kgsbase to the address of the struct cpu
200 * for this processor. If we came from userland, set kgsbase else
201 * set gsbase. We find the proper cpu struct by looping through
202 * the cpu structs for all processors till we find a match for the gdt
203 * of the trapping processor. The stack is expected to be pointing at
204 * the standard regs pushed by hardware on a trap (plus error code and trapno).
205 *
206 * It's ok for us to clobber gsbase here (and possibly end up with both gsbase
207 * and kgsbase set to the same value) because we're not going back the normal
208 * way out of here (via IRET). Where we're going, we don't need no user %gs.
209 */
210 #define SET_CPU_GSBASE \
211 subq $REGOFF_TRAPNO, %rsp; /* save regs */ \
212 movq %rax, REGOFF_RAX(%rsp); \
213 movq %rbx, REGOFF_RBX(%rsp); \
214 movq %rcx, REGOFF_RCX(%rsp); \
215 movq %rdx, REGOFF_RDX(%rsp); \
237 jne 4f; /* no, go set KGSBASE */ \
238 movl $MSR_AMD_GSBASE, %ecx; /* yes, set GSBASE */ \
239 mfence; /* OPTERON_ERRATUM_88 */ \
240 4: \
241 movq %rax, %rdx; /* write base register */ \
242 shrq $32, %rdx; \
243 wrmsr; \
244 movq REGOFF_RDX(%rbp), %rdx; /* restore regs */ \
245 movq REGOFF_RCX(%rbp), %rcx; \
246 movq REGOFF_RBX(%rbp), %rbx; \
247 movq REGOFF_RAX(%rbp), %rax; \
248 movq %rbp, %rsp; \
249 movq REGOFF_RBP(%rsp), %rbp; \
250 addq $REGOFF_TRAPNO, %rsp /* pop stack */
251
252 #else /* __xpv */
253
254 #define SET_CPU_GSBASE /* noop on the hypervisor */
255
256 #endif /* __xpv */
257
258
259 /*
260 * #NMI
261 *
262 * XXPV: See 6532669.
263 */
264 ENTRY_NP(nmiint)
265 TRAP_NOERR(T_NMIFLT) /* $2 */
266
267 SET_CPU_GSBASE
268
269 /*
270 * Save all registers and setup segment registers
271 * with kernel selectors.
272 */
273 INTR_PUSH
274 INTGATE_INIT_KERNEL_FLAGS
275
276 TRACE_PTR(%r12, %rax, %eax, %rdx, $TT_TRAP)
277 TRACE_REGS(%r12, %rsp, %rax, %rbx)
278 TRACE_STAMP(%r12)
279
280 movq %rsp, %rbp
281
282 movq %rbp, %rdi
283 call av_dispatch_nmivect
284
285 INTR_POP
286 call x86_md_clear
287 jmp tr_iret_auto
288 /*NOTREACHED*/
289 SET_SIZE(nmiint)
290
291 /*
292 * #BP
293 */
294 ENTRY_NP(brktrap)
295 XPV_TRAP_POP
296 cmpw $KCS_SEL, 8(%rsp)
297 jne bp_user
298
299 /*
300 * This is a breakpoint in the kernel -- it is very likely that this
301 * is DTrace-induced. To unify DTrace handling, we spoof this as an
302 * invalid opcode (#UD) fault. Note that #BP is a trap, not a fault --
303 * we must decrement the trapping %rip to make it appear as a fault.
304 * We then push a non-zero error code to indicate that this is coming
305 * from #BP.
306 */
307 decq (%rsp)
308 push $1 /* error code -- non-zero for #BP */
309 jmp ud_kernel
310
311 bp_user:
312
313 NPTRAP_NOERR(T_BPTFLT) /* $3 */
314 jmp dtrace_trap
315
316 SET_SIZE(brktrap)
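/*
 * One detail worth spelling out in the kernel-#BP spoof above: #BP
 * is a trap, so the saved %rip already points past the int3;
 * decrementing it makes the frame look like the fault a #UD would
 * have produced on the same byte.  The non-zero error code pushed
 * here is what later lets the shared #UD path tell the two apart
 * and de-spoof the trap back into T_BPTFLT before calling into the
 * kernel proper (see the REGOFF_ERR check in invoptrap below).
 */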
317
318 /*
319 * #OF
320 */
321 ENTRY_NP(ovflotrap)
322 TRAP_NOERR(T_OVFLW) /* $4 */
323 jmp cmntrap
324 SET_SIZE(ovflotrap)
325
326 /*
327 * #BR
328 */
329 ENTRY_NP(boundstrap)
330 TRAP_NOERR(T_BOUNDFLT) /* $5 */
331 jmp cmntrap
332 SET_SIZE(boundstrap)
333
334 ENTRY_NP(invoptrap)
335
336 XPV_TRAP_POP
337
338 cmpw $KCS_SEL, 8(%rsp)
339 jne ud_user
340
341 #if defined(__xpv)
342 movb $0, 12(%rsp) /* clear saved upcall_mask from %cs */
343 #endif
344 push $0 /* error code -- zero for #UD */
345 ud_kernel:
346 push $0xdddd /* a dummy trap number */
347 INTR_PUSH
348 movq REGOFF_RIP(%rsp), %rdi
349 movq REGOFF_RSP(%rsp), %rsi
350 movq REGOFF_RAX(%rsp), %rdx
351 pushq (%rsi)
352 movq %rsp, %rsi
353 subq $8, %rsp
375 movq 24(%rsp), %rax /* load calling RIP */
376 addq $1, %rax /* increment over trapping instr */
377 movq %rax, 8(%rsp) /* store calling RIP */
378 movq 32(%rsp), %rax /* load calling CS */
379 movq %rax, 16(%rsp) /* store calling CS */
380 movq 40(%rsp), %rax /* load calling RFLAGS */
381 movq %rax, 24(%rsp) /* store calling RFLAGS */
382 movq 48(%rsp), %rax /* load calling RSP */
383 subq $8, %rax /* make room for %rbp */
384 movq %rax, 32(%rsp) /* store calling RSP */
385 movq 56(%rsp), %rax /* load calling SS */
386 movq %rax, 40(%rsp) /* store calling SS */
387 movq 32(%rsp), %rax /* reload calling RSP */
388 movq %rbp, (%rax) /* store %rbp there */
389 popq %rax /* pop off temp */
390 jmp tr_iret_kernel /* return from interrupt */
391 /*NOTREACHED*/
392
393 ud_leave:
394 /*
395 * We must emulate a "leave", which is the same as a "movq %rbp,
396 * %rsp" followed by a "popq %rbp". We can exploit the fact
397 * that the %rsp is explicitly saved to effect the pop without
398 * having to reshuffle the other data pushed for the trap.
399 */
400
401 INTR_POP
402 pushq %rax /* push temp */
403 movq 8(%rsp), %rax /* load calling RIP */
404 addq $1, %rax /* increment over trapping instr */
405 movq %rax, 8(%rsp) /* store calling RIP */
406 movq (%rbp), %rax /* get new %rbp */
407 addq $8, %rbp /* adjust new %rsp */
408 movq %rbp, 32(%rsp) /* store new %rsp */
409 movq %rax, %rbp /* set new %rbp */
410 popq %rax /* pop off temp */
411 jmp tr_iret_kernel /* return from interrupt */
412 /*NOTREACHED*/
413
414 ud_nop:
415 /*
416 * We must emulate a "nop". This is obviously not hard: we need only
417 * advance the %rip by one.
418 */
419 INTR_POP
420 incq (%rsp)
436 /*
437 * We're going to let the kernel handle this as a normal #UD. If,
438 * however, we came through #BP and are spoofing #UD (in this case,
439 * the stored error value will be non-zero), we need to de-spoof
440 * the trap by incrementing %rip and pushing T_BPTFLT.
441 */
442 cmpq $0, REGOFF_ERR(%rsp)
443 je ud_ud
444 incq REGOFF_RIP(%rsp)
445 addq $REGOFF_RIP, %rsp
446 NPTRAP_NOERR(T_BPTFLT) /* $3 */
447 jmp cmntrap
448
449 ud_ud:
450 addq $REGOFF_RIP, %rsp
451 ud_user:
452 NPTRAP_NOERR(T_ILLINST)
453 jmp cmntrap
454 SET_SIZE(invoptrap)
455
456 /*
457 * #NM
458 */
459
460 ENTRY_NP(ndptrap)
461 TRAP_NOERR(T_NOEXTFLT) /* $0 */
462 SET_CPU_GSBASE
463 jmp cmntrap
464 SET_SIZE(ndptrap)
465
466 #if !defined(__xpv)
467
468 /*
469 * #DF
470 */
471 ENTRY_NP(syserrtrap)
472 pushq $T_DBLFLT
473 SET_CPU_GSBASE
474
475 /*
476 * We share this handler with kmdb (if kmdb is loaded). As such, we
477 * may have reached this point after encountering a #df in kmdb. If
478 * that happens, we'll still be on kmdb's IDT. We need to switch back
479 * to this CPU's IDT before proceeding. Furthermore, if we did arrive
480 * here from kmdb, kmdb is probably in a very sickly state, and
481 * shouldn't be entered from the panic flow. We'll suppress that
482 * entry by setting nopanicdebug.
483 */
484 pushq %rax
485 subq $DESCTBR_SIZE, %rsp
486 sidt (%rsp)
499
500 DFTRAP_PUSH
501
502 /*
503 * freeze trap trace.
504 */
505 #ifdef TRAPTRACE
506 leaq trap_trace_freeze(%rip), %r11
507 incl (%r11)
508 #endif
509
510 ENABLE_INTR_FLAGS
511
512 	movq	%rsp, %rdi		/* &regs */
513 xorl %esi, %esi /* clear address */
514 xorl %edx, %edx /* cpuid = 0 */
515 call trap
516
517 SET_SIZE(syserrtrap)
518
519 #endif /* !__xpv */
520
521 /*
522 * #TS
523 */
524 ENTRY_NP(invtsstrap)
525 TRAP_ERR(T_TSSFLT) /* $10 already have error code on stack */
526 jmp cmntrap
527 SET_SIZE(invtsstrap)
528
529 /*
530 * #NP
531 */
532 ENTRY_NP(segnptrap)
533 TRAP_ERR(T_SEGFLT) /* $11 already have error code on stack */
534 SET_CPU_GSBASE
535 jmp cmntrap
536 SET_SIZE(segnptrap)
537
538 /*
539 * #SS
540 */
541 ENTRY_NP(stktrap)
542 TRAP_ERR(T_STKFLT) /* $12 already have error code on stack */
543 SET_CPU_GSBASE
544 jmp cmntrap
545 SET_SIZE(stktrap)
546
547 /*
548 * #GP
549 */
550 ENTRY_NP(gptrap)
551 TRAP_ERR(T_GPFLT) /* $13 already have error code on stack */
552 SET_CPU_GSBASE
553 jmp cmntrap
554 SET_SIZE(gptrap)
555
556 /*
557 * #PF
558 */
559 ENTRY_NP(pftrap)
560 TRAP_ERR(T_PGFLT) /* $14 already have error code on stack */
561 INTR_PUSH
562 #if defined(__xpv)
563
564 movq %gs:CPU_VCPU_INFO, %r15
565 movq VCPU_INFO_ARCH_CR2(%r15), %r15 /* vcpu[].arch.cr2 */
566
567 #else /* __xpv */
568
569 movq %cr2, %r15
570
571 #endif /* __xpv */
572 jmp cmntrap_pushed
573 SET_SIZE(pftrap)
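/*
 * The only real work in pftrap is recovering the faulting address
 * for cmntrap_pushed: natively it is read straight out of %cr2 into
 * %r15, while on the hypervisor %cr2 is not read directly and the
 * address comes instead from the arch.cr2 field of the vcpu_info
 * structure that the hypervisor keeps up to date for the guest.
 */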
574
575 ENTRY_NP(resvtrap)
576 TRAP_NOERR(T_RESVTRAP) /* (reserved) */
577 jmp cmntrap
578 SET_SIZE(resvtrap)
579
580 /*
581 * #MF
582 */
583 ENTRY_NP(ndperr)
584 TRAP_NOERR(T_EXTERRFLT) /* $16 */
585 jmp cmninttrap
586 SET_SIZE(ndperr)
587
588 /*
589 * #AC
590 */
591 ENTRY_NP(achktrap)
592 TRAP_ERR(T_ALIGNMENT) /* $17 */
593 jmp cmntrap
594 SET_SIZE(achktrap)
595
596 /*
597 * #MC
598 */
599 .globl cmi_mca_trap /* see uts/i86pc/os/cmi.c */
600
601 ENTRY_NP(mcetrap)
602 TRAP_NOERR(T_MCE) /* $18 */
603
604 SET_CPU_GSBASE
605
606 INTR_PUSH
607 INTGATE_INIT_KERNEL_FLAGS
608
609 TRACE_PTR(%rdi, %rbx, %ebx, %rcx, $TT_TRAP)
610 TRACE_REGS(%rdi, %rsp, %rbx, %rcx)
611 TRACE_STAMP(%rdi)
612
613 movq %rsp, %rbp
614
615 movq %rsp, %rdi /* arg0 = struct regs *rp */
616 call cmi_mca_trap /* cmi_mca_trap(rp); */
617
618 jmp _sys_rtt
619 SET_SIZE(mcetrap)
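/*
 * Roughly what mcetrap arranges before handing off: a machine check
 * can arrive at any point, including before the swapgs on the way
 * in from userland, so SET_CPU_GSBASE re-derives the cpu pointer
 * the hard way.  The registers are then saved and %rbp established
 * so that cmi_mca_trap() is called with a normal struct regs
 * pointer, after which the handler resumes through the common
 * _sys_rtt return path.
 */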
620
621 /*
622 * #XF
623 */
624 ENTRY_NP(xmtrap)
625 TRAP_NOERR(T_SIMDFPE) /* $19 */
626 jmp cmninttrap
627 SET_SIZE(xmtrap)
628
629 ENTRY_NP(invaltrap)
630 TRAP_NOERR(T_INVALTRAP) /* very invalid */
631 jmp cmntrap
632 SET_SIZE(invaltrap)
633
634 .globl fasttable
635
636 ENTRY_NP(fasttrap)
637 cmpl $T_LASTFAST, %eax
638 ja 1f
639 orl %eax, %eax /* (zero extend top 32-bits) */
640 leaq fasttable(%rip), %r11
641 leaq (%r11, %rax, CLONGSIZE), %r11
642 movq (%r11), %r11
643 INDIRECT_JMP_REG(r11)
644 1:
645 /*
646 * Fast syscall number was illegal. Make it look
647 * as if the INT failed. Modify %rip to point before the
648 * INT, push the expected error code and fake a GP fault.
649 *
650 * XXX Why make the error code be offset into idt + 1?
651 * Instead we should push a real (soft?) error code
652 * on the stack and #gp handler could know about fasttraps?
653 */
654 XPV_TRAP_POP
655
656 subq $2, (%rsp) /* XXX int insn 2-bytes */
657 pushq $_CONST(_MUL(T_FASTTRAP, GATE_DESC_SIZE) + 2)
658
659 #if defined(__xpv)
660 pushq %r11
661 pushq %rcx
662 #endif
663 jmp gptrap
664 SET_SIZE(fasttrap)
665
666 ENTRY_NP(dtrace_ret)
667 TRAP_NOERR(T_DTRACE_RET)
668 jmp dtrace_trap
669 SET_SIZE(dtrace_ret)
670
671 /*
672 * RFLAGS 24 bytes up the stack from %rsp.
673 * XXX a constant would be nicer.
674 */
675 ENTRY_NP(fast_null)
676 XPV_TRAP_POP
677 orq $PS_C, 24(%rsp) /* set carry bit in user flags */
678 call x86_md_clear
679 jmp tr_iret_auto
680 /*NOTREACHED*/
681 SET_SIZE(fast_null)
682
683 /*
684 * Interrupts start at 32
685 */
686 #define MKIVCT(n) \
687 ENTRY_NP(ivct/**/n) \
688 push $0; \
689 push $n - 0x20; \
690 jmp cmnint; \
691 SET_SIZE(ivct/**/n)
692
693 MKIVCT(32)
694 MKIVCT(33)
695 MKIVCT(34)
696 MKIVCT(35)
697 MKIVCT(36)
698 MKIVCT(37)
699 MKIVCT(38)
700 MKIVCT(39)
701 MKIVCT(40)
702 MKIVCT(41)
898 MKIVCT(237)
899 MKIVCT(238)
900 MKIVCT(239)
901 MKIVCT(240)
902 MKIVCT(241)
903 MKIVCT(242)
904 MKIVCT(243)
905 MKIVCT(244)
906 MKIVCT(245)
907 MKIVCT(246)
908 MKIVCT(247)
909 MKIVCT(248)
910 MKIVCT(249)
911 MKIVCT(250)
912 MKIVCT(251)
913 MKIVCT(252)
914 MKIVCT(253)
915 MKIVCT(254)
916 MKIVCT(255)
917
|