	SET_CPU_GSBASE

	/*
	 * Save all registers and set up segment registers
	 * with kernel selectors.
	 */
	INTR_PUSH
	INTGATE_INIT_KERNEL_FLAGS

	TRACE_PTR(%r12, %rax, %eax, %rdx, $TT_TRAP)
	TRACE_REGS(%r12, %rsp, %rax, %rbx)
	TRACE_STAMP(%r12)

	movq	%rsp, %rbp

	movq	%rbp, %rdi
	call	av_dispatch_nmivect

	INTR_POP
	call	x86_md_clear
	jmp	tr_iret_auto
	/*NOTREACHED*/
	SET_SIZE(nmiint)
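	/*
	 * Flow of the #NMI handler above: INTR_PUSH saves the full
	 * register set on the stack, and that frame's address is passed
	 * in %rdi (the first-argument register in the SysV AMD64 ABI)
	 * to av_dispatch_nmivect(), which presumably walks the
	 * registered NMI handlers.  On the way out, INTR_POP restores
	 * state, x86_md_clear runs the microarchitectural-buffer (MDS)
	 * flush, and tr_iret_auto returns through the KPTI trampoline
	 * machinery when it is in use.
	 */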

#elif defined(__i386)

	/*
	 * #NMI
	 */
	ENTRY_NP(nmiint)
	TRAP_NOERR(T_NMIFLT)	/* $2 */

	/*
	 * Save all registers and set up segment registers
	 * with kernel selectors.
	 */
	INTR_PUSH
	INTGATE_INIT_KERNEL_FLAGS

	TRACE_PTR(%edi, %ebx, %ebx, %ecx, $TT_TRAP)

	/*
	 * ... remainder of the 32-bit #NMI handler and the entry
	 * points that follow it elided ...
	 */

	ENTRY_NP(xmtrap)
	TRAP_NOERR(T_SIMDFPE)	/* $19 */
	jmp	cmninttrap
	SET_SIZE(xmtrap)

	ENTRY_NP(invaltrap)
	TRAP_NOERR(T_INVALTRAP)	/* very invalid */
	jmp	cmntrap
	SET_SIZE(invaltrap)
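	/*
	 * #XM (T_SIMDFPE, vector 19) is the SSE/SIMD floating-point
	 * exception; it is simply forwarded to the common trap path
	 * (cmninttrap).  invaltrap is presumably the handler installed
	 * in IDT slots that should never fire, hence "very invalid".
	 */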

	.globl	fasttable

#if defined(__amd64)

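	/*
	 * Dispatch sketch for the code below (names as in the source):
	 * the fast trap number arrives in %eax and each fasttable entry
	 * is one pointer (CLONGSIZE bytes) wide, so the handler is
	 * roughly
	 *
	 *	if (%eax <= T_LASTFAST)
	 *		(*fasttable[%eax])();
	 *
	 * The target is loaded into %r11 and entered through
	 * INDIRECT_JMP_REG rather than a bare "jmp *(%r11)" so the
	 * indirect branch can be routed through a retpoline thunk when
	 * the Spectre v2 mitigations call for it.
	 */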
	ENTRY_NP(fasttrap)
	cmpl	$T_LASTFAST, %eax
	ja	1f
	orl	%eax, %eax	/* (zero extend top 32-bits) */
	leaq	fasttable(%rip), %r11
	leaq	(%r11, %rax, CLONGSIZE), %r11
	movq	(%r11), %r11
	INDIRECT_JMP_REG(r11)
1:
	/*
	 * Fast syscall number was illegal.  Make it look
	 * as if the INT failed.  Modify %rip to point before the
	 * INT, push the expected error code and fake a GP fault.
	 *
	 * XXX Why make the error code be an offset into the idt + 1?
	 * Instead we should push a real (soft?) error code
	 * on the stack and have the #gp handler know about fasttraps.
	 */
	XPV_TRAP_POP

	subq	$2, (%rsp)	/* XXX int insn 2-bytes */
	pushq	$_CONST(_MUL(T_FASTTRAP, GATE_DESC_SIZE) + 2)

#if defined(__xpv)
	pushq	%r11
	pushq	%rcx
#endif
	jmp	gptrap
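	/*
	 * Notes on the error path above: "int $imm8" is a two-byte
	 * instruction (0xCD plus the vector byte), so backing the saved
	 * %rip up by 2 points it back at the INT, making the fault
	 * appear to come from the INT itself.  The pushed constant
	 * approximates the selector error code a #GP delivered through
	 * the IDT would carry.  Under Xen, %r11 and %rcx are pushed
	 * back so that gptrap receives the hypervisor trap frame its
	 * entry code expects (XPV_TRAP_POP removed them above).
	 */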
	/* ... 32-bit fasttrap variant elided down to its error path ... */

	pushl	$_CONST(_MUL(T_FASTTRAP, GATE_DESC_SIZE) + 2)
	jmp	gptrap
	SET_SIZE(fasttrap)

#endif	/* __i386 */

	ENTRY_NP(dtrace_ret)
	TRAP_NOERR(T_DTRACE_RET)
	jmp	dtrace_trap
	SET_SIZE(dtrace_ret)

#if defined(__amd64)

	/*
	 * RFLAGS 24 bytes up the stack from %rsp.
	 * XXX a constant would be nicer.
	 */
	ENTRY_NP(fast_null)
	XPV_TRAP_POP
	orq	$PS_C, 24(%rsp)	/* set carry bit in user flags */
	call	x86_md_clear
	jmp	tr_iret_auto
	/*NOTREACHED*/
	SET_SIZE(fast_null)

#elif defined(__i386)

	ENTRY_NP(fast_null)
	orw	$PS_C, 8(%esp)	/* set carry bit in user flags */
	IRET
	SET_SIZE(fast_null)

#endif	/* __i386 */
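/*
 * Both fast_null variants simply set PS_C (the carry flag) in the
 * interrupted program's saved flags and return; userland can
 * presumably test carry to see that the null fast trap did nothing.
 * The 8(%esp) offset in the 32-bit version follows from the iret
 * frame layout: EIP at 0, CS at 4, EFLAGS at 8.
 */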

/*
 * Interrupts start at 32
 */
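/*
 * The first 32 IDT vectors (0x0-0x1f) are architecturally reserved
 * for CPU exceptions, so hardware interrupt vectors begin at 0x20.
 * Each generated ivctN stub pushes a zero placeholder where an error
 * code would go (so every frame has the same shape) and then what is
 * presumably the vector number relative to 0x20; for example, ivct33
 * would push $0 and then $1 (33 - 0x20).
 */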
#define	MKIVCT(n)	\
	ENTRY_NP(ivct/**/n)	\
	push	$0;	\
	push	$n - 0x20;	\