1 /*
2 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2013, 2014 by Delphix. All rights reserved.
4 * Copyright (c) 2017 Joyent, Inc.
5 */
6
7 /*
8 * Copyright (c) 1989, 1990 William F. Jolitz.
9 * Copyright (c) 1990 The Regents of the University of California.
10 * All rights reserved.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. All advertising materials mentioning features or use of this software
21 * must display the following acknowledgement:
22 * This product includes software developed by the University of
23 * California, Berkeley and its contributors.
24 * 4. Neither the name of the University nor the names of its contributors
95 XPV_TRAP_POP; \
96 pushq $trapno
97
98 #else /* __xpv && __amd64 */
99
100 #define TRAP_NOERR(trapno) \
101 push $0; \
102 push $trapno
103
104 #define NPTRAP_NOERR(trapno) TRAP_NOERR(trapno)
105
106 /*
107 * error code already pushed by hw
108 * onto stack.
109 */
110 #define TRAP_ERR(trapno) \
111 push $trapno
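/*
 * Only a handful of exceptions push an error code in hardware
 * (#DF, #TS, #NP, #SS, #GP, #PF and #AC); their handlers use
 * TRAP_ERR, which pushes just the trap number.  Everything else
 * goes through TRAP_NOERR, which pushes a dummy $0 first so the
 * resulting stack layout is identical in both cases.
 */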
112
113 #endif /* __xpv && __amd64 */
114
115
116 /*
117 * #DE
118 */
119 ENTRY_NP(div0trap)
120 TRAP_NOERR(T_ZERODIV) /* $0 */
121 jmp cmntrap
122 SET_SIZE(div0trap)
123
124 /*
125 * #DB
126 *
127 * Fetch %dr6 and clear it, handing off the value to the
128 * cmntrap code in %r15/%esi
129 */
130 ENTRY_NP(dbgtrap)
131 TRAP_NOERR(T_SGLSTP) /* $1 */
132
133 #if defined(__amd64)
134 #if !defined(__xpv) /* no sysenter support yet */
146
147 pushq %r11
148
149 /*
150 * At this point the stack looks like this:
151 *
152 * (high address) r_ss
153 * r_rsp
154 * r_rfl
155 * r_cs
156 * r_rip <-- %rsp + 24
157 * r_err <-- %rsp + 16
158 * r_trapno <-- %rsp + 8
159 * (low address) %r11 <-- %rsp
160 */
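/*
 * If the #DB was taken on the first instruction of sys_sysenter or
 * brand_sys_sysenter (i.e. the saved r_rip matches one of them), the
 * CPU is still running with the user gsbase: sysenter does not swap
 * %gs, and that entry code has not yet had the chance to.  The SWAPGS
 * below loads the kernel gsbase so the rest of this handler can rely
 * on %gs pointing at the per-CPU data.
 */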
161 leaq sys_sysenter(%rip), %r11
162 cmpq %r11, 24(%rsp) /* Compare to saved r_rip on the stack */
163 je 1f
164 leaq brand_sys_sysenter(%rip), %r11
165 cmpq %r11, 24(%rsp) /* Compare to saved r_rip on the stack */
166 jne 2f
167 1: SWAPGS
168 2: popq %r11
169 #endif /* !__xpv */
170
171 INTR_PUSH
172 #if defined(__xpv)
173 movl $6, %edi
174 call kdi_dreg_get
175 movq %rax, %r15 /* %db6 -> %r15 */
176 movl $6, %edi
177 movl $0, %esi
178 call kdi_dreg_set /* 0 -> %db6 */
179 #else
180 movq %db6, %r15
181 xorl %eax, %eax
182 movq %rax, %db6
183 #endif
184
185 #elif defined(__i386)
197 #else
198 movl %db6, %esi
199 xorl %eax, %eax
200 movl %eax, %db6
201 #endif
202 #endif /* __i386 */
203
204 jmp cmntrap_pushed
205 SET_SIZE(dbgtrap)
206
207 #if defined(__amd64)
208 #if !defined(__xpv)
209
210 /*
211 * Macro to set the gsbase or kgsbase to the address of the struct cpu
212 * for this processor. If we came from userland, set kgsbase;
213 * otherwise set gsbase. We find the proper cpu struct by looping through
214 * the cpu structs for all processors until we find a match for the gdt
215 * of the trapping processor. The stack is expected to be pointing at
216 * the standard regs pushed by hardware on a trap (plus error code and trapno).
217 */
218 #define SET_CPU_GSBASE \
219 subq $REGOFF_TRAPNO, %rsp; /* save regs */ \
220 movq %rax, REGOFF_RAX(%rsp); \
221 movq %rbx, REGOFF_RBX(%rsp); \
222 movq %rcx, REGOFF_RCX(%rsp); \
223 movq %rdx, REGOFF_RDX(%rsp); \
224 movq %rbp, REGOFF_RBP(%rsp); \
225 movq %rsp, %rbp; \
226 subq $16, %rsp; /* space for gdt */ \
227 sgdt 6(%rsp); \
228 movq 8(%rsp), %rcx; /* %rcx has gdt to match */ \
229 xorl %ebx, %ebx; /* loop index */ \
230 leaq cpu(%rip), %rdx; /* cpu pointer array */ \
231 1: \
232 movq (%rdx, %rbx, CLONGSIZE), %rax; /* get cpu[i] */ \
233 cmpq $0x0, %rax; /* cpu[i] == NULL ? */ \
234 je 2f; /* yes, continue */ \
235 cmpq %rcx, CPU_GDT(%rax); /* gdt == cpu[i]->cpu_gdt ? */ \
236 je 3f; /* yes, go set gsbase */ \
277
278 SET_CPU_GSBASE
279
280 /*
281 * Save all registers and set up segment registers
282 * with kernel selectors.
283 */
284 INTR_PUSH
285 INTGATE_INIT_KERNEL_FLAGS
286
287 TRACE_PTR(%r12, %rax, %eax, %rdx, $TT_TRAP)
288 TRACE_REGS(%r12, %rsp, %rax, %rbx)
289 TRACE_STAMP(%r12)
290
291 movq %rsp, %rbp
292
293 movq %rbp, %rdi
294 call av_dispatch_nmivect
295
296 INTR_POP
297 IRET
298 /*NOTREACHED*/
299 SET_SIZE(nmiint)
300
301 #elif defined(__i386)
302
303 /*
304 * #NMI
305 */
306 ENTRY_NP(nmiint)
307 TRAP_NOERR(T_NMIFLT) /* $2 */
308
309 /*
310 * Save all registers and set up segment registers
311 * with kernel selectors.
312 */
313 INTR_PUSH
314 INTGATE_INIT_KERNEL_FLAGS
315
316 TRACE_PTR(%edi, %ebx, %ebx, %ecx, $TT_TRAP)
317 TRACE_REGS(%edi, %esp, %ebx, %ecx)
416 * down 8 bytes, and then store the base pointer.
417 */
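/*
 * Net effect of the sequence below: the saved user %rsp in the iretq
 * frame is decremented by 8 and %rbp is written to that newly exposed
 * slot of the interrupted stack, while the saved %rip is advanced by
 * one byte (pushq %rbp is the single-byte opcode 0x55).  The frame is
 * rebuilt 16 bytes lower so each old value can be read before its
 * slot is overwritten.
 */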
418 INTR_POP
419 subq $16, %rsp /* make room for %rbp */
420 pushq %rax /* push temp */
421 movq 24(%rsp), %rax /* load calling RIP */
422 addq $1, %rax /* increment over trapping instr */
423 movq %rax, 8(%rsp) /* store calling RIP */
424 movq 32(%rsp), %rax /* load calling CS */
425 movq %rax, 16(%rsp) /* store calling CS */
426 movq 40(%rsp), %rax /* load calling RFLAGS */
427 movq %rax, 24(%rsp) /* store calling RFLAGS */
428 movq 48(%rsp), %rax /* load calling RSP */
429 subq $8, %rax /* make room for %rbp */
430 movq %rax, 32(%rsp) /* store calling RSP */
431 movq 56(%rsp), %rax /* load calling SS */
432 movq %rax, 40(%rsp) /* store calling SS */
433 movq 32(%rsp), %rax /* reload calling RSP */
434 movq %rbp, (%rax) /* store %rbp there */
435 popq %rax /* pop off temp */
436 IRET /* return from interrupt */
437 /*NOTREACHED*/
438
439 ud_leave:
440 /*
441 * We must emulate a "leave", which is the same as a "movq %rbp, %rsp"
442 * followed by a "popq %rbp". This is quite a bit simpler on amd64
443 * than it is on i386 -- we can exploit the fact that the %rsp is
444 * explicitly saved to effect the pop without having to reshuffle
445 * the other data pushed for the trap.
446 */
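/*
 * Concretely: the saved %rip is advanced past the one-byte leave
 * (opcode 0xc9), the interrupted %rbp plus 8 becomes the new saved
 * %rsp (consuming the saved-%rbp word), and the word at (%rbp)
 * becomes the new %rbp -- exactly what leave would have done had it
 * not trapped.
 */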
447 INTR_POP
448 pushq %rax /* push temp */
449 movq 8(%rsp), %rax /* load calling RIP */
450 addq $1, %rax /* increment over trapping instr */
451 movq %rax, 8(%rsp) /* store calling RIP */
452 movq (%rbp), %rax /* get new %rbp */
453 addq $8, %rbp /* adjust new %rsp */
454 movq %rbp, 32(%rsp) /* store new %rsp */
455 movq %rax, %rbp /* set new %rbp */
456 popq %rax /* pop off temp */
457 IRET /* return from interrupt */
458 /*NOTREACHED*/
459
460 ud_nop:
461 /*
462 * We must emulate a "nop". This is obviously not hard: we need only
463 * advance the %rip by one.
464 */
465 INTR_POP
466 incq (%rsp)
467 IRET
468 /*NOTREACHED*/
469
470 ud_ret:
471 INTR_POP
472 pushq %rax /* push temp */
473 movq 32(%rsp), %rax /* load %rsp */
474 movq (%rax), %rax /* load calling RIP */
475 movq %rax, 8(%rsp) /* store calling RIP */
476 addq $8, 32(%rsp) /* adjust new %rsp */
477 popq %rax /* pop off temp */
478 IRET /* return from interrupt */
479 /*NOTREACHED*/
480
481 ud_trap:
482 /*
483 * We're going to let the kernel handle this as a normal #UD. If,
484 * however, we came through #BP and are spoofing #UD (in this case,
485 * the stored error value will be non-zero), we need to de-spoof
486 * the trap by incrementing %rip and pushing T_BPTFLT.
487 */
488 cmpq $0, REGOFF_ERR(%rsp)
489 je ud_ud
490 incq REGOFF_RIP(%rsp)
491 addq $REGOFF_RIP, %rsp
492 NPTRAP_NOERR(T_BPTFLT) /* $3 */
493 jmp cmntrap
494
495 ud_ud:
496 addq $REGOFF_RIP, %rsp
497 ud_user:
498 NPTRAP_NOERR(T_ILLINST)
732 andl $_BITNOT(FPU_VALID), PCB_FPU_FLAGS(%rbx)
733 #if FPU_CTX_FPU_REGS != 0
734 addq $FPU_CTX_FPU_REGS, %rbx
735 #endif
736
737 movl FPU_CTX_FPU_XSAVE_MASK(%rbx), %eax /* for xrstor */
738 movl FPU_CTX_FPU_XSAVE_MASK+4(%rbx), %edx /* for xrstor */
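/*
 * xrstor takes its requested-feature bitmap in %edx:%eax, which is
 * why the 64-bit xsave mask is split into low and high halves above.
 */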
739
740 /*
741 * the label below is used in trap.c to detect FP faults in the
742 * kernel due to a user fault.
743 */
744 ALTENTRY(ndptrap_frstor)
745 movq (%rbx), %rbx /* fpu_regs.kfpu_u.kfpu_XX pointer */
746 .globl _patch_xrstorq_rbx
747 _patch_xrstorq_rbx:
748 fxrstorq (%rbx)
749 popq %rdx
750 popq %rbx
751 popq %rax
752 IRET
753 /*NOTREACHED*/
754
755 .handle_in_trap:
756 popq %rdx
757 popq %rbx
758 popq %rax
759 TRAP_NOERR(T_NOEXTFLT) /* $7 */
760 jmp cmninttrap
761 SET_SIZE(ndptrap_frstor)
762 SET_SIZE(ndptrap)
763
764 #endif /* __xpv */
765
766 #elif defined(__i386)
767
768 ENTRY_NP(ndptrap)
769 /*
770 * We want to do this quickly as every lwp using fp will take this
771 * after a context switch -- we do the frequent path in fpnoextflt
772 * below; for all other cases, we let the trap code handle it
1110 /*
1111 * Before we assume that we have an unmapped trap on our hands,
1112 * check to see if this is a fault from user mode. If it is,
1113 * we'll kick back into the page fault handler.
1114 */
1115 movl 4(%esp), %eax /* error code */
1116 andl $PF_ERR_USER, %eax
1117 jnz user_mode
1118
1119 /*
1120 * We now know that this is the invalid opcode trap.
1121 */
1122 popl %eax
1123 addl $4, %esp /* pop error code */
1124 jmp invoptrap
1125 SET_SIZE(pentium_pftrap)
1126
1127 #endif /* !__amd64 */
1128
1129 ENTRY_NP(resvtrap)
1130 TRAP_NOERR(15) /* (reserved) */
1131 jmp cmntrap
1132 SET_SIZE(resvtrap)
1133
1134 /*
1135 * #MF
1136 */
1137 ENTRY_NP(ndperr)
1138 TRAP_NOERR(T_EXTERRFLT) /* $16 */
1139 jmp cmninttrap
1140 SET_SIZE(ndperr)
1141
1142 /*
1143 * #AC
1144 */
1145 ENTRY_NP(achktrap)
1146 TRAP_ERR(T_ALIGNMENT) /* $17 */
1147 jmp cmntrap
1148 SET_SIZE(achktrap)
1149
1150 /*
1190
1191 movl %esp, %ecx
1192 pushl %ecx /* arg0 = struct regs *rp */
1193 call cmi_mca_trap /* cmi_mca_trap(rp) */
1194 addl $4, %esp /* pop arg0 */
1195
1196 jmp _sys_rtt
1197 SET_SIZE(mcetrap)
1198
1199 #endif
1200
1201 /*
1202 * #XF
1203 */
1204 ENTRY_NP(xmtrap)
1205 TRAP_NOERR(T_SIMDFPE) /* $19 */
1206 jmp cmninttrap
1207 SET_SIZE(xmtrap)
1208
1209 ENTRY_NP(invaltrap)
1210 TRAP_NOERR(30) /* very invalid */
1211 jmp cmntrap
1212 SET_SIZE(invaltrap)
1213
1214 ENTRY_NP(invalint)
1215 TRAP_NOERR(31) /* even more so */
1216 jmp cmnint
1217 SET_SIZE(invalint)
1218
1219 .globl fasttable
1220
1221 #if defined(__amd64)
1222
1223 ENTRY_NP(fasttrap)
1224 cmpl $T_LASTFAST, %eax
1225 ja 1f
1226 orl %eax, %eax /* (zero extend top 32-bits) */
1227 leaq fasttable(%rip), %r11
1228 leaq (%r11, %rax, CLONGSIZE), %r11
1229 jmp *(%r11)
1230 1:
1231 /*
1232 * Fast syscall number was illegal. Make it look
1233 * as if the INT failed. Modify %rip to point before the
1234 * INT, push the expected error code and fake a GP fault.
1235 *
1236 * XXX Why make the error code be offset into idt + 1?
1237 * Instead we should push a real (soft?) error code
1238 * on the stack and #gp handler could know about fasttraps?
1269 pushl $_CONST(_MUL(T_FASTTRAP, GATE_DESC_SIZE) + 2)
1270 jmp gptrap
1271 SET_SIZE(fasttrap)
1272
1273 #endif /* __i386 */
1274
1275 ENTRY_NP(dtrace_ret)
1276 TRAP_NOERR(T_DTRACE_RET)
1277 jmp dtrace_trap
1278 SET_SIZE(dtrace_ret)
1279
1280 #if defined(__amd64)
1281
1282 /*
1283 * RFLAGS 24 bytes up the stack from %rsp.
1284 * XXX a constant would be nicer.
1285 */
1286 ENTRY_NP(fast_null)
1287 XPV_TRAP_POP
1288 orq $PS_C, 24(%rsp) /* set carry bit in user flags */
1289 IRET
1290 /*NOTREACHED*/
1291 SET_SIZE(fast_null)
1292
1293 #elif defined(__i386)
1294
1295 ENTRY_NP(fast_null)
1296 orw $PS_C, 8(%esp) /* set carry bit in user flags */
1297 IRET
1298 SET_SIZE(fast_null)
1299
1300 #endif /* __i386 */
1301
1302 /*
1303 * Interrupts start at 32
1304 */
1305 #define MKIVCT(n) \
1306 ENTRY_NP(ivct/**/n) \
1307 push $0; \
1308 push $n - 0x20; \
1309 jmp cmnint; \
1 /*
2 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2013, 2014 by Delphix. All rights reserved.
4 * Copyright (c) 2018 Joyent, Inc.
5 */
6
7 /*
8 * Copyright (c) 1989, 1990 William F. Jolitz.
9 * Copyright (c) 1990 The Regents of the University of California.
10 * All rights reserved.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. All advertising materials mentioning features or use of this software
21 * must display the following acknowledgement:
22 * This product includes software developed by the University of
23 * California, Berkeley and its contributors.
24 * 4. Neither the name of the University nor the names of its contributors
95 XPV_TRAP_POP; \
96 pushq $trapno
97
98 #else /* __xpv && __amd64 */
99
100 #define TRAP_NOERR(trapno) \
101 push $0; \
102 push $trapno
103
104 #define NPTRAP_NOERR(trapno) TRAP_NOERR(trapno)
105
106 /*
107 * error code already pushed by hw
108 * onto stack.
109 */
110 #define TRAP_ERR(trapno) \
111 push $trapno
112
113 #endif /* __xpv && __amd64 */
114
115 /*
116 * These are the stacks used on cpu0 for taking double faults,
117 * NMIs and MCEs (the latter two only on amd64 where we have IST).
118 *
119 * We define them here instead of in a C file so that we can page-align
120 * them (gcc won't do that in a .c file).
121 */
122 .data
123 DGDEF3(dblfault_stack0, DEFAULTSTKSZ, MMU_PAGESIZE)
124 .fill DEFAULTSTKSZ, 1, 0
125 DGDEF3(nmi_stack0, DEFAULTSTKSZ, MMU_PAGESIZE)
126 .fill DEFAULTSTKSZ, 1, 0
127 DGDEF3(mce_stack0, DEFAULTSTKSZ, MMU_PAGESIZE)
128 .fill DEFAULTSTKSZ, 1, 0
129
130 /*
131 * #DE
132 */
133 ENTRY_NP(div0trap)
134 TRAP_NOERR(T_ZERODIV) /* $0 */
135 jmp cmntrap
136 SET_SIZE(div0trap)
137
138 /*
139 * #DB
140 *
141 * Fetch %dr6 and clear it, handing off the value to the
142 * cmntrap code in %r15/%esi
143 */
144 ENTRY_NP(dbgtrap)
145 TRAP_NOERR(T_SGLSTP) /* $1 */
146
147 #if defined(__amd64)
148 #if !defined(__xpv) /* no sysenter support yet */
160
161 pushq %r11
162
163 /*
164 * At this point the stack looks like this:
165 *
166 * (high address) r_ss
167 * r_rsp
168 * r_rfl
169 * r_cs
170 * r_rip <-- %rsp + 24
171 * r_err <-- %rsp + 16
172 * r_trapno <-- %rsp + 8
173 * (low address) %r11 <-- %rsp
174 */
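/*
 * Four entry points are checked below: sys_sysenter,
 * brand_sys_sysenter and their tr_* counterparts, which appear to be
 * the KPTI trampoline aliases of the same entries.  A #DB taken on
 * the first instruction of any of them arrives before that code has
 * done its own swapgs, so the kernel gsbase must be loaded here.
 */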
175 leaq sys_sysenter(%rip), %r11
176 cmpq %r11, 24(%rsp) /* Compare to saved r_rip on the stack */
177 je 1f
178 leaq brand_sys_sysenter(%rip), %r11
179 cmpq %r11, 24(%rsp) /* Compare to saved r_rip on the stack */
180 je 1f
181 leaq tr_sys_sysenter(%rip), %r11
182 cmpq %r11, 24(%rsp)
183 je 1f
184 leaq tr_brand_sys_sysenter(%rip), %r11
185 cmpq %r11, 24(%rsp)
186 jne 2f
187 1: SWAPGS
188 2: popq %r11
189 #endif /* !__xpv */
190
191 INTR_PUSH
192 #if defined(__xpv)
193 movl $6, %edi
194 call kdi_dreg_get
195 movq %rax, %r15 /* %db6 -> %r15 */
196 movl $6, %edi
197 movl $0, %esi
198 call kdi_dreg_set /* 0 -> %db6 */
199 #else
200 movq %db6, %r15
201 xorl %eax, %eax
202 movq %rax, %db6
203 #endif
204
205 #elif defined(__i386)
217 #else
218 movl %db6, %esi
219 xorl %eax, %eax
220 movl %eax, %db6
221 #endif
222 #endif /* __i386 */
223
224 jmp cmntrap_pushed
225 SET_SIZE(dbgtrap)
226
227 #if defined(__amd64)
228 #if !defined(__xpv)
229
230 /*
231 * Macro to set the gsbase or kgsbase to the address of the struct cpu
232 * for this processor. If we came from userland, set kgsbase;
233 * otherwise set gsbase. We find the proper cpu struct by looping through
234 * the cpu structs for all processors until we find a match for the gdt
235 * of the trapping processor. The stack is expected to be pointing at
236 * the standard regs pushed by hardware on a trap (plus error code and trapno).
237 *
238 * It's ok for us to clobber gsbase here (and possibly end up with both gsbase
239 * and kgsbase set to the same value) because we're not going back the normal
240 * way out of here (via IRET). Where we're going, we don't need no user %gs.
241 */
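/*
 * Note on the sgdt below: in long mode sgdt stores a 10-byte
 * pseudo-descriptor (a 2-byte limit followed by the 8-byte base).
 * Storing it at 6(%rsp) within the 16-byte scratch area lands the
 * base at 8(%rsp), where the next instruction fetches it with a
 * single movq.
 */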
242 #define SET_CPU_GSBASE \
243 subq $REGOFF_TRAPNO, %rsp; /* save regs */ \
244 movq %rax, REGOFF_RAX(%rsp); \
245 movq %rbx, REGOFF_RBX(%rsp); \
246 movq %rcx, REGOFF_RCX(%rsp); \
247 movq %rdx, REGOFF_RDX(%rsp); \
248 movq %rbp, REGOFF_RBP(%rsp); \
249 movq %rsp, %rbp; \
250 subq $16, %rsp; /* space for gdt */ \
251 sgdt 6(%rsp); \
252 movq 8(%rsp), %rcx; /* %rcx has gdt to match */ \
253 xorl %ebx, %ebx; /* loop index */ \
254 leaq cpu(%rip), %rdx; /* cpu pointer array */ \
255 1: \
256 movq (%rdx, %rbx, CLONGSIZE), %rax; /* get cpu[i] */ \
257 cmpq $0x0, %rax; /* cpu[i] == NULL ? */ \
258 je 2f; /* yes, continue */ \
259 cmpq %rcx, CPU_GDT(%rax); /* gdt == cpu[i]->cpu_gdt ? */ \
260 je 3f; /* yes, go set gsbase */ \
301
302 SET_CPU_GSBASE
303
304 /*
305 * Save all registers and set up segment registers
306 * with kernel selectors.
307 */
308 INTR_PUSH
309 INTGATE_INIT_KERNEL_FLAGS
310
311 TRACE_PTR(%r12, %rax, %eax, %rdx, $TT_TRAP)
312 TRACE_REGS(%r12, %rsp, %rax, %rbx)
313 TRACE_STAMP(%r12)
314
315 movq %rsp, %rbp
316
317 movq %rbp, %rdi
318 call av_dispatch_nmivect
319
320 INTR_POP
321 jmp tr_iret_auto
322 /*NOTREACHED*/
323 SET_SIZE(nmiint)
324
325 #elif defined(__i386)
326
327 /*
328 * #NMI
329 */
330 ENTRY_NP(nmiint)
331 TRAP_NOERR(T_NMIFLT) /* $2 */
332
333 /*
334 * Save all registers and set up segment registers
335 * with kernel selectors.
336 */
337 INTR_PUSH
338 INTGATE_INIT_KERNEL_FLAGS
339
340 TRACE_PTR(%edi, %ebx, %ebx, %ecx, $TT_TRAP)
341 TRACE_REGS(%edi, %esp, %ebx, %ecx)
440 * down 8 bytes, and then store the base pointer.
441 */
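/*
 * As above, the saved user %rsp is pulled down by 8, %rbp is stored
 * at the new top of the interrupted stack, and the saved %rip is
 * advanced over the one-byte pushq %rbp before the rebuilt frame is
 * handed to tr_iret_kernel.
 */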
442 INTR_POP
443 subq $16, %rsp /* make room for %rbp */
444 pushq %rax /* push temp */
445 movq 24(%rsp), %rax /* load calling RIP */
446 addq $1, %rax /* increment over trapping instr */
447 movq %rax, 8(%rsp) /* store calling RIP */
448 movq 32(%rsp), %rax /* load calling CS */
449 movq %rax, 16(%rsp) /* store calling CS */
450 movq 40(%rsp), %rax /* load calling RFLAGS */
451 movq %rax, 24(%rsp) /* store calling RFLAGS */
452 movq 48(%rsp), %rax /* load calling RSP */
453 subq $8, %rax /* make room for %rbp */
454 movq %rax, 32(%rsp) /* store calling RSP */
455 movq 56(%rsp), %rax /* load calling SS */
456 movq %rax, 40(%rsp) /* store calling SS */
457 movq 32(%rsp), %rax /* reload calling RSP */
458 movq %rbp, (%rax) /* store %rbp there */
459 popq %rax /* pop off temp */
460 jmp tr_iret_kernel /* return from interrupt */
461 /*NOTREACHED*/
462
463 ud_leave:
464 /*
465 * We must emulate a "leave", which is the same as a "movq %rbp, %rsp"
466 * followed by a "popq %rbp". This is quite a bit simpler on amd64
467 * than it is on i386 -- we can exploit the fact that the %rsp is
468 * explicitly saved to effect the pop without having to reshuffle
469 * the other data pushed for the trap.
470 */
471 INTR_POP
472 pushq %rax /* push temp */
473 movq 8(%rsp), %rax /* load calling RIP */
474 addq $1, %rax /* increment over trapping instr */
475 movq %rax, 8(%rsp) /* store calling RIP */
476 movq (%rbp), %rax /* get new %rbp */
477 addq $8, %rbp /* adjust new %rsp */
478 movq %rbp, 32(%rsp) /* store new %rsp */
479 movq %rax, %rbp /* set new %rbp */
480 popq %rax /* pop off temp */
481 jmp tr_iret_kernel /* return from interrupt */
482 /*NOTREACHED*/
483
484 ud_nop:
485 /*
486 * We must emulate a "nop". This is obviously not hard: we need only
487 * advance the %rip by one.
488 */
489 INTR_POP
490 incq (%rsp)
491 jmp tr_iret_kernel
492 /*NOTREACHED*/
493
494 ud_ret:
495 INTR_POP
496 pushq %rax /* push temp */
497 movq 32(%rsp), %rax /* load %rsp */
498 movq (%rax), %rax /* load calling RIP */
499 movq %rax, 8(%rsp) /* store calling RIP */
500 addq $8, 32(%rsp) /* adjust new %rsp */
501 popq %rax /* pop off temp */
502 jmp tr_iret_kernel /* return from interrupt */
503 /*NOTREACHED*/
504
505 ud_trap:
506 /*
507 * We're going to let the kernel handle this as a normal #UD. If,
508 * however, we came through #BP and are spoofing #UD (in this case,
509 * the stored error value will be non-zero), we need to de-spoof
510 * the trap by incrementing %rip and pushing T_BPTFLT.
511 */
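/*
 * The check works because a genuine #UD arrives here with a zero in
 * REGOFF_ERR, so a non-zero value can only have been planted by the
 * #BP path.  De-spoofing advances %rip over the one-byte int $3 and
 * substitutes T_BPTFLT so trap() sees an ordinary breakpoint.
 */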
512 cmpq $0, REGOFF_ERR(%rsp)
513 je ud_ud
514 incq REGOFF_RIP(%rsp)
515 addq $REGOFF_RIP, %rsp
516 NPTRAP_NOERR(T_BPTFLT) /* $3 */
517 jmp cmntrap
518
519 ud_ud:
520 addq $REGOFF_RIP, %rsp
521 ud_user:
522 NPTRAP_NOERR(T_ILLINST)
756 andl $_BITNOT(FPU_VALID), PCB_FPU_FLAGS(%rbx)
757 #if FPU_CTX_FPU_REGS != 0
758 addq $FPU_CTX_FPU_REGS, %rbx
759 #endif
760
761 movl FPU_CTX_FPU_XSAVE_MASK(%rbx), %eax /* for xrstor */
762 movl FPU_CTX_FPU_XSAVE_MASK+4(%rbx), %edx /* for xrstor */
763
764 /*
765 * the label below is used in trap.c to detect FP faults in the
766 * kernel due to a user fault.
767 */
768 ALTENTRY(ndptrap_frstor)
769 movq (%rbx), %rbx /* fpu_regs.kfpu_u.kfpu_XX pointer */
770 .globl _patch_xrstorq_rbx
771 _patch_xrstorq_rbx:
772 fxrstorq (%rbx)
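/*
 * _patch_xrstorq_rbx marks a patch point: on CPUs where the kernel
 * uses xsave, the fxrstorq above is (as the label suggests) rewritten
 * at boot into xrstorq (%rbx), consuming the %edx:%eax mask loaded
 * earlier.
 */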
773 popq %rdx
774 popq %rbx
775 popq %rax
776 jmp tr_iret_auto
777 /*NOTREACHED*/
778
779 .handle_in_trap:
780 popq %rdx
781 popq %rbx
782 popq %rax
783 TRAP_NOERR(T_NOEXTFLT) /* $7 */
784 jmp cmninttrap
785 SET_SIZE(ndptrap_frstor)
786 SET_SIZE(ndptrap)
787
788 #endif /* __xpv */
789
790 #elif defined(__i386)
791
792 ENTRY_NP(ndptrap)
793 /*
794 * We want to do this quickly as every lwp using fp will take this
795 * after a context switch -- we do the frequent path in fpnoextflt
796 * below; for all other cases, we let the trap code handle it
1134 /*
1135 * Before we assume that we have an unmapped trap on our hands,
1136 * check to see if this is a fault from user mode. If it is,
1137 * we'll kick back into the page fault handler.
1138 */
1139 movl 4(%esp), %eax /* error code */
1140 andl $PF_ERR_USER, %eax
1141 jnz user_mode
1142
1143 /*
1144 * We now know that this is the invalid opcode trap.
1145 */
1146 popl %eax
1147 addl $4, %esp /* pop error code */
1148 jmp invoptrap
1149 SET_SIZE(pentium_pftrap)
1150
1151 #endif /* !__amd64 */
1152
1153 ENTRY_NP(resvtrap)
1154 TRAP_NOERR(T_RESVTRAP) /* (reserved) */
1155 jmp cmntrap
1156 SET_SIZE(resvtrap)
1157
1158 /*
1159 * #MF
1160 */
1161 ENTRY_NP(ndperr)
1162 TRAP_NOERR(T_EXTERRFLT) /* $16 */
1163 jmp cmninttrap
1164 SET_SIZE(ndperr)
1165
1166 /*
1167 * #AC
1168 */
1169 ENTRY_NP(achktrap)
1170 TRAP_ERR(T_ALIGNMENT) /* $17 */
1171 jmp cmntrap
1172 SET_SIZE(achktrap)
1173
1174 /*
1214
1215 movl %esp, %ecx
1216 pushl %ecx /* arg0 = struct regs *rp */
1217 call cmi_mca_trap /* cmi_mca_trap(rp) */
1218 addl $4, %esp /* pop arg0 */
1219
1220 jmp _sys_rtt
1221 SET_SIZE(mcetrap)
1222
1223 #endif
1224
1225 /*
1226 * #XF
1227 */
1228 ENTRY_NP(xmtrap)
1229 TRAP_NOERR(T_SIMDFPE) /* $19 */
1230 jmp cmninttrap
1231 SET_SIZE(xmtrap)
1232
1233 ENTRY_NP(invaltrap)
1234 TRAP_NOERR(T_INVALTRAP) /* very invalid */
1235 jmp cmntrap
1236 SET_SIZE(invaltrap)
1237
1238 .globl fasttable
1239
1240 #if defined(__amd64)
1241
1242 ENTRY_NP(fasttrap)
1243 cmpl $T_LASTFAST, %eax
1244 ja 1f
1245 orl %eax, %eax /* (zero extend top 32-bits) */
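/*
 * The orl above relies on a 32-bit write clearing bits 63:32 of
 * %rax, so the full 64-bit register is a safe index into fasttable.
 */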
1246 leaq fasttable(%rip), %r11
1247 leaq (%r11, %rax, CLONGSIZE), %r11
1248 jmp *(%r11)
1249 1:
1250 /*
1251 * Fast syscall number was illegal. Make it look
1252 * as if the INT failed. Modify %rip to point before the
1253 * INT, push the expected error code and fake a GP fault.
1254 *
1255 * XXX Why make the error code be offset into idt + 1?
1256 * Instead we should push a real (soft?) error code
1257 * on the stack and #gp handler could know about fasttraps?
1288 pushl $_CONST(_MUL(T_FASTTRAP, GATE_DESC_SIZE) + 2)
1289 jmp gptrap
1290 SET_SIZE(fasttrap)
1291
1292 #endif /* __i386 */
1293
1294 ENTRY_NP(dtrace_ret)
1295 TRAP_NOERR(T_DTRACE_RET)
1296 jmp dtrace_trap
1297 SET_SIZE(dtrace_ret)
1298
1299 #if defined(__amd64)
1300
1301 /*
1302 * RFLAGS 24 bytes up the stack from %rsp.
1303 * XXX a constant would be nicer.
1304 */
1305 ENTRY_NP(fast_null)
1306 XPV_TRAP_POP
1307 orq $PS_C, 24(%rsp) /* set carry bit in user flags */
1308 jmp tr_iret_auto
1309 /*NOTREACHED*/
1310 SET_SIZE(fast_null)
1311
1312 #elif defined(__i386)
1313
1314 ENTRY_NP(fast_null)
1315 orw $PS_C, 8(%esp) /* set carry bit in user flags */
1316 IRET
1317 SET_SIZE(fast_null)
1318
1319 #endif /* __i386 */
1320
1321 /*
1322 * Interrupts start at 32
1323 */
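/*
 * MKIVCT(n) generates the entry point for interrupt vector n: it
 * pushes a zero into the error-code slot and n - 0x20 into the
 * trap-number slot (the first 0x20 vectors being the exceptions
 * above), then jumps to the common interrupt path.
 */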
1324 #define MKIVCT(n) \
1325 ENTRY_NP(ivct/**/n) \
1326 push $0; \
1327 push $n - 0x20; \
1328 jmp cmnint; \