8956 Implement KPTI
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
Reviewed by: Robert Mustacchi <rm@joyent.com>
--- old/usr/src/uts/intel/ia32/ml/exception.s
+++ new/usr/src/uts/intel/ia32/ml/exception.s
1 1 /*
2 2 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
3 3 * Copyright (c) 2013, 2014 by Delphix. All rights reserved.
4 - * Copyright (c) 2017 Joyent, Inc.
4 + * Copyright (c) 2018 Joyent, Inc.
5 5 */
6 6
7 7 /*
8 8 * Copyright (c) 1989, 1990 William F. Jolitz.
9 9 * Copyright (c) 1990 The Regents of the University of California.
10 10 * All rights reserved.
11 11 *
12 12 * Redistribution and use in source and binary forms, with or without
13 13 * modification, are permitted provided that the following conditions
14 14 * are met:
15 15 * 1. Redistributions of source code must retain the above copyright
16 16 * notice, this list of conditions and the following disclaimer.
17 17 * 2. Redistributions in binary form must reproduce the above copyright
18 18 * notice, this list of conditions and the following disclaimer in the
19 19 * documentation and/or other materials provided with the distribution.
20 20 * 3. All advertising materials mentioning features or use of this software
21 21 * must display the following acknowledgement:
22 22 * This product includes software developed by the University of
23 23 * California, Berkeley and its contributors.
24 24 * 4. Neither the name of the University nor the names of its contributors
25 25 * may be used to endorse or promote products derived from this software
26 26 * without specific prior written permission.
27 27 *
28 28 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29 29 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30 30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31 31 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32 32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34 34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35 35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36 36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37 37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 38 * SUCH DAMAGE.
39 39 *
40 40 * $FreeBSD: src/sys/amd64/amd64/exception.S,v 1.113 2003/10/15 02:04:52 peter Exp $
41 41 */
42 42
43 43 #include <sys/asm_linkage.h>
44 44 #include <sys/asm_misc.h>
45 45 #include <sys/trap.h>
46 46 #include <sys/psw.h>
47 47 #include <sys/regset.h>
48 48 #include <sys/privregs.h>
49 49 #include <sys/dtrace.h>
50 50 #include <sys/x86_archext.h>
51 51 #include <sys/traptrace.h>
52 52 #include <sys/machparam.h>
53 53
54 54 /*
55 55 * only one routine in this file is interesting to lint
56 56 */
57 57
58 58 #if defined(__lint)
59 59
60 60 void
61 61 ndptrap_frstor(void)
62 62 {}
63 63
64 64 #else
65 65
66 66 #include "assym.h"
67 67
68 68 /*
69 69 * push $0 on stack for traps that do not
70 70 * generate an error code. This is so the rest
71 71 * of the kernel can expect a consistent stack
72 72 * from from any exception.
73 73 *
[ 59 lines elided ]
74 74 * Note that for all exceptions for amd64
75 75 * %r11 and %rcx are on the stack. Just pop
76 76 * them back into their appropriate registers and let
77 77 * it get saved as is running native.
78 78 */
79 79
80 80 #if defined(__xpv) && defined(__amd64)
81 81
82 82 #define NPTRAP_NOERR(trapno) \
83 83 pushq $0; \
84 - pushq $trapno
84 + pushq $trapno
85 85
86 86 #define TRAP_NOERR(trapno) \
87 87 XPV_TRAP_POP; \
88 88 NPTRAP_NOERR(trapno)
89 89
90 90 /*
91 91 * error code already pushed by hw
92 92 * onto stack.
93 93 */
94 94 #define TRAP_ERR(trapno) \
95 95 XPV_TRAP_POP; \
96 - pushq $trapno
96 + pushq $trapno
97 97
98 98 #else /* __xpv && __amd64 */
99 99
100 100 #define TRAP_NOERR(trapno) \
101 101 push $0; \
102 - push $trapno
102 + push $trapno
103 103
104 104 #define NPTRAP_NOERR(trapno) TRAP_NOERR(trapno)
105 105
106 106 /*
107 107 * error code already pushed by hw
108 108 * onto stack.
109 109 */
110 110 #define TRAP_ERR(trapno) \
111 - push $trapno
111 + push $trapno
112 112
113 113 #endif /* __xpv && __amd64 */
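
For illustration, on native (non-xpv) builds the macros above expand to nothing more than the pushes that normalize the trap frame. The #DE entry below, TRAP_NOERR(T_ZERODIV), becomes:

	push	$0		/* fake error code -- hardware pushes none for #DE */
	push	$T_ZERODIV	/* trap number */

whereas TRAP_ERR() pushes only the trap number, because the CPU has already pushed a genuine error code for those exceptions.
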
114 114
115 + /*
116 + * These are the stacks used on cpu0 for taking double faults,
117 + * NMIs and MCEs (the latter two only on amd64 where we have IST).
118 + *
119 + * We define them here instead of in a C file so that we can page-align
120 + * them (gcc won't do that in a .c file).
121 + */
122 + .data
123 + DGDEF3(dblfault_stack0, DEFAULTSTKSZ, MMU_PAGESIZE)
124 + .fill DEFAULTSTKSZ, 1, 0
125 + DGDEF3(nmi_stack0, DEFAULTSTKSZ, MMU_PAGESIZE)
126 + .fill DEFAULTSTKSZ, 1, 0
127 + DGDEF3(mce_stack0, DEFAULTSTKSZ, MMU_PAGESIZE)
128 + .fill DEFAULTSTKSZ, 1, 0
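
A rough equivalent of what one of the DGDEF3 definitions above achieves, written as plain directives (the exact expansion of DGDEF3 -- .type/.size annotations and so on -- lives in sys/asm_linkage.h; this sketch only assumes its effect):

	.data
	.align	MMU_PAGESIZE			/* page-aligned, as the comment above requires */
	.globl	dblfault_stack0
dblfault_stack0:
	.fill	DEFAULTSTKSZ, 1, 0		/* DEFAULTSTKSZ zero-filled bytes */
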
115 129
116 130 /*
117 131 * #DE
118 132 */
119 133 ENTRY_NP(div0trap)
120 134 TRAP_NOERR(T_ZERODIV) /* $0 */
121 135 jmp cmntrap
122 136 SET_SIZE(div0trap)
123 137
124 138 /*
125 139 * #DB
126 140 *
127 141 * Fetch %dr6 and clear it, handing off the value to the
128 142 * cmntrap code in %r15/%esi
129 143 */
130 144 ENTRY_NP(dbgtrap)
131 145 TRAP_NOERR(T_SGLSTP) /* $1 */
132 146
133 147 #if defined(__amd64)
134 148 #if !defined(__xpv) /* no sysenter support yet */
135 149 /*
136 150 * If we get here as a result of single-stepping a sysenter
137 151 * instruction, we suddenly find ourselves taking a #db
138 152 * in kernel mode -before- we've swapgs'ed. So before we can
139 153 * take the trap, we do the swapgs here, and fix the return
140 154 * %rip in trap() so that we return immediately after the
141 155 * swapgs in the sysenter handler to avoid doing the swapgs again.
142 156 *
143 157 * Nobody said that the design of sysenter was particularly
144 158 * elegant, did they?
145 159 */
146 160
147 161 pushq %r11
148 162
149 163 /*
150 164 * At this point the stack looks like this:
151 165 *
152 166 * (high address) r_ss
153 167 * r_rsp
154 168 * r_rfl
155 169 * r_cs
[ 31 lines elided ]
156 170 * r_rip <-- %rsp + 24
157 171 * r_err <-- %rsp + 16
158 172 * r_trapno <-- %rsp + 8
159 173 * (low address) %r11 <-- %rsp
160 174 */
161 175 leaq sys_sysenter(%rip), %r11
162 176 cmpq %r11, 24(%rsp) /* Compare to saved r_rip on the stack */
163 177 je 1f
164 178 leaq brand_sys_sysenter(%rip), %r11
165 179 cmpq %r11, 24(%rsp) /* Compare to saved r_rip on the stack */
180 + je 1f
181 + leaq tr_sys_sysenter(%rip), %r11
182 + cmpq %r11, 24(%rsp)
183 + je 1f
184 + leaq tr_brand_sys_sysenter(%rip), %r11
185 + cmpq %r11, 24(%rsp)
166 186 jne 2f
167 187 1: SWAPGS
168 188 2: popq %r11
169 189 #endif /* !__xpv */
170 190
171 191 INTR_PUSH
172 192 #if defined(__xpv)
173 193 movl $6, %edi
174 194 call kdi_dreg_get
175 195 movq %rax, %r15 /* %db6 -> %r15 */
176 196 movl $6, %edi
177 197 movl $0, %esi
178 198 call kdi_dreg_set /* 0 -> %db6 */
179 199 #else
180 200 movq %db6, %r15
181 201 xorl %eax, %eax
182 202 movq %rax, %db6
183 203 #endif
184 204
185 205 #elif defined(__i386)
186 206
187 207 INTR_PUSH
188 208 #if defined(__xpv)
189 209 pushl $6
190 210 call kdi_dreg_get
191 211 addl $4, %esp
192 212 movl %eax, %esi /* %dr6 -> %esi */
193 213 pushl $0
194 214 pushl $6
195 215 call kdi_dreg_set /* 0 -> %dr6 */
196 216 addl $8, %esp
197 217 #else
198 218 movl %db6, %esi
199 219 xorl %eax, %eax
200 220 movl %eax, %db6
201 221 #endif
202 222 #endif /* __i386 */
203 223
204 224 jmp cmntrap_pushed
205 225 SET_SIZE(dbgtrap)
206 226
[ 31 lines elided ]
207 227 #if defined(__amd64)
208 228 #if !defined(__xpv)
209 229
210 230 /*
211 231 * Macro to set the gsbase or kgsbase to the address of the struct cpu
212 232 * for this processor. If we came from userland, set kgsbase else
213 233 * set gsbase. We find the proper cpu struct by looping through
214 234 * the cpu structs for all processors till we find a match for the gdt
215 235 * of the trapping processor. The stack is expected to be pointing at
216 236 * the standard regs pushed by hardware on a trap (plus error code and trapno).
237 + *
238 + * It's ok for us to clobber gsbase here (and possibly end up with both gsbase
239 + * and kgsbase set to the same value) because we're not going back the normal
240 + * way out of here (via IRET). Where we're going, we don't need no user %gs.
217 241 */
218 242 #define SET_CPU_GSBASE \
219 243 subq $REGOFF_TRAPNO, %rsp; /* save regs */ \
220 244 movq %rax, REGOFF_RAX(%rsp); \
221 245 movq %rbx, REGOFF_RBX(%rsp); \
222 246 movq %rcx, REGOFF_RCX(%rsp); \
223 247 movq %rdx, REGOFF_RDX(%rsp); \
224 248 movq %rbp, REGOFF_RBP(%rsp); \
225 249 movq %rsp, %rbp; \
226 250 subq $16, %rsp; /* space for gdt */ \
227 251 sgdt 6(%rsp); \
228 252 movq 8(%rsp), %rcx; /* %rcx has gdt to match */ \
229 253 xorl %ebx, %ebx; /* loop index */ \
230 254 leaq cpu(%rip), %rdx; /* cpu pointer array */ \
231 255 1: \
232 256 movq (%rdx, %rbx, CLONGSIZE), %rax; /* get cpu[i] */ \
233 257 cmpq $0x0, %rax; /* cpu[i] == NULL ? */ \
234 258 je 2f; /* yes, continue */ \
235 259 cmpq %rcx, CPU_GDT(%rax); /* gdt == cpu[i]->cpu_gdt ? */ \
236 260 je 3f; /* yes, go set gsbase */ \
237 261 2: \
238 262 incl %ebx; /* i++ */ \
239 263 cmpl $NCPU, %ebx; /* i < NCPU ? */ \
240 264 jb 1b; /* yes, loop */ \
241 265 /* XXX BIG trouble if we fall thru here. We didn't find a gdt match */ \
242 266 3: \
243 267 movl $MSR_AMD_KGSBASE, %ecx; \
244 268 cmpw $KCS_SEL, REGOFF_CS(%rbp); /* trap from kernel? */ \
245 269 jne 4f; /* no, go set KGSBASE */ \
246 270 movl $MSR_AMD_GSBASE, %ecx; /* yes, set GSBASE */ \
247 271 mfence; /* OPTERON_ERRATUM_88 */ \
248 272 4: \
249 273 movq %rax, %rdx; /* write base register */ \
250 274 shrq $32, %rdx; \
251 275 wrmsr; \
252 276 movq REGOFF_RDX(%rbp), %rdx; /* restore regs */ \
253 277 movq REGOFF_RCX(%rbp), %rcx; \
254 278 movq REGOFF_RBX(%rbp), %rbx; \
255 279 movq REGOFF_RAX(%rbp), %rax; \
256 280 movq %rbp, %rsp; \
257 281 movq REGOFF_RBP(%rsp), %rbp; \
258 282 addq $REGOFF_TRAPNO, %rsp /* pop stack */
259 283
260 284 #else /* __xpv */
261 285
262 286 #define SET_CPU_GSBASE /* noop on the hypervisor */
263 287
264 288 #endif /* __xpv */
265 289 #endif /* __amd64 */
266 290
267 291
268 292 #if defined(__amd64)
269 293
270 294 /*
271 295 * #NMI
272 296 *
273 297 * XXPV: See 6532669.
274 298 */
275 299 ENTRY_NP(nmiint)
276 300 TRAP_NOERR(T_NMIFLT) /* $2 */
277 301
278 302 SET_CPU_GSBASE
279 303
280 304 /*
281 305 * Save all registers and setup segment registers
282 306 * with kernel selectors.
283 307 */
284 308 INTR_PUSH
285 309 INTGATE_INIT_KERNEL_FLAGS
286 310
[ 60 lines elided ]
287 311 TRACE_PTR(%r12, %rax, %eax, %rdx, $TT_TRAP)
288 312 TRACE_REGS(%r12, %rsp, %rax, %rbx)
289 313 TRACE_STAMP(%r12)
290 314
291 315 movq %rsp, %rbp
292 316
293 317 movq %rbp, %rdi
294 318 call av_dispatch_nmivect
295 319
296 320 INTR_POP
297 - IRET
321 + jmp tr_iret_auto
298 322 /*NOTREACHED*/
299 323 SET_SIZE(nmiint)
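
The KPTI change visible throughout this file is the replacement of the IRET macro with jumps to the return trampolines: tr_iret_kernel where the code already knows it is returning to kernel mode, and tr_iret_auto (as above) where the destination may be user or kernel. The trampolines themselves are defined elsewhere in this change, not in exception.s; the following is only a hypothetical sketch of the distinction, with names and stack offsets assumed for illustration:

	/* hypothetical sketch -- not the actual trampoline code */
tr_iret_kernel:
	iretq				/* kernel-to-kernel return: no page-table switch */

tr_iret_auto:
	cmpw	$KCS_SEL, 8(%rsp)	/* %cs of the interrupted context on the iret frame */
	je	tr_iret_kernel		/* kernel return: take the simple path */
	/* user return: switch to the user page table (and trampoline stack) before iretq */
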
300 324
301 325 #elif defined(__i386)
302 326
303 327 /*
304 328 * #NMI
305 329 */
306 330 ENTRY_NP(nmiint)
307 331 TRAP_NOERR(T_NMIFLT) /* $2 */
308 332
309 333 /*
310 334 * Save all registers and setup segment registers
311 335 * with kernel selectors.
[ 4 lines elided ]
312 336 */
313 337 INTR_PUSH
314 338 INTGATE_INIT_KERNEL_FLAGS
315 339
316 340 TRACE_PTR(%edi, %ebx, %ebx, %ecx, $TT_TRAP)
317 341 TRACE_REGS(%edi, %esp, %ebx, %ecx)
318 342 TRACE_STAMP(%edi)
319 343
320 344 movl %esp, %ebp
321 345
322 - pushl %ebp
323 - call av_dispatch_nmivect
346 + pushl %ebp
347 + call av_dispatch_nmivect
324 348 addl $4, %esp
325 349
326 350 INTR_POP_USER
327 351 IRET
328 352 SET_SIZE(nmiint)
329 353
330 354 #endif /* __i386 */
331 355
332 356 /*
333 357 * #BP
334 358 */
335 359 ENTRY_NP(brktrap)
336 360
337 361 #if defined(__amd64)
338 362 XPV_TRAP_POP
339 363 cmpw $KCS_SEL, 8(%rsp)
340 364 jne bp_user
341 365
342 366 /*
343 367 * This is a breakpoint in the kernel -- it is very likely that this
344 368 * is DTrace-induced. To unify DTrace handling, we spoof this as an
345 369 * invalid opcode (#UD) fault. Note that #BP is a trap, not a fault --
346 370 * we must decrement the trapping %rip to make it appear as a fault.
347 371 * We then push a non-zero error code to indicate that this is coming
348 372 * from #BP.
349 373 */
350 374 decq (%rsp)
351 375 push $1 /* error code -- non-zero for #BP */
352 376 jmp ud_kernel
353 377
354 378 bp_user:
355 379 #endif /* __amd64 */
356 380
357 381 NPTRAP_NOERR(T_BPTFLT) /* $3 */
358 382 jmp dtrace_trap
359 383
360 384 SET_SIZE(brktrap)
361 385
362 386 /*
363 387 * #OF
364 388 */
365 389 ENTRY_NP(ovflotrap)
366 390 TRAP_NOERR(T_OVFLW) /* $4 */
367 391 jmp cmntrap
368 392 SET_SIZE(ovflotrap)
369 393
370 394 /*
371 395 * #BR
372 396 */
373 397 ENTRY_NP(boundstrap)
374 398 TRAP_NOERR(T_BOUNDFLT) /* $5 */
375 399 jmp cmntrap
376 400 SET_SIZE(boundstrap)
377 401
378 402 #if defined(__amd64)
379 403
380 404 ENTRY_NP(invoptrap)
381 405
382 406 XPV_TRAP_POP
383 407
384 408 cmpw $KCS_SEL, 8(%rsp)
385 409 jne ud_user
386 410
387 411 #if defined(__xpv)
388 412 movb $0, 12(%rsp) /* clear saved upcall_mask from %cs */
389 413 #endif
390 414 push $0 /* error code -- zero for #UD */
391 415 ud_kernel:
392 416 push $0xdddd /* a dummy trap number */
393 417 INTR_PUSH
394 418 movq REGOFF_RIP(%rsp), %rdi
395 419 movq REGOFF_RSP(%rsp), %rsi
396 420 movq REGOFF_RAX(%rsp), %rdx
397 421 pushq (%rsi)
398 422 movq %rsp, %rsi
399 423 subq $8, %rsp
400 424 call dtrace_invop
401 425 ALTENTRY(dtrace_invop_callsite)
402 426 addq $16, %rsp
403 427 cmpl $DTRACE_INVOP_PUSHL_EBP, %eax
404 428 je ud_push
405 429 cmpl $DTRACE_INVOP_LEAVE, %eax
406 430 je ud_leave
407 431 cmpl $DTRACE_INVOP_NOP, %eax
408 432 je ud_nop
409 433 cmpl $DTRACE_INVOP_RET, %eax
410 434 je ud_ret
411 435 jmp ud_trap
412 436
413 437 ud_push:
414 438 /*
415 439 * We must emulate a "pushq %rbp". To do this, we pull the stack
416 440 * down 8 bytes, and then store the base pointer.
417 441 */
418 442 INTR_POP
419 443 subq $16, %rsp /* make room for %rbp */
420 444 pushq %rax /* push temp */
421 445 movq 24(%rsp), %rax /* load calling RIP */
422 446 addq $1, %rax /* increment over trapping instr */
423 447 movq %rax, 8(%rsp) /* store calling RIP */
424 448 movq 32(%rsp), %rax /* load calling CS */
425 449 movq %rax, 16(%rsp) /* store calling CS */
[ 92 lines elided ]
426 450 movq 40(%rsp), %rax /* load calling RFLAGS */
427 451 movq %rax, 24(%rsp) /* store calling RFLAGS */
428 452 movq 48(%rsp), %rax /* load calling RSP */
429 453 subq $8, %rax /* make room for %rbp */
430 454 movq %rax, 32(%rsp) /* store calling RSP */
431 455 movq 56(%rsp), %rax /* load calling SS */
432 456 movq %rax, 40(%rsp) /* store calling SS */
433 457 movq 32(%rsp), %rax /* reload calling RSP */
434 458 movq %rbp, (%rax) /* store %rbp there */
435 459 popq %rax /* pop off temp */
436 - IRET /* return from interrupt */
460 + jmp tr_iret_kernel /* return from interrupt */
437 461 /*NOTREACHED*/
438 462
439 463 ud_leave:
440 464 /*
441 465 * We must emulate a "leave", which is the same as a "movq %rbp, %rsp"
442 466 * followed by a "popq %rbp". This is quite a bit simpler on amd64
443 467 * than it is on i386 -- we can exploit the fact that the %rsp is
444 468 * explicitly saved to effect the pop without having to reshuffle
445 469 * the other data pushed for the trap.
446 470 */
447 471 INTR_POP
448 472 pushq %rax /* push temp */
449 473 movq 8(%rsp), %rax /* load calling RIP */
450 474 addq $1, %rax /* increment over trapping instr */
451 475 movq %rax, 8(%rsp) /* store calling RIP */
452 476 movq (%rbp), %rax /* get new %rbp */
453 477 addq $8, %rbp /* adjust new %rsp */
454 478 movq %rbp, 32(%rsp) /* store new %rsp */
455 479 movq %rax, %rbp /* set new %rbp */
456 480 popq %rax /* pop off temp */
457 - IRET /* return from interrupt */
481 + jmp tr_iret_kernel /* return from interrupt */
458 482 /*NOTREACHED*/
459 483
460 484 ud_nop:
461 485 /*
462 486 * We must emulate a "nop". This is obviously not hard: we need only
463 487 * advance the %rip by one.
464 488 */
465 489 INTR_POP
466 490 incq (%rsp)
467 - IRET
491 + jmp tr_iret_kernel
468 492 /*NOTREACHED*/
469 493
470 494 ud_ret:
471 495 INTR_POP
472 496 pushq %rax /* push temp */
473 497 movq 32(%rsp), %rax /* load %rsp */
474 498 movq (%rax), %rax /* load calling RIP */
475 499 movq %rax, 8(%rsp) /* store calling RIP */
476 500 addq $8, 32(%rsp) /* adjust new %rsp */
477 501 popq %rax /* pop off temp */
478 - IRET /* return from interrupt */
502 + jmp tr_iret_kernel /* return from interrupt */
479 503 /*NOTREACHED*/
480 504
481 505 ud_trap:
482 506 /*
483 507 * We're going to let the kernel handle this as a normal #UD. If,
484 508 * however, we came through #BP and are spoofing #UD (in this case,
485 509 * the stored error value will be non-zero), we need to de-spoof
486 510 * the trap by incrementing %rip and pushing T_BPTFLT.
487 511 */
488 512 cmpq $0, REGOFF_ERR(%rsp)
489 513 je ud_ud
490 514 incq REGOFF_RIP(%rsp)
491 515 addq $REGOFF_RIP, %rsp
492 516 NPTRAP_NOERR(T_BPTFLT) /* $3 */
493 517 jmp cmntrap
494 518
495 519 ud_ud:
496 520 addq $REGOFF_RIP, %rsp
497 521 ud_user:
498 522 NPTRAP_NOERR(T_ILLINST)
499 523 jmp cmntrap
500 524 SET_SIZE(invoptrap)
501 525
502 526 #elif defined(__i386)
503 527
504 528 /*
505 529 * #UD
506 530 */
507 531 ENTRY_NP(invoptrap)
508 532 /*
509 533 * If we are taking an invalid opcode trap while in the kernel, this
510 534 * is likely an FBT probe point.
511 535 */
512 536 pushl %gs
513 537 cmpw $KGS_SEL, (%esp)
514 538 jne 8f
515 539
516 540 addl $4, %esp
517 541 #if defined(__xpv)
518 542 movb $0, 6(%esp) /* clear saved upcall_mask from %cs */
519 543 #endif /* __xpv */
520 544 pusha
521 545 pushl %eax /* push %eax -- may be return value */
522 546 pushl %esp /* push stack pointer */
523 547 addl $48, (%esp) /* adjust to incoming args */
524 548 pushl 40(%esp) /* push calling EIP */
525 549 call dtrace_invop
526 550 ALTENTRY(dtrace_invop_callsite)
527 551 addl $12, %esp
528 552 cmpl $DTRACE_INVOP_PUSHL_EBP, %eax
529 553 je 1f
530 554 cmpl $DTRACE_INVOP_POPL_EBP, %eax
531 555 je 2f
532 556 cmpl $DTRACE_INVOP_LEAVE, %eax
533 557 je 3f
534 558 cmpl $DTRACE_INVOP_NOP, %eax
535 559 je 4f
536 560 jmp 7f
537 561 1:
538 562 /*
539 563 * We must emulate a "pushl %ebp". To do this, we pull the stack
540 564 * down 4 bytes, and then store the base pointer.
541 565 */
542 566 popa
543 567 subl $4, %esp /* make room for %ebp */
544 568 pushl %eax /* push temp */
545 569 movl 8(%esp), %eax /* load calling EIP */
546 570 incl %eax /* increment over LOCK prefix */
547 571 movl %eax, 4(%esp) /* store calling EIP */
548 572 movl 12(%esp), %eax /* load calling CS */
549 573 movl %eax, 8(%esp) /* store calling CS */
550 574 movl 16(%esp), %eax /* load calling EFLAGS */
551 575 movl %eax, 12(%esp) /* store calling EFLAGS */
552 576 movl %ebp, 16(%esp) /* push %ebp */
553 577 popl %eax /* pop off temp */
554 578 jmp _emul_done
555 579 2:
556 580 /*
557 581 * We must emulate a "popl %ebp". To do this, we do the opposite of
558 582 * the above: we remove the %ebp from the stack, and squeeze up the
559 583 * saved state from the trap.
560 584 */
561 585 popa
562 586 pushl %eax /* push temp */
563 587 movl 16(%esp), %ebp /* pop %ebp */
564 588 movl 12(%esp), %eax /* load calling EFLAGS */
565 589 movl %eax, 16(%esp) /* store calling EFLAGS */
566 590 movl 8(%esp), %eax /* load calling CS */
567 591 movl %eax, 12(%esp) /* store calling CS */
568 592 movl 4(%esp), %eax /* load calling EIP */
569 593 incl %eax /* increment over LOCK prefix */
570 594 movl %eax, 8(%esp) /* store calling EIP */
571 595 popl %eax /* pop off temp */
572 596 addl $4, %esp /* adjust stack pointer */
573 597 jmp _emul_done
574 598 3:
575 599 /*
576 600 * We must emulate a "leave", which is the same as a "movl %ebp, %esp"
577 601 * followed by a "popl %ebp". This looks similar to the above, but
578 602 * requires two temporaries: one for the new base pointer, and one
579 603 * for the staging register.
580 604 */
581 605 popa
582 606 pushl %eax /* push temp */
583 607 pushl %ebx /* push temp */
584 608 movl %ebp, %ebx /* set temp to old %ebp */
585 609 movl (%ebx), %ebp /* pop %ebp */
586 610 movl 16(%esp), %eax /* load calling EFLAGS */
587 611 movl %eax, (%ebx) /* store calling EFLAGS */
588 612 movl 12(%esp), %eax /* load calling CS */
589 613 movl %eax, -4(%ebx) /* store calling CS */
590 614 movl 8(%esp), %eax /* load calling EIP */
591 615 incl %eax /* increment over LOCK prefix */
592 616 movl %eax, -8(%ebx) /* store calling EIP */
593 617 movl %ebx, -4(%esp) /* temporarily store new %esp */
594 618 popl %ebx /* pop off temp */
595 619 popl %eax /* pop off temp */
596 620 movl -12(%esp), %esp /* set stack pointer */
597 621 subl $8, %esp /* adjust for three pushes, one pop */
598 622 jmp _emul_done
599 623 4:
600 624 /*
601 625 * We must emulate a "nop". This is obviously not hard: we need only
602 626 * advance the %eip by one.
603 627 */
604 628 popa
605 629 incl (%esp)
606 630 _emul_done:
607 631 IRET /* return from interrupt */
608 632 7:
609 633 popa
610 634 pushl $0
611 635 pushl $T_ILLINST /* $6 */
612 636 jmp cmntrap
613 637 8:
614 638 addl $4, %esp
615 639 pushl $0
616 640 pushl $T_ILLINST /* $6 */
617 641 jmp cmntrap
618 642 SET_SIZE(invoptrap)
619 643
620 644 #endif /* __i386 */
621 645
622 646 #if defined(__amd64)
623 647
624 648 /*
625 649 * #NM
[ 137 lines elided ]
626 650 */
627 651 #if defined(__xpv)
628 652
629 653 ENTRY_NP(ndptrap)
630 654 /*
631 655 * (On the hypervisor we must make a hypercall so we might as well
632 656 * save everything and handle as in a normal trap.)
633 657 */
634 658 TRAP_NOERR(T_NOEXTFLT) /* $7 */
635 659 INTR_PUSH
636 -
660 +
637 661 /*
638 662 * We want to do this quickly as every lwp using fp will take this
639 663 * after a context switch -- we do the frequent path in ndptrap_frstor
640 664 * below; for all other cases, we let the trap code handle it
641 665 */
642 666 LOADCPU(%rax) /* swapgs handled in hypervisor */
643 667 cmpl $0, fpu_exists(%rip)
644 668 je .handle_in_trap /* let trap handle no fp case */
645 669 movq CPU_THREAD(%rax), %rbx /* %rbx = curthread */
646 670 movl $FPU_EN, %eax
647 671 movq T_LWP(%rbx), %rbx /* %rbx = lwp */
648 672 testq %rbx, %rbx
649 673 jz .handle_in_trap /* should not happen? */
650 674 #if LWP_PCB_FPU != 0
651 675 addq $LWP_PCB_FPU, %rbx /* &lwp->lwp_pcb.pcb_fpu */
652 676 #endif
653 677 testl %eax, PCB_FPU_FLAGS(%rbx)
654 678 jz .handle_in_trap /* must be the first fault */
655 679 CLTS
656 680 andl $_BITNOT(FPU_VALID), PCB_FPU_FLAGS(%rbx)
657 681 #if FPU_CTX_FPU_REGS != 0
658 682 addq $FPU_CTX_FPU_REGS, %rbx
659 683 #endif
660 684
661 685 movl FPU_CTX_FPU_XSAVE_MASK(%rbx), %eax /* for xrstor */
662 686 movl FPU_CTX_FPU_XSAVE_MASK+4(%rbx), %edx /* for xrstor */
663 687
664 688 /*
665 689 * the label below is used in trap.c to detect FP faults in
666 690 * kernel due to user fault.
667 691 */
668 692 ALTENTRY(ndptrap_frstor)
669 693 movq (%rbx), %rbx /* fpu_regs.kfpu_u.kfpu_XX pointer */
670 694 .globl _patch_xrstorq_rbx
671 695 _patch_xrstorq_rbx:
672 696 fxrstorq (%rbx)
673 697 cmpw $KCS_SEL, REGOFF_CS(%rsp)
674 698 je .return_to_kernel
675 699
676 700 ASSERT_UPCALL_MASK_IS_SET
677 701 USER_POP
678 702 IRET /* return to user mode */
679 703 /*NOTREACHED*/
680 704
681 705 .return_to_kernel:
682 706 INTR_POP
683 707 IRET
684 708 /*NOTREACHED*/
685 709
686 710 .handle_in_trap:
687 711 INTR_POP
688 712 pushq $0 /* can not use TRAP_NOERR */
689 713 pushq $T_NOEXTFLT
690 714 jmp cmninttrap
691 715 SET_SIZE(ndptrap_frstor)
692 716 SET_SIZE(ndptrap)
693 717
694 718 #else /* __xpv */
695 719
696 720 ENTRY_NP(ndptrap)
697 721 /*
698 722 * We want to do this quickly as every lwp using fp will take this
699 723 * after a context switch -- we do the frequent path in ndptrap_frstor
700 724 * below; for all other cases, we let the trap code handle it
701 725 */
[ 55 lines elided ]
702 726 pushq %rax
703 727 pushq %rbx
704 728 cmpw $KCS_SEL, 24(%rsp) /* did we come from kernel mode? */
705 729 jne 1f
706 730 LOADCPU(%rax) /* if yes, don't swapgs */
707 731 jmp 2f
708 732 1:
709 733 SWAPGS /* if from user, need swapgs */
710 734 LOADCPU(%rax)
711 735 SWAPGS
712 -2:
736 +2:
713 737 /*
714 738 * Xrstor needs to use edx as part of its flag.
715 739 * NOTE: have to push rdx after "cmpw ...24(%rsp)", otherwise rsp+$24
716 740 * will not point to CS.
717 741 */
718 742 pushq %rdx
719 743 cmpl $0, fpu_exists(%rip)
720 744 je .handle_in_trap /* let trap handle no fp case */
721 745 movq CPU_THREAD(%rax), %rbx /* %rbx = curthread */
722 746 movl $FPU_EN, %eax
723 747 movq T_LWP(%rbx), %rbx /* %rbx = lwp */
724 748 testq %rbx, %rbx
725 749 jz .handle_in_trap /* should not happen? */
726 750 #if LWP_PCB_FPU != 0
727 751 addq $LWP_PCB_FPU, %rbx /* &lwp->lwp_pcb.pcb_fpu */
728 752 #endif
729 753 testl %eax, PCB_FPU_FLAGS(%rbx)
730 754 jz .handle_in_trap /* must be the first fault */
731 755 clts
732 756 andl $_BITNOT(FPU_VALID), PCB_FPU_FLAGS(%rbx)
733 757 #if FPU_CTX_FPU_REGS != 0
734 758 addq $FPU_CTX_FPU_REGS, %rbx
735 759 #endif
736 760
737 761 movl FPU_CTX_FPU_XSAVE_MASK(%rbx), %eax /* for xrstor */
738 762 movl FPU_CTX_FPU_XSAVE_MASK+4(%rbx), %edx /* for xrstor */
739 763
740 764 /*
741 765 * the label below is used in trap.c to detect FP faults in
[ 19 lines elided ]
742 766 * kernel due to user fault.
743 767 */
744 768 ALTENTRY(ndptrap_frstor)
745 769 movq (%rbx), %rbx /* fpu_regs.kfpu_u.kfpu_XX pointer */
746 770 .globl _patch_xrstorq_rbx
747 771 _patch_xrstorq_rbx:
748 772 fxrstorq (%rbx)
749 773 popq %rdx
750 774 popq %rbx
751 775 popq %rax
752 - IRET
776 + jmp tr_iret_auto
753 777 /*NOTREACHED*/
754 778
755 779 .handle_in_trap:
756 780 popq %rdx
757 781 popq %rbx
758 782 popq %rax
759 783 TRAP_NOERR(T_NOEXTFLT) /* $7 */
760 784 jmp cmninttrap
761 785 SET_SIZE(ndptrap_frstor)
762 786 SET_SIZE(ndptrap)
763 787
764 788 #endif /* __xpv */
765 789
766 790 #elif defined(__i386)
767 791
768 792 ENTRY_NP(ndptrap)
769 793 /*
770 794 * We want to do this quickly as every lwp using fp will take this
771 795 * after a context switch -- we do the frequent path in fpnoextflt
772 796 * below; for all other cases, we let the trap code handle it
773 797 */
774 798 pushl %eax
775 799 pushl %ebx
776 800 pushl %edx /* for xrstor */
777 801 pushl %ds
778 802 pushl %gs
779 803 movl $KDS_SEL, %ebx
780 804 movw %bx, %ds
781 805 movl $KGS_SEL, %eax
782 806 movw %ax, %gs
783 807 LOADCPU(%eax)
784 808 cmpl $0, fpu_exists
785 809 je .handle_in_trap /* let trap handle no fp case */
786 810 movl CPU_THREAD(%eax), %ebx /* %ebx = curthread */
787 811 movl $FPU_EN, %eax
788 812 movl T_LWP(%ebx), %ebx /* %ebx = lwp */
789 813 testl %ebx, %ebx
790 814 jz .handle_in_trap /* should not happen? */
791 815 #if LWP_PCB_FPU != 0
792 816 addl $LWP_PCB_FPU, %ebx /* &lwp->lwp_pcb.pcb_fpu */
793 817 #endif
794 818 testl %eax, PCB_FPU_FLAGS(%ebx)
795 819 jz .handle_in_trap /* must be the first fault */
796 820 CLTS
797 821 andl $_BITNOT(FPU_VALID), PCB_FPU_FLAGS(%ebx)
798 822 #if FPU_CTX_FPU_REGS != 0
799 823 addl $FPU_CTX_FPU_REGS, %ebx
800 824 #endif
801 825
802 826 movl FPU_CTX_FPU_XSAVE_MASK(%ebx), %eax /* for xrstor */
803 827 movl FPU_CTX_FPU_XSAVE_MASK+4(%ebx), %edx /* for xrstor */
804 828
805 829 /*
806 830 * the label below is used in trap.c to detect FP faults in kernel
807 831 * due to user fault.
808 832 */
809 833 ALTENTRY(ndptrap_frstor)
810 834 movl (%ebx), %ebx /* fpu_regs.kfpu_u.kfpu_XX pointer */
811 835 .globl _patch_fxrstor_ebx
812 836 _patch_fxrstor_ebx:
813 837 .globl _patch_xrstor_ebx
814 838 _patch_xrstor_ebx:
815 839 frstor (%ebx) /* may be patched to fxrstor or xrstor */
816 840 popl %gs
817 841 popl %ds
818 842 popl %edx
819 843 popl %ebx
820 844 popl %eax
821 845 IRET
822 846
823 847 .handle_in_trap:
824 848 popl %gs
825 849 popl %ds
826 850 popl %edx
827 851 popl %ebx
828 852 popl %eax
829 853 TRAP_NOERR(T_NOEXTFLT) /* $7 */
830 854 jmp cmninttrap
831 855 SET_SIZE(ndptrap_frstor)
832 856 SET_SIZE(ndptrap)
833 857
834 858 #endif /* __i386 */
835 859
836 860 #if !defined(__xpv)
837 861 #if defined(__amd64)
838 862
839 863 /*
840 864 * #DF
841 865 */
842 866 ENTRY_NP(syserrtrap)
843 867 pushq $T_DBLFLT
844 868 SET_CPU_GSBASE
845 869
846 870 /*
847 871 * We share this handler with kmdb (if kmdb is loaded). As such, we
848 872 * may have reached this point after encountering a #df in kmdb. If
849 873 * that happens, we'll still be on kmdb's IDT. We need to switch back
850 874 * to this CPU's IDT before proceeding. Furthermore, if we did arrive
851 875 * here from kmdb, kmdb is probably in a very sickly state, and
852 876 * shouldn't be entered from the panic flow. We'll suppress that
853 877 * entry by setting nopanicdebug.
854 878 */
855 879 pushq %rax
856 880 subq $DESCTBR_SIZE, %rsp
857 881 sidt (%rsp)
858 882 movq %gs:CPU_IDT, %rax
859 883 cmpq %rax, DTR_BASE(%rsp)
[ 97 lines elided ]
860 884 je 1f
861 885
862 886 movq %rax, DTR_BASE(%rsp)
863 887 movw $_MUL(NIDT, GATE_DESC_SIZE), DTR_LIMIT(%rsp)
864 888 lidt (%rsp)
865 889
866 890 movl $1, nopanicdebug
867 891
868 892 1: addq $DESCTBR_SIZE, %rsp
869 893 popq %rax
870 -
894 +
871 895 DFTRAP_PUSH
872 896
873 897 /*
874 898 * freeze trap trace.
875 899 */
876 900 #ifdef TRAPTRACE
877 901 leaq trap_trace_freeze(%rip), %r11
878 902 incl (%r11)
879 903 #endif
880 904
881 905 ENABLE_INTR_FLAGS
882 906
883 907 	movq	%rsp, %rdi	/* &regs */
884 908 xorl %esi, %esi /* clear address */
885 909 xorl %edx, %edx /* cpuid = 0 */
886 910 call trap
887 911
888 912 SET_SIZE(syserrtrap)
889 913
890 914 #elif defined(__i386)
891 915
892 916 /*
893 917 * #DF
894 918 */
895 919 ENTRY_NP(syserrtrap)
896 920 cli /* disable interrupts */
897 921
898 922 /*
899 923 * We share this handler with kmdb (if kmdb is loaded). As such, we
900 924 * may have reached this point after encountering a #df in kmdb. If
901 925 * that happens, we'll still be on kmdb's IDT. We need to switch back
902 926 * to this CPU's IDT before proceeding. Furthermore, if we did arrive
903 927 * here from kmdb, kmdb is probably in a very sickly state, and
904 928 * shouldn't be entered from the panic flow. We'll suppress that
905 929 * entry by setting nopanicdebug.
906 930 */
907 931
908 932 subl $DESCTBR_SIZE, %esp
909 933 movl %gs:CPU_IDT, %eax
910 934 sidt (%esp)
911 935 cmpl DTR_BASE(%esp), %eax
912 936 je 1f
913 937
914 938 movl %eax, DTR_BASE(%esp)
915 939 movw $_MUL(NIDT, GATE_DESC_SIZE), DTR_LIMIT(%esp)
916 940 lidt (%esp)
917 941
918 942 movl $1, nopanicdebug
919 943
920 944 1: addl $DESCTBR_SIZE, %esp
921 945
922 946 /*
923 947 * Check the CPL in the TSS to see what mode
924 948 * (user or kernel) we took the fault in. At this
925 949 * point we are running in the context of the double
926 950 * fault task (dftss) but the CPU's task points to
927 951 * the previous task (ktss) where the process context
928 952 * has been saved as the result of the task switch.
929 953 */
930 954 movl %gs:CPU_TSS, %eax /* get the TSS */
931 955 movl TSS_SS(%eax), %ebx /* save the fault SS */
932 956 movl TSS_ESP(%eax), %edx /* save the fault ESP */
933 957 testw $CPL_MASK, TSS_CS(%eax) /* user mode ? */
934 958 jz make_frame
935 959 movw TSS_SS0(%eax), %ss /* get on the kernel stack */
936 960 movl TSS_ESP0(%eax), %esp
937 961
938 962 /*
939 963 * Clear the NT flag to avoid a task switch when the process
940 964 * finally pops the EFL off the stack via an iret. Clear
941 965 * the TF flag since that is what the processor does for
942 966 * a normal exception. Clear the IE flag so that interrupts
943 967 * remain disabled.
944 968 */
945 969 movl TSS_EFL(%eax), %ecx
946 970 andl $_BITNOT(PS_NT|PS_T|PS_IE), %ecx
947 971 pushl %ecx
948 972 popfl /* restore the EFL */
949 973 movw TSS_LDT(%eax), %cx /* restore the LDT */
950 974 lldt %cx
951 975
952 976 /*
953 977 * Restore process segment selectors.
954 978 */
955 979 movw TSS_DS(%eax), %ds
956 980 movw TSS_ES(%eax), %es
957 981 movw TSS_FS(%eax), %fs
958 982 movw TSS_GS(%eax), %gs
959 983
960 984 /*
961 985 * Restore task segment selectors.
962 986 */
963 987 movl $KDS_SEL, TSS_DS(%eax)
964 988 movl $KDS_SEL, TSS_ES(%eax)
965 989 movl $KDS_SEL, TSS_SS(%eax)
966 990 movl $KFS_SEL, TSS_FS(%eax)
967 991 movl $KGS_SEL, TSS_GS(%eax)
968 992
969 993 /*
970 994 * Clear the TS bit, the busy bits in both task
971 995 * descriptors, and switch tasks.
972 996 */
973 997 clts
974 998 leal gdt0, %ecx
975 999 movl DFTSS_SEL+4(%ecx), %esi
976 1000 andl $_BITNOT(0x200), %esi
977 1001 movl %esi, DFTSS_SEL+4(%ecx)
978 1002 movl KTSS_SEL+4(%ecx), %esi
979 1003 andl $_BITNOT(0x200), %esi
980 1004 movl %esi, KTSS_SEL+4(%ecx)
981 1005 movw $KTSS_SEL, %cx
982 1006 ltr %cx
983 1007
984 1008 /*
985 1009 * Restore part of the process registers.
986 1010 */
987 1011 movl TSS_EBP(%eax), %ebp
988 1012 movl TSS_ECX(%eax), %ecx
989 1013 movl TSS_ESI(%eax), %esi
990 1014 movl TSS_EDI(%eax), %edi
991 1015
992 1016 make_frame:
993 1017 /*
994 1018 * Make a trap frame. Leave the error code (0) on
995 1019 * the stack since the first word on a trap stack is
996 1020 * unused anyway.
997 1021 */
998 1022 pushl %ebx / fault SS
999 1023 pushl %edx / fault ESP
1000 1024 pushl TSS_EFL(%eax) / fault EFL
1001 1025 pushl TSS_CS(%eax) / fault CS
1002 1026 pushl TSS_EIP(%eax) / fault EIP
1003 1027 pushl $0 / error code
1004 1028 pushl $T_DBLFLT / trap number 8
1005 1029 movl TSS_EBX(%eax), %ebx / restore EBX
1006 1030 movl TSS_EDX(%eax), %edx / restore EDX
1007 1031 movl TSS_EAX(%eax), %eax / restore EAX
1008 1032 sti / enable interrupts
1009 1033 jmp cmntrap
1010 1034 SET_SIZE(syserrtrap)
1011 1035
1012 1036 #endif /* __i386 */
1013 1037 #endif /* !__xpv */
1014 1038
1015 1039 ENTRY_NP(overrun)
1016 1040 push $0
1017 1041 TRAP_NOERR(T_EXTOVRFLT) /* $9 i386 only - not generated */
1018 1042 jmp cmninttrap
1019 1043 SET_SIZE(overrun)
1020 1044
1021 1045 /*
1022 1046 * #TS
1023 1047 */
1024 1048 ENTRY_NP(invtsstrap)
1025 1049 TRAP_ERR(T_TSSFLT) /* $10 already have error code on stack */
1026 1050 jmp cmntrap
1027 1051 SET_SIZE(invtsstrap)
1028 1052
1029 1053 /*
1030 1054 * #NP
1031 1055 */
1032 1056 ENTRY_NP(segnptrap)
1033 1057 TRAP_ERR(T_SEGFLT) /* $11 already have error code on stack */
1034 1058 #if defined(__amd64)
1035 1059 SET_CPU_GSBASE
1036 1060 #endif
1037 1061 jmp cmntrap
1038 1062 SET_SIZE(segnptrap)
1039 1063
1040 1064 /*
1041 1065 * #SS
1042 1066 */
1043 1067 ENTRY_NP(stktrap)
1044 1068 TRAP_ERR(T_STKFLT) /* $12 already have error code on stack */
1045 1069 #if defined(__amd64)
1046 1070 SET_CPU_GSBASE
1047 1071 #endif
1048 1072 jmp cmntrap
1049 1073 SET_SIZE(stktrap)
1050 1074
1051 1075 /*
1052 1076 * #GP
1053 1077 */
1054 1078 ENTRY_NP(gptrap)
1055 1079 TRAP_ERR(T_GPFLT) /* $13 already have error code on stack */
1056 1080 #if defined(__amd64)
1057 1081 SET_CPU_GSBASE
1058 1082 #endif
1059 1083 jmp cmntrap
1060 1084 SET_SIZE(gptrap)
1061 1085
1062 1086 /*
1063 1087 * #PF
1064 1088 */
1065 1089 ENTRY_NP(pftrap)
1066 1090 TRAP_ERR(T_PGFLT) /* $14 already have error code on stack */
1067 1091 INTR_PUSH
1068 1092 #if defined(__xpv)
1069 1093
1070 1094 #if defined(__amd64)
1071 1095 movq %gs:CPU_VCPU_INFO, %r15
1072 1096 movq VCPU_INFO_ARCH_CR2(%r15), %r15 /* vcpu[].arch.cr2 */
1073 1097 #elif defined(__i386)
1074 1098 movl %gs:CPU_VCPU_INFO, %esi
1075 1099 movl VCPU_INFO_ARCH_CR2(%esi), %esi /* vcpu[].arch.cr2 */
1076 1100 #endif /* __i386 */
1077 1101
1078 1102 #else /* __xpv */
1079 1103
1080 1104 #if defined(__amd64)
1081 1105 movq %cr2, %r15
1082 1106 #elif defined(__i386)
1083 1107 movl %cr2, %esi
1084 1108 #endif /* __i386 */
1085 1109
1086 1110 #endif /* __xpv */
1087 1111 jmp cmntrap_pushed
1088 1112 SET_SIZE(pftrap)
1089 1113
1090 1114 #if !defined(__amd64)
1091 1115
1092 1116 .globl idt0_default_r
1093 1117
1094 1118 /*
1095 1119 * #PF pentium bug workaround
1096 1120 */
1097 1121 ENTRY_NP(pentium_pftrap)
1098 1122 pushl %eax
1099 1123 movl %cr2, %eax
1100 1124 andl $MMU_STD_PAGEMASK, %eax
1101 1125
1102 1126 cmpl %eax, %cs:idt0_default_r+2 /* fixme */
1103 1127
1104 1128 je check_for_user_address
1105 1129 user_mode:
1106 1130 popl %eax
1107 1131 pushl $T_PGFLT /* $14 */
1108 1132 jmp cmntrap
1109 1133 check_for_user_address:
1110 1134 /*
1111 1135 * Before we assume that we have an unmapped trap on our hands,
1112 1136 * check to see if this is a fault from user mode. If it is,
1113 1137 * we'll kick back into the page fault handler.
1114 1138 */
1115 1139 movl 4(%esp), %eax /* error code */
1116 1140 andl $PF_ERR_USER, %eax
1117 1141 jnz user_mode
1118 1142
1119 1143 /*
[ 239 lines elided ]
1120 1144 * We now know that this is the invalid opcode trap.
1121 1145 */
1122 1146 popl %eax
1123 1147 addl $4, %esp /* pop error code */
1124 1148 jmp invoptrap
1125 1149 SET_SIZE(pentium_pftrap)
1126 1150
1127 1151 #endif /* !__amd64 */
1128 1152
1129 1153 ENTRY_NP(resvtrap)
1130 - TRAP_NOERR(15) /* (reserved) */
1154 + TRAP_NOERR(T_RESVTRAP) /* (reserved) */
1131 1155 jmp cmntrap
1132 1156 SET_SIZE(resvtrap)
1133 1157
1134 1158 /*
1135 1159 * #MF
1136 1160 */
1137 1161 ENTRY_NP(ndperr)
1138 1162 TRAP_NOERR(T_EXTERRFLT) /* $16 */
1139 1163 jmp cmninttrap
1140 1164 SET_SIZE(ndperr)
1141 1165
1142 1166 /*
1143 1167 * #AC
1144 1168 */
1145 1169 ENTRY_NP(achktrap)
1146 1170 TRAP_ERR(T_ALIGNMENT) /* $17 */
1147 1171 jmp cmntrap
1148 1172 SET_SIZE(achktrap)
1149 1173
1150 1174 /*
1151 1175 * #MC
1152 1176 */
1153 1177 .globl cmi_mca_trap /* see uts/i86pc/os/cmi.c */
1154 1178
1155 1179 #if defined(__amd64)
1156 1180
1157 1181 ENTRY_NP(mcetrap)
1158 1182 TRAP_NOERR(T_MCE) /* $18 */
1159 1183
1160 1184 SET_CPU_GSBASE
1161 1185
1162 1186 INTR_PUSH
1163 1187 INTGATE_INIT_KERNEL_FLAGS
1164 1188
1165 1189 TRACE_PTR(%rdi, %rbx, %ebx, %rcx, $TT_TRAP)
1166 1190 TRACE_REGS(%rdi, %rsp, %rbx, %rcx)
1167 1191 TRACE_STAMP(%rdi)
1168 1192
1169 1193 movq %rsp, %rbp
1170 1194
1171 1195 movq %rsp, %rdi /* arg0 = struct regs *rp */
1172 1196 call cmi_mca_trap /* cmi_mca_trap(rp); */
1173 1197
1174 1198 jmp _sys_rtt
1175 1199 SET_SIZE(mcetrap)
1176 1200
1177 1201 #else
1178 1202
1179 1203 ENTRY_NP(mcetrap)
1180 1204 TRAP_NOERR(T_MCE) /* $18 */
1181 1205
1182 1206 INTR_PUSH
1183 1207 INTGATE_INIT_KERNEL_FLAGS
1184 1208
1185 1209 TRACE_PTR(%edi, %ebx, %ebx, %ecx, $TT_TRAP)
1186 1210 TRACE_REGS(%edi, %esp, %ebx, %ecx)
1187 1211 TRACE_STAMP(%edi)
1188 1212
1189 1213 movl %esp, %ebp
1190 1214
1191 1215 movl %esp, %ecx
1192 1216 pushl %ecx /* arg0 = struct regs *rp */
1193 1217 call cmi_mca_trap /* cmi_mca_trap(rp) */
1194 1218 addl $4, %esp /* pop arg0 */
1195 1219
1196 1220 jmp _sys_rtt
1197 1221 SET_SIZE(mcetrap)
1198 1222
1199 1223 #endif
[ 59 lines elided ]
1200 1224
1201 1225 /*
1202 1226 * #XF
1203 1227 */
1204 1228 ENTRY_NP(xmtrap)
1205 1229 TRAP_NOERR(T_SIMDFPE) /* $19 */
1206 1230 jmp cmninttrap
1207 1231 SET_SIZE(xmtrap)
1208 1232
1209 1233 ENTRY_NP(invaltrap)
1210 - TRAP_NOERR(30) /* very invalid */
1234 + TRAP_NOERR(T_INVALTRAP) /* very invalid */
1211 1235 jmp cmntrap
1212 1236 SET_SIZE(invaltrap)
1213 1237
1214 - ENTRY_NP(invalint)
1215 - TRAP_NOERR(31) /* even more so */
1216 - jmp cmnint
1217 - SET_SIZE(invalint)
1218 -
1219 1238 .globl fasttable
1220 1239
1221 1240 #if defined(__amd64)
1222 1241
1223 1242 ENTRY_NP(fasttrap)
1224 1243 cmpl $T_LASTFAST, %eax
1225 1244 ja 1f
1226 1245 orl %eax, %eax /* (zero extend top 32-bits) */
1227 1246 leaq fasttable(%rip), %r11
1228 1247 leaq (%r11, %rax, CLONGSIZE), %r11
1229 1248 jmp *(%r11)
1230 1249 1:
1231 1250 /*
1232 1251 * Fast syscall number was illegal. Make it look
1233 1252 * as if the INT failed. Modify %rip to point before the
1234 1253 * INT, push the expected error code and fake a GP fault.
1235 1254 *
1236 1255 * XXX Why make the error code be offset into idt + 1?
1237 1256 * Instead we should push a real (soft?) error code
1238 1257 * on the stack and #gp handler could know about fasttraps?
1239 1258 */
1240 1259 XPV_TRAP_POP
1241 1260
1242 1261 subq $2, (%rsp) /* XXX int insn 2-bytes */
1243 1262 pushq $_CONST(_MUL(T_FASTTRAP, GATE_DESC_SIZE) + 2)
1244 1263
1245 1264 #if defined(__xpv)
1246 1265 pushq %r11
1247 1266 pushq %rcx
1248 1267 #endif
1249 1268 jmp gptrap
1250 1269 SET_SIZE(fasttrap)
1251 1270
1252 1271 #elif defined(__i386)
1253 1272
1254 1273 ENTRY_NP(fasttrap)
1255 1274 cmpl $T_LASTFAST, %eax
1256 1275 ja 1f
1257 1276 jmp *%cs:fasttable(, %eax, CLONGSIZE)
1258 1277 1:
1259 1278 /*
1260 1279 * Fast syscall number was illegal. Make it look
1261 1280 * as if the INT failed. Modify %eip to point before the
1262 1281 * INT, push the expected error code and fake a GP fault.
1263 1282 *
1264 1283 * XXX Why make the error code be offset into idt + 1?
1265 1284 * Instead we should push a real (soft?) error code
1266 1285 * on the stack and #gp handler could know about fasttraps?
1267 1286 */
1268 1287 subl $2, (%esp) /* XXX int insn 2-bytes */
1269 1288 pushl $_CONST(_MUL(T_FASTTRAP, GATE_DESC_SIZE) + 2)
1270 1289 jmp gptrap
1271 1290 SET_SIZE(fasttrap)
1272 1291
1273 1292 #endif /* __i386 */
1274 1293
1275 1294 ENTRY_NP(dtrace_ret)
1276 1295 TRAP_NOERR(T_DTRACE_RET)
1277 1296 jmp dtrace_trap
1278 1297 SET_SIZE(dtrace_ret)
↓ open down ↓ |
50 lines elided |
↑ open up ↑ |
1279 1298
1280 1299 #if defined(__amd64)
1281 1300
1282 1301 /*
1283 1302 * RFLAGS 24 bytes up the stack from %rsp.
1284 1303 * XXX a constant would be nicer.
1285 1304 */
1286 1305 ENTRY_NP(fast_null)
1287 1306 XPV_TRAP_POP
1288 1307 orq $PS_C, 24(%rsp) /* set carry bit in user flags */
1289 - IRET
1308 + jmp tr_iret_auto
1290 1309 /*NOTREACHED*/
1291 1310 SET_SIZE(fast_null)
1292 1311
1293 1312 #elif defined(__i386)
1294 1313
1295 1314 ENTRY_NP(fast_null)
1296 1315 orw $PS_C, 8(%esp) /* set carry bit in user flags */
1297 1316 IRET
1298 1317 SET_SIZE(fast_null)
1299 1318
1300 1319 #endif /* __i386 */
1301 1320
1302 1321 /*
1303 1322 * Interrupts start at 32
1304 1323 */
1305 1324 #define MKIVCT(n) \
1306 1325 ENTRY_NP(ivct/**/n) \
1307 1326 push $0; \
1308 1327 push $n - 0x20; \
1309 1328 jmp cmnint; \
1310 1329 SET_SIZE(ivct/**/n)
1311 1330
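Each MKIVCT(n) invocation below stamps out one tiny interrupt stub; for example MKIVCT(32) expands (via the /**/ token pasting) to:

	ENTRY_NP(ivct32)
	push	$0		/* external interrupts push no error code */
	push	$32 - 0x20	/* as in the macro; evaluates to 0 for vector 32 */
	jmp	cmnint
	SET_SIZE(ivct32)
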
1312 1331 MKIVCT(32)
1313 1332 MKIVCT(33)
1314 1333 MKIVCT(34)
1315 1334 MKIVCT(35)
1316 1335 MKIVCT(36)
1317 1336 MKIVCT(37)
1318 1337 MKIVCT(38)
1319 1338 MKIVCT(39)
1320 1339 MKIVCT(40)
1321 1340 MKIVCT(41)
1322 1341 MKIVCT(42)
1323 1342 MKIVCT(43)
1324 1343 MKIVCT(44)
1325 1344 MKIVCT(45)
1326 1345 MKIVCT(46)
1327 1346 MKIVCT(47)
1328 1347 MKIVCT(48)
1329 1348 MKIVCT(49)
1330 1349 MKIVCT(50)
1331 1350 MKIVCT(51)
1332 1351 MKIVCT(52)
1333 1352 MKIVCT(53)
1334 1353 MKIVCT(54)
1335 1354 MKIVCT(55)
1336 1355 MKIVCT(56)
1337 1356 MKIVCT(57)
1338 1357 MKIVCT(58)
1339 1358 MKIVCT(59)
1340 1359 MKIVCT(60)
1341 1360 MKIVCT(61)
1342 1361 MKIVCT(62)
1343 1362 MKIVCT(63)
1344 1363 MKIVCT(64)
1345 1364 MKIVCT(65)
1346 1365 MKIVCT(66)
1347 1366 MKIVCT(67)
1348 1367 MKIVCT(68)
1349 1368 MKIVCT(69)
1350 1369 MKIVCT(70)
1351 1370 MKIVCT(71)
1352 1371 MKIVCT(72)
1353 1372 MKIVCT(73)
1354 1373 MKIVCT(74)
1355 1374 MKIVCT(75)
1356 1375 MKIVCT(76)
1357 1376 MKIVCT(77)
1358 1377 MKIVCT(78)
1359 1378 MKIVCT(79)
1360 1379 MKIVCT(80)
1361 1380 MKIVCT(81)
1362 1381 MKIVCT(82)
1363 1382 MKIVCT(83)
1364 1383 MKIVCT(84)
1365 1384 MKIVCT(85)
1366 1385 MKIVCT(86)
1367 1386 MKIVCT(87)
1368 1387 MKIVCT(88)
1369 1388 MKIVCT(89)
1370 1389 MKIVCT(90)
1371 1390 MKIVCT(91)
1372 1391 MKIVCT(92)
1373 1392 MKIVCT(93)
1374 1393 MKIVCT(94)
1375 1394 MKIVCT(95)
1376 1395 MKIVCT(96)
1377 1396 MKIVCT(97)
1378 1397 MKIVCT(98)
1379 1398 MKIVCT(99)
1380 1399 MKIVCT(100)
1381 1400 MKIVCT(101)
1382 1401 MKIVCT(102)
1383 1402 MKIVCT(103)
1384 1403 MKIVCT(104)
1385 1404 MKIVCT(105)
1386 1405 MKIVCT(106)
1387 1406 MKIVCT(107)
1388 1407 MKIVCT(108)
1389 1408 MKIVCT(109)
1390 1409 MKIVCT(110)
1391 1410 MKIVCT(111)
1392 1411 MKIVCT(112)
1393 1412 MKIVCT(113)
1394 1413 MKIVCT(114)
1395 1414 MKIVCT(115)
1396 1415 MKIVCT(116)
1397 1416 MKIVCT(117)
1398 1417 MKIVCT(118)
1399 1418 MKIVCT(119)
1400 1419 MKIVCT(120)
1401 1420 MKIVCT(121)
1402 1421 MKIVCT(122)
1403 1422 MKIVCT(123)
1404 1423 MKIVCT(124)
1405 1424 MKIVCT(125)
1406 1425 MKIVCT(126)
1407 1426 MKIVCT(127)
1408 1427 MKIVCT(128)
1409 1428 MKIVCT(129)
1410 1429 MKIVCT(130)
1411 1430 MKIVCT(131)
1412 1431 MKIVCT(132)
1413 1432 MKIVCT(133)
1414 1433 MKIVCT(134)
1415 1434 MKIVCT(135)
1416 1435 MKIVCT(136)
1417 1436 MKIVCT(137)
1418 1437 MKIVCT(138)
1419 1438 MKIVCT(139)
1420 1439 MKIVCT(140)
1421 1440 MKIVCT(141)
1422 1441 MKIVCT(142)
1423 1442 MKIVCT(143)
1424 1443 MKIVCT(144)
1425 1444 MKIVCT(145)
1426 1445 MKIVCT(146)
1427 1446 MKIVCT(147)
1428 1447 MKIVCT(148)
1429 1448 MKIVCT(149)
1430 1449 MKIVCT(150)
1431 1450 MKIVCT(151)
1432 1451 MKIVCT(152)
1433 1452 MKIVCT(153)
1434 1453 MKIVCT(154)
1435 1454 MKIVCT(155)
1436 1455 MKIVCT(156)
1437 1456 MKIVCT(157)
1438 1457 MKIVCT(158)
1439 1458 MKIVCT(159)
1440 1459 MKIVCT(160)
1441 1460 MKIVCT(161)
1442 1461 MKIVCT(162)
1443 1462 MKIVCT(163)
1444 1463 MKIVCT(164)
1445 1464 MKIVCT(165)
1446 1465 MKIVCT(166)
1447 1466 MKIVCT(167)
1448 1467 MKIVCT(168)
1449 1468 MKIVCT(169)
1450 1469 MKIVCT(170)
1451 1470 MKIVCT(171)
1452 1471 MKIVCT(172)
1453 1472 MKIVCT(173)
1454 1473 MKIVCT(174)
1455 1474 MKIVCT(175)
1456 1475 MKIVCT(176)
1457 1476 MKIVCT(177)
1458 1477 MKIVCT(178)
1459 1478 MKIVCT(179)
1460 1479 MKIVCT(180)
1461 1480 MKIVCT(181)
1462 1481 MKIVCT(182)
1463 1482 MKIVCT(183)
1464 1483 MKIVCT(184)
1465 1484 MKIVCT(185)
1466 1485 MKIVCT(186)
1467 1486 MKIVCT(187)
1468 1487 MKIVCT(188)
1469 1488 MKIVCT(189)
1470 1489 MKIVCT(190)
1471 1490 MKIVCT(191)
1472 1491 MKIVCT(192)
1473 1492 MKIVCT(193)
1474 1493 MKIVCT(194)
1475 1494 MKIVCT(195)
1476 1495 MKIVCT(196)
1477 1496 MKIVCT(197)
1478 1497 MKIVCT(198)
1479 1498 MKIVCT(199)
1480 1499 MKIVCT(200)
1481 1500 MKIVCT(201)
1482 1501 MKIVCT(202)
1483 1502 MKIVCT(203)
1484 1503 MKIVCT(204)
1485 1504 MKIVCT(205)
1486 1505 MKIVCT(206)
1487 1506 MKIVCT(207)
1488 1507 MKIVCT(208)
1489 1508 MKIVCT(209)
1490 1509 MKIVCT(210)
1491 1510 MKIVCT(211)
1492 1511 MKIVCT(212)
1493 1512 MKIVCT(213)
1494 1513 MKIVCT(214)
1495 1514 MKIVCT(215)
1496 1515 MKIVCT(216)
1497 1516 MKIVCT(217)
1498 1517 MKIVCT(218)
1499 1518 MKIVCT(219)
1500 1519 MKIVCT(220)
1501 1520 MKIVCT(221)
1502 1521 MKIVCT(222)
1503 1522 MKIVCT(223)
1504 1523 MKIVCT(224)
1505 1524 MKIVCT(225)
1506 1525 MKIVCT(226)
1507 1526 MKIVCT(227)
1508 1527 MKIVCT(228)
1509 1528 MKIVCT(229)
1510 1529 MKIVCT(230)
1511 1530 MKIVCT(231)
1512 1531 MKIVCT(232)
1513 1532 MKIVCT(233)
1514 1533 MKIVCT(234)
1515 1534 MKIVCT(235)
1516 1535 MKIVCT(236)
1517 1536 MKIVCT(237)
1518 1537 MKIVCT(238)
1519 1538 MKIVCT(239)
1520 1539 MKIVCT(240)
1521 1540 MKIVCT(241)
1522 1541 MKIVCT(242)
1523 1542 MKIVCT(243)
1524 1543 MKIVCT(244)
1525 1544 MKIVCT(245)
1526 1545 MKIVCT(246)
1527 1546 MKIVCT(247)
1528 1547 MKIVCT(248)
1529 1548 MKIVCT(249)
1530 1549 MKIVCT(250)
1531 1550 MKIVCT(251)
1532 1551 MKIVCT(252)
1533 1552 MKIVCT(253)
1534 1553 MKIVCT(254)
1535 1554 MKIVCT(255)
1536 1555
1537 1556 #endif /* __lint */
[ 238 lines elided ]