1 /*
   2  * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2013, 2014 by Delphix. All rights reserved.
   4  * Copyright (c) 2018 Joyent, Inc.
   5  */
   6 
   7 /*
   8  * Copyright (c) 1989, 1990 William F. Jolitz.
   9  * Copyright (c) 1990 The Regents of the University of California.
  10  * All rights reserved.
  11  *
  12  * Redistribution and use in source and binary forms, with or without
  13  * modification, are permitted provided that the following conditions
  14  * are met:
  15  * 1. Redistributions of source code must retain the above copyright
  16  *    notice, this list of conditions and the following disclaimer.
  17  * 2. Redistributions in binary form must reproduce the above copyright
  18  *    notice, this list of conditions and the following disclaimer in the
  19  *    documentation and/or other materials provided with the distribution.
  20  * 3. All advertising materials mentioning features or use of this software
  21  *    must display the following acknowledgement:
  22  *      This product includes software developed by the University of
  23  *      California, Berkeley and its contributors.
  24  * 4. Neither the name of the University nor the names of its contributors
  25  *    may be used to endorse or promote products derived from this software
  26  *    without specific prior written permission.
  27  *
  28  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
  29  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  30  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  31  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
  32  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  33  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  34  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  35  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  36  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  37  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  38  * SUCH DAMAGE.
  39  *
  40  * $FreeBSD: src/sys/amd64/amd64/exception.S,v 1.113 2003/10/15 02:04:52 peter Exp $
  41  */
  42 
  43 #include <sys/asm_linkage.h>
  44 #include <sys/asm_misc.h>
  45 #include <sys/trap.h>
  46 #include <sys/psw.h>
  47 #include <sys/regset.h>
  48 #include <sys/privregs.h>
  49 #include <sys/dtrace.h>
  50 #include <sys/x86_archext.h>
  51 #include <sys/traptrace.h>
  52 #include <sys/machparam.h>
  53 
  54 /*
  55  * only one routine in this file is interesting to lint
  56  */
  57 
  58 #if defined(__lint)
  59 
  60 void
  61 ndptrap_frstor(void)
  62 {}
  63 
  64 #else
  65 
  66 #include "assym.h"
  67 
/*
 * push $0 on stack for traps that do not
 * generate an error code. This is so the rest
 * of the kernel can expect a consistent stack
 * from any exception.
 *
 * Note that for all exceptions on amd64
 * %r11 and %rcx are already on the stack. Just pop
 * them back into their appropriate registers and let
 * the frame get saved as it would when running native.
 */
  79 
  80 #if defined(__xpv) && defined(__amd64)
  81 
  82 #define NPTRAP_NOERR(trapno)    \
  83         pushq   $0;             \
  84         pushq   $trapno
  85 
  86 #define TRAP_NOERR(trapno)      \
  87         XPV_TRAP_POP;           \
  88         NPTRAP_NOERR(trapno)
  89 
  90 /*
  91  * error code already pushed by hw
  92  * onto stack.
  93  */
  94 #define TRAP_ERR(trapno)        \
  95         XPV_TRAP_POP;           \
  96         pushq   $trapno
  97 
  98 #else /* __xpv && __amd64 */
  99 
 100 #define TRAP_NOERR(trapno)      \
 101         push    $0;             \
 102         push    $trapno
 103 
 104 #define NPTRAP_NOERR(trapno) TRAP_NOERR(trapno)
 105 
 106 /*
 107  * error code already pushed by hw
 108  * onto stack.
 109  */
 110 #define TRAP_ERR(trapno)        \
 111         push    $trapno
 112 
 113 #endif  /* __xpv && __amd64 */
 114 
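/*
 * For illustration (amd64): once one of the TRAP_* macros above has run,
 * every handler hands cmntrap the same frame shape that the hardware
 * builds for exceptions that do push an error code:
 *
 * (high address)       r_ss
 *                      r_rsp
 *                      r_rfl
 *                      r_cs
 *                      r_rip
 *                      r_err           (0 if pushed by TRAP_NOERR)
 * (low address)        r_trapno        <-- %rsp
 */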
 115         /*
 116          * These are the stacks used on cpu0 for taking double faults,
 117          * NMIs and MCEs (the latter two only on amd64 where we have IST).
 118          *
 119          * We define them here instead of in a C file so that we can page-align
 120          * them (gcc won't do that in a .c file).
 121          */
 122         .data
 123         DGDEF3(dblfault_stack0, DEFAULTSTKSZ, MMU_PAGESIZE)
 124         .fill   DEFAULTSTKSZ, 1, 0
 125         DGDEF3(nmi_stack0, DEFAULTSTKSZ, MMU_PAGESIZE)
 126         .fill   DEFAULTSTKSZ, 1, 0
 127         DGDEF3(mce_stack0, DEFAULTSTKSZ, MMU_PAGESIZE)
 128         .fill   DEFAULTSTKSZ, 1, 0
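        /*
         * Each DGDEF3() above declares its stack as a global data object of
         * DEFAULTSTKSZ bytes aligned to MMU_PAGESIZE, and the .fill that
         * follows supplies the zeroed bytes -- which is how we get the
         * page-aligned stacks described above.
         */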
 129 
 130         /*
 131          * #DE
 132          */
 133         ENTRY_NP(div0trap)
 134         TRAP_NOERR(T_ZERODIV)   /* $0 */
 135         jmp     cmntrap
 136         SET_SIZE(div0trap)
 137 
 138         /*
 139          * #DB
 140          *
 141          * Fetch %dr6 and clear it, handing off the value to the
 142          * cmntrap code in %r15/%esi
 143          */
 144         ENTRY_NP(dbgtrap)
 145         TRAP_NOERR(T_SGLSTP)    /* $1 */
 146 
 147 #if defined(__amd64)
 148 #if !defined(__xpv)             /* no sysenter support yet */
 149         /*
 150          * If we get here as a result of single-stepping a sysenter
 151          * instruction, we suddenly find ourselves taking a #db
 152          * in kernel mode -before- we've swapgs'ed.  So before we can
 153          * take the trap, we do the swapgs here, and fix the return
 154          * %rip in trap() so that we return immediately after the
 155          * swapgs in the sysenter handler to avoid doing the swapgs again.
 156          *
 157          * Nobody said that the design of sysenter was particularly
 158          * elegant, did they?
 159          */
 160 
 161         pushq   %r11
 162 
 163         /*
 164          * At this point the stack looks like this:
 165          *
 166          * (high address)       r_ss
 167          *                      r_rsp
 168          *                      r_rfl
 169          *                      r_cs
 170          *                      r_rip           <-- %rsp + 24
 171          *                      r_err           <-- %rsp + 16
 172          *                      r_trapno        <-- %rsp + 8
 173          * (low address)        %r11            <-- %rsp
 174          */
 175         leaq    sys_sysenter(%rip), %r11
 176         cmpq    %r11, 24(%rsp)  /* Compare to saved r_rip on the stack */
 177         je      1f
 178         leaq    brand_sys_sysenter(%rip), %r11
 179         cmpq    %r11, 24(%rsp)  /* Compare to saved r_rip on the stack */
 180         je      1f
 181         leaq    tr_sys_sysenter(%rip), %r11
 182         cmpq    %r11, 24(%rsp)
 183         je      1f
 184         leaq    tr_brand_sys_sysenter(%rip), %r11
 185         cmpq    %r11, 24(%rsp)
 186         jne     2f
 187 1:      SWAPGS
 188 2:      popq    %r11
 189 #endif  /* !__xpv */
 190 
 191         INTR_PUSH
 192 #if defined(__xpv)
 193         movl    $6, %edi
 194         call    kdi_dreg_get
 195         movq    %rax, %r15              /* %db6 -> %r15 */
 196         movl    $6, %edi
 197         movl    $0, %esi
 198         call    kdi_dreg_set            /* 0 -> %db6 */
 199 #else
 200         movq    %db6, %r15
 201         xorl    %eax, %eax
 202         movq    %rax, %db6
 203 #endif
 204 
 205 #elif defined(__i386)
 206 
 207         INTR_PUSH
 208 #if defined(__xpv)
 209         pushl   $6
 210         call    kdi_dreg_get
 211         addl    $4, %esp
 212         movl    %eax, %esi              /* %dr6 -> %esi */
 213         pushl   $0
 214         pushl   $6
 215         call    kdi_dreg_set            /* 0 -> %dr6 */
 216         addl    $8, %esp
 217 #else
 218         movl    %db6, %esi
 219         xorl    %eax, %eax
 220         movl    %eax, %db6
 221 #endif
 222 #endif  /* __i386 */
 223 
 224         jmp     cmntrap_pushed
 225         SET_SIZE(dbgtrap)
 226 
 227 #if defined(__amd64)
 228 #if !defined(__xpv)
 229 
/*
 * Macro to set the gsbase or kgsbase to the address of the struct cpu
 * for this processor.  If we came from userland, set kgsbase; otherwise
 * set gsbase.  We find the proper cpu struct by looping through
 * the cpu structs of all processors until we find one whose gdt matches
 * that of the trapping processor.  The stack is expected to be pointing at
 * the standard regs pushed by hardware on a trap (plus error code and trapno).
 *
 * It's ok for us to clobber gsbase here (and possibly end up with both gsbase
 * and kgsbase set to the same value) because we're not going back the normal
 * way out of here (via IRET). Where we're going, we don't need no user %gs.
 */
 242 #define SET_CPU_GSBASE                                                  \
 243         subq    $REGOFF_TRAPNO, %rsp;   /* save regs */                 \
 244         movq    %rax, REGOFF_RAX(%rsp);                                 \
 245         movq    %rbx, REGOFF_RBX(%rsp);                                 \
 246         movq    %rcx, REGOFF_RCX(%rsp);                                 \
 247         movq    %rdx, REGOFF_RDX(%rsp);                                 \
 248         movq    %rbp, REGOFF_RBP(%rsp);                                 \
 249         movq    %rsp, %rbp;                                             \
 250         subq    $16, %rsp;              /* space for gdt */             \
 251         sgdt    6(%rsp);                                                \
 252         movq    8(%rsp), %rcx;          /* %rcx has gdt to match */     \
 253         xorl    %ebx, %ebx;             /* loop index */                \
 254         leaq    cpu(%rip), %rdx;        /* cpu pointer array */         \
 255 1:                                                                      \
 256         movq    (%rdx, %rbx, CLONGSIZE), %rax;  /* get cpu[i] */        \
 257         cmpq    $0x0, %rax;             /* cpu[i] == NULL ? */          \
 258         je      2f;                     /* yes, continue */             \
 259         cmpq    %rcx, CPU_GDT(%rax);    /* gdt == cpu[i]->cpu_gdt ? */       \
 260         je      3f;                     /* yes, go set gsbase */        \
 261 2:                                                                      \
 262         incl    %ebx;                   /* i++ */                       \
 263         cmpl    $NCPU, %ebx;            /* i < NCPU ? */             \
 264         jb      1b;                     /* yes, loop */                 \
 265 /* XXX BIG trouble if we fall thru here.  We didn't find a gdt match */ \
 266 3:                                                                      \
 267         movl    $MSR_AMD_KGSBASE, %ecx;                                 \
 268         cmpw    $KCS_SEL, REGOFF_CS(%rbp); /* trap from kernel? */      \
 269         jne     4f;                     /* no, go set KGSBASE */        \
 270         movl    $MSR_AMD_GSBASE, %ecx;  /* yes, set GSBASE */           \
 271         mfence;                         /* OPTERON_ERRATUM_88 */        \
 272 4:                                                                      \
 273         movq    %rax, %rdx;             /* write base register */       \
 274         shrq    $32, %rdx;                                              \
 275         wrmsr;                                                          \
 276         movq    REGOFF_RDX(%rbp), %rdx; /* restore regs */              \
 277         movq    REGOFF_RCX(%rbp), %rcx;                                 \
 278         movq    REGOFF_RBX(%rbp), %rbx;                                 \
 279         movq    REGOFF_RAX(%rbp), %rax;                                 \
 280         movq    %rbp, %rsp;                                             \
 281         movq    REGOFF_RBP(%rsp), %rbp;                                 \
 282         addq    $REGOFF_TRAPNO, %rsp    /* pop stack */
 283 
 284 #else   /* __xpv */
 285 
 286 #define SET_CPU_GSBASE  /* noop on the hypervisor */
 287 
 288 #endif  /* __xpv */
 289 #endif  /* __amd64 */
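/*
 * In rough C terms (an illustrative sketch only, reusing the names the
 * macro uses), the native SET_CPU_GSBASE above does the following:
 *
 *      gdt = sgdt();                   -- GDT base of the trapping CPU
 *      for (i = 0; i < NCPU; i++)
 *              if (cpu[i] != NULL && cpu[i]->cpu_gdt == gdt)
 *                      break;          -- found this CPU's struct cpu
 *      msr = trapped_in_kernel ? MSR_AMD_GSBASE : MSR_AMD_KGSBASE;
 *      wrmsr(msr, (uint64_t)cpu[i]);   -- point gsbase/kgsbase at it
 */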
 290 
 291 
 292 #if defined(__amd64)
 293 
 294         /*
 295          * #NMI
 296          *
 297          * XXPV: See 6532669.
 298          */
 299         ENTRY_NP(nmiint)
 300         TRAP_NOERR(T_NMIFLT)    /* $2 */
 301 
 302         SET_CPU_GSBASE
 303 
 304         /*
 305          * Save all registers and setup segment registers
 306          * with kernel selectors.
 307          */
 308         INTR_PUSH
 309         INTGATE_INIT_KERNEL_FLAGS
 310 
 311         TRACE_PTR(%r12, %rax, %eax, %rdx, $TT_TRAP)
 312         TRACE_REGS(%r12, %rsp, %rax, %rbx)
 313         TRACE_STAMP(%r12)
 314 
 315         movq    %rsp, %rbp
 316 
 317         movq    %rbp, %rdi
 318         call    av_dispatch_nmivect
 319 
 320         INTR_POP
 321         jmp     tr_iret_auto
 322         /*NOTREACHED*/
 323         SET_SIZE(nmiint)
 324 
 325 #elif defined(__i386)
 326 
 327         /*
 328          * #NMI
 329          */
 330         ENTRY_NP(nmiint)
 331         TRAP_NOERR(T_NMIFLT)    /* $2 */
 332 
 333         /*
 334          * Save all registers and setup segment registers
 335          * with kernel selectors.
 336          */
 337         INTR_PUSH
 338         INTGATE_INIT_KERNEL_FLAGS
 339 
 340         TRACE_PTR(%edi, %ebx, %ebx, %ecx, $TT_TRAP)
 341         TRACE_REGS(%edi, %esp, %ebx, %ecx)
 342         TRACE_STAMP(%edi)
 343 
 344         movl    %esp, %ebp
 345 
 346         pushl   %ebp
 347         call    av_dispatch_nmivect
 348         addl    $4, %esp
 349 
 350         INTR_POP_USER
 351         IRET
 352         SET_SIZE(nmiint)
 353 
 354 #endif  /* __i386 */
 355 
 356         /*
 357          * #BP
 358          */
 359         ENTRY_NP(brktrap)
 360 
 361 #if defined(__amd64)
 362         XPV_TRAP_POP
 363         cmpw    $KCS_SEL, 8(%rsp)
 364         jne     bp_user
 365 
 366         /*
 367          * This is a breakpoint in the kernel -- it is very likely that this
 368          * is DTrace-induced.  To unify DTrace handling, we spoof this as an
 369          * invalid opcode (#UD) fault.  Note that #BP is a trap, not a fault --
 370          * we must decrement the trapping %rip to make it appear as a fault.
 371          * We then push a non-zero error code to indicate that this is coming
 372          * from #BP.
 373          */
 374         decq    (%rsp)
 375         push    $1                      /* error code -- non-zero for #BP */
 376         jmp     ud_kernel
 377 
 378 bp_user:
 379 #endif /* __amd64 */
 380 
 381         NPTRAP_NOERR(T_BPTFLT)  /* $3 */
 382         jmp     dtrace_trap
 383 
 384         SET_SIZE(brktrap)
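        /*
         * To tie the two handlers together: the spoofed kernel #UD pushed
         * above (error code 1) is processed by invoptrap/ud_kernel below.
         * If dtrace_invop() does not recognize the instruction, ud_trap
         * sees the non-zero error code, undoes the %rip decrement and
         * delivers the trap as a genuine T_BPTFLT instead.
         */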
 385 
 386         /*
 387          * #OF
 388          */
 389         ENTRY_NP(ovflotrap)
 390         TRAP_NOERR(T_OVFLW)     /* $4 */
 391         jmp     cmntrap
 392         SET_SIZE(ovflotrap)
 393 
 394         /*
 395          * #BR
 396          */
 397         ENTRY_NP(boundstrap)
 398         TRAP_NOERR(T_BOUNDFLT)  /* $5 */
 399         jmp     cmntrap
 400         SET_SIZE(boundstrap)
 401 
 402 #if defined(__amd64)
 403 
 404         ENTRY_NP(invoptrap)
 405 
 406         XPV_TRAP_POP
 407 
 408         cmpw    $KCS_SEL, 8(%rsp)
 409         jne     ud_user
 410 
 411 #if defined(__xpv)
 412         movb    $0, 12(%rsp)            /* clear saved upcall_mask from %cs */
 413 #endif
 414         push    $0                      /* error code -- zero for #UD */
 415 ud_kernel:
 416         push    $0xdddd                 /* a dummy trap number */
 417         INTR_PUSH
 418         movq    REGOFF_RIP(%rsp), %rdi
 419         movq    REGOFF_RSP(%rsp), %rsi
 420         movq    REGOFF_RAX(%rsp), %rdx
 421         pushq   (%rsi)
 422         movq    %rsp, %rsi
 423         subq    $8, %rsp
 424         call    dtrace_invop
 425         ALTENTRY(dtrace_invop_callsite)
 426         addq    $16, %rsp
 427         cmpl    $DTRACE_INVOP_PUSHL_EBP, %eax
 428         je      ud_push
 429         cmpl    $DTRACE_INVOP_LEAVE, %eax
 430         je      ud_leave
 431         cmpl    $DTRACE_INVOP_NOP, %eax
 432         je      ud_nop
 433         cmpl    $DTRACE_INVOP_RET, %eax
 434         je      ud_ret
 435         jmp     ud_trap
 436 
 437 ud_push:
 438         /*
 439          * We must emulate a "pushq %rbp".  To do this, we pull the stack
 440          * down 8 bytes, and then store the base pointer.
 441          */
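        /*
         * In C-like terms (an illustrative sketch; "rp" names the saved
         * state and old_rbp the %rbp value at the time of the trap):
         *
         *      rp->r_rip += 1;                   -- skip the patched byte
         *      rp->r_rsp -= 8;                   -- the push
         *      *(uint64_t *)rp->r_rsp = old_rbp; -- store the base pointer
         */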
 442         INTR_POP
 443         subq    $16, %rsp               /* make room for %rbp */
 444         pushq   %rax                    /* push temp */
 445         movq    24(%rsp), %rax          /* load calling RIP */
 446         addq    $1, %rax                /* increment over trapping instr */
 447         movq    %rax, 8(%rsp)           /* store calling RIP */
 448         movq    32(%rsp), %rax          /* load calling CS */
 449         movq    %rax, 16(%rsp)          /* store calling CS */
 450         movq    40(%rsp), %rax          /* load calling RFLAGS */
 451         movq    %rax, 24(%rsp)          /* store calling RFLAGS */
 452         movq    48(%rsp), %rax          /* load calling RSP */
 453         subq    $8, %rax                /* make room for %rbp */
 454         movq    %rax, 32(%rsp)          /* store calling RSP */
 455         movq    56(%rsp), %rax          /* load calling SS */
 456         movq    %rax, 40(%rsp)          /* store calling SS */
 457         movq    32(%rsp), %rax          /* reload calling RSP */
 458         movq    %rbp, (%rax)            /* store %rbp there */
 459         popq    %rax                    /* pop off temp */
 460         jmp     tr_iret_kernel          /* return from interrupt */
 461         /*NOTREACHED*/
 462 
 463 ud_leave:
 464         /*
 465          * We must emulate a "leave", which is the same as a "movq %rbp, %rsp"
 466          * followed by a "popq %rbp".  This is quite a bit simpler on amd64
 467          * than it is on i386 -- we can exploit the fact that the %rsp is
 468          * explicitly saved to effect the pop without having to reshuffle
 469          * the other data pushed for the trap.
 470          */
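        /*
         * In the same C-like terms as above (illustrative only):
         *
         *      rp->r_rip += 1;                 -- skip the patched byte
         *      rp->r_rsp = old_rbp + 8;        -- "movq %rbp, %rsp" + pop
         *      rbp = *(uint64_t *)old_rbp;     -- "popq %rbp"
         */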
 471         INTR_POP
 472         pushq   %rax                    /* push temp */
 473         movq    8(%rsp), %rax           /* load calling RIP */
 474         addq    $1, %rax                /* increment over trapping instr */
 475         movq    %rax, 8(%rsp)           /* store calling RIP */
 476         movq    (%rbp), %rax            /* get new %rbp */
 477         addq    $8, %rbp                /* adjust new %rsp */
 478         movq    %rbp, 32(%rsp)          /* store new %rsp */
 479         movq    %rax, %rbp              /* set new %rbp */
 480         popq    %rax                    /* pop off temp */
 481         jmp     tr_iret_kernel          /* return from interrupt */
 482         /*NOTREACHED*/
 483 
 484 ud_nop:
 485         /*
 486          * We must emulate a "nop".  This is obviously not hard:  we need only
 487          * advance the %rip by one.
 488          */
 489         INTR_POP
 490         incq    (%rsp)
 491         jmp     tr_iret_kernel
 492         /*NOTREACHED*/
 493 
 494 ud_ret:
 495         INTR_POP
 496         pushq   %rax                    /* push temp */
 497         movq    32(%rsp), %rax          /* load %rsp */
 498         movq    (%rax), %rax            /* load calling RIP */
 499         movq    %rax, 8(%rsp)           /* store calling RIP */
 500         addq    $8, 32(%rsp)            /* adjust new %rsp */
 501         popq    %rax                    /* pop off temp */
 502         jmp     tr_iret_kernel          /* return from interrupt */
 503         /*NOTREACHED*/
 504 
 505 ud_trap:
 506         /*
 507          * We're going to let the kernel handle this as a normal #UD.  If,
 508          * however, we came through #BP and are spoofing #UD (in this case,
 509          * the stored error value will be non-zero), we need to de-spoof
 510          * the trap by incrementing %rip and pushing T_BPTFLT.
 511          */
 512         cmpq    $0, REGOFF_ERR(%rsp)
 513         je      ud_ud
 514         incq    REGOFF_RIP(%rsp)
 515         addq    $REGOFF_RIP, %rsp
 516         NPTRAP_NOERR(T_BPTFLT)  /* $3 */
 517         jmp     cmntrap
 518 
 519 ud_ud:
 520         addq    $REGOFF_RIP, %rsp
 521 ud_user:
 522         NPTRAP_NOERR(T_ILLINST)
 523         jmp     cmntrap
 524         SET_SIZE(invoptrap)
 525 
 526 #elif defined(__i386)
 527 
 528         /*
 529          * #UD
 530          */
 531         ENTRY_NP(invoptrap)
 532         /*
 533          * If we are taking an invalid opcode trap while in the kernel, this
 534          * is likely an FBT probe point.
 535          */
 536         pushl   %gs
 537         cmpw    $KGS_SEL, (%esp)
 538         jne     8f
 539 
 540         addl    $4, %esp
 541 #if defined(__xpv)
 542         movb    $0, 6(%esp)             /* clear saved upcall_mask from %cs */
 543 #endif  /* __xpv */
 544         pusha
 545         pushl   %eax                    /* push %eax -- may be return value */
 546         pushl   %esp                    /* push stack pointer */
 547         addl    $48, (%esp)             /* adjust to incoming args */
 548         pushl   40(%esp)                /* push calling EIP */
 549         call    dtrace_invop
 550         ALTENTRY(dtrace_invop_callsite)
 551         addl    $12, %esp
 552         cmpl    $DTRACE_INVOP_PUSHL_EBP, %eax
 553         je      1f
 554         cmpl    $DTRACE_INVOP_POPL_EBP, %eax
 555         je      2f
 556         cmpl    $DTRACE_INVOP_LEAVE, %eax
 557         je      3f
 558         cmpl    $DTRACE_INVOP_NOP, %eax
 559         je      4f
 560         jmp     7f
 561 1:
 562         /*
 563          * We must emulate a "pushl %ebp".  To do this, we pull the stack
 564          * down 4 bytes, and then store the base pointer.
 565          */
 566         popa
 567         subl    $4, %esp                /* make room for %ebp */
 568         pushl   %eax                    /* push temp */
 569         movl    8(%esp), %eax           /* load calling EIP */
 570         incl    %eax                    /* increment over LOCK prefix */
 571         movl    %eax, 4(%esp)           /* store calling EIP */
 572         movl    12(%esp), %eax          /* load calling CS */
 573         movl    %eax, 8(%esp)           /* store calling CS */
 574         movl    16(%esp), %eax          /* load calling EFLAGS */
 575         movl    %eax, 12(%esp)          /* store calling EFLAGS */
 576         movl    %ebp, 16(%esp)          /* push %ebp */
 577         popl    %eax                    /* pop off temp */
 578         jmp     _emul_done
 579 2:
 580         /*
 581          * We must emulate a "popl %ebp".  To do this, we do the opposite of
 582          * the above:  we remove the %ebp from the stack, and squeeze up the
 583          * saved state from the trap.
 584          */
 585         popa
 586         pushl   %eax                    /* push temp */
 587         movl    16(%esp), %ebp          /* pop %ebp */
 588         movl    12(%esp), %eax          /* load calling EFLAGS */
 589         movl    %eax, 16(%esp)          /* store calling EFLAGS */
 590         movl    8(%esp), %eax           /* load calling CS */
 591         movl    %eax, 12(%esp)          /* store calling CS */
 592         movl    4(%esp), %eax           /* load calling EIP */
 593         incl    %eax                    /* increment over LOCK prefix */
 594         movl    %eax, 8(%esp)           /* store calling EIP */
 595         popl    %eax                    /* pop off temp */
 596         addl    $4, %esp                /* adjust stack pointer */
 597         jmp     _emul_done
 598 3:
 599         /*
 600          * We must emulate a "leave", which is the same as a "movl %ebp, %esp"
 601          * followed by a "popl %ebp".  This looks similar to the above, but
 602          * requires two temporaries:  one for the new base pointer, and one
 603          * for the staging register.
 604          */
 605         popa
 606         pushl   %eax                    /* push temp */
 607         pushl   %ebx                    /* push temp */
 608         movl    %ebp, %ebx              /* set temp to old %ebp */
 609         movl    (%ebx), %ebp            /* pop %ebp */
 610         movl    16(%esp), %eax          /* load calling EFLAGS */
 611         movl    %eax, (%ebx)            /* store calling EFLAGS */
 612         movl    12(%esp), %eax          /* load calling CS */
 613         movl    %eax, -4(%ebx)          /* store calling CS */
 614         movl    8(%esp), %eax           /* load calling EIP */
 615         incl    %eax                    /* increment over LOCK prefix */
 616         movl    %eax, -8(%ebx)          /* store calling EIP */
 617         movl    %ebx, -4(%esp)          /* temporarily store new %esp */
 618         popl    %ebx                    /* pop off temp */
 619         popl    %eax                    /* pop off temp */
 620         movl    -12(%esp), %esp         /* set stack pointer */
 621         subl    $8, %esp                /* adjust for three pushes, one pop */
 622         jmp     _emul_done
 623 4:
 624         /*
 625          * We must emulate a "nop".  This is obviously not hard:  we need only
 626          * advance the %eip by one.
 627          */
 628         popa
 629         incl    (%esp)
 630 _emul_done:
 631         IRET                            /* return from interrupt */
 632 7:
 633         popa
 634         pushl   $0
 635         pushl   $T_ILLINST      /* $6 */
 636         jmp     cmntrap
 637 8:
 638         addl    $4, %esp
 639         pushl   $0
 640         pushl   $T_ILLINST      /* $6 */
 641         jmp     cmntrap
 642         SET_SIZE(invoptrap)
 643 
 644 #endif  /* __i386 */
 645 
 646 #if defined(__amd64)
 647 
 648         /*
 649          * #NM
 650          */
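        /*
         * Background for the handlers below: after a context switch the
         * kernel leaves CR0.TS set for an lwp whose FPU state is not
         * loaded, so that lwp's first floating point instruction traps
         * here.  The fast path clears TS and restores the saved FPU
         * context with fxrstor/xrstor before retrying the instruction;
         * anything unusual is punted to the common trap path.
         */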
 651 #if defined(__xpv)
 652 
 653         ENTRY_NP(ndptrap)
 654         /*
 655          * (On the hypervisor we must make a hypercall so we might as well
 656          * save everything and handle as in a normal trap.)
 657          */
 658         TRAP_NOERR(T_NOEXTFLT)  /* $7 */
 659         INTR_PUSH
 660 
 661         /*
 662          * We want to do this quickly as every lwp using fp will take this
 663          * after a context switch -- we do the frequent path in ndptrap_frstor
 664          * below; for all other cases, we let the trap code handle it
 665          */
 666         LOADCPU(%rax)                   /* swapgs handled in hypervisor */
 667         cmpl    $0, fpu_exists(%rip)
 668         je      .handle_in_trap         /* let trap handle no fp case */
 669         movq    CPU_THREAD(%rax), %rbx  /* %rbx = curthread */
 670         movl    $FPU_EN, %eax
 671         movq    T_LWP(%rbx), %rbx       /* %rbx = lwp */
 672         testq   %rbx, %rbx
 673         jz      .handle_in_trap         /* should not happen? */
 674 #if LWP_PCB_FPU != 0
 675         addq    $LWP_PCB_FPU, %rbx      /* &lwp->lwp_pcb.pcb_fpu */
 676 #endif
 677         testl   %eax, PCB_FPU_FLAGS(%rbx)
 678         jz      .handle_in_trap         /* must be the first fault */
 679         CLTS
 680         andl    $_BITNOT(FPU_VALID), PCB_FPU_FLAGS(%rbx)
 681 #if FPU_CTX_FPU_REGS != 0
 682         addq    $FPU_CTX_FPU_REGS, %rbx
 683 #endif
 684 
 685         movl    FPU_CTX_FPU_XSAVE_MASK(%rbx), %eax      /* for xrstor */
 686         movl    FPU_CTX_FPU_XSAVE_MASK+4(%rbx), %edx    /* for xrstor */
 687 
 688         /*
 689          * the label below is used in trap.c to detect FP faults in
 690          * kernel due to user fault.
 691          */
 692         ALTENTRY(ndptrap_frstor)
 693         movq (%rbx), %rbx               /* fpu_regs.kfpu_u.kfpu_XX pointer */
 694         .globl  _patch_xrstorq_rbx
 695 _patch_xrstorq_rbx:
        fxrstorq (%rbx)                 /* may be patched to xrstorq */
 697         cmpw    $KCS_SEL, REGOFF_CS(%rsp)
 698         je      .return_to_kernel
 699 
 700         ASSERT_UPCALL_MASK_IS_SET
 701         USER_POP
 702         IRET                            /* return to user mode */
 703         /*NOTREACHED*/
 704 
 705 .return_to_kernel:
 706         INTR_POP
 707         IRET
 708         /*NOTREACHED*/
 709 
 710 .handle_in_trap:
 711         INTR_POP
 712         pushq   $0                      /* can not use TRAP_NOERR */
 713         pushq   $T_NOEXTFLT
 714         jmp     cmninttrap
 715         SET_SIZE(ndptrap_frstor)
 716         SET_SIZE(ndptrap)
 717 
 718 #else   /* __xpv */
 719 
 720         ENTRY_NP(ndptrap)
 721         /*
 722          * We want to do this quickly as every lwp using fp will take this
 723          * after a context switch -- we do the frequent path in ndptrap_frstor
 724          * below; for all other cases, we let the trap code handle it
 725          */
 726         pushq   %rax
 727         pushq   %rbx
 728         cmpw    $KCS_SEL, 24(%rsp)      /* did we come from kernel mode? */
 729         jne     1f
 730         LOADCPU(%rax)                   /* if yes, don't swapgs */
 731         jmp     2f
 732 1:
 733         SWAPGS                          /* if from user, need swapgs */
 734         LOADCPU(%rax)
 735         SWAPGS
 736 2:
        /*
         * xrstor uses %edx:%eax as its requested-feature mask, so %rdx must
         * be preserved here.
         * NOTE: %rdx has to be pushed after the "cmpw ...24(%rsp)" above;
         * pushing it earlier would shift the frame so that 24(%rsp) no
         * longer pointed at the saved CS.
         */
 742         pushq   %rdx
 743         cmpl    $0, fpu_exists(%rip)
 744         je      .handle_in_trap         /* let trap handle no fp case */
 745         movq    CPU_THREAD(%rax), %rbx  /* %rbx = curthread */
 746         movl    $FPU_EN, %eax
 747         movq    T_LWP(%rbx), %rbx       /* %rbx = lwp */
 748         testq   %rbx, %rbx
 749         jz      .handle_in_trap         /* should not happen? */
 750 #if LWP_PCB_FPU != 0
 751         addq    $LWP_PCB_FPU, %rbx      /* &lwp->lwp_pcb.pcb_fpu */
 752 #endif
 753         testl   %eax, PCB_FPU_FLAGS(%rbx)
 754         jz      .handle_in_trap         /* must be the first fault */
 755         clts
 756         andl    $_BITNOT(FPU_VALID), PCB_FPU_FLAGS(%rbx)
 757 #if FPU_CTX_FPU_REGS != 0
 758         addq    $FPU_CTX_FPU_REGS, %rbx
 759 #endif
 760 
 761         movl    FPU_CTX_FPU_XSAVE_MASK(%rbx), %eax      /* for xrstor */
 762         movl    FPU_CTX_FPU_XSAVE_MASK+4(%rbx), %edx    /* for xrstor */
 763 
 764         /*
 765          * the label below is used in trap.c to detect FP faults in
 766          * kernel due to user fault.
 767          */
 768         ALTENTRY(ndptrap_frstor)
 769         movq (%rbx), %rbx               /* fpu_regs.kfpu_u.kfpu_XX pointer */
 770         .globl  _patch_xrstorq_rbx
 771 _patch_xrstorq_rbx:
        fxrstorq (%rbx)                 /* may be patched to xrstorq */
 773         popq    %rdx
 774         popq    %rbx
 775         popq    %rax
 776         jmp     tr_iret_auto
 777         /*NOTREACHED*/
 778 
 779 .handle_in_trap:
 780         popq    %rdx
 781         popq    %rbx
 782         popq    %rax
 783         TRAP_NOERR(T_NOEXTFLT)  /* $7 */
 784         jmp     cmninttrap
 785         SET_SIZE(ndptrap_frstor)
 786         SET_SIZE(ndptrap)
 787 
 788 #endif  /* __xpv */
 789 
 790 #elif defined(__i386)
 791 
 792         ENTRY_NP(ndptrap)
 793         /*
 794          * We want to do this quickly as every lwp using fp will take this
 795          * after a context switch -- we do the frequent path in fpnoextflt
 796          * below; for all other cases, we let the trap code handle it
 797          */
 798         pushl   %eax
 799         pushl   %ebx
 800         pushl   %edx                    /* for xrstor */
 801         pushl   %ds
 802         pushl   %gs
 803         movl    $KDS_SEL, %ebx
 804         movw    %bx, %ds
 805         movl    $KGS_SEL, %eax
 806         movw    %ax, %gs
 807         LOADCPU(%eax)
 808         cmpl    $0, fpu_exists
 809         je      .handle_in_trap         /* let trap handle no fp case */
 810         movl    CPU_THREAD(%eax), %ebx  /* %ebx = curthread */
 811         movl    $FPU_EN, %eax
 812         movl    T_LWP(%ebx), %ebx       /* %ebx = lwp */
 813         testl   %ebx, %ebx
 814         jz      .handle_in_trap         /* should not happen? */
 815 #if LWP_PCB_FPU != 0
 816         addl    $LWP_PCB_FPU, %ebx      /* &lwp->lwp_pcb.pcb_fpu */
 817 #endif
 818         testl   %eax, PCB_FPU_FLAGS(%ebx)
 819         jz      .handle_in_trap         /* must be the first fault */
 820         CLTS
 821         andl    $_BITNOT(FPU_VALID), PCB_FPU_FLAGS(%ebx)
 822 #if FPU_CTX_FPU_REGS != 0
 823         addl    $FPU_CTX_FPU_REGS, %ebx
 824 #endif
 825 
 826         movl    FPU_CTX_FPU_XSAVE_MASK(%ebx), %eax      /* for xrstor */
 827         movl    FPU_CTX_FPU_XSAVE_MASK+4(%ebx), %edx    /* for xrstor */
 828 
 829         /*
 830          * the label below is used in trap.c to detect FP faults in kernel
 831          * due to user fault.
 832          */
 833         ALTENTRY(ndptrap_frstor)
 834         movl    (%ebx), %ebx            /* fpu_regs.kfpu_u.kfpu_XX pointer */
 835         .globl  _patch_fxrstor_ebx
 836 _patch_fxrstor_ebx:
 837         .globl  _patch_xrstor_ebx
 838 _patch_xrstor_ebx:
 839         frstor  (%ebx)          /* may be patched to fxrstor or xrstor */
 840         popl    %gs
 841         popl    %ds
 842         popl    %edx
 843         popl    %ebx
 844         popl    %eax
 845         IRET
 846 
 847 .handle_in_trap:
 848         popl    %gs
 849         popl    %ds
 850         popl    %edx
 851         popl    %ebx
 852         popl    %eax
 853         TRAP_NOERR(T_NOEXTFLT)  /* $7 */
 854         jmp     cmninttrap
 855         SET_SIZE(ndptrap_frstor)
 856         SET_SIZE(ndptrap)
 857 
 858 #endif  /* __i386 */
 859 
 860 #if !defined(__xpv)
 861 #if defined(__amd64)
 862 
 863         /*
 864          * #DF
 865          */
 866         ENTRY_NP(syserrtrap)
 867         pushq   $T_DBLFLT
 868         SET_CPU_GSBASE
 869 
 870         /*
 871          * We share this handler with kmdb (if kmdb is loaded).  As such, we
 872          * may have reached this point after encountering a #df in kmdb.  If
 873          * that happens, we'll still be on kmdb's IDT.  We need to switch back
 874          * to this CPU's IDT before proceeding.  Furthermore, if we did arrive
 875          * here from kmdb, kmdb is probably in a very sickly state, and
 876          * shouldn't be entered from the panic flow.  We'll suppress that
 877          * entry by setting nopanicdebug.
 878          */
 879         pushq   %rax
 880         subq    $DESCTBR_SIZE, %rsp
 881         sidt    (%rsp)
 882         movq    %gs:CPU_IDT, %rax
 883         cmpq    %rax, DTR_BASE(%rsp)
 884         je      1f
 885 
 886         movq    %rax, DTR_BASE(%rsp)
 887         movw    $_MUL(NIDT, GATE_DESC_SIZE), DTR_LIMIT(%rsp)
 888         lidt    (%rsp)
 889 
 890         movl    $1, nopanicdebug
 891 
 892 1:      addq    $DESCTBR_SIZE, %rsp
 893         popq    %rax
 894 
 895         DFTRAP_PUSH
 896 
 897         /*
 898          * freeze trap trace.
 899          */
 900 #ifdef TRAPTRACE
 901         leaq    trap_trace_freeze(%rip), %r11
 902         incl    (%r11)
 903 #endif
 904 
 905         ENABLE_INTR_FLAGS
 906 
 907         movq    %rsp, %rdi      /* &regs */
 908         xorl    %esi, %esi      /* clear address */
 909         xorl    %edx, %edx      /* cpuid = 0 */
 910         call    trap
 911 
 912         SET_SIZE(syserrtrap)
 913 
 914 #elif defined(__i386)
 915 
 916         /*
 917          * #DF
 918          */
 919         ENTRY_NP(syserrtrap)
 920         cli                             /* disable interrupts */
 921 
 922         /*
 923          * We share this handler with kmdb (if kmdb is loaded).  As such, we
 924          * may have reached this point after encountering a #df in kmdb.  If
 925          * that happens, we'll still be on kmdb's IDT.  We need to switch back
 926          * to this CPU's IDT before proceeding.  Furthermore, if we did arrive
 927          * here from kmdb, kmdb is probably in a very sickly state, and
 928          * shouldn't be entered from the panic flow.  We'll suppress that
 929          * entry by setting nopanicdebug.
 930          */
 931 
 932         subl    $DESCTBR_SIZE, %esp
 933         movl    %gs:CPU_IDT, %eax
 934         sidt    (%esp)
 935         cmpl    DTR_BASE(%esp), %eax
 936         je      1f
 937 
 938         movl    %eax, DTR_BASE(%esp)
 939         movw    $_MUL(NIDT, GATE_DESC_SIZE), DTR_LIMIT(%esp)
 940         lidt    (%esp)
 941 
 942         movl    $1, nopanicdebug
 943 
 944 1:      addl    $DESCTBR_SIZE, %esp
 945 
 946         /*
 947          * Check the CPL in the TSS to see what mode
 948          * (user or kernel) we took the fault in.  At this
 949          * point we are running in the context of the double
 950          * fault task (dftss) but the CPU's task points to
 951          * the previous task (ktss) where the process context
 952          * has been saved as the result of the task switch.
 953          */
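        /*
         * (Background: on i386 the #DF handler is reached through a task
         * gate, so we arrive here on the double fault TSS with the faulting
         * context frozen in the previous TSS -- which is why the state is
         * recovered from TSS_* fields below rather than from a stack.)
         */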
 954         movl    %gs:CPU_TSS, %eax       /* get the TSS */
 955         movl    TSS_SS(%eax), %ebx      /* save the fault SS */
 956         movl    TSS_ESP(%eax), %edx     /* save the fault ESP */
 957         testw   $CPL_MASK, TSS_CS(%eax) /* user mode ? */
 958         jz      make_frame
 959         movw    TSS_SS0(%eax), %ss      /* get on the kernel stack */
 960         movl    TSS_ESP0(%eax), %esp
 961 
 962         /*
 963          * Clear the NT flag to avoid a task switch when the process
 964          * finally pops the EFL off the stack via an iret.  Clear
 965          * the TF flag since that is what the processor does for
 966          * a normal exception. Clear the IE flag so that interrupts
 967          * remain disabled.
 968          */
 969         movl    TSS_EFL(%eax), %ecx
 970         andl    $_BITNOT(PS_NT|PS_T|PS_IE), %ecx
 971         pushl   %ecx
 972         popfl                           /* restore the EFL */
 973         movw    TSS_LDT(%eax), %cx      /* restore the LDT */
 974         lldt    %cx
 975 
 976         /*
 977          * Restore process segment selectors.
 978          */
 979         movw    TSS_DS(%eax), %ds
 980         movw    TSS_ES(%eax), %es
 981         movw    TSS_FS(%eax), %fs
 982         movw    TSS_GS(%eax), %gs
 983 
 984         /*
 985          * Restore task segment selectors.
 986          */
 987         movl    $KDS_SEL, TSS_DS(%eax)
 988         movl    $KDS_SEL, TSS_ES(%eax)
 989         movl    $KDS_SEL, TSS_SS(%eax)
 990         movl    $KFS_SEL, TSS_FS(%eax)
 991         movl    $KGS_SEL, TSS_GS(%eax)
 992 
 993         /*
 994          * Clear the TS bit, the busy bits in both task
 995          * descriptors, and switch tasks.
 996          */
 997         clts
 998         leal    gdt0, %ecx
 999         movl    DFTSS_SEL+4(%ecx), %esi
1000         andl    $_BITNOT(0x200), %esi
1001         movl    %esi, DFTSS_SEL+4(%ecx)
1002         movl    KTSS_SEL+4(%ecx), %esi
1003         andl    $_BITNOT(0x200), %esi
1004         movl    %esi, KTSS_SEL+4(%ecx)
1005         movw    $KTSS_SEL, %cx
1006         ltr     %cx
1007 
1008         /*
1009          * Restore part of the process registers.
1010          */
1011         movl    TSS_EBP(%eax), %ebp
1012         movl    TSS_ECX(%eax), %ecx
1013         movl    TSS_ESI(%eax), %esi
1014         movl    TSS_EDI(%eax), %edi
1015 
1016 make_frame:
1017         /*
1018          * Make a trap frame.  Leave the error code (0) on
1019          * the stack since the first word on a trap stack is
1020          * unused anyway.
1021          */
1022         pushl   %ebx                    / fault SS
1023         pushl   %edx                    / fault ESP
1024         pushl   TSS_EFL(%eax)           / fault EFL
1025         pushl   TSS_CS(%eax)            / fault CS
1026         pushl   TSS_EIP(%eax)           / fault EIP
1027         pushl   $0                      / error code
1028         pushl   $T_DBLFLT               / trap number 8
1029         movl    TSS_EBX(%eax), %ebx     / restore EBX
1030         movl    TSS_EDX(%eax), %edx     / restore EDX
1031         movl    TSS_EAX(%eax), %eax     / restore EAX
1032         sti                             / enable interrupts
1033         jmp     cmntrap
1034         SET_SIZE(syserrtrap)
1035 
1036 #endif  /* __i386 */
1037 #endif  /* !__xpv */
1038 
1039         ENTRY_NP(overrun)
1040         push    $0
1041         TRAP_NOERR(T_EXTOVRFLT) /* $9 i386 only - not generated */
1042         jmp     cmninttrap
1043         SET_SIZE(overrun)
1044 
1045         /*
1046          * #TS
1047          */
1048         ENTRY_NP(invtsstrap)
1049         TRAP_ERR(T_TSSFLT)      /* $10 already have error code on stack */
1050         jmp     cmntrap
1051         SET_SIZE(invtsstrap)
1052 
1053         /*
1054          * #NP
1055          */
1056         ENTRY_NP(segnptrap)
1057         TRAP_ERR(T_SEGFLT)      /* $11 already have error code on stack */
1058 #if defined(__amd64)
1059         SET_CPU_GSBASE
1060 #endif
1061         jmp     cmntrap
1062         SET_SIZE(segnptrap)
1063 
1064         /*
1065          * #SS
1066          */
1067         ENTRY_NP(stktrap)
1068         TRAP_ERR(T_STKFLT)      /* $12 already have error code on stack */
1069 #if defined(__amd64)
1070         SET_CPU_GSBASE
1071 #endif
1072         jmp     cmntrap
1073         SET_SIZE(stktrap)
1074 
1075         /*
1076          * #GP
1077          */
1078         ENTRY_NP(gptrap)
1079         TRAP_ERR(T_GPFLT)       /* $13 already have error code on stack */
1080 #if defined(__amd64)
1081         SET_CPU_GSBASE
1082 #endif
1083         jmp     cmntrap
1084         SET_SIZE(gptrap)
1085 
1086         /*
1087          * #PF
1088          */
1089         ENTRY_NP(pftrap)
1090         TRAP_ERR(T_PGFLT)       /* $14 already have error code on stack */
1091         INTR_PUSH
1092 #if defined(__xpv)
1093 
1094 #if defined(__amd64)
1095         movq    %gs:CPU_VCPU_INFO, %r15
1096         movq    VCPU_INFO_ARCH_CR2(%r15), %r15  /* vcpu[].arch.cr2 */
1097 #elif defined(__i386)
1098         movl    %gs:CPU_VCPU_INFO, %esi
1099         movl    VCPU_INFO_ARCH_CR2(%esi), %esi  /* vcpu[].arch.cr2 */
1100 #endif  /* __i386 */
1101 
1102 #else   /* __xpv */
1103 
1104 #if defined(__amd64)
1105         movq    %cr2, %r15
1106 #elif defined(__i386)
1107         movl    %cr2, %esi
1108 #endif  /* __i386 */
1109 
1110 #endif  /* __xpv */
1111         jmp     cmntrap_pushed
1112         SET_SIZE(pftrap)
1113 
1114 #if !defined(__amd64)
1115 
1116         .globl  idt0_default_r
1117 
1118         /*
1119          * #PF pentium bug workaround
1120          */
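        /*
         * (Background: this appears to be the Pentium "F00F" erratum
         * workaround.  The IDT is mapped so that delivery of the resulting
         * invalid opcode exception faults with #PF on the IDT page instead
         * of hanging the processor; when %cr2 lands on the IDT page and the
         * access was not an ordinary user-mode reference, we re-dispatch
         * the fault to invoptrap below.)
         */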
1121         ENTRY_NP(pentium_pftrap)
1122         pushl   %eax
1123         movl    %cr2, %eax
1124         andl    $MMU_STD_PAGEMASK, %eax
1125 
1126         cmpl    %eax, %cs:idt0_default_r+2      /* fixme */
1127 
1128         je      check_for_user_address
1129 user_mode:
1130         popl    %eax
1131         pushl   $T_PGFLT        /* $14 */
1132         jmp     cmntrap
1133 check_for_user_address:
1134         /*
1135          * Before we assume that we have an unmapped trap on our hands,
1136          * check to see if this is a fault from user mode.  If it is,
1137          * we'll kick back into the page fault handler.
1138          */
1139         movl    4(%esp), %eax   /* error code */
1140         andl    $PF_ERR_USER, %eax
1141         jnz     user_mode
1142 
1143         /*
1144          * We now know that this is the invalid opcode trap.
1145          */
1146         popl    %eax
1147         addl    $4, %esp        /* pop error code */
1148         jmp     invoptrap
1149         SET_SIZE(pentium_pftrap)
1150 
1151 #endif  /* !__amd64 */
1152 
1153         ENTRY_NP(resvtrap)
1154         TRAP_NOERR(T_RESVTRAP)  /* (reserved)  */
1155         jmp     cmntrap
1156         SET_SIZE(resvtrap)
1157 
1158         /*
1159          * #MF
1160          */
1161         ENTRY_NP(ndperr)
1162         TRAP_NOERR(T_EXTERRFLT) /* $16 */
1163         jmp     cmninttrap
1164         SET_SIZE(ndperr)
1165 
1166         /*
1167          * #AC
1168          */
1169         ENTRY_NP(achktrap)
1170         TRAP_ERR(T_ALIGNMENT)   /* $17 */
1171         jmp     cmntrap
1172         SET_SIZE(achktrap)
1173 
1174         /*
1175          * #MC
1176          */
1177         .globl  cmi_mca_trap    /* see uts/i86pc/os/cmi.c */
1178 
1179 #if defined(__amd64)
1180 
1181         ENTRY_NP(mcetrap)
1182         TRAP_NOERR(T_MCE)       /* $18 */
1183 
1184         SET_CPU_GSBASE
1185 
1186         INTR_PUSH
1187         INTGATE_INIT_KERNEL_FLAGS
1188 
1189         TRACE_PTR(%rdi, %rbx, %ebx, %rcx, $TT_TRAP)
1190         TRACE_REGS(%rdi, %rsp, %rbx, %rcx)
1191         TRACE_STAMP(%rdi)
1192 
1193         movq    %rsp, %rbp
1194 
1195         movq    %rsp, %rdi      /* arg0 = struct regs *rp */
1196         call    cmi_mca_trap    /* cmi_mca_trap(rp); */
1197 
1198         jmp     _sys_rtt
1199         SET_SIZE(mcetrap)
1200 
1201 #else
1202 
1203         ENTRY_NP(mcetrap)
1204         TRAP_NOERR(T_MCE)       /* $18 */
1205 
1206         INTR_PUSH
1207         INTGATE_INIT_KERNEL_FLAGS
1208 
1209         TRACE_PTR(%edi, %ebx, %ebx, %ecx, $TT_TRAP)
1210         TRACE_REGS(%edi, %esp, %ebx, %ecx)
1211         TRACE_STAMP(%edi)
1212 
1213         movl    %esp, %ebp
1214 
1215         movl    %esp, %ecx
1216         pushl   %ecx            /* arg0 = struct regs *rp */
1217         call    cmi_mca_trap    /* cmi_mca_trap(rp) */
1218         addl    $4, %esp        /* pop arg0 */
1219 
1220         jmp     _sys_rtt
1221         SET_SIZE(mcetrap)
1222 
1223 #endif
1224 
1225         /*
1226          * #XF
1227          */
1228         ENTRY_NP(xmtrap)
1229         TRAP_NOERR(T_SIMDFPE)   /* $19 */
1230         jmp     cmninttrap
1231         SET_SIZE(xmtrap)
1232 
1233         ENTRY_NP(invaltrap)
1234         TRAP_NOERR(T_INVALTRAP) /* very invalid */
1235         jmp     cmntrap
1236         SET_SIZE(invaltrap)
1237 
1238         .globl  fasttable
1239 
1240 #if defined(__amd64)
1241 
1242         ENTRY_NP(fasttrap)
1243         cmpl    $T_LASTFAST, %eax
1244         ja      1f
1245         orl     %eax, %eax      /* (zero extend top 32-bits) */
1246         leaq    fasttable(%rip), %r11
1247         leaq    (%r11, %rax, CLONGSIZE), %r11
1248         jmp     *(%r11)
1249 1:
        /*
         * Fast syscall number was illegal.  Make it look
         * as if the INT failed.  Modify %rip to point before the
         * INT, push the expected error code and fake a GP fault.
         *
         * XXX Why make the error code be offset into idt + 1?
         * Instead we should push a real (soft?) error code
         * on the stack so the #gp handler could know about fasttraps?
         */
1259         XPV_TRAP_POP
1260 
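        /*
         * (The error code pushed just below is the byte offset of the
         * T_FASTTRAP gate within the IDT plus 2 -- roughly the error code
         * a #GP referencing that IDT entry would carry.)
         */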
1261         subq    $2, (%rsp)      /* XXX int insn 2-bytes */
1262         pushq   $_CONST(_MUL(T_FASTTRAP, GATE_DESC_SIZE) + 2)
1263 
1264 #if defined(__xpv)
1265         pushq   %r11
1266         pushq   %rcx
1267 #endif
1268         jmp     gptrap
1269         SET_SIZE(fasttrap)
1270 
1271 #elif defined(__i386)
1272 
1273         ENTRY_NP(fasttrap)
1274         cmpl    $T_LASTFAST, %eax
1275         ja      1f
1276         jmp     *%cs:fasttable(, %eax, CLONGSIZE)
1277 1:
        /*
         * Fast syscall number was illegal.  Make it look
         * as if the INT failed.  Modify %eip to point before the
         * INT, push the expected error code and fake a GP fault.
         *
         * XXX Why make the error code be offset into idt + 1?
         * Instead we should push a real (soft?) error code
         * on the stack so the #gp handler could know about fasttraps?
         */
1287         subl    $2, (%esp)      /* XXX int insn 2-bytes */
1288         pushl   $_CONST(_MUL(T_FASTTRAP, GATE_DESC_SIZE) + 2)
1289         jmp     gptrap
1290         SET_SIZE(fasttrap)
1291 
1292 #endif  /* __i386 */
1293 
1294         ENTRY_NP(dtrace_ret)
1295         TRAP_NOERR(T_DTRACE_RET)
1296         jmp     dtrace_trap
1297         SET_SIZE(dtrace_ret)
1298 
1299 #if defined(__amd64)
1300 
1301         /*
1302          * RFLAGS 24 bytes up the stack from %rsp.
1303          * XXX a constant would be nicer.
1304          */
1305         ENTRY_NP(fast_null)
1306         XPV_TRAP_POP
1307         orq     $PS_C, 24(%rsp) /* set carry bit in user flags */
1308         jmp     tr_iret_auto
1309         /*NOTREACHED*/
1310         SET_SIZE(fast_null)
1311 
1312 #elif defined(__i386)
1313 
1314         ENTRY_NP(fast_null)
1315         orw     $PS_C, 8(%esp)  /* set carry bit in user flags */
1316         IRET
1317         SET_SIZE(fast_null)
1318 
1319 #endif  /* __i386 */
1320 
1321         /*
1322          * Interrupts start at 32
1323          */
1324 #define MKIVCT(n)                       \
1325         ENTRY_NP(ivct/**/n)             \
1326         push    $0;                     \
1327         push    $n - 0x20;              \
1328         jmp     cmnint;                 \
1329         SET_SIZE(ivct/**/n)
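        /*
         * Each ivctNN handler below pushes a zero error code and its vector
         * number relative to 0x20 (the base of the interrupt range), then
         * joins the common interrupt path at cmnint.
         */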
1330 
1331         MKIVCT(32)
1332         MKIVCT(33)
1333         MKIVCT(34)
1334         MKIVCT(35)
1335         MKIVCT(36)
1336         MKIVCT(37)
1337         MKIVCT(38)
1338         MKIVCT(39)
1339         MKIVCT(40)
1340         MKIVCT(41)
1341         MKIVCT(42)
1342         MKIVCT(43)
1343         MKIVCT(44)
1344         MKIVCT(45)
1345         MKIVCT(46)
1346         MKIVCT(47)
1347         MKIVCT(48)
1348         MKIVCT(49)
1349         MKIVCT(50)
1350         MKIVCT(51)
1351         MKIVCT(52)
1352         MKIVCT(53)
1353         MKIVCT(54)
1354         MKIVCT(55)
1355         MKIVCT(56)
1356         MKIVCT(57)
1357         MKIVCT(58)
1358         MKIVCT(59)
1359         MKIVCT(60)
1360         MKIVCT(61)
1361         MKIVCT(62)
1362         MKIVCT(63)
1363         MKIVCT(64)
1364         MKIVCT(65)
1365         MKIVCT(66)
1366         MKIVCT(67)
1367         MKIVCT(68)
1368         MKIVCT(69)
1369         MKIVCT(70)
1370         MKIVCT(71)
1371         MKIVCT(72)
1372         MKIVCT(73)
1373         MKIVCT(74)
1374         MKIVCT(75)
1375         MKIVCT(76)
1376         MKIVCT(77)
1377         MKIVCT(78)
1378         MKIVCT(79)
1379         MKIVCT(80)
1380         MKIVCT(81)
1381         MKIVCT(82)
1382         MKIVCT(83)
1383         MKIVCT(84)
1384         MKIVCT(85)
1385         MKIVCT(86)
1386         MKIVCT(87)
1387         MKIVCT(88)
1388         MKIVCT(89)
1389         MKIVCT(90)
1390         MKIVCT(91)
1391         MKIVCT(92)
1392         MKIVCT(93)
1393         MKIVCT(94)
1394         MKIVCT(95)
1395         MKIVCT(96)
1396         MKIVCT(97)
1397         MKIVCT(98)
1398         MKIVCT(99)
1399         MKIVCT(100)
1400         MKIVCT(101)
1401         MKIVCT(102)
1402         MKIVCT(103)
1403         MKIVCT(104)
1404         MKIVCT(105)
1405         MKIVCT(106)
1406         MKIVCT(107)
1407         MKIVCT(108)
1408         MKIVCT(109)
1409         MKIVCT(110)
1410         MKIVCT(111)
1411         MKIVCT(112)
1412         MKIVCT(113)
1413         MKIVCT(114)
1414         MKIVCT(115)
1415         MKIVCT(116)
1416         MKIVCT(117)
1417         MKIVCT(118)
1418         MKIVCT(119)
1419         MKIVCT(120)
1420         MKIVCT(121)
1421         MKIVCT(122)
1422         MKIVCT(123)
1423         MKIVCT(124)
1424         MKIVCT(125)
1425         MKIVCT(126)
1426         MKIVCT(127)
1427         MKIVCT(128)
1428         MKIVCT(129)
1429         MKIVCT(130)
1430         MKIVCT(131)
1431         MKIVCT(132)
1432         MKIVCT(133)
1433         MKIVCT(134)
1434         MKIVCT(135)
1435         MKIVCT(136)
1436         MKIVCT(137)
1437         MKIVCT(138)
1438         MKIVCT(139)
1439         MKIVCT(140)
1440         MKIVCT(141)
1441         MKIVCT(142)
1442         MKIVCT(143)
1443         MKIVCT(144)
1444         MKIVCT(145)
1445         MKIVCT(146)
1446         MKIVCT(147)
1447         MKIVCT(148)
1448         MKIVCT(149)
1449         MKIVCT(150)
1450         MKIVCT(151)
1451         MKIVCT(152)
1452         MKIVCT(153)
1453         MKIVCT(154)
1454         MKIVCT(155)
1455         MKIVCT(156)
1456         MKIVCT(157)
1457         MKIVCT(158)
1458         MKIVCT(159)
1459         MKIVCT(160)
1460         MKIVCT(161)
1461         MKIVCT(162)
1462         MKIVCT(163)
1463         MKIVCT(164)
1464         MKIVCT(165)
1465         MKIVCT(166)
1466         MKIVCT(167)
1467         MKIVCT(168)
1468         MKIVCT(169)
1469         MKIVCT(170)
1470         MKIVCT(171)
1471         MKIVCT(172)
1472         MKIVCT(173)
1473         MKIVCT(174)
1474         MKIVCT(175)
1475         MKIVCT(176)
1476         MKIVCT(177)
1477         MKIVCT(178)
1478         MKIVCT(179)
1479         MKIVCT(180)
1480         MKIVCT(181)
1481         MKIVCT(182)
1482         MKIVCT(183)
1483         MKIVCT(184)
1484         MKIVCT(185)
1485         MKIVCT(186)
1486         MKIVCT(187)
1487         MKIVCT(188)
1488         MKIVCT(189)
1489         MKIVCT(190)
1490         MKIVCT(191)
1491         MKIVCT(192)
1492         MKIVCT(193)
1493         MKIVCT(194)
1494         MKIVCT(195)
1495         MKIVCT(196)
1496         MKIVCT(197)
1497         MKIVCT(198)
1498         MKIVCT(199)
1499         MKIVCT(200)
1500         MKIVCT(201)
1501         MKIVCT(202)
1502         MKIVCT(203)
1503         MKIVCT(204)
1504         MKIVCT(205)
1505         MKIVCT(206)
1506         MKIVCT(207)
1507         MKIVCT(208)
1508         MKIVCT(209)
1509         MKIVCT(210)
1510         MKIVCT(211)
1511         MKIVCT(212)
1512         MKIVCT(213)
1513         MKIVCT(214)
1514         MKIVCT(215)
1515         MKIVCT(216)
1516         MKIVCT(217)
1517         MKIVCT(218)
1518         MKIVCT(219)
1519         MKIVCT(220)
1520         MKIVCT(221)
1521         MKIVCT(222)
1522         MKIVCT(223)
1523         MKIVCT(224)
1524         MKIVCT(225)
1525         MKIVCT(226)
1526         MKIVCT(227)
1527         MKIVCT(228)
1528         MKIVCT(229)
1529         MKIVCT(230)
1530         MKIVCT(231)
1531         MKIVCT(232)
1532         MKIVCT(233)
1533         MKIVCT(234)
1534         MKIVCT(235)
1535         MKIVCT(236)
1536         MKIVCT(237)
1537         MKIVCT(238)
1538         MKIVCT(239)
1539         MKIVCT(240)
1540         MKIVCT(241)
1541         MKIVCT(242)
1542         MKIVCT(243)
1543         MKIVCT(244)
1544         MKIVCT(245)
1545         MKIVCT(246)
1546         MKIVCT(247)
1547         MKIVCT(248)
1548         MKIVCT(249)
1549         MKIVCT(250)
1550         MKIVCT(251)
1551         MKIVCT(252)
1552         MKIVCT(253)
1553         MKIVCT(254)
1554         MKIVCT(255)
1555 
1556 #endif  /* __lint */