1 /*
   2  * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2013, 2014 by Delphix. All rights reserved.
   4  * Copyright (c) 2017 Joyent, Inc.
   5  */
   6 
   7 /*
   8  * Copyright (c) 1989, 1990 William F. Jolitz.
   9  * Copyright (c) 1990 The Regents of the University of California.
  10  * All rights reserved.
  11  *
  12  * Redistribution and use in source and binary forms, with or without
  13  * modification, are permitted provided that the following conditions
  14  * are met:
  15  * 1. Redistributions of source code must retain the above copyright
  16  *    notice, this list of conditions and the following disclaimer.
  17  * 2. Redistributions in binary form must reproduce the above copyright
  18  *    notice, this list of conditions and the following disclaimer in the
  19  *    documentation and/or other materials provided with the distribution.
  20  * 3. All advertising materials mentioning features or use of this software
  21  *    must display the following acknowledgement:
  22  *      This product includes software developed by the University of
  23  *      California, Berkeley and its contributors.
  24  * 4. Neither the name of the University nor the names of its contributors
  25  *    may be used to endorse or promote products derived from this software
  26  *    without specific prior written permission.
  27  *
  28  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
  29  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  30  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  31  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
  32  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  33  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  34  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  35  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  36  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  37  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  38  * SUCH DAMAGE.
  39  *
  40  * $FreeBSD: src/sys/amd64/amd64/exception.S,v 1.113 2003/10/15 02:04:52 peter Exp $
  41  */
  42 
  43 #include <sys/asm_linkage.h>
  44 #include <sys/asm_misc.h>
  45 #include <sys/trap.h>
  46 #include <sys/psw.h>
  47 #include <sys/regset.h>
  48 #include <sys/privregs.h>
  49 #include <sys/dtrace.h>
  50 #include <sys/x86_archext.h>
  51 #include <sys/traptrace.h>
  52 #include <sys/machparam.h>
  53 
  54 /*
  55  * only one routine in this file is interesting to lint
  56  */
  57 
  58 #if defined(__lint)
  59 
  60 void
  61 ndptrap_frstor(void)
  62 {}
  63 
  64 #else
  65 
  66 #include "assym.h"
  67 
/*
 * Push $0 on the stack for traps that do not
 * generate an error code, so that the rest
 * of the kernel can expect a consistent stack
 * from any exception.
 *
 * Note that for all exceptions on amd64,
 * %r11 and %rcx are on the stack. Just pop
 * them back into their appropriate registers and let
 * them be saved as if running native.
 */
  79 
  80 #if defined(__xpv) && defined(__amd64)
  81 
  82 #define NPTRAP_NOERR(trapno)    \
  83         pushq   $0;             \
  84         pushq   $trapno 
  85 
  86 #define TRAP_NOERR(trapno)      \
  87         XPV_TRAP_POP;           \
  88         NPTRAP_NOERR(trapno)
  89 
  90 /*
  91  * error code already pushed by hw
  92  * onto stack.
  93  */
  94 #define TRAP_ERR(trapno)        \
  95         XPV_TRAP_POP;           \
  96         pushq   $trapno 
  97 
  98 #else /* __xpv && __amd64 */
  99 
 100 #define TRAP_NOERR(trapno)      \
 101         push    $0;             \
 102         push    $trapno 
 103 
 104 #define NPTRAP_NOERR(trapno) TRAP_NOERR(trapno)
 105 
 106 /*
 107  * error code already pushed by hw
 108  * onto stack.
 109  */
 110 #define TRAP_ERR(trapno)        \
 111         push    $trapno 
 112 
 113 #endif  /* __xpv && __amd64 */
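
/*
 * For reference, once one of these macros has run, the kernel stack for an
 * amd64 trap looks like this (a sketch; the r_* names mirror the stack
 * layout comment in the #DB handler below):
 *
 * (high address)       r_ss
 *                      r_rsp
 *                      r_rfl
 *                      r_cs
 *                      r_rip
 *                      r_err           <-- $0 or the hardware error code
 * (low address)        r_trapno        <-- $trapno, at %rsp
 */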
 114 
 115 
 116         /*
 117          * #DE
 118          */
 119         ENTRY_NP(div0trap)
 120         TRAP_NOERR(T_ZERODIV)   /* $0 */
 121         jmp     cmntrap
 122         SET_SIZE(div0trap)
 123 
 124         /*
 125          * #DB
 126          *
 127          * Fetch %dr6 and clear it, handing off the value to the
 128          * cmntrap code in %r15/%esi
 129          */
 130         ENTRY_NP(dbgtrap)
 131         TRAP_NOERR(T_SGLSTP)    /* $1 */
 132 
 133 #if defined(__amd64)
 134 #if !defined(__xpv)             /* no sysenter support yet */
 135         /*
 136          * If we get here as a result of single-stepping a sysenter
 137          * instruction, we suddenly find ourselves taking a #db
 138          * in kernel mode -before- we've swapgs'ed.  So before we can
 139          * take the trap, we do the swapgs here, and fix the return
 140          * %rip in trap() so that we return immediately after the
 141          * swapgs in the sysenter handler to avoid doing the swapgs again.
 142          *
 143          * Nobody said that the design of sysenter was particularly
 144          * elegant, did they?
 145          */
 146 
 147         pushq   %r11
 148 
 149         /*
 150          * At this point the stack looks like this:
 151          *
 152          * (high address)       r_ss
 153          *                      r_rsp
 154          *                      r_rfl
 155          *                      r_cs
 156          *                      r_rip           <-- %rsp + 24
 157          *                      r_err           <-- %rsp + 16
 158          *                      r_trapno        <-- %rsp + 8
 159          * (low address)        %r11            <-- %rsp
 160          */
 161         leaq    sys_sysenter(%rip), %r11
 162         cmpq    %r11, 24(%rsp)  /* Compare to saved r_rip on the stack */
 163         je      1f
 164         leaq    brand_sys_sysenter(%rip), %r11
 165         cmpq    %r11, 24(%rsp)  /* Compare to saved r_rip on the stack */
 166         jne     2f
 167 1:      SWAPGS
 168 2:      popq    %r11
 169 #endif  /* !__xpv */
 170 
 171         INTR_PUSH
 172 #if defined(__xpv)
 173         movl    $6, %edi
 174         call    kdi_dreg_get
 175         movq    %rax, %r15              /* %db6 -> %r15 */
 176         movl    $6, %edi
 177         movl    $0, %esi
 178         call    kdi_dreg_set            /* 0 -> %db6 */
 179 #else
 180         movq    %db6, %r15
 181         xorl    %eax, %eax
 182         movq    %rax, %db6
 183 #endif
 184 
 185 #elif defined(__i386)
 186 
 187         INTR_PUSH
 188 #if defined(__xpv)
 189         pushl   $6
 190         call    kdi_dreg_get
 191         addl    $4, %esp
 192         movl    %eax, %esi              /* %dr6 -> %esi */
 193         pushl   $0
 194         pushl   $6
 195         call    kdi_dreg_set            /* 0 -> %dr6 */
 196         addl    $8, %esp
 197 #else
 198         movl    %db6, %esi
 199         xorl    %eax, %eax
 200         movl    %eax, %db6
 201 #endif
 202 #endif  /* __i386 */
 203 
 204         jmp     cmntrap_pushed
 205         SET_SIZE(dbgtrap)
 206 
 207 #if defined(__amd64)
 208 #if !defined(__xpv)
 209 
/*
 * Macro to set the gsbase or kgsbase to the address of the struct cpu
 * for this processor.  If we came from userland, set kgsbase; otherwise
 * set gsbase.  We find the proper cpu struct by looping through
 * the cpu structs for all processors until we find a match for the gdt
 * of the trapping processor.  The stack is expected to be pointing at
 * the standard regs pushed by hardware on a trap (plus error code and trapno).
 */
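/*
 * Roughly, the search below is equivalent to this C sketch (illustrative
 * only; the names follow the assym offsets used in the macro):
 *
 *      for (i = 0; i < NCPU; i++) {
 *              if (cpu[i] != NULL && cpu[i]->cpu_gdt == gdt_base_from_sgdt)
 *                      break;
 *      }
 *      msr = trapped_from_kernel ? MSR_AMD_GSBASE : MSR_AMD_KGSBASE;
 *      wrmsr(msr, (uint64_t)cpu[i]);
 */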
 218 #define SET_CPU_GSBASE                                                  \
 219         subq    $REGOFF_TRAPNO, %rsp;   /* save regs */                 \
 220         movq    %rax, REGOFF_RAX(%rsp);                                 \
 221         movq    %rbx, REGOFF_RBX(%rsp);                                 \
 222         movq    %rcx, REGOFF_RCX(%rsp);                                 \
 223         movq    %rdx, REGOFF_RDX(%rsp);                                 \
 224         movq    %rbp, REGOFF_RBP(%rsp);                                 \
 225         movq    %rsp, %rbp;                                             \
 226         subq    $16, %rsp;              /* space for gdt */             \
 227         sgdt    6(%rsp);                                                \
 228         movq    8(%rsp), %rcx;          /* %rcx has gdt to match */     \
 229         xorl    %ebx, %ebx;             /* loop index */                \
 230         leaq    cpu(%rip), %rdx;        /* cpu pointer array */         \
 231 1:                                                                      \
 232         movq    (%rdx, %rbx, CLONGSIZE), %rax;  /* get cpu[i] */        \
 233         cmpq    $0x0, %rax;             /* cpu[i] == NULL ? */          \
 234         je      2f;                     /* yes, continue */             \
 235         cmpq    %rcx, CPU_GDT(%rax);    /* gdt == cpu[i]->cpu_gdt ? */       \
 236         je      3f;                     /* yes, go set gsbase */        \
 237 2:                                                                      \
 238         incl    %ebx;                   /* i++ */                       \
 239         cmpl    $NCPU, %ebx;            /* i < NCPU ? */             \
 240         jb      1b;                     /* yes, loop */                 \
 241 /* XXX BIG trouble if we fall thru here.  We didn't find a gdt match */ \
 242 3:                                                                      \
 243         movl    $MSR_AMD_KGSBASE, %ecx;                                 \
 244         cmpw    $KCS_SEL, REGOFF_CS(%rbp); /* trap from kernel? */      \
 245         jne     4f;                     /* no, go set KGSBASE */        \
 246         movl    $MSR_AMD_GSBASE, %ecx;  /* yes, set GSBASE */           \
 247         mfence;                         /* OPTERON_ERRATUM_88 */        \
 248 4:                                                                      \
 249         movq    %rax, %rdx;             /* write base register */       \
 250         shrq    $32, %rdx;                                              \
 251         wrmsr;                                                          \
 252         movq    REGOFF_RDX(%rbp), %rdx; /* restore regs */              \
 253         movq    REGOFF_RCX(%rbp), %rcx;                                 \
 254         movq    REGOFF_RBX(%rbp), %rbx;                                 \
 255         movq    REGOFF_RAX(%rbp), %rax;                                 \
 256         movq    %rbp, %rsp;                                             \
 257         movq    REGOFF_RBP(%rsp), %rbp;                                 \
 258         addq    $REGOFF_TRAPNO, %rsp    /* pop stack */
 259 
 260 #else   /* __xpv */
 261 
 262 #define SET_CPU_GSBASE  /* noop on the hypervisor */
 263 
 264 #endif  /* __xpv */
 265 #endif  /* __amd64 */
 266 
 267 
 268 #if defined(__amd64)
 269 
 270         /*
 271          * #NMI
 272          *
 273          * XXPV: See 6532669.
 274          */
 275         ENTRY_NP(nmiint)
 276         TRAP_NOERR(T_NMIFLT)    /* $2 */
 277 
 278         SET_CPU_GSBASE
 279 
 280         /*
 281          * Save all registers and setup segment registers
 282          * with kernel selectors.
 283          */
 284         INTR_PUSH
 285         INTGATE_INIT_KERNEL_FLAGS
 286 
 287         TRACE_PTR(%r12, %rax, %eax, %rdx, $TT_TRAP)
 288         TRACE_REGS(%r12, %rsp, %rax, %rbx)
 289         TRACE_STAMP(%r12)
 290 
 291         movq    %rsp, %rbp
 292 
 293         movq    %rbp, %rdi
 294         call    av_dispatch_nmivect
 295 
 296         INTR_POP
 297         IRET
 298         /*NOTREACHED*/
 299         SET_SIZE(nmiint)
 300 
 301 #elif defined(__i386)
 302 
 303         /*
 304          * #NMI
 305          */
 306         ENTRY_NP(nmiint)
 307         TRAP_NOERR(T_NMIFLT)    /* $2 */
 308 
 309         /*
 310          * Save all registers and setup segment registers
 311          * with kernel selectors.
 312          */
 313         INTR_PUSH
 314         INTGATE_INIT_KERNEL_FLAGS
 315 
 316         TRACE_PTR(%edi, %ebx, %ebx, %ecx, $TT_TRAP)
 317         TRACE_REGS(%edi, %esp, %ebx, %ecx)
 318         TRACE_STAMP(%edi)
 319 
 320         movl    %esp, %ebp
 321 
 322         pushl   %ebp    
 323         call    av_dispatch_nmivect     
 324         addl    $4, %esp
 325 
 326         INTR_POP_USER
 327         IRET
 328         SET_SIZE(nmiint)
 329 
 330 #endif  /* __i386 */
 331 
 332         /*
 333          * #BP
 334          */
 335         ENTRY_NP(brktrap)
 336 
 337 #if defined(__amd64)
 338         XPV_TRAP_POP
 339         cmpw    $KCS_SEL, 8(%rsp)
 340         jne     bp_user
 341 
 342         /*
 343          * This is a breakpoint in the kernel -- it is very likely that this
 344          * is DTrace-induced.  To unify DTrace handling, we spoof this as an
 345          * invalid opcode (#UD) fault.  Note that #BP is a trap, not a fault --
 346          * we must decrement the trapping %rip to make it appear as a fault.
 347          * We then push a non-zero error code to indicate that this is coming
 348          * from #BP.
 349          */
 350         decq    (%rsp)
 351         push    $1                      /* error code -- non-zero for #BP */
 352         jmp     ud_kernel
 353 
 354 bp_user:
 355 #endif /* __amd64 */
 356 
 357         NPTRAP_NOERR(T_BPTFLT)  /* $3 */
 358         jmp     dtrace_trap
 359 
 360         SET_SIZE(brktrap)
 361 
 362         /*
 363          * #OF
 364          */
 365         ENTRY_NP(ovflotrap)
 366         TRAP_NOERR(T_OVFLW)     /* $4 */
 367         jmp     cmntrap
 368         SET_SIZE(ovflotrap)
 369 
 370         /*
 371          * #BR
 372          */
 373         ENTRY_NP(boundstrap)
 374         TRAP_NOERR(T_BOUNDFLT)  /* $5 */
 375         jmp     cmntrap
 376         SET_SIZE(boundstrap)
 377 
 378 #if defined(__amd64)
 379 
 380         ENTRY_NP(invoptrap)
 381 
 382         XPV_TRAP_POP
 383 
 384         cmpw    $KCS_SEL, 8(%rsp)
 385         jne     ud_user
 386 
 387 #if defined(__xpv)
 388         movb    $0, 12(%rsp)            /* clear saved upcall_mask from %cs */
 389 #endif
 390         push    $0                      /* error code -- zero for #UD */
 391 ud_kernel:
 392         push    $0xdddd                 /* a dummy trap number */
 393         INTR_PUSH
 394         movq    REGOFF_RIP(%rsp), %rdi
 395         movq    REGOFF_RSP(%rsp), %rsi
 396         movq    REGOFF_RAX(%rsp), %rdx
 397         pushq   (%rsi)
 398         movq    %rsp, %rsi
 399         subq    $8, %rsp
 400         call    dtrace_invop
 401         ALTENTRY(dtrace_invop_callsite)
 402         addq    $16, %rsp
 403         cmpl    $DTRACE_INVOP_PUSHL_EBP, %eax
 404         je      ud_push
 405         cmpl    $DTRACE_INVOP_LEAVE, %eax
 406         je      ud_leave
 407         cmpl    $DTRACE_INVOP_NOP, %eax
 408         je      ud_nop
 409         cmpl    $DTRACE_INVOP_RET, %eax
 410         je      ud_ret
 411         jmp     ud_trap
 412 
 413 ud_push:
 414         /*
 415          * We must emulate a "pushq %rbp".  To do this, we pull the stack
 416          * down 8 bytes, and then store the base pointer.
 417          */
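        /*
         * Sketch of what happens below: after INTR_POP, %rsp points at the
         * five-word iretq frame (RIP, CS, RFLAGS, RSP, SS).  We rebuild that
         * frame 16 bytes lower on the stack, with RIP advanced past the
         * trapping instruction and the saved RSP lowered by 8, and write
         * %rbp into the slot the emulated push would have used.  Once the
         * temporary %rax is popped, %rsp points at the relocated frame and
         * the iretq resumes the interrupted code exactly as if it had
         * executed "pushq %rbp" itself.
         */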
 418         INTR_POP
 419         subq    $16, %rsp               /* make room for %rbp */
 420         pushq   %rax                    /* push temp */
 421         movq    24(%rsp), %rax          /* load calling RIP */
 422         addq    $1, %rax                /* increment over trapping instr */
 423         movq    %rax, 8(%rsp)           /* store calling RIP */
 424         movq    32(%rsp), %rax          /* load calling CS */
 425         movq    %rax, 16(%rsp)          /* store calling CS */
 426         movq    40(%rsp), %rax          /* load calling RFLAGS */
 427         movq    %rax, 24(%rsp)          /* store calling RFLAGS */
 428         movq    48(%rsp), %rax          /* load calling RSP */
 429         subq    $8, %rax                /* make room for %rbp */
 430         movq    %rax, 32(%rsp)          /* store calling RSP */
 431         movq    56(%rsp), %rax          /* load calling SS */
 432         movq    %rax, 40(%rsp)          /* store calling SS */
 433         movq    32(%rsp), %rax          /* reload calling RSP */
 434         movq    %rbp, (%rax)            /* store %rbp there */
 435         popq    %rax                    /* pop off temp */
 436         IRET                            /* return from interrupt */
 437         /*NOTREACHED*/
 438 
 439 ud_leave:
 440         /*
 441          * We must emulate a "leave", which is the same as a "movq %rbp, %rsp"
 442          * followed by a "popq %rbp".  This is quite a bit simpler on amd64
 443          * than it is on i386 -- we can exploit the fact that the %rsp is
 444          * explicitly saved to effect the pop without having to reshuffle
 445          * the other data pushed for the trap.
 446          */
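        /*
         * In effect (an illustrative summary): new RSP = old %rbp + 8 and
         * new %rbp = *(old %rbp), with RIP advanced past the trapping
         * instruction -- exactly the state "leave" would have left behind.
         */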
 447         INTR_POP
 448         pushq   %rax                    /* push temp */
 449         movq    8(%rsp), %rax           /* load calling RIP */
 450         addq    $1, %rax                /* increment over trapping instr */
 451         movq    %rax, 8(%rsp)           /* store calling RIP */
 452         movq    (%rbp), %rax            /* get new %rbp */
 453         addq    $8, %rbp                /* adjust new %rsp */
 454         movq    %rbp, 32(%rsp)          /* store new %rsp */
 455         movq    %rax, %rbp              /* set new %rbp */
 456         popq    %rax                    /* pop off temp */
 457         IRET                            /* return from interrupt */
 458         /*NOTREACHED*/
 459 
 460 ud_nop:
 461         /*
 462          * We must emulate a "nop".  This is obviously not hard:  we need only
 463          * advance the %rip by one.
 464          */
 465         INTR_POP
 466         incq    (%rsp)
 467         IRET
 468         /*NOTREACHED*/
 469 
 470 ud_ret:
 471         INTR_POP
 472         pushq   %rax                    /* push temp */
 473         movq    32(%rsp), %rax          /* load %rsp */
 474         movq    (%rax), %rax            /* load calling RIP */
 475         movq    %rax, 8(%rsp)           /* store calling RIP */
 476         addq    $8, 32(%rsp)            /* adjust new %rsp */
 477         popq    %rax                    /* pop off temp */
 478         IRET                            /* return from interrupt */
 479         /*NOTREACHED*/
 480 
 481 ud_trap:
 482         /*
 483          * We're going to let the kernel handle this as a normal #UD.  If,
 484          * however, we came through #BP and are spoofing #UD (in this case,
 485          * the stored error value will be non-zero), we need to de-spoof
 486          * the trap by incrementing %rip and pushing T_BPTFLT.
 487          */
 488         cmpq    $0, REGOFF_ERR(%rsp)
 489         je      ud_ud
 490         incq    REGOFF_RIP(%rsp)
 491         addq    $REGOFF_RIP, %rsp
 492         NPTRAP_NOERR(T_BPTFLT)  /* $3 */
 493         jmp     cmntrap
 494 
 495 ud_ud:
 496         addq    $REGOFF_RIP, %rsp
 497 ud_user:
 498         NPTRAP_NOERR(T_ILLINST)
 499         jmp     cmntrap
 500         SET_SIZE(invoptrap)
 501 
 502 #elif defined(__i386)
 503 
 504         /*
 505          * #UD
 506          */
 507         ENTRY_NP(invoptrap)
 508         /*
 509          * If we are taking an invalid opcode trap while in the kernel, this
 510          * is likely an FBT probe point.
 511          */
 512         pushl   %gs
 513         cmpw    $KGS_SEL, (%esp)
 514         jne     8f
 515 
 516         addl    $4, %esp
 517 #if defined(__xpv)
 518         movb    $0, 6(%esp)             /* clear saved upcall_mask from %cs */
 519 #endif  /* __xpv */
 520         pusha
 521         pushl   %eax                    /* push %eax -- may be return value */
 522         pushl   %esp                    /* push stack pointer */
 523         addl    $48, (%esp)             /* adjust to incoming args */
 524         pushl   40(%esp)                /* push calling EIP */
 525         call    dtrace_invop
 526         ALTENTRY(dtrace_invop_callsite)
 527         addl    $12, %esp
 528         cmpl    $DTRACE_INVOP_PUSHL_EBP, %eax
 529         je      1f
 530         cmpl    $DTRACE_INVOP_POPL_EBP, %eax
 531         je      2f
 532         cmpl    $DTRACE_INVOP_LEAVE, %eax
 533         je      3f
 534         cmpl    $DTRACE_INVOP_NOP, %eax
 535         je      4f
 536         jmp     7f
 537 1:
 538         /*
 539          * We must emulate a "pushl %ebp".  To do this, we pull the stack
 540          * down 4 bytes, and then store the base pointer.
 541          */
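        /*
         * Sketch: the saved EIP/CS/EFLAGS are slid down 4 bytes, EIP is
         * advanced past the trapping instruction, and %ebp is stored in the
         * vacated slot (the word just below the interrupted %esp), so the
         * iret below resumes as if the interrupted code had executed
         * "pushl %ebp" itself.
         */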
 542         popa
 543         subl    $4, %esp                /* make room for %ebp */
 544         pushl   %eax                    /* push temp */
 545         movl    8(%esp), %eax           /* load calling EIP */
 546         incl    %eax                    /* increment over LOCK prefix */
 547         movl    %eax, 4(%esp)           /* store calling EIP */
 548         movl    12(%esp), %eax          /* load calling CS */
 549         movl    %eax, 8(%esp)           /* store calling CS */
 550         movl    16(%esp), %eax          /* load calling EFLAGS */
 551         movl    %eax, 12(%esp)          /* store calling EFLAGS */
 552         movl    %ebp, 16(%esp)          /* push %ebp */
 553         popl    %eax                    /* pop off temp */
 554         jmp     _emul_done
 555 2:
 556         /*
 557          * We must emulate a "popl %ebp".  To do this, we do the opposite of
 558          * the above:  we remove the %ebp from the stack, and squeeze up the
 559          * saved state from the trap.
 560          */
 561         popa
 562         pushl   %eax                    /* push temp */
 563         movl    16(%esp), %ebp          /* pop %ebp */
 564         movl    12(%esp), %eax          /* load calling EFLAGS */
 565         movl    %eax, 16(%esp)          /* store calling EFLAGS */
 566         movl    8(%esp), %eax           /* load calling CS */
 567         movl    %eax, 12(%esp)          /* store calling CS */
 568         movl    4(%esp), %eax           /* load calling EIP */
 569         incl    %eax                    /* increment over LOCK prefix */
 570         movl    %eax, 8(%esp)           /* store calling EIP */
 571         popl    %eax                    /* pop off temp */
 572         addl    $4, %esp                /* adjust stack pointer */
 573         jmp     _emul_done
 574 3:
 575         /*
 576          * We must emulate a "leave", which is the same as a "movl %ebp, %esp"
 577          * followed by a "popl %ebp".  This looks similar to the above, but
 578          * requires two temporaries:  one for the new base pointer, and one
 579          * for the staging register.
 580          */
 581         popa
 582         pushl   %eax                    /* push temp */
 583         pushl   %ebx                    /* push temp */
 584         movl    %ebp, %ebx              /* set temp to old %ebp */
 585         movl    (%ebx), %ebp            /* pop %ebp */
 586         movl    16(%esp), %eax          /* load calling EFLAGS */
 587         movl    %eax, (%ebx)            /* store calling EFLAGS */
 588         movl    12(%esp), %eax          /* load calling CS */
 589         movl    %eax, -4(%ebx)          /* store calling CS */
 590         movl    8(%esp), %eax           /* load calling EIP */
 591         incl    %eax                    /* increment over LOCK prefix */
 592         movl    %eax, -8(%ebx)          /* store calling EIP */
 593         movl    %ebx, -4(%esp)          /* temporarily store new %esp */
 594         popl    %ebx                    /* pop off temp */
 595         popl    %eax                    /* pop off temp */
 596         movl    -12(%esp), %esp         /* set stack pointer */
 597         subl    $8, %esp                /* adjust for three pushes, one pop */
 598         jmp     _emul_done
 599 4:
 600         /*
 601          * We must emulate a "nop".  This is obviously not hard:  we need only
 602          * advance the %eip by one.
 603          */
 604         popa
 605         incl    (%esp)
 606 _emul_done:
 607         IRET                            /* return from interrupt */
 608 7:
 609         popa
 610         pushl   $0
 611         pushl   $T_ILLINST      /* $6 */
 612         jmp     cmntrap
 613 8:
 614         addl    $4, %esp
 615         pushl   $0
 616         pushl   $T_ILLINST      /* $6 */
 617         jmp     cmntrap
 618         SET_SIZE(invoptrap)
 619 
 620 #endif  /* __i386 */
 621 
 622 #if defined(__amd64)
 623 
 624         /*
 625          * #NM
 626          */
 627 #if defined(__xpv)
 628 
 629         ENTRY_NP(ndptrap)
        /*
         * (On the hypervisor we must make a hypercall, so we might as well
         * save everything and handle this as a normal trap.)
         */
 634         TRAP_NOERR(T_NOEXTFLT)  /* $7 */
 635         INTR_PUSH
 636         
        /*
         * We want to do this quickly as every lwp using fp will take this
         * after a context switch -- we do the frequent path in ndptrap_frstor
         * below; for all other cases, we let the trap code handle it.
         */
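        /*
         * A rough C sketch of the fast path below (illustrative only; the
         * field names approximate what the PCB_FPU_* assym offsets refer to):
         *
         *      if (fpu_exists && (lwp = curthread->t_lwp) != NULL &&
         *          (lwp->lwp_pcb.pcb_fpu.fpu_flags & FPU_EN)) {
         *              clts();
         *              lwp->lwp_pcb.pcb_fpu.fpu_flags &= ~FPU_VALID;
         *              restore the FPU state (fxrstorq/xrstor) and iret;
         *      } else {
         *              fall through to .handle_in_trap and let trap() do it;
         *      }
         */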
 642         LOADCPU(%rax)                   /* swapgs handled in hypervisor */
 643         cmpl    $0, fpu_exists(%rip)
 644         je      .handle_in_trap         /* let trap handle no fp case */
 645         movq    CPU_THREAD(%rax), %rbx  /* %rbx = curthread */
 646         movl    $FPU_EN, %eax
 647         movq    T_LWP(%rbx), %rbx       /* %rbx = lwp */
 648         testq   %rbx, %rbx
 649         jz      .handle_in_trap         /* should not happen? */
 650 #if LWP_PCB_FPU != 0
 651         addq    $LWP_PCB_FPU, %rbx      /* &lwp->lwp_pcb.pcb_fpu */
 652 #endif
 653         testl   %eax, PCB_FPU_FLAGS(%rbx)
 654         jz      .handle_in_trap         /* must be the first fault */
 655         CLTS
 656         andl    $_BITNOT(FPU_VALID), PCB_FPU_FLAGS(%rbx)
 657 #if FPU_CTX_FPU_REGS != 0
 658         addq    $FPU_CTX_FPU_REGS, %rbx
 659 #endif
 660 
 661         movl    FPU_CTX_FPU_XSAVE_MASK(%rbx), %eax      /* for xrstor */
 662         movl    FPU_CTX_FPU_XSAVE_MASK+4(%rbx), %edx    /* for xrstor */
 663 
        /*
         * The label below is used in trap.c to detect FP faults taken in
         * the kernel due to a user fault.
         */
 668         ALTENTRY(ndptrap_frstor)
 669         movq (%rbx), %rbx               /* fpu_regs.kfpu_u.kfpu_XX pointer */
 670         .globl  _patch_xrstorq_rbx
 671 _patch_xrstorq_rbx:
 672         fxrstorq (%rbx)
 673         cmpw    $KCS_SEL, REGOFF_CS(%rsp)
 674         je      .return_to_kernel
 675 
 676         ASSERT_UPCALL_MASK_IS_SET
 677         USER_POP
 678         IRET                            /* return to user mode */
 679         /*NOTREACHED*/
 680 
 681 .return_to_kernel:
 682         INTR_POP
 683         IRET
 684         /*NOTREACHED*/
 685 
 686 .handle_in_trap:
 687         INTR_POP
 688         pushq   $0                      /* can not use TRAP_NOERR */
 689         pushq   $T_NOEXTFLT
 690         jmp     cmninttrap
 691         SET_SIZE(ndptrap_frstor)
 692         SET_SIZE(ndptrap)
 693 
 694 #else   /* __xpv */
 695 
 696         ENTRY_NP(ndptrap)
        /*
         * We want to do this quickly as every lwp using fp will take this
         * after a context switch -- we do the frequent path in ndptrap_frstor
         * below; for all other cases, we let the trap code handle it.
         */
 702         pushq   %rax
 703         pushq   %rbx
 704         cmpw    $KCS_SEL, 24(%rsp)      /* did we come from kernel mode? */
 705         jne     1f
 706         LOADCPU(%rax)                   /* if yes, don't swapgs */
 707         jmp     2f
 708 1:
 709         SWAPGS                          /* if from user, need swapgs */
 710         LOADCPU(%rax)
 711         SWAPGS
 712 2:      
        /*
         * xrstor uses %edx:%eax as its requested-feature mask, so %edx must
         * be saved here as well.
         * NOTE: %rdx has to be pushed after the "cmpw ... 24(%rsp)" above;
         * otherwise 24(%rsp) would no longer point at the saved CS.
         */
 718         pushq   %rdx
 719         cmpl    $0, fpu_exists(%rip)
 720         je      .handle_in_trap         /* let trap handle no fp case */
 721         movq    CPU_THREAD(%rax), %rbx  /* %rbx = curthread */
 722         movl    $FPU_EN, %eax
 723         movq    T_LWP(%rbx), %rbx       /* %rbx = lwp */
 724         testq   %rbx, %rbx
 725         jz      .handle_in_trap         /* should not happen? */
 726 #if LWP_PCB_FPU != 0
 727         addq    $LWP_PCB_FPU, %rbx      /* &lwp->lwp_pcb.pcb_fpu */
 728 #endif
 729         testl   %eax, PCB_FPU_FLAGS(%rbx)
 730         jz      .handle_in_trap         /* must be the first fault */
 731         clts
 732         andl    $_BITNOT(FPU_VALID), PCB_FPU_FLAGS(%rbx)
 733 #if FPU_CTX_FPU_REGS != 0
 734         addq    $FPU_CTX_FPU_REGS, %rbx
 735 #endif
 736 
 737         movl    FPU_CTX_FPU_XSAVE_MASK(%rbx), %eax      /* for xrstor */
 738         movl    FPU_CTX_FPU_XSAVE_MASK+4(%rbx), %edx    /* for xrstor */
 739 
        /*
         * The label below is used in trap.c to detect FP faults taken in
         * the kernel due to a user fault.
         */
 744         ALTENTRY(ndptrap_frstor)
 745         movq (%rbx), %rbx               /* fpu_regs.kfpu_u.kfpu_XX pointer */
 746         .globl  _patch_xrstorq_rbx
 747 _patch_xrstorq_rbx:
 748         fxrstorq (%rbx)
 749         popq    %rdx
 750         popq    %rbx
 751         popq    %rax
 752         IRET
 753         /*NOTREACHED*/
 754 
 755 .handle_in_trap:
 756         popq    %rdx
 757         popq    %rbx
 758         popq    %rax
 759         TRAP_NOERR(T_NOEXTFLT)  /* $7 */
 760         jmp     cmninttrap
 761         SET_SIZE(ndptrap_frstor)
 762         SET_SIZE(ndptrap)
 763 
 764 #endif  /* __xpv */
 765 
 766 #elif defined(__i386)
 767 
 768         ENTRY_NP(ndptrap)
        /*
         * We want to do this quickly as every lwp using fp will take this
         * after a context switch -- we do the frequent path in ndptrap_frstor
         * below; for all other cases, we let the trap code handle it.
         */
 774         pushl   %eax
 775         pushl   %ebx
 776         pushl   %edx                    /* for xrstor */
 777         pushl   %ds
 778         pushl   %gs
 779         movl    $KDS_SEL, %ebx
 780         movw    %bx, %ds
 781         movl    $KGS_SEL, %eax
 782         movw    %ax, %gs
 783         LOADCPU(%eax)
 784         cmpl    $0, fpu_exists
 785         je      .handle_in_trap         /* let trap handle no fp case */
 786         movl    CPU_THREAD(%eax), %ebx  /* %ebx = curthread */
 787         movl    $FPU_EN, %eax
 788         movl    T_LWP(%ebx), %ebx       /* %ebx = lwp */
 789         testl   %ebx, %ebx
 790         jz      .handle_in_trap         /* should not happen? */
 791 #if LWP_PCB_FPU != 0
 792         addl    $LWP_PCB_FPU, %ebx      /* &lwp->lwp_pcb.pcb_fpu */
 793 #endif
 794         testl   %eax, PCB_FPU_FLAGS(%ebx)
 795         jz      .handle_in_trap         /* must be the first fault */
 796         CLTS
 797         andl    $_BITNOT(FPU_VALID), PCB_FPU_FLAGS(%ebx)
 798 #if FPU_CTX_FPU_REGS != 0
 799         addl    $FPU_CTX_FPU_REGS, %ebx
 800 #endif
 801 
 802         movl    FPU_CTX_FPU_XSAVE_MASK(%ebx), %eax      /* for xrstor */
 803         movl    FPU_CTX_FPU_XSAVE_MASK+4(%ebx), %edx    /* for xrstor */
 804 
        /*
         * The label below is used in trap.c to detect FP faults taken in
         * the kernel due to a user fault.
         */
 809         ALTENTRY(ndptrap_frstor)
 810         movl    (%ebx), %ebx            /* fpu_regs.kfpu_u.kfpu_XX pointer */
 811         .globl  _patch_fxrstor_ebx
 812 _patch_fxrstor_ebx:
 813         .globl  _patch_xrstor_ebx
 814 _patch_xrstor_ebx:
 815         frstor  (%ebx)          /* may be patched to fxrstor or xrstor */
 816         popl    %gs
 817         popl    %ds
 818         popl    %edx
 819         popl    %ebx
 820         popl    %eax
 821         IRET
 822 
 823 .handle_in_trap:
 824         popl    %gs
 825         popl    %ds
 826         popl    %edx
 827         popl    %ebx
 828         popl    %eax
 829         TRAP_NOERR(T_NOEXTFLT)  /* $7 */
 830         jmp     cmninttrap
 831         SET_SIZE(ndptrap_frstor)
 832         SET_SIZE(ndptrap)
 833 
 834 #endif  /* __i386 */
 835 
 836 #if !defined(__xpv)
 837 #if defined(__amd64)
 838 
 839         /*
 840          * #DF
 841          */
 842         ENTRY_NP(syserrtrap)
 843         pushq   $T_DBLFLT
 844         SET_CPU_GSBASE
 845 
 846         /*
 847          * We share this handler with kmdb (if kmdb is loaded).  As such, we
 848          * may have reached this point after encountering a #df in kmdb.  If
 849          * that happens, we'll still be on kmdb's IDT.  We need to switch back
 850          * to this CPU's IDT before proceeding.  Furthermore, if we did arrive
 851          * here from kmdb, kmdb is probably in a very sickly state, and
 852          * shouldn't be entered from the panic flow.  We'll suppress that
 853          * entry by setting nopanicdebug.
 854          */
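        /*
         * In outline (an illustrative C sketch of the check below):
         *
         *      sidt(&dtr);
         *      if (dtr.dtr_base != CPU->cpu_idt) {
         *              dtr.dtr_base = CPU->cpu_idt;
         *              dtr.dtr_limit = NIDT * GATE_DESC_SIZE;
         *              lidt(&dtr);
         *              nopanicdebug = 1;
         *      }
         */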
 855         pushq   %rax
 856         subq    $DESCTBR_SIZE, %rsp
 857         sidt    (%rsp)
 858         movq    %gs:CPU_IDT, %rax
 859         cmpq    %rax, DTR_BASE(%rsp)
 860         je      1f
 861 
 862         movq    %rax, DTR_BASE(%rsp)
 863         movw    $_MUL(NIDT, GATE_DESC_SIZE), DTR_LIMIT(%rsp)
 864         lidt    (%rsp)
 865 
 866         movl    $1, nopanicdebug
 867 
 868 1:      addq    $DESCTBR_SIZE, %rsp
 869         popq    %rax
 870         
 871         DFTRAP_PUSH
 872 
 873         /*
 874          * freeze trap trace.
 875          */
 876 #ifdef TRAPTRACE
 877         leaq    trap_trace_freeze(%rip), %r11
 878         incl    (%r11)
 879 #endif
 880 
 881         ENABLE_INTR_FLAGS
 882 
 883         movq    %rsp, %rdi      /* &regs */
 884         xorl    %esi, %esi      /* clear address */
 885         xorl    %edx, %edx      /* cpuid = 0 */
 886         call    trap
 887 
 888         SET_SIZE(syserrtrap)
 889 
 890 #elif defined(__i386)
 891 
 892         /*
 893          * #DF
 894          */
 895         ENTRY_NP(syserrtrap)
 896         cli                             /* disable interrupts */
 897 
 898         /*
 899          * We share this handler with kmdb (if kmdb is loaded).  As such, we
 900          * may have reached this point after encountering a #df in kmdb.  If
 901          * that happens, we'll still be on kmdb's IDT.  We need to switch back
 902          * to this CPU's IDT before proceeding.  Furthermore, if we did arrive
 903          * here from kmdb, kmdb is probably in a very sickly state, and
 904          * shouldn't be entered from the panic flow.  We'll suppress that
 905          * entry by setting nopanicdebug.
 906          */
 907 
 908         subl    $DESCTBR_SIZE, %esp
 909         movl    %gs:CPU_IDT, %eax
 910         sidt    (%esp)
 911         cmpl    DTR_BASE(%esp), %eax
 912         je      1f
 913 
 914         movl    %eax, DTR_BASE(%esp)
 915         movw    $_MUL(NIDT, GATE_DESC_SIZE), DTR_LIMIT(%esp)
 916         lidt    (%esp)
 917 
 918         movl    $1, nopanicdebug
 919 
 920 1:      addl    $DESCTBR_SIZE, %esp
 921 
 922         /*
 923          * Check the CPL in the TSS to see what mode
 924          * (user or kernel) we took the fault in.  At this
 925          * point we are running in the context of the double
 926          * fault task (dftss) but the CPU's task points to
 927          * the previous task (ktss) where the process context
 928          * has been saved as the result of the task switch.
 929          */
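        /*
         * In outline: if the fault was taken in user mode, move onto the
         * kernel stack recorded in the TSS, clear NT/TF/IF in the saved
         * EFLAGS, reload the LDT and segment registers, clear the busy bits
         * in both TSS descriptors and ltr back to the kernel TSS; in either
         * case we end up at make_frame, which builds a normal trap frame
         * from the state saved in the TSS.
         */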
 930         movl    %gs:CPU_TSS, %eax       /* get the TSS */
 931         movl    TSS_SS(%eax), %ebx      /* save the fault SS */
 932         movl    TSS_ESP(%eax), %edx     /* save the fault ESP */
 933         testw   $CPL_MASK, TSS_CS(%eax) /* user mode ? */
 934         jz      make_frame
 935         movw    TSS_SS0(%eax), %ss      /* get on the kernel stack */
 936         movl    TSS_ESP0(%eax), %esp
 937 
 938         /*
 939          * Clear the NT flag to avoid a task switch when the process
 940          * finally pops the EFL off the stack via an iret.  Clear
 941          * the TF flag since that is what the processor does for
 942          * a normal exception. Clear the IE flag so that interrupts
 943          * remain disabled.
 944          */
 945         movl    TSS_EFL(%eax), %ecx
 946         andl    $_BITNOT(PS_NT|PS_T|PS_IE), %ecx
 947         pushl   %ecx
 948         popfl                           /* restore the EFL */
 949         movw    TSS_LDT(%eax), %cx      /* restore the LDT */
 950         lldt    %cx
 951 
 952         /*
 953          * Restore process segment selectors.
 954          */
 955         movw    TSS_DS(%eax), %ds
 956         movw    TSS_ES(%eax), %es
 957         movw    TSS_FS(%eax), %fs
 958         movw    TSS_GS(%eax), %gs
 959 
 960         /*
 961          * Restore task segment selectors.
 962          */
 963         movl    $KDS_SEL, TSS_DS(%eax)
 964         movl    $KDS_SEL, TSS_ES(%eax)
 965         movl    $KDS_SEL, TSS_SS(%eax)
 966         movl    $KFS_SEL, TSS_FS(%eax)
 967         movl    $KGS_SEL, TSS_GS(%eax)
 968 
 969         /*
 970          * Clear the TS bit, the busy bits in both task
 971          * descriptors, and switch tasks.
 972          */
 973         clts
 974         leal    gdt0, %ecx
 975         movl    DFTSS_SEL+4(%ecx), %esi
 976         andl    $_BITNOT(0x200), %esi
 977         movl    %esi, DFTSS_SEL+4(%ecx)
 978         movl    KTSS_SEL+4(%ecx), %esi
 979         andl    $_BITNOT(0x200), %esi
 980         movl    %esi, KTSS_SEL+4(%ecx)
 981         movw    $KTSS_SEL, %cx
 982         ltr     %cx
 983 
 984         /*
 985          * Restore part of the process registers.
 986          */
 987         movl    TSS_EBP(%eax), %ebp
 988         movl    TSS_ECX(%eax), %ecx
 989         movl    TSS_ESI(%eax), %esi
 990         movl    TSS_EDI(%eax), %edi
 991 
 992 make_frame:
 993         /*
 994          * Make a trap frame.  Leave the error code (0) on
 995          * the stack since the first word on a trap stack is
 996          * unused anyway.
 997          */
 998         pushl   %ebx                    / fault SS
 999         pushl   %edx                    / fault ESP
1000         pushl   TSS_EFL(%eax)           / fault EFL
1001         pushl   TSS_CS(%eax)            / fault CS
1002         pushl   TSS_EIP(%eax)           / fault EIP
1003         pushl   $0                      / error code
1004         pushl   $T_DBLFLT               / trap number 8
1005         movl    TSS_EBX(%eax), %ebx     / restore EBX
1006         movl    TSS_EDX(%eax), %edx     / restore EDX
1007         movl    TSS_EAX(%eax), %eax     / restore EAX
1008         sti                             / enable interrupts
1009         jmp     cmntrap
1010         SET_SIZE(syserrtrap)
1011 
1012 #endif  /* __i386 */
1013 #endif  /* !__xpv */
1014 
1015         ENTRY_NP(overrun)
1016         push    $0
1017         TRAP_NOERR(T_EXTOVRFLT) /* $9 i386 only - not generated */
1018         jmp     cmninttrap
1019         SET_SIZE(overrun)
1020 
1021         /*
1022          * #TS
1023          */
1024         ENTRY_NP(invtsstrap)
1025         TRAP_ERR(T_TSSFLT)      /* $10 already have error code on stack */
1026         jmp     cmntrap
1027         SET_SIZE(invtsstrap)
1028 
1029         /*
1030          * #NP
1031          */
1032         ENTRY_NP(segnptrap)
1033         TRAP_ERR(T_SEGFLT)      /* $11 already have error code on stack */
1034 #if defined(__amd64)
1035         SET_CPU_GSBASE
1036 #endif
1037         jmp     cmntrap
1038         SET_SIZE(segnptrap)
1039 
1040         /*
1041          * #SS
1042          */
1043         ENTRY_NP(stktrap)
1044         TRAP_ERR(T_STKFLT)      /* $12 already have error code on stack */
1045 #if defined(__amd64)
1046         SET_CPU_GSBASE
1047 #endif
1048         jmp     cmntrap
1049         SET_SIZE(stktrap)
1050 
1051         /*
1052          * #GP
1053          */
1054         ENTRY_NP(gptrap)
1055         TRAP_ERR(T_GPFLT)       /* $13 already have error code on stack */
1056 #if defined(__amd64)
1057         SET_CPU_GSBASE
1058 #endif
1059         jmp     cmntrap
1060         SET_SIZE(gptrap)
1061 
1062         /*
1063          * #PF
1064          */
1065         ENTRY_NP(pftrap)
1066         TRAP_ERR(T_PGFLT)       /* $14 already have error code on stack */
1067         INTR_PUSH
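        /*
         * Hand the faulting address to cmntrap_pushed in %r15 (amd64) or
         * %esi (i386); under __xpv the hypervisor's per-vcpu copy of %cr2
         * is used instead of reading the register directly.
         */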
1068 #if defined(__xpv)
1069 
1070 #if defined(__amd64)
1071         movq    %gs:CPU_VCPU_INFO, %r15
1072         movq    VCPU_INFO_ARCH_CR2(%r15), %r15  /* vcpu[].arch.cr2 */
1073 #elif defined(__i386)
1074         movl    %gs:CPU_VCPU_INFO, %esi
1075         movl    VCPU_INFO_ARCH_CR2(%esi), %esi  /* vcpu[].arch.cr2 */
1076 #endif  /* __i386 */
1077 
1078 #else   /* __xpv */
1079 
1080 #if defined(__amd64)
1081         movq    %cr2, %r15
1082 #elif defined(__i386)
1083         movl    %cr2, %esi
1084 #endif  /* __i386 */
1085 
1086 #endif  /* __xpv */
1087         jmp     cmntrap_pushed
1088         SET_SIZE(pftrap)
1089 
1090 #if !defined(__amd64)
1091 
1092         .globl  idt0_default_r
1093 
1094         /*
1095          * #PF pentium bug workaround
1096          */
1097         ENTRY_NP(pentium_pftrap)
1098         pushl   %eax
1099         movl    %cr2, %eax
1100         andl    $MMU_STD_PAGEMASK, %eax
1101 
1102         cmpl    %eax, %cs:idt0_default_r+2      /* fixme */
1103 
1104         je      check_for_user_address
1105 user_mode:
1106         popl    %eax
1107         pushl   $T_PGFLT        /* $14 */
1108         jmp     cmntrap
1109 check_for_user_address:
1110         /*
1111          * Before we assume that we have an unmapped trap on our hands,
1112          * check to see if this is a fault from user mode.  If it is,
1113          * we'll kick back into the page fault handler.
1114          */
1115         movl    4(%esp), %eax   /* error code */
1116         andl    $PF_ERR_USER, %eax
1117         jnz     user_mode
1118 
1119         /*
1120          * We now know that this is the invalid opcode trap.
1121          */
1122         popl    %eax
1123         addl    $4, %esp        /* pop error code */
1124         jmp     invoptrap
1125         SET_SIZE(pentium_pftrap)
1126 
1127 #endif  /* !__amd64 */
1128 
1129         ENTRY_NP(resvtrap)
1130         TRAP_NOERR(15)          /* (reserved)  */
1131         jmp     cmntrap
1132         SET_SIZE(resvtrap)
1133 
1134         /*
1135          * #MF
1136          */
1137         ENTRY_NP(ndperr)
1138         TRAP_NOERR(T_EXTERRFLT) /* $16 */
1139         jmp     cmninttrap
1140         SET_SIZE(ndperr)
1141 
1142         /*
1143          * #AC
1144          */
1145         ENTRY_NP(achktrap)
1146         TRAP_ERR(T_ALIGNMENT)   /* $17 */
1147         jmp     cmntrap
1148         SET_SIZE(achktrap)
1149 
1150         /*
1151          * #MC
1152          */
1153         .globl  cmi_mca_trap    /* see uts/i86pc/os/cmi.c */
1154 
1155 #if defined(__amd64)
1156 
1157         ENTRY_NP(mcetrap)
1158         TRAP_NOERR(T_MCE)       /* $18 */
1159 
1160         SET_CPU_GSBASE
1161 
1162         INTR_PUSH
1163         INTGATE_INIT_KERNEL_FLAGS
1164 
1165         TRACE_PTR(%rdi, %rbx, %ebx, %rcx, $TT_TRAP)
1166         TRACE_REGS(%rdi, %rsp, %rbx, %rcx)
1167         TRACE_STAMP(%rdi)
1168 
1169         movq    %rsp, %rbp
1170 
1171         movq    %rsp, %rdi      /* arg0 = struct regs *rp */
1172         call    cmi_mca_trap    /* cmi_mca_trap(rp); */
1173 
1174         jmp     _sys_rtt
1175         SET_SIZE(mcetrap)
1176 
1177 #else
1178 
1179         ENTRY_NP(mcetrap)
1180         TRAP_NOERR(T_MCE)       /* $18 */
1181 
1182         INTR_PUSH
1183         INTGATE_INIT_KERNEL_FLAGS
1184 
1185         TRACE_PTR(%edi, %ebx, %ebx, %ecx, $TT_TRAP)
1186         TRACE_REGS(%edi, %esp, %ebx, %ecx)
1187         TRACE_STAMP(%edi)
1188 
1189         movl    %esp, %ebp
1190 
1191         movl    %esp, %ecx
1192         pushl   %ecx            /* arg0 = struct regs *rp */
1193         call    cmi_mca_trap    /* cmi_mca_trap(rp) */
1194         addl    $4, %esp        /* pop arg0 */
1195 
1196         jmp     _sys_rtt
1197         SET_SIZE(mcetrap)
1198 
1199 #endif
1200 
1201         /*
1202          * #XF
1203          */
1204         ENTRY_NP(xmtrap)
1205         TRAP_NOERR(T_SIMDFPE)   /* $19 */
1206         jmp     cmninttrap
1207         SET_SIZE(xmtrap)
1208 
1209         ENTRY_NP(invaltrap)
1210         TRAP_NOERR(30)          /* very invalid */
1211         jmp     cmntrap
1212         SET_SIZE(invaltrap)
1213 
1214         ENTRY_NP(invalint)
1215         TRAP_NOERR(31)          /* even more so */
1216         jmp     cmnint
1217         SET_SIZE(invalint)
1218 
1219         .globl  fasttable
1220 
1221 #if defined(__amd64)
1222 
1223         ENTRY_NP(fasttrap)
1224         cmpl    $T_LASTFAST, %eax
1225         ja      1f
1226         orl     %eax, %eax      /* (zero extend top 32-bits) */
1227         leaq    fasttable(%rip), %r11
1228         leaq    (%r11, %rax, CLONGSIZE), %r11
1229         jmp     *(%r11)
1230 1:
1231         /*
1232          * Fast syscall number was illegal.  Make it look
1233          * as if the INT failed.  Modify %rip to point before the
1234          * INT, push the expected error code and fake a GP fault.
1235          *
1236          * XXX Why make the error code be offset into idt + 1?
1237          * Instead we should push a real (soft?) error code
1238          * on the stack and #gp handler could know about fasttraps?
1239          */
1240         XPV_TRAP_POP
1241 
1242         subq    $2, (%rsp)      /* XXX int insn 2-bytes */
1243         pushq   $_CONST(_MUL(T_FASTTRAP, GATE_DESC_SIZE) + 2)
1244 
1245 #if defined(__xpv)
1246         pushq   %r11
1247         pushq   %rcx
1248 #endif
1249         jmp     gptrap
1250         SET_SIZE(fasttrap)
1251 
1252 #elif defined(__i386)
1253 
1254         ENTRY_NP(fasttrap)
1255         cmpl    $T_LASTFAST, %eax
1256         ja      1f
1257         jmp     *%cs:fasttable(, %eax, CLONGSIZE)
1258 1:
1259         /*
1260          * Fast syscall number was illegal.  Make it look
1261          * as if the INT failed.  Modify %eip to point before the
1262          * INT, push the expected error code and fake a GP fault.
1263          *
1264          * XXX Why make the error code be offset into idt + 1?
1265          * Instead we should push a real (soft?) error code
1266          * on the stack and #gp handler could know about fasttraps?
1267          */
1268         subl    $2, (%esp)      /* XXX int insn 2-bytes */
1269         pushl   $_CONST(_MUL(T_FASTTRAP, GATE_DESC_SIZE) + 2)
1270         jmp     gptrap
1271         SET_SIZE(fasttrap)
1272 
1273 #endif  /* __i386 */
1274 
1275         ENTRY_NP(dtrace_ret)
1276         TRAP_NOERR(T_DTRACE_RET)
1277         jmp     dtrace_trap
1278         SET_SIZE(dtrace_ret)
1279 
1280 #if defined(__amd64)
1281 
1282         /*
1283          * RFLAGS 24 bytes up the stack from %rsp.
1284          * XXX a constant would be nicer.
1285          */
1286         ENTRY_NP(fast_null)
1287         XPV_TRAP_POP
1288         orq     $PS_C, 24(%rsp) /* set carry bit in user flags */
1289         IRET
1290         /*NOTREACHED*/
1291         SET_SIZE(fast_null)
1292 
1293 #elif defined(__i386)
1294 
1295         ENTRY_NP(fast_null)
1296         orw     $PS_C, 8(%esp)  /* set carry bit in user flags */
1297         IRET
1298         SET_SIZE(fast_null)
1299 
1300 #endif  /* __i386 */
1301 
1302         /*
1303          * Interrupts start at 32
1304          */
1305 #define MKIVCT(n)                       \
1306         ENTRY_NP(ivct/**/n)             \
1307         push    $0;                     \
1308         push    $n - 0x20;              \
1309         jmp     cmnint;                 \
1310         SET_SIZE(ivct/**/n)
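
        /*
         * For example, MKIVCT(32) expands (roughly) to an ivct32 entry point
         * that pushes a zero pseudo error code and the vector number relative
         * to 0x20 (so vector 32 pushes 0), then jumps to the common interrupt
         * handler cmnint.
         */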
1311 
1312         MKIVCT(32)
1313         MKIVCT(33)
1314         MKIVCT(34)
1315         MKIVCT(35)
1316         MKIVCT(36)
1317         MKIVCT(37)
1318         MKIVCT(38)
1319         MKIVCT(39)
1320         MKIVCT(40)
1321         MKIVCT(41)
1322         MKIVCT(42)
1323         MKIVCT(43)
1324         MKIVCT(44)
1325         MKIVCT(45)
1326         MKIVCT(46)
1327         MKIVCT(47)
1328         MKIVCT(48)
1329         MKIVCT(49)
1330         MKIVCT(50)
1331         MKIVCT(51)
1332         MKIVCT(52)
1333         MKIVCT(53)
1334         MKIVCT(54)
1335         MKIVCT(55)
1336         MKIVCT(56)
1337         MKIVCT(57)
1338         MKIVCT(58)
1339         MKIVCT(59)
1340         MKIVCT(60)
1341         MKIVCT(61)
1342         MKIVCT(62)
1343         MKIVCT(63)
1344         MKIVCT(64)
1345         MKIVCT(65)
1346         MKIVCT(66)
1347         MKIVCT(67)
1348         MKIVCT(68)
1349         MKIVCT(69)
1350         MKIVCT(70)
1351         MKIVCT(71)
1352         MKIVCT(72)
1353         MKIVCT(73)
1354         MKIVCT(74)
1355         MKIVCT(75)
1356         MKIVCT(76)
1357         MKIVCT(77)
1358         MKIVCT(78)
1359         MKIVCT(79)
1360         MKIVCT(80)
1361         MKIVCT(81)
1362         MKIVCT(82)
1363         MKIVCT(83)
1364         MKIVCT(84)
1365         MKIVCT(85)
1366         MKIVCT(86)
1367         MKIVCT(87)
1368         MKIVCT(88)
1369         MKIVCT(89)
1370         MKIVCT(90)
1371         MKIVCT(91)
1372         MKIVCT(92)
1373         MKIVCT(93)
1374         MKIVCT(94)
1375         MKIVCT(95)
1376         MKIVCT(96)
1377         MKIVCT(97)
1378         MKIVCT(98)
1379         MKIVCT(99)
1380         MKIVCT(100)
1381         MKIVCT(101)
1382         MKIVCT(102)
1383         MKIVCT(103)
1384         MKIVCT(104)
1385         MKIVCT(105)
1386         MKIVCT(106)
1387         MKIVCT(107)
1388         MKIVCT(108)
1389         MKIVCT(109)
1390         MKIVCT(110)
1391         MKIVCT(111)
1392         MKIVCT(112)
1393         MKIVCT(113)
1394         MKIVCT(114)
1395         MKIVCT(115)
1396         MKIVCT(116)
1397         MKIVCT(117)
1398         MKIVCT(118)
1399         MKIVCT(119)
1400         MKIVCT(120)
1401         MKIVCT(121)
1402         MKIVCT(122)
1403         MKIVCT(123)
1404         MKIVCT(124)
1405         MKIVCT(125)
1406         MKIVCT(126)
1407         MKIVCT(127)
1408         MKIVCT(128)
1409         MKIVCT(129)
1410         MKIVCT(130)
1411         MKIVCT(131)
1412         MKIVCT(132)
1413         MKIVCT(133)
1414         MKIVCT(134)
1415         MKIVCT(135)
1416         MKIVCT(136)
1417         MKIVCT(137)
1418         MKIVCT(138)
1419         MKIVCT(139)
1420         MKIVCT(140)
1421         MKIVCT(141)
1422         MKIVCT(142)
1423         MKIVCT(143)
1424         MKIVCT(144)
1425         MKIVCT(145)
1426         MKIVCT(146)
1427         MKIVCT(147)
1428         MKIVCT(148)
1429         MKIVCT(149)
1430         MKIVCT(150)
1431         MKIVCT(151)
1432         MKIVCT(152)
1433         MKIVCT(153)
1434         MKIVCT(154)
1435         MKIVCT(155)
1436         MKIVCT(156)
1437         MKIVCT(157)
1438         MKIVCT(158)
1439         MKIVCT(159)
1440         MKIVCT(160)
1441         MKIVCT(161)
1442         MKIVCT(162)
1443         MKIVCT(163)
1444         MKIVCT(164)
1445         MKIVCT(165)
1446         MKIVCT(166)
1447         MKIVCT(167)
1448         MKIVCT(168)
1449         MKIVCT(169)
1450         MKIVCT(170)
1451         MKIVCT(171)
1452         MKIVCT(172)
1453         MKIVCT(173)
1454         MKIVCT(174)
1455         MKIVCT(175)
1456         MKIVCT(176)
1457         MKIVCT(177)
1458         MKIVCT(178)
1459         MKIVCT(179)
1460         MKIVCT(180)
1461         MKIVCT(181)
1462         MKIVCT(182)
1463         MKIVCT(183)
1464         MKIVCT(184)
1465         MKIVCT(185)
1466         MKIVCT(186)
1467         MKIVCT(187)
1468         MKIVCT(188)
1469         MKIVCT(189)
1470         MKIVCT(190)
1471         MKIVCT(191)
1472         MKIVCT(192)
1473         MKIVCT(193)
1474         MKIVCT(194)
1475         MKIVCT(195)
1476         MKIVCT(196)
1477         MKIVCT(197)
1478         MKIVCT(198)
1479         MKIVCT(199)
1480         MKIVCT(200)
1481         MKIVCT(201)
1482         MKIVCT(202)
1483         MKIVCT(203)
1484         MKIVCT(204)
1485         MKIVCT(205)
1486         MKIVCT(206)
1487         MKIVCT(207)
1488         MKIVCT(208)
1489         MKIVCT(209)
1490         MKIVCT(210)
1491         MKIVCT(211)
1492         MKIVCT(212)
1493         MKIVCT(213)
1494         MKIVCT(214)
1495         MKIVCT(215)
1496         MKIVCT(216)
1497         MKIVCT(217)
1498         MKIVCT(218)
1499         MKIVCT(219)
1500         MKIVCT(220)
1501         MKIVCT(221)
1502         MKIVCT(222)
1503         MKIVCT(223)
1504         MKIVCT(224)
1505         MKIVCT(225)
1506         MKIVCT(226)
1507         MKIVCT(227)
1508         MKIVCT(228)
1509         MKIVCT(229)
1510         MKIVCT(230)
1511         MKIVCT(231)
1512         MKIVCT(232)
1513         MKIVCT(233)
1514         MKIVCT(234)
1515         MKIVCT(235)
1516         MKIVCT(236)
1517         MKIVCT(237)
1518         MKIVCT(238)
1519         MKIVCT(239)
1520         MKIVCT(240)
1521         MKIVCT(241)
1522         MKIVCT(242)
1523         MKIVCT(243)
1524         MKIVCT(244)
1525         MKIVCT(245)
1526         MKIVCT(246)
1527         MKIVCT(247)
1528         MKIVCT(248)
1529         MKIVCT(249)
1530         MKIVCT(250)
1531         MKIVCT(251)
1532         MKIVCT(252)
1533         MKIVCT(253)
1534         MKIVCT(254)
1535         MKIVCT(255)
1536 
1537 #endif  /* __lint */