/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, 2014 by Delphix. All rights reserved.
 * Copyright 2019 Joyent, Inc.
 */

/*
 * Copyright (c) 1989, 1990 William F. Jolitz.
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/amd64/amd64/exception.S,v 1.113 2003/10/15 02:04:52 peter Exp $
 */

#include <sys/asm_linkage.h>
#include <sys/asm_misc.h>
#include <sys/trap.h>
#include <sys/psw.h>
#include <sys/regset.h>
#include <sys/privregs.h>
#include <sys/dtrace.h>
#include <sys/x86_archext.h>
#include <sys/traptrace.h>
#include <sys/machparam.h>

#include "assym.h"

/*
 * Push $0 on the stack for traps that do not
 * generate an error code, so that the rest of the
 * kernel can expect a consistent stack layout
 * from any exception.
 *
 * Note that under the hypervisor, %r11 and %rcx are
 * already on the stack for every exception; XPV_TRAP_POP
 * pops them back into their registers so the frame can
 * then be saved just as it is when running native.
 */

#if defined(__xpv)

#define NPTRAP_NOERR(trapno)    \
        pushq   $0;             \
        pushq   $trapno

#define TRAP_NOERR(trapno)      \
        XPV_TRAP_POP;           \
        NPTRAP_NOERR(trapno)

/*
 * error code already pushed by hw
 * onto stack.
 */
#define TRAP_ERR(trapno)        \
        XPV_TRAP_POP;           \
        pushq   $trapno

#else /* __xpv */

#define TRAP_NOERR(trapno)      \
        push    $0;             \
        push    $trapno

#define NPTRAP_NOERR(trapno) TRAP_NOERR(trapno)

/*
 * error code already pushed by hw
 * onto stack.
 */
#define TRAP_ERR(trapno)        \
        push    $trapno

#endif  /* __xpv */
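
/*
 * For illustration: after TRAP_NOERR(T_ZERODIV) (see div0trap below), the
 * hardware frame plus the two pushes leave the stack looking like this,
 * which is the consistent layout the common trap code expects:
 *
 * (high address)       r_ss
 *                      r_rsp
 *                      r_rfl
 *                      r_cs
 *                      r_rip
 *                      r_err           <-- the $0 pushed above
 * (low address)        r_trapno        <-- T_ZERODIV
 */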

        /*
         * These are the stacks used on cpu0 for taking double faults,
         * NMIs and MCEs.
         *
         * We define them here instead of in a C file so that we can page-align
         * them (gcc won't do that in a .c file).
         */
        .data
        DGDEF3(dblfault_stack0, DEFAULTSTKSZ, MMU_PAGESIZE)
        .fill   DEFAULTSTKSZ, 1, 0
        DGDEF3(nmi_stack0, DEFAULTSTKSZ, MMU_PAGESIZE)
        .fill   DEFAULTSTKSZ, 1, 0
        DGDEF3(mce_stack0, DEFAULTSTKSZ, MMU_PAGESIZE)
        .fill   DEFAULTSTKSZ, 1, 0

        /*
         * #DE
         */
        ENTRY_NP(div0trap)
        TRAP_NOERR(T_ZERODIV)   /* $0 */
        jmp     cmntrap
        SET_SIZE(div0trap)

        /*
         * #DB
         *
         * Fetch %dr6 and clear it, handing off the value to the
         * cmntrap code in %r15/%esi
         */
        ENTRY_NP(dbgtrap)
        TRAP_NOERR(T_SGLSTP)    /* $1 */

#if !defined(__xpv)             /* no sysenter support yet */
        /*
         * If we get here as a result of single-stepping a sysenter
         * instruction, we suddenly find ourselves taking a #db
         * in kernel mode -before- we've swapgs'ed.  So before we can
         * take the trap, we do the swapgs here, and fix the return
         * %rip in trap() so that we return immediately after the
         * swapgs in the sysenter handler to avoid doing the swapgs again.
         *
         * Nobody said that the design of sysenter was particularly
         * elegant, did they?
         */

        pushq   %r11

        /*
         * At this point the stack looks like this:
         *
         * (high address)       r_ss
         *                      r_rsp
         *                      r_rfl
         *                      r_cs
         *                      r_rip           <-- %rsp + 24
         *                      r_err           <-- %rsp + 16
         *                      r_trapno        <-- %rsp + 8
         * (low address)        %r11            <-- %rsp
         */
        leaq    sys_sysenter(%rip), %r11
        cmpq    %r11, 24(%rsp)  /* Compare to saved r_rip on the stack */
        je      1f
        leaq    brand_sys_sysenter(%rip), %r11
        cmpq    %r11, 24(%rsp)  /* Compare to saved r_rip on the stack */
        je      1f
        leaq    tr_sys_sysenter(%rip), %r11
        cmpq    %r11, 24(%rsp)
        je      1f
        leaq    tr_brand_sys_sysenter(%rip), %r11
        cmpq    %r11, 24(%rsp)
        jne     2f
1:      swapgs
2:      lfence /* swapgs mitigation */
        popq    %r11
#endif  /* !__xpv */

        INTR_PUSH
#if defined(__xpv)
        movl    $6, %edi
        call    kdi_dreg_get
        movq    %rax, %r15              /* %db6 -> %r15 */
        movl    $6, %edi
        movl    $0, %esi
        call    kdi_dreg_set            /* 0 -> %db6 */
#else
        movq    %db6, %r15
        xorl    %eax, %eax
        movq    %rax, %db6
#endif

        jmp     cmntrap_pushed
        SET_SIZE(dbgtrap)

#if !defined(__xpv)

/*
 * Macro to set gsbase or kgsbase to the address of the struct cpu
 * for this processor.  If we came from userland, set kgsbase;
 * otherwise set gsbase.  We find the proper cpu struct by looping
 * through the cpu structs for all processors until we find a match
 * for the GDT of the trapping processor.  The stack is expected to
 * be pointing at the standard regs pushed by hardware on a trap
 * (plus error code and trapno).
 *
 * It's OK for us to clobber gsbase here (and possibly end up with both
 * gsbase and kgsbase set to the same value) because we're not going back
 * out the normal way (via IRET).  Where we're going, we don't need no
 * user %gs.
 */
#define SET_CPU_GSBASE                                                  \
        subq    $REGOFF_TRAPNO, %rsp;   /* save regs */                 \
        movq    %rax, REGOFF_RAX(%rsp);                                 \
        movq    %rbx, REGOFF_RBX(%rsp);                                 \
        movq    %rcx, REGOFF_RCX(%rsp);                                 \
        movq    %rdx, REGOFF_RDX(%rsp);                                 \
        movq    %rbp, REGOFF_RBP(%rsp);                                 \
        movq    %rsp, %rbp;                                             \
        subq    $16, %rsp;              /* space for gdt */             \
        sgdt    6(%rsp);                                                \
        movq    8(%rsp), %rcx;          /* %rcx has gdt to match */     \
        xorl    %ebx, %ebx;             /* loop index */                \
        leaq    cpu(%rip), %rdx;        /* cpu pointer array */         \
1:                                                                      \
        movq    (%rdx, %rbx, CLONGSIZE), %rax;  /* get cpu[i] */        \
        cmpq    $0x0, %rax;             /* cpu[i] == NULL ? */          \
        je      2f;                     /* yes, continue */             \
        cmpq    %rcx, CPU_GDT(%rax);    /* gdt == cpu[i]->cpu_gdt ? */  \
        je      3f;                     /* yes, go set gsbase */        \
2:                                                                      \
        incl    %ebx;                   /* i++ */                       \
        cmpl    $NCPU, %ebx;            /* i < NCPU ? */                \
        jb      1b;                     /* yes, loop */                 \
/* XXX BIG trouble if we fall thru here.  We didn't find a gdt match */ \
3:                                                                      \
        movl    $MSR_AMD_KGSBASE, %ecx;                                 \
        cmpw    $KCS_SEL, REGOFF_CS(%rbp); /* trap from kernel? */      \
        jne     4f;                     /* no, go set KGSBASE */        \
        movl    $MSR_AMD_GSBASE, %ecx;  /* yes, set GSBASE */           \
        mfence;                         /* OPTERON_ERRATUM_88 */        \
4:                                                                      \
        movq    %rax, %rdx;             /* write base register */       \
        shrq    $32, %rdx;                                              \
        wrmsr;                                                          \
        movq    REGOFF_RDX(%rbp), %rdx; /* restore regs */              \
        movq    REGOFF_RCX(%rbp), %rcx;                                 \
        movq    REGOFF_RBX(%rbp), %rbx;                                 \
        movq    REGOFF_RAX(%rbp), %rax;                                 \
        movq    %rbp, %rsp;                                             \
        movq    REGOFF_RBP(%rsp), %rbp;                                 \
        addq    $REGOFF_TRAPNO, %rsp    /* pop stack */
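
/*
 * For illustration only: the loop above is roughly equivalent to the
 * following C sketch (the helper names are illustrative, not real routines):
 *
 *      for (i = 0; i < NCPU; i++) {
 *              if (cpu[i] != NULL && cpu[i]->cpu_gdt == gdt_base_from_sgdt)
 *                      break;
 *      }
 *      wrmsr(trapped_from_kernel ? MSR_AMD_GSBASE : MSR_AMD_KGSBASE,
 *          (uint64_t)cpu[i]);
 *
 * i.e. find the struct cpu whose cpu_gdt matches the trapping CPU's GDT
 * base, then point %gs (or the kernel's kgsbase shadow) at it.
 */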

#else   /* __xpv */

#define SET_CPU_GSBASE  /* noop on the hypervisor */

#endif  /* __xpv */


        /*
         * #NMI
         *
         * XXPV: See 6532669.
         */
        ENTRY_NP(nmiint)
        TRAP_NOERR(T_NMIFLT)    /* $2 */

        SET_CPU_GSBASE

        /*
         * Save all registers and set up the segment registers
         * with kernel selectors.
         */
        INTR_PUSH
        INTGATE_INIT_KERNEL_FLAGS

        TRACE_PTR(%r12, %rax, %eax, %rdx, $TT_TRAP)
        TRACE_REGS(%r12, %rsp, %rax, %rbx)
        TRACE_STAMP(%r12)

        movq    %rsp, %rbp

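        /*
         * Hand the saved register frame (%rbp == %rsp from above) to the
         * NMI autovector dispatcher as its argument.
         */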
        movq    %rbp, %rdi
        call    av_dispatch_nmivect

        INTR_POP
        call    x86_md_clear
        jmp     tr_iret_auto
        /*NOTREACHED*/
        SET_SIZE(nmiint)

        /*
         * #BP
         */
        ENTRY_NP(brktrap)
        XPV_TRAP_POP
        cmpw    $KCS_SEL, 8(%rsp)
        jne     bp_user

        /*
         * This is a breakpoint in the kernel -- it is very likely that this
         * is DTrace-induced.  To unify DTrace handling, we spoof this as an
         * invalid opcode (#UD) fault.  Note that #BP is a trap, not a fault --
         * we must decrement the trapping %rip to make it appear as a fault.
         * We then push a non-zero error code to indicate that this is coming
         * from #BP.
         */
        decq    (%rsp)
        push    $1                      /* error code -- non-zero for #BP */
        jmp     ud_kernel

bp_user:

        NPTRAP_NOERR(T_BPTFLT)  /* $3 */
        jmp     dtrace_trap

        SET_SIZE(brktrap)

        /*
         * #OF
         */
        ENTRY_NP(ovflotrap)
        TRAP_NOERR(T_OVFLW)     /* $4 */
        jmp     cmntrap
        SET_SIZE(ovflotrap)

        /*
         * #BR
         */
        ENTRY_NP(boundstrap)
        TRAP_NOERR(T_BOUNDFLT)  /* $5 */
        jmp     cmntrap
        SET_SIZE(boundstrap)

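        /*
         * #UD
         */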
        ENTRY_NP(invoptrap)

        XPV_TRAP_POP

        cmpw    $KCS_SEL, 8(%rsp)
        jne     ud_user

#if defined(__xpv)
        movb    $0, 12(%rsp)            /* clear saved upcall_mask from %cs */
#endif
        push    $0                      /* error code -- zero for #UD */
ud_kernel:
        push    $0xdddd                 /* a dummy trap number */
        INTR_PUSH
        movq    REGOFF_RIP(%rsp), %rdi
        movq    REGOFF_RSP(%rsp), %rsi
        movq    REGOFF_RAX(%rsp), %rdx
        pushq   (%rsi)
        movq    %rsp, %rsi
        subq    $8, %rsp
        call    dtrace_invop
        ALTENTRY(dtrace_invop_callsite)
        addq    $16, %rsp
        cmpl    $DTRACE_INVOP_PUSHL_EBP, %eax
        je      ud_push
        cmpl    $DTRACE_INVOP_LEAVE, %eax
        je      ud_leave
        cmpl    $DTRACE_INVOP_NOP, %eax
        je      ud_nop
        cmpl    $DTRACE_INVOP_RET, %eax
        je      ud_ret
        jmp     ud_trap

ud_push:
        /*
         * We must emulate a "pushq %rbp".  To do this, we pull the stack
         * down 8 bytes, and then store the base pointer.
         */
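        /*
         * For illustration: the iretq frame (RIP, CS, RFLAGS, RSP, SS) is
         * copied to a lower location on this same stack, the saved RSP is
         * decremented by 8, %rbp is written to that new top-of-stack slot,
         * and the saved RIP is advanced past the 1-byte trapping instruction
         * before the iretq.
         */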
        INTR_POP
        subq    $16, %rsp               /* make room for %rbp */
        pushq   %rax                    /* push temp */
        movq    24(%rsp), %rax          /* load calling RIP */
        addq    $1, %rax                /* increment over trapping instr */
        movq    %rax, 8(%rsp)           /* store calling RIP */
        movq    32(%rsp), %rax          /* load calling CS */
        movq    %rax, 16(%rsp)          /* store calling CS */
        movq    40(%rsp), %rax          /* load calling RFLAGS */
        movq    %rax, 24(%rsp)          /* store calling RFLAGS */
        movq    48(%rsp), %rax          /* load calling RSP */
        subq    $8, %rax                /* make room for %rbp */
        movq    %rax, 32(%rsp)          /* store calling RSP */
        movq    56(%rsp), %rax          /* load calling SS */
        movq    %rax, 40(%rsp)          /* store calling SS */
        movq    32(%rsp), %rax          /* reload calling RSP */
        movq    %rbp, (%rax)            /* store %rbp there */
        popq    %rax                    /* pop off temp */
        jmp     tr_iret_kernel          /* return from interrupt */
        /*NOTREACHED*/

ud_leave:
        /*
         * We must emulate a "leave", which is the same as a "movq %rbp,
         * %rsp" followed by a "popq %rbp".  We can exploit the fact
         * that the %rsp is explicitly saved to effect the pop without
         * having to reshuffle the other data pushed for the trap.
         */

        INTR_POP
        pushq   %rax                    /* push temp */
        movq    8(%rsp), %rax           /* load calling RIP */
        addq    $1, %rax                /* increment over trapping instr */
        movq    %rax, 8(%rsp)           /* store calling RIP */
        movq    (%rbp), %rax            /* get new %rbp */
        addq    $8, %rbp                /* adjust new %rsp */
        movq    %rbp, 32(%rsp)          /* store new %rsp */
        movq    %rax, %rbp              /* set new %rbp */
        popq    %rax                    /* pop off temp */
        jmp     tr_iret_kernel          /* return from interrupt */
        /*NOTREACHED*/

ud_nop:
        /*
         * We must emulate a "nop".  This is obviously not hard:  we need only
         * advance the %rip by one.
         */
        INTR_POP
        incq    (%rsp)
        jmp     tr_iret_kernel
        /*NOTREACHED*/

ud_ret:
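        /*
         * We must emulate a "ret": load the return address from the
         * interrupted stack (via the saved %rsp), store it as the return
         * %rip, and pop it by adding 8 to the saved %rsp.
         */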
        INTR_POP
        pushq   %rax                    /* push temp */
        movq    32(%rsp), %rax          /* load %rsp */
        movq    (%rax), %rax            /* load calling RIP */
        movq    %rax, 8(%rsp)           /* store calling RIP */
        addq    $8, 32(%rsp)            /* adjust new %rsp */
        popq    %rax                    /* pop off temp */
        jmp     tr_iret_kernel          /* return from interrupt */
        /*NOTREACHED*/

ud_trap:
        /*
         * We're going to let the kernel handle this as a normal #UD.  If,
         * however, we came through #BP and are spoofing #UD (in this case,
         * the stored error value will be non-zero), we need to de-spoof
         * the trap by incrementing %rip and pushing T_BPTFLT.
         */
        cmpq    $0, REGOFF_ERR(%rsp)
        je      ud_ud
        incq    REGOFF_RIP(%rsp)
        addq    $REGOFF_RIP, %rsp
        NPTRAP_NOERR(T_BPTFLT)  /* $3 */
        jmp     cmntrap

ud_ud:
        addq    $REGOFF_RIP, %rsp
ud_user:
        NPTRAP_NOERR(T_ILLINST)
        jmp     cmntrap
        SET_SIZE(invoptrap)

        /*
         * #NM
         */

        ENTRY_NP(ndptrap)
        TRAP_NOERR(T_NOEXTFLT)  /* $7 */
        SET_CPU_GSBASE
        jmp     cmntrap
        SET_SIZE(ndptrap)

#if !defined(__xpv)

        /*
         * #DF
         */
        ENTRY_NP(syserrtrap)
        pushq   $T_DBLFLT
        SET_CPU_GSBASE

        /*
         * We share this handler with kmdb (if kmdb is loaded).  As such, we
         * may have reached this point after encountering a #df in kmdb.  If
         * that happens, we'll still be on kmdb's IDT.  We need to switch back
         * to this CPU's IDT before proceeding.  Furthermore, if we did arrive
         * here from kmdb, kmdb is probably in a very sickly state, and
         * shouldn't be entered from the panic flow.  We'll suppress that
         * entry by setting nopanicdebug.
         */
        pushq   %rax
        subq    $DESCTBR_SIZE, %rsp
        sidt    (%rsp)
        movq    %gs:CPU_IDT, %rax
        cmpq    %rax, DTR_BASE(%rsp)
        je      1f

        movq    %rax, DTR_BASE(%rsp)
        movw    $_MUL(NIDT, GATE_DESC_SIZE), DTR_LIMIT(%rsp)
        lidt    (%rsp)

        movl    $1, nopanicdebug

1:      addq    $DESCTBR_SIZE, %rsp
        popq    %rax

        DFTRAP_PUSH

        /*
         * freeze trap trace.
         */
#ifdef TRAPTRACE
        leaq    trap_trace_freeze(%rip), %r11
        incl    (%r11)
#endif

        ENABLE_INTR_FLAGS

        movq    %rsp, %rdi      /* &regs */
        xorl    %esi, %esi      /* clear address */
        xorl    %edx, %edx      /* cpuid = 0 */
        call    trap

        SET_SIZE(syserrtrap)

#endif  /* !__xpv */

        /*
         * #TS
         */
        ENTRY_NP(invtsstrap)
        TRAP_ERR(T_TSSFLT)      /* $10 already have error code on stack */
        jmp     cmntrap
        SET_SIZE(invtsstrap)

        /*
         * #NP
         */
        ENTRY_NP(segnptrap)
        TRAP_ERR(T_SEGFLT)      /* $11 already have error code on stack */
        SET_CPU_GSBASE
        jmp     cmntrap
        SET_SIZE(segnptrap)

        /*
         * #SS
         */
        ENTRY_NP(stktrap)
        TRAP_ERR(T_STKFLT)      /* $12 already have error code on stack */
        SET_CPU_GSBASE
        jmp     cmntrap
        SET_SIZE(stktrap)

        /*
         * #GP
         */
        ENTRY_NP(gptrap)
        TRAP_ERR(T_GPFLT)       /* $13 already have error code on stack */
        SET_CPU_GSBASE
        jmp     cmntrap
        SET_SIZE(gptrap)

        /*
         * #PF
         */
        ENTRY_NP(pftrap)
        TRAP_ERR(T_PGFLT)       /* $14 already have error code on stack */
        INTR_PUSH
#if defined(__xpv)

        movq    %gs:CPU_VCPU_INFO, %r15
        movq    VCPU_INFO_ARCH_CR2(%r15), %r15  /* vcpu[].arch.cr2 */

#else   /* __xpv */

        movq    %cr2, %r15

#endif  /* __xpv */
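        /*
         * The faulting address (from %cr2, or the vcpu info structure under
         * the hypervisor) is handed to the cmntrap code in %r15.
         */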
        jmp     cmntrap_pushed
        SET_SIZE(pftrap)

        ENTRY_NP(resvtrap)
        TRAP_NOERR(T_RESVTRAP)  /* (reserved) */
        jmp     cmntrap
        SET_SIZE(resvtrap)

        /*
         * #MF
         */
        ENTRY_NP(ndperr)
        TRAP_NOERR(T_EXTERRFLT) /* $16 */
        jmp     cmninttrap
        SET_SIZE(ndperr)

        /*
         * #AC
         */
        ENTRY_NP(achktrap)
        TRAP_ERR(T_ALIGNMENT)   /* $17 */
        jmp     cmntrap
        SET_SIZE(achktrap)

        /*
         * #MC
         */
        .globl  cmi_mca_trap    /* see uts/i86pc/os/cmi.c */

        ENTRY_NP(mcetrap)
        TRAP_NOERR(T_MCE)       /* $18 */

        SET_CPU_GSBASE

        INTR_PUSH
        INTGATE_INIT_KERNEL_FLAGS

        TRACE_PTR(%rdi, %rbx, %ebx, %rcx, $TT_TRAP)
        TRACE_REGS(%rdi, %rsp, %rbx, %rcx)
        TRACE_STAMP(%rdi)

        movq    %rsp, %rbp

        movq    %rsp, %rdi      /* arg0 = struct regs *rp */
        call    cmi_mca_trap    /* cmi_mca_trap(rp); */

        jmp     _sys_rtt
        SET_SIZE(mcetrap)

        /*
         * #XF
         */
        ENTRY_NP(xmtrap)
        TRAP_NOERR(T_SIMDFPE)   /* $19 */
        jmp     cmninttrap
        SET_SIZE(xmtrap)

        ENTRY_NP(invaltrap)
        TRAP_NOERR(T_INVALTRAP) /* very invalid */
        jmp     cmntrap
        SET_SIZE(invaltrap)

        .globl  fasttable

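        /*
         * Fast trap entry point: the fast trap number arrives in %eax and
         * is used to index fasttable, an array of CLONGSIZE-wide handler
         * pointers; numbers above T_LASTFAST fall through to the error
         * path below.
         */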
        ENTRY_NP(fasttrap)
        cmpl    $T_LASTFAST, %eax
        ja      1f
        orl     %eax, %eax      /* (zero extend top 32-bits) */
        leaq    fasttable(%rip), %r11
        leaq    (%r11, %rax, CLONGSIZE), %r11
        movq    (%r11), %r11
        INDIRECT_JMP_REG(r11)
1:
        /*
         * The fast syscall number was illegal.  Make it look as if the
         * INT failed: modify %rip to point back at the INT instruction,
         * push the expected error code, and fake a #GP fault.
         *
         * XXX Why make the error code be the offset into the IDT + 1?
         * Instead we should push a real (soft?) error code on the stack
         * and the #gp handler could know about fasttraps?
         */
        XPV_TRAP_POP

        subq    $2, (%rsp)      /* XXX int insn 2-bytes */
        pushq   $_CONST(_MUL(T_FASTTRAP, GATE_DESC_SIZE) + 2)

#if defined(__xpv)
        pushq   %r11
        pushq   %rcx
#endif
        jmp     gptrap
        SET_SIZE(fasttrap)

        ENTRY_NP(dtrace_ret)
        TRAP_NOERR(T_DTRACE_RET)
        jmp     dtrace_trap
        SET_SIZE(dtrace_ret)

        /*
         * RFLAGS is 24 bytes up the stack from %rsp.
         * XXX a constant would be nicer.
         */
        ENTRY_NP(fast_null)
        XPV_TRAP_POP
        orq     $PS_C, 24(%rsp) /* set carry bit in user flags */
        call    x86_md_clear
        jmp     tr_iret_auto
        /*NOTREACHED*/
        SET_SIZE(fast_null)

        /*
         * Interrupts start at 32
         */
#define MKIVCT(n)                       \
        ENTRY_NP(ivct/**/n)             \
        push    $0;                     \
        push    $n - 0x20;              \
        jmp     cmnint;                 \
        SET_SIZE(ivct/**/n)
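
        /*
         * MKIVCT(n) expands to the entry point ivct<n> for interrupt
         * vector n: it pushes a zero "error code" and the vector number
         * relative to 0x20 (hardware interrupts begin at vector 32), then
         * jumps to the common interrupt handler, cmnint.
         */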

        MKIVCT(32)
        MKIVCT(33)
        MKIVCT(34)
        MKIVCT(35)
        MKIVCT(36)
        MKIVCT(37)
        MKIVCT(38)
        MKIVCT(39)
        MKIVCT(40)
        MKIVCT(41)
        MKIVCT(42)
        MKIVCT(43)
        MKIVCT(44)
        MKIVCT(45)
        MKIVCT(46)
        MKIVCT(47)
        MKIVCT(48)
        MKIVCT(49)
        MKIVCT(50)
        MKIVCT(51)
        MKIVCT(52)
        MKIVCT(53)
        MKIVCT(54)
        MKIVCT(55)
        MKIVCT(56)
        MKIVCT(57)
        MKIVCT(58)
        MKIVCT(59)
        MKIVCT(60)
        MKIVCT(61)
        MKIVCT(62)
        MKIVCT(63)
        MKIVCT(64)
        MKIVCT(65)
        MKIVCT(66)
        MKIVCT(67)
        MKIVCT(68)
        MKIVCT(69)
        MKIVCT(70)
        MKIVCT(71)
        MKIVCT(72)
        MKIVCT(73)
        MKIVCT(74)
        MKIVCT(75)
        MKIVCT(76)
        MKIVCT(77)
        MKIVCT(78)
        MKIVCT(79)
        MKIVCT(80)
        MKIVCT(81)
        MKIVCT(82)
        MKIVCT(83)
        MKIVCT(84)
        MKIVCT(85)
        MKIVCT(86)
        MKIVCT(87)
        MKIVCT(88)
        MKIVCT(89)
        MKIVCT(90)
        MKIVCT(91)
        MKIVCT(92)
        MKIVCT(93)
        MKIVCT(94)
        MKIVCT(95)
        MKIVCT(96)
        MKIVCT(97)
        MKIVCT(98)
        MKIVCT(99)
        MKIVCT(100)
        MKIVCT(101)
        MKIVCT(102)
        MKIVCT(103)
        MKIVCT(104)
        MKIVCT(105)
        MKIVCT(106)
        MKIVCT(107)
        MKIVCT(108)
        MKIVCT(109)
        MKIVCT(110)
        MKIVCT(111)
        MKIVCT(112)
        MKIVCT(113)
        MKIVCT(114)
        MKIVCT(115)
        MKIVCT(116)
        MKIVCT(117)
        MKIVCT(118)
        MKIVCT(119)
        MKIVCT(120)
        MKIVCT(121)
        MKIVCT(122)
        MKIVCT(123)
        MKIVCT(124)
        MKIVCT(125)
        MKIVCT(126)
        MKIVCT(127)
        MKIVCT(128)
        MKIVCT(129)
        MKIVCT(130)
        MKIVCT(131)
        MKIVCT(132)
        MKIVCT(133)
        MKIVCT(134)
        MKIVCT(135)
        MKIVCT(136)
        MKIVCT(137)
        MKIVCT(138)
        MKIVCT(139)
        MKIVCT(140)
        MKIVCT(141)
        MKIVCT(142)
        MKIVCT(143)
        MKIVCT(144)
        MKIVCT(145)
        MKIVCT(146)
        MKIVCT(147)
        MKIVCT(148)
        MKIVCT(149)
        MKIVCT(150)
        MKIVCT(151)
        MKIVCT(152)
        MKIVCT(153)
        MKIVCT(154)
        MKIVCT(155)
        MKIVCT(156)
        MKIVCT(157)
        MKIVCT(158)
        MKIVCT(159)
        MKIVCT(160)
        MKIVCT(161)
        MKIVCT(162)
        MKIVCT(163)
        MKIVCT(164)
        MKIVCT(165)
        MKIVCT(166)
        MKIVCT(167)
        MKIVCT(168)
        MKIVCT(169)
        MKIVCT(170)
        MKIVCT(171)
        MKIVCT(172)
        MKIVCT(173)
        MKIVCT(174)
        MKIVCT(175)
        MKIVCT(176)
        MKIVCT(177)
        MKIVCT(178)
        MKIVCT(179)
        MKIVCT(180)
        MKIVCT(181)
        MKIVCT(182)
        MKIVCT(183)
        MKIVCT(184)
        MKIVCT(185)
        MKIVCT(186)
        MKIVCT(187)
        MKIVCT(188)
        MKIVCT(189)
        MKIVCT(190)
        MKIVCT(191)
        MKIVCT(192)
        MKIVCT(193)
        MKIVCT(194)
        MKIVCT(195)
        MKIVCT(196)
        MKIVCT(197)
        MKIVCT(198)
        MKIVCT(199)
        MKIVCT(200)
        MKIVCT(201)
        MKIVCT(202)
        MKIVCT(203)
        MKIVCT(204)
        MKIVCT(205)
        MKIVCT(206)
        MKIVCT(207)
        MKIVCT(208)
        MKIVCT(209)
        MKIVCT(210)
        MKIVCT(211)
        MKIVCT(212)
        MKIVCT(213)
        MKIVCT(214)
        MKIVCT(215)
        MKIVCT(216)
        MKIVCT(217)
        MKIVCT(218)
        MKIVCT(219)
        MKIVCT(220)
        MKIVCT(221)
        MKIVCT(222)
        MKIVCT(223)
        MKIVCT(224)
        MKIVCT(225)
        MKIVCT(226)
        MKIVCT(227)
        MKIVCT(228)
        MKIVCT(229)
        MKIVCT(230)
        MKIVCT(231)
        MKIVCT(232)
        MKIVCT(233)
        MKIVCT(234)
        MKIVCT(235)
        MKIVCT(236)
        MKIVCT(237)
        MKIVCT(238)
        MKIVCT(239)
        MKIVCT(240)
        MKIVCT(241)
        MKIVCT(242)
        MKIVCT(243)
        MKIVCT(244)
        MKIVCT(245)
        MKIVCT(246)
        MKIVCT(247)
        MKIVCT(248)
        MKIVCT(249)
        MKIVCT(250)
        MKIVCT(251)
        MKIVCT(252)
        MKIVCT(253)
        MKIVCT(254)
        MKIVCT(255)