de-linting of .s files
          --- old/usr/src/uts/intel/ia32/ml/exception.s
          +++ new/usr/src/uts/intel/ia32/ml/exception.s
   1    1  /*
   2    2   * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
   3    3   * Copyright (c) 2013, 2014 by Delphix. All rights reserved.
   4    4   * Copyright 2019 Joyent, Inc.
   5    5   */
   6    6  
   7    7  /*
   8    8   * Copyright (c) 1989, 1990 William F. Jolitz.
   9    9   * Copyright (c) 1990 The Regents of the University of California.
  10   10   * All rights reserved.
  11   11   *
  12   12   * Redistribution and use in source and binary forms, with or without
  13   13   * modification, are permitted provided that the following conditions
  14   14   * are met:
  15   15   * 1. Redistributions of source code must retain the above copyright
  16   16   *    notice, this list of conditions and the following disclaimer.
  17   17   * 2. Redistributions in binary form must reproduce the above copyright
  18   18   *    notice, this list of conditions and the following disclaimer in the
  19   19   *    documentation and/or other materials provided with the distribution.
  20   20   * 3. All advertising materials mentioning features or use of this software
  21   21   *    must display the following acknowledgement:
  22   22   *      This product includes software developed by the University of
  23   23   *      California, Berkeley and its contributors.
  24   24   * 4. Neither the name of the University nor the names of its contributors
  25   25   *    may be used to endorse or promote products derived from this software
  26   26   *    without specific prior written permission.
  27   27   *
  28   28   * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
  29   29   * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  30   30   * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  31   31   * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
  32   32   * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  33   33   * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  34   34   * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  35   35   * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  36   36   * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  37   37   * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  38   38   * SUCH DAMAGE.
  39   39   *
  40   40   * $FreeBSD: src/sys/amd64/amd64/exception.S,v 1.113 2003/10/15 02:04:52 peter Exp $
  41   41   */
  42   42  
  43   43  #include <sys/asm_linkage.h>
  
  44   44  #include <sys/asm_misc.h>
  45   45  #include <sys/trap.h>
  46   46  #include <sys/psw.h>
  47   47  #include <sys/regset.h>
  48   48  #include <sys/privregs.h>
  49   49  #include <sys/dtrace.h>
  50   50  #include <sys/x86_archext.h>
  51   51  #include <sys/traptrace.h>
  52   52  #include <sys/machparam.h>
  53   53  
  54      -#if !defined(__lint)
  55      -
  56   54  #include "assym.h"
  57   55  
  58   56  /*
  59   57   * push $0 on stack for traps that do not
  60   58   * generate an error code. This is so the rest
  61   59   * of the kernel can expect a consistent stack
   62   60   * from any exception.
  63   61   *
  64   62   * Note that for all exceptions for amd64
  65   63   * %r11 and %rcx are on the stack. Just pop
  66   64   * them back into their appropriate registers and let
   67   65   * it get saved as if running native.
  68   66   */
  69   67  
  70      -#if defined(__xpv) && defined(__amd64)
       68 +#if defined(__xpv)
  71   69  
  72   70  #define NPTRAP_NOERR(trapno)    \
  73   71          pushq   $0;             \
  74   72          pushq   $trapno
  75   73  
  76   74  #define TRAP_NOERR(trapno)      \
  77   75          XPV_TRAP_POP;           \
  78   76          NPTRAP_NOERR(trapno)
  79   77  
  80   78  /*
  81   79   * error code already pushed by hw
  82   80   * onto stack.
  83   81   */
  84   82  #define TRAP_ERR(trapno)        \
  85   83          XPV_TRAP_POP;           \
  86   84          pushq   $trapno
  87   85  
  88      -#else /* __xpv && __amd64 */
       86 +#else /* __xpv */
  89   87  
  90   88  #define TRAP_NOERR(trapno)      \
  91   89          push    $0;             \
  92   90          push    $trapno
  93   91  
  94   92  #define NPTRAP_NOERR(trapno) TRAP_NOERR(trapno)
  95   93  
  96   94  /*
  97   95   * error code already pushed by hw
  98   96   * onto stack.
  99   97   */
 100   98  #define TRAP_ERR(trapno)        \
 101   99          push    $trapno
 102  100  
 103      -#endif  /* __xpv && __amd64 */
      101 +#endif  /* __xpv */
 104  102  
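
For review context: TRAP_NOERR/NPTRAP_NOERR/TRAP_ERR exist only to
normalize the top of the trap stack before cmntrap runs.  A minimal C
sketch of the frame they guarantee (illustrative only -- field names
follow the stack diagram in dbgtrap below, not the kernel's real
struct regs):

	#include <stdint.h>

	/*
	 * Lowest address first.  r_rip through r_ss come from
	 * hardware (r_err too, for faults such as #GP); the macros
	 * push r_err ($0) when hardware did not, then always push
	 * r_trapno.
	 */
	typedef struct fake_trap_frame {
		uint64_t r_trapno;	/* pushed by TRAP_NOERR/TRAP_ERR */
		uint64_t r_err;		/* $0 unless hardware supplied one */
		uint64_t r_rip;
		uint64_t r_cs;
		uint64_t r_rfl;
		uint64_t r_rsp;
		uint64_t r_ss;
	} fake_trap_frame_t;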
 105  103          /*
 106  104           * These are the stacks used on cpu0 for taking double faults,
 107      -         * NMIs and MCEs (the latter two only on amd64 where we have IST).
      105 +         * NMIs and MCEs.
 108  106           *
 109  107           * We define them here instead of in a C file so that we can page-align
 110  108           * them (gcc won't do that in a .c file).
 111  109           */
 112  110          .data
 113  111          DGDEF3(dblfault_stack0, DEFAULTSTKSZ, MMU_PAGESIZE)
 114  112          .fill   DEFAULTSTKSZ, 1, 0
 115  113          DGDEF3(nmi_stack0, DEFAULTSTKSZ, MMU_PAGESIZE)
 116  114          .fill   DEFAULTSTKSZ, 1, 0
 117  115          DGDEF3(mce_stack0, DEFAULTSTKSZ, MMU_PAGESIZE)
 118  116          .fill   DEFAULTSTKSZ, 1, 0
 119  117  
 120  118          /*
 121  119           * #DE
 122  120           */
 123  121          ENTRY_NP(div0trap)
 124  122          TRAP_NOERR(T_ZERODIV)   /* $0 */
 125  123          jmp     cmntrap
 126  124          SET_SIZE(div0trap)
  
 127  125  
 128  126          /*
 129  127           * #DB
 130  128           *
 131  129           * Fetch %dr6 and clear it, handing off the value to the
 132  130           * cmntrap code in %r15/%esi
 133  131           */
 134  132          ENTRY_NP(dbgtrap)
 135  133          TRAP_NOERR(T_SGLSTP)    /* $1 */
 136  134  
 137      -#if defined(__amd64)
 138  135  #if !defined(__xpv)             /* no sysenter support yet */
 139  136          /*
 140  137           * If we get here as a result of single-stepping a sysenter
 141  138           * instruction, we suddenly find ourselves taking a #db
 142  139           * in kernel mode -before- we've swapgs'ed.  So before we can
 143  140           * take the trap, we do the swapgs here, and fix the return
 144  141           * %rip in trap() so that we return immediately after the
 145  142           * swapgs in the sysenter handler to avoid doing the swapgs again.
 146  143           *
 147  144           * Nobody said that the design of sysenter was particularly
 148  145           * elegant, did they?
 149  146           */
 150  147  
 151  148          pushq   %r11
 152  149  
 153  150          /*
 154  151           * At this point the stack looks like this:
 155  152           *
 156  153           * (high address)       r_ss
 157  154           *                      r_rsp
 158  155           *                      r_rfl
 159  156           *                      r_cs
 160  157           *                      r_rip           <-- %rsp + 24
 161  158           *                      r_err           <-- %rsp + 16
 162  159           *                      r_trapno        <-- %rsp + 8
 163  160           * (low address)        %r11            <-- %rsp
 164  161           */
 165  162          leaq    sys_sysenter(%rip), %r11
 166  163          cmpq    %r11, 24(%rsp)  /* Compare to saved r_rip on the stack */
 167  164          je      1f
 168  165          leaq    brand_sys_sysenter(%rip), %r11
 169  166          cmpq    %r11, 24(%rsp)  /* Compare to saved r_rip on the stack */
 170  167          je      1f
 171  168          leaq    tr_sys_sysenter(%rip), %r11
 172  169          cmpq    %r11, 24(%rsp)
 173  170          je      1f
 174  171          leaq    tr_brand_sys_sysenter(%rip), %r11
 175  172          cmpq    %r11, 24(%rsp)
 176  173          jne     2f
 177  174  1:      swapgs
 178  175  2:      lfence /* swapgs mitigation */
 179  176          popq    %r11
 180  177  #endif  /* !__xpv */
 181  178  
 182  179          INTR_PUSH
 183  180  #if defined(__xpv)
 184  181          movl    $6, %edi
 185  182          call    kdi_dreg_get
  
 186  183          movq    %rax, %r15              /* %db6 -> %r15 */
 187  184          movl    $6, %edi
 188  185          movl    $0, %esi
 189  186          call    kdi_dreg_set            /* 0 -> %db6 */
 190  187  #else
 191  188          movq    %db6, %r15
 192  189          xorl    %eax, %eax
 193  190          movq    %rax, %db6
 194  191  #endif
 195  192  
 196      -#elif defined(__i386)
 197      -
 198      -        INTR_PUSH
 199      -#if defined(__xpv)
 200      -        pushl   $6
 201      -        call    kdi_dreg_get
 202      -        addl    $4, %esp
 203      -        movl    %eax, %esi              /* %dr6 -> %esi */
 204      -        pushl   $0
 205      -        pushl   $6
 206      -        call    kdi_dreg_set            /* 0 -> %dr6 */
 207      -        addl    $8, %esp
 208      -#else
 209      -        movl    %db6, %esi
 210      -        xorl    %eax, %eax
 211      -        movl    %eax, %db6
 212      -#endif
 213      -#endif  /* __i386 */
 214      -
 215  193          jmp     cmntrap_pushed
 216  194          SET_SIZE(dbgtrap)
 217  195  
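
The four leaq/cmpq pairs above are just a membership test on the saved
r_rip.  A C paraphrase of the decision (sketch only; the extern labels
stand in for the real sysenter entry points referenced above):

	#include <stdbool.h>
	#include <stdint.h>

	extern char sys_sysenter[], brand_sys_sysenter[];
	extern char tr_sys_sysenter[], tr_brand_sys_sysenter[];

	/*
	 * dbgtrap must swapgs early iff the single-step #DB fired on
	 * the first instruction of a sysenter handler, i.e. before
	 * that handler's own swapgs has had a chance to run.
	 */
	static bool
	db_needs_early_swapgs(uintptr_t saved_rip)
	{
		return (saved_rip == (uintptr_t)sys_sysenter ||
		    saved_rip == (uintptr_t)brand_sys_sysenter ||
		    saved_rip == (uintptr_t)tr_sys_sysenter ||
		    saved_rip == (uintptr_t)tr_brand_sys_sysenter);
	}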
 218      -#if defined(__amd64)
 219  196  #if !defined(__xpv)
 220  197  
 221  198  /*
 222  199   * Macro to set the gsbase or kgsbase to the address of the struct cpu
 223  200   * for this processor.  If we came from userland, set kgsbase else
 224  201   * set gsbase.  We find the proper cpu struct by looping through
 225  202   * the cpu structs for all processors till we find a match for the gdt
 226  203   * of the trapping processor.  The stack is expected to be pointing at
 227  204   * the standard regs pushed by hardware on a trap (plus error code and trapno).
 228  205   *
 229  206   * It's ok for us to clobber gsbase here (and possibly end up with both gsbase
 230  207   * and kgsbase set to the same value) because we're not going back the normal
 231  208   * way out of here (via IRET). Where we're going, we don't need no user %gs.
 232  209   */
 233  210  #define SET_CPU_GSBASE                                                  \
 234  211          subq    $REGOFF_TRAPNO, %rsp;   /* save regs */                 \
 235  212          movq    %rax, REGOFF_RAX(%rsp);                                 \
 236  213          movq    %rbx, REGOFF_RBX(%rsp);                                 \
 237  214          movq    %rcx, REGOFF_RCX(%rsp);                                 \
 238  215          movq    %rdx, REGOFF_RDX(%rsp);                                 \
 239  216          movq    %rbp, REGOFF_RBP(%rsp);                                 \
 240  217          movq    %rsp, %rbp;                                             \
 241  218          subq    $16, %rsp;              /* space for gdt */             \
 242  219          sgdt    6(%rsp);                                                \
 243  220          movq    8(%rsp), %rcx;          /* %rcx has gdt to match */     \
 244  221          xorl    %ebx, %ebx;             /* loop index */                \
 245  222          leaq    cpu(%rip), %rdx;        /* cpu pointer array */         \
 246  223  1:                                                                      \
 247  224          movq    (%rdx, %rbx, CLONGSIZE), %rax;  /* get cpu[i] */        \
 248  225          cmpq    $0x0, %rax;             /* cpu[i] == NULL ? */          \
 249  226          je      2f;                     /* yes, continue */             \
 250  227          cmpq    %rcx, CPU_GDT(%rax);    /* gdt == cpu[i]->cpu_gdt ? */  \
 251  228          je      3f;                     /* yes, go set gsbase */        \
 252  229  2:                                                                      \
 253  230          incl    %ebx;                   /* i++ */                       \
 254  231          cmpl    $NCPU, %ebx;            /* i < NCPU ? */                \
 255  232          jb      1b;                     /* yes, loop */                 \
 256  233  /* XXX BIG trouble if we fall thru here.  We didn't find a gdt match */ \
 257  234  3:                                                                      \
 258  235          movl    $MSR_AMD_KGSBASE, %ecx;                                 \
 259  236          cmpw    $KCS_SEL, REGOFF_CS(%rbp); /* trap from kernel? */      \
 260  237          jne     4f;                     /* no, go set KGSBASE */        \
 261  238          movl    $MSR_AMD_GSBASE, %ecx;  /* yes, set GSBASE */           \
 262  239          mfence;                         /* OPTERON_ERRATUM_88 */        \
 263  240  4:                                                                      \
 264  241          movq    %rax, %rdx;             /* write base register */       \
 265  242          shrq    $32, %rdx;                                              \
 266  243          wrmsr;                                                          \
 267  244          movq    REGOFF_RDX(%rbp), %rdx; /* restore regs */              \
 268  245          movq    REGOFF_RCX(%rbp), %rcx;                                 \
 269  246          movq    REGOFF_RBX(%rbp), %rbx;                                 \
  
 270  247          movq    REGOFF_RAX(%rbp), %rax;                                 \
 271  248          movq    %rbp, %rsp;                                             \
 272  249          movq    REGOFF_RBP(%rsp), %rbp;                                 \
 273  250          addq    $REGOFF_TRAPNO, %rsp    /* pop stack */
 274  251  
 275  252  #else   /* __xpv */
 276  253  
 277  254  #define SET_CPU_GSBASE  /* noop on the hypervisor */
 278  255  
 279  256  #endif  /* __xpv */
 280      -#endif  /* __amd64 */
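
The search loop inside SET_CPU_GSBASE is easier to audit against a C
paraphrase.  This is a sketch, assuming the cpu[] pointer array and
cpu_gdt field named in the macro's comments (NCPU's value here is a
placeholder):

	#include <stddef.h>

	#define	NCPU	256		/* placeholder for the sketch */

	struct cpu {
		void	*cpu_gdt;
	};
	extern struct cpu *cpu[NCPU];

	/*
	 * Match the sgdt result against each cpu[i]->cpu_gdt.
	 * Returning NULL is the "BIG trouble" fall-through noted
	 * in the macro.
	 */
	static struct cpu *
	cpu_from_gdt(void *gdt)
	{
		for (int i = 0; i < NCPU; i++) {
			if (cpu[i] != NULL && cpu[i]->cpu_gdt == gdt)
				return (cpu[i]);
		}
		return (NULL);
	}

The matched cpu pointer is then written to MSR_AMD_GSBASE if the trap
came from kernel mode (CS == KCS_SEL) and to MSR_AMD_KGSBASE otherwise.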
 281  257  
 282  258  
 283      -#if defined(__amd64)
 284      -
 285  259          /*
 286  260           * #NMI
 287  261           *
 288  262           * XXPV: See 6532669.
 289  263           */
 290  264          ENTRY_NP(nmiint)
 291  265          TRAP_NOERR(T_NMIFLT)    /* $2 */
 292  266  
 293  267          SET_CPU_GSBASE
 294  268  
 295  269          /*
 296  270           * Save all registers and setup segment registers
 297  271           * with kernel selectors.
 298  272           */
 299  273          INTR_PUSH
 300  274          INTGATE_INIT_KERNEL_FLAGS
 301  275  
 302  276          TRACE_PTR(%r12, %rax, %eax, %rdx, $TT_TRAP)
 303  277          TRACE_REGS(%r12, %rsp, %rax, %rbx)
 304  278          TRACE_STAMP(%r12)
 305  279  
 306  280          movq    %rsp, %rbp
  
  307  281  
 307  281  
 308  282          movq    %rbp, %rdi
 309  283          call    av_dispatch_nmivect
 310  284  
 311  285          INTR_POP
 312  286          call    x86_md_clear
 313  287          jmp     tr_iret_auto
 314  288          /*NOTREACHED*/
 315  289          SET_SIZE(nmiint)
 316  290  
 317      -#elif defined(__i386)
 318      -
 319  291          /*
 320      -         * #NMI
 321      -         */
 322      -        ENTRY_NP(nmiint)
 323      -        TRAP_NOERR(T_NMIFLT)    /* $2 */
 324      -
 325      -        /*
 326      -         * Save all registers and setup segment registers
 327      -         * with kernel selectors.
 328      -         */
 329      -        INTR_PUSH
 330      -        INTGATE_INIT_KERNEL_FLAGS
 331      -
 332      -        TRACE_PTR(%edi, %ebx, %ebx, %ecx, $TT_TRAP)
 333      -        TRACE_REGS(%edi, %esp, %ebx, %ecx)
 334      -        TRACE_STAMP(%edi)
 335      -
 336      -        movl    %esp, %ebp
 337      -
 338      -        pushl   %ebp
 339      -        call    av_dispatch_nmivect
 340      -        addl    $4, %esp
 341      -
 342      -        INTR_POP_USER
 343      -        IRET
 344      -        SET_SIZE(nmiint)
 345      -
 346      -#endif  /* __i386 */
 347      -
 348      -        /*
 349  292           * #BP
 350  293           */
 351  294          ENTRY_NP(brktrap)
 352      -
 353      -#if defined(__amd64)
 354  295          XPV_TRAP_POP
 355  296          cmpw    $KCS_SEL, 8(%rsp)
 356  297          jne     bp_user
 357  298  
 358  299          /*
 359  300           * This is a breakpoint in the kernel -- it is very likely that this
 360  301           * is DTrace-induced.  To unify DTrace handling, we spoof this as an
 361  302           * invalid opcode (#UD) fault.  Note that #BP is a trap, not a fault --
 362  303           * we must decrement the trapping %rip to make it appear as a fault.
 363  304           * We then push a non-zero error code to indicate that this is coming
 364  305           * from #BP.
 365  306           */
 366  307          decq    (%rsp)
 367  308          push    $1                      /* error code -- non-zero for #BP */
 368  309          jmp     ud_kernel
 369  310  
 370  311  bp_user:
 371      -#endif /* __amd64 */
 372  312  
 373  313          NPTRAP_NOERR(T_BPTFLT)  /* $3 */
 374  314          jmp     dtrace_trap
 375  315  
 376  316          SET_SIZE(brktrap)
 377  317  
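
In C terms the kernel-mode path of brktrap performs just two stack
edits before handing off to ud_kernel (a sketch of the decq/push pair
above, not kernel code):

	#include <stdint.h>

	/*
	 * Kernel #BP is respun as #UD so DTrace handling is unified.
	 * #BP is a trap (%rip points past the int3), so back %rip up
	 * one byte for fault semantics; the non-zero error code is
	 * the "this was really #BP" marker that ud_trap tests later.
	 */
	static void
	brktrap_spoof_ud(uint64_t *saved_rip, uint64_t *err_out)
	{
		*saved_rip -= 1;	/* decq (%rsp) */
		*err_out = 1;		/* push $1 */
	}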
 378  318          /*
 379  319           * #OF
 380  320           */
 381  321          ENTRY_NP(ovflotrap)
 382  322          TRAP_NOERR(T_OVFLW)     /* $4 */
 383  323          jmp     cmntrap
  
 384  324          SET_SIZE(ovflotrap)
 385  325  
 386  326          /*
 387  327           * #BR
 388  328           */
 389  329          ENTRY_NP(boundstrap)
 390  330          TRAP_NOERR(T_BOUNDFLT)  /* $5 */
 391  331          jmp     cmntrap
 392  332          SET_SIZE(boundstrap)
 393  333  
 394      -#if defined(__amd64)
 395      -
 396  334          ENTRY_NP(invoptrap)
 397  335  
 398  336          XPV_TRAP_POP
 399  337  
 400  338          cmpw    $KCS_SEL, 8(%rsp)
 401  339          jne     ud_user
 402  340  
 403  341  #if defined(__xpv)
 404  342          movb    $0, 12(%rsp)            /* clear saved upcall_mask from %cs */
 405  343  #endif
 406  344          push    $0                      /* error code -- zero for #UD */
 407  345  ud_kernel:
 408  346          push    $0xdddd                 /* a dummy trap number */
 409  347          INTR_PUSH
 410  348          movq    REGOFF_RIP(%rsp), %rdi
 411  349          movq    REGOFF_RSP(%rsp), %rsi
 412  350          movq    REGOFF_RAX(%rsp), %rdx
 413  351          pushq   (%rsi)
 414  352          movq    %rsp, %rsi
 415  353          subq    $8, %rsp
 416  354          call    dtrace_invop
 417  355          ALTENTRY(dtrace_invop_callsite)
 418  356          addq    $16, %rsp
 419  357          cmpl    $DTRACE_INVOP_PUSHL_EBP, %eax
 420  358          je      ud_push
 421  359          cmpl    $DTRACE_INVOP_LEAVE, %eax
 422  360          je      ud_leave
 423  361          cmpl    $DTRACE_INVOP_NOP, %eax
 424  362          je      ud_nop
 425  363          cmpl    $DTRACE_INVOP_RET, %eax
 426  364          je      ud_ret
 427  365          jmp     ud_trap
 428  366  
 429  367  ud_push:
 430  368          /*
 431  369           * We must emulate a "pushq %rbp".  To do this, we pull the stack
 432  370           * down 8 bytes, and then store the base pointer.
 433  371           */
 434  372          INTR_POP
 435  373          subq    $16, %rsp               /* make room for %rbp */
 436  374          pushq   %rax                    /* push temp */
 437  375          movq    24(%rsp), %rax          /* load calling RIP */
 438  376          addq    $1, %rax                /* increment over trapping instr */
 439  377          movq    %rax, 8(%rsp)           /* store calling RIP */
 440  378          movq    32(%rsp), %rax          /* load calling CS */
 441  379          movq    %rax, 16(%rsp)          /* store calling CS */
 442  380          movq    40(%rsp), %rax          /* load calling RFLAGS */
 443  381          movq    %rax, 24(%rsp)          /* store calling RFLAGS */
 444  382          movq    48(%rsp), %rax          /* load calling RSP */
 445  383          subq    $8, %rax                /* make room for %rbp */
 446  384          movq    %rax, 32(%rsp)          /* store calling RSP */
  
 447  385          movq    56(%rsp), %rax          /* load calling SS */
 448  386          movq    %rax, 40(%rsp)          /* store calling SS */
 449  387          movq    32(%rsp), %rax          /* reload calling RSP */
 450  388          movq    %rbp, (%rax)            /* store %rbp there */
 451  389          popq    %rax                    /* pop off temp */
 452  390          jmp     tr_iret_kernel          /* return from interrupt */
 453  391          /*NOTREACHED*/
 454  392  
 455  393  ud_leave:
 456  394          /*
 457      -         * We must emulate a "leave", which is the same as a "movq %rbp, %rsp"
 458      -         * followed by a "popq %rbp".  This is quite a bit simpler on amd64
 459      -         * than it is on i386 -- we can exploit the fact that the %rsp is
 460      -         * explicitly saved to effect the pop without having to reshuffle
 461      -         * the other data pushed for the trap.
      395 +         * We must emulate a "leave", which is the same as a "movq %rbp,
      396 +         * %rsp" followed by a "popq %rbp".  We can exploit the fact
      397 +         * that the %rsp is explicitly saved to effect the pop without
      398 +         * having to reshuffle the other data pushed for the trap.
 462  399           */
      400 +
 463  401          INTR_POP
 464  402          pushq   %rax                    /* push temp */
 465  403          movq    8(%rsp), %rax           /* load calling RIP */
 466  404          addq    $1, %rax                /* increment over trapping instr */
 467  405          movq    %rax, 8(%rsp)           /* store calling RIP */
 468  406          movq    (%rbp), %rax            /* get new %rbp */
 469  407          addq    $8, %rbp                /* adjust new %rsp */
 470  408          movq    %rbp, 32(%rsp)          /* store new %rsp */
 471  409          movq    %rax, %rbp              /* set new %rbp */
 472  410          popq    %rax                    /* pop off temp */
 473  411          jmp     tr_iret_kernel          /* return from interrupt */
 474  412          /*NOTREACHED*/
 475  413  
 476  414  ud_nop:
 477  415          /*
 478  416           * We must emulate a "nop".  This is obviously not hard:  we need only
 479  417           * advance the %rip by one.
 480  418           */
 481  419          INTR_POP
 482  420          incq    (%rsp)
 483  421          jmp     tr_iret_kernel
 484  422          /*NOTREACHED*/
 485  423  
 486  424  ud_ret:
 487  425          INTR_POP
 488  426          pushq   %rax                    /* push temp */
 489  427          movq    32(%rsp), %rax          /* load %rsp */
 490  428          movq    (%rax), %rax            /* load calling RIP */
 491  429          movq    %rax, 8(%rsp)           /* store calling RIP */
 492  430          addq    $8, 32(%rsp)            /* adjust new %rsp */
 493  431          popq    %rax                    /* pop off temp */
 494  432          jmp     tr_iret_kernel          /* return from interrupt */
 495  433          /*NOTREACHED*/
 496  434  
 497  435  ud_trap:
 498  436          /*
 499  437           * We're going to let the kernel handle this as a normal #UD.  If,
 500  438           * however, we came through #BP and are spoofing #UD (in this case,
 501  439           * the stored error value will be non-zero), we need to de-spoof
 502  440           * the trap by incrementing %rip and pushing T_BPTFLT.
 503  441           */
 504  442          cmpq    $0, REGOFF_ERR(%rsp)
 505  443          je      ud_ud
 506  444          incq    REGOFF_RIP(%rsp)
 507  445          addq    $REGOFF_RIP, %rsp
  
 508  446          NPTRAP_NOERR(T_BPTFLT)  /* $3 */
 509  447          jmp     cmntrap
 510  448  
 511  449  ud_ud:
 512  450          addq    $REGOFF_RIP, %rsp
 513  451  ud_user:
 514  452          NPTRAP_NOERR(T_ILLINST)
 515  453          jmp     cmntrap
 516  454          SET_SIZE(invoptrap)
 517  455  
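
The cmpl/je chain after the dtrace_invop call is a four-way dispatch
on the emulation the FBT provider requests.  A C sketch of the same
control flow, operating on pointers into the saved register image
(the DTRACE_INVOP_* values are assumed, not quoted from sys/dtrace.h):

	#include <stdint.h>

	#define	DTRACE_INVOP_PUSHL_EBP	1	/* assumed values */
	#define	DTRACE_INVOP_LEAVE	3
	#define	DTRACE_INVOP_NOP	4
	#define	DTRACE_INVOP_RET	5

	static void
	emulate_invop(int which, uint64_t *rsp, uint64_t *rbp,
	    uint64_t *rip)
	{
		switch (which) {
		case DTRACE_INVOP_PUSHL_EBP:	/* ud_push: pushq %rbp */
			*rsp -= 8;
			*(uint64_t *)*rsp = *rbp;
			*rip += 1;	/* step over the trapping byte */
			break;
		case DTRACE_INVOP_LEAVE: {
			/* ud_leave: movq %rbp, %rsp; popq %rbp */
			uint64_t old_rbp = *rbp;
			*rbp = *(uint64_t *)old_rbp;
			*rsp = old_rbp + 8;
			*rip += 1;
			break;
		}
		case DTRACE_INVOP_NOP:		/* ud_nop */
			*rip += 1;
			break;
		case DTRACE_INVOP_RET:		/* ud_ret */
			*rip = *(uint64_t *)*rsp;
			*rsp += 8;
			break;
		default:	/* ud_trap: genuine #UD, go to trap() */
			break;
		}
	}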
 518      -#elif defined(__i386)
 519      -
 520  456          /*
 521      -         * #UD
 522      -         */
 523      -        ENTRY_NP(invoptrap)
 524      -        /*
 525      -         * If we are taking an invalid opcode trap while in the kernel, this
 526      -         * is likely an FBT probe point.
 527      -         */
 528      -        pushl   %gs
 529      -        cmpw    $KGS_SEL, (%esp)
 530      -        jne     8f
 531      -
 532      -        addl    $4, %esp
 533      -#if defined(__xpv)
 534      -        movb    $0, 6(%esp)             /* clear saved upcall_mask from %cs */
 535      -#endif  /* __xpv */
 536      -        pusha
 537      -        pushl   %eax                    /* push %eax -- may be return value */
 538      -        pushl   %esp                    /* push stack pointer */
 539      -        addl    $48, (%esp)             /* adjust to incoming args */
 540      -        pushl   40(%esp)                /* push calling EIP */
 541      -        call    dtrace_invop
 542      -        ALTENTRY(dtrace_invop_callsite)
 543      -        addl    $12, %esp
 544      -        cmpl    $DTRACE_INVOP_PUSHL_EBP, %eax
 545      -        je      1f
 546      -        cmpl    $DTRACE_INVOP_POPL_EBP, %eax
 547      -        je      2f
 548      -        cmpl    $DTRACE_INVOP_LEAVE, %eax
 549      -        je      3f
 550      -        cmpl    $DTRACE_INVOP_NOP, %eax
 551      -        je      4f
 552      -        jmp     7f
 553      -1:
 554      -        /*
 555      -         * We must emulate a "pushl %ebp".  To do this, we pull the stack
 556      -         * down 4 bytes, and then store the base pointer.
 557      -         */
 558      -        popa
 559      -        subl    $4, %esp                /* make room for %ebp */
 560      -        pushl   %eax                    /* push temp */
 561      -        movl    8(%esp), %eax           /* load calling EIP */
 562      -        incl    %eax                    /* increment over LOCK prefix */
 563      -        movl    %eax, 4(%esp)           /* store calling EIP */
 564      -        movl    12(%esp), %eax          /* load calling CS */
 565      -        movl    %eax, 8(%esp)           /* store calling CS */
 566      -        movl    16(%esp), %eax          /* load calling EFLAGS */
 567      -        movl    %eax, 12(%esp)          /* store calling EFLAGS */
 568      -        movl    %ebp, 16(%esp)          /* push %ebp */
 569      -        popl    %eax                    /* pop off temp */
 570      -        jmp     _emul_done
 571      -2:
 572      -        /*
 573      -         * We must emulate a "popl %ebp".  To do this, we do the opposite of
 574      -         * the above:  we remove the %ebp from the stack, and squeeze up the
 575      -         * saved state from the trap.
 576      -         */
 577      -        popa
 578      -        pushl   %eax                    /* push temp */
 579      -        movl    16(%esp), %ebp          /* pop %ebp */
 580      -        movl    12(%esp), %eax          /* load calling EFLAGS */
 581      -        movl    %eax, 16(%esp)          /* store calling EFLAGS */
 582      -        movl    8(%esp), %eax           /* load calling CS */
 583      -        movl    %eax, 12(%esp)          /* store calling CS */
 584      -        movl    4(%esp), %eax           /* load calling EIP */
 585      -        incl    %eax                    /* increment over LOCK prefix */
 586      -        movl    %eax, 8(%esp)           /* store calling EIP */
 587      -        popl    %eax                    /* pop off temp */
 588      -        addl    $4, %esp                /* adjust stack pointer */
 589      -        jmp     _emul_done
 590      -3:
 591      -        /*
 592      -         * We must emulate a "leave", which is the same as a "movl %ebp, %esp"
 593      -         * followed by a "popl %ebp".  This looks similar to the above, but
 594      -         * requires two temporaries:  one for the new base pointer, and one
 595      -         * for the staging register.
 596      -         */
 597      -        popa
 598      -        pushl   %eax                    /* push temp */
 599      -        pushl   %ebx                    /* push temp */
 600      -        movl    %ebp, %ebx              /* set temp to old %ebp */
 601      -        movl    (%ebx), %ebp            /* pop %ebp */
 602      -        movl    16(%esp), %eax          /* load calling EFLAGS */
 603      -        movl    %eax, (%ebx)            /* store calling EFLAGS */
 604      -        movl    12(%esp), %eax          /* load calling CS */
 605      -        movl    %eax, -4(%ebx)          /* store calling CS */
 606      -        movl    8(%esp), %eax           /* load calling EIP */
 607      -        incl    %eax                    /* increment over LOCK prefix */
 608      -        movl    %eax, -8(%ebx)          /* store calling EIP */
 609      -        movl    %ebx, -4(%esp)          /* temporarily store new %esp */
 610      -        popl    %ebx                    /* pop off temp */
 611      -        popl    %eax                    /* pop off temp */
 612      -        movl    -12(%esp), %esp         /* set stack pointer */
 613      -        subl    $8, %esp                /* adjust for three pushes, one pop */
 614      -        jmp     _emul_done
 615      -4:
 616      -        /*
 617      -         * We must emulate a "nop".  This is obviously not hard:  we need only
 618      -         * advance the %eip by one.
 619      -         */
 620      -        popa
 621      -        incl    (%esp)
 622      -_emul_done:
 623      -        IRET                            /* return from interrupt */
 624      -7:
 625      -        popa
 626      -        pushl   $0
 627      -        pushl   $T_ILLINST      /* $6 */
 628      -        jmp     cmntrap
 629      -8:
 630      -        addl    $4, %esp
 631      -        pushl   $0
 632      -        pushl   $T_ILLINST      /* $6 */
 633      -        jmp     cmntrap
 634      -        SET_SIZE(invoptrap)
 635      -
 636      -#endif  /* __i386 */
 637      -
 638      -        /*
 639  457           * #NM
 640  458           */
 641  459  
 642  460          ENTRY_NP(ndptrap)
 643  461          TRAP_NOERR(T_NOEXTFLT)  /* $0 */
 644  462          SET_CPU_GSBASE
 645  463          jmp     cmntrap
 646  464          SET_SIZE(ndptrap)
 647  465  
 648  466  #if !defined(__xpv)
 649      -#if defined(__amd64)
 650  467  
 651  468          /*
 652  469           * #DF
 653  470           */
 654  471          ENTRY_NP(syserrtrap)
 655  472          pushq   $T_DBLFLT
 656  473          SET_CPU_GSBASE
 657  474  
 658  475          /*
 659  476           * We share this handler with kmdb (if kmdb is loaded).  As such, we
 660  477           * may have reached this point after encountering a #df in kmdb.  If
 661  478           * that happens, we'll still be on kmdb's IDT.  We need to switch back
 662  479           * to this CPU's IDT before proceeding.  Furthermore, if we did arrive
 663  480           * here from kmdb, kmdb is probably in a very sickly state, and
 664  481           * shouldn't be entered from the panic flow.  We'll suppress that
 665  482           * entry by setting nopanicdebug.
 666  483           */
 667  484          pushq   %rax
 668  485          subq    $DESCTBR_SIZE, %rsp
 669  486          sidt    (%rsp)
 670  487          movq    %gs:CPU_IDT, %rax
 671  488          cmpq    %rax, DTR_BASE(%rsp)
 672  489          je      1f
 673  490  
 674  491          movq    %rax, DTR_BASE(%rsp)
 675  492          movw    $_MUL(NIDT, GATE_DESC_SIZE), DTR_LIMIT(%rsp)
 676  493          lidt    (%rsp)
 677  494  
 678  495          movl    $1, nopanicdebug
 679  496  
 680  497  1:      addq    $DESCTBR_SIZE, %rsp
 681  498          popq    %rax
 682  499  
 683  500          DFTRAP_PUSH
 684  501  
 685  502          /*
 686  503           * freeze trap trace.
 687  504           */
 688  505  #ifdef TRAPTRACE
 689  506          leaq    trap_trace_freeze(%rip), %r11
 690  507          incl    (%r11)
 691  508  #endif
  
 692  509  
 693  510          ENABLE_INTR_FLAGS
 694  511  
  695  512          movq    %rsp, %rdi      /* &regs */
 696  513          xorl    %esi, %esi      /* clear address */
 697  514          xorl    %edx, %edx      /* cpuid = 0 */
 698  515          call    trap
 699  516  
 700  517          SET_SIZE(syserrtrap)
 701  518  
 702      -#elif defined(__i386)
 703      -
 704      -        /*
 705      -         * #DF
 706      -         */
 707      -        ENTRY_NP(syserrtrap)
 708      -        cli                             /* disable interrupts */
 709      -
 710      -        /*
 711      -         * We share this handler with kmdb (if kmdb is loaded).  As such, we
 712      -         * may have reached this point after encountering a #df in kmdb.  If
 713      -         * that happens, we'll still be on kmdb's IDT.  We need to switch back
 714      -         * to this CPU's IDT before proceeding.  Furthermore, if we did arrive
 715      -         * here from kmdb, kmdb is probably in a very sickly state, and
 716      -         * shouldn't be entered from the panic flow.  We'll suppress that
 717      -         * entry by setting nopanicdebug.
 718      -         */
 719      -
 720      -        subl    $DESCTBR_SIZE, %esp
 721      -        movl    %gs:CPU_IDT, %eax
 722      -        sidt    (%esp)
 723      -        cmpl    DTR_BASE(%esp), %eax
 724      -        je      1f
 725      -
 726      -        movl    %eax, DTR_BASE(%esp)
 727      -        movw    $_MUL(NIDT, GATE_DESC_SIZE), DTR_LIMIT(%esp)
 728      -        lidt    (%esp)
 729      -
 730      -        movl    $1, nopanicdebug
 731      -
 732      -1:      addl    $DESCTBR_SIZE, %esp
 733      -
 734      -        /*
 735      -         * Check the CPL in the TSS to see what mode
 736      -         * (user or kernel) we took the fault in.  At this
 737      -         * point we are running in the context of the double
 738      -         * fault task (dftss) but the CPU's task points to
 739      -         * the previous task (ktss) where the process context
 740      -         * has been saved as the result of the task switch.
 741      -         */
 742      -        movl    %gs:CPU_TSS, %eax       /* get the TSS */
 743      -        movl    TSS_SS(%eax), %ebx      /* save the fault SS */
 744      -        movl    TSS_ESP(%eax), %edx     /* save the fault ESP */
 745      -        testw   $CPL_MASK, TSS_CS(%eax) /* user mode ? */
 746      -        jz      make_frame
 747      -        movw    TSS_SS0(%eax), %ss      /* get on the kernel stack */
 748      -        movl    TSS_ESP0(%eax), %esp
 749      -
 750      -        /*
 751      -         * Clear the NT flag to avoid a task switch when the process
 752      -         * finally pops the EFL off the stack via an iret.  Clear
 753      -         * the TF flag since that is what the processor does for
 754      -         * a normal exception. Clear the IE flag so that interrupts
 755      -         * remain disabled.
 756      -         */
 757      -        movl    TSS_EFL(%eax), %ecx
 758      -        andl    $_BITNOT(PS_NT|PS_T|PS_IE), %ecx
 759      -        pushl   %ecx
 760      -        popfl                           /* restore the EFL */
 761      -        movw    TSS_LDT(%eax), %cx      /* restore the LDT */
 762      -        lldt    %cx
 763      -
 764      -        /*
 765      -         * Restore process segment selectors.
 766      -         */
 767      -        movw    TSS_DS(%eax), %ds
 768      -        movw    TSS_ES(%eax), %es
 769      -        movw    TSS_FS(%eax), %fs
 770      -        movw    TSS_GS(%eax), %gs
 771      -
 772      -        /*
 773      -         * Restore task segment selectors.
 774      -         */
 775      -        movl    $KDS_SEL, TSS_DS(%eax)
 776      -        movl    $KDS_SEL, TSS_ES(%eax)
 777      -        movl    $KDS_SEL, TSS_SS(%eax)
 778      -        movl    $KFS_SEL, TSS_FS(%eax)
 779      -        movl    $KGS_SEL, TSS_GS(%eax)
 780      -
 781      -        /*
 782      -         * Clear the TS bit, the busy bits in both task
 783      -         * descriptors, and switch tasks.
 784      -         */
 785      -        clts
 786      -        leal    gdt0, %ecx
 787      -        movl    DFTSS_SEL+4(%ecx), %esi
 788      -        andl    $_BITNOT(0x200), %esi
 789      -        movl    %esi, DFTSS_SEL+4(%ecx)
 790      -        movl    KTSS_SEL+4(%ecx), %esi
 791      -        andl    $_BITNOT(0x200), %esi
 792      -        movl    %esi, KTSS_SEL+4(%ecx)
 793      -        movw    $KTSS_SEL, %cx
 794      -        ltr     %cx
 795      -
 796      -        /*
 797      -         * Restore part of the process registers.
 798      -         */
 799      -        movl    TSS_EBP(%eax), %ebp
 800      -        movl    TSS_ECX(%eax), %ecx
 801      -        movl    TSS_ESI(%eax), %esi
 802      -        movl    TSS_EDI(%eax), %edi
 803      -
 804      -make_frame:
 805      -        /*
 806      -         * Make a trap frame.  Leave the error code (0) on
 807      -         * the stack since the first word on a trap stack is
 808      -         * unused anyway.
 809      -         */
 810      -        pushl   %ebx                    / fault SS
 811      -        pushl   %edx                    / fault ESP
 812      -        pushl   TSS_EFL(%eax)           / fault EFL
 813      -        pushl   TSS_CS(%eax)            / fault CS
 814      -        pushl   TSS_EIP(%eax)           / fault EIP
 815      -        pushl   $0                      / error code
 816      -        pushl   $T_DBLFLT               / trap number 8
 817      -        movl    TSS_EBX(%eax), %ebx     / restore EBX
 818      -        movl    TSS_EDX(%eax), %edx     / restore EDX
 819      -        movl    TSS_EAX(%eax), %eax     / restore EAX
 820      -        sti                             / enable interrupts
 821      -        jmp     cmntrap
 822      -        SET_SIZE(syserrtrap)
 823      -
 824      -#endif  /* __i386 */
 825  519  #endif  /* !__xpv */
 826  520  
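
The sidt/cmpq sequence at the top of syserrtrap reduces to this C
paraphrase (sketch only; desctbr_t mirrors the DTR_BASE/DTR_LIMIT
image used above, and the externs stand in for %gs:CPU_IDT and the
sidt result):

	#include <stdint.h>

	typedef struct {
		uint16_t	dtr_limit;
		uint64_t	dtr_base;
	} __attribute__((packed)) desctbr_t;

	extern desctbr_t	cur_idtr;	/* what sidt reported */
	extern void		*this_cpu_idt;	/* %gs:CPU_IDT */
	extern int		nopanicdebug;

	/*
	 * If the #DF arrived on kmdb's IDT, point the CPU back at
	 * its own IDT (lidt in the real code) and keep the panic
	 * flow out of the now-sickly debugger.
	 */
	static void
	df_resync_idt(void)
	{
		if (cur_idtr.dtr_base != (uint64_t)this_cpu_idt) {
			cur_idtr.dtr_base = (uint64_t)this_cpu_idt;
			nopanicdebug = 1;
		}
	}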
 827  521          /*
 828  522           * #TS
 829  523           */
 830  524          ENTRY_NP(invtsstrap)
 831  525          TRAP_ERR(T_TSSFLT)      /* $10 already have error code on stack */
 832  526          jmp     cmntrap
 833  527          SET_SIZE(invtsstrap)
 834  528  
 835  529          /*
 836  530           * #NP
 837  531           */
 838  532          ENTRY_NP(segnptrap)
 839  533          TRAP_ERR(T_SEGFLT)      /* $11 already have error code on stack */
 840      -#if defined(__amd64)
 841  534          SET_CPU_GSBASE
 842      -#endif
 843  535          jmp     cmntrap
 844  536          SET_SIZE(segnptrap)
 845  537  
 846  538          /*
 847  539           * #SS
 848  540           */
 849  541          ENTRY_NP(stktrap)
 850  542          TRAP_ERR(T_STKFLT)      /* $12 already have error code on stack */
 851      -#if defined(__amd64)
 852  543          SET_CPU_GSBASE
 853      -#endif
 854  544          jmp     cmntrap
 855  545          SET_SIZE(stktrap)
 856  546  
 857  547          /*
 858  548           * #GP
 859  549           */
 860  550          ENTRY_NP(gptrap)
 861  551          TRAP_ERR(T_GPFLT)       /* $13 already have error code on stack */
 862      -#if defined(__amd64)
 863  552          SET_CPU_GSBASE
 864      -#endif
 865  553          jmp     cmntrap
 866  554          SET_SIZE(gptrap)
 867  555  
 868  556          /*
 869  557           * #PF
 870  558           */
 871  559          ENTRY_NP(pftrap)
 872  560          TRAP_ERR(T_PGFLT)       /* $14 already have error code on stack */
 873  561          INTR_PUSH
 874  562  #if defined(__xpv)
 875  563  
 876      -#if defined(__amd64)
 877  564          movq    %gs:CPU_VCPU_INFO, %r15
 878  565          movq    VCPU_INFO_ARCH_CR2(%r15), %r15  /* vcpu[].arch.cr2 */
 879      -#elif defined(__i386)
 880      -        movl    %gs:CPU_VCPU_INFO, %esi
 881      -        movl    VCPU_INFO_ARCH_CR2(%esi), %esi  /* vcpu[].arch.cr2 */
 882      -#endif  /* __i386 */
 883  566  
 884  567  #else   /* __xpv */
 885  568  
 886      -#if defined(__amd64)
 887  569          movq    %cr2, %r15
 888      -#elif defined(__i386)
 889      -        movl    %cr2, %esi
 890      -#endif  /* __i386 */
 891  570  
 892  571  #endif  /* __xpv */
 893  572          jmp     cmntrap_pushed
 894  573          SET_SIZE(pftrap)
 895  574  
 896      -#if !defined(__amd64)
 897      -
 898      -        .globl  idt0_default_r
 899      -
 900      -        /*
 901      -         * #PF pentium bug workaround
 902      -         */
 903      -        ENTRY_NP(pentium_pftrap)
 904      -        pushl   %eax
 905      -        movl    %cr2, %eax
 906      -        andl    $MMU_STD_PAGEMASK, %eax
 907      -
 908      -        cmpl    %eax, %cs:idt0_default_r+2      /* fixme */
 909      -
 910      -        je      check_for_user_address
 911      -user_mode:
 912      -        popl    %eax
 913      -        pushl   $T_PGFLT        /* $14 */
 914      -        jmp     cmntrap
 915      -check_for_user_address:
 916      -        /*
 917      -         * Before we assume that we have an unmapped trap on our hands,
 918      -         * check to see if this is a fault from user mode.  If it is,
 919      -         * we'll kick back into the page fault handler.
 920      -         */
 921      -        movl    4(%esp), %eax   /* error code */
 922      -        andl    $PF_ERR_USER, %eax
 923      -        jnz     user_mode
 924      -
 925      -        /*
 926      -         * We now know that this is the invalid opcode trap.
 927      -         */
 928      -        popl    %eax
 929      -        addl    $4, %esp        /* pop error code */
 930      -        jmp     invoptrap
 931      -        SET_SIZE(pentium_pftrap)
 932      -
 933      -#endif  /* !__amd64 */
 934      -
 935  575          ENTRY_NP(resvtrap)
 936  576          TRAP_NOERR(T_RESVTRAP)  /* (reserved)  */
 937  577          jmp     cmntrap
 938  578          SET_SIZE(resvtrap)
 939  579  
 940  580          /*
 941  581           * #MF
 942  582           */
 943  583          ENTRY_NP(ndperr)
 944  584          TRAP_NOERR(T_EXTERRFLT) /* $16 */
 945  585          jmp     cmninttrap
 946  586          SET_SIZE(ndperr)
 947  587  
 948  588          /*
 949  589           * #AC
 950  590           */
  
  950  590           */
 951  591          ENTRY_NP(achktrap)
 952  592          TRAP_ERR(T_ALIGNMENT)   /* $17 */
 953  593          jmp     cmntrap
 954  594          SET_SIZE(achktrap)
 955  595  
 956  596          /*
 957  597           * #MC
 958  598           */
 959  599          .globl  cmi_mca_trap    /* see uts/i86pc/os/cmi.c */
 960  600  
 961      -#if defined(__amd64)
 962      -
 963  601          ENTRY_NP(mcetrap)
 964  602          TRAP_NOERR(T_MCE)       /* $18 */
 965  603  
 966  604          SET_CPU_GSBASE
 967  605  
 968  606          INTR_PUSH
 969  607          INTGATE_INIT_KERNEL_FLAGS
 970  608  
 971  609          TRACE_PTR(%rdi, %rbx, %ebx, %rcx, $TT_TRAP)
 972  610          TRACE_REGS(%rdi, %rsp, %rbx, %rcx)
 973  611          TRACE_STAMP(%rdi)
 974  612  
 975  613          movq    %rsp, %rbp
 976  614  
 977  615          movq    %rsp, %rdi      /* arg0 = struct regs *rp */
 978  616          call    cmi_mca_trap    /* cmi_mca_trap(rp); */
 979  617  
 980  618          jmp     _sys_rtt
 981  619          SET_SIZE(mcetrap)
 982  620  
 983      -#else
 984      -
 985      -        ENTRY_NP(mcetrap)
 986      -        TRAP_NOERR(T_MCE)       /* $18 */
 987      -
 988      -        INTR_PUSH
 989      -        INTGATE_INIT_KERNEL_FLAGS
 990      -
 991      -        TRACE_PTR(%edi, %ebx, %ebx, %ecx, $TT_TRAP)
 992      -        TRACE_REGS(%edi, %esp, %ebx, %ecx)
 993      -        TRACE_STAMP(%edi)
 994      -
 995      -        movl    %esp, %ebp
 996      -
 997      -        movl    %esp, %ecx
 998      -        pushl   %ecx            /* arg0 = struct regs *rp */
 999      -        call    cmi_mca_trap    /* cmi_mca_trap(rp) */
1000      -        addl    $4, %esp        /* pop arg0 */
1001      -
1002      -        jmp     _sys_rtt
1003      -        SET_SIZE(mcetrap)
1004      -
1005      -#endif
1006      -
1007  621          /*
1008  622           * #XF
1009  623           */
1010  624          ENTRY_NP(xmtrap)
1011  625          TRAP_NOERR(T_SIMDFPE)   /* $19 */
1012  626          jmp     cmninttrap
1013  627          SET_SIZE(xmtrap)
1014  628  
1015  629          ENTRY_NP(invaltrap)
1016  630          TRAP_NOERR(T_INVALTRAP) /* very invalid */
1017  631          jmp     cmntrap
1018  632          SET_SIZE(invaltrap)
1019  633  
1020  634          .globl  fasttable
1021  635  
1022      -#if defined(__amd64)
1023      -
1024  636          ENTRY_NP(fasttrap)
1025  637          cmpl    $T_LASTFAST, %eax
1026  638          ja      1f
1027  639          orl     %eax, %eax      /* (zero extend top 32-bits) */
1028  640          leaq    fasttable(%rip), %r11
1029  641          leaq    (%r11, %rax, CLONGSIZE), %r11
1030  642          movq    (%r11), %r11
1031  643          INDIRECT_JMP_REG(r11)
1032  644  1:
1033  645          /*
1034  646           * Fast syscall number was illegal.  Make it look
1035  647           * as if the INT failed.  Modify %rip to point before the
1036  648           * INT, push the expected error code and fake a GP fault.
1037  649           *
1038  650           * XXX Why make the error code be offset into idt + 1?
1039  651           * Instead we should push a real (soft?) error code
1040  652           * on the stack and #gp handler could know about fasttraps?
1041  653           */
1042  654          XPV_TRAP_POP
1043  655  
  
1044  656          subq    $2, (%rsp)      /* XXX int insn 2-bytes */
1045  657          pushq   $_CONST(_MUL(T_FASTTRAP, GATE_DESC_SIZE) + 2)
1046  658  
1047  659  #if defined(__xpv)
1048  660          pushq   %r11
1049  661          pushq   %rcx
1050  662  #endif
1051  663          jmp     gptrap
1052  664          SET_SIZE(fasttrap)
1053  665  
1054      -#elif defined(__i386)
1055      -
1056      -        ENTRY_NP(fasttrap)
1057      -        cmpl    $T_LASTFAST, %eax
1058      -        ja      1f
1059      -        jmp     *%cs:fasttable(, %eax, CLONGSIZE)
1060      -1:
1061      -        /*
1062      -         * Fast syscall number was illegal.  Make it look
1063      -         * as if the INT failed.  Modify %eip to point before the
1064      -         * INT, push the expected error code and fake a GP fault.
1065      -         *
1066      -         * XXX Why make the error code be offset into idt + 1?
1067      -         * Instead we should push a real (soft?) error code
1068      -         * on the stack and #gp handler could know about fasttraps?
1069      -         */
1070      -        subl    $2, (%esp)      /* XXX int insn 2-bytes */
1071      -        pushl   $_CONST(_MUL(T_FASTTRAP, GATE_DESC_SIZE) + 2)
1072      -        jmp     gptrap
1073      -        SET_SIZE(fasttrap)
1074      -
1075      -#endif  /* __i386 */
1076      -
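
fasttrap's control flow in C terms (a sketch; T_LASTFAST's value is a
placeholder, and fake_gp_fault() is a hypothetical stand-in for the
%rip rewind plus error-code push that funnels into gptrap):

	#include <stdint.h>

	#define	T_LASTFAST	6		/* placeholder bound */

	extern void	(*fasttable[T_LASTFAST + 1])(void);
	extern void	fake_gp_fault(void);	/* hypothetical */

	static void
	fasttrap_dispatch(uint32_t num)
	{
		if (num <= T_LASTFAST)
			fasttable[num]();	/* INDIRECT_JMP_REG */
		else
			fake_gp_fault();	/* spoofed #GP, as above */
	}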
1077  666          ENTRY_NP(dtrace_ret)
1078  667          TRAP_NOERR(T_DTRACE_RET)
1079  668          jmp     dtrace_trap
1080  669          SET_SIZE(dtrace_ret)
1081  670  
1082      -#if defined(__amd64)
1083      -
1084  671          /*
1085  672           * RFLAGS 24 bytes up the stack from %rsp.
1086  673           * XXX a constant would be nicer.
1087  674           */
1088  675          ENTRY_NP(fast_null)
1089  676          XPV_TRAP_POP
1090  677          orq     $PS_C, 24(%rsp) /* set carry bit in user flags */
1091  678          call    x86_md_clear
1092  679          jmp     tr_iret_auto
1093  680          /*NOTREACHED*/
1094  681          SET_SIZE(fast_null)
1095  682  
1096      -#elif defined(__i386)
1097      -
1098      -        ENTRY_NP(fast_null)
1099      -        orw     $PS_C, 8(%esp)  /* set carry bit in user flags */
1100      -        IRET
1101      -        SET_SIZE(fast_null)
1102      -
1103      -#endif  /* __i386 */
1104      -
1105  683          /*
1106  684           * Interrupts start at 32
1107  685           */
1108  686  #define MKIVCT(n)                       \
1109  687          ENTRY_NP(ivct/**/n)             \
1110  688          push    $0;                     \
1111  689          push    $n - 0x20;              \
1112  690          jmp     cmnint;                 \
1113  691          SET_SIZE(ivct/**/n)
1114  692  
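
Each MKIVCT(n) stub gives cmnint the same normalized frame as the trap
handlers above: a zero "error code" plus the vector rebased so that
vector 0x20 arrives as interrupt number 0.  A one-line C model of the
rebasing (illustrative only):

	/* Rebasing done by the "push $n - 0x20" in each stub. */
	static inline int
	ivct_to_irq(int vector)
	{
		return (vector - 0x20);	/* vector 32 -> interrupt 0 */
	}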
1115  693          MKIVCT(32)
1116  694          MKIVCT(33)
1117  695          MKIVCT(34)
1118  696          MKIVCT(35)
1119  697          MKIVCT(36)
1120  698          MKIVCT(37)
1121  699          MKIVCT(38)
1122  700          MKIVCT(39)
1123  701          MKIVCT(40)
1124  702          MKIVCT(41)
1125  703          MKIVCT(42)
1126  704          MKIVCT(43)
1127  705          MKIVCT(44)
1128  706          MKIVCT(45)
1129  707          MKIVCT(46)
1130  708          MKIVCT(47)
1131  709          MKIVCT(48)
1132  710          MKIVCT(49)
1133  711          MKIVCT(50)
1134  712          MKIVCT(51)
1135  713          MKIVCT(52)
1136  714          MKIVCT(53)
1137  715          MKIVCT(54)
1138  716          MKIVCT(55)
1139  717          MKIVCT(56)
1140  718          MKIVCT(57)
1141  719          MKIVCT(58)
1142  720          MKIVCT(59)
1143  721          MKIVCT(60)
1144  722          MKIVCT(61)
1145  723          MKIVCT(62)
1146  724          MKIVCT(63)
1147  725          MKIVCT(64)
1148  726          MKIVCT(65)
1149  727          MKIVCT(66)
1150  728          MKIVCT(67)
1151  729          MKIVCT(68)
1152  730          MKIVCT(69)
1153  731          MKIVCT(70)
1154  732          MKIVCT(71)
1155  733          MKIVCT(72)
1156  734          MKIVCT(73)
1157  735          MKIVCT(74)
1158  736          MKIVCT(75)
1159  737          MKIVCT(76)
1160  738          MKIVCT(77)
1161  739          MKIVCT(78)
1162  740          MKIVCT(79)
1163  741          MKIVCT(80)
1164  742          MKIVCT(81)
1165  743          MKIVCT(82)
1166  744          MKIVCT(83)
1167  745          MKIVCT(84)
1168  746          MKIVCT(85)
1169  747          MKIVCT(86)
1170  748          MKIVCT(87)
1171  749          MKIVCT(88)
1172  750          MKIVCT(89)
1173  751          MKIVCT(90)
1174  752          MKIVCT(91)
1175  753          MKIVCT(92)
1176  754          MKIVCT(93)
1177  755          MKIVCT(94)
1178  756          MKIVCT(95)
1179  757          MKIVCT(96)
1180  758          MKIVCT(97)
1181  759          MKIVCT(98)
1182  760          MKIVCT(99)
1183  761          MKIVCT(100)
1184  762          MKIVCT(101)
1185  763          MKIVCT(102)
1186  764          MKIVCT(103)
1187  765          MKIVCT(104)
1188  766          MKIVCT(105)
1189  767          MKIVCT(106)
1190  768          MKIVCT(107)
1191  769          MKIVCT(108)
1192  770          MKIVCT(109)
1193  771          MKIVCT(110)
1194  772          MKIVCT(111)
1195  773          MKIVCT(112)
1196  774          MKIVCT(113)
1197  775          MKIVCT(114)
1198  776          MKIVCT(115)
1199  777          MKIVCT(116)
1200  778          MKIVCT(117)
1201  779          MKIVCT(118)
1202  780          MKIVCT(119)
1203  781          MKIVCT(120)
1204  782          MKIVCT(121)
1205  783          MKIVCT(122)
1206  784          MKIVCT(123)
1207  785          MKIVCT(124)
1208  786          MKIVCT(125)
1209  787          MKIVCT(126)
1210  788          MKIVCT(127)
1211  789          MKIVCT(128)
1212  790          MKIVCT(129)
1213  791          MKIVCT(130)
1214  792          MKIVCT(131)
1215  793          MKIVCT(132)
1216  794          MKIVCT(133)
1217  795          MKIVCT(134)
1218  796          MKIVCT(135)
1219  797          MKIVCT(136)
1220  798          MKIVCT(137)
1221  799          MKIVCT(138)
1222  800          MKIVCT(139)
1223  801          MKIVCT(140)
1224  802          MKIVCT(141)
1225  803          MKIVCT(142)
1226  804          MKIVCT(143)
1227  805          MKIVCT(144)
1228  806          MKIVCT(145)
1229  807          MKIVCT(146)
1230  808          MKIVCT(147)
1231  809          MKIVCT(148)
1232  810          MKIVCT(149)
1233  811          MKIVCT(150)
1234  812          MKIVCT(151)
1235  813          MKIVCT(152)
1236  814          MKIVCT(153)
1237  815          MKIVCT(154)
1238  816          MKIVCT(155)
1239  817          MKIVCT(156)
1240  818          MKIVCT(157)
1241  819          MKIVCT(158)
1242  820          MKIVCT(159)
1243  821          MKIVCT(160)
1244  822          MKIVCT(161)
1245  823          MKIVCT(162)
1246  824          MKIVCT(163)
1247  825          MKIVCT(164)
1248  826          MKIVCT(165)
1249  827          MKIVCT(166)
1250  828          MKIVCT(167)
1251  829          MKIVCT(168)
1252  830          MKIVCT(169)
1253  831          MKIVCT(170)
1254  832          MKIVCT(171)
1255  833          MKIVCT(172)
1256  834          MKIVCT(173)
1257  835          MKIVCT(174)
1258  836          MKIVCT(175)
1259  837          MKIVCT(176)
1260  838          MKIVCT(177)
1261  839          MKIVCT(178)
1262  840          MKIVCT(179)
1263  841          MKIVCT(180)
1264  842          MKIVCT(181)
1265  843          MKIVCT(182)
1266  844          MKIVCT(183)
1267  845          MKIVCT(184)
1268  846          MKIVCT(185)
1269  847          MKIVCT(186)
1270  848          MKIVCT(187)
1271  849          MKIVCT(188)
1272  850          MKIVCT(189)
1273  851          MKIVCT(190)
1274  852          MKIVCT(191)
1275  853          MKIVCT(192)
1276  854          MKIVCT(193)
1277  855          MKIVCT(194)
1278  856          MKIVCT(195)
1279  857          MKIVCT(196)
1280  858          MKIVCT(197)
1281  859          MKIVCT(198)
1282  860          MKIVCT(199)
1283  861          MKIVCT(200)
1284  862          MKIVCT(201)
1285  863          MKIVCT(202)
1286  864          MKIVCT(203)
1287  865          MKIVCT(204)
1288  866          MKIVCT(205)
1289  867          MKIVCT(206)
1290  868          MKIVCT(207)
1291  869          MKIVCT(208)
1292  870          MKIVCT(209)
1293  871          MKIVCT(210)
1294  872          MKIVCT(211)
1295  873          MKIVCT(212)
1296  874          MKIVCT(213)
1297  875          MKIVCT(214)
1298  876          MKIVCT(215)
1299  877          MKIVCT(216)
1300  878          MKIVCT(217)
1301  879          MKIVCT(218)
1302  880          MKIVCT(219)
1303  881          MKIVCT(220)
1304  882          MKIVCT(221)
1305  883          MKIVCT(222)
1306  884          MKIVCT(223)
1307  885          MKIVCT(224)
1308  886          MKIVCT(225)
1309  887          MKIVCT(226)
1310  888          MKIVCT(227)
1311  889          MKIVCT(228)
1312  890          MKIVCT(229)
1313  891          MKIVCT(230)
1314  892          MKIVCT(231)
1315  893          MKIVCT(232)
1316  894          MKIVCT(233)
1317  895          MKIVCT(234)
1318  896          MKIVCT(235)
1319  897          MKIVCT(236)
1320  898          MKIVCT(237)
1321  899          MKIVCT(238)
1322  900          MKIVCT(239)
1323  901          MKIVCT(240)
1324  902          MKIVCT(241)
1325  903          MKIVCT(242)
1326  904          MKIVCT(243)
1327  905          MKIVCT(244)
1328  906          MKIVCT(245)
1329  907          MKIVCT(246)
  
1330  908          MKIVCT(247)
1331  909          MKIVCT(248)
1332  910          MKIVCT(249)
1333  911          MKIVCT(250)
1334  912          MKIVCT(251)
1335  913          MKIVCT(252)
1336  914          MKIVCT(253)
1337  915          MKIVCT(254)
1338  916          MKIVCT(255)
1339  917  
1340      -#endif  /* __lint */
    