de-linting of .s files

--- old/usr/src/uts/intel/ia32/ml/exception.s
+++ new/usr/src/uts/intel/ia32/ml/exception.s
(43 lines elided)
  44   44  #include <sys/asm_misc.h>
  45   45  #include <sys/trap.h>
  46   46  #include <sys/psw.h>
  47   47  #include <sys/regset.h>
  48   48  #include <sys/privregs.h>
  49   49  #include <sys/dtrace.h>
  50   50  #include <sys/x86_archext.h>
  51   51  #include <sys/traptrace.h>
  52   52  #include <sys/machparam.h>
  53   53  
  54      -#if !defined(__lint)
  55      -
  56   54  #include "assym.h"
  57   55  
  58   56  /*
  59   57   * push $0 on stack for traps that do not
  60   58   * generate an error code. This is so the rest
  61   59   * of the kernel can expect a consistent stack
  62   60   * from any exception.
  63   61   *
  64   62   * Note that for all exceptions on amd64,
  65   63   * %r11 and %rcx are on the stack. Just pop
  66   64   * them back into their appropriate registers and let
  67   65   * it get saved as if running native.
  68   66   */
  69   67  
  70      -#if defined(__xpv) && defined(__amd64)
       68 +#if defined(__xpv)
  71   69  
  72   70  #define NPTRAP_NOERR(trapno)    \
  73   71          pushq   $0;             \
  74   72          pushq   $trapno
  75   73  
  76   74  #define TRAP_NOERR(trapno)      \
  77   75          XPV_TRAP_POP;           \
  78   76          NPTRAP_NOERR(trapno)
  79   77  
  80   78  /*
  81   79   * error code already pushed by hw
  82   80   * onto stack.
  83   81   */
  84   82  #define TRAP_ERR(trapno)        \
  85   83          XPV_TRAP_POP;           \
  86   84          pushq   $trapno
  87   85  
  88      -#else /* __xpv && __amd64 */
       86 +#else /* __xpv */
  89   87  
  90   88  #define TRAP_NOERR(trapno)      \
  91   89          push    $0;             \
  92   90          push    $trapno
  93   91  
  94   92  #define NPTRAP_NOERR(trapno) TRAP_NOERR(trapno)
  95   93  
  96   94  /*
  97   95   * error code already pushed by hw
  98   96   * onto stack.
  99   97   */
 100   98  #define TRAP_ERR(trapno)        \
 101   99          push    $trapno
 102  100  
 103      -#endif  /* __xpv && __amd64 */
      101 +#endif  /* __xpv */
 104  102  
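To make the two variants concrete, a minimal expansion sketch (derived mechanically from the macro definitions above, not part of this change): a no-error-code trap such as T_NMIFLT pushes a fake error code before the trap number, while an error-code trap such as T_GPFLT pushes only the trap number, since the hardware already supplied the real error code. Under the hypervisor, XPV_TRAP_POP first restores the %rcx/%r11 that Xen saved on entry.

        /* TRAP_NOERR(T_NMIFLT), bare metal */
        push    $0              /* fake error code, for a consistent frame */
        push    $T_NMIFLT       /* trap number ($2) */

        /* TRAP_ERR(T_GPFLT), bare metal */
        push    $T_GPFLT        /* hw already pushed the error code */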
 105  103          /*
 106  104           * These are the stacks used on cpu0 for taking double faults,
 107      -         * NMIs and MCEs (the latter two only on amd64 where we have IST).
      105 +         * NMIs and MCEs.
 108  106           *
 109  107           * We define them here instead of in a C file so that we can page-align
 110  108           * them (gcc won't do that in a .c file).
 111  109           */
 112  110          .data
 113  111          DGDEF3(dblfault_stack0, DEFAULTSTKSZ, MMU_PAGESIZE)
 114  112          .fill   DEFAULTSTKSZ, 1, 0
 115  113          DGDEF3(nmi_stack0, DEFAULTSTKSZ, MMU_PAGESIZE)
 116  114          .fill   DEFAULTSTKSZ, 1, 0
 117  115          DGDEF3(mce_stack0, DEFAULTSTKSZ, MMU_PAGESIZE)
(9 lines elided)
 127  125  
 128  126          /*
 129  127           * #DB
 130  128           *
 131  129           * Fetch %dr6 and clear it, handing off the value to the
 132  130           * cmntrap code in %r15/%esi
 133  131           */
 134  132          ENTRY_NP(dbgtrap)
 135  133          TRAP_NOERR(T_SGLSTP)    /* $1 */
 136  134  
 137      -#if defined(__amd64)
 138  135  #if !defined(__xpv)             /* no sysenter support yet */
 139  136          /*
 140  137           * If we get here as a result of single-stepping a sysenter
 141  138           * instruction, we suddenly find ourselves taking a #db
 142  139           * in kernel mode -before- we've swapgs'ed.  So before we can
 143  140           * take the trap, we do the swapgs here, and fix the return
 144  141           * %rip in trap() so that we return immediately after the
 145  142           * swapgs in the sysenter handler to avoid doing the swapgs again.
 146  143           *
 147  144           * Nobody said that the design of sysenter was particularly
(38 lines elided)
 186  183          movq    %rax, %r15              /* %db6 -> %r15 */
 187  184          movl    $6, %edi
 188  185          movl    $0, %esi
 189  186          call    kdi_dreg_set            /* 0 -> %db6 */
 190  187  #else
 191  188          movq    %db6, %r15
 192  189          xorl    %eax, %eax
 193  190          movq    %rax, %db6
 194  191  #endif
 195  192  
 196      -#elif defined(__i386)
 197      -
 198      -        INTR_PUSH
 199      -#if defined(__xpv)
 200      -        pushl   $6
 201      -        call    kdi_dreg_get
 202      -        addl    $4, %esp
 203      -        movl    %eax, %esi              /* %dr6 -> %esi */
 204      -        pushl   $0
 205      -        pushl   $6
 206      -        call    kdi_dreg_set            /* 0 -> %dr6 */
 207      -        addl    $8, %esp
 208      -#else
 209      -        movl    %db6, %esi
 210      -        xorl    %eax, %eax
 211      -        movl    %eax, %db6
 212      -#endif
 213      -#endif  /* __i386 */
 214      -
 215  193          jmp     cmntrap_pushed
 216  194          SET_SIZE(dbgtrap)
 217  195  
 218      -#if defined(__amd64)
 219  196  #if !defined(__xpv)
 220  197  
 221  198  /*
 222  199   * Macro to set the gsbase or kgsbase to the address of the struct cpu
 223  200   * for this processor.  If we came from userland, set kgsbase else
 224  201   * set gsbase.  We find the proper cpu struct by looping through
 225  202   * the cpu structs for all processors till we find a match for the gdt
 226  203   * of the trapping processor.  The stack is expected to be pointing at
 227  204   * the standard regs pushed by hardware on a trap (plus error code and trapno).
 228  205   *
(41 lines elided)
 270  247          movq    REGOFF_RAX(%rbp), %rax;                                 \
 271  248          movq    %rbp, %rsp;                                             \
 272  249          movq    REGOFF_RBP(%rsp), %rbp;                                 \
 273  250          addq    $REGOFF_TRAPNO, %rsp    /* pop stack */
 274  251  
 275  252  #else   /* __xpv */
 276  253  
 277  254  #define SET_CPU_GSBASE  /* noop on the hypervisor */
 278  255  
 279  256  #endif  /* __xpv */
 280      -#endif  /* __amd64 */
 281  257  
 282  258  
 283      -#if defined(__amd64)
 284      -
 285  259          /*
 286  260           * #NMI
 287  261           *
 288  262           * XXPV: See 6532669.
 289  263           */
 290  264          ENTRY_NP(nmiint)
 291  265          TRAP_NOERR(T_NMIFLT)    /* $2 */
 292  266  
 293  267          SET_CPU_GSBASE
 294  268  
(12 lines elided)
 307  281  
 308  282          movq    %rbp, %rdi
 309  283          call    av_dispatch_nmivect
 310  284  
 311  285          INTR_POP
 312  286          call    x86_md_clear
 313  287          jmp     tr_iret_auto
 314  288          /*NOTREACHED*/
 315  289          SET_SIZE(nmiint)
 316  290  
 317      -#elif defined(__i386)
 318      -
 319  291          /*
 320      -         * #NMI
 321      -         */
 322      -        ENTRY_NP(nmiint)
 323      -        TRAP_NOERR(T_NMIFLT)    /* $2 */
 324      -
 325      -        /*
 326      -         * Save all registers and setup segment registers
 327      -         * with kernel selectors.
 328      -         */
 329      -        INTR_PUSH
 330      -        INTGATE_INIT_KERNEL_FLAGS
 331      -
 332      -        TRACE_PTR(%edi, %ebx, %ebx, %ecx, $TT_TRAP)
 333      -        TRACE_REGS(%edi, %esp, %ebx, %ecx)
 334      -        TRACE_STAMP(%edi)
 335      -
 336      -        movl    %esp, %ebp
 337      -
 338      -        pushl   %ebp
 339      -        call    av_dispatch_nmivect
 340      -        addl    $4, %esp
 341      -
 342      -        INTR_POP_USER
 343      -        IRET
 344      -        SET_SIZE(nmiint)
 345      -
 346      -#endif  /* __i386 */
 347      -
 348      -        /*
 349  292           * #BP
 350  293           */
 351  294          ENTRY_NP(brktrap)
 352      -
 353      -#if defined(__amd64)
 354  295          XPV_TRAP_POP
 355  296          cmpw    $KCS_SEL, 8(%rsp)
 356  297          jne     bp_user
 357  298  
 358  299          /*
 359  300           * This is a breakpoint in the kernel -- it is very likely that this
 360  301           * is DTrace-induced.  To unify DTrace handling, we spoof this as an
 361  302           * invalid opcode (#UD) fault.  Note that #BP is a trap, not a fault --
 362  303           * we must decrement the trapping %rip to make it appear as a fault.
 363  304           * We then push a non-zero error code to indicate that this is coming
 364  305           * from #BP.
 365  306           */
 366  307          decq    (%rsp)
 367  308          push    $1                      /* error code -- non-zero for #BP */
 368  309          jmp     ud_kernel
 369  310  
 370  311  bp_user:
 371      -#endif /* __amd64 */
 372  312  
 373  313          NPTRAP_NOERR(T_BPTFLT)  /* $3 */
 374  314          jmp     dtrace_trap
 375  315  
 376  316          SET_SIZE(brktrap)
 377  317  
 378  318          /*
 379  319           * #OF
 380  320           */
 381  321          ENTRY_NP(ovflotrap)
(2 lines elided)
 384  324          SET_SIZE(ovflotrap)
 385  325  
 386  326          /*
 387  327           * #BR
 388  328           */
 389  329          ENTRY_NP(boundstrap)
 390  330          TRAP_NOERR(T_BOUNDFLT)  /* $5 */
 391  331          jmp     cmntrap
 392  332          SET_SIZE(boundstrap)
 393  333  
 394      -#if defined(__amd64)
 395      -
 396  334          ENTRY_NP(invoptrap)
 397  335  
 398  336          XPV_TRAP_POP
 399  337  
 400  338          cmpw    $KCS_SEL, 8(%rsp)
 401  339          jne     ud_user
 402  340  
 403  341  #if defined(__xpv)
 404  342          movb    $0, 12(%rsp)            /* clear saved upcall_mask from %cs */
 405  343  #endif
(41 lines elided)
 447  385          movq    56(%rsp), %rax          /* load calling SS */
 448  386          movq    %rax, 40(%rsp)          /* store calling SS */
 449  387          movq    32(%rsp), %rax          /* reload calling RSP */
 450  388          movq    %rbp, (%rax)            /* store %rbp there */
 451  389          popq    %rax                    /* pop off temp */
 452  390          jmp     tr_iret_kernel          /* return from interrupt */
 453  391          /*NOTREACHED*/
 454  392  
 455  393  ud_leave:
 456  394          /*
 457      -         * We must emulate a "leave", which is the same as a "movq %rbp, %rsp"
 458      -         * followed by a "popq %rbp".  This is quite a bit simpler on amd64
 459      -         * than it is on i386 -- we can exploit the fact that the %rsp is
 460      -         * explicitly saved to effect the pop without having to reshuffle
 461      -         * the other data pushed for the trap.
      395 +         * We must emulate a "leave", which is the same as a "movq %rbp,
      396 +         * %rsp" followed by a "popq %rbp".  We can exploit the fact
      397 +         * that the %rsp is explicitly saved to effect the pop without
      398 +         * having to reshuffle the other data pushed for the trap.
 462  399           */
      400 +
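For reference, the instruction being emulated is architecturally exactly the two-instruction sequence named in the comment; the code below achieves the same effect by editing the saved trap frame instead, since %rsp itself cannot simply be switched mid-trap:

        movq    %rbp, %rsp              /* what "leave" does first... */
        popq    %rbp                    /* ...then it pops the saved %rbp */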
 463  401          INTR_POP
 464  402          pushq   %rax                    /* push temp */
 465  403          movq    8(%rsp), %rax           /* load calling RIP */
 466  404          addq    $1, %rax                /* increment over trapping instr */
 467  405          movq    %rax, 8(%rsp)           /* store calling RIP */
 468  406          movq    (%rbp), %rax            /* get new %rbp */
 469  407          addq    $8, %rbp                /* adjust new %rsp */
 470  408          movq    %rbp, 32(%rsp)          /* store new %rsp */
 471  409          movq    %rax, %rbp              /* set new %rbp */
 472  410          popq    %rax                    /* pop off temp */
(35 lines elided)
 508  446          NPTRAP_NOERR(T_BPTFLT)  /* $3 */
 509  447          jmp     cmntrap
 510  448  
 511  449  ud_ud:
 512  450          addq    $REGOFF_RIP, %rsp
 513  451  ud_user:
 514  452          NPTRAP_NOERR(T_ILLINST)
 515  453          jmp     cmntrap
 516  454          SET_SIZE(invoptrap)
 517  455  
 518      -#elif defined(__i386)
 519      -
 520  456          /*
 521      -         * #UD
 522      -         */
 523      -        ENTRY_NP(invoptrap)
 524      -        /*
 525      -         * If we are taking an invalid opcode trap while in the kernel, this
 526      -         * is likely an FBT probe point.
 527      -         */
 528      -        pushl   %gs
 529      -        cmpw    $KGS_SEL, (%esp)
 530      -        jne     8f
 531      -
 532      -        addl    $4, %esp
 533      -#if defined(__xpv)
 534      -        movb    $0, 6(%esp)             /* clear saved upcall_mask from %cs */
 535      -#endif  /* __xpv */
 536      -        pusha
 537      -        pushl   %eax                    /* push %eax -- may be return value */
 538      -        pushl   %esp                    /* push stack pointer */
 539      -        addl    $48, (%esp)             /* adjust to incoming args */
 540      -        pushl   40(%esp)                /* push calling EIP */
 541      -        call    dtrace_invop
 542      -        ALTENTRY(dtrace_invop_callsite)
 543      -        addl    $12, %esp
 544      -        cmpl    $DTRACE_INVOP_PUSHL_EBP, %eax
 545      -        je      1f
 546      -        cmpl    $DTRACE_INVOP_POPL_EBP, %eax
 547      -        je      2f
 548      -        cmpl    $DTRACE_INVOP_LEAVE, %eax
 549      -        je      3f
 550      -        cmpl    $DTRACE_INVOP_NOP, %eax
 551      -        je      4f
 552      -        jmp     7f
 553      -1:
 554      -        /*
 555      -         * We must emulate a "pushl %ebp".  To do this, we pull the stack
 556      -         * down 4 bytes, and then store the base pointer.
 557      -         */
 558      -        popa
 559      -        subl    $4, %esp                /* make room for %ebp */
 560      -        pushl   %eax                    /* push temp */
 561      -        movl    8(%esp), %eax           /* load calling EIP */
 562      -        incl    %eax                    /* increment over LOCK prefix */
 563      -        movl    %eax, 4(%esp)           /* store calling EIP */
 564      -        movl    12(%esp), %eax          /* load calling CS */
 565      -        movl    %eax, 8(%esp)           /* store calling CS */
 566      -        movl    16(%esp), %eax          /* load calling EFLAGS */
 567      -        movl    %eax, 12(%esp)          /* store calling EFLAGS */
 568      -        movl    %ebp, 16(%esp)          /* push %ebp */
 569      -        popl    %eax                    /* pop off temp */
 570      -        jmp     _emul_done
 571      -2:
 572      -        /*
 573      -         * We must emulate a "popl %ebp".  To do this, we do the opposite of
 574      -         * the above:  we remove the %ebp from the stack, and squeeze up the
 575      -         * saved state from the trap.
 576      -         */
 577      -        popa
 578      -        pushl   %eax                    /* push temp */
 579      -        movl    16(%esp), %ebp          /* pop %ebp */
 580      -        movl    12(%esp), %eax          /* load calling EFLAGS */
 581      -        movl    %eax, 16(%esp)          /* store calling EFLAGS */
 582      -        movl    8(%esp), %eax           /* load calling CS */
 583      -        movl    %eax, 12(%esp)          /* store calling CS */
 584      -        movl    4(%esp), %eax           /* load calling EIP */
 585      -        incl    %eax                    /* increment over LOCK prefix */
 586      -        movl    %eax, 8(%esp)           /* store calling EIP */
 587      -        popl    %eax                    /* pop off temp */
 588      -        addl    $4, %esp                /* adjust stack pointer */
 589      -        jmp     _emul_done
 590      -3:
 591      -        /*
 592      -         * We must emulate a "leave", which is the same as a "movl %ebp, %esp"
 593      -         * followed by a "popl %ebp".  This looks similar to the above, but
 594      -         * requires two temporaries:  one for the new base pointer, and one
 595      -         * for the staging register.
 596      -         */
 597      -        popa
 598      -        pushl   %eax                    /* push temp */
 599      -        pushl   %ebx                    /* push temp */
 600      -        movl    %ebp, %ebx              /* set temp to old %ebp */
 601      -        movl    (%ebx), %ebp            /* pop %ebp */
 602      -        movl    16(%esp), %eax          /* load calling EFLAGS */
 603      -        movl    %eax, (%ebx)            /* store calling EFLAGS */
 604      -        movl    12(%esp), %eax          /* load calling CS */
 605      -        movl    %eax, -4(%ebx)          /* store calling CS */
 606      -        movl    8(%esp), %eax           /* load calling EIP */
 607      -        incl    %eax                    /* increment over LOCK prefix */
 608      -        movl    %eax, -8(%ebx)          /* store calling EIP */
 609      -        movl    %ebx, -4(%esp)          /* temporarily store new %esp */
 610      -        popl    %ebx                    /* pop off temp */
 611      -        popl    %eax                    /* pop off temp */
 612      -        movl    -12(%esp), %esp         /* set stack pointer */
 613      -        subl    $8, %esp                /* adjust for three pushes, one pop */
 614      -        jmp     _emul_done
 615      -4:
 616      -        /*
 617      -         * We must emulate a "nop".  This is obviously not hard:  we need only
 618      -         * advance the %eip by one.
 619      -         */
 620      -        popa
 621      -        incl    (%esp)
 622      -_emul_done:
 623      -        IRET                            /* return from interrupt */
 624      -7:
 625      -        popa
 626      -        pushl   $0
 627      -        pushl   $T_ILLINST      /* $6 */
 628      -        jmp     cmntrap
 629      -8:
 630      -        addl    $4, %esp
 631      -        pushl   $0
 632      -        pushl   $T_ILLINST      /* $6 */
 633      -        jmp     cmntrap
 634      -        SET_SIZE(invoptrap)
 635      -
 636      -#endif  /* __i386 */
 637      -
 638      -        /*
 639  457           * #NM
 640  458           */
 641  459  
 642  460          ENTRY_NP(ndptrap)
 643  461          TRAP_NOERR(T_NOEXTFLT)  /* $0 */
 644  462          SET_CPU_GSBASE
 645  463          jmp     cmntrap
 646  464          SET_SIZE(ndptrap)
 647  465  
 648  466  #if !defined(__xpv)
 649      -#if defined(__amd64)
 650  467  
 651  468          /*
 652  469           * #DF
 653  470           */
 654  471          ENTRY_NP(syserrtrap)
 655  472          pushq   $T_DBLFLT
 656  473          SET_CPU_GSBASE
 657  474  
 658  475          /*
 659  476           * We share this handler with kmdb (if kmdb is loaded).  As such, we
(32 lines elided)
 692  509  
 693  510          ENABLE_INTR_FLAGS
 694  511  
 695  512          movq    %rsp, %rdi      /* &regs */
 696  513          xorl    %esi, %esi      /* clear address */
 697  514          xorl    %edx, %edx      /* cpuid = 0 */
 698  515          call    trap
 699  516  
 700  517          SET_SIZE(syserrtrap)
 701  518  
 702      -#elif defined(__i386)
 703      -
 704      -        /*
 705      -         * #DF
 706      -         */
 707      -        ENTRY_NP(syserrtrap)
 708      -        cli                             /* disable interrupts */
 709      -
 710      -        /*
 711      -         * We share this handler with kmdb (if kmdb is loaded).  As such, we
 712      -         * may have reached this point after encountering a #df in kmdb.  If
 713      -         * that happens, we'll still be on kmdb's IDT.  We need to switch back
 714      -         * to this CPU's IDT before proceeding.  Furthermore, if we did arrive
 715      -         * here from kmdb, kmdb is probably in a very sickly state, and
 716      -         * shouldn't be entered from the panic flow.  We'll suppress that
 717      -         * entry by setting nopanicdebug.
 718      -         */
 719      -
 720      -        subl    $DESCTBR_SIZE, %esp
 721      -        movl    %gs:CPU_IDT, %eax
 722      -        sidt    (%esp)
 723      -        cmpl    DTR_BASE(%esp), %eax
 724      -        je      1f
 725      -
 726      -        movl    %eax, DTR_BASE(%esp)
 727      -        movw    $_MUL(NIDT, GATE_DESC_SIZE), DTR_LIMIT(%esp)
 728      -        lidt    (%esp)
 729      -
 730      -        movl    $1, nopanicdebug
 731      -
 732      -1:      addl    $DESCTBR_SIZE, %esp
 733      -
 734      -        /*
 735      -         * Check the CPL in the TSS to see what mode
 736      -         * (user or kernel) we took the fault in.  At this
 737      -         * point we are running in the context of the double
 738      -         * fault task (dftss) but the CPU's task points to
 739      -         * the previous task (ktss) where the process context
 740      -         * has been saved as the result of the task switch.
 741      -         */
 742      -        movl    %gs:CPU_TSS, %eax       /* get the TSS */
 743      -        movl    TSS_SS(%eax), %ebx      /* save the fault SS */
 744      -        movl    TSS_ESP(%eax), %edx     /* save the fault ESP */
 745      -        testw   $CPL_MASK, TSS_CS(%eax) /* user mode ? */
 746      -        jz      make_frame
 747      -        movw    TSS_SS0(%eax), %ss      /* get on the kernel stack */
 748      -        movl    TSS_ESP0(%eax), %esp
 749      -
 750      -        /*
 751      -         * Clear the NT flag to avoid a task switch when the process
 752      -         * finally pops the EFL off the stack via an iret.  Clear
 753      -         * the TF flag since that is what the processor does for
 754      -         * a normal exception. Clear the IE flag so that interrupts
 755      -         * remain disabled.
 756      -         */
 757      -        movl    TSS_EFL(%eax), %ecx
 758      -        andl    $_BITNOT(PS_NT|PS_T|PS_IE), %ecx
 759      -        pushl   %ecx
 760      -        popfl                           /* restore the EFL */
 761      -        movw    TSS_LDT(%eax), %cx      /* restore the LDT */
 762      -        lldt    %cx
 763      -
 764      -        /*
 765      -         * Restore process segment selectors.
 766      -         */
 767      -        movw    TSS_DS(%eax), %ds
 768      -        movw    TSS_ES(%eax), %es
 769      -        movw    TSS_FS(%eax), %fs
 770      -        movw    TSS_GS(%eax), %gs
 771      -
 772      -        /*
 773      -         * Restore task segment selectors.
 774      -         */
 775      -        movl    $KDS_SEL, TSS_DS(%eax)
 776      -        movl    $KDS_SEL, TSS_ES(%eax)
 777      -        movl    $KDS_SEL, TSS_SS(%eax)
 778      -        movl    $KFS_SEL, TSS_FS(%eax)
 779      -        movl    $KGS_SEL, TSS_GS(%eax)
 780      -
 781      -        /*
 782      -         * Clear the TS bit, the busy bits in both task
 783      -         * descriptors, and switch tasks.
 784      -         */
 785      -        clts
 786      -        leal    gdt0, %ecx
 787      -        movl    DFTSS_SEL+4(%ecx), %esi
 788      -        andl    $_BITNOT(0x200), %esi
 789      -        movl    %esi, DFTSS_SEL+4(%ecx)
 790      -        movl    KTSS_SEL+4(%ecx), %esi
 791      -        andl    $_BITNOT(0x200), %esi
 792      -        movl    %esi, KTSS_SEL+4(%ecx)
 793      -        movw    $KTSS_SEL, %cx
 794      -        ltr     %cx
 795      -
 796      -        /*
 797      -         * Restore part of the process registers.
 798      -         */
 799      -        movl    TSS_EBP(%eax), %ebp
 800      -        movl    TSS_ECX(%eax), %ecx
 801      -        movl    TSS_ESI(%eax), %esi
 802      -        movl    TSS_EDI(%eax), %edi
 803      -
 804      -make_frame:
 805      -        /*
 806      -         * Make a trap frame.  Leave the error code (0) on
 807      -         * the stack since the first word on a trap stack is
 808      -         * unused anyway.
 809      -         */
 810      -        pushl   %ebx                    / fault SS
 811      -        pushl   %edx                    / fault ESP
 812      -        pushl   TSS_EFL(%eax)           / fault EFL
 813      -        pushl   TSS_CS(%eax)            / fault CS
 814      -        pushl   TSS_EIP(%eax)           / fault EIP
 815      -        pushl   $0                      / error code
 816      -        pushl   $T_DBLFLT               / trap number 8
 817      -        movl    TSS_EBX(%eax), %ebx     / restore EBX
 818      -        movl    TSS_EDX(%eax), %edx     / restore EDX
 819      -        movl    TSS_EAX(%eax), %eax     / restore EAX
 820      -        sti                             / enable interrupts
 821      -        jmp     cmntrap
 822      -        SET_SIZE(syserrtrap)
 823      -
 824      -#endif  /* __i386 */
 825  519  #endif  /* !__xpv */
 826  520  
 827  521          /*
 828  522           * #TS
 829  523           */
 830  524          ENTRY_NP(invtsstrap)
 831  525          TRAP_ERR(T_TSSFLT)      /* $10 already have error code on stack */
 832  526          jmp     cmntrap
 833  527          SET_SIZE(invtsstrap)
 834  528  
 835  529          /*
 836  530           * #NP
 837  531           */
 838  532          ENTRY_NP(segnptrap)
 839  533          TRAP_ERR(T_SEGFLT)      /* $11 already have error code on stack */
 840      -#if defined(__amd64)
 841  534          SET_CPU_GSBASE
 842      -#endif
 843  535          jmp     cmntrap
 844  536          SET_SIZE(segnptrap)
 845  537  
 846  538          /*
 847  539           * #SS
 848  540           */
 849  541          ENTRY_NP(stktrap)
 850  542          TRAP_ERR(T_STKFLT)      /* $12 already have error code on stack */
 851      -#if defined(__amd64)
 852  543          SET_CPU_GSBASE
 853      -#endif
 854  544          jmp     cmntrap
 855  545          SET_SIZE(stktrap)
 856  546  
 857  547          /*
 858  548           * #GP
 859  549           */
 860  550          ENTRY_NP(gptrap)
 861  551          TRAP_ERR(T_GPFLT)       /* $13 already have error code on stack */
 862      -#if defined(__amd64)
 863  552          SET_CPU_GSBASE
 864      -#endif
 865  553          jmp     cmntrap
 866  554          SET_SIZE(gptrap)
 867  555  
 868  556          /*
 869  557           * #PF
 870  558           */
 871  559          ENTRY_NP(pftrap)
 872  560          TRAP_ERR(T_PGFLT)       /* $14 already have error code on stack */
 873  561          INTR_PUSH
 874  562  #if defined(__xpv)
 875  563  
 876      -#if defined(__amd64)
 877  564          movq    %gs:CPU_VCPU_INFO, %r15
 878  565          movq    VCPU_INFO_ARCH_CR2(%r15), %r15  /* vcpu[].arch.cr2 */
 879      -#elif defined(__i386)
 880      -        movl    %gs:CPU_VCPU_INFO, %esi
 881      -        movl    VCPU_INFO_ARCH_CR2(%esi), %esi  /* vcpu[].arch.cr2 */
 882      -#endif  /* __i386 */
 883  566  
 884  567  #else   /* __xpv */
 885  568  
 886      -#if defined(__amd64)
 887  569          movq    %cr2, %r15
 888      -#elif defined(__i386)
 889      -        movl    %cr2, %esi
 890      -#endif  /* __i386 */
 891  570  
 892  571  #endif  /* __xpv */
 893  572          jmp     cmntrap_pushed
 894  573          SET_SIZE(pftrap)
 895  574  
 896      -#if !defined(__amd64)
 897      -
 898      -        .globl  idt0_default_r
 899      -
 900      -        /*
 901      -         * #PF pentium bug workaround
 902      -         */
 903      -        ENTRY_NP(pentium_pftrap)
 904      -        pushl   %eax
 905      -        movl    %cr2, %eax
 906      -        andl    $MMU_STD_PAGEMASK, %eax
 907      -
 908      -        cmpl    %eax, %cs:idt0_default_r+2      /* fixme */
 909      -
 910      -        je      check_for_user_address
 911      -user_mode:
 912      -        popl    %eax
 913      -        pushl   $T_PGFLT        /* $14 */
 914      -        jmp     cmntrap
 915      -check_for_user_address:
 916      -        /*
 917      -         * Before we assume that we have an unmapped trap on our hands,
 918      -         * check to see if this is a fault from user mode.  If it is,
 919      -         * we'll kick back into the page fault handler.
 920      -         */
 921      -        movl    4(%esp), %eax   /* error code */
 922      -        andl    $PF_ERR_USER, %eax
 923      -        jnz     user_mode
 924      -
 925      -        /*
 926      -         * We now know that this is the invalid opcode trap.
 927      -         */
 928      -        popl    %eax
 929      -        addl    $4, %esp        /* pop error code */
 930      -        jmp     invoptrap
 931      -        SET_SIZE(pentium_pftrap)
 932      -
 933      -#endif  /* !__amd64 */
 934      -
 935  575          ENTRY_NP(resvtrap)
 936  576          TRAP_NOERR(T_RESVTRAP)  /* (reserved)  */
 937  577          jmp     cmntrap
 938  578          SET_SIZE(resvtrap)
 939  579  
 940  580          /*
 941  581           * #MF
 942  582           */
 943  583          ENTRY_NP(ndperr)
 944  584          TRAP_NOERR(T_EXTERRFLT) /* $16 */
(6 lines elided)
 951  591          ENTRY_NP(achktrap)
 952  592          TRAP_ERR(T_ALIGNMENT)   /* $17 */
 953  593          jmp     cmntrap
 954  594          SET_SIZE(achktrap)
 955  595  
 956  596          /*
 957  597           * #MC
 958  598           */
 959  599          .globl  cmi_mca_trap    /* see uts/i86pc/os/cmi.c */
 960  600  
 961      -#if defined(__amd64)
 962      -
 963  601          ENTRY_NP(mcetrap)
 964  602          TRAP_NOERR(T_MCE)       /* $18 */
 965  603  
 966  604          SET_CPU_GSBASE
 967  605  
 968  606          INTR_PUSH
 969  607          INTGATE_INIT_KERNEL_FLAGS
 970  608  
 971  609          TRACE_PTR(%rdi, %rbx, %ebx, %rcx, $TT_TRAP)
 972  610          TRACE_REGS(%rdi, %rsp, %rbx, %rcx)
 973  611          TRACE_STAMP(%rdi)
 974  612  
 975  613          movq    %rsp, %rbp
 976  614  
 977  615          movq    %rsp, %rdi      /* arg0 = struct regs *rp */
 978  616          call    cmi_mca_trap    /* cmi_mca_trap(rp); */
 979  617  
 980  618          jmp     _sys_rtt
 981  619          SET_SIZE(mcetrap)
 982  620  
 983      -#else
 984      -
 985      -        ENTRY_NP(mcetrap)
 986      -        TRAP_NOERR(T_MCE)       /* $18 */
 987      -
 988      -        INTR_PUSH
 989      -        INTGATE_INIT_KERNEL_FLAGS
 990      -
 991      -        TRACE_PTR(%edi, %ebx, %ebx, %ecx, $TT_TRAP)
 992      -        TRACE_REGS(%edi, %esp, %ebx, %ecx)
 993      -        TRACE_STAMP(%edi)
 994      -
 995      -        movl    %esp, %ebp
 996      -
 997      -        movl    %esp, %ecx
 998      -        pushl   %ecx            /* arg0 = struct regs *rp */
 999      -        call    cmi_mca_trap    /* cmi_mca_trap(rp) */
1000      -        addl    $4, %esp        /* pop arg0 */
1001      -
1002      -        jmp     _sys_rtt
1003      -        SET_SIZE(mcetrap)
1004      -
1005      -#endif
1006      -
1007  621          /*
1008  622           * #XF
1009  623           */
1010  624          ENTRY_NP(xmtrap)
1011  625          TRAP_NOERR(T_SIMDFPE)   /* $19 */
1012  626          jmp     cmninttrap
1013  627          SET_SIZE(xmtrap)
1014  628  
1015  629          ENTRY_NP(invaltrap)
1016  630          TRAP_NOERR(T_INVALTRAP) /* very invalid */
1017  631          jmp     cmntrap
1018  632          SET_SIZE(invaltrap)
1019  633  
1020  634          .globl  fasttable
1021  635  
1022      -#if defined(__amd64)
1023      -
1024  636          ENTRY_NP(fasttrap)
1025  637          cmpl    $T_LASTFAST, %eax
1026  638          ja      1f
1027  639          orl     %eax, %eax      /* (zero extend top 32-bits) */
1028  640          leaq    fasttable(%rip), %r11
1029  641          leaq    (%r11, %rax, CLONGSIZE), %r11
1030  642          movq    (%r11), %r11
1031  643          INDIRECT_JMP_REG(r11)
1032  644  1:
1033  645          /*
(10 lines elided)
1044  656          subq    $2, (%rsp)      /* XXX int insn 2-bytes */
1045  657          pushq   $_CONST(_MUL(T_FASTTRAP, GATE_DESC_SIZE) + 2)
1046  658  
1047  659  #if defined(__xpv)
1048  660          pushq   %r11
1049  661          pushq   %rcx
1050  662  #endif
1051  663          jmp     gptrap
1052  664          SET_SIZE(fasttrap)
1053  665  
1054      -#elif defined(__i386)
1055      -
1056      -        ENTRY_NP(fasttrap)
1057      -        cmpl    $T_LASTFAST, %eax
1058      -        ja      1f
1059      -        jmp     *%cs:fasttable(, %eax, CLONGSIZE)
1060      -1:
1061      -        /*
1062      -         * Fast syscall number was illegal.  Make it look
1063      -         * as if the INT failed.  Modify %eip to point before the
1064      -         * INT, push the expected error code and fake a GP fault.
1065      -         *
1066      -         * XXX Why make the error code be offset into idt + 1?
1067      -         * Instead we should push a real (soft?) error code
1068      -         * on the stack and #gp handler could know about fasttraps?
1069      -         */
1070      -        subl    $2, (%esp)      /* XXX int insn 2-bytes */
1071      -        pushl   $_CONST(_MUL(T_FASTTRAP, GATE_DESC_SIZE) + 2)
1072      -        jmp     gptrap
1073      -        SET_SIZE(fasttrap)
1074      -
1075      -#endif  /* __i386 */
1076      -
1077  666          ENTRY_NP(dtrace_ret)
1078  667          TRAP_NOERR(T_DTRACE_RET)
1079  668          jmp     dtrace_trap
1080  669          SET_SIZE(dtrace_ret)
1081  670  
1082      -#if defined(__amd64)
1083      -
1084  671          /*
1085  672           * RFLAGS 24 bytes up the stack from %rsp.
1086  673           * XXX a constant would be nicer.
1087  674           */
1088  675          ENTRY_NP(fast_null)
1089  676          XPV_TRAP_POP
1090  677          orq     $PS_C, 24(%rsp) /* set carry bit in user flags */
1091  678          call    x86_md_clear
1092  679          jmp     tr_iret_auto
1093  680          /*NOTREACHED*/
1094  681          SET_SIZE(fast_null)
1095  682  
1096      -#elif defined(__i386)
1097      -
1098      -        ENTRY_NP(fast_null)
1099      -        orw     $PS_C, 8(%esp)  /* set carry bit in user flags */
1100      -        IRET
1101      -        SET_SIZE(fast_null)
1102      -
1103      -#endif  /* __i386 */
1104      -
1105  683          /*
1106  684           * Interrupts start at 32
1107  685           */
1108  686  #define MKIVCT(n)                       \
1109  687          ENTRY_NP(ivct/**/n)             \
1110  688          push    $0;                     \
1111  689          push    $n - 0x20;              \
1112  690          jmp     cmnint;                 \
1113  691          SET_SIZE(ivct/**/n)
1114  692  
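A mechanical expansion example from the macro above, using the MKIVCT(255) invocation visible below: the /**/ token paste forms the symbol name, and the vector number is rebased so that the first external interrupt (0x20) maps to 0:

        ENTRY_NP(ivct255)
        push    $0              /* fake error code */
        push    $255 - 0x20     /* rebased vector: 0xdf (223) */
        jmp     cmnint
        SET_SIZE(ivct255)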
(215 lines elided)
1330  908          MKIVCT(247)
1331  909          MKIVCT(248)
1332  910          MKIVCT(249)
1333  911          MKIVCT(250)
1334  912          MKIVCT(251)
1335  913          MKIVCT(252)
1336  914          MKIVCT(253)
1337  915          MKIVCT(254)
1338  916          MKIVCT(255)
1339  917  
1340      -#endif  /* __lint */
    