/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, 2014 by Delphix. All rights reserved.
 * Copyright 2019 Joyent, Inc.
 */

/*
 * Copyright (c) 1989, 1990 William F. Jolitz.
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/amd64/amd64/exception.S,v 1.113 2003/10/15 02:04:52 peter Exp $
 */

#include <sys/asm_linkage.h>
#include <sys/asm_misc.h>
#include <sys/trap.h>
#include <sys/psw.h>
#include <sys/regset.h>
#include <sys/privregs.h>
#include <sys/dtrace.h>
#include <sys/x86_archext.h>
#include <sys/traptrace.h>
#include <sys/machparam.h>

#if !defined(__lint)

#include "assym.h"

/*
 * Push $0 on the stack for traps that do not
 * generate an error code. This is so the rest
 * of the kernel can expect a consistent stack
 * from any exception.
 *
 * Note that for all exceptions on amd64,
 * %r11 and %rcx are on the stack. Just pop
 * them back into their appropriate registers and let
 * everything get saved as if running native.
 */

#if defined(__xpv) && defined(__amd64)

#define NPTRAP_NOERR(trapno)    \
        pushq   $0;             \
        pushq   $trapno

#define TRAP_NOERR(trapno)      \
        XPV_TRAP_POP;           \
        NPTRAP_NOERR(trapno)

/*
 * error code already pushed by hw
 * onto stack.
 */
#define TRAP_ERR(trapno)        \
        XPV_TRAP_POP;           \
        pushq   $trapno

#else /* __xpv && __amd64 */

#define TRAP_NOERR(trapno)      \
        push    $0;             \
        push    $trapno

#define NPTRAP_NOERR(trapno) TRAP_NOERR(trapno)

/*
 * error code already pushed by hw
 * onto stack.
 */
#define TRAP_ERR(trapno)        \
        push    $trapno

#endif  /* __xpv && __amd64 */
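
/*
 * Illustrative only (not used by the build): once TRAP_NOERR/TRAP_ERR has
 * run, every handler below starts from the same frame layout -- the state
 * pushed by the hardware plus the (real or zero) error code and the trap
 * number pushed here.  On amd64 that is roughly:
 *
 *      (high address)  r_ss
 *                      r_rsp
 *                      r_rfl
 *                      r_cs
 *                      r_rip
 *                      r_err           <- pushed by hardware, or the $0 above
 *      (low address)   r_trapno        <- pushed by these macros
 *
 * The r_* names follow the struct regs/REGOFF_* convention used throughout
 * this file; the real offsets come from assym.h.
 */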

        /*
         * These are the stacks used on cpu0 for taking double faults,
         * NMIs and MCEs (the latter two only on amd64 where we have IST).
         *
         * We define them here instead of in a C file so that we can page-align
         * them (gcc won't do that in a .c file).
         */
        .data
        DGDEF3(dblfault_stack0, DEFAULTSTKSZ, MMU_PAGESIZE)
        .fill   DEFAULTSTKSZ, 1, 0
        DGDEF3(nmi_stack0, DEFAULTSTKSZ, MMU_PAGESIZE)
        .fill   DEFAULTSTKSZ, 1, 0
        DGDEF3(mce_stack0, DEFAULTSTKSZ, MMU_PAGESIZE)
        .fill   DEFAULTSTKSZ, 1, 0

        /*
         * #DE
         */
        ENTRY_NP(div0trap)
        TRAP_NOERR(T_ZERODIV)   /* $0 */
        jmp     cmntrap
        SET_SIZE(div0trap)

        /*
         * #DB
         *
         * Fetch %dr6 and clear it, handing off the value to the
         * cmntrap code in %r15/%esi
         */
        ENTRY_NP(dbgtrap)
        TRAP_NOERR(T_SGLSTP)    /* $1 */

#if defined(__amd64)
#if !defined(__xpv)             /* no sysenter support yet */
        /*
         * If we get here as a result of single-stepping a sysenter
         * instruction, we suddenly find ourselves taking a #db
         * in kernel mode -before- we've swapgs'ed.  So before we can
         * take the trap, we do the swapgs here, and fix the return
         * %rip in trap() so that we return immediately after the
         * swapgs in the sysenter handler to avoid doing the swapgs again.
         *
         * Nobody said that the design of sysenter was particularly
         * elegant, did they?
         */

        pushq   %r11

        /*
         * At this point the stack looks like this:
         *
         * (high address)       r_ss
         *                      r_rsp
         *                      r_rfl
         *                      r_cs
         *                      r_rip           <-- %rsp + 24
         *                      r_err           <-- %rsp + 16
         *                      r_trapno        <-- %rsp + 8
         * (low address)        %r11            <-- %rsp
         */
        leaq    sys_sysenter(%rip), %r11
        cmpq    %r11, 24(%rsp)  /* Compare to saved r_rip on the stack */
        je      1f
        leaq    brand_sys_sysenter(%rip), %r11
        cmpq    %r11, 24(%rsp)  /* Compare to saved r_rip on the stack */
        je      1f
        leaq    tr_sys_sysenter(%rip), %r11
        cmpq    %r11, 24(%rsp)
        je      1f
        leaq    tr_brand_sys_sysenter(%rip), %r11
        cmpq    %r11, 24(%rsp)
        jne     2f
1:      swapgs
2:      lfence /* swapgs mitigation */
        popq    %r11
#endif  /* !__xpv */

        INTR_PUSH
#if defined(__xpv)
        movl    $6, %edi
        call    kdi_dreg_get
        movq    %rax, %r15              /* %db6 -> %r15 */
        movl    $6, %edi
        movl    $0, %esi
        call    kdi_dreg_set            /* 0 -> %db6 */
#else
        movq    %db6, %r15
        xorl    %eax, %eax
        movq    %rax, %db6
#endif

#elif defined(__i386)

        INTR_PUSH
#if defined(__xpv)
        pushl   $6
        call    kdi_dreg_get
        addl    $4, %esp
        movl    %eax, %esi              /* %dr6 -> %esi */
        pushl   $0
        pushl   $6
        call    kdi_dreg_set            /* 0 -> %dr6 */
        addl    $8, %esp
#else
        movl    %db6, %esi
        xorl    %eax, %eax
        movl    %eax, %db6
#endif
#endif  /* __i386 */

        jmp     cmntrap_pushed
        SET_SIZE(dbgtrap)

#if defined(__amd64)
#if !defined(__xpv)

/*
 * Macro to set the gsbase or kgsbase to the address of the struct cpu
 * for this processor.  If we came from userland, set kgsbase else
 * set gsbase.  We find the proper cpu struct by looping through
 * the cpu structs for all processors till we find a match for the gdt
 * of the trapping processor.  The stack is expected to be pointing at
 * the standard regs pushed by hardware on a trap (plus error code and trapno).
 *
 * It's ok for us to clobber gsbase here (and possibly end up with both gsbase
 * and kgsbase set to the same value) because we're not going back the normal
 * way out of here (via IRET). Where we're going, we don't need no user %gs.
 */
#define SET_CPU_GSBASE                                                  \
        subq    $REGOFF_TRAPNO, %rsp;   /* save regs */                 \
        movq    %rax, REGOFF_RAX(%rsp);                                 \
        movq    %rbx, REGOFF_RBX(%rsp);                                 \
        movq    %rcx, REGOFF_RCX(%rsp);                                 \
        movq    %rdx, REGOFF_RDX(%rsp);                                 \
        movq    %rbp, REGOFF_RBP(%rsp);                                 \
        movq    %rsp, %rbp;                                             \
        subq    $16, %rsp;              /* space for gdt */             \
        sgdt    6(%rsp);                                                \
        movq    8(%rsp), %rcx;          /* %rcx has gdt to match */     \
        xorl    %ebx, %ebx;             /* loop index */                \
        leaq    cpu(%rip), %rdx;        /* cpu pointer array */         \
1:                                                                      \
        movq    (%rdx, %rbx, CLONGSIZE), %rax;  /* get cpu[i] */        \
        cmpq    $0x0, %rax;             /* cpu[i] == NULL ? */          \
        je      2f;                     /* yes, continue */             \
        cmpq    %rcx, CPU_GDT(%rax);    /* gdt == cpu[i]->cpu_gdt ? */  \
        je      3f;                     /* yes, go set gsbase */        \
2:                                                                      \
        incl    %ebx;                   /* i++ */                       \
        cmpl    $NCPU, %ebx;            /* i < NCPU ? */                \
        jb      1b;                     /* yes, loop */                 \
/* XXX BIG trouble if we fall thru here.  We didn't find a gdt match */ \
3:                                                                      \
        movl    $MSR_AMD_KGSBASE, %ecx;                                 \
        cmpw    $KCS_SEL, REGOFF_CS(%rbp); /* trap from kernel? */      \
        jne     4f;                     /* no, go set KGSBASE */        \
        movl    $MSR_AMD_GSBASE, %ecx;  /* yes, set GSBASE */           \
        mfence;                         /* OPTERON_ERRATUM_88 */        \
4:                                                                      \
        movq    %rax, %rdx;             /* write base register */       \
        shrq    $32, %rdx;                                              \
        wrmsr;                                                          \
        movq    REGOFF_RDX(%rbp), %rdx; /* restore regs */              \
        movq    REGOFF_RCX(%rbp), %rcx;                                 \
        movq    REGOFF_RBX(%rbp), %rbx;                                 \
        movq    REGOFF_RAX(%rbp), %rax;                                 \
        movq    %rbp, %rsp;                                             \
        movq    REGOFF_RBP(%rsp), %rbp;                                 \
        addq    $REGOFF_TRAPNO, %rsp    /* pop stack */
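
/*
 * Rough C sketch of the macro above, for illustration only.  The helper
 * names current_gdt_base() and trapped_from_kernel are made up for this
 * sketch; the cpu[] array, cpu_gdt, NCPU and the MSR names mirror the
 * assembly:
 *
 *      void *gdt = current_gdt_base();                 // sgdt
 *      for (i = 0; i < NCPU; i++)
 *              if (cpu[i] != NULL && cpu[i]->cpu_gdt == gdt)
 *                      break;                          // found our struct cpu
 *      if (trapped_from_kernel)
 *              wrmsr(MSR_AMD_GSBASE, (uint64_t)cpu[i]);
 *      else
 *              wrmsr(MSR_AMD_KGSBASE, (uint64_t)cpu[i]);
 */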

#else   /* __xpv */

#define SET_CPU_GSBASE  /* noop on the hypervisor */

#endif  /* __xpv */
#endif  /* __amd64 */


#if defined(__amd64)

        /*
         * #NMI
         *
         * XXPV: See 6532669.
         */
        ENTRY_NP(nmiint)
        TRAP_NOERR(T_NMIFLT)    /* $2 */

        SET_CPU_GSBASE

        /*
         * Save all registers and set up segment registers
         * with kernel selectors.
         */
        INTR_PUSH
        INTGATE_INIT_KERNEL_FLAGS

        TRACE_PTR(%r12, %rax, %eax, %rdx, $TT_TRAP)
        TRACE_REGS(%r12, %rsp, %rax, %rbx)
        TRACE_STAMP(%r12)

        movq    %rsp, %rbp

        movq    %rbp, %rdi
        call    av_dispatch_nmivect

        INTR_POP
        call    x86_md_clear
        jmp     tr_iret_auto
        /*NOTREACHED*/
        SET_SIZE(nmiint)

#elif defined(__i386)

        /*
         * #NMI
         */
        ENTRY_NP(nmiint)
        TRAP_NOERR(T_NMIFLT)    /* $2 */

        /*
         * Save all registers and set up segment registers
         * with kernel selectors.
         */
        INTR_PUSH
        INTGATE_INIT_KERNEL_FLAGS

        TRACE_PTR(%edi, %ebx, %ebx, %ecx, $TT_TRAP)
        TRACE_REGS(%edi, %esp, %ebx, %ecx)
        TRACE_STAMP(%edi)

        movl    %esp, %ebp

        pushl   %ebp
        call    av_dispatch_nmivect
        addl    $4, %esp

        INTR_POP_USER
        IRET
        SET_SIZE(nmiint)

#endif  /* __i386 */

        /*
         * #BP
         */
        ENTRY_NP(brktrap)

#if defined(__amd64)
        XPV_TRAP_POP
        cmpw    $KCS_SEL, 8(%rsp)
        jne     bp_user

        /*
         * This is a breakpoint in the kernel -- it is very likely that this
         * is DTrace-induced.  To unify DTrace handling, we spoof this as an
         * invalid opcode (#UD) fault.  Note that #BP is a trap, not a fault --
         * we must decrement the trapping %rip to make it appear as a fault.
         * We then push a non-zero error code to indicate that this is coming
         * from #BP.
         */
        decq    (%rsp)
        push    $1                      /* error code -- non-zero for #BP */
        jmp     ud_kernel

bp_user:
#endif /* __amd64 */

        NPTRAP_NOERR(T_BPTFLT)  /* $3 */
        jmp     dtrace_trap

        SET_SIZE(brktrap)

        /*
         * #OF
         */
        ENTRY_NP(ovflotrap)
        TRAP_NOERR(T_OVFLW)     /* $4 */
        jmp     cmntrap
        SET_SIZE(ovflotrap)

        /*
         * #BR
         */
        ENTRY_NP(boundstrap)
        TRAP_NOERR(T_BOUNDFLT)  /* $5 */
        jmp     cmntrap
        SET_SIZE(boundstrap)

#if defined(__amd64)

        ENTRY_NP(invoptrap)

        XPV_TRAP_POP

        cmpw    $KCS_SEL, 8(%rsp)
        jne     ud_user

#if defined(__xpv)
        movb    $0, 12(%rsp)            /* clear saved upcall_mask from %cs */
#endif
        push    $0                      /* error code -- zero for #UD */
ud_kernel:
        push    $0xdddd                 /* a dummy trap number */
        INTR_PUSH
        movq    REGOFF_RIP(%rsp), %rdi
        movq    REGOFF_RSP(%rsp), %rsi
        movq    REGOFF_RAX(%rsp), %rdx
        pushq   (%rsi)
        movq    %rsp, %rsi
        subq    $8, %rsp
        call    dtrace_invop
        ALTENTRY(dtrace_invop_callsite)
        addq    $16, %rsp
        cmpl    $DTRACE_INVOP_PUSHL_EBP, %eax
        je      ud_push
        cmpl    $DTRACE_INVOP_LEAVE, %eax
        je      ud_leave
        cmpl    $DTRACE_INVOP_NOP, %eax
        je      ud_nop
        cmpl    $DTRACE_INVOP_RET, %eax
        je      ud_ret
        jmp     ud_trap

ud_push:
        /*
         * We must emulate a "pushq %rbp".  To do this, we pull the stack
         * down 8 bytes, and then store the base pointer.
         */
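        /*
         * In other words (assuming, as the code below does, that the
         * patched-over probe instruction is a single byte): advance the
         * saved %rip past the trapping instruction, drop the saved %rsp
         * by 8, and store %rbp at that new %rsp -- leaving exactly the
         * state a real "pushq %rbp" would have produced.
         */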
        INTR_POP
        subq    $16, %rsp               /* make room for %rbp */
        pushq   %rax                    /* push temp */
        movq    24(%rsp), %rax          /* load calling RIP */
        addq    $1, %rax                /* increment over trapping instr */
        movq    %rax, 8(%rsp)           /* store calling RIP */
        movq    32(%rsp), %rax          /* load calling CS */
        movq    %rax, 16(%rsp)          /* store calling CS */
        movq    40(%rsp), %rax          /* load calling RFLAGS */
        movq    %rax, 24(%rsp)          /* store calling RFLAGS */
        movq    48(%rsp), %rax          /* load calling RSP */
        subq    $8, %rax                /* make room for %rbp */
        movq    %rax, 32(%rsp)          /* store calling RSP */
        movq    56(%rsp), %rax          /* load calling SS */
        movq    %rax, 40(%rsp)          /* store calling SS */
        movq    32(%rsp), %rax          /* reload calling RSP */
        movq    %rbp, (%rax)            /* store %rbp there */
        popq    %rax                    /* pop off temp */
        jmp     tr_iret_kernel          /* return from interrupt */
        /*NOTREACHED*/

ud_leave:
        /*
         * We must emulate a "leave", which is the same as a "movq %rbp, %rsp"
         * followed by a "popq %rbp".  This is quite a bit simpler on amd64
         * than it is on i386 -- we can exploit the fact that the %rsp is
         * explicitly saved to effect the pop without having to reshuffle
         * the other data pushed for the trap.
         */
        INTR_POP
        pushq   %rax                    /* push temp */
        movq    8(%rsp), %rax           /* load calling RIP */
        addq    $1, %rax                /* increment over trapping instr */
        movq    %rax, 8(%rsp)           /* store calling RIP */
        movq    (%rbp), %rax            /* get new %rbp */
        addq    $8, %rbp                /* adjust new %rsp */
        movq    %rbp, 32(%rsp)          /* store new %rsp */
        movq    %rax, %rbp              /* set new %rbp */
        popq    %rax                    /* pop off temp */
        jmp     tr_iret_kernel          /* return from interrupt */
        /*NOTREACHED*/

ud_nop:
        /*
         * We must emulate a "nop".  This is obviously not hard:  we need only
         * advance the %rip by one.
         */
        INTR_POP
        incq    (%rsp)
        jmp     tr_iret_kernel
        /*NOTREACHED*/

ud_ret:
        INTR_POP
        pushq   %rax                    /* push temp */
        movq    32(%rsp), %rax          /* load %rsp */
        movq    (%rax), %rax            /* load calling RIP */
        movq    %rax, 8(%rsp)           /* store calling RIP */
        addq    $8, 32(%rsp)            /* adjust new %rsp */
        popq    %rax                    /* pop off temp */
        jmp     tr_iret_kernel          /* return from interrupt */
        /*NOTREACHED*/

ud_trap:
        /*
         * We're going to let the kernel handle this as a normal #UD.  If,
         * however, we came through #BP and are spoofing #UD (in this case,
         * the stored error value will be non-zero), we need to de-spoof
         * the trap by incrementing %rip and pushing T_BPTFLT.
         */
        cmpq    $0, REGOFF_ERR(%rsp)
        je      ud_ud
        incq    REGOFF_RIP(%rsp)
        addq    $REGOFF_RIP, %rsp
        NPTRAP_NOERR(T_BPTFLT)  /* $3 */
        jmp     cmntrap

ud_ud:
        addq    $REGOFF_RIP, %rsp
ud_user:
        NPTRAP_NOERR(T_ILLINST)
        jmp     cmntrap
        SET_SIZE(invoptrap)

#elif defined(__i386)

        /*
         * #UD
         */
        ENTRY_NP(invoptrap)
        /*
         * If we are taking an invalid opcode trap while in the kernel, this
         * is likely an FBT probe point.
         */
        pushl   %gs
        cmpw    $KGS_SEL, (%esp)
        jne     8f

        addl    $4, %esp
#if defined(__xpv)
        movb    $0, 6(%esp)             /* clear saved upcall_mask from %cs */
#endif  /* __xpv */
        pusha
        pushl   %eax                    /* push %eax -- may be return value */
        pushl   %esp                    /* push stack pointer */
        addl    $48, (%esp)             /* adjust to incoming args */
        pushl   40(%esp)                /* push calling EIP */
        call    dtrace_invop
        ALTENTRY(dtrace_invop_callsite)
        addl    $12, %esp
        cmpl    $DTRACE_INVOP_PUSHL_EBP, %eax
        je      1f
        cmpl    $DTRACE_INVOP_POPL_EBP, %eax
        je      2f
        cmpl    $DTRACE_INVOP_LEAVE, %eax
        je      3f
        cmpl    $DTRACE_INVOP_NOP, %eax
        je      4f
        jmp     7f
1:
        /*
         * We must emulate a "pushl %ebp".  To do this, we pull the stack
         * down 4 bytes, and then store the base pointer.
         */
        popa
        subl    $4, %esp                /* make room for %ebp */
        pushl   %eax                    /* push temp */
        movl    8(%esp), %eax           /* load calling EIP */
        incl    %eax                    /* increment over LOCK prefix */
        movl    %eax, 4(%esp)           /* store calling EIP */
        movl    12(%esp), %eax          /* load calling CS */
        movl    %eax, 8(%esp)           /* store calling CS */
        movl    16(%esp), %eax          /* load calling EFLAGS */
        movl    %eax, 12(%esp)          /* store calling EFLAGS */
        movl    %ebp, 16(%esp)          /* push %ebp */
        popl    %eax                    /* pop off temp */
        jmp     _emul_done
2:
        /*
         * We must emulate a "popl %ebp".  To do this, we do the opposite of
         * the above:  we remove the %ebp from the stack, and squeeze up the
         * saved state from the trap.
         */
        popa
        pushl   %eax                    /* push temp */
        movl    16(%esp), %ebp          /* pop %ebp */
        movl    12(%esp), %eax          /* load calling EFLAGS */
        movl    %eax, 16(%esp)          /* store calling EFLAGS */
        movl    8(%esp), %eax           /* load calling CS */
        movl    %eax, 12(%esp)          /* store calling CS */
        movl    4(%esp), %eax           /* load calling EIP */
        incl    %eax                    /* increment over LOCK prefix */
        movl    %eax, 8(%esp)           /* store calling EIP */
        popl    %eax                    /* pop off temp */
        addl    $4, %esp                /* adjust stack pointer */
        jmp     _emul_done
3:
        /*
         * We must emulate a "leave", which is the same as a "movl %ebp, %esp"
         * followed by a "popl %ebp".  This looks similar to the above, but
         * requires two temporaries:  one for the new base pointer, and one
         * for the staging register.
         */
        popa
        pushl   %eax                    /* push temp */
        pushl   %ebx                    /* push temp */
        movl    %ebp, %ebx              /* set temp to old %ebp */
        movl    (%ebx), %ebp            /* pop %ebp */
        movl    16(%esp), %eax          /* load calling EFLAGS */
        movl    %eax, (%ebx)            /* store calling EFLAGS */
        movl    12(%esp), %eax          /* load calling CS */
        movl    %eax, -4(%ebx)          /* store calling CS */
        movl    8(%esp), %eax           /* load calling EIP */
        incl    %eax                    /* increment over LOCK prefix */
        movl    %eax, -8(%ebx)          /* store calling EIP */
        movl    %ebx, -4(%esp)          /* temporarily store new %esp */
        popl    %ebx                    /* pop off temp */
        popl    %eax                    /* pop off temp */
        movl    -12(%esp), %esp         /* set stack pointer */
        subl    $8, %esp                /* adjust for three pushes, one pop */
        jmp     _emul_done
4:
        /*
         * We must emulate a "nop".  This is obviously not hard:  we need only
         * advance the %eip by one.
         */
        popa
        incl    (%esp)
_emul_done:
        IRET                            /* return from interrupt */
7:
        popa
        pushl   $0
        pushl   $T_ILLINST      /* $6 */
        jmp     cmntrap
8:
        addl    $4, %esp
        pushl   $0
        pushl   $T_ILLINST      /* $6 */
        jmp     cmntrap
        SET_SIZE(invoptrap)

#endif  /* __i386 */

        /*
         * #NM
         */

        ENTRY_NP(ndptrap)
        TRAP_NOERR(T_NOEXTFLT)  /* $7 */
        SET_CPU_GSBASE
        jmp     cmntrap
        SET_SIZE(ndptrap)

#if !defined(__xpv)
#if defined(__amd64)

        /*
         * #DF
         */
        ENTRY_NP(syserrtrap)
        pushq   $T_DBLFLT
        SET_CPU_GSBASE

        /*
         * We share this handler with kmdb (if kmdb is loaded).  As such, we
         * may have reached this point after encountering a #df in kmdb.  If
         * that happens, we'll still be on kmdb's IDT.  We need to switch back
         * to this CPU's IDT before proceeding.  Furthermore, if we did arrive
         * here from kmdb, kmdb is probably in a very sickly state, and
         * shouldn't be entered from the panic flow.  We'll suppress that
         * entry by setting nopanicdebug.
         */
        pushq   %rax
        subq    $DESCTBR_SIZE, %rsp
        sidt    (%rsp)
        movq    %gs:CPU_IDT, %rax
        cmpq    %rax, DTR_BASE(%rsp)
        je      1f

        movq    %rax, DTR_BASE(%rsp)
        movw    $_MUL(NIDT, GATE_DESC_SIZE), DTR_LIMIT(%rsp)
        lidt    (%rsp)

        movl    $1, nopanicdebug

1:      addq    $DESCTBR_SIZE, %rsp
        popq    %rax

        DFTRAP_PUSH

        /*
         * freeze trap trace.
         */
#ifdef TRAPTRACE
        leaq    trap_trace_freeze(%rip), %r11
        incl    (%r11)
#endif

        ENABLE_INTR_FLAGS

        movq    %rsp, %rdi      /* &regs */
        xorl    %esi, %esi      /* clear address */
        xorl    %edx, %edx      /* cpuid = 0 */
        call    trap

        SET_SIZE(syserrtrap)

#elif defined(__i386)

        /*
         * #DF
         */
        ENTRY_NP(syserrtrap)
        cli                             /* disable interrupts */

        /*
         * We share this handler with kmdb (if kmdb is loaded).  As such, we
         * may have reached this point after encountering a #df in kmdb.  If
         * that happens, we'll still be on kmdb's IDT.  We need to switch back
         * to this CPU's IDT before proceeding.  Furthermore, if we did arrive
         * here from kmdb, kmdb is probably in a very sickly state, and
         * shouldn't be entered from the panic flow.  We'll suppress that
         * entry by setting nopanicdebug.
         */

        subl    $DESCTBR_SIZE, %esp
        movl    %gs:CPU_IDT, %eax
        sidt    (%esp)
        cmpl    DTR_BASE(%esp), %eax
        je      1f

        movl    %eax, DTR_BASE(%esp)
        movw    $_MUL(NIDT, GATE_DESC_SIZE), DTR_LIMIT(%esp)
        lidt    (%esp)

        movl    $1, nopanicdebug

1:      addl    $DESCTBR_SIZE, %esp

        /*
         * Check the CPL in the TSS to see what mode
         * (user or kernel) we took the fault in.  At this
         * point we are running in the context of the double
         * fault task (dftss) but the CPU's task points to
         * the previous task (ktss) where the process context
         * has been saved as the result of the task switch.
         */
        movl    %gs:CPU_TSS, %eax       /* get the TSS */
        movl    TSS_SS(%eax), %ebx      /* save the fault SS */
        movl    TSS_ESP(%eax), %edx     /* save the fault ESP */
        testw   $CPL_MASK, TSS_CS(%eax) /* user mode ? */
        jz      make_frame
        movw    TSS_SS0(%eax), %ss      /* get on the kernel stack */
        movl    TSS_ESP0(%eax), %esp

        /*
         * Clear the NT flag to avoid a task switch when the process
         * finally pops the EFL off the stack via an iret.  Clear
         * the TF flag since that is what the processor does for
         * a normal exception. Clear the IE flag so that interrupts
         * remain disabled.
         */
        movl    TSS_EFL(%eax), %ecx
        andl    $_BITNOT(PS_NT|PS_T|PS_IE), %ecx
        pushl   %ecx
        popfl                           /* restore the EFL */
        movw    TSS_LDT(%eax), %cx      /* restore the LDT */
        lldt    %cx

        /*
         * Restore process segment selectors.
         */
        movw    TSS_DS(%eax), %ds
        movw    TSS_ES(%eax), %es
        movw    TSS_FS(%eax), %fs
        movw    TSS_GS(%eax), %gs

        /*
         * Restore task segment selectors.
         */
        movl    $KDS_SEL, TSS_DS(%eax)
        movl    $KDS_SEL, TSS_ES(%eax)
        movl    $KDS_SEL, TSS_SS(%eax)
        movl    $KFS_SEL, TSS_FS(%eax)
        movl    $KGS_SEL, TSS_GS(%eax)

        /*
         * Clear the TS bit, the busy bits in both task
         * descriptors, and switch tasks.
         */
        clts
        leal    gdt0, %ecx
        movl    DFTSS_SEL+4(%ecx), %esi
        andl    $_BITNOT(0x200), %esi
        movl    %esi, DFTSS_SEL+4(%ecx)
        movl    KTSS_SEL+4(%ecx), %esi
        andl    $_BITNOT(0x200), %esi
        movl    %esi, KTSS_SEL+4(%ecx)
        movw    $KTSS_SEL, %cx
        ltr     %cx

        /*
         * Restore part of the process registers.
         */
        movl    TSS_EBP(%eax), %ebp
        movl    TSS_ECX(%eax), %ecx
        movl    TSS_ESI(%eax), %esi
        movl    TSS_EDI(%eax), %edi

make_frame:
        /*
         * Make a trap frame.  Leave the error code (0) on
         * the stack since the first word on a trap stack is
         * unused anyway.
         */
        pushl   %ebx                    / fault SS
        pushl   %edx                    / fault ESP
        pushl   TSS_EFL(%eax)           / fault EFL
        pushl   TSS_CS(%eax)            / fault CS
        pushl   TSS_EIP(%eax)           / fault EIP
        pushl   $0                      / error code
        pushl   $T_DBLFLT               / trap number 8
        movl    TSS_EBX(%eax), %ebx     / restore EBX
        movl    TSS_EDX(%eax), %edx     / restore EDX
        movl    TSS_EAX(%eax), %eax     / restore EAX
        sti                             / enable interrupts
        jmp     cmntrap
        SET_SIZE(syserrtrap)

#endif  /* __i386 */
#endif  /* !__xpv */

        /*
         * #TS
         */
        ENTRY_NP(invtsstrap)
        TRAP_ERR(T_TSSFLT)      /* $10 already have error code on stack */
        jmp     cmntrap
        SET_SIZE(invtsstrap)

        /*
         * #NP
         */
        ENTRY_NP(segnptrap)
        TRAP_ERR(T_SEGFLT)      /* $11 already have error code on stack */
#if defined(__amd64)
        SET_CPU_GSBASE
#endif
        jmp     cmntrap
        SET_SIZE(segnptrap)

        /*
         * #SS
         */
        ENTRY_NP(stktrap)
        TRAP_ERR(T_STKFLT)      /* $12 already have error code on stack */
#if defined(__amd64)
        SET_CPU_GSBASE
#endif
        jmp     cmntrap
        SET_SIZE(stktrap)

        /*
         * #GP
         */
        ENTRY_NP(gptrap)
        TRAP_ERR(T_GPFLT)       /* $13 already have error code on stack */
#if defined(__amd64)
        SET_CPU_GSBASE
#endif
        jmp     cmntrap
        SET_SIZE(gptrap)

        /*
         * #PF
         */
        ENTRY_NP(pftrap)
        TRAP_ERR(T_PGFLT)       /* $14 already have error code on stack */
        INTR_PUSH
#if defined(__xpv)

#if defined(__amd64)
        movq    %gs:CPU_VCPU_INFO, %r15
        movq    VCPU_INFO_ARCH_CR2(%r15), %r15  /* vcpu[].arch.cr2 */
#elif defined(__i386)
        movl    %gs:CPU_VCPU_INFO, %esi
        movl    VCPU_INFO_ARCH_CR2(%esi), %esi  /* vcpu[].arch.cr2 */
#endif  /* __i386 */

#else   /* __xpv */

#if defined(__amd64)
        movq    %cr2, %r15
#elif defined(__i386)
        movl    %cr2, %esi
#endif  /* __i386 */

#endif  /* __xpv */
        jmp     cmntrap_pushed
        SET_SIZE(pftrap)

#if !defined(__amd64)

        .globl  idt0_default_r

        /*
         * #PF pentium bug workaround
         */
        ENTRY_NP(pentium_pftrap)
        pushl   %eax
        movl    %cr2, %eax
        andl    $MMU_STD_PAGEMASK, %eax

        cmpl    %eax, %cs:idt0_default_r+2      /* fixme */

        je      check_for_user_address
user_mode:
        popl    %eax
        pushl   $T_PGFLT        /* $14 */
        jmp     cmntrap
check_for_user_address:
        /*
         * Before we assume that we have an unmapped trap on our hands,
         * check to see if this is a fault from user mode.  If it is,
         * we'll kick back into the page fault handler.
         */
        movl    4(%esp), %eax   /* error code */
        andl    $PF_ERR_USER, %eax
        jnz     user_mode

        /*
         * We now know that this is the invalid opcode trap.
         */
        popl    %eax
        addl    $4, %esp        /* pop error code */
        jmp     invoptrap
        SET_SIZE(pentium_pftrap)

#endif  /* !__amd64 */

        ENTRY_NP(resvtrap)
        TRAP_NOERR(T_RESVTRAP)  /* (reserved)  */
        jmp     cmntrap
        SET_SIZE(resvtrap)

        /*
         * #MF
         */
        ENTRY_NP(ndperr)
        TRAP_NOERR(T_EXTERRFLT) /* $16 */
        jmp     cmninttrap
        SET_SIZE(ndperr)

        /*
         * #AC
         */
        ENTRY_NP(achktrap)
        TRAP_ERR(T_ALIGNMENT)   /* $17 */
        jmp     cmntrap
        SET_SIZE(achktrap)

        /*
         * #MC
         */
        .globl  cmi_mca_trap    /* see uts/i86pc/os/cmi.c */

#if defined(__amd64)

        ENTRY_NP(mcetrap)
        TRAP_NOERR(T_MCE)       /* $18 */

        SET_CPU_GSBASE

        INTR_PUSH
        INTGATE_INIT_KERNEL_FLAGS

        TRACE_PTR(%rdi, %rbx, %ebx, %rcx, $TT_TRAP)
        TRACE_REGS(%rdi, %rsp, %rbx, %rcx)
        TRACE_STAMP(%rdi)

        movq    %rsp, %rbp

        movq    %rsp, %rdi      /* arg0 = struct regs *rp */
        call    cmi_mca_trap    /* cmi_mca_trap(rp); */

        jmp     _sys_rtt
        SET_SIZE(mcetrap)

#else

        ENTRY_NP(mcetrap)
        TRAP_NOERR(T_MCE)       /* $18 */

        INTR_PUSH
        INTGATE_INIT_KERNEL_FLAGS

        TRACE_PTR(%edi, %ebx, %ebx, %ecx, $TT_TRAP)
        TRACE_REGS(%edi, %esp, %ebx, %ecx)
        TRACE_STAMP(%edi)

        movl    %esp, %ebp

        movl    %esp, %ecx
        pushl   %ecx            /* arg0 = struct regs *rp */
        call    cmi_mca_trap    /* cmi_mca_trap(rp) */
        addl    $4, %esp        /* pop arg0 */

        jmp     _sys_rtt
        SET_SIZE(mcetrap)

#endif

        /*
         * #XF
         */
        ENTRY_NP(xmtrap)
        TRAP_NOERR(T_SIMDFPE)   /* $19 */
        jmp     cmninttrap
        SET_SIZE(xmtrap)

        ENTRY_NP(invaltrap)
        TRAP_NOERR(T_INVALTRAP) /* very invalid */
        jmp     cmntrap
        SET_SIZE(invaltrap)

        .globl  fasttable

#if defined(__amd64)

        ENTRY_NP(fasttrap)
        cmpl    $T_LASTFAST, %eax
        ja      1f
        orl     %eax, %eax      /* (zero extend top 32-bits) */
        leaq    fasttable(%rip), %r11
        leaq    (%r11, %rax, CLONGSIZE), %r11
        movq    (%r11), %r11
        INDIRECT_JMP_REG(r11)
1:
        /*
         * Fast syscall number was illegal.  Make it look
         * as if the INT failed.  Modify %rip to point before the
         * INT, push the expected error code and fake a GP fault.
         *
         * XXX Why make the error code be offset into idt + 1?
         * Instead we should push a real (soft?) error code
         * on the stack and #gp handler could know about fasttraps?
         */
        XPV_TRAP_POP

        subq    $2, (%rsp)      /* XXX int insn 2-bytes */
        pushq   $_CONST(_MUL(T_FASTTRAP, GATE_DESC_SIZE) + 2)

#if defined(__xpv)
        pushq   %r11
        pushq   %rcx
#endif
        jmp     gptrap
        SET_SIZE(fasttrap)

#elif defined(__i386)

        ENTRY_NP(fasttrap)
        cmpl    $T_LASTFAST, %eax
        ja      1f
        jmp     *%cs:fasttable(, %eax, CLONGSIZE)
1:
        /*
         * Fast syscall number was illegal.  Make it look
         * as if the INT failed.  Modify %eip to point before the
         * INT, push the expected error code and fake a GP fault.
         *
         * XXX Why make the error code be offset into idt + 1?
         * Instead we should push a real (soft?) error code
         * on the stack and #gp handler could know about fasttraps?
         */
        subl    $2, (%esp)      /* XXX int insn 2-bytes */
        pushl   $_CONST(_MUL(T_FASTTRAP, GATE_DESC_SIZE) + 2)
        jmp     gptrap
        SET_SIZE(fasttrap)

#endif  /* __i386 */

        ENTRY_NP(dtrace_ret)
        TRAP_NOERR(T_DTRACE_RET)
        jmp     dtrace_trap
        SET_SIZE(dtrace_ret)

#if defined(__amd64)

        /*
         * RFLAGS 24 bytes up the stack from %rsp.
         * XXX a constant would be nicer.
         */
        ENTRY_NP(fast_null)
        XPV_TRAP_POP
        orq     $PS_C, 24(%rsp) /* set carry bit in user flags */
        call    x86_md_clear
        jmp     tr_iret_auto
        /*NOTREACHED*/
        SET_SIZE(fast_null)

#elif defined(__i386)

        ENTRY_NP(fast_null)
        orw     $PS_C, 8(%esp)  /* set carry bit in user flags */
        IRET
        SET_SIZE(fast_null)

#endif  /* __i386 */

        /*
         * Interrupts start at 32
         */
#define MKIVCT(n)                       \
        ENTRY_NP(ivct/**/n)             \
        push    $0;                     \
        push    $n - 0x20;              \
        jmp     cmnint;                 \
        SET_SIZE(ivct/**/n)

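        /*
         * For reference (purely illustrative -- the assembler only ever sees
         * the macro above), MKIVCT(32) expands to approximately:
         *
         *      ENTRY_NP(ivct32)
         *      push    $0
         *      push    $32 - 0x20
         *      jmp     cmnint
         *      SET_SIZE(ivct32)
         *
         * i.e. one small entry point per interrupt vector, each of which
         * normalizes the frame in the same way before funnelling into the
         * common interrupt code (cmnint).
         */
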
        MKIVCT(32)
        MKIVCT(33)
        MKIVCT(34)
        MKIVCT(35)
        MKIVCT(36)
        MKIVCT(37)
        MKIVCT(38)
        MKIVCT(39)
        MKIVCT(40)
        MKIVCT(41)
        MKIVCT(42)
        MKIVCT(43)
        MKIVCT(44)
        MKIVCT(45)
        MKIVCT(46)
        MKIVCT(47)
        MKIVCT(48)
        MKIVCT(49)
        MKIVCT(50)
        MKIVCT(51)
        MKIVCT(52)
        MKIVCT(53)
        MKIVCT(54)
        MKIVCT(55)
        MKIVCT(56)
        MKIVCT(57)
        MKIVCT(58)
        MKIVCT(59)
        MKIVCT(60)
        MKIVCT(61)
        MKIVCT(62)
        MKIVCT(63)
        MKIVCT(64)
        MKIVCT(65)
        MKIVCT(66)
        MKIVCT(67)
        MKIVCT(68)
        MKIVCT(69)
        MKIVCT(70)
        MKIVCT(71)
        MKIVCT(72)
        MKIVCT(73)
        MKIVCT(74)
        MKIVCT(75)
        MKIVCT(76)
        MKIVCT(77)
        MKIVCT(78)
        MKIVCT(79)
        MKIVCT(80)
        MKIVCT(81)
        MKIVCT(82)
        MKIVCT(83)
        MKIVCT(84)
        MKIVCT(85)
        MKIVCT(86)
        MKIVCT(87)
        MKIVCT(88)
        MKIVCT(89)
        MKIVCT(90)
        MKIVCT(91)
        MKIVCT(92)
        MKIVCT(93)
        MKIVCT(94)
        MKIVCT(95)
        MKIVCT(96)
        MKIVCT(97)
        MKIVCT(98)
        MKIVCT(99)
        MKIVCT(100)
        MKIVCT(101)
        MKIVCT(102)
        MKIVCT(103)
        MKIVCT(104)
        MKIVCT(105)
        MKIVCT(106)
        MKIVCT(107)
        MKIVCT(108)
        MKIVCT(109)
        MKIVCT(110)
        MKIVCT(111)
        MKIVCT(112)
        MKIVCT(113)
        MKIVCT(114)
        MKIVCT(115)
        MKIVCT(116)
        MKIVCT(117)
        MKIVCT(118)
        MKIVCT(119)
        MKIVCT(120)
        MKIVCT(121)
        MKIVCT(122)
        MKIVCT(123)
        MKIVCT(124)
        MKIVCT(125)
        MKIVCT(126)
        MKIVCT(127)
        MKIVCT(128)
        MKIVCT(129)
        MKIVCT(130)
        MKIVCT(131)
        MKIVCT(132)
        MKIVCT(133)
        MKIVCT(134)
        MKIVCT(135)
        MKIVCT(136)
        MKIVCT(137)
        MKIVCT(138)
        MKIVCT(139)
        MKIVCT(140)
        MKIVCT(141)
        MKIVCT(142)
        MKIVCT(143)
        MKIVCT(144)
        MKIVCT(145)
        MKIVCT(146)
        MKIVCT(147)
        MKIVCT(148)
        MKIVCT(149)
        MKIVCT(150)
        MKIVCT(151)
        MKIVCT(152)
        MKIVCT(153)
        MKIVCT(154)
        MKIVCT(155)
        MKIVCT(156)
        MKIVCT(157)
        MKIVCT(158)
        MKIVCT(159)
        MKIVCT(160)
        MKIVCT(161)
        MKIVCT(162)
        MKIVCT(163)
        MKIVCT(164)
        MKIVCT(165)
        MKIVCT(166)
        MKIVCT(167)
        MKIVCT(168)
        MKIVCT(169)
        MKIVCT(170)
        MKIVCT(171)
        MKIVCT(172)
        MKIVCT(173)
        MKIVCT(174)
        MKIVCT(175)
        MKIVCT(176)
        MKIVCT(177)
        MKIVCT(178)
        MKIVCT(179)
        MKIVCT(180)
        MKIVCT(181)
        MKIVCT(182)
        MKIVCT(183)
        MKIVCT(184)
        MKIVCT(185)
        MKIVCT(186)
        MKIVCT(187)
        MKIVCT(188)
        MKIVCT(189)
        MKIVCT(190)
        MKIVCT(191)
        MKIVCT(192)
        MKIVCT(193)
        MKIVCT(194)
        MKIVCT(195)
        MKIVCT(196)
        MKIVCT(197)
        MKIVCT(198)
        MKIVCT(199)
        MKIVCT(200)
        MKIVCT(201)
        MKIVCT(202)
        MKIVCT(203)
        MKIVCT(204)
        MKIVCT(205)
        MKIVCT(206)
        MKIVCT(207)
        MKIVCT(208)
        MKIVCT(209)
        MKIVCT(210)
        MKIVCT(211)
        MKIVCT(212)
        MKIVCT(213)
        MKIVCT(214)
        MKIVCT(215)
        MKIVCT(216)
        MKIVCT(217)
        MKIVCT(218)
        MKIVCT(219)
        MKIVCT(220)
        MKIVCT(221)
        MKIVCT(222)
        MKIVCT(223)
        MKIVCT(224)
        MKIVCT(225)
        MKIVCT(226)
        MKIVCT(227)
        MKIVCT(228)
        MKIVCT(229)
        MKIVCT(230)
        MKIVCT(231)
        MKIVCT(232)
        MKIVCT(233)
        MKIVCT(234)
        MKIVCT(235)
        MKIVCT(236)
        MKIVCT(237)
        MKIVCT(238)
        MKIVCT(239)
        MKIVCT(240)
        MKIVCT(241)
        MKIVCT(242)
        MKIVCT(243)
        MKIVCT(244)
        MKIVCT(245)
        MKIVCT(246)
        MKIVCT(247)
        MKIVCT(248)
        MKIVCT(249)
        MKIVCT(250)
        MKIVCT(251)
        MKIVCT(252)
        MKIVCT(253)
        MKIVCT(254)
        MKIVCT(255)

#endif  /* __lint */