/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2014 Nexenta Systems, Inc.  All rights reserved.
 * Copyright (c) 2014 by Delphix. All rights reserved.
 * Copyright 2019 Joyent, Inc.
 */

/*
 *  Copyright (c) 1990, 1991 UNIX System Laboratories, Inc.
 *  Copyright (c) 1984, 1986, 1987, 1988, 1989, 1990 AT&T
 *    All Rights Reserved
 */

/*
 * Copyright (c) 2009, Intel Corporation.
 * All rights reserved.
 */

/*
 * General assembly language routines.
 * It is the intent of this file to contain routines that are
 * independent of the specific kernel architecture, and those that are
 * common across kernel architectures.
 * As architectures diverge, and implementations of specific
 * architecture-dependent routines change, the routines should be moved
 * from this file into the respective ../`arch -k`/subr.s file.
 */

#include <sys/asm_linkage.h>
#include <sys/asm_misc.h>
#include <sys/panic.h>
#include <sys/ontrap.h>
#include <sys/regset.h>
#include <sys/privregs.h>
#include <sys/reboot.h>
#include <sys/psw.h>
#include <sys/x86_archext.h>

#include "assym.h"
#include <sys/dditypes.h>

/*
 * on_fault()
 *
 * Catch lofault faults.  Like setjmp(), except that it returns 1 if the
 * code that follows causes an uncorrectable fault.  Turned off by
 * calling no_fault().  Note that while under on_fault(), SMAP is
 * disabled.  For more information see uts/intel/ia32/ml/copy.s.
 */

        ENTRY(on_fault)
        movq    %gs:CPU_THREAD, %rsi
        leaq    catch_fault(%rip), %rdx
        movq    %rdi, T_ONFAULT(%rsi)           /* jumpbuf in t_onfault */
        movq    %rdx, T_LOFAULT(%rsi)           /* catch_fault in t_lofault */
        call    smap_disable                    /* allow user accesses */
        jmp     setjmp                          /* let setjmp do the rest */

catch_fault:
        movq    %gs:CPU_THREAD, %rsi
        movq    T_ONFAULT(%rsi), %rdi           /* address of save area */
        xorl    %eax, %eax
        movq    %rax, T_ONFAULT(%rsi)           /* turn off onfault */
        movq    %rax, T_LOFAULT(%rsi)           /* turn off lofault */
        call    smap_enable                     /* disallow user accesses */
        jmp     longjmp                         /* let longjmp do the rest */
        SET_SIZE(on_fault)

        ENTRY(no_fault)
        movq    %gs:CPU_THREAD, %rsi
        xorl    %eax, %eax
        movq    %rax, T_ONFAULT(%rsi)           /* turn off onfault */
        movq    %rax, T_LOFAULT(%rsi)           /* turn off lofault */
        call    smap_enable                     /* disallow user accesses */
        ret
        SET_SIZE(no_fault)
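
/*
 * A typical C caller pairs the two routines above roughly as follows.
 * This is a sketch only (the error handling shown is illustrative, not
 * taken from any particular caller):
 *
 *	label_t ljb;
 *
 *	if (on_fault(&ljb)) {
 *		no_fault();
 *		return (EFAULT);	<- reached via longjmp on a fault
 *	}
 *	... touch user memory under t_lofault protection ...
 *	no_fault();
 */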

/*
 * Default trampoline code for on_trap() (see <sys/ontrap.h>).  We just
 * do a longjmp(&curthread->t_ontrap->ot_jmpbuf) if this is ever called.
 */

        ENTRY(on_trap_trampoline)
        movq    %gs:CPU_THREAD, %rsi
        movq    T_ONTRAP(%rsi), %rdi
        addq    $OT_JMPBUF, %rdi
        jmp     longjmp
        SET_SIZE(on_trap_trampoline)

/*
 * Push a new element on to the t_ontrap stack.  Refer to <sys/ontrap.h> for
 * more information about the on_trap() mechanism.  If the on_trap_data is the
 * same as the topmost stack element, we just modify that element.
 */

        ENTRY(on_trap)
        movw    %si, OT_PROT(%rdi)              /* ot_prot = prot */
        movw    $0, OT_TRAP(%rdi)               /* ot_trap = 0 */
        leaq    on_trap_trampoline(%rip), %rdx  /* rdx = &on_trap_trampoline */
        movq    %rdx, OT_TRAMPOLINE(%rdi)       /* ot_trampoline = rdx */
        xorl    %ecx, %ecx
        movq    %rcx, OT_HANDLE(%rdi)           /* ot_handle = NULL */
        movq    %rcx, OT_PAD1(%rdi)             /* ot_pad1 = NULL */
        movq    %gs:CPU_THREAD, %rdx            /* rdx = curthread */
        movq    T_ONTRAP(%rdx), %rcx            /* rcx = curthread->t_ontrap */
        cmpq    %rdi, %rcx                      /* if (otp == %rcx)     */
        je      0f                              /*      don't modify t_ontrap */

        movq    %rcx, OT_PREV(%rdi)             /* ot_prev = t_ontrap */
        movq    %rdi, T_ONTRAP(%rdx)            /* curthread->t_ontrap = otp */

0:      addq    $OT_JMPBUF, %rdi                /* &ot_jmpbuf */
        jmp     setjmp
        SET_SIZE(on_trap)
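
/*
 * Sketch of a typical on_trap() caller in C (illustrative only; see
 * <sys/ontrap.h> for the authoritative interface).  on_trap() returns
 * 0 when first called, and non-zero when control arrives back via the
 * trampoline after a trap:
 *
 *	on_trap_data_t otd;
 *
 *	if (!on_trap(&otd, OT_DATA_ACCESS)) {
 *		... code that may take a data access trap ...
 *	}
 *	no_trap();
 */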

/*
 * Setjmp and longjmp implement non-local gotos using state vectors
 * of type label_t.
 */

#if LABEL_PC != 0
#error LABEL_PC MUST be defined as 0 for setjmp/longjmp to work as coded
#endif  /* LABEL_PC != 0 */

        ENTRY(setjmp)
        movq    %rsp, LABEL_SP(%rdi)
        movq    %rbp, LABEL_RBP(%rdi)
        movq    %rbx, LABEL_RBX(%rdi)
        movq    %r12, LABEL_R12(%rdi)
        movq    %r13, LABEL_R13(%rdi)
        movq    %r14, LABEL_R14(%rdi)
        movq    %r15, LABEL_R15(%rdi)
        movq    (%rsp), %rdx            /* return address */
        movq    %rdx, (%rdi)            /* LABEL_PC is 0 */
        xorl    %eax, %eax              /* return 0 */
        ret
        SET_SIZE(setjmp)

        ENTRY(longjmp)
        movq    LABEL_SP(%rdi), %rsp
        movq    LABEL_RBP(%rdi), %rbp
        movq    LABEL_RBX(%rdi), %rbx
        movq    LABEL_R12(%rdi), %r12
        movq    LABEL_R13(%rdi), %r13
        movq    LABEL_R14(%rdi), %r14
        movq    LABEL_R15(%rdi), %r15
        movq    (%rdi), %rdx            /* return address; LABEL_PC is 0 */
        movq    %rdx, (%rsp)
        xorl    %eax, %eax
        incl    %eax                    /* return 1 */
        ret
        SET_SIZE(longjmp)
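
/*
 * In C terms the pair behaves like this sketch (the kernel-internal
 * setjmp/longjmp over a label_t, not the userland <setjmp.h> API):
 *
 *	label_t jb;
 *
 *	if (setjmp(&jb) == 0) {
 *		... normal path; a later longjmp(&jb) ...
 *	} else {
 *		... resumes here, with setjmp() "returning" 1 ...
 *	}
 */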

/*
 * If a() calls b(), and b() calls caller(), then caller() returns the
 * return address in a().
 * (Note: we assume that a() and b() are C routines which perform the
 *  normal entry/exit sequence.)
 */

        ENTRY(caller)
        movq    8(%rbp), %rax           /* b()'s return pc, in a() */
        ret
        SET_SIZE(caller)

/*
 * If a() calls callee(), callee() returns the
 * return address in a().
 */

        ENTRY(callee)
        movq    (%rsp), %rax            /* callee()'s return pc, in a() */
        ret
        SET_SIZE(callee)

/*
 * Return the current frame pointer.
 */

        ENTRY(getfp)
        movq    %rbp, %rax
        ret
        SET_SIZE(getfp)

/*
 * Invalidate a single page table entry in the TLB.
 */

        ENTRY(mmu_invlpg)
        invlpg  (%rdi)
        ret
        SET_SIZE(mmu_invlpg)


/*
 * Get/Set the value of various control registers.
 */

        ENTRY(getcr0)
        movq    %cr0, %rax
        ret
        SET_SIZE(getcr0)

        ENTRY(setcr0)
        movq    %rdi, %cr0
        ret
        SET_SIZE(setcr0)

        ENTRY(getcr2)
#if defined(__xpv)
        movq    %gs:CPU_VCPU_INFO, %rax
        movq    VCPU_INFO_ARCH_CR2(%rax), %rax
#else
        movq    %cr2, %rax
#endif
        ret
        SET_SIZE(getcr2)

        ENTRY(getcr3)
        movq    %cr3, %rax
        ret
        SET_SIZE(getcr3)

#if !defined(__xpv)

        ENTRY(setcr3)
        movq    %rdi, %cr3
        ret
        SET_SIZE(setcr3)

        ENTRY(reload_cr3)
        movq    %cr3, %rdi
        movq    %rdi, %cr3
        ret
        SET_SIZE(reload_cr3)

#endif  /* __xpv */

        ENTRY(getcr4)
        movq    %cr4, %rax
        ret
        SET_SIZE(getcr4)

        ENTRY(setcr4)
        movq    %rdi, %cr4
        ret
        SET_SIZE(setcr4)

        ENTRY(getcr8)
        movq    %cr8, %rax
        ret
        SET_SIZE(getcr8)

        ENTRY(setcr8)
        movq    %rdi, %cr8
        ret
        SET_SIZE(setcr8)

        ENTRY(__cpuid_insn)
        movq    %rbx, %r8
        movq    %rcx, %r9
        movq    %rdx, %r11
        movl    (%rdi), %eax            /* %eax = regs->cp_eax */
        movl    0x4(%rdi), %ebx         /* %ebx = regs->cp_ebx */
        movl    0x8(%rdi), %ecx         /* %ecx = regs->cp_ecx */
        movl    0xc(%rdi), %edx         /* %edx = regs->cp_edx */
        cpuid
        movl    %eax, (%rdi)            /* regs->cp_eax = %eax */
        movl    %ebx, 0x4(%rdi)         /* regs->cp_ebx = %ebx */
        movl    %ecx, 0x8(%rdi)         /* regs->cp_ecx = %ecx */
        movl    %edx, 0xc(%rdi)         /* regs->cp_edx = %edx */
        movq    %r8, %rbx
        movq    %r9, %rcx
        movq    %r11, %rdx
        ret
        SET_SIZE(__cpuid_insn)
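
/*
 * The 0x0/0x4/0x8/0xc offsets above assume the argument points at a
 * structure laid out like the following sketch (see the real
 * definition of struct cpuid_regs in <sys/x86_archext.h>):
 *
 *	struct cpuid_regs {
 *		uint32_t cp_eax;
 *		uint32_t cp_ebx;
 *		uint32_t cp_ecx;
 *		uint32_t cp_edx;
 *	};
 */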

        ENTRY_NP(i86_monitor)
        pushq   %rbp
        movq    %rsp, %rbp
        movq    %rdi, %rax              /* addr */
        movq    %rsi, %rcx              /* extensions */
        /* rdx contains input arg3: hints */
        clflush (%rax)
        .byte   0x0f, 0x01, 0xc8        /* monitor */
        leave
        ret
        SET_SIZE(i86_monitor)

        ENTRY_NP(i86_mwait)
        pushq   %rbp
        call    x86_md_clear
        movq    %rsp, %rbp
        movq    %rdi, %rax              /* data */
        movq    %rsi, %rcx              /* extensions */
        .byte   0x0f, 0x01, 0xc9        /* mwait */
        leave
        ret
        SET_SIZE(i86_mwait)

#if defined(__xpv)
        /*
         * Defined in C
         */
#else

        ENTRY_NP(tsc_read)
        movq    %rbx, %r11
        movl    $0, %eax
        cpuid
        rdtsc
        movq    %r11, %rbx
        shlq    $32, %rdx
        orq     %rdx, %rax
        ret
        .globl _tsc_mfence_start
_tsc_mfence_start:
        mfence
        rdtsc
        shlq    $32, %rdx
        orq     %rdx, %rax
        ret
        .globl _tsc_mfence_end
_tsc_mfence_end:
        .globl _tscp_start
_tscp_start:
        .byte   0x0f, 0x01, 0xf9        /* rdtscp instruction */
        shlq    $32, %rdx
        orq     %rdx, %rax
        ret
        .globl _tscp_end
_tscp_end:
        .globl _no_rdtsc_start
_no_rdtsc_start:
        xorl    %edx, %edx
        xorl    %eax, %eax
        ret
        .globl _no_rdtsc_end
_no_rdtsc_end:
        .globl _tsc_lfence_start
_tsc_lfence_start:
        lfence
        rdtsc
        shlq    $32, %rdx
        orq     %rdx, %rax
        ret
        .globl _tsc_lfence_end
_tsc_lfence_end:
        SET_SIZE(tsc_read)


#endif  /* __xpv */

        ENTRY_NP(randtick)
        rdtsc
        shlq    $32, %rdx
        orq     %rdx, %rax
        ret
        SET_SIZE(randtick)

/*
 * Insert entryp after predp in a doubly linked list.
 */

        ENTRY(_insque)
        movq    (%rsi), %rax            /* predp->forw                  */
        movq    %rsi, CPTRSIZE(%rdi)    /* entryp->back = predp         */
        movq    %rax, (%rdi)            /* entryp->forw = predp->forw   */
        movq    %rdi, (%rsi)            /* predp->forw = entryp         */
        movq    %rdi, CPTRSIZE(%rax)    /* predp->forw->back = entryp   */
        ret
        SET_SIZE(_insque)

/*
 * Remove entryp from a doubly linked list.
 */

        ENTRY(_remque)
        movq    (%rdi), %rax            /* entry->forw */
        movq    CPTRSIZE(%rdi), %rdx    /* entry->back */
        movq    %rax, (%rdx)            /* entry->back->forw = entry->forw */
        movq    %rdx, CPTRSIZE(%rax)    /* entry->forw->back = entry->back */
        ret
        SET_SIZE(_remque)
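
/*
 * In C, assuming entries that begin with forward and back pointers (a
 * transliteration sketch, in the spirit of the strlen() comment below):
 *
 *	struct qelem {
 *		struct qelem *q_forw;
 *		struct qelem *q_back;
 *	};
 *
 *	void
 *	_insque(struct qelem *entryp, struct qelem *predp)
 *	{
 *		entryp->q_back = predp;
 *		entryp->q_forw = predp->q_forw;
 *		predp->q_forw = entryp;
 *		entryp->q_forw->q_back = entryp;
 *	}
 *
 *	void
 *	_remque(struct qelem *entryp)
 *	{
 *		entryp->q_back->q_forw = entryp->q_forw;
 *		entryp->q_forw->q_back = entryp->q_back;
 *	}
 */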

/*
 * Returns the number of non-NUL bytes in the string argument.
 */

/*
 * This is close to a simple transliteration of a C version of this
 * routine.  We should either just -make- this be a C version, or
 * justify having it in assembler by making it significantly faster.
 *
 * size_t
 * strlen(const char *s)
 * {
 *	const char *s0;
 * #if defined(DEBUG)
 *	if ((uintptr_t)s < KERNELBASE)
 *		panic(.str_panic_msg);
 * #endif
 *	for (s0 = s; *s; s++)
 *		;
 *	return (s - s0);
 * }
 */

        ENTRY(strlen)
#ifdef DEBUG
        movq    postbootkernelbase(%rip), %rax
        cmpq    %rax, %rdi
        jae     str_valid
        pushq   %rbp
        movq    %rsp, %rbp
        leaq    .str_panic_msg(%rip), %rdi
        xorl    %eax, %eax
        call    panic
#endif  /* DEBUG */
str_valid:
        cmpb    $0, (%rdi)
        movq    %rdi, %rax
        je      .null_found
        .align  4
.strlen_loop:
        incq    %rdi
        cmpb    $0, (%rdi)
        jne     .strlen_loop
.null_found:
        subq    %rax, %rdi
        movq    %rdi, %rax
        ret
        SET_SIZE(strlen)

#ifdef DEBUG
        .text
.str_panic_msg:
        .string "strlen: argument below kernelbase"
#endif /* DEBUG */

        /*
         * Berkeley 4.3 introduced symbolically named interrupt levels
         * as a way to deal with priority in a machine-independent fashion.
         * Numbered priorities are machine-specific, and should be
         * discouraged where possible.
         *
         * Note that for the machine-specific priorities there are
         * examples listed for devices that use a particular priority.
         * It should not be construed that all devices of that
         * type should be at that priority.  It is simply where the
         * current devices happen to fit into the priority scheme based
         * upon time criticality.
         *
         * The underlying assumption of these assignments is that
         * IPL 10 is the highest level from which a device
         * routine can call wakeup.  Devices that interrupt from higher
         * levels are restricted in what they can do.  If they need
         * kernel services they should schedule a routine at a lower
         * level (via software interrupt) to do the required
         * processing.
         *
         * Examples of this higher usage:
         *      Level   Usage
         *      14      Profiling clock (and PROM uart polling clock)
         *      12      Serial ports
         *
         * The serial ports request lower level processing on level 6.
         *
         * Also, almost all splN routines (where N is a number or a
         * mnemonic) will do a RAISE(), on the assumption that they are
         * never used to lower our priority.
         * The exceptions are:
         *      spl8()          Because you can't be above 15 to begin with!
         *      splzs()         Because this is used at boot time to lower our
         *                      priority, to allow the PROM to poll the uart.
         *      spl0()          Used to lower priority to 0.
         */

#define SETPRI(level) \
        movl    $/**/level, %edi;       /* new priority */              \
        jmp     do_splx                 /* redirect to do_splx */

#define RAISE(level) \
        movl    $/**/level, %edi;       /* new priority */              \
        jmp     splr                    /* redirect to splr */

        /* locks out all interrupts, including memory errors */
        ENTRY(spl8)
        SETPRI(15)
        SET_SIZE(spl8)

        /* just below the level that profiling runs */
        ENTRY(spl7)
        RAISE(13)
        SET_SIZE(spl7)

        /* sun specific - highest priority onboard serial i/o asy ports */
        ENTRY(splzs)
        SETPRI(12)      /* Can't be a RAISE, as it's used to lower us */
        SET_SIZE(splzs)

        ENTRY(splhi)
        ALTENTRY(splhigh)
        ALTENTRY(spl6)
        ALTENTRY(i_ddi_splhigh)

        RAISE(DISP_LEVEL)

        SET_SIZE(i_ddi_splhigh)
        SET_SIZE(spl6)
        SET_SIZE(splhigh)
        SET_SIZE(splhi)

        /* allow all interrupts */
        ENTRY(spl0)
        SETPRI(0)
        SET_SIZE(spl0)


        /* splx implementation */
        ENTRY(splx)
        jmp     do_splx         /* redirect to common splx code */
        SET_SIZE(splx)

        ENTRY(wait_500ms)
        pushq   %rbx
        movl    $50000, %ebx
1:
        call    tenmicrosec
        decl    %ebx
        jnz     1b
        popq    %rbx
        ret
        SET_SIZE(wait_500ms)

#define RESET_METHOD_KBC        1
#define RESET_METHOD_PORT92     2
#define RESET_METHOD_PCI        4

        DGDEF3(pc_reset_methods, 4, 8)
        .long RESET_METHOD_KBC|RESET_METHOD_PORT92|RESET_METHOD_PCI;

        ENTRY(pc_reset)

        testl   $RESET_METHOD_KBC, pc_reset_methods(%rip)
        jz      1f

        /
        / Try the classic keyboard controller-triggered reset.
        /
        movw    $0x64, %dx
        movb    $0xfe, %al
        outb    (%dx)

        / Wait up to 500 milliseconds here for the keyboard controller
        / to pull the reset line.  On some systems where the keyboard
        / controller is slow to pull the reset line, the next reset method
        / may be executed (which may be bad if those systems hang when the
        / next reset method is used, e.g. Ferrari 3400 (doesn't like port 92),
        / and Ferrari 4000 (doesn't like the cf9 reset method))

        call    wait_500ms

1:
        testl   $RESET_METHOD_PORT92, pc_reset_methods(%rip)
        jz      3f

        /
        / Try port 0x92 fast reset
        /
        movw    $0x92, %dx
        inb     (%dx)
        cmpb    $0xff, %al      / If port's not there, we should get back 0xFF
        je      1f
        testb   $1, %al         / If bit 0
        jz      2f              / is clear, jump to perform the reset
        andb    $0xfe, %al      / otherwise,
        outb    (%dx)           / clear bit 0 first, then
2:
        orb     $1, %al         / Set bit 0
        outb    (%dx)           / and reset the system
1:

        call    wait_500ms

3:
        testl   $RESET_METHOD_PCI, pc_reset_methods(%rip)
        jz      4f

        / Try the PCI (soft) reset vector (should work on all modern systems,
        / but has been shown to cause problems on 450NX systems, and some newer
        / systems (e.g. ATI IXP400-equipped systems))
        / When resetting via this method, 2 writes are required.  The first
        / targets bit 1 (0=hard reset without power cycle, 1=hard reset with
        / power cycle).
        / The reset occurs on the second write, during bit 2's transition from
        / 0->1.
        movw    $0xcf9, %dx
        movb    $0x2, %al       / Reset mode = hard, no power cycle
        outb    (%dx)
        movb    $0x6, %al
        outb    (%dx)

        call    wait_500ms

4:
        /
        / port 0xcf9 failed also.  Last-ditch effort is to
        / triple-fault the CPU.
        / Also, use triple fault for EFI firmware
        /
        ENTRY(efi_reset)
        pushq   $0x0
        pushq   $0x0            / IDT base of 0, limit of 0 + 2 unused bytes
        lidt    (%rsp)
        int     $0x0            / Trigger interrupt, generate triple-fault

        cli
        hlt                     / Wait forever
        /*NOTREACHED*/
        SET_SIZE(efi_reset)
        SET_SIZE(pc_reset)

/*
 * C callable in and out routines.
 */

        ENTRY(outl)
        movw    %di, %dx
        movl    %esi, %eax
        outl    (%dx)
        ret
        SET_SIZE(outl)

        ENTRY(outw)
        movw    %di, %dx
        movw    %si, %ax
        D16 outl (%dx)          /* XX64 why not outw? */
        ret
        SET_SIZE(outw)

        ENTRY(outb)
        movw    %di, %dx
        movb    %sil, %al
        outb    (%dx)
        ret
        SET_SIZE(outb)

        ENTRY(inl)
        xorl    %eax, %eax
        movw    %di, %dx
        inl     (%dx)
        ret
        SET_SIZE(inl)

        ENTRY(inw)
        xorl    %eax, %eax
        movw    %di, %dx
        D16 inl (%dx)
        ret
        SET_SIZE(inw)


        ENTRY(inb)
        xorl    %eax, %eax
        movw    %di, %dx
        inb     (%dx)
        ret
        SET_SIZE(inb)


        ENTRY(repoutsw)
        movl    %edx, %ecx
        movw    %di, %dx
        rep
          D16 outsl
        ret
        SET_SIZE(repoutsw)


        ENTRY(repinsw)
        movl    %edx, %ecx
        movw    %di, %dx
        rep
          D16 insl
        ret
        SET_SIZE(repinsw)


        ENTRY(repinsb)
        movl    %edx, %ecx
        movw    %di, %dx
        movq    %rsi, %rdi
        rep
          insb
        ret
        SET_SIZE(repinsb)


/*
 * Input a stream of 32-bit words.
 * NOTE: count is a DWORD count.
 */

        ENTRY(repinsd)
        movl    %edx, %ecx
        movw    %di, %dx
        movq    %rsi, %rdi
        rep
          insl
        ret
        SET_SIZE(repinsd)

/*
 * Output a stream of bytes.
 * NOTE: count is a byte count.
 */

        ENTRY(repoutsb)
        movl    %edx, %ecx
        movw    %di, %dx
        rep
          outsb
        ret
        SET_SIZE(repoutsb)

/*
 * Output a stream of 32-bit words.
 * NOTE: count is a DWORD count.
 */

        ENTRY(repoutsd)
        movl    %edx, %ecx
        movw    %di, %dx
        rep
          outsl
        ret
        SET_SIZE(repoutsd)

/*
 * void int3(void)
 * void int18(void)
 * void int20(void)
 * void int_cmci(void)
 */

        ENTRY(int3)
        int     $T_BPTFLT
        ret
        SET_SIZE(int3)

        ENTRY(int18)
        int     $T_MCE
        ret
        SET_SIZE(int18)

        ENTRY(int20)
        movl    boothowto, %eax
        andl    $RB_DEBUG, %eax
        jz      1f

        int     $T_DBGENTR
1:
        rep;    ret     /* use 2 byte return instruction when branch target */
                        /* AMD Software Optimization Guide - Section 6.2 */
        SET_SIZE(int20)

        ENTRY(int_cmci)
        int     $T_ENOEXTFLT
        ret
        SET_SIZE(int_cmci)

        ENTRY(scanc)
                                        /* rdi == size */
                                        /* rsi == cp */
                                        /* rdx == table */
                                        /* rcx == mask */
        addq    %rsi, %rdi              /* end = &cp[size] */
.scanloop:
        cmpq    %rdi, %rsi              /* while (cp < end */
        jnb     .scandone
        movzbq  (%rsi), %r8             /* %r8 = *cp */
        incq    %rsi                    /* cp++ */
        testb   %cl, (%r8, %rdx)
        jz      .scanloop               /*  && (table[*cp] & mask) == 0) */
        decq    %rsi                    /* (fix post-increment) */
.scandone:
        movl    %edi, %eax
        subl    %esi, %eax              /* return (end - cp) */
        ret
        SET_SIZE(scanc)
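
/*
 * C equivalent of scanc(), matching the register comments above (a
 * transliteration sketch, as with strlen() earlier in this file):
 *
 *	size_t
 *	scanc(size_t size, uchar_t *cp, uchar_t *table, uchar_t mask)
 *	{
 *		uchar_t *end = &cp[size];
 *
 *		while (cp < end && (table[*cp] & mask) == 0)
 *			cp++;
 *		return (end - cp);
 *	}
 */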

/*
 * Replacement functions for ones that are normally inlined.
 * In addition to the copy in i86.il, they are defined here just in case.
 */

        ENTRY(intr_clear)
        ENTRY(clear_int_flag)
        pushfq
        popq    %rax
#if defined(__xpv)
        leaq    xpv_panicking, %rdi
        movl    (%rdi), %edi
        cmpl    $0, %edi
        jne     2f
        CLIRET(%rdi, %dl)       /* returns event mask in %dl */
        /*
         * Synthesize the PS_IE bit from the event mask bit
         */
        andq    $_BITNOT(PS_IE), %rax
        testb   $1, %dl
        jnz     1f
        orq     $PS_IE, %rax
1:
        ret
2:
#endif
        CLI(%rdi)
        ret
        SET_SIZE(clear_int_flag)
        SET_SIZE(intr_clear)

        ENTRY(curcpup)
        movq    %gs:CPU_SELF, %rax
        ret
        SET_SIZE(curcpup)

/*
 * htonll(), ntohll(), htonl(), ntohl(), htons(), ntohs()
 * These functions reverse the byte order of the input parameter and return
 * the result.  This is to convert the byte order from host byte order
 * (little endian) to network byte order (big endian), or vice versa.
 */

        ENTRY(htonll)
        ALTENTRY(ntohll)
        movq    %rdi, %rax
        bswapq  %rax
        ret
        SET_SIZE(ntohll)
        SET_SIZE(htonll)

        /* XX64 there must be shorter sequences for this */
        ENTRY(htonl)
        ALTENTRY(ntohl)
        movl    %edi, %eax
        bswap   %eax
        ret
        SET_SIZE(ntohl)
        SET_SIZE(htonl)

        /* XX64 there must be better sequences for this */
        ENTRY(htons)
        ALTENTRY(ntohs)
        movl    %edi, %eax
        bswap   %eax
        shrl    $16, %eax
        ret
        SET_SIZE(ntohs)
        SET_SIZE(htons)
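
/*
 * Worked example: on a little-endian host,
 *
 *	htonl(0x12345678) == 0x78563412
 *	htons(0x1234)     == 0x3412
 *
 * and ntohl()/ntohs() perform the identical byte swap in the other
 * direction, so ntohl(htonl(x)) == x.
 */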


        ENTRY(intr_restore)
        ENTRY(restore_int_flag)
        testq   $PS_IE, %rdi
        jz      1f
#if defined(__xpv)
        leaq    xpv_panicking, %rsi
        movl    (%rsi), %esi
        cmpl    $0, %esi
        jne     1f
        /*
         * Since we're -really- running unprivileged, our attempt
         * to change the state of the IF bit will be ignored.
         * The virtual IF bit is tweaked by CLI and STI.
         */
        IE_TO_EVENT_MASK(%rsi, %rdi)
#else
        sti
#endif
1:
        ret
        SET_SIZE(restore_int_flag)
        SET_SIZE(intr_restore)

        ENTRY(sti)
        STI
        ret
        SET_SIZE(sti)

        ENTRY(cli)
        CLI(%rax)
        ret
        SET_SIZE(cli)

        ENTRY(dtrace_interrupt_disable)
        pushfq
        popq    %rax
#if defined(__xpv)
        leaq    xpv_panicking, %rdi
        movl    (%rdi), %edi
        cmpl    $0, %edi
        jne     .dtrace_interrupt_disable_done
        CLIRET(%rdi, %dl)       /* returns event mask in %dl */
        /*
         * Synthesize the PS_IE bit from the event mask bit
         */
        andq    $_BITNOT(PS_IE), %rax
        testb   $1, %dl
        jnz     .dtrace_interrupt_disable_done
        orq     $PS_IE, %rax
#else
        CLI(%rdx)
#endif
.dtrace_interrupt_disable_done:
        ret
        SET_SIZE(dtrace_interrupt_disable)

        ENTRY(dtrace_interrupt_enable)
        pushq   %rdi
        popfq
#if defined(__xpv)
        leaq    xpv_panicking, %rdx
        movl    (%rdx), %edx
        cmpl    $0, %edx
        jne     .dtrace_interrupt_enable_done
        /*
         * Since we're -really- running unprivileged, our attempt
         * to change the state of the IF bit will be ignored. The
         * virtual IF bit is tweaked by CLI and STI.
         */
        IE_TO_EVENT_MASK(%rdx, %rdi)
#endif
.dtrace_interrupt_enable_done:
        ret
        SET_SIZE(dtrace_interrupt_enable)


        ENTRY(dtrace_membar_producer)
        rep;    ret     /* use 2 byte return instruction when branch target */
                        /* AMD Software Optimization Guide - Section 6.2 */
        SET_SIZE(dtrace_membar_producer)

        ENTRY(dtrace_membar_consumer)
        rep;    ret     /* use 2 byte return instruction when branch target */
                        /* AMD Software Optimization Guide - Section 6.2 */
        SET_SIZE(dtrace_membar_consumer)

        ENTRY(threadp)
        movq    %gs:CPU_THREAD, %rax
        ret
        SET_SIZE(threadp)

/*
 * Checksum routine for Internet Protocol headers.
 */

        ENTRY(ip_ocsum)
        pushq   %rbp
        movq    %rsp, %rbp
#ifdef DEBUG
        movq    postbootkernelbase(%rip), %rax
        cmpq    %rax, %rdi
        jnb     1f
        xorl    %eax, %eax
        movq    %rdi, %rsi
        leaq    .ip_ocsum_panic_msg(%rip), %rdi
        call    panic
        /*NOTREACHED*/
.ip_ocsum_panic_msg:
        .string "ip_ocsum: address 0x%p below kernelbase\n"
1:
#endif
        movl    %esi, %ecx      /* halfword_count */
        movq    %rdi, %rsi      /* address */
                                /* partial sum in %edx */
        xorl    %eax, %eax
        testl   %ecx, %ecx
        jz      .ip_ocsum_done
        testq   $3, %rsi
        jnz     .ip_csum_notaligned
.ip_csum_aligned:       /* XX64 opportunities for 8-byte operations? */
.next_iter:
        /* XX64 opportunities for prefetch? */
        /* XX64 compute csum with 64 bit quantities? */
        subl    $32, %ecx
        jl      .less_than_32

        addl    0(%rsi), %edx
.only60:
        adcl    4(%rsi), %eax
.only56:
        adcl    8(%rsi), %edx
.only52:
        adcl    12(%rsi), %eax
.only48:
        adcl    16(%rsi), %edx
.only44:
        adcl    20(%rsi), %eax
.only40:
        adcl    24(%rsi), %edx
.only36:
        adcl    28(%rsi), %eax
.only32:
        adcl    32(%rsi), %edx
.only28:
        adcl    36(%rsi), %eax
.only24:
        adcl    40(%rsi), %edx
.only20:
        adcl    44(%rsi), %eax
.only16:
        adcl    48(%rsi), %edx
.only12:
        adcl    52(%rsi), %eax
.only8:
        adcl    56(%rsi), %edx
.only4:
        adcl    60(%rsi), %eax  /* could be adding -1 and -1 with a carry */
.only0:
        adcl    $0, %eax        /* could be adding -1 in eax with a carry */
        adcl    $0, %eax

        addq    $64, %rsi
        testl   %ecx, %ecx
        jnz     .next_iter

.ip_ocsum_done:
        addl    %eax, %edx
        adcl    $0, %edx
        movl    %edx, %eax      /* form a 16 bit checksum by */
        shrl    $16, %eax       /* adding two halves of 32 bit checksum */
        addw    %dx, %ax
        adcw    $0, %ax
        andl    $0xffff, %eax
        leave
        ret

.ip_csum_notaligned:
        xorl    %edi, %edi
        movw    (%rsi), %di
        addl    %edi, %edx
        adcl    $0, %edx
        addq    $2, %rsi
        decl    %ecx
        jmp     .ip_csum_aligned

.less_than_32:
        addl    $32, %ecx
        testl   $1, %ecx
        jz      .size_aligned
        andl    $0xfe, %ecx
        movzwl  (%rsi, %rcx, 2), %edi
        addl    %edi, %edx
        adcl    $0, %edx
.size_aligned:
        movl    %ecx, %edi
        shrl    $1, %ecx
        shl     $1, %edi
        subq    $64, %rdi
        addq    %rdi, %rsi
        leaq    .ip_ocsum_jmptbl(%rip), %rdi
        leaq    (%rdi, %rcx, 8), %rdi
        xorl    %ecx, %ecx
        clc
        movq    (%rdi), %rdi
        INDIRECT_JMP_REG(rdi)

        .align  8
.ip_ocsum_jmptbl:
        .quad   .only0, .only4, .only8, .only12, .only16, .only20
        .quad   .only24, .only28, .only32, .only36, .only40, .only44
        .quad   .only48, .only52, .only56, .only60
        SET_SIZE(ip_ocsum)
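
/*
 * Reference version in C (a sketch: the assembly above unrolls the loop
 * 32 halfwords at a time and alternates between two accumulators, but
 * the arithmetic is the same ones'-complement sum with end-around
 * carry folded into 16 bits):
 *
 *	unsigned int
 *	ip_ocsum(ushort_t *address, int halfword_count, unsigned int sum)
 *	{
 *		while (halfword_count-- > 0)
 *			sum += *address++;
 *		sum = (sum >> 16) + (sum & 0xffff);
 *		sum += (sum >> 16);
 *		return (sum & 0xffff);
 *	}
 */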

/*
 * Multiply two 32-bit unsigned numbers and yield a u_longlong_t result,
 * callable from C.  Provided to manipulate hrtime_t values.
 */

        ENTRY(mul32)
        xorl    %edx, %edx      /* XX64 joe, paranoia? */
        movl    %edi, %eax
        mull    %esi
        shlq    $32, %rdx
        orq     %rdx, %rax
        ret
        SET_SIZE(mul32)
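
/*
 * Equivalent C (sketch): an unsigned 32x32 -> 64 bit multiply.
 *
 *	uint64_t
 *	mul32(uint_t a, uint_t b)
 *	{
 *		return ((uint64_t)a * (uint64_t)b);
 *	}
 */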

        ENTRY(scan_memory)
        shrq    $3, %rsi        /* convert %rsi from byte to quadword count */
        jz      .scanm_done
        movq    %rsi, %rcx      /* move count into rep control register */
        movq    %rdi, %rsi      /* move addr into lodsq control reg. */
        rep lodsq               /* scan the memory range */
.scanm_done:
        rep;    ret     /* use 2 byte return instruction when branch target */
                        /* AMD Software Optimization Guide - Section 6.2 */
        SET_SIZE(scan_memory)


        ENTRY(lowbit)
        movl    $-1, %eax
        bsfq    %rdi, %rdi
        cmovnz  %edi, %eax
        incl    %eax
        ret
        SET_SIZE(lowbit)

        ENTRY(highbit)
        ALTENTRY(highbit64)
        movl    $-1, %eax
        bsrq    %rdi, %rdi
        cmovnz  %edi, %eax
        incl    %eax
        ret
        SET_SIZE(highbit64)
        SET_SIZE(highbit)
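
/*
 * Both routines return 1-based bit positions, or 0 if no bit is set.
 * For example:
 *
 *	lowbit(0x18)  == 4	(lowest set bit is bit 3, value 0x08)
 *	highbit(0x18) == 5	(highest set bit is bit 4, value 0x10)
 *	lowbit(0) == highbit(0) == 0
 */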

#define XMSR_ACCESS_VAL         $0x9c5a203a

        ENTRY(rdmsr)
        movl    %edi, %ecx
        rdmsr
        shlq    $32, %rdx
        orq     %rdx, %rax
        ret
        SET_SIZE(rdmsr)

        ENTRY(wrmsr)
        movq    %rsi, %rdx
        shrq    $32, %rdx
        movl    %esi, %eax
        movl    %edi, %ecx
        wrmsr
        ret
        SET_SIZE(wrmsr)

        ENTRY(xrdmsr)
        pushq   %rbp
        movq    %rsp, %rbp
        movl    %edi, %ecx
        movl    XMSR_ACCESS_VAL, %edi   /* this value is needed to access MSR */
        rdmsr
        shlq    $32, %rdx
        orq     %rdx, %rax
        leave
        ret
        SET_SIZE(xrdmsr)

        ENTRY(xwrmsr)
        pushq   %rbp
        movq    %rsp, %rbp
        movl    %edi, %ecx
        movl    XMSR_ACCESS_VAL, %edi   /* this value is needed to access MSR */
        movq    %rsi, %rdx
        shrq    $32, %rdx
        movl    %esi, %eax
        wrmsr
        leave
        ret
        SET_SIZE(xwrmsr)

        ENTRY(get_xcr)
        movl    %edi, %ecx
        #xgetbv
        .byte   0x0f,0x01,0xd0
        shlq    $32, %rdx
        orq     %rdx, %rax
        ret
        SET_SIZE(get_xcr)

        ENTRY(set_xcr)
        movq    %rsi, %rdx
        shrq    $32, %rdx
        movl    %esi, %eax
        movl    %edi, %ecx
        #xsetbv
        .byte   0x0f,0x01,0xd1
        ret
        SET_SIZE(set_xcr)

        ENTRY(invalidate_cache)
        wbinvd
        ret
        SET_SIZE(invalidate_cache)

        ENTRY_NP(getcregs)
#if defined(__xpv)
        /*
         * Only a few of the hardware control registers or descriptor tables
         * are directly accessible to us, so just zero the structure.
         *
         * XXPV Perhaps it would be helpful for the hypervisor to return
         *      virtualized versions of these for post-mortem use.
         *      (Need to reevaluate - perhaps it already does!)
         */
        pushq   %rdi            /* save *crp */
        movq    $CREGSZ, %rsi
        call    bzero
        popq    %rdi

        /*
         * Dump what limited information we can
         */
        movq    %cr0, %rax
        movq    %rax, CREG_CR0(%rdi)    /* cr0 */
        movq    %cr2, %rax
        movq    %rax, CREG_CR2(%rdi)    /* cr2 */
        movq    %cr3, %rax
        movq    %rax, CREG_CR3(%rdi)    /* cr3 */
        movq    %cr4, %rax
        movq    %rax, CREG_CR4(%rdi)    /* cr4 */

#else   /* __xpv */

#define GETMSR(r, off, d)       \
        movl    $r, %ecx;       \
        rdmsr;                  \
        movl    %eax, off(d);   \
        movl    %edx, off+4(d)

        xorl    %eax, %eax
        movq    %rax, CREG_GDT+8(%rdi)
        sgdt    CREG_GDT(%rdi)          /* 10 bytes */
        movq    %rax, CREG_IDT+8(%rdi)
        sidt    CREG_IDT(%rdi)          /* 10 bytes */
        movq    %rax, CREG_LDT(%rdi)
        sldt    CREG_LDT(%rdi)          /* 2 bytes */
        movq    %rax, CREG_TASKR(%rdi)
        str     CREG_TASKR(%rdi)        /* 2 bytes */
        movq    %cr0, %rax
        movq    %rax, CREG_CR0(%rdi)    /* cr0 */
        movq    %cr2, %rax
        movq    %rax, CREG_CR2(%rdi)    /* cr2 */
        movq    %cr3, %rax
        movq    %rax, CREG_CR3(%rdi)    /* cr3 */
        movq    %cr4, %rax
        movq    %rax, CREG_CR4(%rdi)    /* cr4 */
        movq    %cr8, %rax
        movq    %rax, CREG_CR8(%rdi)    /* cr8 */
        GETMSR(MSR_AMD_KGSBASE, CREG_KGSBASE, %rdi)
        GETMSR(MSR_AMD_EFER, CREG_EFER, %rdi)
#endif  /* __xpv */
        ret
        SET_SIZE(getcregs)

#undef GETMSR


/*
 * A panic trigger is a word which is updated atomically and can only be set
 * once.  We atomically store 0xDEFACEDD and load the old value.  If the
 * previous value was 0, we succeed and return 1; otherwise return 0.
 * This allows a partially corrupt trigger to still trigger correctly.  DTrace
 * has its own version of this function to allow it to panic correctly from
 * probe context.
 */

        ENTRY_NP(panic_trigger)
        xorl    %eax, %eax
        movl    $0xdefacedd, %edx
        lock
          xchgl %edx, (%rdi)
        cmpl    $0, %edx
        je      0f
        movl    $0, %eax
        ret
0:      movl    $1, %eax
        ret
        SET_SIZE(panic_trigger)

        ENTRY_NP(dtrace_panic_trigger)
        xorl    %eax, %eax
        movl    $0xdefacedd, %edx
        lock
          xchgl %edx, (%rdi)
        cmpl    $0, %edx
        je      0f
        movl    $0, %eax
        ret
0:      movl    $1, %eax
        ret
        SET_SIZE(dtrace_panic_trigger)
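
/*
 * In C terms both triggers behave like this sketch (illustrative; the
 * atomic exchange stands in for the lock'd xchgl above):
 *
 *	int
 *	panic_trigger(int *tp)
 *	{
 *		uint32_t old = atomic_swap_32((uint32_t *)tp, 0xdefacedd);
 *
 *		return (old == 0);
 *	}
 */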

/*
 * The panic() and cmn_err() functions invoke vpanic() as a common entry point
 * into the panic code implemented in panicsys().  vpanic() is responsible
 * for passing through the format string and arguments, and constructing a
 * regs structure on the stack into which it saves the current register
 * values.  If we are not dying due to a fatal trap, these registers will
 * then be preserved in panicbuf as the current processor state.  Before
 * invoking panicsys(), vpanic() activates the first panic trigger (see
 * common/os/panic.c) and switches to the panic_stack if successful.  Note that
 * DTrace takes a slightly different panic path if it must panic from probe
 * context.  Instead of calling panic, it calls into dtrace_vpanic(), which
 * sets up the initial stack as vpanic does, calls dtrace_panic_trigger(), and
 * branches back into vpanic().
 */

        ENTRY_NP(vpanic)                        /* Initial stack layout: */

        pushq   %rbp                            /* | %rip |     0x60    */
        movq    %rsp, %rbp                      /* | %rbp |     0x58    */
        pushfq                                  /* | rfl  |     0x50    */
        pushq   %r11                            /* | %r11 |     0x48    */
        pushq   %r10                            /* | %r10 |     0x40    */
        pushq   %rbx                            /* | %rbx |     0x38    */
        pushq   %rax                            /* | %rax |     0x30    */
        pushq   %r9                             /* | %r9  |     0x28    */
        pushq   %r8                             /* | %r8  |     0x20    */
        pushq   %rcx                            /* | %rcx |     0x18    */
        pushq   %rdx                            /* | %rdx |     0x10    */
        pushq   %rsi                            /* | %rsi |     0x8 alist */
        pushq   %rdi                            /* | %rdi |     0x0 format */

        movq    %rsp, %rbx                      /* %rbx = current %rsp */

        leaq    panic_quiesce(%rip), %rdi       /* %rdi = &panic_quiesce */
        call    panic_trigger                   /* %eax = panic_trigger() */

vpanic_common:
        /*
         * The panic_trigger result is in %eax from the call above, and
         * dtrace_panic places it in %eax before branching here.
         * The rdmsr instructions that follow below will clobber %eax so
         * we stash the panic_trigger result in %r11d.
         */
        movl    %eax, %r11d
        cmpl    $0, %r11d
        je      0f

        /*
         * If panic_trigger() was successful, we are the first to initiate a
         * panic: we now switch to the reserved panic_stack before continuing.
         */
        leaq    panic_stack(%rip), %rsp
        addq    $PANICSTKSIZE, %rsp
0:      subq    $REGSIZE, %rsp
        /*
         * Now that we've got everything set up, store the register values as
         * they were when we entered vpanic() to the designated location in
         * the regs structure we allocated on the stack.
         */
        movq    0x0(%rbx), %rcx
        movq    %rcx, REGOFF_RDI(%rsp)
        movq    0x8(%rbx), %rcx
        movq    %rcx, REGOFF_RSI(%rsp)
        movq    0x10(%rbx), %rcx
        movq    %rcx, REGOFF_RDX(%rsp)
        movq    0x18(%rbx), %rcx
        movq    %rcx, REGOFF_RCX(%rsp)
        movq    0x20(%rbx), %rcx

        movq    %rcx, REGOFF_R8(%rsp)
        movq    0x28(%rbx), %rcx
        movq    %rcx, REGOFF_R9(%rsp)
        movq    0x30(%rbx), %rcx
        movq    %rcx, REGOFF_RAX(%rsp)
        movq    0x38(%rbx), %rcx
        movq    %rcx, REGOFF_RBX(%rsp)
        movq    0x58(%rbx), %rcx

        movq    %rcx, REGOFF_RBP(%rsp)
        movq    0x40(%rbx), %rcx
        movq    %rcx, REGOFF_R10(%rsp)
        movq    0x48(%rbx), %rcx
        movq    %rcx, REGOFF_R11(%rsp)
        movq    %r12, REGOFF_R12(%rsp)

        movq    %r13, REGOFF_R13(%rsp)
        movq    %r14, REGOFF_R14(%rsp)
        movq    %r15, REGOFF_R15(%rsp)

        xorl    %ecx, %ecx
        movw    %ds, %cx
        movq    %rcx, REGOFF_DS(%rsp)
        movw    %es, %cx
        movq    %rcx, REGOFF_ES(%rsp)
        movw    %fs, %cx
        movq    %rcx, REGOFF_FS(%rsp)
        movw    %gs, %cx
        movq    %rcx, REGOFF_GS(%rsp)

        movq    $0, REGOFF_TRAPNO(%rsp)

        movq    $0, REGOFF_ERR(%rsp)
        leaq    vpanic(%rip), %rcx
        movq    %rcx, REGOFF_RIP(%rsp)
        movw    %cs, %cx
        movzwq  %cx, %rcx
        movq    %rcx, REGOFF_CS(%rsp)
        movq    0x50(%rbx), %rcx
        movq    %rcx, REGOFF_RFL(%rsp)
        movq    %rbx, %rcx
        addq    $0x60, %rcx
        movq    %rcx, REGOFF_RSP(%rsp)
        movw    %ss, %cx
        movzwq  %cx, %rcx
        movq    %rcx, REGOFF_SS(%rsp)

        /*
         * panicsys(format, alist, rp, on_panic_stack)
         */
        movq    REGOFF_RDI(%rsp), %rdi          /* format */
        movq    REGOFF_RSI(%rsp), %rsi          /* alist */
        movq    %rsp, %rdx                      /* struct regs */
        movl    %r11d, %ecx                     /* on_panic_stack */
        call    panicsys
        addq    $REGSIZE, %rsp
        popq    %rdi
        popq    %rsi
        popq    %rdx
        popq    %rcx
        popq    %r8
        popq    %r9
        popq    %rax
        popq    %rbx
        popq    %r10
        popq    %r11
        popfq
        leave
        ret
        SET_SIZE(vpanic)

        ENTRY_NP(dtrace_vpanic)                 /* Initial stack layout: */

        pushq   %rbp                            /* | %rip |     0x60    */
        movq    %rsp, %rbp                      /* | %rbp |     0x58    */
        pushfq                                  /* | rfl  |     0x50    */
        pushq   %r11                            /* | %r11 |     0x48    */
        pushq   %r10                            /* | %r10 |     0x40    */
        pushq   %rbx                            /* | %rbx |     0x38    */
        pushq   %rax                            /* | %rax |     0x30    */
        pushq   %r9                             /* | %r9  |     0x28    */
        pushq   %r8                             /* | %r8  |     0x20    */
        pushq   %rcx                            /* | %rcx |     0x18    */
        pushq   %rdx                            /* | %rdx |     0x10    */
        pushq   %rsi                            /* | %rsi |     0x8 alist */
        pushq   %rdi                            /* | %rdi |     0x0 format */

        movq    %rsp, %rbx                      /* %rbx = current %rsp */

        leaq    panic_quiesce(%rip), %rdi       /* %rdi = &panic_quiesce */
        call    dtrace_panic_trigger    /* %eax = dtrace_panic_trigger() */
        jmp     vpanic_common

        SET_SIZE(dtrace_vpanic)

        DGDEF3(timedelta, 8, 8)
        .long   0, 0

        /*
         * initialized to a non-zero value to make pc_gethrtime()
         * work correctly even before the clock is initialized
         */
        DGDEF3(hrtime_base, 8, 8)
        .long   _MUL(NSEC_PER_CLOCK_TICK, 6), 0

        DGDEF3(adj_shift, 4, 4)
        .long   ADJ_SHIFT

        ENTRY_NP(hres_tick)
        pushq   %rbp
        movq    %rsp, %rbp

        /*
         * We need to call *gethrtimef before picking up CLOCK_LOCK (obviously,
         * hres_last_tick can only be modified while holding CLOCK_LOCK).
         * At worst, performing this now instead of under CLOCK_LOCK may
         * introduce some jitter in pc_gethrestime().
         */
        movq    gethrtimef(%rip), %rsi
        INDIRECT_CALL_REG(rsi)
        movq    %rax, %r8

        leaq    hres_lock(%rip), %rax
        movb    $-1, %dl
.CL1:
        xchgb   %dl, (%rax)
        testb   %dl, %dl
        jz      .CL3                    /* got it */
.CL2:
        cmpb    $0, (%rax)              /* possible to get lock? */
        pause
        jne     .CL2
        jmp     .CL1                    /* yes, try again */
.CL3:
        /*
         * compute the interval since last time hres_tick was called
         * and adjust hrtime_base and hrestime accordingly
         * hrtime_base is an 8 byte value (in nsec), hrestime is
         * a timestruc_t (sec, nsec)
         */
        leaq    hres_last_tick(%rip), %rax
        movq    %r8, %r11
        subq    (%rax), %r8
        addq    %r8, hrtime_base(%rip)  /* add interval to hrtime_base */
        addq    %r8, hrestime+8(%rip)   /* add interval to hrestime.tv_nsec */
        /*
         * Now that we have CLOCK_LOCK, we can update hres_last_tick
         */
        movq    %r11, (%rax)

        call    __adj_hrestime

        /*
         * release the hres_lock
         */
        incl    hres_lock(%rip)
        leave
        ret
        SET_SIZE(hres_tick)
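
/*
 * The .CL1/.CL2 loop above is a test-and-test-and-set spinlock on the
 * low byte of hres_lock; roughly, in C (a sketch using illustrative
 * atomic and pause primitives):
 *
 *	while (atomic_swap_8(&lock_byte, 0xff) != 0) {
 *		while (lock_byte != 0)
 *			SMT_PAUSE();
 *	}
 *
 * Note that the release path increments the whole word rather than
 * clearing the byte: the low byte wraps from 0xff back to 0 and the
 * upper bits act as a generation counter for lockless readers.
 */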

/*
 * void prefetch_smap_w(void *)
 *
 * Prefetch ahead within a linear list of smap structures.
 * Not implemented for ia32.  Stub for compatibility.
 */

        ENTRY(prefetch_smap_w)
        rep;    ret     /* use 2 byte return instruction when branch target */
                        /* AMD Software Optimization Guide - Section 6.2 */
        SET_SIZE(prefetch_smap_w)

/*
 * prefetch_page_r(page_t *)
 * issue prefetch instructions for a page_t
 */

        ENTRY(prefetch_page_r)
        rep;    ret     /* use 2 byte return instruction when branch target */
                        /* AMD Software Optimization Guide - Section 6.2 */
        SET_SIZE(prefetch_page_r)

        ENTRY(bcmp)
        pushq   %rbp
        movq    %rsp, %rbp
#ifdef DEBUG
        testq   %rdx,%rdx
        je      1f
        movq    postbootkernelbase(%rip), %r11
        cmpq    %r11, %rdi
        jb      0f
        cmpq    %r11, %rsi
        jnb     1f
0:      leaq    .bcmp_panic_msg(%rip), %rdi
        xorl    %eax, %eax
        call    panic
1:
#endif  /* DEBUG */
        call    memcmp
        testl   %eax, %eax
        setne   %dl
        leave
        movzbl  %dl, %eax
        ret
        SET_SIZE(bcmp)

#ifdef DEBUG
        .text
.bcmp_panic_msg:
        .string "bcmp: arguments below kernelbase"
#endif  /* DEBUG */

        ENTRY_NP(bsrw_insn)
        xorl    %eax, %eax
        bsrw    %di, %ax
        ret
        SET_SIZE(bsrw_insn)

        ENTRY_NP(switch_sp_and_call)
        pushq   %rbp
        movq    %rsp, %rbp              /* set up stack frame */
        movq    %rdi, %rsp              /* switch stack pointer */
        movq    %rdx, %rdi              /* pass func arg 1 */
        movq    %rsi, %r11              /* save function to call */
        movq    %rcx, %rsi              /* pass func arg 2 */
        INDIRECT_CALL_REG(r11)          /* call function */
        leave                           /* restore stack */
        ret
        SET_SIZE(switch_sp_and_call)

        ENTRY_NP(kmdb_enter)
        pushq   %rbp
        movq    %rsp, %rbp

        /*
         * Save flags, do a 'cli' then return the saved flags
         */
        call    intr_clear

        int     $T_DBGENTR

        /*
         * Restore the saved flags
         */
        movq    %rax, %rdi
        call    intr_restore

        leave
        ret
        SET_SIZE(kmdb_enter)

        ENTRY_NP(return_instr)
        rep;    ret     /* use 2 byte instruction when branch target */
                        /* AMD Software Optimization Guide - Section 6.2 */
        SET_SIZE(return_instr)

        ENTRY(getflags)
        pushfq
        popq    %rax
#if defined(__xpv)
        CURTHREAD(%rdi)
        KPREEMPT_DISABLE(%rdi)
        /*
         * Synthesize the PS_IE bit from the event mask bit
         */
        CURVCPU(%r11)
        andq    $_BITNOT(PS_IE), %rax
        XEN_TEST_UPCALL_MASK(%r11)
        jnz     1f
        orq     $PS_IE, %rax
1:
        KPREEMPT_ENABLE_NOKP(%rdi)
#endif
        ret
        SET_SIZE(getflags)

        ENTRY(ftrace_interrupt_disable)
        pushfq
        popq    %rax
        CLI(%rdx)
        ret
        SET_SIZE(ftrace_interrupt_disable)

        ENTRY(ftrace_interrupt_enable)
        pushq   %rdi
        popfq
        ret
        SET_SIZE(ftrace_interrupt_enable)

        ENTRY(clflush_insn)
        clflush (%rdi)
        ret
        SET_SIZE(clflush_insn)

        ENTRY(mfence_insn)
        mfence
        ret
        SET_SIZE(mfence_insn)

/*
 * VMware implements an I/O port that programs can query to detect if software
 * is running in a VMware hypervisor.  This hypervisor port behaves differently
 * depending on magic values in certain registers and modifies some registers
 * as a side effect.
 *
 * References: http://kb.vmware.com/kb/1009458
 */

        ENTRY(vmware_port)
        pushq   %rbx
        movl    $VMWARE_HVMAGIC, %eax
        movl    $0xffffffff, %ebx
        movl    %edi, %ecx
        movl    $VMWARE_HVPORT, %edx
        inl     (%dx)
        movl    %eax, (%rsi)
        movl    %ebx, 4(%rsi)
        movl    %ecx, 8(%rsi)
        movl    %edx, 12(%rsi)
        popq    %rbx
        ret
        SET_SIZE(vmware_port)