/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright 2019 Joyent, Inc.
 */

/*
 * Debugger entry and exit for both master and slave CPUs. kdi_idthdl.s
 * contains the IDT stubs that drop into here (mainly via kdi_cmnint).
 */

#if defined(__lint)
#include <sys/types.h>
#else

#include <sys/segments.h>
#include <sys/asm_linkage.h>
#include <sys/controlregs.h>
#include <sys/x86_archext.h>
#include <sys/privregs.h>
#include <sys/machprivregs.h>
#include <sys/kdi_regs.h>
#include <sys/psw.h>
#include <sys/uadmin.h>
#ifdef __xpv
#include <sys/hypervisor.h>
#endif
#include <kdi_assym.h>
#include <assym.h>

/* clobbers %rdx, %rcx, returns addr in %rax, CPU ID in %rbx */
#define GET_CPUSAVE_ADDR \
        movzbq  %gs:CPU_ID, %rbx;               \
        movq    %rbx, %rax;                     \
        movq    $KRS_SIZE, %rcx;                \
        mulq    %rcx;                           \
        movq    $kdi_cpusave, %rdx;             \
        /*CSTYLED*/                             \
        addq    (%rdx), %rax
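
/*
 * In C terms, GET_CPUSAVE_ADDR computes roughly the following (an
 * illustrative sketch; kdi_cpusave holds the base address of the array of
 * per-CPU save areas, each KRS_SIZE bytes long):
 *
 *	addr = (uintptr_t)kdi_cpusave + cpuid * KRS_SIZE;
 */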

/*
 * Save copies of the IDT and GDT descriptors.  Note that we only save the IDT
 * and GDT if the IDT isn't ours, as we may be legitimately re-entering the
 * debugger through the trap handler.  We don't want to clobber the saved IDT
 * in the process, as we'd end up resuming the world on our IDT.
 */
#define SAVE_IDTGDT                             \
        movq    %gs:CPU_IDT, %r11;              \
        leaq    kdi_idt(%rip), %rsi;            \
        cmpq    %rsi, %r11;                     \
        je      1f;                             \
        movq    %r11, KRS_IDT(%rax);            \
        movq    %gs:CPU_GDT, %r11;              \
        movq    %r11, KRS_GDT(%rax);            \
1:

#ifdef __xpv

/*
 * Already on kernel gsbase via the hypervisor.
 */
#define SAVE_GSBASE(reg) /* nothing */
#define RESTORE_GSBASE(reg) /* nothing */

#else

#define SAVE_GSBASE(base)                               \
        movl    $MSR_AMD_GSBASE, %ecx;                  \
        rdmsr;                                          \
        shlq    $32, %rdx;                              \
        orq     %rax, %rdx;                             \
        movq    %rdx, REG_OFF(KDIREG_GSBASE)(base);     \
        movl    $MSR_AMD_KGSBASE, %ecx;                 \
        rdmsr;                                          \
        shlq    $32, %rdx;                              \
        orq     %rax, %rdx;                             \
        movq    %rdx, REG_OFF(KDIREG_KGSBASE)(base)

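/*
 * Note that rdmsr returns the 64-bit MSR value split across %edx (high
 * half) and %eax (low half); the shlq/orq pairs above reassemble it.
 * wrmsr, used by RESTORE_GSBASE below, expects the same %edx:%eax split.
 */
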
/*
 * We shouldn't have stomped on KGSBASE, so don't try to restore it.
 */
#define RESTORE_GSBASE(base)                            \
        movq    REG_OFF(KDIREG_GSBASE)(base), %rdx;     \
        movq    %rdx, %rax;                             \
        shrq    $32, %rdx;                              \
        movl    $MSR_AMD_GSBASE, %ecx;                  \
        wrmsr

#endif /* __xpv */

/*
 * %ss, %rsp, %rflags, %cs, %rip, %err, %trapno are already on the stack.
 */
#define KDI_SAVE_REGS(base) \
        movq    %rdi, REG_OFF(KDIREG_RDI)(base);        \
        movq    %rsi, REG_OFF(KDIREG_RSI)(base);        \
        movq    %rdx, REG_OFF(KDIREG_RDX)(base);        \
        movq    %rcx, REG_OFF(KDIREG_RCX)(base);        \
        movq    %r8, REG_OFF(KDIREG_R8)(base);          \
        movq    %r9, REG_OFF(KDIREG_R9)(base);          \
        movq    %rax, REG_OFF(KDIREG_RAX)(base);        \
        movq    %rbx, REG_OFF(KDIREG_RBX)(base);        \
        movq    %rbp, REG_OFF(KDIREG_RBP)(base);        \
        movq    %r10, REG_OFF(KDIREG_R10)(base);        \
        movq    %r11, REG_OFF(KDIREG_R11)(base);        \
        movq    %r12, REG_OFF(KDIREG_R12)(base);        \
        movq    %r13, REG_OFF(KDIREG_R13)(base);        \
        movq    %r14, REG_OFF(KDIREG_R14)(base);        \
        movq    %r15, REG_OFF(KDIREG_R15)(base);        \
        movq    %rbp, REG_OFF(KDIREG_SAVFP)(base);      \
        movq    REG_OFF(KDIREG_RIP)(base), %rax;        \
        movq    %rax, REG_OFF(KDIREG_SAVPC)(base);      \
        movq    %cr2, %rax;                             \
        movq    %rax, REG_OFF(KDIREG_CR2)(base);        \
        clrq    %rax;                                   \
        movw    %ds, %ax;                               \
        movq    %rax, REG_OFF(KDIREG_DS)(base);         \
        movw    %es, %ax;                               \
        movq    %rax, REG_OFF(KDIREG_ES)(base);         \
        movw    %fs, %ax;                               \
        movq    %rax, REG_OFF(KDIREG_FS)(base);         \
        movw    %gs, %ax;                               \
        movq    %rax, REG_OFF(KDIREG_GS)(base);         \
        SAVE_GSBASE(base)
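
/*
 * KDI_SAVE_REGS also copies %rbp and the saved %rip into the
 * KDIREG_SAVFP/KDIREG_SAVPC slots, mimicking a conventional stack frame
 * so that stack tracebacks can walk through the save area.
 */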

#define KDI_RESTORE_REGS(base) \
        movq    base, %rdi;                             \
        RESTORE_GSBASE(%rdi);                           \
        movq    REG_OFF(KDIREG_ES)(%rdi), %rax;         \
        movw    %ax, %es;                               \
        movq    REG_OFF(KDIREG_DS)(%rdi), %rax;         \
        movw    %ax, %ds;                               \
        movq    REG_OFF(KDIREG_CR2)(base), %rax;        \
        movq    %rax, %cr2;                             \
        movq    REG_OFF(KDIREG_R15)(%rdi), %r15;        \
        movq    REG_OFF(KDIREG_R14)(%rdi), %r14;        \
        movq    REG_OFF(KDIREG_R13)(%rdi), %r13;        \
        movq    REG_OFF(KDIREG_R12)(%rdi), %r12;        \
        movq    REG_OFF(KDIREG_R11)(%rdi), %r11;        \
        movq    REG_OFF(KDIREG_R10)(%rdi), %r10;        \
        movq    REG_OFF(KDIREG_RBP)(%rdi), %rbp;        \
        movq    REG_OFF(KDIREG_RBX)(%rdi), %rbx;        \
        movq    REG_OFF(KDIREG_RAX)(%rdi), %rax;        \
        movq    REG_OFF(KDIREG_R9)(%rdi), %r9;          \
        movq    REG_OFF(KDIREG_R8)(%rdi), %r8;          \
        movq    REG_OFF(KDIREG_RCX)(%rdi), %rcx;        \
        movq    REG_OFF(KDIREG_RDX)(%rdi), %rdx;        \
        movq    REG_OFF(KDIREG_RSI)(%rdi), %rsi;        \
        movq    REG_OFF(KDIREG_RDI)(%rdi), %rdi

/*
 * The following macro restores the debugging state to the current CPU:
 * %dr7, %dr6 and %dr0-%dr3 are reloaded from the global kdi_drreg state
 * (%dr6 is reset to KDIREG_DRSTAT_RESERVED).
 *
 * %rdi (the cpusave area pointer in callers such as kdi_resume) is
 * preserved across the macro; %r15 and the registers clobbered by the
 * kdi_dreg_set calls are not.
 */
#define KDI_RESTORE_DEBUGGING_STATE \
        pushq   %rdi;                                           \
        leaq    kdi_drreg(%rip), %r15;                          \
        movl    $7, %edi;                                       \
        movq    DR_CTL(%r15), %rsi;                             \
        call    kdi_dreg_set;                                   \
                                                                \
        movl    $6, %edi;                                       \
        movq    $KDIREG_DRSTAT_RESERVED, %rsi;                  \
        call    kdi_dreg_set;                                   \
                                                                \
        movl    $0, %edi;                                       \
        movq    DRADDR_OFF(0)(%r15), %rsi;                      \
        call    kdi_dreg_set;                                   \
        movl    $1, %edi;                                       \
        movq    DRADDR_OFF(1)(%r15), %rsi;                      \
        call    kdi_dreg_set;                                   \
        movl    $2, %edi;                                       \
        movq    DRADDR_OFF(2)(%r15), %rsi;                      \
        call    kdi_dreg_set;                                   \
        movl    $3, %edi;                                       \
        movq    DRADDR_OFF(3)(%r15), %rsi;                      \
        call    kdi_dreg_set;                                   \
        popq    %rdi

/*
 * Each cpusave buffer has an area set aside for a ring buffer of breadcrumbs.
 * The following macros manage the buffer.
 */

/* Advance the ring buffer */
#define ADVANCE_CRUMB_POINTER(cpusave, tmp1, tmp2) \
        movq    KRS_CURCRUMBIDX(cpusave), tmp1; \
        cmpq    $[KDI_NCRUMBS - 1], tmp1;       \
        jge     1f;                             \
        /* Advance the pointer and index */     \
        addq    $1, tmp1;                       \
        movq    tmp1, KRS_CURCRUMBIDX(cpusave); \
        movq    KRS_CURCRUMB(cpusave), tmp1;    \
        addq    $KRM_SIZE, tmp1;                \
        jmp     2f;                             \
1:      /* Reset the pointer and index */       \
        movq    $0, KRS_CURCRUMBIDX(cpusave);   \
        leaq    KRS_CRUMBS(cpusave), tmp1;      \
2:      movq    tmp1, KRS_CURCRUMB(cpusave);    \
        /* Clear the new crumb */               \
        movq    $KDI_NCRUMBS, tmp2;             \
3:      movq    $0, -4(tmp1, tmp2, 4);          \
        decq    tmp2;                           \
        jnz     3b
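
/*
 * An illustrative C sketch of the advance logic above (field names are
 * informal; KDI_NCRUMBS crumbs of KRM_SIZE bytes each live in the cpusave
 * area, and the loop at 3: zeroes the newly-current crumb):
 *
 *	if (cpusave->krs_curcrumbidx < KDI_NCRUMBS - 1) {
 *		cpusave->krs_curcrumbidx++;
 *		cpusave->krs_curcrumb += KRM_SIZE;
 *	} else {
 *		cpusave->krs_curcrumbidx = 0;
 *		cpusave->krs_curcrumb = &cpusave->krs_crumbs[0];
 *	}
 */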

/* Set a value in the current breadcrumb buffer */
#define ADD_CRUMB(cpusave, offset, value, tmp) \
        movq    KRS_CURCRUMB(cpusave), tmp;     \
        movq    value, offset(tmp)

        /* XXX implement me */
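        /*
         * For now this deliberately faults by loading through a NULL
         * pointer, rather than handling the NMI properly.
         */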
        ENTRY_NP(kdi_nmiint)
        clrq    %rcx
        movq    (%rcx), %rcx
        SET_SIZE(kdi_nmiint)

        /*
         * The main entry point for master CPUs.  It also serves as the trap
         * handler for all traps and interrupts taken during single-step.
         */
        ENTRY_NP(kdi_cmnint)
        ALTENTRY(kdi_master_entry)

        pushq   %rax
        CLI(%rax)
        popq    %rax

        /* Save current register state */
        subq    $REG_OFF(KDIREG_TRAPNO), %rsp
        KDI_SAVE_REGS(%rsp)

#ifdef __xpv
        /*
         * Clear saved_upcall_mask in unused byte of cs slot on stack.
         * It can only confuse things.
         */
        movb    $0, REG_OFF(KDIREG_CS)+4(%rsp)
#endif

#if !defined(__xpv)
        /*
         * Switch to the kernel's GSBASE.  Neither GSBASE nor the ill-named
         * KGSBASE can be trusted, as the kernel may or may not have already
         * done a swapgs.  All is not lost, as the kernel can divine the correct
         * value for us.  Note that the previous GSBASE is saved in the
         * KDI_SAVE_REGS macro to prevent a usermode process's GSBASE from being
         * blown away.  On the hypervisor, we don't need to do this, since it's
         * ensured we're on our requested kernel GSBASE already.
         *
         * No need to worry about swapgs speculation here as it's unconditional
         * and via wrmsr anyway.
         */
        subq    $10, %rsp
        sgdt    (%rsp)
        movq    2(%rsp), %rdi   /* gdt base now in %rdi */
        addq    $10, %rsp
        call    kdi_gdt2gsbase  /* returns kernel's GSBASE in %rax */

        movq    %rax, %rdx
        shrq    $32, %rdx
        movl    $MSR_AMD_GSBASE, %ecx
        wrmsr

        /*
         * In the trampoline we stashed the incoming %cr3. Copy this into
         * the kdiregs for restoration and later use.
         */
        mov     %gs:(CPU_KPTI_DBG+KPTI_TR_CR3), %rdx
        mov     %rdx, REG_OFF(KDIREG_CR3)(%rsp)
        /*
         * Switch to the kernel's %cr3. From the early interrupt handler
         * until now we've been running on the "paranoid" %cr3 (that of kas
         * from early in boot).
         *
         * If we took the interrupt from somewhere already on the kas/paranoid
         * %cr3 though, don't change it (this could happen if kcr3 is corrupt
         * and we took a gptrap earlier from this very code).
         */
        cmpq    %rdx, kpti_safe_cr3
        je      .no_kcr3
        mov     %gs:CPU_KPTI_KCR3, %rdx
        cmpq    $0, %rdx
        je      .no_kcr3
        mov     %rdx, %cr3
.no_kcr3:

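        /*
         * An illustrative C sketch of the %cr3 logic above (names informal):
         *
         *	if (trampoline_cr3 != kpti_safe_cr3 && kpti_kcr3 != 0)
         *		setcr3(kpti_kcr3);
         */
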
#endif  /* __xpv */

        GET_CPUSAVE_ADDR        /* %rax = cpusave, %rbx = CPU ID */

        ADVANCE_CRUMB_POINTER(%rax, %rcx, %rdx)

        ADD_CRUMB(%rax, KRM_CPU_STATE, $KDI_CPU_STATE_MASTER, %rdx)

        movq    REG_OFF(KDIREG_RIP)(%rsp), %rcx
        ADD_CRUMB(%rax, KRM_PC, %rcx, %rdx)
        ADD_CRUMB(%rax, KRM_SP, %rsp, %rdx)
        movq    REG_OFF(KDIREG_TRAPNO)(%rsp), %rcx
        ADD_CRUMB(%rax, KRM_TRAPNO, %rcx, %rdx)

        movq    %rsp, %rbp
        pushq   %rax

        /*
         * Were we in the debugger when we took the trap (i.e. was %rsp in one
         * of the debugger's memory ranges)?
         */
        leaq    kdi_memranges, %rcx
        movl    kdi_nmemranges, %edx
1:
        cmpq    MR_BASE(%rcx), %rsp
        jl      2f              /* below this range -- try the next one */
        cmpq    MR_LIM(%rcx), %rsp
        jg      2f              /* above this range -- try the next one */
        jmp     3f              /* matched within this range */

2:
        decl    %edx
        jz      kdi_save_common_state   /* %rsp not within debugger memory */
        addq    $MR_SIZE, %rcx
        jmp     1b

3:      /*
         * We hit a trap while running in the debugger (%rsp was within one of
         * the debugger's memory ranges).  Note that it may be an intentional
         * fault.  kmdb_dpi_handle_fault, reached via kdi_dvec_handle_fault
         * below, will sort it all out.
         */

        movq    REG_OFF(KDIREG_TRAPNO)(%rbp), %rdi
        movq    REG_OFF(KDIREG_RIP)(%rbp), %rsi
        movq    REG_OFF(KDIREG_RSP)(%rbp), %rdx
        movq    %rbx, %rcx              /* cpuid */

        call    kdi_dvec_handle_fault

        /*
         * If we're here, we ran into a debugger problem, and the user
         * elected to solve it by having the debugger debug itself.  The
         * state we're about to save is that of the debugger when it took
         * the fault.
         */

        jmp     kdi_save_common_state

        SET_SIZE(kdi_master_entry)
        SET_SIZE(kdi_cmnint)

/*
 * The cross-call handler for slave CPUs.
 *
 * The debugger is single-threaded, so only one CPU, called the master, may be
 * running it at any given time.  The other CPUs, known as slaves, spin in a
 * busy loop until there's something for them to do.  This is the entry point
 * for the slaves - they'll be sent here in response to a cross-call sent by the
 * master.
 */

        ENTRY_NP(kdi_slave_entry)

        /*
         * Cross calls are implemented as function calls, so our stack currently
         * looks like one you'd get from a zero-argument function call.  That
         * is, there's the return %rip at %rsp, and that's about it.  We need
         * to make it look like an interrupt stack.  When we first save, we'll
         * reverse the saved %ss and %rip, which we'll fix back up when we've
         * freed up some general-purpose registers.  We'll also need to fix up
         * the saved %rsp.
         */

        pushq   %rsp            /* pushed value off by 8 */
        pushfq
        CLI(%rax)
        pushq   $KCS_SEL
        clrq    %rax
        movw    %ss, %ax
        pushq   %rax            /* rip should be here */
        pushq   $-1             /* phony trap error code */
        pushq   $-1             /* phony trap number */

        subq    $REG_OFF(KDIREG_TRAPNO), %rsp
        KDI_SAVE_REGS(%rsp)

        movq    %cr3, %rax
        movq    %rax, REG_OFF(KDIREG_CR3)(%rsp)

        movq    REG_OFF(KDIREG_SS)(%rsp), %rax
        movq    %rax, REG_OFF(KDIREG_SAVPC)(%rsp)
        xchgq   REG_OFF(KDIREG_RIP)(%rsp), %rax
        movq    %rax, REG_OFF(KDIREG_SS)(%rsp)

        movq    REG_OFF(KDIREG_RSP)(%rsp), %rax
        addq    $8, %rax
        movq    %rax, REG_OFF(KDIREG_RSP)(%rsp)
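
        /*
         * The fixups above undo the shortcuts taken at push time: the values
         * in the %ss and %rip slots are swapped back into place (with the
         * return %rip also copied into KDIREG_SAVPC for stack traces), and
         * the saved %rsp is biased by 8 to account for the cross-call's
         * return address.
         */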

        /*
         * We've saved all of the general-purpose registers, and have a stack
         * that is irettable (after we strip down to the error code)
         */

        GET_CPUSAVE_ADDR        /* %rax = cpusave, %rbx = CPU ID */

        ADVANCE_CRUMB_POINTER(%rax, %rcx, %rdx)

        ADD_CRUMB(%rax, KRM_CPU_STATE, $KDI_CPU_STATE_SLAVE, %rdx)

        movq    REG_OFF(KDIREG_RIP)(%rsp), %rcx
        ADD_CRUMB(%rax, KRM_PC, %rcx, %rdx)
        movq    REG_OFF(KDIREG_RSP)(%rsp), %rcx
        ADD_CRUMB(%rax, KRM_SP, %rcx, %rdx)
        ADD_CRUMB(%rax, KRM_TRAPNO, $-1, %rdx)

        movq    $KDI_CPU_STATE_SLAVE, KRS_CPU_STATE(%rax)

        pushq   %rax
        jmp     kdi_save_common_state

        SET_SIZE(kdi_slave_entry)

/*
 * The state of the world:
 *
 * The stack has a complete set of saved registers and segment
 * selectors, arranged in the kdi_regs.h order.  It also has a pointer
 * to our cpusave area.
 *
 * We need to save, into the cpusave area, a pointer to these saved
 * registers.  First we check whether we should jump straight back to
 * the kernel.  If not, we save a few more registers, ready the
 * machine for debugger entry, and enter the debugger.
 */

        ENTRY_NP(kdi_save_common_state)

        popq    %rdi                    /* the cpusave area */
        movq    %rsp, KRS_GREGS(%rdi)   /* save ptr to current saved regs */

        pushq   %rdi
        call    kdi_trap_pass
        testq   %rax, %rax
        jnz     kdi_pass_to_kernel
        popq    %rax /* cpusave in %rax */

        SAVE_IDTGDT

#if !defined(__xpv)
        /* Save off %cr0, and clear write protect */
        movq    %cr0, %rcx
        movq    %rcx, KRS_CR0(%rax)
        andq    $_BITNOT(CR0_WP), %rcx
        movq    %rcx, %cr0
#endif
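
        /*
         * With CR0.WP clear, supervisor-mode writes ignore page-level write
         * protection, which is what allows the debugger to plant breakpoints
         * in otherwise read-only kernel text.
         */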

        /* Save the debug registers and disable any active watchpoints */

        movq    %rax, %r15              /* save cpusave area ptr */
        movl    $7, %edi
        call    kdi_dreg_get
        movq    %rax, KRS_DRCTL(%r15)

        andq    $_BITNOT(KDIREG_DRCTL_WPALLEN_MASK), %rax
        movq    %rax, %rsi
        movl    $7, %edi
        call    kdi_dreg_set

        movl    $6, %edi
        call    kdi_dreg_get
        movq    %rax, KRS_DRSTAT(%r15)

        movl    $0, %edi
        call    kdi_dreg_get
        movq    %rax, KRS_DROFF(0)(%r15)

        movl    $1, %edi
        call    kdi_dreg_get
        movq    %rax, KRS_DROFF(1)(%r15)

        movl    $2, %edi
        call    kdi_dreg_get
        movq    %rax, KRS_DROFF(2)(%r15)

        movl    $3, %edi
        call    kdi_dreg_get
        movq    %rax, KRS_DROFF(3)(%r15)

        movq    %r15, %rax      /* restore cpu save area to rax */

        clrq    %rbp            /* stack traces should end here */

        pushq   %rax
        movq    %rax, %rdi      /* cpusave */

        call    kdi_debugger_entry

        /* Pass cpusave to kdi_resume */
        popq    %rdi

        jmp     kdi_resume

        SET_SIZE(kdi_save_common_state)

/*
 * Resume the world.  The code that calls kdi_resume has already
 * decided whether or not to restore the IDT.
 */
        /* cpusave in %rdi */
        ENTRY_NP(kdi_resume)

        /*
         * Send this CPU back into the world
         */
#if !defined(__xpv)
        movq    KRS_CR0(%rdi), %rdx
        movq    %rdx, %cr0
#endif

        KDI_RESTORE_DEBUGGING_STATE

        movq    KRS_GREGS(%rdi), %rsp

#if !defined(__xpv)
        /*
         * If we're going back via tr_iret_kdi, then we want to copy the
         * final %cr3 we're going to use back into the kpti_dbg area now.
         *
         * Since the trampoline needs to find the kpti_dbg too, we enter it
         * with %r13 set to point at that. The real %r13 (to restore before
         * the iret) we stash in the kpti_dbg itself.
         */
        movq    %gs:CPU_SELF, %r13      /* can't leaq %gs:*, use self-ptr */
        addq    $CPU_KPTI_DBG, %r13

        movq    REG_OFF(KDIREG_R13)(%rsp), %rdx
        movq    %rdx, KPTI_R13(%r13)

        movq    REG_OFF(KDIREG_CR3)(%rsp), %rdx
        movq    %rdx, KPTI_TR_CR3(%r13)

        /* The trampoline will undo this later. */
        movq    %r13, REG_OFF(KDIREG_R13)(%rsp)
#endif

        KDI_RESTORE_REGS(%rsp)
        addq    $REG_OFF(KDIREG_RIP), %rsp      /* Discard state, trapno, err */
        /*
         * The common trampoline code will restore %cr3 to the right value
         * for either kernel or userland.
         */
#if !defined(__xpv)
        jmp     tr_iret_kdi
#else
        IRET
#endif
        /*NOTREACHED*/
        SET_SIZE(kdi_resume)


        /*
         * We took a trap that should be handled by the kernel, not KMDB.
         *
         * We're hard-coding the three cases where KMDB has installed permanent
         * handlers, since after we KDI_RESTORE_REGS(), we don't have registers
         * to work with; we can't use a global since other CPUs can easily pass
         * through here at the same time.
         *
         * Note that we handle T_DBGENTR since userspace might have tried it.
         *
         * The trap handler will expect the stack to be in trap order, with %rip
         * being the last entry, so we'll need to restore all our regs.  On
         * i86xpv we'll need to compensate for XPV_TRAP_POP.
         *
         * %rax on entry is either 1 or 2, which is from kdi_trap_pass().
         * kdi_cmnint stashed the original %cr3 into KDIREG_CR3, then (probably)
         * switched us to the CPU's kf_kernel_cr3. But we're about to call, for
         * example:
         *
         * dbgtrap->trap()->tr_iret_kernel
         *
         * which, unlike tr_iret_kdi, doesn't restore the original %cr3, so
         * we'll do so here if needed.
         *
         * This isn't just a matter of tidiness: for example, consider:
         *
         * hat_switch(oldhat=kas.a_hat, newhat=prochat)
         *  setcr3()
         *  reset_kpti()
         *   *brktrap* due to fbt on reset_kpti:entry
         *
         * Here, we have the new hat's %cr3, but we haven't yet updated
         * kf_kernel_cr3 (so it's currently kas's). So if we don't restore here,
         * we'll stay on kas's cr3 value on returning from the trap: not good if
         * we fault on a userspace address.
         */
        ENTRY_NP(kdi_pass_to_kernel)

        popq    %rdi /* cpusave */
        movq    $KDI_CPU_STATE_NONE, KRS_CPU_STATE(%rdi)
        movq    KRS_GREGS(%rdi), %rsp

        cmpq    $2, %rax
        jne     no_restore_cr3
        movq    REG_OFF(KDIREG_CR3)(%rsp), %r11
        movq    %r11, %cr3

no_restore_cr3:
        movq    REG_OFF(KDIREG_TRAPNO)(%rsp), %rdi

        cmpq    $T_SGLSTP, %rdi
        je      kdi_pass_dbgtrap
        cmpq    $T_BPTFLT, %rdi
        je      kdi_pass_brktrap
        cmpq    $T_DBGENTR, %rdi
        je      kdi_pass_invaltrap
        /*
         * Hmm, unknown handler.  Somebody forgot to update this when they
         * added a new trap interposition... try to drop back into kmdb.
         */
        int     $T_DBGENTR

#define CALL_TRAP_HANDLER(name) \
        KDI_RESTORE_REGS(%rsp); \
        /* Discard state, trapno, err */ \
        addq    $REG_OFF(KDIREG_RIP), %rsp; \
        XPV_TRAP_PUSH; \
        jmp     %cs:name

kdi_pass_dbgtrap:
        CALL_TRAP_HANDLER(dbgtrap)
        /*NOTREACHED*/
kdi_pass_brktrap:
        CALL_TRAP_HANDLER(brktrap)
        /*NOTREACHED*/
kdi_pass_invaltrap:
        CALL_TRAP_HANDLER(invaltrap)
        /*NOTREACHED*/

        SET_SIZE(kdi_pass_to_kernel)

        /*
         * A minimal version of mdboot(), to be used by the master CPU only.
         */
        ENTRY_NP(kdi_reboot)

        movl    $AD_BOOT, %edi
        movl    $A_SHUTDOWN, %esi
        call    *psm_shutdownf
#if defined(__xpv)
        movl    $SHUTDOWN_reboot, %edi
        call    HYPERVISOR_shutdown
#else
        call    reset
#endif
        /*NOTREACHED*/

        SET_SIZE(kdi_reboot)

        ENTRY_NP(kdi_cpu_debug_init)
        pushq   %rbp
        movq    %rsp, %rbp

        pushq   %r15            /* macro will clobber %r15 */
        KDI_RESTORE_DEBUGGING_STATE
        popq    %r15

        leave
        ret
        SET_SIZE(kdi_cpu_debug_init)

#define GETDREG(name, r)        \
        ENTRY_NP(name);         \
        movq    r, %rax;        \
        ret;                    \
        SET_SIZE(name)

#define SETDREG(name, r)        \
        ENTRY_NP(name);         \
        movq    %rdi, r;        \
        ret;                    \
        SET_SIZE(name)
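
/*
 * Each expansion is a trivial leaf routine; viewed from C, the accessors
 * look roughly like these illustrative prototypes:
 *
 *	ulong_t kdi_getdr0(void);
 *	void kdi_setdr0(ulong_t value);
 */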

        GETDREG(kdi_getdr0, %dr0)
        GETDREG(kdi_getdr1, %dr1)
        GETDREG(kdi_getdr2, %dr2)
        GETDREG(kdi_getdr3, %dr3)
        GETDREG(kdi_getdr6, %dr6)
        GETDREG(kdi_getdr7, %dr7)

        SETDREG(kdi_setdr0, %dr0)
        SETDREG(kdi_setdr1, %dr1)
        SETDREG(kdi_setdr2, %dr2)
        SETDREG(kdi_setdr3, %dr3)
        SETDREG(kdi_setdr6, %dr6)
        SETDREG(kdi_setdr7, %dr7)

#endif /* !__lint */