9441 kmdb should stash %cr3 in kdiregs
Reviewed by: John Levon <john.levon@joyent.com>
8956 Implement KPTI
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
Reviewed by: Robert Mustacchi <rm@joyent.com>

          --- old/usr/src/uts/intel/kdi/amd64/kdi_asm.s
          +++ new/usr/src/uts/intel/kdi/kdi_asm.s
[ ... 14 lines elided ... ]
  15   15   * If applicable, add the following below this CDDL HEADER, with the
  16   16   * fields enclosed by brackets "[]" replaced with your own identifying
  17   17   * information: Portions Copyright [yyyy] [name of copyright owner]
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  
  22   22  /*
  23   23   * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
  24   24   * Use is subject to license terms.
       25 + *
       26 + * Copyright 2018 Joyent, Inc.
  25   27   */
  26   28  
  27      -#pragma ident   "%Z%%M% %I%     %E% SMI"
  28      -
  29   29  /*
  30      - * Debugger entry for both master and slave CPUs
       30 + * Debugger entry and exit for both master and slave CPUs. kdi_idthdl.s contains
       31 + * the IDT stubs that drop into here (mainly via kdi_cmnint).
  31   32   */
  32   33  
  33   34  #if defined(__lint)
  34   35  #include <sys/types.h>
  35      -#endif
       36 +#else
  36   37  
  37   38  #include <sys/segments.h>
  38   39  #include <sys/asm_linkage.h>
  39   40  #include <sys/controlregs.h>
  40   41  #include <sys/x86_archext.h>
  41   42  #include <sys/privregs.h>
  42   43  #include <sys/machprivregs.h>
  43   44  #include <sys/kdi_regs.h>
  44   45  #include <sys/psw.h>
  45   46  #include <sys/uadmin.h>
  46   47  #ifdef __xpv
  47   48  #include <sys/hypervisor.h>
  48   49  #endif
  49      -
  50      -#ifdef _ASM
  51      -
  52   50  #include <kdi_assym.h>
  53   51  #include <assym.h>
  54   52  
  55   53  /* clobbers %rdx, %rcx, returns addr in %rax, CPU ID in %rbx */
  56   54  #define GET_CPUSAVE_ADDR \
  57   55          movzbq  %gs:CPU_ID, %rbx;               \
  58   56          movq    %rbx, %rax;                     \
  59   57          movq    $KRS_SIZE, %rcx;                \
  60   58          mulq    %rcx;                           \
  61   59          movq    $kdi_cpusave, %rdx;             \
[ ... 11 lines elided ... ]
  73   71          leaq    kdi_idt(%rip), %rsi;            \
  74   72          cmpq    %rsi, %r11;                     \
  75   73          je      1f;                             \
  76   74          movq    %r11, KRS_IDT(%rax);            \
  77   75          movq    %gs:CPU_GDT, %r11;              \
  78   76          movq    %r11, KRS_GDT(%rax);            \
  79   77  1:
  80   78  
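
For orientation, GET_CPUSAVE_ADDR above computes the current CPU's save-area
address as a plain per-CPU array lookup. A minimal C sketch of that
computation, assuming an illustrative KRS_SIZE (the real value comes from the
generated assym headers):

    #include <stdint.h>

    extern char kdi_cpusave[];      /* base of the per-CPU save array */
    #define KRS_SIZE 0x200          /* illustrative; real value is generated */

    /* addr = kdi_cpusave + cpu_id * KRS_SIZE */
    static void *
    get_cpusave_addr(uint8_t cpu_id)
    {
        return (kdi_cpusave + (uintptr_t)cpu_id * KRS_SIZE);
    }
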
  81   79  #ifdef __xpv
  82   80  
       81 +/*
       82 + * Already on kernel gsbase via the hypervisor.
       83 + */
  83   84  #define SAVE_GSBASE(reg) /* nothing */
  84   85  #define RESTORE_GSBASE(reg) /* nothing */
  85   86  
  86   87  #else
  87   88  
  88   89  #define SAVE_GSBASE(base)                               \
  89   90          movl    $MSR_AMD_GSBASE, %ecx;                  \
  90   91          rdmsr;                                          \
  91   92          shlq    $32, %rdx;                              \
  92   93          orq     %rax, %rdx;                             \
  93      -        movq    %rdx, REG_OFF(KDIREG_GSBASE)(base)
       94 +        movq    %rdx, REG_OFF(KDIREG_GSBASE)(base);     \
       95 +        movl    $MSR_AMD_KGSBASE, %ecx;                 \
       96 +        rdmsr;                                          \
       97 +        shlq    $32, %rdx;                              \
       98 +        orq     %rax, %rdx;                             \
       99 +        movq    %rdx, REG_OFF(KDIREG_KGSBASE)(base)
  94  100  
      101 +/*
      102 + * We shouldn't have stomped on KGSBASE, so don't try to restore it.
      103 + */
  95  104  #define RESTORE_GSBASE(base)                            \
  96  105          movq    REG_OFF(KDIREG_GSBASE)(base), %rdx;     \
  97  106          movq    %rdx, %rax;                             \
  98  107          shrq    $32, %rdx;                              \
  99  108          movl    $MSR_AMD_GSBASE, %ecx;                  \
 100  109          wrmsr
 101  110  
 102  111  #endif /* __xpv */
 103  112  
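
SAVE_GSBASE reads each MSR with rdmsr, which returns the 64-bit value split
across %edx:%eax; the shlq/orq pair reassembles it before the store. A C
sketch of that reassembly, assuming a hypothetical rdmsr_raw() helper standing
in for the instruction:

    #include <stdint.h>

    /* Hypothetical stand-in for the rdmsr instruction (%edx:%eax result). */
    extern void rdmsr_raw(uint32_t msr, uint32_t *lo, uint32_t *hi);

    static uint64_t
    read_msr64(uint32_t msr)
    {
        uint32_t lo, hi;

        rdmsr_raw(msr, &lo, &hi);
        /* shlq $32, %rdx; orq %rax, %rdx */
        return (((uint64_t)hi << 32) | lo);
    }
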
 104  113  /*
 105      - * %ss, %rsp, %rflags, %cs, %rip, %err, %trapno are already on the stack.  Note
 106      - * that on the hypervisor, we skip the save/restore of GSBASE: it's slow, and
 107      - * unnecessary.
      114 + * %ss, %rsp, %rflags, %cs, %rip, %err, %trapno are already on the stack.
 108  115   */
 109  116  #define KDI_SAVE_REGS(base) \
 110  117          movq    %rdi, REG_OFF(KDIREG_RDI)(base);        \
 111  118          movq    %rsi, REG_OFF(KDIREG_RSI)(base);        \
 112  119          movq    %rdx, REG_OFF(KDIREG_RDX)(base);        \
 113  120          movq    %rcx, REG_OFF(KDIREG_RCX)(base);        \
 114  121          movq    %r8, REG_OFF(KDIREG_R8)(base);          \
 115  122          movq    %r9, REG_OFF(KDIREG_R9)(base);          \
 116  123          movq    %rax, REG_OFF(KDIREG_RAX)(base);        \
 117  124          movq    %rbx, REG_OFF(KDIREG_RBX)(base);        \
 118  125          movq    %rbp, REG_OFF(KDIREG_RBP)(base);        \
 119  126          movq    %r10, REG_OFF(KDIREG_R10)(base);        \
 120  127          movq    %r11, REG_OFF(KDIREG_R11)(base);        \
 121  128          movq    %r12, REG_OFF(KDIREG_R12)(base);        \
 122  129          movq    %r13, REG_OFF(KDIREG_R13)(base);        \
 123  130          movq    %r14, REG_OFF(KDIREG_R14)(base);        \
 124  131          movq    %r15, REG_OFF(KDIREG_R15)(base);        \
 125  132          movq    %rbp, REG_OFF(KDIREG_SAVFP)(base);      \
 126  133          movq    REG_OFF(KDIREG_RIP)(base), %rax;        \
 127  134          movq    %rax, REG_OFF(KDIREG_SAVPC)(base);      \
      135 +        movq    %cr2, %rax;                             \
      136 +        movq    %rax, REG_OFF(KDIREG_CR2)(base);        \
 128  137          clrq    %rax;                                   \
 129  138          movw    %ds, %ax;                               \
 130  139          movq    %rax, REG_OFF(KDIREG_DS)(base);         \
 131  140          movw    %es, %ax;                               \
 132  141          movq    %rax, REG_OFF(KDIREG_ES)(base);         \
 133  142          movw    %fs, %ax;                               \
 134  143          movq    %rax, REG_OFF(KDIREG_FS)(base);         \
 135  144          movw    %gs, %ax;                               \
 136  145          movq    %rax, REG_OFF(KDIREG_GS)(base);         \
 137  146          SAVE_GSBASE(base)
 138  147  
 139  148  #define KDI_RESTORE_REGS(base) \
 140  149          movq    base, %rdi;                             \
 141  150          RESTORE_GSBASE(%rdi);                           \
 142  151          movq    REG_OFF(KDIREG_ES)(%rdi), %rax;         \
 143  152          movw    %ax, %es;                               \
 144  153          movq    REG_OFF(KDIREG_DS)(%rdi), %rax;         \
 145  154          movw    %ax, %ds;                               \
      155 +        movq    REG_OFF(KDIREG_CR2)(base), %rax;        \
      156 +        movq    %rax, %cr2;                             \
 146  157          movq    REG_OFF(KDIREG_R15)(%rdi), %r15;        \
 147  158          movq    REG_OFF(KDIREG_R14)(%rdi), %r14;        \
 148  159          movq    REG_OFF(KDIREG_R13)(%rdi), %r13;        \
 149  160          movq    REG_OFF(KDIREG_R12)(%rdi), %r12;        \
 150  161          movq    REG_OFF(KDIREG_R11)(%rdi), %r11;        \
 151  162          movq    REG_OFF(KDIREG_R10)(%rdi), %r10;        \
 152  163          movq    REG_OFF(KDIREG_RBP)(%rdi), %rbp;        \
 153  164          movq    REG_OFF(KDIREG_RBX)(%rdi), %rbx;        \
 154  165          movq    REG_OFF(KDIREG_RAX)(%rdi), %rax;        \
 155  166          movq    REG_OFF(KDIREG_R9)(%rdi), %r9;          \
 156  167          movq    REG_OFF(KDIREG_R8)(%rdi), %r8;          \
 157  168          movq    REG_OFF(KDIREG_RCX)(%rdi), %rcx;        \
 158  169          movq    REG_OFF(KDIREG_RDX)(%rdi), %rdx;        \
 159  170          movq    REG_OFF(KDIREG_RSI)(%rdi), %rsi;        \
 160  171          movq    REG_OFF(KDIREG_RDI)(%rdi), %rdi
 161  172  
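
KDI_SAVE_REGS fills in the kdiregs frame on top of the hardware-pushed
portion, and KDI_RESTORE_REGS unwinds it in roughly reverse order. As a rough
C view of the frame these macros populate (field order follows the saves
above; the authoritative layout and offsets live in sys/kdi_regs.h, so this
struct is purely illustrative):

    #include <stdint.h>

    /* Illustrative only: the real layout/offsets come from sys/kdi_regs.h. */
    typedef struct kdiregs_sketch {
        uint64_t kr_rdi, kr_rsi, kr_rdx, kr_rcx, kr_r8, kr_r9;
        uint64_t kr_rax, kr_rbx, kr_rbp, kr_r10, kr_r11;
        uint64_t kr_r12, kr_r13, kr_r14, kr_r15;
        uint64_t kr_savfp, kr_savpc;    /* frame ptr / PC for stack traces */
        uint64_t kr_cr2, kr_cr3;        /* stashed by this change */
        uint64_t kr_ds, kr_es, kr_fs, kr_gs;
        uint64_t kr_gsbase, kr_kgsbase;
        uint64_t kr_trapno, kr_err;     /* pushed before the save */
        uint64_t kr_rip, kr_cs, kr_rflags, kr_rsp, kr_ss; /* hardware frame */
    } kdiregs_sketch_t;
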
 162  173  /*
 163  174   * Given the address of the current CPU's cpusave area in %rax, the following
 164  175   * macro restores the debugging state to said CPU.  Restored state includes
 165      - * the debug registers from the global %dr variables, and debugging MSRs from
 166      - * the CPU save area.  This code would be in a separate routine, but for the
 167      - * fact that some of the MSRs are jump-sensitive.  As such, we need to minimize
 168      - * the number of jumps taken subsequent to the update of said MSRs.  We can
 169      - * remove one jump (the ret) by using a macro instead of a function for the
 170      - * debugging state restoration code.
      176 + * the debug registers from the global %dr variables.
 171  177   *
 172      - * Takes the cpusave area in %rdi as a parameter, clobbers %rax-%rdx
 173      - */     
      178 + * Takes the cpusave area in %rdi as a parameter.
      179 + */
 174  180  #define KDI_RESTORE_DEBUGGING_STATE \
 175  181          pushq   %rdi;                                           \
 176  182          leaq    kdi_drreg(%rip), %r15;                          \
 177  183          movl    $7, %edi;                                       \
 178  184          movq    DR_CTL(%r15), %rsi;                             \
 179  185          call    kdi_dreg_set;                                   \
 180  186                                                                  \
 181  187          movl    $6, %edi;                                       \
 182  188          movq    $KDIREG_DRSTAT_RESERVED, %rsi;                  \
 183  189          call    kdi_dreg_set;                                   \
[ ... 3 lines elided ... ]
 187  193          call    kdi_dreg_set;                                   \
 188  194          movl    $1, %edi;                                       \
 189  195          movq    DRADDR_OFF(1)(%r15), %rsi;                      \
 190  196          call    kdi_dreg_set;                                   \
 191  197          movl    $2, %edi;                                       \
 192  198          movq    DRADDR_OFF(2)(%r15), %rsi;                      \
 193  199          call    kdi_dreg_set;                                   \
 194  200          movl    $3, %edi;                                       \
 195  201          movq    DRADDR_OFF(3)(%r15), %rsi;                      \
 196  202          call    kdi_dreg_set;                                   \
 197      -        popq    %rdi;                                           \
 198      -                                                                \
 199      -        /*                                                      \
 200      -         * Write any requested MSRs.                            \
 201      -         */                                                     \
 202      -        movq    KRS_MSR(%rdi), %rbx;                            \
 203      -        cmpq    $0, %rbx;                                       \
 204      -        je      3f;                                             \
 205      -1:                                                              \
 206      -        movl    MSR_NUM(%rbx), %ecx;                            \
 207      -        cmpl    $0, %ecx;                                       \
 208      -        je      3f;                                             \
 209      -                                                                \
 210      -        movl    MSR_TYPE(%rbx), %edx;                           \
 211      -        cmpl    $KDI_MSR_WRITE, %edx;                           \
 212      -        jne     2f;                                             \
 213      -                                                                \
 214      -        movq    MSR_VALP(%rbx), %rdx;                           \
 215      -        movl    0(%rdx), %eax;                                  \
 216      -        movl    4(%rdx), %edx;                                  \
 217      -        wrmsr;                                                  \
 218      -2:                                                              \
 219      -        addq    $MSR_SIZE, %rbx;                                \
 220      -        jmp     1b;                                             \
 221      -3:                                                              \
 222      -        /*                                                      \
 223      -         * We must not branch after re-enabling LBR.  If        \
 224      -         * kdi_wsr_wrexit_msr is set, it contains the number    \
 225      -         * of the MSR that controls LBR.  kdi_wsr_wrexit_valp   \
 226      -         * contains the value that is to be written to enable   \
 227      -         * LBR.                                                 \
 228      -         */                                                     \
 229      -        leaq    kdi_msr_wrexit_msr(%rip), %rcx;                 \
 230      -        movl    (%rcx), %ecx;                                   \
 231      -        cmpl    $0, %ecx;                                       \
 232      -        je      1f;                                             \
 233      -                                                                \
 234      -        leaq    kdi_msr_wrexit_valp(%rip), %rdx;                \
 235      -        movq    (%rdx), %rdx;                                   \
 236      -        movl    0(%rdx), %eax;                                  \
 237      -        movl    4(%rdx), %edx;                                  \
 238      -                                                                \
 239      -        wrmsr;                                                  \
 240      -1:
      203 +        popq    %rdi;
 241  204  
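
With the MSR machinery gone, KDI_RESTORE_DEBUGGING_STATE reduces to a fixed
sequence of kdi_dreg_set() calls. In C terms, the macro is equivalent to the
following (the kdi_drreg layout shown is an illustrative mirror of the
DR_CTL/DRADDR_OFF accessors above, and the %dr6 reserved-bit constant is
assumed):

    /* Illustrative mirror of the global debug-register state. */
    extern struct {
        unsigned long dr_ctl;           /* %dr7 image */
        unsigned long dr_addr[4];       /* %dr0-%dr3 images */
    } kdi_drreg;

    extern void kdi_dreg_set(int, unsigned long);

    #define DRSTAT_RESERVED 0xffff0ff0UL /* assumed %dr6 reserved bits */

    static void
    restore_debugging_state(void)
    {
        int i;

        kdi_dreg_set(7, kdi_drreg.dr_ctl);
        kdi_dreg_set(6, DRSTAT_RESERVED); /* quiesce %dr6 status bits */
        for (i = 0; i <= 3; i++)
            kdi_dreg_set(i, kdi_drreg.dr_addr[i]);
    }
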
 242  205  /*
 243  206   * Each cpusave buffer has an area set aside for a ring buffer of breadcrumbs.
 244  207   * The following macros manage the buffer.
 245  208   */
 246  209  
 247  210  /* Advance the ring buffer */
 248  211  #define ADVANCE_CRUMB_POINTER(cpusave, tmp1, tmp2) \
 249  212          movq    KRS_CURCRUMBIDX(cpusave), tmp1; \
 250  213          cmpq    $[KDI_NCRUMBS - 1], tmp1;       \
[ ... 12 lines elided ... ]
 263  226          movq    $KDI_NCRUMBS, tmp2;             \
 264  227  3:      movq    $0, -4(tmp1, tmp2, 4);          \
 265  228          decq    tmp2;                           \
 266  229          jnz     3b
 267  230  
 268  231  /* Set a value in the current breadcrumb buffer */
 269  232  #define ADD_CRUMB(cpusave, offset, value, tmp) \
 270  233          movq    KRS_CURCRUMB(cpusave), tmp;     \
 271  234          movq    value, offset(tmp)
 272  235  
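
The crumb-index arithmetic in ADVANCE_CRUMB_POINTER is a plain wrap-around
increment; a one-line C sketch, assuming an illustrative KDI_NCRUMBS:

    #define KDI_NCRUMBS 5       /* illustrative; the real value is generated */

    static unsigned int
    advance_crumb_index(unsigned int curidx)
    {
        /* wrap back to slot 0 after the last slot */
        return (curidx == KDI_NCRUMBS - 1 ? 0 : curidx + 1);
    }
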
 273      -#endif  /* _ASM */
 274      -
 275      -#if defined(__lint)
 276      -void
 277      -kdi_cmnint(void)
 278      -{
 279      -}
 280      -#else   /* __lint */
 281      -
 282  236          /* XXX implement me */
 283  237          ENTRY_NP(kdi_nmiint)
 284  238          clrq    %rcx
 285  239          movq    (%rcx), %rcx
 286  240          SET_SIZE(kdi_nmiint)
 287  241  
 288  242          /*
 289  243           * The main entry point for master CPUs.  It also serves as the trap
 290  244           * handler for all traps and interrupts taken during single-step.
 291  245           */
[ ... 29 lines elided ... ]
 321  275          subq    $10, %rsp
 322  276          sgdt    (%rsp)
 323  277          movq    2(%rsp), %rdi   /* gdt base now in %rdi */
 324  278          addq    $10, %rsp
 325  279          call    kdi_gdt2gsbase  /* returns kernel's GSBASE in %rax */
 326  280  
 327  281          movq    %rax, %rdx
 328  282          shrq    $32, %rdx
 329  283          movl    $MSR_AMD_GSBASE, %ecx
 330  284          wrmsr
      285 +
      286 +        /*
      287 +         * In the trampoline we stashed the incoming %cr3. Copy this into
      288 +         * the kdiregs for restoration and later use.
      289 +         */
      290 +        mov     %gs:(CPU_KPTI_DBG+KPTI_TR_CR3), %rdx
      291 +        mov     %rdx, REG_OFF(KDIREG_CR3)(%rsp)
      292 +        /*
      293 +         * Switch to the kernel's %cr3. From the early interrupt handler
      294 +         * until now we've been running on the "paranoid" %cr3 (that of kas
      295 +         * from early in boot).
      296 +         *
      297 +         * If we took the interrupt from somewhere already on the kas/paranoid
      298 +         * %cr3 though, don't change it (this could happen if kcr3 is corrupt
      299 +         * and we took a gptrap earlier from this very code).
      300 +         */
      301 +        cmpq    %rdx, kpti_safe_cr3
      302 +        je      .no_kcr3
      303 +        mov     %gs:CPU_KPTI_KCR3, %rdx
      304 +        cmpq    $0, %rdx
      305 +        je      .no_kcr3
      306 +        mov     %rdx, %cr3
      307 +.no_kcr3:
      308 +
 331  309  #endif  /* __xpv */
 332  310  
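
The %cr3 recovery stanza above boils down to a small decision: keep the
stashed trampoline %cr3 for later restore, and only switch to the kernel's
%cr3 if we weren't already on the paranoid one and kcr3 looks sane. As C-style
pseudocode (write_cr3() is a hypothetical helper; the names mirror the
CPU_KPTI_* offsets used above):

    #include <stdint.h>

    extern uint64_t kpti_safe_cr3;      /* the "paranoid" kas %cr3 */
    extern void write_cr3(uint64_t);    /* hypothetical mov-to-%cr3 helper */

    static void
    switch_to_kcr3(uint64_t tr_cr3, uint64_t kcr3)
    {
        /*
         * tr_cr3 is the incoming %cr3 the trampoline stashed; it has
         * already been copied into the kdiregs frame. Only switch if we
         * weren't already on the paranoid %cr3 and kcr3 is non-zero.
         */
        if (tr_cr3 != kpti_safe_cr3 && kcr3 != 0)
            write_cr3(kcr3);
    }
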
 333  311          GET_CPUSAVE_ADDR        /* %rax = cpusave, %rbx = CPU ID */
 334  312  
 335  313          ADVANCE_CRUMB_POINTER(%rax, %rcx, %rdx)
 336  314  
 337  315          ADD_CRUMB(%rax, KRM_CPU_STATE, $KDI_CPU_STATE_MASTER, %rdx)
 338  316  
 339  317          movq    REG_OFF(KDIREG_RIP)(%rsp), %rcx
 340  318          ADD_CRUMB(%rax, KRM_PC, %rcx, %rdx)
[ ... 3 lines elided ... ]
 344  322  
 345  323          movq    %rsp, %rbp
 346  324          pushq   %rax
 347  325  
 348  326          /*
  349  327           * Were we in the debugger when we took the trap (i.e. was %rsp in one
 350  328           * of the debugger's memory ranges)?
 351  329           */
 352  330          leaq    kdi_memranges, %rcx
 353  331          movl    kdi_nmemranges, %edx
 354      -1:      cmpq    MR_BASE(%rcx), %rsp
      332 +1:
      333 +        cmpq    MR_BASE(%rcx), %rsp
 355  334          jl      2f              /* below this range -- try the next one */
 356  335          cmpq    MR_LIM(%rcx), %rsp
 357  336          jg      2f              /* above this range -- try the next one */
 358  337          jmp     3f              /* matched within this range */
 359  338  
 360      -2:      decl    %edx
      339 +2:
      340 +        decl    %edx
 361  341          jz      kdi_save_common_state   /* %rsp not within debugger memory */
 362  342          addq    $MR_SIZE, %rcx
 363  343          jmp     1b
 364  344  
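
The loop at 1:/2: is a linear walk of kdi_memranges; in C it would read as
follows (mr_base/mr_lim mirror the MR_BASE/MR_LIM offsets, and the struct
shape is illustrative):

    #include <stdint.h>

    typedef struct mem_range {          /* illustrative MR_BASE/MR_LIM shape */
        uintptr_t mr_base;
        uintptr_t mr_lim;
    } mem_range_t;

    extern mem_range_t kdi_memranges[];
    extern int kdi_nmemranges;

    static int
    rsp_in_debugger(uintptr_t rsp)
    {
        int i;

        for (i = 0; i < kdi_nmemranges; i++) {
            if (rsp >= kdi_memranges[i].mr_base &&
                rsp <= kdi_memranges[i].mr_lim)
                return (1);     /* trap taken inside the debugger */
        }
        return (0);
    }
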
 365  345  3:      /*
 366  346           * The master is still set.  That should only happen if we hit a trap
 367  347           * while running in the debugger.  Note that it may be an intentional
 368  348           * fault.  kmdb_dpi_handle_fault will sort it all out.
 369  349           */
 370  350  
[ ... 9 lines elided ... ]
 380  360           * elected to solve it by having the debugger debug itself.  The
 381  361           * state we're about to save is that of the debugger when it took
 382  362           * the fault.
 383  363           */
 384  364  
 385  365          jmp     kdi_save_common_state
 386  366  
 387  367          SET_SIZE(kdi_master_entry)
 388  368          SET_SIZE(kdi_cmnint)
 389  369  
 390      -#endif  /* __lint */
 391      -
 392  370  /*
 393  371   * The cross-call handler for slave CPUs.
 394  372   *
 395  373   * The debugger is single-threaded, so only one CPU, called the master, may be
 396  374   * running it at any given time.  The other CPUs, known as slaves, spin in a
 397  375   * busy loop until there's something for them to do.  This is the entry point
 398  376   * for the slaves - they'll be sent here in response to a cross-call sent by the
 399  377   * master.
 400  378   */
 401  379  
 402      -#if defined(__lint)
 403      -char kdi_slave_entry_patch;
 404      -
 405      -void
 406      -kdi_slave_entry(void)
 407      -{
 408      -}
 409      -#else /* __lint */
 410      -        .globl  kdi_slave_entry_patch;
 411      -
 412  380          ENTRY_NP(kdi_slave_entry)
 413  381  
 414      -        /* kdi_msr_add_clrentry knows where this is */
 415      -kdi_slave_entry_patch:
 416      -        KDI_MSR_PATCH;
 417      -
 418  382          /*
 419  383           * Cross calls are implemented as function calls, so our stack currently
 420  384           * looks like one you'd get from a zero-argument function call.  That
 421  385           * is, there's the return %rip at %rsp, and that's about it.  We need
 422  386           * to make it look like an interrupt stack.  When we first save, we'll
 423  387           * reverse the saved %ss and %rip, which we'll fix back up when we've
 424  388           * freed up some general-purpose registers.  We'll also need to fix up
 425  389           * the saved %rsp.
 426  390           */
 427  391  
[ ... 3 lines elided ... ]
 431  395          pushq   $KCS_SEL
 432  396          clrq    %rax
 433  397          movw    %ss, %ax
 434  398          pushq   %rax            /* rip should be here */
 435  399          pushq   $-1             /* phony trap error code */
 436  400          pushq   $-1             /* phony trap number */
 437  401  
 438  402          subq    $REG_OFF(KDIREG_TRAPNO), %rsp
 439  403          KDI_SAVE_REGS(%rsp)
 440  404  
      405 +        movq    %cr3, %rax
      406 +        movq    %rax, REG_OFF(KDIREG_CR3)(%rsp)
      407 +
 441  408          movq    REG_OFF(KDIREG_SS)(%rsp), %rax
 442  409          xchgq   REG_OFF(KDIREG_RIP)(%rsp), %rax
 443  410          movq    %rax, REG_OFF(KDIREG_SS)(%rsp)
 444  411  
 445  412          movq    REG_OFF(KDIREG_RSP)(%rsp), %rax
 446  413          addq    $8, %rax
 447  414          movq    %rax, REG_OFF(KDIREG_RSP)(%rsp)
 448  415  
 449      -        /* 
      416 +        /*
 450  417           * We've saved all of the general-purpose registers, and have a stack
 451  418           * that is irettable (after we strip down to the error code)
 452  419           */
 453  420  
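
The slave-entry fixup just performed can be summarized in C: the %ss and %rip
slots were pushed in swapped order, and the saved %rsp still includes the
cross-call return address. A sketch of the equivalent slot surgery
(illustrative; the real code works on REG_OFF offsets into the frame):

    #include <stdint.h>

    static void
    fixup_slave_frame(uint64_t *ss_slot, uint64_t *rip_slot, uint64_t *rsp_slot)
    {
        /* ss_slot arrived holding the return %rip, rip_slot the saved %ss */
        uint64_t tmp = *ss_slot;

        *ss_slot = *rip_slot;   /* the xchgq of the two slots */
        *rip_slot = tmp;
        *rsp_slot += 8;         /* discard the cross-call return address */
    }
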
 454  421          GET_CPUSAVE_ADDR        /* %rax = cpusave, %rbx = CPU ID */
 455  422  
 456  423          ADVANCE_CRUMB_POINTER(%rax, %rcx, %rdx)
 457  424  
 458  425          ADD_CRUMB(%rax, KRM_CPU_STATE, $KDI_CPU_STATE_SLAVE, %rdx)
 459  426  
 460  427          movq    REG_OFF(KDIREG_RIP)(%rsp), %rcx
 461  428          ADD_CRUMB(%rax, KRM_PC, %rcx, %rdx)
 462  429  
 463  430          pushq   %rax
 464  431          jmp     kdi_save_common_state
 465  432  
 466  433          SET_SIZE(kdi_slave_entry)
 467  434  
 468      -#endif  /* __lint */
 469      -
 470  435  /*
 471  436   * The state of the world:
 472  437   *
 473  438   * The stack has a complete set of saved registers and segment
 474  439   * selectors, arranged in the kdi_regs.h order.  It also has a pointer
 475  440   * to our cpusave area.
 476  441   *
 477  442   * We need to save, into the cpusave area, a pointer to these saved
 478  443   * registers.  First we check whether we should jump straight back to
 479  444   * the kernel.  If not, we save a few more registers, ready the
 480  445   * machine for debugger entry, and enter the debugger.
 481  446   */
 482  447  
 483      -#if !defined(__lint)
 484      -
 485  448          ENTRY_NP(kdi_save_common_state)
 486  449  
 487  450          popq    %rdi                    /* the cpusave area */
 488  451          movq    %rsp, KRS_GREGS(%rdi)   /* save ptr to current saved regs */
 489  452  
 490  453          pushq   %rdi
 491  454          call    kdi_trap_pass
 492  455          cmpq    $1, %rax
 493  456          je      kdi_pass_to_kernel
 494  457          popq    %rax /* cpusave in %rax */
[ ... 35 lines elided ... ]
 530  493          movl    $2, %edi
 531  494          call    kdi_dreg_get
 532  495          movq    %rax, KRS_DROFF(2)(%r15)
 533  496  
 534  497          movl    $3, %edi
 535  498          call    kdi_dreg_get
 536  499          movq    %rax, KRS_DROFF(3)(%r15)
 537  500  
 538  501          movq    %r15, %rax      /* restore cpu save area to rax */
 539  502  
 540      -        /*
 541      -         * Save any requested MSRs.
 542      -         */
 543      -        movq    KRS_MSR(%rax), %rcx
 544      -        cmpq    $0, %rcx
 545      -        je      no_msr
 546      -
 547      -        pushq   %rax            /* rdmsr clobbers %eax */
 548      -        movq    %rcx, %rbx
 549      -
 550      -1:
 551      -        movl    MSR_NUM(%rbx), %ecx
 552      -        cmpl    $0, %ecx
 553      -        je      msr_done
 554      -
 555      -        movl    MSR_TYPE(%rbx), %edx
 556      -        cmpl    $KDI_MSR_READ, %edx
 557      -        jne     msr_next
 558      -
 559      -        rdmsr                   /* addr in %ecx, value into %edx:%eax */
 560      -        movl    %eax, MSR_VAL(%rbx)
 561      -        movl    %edx, _CONST(MSR_VAL + 4)(%rbx)
 562      -
 563      -msr_next:
 564      -        addq    $MSR_SIZE, %rbx
 565      -        jmp     1b
 566      -
 567      -msr_done:
 568      -        popq    %rax
 569      -
 570      -no_msr:
 571  503          clrq    %rbp            /* stack traces should end here */
 572  504  
 573  505          pushq   %rax
 574  506          movq    %rax, %rdi      /* cpusave */
 575  507  
 576  508          call    kdi_debugger_entry
 577  509  
 578  510          /* Pass cpusave to kdi_resume */
 579  511          popq    %rdi
 580  512  
 581  513          jmp     kdi_resume
 582  514  
 583  515          SET_SIZE(kdi_save_common_state)
 584  516  
 585      -#endif  /* !__lint */
 586      -
 587  517  /*
 588  518   * Resume the world.  The code that calls kdi_resume has already
 589  519   * decided whether or not to restore the IDT.
 590  520   */
 591      -#if defined(__lint)
 592      -void
 593      -kdi_resume(void)
 594      -{
 595      -}
 596      -#else   /* __lint */
 597      -
 598  521          /* cpusave in %rdi */
 599  522          ENTRY_NP(kdi_resume)
 600  523  
 601  524          /*
 602  525           * Send this CPU back into the world
 603  526           */
 604  527  #if !defined(__xpv)
 605  528          movq    KRS_CR0(%rdi), %rdx
 606  529          movq    %rdx, %cr0
 607  530  #endif
 608  531  
 609  532          KDI_RESTORE_DEBUGGING_STATE
 610  533  
 611  534          movq    KRS_GREGS(%rdi), %rsp
      535 +
      536 +#if !defined(__xpv)
      537 +        /*
       538 +         * If we're going back via tr_iret_kdi, then we want to copy the
       539 +         * final %cr3 we're returning to into the kpti_dbg area now.
      540 +         *
      541 +         * Since the trampoline needs to find the kpti_dbg too, we enter it
      542 +         * with %r13 set to point at that. The real %r13 (to restore before
      543 +         * the iret) we stash in the kpti_dbg itself.
      544 +         */
      545 +        movq    %gs:CPU_SELF, %r13      /* can't leaq %gs:*, use self-ptr */
      546 +        addq    $CPU_KPTI_DBG, %r13
      547 +
      548 +        movq    REG_OFF(KDIREG_R13)(%rsp), %rdx
      549 +        movq    %rdx, KPTI_R13(%r13)
      550 +
      551 +        movq    REG_OFF(KDIREG_CR3)(%rsp), %rdx
      552 +        movq    %rdx, KPTI_TR_CR3(%r13)
      553 +
      554 +        /* The trampoline will undo this later. */
      555 +        movq    %r13, REG_OFF(KDIREG_R13)(%rsp)
      556 +#endif
      557 +
 612  558          KDI_RESTORE_REGS(%rsp)
 613  559          addq    $REG_OFF(KDIREG_RIP), %rsp      /* Discard state, trapno, err */
      560 +        /*
      561 +         * The common trampoline code will restore %cr3 to the right value
      562 +         * for either kernel or userland.
      563 +         */
      564 +#if !defined(__xpv)
      565 +        jmp     tr_iret_kdi
      566 +#else
 614  567          IRET
      568 +#endif
 615  569          /*NOTREACHED*/
 616  570          SET_SIZE(kdi_resume)
 617  571  
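
The kpti_dbg handoff in kdi_resume deserves a summary: tr_iret_kdi expects
%r13 to point at the per-CPU kpti_dbg area, so the real %r13 and the final
%cr3 are parked there first, and the trampoline undoes the swap before the
iret. A C sketch of that staging, with an illustrative struct standing in for
the KPTI_R13/KPTI_TR_CR3 offsets:

    #include <stdint.h>

    /* Illustrative stand-in for the per-CPU kpti_dbg fields used above. */
    struct kpti_dbg_sketch {
        uint64_t kd_r13;        /* real %r13, restored before the iret */
        uint64_t kd_tr_cr3;     /* final %cr3 for the trampoline */
    };

    static void
    stage_kdi_iret(struct kpti_dbg_sketch *kd, uint64_t *r13_slot, uint64_t cr3)
    {
        kd->kd_r13 = *r13_slot;         /* stash the real %r13 */
        kd->kd_tr_cr3 = cr3;            /* %cr3 tr_iret_kdi will load */
        *r13_slot = (uintptr_t)kd;      /* trampoline undoes this later */
    }
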
 618      -#endif  /* __lint */
 619      -
 620      -#if !defined(__lint)
 621      -
 622  572          ENTRY_NP(kdi_pass_to_kernel)
 623  573  
 624  574          popq    %rdi /* cpusave */
 625  575  
 626  576          movq    $KDI_CPU_STATE_NONE, KRS_CPU_STATE(%rdi)
 627  577  
 628  578          /*
 629  579           * Find the trap and vector off the right kernel handler.  The trap
 630  580           * handler will expect the stack to be in trap order, with %rip being
 631  581           * the last entry, so we'll need to restore all our regs.  On i86xpv
[ ... 12 lines elided ... ]
 644  594          je      1f
 645  595          cmpq    $T_BPTFLT, %rdi
 646  596          je      2f
 647  597          cmpq    $T_DBGENTR, %rdi
 648  598          je      3f
 649  599          /*
 650  600           * Hmm, unknown handler.  Somebody forgot to update this when they
 651  601           * added a new trap interposition... try to drop back into kmdb.
 652  602           */
 653  603          int     $T_DBGENTR
 654      -        
      604 +
 655  605  #define CALL_TRAP_HANDLER(name) \
 656  606          KDI_RESTORE_REGS(%rsp); \
 657  607          /* Discard state, trapno, err */ \
 658  608          addq    $REG_OFF(KDIREG_RIP), %rsp; \
 659  609          XPV_TRAP_PUSH; \
 660  610          jmp     %cs:name
 661  611  
 662  612  1:
 663  613          CALL_TRAP_HANDLER(dbgtrap)
 664  614          /*NOTREACHED*/
[ ... 17 lines elided ... ]
 682  632  #if defined(__xpv)
 683  633          movl    $SHUTDOWN_reboot, %edi
 684  634          call    HYPERVISOR_shutdown
 685  635  #else
 686  636          call    reset
 687  637  #endif
 688  638          /*NOTREACHED*/
 689  639  
 690  640          SET_SIZE(kdi_reboot)
 691  641  
 692      -#endif  /* !__lint */
 693      -
 694      -#if defined(__lint)
 695      -/*ARGSUSED*/
 696      -void
 697      -kdi_cpu_debug_init(kdi_cpusave_t *save)
 698      -{
 699      -}
 700      -#else   /* __lint */
 701      -
 702  642          ENTRY_NP(kdi_cpu_debug_init)
 703  643          pushq   %rbp
 704  644          movq    %rsp, %rbp
 705  645  
 706  646          pushq   %rbx            /* macro will clobber %rbx */
 707  647          KDI_RESTORE_DEBUGGING_STATE
 708  648          popq    %rbx
 709  649  
 710  650          leave
 711  651          ret
 712      -
 713  652          SET_SIZE(kdi_cpu_debug_init)
 714      -#endif  /* !__lint */
 715  653  
      654 +#define GETDREG(name, r)        \
      655 +        ENTRY_NP(name);         \
      656 +        movq    r, %rax;        \
      657 +        ret;                    \
      658 +        SET_SIZE(name)
      659 +
      660 +#define SETDREG(name, r)        \
      661 +        ENTRY_NP(name);         \
      662 +        movq    %rdi, r;        \
      663 +        ret;                    \
      664 +        SET_SIZE(name)
      665 +
      666 +        GETDREG(kdi_getdr0, %dr0)
      667 +        GETDREG(kdi_getdr1, %dr1)
      668 +        GETDREG(kdi_getdr2, %dr2)
      669 +        GETDREG(kdi_getdr3, %dr3)
      670 +        GETDREG(kdi_getdr6, %dr6)
      671 +        GETDREG(kdi_getdr7, %dr7)
      672 +
      673 +        SETDREG(kdi_setdr0, %dr0)
      674 +        SETDREG(kdi_setdr1, %dr1)
      675 +        SETDREG(kdi_setdr2, %dr2)
      676 +        SETDREG(kdi_setdr3, %dr3)
      677 +        SETDREG(kdi_setdr6, %dr6)
      678 +        SETDREG(kdi_setdr7, %dr7)
      679 +
      680 +#endif /* !__lint */
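
The GETDREG/SETDREG macros stamp out one trivial accessor per hardware debug
register. From C, the generated entry points would be declared along these
lines (prototypes assumed from the symbol names, not taken from a header):

    /* Assumed prototypes for the generated accessors. */
    extern unsigned long kdi_getdr0(void), kdi_getdr1(void), kdi_getdr2(void),
        kdi_getdr3(void), kdi_getdr6(void), kdi_getdr7(void);
    extern void kdi_setdr0(unsigned long), kdi_setdr1(unsigned long),
        kdi_setdr2(unsigned long), kdi_setdr3(unsigned long),
        kdi_setdr6(unsigned long), kdi_setdr7(unsigned long);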
    