Print this page
9210 remove KMDB branch debugging support
9211 ::crregs could do with cr2/cr3 support
9209 ::ttrace should be able to filter by thread
Reviewed by: Patrick Mooney <patrick.mooney@joyent.com>

Split Close
Expand all
Collapse all
          --- old/usr/src/uts/intel/kdi/amd64/kdi_asm.s
          +++ new/usr/src/uts/intel/kdi/amd64/kdi_asm.s
↓ open down ↓ 14 lines elided ↑ open up ↑
  15   15   * If applicable, add the following below this CDDL HEADER, with the
  16   16   * fields enclosed by brackets "[]" replaced with your own identifying
  17   17   * information: Portions Copyright [yyyy] [name of copyright owner]
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  
  22   22  /*
  23   23   * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
  24   24   * Use is subject to license terms.
       25 + *
       26 + * Copyright 2018 Joyent, Inc.
  25   27   */
  26   28  
  27      -#pragma ident   "%Z%%M% %I%     %E% SMI"
  28      -
  29   29  /*
  30   30   * Debugger entry for both master and slave CPUs
  31   31   */
  32   32  
  33   33  #if defined(__lint)
  34   34  #include <sys/types.h>
  35   35  #endif
  36   36  
  37   37  #include <sys/segments.h>
  38   38  #include <sys/asm_linkage.h>
↓ open down ↓ 116 lines elided ↑ open up ↑
 155  155          movq    REG_OFF(KDIREG_R9)(%rdi), %r9;          \
 156  156          movq    REG_OFF(KDIREG_R8)(%rdi), %r8;          \
 157  157          movq    REG_OFF(KDIREG_RCX)(%rdi), %rcx;        \
 158  158          movq    REG_OFF(KDIREG_RDX)(%rdi), %rdx;        \
 159  159          movq    REG_OFF(KDIREG_RSI)(%rdi), %rsi;        \
 160  160          movq    REG_OFF(KDIREG_RDI)(%rdi), %rdi
 161  161  
 162  162  /*
 163  163   * Given the address of the current CPU's cpusave area in %rax, the following
 164  164   * macro restores the debugging state to said CPU.  Restored state includes
 165      - * the debug registers from the global %dr variables, and debugging MSRs from
 166      - * the CPU save area.  This code would be in a separate routine, but for the
 167      - * fact that some of the MSRs are jump-sensitive.  As such, we need to minimize
 168      - * the number of jumps taken subsequent to the update of said MSRs.  We can
 169      - * remove one jump (the ret) by using a macro instead of a function for the
 170      - * debugging state restoration code.
      165 + * the debug registers from the global %dr variables.
 171  166   *
 172      - * Takes the cpusave area in %rdi as a parameter, clobbers %rax-%rdx
 173      - */     
      167 + * Takes the cpusave area in %rdi as a parameter.
      168 + */
 174  169  #define KDI_RESTORE_DEBUGGING_STATE \
 175  170          pushq   %rdi;                                           \
 176  171          leaq    kdi_drreg(%rip), %r15;                          \
 177  172          movl    $7, %edi;                                       \
 178  173          movq    DR_CTL(%r15), %rsi;                             \
 179  174          call    kdi_dreg_set;                                   \
 180  175                                                                  \
 181  176          movl    $6, %edi;                                       \
 182  177          movq    $KDIREG_DRSTAT_RESERVED, %rsi;                  \
 183  178          call    kdi_dreg_set;                                   \
↓ open down ↓ 3 lines elided ↑ open up ↑
 187  182          call    kdi_dreg_set;                                   \
 188  183          movl    $1, %edi;                                       \
 189  184          movq    DRADDR_OFF(1)(%r15), %rsi;                      \
 190  185          call    kdi_dreg_set;                                   \
 191  186          movl    $2, %edi;                                       \
 192  187          movq    DRADDR_OFF(2)(%r15), %rsi;                      \
 193  188          call    kdi_dreg_set;                                   \
 194  189          movl    $3, %edi;                                       \
 195  190          movq    DRADDR_OFF(3)(%r15), %rsi;                      \
 196  191          call    kdi_dreg_set;                                   \
 197      -        popq    %rdi;                                           \
 198      -                                                                \
 199      -        /*                                                      \
 200      -         * Write any requested MSRs.                            \
 201      -         */                                                     \
 202      -        movq    KRS_MSR(%rdi), %rbx;                            \
 203      -        cmpq    $0, %rbx;                                       \
 204      -        je      3f;                                             \
 205      -1:                                                              \
 206      -        movl    MSR_NUM(%rbx), %ecx;                            \
 207      -        cmpl    $0, %ecx;                                       \
 208      -        je      3f;                                             \
 209      -                                                                \
 210      -        movl    MSR_TYPE(%rbx), %edx;                           \
 211      -        cmpl    $KDI_MSR_WRITE, %edx;                           \
 212      -        jne     2f;                                             \
 213      -                                                                \
 214      -        movq    MSR_VALP(%rbx), %rdx;                           \
 215      -        movl    0(%rdx), %eax;                                  \
 216      -        movl    4(%rdx), %edx;                                  \
 217      -        wrmsr;                                                  \
 218      -2:                                                              \
 219      -        addq    $MSR_SIZE, %rbx;                                \
 220      -        jmp     1b;                                             \
 221      -3:                                                              \
 222      -        /*                                                      \
 223      -         * We must not branch after re-enabling LBR.  If        \
 224      -         * kdi_wsr_wrexit_msr is set, it contains the number    \
 225      -         * of the MSR that controls LBR.  kdi_wsr_wrexit_valp   \
 226      -         * contains the value that is to be written to enable   \
 227      -         * LBR.                                                 \
 228      -         */                                                     \
 229      -        leaq    kdi_msr_wrexit_msr(%rip), %rcx;                 \
 230      -        movl    (%rcx), %ecx;                                   \
 231      -        cmpl    $0, %ecx;                                       \
 232      -        je      1f;                                             \
 233      -                                                                \
 234      -        leaq    kdi_msr_wrexit_valp(%rip), %rdx;                \
 235      -        movq    (%rdx), %rdx;                                   \
 236      -        movl    0(%rdx), %eax;                                  \
 237      -        movl    4(%rdx), %edx;                                  \
 238      -                                                                \
 239      -        wrmsr;                                                  \
 240      -1:
      192 +        popq    %rdi;
 241  193  
 242  194  /*
 243  195   * Each cpusave buffer has an area set aside for a ring buffer of breadcrumbs.
 244  196   * The following macros manage the buffer.
 245  197   */
 246  198  
 247  199  /* Advance the ring buffer */
 248  200  #define ADVANCE_CRUMB_POINTER(cpusave, tmp1, tmp2) \
 249  201          movq    KRS_CURCRUMBIDX(cpusave), tmp1; \
 250  202          cmpq    $[KDI_NCRUMBS - 1], tmp1;       \
↓ open down ↓ 142 lines elided ↑ open up ↑
 393  345   * The cross-call handler for slave CPUs.
 394  346   *
 395  347   * The debugger is single-threaded, so only one CPU, called the master, may be
 396  348   * running it at any given time.  The other CPUs, known as slaves, spin in a
 397  349   * busy loop until there's something for them to do.  This is the entry point
 398  350   * for the slaves - they'll be sent here in response to a cross-call sent by the
 399  351   * master.
 400  352   */
 401  353  
 402  354  #if defined(__lint)
 403      -char kdi_slave_entry_patch;
 404      -
 405  355  void
 406  356  kdi_slave_entry(void)
 407  357  {
 408  358  }
 409  359  #else /* __lint */
 410      -        .globl  kdi_slave_entry_patch;
 411      -
 412  360          ENTRY_NP(kdi_slave_entry)
 413  361  
 414      -        /* kdi_msr_add_clrentry knows where this is */
 415      -kdi_slave_entry_patch:
 416      -        KDI_MSR_PATCH;
 417      -
 418  362          /*
 419  363           * Cross calls are implemented as function calls, so our stack currently
 420  364           * looks like one you'd get from a zero-argument function call.  That
 421  365           * is, there's the return %rip at %rsp, and that's about it.  We need
 422  366           * to make it look like an interrupt stack.  When we first save, we'll
 423  367           * reverse the saved %ss and %rip, which we'll fix back up when we've
 424  368           * freed up some general-purpose registers.  We'll also need to fix up
 425  369           * the saved %rsp.
 426  370           */
 427  371  
↓ open down ↓ 102 lines elided ↑ open up ↑
 530  474          movl    $2, %edi
 531  475          call    kdi_dreg_get
 532  476          movq    %rax, KRS_DROFF(2)(%r15)
 533  477  
 534  478          movl    $3, %edi
 535  479          call    kdi_dreg_get
 536  480          movq    %rax, KRS_DROFF(3)(%r15)
 537  481  
 538  482          movq    %r15, %rax      /* restore cpu save area to rax */
 539  483  
 540      -        /*
 541      -         * Save any requested MSRs.
 542      -         */
 543      -        movq    KRS_MSR(%rax), %rcx
 544      -        cmpq    $0, %rcx
 545      -        je      no_msr
 546      -
 547      -        pushq   %rax            /* rdmsr clobbers %eax */
 548      -        movq    %rcx, %rbx
 549      -
 550      -1:
 551      -        movl    MSR_NUM(%rbx), %ecx
 552      -        cmpl    $0, %ecx
 553      -        je      msr_done
 554      -
 555      -        movl    MSR_TYPE(%rbx), %edx
 556      -        cmpl    $KDI_MSR_READ, %edx
 557      -        jne     msr_next
 558      -
 559      -        rdmsr                   /* addr in %ecx, value into %edx:%eax */
 560      -        movl    %eax, MSR_VAL(%rbx)
 561      -        movl    %edx, _CONST(MSR_VAL + 4)(%rbx)
 562      -
 563      -msr_next:
 564      -        addq    $MSR_SIZE, %rbx
 565      -        jmp     1b
 566      -
 567      -msr_done:
 568      -        popq    %rax
 569      -
 570      -no_msr:
 571  484          clrq    %rbp            /* stack traces should end here */
 572  485  
 573  486          pushq   %rax
 574  487          movq    %rax, %rdi      /* cpusave */
 575  488  
 576  489          call    kdi_debugger_entry
 577  490  
 578  491          /* Pass cpusave to kdi_resume */
 579  492          popq    %rdi
 580  493  
↓ open down ↓ 135 lines elided ↑ open up ↑
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX