9210 remove KMDB branch debugging support
9211 ::crregs could do with cr2/cr3 support
9209 ::ttrace should be able to filter by thread
Reviewed by: Patrick Mooney <patrick.mooney@joyent.com>


   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 
  22 /*
  23  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
  24  * Use is subject to license terms.
  25  */
  26 
  27 #pragma ident   "%Z%%M% %I%     %E% SMI"
  28 
  29 /*
  30  * Debugger entry for both master and slave CPUs
  31  */
  32 
  33 #if defined(__lint)
  34 #include <sys/types.h>
  35 #endif
  36 
  37 #include <sys/segments.h>
  38 #include <sys/asm_linkage.h>
  39 #include <sys/controlregs.h>
  40 #include <sys/x86_archext.h>
  41 #include <sys/privregs.h>
  42 #include <sys/machprivregs.h>
  43 #include <sys/kdi_regs.h>
  44 #include <sys/psw.h>
  45 #include <sys/uadmin.h>
  46 #ifdef __xpv
  47 #include <sys/hypervisor.h>
  48 #endif


 145         movw    %ax, %ds;                               \
 146         movq    REG_OFF(KDIREG_R15)(%rdi), %r15;        \
 147         movq    REG_OFF(KDIREG_R14)(%rdi), %r14;        \
 148         movq    REG_OFF(KDIREG_R13)(%rdi), %r13;        \
 149         movq    REG_OFF(KDIREG_R12)(%rdi), %r12;        \
 150         movq    REG_OFF(KDIREG_R11)(%rdi), %r11;        \
 151         movq    REG_OFF(KDIREG_R10)(%rdi), %r10;        \
 152         movq    REG_OFF(KDIREG_RBP)(%rdi), %rbp;        \
 153         movq    REG_OFF(KDIREG_RBX)(%rdi), %rbx;        \
 154         movq    REG_OFF(KDIREG_RAX)(%rdi), %rax;        \
 155         movq    REG_OFF(KDIREG_R9)(%rdi), %r9;          \
 156         movq    REG_OFF(KDIREG_R8)(%rdi), %r8;          \
 157         movq    REG_OFF(KDIREG_RCX)(%rdi), %rcx;        \
 158         movq    REG_OFF(KDIREG_RDX)(%rdi), %rdx;        \
 159         movq    REG_OFF(KDIREG_RSI)(%rdi), %rsi;        \
 160         movq    REG_OFF(KDIREG_RDI)(%rdi), %rdi
 161 
 162 /*
  163  * Given the address of the current CPU's cpusave area in %rdi, the following
 164  * macro restores the debugging state to said CPU.  Restored state includes
 165  * the debug registers from the global %dr variables, and debugging MSRs from
 166  * the CPU save area.  This code would be in a separate routine, but for the
 167  * fact that some of the MSRs are jump-sensitive.  As such, we need to minimize
 168  * the number of jumps taken subsequent to the update of said MSRs.  We can
 169  * remove one jump (the ret) by using a macro instead of a function for the
 170  * debugging state restoration code.
 171  *
  172  * Takes the cpusave area in %rdi as a parameter; clobbers %rax-%rdx, %rsi, %r15
  173  */
 174 #define KDI_RESTORE_DEBUGGING_STATE \
 175         pushq   %rdi;                                           \
 176         leaq    kdi_drreg(%rip), %r15;                          \
 177         movl    $7, %edi;                                       \
 178         movq    DR_CTL(%r15), %rsi;                             \
 179         call    kdi_dreg_set;                                   \
 180                                                                 \
 181         movl    $6, %edi;                                       \
 182         movq    $KDIREG_DRSTAT_RESERVED, %rsi;                  \
 183         call    kdi_dreg_set;                                   \
 184                                                                 \
 185         movl    $0, %edi;                                       \
 186         movq    DRADDR_OFF(0)(%r15), %rsi;                      \
 187         call    kdi_dreg_set;                                   \
 188         movl    $1, %edi;                                       \
 189         movq    DRADDR_OFF(1)(%r15), %rsi;                      \
 190         call    kdi_dreg_set;                                   \
 191         movl    $2, %edi;                                       \
 192         movq    DRADDR_OFF(2)(%r15), %rsi;                      \
 193         call    kdi_dreg_set;                                   \
 194         movl    $3, %edi;                                       \
 195         movq    DRADDR_OFF(3)(%r15), %rsi;                      \
 196         call    kdi_dreg_set;                                   \
 197         popq    %rdi;                                           \
 198                                                                 \
 199         /*                                                      \
 200          * Write any requested MSRs.                            \
 201          */                                                     \
 202         movq    KRS_MSR(%rdi), %rbx;                            \
 203         cmpq    $0, %rbx;                                       \
 204         je      3f;                                             \
 205 1:                                                              \
 206         movl    MSR_NUM(%rbx), %ecx;                            \
 207         cmpl    $0, %ecx;                                       \
 208         je      3f;                                             \
 209                                                                 \
 210         movl    MSR_TYPE(%rbx), %edx;                           \
 211         cmpl    $KDI_MSR_WRITE, %edx;                           \
 212         jne     2f;                                             \
 213                                                                 \
 214         movq    MSR_VALP(%rbx), %rdx;                           \
 215         movl    0(%rdx), %eax;                                  \
 216         movl    4(%rdx), %edx;                                  \
 217         wrmsr;                                                  \
 218 2:                                                              \
 219         addq    $MSR_SIZE, %rbx;                                \
 220         jmp     1b;                                             \
 221 3:                                                              \
 222         /*                                                      \
 223          * We must not branch after re-enabling LBR.  If        \
  224  * kdi_msr_wrexit_msr is set, it contains the number    \
  225  * of the MSR that controls LBR.  kdi_msr_wrexit_valp   \
 226          * contains the value that is to be written to enable   \
 227          * LBR.                                                 \
 228          */                                                     \
 229         leaq    kdi_msr_wrexit_msr(%rip), %rcx;                 \
 230         movl    (%rcx), %ecx;                                   \
 231         cmpl    $0, %ecx;                                       \
 232         je      1f;                                             \
 233                                                                 \
 234         leaq    kdi_msr_wrexit_valp(%rip), %rdx;                \
 235         movq    (%rdx), %rdx;                                   \
 236         movl    0(%rdx), %eax;                                  \
 237         movl    4(%rdx), %edx;                                  \
 238                                                                 \
 239         wrmsr;                                                  \
 240 1:
 241 
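An aside on the MSR write pass above: it walks a NULL-or-zero-terminated
list of MSR descriptors and issues a wrmsr for every entry marked
KDI_MSR_WRITE.  A rough C rendering of the same loop follows; the struct
layout is assumed from the MSR_NUM/MSR_TYPE/MSR_VALP/MSR_SIZE offsets the
assembly uses, so treat it as a sketch, not the actual kdi_msr_t definition.

	typedef struct kdi_msr {
		uint32_t msr_num;	/* MSR number; 0 terminates the list */
		uint32_t msr_type;	/* KDI_MSR_READ, KDI_MSR_WRITE, ... */
		uint64_t *msr_valp;	/* value to write back on resume */
	} kdi_msr_t;

	static void
	kdi_msr_write_all(kdi_msr_t *msr)
	{
		if (msr == NULL)
			return;
		/* The asm loads %edx:%eax from *msr_valp, then wrmsr. */
		for (; msr->msr_num != 0; msr++) {
			if (msr->msr_type == KDI_MSR_WRITE)
				wrmsr(msr->msr_num, *msr->msr_valp);
		}
	}
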
 242 /*
 243  * Each cpusave buffer has an area set aside for a ring buffer of breadcrumbs.
 244  * The following macros manage the buffer.
 245  */
 246 
 247 /* Advance the ring buffer */
 248 #define ADVANCE_CRUMB_POINTER(cpusave, tmp1, tmp2) \
 249         movq    KRS_CURCRUMBIDX(cpusave), tmp1; \
 250         cmpq    $[KDI_NCRUMBS - 1], tmp1;       \
 251         jge     1f;                             \
 252         /* Advance the pointer and index */     \
 253         addq    $1, tmp1;                       \
 254         movq    tmp1, KRS_CURCRUMBIDX(cpusave); \
 255         movq    KRS_CURCRUMB(cpusave), tmp1;    \
 256         addq    $KRM_SIZE, tmp1;                \
 257         jmp     2f;                             \
 258 1:      /* Reset the pointer and index */       \
 259         movq    $0, KRS_CURCRUMBIDX(cpusave);   \
 260         leaq    KRS_CRUMBS(cpusave), tmp1;      \
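In C terms, the advance is a bounded increment with wraparound; a sketch,
with the krs_* field names assumed from the KRS_* offsets above:

	if (cpusave->krs_curcrumbidx < KDI_NCRUMBS - 1) {
		cpusave->krs_curcrumbidx++;	/* advance the index */
		cpusave->krs_curcrumb++;	/* ... and the slot pointer */
	} else {
		cpusave->krs_curcrumbidx = 0;	/* wrap to the start */
		cpusave->krs_curcrumb = cpusave->krs_crumbs;
	}
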


 383          */
 384 
 385         jmp     kdi_save_common_state
 386 
 387         SET_SIZE(kdi_master_entry)
 388         SET_SIZE(kdi_cmnint)
 389 
 390 #endif  /* __lint */
 391 
 392 /*
 393  * The cross-call handler for slave CPUs.
 394  *
 395  * The debugger is single-threaded, so only one CPU, called the master, may be
 396  * running it at any given time.  The other CPUs, known as slaves, spin in a
 397  * busy loop until there's something for them to do.  This is the entry point
 398  * for the slaves - they'll be sent here in response to a cross-call sent by the
 399  * master.
 400  */
 401 
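For illustration only, a slave's busy loop has roughly the shape below.
Every name here is hypothetical -- the real state machine lives in the C
side of kmdb and polls more state than shown:

	/* Hypothetical sketch of a slave CPU's spin; not the kmdb API. */
	while (!kdi_slave_released(cpusave)) {		/* hypothetical */
		if (kdi_slave_has_work(cpusave))	/* hypothetical */
			kdi_slave_do_work(cpusave);	/* hypothetical */
	}
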
 402 #if defined(__lint)
 403 char kdi_slave_entry_patch;
 404 
 405 void
 406 kdi_slave_entry(void)
 407 {
 408 }
 409 #else /* __lint */
 410         .globl  kdi_slave_entry_patch;
 411 
 412         ENTRY_NP(kdi_slave_entry)
 413 
 414         /* kdi_msr_add_clrentry knows where this is */
 415 kdi_slave_entry_patch:
 416         KDI_MSR_PATCH;
 417 
 418         /*
 419          * Cross calls are implemented as function calls, so our stack currently
 420          * looks like one you'd get from a zero-argument function call.  That
 421          * is, there's the return %rip at %rsp, and that's about it.  We need
 422          * to make it look like an interrupt stack.  When we first save, we'll
 423          * reverse the saved %ss and %rip, which we'll fix back up when we've
 424          * freed up some general-purpose registers.  We'll also need to fix up
 425          * the saved %rsp.
 426          */
 427 
 428         pushq   %rsp            /* pushed value off by 8 */
 429         pushfq
 430         CLI(%rax)
 431         pushq   $KCS_SEL
 432         clrq    %rax
 433         movw    %ss, %ax
 434         pushq   %rax            /* rip should be here */
 435         pushq   $-1             /* phony trap error code */
 436         pushq   $-1             /* phony trap number */
 437 
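For reference, the frame built by the six pushes above looks like this
(top of stack first).  The %ss and return %rip values are deliberately in
each other's slots until the common save code swaps them back:

	%rsp -> -1		phony trap number
		-1		phony error code
		%ss		in the %rip slot (swapped later)
		KCS_SEL		%cs
		rflags		from pushfq
		caller's %rsp	off by 8 (fixed up later)
		return %rip	in the %ss slot (swapped later)
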


 520         movq    %rax, KRS_DRSTAT(%r15)
 521 
 522         movl    $0, %edi
 523         call    kdi_dreg_get
 524         movq    %rax, KRS_DROFF(0)(%r15)
 525 
 526         movl    $1, %edi
 527         call    kdi_dreg_get
 528         movq    %rax, KRS_DROFF(1)(%r15)
 529 
 530         movl    $2, %edi
 531         call    kdi_dreg_get
 532         movq    %rax, KRS_DROFF(2)(%r15)
 533 
 534         movl    $3, %edi
 535         call    kdi_dreg_get
 536         movq    %rax, KRS_DROFF(3)(%r15)
 537 
 538         movq    %r15, %rax      /* restore cpu save area to rax */
 539 
 540         /*
 541          * Save any requested MSRs.
 542          */
 543         movq    KRS_MSR(%rax), %rcx
 544         cmpq    $0, %rcx
 545         je      no_msr
 546 
 547         pushq   %rax            /* rdmsr clobbers %eax */
 548         movq    %rcx, %rbx
 549 
 550 1:
 551         movl    MSR_NUM(%rbx), %ecx
 552         cmpl    $0, %ecx
 553         je      msr_done
 554 
 555         movl    MSR_TYPE(%rbx), %edx
 556         cmpl    $KDI_MSR_READ, %edx
 557         jne     msr_next
 558 
 559         rdmsr                   /* addr in %ecx, value into %edx:%eax */
 560         movl    %eax, MSR_VAL(%rbx)
 561         movl    %edx, _CONST(MSR_VAL + 4)(%rbx)
 562 
 563 msr_next:
 564         addq    $MSR_SIZE, %rbx
 565         jmp     1b
 566 
 567 msr_done:
 568         popq    %rax
 569 
 570 no_msr:
 571         clrq    %rbp            /* stack traces should end here */
 572 
 573         pushq   %rax
 574         movq    %rax, %rdi      /* cpusave */
 575 
 576         call    kdi_debugger_entry
 577 
 578         /* Pass cpusave to kdi_resume */
 579         popq    %rdi
 580 
 581         jmp     kdi_resume
 582 
 583         SET_SIZE(kdi_save_common_state)
 584 
 585 #endif  /* !__lint */
 586 
 587 /*
 588  * Resume the world.  The code that calls kdi_resume has already
 589  * decided whether or not to restore the IDT.
 590  */




   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 
  22 /*
  23  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
  24  * Use is subject to license terms.
  25  *
  26  * Copyright 2018 Joyent, Inc.
  27  */
  28 
  29 /*
  30  * Debugger entry for both master and slave CPUs
  31  */
  32 
  33 #if defined(__lint)
  34 #include <sys/types.h>
  35 #endif
  36 
  37 #include <sys/segments.h>
  38 #include <sys/asm_linkage.h>
  39 #include <sys/controlregs.h>
  40 #include <sys/x86_archext.h>
  41 #include <sys/privregs.h>
  42 #include <sys/machprivregs.h>
  43 #include <sys/kdi_regs.h>
  44 #include <sys/psw.h>
  45 #include <sys/uadmin.h>
  46 #ifdef __xpv
  47 #include <sys/hypervisor.h>
  48 #endif


 145         movw    %ax, %ds;                               \
 146         movq    REG_OFF(KDIREG_R15)(%rdi), %r15;        \
 147         movq    REG_OFF(KDIREG_R14)(%rdi), %r14;        \
 148         movq    REG_OFF(KDIREG_R13)(%rdi), %r13;        \
 149         movq    REG_OFF(KDIREG_R12)(%rdi), %r12;        \
 150         movq    REG_OFF(KDIREG_R11)(%rdi), %r11;        \
 151         movq    REG_OFF(KDIREG_R10)(%rdi), %r10;        \
 152         movq    REG_OFF(KDIREG_RBP)(%rdi), %rbp;        \
 153         movq    REG_OFF(KDIREG_RBX)(%rdi), %rbx;        \
 154         movq    REG_OFF(KDIREG_RAX)(%rdi), %rax;        \
 155         movq    REG_OFF(KDIREG_R9)(%rdi), %r9;          \
 156         movq    REG_OFF(KDIREG_R8)(%rdi), %r8;          \
 157         movq    REG_OFF(KDIREG_RCX)(%rdi), %rcx;        \
 158         movq    REG_OFF(KDIREG_RDX)(%rdi), %rdx;        \
 159         movq    REG_OFF(KDIREG_RSI)(%rdi), %rsi;        \
 160         movq    REG_OFF(KDIREG_RDI)(%rdi), %rdi
 161 
 162 /*
  163  * Given the address of the current CPU's cpusave area in %rdi, the following
 164  * macro restores the debugging state to said CPU.  Restored state includes
 165  * the debug registers from the global %dr variables.
 166  *
 167  * Takes the cpusave area in %rdi as a parameter.
 168  */
 169 #define KDI_RESTORE_DEBUGGING_STATE \
 170         pushq   %rdi;                                           \
 171         leaq    kdi_drreg(%rip), %r15;                          \
 172         movl    $7, %edi;                                       \
 173         movq    DR_CTL(%r15), %rsi;                             \
 174         call    kdi_dreg_set;                                   \
 175                                                                 \
 176         movl    $6, %edi;                                       \
 177         movq    $KDIREG_DRSTAT_RESERVED, %rsi;                  \
 178         call    kdi_dreg_set;                                   \
 179                                                                 \
 180         movl    $0, %edi;                                       \
 181         movq    DRADDR_OFF(0)(%r15), %rsi;                      \
 182         call    kdi_dreg_set;                                   \
 183         movl    $1, %edi;                                       \
 184         movq    DRADDR_OFF(1)(%r15), %rsi;                      \
 185         call    kdi_dreg_set;                                   \
 186         movl    $2, %edi;                                       \
 187         movq    DRADDR_OFF(2)(%r15), %rsi;                      \
 188         call    kdi_dreg_set;                                   \
 189         movl    $3, %edi;                                       \
 190         movq    DRADDR_OFF(3)(%r15), %rsi;                      \
 191         call    kdi_dreg_set;                                   \
 192         popq    %rdi;
 193 
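Stripped of the MSR handling, the macro reduces to six kdi_dreg_set()
calls.  A rough C equivalent, with the kdi_drreg layout assumed from the
DR_CTL and DRADDR_OFF offsets it uses:

	int i;

	kdi_dreg_set(7, kdi_drreg.dr_ctl);	/* %dr7: breakpoint control */
	kdi_dreg_set(6, KDIREG_DRSTAT_RESERVED); /* %dr6: reset status bits */
	for (i = 0; i < 4; i++)			/* %dr0-%dr3: bp addresses */
		kdi_dreg_set(i, kdi_drreg.dr_addr[i]);
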
 194 /*
 195  * Each cpusave buffer has an area set aside for a ring buffer of breadcrumbs.
 196  * The following macros manage the buffer.
 197  */
 198 
 199 /* Advance the ring buffer */
 200 #define ADVANCE_CRUMB_POINTER(cpusave, tmp1, tmp2) \
 201         movq    KRS_CURCRUMBIDX(cpusave), tmp1; \
 202         cmpq    $[KDI_NCRUMBS - 1], tmp1;       \
 203         jge     1f;                             \
 204         /* Advance the pointer and index */     \
 205         addq    $1, tmp1;                       \
 206         movq    tmp1, KRS_CURCRUMBIDX(cpusave); \
 207         movq    KRS_CURCRUMB(cpusave), tmp1;    \
 208         addq    $KRM_SIZE, tmp1;                \
 209         jmp     2f;                             \
 210 1:      /* Reset the pointer and index */       \
 211         movq    $0, KRS_CURCRUMBIDX(cpusave);   \
 212         leaq    KRS_CRUMBS(cpusave), tmp1;      \


 335          */
 336 
 337         jmp     kdi_save_common_state
 338 
 339         SET_SIZE(kdi_master_entry)
 340         SET_SIZE(kdi_cmnint)
 341 
 342 #endif  /* __lint */
 343 
 344 /*
 345  * The cross-call handler for slave CPUs.
 346  *
 347  * The debugger is single-threaded, so only one CPU, called the master, may be
 348  * running it at any given time.  The other CPUs, known as slaves, spin in a
 349  * busy loop until there's something for them to do.  This is the entry point
 350  * for the slaves - they'll be sent here in response to a cross-call sent by the
 351  * master.
 352  */
 353 
 354 #if defined(__lint)
 355 void
 356 kdi_slave_entry(void)
 357 {
 358 }
 359 #else /* __lint */
 360         ENTRY_NP(kdi_slave_entry)
 361 
 362         /*
 363          * Cross calls are implemented as function calls, so our stack currently
 364          * looks like one you'd get from a zero-argument function call.  That
 365          * is, there's the return %rip at %rsp, and that's about it.  We need
 366          * to make it look like an interrupt stack.  When we first save, we'll
 367          * reverse the saved %ss and %rip, which we'll fix back up when we've
 368          * freed up some general-purpose registers.  We'll also need to fix up
 369          * the saved %rsp.
 370          */
 371 
 372         pushq   %rsp            /* pushed value off by 8 */
 373         pushfq
 374         CLI(%rax)
 375         pushq   $KCS_SEL
 376         clrq    %rax
 377         movw    %ss, %ax
 378         pushq   %rax            /* rip should be here */
 379         pushq   $-1             /* phony trap error code */
 380         pushq   $-1             /* phony trap number */
 381 


 464         movq    %rax, KRS_DRSTAT(%r15)
 465 
 466         movl    $0, %edi
 467         call    kdi_dreg_get
 468         movq    %rax, KRS_DROFF(0)(%r15)
 469 
 470         movl    $1, %edi
 471         call    kdi_dreg_get
 472         movq    %rax, KRS_DROFF(1)(%r15)
 473 
 474         movl    $2, %edi
 475         call    kdi_dreg_get
 476         movq    %rax, KRS_DROFF(2)(%r15)
 477 
 478         movl    $3, %edi
 479         call    kdi_dreg_get
 480         movq    %rax, KRS_DROFF(3)(%r15)
 481 
 482         movq    %r15, %rax      /* restore cpu save area to rax */
 483 
 484         clrq    %rbp            /* stack traces should end here */
 485 
 486         pushq   %rax
 487         movq    %rax, %rdi      /* cpusave */
 488 
 489         call    kdi_debugger_entry
 490 
 491         /* Pass cpusave to kdi_resume */
 492         popq    %rdi
 493 
 494         jmp     kdi_resume
 495 
 496         SET_SIZE(kdi_save_common_state)
 497 
 498 #endif  /* !__lint */
 499 
 500 /*
 501  * Resume the world.  The code that calls kdi_resume has already
 502  * decided whether or not to restore the IDT.
 503  */