/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright 2018 Joyent, Inc.
 */

/*
 * Debugger entry and exit for both master and slave CPUs. kdi_idthdl.s
 * contains the IDT stubs that drop into here (mainly via kdi_cmnint).
 */

#if defined(__lint)
#include <sys/types.h>
#else

#include <sys/segments.h>
#include <sys/asm_linkage.h>
#include <sys/controlregs.h>
#include <sys/x86_archext.h>
#include <sys/privregs.h>
#include <sys/machprivregs.h>
#include <sys/kdi_regs.h>
#include <sys/psw.h>
#include <sys/uadmin.h>
#ifdef __xpv
#include <sys/hypervisor.h>
#endif
#include <kdi_assym.h>
#include <assym.h>

/* clobbers %rdx, %rcx, returns addr in %rax, CPU ID in %rbx */
#define	GET_CPUSAVE_ADDR \
	movzbq	%gs:CPU_ID, %rbx;		\
	movq	%rbx, %rax;			\
	movq	$KRS_SIZE, %rcx;		\
	mulq	%rcx;				\
	movq	$kdi_cpusave, %rdx;		\
	/*CSTYLED*/				\
	addq	(%rdx), %rax

/*
 * Save copies of the IDT and GDT descriptors.  Note that we only save the
 * IDT and GDT if the IDT isn't ours, as we may be legitimately re-entering
 * the debugger through the trap handler.  We don't want to clobber the saved
 * IDT in the process, as we'd end up resuming the world on our IDT.
 */
#define	SAVE_IDTGDT \
	movq	%gs:CPU_IDT, %r11;		\
	leaq	kdi_idt(%rip), %rsi;		\
	cmpq	%rsi, %r11;			\
	je	1f;				\
	movq	%r11, KRS_IDT(%rax);		\
	movq	%gs:CPU_GDT, %r11;		\
	movq	%r11, KRS_GDT(%rax);		\
1:

#ifdef __xpv

/*
 * Already on kernel gsbase via the hypervisor.
 */
#define	SAVE_GSBASE(reg)	/* nothing */
#define	RESTORE_GSBASE(reg)	/* nothing */

#else

#define	SAVE_GSBASE(base)				\
	movl	$MSR_AMD_GSBASE, %ecx;			\
	rdmsr;						\
	shlq	$32, %rdx;				\
	orq	%rax, %rdx;				\
	movq	%rdx, REG_OFF(KDIREG_GSBASE)(base);	\
	movl	$MSR_AMD_KGSBASE, %ecx;			\
	rdmsr;						\
	shlq	$32, %rdx;				\
	orq	%rax, %rdx;				\
	movq	%rdx, REG_OFF(KDIREG_KGSBASE)(base)

/*
 * We shouldn't have stomped on KGSBASE, so don't try to restore it.
 */
#define	RESTORE_GSBASE(base)				\
	movq	REG_OFF(KDIREG_GSBASE)(base), %rdx;	\
	movq	%rdx, %rax;				\
	shrq	$32, %rdx;				\
	movl	$MSR_AMD_GSBASE, %ecx;			\
	wrmsr

#endif /* __xpv */
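/*
 * For reference, rdmsr returns the 64-bit MSR value split across %edx:%eax,
 * which SAVE_GSBASE reassembles above.  An illustrative C sketch of the same
 * read (not part of this file; the function name is for exposition only):
 *
 *	static inline uint64_t
 *	kdi_rdmsr64(uint32_t msr)
 *	{
 *		uint32_t lo, hi;
 *		__asm__ __volatile__("rdmsr"
 *		    : "=a" (lo), "=d" (hi) : "c" (msr));
 *		return (((uint64_t)hi << 32) | lo);
 *	}
 */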
/*
 * %ss, %rsp, %rflags, %cs, %rip, %err, %trapno are already on the stack.
 */
#define	KDI_SAVE_REGS(base) \
	movq	%rdi, REG_OFF(KDIREG_RDI)(base);	\
	movq	%rsi, REG_OFF(KDIREG_RSI)(base);	\
	movq	%rdx, REG_OFF(KDIREG_RDX)(base);	\
	movq	%rcx, REG_OFF(KDIREG_RCX)(base);	\
	movq	%r8, REG_OFF(KDIREG_R8)(base);		\
	movq	%r9, REG_OFF(KDIREG_R9)(base);		\
	movq	%rax, REG_OFF(KDIREG_RAX)(base);	\
	movq	%rbx, REG_OFF(KDIREG_RBX)(base);	\
	movq	%rbp, REG_OFF(KDIREG_RBP)(base);	\
	movq	%r10, REG_OFF(KDIREG_R10)(base);	\
	movq	%r11, REG_OFF(KDIREG_R11)(base);	\
	movq	%r12, REG_OFF(KDIREG_R12)(base);	\
	movq	%r13, REG_OFF(KDIREG_R13)(base);	\
	movq	%r14, REG_OFF(KDIREG_R14)(base);	\
	movq	%r15, REG_OFF(KDIREG_R15)(base);	\
	movq	%rbp, REG_OFF(KDIREG_SAVFP)(base);	\
	movq	REG_OFF(KDIREG_RIP)(base), %rax;	\
	movq	%rax, REG_OFF(KDIREG_SAVPC)(base);	\
	movq	%cr2, %rax;				\
	movq	%rax, REG_OFF(KDIREG_CR2)(base);	\
	clrq	%rax;					\
	movw	%ds, %ax;				\
	movq	%rax, REG_OFF(KDIREG_DS)(base);		\
	movw	%es, %ax;				\
	movq	%rax, REG_OFF(KDIREG_ES)(base);		\
	movw	%fs, %ax;				\
	movq	%rax, REG_OFF(KDIREG_FS)(base);		\
	movw	%gs, %ax;				\
	movq	%rax, REG_OFF(KDIREG_GS)(base);		\
	SAVE_GSBASE(base)

#define	KDI_RESTORE_REGS(base) \
	movq	base, %rdi;				\
	RESTORE_GSBASE(%rdi);				\
	movq	REG_OFF(KDIREG_ES)(%rdi), %rax;		\
	movw	%ax, %es;				\
	movq	REG_OFF(KDIREG_DS)(%rdi), %rax;		\
	movw	%ax, %ds;				\
	movq	REG_OFF(KDIREG_CR2)(base), %rax;	\
	movq	%rax, %cr2;				\
	movq	REG_OFF(KDIREG_R15)(%rdi), %r15;	\
	movq	REG_OFF(KDIREG_R14)(%rdi), %r14;	\
	movq	REG_OFF(KDIREG_R13)(%rdi), %r13;	\
	movq	REG_OFF(KDIREG_R12)(%rdi), %r12;	\
	movq	REG_OFF(KDIREG_R11)(%rdi), %r11;	\
	movq	REG_OFF(KDIREG_R10)(%rdi), %r10;	\
	movq	REG_OFF(KDIREG_RBP)(%rdi), %rbp;	\
	movq	REG_OFF(KDIREG_RBX)(%rdi), %rbx;	\
	movq	REG_OFF(KDIREG_RAX)(%rdi), %rax;	\
	movq	REG_OFF(KDIREG_R9)(%rdi), %r9;		\
	movq	REG_OFF(KDIREG_R8)(%rdi), %r8;		\
	movq	REG_OFF(KDIREG_RCX)(%rdi), %rcx;	\
	movq	REG_OFF(KDIREG_RDX)(%rdi), %rdx;	\
	movq	REG_OFF(KDIREG_RSI)(%rdi), %rsi;	\
	movq	REG_OFF(KDIREG_RDI)(%rdi), %rdi

/*
 * The following macro restores the debugging state to the current CPU.
 * Restored state includes the debug registers, loaded from the global
 * kdi_drreg image.  The caller's %rdi (the cpusave area pointer) is
 * preserved across the kdi_dreg_set() calls.
 */
#define	KDI_RESTORE_DEBUGGING_STATE \
	pushq	%rdi;						\
	leaq	kdi_drreg(%rip), %r15;				\
	movl	$7, %edi;					\
	movq	DR_CTL(%r15), %rsi;				\
	call	kdi_dreg_set;					\
								\
	movl	$6, %edi;					\
	movq	$KDIREG_DRSTAT_RESERVED, %rsi;			\
	call	kdi_dreg_set;					\
								\
	movl	$0, %edi;					\
	movq	DRADDR_OFF(0)(%r15), %rsi;			\
	call	kdi_dreg_set;					\
	movl	$1, %edi;					\
	movq	DRADDR_OFF(1)(%r15), %rsi;			\
	call	kdi_dreg_set;					\
	movl	$2, %edi;					\
	movq	DRADDR_OFF(2)(%r15), %rsi;			\
	call	kdi_dreg_set;					\
	movl	$3, %edi;					\
	movq	DRADDR_OFF(3)(%r15), %rsi;			\
	call	kdi_dreg_set;					\
	popq	%rdi;
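/*
 * In C terms, the macro above does roughly the following (a sketch only;
 * the field names mirror the DR_CTL/DRADDR_OFF offsets used above, not a
 * real declaration):
 *
 *	kdi_dreg_set(7, kdi_drreg.dr_ctl);	   // %dr7: enables
 *	kdi_dreg_set(6, KDIREG_DRSTAT_RESERVED);  // %dr6: clear status
 *	for (i = 0; i <= 3; i++)		   // %dr0-%dr3: addresses
 *		kdi_dreg_set(i, kdi_drreg.dr_addr[i]);
 */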
/*
 * Each cpusave buffer has an area set aside for a ring buffer of breadcrumbs.
 * The following macros manage the buffer.
 */

/* Advance the ring buffer */
#define	ADVANCE_CRUMB_POINTER(cpusave, tmp1, tmp2) \
	movq	KRS_CURCRUMBIDX(cpusave), tmp1;	\
	cmpq	$[KDI_NCRUMBS - 1], tmp1;	\
	jge	1f;				\
	/* Advance the pointer and index */	\
	addq	$1, tmp1;			\
	movq	tmp1, KRS_CURCRUMBIDX(cpusave);	\
	movq	KRS_CURCRUMB(cpusave), tmp1;	\
	addq	$KRM_SIZE, tmp1;		\
	jmp	2f;				\
1:	/* Reset the pointer and index */	\
	movq	$0, KRS_CURCRUMBIDX(cpusave);	\
	leaq	KRS_CRUMBS(cpusave), tmp1;	\
2:	movq	tmp1, KRS_CURCRUMB(cpusave);	\
	/* Clear the new crumb */		\
	movq	$KDI_NCRUMBS, tmp2;		\
3:	movq	$0, -4(tmp1, tmp2, 4);		\
	decq	tmp2;				\
	jnz	3b

/* Set a value in the current breadcrumb buffer */
#define	ADD_CRUMB(cpusave, offset, value, tmp) \
	movq	KRS_CURCRUMB(cpusave), tmp;	\
	movq	value, offset(tmp)
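/*
 * A C sketch of ADVANCE_CRUMB_POINTER (illustrative only; the member names
 * follow the KRS_/KRM_ offsets used above, not a real declaration):
 *
 *	if (cpusave->krs_curcrumbidx < KDI_NCRUMBS - 1) {
 *		cpusave->krs_curcrumbidx++;
 *		cpusave->krs_curcrumb++;	// advance by KRM_SIZE bytes
 *	} else {
 *		cpusave->krs_curcrumbidx = 0;
 *		cpusave->krs_curcrumb = cpusave->krs_crumbs;
 *	}
 *	bzero(cpusave->krs_curcrumb, KRM_SIZE);	// clear the new crumb
 */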
	/* XXX implement me */
	ENTRY_NP(kdi_nmiint)
	clrq	%rcx			/* deliberately fault: load from */
	movq	(%rcx), %rcx		/* address 0 until this exists */
	SET_SIZE(kdi_nmiint)

	/*
	 * The main entry point for master CPUs.  It also serves as the trap
	 * handler for all traps and interrupts taken during single-step.
	 */
	ENTRY_NP(kdi_cmnint)
	ALTENTRY(kdi_master_entry)

	pushq	%rax
	CLI(%rax)
	popq	%rax

	/* Save current register state */
	subq	$REG_OFF(KDIREG_TRAPNO), %rsp
	KDI_SAVE_REGS(%rsp)

#ifdef __xpv
	/*
	 * Clear saved_upcall_mask in unused byte of cs slot on stack.
	 * It can only confuse things.
	 */
	movb	$0, REG_OFF(KDIREG_CS)+4(%rsp)
#endif

#if !defined(__xpv)
	/*
	 * Switch to the kernel's GSBASE.  Neither GSBASE nor the ill-named
	 * KGSBASE can be trusted, as the kernel may or may not have already
	 * done a swapgs.  All is not lost, as the kernel can divine the
	 * correct value for us.  Note that the previous GSBASE is saved in
	 * the KDI_SAVE_REGS macro to prevent a usermode process's GSBASE
	 * from being blown away.  On the hypervisor, we don't need to do
	 * this, since it's ensured we're on our requested kernel GSBASE
	 * already.
	 */
	subq	$10, %rsp
	sgdt	(%rsp)
	movq	2(%rsp), %rdi	/* gdt base now in %rdi */
	addq	$10, %rsp
	call	kdi_gdt2gsbase	/* returns kernel's GSBASE in %rax */

	movq	%rax, %rdx
	shrq	$32, %rdx
	movl	$MSR_AMD_GSBASE, %ecx
	wrmsr

	/*
	 * In the trampoline we stashed the incoming %cr3. Copy this into
	 * the kdiregs for restoration and later use.
	 */
	mov	%gs:(CPU_KPTI_DBG+KPTI_TR_CR3), %rdx
	mov	%rdx, REG_OFF(KDIREG_CR3)(%rsp)
	/*
	 * Switch to the kernel's %cr3. From the early interrupt handler
	 * until now we've been running on the "paranoid" %cr3 (that of kas
	 * from early in boot).
	 *
	 * If we took the interrupt from somewhere already on the kas/paranoid
	 * %cr3 though, don't change it (this could happen if kcr3 is corrupt
	 * and we took a gptrap earlier from this very code).
	 */
	cmpq	%rdx, kpti_safe_cr3
	je	.no_kcr3
	mov	%gs:CPU_KPTI_KCR3, %rdx
	cmpq	$0, %rdx
	je	.no_kcr3
	mov	%rdx, %cr3
.no_kcr3:

#endif	/* __xpv */

	GET_CPUSAVE_ADDR	/* %rax = cpusave, %rbx = CPU ID */

	ADVANCE_CRUMB_POINTER(%rax, %rcx, %rdx)

	ADD_CRUMB(%rax, KRM_CPU_STATE, $KDI_CPU_STATE_MASTER, %rdx)

	movq	REG_OFF(KDIREG_RIP)(%rsp), %rcx
	ADD_CRUMB(%rax, KRM_PC, %rcx, %rdx)
	ADD_CRUMB(%rax, KRM_SP, %rsp, %rdx)
	movq	REG_OFF(KDIREG_TRAPNO)(%rsp), %rcx
	ADD_CRUMB(%rax, KRM_TRAPNO, %rcx, %rdx)

	movq	%rsp, %rbp
	pushq	%rax

	/*
	 * Were we in the debugger when we took the trap (i.e. was %rsp in one
	 * of the debugger's memory ranges)?
	 */
	leaq	kdi_memranges, %rcx
	movl	kdi_nmemranges, %edx
1:
	cmpq	MR_BASE(%rcx), %rsp
	jl	2f		/* below this range -- try the next one */
	cmpq	MR_LIM(%rcx), %rsp
	jg	2f		/* above this range -- try the next one */
	jmp	3f		/* matched within this range */

2:
	decl	%edx
	jz	kdi_save_common_state	/* %rsp not within debugger memory */
	addq	$MR_SIZE, %rcx
	jmp	1b

3:	/*
	 * The master is still set.  That should only happen if we hit a trap
	 * while running in the debugger.  Note that it may be an intentional
	 * fault.  kmdb_dpi_handle_fault will sort it all out.
	 */

	movq	REG_OFF(KDIREG_TRAPNO)(%rbp), %rdi
	movq	REG_OFF(KDIREG_RIP)(%rbp), %rsi
	movq	REG_OFF(KDIREG_RSP)(%rbp), %rdx
	movq	%rbx, %rcx	/* cpuid */

	call	kdi_dvec_handle_fault

	/*
	 * If we're here, we ran into a debugger problem, and the user
	 * elected to solve it by having the debugger debug itself.  The
	 * state we're about to save is that of the debugger when it took
	 * the fault.
	 */

	jmp	kdi_save_common_state

	SET_SIZE(kdi_master_entry)
	SET_SIZE(kdi_cmnint)
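/*
 * The range scan in kdi_cmnint above, as a C sketch (illustrative only;
 * mr_base/mr_lim mirror the MR_BASE/MR_LIM offsets):
 *
 *	for (i = 0; i < kdi_nmemranges; i++) {
 *		if (rsp >= kdi_memranges[i].mr_base &&
 *		    rsp <= kdi_memranges[i].mr_lim) {
 *			// trap taken while running in the debugger
 *			kdi_dvec_handle_fault(trapno, rip, rsp, cpuid);
 *			break;
 *		}
 *	}
 *	kdi_save_common_state();
 */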
/*
 * The cross-call handler for slave CPUs.
 *
 * The debugger is single-threaded, so only one CPU, called the master, may be
 * running it at any given time.  The other CPUs, known as slaves, spin in a
 * busy loop until there's something for them to do.  This is the entry point
 * for the slaves - they'll be sent here in response to a cross-call sent by
 * the master.
 */

	ENTRY_NP(kdi_slave_entry)

	/*
	 * Cross calls are implemented as function calls, so our stack
	 * currently looks like one you'd get from a zero-argument function
	 * call.  That is, there's the return %rip at %rsp, and that's about
	 * it.  We need to make it look like an interrupt stack.  When we
	 * first save, we'll reverse the saved %ss and %rip, which we'll fix
	 * back up when we've freed up some general-purpose registers.  We'll
	 * also need to fix up the saved %rsp.
	 */

	pushq	%rsp		/* pushed value off by 8 */
	pushfq
	CLI(%rax)
	pushq	$KCS_SEL
	clrq	%rax
	movw	%ss, %ax
	pushq	%rax		/* rip should be here */
	pushq	$-1		/* phony trap error code */
	pushq	$-1		/* phony trap number */

	subq	$REG_OFF(KDIREG_TRAPNO), %rsp
	KDI_SAVE_REGS(%rsp)

	movq	%cr3, %rax
	movq	%rax, REG_OFF(KDIREG_CR3)(%rsp)

	movq	REG_OFF(KDIREG_SS)(%rsp), %rax
	xchgq	REG_OFF(KDIREG_RIP)(%rsp), %rax
	movq	%rax, REG_OFF(KDIREG_SS)(%rsp)

	movq	REG_OFF(KDIREG_RSP)(%rsp), %rax
	addq	$8, %rax
	movq	%rax, REG_OFF(KDIREG_RSP)(%rsp)

	/*
	 * We've saved all of the general-purpose registers, and have a stack
	 * that is irettable (after we strip down to the error code).
	 */

	GET_CPUSAVE_ADDR	/* %rax = cpusave, %rbx = CPU ID */

	ADVANCE_CRUMB_POINTER(%rax, %rcx, %rdx)

	ADD_CRUMB(%rax, KRM_CPU_STATE, $KDI_CPU_STATE_SLAVE, %rdx)

	movq	REG_OFF(KDIREG_RIP)(%rsp), %rcx
	ADD_CRUMB(%rax, KRM_PC, %rcx, %rdx)

	pushq	%rax
	jmp	kdi_save_common_state

	SET_SIZE(kdi_slave_entry)

/*
 * The state of the world:
 *
 * The stack has a complete set of saved registers and segment
 * selectors, arranged in the kdi_regs.h order.  It also has a pointer
 * to our cpusave area.
 *
 * We need to save, into the cpusave area, a pointer to these saved
 * registers.  First we check whether we should jump straight back to
 * the kernel.  If not, we save a few more registers, ready the
 * machine for debugger entry, and enter the debugger.
 */

	ENTRY_NP(kdi_save_common_state)

	popq	%rdi			/* the cpusave area */
	movq	%rsp, KRS_GREGS(%rdi)	/* save ptr to current saved regs */

	pushq	%rdi
	call	kdi_trap_pass
	cmpq	$1, %rax
	je	kdi_pass_to_kernel
	popq	%rax			/* cpusave in %rax */

	SAVE_IDTGDT

#if !defined(__xpv)
	/* Save off %cr0, and clear write protect */
	movq	%cr0, %rcx
	movq	%rcx, KRS_CR0(%rax)
	andq	$_BITNOT(CR0_WP), %rcx
	movq	%rcx, %cr0
#endif

	/* Save the debug registers and disable any active watchpoints */

	movq	%rax, %r15		/* save cpusave area ptr */
	movl	$7, %edi
	call	kdi_dreg_get
	movq	%rax, KRS_DRCTL(%r15)

	andq	$_BITNOT(KDIREG_DRCTL_WPALLEN_MASK), %rax
	movq	%rax, %rsi
	movl	$7, %edi
	call	kdi_dreg_set

	movl	$6, %edi
	call	kdi_dreg_get
	movq	%rax, KRS_DRSTAT(%r15)

	movl	$0, %edi
	call	kdi_dreg_get
	movq	%rax, KRS_DROFF(0)(%r15)

	movl	$1, %edi
	call	kdi_dreg_get
	movq	%rax, KRS_DROFF(1)(%r15)

	movl	$2, %edi
	call	kdi_dreg_get
	movq	%rax, KRS_DROFF(2)(%r15)

	movl	$3, %edi
	call	kdi_dreg_get
	movq	%rax, KRS_DROFF(3)(%r15)

	movq	%r15, %rax	/* restore cpu save area to rax */

	clrq	%rbp		/* stack traces should end here */

	pushq	%rax
	movq	%rax, %rdi	/* cpusave */

	call	kdi_debugger_entry

	/* Pass cpusave to kdi_resume */
	popq	%rdi

	jmp	kdi_resume

	SET_SIZE(kdi_save_common_state)
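/*
 * kdi_save_common_state above is roughly the following, in C terms (a
 * sketch only; krs_gregs mirrors the KRS_GREGS offset, and kdi_trap_pass()
 * returns 1 when the trap belongs to the kernel rather than the debugger):
 *
 *	cpusave->krs_gregs = regs;
 *	if (kdi_trap_pass(cpusave))
 *		kdi_pass_to_kernel();		// does not return here
 *	// save IDT/GDT, %cr0, and debug registers, then ...
 *	kdi_debugger_entry(cpusave);
 *	kdi_resume(cpusave);
 */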
/*
 * Resume the world.  The code that calls kdi_resume has already
 * decided whether or not to restore the IDT.
 */
	/* cpusave in %rdi */
	ENTRY_NP(kdi_resume)

	/*
	 * Send this CPU back into the world
	 */
#if !defined(__xpv)
	movq	KRS_CR0(%rdi), %rdx
	movq	%rdx, %cr0
#endif

	KDI_RESTORE_DEBUGGING_STATE

	movq	KRS_GREGS(%rdi), %rsp

#if !defined(__xpv)
	/*
	 * If we're going back via tr_iret_kdi, then we want to copy the
	 * final %cr3 we're going to back into the kpti_dbg area now.
	 *
	 * Since the trampoline needs to find the kpti_dbg too, we enter it
	 * with %r13 set to point at that.  The real %r13 (to restore before
	 * the iret) we stash in the kpti_dbg itself.
	 */
	movq	%gs:CPU_SELF, %r13	/* can't leaq %gs:*, use self-ptr */
	addq	$CPU_KPTI_DBG, %r13

	movq	REG_OFF(KDIREG_R13)(%rsp), %rdx
	movq	%rdx, KPTI_R13(%r13)

	movq	REG_OFF(KDIREG_CR3)(%rsp), %rdx
	movq	%rdx, KPTI_TR_CR3(%r13)

	/* The trampoline will undo this later. */
	movq	%r13, REG_OFF(KDIREG_R13)(%rsp)
#endif

	KDI_RESTORE_REGS(%rsp)
	addq	$REG_OFF(KDIREG_RIP), %rsp	/* Discard state, trapno, err */
	/*
	 * The common trampoline code will restore %cr3 to the right value
	 * for either kernel or userland.
	 */
#if !defined(__xpv)
	jmp	tr_iret_kdi
#else
	IRET
#endif
	/*NOTREACHED*/
	SET_SIZE(kdi_resume)

	ENTRY_NP(kdi_pass_to_kernel)

	popq	%rdi		/* cpusave */

	movq	$KDI_CPU_STATE_NONE, KRS_CPU_STATE(%rdi)

	/*
	 * Find the trap and vector off the right kernel handler.  The trap
	 * handler will expect the stack to be in trap order, with %rip being
	 * the last entry, so we'll need to restore all our regs.  On i86xpv
	 * we'll need to compensate for XPV_TRAP_POP.
	 *
	 * We're hard-coding the three cases where KMDB has installed permanent
	 * handlers, since after we KDI_RESTORE_REGS(), we don't have registers
	 * to work with; we can't use a global since other CPUs can easily pass
	 * through here at the same time.
	 *
	 * Note that we handle T_DBGENTR since userspace might have tried it.
	 */
	movq	KRS_GREGS(%rdi), %rsp
	movq	REG_OFF(KDIREG_TRAPNO)(%rsp), %rdi
	cmpq	$T_SGLSTP, %rdi
	je	1f
	cmpq	$T_BPTFLT, %rdi
	je	2f
	cmpq	$T_DBGENTR, %rdi
	je	3f
	/*
	 * Hmm, unknown handler.  Somebody forgot to update this when they
	 * added a new trap interposition... try to drop back into kmdb.
	 */
	int	$T_DBGENTR

#define	CALL_TRAP_HANDLER(name) \
	KDI_RESTORE_REGS(%rsp); \
	/* Discard state, trapno, err */ \
	addq	$REG_OFF(KDIREG_RIP), %rsp; \
	XPV_TRAP_PUSH; \
	jmp	%cs:name

1:
	CALL_TRAP_HANDLER(dbgtrap)
	/*NOTREACHED*/
2:
	CALL_TRAP_HANDLER(brktrap)
	/*NOTREACHED*/
3:
	CALL_TRAP_HANDLER(invaltrap)
	/*NOTREACHED*/

	SET_SIZE(kdi_pass_to_kernel)
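/*
 * The dispatch in kdi_pass_to_kernel above, in C terms (a sketch; each
 * case tail-calls the kernel's installed handler via CALL_TRAP_HANDLER):
 *
 *	switch (trapno) {
 *	case T_SGLSTP:	return (dbgtrap());
 *	case T_BPTFLT:	return (brktrap());
 *	case T_DBGENTR:	return (invaltrap());
 *	default:	// unknown: drop back into kmdb
 *		asm("int $T_DBGENTR");
 *	}
 */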
/*
 * A minimal version of mdboot(), to be used by the master CPU only.
 */
	ENTRY_NP(kdi_reboot)

	movl	$AD_BOOT, %edi
	movl	$A_SHUTDOWN, %esi
	call	*psm_shutdownf
#if defined(__xpv)
	movl	$SHUTDOWN_reboot, %edi
	call	HYPERVISOR_shutdown
#else
	call	reset
#endif
	/*NOTREACHED*/

	SET_SIZE(kdi_reboot)

	ENTRY_NP(kdi_cpu_debug_init)
	pushq	%rbp
	movq	%rsp, %rbp

	pushq	%rbx		/* macro will clobber %rbx */
	KDI_RESTORE_DEBUGGING_STATE
	popq	%rbx

	leave
	ret
	SET_SIZE(kdi_cpu_debug_init)

#define	GETDREG(name, r) \
	ENTRY_NP(name);		\
	movq	r, %rax;	\
	ret;			\
	SET_SIZE(name)

#define	SETDREG(name, r) \
	ENTRY_NP(name);		\
	movq	%rdi, r;	\
	ret;			\
	SET_SIZE(name)

	GETDREG(kdi_getdr0, %dr0)
	GETDREG(kdi_getdr1, %dr1)
	GETDREG(kdi_getdr2, %dr2)
	GETDREG(kdi_getdr3, %dr3)
	GETDREG(kdi_getdr6, %dr6)
	GETDREG(kdi_getdr7, %dr7)

	SETDREG(kdi_setdr0, %dr0)
	SETDREG(kdi_setdr1, %dr1)
	SETDREG(kdi_setdr2, %dr2)
	SETDREG(kdi_setdr3, %dr3)
	SETDREG(kdi_setdr6, %dr6)
	SETDREG(kdi_setdr7, %dr7)

#endif /* !__lint */
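/*
 * Each GETDREG/SETDREG expansion above yields a trivial accessor; in C
 * prototype terms (illustrative only):
 *
 *	ulong_t kdi_getdr0(void);	// movq %dr0, %rax; ret
 *	void kdi_setdr0(ulong_t v);	// movq %rdi, %dr0; ret
 */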