/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright 2018 Joyent, Inc.
 */

/*
 * Debugger entry and exit for both master and slave CPUs. kdi_idthdl.s
 * contains the IDT stubs that drop into here (mainly via kdi_cmnint).
 */

#if defined(__lint)
#include <sys/types.h>
#else

#include <sys/segments.h>
#include <sys/asm_linkage.h>
#include <sys/controlregs.h>
#include <sys/x86_archext.h>
#include <sys/privregs.h>
#include <sys/machprivregs.h>
#include <sys/kdi_regs.h>
#include <sys/psw.h>
#include <sys/uadmin.h>
#ifdef __xpv
#include <sys/hypervisor.h>
#endif
#include <kdi_assym.h>
#include <assym.h>

/* clobbers %rdx, %rcx, returns addr in %rax, CPU ID in %rbx */
#define	GET_CPUSAVE_ADDR \
	movzbq	%gs:CPU_ID, %rbx;		\
	movq	%rbx, %rax;			\
	movq	$KRS_SIZE, %rcx;		\
	mulq	%rcx;				\
	movq	$kdi_cpusave, %rdx;		\
	/*CSTYLED*/				\
	addq	(%rdx), %rax

/*
 * Save copies of the IDT and GDT descriptors.  Note that we only save the
 * IDT and GDT if the IDT isn't ours, as we may be legitimately re-entering
 * the debugger through the trap handler.  We don't want to clobber the
 * saved IDT in the process, as we'd end up resuming the world on our IDT.
 */
#define	SAVE_IDTGDT \
	movq	%gs:CPU_IDT, %r11;		\
	leaq	kdi_idt(%rip), %rsi;		\
	cmpq	%rsi, %r11;			\
	je	1f;				\
	movq	%r11, KRS_IDT(%rax);		\
	movq	%gs:CPU_GDT, %r11;		\
	movq	%r11, KRS_GDT(%rax);		\
1:

#ifdef __xpv

/*
 * Already on kernel gsbase via the hypervisor.
 */
#define	SAVE_GSBASE(reg)	/* nothing */
#define	RESTORE_GSBASE(reg)	/* nothing */

#else

#define	SAVE_GSBASE(base)				\
	movl	$MSR_AMD_GSBASE, %ecx;			\
	rdmsr;						\
	shlq	$32, %rdx;				\
	orq	%rax, %rdx;				\
	movq	%rdx, REG_OFF(KDIREG_GSBASE)(base);	\
	movl	$MSR_AMD_KGSBASE, %ecx;			\
	rdmsr;						\
	shlq	$32, %rdx;				\
	orq	%rax, %rdx;				\
	movq	%rdx, REG_OFF(KDIREG_KGSBASE)(base)

/*
 * We shouldn't have stomped on KGSBASE, so don't try to restore it.
 */
#define	RESTORE_GSBASE(base)				\
	movq	REG_OFF(KDIREG_GSBASE)(base), %rdx;	\
	movq	%rdx, %rax;				\
	shrq	$32, %rdx;				\
	movl	$MSR_AMD_GSBASE, %ecx;			\
	wrmsr

#endif /* __xpv */

/*
 * %ss, %rsp, %rflags, %cs, %rip, %err, %trapno are already on the stack.
 */
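/*
 * A rough sketch of the resulting frame, highest address first (the
 * authoritative offsets are the kdi_regs.h layout, via REG_OFF()):
 *
 *	%ss, %rsp, %rflags, %cs, %rip		already pushed for us
 *	%err, %trapno				already pushed for us
 *	GPRs, segment selectors, %cr2,
 *	GSBASE values				filled in by KDI_SAVE_REGS
 */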
#define	KDI_SAVE_REGS(base) \
	movq	%rdi, REG_OFF(KDIREG_RDI)(base);	\
	movq	%rsi, REG_OFF(KDIREG_RSI)(base);	\
	movq	%rdx, REG_OFF(KDIREG_RDX)(base);	\
	movq	%rcx, REG_OFF(KDIREG_RCX)(base);	\
	movq	%r8, REG_OFF(KDIREG_R8)(base);		\
	movq	%r9, REG_OFF(KDIREG_R9)(base);		\
	movq	%rax, REG_OFF(KDIREG_RAX)(base);	\
	movq	%rbx, REG_OFF(KDIREG_RBX)(base);	\
	movq	%rbp, REG_OFF(KDIREG_RBP)(base);	\
	movq	%r10, REG_OFF(KDIREG_R10)(base);	\
	movq	%r11, REG_OFF(KDIREG_R11)(base);	\
	movq	%r12, REG_OFF(KDIREG_R12)(base);	\
	movq	%r13, REG_OFF(KDIREG_R13)(base);	\
	movq	%r14, REG_OFF(KDIREG_R14)(base);	\
	movq	%r15, REG_OFF(KDIREG_R15)(base);	\
	movq	%rbp, REG_OFF(KDIREG_SAVFP)(base);	\
	movq	REG_OFF(KDIREG_RIP)(base), %rax;	\
	movq	%rax, REG_OFF(KDIREG_SAVPC)(base);	\
	movq	%cr2, %rax;				\
	movq	%rax, REG_OFF(KDIREG_CR2)(base);	\
	clrq	%rax;					\
	movw	%ds, %ax;				\
	movq	%rax, REG_OFF(KDIREG_DS)(base);		\
	movw	%es, %ax;				\
	movq	%rax, REG_OFF(KDIREG_ES)(base);		\
	movw	%fs, %ax;				\
	movq	%rax, REG_OFF(KDIREG_FS)(base);		\
	movw	%gs, %ax;				\
	movq	%rax, REG_OFF(KDIREG_GS)(base);		\
	SAVE_GSBASE(base)

#define	KDI_RESTORE_REGS(base) \
	movq	base, %rdi;				\
	RESTORE_GSBASE(%rdi);				\
	movq	REG_OFF(KDIREG_ES)(%rdi), %rax;		\
	movw	%ax, %es;				\
	movq	REG_OFF(KDIREG_DS)(%rdi), %rax;		\
	movw	%ax, %ds;				\
	movq	REG_OFF(KDIREG_CR2)(base), %rax;	\
	movq	%rax, %cr2;				\
	movq	REG_OFF(KDIREG_R15)(%rdi), %r15;	\
	movq	REG_OFF(KDIREG_R14)(%rdi), %r14;	\
	movq	REG_OFF(KDIREG_R13)(%rdi), %r13;	\
	movq	REG_OFF(KDIREG_R12)(%rdi), %r12;	\
	movq	REG_OFF(KDIREG_R11)(%rdi), %r11;	\
	movq	REG_OFF(KDIREG_R10)(%rdi), %r10;	\
	movq	REG_OFF(KDIREG_RBP)(%rdi), %rbp;	\
	movq	REG_OFF(KDIREG_RBX)(%rdi), %rbx;	\
	movq	REG_OFF(KDIREG_RAX)(%rdi), %rax;	\
	movq	REG_OFF(KDIREG_R9)(%rdi), %r9;		\
	movq	REG_OFF(KDIREG_R8)(%rdi), %r8;		\
	movq	REG_OFF(KDIREG_RCX)(%rdi), %rcx;	\
	movq	REG_OFF(KDIREG_RDX)(%rdi), %rdx;	\
	movq	REG_OFF(KDIREG_RSI)(%rdi), %rsi;	\
	movq	REG_OFF(KDIREG_RDI)(%rdi), %rdi

/*
 * Given the address of the current CPU's cpusave area in %rdi, the following
 * macro restores the debugging state to said CPU.  Restored state includes
 * the debug registers from the global %dr variables.  The cpusave pointer in
 * %rdi is preserved across the kdi_dreg_set calls.
 */
#define	KDI_RESTORE_DEBUGGING_STATE \
	pushq	%rdi;					\
	leaq	kdi_drreg(%rip), %r15;			\
	movl	$7, %edi;				\
	movq	DR_CTL(%r15), %rsi;			\
	call	kdi_dreg_set;				\
							\
	movl	$6, %edi;				\
	movq	$KDIREG_DRSTAT_RESERVED, %rsi;		\
	call	kdi_dreg_set;				\
							\
	movl	$0, %edi;				\
	movq	DRADDR_OFF(0)(%r15), %rsi;		\
	call	kdi_dreg_set;				\
	movl	$1, %edi;				\
	movq	DRADDR_OFF(1)(%r15), %rsi;		\
	call	kdi_dreg_set;				\
	movl	$2, %edi;				\
	movq	DRADDR_OFF(2)(%r15), %rsi;		\
	call	kdi_dreg_set;				\
	movl	$3, %edi;				\
	movq	DRADDR_OFF(3)(%r15), %rsi;		\
	call	kdi_dreg_set;				\
	popq	%rdi;

/*
 * Each cpusave buffer has an area set aside for a ring buffer of
 * breadcrumbs.  The following macros manage the buffer.
 */
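/*
 * Typical usage, as in the entry paths below: advance to a fresh crumb,
 * then record values into its fields, e.g.
 *
 *	ADVANCE_CRUMB_POINTER(%rax, %rcx, %rdx)
 *	ADD_CRUMB(%rax, KRM_CPU_STATE, $KDI_CPU_STATE_MASTER, %rdx)
 */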

/* Advance the ring buffer */
#define	ADVANCE_CRUMB_POINTER(cpusave, tmp1, tmp2) \
	movq	KRS_CURCRUMBIDX(cpusave), tmp1;		\
	cmpq	$[KDI_NCRUMBS - 1], tmp1;		\
	jge	1f;					\
	/* Advance the pointer and index */		\
	addq	$1, tmp1;				\
	movq	tmp1, KRS_CURCRUMBIDX(cpusave);		\
	movq	KRS_CURCRUMB(cpusave), tmp1;		\
	addq	$KRM_SIZE, tmp1;			\
	jmp	2f;					\
1:	/* Reset the pointer and index */		\
	movq	$0, KRS_CURCRUMBIDX(cpusave);		\
	leaq	KRS_CRUMBS(cpusave), tmp1;		\
2:	movq	tmp1, KRS_CURCRUMB(cpusave);		\
	/* Clear the new crumb */			\
	movq	$KDI_NCRUMBS, tmp2;			\
3:	movq	$0, -4(tmp1, tmp2, 4);			\
	decq	tmp2;					\
	jnz	3b

/* Set a value in the current breadcrumb buffer */
#define	ADD_CRUMB(cpusave, offset, value, tmp) \
	movq	KRS_CURCRUMB(cpusave), tmp;		\
	movq	value, offset(tmp)

/*
 * XXX implement me.  For now, the NULL dereference below deliberately
 * faults, so an unexpected NMI is noticed rather than silently ignored.
 */
	ENTRY_NP(kdi_nmiint)
	clrq	%rcx
	movq	(%rcx), %rcx
	SET_SIZE(kdi_nmiint)

/*
 * The main entry point for master CPUs.  It also serves as the trap
 * handler for all traps and interrupts taken during single-step.
 */
	ENTRY_NP(kdi_cmnint)
	ALTENTRY(kdi_master_entry)

	pushq	%rax
	CLI(%rax)
	popq	%rax

	/* Save current register state */
	subq	$REG_OFF(KDIREG_TRAPNO), %rsp
	KDI_SAVE_REGS(%rsp)

#ifdef __xpv
	/*
	 * Clear saved_upcall_mask in unused byte of cs slot on stack.
	 * It can only confuse things.
	 */
	movb	$0, REG_OFF(KDIREG_CS)+4(%rsp)
#endif

#if !defined(__xpv)
	/*
	 * Switch to the kernel's GSBASE.  Neither GSBASE nor the ill-named
	 * KGSBASE can be trusted, as the kernel may or may not have already
	 * done a swapgs.  All is not lost, as the kernel can divine the
	 * correct value for us.  Note that the previous GSBASE is saved in
	 * the KDI_SAVE_REGS macro to prevent a usermode process's GSBASE
	 * from being blown away.  On the hypervisor, we don't need to do
	 * this, since it's ensured we're on our requested kernel GSBASE
	 * already.
	 */
	subq	$10, %rsp
	sgdt	(%rsp)
	movq	2(%rsp), %rdi		/* gdt base now in %rdi */
	addq	$10, %rsp
	call	kdi_gdt2gsbase		/* returns kernel's GSBASE in %rax */

	movq	%rax, %rdx
	shrq	$32, %rdx
	movl	$MSR_AMD_GSBASE, %ecx
	wrmsr				/* write %edx:%eax to GSBASE */

	/*
	 * In the trampoline we stashed the incoming %cr3.  Copy this into
	 * the kdiregs for restoration and later use.
	 */
	mov	%gs:(CPU_KPTI_DBG+KPTI_TR_CR3), %rdx
	mov	%rdx, REG_OFF(KDIREG_CR3)(%rsp)

	/*
	 * Switch to the kernel's %cr3.  From the early interrupt handler
	 * until now we've been running on the "paranoid" %cr3 (that of kas
	 * from early in boot).
	 *
	 * If we took the interrupt from somewhere already on the
	 * kas/paranoid %cr3 though, don't change it (this could happen if
	 * kcr3 is corrupt and we took a gptrap earlier from this very code).
	 */
	cmpq	%rdx, kpti_safe_cr3
	je	.no_kcr3
	mov	%gs:CPU_KPTI_KCR3, %rdx
	cmpq	$0, %rdx
	je	.no_kcr3
	mov	%rdx, %cr3
.no_kcr3:

#endif /* __xpv */
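	/*
	 * Locate this CPU's cpusave area, then leave a breadcrumb
	 * recording where and how we entered.
	 */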
	GET_CPUSAVE_ADDR		/* %rax = cpusave, %rbx = CPU ID */

	ADVANCE_CRUMB_POINTER(%rax, %rcx, %rdx)

	ADD_CRUMB(%rax, KRM_CPU_STATE, $KDI_CPU_STATE_MASTER, %rdx)

	movq	REG_OFF(KDIREG_RIP)(%rsp), %rcx
	ADD_CRUMB(%rax, KRM_PC, %rcx, %rdx)
	ADD_CRUMB(%rax, KRM_SP, %rsp, %rdx)
	movq	REG_OFF(KDIREG_TRAPNO)(%rsp), %rcx
	ADD_CRUMB(%rax, KRM_TRAPNO, %rcx, %rdx)

	movq	%rsp, %rbp
	pushq	%rax

	/*
	 * Were we in the debugger when we took the trap (i.e. was %rsp in
	 * one of the debugger's memory ranges)?
	 */
	leaq	kdi_memranges, %rcx
	movl	kdi_nmemranges, %edx
1:
	cmpq	MR_BASE(%rcx), %rsp
	jl	2f		/* below this range -- try the next one */
	cmpq	MR_LIM(%rcx), %rsp
	jg	2f		/* above this range -- try the next one */
	jmp	3f		/* matched within this range */

2:
	decl	%edx
	jz	kdi_save_common_state	/* %rsp not within debugger memory */
	addq	$MR_SIZE, %rcx
	jmp	1b

3:	/*
	 * The master is still set.  That should only happen if we hit a trap
	 * while running in the debugger.  Note that it may be an intentional
	 * fault.  kmdb_dpi_handle_fault will sort it all out.
	 */

	movq	REG_OFF(KDIREG_TRAPNO)(%rbp), %rdi
	movq	REG_OFF(KDIREG_RIP)(%rbp), %rsi
	movq	REG_OFF(KDIREG_RSP)(%rbp), %rdx
	movq	%rbx, %rcx		/* cpuid */

	call	kdi_dvec_handle_fault

	/*
	 * If we're here, we ran into a debugger problem, and the user
	 * elected to solve it by having the debugger debug itself.  The
	 * state we're about to save is that of the debugger when it took
	 * the fault.
	 */

	jmp	kdi_save_common_state

	SET_SIZE(kdi_master_entry)
	SET_SIZE(kdi_cmnint)

/*
 * The cross-call handler for slave CPUs.
 *
 * The debugger is single-threaded, so only one CPU, called the master, may be
 * running it at any given time.  The other CPUs, known as slaves, spin in a
 * busy loop until there's something for them to do.  This is the entry point
 * for the slaves - they'll be sent here in response to a cross-call sent by
 * the master.
 */

	ENTRY_NP(kdi_slave_entry)

	/*
	 * Cross calls are implemented as function calls, so our stack
	 * currently looks like one you'd get from a zero-argument function
	 * call.  That is, there's the return %rip at %rsp, and that's about
	 * it.  We need to make it look like an interrupt stack.  When we
	 * first save, we'll reverse the saved %ss and %rip, which we'll fix
	 * back up when we've freed up some general-purpose registers.  We'll
	 * also need to fix up the saved %rsp; see the sketch below.
	 */
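	/*
	 * A sketch of the frame as first built, from the top of the stack
	 * down (fixed up into true interrupt-frame order just afterwards):
	 *
	 *	return %rip	already pushed by the cross-call; sits in
	 *			the %ss slot until swapped
	 *	%rsp		off by 8; corrected later
	 *	%rflags
	 *	$KCS_SEL	(%cs)
	 *	%ss		sits in the %rip slot until swapped
	 *	$-1		phony error code
	 *	$-1		phony trap number
	 */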
	pushq	%rsp			/* pushed value off by 8 */
	pushfq
	CLI(%rax)
	pushq	$KCS_SEL
	clrq	%rax
	movw	%ss, %ax
	pushq	%rax			/* rip should be here */
	pushq	$-1			/* phony trap error code */
	pushq	$-1			/* phony trap number */

	subq	$REG_OFF(KDIREG_TRAPNO), %rsp
	KDI_SAVE_REGS(%rsp)

	movq	%cr3, %rax
	movq	%rax, REG_OFF(KDIREG_CR3)(%rsp)

	/*
	 * The %ss slot holds the caller's return %rip; record it as SAVPC,
	 * then swap it with the %ss value sitting in the %rip slot.
	 */
	movq	REG_OFF(KDIREG_SS)(%rsp), %rax
	movq	%rax, REG_OFF(KDIREG_SAVPC)(%rsp)
	xchgq	REG_OFF(KDIREG_RIP)(%rsp), %rax
	movq	%rax, REG_OFF(KDIREG_SS)(%rsp)

	movq	REG_OFF(KDIREG_RSP)(%rsp), %rax
	addq	$8, %rax
	movq	%rax, REG_OFF(KDIREG_RSP)(%rsp)

	/*
	 * We've saved all of the general-purpose registers, and have a stack
	 * that is irettable (after we strip down to the error code).
	 */

	GET_CPUSAVE_ADDR		/* %rax = cpusave, %rbx = CPU ID */

	ADVANCE_CRUMB_POINTER(%rax, %rcx, %rdx)

	ADD_CRUMB(%rax, KRM_CPU_STATE, $KDI_CPU_STATE_SLAVE, %rdx)

	movq	REG_OFF(KDIREG_RIP)(%rsp), %rcx
	ADD_CRUMB(%rax, KRM_PC, %rcx, %rdx)
	movq	REG_OFF(KDIREG_RSP)(%rsp), %rcx
	ADD_CRUMB(%rax, KRM_SP, %rcx, %rdx)
	ADD_CRUMB(%rax, KRM_TRAPNO, $-1, %rdx)

	movq	$KDI_CPU_STATE_SLAVE, KRS_CPU_STATE(%rax)

	pushq	%rax
	jmp	kdi_save_common_state

	SET_SIZE(kdi_slave_entry)

/*
 * The state of the world:
 *
 * The stack has a complete set of saved registers and segment
 * selectors, arranged in the kdi_regs.h order.  It also has a pointer
 * to our cpusave area.
 *
 * We need to save, into the cpusave area, a pointer to these saved
 * registers.  First we check whether we should jump straight back to
 * the kernel.  If not, we save a few more registers, ready the
 * machine for debugger entry, and enter the debugger.
 */

	ENTRY_NP(kdi_save_common_state)

	popq	%rdi			/* the cpusave area */
	movq	%rsp, KRS_GREGS(%rdi)	/* save ptr to current saved regs */

	pushq	%rdi
	call	kdi_trap_pass
	testq	%rax, %rax
	jnz	kdi_pass_to_kernel
	popq	%rax			/* cpusave in %rax */

	SAVE_IDTGDT

#if !defined(__xpv)
	/* Save off %cr0, and clear write protect */
	movq	%cr0, %rcx
	movq	%rcx, KRS_CR0(%rax)
	andq	$_BITNOT(CR0_WP), %rcx
	movq	%rcx, %cr0
#endif

	/* Save the debug registers and disable any active watchpoints */

	movq	%rax, %r15		/* save cpusave area ptr */
	movl	$7, %edi
	call	kdi_dreg_get
	movq	%rax, KRS_DRCTL(%r15)

	andq	$_BITNOT(KDIREG_DRCTL_WPALLEN_MASK), %rax
	movq	%rax, %rsi
	movl	$7, %edi
	call	kdi_dreg_set

	movl	$6, %edi
	call	kdi_dreg_get
	movq	%rax, KRS_DRSTAT(%r15)

	movl	$0, %edi
	call	kdi_dreg_get
	movq	%rax, KRS_DROFF(0)(%r15)

	movl	$1, %edi
	call	kdi_dreg_get
	movq	%rax, KRS_DROFF(1)(%r15)

	movl	$2, %edi
	call	kdi_dreg_get
	movq	%rax, KRS_DROFF(2)(%r15)

	movl	$3, %edi
	call	kdi_dreg_get
	movq	%rax, KRS_DROFF(3)(%r15)

	movq	%r15, %rax		/* restore cpu save area to rax */

	clrq	%rbp			/* stack traces should end here */

	pushq	%rax
	movq	%rax, %rdi		/* cpusave */

	call	kdi_debugger_entry

	/* Pass cpusave to kdi_resume */
	popq	%rdi

	jmp	kdi_resume

	SET_SIZE(kdi_save_common_state)

/*
 * Resume the world.  The code that calls kdi_resume has already
 * decided whether or not to restore the IDT.
 */
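/*
 * Resumption is roughly the reverse of entry: restore %cr0 and the
 * debug registers, reload the saved register frame, and iret (via the
 * KPTI trampoline when not running under the hypervisor).
 */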
	/* cpusave in %rdi */
	ENTRY_NP(kdi_resume)

	/*
	 * Send this CPU back into the world
	 */
#if !defined(__xpv)
	movq	KRS_CR0(%rdi), %rdx
	movq	%rdx, %cr0
#endif

	KDI_RESTORE_DEBUGGING_STATE

	movq	KRS_GREGS(%rdi), %rsp

#if !defined(__xpv)
	/*
	 * If we're going back via tr_iret_kdi, then we want to copy the
	 * final %cr3 we're going to use back into the kpti_dbg area now.
	 *
	 * Since the trampoline needs to find the kpti_dbg too, we enter it
	 * with %r13 set to point at that.  The real %r13 (to restore before
	 * the iret) we stash in the kpti_dbg itself.
	 */
	movq	%gs:CPU_SELF, %r13	/* can't leaq %gs:*, use self-ptr */
	addq	$CPU_KPTI_DBG, %r13

	movq	REG_OFF(KDIREG_R13)(%rsp), %rdx
	movq	%rdx, KPTI_R13(%r13)

	movq	REG_OFF(KDIREG_CR3)(%rsp), %rdx
	movq	%rdx, KPTI_TR_CR3(%r13)

	/* The trampoline will undo this later. */
	movq	%r13, REG_OFF(KDIREG_R13)(%rsp)
#endif

	KDI_RESTORE_REGS(%rsp)
	addq	$REG_OFF(KDIREG_RIP), %rsp	/* Discard state, trapno, err */
	/*
	 * The common trampoline code will restore %cr3 to the right value
	 * for either kernel or userland.
	 */
#if !defined(__xpv)
	jmp	tr_iret_kdi
#else
	IRET
#endif
	/*NOTREACHED*/
	SET_SIZE(kdi_resume)


/*
 * We took a trap that should be handled by the kernel, not KMDB.
 *
 * We're hard-coding the three cases where KMDB has installed permanent
 * handlers, since after we KDI_RESTORE_REGS(), we don't have registers
 * to work with; we can't use a global since other CPUs can easily pass
 * through here at the same time.
 *
 * Note that we handle T_DBGENTR since userspace might have tried it.
 *
 * The trap handler will expect the stack to be in trap order, with %rip
 * being the last entry, so we'll need to restore all our regs.  On
 * i86xpv we'll need to compensate for XPV_TRAP_POP.
 *
 * %rax on entry holds either 1 or 2, as returned by kdi_trap_pass().
 * kdi_cmnint stashed the original %cr3 into KDIREG_CR3, then (probably)
 * switched us to the CPU's kf_kernel_cr3.  But we're about to call, for
 * example:
 *
 *	dbgtrap->trap()->tr_iret_kernel
 *
 * which, unlike tr_iret_kdi, doesn't restore the original %cr3, so
 * we'll do so here if needed.
 *
 * This isn't just a matter of tidiness: for example, consider:
 *
 *	hat_switch(oldhat=kas.a_hat, newhat=prochat)
 *	  setcr3()
 *	  reset_kpti()
 *	    *brktrap* due to fbt on reset_kpti:entry
 *
 * Here, we have the new hat's %cr3, but we haven't yet updated
 * kf_kernel_cr3 (so it's currently kas's).  So if we don't restore here,
 * we'll stay on kas's cr3 value on returning from the trap: not good if
 * we fault on a userspace address.
 */
	ENTRY_NP(kdi_pass_to_kernel)

	popq	%rdi			/* cpusave */
	movq	$KDI_CPU_STATE_NONE, KRS_CPU_STATE(%rdi)
	movq	KRS_GREGS(%rdi), %rsp

	cmpq	$2, %rax
	jne	no_restore_cr3
	movq	REG_OFF(KDIREG_CR3)(%rsp), %r11
	movq	%r11, %cr3

no_restore_cr3:
	movq	REG_OFF(KDIREG_TRAPNO)(%rsp), %rdi

	cmpq	$T_SGLSTP, %rdi
	je	kdi_pass_dbgtrap
	cmpq	$T_BPTFLT, %rdi
	je	kdi_pass_brktrap
	cmpq	$T_DBGENTR, %rdi
	je	kdi_pass_invaltrap

	/*
	 * Hmm, unknown handler.  Somebody forgot to update this when they
	 * added a new trap interposition... try to drop back into kmdb.
	 */
	int	$T_DBGENTR
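
/*
 * Rebuild the original trap frame and tail-jump to the kernel's own
 * handler, so that from the kernel's point of view the trap never
 * passed through kmdb.
 */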
#define	CALL_TRAP_HANDLER(name) \
	KDI_RESTORE_REGS(%rsp);			\
	/* Discard state, trapno, err */	\
	addq	$REG_OFF(KDIREG_RIP), %rsp;	\
	XPV_TRAP_PUSH;				\
	jmp	%cs:name

kdi_pass_dbgtrap:
	CALL_TRAP_HANDLER(dbgtrap)
	/*NOTREACHED*/
kdi_pass_brktrap:
	CALL_TRAP_HANDLER(brktrap)
	/*NOTREACHED*/
kdi_pass_invaltrap:
	CALL_TRAP_HANDLER(invaltrap)
	/*NOTREACHED*/

	SET_SIZE(kdi_pass_to_kernel)

/*
 * A minimal version of mdboot(), to be used by the master CPU only.
 */
	ENTRY_NP(kdi_reboot)

	movl	$AD_BOOT, %edi
	movl	$A_SHUTDOWN, %esi
	call	*psm_shutdownf
#if defined(__xpv)
	movl	$SHUTDOWN_reboot, %edi
	call	HYPERVISOR_shutdown
#else
	call	reset
#endif
	/*NOTREACHED*/

	SET_SIZE(kdi_reboot)

	ENTRY_NP(kdi_cpu_debug_init)
	pushq	%rbp
	movq	%rsp, %rbp

	pushq	%rbx			/* macro will clobber %rbx */
	KDI_RESTORE_DEBUGGING_STATE
	popq	%rbx

	leave
	ret
	SET_SIZE(kdi_cpu_debug_init)

/*
 * Accessors for the hardware debug registers; per the SysV ABI, the
 * value is returned in %rax or taken in %rdi.
 */
#define	GETDREG(name, r) \
	ENTRY_NP(name);		\
	movq	r, %rax;	\
	ret;			\
	SET_SIZE(name)

#define	SETDREG(name, r) \
	ENTRY_NP(name);		\
	movq	%rdi, r;	\
	ret;			\
	SET_SIZE(name)

	GETDREG(kdi_getdr0, %dr0)
	GETDREG(kdi_getdr1, %dr1)
	GETDREG(kdi_getdr2, %dr2)
	GETDREG(kdi_getdr3, %dr3)
	GETDREG(kdi_getdr6, %dr6)
	GETDREG(kdi_getdr7, %dr7)

	SETDREG(kdi_setdr0, %dr0)
	SETDREG(kdi_setdr1, %dr1)
	SETDREG(kdi_setdr2, %dr2)
	SETDREG(kdi_setdr3, %dr3)
	SETDREG(kdi_setdr6, %dr6)
	SETDREG(kdi_setdr7, %dr7)

#endif /* !__lint */