5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27 #pragma ident "%Z%%M% %I% %E% SMI"
28
29 /*
30 * Debugger entry for both master and slave CPUs
31 */
32
33 #if defined(__lint)
34 #include <sys/types.h>
35 #endif
36
37 #include <sys/segments.h>
38 #include <sys/asm_linkage.h>
39 #include <sys/controlregs.h>
40 #include <sys/x86_archext.h>
41 #include <sys/privregs.h>
42 #include <sys/machprivregs.h>
43 #include <sys/kdi_regs.h>
44 #include <sys/psw.h>
45 #include <sys/uadmin.h>
46 #ifdef __xpv
47 #include <sys/hypervisor.h>
48 #endif
49
50 #ifdef _ASM
51
52 #include <kdi_assym.h>
53 #include <assym.h>
54
55 /* clobbers %rdx, %rcx, returns addr in %rax, CPU ID in %rbx */
56 #define GET_CPUSAVE_ADDR \
57 movzbq %gs:CPU_ID, %rbx; \
58 movq %rbx, %rax; \
59 movq $KRS_SIZE, %rcx; \
60 mulq %rcx; \
61 movq $kdi_cpusave, %rdx; \
62 /*CSTYLED*/ \
63 addq (%rdx), %rax
64
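/*
 * In C terms, GET_CPUSAVE_ADDR computes roughly the following (a sketch;
 * kdi_cpusave is the pointer to the array of per-CPU save areas):
 *
 *	cpuid = CPU->cpu_id;
 *	cpusave = (uintptr_t)kdi_cpusave + cpuid * KRS_SIZE;
 */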
65 /*
66 * Save copies of the IDT and GDT descriptors. Note that we only save the IDT
67 * and GDT if the IDT isn't ours, as we may be legitimately re-entering the
68 * debugger through the trap handler. We don't want to clobber the saved IDT
69 * in the process, as we'd end up resuming the world on our IDT.
70 */
71 #define SAVE_IDTGDT \
72 movq %gs:CPU_IDT, %r11; \
73 leaq kdi_idt(%rip), %rsi; \
74 cmpq %rsi, %r11; \
75 je 1f; \
76 movq %r11, KRS_IDT(%rax); \
77 movq %gs:CPU_GDT, %r11; \
78 movq %r11, KRS_GDT(%rax); \
79 1:
80
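/*
 * A C sketch of SAVE_IDTGDT; the field names are illustrative, standing
 * in for the CPU_IDT/CPU_GDT and KRS_IDT/KRS_GDT offsets used above:
 *
 *	if (cpu->idt != kdi_idt) {		(not already on our own IDT)
 *		cpusave->krs_idt = cpu->idt;
 *		cpusave->krs_gdt = cpu->gdt;
 *	}
 */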
81 #ifdef __xpv
82
83 #define SAVE_GSBASE(reg) /* nothing */
84 #define RESTORE_GSBASE(reg) /* nothing */
85
86 #else
87
88 #define SAVE_GSBASE(base) \
89 movl $MSR_AMD_GSBASE, %ecx; \
90 rdmsr; \
91 shlq $32, %rdx; \
92 orq %rax, %rdx; \
93 movq %rdx, REG_OFF(KDIREG_GSBASE)(base)
94
95 #define RESTORE_GSBASE(base) \
96 movq REG_OFF(KDIREG_GSBASE)(base), %rdx; \
97 movq %rdx, %rax; \
98 shrq $32, %rdx; \
99 movl $MSR_AMD_GSBASE, %ecx; \
100 wrmsr
101
102 #endif /* __xpv */
103
104 /*
105 * %ss, %rsp, %rflags, %cs, %rip, %err, %trapno are already on the stack. Note
106 * that on the hypervisor, we skip the save/restore of GSBASE: it's slow, and
107 * unnecessary.
108 */
109 #define KDI_SAVE_REGS(base) \
110 movq %rdi, REG_OFF(KDIREG_RDI)(base); \
111 movq %rsi, REG_OFF(KDIREG_RSI)(base); \
112 movq %rdx, REG_OFF(KDIREG_RDX)(base); \
113 movq %rcx, REG_OFF(KDIREG_RCX)(base); \
114 movq %r8, REG_OFF(KDIREG_R8)(base); \
115 movq %r9, REG_OFF(KDIREG_R9)(base); \
116 movq %rax, REG_OFF(KDIREG_RAX)(base); \
117 movq %rbx, REG_OFF(KDIREG_RBX)(base); \
118 movq %rbp, REG_OFF(KDIREG_RBP)(base); \
119 movq %r10, REG_OFF(KDIREG_R10)(base); \
120 movq %r11, REG_OFF(KDIREG_R11)(base); \
121 movq %r12, REG_OFF(KDIREG_R12)(base); \
122 movq %r13, REG_OFF(KDIREG_R13)(base); \
123 movq %r14, REG_OFF(KDIREG_R14)(base); \
124 movq %r15, REG_OFF(KDIREG_R15)(base); \
125 movq %rbp, REG_OFF(KDIREG_SAVFP)(base); \
126 movq REG_OFF(KDIREG_RIP)(base), %rax; \
127 movq %rax, REG_OFF(KDIREG_SAVPC)(base); \
128 clrq %rax; \
129 movw %ds, %ax; \
130 movq %rax, REG_OFF(KDIREG_DS)(base); \
131 movw %es, %ax; \
132 movq %rax, REG_OFF(KDIREG_ES)(base); \
133 movw %fs, %ax; \
134 movq %rax, REG_OFF(KDIREG_FS)(base); \
135 movw %gs, %ax; \
136 movq %rax, REG_OFF(KDIREG_GS)(base); \
137 SAVE_GSBASE(base)
138
139 #define KDI_RESTORE_REGS(base) \
140 movq base, %rdi; \
141 RESTORE_GSBASE(%rdi); \
142 movq REG_OFF(KDIREG_ES)(%rdi), %rax; \
143 movw %ax, %es; \
144 movq REG_OFF(KDIREG_DS)(%rdi), %rax; \
145 movw %ax, %ds; \
146 movq REG_OFF(KDIREG_R15)(%rdi), %r15; \
147 movq REG_OFF(KDIREG_R14)(%rdi), %r14; \
148 movq REG_OFF(KDIREG_R13)(%rdi), %r13; \
149 movq REG_OFF(KDIREG_R12)(%rdi), %r12; \
150 movq REG_OFF(KDIREG_R11)(%rdi), %r11; \
151 movq REG_OFF(KDIREG_R10)(%rdi), %r10; \
152 movq REG_OFF(KDIREG_RBP)(%rdi), %rbp; \
153 movq REG_OFF(KDIREG_RBX)(%rdi), %rbx; \
154 movq REG_OFF(KDIREG_RAX)(%rdi), %rax; \
155 movq REG_OFF(KDIREG_R9)(%rdi), %r9; \
156 movq REG_OFF(KDIREG_R8)(%rdi), %r8; \
157 movq REG_OFF(KDIREG_RCX)(%rdi), %rcx; \
158 movq REG_OFF(KDIREG_RDX)(%rdi), %rdx; \
159 movq REG_OFF(KDIREG_RSI)(%rdi), %rsi; \
160 movq REG_OFF(KDIREG_RDI)(%rdi), %rdi
161
162 /*
163 * Given the address of the current CPU's cpusave area in %rax, the following
164 * macro restores the debugging state to said CPU. Restored state includes
165 * the debug registers from the global %dr variables, and debugging MSRs from
166 * the CPU save area. This code would be in a separate routine, but for the
167 * fact that some of the MSRs are jump-sensitive. As such, we need to minimize
168 * the number of jumps taken subsequent to the update of said MSRs. We can
169 * remove one jump (the ret) by using a macro instead of a function for the
170 * debugging state restoration code.
171 *
172  * Takes the cpusave area in %rdi as a parameter; clobbers %rax-%rdx, %rsi, %rbx and %r15
173 */
174 #define KDI_RESTORE_DEBUGGING_STATE \
175 pushq %rdi; \
176 leaq kdi_drreg(%rip), %r15; \
177 movl $7, %edi; \
178 movq DR_CTL(%r15), %rsi; \
179 call kdi_dreg_set; \
180 \
181 movl $6, %edi; \
182 movq $KDIREG_DRSTAT_RESERVED, %rsi; \
183 call kdi_dreg_set; \
184 \
185 movl $0, %edi; \
186 movq DRADDR_OFF(0)(%r15), %rsi; \
187 call kdi_dreg_set; \
188 movl $1, %edi; \
189 movq DRADDR_OFF(1)(%r15), %rsi; \
190 call kdi_dreg_set; \
191 movl $2, %edi; \
192 movq DRADDR_OFF(2)(%r15), %rsi; \
193 call kdi_dreg_set; \
194 movl $3, %edi; \
195 movq DRADDR_OFF(3)(%r15), %rsi; \
196 call kdi_dreg_set; \
197 popq %rdi; \
198 \
199 /* \
200 * Write any requested MSRs. \
201 */ \
202 movq KRS_MSR(%rdi), %rbx; \
203 cmpq $0, %rbx; \
204 je 3f; \
205 1: \
206 movl MSR_NUM(%rbx), %ecx; \
207 cmpl $0, %ecx; \
208 je 3f; \
209 \
210 movl MSR_TYPE(%rbx), %edx; \
211 cmpl $KDI_MSR_WRITE, %edx; \
212 jne 2f; \
213 \
214 movq MSR_VALP(%rbx), %rdx; \
215 movl 0(%rdx), %eax; \
216 movl 4(%rdx), %edx; \
217 wrmsr; \
218 2: \
219 addq $MSR_SIZE, %rbx; \
220 jmp 1b; \
221 3: \
222 /* \
223  * kdi_msr_wrexit_msr is set, it contains the number \
224  * of the MSR that controls LBR. kdi_msr_wrexit_valp \
225 * of the MSR that controls LBR. kdi_wsr_wrexit_valp \
226 * contains the value that is to be written to enable \
227 * LBR. \
228 */ \
229 leaq kdi_msr_wrexit_msr(%rip), %rcx; \
230 movl (%rcx), %ecx; \
231 cmpl $0, %ecx; \
232 je 1f; \
233 \
234 leaq kdi_msr_wrexit_valp(%rip), %rdx; \
235 movq (%rdx), %rdx; \
236 movl 0(%rdx), %eax; \
237 movl 4(%rdx), %edx; \
238 \
239 wrmsr; \
240 1:
241
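/*
 * The MSR write-back pass at the end of KDI_RESTORE_DEBUGGING_STATE is,
 * in rough C (a sketch; the field names mirror the MSR_NUM, MSR_TYPE and
 * MSR_VALP offsets used above):
 *
 *	for (msr = cpusave->krs_msr; msr != NULL && msr->msr_num != 0; msr++) {
 *		if (msr->msr_type == KDI_MSR_WRITE)
 *			wrmsr(msr->msr_num, *msr->msr_valp);
 *	}
 *	if (kdi_msr_wrexit_msr != 0)
 *		wrmsr(kdi_msr_wrexit_msr, *kdi_msr_wrexit_valp);   (re-enable LBR)
 */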
242 /*
243 * Each cpusave buffer has an area set aside for a ring buffer of breadcrumbs.
244 * The following macros manage the buffer.
245 */
246
247 /* Advance the ring buffer */
248 #define ADVANCE_CRUMB_POINTER(cpusave, tmp1, tmp2) \
249 movq KRS_CURCRUMBIDX(cpusave), tmp1; \
250 cmpq $[KDI_NCRUMBS - 1], tmp1; \
251 jge 1f; \
252 /* Advance the pointer and index */ \
253 addq $1, tmp1; \
254 movq tmp1, KRS_CURCRUMBIDX(cpusave); \
255 movq KRS_CURCRUMB(cpusave), tmp1; \
256 addq $KRM_SIZE, tmp1; \
257 jmp 2f; \
258 1: /* Reset the pointer and index */ \
259 movq $0, KRS_CURCRUMBIDX(cpusave); \
260 leaq KRS_CRUMBS(cpusave), tmp1; \
261 2: movq tmp1, KRS_CURCRUMB(cpusave); \
262 /* Clear the new crumb */ \
263 movq $KDI_NCRUMBS, tmp2; \
264 3: movq $0, -4(tmp1, tmp2, 4); \
265 decq tmp2; \
266 jnz 3b
267
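/*
 * ADVANCE_CRUMB_POINTER is, in rough C (a sketch; the field names mirror
 * the KRS_CURCRUMBIDX, KRS_CURCRUMB and KRS_CRUMBS offsets):
 *
 *	if (cpusave->krs_curcrumbidx < KDI_NCRUMBS - 1) {
 *		cpusave->krs_curcrumbidx++;
 *		cpusave->krs_curcrumb++;
 *	} else {
 *		cpusave->krs_curcrumbidx = 0;
 *		cpusave->krs_curcrumb = &cpusave->krs_crumbs[0];
 *	}
 *	bzero(cpusave->krs_curcrumb, KRM_SIZE);	(clear the new crumb)
 */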
268 /* Set a value in the current breadcrumb buffer */
269 #define ADD_CRUMB(cpusave, offset, value, tmp) \
270 movq KRS_CURCRUMB(cpusave), tmp; \
271 movq value, offset(tmp)
272
273 #endif /* _ASM */
274
275 #if defined(__lint)
276 void
277 kdi_cmnint(void)
278 {
279 }
280 #else /* __lint */
281
282 /* XXX implement me */
283 ENTRY_NP(kdi_nmiint)
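	/*
	 * Until a real NMI handler exists, the load through a NULL pointer
	 * below is presumably a deliberate fault, so that an NMI lands us
	 * somewhere obvious instead of silently continuing.
	 */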
284 clrq %rcx
285 movq (%rcx), %rcx
286 SET_SIZE(kdi_nmiint)
287
288 /*
289 * The main entry point for master CPUs. It also serves as the trap
290 * handler for all traps and interrupts taken during single-step.
291 */
292 ENTRY_NP(kdi_cmnint)
293 ALTENTRY(kdi_master_entry)
294
295 pushq %rax
296 CLI(%rax)
297 popq %rax
298
299 /* Save current register state */
300 subq $REG_OFF(KDIREG_TRAPNO), %rsp
301 KDI_SAVE_REGS(%rsp)
311 #if !defined(__xpv)
312 /*
313 * Switch to the kernel's GSBASE. Neither GSBASE nor the ill-named
314 * KGSBASE can be trusted, as the kernel may or may not have already
315 * done a swapgs. All is not lost, as the kernel can divine the correct
316 * value for us. Note that the previous GSBASE is saved in the
317 * KDI_SAVE_REGS macro to prevent a usermode process's GSBASE from being
318 * blown away. On the hypervisor, we don't need to do this, since it's
319 * ensured we're on our requested kernel GSBASE already.
320 */
321 subq $10, %rsp
322 sgdt (%rsp)
323 movq 2(%rsp), %rdi /* gdt base now in %rdi */
324 addq $10, %rsp
325 call kdi_gdt2gsbase /* returns kernel's GSBASE in %rax */
326
327 movq %rax, %rdx
328 shrq $32, %rdx
329 movl $MSR_AMD_GSBASE, %ecx
330 wrmsr
331 #endif /* __xpv */
332
333 GET_CPUSAVE_ADDR /* %rax = cpusave, %rbx = CPU ID */
334
335 ADVANCE_CRUMB_POINTER(%rax, %rcx, %rdx)
336
337 ADD_CRUMB(%rax, KRM_CPU_STATE, $KDI_CPU_STATE_MASTER, %rdx)
338
339 movq REG_OFF(KDIREG_RIP)(%rsp), %rcx
340 ADD_CRUMB(%rax, KRM_PC, %rcx, %rdx)
341 ADD_CRUMB(%rax, KRM_SP, %rsp, %rdx)
342 movq REG_OFF(KDIREG_TRAPNO)(%rsp), %rcx
343 ADD_CRUMB(%rax, KRM_TRAPNO, %rcx, %rdx)
344
345 movq %rsp, %rbp
346 pushq %rax
347
348 /*
349  * Were we in the debugger when we took the trap (i.e. was %rsp in one
350 * of the debugger's memory ranges)?
351 */
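	/*
	 * In C, the scan below is roughly (a sketch; mr_base/mr_lim mirror
	 * the MR_BASE/MR_LIM offsets):
	 *
	 *	for (i = kdi_nmemranges, mr = kdi_memranges; i > 0; i--, mr++) {
	 *		if (rsp >= mr->mr_base && rsp <= mr->mr_lim)
	 *			goto in_debugger;	(label 3: below)
	 *	}
	 *	goto kdi_save_common_state;	(%rsp not within debugger memory)
	 */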
352 leaq kdi_memranges, %rcx
353 movl kdi_nmemranges, %edx
354 1: cmpq MR_BASE(%rcx), %rsp
355 jl 2f /* below this range -- try the next one */
356 cmpq MR_LIM(%rcx), %rsp
357 jg 2f /* above this range -- try the next one */
358 jmp 3f /* matched within this range */
359
360 2: decl %edx
361 jz kdi_save_common_state /* %rsp not within debugger memory */
362 addq $MR_SIZE, %rcx
363 jmp 1b
364
365 3: /*
366 * The master is still set. That should only happen if we hit a trap
367 * while running in the debugger. Note that it may be an intentional
368 * fault. kmdb_dpi_handle_fault will sort it all out.
369 */
370
371 movq REG_OFF(KDIREG_TRAPNO)(%rbp), %rdi
372 movq REG_OFF(KDIREG_RIP)(%rbp), %rsi
373 movq REG_OFF(KDIREG_RSP)(%rbp), %rdx
374 movq %rbx, %rcx /* cpuid */
375
376 call kdi_dvec_handle_fault
377
378 /*
379 * If we're here, we ran into a debugger problem, and the user
380 * elected to solve it by having the debugger debug itself. The
381 * state we're about to save is that of the debugger when it took
382 * the fault.
383 */
384
385 jmp kdi_save_common_state
386
387 SET_SIZE(kdi_master_entry)
388 SET_SIZE(kdi_cmnint)
389
390 #endif /* __lint */
391
392 /*
393 * The cross-call handler for slave CPUs.
394 *
395 * The debugger is single-threaded, so only one CPU, called the master, may be
396 * running it at any given time. The other CPUs, known as slaves, spin in a
397 * busy loop until there's something for them to do. This is the entry point
398 * for the slaves - they'll be sent here in response to a cross-call sent by the
399 * master.
400 */
401
402 #if defined(__lint)
403 char kdi_slave_entry_patch;
404
405 void
406 kdi_slave_entry(void)
407 {
408 }
409 #else /* __lint */
410 .globl kdi_slave_entry_patch;
411
412 ENTRY_NP(kdi_slave_entry)
413
414 /* kdi_msr_add_clrentry knows where this is */
415 kdi_slave_entry_patch:
416 KDI_MSR_PATCH;
417
418 /*
419 * Cross calls are implemented as function calls, so our stack currently
420 * looks like one you'd get from a zero-argument function call. That
421 * is, there's the return %rip at %rsp, and that's about it. We need
422 * to make it look like an interrupt stack. When we first save, we'll
423 * reverse the saved %ss and %rip, which we'll fix back up when we've
424 * freed up some general-purpose registers. We'll also need to fix up
425 * the saved %rsp.
426 */
427
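	/*
	 * For reference, the frame built by the pushes below looks like
	 * this (higher addresses first), before the later fix-ups:
	 *
	 *	return %rip	(left by the cross-call; lands in the %ss slot)
	 *	entry %rsp	(pushq %rsp value, 8 too low; corrected later)
	 *	%rflags
	 *	KCS_SEL		(%cs)
	 *	%ss		(lands in the %rip slot; swapped with %rip later)
	 *	-1		(phony error code)
	 *	-1		(phony trap number)
	 */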
428 pushq %rsp /* pushed value off by 8 */
429 pushfq
430 CLI(%rax)
431 pushq $KCS_SEL
432 clrq %rax
433 movw %ss, %ax
434 pushq %rax /* rip should be here */
435 pushq $-1 /* phony trap error code */
436 pushq $-1 /* phony trap number */
437
438 subq $REG_OFF(KDIREG_TRAPNO), %rsp
439 KDI_SAVE_REGS(%rsp)
440
441 movq REG_OFF(KDIREG_SS)(%rsp), %rax
442 xchgq REG_OFF(KDIREG_RIP)(%rsp), %rax
443 movq %rax, REG_OFF(KDIREG_SS)(%rsp)
444
445 movq REG_OFF(KDIREG_RSP)(%rsp), %rax
446 addq $8, %rax
447 movq %rax, REG_OFF(KDIREG_RSP)(%rsp)
448
449 /*
450 * We've saved all of the general-purpose registers, and have a stack
451 * that is irettable (after we strip down to the error code)
452 */
453
454 GET_CPUSAVE_ADDR /* %rax = cpusave, %rbx = CPU ID */
455
456 ADVANCE_CRUMB_POINTER(%rax, %rcx, %rdx)
457
458 ADD_CRUMB(%rax, KRM_CPU_STATE, $KDI_CPU_STATE_SLAVE, %rdx)
459
460 movq REG_OFF(KDIREG_RIP)(%rsp), %rcx
461 ADD_CRUMB(%rax, KRM_PC, %rcx, %rdx)
462
463 pushq %rax
464 jmp kdi_save_common_state
465
466 SET_SIZE(kdi_slave_entry)
467
468 #endif /* __lint */
469
470 /*
471 * The state of the world:
472 *
473 * The stack has a complete set of saved registers and segment
474 * selectors, arranged in the kdi_regs.h order. It also has a pointer
475 * to our cpusave area.
476 *
477 * We need to save, into the cpusave area, a pointer to these saved
478 * registers. First we check whether we should jump straight back to
479 * the kernel. If not, we save a few more registers, ready the
480 * machine for debugger entry, and enter the debugger.
481 */
482
483 #if !defined(__lint)
484
485 ENTRY_NP(kdi_save_common_state)
486
487 popq %rdi /* the cpusave area */
488 movq %rsp, KRS_GREGS(%rdi) /* save ptr to current saved regs */
489
490 pushq %rdi
491 call kdi_trap_pass
492 cmpq $1, %rax
493 je kdi_pass_to_kernel
494 popq %rax /* cpusave in %rax */
495
496 SAVE_IDTGDT
497
498 #if !defined(__xpv)
499 /* Save off %cr0, and clear write protect */
500 movq %cr0, %rcx
501 movq %rcx, KRS_CR0(%rax)
502 andq $_BITNOT(CR0_WP), %rcx
503 movq %rcx, %cr0
504 #endif
520 movq %rax, KRS_DRSTAT(%r15)
521
522 movl $0, %edi
523 call kdi_dreg_get
524 movq %rax, KRS_DROFF(0)(%r15)
525
526 movl $1, %edi
527 call kdi_dreg_get
528 movq %rax, KRS_DROFF(1)(%r15)
529
530 movl $2, %edi
531 call kdi_dreg_get
532 movq %rax, KRS_DROFF(2)(%r15)
533
534 movl $3, %edi
535 call kdi_dreg_get
536 movq %rax, KRS_DROFF(3)(%r15)
537
538 movq %r15, %rax /* restore cpu save area to rax */
539
540 /*
541 * Save any requested MSRs.
542 */
543 movq KRS_MSR(%rax), %rcx
544 cmpq $0, %rcx
545 je no_msr
546
547 pushq %rax /* rdmsr clobbers %eax */
548 movq %rcx, %rbx
549
550 1:
551 movl MSR_NUM(%rbx), %ecx
552 cmpl $0, %ecx
553 je msr_done
554
555 movl MSR_TYPE(%rbx), %edx
556 cmpl $KDI_MSR_READ, %edx
557 jne msr_next
558
559 rdmsr /* addr in %ecx, value into %edx:%eax */
560 movl %eax, MSR_VAL(%rbx)
561 movl %edx, _CONST(MSR_VAL + 4)(%rbx)
562
563 msr_next:
564 addq $MSR_SIZE, %rbx
565 jmp 1b
566
567 msr_done:
568 popq %rax
569
570 no_msr:
571 clrq %rbp /* stack traces should end here */
572
573 pushq %rax
574 movq %rax, %rdi /* cpusave */
575
576 call kdi_debugger_entry
577
578 /* Pass cpusave to kdi_resume */
579 popq %rdi
580
581 jmp kdi_resume
582
583 SET_SIZE(kdi_save_common_state)
584
585 #endif /* !__lint */
586
587 /*
588 * Resume the world. The code that calls kdi_resume has already
589 * decided whether or not to restore the IDT.
590 */
591 #if defined(__lint)
592 void
593 kdi_resume(void)
594 {
595 }
596 #else /* __lint */
597
598 /* cpusave in %rdi */
599 ENTRY_NP(kdi_resume)
600
601 /*
602 * Send this CPU back into the world
603 */
604 #if !defined(__xpv)
605 movq KRS_CR0(%rdi), %rdx
606 movq %rdx, %cr0
607 #endif
608
609 KDI_RESTORE_DEBUGGING_STATE
610
611 movq KRS_GREGS(%rdi), %rsp
612 KDI_RESTORE_REGS(%rsp)
613 addq $REG_OFF(KDIREG_RIP), %rsp /* Discard state, trapno, err */
614 IRET
615 /*NOTREACHED*/
616 SET_SIZE(kdi_resume)
617
618 #endif /* __lint */
619
620 #if !defined(__lint)
621
622 ENTRY_NP(kdi_pass_to_kernel)
623
624 popq %rdi /* cpusave */
625
626 movq $KDI_CPU_STATE_NONE, KRS_CPU_STATE(%rdi)
627
628 /*
629 * Find the trap and vector off the right kernel handler. The trap
630 * handler will expect the stack to be in trap order, with %rip being
631 * the last entry, so we'll need to restore all our regs. On i86xpv
632 * we'll need to compensate for XPV_TRAP_POP.
633 *
634 * We're hard-coding the three cases where KMDB has installed permanent
635 * handlers, since after we KDI_RESTORE_REGS(), we don't have registers
636 * to work with; we can't use a global since other CPUs can easily pass
637 * through here at the same time.
638 *
639 * Note that we handle T_DBGENTR since userspace might have tried it.
640 */
641 movq KRS_GREGS(%rdi), %rsp
672 SET_SIZE(kdi_pass_to_kernel)
673
674 /*
675 * A minimal version of mdboot(), to be used by the master CPU only.
676 */
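/*
 * In rough C, the routine below amounts to (a sketch; mdboot() is the
 * full-featured version):
 *
 *	(*psm_shutdownf)(AD_BOOT, A_SHUTDOWN);
 *	reset();				(bare metal)
 *	HYPERVISOR_shutdown(SHUTDOWN_reboot);	(i86xpv)
 */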
677 ENTRY_NP(kdi_reboot)
678
679 movl $AD_BOOT, %edi
680 movl $A_SHUTDOWN, %esi
681 call *psm_shutdownf
682 #if defined(__xpv)
683 movl $SHUTDOWN_reboot, %edi
684 call HYPERVISOR_shutdown
685 #else
686 call reset
687 #endif
688 /*NOTREACHED*/
689
690 SET_SIZE(kdi_reboot)
691
692 #endif /* !__lint */
693
694 #if defined(__lint)
695 /*ARGSUSED*/
696 void
697 kdi_cpu_debug_init(kdi_cpusave_t *save)
698 {
699 }
700 #else /* __lint */
701
702 ENTRY_NP(kdi_cpu_debug_init)
703 pushq %rbp
704 movq %rsp, %rbp
705
706 pushq %rbx /* macro will clobber %rbx */
707 KDI_RESTORE_DEBUGGING_STATE
708 popq %rbx
709
710 leave
711 ret
712
713 SET_SIZE(kdi_cpu_debug_init)
714 #endif /* !__lint */
715
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 *
26 * Copyright 2018 Joyent, Inc.
27 */
28
29 /*
30 * Debugger entry and exit for both master and slave CPUs. kdi_idthdl.s contains
31 * the IDT stubs that drop into here (mainly via kdi_cmnint).
32 */
33
34 #if defined(__lint)
35 #include <sys/types.h>
36 #else
37
38 #include <sys/segments.h>
39 #include <sys/asm_linkage.h>
40 #include <sys/controlregs.h>
41 #include <sys/x86_archext.h>
42 #include <sys/privregs.h>
43 #include <sys/machprivregs.h>
44 #include <sys/kdi_regs.h>
45 #include <sys/psw.h>
46 #include <sys/uadmin.h>
47 #ifdef __xpv
48 #include <sys/hypervisor.h>
49 #endif
50 #include <kdi_assym.h>
51 #include <assym.h>
52
53 /* clobbers %rdx, %rcx, returns addr in %rax, CPU ID in %rbx */
54 #define GET_CPUSAVE_ADDR \
55 movzbq %gs:CPU_ID, %rbx; \
56 movq %rbx, %rax; \
57 movq $KRS_SIZE, %rcx; \
58 mulq %rcx; \
59 movq $kdi_cpusave, %rdx; \
60 /*CSTYLED*/ \
61 addq (%rdx), %rax
62
63 /*
64 * Save copies of the IDT and GDT descriptors. Note that we only save the IDT
65 * and GDT if the IDT isn't ours, as we may be legitimately re-entering the
66 * debugger through the trap handler. We don't want to clobber the saved IDT
67 * in the process, as we'd end up resuming the world on our IDT.
68 */
69 #define SAVE_IDTGDT \
70 movq %gs:CPU_IDT, %r11; \
71 leaq kdi_idt(%rip), %rsi; \
72 cmpq %rsi, %r11; \
73 je 1f; \
74 movq %r11, KRS_IDT(%rax); \
75 movq %gs:CPU_GDT, %r11; \
76 movq %r11, KRS_GDT(%rax); \
77 1:
78
79 #ifdef __xpv
80
81 /*
82 * Already on kernel gsbase via the hypervisor.
83 */
84 #define SAVE_GSBASE(reg) /* nothing */
85 #define RESTORE_GSBASE(reg) /* nothing */
86
87 #else
88
89 #define SAVE_GSBASE(base) \
90 movl $MSR_AMD_GSBASE, %ecx; \
91 rdmsr; \
92 shlq $32, %rdx; \
93 orq %rax, %rdx; \
94 movq %rdx, REG_OFF(KDIREG_GSBASE)(base); \
95 movl $MSR_AMD_KGSBASE, %ecx; \
96 rdmsr; \
97 shlq $32, %rdx; \
98 orq %rax, %rdx; \
99 movq %rdx, REG_OFF(KDIREG_KGSBASE)(base)
100
101 /*
102 * We shouldn't have stomped on KGSBASE, so don't try to restore it.
103 */
104 #define RESTORE_GSBASE(base) \
105 movq REG_OFF(KDIREG_GSBASE)(base), %rdx; \
106 movq %rdx, %rax; \
107 shrq $32, %rdx; \
108 movl $MSR_AMD_GSBASE, %ecx; \
109 wrmsr
110
111 #endif /* __xpv */
112
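/*
 * On bare metal the two macros above amount to roughly (a sketch; rdmsr
 * returns the MSR contents in %edx:%eax, which the shlq/orq pairs
 * reassemble into 64 bits):
 *
 *	regs[KDIREG_GSBASE] = rdmsr(MSR_AMD_GSBASE);
 *	regs[KDIREG_KGSBASE] = rdmsr(MSR_AMD_KGSBASE);
 *
 * and on the way back out only GSBASE is rewritten:
 *
 *	wrmsr(MSR_AMD_GSBASE, regs[KDIREG_GSBASE]);
 */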
113 /*
114 * %ss, %rsp, %rflags, %cs, %rip, %err, %trapno are already on the stack.
115 */
116 #define KDI_SAVE_REGS(base) \
117 movq %rdi, REG_OFF(KDIREG_RDI)(base); \
118 movq %rsi, REG_OFF(KDIREG_RSI)(base); \
119 movq %rdx, REG_OFF(KDIREG_RDX)(base); \
120 movq %rcx, REG_OFF(KDIREG_RCX)(base); \
121 movq %r8, REG_OFF(KDIREG_R8)(base); \
122 movq %r9, REG_OFF(KDIREG_R9)(base); \
123 movq %rax, REG_OFF(KDIREG_RAX)(base); \
124 movq %rbx, REG_OFF(KDIREG_RBX)(base); \
125 movq %rbp, REG_OFF(KDIREG_RBP)(base); \
126 movq %r10, REG_OFF(KDIREG_R10)(base); \
127 movq %r11, REG_OFF(KDIREG_R11)(base); \
128 movq %r12, REG_OFF(KDIREG_R12)(base); \
129 movq %r13, REG_OFF(KDIREG_R13)(base); \
130 movq %r14, REG_OFF(KDIREG_R14)(base); \
131 movq %r15, REG_OFF(KDIREG_R15)(base); \
132 movq %rbp, REG_OFF(KDIREG_SAVFP)(base); \
133 movq REG_OFF(KDIREG_RIP)(base), %rax; \
134 movq %rax, REG_OFF(KDIREG_SAVPC)(base); \
135 movq %cr2, %rax; \
136 movq %rax, REG_OFF(KDIREG_CR2)(base); \
137 clrq %rax; \
138 movw %ds, %ax; \
139 movq %rax, REG_OFF(KDIREG_DS)(base); \
140 movw %es, %ax; \
141 movq %rax, REG_OFF(KDIREG_ES)(base); \
142 movw %fs, %ax; \
143 movq %rax, REG_OFF(KDIREG_FS)(base); \
144 movw %gs, %ax; \
145 movq %rax, REG_OFF(KDIREG_GS)(base); \
146 SAVE_GSBASE(base)
147
148 #define KDI_RESTORE_REGS(base) \
149 movq base, %rdi; \
150 RESTORE_GSBASE(%rdi); \
151 movq REG_OFF(KDIREG_ES)(%rdi), %rax; \
152 movw %ax, %es; \
153 movq REG_OFF(KDIREG_DS)(%rdi), %rax; \
154 movw %ax, %ds; \
155 movq REG_OFF(KDIREG_CR2)(base), %rax; \
156 movq %rax, %cr2; \
157 movq REG_OFF(KDIREG_R15)(%rdi), %r15; \
158 movq REG_OFF(KDIREG_R14)(%rdi), %r14; \
159 movq REG_OFF(KDIREG_R13)(%rdi), %r13; \
160 movq REG_OFF(KDIREG_R12)(%rdi), %r12; \
161 movq REG_OFF(KDIREG_R11)(%rdi), %r11; \
162 movq REG_OFF(KDIREG_R10)(%rdi), %r10; \
163 movq REG_OFF(KDIREG_RBP)(%rdi), %rbp; \
164 movq REG_OFF(KDIREG_RBX)(%rdi), %rbx; \
165 movq REG_OFF(KDIREG_RAX)(%rdi), %rax; \
166 movq REG_OFF(KDIREG_R9)(%rdi), %r9; \
167 movq REG_OFF(KDIREG_R8)(%rdi), %r8; \
168 movq REG_OFF(KDIREG_RCX)(%rdi), %rcx; \
169 movq REG_OFF(KDIREG_RDX)(%rdi), %rdx; \
170 movq REG_OFF(KDIREG_RSI)(%rdi), %rsi; \
171 movq REG_OFF(KDIREG_RDI)(%rdi), %rdi
172
173 /*
174 * Given the address of the current CPU's cpusave area in %rax, the following
175 * macro restores the debugging state to said CPU. Restored state includes
176 * the debug registers from the global %dr variables.
177 *
178 * Takes the cpusave area in %rdi as a parameter.
179 */
180 #define KDI_RESTORE_DEBUGGING_STATE \
181 pushq %rdi; \
182 leaq kdi_drreg(%rip), %r15; \
183 movl $7, %edi; \
184 movq DR_CTL(%r15), %rsi; \
185 call kdi_dreg_set; \
186 \
187 movl $6, %edi; \
188 movq $KDIREG_DRSTAT_RESERVED, %rsi; \
189 call kdi_dreg_set; \
190 \
191 movl $0, %edi; \
192 movq DRADDR_OFF(0)(%r15), %rsi; \
193 call kdi_dreg_set; \
194 movl $1, %edi; \
195 movq DRADDR_OFF(1)(%r15), %rsi; \
196 call kdi_dreg_set; \
197 movl $2, %edi; \
198 movq DRADDR_OFF(2)(%r15), %rsi; \
199 call kdi_dreg_set; \
200 movl $3, %edi; \
201 movq DRADDR_OFF(3)(%r15), %rsi; \
202 call kdi_dreg_set; \
203 popq %rdi;
204
205 /*
206 * Each cpusave buffer has an area set aside for a ring buffer of breadcrumbs.
207 * The following macros manage the buffer.
208 */
209
210 /* Advance the ring buffer */
211 #define ADVANCE_CRUMB_POINTER(cpusave, tmp1, tmp2) \
212 movq KRS_CURCRUMBIDX(cpusave), tmp1; \
213 cmpq $[KDI_NCRUMBS - 1], tmp1; \
214 jge 1f; \
215 /* Advance the pointer and index */ \
216 addq $1, tmp1; \
217 movq tmp1, KRS_CURCRUMBIDX(cpusave); \
218 movq KRS_CURCRUMB(cpusave), tmp1; \
219 addq $KRM_SIZE, tmp1; \
220 jmp 2f; \
221 1: /* Reset the pointer and index */ \
222 movq $0, KRS_CURCRUMBIDX(cpusave); \
223 leaq KRS_CRUMBS(cpusave), tmp1; \
224 2: movq tmp1, KRS_CURCRUMB(cpusave); \
225 /* Clear the new crumb */ \
226 movq $KDI_NCRUMBS, tmp2; \
227 3: movq $0, -4(tmp1, tmp2, 4); \
228 decq tmp2; \
229 jnz 3b
230
231 /* Set a value in the current breadcrumb buffer */
232 #define ADD_CRUMB(cpusave, offset, value, tmp) \
233 movq KRS_CURCRUMB(cpusave), tmp; \
234 movq value, offset(tmp)
235
236 /* XXX implement me */
237 ENTRY_NP(kdi_nmiint)
238 clrq %rcx
239 movq (%rcx), %rcx
240 SET_SIZE(kdi_nmiint)
241
242 /*
243 * The main entry point for master CPUs. It also serves as the trap
244 * handler for all traps and interrupts taken during single-step.
245 */
246 ENTRY_NP(kdi_cmnint)
247 ALTENTRY(kdi_master_entry)
248
249 pushq %rax
250 CLI(%rax)
251 popq %rax
252
253 /* Save current register state */
254 subq $REG_OFF(KDIREG_TRAPNO), %rsp
255 KDI_SAVE_REGS(%rsp)
265 #if !defined(__xpv)
266 /*
267 * Switch to the kernel's GSBASE. Neither GSBASE nor the ill-named
268 * KGSBASE can be trusted, as the kernel may or may not have already
269 * done a swapgs. All is not lost, as the kernel can divine the correct
270 * value for us. Note that the previous GSBASE is saved in the
271 * KDI_SAVE_REGS macro to prevent a usermode process's GSBASE from being
272 * blown away. On the hypervisor, we don't need to do this, since it's
273 * ensured we're on our requested kernel GSBASE already.
274 */
275 subq $10, %rsp
276 sgdt (%rsp)
277 movq 2(%rsp), %rdi /* gdt base now in %rdi */
278 addq $10, %rsp
279 call kdi_gdt2gsbase /* returns kernel's GSBASE in %rax */
280
281 movq %rax, %rdx
282 shrq $32, %rdx
283 movl $MSR_AMD_GSBASE, %ecx
284 wrmsr
285
286 /*
287 * In the trampoline we stashed the incoming %cr3. Copy this into
288 * the kdiregs for restoration and later use.
289 */
290 mov %gs:(CPU_KPTI_DBG+KPTI_TR_CR3), %rdx
291 mov %rdx, REG_OFF(KDIREG_CR3)(%rsp)
292 /*
293 * Switch to the kernel's %cr3. From the early interrupt handler
294 * until now we've been running on the "paranoid" %cr3 (that of kas
295 * from early in boot).
296 *
297 * If we took the interrupt from somewhere already on the kas/paranoid
298 * %cr3 though, don't change it (this could happen if kcr3 is corrupt
299 * and we took a gptrap earlier from this very code).
300 */
301 cmpq %rdx, kpti_safe_cr3
302 je .no_kcr3
303 mov %gs:CPU_KPTI_KCR3, %rdx
304 cmpq $0, %rdx
305 je .no_kcr3
306 mov %rdx, %cr3
307 .no_kcr3:
308
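	/*
	 * In C terms, the %cr3 handling above is roughly (a sketch; the
	 * field names are illustrative, standing in for the CPU_KPTI_DBG,
	 * KPTI_TR_CR3 and CPU_KPTI_KCR3 offsets):
	 *
	 *	regs[KDIREG_CR3] = cpu->kpti_dbg.tr_cr3;
	 *	if (regs[KDIREG_CR3] != kpti_safe_cr3 && cpu->kpti_kcr3 != 0)
	 *		setcr3(cpu->kpti_kcr3);
	 */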
309 #endif /* __xpv */
310
311 GET_CPUSAVE_ADDR /* %rax = cpusave, %rbx = CPU ID */
312
313 ADVANCE_CRUMB_POINTER(%rax, %rcx, %rdx)
314
315 ADD_CRUMB(%rax, KRM_CPU_STATE, $KDI_CPU_STATE_MASTER, %rdx)
316
317 movq REG_OFF(KDIREG_RIP)(%rsp), %rcx
318 ADD_CRUMB(%rax, KRM_PC, %rcx, %rdx)
319 ADD_CRUMB(%rax, KRM_SP, %rsp, %rdx)
320 movq REG_OFF(KDIREG_TRAPNO)(%rsp), %rcx
321 ADD_CRUMB(%rax, KRM_TRAPNO, %rcx, %rdx)
322
323 movq %rsp, %rbp
324 pushq %rax
325
326 /*
327  * Were we in the debugger when we took the trap (i.e. was %rsp in one
328 * of the debugger's memory ranges)?
329 */
330 leaq kdi_memranges, %rcx
331 movl kdi_nmemranges, %edx
332 1:
333 cmpq MR_BASE(%rcx), %rsp
334 jl 2f /* below this range -- try the next one */
335 cmpq MR_LIM(%rcx), %rsp
336 jg 2f /* above this range -- try the next one */
337 jmp 3f /* matched within this range */
338
339 2:
340 decl %edx
341 jz kdi_save_common_state /* %rsp not within debugger memory */
342 addq $MR_SIZE, %rcx
343 jmp 1b
344
345 3: /*
346 * The master is still set. That should only happen if we hit a trap
347 * while running in the debugger. Note that it may be an intentional
348 * fault. kmdb_dpi_handle_fault will sort it all out.
349 */
350
351 movq REG_OFF(KDIREG_TRAPNO)(%rbp), %rdi
352 movq REG_OFF(KDIREG_RIP)(%rbp), %rsi
353 movq REG_OFF(KDIREG_RSP)(%rbp), %rdx
354 movq %rbx, %rcx /* cpuid */
355
356 call kdi_dvec_handle_fault
357
358 /*
359 * If we're here, we ran into a debugger problem, and the user
360 * elected to solve it by having the debugger debug itself. The
361 * state we're about to save is that of the debugger when it took
362 * the fault.
363 */
364
365 jmp kdi_save_common_state
366
367 SET_SIZE(kdi_master_entry)
368 SET_SIZE(kdi_cmnint)
369
370 /*
371 * The cross-call handler for slave CPUs.
372 *
373 * The debugger is single-threaded, so only one CPU, called the master, may be
374 * running it at any given time. The other CPUs, known as slaves, spin in a
375 * busy loop until there's something for them to do. This is the entry point
376 * for the slaves - they'll be sent here in response to a cross-call sent by the
377 * master.
378 */
379
380 ENTRY_NP(kdi_slave_entry)
381
382 /*
383 * Cross calls are implemented as function calls, so our stack currently
384 * looks like one you'd get from a zero-argument function call. That
385 * is, there's the return %rip at %rsp, and that's about it. We need
386 * to make it look like an interrupt stack. When we first save, we'll
387 * reverse the saved %ss and %rip, which we'll fix back up when we've
388 * freed up some general-purpose registers. We'll also need to fix up
389 * the saved %rsp.
390 */
391
392 pushq %rsp /* pushed value off by 8 */
393 pushfq
394 CLI(%rax)
395 pushq $KCS_SEL
396 clrq %rax
397 movw %ss, %ax
398 pushq %rax /* rip should be here */
399 pushq $-1 /* phony trap error code */
400 pushq $-1 /* phony trap number */
401
402 subq $REG_OFF(KDIREG_TRAPNO), %rsp
403 KDI_SAVE_REGS(%rsp)
404
405 movq %cr3, %rax
406 movq %rax, REG_OFF(KDIREG_CR3)(%rsp)
407
408 movq REG_OFF(KDIREG_SS)(%rsp), %rax
409 xchgq REG_OFF(KDIREG_RIP)(%rsp), %rax
410 movq %rax, REG_OFF(KDIREG_SS)(%rsp)
411
412 movq REG_OFF(KDIREG_RSP)(%rsp), %rax
413 addq $8, %rax
414 movq %rax, REG_OFF(KDIREG_RSP)(%rsp)
415
416 /*
417 * We've saved all of the general-purpose registers, and have a stack
418 * that is irettable (after we strip down to the error code)
419 */
420
421 GET_CPUSAVE_ADDR /* %rax = cpusave, %rbx = CPU ID */
422
423 ADVANCE_CRUMB_POINTER(%rax, %rcx, %rdx)
424
425 ADD_CRUMB(%rax, KRM_CPU_STATE, $KDI_CPU_STATE_SLAVE, %rdx)
426
427 movq REG_OFF(KDIREG_RIP)(%rsp), %rcx
428 ADD_CRUMB(%rax, KRM_PC, %rcx, %rdx)
429
430 pushq %rax
431 jmp kdi_save_common_state
432
433 SET_SIZE(kdi_slave_entry)
434
435 /*
436 * The state of the world:
437 *
438 * The stack has a complete set of saved registers and segment
439 * selectors, arranged in the kdi_regs.h order. It also has a pointer
440 * to our cpusave area.
441 *
442 * We need to save, into the cpusave area, a pointer to these saved
443 * registers. First we check whether we should jump straight back to
444 * the kernel. If not, we save a few more registers, ready the
445 * machine for debugger entry, and enter the debugger.
446 */
447
448 ENTRY_NP(kdi_save_common_state)
449
450 popq %rdi /* the cpusave area */
451 movq %rsp, KRS_GREGS(%rdi) /* save ptr to current saved regs */
452
453 pushq %rdi
454 call kdi_trap_pass
455 cmpq $1, %rax
456 je kdi_pass_to_kernel
457 popq %rax /* cpusave in %rax */
458
459 SAVE_IDTGDT
460
461 #if !defined(__xpv)
462 /* Save off %cr0, and clear write protect */
463 movq %cr0, %rcx
464 movq %rcx, KRS_CR0(%rax)
465 andq $_BITNOT(CR0_WP), %rcx
466 movq %rcx, %cr0
467 #endif
483 movq %rax, KRS_DRSTAT(%r15)
484
485 movl $0, %edi
486 call kdi_dreg_get
487 movq %rax, KRS_DROFF(0)(%r15)
488
489 movl $1, %edi
490 call kdi_dreg_get
491 movq %rax, KRS_DROFF(1)(%r15)
492
493 movl $2, %edi
494 call kdi_dreg_get
495 movq %rax, KRS_DROFF(2)(%r15)
496
497 movl $3, %edi
498 call kdi_dreg_get
499 movq %rax, KRS_DROFF(3)(%r15)
500
501 movq %r15, %rax /* restore cpu save area to rax */
502
503 clrq %rbp /* stack traces should end here */
504
505 pushq %rax
506 movq %rax, %rdi /* cpusave */
507
508 call kdi_debugger_entry
509
510 /* Pass cpusave to kdi_resume */
511 popq %rdi
512
513 jmp kdi_resume
514
515 SET_SIZE(kdi_save_common_state)
516
517 /*
518 * Resume the world. The code that calls kdi_resume has already
519 * decided whether or not to restore the IDT.
520 */
521 /* cpusave in %rdi */
522 ENTRY_NP(kdi_resume)
523
524 /*
525 * Send this CPU back into the world
526 */
527 #if !defined(__xpv)
528 movq KRS_CR0(%rdi), %rdx
529 movq %rdx, %cr0
530 #endif
531
532 KDI_RESTORE_DEBUGGING_STATE
533
534 movq KRS_GREGS(%rdi), %rsp
535
536 #if !defined(__xpv)
537 /*
538 * If we're going back via tr_iret_kdi, then we want to copy the
539 * final %cr3 we're going to back into the kpti_dbg area now.
540 *
541 * Since the trampoline needs to find the kpti_dbg too, we enter it
542 * with %r13 set to point at that. The real %r13 (to restore before
543 * the iret) we stash in the kpti_dbg itself.
544 */
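	/*
	 * The instructions below do, roughly (a sketch; the field names are
	 * illustrative, standing in for the KPTI_R13/KPTI_TR_CR3 offsets):
	 *
	 *	dbg = &cpu->kpti_dbg;
	 *	dbg->r13 = regs[KDIREG_R13];	(real %r13; trampoline restores it)
	 *	dbg->tr_cr3 = regs[KDIREG_CR3];	(%cr3 the trampoline will load)
	 *	regs[KDIREG_R13] = (uintptr_t)dbg; (how tr_iret_kdi finds kpti_dbg)
	 */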
545 movq %gs:CPU_SELF, %r13 /* can't leaq %gs:*, use self-ptr */
546 addq $CPU_KPTI_DBG, %r13
547
548 movq REG_OFF(KDIREG_R13)(%rsp), %rdx
549 movq %rdx, KPTI_R13(%r13)
550
551 movq REG_OFF(KDIREG_CR3)(%rsp), %rdx
552 movq %rdx, KPTI_TR_CR3(%r13)
553
554 /* The trampoline will undo this later. */
555 movq %r13, REG_OFF(KDIREG_R13)(%rsp)
556 #endif
557
558 KDI_RESTORE_REGS(%rsp)
559 addq $REG_OFF(KDIREG_RIP), %rsp /* Discard state, trapno, err */
560 /*
561 * The common trampoline code will restore %cr3 to the right value
562 * for either kernel or userland.
563 */
564 #if !defined(__xpv)
565 jmp tr_iret_kdi
566 #else
567 IRET
568 #endif
569 /*NOTREACHED*/
570 SET_SIZE(kdi_resume)
571
572 ENTRY_NP(kdi_pass_to_kernel)
573
574 popq %rdi /* cpusave */
575
576 movq $KDI_CPU_STATE_NONE, KRS_CPU_STATE(%rdi)
577
578 /*
579 * Find the trap and vector off the right kernel handler. The trap
580 * handler will expect the stack to be in trap order, with %rip being
581 * the last entry, so we'll need to restore all our regs. On i86xpv
582 * we'll need to compensate for XPV_TRAP_POP.
583 *
584 * We're hard-coding the three cases where KMDB has installed permanent
585 * handlers, since after we KDI_RESTORE_REGS(), we don't have registers
586 * to work with; we can't use a global since other CPUs can easily pass
587 * through here at the same time.
588 *
589 * Note that we handle T_DBGENTR since userspace might have tried it.
590 */
591 movq KRS_GREGS(%rdi), %rsp
622 SET_SIZE(kdi_pass_to_kernel)
623
624 /*
625 * A minimal version of mdboot(), to be used by the master CPU only.
626 */
627 ENTRY_NP(kdi_reboot)
628
629 movl $AD_BOOT, %edi
630 movl $A_SHUTDOWN, %esi
631 call *psm_shutdownf
632 #if defined(__xpv)
633 movl $SHUTDOWN_reboot, %edi
634 call HYPERVISOR_shutdown
635 #else
636 call reset
637 #endif
638 /*NOTREACHED*/
639
640 SET_SIZE(kdi_reboot)
641
642 ENTRY_NP(kdi_cpu_debug_init)
643 pushq %rbp
644 movq %rsp, %rbp
645
646 pushq %rbx /* macro will clobber %rbx */
647 KDI_RESTORE_DEBUGGING_STATE
648 popq %rbx
649
650 leave
651 ret
652 SET_SIZE(kdi_cpu_debug_init)
653
654 #define GETDREG(name, r) \
655 ENTRY_NP(name); \
656 movq r, %rax; \
657 ret; \
658 SET_SIZE(name)
659
660 #define SETDREG(name, r) \
661 ENTRY_NP(name); \
662 movq %rdi, r; \
663 ret; \
664 SET_SIZE(name)
665
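/*
 * Each instantiation below expands to a trivial accessor; in C prototype
 * terms (a sketch):
 *
 *	ulong_t kdi_getdr0(void);	(returns the contents of %dr0)
 *	void kdi_setdr0(ulong_t value);	(loads %dr0 with value)
 *
 * kdi_dreg_get() and kdi_dreg_set(), called earlier, presumably dispatch
 * to these by debug-register number.
 */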
666 GETDREG(kdi_getdr0, %dr0)
667 GETDREG(kdi_getdr1, %dr1)
668 GETDREG(kdi_getdr2, %dr2)
669 GETDREG(kdi_getdr3, %dr3)
670 GETDREG(kdi_getdr6, %dr6)
671 GETDREG(kdi_getdr7, %dr7)
672
673 SETDREG(kdi_setdr0, %dr0)
674 SETDREG(kdi_setdr1, %dr1)
675 SETDREG(kdi_setdr2, %dr2)
676 SETDREG(kdi_setdr3, %dr3)
677 SETDREG(kdi_setdr6, %dr6)
678 SETDREG(kdi_setdr7, %dr7)
679
680 #endif /* !__lint */