/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Debugger entry for both master and slave CPUs
 */

#if defined(__lint)
#include <sys/types.h>
#endif

#include <sys/segments.h>
#include <sys/asm_linkage.h>
#include <sys/controlregs.h>
#include <sys/x86_archext.h>
#include <sys/privregs.h>
#include <sys/machprivregs.h>
#include <sys/kdi_regs.h>
#include <sys/psw.h>
#include <sys/uadmin.h>
#ifdef __xpv
#include <sys/hypervisor.h>
#endif

#ifdef _ASM

#include <kdi_assym.h>
#include <assym.h>

/* clobbers %rdx, %rcx, returns addr in %rax, CPU ID in %rbx */
#define	GET_CPUSAVE_ADDR \
	movzbq	%gs:CPU_ID, %rbx;	\
	movq	%rbx, %rax;		\
	movq	$KRS_SIZE, %rcx;	\
	mulq	%rcx;			\
	movq	$kdi_cpusave, %rdx;	\
	/*CSTYLED*/			\
	addq	(%rdx), %rax
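
/*
 * For reference, a rough C sketch of what GET_CPUSAVE_ADDR computes (the
 * actual offsets come from kdi_assym.h/assym.h; this is illustrative only):
 *
 *	uint_t id = CPU->cpu_id;			-- %gs:CPU_ID
 *	kdi_cpusave_t *save = (kdi_cpusave_t *)
 *	    ((uintptr_t)kdi_cpusave + id * KRS_SIZE);	-- mulq/addq above
 *
 * i.e. kdi_cpusave points to an array of per-CPU save areas, indexed by
 * CPU ID.
 */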

/*
 * Save copies of the IDT and GDT descriptors. Note that we only save the IDT
 * and GDT if the IDT isn't ours, as we may be legitimately re-entering the
 * debugger through the trap handler. We don't want to clobber the saved IDT
 * in the process, as we'd end up resuming the world on our IDT.
 */
#define	SAVE_IDTGDT				\
	movq	%gs:CPU_IDT, %r11;		\
	leaq	kdi_idt(%rip), %rsi;		\
	cmpq	%rsi, %r11;			\
	je	1f;				\
	movq	%r11, KRS_IDT(%rax);		\
	movq	%gs:CPU_GDT, %r11;		\
	movq	%r11, KRS_GDT(%rax);		\
1:
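
/*
 * In C terms, SAVE_IDTGDT does roughly the following (a sketch only; the
 * field names here are guesses based on the CPU_/KRS_ assym offsets, and
 * %rax holds the cpusave pointer):
 *
 *	if (CPU->cpu_idt != kdi_idt) {		-- not already on our IDT
 *		save->krs_idt = CPU->cpu_idt;
 *		save->krs_gdt = CPU->cpu_gdt;
 *	}
 *
 * so re-entering through our own trap handlers leaves the previously saved
 * descriptors untouched.
 */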

#ifdef __xpv

#define	SAVE_GSBASE(reg)	/* nothing */
#define	RESTORE_GSBASE(reg)	/* nothing */

#else

#define	SAVE_GSBASE(base)				\
	movl	$MSR_AMD_GSBASE, %ecx;			\
	rdmsr;						\
	shlq	$32, %rdx;				\
	orq	%rax, %rdx;				\
	movq	%rdx, REG_OFF(KDIREG_GSBASE)(base)

#define	RESTORE_GSBASE(base)				\
	movq	REG_OFF(KDIREG_GSBASE)(base), %rdx;	\
	movq	%rdx, %rax;				\
	shrq	$32, %rdx;				\
	movl	$MSR_AMD_GSBASE, %ecx;			\
	wrmsr

#endif /* __xpv */
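
/*
 * rdmsr and wrmsr move the 64-bit MSR value through %edx:%eax (high half in
 * %edx, low half in %eax, MSR number in %ecx), so the two macros above are
 * roughly (a sketch):
 *
 *	save:	 regs[KDIREG_GSBASE] = ((uint64_t)hi << 32) | lo;
 *	restore: lo = (uint32_t)regs[KDIREG_GSBASE];
 *		 hi = (uint32_t)(regs[KDIREG_GSBASE] >> 32);
 */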

/*
 * %ss, %rsp, %rflags, %cs, %rip, %err, %trapno are already on the stack. Note
 * that on the hypervisor, we skip the save/restore of GSBASE: it's slow, and
 * unnecessary.
 */
#define	KDI_SAVE_REGS(base)				\
	movq	%rdi, REG_OFF(KDIREG_RDI)(base);	\
	movq	%rsi, REG_OFF(KDIREG_RSI)(base);	\
	movq	%rdx, REG_OFF(KDIREG_RDX)(base);	\
	movq	%rcx, REG_OFF(KDIREG_RCX)(base);	\
	movq	%r8, REG_OFF(KDIREG_R8)(base);		\
	movq	%r9, REG_OFF(KDIREG_R9)(base);		\
	movq	%rax, REG_OFF(KDIREG_RAX)(base);	\
	movq	%rbx, REG_OFF(KDIREG_RBX)(base);	\
	movq	%rbp, REG_OFF(KDIREG_RBP)(base);	\
	movq	%r10, REG_OFF(KDIREG_R10)(base);	\
	movq	%r11, REG_OFF(KDIREG_R11)(base);	\
	movq	%r12, REG_OFF(KDIREG_R12)(base);	\
	movq	%r13, REG_OFF(KDIREG_R13)(base);	\
	movq	%r14, REG_OFF(KDIREG_R14)(base);	\
	movq	%r15, REG_OFF(KDIREG_R15)(base);	\
	movq	%rbp, REG_OFF(KDIREG_SAVFP)(base);	\
	movq	REG_OFF(KDIREG_RIP)(base), %rax;	\
	movq	%rax, REG_OFF(KDIREG_SAVPC)(base);	\
	clrq	%rax;					\
	movw	%ds, %ax;				\
	movq	%rax, REG_OFF(KDIREG_DS)(base);		\
	movw	%es, %ax;				\
	movq	%rax, REG_OFF(KDIREG_ES)(base);		\
	movw	%fs, %ax;				\
	movq	%rax, REG_OFF(KDIREG_FS)(base);		\
	movw	%gs, %ax;				\
	movq	%rax, REG_OFF(KDIREG_GS)(base);		\
	SAVE_GSBASE(base)

#define	KDI_RESTORE_REGS(base)				\
	movq	base, %rdi;				\
	RESTORE_GSBASE(%rdi);				\
	movq	REG_OFF(KDIREG_ES)(%rdi), %rax;		\
	movw	%ax, %es;				\
	movq	REG_OFF(KDIREG_DS)(%rdi), %rax;		\
	movw	%ax, %ds;				\
	movq	REG_OFF(KDIREG_R15)(%rdi), %r15;	\
	movq	REG_OFF(KDIREG_R14)(%rdi), %r14;	\
	movq	REG_OFF(KDIREG_R13)(%rdi), %r13;	\
	movq	REG_OFF(KDIREG_R12)(%rdi), %r12;	\
	movq	REG_OFF(KDIREG_R11)(%rdi), %r11;	\
	movq	REG_OFF(KDIREG_R10)(%rdi), %r10;	\
	movq	REG_OFF(KDIREG_RBP)(%rdi), %rbp;	\
	movq	REG_OFF(KDIREG_RBX)(%rdi), %rbx;	\
	movq	REG_OFF(KDIREG_RAX)(%rdi), %rax;	\
	movq	REG_OFF(KDIREG_R9)(%rdi), %r9;		\
	movq	REG_OFF(KDIREG_R8)(%rdi), %r8;		\
	movq	REG_OFF(KDIREG_RCX)(%rdi), %rcx;	\
	movq	REG_OFF(KDIREG_RDX)(%rdi), %rdx;	\
	movq	REG_OFF(KDIREG_RSI)(%rdi), %rsi;	\
	movq	REG_OFF(KDIREG_RDI)(%rdi), %rdi

/*
 * Given the address of the current CPU's cpusave area in %rdi, the following
 * macro restores the debugging state to said CPU. Restored state includes
 * the debug registers from the global %dr variables, and debugging MSRs from
 * the CPU save area. This code would be in a separate routine, but for the
 * fact that some of the MSRs are jump-sensitive. As such, we need to minimize
 * the number of jumps taken subsequent to the update of said MSRs. We can
 * remove one jump (the ret) by using a macro instead of a function for the
 * debugging state restoration code.
 *
 * Takes the cpusave area in %rdi as a parameter; clobbers %rax-%rdx, %rsi,
 * %rbx, and %r15, plus whatever the kdi_dreg_set() calls clobber.
 */
#define	KDI_RESTORE_DEBUGGING_STATE \
	pushq	%rdi;					\
	leaq	kdi_drreg(%rip), %r15;			\
	movl	$7, %edi;				\
	movq	DR_CTL(%r15), %rsi;			\
	call	kdi_dreg_set;				\
							\
	movl	$6, %edi;				\
	movq	$KDIREG_DRSTAT_RESERVED, %rsi;		\
	call	kdi_dreg_set;				\
							\
	movl	$0, %edi;				\
	movq	DRADDR_OFF(0)(%r15), %rsi;		\
	call	kdi_dreg_set;				\
	movl	$1, %edi;				\
	movq	DRADDR_OFF(1)(%r15), %rsi;		\
	call	kdi_dreg_set;				\
	movl	$2, %edi;				\
	movq	DRADDR_OFF(2)(%r15), %rsi;		\
	call	kdi_dreg_set;				\
	movl	$3, %edi;				\
	movq	DRADDR_OFF(3)(%r15), %rsi;		\
	call	kdi_dreg_set;				\
	popq	%rdi;					\
							\
	/*						\
	 * Write any requested MSRs.			\
	 */						\
	movq	KRS_MSR(%rdi), %rbx;			\
	cmpq	$0, %rbx;				\
	je	3f;					\
1:							\
	movl	MSR_NUM(%rbx), %ecx;			\
	cmpl	$0, %ecx;				\
	je	3f;					\
							\
	movl	MSR_TYPE(%rbx), %edx;			\
	cmpl	$KDI_MSR_WRITE, %edx;			\
	jne	2f;					\
							\
	movq	MSR_VALP(%rbx), %rdx;			\
	movl	0(%rdx), %eax;				\
	movl	4(%rdx), %edx;				\
	wrmsr;						\
2:							\
	addq	$MSR_SIZE, %rbx;			\
	jmp	1b;					\
3:							\
	/*						\
	 * We must not branch after re-enabling LBR. If	\
	 * kdi_msr_wrexit_msr is set, it contains the	\
	 * number of the MSR that controls LBR.		\
	 * kdi_msr_wrexit_valp contains the value that	\
	 * is to be written to enable LBR.		\
	 */						\
	leaq	kdi_msr_wrexit_msr(%rip), %rcx;		\
	movl	(%rcx), %ecx;				\
	cmpl	$0, %ecx;				\
	je	1f;					\
							\
	leaq	kdi_msr_wrexit_valp(%rip), %rdx;	\
	movq	(%rdx), %rdx;				\
	movl	0(%rdx), %eax;				\
	movl	4(%rdx), %edx;				\
							\
	wrmsr;						\
1:
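
/*
 * A rough C outline of KDI_RESTORE_DEBUGGING_STATE (a sketch only;
 * kdi_dreg_set() is the routine actually called, rdmsr/wrmsr stand for the
 * instructions, and the field names are assumed from the assym offsets):
 *
 *	kdi_dreg_set(7, kdi_drreg.dr_ctl);
 *	kdi_dreg_set(6, KDIREG_DRSTAT_RESERVED);
 *	for (i = 0; i < 4; i++)
 *		kdi_dreg_set(i, kdi_drreg.dr_addr[i]);
 *
 *	for (msr = save->krs_msr; msr != NULL && msr->msr_num != 0; msr++) {
 *		if (msr->msr_type == KDI_MSR_WRITE)
 *			wrmsr(msr->msr_num, *msr->msr_valp);
 *	}
 *	if (kdi_msr_wrexit_msr != 0)
 *		wrmsr(kdi_msr_wrexit_msr, *kdi_msr_wrexit_valp);
 */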

/*
 * Each cpusave buffer has an area set aside for a ring buffer of breadcrumbs.
 * The following macros manage the buffer.
 */

/* Advance the ring buffer */
#define	ADVANCE_CRUMB_POINTER(cpusave, tmp1, tmp2)	\
	movq	KRS_CURCRUMBIDX(cpusave), tmp1;		\
	cmpq	$[KDI_NCRUMBS - 1], tmp1;		\
	jge	1f;					\
	/* Advance the pointer and index */		\
	addq	$1, tmp1;				\
	movq	tmp1, KRS_CURCRUMBIDX(cpusave);		\
	movq	KRS_CURCRUMB(cpusave), tmp1;		\
	addq	$KRM_SIZE, tmp1;			\
	jmp	2f;					\
1:	/* Reset the pointer and index */		\
	movq	$0, KRS_CURCRUMBIDX(cpusave);		\
	leaq	KRS_CRUMBS(cpusave), tmp1;		\
2:	movq	tmp1, KRS_CURCRUMB(cpusave);		\
	/* Clear the new crumb */			\
	movq	$KDI_NCRUMBS, tmp2;			\
3:	movq	$0, -4(tmp1, tmp2, 4);			\
	decq	tmp2;					\
	jnz	3b
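
/*
 * ADVANCE_CRUMB_POINTER in rough C (a sketch; the krs_ field names are
 * assumed from the KRS_ assym offsets):
 *
 *	if (save->krs_curcrumbidx < KDI_NCRUMBS - 1) {
 *		save->krs_curcrumbidx++;
 *		save->krs_curcrumb++;		-- advances by KRM_SIZE bytes
 *	} else {
 *		save->krs_curcrumbidx = 0;
 *		save->krs_curcrumb = &save->krs_crumbs[0];
 *	}
 *
 * The trailing '3:' loop then zeroes the newly selected crumb so stale
 * values from a previous debugger entry can't leak into it.
 */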

/* Set a value in the current breadcrumb buffer */
#define	ADD_CRUMB(cpusave, offset, value, tmp)		\
	movq	KRS_CURCRUMB(cpusave), tmp;		\
	movq	value, offset(tmp)

#endif	/* _ASM */

#if defined(__lint)
void
kdi_cmnint(void)
{
}
#else	/* __lint */

	/* XXX implement me */
	ENTRY_NP(kdi_nmiint)
	clrq	%rcx
	movq	(%rcx), %rcx
	SET_SIZE(kdi_nmiint)

/*
 * The main entry point for master CPUs. It also serves as the trap
 * handler for all traps and interrupts taken during single-step.
 */
	ENTRY_NP(kdi_cmnint)
	ALTENTRY(kdi_master_entry)

	pushq	%rax
	CLI(%rax)
	popq	%rax

	/* Save current register state */
	subq	$REG_OFF(KDIREG_TRAPNO), %rsp
	KDI_SAVE_REGS(%rsp)

#ifdef __xpv
	/*
	 * Clear saved_upcall_mask in unused byte of cs slot on stack.
	 * It can only confuse things.
	 */
	movb	$0, REG_OFF(KDIREG_CS)+4(%rsp)
#endif

#if !defined(__xpv)
	/*
	 * Switch to the kernel's GSBASE. Neither GSBASE nor the ill-named
	 * KGSBASE can be trusted, as the kernel may or may not have already
	 * done a swapgs. All is not lost, as the kernel can divine the correct
	 * value for us. Note that the previous GSBASE is saved in the
	 * KDI_SAVE_REGS macro to prevent a usermode process's GSBASE from being
	 * blown away. On the hypervisor, we don't need to do this, since it's
	 * ensured we're on our requested kernel GSBASE already.
	 */
	subq	$10, %rsp
	sgdt	(%rsp)
	movq	2(%rsp), %rdi		/* gdt base now in %rdi */
	addq	$10, %rsp
	call	kdi_gdt2gsbase		/* returns kernel's GSBASE in %rax */

	movq	%rax, %rdx
	shrq	$32, %rdx
	movl	$MSR_AMD_GSBASE, %ecx
	wrmsr
#endif	/* __xpv */
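
	/*
	 * The GSBASE recovery above is, in rough C (a sketch only;
	 * kdi_gdt2gsbase() is the routine actually called, everything else
	 * is illustrative):
	 *
	 *	struct { uint16_t limit; uint64_t base; } desc;	-- 10 bytes
	 *
	 *	"sgdt" stores desc on the stack;
	 *	gsbase = kdi_gdt2gsbase(desc.base);	-- kernel divines it
	 *	wrmsr(MSR_AMD_GSBASE, gsbase);		-- %edx:%eax as usual
	 */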

	GET_CPUSAVE_ADDR	/* %rax = cpusave, %rbx = CPU ID */

	ADVANCE_CRUMB_POINTER(%rax, %rcx, %rdx)

	ADD_CRUMB(%rax, KRM_CPU_STATE, $KDI_CPU_STATE_MASTER, %rdx)

	movq	REG_OFF(KDIREG_RIP)(%rsp), %rcx
	ADD_CRUMB(%rax, KRM_PC, %rcx, %rdx)
	ADD_CRUMB(%rax, KRM_SP, %rsp, %rdx)
	movq	REG_OFF(KDIREG_TRAPNO)(%rsp), %rcx
	ADD_CRUMB(%rax, KRM_TRAPNO, %rcx, %rdx)

	movq	%rsp, %rbp
	pushq	%rax

	/*
	 * Were we in the debugger when we took the trap (i.e. was %rsp in one
	 * of the debugger's memory ranges)?
	 */
	leaq	kdi_memranges, %rcx
	movl	kdi_nmemranges, %edx
1:	cmpq	MR_BASE(%rcx), %rsp
	jl	2f		/* below this range -- try the next one */
	cmpq	MR_LIM(%rcx), %rsp
	jg	2f		/* above this range -- try the next one */
	jmp	3f		/* matched within this range */

2:	decl	%edx
	jz	kdi_save_common_state	/* %rsp not within debugger memory */
	addq	$MR_SIZE, %rcx
	jmp	1b
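
	/*
	 * In rough C, the range check above is (a sketch; the mr_ field
	 * names are assumed from the MR_ assym offsets):
	 *
	 *	for (i = kdi_nmemranges, mr = kdi_memranges; i > 0;
	 *	    i--, mr++) {
	 *		if (rsp >= mr->mr_base && rsp <= mr->mr_lim)
	 *			goto in_debugger_memory;	-- label 3:
	 *	}
	 *	goto kdi_save_common_state;	-- not within debugger memory
	 */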

3:	/*
	 * %rsp lies within one of the debugger's memory ranges, meaning the
	 * master is still set and we hit a trap while running in the
	 * debugger. Note that it may be an intentional fault.
	 * kmdb_dpi_handle_fault will sort it all out.
	 */

	movq	REG_OFF(KDIREG_TRAPNO)(%rbp), %rdi
	movq	REG_OFF(KDIREG_RIP)(%rbp), %rsi
	movq	REG_OFF(KDIREG_RSP)(%rbp), %rdx
	movq	%rbx, %rcx		/* cpuid */

	call	kdi_dvec_handle_fault

	/*
	 * If we're here, we ran into a debugger problem, and the user
	 * elected to solve it by having the debugger debug itself. The
	 * state we're about to save is that of the debugger when it took
	 * the fault.
	 */

	jmp	kdi_save_common_state

	SET_SIZE(kdi_master_entry)
	SET_SIZE(kdi_cmnint)

#endif	/* __lint */

/*
 * The cross-call handler for slave CPUs.
 *
 * The debugger is single-threaded, so only one CPU, called the master, may be
 * running it at any given time. The other CPUs, known as slaves, spin in a
 * busy loop until there's something for them to do. This is the entry point
 * for the slaves - they'll be sent here in response to a cross-call sent by the
 * master.
 */

#if defined(__lint)
char kdi_slave_entry_patch;

void
kdi_slave_entry(void)
{
}
#else /* __lint */
	.globl	kdi_slave_entry_patch;

	ENTRY_NP(kdi_slave_entry)

	/* kdi_msr_add_clrentry knows where this is */
kdi_slave_entry_patch:
	KDI_MSR_PATCH;

	/*
	 * Cross calls are implemented as function calls, so our stack currently
	 * looks like one you'd get from a zero-argument function call. That
	 * is, there's the return %rip at %rsp, and that's about it. We need
	 * to make it look like an interrupt stack. When we first save, we'll
	 * reverse the saved %ss and %rip, which we'll fix back up when we've
	 * freed up some general-purpose registers. We'll also need to fix up
	 * the saved %rsp.
	 */

	pushq	%rsp		/* pushed value off by 8 */
	pushfq
	CLI(%rax)
	pushq	$KCS_SEL
	clrq	%rax
	movw	%ss, %ax
	pushq	%rax		/* rip should be here */
	pushq	$-1		/* phony trap error code */
	pushq	$-1		/* phony trap number */

	subq	$REG_OFF(KDIREG_TRAPNO), %rsp
	KDI_SAVE_REGS(%rsp)

	movq	REG_OFF(KDIREG_SS)(%rsp), %rax
	xchgq	REG_OFF(KDIREG_RIP)(%rsp), %rax
	movq	%rax, REG_OFF(KDIREG_SS)(%rsp)

	movq	REG_OFF(KDIREG_RSP)(%rsp), %rax
	addq	$8, %rax
	movq	%rax, REG_OFF(KDIREG_RSP)(%rsp)
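
	/*
	 * After the pushes and fixups above, the frame at the top of the
	 * stack looks like a hardware interrupt frame (highest address
	 * first):
	 *
	 *	%ss, caller's %rsp (the pushed value plus 8), %rflags,
	 *	%cs (KCS_SEL), %rip (the cross-call return address),
	 *	error code (-1), trap number (-1)
	 *
	 * which is the same kdi_regs.h layout the master entry path gets
	 * from a real trap, so the common save/resume code can treat both
	 * alike.
	 */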

	/*
	 * We've saved all of the general-purpose registers, and have a stack
	 * that is irettable (after we strip down to the error code)
	 */

	GET_CPUSAVE_ADDR	/* %rax = cpusave, %rbx = CPU ID */

	ADVANCE_CRUMB_POINTER(%rax, %rcx, %rdx)

	ADD_CRUMB(%rax, KRM_CPU_STATE, $KDI_CPU_STATE_SLAVE, %rdx)

	movq	REG_OFF(KDIREG_RIP)(%rsp), %rcx
	ADD_CRUMB(%rax, KRM_PC, %rcx, %rdx)

	pushq	%rax
	jmp	kdi_save_common_state

	SET_SIZE(kdi_slave_entry)

#endif /* __lint */

/*
 * The state of the world:
 *
 * The stack has a complete set of saved registers and segment
 * selectors, arranged in the kdi_regs.h order. It also has a pointer
 * to our cpusave area.
 *
 * We need to save, into the cpusave area, a pointer to these saved
 * registers. First we check whether we should jump straight back to
 * the kernel. If not, we save a few more registers, ready the
 * machine for debugger entry, and enter the debugger.
 */
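
/*
 * In C-like pseudocode, the flow from here is roughly (a sketch; names
 * other than kdi_trap_pass(), kdi_debugger_entry() and kdi_resume are
 * assumed from the assym offsets):
 *
 *	save->krs_gregs = sp;			-- ptr to the saved registers
 *	if (kdi_trap_pass(save))
 *		kdi_pass_to_kernel();		-- never returns
 *	-- save IDT/GDT, %cr0, the debug registers and any requested MSRs
 *	kdi_debugger_entry(save);
 *	kdi_resume(save);			-- restores state and irets
 */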

#if !defined(__lint)

	ENTRY_NP(kdi_save_common_state)

	popq	%rdi			/* the cpusave area */
	movq	%rsp, KRS_GREGS(%rdi)	/* save ptr to current saved regs */

	pushq	%rdi
	call	kdi_trap_pass
	cmpq	$1, %rax
	je	kdi_pass_to_kernel
	popq	%rax			/* cpusave in %rax */

	SAVE_IDTGDT

#if !defined(__xpv)
	/* Save off %cr0, and clear write protect */
	movq	%cr0, %rcx
	movq	%rcx, KRS_CR0(%rax)
	andq	$_BITNOT(CR0_WP), %rcx
	movq	%rcx, %cr0
#endif

	/* Save the debug registers and disable any active watchpoints */

	movq	%rax, %r15		/* save cpusave area ptr */
	movl	$7, %edi
	call	kdi_dreg_get
	movq	%rax, KRS_DRCTL(%r15)

	andq	$_BITNOT(KDIREG_DRCTL_WPALLEN_MASK), %rax
	movq	%rax, %rsi
	movl	$7, %edi
	call	kdi_dreg_set

	movl	$6, %edi
	call	kdi_dreg_get
	movq	%rax, KRS_DRSTAT(%r15)

	movl	$0, %edi
	call	kdi_dreg_get
	movq	%rax, KRS_DROFF(0)(%r15)

	movl	$1, %edi
	call	kdi_dreg_get
	movq	%rax, KRS_DROFF(1)(%r15)

	movl	$2, %edi
	call	kdi_dreg_get
	movq	%rax, KRS_DROFF(2)(%r15)

	movl	$3, %edi
	call	kdi_dreg_get
	movq	%rax, KRS_DROFF(3)(%r15)

	movq	%r15, %rax		/* restore cpu save area to rax */

	/*
	 * Save any requested MSRs.
	 */
	movq	KRS_MSR(%rax), %rcx
	cmpq	$0, %rcx
	je	no_msr

	pushq	%rax			/* rdmsr clobbers %eax */
	movq	%rcx, %rbx

1:
	movl	MSR_NUM(%rbx), %ecx
	cmpl	$0, %ecx
	je	msr_done

	movl	MSR_TYPE(%rbx), %edx
	cmpl	$KDI_MSR_READ, %edx
	jne	msr_next

	rdmsr			/* addr in %ecx, value into %edx:%eax */
	movl	%eax, MSR_VAL(%rbx)
	movl	%edx, _CONST(MSR_VAL + 4)(%rbx)

msr_next:
	addq	$MSR_SIZE, %rbx
	jmp	1b

msr_done:
	popq	%rax
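
	/*
	 * The loop above mirrors the MSR write-back in
	 * KDI_RESTORE_DEBUGGING_STATE, just in the read direction.
	 * Roughly (a sketch; field names assumed from the MSR_ assym
	 * offsets, rdmsr standing for the instruction):
	 *
	 *	for (msr = save->krs_msr; msr->msr_num != 0; msr++) {
	 *		if (msr->msr_type == KDI_MSR_READ)
	 *			msr->msr_val = rdmsr(msr->msr_num);
	 *	}
	 */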

no_msr:
	clrq	%rbp			/* stack traces should end here */

	pushq	%rax
	movq	%rax, %rdi		/* cpusave */

	call	kdi_debugger_entry

	/* Pass cpusave to kdi_resume */
	popq	%rdi

	jmp	kdi_resume

	SET_SIZE(kdi_save_common_state)

#endif	/* !__lint */

/*
 * Resume the world. The code that calls kdi_resume has already
 * decided whether or not to restore the IDT.
 */
#if defined(__lint)
void
kdi_resume(void)
{
}
#else	/* __lint */

	/* cpusave in %rdi */
	ENTRY_NP(kdi_resume)

	/*
	 * Send this CPU back into the world
	 */
#if !defined(__xpv)
	movq	KRS_CR0(%rdi), %rdx
	movq	%rdx, %cr0
#endif

	KDI_RESTORE_DEBUGGING_STATE

	movq	KRS_GREGS(%rdi), %rsp
	KDI_RESTORE_REGS(%rsp)
	addq	$REG_OFF(KDIREG_RIP), %rsp	/* Discard state, trapno, err */
	IRET
	/*NOTREACHED*/
	SET_SIZE(kdi_resume)

#endif	/* __lint */

#if !defined(__lint)

	ENTRY_NP(kdi_pass_to_kernel)

	popq	%rdi			/* cpusave */

	movq	$KDI_CPU_STATE_NONE, KRS_CPU_STATE(%rdi)

	/*
	 * Find the trap and vector off the right kernel handler. The trap
	 * handler will expect the stack to be in trap order, with %rip being
	 * the last entry, so we'll need to restore all our regs. On i86xpv
	 * we'll need to compensate for XPV_TRAP_POP.
	 *
	 * We're hard-coding the three cases where KMDB has installed permanent
	 * handlers, since after we KDI_RESTORE_REGS(), we don't have registers
	 * to work with; we can't use a global since other CPUs can easily pass
	 * through here at the same time.
	 *
	 * Note that we handle T_DBGENTR since userspace might have tried it.
	 */
	movq	KRS_GREGS(%rdi), %rsp
	movq	REG_OFF(KDIREG_TRAPNO)(%rsp), %rdi
	cmpq	$T_SGLSTP, %rdi
	je	1f
	cmpq	$T_BPTFLT, %rdi
	je	2f
	cmpq	$T_DBGENTR, %rdi
	je	3f
	/*
	 * Hmm, unknown handler. Somebody forgot to update this when they
	 * added a new trap interposition... try to drop back into kmdb.
	 */
	int	$T_DBGENTR
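
	/*
	 * In C-like pseudocode, the dispatch above is (a sketch):
	 *
	 *	switch (regs[KDIREG_TRAPNO]) {
	 *	case T_SGLSTP:	-- single-step: hand back to dbgtrap
	 *	case T_BPTFLT:	-- breakpoint: hand back to brktrap
	 *	case T_DBGENTR:	-- debugger entry: hand back to invaltrap
	 *		-- restore the registers, rebuild the trap frame, and
	 *		-- jump to the kernel handler (CALL_TRAP_HANDLER below)
	 *	default:
	 *		-- unknown interposed trap: re-enter kmdb via
	 *		-- int $T_DBGENTR
	 *	}
	 */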

#define	CALL_TRAP_HANDLER(name)			\
	KDI_RESTORE_REGS(%rsp);			\
	/* Discard state, trapno, err */	\
	addq	$REG_OFF(KDIREG_RIP), %rsp;	\
	XPV_TRAP_PUSH;				\
	jmp	%cs:name

1:
	CALL_TRAP_HANDLER(dbgtrap)
	/*NOTREACHED*/
2:
	CALL_TRAP_HANDLER(brktrap)
	/*NOTREACHED*/
3:
	CALL_TRAP_HANDLER(invaltrap)
	/*NOTREACHED*/

	SET_SIZE(kdi_pass_to_kernel)

/*
 * A minimal version of mdboot(), to be used by the master CPU only.
 */
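
/*
 * In rough C (a sketch; psm_shutdownf is the function pointer called below,
 * with the two arguments loaded into %edi/%esi):
 *
 *	(*psm_shutdownf)(AD_BOOT, A_SHUTDOWN);
 *	-- then, under __xpv: HYPERVISOR_shutdown(SHUTDOWN_reboot);
 *	-- otherwise:         reset();
 */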
	ENTRY_NP(kdi_reboot)

	movl	$AD_BOOT, %edi
	movl	$A_SHUTDOWN, %esi
	call	*psm_shutdownf
#if defined(__xpv)
	movl	$SHUTDOWN_reboot, %edi
	call	HYPERVISOR_shutdown
#else
	call	reset
#endif
	/*NOTREACHED*/

	SET_SIZE(kdi_reboot)

#endif	/* !__lint */

#if defined(__lint)
/*ARGSUSED*/
void
kdi_cpu_debug_init(kdi_cpusave_t *save)
{
}
#else	/* __lint */

	ENTRY_NP(kdi_cpu_debug_init)
	pushq	%rbp
	movq	%rsp, %rbp

	pushq	%rbx		/* macro will clobber %rbx */
	KDI_RESTORE_DEBUGGING_STATE
	popq	%rbx

	leave
	ret

	SET_SIZE(kdi_cpu_debug_init)
#endif	/* !__lint */