/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Debugger entry for both master and slave CPUs
 */

#if defined(__lint)
#include <sys/types.h>
#endif

#include <sys/segments.h>
#include <sys/asm_linkage.h>
#include <sys/controlregs.h>
#include <sys/x86_archext.h>
#include <sys/privregs.h>
#include <sys/machprivregs.h>
#include <sys/kdi_regs.h>
#include <sys/uadmin.h>
#include <sys/psw.h>

#ifdef _ASM

#include <kdi_assym.h>
#include <assym.h>

/* clobbers %edx, %ecx, returns addr in %eax, cpu id in %ebx */
#define	GET_CPUSAVE_ADDR \
	movl	%gs:CPU_ID, %ebx;	\
	movl	%ebx, %eax;		\
	movl	$KRS_SIZE, %ecx;	\
	mull	%ecx;			\
	movl	$kdi_cpusave, %edx;	\
	/*CSTYLED*/			\
	addl	(%edx), %eax
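
/*
 * In C terms, GET_CPUSAVE_ADDR computes roughly the following (an
 * illustrative sketch only; kdi_cpusave is the base of the per-CPU save
 * areas and KRS_SIZE the size of one entry, per the kdi headers):
 *
 *	kdi_cpusave_t *
 *	get_cpusave_addr(void)
 *	{
 *		uint_t id = CPU->cpu_id;	(the %gs:CPU_ID load above)
 *		return ((kdi_cpusave_t *)((uintptr_t)kdi_cpusave +
 *		    id * KRS_SIZE));
 *	}
 */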

/*
 * Save copies of the IDT and GDT descriptors. Note that we only save the IDT
 * and GDT if the IDT isn't ours, as we may be legitimately re-entering the
 * debugger through the trap handler. We don't want to clobber the saved IDT
 * in the process, as we'd end up resuming the world on our IDT.
 */
#define	SAVE_IDTGDT \
	movl	%gs:CPU_IDT, %edx;	\
	cmpl	$kdi_idt, %edx;		\
	je	1f;			\
	movl	%edx, KRS_IDT(%eax);	\
	movl	%gs:CPU_GDT, %edx;	\
	movl	%edx, KRS_GDT(%eax);	\
1:
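
/*
 * Roughly, in C (a sketch; the krs_ field names below simply mirror the
 * KRS_IDT/KRS_GDT offsets and may not match the real struct members):
 *
 *	if (CPU->cpu_idt != kdi_idt) {
 *		save->krs_idt = CPU->cpu_idt;
 *		save->krs_gdt = CPU->cpu_gdt;
 *	}
 */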

/*
 * Given the address of the current CPU's cpusave area in %edi, the following
 * macro restores the debugging state to said CPU. Restored state includes
 * the debug registers from the global %dr variables, and debugging MSRs from
 * the CPU save area. This code would be in a separate routine, but for the
 * fact that some of the MSRs are jump-sensitive. As such, we need to minimize
 * the number of jumps taken subsequent to the update of said MSRs. We can
 * remove one jump (the ret) by using a macro instead of a function for the
 * debugging state restoration code.
 *
 * Takes the cpusave area in %edi as a parameter, clobbers %eax-%edx
 */
#define	KDI_RESTORE_DEBUGGING_STATE \
	leal	kdi_drreg, %ebx;		\
						\
	pushl	DR_CTL(%ebx);			\
	pushl	$7;				\
	call	kdi_dreg_set;			\
	addl	$8, %esp;			\
						\
	pushl	$KDIREG_DRSTAT_RESERVED;	\
	pushl	$6;				\
	call	kdi_dreg_set;			\
	addl	$8, %esp;			\
						\
	pushl	DRADDR_OFF(0)(%ebx);		\
	pushl	$0;				\
	call	kdi_dreg_set;			\
	addl	$8, %esp;			\
						\
	pushl	DRADDR_OFF(1)(%ebx);		\
	pushl	$1;				\
	call	kdi_dreg_set;			\
	addl	$8, %esp;			\
						\
	pushl	DRADDR_OFF(2)(%ebx);		\
	pushl	$2;				\
	call	kdi_dreg_set;			\
	addl	$8, %esp;			\
						\
	pushl	DRADDR_OFF(3)(%ebx);		\
	pushl	$3;				\
	call	kdi_dreg_set;			\
	addl	$8, %esp;			\
						\
	/*					\
	 * Write any requested MSRs.		\
	 */					\
	movl	KRS_MSR(%edi), %ebx;		\
	cmpl	$0, %ebx;			\
	je	3f;				\
1:						\
	movl	MSR_NUM(%ebx), %ecx;		\
	cmpl	$0, %ecx;			\
	je	3f;				\
						\
	movl	MSR_TYPE(%ebx), %edx;		\
	cmpl	$KDI_MSR_WRITE, %edx;		\
	jne	2f;				\
						\
	movl	MSR_VALP(%ebx), %edx;		\
	movl	0(%edx), %eax;			\
	movl	4(%edx), %edx;			\
	wrmsr;					\
2:						\
	addl	$MSR_SIZE, %ebx;		\
	jmp	1b;				\
3:						\
	/*					\
	 * We must not branch after re-enabling LBR. If	\
	 * kdi_msr_wrexit_msr is set, it contains the number	\
	 * of the MSR that controls LBR. kdi_msr_wrexit_valp	\
	 * contains the value that is to be written to enable	\
	 * LBR.					\
	 */					\
	movl	kdi_msr_wrexit_msr, %ecx;	\
	cmpl	$0, %ecx;			\
	je	1f;				\
						\
	movl	kdi_msr_wrexit_valp, %edx;	\
	movl	0(%edx), %eax;			\
	movl	4(%edx), %edx;			\
						\
	wrmsr;					\
1:
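
/*
 * An approximate C rendering of KDI_RESTORE_DEBUGGING_STATE (sketch only;
 * kdi_dreg_set(), kdi_drreg and the cpusave MSR list are the real
 * interfaces, but the field names below are abbreviations of the
 * DR_CTL/DRADDR_OFF/MSR_* offsets used above):
 *
 *	kdi_dreg_set(7, kdi_drreg.dr_ctl);
 *	kdi_dreg_set(6, KDIREG_DRSTAT_RESERVED);
 *	for (i = 0; i < 4; i++)
 *		kdi_dreg_set(i, kdi_drreg.dr_addr[i]);
 *
 *	for (msr = save->krs_msr; msr != NULL && msr->msr_num != 0; msr++) {
 *		if (msr->msr_type == KDI_MSR_WRITE)
 *			wrmsr(msr->msr_num, *msr->msr_valp);
 *	}
 *
 *	if (kdi_msr_wrexit_msr != 0)
 *		wrmsr(kdi_msr_wrexit_msr, *kdi_msr_wrexit_valp);
 */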

#define	KDI_RESTORE_REGS() \
	/* Discard savfp and savpc */ \
	addl	$8, %esp; \
	popl	%ss; \
	popl	%gs; \
	popl	%fs; \
	popl	%es; \
	popl	%ds; \
	popal; \
	/* Discard trapno and err */ \
	addl	$8, %esp

/*
 * Each cpusave buffer has an area set aside for a ring buffer of breadcrumbs.
 * The following macros manage the buffer.
 */

/* Advance the ring buffer */
#define	ADVANCE_CRUMB_POINTER(cpusave, tmp1, tmp2) \
	movl	KRS_CURCRUMBIDX(cpusave), tmp1;	\
	cmpl	$[KDI_NCRUMBS - 1], tmp1;	\
	jge	1f;				\
	/* Advance the pointer and index */	\
	addl	$1, tmp1;			\
	movl	tmp1, KRS_CURCRUMBIDX(cpusave);	\
	movl	KRS_CURCRUMB(cpusave), tmp1;	\
	addl	$KRM_SIZE, tmp1;		\
	jmp	2f;				\
1:	/* Reset the pointer and index */	\
	movw	$0, KRS_CURCRUMBIDX(cpusave);	\
	leal	KRS_CRUMBS(cpusave), tmp1;	\
2:	movl	tmp1, KRS_CURCRUMB(cpusave);	\
	/* Clear the new crumb */		\
	movl	$KDI_NCRUMBS, tmp2;		\
3:	movl	$0, -4(tmp1, tmp2, 4);		\
	decl	tmp2;				\
	jnz	3b

/* Set a value in the current breadcrumb buffer */
#define	ADD_CRUMB(cpusave, offset, value, tmp) \
	movl	KRS_CURCRUMB(cpusave), tmp;	\
	movl	value, offset(tmp)
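
/*
 * In rough C terms (an illustrative sketch; KDI_NCRUMBS, KRM_SIZE and the
 * krs_/krm_ names simply mirror the assym offsets used above):
 *
 *	if (save->krs_curcrumbidx < KDI_NCRUMBS - 1) {
 *		save->krs_curcrumbidx++;
 *		save->krs_curcrumb++;
 *	} else {
 *		save->krs_curcrumbidx = 0;
 *		save->krs_curcrumb = &save->krs_crumbs[0];
 *	}
 *	(clear the new crumb; the loop above zeroes it a 32-bit word at a time)
 *
 * ADD_CRUMB(cpusave, offset, value, tmp) then stores value at the given
 * byte offset within the current crumb.
 */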

#endif	/* _ASM */

/*
 * The main entry point for master CPUs. It also serves as the trap handler
 * for all traps and interrupts taken during single-step.
 */
#if defined(__lint)
void
kdi_cmnint(void)
{
}
#else	/* __lint */

	/* XXX implement me */
	ENTRY_NP(kdi_nmiint)
	clr	%ecx
	movl	(%ecx), %ecx
	SET_SIZE(kdi_nmiint)

	ENTRY_NP(kdi_cmnint)
	ALTENTRY(kdi_master_entry)

	/* Save all registers and selectors */

	pushal
	pushl	%ds
	pushl	%es
	pushl	%fs
	pushl	%gs
	pushl	%ss

	subl	$8, %esp
	movl	%ebp, REG_OFF(KDIREG_SAVFP)(%esp)
	movl	REG_OFF(KDIREG_EIP)(%esp), %eax
	movl	%eax, REG_OFF(KDIREG_SAVPC)(%esp)

	/*
	 * If the kernel has started using its own selectors, we should too.
	 * Update our saved selectors if they haven't been updated already.
	 */
	movw	%cs, %ax
	cmpw	$KCS_SEL, %ax
	jne	1f			/* The kernel hasn't switched yet */

	movw	$KDS_SEL, %ax
	movw	%ax, %ds
	movw	kdi_cs, %ax
	cmpw	$KCS_SEL, %ax
	je	1f			/* We already switched */

	/*
	 * The kernel switched, but we haven't. Update our saved selectors
	 * to match the kernel's copies for use below.
	 */
	movl	$KCS_SEL, kdi_cs
	movl	$KDS_SEL, kdi_ds
	movl	$KFS_SEL, kdi_fs
	movl	$KGS_SEL, kdi_gs

1:
	/*
	 * Set the selectors to a known state. If we come in from kmdb's IDT,
	 * we'll be on boot's %cs. This will cause GET_CPUSAVE_ADDR to return
	 * CPU 0's cpusave, regardless of which CPU we're on, and chaos will
	 * ensue. So, if we've got $KCS_SEL in kdi_cs, switch to it. The other
	 * selectors are restored normally.
	 */
	movw	%cs:kdi_cs, %ax
	cmpw	$KCS_SEL, %ax
	jne	1f
	ljmp	$KCS_SEL, $1f
1:
	movw	%cs:kdi_ds, %ds
	movw	kdi_ds, %es
	movw	kdi_fs, %fs
	movw	kdi_gs, %gs
	movw	kdi_ds, %ss

	/*
	 * This has to come after we set %gs to the kernel descriptor. Since
	 * we've hijacked some IDT entries used in user-space such as the
	 * breakpoint handler, we can enter kdi_cmnint() with GDT_LWPGS used
	 * in %gs. On the hypervisor, CLI() needs GDT_GS to access the machcpu.
	 */
	CLI(%eax)

#if defined(__xpv)
	/*
	 * Clear saved_upcall_mask in unused byte of cs slot on stack.
	 * It can only confuse things.
	 */
	movb	$0, REG_OFF(KDIREG_CS)+2(%esp)

#endif

	GET_CPUSAVE_ADDR	/* %eax = cpusave, %ebx = CPU ID */

	ADVANCE_CRUMB_POINTER(%eax, %ecx, %edx)

	ADD_CRUMB(%eax, KRM_CPU_STATE, $KDI_CPU_STATE_MASTER, %edx)

	movl	REG_OFF(KDIREG_EIP)(%esp), %ecx
	ADD_CRUMB(%eax, KRM_PC, %ecx, %edx)
	ADD_CRUMB(%eax, KRM_SP, %esp, %edx)
	movl	REG_OFF(KDIREG_TRAPNO)(%esp), %ecx
	ADD_CRUMB(%eax, KRM_TRAPNO, %ecx, %edx)

	movl	%esp, %ebp
	pushl	%eax

	/*
	 * Were we in the debugger when we took the trap (i.e. was %esp in one
	 * of the debugger's memory ranges)?
	 */
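	/*
	 * In C, the search below is roughly (a sketch; mr_base/mr_lim mirror
	 * the MR_BASE/MR_LIM offsets into the kdi_memranges array):
	 *
	 *	for (i = 0; i < kdi_nmemranges; i++) {
	 *		mr = &kdi_memranges[i];
	 *		if (sp >= mr->mr_base && sp <= mr->mr_lim)
	 *			goto in_debugger_memory;	(label 3 below)
	 *	}
	 *	goto kdi_save_common_state;	(%esp not in debugger memory)
	 */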
	leal	kdi_memranges, %ecx
	movl	kdi_nmemranges, %edx
1:	cmpl	MR_BASE(%ecx), %esp
	jl	2f		/* below this range -- try the next one */
	cmpl	MR_LIM(%ecx), %esp
	jg	2f		/* above this range -- try the next one */
	jmp	3f		/* matched within this range */

2:	decl	%edx
	jz	kdi_save_common_state	/* %esp not within debugger memory */
	addl	$MR_SIZE, %ecx
	jmp	1b

3:	/*
	 * %esp was within one of the debugger's memory ranges. This should
	 * only happen when we take a trap while running in the debugger.
	 * kmdb_dpi_handle_fault will determine whether or not it was an
	 * expected trap, and will take the appropriate action.
	 */

	pushl	%ebx			/* cpuid */

	movl	REG_OFF(KDIREG_ESP)(%ebp), %ecx
	addl	$REG_OFF(KDIREG_EFLAGS - KDIREG_EAX), %ecx
	pushl	%ecx

	pushl	REG_OFF(KDIREG_EIP)(%ebp)
	pushl	REG_OFF(KDIREG_TRAPNO)(%ebp)

	call	kdi_dvec_handle_fault
	addl	$16, %esp

	/*
	 * If we're here, we ran into a debugger problem, and the user
	 * elected to solve it by having the debugger debug itself. The
	 * state we're about to save is that of the debugger when it took
	 * the fault.
	 */

	jmp	kdi_save_common_state

	SET_SIZE(kdi_master_entry)
	SET_SIZE(kdi_cmnint)

#endif	/* __lint */

/*
 * The cross-call handler for slave CPUs.
 *
 * The debugger is single-threaded, so only one CPU, called the master, may be
 * running it at any given time. The other CPUs, known as slaves, spin in a
 * busy loop until there's something for them to do. This is the entry point
 * for the slaves - they'll be sent here in response to a cross-call sent by
 * the master.
 */

#if defined(__lint)
char kdi_slave_entry_patch;

void
kdi_slave_entry(void)
{
}
#else /* __lint */
	.globl	kdi_slave_entry_patch;

	ENTRY_NP(kdi_slave_entry)

	/* kdi_msr_add_clrentry knows where this is */
kdi_slave_entry_patch:
	KDI_MSR_PATCH;

	/*
	 * Cross calls are implemented as function calls, so our stack
	 * currently looks like one you'd get from a zero-argument function
	 * call. There's an %eip at %esp, and that's about it. We want to
	 * make it look like the master CPU's stack. By doing this, we can
	 * use the same resume code for both master and slave. We need to
	 * make our stack look like a `struct regs' before we jump into the
	 * common save routine.
	 */
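	/*
	 * After the pushes below and the EFLAGS/%eip swap that follows, the
	 * frame matches the master's: savfp and savpc at the lowest
	 * addresses, then %ss, %gs, %fs, %es, %ds, the pushal block, the
	 * phony trap number and error code, and finally %eip, %cs and
	 * EFLAGS. (Descriptive summary only; kdi_regs.h defines the
	 * authoritative KDIREG_* layout.)
	 */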

	pushl	%cs
	pushfl
	pushl	$-1		/* A phony trap error code */
	pushl	$-1		/* A phony trap number */
	pushal
	pushl	%ds
	pushl	%es
	pushl	%fs
	pushl	%gs
	pushl	%ss

	subl	$8, %esp
	movl	%ebp, REG_OFF(KDIREG_SAVFP)(%esp)
	movl	REG_OFF(KDIREG_EIP)(%esp), %eax
	movl	%eax, REG_OFF(KDIREG_SAVPC)(%esp)

	/*
	 * Swap our saved EFLAGS and %eip. Each is where the other
	 * should be.
	 */
	movl	REG_OFF(KDIREG_EFLAGS)(%esp), %eax
	xchgl	REG_OFF(KDIREG_EIP)(%esp), %eax
	movl	%eax, REG_OFF(KDIREG_EFLAGS)(%esp)

	/*
	 * Our stack now matches struct regs, and is irettable. We don't need
	 * to do anything special for the hypervisor w.r.t. PS_IE since we
	 * iret twice anyway; the second iret back to the hypervisor
	 * will re-enable interrupts.
	 */
	CLI(%eax)

	/* Load sanitized segment selectors */
	movw	kdi_ds, %ds
	movw	kdi_ds, %es
	movw	kdi_fs, %fs
	movw	kdi_gs, %gs
	movw	kdi_ds, %ss

	GET_CPUSAVE_ADDR	/* %eax = cpusave, %ebx = CPU ID */

	ADVANCE_CRUMB_POINTER(%eax, %ecx, %edx)

	ADD_CRUMB(%eax, KRM_CPU_STATE, $KDI_CPU_STATE_SLAVE, %edx)

	movl	REG_OFF(KDIREG_EIP)(%esp), %ecx
	ADD_CRUMB(%eax, KRM_PC, %ecx, %edx)

	pushl	%eax
	jmp	kdi_save_common_state

	SET_SIZE(kdi_slave_entry)

#endif /* __lint */

/*
 * The state of the world:
 *
 * The stack has a complete set of saved registers and segment
 * selectors, arranged in `struct regs' order (or vice-versa), up to
 * and including EFLAGS. It also has a pointer to our cpusave area.
 *
 * We need to save a pointer to these saved registers. We also want
 * to adjust the saved %esp - it should point just beyond the saved
 * registers to the last frame of the thread we interrupted. Finally,
 * we want to clear out bits 16-31 of the saved selectors, as the
 * selector pushls don't automatically clear them.
 */
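
/*
 * In C, the fixups below amount to roughly the following (a sketch; gregs
 * is the saved-register array on the stack, save the cpusave area):
 *
 *	save->krs_gregs = gregs;
 *	gregs[KDIREG_ESP] += (KDIREG_EFLAGS - KDIREG_EAX) * sizeof (greg_t);
 *	gregs[KDIREG_SS] &= 0xffff;
 *	gregs[KDIREG_GS] &= 0xffff;
 *	gregs[KDIREG_FS] &= 0xffff;
 *	gregs[KDIREG_ES] &= 0xffff;
 *	gregs[KDIREG_DS] &= 0xffff;
 */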
#if !defined(__lint)

	ENTRY_NP(kdi_save_common_state)

	popl	%eax			/* the cpusave area */

	movl	%esp, KRS_GREGS(%eax)	/* save ptr to current saved regs */

	addl	$REG_OFF(KDIREG_EFLAGS - KDIREG_EAX), KDIREG_OFF(KDIREG_ESP)(%esp)

	andl	$0xffff, KDIREG_OFF(KDIREG_SS)(%esp)
	andl	$0xffff, KDIREG_OFF(KDIREG_GS)(%esp)
	andl	$0xffff, KDIREG_OFF(KDIREG_FS)(%esp)
	andl	$0xffff, KDIREG_OFF(KDIREG_ES)(%esp)
	andl	$0xffff, KDIREG_OFF(KDIREG_DS)(%esp)

	pushl	%eax
	call	kdi_trap_pass
	cmpl	$1, %eax
	je	kdi_pass_to_kernel
	popl	%eax

	SAVE_IDTGDT

#if !defined(__xpv)
	/* Save off %cr0, and clear write protect */
	movl	%cr0, %ecx
	movl	%ecx, KRS_CR0(%eax)
	andl	$_BITNOT(CR0_WP), %ecx
	movl	%ecx, %cr0
#endif
	pushl	%edi
	movl	%eax, %edi

	/* Save the debug registers and disable any active watchpoints */
	pushl	$7
	call	kdi_dreg_get
	addl	$4, %esp

	movl	%eax, KRS_DRCTL(%edi)
	andl	$_BITNOT(KDIREG_DRCTL_WPALLEN_MASK), %eax

	pushl	%eax
	pushl	$7
	call	kdi_dreg_set
	addl	$8, %esp

	pushl	$6
	call	kdi_dreg_get
	addl	$4, %esp
	movl	%eax, KRS_DRSTAT(%edi)

	pushl	$0
	call	kdi_dreg_get
	addl	$4, %esp
	movl	%eax, KRS_DROFF(0)(%edi)

	pushl	$1
	call	kdi_dreg_get
	addl	$4, %esp
	movl	%eax, KRS_DROFF(1)(%edi)

	pushl	$2
	call	kdi_dreg_get
	addl	$4, %esp
	movl	%eax, KRS_DROFF(2)(%edi)

	pushl	$3
	call	kdi_dreg_get
	addl	$4, %esp
	movl	%eax, KRS_DROFF(3)(%edi)

	movl	%edi, %eax
	popl	%edi

	/*
	 * Save any requested MSRs.
	 */
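	/*
	 * Roughly, in C (sketch; field names mirror the MSR_* offsets):
	 *
	 *	for (msr = save->krs_msr; msr != NULL && msr->msr_num != 0;
	 *	    msr++) {
	 *		if (msr->msr_type == KDI_MSR_READ)
	 *			msr->msr_val = rdmsr(msr->msr_num);
	 *	}
	 */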
	movl	KRS_MSR(%eax), %ecx
	cmpl	$0, %ecx
	je	no_msr

	pushl	%eax		/* rdmsr clobbers %eax */
	movl	%ecx, %ebx
1:
	movl	MSR_NUM(%ebx), %ecx
	cmpl	$0, %ecx
	je	msr_done

	movl	MSR_TYPE(%ebx), %edx
	cmpl	$KDI_MSR_READ, %edx
	jne	msr_next

	rdmsr			/* addr in %ecx, value into %edx:%eax */
	movl	%eax, MSR_VAL(%ebx)
	movl	%edx, _CONST(MSR_VAL + 4)(%ebx)

msr_next:
	addl	$MSR_SIZE, %ebx
	jmp	1b

msr_done:
	popl	%eax

no_msr:
	clr	%ebp		/* stack traces should end here */

	pushl	%eax
	call	kdi_debugger_entry
	popl	%eax

	jmp	kdi_resume

	SET_SIZE(kdi_save_common_state)

#endif	/* !__lint */

/*
 * Resume the world. The code that calls kdi_resume has already
 * decided whether or not to restore the IDT.
 */
#if defined(__lint)
void
kdi_resume(void)
{
}
#else	/* __lint */

	/* cpusave in %eax */
	ENTRY_NP(kdi_resume)

	/*
	 * Send this CPU back into the world
	 */

#if !defined(__xpv)
	movl	KRS_CR0(%eax), %edx
	movl	%edx, %cr0
#endif

	pushl	%edi
	movl	%eax, %edi

	KDI_RESTORE_DEBUGGING_STATE

	popl	%edi

#if defined(__xpv)
	/*
	 * kmdb might have set PS_T in the saved eflags, so we can't use
	 * intr_restore, since that restores all of eflags; instead, just
	 * pick up PS_IE from the saved eflags.
	 */
	movl	REG_OFF(KDIREG_EFLAGS)(%esp), %eax
	testl	$PS_IE, %eax
	jz	2f
	STI
2:
#endif

	addl	$8, %esp	/* Discard savfp and savpc */

	popl	%ss
	popl	%gs
	popl	%fs
	popl	%es
	popl	%ds
	popal

	addl	$8, %esp	/* Discard TRAPNO and ERROR */

	IRET

	SET_SIZE(kdi_resume)
#endif	/* __lint */

#if !defined(__lint)

	ENTRY_NP(kdi_pass_to_kernel)

	/* pop cpusave, leaving %esp pointing to saved regs */
	popl	%eax

	movl	$KDI_CPU_STATE_NONE, KRS_CPU_STATE(%eax)

	/*
	 * Find the trap and vector off the right kernel handler. The trap
	 * handler will expect the stack to be in trap order, with %eip being
	 * the last entry, so we'll need to restore all our regs.
	 *
	 * We're hard-coding the three cases where KMDB has installed permanent
	 * handlers, since after we restore, we don't have registers to work
	 * with; we can't use a global since other CPUs can easily pass through
	 * here at the same time.
	 *
	 * Note that we handle T_DBGENTR since userspace might have tried it.
	 */
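	/*
	 * Conceptually (an illustrative sketch; restore_regs() and jump_to()
	 * are placeholders for KDI_RESTORE_REGS() and the ljmp/jmp sequences
	 * below):
	 *
	 *	switch (gregs[KDIREG_TRAPNO]) {
	 *	case T_SGLSTP:
	 *		restore_regs(); jump_to(dbgtrap);
	 *	case T_BPTFLT:
	 *		restore_regs(); jump_to(brktrap);
	 *	case T_DBGENTR:
	 *		restore_regs(); jump_to(invaltrap);
	 *	default:
	 *		int $T_DBGENTR;		(drop back into kmdb)
	 *	}
	 */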
	movl	REG_OFF(KDIREG_TRAPNO)(%esp), %eax
	cmpl	$T_SGLSTP, %eax
	je	kpass_dbgtrap
	cmpl	$T_BPTFLT, %eax
	je	kpass_brktrap
	cmpl	$T_DBGENTR, %eax
	je	kpass_invaltrap
	/*
	 * Hmm, unknown handler. Somebody forgot to update this when they
	 * added a new trap interposition... try to drop back into kmdb.
	 */
	int	$T_DBGENTR

kpass_dbgtrap:
	KDI_RESTORE_REGS()
	ljmp	$KCS_SEL, $1f
1:	jmp	%cs:dbgtrap
	/*NOTREACHED*/

kpass_brktrap:
	KDI_RESTORE_REGS()
	ljmp	$KCS_SEL, $2f
2:	jmp	%cs:brktrap
	/*NOTREACHED*/

kpass_invaltrap:
	KDI_RESTORE_REGS()
	ljmp	$KCS_SEL, $3f
3:	jmp	%cs:invaltrap
	/*NOTREACHED*/

	SET_SIZE(kdi_pass_to_kernel)

/*
 * A minimal version of mdboot(), to be used by the master CPU only.
 */
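/*
 * In C, this amounts to roughly (sketch):
 *
 *	(*psm_shutdownf)(A_SHUTDOWN, AD_BOOT);
 *	#if defined(__xpv)
 *	HYPERVISOR_shutdown(SHUTDOWN_reboot);
 *	#else
 *	reset();
 *	#endif
 */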
	ENTRY_NP(kdi_reboot)

	pushl	$AD_BOOT
	pushl	$A_SHUTDOWN
	call	*psm_shutdownf
	addl	$8, %esp

#if defined(__xpv)
	pushl	$SHUTDOWN_reboot
	call	HYPERVISOR_shutdown
#else
	call	reset
#endif
	/*NOTREACHED*/

	SET_SIZE(kdi_reboot)

#endif	/* !__lint */

#if defined(__lint)
/*ARGSUSED*/
void
kdi_cpu_debug_init(kdi_cpusave_t *save)
{
}
#else	/* __lint */

	ENTRY_NP(kdi_cpu_debug_init)
	pushl	%ebp
	movl	%esp, %ebp

	pushl	%edi
	pushl	%ebx

	movl	8(%ebp), %edi

	KDI_RESTORE_DEBUGGING_STATE

	popl	%ebx
	popl	%edi
	leave
	ret

	SET_SIZE(kdi_cpu_debug_init)
#endif	/* !__lint */
