/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright 2018 Joyent, Inc.
 */

/*
 * Debugger entry for both master and slave CPUs
 */

#if defined(__lint)
#include <sys/types.h>
#endif

#include <sys/segments.h>
#include <sys/asm_linkage.h>
#include <sys/controlregs.h>
#include <sys/x86_archext.h>
#include <sys/privregs.h>
#include <sys/machprivregs.h>
#include <sys/kdi_regs.h>
#include <sys/uadmin.h>
#include <sys/psw.h>

#ifdef _ASM

#include <kdi_assym.h>
#include <assym.h>

/* clobbers %edx, %ecx, returns addr in %eax, cpu id in %ebx */
#define GET_CPUSAVE_ADDR \
        movl    %gs:CPU_ID, %ebx;               \
        movl    %ebx, %eax;                     \
        movl    $KRS_SIZE, %ecx;                \
        mull    %ecx;                           \
        movl    $kdi_cpusave, %edx;             \
        /*CSTYLED*/                             \
        addl    (%edx), %eax

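/*
 * Roughly equivalent C, as a sketch only (kdi_cpusave holds a pointer to
 * a per-CPU array of KRS_SIZE-byte save areas; the %gs-relative load and
 * the indirection through kdi_cpusave fetch CPU->cpu_id and that pointer):
 *
 *	addr = (uintptr_t)kdi_cpusave + CPU->cpu_id * KRS_SIZE;
 *
 * i.e. it locates this CPU's slot in the cpusave array.
 */
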
/*
 * Save copies of the IDT and GDT descriptors.  Note that we only save the IDT
 * and GDT if the IDT isn't ours, as we may be legitimately re-entering the
 * debugger through the trap handler.  We don't want to clobber the saved IDT
 * in the process, as we'd end up resuming the world on our IDT.
 */
#define SAVE_IDTGDT                             \
        movl    %gs:CPU_IDT, %edx;              \
        cmpl    $kdi_idt, %edx;                 \
        je      1f;                             \
        movl    %edx, KRS_IDT(%eax);            \
        movl    %gs:CPU_GDT, %edx;              \
        movl    %edx, KRS_GDT(%eax);            \
1:
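
/*
 * In C terms, a sketch of the above (krs_idt/krs_gdt are illustrative
 * names for the KRS_IDT/KRS_GDT slots of the cpusave area):
 *
 *	if (CPU->cpu_idt != kdi_idt) {
 *		save->krs_idt = CPU->cpu_idt;
 *		save->krs_gdt = CPU->cpu_gdt;
 *	}
 */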

/*
 * Given the address of the current CPU's cpusave area in %edi, the following
 * macro restores the debugging state to said CPU.  Restored state includes
 * the debug registers from the global %dr variables.
 */
#define KDI_RESTORE_DEBUGGING_STATE \
        leal    kdi_drreg, %ebx;                                \
                                                                \
        pushl   DR_CTL(%ebx);                                   \
        pushl   $7;                                             \
        call    kdi_dreg_set;                                   \
        addl    $8, %esp;                                       \
                                                                \
        pushl   $KDIREG_DRSTAT_RESERVED;                        \
        pushl   $6;                                             \
        call    kdi_dreg_set;                                   \
        addl    $8, %esp;                                       \
                                                                \
        pushl   DRADDR_OFF(0)(%ebx);                            \
        pushl   $0;                                             \
        call    kdi_dreg_set;                                   \
        addl    $8, %esp;                                       \
                                                                \
        pushl   DRADDR_OFF(1)(%ebx);                            \
        pushl   $1;                                             \
        call    kdi_dreg_set;                                   \
        addl    $8, %esp;                                       \
                                                                \
        pushl   DRADDR_OFF(2)(%ebx);                            \
        pushl   $2;                                             \
        call    kdi_dreg_set;                                   \
        addl    $8, %esp;                                       \
                                                                \
        pushl   DRADDR_OFF(3)(%ebx);                            \
        pushl   $3;                                             \
        call    kdi_dreg_set;                                   \
        addl    $8, %esp;

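/*
 * A sketch of the macro above in C (kdi_dreg_set is the helper actually
 * called; dr_ctl/dr_addr are illustrative names for the DR_CTL and
 * DRADDR_OFF slots of kdi_drreg):
 *
 *	kdi_dreg_set(7, kdi_drreg.dr_ctl);
 *	kdi_dreg_set(6, KDIREG_DRSTAT_RESERVED);
 *	for (i = 0; i < 4; i++)
 *		kdi_dreg_set(i, kdi_drreg.dr_addr[i]);
 */
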
#define KDI_RESTORE_REGS() \
        /* Discard savfp and savpc */ \
        addl    $8, %esp; \
        popl    %ss; \
        popl    %gs; \
        popl    %fs; \
        popl    %es; \
        popl    %ds; \
        popal; \
        /* Discard trapno and err */ \
        addl    $8, %esp

/*
 * Each cpusave buffer has an area set aside for a ring buffer of breadcrumbs.
 * The following macros manage the buffer.
 */

/* Advance the ring buffer */
#define ADVANCE_CRUMB_POINTER(cpusave, tmp1, tmp2) \
        movl    KRS_CURCRUMBIDX(cpusave), tmp1; \
        cmpl    $[KDI_NCRUMBS - 1], tmp1;       \
        jge     1f;                             \
        /* Advance the pointer and index */     \
        addl    $1, tmp1;                       \
        movl    tmp1, KRS_CURCRUMBIDX(cpusave); \
        movl    KRS_CURCRUMB(cpusave), tmp1;    \
        addl    $KRM_SIZE, tmp1;                \
        jmp     2f;                             \
1:      /* Reset the pointer and index */       \
        movl    $0, KRS_CURCRUMBIDX(cpusave);   \
        leal    KRS_CRUMBS(cpusave), tmp1;      \
2:      movl    tmp1, KRS_CURCRUMB(cpusave);    \
        /* Clear the new crumb */               \
        movl    $KDI_NCRUMBS, tmp2;             \
3:      movl    $0, -4(tmp1, tmp2, 4);          \
        decl    tmp2;                           \
        jnz     3b
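
/*
 * Roughly, in C (a sketch; the krs_* names are illustrative for the KRS_*
 * slots, and the clear loop above zeroes KDI_NCRUMBS longwords of the
 * new crumb):
 *
 *	if (save->krs_curcrumbidx < KDI_NCRUMBS - 1) {
 *		save->krs_curcrumbidx++;
 *		crumb = save->krs_curcrumb + KRM_SIZE;
 *	} else {
 *		save->krs_curcrumbidx = 0;
 *		crumb = save->krs_crumbs;
 *	}
 *	save->krs_curcrumb = crumb;
 *	memset(crumb, 0, KDI_NCRUMBS * 4);
 */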

/* Set a value in the current breadcrumb buffer */
#define ADD_CRUMB(cpusave, offset, value, tmp) \
        movl    KRS_CURCRUMB(cpusave), tmp;     \
        movl    value, offset(tmp)

#endif  /* _ASM */

/*
 * The main entry point for master CPUs.  It also serves as the trap handler
 * for all traps and interrupts taken during single-step.
 */
#if defined(__lint)
void
kdi_cmnint(void)
{
}
#else   /* __lint */

        /*
         * XXX implement me -- a real NMI handler is still needed.  For now,
         * deliberately load from address zero so an NMI dies with an obvious
         * fault rather than continuing silently.
         */
        ENTRY_NP(kdi_nmiint)
        clr     %ecx
        movl    (%ecx), %ecx
        SET_SIZE(kdi_nmiint)

        ENTRY_NP(kdi_cmnint)
        ALTENTRY(kdi_master_entry)

        /* Save all registers and selectors */

        pushal
        pushl   %ds
        pushl   %es
        pushl   %fs
        pushl   %gs
        pushl   %ss

        subl    $8, %esp
        movl    %ebp, REG_OFF(KDIREG_SAVFP)(%esp)
        movl    REG_OFF(KDIREG_EIP)(%esp), %eax
        movl    %eax, REG_OFF(KDIREG_SAVPC)(%esp)
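
        /*
         * The frame just built should look like this, assuming the
         * KDIREG_* ordering in kdi_regs.h (lowest address first):
         *
         *      SAVFP SAVPC SS GS FS ES DS EDI ESI EBP ESP EBX EDX ECX EAX
         *      TRAPNO ERR EIP CS EFLAGS
         *
         * i.e. a `struct regs' up to and including EFLAGS, with the saved
         * frame pointer and PC below it.
         */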

        /*
         * If the kernel has started using its own selectors, we should too.
         * Update our saved selectors if they haven't been updated already.
         */
        movw    %cs, %ax
        cmpw    $KCS_SEL, %ax
        jne     1f                      /* The kernel hasn't switched yet */

        movw    $KDS_SEL, %ax
        movw    %ax, %ds
        movw    kdi_cs, %ax
        cmpw    $KCS_SEL, %ax
        je      1f                      /* We already switched */

        /*
         * The kernel switched, but we haven't.  Update our saved selectors
         * to match the kernel's copies for use below.
         */
        movl    $KCS_SEL, kdi_cs
        movl    $KDS_SEL, kdi_ds
        movl    $KFS_SEL, kdi_fs
        movl    $KGS_SEL, kdi_gs

1:
        /*
         * Set the selectors to a known state.  If we come in from kmdb's IDT,
         * we'll be on boot's %cs.  This will cause GET_CPUSAVE_ADDR to return
         * CPU 0's cpusave, regardless of which CPU we're on, and chaos will
         * ensue.  So, if we've got $KCS_SEL in kdi_cs, switch to it.  The
         * other selectors are restored normally.
         */
        movw    %cs:kdi_cs, %ax
        cmpw    $KCS_SEL, %ax
        jne     1f
        ljmp    $KCS_SEL, $1f
1:
        movw    %cs:kdi_ds, %ds
        movw    kdi_ds, %es
        movw    kdi_fs, %fs
        movw    kdi_gs, %gs
        movw    kdi_ds, %ss

        /*
         * This has to come after we set %gs to the kernel descriptor.  Since
         * we've hijacked some IDT entries used in user-space such as the
         * breakpoint handler, we can enter kdi_cmnint() with GDT_LWPGS used
         * in %gs.  On the hypervisor, CLI() needs GDT_GS to access the machcpu.
         */
        CLI(%eax)

#if defined(__xpv)
        /*
         * Clear saved_upcall_mask in unused byte of cs slot on stack.
         * It can only confuse things.
         */
        movb    $0, REG_OFF(KDIREG_CS)+2(%esp)

#endif

        GET_CPUSAVE_ADDR                /* %eax = cpusave, %ebx = CPU ID */

        ADVANCE_CRUMB_POINTER(%eax, %ecx, %edx)

        ADD_CRUMB(%eax, KRM_CPU_STATE, $KDI_CPU_STATE_MASTER, %edx)

        movl    REG_OFF(KDIREG_EIP)(%esp), %ecx
        ADD_CRUMB(%eax, KRM_PC, %ecx, %edx)
        ADD_CRUMB(%eax, KRM_SP, %esp, %edx)
        movl    REG_OFF(KDIREG_TRAPNO)(%esp), %ecx
        ADD_CRUMB(%eax, KRM_TRAPNO, %ecx, %edx)

        movl    %esp, %ebp
        pushl   %eax

        /*
         * Were we in the debugger when we took the trap (i.e. was %esp in one
         * of the debugger's memory ranges)?
         */
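
        /*
         * A sketch of the scan below in C (mr_base/mr_lim are illustrative
         * names for the MR_BASE/MR_LIM slots of a memrange entry):
         *
         *	for (mr = kdi_memranges, i = kdi_nmemranges; i != 0;
         *	    i--, mr++) {
         *		if (sp >= mr->mr_base && sp <= mr->mr_lim)
         *			goto in_debugger_memory;
         *	}
         *	goto kdi_save_common_state;
         */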
        leal    kdi_memranges, %ecx
        movl    kdi_nmemranges, %edx
1:      cmpl    MR_BASE(%ecx), %esp
        jl      2f              /* below this range -- try the next one */
        cmpl    MR_LIM(%ecx), %esp
        jg      2f              /* above this range -- try the next one */
        jmp     3f              /* matched within this range */

2:      decl    %edx
        jz      kdi_save_common_state   /* %esp not within debugger memory */
        addl    $MR_SIZE, %ecx
        jmp     1b

3:      /*
         * %esp was within one of the debugger's memory ranges.  This should
         * only happen when we take a trap while running in the debugger.
         * kmdb_dpi_handle_fault will determine whether or not it was an
         * expected trap, and will take the appropriate action.
         */

        pushl   %ebx                    /* cpuid */

        movl    REG_OFF(KDIREG_ESP)(%ebp), %ecx
        addl    $REG_OFF(KDIREG_EFLAGS - KDIREG_EAX), %ecx
        pushl   %ecx

        pushl   REG_OFF(KDIREG_EIP)(%ebp)
        pushl   REG_OFF(KDIREG_TRAPNO)(%ebp)

        call    kdi_dvec_handle_fault
        addl    $16, %esp

        /*
         * If we're here, we ran into a debugger problem, and the user
         * elected to solve it by having the debugger debug itself.  The
         * state we're about to save is that of the debugger when it took
         * the fault.
         */

        jmp     kdi_save_common_state

        SET_SIZE(kdi_master_entry)
        SET_SIZE(kdi_cmnint)

#endif  /* __lint */

/*
 * The cross-call handler for slave CPUs.
 *
 * The debugger is single-threaded, so only one CPU, called the master, may be
 * running it at any given time.  The other CPUs, known as slaves, spin in a
 * busy loop until there's something for them to do.  This is the entry point
 * for the slaves - they'll be sent here in response to a cross-call sent by
 * the master.
 */

#if defined(__lint)
void
kdi_slave_entry(void)
{
}
#else /* __lint */
        ENTRY_NP(kdi_slave_entry)

        /*
         * Cross calls are implemented as function calls, so our stack
         * currently looks like one you'd get from a zero-argument function
         * call.  There's an %eip at %esp, and that's about it.  We want to
         * make it look like the master CPU's stack.  By doing this, we can
         * use the same resume code for both master and slave.  We need to
         * make our stack look like a `struct regs' before we jump into the
         * common save routine.
         */
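
        /*
         * Note that pushing %cs and EFLAGS below leaves them in the reverse
         * of hardware trap-frame order relative to the %eip the cross call
         * left on the stack; the EFLAGS/%eip swap further down corrects
         * this.  The two $-1 pushes stand in for the error code and trap
         * number a real trap would have supplied.
         */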

        pushl   %cs
        pushfl
        pushl   $-1             /* A phony trap error code */
        pushl   $-1             /* A phony trap number */
        pushal
        pushl   %ds
        pushl   %es
        pushl   %fs
        pushl   %gs
        pushl   %ss

        subl    $8, %esp
        movl    %ebp, REG_OFF(KDIREG_SAVFP)(%esp)
        movl    REG_OFF(KDIREG_EIP)(%esp), %eax
        movl    %eax, REG_OFF(KDIREG_SAVPC)(%esp)

        /*
         * Swap our saved EFLAGS and %eip.  Each is where the other
         * should be.
         */
        movl    REG_OFF(KDIREG_EFLAGS)(%esp), %eax
        xchgl   REG_OFF(KDIREG_EIP)(%esp), %eax
        movl    %eax, REG_OFF(KDIREG_EFLAGS)(%esp)

        /*
         * Our stack now matches struct regs, and is irettable.  We don't need
         * to do anything special for the hypervisor w.r.t. PS_IE since we
         * iret twice anyway; the second iret back to the hypervisor
         * will re-enable interrupts.
         */
        CLI(%eax)

        /* Load sanitized segment selectors */
        movw    kdi_ds, %ds
        movw    kdi_ds, %es
        movw    kdi_fs, %fs
        movw    kdi_gs, %gs
        movw    kdi_ds, %ss

        GET_CPUSAVE_ADDR        /* %eax = cpusave, %ebx = CPU ID */

        ADVANCE_CRUMB_POINTER(%eax, %ecx, %edx)

        ADD_CRUMB(%eax, KRM_CPU_STATE, $KDI_CPU_STATE_SLAVE, %edx)

        movl    REG_OFF(KDIREG_EIP)(%esp), %ecx
        ADD_CRUMB(%eax, KRM_PC, %ecx, %edx)

        pushl   %eax
        jmp     kdi_save_common_state

        SET_SIZE(kdi_slave_entry)

#endif  /* __lint */

/*
 * The state of the world:
 *
 * The stack has a complete set of saved registers and segment
 * selectors, arranged in `struct regs' order (or vice-versa), up to
 * and including EFLAGS.  It also has a pointer to our cpusave area.
 *
 * We need to save a pointer to these saved registers.  We also want
 * to adjust the saved %esp - it should point just beyond the saved
 * registers to the last frame of the thread we interrupted.  Finally,
 * we want to clear out bits 16-31 of the saved selectors, as the
 * selector pushls don't automatically clear them.
 */
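
/*
 * A sketch of those fixups in C (gregs is the saved-register array on
 * the stack; krs_gregs is an illustrative name for the KRS_GREGS slot):
 *
 *	save->krs_gregs = gregs;
 *	gregs[KDIREG_ESP] += REG_OFF(KDIREG_EFLAGS - KDIREG_EAX);
 *	gregs[KDIREG_SS] &= 0xffff;	(and likewise %gs/%fs/%es/%ds)
 */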
#if !defined(__lint)

        ENTRY_NP(kdi_save_common_state)

        popl    %eax                    /* the cpusave area */

        movl    %esp, KRS_GREGS(%eax)   /* save ptr to current saved regs */

        addl    $REG_OFF(KDIREG_EFLAGS - KDIREG_EAX), REG_OFF(KDIREG_ESP)(%esp)

        andl    $0xffff, REG_OFF(KDIREG_SS)(%esp)
        andl    $0xffff, REG_OFF(KDIREG_GS)(%esp)
        andl    $0xffff, REG_OFF(KDIREG_FS)(%esp)
        andl    $0xffff, REG_OFF(KDIREG_ES)(%esp)
        andl    $0xffff, REG_OFF(KDIREG_DS)(%esp)

        pushl   %eax
        call    kdi_trap_pass
        cmpl    $1, %eax
        je      kdi_pass_to_kernel
        popl    %eax

        SAVE_IDTGDT

#if !defined(__xpv)
        /* Save off %cr0, and clear write protect */
        movl    %cr0, %ecx
        movl    %ecx, KRS_CR0(%eax)
        andl    $_BITNOT(CR0_WP), %ecx
        movl    %ecx, %cr0
#endif
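
        /*
         * The sequence below saves the debug registers through
         * kdi_dreg_get/kdi_dreg_set and disables any active watchpoints;
         * in rough C (krs_* names are illustrative for the KRS_* slots):
         *
         *	save->krs_drctl = kdi_dreg_get(7);
         *	kdi_dreg_set(7, save->krs_drctl & ~KDIREG_DRCTL_WPALLEN_MASK);
         *	save->krs_drstat = kdi_dreg_get(6);
         *	for (i = 0; i < 4; i++)
         *		save->krs_droff[i] = kdi_dreg_get(i);
         */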
        pushl   %edi
        movl    %eax, %edi

        /* Save the debug registers and disable any active watchpoints */
        pushl   $7
        call    kdi_dreg_get
        addl    $4, %esp

        movl    %eax, KRS_DRCTL(%edi)
        andl    $_BITNOT(KDIREG_DRCTL_WPALLEN_MASK), %eax

        pushl   %eax
        pushl   $7
        call    kdi_dreg_set
        addl    $8, %esp

        pushl   $6
        call    kdi_dreg_get
        addl    $4, %esp
        movl    %eax, KRS_DRSTAT(%edi)

        pushl   $0
        call    kdi_dreg_get
        addl    $4, %esp
        movl    %eax, KRS_DROFF(0)(%edi)

        pushl   $1
        call    kdi_dreg_get
        addl    $4, %esp
        movl    %eax, KRS_DROFF(1)(%edi)

        pushl   $2
        call    kdi_dreg_get
        addl    $4, %esp
        movl    %eax, KRS_DROFF(2)(%edi)

        pushl   $3
        call    kdi_dreg_get
        addl    $4, %esp
        movl    %eax, KRS_DROFF(3)(%edi)

        movl    %edi, %eax
        popl    %edi

        clr     %ebp            /* stack traces should end here */

        pushl   %eax
        call    kdi_debugger_entry
        popl    %eax

        jmp     kdi_resume

        SET_SIZE(kdi_save_common_state)

#endif  /* !__lint */

/*
 * Resume the world.  The code that calls kdi_resume has already
 * decided whether or not to restore the IDT.
 */
#if defined(__lint)
void
kdi_resume(void)
{
}
#else   /* __lint */

        /* cpusave in %eax */
        ENTRY_NP(kdi_resume)

        /*
         * Send this CPU back into the world
         */

#if !defined(__xpv)
        movl    KRS_CR0(%eax), %edx
        movl    %edx, %cr0
#endif

        pushl   %edi
        movl    %eax, %edi

        KDI_RESTORE_DEBUGGING_STATE

        popl    %edi

#if defined(__xpv)
        /*
         * kmdb might have set PS_T in the saved eflags, so we can't use
         * intr_restore, since that restores all of eflags; instead, just
         * pick up PS_IE from the saved eflags.
         */
        movl    REG_OFF(KDIREG_EFLAGS)(%esp), %eax
        testl   $PS_IE, %eax
        jz      2f
        STI
2:
#endif

        addl    $8, %esp        /* Discard savfp and savpc */

        popl    %ss
        popl    %gs
        popl    %fs
        popl    %es
        popl    %ds
        popal

        addl    $8, %esp        /* Discard TRAPNO and ERROR */

        IRET

        SET_SIZE(kdi_resume)
#endif  /* __lint */

#if !defined(__lint)

        ENTRY_NP(kdi_pass_to_kernel)

        /* pop cpusave, leaving %esp pointing to saved regs */
        popl    %eax

        movl    $KDI_CPU_STATE_NONE, KRS_CPU_STATE(%eax)

        /*
         * Find the trap and vector off the right kernel handler.  The trap
         * handler will expect the stack to be in trap order, with %eip being
         * the last entry, so we'll need to restore all our regs.
         *
         * We're hard-coding the three cases where KMDB has installed permanent
         * handlers, since after we restore, we don't have registers to work
         * with; we can't use a global since other CPUs can easily pass through
         * here at the same time.
         *
         * Note that we handle T_DBGENTR since userspace might have tried it.
         */
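        /*
         * Equivalently, as a C sketch of the dispatch below:
         *
         *	switch (gregs[KDIREG_TRAPNO]) {
         *	case T_SGLSTP:	restore regs, jump to dbgtrap;
         *	case T_BPTFLT:	restore regs, jump to brktrap;
         *	case T_DBGENTR:	restore regs, jump to invaltrap;
         *	default:	int $T_DBGENTR to drop back into kmdb;
         *	}
         */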
        movl    REG_OFF(KDIREG_TRAPNO)(%esp), %eax
        cmpl    $T_SGLSTP, %eax
        je      kpass_dbgtrap
        cmpl    $T_BPTFLT, %eax
        je      kpass_brktrap
        cmpl    $T_DBGENTR, %eax
        je      kpass_invaltrap
        /*
         * Hmm, unknown handler.  Somebody forgot to update this when they
         * added a new trap interposition... try to drop back into kmdb.
         */
        int     $T_DBGENTR

kpass_dbgtrap:
        KDI_RESTORE_REGS()
        ljmp    $KCS_SEL, $1f
1:      jmp     %cs:dbgtrap
        /*NOTREACHED*/

kpass_brktrap:
        KDI_RESTORE_REGS()
        ljmp    $KCS_SEL, $2f
2:      jmp     %cs:brktrap
        /*NOTREACHED*/

kpass_invaltrap:
        KDI_RESTORE_REGS()
        ljmp    $KCS_SEL, $3f
3:      jmp     %cs:invaltrap
        /*NOTREACHED*/

        SET_SIZE(kdi_pass_to_kernel)

        /*
         * A minimal version of mdboot(), to be used by the master CPU only.
         */
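        /*
         * In effect (sketch):
         *
         *	(*psm_shutdownf)(A_SHUTDOWN, AD_BOOT);
         *	reset();	-- or HYPERVISOR_shutdown(SHUTDOWN_reboot)
         *			-- under the hypervisor
         */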
        ENTRY_NP(kdi_reboot)

        pushl   $AD_BOOT
        pushl   $A_SHUTDOWN
        call    *psm_shutdownf
        addl    $8, %esp

#if defined(__xpv)
        pushl   $SHUTDOWN_reboot
        call    HYPERVISOR_shutdown
#else
        call    reset
#endif
        /*NOTREACHED*/

        SET_SIZE(kdi_reboot)

#endif  /* !__lint */

#if defined(__lint)
/*ARGSUSED*/
void
kdi_cpu_debug_init(kdi_cpusave_t *save)
{
}
#else   /* __lint */

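        /*
         * Restore this CPU's debugging state from the global kdi_drreg
         * values.  The caller's cpusave pointer argument is loaded into
         * %edi, where KDI_RESTORE_DEBUGGING_STATE expects it.
         */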
        ENTRY_NP(kdi_cpu_debug_init)
        pushl   %ebp
        movl    %esp, %ebp

        pushl   %edi
        pushl   %ebx

        movl    8(%ebp), %edi

        KDI_RESTORE_DEBUGGING_STATE

        popl    %ebx
        popl    %edi
        leave
        ret

        SET_SIZE(kdi_cpu_debug_init)
#endif  /* !__lint */