435 /*
436 * The state of the world:
437 *
438 * The stack has a complete set of saved registers and segment
439 * selectors, arranged in the kdi_regs.h order. It also has a pointer
440 * to our cpusave area.
441 *
442 * We need to save, into the cpusave area, a pointer to these saved
443 * registers. First we check whether we should jump straight back to
444 * the kernel. If not, we save a few more registers, ready the
445 * machine for debugger entry, and enter the debugger.
446 */
447
448 ENTRY_NP(kdi_save_common_state)
449
	/*
	 * On entry, the top of the stack holds the cpusave-area pointer
	 * pushed by our caller; below it is the full register save area
	 * in kdi_regs.h order (see block comment above).
	 */
450 popq %rdi /* the cpusave area */
451 movq %rsp, KRS_GREGS(%rdi) /* save ptr to current saved regs */
452
	/*
	 * Ask kdi_trap_pass() whether the kernel, not KMDB, should get
	 * this trap.  The cpusave pointer is re-pushed so it survives the
	 * call; its value is popped back into %rax on the no-pass path.
	 *
	 * NOTE(review): only the exact return value 1 is treated as
	 * "pass to kernel" here -- confirm kdi_trap_pass() cannot return
	 * any other non-zero value (a later revision of this code tests
	 * for non-zero instead).
	 */
453 pushq %rdi
454 call kdi_trap_pass
455 cmpq $1, %rax
456 je kdi_pass_to_kernel
457 popq %rax /* cpusave in %rax */
458
459 SAVE_IDTGDT
460
461 #if !defined(__xpv)
462 /* Save off %cr0, and clear write protect */
463 movq %cr0, %rcx
464 movq %rcx, KRS_CR0(%rax)
465 andq $_BITNOT(CR0_WP), %rcx
466 movq %rcx, %cr0
467 #endif
468
469 /* Save the debug registers and disable any active watchpoints */
470
471 movq %rax, %r15 /* save cpusave area ptr */
	/* Fetch debug reg 7 (%dr7, debug control) into %rax */
472 movl $7, %edi
473 call kdi_dreg_get
474 movq %rax, KRS_DRCTL(%r15)
475
	/* Mask off the watchpoint-enable bits before writing it back */
476 andq $_BITNOT(KDIREG_DRCTL_WPALLEN_MASK), %rax
	/*
	 * Tail of kdi_resume (entry is outside this view).  %r13 here
	 * presumably points at this CPU's KPTI trampoline frame -- TODO
	 * confirm against the code that loaded it above.
	 */
552 movq %rdx, KPTI_TR_CR3(%r13)
553
554 /* The trampoline will undo this later. */
555 movq %r13, REG_OFF(KDIREG_R13)(%rsp)
556 #endif
557
	/* Restore all saved registers, then drop the trap bookkeeping */
558 KDI_RESTORE_REGS(%rsp)
559 addq $REG_OFF(KDIREG_RIP), %rsp /* Discard state, trapno, err */
560 /*
561 * The common trampoline code will restore %cr3 to the right value
562 * for either kernel or userland.
563 */
564 #if !defined(__xpv)
565 jmp tr_iret_kdi
566 #else
567 IRET
568 #endif
569 /*NOTREACHED*/
570 SET_SIZE(kdi_resume)
571
572 ENTRY_NP(kdi_pass_to_kernel)
573
	/* Caller pushed the cpusave-area pointer before jumping here */
574 popq %rdi /* cpusave */
575
	/* This CPU is no longer in the debugger */
576 movq $KDI_CPU_STATE_NONE, KRS_CPU_STATE(%rdi)
577
578 /*
579 * Find the trap and vector off the right kernel handler. The trap
580 * handler will expect the stack to be in trap order, with %rip being
581 * the last entry, so we'll need to restore all our regs. On i86xpv
582 * we'll need to compensate for XPV_TRAP_POP.
583 *
584 * We're hard-coding the three cases where KMDB has installed permanent
585 * handlers, since after we KDI_RESTORE_REGS(), we don't have registers
586 * to work with; we can't use a global since other CPUs can easily pass
587 * through here at the same time.
588 *
589 * Note that we handle T_DBGENTR since userspace might have tried it.
590 */
	/* Point %rsp back at the saved register area, then dispatch on trapno */
591 movq KRS_GREGS(%rdi), %rsp
592 movq REG_OFF(KDIREG_TRAPNO)(%rsp), %rdi
593 cmpq $T_SGLSTP, %rdi
594 je 1f /* single-step -> dbgtrap */
595 cmpq $T_BPTFLT, %rdi
596 je 2f /* breakpoint -> brktrap */
597 cmpq $T_DBGENTR, %rdi
598 je 3f /* debugger entry -> invaltrap (the kernel's interposed handler) */
599 /*
600 * Hmm, unknown handler. Somebody forgot to update this when they
601 * added a new trap interposition... try to drop back into kmdb.
602 */
603 int $T_DBGENTR
604
	/*
	 * After KDI_RESTORE_REGS() no scratch registers remain, so the
	 * restore/pop/jump sequence must be a single straight-line macro.
	 */
605 #define CALL_TRAP_HANDLER(name) \
606 KDI_RESTORE_REGS(%rsp); \
607 /* Discard state, trapno, err */ \
608 addq $REG_OFF(KDIREG_RIP), %rsp; \
609 XPV_TRAP_PUSH; \
610 jmp %cs:name
611
612 1:
613 CALL_TRAP_HANDLER(dbgtrap)
614 /*NOTREACHED*/
615 2:
616 CALL_TRAP_HANDLER(brktrap)
617 /*NOTREACHED*/
618 3:
619 CALL_TRAP_HANDLER(invaltrap)
620 /*NOTREACHED*/
621
622 SET_SIZE(kdi_pass_to_kernel)
623
624 /*
625 * A minimal version of mdboot(), to be used by the master CPU only.
626 */
627 ENTRY_NP(kdi_reboot)
628
	/*
	 * psm_shutdownf(AD_BOOT, A_SHUTDOWN): invoke the platform's
	 * shutdown hook through the psm_shutdownf function pointer
	 * (SysV args in %edi/%esi).
	 */
629 movl $AD_BOOT, %edi
630 movl $A_SHUTDOWN, %esi
631 call *psm_shutdownf
632 #if defined(__xpv)
	/* Under Xen, ask the hypervisor to reboot the domain */
633 movl $SHUTDOWN_reboot, %edi
634 call HYPERVISOR_shutdown
635 #else
	/* Bare metal: hard reset */
636 call reset
637 #endif
638 /*NOTREACHED*/
|
435 /*
436 * The state of the world:
437 *
438 * The stack has a complete set of saved registers and segment
439 * selectors, arranged in the kdi_regs.h order. It also has a pointer
440 * to our cpusave area.
441 *
442 * We need to save, into the cpusave area, a pointer to these saved
443 * registers. First we check whether we should jump straight back to
444 * the kernel. If not, we save a few more registers, ready the
445 * machine for debugger entry, and enter the debugger.
446 */
447
448 ENTRY_NP(kdi_save_common_state)
449
	/*
	 * On entry, the top of the stack holds the cpusave-area pointer
	 * pushed by our caller; below it is the full register save area
	 * in kdi_regs.h order (see block comment above).
	 */
450 popq %rdi /* the cpusave area */
451 movq %rsp, KRS_GREGS(%rdi) /* save ptr to current saved regs */
452
	/*
	 * Ask kdi_trap_pass() whether the kernel, not KMDB, should get
	 * this trap.  Any non-zero return takes the pass path with the
	 * return value still in %rax (kdi_pass_to_kernel inspects it);
	 * otherwise the cpusave pointer is popped back into %rax.
	 */
453 pushq %rdi
454 call kdi_trap_pass
455 testq %rax, %rax
456 jnz kdi_pass_to_kernel
457 popq %rax /* cpusave in %rax */
458
459 SAVE_IDTGDT
460
461 #if !defined(__xpv)
462 /* Save off %cr0, and clear write protect */
463 movq %cr0, %rcx
464 movq %rcx, KRS_CR0(%rax)
465 andq $_BITNOT(CR0_WP), %rcx
466 movq %rcx, %cr0
467 #endif
468
469 /* Save the debug registers and disable any active watchpoints */
470
471 movq %rax, %r15 /* save cpusave area ptr */
	/* Fetch debug reg 7 (%dr7, debug control) into %rax */
472 movl $7, %edi
473 call kdi_dreg_get
474 movq %rax, KRS_DRCTL(%r15)
475
	/* Mask off the watchpoint-enable bits before writing it back */
476 andq $_BITNOT(KDIREG_DRCTL_WPALLEN_MASK), %rax
	/*
	 * Tail of kdi_resume (entry is outside this view).  %r13 here
	 * presumably points at this CPU's KPTI trampoline frame -- TODO
	 * confirm against the code that loaded it above.
	 */
552 movq %rdx, KPTI_TR_CR3(%r13)
553
554 /* The trampoline will undo this later. */
555 movq %r13, REG_OFF(KDIREG_R13)(%rsp)
556 #endif
557
	/* Restore all saved registers, then drop the trap bookkeeping */
558 KDI_RESTORE_REGS(%rsp)
559 addq $REG_OFF(KDIREG_RIP), %rsp /* Discard state, trapno, err */
560 /*
561 * The common trampoline code will restore %cr3 to the right value
562 * for either kernel or userland.
563 */
564 #if !defined(__xpv)
565 jmp tr_iret_kdi
566 #else
567 IRET
568 #endif
569 /*NOTREACHED*/
570 SET_SIZE(kdi_resume)
571
572
573 /*
574 * We took a trap that should be handled by the kernel, not KMDB.
575 *
576 * We're hard-coding the three cases where KMDB has installed permanent
577 * handlers, since after we KDI_RESTORE_REGS(), we don't have registers
578 * to work with; we can't use a global since other CPUs can easily pass
579 * through here at the same time.
580 *
581 * Note that we handle T_DBGENTR since userspace might have tried it.
582 *
583 * The trap handler will expect the stack to be in trap order, with %rip
584 * being the last entry, so we'll need to restore all our regs. On
585 * i86xpv we'll need to compensate for XPV_TRAP_POP.
586 *
587 * %rax on entry is either 1 or 2, which is from kdi_trap_pass().
588 * kdi_cmnint stashed the original %cr3 into KDIREG_CR3, then (probably)
589 * switched us to the CPU's kf_kernel_cr3. But we're about to call, for
590 * example:
591 *
592 * dbgtrap->trap()->tr_iret_kernel
593 *
594 * which, unlike tr_iret_kdi, doesn't restore the original %cr3, so
595 * we'll do so here if needed.
596 *
597 * This isn't just a matter of tidiness: for example, consider:
598 *
599 * hat_switch(oldhat=kas.a_hat, newhat=prochat)
600 * setcr3()
601 * reset_kpti()
602 * *brktrap* due to fbt on reset_kpti:entry
603 *
604 * Here, we have the new hat's %cr3, but we haven't yet updated
605 * kf_kernel_cr3 (so it's currently kas's). So if we don't restore here,
606 * we'll stay on kas's cr3 value on returning from the trap: not good if
607 * we fault on a userspace address.
608 */
609 ENTRY_NP(kdi_pass_to_kernel)
610
	/* Caller pushed the cpusave-area pointer; mark this CPU as out */
611 popq %rdi /* cpusave */
612 movq $KDI_CPU_STATE_NONE, KRS_CPU_STATE(%rdi)
613 movq KRS_GREGS(%rdi), %rsp
614
	/*
	 * %rax is kdi_trap_pass()'s return value (1 or 2, per the block
	 * comment above).  2 means the original %cr3 stashed in
	 * KDIREG_CR3 must be restored before the kernel handler runs.
	 */
615 cmpq $2, %rax
616 jne no_restore_cr3
617 movq REG_OFF(KDIREG_CR3)(%rsp), %r11
618 movq %r11, %cr3
619
620 no_restore_cr3:
	/* Dispatch on the saved trap number */
621 movq REG_OFF(KDIREG_TRAPNO)(%rsp), %rdi
622
623 cmpq $T_SGLSTP, %rdi
624 je kdi_pass_dbgtrap /* single-step -> dbgtrap */
625 cmpq $T_BPTFLT, %rdi
626 je kdi_pass_brktrap /* breakpoint -> brktrap */
627 cmpq $T_DBGENTR, %rdi
628 je kdi_pass_invaltrap /* debugger entry -> invaltrap */
629 /*
630 * Hmm, unknown handler. Somebody forgot to update this when they
631 * added a new trap interposition... try to drop back into kmdb.
632 */
633 int $T_DBGENTR
634
	/*
	 * After KDI_RESTORE_REGS() no scratch registers remain, so the
	 * restore/pop/jump sequence must be a single straight-line macro.
	 */
635 #define CALL_TRAP_HANDLER(name) \
636 KDI_RESTORE_REGS(%rsp); \
637 /* Discard state, trapno, err */ \
638 addq $REG_OFF(KDIREG_RIP), %rsp; \
639 XPV_TRAP_PUSH; \
640 jmp %cs:name
641
642 kdi_pass_dbgtrap:
643 CALL_TRAP_HANDLER(dbgtrap)
644 /*NOTREACHED*/
645 kdi_pass_brktrap:
646 CALL_TRAP_HANDLER(brktrap)
647 /*NOTREACHED*/
648 kdi_pass_invaltrap:
649 CALL_TRAP_HANDLER(invaltrap)
650 /*NOTREACHED*/
651
652 SET_SIZE(kdi_pass_to_kernel)
653
654 /*
655 * A minimal version of mdboot(), to be used by the master CPU only.
656 */
657 ENTRY_NP(kdi_reboot)
658
	/*
	 * psm_shutdownf(AD_BOOT, A_SHUTDOWN): invoke the platform's
	 * shutdown hook through the psm_shutdownf function pointer
	 * (SysV args in %edi/%esi).
	 */
659 movl $AD_BOOT, %edi
660 movl $A_SHUTDOWN, %esi
661 call *psm_shutdownf
662 #if defined(__xpv)
	/* Under Xen, ask the hypervisor to reboot the domain */
663 movl $SHUTDOWN_reboot, %edi
664 call HYPERVISOR_shutdown
665 #else
	/* Bare metal: hard reset */
666 call reset
667 #endif
668 /*NOTREACHED*/
|