/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident   "%Z%%M% %I%     %E% SMI"

/*
 * The debugger/"PROM" interface layer
 *
 * The name makes more sense on SPARC.  In reality, these interfaces deal with
 * three things: setting break/watchpoints, stepping, and interfacing with the
 * KDI to set up kmdb's IDT handlers.
 */

#include <kmdb/kmdb_dpi_impl.h>
#include <kmdb/kmdb_kdi.h>
#include <kmdb/kmdb_umemglue.h>
#include <kmdb/kaif.h>
#include <kmdb/kmdb_io.h>
#include <kmdb/kaif_start.h>
#include <mdb/mdb_err.h>
#include <mdb/mdb_debug.h>
#include <mdb/mdb_isautil.h>
#include <mdb/mdb_io_impl.h>
#include <mdb/mdb_kreg_impl.h>
#include <mdb/mdb.h>

#include <sys/types.h>
#include <sys/bitmap.h>
#include <sys/termios.h>
#include <sys/kdi_impl.h>

/*
 * This is the area containing the saved state when we enter
 * via kmdb's IDT entries.
 */
kdi_cpusave_t   *kaif_cpusave;
int             kaif_ncpusave;
kdi_drreg_t     kaif_drreg;

uint32_t        kaif_waptmap;

int             kaif_trap_switch;

void (*kaif_modchg_cb)(struct modctl *, int);

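/*
 * Opcode bytes of interest when single-stepping.  The REX values bound the
 * 64-bit REX prefixes that may precede an instruction; the others identify
 * instructions that either can't be stepped (iret, int, sysret, sysexit) or
 * that need their EFLAGS manipulation emulated or patched around (cli, sti,
 * pushf, popf).
 */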
enum {
        M_SYSRET        = 0x07, /* after M_ESC */
        M_ESC           = 0x0f,
        M_SYSEXIT       = 0x35, /* after M_ESC */
        M_REX_LO        = 0x40, /* first REX prefix */
        M_REX_HI        = 0x4f, /* last REX prefix */
        M_PUSHF         = 0x9c, /* pushfl and pushfq */
        M_POPF          = 0x9d, /* popfl and popfq */
        M_INT3          = 0xcc,
        M_INTX          = 0xcd,
        M_INTO          = 0xce,
        M_IRET          = 0xcf,
        M_CLI           = 0xfa,
        M_STI           = 0xfb
};

#define KAIF_BREAKPOINT_INSTR   M_INT3

#define KAIF_WPPRIV2ID(wp)      (int)(uintptr_t)((wp)->wp_priv)

#ifdef __amd64
#define FLAGS_REG_NAME          "rflags"
#else
#define FLAGS_REG_NAME          "eflags"
#endif

/*
 * Called during normal debugger operation and during debugger faults.
 */
static void
kaif_enter_mon(void)
{
        char c;

        for (;;) {
                mdb_iob_printf(mdb.m_out,
                    "%s: Do you really want to reboot? (y/n) ",
                    mdb.m_pname);
                mdb_iob_flush(mdb.m_out);
                mdb_iob_clearlines(mdb.m_out);

                c = kmdb_getchar();

                if (c == 'n' || c == 'N' || c == CTRL('c'))
                        return;
                else if (c == 'y' || c == 'Y') {
                        mdb_iob_printf(mdb.m_out, "Rebooting...\n");

                        kmdb_dpi_reboot();
                }
        }
}

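/*
 * Translate a DPI CPU id into a pointer to that CPU's save area.
 * DPI_MASTER_CPUID selects the current master; any other id must be in range
 * and must refer to a CPU that is currently stopped in the debugger.
 */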
static kaif_cpusave_t *
kaif_cpuid2save(int cpuid)
{
        kaif_cpusave_t *save;

        if (cpuid == DPI_MASTER_CPUID)
                return (&kaif_cpusave[kaif_master_cpuid]);

        if (cpuid < 0 || cpuid >= kaif_ncpusave) {
                (void) set_errno(EINVAL);
                return (NULL);
        }

        save = &kaif_cpusave[cpuid];

        if (save->krs_cpu_state != KAIF_CPU_STATE_MASTER &&
            save->krs_cpu_state != KAIF_CPU_STATE_SLAVE) {
                (void) set_errno(EINVAL);
                return (NULL);
        }

        return (save);
}

static int
kaif_get_cpu_state(int cpuid)
{
        kaif_cpusave_t *save;

        if ((save = kaif_cpuid2save(cpuid)) == NULL)
                return (-1); /* errno is set for us */

        switch (save->krs_cpu_state) {
        case KAIF_CPU_STATE_MASTER:
                return (DPI_CPU_STATE_MASTER);
        case KAIF_CPU_STATE_SLAVE:
                return (DPI_CPU_STATE_SLAVE);
        default:
                return (set_errno(EINVAL));
        }
}

static int
kaif_get_master_cpuid(void)
{
        return (kaif_master_cpuid);
}

static mdb_tgt_gregset_t *
kaif_kdi_to_gregs(int cpuid)
{
        kaif_cpusave_t *save;

        if ((save = kaif_cpuid2save(cpuid)) == NULL)
                return (NULL); /* errno is set for us */

        /*
         * The saved registers are actually identical to an mdb_tgt_gregset,
         * so we can directly cast here.
         */
        return ((mdb_tgt_gregset_t *)save->krs_gregs);
}

static const mdb_tgt_gregset_t *
kaif_get_gregs(int cpuid)
{
        return (kaif_kdi_to_gregs(cpuid));
}

typedef struct kaif_reg_synonyms {
        const char *rs_syn;
        const char *rs_name;
} kaif_reg_synonyms_t;

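/*
 * Look up the storage for a named register in the master CPU's register set,
 * first translating the convenience names "pc", "sp", "fp", and "tt" into the
 * ISA-specific names listed in mdb_isa_kregs.
 */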
static kreg_t *
kaif_find_regp(const char *regname)
{
        static const kaif_reg_synonyms_t synonyms[] = {
#ifdef __amd64
            { "pc", "rip" },
            { "sp", "rsp" },
            { "fp", "rbp" },
#else
            { "pc", "eip" },
            { "sp", "esp" },
            { "fp", "ebp" },
#endif
            { "tt", "trapno" }
        };
        mdb_tgt_gregset_t *regs;
        int i;

        if ((regs = kaif_kdi_to_gregs(DPI_MASTER_CPUID)) == NULL)
                return (NULL);

        for (i = 0; i < sizeof (synonyms) / sizeof (synonyms[0]); i++) {
                if (strcmp(synonyms[i].rs_syn, regname) == 0)
                        regname = synonyms[i].rs_name;
        }

        for (i = 0; mdb_isa_kregs[i].rd_name != NULL; i++) {
                const mdb_tgt_regdesc_t *rd = &mdb_isa_kregs[i];

                if (strcmp(rd->rd_name, regname) == 0)
                        return (&regs->kregs[rd->rd_num]);
        }

        (void) set_errno(ENOENT);
        return (NULL);
}

/*ARGSUSED*/
static int
kaif_get_register(const char *regname, kreg_t *valp)
{
        kreg_t *regp;

        if ((regp = kaif_find_regp(regname)) == NULL)
                return (-1);

        *valp = *regp;

        return (0);
}

static int
kaif_set_register(const char *regname, kreg_t val)
{
        kreg_t *regp;

        if ((regp = kaif_find_regp(regname)) == NULL)
                return (-1);

        *regp = val;

        return (0);
}

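/*
 * Breakpoints are implemented by saving the instruction byte at the target
 * address (returned to the caller via instrp) and overwriting it with an
 * int3.  Disarming simply writes the saved byte back.
 */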
static int
kaif_brkpt_arm(uintptr_t addr, mdb_instr_t *instrp)
{
        mdb_instr_t bkpt = KAIF_BREAKPOINT_INSTR;

        if (mdb_tgt_vread(mdb.m_target, instrp, sizeof (mdb_instr_t), addr) !=
            sizeof (mdb_instr_t))
                return (-1); /* errno is set for us */

        if (mdb_tgt_vwrite(mdb.m_target, &bkpt, sizeof (mdb_instr_t), addr) !=
            sizeof (mdb_instr_t))
                return (-1); /* errno is set for us */

        return (0);
}

static int
kaif_brkpt_disarm(uintptr_t addr, mdb_instr_t instrp)
{
        if (mdb_tgt_vwrite(mdb.m_target, &instrp, sizeof (mdb_instr_t), addr) !=
            sizeof (mdb_instr_t))
                return (-1); /* errno is set for us */

        return (0);
}

/*
 * Intel watchpoints are even more fun than SPARC ones.  The Intel architecture
 * manuals refer to watchpoints as breakpoints.  For consistency with the
 * terminology used in other portions of kmdb, we will, however, refer to them
 * as watchpoints.
 *
 * Execute, data write, I/O read/write, and data read/write watchpoints are
 * supported by the hardware.  Execute watchpoints must be one byte in length,
 * and must be placed on the first byte of the instruction to be watched.
 * Lengths of other watchpoints are more varied.
 *
 * Given that we already have a breakpoint facility, and given the restrictions
 * placed on execute watchpoints, we're going to disallow the creation of
 * execute watchpoints.  The others will be fully supported.  See the Debugging
 * chapter in both the IA32 and AMD64 System Programming books for more details.
 */

#ifdef __amd64
#define WAPT_DATA_MAX_SIZE      8
#define WAPT_DATA_SIZES_MSG     "1, 2, 4, or 8"
#else
#define WAPT_DATA_MAX_SIZE      4
#define WAPT_DATA_SIZES_MSG     "1, 2, or 4"
#endif

static int
kaif_wapt_validate(kmdb_wapt_t *wp)
{
        if (wp->wp_type == DPI_WAPT_TYPE_IO) {
                if (wp->wp_wflags != (MDB_TGT_WA_R | MDB_TGT_WA_W)) {
                        warn("I/O port watchpoints must be read/write\n");
                        return (set_errno(EINVAL));
                }

                if (wp->wp_size != 1 && wp->wp_size != 2 && wp->wp_size != 4) {
                        warn("I/O watchpoint size must be 1, 2, or 4 bytes\n");
                        return (set_errno(EINVAL));
                }

        } else if (wp->wp_type == DPI_WAPT_TYPE_PHYS) {
                warn("physical address watchpoints are not supported on this "
                    "platform\n");
                return (set_errno(EMDB_TGTHWNOTSUP));

        } else {
                if (wp->wp_wflags != (MDB_TGT_WA_R | MDB_TGT_WA_W) &&
                    wp->wp_wflags != MDB_TGT_WA_W) {
                        warn("watchpoints must be read/write or write-only\n");
                        return (set_errno(EINVAL));
                }

                if ((wp->wp_size & -(wp->wp_size)) != wp->wp_size ||
                    wp->wp_size > WAPT_DATA_MAX_SIZE) {
                        warn("data watchpoint size must be " WAPT_DATA_SIZES_MSG
                            " bytes\n");
                        return (set_errno(EINVAL));
                }

        }

        if (wp->wp_addr & (wp->wp_size - 1)) {
                warn("%lu-byte watchpoints must be %lu-byte aligned\n",
                    (ulong_t)wp->wp_size, (ulong_t)wp->wp_size);
                return (set_errno(EINVAL));
        }

        return (0);
}

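/*
 * Reserve one of the hardware debug register slots (0 through KDI_MAXWPIDX)
 * for this watchpoint, recording the slot number in wp_priv.
 */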
static int
kaif_wapt_reserve(kmdb_wapt_t *wp)
{
        int id;

        for (id = 0; id <= KDI_MAXWPIDX; id++) {
                if (!BT_TEST(&kaif_waptmap, id)) {
                        /* found one */
                        BT_SET(&kaif_waptmap, id);
                        wp->wp_priv = (void *)(uintptr_t)id;
                        return (0);
                }
        }

        return (set_errno(EMDB_WPTOOMANY));
}

static void
kaif_wapt_release(kmdb_wapt_t *wp)
{
        int id = KAIF_WPPRIV2ID(wp);

        ASSERT(BT_TEST(&kaif_waptmap, id));
        BT_CLEAR(&kaif_waptmap, id);
}

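/*
 * Load the reserved debug register with the watchpoint's address, set the
 * corresponding length, type, and enable bits in the debug control register,
 * and push the new state out to the KDI.
 */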
/*ARGSUSED*/
static void
kaif_wapt_arm(kmdb_wapt_t *wp)
{
        uint_t rw;
        int hwid = KAIF_WPPRIV2ID(wp);

        ASSERT(BT_TEST(&kaif_waptmap, hwid));

        if (wp->wp_type == DPI_WAPT_TYPE_IO)
                rw = KREG_DRCTL_WP_IORW;
        else if (wp->wp_wflags & MDB_TGT_WA_R)
                rw = KREG_DRCTL_WP_RW;
        else if (wp->wp_wflags & MDB_TGT_WA_X)
                rw = KREG_DRCTL_WP_EXEC;
        else
                rw = KREG_DRCTL_WP_WONLY;

        kaif_drreg.dr_addr[hwid] = wp->wp_addr;

        kaif_drreg.dr_ctl &= ~KREG_DRCTL_WP_MASK(hwid);
        kaif_drreg.dr_ctl |= KREG_DRCTL_WP_LENRW(hwid, wp->wp_size - 1, rw);
        kaif_drreg.dr_ctl |= KREG_DRCTL_WPEN(hwid);
        kmdb_kdi_update_drreg(&kaif_drreg);
}

/*ARGSUSED*/
static void
kaif_wapt_disarm(kmdb_wapt_t *wp)
{
        int hwid = KAIF_WPPRIV2ID(wp);

        ASSERT(BT_TEST(&kaif_waptmap, hwid));

        kaif_drreg.dr_addr[hwid] = 0;
        kaif_drreg.dr_ctl &= ~(KREG_DRCTL_WP_MASK(hwid) |
            KREG_DRCTL_WPEN_MASK(hwid));
        kmdb_kdi_update_drreg(&kaif_drreg);
}

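/*
 * Return the number of CPUs whose debug status register shows this watchpoint
 * as having fired.
 */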
/*ARGSUSED*/
static int
kaif_wapt_match(kmdb_wapt_t *wp)
{
        int hwid = KAIF_WPPRIV2ID(wp);
        uint32_t mask = KREG_DRSTAT_WP_MASK(hwid);
        int n = 0;
        int i;

        ASSERT(BT_TEST(&kaif_waptmap, hwid));

        for (i = 0; i < kaif_ncpusave; i++)
                n += (kaif_cpusave[i].krs_dr.dr_stat & mask) != 0;

        return (n);
}

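/*
 * Single-step the master CPU over one instruction.  Instructions that can't
 * safely be stepped (iret, int, sysret, sysexit) are rejected, cli and sti
 * are emulated so that EFLAGS.IF can't actually change, and pushf/popf get
 * their stack image patched up so the step neither leaks our TF/IF
 * manipulation nor briefly re-enables interrupts.  Everything else is
 * executed with TF set and IF clear, after which EFLAGS is repaired.
 */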
static int
kaif_step(void)
{
        kreg_t pc, fl, oldfl, newfl, sp;
        mdb_tgt_addr_t npc;
        mdb_instr_t instr;
        int emulated = 0, rchk = 0;
        size_t pcoff = 0;

        (void) kmdb_dpi_get_register("pc", &pc);

        if ((npc = mdb_dis_nextins(mdb.m_disasm, mdb.m_target,
            MDB_TGT_AS_VIRT, pc)) == pc) {
                warn("failed to decode instruction at %a for step\n", pc);
                return (set_errno(EINVAL));
        }

        /*
         * Stepping behavior depends on the type of instruction.  It does not
         * depend on the presence of a REX prefix, as the action we take for a
         * given instruction doesn't currently vary for 32-bit instructions
         * versus their 64-bit counterparts.
         */
        do {
                if (mdb_tgt_vread(mdb.m_target, &instr, sizeof (mdb_instr_t),
                    pc + pcoff) != sizeof (mdb_instr_t)) {
                        warn("failed to read at %p for step",
                            (void *)(pc + pcoff));
                        return (-1);
                }
        } while (pcoff++, (instr >= M_REX_LO && instr <= M_REX_HI && !rchk++));

        switch (instr) {
        case M_IRET:
                warn("iret cannot be stepped\n");
                return (set_errno(EMDB_TGTNOTSUP));

        case M_INT3:
        case M_INTX:
        case M_INTO:
                warn("int cannot be stepped\n");
                return (set_errno(EMDB_TGTNOTSUP));

        case M_ESC:
                if (mdb_tgt_vread(mdb.m_target, &instr, sizeof (mdb_instr_t),
                    pc + pcoff) != sizeof (mdb_instr_t)) {
                        warn("failed to read at %p for step",
                            (void *)(pc + pcoff));
                        return (-1);
                }

                switch (instr) {
                case M_SYSRET:
                        warn("sysret cannot be stepped\n");
                        return (set_errno(EMDB_TGTNOTSUP));
                case M_SYSEXIT:
                        warn("sysexit cannot be stepped\n");
                        return (set_errno(EMDB_TGTNOTSUP));
                }
                break;

        /*
         * Some instructions need to be emulated.  We need to prevent direct
         * manipulations of EFLAGS, so we'll emulate cli, sti.  pushfl and
         * popfl also receive special handling, as they manipulate both EFLAGS
         * and %esp.
         */
        case M_CLI:
                (void) kmdb_dpi_get_register(FLAGS_REG_NAME, &fl);
                fl &= ~KREG_EFLAGS_IF_MASK;
                (void) kmdb_dpi_set_register(FLAGS_REG_NAME, fl);

                emulated = 1;
                break;

        case M_STI:
                (void) kmdb_dpi_get_register(FLAGS_REG_NAME, &fl);
                fl |= (1 << KREG_EFLAGS_IF_SHIFT);
                (void) kmdb_dpi_set_register(FLAGS_REG_NAME, fl);

                emulated = 1;
                break;

        case M_POPF:
                /*
                 * popfl will restore a pushed EFLAGS from the stack, and could
                 * in so doing cause IF to be turned on, if only for a brief
                 * period.  To avoid this, we'll secretly replace the stack's
                 * EFLAGS with our decaffeinated brand.  We'll then manually
                 * load our EFLAGS copy with the real version after the step.
                 */
                (void) kmdb_dpi_get_register("sp", &sp);
                (void) kmdb_dpi_get_register(FLAGS_REG_NAME, &fl);

                if (mdb_tgt_vread(mdb.m_target, &newfl, sizeof (kreg_t),
                    sp) != sizeof (kreg_t)) {
                        warn("failed to read " FLAGS_REG_NAME
                            " at %p for popfl step\n", (void *)sp);
                        return (set_errno(EMDB_TGTNOTSUP)); /* XXX ? */
                }

                fl = (fl & ~KREG_EFLAGS_IF_MASK) | KREG_EFLAGS_TF_MASK;

                if (mdb_tgt_vwrite(mdb.m_target, &fl, sizeof (kreg_t),
                    sp) != sizeof (kreg_t)) {
                        warn("failed to update " FLAGS_REG_NAME
                            " at %p for popfl step\n", (void *)sp);
                        return (set_errno(EMDB_TGTNOTSUP)); /* XXX ? */
                }
                break;
        }

        if (emulated) {
                (void) kmdb_dpi_set_register("pc", npc);
                return (0);
        }

        /* Do the step with IF off, and TF (step) on */
        (void) kmdb_dpi_get_register(FLAGS_REG_NAME, &oldfl);
        (void) kmdb_dpi_set_register(FLAGS_REG_NAME,
            ((oldfl | (1 << KREG_EFLAGS_TF_SHIFT)) & ~KREG_EFLAGS_IF_MASK));

        kmdb_dpi_resume_master(); /* ... there and back again ... */

        /* EFLAGS has now changed, and may require tuning */

        switch (instr) {
        case M_POPF:
                /*
                 * Use the EFLAGS we grabbed before the pop - see the pre-step
                 * M_POPF comment.
                 */
                (void) kmdb_dpi_set_register(FLAGS_REG_NAME, newfl);
                return (0);

        case M_PUSHF:
                /*
                 * We pushed our modified EFLAGS (with IF and TF turned off)
                 * onto the stack.  Replace the pushed version with our
                 * unmodified one.
                 */
                (void) kmdb_dpi_get_register("sp", &sp);

                if (mdb_tgt_vwrite(mdb.m_target, &oldfl, sizeof (kreg_t),
                    sp) != sizeof (kreg_t)) {
                        warn("failed to update pushed " FLAGS_REG_NAME
                            " at %p after pushfl step\n", (void *)sp);
                        return (set_errno(EMDB_TGTNOTSUP)); /* XXX ? */
                }

                /* Go back to using the EFLAGS we were using before the step */
                (void) kmdb_dpi_set_register(FLAGS_REG_NAME, oldfl);
                return (0);

        default:
                /*
                 * The stepped instruction may have altered EFLAGS.  We only
                 * really care about the value of IF, and we know the stepped
                 * instruction didn't alter it, so we can simply copy the
                 * pre-step value.  We'll also need to turn TF back off.
                 */
                (void) kmdb_dpi_get_register(FLAGS_REG_NAME, &fl);
                (void) kmdb_dpi_set_register(FLAGS_REG_NAME,
                    ((fl & ~(KREG_EFLAGS_TF_MASK|KREG_EFLAGS_IF_MASK)) |
                    (oldfl & KREG_EFLAGS_IF_MASK)));
                return (0);
        }
}

/*
 * The target has already configured the chip for branch step, leaving us to
 * actually make the machine go.  Due to a number of issues involving
 * the potential alteration of system state via instructions like sti, cli,
 * pushfl, and popfl, we're going to treat this like a normal system resume.
 * All CPUs will be released, on the kernel's IDT.  Our primary concern is
 * the alteration/storage of our TF'd EFLAGS via pushfl and popfl.  There's no
 * real workaround - we don't have opcode breakpoints - so the best we can do is
 * to ensure that the world won't end if someone does bad things to EFLAGS.
 *
 * Two things can happen:
 *  1. EFLAGS.TF may be cleared, either maliciously or via a popfl from saved
 *     state.  The CPU will continue execution beyond the branch, and will not
 *     reenter the debugger unless brought/sent in by other means.
 *  2. Someone may pushfl the TF'd EFLAGS, and may stash a copy of it
 *     somewhere.  When the saved version is popfl'd back into place, the
 *     debugger will be re-entered on a single-step trap.
 */
static void
kaif_step_branch(void)
{
        kreg_t fl;

        (void) kmdb_dpi_get_register(FLAGS_REG_NAME, &fl);
        (void) kmdb_dpi_set_register(FLAGS_REG_NAME,
            (fl | (1 << KREG_EFLAGS_TF_SHIFT)));

        kmdb_dpi_resume_master();

        (void) kmdb_dpi_set_register(FLAGS_REG_NAME, fl);
}

/*ARGSUSED*/
static uintptr_t
kaif_call(uintptr_t funcva, uint_t argc, const uintptr_t argv[])
{
        return (kaif_invoke(funcva, argc, argv));
}

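/*
 * Each CPU's save area holds a ring of KDI_NCRUMBS "crumbs", breadcrumbs left
 * by the low-level debugger entry code (CPU state, trap number, stack pointer,
 * flag word, and pc).  Read a single crumb from the kernel's address space and
 * print it.
 */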
static void
dump_crumb(kdi_crumb_t *krmp)
{
        kdi_crumb_t krm;

        if (mdb_vread(&krm, sizeof (kdi_crumb_t), (uintptr_t)krmp) !=
            sizeof (kdi_crumb_t)) {
                warn("failed to read crumb at %p", krmp);
                return;
        }

        mdb_printf("state: ");
        switch (krm.krm_cpu_state) {
        case KAIF_CPU_STATE_MASTER:
                mdb_printf("M");
                break;
        case KAIF_CPU_STATE_SLAVE:
                mdb_printf("S");
                break;
        default:
                mdb_printf("%d", krm.krm_cpu_state);
        }

        mdb_printf(" trapno %3d sp %08x flag %d pc %p %A\n",
            krm.krm_trapno, krm.krm_sp, krm.krm_flag, krm.krm_pc, krm.krm_pc);
}

static void
dump_crumbs(kaif_cpusave_t *save)
{
        int i;

        for (i = KDI_NCRUMBS; i > 0; i--) {
                uint_t idx = (save->krs_curcrumbidx + i) % KDI_NCRUMBS;
                dump_crumb(&save->krs_crumbs[idx]);
        }
}

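/*
 * Dump crumbs for a single crumb address, for one CPU, or (when neither is
 * specified) for every CPU that has entered the debugger.
 */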
static void
kaif_dump_crumbs(uintptr_t addr, int cpuid)
{
        int i;

        if (addr != NULL) {
                /* dump_crumb will protect us against bogus addresses */
                dump_crumb((kdi_crumb_t *)addr);

        } else if (cpuid != -1) {
                if (cpuid < 0 || cpuid >= kaif_ncpusave)
                        return;

                dump_crumbs(&kaif_cpusave[cpuid]);

        } else {
                for (i = 0; i < kaif_ncpusave; i++) {
                        kaif_cpusave_t *save = &kaif_cpusave[i];

                        if (save->krs_cpu_state == KAIF_CPU_STATE_NONE)
                                continue;

                        mdb_printf("%sCPU %d crumbs: (curidx %d)\n",
                            (i == 0 ? "" : "\n"), i, save->krs_curcrumbidx);

                        dump_crumbs(save);
                }
        }
}

static void
kaif_modchg_register(void (*func)(struct modctl *, int))
{
        kaif_modchg_cb = func;
}

static void
kaif_modchg_cancel(void)
{
        ASSERT(kaif_modchg_cb != NULL);

        kaif_modchg_cb = NULL;
}

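/*
 * Register a set of MSRs of interest with the KDI.  The caller's list
 * (terminated by an entry with msr_num == 0) is replicated once per CPU so
 * that each CPU's save area gets its own copy.
 */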
static void
kaif_msr_add(const kdi_msr_t *msrs)
{
        kdi_msr_t *save;
        size_t nr_msrs = 0;
        size_t i;

        while (msrs[nr_msrs].msr_num != 0)
                nr_msrs++;
        /* we want to copy the terminating kdi_msr_t too */
        nr_msrs++;

        save = mdb_zalloc(sizeof (kdi_msr_t) * nr_msrs * kaif_ncpusave,
            UM_SLEEP);

        for (i = 0; i < kaif_ncpusave; i++)
                bcopy(msrs, &save[nr_msrs * i], sizeof (kdi_msr_t) * nr_msrs);

        kmdb_kdi_set_debug_msrs(save);
}

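/*
 * Return the saved value of a readable MSR from the given CPU's save area, or
 * 0 if the MSR isn't in that CPU's list.
 */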
static uint64_t
kaif_msr_get(int cpuid, uint_t num)
{
        kdi_cpusave_t *save;
        kdi_msr_t *msr;
        int i;

        if ((save = kaif_cpuid2save(cpuid)) == NULL)
                return (-1); /* errno is set for us */

        msr = save->krs_msr;

        for (i = 0; msr[i].msr_num != 0; i++) {
                if (msr[i].msr_num == num && (msr[i].msr_type & KDI_MSR_READ))
                        return (msr[i].kdi_msr_val);
        }

        return (0);
}

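/*
 * Select the IDT a CPU runs on: passing NULL to kmdb_kdi_idt_switch selects
 * the debugger's own IDT, while passing a CPU's save area restores that CPU's
 * saved kernel IDT.
 */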
void
kaif_trap_set_debugger(void)
{
        kmdb_kdi_idt_switch(NULL);
}

void
kaif_trap_set_saved(kaif_cpusave_t *cpusave)
{
        kmdb_kdi_idt_switch(cpusave);
}

static void
kaif_vmready(void)
{
}

void
kaif_memavail(caddr_t base, size_t len)
{
        int ret;
        /*
         * In the unlikely event that someone is stepping through this routine,
         * we need to make sure that the KDI knows about the new range before
         * umem gets it.  That way the entry code can recognize stacks
         * allocated from the new region.
         */
        kmdb_kdi_memrange_add(base, len);
        ret = mdb_umem_add(base, len);
        ASSERT(ret == 0);
}

void
kaif_mod_loaded(struct modctl *modp)
{
        if (kaif_modchg_cb != NULL)
                kaif_modchg_cb(modp, 1);
}

void
kaif_mod_unloading(struct modctl *modp)
{
        if (kaif_modchg_cb != NULL)
                kaif_modchg_cb(modp, 0);
}

void
kaif_handle_fault(greg_t trapno, greg_t pc, greg_t sp, int cpuid)
{
        kmdb_dpi_handle_fault((kreg_t)trapno, (kreg_t)pc,
            (kreg_t)sp, cpuid);
}

static kdi_debugvec_t kaif_dvec = {
        NULL,                   /* dv_kctl_vmready */
        NULL,                   /* dv_kctl_memavail */
        NULL,                   /* dv_kctl_modavail */
        NULL,                   /* dv_kctl_thravail */
        kaif_vmready,
        kaif_memavail,
        kaif_mod_loaded,
        kaif_mod_unloading,
        kaif_handle_fault
};

void
kaif_kdi_entry(kdi_cpusave_t *cpusave)
{
        int ret = kaif_main_loop(cpusave);
        ASSERT(ret == KAIF_CPU_CMD_RESUME ||
            ret == KAIF_CPU_CMD_RESUME_MASTER);
}

/*ARGSUSED*/
void
kaif_activate(kdi_debugvec_t **dvecp, uint_t flags)
{
        kmdb_kdi_activate(kaif_kdi_entry, kaif_cpusave, kaif_ncpusave);
        *dvecp = &kaif_dvec;
}

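/*
 * One-time DPI initialization: allocate the per-CPU save areas and reset the
 * module-change callback and the watchpoint allocation map.
 */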
static int
kaif_init(kmdb_auxv_t *kav)
{
        /* Allocate the per-CPU save areas */
        kaif_cpusave = mdb_zalloc(sizeof (kaif_cpusave_t) * kav->kav_ncpu,
            UM_SLEEP);
        kaif_ncpusave = kav->kav_ncpu;

        kaif_modchg_cb = NULL;

        kaif_waptmap = 0;

        kaif_trap_switch = (kav->kav_flags & KMDB_AUXV_FL_NOTRPSWTCH) == 0;

        return (0);
}

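/*
 * The operations vector through which the rest of kmdb invokes this DPI.
 */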
dpi_ops_t kmdb_dpi_ops = {
        kaif_init,
        kaif_activate,
        kmdb_kdi_deactivate,
        kaif_enter_mon,
        kaif_modchg_register,
        kaif_modchg_cancel,
        kaif_get_cpu_state,
        kaif_get_master_cpuid,
        kaif_get_gregs,
        kaif_get_register,
        kaif_set_register,
        kaif_brkpt_arm,
        kaif_brkpt_disarm,
        kaif_wapt_validate,
        kaif_wapt_reserve,
        kaif_wapt_release,
        kaif_wapt_arm,
        kaif_wapt_disarm,
        kaif_wapt_match,
        kaif_step,
        kaif_step_branch,
        kaif_call,
        kaif_dump_crumbs,
        kaif_msr_add,
        kaif_msr_get,
};