/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 *
 * Copyright 2018 Joyent, Inc.
 */

#include <kmdb/kmdb_kvm.h>
#include <kmdb/kvm.h>
#include <kmdb/kmdb_kdi.h>
#include <kmdb/kmdb_promif.h>
#include <kmdb/kmdb_module.h>
#include <kmdb/kmdb_asmutil.h>
#include <mdb/mdb_types.h>
#include <mdb/mdb_conf.h>
#include <mdb/mdb_err.h>
#include <mdb/mdb_modapi.h>
#include <mdb/mdb_target_impl.h>
#include <mdb/mdb_debug.h>
#include <mdb/mdb_string.h>
#include <mdb/mdb_ctf.h>
#include <mdb/mdb_kreg_impl.h>
#include <mdb/mdb_ks.h>
#include <mdb/mdb.h>

#include <strings.h>
#include <dlfcn.h>
#include <sys/isa_defs.h>
#include <sys/kobj.h>
#include <sys/kobj_impl.h>
#include <sys/bitmap.h>
#include <vm/as.h>

static const char KMT_RTLD_NAME[] = "krtld";
static const char KMT_MODULE[] = "mdb_ks";
static const char KMT_CTFPARENT[] = "genunix";

static mdb_list_t kmt_defbp_list;	/* List of current deferred bp's */
static int kmt_defbp_lock;		/* For list, running kernel holds */
static uint_t kmt_defbp_modchg_isload;	/* Whether mod change is load/unload */
static struct modctl *kmt_defbp_modchg_modctl; /* modctl for defbp checking */
static uint_t kmt_defbp_num;		/* Number of referenced def'd bp's */
static int kmt_defbp_bpspec;		/* vespec for def'd bp activation bp */

static const mdb_se_ops_t kmt_brkpt_ops;
static const mdb_se_ops_t kmt_wapt_ops;

static void kmt_sync(mdb_tgt_t *);

typedef struct kmt_symarg {
	mdb_tgt_sym_f *sym_cb;		/* Caller's callback function */
	void *sym_data;			/* Callback function argument */
	uint_t sym_type;		/* Symbol type/binding filter */
	mdb_syminfo_t sym_info;		/* Symbol id and table id */
	const char *sym_obj;		/* Containing object */
} kmt_symarg_t;

typedef struct kmt_maparg {
	mdb_tgt_t *map_target;		/* Target used for mapping iter */
	mdb_tgt_map_f *map_cb;		/* Caller's callback function */
	void *map_data;			/* Callback function argument */
} kmt_maparg_t;

/*ARGSUSED*/
int
kmt_setflags(mdb_tgt_t *t, int flags)
{
	/*
	 * We only handle one flag (ALLOWIO), and we can't fail to set or clear
	 * it, so we just blindly replace the t_flags version with the one
	 * passed.
	 */
	t->t_flags = (t->t_flags & ~MDB_TGT_F_ALLOWIO) |
	    (flags & MDB_TGT_F_ALLOWIO);

	return (0);
}

/*ARGSUSED*/
const char *
kmt_name(mdb_tgt_t *t)
{
	return ("kmdb_kvm");
}

/*ARGSUSED*/
static const char *
kmt_platform(mdb_tgt_t *t)
{
	static char platform[SYS_NMLN];

	if (kmdb_dpi_get_state(NULL) == DPI_STATE_INIT)
		return (mdb_conf_platform());

	if (mdb_tgt_readsym(mdb.m_target, MDB_TGT_AS_VIRT, platform,
	    sizeof (platform), "unix", "platform") != sizeof (platform)) {
		warn("'platform' symbol is missing from kernel\n");
		return ("unknown");
	}

	return (platform);
}

static int
kmt_uname(mdb_tgt_t *t, struct utsname *utsp)
{
	return (mdb_tgt_readsym(t, MDB_TGT_AS_VIRT, utsp,
	    sizeof (struct utsname), MDB_TGT_OBJ_EXEC, "utsname"));
}

/*ARGSUSED*/
static int
kmt_dmodel(mdb_tgt_t *t)
{
	return (MDB_TGT_MODEL_NATIVE);
}

/*ARGSUSED*/
ssize_t
kmt_rw(mdb_tgt_t *t, void *buf, size_t nbytes, uint64_t addr,
    ssize_t (*rw)(void *, size_t, uint64_t))
{
	/*
	 * chunksz needs to be volatile because of the use of setjmp() in this
	 * function.
	 */
	volatile size_t chunksz;
	size_t n, ndone;
	jmp_buf *oldpcb = NULL;
	jmp_buf pcb;
	ssize_t res;

	kmdb_prom_check_interrupt();

	if (nbytes == 0)
		return (0);

	/*
	 * Try to process the entire buffer, as requested. If we catch a fault,
	 * try smaller chunks. This allows us to handle regions that cross
	 * mapping boundaries.
	 */
	chunksz = nbytes;
	ndone = 0;
	if (setjmp(pcb) != 0) {
		if (chunksz == 1) {
			/* We failed with the smallest chunk - give up */
			kmdb_dpi_restore_fault_hdlr(oldpcb);
			return (ndone > 0 ? ndone : -1); /* errno set for us */
		} else if (chunksz > 4)
			chunksz = 4;
		else
			chunksz = 1;
	}

	oldpcb = kmdb_dpi_set_fault_hdlr(&pcb);
	while (nbytes > 0) {
		n = MIN(chunksz, nbytes);

		if ((res = rw(buf, n, addr)) != n)
			return (res < 0 ? res : ndone + res);

		addr += n;
		nbytes -= n;
		ndone += n;
		buf = ((caddr_t)buf + n);
	}

	kmdb_dpi_restore_fault_hdlr(oldpcb);

	return (ndone);
}

static void
kmt_bcopy(const void *s1, void *s2, size_t n)
{
	/*
	 * We need to guarantee atomic accesses for certain sizes. bcopy won't
	 * make that guarantee, so we need to do it ourselves.
	 */
#ifdef _LP64
	if (n == 8 && ((uintptr_t)s1 & 7) == 0 && ((uintptr_t)s2 & 7) == 0)
		*(uint64_t *)s2 = *(uint64_t *)s1;
	else
#endif
	if (n == 4 && ((uintptr_t)s1 & 3) == 0 && ((uintptr_t)s2 & 3) == 0)
		*(uint32_t *)s2 = *(uint32_t *)s1;
	else if (n == 2 && ((uintptr_t)s1 & 1) == 0 && ((uintptr_t)s2 & 1) == 0)
		*(uint16_t *)s2 = *(uint16_t *)s1;
	else if (n == 1)
		*(uint8_t *)s2 = *(uint8_t *)s1;
	else
		bcopy(s1, s2, n);
}
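/*
 * kmt_reader and kmt_writer copy directly between the caller's buffer and
 * kernel virtual addresses; kmdb shares the kernel's address space, so no
 * translation is needed here.  Any fault taken during the copy is caught by
 * the handler that kmt_rw() installs around these calls.
 */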
static ssize_t
kmt_reader(void *buf, size_t nbytes, uint64_t addr)
{
	kmt_bcopy((void *)(uintptr_t)addr, buf, nbytes);
	return (nbytes);
}

ssize_t
kmt_writer(void *buf, size_t nbytes, uint64_t addr)
{
	kmt_bcopy(buf, (void *)(uintptr_t)addr, nbytes);
	return (nbytes);
}

/*ARGSUSED*/
static ssize_t
kmt_read(mdb_tgt_t *t, void *buf, size_t nbytes, uintptr_t addr)
{
	/*
	 * We don't want to allow reads of I/O-mapped memory.  Multi-page reads
	 * that cross into I/O-mapped memory should be restricted to the
	 * initial non-I/O region.  Reads that begin in I/O-mapped memory are
	 * failed outright.
	 */
	if (!(t->t_flags & MDB_TGT_F_ALLOWIO) &&
	    (nbytes = kmdb_kdi_range_is_nontoxic(addr, nbytes, 0)) == 0)
		return (set_errno(EMDB_NOMAP));

	return (kmt_rw(t, buf, nbytes, addr, kmt_reader));
}

/*ARGSUSED*/
static ssize_t
kmt_pread(mdb_tgt_t *t, void *buf, size_t nbytes, physaddr_t addr)
{
	return (kmt_rw(t, buf, nbytes, addr, kmdb_kdi_pread));
}

/*ARGSUSED*/
ssize_t
kmt_pwrite(mdb_tgt_t *t, const void *buf, size_t nbytes, physaddr_t addr)
{
	return (kmt_rw(t, (void *)buf, nbytes, addr, kmdb_kdi_pwrite));
}

static uintptr_t
kmt_read_kas(mdb_tgt_t *t)
{
	GElf_Sym sym;

	if (mdb_tgt_lookup_by_name(t, "unix", "kas", &sym, NULL) < 0) {
		warn("'kas' symbol is missing from kernel\n");
		(void) set_errno(EMDB_NOSYM);
		return (0);
	}

	return ((uintptr_t)sym.st_value);
}
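/*
 * Translate a virtual address to a physical address.  We try the PROM's
 * translation first; if that fails, we fall back to the platform-specific
 * platform_vtop() entry point in the unix dmod, if one is present.  Only
 * translations within kas (the kernel's address space) are supported.
 */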
static int
kmt_vtop(mdb_tgt_t *t, mdb_tgt_as_t as, uintptr_t va, physaddr_t *pap)
{
	mdb_module_t *mod;
	struct as *asp;
	mdb_var_t *v;

	switch ((uintptr_t)as) {
	case (uintptr_t)MDB_TGT_AS_PHYS:
	case (uintptr_t)MDB_TGT_AS_FILE:
	case (uintptr_t)MDB_TGT_AS_IO:
		return (set_errno(EINVAL));
	case (uintptr_t)MDB_TGT_AS_VIRT:
		if ((asp = (struct as *)kmt_read_kas(t)) == NULL)
			return (-1); /* errno is set for us */
		break;
	default:
		asp = (struct as *)as;

		/* We don't support non-kas vtop */
		if (asp != (struct as *)kmt_read_kas(t))
			return (set_errno(EMDB_TGTNOTSUP));
	}

	if (kmdb_prom_vtop(va, pap) == 0)
		return (0);

	if ((v = mdb_nv_lookup(&mdb.m_modules, "unix")) != NULL &&
	    (mod = mdb_nv_get_cookie(v)) != NULL) {
		int (*fptr)(uintptr_t, struct as *, physaddr_t *);

		fptr = (int (*)(uintptr_t, struct as *, physaddr_t *))
		    dlsym(mod->mod_hdl, "platform_vtop");

		if ((fptr != NULL) && ((*fptr)(va, asp, pap) == 0))
			return (0);
	}

	return (set_errno(EMDB_NOMAP));
}

/*ARGSUSED*/
static int
kmt_cpuregs(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	const mdb_tgt_gregset_t *gregs;
	intptr_t cpuid = DPI_MASTER_CPUID;
	int i;

	if (flags & DCMD_ADDRSPEC) {
		if (argc != 0)
			return (DCMD_USAGE);
		if ((cpuid = mdb_cpu2cpuid(addr)) < 0) {
			(void) set_errno(EMDB_NOMAP);
			mdb_warn("failed to find cpuid for cpu at %p", addr);
			return (DCMD_ERR);
		}
	}

	i = mdb_getopts(argc, argv,
	    'c', MDB_OPT_UINTPTR, &cpuid,
	    NULL);

	argc -= i;
	argv += i;

	if (argc != 0)
		return (DCMD_USAGE);

	if ((gregs = kmdb_dpi_get_gregs(cpuid)) == NULL) {
		warn("failed to retrieve registers for cpu %d", (int)cpuid);
		return (DCMD_ERR);
	}

	kmt_printregs(gregs);

	return (DCMD_OK);
}

static int
kmt_regs(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	if (flags & DCMD_ADDRSPEC)
		return (DCMD_USAGE);

	return (kmt_cpuregs(addr, flags, argc, argv));
}

static int
kmt_cpustack_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	intptr_t cpuid = DPI_MASTER_CPUID;
	uint_t verbose = 0;
	int i;

	if (flags & DCMD_ADDRSPEC) {
		if ((cpuid = mdb_cpu2cpuid(addr)) < 0) {
			(void) set_errno(EMDB_NOMAP);
			mdb_warn("failed to find cpuid for cpu at %p", addr);
			return (DCMD_ERR);
		}
		flags &= ~DCMD_ADDRSPEC;
	}

	i = mdb_getopts(argc, argv,
	    'c', MDB_OPT_UINTPTR, &cpuid,
	    'v', MDB_OPT_SETBITS, 1, &verbose,
	    NULL);

	argc -= i;
	argv += i;

	return (kmt_cpustack(addr, flags, argc, argv, cpuid, verbose));
}

/*
 * Lasciate ogne speranza, voi ch'intrate.
 */
static int
kmt_call(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	uintptr_t *call_argv, rval;
	int parse_strings = 1;
	GElf_Sym sym;
	jmp_buf *oldpcb = NULL;
	jmp_buf pcb;
	int i;

	if (!(flags & DCMD_ADDRSPEC))
		return (DCMD_USAGE);

	if (mdb_tgt_lookup_by_addr(mdb.m_target, addr, MDB_TGT_SYM_EXACT,
	    NULL, 0, &sym, NULL) == 0 && GELF_ST_TYPE(sym.st_info) !=
	    STT_FUNC) {
		warn("%a is not a function\n", addr);
		return (DCMD_ERR);
	}

	if (argc > 1 && argv[0].a_type == MDB_TYPE_STRING &&
	    strcmp(argv[0].a_un.a_str, "-s") == 0) {
		parse_strings = 0;
		argc--;
		argv++;
	}

	call_argv = mdb_alloc(sizeof (uintptr_t) * argc, UM_SLEEP);

	for (i = 0; i < argc; i++) {
		switch (argv[i].a_type) {
		case MDB_TYPE_STRING:
			/*
			 * mdb_strtoull doesn't return on error, so we have to
			 * pre-check strings suspected to contain numbers.
			 */
			if (parse_strings && strisbasenum(argv[i].a_un.a_str)) {
				call_argv[i] = (uintptr_t)mdb_strtoull(
				    argv[i].a_un.a_str);
			} else
				call_argv[i] = (uintptr_t)argv[i].a_un.a_str;

			break;

		case MDB_TYPE_IMMEDIATE:
			call_argv[i] = argv[i].a_un.a_val;
			break;

		default:
			mdb_free(call_argv,
			    sizeof (uintptr_t) * argc);
			return (DCMD_USAGE);
		}
	}

	if (setjmp(pcb) != 0) {
		warn("call failed: caught a trap\n");

		kmdb_dpi_restore_fault_hdlr(oldpcb);
		mdb_free(call_argv, sizeof (uintptr_t) * argc);
		return (DCMD_ERR);
	}

	oldpcb = kmdb_dpi_set_fault_hdlr(&pcb);
	rval = kmdb_dpi_call(addr, argc, call_argv);
	kmdb_dpi_restore_fault_hdlr(oldpcb);

	if (flags & DCMD_PIPE_OUT) {
		mdb_printf("%p\n", rval);
	} else {
		/* pretty-print the results */
		mdb_printf("%p = %a(", rval, addr);
		for (i = 0; i < argc; i++) {
			if (i > 0)
				mdb_printf(", ");
			if (argv[i].a_type == MDB_TYPE_STRING) {
				/* I'm ashamed but amused */
				char *quote = &("\""[parse_strings &&
				    strisbasenum(argv[i].a_un.a_str)]);

				mdb_printf("%s%s%s", quote, argv[i].a_un.a_str,
				    quote);
			} else
				mdb_printf("%p", argv[i].a_un.a_val);
		}
		mdb_printf(");\n");
	}

	mdb_free(call_argv, sizeof (uintptr_t) * argc);

	return (DCMD_OK);
}
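/*
 * Illustrative ::call usage (the function name and arguments here are just
 * examples):
 *
 *	> some_function::call 1 0t10 "a string"
 *
 * Numeric-looking string arguments are parsed as numbers unless -s is given
 * as the first argument.
 */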
/*ARGSUSED*/
int
kmt_dump_crumbs(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	intptr_t cpu = -1;

	if (flags & DCMD_ADDRSPEC) {
		if (argc != 0)
			return (DCMD_USAGE);
	} else {
		addr = 0;

		if (mdb_getopts(argc, argv,
		    'c', MDB_OPT_UINTPTR, &cpu,
		    NULL) != argc)
			return (DCMD_USAGE);
	}

	kmdb_dpi_dump_crumbs(addr, cpu);

	return (DCMD_OK);
}

/*ARGSUSED*/
static int
kmt_noducttape(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	int a = 0;

	return (a/a);
}

static int
kmt_dmod_status(char *msg, int state)
{
	kmdb_modctl_t *kmc;
	mdb_var_t *v;
	int first = 1, n = 0;

	mdb_nv_rewind(&mdb.m_dmodctl);
	while ((v = mdb_nv_advance(&mdb.m_dmodctl)) != NULL) {
		kmc = MDB_NV_COOKIE(v);

		if (kmc->kmc_state != state)
			continue;

		n++;

		if (msg != NULL) {
			if (first) {
				mdb_printf(msg, NULL);
				first = 0;
			}

			mdb_printf(" %s", kmc->kmc_modname);
		}
	}

	if (!first && msg != NULL)
		mdb_printf("\n");

	return (n);
}

/*ARGSUSED*/
static int
kmt_status_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	struct utsname uts;
	char uuid[37];
	kreg_t tt;

	if (mdb_tgt_readsym(mdb.m_target, MDB_TGT_AS_VIRT, &uts, sizeof (uts),
	    "unix", "utsname") != sizeof (uts)) {
		warn("failed to read 'utsname' struct from kernel\n");
		bzero(&uts, sizeof (uts));
		(void) strcpy(uts.nodename, "unknown machine");
	}

	mdb_printf("debugging live kernel (%d-bit) on %s\n",
	    (int)(sizeof (void *) * NBBY),
	    (*uts.nodename == '\0' ? "(not set)" : uts.nodename));
	mdb_printf("operating system: %s %s (%s)\n",
	    uts.release, uts.version, uts.machine);

	if (mdb_tgt_readsym(mdb.m_target, MDB_TGT_AS_VIRT, uuid, sizeof (uuid),
	    "genunix", "dump_osimage_uuid") != sizeof (uuid)) {
		warn("failed to read 'dump_osimage_uuid' string from kernel\n");
		(void) strcpy(uuid, "(error)");
	} else if (*uuid == '\0') {
		(void) strcpy(uuid, "(not set)");
	} else if (uuid[36] != '\0') {
		(void) strcpy(uuid, "(invalid)");
	}
	mdb_printf("image uuid: %s\n", uuid);

	mdb_printf("DTrace state: %s\n", (kmdb_kdi_dtrace_get_state() ==
	    KDI_DTSTATE_DTRACE_ACTIVE ? "active (debugger breakpoints cannot "
	    "be armed)" : "inactive"));

	(void) kmdb_dpi_get_register("tt", &tt);
	mdb_printf("stopped on: %s\n", kmt_trapname(tt));

	(void) kmt_dmod_status("pending dmod loads:", KMDB_MC_STATE_LOADING);
	(void) kmt_dmod_status("pending dmod unloads:",
	    KMDB_MC_STATE_UNLOADING);

	return (DCMD_OK);
}

/*ARGSUSED*/
static int
kmt_switch(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	if (!(flags & DCMD_ADDRSPEC) || argc != 0)
		return (DCMD_USAGE);

	if (kmdb_dpi_switch_master((int)addr) < 0) {
		warn("failed to switch to CPU %d", (int)addr);
		return (DCMD_ERR);
	}

	return (DCMD_OK);
}

static const mdb_dcmd_t kmt_dcmds[] = {
	{ "$c", "?[cnt]", "print stack backtrace", kmt_stack },
	{ "$C", "?[cnt]", "print stack backtrace", kmt_stackv },
	{ "$r", NULL, "print general-purpose registers", kmt_regs },
	{ "$?", NULL, "print status and registers", kmt_regs },
	{ ":x", ":", "change the active CPU", kmt_switch },
	{ "call", ":[arg ...]", "call a kernel function", kmt_call },
	{ "cpustack", "?[-v] [-c cpuid] [cnt]", "print stack backtrace for a "
	    "specific CPU", kmt_cpustack_dcmd },
	{ "cpuregs", "?[-c cpuid]", "print general-purpose registers for a "
	    "specific CPU", kmt_cpuregs },
	{ "crumbs", NULL, NULL, kmt_dump_crumbs },
#if defined(__i386) || defined(__amd64)
	{ "in", ":[-L len]", "read from I/O port", kmt_in_dcmd },
	{ "out", ":[-L len] val", "write to I/O port", kmt_out_dcmd },
	{ "rdmsr", ":", "read an MSR", kmt_rdmsr },
	{ "wrmsr", ": val", "write an MSR", kmt_wrmsr },
	{ "rdpcicfg", ": bus dev func", "read a register in PCI config space",
	    kmt_rdpcicfg },
	{ "wrpcicfg", ": bus dev func val", "write a register in PCI config "
	    "space", kmt_wrpcicfg },
#endif
	{ "noducttape", NULL, NULL, kmt_noducttape },
	{ "regs", NULL, "print general-purpose registers", kmt_regs },
	{ "stack", "?[cnt]", "print stack backtrace", kmt_stack },
	{ "stackregs", "?", "print stack backtrace and registers", kmt_stackr },
	{ "status", NULL, "print summary of current target", kmt_status_dcmd },
	{ "switch", ":", "change the active CPU", kmt_switch },
	{ NULL }
};
static uintmax_t
kmt_reg_disc_get(const mdb_var_t *v)
{
	mdb_tgt_reg_t r = 0;

	(void) mdb_tgt_getareg(MDB_NV_COOKIE(v), 0, mdb_nv_get_name(v), &r);

	return (r);
}

static void
kmt_reg_disc_set(mdb_var_t *v, uintmax_t r)
{
	if (mdb_tgt_putareg(MDB_NV_COOKIE(v), 0, mdb_nv_get_name(v), r) == -1)
		warn("failed to modify %%%s register", mdb_nv_get_name(v));
}

static const mdb_nv_disc_t kmt_reg_disc = {
	kmt_reg_disc_set,
	kmt_reg_disc_get
};

/*ARGSUSED*/
static int
kmt_getareg(mdb_tgt_t *t, mdb_tgt_tid_t tid, const char *rname,
    mdb_tgt_reg_t *rp)
{
	kreg_t val;

	if (kmdb_dpi_get_register(rname, &val) < 0)
		return (set_errno(EMDB_BADREG));

	*rp = val;
	return (0);
}

/*ARGSUSED*/
static int
kmt_putareg(mdb_tgt_t *t, mdb_tgt_tid_t tid, const char *rname, mdb_tgt_reg_t r)
{
	if (kmdb_dpi_set_register(rname, r) < 0)
		return (set_errno(EMDB_BADREG));

	return (0);
}

static void
kmt_mod_destroy(kmt_module_t *km)
{
	if (km->km_name != NULL)
		strfree(km->km_name);
	if (km->km_symtab != NULL)
		mdb_gelf_symtab_destroy(km->km_symtab);
	if (km->km_ctfp != NULL)
		mdb_ctf_close(km->km_ctfp);
}

static kmt_module_t *
kmt_mod_create(mdb_tgt_t *t, struct modctl *ctlp, char *name)
{
	kmt_module_t *km = mdb_zalloc(sizeof (kmt_module_t), UM_SLEEP);
	struct module *mod;

	km->km_name = mdb_alloc(strlen(name) + 1, UM_SLEEP);
	(void) strcpy(km->km_name, name);

	bcopy(ctlp, &km->km_modctl, sizeof (struct modctl));

	if (mdb_tgt_vread(t, &km->km_module, sizeof (struct module),
	    (uintptr_t)km->km_modctl.mod_mp) != sizeof (struct module))
		goto create_module_cleanup;
	mod = &km->km_module;

	if (mod->symhdr != NULL && mod->strhdr != NULL && mod->symtbl != NULL &&
	    mod->strings != NULL) {
		mdb_gelf_ehdr_to_gehdr(&mod->hdr, &km->km_ehdr);

		km->km_symtab = mdb_gelf_symtab_create_raw(&km->km_ehdr,
		    mod->symhdr, mod->symtbl, mod->strhdr, mod->strings,
		    MDB_TGT_SYMTAB);

		km->km_symtab_va = mod->symtbl;
		km->km_strtab_va = mod->strings;

		if (mdb_tgt_vread(t, &km->km_symtab_hdr, sizeof (Shdr),
		    (uintptr_t)mod->symhdr) != sizeof (Shdr) ||
		    mdb_tgt_vread(t, &km->km_strtab_hdr, sizeof (Shdr),
		    (uintptr_t)mod->strhdr) != sizeof (Shdr))
			goto create_module_cleanup;
	}

	/*
	 * We don't want everyone rooting around in the module structure, so we
	 * make copies of the interesting members.
	 */
	km->km_text_va = (uintptr_t)mod->text;
	km->km_text_size = mod->text_size;
	km->km_data_va = (uintptr_t)mod->data;
	km->km_data_size = mod->data_size;
	km->km_bss_va = (uintptr_t)mod->bss;
	km->km_bss_size = mod->bss_size;
	km->km_ctf_va = mod->ctfdata;
	km->km_ctf_size = mod->ctfsize;

	if (mod->flags & KOBJ_PRIM)
		km->km_flags |= KM_F_PRIMARY;

	return (km);

create_module_cleanup:
	warn("failed to read module %s\n", name);
	kmt_mod_destroy(km);
	return (NULL);
}

static void
kmt_mod_remove(kmt_data_t *kmt, kmt_module_t *km)
{
	mdb_var_t *v = mdb_nv_lookup(&kmt->kmt_modules, km->km_name);

	ASSERT(v != NULL);

	mdb_dprintf(MDB_DBG_KMOD, "removing module %s\n", km->km_name);

	mdb_list_delete(&kmt->kmt_modlist, km);
	mdb_nv_remove(&kmt->kmt_modules, v);
	kmt_mod_destroy(km);
}
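/*
 * The debugger's view of the kernel's module list is refreshed with a simple
 * mark-and-sweep: kmt_modlist_update_cb() marks (km_seen) every module it
 * still finds on the kernel's list and creates entries for new ones;
 * kmt_modlist_update() then removes any module we knew about that wasn't
 * marked on this pass.
 */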
static int
kmt_modlist_update_cb(struct modctl *modp, void *arg)
{
	mdb_tgt_t *t = arg;
	kmt_data_t *kmt = t->t_data;
	kmt_module_t *km;
	mdb_var_t *v;
	char name[MAXNAMELEN];

	if (mdb_tgt_readstr(t, MDB_TGT_AS_VIRT, name, MAXNAMELEN,
	    (uintptr_t)modp->mod_modname) <= 0) {
		warn("failed to read module name at %p",
		    (void *)modp->mod_modname);
	}

	/* We only care about modules that are actually loaded */
	if (!kmdb_kdi_mod_isloaded(modp))
		return (0);

	/*
	 * Skip the modules we already know about and that haven't
	 * changed since last time we were here.
	 */
	if ((v = mdb_nv_lookup(&kmt->kmt_modules, name)) != NULL) {
		km = MDB_NV_COOKIE(v);

		if (kmdb_kdi_mod_haschanged(&km->km_modctl, &km->km_module,
		    modp, modp->mod_mp)) {
			/*
			 * The module has changed since last we saw it. For
			 * safety, remove our old version, and treat it as a
			 * new module.
			 */
			mdb_dprintf(MDB_DBG_KMOD, "stutter module %s\n", name);
			kmt_mod_remove(kmt, km);
		} else {
			km->km_seen = 1;
			return (0);
		}
	}

	mdb_dprintf(MDB_DBG_KMOD, "found new module %s\n", name);

	if ((km = kmt_mod_create(t, modp, name)) != NULL) {
		mdb_list_append(&kmt->kmt_modlist, km);
		(void) mdb_nv_insert(&kmt->kmt_modules, name, NULL,
		    (uintptr_t)km, 0);
		km->km_seen = 1;
	}

	return (0);
}

static void
kmt_modlist_update(mdb_tgt_t *t)
{
	kmt_data_t *kmt = t->t_data;
	kmt_module_t *km, *kmn;

	if (kmdb_kdi_mod_iter(kmt_modlist_update_cb, t) < 0) {
		warn("failed to complete update of kernel module list\n");
		return;
	}

	km = mdb_list_next(&kmt->kmt_modlist);
	while (km != NULL) {
		kmn = mdb_list_next(km);

		if (km->km_seen == 1) {
			/* Reset the mark for next time */
			km->km_seen = 0;
		} else {
			/*
			 * We didn't see it on the kernel's module list, so
			 * remove it from our view of the world.
			 */
			kmt_mod_remove(kmt, km);
		}

		km = kmn;
	}
}

static void
kmt_periodic(mdb_tgt_t *t)
{
	(void) mdb_tgt_status(t, &t->t_status);
}

int
kmt_lookup_by_addr(mdb_tgt_t *t, uintptr_t addr, uint_t flags,
    char *buf, size_t nbytes, GElf_Sym *symp, mdb_syminfo_t *sip)
{
	kmt_data_t *kmt = t->t_data;
	kmt_module_t *km = mdb_list_next(&kmt->kmt_modlist);
	kmt_module_t *sym_km = NULL;
	kmt_module_t prmod;
	GElf_Sym sym;
	uint_t symid;
	const char *name;

	/*
	 * We look through the private symbols (if any), then through the
	 * module symbols.  We can simplify the loop if we pretend the private
	 * symbols come from a module.
	 */
	if (mdb.m_prsym != NULL) {
		bzero(&prmod, sizeof (kmt_module_t));
		prmod.km_name = "<<<prmod>>>";
		prmod.km_symtab = mdb.m_prsym;
		prmod.km_list.ml_next = (mdb_list_t *)km;
		km = &prmod;
	}

	/* Symbol resolution isn't available during initialization */
	if (kmdb_dpi_get_state(NULL) == DPI_STATE_INIT)
		return (set_errno(EMDB_NOSYM));

	for (; km != NULL; km = mdb_list_next(km)) {
		if (km != &prmod && !kmt->kmt_symavail)
			continue;

		if (km->km_symtab == NULL)
			continue;

		if (mdb_gelf_symtab_lookup_by_addr(km->km_symtab, addr, flags,
		    buf, nbytes, symp, &sip->sym_id) != 0 ||
		    symp->st_value == 0)
			continue;

		if (flags & MDB_TGT_SYM_EXACT) {
			sym_km = km;
			goto found;
		}

		/*
		 * If this is the first match we've found, or if this symbol is
		 * closer to the specified address than the last one we found,
		 * use it.
		 */
		if (sym_km == NULL || mdb_gelf_sym_closer(symp, &sym, addr)) {
			sym_km = km;
			sym = *symp;
			symid = sip->sym_id;
		}
	}

	/*
	 * kmdb dmods are normal kernel modules, loaded by krtld as such. To
	 * avoid polluting modinfo, and to keep from confusing the module
	 * subsystem (many dmods have the same names as real kernel modules),
	 * kmdb keeps their modctls separate, and doesn't allow their loading
	 * to be broadcast via the krtld module load/unload mechanism. As a
	 * result, kmdb_kvm doesn't find out about them, and can't turn their
	 * addresses into symbols. This can be most inconvenient during
	 * debugger faults, as the dmod frames will show up without names.
	 * We weren't able to turn the requested address into a symbol, so
	 * we'll take a spin through the dmods, trying to match our address
	 * against their symbols.
	 */
	if (sym_km == NULL) {
		return (kmdb_module_lookup_by_addr(addr, flags, buf, nbytes,
		    symp, sip));
	}

	*symp = sym;
	sip->sym_id = symid;

found:
	/*
	 * Once we've found something, copy the final name into the caller's
	 * buffer and prefix it with the load object name if appropriate.
	 */
	name = mdb_gelf_sym_name(sym_km->km_symtab, symp);

	if (sym_km == &prmod) {
		if (buf != NULL) {
			(void) strncpy(buf, name, nbytes);
			buf[nbytes - 1] = '\0';
		}
		sip->sym_table = MDB_TGT_PRVSYM;
	} else {
		if (buf != NULL) {
			if (sym_km->km_flags & KM_F_PRIMARY) {
				(void) strncpy(buf, name, nbytes);
				buf[nbytes - 1] = '\0';
			} else {
				(void) mdb_snprintf(buf, nbytes, "%s`%s",
				    sym_km->km_name, name);
			}
		}
		sip->sym_table = MDB_TGT_SYMTAB;
	}

	return (0);
}
static int
kmt_lookup_by_name(mdb_tgt_t *t, const char *obj, const char *name,
    GElf_Sym *symp, mdb_syminfo_t *sip)
{
	kmt_data_t *kmt = t->t_data;
	kmt_module_t *km;
	mdb_var_t *v;
	GElf_Sym sym;
	uint_t symid;
	int n;

	if (!kmt->kmt_symavail)
		return (set_errno(EMDB_NOSYM));

	switch ((uintptr_t)obj) {
	case (uintptr_t)MDB_TGT_OBJ_EXEC:
	case (uintptr_t)MDB_TGT_OBJ_EVERY:
		km = mdb_list_next(&kmt->kmt_modlist);
		n = mdb_nv_size(&kmt->kmt_modules);
		break;

	case (uintptr_t)MDB_TGT_OBJ_RTLD:
		obj = kmt->kmt_rtld_name;
		/*FALLTHROUGH*/

	default:
		/*
		 * If this is a request for a dmod symbol, let kmdb_module
		 * handle it.
		 */
		if (obj != NULL && strncmp(obj, "DMOD`", 5) == 0) {
			return (kmdb_module_lookup_by_name(obj + 5, name,
			    symp, sip));
		}

		if ((v = mdb_nv_lookup(&kmt->kmt_modules, obj)) == NULL)
			return (set_errno(EMDB_NOOBJ));

		km = mdb_nv_get_cookie(v);
		n = 1;
	}

	/*
	 * kmdb's kvm target is at a bit of a disadvantage compared to mdb's
	 * kvm target when it comes to global symbol lookups.  mdb has ksyms,
	 * which hides pesky things like symbols that are undefined in unix,
	 * but which are defined in genunix.  We don't have such a facility -
	 * we simply iterate through the modules, looking for a given symbol
	 * in each.  Unless we're careful, we'll return the undef in the
	 * aforementioned case.
	 */
	for (; n > 0; n--, km = mdb_list_next(km)) {
		if (mdb_gelf_symtab_lookup_by_name(km->km_symtab, name,
		    &sym, &symid) == 0 && sym.st_shndx != SHN_UNDEF)
			break;
	}

	if (n == 0)
		return (set_errno(EMDB_NOSYM));

found:
	bcopy(&sym, symp, sizeof (GElf_Sym));
	sip->sym_id = symid;
	sip->sym_table = MDB_TGT_SYMTAB;

	return (0);
}

static int
kmt_symtab_func(void *data, const GElf_Sym *sym, const char *name, uint_t id)
{
	kmt_symarg_t *arg = data;

	if (mdb_tgt_sym_match(sym, arg->sym_type)) {
		arg->sym_info.sym_id = id;

		return (arg->sym_cb(arg->sym_data, sym, name, &arg->sym_info,
		    arg->sym_obj));
	}

	return (0);
}

static void
kmt_symtab_iter(mdb_gelf_symtab_t *gst, uint_t type, const char *obj,
    mdb_tgt_sym_f *cb, void *p)
{
	kmt_symarg_t arg;

	arg.sym_cb = cb;
	arg.sym_data = p;
	arg.sym_type = type;
	arg.sym_info.sym_table = gst->gst_tabid;
	arg.sym_obj = obj;

	mdb_gelf_symtab_iter(gst, kmt_symtab_func, &arg);
}

static int
kmt_symbol_iter(mdb_tgt_t *t, const char *obj, uint_t which, uint_t type,
    mdb_tgt_sym_f *cb, void *data)
{
	kmt_data_t *kmt = t->t_data;
	kmt_module_t *km;

	mdb_gelf_symtab_t *symtab = NULL;
	mdb_var_t *v;

	if (which == MDB_TGT_DYNSYM)
		return (set_errno(EMDB_TGTNOTSUP));

	switch ((uintptr_t)obj) {
	case (uintptr_t)MDB_TGT_OBJ_EXEC:
	case (uintptr_t)MDB_TGT_OBJ_EVERY:
		mdb_nv_rewind(&kmt->kmt_modules);
		while ((v = mdb_nv_advance(&kmt->kmt_modules)) != NULL) {
			km = mdb_nv_get_cookie(v);

			if (km->km_symtab != NULL) {
				kmt_symtab_iter(km->km_symtab, type,
				    km->km_name, cb, data);
			}
		}
		return (0);

	case (uintptr_t)MDB_TGT_OBJ_RTLD:
		obj = kmt->kmt_rtld_name;
		/*FALLTHROUGH*/

	default:
		if (strncmp(obj, "DMOD`", 5) == 0) {
			return (kmdb_module_symbol_iter(obj + 5, type,
			    cb, data));
		}

		if ((v = mdb_nv_lookup(&kmt->kmt_modules, obj)) == NULL)
			return (set_errno(EMDB_NOOBJ));
		km = mdb_nv_get_cookie(v);

		symtab = km->km_symtab;
	}

	if (symtab != NULL)
		kmt_symtab_iter(symtab, type, obj, cb, data);

	return (0);
}
static int
kmt_mapping_walk(uintptr_t addr, const void *data, kmt_maparg_t *marg)
{
	/*
	 * This is a bit sketchy but avoids problematic compilation of this
	 * target against the current VM implementation.  Now that we have
	 * vmem, we can make this less broken and more informative by changing
	 * this code to invoke the vmem walker in the near future.
	 */
	const struct kmt_seg {
		caddr_t s_base;
		size_t s_size;
	} *segp = (const struct kmt_seg *)data;

	mdb_map_t map;
	GElf_Sym sym;
	mdb_syminfo_t info;

	map.map_base = (uintptr_t)segp->s_base;
	map.map_size = segp->s_size;
	map.map_flags = MDB_TGT_MAP_R | MDB_TGT_MAP_W | MDB_TGT_MAP_X;

	if (kmt_lookup_by_addr(marg->map_target, addr, MDB_TGT_SYM_EXACT,
	    map.map_name, MDB_TGT_MAPSZ, &sym, &info) == -1) {

		(void) mdb_iob_snprintf(map.map_name, MDB_TGT_MAPSZ,
		    "%lr", addr);
	}

	return (marg->map_cb(marg->map_data, &map, map.map_name));
}

static int
kmt_mapping_iter(mdb_tgt_t *t, mdb_tgt_map_f *func, void *private)
{
	kmt_maparg_t m;
	uintptr_t kas;

	m.map_target = t;
	m.map_cb = func;
	m.map_data = private;

	if ((kas = kmt_read_kas(t)) == 0)
		return (-1); /* errno is set for us */

	return (mdb_pwalk("seg", (mdb_walk_cb_t)kmt_mapping_walk, &m, kas));
}

static const mdb_map_t *
kmt_mod_to_map(kmt_module_t *km, mdb_map_t *map)
{
	(void) strncpy(map->map_name, km->km_name, MDB_TGT_MAPSZ);
	map->map_name[MDB_TGT_MAPSZ - 1] = '\0';
	map->map_base = km->km_text_va;
	map->map_size = km->km_text_size;
	map->map_flags = MDB_TGT_MAP_R | MDB_TGT_MAP_W | MDB_TGT_MAP_X;

	return (map);
}

static int
kmt_object_iter(mdb_tgt_t *t, mdb_tgt_map_f *func, void *private)
{
	kmt_data_t *kmt = t->t_data;
	kmt_module_t *km;
	mdb_map_t m;

	for (km = mdb_list_next(&kmt->kmt_modlist); km != NULL;
	    km = mdb_list_next(km)) {
		if (func(private, kmt_mod_to_map(km, &m), km->km_name) == -1)
			break;
	}

	return (0);
}

static const mdb_map_t *
kmt_addr_to_map(mdb_tgt_t *t, uintptr_t addr)
{
	kmt_data_t *kmt = t->t_data;
	kmt_module_t *km;

	for (km = mdb_list_next(&kmt->kmt_modlist); km != NULL;
	    km = mdb_list_next(km)) {
		if (addr - km->km_text_va < km->km_text_size ||
		    addr - km->km_data_va < km->km_data_size ||
		    addr - km->km_bss_va < km->km_bss_size)
			return (kmt_mod_to_map(km, &kmt->kmt_map));
	}

	(void) set_errno(EMDB_NOMAP);
	return (NULL);
}

static kmt_module_t *
kmt_module_by_name(kmt_data_t *kmt, const char *name)
{
	kmt_module_t *km;

	for (km = mdb_list_next(&kmt->kmt_modlist); km != NULL;
	    km = mdb_list_next(km)) {
		if (strcmp(name, km->km_name) == 0)
			return (km);
	}

	return (NULL);
}

static const mdb_map_t *
kmt_name_to_map(mdb_tgt_t *t, const char *name)
{
	kmt_data_t *kmt = t->t_data;
	kmt_module_t *km;
	mdb_map_t m;

	/*
	 * If name is MDB_TGT_OBJ_EXEC, return the first module on the list,
	 * which will be unix since we keep kmt_modlist in load order.
	 */
	if (name == MDB_TGT_OBJ_EXEC) {
		return (kmt_mod_to_map(mdb_list_next(&kmt->kmt_modlist),
		    &m));
	}

	if (name == MDB_TGT_OBJ_RTLD)
		name = kmt->kmt_rtld_name;

	if ((km = kmt_module_by_name(kmt, name)) != NULL)
		return (kmt_mod_to_map(km, &m));

	(void) set_errno(EMDB_NOOBJ);
	return (NULL);
}
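/*
 * Most kernel modules' CTF containers name a parent container (typically
 * genunix's).  When we open a child container we also load its parent, as
 * named by ctf_parent_name(), and import the parent's types so that lookups
 * against the child resolve completely.
 */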
static ctf_file_t *
kmt_load_ctfdata(mdb_tgt_t *t, kmt_module_t *km)
{
	kmt_data_t *kmt = t->t_data;
	int err;

	if (km->km_ctfp != NULL)
		return (km->km_ctfp);

	if (km->km_ctf_va == NULL || km->km_symtab == NULL) {
		(void) set_errno(EMDB_NOCTF);
		return (NULL);
	}

	if ((km->km_ctfp = mdb_ctf_bufopen(km->km_ctf_va, km->km_ctf_size,
	    km->km_symtab_va, &km->km_symtab_hdr, km->km_strtab_va,
	    &km->km_strtab_hdr, &err)) == NULL) {
		(void) set_errno(ctf_to_errno(err));
		return (NULL);
	}

	mdb_dprintf(MDB_DBG_KMOD, "loaded %lu bytes of CTF data for %s\n",
	    (ulong_t)km->km_ctf_size, km->km_name);

	if (ctf_parent_name(km->km_ctfp) != NULL) {
		mdb_var_t *v;

		if ((v = mdb_nv_lookup(&kmt->kmt_modules,
		    ctf_parent_name(km->km_ctfp))) != NULL) {
			kmt_module_t *pm = mdb_nv_get_cookie(v);

			if (pm->km_ctfp == NULL)
				(void) kmt_load_ctfdata(t, pm);

			if (pm->km_ctfp != NULL && ctf_import(km->km_ctfp,
			    pm->km_ctfp) == CTF_ERR) {
				warn("failed to import parent types into "
				    "%s: %s\n", km->km_name,
				    ctf_errmsg(ctf_errno(km->km_ctfp)));
			}
		} else {
			warn("failed to load CTF data for %s - parent %s not "
			    "loaded\n", km->km_name,
			    ctf_parent_name(km->km_ctfp));
		}
	}

	return (km->km_ctfp);
}

ctf_file_t *
kmt_addr_to_ctf(mdb_tgt_t *t, uintptr_t addr)
{
	kmt_data_t *kmt = t->t_data;
	kmt_module_t *km;

	for (km = mdb_list_next(&kmt->kmt_modlist); km != NULL;
	    km = mdb_list_next(km)) {
		if (addr - km->km_text_va < km->km_text_size ||
		    addr - km->km_data_va < km->km_data_size ||
		    addr - km->km_bss_va < km->km_bss_size)
			return (kmt_load_ctfdata(t, km));
	}

	return (kmdb_module_addr_to_ctf(addr));
}

ctf_file_t *
kmt_name_to_ctf(mdb_tgt_t *t, const char *name)
{
	kmt_data_t *kt = t->t_data;
	kmt_module_t *km;

	if (name == MDB_TGT_OBJ_EXEC) {
		name = KMT_CTFPARENT;
	} else if (name == MDB_TGT_OBJ_RTLD) {
		name = kt->kmt_rtld_name;
	} else if (strncmp(name, "DMOD`", 5) == 0) {
		/* Request for CTF data for a DMOD symbol */
		return (kmdb_module_name_to_ctf(name + 5));
	}

	if ((km = kmt_module_by_name(kt, name)) != NULL)
		return (kmt_load_ctfdata(t, km));

	(void) set_errno(EMDB_NOOBJ);
	return (NULL);
}

/*ARGSUSED*/
static int
kmt_status(mdb_tgt_t *t, mdb_tgt_status_t *tsp)
{
	int state;

	bzero(tsp, sizeof (mdb_tgt_status_t));

	switch ((state = kmdb_dpi_get_state(NULL))) {
	case DPI_STATE_INIT:
		tsp->st_state = MDB_TGT_RUNNING;
		tsp->st_pc = 0;
		break;

	case DPI_STATE_STOPPED:
		tsp->st_state = MDB_TGT_STOPPED;

		(void) kmdb_dpi_get_register("pc", &tsp->st_pc);
		break;

	case DPI_STATE_FAULTED:
		tsp->st_state = MDB_TGT_STOPPED;

		(void) kmdb_dpi_get_register("pc", &tsp->st_pc);

		tsp->st_flags |= MDB_TGT_ISTOP;
		break;

	case DPI_STATE_LOST:
		tsp->st_state = MDB_TGT_LOST;

		(void) kmdb_dpi_get_register("pc", &tsp->st_pc);
		break;
	}

	mdb_dprintf(MDB_DBG_KMOD, "kmt_status, dpi: %d tsp: %d, pc = %p %A\n",
	    state, tsp->st_state, (void *)tsp->st_pc, tsp->st_pc);

	return (0);
}
/*
 * Invoked when kmt_defbp_enter_debugger is called, this routine activates and
 * deactivates deferred breakpoints in response to module load and unload
 * events.
 */
/*ARGSUSED*/
static void
kmt_defbp_event(mdb_tgt_t *t, int vid, void *private)
{
	if (kmt_defbp_modchg_isload) {
		if (!mdb_tgt_sespec_activate_all(t) &&
		    (mdb.m_flags & MDB_FL_BPTNOSYMSTOP)) {
			/*
			 * We weren't able to activate the breakpoints.
			 * If so requested, we'll return without calling
			 * continue, thus throwing the user into the debugger.
			 */
			return;
		}

	} else {
		mdb_sespec_t *sep, *nsep;
		const mdb_map_t *map, *bpmap;
		mdb_map_t modmap;

		if ((map = kmt_addr_to_map(t,
		    (uintptr_t)kmt_defbp_modchg_modctl->mod_text)) == NULL) {
			warn("module unload notification for unknown module %s",
			    kmt_defbp_modchg_modctl->mod_modname);
			return; /* drop into the debugger */
		}

		bcopy(map, &modmap, sizeof (mdb_map_t));

		for (sep = mdb_list_next(&t->t_active); sep; sep = nsep) {
			nsep = mdb_list_next(sep);

			if (sep->se_ops == &kmt_brkpt_ops) {
				kmt_brkpt_t *kb = sep->se_data;

				if ((bpmap = kmt_addr_to_map(t,
				    kb->kb_addr)) == NULL ||
				    (bpmap->map_base == modmap.map_base &&
				    bpmap->map_size == modmap.map_size)) {
					mdb_tgt_sespec_idle_one(t, sep,
					    EMDB_NOMAP);
				}
			}
		}
	}

	(void) mdb_tgt_continue(t, NULL);
}

static void
kmt_defbp_enter_debugger(void)
{
	/*
	 * The debugger places a breakpoint here. We can't have a simple
	 * nop function here, because GCC knows much more than we do, and
	 * will optimize away the call to it.
	 */
	(void) get_fp();
}

/*
 * This routine is called while the kernel is running. It attempts to determine
 * whether any deferred breakpoints exist for the module being changed (loaded
 * or unloaded). If any such breakpoints exist, the debugger will be entered to
 * process them.
 */
static void
kmt_defbp_modchg(struct modctl *mctl, int isload)
{
	kmt_defbp_t *dbp;

	kmt_defbp_lock = 1;

	for (dbp = mdb_list_next(&kmt_defbp_list); dbp;
	    dbp = mdb_list_next(dbp)) {
		if (!dbp->dbp_ref)
			continue;

		if (strcmp(mctl->mod_modname, dbp->dbp_objname) == 0) {
			/*
			 * Activate the breakpoint
			 */
			kmt_defbp_modchg_isload = isload;
			kmt_defbp_modchg_modctl = mctl;

			kmt_defbp_enter_debugger();
			break;
		}
	}

	kmt_defbp_lock = 0;
}
/*ARGSUSED*/
static int
kmt_continue(mdb_tgt_t *t, mdb_tgt_status_t *tsp)
{
	int n;

	kmdb_dpi_resume();

	/*
	 * The order of the following two calls is important. If there are
	 * load acks on the work queue, we'll initialize the dmods they
	 * represent. This will involve a call to _mdb_init, which may very
	 * well result in a symbol lookup. If we haven't resynced our view
	 * of symbols with the current state of the world, this lookup could
	 * end very badly. We therefore make sure to sync before processing
	 * the work queue.
	 */
	kmt_sync(t);
	kmdb_dpi_process_work_queue();

	if (kmdb_kdi_get_unload_request())
		t->t_flags |= MDB_TGT_F_UNLOAD;

	(void) mdb_tgt_status(t, &t->t_status);

	if ((n = kmt_dmod_status(NULL, KMDB_MC_STATE_LOADING) +
	    kmt_dmod_status(NULL, KMDB_MC_STATE_UNLOADING)) != 0) {
		mdb_warn("%d dmod load%c/unload%c pending\n", n,
		    "s"[n == 1], "s"[n == 1]);
	}

	return (0);
}

/*ARGSUSED*/
static int
kmt_step(mdb_tgt_t *t, mdb_tgt_status_t *tsp)
{
	int rc;

	if ((rc = kmdb_dpi_step()) == 0)
		(void) mdb_tgt_status(t, &t->t_status);

	return (rc);
}

static int
kmt_defbp_activate(mdb_tgt_t *t)
{
	kmdb_dpi_modchg_register(kmt_defbp_modchg);

	/*
	 * The routines that add and arm breakpoints will check for the proper
	 * DTrace state, but they'll just put this breakpoint on the idle list
	 * if DTrace is active. It'll correctly move to the active list when
	 * DTrace deactivates, but that's insufficient for our purposes -- we
	 * need to do extra processing at that point. We won't get to do said
	 * processing with a normal idle->active transition, so we just won't
	 * add it until we're sure that it'll stick.
	 */

	if (kmdb_kdi_dtrace_get_state() == KDI_DTSTATE_DTRACE_ACTIVE)
		return (set_errno(EMDB_DTACTIVE));

	kmt_defbp_bpspec = mdb_tgt_add_vbrkpt(t,
	    (uintptr_t)kmt_defbp_enter_debugger,
	    MDB_TGT_SPEC_HIDDEN, kmt_defbp_event, NULL);

	return (0);
}

static void
kmt_defbp_deactivate(mdb_tgt_t *t)
{
	kmdb_dpi_modchg_cancel();

	if (kmt_defbp_bpspec != 0) {
		if (t != NULL)
			(void) mdb_tgt_vespec_delete(t, kmt_defbp_bpspec);

		kmt_defbp_bpspec = 0;
	}
}

static kmt_defbp_t *
kmt_defbp_create(mdb_tgt_t *t, const char *objname, const char *symname)
{
	kmt_defbp_t *dbp = mdb_alloc(sizeof (kmt_defbp_t), UM_SLEEP);

	mdb_dprintf(MDB_DBG_KMOD, "defbp_create %s`%s\n", objname, symname);

	dbp->dbp_objname = strdup(objname);
	dbp->dbp_symname = strdup(symname);
	dbp->dbp_ref = 1;

	kmt_defbp_num++;

	if (kmt_defbp_num == 1 || kmt_defbp_bpspec == 0) {
		if (kmt_defbp_activate(t) < 0)
			warn("failed to activate deferred breakpoints");
	}

	mdb_list_append(&kmt_defbp_list, dbp);

	return (dbp);
}

static void
kmt_defbp_destroy(kmt_defbp_t *dbp)
{
	mdb_dprintf(MDB_DBG_KMOD, "defbp_destroy %s`%s\n", dbp->dbp_objname,
	    dbp->dbp_symname);

	mdb_list_delete(&kmt_defbp_list, dbp);

	strfree(dbp->dbp_objname);
	strfree(dbp->dbp_symname);
	mdb_free(dbp, sizeof (kmt_defbp_t));
}
static void
kmt_defbp_prune_common(int all)
{
	kmt_defbp_t *dbp, *ndbp;

	/* We can't remove items from the list while the driver is using it. */
	if (kmt_defbp_lock)
		return;

	for (dbp = mdb_list_next(&kmt_defbp_list); dbp != NULL; dbp = ndbp) {
		ndbp = mdb_list_next(dbp);

		if (!all && dbp->dbp_ref)
			continue;

		kmt_defbp_destroy(dbp);
	}
}

static void
kmt_defbp_prune(void)
{
	kmt_defbp_prune_common(0);
}

static void
kmt_defbp_destroy_all(void)
{
	kmt_defbp_prune_common(1);
}

static void
kmt_defbp_delete(mdb_tgt_t *t, kmt_defbp_t *dbp)
{
	dbp->dbp_ref = 0;

	ASSERT(kmt_defbp_num > 0);
	kmt_defbp_num--;

	if (kmt_defbp_num == 0)
		kmt_defbp_deactivate(t);

	kmt_defbp_prune();
}

static int
kmt_brkpt_ctor(mdb_tgt_t *t, mdb_sespec_t *sep, void *args)
{
	mdb_tgt_status_t tsp;
	kmt_bparg_t *ka = args;
	kmt_brkpt_t *kb;
	GElf_Sym s;
	mdb_instr_t instr;

	(void) mdb_tgt_status(t, &tsp);
	if (tsp.st_state != MDB_TGT_RUNNING && tsp.st_state != MDB_TGT_STOPPED)
		return (set_errno(EMDB_NOPROC));

	if (ka->ka_symbol != NULL) {
		if (mdb_tgt_lookup_by_scope(t, ka->ka_symbol, &s, NULL) == -1) {
			if (errno != EMDB_NOOBJ && !(errno == EMDB_NOSYM &&
			    !(mdb.m_flags & MDB_FL_BPTNOSYMSTOP))) {
				warn("breakpoint %s activation failed",
				    ka->ka_symbol);
			}
			return (-1); /* errno is set for us */
		}

		ka->ka_addr = (uintptr_t)s.st_value;
	}

#ifdef __sparc
	if (ka->ka_addr & 3)
		return (set_errno(EMDB_BPALIGN));
#endif

	if (mdb_vread(&instr, sizeof (instr), ka->ka_addr) != sizeof (instr))
		return (-1); /* errno is set for us */

	if (kmdb_kdi_dtrace_get_state() == KDI_DTSTATE_DTRACE_ACTIVE)
		warn("breakpoint will not arm until DTrace is inactive\n");

	kb = mdb_zalloc(sizeof (kmt_brkpt_t), UM_SLEEP);
	kb->kb_addr = ka->ka_addr;
	sep->se_data = kb;

	return (0);
}

/*ARGSUSED*/
static void
kmt_brkpt_dtor(mdb_tgt_t *t, mdb_sespec_t *sep)
{
	mdb_free(sep->se_data, sizeof (kmt_brkpt_t));
}

/*ARGSUSED*/
static char *
kmt_brkpt_info(mdb_tgt_t *t, mdb_sespec_t *sep, mdb_vespec_t *vep,
    mdb_tgt_spec_desc_t *sp, char *buf, size_t nbytes)
{
	uintptr_t addr = 0;

	if (vep != NULL) {
		kmt_bparg_t *ka = vep->ve_args;

		if (ka->ka_symbol != NULL) {
			(void) mdb_iob_snprintf(buf, nbytes, "stop at %s",
			    ka->ka_symbol);
		} else {
			(void) mdb_iob_snprintf(buf, nbytes, "stop at %a",
			    ka->ka_addr);
			addr = ka->ka_addr;
		}

	} else {
		addr = ((kmt_brkpt_t *)sep->se_data)->kb_addr;
		(void) mdb_iob_snprintf(buf, nbytes, "stop at %a", addr);
	}

	sp->spec_base = addr;
	sp->spec_size = sizeof (mdb_instr_t);

	return (buf);
}

static int
kmt_brkpt_secmp(mdb_tgt_t *t, mdb_sespec_t *sep, void *args)
{
	kmt_brkpt_t *kb = sep->se_data;
	kmt_bparg_t *ka = args;
	GElf_Sym sym;

	if (ka->ka_symbol != NULL) {
		return (mdb_tgt_lookup_by_scope(t, ka->ka_symbol,
		    &sym, NULL) == 0 && sym.st_value == kb->kb_addr);
	}

	return (ka->ka_addr == kb->kb_addr);
}

/*ARGSUSED*/
static int
kmt_brkpt_vecmp(mdb_tgt_t *t, mdb_vespec_t *vep, void *args)
{
	kmt_bparg_t *ka1 = vep->ve_args;
	kmt_bparg_t *ka2 = args;

	if (ka1->ka_symbol != NULL && ka2->ka_symbol != NULL)
		return (strcmp(ka1->ka_symbol, ka2->ka_symbol) == 0);

	if (ka1->ka_symbol == NULL && ka2->ka_symbol == NULL)
		return (ka1->ka_addr == ka2->ka_addr);

	return (0); /* fail if one is symbolic, other is an explicit address */
}
static int
kmt_brkpt_arm(mdb_tgt_t *t, mdb_sespec_t *sep)
{
	kmt_data_t *kmt = t->t_data;
	kmt_brkpt_t *kb = sep->se_data;
	int rv;

	if (kmdb_kdi_dtrace_get_state() == KDI_DTSTATE_DTRACE_ACTIVE)
		return (set_errno(EMDB_DTACTIVE));

	if ((rv = kmdb_dpi_brkpt_arm(kb->kb_addr, &kb->kb_oinstr)) != 0)
		return (rv);

	if (kmt->kmt_narmedbpts++ == 0)
		(void) kmdb_kdi_dtrace_set(KDI_DTSET_KMDB_BPT_ACTIVATE);

	return (0);
}

static int
kmt_brkpt_disarm(mdb_tgt_t *t, mdb_sespec_t *sep)
{
	kmt_data_t *kmt = t->t_data;
	kmt_brkpt_t *kb = sep->se_data;
	int rv;

	ASSERT(kmdb_kdi_dtrace_get_state() == KDI_DTSTATE_KMDB_BPT_ACTIVE);

	if ((rv = kmdb_dpi_brkpt_disarm(kb->kb_addr, kb->kb_oinstr)) != 0)
		return (rv);

	if (--kmt->kmt_narmedbpts == 0)
		(void) kmdb_kdi_dtrace_set(KDI_DTSET_KMDB_BPT_DEACTIVATE);

	return (0);
}

/*
 * Determine whether the specified sespec is an armed watchpoint that overlaps
 * with the given breakpoint and has the given flags set. We use this to find
 * conflicts with breakpoints, below.
 */
static int
kmt_wp_overlap(mdb_sespec_t *sep, kmt_brkpt_t *kb, int flags)
{
	const kmdb_wapt_t *wp = sep->se_data;

	return (sep->se_state == MDB_TGT_SPEC_ARMED &&
	    sep->se_ops == &kmt_wapt_ops && (wp->wp_wflags & flags) &&
	    kb->kb_addr - wp->wp_addr < wp->wp_size);
}

/*
 * We step over breakpoints using our single-stepper. If a conflicting
 * watchpoint is present, we must temporarily remove it before stepping over
 * the breakpoint so we don't immediately re-trigger the watchpoint. We know
 * the watchpoint has already triggered on our trap instruction as part of
 * fetching it. Before we return, we must re-install any disabled watchpoints.
 */
static int
kmt_brkpt_cont(mdb_tgt_t *t, mdb_sespec_t *sep, mdb_tgt_status_t *tsp)
{
	kmt_brkpt_t *kb = sep->se_data;
	int status = -1;
	int error;

	for (sep = mdb_list_next(&t->t_active); sep; sep = mdb_list_next(sep)) {
		if (kmt_wp_overlap(sep, kb, MDB_TGT_WA_X))
			(void) kmdb_dpi_wapt_disarm(sep->se_data);
	}

	if (kmdb_dpi_brkpt_disarm(kb->kb_addr, kb->kb_oinstr) == 0 &&
	    kmt_step(t, tsp) == 0)
		status = kmt_status(t, tsp);

	error = errno; /* save errno from disarm, step, or status */

	for (sep = mdb_list_next(&t->t_active); sep; sep = mdb_list_next(sep)) {
		if (kmt_wp_overlap(sep, kb, MDB_TGT_WA_X))
			kmdb_dpi_wapt_arm(sep->se_data);
	}

	(void) set_errno(error);
	return (status);
}

/*ARGSUSED*/
static int
kmt_brkpt_match(mdb_tgt_t *t, mdb_sespec_t *sep, mdb_tgt_status_t *tsp)
{
	kmt_brkpt_t *kb = sep->se_data;
	int state, why;
	kreg_t pc;

	state = kmdb_dpi_get_state(&why);
	(void) kmdb_dpi_get_register("pc", &pc);

	return (state == DPI_STATE_FAULTED && why == DPI_STATE_WHY_BKPT &&
	    pc == kb->kb_addr);
}

static const mdb_se_ops_t kmt_brkpt_ops = {
	kmt_brkpt_ctor,		/* se_ctor */
	kmt_brkpt_dtor,		/* se_dtor */
	kmt_brkpt_info,		/* se_info */
	kmt_brkpt_secmp,	/* se_secmp */
	kmt_brkpt_vecmp,	/* se_vecmp */
	kmt_brkpt_arm,		/* se_arm */
	kmt_brkpt_disarm,	/* se_disarm */
	kmt_brkpt_cont,		/* se_cont */
	kmt_brkpt_match		/* se_match */
};
static int
kmt_wapt_ctor(mdb_tgt_t *t, mdb_sespec_t *sep, void *args)
{
	mdb_tgt_status_t tsp;
	kmdb_wapt_t *vwp = args;
	kmdb_wapt_t *swp;

	(void) mdb_tgt_status(t, &tsp);
	if (tsp.st_state != MDB_TGT_RUNNING && tsp.st_state != MDB_TGT_STOPPED)
		return (set_errno(EMDB_NOPROC));

	swp = mdb_alloc(sizeof (kmdb_wapt_t), UM_SLEEP);
	bcopy(vwp, swp, sizeof (kmdb_wapt_t));

	if (kmdb_dpi_wapt_reserve(swp) < 0) {
		mdb_free(swp, sizeof (kmdb_wapt_t));
		return (-1); /* errno is set for us */
	}

	sep->se_data = swp;

	return (0);
}

/*ARGSUSED*/
static void
kmt_wapt_dtor(mdb_tgt_t *t, mdb_sespec_t *sep)
{
	kmdb_wapt_t *wp = sep->se_data;

	kmdb_dpi_wapt_release(wp);
	mdb_free(wp, sizeof (kmdb_wapt_t));
}

/*ARGSUSED*/
static char *
kmt_wapt_info(mdb_tgt_t *t, mdb_sespec_t *sep, mdb_vespec_t *vep,
    mdb_tgt_spec_desc_t *sp, char *buf, size_t nbytes)
{
	kmdb_wapt_t *wp = vep != NULL ? vep->ve_args : sep->se_data;
	const char *fmt;
	char desc[24];

	ASSERT(wp->wp_wflags != 0);
	desc[0] = '\0';

	switch (wp->wp_wflags) {
	case MDB_TGT_WA_R:
		(void) strcat(desc, "/read");
		break;
	case MDB_TGT_WA_W:
		(void) strcat(desc, "/write");
		break;
	case MDB_TGT_WA_X:
		(void) strcat(desc, "/exec");
		break;
	default:
		if (wp->wp_wflags & MDB_TGT_WA_R)
			(void) strcat(desc, "/r");
		if (wp->wp_wflags & MDB_TGT_WA_W)
			(void) strcat(desc, "/w");
		if (wp->wp_wflags & MDB_TGT_WA_X)
			(void) strcat(desc, "/x");
	}

	switch (wp->wp_type) {
	case DPI_WAPT_TYPE_PHYS:
		fmt = "stop on %s of phys [%p, %p)";
		break;

	case DPI_WAPT_TYPE_VIRT:
		fmt = "stop on %s of [%la, %la)";
		break;

	case DPI_WAPT_TYPE_IO:
		if (wp->wp_size == 1)
			fmt = "stop on %s of I/O port %p";
		else
			fmt = "stop on %s of I/O port [%p, %p)";
		break;
	}

	(void) mdb_iob_snprintf(buf, nbytes, fmt, desc + 1, wp->wp_addr,
	    wp->wp_addr + wp->wp_size);

	sp->spec_base = wp->wp_addr;
	sp->spec_size = wp->wp_size;

	return (buf);
}

/*ARGSUSED*/
static int
kmt_wapt_secmp(mdb_tgt_t *t, mdb_sespec_t *sep, void *args)
{
	kmdb_wapt_t *wp1 = sep->se_data;
	kmdb_wapt_t *wp2 = args;

	return (wp1->wp_addr == wp2->wp_addr && wp1->wp_size == wp2->wp_size &&
	    wp1->wp_wflags == wp2->wp_wflags);
}

/*ARGSUSED*/
static int
kmt_wapt_vecmp(mdb_tgt_t *t, mdb_vespec_t *vep, void *args)
{
	kmdb_wapt_t *wp1 = vep->ve_args;
	kmdb_wapt_t *wp2 = args;

	return (wp1->wp_addr == wp2->wp_addr && wp1->wp_size == wp2->wp_size &&
	    wp1->wp_wflags == wp2->wp_wflags);
}

/*ARGSUSED*/
static int
kmt_wapt_arm(mdb_tgt_t *t, mdb_sespec_t *sep)
{
	kmdb_dpi_wapt_arm(sep->se_data);

	return (0);
}

/*ARGSUSED*/
static int
kmt_wapt_disarm(mdb_tgt_t *t, mdb_sespec_t *sep)
{
	kmdb_dpi_wapt_disarm(sep->se_data);

	return (0);
}
/*
 * Determine whether the specified sespec is an armed breakpoint at the given
 * %pc. We use this to find conflicts with watchpoints below.
 */
static int
kmt_bp_overlap(mdb_sespec_t *sep, uintptr_t pc)
{
	kmt_brkpt_t *kb = sep->se_data;

	return (sep->se_state == MDB_TGT_SPEC_ARMED &&
	    sep->se_ops == &kmt_brkpt_ops && kb->kb_addr == pc);
}

/*
 * We step over watchpoints using our single-stepper. If a conflicting
 * breakpoint is present, we must temporarily disarm it before stepping over
 * the watchpoint so we do not immediately re-trigger the breakpoint. This is
 * similar to the case handled in kmt_brkpt_cont(), above.
 */
static int
kmt_wapt_cont(mdb_tgt_t *t, mdb_sespec_t *sep, mdb_tgt_status_t *tsp)
{
	mdb_sespec_t *bep = NULL;
	int status = -1;
	int error, why;

	/*
	 * If we stopped for anything other than a watchpoint, check to see
	 * if there's a breakpoint here.
	 */
	if (!(kmdb_dpi_get_state(&why) == DPI_STATE_FAULTED &&
	    (why == DPI_STATE_WHY_V_WAPT || why == DPI_STATE_WHY_P_WAPT))) {
		kreg_t pc;

		(void) kmdb_dpi_get_register("pc", &pc);

		for (bep = mdb_list_next(&t->t_active); bep != NULL;
		    bep = mdb_list_next(bep)) {
			if (kmt_bp_overlap(bep, pc)) {
				(void) bep->se_ops->se_disarm(t, bep);
				bep->se_state = MDB_TGT_SPEC_ACTIVE;
				break;
			}
		}
	}

	kmdb_dpi_wapt_disarm(sep->se_data);
	if (kmt_step(t, tsp) == 0)
		status = kmt_status(t, tsp);

	error = errno; /* save errno from step or status */

	if (bep != NULL)
		mdb_tgt_sespec_arm_one(t, bep);

	(void) set_errno(error);
	return (status);
}

/*ARGSUSED*/
static int
kmt_wapt_match(mdb_tgt_t *t, mdb_sespec_t *sep, mdb_tgt_status_t *tsp)
{
	return (kmdb_dpi_wapt_match(sep->se_data));
}

static const mdb_se_ops_t kmt_wapt_ops = {
	kmt_wapt_ctor,		/* se_ctor */
	kmt_wapt_dtor,		/* se_dtor */
	kmt_wapt_info,		/* se_info */
	kmt_wapt_secmp,		/* se_secmp */
	kmt_wapt_vecmp,		/* se_vecmp */
	kmt_wapt_arm,		/* se_arm */
	kmt_wapt_disarm,	/* se_disarm */
	kmt_wapt_cont,		/* se_cont */
	kmt_wapt_match		/* se_match */
};

/*ARGSUSED*/
static int
kmt_trap_ctor(mdb_tgt_t *t, mdb_sespec_t *sep, void *args)
{
	sep->se_data = args; /* trap number */

	return (0);
}

/*ARGSUSED*/
static char *
kmt_trap_info(mdb_tgt_t *t, mdb_sespec_t *sep, mdb_vespec_t *vep,
    mdb_tgt_spec_desc_t *sp, char *buf, size_t nbytes)
{
	const char *name;
	int trapnum;

	if (vep != NULL)
		trapnum = (intptr_t)vep->ve_args;
	else
		trapnum = (intptr_t)sep->se_data;

	if (trapnum == KMT_TRAP_ALL)
		name = "any trap";
	else if (trapnum == KMT_TRAP_NOTENUM)
		name = "miscellaneous trap";
	else
		name = kmt_trapname(trapnum);

	(void) mdb_iob_snprintf(buf, nbytes, "single-step stop on %s", name);

	return (buf);
}

/*ARGSUSED2*/
static int
kmt_trap_match(mdb_tgt_t *t, mdb_sespec_t *sep, mdb_tgt_status_t *tsp)
{
	int spectt = (intptr_t)sep->se_data;
	kmt_data_t *kmt = t->t_data;
	kreg_t tt;

	(void) kmdb_dpi_get_register("tt", &tt);

	switch (spectt) {
	case KMT_TRAP_ALL:
		return (1);
	case KMT_TRAP_NOTENUM:
		return (tt > kmt->kmt_trapmax ||
		    !BT_TEST(kmt->kmt_trapmap, tt));
	default:
		return (tt == spectt);
	}
}

static const mdb_se_ops_t kmt_trap_ops = {
	kmt_trap_ctor,		/* se_ctor */
	no_se_dtor,		/* se_dtor */
	kmt_trap_info,		/* se_info */
	no_se_secmp,		/* se_secmp */
	no_se_vecmp,		/* se_vecmp */
	no_se_arm,		/* se_arm */
	no_se_disarm,		/* se_disarm */
	no_se_cont,		/* se_cont */
	kmt_trap_match		/* se_match */
};

static void
kmt_bparg_dtor(mdb_vespec_t *vep)
{
	kmt_bparg_t *ka = vep->ve_args;

	if (ka->ka_symbol != NULL)
		strfree(ka->ka_symbol);

	if (ka->ka_defbp != NULL)
		kmt_defbp_delete(mdb.m_target, ka->ka_defbp);

	mdb_free(ka, sizeof (kmt_bparg_t));
}
static int
kmt_add_vbrkpt(mdb_tgt_t *t, uintptr_t addr,
    int spec_flags, mdb_tgt_se_f *func, void *data)
{
	kmt_bparg_t *ka = mdb_alloc(sizeof (kmt_bparg_t), UM_SLEEP);

	ka->ka_addr = addr;
	ka->ka_symbol = NULL;
	ka->ka_defbp = NULL;

	return (mdb_tgt_vespec_insert(t, &kmt_brkpt_ops, spec_flags,
	    func, data, ka, kmt_bparg_dtor));
}
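/*
 * Create a breakpoint from a string spec.  A scoped spec such as
 * "specfs`specvp" (module`symbol - the names here are just examples) becomes
 * a deferred breakpoint that is resolved when the named module is loaded; an
 * unscoped spec is resolved immediately, either as a bare address or as a
 * symbol name, and yields an ordinary vbrkpt.
 */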
static int
kmt_add_vbrkpt(mdb_tgt_t *t, uintptr_t addr,
    int spec_flags, mdb_tgt_se_f *func, void *data)
{
	kmt_bparg_t *ka = mdb_alloc(sizeof (kmt_bparg_t), UM_SLEEP);

	ka->ka_addr = addr;
	ka->ka_symbol = NULL;
	ka->ka_defbp = NULL;

	return (mdb_tgt_vespec_insert(t, &kmt_brkpt_ops, spec_flags,
	    func, data, ka, kmt_bparg_dtor));
}

static int
kmt_add_sbrkpt(mdb_tgt_t *t, const char *fullname,
    int spec_flags, mdb_tgt_se_f *func, void *data)
{
	kmt_bparg_t *ka;
	kmt_defbp_t *dbp;
	GElf_Sym sym;
	char *tick, *objname, *symname;
	int serrno;

	if ((tick = strchr(fullname, '`')) == fullname) {
		(void) set_errno(EMDB_NOOBJ);
		return (0);
	}

	/*
	 * Deferred breakpoints are always scoped.  If we didn't find a tick,
	 * there's no scope.  We'll create a vbrkpt, but only if we can turn
	 * the provided string into an address.
	 */
	if (tick == NULL) {
		uintptr_t addr;

		if (strisbasenum(fullname)) {
			addr = mdb_strtoull(fullname); /* a bare address */
		} else if (mdb_tgt_lookup_by_name(t, MDB_TGT_OBJ_EVERY,
		    fullname, &sym, NULL) < 0) {
			(void) set_errno(EMDB_NOSYM);
			return (0);
		} else {
			addr = (uintptr_t)sym.st_value; /* unscoped sym name */
		}

		return (kmt_add_vbrkpt(t, addr, spec_flags, func, data));
	}

	if (*(tick + 1) == '\0') {
		(void) set_errno(EMDB_NOSYM);
		return (0);
	}

	objname = strndup(fullname, tick - fullname);
	symname = tick + 1;

	if (mdb_tgt_lookup_by_name(t, objname, symname, NULL, NULL) < 0 &&
	    errno != EMDB_NOOBJ) {
		serrno = errno;
		strfree(objname);

		(void) set_errno(serrno);
		return (0); /* errno is set for us */
	}

	dbp = kmt_defbp_create(t, objname, symname);
	strfree(objname);

	ka = mdb_alloc(sizeof (kmt_bparg_t), UM_SLEEP);
	ka->ka_symbol = strdup(fullname);
	ka->ka_addr = 0;
	ka->ka_defbp = dbp;

	return (mdb_tgt_vespec_insert(t, &kmt_brkpt_ops, spec_flags,
	    func, data, ka, kmt_bparg_dtor));
}

static int
kmt_wparg_overlap(const kmdb_wapt_t *wp1, const kmdb_wapt_t *wp2)
{
	/* Assume the watchpoint spaces don't overlap */
	if (wp1->wp_type != wp2->wp_type)
		return (0);

	if (wp2->wp_addr + wp2->wp_size <= wp1->wp_addr)
		return (0); /* no range overlap */

	if (wp1->wp_addr + wp1->wp_size <= wp2->wp_addr)
		return (0); /* no range overlap */

	return (wp1->wp_addr != wp2->wp_addr || wp1->wp_size != wp2->wp_size ||
	    wp1->wp_wflags != wp2->wp_wflags);
}

static void
kmt_wparg_dtor(mdb_vespec_t *vep)
{
	mdb_free(vep->ve_args, sizeof (kmdb_wapt_t));
}
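
/*
 * Common watchpoint creation path.  The request is validated by the DPI and
 * then checked against the watchpoints on both the active and idle sespec
 * lists; a request that overlaps an existing watchpoint of the same type
 * without matching it exactly is rejected with EMDB_WPDUP.
 */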
static int
kmt_add_wapt_common(mdb_tgt_t *t, uintptr_t addr, size_t len, uint_t wflags,
    int spec_flags, mdb_tgt_se_f *func, void *data, int type)
{
	kmdb_wapt_t *wp = mdb_alloc(sizeof (kmdb_wapt_t), UM_SLEEP);
	mdb_sespec_t *sep;

	wp->wp_addr = addr;
	wp->wp_size = len;
	wp->wp_type = type;
	wp->wp_wflags = wflags;

	if (kmdb_dpi_wapt_validate(wp) < 0) {
		/* free the rejected watchpoint so it isn't leaked */
		mdb_free(wp, sizeof (kmdb_wapt_t));
		return (0); /* errno is set for us */
	}

	for (sep = mdb_list_next(&t->t_active); sep; sep = mdb_list_next(sep)) {
		if (sep->se_ops == &kmt_wapt_ops &&
		    mdb_list_next(&sep->se_velist) != NULL &&
		    kmt_wparg_overlap(wp, sep->se_data))
			goto wapt_dup;
	}

	for (sep = mdb_list_next(&t->t_idle); sep; sep = mdb_list_next(sep)) {
		if (sep->se_ops == &kmt_wapt_ops && kmt_wparg_overlap(wp,
		    ((mdb_vespec_t *)mdb_list_next(&sep->se_velist))->ve_args))
			goto wapt_dup;
	}

	return (mdb_tgt_vespec_insert(t, &kmt_wapt_ops, spec_flags,
	    func, data, wp, kmt_wparg_dtor));

wapt_dup:
	mdb_free(wp, sizeof (kmdb_wapt_t));
	(void) set_errno(EMDB_WPDUP);
	return (0);
}

static int
kmt_add_pwapt(mdb_tgt_t *t, physaddr_t addr, size_t len, uint_t wflags,
    int spec_flags, mdb_tgt_se_f *func, void *data)
{
	return (kmt_add_wapt_common(t, (uintptr_t)addr, len, wflags, spec_flags,
	    func, data, DPI_WAPT_TYPE_PHYS));
}

static int
kmt_add_vwapt(mdb_tgt_t *t, uintptr_t addr, size_t len, uint_t wflags,
    int spec_flags, mdb_tgt_se_f *func, void *data)
{
	return (kmt_add_wapt_common(t, addr, len, wflags, spec_flags, func,
	    data, DPI_WAPT_TYPE_VIRT));
}

static int
kmt_add_iowapt(mdb_tgt_t *t, uintptr_t addr, size_t len, uint_t wflags,
    int spec_flags, mdb_tgt_se_f *func, void *data)
{
	return (kmt_add_wapt_common(t, addr, len, wflags, spec_flags, func,
	    data, DPI_WAPT_TYPE_IO));
}

static int
kmt_add_trap(mdb_tgt_t *t, int trapnum, int spec_flags, mdb_tgt_se_f *func,
    void *data)
{
	kmt_data_t *kmt = t->t_data;

	if (trapnum != KMT_TRAP_ALL && trapnum != KMT_TRAP_NOTENUM) {
		if (trapnum < 0 || trapnum > kmt->kmt_trapmax) {
			(void) set_errno(EMDB_BADFLTNUM);
			return (0);
		}

		BT_SET(kmt->kmt_trapmap, trapnum);
	}

	return (mdb_tgt_vespec_insert(t, &kmt_trap_ops, spec_flags, func, data,
	    (void *)(uintptr_t)trapnum, no_ve_dtor));
}
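
/*
 * Discipline backing the read-only "cpuid" named variable; reads report the
 * CPU currently acting as the kmdb master.
 */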
/*ARGSUSED*/
static uintmax_t
kmt_cpuid_disc_get(const mdb_var_t *v)
{
	return (kmdb_dpi_get_master_cpuid());
}

static const mdb_nv_disc_t kmt_cpuid_disc = {
	NULL,
	kmt_cpuid_disc_get
};

/*
 * This routine executes while the kernel is running.
 */
void
kmt_activate(mdb_tgt_t *t)
{
	kmt_data_t *kmt = t->t_data;

	mdb_prop_postmortem = FALSE;
	mdb_prop_kernel = TRUE;

	(void) mdb_tgt_register_dcmds(t, &kmt_dcmds[0], MDB_MOD_FORCE);
	mdb_tgt_register_regvars(t, kmt->kmt_rds, &kmt_reg_disc, 0);

	/*
	 * Force load of the MDB krtld module, in case it's been rolled into
	 * unix.
	 */
	(void) mdb_module_load(KMT_RTLD_NAME, MDB_MOD_SILENT | MDB_MOD_DEFER);
}

static void
kmt_destroy(mdb_tgt_t *t)
{
	kmt_data_t *kmt = t->t_data;
	kmt_module_t *km, *pkm;

	mdb_nv_destroy(&kmt->kmt_modules);
	for (km = mdb_list_prev(&kmt->kmt_modlist); km != NULL; km = pkm) {
		pkm = mdb_list_prev(km);
		mdb_free(km, sizeof (kmt_module_t));
	}

	if (!kmt_defbp_lock)
		kmt_defbp_destroy_all();

	if (kmt->kmt_trapmap != NULL)
		mdb_free(kmt->kmt_trapmap, BT_SIZEOFMAP(kmt->kmt_trapmax));

	mdb_free(kmt, sizeof (kmt_data_t));
}
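
/*
 * Target operations vector for the kmdb_kvm target.  Operations that have no
 * meaning for an in-situ kernel debugger are stubbed out with mdb_tgt_notsup,
 * mdb_tgt_nop, or mdb_tgt_null.
 */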
static const mdb_tgt_ops_t kmt_ops = {
	kmt_setflags,				/* t_setflags */
	(int (*)()) mdb_tgt_notsup,		/* t_setcontext */
	kmt_activate,				/* t_activate */
	(void (*)()) mdb_tgt_nop,		/* t_deactivate */
	kmt_periodic,				/* t_periodic */
	kmt_destroy,				/* t_destroy */
	kmt_name,				/* t_name */
	(const char *(*)()) mdb_conf_isa,	/* t_isa */
	kmt_platform,				/* t_platform */
	kmt_uname,				/* t_uname */
	kmt_dmodel,				/* t_dmodel */
	(ssize_t (*)()) mdb_tgt_notsup,		/* t_aread */
	(ssize_t (*)()) mdb_tgt_notsup,		/* t_awrite */
	kmt_read,				/* t_vread */
	kmt_write,				/* t_vwrite */
	kmt_pread,				/* t_pread */
	kmt_pwrite,				/* t_pwrite */
	kmt_read,				/* t_fread */
	kmt_write,				/* t_fwrite */
	kmt_ioread,				/* t_ioread */
	kmt_iowrite,				/* t_iowrite */
	kmt_vtop,				/* t_vtop */
	kmt_lookup_by_name,			/* t_lookup_by_name */
	kmt_lookup_by_addr,			/* t_lookup_by_addr */
	kmt_symbol_iter,			/* t_symbol_iter */
	kmt_mapping_iter,			/* t_mapping_iter */
	kmt_object_iter,			/* t_object_iter */
	kmt_addr_to_map,			/* t_addr_to_map */
	kmt_name_to_map,			/* t_name_to_map */
	kmt_addr_to_ctf,			/* t_addr_to_ctf */
	kmt_name_to_ctf,			/* t_name_to_ctf */
	kmt_status,				/* t_status */
	(int (*)()) mdb_tgt_notsup,		/* t_run */
	kmt_step,				/* t_step */
	kmt_step_out,				/* t_step_out */
	kmt_next,				/* t_next */
	kmt_continue,				/* t_cont */
	(int (*)()) mdb_tgt_notsup,		/* t_signal */
	kmt_add_vbrkpt,				/* t_add_vbrkpt */
	kmt_add_sbrkpt,				/* t_add_sbrkpt */
	kmt_add_pwapt,				/* t_add_pwapt */
	kmt_add_vwapt,				/* t_add_vwapt */
	kmt_add_iowapt,				/* t_add_iowapt */
	(int (*)()) mdb_tgt_null,		/* t_add_sysenter */
	(int (*)()) mdb_tgt_null,		/* t_add_sysexit */
	(int (*)()) mdb_tgt_null,		/* t_add_signal */
	kmt_add_trap,				/* t_add_fault */
	kmt_getareg,				/* t_getareg */
	kmt_putareg,				/* t_putareg */
	(int (*)()) mdb_tgt_nop,		/* XXX t_stack_iter */
	(int (*)()) mdb_tgt_notsup		/* t_auxv */
};

/*
 * Called immediately upon resumption of the system after a step or continue.
 * Allows us to synchronize kmt's view of the world with reality.
 */
/*ARGSUSED*/
static void
kmt_sync(mdb_tgt_t *t)
{
	kmt_data_t *kmt = t->t_data;
	int symavail;

	mdb_dprintf(MDB_DBG_KMOD, "synchronizing with kernel\n");

	symavail = kmt->kmt_symavail;
	kmt->kmt_symavail = FALSE;

	/*
	 * Resync our view of the world if the modules have changed, or if we
	 * didn't have any symbols coming into this function.  The latter will
	 * only happen on startup.
	 */
	if (kmdb_kdi_mods_changed() || !symavail)
		kmt_modlist_update(t);

	/*
	 * It would be nice if we could run this less frequently, perhaps
	 * after a dvec-initiated trigger.
	 */
	kmdb_module_sync();

	kmt->kmt_symavail = TRUE;

	mdb_dprintf(MDB_DBG_KMOD, "synchronization complete\n");

	kmt_defbp_prune();

	if (kmt_defbp_num > 0 && kmt_defbp_bpspec == 0 &&
	    kmdb_kdi_dtrace_get_state() != KDI_DTSTATE_DTRACE_ACTIVE) {
		/*
		 * Deferred breakpoints were created while DTrace was active,
		 * and consequently the deferred breakpoint enabling mechanism
		 * wasn't activated.  Activate it now, and then try to activate
		 * the deferred breakpoints.  We do this so that we can catch
		 * the ones which may apply to modules that have been loaded
		 * while they were waiting for DTrace to deactivate.
		 */
		(void) kmt_defbp_activate(t);
		(void) mdb_tgt_sespec_activate_all(t);
	}

	(void) mdb_tgt_status(t, &t->t_status);
}

/*
 * This routine executes while the kernel is running.
 */
/*ARGSUSED*/
int
kmdb_kvm_create(mdb_tgt_t *t, int argc, const char *argv[])
{
	kmt_data_t *kmt;

	if (argc != 0)
		return (set_errno(EINVAL));

	kmt = mdb_zalloc(sizeof (kmt_data_t), UM_SLEEP);
	t->t_data = kmt;
	t->t_ops = &kmt_ops;
	t->t_flags |= MDB_TGT_F_RDWR;	/* kmdb is always r/w */

	(void) mdb_nv_insert(&mdb.m_nv, "cpuid", &kmt_cpuid_disc, 0,
	    MDB_NV_PERSIST | MDB_NV_RDONLY);

	(void) mdb_nv_create(&kmt->kmt_modules, UM_SLEEP);

	kmt_init_isadep(t);

	kmt->kmt_symavail = FALSE;

	bzero(&kmt_defbp_list, sizeof (mdb_list_t));

	return (0);

create_err:
	kmt_destroy(t);

	return (-1);
}

/*
 * This routine is called once, when kmdb first has control of the world.
 */
void
kmdb_kvm_startup(void)
{
	kmt_data_t *kmt = mdb.m_target->t_data;

	mdb_dprintf(MDB_DBG_KMOD, "kmdb_kvm startup\n");

	kmt_sync(mdb.m_target);
	(void) mdb_module_load_builtin(KMT_MODULE);
	kmt_startup_isadep(mdb.m_target);

	/*
	 * This is here because we need to write the breakpoint that backs
	 * deferred-breakpoint activation when the debugger starts.  The
	 * routines we normally use to write to read-only (r/o) memory don't
	 * work while the kernel is running, so we have to do it during
	 * startup.
	 */
	(void) mdb_tgt_sespec_activate_all(mdb.m_target);

	kmt->kmt_rtld_name = KMT_RTLD_NAME;

	if (kmt_module_by_name(kmt, KMT_RTLD_NAME) == NULL)
		kmt->kmt_rtld_name = "unix";
}

/*
 * This routine is called after kmdb has loaded its initial set of modules.
 */
void
kmdb_kvm_poststartup(void)
{
	mdb_dprintf(MDB_DBG_KMOD, "kmdb_kvm post-startup\n");

	(void) mdb_dis_select(kmt_def_dismode());
}