/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * Copyright 2019 Joyent, Inc.
 */

/*
 * Libkvm Kernel Target
 *
 * The libkvm kernel target provides access to both crash dumps and live
 * kernels through /dev/ksyms and /dev/kmem, using the facilities provided by
 * the libkvm.so library. The target-specific data structures are shared
 * between this file (common code) and the ISA-dependent parts of the target,
 * and so they are defined in the mdb_kvm.h header. The target processes an
 * "executable" (/dev/ksyms or the unix.X file) which contains a primary
 * .symtab and .dynsym, and then also iterates over the krtld module chain in
 * the kernel in order to obtain a list of loaded modules and per-module symbol
 * tables. To improve startup performance, the per-module symbol tables are
 * instantiated on-the-fly whenever an address lookup falls within the text
 * section of a given module. The target also relies on services from the
 * mdb_ks (kernel support) module, which contains pieces of the implementation
 * that must be compiled against the kernel implementation.
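 *
 * The ISA-specific portion of the target is initialized by the kt_*_init()
 * routine selected at the bottom of mdb_kvm_tgt_create() below.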
 */

#include <sys/modctl.h>
#include <sys/kobj.h>
#include <sys/kobj_impl.h>
#include <sys/utsname.h>
#include <sys/panic.h>
#include <sys/dumphdr.h>
#include <sys/dumpadm.h>
#include <sys/uuid.h>

#include <dlfcn.h>
#include <libctf.h>
#include <string.h>
#include <fcntl.h>
#include <errno.h>

#include <mdb/mdb_target_impl.h>
#include <mdb/mdb_err.h>
#include <mdb/mdb_debug.h>
#include <mdb/mdb_string.h>
#include <mdb/mdb_modapi.h>
#include <mdb/mdb_io_impl.h>
#include <mdb/mdb_ctf.h>
#include <mdb/mdb_kvm.h>
#include <mdb/mdb_module.h>
#include <mdb/mdb_kb.h>
#include <mdb/mdb_ks.h>
#include <mdb/mdb.h>

#define	KT_RELOC_BUF(buf, obase, nbase) \
	((uintptr_t)(buf) - (uintptr_t)(obase) + (uintptr_t)(nbase))

#define	KT_BAD_BUF(buf, base, size) \
	((uintptr_t)(buf) < (uintptr_t)(base) || \
	((uintptr_t)(buf) >= (uintptr_t)(base) + (uintptr_t)(size)))

typedef struct kt_symarg {
	mdb_tgt_sym_f *sym_cb;		/* Caller's callback function */
	void *sym_data;			/* Callback function argument */
	uint_t sym_type;		/* Symbol type/binding filter */
	mdb_syminfo_t sym_info;		/* Symbol id and table id */
	const char *sym_obj;		/* Containing object */
} kt_symarg_t;

typedef struct kt_maparg {
	mdb_tgt_t *map_target;		/* Target used for mapping iter */
	mdb_tgt_map_f *map_cb;		/* Caller's callback function */
	void *map_data;			/* Callback function argument */
} kt_maparg_t;

static const char KT_MODULE[] = "mdb_ks";
static const char KT_CTFPARENT[] = "genunix";

static void (*print_buildversion)(void);

static void
kt_load_module(kt_data_t *kt, mdb_tgt_t *t, kt_module_t *km)
{
	km->km_data = mdb_alloc(km->km_datasz, UM_SLEEP);

	(void) mdb_tgt_vread(t, km->km_data, km->km_datasz, km->km_symspace_va);

	km->km_symbuf = (void *)
	    KT_RELOC_BUF(km->km_symtab_va, km->km_symspace_va, km->km_data);

	km->km_strtab = (char *)
	    KT_RELOC_BUF(km->km_strtab_va, km->km_symspace_va, km->km_data);

	km->km_symtab = mdb_gelf_symtab_create_raw(&kt->k_file->gf_ehdr,
	    &km->km_symtab_hdr, km->km_symbuf,
	    &km->km_strtab_hdr, km->km_strtab, MDB_TGT_SYMTAB);
}

static void
kt_load_modules(kt_data_t *kt, mdb_tgt_t *t)
{
	char name[MAXNAMELEN];
	uintptr_t addr, head;

	struct module kmod;
	struct modctl ctl;
	Shdr symhdr, strhdr;
	GElf_Sym sym;

	kt_module_t *km;

	if (mdb_tgt_lookup_by_name(t, MDB_TGT_OBJ_EXEC,
	    "modules", &sym, NULL) == -1) {
		warn("failed to get 'modules' symbol");
		return;
	}

	if (mdb_tgt_readsym(t, MDB_TGT_AS_VIRT, &ctl, sizeof (ctl),
	    MDB_TGT_OBJ_EXEC, "modules") != sizeof (ctl)) {
		warn("failed to read 'modules' struct");
		return;
	}

	addr = head = (uintptr_t)sym.st_value;

	do {
		if (addr == 0)
			break; /* Avoid spurious NULL pointers in list */

		if (mdb_tgt_vread(t, &ctl, sizeof (ctl), addr) == -1) {
			warn("failed to read modctl at %p", (void *)addr);
			return;
		}

		if (ctl.mod_mp == NULL)
			continue; /* No associated krtld structure */

		if (mdb_tgt_readstr(t, MDB_TGT_AS_VIRT, name, MAXNAMELEN,
		    (uintptr_t)ctl.mod_modname) <= 0) {
			warn("failed to read module name at %p",
			    (void *)ctl.mod_modname);
			continue;
		}

		mdb_dprintf(MDB_DBG_KMOD, "reading mod %s (%p)\n",
		    name, (void *)addr);

		if (mdb_nv_lookup(&kt->k_modules, name) != NULL) {
			warn("skipping duplicate module '%s', id=%d\n",
			    name, ctl.mod_id);
			continue;
		}

		if (mdb_tgt_vread(t, &kmod, sizeof (kmod),
		    (uintptr_t)ctl.mod_mp) == -1) {
			warn("failed to read module at %p\n",
			    (void *)ctl.mod_mp);
			continue;
		}

		if (kmod.symspace == NULL || kmod.symhdr == NULL ||
		    kmod.strhdr == NULL) {
			/*
			 * If no buffer for the symbols has been allocated,
			 * or the shdrs for .symtab and .strtab are missing,
			 * then we're out of luck.
			 */
			continue;
		}

		if (mdb_tgt_vread(t, &symhdr, sizeof (Shdr),
		    (uintptr_t)kmod.symhdr) == -1) {
			warn("failed to read .symtab header for '%s', id=%d",
			    name, ctl.mod_id);
			continue;
		}

		if (mdb_tgt_vread(t, &strhdr, sizeof (Shdr),
		    (uintptr_t)kmod.strhdr) == -1) {
			warn("failed to read .strtab header for '%s', id=%d",
			    name, ctl.mod_id);
			continue;
		}

		/*
		 * Now get clever: f(*^ing krtld didn't used to bother updating
		 * its own kmod.symsize value. We know that prior to this bug
		 * being fixed, symspace was a contiguous buffer containing
		 * .symtab, .strtab, and the symbol hash table in that order.
		 * So if symsize is zero, recompute it as the size of .symtab
		 * plus the size of .strtab. We don't need to load the hash
		 * table anyway since we re-hash all the symbols internally.
		 */
		if (kmod.symsize == 0)
			kmod.symsize = symhdr.sh_size + strhdr.sh_size;

		/*
		 * Similar logic can be used to make educated guesses
		 * at the values of kmod.symtbl and kmod.strings.
		 */
		if (kmod.symtbl == NULL)
			kmod.symtbl = kmod.symspace;
		if (kmod.strings == NULL)
			kmod.strings = kmod.symspace + symhdr.sh_size;

		/*
		 * Make sure things seem reasonable before we proceed
		 * to actually read and decipher the symspace.
		 */
		if (KT_BAD_BUF(kmod.symtbl, kmod.symspace, kmod.symsize) ||
		    KT_BAD_BUF(kmod.strings, kmod.symspace, kmod.symsize)) {
			warn("skipping module '%s', id=%d (corrupt symspace)\n",
			    name, ctl.mod_id);
			continue;
		}

		km = mdb_zalloc(sizeof (kt_module_t), UM_SLEEP);
		km->km_name = strdup(name);

		(void) mdb_nv_insert(&kt->k_modules, km->km_name, NULL,
		    (uintptr_t)km, MDB_NV_EXTNAME);

		km->km_datasz = kmod.symsize;
		km->km_symspace_va = (uintptr_t)kmod.symspace;
		km->km_symtab_va = (uintptr_t)kmod.symtbl;
		km->km_strtab_va = (uintptr_t)kmod.strings;
		km->km_symtab_hdr = symhdr;
		km->km_strtab_hdr = strhdr;
		km->km_text_va = (uintptr_t)kmod.text;
		km->km_text_size = kmod.text_size;
		km->km_data_va = (uintptr_t)kmod.data;
		km->km_data_size = kmod.data_size;
		km->km_bss_va = (uintptr_t)kmod.bss;
		km->km_bss_size = kmod.bss_size;

		if (kt->k_ctfvalid) {
			km->km_ctf_va = (uintptr_t)kmod.ctfdata;
			km->km_ctf_size = kmod.ctfsize;
		}

		/*
		 * Add the module to the end of the list of modules in load-
		 * dependency order. This is needed to load the corresponding
		 * debugger modules in the same order for layering purposes.
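		 * (kt_name_to_map() below depends on this ordering as well:
		 * the first entry on k_modlist is treated as the executable,
		 * unix.)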
		 */
		mdb_list_append(&kt->k_modlist, km);

		if (t->t_flags & MDB_TGT_F_PRELOAD) {
			mdb_iob_printf(mdb.m_out, " %s", name);
			mdb_iob_flush(mdb.m_out);
			kt_load_module(kt, t, km);
		}

	} while ((addr = (uintptr_t)ctl.mod_next) != head);
}

int
kt_setflags(mdb_tgt_t *t, int flags)
{
	int iochg = ((flags ^ t->t_flags) & MDB_TGT_F_ALLOWIO) &&
	    !mdb_prop_postmortem;
	int rwchg = (flags ^ t->t_flags) & MDB_TGT_F_RDWR;
	kt_data_t *kt = t->t_data;
	const char *kvmfile;
	void *cookie;
	int mode;

	if (!iochg && !rwchg)
		return (0);

	if (kt->k_xpv_domu) {
		warn("read-only target");
		return (-1);
	}

	if (iochg) {
		kvmfile = (flags & MDB_TGT_F_ALLOWIO) ? "/dev/allkmem" :
		    "/dev/kmem";
	} else {
		kvmfile = kt->k_kvmfile;
	}

	mode = (flags & MDB_TGT_F_RDWR) ? O_RDWR : O_RDONLY;

	if ((cookie = kt->k_kb_ops->kb_open(kt->k_symfile, kvmfile, NULL, mode,
	    mdb.m_pname)) == NULL) {
		/* We failed to re-open, so don't change t_flags */
		warn("failed to re-open target");
		return (-1);
	}

	/*
	 * We successfully reopened the target, so update k_kvmfile. Also set
	 * the RDWR and ALLOWIO bits in t_flags to match those in flags.
	 */
	(void) kt->k_kb_ops->kb_close(kt->k_cookie);
	kt->k_cookie = cookie;

	if (kvmfile != kt->k_kvmfile) {
		strfree(kt->k_kvmfile);
		kt->k_kvmfile = strdup(kvmfile);
	}

	t->t_flags = (t->t_flags & ~(MDB_TGT_F_RDWR | MDB_TGT_F_ALLOWIO)) |
	    (flags & (MDB_TGT_F_RDWR | MDB_TGT_F_ALLOWIO));

	return (0);
}

/*
 * Determine which PIDs (if any) have their pages saved in the dump. We
 * do this by looking for content flags in dump_flags in the header. These
 * flags, which won't be set in older dumps, tell us whether a single process
 * has had its pages included in the dump. If a single process has been
 * included, we need to get the PID for that process from the dump_pids
 * array in the dump.
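 *
 * To summarize the cases handled by kt_find_dump_contents() below: DF_ALL
 * means every process's pages are present (KT_DUMPCONTENT_ALL), DF_CURPROC
 * means only the process that was current at dump time is present, in which
 * case its PID becomes the content value, and neither flag indicates a
 * kernel-only dump (KT_DUMPCONTENT_KERNEL).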
 */
static int
kt_find_dump_contents(kt_data_t *kt)
{
	dumphdr_t *dh = kt->k_dumphdr;
	pid_t pid = -1;

	if (dh->dump_flags & DF_ALL)
		return (KT_DUMPCONTENT_ALL);

	if (dh->dump_flags & DF_CURPROC) {
		if ((pid = kt->k_dump_find_curproc()) == -1)
			return (KT_DUMPCONTENT_INVALID);
		else
			return (pid);
	} else {
		return (KT_DUMPCONTENT_KERNEL);
	}
}

static int
kt_dump_contains_proc(mdb_tgt_t *t, void *context)
{
	kt_data_t *kt = t->t_data;
	pid_t (*f_pid)(uintptr_t);
	pid_t reqpid;

	switch (kt->k_dumpcontent) {
	case KT_DUMPCONTENT_KERNEL:
		return (0);
	case KT_DUMPCONTENT_ALL:
		return (1);
	case KT_DUMPCONTENT_INVALID:
		goto procnotfound;
	default:
		f_pid = (pid_t (*)()) dlsym(RTLD_NEXT, "mdb_kproc_pid");
		if (f_pid == NULL)
			goto procnotfound;

		reqpid = f_pid((uintptr_t)context);
		if (reqpid == -1)
			goto procnotfound;

		return (kt->k_dumpcontent == reqpid);
	}

procnotfound:
	warn("unable to determine whether dump contains proc %p\n", context);
	return (1);
}

int
kt_setcontext(mdb_tgt_t *t, void *context)
{
	if (context != NULL) {
		const char *argv[2];
		int argc = 0;
		mdb_tgt_t *ct;
		kt_data_t *kt = t->t_data;

		argv[argc++] = (const char *)context;
		argv[argc] = NULL;

		if (kt->k_dumphdr != NULL &&
		    !kt_dump_contains_proc(t, context)) {
			warn("dump does not contain pages for proc %p\n",
			    context);
			return (-1);
		}

		if ((ct = mdb_tgt_create(mdb_kproc_tgt_create,
		    t->t_flags, argc, argv)) == NULL)
			return (-1);

		mdb_printf("debugger context set to proc %p\n", context);
		mdb_tgt_activate(ct);
	} else
		mdb_printf("debugger context set to kernel\n");

	return (0);
}

static int
kt_stack(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	kt_data_t *kt = mdb.m_target->t_data;
	return (kt->k_dcmd_stack(addr, flags, argc, argv));
}

static int
kt_stackv(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	kt_data_t *kt = mdb.m_target->t_data;
	return (kt->k_dcmd_stackv(addr, flags, argc, argv));
}

static int
kt_stackr(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	kt_data_t *kt = mdb.m_target->t_data;
	return (kt->k_dcmd_stackr(addr, flags, argc, argv));
}

static int
kt_regs(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	kt_data_t *kt = mdb.m_target->t_data;

	if (argc != 0 || (flags & DCMD_ADDRSPEC))
		return (DCMD_USAGE);

	addr = (uintptr_t)kt->k_regs;

	return (kt->k_dcmd_regs(addr, flags, argc, argv));
}

#ifdef __x86
static int
kt_cpustack(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	kt_data_t *kt = mdb.m_target->t_data;
	return (kt->k_dcmd_cpustack(addr, flags, argc, argv));
}

static int
kt_cpuregs(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	kt_data_t *kt = mdb.m_target->t_data;
	return (kt->k_dcmd_cpuregs(addr, flags, argc, argv));
}
#endif /* __x86 */

/*ARGSUSED*/
static int
kt_status_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	kt_data_t *kt = mdb.m_target->t_data;
	struct utsname uts;

	bzero(&uts, sizeof (uts));
	(void) strcpy(uts.nodename, "unknown machine");
	(void) kt_uname(mdb.m_target, &uts);

	if (mdb_prop_postmortem) {
		mdb_printf("debugging %scrash dump %s (%d-bit) from %s\n",
		    kt->k_xpv_domu ? "domain " : "", kt->k_kvmfile,
		    (int)(sizeof (void *) * NBBY), uts.nodename);
	} else {
		mdb_printf("debugging live kernel (%d-bit) on %s\n",
		    (int)(sizeof (void *) * NBBY), uts.nodename);
	}

	mdb_printf("operating system: %s %s (%s)\n",
	    uts.release, uts.version, uts.machine);

	if (print_buildversion != NULL)
		print_buildversion();

	if (kt->k_dumphdr) {
		dumphdr_t *dh = kt->k_dumphdr;

		mdb_printf("image uuid: %s\n", dh->dump_uuid[0] != '\0' ?
		    dh->dump_uuid : "(not set)");
		mdb_printf("panic message: %s\n", dh->dump_panicstring);

		kt->k_dump_print_content(dh, kt->k_dumpcontent);
	} else {
		char uuid[UUID_PRINTABLE_STRING_LENGTH];

		if (mdb_readsym(uuid, sizeof (uuid),
		    "dump_osimage_uuid") == sizeof (uuid) &&
		    uuid[sizeof (uuid) - 1] == '\0') {
			mdb_printf("image uuid: %s\n", uuid[0] != '\0' ?
			    uuid : "(not set)");
		}
	}

	return (DCMD_OK);
}

static const mdb_dcmd_t kt_dcmds[] = {
	{ "$c", "?[cnt]", "print stack backtrace", kt_stack },
	{ "$C", "?[cnt]", "print stack backtrace", kt_stackv },
	{ "$r", NULL, "print general-purpose registers", kt_regs },
	{ "$?", NULL, "print status and registers", kt_regs },
	{ "regs", NULL, "print general-purpose registers", kt_regs },
	{ "stack", "?[cnt]", "print stack backtrace", kt_stack },
	{ "stackregs", "?", "print stack backtrace and registers", kt_stackr },
#ifdef __x86
	{ "cpustack", "?[-v] [-c cpuid] [cnt]", "print stack backtrace for a "
	    "specific CPU", kt_cpustack },
	{ "cpuregs", "?[-c cpuid]", "print general-purpose registers for a "
	    "specific CPU", kt_cpuregs },
#endif
	{ "status", NULL, "print summary of current target", kt_status_dcmd },
	{ NULL }
};

static uintmax_t
reg_disc_get(const mdb_var_t *v)
{
	mdb_tgt_t *t = MDB_NV_COOKIE(v);
	kt_data_t *kt = t->t_data;
	mdb_tgt_reg_t r = 0;

	(void) mdb_tgt_getareg(t, kt->k_tid, mdb_nv_get_name(v), &r);
	return (r);
}

static kt_module_t *
kt_module_by_name(kt_data_t *kt, const char *name)
{
	kt_module_t *km;

	for (km = mdb_list_next(&kt->k_modlist); km; km = mdb_list_next(km)) {
		if (strcmp(name, km->km_name) == 0)
			return (km);
	}

	return (NULL);
}

void
kt_activate(mdb_tgt_t *t)
{
	static const mdb_nv_disc_t reg_disc = { NULL, reg_disc_get };
	kt_data_t *kt = t->t_data;
	void *sym;

	int oflag;

	mdb_prop_postmortem = kt->k_xpv_domu || (kt->k_dumphdr != NULL);
	mdb_prop_kernel = TRUE;
	mdb_prop_datamodel = MDB_TGT_MODEL_NATIVE;

	if (kt->k_activated == FALSE) {
		struct utsname u1, u2;
		/*
		 * If we're examining a crash dump, root is /, and uname(2)
		 * does not match the utsname in the dump, issue a warning.
		 * Note that we are assuming that the modules and macros in
		 * /usr/lib are compiled against the kernel from uname -rv.
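		 * (The mdb.m_root check below means the warning is only
		 * issued when no alternate root directory is in effect.)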
		 */
		if (mdb_prop_postmortem && strcmp(mdb.m_root, "/") == 0 &&
		    uname(&u1) >= 0 && kt_uname(t, &u2) >= 0 &&
		    (strcmp(u1.release, u2.release) ||
		    strcmp(u1.version, u2.version))) {
			mdb_warn("warning: dump is from %s %s %s; dcmds and "
			    "macros may not match kernel implementation\n",
			    u2.sysname, u2.release, u2.version);
		}

		if (mdb_module_load(KT_MODULE, MDB_MOD_GLOBAL) < 0) {
			warn("failed to load kernel support module -- "
			    "some modules may not load\n");
		}

		print_buildversion = (void (*)(void))dlsym(RTLD_NEXT,
		    "mdb_print_buildversion");

		if (mdb_prop_postmortem && kt->k_dumphdr != NULL) {
			sym = dlsym(RTLD_NEXT, "mdb_dump_print_content");
			if (sym != NULL)
				kt->k_dump_print_content = (void (*)())sym;

			sym = dlsym(RTLD_NEXT, "mdb_dump_find_curproc");
			if (sym != NULL)
				kt->k_dump_find_curproc = (int (*)())sym;

			kt->k_dumpcontent = kt_find_dump_contents(kt);
		}

		if (t->t_flags & MDB_TGT_F_PRELOAD) {
			oflag = mdb_iob_getflags(mdb.m_out) & MDB_IOB_PGENABLE;

			mdb_iob_clrflags(mdb.m_out, oflag);
			mdb_iob_puts(mdb.m_out, "Preloading module symbols: [");
			mdb_iob_flush(mdb.m_out);
		}

		if (!(t->t_flags & MDB_TGT_F_NOLOAD)) {
			kt_load_modules(kt, t);

			/*
			 * Determine where the CTF data for krtld is. If krtld
			 * is rolled into unix, force load the MDB krtld
			 * module.
			 */
			kt->k_rtld_name = "krtld";

			if (kt_module_by_name(kt, "krtld") == NULL) {
				(void) mdb_module_load("krtld", MDB_MOD_SILENT);
				kt->k_rtld_name = "unix";
			}
		}


		if (t->t_flags & MDB_TGT_F_PRELOAD) {
			mdb_iob_puts(mdb.m_out, " ]\n");
			mdb_iob_setflags(mdb.m_out, oflag);
		}

		kt->k_activated = TRUE;
	}

	(void) mdb_tgt_register_dcmds(t, &kt_dcmds[0], MDB_MOD_FORCE);

	/* Export some of our registers as named variables */
	mdb_tgt_register_regvars(t, kt->k_rds, &reg_disc, MDB_NV_RDONLY);

	mdb_tgt_elf_export(kt->k_file);
}

void
kt_deactivate(mdb_tgt_t *t)
{
	kt_data_t *kt = t->t_data;

	const mdb_tgt_regdesc_t *rdp;
	const mdb_dcmd_t *dcp;

	for (rdp = kt->k_rds; rdp->rd_name != NULL; rdp++) {
		mdb_var_t *v;

		if (!(rdp->rd_flags & MDB_TGT_R_EXPORT))
			continue; /* Didn't export register as a variable */

		if ((v = mdb_nv_lookup(&mdb.m_nv, rdp->rd_name)) != NULL) {
			v->v_flags &= ~MDB_NV_PERSIST;
			mdb_nv_remove(&mdb.m_nv, v);
		}
	}

	for (dcp = &kt_dcmds[0]; dcp->dc_name != NULL; dcp++) {
		if (mdb_module_remove_dcmd(t->t_module, dcp->dc_name) == -1)
			warn("failed to remove dcmd %s", dcp->dc_name);
	}

	mdb_prop_postmortem = FALSE;
	mdb_prop_kernel = FALSE;
	mdb_prop_datamodel = MDB_TGT_MODEL_UNKNOWN;
}

/*ARGSUSED*/
const char *
kt_name(mdb_tgt_t *t)
{
	return ("kvm");
}

const char *
kt_platform(mdb_tgt_t *t)
{
	kt_data_t *kt = t->t_data;
	return (kt->k_platform);
}

int
kt_uname(mdb_tgt_t *t, struct utsname *utsp)
{
	return (mdb_tgt_readsym(t, MDB_TGT_AS_VIRT, utsp,
	    sizeof (struct utsname), MDB_TGT_OBJ_EXEC, "utsname"));
}

/*ARGSUSED*/
int
kt_dmodel(mdb_tgt_t *t)
{
	return (MDB_TGT_MODEL_NATIVE);
}

ssize_t
kt_aread(mdb_tgt_t *t, mdb_tgt_as_t as, void *buf,
    size_t nbytes, mdb_tgt_addr_t addr)
{
	kt_data_t *kt = t->t_data;
	ssize_t rval;

	if ((rval = kt->k_kb_ops->kb_aread(kt->k_cookie, addr, buf,
	    nbytes, as)) == -1)
		return (set_errno(EMDB_NOMAP));

	return (rval);
}

ssize_t
kt_awrite(mdb_tgt_t *t, mdb_tgt_as_t as, const void *buf,
    size_t nbytes, mdb_tgt_addr_t addr)
{
	kt_data_t *kt = t->t_data;
	ssize_t rval;

	if ((rval = kt->k_kb_ops->kb_awrite(kt->k_cookie, addr, buf,
	    nbytes, as)) == -1)
		return (set_errno(EMDB_NOMAP));

	return (rval);
}

ssize_t
kt_vread(mdb_tgt_t *t, void *buf, size_t nbytes, uintptr_t addr)
{
	kt_data_t *kt = t->t_data;
	ssize_t rval;

	if ((rval = kt->k_kb_ops->kb_kread(kt->k_cookie, addr, buf,
	    nbytes)) == -1)
		return (set_errno(EMDB_NOMAP));

	return (rval);
}

ssize_t
kt_vwrite(mdb_tgt_t *t, const void *buf, size_t nbytes, uintptr_t addr)
{
	kt_data_t *kt = t->t_data;
	ssize_t rval;

	if ((rval = kt->k_kb_ops->kb_kwrite(kt->k_cookie, addr, buf,
	    nbytes)) == -1)
		return (set_errno(EMDB_NOMAP));

	return (rval);
}

ssize_t
kt_fread(mdb_tgt_t *t, void *buf, size_t nbytes, uintptr_t addr)
{
	return (kt_vread(t, buf, nbytes, addr));
}

ssize_t
kt_fwrite(mdb_tgt_t *t, const void *buf, size_t nbytes, uintptr_t addr)
{
	return (kt_vwrite(t, buf, nbytes, addr));
}

ssize_t
kt_pread(mdb_tgt_t *t, void *buf, size_t nbytes, physaddr_t addr)
{
	kt_data_t *kt = t->t_data;
	ssize_t rval;

	if ((rval = kt->k_kb_ops->kb_pread(kt->k_cookie, addr, buf,
	    nbytes)) == -1)
		return (set_errno(EMDB_NOMAP));

	return (rval);
}

ssize_t
kt_pwrite(mdb_tgt_t *t, const void *buf, size_t nbytes, physaddr_t addr)
{
	kt_data_t *kt = t->t_data;
	ssize_t rval;

	if ((rval = kt->k_kb_ops->kb_pwrite(kt->k_cookie, addr, buf,
	    nbytes)) == -1)
		return (set_errno(EMDB_NOMAP));

	return (rval);
}

int
kt_vtop(mdb_tgt_t *t, mdb_tgt_as_t as, uintptr_t va, physaddr_t *pap)
{
	kt_data_t *kt = t->t_data;

	struct as *asp;
	physaddr_t pa;
	mdb_module_t *mod;
	mdb_var_t *v;
	int (*fptr)(uintptr_t, struct as *, physaddr_t *);

	switch ((uintptr_t)as) {
	case (uintptr_t)MDB_TGT_AS_PHYS:
	case (uintptr_t)MDB_TGT_AS_FILE:
	case (uintptr_t)MDB_TGT_AS_IO:
		return (set_errno(EINVAL));
	case (uintptr_t)MDB_TGT_AS_VIRT:
		asp = kt->k_as;
		break;
	default:
		asp = (struct as *)as;
	}

	if ((pa = kt->k_kb_ops->kb_vtop(kt->k_cookie, asp, va)) != -1ULL) {
		*pap = pa;
		return (0);
	}

	if ((v = mdb_nv_lookup(&mdb.m_modules, "unix")) != NULL &&
	    (mod = mdb_nv_get_cookie(v)) != NULL) {

		fptr = (int (*)(uintptr_t, struct as *, physaddr_t *))
		    dlsym(mod->mod_hdl, "platform_vtop");

		if ((fptr != NULL) && ((*fptr)(va, asp, pap) == 0))
			return (0);
	}

	return (set_errno(EMDB_NOMAP));
}

int
kt_lookup_by_name(mdb_tgt_t *t, const char *obj, const char *name,
    GElf_Sym *symp, mdb_syminfo_t *sip)
{
	kt_data_t *kt = t->t_data;
	kt_module_t *km, kmod;
	mdb_var_t *v;
	int n;

	/*
	 * To simplify the implementation, we create a fake module on the stack
	 * which is "prepended" to k_modlist and whose symtab is kt->k_symtab.
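	 * This lets the MDB_TGT_OBJ_EXEC and MDB_TGT_OBJ_EVERY cases below
	 * search the primary symbol table and the per-module symbol tables
	 * with a single loop.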
	 */
	kmod.km_symtab = kt->k_symtab;
	kmod.km_list.ml_next = mdb_list_next(&kt->k_modlist);

	switch ((uintptr_t)obj) {
	case (uintptr_t)MDB_TGT_OBJ_EXEC:
		km = &kmod;
		n = 1;
		break;

	case (uintptr_t)MDB_TGT_OBJ_EVERY:
		km = &kmod;
		n = mdb_nv_size(&kt->k_modules) + 1;
		break;

	case (uintptr_t)MDB_TGT_OBJ_RTLD:
		obj = kt->k_rtld_name;
		/*FALLTHRU*/

	default:
		if ((v = mdb_nv_lookup(&kt->k_modules, obj)) == NULL)
			return (set_errno(EMDB_NOOBJ));

		km = mdb_nv_get_cookie(v);
		n = 1;

		if (km->km_symtab == NULL)
			kt_load_module(kt, t, km);
	}

	for (; n > 0; n--, km = mdb_list_next(km)) {
		if (mdb_gelf_symtab_lookup_by_name(km->km_symtab, name,
		    symp, &sip->sym_id) == 0) {
			sip->sym_table = MDB_TGT_SYMTAB;
			return (0);
		}
	}

	return (set_errno(EMDB_NOSYM));
}

int
kt_lookup_by_addr(mdb_tgt_t *t, uintptr_t addr, uint_t flags,
    char *buf, size_t nbytes, GElf_Sym *symp, mdb_syminfo_t *sip)
{
	kt_data_t *kt = t->t_data;
	kt_module_t kmods[3], *kmods_begin = &kmods[0], *kmods_end;
	const char *name;

	kt_module_t *km = &kmods[0];	/* Point km at first fake module */
	kt_module_t *sym_km = NULL;	/* Module associated with best sym */
	GElf_Sym sym;			/* Best symbol found so far if !exact */
	uint_t symid;			/* ID of best symbol found so far */

	/*
	 * To simplify the implementation, we create fake modules on the stack
	 * that are "prepended" to k_modlist and whose symtab is set to
	 * each of three special symbol tables, in order of precedence.
	 */
	km->km_symtab = mdb.m_prsym;

	if (kt->k_symtab != NULL) {
		km->km_list.ml_next = (mdb_list_t *)(km + 1);
		km = mdb_list_next(km);
		km->km_symtab = kt->k_symtab;
	}

	if (kt->k_dynsym != NULL) {
		km->km_list.ml_next = (mdb_list_t *)(km + 1);
		km = mdb_list_next(km);
		km->km_symtab = kt->k_dynsym;
	}

	km->km_list.ml_next = mdb_list_next(&kt->k_modlist);
	kmods_end = km;

	/*
	 * Now iterate over the list of fake and real modules. If the module
	 * has no symbol table and the address is in the text section,
	 * instantiate the module's symbol table. In exact mode, we can
	 * jump to 'found' immediately if we match. Otherwise we continue
	 * looking and improve our choice if we find a closer symbol.
	 */
	for (km = &kmods[0]; km != NULL; km = mdb_list_next(km)) {
		if (km->km_symtab == NULL && addr >= km->km_text_va &&
		    addr < km->km_text_va + km->km_text_size)
			kt_load_module(kt, t, km);

		if (mdb_gelf_symtab_lookup_by_addr(km->km_symtab, addr,
		    flags, buf, nbytes, symp, &sip->sym_id) != 0 ||
		    symp->st_value == 0)
			continue;

		if (flags & MDB_TGT_SYM_EXACT) {
			sym_km = km;
			goto found;
		}

		if (sym_km == NULL || mdb_gelf_sym_closer(symp, &sym, addr)) {
			sym_km = km;
			sym = *symp;
			symid = sip->sym_id;
		}
	}

	if (sym_km == NULL)
		return (set_errno(EMDB_NOSYMADDR));

	*symp = sym;	/* Copy our best symbol into the caller's symbol */
	sip->sym_id = symid;
found:
	/*
	 * Once we've found something, copy the final name into the caller's
	 * buffer and prefix it with the load object name if appropriate.
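	 * Symbols from real modules are printed in module`name form; symbols
	 * from the fake modules (the private, primary, and dynamic symbol
	 * tables) are copied without an object prefix.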
	 */
	if (sym_km != NULL) {
		name = mdb_gelf_sym_name(sym_km->km_symtab, symp);

		if (sym_km < kmods_begin || sym_km > kmods_end) {
			(void) mdb_snprintf(buf, nbytes, "%s`%s",
			    sym_km->km_name, name);
		} else if (nbytes > 0) {
			(void) strncpy(buf, name, nbytes);
			buf[nbytes - 1] = '\0';
		}

		if (sym_km->km_symtab == mdb.m_prsym)
			sip->sym_table = MDB_TGT_PRVSYM;
		else
			sip->sym_table = MDB_TGT_SYMTAB;
	} else {
		sip->sym_table = MDB_TGT_SYMTAB;
	}

	return (0);
}

static int
kt_symtab_func(void *data, const GElf_Sym *sym, const char *name, uint_t id)
{
	kt_symarg_t *argp = data;

	if (mdb_tgt_sym_match(sym, argp->sym_type)) {
		argp->sym_info.sym_id = id;

		return (argp->sym_cb(argp->sym_data, sym, name,
		    &argp->sym_info, argp->sym_obj));
	}

	return (0);
}

static void
kt_symtab_iter(mdb_gelf_symtab_t *gst, uint_t type, const char *obj,
    mdb_tgt_sym_f *cb, void *p)
{
	kt_symarg_t arg;

	arg.sym_cb = cb;
	arg.sym_data = p;
	arg.sym_type = type;
	arg.sym_info.sym_table = gst->gst_tabid;
	arg.sym_obj = obj;

	mdb_gelf_symtab_iter(gst, kt_symtab_func, &arg);
}

int
kt_symbol_iter(mdb_tgt_t *t, const char *obj, uint_t which, uint_t type,
    mdb_tgt_sym_f *cb, void *data)
{
	kt_data_t *kt = t->t_data;
	kt_module_t *km;

	mdb_gelf_symtab_t *symtab = NULL;
	mdb_var_t *v;

	switch ((uintptr_t)obj) {
	case (uintptr_t)MDB_TGT_OBJ_EXEC:
		if (which == MDB_TGT_SYMTAB)
			symtab = kt->k_symtab;
		else
			symtab = kt->k_dynsym;
		break;

	case (uintptr_t)MDB_TGT_OBJ_EVERY:
		if (which == MDB_TGT_DYNSYM) {
			symtab = kt->k_dynsym;
			obj = MDB_TGT_OBJ_EXEC;
			break;
		}

		mdb_nv_rewind(&kt->k_modules);
		while ((v = mdb_nv_advance(&kt->k_modules)) != NULL) {
			km = mdb_nv_get_cookie(v);

			if (km->km_symtab == NULL)
				kt_load_module(kt, t, km);

			if (km->km_symtab != NULL)
				kt_symtab_iter(km->km_symtab, type,
				    km->km_name, cb, data);
		}
		break;

	case (uintptr_t)MDB_TGT_OBJ_RTLD:
		obj = kt->k_rtld_name;
		/*FALLTHRU*/

	default:
		v = mdb_nv_lookup(&kt->k_modules, obj);

		if (v == NULL)
			return (set_errno(EMDB_NOOBJ));

		km = mdb_nv_get_cookie(v);

		if (km->km_symtab == NULL)
			kt_load_module(kt, t, km);

		symtab = km->km_symtab;
	}

	if (symtab)
		kt_symtab_iter(symtab, type, obj, cb, data);

	return (0);
}

static int
kt_mapping_walk(uintptr_t addr, const void *data, kt_maparg_t *marg)
{
	/*
	 * This is a bit sketchy but avoids problematic compilation of this
	 * target against the current VM implementation. Now that we have
	 * vmem, we can make this less broken and more informative by changing
	 * this code to invoke the vmem walker in the near future.
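	 * The anonymous kt_seg structure below mirrors only the leading base
	 * and size members of the kernel's seg structure, which is assumed to
	 * remain a stable prefix of its layout.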
	 */
	const struct kt_seg {
		caddr_t s_base;
		size_t s_size;
	} *segp = (const struct kt_seg *)data;

	mdb_map_t map;
	GElf_Sym sym;
	mdb_syminfo_t info;

	map.map_base = (uintptr_t)segp->s_base;
	map.map_size = segp->s_size;
	map.map_flags = MDB_TGT_MAP_R | MDB_TGT_MAP_W | MDB_TGT_MAP_X;

	if (kt_lookup_by_addr(marg->map_target, addr, MDB_TGT_SYM_EXACT,
	    map.map_name, MDB_TGT_MAPSZ, &sym, &info) == -1) {

		(void) mdb_iob_snprintf(map.map_name, MDB_TGT_MAPSZ,
		    "%lr", addr);
	}

	return (marg->map_cb(marg->map_data, &map, map.map_name));
}

int
kt_mapping_iter(mdb_tgt_t *t, mdb_tgt_map_f *func, void *private)
{
	kt_data_t *kt = t->t_data;
	kt_maparg_t m;

	m.map_target = t;
	m.map_cb = func;
	m.map_data = private;

	return (mdb_pwalk("seg", (mdb_walk_cb_t)kt_mapping_walk, &m,
	    (uintptr_t)kt->k_as));
}

static const mdb_map_t *
kt_module_to_map(kt_module_t *km, mdb_map_t *map)
{
	(void) strncpy(map->map_name, km->km_name, MDB_TGT_MAPSZ);
	map->map_name[MDB_TGT_MAPSZ - 1] = '\0';
	map->map_base = km->km_text_va;
	map->map_size = km->km_text_size;
	map->map_flags = MDB_TGT_MAP_R | MDB_TGT_MAP_W | MDB_TGT_MAP_X;

	return (map);
}

int
kt_object_iter(mdb_tgt_t *t, mdb_tgt_map_f *func, void *private)
{
	kt_data_t *kt = t->t_data;
	kt_module_t *km;
	mdb_map_t m;

	for (km = mdb_list_next(&kt->k_modlist); km; km = mdb_list_next(km)) {
		if (func(private, kt_module_to_map(km, &m), km->km_name) == -1)
			break;
	}

	return (0);
}

const mdb_map_t *
kt_addr_to_map(mdb_tgt_t *t, uintptr_t addr)
{
	kt_data_t *kt = t->t_data;
	kt_module_t *km;

	for (km = mdb_list_next(&kt->k_modlist); km; km = mdb_list_next(km)) {
		if (addr - km->km_text_va < km->km_text_size ||
		    addr - km->km_data_va < km->km_data_size ||
		    addr - km->km_bss_va < km->km_bss_size)
			return (kt_module_to_map(km, &kt->k_map));
	}

	(void) set_errno(EMDB_NOMAP);
	return (NULL);
}

const mdb_map_t *
kt_name_to_map(mdb_tgt_t *t, const char *name)
{
	kt_data_t *kt = t->t_data;
	kt_module_t *km;
	mdb_map_t m;

	/*
	 * If name is MDB_TGT_OBJ_EXEC, return the first module on the list,
	 * which will be unix since we keep k_modlist in load order.
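	 * MDB_TGT_OBJ_RTLD is translated to k_rtld_name (krtld, or unix when
	 * krtld is rolled into unix) and then looked up like any other module.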
	 */
	if (name == MDB_TGT_OBJ_EXEC)
		return (kt_module_to_map(mdb_list_next(&kt->k_modlist), &m));

	if (name == MDB_TGT_OBJ_RTLD)
		name = kt->k_rtld_name;

	if ((km = kt_module_by_name(kt, name)) != NULL)
		return (kt_module_to_map(km, &m));

	(void) set_errno(EMDB_NOOBJ);
	return (NULL);
}

static ctf_file_t *
kt_load_ctfdata(mdb_tgt_t *t, kt_module_t *km)
{
	kt_data_t *kt = t->t_data;
	int err;

	if (km->km_ctfp != NULL)
		return (km->km_ctfp);

	if (km->km_ctf_va == 0) {
		(void) set_errno(EMDB_NOCTF);
		return (NULL);
	}

	if (km->km_symtab == NULL)
		kt_load_module(t->t_data, t, km);

	if ((km->km_ctf_buf = mdb_alloc(km->km_ctf_size, UM_NOSLEEP)) == NULL) {
		warn("failed to allocate memory to load %s debugging "
		    "information", km->km_name);
		return (NULL);
	}

	if (mdb_tgt_vread(t, km->km_ctf_buf, km->km_ctf_size,
	    km->km_ctf_va) != km->km_ctf_size) {
		warn("failed to read %lu bytes of debug data for %s at %p",
		    (ulong_t)km->km_ctf_size, km->km_name,
		    (void *)km->km_ctf_va);
		mdb_free(km->km_ctf_buf, km->km_ctf_size);
		km->km_ctf_buf = NULL;
		return (NULL);
	}

	if ((km->km_ctfp = mdb_ctf_bufopen((const void *)km->km_ctf_buf,
	    km->km_ctf_size, km->km_symbuf, &km->km_symtab_hdr,
	    km->km_strtab, &km->km_strtab_hdr, &err)) == NULL) {
		mdb_free(km->km_ctf_buf, km->km_ctf_size);
		km->km_ctf_buf = NULL;
		(void) set_errno(ctf_to_errno(err));
		return (NULL);
	}

	mdb_dprintf(MDB_DBG_KMOD, "loaded %lu bytes of CTF data for %s\n",
	    (ulong_t)km->km_ctf_size, km->km_name);

	if (ctf_parent_name(km->km_ctfp) != NULL) {
		mdb_var_t *v;

		if ((v = mdb_nv_lookup(&kt->k_modules,
		    ctf_parent_name(km->km_ctfp))) == NULL) {
			warn("failed to load CTF data for %s - parent %s not "
			    "loaded\n", km->km_name,
			    ctf_parent_name(km->km_ctfp));
		}

		if (v != NULL) {
			kt_module_t *pm = mdb_nv_get_cookie(v);

			if (pm->km_ctfp == NULL)
				(void) kt_load_ctfdata(t, pm);

			if (pm->km_ctfp != NULL && ctf_import(km->km_ctfp,
			    pm->km_ctfp) == CTF_ERR) {
				warn("failed to import parent types into "
				    "%s: %s\n", km->km_name,
				    ctf_errmsg(ctf_errno(km->km_ctfp)));
			}
		}
	}

	return (km->km_ctfp);
}

ctf_file_t *
kt_addr_to_ctf(mdb_tgt_t *t, uintptr_t addr)
{
	kt_data_t *kt = t->t_data;
	kt_module_t *km;

	for (km = mdb_list_next(&kt->k_modlist); km; km = mdb_list_next(km)) {
		if (addr - km->km_text_va < km->km_text_size ||
		    addr - km->km_data_va < km->km_data_size ||
		    addr - km->km_bss_va < km->km_bss_size)
			return (kt_load_ctfdata(t, km));
	}

	(void) set_errno(EMDB_NOMAP);
	return (NULL);
}

ctf_file_t *
kt_name_to_ctf(mdb_tgt_t *t, const char *name)
{
	kt_data_t *kt = t->t_data;
	kt_module_t *km;

	if (name == MDB_TGT_OBJ_EXEC)
		name = KT_CTFPARENT;
	else if (name == MDB_TGT_OBJ_RTLD)
		name = kt->k_rtld_name;

	if ((km = kt_module_by_name(kt, name)) != NULL)
		return (kt_load_ctfdata(t, km));

	(void) set_errno(EMDB_NOOBJ);
	return (NULL);
}

/*ARGSUSED*/
int
kt_status(mdb_tgt_t *t, mdb_tgt_status_t *tsp)
{
	kt_data_t *kt = t->t_data;
	bzero(tsp, sizeof (mdb_tgt_status_t));
	tsp->st_state = (kt->k_xpv_domu || (kt->k_dumphdr != NULL)) ?
	    MDB_TGT_DEAD : MDB_TGT_RUNNING;
	return (0);
}

static ssize_t
kt_xd_dumphdr(mdb_tgt_t *t, void *buf, size_t nbytes)
{
	kt_data_t *kt = t->t_data;

	if (buf == NULL && nbytes == 0)
		return (sizeof (dumphdr_t));

	if (kt->k_dumphdr == NULL)
		return (set_errno(ENODATA));

	nbytes = MIN(nbytes, sizeof (dumphdr_t));
	bcopy(kt->k_dumphdr, buf, nbytes);

	return (nbytes);
}

void
kt_destroy(mdb_tgt_t *t)
{
	kt_data_t *kt = t->t_data;
	kt_module_t *km, *nkm;

	(void) mdb_module_unload(KT_MODULE, 0);

	if (kt->k_regs != NULL)
		mdb_free(kt->k_regs, kt->k_regsize);

	if (kt->k_symtab != NULL)
		mdb_gelf_symtab_destroy(kt->k_symtab);

	if (kt->k_dynsym != NULL)
		mdb_gelf_symtab_destroy(kt->k_dynsym);

	if (kt->k_dumphdr != NULL)
		mdb_free(kt->k_dumphdr, sizeof (dumphdr_t));

	mdb_gelf_destroy(kt->k_file);

	(void) kt->k_kb_ops->kb_close(kt->k_cookie);

	for (km = mdb_list_next(&kt->k_modlist); km; km = nkm) {
		if (km->km_symtab)
			mdb_gelf_symtab_destroy(km->km_symtab);

		if (km->km_data)
			mdb_free(km->km_data, km->km_datasz);

		if (km->km_ctfp)
			ctf_close(km->km_ctfp);

		if (km->km_ctf_buf != NULL)
			mdb_free(km->km_ctf_buf, km->km_ctf_size);

		nkm = mdb_list_next(km);
		strfree(km->km_name);
		mdb_free(km, sizeof (kt_module_t));
	}

	mdb_nv_destroy(&kt->k_modules);

	strfree(kt->k_kvmfile);
	if (kt->k_symfile != NULL)
		strfree(kt->k_symfile);

	mdb_free(kt, sizeof (kt_data_t));
}

static int
kt_data_stub(void)
{
	return (-1);
}

int
mdb_kvm_tgt_create(mdb_tgt_t *t, int argc, const char *argv[])
{
	kt_data_t *kt = mdb_zalloc(sizeof (kt_data_t), UM_SLEEP);
	mdb_kb_ops_t *kvm_kb_ops = libkvm_kb_ops();
	int oflag = (t->t_flags & MDB_TGT_F_RDWR) ? O_RDWR : O_RDONLY;
	struct utsname uts;
	GElf_Sym sym;
	pgcnt_t pmem;


	if (argc == 2) {
		kt->k_symfile = strdup(argv[0]);
		kt->k_kvmfile = strdup(argv[1]);

		kt->k_cookie = kvm_kb_ops->kb_open(kt->k_symfile,
		    kt->k_kvmfile, NULL, oflag, (char *)mdb.m_pname);

		if (kt->k_cookie == NULL)
			goto err;

		kt->k_xpv_domu = 0;
		kt->k_kb_ops = kvm_kb_ops;
	} else {
#ifndef __x86
		return (set_errno(EINVAL));
#else
		mdb_kb_ops_t *(*getops)(void);

		kt->k_symfile = NULL;
		kt->k_kvmfile = strdup(argv[0]);

		getops = (mdb_kb_ops_t *(*)())dlsym(RTLD_NEXT, "mdb_kb_ops");

		/*
		 * Load mdb_kb if it's not already loaded during
		 * identification.
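		 * (mdb_kb is expected to supply the alternate kb_ops vector
		 * used for hypervisor dump images; without it this flavor of
		 * target cannot be opened.)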
		 */
		if (getops == NULL) {
			(void) mdb_module_load("mdb_kb",
			    MDB_MOD_GLOBAL | MDB_MOD_SILENT);
			getops = (mdb_kb_ops_t *(*)())
			    dlsym(RTLD_NEXT, "mdb_kb_ops");
		}

		if (getops == NULL || (kt->k_kb_ops = getops()) == NULL) {
			warn("failed to load KVM backend ops\n");
			goto err;
		}

		kt->k_cookie = kt->k_kb_ops->kb_open(NULL, kt->k_kvmfile, NULL,
		    oflag, (char *)mdb.m_pname);

		if (kt->k_cookie == NULL)
			goto err;

		kt->k_xpv_domu = 1;
#endif
	}

	if ((kt->k_fio = kt->k_kb_ops->kb_sym_io(kt->k_cookie,
	    kt->k_symfile)) == NULL)
		goto err;

	if ((kt->k_file = mdb_gelf_create(kt->k_fio,
	    ET_EXEC, GF_FILE)) == NULL) {
		mdb_io_destroy(kt->k_fio);
		goto err;
	}

	kt->k_symtab =
	    mdb_gelf_symtab_create_file(kt->k_file, SHT_SYMTAB, MDB_TGT_SYMTAB);

	kt->k_dynsym =
	    mdb_gelf_symtab_create_file(kt->k_file, SHT_DYNSYM, MDB_TGT_DYNSYM);

	if (mdb_gelf_symtab_lookup_by_name(kt->k_symtab, "kas",
	    &sym, NULL) == -1) {
		warn("'kas' symbol is missing from kernel\n");
		goto err;
	}

	kt->k_as = (struct as *)(uintptr_t)sym.st_value;

	if (mdb_gelf_symtab_lookup_by_name(kt->k_symtab, "platform",
	    &sym, NULL) == -1) {
		warn("'platform' symbol is missing from kernel\n");
		goto err;
	}

	if (kt->k_kb_ops->kb_kread(kt->k_cookie, sym.st_value,
	    kt->k_platform, MAXNAMELEN) <= 0) {
		warn("failed to read 'platform' string from kernel");
		goto err;
	}

	if (mdb_gelf_symtab_lookup_by_name(kt->k_symtab, "utsname",
	    &sym, NULL) == -1) {
		warn("'utsname' symbol is missing from kernel\n");
		goto err;
	}

	if (kt->k_kb_ops->kb_kread(kt->k_cookie, sym.st_value, &uts,
	    sizeof (uts)) <= 0) {
		warn("failed to read 'utsname' struct from kernel");
		goto err;
	}

	kt->k_dump_print_content = (void (*)())kt_data_stub;
	kt->k_dump_find_curproc = kt_data_stub;

	/*
	 * We set k_ctfvalid based on the presence of the CTF vmem arena
	 * symbol. The CTF members were added to the end of struct module at
	 * the same time, so this allows us to know whether we can use them.
	 */
	if (mdb_gelf_symtab_lookup_by_name(kt->k_symtab, "ctf_arena", &sym,
	    NULL) == 0 && !(mdb.m_flags & MDB_FL_NOCTF))
		kt->k_ctfvalid = 1;

	(void) mdb_nv_create(&kt->k_modules, UM_SLEEP);
	t->t_pshandle = kt->k_cookie;
	t->t_data = kt;

#if defined(__sparc)
#if defined(__sparcv9)
	kt_sparcv9_init(t);
#else
	kt_sparcv7_init(t);
#endif
#elif defined(__amd64)
	kt_amd64_init(t);
#elif defined(__i386)
	kt_ia32_init(t);
#else
#error "unknown ISA"
#endif

	/*
	 * We read our representative thread ID (address) from the kernel's
	 * global panic_thread. It will remain 0 if this is a live kernel.
	 */
	(void) mdb_tgt_readsym(t, MDB_TGT_AS_VIRT, &kt->k_tid, sizeof (void *),
	    MDB_TGT_OBJ_EXEC, "panic_thread");

	if ((mdb.m_flags & MDB_FL_ADB) && mdb_tgt_readsym(t, MDB_TGT_AS_VIRT,
	    &pmem, sizeof (pmem), MDB_TGT_OBJ_EXEC, "physmem") == sizeof (pmem))
		mdb_printf("physmem %lx\n", (ulong_t)pmem);

	/*
	 * If this is not a live kernel or a hypervisor dump, read the dump
	 * header. We don't have to sanity-check the header, as the open would
	 * not have succeeded otherwise.
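	 * (A live kernel is recognized here by k_symfile being /dev/ksyms,
	 * and a hypervisor dump by k_xpv_domu.)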
	 */
	if (!kt->k_xpv_domu && strcmp(kt->k_symfile, "/dev/ksyms") != 0) {
		mdb_io_t *vmcore;

		kt->k_dumphdr = mdb_alloc(sizeof (dumphdr_t), UM_SLEEP);

		if ((vmcore = mdb_fdio_create_path(NULL, kt->k_kvmfile,
		    O_RDONLY, 0)) == NULL) {
			mdb_warn("failed to open %s", kt->k_kvmfile);
			goto err;
		}

		if (IOP_READ(vmcore, kt->k_dumphdr, sizeof (dumphdr_t)) !=
		    sizeof (dumphdr_t)) {
			mdb_warn("failed to read dump header");
			mdb_io_destroy(vmcore);
			goto err;
		}

		mdb_io_destroy(vmcore);

		(void) mdb_tgt_xdata_insert(t, "dumphdr",
		    "dump header structure", kt_xd_dumphdr);
	}

	return (0);

err:
	if (kt->k_dumphdr != NULL)
		mdb_free(kt->k_dumphdr, sizeof (dumphdr_t));

	if (kt->k_symtab != NULL)
		mdb_gelf_symtab_destroy(kt->k_symtab);

	if (kt->k_dynsym != NULL)
		mdb_gelf_symtab_destroy(kt->k_dynsym);

	if (kt->k_file != NULL)
		mdb_gelf_destroy(kt->k_file);

	if (kt->k_cookie != NULL)
		(void) kt->k_kb_ops->kb_close(kt->k_cookie);

	mdb_free(kt, sizeof (kt_data_t));
	return (-1);
}

int
mdb_kvm_is_dump(mdb_io_t *io)
{
	dumphdr_t h;

	(void) IOP_SEEK(io, (off64_t)0L, SEEK_SET);

	return (IOP_READ(io, &h, sizeof (dumphdr_t)) == sizeof (dumphdr_t) &&
	    h.dump_magic == DUMP_MAGIC);
}

int
mdb_kvm_is_compressed_dump(mdb_io_t *io)
{
	dumphdr_t h;

	(void) IOP_SEEK(io, (off64_t)0L, SEEK_SET);

	return (IOP_READ(io, &h, sizeof (dumphdr_t)) == sizeof (dumphdr_t) &&
	    h.dump_magic == DUMP_MAGIC &&
	    (h.dump_flags & DF_COMPRESSED) != 0);
}