11626 introduce /etc/versions/build
11627 clean up UUID code for ::status
--- old/usr/src/cmd/mdb/common/modules/mdb_ks/mdb_ks.c
+++ new/usr/src/cmd/mdb/common/modules/mdb_ks/mdb_ks.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1990, 2010, Oracle and/or its affiliates. All rights reserved.
23 - * Copyright 2017 Joyent, Inc.
23 + * Copyright 2019 Joyent, Inc.
24 24 */
25 25
26 26 /*
27 27 * Mdb kernel support module. This module is loaded automatically when the
28 28 * kvm target is initialized. Any global functions declared here are exported
29 29 * for the resolution of symbols in subsequently loaded modules.
30 30 *
31 31 * WARNING: Do not assume that static variables in mdb_ks will be initialized
32 32 * to zero.
33 33 */
34 34
35 35 #include <mdb/mdb_target.h>
36 36 #include <mdb/mdb_param.h>
37 37 #include <mdb/mdb_modapi.h>
38 38 #include <mdb/mdb_ks.h>
39 39
40 40 #include <sys/types.h>
41 41 #include <sys/procfs.h>
42 42 #include <sys/proc.h>
43 43 #include <sys/dnlc.h>
44 44 #include <sys/autoconf.h>
45 45 #include <sys/machelf.h>
46 46 #include <sys/modctl.h>
47 47 #include <sys/hwconf.h>
48 48 #include <sys/kobj.h>
49 49 #include <sys/fs/autofs.h>
50 50 #include <sys/ddi_impldefs.h>
51 51 #include <sys/refstr_impl.h>
52 52 #include <sys/cpuvar.h>
53 53 #include <sys/dlpi.h>
54 54 #include <sys/clock_impl.h>
55 55 #include <sys/swap.h>
56 56 #include <errno.h>
57 57
58 58 #include <vm/seg_vn.h>
59 59 #include <vm/page.h>
60 60
61 61 #define MDB_PATH_NELEM 256 /* Maximum path components */
62 62
63 63 typedef struct mdb_path {
64 64 size_t mdp_nelem; /* Number of components */
65 65 uint_t mdp_complete; /* Path completely resolved? */
66 66 uintptr_t mdp_vnode[MDB_PATH_NELEM]; /* Array of vnode_t addresses */
67 67 char *mdp_name[MDB_PATH_NELEM]; /* Array of name components */
68 68 } mdb_path_t;
69 69
70 70 static int mdb_autonode2path(uintptr_t, mdb_path_t *);
71 71 static int mdb_sprintpath(char *, size_t, mdb_path_t *);
72 72
73 73 /*
74 74 * Kernel parameters from <sys/param.h> which we keep in-core:
75 75 */
76 76 unsigned long _mdb_ks_pagesize;
77 77 unsigned int _mdb_ks_pageshift;
78 78 unsigned long _mdb_ks_pageoffset;
79 79 unsigned long long _mdb_ks_pagemask;
80 80 unsigned long _mdb_ks_mmu_pagesize;
81 81 unsigned int _mdb_ks_mmu_pageshift;
82 82 unsigned long _mdb_ks_mmu_pageoffset;
83 83 unsigned long _mdb_ks_mmu_pagemask;
84 84 uintptr_t _mdb_ks_kernelbase;
85 85 uintptr_t _mdb_ks_userlimit;
86 86 uintptr_t _mdb_ks_userlimit32;
87 87 uintptr_t _mdb_ks_argsbase;
88 88 unsigned long _mdb_ks_msg_bsize;
89 89 unsigned long _mdb_ks_defaultstksz;
90 90 int _mdb_ks_ncpu;
91 91 int _mdb_ks_ncpu_log2;
92 92 int _mdb_ks_ncpu_p2;
93 93
94 94 /*
95 95 * In-core copy of DNLC information:
96 96 */
97 97 #define MDB_DNLC_HSIZE 1024
98 98 #define MDB_DNLC_HASH(vp) (((uintptr_t)(vp) >> 3) & (MDB_DNLC_HSIZE - 1))
99 99 #define MDB_DNLC_NCACHE_SZ(ncp) (sizeof (ncache_t) + (ncp)->namlen)
100 100 #define MDB_DNLC_MAX_RETRY 4
101 101
102 102 static ncache_t **dnlc_hash; /* mdb's hash array of dnlc entries */
103 103
104 104 /*
105 105 * copy of page_hash-related data
106 106 */
107 107 static int page_hash_loaded;
108 108 static long mdb_page_hashsz;
109 109 static uint_t mdb_page_hashsz_shift; /* Needed for PAGE_HASH_FUNC */
110 110 static uintptr_t mdb_page_hash; /* base address of page hash */
111 111 #define page_hashsz mdb_page_hashsz
112 112 #define page_hashsz_shift mdb_page_hashsz_shift
113 113
114 114 /*
115 115 * This will be the location of the vnodeops pointer for "autofs_vnodeops".
116 116 * The pointer still needs to be read with mdb_vread() to get the location
117 117 * of the vnodeops structure for autofs.
118 118 */
119 119 static struct vnodeops *autofs_vnops_ptr;
120 120
121 121 /*
122 122 * STREAMS queue registrations:
123 123 */
124 124 typedef struct mdb_qinfo {
125 125 const mdb_qops_t *qi_ops; /* Address of ops vector */
126 126 uintptr_t qi_addr; /* Address of qinit structure (key) */
127 127 struct mdb_qinfo *qi_next; /* Next qinfo in list */
128 128 } mdb_qinfo_t;
129 129
130 130 static mdb_qinfo_t *qi_head; /* Head of qinfo chain */
131 131
132 132 /*
133 133 * Device naming callback structure:
134 134 */
135 135 typedef struct nm_query {
136 136 const char *nm_name; /* Device driver name [in/out] */
137 137 major_t nm_major; /* Device major number [in/out] */
138 138 ushort_t nm_found; /* Did we find a match? [out] */
139 139 } nm_query_t;
140 140
141 141 /*
142 142 * Address-to-modctl callback structure:
143 143 */
144 144 typedef struct a2m_query {
145 145 uintptr_t a2m_addr; /* Virtual address [in] */
146 146 uintptr_t a2m_where; /* Modctl address [out] */
147 147 } a2m_query_t;
148 148
149 149 /*
150 150 * Segment-to-mdb_map callback structure:
151 151 */
152 152 typedef struct {
153 153 struct seg_ops *asm_segvn_ops; /* Address of segvn ops [in] */
154 154 void (*asm_callback)(const struct mdb_map *, void *); /* Callb [in] */
155 155 void *asm_cbdata; /* Callback data [in] */
156 156 } asmap_arg_t;
157 157
158 158 static void
159 159 dnlc_free(void)
160 160 {
161 161 ncache_t *ncp, *next;
162 162 int i;
163 163
164 164 if (dnlc_hash == NULL) {
165 165 return;
166 166 }
167 167
168 168 /*
169 169 * Free up current dnlc entries
170 170 */
171 171 for (i = 0; i < MDB_DNLC_HSIZE; i++) {
172 172 for (ncp = dnlc_hash[i]; ncp; ncp = next) {
173 173 next = ncp->hash_next;
174 174 mdb_free(ncp, MDB_DNLC_NCACHE_SZ(ncp));
175 175 }
176 176 }
177 177 mdb_free(dnlc_hash, MDB_DNLC_HSIZE * sizeof (ncache_t *));
178 178 dnlc_hash = NULL;
179 179 }
180 180
181 181 char bad_dnlc[] = "inconsistent dnlc chain: %d, ncache va: %p"
182 182 " - continuing with the rest\n";
183 183
184 184 static int
185 185 dnlc_load(void)
186 186 {
187 187 int i; /* hash index */
188 188 int retry_cnt = 0;
189 189 int skip_bad_chains = 0;
190 190 int nc_hashsz; /* kernel hash array size */
191 191 uintptr_t nc_hash_addr; /* kernel va of ncache hash array */
192 192 uintptr_t head; /* kernel va of head of hash chain */
193 193
194 194 /*
195 195 * If we've already cached the DNLC and we're looking at a dump,
196 196 * our cache is good forever, so don't bother re-loading.
197 197 */
198 198 if (dnlc_hash && mdb_prop_postmortem) {
199 199 return (0);
200 200 }
201 201
202 202 /*
203 203 * For a core dump, retries won't help.
204 204 * Just print and skip any bad chains.
205 205 */
206 206 if (mdb_prop_postmortem) {
207 207 skip_bad_chains = 1;
208 208 }
209 209 retry:
210 210 if (retry_cnt++ >= MDB_DNLC_MAX_RETRY) {
211 211 /*
212 212 * Give up retrying the rapidly changing dnlc.
213 213 * Just print and skip any bad chains
214 214 */
215 215 skip_bad_chains = 1;
216 216 }
217 217
218 218 dnlc_free(); /* Free up the mdb hashed dnlc - if any */
219 219
220 220 /*
221 221 * Although nc_hashsz and the location of nc_hash don't currently
222 222 * change, they may in the future with a more dynamic dnlc.
223 223 * So always read these values afresh.
224 224 */
225 225 if (mdb_readvar(&nc_hashsz, "nc_hashsz") == -1) {
226 226 mdb_warn("failed to read nc_hashsz");
227 227 return (-1);
228 228 }
229 229 if (mdb_readvar(&nc_hash_addr, "nc_hash") == -1) {
230 230 mdb_warn("failed to read nc_hash");
231 231 return (-1);
232 232 }
233 233
234 234 /*
235 235 * Allocate the mdb dnlc hash array
236 236 */
237 237 dnlc_hash = mdb_zalloc(MDB_DNLC_HSIZE * sizeof (ncache_t *), UM_SLEEP);
238 238
239 239 /* for each kernel hash chain */
240 240 for (i = 0, head = nc_hash_addr; i < nc_hashsz;
241 241 i++, head += sizeof (nc_hash_t)) {
242 242 nc_hash_t nch; /* kernel hash chain header */
243 243 ncache_t *ncp; /* name cache pointer */
244 244 int hash; /* mdb hash value */
245 245 uintptr_t nc_va; /* kernel va of next ncache */
246 246 uintptr_t ncprev_va; /* kernel va of previous ncache */
247 247 int khash; /* kernel dnlc hash value */
248 248 uchar_t namelen; /* name length */
249 249 ncache_t nc; /* name cache entry */
250 250 int nc_size; /* size of a name cache entry */
251 251
252 252 /*
253 253 * We read each element of the nc_hash array individually
254 254 * just before we process the entries in its chain. This is
255 255 * because the chain can change so rapidly on a running system.
256 256 */
257 257 if (mdb_vread(&nch, sizeof (nc_hash_t), head) == -1) {
258 258 mdb_warn("failed to read nc_hash chain header %d", i);
259 259 dnlc_free();
260 260 return (-1);
261 261 }
262 262
263 263 ncprev_va = head;
264 264 nc_va = (uintptr_t)(nch.hash_next);
265 265 /* for each entry in the chain */
266 266 while (nc_va != head) {
267 267 /*
268 268 * The size of the ncache entries varies
269 269 * because the name is appended to the structure.
270 270 * So we read in the structure then re-read
271 271 * for the structure plus name.
272 272 */
273 273 if (mdb_vread(&nc, sizeof (ncache_t), nc_va) == -1) {
274 274 if (skip_bad_chains) {
275 275 mdb_warn(bad_dnlc, i, nc_va);
276 276 break;
277 277 }
278 278 goto retry;
279 279 }
280 280 nc_size = MDB_DNLC_NCACHE_SZ(&nc);
281 281 ncp = mdb_alloc(nc_size, UM_SLEEP);
282 282 if (mdb_vread(ncp, nc_size - 1, nc_va) == -1) {
283 283 mdb_free(ncp, nc_size);
284 284 if (skip_bad_chains) {
285 285 mdb_warn(bad_dnlc, i, nc_va);
286 286 break;
287 287 }
288 288 goto retry;
289 289 }
290 290
291 291 /*
292 292 * Check for chain consistency
293 293 */
294 294 if ((uintptr_t)ncp->hash_prev != ncprev_va) {
295 295 mdb_free(ncp, nc_size);
296 296 if (skip_bad_chains) {
297 297 mdb_warn(bad_dnlc, i, nc_va);
298 298 break;
299 299 }
300 300 goto retry;
301 301 }
302 302 /*
303 303 * Terminate the new name with a null.
304 304 * Note, we allowed space for this null when
305 305 * allocating space for the entry.
306 306 */
307 307 ncp->name[ncp->namlen] = '\0';
308 308
309 309 /*
310 310 * Validate new entry by re-hashing using the
311 311 * kernel dnlc hash function and comparing the hash
312 312 */
313 313 DNLCHASH(ncp->name, ncp->dp, khash, namelen);
314 314 if ((namelen != ncp->namlen) ||
315 315 (khash != ncp->hash)) {
316 316 mdb_free(ncp, nc_size);
317 317 if (skip_bad_chains) {
318 318 mdb_warn(bad_dnlc, i, nc_va);
319 319 break;
320 320 }
321 321 goto retry;
322 322 }
323 323
324 324 /*
325 325 * Finally put the validated entry into the mdb
326 326 * hash chains. Reuse the kernel next hash field
327 327 * for the mdb hash chain pointer.
328 328 */
329 329 hash = MDB_DNLC_HASH(ncp->vp);
330 330 ncprev_va = nc_va;
331 331 nc_va = (uintptr_t)(ncp->hash_next);
332 332 ncp->hash_next = dnlc_hash[hash];
333 333 dnlc_hash[hash] = ncp;
334 334 }
335 335 }
336 336 return (0);
337 337 }
338 338
339 339 /*ARGSUSED*/
340 340 int
341 341 dnlcdump(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
342 342 {
343 343 ncache_t *ent;
344 344 int i;
345 345
346 346 if ((flags & DCMD_ADDRSPEC) || argc != 0)
347 347 return (DCMD_USAGE);
348 348
349 349 if (dnlc_load() == -1)
350 350 return (DCMD_ERR);
351 351
352 352 mdb_printf("%<u>%-?s %-?s %-32s%</u>\n", "VP", "DVP", "NAME");
353 353
354 354 for (i = 0; i < MDB_DNLC_HSIZE; i++) {
355 355 for (ent = dnlc_hash[i]; ent != NULL; ent = ent->hash_next) {
356 356 mdb_printf("%0?p %0?p %s\n",
357 357 ent->vp, ent->dp, ent->name);
358 358 }
359 359 }
360 360
361 361 return (DCMD_OK);
362 362 }
363 363
364 364 static int
365 365 mdb_sprintpath(char *buf, size_t len, mdb_path_t *path)
366 366 {
367 367 char *s = buf;
368 368 int i;
369 369
370 370 if (len < sizeof ("/..."))
371 371 return (-1);
372 372
373 373 if (!path->mdp_complete) {
374 374 (void) strcpy(s, "??");
375 375 s += 2;
376 376
377 377 if (path->mdp_nelem == 0)
378 378 return (-1);
379 379 }
380 380
381 381 if (path->mdp_nelem == 0) {
382 382 (void) strcpy(s, "/");
383 383 return (0);
384 384 }
385 385
386 386 for (i = path->mdp_nelem - 1; i >= 0; i--) {
387 387 /*
388 388 * Number of bytes left is the distance from where we
389 389 * are to the end, minus 2 for '/' and '\0'
390 390 */
391 391 ssize_t left = (ssize_t)(&buf[len] - s) - 2;
392 392
393 393 if (left <= 0)
394 394 break;
395 395
396 396 *s++ = '/';
397 397 (void) strncpy(s, path->mdp_name[i], left);
398 398 s[left - 1] = '\0';
399 399 s += strlen(s);
400 400
401 401 if (left < strlen(path->mdp_name[i]))
402 402 break;
403 403 }
404 404
405 405 if (i >= 0)
406 406 (void) strcpy(&buf[len - 4], "...");
407 407
408 408 return (0);
409 409 }
410 410
411 411 static int
412 412 mdb_autonode2path(uintptr_t addr, mdb_path_t *path)
413 413 {
414 414 fninfo_t fni;
415 415 fnnode_t fn;
416 416
417 417 vnode_t vn;
418 418 vfs_t vfs;
419 419 struct vnodeops *autofs_vnops = NULL;
420 420
421 421 /*
422 422 * "autofs_vnops_ptr" is the address of the pointer to the vnodeops
423 423 * structure for autofs. We want to read it each time we access
424 424 * it since autofs could (in theory) be unloaded and reloaded.
425 425 */
426 426 if (mdb_vread(&autofs_vnops, sizeof (autofs_vnops),
427 427 (uintptr_t)autofs_vnops_ptr) == -1)
428 428 return (-1);
429 429
430 430 if (mdb_vread(&vn, sizeof (vn), addr) == -1)
431 431 return (-1);
432 432
433 433 if (autofs_vnops == NULL || vn.v_op != autofs_vnops)
434 434 return (-1);
435 435
436 436 addr = (uintptr_t)vn.v_data;
437 437
438 438 if (mdb_vread(&vfs, sizeof (vfs), (uintptr_t)vn.v_vfsp) == -1 ||
439 439 mdb_vread(&fni, sizeof (fni), (uintptr_t)vfs.vfs_data) == -1 ||
440 440 mdb_vread(&vn, sizeof (vn), (uintptr_t)fni.fi_rootvp) == -1)
441 441 return (-1);
442 442
443 443 for (;;) {
444 444 size_t elem = path->mdp_nelem++;
445 445 char elemstr[MAXNAMELEN];
446 446 char *c, *p;
447 447
448 448 if (elem == MDB_PATH_NELEM) {
449 449 path->mdp_nelem--;
450 450 return (-1);
451 451 }
452 452
453 453 if (mdb_vread(&fn, sizeof (fn), addr) != sizeof (fn)) {
454 454 path->mdp_nelem--;
455 455 return (-1);
456 456 }
457 457
458 458 if (mdb_readstr(elemstr, sizeof (elemstr),
459 459 (uintptr_t)fn.fn_name) <= 0) {
460 460 (void) strcpy(elemstr, "?");
461 461 }
462 462
463 463 c = mdb_alloc(strlen(elemstr) + 1, UM_SLEEP | UM_GC);
464 464 (void) strcpy(c, elemstr);
465 465
466 466 path->mdp_vnode[elem] = (uintptr_t)fn.fn_vnode;
467 467
468 468 if (addr == (uintptr_t)fn.fn_parent) {
469 469 path->mdp_name[elem] = &c[1];
470 470 path->mdp_complete = TRUE;
471 471 break;
472 472 }
473 473
474 474 if ((p = strrchr(c, '/')) != NULL)
475 475 path->mdp_name[elem] = p + 1;
476 476 else
477 477 path->mdp_name[elem] = c;
478 478
479 479 addr = (uintptr_t)fn.fn_parent;
480 480 }
481 481
482 482 return (0);
483 483 }
484 484
485 485 int
486 486 mdb_vnode2path(uintptr_t addr, char *buf, size_t buflen)
487 487 {
488 488 uintptr_t rootdir;
489 489 ncache_t *ent;
490 490 vnode_t vp;
491 491 mdb_path_t path;
492 492
493 493 /*
494 494 * Check to see if we have a cached value for this vnode
495 495 */
496 496 if (mdb_vread(&vp, sizeof (vp), addr) != -1 &&
497 497 vp.v_path != NULL &&
498 498 mdb_readstr(buf, buflen, (uintptr_t)vp.v_path) != -1)
499 499 return (0);
500 500
501 501 if (dnlc_load() == -1)
502 502 return (-1);
503 503
504 504 if (mdb_readvar(&rootdir, "rootdir") == -1) {
505 505 mdb_warn("failed to read 'rootdir'");
506 506 return (-1);
507 507 }
508 508
509 509 bzero(&path, sizeof (mdb_path_t));
510 510 again:
511 511 if ((addr == 0) && (path.mdp_nelem == 0)) {
512 512 /*
513 513 * 0 elems && complete tells sprintpath to just print "/"
514 514 */
515 515 path.mdp_complete = TRUE;
516 516 goto out;
517 517 }
518 518
519 519 if (addr == rootdir) {
520 520 path.mdp_complete = TRUE;
521 521 goto out;
522 522 }
523 523
524 524 for (ent = dnlc_hash[MDB_DNLC_HASH(addr)]; ent; ent = ent->hash_next) {
525 525 if ((uintptr_t)ent->vp == addr) {
526 526 if (strcmp(ent->name, "..") == 0 ||
527 527 strcmp(ent->name, ".") == 0)
528 528 continue;
529 529
530 530 path.mdp_vnode[path.mdp_nelem] = (uintptr_t)ent->vp;
531 531 path.mdp_name[path.mdp_nelem] = ent->name;
532 532 path.mdp_nelem++;
533 533
534 534 if (path.mdp_nelem == MDB_PATH_NELEM) {
535 535 path.mdp_nelem--;
536 536 mdb_warn("path exceeded maximum expected "
537 537 "elements\n");
538 538 return (-1);
539 539 }
540 540
541 541 addr = (uintptr_t)ent->dp;
542 542 goto again;
543 543 }
544 544 }
545 545
546 546 (void) mdb_autonode2path(addr, &path);
547 547
548 548 out:
549 549 return (mdb_sprintpath(buf, buflen, &path));
550 550 }
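/*
 * Hypothetical usage sketch (not part of this changeset): a dmod holding a
 * vnode_t address would typically resolve it to a pathname like this,
 * falling back to the raw address when no name can be derived:
 *
 *	char path[MAXPATHLEN];
 *
 *	if (mdb_vnode2path(vp_addr, path, sizeof (path)) == 0)
 *		mdb_printf("%s\n", path);
 *	else
 *		mdb_printf("<unknown vnode %p>\n", vp_addr);
 */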
551 551
552 552
553 553 uintptr_t
554 554 mdb_pid2proc(pid_t pid, proc_t *proc)
555 555 {
556 556 int pid_hashsz, hash;
557 557 uintptr_t paddr, pidhash, procdir;
558 558 struct pid pidp;
559 559
560 560 if (mdb_readvar(&pidhash, "pidhash") == -1)
561 561 return (0);
562 562
563 563 if (mdb_readvar(&pid_hashsz, "pid_hashsz") == -1)
564 564 return (0);
565 565
566 566 if (mdb_readvar(&procdir, "procdir") == -1)
567 567 return (0);
568 568
569 569 hash = pid & (pid_hashsz - 1);
570 570
571 571 if (mdb_vread(&paddr, sizeof (paddr),
572 572 pidhash + (hash * sizeof (paddr))) == -1)
573 573 return (0);
574 574
575 575 while (paddr != 0) {
576 576 if (mdb_vread(&pidp, sizeof (pidp), paddr) == -1)
577 577 return (0);
578 578
579 579 if (pidp.pid_id == pid) {
580 580 uintptr_t procp;
581 581
582 582 if (mdb_vread(&procp, sizeof (procp), procdir +
583 583 (pidp.pid_prslot * sizeof (procp))) == -1)
584 584 return (0);
585 585
586 586 if (proc != NULL)
587 587 (void) mdb_vread(proc, sizeof (proc_t), procp);
588 588
589 589 return (procp);
590 590 }
591 591 paddr = (uintptr_t)pidp.pid_link;
592 592 }
593 593 return (0);
594 594 }
595 595
596 596 int
597 597 mdb_cpu2cpuid(uintptr_t cpup)
598 598 {
599 599 cpu_t cpu;
600 600
601 601 if (mdb_vread(&cpu, sizeof (cpu_t), cpup) != sizeof (cpu_t))
602 602 return (-1);
603 603
604 604 return (cpu.cpu_id);
605 605 }
606 606
607 607 int
608 608 mdb_cpuset_find(uintptr_t cpusetp)
609 609 {
610 610 ulong_t *cpuset;
611 611 size_t nr_words = BT_BITOUL(NCPU);
612 612 size_t sz = nr_words * sizeof (ulong_t);
613 613 size_t i;
614 614 int cpu = -1;
615 615
616 616 cpuset = mdb_alloc(sz, UM_SLEEP);
617 617
618 618 if (mdb_vread((void *)cpuset, sz, cpusetp) != sz)
619 619 goto out;
620 620
621 621 for (i = 0; i < nr_words; i++) {
622 622 size_t j;
623 623 ulong_t m;
624 624
625 625 for (j = 0, m = 1; j < BT_NBIPUL; j++, m <<= 1) {
626 626 if (cpuset[i] & m) {
627 627 cpu = i * BT_NBIPUL + j;
628 628 goto out;
629 629 }
630 630 }
631 631 }
632 632
633 633 out:
634 634 mdb_free(cpuset, sz);
635 635 return (cpu);
636 636 }
637 637
638 638 static int
639 639 page_hash_load(void)
640 640 {
641 641 if (page_hash_loaded) {
642 642 return (1);
643 643 }
644 644
645 645 if (mdb_readvar(&mdb_page_hashsz, "page_hashsz") == -1) {
646 646 mdb_warn("unable to read page_hashsz");
647 647 return (0);
648 648 }
649 649 if (mdb_readvar(&mdb_page_hashsz_shift, "page_hashsz_shift") == -1) {
650 650 mdb_warn("unable to read page_hashsz_shift");
651 651 return (0);
652 652 }
653 653 if (mdb_readvar(&mdb_page_hash, "page_hash") == -1) {
654 654 mdb_warn("unable to read page_hash");
655 655 return (0);
656 656 }
657 657
658 658 page_hash_loaded = 1; /* zeroed on state change */
659 659 return (1);
660 660 }
661 661
662 662 uintptr_t
663 663 mdb_page_lookup(uintptr_t vp, u_offset_t offset)
664 664 {
665 665 size_t ndx;
666 666 uintptr_t page_hash_entry, pp;
667 667
668 668 if (!page_hash_loaded && !page_hash_load()) {
669 669 return (0);
670 670 }
671 671
672 672 ndx = PAGE_HASH_FUNC(vp, offset);
673 673 page_hash_entry = mdb_page_hash + ndx * sizeof (uintptr_t);
674 674
675 675 if (mdb_vread(&pp, sizeof (pp), page_hash_entry) < 0) {
676 676 mdb_warn("unable to read page_hash[%ld] (%p)", ndx,
677 677 page_hash_entry);
678 678 return (0);
679 679 }
680 680
681 681 while (pp != 0) {
682 682 page_t page;
683 683 long nndx;
684 684
685 685 if (mdb_vread(&page, sizeof (page), pp) < 0) {
686 686 mdb_warn("unable to read page_t at %p", pp);
687 687 return (0);
688 688 }
689 689
690 690 if ((uintptr_t)page.p_vnode == vp &&
691 691 (uint64_t)page.p_offset == offset)
692 692 return (pp);
693 693
694 694 /*
695 695 * Double check that the pages actually hash to the
696 696 * bucket we're searching. If not, our version of
697 697 * PAGE_HASH_FUNC() doesn't match the kernel's, and we're
698 698 * not going to be able to find the page. The most
699 699 * likely reason for this is that mdb_ks doesn't match the
700 700 * kernel we're running against.
701 701 */
702 702 nndx = PAGE_HASH_FUNC(page.p_vnode, page.p_offset);
703 703 if (page.p_vnode != NULL && nndx != ndx) {
704 704 mdb_warn("mdb_page_lookup: mdb_ks PAGE_HASH_FUNC() "
705 705 "mismatch: in bucket %ld, but page %p hashes to "
706 706 "bucket %ld\n", ndx, pp, nndx);
707 707 return (0);
708 708 }
709 709
710 710 pp = (uintptr_t)page.p_hash;
711 711 }
712 712
713 713 return (0);
714 714 }
715 715
716 716 char
717 717 mdb_vtype2chr(vtype_t type, mode_t mode)
718 718 {
719 719 static const char vttab[] = {
720 720 ' ', /* VNON */
721 721 ' ', /* VREG */
722 722 '/', /* VDIR */
723 723 ' ', /* VBLK */
724 724 ' ', /* VCHR */
725 725 '@', /* VLNK */
726 726 '|', /* VFIFO */
727 727 '>', /* VDOOR */
728 728 ' ', /* VPROC */
729 729 '=', /* VSOCK */
730 730 ' ', /* VBAD */
731 731 };
732 732
733 733 if (type < 0 || type >= sizeof (vttab) / sizeof (vttab[0]))
734 734 return ('?');
735 735
736 736 if (type == VREG && (mode & 0111) != 0)
737 737 return ('*');
738 738
739 739 return (vttab[type]);
740 740 }
741 741
742 742 struct pfn2page {
743 743 pfn_t pfn;
744 744 page_t *pp;
745 745 };
746 746
747 747 /*ARGSUSED*/
748 748 static int
749 749 pfn2page_cb(uintptr_t addr, const struct memseg *msp, void *data)
750 750 {
751 751 struct pfn2page *p = data;
752 752
753 753 if (p->pfn >= msp->pages_base && p->pfn < msp->pages_end) {
754 754 p->pp = msp->pages + (p->pfn - msp->pages_base);
755 755 return (WALK_DONE);
756 756 }
757 757
758 758 return (WALK_NEXT);
759 759 }
760 760
761 761 uintptr_t
762 762 mdb_pfn2page(pfn_t pfn)
763 763 {
764 764 struct pfn2page arg;
765 765 struct page page;
766 766
767 767 arg.pfn = pfn;
768 768 arg.pp = NULL;
769 769
770 770 if (mdb_walk("memseg", (mdb_walk_cb_t)pfn2page_cb, &arg) == -1) {
771 771 mdb_warn("pfn2page: can't walk memsegs");
772 772 return (0);
773 773 }
774 774 if (arg.pp == NULL) {
775 775 mdb_warn("pfn2page: unable to find page_t for pfn %lx\n",
776 776 pfn);
777 777 return (0);
778 778 }
779 779
780 780 if (mdb_vread(&page, sizeof (page_t), (uintptr_t)arg.pp) == -1) {
781 781 mdb_warn("pfn2page: can't read page 0x%lx at %p", pfn, arg.pp);
782 782 return (0);
783 783 }
784 784 if (page.p_pagenum != pfn) {
785 785 mdb_warn("pfn2page: page_t 0x%p should have PFN 0x%lx, "
786 786 "but actually has 0x%lx\n", arg.pp, pfn, page.p_pagenum);
787 787 return (0);
788 788 }
789 789
790 790 return ((uintptr_t)arg.pp);
791 791 }
792 792
793 793 pfn_t
794 794 mdb_page2pfn(uintptr_t addr)
795 795 {
796 796 struct page page;
797 797
798 798 if (mdb_vread(&page, sizeof (page_t), addr) == -1) {
799 799 mdb_warn("pp2pfn: can't read page at %p", addr);
800 800 return ((pfn_t)(-1));
801 801 }
802 802
803 803 return (page.p_pagenum);
804 804 }
805 805
806 806 static int
807 807 a2m_walk_modctl(uintptr_t addr, const struct modctl *m, a2m_query_t *a2m)
808 808 {
809 809 struct module mod;
810 810
811 811 if (m->mod_mp == NULL)
812 812 return (0);
813 813
814 814 if (mdb_vread(&mod, sizeof (mod), (uintptr_t)m->mod_mp) == -1) {
815 815 mdb_warn("couldn't read modctl %p's module", addr);
816 816 return (0);
817 817 }
818 818
819 819 if (a2m->a2m_addr >= (uintptr_t)mod.text &&
820 820 a2m->a2m_addr < (uintptr_t)mod.text + mod.text_size)
821 821 goto found;
822 822
823 823 if (a2m->a2m_addr >= (uintptr_t)mod.data &&
824 824 a2m->a2m_addr < (uintptr_t)mod.data + mod.data_size)
825 825 goto found;
826 826
827 827 return (0);
828 828
829 829 found:
830 830 a2m->a2m_where = addr;
831 831 return (-1);
832 832 }
833 833
834 834 uintptr_t
835 835 mdb_addr2modctl(uintptr_t addr)
836 836 {
837 837 a2m_query_t a2m;
838 838
839 839 a2m.a2m_addr = addr;
840 840 a2m.a2m_where = 0;
841 841
842 842 (void) mdb_walk("modctl", (mdb_walk_cb_t)a2m_walk_modctl, &a2m);
843 843 return (a2m.a2m_where);
844 844 }
845 845
846 846 static mdb_qinfo_t *
847 847 qi_lookup(uintptr_t qinit_addr)
848 848 {
849 849 mdb_qinfo_t *qip;
850 850
851 851 for (qip = qi_head; qip != NULL; qip = qip->qi_next) {
852 852 if (qip->qi_addr == qinit_addr)
853 853 return (qip);
854 854 }
855 855
856 856 return (NULL);
857 857 }
858 858
859 859 void
860 860 mdb_qops_install(const mdb_qops_t *qops, uintptr_t qinit_addr)
861 861 {
862 862 mdb_qinfo_t *qip = qi_lookup(qinit_addr);
863 863
864 864 if (qip != NULL) {
865 865 qip->qi_ops = qops;
866 866 return;
867 867 }
868 868
869 869 qip = mdb_alloc(sizeof (mdb_qinfo_t), UM_SLEEP);
870 870
871 871 qip->qi_ops = qops;
872 872 qip->qi_addr = qinit_addr;
873 873 qip->qi_next = qi_head;
874 874
875 875 qi_head = qip;
876 876 }
877 877
878 878 void
879 879 mdb_qops_remove(const mdb_qops_t *qops, uintptr_t qinit_addr)
880 880 {
881 881 mdb_qinfo_t *qip, *p = NULL;
882 882
883 883 for (qip = qi_head; qip != NULL; p = qip, qip = qip->qi_next) {
884 884 if (qip->qi_addr == qinit_addr && qip->qi_ops == qops) {
885 885 if (qi_head == qip)
886 886 qi_head = qip->qi_next;
887 887 else
888 888 p->qi_next = qip->qi_next;
889 889 mdb_free(qip, sizeof (mdb_qinfo_t));
890 890 return;
891 891 }
892 892 }
893 893 }
894 894
895 895 char *
896 896 mdb_qname(const queue_t *q, char *buf, size_t nbytes)
897 897 {
898 898 struct module_info mi;
899 899 struct qinit qi;
900 900
901 901 if (mdb_vread(&qi, sizeof (qi), (uintptr_t)q->q_qinfo) == -1) {
902 902 mdb_warn("failed to read qinit at %p", q->q_qinfo);
903 903 goto err;
904 904 }
905 905
906 906 if (mdb_vread(&mi, sizeof (mi), (uintptr_t)qi.qi_minfo) == -1) {
907 907 mdb_warn("failed to read module_info at %p", qi.qi_minfo);
908 908 goto err;
909 909 }
910 910
911 911 if (mdb_readstr(buf, nbytes, (uintptr_t)mi.mi_idname) <= 0) {
912 912 mdb_warn("failed to read mi_idname at %p", mi.mi_idname);
913 913 goto err;
914 914 }
915 915
916 916 return (buf);
917 917
918 918 err:
919 919 (void) mdb_snprintf(buf, nbytes, "???");
920 920 return (buf);
921 921 }
922 922
923 923 void
924 924 mdb_qinfo(const queue_t *q, char *buf, size_t nbytes)
925 925 {
926 926 mdb_qinfo_t *qip = qi_lookup((uintptr_t)q->q_qinfo);
927 927 buf[0] = '\0';
928 928
929 929 if (qip != NULL)
930 930 qip->qi_ops->q_info(q, buf, nbytes);
931 931 }
932 932
933 933 uintptr_t
934 934 mdb_qrnext(const queue_t *q)
935 935 {
936 936 mdb_qinfo_t *qip = qi_lookup((uintptr_t)q->q_qinfo);
937 937
938 938 if (qip != NULL)
939 939 return (qip->qi_ops->q_rnext(q));
940 940
941 941 return (0);
942 942 }
943 943
944 944 uintptr_t
945 945 mdb_qwnext(const queue_t *q)
946 946 {
947 947 mdb_qinfo_t *qip = qi_lookup((uintptr_t)q->q_qinfo);
948 948
949 949 if (qip != NULL)
950 950 return (qip->qi_ops->q_wnext(q));
951 951
952 952 return (0);
953 953 }
954 954
955 955 uintptr_t
956 956 mdb_qrnext_default(const queue_t *q)
957 957 {
958 958 return ((uintptr_t)q->q_next);
959 959 }
960 960
961 961 uintptr_t
962 962 mdb_qwnext_default(const queue_t *q)
963 963 {
964 964 return ((uintptr_t)q->q_next);
965 965 }
966 966
967 967 /*
968 968 * The following three routines were borrowed from modsubr.c
969 969 */
970 970 static int
971 971 nm_hash(const char *name)
972 972 {
973 973 char c;
974 974 int hash = 0;
975 975
976 976 for (c = *name++; c; c = *name++)
977 977 hash ^= c;
978 978
979 979 return (hash & MOD_BIND_HASHMASK);
980 980 }
981 981
982 982 static uintptr_t
983 983 find_mbind(const char *name, uintptr_t *hashtab)
984 984 {
985 985 int hashndx;
986 986 uintptr_t mb;
987 987 struct bind mb_local;
988 988 char node_name[MAXPATHLEN + 1];
989 989
990 990 hashndx = nm_hash(name);
991 991 mb = hashtab[hashndx];
992 992 while (mb) {
993 993 if (mdb_vread(&mb_local, sizeof (mb_local), mb) == -1) {
994 994 mdb_warn("failed to read struct bind at %p", mb);
995 995 return (0);
996 996 }
997 997 if (mdb_readstr(node_name, sizeof (node_name),
998 998 (uintptr_t)mb_local.b_name) == -1) {
999 999 mdb_warn("failed to read node name string at %p",
1000 1000 mb_local.b_name);
1001 1001 return (0);
1002 1002 }
1003 1003
1004 1004 if (strcmp(name, node_name) == 0)
1005 1005 break;
1006 1006
1007 1007 mb = (uintptr_t)mb_local.b_next;
1008 1008 }
1009 1009 return (mb);
1010 1010 }
1011 1011
1012 1012 int
1013 1013 mdb_name_to_major(const char *name, major_t *major)
1014 1014 {
1015 - uintptr_t mbind;
1016 - uintptr_t mb_hashtab[MOD_BIND_HASHSIZE];
1017 - struct bind mbind_local;
1015 + uintptr_t mbind;
1016 + uintptr_t mb_hashtab[MOD_BIND_HASHSIZE];
1017 + struct bind mbind_local;
1018 1018
1019 1019
1020 1020 if (mdb_readsym(mb_hashtab, sizeof (mb_hashtab), "mb_hashtab") == -1) {
1021 1021 mdb_warn("failed to read symbol 'mb_hashtab'");
1022 1022 return (-1);
1023 1023 }
1024 1024
1025 1025 if ((mbind = find_mbind(name, mb_hashtab)) != 0) {
1026 1026 if (mdb_vread(&mbind_local, sizeof (mbind_local), mbind) ==
1027 1027 -1) {
1028 1028 mdb_warn("failed to read mbind struct at %p", mbind);
1029 1029 return (-1);
1030 1030 }
1031 1031
1032 1032 *major = (major_t)mbind_local.b_num;
1033 1033 return (0);
1034 1034 }
1035 1035 return (-1);
1036 1036 }
1037 1037
1038 1038 const char *
1039 1039 mdb_major_to_name(major_t major)
1040 1040 {
1041 1041 static char name[MODMAXNAMELEN + 1];
1042 1042
1043 1043 uintptr_t devnamesp;
1044 1044 struct devnames dn;
1045 1045 uint_t devcnt;
1046 1046
1047 1047 if (mdb_readvar(&devcnt, "devcnt") == -1 || major >= devcnt ||
1048 1048 mdb_readvar(&devnamesp, "devnamesp") == -1)
1049 1049 return (NULL);
1050 1050
1051 1051 if (mdb_vread(&dn, sizeof (struct devnames), devnamesp +
1052 1052 major * sizeof (struct devnames)) != sizeof (struct devnames))
1053 1053 return (NULL);
1054 1054
1055 1055 if (mdb_readstr(name, MODMAXNAMELEN + 1, (uintptr_t)dn.dn_name) == -1)
1056 1056 return (NULL);
1057 1057
1058 1058 return ((const char *)name);
1059 1059 }
1060 1060
1061 1061 /*
1062 1062 * Return the name of the driver attached to the dip in drivername.
1063 1063 */
1064 1064 int
1065 1065 mdb_devinfo2driver(uintptr_t dip_addr, char *drivername, size_t namebufsize)
1066 1066 {
1067 1067 struct dev_info devinfo;
1068 1068 char bind_name[MAXPATHLEN + 1];
1069 1069 major_t major;
1070 1070 const char *namestr;
1071 1071
1072 1072
1073 1073 if (mdb_vread(&devinfo, sizeof (devinfo), dip_addr) == -1) {
1074 1074 mdb_warn("failed to read devinfo at %p", dip_addr);
1075 1075 return (-1);
1076 1076 }
1077 1077
1078 1078 if (mdb_readstr(bind_name, sizeof (bind_name),
1079 1079 (uintptr_t)devinfo.devi_binding_name) == -1) {
1080 1080 mdb_warn("failed to read binding name at %p",
1081 1081 devinfo.devi_binding_name);
1082 1082 return (-1);
1083 1083 }
1084 1084
1085 1085 /*
1086 1086 * Many->one relation: various names to one major number
1087 1087 */
1088 1088 if (mdb_name_to_major(bind_name, &major) == -1) {
1089 1089 mdb_warn("failed to translate bind name to major number\n");
1090 1090 return (-1);
1091 1091 }
1092 1092
1093 1093 /*
1094 1094 * One->one relation: one major number corresponds to one driver
1095 1095 */
1096 1096 if ((namestr = mdb_major_to_name(major)) == NULL) {
1097 1097 (void) strncpy(drivername, "???", namebufsize);
1098 1098 return (-1);
1099 1099 }
1100 1100
1101 1101 (void) strncpy(drivername, namestr, namebufsize);
1102 1102 return (0);
1103 1103 }
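/*
 * Hypothetical usage sketch (not part of this changeset): given a dev_info_t
 * address, a dmod could report the bound driver name like this:
 *
 *	char drv[MODMAXNAMELEN];
 *
 *	if (mdb_devinfo2driver(dip_addr, drv, sizeof (drv)) == 0)
 *		mdb_printf("driver: %s\n", drv);
 */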
1104 1104
1105 1105 /*
1106 1106 * Find the name of the driver attached to this dip (if any), given:
1107 1107 * - the address of a dip (in core)
1108 1108 * - the NAME of the global pointer to the driver's i_ddi_soft_state struct
1109 1109 * - pointer to a pointer to receive the address
1110 1110 */
1111 1111 int
1112 1112 mdb_devinfo2statep(uintptr_t dip_addr, char *soft_statep_name,
1113 1113 uintptr_t *statep)
1114 1114 {
1115 1115 struct dev_info dev_info;
1116 1116
1117 1117
1118 1118 if (mdb_vread(&dev_info, sizeof (dev_info), dip_addr) == -1) {
1119 1119 mdb_warn("failed to read devinfo at %p", dip_addr);
1120 1120 return (-1);
1121 1121 }
1122 1122
1123 1123 return (mdb_get_soft_state_byname(soft_statep_name,
1124 1124 dev_info.devi_instance, statep, NULL, 0));
1125 1125 }
1126 1126
1127 1127 /*
1128 1128 * Returns a pointer to the top of the soft state struct for the instance
1129 1129 * specified (in state_addr), given the address of the global soft state
1130 1130 * pointer and size of the struct. Also fills in the buffer pointed to by
1131 1131 * state_buf_p (if non-NULL) with the contents of the state struct.
1132 1132 */
1133 1133 int
1134 1134 mdb_get_soft_state_byaddr(uintptr_t ssaddr, uint_t instance,
1135 1135 uintptr_t *state_addr, void *state_buf_p, size_t sizeof_state)
1136 1136 {
1137 1137 struct i_ddi_soft_state ss;
1138 1138 void *statep;
1139 1139
1140 1140
1141 1141 if (mdb_vread(&ss, sizeof (ss), ssaddr) == -1)
1142 1142 return (-1);
1143 1143
1144 1144 if (instance >= ss.n_items)
1145 1145 return (-1);
1146 1146
1147 1147 if (mdb_vread(&statep, sizeof (statep), (uintptr_t)ss.array +
1148 1148 (sizeof (statep) * instance)) == -1)
1149 1149 return (-1);
1150 1150
1151 1151 if (state_addr != NULL)
1152 1152 *state_addr = (uintptr_t)statep;
1153 1153
1154 1154 if (statep == NULL) {
1155 1155 errno = ENOENT;
1156 1156 return (-1);
1157 1157 }
1158 1158
1159 1159 if (state_buf_p != NULL) {
1160 1160
1161 1161 /* Read the state struct into the buffer in local space. */
1162 1162 if (mdb_vread(state_buf_p, sizeof_state,
1163 1163 (uintptr_t)statep) == -1)
1164 1164 return (-1);
1165 1165 }
1166 1166
1167 1167 return (0);
1168 1168 }
1169 1169
1170 1170
1171 1171 /*
1172 1172 * Returns a pointer to the top of the soft state struct for the instance
1173 1173 * specified (in state_addr), given the name of the global soft state pointer
1174 1174 * and size of the struct. Also fills in the buffer pointed to by
1175 1175 * state_buf_p (if non-NULL) with the contents of the state struct.
1176 1176 */
1177 1177 int
1178 1178 mdb_get_soft_state_byname(char *softstatep_name, uint_t instance,
1179 1179 uintptr_t *state_addr, void *state_buf_p, size_t sizeof_state)
1180 1180 {
1181 1181 uintptr_t ssaddr;
1182 1182
1183 1183 if (mdb_readvar((void *)&ssaddr, softstatep_name) == -1)
1184 1184 return (-1);
1185 1185
1186 1186 return (mdb_get_soft_state_byaddr(ssaddr, instance, state_addr,
1187 1187 state_buf_p, sizeof_state));
1188 1188 }
1189 1189
1190 1190 static const mdb_dcmd_t dcmds[] = {
1191 1191 { "dnlc", NULL, "print DNLC contents", dnlcdump },
1192 1192 { NULL }
1193 1193 };
1194 1194
1195 1195 static const mdb_modinfo_t modinfo = { MDB_API_VERSION, dcmds };
1196 1196
1197 1197 /*ARGSUSED*/
1198 1198 static void
1199 1199 update_vars(void *arg)
1200 1200 {
1201 1201 GElf_Sym sym;
1202 1202
1203 1203 if (mdb_lookup_by_name("auto_vnodeops", &sym) == 0)
1204 1204 autofs_vnops_ptr = (struct vnodeops *)(uintptr_t)sym.st_value;
1205 1205 else
1206 1206 autofs_vnops_ptr = NULL;
1207 1207
1208 1208 (void) mdb_readvar(&_mdb_ks_pagesize, "_pagesize");
1209 1209 (void) mdb_readvar(&_mdb_ks_pageshift, "_pageshift");
1210 1210 (void) mdb_readvar(&_mdb_ks_pageoffset, "_pageoffset");
1211 1211 (void) mdb_readvar(&_mdb_ks_pagemask, "_pagemask");
1212 1212 (void) mdb_readvar(&_mdb_ks_mmu_pagesize, "_mmu_pagesize");
1213 1213 (void) mdb_readvar(&_mdb_ks_mmu_pageshift, "_mmu_pageshift");
1214 1214 (void) mdb_readvar(&_mdb_ks_mmu_pageoffset, "_mmu_pageoffset");
1215 1215 (void) mdb_readvar(&_mdb_ks_mmu_pagemask, "_mmu_pagemask");
1216 1216 (void) mdb_readvar(&_mdb_ks_kernelbase, "_kernelbase");
1217 1217
1218 1218 (void) mdb_readvar(&_mdb_ks_userlimit, "_userlimit");
1219 1219 (void) mdb_readvar(&_mdb_ks_userlimit32, "_userlimit32");
1220 1220 (void) mdb_readvar(&_mdb_ks_argsbase, "_argsbase");
1221 1221 (void) mdb_readvar(&_mdb_ks_msg_bsize, "_msg_bsize");
1222 1222 (void) mdb_readvar(&_mdb_ks_defaultstksz, "_defaultstksz");
1223 1223 (void) mdb_readvar(&_mdb_ks_ncpu, "_ncpu");
1224 1224 (void) mdb_readvar(&_mdb_ks_ncpu_log2, "_ncpu_log2");
1225 1225 (void) mdb_readvar(&_mdb_ks_ncpu_p2, "_ncpu_p2");
1226 1226
1227 1227 page_hash_loaded = 0; /* invalidate cached page_hash state */
1228 1228 }
1229 1229
1230 1230 const mdb_modinfo_t *
1231 1231 _mdb_init(void)
1232 1232 {
1233 1233 /*
1234 1234 * When used with mdb, mdb_ks is a separate dmod. With kmdb, however,
1235 1235 * mdb_ks is compiled into the debugger module. kmdb cannot
1236 1236 * automatically modunload itself when it exits. If it restarts after
1237 1237 * debugger fault, static variables may not be initialized to zero.
1238 1238 * They must be manually reinitialized here.
1239 1239 */
1240 1240 dnlc_hash = NULL;
1241 1241 qi_head = NULL;
1242 1242
1243 1243 mdb_callback_add(MDB_CALLBACK_STCHG, update_vars, NULL);
1244 1244
1245 1245 update_vars(NULL);
1246 1246
1247 1247 return (&modinfo);
1248 1248 }
1249 1249
1250 1250 void
1251 1251 _mdb_fini(void)
1252 1252 {
1253 1253 dnlc_free();
1254 1254 while (qi_head != NULL) {
1255 1255 mdb_qinfo_t *qip = qi_head;
1256 1256 qi_head = qip->qi_next;
1257 1257 mdb_free(qip, sizeof (mdb_qinfo_t));
1258 1258 }
1259 1259 }
1260 1260
1261 1261 /*
1262 1262 * Interface between MDB kproc target and mdb_ks. The kproc target relies
1263 1263 * on looking up and invoking these functions in mdb_ks so that dependencies
1264 1264 * on the current kernel implementation are isolated in mdb_ks.
1265 1265 */
1266 1266
1267 1267 /*
1268 1268 * Given the address of a proc_t, return the p.p_as pointer; return NULL
1269 1269 * if we were unable to read a proc structure from the given address.
1270 1270 */
1271 1271 uintptr_t
1272 1272 mdb_kproc_as(uintptr_t proc_addr)
1273 1273 {
1274 1274 proc_t p;
1275 1275
1276 1276 if (mdb_vread(&p, sizeof (p), proc_addr) == sizeof (p))
1277 1277 return ((uintptr_t)p.p_as);
1278 1278
1279 1279 return (0);
1280 1280 }
1281 1281
1282 1282 /*
1283 1283 * Given the address of a proc_t, return the p.p_model value; return
1284 1284 * PR_MODEL_UNKNOWN if we were unable to read a proc structure or if
1285 1285 * the model value does not match one of the two known values.
1286 1286 */
1287 1287 uint_t
1288 1288 mdb_kproc_model(uintptr_t proc_addr)
1289 1289 {
1290 1290 proc_t p;
1291 1291
1292 1292 if (mdb_vread(&p, sizeof (p), proc_addr) == sizeof (p)) {
1293 1293 switch (p.p_model) {
1294 1294 case DATAMODEL_ILP32:
1295 1295 return (PR_MODEL_ILP32);
1296 1296 case DATAMODEL_LP64:
1297 1297 return (PR_MODEL_LP64);
1298 1298 }
1299 1299 }
1300 1300
1301 1301 return (PR_MODEL_UNKNOWN);
1302 1302 }
1303 1303
1304 1304 /*
1305 1305 * Callback function for walking process's segment list. For each segment,
1306 1306 * we fill in an mdb_map_t describing its properties, and then invoke
1307 1307 * the callback function provided by the kproc target.
1308 1308 */
1309 1309 static int
1310 1310 asmap_step(uintptr_t addr, const struct seg *seg, asmap_arg_t *asmp)
1311 1311 {
1312 1312 struct segvn_data svd;
1313 1313 mdb_map_t map;
1314 1314
1315 1315 if (seg->s_ops == asmp->asm_segvn_ops && mdb_vread(&svd,
1316 1316 sizeof (svd), (uintptr_t)seg->s_data) == sizeof (svd)) {
1317 1317
1318 1318 if (svd.vp != NULL) {
1319 1319 if (mdb_vnode2path((uintptr_t)svd.vp, map.map_name,
1320 1320 MDB_TGT_MAPSZ) != 0) {
1321 1321 (void) mdb_snprintf(map.map_name,
1322 1322 MDB_TGT_MAPSZ, "[ vnode %p ]", svd.vp);
1323 1323 }
1324 1324 } else
1325 1325 (void) strcpy(map.map_name, "[ anon ]");
1326 1326
1327 1327 } else {
1328 1328 (void) mdb_snprintf(map.map_name, MDB_TGT_MAPSZ,
1329 1329 "[ seg %p ]", addr);
1330 1330 }
1331 1331
1332 1332 map.map_base = (uintptr_t)seg->s_base;
1333 1333 map.map_size = seg->s_size;
1334 1334 map.map_flags = 0;
1335 1335
1336 1336 asmp->asm_callback((const struct mdb_map *)&map, asmp->asm_cbdata);
1337 1337 return (WALK_NEXT);
1338 1338 }
1339 1339
1340 1340 /*
1341 1341 * Given a process address space, walk its segment list using the seg walker,
1342 1342 * convert the segment data to an mdb_map_t, and pass this information
1343 1343 * back to the kproc target via the given callback function.
1344 1344 */
1345 1345 int
1346 1346 mdb_kproc_asiter(uintptr_t as,
1347 1347 void (*func)(const struct mdb_map *, void *), void *p)
1348 1348 {
1349 1349 asmap_arg_t arg;
1350 1350 GElf_Sym sym;
1351 1351
1352 1352 arg.asm_segvn_ops = NULL;
1353 1353 arg.asm_callback = func;
1354 1354 arg.asm_cbdata = p;
1355 1355
1356 1356 if (mdb_lookup_by_name("segvn_ops", &sym) == 0)
1357 1357 arg.asm_segvn_ops = (struct seg_ops *)(uintptr_t)sym.st_value;
1358 1358
1359 1359 return (mdb_pwalk("seg", (mdb_walk_cb_t)asmap_step, &arg, as));
1360 1360 }
1361 1361
1362 1362 /*
1363 1363 * Copy the auxv array from the given process's u-area into the provided
1364 1364 * buffer. If the buffer is NULL, only return the size of the auxv array
1365 1365 * so the caller knows how much space will be required.
1366 1366 */
1367 1367 int
1368 1368 mdb_kproc_auxv(uintptr_t proc, auxv_t *auxv)
1369 1369 {
1370 1370 if (auxv != NULL) {
1371 1371 proc_t p;
1372 1372
1373 1373 if (mdb_vread(&p, sizeof (p), proc) != sizeof (p))
1374 1374 return (-1);
1375 1375
1376 1376 bcopy(p.p_user.u_auxv, auxv,
1377 1377 sizeof (auxv_t) * __KERN_NAUXV_IMPL);
1378 1378 }
1379 1379
1380 1380 return (__KERN_NAUXV_IMPL);
1381 1381 }
1382 1382
1383 1383 /*
1384 1384 * Given a process address, return the PID.
1385 1385 */
1386 1386 pid_t
1387 1387 mdb_kproc_pid(uintptr_t proc_addr)
1388 1388 {
1389 1389 struct pid pid;
1390 1390 proc_t p;
1391 1391
1392 1392 if (mdb_vread(&p, sizeof (p), proc_addr) == sizeof (p) &&
1393 1393 mdb_vread(&pid, sizeof (pid), (uintptr_t)p.p_pidp) == sizeof (pid))
1394 1394 return (pid.pid_id);
1395 1395
1396 1396 return (-1);
1397 1397 }
1398 1398
1399 1399 /*
1400 1400 * Interface between the MDB kvm target and mdb_ks. The kvm target relies
1401 1401 * on looking up and invoking these functions in mdb_ks so that dependencies
1402 1402 * on the current kernel implementation are isolated in mdb_ks.
1403 1403 */
1404 1404
1405 1405 /*
1406 1406 * Print a description of the dump content. For a curproc dump, also report
1407 1407 * whether the panicking thread was a kernel thread (panic_thread->t_procp == &p0).
1408 1408 */
1409 1409 void
1410 1410 mdb_dump_print_content(dumphdr_t *dh, pid_t content)
1411 1411 {
1412 1412 GElf_Sym sym;
1413 1413 uintptr_t pt;
1414 1414 uintptr_t procp;
1415 1415 int expcont = 0;
1416 1416 int actcont;
1417 1417
1418 1418 (void) mdb_readvar(&expcont, "dump_conflags");
1419 1419 actcont = dh->dump_flags & DF_CONTENT;
1420 1420
1421 1421 if (actcont == DF_ALL) {
1422 1422 mdb_printf("dump content: all kernel and user pages\n");
1423 1423 return;
1424 1424 } else if (actcont == DF_CURPROC) {
1425 1425 mdb_printf("dump content: kernel pages and pages from "
1426 1426 "PID %d", content);
1427 1427 return;
1428 1428 }
1429 1429
1430 1430 mdb_printf("dump content: kernel pages only\n");
1431 1431 if (!(expcont & DF_CURPROC))
1432 1432 return;
1433 1433
1434 1434 if (mdb_readvar(&pt, "panic_thread") != sizeof (pt) || pt == 0)
1435 1435 goto kthreadpanic_err;
1436 1436
1437 1437 if (mdb_vread(&procp, sizeof (procp), pt + OFFSETOF(kthread_t,
1438 1438 t_procp)) == -1 || procp == 0)
1439 1439 goto kthreadpanic_err;
1440 1440
1441 1441 if (mdb_lookup_by_name("p0", &sym) != 0)
1442 1442 goto kthreadpanic_err;
1443 1443
1444 1444 if (procp == (uintptr_t)sym.st_value) {
1445 1445 mdb_printf(" (curproc requested, but a kernel thread "
1446 1446 "panicked)\n");
1447 1447 } else {
1448 1448 mdb_printf(" (curproc requested, but the process that "
1449 1449 "panicked could not be dumped)\n");
1450 1450 }
1451 1451
1452 1452 return;
1453 1453
1454 1454 kthreadpanic_err:
1455 1455 mdb_printf(" (curproc requested, but the process that panicked could "
1456 1456 "not be found)\n");
1457 1457 }
1458 1458
1459 1459 /*
1460 1460 * Determine the process that was saved in a `curproc' dump. This process will
1461 1461 * be recorded as the first element in dump_pids[].
1462 1462 */
1463 1463 int
1464 1464 mdb_dump_find_curproc(void)
1465 1465 {
1466 1466 uintptr_t pidp;
1467 1467 pid_t pid = -1;
1468 1468
1469 1469 if (mdb_readvar(&pidp, "dump_pids") == sizeof (pidp) &&
1470 1470 mdb_vread(&pid, sizeof (pid), pidp) == sizeof (pid) &&
1471 1471 pid > 0)
1472 1472 return (pid);
1473 1473 else
1474 1474 return (-1);
1475 1475 }
1476 1476
1477 1477
1478 1478 /*
1479 1479 * The following three functions were extracted from sunddi.c
1480 1480 */
1481 1481
1482 1482 /*
1483 1483 * Return core address of root node of devinfo tree
1484 1484 */
1485 1485 static uintptr_t
1486 1486 mdb_ddi_root_node(void)
1487 1487 {
1488 1488 uintptr_t top_devinfo_addr;
1489 1489
1490 1490 /* return (top_devinfo); */
1491 1491 if (mdb_readvar(&top_devinfo_addr, "top_devinfo") == -1) {
1492 1492 mdb_warn("failed to read top_devinfo");
1493 1493 return (0);
1494 1494 }
1495 1495 return (top_devinfo_addr);
1496 1496 }
1497 1497
1498 1498 /*
1499 1499 * Return the name of the devinfo node pointed at by 'dip_addr' in the buffer
1500 1500 * pointed at by 'name.'
1501 1501 *
1502 1502 * - dip_addr is a pointer to a dev_info struct in core.
1503 1503 */
1504 1504 static char *
1505 1505 mdb_ddi_deviname(uintptr_t dip_addr, char *name, size_t name_size)
1506 1506 {
1507 1507 uintptr_t addrname;
1508 1508 ssize_t length;
1509 1509 char *local_namep = name;
1510 1510 size_t local_name_size = name_size;
1511 1511 struct dev_info local_dip;
1512 1512
1513 1513
1514 1514 if (dip_addr == mdb_ddi_root_node()) {
1515 1515 if (name_size < 1) {
1516 1516 mdb_warn("failed to get node name: buf too small\n");
1517 1517 return (NULL);
1518 1518 }
1519 1519
1520 1520 *name = '\0';
1521 1521 return (name);
1522 1522 }
1523 1523
1524 1524 if (name_size < 2) {
1525 1525 mdb_warn("failed to get node name: buf too small\n");
1526 1526 return (NULL);
1527 1527 }
1528 1528
1529 1529 local_namep = name;
1530 1530 *local_namep++ = '/';
1531 1531 *local_namep = '\0';
1532 1532 local_name_size--;
1533 1533
1534 1534 if (mdb_vread(&local_dip, sizeof (struct dev_info), dip_addr) == -1) {
1535 1535 mdb_warn("failed to read devinfo struct");
1536 1536 }
1537 1537
1538 1538 length = mdb_readstr(local_namep, local_name_size,
1539 1539 (uintptr_t)local_dip.devi_node_name);
1540 1540 if (length == -1) {
1541 1541 mdb_warn("failed to read node name");
1542 1542 return (NULL);
1543 1543 }
1544 1544 local_namep += length;
1545 1545 local_name_size -= length;
1546 1546 addrname = (uintptr_t)local_dip.devi_addr;
1547 1547
1548 1548 if (addrname != 0) {
1549 1549
1550 1550 if (local_name_size < 2) {
1551 1551 mdb_warn("not enough room for node address string");
1552 1552 return (name);
1553 1553 }
1554 1554 *local_namep++ = '@';
1555 1555 *local_namep = '\0';
1556 1556 local_name_size--;
1557 1557
1558 1558 length = mdb_readstr(local_namep, local_name_size, addrname);
1559 1559 if (length == -1) {
1560 1560 mdb_warn("failed to read name");
1561 1561 return (NULL);
1562 1562 }
1563 1563 }
1564 1564
1565 1565 return (name);
1566 1566 }
1567 1567
1568 1568 /*
1569 1569 * Generate the full path under the /devices dir to the device entry.
1570 1570 *
1571 1571 * dip is a pointer to a devinfo struct in core (not in local memory).
1572 1572 */
1573 1573 char *
1574 1574 mdb_ddi_pathname(uintptr_t dip_addr, char *path, size_t pathlen)
1575 1575 {
1576 1576 struct dev_info local_dip;
1577 1577 uintptr_t parent_dip;
1578 1578 char *bp;
1579 1579 size_t buf_left;
1580 1580
1581 1581
1582 1582 if (dip_addr == mdb_ddi_root_node()) {
1583 1583 *path = '\0';
1584 1584 return (path);
1585 1585 }
1586 1586
1587 1587
1588 1588 if (mdb_vread(&local_dip, sizeof (struct dev_info), dip_addr) == -1) {
1589 1589 mdb_warn("failed to read devinfo struct");
1590 1590 }
1591 1591
1592 1592 parent_dip = (uintptr_t)local_dip.devi_parent;
1593 1593 (void) mdb_ddi_pathname(parent_dip, path, pathlen);
1594 1594
1595 1595 bp = path + strlen(path);
1596 1596 buf_left = pathlen - strlen(path);
1597 1597 (void) mdb_ddi_deviname(dip_addr, bp, buf_left);
1598 1598 return (path);
1599 1599 }
1600 1600
1601 1601
1602 1602 /*
1603 1603 * Read in the string value of a refstr, which is appended to the end of
1604 1604 * the structure.
1605 1605 */
1606 1606 ssize_t
1607 1607 mdb_read_refstr(uintptr_t refstr_addr, char *str, size_t nbytes)
1608 1608 {
1609 1609 struct refstr *r = (struct refstr *)refstr_addr;
1610 1610
1611 1611 return (mdb_readstr(str, nbytes, (uintptr_t)r->rs_string));
1612 1612 }
1613 1613
1614 1614 /*
1615 1615 * Chase an mblk list by b_next and return the length.
1616 1616 */
1617 1617 int
1618 1618 mdb_mblk_count(const mblk_t *mb)
1619 1619 {
1620 1620 int count;
1621 1621 mblk_t mblk;
1622 1622
1623 1623 if (mb == NULL)
1624 1624 return (0);
1625 1625
1626 1626 count = 1;
1627 1627 while (mb->b_next != NULL) {
1628 1628 count++;
1629 1629 if (mdb_vread(&mblk, sizeof (mblk), (uintptr_t)mb->b_next) ==
1630 1630 -1)
1631 1631 break;
1632 1632 mb = &mblk;
1633 1633 }
1634 1634 return (count);
1635 1635 }
1636 1636
1637 1637 /*
1638 1638 * Write the given MAC address as a printable string in the usual colon-
1639 1639 * separated format. Assumes that buflen is at least 2.
1640 1640 */
1641 1641 void
1642 1642 mdb_mac_addr(const uint8_t *addr, size_t alen, char *buf, size_t buflen)
1643 1643 {
1644 1644 int slen;
1645 1645
1646 1646 if (alen == 0 || buflen < 4) {
1647 1647 (void) strcpy(buf, "?");
1648 1648 return;
1649 1649 }
1650 1650 for (;;) {
1651 1651 /*
1652 1652 * If there are more MAC address bytes available, but we won't
1653 1653 * have any room to print them, then add "..." to the string
1654 1654 * instead. See below for the 'magic number' explanation.
1655 1655 */
1656 1656 if ((alen == 2 && buflen < 6) || (alen > 2 && buflen < 7)) {
1657 1657 (void) strcpy(buf, "...");
1658 1658 break;
1659 1659 }
1660 1660 slen = mdb_snprintf(buf, buflen, "%02x", *addr++);
1661 1661 buf += slen;
1662 1662 if (--alen == 0)
1663 1663 break;
1664 1664 *buf++ = ':';
1665 1665 buflen -= slen + 1;
1666 1666 /*
1667 1667 * At this point, based on the first 'if' statement above,
1668 1668 * either alen == 1 and buflen >= 3, or alen > 1 and
1669 1669 * buflen >= 4. The first case leaves room for the final "xx"
1670 1670 * number and trailing NUL byte. The second leaves room for at
1671 1671 * least "...". Thus the apparently 'magic' numbers chosen for
1672 1672 * that statement.
1673 1673 */
1674 1674 }
1675 1675 }
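/*
 * Hypothetical usage sketch (not part of this changeset): formatting a
 * six-byte Ethernet address needs an 18-byte buffer (six "xx:" groups, with
 * the final ':' position taken by the terminating NUL):
 *
 *	uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0x3c, 0x4d, 0x5e };
 *	char macstr[18];
 *
 *	mdb_mac_addr(mac, sizeof (mac), macstr, sizeof (macstr));
 *	mdb_printf("%s\n", macstr);	-> prints 00:1b:21:3c:4d:5e
 */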
1676 1676
1677 1677 /*
1678 1678 * Produce a string that represents a DLPI primitive, or NULL if no such string
1679 1679 * is possible.
1680 1680 */
1681 1681 const char *
1682 1682 mdb_dlpi_prim(int prim)
1683 1683 {
1684 1684 switch (prim) {
1685 1685 case DL_INFO_REQ: return ("DL_INFO_REQ");
1686 1686 case DL_INFO_ACK: return ("DL_INFO_ACK");
1687 1687 case DL_ATTACH_REQ: return ("DL_ATTACH_REQ");
1688 1688 case DL_DETACH_REQ: return ("DL_DETACH_REQ");
1689 1689 case DL_BIND_REQ: return ("DL_BIND_REQ");
1690 1690 case DL_BIND_ACK: return ("DL_BIND_ACK");
1691 1691 case DL_UNBIND_REQ: return ("DL_UNBIND_REQ");
1692 1692 case DL_OK_ACK: return ("DL_OK_ACK");
1693 1693 case DL_ERROR_ACK: return ("DL_ERROR_ACK");
1694 1694 case DL_ENABMULTI_REQ: return ("DL_ENABMULTI_REQ");
1695 1695 case DL_DISABMULTI_REQ: return ("DL_DISABMULTI_REQ");
1696 1696 case DL_PROMISCON_REQ: return ("DL_PROMISCON_REQ");
1697 1697 case DL_PROMISCOFF_REQ: return ("DL_PROMISCOFF_REQ");
1698 1698 case DL_UNITDATA_REQ: return ("DL_UNITDATA_REQ");
1699 1699 case DL_UNITDATA_IND: return ("DL_UNITDATA_IND");
1700 1700 case DL_UDERROR_IND: return ("DL_UDERROR_IND");
1701 1701 case DL_PHYS_ADDR_REQ: return ("DL_PHYS_ADDR_REQ");
1702 1702 case DL_PHYS_ADDR_ACK: return ("DL_PHYS_ADDR_ACK");
1703 1703 case DL_SET_PHYS_ADDR_REQ: return ("DL_SET_PHYS_ADDR_REQ");
1704 1704 case DL_NOTIFY_REQ: return ("DL_NOTIFY_REQ");
1705 1705 case DL_NOTIFY_ACK: return ("DL_NOTIFY_ACK");
1706 1706 case DL_NOTIFY_IND: return ("DL_NOTIFY_IND");
1707 1707 case DL_NOTIFY_CONF: return ("DL_NOTIFY_CONF");
1708 1708 case DL_CAPABILITY_REQ: return ("DL_CAPABILITY_REQ");
1709 1709 case DL_CAPABILITY_ACK: return ("DL_CAPABILITY_ACK");
1710 1710 case DL_CONTROL_REQ: return ("DL_CONTROL_REQ");
1711 1711 case DL_CONTROL_ACK: return ("DL_CONTROL_ACK");
1712 1712 case DL_PASSIVE_REQ: return ("DL_PASSIVE_REQ");
1713 1713 default: return (NULL);
1714 1714 }
1715 1715 }
1716 1716
1717 1717 /*
1718 1718 * mdb_gethrtime() returns the hires system time. If called from kmdb(1), this
1719 1719 * is the timestamp at which we dropped into the debugger; if inspecting a core
1720 1720 * dump, it is the dump's hires time; and if inspecting a live kernel, it is the
1721 1721 * running system's hires time.
1722 1722 */
1723 1723 hrtime_t
1724 1724 mdb_gethrtime(void)
1725 1725 {
1726 1726 uintptr_t ptr;
1727 1727 GElf_Sym sym;
1728 1728 lbolt_info_t lbi;
1729 1729 hrtime_t ts;
1730 1730
1731 1731 /*
1732 1732 * We first check whether the lbolt info structure has been allocated
1733 1733 * and initialized. If not, lbolt_hybrid will be pointing at
1734 1734 * lbolt_bootstrap.
1735 1735 */
1736 1736 if (mdb_lookup_by_name("lbolt_bootstrap", &sym) == -1)
1737 1737 return (0);
1738 1738
1739 1739 if (mdb_readvar(&ptr, "lbolt_hybrid") == -1)
1740 1740 return (0);
1741 1741
1742 1742 if (ptr == (uintptr_t)sym.st_value)
1743 1743 return (0);
1744 1744
1745 1745 #ifdef _KMDB
1746 1746 if (mdb_readvar(&ptr, "lb_info") == -1)
1747 1747 return (0);
1748 1748
1749 1749 if (mdb_vread(&lbi, sizeof (lbolt_info_t), ptr) !=
1750 1750 sizeof (lbolt_info_t))
1751 1751 return (0);
1752 1752
1753 1753 ts = lbi.lbi_debug_ts;
1754 1754 #else
1755 1755 if (mdb_prop_postmortem) {
1756 1756 if (mdb_readvar(&ptr, "lb_info") == -1)
1757 1757 return (0);
1758 1758
1759 1759 if (mdb_vread(&lbi, sizeof (lbolt_info_t), ptr) !=
1760 1760 sizeof (lbolt_info_t))
1761 1761 return (0);
1762 1762
1763 1763 ts = lbi.lbi_debug_ts;
1764 1764 } else {
1765 1765 ts = gethrtime();
1766 1766 }
1767 1767 #endif
1768 1768 return (ts);
1769 1769 }
1770 1770
1771 1771 /*
1772 1772 * mdb_get_lbolt() returns the number of clock ticks since system boot.
1773 1773 * Depending on the context in which it's called, the value will be derived
1774 1774 * from different sources per mdb_gethrtime(). If inspecting a panicked
1775 1775 * system, the routine returns the 'panic_lbolt64' variable from the core file.
1776 1776 */
1777 1777 int64_t
1778 1778 mdb_get_lbolt(void)
1779 1779 {
1780 1780 lbolt_info_t lbi;
1781 1781 uintptr_t ptr;
1782 1782 int64_t pl;
1783 1783 hrtime_t ts;
1784 1784 int nsec;
1785 1785
1786 1786 if (mdb_readvar(&pl, "panic_lbolt64") != -1 && pl > 0)
1787 1787 return (pl);
1788 1788
1789 1789 /*
1790 1790 * mdb_gethrtime() will return zero if the lbolt info structure hasn't
1791 1791 * been allocated and initialized yet, or if it fails to read it.
1792 1792 */
1793 1793 if ((ts = mdb_gethrtime()) <= 0)
1794 1794 return (0);
1795 1795
1796 1796 /*
1797 1797 * Load the time spent in kmdb, if any.
1798 1798 */
1799 1799 if (mdb_readvar(&ptr, "lb_info") == -1)
1800 1800 return (0);
1801 1801
1802 1802 if (mdb_vread(&lbi, sizeof (lbolt_info_t), ptr) !=
1803 1803 sizeof (lbolt_info_t))
1804 1804 return (0);
1805 1805
1806 1806 if (mdb_readvar(&nsec, "nsec_per_tick") == -1 || nsec == 0) {
1807 1807 mdb_warn("failed to read 'nsec_per_tick'");
1808 1808 return (-1);
1809 1809 }
1810 1810
1811 1811 return ((ts/nsec) - lbi.lbi_debug_time);
1812 +}
1813 +
1814 +void
1815 +mdb_print_buildversion(void)
1816 +{
1817 + GElf_Sym sym;
1818 +
1819 + if (mdb_lookup_by_name("buildversion", &sym) != 0)
1820 + return;
1821 +
1822 + char *str = mdb_zalloc(4096, UM_SLEEP | UM_GC);
1823 +
1824 + if (mdb_readstr(str, 4096, sym.st_value) < 1)
1825 + return;
1826 +
1827 + mdb_printf("build version: %s\n", str);
1812 1828 }
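/*
 * Hypothetical usage sketch (not part of this changeset): the expected
 * consumer is a target-side dcmd (e.g. ::status, per the 11626/11627
 * synopses above); a minimal caller would look like:
 *
 *	static int
 *	buildversion_dcmd(uintptr_t addr, uint_t flags, int argc,
 *	    const mdb_arg_t *argv)
 *	{
 *		if ((flags & DCMD_ADDRSPEC) || argc != 0)
 *			return (DCMD_USAGE);
 *
 *		mdb_print_buildversion();
 *		return (DCMD_OK);
 *	}
 */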