Print this page
XXXX remove i86xpv platform code
Reviewed by: Sanjay Nadkarni <sanjay.nadkarni@nexenta.com>
Reviewed by: Rick McNeal <rick.mcneal@nexenta.com>
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/cmd/mdb/common/modules/genunix/memory.c
+++ new/usr/src/cmd/mdb/common/modules/genunix/memory.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 * Copyright 2017 Joyent, Inc.
24 24 */
25 25
26 26 #include <mdb/mdb_param.h>
27 27 #include <mdb/mdb_modapi.h>
28 28 #include <mdb/mdb_ks.h>
29 29 #include <sys/types.h>
30 30 #include <sys/memlist.h>
31 31 #include <sys/swap.h>
32 32 #include <sys/systm.h>
33 33 #include <sys/thread.h>
↓ open down ↓ |
33 lines elided |
↑ open up ↑ |
34 34 #include <vm/anon.h>
35 35 #include <vm/as.h>
36 36 #include <vm/page.h>
37 37 #include <sys/thread.h>
38 38 #include <sys/swap.h>
39 39 #include <sys/memlist.h>
40 40 #include <sys/vnode.h>
41 41 #include <vm/seg_map.h>
42 42 #include <vm/seg_vn.h>
43 43 #include <vm/seg_hole.h>
44 -#if defined(__i386) || defined(__amd64)
45 -#include <sys/balloon_impl.h>
46 -#endif
47 44
48 45 #include "avl.h"
49 46 #include "memory.h"
50 47
51 48 /*
52 49 * Page walker.
53 50 * By default, this will walk all pages in the system. If given an
54 51 * address, it will walk all pages belonging to the vnode at that
55 52 * address.
56 53 */
57 54
58 55 /*
59 56 * page_walk_data
60 57 *
61 58 * pw_hashleft is set to -1 when walking a vnode's pages, and holds the
62 59 * number of hash locations remaining in the page hash table when
63 60 * walking all pages.
64 61 *
65 62 * The astute reader will notice that pw_hashloc is only used when
66 63 * reading all pages (to hold a pointer to our location in the page
67 64 * hash table), and that pw_first is only used when reading the pages
68 65 * belonging to a particular vnode (to hold a pointer to the first
69 66 * page). While these could be combined to be a single pointer, they
70 67 * are left separate for clarity.
71 68 */
typedef struct page_walk_data {
	long pw_hashleft;	/* -1: vnode walk; else hash buckets left */
	void **pw_hashloc;	/* current location in the page hash table */
	uintptr_t pw_first;	/* first page seen on a vnode walk (0 until set) */
} page_walk_data_t;
77 74
78 75 int
79 76 page_walk_init(mdb_walk_state_t *wsp)
80 77 {
81 78 page_walk_data_t *pwd;
82 79 void **ptr;
83 80 size_t hashsz;
84 81 vnode_t vn;
85 82
86 83 if (wsp->walk_addr == NULL) {
87 84
88 85 /*
89 86 * Walk all pages
90 87 */
91 88
92 89 if ((mdb_readvar(&ptr, "page_hash") == -1) ||
93 90 (mdb_readvar(&hashsz, "page_hashsz") == -1) ||
94 91 (ptr == NULL) || (hashsz == 0)) {
95 92 mdb_warn("page_hash, page_hashsz not found or invalid");
96 93 return (WALK_ERR);
97 94 }
98 95
99 96 /*
100 97 * Since we are walking all pages, initialize hashleft
101 98 * to be the remaining number of entries in the page
102 99 * hash. hashloc is set the start of the page hash
103 100 * table. Setting the walk address to 0 indicates that
104 101 * we aren't currently following a hash chain, and that
105 102 * we need to scan the page hash table for a page.
106 103 */
107 104 pwd = mdb_alloc(sizeof (page_walk_data_t), UM_SLEEP);
108 105 pwd->pw_hashleft = hashsz;
109 106 pwd->pw_hashloc = ptr;
110 107 wsp->walk_addr = 0;
111 108 } else {
112 109
113 110 /*
114 111 * Walk just this vnode
115 112 */
116 113
117 114 if (mdb_vread(&vn, sizeof (vnode_t), wsp->walk_addr) == -1) {
118 115 mdb_warn("unable to read vnode_t at %#lx",
119 116 wsp->walk_addr);
120 117 return (WALK_ERR);
121 118 }
122 119
123 120 /*
124 121 * We set hashleft to -1 to indicate that we are
125 122 * walking a vnode, and initialize first to 0 (it is
126 123 * used to terminate the walk, so it must not be set
127 124 * until after we have walked the first page). The
128 125 * walk address is set to the first page.
129 126 */
130 127 pwd = mdb_alloc(sizeof (page_walk_data_t), UM_SLEEP);
131 128 pwd->pw_hashleft = -1;
132 129 pwd->pw_first = 0;
133 130
134 131 wsp->walk_addr = (uintptr_t)vn.v_pages;
135 132 }
136 133
137 134 wsp->walk_data = pwd;
138 135
139 136 return (WALK_NEXT);
140 137 }
141 138
/*
 * Walker step: emit the next page_t.  In vnode mode (pw_hashleft < 0) we
 * follow the vnode's circular p_vpnext list until we come back around to
 * the first page we saw; in global mode we drain each page-hash chain via
 * p_hash, scanning forward through the hash table between chains.
 */
int
page_walk_step(mdb_walk_state_t *wsp)
{
	page_walk_data_t *pwd = wsp->walk_data;
	page_t page;
	uintptr_t pp;

	pp = wsp->walk_addr;

	if (pwd->pw_hashleft < 0) {

		/* We're walking a vnode's pages */

		/*
		 * If we don't have any pages to walk, we have come
		 * back around to the first one (we finished), or we
		 * can't read the page we're looking at, we are done.
		 */
		if (pp == NULL || pp == pwd->pw_first)
			return (WALK_DONE);
		if (mdb_vread(&page, sizeof (page_t), pp) == -1) {
			mdb_warn("unable to read page_t at %#lx", pp);
			return (WALK_ERR);
		}

		/*
		 * Set the walk address to the next page, and if the
		 * first page hasn't been set yet (i.e. we are on the
		 * first page), set it.
		 */
		wsp->walk_addr = (uintptr_t)page.p_vpnext;
		if (pwd->pw_first == NULL)
			pwd->pw_first = pp;

	} else if (pwd->pw_hashleft > 0) {

		/* We're walking all pages */

		/*
		 * If pp (the walk address) is NULL, we scan through
		 * the page hash table until we find a page.
		 */
		if (pp == NULL) {

			/*
			 * Iterate through the page hash table until we
			 * find a page or reach the end.
			 */
			do {
				if (mdb_vread(&pp, sizeof (uintptr_t),
				    (uintptr_t)pwd->pw_hashloc) == -1) {
					mdb_warn("unable to read from %#p",
					    pwd->pw_hashloc);
					return (WALK_ERR);
				}
				pwd->pw_hashleft--;
				pwd->pw_hashloc++;
			} while (pwd->pw_hashleft && (pp == NULL));

			/*
			 * We've reached the end; exit.
			 */
			if (pp == NULL)
				return (WALK_DONE);
		}

		if (mdb_vread(&page, sizeof (page_t), pp) == -1) {
			mdb_warn("unable to read page_t at %#lx", pp);
			return (WALK_ERR);
		}

		/*
		 * Set the walk address to the next page.
		 */
		wsp->walk_addr = (uintptr_t)page.p_hash;

	} else {
		/* We've finished walking all pages. */
		return (WALK_DONE);
	}

	return (wsp->walk_callback(pp, &page, wsp->walk_cbdata));
}
225 222
226 223 void
227 224 page_walk_fini(mdb_walk_state_t *wsp)
228 225 {
229 226 mdb_free(wsp->walk_data, sizeof (page_walk_data_t));
230 227 }
231 228
232 229 /*
233 230 * allpages walks all pages in the system in order they appear in
234 231 * the memseg structure
235 232 */
236 233
237 234 #define PAGE_BUFFER 128
238 235
239 236 int
240 237 allpages_walk_init(mdb_walk_state_t *wsp)
241 238 {
242 239 if (wsp->walk_addr != 0) {
243 240 mdb_warn("allpages only supports global walks.\n");
244 241 return (WALK_ERR);
245 242 }
246 243
247 244 if (mdb_layered_walk("memseg", wsp) == -1) {
248 245 mdb_warn("couldn't walk 'memseg'");
249 246 return (WALK_ERR);
250 247 }
251 248
252 249 wsp->walk_data = mdb_alloc(sizeof (page_t) * PAGE_BUFFER, UM_SLEEP);
253 250 return (WALK_NEXT);
254 251 }
255 252
256 253 int
257 254 allpages_walk_step(mdb_walk_state_t *wsp)
258 255 {
259 256 const struct memseg *msp = wsp->walk_layer;
260 257 page_t *buf = wsp->walk_data;
261 258 size_t pg_read, i;
262 259 size_t pg_num = msp->pages_end - msp->pages_base;
263 260 const page_t *pg_addr = msp->pages;
264 261
265 262 while (pg_num > 0) {
266 263 pg_read = MIN(pg_num, PAGE_BUFFER);
267 264
268 265 if (mdb_vread(buf, pg_read * sizeof (page_t),
269 266 (uintptr_t)pg_addr) == -1) {
270 267 mdb_warn("can't read page_t's at %#lx", pg_addr);
271 268 return (WALK_ERR);
272 269 }
273 270 for (i = 0; i < pg_read; i++) {
274 271 int ret = wsp->walk_callback((uintptr_t)&pg_addr[i],
275 272 &buf[i], wsp->walk_cbdata);
276 273
277 274 if (ret != WALK_NEXT)
278 275 return (ret);
279 276 }
280 277 pg_num -= pg_read;
281 278 pg_addr += pg_read;
282 279 }
283 280
284 281 return (WALK_NEXT);
285 282 }
286 283
287 284 void
288 285 allpages_walk_fini(mdb_walk_state_t *wsp)
289 286 {
290 287 mdb_free(wsp->walk_data, sizeof (page_t) * PAGE_BUFFER);
291 288 }
292 289
293 290 /*
294 291 * Hash table + LRU queue.
295 292 * This table is used to cache recently read vnodes for the memstat
296 293 * command, to reduce the number of mdb_vread calls. This greatly
297 294 * speeds the memstat command on on live, large CPU count systems.
298 295 */
299 296
300 297 #define VN_SMALL 401
301 298 #define VN_LARGE 10007
302 299 #define VN_HTABLE_KEY(p, hp) ((p) % ((hp)->vn_htable_buckets))
303 300
/*
 * One cache entry.  Each entry lives simultaneously on a hash chain
 * (vn_h_next) and on the doubly-linked LRU queue (vn_q_next/vn_q_prev).
 */
struct vn_htable_list {
	uint_t vn_flag;				/* v_flag from vnode */
	uintptr_t vn_ptr;			/* pointer to vnode */
	struct vn_htable_list *vn_q_next;	/* queue next pointer */
	struct vn_htable_list *vn_q_prev;	/* queue prev pointer */
	struct vn_htable_list *vn_h_next;	/* hash table pointer */
};
311 308
312 309 /*
313 310 * vn_q_first -> points to to head of queue: the vnode that was most
314 311 * recently used
315 312 * vn_q_last -> points to the oldest used vnode, and is freed once a new
316 313 * vnode is read.
317 314 * vn_htable -> hash table
318 315 * vn_htable_buf -> contains htable objects
319 316 * vn_htable_size -> total number of items in the hash table
320 317 * vn_htable_buckets -> number of buckets in the hash table
321 318 */
typedef struct vn_htable {
	struct vn_htable_list *vn_q_first;	/* MRU end of the queue */
	struct vn_htable_list *vn_q_last;	/* LRU end; reused on a miss */
	struct vn_htable_list **vn_htable;	/* hash bucket array */
	struct vn_htable_list *vn_htable_buf;	/* backing entry storage */
	int vn_htable_size;			/* total number of entries */
	int vn_htable_buckets;			/* number of hash buckets */
} vn_htable_t;
330 327
331 328
332 329 /* allocate memory, initilize hash table and LRU queue */
333 330 static void
334 331 vn_htable_init(vn_htable_t *hp, size_t vn_size)
335 332 {
336 333 int i;
337 334 int htable_size = MAX(vn_size, VN_LARGE);
338 335
339 336 if ((hp->vn_htable_buf = mdb_zalloc(sizeof (struct vn_htable_list)
340 337 * htable_size, UM_NOSLEEP|UM_GC)) == NULL) {
341 338 htable_size = VN_SMALL;
342 339 hp->vn_htable_buf = mdb_zalloc(sizeof (struct vn_htable_list)
343 340 * htable_size, UM_SLEEP|UM_GC);
344 341 }
345 342
346 343 hp->vn_htable = mdb_zalloc(sizeof (struct vn_htable_list *)
347 344 * htable_size, UM_SLEEP|UM_GC);
348 345
349 346 hp->vn_q_first = &hp->vn_htable_buf[0];
350 347 hp->vn_q_last = &hp->vn_htable_buf[htable_size - 1];
351 348 hp->vn_q_first->vn_q_next = &hp->vn_htable_buf[1];
352 349 hp->vn_q_last->vn_q_prev = &hp->vn_htable_buf[htable_size - 2];
353 350
354 351 for (i = 1; i < (htable_size-1); i++) {
355 352 hp->vn_htable_buf[i].vn_q_next = &hp->vn_htable_buf[i + 1];
356 353 hp->vn_htable_buf[i].vn_q_prev = &hp->vn_htable_buf[i - 1];
357 354 }
358 355
359 356 hp->vn_htable_size = htable_size;
360 357 hp->vn_htable_buckets = htable_size;
361 358 }
362 359
363 360
364 361 /*
365 362 * Find the vnode whose address is ptr, and return its v_flag in vp->v_flag.
366 363 * The function tries to find needed information in the following order:
367 364 *
368 365 * 1. check if ptr is the first in queue
369 366 * 2. check if ptr is in hash table (if so move it to the top of queue)
370 367 * 3. do mdb_vread, remove last queue item from queue and hash table.
371 368 * Insert new information to freed object, and put this object in to the
372 369 * top of the queue.
373 370 */
/*
 * NOTE: only v_flag is filled into *vp; no other vnode field is cached.
 * Returns 0 on success, -1 if the vnode could not be read.
 */
static int
vn_get(vn_htable_t *hp, struct vnode *vp, uintptr_t ptr)
{
	int hkey;
	struct vn_htable_list *hent, **htmp, *q_next, *q_prev;
	struct vn_htable_list *q_first = hp->vn_q_first;

	/* 1. vnode ptr is the first in queue, just get v_flag and return */
	if (q_first->vn_ptr == ptr) {
		vp->v_flag = q_first->vn_flag;

		return (0);
	}

	/* 2. search the hash table for this ptr */
	hkey = VN_HTABLE_KEY(ptr, hp);
	hent = hp->vn_htable[hkey];
	while (hent && (hent->vn_ptr != ptr))
		hent = hent->vn_h_next;

	/* 3. if hent is NULL, we did not find in hash table, do mdb_vread */
	if (hent == NULL) {
		struct vnode vn;

		if (mdb_vread(&vn, sizeof (vnode_t), ptr) == -1) {
			mdb_warn("unable to read vnode_t at %#lx", ptr);
			return (-1);
		}

		/* we will insert read data into the last element in queue */
		hent = hp->vn_q_last;

		/* remove last hp->vn_q_last object from hash table */
		if (hent->vn_ptr) {
			/* walk the chain to find the link pointing at hent */
			htmp = &hp->vn_htable[VN_HTABLE_KEY(hent->vn_ptr, hp)];
			while (*htmp != hent)
				htmp = &(*htmp)->vn_h_next;
			*htmp = hent->vn_h_next;
		}

		/* insert data into new free object */
		hent->vn_ptr = ptr;
		hent->vn_flag = vn.v_flag;

		/* insert new object into hash table */
		hent->vn_h_next = hp->vn_htable[hkey];
		hp->vn_htable[hkey] = hent;
	}

	/* Remove from queue. hent is not first, vn_q_prev is not NULL */
	q_next = hent->vn_q_next;
	q_prev = hent->vn_q_prev;
	if (q_next == NULL)
		hp->vn_q_last = q_prev;
	else
		q_next->vn_q_prev = q_prev;
	q_prev->vn_q_next = q_next;

	/* Add to the front of queue */
	hent->vn_q_prev = NULL;
	hent->vn_q_next = q_first;
	q_first->vn_q_prev = hent;
	hp->vn_q_first = hent;

	/* Set v_flag in vnode pointer from hent */
	vp->v_flag = hent->vn_flag;

	return (0);
}
443 440
/* Summary statistics of pages, accumulated by memstat_callback(). */
typedef struct memstat {
	struct vnode *ms_kvp;		/* Cached address of kernel vnode */
	struct vnode *ms_unused_vp;	/* Unused pages vnode pointer */
	struct vnode *ms_zvp;		/* Cached address of zio vnode */
	uint64_t ms_kmem;		/* Pages of kernel memory */
	uint64_t ms_zfs_data;		/* Pages of zfs data */
	uint64_t ms_anon;		/* Pages of anonymous memory */
	uint64_t ms_vnode;		/* Pages of named (vnode) memory */
	uint64_t ms_exec;		/* Pages of exec/library memory */
	uint64_t ms_cachelist;		/* Pages on the cachelist (free) */
	uint64_t ms_bootpages;		/* Pages on the bootpages list */
	uint64_t ms_total;		/* Pages on page hash */
	vn_htable_t *ms_vn_htable;	/* Pointer to hash table */
	struct vnode ms_vn;		/* vnode buffer */
} memstat_t;
460 457
461 458 #define MS_PP_ISKAS(pp, stats) \
462 459 ((pp)->p_vnode == (stats)->ms_kvp)
463 460
464 461 #define MS_PP_ISZFS_DATA(pp, stats) \
465 462 (((stats)->ms_zvp != NULL) && ((pp)->p_vnode == (stats)->ms_zvp))
466 463
467 464 /*
468 465 * Summarize pages by type and update stat information
469 466 */
470 467
471 468 /* ARGSUSED */
472 469 static int
473 470 memstat_callback(page_t *page, page_t *pp, memstat_t *stats)
474 471 {
475 472 struct vnode *vp = &stats->ms_vn;
476 473
477 474 if (PP_ISBOOTPAGES(pp))
478 475 stats->ms_bootpages++;
479 476 else if (pp->p_vnode == NULL || pp->p_vnode == stats->ms_unused_vp)
480 477 return (WALK_NEXT);
481 478 else if (MS_PP_ISKAS(pp, stats))
482 479 stats->ms_kmem++;
483 480 else if (MS_PP_ISZFS_DATA(pp, stats))
484 481 stats->ms_zfs_data++;
485 482 else if (PP_ISFREE(pp))
486 483 stats->ms_cachelist++;
487 484 else if (vn_get(stats->ms_vn_htable, vp, (uintptr_t)pp->p_vnode))
488 485 return (WALK_ERR);
489 486 else if (IS_SWAPFSVP(vp))
490 487 stats->ms_anon++;
491 488 else if ((vp->v_flag & VVMEXEC) != 0)
492 489 stats->ms_exec++;
493 490 else
494 491 stats->ms_vnode++;
495 492
496 493 stats->ms_total++;
497 494
498 495 return (WALK_NEXT);
499 496 }
500 497
/*
 * ::memstat -- summarize system memory usage by walking every page and
 * classifying it (kernel, ZFS data, anon, exec, page cache, cachelist,
 * boot pages), then deriving freemem from the remainder.
 */
/* ARGSUSED */
int
memstat(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	pgcnt_t total_pages, physmem;
	ulong_t freemem;
	memstat_t stats;
	GElf_Sym sym;
	vn_htable_t ht;
	struct vnode *kvps;
	uintptr_t vn_size = 0;

	bzero(&stats, sizeof (memstat_t));

	/*
	 * -s size, is an internal option. It specifies the size of vn_htable.
	 * Hash table size is set in the following order:
	 * If user has specified the size that is larger than VN_LARGE: try it,
	 * but if malloc failed default to VN_SMALL. Otherwise try VN_LARGE, if
	 * failed to allocate default to VN_SMALL.
	 * For a better efficiency of hash table it is highly recommended to
	 * set size to a prime number.
	 */
	if ((flags & DCMD_ADDRSPEC) || mdb_getopts(argc, argv,
	    's', MDB_OPT_UINTPTR, &vn_size, NULL) != argc)
		return (DCMD_USAGE);

	/* Initialize vnode hash list and queue */
	vn_htable_init(&ht, vn_size);
	stats.ms_vn_htable = &ht;

	/* Total physical memory */
	if (mdb_readvar(&total_pages, "total_pages") == -1) {
		mdb_warn("unable to read total_pages");
		return (DCMD_ERR);
	}

	/* Artificially limited memory */
	if (mdb_readvar(&physmem, "physmem") == -1) {
		mdb_warn("unable to read physmem");
		return (DCMD_ERR);
	}

	/* read kernel vnode array pointer */
	if (mdb_lookup_by_obj(MDB_OBJ_EXEC, "kvps",
	    (GElf_Sym *)&sym) == -1) {
		mdb_warn("unable to read kvps");
		return (DCMD_ERR);
	}
	kvps = (struct vnode *)(uintptr_t)sym.st_value;
	stats.ms_kvp = &kvps[KV_KVP];

	/*
	 * Read the zio vnode pointer.
	 */
	stats.ms_zvp = &kvps[KV_ZVP];

	/*
	 * If physmem != total_pages, then the administrator has limited the
	 * number of pages available in the system. Excluded pages are
	 * associated with the unused pages vnode. Read this vnode so the
	 * pages can be excluded in the page accounting.
	 */
	if (mdb_lookup_by_obj(MDB_OBJ_EXEC, "unused_pages_vp",
	    (GElf_Sym *)&sym) == -1) {
		mdb_warn("unable to read unused_pages_vp");
		return (DCMD_ERR);
	}
	stats.ms_unused_vp = (struct vnode *)(uintptr_t)sym.st_value;

	/* walk all pages, collect statistics */
	if (mdb_walk("allpages", (mdb_walk_cb_t)memstat_callback,
	    &stats) == -1) {
		mdb_warn("can't walk memseg");
		return (DCMD_ERR);
	}

/* Percentage of physmem, rounded to the nearest percent */
#define	MS_PCT_TOTAL(x)	((ulong_t)((((5 * total_pages) + ((x) * 1000ull))) / \
		((physmem) * 10)))

	mdb_printf("Page Summary Pages MB"
	    " %%Tot\n");
	mdb_printf("------------ ---------------- ----------------"
	    " ----\n");
	mdb_printf("Kernel %16llu %16llu %3lu%%\n",
	    stats.ms_kmem,
	    (uint64_t)stats.ms_kmem * PAGESIZE / (1024 * 1024),
	    MS_PCT_TOTAL(stats.ms_kmem));

	if (stats.ms_bootpages != 0) {
		mdb_printf("Boot pages %16llu %16llu %3lu%%\n",
		    stats.ms_bootpages,
		    (uint64_t)stats.ms_bootpages * PAGESIZE / (1024 * 1024),
		    MS_PCT_TOTAL(stats.ms_bootpages));
	}

	if (stats.ms_zfs_data != 0) {
		mdb_printf("ZFS File Data %16llu %16llu %3lu%%\n",
		    stats.ms_zfs_data,
		    (uint64_t)stats.ms_zfs_data * PAGESIZE / (1024 * 1024),
		    MS_PCT_TOTAL(stats.ms_zfs_data));
	}

	mdb_printf("Anon %16llu %16llu %3lu%%\n",
	    stats.ms_anon,
	    (uint64_t)stats.ms_anon * PAGESIZE / (1024 * 1024),
	    MS_PCT_TOTAL(stats.ms_anon));
	mdb_printf("Exec and libs %16llu %16llu %3lu%%\n",
	    stats.ms_exec,
	    (uint64_t)stats.ms_exec * PAGESIZE / (1024 * 1024),
	    MS_PCT_TOTAL(stats.ms_exec));
	mdb_printf("Page cache %16llu %16llu %3lu%%\n",
	    stats.ms_vnode,
	    (uint64_t)stats.ms_vnode * PAGESIZE / (1024 * 1024),
	    MS_PCT_TOTAL(stats.ms_vnode));
	mdb_printf("Free (cachelist) %16llu %16llu %3lu%%\n",
	    stats.ms_cachelist,
	    (uint64_t)stats.ms_cachelist * PAGESIZE / (1024 * 1024),
	    MS_PCT_TOTAL(stats.ms_cachelist));

	/*
	 * occasionally, we double count pages above. To avoid printing
	 * absurdly large values for freemem, we clamp it at zero.
	 */
	if (physmem > stats.ms_total)
		freemem = physmem - stats.ms_total;
	else
		freemem = 0;

	mdb_printf("Free (freelist) %16lu %16llu %3lu%%\n", freemem,
	    (uint64_t)freemem * PAGESIZE / (1024 * 1024),
	    MS_PCT_TOTAL(freemem));

	mdb_printf("\nTotal %16lu %16lu\n",
	    physmem,
	    (uint64_t)physmem * PAGESIZE / (1024 * 1024));

	if (physmem != total_pages) {
		mdb_printf("Physical %16lu %16lu\n",
		    total_pages,
		    (uint64_t)total_pages * PAGESIZE / (1024 * 1024));
	}

#undef MS_PCT_TOTAL

	return (DCMD_OK);
}
671 645
/* Usage text for the ::pagelookup dcmd. */
void
pagelookup_help(void)
{
	mdb_printf(
	    "Finds the page with name { %<b>vp%</b>, %<b>offset%</b> }.\n"
	    "\n"
	    "Can be invoked three different ways:\n\n"
	    " ::pagelookup -v %<b>vp%</b> -o %<b>offset%</b>\n"
	    " %<b>vp%</b>::pagelookup -o %<b>offset%</b>\n"
	    " %<b>offset%</b>::pagelookup -v %<b>vp%</b>\n"
	    "\n"
	    "The latter two forms are useful in pipelines.\n");
}
685 659
686 660 int
687 661 pagelookup(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
688 662 {
689 663 uintptr_t vp = -(uintptr_t)1;
690 664 uint64_t offset = -(uint64_t)1;
691 665
692 666 uintptr_t pageaddr;
693 667 int hasaddr = (flags & DCMD_ADDRSPEC);
694 668 int usedaddr = 0;
695 669
696 670 if (mdb_getopts(argc, argv,
697 671 'v', MDB_OPT_UINTPTR, &vp,
698 672 'o', MDB_OPT_UINT64, &offset,
699 673 0) != argc) {
700 674 return (DCMD_USAGE);
701 675 }
702 676
703 677 if (vp == -(uintptr_t)1) {
704 678 if (offset == -(uint64_t)1) {
705 679 mdb_warn(
706 680 "pagelookup: at least one of -v vp or -o offset "
707 681 "required.\n");
708 682 return (DCMD_USAGE);
709 683 }
710 684 vp = addr;
711 685 usedaddr = 1;
712 686 } else if (offset == -(uint64_t)1) {
713 687 offset = mdb_get_dot();
714 688 usedaddr = 1;
715 689 }
716 690 if (usedaddr && !hasaddr) {
717 691 mdb_warn("pagelookup: address required\n");
718 692 return (DCMD_USAGE);
719 693 }
720 694 if (!usedaddr && hasaddr) {
721 695 mdb_warn(
722 696 "pagelookup: address specified when both -v and -o were "
723 697 "passed");
724 698 return (DCMD_USAGE);
725 699 }
726 700
727 701 pageaddr = mdb_page_lookup(vp, offset);
728 702 if (pageaddr == 0) {
729 703 mdb_warn("pagelookup: no page for {vp = %p, offset = %llp)\n",
730 704 vp, offset);
731 705 return (DCMD_OK);
732 706 }
733 707 mdb_printf("%#lr\n", pageaddr); /* this is PIPE_OUT friendly */
734 708 return (DCMD_OK);
735 709 }
736 710
737 711 /*ARGSUSED*/
738 712 int
739 713 page_num2pp(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
740 714 {
741 715 uintptr_t pp;
742 716
743 717 if (argc != 0 || !(flags & DCMD_ADDRSPEC)) {
744 718 return (DCMD_USAGE);
745 719 }
746 720
747 721 pp = mdb_pfn2page((pfn_t)addr);
748 722 if (pp == 0) {
749 723 return (DCMD_ERR);
750 724 }
751 725
752 726 if (flags & DCMD_PIPE_OUT) {
753 727 mdb_printf("%#lr\n", pp);
754 728 } else {
755 729 mdb_printf("%lx has page_t at %#lx\n", (pfn_t)addr, pp);
756 730 }
757 731
758 732 return (DCMD_OK);
759 733 }
760 734
761 735 int
762 736 page(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
763 737 {
764 738 page_t p;
765 739
766 740 if (!(flags & DCMD_ADDRSPEC)) {
767 741 if (mdb_walk_dcmd("page", "page", argc, argv) == -1) {
768 742 mdb_warn("can't walk pages");
769 743 return (DCMD_ERR);
770 744 }
771 745 return (DCMD_OK);
772 746 }
773 747
774 748 if (DCMD_HDRSPEC(flags)) {
775 749 mdb_printf("%<u>%?s %?s %16s %8s %3s %3s %2s %2s %2s%</u>\n",
776 750 "PAGE", "VNODE", "OFFSET", "SELOCK",
777 751 "LCT", "COW", "IO", "FS", "ST");
778 752 }
779 753
780 754 if (mdb_vread(&p, sizeof (page_t), addr) == -1) {
781 755 mdb_warn("can't read page_t at %#lx", addr);
782 756 return (DCMD_ERR);
783 757 }
784 758
785 759 mdb_printf("%0?lx %?p %16llx %8x %3d %3d %2x %2x %2x\n",
786 760 addr, p.p_vnode, p.p_offset, p.p_selock, p.p_lckcnt, p.p_cowcnt,
787 761 p.p_iolock_state, p.p_fsdata, p.p_state);
788 762
789 763 return (DCMD_OK);
790 764 }
791 765
792 766 int
793 767 swap_walk_init(mdb_walk_state_t *wsp)
794 768 {
795 769 void *ptr;
796 770
797 771 if ((mdb_readvar(&ptr, "swapinfo") == -1) || ptr == NULL) {
798 772 mdb_warn("swapinfo not found or invalid");
799 773 return (WALK_ERR);
800 774 }
801 775
802 776 wsp->walk_addr = (uintptr_t)ptr;
803 777
804 778 return (WALK_NEXT);
805 779 }
806 780
807 781 int
808 782 swap_walk_step(mdb_walk_state_t *wsp)
809 783 {
810 784 uintptr_t sip;
811 785 struct swapinfo si;
812 786
813 787 sip = wsp->walk_addr;
814 788
815 789 if (sip == NULL)
816 790 return (WALK_DONE);
817 791
818 792 if (mdb_vread(&si, sizeof (struct swapinfo), sip) == -1) {
819 793 mdb_warn("unable to read swapinfo at %#lx", sip);
820 794 return (WALK_ERR);
821 795 }
822 796
823 797 wsp->walk_addr = (uintptr_t)si.si_next;
824 798
825 799 return (wsp->walk_callback(sip, &si, wsp->walk_cbdata));
826 800 }
827 801
828 802 int
829 803 swapinfof(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
830 804 {
831 805 struct swapinfo si;
832 806 char *name;
833 807
834 808 if (!(flags & DCMD_ADDRSPEC)) {
835 809 if (mdb_walk_dcmd("swapinfo", "swapinfo", argc, argv) == -1) {
836 810 mdb_warn("can't walk swapinfo");
837 811 return (DCMD_ERR);
838 812 }
839 813 return (DCMD_OK);
840 814 }
841 815
842 816 if (DCMD_HDRSPEC(flags)) {
843 817 mdb_printf("%<u>%?s %?s %9s %9s %s%</u>\n",
844 818 "ADDR", "VNODE", "PAGES", "FREE", "NAME");
845 819 }
846 820
847 821 if (mdb_vread(&si, sizeof (struct swapinfo), addr) == -1) {
848 822 mdb_warn("can't read swapinfo at %#lx", addr);
849 823 return (DCMD_ERR);
850 824 }
851 825
852 826 name = mdb_alloc(si.si_pnamelen, UM_SLEEP | UM_GC);
853 827 if (mdb_vread(name, si.si_pnamelen, (uintptr_t)si.si_pname) == -1)
854 828 name = "*error*";
855 829
856 830 mdb_printf("%0?lx %?p %9d %9d %s\n",
857 831 addr, si.si_vp, si.si_npgs, si.si_nfpgs, name);
858 832
859 833 return (DCMD_OK);
860 834 }
861 835
862 836 int
863 837 memlist_walk_step(mdb_walk_state_t *wsp)
864 838 {
865 839 uintptr_t mlp;
866 840 struct memlist ml;
867 841
868 842 mlp = wsp->walk_addr;
869 843
870 844 if (mlp == NULL)
871 845 return (WALK_DONE);
872 846
873 847 if (mdb_vread(&ml, sizeof (struct memlist), mlp) == -1) {
874 848 mdb_warn("unable to read memlist at %#lx", mlp);
875 849 return (WALK_ERR);
876 850 }
877 851
878 852 wsp->walk_addr = (uintptr_t)ml.ml_next;
879 853
880 854 return (wsp->walk_callback(mlp, &ml, wsp->walk_cbdata));
881 855 }
882 856
883 857 int
884 858 memlist(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
885 859 {
886 860 struct memlist ml;
887 861
888 862 if (!(flags & DCMD_ADDRSPEC)) {
889 863 uintptr_t ptr;
890 864 uint_t list = 0;
891 865 int i;
892 866 static const char *lists[] = {
893 867 "phys_install",
894 868 "phys_avail",
895 869 "virt_avail"
896 870 };
897 871
898 872 if (mdb_getopts(argc, argv,
899 873 'i', MDB_OPT_SETBITS, (1 << 0), &list,
900 874 'a', MDB_OPT_SETBITS, (1 << 1), &list,
901 875 'v', MDB_OPT_SETBITS, (1 << 2), &list, NULL) != argc)
902 876 return (DCMD_USAGE);
903 877
904 878 if (!list)
905 879 list = 1;
906 880
907 881 for (i = 0; list; i++, list >>= 1) {
908 882 if (!(list & 1))
909 883 continue;
910 884 if ((mdb_readvar(&ptr, lists[i]) == -1) ||
911 885 (ptr == NULL)) {
912 886 mdb_warn("%s not found or invalid", lists[i]);
913 887 return (DCMD_ERR);
914 888 }
915 889
916 890 mdb_printf("%s:\n", lists[i]);
917 891 if (mdb_pwalk_dcmd("memlist", "memlist", 0, NULL,
918 892 ptr) == -1) {
919 893 mdb_warn("can't walk memlist");
920 894 return (DCMD_ERR);
921 895 }
922 896 }
923 897 return (DCMD_OK);
924 898 }
925 899
926 900 if (DCMD_HDRSPEC(flags))
927 901 mdb_printf("%<u>%?s %16s %16s%</u>\n", "ADDR", "BASE", "SIZE");
928 902
929 903 if (mdb_vread(&ml, sizeof (struct memlist), addr) == -1) {
930 904 mdb_warn("can't read memlist at %#lx", addr);
931 905 return (DCMD_ERR);
932 906 }
933 907
934 908 mdb_printf("%0?lx %16llx %16llx\n", addr, ml.ml_address, ml.ml_size);
935 909
936 910 return (DCMD_OK);
937 911 }
938 912
939 913 int
940 914 seg_walk_init(mdb_walk_state_t *wsp)
941 915 {
942 916 if (wsp->walk_addr == NULL) {
943 917 mdb_warn("seg walk must begin at struct as *\n");
944 918 return (WALK_ERR);
945 919 }
946 920
947 921 /*
948 922 * this is really just a wrapper to AVL tree walk
949 923 */
950 924 wsp->walk_addr = (uintptr_t)&((struct as *)wsp->walk_addr)->a_segtree;
951 925 return (avl_walk_init(wsp));
952 926 }
953 927
954 928 /*ARGSUSED*/
955 929 int
956 930 seg(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
957 931 {
958 932 struct seg s;
959 933
960 934 if (argc != 0)
961 935 return (DCMD_USAGE);
962 936
963 937 if ((flags & DCMD_LOOPFIRST) || !(flags & DCMD_LOOP)) {
964 938 mdb_printf("%<u>%?s %?s %?s %?s %s%</u>\n",
965 939 "SEG", "BASE", "SIZE", "DATA", "OPS");
966 940 }
967 941
968 942 if (mdb_vread(&s, sizeof (s), addr) == -1) {
969 943 mdb_warn("failed to read seg at %p", addr);
970 944 return (DCMD_ERR);
971 945 }
972 946
973 947 mdb_printf("%?p %?p %?lx %?p %a\n",
974 948 addr, s.s_base, s.s_size, s.s_data, s.s_ops);
975 949
976 950 return (DCMD_OK);
977 951 }
978 952
/*
 * Cached s_ops addresses used to classify segments by driver type.
 * NOTE(review): presumably the addresses of segvn_ops and seghole_ops,
 * filled in by the ::pmap entry point -- confirm against the caller.
 */
typedef struct pmap_walk_types {
	uintptr_t pwt_segvn;
	uintptr_t pwt_seghole;
} pmap_walk_types_t;
983 957
984 958 /*ARGSUSED*/
985 959 static int
986 960 pmap_walk_count_pages(uintptr_t addr, const void *data, void *out)
987 961 {
988 962 pgcnt_t *nres = out;
989 963
990 964 (*nres)++;
991 965
992 966 return (WALK_NEXT);
993 967 }
994 968
995 969 static int
996 970 pmap_walk_seg(uintptr_t addr, const struct seg *seg,
997 971 const pmap_walk_types_t *types)
998 972 {
999 973 const uintptr_t ops = (uintptr_t)seg->s_ops;
1000 974
1001 975 mdb_printf("%0?p %0?p %7dk", addr, seg->s_base, seg->s_size / 1024);
1002 976
1003 977 if (ops == types->pwt_segvn && seg->s_data != NULL) {
1004 978 struct segvn_data svn;
1005 979 pgcnt_t nres = 0;
1006 980
1007 981 svn.vp = NULL;
1008 982 (void) mdb_vread(&svn, sizeof (svn), (uintptr_t)seg->s_data);
1009 983
1010 984 /*
1011 985 * Use the segvn_pages walker to find all of the in-core pages
1012 986 * for this mapping.
1013 987 */
1014 988 if (mdb_pwalk("segvn_pages", pmap_walk_count_pages, &nres,
1015 989 (uintptr_t)seg->s_data) == -1) {
1016 990 mdb_warn("failed to walk segvn_pages (s_data=%p)",
1017 991 seg->s_data);
1018 992 }
1019 993 mdb_printf(" %7ldk", (nres * PAGESIZE) / 1024);
1020 994
1021 995 if (svn.vp != NULL) {
1022 996 char buf[29];
1023 997
1024 998 mdb_vnode2path((uintptr_t)svn.vp, buf, sizeof (buf));
1025 999 mdb_printf(" %s", buf);
1026 1000 } else {
1027 1001 mdb_printf(" [ anon ]");
1028 1002 }
1029 1003 } else if (ops == types->pwt_seghole && seg->s_data != NULL) {
1030 1004 seghole_data_t shd;
1031 1005 char name[16];
1032 1006
1033 1007 (void) mdb_vread(&shd, sizeof (shd), (uintptr_t)seg->s_data);
1034 1008 if (shd.shd_name == NULL || mdb_readstr(name, sizeof (name),
1035 1009 (uintptr_t)shd.shd_name) == 0) {
1036 1010 name[0] = '\0';
1037 1011 }
1038 1012
1039 1013 mdb_printf(" %8s [ hole%s%s ]", "-",
1040 1014 name[0] == '0' ? "" : ":", name);
1041 1015 } else {
1042 1016 mdb_printf(" %8s [ &%a ]", "?", seg->s_ops);
1043 1017 }
1044 1018
1045 1019 mdb_printf("\n");
1046 1020 return (WALK_NEXT);
1047 1021 }
1048 1022
1049 1023 static int
1050 1024 pmap_walk_seg_quick(uintptr_t addr, const struct seg *seg,
1051 1025 const pmap_walk_types_t *types)
1052 1026 {
1053 1027 const uintptr_t ops = (uintptr_t)seg->s_ops;
1054 1028
1055 1029 mdb_printf("%0?p %0?p %7dk", addr, seg->s_base, seg->s_size / 1024);
1056 1030
1057 1031 if (ops == types->pwt_segvn && seg->s_data != NULL) {
1058 1032 struct segvn_data svn;
1059 1033
1060 1034 svn.vp = NULL;
1061 1035 (void) mdb_vread(&svn, sizeof (svn), (uintptr_t)seg->s_data);
1062 1036
1063 1037 if (svn.vp != NULL) {
1064 1038 mdb_printf(" %0?p", svn.vp);
1065 1039 } else {
1066 1040 mdb_printf(" [ anon ]");
1067 1041 }
1068 1042 } else {
1069 1043 mdb_printf(" [ &%a ]", seg->s_ops);
1070 1044 }
1071 1045
1072 1046 mdb_printf("\n");
1073 1047 return (WALK_NEXT);
1074 1048 }
1075 1049
1076 1050 /*ARGSUSED*/
1077 1051 int
1078 1052 pmap(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
1079 1053 {
1080 1054 proc_t proc;
1081 1055 uint_t quick = FALSE;
1082 1056 mdb_walk_cb_t cb = (mdb_walk_cb_t)pmap_walk_seg;
1083 1057 pmap_walk_types_t wtypes = { 0 };
1084 1058
1085 1059 GElf_Sym sym;
1086 1060
1087 1061 if (!(flags & DCMD_ADDRSPEC))
1088 1062 return (DCMD_USAGE);
1089 1063
1090 1064 if (mdb_getopts(argc, argv,
1091 1065 'q', MDB_OPT_SETBITS, TRUE, &quick, NULL) != argc)
1092 1066 return (DCMD_USAGE);
1093 1067
1094 1068 if (mdb_vread(&proc, sizeof (proc), addr) == -1) {
1095 1069 mdb_warn("failed to read proc at %p", addr);
1096 1070 return (DCMD_ERR);
1097 1071 }
1098 1072
1099 1073 if (mdb_lookup_by_name("segvn_ops", &sym) == 0)
1100 1074 wtypes.pwt_segvn = (uintptr_t)sym.st_value;
1101 1075 if (mdb_lookup_by_name("seghole_ops", &sym) == 0)
1102 1076 wtypes.pwt_seghole = (uintptr_t)sym.st_value;
1103 1077
1104 1078 mdb_printf("%?s %?s %8s ", "SEG", "BASE", "SIZE");
1105 1079
1106 1080 if (quick) {
1107 1081 mdb_printf("VNODE\n");
1108 1082 cb = (mdb_walk_cb_t)pmap_walk_seg_quick;
1109 1083 } else {
1110 1084 mdb_printf("%8s %s\n", "RES", "PATH");
1111 1085 }
1112 1086
1113 1087 if (mdb_pwalk("seg", cb, (void *)&wtypes, (uintptr_t)proc.p_as) == -1) {
1114 1088 mdb_warn("failed to walk segments of as %p", proc.p_as);
1115 1089 return (DCMD_ERR);
1116 1090 }
1117 1091
1118 1092 return (DCMD_OK);
1119 1093 }
1120 1094
/*
 * State for the anon walkers.  An anon_hdr is either a single-level array
 * of anon pointers or a two-level array of chunk pointers; aw_levone holds
 * a snapshot of level one, and aw_levtwo (NULL for single-level arrays) is
 * a scratch buffer for the current level-two chunk.
 */
typedef struct anon_walk_data {
	uintptr_t *aw_levone;		/* snapshot of the level-one array */
	uintptr_t *aw_levtwo;		/* current level-two chunk, if any */
	size_t aw_minslot;		/* first slot (inclusive) to report */
	size_t aw_maxslot;		/* last slot (exclusive) to report */
	pgcnt_t aw_nlevone;		/* number of level-one entries */
	pgcnt_t aw_levone_ndx;		/* current level-one index */
	size_t aw_levtwo_ndx;		/* current index within aw_levtwo */
	struct anon_map *aw_ampp;	/* target anon_map address */
	struct anon_map aw_amp;		/* local copy of the anon_map */
	struct anon_hdr aw_ahp;		/* local copy of its anon_hdr */
	int aw_all;	/* report all anon pointers, even NULLs */
} anon_walk_data_t;
1134 1108
1135 1109 int
1136 1110 anon_walk_init_common(mdb_walk_state_t *wsp, ulong_t minslot, ulong_t maxslot)
1137 1111 {
1138 1112 anon_walk_data_t *aw;
1139 1113
1140 1114 if (wsp->walk_addr == NULL) {
1141 1115 mdb_warn("anon walk doesn't support global walks\n");
1142 1116 return (WALK_ERR);
1143 1117 }
1144 1118
1145 1119 aw = mdb_alloc(sizeof (anon_walk_data_t), UM_SLEEP);
1146 1120 aw->aw_ampp = (struct anon_map *)wsp->walk_addr;
1147 1121
1148 1122 if (mdb_vread(&aw->aw_amp, sizeof (aw->aw_amp), wsp->walk_addr) == -1) {
1149 1123 mdb_warn("failed to read anon map at %p", wsp->walk_addr);
1150 1124 mdb_free(aw, sizeof (anon_walk_data_t));
1151 1125 return (WALK_ERR);
1152 1126 }
1153 1127
1154 1128 if (mdb_vread(&aw->aw_ahp, sizeof (aw->aw_ahp),
1155 1129 (uintptr_t)(aw->aw_amp.ahp)) == -1) {
1156 1130 mdb_warn("failed to read anon hdr ptr at %p", aw->aw_amp.ahp);
1157 1131 mdb_free(aw, sizeof (anon_walk_data_t));
1158 1132 return (WALK_ERR);
1159 1133 }
1160 1134
1161 1135 /* update min and maxslot with the given constraints */
1162 1136 maxslot = MIN(maxslot, aw->aw_ahp.size);
1163 1137 minslot = MIN(minslot, maxslot);
1164 1138
1165 1139 if (aw->aw_ahp.size <= ANON_CHUNK_SIZE ||
1166 1140 (aw->aw_ahp.flags & ANON_ALLOC_FORCE)) {
1167 1141 aw->aw_nlevone = maxslot;
1168 1142 aw->aw_levone_ndx = minslot;
1169 1143 aw->aw_levtwo = NULL;
1170 1144 } else {
1171 1145 aw->aw_nlevone =
1172 1146 (maxslot + ANON_CHUNK_OFF) >> ANON_CHUNK_SHIFT;
1173 1147 aw->aw_levone_ndx = 0;
1174 1148 aw->aw_levtwo =
1175 1149 mdb_zalloc(ANON_CHUNK_SIZE * sizeof (uintptr_t), UM_SLEEP);
1176 1150 }
1177 1151
1178 1152 aw->aw_levone =
1179 1153 mdb_alloc(aw->aw_nlevone * sizeof (uintptr_t), UM_SLEEP);
1180 1154 aw->aw_all = (wsp->walk_arg == ANON_WALK_ALL);
1181 1155
1182 1156 mdb_vread(aw->aw_levone, aw->aw_nlevone * sizeof (uintptr_t),
1183 1157 (uintptr_t)aw->aw_ahp.array_chunk);
1184 1158
1185 1159 aw->aw_levtwo_ndx = 0;
1186 1160 aw->aw_minslot = minslot;
1187 1161 aw->aw_maxslot = maxslot;
1188 1162
1189 1163 out:
1190 1164 wsp->walk_data = aw;
1191 1165 return (0);
1192 1166 }
1193 1167
/*
 * Step the anon walk: emit the next anon slot in [aw_minslot, aw_maxslot).
 * For "anon_all" walks, empty slots are reported too (with a NULL address
 * and layer), so layered walkers can track exact slot offsets.
 */
int
anon_walk_step(mdb_walk_state_t *wsp)
{
	anon_walk_data_t *aw = (anon_walk_data_t *)wsp->walk_data;
	struct anon anon;
	uintptr_t anonptr;
	ulong_t slot;

	/*
	 * Once we've walked through level one, we're done.
	 */
	if (aw->aw_levone_ndx >= aw->aw_nlevone) {
		return (WALK_DONE);
	}

	if (aw->aw_levtwo == NULL) {
		/* Single-level array: level one holds anon pointers. */
		anonptr = aw->aw_levone[aw->aw_levone_ndx];
		aw->aw_levone_ndx++;
	} else {
		/*
		 * Two-level array: refill the cached level-two chunk each
		 * time we start a new chunk (levtwo_ndx wrapped to 0).
		 */
		if (aw->aw_levtwo_ndx == 0) {
			uintptr_t levtwoptr;

			/* The first time through, skip to our first index. */
			if (aw->aw_levone_ndx == 0) {
				aw->aw_levone_ndx =
				    aw->aw_minslot / ANON_CHUNK_SIZE;
				aw->aw_levtwo_ndx =
				    aw->aw_minslot % ANON_CHUNK_SIZE;
			}

			levtwoptr = (uintptr_t)aw->aw_levone[aw->aw_levone_ndx];

			if (levtwoptr == NULL) {
				if (!aw->aw_all) {
					/* Skip the entire empty chunk. */
					aw->aw_levtwo_ndx = 0;
					aw->aw_levone_ndx++;
					return (WALK_NEXT);
				}
				/* Reporting NULLs: fabricate an empty chunk. */
				bzero(aw->aw_levtwo,
				    ANON_CHUNK_SIZE * sizeof (uintptr_t));

			} else if (mdb_vread(aw->aw_levtwo,
			    ANON_CHUNK_SIZE * sizeof (uintptr_t), levtwoptr) ==
			    -1) {
				mdb_warn("unable to read anon_map %p's "
				    "second-level map %d at %p",
				    aw->aw_ampp, aw->aw_levone_ndx,
				    levtwoptr);
				return (WALK_ERR);
			}
		}
		slot = aw->aw_levone_ndx * ANON_CHUNK_SIZE + aw->aw_levtwo_ndx;
		anonptr = aw->aw_levtwo[aw->aw_levtwo_ndx];

		/* update the indices for next time */
		aw->aw_levtwo_ndx++;
		if (aw->aw_levtwo_ndx == ANON_CHUNK_SIZE) {
			aw->aw_levtwo_ndx = 0;
			aw->aw_levone_ndx++;
		}

		/* make sure the slot # is in the requested range */
		if (slot >= aw->aw_maxslot) {
			return (WALK_DONE);
		}
	}

	if (anonptr != NULL) {
		mdb_vread(&anon, sizeof (anon), anonptr);
		return (wsp->walk_callback(anonptr, &anon, wsp->walk_cbdata));
	}
	if (aw->aw_all) {
		/* Empty slot, but caller asked for all of them. */
		return (wsp->walk_callback(NULL, NULL, wsp->walk_cbdata));
	}
	return (WALK_NEXT);
}
1270 1244
1271 1245 void
1272 1246 anon_walk_fini(mdb_walk_state_t *wsp)
1273 1247 {
1274 1248 anon_walk_data_t *aw = (anon_walk_data_t *)wsp->walk_data;
1275 1249
1276 1250 if (aw->aw_levtwo != NULL)
1277 1251 mdb_free(aw->aw_levtwo, ANON_CHUNK_SIZE * sizeof (uintptr_t));
1278 1252
1279 1253 mdb_free(aw->aw_levone, aw->aw_nlevone * sizeof (uintptr_t));
1280 1254 mdb_free(aw, sizeof (anon_walk_data_t));
1281 1255 }
1282 1256
1283 1257 int
1284 1258 anon_walk_init(mdb_walk_state_t *wsp)
1285 1259 {
1286 1260 return (anon_walk_init_common(wsp, 0, ULONG_MAX));
1287 1261 }
1288 1262
1289 1263 int
1290 1264 segvn_anon_walk_init(mdb_walk_state_t *wsp)
1291 1265 {
1292 1266 const uintptr_t svd_addr = wsp->walk_addr;
1293 1267 uintptr_t amp_addr;
1294 1268 uintptr_t seg_addr;
1295 1269 struct segvn_data svd;
1296 1270 struct anon_map amp;
1297 1271 struct seg seg;
1298 1272
1299 1273 if (svd_addr == NULL) {
1300 1274 mdb_warn("segvn_anon walk doesn't support global walks\n");
1301 1275 return (WALK_ERR);
1302 1276 }
1303 1277 if (mdb_vread(&svd, sizeof (svd), svd_addr) == -1) {
1304 1278 mdb_warn("segvn_anon walk: unable to read segvn_data at %p",
1305 1279 svd_addr);
1306 1280 return (WALK_ERR);
1307 1281 }
1308 1282 if (svd.amp == NULL) {
1309 1283 mdb_warn("segvn_anon walk: segvn_data at %p has no anon map\n",
1310 1284 svd_addr);
1311 1285 return (WALK_ERR);
1312 1286 }
1313 1287 amp_addr = (uintptr_t)svd.amp;
1314 1288 if (mdb_vread(&, sizeof (amp), amp_addr) == -1) {
1315 1289 mdb_warn("segvn_anon walk: unable to read amp %p for "
1316 1290 "segvn_data %p", amp_addr, svd_addr);
1317 1291 return (WALK_ERR);
1318 1292 }
1319 1293 seg_addr = (uintptr_t)svd.seg;
1320 1294 if (mdb_vread(&seg, sizeof (seg), seg_addr) == -1) {
1321 1295 mdb_warn("segvn_anon walk: unable to read seg %p for "
1322 1296 "segvn_data %p", seg_addr, svd_addr);
1323 1297 return (WALK_ERR);
1324 1298 }
1325 1299 if ((seg.s_size + (svd.anon_index << PAGESHIFT)) > amp.size) {
1326 1300 mdb_warn("anon map %p is too small for segment %p\n",
1327 1301 amp_addr, seg_addr);
1328 1302 return (WALK_ERR);
1329 1303 }
1330 1304
1331 1305 wsp->walk_addr = amp_addr;
1332 1306 return (anon_walk_init_common(wsp,
1333 1307 svd.anon_index, svd.anon_index + (seg.s_size >> PAGESHIFT)));
1334 1308 }
1335 1309
1336 1310
/*
 * One in-core page found while pre-scanning a vnode's page list: the
 * page's file offset and the address of its page_t.
 */
typedef struct {
	u_offset_t svs_offset;	/* page's offset within the vnode */
	uintptr_t svs_page;	/* address of the corresponding page_t */
} segvn_sparse_t;
/* Cap the pre-scan array at 128K worth of entries. */
#define	SEGVN_MAX_SPARSE	((128 * 1024) / sizeof (segvn_sparse_t))
1342 1316
/*
 * State for the segvn_pages walker.
 */
typedef struct {
	uintptr_t svw_svdp;		/* target segvn_data address */
	struct segvn_data svw_svd;	/* local copy of the segvn_data */
	struct seg svw_seg;		/* local copy of its seg */
	size_t svw_walkoff;		/* current offset within the segment */
	ulong_t svw_anonskip;		/* anon slots already skipped */
	segvn_sparse_t *svw_sparse;	/* pre-scanned page array, or NULL */
	size_t svw_sparse_idx;		/* next sparse entry to report */
	size_t svw_sparse_count;	/* number of sparse entries filled */
	size_t svw_sparse_size;		/* capacity of svw_sparse */
	uint8_t svw_sparse_overflow;	/* set if pre-scan overflowed */
	uint8_t svw_all;		/* report all offsets, even empty */
} segvn_walk_data_t;
1356 1330
1357 1331 static int
1358 1332 segvn_sparse_fill(uintptr_t addr, const void *pp_arg, void *arg)
1359 1333 {
1360 1334 segvn_walk_data_t *const svw = arg;
1361 1335 const page_t *const pp = pp_arg;
1362 1336 const u_offset_t offset = pp->p_offset;
1363 1337 segvn_sparse_t *const cur =
1364 1338 &svw->svw_sparse[svw->svw_sparse_count];
1365 1339
1366 1340 /* See if the page is of interest */
1367 1341 if ((u_offset_t)(offset - svw->svw_svd.offset) >= svw->svw_seg.s_size) {
1368 1342 return (WALK_NEXT);
1369 1343 }
1370 1344 /* See if we have space for the new entry, then add it. */
1371 1345 if (svw->svw_sparse_count >= svw->svw_sparse_size) {
1372 1346 svw->svw_sparse_overflow = 1;
1373 1347 return (WALK_DONE);
1374 1348 }
1375 1349 svw->svw_sparse_count++;
1376 1350 cur->svs_offset = offset;
1377 1351 cur->svs_page = addr;
1378 1352 return (WALK_NEXT);
1379 1353 }
1380 1354
1381 1355 static int
1382 1356 segvn_sparse_cmp(const void *lp, const void *rp)
1383 1357 {
1384 1358 const segvn_sparse_t *const l = lp;
1385 1359 const segvn_sparse_t *const r = rp;
1386 1360
1387 1361 if (l->svs_offset < r->svs_offset) {
1388 1362 return (-1);
1389 1363 }
1390 1364 if (l->svs_offset > r->svs_offset) {
1391 1365 return (1);
1392 1366 }
1393 1367 return (0);
1394 1368 }
1395 1369
1396 1370 /*
1397 1371 * Builds on the "anon_all" walker to walk all resident pages in a segvn_data
1398 1372 * structure. For segvn_datas without an anon structure, it just looks up
1399 1373 * pages in the vnode. For segvn_datas with an anon structure, NULL slots
1400 1374 * pass through to the vnode, and non-null slots are checked for residency.
1401 1375 */
int
segvn_pages_walk_init(mdb_walk_state_t *wsp)
{
	segvn_walk_data_t *svw;
	struct segvn_data *svd;

	if (wsp->walk_addr == NULL) {
		mdb_warn("segvn walk doesn't support global walks\n");
		return (WALK_ERR);
	}

	svw = mdb_zalloc(sizeof (*svw), UM_SLEEP);
	svw->svw_svdp = wsp->walk_addr;
	svw->svw_anonskip = 0;
	svw->svw_sparse_idx = 0;
	svw->svw_walkoff = 0;
	svw->svw_all = (wsp->walk_arg == SEGVN_PAGES_ALL);

	/* Snapshot the segvn_data and its seg so the walk is consistent. */
	if (mdb_vread(&svw->svw_svd, sizeof (svw->svw_svd), wsp->walk_addr) ==
	    -1) {
		mdb_warn("failed to read segvn_data at %p", wsp->walk_addr);
		mdb_free(svw, sizeof (*svw));
		return (WALK_ERR);
	}

	svd = &svw->svw_svd;
	if (mdb_vread(&svw->svw_seg, sizeof (svw->svw_seg),
	    (uintptr_t)svd->seg) == -1) {
		mdb_warn("failed to read seg at %p (from %p)",
		    svd->seg, &((struct segvn_data *)(wsp->walk_addr))->seg);
		mdb_free(svw, sizeof (*svw));
		return (WALK_ERR);
	}

	if (svd->amp == NULL && svd->vp == NULL) {
		/* make the walk terminate immediately; no pages */
		svw->svw_walkoff = svw->svw_seg.s_size;

	} else if (svd->amp == NULL &&
	    (svw->svw_seg.s_size >> PAGESHIFT) >= SEGVN_MAX_SPARSE) {
		/*
		 * If we don't have an anon pointer, and the segment is large,
		 * we try to load the in-memory pages into a fixed-size array,
		 * which is then sorted and reported directly. This is much
		 * faster than doing a mdb_page_lookup() for each possible
		 * offset.
		 *
		 * If the allocation fails, or there are too many pages
		 * in-core, we fall back to looking up the pages individually.
		 */
		svw->svw_sparse = mdb_alloc(
		    SEGVN_MAX_SPARSE * sizeof (*svw->svw_sparse), UM_NOSLEEP);
		if (svw->svw_sparse != NULL) {
			svw->svw_sparse_size = SEGVN_MAX_SPARSE;

			if (mdb_pwalk("page", segvn_sparse_fill, svw,
			    (uintptr_t)svd->vp) == -1 ||
			    svw->svw_sparse_overflow) {
				/* Pre-scan failed; use per-offset lookups. */
				mdb_free(svw->svw_sparse, SEGVN_MAX_SPARSE *
				    sizeof (*svw->svw_sparse));
				svw->svw_sparse = NULL;
			} else {
				qsort(svw->svw_sparse, svw->svw_sparse_count,
				    sizeof (*svw->svw_sparse),
				    segvn_sparse_cmp);
			}
		}

	} else if (svd->amp != NULL) {
		const char *const layer = (!svw->svw_all && svd->vp == NULL) ?
		    "segvn_anon" : "segvn_anon_all";
		/*
		 * If we're not printing all offsets, and the segvn_data has
		 * no backing VP, we can use the "segvn_anon" walker, which
		 * efficiently skips NULL slots.
		 *
		 * Otherwise, we layer over the "segvn_anon_all" walker
		 * (which reports all anon slots, even NULL ones), so that
		 * segvn_pages_walk_step() knows the precise offset for each
		 * element. It uses that offset information to look up the
		 * backing pages for NULL anon slots.
		 */
		if (mdb_layered_walk(layer, wsp) == -1) {
			mdb_warn("segvn_pages: failed to layer \"%s\" "
			    "for segvn_data %p", layer, svw->svw_svdp);
			mdb_free(svw, sizeof (*svw));
			return (WALK_ERR);
		}
	}

	wsp->walk_data = svw;
	return (WALK_NEXT);
}
1495 1469
/*
 * Step the segvn_pages walk: resolve the page (if any) at the current
 * segment offset via the sparse pre-scan array, a <vp, offset> lookup, or
 * the layered anon walk, then advance the offset by one page.
 */
int
segvn_pages_walk_step(mdb_walk_state_t *wsp)
{
	segvn_walk_data_t *const svw = wsp->walk_data;
	struct seg *const seg = &svw->svw_seg;
	struct segvn_data *const svd = &svw->svw_svd;
	uintptr_t pp;
	page_t page;

	/* If we've walked off the end of the segment, we're done. */
	if (svw->svw_walkoff >= seg->s_size) {
		return (WALK_DONE);
	}

	/*
	 * If we've got a sparse page array, just send it directly.
	 */
	if (svw->svw_sparse != NULL) {
		u_offset_t off;

		if (svw->svw_sparse_idx >= svw->svw_sparse_count) {
			/* Array exhausted; only NULLs remain (if -all). */
			pp = NULL;
			if (!svw->svw_all) {
				return (WALK_DONE);
			}
		} else {
			segvn_sparse_t *const svs =
			    &svw->svw_sparse[svw->svw_sparse_idx];
			off = svs->svs_offset - svd->offset;
			if (svw->svw_all && svw->svw_walkoff != off) {
				/* Emit NULLs until we catch up to the page. */
				pp = NULL;
			} else {
				pp = svs->svs_page;
				svw->svw_sparse_idx++;
			}
		}

	} else if (svd->amp == NULL || wsp->walk_addr == NULL) {
		/*
		 * If there's no anon, or the anon slot is NULL, look up
		 * <vp, offset>.
		 */
		if (svd->vp != NULL) {
			pp = mdb_page_lookup((uintptr_t)svd->vp,
			    svd->offset + svw->svw_walkoff);
		} else {
			pp = NULL;
		}

	} else {
		const struct anon *const anon = wsp->walk_layer;

		/*
		 * We have a "struct anon"; if it's not swapped out,
		 * look up the page.
		 */
		if (anon->an_vp != NULL || anon->an_off != 0) {
			pp = mdb_page_lookup((uintptr_t)anon->an_vp,
			    anon->an_off);
			if (pp == 0 && mdb_get_state() != MDB_STATE_RUNNING) {
				mdb_warn("walk segvn_pages: segvn_data %p "
				    "offset %ld, anon page <%p, %llx> not "
				    "found.\n", svw->svw_svdp, svw->svw_walkoff,
				    anon->an_vp, anon->an_off);
			}
		} else {
			if (anon->an_pvp == NULL) {
				mdb_warn("walk segvn_pages: useless struct "
				    "anon at %p\n", wsp->walk_addr);
			}
			pp = NULL;	/* nothing at this offset */
		}
	}

	svw->svw_walkoff += PAGESIZE;	/* Update for the next call */
	if (pp != NULL) {
		if (mdb_vread(&page, sizeof (page_t), pp) == -1) {
			mdb_warn("unable to read page_t at %#lx", pp);
			return (WALK_ERR);
		}
		return (wsp->walk_callback(pp, &page, wsp->walk_cbdata));
	}
	if (svw->svw_all) {
		return (wsp->walk_callback(NULL, NULL, wsp->walk_cbdata));
	}
	return (WALK_NEXT);
}
1583 1557
1584 1558 void
1585 1559 segvn_pages_walk_fini(mdb_walk_state_t *wsp)
1586 1560 {
1587 1561 segvn_walk_data_t *const svw = wsp->walk_data;
1588 1562
1589 1563 if (svw->svw_sparse != NULL) {
1590 1564 mdb_free(svw->svw_sparse, SEGVN_MAX_SPARSE *
1591 1565 sizeof (*svw->svw_sparse));
1592 1566 }
1593 1567 mdb_free(svw, sizeof (*svw));
1594 1568 }
1595 1569
/*
 * Grumble, grumble.
 *
 * Mirror of the segmap hash function: hashes a <vnode, offset> pair into
 * an smd_hash bucket index.  Note that the expansion references the
 * caller's local "smd_hashmsk" variable, so this macro can only be used
 * where such a local is in scope (i.e., inside ::vnode2smap below).
 */
#define	SMAP_HASHFUNC(vp, off)	\
	((((uintptr_t)(vp) >> 6) + ((uintptr_t)(vp) >> 3) + \
	((off) >> MAXBSHIFT)) & smd_hashmsk)
1602 1576
1603 1577 int
1604 1578 vnode2smap(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
1605 1579 {
1606 1580 long smd_hashmsk;
1607 1581 int hash;
1608 1582 uintptr_t offset = 0;
1609 1583 struct smap smp;
1610 1584 uintptr_t saddr, kaddr;
1611 1585 uintptr_t smd_hash, smd_smap;
1612 1586 struct seg seg;
1613 1587
1614 1588 if (!(flags & DCMD_ADDRSPEC))
1615 1589 return (DCMD_USAGE);
1616 1590
1617 1591 if (mdb_readvar(&smd_hashmsk, "smd_hashmsk") == -1) {
1618 1592 mdb_warn("failed to read smd_hashmsk");
1619 1593 return (DCMD_ERR);
1620 1594 }
1621 1595
1622 1596 if (mdb_readvar(&smd_hash, "smd_hash") == -1) {
1623 1597 mdb_warn("failed to read smd_hash");
1624 1598 return (DCMD_ERR);
1625 1599 }
1626 1600
1627 1601 if (mdb_readvar(&smd_smap, "smd_smap") == -1) {
1628 1602 mdb_warn("failed to read smd_hash");
1629 1603 return (DCMD_ERR);
1630 1604 }
1631 1605
1632 1606 if (mdb_readvar(&kaddr, "segkmap") == -1) {
1633 1607 mdb_warn("failed to read segkmap");
1634 1608 return (DCMD_ERR);
1635 1609 }
1636 1610
1637 1611 if (mdb_vread(&seg, sizeof (seg), kaddr) == -1) {
1638 1612 mdb_warn("failed to read segkmap at %p", kaddr);
1639 1613 return (DCMD_ERR);
1640 1614 }
1641 1615
1642 1616 if (argc != 0) {
1643 1617 const mdb_arg_t *arg = &argv[0];
1644 1618
1645 1619 if (arg->a_type == MDB_TYPE_IMMEDIATE)
1646 1620 offset = arg->a_un.a_val;
1647 1621 else
1648 1622 offset = (uintptr_t)mdb_strtoull(arg->a_un.a_str);
1649 1623 }
1650 1624
1651 1625 hash = SMAP_HASHFUNC(addr, offset);
1652 1626
1653 1627 if (mdb_vread(&saddr, sizeof (saddr),
1654 1628 smd_hash + hash * sizeof (uintptr_t)) == -1) {
1655 1629 mdb_warn("couldn't read smap at %p",
1656 1630 smd_hash + hash * sizeof (uintptr_t));
1657 1631 return (DCMD_ERR);
1658 1632 }
1659 1633
1660 1634 do {
1661 1635 if (mdb_vread(&smp, sizeof (smp), saddr) == -1) {
1662 1636 mdb_warn("couldn't read smap at %p", saddr);
1663 1637 return (DCMD_ERR);
1664 1638 }
1665 1639
1666 1640 if ((uintptr_t)smp.sm_vp == addr && smp.sm_off == offset) {
1667 1641 mdb_printf("vnode %p, offs %p is smap %p, vaddr %p\n",
1668 1642 addr, offset, saddr, ((saddr - smd_smap) /
1669 1643 sizeof (smp)) * MAXBSIZE + seg.s_base);
1670 1644 return (DCMD_OK);
1671 1645 }
1672 1646
1673 1647 saddr = (uintptr_t)smp.sm_hash;
1674 1648 } while (saddr != NULL);
1675 1649
1676 1650 mdb_printf("no smap for vnode %p, offs %p\n", addr, offset);
1677 1651 return (DCMD_OK);
1678 1652 }
1679 1653
1680 1654 /*ARGSUSED*/
1681 1655 int
1682 1656 addr2smap(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
1683 1657 {
1684 1658 uintptr_t kaddr;
1685 1659 struct seg seg;
1686 1660 struct segmap_data sd;
1687 1661
1688 1662 if (!(flags & DCMD_ADDRSPEC))
1689 1663 return (DCMD_USAGE);
1690 1664
1691 1665 if (mdb_readvar(&kaddr, "segkmap") == -1) {
1692 1666 mdb_warn("failed to read segkmap");
1693 1667 return (DCMD_ERR);
1694 1668 }
1695 1669
1696 1670 if (mdb_vread(&seg, sizeof (seg), kaddr) == -1) {
1697 1671 mdb_warn("failed to read segkmap at %p", kaddr);
1698 1672 return (DCMD_ERR);
1699 1673 }
1700 1674
1701 1675 if (mdb_vread(&sd, sizeof (sd), (uintptr_t)seg.s_data) == -1) {
1702 1676 mdb_warn("failed to read segmap_data at %p", seg.s_data);
1703 1677 return (DCMD_ERR);
1704 1678 }
1705 1679
1706 1680 mdb_printf("%p is smap %p\n", addr,
1707 1681 ((addr - (uintptr_t)seg.s_base) >> MAXBSHIFT) *
1708 1682 sizeof (struct smap) + (uintptr_t)sd.smd_sm);
1709 1683
1710 1684 return (DCMD_OK);
1711 1685 }
↓ open down ↓ |
1045 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX