8956 Implement KPTI
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
Reviewed by: Robert Mustacchi <rm@joyent.com>
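This change also adjusts how the unix mdb module prints virtual addresses: a VA_SIGN_EXTEND() macro is added near the top of the file so that page-table offsets are reported as canonical 64-bit addresses. Below is a minimal standalone sketch of that sign-extension trick; it is not part of the webrev itself, the two sample addresses are invented for illustration, and an LP64 environment is assumed.

/*
 * Illustration only: sign-extend bit 47 of a virtual address into
 * bits 48-63 using the same XOR-then-subtract trick as the
 * VA_SIGN_EXTEND macro added in this change.
 */
#include <stdio.h>
#include <inttypes.h>

#define	VA_SIGN_BIT	(1UL << 47)
#define	VA_SIGN_EXTEND(va)	(((va) ^ VA_SIGN_BIT) - VA_SIGN_BIT)

int
main(void)
{
	uint64_t low = 0x00007fffee000000UL;	/* bit 47 clear (user half) */
	uint64_t high = 0x0000fffffb000000UL;	/* bit 47 set (kernel half) */

	/* prints 7fffee000000: addresses below the sign bit pass through */
	printf("%" PRIx64 "\n", VA_SIGN_EXTEND(low));
	/* prints fffffffffb000000: bit 47 is propagated through bit 63 */
	printf("%" PRIx64 "\n", VA_SIGN_EXTEND(high));
	return (0);
}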
--- old/usr/src/cmd/mdb/i86pc/modules/unix/i86mmu.c
+++ new/usr/src/cmd/mdb/i86pc/modules/unix/i86mmu.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 + *
25 + * Copyright 2018 Joyent, Inc.
24 26 */
25 27
26 28 /*
27 29 * This part of the file contains the mdb support for dcmds:
28 30 * ::memseg_list
29 31 * and walkers for:
30 32 * memseg - a memseg list walker for ::memseg_list
31 33 *
32 34 */
33 35
34 36 #include <sys/types.h>
35 37 #include <sys/machparam.h>
36 38 #include <sys/controlregs.h>
37 39 #include <sys/mach_mmu.h>
38 40 #ifdef __xpv
39 41 #include <sys/hypervisor.h>
40 42 #endif
41 43 #include <vm/as.h>
42 44
43 45 #include <mdb/mdb_modapi.h>
44 46 #include <mdb/mdb_target.h>
45 47
46 48 #include <vm/page.h>
47 49 #include <vm/hat_i86.h>
48 50
51 +#define VA_SIGN_BIT (1UL << 47)
52 +#define VA_SIGN_EXTEND(va) (((va) ^ VA_SIGN_BIT) - VA_SIGN_BIT)
53 +
49 54 struct pfn2pp {
50 55 pfn_t pfn;
51 56 page_t *pp;
52 57 };
53 58
54 59 static int do_va2pa(uintptr_t, struct as *, int, physaddr_t *, pfn_t *);
55 60 static void init_mmu(void);
56 61
57 62 int
58 63 platform_vtop(uintptr_t addr, struct as *asp, physaddr_t *pap)
59 64 {
60 65 if (asp == NULL)
61 66 return (DCMD_ERR);
62 67
63 68 init_mmu();
64 69
65 70 if (mmu.num_level == 0)
66 71 return (DCMD_ERR);
67 72
68 73 return (do_va2pa(addr, asp, 0, pap, NULL));
69 74 }
70 75
71 76 /*
72 77 * ::memseg_list dcmd and walker to implement it.
73 78 */
74 79 /*ARGSUSED*/
75 80 int
76 81 memseg_list(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
77 82 {
78 83 struct memseg ms;
79 84
80 85 if (!(flags & DCMD_ADDRSPEC)) {
81 86 if (mdb_pwalk_dcmd("memseg", "memseg_list",
82 87 0, NULL, 0) == -1) {
83 88 mdb_warn("can't walk memseg");
84 89 return (DCMD_ERR);
85 90 }
86 91 return (DCMD_OK);
87 92 }
88 93
89 94 if (DCMD_HDRSPEC(flags))
90 95 mdb_printf("%<u>%?s %?s %?s %?s %?s%</u>\n", "ADDR",
91 96 "PAGES", "EPAGES", "BASE", "END");
92 97
93 98 if (mdb_vread(&ms, sizeof (struct memseg), addr) == -1) {
94 99 mdb_warn("can't read memseg at %#lx", addr);
95 100 return (DCMD_ERR);
96 101 }
97 102
98 103 mdb_printf("%0?lx %0?lx %0?lx %0?lx %0?lx\n", addr,
99 104 ms.pages, ms.epages, ms.pages_base, ms.pages_end);
100 105
101 106 return (DCMD_OK);
102 107 }
103 108
104 109 /*
105 110 * walk the memseg structures
106 111 */
107 112 int
108 113 memseg_walk_init(mdb_walk_state_t *wsp)
109 114 {
110 115 if (wsp->walk_addr != NULL) {
111 116 mdb_warn("memseg only supports global walks\n");
112 117 return (WALK_ERR);
113 118 }
114 119
115 120 if (mdb_readvar(&wsp->walk_addr, "memsegs") == -1) {
116 121 mdb_warn("symbol 'memsegs' not found");
117 122 return (WALK_ERR);
118 123 }
119 124
120 125 wsp->walk_data = mdb_alloc(sizeof (struct memseg), UM_SLEEP);
121 126 return (WALK_NEXT);
122 127
123 128 }
124 129
125 130 int
126 131 memseg_walk_step(mdb_walk_state_t *wsp)
127 132 {
128 133 int status;
129 134
130 135 if (wsp->walk_addr == 0) {
131 136 return (WALK_DONE);
132 137 }
133 138
134 139 if (mdb_vread(wsp->walk_data, sizeof (struct memseg),
135 140 wsp->walk_addr) == -1) {
136 141 mdb_warn("failed to read struct memseg at %p", wsp->walk_addr);
137 142 return (WALK_DONE);
138 143 }
139 144
140 145 status = wsp->walk_callback(wsp->walk_addr, wsp->walk_data,
141 146 wsp->walk_cbdata);
142 147
143 148 wsp->walk_addr = (uintptr_t)(((struct memseg *)wsp->walk_data)->next);
144 149
145 150 return (status);
146 151 }
147 152
148 153 void
149 154 memseg_walk_fini(mdb_walk_state_t *wsp)
150 155 {
151 156 mdb_free(wsp->walk_data, sizeof (struct memseg));
152 157 }
153 158
154 159 /*
155 160 * Now HAT related dcmds.
156 161 */
157 162
158 163 static struct hat *khat; /* value of kas.a_hat */
159 164 struct hat_mmu_info mmu;
160 165 uintptr_t kernelbase;
161 166
162 167 /*
163 168 * stuff for i86xpv images
164 169 */
165 170 static int is_xpv;
166 171 static uintptr_t mfn_list_addr; /* kernel MFN list address */
167 172 uintptr_t xen_virt_start; /* address of mfn_to_pfn[] table */
168 173 ulong_t mfn_count; /* number of pfns in the MFN list */
169 174 pfn_t *mfn_list; /* local MFN list copy */
170 175
171 176 /*
172 177 * read mmu parameters from kernel
173 178 */
174 179 static void
175 180 init_mmu(void)
176 181 {
177 182 struct as kas;
178 183
179 184 if (mmu.num_level != 0)
180 185 return;
181 186
182 187 if (mdb_readsym(&mmu, sizeof (mmu), "mmu") == -1)
183 188 mdb_warn("Can't use HAT information before mmu_init()\n");
184 189 if (mdb_readsym(&kas, sizeof (kas), "kas") == -1)
185 190 mdb_warn("Couldn't find kas - kernel's struct as\n");
186 191 if (mdb_readsym(&kernelbase, sizeof (kernelbase), "kernelbase") == -1)
187 192 mdb_warn("Couldn't find kernelbase\n");
188 193 khat = kas.a_hat;
189 194
190 195 /*
191 196 * Is this a paravirtualized domain image?
192 197 */
193 198 if (mdb_readsym(&mfn_list_addr, sizeof (mfn_list_addr),
194 199 "mfn_list") == -1 ||
195 200 mdb_readsym(&xen_virt_start, sizeof (xen_virt_start),
196 201 "xen_virt_start") == -1 ||
197 202 mdb_readsym(&mfn_count, sizeof (mfn_count), "mfn_count") == -1) {
198 203 mfn_list_addr = NULL;
199 204 }
200 205
201 206 is_xpv = mfn_list_addr != NULL;
202 207
203 208 #ifndef _KMDB
204 209 /*
205 210 * recreate the local mfn_list
206 211 */
207 212 if (is_xpv) {
208 213 size_t sz = mfn_count * sizeof (pfn_t);
209 214 mfn_list = mdb_zalloc(sz, UM_SLEEP);
210 215
211 216 if (mdb_vread(mfn_list, sz, (uintptr_t)mfn_list_addr) == -1) {
212 217 mdb_warn("Failed to read MFN list\n");
213 218 mdb_free(mfn_list, sz);
214 219 mfn_list = NULL;
215 220 }
216 221 }
217 222 #endif
218 223 }
219 224
220 225 void
221 226 free_mmu(void)
222 227 {
223 228 #ifdef __xpv
224 229 if (mfn_list != NULL)
225 230 mdb_free(mfn_list, mfn_count * sizeof (mfn_t));
226 231 #endif
227 232 }
228 233
229 234 #ifdef __xpv
230 235
231 236 #ifdef _KMDB
232 237
233 238 /*
234 239 * Convert between MFNs and PFNs. Since we're in kmdb we can go directly
235 240 * through the machine to phys mapping and the MFN list.
236 241 */
237 242
238 243 pfn_t
239 244 mdb_mfn_to_pfn(mfn_t mfn)
240 245 {
241 246 pfn_t pfn;
242 247 mfn_t tmp;
243 248 pfn_t *pfn_list;
244 249
245 250 if (mfn_list_addr == NULL)
246 251 return (-(pfn_t)1);
247 252
248 253 pfn_list = (pfn_t *)xen_virt_start;
249 254 if (mdb_vread(&pfn, sizeof (pfn), (uintptr_t)(pfn_list + mfn)) == -1)
250 255 return (-(pfn_t)1);
251 256
252 257 if (mdb_vread(&tmp, sizeof (tmp),
253 258 (uintptr_t)(mfn_list_addr + (pfn * sizeof (mfn_t)))) == -1)
254 259 return (-(pfn_t)1);
255 260
256 261 if (pfn >= mfn_count || tmp != mfn)
257 262 return (-(pfn_t)1);
258 263
259 264 return (pfn);
260 265 }
261 266
262 267 mfn_t
263 268 mdb_pfn_to_mfn(pfn_t pfn)
264 269 {
265 270 mfn_t mfn;
266 271
267 272 init_mmu();
268 273
269 274 if (mfn_list_addr == NULL || pfn >= mfn_count)
270 275 return (-(mfn_t)1);
271 276
272 277 if (mdb_vread(&mfn, sizeof (mfn),
273 278 (uintptr_t)(mfn_list_addr + (pfn * sizeof (mfn_t)))) == -1)
274 279 return (-(mfn_t)1);
275 280
276 281 return (mfn);
277 282 }
278 283
279 284 #else /* _KMDB */
280 285
281 286 /*
282 287 * Convert between MFNs and PFNs. Since a crash dump doesn't include the
283 288 * MFN->PFN translation table (it's part of the hypervisor, not our image)
284 289 * we do the MFN->PFN translation by searching the PFN->MFN (mfn_list)
285 290 * table, if it's there.
286 291 */
287 292
288 293 pfn_t
289 294 mdb_mfn_to_pfn(mfn_t mfn)
290 295 {
291 296 pfn_t pfn;
292 297
293 298 init_mmu();
294 299
295 300 if (mfn_list == NULL)
296 301 return (-(pfn_t)1);
297 302
298 303 for (pfn = 0; pfn < mfn_count; ++pfn) {
299 304 if (mfn_list[pfn] != mfn)
300 305 continue;
301 306 return (pfn);
302 307 }
303 308
304 309 return (-(pfn_t)1);
305 310 }
306 311
307 312 mfn_t
308 313 mdb_pfn_to_mfn(pfn_t pfn)
309 314 {
310 315 init_mmu();
311 316
312 317 if (mfn_list == NULL || pfn >= mfn_count)
313 318 return (-(mfn_t)1);
314 319
315 320 return (mfn_list[pfn]);
316 321 }
317 322
318 323 #endif /* _KMDB */
319 324
320 325 static paddr_t
321 326 mdb_ma_to_pa(uint64_t ma)
322 327 {
323 328 pfn_t pfn = mdb_mfn_to_pfn(mmu_btop(ma));
324 329 if (pfn == -(pfn_t)1)
325 330 return (-(paddr_t)1);
326 331
327 332 return (mmu_ptob((paddr_t)pfn) | (ma & (MMU_PAGESIZE - 1)));
328 333 }
329 334
330 335 #else /* __xpv */
331 336
332 337 #define mdb_ma_to_pa(ma) (ma)
333 338 #define mdb_mfn_to_pfn(mfn) (mfn)
334 339 #define mdb_pfn_to_mfn(pfn) (pfn)
335 340
336 341 #endif /* __xpv */
337 342
338 343 /*
339 344 * ::mfntopfn dcmd translates hypervisor machine page number
340 345 * to physical page number
341 346 */
342 347 /*ARGSUSED*/
343 348 int
344 349 mfntopfn_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
345 350 {
346 351 pfn_t pfn;
347 352
348 353 if ((flags & DCMD_ADDRSPEC) == 0) {
349 354 mdb_warn("MFN missing\n");
350 355 return (DCMD_USAGE);
351 356 }
352 357
353 358 if ((pfn = mdb_mfn_to_pfn((pfn_t)addr)) == -(pfn_t)1) {
354 359 mdb_warn("Invalid mfn %lr\n", (pfn_t)addr);
355 360 return (DCMD_ERR);
356 361 }
357 362
358 363 mdb_printf("%lr\n", pfn);
359 364
360 365 return (DCMD_OK);
361 366 }
362 367
363 368 /*
364 369 * ::pfntomfn dcmd translates physical page number to
365 370 * hypervisor machine page number
366 371 */
367 372 /*ARGSUSED*/
368 373 int
369 374 pfntomfn_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
370 375 {
371 376 pfn_t mfn;
372 377
373 378 if ((flags & DCMD_ADDRSPEC) == 0) {
374 379 mdb_warn("PFN missing\n");
375 380 return (DCMD_USAGE);
376 381 }
377 382
378 383 if ((mfn = mdb_pfn_to_mfn((pfn_t)addr)) == -(pfn_t)1) {
379 384 mdb_warn("Invalid pfn %lr\n", (pfn_t)addr);
380 385 return (DCMD_ABORT);
381 386 }
382 387
383 388 mdb_printf("%lr\n", mfn);
384 389
385 390 if (flags & DCMD_LOOP)
386 391 mdb_set_dot(addr + 1);
387 392 return (DCMD_OK);
388 393 }
389 394
390 395 static pfn_t
391 396 pte2mfn(x86pte_t pte, uint_t level)
392 397 {
393 398 pfn_t mfn;
394 399 if (level > 0 && (pte & PT_PAGESIZE))
395 400 mfn = mmu_btop(pte & PT_PADDR_LGPG);
396 401 else
397 402 mfn = mmu_btop(pte & PT_PADDR);
398 403 return (mfn);
399 404 }
400 405
401 -/*
402 - * Print a PTE in more human friendly way. The PTE is assumed to be in
403 - * a level 0 page table, unless -l specifies another level.
404 - *
405 - * The PTE value can be specified as the -p option, since on a 32 bit kernel
406 - * with PAE running it's larger than a uintptr_t.
407 - */
408 406 static int
409 407 do_pte_dcmd(int level, uint64_t pte)
410 408 {
411 409 static char *attr[] = {
412 410 "wrback", "wrthru", "uncached", "uncached",
413 411 "wrback", "wrthru", "wrcombine", "uncached"};
414 412 int pat_index = 0;
415 413 pfn_t mfn;
416 414
417 - mdb_printf("pte=%llr: ", pte);
418 - if (PTE_GET(pte, mmu.pt_nx))
419 - mdb_printf("noexec ");
415 + mdb_printf("pte=0x%llr: ", pte);
420 416
421 417 mfn = pte2mfn(pte, level);
422 418 mdb_printf("%s=0x%lr ", is_xpv ? "mfn" : "pfn", mfn);
423 419
420 + if (PTE_GET(pte, mmu.pt_nx))
421 + mdb_printf("noexec ");
422 +
424 423 if (PTE_GET(pte, PT_NOCONSIST))
425 424 mdb_printf("noconsist ");
426 425
427 426 if (PTE_GET(pte, PT_NOSYNC))
428 427 mdb_printf("nosync ");
429 428
430 429 if (PTE_GET(pte, mmu.pt_global))
431 430 mdb_printf("global ");
432 431
433 432 if (level > 0 && PTE_GET(pte, PT_PAGESIZE))
434 433 mdb_printf("largepage ");
435 434
436 435 if (level > 0 && PTE_GET(pte, PT_MOD))
437 436 mdb_printf("mod ");
438 437
439 438 if (level > 0 && PTE_GET(pte, PT_REF))
440 439 mdb_printf("ref ");
441 440
442 441 if (PTE_GET(pte, PT_USER))
443 442 mdb_printf("user ");
444 443
445 444 if (PTE_GET(pte, PT_WRITABLE))
446 445 mdb_printf("write ");
447 446
448 447 /*
449 448 * Report non-standard cacheability
450 449 */
451 450 pat_index = 0;
452 451 if (level > 0) {
453 452 if (PTE_GET(pte, PT_PAGESIZE) && PTE_GET(pte, PT_PAT_LARGE))
454 453 pat_index += 4;
455 454 } else {
456 455 if (PTE_GET(pte, PT_PAT_4K))
457 456 pat_index += 4;
458 457 }
459 458
460 459 if (PTE_GET(pte, PT_NOCACHE))
461 460 pat_index += 2;
462 461
463 462 if (PTE_GET(pte, PT_WRITETHRU))
464 463 pat_index += 1;
465 464
466 465 if (pat_index != 0)
467 466 mdb_printf("%s", attr[pat_index]);
468 467
469 468 if (PTE_GET(pte, PT_VALID) == 0)
470 469 mdb_printf(" !VALID ");
471 470
472 471 mdb_printf("\n");
473 472 return (DCMD_OK);
474 473 }
475 474
476 475 /*
477 476 * Print a PTE in more human friendly way. The PTE is assumed to be in
478 477 * a level 0 page table, unless -l specifies another level.
479 - *
480 - * The PTE value can be specified as the -p option, since on a 32 bit kernel
481 - * with PAE running it's larger than a uintptr_t.
482 478 */
483 479 /*ARGSUSED*/
484 480 int
485 481 pte_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
486 482 {
487 - int level = 0;
488 - uint64_t pte = 0;
489 - char *level_str = NULL;
490 - char *pte_str = NULL;
483 + uint64_t level = 0;
491 484
492 485 init_mmu();
493 486
494 487 if (mmu.num_level == 0)
495 488 return (DCMD_ERR);
496 489
490 + if ((flags & DCMD_ADDRSPEC) == 0)
491 + return (DCMD_USAGE);
492 +
497 493 if (mdb_getopts(argc, argv,
498 - 'p', MDB_OPT_STR, &pte_str,
499 - 'l', MDB_OPT_STR, &level_str) != argc)
494 + 'l', MDB_OPT_UINT64, &level) != argc)
500 495 return (DCMD_USAGE);
501 496
502 - /*
503 - * parse the PTE to decode, if it's 0, we don't do anything
504 - */
505 - if (pte_str != NULL) {
506 - pte = mdb_strtoull(pte_str);
507 - } else {
508 - if ((flags & DCMD_ADDRSPEC) == 0)
509 - return (DCMD_USAGE);
510 - pte = addr;
497 + if (level > mmu.max_level) {
498 + mdb_warn("invalid level %lu\n", level);
499 + return (DCMD_ERR);
511 500 }
512 - if (pte == 0)
501 +
502 + if (addr == 0)
513 503 return (DCMD_OK);
514 504
515 - /*
516 - * parse the level if supplied
517 - */
518 - if (level_str != NULL) {
519 - level = mdb_strtoull(level_str);
520 - if (level < 0 || level > mmu.max_level)
521 - return (DCMD_ERR);
522 - }
523 -
524 - return (do_pte_dcmd(level, pte));
505 + return (do_pte_dcmd((int)level, addr));
525 506 }
526 507
527 508 static size_t
528 509 va2entry(htable_t *htable, uintptr_t addr)
529 510 {
530 511 size_t entry = (addr - htable->ht_vaddr);
531 512
532 513 entry >>= mmu.level_shift[htable->ht_level];
533 514 return (entry & HTABLE_NUM_PTES(htable) - 1);
534 515 }
535 516
536 517 static x86pte_t
537 518 get_pte(hat_t *hat, htable_t *htable, uintptr_t addr)
538 519 {
539 520 x86pte_t buf;
540 - x86pte32_t *pte32 = (x86pte32_t *)&buf;
541 - size_t len;
542 521
543 - if (htable->ht_flags & HTABLE_VLP) {
544 - uintptr_t ptr = (uintptr_t)hat->hat_vlp_ptes;
522 + if (htable->ht_flags & HTABLE_COPIED) {
523 + uintptr_t ptr = (uintptr_t)hat->hat_copied_ptes;
545 524 ptr += va2entry(htable, addr) << mmu.pte_size_shift;
546 - len = mdb_vread(&buf, mmu.pte_size, ptr);
547 - } else {
548 - paddr_t paddr = mmu_ptob((paddr_t)htable->ht_pfn);
549 - paddr += va2entry(htable, addr) << mmu.pte_size_shift;
550 - len = mdb_pread(&buf, mmu.pte_size, paddr);
525 + return (*(x86pte_t *)ptr);
551 526 }
552 527
553 - if (len != mmu.pte_size)
554 - return (0);
528 + paddr_t paddr = mmu_ptob((paddr_t)htable->ht_pfn);
529 + paddr += va2entry(htable, addr) << mmu.pte_size_shift;
555 530
556 - if (mmu.pte_size == sizeof (x86pte_t))
531 + if ((mdb_pread(&buf, mmu.pte_size, paddr)) == mmu.pte_size)
557 532 return (buf);
558 - return (*pte32);
533 +
534 + return (0);
559 535 }
560 536
561 537 static int
562 538 do_va2pa(uintptr_t addr, struct as *asp, int print_level, physaddr_t *pap,
563 539 pfn_t *mfnp)
564 540 {
565 541 struct as as;
566 542 struct hat *hatp;
567 543 struct hat hat;
568 544 htable_t *ht;
569 545 htable_t htable;
570 546 uintptr_t base;
571 547 int h;
572 548 int level;
573 549 int found = 0;
574 550 x86pte_t pte;
575 551 physaddr_t paddr;
576 552
577 553 if (asp != NULL) {
578 554 if (mdb_vread(&as, sizeof (as), (uintptr_t)asp) == -1) {
579 555 mdb_warn("Couldn't read struct as\n");
580 556 return (DCMD_ERR);
581 557 }
582 558 hatp = as.a_hat;
583 559 } else {
584 560 hatp = khat;
585 561 }
586 562
587 563 /*
588 564 * read the hat and its hash table
589 565 */
590 566 if (mdb_vread(&hat, sizeof (hat), (uintptr_t)hatp) == -1) {
591 567 mdb_warn("Couldn't read struct hat\n");
592 568 return (DCMD_ERR);
593 569 }
594 570
595 571 /*
596 572 * read the htable hashtable
597 573 */
598 574 for (level = 0; level <= mmu.max_level; ++level) {
599 575 if (level == TOP_LEVEL(&hat))
600 576 base = 0;
601 577 else
602 578 base = addr & mmu.level_mask[level + 1];
603 579
604 580 for (h = 0; h < hat.hat_num_hash; ++h) {
605 581 if (mdb_vread(&ht, sizeof (htable_t *),
606 582 (uintptr_t)(hat.hat_ht_hash + h)) == -1) {
607 583 mdb_warn("Couldn't read htable\n");
608 584 return (DCMD_ERR);
609 585 }
610 586 for (; ht != NULL; ht = htable.ht_next) {
611 587 if (mdb_vread(&htable, sizeof (htable_t),
612 588 (uintptr_t)ht) == -1) {
613 589 mdb_warn("Couldn't read htable\n");
614 590 return (DCMD_ERR);
615 591 }
616 592
617 593 if (htable.ht_vaddr != base ||
618 594 htable.ht_level != level)
619 595 continue;
620 596
621 597 pte = get_pte(&hat, &htable, addr);
622 598
623 599 if (print_level) {
624 - mdb_printf("\tlevel=%d htable=%p "
625 - "pte=%llr\n", level, ht, pte);
600 + mdb_printf("\tlevel=%d htable=0x%p "
601 + "pte=0x%llr\n", level, ht, pte);
626 602 }
627 603
628 604 if (!PTE_ISVALID(pte)) {
629 605 mdb_printf("Address %p is unmapped.\n",
630 606 addr);
631 607 return (DCMD_ERR);
632 608 }
633 609
634 610 if (found)
635 611 continue;
636 612
637 613 if (PTE_IS_LGPG(pte, level))
638 614 paddr = mdb_ma_to_pa(pte &
639 615 PT_PADDR_LGPG);
640 616 else
641 617 paddr = mdb_ma_to_pa(pte & PT_PADDR);
642 618 paddr += addr & mmu.level_offset[level];
643 619 if (pap != NULL)
644 620 *pap = paddr;
645 621 if (mfnp != NULL)
646 622 *mfnp = pte2mfn(pte, level);
647 623 found = 1;
648 624 }
649 625 }
650 626 }
651 627
652 628 done:
653 629 if (!found)
654 630 return (DCMD_ERR);
655 631 return (DCMD_OK);
656 632 }
657 633
658 634 int
659 635 va2pfn_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
660 636 {
661 637 uintptr_t addrspace;
662 638 char *addrspace_str = NULL;
663 639 int piped = flags & DCMD_PIPE_OUT;
664 640 pfn_t pfn;
665 641 pfn_t mfn;
666 642 int rc;
667 643
668 644 init_mmu();
669 645
670 646 if (mmu.num_level == 0)
671 647 return (DCMD_ERR);
672 648
673 649 if (mdb_getopts(argc, argv,
674 650 'a', MDB_OPT_STR, &addrspace_str) != argc)
675 651 return (DCMD_USAGE);
676 652
677 653 if ((flags & DCMD_ADDRSPEC) == 0)
678 654 return (DCMD_USAGE);
679 655
680 656 /*
681 657 * parse the address space
682 658 */
683 659 if (addrspace_str != NULL)
684 660 addrspace = mdb_strtoull(addrspace_str);
685 661 else
686 662 addrspace = 0;
687 663
688 664 rc = do_va2pa(addr, (struct as *)addrspace, !piped, NULL, &mfn);
689 665
690 666 if (rc != DCMD_OK)
691 667 return (rc);
692 668
693 669 if ((pfn = mdb_mfn_to_pfn(mfn)) == -(pfn_t)1) {
694 670 mdb_warn("Invalid mfn %lr\n", mfn);
695 671 return (DCMD_ERR);
696 672 }
697 673
698 674 if (piped) {
699 675 mdb_printf("0x%lr\n", pfn);
700 676 return (DCMD_OK);
701 677 }
702 678
703 679 mdb_printf("Virtual address 0x%p maps pfn 0x%lr", addr, pfn);
704 680
705 681 if (is_xpv)
706 682 mdb_printf(" (mfn 0x%lr)", mfn);
707 683
708 684 mdb_printf("\n");
709 685
710 686 return (DCMD_OK);
711 687 }
712 688
713 689 /*
714 690 * Report all hats that either use the PFN as a page table or that map the page.
715 691 */
716 692 static int
717 693 do_report_maps(pfn_t pfn)
718 694 {
719 695 struct hat *hatp;
720 696 struct hat hat;
721 697 htable_t *ht;
722 698 htable_t htable;
723 699 uintptr_t base;
724 700 int h;
725 701 int level;
726 702 int entry;
727 703 x86pte_t pte;
728 - x86pte_t buf;
729 - x86pte32_t *pte32 = (x86pte32_t *)&buf;
730 704 physaddr_t paddr;
731 705 size_t len;
732 706
733 707 /*
734 708 * The hats are kept in a list with khat at the head.
735 709 */
736 710 for (hatp = khat; hatp != NULL; hatp = hat.hat_next) {
737 711 /*
738 712 * read the hat and its hash table
739 713 */
740 714 if (mdb_vread(&hat, sizeof (hat), (uintptr_t)hatp) == -1) {
741 715 mdb_warn("Couldn't read struct hat\n");
742 716 return (DCMD_ERR);
743 717 }
744 718
745 719 /*
746 720 * read the htable hashtable
747 721 */
748 722 paddr = 0;
749 723 for (h = 0; h < hat.hat_num_hash; ++h) {
750 724 if (mdb_vread(&ht, sizeof (htable_t *),
751 725 (uintptr_t)(hat.hat_ht_hash + h)) == -1) {
752 726 mdb_warn("Couldn't read htable\n");
753 727 return (DCMD_ERR);
754 728 }
755 729 for (; ht != NULL; ht = htable.ht_next) {
756 730 if (mdb_vread(&htable, sizeof (htable_t),
757 731 (uintptr_t)ht) == -1) {
758 732 mdb_warn("Couldn't read htable\n");
759 733 return (DCMD_ERR);
760 734 }
761 735
762 736 /*
763 737 * only report kernel addresses once
764 738 */
765 739 if (hatp != khat &&
766 740 htable.ht_vaddr >= kernelbase)
767 741 continue;
768 742
769 743 /*
770 744 * Is the PFN a pagetable itself?
771 745 */
772 746 if (htable.ht_pfn == pfn) {
773 747 mdb_printf("Pagetable for "
774 748 "hat=%p htable=%p\n", hatp, ht);
775 749 continue;
776 750 }
777 751
778 752 /*
779 753 * otherwise, examine page mappings
780 754 */
781 755 level = htable.ht_level;
782 756 if (level > mmu.max_page_level)
783 757 continue;
784 758 paddr = mmu_ptob((physaddr_t)htable.ht_pfn);
785 759 for (entry = 0;
786 760 entry < HTABLE_NUM_PTES(&htable);
787 761 ++entry) {
788 762
789 763 base = htable.ht_vaddr + entry *
790 764 mmu.level_size[level];
791 765
792 766 /*
793 767 * only report kernel addresses once
794 768 */
795 769 if (hatp != khat &&
796 770 base >= kernelbase)
797 771 continue;
798 772
799 - len = mdb_pread(&buf, mmu.pte_size,
773 + len = mdb_pread(&pte, mmu.pte_size,
800 774 paddr + entry * mmu.pte_size);
801 775 if (len != mmu.pte_size)
802 776 return (DCMD_ERR);
803 - if (mmu.pte_size == sizeof (x86pte_t))
804 - pte = buf;
805 - else
806 - pte = *pte32;
807 777
808 778 if ((pte & PT_VALID) == 0)
809 779 continue;
810 780 if (level == 0 || !(pte & PT_PAGESIZE))
811 781 pte &= PT_PADDR;
812 782 else
813 783 pte &= PT_PADDR_LGPG;
814 784 if (mmu_btop(mdb_ma_to_pa(pte)) != pfn)
815 785 continue;
816 786 mdb_printf("hat=%p maps addr=%p\n",
817 787 hatp, (caddr_t)base);
818 788 }
819 789 }
820 790 }
821 791 }
822 792
823 793 done:
824 794 return (DCMD_OK);
825 795 }
826 796
827 797 /*
828 798 * given a PFN as its address argument, prints out the uses of it
829 799 */
830 800 /*ARGSUSED*/
831 801 int
832 802 report_maps_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
833 803 {
834 804 pfn_t pfn;
835 805 uint_t mflag = 0;
836 806
837 807 init_mmu();
838 808
839 809 if (mmu.num_level == 0)
840 810 return (DCMD_ERR);
841 811
842 812 if ((flags & DCMD_ADDRSPEC) == 0)
843 813 return (DCMD_USAGE);
844 814
845 815 if (mdb_getopts(argc, argv,
846 816 'm', MDB_OPT_SETBITS, TRUE, &mflag, NULL) != argc)
847 817 return (DCMD_USAGE);
848 818
849 819 pfn = (pfn_t)addr;
850 820 if (mflag)
851 821 pfn = mdb_mfn_to_pfn(pfn);
852 822
853 823 return (do_report_maps(pfn));
854 824 }
855 825
856 826 static int
857 -do_ptable_dcmd(pfn_t pfn)
827 +do_ptable_dcmd(pfn_t pfn, uint64_t level)
858 828 {
859 829 struct hat *hatp;
860 830 struct hat hat;
861 831 htable_t *ht;
862 832 htable_t htable;
863 833 uintptr_t base;
864 834 int h;
865 - int level;
866 835 int entry;
867 836 uintptr_t pagesize;
868 837 x86pte_t pte;
869 838 x86pte_t buf;
870 - x86pte32_t *pte32 = (x86pte32_t *)&buf;
871 839 physaddr_t paddr;
872 840 size_t len;
873 841
874 842 /*
875 843 * The hats are kept in a list with khat at the head.
876 844 */
877 845 for (hatp = khat; hatp != NULL; hatp = hat.hat_next) {
878 846 /*
879 847 * read the hat and its hash table
880 848 */
881 849 if (mdb_vread(&hat, sizeof (hat), (uintptr_t)hatp) == -1) {
882 850 mdb_warn("Couldn't read struct hat\n");
883 851 return (DCMD_ERR);
884 852 }
885 853
886 854 /*
887 855 * read the htable hashtable
888 856 */
889 857 paddr = 0;
890 858 for (h = 0; h < hat.hat_num_hash; ++h) {
891 859 if (mdb_vread(&ht, sizeof (htable_t *),
892 860 (uintptr_t)(hat.hat_ht_hash + h)) == -1) {
893 861 mdb_warn("Couldn't read htable\n");
894 862 return (DCMD_ERR);
895 863 }
896 864 for (; ht != NULL; ht = htable.ht_next) {
897 865 if (mdb_vread(&htable, sizeof (htable_t),
898 866 (uintptr_t)ht) == -1) {
899 867 mdb_warn("Couldn't read htable\n");
900 868 return (DCMD_ERR);
901 869 }
902 870
903 871 /*
904 872 * Is this the PFN for this htable
905 873 */
906 874 if (htable.ht_pfn == pfn)
907 875 goto found_it;
908 876 }
909 877 }
910 878 }
911 879
912 880 found_it:
913 881 if (htable.ht_pfn == pfn) {
914 882 mdb_printf("htable=%p\n", ht);
915 - level = htable.ht_level;
883 + if (level == (uint64_t)-1) {
884 + level = htable.ht_level;
885 + } else if (htable.ht_level != level) {
886 + mdb_warn("htable has level %d but forcing level %lu\n",
887 + htable.ht_level, level);
888 + }
916 889 base = htable.ht_vaddr;
917 890 pagesize = mmu.level_size[level];
918 891 } else {
919 - mdb_printf("Unknown pagetable - assuming level/addr 0");
920 - level = 0; /* assume level == 0 for PFN */
892 + if (level == (uint64_t)-1)
893 + level = 0;
894 + mdb_warn("couldn't find matching htable, using level=%lu, "
895 + "base address=0x0\n", level);
921 896 base = 0;
922 - pagesize = MMU_PAGESIZE;
897 + pagesize = mmu.level_size[level];
923 898 }
924 899
925 900 paddr = mmu_ptob((physaddr_t)pfn);
926 901 for (entry = 0; entry < mmu.ptes_per_table; ++entry) {
927 902 len = mdb_pread(&buf, mmu.pte_size,
928 903 paddr + entry * mmu.pte_size);
929 904 if (len != mmu.pte_size)
930 905 return (DCMD_ERR);
931 - if (mmu.pte_size == sizeof (x86pte_t))
932 906 pte = buf;
933 - else
934 - pte = *pte32;
935 907
936 908 if (pte == 0)
937 909 continue;
938 910
939 - mdb_printf("[%3d] va=%p ", entry, base + entry * pagesize);
911 + mdb_printf("[%3d] va=0x%p ", entry,
912 + VA_SIGN_EXTEND(base + entry * pagesize));
940 913 do_pte_dcmd(level, pte);
941 914 }
942 915
943 916 done:
944 917 return (DCMD_OK);
945 918 }
946 919
947 920 /*
948 921 * Dump the page table at the given PFN
949 922 */
950 923 /*ARGSUSED*/
951 924 int
952 925 ptable_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
953 926 {
954 927 pfn_t pfn;
955 928 uint_t mflag = 0;
929 + uint64_t level = (uint64_t)-1;
956 930
957 931 init_mmu();
958 932
959 933 if (mmu.num_level == 0)
960 934 return (DCMD_ERR);
961 935
962 936 if ((flags & DCMD_ADDRSPEC) == 0)
963 937 return (DCMD_USAGE);
964 938
965 939 if (mdb_getopts(argc, argv,
966 - 'm', MDB_OPT_SETBITS, TRUE, &mflag, NULL) != argc)
940 + 'm', MDB_OPT_SETBITS, TRUE, &mflag,
941 + 'l', MDB_OPT_UINT64, &level, NULL) != argc)
967 942 return (DCMD_USAGE);
968 943
944 + if (level != (uint64_t)-1 && level > mmu.max_level) {
945 + mdb_warn("invalid level %lu\n", level);
946 + return (DCMD_ERR);
947 + }
948 +
969 949 pfn = (pfn_t)addr;
970 950 if (mflag)
971 951 pfn = mdb_mfn_to_pfn(pfn);
972 952
973 - return (do_ptable_dcmd(pfn));
953 + return (do_ptable_dcmd(pfn, level));
974 954 }
975 955
976 956 static int
977 957 do_htables_dcmd(hat_t *hatp)
978 958 {
979 959 struct hat hat;
980 960 htable_t *ht;
981 961 htable_t htable;
982 962 int h;
983 963
984 964 /*
985 965 * read the hat and its hash table
986 966 */
987 967 if (mdb_vread(&hat, sizeof (hat), (uintptr_t)hatp) == -1) {
988 968 mdb_warn("Couldn't read struct hat\n");
989 969 return (DCMD_ERR);
990 970 }
991 971
992 972 /*
993 973 * read the htable hashtable
994 974 */
995 975 for (h = 0; h < hat.hat_num_hash; ++h) {
996 976 if (mdb_vread(&ht, sizeof (htable_t *),
997 977 (uintptr_t)(hat.hat_ht_hash + h)) == -1) {
998 978 mdb_warn("Couldn't read htable ptr\n");
999 979 return (DCMD_ERR);
1000 980 }
1001 981 for (; ht != NULL; ht = htable.ht_next) {
1002 982 mdb_printf("%p\n", ht);
1003 983 if (mdb_vread(&htable, sizeof (htable_t),
1004 984 (uintptr_t)ht) == -1) {
1005 985 mdb_warn("Couldn't read htable\n");
1006 986 return (DCMD_ERR);
1007 987 }
1008 988 }
1009 989 }
1010 990 return (DCMD_OK);
1011 991 }
1012 992
1013 993 /*
1014 994 * Dump the htables for the given hat
1015 995 */
1016 996 /*ARGSUSED*/
1017 997 int
1018 998 htables_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
1019 999 {
1020 1000 hat_t *hat;
1021 1001
1022 1002 init_mmu();
1023 1003
1024 1004 if (mmu.num_level == 0)
1025 1005 return (DCMD_ERR);
1026 1006
1027 1007 if ((flags & DCMD_ADDRSPEC) == 0)
1028 1008 return (DCMD_USAGE);
1029 1009
1030 1010 hat = (hat_t *)addr;
1031 1011
1032 1012 return (do_htables_dcmd(hat));
1013 +}
1014 +
1015 +static uintptr_t
1016 +entry2va(size_t *entries)
1017 +{
1018 + uintptr_t va = 0;
1019 +
1020 + for (level_t l = mmu.max_level; l >= 0; l--)
1021 + va += entries[l] << mmu.level_shift[l];
1022 +
1023 + return (VA_SIGN_EXTEND(va));
1024 +}
1025 +
1026 +static void
1027 +ptmap_report(size_t *entries, uintptr_t start,
1028 + boolean_t user, boolean_t writable, boolean_t wflag)
1029 +{
1030 + uint64_t curva = entry2va(entries);
1031 +
1032 + mdb_printf("mapped %s,%s range of %lu bytes: %a-%a\n",
1033 + user ? "user" : "kernel", writable ? "writable" : "read-only",
1034 + curva - start, start, curva - 1);
1035 + if (wflag && start >= kernelbase)
1036 + (void) mdb_call_dcmd("whatis", start, DCMD_ADDRSPEC, 0, NULL);
1037 +}
1038 +
1039 +int
1040 +ptmap_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
1041 +{
1042 + physaddr_t paddrs[MAX_NUM_LEVEL] = { 0, };
1043 + size_t entry[MAX_NUM_LEVEL] = { 0, };
1044 + uintptr_t start = (uintptr_t)-1;
1045 + boolean_t writable = B_FALSE;
1046 + boolean_t user = B_FALSE;
1047 + boolean_t wflag = B_FALSE;
1048 + level_t curlevel;
1049 +
1050 + if ((flags & DCMD_ADDRSPEC) == 0)
1051 + return (DCMD_USAGE);
1052 +
1053 + if (mdb_getopts(argc, argv,
1054 + 'w', MDB_OPT_SETBITS, TRUE, &wflag, NULL) != argc)
1055 + return (DCMD_USAGE);
1056 +
1057 + init_mmu();
1058 +
1059 + if (mmu.num_level == 0)
1060 + return (DCMD_ERR);
1061 +
1062 + curlevel = mmu.max_level;
1063 +
1064 + paddrs[curlevel] = addr & MMU_PAGEMASK;
1065 +
1066 + for (;;) {
1067 + physaddr_t pte_addr;
1068 + x86pte_t pte;
1069 +
1070 + pte_addr = paddrs[curlevel] +
1071 + (entry[curlevel] << mmu.pte_size_shift);
1072 +
1073 + if (mdb_pread(&pte, sizeof (pte), pte_addr) != sizeof (pte)) {
1074 + mdb_warn("couldn't read pte at %p", pte_addr);
1075 + return (DCMD_ERR);
1076 + }
1077 +
1078 + if (PTE_GET(pte, PT_VALID) == 0) {
1079 + if (start != (uintptr_t)-1) {
1080 + ptmap_report(entry, start,
1081 + user, writable, wflag);
1082 + start = (uintptr_t)-1;
1083 + }
1084 + } else if (curlevel == 0 || PTE_GET(pte, PT_PAGESIZE)) {
1085 + if (start == (uintptr_t)-1) {
1086 + start = entry2va(entry);
1087 + user = PTE_GET(pte, PT_USER);
1088 + writable = PTE_GET(pte, PT_WRITABLE);
1089 + } else if (user != PTE_GET(pte, PT_USER) ||
1090 + writable != PTE_GET(pte, PT_WRITABLE)) {
1091 + ptmap_report(entry, start,
1092 + user, writable, wflag);
1093 + start = entry2va(entry);
1094 + user = PTE_GET(pte, PT_USER);
1095 + writable = PTE_GET(pte, PT_WRITABLE);
1096 + }
1097 + } else {
1098 + /* Descend a level. */
1099 + physaddr_t pa = mmu_ptob(pte2mfn(pte, curlevel));
1100 + paddrs[--curlevel] = pa;
1101 + entry[curlevel] = 0;
1102 + continue;
1103 + }
1104 +
1105 + while (++entry[curlevel] == mmu.ptes_per_table) {
1106 + /* Ascend back up. */
1107 + entry[curlevel] = 0;
1108 + if (curlevel == mmu.max_level) {
1109 + if (start != (uintptr_t)-1) {
1110 + ptmap_report(entry, start,
1111 + user, writable, wflag);
1112 + }
1113 + goto out;
1114 + }
1115 +
1116 + curlevel++;
1117 + }
1118 + }
1119 +
1120 +out:
1121 + return (DCMD_OK);
1033 1122 }
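The new ptmap_dcmd() above walks a complete page-table tree from a given top-level table: each entry is read with mdb_pread(), valid non-large entries above level 0 are descended into, and consecutive valid pages with matching PT_USER/PT_WRITABLE attributes are coalesced into ranges reported through ptmap_report(); with -w the start of each kernel-space range is additionally passed to ::whatis. Assuming the function is registered as ::ptmap in the module's dcmd table (that registration lives outside this file), an invocation would look roughly like the following, where the address is the physical address of a top-level page table (for example mmu_ptob() of the kernel hat's top-level ht_pfn, or the page-aligned portion of %cr3) and the value shown is purely hypothetical:

    > 1d4ff000::ptmap -w

Each output line follows the "mapped %s,%s range of %lu bytes: %a-%a" format built in ptmap_report().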