915 if (immu_devi->imd_domain != NULL) {
916 dvp->dva_domain = domain;
917 } else {
918 dvp->dva_domain = domain;
919 }
920 mutex_exit(&(DEVI(pdip)->devi_lock));
921
922 /*
923  * continue walking upwards until the topmost PCI bridge is found
924  */
925 return (DDI_WALK_CONTINUE);
926
927 }
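/*
 * Editor's sketch, not part of the source file: the fragment above is the
 * tail of a device-tree walk callback.  ddi_walk_devs(), ddi_get_parent()
 * and the DDI_WALK_* return codes are standard DDI; the walk_state_t,
 * example_walker() and example_walk() names are hypothetical, and devinfo
 * tree locking is omitted for brevity.
 */
typedef struct walk_state {
	domain_t	*ws_domain;	/* hypothetical result of the walk */
} walk_state_t;

static int
example_walker(dev_info_t *dip, void *arg)
{
	walk_state_t *wsp = arg;
	immu_devi_t *immu_devi = immu_devi_get(dip);	/* per-dip IOMMU state, as used above */

	/* stop as soon as a dip with an established domain is found */
	if (immu_devi != NULL && immu_devi->imd_domain != NULL) {
		wsp->ws_domain = immu_devi->imd_domain;
		return (DDI_WALK_TERMINATE);
	}

	/* DDI_WALK_CONTINUE keeps walking the tree */
	return (DDI_WALK_CONTINUE);
}

static void
example_walk(dev_info_t *rdip, walk_state_t *wsp)
{
	/* walk the subtree rooted at rdip's parent */
	ddi_walk_devs(ddi_get_parent(rdip), example_walker, wsp);
}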
928
929 static void
930 map_unity_domain(domain_t *domain)
931 {
932 struct memlist *mp;
933 uint64_t start;
934 uint64_t npages;
935 immu_dcookie_t dcookies[1] = {{(uintptr_t)NULL}};
936 int dcount = 0;
937
938 /*
939  * UNITY mappings mirror the physical memory installed on the
940  * system (see the illustrative sketch after this function).
941  */
942
943 #ifdef BUGGY_DRIVERS
944 /*
945  * Don't skip page 0; some broken HW/FW accesses it.
946  */
947 dcookies[0].dck_paddr = 0;
948 dcookies[0].dck_npages = 1;
949 dcount = 1;
950 (void) dvma_map(domain, 0, 1, dcookies, dcount, NULL,
951 IMMU_FLAGS_READ | IMMU_FLAGS_WRITE | IMMU_FLAGS_PAGE1);
952 #endif
953
954 memlist_read_lock();
955
1300 mutex_enter(&(immu_domain_lock));
1301 list_insert_tail(&immu_unity_domain_list, domain);
1302 mutex_exit(&(immu_domain_lock));
1303 }
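/*
 * Editor's sketch, not the elided body of map_unity_domain() above: a
 * unity mapping is built by walking a struct memlist chain of installed
 * physical memory and identity-mapping each range (DVMA == paddr).  The
 * use of the phys_install list is an assumption here; ml_address, ml_size
 * and ml_next are the standard illumos memlist fields, and dvma_map() is
 * the routine defined later in this file.
 */
static void
example_map_unity(domain_t *domain)
{
	struct memlist *mp;
	uint64_t start, npages;
	immu_dcookie_t dcookies[1] = {{(uintptr_t)NULL}};

	memlist_read_lock();
	for (mp = phys_install; mp != NULL; mp = mp->ml_next) {
		start = mp->ml_address;
		/* round up to whole IMMU pages */
		npages = (mp->ml_size + IMMU_PAGESIZE - 1) / IMMU_PAGESIZE;

		/* identity map: the DVMA equals the physical address */
		dcookies[0].dck_paddr = start;
		dcookies[0].dck_npages = npages;
		(void) dvma_map(domain, start, npages, dcookies, 1, NULL,
		    IMMU_FLAGS_READ | IMMU_FLAGS_WRITE);
	}
	memlist_read_unlock();
}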
1304
1305 /*
1306  * ddip is the domain-dip: the topmost dip in a domain.
1307  * rdip is the requesting-dip: the device that is
1308  * requesting DVMA setup.
1309  * If the domain is non-shared, rdip == ddip.
1310  */
1311 static domain_t *
1312 domain_create(immu_t *immu, dev_info_t *ddip, dev_info_t *rdip,
1313 immu_flags_t immu_flags)
1314 {
1315 int kmflags;
1316 domain_t *domain;
1317 char mod_hash_name[128];
1318 immu_devi_t *immu_devi;
1319 int did;
1320 immu_dcookie_t dcookies[1] = {{(uintptr_t)NULL}};
1321 int dcount = 0;
1322
1323 immu_devi = immu_devi_get(rdip);
1324
1325 /*
1326  * First allocate a domain ID.  This routine never fails:
1327  * if the supply of domain IDs is exhausted, the unity
1328  * domain's ID is returned instead.
1329  */
1330 did = did_alloc(immu, rdip, ddip, immu_flags);
1331 if (did == IMMU_UNITY_DID) {
1332 /* domain overflow */
1333 ASSERT(immu->immu_unity_domain);
1334 return (immu->immu_unity_domain);
1335 }
1336
1337 kmflags = (immu_flags & IMMU_FLAGS_NOSLEEP) ? KM_NOSLEEP : KM_SLEEP;
1338 domain = kmem_zalloc(sizeof (domain_t), kmflags);
1339 if (domain == NULL) {
1340 ddi_err(DER_PANIC, rdip, "Failed to alloc DVMA domain "
1947 immu_fault_walk(void *arg, void *base, size_t len)
1948 {
1949 uint64_t dvma, start;
1950
1951 dvma = *(uint64_t *)arg;
1952 start = (uint64_t)(uintptr_t)base;
1953
1954 if (dvma >= start && dvma < (start + len)) {
1955 ddi_err(DER_WARN, NULL,
1956 "faulting DVMA address is in vmem arena "
1957 "(%" PRIx64 "-%" PRIx64 ")",
1958 start, start + len);
1959 *(uint64_t *)arg = ~0ULL;
1960 }
1961 }
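/*
 * Editor's note: immu_fault_walk() is used as a vmem_walk() callback and is
 * invoked once per allocated segment of the arena.  Writing ~0ULL into *arg
 * is a "found" sentinel; immu_print_fault_info() below checks for it after
 * the walk to decide whether the faulting DVMA was ever allocated.
 */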
1962
1963 void
1964 immu_print_fault_info(uint_t sid, uint64_t dvma)
1965 {
1966 int nlevels;
1967 xlate_t xlate[IMMU_PGTABLE_MAX_LEVELS + 1] = {{0}};
1968 xlate_t *xlatep;
1969 hw_pdte_t pte;
1970 domain_t *domain;
1971 immu_t *immu;
1972 uint64_t dvma_arg;
1973
1974 if (mod_hash_find(bdf_domain_hash,
1975 (void *)(uintptr_t)sid, (void *)&domain) != 0) {
1976 ddi_err(DER_WARN, NULL,
1977 "no domain for faulting SID %08x", sid);
1978 return;
1979 }
1980
1981 immu = domain->dom_immu;
1982
1983 dvma_arg = dvma;
1984 vmem_walk(domain->dom_dvma_arena, VMEM_ALLOC, immu_fault_walk,
1985 (void *)&dvma_arg);
1986 if (dvma_arg != ~0ULL)
1987 ddi_err(DER_WARN, domain->dom_dip,
2338 * dvma_map()
2339 * map a contiguous range of DVMA pages
2340 *
2341 * domain: domain in which the mapping is created
2342 * sdvma: starting DVMA of the range
2343 * snvpages: number of pages to map
2344 * dcookies: cookies describing the physical range(s) to map
2345 * dcount: number of entries in dcookies
2346 * rdip: requesting device
2347 * immu_flags: flags
2348 */
2349 static boolean_t
2350 dvma_map(domain_t *domain, uint64_t sdvma, uint64_t snvpages,
2351 immu_dcookie_t *dcookies, int dcount, dev_info_t *rdip,
2352 immu_flags_t immu_flags)
2353 {
2354 uint64_t dvma;
2355 uint64_t n;
2356 immu_t *immu = domain->dom_immu;
2357 int nlevels = immu->immu_dvma_nlevels;
2358 xlate_t xlate[IMMU_PGTABLE_MAX_LEVELS + 1] = {{0}};
2359 boolean_t pde_set = B_FALSE;
2360
2361 n = snvpages;
2362 dvma = sdvma;
2363
2364 while (n > 0) {
2365 xlate_setup(dvma, xlate, nlevels);
2366
2367 /* Lookup or allocate PGDIRs and PGTABLEs if necessary */
2368 if (PDE_set_all(immu, domain, xlate, nlevels, rdip, immu_flags)
2369 == B_TRUE) {
2370 pde_set = B_TRUE;
2371 }
2372
2373 /* set all matching ptes that fit into this leaf pgtable */
2374 PTE_set_all(immu, domain, &xlate[1], &dvma, &n, dcookies,
2375 dcount, rdip, immu_flags);
2376 }
2377
2378 return (pde_set);
2379 }
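/*
 * Editor's note: the boolean returned by dvma_map() records whether any new
 * page-directory entries were installed.  Callers such as immu_map_memrange()
 * below use it to choose the IOTLB invalidation granularity (TLB_IVA_WHOLE
 * when the directory changed, TLB_IVA_LEAF otherwise).
 */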
2380
2381 /*
2382 * dvma_unmap()
2383 * unmap a range of DVMAs
2384 *
2385 * domain: domain for the requesting device
2386 * sdvma: starting DVMA of the range
2387 * snpages: number of IMMU pages to be unmapped
2388 * rdip: requesting device
2389 *
2390 * The IOMMU unit is taken from domain->dom_immu.
2391 */
2392 static void
2393 dvma_unmap(domain_t *domain, uint64_t sdvma, uint64_t snpages,
2394 dev_info_t *rdip)
2395 {
2396 immu_t *immu = domain->dom_immu;
2397 int nlevels = immu->immu_dvma_nlevels;
2398 xlate_t xlate[IMMU_PGTABLE_MAX_LEVELS + 1] = {{0}};
2399 uint64_t n;
2400 uint64_t dvma;
2401
2402 dvma = sdvma;
2403 n = snpages;
2404
2405 while (n > 0) {
2406 /* setup the xlate array */
2407 xlate_setup(dvma, xlate, nlevels);
2408
2409 /* just look up existing pgtables; this should never fail */
2410 if (!PDE_lookup(domain, xlate, nlevels))
2411 ddi_err(DER_PANIC, rdip,
2412 "page tables not found for addr %" PRIx64,
2413 dvma);
2414
2415 /* clear all matching ptes that fit into this leaf pgtable */
2416 PTE_clear_all(immu, domain, &xlate[1], &dvma, &n, rdip);
2417 }
2418
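/*
 * Editor's note: clearing the leaf PTEs above removes the mappings from the
 * page tables, but translations the IOMMU has already cached are not
 * invalidated by that alone; an IOTLB flush (see the immu_flush_iotlb_psi()
 * call in immu_map_memrange() below) is still needed once mappings change.
 */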
2434
2435 /* handle the rollover case: maxaddr may have wrapped around zero */
2436 if (maxaddr < dma_attr->dma_attr_addr_hi) {
2437 maxaddr = dma_attr->dma_attr_addr_hi;
2438 }
2439
2440 /*
2441 * allocate from the domain's DVMA vmem arena, constrained to [minaddr, maxaddr)
2442 */
2443 dvma = (uint64_t)(uintptr_t)vmem_xalloc(domain->dom_dvma_arena,
2444 xsize, align, 0, 0, (void *)(uintptr_t)minaddr,
2445 (void *)(uintptr_t)maxaddr, kmf);
2446
2447 return (dvma);
2448 }
2449
2450 static void
2451 dvma_prealloc(dev_info_t *rdip, immu_hdl_priv_t *ihp, ddi_dma_attr_t *dma_attr)
2452 {
2453 int nlevels;
2454 xlate_t xlate[IMMU_PGTABLE_MAX_LEVELS + 1] = {{0}}, *xlp;
2455 uint64_t dvma, n;
2456 size_t xsize, align;
2457 uint64_t minaddr, maxaddr, dmamax;
2458 int on, npte, pindex;
2459 hw_pdte_t *shwp;
2460 immu_t *immu;
2461 domain_t *domain;
2462
2463 /* parameters */
2464 domain = IMMU_DEVI(rdip)->imd_domain;
2465 immu = domain->dom_immu;
2466 nlevels = immu->immu_dvma_nlevels;
2467 xsize = IMMU_NPREPTES * IMMU_PAGESIZE;
2468 align = MAX((size_t)(dma_attr->dma_attr_align), IMMU_PAGESIZE);
2469 minaddr = dma_attr->dma_attr_addr_lo;
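/*
 * Editor's note (assumption about flag semantics): _DDI_DMA_BOUNCE_ON_SEG
 * indicates that dma_attr_seg is a hard upper address bound for the device
 * (DMA is bounced rather than split at the segment boundary), which is why
 * it replaces dma_attr_addr_hi as the limit in the check below.
 */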
2470 if (dma_attr->dma_attr_flags & _DDI_DMA_BOUNCE_ON_SEG)
2471 dmamax = dma_attr->dma_attr_seg;
2472 else
2473 dmamax = dma_attr->dma_attr_addr_hi;
2474 maxaddr = dmamax + 1;
2816 }
2817
2818 /*
2819 * DVMA will start once IOMMU is "running"
2820 */
2821 immu->immu_dvma_running = B_TRUE;
2822 }
2823
2824 /*
2825 * immu_dvma_physmem_update()
2826 * called when the memory installed in the system increases,
2827 * so that the identity (UNITY) mappings of unity domains can
2828 * be extended to cover the newly added range
2829 */
2830 void
2831 immu_dvma_physmem_update(uint64_t addr, uint64_t size)
2832 {
2833 uint64_t start;
2834 uint64_t npages;
2835 int dcount;
2836 immu_dcookie_t dcookies[1] = {{(uintptr_t)NULL}};
2837 domain_t *domain;
2838
2839 /*
2840 * Just walk the system-wide list of domains with
2841 * UNITY mapping.  Both the list of *all* domains
2842 * and the list of *UNITY* domains are protected by
2843 * the same lock.
2844 */
2845 mutex_enter(&immu_domain_lock);
2846 domain = list_head(&immu_unity_domain_list);
2847 for (; domain; domain = list_next(&immu_unity_domain_list, domain)) {
2848 /*
2849 * Nothing to do if the IOMMU supports passthrough.
2850 */
2851 if (IMMU_ECAP_GET_PT(domain->dom_immu->immu_regs_excap))
2852 continue;
2853
2854 /* There is no vmem arena for unity domains; just map the new range */
2855 ddi_err(DER_LOG, domain->dom_dip,
2856 "iommu: unity-domain: Adding map "
2939 }
2940
2941 if (odip != rdip)
2942 set_domain(odip, ddip, domain);
2943
2944 /*
2945 * Update the root and context entries
2946 */
2947 if (immu_context_update(immu, domain, ddip, rdip, immu_flags)
2948 != DDI_SUCCESS) {
2949 ddi_err(DER_MODE, rdip, "DVMA map: context update failed");
2950 return (DDI_DMA_NORESOURCES);
2951 }
2952
2953 return (DDI_SUCCESS);
2954 }
2955
2956 int
2957 immu_map_memrange(dev_info_t *rdip, memrng_t *mrng)
2958 {
2959 immu_dcookie_t dcookies[1] = {{(uintptr_t)NULL}};
2960 boolean_t pde_set;
2961 immu_t *immu;
2962 domain_t *domain;
2963 immu_inv_wait_t iw;
2964
2965 dcookies[0].dck_paddr = mrng->mrng_start;
2966 dcookies[0].dck_npages = mrng->mrng_npages;
2967
2968 domain = IMMU_DEVI(rdip)->imd_domain;
2969 immu = domain->dom_immu;
2970
2971 pde_set = dvma_map(domain, mrng->mrng_start,
2972 mrng->mrng_npages, dcookies, 1, rdip,
2973 IMMU_FLAGS_READ | IMMU_FLAGS_WRITE);
2974
2975 immu_init_inv_wait(&iw, "memrange", B_TRUE);
2976
2977 immu_flush_iotlb_psi(immu, domain->dom_did, mrng->mrng_start,
2978 mrng->mrng_npages, pde_set == B_TRUE ?
2979 TLB_IVA_WHOLE : TLB_IVA_LEAF, &iw);
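/*
 * Editor's sketch, a hypothetical caller of immu_map_memrange() above:
 * identity-map a small reserved physical range for a device.  The address
 * and page count are made up; memrng_t's mrng_start/mrng_npages fields are
 * as used above.
 */
static int
example_map_reserved(dev_info_t *rdip)
{
	memrng_t mrng;

	mrng.mrng_start = 0xd0000;	/* hypothetical page-aligned paddr */
	mrng.mrng_npages = 16;		/* hypothetical length in IMMU pages */

	/* maps DVMA == paddr for the range and flushes the IOTLB */
	return (immu_map_memrange(rdip, &mrng));
}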