Print this page
9208 hati_demap_func should take pagesize into account
Reviewed by: Robert Mustacchi <rm@joyent.com>
Reviewed by: Tim Kordas <tim.kordas@joyent.com>

*** 25,34 **** --- 25,35 ----
   * Copyright (c) 2010, Intel Corporation.
   * All rights reserved.
   */
  /*
   * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
+  * Copyright 2017 Joyent, Inc. All rights reserved.
   * Copyright (c) 2014, 2015 by Delphix. All rights reserved.
   */
  
  /*
   * VM - Hardware Address Translation management for i386 and amd64
*** 1912,1933 ****
      hat_region_cookie_t rcookie)
  {
  	panic("No shared region support on x86");
  }
  
  #if !defined(__xpv)
  /*
!  * Cross call service routine to demap a virtual page on
!  * the current CPU or flush all mappings in TLB.
   */
  /*ARGSUSED*/
  static int
  hati_demap_func(xc_arg_t a1, xc_arg_t a2, xc_arg_t a3)
  {
  	hat_t	*hat = (hat_t *)a1;
! 	caddr_t	addr = (caddr_t)a2;
  	size_t	len = (size_t)a3;
  
  	/*
  	 * If the target hat isn't the kernel and this CPU isn't operating
  	 * in the target hat, we can ignore the cross call.
  	 */
--- 1913,1945 ----
      hat_region_cookie_t rcookie)
  {
  	panic("No shared region support on x86");
  }
  
+ /*
+  * A range of virtual pages for purposes of demapping.
+  */
+ typedef struct range_info {
+ 	uintptr_t	rng_va;		/* address of page */
+ 	ulong_t		rng_cnt;	/* number of pages in range */
+ 	level_t		rng_level;	/* page table level */
+ } range_info_t;
+ 
  #if !defined(__xpv)
  /*
!  * Cross call service routine to demap a range of virtual
!  * pages on the current CPU or flush all mappings in TLB.
   */
  /*ARGSUSED*/
  static int
  hati_demap_func(xc_arg_t a1, xc_arg_t a2, xc_arg_t a3)
  {
  	hat_t	*hat = (hat_t *)a1;
! 	range_info_t	*range = (range_info_t *)a2;
  	size_t	len = (size_t)a3;
+ 	caddr_t	addr = (caddr_t)range->rng_va;
+ 	size_t	pgsz = LEVEL_SIZE(range->rng_level);
  
  	/*
  	 * If the target hat isn't the kernel and this CPU isn't operating
  	 * in the target hat, we can ignore the cross call.
  	 */
*** 1936,1946 ****
  
  	/*
  	 * For a normal address, we flush a range of contiguous mappings
  	 */
  	if ((uintptr_t)addr != DEMAP_ALL_ADDR) {
! 		for (size_t i = 0; i < len; i += MMU_PAGESIZE)
  			mmu_tlbflush_entry(addr + i);
  		return (0);
  	}
  
  	/*
--- 1948,1958 ----
  
  	/*
  	 * For a normal address, we flush a range of contiguous mappings
  	 */
  	if ((uintptr_t)addr != DEMAP_ALL_ADDR) {
! 		for (size_t i = 0; i < len; i += pgsz)
  			mmu_tlbflush_entry(addr + i);
  		return (0);
  	}
  
  	/*
*** 2032,2046 ****
  /*
   * Internal routine to do cross calls to invalidate a range of pages on
   * all CPUs using a given hat.
   */
  void
! hat_tlb_inval_range(hat_t *hat, uintptr_t va, size_t len)
  {
  	extern int	flushes_require_xcalls;	/* from mp_startup.c */
  	cpuset_t	justme;
  	cpuset_t	cpus_to_shootdown;
  #ifndef __xpv
  	cpuset_t	check_cpus;
  	cpu_t		*cpup;
  	int		c;
  #endif
--- 2044,2060 ----
  /*
   * Internal routine to do cross calls to invalidate a range of pages on
   * all CPUs using a given hat.
   */
  void
! hat_tlb_inval_range(hat_t *hat, range_info_t *range)
  {
  	extern int	flushes_require_xcalls;	/* from mp_startup.c */
  	cpuset_t	justme;
  	cpuset_t	cpus_to_shootdown;
+ 	uintptr_t	va = range->rng_va;
+ 	size_t		len = range->rng_cnt << LEVEL_SHIFT(range->rng_level);
  #ifndef __xpv
  	cpuset_t	check_cpus;
  	cpu_t		*cpup;
  	int		c;
  #endif
*** 2073,2083 ****
  			for (size_t i = 0; i < len; i += MMU_PAGESIZE)
  				xen_flush_va((caddr_t)(va + i));
  		}
  #else
  		(void) hati_demap_func((xc_arg_t)hat,
! 		    (xc_arg_t)va, (xc_arg_t)len);
  #endif
  		return;
  	}
  
--- 2087,2097 ----
  			for (size_t i = 0; i < len; i += MMU_PAGESIZE)
  				xen_flush_va((caddr_t)(va + i));
  		}
  #else
  		(void) hati_demap_func((xc_arg_t)hat,
! 		    (xc_arg_t)range, (xc_arg_t)len);
  #endif
  		return;
  	}
  
*** 2132,2142 ****
  			for (size_t i = 0; i < len; i += MMU_PAGESIZE)
  				xen_flush_va((caddr_t)(va + i));
  		}
  #else
  		(void) hati_demap_func((xc_arg_t)hat,
! 		    (xc_arg_t)va, (xc_arg_t)len);
  #endif
  
  	} else {
  		CPUSET_ADD(cpus_to_shootdown, CPU->cpu_id);
--- 2146,2156 ----
  			for (size_t i = 0; i < len; i += MMU_PAGESIZE)
  				xen_flush_va((caddr_t)(va + i));
  		}
  #else
  		(void) hati_demap_func((xc_arg_t)hat,
! 		    (xc_arg_t)range, (xc_arg_t)len);
  #endif
  
  	} else {
  		CPUSET_ADD(cpus_to_shootdown, CPU->cpu_id);
*** 2148,2158 ****
  				xen_gflush_va((caddr_t)(va + i),
  				    cpus_to_shootdown);
  			}
  		}
  #else
! 		xc_call((xc_arg_t)hat, (xc_arg_t)va, (xc_arg_t)len,
  		    CPUSET2BV(cpus_to_shootdown), hati_demap_func);
  #endif
  	}
  
  	kpreempt_enable();
--- 2162,2172 ----
  				xen_gflush_va((caddr_t)(va + i),
  				    cpus_to_shootdown);
  			}
  		}
  #else
! 		xc_call((xc_arg_t)hat, (xc_arg_t)range, (xc_arg_t)len,
  		    CPUSET2BV(cpus_to_shootdown), hati_demap_func);
  #endif
  	}
  
  	kpreempt_enable();
*** 2159,2169 ****
  }
  
  void
  hat_tlb_inval(hat_t *hat, uintptr_t va)
  {
! 	hat_tlb_inval_range(hat, va, MMU_PAGESIZE);
  }
  
  /*
   * Interior routine for HAT_UNLOADs from hat_unload_callback(),
   * hat_kmap_unload() OR from hat_steal() code. This routine doesn't
--- 2173,2191 ----
  }
  
  void
  hat_tlb_inval(hat_t *hat, uintptr_t va)
  {
! 	/*
! 	 * Create range for a single page.
! 	 */
! 	range_info_t range;
! 	range.rng_va = va;
! 	range.rng_cnt = 1; /* one page */
! 	range.rng_level = MIN_PAGE_LEVEL; /* pages are MMU_PAGESIZE */
! 
! 	hat_tlb_inval_range(hat, &range);
  }
  
  /*
   * Interior routine for HAT_UNLOADs from hat_unload_callback(),
   * hat_kmap_unload() OR from hat_steal() code. This routine doesn't
*** 2326,2361 ****
  	}
  	XPV_ALLOW_MIGRATE();
  }
  
  /*
-  * Do the callbacks for ranges being unloaded.
-  */
- typedef struct range_info {
- 	uintptr_t	rng_va;
- 	ulong_t		rng_cnt;
- 	level_t		rng_level;
- } range_info_t;
- 
- /*
   * Invalidate the TLB, and perform the callback to the upper level VM system,
   * for the specified ranges of contiguous pages.
   */
  static void
  handle_ranges(hat_t *hat, hat_callback_t *cb, uint_t cnt, range_info_t *range)
  {
  	while (cnt > 0) {
- 		size_t len;
- 
  		--cnt;
! 		len = range[cnt].rng_cnt << LEVEL_SHIFT(range[cnt].rng_level);
! 		hat_tlb_inval_range(hat, (uintptr_t)range[cnt].rng_va, len);
  
  		if (cb != NULL) {
  			cb->hcb_start_addr = (caddr_t)range[cnt].rng_va;
  			cb->hcb_end_addr = cb->hcb_start_addr;
! 			cb->hcb_end_addr += len;
  			cb->hcb_function(cb);
  		}
  	}
  }
--- 2348,2372 ----
  	}
  	XPV_ALLOW_MIGRATE();
  }
  
  /*
   * Invalidate the TLB, and perform the callback to the upper level VM system,
   * for the specified ranges of contiguous pages.
   */
  static void
  handle_ranges(hat_t *hat, hat_callback_t *cb, uint_t cnt, range_info_t *range)
  {
  	while (cnt > 0) {
  		--cnt;
! 		hat_tlb_inval_range(hat, &range[cnt]);
  
  		if (cb != NULL) {
  			cb->hcb_start_addr = (caddr_t)range[cnt].rng_va;
  			cb->hcb_end_addr = cb->hcb_start_addr;
! 			cb->hcb_end_addr += range[cnt].rng_cnt <<
! 			    LEVEL_SHIFT(range[cnt].rng_level);
  			cb->hcb_function(cb);
  		}
  	}
  }