/*
 * Portions of this source code were derived from Berkeley 4.3 BSD
 * under license from the Regents of the University of California.
 */

/*
 * UNIX machine dependent virtual memory support.
 */

#include <sys/vm.h>
#include <sys/exec.h>
#include <sys/cmn_err.h>
#include <sys/cpu_module.h>
#include <sys/cpu.h>
#include <sys/elf_SPARC.h>
#include <sys/archsystm.h>
#include <vm/hat_sfmmu.h>
#include <sys/memnode.h>
#include <sys/mem_cage.h>
#include <vm/vm_dep.h>
#include <sys/random.h>

#if defined(__sparcv9) && defined(SF_ERRATA_57)
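/*
 * Lowest user address 64-bit processes may be handed on parts subject
 * to Spitfire erratum #57; see the check in map_addr_proc() below.
 */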
caddr_t errata57_limit;
#endif

uint_t page_colors = 0;
uint_t page_colors_mask = 0;
uint_t page_coloring_shift = 0;
int consistent_coloring;
int update_proc_pgcolorbase_after_fork = 0;

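/*
 * sfmmu HAT sizing tunables; the DEFAULT_* and MAX_* values are taken
 * from the sfmmu HAT headers included above.
 */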
uint_t mmu_page_sizes = DEFAULT_MMU_PAGE_SIZES;
uint_t max_mmu_page_sizes = MMU_PAGE_SIZES;
uint_t mmu_hashcnt = DEFAULT_MAX_HASHCNT;
uint_t max_mmu_hashcnt = MAX_HASHCNT;
size_t mmu_ism_pagesize = DEFAULT_ISM_PAGESIZE;

/*
 * The sun4u hardware mapping sizes which will always be supported are
 * 8K, 64K, 512K and 4M. If sun4u based machines need to support other
 * mapping sizes they should reset the values of mmu_exported_page_sizes.
 */
size_t max_shm_lpsize = MMU_PAGESIZE4M;

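/*
 * Raise the data-segment large-page maximums when a platform selects
 * an ISM page size larger than the 4M default: any limit still at its
 * default is bumped to the new ISM page size.
 *
 * A CPU module enabling 32M/256M pages would call (hypothetical
 * caller, for illustration only):
 *
 *	adjust_data_maxlpsize(MMU_PAGESIZE32M);
 */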
void
adjust_data_maxlpsize(size_t ismpagesize)
{
	if (max_uheap_lpsize == MMU_PAGESIZE4M) {
		max_uheap_lpsize = ismpagesize;
	}
	if (max_ustack_lpsize == MMU_PAGESIZE4M) {
		max_ustack_lpsize = ismpagesize;
	}
	if (max_privmap_lpsize == MMU_PAGESIZE4M) {
		max_privmap_lpsize = ismpagesize;
	}
	if (max_shm_lpsize == MMU_PAGESIZE4M) {
		max_shm_lpsize = ismpagesize;
	}
}

/*
 * The maximum amount a randomized mapping will be slewed. We should
 * perhaps arrange things so these tunables can be separate for mmap,
 * mmapobj, and ld.so.
 */
size_t aslr_max_map_skew = 256 * 1024 * 1024; /* 256MB */
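/*
 * Like other kernel globals this can be overridden at boot via
 * /etc/system, e.g. (illustrative value only):
 *
 *	set aslr_max_map_skew = 0x8000000
 */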

/*
 * map_addr_proc() is the routine called when the system is to
 * choose an address for the user. We will pick an address
 * range which is just below the current stack limit. The
 * algorithm used for cache consistency on machines with virtual
 * address caches is such that offset 0 in the vnode is always
 * on a shm_alignment'ed aligned address. Unfortunately, this
 * means that vnodes which are demand paged will not be mapped
 * cache consistently with the executable images. When the
 * cache alignment for a given object is inconsistent, the
 * lower level code must manage the translations so that this
 * is not seen here (at the cost of efficiency, of course).
 *
 * Every mapping will have a redzone of a single page on either side of
 * the request. This is done to leave one page unmapped between segments.
 * This is not required, but it's useful for the user because if their
 * program strays across a segment boundary, it will catch a fault
 * immediately making debugging a little easier. Currently the redzone
 * is mandatory.
 */
void
map_addr_proc(caddr_t *addrp, size_t len, offset_t off, int vacalign,
    caddr_t userlimit, struct proc *p, uint_t flags)
{
	struct as *as = p->p_as;
	caddr_t addr;
	caddr_t base;
	size_t slen;
	uintptr_t align_amount;
	if (p->p_model == DATAMODEL_LP64)
		align_amount = MAX(align_amount, ELF_SPARCV9_MAXPGSZ);
#ifdef VAC
	if (vac && vacalign && (align_amount < shm_alignment))
		align_amount = shm_alignment;
#endif

	if ((flags & MAP_ALIGN) && ((uintptr_t)*addrp > align_amount)) {
		align_amount = (uintptr_t)*addrp;
	}

	ASSERT(ISP2(align_amount));
	ASSERT(align_amount == 0 || align_amount >= PAGESIZE);

	/*
	 * Look for a large enough hole starting below the stack limit.
	 * After finding it, use the upper part.
	 */
	as_purge(as);
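	/*
	 * Only off's position within one alignment unit matters for
	 * the aligned gap search, so reduce it modulo align_amount.
	 */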
	off = off & (align_amount - 1);

	if (as_gap_aligned(as, len, &base, &slen, AH_HI, NULL, align_amount,
	    PAGESIZE, off) == 0) {
		caddr_t as_addr;

		/*
		 * addr is the highest possible address to use since we have
		 * a PAGESIZE redzone at the beginning and end.
		 */
		addr = base + slen - (PAGESIZE + len);
		as_addr = addr;
		/*
		 * Round address DOWN to the alignment amount and
		 * add the offset in.
		 * If addr is greater than as_addr, len would not be large
		 * enough to include the redzone, so we must adjust down
		 * by the alignment amount.
		 */
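		/*
		 * Worked example (hypothetical numbers): with
		 * align_amount = 4M (0x400000), off = 0x2000 and
		 * as_addr = 0xffb452000, rounding down gives 0xffb400000,
		 * and adding off yields 0xffb402000, which is below
		 * as_addr and is used as-is.  Had as_addr been
		 * 0xffb401000 instead, 0xffb402000 would overshoot it
		 * and 4M is subtracted, giving 0xffb002000.
		 */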
		addr = (caddr_t)((uintptr_t)addr & (~(align_amount - 1l)));
		addr += (long)off;
		if (addr > as_addr) {
			addr -= align_amount;
		}

		/*
		 * If randomization is requested, slew the allocation
		 * backwards, within the same gap, by a random amount.
		 */
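		/*
		 * _MAP_RANDOMIZE is expected to be set by the common
		 * mmap code when the process runs with ASLR enabled.
		 */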
		if (flags & _MAP_RANDOMIZE) {
			uint32_t slew;
			uint32_t maxslew;

			(void) random_get_pseudo_bytes((uint8_t *)&slew,
			    sizeof (slew));

			maxslew = MIN(aslr_max_map_skew, (addr - base));
#if defined(SF_ERRATA_57)
			/*
			 * Don't allow ASLR to cause mappings to fail
			 * below because of SF erratum #57.
			 */
			maxslew = MIN(maxslew, (addr - errata57_limit));
#endif

			slew = slew % maxslew;
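			/*
			 * Truncate the slew to a multiple of
			 * align_amount so addr keeps the alignment
			 * and vnode offset the ASSERTs below expect.
			 */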
			addr -= P2ALIGN(slew, align_amount);
		}

		ASSERT(addr > base);
		ASSERT(addr + len < base + slen);
		ASSERT(((uintptr_t)addr & (align_amount - 1l)) ==
		    ((uintptr_t)(off)));
		*addrp = addr;

#if defined(SF_ERRATA_57)
		if (AS_TYPE_64BIT(as) && addr < errata57_limit) {
			*addrp = NULL;
		}
#endif
	} else {
		*addrp = NULL;		/* no more virtual space */
	}
}

/*
 * Platform-dependent page scrub call.
 */
void
pagescrub(page_t *pp, uint_t off, uint_t len)
{
	/*
	 * Rely on pagezero() to clear any UEs in the page.
	 */
	pagezero(pp, off, len);
}

void
mmu_init_kcontext(void)
{
	extern void set_kcontextreg();

	if (kcontextreg)
		set_kcontextreg();
}

void
contig_mem_init(void)
{
	/* not applicable to sun4u */
}

/*ARGSUSED*/
caddr_t
contig_mem_prealloc(caddr_t alloc_base, pgcnt_t npages)
{
	/* not applicable to sun4u */
	return (alloc_base);
}

size_t
exec_get_spslew(void)
{
	return (0);
}