1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 */
25
26 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
27 /* All Rights Reserved */
28
29 /*
30 * Portions of this source code were derived from Berkeley 4.3 BSD
31 * under license from the Regents of the University of California.
32 */
33
34 /*
35 * UNIX machine dependent virtual memory support.
36 */
37
38 #include <sys/vm.h>
39 #include <sys/exec.h>
40 #include <sys/cmn_err.h>
41 #include <sys/cpu_module.h>
42 #include <sys/cpu.h>
43 #include <sys/elf_SPARC.h>
44 #include <sys/archsystm.h>
45 #include <vm/hat_sfmmu.h>
46 #include <sys/memnode.h>
47 #include <sys/mem_cage.h>
48 #include <vm/vm_dep.h>
49 #include <sys/error.h>
50 #include <sys/machsystm.h>
51 #include <vm/seg_kmem.h>
52 #include <sys/stack.h>
53 #include <sys/atomic.h>
54 #include <sys/promif.h>
55 #include <sys/random.h>
56
57 uint_t page_colors = 0;
58 uint_t page_colors_mask = 0;
59 uint_t page_coloring_shift = 0;
60 int consistent_coloring;
61 int update_proc_pgcolorbase_after_fork = 1;
62
63 uint_t mmu_page_sizes = MMU_PAGE_SIZES;
64 uint_t max_mmu_page_sizes = MMU_PAGE_SIZES;
65 uint_t mmu_hashcnt = MAX_HASHCNT;
66 uint_t max_mmu_hashcnt = MAX_HASHCNT;
67 size_t mmu_ism_pagesize = DEFAULT_ISM_PAGESIZE;
68
69 /*
 * A bitmask of the page sizes supported by the hardware, indexed by szc.
71 * The base pagesize (p_szc == 0) must always be supported by the hardware.
72 */
73 int mmu_exported_pagesize_mask;
74 uint_t mmu_exported_page_sizes;
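/*
 * For example (illustrative values only): hardware exporting 8K, 64K,
 * 512K and 4M pages would present a pagesize mask of
 * (1 << TTE8K) | (1 << TTE64K) | (1 << TTE512K) | (1 << TTE4M),
 * with mmu_exported_page_sizes set to 4.
 */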
75
76 uint_t szc_2_userszc[MMU_PAGE_SIZES];
77 uint_t userszc_2_szc[MMU_PAGE_SIZES];
78
79 extern uint_t vac_colors_mask;
80 extern int vac_shift;
81
82 hw_pagesize_t hw_page_array[] = {
83 {MMU_PAGESIZE, MMU_PAGESHIFT, 0, MMU_PAGESIZE >> MMU_PAGESHIFT},
84 {MMU_PAGESIZE64K, MMU_PAGESHIFT64K, 0,
85 MMU_PAGESIZE64K >> MMU_PAGESHIFT},
86 {MMU_PAGESIZE512K, MMU_PAGESHIFT512K, 0,
87 MMU_PAGESIZE512K >> MMU_PAGESHIFT},
88 {MMU_PAGESIZE4M, MMU_PAGESHIFT4M, 0, MMU_PAGESIZE4M >> MMU_PAGESHIFT},
89 {MMU_PAGESIZE32M, MMU_PAGESHIFT32M, 0,
90 MMU_PAGESIZE32M >> MMU_PAGESHIFT},
91 {MMU_PAGESIZE256M, MMU_PAGESHIFT256M, 0,
92 MMU_PAGESIZE256M >> MMU_PAGESHIFT},
93 {0, 0, 0, 0}
94 };
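/*
 * Illustrative reading of the table above: each entry gives a page
 * size, its shift, and the number of 8K base pages it spans; e.g. the
 * 4M entry spans MMU_PAGESIZE4M >> MMU_PAGESHIFT == 512 base pages.
 * The array is terminated by an all-zero entry.
 */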
95
96 /*
97 * Maximum page size used to map 64-bit memory segment kmem64_base..kmem64_end
98 */
99 int max_bootlp_tteszc = TTE256M;
100
101 /*
102 * Maximum and default segment size tunables for user heap, stack, private
103 * and shared anonymous memory, and user text and initialized data.
104 */
105 size_t max_uheap_lpsize = MMU_PAGESIZE64K;
106 size_t default_uheap_lpsize = MMU_PAGESIZE64K;
107 size_t max_ustack_lpsize = MMU_PAGESIZE64K;
108 size_t default_ustack_lpsize = MMU_PAGESIZE64K;
109 size_t max_privmap_lpsize = MMU_PAGESIZE64K;
110 size_t max_uidata_lpsize = MMU_PAGESIZE64K;
111 size_t max_utext_lpsize = MMU_PAGESIZE4M;
112 size_t max_shm_lpsize = MMU_PAGESIZE4M;
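/*
 * These are all simple global tunables; as a sketch (illustrative
 * value), the user heap could be allowed to use 4M pages by adding
 *
 *	set max_uheap_lpsize = 0x400000
 *
 * to /etc/system.
 */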
113
114 /*
115 * Contiguous memory allocator data structures and variables.
116 *
117 * The sun4v kernel must provide a means to allocate physically
118 * contiguous, non-relocatable memory. The contig_mem_arena
119 * and contig_mem_slab_arena exist for this purpose. Allocations
120 * that require physically contiguous non-relocatable memory should
 * be made using contig_mem_alloc() or contig_mem_alloc_align(),
 * which return memory from contig_mem_arena or contig_mem_reloc_arena.
123 * These arenas import memory from the contig_mem_slab_arena one
124 * contiguous chunk at a time.
125 *
126 * When importing slabs, an attempt is made to allocate a large page
127 * to use as backing. As a result of the non-relocatable requirement,
128 * slabs are allocated from the kernel cage freelists. If the cage does
129 * not contain any free contiguous chunks large enough to satisfy the
130 * slab allocation, the slab size will be downsized and the operation
131 * retried. Large slab sizes are tried first to minimize cage
 * fragmentation. If the slab allocation is still unsuccessful, the slab
133 * is allocated from outside the kernel cage. This is undesirable because,
134 * until slabs are freed, it results in non-relocatable chunks scattered
135 * throughout physical memory.
136 *
137 * Allocations from the contig_mem_arena are backed by slabs from the
138 * cage. Allocations from the contig_mem_reloc_arena are backed by
139 * slabs allocated outside the cage. Slabs are left share locked while
140 * in use to prevent non-cage slabs from being relocated.
141 *
142 * Since there is no guarantee that large pages will be available in
143 * the kernel cage, contiguous memory is reserved and added to the
144 * contig_mem_arena at boot time, making it available for later
145 * contiguous memory allocations. This reserve will be used to satisfy
146 * contig_mem allocations first and it is only when the reserve is
147 * completely allocated that new slabs will need to be imported.
148 */
149 static vmem_t *contig_mem_slab_arena;
150 static vmem_t *contig_mem_arena;
151 static vmem_t *contig_mem_reloc_arena;
152 static kmutex_t contig_mem_lock;
153 #define CONTIG_MEM_ARENA_QUANTUM 64
154 #define CONTIG_MEM_SLAB_ARENA_QUANTUM MMU_PAGESIZE64K
155
156 /* contig_mem_arena import slab sizes, in decreasing size order */
157 static size_t contig_mem_import_sizes[] = {
158 MMU_PAGESIZE4M,
159 MMU_PAGESIZE512K,
160 MMU_PAGESIZE64K
161 };
162 #define NUM_IMPORT_SIZES \
163 (sizeof (contig_mem_import_sizes) / sizeof (size_t))
164 static size_t contig_mem_import_size_max = MMU_PAGESIZE4M;
165 size_t contig_mem_slab_size = MMU_PAGESIZE4M;
166
167 /* Boot-time allocated buffer to pre-populate the contig_mem_arena */
168 static size_t contig_mem_prealloc_size;
169 static void *contig_mem_prealloc_buf;
170
171 /*
172 * The maximum amount a randomized mapping will be slewed. We should perhaps
173 * arrange things so these tunables can be separate for mmap, mmapobj, and
 * ld.so.
175 */
176 volatile size_t aslr_max_map_skew = 256 * 1024 * 1024; /* 256MB */
177
178 /*
179 * map_addr_proc() is the routine called when the system is to
180 * choose an address for the user. We will pick an address
181 * range which is just below the current stack limit. The
182 * algorithm used for cache consistency on machines with virtual
183 * address caches is such that offset 0 in the vnode is always
 * on an shm_alignment-aligned address. Unfortunately, this
185 * means that vnodes which are demand paged will not be mapped
186 * cache consistently with the executable images. When the
187 * cache alignment for a given object is inconsistent, the
188 * lower level code must manage the translations so that this
189 * is not seen here (at the cost of efficiency, of course).
190 *
191 * Every mapping will have a redzone of a single page on either side of
192 * the request. This is done to leave one page unmapped between segments.
193 * This is not required, but it's useful for the user because if their
194 * program strays across a segment boundary, it will catch a fault
 * immediately, making debugging a little easier. Currently the redzone
196 * is mandatory.
197 *
198 * addrp is a value/result parameter.
199 * On input it is a hint from the user to be used in a completely
200 * machine dependent fashion. For MAP_ALIGN, addrp contains the
201 * minimal alignment, which must be some "power of two" multiple of
202 * pagesize.
203 *
204 * On output it is NULL if no address can be found in the current
 * process's address space, or else an address that is currently
206 * not mapped for len bytes with a page of red zone on either side.
207 * If vacalign is true, then the selected address will obey the alignment
208 * constraints of a vac machine based on the given off value.
 * constraints of a vac machine based on the given off value.
 */
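/*
 * For example (illustrative): a MAP_ALIGN caller passing
 * *addrp == (caddr_t)MMU_PAGESIZE4M requests 4M alignment; on success
 * *addrp is replaced with an address whose low bits,
 * (uintptr_t)*addrp & (MMU_PAGESIZE4M - 1), equal the offset derived
 * from off below.
 */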
210 /*ARGSUSED3*/
211 void
212 map_addr_proc(caddr_t *addrp, size_t len, offset_t off, int vacalign,
213 caddr_t userlimit, struct proc *p, uint_t flags)
214 {
215 struct as *as = p->p_as;
216 caddr_t addr;
217 caddr_t base;
218 size_t slen;
219 uintptr_t align_amount;
220 int allow_largepage_alignment = 1;
221
222 base = p->p_brkbase;
223 if (userlimit < as->a_userlimit) {
224 /*
225 * This happens when a program wants to map something in
226 * a range that's accessible to a program in a smaller
227 * address space. For example, a 64-bit program might
228 * be calling mmap32(2) to guarantee that the returned
229 * address is below 4Gbytes.
230 */
231 ASSERT(userlimit > base);
232 slen = userlimit - base;
233 } else {
234 slen = p->p_usrstack - base -
235 ((p->p_stk_ctl + PAGEOFFSET) & PAGEMASK);
236 }
237 /* Make len be a multiple of PAGESIZE */
238 len = (len + PAGEOFFSET) & PAGEMASK;
239
240 /*
241 * If the request is larger than the size of a particular
242 * mmu level, then we use that level to map the request.
243 * But this requires that both the virtual and the physical
244 * addresses be aligned with respect to that level, so we
245 * do the virtual bit of nastiness here.
246 *
 * For 32-bit processes, only those which have specified MAP_ALIGN
 * with *addrp of 0 will be aligned on a page size > 4MB. Otherwise
249 * we can potentially waste up to 256MB of the 4G process address
250 * space just for alignment.
251 *
 * XXXQ Should iterate through hw_page_array here to catch
 * all supported page sizes.
254 */
255 if (p->p_model == DATAMODEL_ILP32 && ((flags & MAP_ALIGN) == 0 ||
256 ((uintptr_t)*addrp) != 0)) {
257 allow_largepage_alignment = 0;
258 }
259 if ((mmu_page_sizes == max_mmu_page_sizes) &&
260 allow_largepage_alignment &&
261 (len >= MMU_PAGESIZE256M)) { /* 256MB mappings */
262 align_amount = MMU_PAGESIZE256M;
263 } else if ((mmu_page_sizes == max_mmu_page_sizes) &&
264 allow_largepage_alignment &&
265 (len >= MMU_PAGESIZE32M)) { /* 32MB mappings */
266 align_amount = MMU_PAGESIZE32M;
267 } else if (len >= MMU_PAGESIZE4M) { /* 4MB mappings */
268 align_amount = MMU_PAGESIZE4M;
269 } else if (len >= MMU_PAGESIZE512K) { /* 512KB mappings */
270 align_amount = MMU_PAGESIZE512K;
271 } else if (len >= MMU_PAGESIZE64K) { /* 64KB mappings */
272 align_amount = MMU_PAGESIZE64K;
273 } else {
274 /*
275 * Align virtual addresses on a 64K boundary to ensure
276 * that ELF shared libraries are mapped with the appropriate
277 * alignment constraints by the run-time linker.
278 */
279 align_amount = ELF_SPARC_MAXPGSZ;
280 if ((flags & MAP_ALIGN) && ((uintptr_t)*addrp != 0) &&
281 ((uintptr_t)*addrp < align_amount))
282 align_amount = (uintptr_t)*addrp;
283 }
284
285 /*
286 * 64-bit processes require 1024K alignment of ELF shared libraries.
287 */
288 if (p->p_model == DATAMODEL_LP64)
289 align_amount = MAX(align_amount, ELF_SPARCV9_MAXPGSZ);
290 #ifdef VAC
291 if (vac && vacalign && (align_amount < shm_alignment))
292 align_amount = shm_alignment;
293 #endif
294
295 if ((flags & MAP_ALIGN) && ((uintptr_t)*addrp > align_amount)) {
296 align_amount = (uintptr_t)*addrp;
297 }
298
299 ASSERT(ISP2(align_amount));
300 ASSERT(align_amount == 0 || align_amount >= PAGESIZE);
301
302 /*
303 * Look for a large enough hole starting below the stack limit.
304 * After finding it, use the upper part.
305 */
306 as_purge(as);
307 off = off & (align_amount - 1);
308 if (as_gap_aligned(as, len, &base, &slen, AH_HI, NULL, align_amount,
309 PAGESIZE, off) == 0) {
310 caddr_t as_addr;
311
312 /*
313 * addr is the highest possible address to use since we have
314 * a PAGESIZE redzone at the beginning and end.
315 */
316 addr = base + slen - (PAGESIZE + len);
317 as_addr = addr;
318 /*
319 * Round address DOWN to the alignment amount and
320 * add the offset in.
321 * If addr is greater than as_addr, len would not be large
322 * enough to include the redzone, so we must adjust down
323 * by the alignment amount.
324 */
325 addr = (caddr_t)((uintptr_t)addr & (~(align_amount - 1l)));
326 addr += (long)off;
327 if (addr > as_addr) {
328 addr -= align_amount;
329 }
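		/*
		 * Worked example (illustrative values): with
		 * align_amount = 0x10000 and off = 0x4000, an addr of
		 * 0x7fffb7f2000 masks down to 0x7fffb7f0000 and becomes
		 * 0x7fffb7f4000 after adding off; since that exceeds
		 * the original addr, align_amount is subtracted,
		 * yielding 0x7fffb7e4000.
		 */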
330
331 /*
332 * If randomization is requested, slew the allocation
333 * backwards, within the same gap, by a random amount.
334 *
335 * XXX: This will fall over in processes like Java, which
336 * commonly have a great many small mappings.
337 */
338 if (flags & _MAP_RANDOMIZE) {
339 uint32_t slew;
340
341 (void) random_get_pseudo_bytes((uint8_t *)&slew,
342 sizeof (slew));
343
344 slew = slew % MIN(aslr_max_map_skew, (addr - base));
345 addr -= P2ALIGN(slew, align_amount);
346 }
347
348 ASSERT(addr > base);
349 ASSERT(addr + len < base + slen);
350 ASSERT(((uintptr_t)addr & (align_amount - 1l)) ==
351 ((uintptr_t)(off)));
352 *addrp = addr;
353
354 } else {
355 *addrp = NULL; /* no more virtual space */
356 }
357 }
358
359 /*
360 * Platform-dependent page scrub call.
 * We call the hypervisor to scrub the page.
362 */
363 void
364 pagescrub(page_t *pp, uint_t off, uint_t len)
365 {
366 uint64_t pa, length;
367
	pa = ((uint64_t)pp->p_pagenum << MMU_PAGESHIFT) + off;
369 length = (uint64_t)len;
370
371 (void) mem_scrub(pa, length);
372 }
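/*
 * For example (illustrative values): for a page at pfn 0x12345 with
 * off 0x100 and len 0x200, the hypervisor is asked to scrub 0x200
 * bytes starting at real address (0x12345 << MMU_PAGESHIFT) + 0x100.
 */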
373
374 void
375 sync_data_memory(caddr_t va, size_t len)
376 {
377 /* Call memory sync function */
378 (void) mem_sync(va, len);
379 }
380
381 size_t
382 mmu_get_kernel_lpsize(size_t lpsize)
383 {
384 extern int mmu_exported_pagesize_mask;
385 uint_t tte;
386
387 if (lpsize == 0) {
388 /* no setting for segkmem_lpsize in /etc/system: use default */
389 if (mmu_exported_pagesize_mask & (1 << TTE256M)) {
390 lpsize = MMU_PAGESIZE256M;
391 } else if (mmu_exported_pagesize_mask & (1 << TTE4M)) {
392 lpsize = MMU_PAGESIZE4M;
393 } else if (mmu_exported_pagesize_mask & (1 << TTE64K)) {
394 lpsize = MMU_PAGESIZE64K;
395 } else {
396 lpsize = MMU_PAGESIZE;
397 }
398
399 return (lpsize);
400 }
401
402 for (tte = TTE8K; tte <= TTE256M; tte++) {
403
404 if ((mmu_exported_pagesize_mask & (1 << tte)) == 0)
405 continue;
406
407 if (lpsize == TTEBYTES(tte))
408 return (lpsize);
409 }
410
411 lpsize = TTEBYTES(TTE8K);
412 return (lpsize);
413 }
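/*
 * For example (illustrative): setting segkmem_lpsize to 0x400000 in
 * /etc/system is honored when the hypervisor exports 4M pages, since
 * the loop above matches TTE4M; an unsupported value such as 0x20000
 * falls through and is quietly replaced with MMU_PAGESIZE.
 */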
414
415 void
mmu_init_kcontext(void)
417 {
418 }
419
420 /*ARGSUSED*/
421 void
422 mmu_init_kernel_pgsz(struct hat *hat)
423 {
424 }
425
426 static void *
427 contig_mem_span_alloc(vmem_t *vmp, size_t size, int vmflag)
428 {
429 page_t *ppl;
430 page_t *rootpp;
431 caddr_t addr = NULL;
432 pgcnt_t npages = btopr(size);
433 page_t **ppa;
434 int pgflags;
	spgcnt_t i = 0;

438 ASSERT(size <= contig_mem_import_size_max);
439 ASSERT((size & (size - 1)) == 0);
440
441 if ((addr = vmem_xalloc(vmp, size, size, 0, 0,
442 NULL, NULL, vmflag)) == NULL) {
443 return (NULL);
444 }
445
446 /* The address should be slab-size aligned. */
447 ASSERT(((uintptr_t)addr & (size - 1)) == 0);
448
449 if (page_resv(npages, vmflag & VM_KMFLAGS) == 0) {
450 vmem_xfree(vmp, addr, size);
451 return (NULL);
452 }
453
454 pgflags = PG_EXCL;
455 if (vmflag & VM_NORELOC)
456 pgflags |= PG_NORELOC;
457
458 ppl = page_create_va_large(&kvp, (u_offset_t)(uintptr_t)addr, size,
459 pgflags, &kvseg, addr, NULL);
460
461 if (ppl == NULL) {
462 vmem_xfree(vmp, addr, size);
463 page_unresv(npages);
464 return (NULL);
465 }
466
467 rootpp = ppl;
468 ppa = kmem_zalloc(npages * sizeof (page_t *), KM_SLEEP);
469 while (ppl != NULL) {
470 page_t *pp = ppl;
471 ppa[i++] = pp;
472 page_sub(&ppl, pp);
473 ASSERT(page_iolock_assert(pp));
474 ASSERT(PAGE_EXCL(pp));
475 page_io_unlock(pp);
476 }
477
478 /*
479 * Load the locked entry. It's OK to preload the entry into
480 * the TSB since we now support large mappings in the kernel TSB.
481 */
482 hat_memload_array(kas.a_hat, (caddr_t)rootpp->p_offset, size,
483 ppa, (PROT_ALL & ~PROT_USER) | HAT_NOSYNC, HAT_LOAD_LOCK);
484
485 ASSERT(i == page_get_pagecnt(ppa[0]->p_szc));
486 for (--i; i >= 0; --i) {
487 ASSERT(ppa[i]->p_szc == ppa[0]->p_szc);
488 ASSERT(page_pptonum(ppa[i]) == page_pptonum(ppa[0]) + i);
489 (void) page_pp_lock(ppa[i], 0, 1);
490 /*
491 * Leave the page share locked. For non-cage pages,
492 * this would prevent memory DR if it were supported
493 * on sun4v.
494 */
495 page_downgrade(ppa[i]);
496 }
497
498 kmem_free(ppa, npages * sizeof (page_t *));
499 return (addr);
500 }
501
502 /*
503 * Allocates a slab by first trying to use the largest slab size
504 * in contig_mem_import_sizes and then falling back to smaller slab
505 * sizes still large enough for the allocation. The sizep argument
506 * is a pointer to the requested size. When a slab is successfully
507 * allocated, the slab size, which must be >= *sizep and <=
508 * contig_mem_import_size_max, is returned in the *sizep argument.
509 * Returns the virtual address of the new slab.
510 */
511 static void *
512 span_alloc_downsize(vmem_t *vmp, size_t *sizep, size_t align, int vmflag)
513 {
514 int i;
515
516 ASSERT(*sizep <= contig_mem_import_size_max);
517
518 for (i = 0; i < NUM_IMPORT_SIZES; i++) {
519 size_t page_size = contig_mem_import_sizes[i];
520
521 /*
		 * Check that the alignment is also no greater than
		 * the import (large page) size. In the case where the
524 * alignment is larger than the size, a large page
525 * large enough for the allocation is not necessarily
526 * physical-address aligned to satisfy the requested
527 * alignment. Since alignment is required to be a
528 * power-of-2, any large page >= size && >= align will
529 * suffice.
530 */
531 if (*sizep <= page_size && align <= page_size) {
532 void *addr;
533 addr = contig_mem_span_alloc(vmp, page_size, vmflag);
534 if (addr == NULL)
535 continue;
536 *sizep = page_size;
537 return (addr);
538 }
540 }
541
542 return (NULL);
543 }
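/*
 * For example (illustrative): a 512K request first attempts a 4M slab;
 * if the cage cannot supply one, a 512K slab is attempted next, and on
 * success *sizep is updated to the slab size actually obtained.
 */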
544
545 static void *
546 contig_mem_span_xalloc(vmem_t *vmp, size_t *sizep, size_t align, int vmflag)
547 {
548 return (span_alloc_downsize(vmp, sizep, align, vmflag | VM_NORELOC));
549 }
550
551 static void *
552 contig_mem_reloc_span_xalloc(vmem_t *vmp, size_t *sizep, size_t align,
553 int vmflag)
554 {
555 ASSERT((vmflag & VM_NORELOC) == 0);
556 return (span_alloc_downsize(vmp, sizep, align, vmflag));
557 }
558
559 /*
560 * Free a span, which is always exactly one large page.
561 */
562 static void
563 contig_mem_span_free(vmem_t *vmp, void *inaddr, size_t size)
564 {
565 page_t *pp;
566 caddr_t addr = inaddr;
567 caddr_t eaddr;
568 pgcnt_t npages = btopr(size);
569 page_t *rootpp = NULL;
570
571 ASSERT(size <= contig_mem_import_size_max);
572 /* All slabs should be size aligned */
573 ASSERT(((uintptr_t)addr & (size - 1)) == 0);
574
575 hat_unload(kas.a_hat, addr, size, HAT_UNLOAD_UNLOCK);
576
577 for (eaddr = addr + size; addr < eaddr; addr += PAGESIZE) {
578 pp = page_find(&kvp, (u_offset_t)(uintptr_t)addr);
579 if (pp == NULL) {
580 panic("contig_mem_span_free: page not found");
581 }
582 if (!page_tryupgrade(pp)) {
583 page_unlock(pp);
584 pp = page_lookup(&kvp,
585 (u_offset_t)(uintptr_t)addr, SE_EXCL);
586 if (pp == NULL)
587 panic("contig_mem_span_free: page not found");
588 }
589
590 ASSERT(PAGE_EXCL(pp));
591 ASSERT(size == page_get_pagesize(pp->p_szc));
592 ASSERT(rootpp == NULL || rootpp->p_szc == pp->p_szc);
593 ASSERT(rootpp == NULL || (page_pptonum(rootpp) +
594 (pgcnt_t)btop(addr - (caddr_t)inaddr) == page_pptonum(pp)));
595
596 page_pp_unlock(pp, 0, 1);
597
598 if (rootpp == NULL)
599 rootpp = pp;
600 }
601 page_destroy_pages(rootpp);
602 page_unresv(npages);
603
604 if (vmp != NULL)
605 vmem_xfree(vmp, inaddr, size);
606 }
607
608 static void *
609 contig_vmem_xalloc_aligned_wrapper(vmem_t *vmp, size_t *sizep, size_t align,
610 int vmflag)
611 {
612 ASSERT((align & (align - 1)) == 0);
613 return (vmem_xalloc(vmp, *sizep, align, 0, 0, NULL, NULL, vmflag));
614 }
615
616 /*
617 * contig_mem_alloc, contig_mem_alloc_align
618 *
619 * Caution: contig_mem_alloc and contig_mem_alloc_align should be
620 * used only when physically contiguous non-relocatable memory is
621 * required. Furthermore, use of these allocation routines should be
 * minimized, as should the allocation size. As described in the
623 * contig_mem_arena comment block above, slab allocations fall back to
624 * being outside of the cage. Therefore, overuse of these allocation
625 * routines can lead to non-relocatable large pages being allocated
 * outside the cage. Such pages prevent the allocation of any larger
 * page that overlaps them, which can impact performance for
 * applications that use, e.g., 256M large pages.
629 */
630
631 /*
632 * Allocates size aligned contiguous memory up to contig_mem_import_size_max.
633 * Size must be a power of 2.
634 */
635 void *
636 contig_mem_alloc(size_t size)
637 {
638 ASSERT((size & (size - 1)) == 0);
639 return (contig_mem_alloc_align(size, size));
640 }
641
642 /*
643 * contig_mem_alloc_align allocates real contiguous memory with the
644 * specified alignment up to contig_mem_import_size_max. The alignment must
 * be a power of 2 and no greater than contig_mem_import_size_max. On DEBUG
 * kernels we assert that the alignment is a power of 2; on non-DEBUG
 * kernels, vmem_xalloc will panic for non-power-of-2 alignments.
648 */
649 void *
650 contig_mem_alloc_align(size_t size, size_t align)
651 {
652 void *buf;
653
654 ASSERT(size <= contig_mem_import_size_max);
655 ASSERT(align <= contig_mem_import_size_max);
656 ASSERT((align & (align - 1)) == 0);
657
658 if (align < CONTIG_MEM_ARENA_QUANTUM)
659 align = CONTIG_MEM_ARENA_QUANTUM;
660
661 /*
662 * We take the lock here to serialize span allocations.
663 * We do not lose concurrency for the common case, since
664 * allocations that don't require new span allocations
665 * are serialized by vmem_xalloc. Serializing span
666 * allocations also prevents us from trying to allocate
667 * more spans than necessary.
668 */
669 mutex_enter(&contig_mem_lock);
670
671 buf = vmem_xalloc(contig_mem_arena, size, align, 0, 0,
672 NULL, NULL, VM_NOSLEEP | VM_NORELOC);
673
674 if ((buf == NULL) && (size <= MMU_PAGESIZE)) {
675 mutex_exit(&contig_mem_lock);
676 return (vmem_xalloc(static_alloc_arena, size, align, 0, 0,
677 NULL, NULL, VM_NOSLEEP));
678 }
679
680 if (buf == NULL) {
681 buf = vmem_xalloc(contig_mem_reloc_arena, size, align, 0, 0,
682 NULL, NULL, VM_NOSLEEP);
683 }
684
685 mutex_exit(&contig_mem_lock);
686
687 return (buf);
688 }
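/*
 * Usage sketch (illustrative only): a caller needing a 64K physically
 * contiguous, non-relocatable, 64K-aligned buffer might do
 *
 *	void *buf = contig_mem_alloc_align(MMU_PAGESIZE64K,
 *	    MMU_PAGESIZE64K);
 *	if (buf != NULL) {
 *		... use the buffer; va_to_pa(buf), assumed available
 *		... via <sys/machsystm.h>, yields its real address
 *		contig_mem_free(buf, MMU_PAGESIZE64K);
 *	}
 *
 * The size passed to contig_mem_free must match the allocated size.
 */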
689
690 void
691 contig_mem_free(void *vaddr, size_t size)
692 {
693 if (vmem_contains(contig_mem_arena, vaddr, size)) {
694 vmem_xfree(contig_mem_arena, vaddr, size);
695 } else if (size > MMU_PAGESIZE) {
696 vmem_xfree(contig_mem_reloc_arena, vaddr, size);
697 } else {
698 vmem_xfree(static_alloc_arena, vaddr, size);
699 }
700 }
701
702 /*
703 * We create a set of stacked vmem arenas to enable us to
 * allocate large (>PAGESIZE) chunks of contiguous Real Address space.
 * The vmem_xcreate interface is used to create the contig_mem_arena,
 * allowing the import routine to downsize the requested slab size
707 * and return a smaller slab.
708 */
709 void
710 contig_mem_init(void)
711 {
712 mutex_init(&contig_mem_lock, NULL, MUTEX_DEFAULT, NULL);
713
714 contig_mem_slab_arena = vmem_xcreate("contig_mem_slab_arena", NULL, 0,
715 CONTIG_MEM_SLAB_ARENA_QUANTUM, contig_vmem_xalloc_aligned_wrapper,
716 vmem_xfree, heap_arena, 0, VM_SLEEP | VMC_XALIGN);
717
718 contig_mem_arena = vmem_xcreate("contig_mem_arena", NULL, 0,
719 CONTIG_MEM_ARENA_QUANTUM, contig_mem_span_xalloc,
720 contig_mem_span_free, contig_mem_slab_arena, 0,
721 VM_SLEEP | VM_BESTFIT | VMC_XALIGN);
722
723 contig_mem_reloc_arena = vmem_xcreate("contig_mem_reloc_arena", NULL, 0,
724 CONTIG_MEM_ARENA_QUANTUM, contig_mem_reloc_span_xalloc,
725 contig_mem_span_free, contig_mem_slab_arena, 0,
726 VM_SLEEP | VM_BESTFIT | VMC_XALIGN);
727
728 if (contig_mem_prealloc_buf == NULL || vmem_add(contig_mem_arena,
729 contig_mem_prealloc_buf, contig_mem_prealloc_size, VM_SLEEP)
730 == NULL) {
731 cmn_err(CE_WARN, "Failed to pre-populate contig_mem_arena");
732 }
733 }
734
735 /*
736 * In calculating how much memory to pre-allocate, we include a small
737 * amount per-CPU to account for per-CPU buffers in line with measured
 * values for different-sized systems. contig_mem_prealloc_base_size is
 * a CPU-specific base amount, pre-allocated before considering per-CPU
 * requirements and memory size. We always pre-allocate a minimum amount
741 * of memory determined by PREALLOC_MIN. Beyond that, we take the minimum
742 * of contig_mem_prealloc_base_size and a small percentage of physical
743 * memory to prevent allocating too much on smaller systems.
744 * contig_mem_prealloc_base_size is global, allowing for the CPU module
745 * to increase its value if necessary.
746 */
747 #define PREALLOC_PER_CPU (256 * 1024) /* 256K */
748 #define PREALLOC_PERCENT (4) /* 4% */
749 #define PREALLOC_MIN (16 * 1024 * 1024) /* 16M */
750 size_t contig_mem_prealloc_base_size = 0;
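/*
 * Worked example (illustrative figures): a 64-CPU guest with 16GB of
 * memory and a contig_mem_prealloc_base_size of zero yields
 * MIN(64 * 256K, 4% of 16GB) = MIN(16M, ~655M) = 16M, which already
 * meets PREALLOC_MIN and is a 4M multiple, so 16M is pre-allocated.
 */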
751
752 /*
 * Called at boot time to allow pre-allocation of contiguous memory.
754 * The argument 'alloc_base' is the requested base address for the
755 * allocation and originates in startup_memlist.
756 */
757 caddr_t
758 contig_mem_prealloc(caddr_t alloc_base, pgcnt_t npages)
759 {
760 caddr_t chunkp;
761
762 contig_mem_prealloc_size = MIN((PREALLOC_PER_CPU * ncpu_guest_max) +
763 contig_mem_prealloc_base_size,
764 (ptob(npages) * PREALLOC_PERCENT) / 100);
765 contig_mem_prealloc_size = MAX(contig_mem_prealloc_size, PREALLOC_MIN);
766 contig_mem_prealloc_size = P2ROUNDUP(contig_mem_prealloc_size,
767 MMU_PAGESIZE4M);
768
769 alloc_base = (caddr_t)roundup((uintptr_t)alloc_base, MMU_PAGESIZE4M);
770 if (prom_alloc(alloc_base, contig_mem_prealloc_size,
771 MMU_PAGESIZE4M) != alloc_base) {
772
773 /*
774 * Failed. This may mean the physical memory has holes in it
775 * and it will be more difficult to get large contiguous
776 * pieces of memory. Since we only guarantee contiguous
777 * pieces of memory contig_mem_import_size_max or smaller,
778 * loop, getting contig_mem_import_size_max at a time, until
779 * failure or contig_mem_prealloc_size is reached.
780 */
781 for (chunkp = alloc_base;
782 (chunkp - alloc_base) < contig_mem_prealloc_size;
783 chunkp += contig_mem_import_size_max) {
784
785 if (prom_alloc(chunkp, contig_mem_import_size_max,
786 MMU_PAGESIZE4M) != chunkp) {
787 break;
788 }
789 }
790 contig_mem_prealloc_size = chunkp - alloc_base;
791 ASSERT(contig_mem_prealloc_size != 0);
792 }
793
794 if (contig_mem_prealloc_size != 0) {
795 contig_mem_prealloc_buf = alloc_base;
796 } else {
797 contig_mem_prealloc_buf = NULL;
798 }
799 alloc_base += contig_mem_prealloc_size;
800
801 return (alloc_base);
802 }