7029 want per-process exploit mitigation features (secflags)
7030 want basic address space layout randomization (aslr)
7031 noexec_user_stack should be a secflag
7032 want a means to forbid mappings around NULL.

Old version (before the ASLR changes):
  35  * UNIX machine dependent virtual memory support.
  36  */
  37 
  38 #include <sys/vm.h>
  39 #include <sys/exec.h>
  40 #include <sys/cmn_err.h>
  41 #include <sys/cpu_module.h>
  42 #include <sys/cpu.h>
  43 #include <sys/elf_SPARC.h>
  44 #include <sys/archsystm.h>
  45 #include <vm/hat_sfmmu.h>
  46 #include <sys/memnode.h>
  47 #include <sys/mem_cage.h>
  48 #include <vm/vm_dep.h>
  49 #include <sys/error.h>
  50 #include <sys/machsystm.h>
  51 #include <vm/seg_kmem.h>
  52 #include <sys/stack.h>
  53 #include <sys/atomic.h>
  54 #include <sys/promif.h>
  55 
  56 uint_t page_colors = 0;
  57 uint_t page_colors_mask = 0;
  58 uint_t page_coloring_shift = 0;
  59 int consistent_coloring;
  60 int update_proc_pgcolorbase_after_fork = 1;
  61 
  62 uint_t mmu_page_sizes = MMU_PAGE_SIZES;
  63 uint_t max_mmu_page_sizes = MMU_PAGE_SIZES;
  64 uint_t mmu_hashcnt = MAX_HASHCNT;
  65 uint_t max_mmu_hashcnt = MAX_HASHCNT;
  66 size_t mmu_ism_pagesize = DEFAULT_ISM_PAGESIZE;
  67 
  68 /*
  69  * A bitmask of the page sizes supported by the hardware, indexed by szc.
  70  * The base pagesize (p_szc == 0) must always be supported by the hardware.
  71  */
  72 int mmu_exported_pagesize_mask;
  73 uint_t mmu_exported_page_sizes;
  74 
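
The exported pagesize mask is consumed bit-by-bit elsewhere in the VM code; a minimal sketch of how a size code would be checked against it (szc_is_supported is a hypothetical helper, not a function in this file):

/*
 * Hypothetical helper for illustration: bit N of
 * mmu_exported_pagesize_mask corresponds to page size code N.
 */
static int
szc_is_supported(int szc)
{
	return ((mmu_exported_pagesize_mask & (1 << szc)) != 0);
}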


 151 static  kmutex_t        contig_mem_lock;
 152 #define CONTIG_MEM_ARENA_QUANTUM        64
 153 #define CONTIG_MEM_SLAB_ARENA_QUANTUM   MMU_PAGESIZE64K
 154 
 155 /* contig_mem_arena import slab sizes, in decreasing size order */
 156 static size_t contig_mem_import_sizes[] = {
 157         MMU_PAGESIZE4M,
 158         MMU_PAGESIZE512K,
 159         MMU_PAGESIZE64K
 160 };
 161 #define NUM_IMPORT_SIZES        \
 162         (sizeof (contig_mem_import_sizes) / sizeof (size_t))
 163 static size_t contig_mem_import_size_max        = MMU_PAGESIZE4M;
 164 size_t contig_mem_slab_size                     = MMU_PAGESIZE4M;
 165 
 166 /* Boot-time allocated buffer to pre-populate the contig_mem_arena */
 167 static size_t contig_mem_prealloc_size;
 168 static void *contig_mem_prealloc_buf;
 169 
 170 /*
 171  * map_addr_proc() is the routine called when the system is to
 172  * choose an address for the user.  We will pick an address
 173  * range which is just below the current stack limit.  The
 174  * algorithm used for cache consistency on machines with virtual
 175  * address caches is such that offset 0 in the vnode is always
  176  * on a shm_alignment-aligned address.  Unfortunately, this
 177  * means that vnodes which are demand paged will not be mapped
 178  * cache consistently with the executable images.  When the
 179  * cache alignment for a given object is inconsistent, the
 180  * lower level code must manage the translations so that this
 181  * is not seen here (at the cost of efficiency, of course).
 182  *
 183  * Every mapping will have a redzone of a single page on either side of
 184  * the request. This is done to leave one page unmapped between segments.
 185  * This is not required, but it's useful for the user because if their
  186  * program strays across a segment boundary, it will fault
  187  * immediately, making debugging a little easier.  Currently the redzone
 188  * is mandatory.
 189  *
 190  * addrp is a value/result parameter.


 303 
 304                 /*
 305                  * addr is the highest possible address to use since we have
 306                  * a PAGESIZE redzone at the beginning and end.
 307                  */
 308                 addr = base + slen - (PAGESIZE + len);
 309                 as_addr = addr;
 310                 /*
 311                  * Round address DOWN to the alignment amount and
 312                  * add the offset in.
  313                  * If addr is now greater than as_addr, the offset pushed
  314                  * the mapping past the highest address that leaves room
  315                  * for the redzone, so adjust down by the alignment amount.
 316                  */
 317                 addr = (caddr_t)((uintptr_t)addr & (~(align_amount - 1l)));
 318                 addr += (long)off;
 319                 if (addr > as_addr) {
 320                         addr -= align_amount;
 321                 }
 322 
 323                 ASSERT(addr > base);
 324                 ASSERT(addr + len < base + slen);
 325                 ASSERT(((uintptr_t)addr & (align_amount - 1l)) ==
 326                     ((uintptr_t)(off)));
 327                 *addrp = addr;
 328 
 329         } else {
 330                 *addrp = NULL;  /* no more virtual space */
 331         }
 332 }
 333 
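To make the address selection above concrete, here is a standalone sketch with made-up numbers (example_pick_addr is hypothetical and PAGESIZE is assumed to be 8K); it mirrors the round-down, add-offset, back-off sequence in the code:

/*
 * Illustration only.  With a 1MB gap starting at 0x10000000, a 128K
 * request, 64K alignment and a zero offset, the algorithm settles on
 * 0x100d0000.
 */
static uintptr_t
example_pick_addr(void)
{
	uintptr_t base = 0x10000000;		/* bottom of the gap */
	size_t slen = 0x00100000;		/* 1MB of usable space */
	size_t len = 0x20000;			/* 128K request */
	size_t align_amount = 0x10000;		/* 64K alignment */
	size_t off = 0;				/* offset within alignment */
	uintptr_t addr, as_addr;

	/* Highest usable address, leaving an 8K redzone page at each end. */
	addr = base + slen - (0x2000 + len);	/* 0x100de000 */
	as_addr = addr;

	addr &= ~(uintptr_t)(align_amount - 1);	/* round down: 0x100d0000 */
	addr += off;
	if (addr > as_addr)			/* offset overshot the top */
		addr -= align_amount;
	return (addr);
}

Only when adding the offset pushes the address past as_addr does the final back-off fire; with a zero offset it never does.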
 334 /*
 335  * Platform-dependent page scrub call.
  336  * We call the hypervisor to scrub the page.
 337  */
 338 void
 339 pagescrub(page_t *pp, uint_t off, uint_t len)
 340 {
 341         uint64_t pa, length;
 342 


 758                     chunkp += contig_mem_import_size_max) {
 759 
 760                         if (prom_alloc(chunkp, contig_mem_import_size_max,
 761                             MMU_PAGESIZE4M) != chunkp) {
 762                                 break;
 763                         }
 764                 }
 765                 contig_mem_prealloc_size = chunkp - alloc_base;
 766                 ASSERT(contig_mem_prealloc_size != 0);
 767         }
 768 
 769         if (contig_mem_prealloc_size != 0) {
 770                 contig_mem_prealloc_buf = alloc_base;
 771         } else {
 772                 contig_mem_prealloc_buf = NULL;
 773         }
 774         alloc_base += contig_mem_prealloc_size;
 775 
 776         return (alloc_base);
 777 }
 778 
 779 static uint_t sp_color_stride = 16;
 780 static uint_t sp_color_mask = 0x1f;
 781 static uint_t sp_current_color = (uint_t)-1;
 782 
 783 size_t
 784 exec_get_spslew(void)
 785 {
 786         uint_t spcolor = atomic_inc_32_nv(&sp_current_color);
 787         return ((size_t)((spcolor & sp_color_mask) * SA(sp_color_stride)));
 788 }
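
exec_get_spslew() spreads the initial user stacks of successive execs across cache colors: each call atomically bumps a counter, keeps the low five bits (32 colors via sp_color_mask), and scales by the 16-byte sp_color_stride, rounded to the stack alignment by SA().  A rough sketch of the values produced (example_spslew is hypothetical and ignores the SA() rounding of the stride):

/*
 * Illustration only: the nth exec gets color n mod 32, so the slews
 * cycle 0, 16, 32, ... 496 bytes (before stack-alignment rounding).
 */
static size_t
example_spslew(uint_t nth_exec)
{
	uint_t color = nth_exec & 0x1f;		/* sp_color_mask */

	return ((size_t)color * 16);		/* sp_color_stride bytes */
}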

New version (with the ASLR changes):
  35  * UNIX machine dependent virtual memory support.
  36  */
  37 
  38 #include <sys/vm.h>
  39 #include <sys/exec.h>
  40 #include <sys/cmn_err.h>
  41 #include <sys/cpu_module.h>
  42 #include <sys/cpu.h>
  43 #include <sys/elf_SPARC.h>
  44 #include <sys/archsystm.h>
  45 #include <vm/hat_sfmmu.h>
  46 #include <sys/memnode.h>
  47 #include <sys/mem_cage.h>
  48 #include <vm/vm_dep.h>
  49 #include <sys/error.h>
  50 #include <sys/machsystm.h>
  51 #include <vm/seg_kmem.h>
  52 #include <sys/stack.h>
  53 #include <sys/atomic.h>
  54 #include <sys/promif.h>
  55 #include <sys/random.h>
  56 
  57 uint_t page_colors = 0;
  58 uint_t page_colors_mask = 0;
  59 uint_t page_coloring_shift = 0;
  60 int consistent_coloring;
  61 int update_proc_pgcolorbase_after_fork = 1;
  62 
  63 uint_t mmu_page_sizes = MMU_PAGE_SIZES;
  64 uint_t max_mmu_page_sizes = MMU_PAGE_SIZES;
  65 uint_t mmu_hashcnt = MAX_HASHCNT;
  66 uint_t max_mmu_hashcnt = MAX_HASHCNT;
  67 size_t mmu_ism_pagesize = DEFAULT_ISM_PAGESIZE;
  68 
  69 /*
  70  * A bitmask of the page sizes supported by the hardware, indexed by szc.
  71  * The base pagesize (p_szc == 0) must always be supported by the hardware.
  72  */
  73 int mmu_exported_pagesize_mask;
  74 uint_t mmu_exported_page_sizes;
  75 


 152 static  kmutex_t        contig_mem_lock;
 153 #define CONTIG_MEM_ARENA_QUANTUM        64
 154 #define CONTIG_MEM_SLAB_ARENA_QUANTUM   MMU_PAGESIZE64K
 155 
 156 /* contig_mem_arena import slab sizes, in decreasing size order */
 157 static size_t contig_mem_import_sizes[] = {
 158         MMU_PAGESIZE4M,
 159         MMU_PAGESIZE512K,
 160         MMU_PAGESIZE64K
 161 };
 162 #define NUM_IMPORT_SIZES        \
 163         (sizeof (contig_mem_import_sizes) / sizeof (size_t))
 164 static size_t contig_mem_import_size_max        = MMU_PAGESIZE4M;
 165 size_t contig_mem_slab_size                     = MMU_PAGESIZE4M;
 166 
 167 /* Boot-time allocated buffer to pre-populate the contig_mem_arena */
 168 static size_t contig_mem_prealloc_size;
 169 static void *contig_mem_prealloc_buf;
 170 
 171 /*
 172  * The maximum amount a randomized mapping will be slewed.  We should perhaps
 173  * arrange things so these tunables can be separate for mmap, mmapobj, and
 174  * ld.so.
 175  */
 176 size_t aslr_max_map_skew = 256 * 1024 * 1024; /* 256MB */
 177 
 178 /*
 179  * map_addr_proc() is the routine called when the system is to
 180  * choose an address for the user.  We will pick an address
 181  * range which is just below the current stack limit.  The
 182  * algorithm used for cache consistency on machines with virtual
 183  * address caches is such that offset 0 in the vnode is always
 184  * on a shm_alignment-aligned address.  Unfortunately, this
 185  * means that vnodes which are demand paged will not be mapped
 186  * cache consistently with the executable images.  When the
 187  * cache alignment for a given object is inconsistent, the
 188  * lower level code must manage the translations so that this
 189  * is not seen here (at the cost of efficiency, of course).
 190  *
 191  * Every mapping will have a redzone of a single page on either side of
 192  * the request. This is done to leave one page unmapped between segments.
 193  * This is not required, but it's useful for the user because if their
 194  * program strays across a segment boundary, it will fault
 195  * immediately, making debugging a little easier.  Currently the redzone
 196  * is mandatory.
 197  *
 198  * addrp is a value/result parameter.


 311 
 312                 /*
 313                  * addr is the highest possible address to use since we have
 314                  * a PAGESIZE redzone at the beginning and end.
 315                  */
 316                 addr = base + slen - (PAGESIZE + len);
 317                 as_addr = addr;
 318                 /*
 319                  * Round address DOWN to the alignment amount and
 320                  * add the offset in.
 321                  * If addr is now greater than as_addr, the offset pushed
 322                  * the mapping past the highest address that leaves room
 323                  * for the redzone, so adjust down by the alignment amount.
 324                  */
 325                 addr = (caddr_t)((uintptr_t)addr & (~(align_amount - 1l)));
 326                 addr += (long)off;
 327                 if (addr > as_addr) {
 328                         addr -= align_amount;
 329                 }
 330 
 331                 /*
 332                  * If randomization is requested, slew the allocation
 333                  * backwards, within the same gap, by a random amount.
 334                  */
 335                 if (flags & _MAP_RANDOMIZE) {
 336                         uint32_t slew;
 337 
 338                         (void) random_get_pseudo_bytes((uint8_t *)&slew,
 339                             sizeof (slew));
 340 
 341                         slew = slew % MIN(aslr_max_map_skew, (addr - base));
 342                         addr -= P2ALIGN(slew, align_amount);
 343                 }
 344 
 345                 ASSERT(addr > base);
 346                 ASSERT(addr + len < base + slen);
 347                 ASSERT(((uintptr_t)addr & (align_amount - 1l)) ==
 348                     ((uintptr_t)(off)));
 349                 *addrp = addr;
 350 
 351         } else {
 352                 *addrp = NULL;  /* no more virtual space */
 353         }
 354 }
 355 
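The new _MAP_RANDOMIZE step draws 32 random bits, reduces them modulo the smaller of aslr_max_map_skew and the gap remaining below the chosen address, aligns the result down so the offset-within-alignment invariant is preserved, and slides the mapping down by that amount.  A standalone sketch of the arithmetic (example_randomize and EX_P2ALIGN are hypothetical stand-ins for the in-kernel code and the P2ALIGN macro):

/* EX_P2ALIGN mimics P2ALIGN: round x down to a multiple of a (a power of 2). */
#define	EX_P2ALIGN(x, a)	((x) & ~((a) - 1))

/*
 * Illustration only; assumes addr > base so the modulus is nonzero.
 */
static uintptr_t
example_randomize(uintptr_t addr, uintptr_t base, size_t align_amount,
    uint32_t random_bits, size_t max_skew)
{
	size_t gap = addr - base;		/* room left below addr */
	size_t bound = (gap < max_skew) ? gap : max_skew;
	size_t slew = random_bits % bound;

	/* keep addr on the same alignment/offset before sliding it down */
	return (addr - EX_P2ALIGN(slew, align_amount));
}

Because the slew is rounded down to align_amount before it is subtracted, the alignment ASSERT at the end of map_addr_proc() continues to hold.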
 356 /*
 357  * Platform-dependent page scrub call.
 358  * We call the hypervisor to scrub the page.
 359  */
 360 void
 361 pagescrub(page_t *pp, uint_t off, uint_t len)
 362 {
 363         uint64_t pa, length;
 364 


 780                     chunkp += contig_mem_import_size_max) {
 781 
 782                         if (prom_alloc(chunkp, contig_mem_import_size_max,
 783                             MMU_PAGESIZE4M) != chunkp) {
 784                                 break;
 785                         }
 786                 }
 787                 contig_mem_prealloc_size = chunkp - alloc_base;
 788                 ASSERT(contig_mem_prealloc_size != 0);
 789         }
 790 
 791         if (contig_mem_prealloc_size != 0) {
 792                 contig_mem_prealloc_buf = alloc_base;
 793         } else {
 794                 contig_mem_prealloc_buf = NULL;
 795         }
 796         alloc_base += contig_mem_prealloc_size;
 797 
 798         return (alloc_base);
 799 }