uts: Allow for address space randomisation.
Randomise the base addresses of shared objects, non-fixed mappings, the
stack and the heap.  Introduce a service, svc:/system/process-security,
and a tool, psecflags(1), to control and observe it.

@@ -44,10 +44,11 @@
 #include <sys/archsystm.h>
 #include <vm/hat_sfmmu.h>
 #include <sys/memnode.h>
 #include <sys/mem_cage.h>
 #include <vm/vm_dep.h>
+#include <sys/random.h>
 
 #if defined(__sparcv9) && defined(SF_ERRATA_57)
 caddr_t errata57_limit;
 #endif
 

@@ -135,10 +136,17 @@
                 max_shm_lpsize = ismpagesize;
         }
 }
 
 /*
+ * The maximum amount a randomized mapping will be slewed.  We should perhaps
+ * arrange things so these tunables can be separate for mmap, mmapobj, and
+ * ld.so
+ */
+volatile size_t aslr_max_map_skew = 256 * 1024 * 1024; /* 256MB */
+
+/*
  * map_addr_proc() is the routine called when the system is to
  * choose an address for the user.  We will pick an address
  * range which is just below the current stack limit.  The
  * algorithm used for cache consistency on machines with virtual
  * address caches is such that offset 0 in the vnode is always

@@ -263,10 +271,11 @@
          * Look for a large enough hole starting below the stack limit.
          * After finding it, use the upper part.
          */
         as_purge(as);
         off = off & (align_amount - 1);
+
         if (as_gap_aligned(as, len, &base, &slen, AH_HI, NULL, align_amount,
             PAGESIZE, off) == 0) {
                 caddr_t as_addr;
 
                 /*

@@ -286,10 +295,36 @@
                 addr += (long)off;
                 if (addr > as_addr) {
                         addr -= align_amount;
                 }
 
+                /*
+                 * If randomization is requested, slew the allocation
+                 * backwards, within the same gap, by a random amount.
+                 *
+                 * XXX: This will fall over in processes like Java, which
+                 * commonly have a great many small mappings.
+                 */
+                if (flags & _MAP_RANDOMIZE) {
+                        uint32_t slew;
+                        uint32_t maxslew;
+
+                        (void) random_get_pseudo_bytes((uint8_t *)&slew,
+                            sizeof (slew));
+
+                        maxslew = MIN(aslr_max_map_skew, (addr - base));
+#if defined(__sparcv9) && defined(SF_ERRATA_57)
+                        /*
+                         * Don't allow ASLR to slew the mapping below
+                         * errata57_limit and fail (SF erratum #57).
+                         */
+                        maxslew = MIN(maxslew, (addr - errata57_limit));
+#endif
+                        slew = slew % maxslew;
+                        addr -= P2ALIGN(slew, align_amount);
+                }
+
                 ASSERT(addr > base);
                 ASSERT(addr + len < base + slen);
                 ASSERT(((uintptr_t)addr & (align_amount - 1l)) ==
                     ((uintptr_t)(off)));
                 *addrp = addr;

@@ -347,11 +382,5 @@
 contig_mem_prealloc(caddr_t alloc_base, pgcnt_t npages)
 {
         /* not applicable to sun4u */
         return (alloc_base);
 }
-
-size_t
-exec_get_spslew(void)
-{
-        return (0);
-}
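
For reference, the slewing arithmetic added to map_addr_proc() above can be
illustrated with a small stand-alone sketch: draw a random slew, cap it at
both aslr_max_map_skew and the room left in the gap below the candidate
address, round it down to the mapping alignment, and subtract it.  This is a
user-space illustration only; the local MIN and P2ALIGN definitions, the use
of rand() in place of random_get_pseudo_bytes(), and the example addresses
are assumptions made for the sketch, not kernel interfaces.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <time.h>

#define	MIN(a, b)	((a) < (b) ? (a) : (b))
/* Round x down to a power-of-two boundary, as the sysmacros.h P2ALIGN() does. */
#define	P2ALIGN(x, align)	((x) & -(align))

int
main(void)
{
	uint64_t base = 0x7f0000000000ULL;	/* bottom of the free gap */
	uint64_t addr = 0x7fffc0000000ULL;	/* candidate, un-slewed address */
	uint64_t align_amount = 0x400000;	/* e.g. a 4MB large-page alignment */
	size_t aslr_max_map_skew = 256 * 1024 * 1024;	/* 256MB cap */
	uint32_t slew, maxslew;

	srand((unsigned)time(NULL));
	slew = (uint32_t)rand();	/* stand-in for random_get_pseudo_bytes() */

	/* Cap the slew at the tunable maximum and at the room left in the gap. */
	maxslew = MIN(aslr_max_map_skew, addr - base);
	slew = slew % maxslew;

	/* Slew backwards, preserving the alignment of the original address. */
	addr -= P2ALIGN((uint64_t)slew, align_amount);

	(void) printf("slewed address: 0x%" PRIx64 "\n", addr);
	return (0);
}

Capping the slew before aligning it keeps the slewed address inside the gap
found by as_gap_aligned(), and subtracting a multiple of align_amount
preserves the original alignment, which is what the ASSERTs following the
new code check.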