Print this page
Code review comments from pmooney (sundry), and igork (screwups in zonecfg refactoring)
7029 want per-process exploit mitigation features (secflags)
7030 want basic address space layout randomization (aslr)
7031 noexec_user_stack should be a secflag
7032 want a means to forbid mappings around NULL.
*** 44,53 ****
--- 44,54 ----
#include <sys/archsystm.h>
#include <vm/hat_sfmmu.h>
#include <sys/memnode.h>
#include <sys/mem_cage.h>
#include <vm/vm_dep.h>
+ #include <sys/random.h>
#if defined(__sparcv9) && defined(SF_ERRATA_57)
caddr_t errata57_limit;
#endif
*** 135,144 ****
--- 136,152 ----
max_shm_lpsize = ismpagesize;
}
}
/*
+ * The maximum number of bytes by which a randomized mapping may be slewed
+ * downward.  We should perhaps arrange things so that this tunable can be
+ * set separately for mmap, mmapobj, and ld.so.
+ */
+ size_t aslr_max_map_skew = 256 * 1024 * 1024; /* 256MB */
+
+ /*
* map_addr_proc() is the routine called when the system is to
* choose an address for the user. We will pick an address
* range which is just below the current stack limit. The
* algorithm used for cache consistency on machines with virtual
* address caches is such that offset 0 in the vnode is always
*** 263,272 ****
--- 271,281 ----
* Look for a large enough hole starting below the stack limit.
* After finding it, use the upper part.
*/
as_purge(as);
off = off & (align_amount - 1);
+
if (as_gap_aligned(as, len, &base, &slen, AH_HI, NULL, align_amount,
PAGESIZE, off) == 0) {
caddr_t as_addr;
/*
*** 286,295 ****
--- 295,326 ----
addr += (long)off;
if (addr > as_addr) {
addr -= align_amount;
}
+ /*
+ * If randomization was requested, slew the chosen address
+ * downward, staying within the same gap, by a random amount.
+ * NOTE(review): confirm that maxslew below can never be zero
+ * (e.g. addr == base or addr == errata57_limit); if it can,
+ * "slew % maxslew" is a divide-by-zero.
+ */
+ if (flags & _MAP_RANDOMIZE) {
+ uint32_t slew;
+ uint32_t maxslew;
+
+ (void) random_get_pseudo_bytes((uint8_t *)&slew,
+ sizeof (slew));
+
+ maxslew = MIN(aslr_max_map_skew, (addr - base));
+ /*
+ * Don't allow ASLR to slew the mapping below
+ * errata57_limit, where it would fail because of
+ * SF erratum #57.
+ * NOTE(review): errata57_limit is declared only under
+ * defined(__sparcv9) && defined(SF_ERRATA_57); this use
+ * likely needs the same #if guard to build without it.
+ */
+ maxslew = MIN(maxslew, (addr - errata57_limit));
+
+ slew = slew % maxslew;
+ addr -= P2ALIGN(slew, align_amount);
+ }
+
ASSERT(addr > base);
ASSERT(addr + len < base + slen);
ASSERT(((uintptr_t)addr & (align_amount - 1l)) ==
((uintptr_t)(off)));
*addrp = addr;
*** 347,357 ****
contig_mem_prealloc(caddr_t alloc_base, pgcnt_t npages)
{
/* not applicable to sun4u */
return (alloc_base);
}
-
- size_t
- exec_get_spslew(void)
- {
- return (0);
- }
--- 378,382 ----