7029 want per-process exploit mitigation features (secflags)
7030 want basic address space layout randomization (aslr)
7031 noexec_user_stack should be a secflag
7032 want a means to forbid mappings around NULL.

*** 66,77 ****
   * and Libraries Guide" for more information about the standard and mapping
   * rules.
   *
   * Having mmapobj interpret and map objects will allow the kernel to make the
   * best decision for where to place the mappings for said objects. Thus, we
! * can make optimizations inside of the kernel for specific platforms or
! * cache mapping information to make mapping objects faster.
   *
   * The lib_va_hash will be one such optimization. For each ELF object that
   * mmapobj is asked to interpret, we will attempt to cache the information
   * about the PT_LOAD and PT_SUNWBSS sections to speed up future mappings of
   * the same objects. We will cache up to LIBVA_CACHED_SEGS (see below) program
--- 66,78 ----
   * and Libraries Guide" for more information about the standard and mapping
   * rules.
   *
   * Having mmapobj interpret and map objects will allow the kernel to make the
   * best decision for where to place the mappings for said objects. Thus, we
! * can make optimizations inside of the kernel for specific platforms or cache
! * mapping information to make mapping objects faster. The cache is ignored
! * if ASLR is enabled.
   *
   * The lib_va_hash will be one such optimization. For each ELF object that
   * mmapobj is asked to interpret, we will attempt to cache the information
   * about the PT_LOAD and PT_SUNWBSS sections to speed up future mappings of
   * the same objects. We will cache up to LIBVA_CACHED_SEGS (see below) program
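In short, the lib_va cache is bypassed for any process running with ASLR, since handing back a remembered base address would defeat randomization. A minimal userland sketch of that gating policy follows; aslr_enabled and cache_lookup are illustrative stand-ins, not the kernel's interfaces (the kernel uses secflag_enabled(curproc, PROC_SEC_ASLR) and lib_va_find()).

#include <assert.h>
#include <stdio.h>

/* Illustrative stand-in for a cached lib_va entry. */
struct lib_va {
	const char *lv_name;
};

static struct lib_va cached = { "libc.so.1" };

/* Model of lib_va_find(): return a cached entry, if any. */
static struct lib_va *
cache_lookup(void)
{
	return (&cached);
}

/*
 * Model of the new policy: with ASLR enabled, skip the cache entirely so
 * every mapping gets a freshly chosen base, never a remembered one.
 */
static struct lib_va *
choose_mapping(int aslr_enabled)
{
	if (aslr_enabled)
		return (NULL);	/* fall through to the uncached path */
	return (cache_lookup());
}

int
main(void)
{
	assert(choose_mapping(1) == NULL);
	assert(choose_mapping(0) != NULL);
	(void) printf("cache is bypassed under ASLR\n");
	return (0);
}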
*** 716,726 ****
   * The caller is responsible for doing this making sure that any modifications
   * to lv_mps are visible before setting lv_num_segs.
   */
  static caddr_t
  mmapobj_alloc_start_addr(struct lib_va **lvpp, size_t len, int use_lib_va,
!     size_t align, vattr_t *vap)
  {
  	proc_t *p = curproc;
  	struct as *as = p->p_as;
  	struct segvn_crargs crargs = SEGVN_ZFOD_ARGS(PROT_USER, PROT_ALL);
  	int error;
--- 717,727 ----
   * The caller is responsible for doing this making sure that any modifications
   * to lv_mps are visible before setting lv_num_segs.
   */
  static caddr_t
  mmapobj_alloc_start_addr(struct lib_va **lvpp, size_t len, int use_lib_va,
!     int randomize, size_t align, vattr_t *vap)
  {
  	proc_t *p = curproc;
  	struct as *as = p->p_as;
  	struct segvn_crargs crargs = SEGVN_ZFOD_ARGS(PROT_USER, PROT_ALL);
  	int error;
*** 731,740 ****
--- 732,742 ----
  	size_t lib_va_start;
  	size_t lib_va_end;
  	size_t lib_va_len;
  
  	ASSERT(lvpp != NULL);
+ 	ASSERT((randomize & use_lib_va) != 1);
  
  	MOBJ_STAT_ADD(alloc_start);
  	model = get_udatamodel();
  
  	if (model == DATAMODEL_LP64) {
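The new ASSERT() encodes the invariant that a mapping is never both randomized and cache-placed: treating the two ints as 0/1 booleans, (randomize & use_lib_va) is 1 exactly when both are 1. Note the check relies on the callers passing canonical 0/1 values. A small standalone check of the truth table, under that assumption:

#include <assert.h>

int
main(void)
{
	int randomize, use_lib_va;

	for (randomize = 0; randomize <= 1; randomize++) {
		for (use_lib_va = 0; use_lib_va <= 1; use_lib_va++) {
			/* Only the (1, 1) combination trips the ASSERT. */
			assert(((randomize & use_lib_va) == 1) ==
			    (randomize == 1 && use_lib_va == 1));
		}
	}
	return (0);
}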
*** 746,755 ****
--- 748,761 ----
  	}
  
  	if (align > 1) {
  		ma_flags |= MAP_ALIGN;
  	}
+ 
+ 	if (randomize != 0)
+ 		ma_flags |= _MAP_RANDOMIZE;
+ 
  	if (use_lib_va) {
  		/*
  		 * The first time through, we need to setup the lib_va arenas.
  		 * We call map_addr to find a suitable range of memory to map
  		 * the given library, and we will set the highest address
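The randomization request travels to the platform's address-selection code as one more bit in ma_flags, alongside MAP_ALIGN. A sketch of that flag composition; the flag values below are made up for illustration only (the real definitions live in the system headers):

#include <stdio.h>

/* Hypothetical flag values, for illustration only. */
#define	MAP_ALIGN	0x200
#define	_MAP_RANDOMIZE	0x800

int
main(void)
{
	int ma_flags = 0;
	size_t align = 4096;
	int randomize = 1;

	/* Compose the request exactly as the hunk above does. */
	if (align > 1)
		ma_flags |= MAP_ALIGN;
	if (randomize != 0)
		ma_flags |= _MAP_RANDOMIZE;

	(void) printf("ma_flags = 0x%x\n", ma_flags);
	return (0);
}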
*** 859,869 ****
--- 865,882 ----
  
  	/*
  	 * If we don't have an expected base address, or the one that we want
  	 * to use is not available or acceptable, go get an acceptable
  	 * address range.
+ 	 *
+ 	 * If ASLR is enabled, we should never have used the cache, and should
+ 	 * also start our real work here, in the consequent of the next
+ 	 * condition.
+ 	 */
+ 	if (randomize != 0)
+ 		ASSERT(base == NULL);
+ 
  	if (base == NULL || as_gap(as, len, &base, &len, 0, NULL) ||
  	    valid_usr_range(base, len, PROT_ALL, as, as->a_userlimit) !=
  	    RANGE_OKAY || OVERLAPS_STACK(base + len, p)) {
  		MOBJ_STAT_ADD(get_addr);
  		base = (caddr_t)align;
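"The consequent of the next condition" is the if-body below the comment: with randomization requested, no cached or hinted base may survive to this point (base must be NULL), so the first disjunct is always true and address selection always starts from scratch. A sketch of that control flow, where pick_fresh_range() is a hypothetical stand-in for the as_gap()/map_addr() machinery:

#include <assert.h>
#include <stdio.h>

/* Hypothetical stand-in for the kernel's fresh address selection. */
static void *
pick_fresh_range(void)
{
	return ((void *)0x10000);
}

static void *
select_base(void *base, int randomize)
{
	/* Under randomization, no remembered base may reach this point. */
	if (randomize != 0)
		assert(base == NULL);

	/* The "consequent": always taken when base is NULL. */
	if (base == NULL)
		base = pick_fresh_range();
	return (base);
}

int
main(void)
{
	(void) printf("%p\n", select_base(NULL, 1));
	return (0);
}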
*** 1523,1533 ****
   * Walk through the ELF program headers and extract all useful information
   * for PT_LOAD and PT_SUNWBSS segments into mrp.
   * Return 0 on success or error on failure.
   */
  static int
! process_phdr(Ehdr *ehdrp, caddr_t phdrbase, int nphdrs, mmapobj_result_t *mrp,
      vnode_t *vp, uint_t *num_mapped, size_t padding, cred_t *fcred)
  {
  	int i;
  	caddr_t start_addr = NULL;
  	caddr_t vaddr;
--- 1536,1546 ----
   * Walk through the ELF program headers and extract all useful information
   * for PT_LOAD and PT_SUNWBSS segments into mrp.
   * Return 0 on success or error on failure.
   */
  static int
! process_phdrs(Ehdr *ehdrp, caddr_t phdrbase, int nphdrs, mmapobj_result_t *mrp,
      vnode_t *vp, uint_t *num_mapped, size_t padding, cred_t *fcred)
  {
  	int i;
  	caddr_t start_addr = NULL;
  	caddr_t vaddr;
*** 1579,1598 ****
  			MOBJ_STAT_ADD(phent_align32);
  			return (ENOTSUP);
  		}
  	}
  
! 	if (padding != 0) {
  		use_lib_va = 0;
  	}
  	if (e_type == ET_DYN) {
  		vattr.va_mask = AT_FSID | AT_NODEID | AT_CTIME | AT_MTIME;
  		error = VOP_GETATTR(vp, &vattr, 0, fcred, NULL);
  		if (error) {
  			return (error);
  		}
  
  		/* Check to see if we already have a description for this lib */
  		lvp = lib_va_find(&vattr);
  		if (lvp != NULL) {
  			MOBJ_STAT_ADD(lvp_found);
  			if (use_lib_va) {
--- 1592,1612 ----
  			MOBJ_STAT_ADD(phent_align32);
  			return (ENOTSUP);
  		}
  	}
  
! 	if ((padding != 0) || secflag_enabled(curproc, PROC_SEC_ASLR)) {
  		use_lib_va = 0;
  	}
  	if (e_type == ET_DYN) {
  		vattr.va_mask = AT_FSID | AT_NODEID | AT_CTIME | AT_MTIME;
  		error = VOP_GETATTR(vp, &vattr, 0, fcred, NULL);
  		if (error) {
  			return (error);
  		}
  
  		/* Check to see if we already have a description for this lib */
+ 		if (!secflag_enabled(curproc, PROC_SEC_ASLR))
  		lvp = lib_va_find(&vattr);
  		if (lvp != NULL) {
  			MOBJ_STAT_ADD(lvp_found);
  			if (use_lib_va) {
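Two separate gates result from this hunk: padding or ASLR prevents caching a new lib_va entry (use_lib_va is cleared), while ASLR additionally skips the lookup of an existing entry, leaving lvp NULL so the cached fast path can never be taken. A sketch of the combined decision, with illustrative names standing in for the kernel's:

#include <stdbool.h>
#include <stdio.h>

/*
 * Model of the two gates in process_phdrs(): padding or ASLR disables
 * caching a new entry, and ASLR also skips looking up existing entries.
 */
static void
decide(size_t padding, bool aslr)
{
	bool use_lib_va = true;
	bool do_lookup = true;

	if (padding != 0 || aslr)
		use_lib_va = false;
	if (aslr)
		do_lookup = false;

	(void) printf("padding=%zu aslr=%d -> cache new: %d, use cached: %d\n",
	    padding, aslr, use_lib_va, do_lookup);
}

int
main(void)
{
	decide(0, false);	/* normal fast path */
	decide(4096, false);	/* padding alone disables caching */
	decide(0, true);	/* ASLR disables both */
	return (0);
}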
*** 1699,1709 ****
  			 * to call mmapobj_alloc_start_addr and know that lvp
  			 * will not be modified.
  			 */
  			ASSERT(lvp ? use_lib_va == 0 : 1);
  			start_addr = mmapobj_alloc_start_addr(&lvp, len,
! 			    use_lib_va, align, &vattr);
  			if (start_addr == NULL) {
  				if (lvp) {
  					lib_va_release(lvp);
  				}
  				MOBJ_STAT_ADD(alloc_start_fail);
--- 1713,1725 ----
  			 * to call mmapobj_alloc_start_addr and know that lvp
  			 * will not be modified.
  			 */
  			ASSERT(lvp ? use_lib_va == 0 : 1);
  			start_addr = mmapobj_alloc_start_addr(&lvp, len,
! 			    use_lib_va,
! 			    secflag_enabled(curproc, PROC_SEC_ASLR),
! 			    align, &vattr);
  			if (start_addr == NULL) {
  				if (lvp) {
  					lib_va_release(lvp);
  				}
  				MOBJ_STAT_ADD(alloc_start_fail);
*** 2024,2034 ****
  		kmem_free(phbasep, phsizep);
  		return (error);
  	}
  
  	/* Now process the phdr's */
! 	error = process_phdr(ehdrp, phbasep, nphdrs, mrp, vp, num_mapped,
  	    padding, fcred);
  	kmem_free(phbasep, phsizep);
  	return (error);
  }
  
--- 2040,2050 ----
  		kmem_free(phbasep, phsizep);
  		return (error);
  	}
  
  	/* Now process the phdr's */
! 	error = process_phdrs(ehdrp, phbasep, nphdrs, mrp, vp, num_mapped,
  	    padding, fcred);
  	kmem_free(phbasep, phsizep);
  	return (error);
  }
  
*** 2310,2320 ****
  	/*
  	 * Check lib_va to see if we already have a full description
  	 * for this library. This is the fast path and only used for
  	 * ET_DYN ELF files (dynamic libraries).
  	 */
! 	if (padding == 0 && (lvp = lib_va_find(&vattr)) != NULL) {
  		int num_segs;
  
  		model = get_udatamodel();
  		if ((model == DATAMODEL_ILP32 &&
  		    lvp->lv_flags & LV_ELF64) ||
--- 2326,2337 ----
  	/*
  	 * Check lib_va to see if we already have a full description
  	 * for this library. This is the fast path and only used for
  	 * ET_DYN ELF files (dynamic libraries).
  	 */
! 	if (padding == 0 && !secflag_enabled(curproc, PROC_SEC_ASLR) &&
! 	    ((lvp = lib_va_find(&vattr)) != NULL)) {
  		int num_segs;
  
  		model = get_udatamodel();
  		if ((model == DATAMODEL_ILP32 &&
  		    lvp->lv_flags & LV_ELF64) ||
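Because && short-circuits, lib_va_find() is never even called when the secflag is set, so the ASLR'd process takes no reference on a cache entry that would later need releasing. A compact model of that behavior, where find_cached() is a hypothetical stand-in that counts calls:

#include <stdbool.h>
#include <stdio.h>

static int lookups;

/* Stand-in for lib_va_find(); counts calls to show short-circuiting. */
static bool
find_cached(void)
{
	lookups++;
	return (true);
}

int
main(void)
{
	bool aslr = true;
	size_t padding = 0;

	/* As in the hunk above: the lookup never runs under ASLR. */
	if (padding == 0 && !aslr && find_cached())
		(void) printf("fast path\n");

	(void) printf("lookups = %d\n", lookups);	/* prints 0 */
	return (0);
}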