7029 want per-process exploit mitigation features (secflags)
7030 want basic address space layout randomization (aslr)
7031 noexec_user_stack should be a secflag
7032 want a means to forbid mappings around NULL.

*** 67,76 ****
--- 67,77 ----
  #include <sys/lwpchan_impl.h>
  #include <sys/pool.h>
  #include <sys/sdt.h>
  #include <sys/brand.h>
  #include <sys/klpd.h>
+ #include <sys/random.h>
  
  #include <c2/audit.h>
  
  #include <vm/hat.h>
  #include <vm/anon.h>
*** 97,106 ****
--- 98,122 ----
  #endif
  
  #define PSUIDFLAGS      (SNOCD|SUGID)
  
  /*
+  * These are consumed within the specific exec modules, but are defined here
+  * because
+  *
+  * 1) The exec modules are unloadable, which would make this near useless.
+  *
+  * 2) We want them to be common across all of them, should more than ELF come
+  *    to support them.
+  *
+  * All must be powers of 2.
+  */
+ size_t aslr_max_brk_skew = 16 * 1024 * 1024; /* 16MB */
+ #pragma weak exec_stackgap = aslr_max_stack_skew /* Old, compatible name */
+ size_t aslr_max_stack_skew = 64 * 1024; /* 64KB */
+ 
+ /*
   * exece() - system call wrapper around exec_common()
   */
  int
  exece(const char *fname, const char **argp, const char **envp)
  {
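The power-of-2 requirement on these maxima is what lets the exec modules reduce a raw random value into [0, max) with a cheap mask rather than a modulo. Below is a minimal userland sketch of that reduction, using local stand-ins for the kernel's ISP2() and P2PHASE() macros; it is illustrative only, not the kernel code.

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for the sysmacros ISP2() and P2PHASE() semantics. */
#define ISP2(x)                 (((x) & ((x) - 1)) == 0)
#define P2PHASE(x, align)       ((x) & ((align) - 1))

int
main(void)
{
        size_t max_skew = 16 * 1024 * 1024;     /* aslr_max_brk_skew, 16MB */
        size_t raw = ((size_t)rand() << 16) ^ (size_t)rand();

        /* The power-of-2 constraint is what makes the mask reduction valid. */
        assert(ISP2(max_skew));
        printf("candidate brk skew: 0x%zx\n", P2PHASE(raw, max_skew));
        return (0);
}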
*** 558,567 ****
--- 574,586 ----
          int setid;
          cred_t *oldcred, *newcred = NULL;
          int privflags = 0;
          int setidfl;
          priv_set_t fset;
+         secflagset_t old_secflags;
+ 
+         secflags_copy(&old_secflags, &pp->p_secflags.psf_effective);
  
          /*
           * If the SNOCD or SUGID flag is set, turn it off and remember the
           * previous setting so we can restore it if we encounter an error.
           */
*** 658,667 ****
--- 677,689 ----
                  priv_intersect(&CR_LPRIV(cred), &CR_IPRIV(cred));
                  CR_EPRIV(cred) = CR_PPRIV(cred) = CR_IPRIV(cred);
                  priv_adjust_PA(cred);
          }
  
+         /* The new image gets the inheritable secflags as its secflags */
+         secflags_promote(pp);
+ 
          /* SunOS 4.x buy-back */
          if ((vp->v_vfsp->vfs_flag & VFS_NOSETUID) &&
              (vattr.va_mode & (VSUID|VSGID))) {
                  char path[MAXNAMELEN];
                  refstr_t *mntpt = NULL;
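A rough userland model of what the new comment describes: at exec, the process's inheritable secflag set becomes the effective set of the new image, so the effective flags of a running image never change afterwards. The struct layout and the psf_inherit field below are assumptions for illustration only; the real secflags_promote() lives in the kernel's secflags code, which is not part of this hunk.

#include <stdint.h>
#include <stdio.h>

typedef uint32_t secflagset_t;          /* stand-in for the kernel type */

typedef struct psecflags {
        secflagset_t psf_effective;     /* enforced on the current image */
        secflagset_t psf_inherit;       /* what the next exec'd image gets (assumed name) */
} psecflags_t;

static void
secflags_promote_model(psecflags_t *psf)
{
        /* At exec, the inheritable set is promoted to the effective set. */
        psf->psf_effective = psf->psf_inherit;
}

int
main(void)
{
        psecflags_t psf = { 0, 0x1 };   /* e.g. a flag requested for the next exec */

        secflags_promote_model(&psf);
        printf("effective is now 0x%x\n", psf.psf_effective);
        return (0);
}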
*** 718,728 ****
          /*
           * Use /etc/system variable to determine if the stack
           * should be marked as executable by default.
           */
!         if (noexec_user_stack)
                  args->stk_prot &= ~PROT_EXEC;
  
          args->execswp = eswp;   /* Save execsw pointer in uarg for exec_func */
  
          args->ex_vp = vp;
--- 740,751 ----
          /*
           * Use /etc/system variable to determine if the stack
           * should be marked as executable by default.
           */
!         if ((noexec_user_stack != 0) ||
!             secflag_enabled(pp, PROC_SEC_NOEXECSTACK))
                  args->stk_prot &= ~PROT_EXEC;
  
          args->execswp = eswp;   /* Save execsw pointer in uarg for exec_func */
  
          args->ex_vp = vp;
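In userland terms, the changed test means either the system-wide /etc/system tunable or the new per-process secflag strips exec permission from the initial stack mapping. The function and integer flags below are hypothetical stand-ins for the kernel's noexec_user_stack variable and the secflag_enabled() check; only the control flow is being illustrated.

#include <sys/mman.h>   /* PROT_READ, PROT_WRITE, PROT_EXEC */
#include <stdio.h>

static int
initial_stack_prot(int noexec_user_stack, int sec_noexecstack)
{
        int stk_prot = PROT_READ | PROT_WRITE | PROT_EXEC;

        /* Either the global tunable or the per-process flag wins. */
        if (noexec_user_stack != 0 || sec_noexecstack != 0)
                stk_prot &= ~PROT_EXEC;

        return (stk_prot);
}

int
main(void)
{
        printf("default:        %#x\n", initial_stack_prot(0, 0));
        printf("secflag set:    %#x\n", initial_stack_prot(0, 1));
        printf("global tunable: %#x\n", initial_stack_prot(1, 0));
        return (0);
}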
*** 804,814 ****
          oldcred = curthread->t_cred;
          curthread->t_cred = cred;
          crfree(oldcred);
  
          if (priv_basic_test >= 0 &&
!             !PRIV_ISASSERT(&CR_IPRIV(newcred), priv_basic_test)) {
                  pid_t pid = pp->p_pid;
                  char *fn = PTOU(pp)->u_comm;
  
                  cmn_err(CE_WARN, "%s[%d]: exec: basic_test "
--- 827,837 ----
          oldcred = curthread->t_cred;
          curthread->t_cred = cred;
          crfree(oldcred);
  
          if (priv_basic_test >= 0 &&
!             !PRIV_ISMEMBER(&CR_IPRIV(newcred), priv_basic_test)) {
                  pid_t pid = pp->p_pid;
                  char *fn = PTOU(pp)->u_comm;
  
                  cmn_err(CE_WARN, "%s[%d]: exec: basic_test "
*** 874,888 ****
          if (newcred != NULL)
                  crfree(newcred);
          if (error == 0)
                  error = ENOEXEC;
  
-         if (suidflags) {
                  mutex_enter(&pp->p_lock);
                  pp->p_flag |= suidflags;
-                 mutex_exit(&pp->p_lock);
          }
          return (error);
  }
  
  extern char *execswnames[];
--- 897,917 ----
          if (newcred != NULL)
                  crfree(newcred);
          if (error == 0)
                  error = ENOEXEC;
  
          mutex_enter(&pp->p_lock);
+         if (suidflags) {
                  pp->p_flag |= suidflags;
          }
+ 
+         /*
+          * Restore the effective secflags, to maintain the invariant they
+          * never change for a given process
+          */
+         secflags_copy(&pp->p_secflags.psf_effective, &old_secflags);
+ 
+         mutex_exit(&pp->p_lock);
+ 
          return (error);
  }
  
  extern char *execswnames[];
*** 1785,1794 ****
--- 1814,1861 ----
          return (0);
  }
  
  /*
+  * Though the actual stack base is constant, slew the %sp by a random aligned
+  * amount in [0,aslr_max_stack_skew).  Mostly, this makes life slightly more
+  * complicated for buffer overflows hoping to overwrite the return address.
+  *
+  * On some platforms this helps avoid cache thrashing when identical processes
+  * simultaneously share caches that don't provide enough associativity
+  * (e.g. sun4v systems).  In this case stack slewing makes the same hot stack
+  * variables in different processes live in different cache sets increasing
+  * effective associativity.
+  */
+ size_t
+ exec_get_spslew(void)
+ {
+ #ifdef sun4v
+         static uint_t sp_color_stride = 16;
+         static uint_t sp_color_mask = 0x1f;
+         static uint_t sp_current_color = (uint_t)-1;
+ #endif
+         size_t off;
+ 
+         ASSERT(ISP2(aslr_max_stack_skew));
+ 
+         if ((aslr_max_stack_skew == 0) ||
+             !secflag_enabled(curproc, PROC_SEC_ASLR)) {
+ #ifdef sun4v
+                 uint_t spcolor = atomic_inc_32_nv(&sp_current_color);
+                 return ((size_t)((spcolor & sp_color_mask) *
+                     SA(sp_color_stride)));
+ #else
+                 return (0);
+ #endif
+         }
+ 
+         (void) random_get_pseudo_bytes((uint8_t *)&off, sizeof (off));
+         return (SA(P2PHASE(off, aslr_max_stack_skew)));
+ }
+ 
+ /*
   * Initialize a new user stack with the specified arguments and environment.
   * The initial user stack layout is as follows:
   *
   *      User Stack
   *      +---------------+ <--- curproc->p_usrstack
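For reference, a userland model of the ASLR-enabled path above: draw random bytes, reduce them into [0, aslr_max_stack_skew) with a power-of-2 mask, then round to stack alignment. The STACK_ALIGN value, the SA()/P2PHASE() stand-ins, and the use of rand() are assumptions for illustration; the sun4v colouring branch is deliberately omitted.

#include <stdio.h>
#include <stdlib.h>

#define STACK_ALIGN     16              /* assumed; the real value is per-ISA */
#define SA(x)           (((x) + STACK_ALIGN - 1) & ~((size_t)STACK_ALIGN - 1))
#define P2PHASE(x, a)   ((x) & ((a) - 1))

static size_t aslr_max_stack_skew = 64 * 1024;  /* 64KB, as in the patch */

/* aslr_enabled stands in for secflag_enabled(curproc, PROC_SEC_ASLR). */
static size_t
get_spslew_model(int aslr_enabled)
{
        size_t off;

        if (aslr_max_stack_skew == 0 || !aslr_enabled)
                return (0);     /* no slew (sun4v cache colouring omitted) */

        off = ((size_t)rand() << 16) ^ (size_t)rand();  /* vs. random_get_pseudo_bytes() */
        return (SA(P2PHASE(off, aslr_max_stack_skew)));
}

int
main(void)
{
        printf("aslr off: %zu\n", get_spslew_model(0));
        printf("aslr on:  %zu\n", get_spslew_model(1));
        return (0);
}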
*** 2014,2034 ****
          mutex_enter(&p->p_lock);
          p->p_flag |= SAUTOLPG; /* kernel controls page sizes */
          mutex_exit(&p->p_lock);
  
-         /*
-          * Some platforms may choose to randomize real stack start by adding a
-          * small slew (not more than a few hundred bytes) to the top of the
-          * stack. This helps avoid cache thrashing when identical processes
-          * simultaneously share caches that don't provide enough associativity
-          * (e.g. sun4v systems). In this case stack slewing makes the same hot
-          * stack variables in different processes to live in different cache
-          * sets increasing effective associativity.
-          */
          sp_slew = exec_get_spslew();
          ASSERT(P2PHASE(sp_slew, args->stk_align) == 0);
          exec_set_sp(size + sp_slew);
  
          as = as_alloc();
          p->p_as = as;
          as->a_proc = p;
--- 2081,2094 ----
          mutex_enter(&p->p_lock);
          p->p_flag |= SAUTOLPG; /* kernel controls page sizes */
          mutex_exit(&p->p_lock);
  
          sp_slew = exec_get_spslew();
          ASSERT(P2PHASE(sp_slew, args->stk_align) == 0);
+         /* Be certain we don't underflow */
+         VERIFY((curproc->p_usrstack - (size + sp_slew)) < curproc->p_usrstack);
          exec_set_sp(size + sp_slew);
  
          as = as_alloc();
          p->p_as = as;
          as->a_proc = p;
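The added VERIFY() relies on unsigned arithmetic: (top - len) < top holds exactly when 0 < len <= top, so a slewed stack size large enough to wrap below address zero fails the check. A small hypothetical demonstration of the idiom:

#include <stdint.h>
#include <stdio.h>

/*
 * (top - len) < top is true only for 0 < len <= top; len == 0 or a
 * wrapping subtraction (len > top) makes it false, which is what the
 * kernel's VERIFY() catches.
 */
static int
sp_would_underflow(uintptr_t top, size_t len)
{
        return (!((top - len) < top));
}

int
main(void)
{
        uintptr_t top = 0x8048000;

        printf("len = 4096:    underflow? %d\n", sp_would_underflow(top, 4096));
        printf("len = top + 1: underflow? %d\n", sp_would_underflow(top, (size_t)top + 1));
        return (0);
}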