Code review comments from pmooney (sundry), and igork (screwups in zonecfg refactoring)
7029 want per-process exploit mitigation features (secflags)
7030 want basic address space layout randomization (aslr)
7031 noexec_user_stack should be a secflag
7032 want a means to forbid mappings around NULL.
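
As background for the diff below: ASLR is a per-process secflag, and the
mapping paths are expected to translate that flag into the _MAP_RANDOMIZE
mapping flag which map_addr_proc() honors.  A minimal sketch of such a gate,
assuming the secflag_enabled() and PROC_SEC_ASLR interfaces this changeset
introduces (the helper name map_flags_for_proc() is hypothetical):

    #include <sys/types.h>
    #include <sys/proc.h>
    #include <sys/mman.h>
    #include <sys/secflags.h>

    /*
     * Hypothetical helper: request randomized placement when the process
     * has opted into ASLR, so that map_addr_proc() will slew the chosen
     * address backwards by a random amount.
     */
    static uint_t
    map_flags_for_proc(proc_t *p, uint_t flags)
    {
            if (secflag_enabled(p, PROC_SEC_ASLR))
                    flags |= _MAP_RANDOMIZE;
            return (flags);
    }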

          --- old/usr/src/uts/sun4u/vm/mach_vm_dep.c
          +++ new/usr/src/uts/sun4u/vm/mach_vm_dep.c
[ 38 lines elided ]
 #include <sys/exec.h>
 #include <sys/cmn_err.h>
 #include <sys/cpu_module.h>
 #include <sys/cpu.h>
 #include <sys/elf_SPARC.h>
 #include <sys/archsystm.h>
 #include <vm/hat_sfmmu.h>
 #include <sys/memnode.h>
 #include <sys/mem_cage.h>
 #include <vm/vm_dep.h>
+#include <sys/random.h>
 
 #if defined(__sparcv9) && defined(SF_ERRATA_57)
 caddr_t errata57_limit;
 #endif
 
 uint_t page_colors = 0;
 uint_t page_colors_mask = 0;
 uint_t page_coloring_shift = 0;
 int consistent_coloring;
 int update_proc_pgcolorbase_after_fork = 0;
[ 71 lines elided ]
         }
         if (max_privmap_lpsize == MMU_PAGESIZE4M) {
                 max_privmap_lpsize = ismpagesize;
         }
         if (max_shm_lpsize == MMU_PAGESIZE4M) {
                 max_shm_lpsize = ismpagesize;
         }
 }
 
 /*
+ * The maximum amount a randomized mapping will be slewed.  We should perhaps
+ * arrange things so these tunables can be separate for mmap, mmapobj, and
+ * ld.so.
+ */
+size_t aslr_max_map_skew = 256 * 1024 * 1024; /* 256MB */
+
+/*
  * map_addr_proc() is the routine called when the system is to
  * choose an address for the user.  We will pick an address
  * range which is just below the current stack limit.  The
  * algorithm used for cache consistency on machines with virtual
  * address caches is such that offset 0 in the vnode is always
  * on a shm_alignment'ed aligned address.  Unfortunately, this
  * means that vnodes which are demand paged will not be mapped
  * cache consistently with the executable images.  When the
  * cache alignment for a given object is inconsistent, the
  * lower level code must manage the translations so that this
[ 108 lines elided ]
 
         ASSERT(ISP2(align_amount));
         ASSERT(align_amount == 0 || align_amount >= PAGESIZE);
 
         /*
          * Look for a large enough hole starting below the stack limit.
          * After finding it, use the upper part.
          */
         as_purge(as);
         off = off & (align_amount - 1);
+
         if (as_gap_aligned(as, len, &base, &slen, AH_HI, NULL, align_amount,
             PAGESIZE, off) == 0) {
                 caddr_t as_addr;
 
                 /*
                  * addr is the highest possible address to use since we have
                  * a PAGESIZE redzone at the beginning and end.
                  */
                 addr = base + slen - (PAGESIZE + len);
                 as_addr = addr;
[ 3 lines elided ]
                  * If addr is greater than as_addr, len would not be large
                  * enough to include the redzone, so we must adjust down
                  * by the alignment amount.
                  */
                 addr = (caddr_t)((uintptr_t)addr & (~(align_amount - 1l)));
                 addr += (long)off;
                 if (addr > as_addr) {
                         addr -= align_amount;
                 }
 
+                /*
+                 * If randomization is requested, slew the allocation
+                 * backwards, within the same gap, by a random amount.
+                 */
+                if (flags & _MAP_RANDOMIZE) {
+                        uint32_t slew;
+                        uint32_t maxslew;
+
+                        (void) random_get_pseudo_bytes((uint8_t *)&slew,
+                            sizeof (slew));
+
+                        maxslew = MIN(aslr_max_map_skew, (addr - base));
+#if defined(SF_ERRATA_57)
+                        /*
+                         * Don't allow ASLR to cause mappings to fail below
+                         * because of SF erratum #57.
+                         */
+                        maxslew = MIN(maxslew, (addr - errata57_limit));
+#endif
+
+                        slew = slew % maxslew;
+                        addr -= P2ALIGN(slew, align_amount);
+                }
+
                 ASSERT(addr > base);
                 ASSERT(addr + len < base + slen);
                 ASSERT(((uintptr_t)addr & (align_amount - 1l)) ==
                     ((uintptr_t)(off)));
                 *addrp = addr;
 
 #if defined(SF_ERRATA_57)
                 if (AS_TYPE_64BIT(as) && addr < errata57_limit) {
                         *addrp = NULL;
                 }
[ 41 lines elided ]
         /* not applicable to sun4u */
 }
 
 /*ARGSUSED*/
 caddr_t
 contig_mem_prealloc(caddr_t alloc_base, pgcnt_t npages)
 {
         /* not applicable to sun4u */
         return (alloc_base);
 }
-
-size_t
-exec_get_spslew(void)
-{
-        return (0);
-}
    
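To make the randomization step concrete, here is a small user-level sketch of
the same slew computation, with P2ALIGN and MIN expanded from their
sys/sysmacros.h definitions.  The addresses and the slew value are made up
(the kernel gets the slew from random_get_pseudo_bytes()); this illustrates
only the arithmetic, not kernel behaviour.

    #include <stdio.h>
    #include <stdint.h>

    /* As in sys/sysmacros.h: round x down to a multiple of align (a power of two) */
    #define P2ALIGN(x, align)       ((x) & -(align))
    #define MIN(a, b)               ((a) < (b) ? (a) : (b))

    int
    main(void)
    {
            uintptr_t base = 0x7f000000;        /* bottom of the free gap */
            uintptr_t addr = 0x7fff0000;        /* highest usable, aligned address */
            uintptr_t align_amount = 0x10000;   /* e.g. a 64KB shm_alignment */
            size_t aslr_max_map_skew = 256 * 1024 * 1024;
            uint32_t slew = 0x00abcdef;         /* pretend random bytes */
            uint32_t maxslew;

            /* Never slew below the bottom of the gap, nor past the tunable cap */
            maxslew = MIN(aslr_max_map_skew, addr - base);

            /* Bound the slew, then align it so addr keeps its alignment and offset */
            slew %= maxslew;
            addr -= P2ALIGN((uintptr_t)slew, align_amount);

            printf("randomized address: %#lx\n", (unsigned long)addr);
            return (0);
    }

With these values maxslew is 0xff0000, the slew aligns down to 0xab0000, and
the mapping lands at 0x7f540000: still inside the gap that as_gap_aligned()
found, and still satisfying the alignment ASSERTs that follow the
randomization block.  Slewing backwards keeps the address within the gap
already found, so no second search is needed.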