uts: Allow for address space randomisation.

Randomise the base addresses of shared objects, non-fixed mappings, the
stack and the heap.  Introduce a service, svc:/system/process-security,
and a tool, psecflags(1), to control and observe it.
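
No application changes are required: once randomisation is enabled for a
process (via the new service, or with psecflags(1) -- the exact invocation
syntax, e.g. something like `psecflags -s aslr -e cmd`, is an assumption
here, not taken from this change), non-fixed mappings simply come back at
varying addresses.  A small illustrative program, not part of the change,
that makes the effect visible across runs:

    #include <sys/mman.h>
    #include <stdio.h>

    /*
     * Illustrative sketch: with randomisation enabled for the process,
     * repeated runs should print different addresses for an otherwise
     * identical non-fixed mapping; with it disabled, the address is
     * typically stable from run to run.
     */
    int
    main(void)
    {
            void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                MAP_PRIVATE | MAP_ANON, -1, 0);

            if (p == MAP_FAILED) {
                    perror("mmap");
                    return (1);
            }
            (void) printf("%p\n", p);
            return (0);
    }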

--- old/usr/src/uts/sun4u/vm/mach_vm_dep.c
+++ new/usr/src/uts/sun4u/vm/mach_vm_dep.c
[ 38 lines elided ]
  39   39  #include <sys/exec.h>
  40   40  #include <sys/cmn_err.h>
  41   41  #include <sys/cpu_module.h>
  42   42  #include <sys/cpu.h>
  43   43  #include <sys/elf_SPARC.h>
  44   44  #include <sys/archsystm.h>
  45   45  #include <vm/hat_sfmmu.h>
  46   46  #include <sys/memnode.h>
  47   47  #include <sys/mem_cage.h>
  48   48  #include <vm/vm_dep.h>
       49 +#include <sys/random.h>
  49   50  
  50   51  #if defined(__sparcv9) && defined(SF_ERRATA_57)
  51   52  caddr_t errata57_limit;
  52   53  #endif
  53   54  
  54   55  uint_t page_colors = 0;
  55   56  uint_t page_colors_mask = 0;
  56   57  uint_t page_coloring_shift = 0;
  57   58  int consistent_coloring;
  58   59  int update_proc_pgcolorbase_after_fork = 0;
[ 71 lines elided ]
 130  131          }
 131  132          if (max_privmap_lpsize == MMU_PAGESIZE4M) {
 132  133                  max_privmap_lpsize = ismpagesize;
 133  134          }
 134  135          if (max_shm_lpsize == MMU_PAGESIZE4M) {
 135  136                  max_shm_lpsize = ismpagesize;
 136  137          }
 137  138  }
 138  139  
 139  140  /*
       141 + * The maximum amount a randomized mapping will be slewed.  We should perhaps
       142 + * arrange things so these tunables can be separate for mmap, mmapobj, and
       143 + * ld.so.
       144 + */
      145 +volatile size_t aslr_max_map_skew = 256 * 1024 * 1024; /* 256MB */
      146 +
      147 +/*
 140  148   * map_addr_proc() is the routine called when the system is to
 141  149   * choose an address for the user.  We will pick an address
 142  150   * range which is just below the current stack limit.  The
 143  151   * algorithm used for cache consistency on machines with virtual
 144  152   * address caches is such that offset 0 in the vnode is always
 145  153   * on a shm_alignment'ed aligned address.  Unfortunately, this
 146  154   * means that vnodes which are demand paged will not be mapped
 147  155   * cache consistently with the executable images.  When the
 148  156   * cache alignment for a given object is inconsistent, the
 149  157   * lower level code must manage the translations so that this
[ 108 lines elided ]
 258  266  
 259  267          ASSERT(ISP2(align_amount));
 260  268          ASSERT(align_amount == 0 || align_amount >= PAGESIZE);
 261  269  
 262  270          /*
 263  271           * Look for a large enough hole starting below the stack limit.
 264  272           * After finding it, use the upper part.
 265  273           */
 266  274          as_purge(as);
 267  275          off = off & (align_amount - 1);
      276 +
 268  277          if (as_gap_aligned(as, len, &base, &slen, AH_HI, NULL, align_amount,
 269  278              PAGESIZE, off) == 0) {
 270  279                  caddr_t as_addr;
 271  280  
 272  281                  /*
 273  282                   * addr is the highest possible address to use since we have
 274  283                   * a PAGESIZE redzone at the beginning and end.
 275  284                   */
 276  285                  addr = base + slen - (PAGESIZE + len);
 277  286                  as_addr = addr;
[ 3 lines elided ]
 281  290                   * If addr is greater than as_addr, len would not be large
 282  291                   * enough to include the redzone, so we must adjust down
 283  292                   * by the alignment amount.
 284  293                   */
 285  294                  addr = (caddr_t)((uintptr_t)addr & (~(align_amount - 1l)));
 286  295                  addr += (long)off;
 287  296                  if (addr > as_addr) {
 288  297                          addr -= align_amount;
 289  298                  }
 290  299  
      300 +                /*
      301 +                 * If randomization is requested, slew the allocation
      302 +                 * backwards, within the same gap, by a random amount.
      303 +                 *
      304 +                 * XXX: This will fall over in processes like Java, which
      305 +                 * commonly have a great many small mappings.
      306 +                 */
      307 +                if (flags & _MAP_RANDOMIZE) {
      308 +                        uint32_t slew;
      309 +                        uint32_t maxslew;
      310 +
      311 +                        (void) random_get_pseudo_bytes((uint8_t *)&slew,
      312 +                            sizeof (slew));
      313 +
       314 +                        maxslew = MIN(aslr_max_map_skew, (addr - base));
       315 +#if defined(SF_ERRATA_57)
       316 +                        /*
       317 +                         * Don't let ASLR slew us below errata57_limit,
       318 +                         * or the erratum #57 check below rejects us.
       319 +                         */
       320 +                        maxslew = MIN(maxslew, (addr - errata57_limit));
       321 +#endif
       322 +                        slew = slew % maxslew;
       323 +                        addr -= P2ALIGN(slew, align_amount);
       324 +                }
      325 +
 291  326                  ASSERT(addr > base);
 292  327                  ASSERT(addr + len < base + slen);
 293  328                  ASSERT(((uintptr_t)addr & (align_amount - 1l)) ==
 294  329                      ((uintptr_t)(off)));
 295  330                  *addrp = addr;
 296  331  
 297  332  #if defined(SF_ERRATA_57)
 298  333                  if (AS_TYPE_64BIT(as) && addr < errata57_limit) {
 299  334                          *addrp = NULL;
 300  335                  }
[ 41 lines elided ]
 342  377          /* not applicable to sun4u */
 343  378  }
 344  379  
 345  380  /*ARGSUSED*/
 346  381  caddr_t
 347  382  contig_mem_prealloc(caddr_t alloc_base, pgcnt_t npages)
 348  383  {
 349  384          /* not applicable to sun4u */
 350  385          return (alloc_base);
 351  386  }
 352      -
 353      -size_t
 354      -exec_get_spslew(void)
 355      -{
 356      -        return (0);
 357      -}
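
To make the placement arithmetic easier to review, here is a self-contained
userland sketch of the path above: take the top of the gap (less the
redzone), align it down and apply the colouring offset as map_addr_proc()
does, then slew it backwards by a bounded, alignment-preserving random
amount as the _MAP_RANDOMIZE branch does.  Everything in it is
illustrative: the constants, the local P2ALIGN/MIN definitions, and rand()
standing in for random_get_pseudo_bytes(); the SF erratum #57 clamp is
omitted.

    #include <stdio.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <time.h>

    /* Local stand-ins for the <sys/sysmacros.h> kernel macros. */
    #define P2ALIGN(x, align)       ((x) & -(align))
    #define MIN(a, b)               ((a) < (b) ? (a) : (b))

    int
    main(void)
    {
            /* Illustrative values, not taken from any real address space. */
            uintptr_t base = 0x10000000;            /* bottom of the gap */
            size_t slen = 0x40000000;               /* gap length: 1GB */
            size_t len = 0x100000;                  /* mapping length: 1MB */
            uintptr_t align_amount = 0x400000;      /* e.g. a 4MB alignment */
            uintptr_t off = 0;                      /* cache colouring offset */
            size_t aslr_max_map_skew = 256 * 1024 * 1024;
            uintptr_t addr, as_addr;
            uint32_t slew, maxslew;

            /* Highest usable address: 0x2000 is PAGESIZE on sun4u. */
            addr = base + slen - (0x2000 + len);
            as_addr = addr;

            /* Align down; back off if the offset pushed us too high. */
            addr = (addr & ~(align_amount - 1)) + off;
            if (addr > as_addr)
                    addr -= align_amount;

            /*
             * The slew is bounded by the tunable and by the room left in
             * the gap, then aligned down so alignment and colouring
             * survive.  rand() may yield as few as 15 bits, so two calls
             * are combined; it is only a weak stand-in for the kernel's
             * random_get_pseudo_bytes().
             */
            srand((unsigned int)time(NULL));
            slew = ((uint32_t)rand() << 16) | (uint32_t)rand();
            maxslew = MIN(aslr_max_map_skew, addr - base);
            slew %= maxslew;
            addr -= P2ALIGN((uintptr_t)slew, align_amount);

            (void) printf("mapping placed at %#lx\n", (unsigned long)addr);
            return (0);
    }

Because the slew is aligned down to align_amount, randomisation never
disturbs the cache-colouring guarantees the surrounding comments describe;
it only trades away up to aslr_max_map_skew of the gap.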
    