uts: Allow for address space randomisation.
Randomise the base addresses of shared objects, non-fixed mappings, the
stack and the heap.  Introduce a service, svc:/system/process-security,
and a tool psecflags(1) to control and observe it.
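To see what the change perturbs, here is a small userland program (illustrative only, not part of this webrev) that prints one address from each region the commit message names; run it twice in a process with the aslr secflag enabled and the values should differ between runs:

/*
 * Illustrative only -- not part of this webrev.  Prints the address of a
 * stack variable, a heap allocation, a non-fixed anonymous mapping, and a
 * libc symbol (shared object text).
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

int
main(void)
{
	int stack_var;
	void *heap = malloc(64);
	void *map = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, 0);

	(void) printf("stack: %p\n", (void *)&stack_var);
	(void) printf("heap:  %p\n", heap);
	(void) printf("mmap:  %p\n", map);
	(void) printf("libc:  %p\n", (void *)strlen);

	free(heap);
	return (0);
}

If the psecflags(1) synopsis matches my reading, something like `psecflags -s aslr -e ./a.out` would launch the program with randomisation enabled, but check the delivered manual page for the exact spec syntax.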

--- old/usr/src/uts/sun4v/vm/mach_vm_dep.c
+++ new/usr/src/uts/sun4v/vm/mach_vm_dep.c
[ 44 lines elided ]
  45   45  #include <vm/hat_sfmmu.h>
  46   46  #include <sys/memnode.h>
  47   47  #include <sys/mem_cage.h>
  48   48  #include <vm/vm_dep.h>
  49   49  #include <sys/error.h>
  50   50  #include <sys/machsystm.h>
  51   51  #include <vm/seg_kmem.h>
  52   52  #include <sys/stack.h>
  53   53  #include <sys/atomic.h>
  54   54  #include <sys/promif.h>
       55 +#include <sys/random.h>
  55   56  
  56   57  uint_t page_colors = 0;
  57   58  uint_t page_colors_mask = 0;
  58   59  uint_t page_coloring_shift = 0;
  59   60  int consistent_coloring;
  60   61  int update_proc_pgcolorbase_after_fork = 1;
  61   62  
  62   63  uint_t mmu_page_sizes = MMU_PAGE_SIZES;
  63   64  uint_t max_mmu_page_sizes = MMU_PAGE_SIZES;
  64   65  uint_t mmu_hashcnt = MAX_HASHCNT;
[ 96 lines elided ]
 161  162  #define NUM_IMPORT_SIZES        \
 162  163          (sizeof (contig_mem_import_sizes) / sizeof (size_t))
 163  164  static size_t contig_mem_import_size_max        = MMU_PAGESIZE4M;
 164  165  size_t contig_mem_slab_size                     = MMU_PAGESIZE4M;
 165  166  
 166  167  /* Boot-time allocated buffer to pre-populate the contig_mem_arena */
 167  168  static size_t contig_mem_prealloc_size;
 168  169  static void *contig_mem_prealloc_buf;
 169  170  
 170  171  /*
      172 + * The maximum amount a randomized mapping will be slewed.  We should perhaps
      173 + * arrange things so these tunables can be separate for mmap, mmapobj, and
       174 + * ld.so.
      175 + */
      176 +volatile size_t aslr_max_map_skew = 256 * 1024 * 1024; /* 256MB */
      177 +
      178 +/*
 171  179   * map_addr_proc() is the routine called when the system is to
 172  180   * choose an address for the user.  We will pick an address
 173  181   * range which is just below the current stack limit.  The
 174  182   * algorithm used for cache consistency on machines with virtual
 175  183   * address caches is such that offset 0 in the vnode is always
 176  184   * on a shm_alignment'ed aligned address.  Unfortunately, this
 177  185   * means that vnodes which are demand paged will not be mapped
 178  186   * cache consistently with the executable images.  When the
 179  187   * cache alignment for a given object is inconsistent, the
 180  188   * lower level code must manage the translations so that this
[ 132 lines elided ]
 313  321                   * If addr is greater than as_addr, len would not be large
 314  322                   * enough to include the redzone, so we must adjust down
 315  323                   * by the alignment amount.
 316  324                   */
 317  325                  addr = (caddr_t)((uintptr_t)addr & (~(align_amount - 1l)));
 318  326                  addr += (long)off;
 319  327                  if (addr > as_addr) {
 320  328                          addr -= align_amount;
 321  329                  }
 322  330  
      331 +                /*
      332 +                 * If randomization is requested, slew the allocation
      333 +                 * backwards, within the same gap, by a random amount.
      334 +                 *
      335 +                 * XXX: This will fall over in processes like Java, which
      336 +                 * commonly have a great many small mappings.
      337 +                 */
      338 +                if (flags & _MAP_RANDOMIZE) {
      339 +                        uint32_t slew;
      340 +
      341 +                        (void) random_get_pseudo_bytes((uint8_t *)&slew,
      342 +                            sizeof (slew));
      343 +
      344 +                        slew = slew % MIN(aslr_max_map_skew, (addr - base));
      345 +                        addr -= P2ALIGN(slew, align_amount);
      346 +                }
      347 +
 323  348                  ASSERT(addr > base);
 324  349                  ASSERT(addr + len < base + slen);
 325  350                  ASSERT(((uintptr_t)addr & (align_amount - 1l)) ==
 326  351                      ((uintptr_t)(off)));
 327  352                  *addrp = addr;
 328  353  
 329  354          } else {
 330  355                  *addrp = NULL;  /* no more virtual space */
 331  356          }
 332  357  }
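The net effect of the new block: when _MAP_RANDOMIZE is set, the chosen address is moved backwards within the same gap by a pseudo-random amount, capped both by the aslr_max_map_skew tunable and by the distance to the bottom of the gap, then rounded down to align_amount so the shm_alignment-based cache colouring still holds. A minimal userland mirror of that arithmetic (the addresses are made up, P2ALIGN and MIN restate the kernel macros, and /dev/urandom stands in for random_get_pseudo_bytes()):

/*
 * Userland mirror of the slew computation above; illustrative only.
 */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

#define	P2ALIGN(x, a)	((x) & -(a))	/* round down to a power-of-2 multiple */
#define	MIN(a, b)	((a) < (b) ? (a) : (b))

int
main(void)
{
	size_t aslr_max_map_skew = 256 * 1024 * 1024;	/* 256MB cap */
	uintptr_t base = 0x10000000;		/* bottom of the chosen gap */
	uintptr_t addr = 0x7f000000;		/* candidate, already aligned */
	uintptr_t align_amount = 0x400000;	/* e.g. 4MB shm_alignment */
	uint32_t slew = 0;

	int fd = open("/dev/urandom", O_RDONLY);
	if (fd != -1) {
		(void) read(fd, &slew, sizeof (slew));
		(void) close(fd);
	}

	/* Cap the slew so we stay inside the gap, then keep the alignment. */
	slew = slew % MIN(aslr_max_map_skew, addr - base);
	addr -= P2ALIGN((uintptr_t)slew, align_amount);

	(void) printf("slewed addr: 0x%lx\n", (unsigned long)addr);
	return (0);
}

Rounding the slew down to align_amount is what keeps the ASSERTs after the block true: the slewed address stays above base, and its low bits still equal the vnode offset.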
[ 435 lines elided ]
 768  793  
 769  794          if (contig_mem_prealloc_size != 0) {
 770  795                  contig_mem_prealloc_buf = alloc_base;
 771  796          } else {
 772  797                  contig_mem_prealloc_buf = NULL;
 773  798          }
 774  799          alloc_base += contig_mem_prealloc_size;
 775  800  
 776  801          return (alloc_base);
 777  802  }
 778      -
 779      -static uint_t sp_color_stride = 16;
 780      -static uint_t sp_color_mask = 0x1f;
 781      -static uint_t sp_current_color = (uint_t)-1;
 782      -
 783      -size_t
 784      -exec_get_spslew(void)
 785      -{
 786      -        uint_t spcolor = atomic_inc_32_nv(&sp_current_color);
 787      -        return ((size_t)((spcolor & sp_color_mask) * SA(sp_color_stride)));
 788      -}
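The sun4v-only exec_get_spslew() deleted above slewed each new stack by one of 32 cache colours at a 16-byte stride, purely for cache behaviour. With this change, stack-base slewing presumably moves to common code, where the colouring can coexist with a randomised offset when ASLR is in force. A userland sketch of that shape, under stated assumptions (aslr_max_stack_skew and the aslr_enabled switch are placeholders, STACK_ALIGN of 16 is assumed as on sparcv9, ++ stands in for atomic_inc_32_nv(), and /dev/urandom for the kernel PRNG):

/*
 * Illustrative sketch only: the colouring logic removed above combined
 * with a hypothetical randomised path.
 */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

#define	STACK_ALIGN	16	/* assumption: sparcv9 stack alignment */
#define	SA(x)		(((x) + (STACK_ALIGN - 1)) & ~(STACK_ALIGN - 1))
#define	P2ALIGN(x, a)	((x) & -(a))

/* Placeholder tunable: maximum randomised stack slew, a power of two. */
static size_t aslr_max_stack_skew = 64 * 1024;

static size_t
get_spslew(int aslr_enabled)
{
	/* Colouring path, as removed from sun4v in this hunk. */
	static unsigned int sp_current_color = (unsigned int)-1;
	unsigned int sp_color_stride = 16;
	unsigned int sp_color_mask = 0x1f;

	if (!aslr_enabled) {
		/* ++ stands in for the kernel's atomic_inc_32_nv(). */
		return ((size_t)((++sp_current_color & sp_color_mask) *
		    SA(sp_color_stride)));
	}

	/* Randomised path: slew the base, keeping stack alignment. */
	size_t off = 0;
	int fd = open("/dev/urandom", O_RDONLY);
	if (fd != -1) {
		(void) read(fd, &off, sizeof (off));
		(void) close(fd);
	}
	return (P2ALIGN(off % aslr_max_stack_skew, STACK_ALIGN));
}

int
main(void)
{
	(void) printf("colour slew:     %zu\n", get_spslew(0));
	(void) printf("randomised slew: %zu\n", get_spslew(1));
	return (0);
}

The randomised path keeps the result stack-aligned via P2ALIGN, mirroring how the mmap slew above preserves align_amount.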
    