Print this page
7029 want per-process exploit mitigation features (secflags)
7030 want basic address space layout randomization (aslr)
7031 noexec_user_stack should be a secflag
7032 want a means to forbid mappings around NULL

Split Close
Expand all
Collapse all
          --- old/usr/src/uts/i86pc/vm/vm_machdep.c
          +++ new/usr/src/uts/i86pc/vm/vm_machdep.c
↓ open down ↓ 51 lines elided ↑ open up ↑
  52   52  #include <sys/vm.h>
  53   53  #include <sys/mman.h>
  54   54  #include <sys/vnode.h>
  55   55  #include <sys/cred.h>
  56   56  #include <sys/exec.h>
  57   57  #include <sys/exechdr.h>
  58   58  #include <sys/debug.h>
  59   59  #include <sys/vmsystm.h>
  60   60  #include <sys/swap.h>
  61   61  #include <sys/dumphdr.h>
       62 +#include <sys/random.h>
  62   63  
  63   64  #include <vm/hat.h>
  64   65  #include <vm/as.h>
  65   66  #include <vm/seg.h>
  66   67  #include <vm/seg_kp.h>
  67   68  #include <vm/seg_vn.h>
  68   69  #include <vm/page.h>
  69   70  #include <vm/seg_kmem.h>
  70   71  #include <vm/seg_kpm.h>
  71   72  #include <vm/vm_dep.h>
↓ open down ↓ 1 lines elided ↑ open up ↑
  73   74  #include <sys/cpu.h>
  74   75  #include <sys/vm_machparam.h>
  75   76  #include <sys/memlist.h>
  76   77  #include <sys/bootconf.h> /* XXX the memlist stuff belongs in memlist_plat.h */
  77   78  #include <vm/hat_i86.h>
  78   79  #include <sys/x86_archext.h>
  79   80  #include <sys/elf_386.h>
  80   81  #include <sys/cmn_err.h>
  81   82  #include <sys/archsystm.h>
  82   83  #include <sys/machsystm.h>
       84 +#include <sys/secflags.h>
  83   85  
  84   86  #include <sys/vtrace.h>
  85   87  #include <sys/ddidmareq.h>
  86   88  #include <sys/promif.h>
  87   89  #include <sys/memnode.h>
  88   90  #include <sys/stack.h>
  89   91  #include <util/qsort.h>
  90   92  #include <sys/taskq.h>
  91   93  
  92   94  #ifdef __xpv
↓ open down ↓ 537 lines elided ↑ open up ↑
 630  632  }
 631  633  
 632  634  /*ARGSUSED*/
 633  635  int
 634  636  map_addr_vacalign_check(caddr_t addr, u_offset_t off)
 635  637  {
 636  638          return (0);
 637  639  }
 638  640  
 639  641  /*
      642 + * The maximum amount a randomized mapping will be slewed.  We should perhaps
       643 + * arrange things so that mmap, mmapobj, and ld.so each get a separate
       644 + * tunable.
      645 + */
      646 +size_t aslr_max_map_skew = 256 * 1024 * 1024; /* 256MB */
      647 +
      648 +/*
 640  649   * map_addr_proc() is the routine called when the system is to
 641  650   * choose an address for the user.  We will pick an address
 642  651   * range which is the highest available below userlimit.
 643  652   *
 644  653   * Every mapping will have a redzone of a single page on either side of
 645  654   * the request. This is done to leave one page unmapped between segments.
 646  655   * This is not required, but it's useful for the user because if their
 647  656   * program strays across a segment boundary, it will catch a fault
 648  657   * immediately making debugging a little easier.  Currently the redzone
 649  658   * is mandatory.
↓ open down ↓ 95 lines elided ↑ open up ↑
 745  754  
 746  755                  align_amount = LEVEL_SIZE(lvl);
 747  756          }
 748  757          if ((flags & MAP_ALIGN) && ((uintptr_t)*addrp > align_amount))
 749  758                  align_amount = (uintptr_t)*addrp;
 750  759  
 751  760          ASSERT(ISP2(align_amount));
 752  761          ASSERT(align_amount == 0 || align_amount >= PAGESIZE);
 753  762  
 754  763          off = off & (align_amount - 1);
      764 +
 755  765          /*
 756  766           * Look for a large enough hole starting below userlimit.
 757  767           * After finding it, use the upper part.
 758  768           */
 759  769          if (as_gap_aligned(as, len, &base, &slen, AH_HI, NULL, align_amount,
 760  770              PAGESIZE, off) == 0) {
 761  771                  caddr_t as_addr;
 762  772  
 763  773                  /*
 764  774                   * addr is the highest possible address to use since we have
↓ open down ↓ 7 lines elided ↑ open up ↑
 772  782                   * If addr is greater than as_addr, len would not be large
 773  783                   * enough to include the redzone, so we must adjust down
 774  784                   * by the alignment amount.
 775  785                   */
 776  786                  addr = (caddr_t)((uintptr_t)addr & (~(align_amount - 1)));
 777  787                  addr += (uintptr_t)off;
 778  788                  if (addr > as_addr) {
 779  789                          addr -= align_amount;
 780  790                  }
 781  791  
      792 +                /*
      793 +                 * If randomization is requested, slew the allocation
      794 +                 * backwards, within the same gap, by a random amount.
      795 +                 */
      796 +                if (flags & _MAP_RANDOMIZE) {
      797 +                        uint32_t slew;
      798 +
      799 +                        (void) random_get_pseudo_bytes((uint8_t *)&slew,
      800 +                            sizeof (slew));
      801 +
      802 +                        slew = slew % MIN(aslr_max_map_skew, (addr - base));
      803 +                        addr -= P2ALIGN(slew, align_amount);
      804 +                }
      805 +
 782  806                  ASSERT(addr > base);
 783  807                  ASSERT(addr + len < base + slen);
 784  808                  ASSERT(((uintptr_t)addr & (align_amount - 1)) ==
 785  809                      ((uintptr_t)(off)));
 786  810                  *addrp = addr;
 787  811          } else {
 788  812                  *addrp = NULL;  /* no more virtual space */
 789  813          }
 790  814  }
 791  815  
↓ open down ↓ 105 lines elided ↑ open up ↑
 897  921   * and *lenp are adjusted to describe the acceptable range.  On failure, 0
 898  922   * is returned.
 899  923   */
 900  924  int
 901  925  valid_va_range(caddr_t *basep, size_t *lenp, size_t minlen, int dir)
 902  926  {
 903  927          return (valid_va_range_aligned(basep, lenp, minlen, dir, 0, 0, 0));
 904  928  }
 905  929  
 906  930  /*
      931 + * Default to forbidding the first 64k of address space.  This protects most
      932 + * reasonably sized structures from dereferences through NULL:
      933 + *     ((foo_t *)0)->bar
      934 + */
      935 +uintptr_t forbidden_null_mapping_sz = 0x10000;
      936 +
      937 +/*
 907  938   * Determine whether [addr, addr+len] are valid user addresses.
 908  939   */
 909  940  /*ARGSUSED*/
 910  941  int
 911  942  valid_usr_range(caddr_t addr, size_t len, uint_t prot, struct as *as,
 912  943      caddr_t userlimit)
 913  944  {
 914  945          caddr_t eaddr = addr + len;
 915  946  
 916  947          if (eaddr <= addr || addr >= userlimit || eaddr > userlimit)
 917  948                  return (RANGE_BADADDR);
 918  949  
      950 +        if ((addr <= (caddr_t)forbidden_null_mapping_sz) &&
      951 +            secflag_enabled(as->a_proc, PROC_SEC_FORBIDNULLMAP))
      952 +                return (RANGE_BADADDR);
      953 +
 919  954  #if defined(__amd64)
 920  955          /*
 921  956           * Check for the VA hole
 922  957           */
 923  958          if (eaddr > (caddr_t)hole_start && addr < (caddr_t)hole_end)
 924  959                  return (RANGE_BADADDR);
 925  960  #endif
 926  961  
 927  962          return (RANGE_OKAY);
 928  963  }
↓ open down ↓ 2990 lines elided ↑ open up ↑
3919 3954  }
3920 3955  
3921 3956  /*
3922 3957   * Function for flushing D-cache when performing module relocations
3923 3958   * to an alternate mapping.  Unnecessary on Intel / AMD platforms.
3924 3959   */
3925 3960  void
3926 3961  dcache_flushall()
3927 3962  {}
3928 3963  
3929      -size_t
3930      -exec_get_spslew(void)
3931      -{
3932      -        return (0);
3933      -}
3934      -
3935 3964  /*
3936 3965   * Allocate a memory page.  The argument 'seed' can be any pseudo-random
3937 3966   * number to vary where the pages come from.  This is quite a hacked up
3938 3967   * method -- it works for now, but really needs to be fixed up a bit.
3939 3968   *
3940 3969   * We currently use page_create_va() on the kvp with fake offsets,
3941 3970   * segments and virt address.  This is pretty bogus, but was copied from the
3942 3971   * old hat_i86.c code.  A better approach would be to specify either mnode
3943 3972   * random or mnode local and takes a page from whatever color has the MOST
3944 3973   * available - this would have a minimal impact on page coloring.
↓ open down ↓ 44 lines elided ↑ open up ↑
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX