7029 want per-process exploit mitigation features (secflags)
7030 want basic address space layout randomization (aslr)
7031 noexec_user_stack should be a secflag
7032 want a means to forbid mappings around NULL.

          --- old/usr/src/uts/common/os/mmapobj.c
          +++ new/usr/src/uts/common/os/mmapobj.c
[ 60 lines elided ]
  61   61   * mmapobj also supports the AOUT 4.x binary format as well as flat files in
  62   62   * a read only manner.
  63   63   *
  64   64   * When interpreting and mapping an ELF file, mmapobj will map each PT_LOAD
  65   65   * or PT_SUNWBSS segment according to the ELF standard.  Refer to the "Linker
  66   66   * and Libraries Guide" for more information about the standard and mapping
  67   67   * rules.
  68   68   *
  69   69   * Having mmapobj interpret and map objects will allow the kernel to make the
  70   70   * best decision for where to place the mappings for said objects.  Thus, we
  71      - * can make optimizations inside of the kernel for specific platforms or
  72      - * cache mapping information to make mapping objects faster.
       71 + * can make optimizations inside of the kernel for specific platforms or cache
       72 + * mapping information to make mapping objects faster.  The cache is ignored
       73 + * if ASLR is enabled.
  73   74   *
  74   75   * The lib_va_hash will be one such optimization.  For each ELF object that
  75   76   * mmapobj is asked to interpret, we will attempt to cache the information
  76   77   * about the PT_LOAD and PT_SUNWBSS sections to speed up future mappings of
  77   78   * the same objects.  We will cache up to LIBVA_CACHED_SEGS (see below) program
  78   79   * headers which should cover a majority of the libraries out there without
  79   80   * wasting space.  In order to make sure that the cached information is valid,
  80   81   * we check the passed in vnode's mtime and ctime to make sure the vnode
  81   82   * has not been modified since the last time we used it.
  82   83   *
[ 628 lines elided ]
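
For orientation, a minimal user-space sketch of the validity check the lib_va comment above describes: a cached description is only trusted while the vnode's mtime and ctime still match the values recorded when the entry was built. The structure and field names below are simplified stand-ins, not the actual lib_va definitions.

        /*
         * Illustrative only: validate a cached mapping description against
         * the file's current timestamps, in the spirit of the lib_va cache
         * check.  The struct and its fields are hypothetical simplifications.
         */
        #include <stdbool.h>
        #include <time.h>

        struct cached_obj {
                struct timespec co_mtime;       /* file mtime when entry was cached */
                struct timespec co_ctime;       /* file ctime when entry was cached */
        };

        static bool
        ts_equal(const struct timespec *a, const struct timespec *b)
        {
                return (a->tv_sec == b->tv_sec && a->tv_nsec == b->tv_nsec);
        }

        /*
         * The entry may be reused only if the file has not changed since it
         * was cached; otherwise the recorded segment layout must be discarded.
         */
        static bool
        cached_obj_valid(const struct cached_obj *co,
            const struct timespec *cur_mtime, const struct timespec *cur_ctime)
        {
                return (ts_equal(&co->co_mtime, cur_mtime) &&
                    ts_equal(&co->co_ctime, cur_ctime));
        }
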
 711  712   * if an error is encountered. If we successfully insert the requested info
 712  713   * into the lib_va hash, then *lvpp will be set to point to this lib_va
 713  714   * structure.  The structure will have a hold on it and thus lib_va_release
 714  715   * needs to be called on it by the caller.  This function will not fill out
 715  716   * lv_mps or lv_num_segs since it does not have enough information to do so.
 716  717   * The caller is responsible for doing this, making sure that any modifications
 717  718   * to lv_mps are visible before setting lv_num_segs.
 718  719   */
 719  720  static caddr_t
 720  721  mmapobj_alloc_start_addr(struct lib_va **lvpp, size_t len, int use_lib_va,
 721      -    size_t align, vattr_t *vap)
      722 +    int randomize, size_t align, vattr_t *vap)
 722  723  {
 723  724          proc_t *p = curproc;
 724  725          struct as *as = p->p_as;
 725  726          struct segvn_crargs crargs = SEGVN_ZFOD_ARGS(PROT_USER, PROT_ALL);
 726  727          int error;
 727  728          model_t model;
 728  729          uint_t ma_flags = _MAP_LOW32;
 729  730          caddr_t base = NULL;
 730  731          vmem_t *model_vmem;
 731  732          size_t lib_va_start;
 732  733          size_t lib_va_end;
 733  734          size_t lib_va_len;
 734  735  
 735  736          ASSERT(lvpp != NULL);
      737 +        ASSERT((randomize & use_lib_va) != 1);
 736  738  
 737  739          MOBJ_STAT_ADD(alloc_start);
 738  740          model = get_udatamodel();
 739  741  
 740  742          if (model == DATAMODEL_LP64) {
 741  743                  ma_flags = 0;
 742  744                  model_vmem = lib_va_64_arena;
 743  745          } else {
 744  746                  ASSERT(model == DATAMODEL_ILP32);
 745  747                  model_vmem = lib_va_32_arena;
 746  748          }
 747  749  
 748  750          if (align > 1) {
 749  751                  ma_flags |= MAP_ALIGN;
 750  752          }
      753 +
      754 +        if (randomize != 0)
      755 +                ma_flags |= _MAP_RANDOMIZE;
      756 +
 751  757          if (use_lib_va) {
 752  758                  /*
 753  759                   * The first time through, we need to setup the lib_va arenas.
 754  760                   * We call map_addr to find a suitable range of memory to map
 755  761                   * the given library, and we will set the highest address
 756  762                   * in our vmem arena to the end of this address range.
 757  763                   * We allow up to half of the address space to be used
 758  764                   * for lib_va addresses but we do not prevent any allocations
 759  765                   * in this range from other allocation paths.
 760  766                   */
[ 93 lines elided ]
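
A hedged sketch of the invariant behind the new ASSERT above (the helper and its arguments are hypothetical, not the kernel code): randomize is derived from the process's ASLR secflag, and anything that turns randomization on also disqualifies the lib_va cache, so the two flags can never both be 1.

        #include <stddef.h>

        /*
         * Illustrative sketch: how a caller keeps use_lib_va and randomize
         * mutually exclusive.  Hypothetical helper, not the kernel code.
         */
        static void
        choose_placement_flags(int aslr_enabled, size_t padding,
            int *use_lib_va, int *randomize)
        {
                *randomize = aslr_enabled ? 1 : 0;

                /* Padding or ASLR means the shared lib_va cache cannot be used. */
                *use_lib_va = (padding != 0 || aslr_enabled) ? 0 : 1;
        }

With these rules, (randomize & use_lib_va) is never 1, which is exactly what the assertion checks.
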
 854  860                  }
 855  861          }
 856  862  
 857  863  nolibva:
 858  864          as_rangelock(as);
 859  865  
 860  866          /*
 861  867           * If we don't have an expected base address, or the one that we want
 862  868           * to use is not available or acceptable, go get an acceptable
 863  869           * address range.
      870 +         *
      871 +         * If ASLR is enabled, we should never have used the cache, and should
      872 +         * also start our real work here, in the consequent of the next
      873 +         * condition.
 864  874           */
      875 +        if (randomize != 0)
      876 +                ASSERT(base == NULL);
      877 +
 865  878          if (base == NULL || as_gap(as, len, &base, &len, 0, NULL) ||
 866  879              valid_usr_range(base, len, PROT_ALL, as, as->a_userlimit) !=
 867  880              RANGE_OKAY || OVERLAPS_STACK(base + len, p)) {
 868  881                  MOBJ_STAT_ADD(get_addr);
 869  882                  base = (caddr_t)align;
 870  883                  map_addr(&base, len, 0, 1, ma_flags);
 871  884          }
 872  885  
 873  886          /*
 874  887           * Need to reserve the address space we're going to use.
[ 643 lines elided ]
1518 1531          as_rangeunlock(as);
1519 1532          return (0);
1520 1533  }
1521 1534  
1522 1535  /*
1523 1536   * Walk through the ELF program headers and extract all useful information
1524 1537   * for PT_LOAD and PT_SUNWBSS segments into mrp.
1525 1538   * Return 0 on success or error on failure.
1526 1539   */
1527 1540  static int
1528      -process_phdr(Ehdr *ehdrp, caddr_t phdrbase, int nphdrs, mmapobj_result_t *mrp,
     1541 +process_phdrs(Ehdr *ehdrp, caddr_t phdrbase, int nphdrs, mmapobj_result_t *mrp,
1529 1542      vnode_t *vp, uint_t *num_mapped, size_t padding, cred_t *fcred)
1530 1543  {
1531 1544          int i;
1532 1545          caddr_t start_addr = NULL;
1533 1546          caddr_t vaddr;
1534 1547          size_t len = 0;
1535 1548          size_t lib_len = 0;
1536 1549          int ret;
1537 1550          int prot;
1538 1551          struct lib_va *lvp = NULL;
[ 35 lines elided ]
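
Purely to illustrate what process_phdrs() walks, here is a small user-space sketch that scans an ELFCLASS64 image already read into memory and reports each PT_LOAD segment. It assumes a well-formed image, does no bounds checking, and ignores PT_SUNWBSS for brevity; the function name is hypothetical.

        #include <elf.h>
        #include <stdio.h>

        /*
         * Illustrative only: list the PT_LOAD program headers of a 64-bit ELF
         * image sitting in memory, roughly the information the kernel routine
         * extracts to plan its mappings.
         */
        static void
        print_load_segments(const void *image)
        {
                const Elf64_Ehdr *eh = image;
                const Elf64_Phdr *ph =
                    (const Elf64_Phdr *)((const char *)image + eh->e_phoff);
                int i;

                for (i = 0; i < eh->e_phnum; i++) {
                        if (ph[i].p_type != PT_LOAD)
                                continue;
                        (void) printf("PT_LOAD vaddr=%#llx filesz=%#llx "
                            "memsz=%#llx flags=%#x align=%#llx\n",
                            (unsigned long long)ph[i].p_vaddr,
                            (unsigned long long)ph[i].p_filesz,
                            (unsigned long long)ph[i].p_memsz,
                            ph[i].p_flags,
                            (unsigned long long)ph[i].p_align);
                }
        }
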
1574 1587                  }
1575 1588          } else {
1576 1589                  ASSERT(model == DATAMODEL_ILP32);
1577 1590                  hsize = ((Elf32_Ehdr *)ehdrp)->e_phentsize;
1578 1591                  if (hsize & 3) {
1579 1592                          MOBJ_STAT_ADD(phent_align32);
1580 1593                          return (ENOTSUP);
1581 1594                  }
1582 1595          }
1583 1596  
1584      -        if (padding != 0) {
     1597 +        if ((padding != 0) || secflag_enabled(curproc, PROC_SEC_ASLR)) {
1585 1598                  use_lib_va = 0;
1586 1599          }
1587 1600          if (e_type == ET_DYN) {
1588 1601                  vattr.va_mask = AT_FSID | AT_NODEID | AT_CTIME | AT_MTIME;
1589 1602                  error = VOP_GETATTR(vp, &vattr, 0, fcred, NULL);
1590 1603                  if (error) {
1591 1604                          return (error);
1592 1605                  }
1593 1606                  /* Check to see if we already have a description for this lib */
1594      -                lvp = lib_va_find(&vattr);
     1607 +                if (!secflag_enabled(curproc, PROC_SEC_ASLR))
     1608 +                        lvp = lib_va_find(&vattr);
1595 1609  
1596 1610                  if (lvp != NULL) {
1597 1611                          MOBJ_STAT_ADD(lvp_found);
1598 1612                          if (use_lib_va) {
1599 1613                                  start_addr = mmapobj_lookup_start_addr(lvp);
1600 1614                                  if (start_addr == NULL) {
1601 1615                                          lib_va_release(lvp);
1602 1616                                          return (ENOMEM);
1603 1617                                  }
1604 1618                          }
[ 89 lines elided ]
1694 1708                  /*
1695 1709                   * At this point, if lvp is non-NULL, then above we
1696 1710                   * already found it in the cache but did not get
1697 1711                   * the start address since we were not going to use lib_va.
1698 1712                   * Since we know that lib_va will not be used, it's safe
1699 1713                   * to call mmapobj_alloc_start_addr and know that lvp
1700 1714                   * will not be modified.
1701 1715                   */
1702 1716                  ASSERT(lvp ? use_lib_va == 0 : 1);
1703 1717                  start_addr = mmapobj_alloc_start_addr(&lvp, len,
1704      -                    use_lib_va, align, &vattr);
     1718 +                    use_lib_va,
     1719 +                    secflag_enabled(curproc, PROC_SEC_ASLR),
     1720 +                    align, &vattr);
1705 1721                  if (start_addr == NULL) {
1706 1722                          if (lvp) {
1707 1723                                  lib_va_release(lvp);
1708 1724                          }
1709 1725                          MOBJ_STAT_ADD(alloc_start_fail);
1710 1726                          return (ENOMEM);
1711 1727                  }
1712 1728                  /*
1713 1729                   * If we can't cache it, no need to hang on to it.
1714 1730                   * Setting lv_num_segs to non-zero will make that
[ 304 lines elided ]
2019 2035          }
2020 2036  
2021 2037          if ((error = vn_rdwr(UIO_READ, vp, phbasep, phsizep,
2022 2038              (offset_t)phoff, UIO_SYSSPACE, 0, (rlim64_t)0,
2023 2039              fcred, NULL)) != 0) {
2024 2040                  kmem_free(phbasep, phsizep);
2025 2041                  return (error);
2026 2042          }
2027 2043  
2028 2044          /* Now process the phdr's */
2029      -        error = process_phdr(ehdrp, phbasep, nphdrs, mrp, vp, num_mapped,
     2045 +        error = process_phdrs(ehdrp, phbasep, nphdrs, mrp, vp, num_mapped,
2030 2046              padding, fcred);
2031 2047          kmem_free(phbasep, phsizep);
2032 2048          return (error);
2033 2049  }
2034 2050  
2035 2051  #if defined(__sparc)
2036 2052  /*
2037 2053   * Hack to support 64 bit kernels running AOUT 4.x programs.
2038 2054   * This is the sizeof (struct nlist) for a 32 bit kernel.
2039 2055   * Since AOUT programs are 32 bit only, they will never use the 64 bit
[ 265 lines elided ]
2305 2321          error = VOP_GETATTR(vp, &vattr, 0, fcred, NULL);
2306 2322          if (error) {
2307 2323                  return (error);
2308 2324          }
2309 2325  
2310 2326          /*
2311 2327           * Check lib_va to see if we already have a full description
2312 2328           * for this library.  This is the fast path and only used for
2313 2329           * ET_DYN ELF files (dynamic libraries).
2314 2330           */
2315      -        if (padding == 0 && (lvp = lib_va_find(&vattr)) != NULL) {
     2331 +        if (padding == 0 && !secflag_enabled(curproc, PROC_SEC_ASLR) &&
     2332 +            ((lvp = lib_va_find(&vattr)) != NULL)) {
2316 2333                  int num_segs;
2317 2334  
2318 2335                  model = get_udatamodel();
2319 2336                  if ((model == DATAMODEL_ILP32 &&
2320 2337                      lvp->lv_flags & LV_ELF64) ||
2321 2338                      (model == DATAMODEL_LP64 &&
2322 2339                      lvp->lv_flags & LV_ELF32)) {
2323 2340                          lib_va_release(lvp);
2324 2341                          MOBJ_STAT_ADD(fast_wrong_model);
2325 2342                          return (ENOTSUP);
[ 124 lines elided ]
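
Condensing the fast-path gate shown above into a hedged sketch (the helper and its boolean arguments are hypothetical simplifications): the cached description is only usable when no padding was requested, ASLR is not in effect for the process, and the cached entry was built for the caller's data model.

        #include <stddef.h>

        /*
         * Illustrative only: the conditions under which the lib_va fast path
         * may be taken.  Hypothetical helper, not the kernel code.
         */
        static int
        can_use_cached_mapping(size_t padding, int aslr_enabled,
            int cached_is_elf64, int proc_is_lp64)
        {
                if (padding != 0 || aslr_enabled)
                        return (0);     /* must take the slow path */
                if (cached_is_elf64 != proc_is_lp64)
                        return (0);     /* data model mismatch, as checked above */
                return (1);
        }
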