sync further changes from uts/aslr


  45 #include <sys/cred.h>
  46 #include <sys/vnode.h>
  47 #include <sys/vfs.h>
  48 #include <sys/vm.h>
  49 #include <sys/file.h>
  50 #include <sys/mman.h>
  51 #include <sys/vmparam.h>
  52 #include <sys/fcntl.h>
  53 #include <sys/lwpchan_impl.h>
  54 #include <sys/nbmlock.h>
  55 
  56 #include <vm/hat.h>
  57 #include <vm/as.h>
  58 #include <vm/seg.h>
  59 #include <vm/seg_dev.h>
  60 #include <vm/seg_vn.h>
  61 
  62 int use_brk_lpg = 1;
  63 int use_stk_lpg = 1;
  64 
  65 static int brk_lpg(caddr_t nva);
  66 static int grow_lpg(caddr_t sp);
  67 
  68 intptr_t
  69 brk(caddr_t nva)
  70 {
  71         int error;
  72         proc_t *p = curproc;
  73 
  74         /*
  75          * As a special case to aid the implementation of sbrk(3C), if given a
  76          * new brk of 0, return the current brk.  We'll hide this in brk(3C).
  77          */
  78         if (nva == 0)
  79                 return ((intptr_t)(p->p_brkbase + p->p_brksize));
  80 
  81         /*
  82          * Serialize brk operations on an address space.
  83          * This also serves as the lock protecting p_brksize
  84          * and p_brkpageszc.
  85          */
  86         as_rangelock(p->p_as);
  87         if (use_brk_lpg && (p->p_flag & SAUTOLPG) != 0) {
  88                 error = brk_lpg(nva);
  89         } else {
  90                 error = brk_internal(nva, p->p_brkpageszc);
  91         }
  92         as_rangeunlock(p->p_as);
  93         return ((error != 0 ? set_errno(error) : 0));
  94 }
  95 
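The brk-of-0 special case in the function above exists so that a C library can build sbrk(3C) on top of a single system call. The fragment below is a minimal userland sketch of that idea only, not the actual libc implementation; _brk_raw() is a hypothetical stand-in for whatever private stub issues the raw brk trap, assumed to return the current break for a zero argument and 0 on success / -1 with errno set otherwise, matching the kernel entry point shown above.

#include <stdint.h>

/*
 * Hypothetical raw trap stub (illustration only): _brk_raw(0) returns the
 * current break; for a non-zero argument it returns 0 on success or -1
 * with errno set, as the kernel brk() entry point above arranges.
 */
extern intptr_t _brk_raw(uintptr_t nva);

void *
my_sbrk(intptr_t incr)
{
	uintptr_t oldbrk = (uintptr_t)_brk_raw(0);	/* current break */

	if (incr != 0 && _brk_raw(oldbrk + incr) != 0)
		return ((void *)-1);		/* errno left set by the call */

	return ((void *)oldbrk);
}
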
  96 /*
  97  * Algorithm: call arch-specific map_pgsz to get best page size to use,
  98  * then call brk_internal().
  99  * Returns 0 on success.
 100  */
 101 static int
 102 brk_lpg(caddr_t nva)
 103 {
 104         struct proc *p = curproc;
 105         size_t pgsz, len;
 106         caddr_t addr, brkend;


 578         }
 579 
 580         /*
 581          * Use the seg_vn segment driver; passing in the NULL amp
 582          * gives the desired "cloning" effect.
 583          */
 584         vn_a.vp = NULL;
 585         vn_a.offset = 0;
 586         vn_a.type = flags & MAP_TYPE;
 587         vn_a.prot = uprot;
 588         vn_a.maxprot = PROT_ALL;
 589         vn_a.flags = flags & ~MAP_TYPE;
 590         vn_a.cred = CRED();
 591         vn_a.amp = NULL;
 592         vn_a.szc = 0;
 593         vn_a.lgrp_mem_policy_flags = 0;
 594 
 595         return (as_map(as, *addrp, len, segvn_create, &vn_a));
 596 }
 597 
 598 static int
 599 smmap_common(caddr_t *addrp, size_t len,
 600     int prot, int flags, struct file *fp, offset_t pos)
 601 {
 602         struct vnode *vp;
 603         struct as *as = curproc->p_as;
 604         uint_t uprot, maxprot, type;
 605         int error;
 606         int in_crit = 0;
 607 
 608         if ((flags & ~(MAP_SHARED | MAP_PRIVATE | MAP_FIXED | _MAP_NEW |
 609             _MAP_LOW32 | MAP_NORESERVE | MAP_ANON | MAP_ALIGN |
 610             MAP_TEXT | MAP_INITDATA)) != 0) {
 611                 /* | MAP_RENAME */      /* not implemented, let user know */
 612                 return (EINVAL);
 613         }
 614 
 615         if ((flags & MAP_TEXT) && !(prot & PROT_EXEC)) {
 616                 return (EINVAL);
 617         }
 618 
 619         if ((flags & (MAP_TEXT | MAP_INITDATA)) == (MAP_TEXT | MAP_INITDATA)) {
 620                 return (EINVAL);
 621         }
 622 
 623         if ((flags & (MAP_FIXED | _MAP_RANDOMIZE)) ==
 624             (MAP_FIXED | _MAP_RANDOMIZE)) {
 625                 return (EINVAL);
 626         }
 627 
 628         /*
 629          * If it's not a fixed allocation and mmap ASLR is enabled, randomize
 630          * it.
 631          */
 632         if (((flags & MAP_FIXED) == 0) &&
 633             secflag_enabled(curproc, PROC_SEC_ASLR))
 634                 flags |= _MAP_RANDOMIZE;
 635 
 636 #if defined(__sparc)
 637         /*
 638          * See if this is an "old mmap call".  If so, remember this
 639          * fact and convert the flags value given to mmap to indicate
 640          * the specified address in the system call must be used.
 641          * _MAP_NEW is set by all new uses of mmap.
 642          */
 643         if ((flags & _MAP_NEW) == 0)
 644                 flags |= MAP_FIXED;
 645 #endif
 646         flags &= ~_MAP_NEW;
 647 
 648         type = flags & MAP_TYPE;
 649         if (type != MAP_PRIVATE && type != MAP_SHARED)
 650                 return (EINVAL);
 651 
 652 




  45 #include <sys/cred.h>
  46 #include <sys/vnode.h>
  47 #include <sys/vfs.h>
  48 #include <sys/vm.h>
  49 #include <sys/file.h>
  50 #include <sys/mman.h>
  51 #include <sys/vmparam.h>
  52 #include <sys/fcntl.h>
  53 #include <sys/lwpchan_impl.h>
  54 #include <sys/nbmlock.h>
  55 
  56 #include <vm/hat.h>
  57 #include <vm/as.h>
  58 #include <vm/seg.h>
  59 #include <vm/seg_dev.h>
  60 #include <vm/seg_vn.h>
  61 
  62 int use_brk_lpg = 1;
  63 int use_stk_lpg = 1;
  64 
  65 /*
  66  * If set, we will not randomize mappings where the 'addr' argument is
  67  * non-NULL and not an alignment.
  68  */
  69 int aslr_respect_mmap_hint = 0;
  70 
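To make the comment above concrete: on illumos the addr argument to mmap(2) can be either an ordinary placement hint or, with MAP_ALIGN, a requested alignment. The hedged 64-bit userland sketch below shows both forms (the address and length values are arbitrary); under this change, only the plain hint is exempted from randomization, and only when aslr_respect_mmap_hint is nonzero.

#include <sys/mman.h>
#include <stddef.h>

void
hint_examples(size_t len)
{
	/*
	 * Plain placement hint: addr suggests where the mapping should go.
	 * With ASLR enabled for the process, the hint is randomized away
	 * unless aslr_respect_mmap_hint has been set to 1.
	 */
	void *hinted = mmap((void *)0x200000000UL, len,
	    PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);

	/*
	 * MAP_ALIGN: addr carries a required alignment (2MB here), not a
	 * placement, so the mapping is still randomized either way.
	 */
	void *aligned = mmap((void *)(2UL * 1024 * 1024), len,
	    PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON | MAP_ALIGN,
	    -1, 0);

	(void) hinted;
	(void) aligned;
}
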
  71 static int brk_lpg(caddr_t nva);
  72 static int grow_lpg(caddr_t sp);
  73 
  74 intptr_t
  75 brk(caddr_t nva)
  76 {
  77         int error;
  78         proc_t *p = curproc;
  79 
  80         /*
  81          * Serialize brk operations on an address space.
  82          * This also serves as the lock protecting p_brksize
  83          * and p_brkpageszc.
  84          */
  85         as_rangelock(p->p_as);
  86 
  87         /*
  88          * As a special case to aid the implementation of sbrk(3C), if given a
  89          * new brk of 0, return the current brk.  We'll hide this in brk(3C).
  90          */
  91         if (nva == 0) {
  92                 as_rangeunlock(p->p_as);
  93                 return ((intptr_t)(p->p_brkbase + p->p_brksize));
  94         }
  95 
  96         if (use_brk_lpg && (p->p_flag & SAUTOLPG) != 0) {
  97                 error = brk_lpg(nva);
  98         } else {
  99                 error = brk_internal(nva, p->p_brkpageszc);
 100         }
 101         as_rangeunlock(p->p_as);
 102         return ((error != 0 ? set_errno(error) : 0));
 103 }
 104 
 105 /*
 106  * Algorithm: call arch-specific map_pgsz to get best page size to use,
 107  * then call brk_internal().
 108  * Returns 0 on success.
 109  */
 110 static int
 111 brk_lpg(caddr_t nva)
 112 {
 113         struct proc *p = curproc;
 114         size_t pgsz, len;
 115         caddr_t addr, brkend;


 587         }
 588 
 589         /*
 590          * Use the seg_vn segment driver; passing in the NULL amp
 591          * gives the desired "cloning" effect.
 592          */
 593         vn_a.vp = NULL;
 594         vn_a.offset = 0;
 595         vn_a.type = flags & MAP_TYPE;
 596         vn_a.prot = uprot;
 597         vn_a.maxprot = PROT_ALL;
 598         vn_a.flags = flags & ~MAP_TYPE;
 599         vn_a.cred = CRED();
 600         vn_a.amp = NULL;
 601         vn_a.szc = 0;
 602         vn_a.lgrp_mem_policy_flags = 0;
 603 
 604         return (as_map(as, *addrp, len, segvn_create, &vn_a));
 605 }
 606 
 607 #define RANDOMIZABLE_MAPPING(addr, flags) (((flags & MAP_FIXED) == 0) && \
 608         !(((flags & MAP_ALIGN) == 0) && (addr != 0) && aslr_respect_mmap_hint))
 609 
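Because of the double negation, the macro above can be easier to read as a list of cases. The stand-alone sketch below restates the same predicate with locally defined stand-in flag bits (the real MAP_FIXED and MAP_ALIGN values live in sys/mman.h) purely to enumerate when a mapping is considered randomizable under this change.

#include <stdio.h>

/* Stand-in flag bits for illustration only; not the real sys/mman.h values. */
#define	X_MAP_FIXED	0x1
#define	X_MAP_ALIGN	0x2

static int x_respect_hint = 1;	/* plays the role of aslr_respect_mmap_hint */

/* Same logic as RANDOMIZABLE_MAPPING, written out as cases. */
static int
randomizable(unsigned long addr, int flags)
{
	if (flags & X_MAP_FIXED)
		return (0);	/* explicit placement: never randomize */
	if ((flags & X_MAP_ALIGN) == 0 && addr != 0 && x_respect_hint)
		return (0);	/* plain hint, and we were told to respect it */
	return (1);	/* no hint, an alignment request, or hints ignored */
}

int
main(void)
{
	printf("%d\n", randomizable(0, 0));			/* 1: no hint given */
	printf("%d\n", randomizable(0x10000, 0));		/* 0: hint respected */
	printf("%d\n", randomizable(0x200000, X_MAP_ALIGN));	/* 1: alignment, not a hint */
	printf("%d\n", randomizable(0x10000, X_MAP_FIXED));	/* 0: fixed mapping */
	return (0);
}
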
 610 static int
 611 smmap_common(caddr_t *addrp, size_t len,
 612     int prot, int flags, struct file *fp, offset_t pos)
 613 {
 614         struct vnode *vp;
 615         struct as *as = curproc->p_as;
 616         uint_t uprot, maxprot, type;
 617         int error;
 618         int in_crit = 0;
 619 
 620         if ((flags & ~(MAP_SHARED | MAP_PRIVATE | MAP_FIXED | _MAP_NEW |
 621             _MAP_LOW32 | MAP_NORESERVE | MAP_ANON | MAP_ALIGN |
 622             MAP_TEXT | MAP_INITDATA)) != 0) {
 623                 /* | MAP_RENAME */      /* not implemented, let user know */
 624                 return (EINVAL);
 625         }
 626 
 627         if ((flags & MAP_TEXT) && !(prot & PROT_EXEC)) {
 628                 return (EINVAL);
 629         }
 630 
 631         if ((flags & (MAP_TEXT | MAP_INITDATA)) == (MAP_TEXT | MAP_INITDATA)) {
 632                 return (EINVAL);
 633         }
 634 
 635         if ((flags & (MAP_FIXED | _MAP_RANDOMIZE)) ==
 636             (MAP_FIXED | _MAP_RANDOMIZE)) {
 637                 return (EINVAL);
 638         }
 639 
 640         /*
 641          * If the mapping is randomizable (see RANDOMIZABLE_MAPPING above)
 642          * and mmap ASLR is enabled for the process, randomize it.
 643          */
 644         if (RANDOMIZABLE_MAPPING(*addrp, flags) &&
 645             secflag_enabled(curproc, PROC_SEC_ASLR))
 646                 flags |= _MAP_RANDOMIZE;
 647 
 648 #if defined(__sparc)
 649         /*
 650          * See if this is an "old mmap call".  If so, remember this
 651          * fact and convert the flags value given to mmap to indicate
 652          * the specified address in the system call must be used.
 653          * _MAP_NEW is set by all new uses of mmap.
 654          */
 655         if ((flags & _MAP_NEW) == 0)
 656                 flags |= MAP_FIXED;
 657 #endif
 658         flags &= ~_MAP_NEW;
 659 
 660         type = flags & MAP_TYPE;
 661         if (type != MAP_PRIVATE && type != MAP_SHARED)
 662                 return (EINVAL);
 663 
 664