Print this page
sync further changes from uts/aslr
7029 want per-process exploit mitigation features (secflags)
7030 want basic address space layout randomization (aslr)
7031 noexec_user_stack should be a secflag
7032 want a means to forbid mappings around NULL.


  45 #include <sys/cred.h>
  46 #include <sys/vnode.h>
  47 #include <sys/vfs.h>
  48 #include <sys/vm.h>
  49 #include <sys/file.h>
  50 #include <sys/mman.h>
  51 #include <sys/vmparam.h>
  52 #include <sys/fcntl.h>
  53 #include <sys/lwpchan_impl.h>
  54 #include <sys/nbmlock.h>
  55 
  56 #include <vm/hat.h>
  57 #include <vm/as.h>
  58 #include <vm/seg.h>
  59 #include <vm/seg_dev.h>
  60 #include <vm/seg_vn.h>
  61 
  62 int use_brk_lpg = 1;	/* tunable: allow the large-page path in brk() */
  63 int use_stk_lpg = 1;	/* tunable: stack counterpart (use not visible in this excerpt) */
  64 






	/* Forward declarations of the large-page-aware grow helpers. */
  65 static int brk_lpg(caddr_t nva);
  66 static int grow_lpg(caddr_t sp);
  67 
  68 int
  69 brk(caddr_t nva)
  70 {
	/*
	 * brk(2) entry point: resize the process heap so it ends at 'nva'.
	 * Returns 0 on success; on failure sets errno via set_errno() and
	 * returns that value.
	 */
  71         int error;
  72         proc_t *p = curproc;
  73 
  74         /*
  75          * Serialize brk operations on an address space.
  76          * This also serves as the lock protecting p_brksize
  77          * and p_brkpageszc.
  78          */
  79         as_rangelock(p->p_as);










  80         if (use_brk_lpg && (p->p_flag & SAUTOLPG) != 0) {
  81                 error = brk_lpg(nva);	/* pick best large page size first */
  82         } else {
  83                 error = brk_internal(nva, p->p_brkpageszc);
  84         }
  85         as_rangeunlock(p->p_as);
  86         return ((error != 0 ? set_errno(error) : 0));
  87 }
  88 
  89 /*
  90  * Algorithm: call arch-specific map_pgsz to get best page size to use,
  91  * then call brk_internal().
  92  * Returns 0 on success.
  93  */
  94 static int
  95 brk_lpg(caddr_t nva)
  96 {
  97         struct proc *p = curproc;
  98         size_t pgsz, len;
  99         caddr_t addr, brkend;


 473                         crargs.szc = AS_MAP_STACK;
 474                 }
 475         } else {
 476                 crargs.szc = AS_MAP_NO_LPOOB;
 477         }
 478         crargs.lgrp_mem_policy_flags = LGRP_MP_FLAG_EXTEND_DOWN;
 479 
 480         if ((error = as_map(p->p_as, p->p_usrstack - newsize, newsize - oldsize,
 481             segvn_create, &crargs)) != 0) {
 482                 if (error == EAGAIN) {
 483                         cmn_err(CE_WARN, "Sorry, no swap space to grow stack "
 484                             "for pid %d (%s)", p->p_pid, PTOU(p)->u_comm);
 485                 }
 486                 return (error);
 487         }
 488         p->p_stksize = newsize;
 489         return (0);
 490 }
 491 
 492 /*
 493  * Find address for user to map.
 494  * If MAP_FIXED is not specified, we can pick any address we want, but we will
 495  * first try the value in *addrp if it is non-NULL.  Thus this is implementing
 496  * a way to try and get a preferred address.
 497  */
 498 int
 499 choose_addr(struct as *as, caddr_t *addrp, size_t len, offset_t off,
 500     int vacalign, uint_t flags)
 501 {
 502         caddr_t basep = (caddr_t)(uintptr_t)((uintptr_t)*addrp & PAGEMASK); /* page-aligned hint */
 503         size_t lenp = len;
 504 
 505         ASSERT(AS_ISCLAIMGAP(as));      /* searches should be serialized */
 506         if (flags & MAP_FIXED) {
 507                 /* Caller dictates the address: evict whatever is there. */
 508                 (void) as_unmap(as, *addrp, len);
 508                 return (0);
 509         } else if (basep != NULL && ((flags & MAP_ALIGN) == 0) &&

 510             !as_gap(as, len, &basep, &lenp, 0, *addrp)) {
 511                 /* User supplied address was available */
 512                 *addrp = basep;
 513         } else {
 514                 /*
 515                  * No user supplied address or the address supplied was not
 516                  * available.
 517                  */
 518                 map_addr(addrp, len, off, vacalign, flags);
 519         }
 520         if (*addrp == NULL)
 521                 return (ENOMEM);	/* no usable address found */
 522         return (0);
 523 }
 524 
 525 
 526 /*
 527  * Used for MAP_ANON - fast way to get anonymous pages
 528  */
 529 static int


 570         }
 571 
 572         /*
 573          * Use the seg_vn segment driver; passing in the NULL amp
 574          * gives the desired "cloning" effect.
 575          */
 576         vn_a.vp = NULL;
 577         vn_a.offset = 0;
 578         vn_a.type = flags & MAP_TYPE;
 579         vn_a.prot = uprot;
 580         vn_a.maxprot = PROT_ALL;
 581         vn_a.flags = flags & ~MAP_TYPE;
 582         vn_a.cred = CRED();
 583         vn_a.amp = NULL;
 584         vn_a.szc = 0;
 585         vn_a.lgrp_mem_policy_flags = 0;
 586 
 587         return (as_map(as, *addrp, len, segvn_create, &vn_a));
 588 }
 589 



 590 static int
 591 smmap_common(caddr_t *addrp, size_t len,
 592     int prot, int flags, struct file *fp, offset_t pos)
 593 {
 594         struct vnode *vp;
 595         struct as *as = curproc->p_as;
 596         uint_t uprot, maxprot, type;
 597         int error;
 598         int in_crit = 0;
 599 
 600         if ((flags & ~(MAP_SHARED | MAP_PRIVATE | MAP_FIXED | _MAP_NEW |
 601             _MAP_LOW32 | MAP_NORESERVE | MAP_ANON | MAP_ALIGN |
 602             MAP_TEXT | MAP_INITDATA)) != 0) {
 603                 /* | MAP_RENAME */      /* not implemented, let user know */
 604                 return (EINVAL);
 605         }
 606 
 607         if ((flags & MAP_TEXT) && !(prot & PROT_EXEC)) {
 608                 return (EINVAL);
 609         }
 610 
 611         if ((flags & (MAP_TEXT | MAP_INITDATA)) == (MAP_TEXT | MAP_INITDATA)) {
 612                 return (EINVAL);
 613         }
 614 













 615 #if defined(__sparc)
 616         /*
 617          * See if this is an "old mmap call".  If so, remember this
 618          * fact and convert the flags value given to mmap to indicate
 619          * the specified address in the system call must be used.
 620          * _MAP_NEW is turned set by all new uses of mmap.
 621          */
 622         if ((flags & _MAP_NEW) == 0)
 623                 flags |= MAP_FIXED;
 624 #endif
 625         flags &= ~_MAP_NEW;
 626 
 627         type = flags & MAP_TYPE;
 628         if (type != MAP_PRIVATE && type != MAP_SHARED)
 629                 return (EINVAL);
 630 
 631 
 632         if (flags & MAP_ALIGN) {
 633 
 634                 if (flags & MAP_FIXED)
 635                         return (EINVAL);
 636 
 637                 /* alignment needs to be a power of 2 >= page size */
 638                 if (((uintptr_t)*addrp < PAGESIZE && (uintptr_t)*addrp != 0) ||
 639                     !ISP2((uintptr_t)*addrp))
 640                         return (EINVAL);
 641         }
 642         /*
 643          * Check for bad lengths and file position.
 644          * We let the VOP_MAP routine check for negative lengths
 645          * since on some vnode types this might be appropriate.
 646          */
 647         if (len == 0 || (pos & (u_offset_t)PAGEOFFSET) != 0)
 648                 return (EINVAL);
 649 
 650         maxprot = PROT_ALL;             /* start out allowing all accesses */
 651         uprot = prot | PROT_USER;
 652 
 653         if (fp == NULL) {




  45 #include <sys/cred.h>
  46 #include <sys/vnode.h>
  47 #include <sys/vfs.h>
  48 #include <sys/vm.h>
  49 #include <sys/file.h>
  50 #include <sys/mman.h>
  51 #include <sys/vmparam.h>
  52 #include <sys/fcntl.h>
  53 #include <sys/lwpchan_impl.h>
  54 #include <sys/nbmlock.h>
  55 
  56 #include <vm/hat.h>
  57 #include <vm/as.h>
  58 #include <vm/seg.h>
  59 #include <vm/seg_dev.h>
  60 #include <vm/seg_vn.h>
  61 
  62 int use_brk_lpg = 1;	/* tunable: allow the large-page path in brk() */
  63 int use_stk_lpg = 1;	/* tunable: stack counterpart (use not visible in this excerpt) */
  64 
  65 /*
  66  * If set, we will not randomize mappings where the 'addr' argument is
  67  * non-NULL and not an alignment.
  68  */
  69 int aslr_respect_mmap_hint = 0;	/* tunable; consumed by RANDOMIZABLE_MAPPING() */
  70 
	/* Forward declarations of the large-page-aware grow helpers. */
  71 static int brk_lpg(caddr_t nva);
  72 static int grow_lpg(caddr_t sp);
  73 
  74 intptr_t
  75 brk(caddr_t nva)
  76 {
	/*
	 * brk(2) entry point: resize the process heap so it ends at 'nva'.
	 * Returns 0 on success or sets errno on failure.  A 'nva' of 0 is a
	 * query for the current brk; the intptr_t return type allows that
	 * address to be handed back to userland.
	 */
  77         int error;
  78         proc_t *p = curproc;
  79 
  80         /*
  81          * Serialize brk operations on an address space.
  82          * This also serves as the lock protecting p_brksize
  83          * and p_brkpageszc.
  84          */
  85         as_rangelock(p->p_as);
  86 
  87         /*
  88          * As a special case to aid the implementation of sbrk(3C), if given a
  89          * new brk of 0, return the current brk.  We'll hide this in brk(3C).
  90          */
  91         if (nva == 0) {
  92                 as_rangeunlock(p->p_as);
  93                 return ((intptr_t)(p->p_brkbase + p->p_brksize));
  94         }
  95 
  96         if (use_brk_lpg && (p->p_flag & SAUTOLPG) != 0) {
  97                 error = brk_lpg(nva);	/* pick best large page size first */
  98         } else {
  99                 error = brk_internal(nva, p->p_brkpageszc);
 100         }
 101         as_rangeunlock(p->p_as);
 102         return ((error != 0 ? set_errno(error) : 0));
 103 }
 104 
 105 /*
 106  * Algorithm: call arch-specific map_pgsz to get best page size to use,
 107  * then call brk_internal().
 108  * Returns 0 on success.
 109  */
 110 static int
 111 brk_lpg(caddr_t nva)
 112 {
 113         struct proc *p = curproc;
 114         size_t pgsz, len;
 115         caddr_t addr, brkend;


 489                         crargs.szc = AS_MAP_STACK;
 490                 }
 491         } else {
 492                 crargs.szc = AS_MAP_NO_LPOOB;
 493         }
 494         crargs.lgrp_mem_policy_flags = LGRP_MP_FLAG_EXTEND_DOWN;
 495 
 496         if ((error = as_map(p->p_as, p->p_usrstack - newsize, newsize - oldsize,
 497             segvn_create, &crargs)) != 0) {
 498                 if (error == EAGAIN) {
 499                         cmn_err(CE_WARN, "Sorry, no swap space to grow stack "
 500                             "for pid %d (%s)", p->p_pid, PTOU(p)->u_comm);
 501                 }
 502                 return (error);
 503         }
 504         p->p_stksize = newsize;
 505         return (0);
 506 }
 507 
 508 /*
 509  * Find address for user to map.  If MAP_FIXED is not specified, we can pick
 510  * any address we want, but we will first try the value in *addrp if it is
 511  * non-NULL and _MAP_RANDOMIZE is not set.  Thus this is implementing a way to
 512  * try and get a preferred address.
 513  */
 514 int
 515 choose_addr(struct as *as, caddr_t *addrp, size_t len, offset_t off,
 516     int vacalign, uint_t flags)
 517 {
 518         caddr_t basep = (caddr_t)(uintptr_t)((uintptr_t)*addrp & PAGEMASK); /* page-aligned hint */
 519         size_t lenp = len;
 520 
 521         ASSERT(AS_ISCLAIMGAP(as));      /* searches should be serialized */
 522         if (flags & MAP_FIXED) {
 523                 /* Caller dictates the address: evict whatever is there. */
 523                 (void) as_unmap(as, *addrp, len);
 524                 return (0);
 525         } else if (basep != NULL &&
 526             ((flags & (MAP_ALIGN | _MAP_RANDOMIZE)) == 0) &&
 527             !as_gap(as, len, &basep, &lenp, 0, *addrp)) {
		/* Note: a randomized mapping deliberately ignores the hint. */
 528                 /* User supplied address was available */
 529                 *addrp = basep;
 530         } else {
 531                 /*
 532                  * No user supplied address or the address supplied was not
 533                  * available.
 534                  */
 535                 map_addr(addrp, len, off, vacalign, flags);
 536         }
 537         if (*addrp == NULL)
 538                 return (ENOMEM);	/* no usable address found */
 539         return (0);
 540 }
 541 
 542 
 543 /*
 544  * Used for MAP_ANON - fast way to get anonymous pages
 545  */
 546 static int


 587         }
 588 
 589         /*
 590          * Use the seg_vn segment driver; passing in the NULL amp
 591          * gives the desired "cloning" effect.
 592          */
 593         vn_a.vp = NULL;
 594         vn_a.offset = 0;
 595         vn_a.type = flags & MAP_TYPE;
 596         vn_a.prot = uprot;
 597         vn_a.maxprot = PROT_ALL;
 598         vn_a.flags = flags & ~MAP_TYPE;
 599         vn_a.cred = CRED();
 600         vn_a.amp = NULL;
 601         vn_a.szc = 0;
 602         vn_a.lgrp_mem_policy_flags = 0;
 603 
 604         return (as_map(as, *addrp, len, segvn_create, &vn_a));
 605 }
 606 
 /*
  * A mapping is eligible for ASLR randomization unless it is MAP_FIXED, or
  * the caller gave a plain address hint (non-zero addr without MAP_ALIGN)
  * and the aslr_respect_mmap_hint tunable says such hints must be honored.
  */
 607 #define RANDOMIZABLE_MAPPING(addr, flags) (((flags & MAP_FIXED) == 0) && \
 608         !(((flags & MAP_ALIGN) == 0) && (addr != 0) && aslr_respect_mmap_hint))
 609 
 610 static int
 611 smmap_common(caddr_t *addrp, size_t len,
 612     int prot, int flags, struct file *fp, offset_t pos)
 613 {
 614         struct vnode *vp;
 615         struct as *as = curproc->p_as;
 616         uint_t uprot, maxprot, type;
 617         int error;
 618         int in_crit = 0;
 619 
 620         if ((flags & ~(MAP_SHARED | MAP_PRIVATE | MAP_FIXED | _MAP_NEW |
 621             _MAP_LOW32 | MAP_NORESERVE | MAP_ANON | MAP_ALIGN |
 622             MAP_TEXT | MAP_INITDATA)) != 0) {
 623                 /* | MAP_RENAME */      /* not implemented, let user know */
 624                 return (EINVAL);
 625         }
 626 
 627         if ((flags & MAP_TEXT) && !(prot & PROT_EXEC)) {
 628                 return (EINVAL);
 629         }
 630 
 631         if ((flags & (MAP_TEXT | MAP_INITDATA)) == (MAP_TEXT | MAP_INITDATA)) {
 632                 return (EINVAL);
 633         }
 634 
 635         if ((flags & (MAP_FIXED | _MAP_RANDOMIZE)) ==
 636             (MAP_FIXED | _MAP_RANDOMIZE)) {
 637                 return (EINVAL);
 638         }
 639 
 640         /*
 641          * If it's not a fixed allocation and mmap ASLR is enabled, randomize
 642          * it.
 643          */
 644         if (RANDOMIZABLE_MAPPING(*addrp, flags) &&
 645             secflag_enabled(curproc, PROC_SEC_ASLR))
 646                 flags |= _MAP_RANDOMIZE;
 647 
 648 #if defined(__sparc)
 649         /*
 650          * See if this is an "old mmap call".  If so, remember this
 651          * fact and convert the flags value given to mmap to indicate
 652          * the specified address in the system call must be used.
 653          * _MAP_NEW is turned set by all new uses of mmap.
 654          */
 655         if ((flags & _MAP_NEW) == 0)
 656                 flags |= MAP_FIXED;
 657 #endif
 658         flags &= ~_MAP_NEW;
 659 
 660         type = flags & MAP_TYPE;
 661         if (type != MAP_PRIVATE && type != MAP_SHARED)
 662                 return (EINVAL);
 663 
 664 
 665         if (flags & MAP_ALIGN) {

 666                 if (flags & MAP_FIXED)
 667                         return (EINVAL);
 668 
 669                 /* alignment needs to be a power of 2 >= page size */
 670                 if (((uintptr_t)*addrp < PAGESIZE && (uintptr_t)*addrp != 0) ||
 671                     !ISP2((uintptr_t)*addrp))
 672                         return (EINVAL);
 673         }
 674         /*
 675          * Check for bad lengths and file position.
 676          * We let the VOP_MAP routine check for negative lengths
 677          * since on some vnode types this might be appropriate.
 678          */
 679         if (len == 0 || (pos & (u_offset_t)PAGEOFFSET) != 0)
 680                 return (EINVAL);
 681 
 682         maxprot = PROT_ALL;             /* start out allowing all accesses */
 683         uprot = prot | PROT_USER;
 684 
 685         if (fp == NULL) {