#include <sys/vm.h>
#include <sys/file.h>
#include <sys/mman.h>
#include <sys/vmparam.h>
#include <sys/fcntl.h>
#include <sys/lwpchan_impl.h>
#include <sys/nbmlock.h>

#include <vm/hat.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <vm/seg_dev.h>
#include <vm/seg_vn.h>

int use_brk_lpg = 1;
int use_stk_lpg = 1;
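
/*
 * use_brk_lpg and use_stk_lpg gate the automatic large-page paths for the
 * heap and stack of processes running with SAUTOLPG set (see brk() below
 * and the corresponding stack-growth code).  A minimal sketch, assuming the
 * usual /etc/system tunable mechanism, of disabling both system-wide:
 *
 *	set use_brk_lpg = 0
 *	set use_stk_lpg = 0
 */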

static int brk_lpg(caddr_t nva);
static int grow_lpg(caddr_t sp);

intptr_t
brk(caddr_t nva)
{
	int error;
	proc_t *p = curproc;

	/*
	 * As a special case to aid the implementation of sbrk(3C), if given a
	 * new brk of 0, return the current brk.  We'll hide this in brk(3C).
	 */
	if (nva == 0)
		return ((intptr_t)(p->p_brkbase + p->p_brksize));

	/*
	 * Serialize brk operations on an address space.
	 * This also serves as the lock protecting p_brksize
	 * and p_brkpageszc.
	 */
	as_rangelock(p->p_as);
	if (use_brk_lpg && (p->p_flag & SAUTOLPG) != 0) {
		error = brk_lpg(nva);
	} else {
		error = brk_internal(nva, p->p_brkpageszc);
	}
	as_rangeunlock(p->p_as);
	return ((error != 0 ? set_errno(error) : 0));
}
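
/*
 * A minimal userland sketch of how the brk(0) special case above can serve
 * an sbrk(3C)-style wrapper for an increment incr (illustrative only; it
 * assumes the raw trap is reachable via syscall(SYS_brk, ...) and omits
 * locking and overflow checks):
 *
 *	caddr_t old = (caddr_t)syscall(SYS_brk, 0);
 *	if (incr != 0 && syscall(SYS_brk, old + incr) != 0)
 *		return ((void *)-1);
 *	return (old);
 *
 * The first call returns the current break via the special case; the second
 * returns 0 on success or -1 with errno set.
 */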

/*
 * Algorithm: call arch-specific map_pgsz to get best page size to use,
 * then call brk_internal().
 * Returns 0 on success.
 */
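
/*
 * Rough shape of that algorithm, sketched from the comment above rather
 * than copied from the (elided) body; the exact arguments passed to the
 * existing map_pgsz() and page_szc() helpers are assumptions:
 *
 *	len = nva - p->p_bssbase;
 *	pgsz = map_pgsz(MAPPGSZ_HEAP, p, p->p_bssbase, len, 0);
 *	szc = page_szc(pgsz);
 *	return (brk_internal(nva, szc));
 */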
static int

[...]

			crargs.szc = AS_MAP_STACK;
		}
	} else {
		crargs.szc = AS_MAP_NO_LPOOB;
	}
	crargs.lgrp_mem_policy_flags = LGRP_MP_FLAG_EXTEND_DOWN;

	if ((error = as_map(p->p_as, p->p_usrstack - newsize, newsize - oldsize,
	    segvn_create, &crargs)) != 0) {
		if (error == EAGAIN) {
			cmn_err(CE_WARN, "Sorry, no swap space to grow stack "
			    "for pid %d (%s)", p->p_pid, PTOU(p)->u_comm);
		}
		return (error);
	}
	p->p_stksize = newsize;
	return (0);
}

/*
 * Find address for user to map. If MAP_FIXED is not specified, we can pick
 * any address we want, but we will first try the value in *addrp if it is
 * non-NULL and _MAP_RANDOMIZE is not set. Thus this implements a way to try
 * to get a preferred address.
 */
int
choose_addr(struct as *as, caddr_t *addrp, size_t len, offset_t off,
    int vacalign, uint_t flags)
{
	caddr_t basep = (caddr_t)(uintptr_t)((uintptr_t)*addrp & PAGEMASK);
	size_t lenp = len;

	ASSERT(AS_ISCLAIMGAP(as));	/* searches should be serialized */
	if (flags & MAP_FIXED) {
		(void) as_unmap(as, *addrp, len);
		return (0);
	} else if (basep != NULL &&
	    ((flags & (MAP_ALIGN | _MAP_RANDOMIZE)) == 0) &&
	    !as_gap(as, len, &basep, &lenp, 0, *addrp)) {
		/* User supplied address was available */
		*addrp = basep;
	} else {
		/*
		 * No user supplied address or the address supplied was not
		 * available.
		 */
		map_addr(addrp, len, off, vacalign, flags);
	}
	if (*addrp == NULL)
		return (ENOMEM);
	return (0);
}
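
/*
 * Userland view of the hint handling above (a sketch of standard mmap(2)
 * behavior, not code from this file):
 *
 *	void *hint = (void *)0x200000000ULL;
 *	void *p = mmap(hint, len, PROT_READ | PROT_WRITE,
 *	    MAP_PRIVATE | MAP_ANON, -1, 0);
 *
 * Without MAP_FIXED, p equals hint only when that range is free (the
 * as_gap() check); otherwise map_addr() picks another address and no
 * existing mapping is disturbed.
 */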


/*
 * Used for MAP_ANON - fast way to get anonymous pages
 */
static int

[...]

	struct as *as = curproc->p_as;
	uint_t uprot, maxprot, type;
	int error;
	int in_crit = 0;

	if ((flags & ~(MAP_SHARED | MAP_PRIVATE | MAP_FIXED | _MAP_NEW |
	    _MAP_LOW32 | MAP_NORESERVE | MAP_ANON | MAP_ALIGN |
	    MAP_TEXT | MAP_INITDATA)) != 0) {
		/* | MAP_RENAME */	/* not implemented, let user know */
		return (EINVAL);
	}

	if ((flags & MAP_TEXT) && !(prot & PROT_EXEC)) {
		return (EINVAL);
	}

	if ((flags & (MAP_TEXT | MAP_INITDATA)) == (MAP_TEXT | MAP_INITDATA)) {
		return (EINVAL);
	}
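
	/*
	 * Note (descriptive, not from the original file): MAP_TEXT marks a
	 * mapping that will be used as program text and MAP_INITDATA marks
	 * the initialized data segment of an executable or shared object
	 * (normally only the run-time linker asks for the latter).  The
	 * checks above simply require that text be executable and that a
	 * single mapping not claim to be both.
	 */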

	if ((flags & (MAP_FIXED | _MAP_RANDOMIZE)) ==
	    (MAP_FIXED | _MAP_RANDOMIZE)) {
		return (EINVAL);
	}

	/*
	 * If it's not a fixed allocation and mmap ASLR is enabled, randomize
	 * it.
	 */
	if (((flags & MAP_FIXED) == 0) &&
	    secflag_enabled(curproc, PROC_SEC_ASLR))
		flags |= _MAP_RANDOMIZE;
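
	/*
	 * Once _MAP_RANDOMIZE is set here, choose_addr() above ignores any
	 * non-fixed address hint from the caller and lets map_addr() place
	 * the mapping, so an ASLR-enabled process gets a randomized rather
	 * than a hinted address.
	 */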

#if defined(__sparc)
	/*
	 * See if this is an "old mmap call".  If so, remember this
	 * fact and convert the flags value given to mmap to indicate
	 * that the specified address in the system call must be used.
	 * _MAP_NEW is set by all new uses of mmap.
	 */
	if ((flags & _MAP_NEW) == 0)
		flags |= MAP_FIXED;
#endif
	flags &= ~_MAP_NEW;

	type = flags & MAP_TYPE;
	if (type != MAP_PRIVATE && type != MAP_SHARED)
		return (EINVAL);


	if (flags & MAP_ALIGN) {
		if (flags & MAP_FIXED)
			return (EINVAL);

		/* alignment needs to be a power of 2 >= page size */
		if (((uintptr_t)*addrp < PAGESIZE && (uintptr_t)*addrp != 0) ||
		    !ISP2((uintptr_t)*addrp))
			return (EINVAL);
	}
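
	/*
	 * Illustrative MAP_ALIGN usage (a sketch of the documented mmap(2)
	 * semantics, not code from this file): the address argument carries
	 * the requested alignment, so a 4 MB aligned anonymous mapping could
	 * be requested as
	 *
	 *	void *p = mmap((void *)(4UL * 1024 * 1024), len,
	 *	    PROT_READ | PROT_WRITE,
	 *	    MAP_PRIVATE | MAP_ANON | MAP_ALIGN, -1, 0);
	 *
	 * An address of 0 leaves the alignment up to the system, which is
	 * why 0 passes the power-of-two check above.
	 */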
	/*
	 * Check for bad lengths and file position.
	 * We let the VOP_MAP routine check for negative lengths
	 * since on some vnode types this might be appropriate.
	 */
	if (len == 0 || (pos & (u_offset_t)PAGEOFFSET) != 0)
		return (EINVAL);
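
	/*
	 * For example (illustrative), both of these fail here with EINVAL
	 * before the object's VOP_MAP routine is ever consulted:
	 *
	 *	mmap(addr, 0, PROT_READ, MAP_SHARED, fd, 0);
	 *	mmap(addr, len, PROT_READ, MAP_SHARED, fd, 13);
	 *
	 * The first has a zero length, the second a file offset that is not
	 * page aligned.
	 */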

	maxprot = PROT_ALL;		/* start out allowing all accesses */
	uprot = prot | PROT_USER;

	if (fp == NULL) {