3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
23 */
24
25
26 /*
27 * VM - page locking primitives
28 */
29 #include <sys/param.h>
30 #include <sys/t_lock.h>
31 #include <sys/vtrace.h>
32 #include <sys/debug.h>
33 #include <sys/cmn_err.h>
34 #include <sys/bitmap.h>
35 #include <sys/lockstat.h>
36 #include <sys/sysmacros.h>
37 #include <sys/condvar_impl.h>
38 #include <vm/page.h>
39 #include <vm/seg_enum.h>
40 #include <vm/vm_dep.h>
41 #include <vm/seg_kmem.h>
42
347 * exclusive access to it, force the upgrade now.
348 * Again, we will fail to acquire p_selock if the
349 * page is not free and block.
350 */
351 upgraded = 1;
352 se = SE_EXCL;
353 VM_STAT_ADD(page_lock_upgrade);
354 }
355 }
356
357 if (se == SE_EXCL) {
358 if (!(es & SE_EXCL_WANTED) && (pp->p_selock & SE_EWANTED)) {
359 /*
360 * if the caller wants a writer lock (but did not
361 * specify exclusive access), and there is a pending
362 * writer that wants exclusive access, return failure
363 */
364 retval = 0;
365 } else if ((pp->p_selock & ~SE_EWANTED) == 0) {
366 /* no reader/writer lock held */
367 THREAD_KPRI_REQUEST();
368 /* this clears our setting of the SE_EWANTED bit */
369 pp->p_selock = SE_WRITER;
370 retval = 1;
371 } else {
372 /* page is locked */
373 if (es & SE_EXCL_WANTED) {
374 /* set the SE_EWANTED bit */
375 pp->p_selock |= SE_EWANTED;
376 }
377 retval = 0;
378 }
379 } else {
380 retval = 0;
381 if (pp->p_selock >= 0) {
382 if ((pp->p_selock & SE_EWANTED) == 0) {
383 pp->p_selock += SE_READER;
384 retval = 1;
385 }
386 }
387 }
534 return (1);
535 }
536 }
537 mutex_exit(pse);
538 return (0);
539 }
540 /*
541 * The page is free, so we really want SE_EXCL (below)
542 */
543 VM_STAT_ADD(page_try_reclaim_upgrade);
544 }
545
546 /*
547 * The caller wants a writer lock. We try for it only if
548 * SE_EWANTED is not set, or if the caller specified
549 * SE_EXCL_WANTED.
550 */
551 if (!(old & SE_EWANTED) || (es & SE_EXCL_WANTED)) {
552 if ((old & ~SE_EWANTED) == 0) {
553 /* no reader/writer lock held */
554 THREAD_KPRI_REQUEST();
555 /* this clears out our setting of the SE_EWANTED bit */
556 pp->p_selock = SE_WRITER;
557 mutex_exit(pse);
558 return (1);
559 }
560 }
561 if (es & SE_EXCL_WANTED) {
562 /* page is locked, set the SE_EWANTED bit */
563 pp->p_selock |= SE_EWANTED;
564 }
565 mutex_exit(pse);
566 return (0);
567 }
568
/*
 * Acquire a page's "shared/exclusive" lock, but never block.
 * Returns 1 on success, 0 on failure.
 *
 * p_selock encoding (as used below): 0 means unlocked, a negative
 * value means a writer holds the lock, and positive multiples of
 * SE_READER count shared holders; SE_EWANTED flags a pending
 * exclusive waiter.
 */
int
page_trylock(page_t *pp, se_t se)
{
	kmutex_t *pse = PAGE_SE_MUTEX(pp);	/* mutex guarding p_selock */

	mutex_enter(pse);
	if (pp->p_selock & SE_EWANTED || PP_RETIRED(pp) ||
	    (se == SE_SHARED && PP_PR_NOSHARE(pp))) {
		/*
		 * Fail if a pending writer wants exclusive access
		 * (SE_EWANTED), if the page is retired, or if a shared
		 * lock is requested on a page slated for retirement.
		 */
		mutex_exit(pse);
		VM_STAT_ADD(page_trylock_failed);
		return (0);
	}

	if (se == SE_EXCL) {
		/* exclusive: only available when nothing holds the lock */
		if (pp->p_selock == 0) {
			THREAD_KPRI_REQUEST();
			pp->p_selock = SE_WRITER;
			mutex_exit(pse);
			return (1);
		}
	} else {
		/* shared: available unless a writer holds it (negative) */
		if (pp->p_selock >= 0) {
			pp->p_selock += SE_READER;
			mutex_exit(pse);
			return (1);
		}
	}
	mutex_exit(pse);
	return (0);
}
608
/*
 * Variant of page_unlock() specifically for the page freelist
 * code. The mere existence of this code is a vile hack that
 * has resulted due to the backwards locking order of the page
 * freelist manager; please don't call it.
 *
 * Unlike page_unlock(), this never attempts to capture the page
 * once the lock count drops to zero.
 */
void
page_unlock_nocapture(page_t *pp)
{
	kmutex_t *pse = PAGE_SE_MUTEX(pp);	/* mutex guarding p_selock */
	selock_t old;

	mutex_enter(pse);

	old = pp->p_selock;
	if ((old & ~SE_EWANTED) == SE_READER) {
		/* last reader: clear the lock, preserving SE_EWANTED */
		pp->p_selock = old & ~SE_READER;
		if (CV_HAS_WAITERS(&pp->p_cv))
			cv_broadcast(&pp->p_cv);
	} else if ((old & ~SE_EWANTED) == SE_DELETED) {
		panic("page_unlock_nocapture: page %p is deleted", (void *)pp);
	} else if (old < 0) {
		/* negative p_selock: writer lock held; release it */
		THREAD_KPRI_RELEASE();
		pp->p_selock &= SE_EWANTED;	/* keep only the EWANTED bit */
		if (CV_HAS_WAITERS(&pp->p_cv))
			cv_broadcast(&pp->p_cv);
	} else if ((old & ~SE_EWANTED) > SE_READER) {
		/* more than one reader: just drop our share, no wakeup */
		pp->p_selock = old - SE_READER;
	} else {
		panic("page_unlock_nocapture: page %p is not locked",
		    (void *)pp);
	}

	mutex_exit(pse);
}
644
/*
 * Release the page's "shared/exclusive" lock and wake up anyone
 * who might be waiting for it.
 *
 * If the release leaves the page fully unlocked and the page is
 * pending capture (PR_CAPTURE set in p_toxic), the lock is re-taken
 * exclusively and page_unlock_capture() is invoked.
 */
void
page_unlock(page_t *pp)
{
	kmutex_t *pse = PAGE_SE_MUTEX(pp);	/* mutex guarding p_selock */
	selock_t old;

	mutex_enter(pse);

	old = pp->p_selock;
	if ((old & ~SE_EWANTED) == SE_READER) {
		/* last reader: clear the lock, preserving SE_EWANTED */
		pp->p_selock = old & ~SE_READER;
		if (CV_HAS_WAITERS(&pp->p_cv))
			cv_broadcast(&pp->p_cv);
	} else if ((old & ~SE_EWANTED) == SE_DELETED) {
		panic("page_unlock: page %p is deleted", (void *)pp);
	} else if (old < 0) {
		/* negative p_selock: writer lock held; release it */
		THREAD_KPRI_RELEASE();
		pp->p_selock &= SE_EWANTED;	/* keep only the EWANTED bit */
		if (CV_HAS_WAITERS(&pp->p_cv))
			cv_broadcast(&pp->p_cv);
	} else if ((old & ~SE_EWANTED) > SE_READER) {
		/* more than one reader: just drop our share, no wakeup */
		pp->p_selock = old - SE_READER;
	} else {
		panic("page_unlock: page %p is not locked", (void *)pp);
	}

	if (pp->p_selock == 0) {
		/*
		 * If the T_CAPTURING bit is set, that means that we should
		 * not try and capture the page again as we could recurse
		 * which could lead to a stack overflow panic or spending a
		 * relatively long time in the kernel making no progress.
		 */
		if ((pp->p_toxic & PR_CAPTURE) &&
		    !(curthread->t_flag & T_CAPTURING) &&
		    !PP_RETIRED(pp)) {
			/* re-take the lock exclusively, then capture */
			THREAD_KPRI_REQUEST();
			pp->p_selock = SE_WRITER;
			mutex_exit(pse);
			page_unlock_capture(pp);
		} else {
			mutex_exit(pse);
		}
	} else {
		mutex_exit(pse);
	}
}
696
697 /*
698 * Try to upgrade the lock on the page from a "shared" to an
699 * "exclusive" lock. Since this upgrade operation is done while
700 * holding the mutex protecting this page, no one else can acquire this page's
701 * lock and change the page. Thus, it is safe to drop the "shared"
702 * lock and attempt to acquire the "exclusive" lock.
703 *
704 * Returns 1 on success, 0 on failure.
705 */
706 int
707 page_tryupgrade(page_t *pp)
708 {
709 kmutex_t *pse = PAGE_SE_MUTEX(pp);
710
711 mutex_enter(pse);
712 if (!(pp->p_selock & SE_EWANTED)) {
713 /* no threads want exclusive access, try upgrade */
714 if (pp->p_selock == SE_READER) {
715 THREAD_KPRI_REQUEST();
716 /* convert to exclusive lock */
717 pp->p_selock = SE_WRITER;
718 mutex_exit(pse);
719 return (1);
720 }
721 }
722 mutex_exit(pse);
723 return (0);
724 }
725
726 /*
727 * Downgrade the "exclusive" lock on the page to a "shared" lock
728 * while holding the mutex protecting this page's p_selock field.
729 */
730 void
731 page_downgrade(page_t *pp)
732 {
733 kmutex_t *pse = PAGE_SE_MUTEX(pp);
734 int excl_waiting;
735
736 ASSERT((pp->p_selock & ~SE_EWANTED) != SE_DELETED);
737 ASSERT(PAGE_EXCL(pp));
738
739 mutex_enter(pse);
740 excl_waiting = pp->p_selock & SE_EWANTED;
741 THREAD_KPRI_RELEASE();
742 pp->p_selock = SE_READER | excl_waiting;
743 if (CV_HAS_WAITERS(&pp->p_cv))
744 cv_broadcast(&pp->p_cv);
745 mutex_exit(pse);
746 }
747
/*
 * Mark the page's shared/exclusive lock as deleted.  The caller must
 * hold the page exclusively and the page must already be dissociated
 * from any vnode (see the ASSERTs below).  Waiters are woken so they
 * can observe the SE_DELETED state.
 */
void
page_lock_delete(page_t *pp)
{
	kmutex_t *pse = PAGE_SE_MUTEX(pp);

	ASSERT(PAGE_EXCL(pp));
	ASSERT(pp->p_vnode == NULL);
	ASSERT(pp->p_offset == (u_offset_t)-1);
	ASSERT(!PP_ISFREE(pp));

	mutex_enter(pse);
	THREAD_KPRI_RELEASE();
	pp->p_selock = SE_DELETED;
	if (CV_HAS_WAITERS(&pp->p_cv))
		cv_broadcast(&pp->p_cv);
	mutex_exit(pse);
}
765
766 int
767 page_deleted(page_t *pp)
768 {
769 return (pp->p_selock == SE_DELETED);
770 }
771
772 /*
773 * Implement the io lock for pages
774 */
775 void
776 page_iolock_init(page_t *pp)
777 {
778 pp->p_iolock_state = 0;
779 cv_init(&pp->p_io_cv, NULL, CV_DEFAULT, NULL);
|
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright 2019 Joyent, Inc.
24 */
25
26
27 /*
28 * VM - page locking primitives
29 */
30 #include <sys/param.h>
31 #include <sys/t_lock.h>
32 #include <sys/vtrace.h>
33 #include <sys/debug.h>
34 #include <sys/cmn_err.h>
35 #include <sys/bitmap.h>
36 #include <sys/lockstat.h>
37 #include <sys/sysmacros.h>
38 #include <sys/condvar_impl.h>
39 #include <vm/page.h>
40 #include <vm/seg_enum.h>
41 #include <vm/vm_dep.h>
42 #include <vm/seg_kmem.h>
43
348 * exclusive access to it, force the upgrade now.
349 * Again, we will fail to acquire p_selock if the
350 * page is not free and block.
351 */
352 upgraded = 1;
353 se = SE_EXCL;
354 VM_STAT_ADD(page_lock_upgrade);
355 }
356 }
357
358 if (se == SE_EXCL) {
359 if (!(es & SE_EXCL_WANTED) && (pp->p_selock & SE_EWANTED)) {
360 /*
361 * if the caller wants a writer lock (but did not
362 * specify exclusive access), and there is a pending
363 * writer that wants exclusive access, return failure
364 */
365 retval = 0;
366 } else if ((pp->p_selock & ~SE_EWANTED) == 0) {
367 /* no reader/writer lock held */
368 /* this clears our setting of the SE_EWANTED bit */
369 pp->p_selock = SE_WRITER;
370 retval = 1;
371 } else {
372 /* page is locked */
373 if (es & SE_EXCL_WANTED) {
374 /* set the SE_EWANTED bit */
375 pp->p_selock |= SE_EWANTED;
376 }
377 retval = 0;
378 }
379 } else {
380 retval = 0;
381 if (pp->p_selock >= 0) {
382 if ((pp->p_selock & SE_EWANTED) == 0) {
383 pp->p_selock += SE_READER;
384 retval = 1;
385 }
386 }
387 }
534 return (1);
535 }
536 }
537 mutex_exit(pse);
538 return (0);
539 }
540 /*
541 * The page is free, so we really want SE_EXCL (below)
542 */
543 VM_STAT_ADD(page_try_reclaim_upgrade);
544 }
545
546 /*
547 * The caller wants a writer lock. We try for it only if
548 * SE_EWANTED is not set, or if the caller specified
549 * SE_EXCL_WANTED.
550 */
551 if (!(old & SE_EWANTED) || (es & SE_EXCL_WANTED)) {
552 if ((old & ~SE_EWANTED) == 0) {
553 /* no reader/writer lock held */
554 /* this clears out our setting of the SE_EWANTED bit */
555 pp->p_selock = SE_WRITER;
556 mutex_exit(pse);
557 return (1);
558 }
559 }
560 if (es & SE_EXCL_WANTED) {
561 /* page is locked, set the SE_EWANTED bit */
562 pp->p_selock |= SE_EWANTED;
563 }
564 mutex_exit(pse);
565 return (0);
566 }
567
/*
 * Acquire a page's "shared/exclusive" lock, but never block.
 * Returns 1 on success, 0 on failure.
 *
 * p_selock encoding (as used below): 0 means unlocked, a negative
 * value means a writer holds the lock, and positive multiples of
 * SE_READER count shared holders; SE_EWANTED flags a pending
 * exclusive waiter.
 */
int
page_trylock(page_t *pp, se_t se)
{
	kmutex_t *pse = PAGE_SE_MUTEX(pp);	/* mutex guarding p_selock */

	mutex_enter(pse);
	if (pp->p_selock & SE_EWANTED || PP_RETIRED(pp) ||
	    (se == SE_SHARED && PP_PR_NOSHARE(pp))) {
		/*
		 * Fail if a pending writer wants exclusive access
		 * (SE_EWANTED), if the page is retired, or if a shared
		 * lock is requested on a page slated for retirement.
		 */
		mutex_exit(pse);
		VM_STAT_ADD(page_trylock_failed);
		return (0);
	}

	if (se == SE_EXCL) {
		/* exclusive: only available when nothing holds the lock */
		if (pp->p_selock == 0) {
			pp->p_selock = SE_WRITER;
			mutex_exit(pse);
			return (1);
		}
	} else {
		/* shared: available unless a writer holds it (negative) */
		if (pp->p_selock >= 0) {
			pp->p_selock += SE_READER;
			mutex_exit(pse);
			return (1);
		}
	}
	mutex_exit(pse);
	return (0);
}
606
/*
 * Variant of page_unlock() specifically for the page freelist
 * code. The mere existence of this code is a vile hack that
 * has resulted due to the backwards locking order of the page
 * freelist manager; please don't call it.
 *
 * Unlike page_unlock(), this never attempts to capture the page
 * once the lock count drops to zero.
 */
void
page_unlock_nocapture(page_t *pp)
{
	kmutex_t *pse = PAGE_SE_MUTEX(pp);	/* mutex guarding p_selock */
	selock_t old;

	mutex_enter(pse);

	old = pp->p_selock;
	if ((old & ~SE_EWANTED) == SE_READER) {
		/* last reader: clear the lock, preserving SE_EWANTED */
		pp->p_selock = old & ~SE_READER;
		if (CV_HAS_WAITERS(&pp->p_cv))
			cv_broadcast(&pp->p_cv);
	} else if ((old & ~SE_EWANTED) == SE_DELETED) {
		panic("page_unlock_nocapture: page %p is deleted", (void *)pp);
	} else if (old < 0) {
		/* negative p_selock: writer lock held; release it */
		pp->p_selock &= SE_EWANTED;	/* keep only the EWANTED bit */
		if (CV_HAS_WAITERS(&pp->p_cv))
			cv_broadcast(&pp->p_cv);
	} else if ((old & ~SE_EWANTED) > SE_READER) {
		/* more than one reader: just drop our share, no wakeup */
		pp->p_selock = old - SE_READER;
	} else {
		panic("page_unlock_nocapture: page %p is not locked",
		    (void *)pp);
	}

	mutex_exit(pse);
}
641
/*
 * Release the page's "shared/exclusive" lock and wake up anyone
 * who might be waiting for it.
 *
 * If the release leaves the page fully unlocked and the page is
 * pending capture (PR_CAPTURE set in p_toxic), the lock is re-taken
 * exclusively and page_unlock_capture() is invoked.
 */
void
page_unlock(page_t *pp)
{
	kmutex_t *pse = PAGE_SE_MUTEX(pp);	/* mutex guarding p_selock */
	selock_t old;

	mutex_enter(pse);

	old = pp->p_selock;
	if ((old & ~SE_EWANTED) == SE_READER) {
		/* last reader: clear the lock, preserving SE_EWANTED */
		pp->p_selock = old & ~SE_READER;
		if (CV_HAS_WAITERS(&pp->p_cv))
			cv_broadcast(&pp->p_cv);
	} else if ((old & ~SE_EWANTED) == SE_DELETED) {
		panic("page_unlock: page %p is deleted", (void *)pp);
	} else if (old < 0) {
		/* negative p_selock: writer lock held; release it */
		pp->p_selock &= SE_EWANTED;	/* keep only the EWANTED bit */
		if (CV_HAS_WAITERS(&pp->p_cv))
			cv_broadcast(&pp->p_cv);
	} else if ((old & ~SE_EWANTED) > SE_READER) {
		/* more than one reader: just drop our share, no wakeup */
		pp->p_selock = old - SE_READER;
	} else {
		panic("page_unlock: page %p is not locked", (void *)pp);
	}

	if (pp->p_selock == 0) {
		/*
		 * If the T_CAPTURING bit is set, that means that we should
		 * not try and capture the page again as we could recurse
		 * which could lead to a stack overflow panic or spending a
		 * relatively long time in the kernel making no progress.
		 */
		if ((pp->p_toxic & PR_CAPTURE) &&
		    !(curthread->t_flag & T_CAPTURING) &&
		    !PP_RETIRED(pp)) {
			/* re-take the lock exclusively, then capture */
			pp->p_selock = SE_WRITER;
			mutex_exit(pse);
			page_unlock_capture(pp);
		} else {
			mutex_exit(pse);
		}
	} else {
		mutex_exit(pse);
	}
}
691
692 /*
693 * Try to upgrade the lock on the page from a "shared" to an
694 * "exclusive" lock. Since this upgrade operation is done while
695 * holding the mutex protecting this page, no one else can acquire this page's
696 * lock and change the page. Thus, it is safe to drop the "shared"
697 * lock and attempt to acquire the "exclusive" lock.
698 *
699 * Returns 1 on success, 0 on failure.
700 */
701 int
702 page_tryupgrade(page_t *pp)
703 {
704 kmutex_t *pse = PAGE_SE_MUTEX(pp);
705
706 mutex_enter(pse);
707 if (!(pp->p_selock & SE_EWANTED)) {
708 /* no threads want exclusive access, try upgrade */
709 if (pp->p_selock == SE_READER) {
710 /* convert to exclusive lock */
711 pp->p_selock = SE_WRITER;
712 mutex_exit(pse);
713 return (1);
714 }
715 }
716 mutex_exit(pse);
717 return (0);
718 }
719
720 /*
721 * Downgrade the "exclusive" lock on the page to a "shared" lock
722 * while holding the mutex protecting this page's p_selock field.
723 */
724 void
725 page_downgrade(page_t *pp)
726 {
727 kmutex_t *pse = PAGE_SE_MUTEX(pp);
728 int excl_waiting;
729
730 ASSERT((pp->p_selock & ~SE_EWANTED) != SE_DELETED);
731 ASSERT(PAGE_EXCL(pp));
732
733 mutex_enter(pse);
734 excl_waiting = pp->p_selock & SE_EWANTED;
735 pp->p_selock = SE_READER | excl_waiting;
736 if (CV_HAS_WAITERS(&pp->p_cv))
737 cv_broadcast(&pp->p_cv);
738 mutex_exit(pse);
739 }
740
/*
 * Mark the page's shared/exclusive lock as deleted.  The caller must
 * hold the page exclusively and the page must already be dissociated
 * from any vnode (see the ASSERTs below).  Waiters are woken so they
 * can observe the SE_DELETED state.
 */
void
page_lock_delete(page_t *pp)
{
	kmutex_t *pse = PAGE_SE_MUTEX(pp);

	ASSERT(PAGE_EXCL(pp));
	ASSERT(pp->p_vnode == NULL);
	ASSERT(pp->p_offset == (u_offset_t)-1);
	ASSERT(!PP_ISFREE(pp));

	mutex_enter(pse);
	pp->p_selock = SE_DELETED;
	if (CV_HAS_WAITERS(&pp->p_cv))
		cv_broadcast(&pp->p_cv);
	mutex_exit(pse);
}
757
758 int
759 page_deleted(page_t *pp)
760 {
761 return (pp->p_selock == SE_DELETED);
762 }
763
764 /*
765 * Implement the io lock for pages
766 */
767 void
768 page_iolock_init(page_t *pp)
769 {
770 pp->p_iolock_state = 0;
771 cv_init(&pp->p_io_cv, NULL, CV_DEFAULT, NULL);
|