7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved.
23 */
24 /*
25 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
26 * Copyright 2016 Gary Mills
27 * Copyright 2019 Joyent, Inc.
28 */
29
30 /*
31 * VM - Hardware Address Translation management for Spitfire MMU.
32 *
33 * This file implements the machine-specific hardware translation
34 * needed by the VM system. The machine-independent interface is
35 * described in <vm/hat.h> while the machine-dependent interface
36 * and data structures are described in <vm/hat_sfmmu.h>.
37 *
38 * The hat layer manages the address translation hardware as a cache
39 * driven by calls from the higher levels in the VM system.
40 */
41
42 #include <sys/types.h>
43 #include <sys/kstat.h>
44 #include <vm/hat.h>
45 #include <vm/hat_sfmmu.h>
46 #include <vm/page.h>
47 #include <sys/pte.h>
5429 return (0); /* clr prv and wrt */
5430 case (PROT_USER | PROT_WRITE):
5431 case (PROT_USER | PROT_WRITE | PROT_READ):
5432 case (PROT_USER | PROT_EXEC | PROT_WRITE):
5433 case (PROT_USER | PROT_EXEC | PROT_WRITE | PROT_READ):
5434 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT;
5435 return (TTE_WRPRM_INT); /* clr prv and set wrt */
5436 default:
5437 panic("sfmmu_vtop_prot -- bad prot %x", vprot);
5438 }
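/* NOTREACHED in practice: panic() does not return; the return below is kept only to satisfy the compiler. */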
5439 return (0);
5440 }
5441
5442 /*
5443 * Alternate unload for very large virtual ranges. With a true 64-bit VA,
5444 * the normal algorithm would take too long for a very large VA range with
5445 * few real mappings. This routine just walks through all HMEs in the global
5446 * hash table to find and remove mappings.
5447 */
5448 static void
5449 hat_unload_large_virtual(struct hat *sfmmup, caddr_t startaddr, size_t len,
5450 uint_t flags, hat_callback_t *callback)
5451 {
5452 struct hmehash_bucket *hmebp;
5453 struct hme_blk *hmeblkp;
5454 struct hme_blk *pr_hblk = NULL;
5455 struct hme_blk *nx_hblk;
5456 struct hme_blk *list = NULL;
5457 int i;
5458 demap_range_t dmr, *dmrp;
5459 cpuset_t cpuset;
5460 caddr_t endaddr = startaddr + len;
5461 caddr_t sa;
5462 caddr_t ea;
5463 caddr_t cb_sa[MAX_CB_ADDR];
5464 caddr_t cb_ea[MAX_CB_ADDR];
5465 int addr_cnt = 0;
5466 int a = 0;
5467
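/*
 * No demap-range batching is needed when the hat is being freed
 * (sfmmu_free set), since the entire context is being torn down anyway.
 */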
5468 if (sfmmup->sfmmu_free) {
5469 dmrp = NULL;
5470 } else {
5568 }
5569
5570 /*
5571 * Check TSB and TLB page sizes if the process isn't exiting.
5572 */
5573 if (!sfmmup->sfmmu_free)
5574 sfmmu_check_page_sizes(sfmmup, 0);
5575 }
5576
5577 /*
5578 * Unload all the mappings in the range [addr..addr+len). addr and len must
5579 * be MMU_PAGESIZE aligned.
5580 */
5581
5582 extern struct seg *segkmap;
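/* True when unloading kernel (ksfmmup) mappings that fall within segkmap. */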
5583 #define ISSEGKMAP(sfmmup, addr) (sfmmup == ksfmmup && \
5584 segkmap->s_base <= (addr) && (addr) < (segkmap->s_base + segkmap->s_size))
5585
5586
5587 void
5588 hat_unload_callback(struct hat *sfmmup, caddr_t addr, size_t len, uint_t flags,
5589 hat_callback_t *callback)
5590 {
5591 struct hmehash_bucket *hmebp;
5592 hmeblk_tag hblktag;
5593 int hmeshift, hashno, iskernel;
5594 struct hme_blk *hmeblkp, *pr_hblk, *list = NULL;
5595 caddr_t endaddr;
5596 cpuset_t cpuset;
5597 int addr_count = 0;
5598 int a;
5599 caddr_t cb_start_addr[MAX_CB_ADDR];
5600 caddr_t cb_end_addr[MAX_CB_ADDR];
5601 int issegkmap = ISSEGKMAP(sfmmup, addr);
5602 demap_range_t dmr, *dmrp;
5603
5604 ASSERT(sfmmup->sfmmu_as != NULL);
5605
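/*
 * Except for the kernel hat and explicit HAT_UNLOAD_OTHER requests, the
 * caller must hold the address space lock of the hat being unloaded.
 */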
5606 ASSERT((sfmmup == ksfmmup) || (flags & HAT_UNLOAD_OTHER) ||
5607 AS_LOCK_HELD(sfmmup->sfmmu_as));
5608
8429 * zero out the entry
8430 */
8431 iment->iment_next = NULL;
8432 iment->iment_prev = NULL;
8433 iment->iment_hat = NULL;
8434 iment->iment_base_va = 0;
8435 }
8436
8437 /*
8438 * hat_share()/hat_unshare() return a (non-zero) error
8439 * when saddr and daddr are not properly aligned.
8440 *
8441 * The top-level mapping element determines the alignment
8442 * requirement for saddr and daddr; it differs between
8443 * architectures.
8444 *
8445 * When hat_share()/hat_unshare() are not supported,
8446 * HATOP_SHARE()/UNSHARE() return 0.
8447 */
8448 int
8449 hat_share(struct hat *sfmmup, caddr_t addr, struct hat *ism_hatid,
8450 caddr_t sptaddr, size_t len, uint_t ismszc)
8451 {
8452 ism_blk_t *ism_blkp;
8453 ism_blk_t *new_iblk;
8454 ism_map_t *ism_map;
8455 ism_ment_t *ism_ment;
8456 int i, added;
8457 hatlock_t *hatlockp;
8458 int reload_mmu = 0;
8459 uint_t ismshift = page_get_shift(ismszc);
8460 size_t ismpgsz = page_get_pagesize(ismszc);
8461 uint_t ismmask = (uint_t)ismpgsz - 1;
8462 size_t sh_size = ISM_SHIFT(ismshift, len);
8463 ushort_t ismhatflag;
8464 hat_region_cookie_t rcookie;
8465 sf_scd_t *old_scdp;
8466
8467 #ifdef DEBUG
8468 caddr_t eaddr = addr + len;
8469 #endif /* DEBUG */
8470
10852 ASSERT(sfmmup != ksfmmup);
10853 return (MUTEX_HELD(HATLOCK_MUTEXP(TSB_HASH(sfmmup))));
10854 }
10855
10856 /*
10857 * Locking primitives to provide consistency between ISM unmap
10858 * and other operations. Since ISM unmap can take a long time, we
10859 * use the HAT_ISMBUSY flag (protected by the hatlock) to avoid
10860 * creating contention on the hatlock buckets while ISM segments
10861 * are being unmapped. The tradeoff is that the flag does not
10862 * prevent priority inversion: a thread that has to sleep while it
10863 * holds HAT_ISMBUSY set can get buried, which in turn could block
10864 * other kernel threads from running (for example, in
10865 * sfmmu_uvatopfn()).
10866 */
10867 static void
10868 sfmmu_ismhat_enter(sfmmu_t *sfmmup, int hatlock_held)
10869 {
10870 hatlock_t *hatlockp;
10871
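/*
 * Wait for any ISM operation already in progress to clear HAT_ISMBUSY,
 * then claim the flag; it is released again in sfmmu_ismhat_exit().
 * Callers that already hold the hat lock pass hatlock_held != 0 so the
 * lock is not re-acquired here.
 */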
10872 if (!hatlock_held)
10873 hatlockp = sfmmu_hat_enter(sfmmup);
10874 while (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY))
10875 cv_wait(&sfmmup->sfmmu_tsb_cv, HATLOCK_MUTEXP(hatlockp));
10876 SFMMU_FLAGS_SET(sfmmup, HAT_ISMBUSY);
10877 if (!hatlock_held)
10878 sfmmu_hat_exit(hatlockp);
10879 }
10880
10881 static void
10882 sfmmu_ismhat_exit(sfmmu_t *sfmmup, int hatlock_held)
10883 {
10884 hatlock_t *hatlockp;
10885
10886 if (!hatlock_held)
10887 hatlockp = sfmmu_hat_enter(sfmmup);
10888 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY));
10889 SFMMU_FLAGS_CLEAR(sfmmup, HAT_ISMBUSY);
10890 cv_broadcast(&sfmmup->sfmmu_tsb_cv);
10891 if (!hatlock_held)
10892 sfmmu_hat_exit(hatlockp);
10893 }
10894
10895 /*
10896 *
10897 * Algorithm:
10898 *
10899 * (1) if segkmem is not ready, allocate hblk from an array of pre-alloc'ed
10900 * hblks.
10901 *
10902 * (2) if we are allocating an hblk for mapping a slab in sfmmu_cache,
10903 *
10904 * (a) try to return an hblk from reserve pool of free hblks;
10905 * (b) if the reserve pool is empty, acquire hblk_reserve_lock
10906 * and return hblk_reserve.
10907 *
10908 * (3) call kmem_cache_alloc() to allocate hblk;
10909 *
10910 * (a) if hblk_reserve_lock is held by the current thread,
10911 * atomically replace hblk_reserve by the hblk that is
10912 * returned by kmem_cache_alloc; release hblk_reserve_lock
13780 /*
13781 * The caller makes sure hat_join_region()/hat_leave_region() can't be called
13782 * at the same time for the same process and address range. This is ensured by
13783 * the fact that the address space is locked as writer when a process joins the
13784 * regions. Therefore there's no need to hold an srd lock during the entire
13785 * execution of hat_join_region()/hat_leave_region().
13786 */
13787
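/*
 * Hash a region object pointer (typically a vnode pointer) into the SRD
 * region hash. srd_rgn_hashmask is assumed to be one less than the hash
 * table size.
 */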
13788 #define RGN_HASH_FUNCTION(obj) (((((uintptr_t)(obj)) >> 4) ^ \
13789 (((uintptr_t)(obj)) >> 11)) & \
13790 srd_rgn_hashmask)
13791 /*
13792 * This routine implements the shared context functionality required when
13793 * attaching a segment to an address space. It must be called from
13794 * hat_share() for D(ISM) segments and from segvn_create() for segments
13795 * with the MAP_PRIVATE and MAP_TEXT flags set. It returns a region_cookie
13796 * which is saved in the private segment data for hme segments and in
13797 * the ism_map structure for ISM segments.
13798 */
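/*
 * Hypothetical usage sketch (not from this file): a text-segment caller
 * such as segvn_create() would do roughly
 *
 *	rcookie = hat_join_region(as->a_hat, seg->s_base, seg->s_size,
 *	    (void *)vp, off, prot, pgszc, callback, HAT_REGION_TEXT);
 *	if (rcookie == HAT_INVALID_REGION_COOKIE)
 *		(fall back to ordinary per-process mappings)
 *
 * and would later undo it with hat_leave_region(as->a_hat, rcookie, flags).
 * The variable names above are illustrative, not taken from segvn.
 */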
13799 hat_region_cookie_t
13800 hat_join_region(struct hat *sfmmup, caddr_t r_saddr, size_t r_size,
13801 void *r_obj, u_offset_t r_objoff, uchar_t r_perm, uchar_t r_pgszc,
13802 hat_rgn_cb_func_t r_cb_function, uint_t flags)
13803 {
13804 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
13805 uint_t rhash;
13806 uint_t rid;
13807 hatlock_t *hatlockp;
13808 sf_region_t *rgnp;
13809 sf_region_t *new_rgnp = NULL;
13810 int i;
13811 uint16_t *nextidp;
13812 sf_region_t **freelistp;
13813 int maxids;
13814 sf_region_t **rarrp;
13815 uint16_t *busyrgnsp;
13816 ulong_t rttecnt;
13817 uchar_t tteflag;
13818 uchar_t r_type = flags & HAT_REGION_TYPE_MASK;
13819 int text = (r_type == HAT_REGION_TEXT);
13820
13821 if (srdp == NULL || r_size == 0) {
13822 return (HAT_INVALID_REGION_COOKIE);
15480 * the hmeblk after first phase disappear before we finally reclaim it.
15481 * This scheme eliminates the need for TSB miss handlers to lock hmeblk chains
15482 * during their traversal.
15483 *
15484 * The hmehash_mutex must be held when calling this function.
15485 *
15486 * Input:
15487 * hmebp - hme hash bucket pointer
15488 * hmeblkp - address of hmeblk to be removed
15489 * pr_hblk - virtual address of previous hmeblkp
15490 * listp - pointer to list of hmeblks linked by virtual address
15491 * free_now flag - indicates that a complete removal from the hash chains
15492 * is necessary.
15493 *
15494 * Using the free_now flag is inefficient, because a cross-call is required
15495 * to remove a single hmeblk from the hash chain, but it is necessary when
15496 * hmeblks are in short supply.
15497 */
15498 void
15499 sfmmu_hblk_hash_rm(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
15500 struct hme_blk *pr_hblk, struct hme_blk **listp, int free_now)
15501 {
15502 int shw_size, vshift;
15503 struct hme_blk *shw_hblkp;
15504 uint_t shw_mask, newshw_mask;
15505 caddr_t vaddr;
15506 int size;
15507 cpuset_t cpuset = cpu_ready_set;
15508
15509 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
15510
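/*
 * Unlink hmeblkp from the bucket's hash chain, keeping both the virtual
 * (hblk_next) and physical (hblk_nextpa) links consistent: if hmeblkp is
 * at the head of the chain the bucket itself is updated, otherwise the
 * predecessor pr_hblk is patched around it.
 */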
15511 if (hmebp->hmeblkp == hmeblkp) {
15512 hmebp->hmeh_nextpa = hmeblkp->hblk_nextpa;
15513 hmebp->hmeblkp = hmeblkp->hblk_next;
15514 } else {
15515 pr_hblk->hblk_nextpa = hmeblkp->hblk_nextpa;
15516 pr_hblk->hblk_next = hmeblkp->hblk_next;
15517 }
15518
15519 size = get_hblk_ttesz(hmeblkp);
15520 shw_hblkp = hmeblkp->hblk_shadow;