11909 THREAD_KPRI_RELEASE does nothing of the sort
Reviewed by: Bryan Cantrill <bryan@joyent.com>
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
@@ -22,10 +22,11 @@
* Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved.
*/
/*
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright 2016 Gary Mills
+ * Copyright 2019 Joyent, Inc.
*/
/*
* VM - Hardware Address Translation management for Spitfire MMU.
*
@@ -5443,16 +5444,12 @@
* the normal algorithm would take too long for a very large VA range with
* few real mappings. This routine just walks thru all HMEs in the global
* hash table to find and remove mappings.
*/
static void
-hat_unload_large_virtual(
- struct hat *sfmmup,
- caddr_t startaddr,
- size_t len,
- uint_t flags,
- hat_callback_t *callback)
+hat_unload_large_virtual(struct hat *sfmmup, caddr_t startaddr, size_t len,
+ uint_t flags, hat_callback_t *callback)
{
struct hmehash_bucket *hmebp;
struct hme_blk *hmeblkp;
struct hme_blk *pr_hblk = NULL;
struct hme_blk *nx_hblk;
@@ -5586,15 +5583,11 @@
#define ISSEGKMAP(sfmmup, addr) (sfmmup == ksfmmup && \
segkmap->s_base <= (addr) && (addr) < (segkmap->s_base + segkmap->s_size))
void
-hat_unload_callback(
- struct hat *sfmmup,
- caddr_t addr,
- size_t len,
- uint_t flags,
+hat_unload_callback(struct hat *sfmmup, caddr_t addr, size_t len, uint_t flags,
hat_callback_t *callback)
{
struct hmehash_bucket *hmebp;
hmeblk_tag hblktag;
int hmeshift, hashno, iskernel;
@@ -8451,12 +8444,12 @@
*
* When hat_share()/unshare() are not supported,
* HATOP_SHARE()/UNSHARE() return 0
*/
int
-hat_share(struct hat *sfmmup, caddr_t addr,
- struct hat *ism_hatid, caddr_t sptaddr, size_t len, uint_t ismszc)
+hat_share(struct hat *sfmmup, caddr_t addr, struct hat *ism_hatid,
+ caddr_t sptaddr, size_t len, uint_t ismszc)
{
ism_blk_t *ism_blkp;
ism_blk_t *new_iblk;
ism_map_t *ism_map;
ism_ment_t *ism_ment;
@@ -10874,11 +10867,10 @@
static void
sfmmu_ismhat_enter(sfmmu_t *sfmmup, int hatlock_held)
{
hatlock_t *hatlockp;
- THREAD_KPRI_REQUEST();
if (!hatlock_held)
hatlockp = sfmmu_hat_enter(sfmmup);
while (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY))
cv_wait(&sfmmup->sfmmu_tsb_cv, HATLOCK_MUTEXP(hatlockp));
SFMMU_FLAGS_SET(sfmmup, HAT_ISMBUSY);
@@ -10896,11 +10888,10 @@
ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY));
SFMMU_FLAGS_CLEAR(sfmmup, HAT_ISMBUSY);
cv_broadcast(&sfmmup->sfmmu_tsb_cv);
if (!hatlock_held)
sfmmu_hat_exit(hatlockp);
- THREAD_KPRI_RELEASE();
}
/*
*
* Algorithm:
@@ -13804,19 +13795,13 @@
* with the MAP_PRIVATE and MAP_TEXT flags set. It returns a region_cookie
* which is saved in the private segment data for hme segments and
* the ism_map structure for ism segments.
*/
hat_region_cookie_t
-hat_join_region(struct hat *sfmmup,
- caddr_t r_saddr,
- size_t r_size,
- void *r_obj,
- u_offset_t r_objoff,
- uchar_t r_perm,
- uchar_t r_pgszc,
- hat_rgn_cb_func_t r_cb_function,
- uint_t flags)
+hat_join_region(struct hat *sfmmup, caddr_t r_saddr, size_t r_size,
+ void *r_obj, u_offset_t r_objoff, uchar_t r_perm, uchar_t r_pgszc,
+ hat_rgn_cb_func_t r_cb_function, uint_t flags)
{
sf_srd_t *srdp = sfmmup->sfmmu_srdp;
uint_t rhash;
uint_t rid;
hatlock_t *hatlockp;
@@ -15510,12 +15495,11 @@
* remove a single hmeblk from the hash chain but is necessary when hmeblks are
* in short supply.
*/
void
sfmmu_hblk_hash_rm(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
- struct hme_blk *pr_hblk, struct hme_blk **listp,
- int free_now)
+ struct hme_blk *pr_hblk, struct hme_blk **listp, int free_now)
{
int shw_size, vshift;
struct hme_blk *shw_hblkp;
uint_t shw_mask, newshw_mask;
caddr_t vaddr;