6854 Set but not used in hat_sfmmu.c
--- old/usr/src/uts/sfmmu/vm/hat_sfmmu.c
+++ new/usr/src/uts/sfmmu/vm/hat_sfmmu.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24 /*
25 25 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
26 + * Copyright 2016 Gary Mills
26 27 */
27 28
28 29 /*
29 30 * VM - Hardware Address Translation management for Spitfire MMU.
30 31 *
31 32 * This file implements the machine specific hardware translation
32 33 * needed by the VM system. The machine independent interface is
33 34 * described in <vm/hat.h> while the machine dependent interface
34 35 * and data structures are described in <vm/hat_sfmmu.h>.
35 36 *
36 37 * The hat layer manages the address translation hardware as a cache
37 38 * driven by calls from the higher levels in the VM system.
38 39 */
39 40
40 41 #include <sys/types.h>
41 42 #include <sys/kstat.h>
42 43 #include <vm/hat.h>
43 44 #include <vm/hat_sfmmu.h>
44 45 #include <vm/page.h>
45 46 #include <sys/pte.h>
46 47 #include <sys/systm.h>
47 48 #include <sys/mman.h>
48 49 #include <sys/sysmacros.h>
49 50 #include <sys/machparam.h>
50 51 #include <sys/vtrace.h>
51 52 #include <sys/kmem.h>
52 53 #include <sys/mmu.h>
53 54 #include <sys/cmn_err.h>
54 55 #include <sys/cpu.h>
55 56 #include <sys/cpuvar.h>
56 57 #include <sys/debug.h>
57 58 #include <sys/lgrp.h>
58 59 #include <sys/archsystm.h>
59 60 #include <sys/machsystm.h>
60 61 #include <sys/vmsystm.h>
61 62 #include <vm/as.h>
62 63 #include <vm/seg.h>
63 64 #include <vm/seg_kp.h>
64 65 #include <vm/seg_kmem.h>
65 66 #include <vm/seg_kpm.h>
66 67 #include <vm/rm.h>
67 68 #include <sys/t_lock.h>
68 69 #include <sys/obpdefs.h>
69 70 #include <sys/vm_machparam.h>
70 71 #include <sys/var.h>
71 72 #include <sys/trap.h>
72 73 #include <sys/machtrap.h>
73 74 #include <sys/scb.h>
74 75 #include <sys/bitmap.h>
75 76 #include <sys/machlock.h>
76 77 #include <sys/membar.h>
77 78 #include <sys/atomic.h>
78 79 #include <sys/cpu_module.h>
79 80 #include <sys/prom_debug.h>
80 81 #include <sys/ksynch.h>
81 82 #include <sys/mem_config.h>
82 83 #include <sys/mem_cage.h>
83 84 #include <vm/vm_dep.h>
84 85 #include <sys/fpu/fpusystm.h>
85 86 #include <vm/mach_kpm.h>
86 87 #include <sys/callb.h>
87 88
88 89 #ifdef DEBUG
89 90 #define SFMMU_VALIDATE_HMERID(hat, rid, saddr, len) \
90 91 if (SFMMU_IS_SHMERID_VALID(rid)) { \
91 92 caddr_t _eaddr = (saddr) + (len); \
92 93 sf_srd_t *_srdp; \
93 94 sf_region_t *_rgnp; \
94 95 ASSERT((rid) < SFMMU_MAX_HME_REGIONS); \
95 96 ASSERT(SF_RGNMAP_TEST(hat->sfmmu_hmeregion_map, rid)); \
96 97 ASSERT((hat) != ksfmmup); \
97 98 _srdp = (hat)->sfmmu_srdp; \
98 99 ASSERT(_srdp != NULL); \
99 100 ASSERT(_srdp->srd_refcnt != 0); \
100 101 _rgnp = _srdp->srd_hmergnp[(rid)]; \
101 102 ASSERT(_rgnp != NULL && _rgnp->rgn_id == rid); \
102 103 ASSERT(_rgnp->rgn_refcnt != 0); \
103 104 ASSERT(!(_rgnp->rgn_flags & SFMMU_REGION_FREE)); \
104 105 ASSERT((_rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == \
105 106 SFMMU_REGION_HME); \
106 107 ASSERT((saddr) >= _rgnp->rgn_saddr); \
107 108 ASSERT((saddr) < _rgnp->rgn_saddr + _rgnp->rgn_size); \
108 109 ASSERT(_eaddr > _rgnp->rgn_saddr); \
109 110 ASSERT(_eaddr <= _rgnp->rgn_saddr + _rgnp->rgn_size); \
110 111 }
111 112
112 113 #define SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid) \
113 114 { \
114 115 caddr_t _hsva; \
115 116 caddr_t _heva; \
116 117 caddr_t _rsva; \
117 118 caddr_t _reva; \
118 119 int _ttesz = get_hblk_ttesz(hmeblkp); \
119 120 int _flagtte; \
120 121 ASSERT((srdp)->srd_refcnt != 0); \
121 122 ASSERT((rid) < SFMMU_MAX_HME_REGIONS); \
122 123 ASSERT((rgnp)->rgn_id == rid); \
123 124 ASSERT(!((rgnp)->rgn_flags & SFMMU_REGION_FREE)); \
124 125 ASSERT(((rgnp)->rgn_flags & SFMMU_REGION_TYPE_MASK) == \
125 126 SFMMU_REGION_HME); \
126 127 ASSERT(_ttesz <= (rgnp)->rgn_pgszc); \
127 128 _hsva = (caddr_t)get_hblk_base(hmeblkp); \
128 129 _heva = get_hblk_endaddr(hmeblkp); \
129 130 _rsva = (caddr_t)P2ALIGN( \
130 131 (uintptr_t)(rgnp)->rgn_saddr, HBLK_MIN_BYTES); \
131 132 _reva = (caddr_t)P2ROUNDUP( \
132 133 (uintptr_t)((rgnp)->rgn_saddr + (rgnp)->rgn_size), \
133 134 HBLK_MIN_BYTES); \
134 135 ASSERT(_hsva >= _rsva); \
135 136 ASSERT(_hsva < _reva); \
136 137 ASSERT(_heva > _rsva); \
137 138 ASSERT(_heva <= _reva); \
138 139 _flagtte = (_ttesz < HBLK_MIN_TTESZ) ? HBLK_MIN_TTESZ : \
139 140 _ttesz; \
140 141 ASSERT(rgnp->rgn_hmeflags & (0x1 << _flagtte)); \
141 142 }
142 143
143 144 #else /* DEBUG */
144 145 #define SFMMU_VALIDATE_HMERID(hat, rid, addr, len)
145 146 #define SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid)
146 147 #endif /* DEBUG */
147 148
148 149 #if defined(SF_ERRATA_57)
149 150 extern caddr_t errata57_limit;
150 151 #endif
151 152
152 153 #define HME8BLK_SZ_RND ((roundup(HME8BLK_SZ, sizeof (int64_t))) / \
153 154 (sizeof (int64_t)))
154 155 #define HBLK_RESERVE ((struct hme_blk *)hblk_reserve)
155 156
156 157 #define HBLK_RESERVE_CNT 128
157 158 #define HBLK_RESERVE_MIN 20
158 159
159 160 static struct hme_blk *freehblkp;
160 161 static kmutex_t freehblkp_lock;
161 162 static int freehblkcnt;
162 163
163 164 static int64_t hblk_reserve[HME8BLK_SZ_RND];
164 165 static kmutex_t hblk_reserve_lock;
165 166 static kthread_t *hblk_reserve_thread;
166 167
167 168 static nucleus_hblk8_info_t nucleus_hblk8;
168 169 static nucleus_hblk1_info_t nucleus_hblk1;
169 170
170 171 /*
171 172 * Data to manage per-cpu hmeblk pending queues, hmeblks are queued here
172 173 * after the initial phase of removing an hmeblk from the hash chain, see
173 174 * the detailed comment in sfmmu_hblk_hash_rm() for further details.
174 175 */
175 176 static cpu_hme_pend_t *cpu_hme_pend;
176 177 static uint_t cpu_hme_pend_thresh;
177 178 /*
178 179 * SFMMU specific hat functions
179 180 */
180 181 void hat_pagecachectl(struct page *, int);
181 182
182 183 /* flags for hat_pagecachectl */
183 184 #define HAT_CACHE 0x1
184 185 #define HAT_UNCACHE 0x2
185 186 #define HAT_TMPNC 0x4
186 187
187 188 /*
188 189 * Flag to allow the creation of non-cacheable translations
189 190 * to system memory. It is off by default. At the moment this
190 191 * flag is used by the ecache error injector. The error injector
191 192 * will turn it on when creating such a translation then shut it
192 193 * off when it's finished.
193 194 */
194 195
195 196 int sfmmu_allow_nc_trans = 0;
196 197
197 198 /*
198 199 * Flag to disable large page support.
199 200 * value of 1 => disable all large pages.
200 201 * bits 1, 2, and 3 are to disable 64K, 512K and 4M pages respectively.
201 202 *
202 203 * For example, use the value 0x4 to disable 512K pages.
203 204 *
204 205 */
205 206 #define LARGE_PAGES_OFF 0x1
206 207
207 208 /*
208 209 * The disable_large_pages and disable_ism_large_pages variables control
209 210 * hat_memload_array and the page sizes to be used by ISM and the kernel.
210 211 *
211 212 * The disable_auto_data_large_pages and disable_auto_text_large_pages variables
212 213 * are only used to control which OOB pages to use at upper VM segment creation
213 214 * time, and are set in hat_init_pagesizes and used in the map_pgsz* routines.
214 215 * Their values may come from platform or CPU specific code to disable page
215 216 * sizes that should not be used.
216 217 *
217 218 * WARNING: 512K pages are currently not supported for ISM/DISM.
218 219 */
219 220 uint_t disable_large_pages = 0;
220 221 uint_t disable_ism_large_pages = (1 << TTE512K);
221 222 uint_t disable_auto_data_large_pages = 0;
222 223 uint_t disable_auto_text_large_pages = 0;
223 224
224 225 /*
225 226 * Private sfmmu data structures for hat management
226 227 */
227 228 static struct kmem_cache *sfmmuid_cache;
228 229 static struct kmem_cache *mmuctxdom_cache;
229 230
230 231 /*
231 232 * Private sfmmu data structures for tsb management
232 233 */
233 234 static struct kmem_cache *sfmmu_tsbinfo_cache;
234 235 static struct kmem_cache *sfmmu_tsb8k_cache;
235 236 static struct kmem_cache *sfmmu_tsb_cache[NLGRPS_MAX];
236 237 static vmem_t *kmem_bigtsb_arena;
237 238 static vmem_t *kmem_tsb_arena;
238 239
239 240 /*
240 241 * sfmmu static variables for hmeblk resource management.
241 242 */
242 243 static vmem_t *hat_memload1_arena; /* HAT translation arena for sfmmu1_cache */
243 244 static struct kmem_cache *sfmmu8_cache;
244 245 static struct kmem_cache *sfmmu1_cache;
245 246 static struct kmem_cache *pa_hment_cache;
246 247
247 248 static kmutex_t ism_mlist_lock; /* mutex for ism mapping list */
248 249 /*
249 250 * private data for ism
250 251 */
251 252 static struct kmem_cache *ism_blk_cache;
252 253 static struct kmem_cache *ism_ment_cache;
253 254 #define ISMID_STARTADDR NULL
254 255
255 256 /*
256 257 * Region management data structures and function declarations.
257 258 */
258 259
259 260 static void sfmmu_leave_srd(sfmmu_t *);
260 261 static int sfmmu_srdcache_constructor(void *, void *, int);
261 262 static void sfmmu_srdcache_destructor(void *, void *);
262 263 static int sfmmu_rgncache_constructor(void *, void *, int);
263 264 static void sfmmu_rgncache_destructor(void *, void *);
264 265 static int sfrgnmap_isnull(sf_region_map_t *);
265 266 static int sfhmergnmap_isnull(sf_hmeregion_map_t *);
266 267 static int sfmmu_scdcache_constructor(void *, void *, int);
267 268 static void sfmmu_scdcache_destructor(void *, void *);
268 269 static void sfmmu_rgn_cb_noop(caddr_t, caddr_t, caddr_t,
269 270 size_t, void *, u_offset_t);
270 271
271 272 static uint_t srd_hashmask = SFMMU_MAX_SRD_BUCKETS - 1;
272 273 static sf_srd_bucket_t *srd_buckets;
273 274 static struct kmem_cache *srd_cache;
274 275 static uint_t srd_rgn_hashmask = SFMMU_MAX_REGION_BUCKETS - 1;
275 276 static struct kmem_cache *region_cache;
276 277 static struct kmem_cache *scd_cache;
277 278
278 279 #ifdef sun4v
279 280 int use_bigtsb_arena = 1;
280 281 #else
281 282 int use_bigtsb_arena = 0;
282 283 #endif
283 284
284 285 /* External /etc/system tunable, for turning the shctx support on and off */
285 286 int disable_shctx = 0;
286 287 /* Internal variable, set by MD if the HW supports shctx feature */
287 288 int shctx_on = 0;
288 289
289 290 #ifdef DEBUG
290 291 static void check_scd_sfmmu_list(sfmmu_t **, sfmmu_t *, int);
291 292 #endif
292 293 static void sfmmu_to_scd_list(sfmmu_t **, sfmmu_t *);
293 294 static void sfmmu_from_scd_list(sfmmu_t **, sfmmu_t *);
294 295
295 296 static sf_scd_t *sfmmu_alloc_scd(sf_srd_t *, sf_region_map_t *);
296 297 static void sfmmu_find_scd(sfmmu_t *);
297 298 static void sfmmu_join_scd(sf_scd_t *, sfmmu_t *);
298 299 static void sfmmu_finish_join_scd(sfmmu_t *);
299 300 static void sfmmu_leave_scd(sfmmu_t *, uchar_t);
300 301 static void sfmmu_destroy_scd(sf_srd_t *, sf_scd_t *, sf_region_map_t *);
301 302 static int sfmmu_alloc_scd_tsbs(sf_srd_t *, sf_scd_t *);
302 303 static void sfmmu_free_scd_tsbs(sfmmu_t *);
303 304 static void sfmmu_tsb_inv_ctx(sfmmu_t *);
304 305 static int find_ism_rid(sfmmu_t *, sfmmu_t *, caddr_t, uint_t *);
305 306 static void sfmmu_ism_hatflags(sfmmu_t *, int);
306 307 static int sfmmu_srd_lock_held(sf_srd_t *);
307 308 static void sfmmu_remove_scd(sf_scd_t **, sf_scd_t *);
308 309 static void sfmmu_add_scd(sf_scd_t **headp, sf_scd_t *);
309 310 static void sfmmu_link_scd_to_regions(sf_srd_t *, sf_scd_t *);
310 311 static void sfmmu_unlink_scd_from_regions(sf_srd_t *, sf_scd_t *);
311 312 static void sfmmu_link_to_hmeregion(sfmmu_t *, sf_region_t *);
312 313 static void sfmmu_unlink_from_hmeregion(sfmmu_t *, sf_region_t *);
313 314
314 315 /*
315 316 * ``hat_lock'' is a hashed mutex lock for protecting sfmmu TSB lists,
316 317 * HAT flags, synchronizing TLB/TSB coherency, and context management.
317 318 * The lock is hashed on the sfmmup since the case where we need to lock
318 319 * all processes is rare but does occur (e.g. we need to unload a shared
319 320 * mapping from all processes using the mapping). We have a lot of buckets,
320 321 * and each slab of sfmmu_t's can use about a quarter of them, giving us
321 322 * a fairly good distribution without wasting too much space and overhead
322 323 * when we have to grab them all.
323 324 */
324 325 #define SFMMU_NUM_LOCK 128 /* must be power of two */
325 326 hatlock_t hat_lock[SFMMU_NUM_LOCK];
326 327
327 328 /*
328 329 * Hash algorithm optimized for a small number of slabs.
329 330 * 7 is (highbit((sizeof sfmmu_t)) - 1)
330 331 * This hash algorithm is based upon the knowledge that sfmmu_t's come from a
331 332 * kmem_cache, and thus they will be sequential within that cache. In
332 333 * addition, each new slab will have a different "color" up to cache_maxcolor
333 334 * which will skew the hashing for each successive slab which is allocated.
334 335 * If the size of sfmmu_t changed to a larger size, this algorithm may need
335 336 * to be revisited.
336 337 */
337 338 #define TSB_HASH_SHIFT_BITS (7)
338 339 #define PTR_HASH(x) ((uintptr_t)x >> TSB_HASH_SHIFT_BITS)
339 340
340 341 #ifdef DEBUG
341 342 int tsb_hash_debug = 0;
342 343 #define TSB_HASH(sfmmup) \
343 344 (tsb_hash_debug ? &hat_lock[0] : \
344 345 &hat_lock[PTR_HASH(sfmmup) & (SFMMU_NUM_LOCK-1)])
345 346 #else /* DEBUG */
346 347 #define TSB_HASH(sfmmup) &hat_lock[PTR_HASH(sfmmup) & (SFMMU_NUM_LOCK-1)]
347 348 #endif /* DEBUG */
348 349
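
As a rough illustration of the bucket selection above, here is a minimal user-level sketch that mirrors PTR_HASH() and the SFMMU_NUM_LOCK value shown; the slab base address and 256-byte object spacing are purely hypothetical:

#include <stdio.h>
#include <stdint.h>

#define	TSB_HASH_SHIFT_BITS	7	/* mirrors the value above */
#define	SFMMU_NUM_LOCK		128	/* mirrors the value above */
#define	PTR_HASH(x)	((uintptr_t)(x) >> TSB_HASH_SHIFT_BITS)

int
main(void)
{
	uintptr_t base = (uintptr_t)0x1234000;	/* hypothetical slab base */
	size_t objsz = 256;			/* hypothetical sfmmu_t spacing */
	int i;

	for (i = 0; i < 4; i++) {
		uintptr_t sfmmup = base + i * objsz;
		printf("sfmmup %#lx -> hat_lock[%lu]\n",
		    (unsigned long)sfmmup,
		    (unsigned long)(PTR_HASH(sfmmup) & (SFMMU_NUM_LOCK - 1)));
	}
	return (0);
}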
349 350
350 351 /* sfmmu_replace_tsb() return codes. */
351 352 typedef enum tsb_replace_rc {
352 353 TSB_SUCCESS,
353 354 TSB_ALLOCFAIL,
354 355 TSB_LOSTRACE,
355 356 TSB_ALREADY_SWAPPED,
356 357 TSB_CANTGROW
357 358 } tsb_replace_rc_t;
358 359
359 360 /*
360 361 * Flags for TSB allocation routines.
361 362 */
362 363 #define TSB_ALLOC 0x01
363 364 #define TSB_FORCEALLOC 0x02
364 365 #define TSB_GROW 0x04
365 366 #define TSB_SHRINK 0x08
366 367 #define TSB_SWAPIN 0x10
367 368
368 369 /*
369 370 * Support for HAT callbacks.
370 371 */
371 372 #define SFMMU_MAX_RELOC_CALLBACKS 10
372 373 int sfmmu_max_cb_id = SFMMU_MAX_RELOC_CALLBACKS;
373 374 static id_t sfmmu_cb_nextid = 0;
374 375 static id_t sfmmu_tsb_cb_id;
375 376 struct sfmmu_callback *sfmmu_cb_table;
376 377
377 378 kmutex_t kpr_mutex;
378 379 kmutex_t kpr_suspendlock;
379 380 kthread_t *kreloc_thread;
380 381
381 382 /*
382 383 * Enable VA->PA translation sanity checking on DEBUG kernels.
383 384 * Disabled by default. This is incompatible with some
384 385 * drivers (error injector, RSM) so if it breaks you get
385 386 * to keep both pieces.
386 387 */
387 388 int hat_check_vtop = 0;
388 389
389 390 /*
390 391 * Private sfmmu routines (prototypes)
391 392 */
392 393 static struct hme_blk *sfmmu_shadow_hcreate(sfmmu_t *, caddr_t, int, uint_t);
393 394 static struct hme_blk *sfmmu_hblk_alloc(sfmmu_t *, caddr_t,
394 395 struct hmehash_bucket *, uint_t, hmeblk_tag, uint_t,
395 396 uint_t);
396 397 static caddr_t sfmmu_hblk_unload(struct hat *, struct hme_blk *, caddr_t,
397 398 caddr_t, demap_range_t *, uint_t);
398 399 static caddr_t sfmmu_hblk_sync(struct hat *, struct hme_blk *, caddr_t,
399 400 caddr_t, int);
400 401 static void sfmmu_hblk_free(struct hme_blk **);
401 402 static void sfmmu_hblks_list_purge(struct hme_blk **, int);
402 403 static uint_t sfmmu_get_free_hblk(struct hme_blk **, uint_t);
403 404 static uint_t sfmmu_put_free_hblk(struct hme_blk *, uint_t);
404 405 static struct hme_blk *sfmmu_hblk_steal(int);
405 406 static int sfmmu_steal_this_hblk(struct hmehash_bucket *,
406 407 struct hme_blk *, uint64_t, struct hme_blk *);
407 408 static caddr_t sfmmu_hblk_unlock(struct hme_blk *, caddr_t, caddr_t);
408 409
409 410 static void hat_do_memload_array(struct hat *, caddr_t, size_t,
410 411 struct page **, uint_t, uint_t, uint_t);
411 412 static void hat_do_memload(struct hat *, caddr_t, struct page *,
412 413 uint_t, uint_t, uint_t);
413 414 static void sfmmu_memload_batchsmall(struct hat *, caddr_t, page_t **,
414 415 uint_t, uint_t, pgcnt_t, uint_t);
415 416 void sfmmu_tteload(struct hat *, tte_t *, caddr_t, page_t *,
416 417 uint_t);
417 418 static int sfmmu_tteload_array(sfmmu_t *, tte_t *, caddr_t, page_t **,
418 419 uint_t, uint_t);
419 420 static struct hmehash_bucket *sfmmu_tteload_acquire_hashbucket(sfmmu_t *,
420 421 caddr_t, int, uint_t);
421 422 static struct hme_blk *sfmmu_tteload_find_hmeblk(sfmmu_t *,
422 423 struct hmehash_bucket *, caddr_t, uint_t, uint_t,
423 424 uint_t);
424 425 static int sfmmu_tteload_addentry(sfmmu_t *, struct hme_blk *, tte_t *,
425 426 caddr_t, page_t **, uint_t, uint_t);
426 427 static void sfmmu_tteload_release_hashbucket(struct hmehash_bucket *);
427 428
428 429 static int sfmmu_pagearray_setup(caddr_t, page_t **, tte_t *, int);
429 430 static pfn_t sfmmu_uvatopfn(caddr_t, sfmmu_t *, tte_t *);
430 431 void sfmmu_memtte(tte_t *, pfn_t, uint_t, int);
431 432 #ifdef VAC
432 433 static void sfmmu_vac_conflict(struct hat *, caddr_t, page_t *);
433 434 static int sfmmu_vacconflict_array(caddr_t, page_t *, int *);
434 435 int tst_tnc(page_t *pp, pgcnt_t);
435 436 void conv_tnc(page_t *pp, int);
436 437 #endif
437 438
438 439 static void sfmmu_get_ctx(sfmmu_t *);
439 440 static void sfmmu_free_sfmmu(sfmmu_t *);
440 441
441 442 static void sfmmu_ttesync(struct hat *, caddr_t, tte_t *, page_t *);
442 443 static void sfmmu_chgattr(struct hat *, caddr_t, size_t, uint_t, int);
443 444
444 445 cpuset_t sfmmu_pageunload(page_t *, struct sf_hment *, int);
445 446 static void hat_pagereload(struct page *, struct page *);
446 447 static cpuset_t sfmmu_pagesync(page_t *, struct sf_hment *, uint_t);
447 448 #ifdef VAC
448 449 void sfmmu_page_cache_array(page_t *, int, int, pgcnt_t);
449 450 static void sfmmu_page_cache(page_t *, int, int, int);
450 451 #endif
451 452
452 453 cpuset_t sfmmu_rgntlb_demap(caddr_t, sf_region_t *,
453 454 struct hme_blk *, int);
454 455 static void sfmmu_tlbcache_demap(caddr_t, sfmmu_t *, struct hme_blk *,
455 456 pfn_t, int, int, int, int);
456 457 static void sfmmu_ismtlbcache_demap(caddr_t, sfmmu_t *, struct hme_blk *,
457 458 pfn_t, int);
458 459 static void sfmmu_tlb_demap(caddr_t, sfmmu_t *, struct hme_blk *, int, int);
459 460 static void sfmmu_tlb_range_demap(demap_range_t *);
460 461 static void sfmmu_invalidate_ctx(sfmmu_t *);
461 462 static void sfmmu_sync_mmustate(sfmmu_t *);
462 463
463 464 static void sfmmu_tsbinfo_setup_phys(struct tsb_info *, pfn_t);
464 465 static int sfmmu_tsbinfo_alloc(struct tsb_info **, int, int, uint_t,
465 466 sfmmu_t *);
466 467 static void sfmmu_tsb_free(struct tsb_info *);
467 468 static void sfmmu_tsbinfo_free(struct tsb_info *);
468 469 static int sfmmu_init_tsbinfo(struct tsb_info *, int, int, uint_t,
469 470 sfmmu_t *);
470 471 static void sfmmu_tsb_chk_reloc(sfmmu_t *, hatlock_t *);
471 472 static void sfmmu_tsb_swapin(sfmmu_t *, hatlock_t *);
472 473 static int sfmmu_select_tsb_szc(pgcnt_t);
473 474 static void sfmmu_mod_tsb(sfmmu_t *, caddr_t, tte_t *, int);
474 475 #define sfmmu_load_tsb(sfmmup, vaddr, tte, szc) \
475 476 sfmmu_mod_tsb(sfmmup, vaddr, tte, szc)
476 477 #define sfmmu_unload_tsb(sfmmup, vaddr, szc) \
477 478 sfmmu_mod_tsb(sfmmup, vaddr, NULL, szc)
478 479 static void sfmmu_copy_tsb(struct tsb_info *, struct tsb_info *);
479 480 static tsb_replace_rc_t sfmmu_replace_tsb(sfmmu_t *, struct tsb_info *, uint_t,
480 481 hatlock_t *, uint_t);
481 482 static void sfmmu_size_tsb(sfmmu_t *, int, uint64_t, uint64_t, int);
482 483
483 484 #ifdef VAC
484 485 void sfmmu_cache_flush(pfn_t, int);
485 486 void sfmmu_cache_flushcolor(int, pfn_t);
486 487 #endif
487 488 static caddr_t sfmmu_hblk_chgattr(sfmmu_t *, struct hme_blk *, caddr_t,
488 489 caddr_t, demap_range_t *, uint_t, int);
489 490
490 491 static uint64_t sfmmu_vtop_attr(uint_t, int mode, tte_t *);
491 492 static uint_t sfmmu_ptov_attr(tte_t *);
492 493 static caddr_t sfmmu_hblk_chgprot(sfmmu_t *, struct hme_blk *, caddr_t,
493 494 caddr_t, demap_range_t *, uint_t);
494 495 static uint_t sfmmu_vtop_prot(uint_t, uint_t *);
495 496 static int sfmmu_idcache_constructor(void *, void *, int);
496 497 static void sfmmu_idcache_destructor(void *, void *);
497 498 static int sfmmu_hblkcache_constructor(void *, void *, int);
498 499 static void sfmmu_hblkcache_destructor(void *, void *);
499 500 static void sfmmu_hblkcache_reclaim(void *);
500 501 static void sfmmu_shadow_hcleanup(sfmmu_t *, struct hme_blk *,
501 502 struct hmehash_bucket *);
502 503 static void sfmmu_hblk_hash_rm(struct hmehash_bucket *, struct hme_blk *,
503 504 struct hme_blk *, struct hme_blk **, int);
504 505 static void sfmmu_hblk_hash_add(struct hmehash_bucket *, struct hme_blk *,
505 506 uint64_t);
506 507 static struct hme_blk *sfmmu_check_pending_hblks(int);
507 508 static void sfmmu_free_hblks(sfmmu_t *, caddr_t, caddr_t, int);
508 509 static void sfmmu_cleanup_rhblk(sf_srd_t *, caddr_t, uint_t, int);
509 510 static void sfmmu_unload_hmeregion_va(sf_srd_t *, uint_t, caddr_t, caddr_t,
510 511 int, caddr_t *);
511 512 static void sfmmu_unload_hmeregion(sf_srd_t *, sf_region_t *);
512 513
513 514 static void sfmmu_rm_large_mappings(page_t *, int);
514 515
515 516 static void hat_lock_init(void);
516 517 static void hat_kstat_init(void);
517 518 static int sfmmu_kstat_percpu_update(kstat_t *ksp, int rw);
518 519 static void sfmmu_set_scd_rttecnt(sf_srd_t *, sf_scd_t *);
519 520 static int sfmmu_is_rgnva(sf_srd_t *, caddr_t, ulong_t, ulong_t);
520 521 static void sfmmu_check_page_sizes(sfmmu_t *, int);
521 522 int fnd_mapping_sz(page_t *);
522 523 static void iment_add(struct ism_ment *, struct hat *);
523 524 static void iment_sub(struct ism_ment *, struct hat *);
524 525 static pgcnt_t ism_tsb_entries(sfmmu_t *, int szc);
525 526 extern void sfmmu_setup_tsbinfo(sfmmu_t *);
526 527 extern void sfmmu_clear_utsbinfo(void);
527 528
528 529 static void sfmmu_ctx_wrap_around(mmu_ctx_t *, boolean_t);
529 530
530 531 extern int vpm_enable;
531 532
532 533 /* kpm globals */
533 534 #ifdef DEBUG
534 535 /*
535 536 * Enable trap level tsbmiss handling
536 537 */
537 538 int kpm_tsbmtl = 1;
538 539
539 540 /*
540 541 * Flush the TLB on kpm mapout. Note: Xcalls are used (again) for the
541 542 * required TLB shootdowns in this case, so handle w/ care. Off by default.
542 543 */
543 544 int kpm_tlb_flush;
544 545 #endif /* DEBUG */
545 546
546 547 static void *sfmmu_vmem_xalloc_aligned_wrapper(vmem_t *, size_t, int);
547 548
548 549 #ifdef DEBUG
549 550 static void sfmmu_check_hblk_flist();
550 551 #endif
551 552
552 553 /*
553 554 * Semi-private sfmmu data structures. Some of them are initialized in
554 555 * startup or in hat_init. Some of them are private but accessed by
555 556 * assembly code or mach_sfmmu.c
556 557 */
557 558 struct hmehash_bucket *uhme_hash; /* user hmeblk hash table */
558 559 struct hmehash_bucket *khme_hash; /* kernel hmeblk hash table */
559 560 uint64_t uhme_hash_pa; /* PA of uhme_hash */
560 561 uint64_t khme_hash_pa; /* PA of khme_hash */
561 562 int uhmehash_num; /* # of buckets in user hash table */
562 563 int khmehash_num; /* # of buckets in kernel hash table */
563 564
564 565 uint_t max_mmu_ctxdoms = 0; /* max context domains in the system */
565 566 mmu_ctx_t **mmu_ctxs_tbl; /* global array of context domains */
566 567 uint64_t mmu_saved_gnum = 0; /* to init incoming MMUs' gnums */
567 568
568 569 #define DEFAULT_NUM_CTXS_PER_MMU 8192
569 570 static uint_t nctxs = DEFAULT_NUM_CTXS_PER_MMU;
570 571
571 572 int cache; /* describes system cache */
572 573
573 574 caddr_t ktsb_base; /* kernel 8k-indexed tsb base address */
574 575 uint64_t ktsb_pbase; /* kernel 8k-indexed tsb phys address */
575 576 int ktsb_szcode; /* kernel 8k-indexed tsb size code */
576 577 int ktsb_sz; /* kernel 8k-indexed tsb size */
577 578
578 579 caddr_t ktsb4m_base; /* kernel 4m-indexed tsb base address */
579 580 uint64_t ktsb4m_pbase; /* kernel 4m-indexed tsb phys address */
580 581 int ktsb4m_szcode; /* kernel 4m-indexed tsb size code */
581 582 int ktsb4m_sz; /* kernel 4m-indexed tsb size */
582 583
583 584 uint64_t kpm_tsbbase; /* kernel seg_kpm 4M TSB base address */
584 585 int kpm_tsbsz; /* kernel seg_kpm 4M TSB size code */
585 586 uint64_t kpmsm_tsbbase; /* kernel seg_kpm 8K TSB base address */
586 587 int kpmsm_tsbsz; /* kernel seg_kpm 8K TSB size code */
587 588
588 589 #ifndef sun4v
589 590 int utsb_dtlb_ttenum = -1; /* index in TLB for utsb locked TTE */
590 591 int utsb4m_dtlb_ttenum = -1; /* index in TLB for 4M TSB TTE */
591 592 int dtlb_resv_ttenum; /* index in TLB of first reserved TTE */
592 593 caddr_t utsb_vabase; /* reserved kernel virtual memory */
593 594 caddr_t utsb4m_vabase; /* for trap handler TSB accesses */
594 595 #endif /* sun4v */
595 596 uint64_t tsb_alloc_bytes = 0; /* bytes allocated to TSBs */
596 597 vmem_t *kmem_tsb_default_arena[NLGRPS_MAX]; /* For dynamic TSBs */
597 598 vmem_t *kmem_bigtsb_default_arena[NLGRPS_MAX]; /* dynamic 256M TSBs */
598 599
599 600 /*
600 601 * Size to use for TSB slabs. Future platforms that support page sizes
601 602 * larger than 4M may wish to change these values, and provide their own
602 603 * assembly macros for building and decoding the TSB base register contents.
603 604 * Note disable_large_pages will override the value set here.
604 605 */
605 606 static uint_t tsb_slab_ttesz = TTE4M;
606 607 size_t tsb_slab_size = MMU_PAGESIZE4M;
607 608 uint_t tsb_slab_shift = MMU_PAGESHIFT4M;
608 609 /* PFN mask for TTE */
609 610 size_t tsb_slab_mask = MMU_PAGEOFFSET4M >> MMU_PAGESHIFT;
610 611
611 612 /*
612 613 * Size to use for TSB slabs. These are used only when 256M tsb arenas
613 614 * exist.
614 615 */
615 616 static uint_t bigtsb_slab_ttesz = TTE256M;
616 617 static size_t bigtsb_slab_size = MMU_PAGESIZE256M;
617 618 static uint_t bigtsb_slab_shift = MMU_PAGESHIFT256M;
618 619 /* 256M page alignment for 8K pfn */
619 620 static size_t bigtsb_slab_mask = MMU_PAGEOFFSET256M >> MMU_PAGESHIFT;
620 621
621 622 /* largest TSB size to grow to, will be smaller on smaller memory systems */
622 623 static int tsb_max_growsize = 0;
623 624
624 625 /*
625 626 * Tunable parameters dealing with TSB policies.
626 627 */
627 628
628 629 /*
629 630 * This undocumented tunable forces all 8K TSBs to be allocated from
630 631 * the kernel heap rather than from the kmem_tsb_default_arena arenas.
631 632 */
632 633 #ifdef DEBUG
633 634 int tsb_forceheap = 0;
634 635 #endif /* DEBUG */
635 636
636 637 /*
637 638 * Decide whether to use per-lgroup arenas, or one global set of
638 639 * TSB arenas. The default is not to break up per-lgroup, since
639 640 * most platforms don't recognize any tangible benefit from it.
640 641 */
641 642 int tsb_lgrp_affinity = 0;
642 643
643 644 /*
644 645 * Used for growing the TSB based on the process RSS.
645 646 * tsb_rss_factor is based on the smallest TSB, and is
646 647 * shifted by the TSB size to determine if we need to grow.
647 648 * The default will grow the TSB if the number of TTEs for
648 649 * this page size exceeds 75% of the number of TSB entries,
649 650 * which should _almost_ eliminate all conflict misses
650 651 * (at the expense of using up lots and lots of memory).
651 652 */
652 653 #define TSB_RSS_FACTOR (TSB_ENTRIES(TSB_MIN_SZCODE) * 0.75)
653 654 #define SFMMU_RSS_TSBSIZE(tsbszc) (tsb_rss_factor << tsbszc)
654 655 #define SELECT_TSB_SIZECODE(pgcnt) ( \
655 656 (enable_tsb_rss_sizing)? sfmmu_select_tsb_szc(pgcnt) : \
656 657 default_tsb_size)
657 658 #define TSB_OK_SHRINK() \
658 659 (tsb_alloc_bytes > tsb_alloc_hiwater || freemem < desfree)
659 660 #define TSB_OK_GROW() \
660 661 (tsb_alloc_bytes < tsb_alloc_hiwater && freemem > desfree)
661 662
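For concreteness, the grow threshold implied by the macros above can be tabulated. This small sketch assumes the smallest TSB holds 512 entries (an 8K TSB of 16-byte entries), which would make tsb_rss_factor 384, and prints the resident-TTE count at which each TSB size code would be outgrown:

#include <stdio.h>

#define	MIN_TSB_ENTRIES	512	/* assumed: 8K TSB of 16-byte entries */
#define	RSS_FACTOR	((int)(MIN_TSB_ENTRIES * 0.75))

int
main(void)
{
	int tsbszc;

	for (tsbszc = 0; tsbszc <= 3; tsbszc++) {
		/* mirrors SFMMU_RSS_TSBSIZE(): the factor shifted by szc */
		printf("szc %d: grow once TTE count exceeds %d\n",
		    tsbszc, RSS_FACTOR << tsbszc);
	}
	return (0);
}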
662 663 int enable_tsb_rss_sizing = 1;
663 664 int tsb_rss_factor = (int)TSB_RSS_FACTOR;
664 665
665 666 /* which TSB size code to use for new address spaces or if rss sizing off */
666 667 int default_tsb_size = TSB_8K_SZCODE;
667 668
668 669 static uint64_t tsb_alloc_hiwater; /* limit TSB reserved memory */
669 670 uint64_t tsb_alloc_hiwater_factor; /* tsb_alloc_hiwater = physmem / this */
670 671 #define TSB_ALLOC_HIWATER_FACTOR_DEFAULT 32
671 672
672 673 #ifdef DEBUG
673 674 static int tsb_random_size = 0; /* set to 1 to test random tsb sizes on alloc */
674 675 static int tsb_grow_stress = 0; /* if set to 1, keep replacing TSB w/ random */
675 676 static int tsb_alloc_mtbf = 0; /* fail allocation every n attempts */
676 677 static int tsb_alloc_fail_mtbf = 0;
677 678 static int tsb_alloc_count = 0;
678 679 #endif /* DEBUG */
679 680
680 681 /* if set to 1, will remap valid TTEs when growing TSB. */
681 682 int tsb_remap_ttes = 1;
682 683
683 684 /*
684 685 * If we have more than this many mappings, allocate a second TSB.
685 686 * This default is chosen because the I/D fully associative TLBs are
686 687 * assumed to have at least 8 available entries. Platforms with a
687 688 * larger fully-associative TLB could probably override the default.
688 689 */
689 690
690 691 #ifdef sun4v
691 692 int tsb_sectsb_threshold = 0;
692 693 #else
693 694 int tsb_sectsb_threshold = 8;
694 695 #endif
695 696
696 697 /*
697 698 * kstat data
698 699 */
699 700 struct sfmmu_global_stat sfmmu_global_stat;
700 701 struct sfmmu_tsbsize_stat sfmmu_tsbsize_stat;
701 702
702 703 /*
703 704 * Global data
704 705 */
705 706 sfmmu_t *ksfmmup; /* kernel's hat id */
706 707
707 708 #ifdef DEBUG
708 709 static void chk_tte(tte_t *, tte_t *, tte_t *, struct hme_blk *);
709 710 #endif
710 711
711 712 /* sfmmu locking operations */
712 713 static kmutex_t *sfmmu_mlspl_enter(struct page *, int);
713 714 static int sfmmu_mlspl_held(struct page *, int);
714 715
715 716 kmutex_t *sfmmu_page_enter(page_t *);
716 717 void sfmmu_page_exit(kmutex_t *);
717 718 int sfmmu_page_spl_held(struct page *);
718 719
719 720 /* sfmmu internal locking operations - accessed directly */
720 721 static void sfmmu_mlist_reloc_enter(page_t *, page_t *,
721 722 kmutex_t **, kmutex_t **);
722 723 static void sfmmu_mlist_reloc_exit(kmutex_t *, kmutex_t *);
723 724 static hatlock_t *
724 725 sfmmu_hat_enter(sfmmu_t *);
725 726 static hatlock_t *
726 727 sfmmu_hat_tryenter(sfmmu_t *);
727 728 static void sfmmu_hat_exit(hatlock_t *);
728 729 static void sfmmu_hat_lock_all(void);
729 730 static void sfmmu_hat_unlock_all(void);
730 731 static void sfmmu_ismhat_enter(sfmmu_t *, int);
731 732 static void sfmmu_ismhat_exit(sfmmu_t *, int);
732 733
733 734 kpm_hlk_t *kpmp_table;
734 735 uint_t kpmp_table_sz; /* must be a power of 2 */
735 736 uchar_t kpmp_shift;
736 737
737 738 kpm_shlk_t *kpmp_stable;
738 739 uint_t kpmp_stable_sz; /* must be a power of 2 */
739 740
740 741 /*
741 742 * SPL_TABLE_SIZE is 2 * NCPU, but no smaller than 128.
742 743 * SPL_SHIFT is log2(SPL_TABLE_SIZE).
743 744 */
744 745 #if ((2*NCPU_P2) > 128)
745 746 #define SPL_SHIFT ((unsigned)(NCPU_LOG2 + 1))
746 747 #else
747 748 #define SPL_SHIFT 7U
748 749 #endif
749 750 #define SPL_TABLE_SIZE (1U << SPL_SHIFT)
750 751 #define SPL_MASK (SPL_TABLE_SIZE - 1)
751 752
752 753 /*
753 754 * We shift by PP_SHIFT to take care of the low-order 0 bits of a page_t
754 755 * and by multiples of SPL_SHIFT to get as many varied bits as we can.
755 756 */
756 757 #define SPL_INDEX(pp) \
757 758 ((((uintptr_t)(pp) >> PP_SHIFT) ^ \
758 759 ((uintptr_t)(pp) >> (PP_SHIFT + SPL_SHIFT)) ^ \
759 760 ((uintptr_t)(pp) >> (PP_SHIFT + SPL_SHIFT * 2)) ^ \
760 761 ((uintptr_t)(pp) >> (PP_SHIFT + SPL_SHIFT * 3))) & \
761 762 SPL_MASK)
762 763
763 764 #define SPL_HASH(pp) \
764 765 (&sfmmu_page_lock[SPL_INDEX(pp)].pad_mutex)
765 766
766 767 static pad_mutex_t sfmmu_page_lock[SPL_TABLE_SIZE];
767 768
768 769 /* Array of mutexes protecting a page's mapping list and p_nrm field. */
769 770
770 771 #define MML_TABLE_SIZE SPL_TABLE_SIZE
771 772 #define MLIST_HASH(pp) (&mml_table[SPL_INDEX(pp)].pad_mutex)
772 773
773 774 static pad_mutex_t mml_table[MML_TABLE_SIZE];
774 775
775 776 /*
776 777 * hat_unload_callback() will group together callbacks in order
777 778 * to avoid xt_sync() calls. This is the maximum size of the group.
778 779 */
779 780 #define MAX_CB_ADDR 32
780 781
781 782 tte_t hw_tte;
782 783 static ulong_t sfmmu_dmr_maxbit = DMR_MAXBIT;
783 784
784 785 static char *mmu_ctx_kstat_names[] = {
785 786 "mmu_ctx_tsb_exceptions",
786 787 "mmu_ctx_tsb_raise_exception",
787 788 "mmu_ctx_wrap_around",
788 789 };
789 790
790 791 /*
791 792 * Wrapper for vmem_xalloc since vmem_create only allows limited
792 793 * parameters for vm_source_alloc functions. This function allows us
793 794 * to specify alignment consistent with the size of the object being
794 795 * allocated.
795 796 */
796 797 static void *
797 798 sfmmu_vmem_xalloc_aligned_wrapper(vmem_t *vmp, size_t size, int vmflag)
798 799 {
799 800 return (vmem_xalloc(vmp, size, size, 0, 0, NULL, NULL, vmflag));
800 801 }
801 802
802 803 /* Common code for setting tsb_alloc_hiwater. */
803 804 #define SFMMU_SET_TSB_ALLOC_HIWATER(pages) tsb_alloc_hiwater = \
804 805 ptob(pages) / tsb_alloc_hiwater_factor
805 806
806 807 /*
807 808 * Set tsb_max_growsize to allow at most all of physical memory to be mapped by
808 809 * a single TSB. physmem is the number of physical pages so we need physmem 8K
809 810 * TTEs to represent all those physical pages. We round this up by using
810 811 * 1<<highbit(). To figure out which size code to use, remember that the size
811 812 * code is just an amount to shift the smallest TSB size to get the size of
812 813 * this TSB. So we subtract that size, TSB_START_SIZE, from highbit() (or
813 814 * highbit() - 1) to get the size code for the smallest TSB that can represent
814 815 * all of physical memory, while erring on the side of too much.
815 816 *
816 817 * Restrict tsb_max_growsize to make sure that:
817 818 * 1) TSBs can't grow larger than the TSB slab size
818 819 * 2) TSBs can't grow larger than UTSB_MAX_SZCODE.
819 820 */
820 821 #define SFMMU_SET_TSB_MAX_GROWSIZE(pages) { \
821 822 int _i, _szc, _slabszc, _tsbszc; \
822 823 \
823 824 _i = highbit(pages); \
824 825 if ((1 << (_i - 1)) == (pages)) \
825 826 _i--; /* 2^n case, round down */ \
826 827 _szc = _i - TSB_START_SIZE; \
827 828 _slabszc = bigtsb_slab_shift - (TSB_START_SIZE + TSB_ENTRY_SHIFT); \
828 829 _tsbszc = MIN(_szc, _slabszc); \
829 830 tsb_max_growsize = MIN(_tsbszc, UTSB_MAX_SZCODE); \
830 831 }
831 832
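A worked instance of the macro above may make the arithmetic easier to follow. The sketch below walks the same steps for a hypothetical machine with 2^21 8K pages (16 GB), assuming TSB_START_SIZE is the log2 of the smallest TSB's entry count (9 for a 512-entry TSB) and TSB_ENTRY_SHIFT is 4 (16-byte entries); the real values come from the sfmmu headers:

#include <stdio.h>

#define	TSB_START_SIZE_ASSUMED	9	/* log2(entries in smallest TSB) */
#define	TSB_ENTRY_SHIFT_ASSUMED	4	/* log2(bytes per TSB entry) */

static int
highbit_like(unsigned long v)		/* 1-based position of highest set bit */
{
	int h = 0;

	while (v != 0) {
		h++;
		v >>= 1;
	}
	return (h);
}

int
main(void)
{
	unsigned long pages = 1UL << 21;	/* 16 GB of 8K pages */
	int bigtsb_shift = 28;			/* 256M slab shift, as above */
	int i, szc, slabszc, tsbszc;

	i = highbit_like(pages);		/* 22 */
	if ((1UL << (i - 1)) == pages)
		i--;				/* exact power of two: 21 */
	szc = i - TSB_START_SIZE_ASSUMED;	/* 21 - 9 = 12 */
	slabszc = bigtsb_shift -
	    (TSB_START_SIZE_ASSUMED + TSB_ENTRY_SHIFT_ASSUMED);	/* 15 */
	tsbszc = (szc < slabszc) ? szc : slabszc;
	printf("tsb_max_growsize candidate %d, "
	    "then clamped to UTSB_MAX_SZCODE\n", tsbszc);
	return (0);
}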
832 833 /*
833 834 * Given a pointer to an sfmmu and a TTE size code, return a pointer to the
834 835 * tsb_info which handles that TTE size.
835 836 */
836 837 #define SFMMU_GET_TSBINFO(tsbinfop, sfmmup, tte_szc) { \
837 838 (tsbinfop) = (sfmmup)->sfmmu_tsb; \
838 839 ASSERT(((tsbinfop)->tsb_flags & TSB_SHAREDCTX) || \
839 840 sfmmu_hat_lock_held(sfmmup)); \
840 841 if ((tte_szc) >= TTE4M) { \
841 842 ASSERT((tsbinfop) != NULL); \
842 843 (tsbinfop) = (tsbinfop)->tsb_next; \
843 844 } \
844 845 }
845 846
846 847 /*
847 848 * Macro to use to unload entries from the TSB.
848 849 * It has knowledge of which page sizes get replicated in the TSB
849 850 * and will call the appropriate unload routine for the appropriate size.
850 851 */
851 852 #define SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, ismhat) \
852 853 { \
853 854 int ttesz = get_hblk_ttesz(hmeblkp); \
854 855 if (ttesz == TTE8K || ttesz == TTE4M) { \
855 856 sfmmu_unload_tsb(sfmmup, addr, ttesz); \
856 857 } else { \
857 858 caddr_t sva = ismhat ? addr : \
858 859 (caddr_t)get_hblk_base(hmeblkp); \
859 860 caddr_t eva = sva + get_hblk_span(hmeblkp); \
860 861 ASSERT(addr >= sva && addr < eva); \
861 862 sfmmu_unload_tsb_range(sfmmup, sva, eva, ttesz); \
862 863 } \
863 864 }
864 865
865 866
866 867 /* Update tsb_alloc_hiwater after memory is configured. */
867 868 /*ARGSUSED*/
868 869 static void
869 870 sfmmu_update_post_add(void *arg, pgcnt_t delta_pages)
870 871 {
871 872 /* Assumes physmem has already been updated. */
872 873 SFMMU_SET_TSB_ALLOC_HIWATER(physmem);
873 874 SFMMU_SET_TSB_MAX_GROWSIZE(physmem);
874 875 }
875 876
876 877 /*
877 878 * Update tsb_alloc_hiwater before memory is deleted. We'll do nothing here
878 879 * and update tsb_alloc_hiwater and tsb_max_growsize after the memory is
879 880 * deleted.
880 881 */
881 882 /*ARGSUSED*/
882 883 static int
883 884 sfmmu_update_pre_del(void *arg, pgcnt_t delta_pages)
884 885 {
885 886 return (0);
886 887 }
887 888
888 889 /* Update tsb_alloc_hiwater after memory fails to be unconfigured. */
889 890 /*ARGSUSED*/
890 891 static void
891 892 sfmmu_update_post_del(void *arg, pgcnt_t delta_pages, int cancelled)
892 893 {
893 894 /*
894 895 * Whether the delete was cancelled or not, just go ahead and update
895 896 * tsb_alloc_hiwater and tsb_max_growsize.
896 897 */
897 898 SFMMU_SET_TSB_ALLOC_HIWATER(physmem);
898 899 SFMMU_SET_TSB_MAX_GROWSIZE(physmem);
899 900 }
900 901
901 902 static kphysm_setup_vector_t sfmmu_update_vec = {
902 903 KPHYSM_SETUP_VECTOR_VERSION, /* version */
903 904 sfmmu_update_post_add, /* post_add */
904 905 sfmmu_update_pre_del, /* pre_del */
905 906 sfmmu_update_post_del /* post_del */
906 907 };
907 908
908 909
909 910 /*
910 911 * HME_BLK HASH PRIMITIVES
911 912 */
912 913
913 914 /*
914 915 * Enter a hme on the mapping list for page pp.
915 916 * When large pages are more prevalent in the system we might want to
916 917 * keep the mapping list in ascending order by the hment size. For now,
917 918 * small pages are more frequent, so don't slow it down.
918 919 */
919 920 #define HME_ADD(hme, pp) \
920 921 { \
921 922 ASSERT(sfmmu_mlist_held(pp)); \
922 923 \
923 924 hme->hme_prev = NULL; \
924 925 hme->hme_next = pp->p_mapping; \
925 926 hme->hme_page = pp; \
926 927 if (pp->p_mapping) { \
927 928 ((struct sf_hment *)(pp->p_mapping))->hme_prev = hme;\
928 929 ASSERT(pp->p_share > 0); \
929 930 } else { \
930 931 /* EMPTY */ \
931 932 ASSERT(pp->p_share == 0); \
932 933 } \
933 934 pp->p_mapping = hme; \
934 935 pp->p_share++; \
935 936 }
936 937
937 938 /*
938 939 * Remove a hme from the mapping list for page pp.
939 940 * If we are unmapping a large translation, we need to make sure that the
940 941 * change is reflected in the corresponding bit of the p_index field.
941 942 */
942 943 #define HME_SUB(hme, pp) \
943 944 { \
944 945 ASSERT(sfmmu_mlist_held(pp)); \
945 946 ASSERT(hme->hme_page == pp || IS_PAHME(hme)); \
946 947 \
947 948 if (pp->p_mapping == NULL) { \
948 949 panic("hme_remove - no mappings"); \
949 950 } \
950 951 \
951 952 membar_stst(); /* ensure previous stores finish */ \
952 953 \
953 954 ASSERT(pp->p_share > 0); \
954 955 pp->p_share--; \
955 956 \
956 957 if (hme->hme_prev) { \
957 958 ASSERT(pp->p_mapping != hme); \
958 959 ASSERT(hme->hme_prev->hme_page == pp || \
959 960 IS_PAHME(hme->hme_prev)); \
960 961 hme->hme_prev->hme_next = hme->hme_next; \
961 962 } else { \
962 963 ASSERT(pp->p_mapping == hme); \
963 964 pp->p_mapping = hme->hme_next; \
964 965 ASSERT((pp->p_mapping == NULL) ? \
965 966 (pp->p_share == 0) : 1); \
966 967 } \
967 968 \
968 969 if (hme->hme_next) { \
969 970 ASSERT(hme->hme_next->hme_page == pp || \
970 971 IS_PAHME(hme->hme_next)); \
971 972 hme->hme_next->hme_prev = hme->hme_prev; \
972 973 } \
973 974 \
974 975 /* zero out the entry */ \
975 976 hme->hme_next = NULL; \
976 977 hme->hme_prev = NULL; \
977 978 hme->hme_page = NULL; \
978 979 \
979 980 if (hme_size(hme) > TTE8K) { \
980 981 /* remove mappings for remainder of large pg */ \
981 982 sfmmu_rm_large_mappings(pp, hme_size(hme)); \
982 983 } \
983 984 }
984 985
985 986 /*
986 987 * This function returns the hment given the hme_blk and a vaddr.
987 988 * It assumes addr has already been checked to belong to hme_blk's
988 989 * range.
989 990 */
990 991 #define HBLKTOHME(hment, hmeblkp, addr) \
991 992 { \
992 993 int index; \
993 994 HBLKTOHME_IDX(hment, hmeblkp, addr, index) \
994 995 }
995 996
996 997 /*
997 998 * Version of HBLKTOHME that also returns the index in hmeblkp
998 999 * of the hment.
999 1000 */
1000 1001 #define HBLKTOHME_IDX(hment, hmeblkp, addr, idx) \
1001 1002 { \
1002 1003 ASSERT(in_hblk_range((hmeblkp), (addr))); \
1003 1004 \
1004 1005 if (get_hblk_ttesz(hmeblkp) == TTE8K) { \
1005 1006 idx = (((uintptr_t)(addr) >> MMU_PAGESHIFT) & (NHMENTS-1)); \
1006 1007 } else \
1007 1008 idx = 0; \
1008 1009 \
1009 1010 (hment) = &(hmeblkp)->hblk_hme[idx]; \
1010 1011 }
1011 1012
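As a quick illustration of the index computation above, the sketch below assumes an 8K base page (MMU_PAGESHIFT of 13) and eight hments per 8K-TTE hmeblk (NHMENTS of 8), so each 8K page within a 64K span selects the next hblk_hme[] slot:

#include <stdio.h>
#include <stdint.h>

#define	MMU_PAGESHIFT_ASSUMED	13	/* 8K base pages */
#define	NHMENTS_ASSUMED		8	/* hments per 8K-TTE hmeblk */

int
main(void)
{
	uintptr_t addr;

	/* walk one 64K hmeblk span in 8K steps */
	for (addr = 0x10000; addr < 0x20000; addr += 0x2000) {
		printf("vaddr %#lx -> hblk_hme[%lu]\n", (unsigned long)addr,
		    (unsigned long)((addr >> MMU_PAGESHIFT_ASSUMED) &
		    (NHMENTS_ASSUMED - 1)));
	}
	return (0);
}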
1012 1013 /*
1013 1014 * Disable any page sizes not supported by the CPU
1014 1015 */
1015 1016 void
1016 1017 hat_init_pagesizes()
1017 1018 {
1018 1019 int i;
1019 1020
1020 1021 mmu_exported_page_sizes = 0;
1021 1022 for (i = TTE8K; i < max_mmu_page_sizes; i++) {
1022 1023
1023 1024 szc_2_userszc[i] = (uint_t)-1;
1024 1025 userszc_2_szc[i] = (uint_t)-1;
1025 1026
1026 1027 if ((mmu_exported_pagesize_mask & (1 << i)) == 0) {
1027 1028 disable_large_pages |= (1 << i);
1028 1029 } else {
1029 1030 szc_2_userszc[i] = mmu_exported_page_sizes;
1030 1031 userszc_2_szc[mmu_exported_page_sizes] = i;
1031 1032 mmu_exported_page_sizes++;
1032 1033 }
1033 1034 }
1034 1035
1035 1036 disable_ism_large_pages |= disable_large_pages;
1036 1037 disable_auto_data_large_pages = disable_large_pages;
1037 1038 disable_auto_text_large_pages = disable_large_pages;
1038 1039
1039 1040 /*
1040 1041 * Initialize mmu-specific large page sizes.
1041 1042 */
1042 1043 if (&mmu_large_pages_disabled) {
1043 1044 disable_large_pages |= mmu_large_pages_disabled(HAT_LOAD);
1044 1045 disable_ism_large_pages |=
1045 1046 mmu_large_pages_disabled(HAT_LOAD_SHARE);
1046 1047 disable_auto_data_large_pages |=
1047 1048 mmu_large_pages_disabled(HAT_AUTO_DATA);
1048 1049 disable_auto_text_large_pages |=
1049 1050 mmu_large_pages_disabled(HAT_AUTO_TEXT);
1050 1051 }
1051 1052 }
1052 1053
1053 1054 /*
1054 1055 * Initialize the hardware address translation structures.
1055 1056 */
1056 1057 void
1057 1058 hat_init(void)
1058 1059 {
1059 1060 int i;
1060 1061 uint_t sz;
1061 1062 size_t size;
1062 1063
1063 1064 hat_lock_init();
1064 1065 hat_kstat_init();
1065 1066
1066 1067 /*
1067 1068 * Hardware-only bits in a TTE
1068 1069 */
1069 1070 MAKE_TTE_MASK(&hw_tte);
1070 1071
1071 1072 hat_init_pagesizes();
1072 1073
1073 1074 /* Initialize the hash locks */
1074 1075 for (i = 0; i < khmehash_num; i++) {
1075 1076 mutex_init(&khme_hash[i].hmehash_mutex, NULL,
1076 1077 MUTEX_DEFAULT, NULL);
1077 1078 khme_hash[i].hmeh_nextpa = HMEBLK_ENDPA;
1078 1079 }
1079 1080 for (i = 0; i < uhmehash_num; i++) {
1080 1081 mutex_init(&uhme_hash[i].hmehash_mutex, NULL,
1081 1082 MUTEX_DEFAULT, NULL);
1082 1083 uhme_hash[i].hmeh_nextpa = HMEBLK_ENDPA;
1083 1084 }
1084 1085 khmehash_num--; /* make sure counter starts from 0 */
1085 1086 uhmehash_num--; /* make sure counter starts from 0 */
1086 1087
1087 1088 /*
1088 1089 * Allocate context domain structures.
1089 1090 *
1090 1091 * A platform may choose to modify max_mmu_ctxdoms in
1091 1092 * set_platform_defaults(). If a platform does not define
1092 1093 * a set_platform_defaults() or does not choose to modify
1093 1094 * max_mmu_ctxdoms, it gets one MMU context domain for every CPU.
1094 1095 *
1095 1096 * For all platforms that have CPUs sharing MMUs, this
1096 1097 * value must be defined.
1097 1098 */
1098 1099 if (max_mmu_ctxdoms == 0)
1099 1100 max_mmu_ctxdoms = max_ncpus;
1100 1101
1101 1102 size = max_mmu_ctxdoms * sizeof (mmu_ctx_t *);
1102 1103 mmu_ctxs_tbl = kmem_zalloc(size, KM_SLEEP);
1103 1104
1104 1105 /* mmu_ctx_t is 64 bytes aligned */
1105 1106 mmuctxdom_cache = kmem_cache_create("mmuctxdom_cache",
1106 1107 sizeof (mmu_ctx_t), 64, NULL, NULL, NULL, NULL, NULL, 0);
1107 1108 /*
1108 1109 * MMU context domain initialization for the Boot CPU.
1109 1110 * This needs the context domains array allocated above.
1110 1111 */
1111 1112 mutex_enter(&cpu_lock);
1112 1113 sfmmu_cpu_init(CPU);
1113 1114 mutex_exit(&cpu_lock);
1114 1115
1115 1116 /*
1116 1117 * Initialize ism mapping list lock.
1117 1118 */
1118 1119
1119 1120 mutex_init(&ism_mlist_lock, NULL, MUTEX_DEFAULT, NULL);
1120 1121
1121 1122 /*
1122 1123 * Each sfmmu structure carries an array of MMU context info
1123 1124 * structures, one per context domain. The size of this array depends
1124 1125 * on the maximum number of context domains. So, the size of the
1125 1126 * sfmmu structure varies per platform.
1126 1127 *
1127 1128 * sfmmu is allocated from static arena, because trap
1128 1129 * handler at TL > 0 is not allowed to touch kernel relocatable
1129 1130 * memory. sfmmu's alignment is changed to 64 bytes from
1130 1131 * default 8 bytes, as the lower 6 bits will be used to pass
1131 1132 * pgcnt to vtag_flush_pgcnt_tl1.
1132 1133 */
1133 1134 size = sizeof (sfmmu_t) + sizeof (sfmmu_ctx_t) * (max_mmu_ctxdoms - 1);
1134 1135
1135 1136 sfmmuid_cache = kmem_cache_create("sfmmuid_cache", size,
1136 1137 64, sfmmu_idcache_constructor, sfmmu_idcache_destructor,
1137 1138 NULL, NULL, static_arena, 0);
1138 1139
1139 1140 sfmmu_tsbinfo_cache = kmem_cache_create("sfmmu_tsbinfo_cache",
1140 1141 sizeof (struct tsb_info), 0, NULL, NULL, NULL, NULL, NULL, 0);
1141 1142
1142 1143 /*
1143 1144 * Since we only use the tsb8k cache to "borrow" pages for TSBs
1144 1145 * from the heap when low on memory or when TSB_FORCEALLOC is
1145 1146 * specified, don't use magazines to cache them--we want to return
1146 1147 * them to the system as quickly as possible.
1147 1148 */
1148 1149 sfmmu_tsb8k_cache = kmem_cache_create("sfmmu_tsb8k_cache",
1149 1150 MMU_PAGESIZE, MMU_PAGESIZE, NULL, NULL, NULL, NULL,
1150 1151 static_arena, KMC_NOMAGAZINE);
1151 1152
1152 1153 /*
1153 1154 * Set tsb_alloc_hiwater to 1/tsb_alloc_hiwater_factor of physical
1154 1155 * memory, which corresponds to the old static reserve for TSBs.
1155 1156 * tsb_alloc_hiwater_factor defaults to 32. This caps the amount of
1156 1157 * memory we'll allocate for TSB slabs; beyond this point TSB
1157 1158 * allocations will be taken from the kernel heap (via
1158 1159 * sfmmu_tsb8k_cache) and will be throttled as would any other kmem
1159 1160 * consumer.
1160 1161 */
1161 1162 if (tsb_alloc_hiwater_factor == 0) {
1162 1163 tsb_alloc_hiwater_factor = TSB_ALLOC_HIWATER_FACTOR_DEFAULT;
1163 1164 }
1164 1165 SFMMU_SET_TSB_ALLOC_HIWATER(physmem);
1165 1166
1166 1167 for (sz = tsb_slab_ttesz; sz > 0; sz--) {
1167 1168 if (!(disable_large_pages & (1 << sz)))
1168 1169 break;
1169 1170 }
1170 1171
1171 1172 if (sz < tsb_slab_ttesz) {
1172 1173 tsb_slab_ttesz = sz;
1173 1174 tsb_slab_shift = MMU_PAGESHIFT + (sz << 1) + sz;
1174 1175 tsb_slab_size = 1 << tsb_slab_shift;
1175 1176 tsb_slab_mask = (1 << (tsb_slab_shift - MMU_PAGESHIFT)) - 1;
1176 1177 use_bigtsb_arena = 0;
1177 1178 } else if (use_bigtsb_arena &&
1178 1179 (disable_large_pages & (1 << bigtsb_slab_ttesz))) {
1179 1180 use_bigtsb_arena = 0;
1180 1181 }
1181 1182
1182 1183 if (!use_bigtsb_arena) {
1183 1184 bigtsb_slab_shift = tsb_slab_shift;
1184 1185 }
1185 1186 SFMMU_SET_TSB_MAX_GROWSIZE(physmem);
1186 1187
1187 1188 /*
1188 1189 * On smaller memory systems, allocate TSB memory in smaller chunks
1189 1190 * than the default 4M slab size. We also honor disable_large_pages
1190 1191 * here.
1191 1192 *
1192 1193 * The trap handlers need to be patched with the final slab shift,
1193 1194 * since they need to be able to construct the TSB pointer at runtime.
1194 1195 */
1195 1196 if ((tsb_max_growsize <= TSB_512K_SZCODE) &&
1196 1197 !(disable_large_pages & (1 << TTE512K))) {
1197 1198 tsb_slab_ttesz = TTE512K;
1198 1199 tsb_slab_shift = MMU_PAGESHIFT512K;
1199 1200 tsb_slab_size = MMU_PAGESIZE512K;
1200 1201 tsb_slab_mask = MMU_PAGEOFFSET512K >> MMU_PAGESHIFT;
1201 1202 use_bigtsb_arena = 0;
1202 1203 }
1203 1204
1204 1205 if (!use_bigtsb_arena) {
1205 1206 bigtsb_slab_ttesz = tsb_slab_ttesz;
1206 1207 bigtsb_slab_shift = tsb_slab_shift;
1207 1208 bigtsb_slab_size = tsb_slab_size;
1208 1209 bigtsb_slab_mask = tsb_slab_mask;
1209 1210 }
1210 1211
1211 1212
1212 1213 /*
1213 1214 * Set up memory callback to update tsb_alloc_hiwater and
1214 1215 * tsb_max_growsize.
1215 1216 */
1216 1217 i = kphysm_setup_func_register(&sfmmu_update_vec, (void *) 0);
1217 1218 ASSERT(i == 0);
1218 1219
1219 1220 /*
1220 1221 * kmem_tsb_arena is the source from which large TSB slabs are
1221 1222 * drawn. The quantum of this arena corresponds to the largest
1222 1223 * TSB size we can dynamically allocate for user processes.
1223 1224 * Currently it must also be a supported page size since we
1224 1225 * use exactly one translation entry to map each slab page.
1225 1226 *
1226 1227 * The per-lgroup kmem_tsb_default_arena arenas are the arenas from
1227 1228 * which most TSBs are allocated. Since most TSB allocations are
1228 1229 * typically 8K we have a kmem cache we stack on top of each
1229 1230 * kmem_tsb_default_arena to speed up those allocations.
1230 1231 *
1231 1232 * Note the two-level scheme of arenas is required only
1232 1233 * because vmem_create doesn't allow us to specify alignment
1233 1234 * requirements. If this ever changes the code could be
1234 1235 * simplified to use only one level of arenas.
1235 1236 *
1236 1237 * If 256M page support exists on sun4v, 256MB kmem_bigtsb_arena
1237 1238 * will be provided in addition to the 4M kmem_tsb_arena.
1238 1239 */
1239 1240 if (use_bigtsb_arena) {
1240 1241 kmem_bigtsb_arena = vmem_create("kmem_bigtsb", NULL, 0,
1241 1242 bigtsb_slab_size, sfmmu_vmem_xalloc_aligned_wrapper,
1242 1243 vmem_xfree, heap_arena, 0, VM_SLEEP);
1243 1244 }
1244 1245
1245 1246 kmem_tsb_arena = vmem_create("kmem_tsb", NULL, 0, tsb_slab_size,
1246 1247 sfmmu_vmem_xalloc_aligned_wrapper,
1247 1248 vmem_xfree, heap_arena, 0, VM_SLEEP);
1248 1249
1249 1250 if (tsb_lgrp_affinity) {
1250 1251 char s[50];
1251 1252 for (i = 0; i < NLGRPS_MAX; i++) {
1252 1253 if (use_bigtsb_arena) {
1253 1254 (void) sprintf(s, "kmem_bigtsb_lgrp%d", i);
1254 1255 kmem_bigtsb_default_arena[i] = vmem_create(s,
1255 1256 NULL, 0, 2 * tsb_slab_size,
1256 1257 sfmmu_tsb_segkmem_alloc,
1257 1258 sfmmu_tsb_segkmem_free, kmem_bigtsb_arena,
1258 1259 0, VM_SLEEP | VM_BESTFIT);
1259 1260 }
1260 1261
1261 1262 (void) sprintf(s, "kmem_tsb_lgrp%d", i);
1262 1263 kmem_tsb_default_arena[i] = vmem_create(s,
1263 1264 NULL, 0, PAGESIZE, sfmmu_tsb_segkmem_alloc,
1264 1265 sfmmu_tsb_segkmem_free, kmem_tsb_arena, 0,
1265 1266 VM_SLEEP | VM_BESTFIT);
1266 1267
1267 1268 (void) sprintf(s, "sfmmu_tsb_lgrp%d_cache", i);
1268 1269 sfmmu_tsb_cache[i] = kmem_cache_create(s,
1269 1270 PAGESIZE, PAGESIZE, NULL, NULL, NULL, NULL,
1270 1271 kmem_tsb_default_arena[i], 0);
1271 1272 }
1272 1273 } else {
1273 1274 if (use_bigtsb_arena) {
1274 1275 kmem_bigtsb_default_arena[0] =
1275 1276 vmem_create("kmem_bigtsb_default", NULL, 0,
1276 1277 2 * tsb_slab_size, sfmmu_tsb_segkmem_alloc,
1277 1278 sfmmu_tsb_segkmem_free, kmem_bigtsb_arena, 0,
1278 1279 VM_SLEEP | VM_BESTFIT);
1279 1280 }
1280 1281
1281 1282 kmem_tsb_default_arena[0] = vmem_create("kmem_tsb_default",
1282 1283 NULL, 0, PAGESIZE, sfmmu_tsb_segkmem_alloc,
1283 1284 sfmmu_tsb_segkmem_free, kmem_tsb_arena, 0,
1284 1285 VM_SLEEP | VM_BESTFIT);
1285 1286 sfmmu_tsb_cache[0] = kmem_cache_create("sfmmu_tsb_cache",
1286 1287 PAGESIZE, PAGESIZE, NULL, NULL, NULL, NULL,
1287 1288 kmem_tsb_default_arena[0], 0);
1288 1289 }
1289 1290
1290 1291 sfmmu8_cache = kmem_cache_create("sfmmu8_cache", HME8BLK_SZ,
1291 1292 HMEBLK_ALIGN, sfmmu_hblkcache_constructor,
1292 1293 sfmmu_hblkcache_destructor,
1293 1294 sfmmu_hblkcache_reclaim, (void *)HME8BLK_SZ,
1294 1295 hat_memload_arena, KMC_NOHASH);
1295 1296
1296 1297 hat_memload1_arena = vmem_create("hat_memload1", NULL, 0, PAGESIZE,
1297 1298 segkmem_alloc_permanent, segkmem_free, heap_arena, 0,
1298 1299 VMC_DUMPSAFE | VM_SLEEP);
1299 1300
1300 1301 sfmmu1_cache = kmem_cache_create("sfmmu1_cache", HME1BLK_SZ,
1301 1302 HMEBLK_ALIGN, sfmmu_hblkcache_constructor,
1302 1303 sfmmu_hblkcache_destructor,
1303 1304 NULL, (void *)HME1BLK_SZ,
1304 1305 hat_memload1_arena, KMC_NOHASH);
1305 1306
1306 1307 pa_hment_cache = kmem_cache_create("pa_hment_cache", PAHME_SZ,
1307 1308 0, NULL, NULL, NULL, NULL, static_arena, KMC_NOHASH);
1308 1309
1309 1310 ism_blk_cache = kmem_cache_create("ism_blk_cache",
1310 1311 sizeof (ism_blk_t), ecache_alignsize, NULL, NULL,
1311 1312 NULL, NULL, static_arena, KMC_NOHASH);
1312 1313
1313 1314 ism_ment_cache = kmem_cache_create("ism_ment_cache",
1314 1315 sizeof (ism_ment_t), 0, NULL, NULL,
1315 1316 NULL, NULL, NULL, 0);
1316 1317
1317 1318 /*
1318 1319 * We grab the first hat for the kernel.
1319 1320 */
1320 1321 AS_LOCK_ENTER(&kas, RW_WRITER);
1321 1322 kas.a_hat = hat_alloc(&kas);
1322 1323 AS_LOCK_EXIT(&kas);
1323 1324
1324 1325 /*
1325 1326 * Initialize hblk_reserve.
1326 1327 */
1327 1328 ((struct hme_blk *)hblk_reserve)->hblk_nextpa =
1328 1329 va_to_pa((caddr_t)hblk_reserve);
1329 1330
1330 1331 #ifndef UTSB_PHYS
1331 1332 /*
1332 1333 * Reserve some kernel virtual address space for the locked TTEs
1333 1334 * that allow us to probe the TSB from TL>0.
1334 1335 */
1335 1336 utsb_vabase = vmem_xalloc(heap_arena, tsb_slab_size, tsb_slab_size,
1336 1337 0, 0, NULL, NULL, VM_SLEEP);
1337 1338 utsb4m_vabase = vmem_xalloc(heap_arena, tsb_slab_size, tsb_slab_size,
1338 1339 0, 0, NULL, NULL, VM_SLEEP);
1339 1340 #endif
1340 1341
1341 1342 #ifdef VAC
1342 1343 /*
1343 1344 * The big page VAC handling code assumes VAC
1344 1345 * will not be bigger than the smallest big
1345 1346 * page, which is 64K.
1346 1347 */
1347 1348 if (TTEPAGES(TTE64K) < CACHE_NUM_COLOR) {
1348 1349 cmn_err(CE_PANIC, "VAC too big!");
1349 1350 }
1350 1351 #endif
1351 1352
1352 1353 uhme_hash_pa = va_to_pa(uhme_hash);
1353 1354 khme_hash_pa = va_to_pa(khme_hash);
1354 1355
1355 1356 /*
1356 1357 * Initialize relocation locks. kpr_suspendlock is held
1357 1358 * at PIL_MAX to prevent interrupts from pinning the holder
1358 1359 * of a suspended TTE which may access it leading to a
1359 1360 * deadlock condition.
1360 1361 */
1361 1362 mutex_init(&kpr_mutex, NULL, MUTEX_DEFAULT, NULL);
1362 1363 mutex_init(&kpr_suspendlock, NULL, MUTEX_SPIN, (void *)PIL_MAX);
1363 1364
1364 1365 /*
1365 1366 * If shared context support is disabled via /etc/system,
1366 1367 * set shctx_on to 0 here if it was set to 1 earlier in the boot
1367 1368 * sequence by cpu module initialization code.
1368 1369 */
1369 1370 if (shctx_on && disable_shctx) {
1370 1371 shctx_on = 0;
1371 1372 }
1372 1373
1373 1374 if (shctx_on) {
1374 1375 srd_buckets = kmem_zalloc(SFMMU_MAX_SRD_BUCKETS *
1375 1376 sizeof (srd_buckets[0]), KM_SLEEP);
1376 1377 for (i = 0; i < SFMMU_MAX_SRD_BUCKETS; i++) {
1377 1378 mutex_init(&srd_buckets[i].srdb_lock, NULL,
1378 1379 MUTEX_DEFAULT, NULL);
1379 1380 }
1380 1381
1381 1382 srd_cache = kmem_cache_create("srd_cache", sizeof (sf_srd_t),
1382 1383 0, sfmmu_srdcache_constructor, sfmmu_srdcache_destructor,
1383 1384 NULL, NULL, NULL, 0);
1384 1385 region_cache = kmem_cache_create("region_cache",
1385 1386 sizeof (sf_region_t), 0, sfmmu_rgncache_constructor,
1386 1387 sfmmu_rgncache_destructor, NULL, NULL, NULL, 0);
1387 1388 scd_cache = kmem_cache_create("scd_cache", sizeof (sf_scd_t),
1388 1389 0, sfmmu_scdcache_constructor, sfmmu_scdcache_destructor,
1389 1390 NULL, NULL, NULL, 0);
1390 1391 }
1391 1392
1392 1393 /*
1393 1394 * Pre-allocate hrm_hashtab before enabling the collection of
1394 1395 * refmod statistics. Allocating on the fly would mean us
1395 1396 * running the risk of suffering recursive mutex enters or
1396 1397 * deadlocks.
1397 1398 */
1398 1399 hrm_hashtab = kmem_zalloc(HRM_HASHSIZE * sizeof (struct hrmstat *),
1399 1400 KM_SLEEP);
1400 1401
1401 1402 /* Allocate per-cpu pending freelist of hmeblks */
1402 1403 cpu_hme_pend = kmem_zalloc((NCPU * sizeof (cpu_hme_pend_t)) + 64,
1403 1404 KM_SLEEP);
1404 1405 cpu_hme_pend = (cpu_hme_pend_t *)P2ROUNDUP(
1405 1406 (uintptr_t)cpu_hme_pend, 64);
1406 1407
1407 1408 for (i = 0; i < NCPU; i++) {
1408 1409 mutex_init(&cpu_hme_pend[i].chp_mutex, NULL, MUTEX_DEFAULT,
1409 1410 NULL);
1410 1411 }
1411 1412
1412 1413 if (cpu_hme_pend_thresh == 0) {
1413 1414 cpu_hme_pend_thresh = CPU_HME_PEND_THRESH;
1414 1415 }
1415 1416 }
1416 1417
1417 1418 /*
1418 1419 * Initialize locking for the hat layer, called early during boot.
1419 1420 */
1420 1421 static void
1421 1422 hat_lock_init()
1422 1423 {
1423 1424 int i;
1424 1425
1425 1426 /*
1426 1427 * initialize the array of mutexes protecting a page's mapping
1427 1428 * list and p_nrm field.
1428 1429 */
1429 1430 for (i = 0; i < MML_TABLE_SIZE; i++)
1430 1431 mutex_init(&mml_table[i].pad_mutex, NULL, MUTEX_DEFAULT, NULL);
1431 1432
1432 1433 if (kpm_enable) {
1433 1434 for (i = 0; i < kpmp_table_sz; i++) {
1434 1435 mutex_init(&kpmp_table[i].khl_mutex, NULL,
1435 1436 MUTEX_DEFAULT, NULL);
1436 1437 }
1437 1438 }
1438 1439
1439 1440 /*
1440 1441 * Initialize array of mutex locks that protects sfmmu fields and
1441 1442 * TSB lists.
1442 1443 */
1443 1444 for (i = 0; i < SFMMU_NUM_LOCK; i++)
1444 1445 mutex_init(HATLOCK_MUTEXP(&hat_lock[i]), NULL, MUTEX_DEFAULT,
1445 1446 NULL);
1446 1447 }
1447 1448
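/*
 * Highest kernel virtual address considered here: the end of the kmem64
 * segment when one was set up, otherwise SYSLIMIT.
 */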
1448 1449 #define SFMMU_KERNEL_MAXVA \
1449 1450 (kmem64_base ? (uintptr_t)kmem64_end : (SYSLIMIT))
1450 1451
1451 1452 /*
1452 1453 * Allocate a hat structure.
1453 1454 * Called when an address space first uses a hat.
1454 1455 */
1455 1456 struct hat *
1456 1457 hat_alloc(struct as *as)
1457 1458 {
1458 1459 sfmmu_t *sfmmup;
1459 1460 int i;
1460 1461 uint64_t cnum;
1461 1462 extern uint_t get_color_start(struct as *);
1462 1463
1463 1464 ASSERT(AS_WRITE_HELD(as));
1464 1465 sfmmup = kmem_cache_alloc(sfmmuid_cache, KM_SLEEP);
1465 1466 sfmmup->sfmmu_as = as;
1466 1467 sfmmup->sfmmu_flags = 0;
1467 1468 sfmmup->sfmmu_tteflags = 0;
1468 1469 sfmmup->sfmmu_rtteflags = 0;
1469 1470 LOCK_INIT_CLEAR(&sfmmup->sfmmu_ctx_lock);
1470 1471
1471 1472 if (as == &kas) {
1472 1473 ksfmmup = sfmmup;
1473 1474 sfmmup->sfmmu_cext = 0;
1474 1475 cnum = KCONTEXT;
1475 1476
1476 1477 sfmmup->sfmmu_clrstart = 0;
1477 1478 sfmmup->sfmmu_tsb = NULL;
1478 1479 /*
1479 1480 * hat_kern_setup() will call sfmmu_init_ktsbinfo()
1480 1481 * to setup tsb_info for ksfmmup.
1481 1482 */
1482 1483 } else {
1483 1484
1484 1485 /*
1485 1486 * Just set to invalid ctx. When it faults, it will
1486 1487 * get a valid ctx. This would avoid the situation
1487 1488 * where we get a ctx, but it gets stolen and then
1488 1489 * we fault when we try to run and so have to get
1489 1490 * another ctx.
1490 1491 */
1491 1492 sfmmup->sfmmu_cext = 0;
1492 1493 cnum = INVALID_CONTEXT;
1493 1494
1494 1495 /* initialize original physical page coloring bin */
1495 1496 sfmmup->sfmmu_clrstart = get_color_start(as);
1496 1497 #ifdef DEBUG
1497 1498 if (tsb_random_size) {
1498 1499 uint32_t randval = (uint32_t)gettick() >> 4;
1499 1500 int size = randval % (tsb_max_growsize + 1);
1500 1501
1501 1502 			/* choose a random tsb size for stress testing */
1502 1503 (void) sfmmu_tsbinfo_alloc(&sfmmup->sfmmu_tsb, size,
1503 1504 TSB8K|TSB64K|TSB512K, 0, sfmmup);
1504 1505 } else
1505 1506 #endif /* DEBUG */
1506 1507 (void) sfmmu_tsbinfo_alloc(&sfmmup->sfmmu_tsb,
1507 1508 default_tsb_size,
1508 1509 TSB8K|TSB64K|TSB512K, 0, sfmmup);
1509 1510 sfmmup->sfmmu_flags = HAT_SWAPPED | HAT_ALLCTX_INVALID;
1510 1511 ASSERT(sfmmup->sfmmu_tsb != NULL);
1511 1512 }
1512 1513
1513 1514 ASSERT(max_mmu_ctxdoms > 0);
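	/*
	 * Initialize this hat's per-domain context entries with the cnum
	 * chosen above and a generation number of 0; a user hat therefore
	 * presumably sees a stale generation and allocates a fresh context
	 * the first time it runs in each domain.
	 */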
1514 1515 for (i = 0; i < max_mmu_ctxdoms; i++) {
1515 1516 sfmmup->sfmmu_ctxs[i].cnum = cnum;
1516 1517 sfmmup->sfmmu_ctxs[i].gnum = 0;
1517 1518 }
1518 1519
1519 1520 for (i = 0; i < max_mmu_page_sizes; i++) {
1520 1521 sfmmup->sfmmu_ttecnt[i] = 0;
1521 1522 sfmmup->sfmmu_scdrttecnt[i] = 0;
1522 1523 sfmmup->sfmmu_ismttecnt[i] = 0;
1523 1524 sfmmup->sfmmu_scdismttecnt[i] = 0;
1524 1525 sfmmup->sfmmu_pgsz[i] = TTE8K;
1525 1526 }
1526 1527 sfmmup->sfmmu_tsb0_4minflcnt = 0;
1527 1528 sfmmup->sfmmu_iblk = NULL;
1528 1529 sfmmup->sfmmu_ismhat = 0;
1529 1530 sfmmup->sfmmu_scdhat = 0;
1530 1531 sfmmup->sfmmu_ismblkpa = (uint64_t)-1;
1531 1532 if (sfmmup == ksfmmup) {
1532 1533 CPUSET_ALL(sfmmup->sfmmu_cpusran);
1533 1534 } else {
1534 1535 CPUSET_ZERO(sfmmup->sfmmu_cpusran);
1535 1536 }
1536 1537 sfmmup->sfmmu_free = 0;
1537 1538 sfmmup->sfmmu_rmstat = 0;
1538 1539 sfmmup->sfmmu_clrbin = sfmmup->sfmmu_clrstart;
1539 1540 cv_init(&sfmmup->sfmmu_tsb_cv, NULL, CV_DEFAULT, NULL);
1540 1541 sfmmup->sfmmu_srdp = NULL;
1541 1542 SF_RGNMAP_ZERO(sfmmup->sfmmu_region_map);
1542 1543 bzero(sfmmup->sfmmu_hmeregion_links, SFMMU_L1_HMERLINKS_SIZE);
1543 1544 sfmmup->sfmmu_scdp = NULL;
1544 1545 sfmmup->sfmmu_scd_link.next = NULL;
1545 1546 sfmmup->sfmmu_scd_link.prev = NULL;
1546 1547 return (sfmmup);
1547 1548 }
1548 1549
1549 1550 /*
1550 1551 * Create per-MMU context domain kstats for a given MMU ctx.
1551 1552 */
1552 1553 static void
1553 1554 sfmmu_mmu_kstat_create(mmu_ctx_t *mmu_ctxp)
1554 1555 {
1555 1556 mmu_ctx_stat_t stat;
1556 1557 kstat_t *mmu_kstat;
1557 1558
1558 1559 ASSERT(MUTEX_HELD(&cpu_lock));
1559 1560 ASSERT(mmu_ctxp->mmu_kstat == NULL);
1560 1561
1561 1562 mmu_kstat = kstat_create("unix", mmu_ctxp->mmu_idx, "mmu_ctx",
1562 1563 "hat", KSTAT_TYPE_NAMED, MMU_CTX_NUM_STATS, KSTAT_FLAG_VIRTUAL);
1563 1564
1564 1565 if (mmu_kstat == NULL) {
1565 1566 cmn_err(CE_WARN, "kstat_create for MMU %d failed",
1566 1567 mmu_ctxp->mmu_idx);
1567 1568 } else {
1568 1569 mmu_kstat->ks_data = mmu_ctxp->mmu_kstat_data;
1569 1570 for (stat = 0; stat < MMU_CTX_NUM_STATS; stat++)
1570 1571 kstat_named_init(&mmu_ctxp->mmu_kstat_data[stat],
1571 1572 mmu_ctx_kstat_names[stat], KSTAT_DATA_INT64);
1572 1573 mmu_ctxp->mmu_kstat = mmu_kstat;
1573 1574 kstat_install(mmu_kstat);
1574 1575 }
1575 1576 }
1576 1577
1577 1578 /*
1578 1579 * plat_cpuid_to_mmu_ctx_info() is a platform interface that returns MMU
1579 1580 * context domain information for a given CPU. If a platform does not
1580 1581 * specify that interface, then the function below is used instead to return
1581 1582 * default information. The defaults are as follows:
1582 1583 *
1583 1584 * - The number of MMU context IDs supported on any CPU in the
1584 1585 * system is 8K.
1585 1586 * - There is one MMU context domain per CPU.
1586 1587 */
1587 1588 /*ARGSUSED*/
1588 1589 static void
1589 1590 sfmmu_cpuid_to_mmu_ctx_info(processorid_t cpuid, mmu_ctx_info_t *infop)
1590 1591 {
1591 1592 infop->mmu_nctxs = nctxs;
1592 1593 infop->mmu_idx = cpu[cpuid]->cpu_seqid;
1593 1594 }
1594 1595
1595 1596 /*
1596 1597 * Called during CPU initialization to set the MMU context-related information
1597 1598 * for a CPU.
1598 1599 *
1599 1600 * cpu_lock serializes accesses to mmu_ctxs and mmu_saved_gnum.
1600 1601 */
1601 1602 void
1602 1603 sfmmu_cpu_init(cpu_t *cp)
1603 1604 {
1604 1605 mmu_ctx_info_t info;
1605 1606 mmu_ctx_t *mmu_ctxp;
1606 1607
1607 1608 ASSERT(MUTEX_HELD(&cpu_lock));
1608 1609
1609 1610 if (&plat_cpuid_to_mmu_ctx_info == NULL)
1610 1611 sfmmu_cpuid_to_mmu_ctx_info(cp->cpu_id, &info);
1611 1612 else
1612 1613 plat_cpuid_to_mmu_ctx_info(cp->cpu_id, &info);
1613 1614
1614 1615 ASSERT(info.mmu_idx < max_mmu_ctxdoms);
1615 1616
1616 1617 if ((mmu_ctxp = mmu_ctxs_tbl[info.mmu_idx]) == NULL) {
1617 1618 /* Each mmu_ctx is cacheline aligned. */
1618 1619 mmu_ctxp = kmem_cache_alloc(mmuctxdom_cache, KM_SLEEP);
1619 1620 bzero(mmu_ctxp, sizeof (mmu_ctx_t));
1620 1621
1621 1622 mutex_init(&mmu_ctxp->mmu_lock, NULL, MUTEX_SPIN,
1622 1623 (void *)ipltospl(DISP_LEVEL));
1623 1624 mmu_ctxp->mmu_idx = info.mmu_idx;
1624 1625 mmu_ctxp->mmu_nctxs = info.mmu_nctxs;
1625 1626 /*
1626 1627 		 * Globally, for the lifetime of a system,
1627 1628 		 * gnum must always increase.
1628 1629 * mmu_saved_gnum is protected by the cpu_lock.
1629 1630 */
1630 1631 mmu_ctxp->mmu_gnum = mmu_saved_gnum + 1;
1631 1632 mmu_ctxp->mmu_cnum = NUM_LOCKED_CTXS;
1632 1633
1633 1634 sfmmu_mmu_kstat_create(mmu_ctxp);
1634 1635
1635 1636 mmu_ctxs_tbl[info.mmu_idx] = mmu_ctxp;
1636 1637 } else {
1637 1638 ASSERT(mmu_ctxp->mmu_idx == info.mmu_idx);
1638 1639 ASSERT(mmu_ctxp->mmu_nctxs <= info.mmu_nctxs);
1639 1640 }
1640 1641
1641 1642 /*
1642 1643 * The mmu_lock is acquired here to prevent races with
1643 1644 * the wrap-around code.
1644 1645 */
1645 1646 mutex_enter(&mmu_ctxp->mmu_lock);
1646 1647
1647 1648
1648 1649 mmu_ctxp->mmu_ncpus++;
1649 1650 CPUSET_ADD(mmu_ctxp->mmu_cpuset, cp->cpu_id);
1650 1651 CPU_MMU_IDX(cp) = info.mmu_idx;
1651 1652 CPU_MMU_CTXP(cp) = mmu_ctxp;
1652 1653
1653 1654 mutex_exit(&mmu_ctxp->mmu_lock);
1654 1655 }
1655 1656
1656 1657 static void
1657 1658 sfmmu_ctxdom_free(mmu_ctx_t *mmu_ctxp)
1658 1659 {
1659 1660 ASSERT(MUTEX_HELD(&cpu_lock));
1660 1661 ASSERT(!MUTEX_HELD(&mmu_ctxp->mmu_lock));
1661 1662
1662 1663 mutex_destroy(&mmu_ctxp->mmu_lock);
1663 1664
1664 1665 if (mmu_ctxp->mmu_kstat)
1665 1666 kstat_delete(mmu_ctxp->mmu_kstat);
1666 1667
1667 1668 /* mmu_saved_gnum is protected by the cpu_lock. */
1668 1669 if (mmu_saved_gnum < mmu_ctxp->mmu_gnum)
1669 1670 mmu_saved_gnum = mmu_ctxp->mmu_gnum;
1670 1671
1671 1672 kmem_cache_free(mmuctxdom_cache, mmu_ctxp);
1672 1673 }
1673 1674
1674 1675 /*
1675 1676 * Called to perform MMU context-related cleanup for a CPU.
1676 1677 */
1677 1678 void
1678 1679 sfmmu_cpu_cleanup(cpu_t *cp)
1679 1680 {
1680 1681 mmu_ctx_t *mmu_ctxp;
1681 1682
1682 1683 ASSERT(MUTEX_HELD(&cpu_lock));
1683 1684
1684 1685 mmu_ctxp = CPU_MMU_CTXP(cp);
1685 1686 ASSERT(mmu_ctxp != NULL);
1686 1687
1687 1688 /*
1688 1689 * The mmu_lock is acquired here to prevent races with
1689 1690 * the wrap-around code.
1690 1691 */
1691 1692 mutex_enter(&mmu_ctxp->mmu_lock);
1692 1693
1693 1694 CPU_MMU_CTXP(cp) = NULL;
1694 1695
1695 1696 CPUSET_DEL(mmu_ctxp->mmu_cpuset, cp->cpu_id);
1696 1697 if (--mmu_ctxp->mmu_ncpus == 0) {
1697 1698 mmu_ctxs_tbl[mmu_ctxp->mmu_idx] = NULL;
1698 1699 mutex_exit(&mmu_ctxp->mmu_lock);
1699 1700 sfmmu_ctxdom_free(mmu_ctxp);
1700 1701 return;
1701 1702 }
1702 1703
1703 1704 mutex_exit(&mmu_ctxp->mmu_lock);
1704 1705 }
1705 1706
1706 1707 uint_t
1707 1708 sfmmu_ctxdom_nctxs(int idx)
1708 1709 {
1709 1710 return (mmu_ctxs_tbl[idx]->mmu_nctxs);
1710 1711 }
1711 1712
1712 1713 #ifdef sun4v
1713 1714 /*
1714 1715 * sfmmu_ctxdoms_* is an interface provided to help keep context domains
1715 1716  * consistent after suspend/resume on a system that can resume on different
1716 1717  * hardware than that on which it was suspended.
1717 1718 *
1718 1719  * sfmmu_ctxdoms_lock(void) locks all context domains and prevents new contexts
1719 1720 * from being allocated. It acquires all hat_locks, which blocks most access to
1720 1721 * context data, except for a few cases that are handled separately or are
1721 1722 * harmless. It wraps each domain to increment gnum and invalidate on-CPU
1722 1723 * contexts, and forces cnum to its max. As a result of this call all user
1723 1724 * threads that are running on CPUs trap and try to perform wrap around but
1724 1725 * can't because hat_locks are taken. Threads that were not on CPUs but started
1725 1726  * by the scheduler go to sfmmu_alloc_ctx() to acquire a context without checking
1726 1727  * hat_lock, but fail, because cnum == nctxs, and therefore also trap and block
1727 1728  * on hat_lock trying to wrap. sfmmu_ctxdoms_lock() must be called before CPUs
1728 1729 * are paused, else it could deadlock acquiring locks held by paused CPUs.
1729 1730 *
1730 1731  * sfmmu_ctxdoms_remove() removes context domains from every CPU and records
1731 1732 * the CPUs that had them. It must be called after CPUs have been paused. This
1732 1733 * ensures that no threads are in sfmmu_alloc_ctx() accessing domain data,
1733 1734 * because pause_cpus sends a mondo interrupt to every CPU, and sfmmu_alloc_ctx
1734 1735 * runs with interrupts disabled. When CPUs are later resumed, they may enter
1735 1736 * sfmmu_alloc_ctx, but it will check for CPU_MMU_CTXP = NULL and immediately
1736 1737 * return failure. Or, they will be blocked trying to acquire hat_lock. Thus
1737 1738 * after sfmmu_ctxdoms_remove returns, we are guaranteed that no one is
1738 1739 * accessing the old context domains.
1739 1740 *
1740 1741 * sfmmu_ctxdoms_update(void) frees space used by old context domains and
1741 1742 * allocates new context domains based on hardware layout. It initializes
1742 1743  * every CPU that had a context domain before migration to have one again.
1743 1744 * sfmmu_ctxdoms_update must be called after CPUs are resumed, else it
1744 1745 * could deadlock acquiring locks held by paused CPUs.
1745 1746 *
1746 1747 * sfmmu_ctxdoms_unlock(void) releases all hat_locks after which user threads
1747 1748 * acquire new context ids and continue execution.
1748 1749 *
1749 1750 * Therefore functions should be called in the following order:
1750 1751 * suspend_routine()
1751 1752  *		sfmmu_ctxdoms_lock()
1752 1753  *		pause_cpus()
1753 1754  *		suspend()
1754 1755  *			if (suspend failed)
1755 1756  *				sfmmu_ctxdoms_unlock()
1756 1757  *		...
1757 1758  *		sfmmu_ctxdoms_remove()
1758 1759  *		resume_cpus()
1759 1760  *		sfmmu_ctxdoms_update()
1760 1761  *		sfmmu_ctxdoms_unlock()
1761 1762 */
1762 1763 static cpuset_t sfmmu_ctxdoms_pset;
1763 1764
1764 1765 void
1765 1766 sfmmu_ctxdoms_remove()
1766 1767 {
1767 1768 processorid_t id;
1768 1769 cpu_t *cp;
1769 1770
1770 1771 /*
1771 1772 * Record the CPUs that have domains in sfmmu_ctxdoms_pset, so they can
1772 1773 * be restored post-migration. A CPU may be powered off and not have a
1773 1774 * domain, for example.
1774 1775 */
1775 1776 CPUSET_ZERO(sfmmu_ctxdoms_pset);
1776 1777
1777 1778 for (id = 0; id < NCPU; id++) {
1778 1779 if ((cp = cpu[id]) != NULL && CPU_MMU_CTXP(cp) != NULL) {
1779 1780 CPUSET_ADD(sfmmu_ctxdoms_pset, id);
1780 1781 CPU_MMU_CTXP(cp) = NULL;
1781 1782 }
1782 1783 }
1783 1784 }
1784 1785
1785 1786 void
1786 1787 sfmmu_ctxdoms_lock(void)
1787 1788 {
1788 1789 int idx;
1789 1790 mmu_ctx_t *mmu_ctxp;
1790 1791
1791 1792 sfmmu_hat_lock_all();
1792 1793
1793 1794 /*
1794 1795 * At this point, no thread can be in sfmmu_ctx_wrap_around, because
1795 1796 * hat_lock is always taken before calling it.
1796 1797 *
1797 1798 * For each domain, set mmu_cnum to max so no more contexts can be
1798 1799 * allocated, and wrap to flush on-CPU contexts and force threads to
1799 1800 * acquire a new context when we later drop hat_lock after migration.
1800 1801 * Setting mmu_cnum may race with sfmmu_alloc_ctx which also sets cnum,
1801 1802 * but the latter uses CAS and will miscompare and not overwrite it.
1802 1803 */
1803 1804 kpreempt_disable(); /* required by sfmmu_ctx_wrap_around */
1804 1805 for (idx = 0; idx < max_mmu_ctxdoms; idx++) {
1805 1806 if ((mmu_ctxp = mmu_ctxs_tbl[idx]) != NULL) {
1806 1807 mutex_enter(&mmu_ctxp->mmu_lock);
1807 1808 mmu_ctxp->mmu_cnum = mmu_ctxp->mmu_nctxs;
1808 1809 /* make sure updated cnum visible */
1809 1810 membar_enter();
1810 1811 mutex_exit(&mmu_ctxp->mmu_lock);
1811 1812 sfmmu_ctx_wrap_around(mmu_ctxp, B_FALSE);
1812 1813 }
1813 1814 }
1814 1815 kpreempt_enable();
1815 1816 }
1816 1817
1817 1818 void
1818 1819 sfmmu_ctxdoms_unlock(void)
1819 1820 {
1820 1821 sfmmu_hat_unlock_all();
1821 1822 }
1822 1823
1823 1824 void
1824 1825 sfmmu_ctxdoms_update(void)
1825 1826 {
1826 1827 processorid_t id;
1827 1828 cpu_t *cp;
1828 1829 uint_t idx;
1829 1830 mmu_ctx_t *mmu_ctxp;
1830 1831
1831 1832 /*
1832 1833 * Free all context domains. As side effect, this increases
1833 1834 * mmu_saved_gnum to the maximum gnum over all domains, which is used to
1834 1835 * init gnum in the new domains, which therefore will be larger than the
1835 1836 * sfmmu gnum for any process, guaranteeing that every process will see
1836 1837 * a new generation and allocate a new context regardless of what new
1837 1838 * domain it runs in.
1838 1839 */
1839 1840 mutex_enter(&cpu_lock);
1840 1841
1841 1842 for (idx = 0; idx < max_mmu_ctxdoms; idx++) {
1842 1843 if (mmu_ctxs_tbl[idx] != NULL) {
1843 1844 mmu_ctxp = mmu_ctxs_tbl[idx];
1844 1845 mmu_ctxs_tbl[idx] = NULL;
1845 1846 sfmmu_ctxdom_free(mmu_ctxp);
1846 1847 }
1847 1848 }
1848 1849
1849 1850 for (id = 0; id < NCPU; id++) {
1850 1851 if (CPU_IN_SET(sfmmu_ctxdoms_pset, id) &&
1851 1852 (cp = cpu[id]) != NULL)
1852 1853 sfmmu_cpu_init(cp);
1853 1854 }
1854 1855 mutex_exit(&cpu_lock);
1855 1856 }
1856 1857 #endif
1857 1858
1858 1859 /*
1859 1860  * hat_setup() makes an address space context the current active one.
1860 1861 * In sfmmu this translates to setting the secondary context with the
1861 1862 * corresponding context.
1862 1863 */
1863 1864 void
1864 1865 hat_setup(struct hat *sfmmup, int allocflag)
1865 1866 {
1866 1867 hatlock_t *hatlockp;
1867 1868
1868 1869 /* Init needs some special treatment. */
1869 1870 if (allocflag == HAT_INIT) {
1870 1871 /*
1871 1872 * Make sure that we have
1872 1873 * 1. a TSB
1873 1874 * 2. a valid ctx that doesn't get stolen after this point.
1874 1875 */
1875 1876 hatlockp = sfmmu_hat_enter(sfmmup);
1876 1877
1877 1878 /*
1878 1879 * Swap in the TSB. hat_init() allocates tsbinfos without
1879 1880 * TSBs, but we need one for init, since the kernel does some
1880 1881 * special things to set up its stack and needs the TSB to
1881 1882 * resolve page faults.
1882 1883 */
1883 1884 sfmmu_tsb_swapin(sfmmup, hatlockp);
1884 1885
1885 1886 sfmmu_get_ctx(sfmmup);
1886 1887
1887 1888 sfmmu_hat_exit(hatlockp);
1888 1889 } else {
1889 1890 ASSERT(allocflag == HAT_ALLOC);
1890 1891
1891 1892 hatlockp = sfmmu_hat_enter(sfmmup);
1892 1893 kpreempt_disable();
1893 1894
1894 1895 CPUSET_ADD(sfmmup->sfmmu_cpusran, CPU->cpu_id);
1895 1896 /*
1896 1897 		 * sfmmu_setctx_sec takes <pgsz|cnum> as a parameter;
1897 1898 		 * the pagesize bits don't matter in this case since we are passing
1898 1899 		 * INVALID_CONTEXT to it.
1899 1900 * Compatibility Note: hw takes care of MMU_SCONTEXT1
1900 1901 */
1901 1902 sfmmu_setctx_sec(INVALID_CONTEXT);
1902 1903 sfmmu_clear_utsbinfo();
1903 1904
1904 1905 kpreempt_enable();
1905 1906 sfmmu_hat_exit(hatlockp);
1906 1907 }
1907 1908 }
1908 1909
1909 1910 /*
1910 1911 * Free all the translation resources for the specified address space.
1911 1912 * Called from as_free when an address space is being destroyed.
1912 1913 */
1913 1914 void
1914 1915 hat_free_start(struct hat *sfmmup)
1915 1916 {
1916 1917 ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as));
1917 1918 ASSERT(sfmmup != ksfmmup);
1918 1919
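	/*
	 * Flag the hat as being torn down; later unload paths check
	 * sfmmu_free and can presumably skip work (such as TLB/TSB
	 * flushing) for an address space that is going away.
	 */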
1919 1920 sfmmup->sfmmu_free = 1;
1920 1921 if (sfmmup->sfmmu_scdp != NULL) {
1921 1922 sfmmu_leave_scd(sfmmup, 0);
1922 1923 }
1923 1924
1924 1925 ASSERT(sfmmup->sfmmu_scdp == NULL);
1925 1926 }
1926 1927
1927 1928 void
1928 1929 hat_free_end(struct hat *sfmmup)
1929 1930 {
1930 1931 int i;
1931 1932
1932 1933 ASSERT(sfmmup->sfmmu_free == 1);
1933 1934 ASSERT(sfmmup->sfmmu_ttecnt[TTE8K] == 0);
1934 1935 ASSERT(sfmmup->sfmmu_ttecnt[TTE64K] == 0);
1935 1936 ASSERT(sfmmup->sfmmu_ttecnt[TTE512K] == 0);
1936 1937 ASSERT(sfmmup->sfmmu_ttecnt[TTE4M] == 0);
1937 1938 ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0);
1938 1939 ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0);
1939 1940
1940 1941 if (sfmmup->sfmmu_rmstat) {
1941 1942 hat_freestat(sfmmup->sfmmu_as, NULL);
1942 1943 }
1943 1944
1944 1945 while (sfmmup->sfmmu_tsb != NULL) {
1945 1946 struct tsb_info *next = sfmmup->sfmmu_tsb->tsb_next;
1946 1947 sfmmu_tsbinfo_free(sfmmup->sfmmu_tsb);
1947 1948 sfmmup->sfmmu_tsb = next;
1948 1949 }
1949 1950
1950 1951 if (sfmmup->sfmmu_srdp != NULL) {
1951 1952 sfmmu_leave_srd(sfmmup);
1952 1953 ASSERT(sfmmup->sfmmu_srdp == NULL);
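		/*
		 * Free any second-level hme region link arrays that were
		 * allocated while this hat was using shared regions.
		 */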
1953 1954 for (i = 0; i < SFMMU_L1_HMERLINKS; i++) {
1954 1955 if (sfmmup->sfmmu_hmeregion_links[i] != NULL) {
1955 1956 kmem_free(sfmmup->sfmmu_hmeregion_links[i],
1956 1957 SFMMU_L2_HMERLINKS_SIZE);
1957 1958 sfmmup->sfmmu_hmeregion_links[i] = NULL;
1958 1959 }
1959 1960 }
1960 1961 }
1961 1962 sfmmu_free_sfmmu(sfmmup);
1962 1963
1963 1964 #ifdef DEBUG
1964 1965 for (i = 0; i < SFMMU_L1_HMERLINKS; i++) {
1965 1966 ASSERT(sfmmup->sfmmu_hmeregion_links[i] == NULL);
1966 1967 }
1967 1968 #endif
1968 1969
1969 1970 kmem_cache_free(sfmmuid_cache, sfmmup);
1970 1971 }
1971 1972
1972 1973 /*
1973 1974 * Set up any translation structures, for the specified address space,
1974 1975 * that are needed or preferred when the process is being swapped in.
1975 1976 */
1976 1977 /* ARGSUSED */
1977 1978 void
1978 1979 hat_swapin(struct hat *hat)
1979 1980 {
1980 1981 }
1981 1982
1982 1983 /*
1983 1984 * Free all of the translation resources, for the specified address space,
1984 1985 * that can be freed while the process is swapped out. Called from as_swapout.
1985 1986 * Also, free up the ctx that this process was using.
1986 1987 */
1987 1988 void
1988 1989 hat_swapout(struct hat *sfmmup)
1989 1990 {
1990 1991 struct hmehash_bucket *hmebp;
1991 1992 struct hme_blk *hmeblkp;
1992 1993 struct hme_blk *pr_hblk = NULL;
1993 1994 struct hme_blk *nx_hblk;
1994 1995 int i;
1995 1996 struct hme_blk *list = NULL;
1996 1997 hatlock_t *hatlockp;
1997 1998 struct tsb_info *tsbinfop;
1998 1999 struct free_tsb {
1999 2000 struct free_tsb *next;
2000 2001 struct tsb_info *tsbinfop;
2001 2002 }; /* free list of TSBs */
2002 2003 struct free_tsb *freelist, *last, *next;
2003 2004
2004 2005 SFMMU_STAT(sf_swapout);
2005 2006
2006 2007 /*
2007 2008 * There is no way to go from an as to all its translations in sfmmu.
2008 2009 * Here is one of the times when we take the big hit and traverse
2009 2010 * the hash looking for hme_blks to free up. Not only do we free up
2010 2011 	 * this as's hme_blks but all those that are free. We are obviously
2011 2012 * swapping because we need memory so let's free up as much
2012 2013 * as we can.
2013 2014 *
2014 2015 * Note that we don't flush TLB/TSB here -- it's not necessary
2015 2016 * because:
2016 2017 * 1) we free the ctx we're using and throw away the TSB(s);
2017 2018 * 2) processes aren't runnable while being swapped out.
2018 2019 */
2019 2020 ASSERT(sfmmup != KHATID);
2020 2021 for (i = 0; i <= UHMEHASH_SZ; i++) {
2021 2022 hmebp = &uhme_hash[i];
2022 2023 SFMMU_HASH_LOCK(hmebp);
2023 2024 hmeblkp = hmebp->hmeblkp;
2024 2025 pr_hblk = NULL;
2025 2026 while (hmeblkp) {
2026 2027
2027 2028 if ((hmeblkp->hblk_tag.htag_id == sfmmup) &&
2028 2029 !hmeblkp->hblk_shw_bit && !hmeblkp->hblk_lckcnt) {
2029 2030 ASSERT(!hmeblkp->hblk_shared);
2030 2031 (void) sfmmu_hblk_unload(sfmmup, hmeblkp,
2031 2032 (caddr_t)get_hblk_base(hmeblkp),
2032 2033 get_hblk_endaddr(hmeblkp),
2033 2034 NULL, HAT_UNLOAD);
2034 2035 }
2035 2036 nx_hblk = hmeblkp->hblk_next;
2036 2037 if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
2037 2038 ASSERT(!hmeblkp->hblk_lckcnt);
2038 2039 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
2039 2040 &list, 0);
2040 2041 } else {
2041 2042 pr_hblk = hmeblkp;
2042 2043 }
2043 2044 hmeblkp = nx_hblk;
2044 2045 }
2045 2046 SFMMU_HASH_UNLOCK(hmebp);
2046 2047 }
2047 2048
2048 2049 sfmmu_hblks_list_purge(&list, 0);
2049 2050
2050 2051 /*
2051 2052 * Now free up the ctx so that others can reuse it.
2052 2053 */
2053 2054 hatlockp = sfmmu_hat_enter(sfmmup);
2054 2055
2055 2056 sfmmu_invalidate_ctx(sfmmup);
2056 2057
2057 2058 /*
2058 2059 * Free TSBs, but not tsbinfos, and set SWAPPED flag.
2059 2060 * If TSBs were never swapped in, just return.
2060 2061 * This implies that we don't support partial swapping
2061 2062 * of TSBs -- either all are swapped out, or none are.
2062 2063 *
2063 2064 * We must hold the HAT lock here to prevent racing with another
2064 2065 * thread trying to unmap TTEs from the TSB or running the post-
2065 2066 * relocator after relocating the TSB's memory. Unfortunately, we
2066 2067 * can't free memory while holding the HAT lock or we could
2067 2068 * deadlock, so we build a list of TSBs to be freed after marking
2068 2069 * the tsbinfos as swapped out and free them after dropping the
2069 2070 * lock.
2070 2071 */
2071 2072 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
2072 2073 sfmmu_hat_exit(hatlockp);
2073 2074 return;
2074 2075 }
2075 2076
2076 2077 SFMMU_FLAGS_SET(sfmmup, HAT_SWAPPED);
2077 2078 last = freelist = NULL;
2078 2079 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL;
2079 2080 tsbinfop = tsbinfop->tsb_next) {
2080 2081 ASSERT((tsbinfop->tsb_flags & TSB_SWAPPED) == 0);
2081 2082
2082 2083 /*
2083 2084 * Cast the TSB into a struct free_tsb and put it on the free
2084 2085 * list.
2085 2086 */
2086 2087 if (freelist == NULL) {
2087 2088 last = freelist = (struct free_tsb *)tsbinfop->tsb_va;
2088 2089 } else {
2089 2090 last->next = (struct free_tsb *)tsbinfop->tsb_va;
2090 2091 last = last->next;
2091 2092 }
2092 2093 last->next = NULL;
2093 2094 last->tsbinfop = tsbinfop;
2094 2095 tsbinfop->tsb_flags |= TSB_SWAPPED;
2095 2096 /*
2096 2097 * Zero out the TTE to clear the valid bit.
2097 2098 * Note we can't use a value like 0xbad because we want to
2098 2099 * ensure diagnostic bits are NEVER set on TTEs that might
2099 2100 * be loaded. The intent is to catch any invalid access
2100 2101 * to the swapped TSB, such as a thread running with a valid
2101 2102 * context without first calling sfmmu_tsb_swapin() to
2102 2103 * allocate TSB memory.
2103 2104 */
2104 2105 tsbinfop->tsb_tte.ll = 0;
2105 2106 }
2106 2107
2107 2108 /* Now we can drop the lock and free the TSB memory. */
2108 2109 sfmmu_hat_exit(hatlockp);
2109 2110 for (; freelist != NULL; freelist = next) {
2110 2111 next = freelist->next;
2111 2112 sfmmu_tsb_free(freelist->tsbinfop);
2112 2113 }
2113 2114 }
2114 2115
2115 2116 /*
2116 2117 * Duplicate the translations of an as into another newas
2117 2118 */
2118 2119 /* ARGSUSED */
2119 2120 int
2120 2121 hat_dup(struct hat *hat, struct hat *newhat, caddr_t addr, size_t len,
2121 2122 uint_t flag)
2122 2123 {
2123 2124 sf_srd_t *srdp;
2124 2125 sf_scd_t *scdp;
2125 2126 int i;
2126 2127 extern uint_t get_color_start(struct as *);
2127 2128
2128 2129 ASSERT((flag == 0) || (flag == HAT_DUP_ALL) || (flag == HAT_DUP_COW) ||
2129 2130 (flag == HAT_DUP_SRD));
2130 2131 ASSERT(hat != ksfmmup);
2131 2132 ASSERT(newhat != ksfmmup);
2132 2133 ASSERT(flag != HAT_DUP_ALL || hat->sfmmu_srdp == newhat->sfmmu_srdp);
2133 2134
2134 2135 if (flag == HAT_DUP_COW) {
2135 2136 panic("hat_dup: HAT_DUP_COW not supported");
2136 2137 }
2137 2138
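	/*
	 * For HAT_DUP_SRD, share the parent's region domain (SRD): hold the
	 * SRD's backing vnode and bump its reference count so the child hat
	 * keeps it alive.
	 */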
2138 2139 if (flag == HAT_DUP_SRD && ((srdp = hat->sfmmu_srdp) != NULL)) {
2139 2140 ASSERT(srdp->srd_evp != NULL);
2140 2141 VN_HOLD(srdp->srd_evp);
2141 2142 ASSERT(srdp->srd_refcnt > 0);
2142 2143 newhat->sfmmu_srdp = srdp;
2143 2144 atomic_inc_32((volatile uint_t *)&srdp->srd_refcnt);
2144 2145 }
2145 2146
2146 2147 /*
2147 2148 * HAT_DUP_ALL flag is used after as duplication is done.
2148 2149 */
2149 2150 if (flag == HAT_DUP_ALL && ((srdp = newhat->sfmmu_srdp) != NULL)) {
2150 2151 ASSERT(newhat->sfmmu_srdp->srd_refcnt >= 2);
2151 2152 newhat->sfmmu_rtteflags = hat->sfmmu_rtteflags;
2152 2153 if (hat->sfmmu_flags & HAT_4MTEXT_FLAG) {
2153 2154 newhat->sfmmu_flags |= HAT_4MTEXT_FLAG;
2154 2155 }
2155 2156
2156 2157 /* check if need to join scd */
2157 2158 if ((scdp = hat->sfmmu_scdp) != NULL &&
2158 2159 newhat->sfmmu_scdp != scdp) {
2159 2160 int ret;
2160 2161 SF_RGNMAP_IS_SUBSET(&newhat->sfmmu_region_map,
2161 2162 &scdp->scd_region_map, ret);
2162 2163 ASSERT(ret);
2163 2164 sfmmu_join_scd(scdp, newhat);
2164 2165 ASSERT(newhat->sfmmu_scdp == scdp &&
2165 2166 scdp->scd_refcnt >= 2);
2166 2167 for (i = 0; i < max_mmu_page_sizes; i++) {
2167 2168 newhat->sfmmu_ismttecnt[i] =
2168 2169 hat->sfmmu_ismttecnt[i];
2169 2170 newhat->sfmmu_scdismttecnt[i] =
2170 2171 hat->sfmmu_scdismttecnt[i];
2171 2172 }
2172 2173 }
2173 2174
2174 2175 sfmmu_check_page_sizes(newhat, 1);
2175 2176 }
2176 2177
2177 2178 if (flag == HAT_DUP_ALL && consistent_coloring == 0 &&
2178 2179 update_proc_pgcolorbase_after_fork != 0) {
2179 2180 hat->sfmmu_clrbin = get_color_start(hat->sfmmu_as);
2180 2181 }
2181 2182 return (0);
2182 2183 }
2183 2184
2184 2185 void
2185 2186 hat_memload(struct hat *hat, caddr_t addr, struct page *pp,
2186 2187 uint_t attr, uint_t flags)
2187 2188 {
2188 2189 hat_do_memload(hat, addr, pp, attr, flags,
2189 2190 SFMMU_INVALID_SHMERID);
2190 2191 }
2191 2192
2192 2193 void
2193 2194 hat_memload_region(struct hat *hat, caddr_t addr, struct page *pp,
2194 2195 uint_t attr, uint_t flags, hat_region_cookie_t rcookie)
2195 2196 {
2196 2197 uint_t rid;
2197 2198 if (rcookie == HAT_INVALID_REGION_COOKIE) {
2198 2199 hat_do_memload(hat, addr, pp, attr, flags,
2199 2200 SFMMU_INVALID_SHMERID);
2200 2201 return;
2201 2202 }
2202 2203 rid = (uint_t)((uint64_t)rcookie);
2203 2204 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
2204 2205 hat_do_memload(hat, addr, pp, attr, flags, rid);
2205 2206 }
2206 2207
2207 2208 /*
2208 2209 * Set up addr to map to page pp with protection prot.
2209 2210 * As an optimization we also load the TSB with the
2210 2211 * corresponding tte but it is no big deal if the tte gets kicked out.
2211 2212 */
2212 2213 static void
2213 2214 hat_do_memload(struct hat *hat, caddr_t addr, struct page *pp,
2214 2215 uint_t attr, uint_t flags, uint_t rid)
2215 2216 {
2216 2217 tte_t tte;
2217 2218
2218 2219
2219 2220 ASSERT(hat != NULL);
2220 2221 ASSERT(PAGE_LOCKED(pp));
2221 2222 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET));
2222 2223 ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG));
2223 2224 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR));
2224 2225 SFMMU_VALIDATE_HMERID(hat, rid, addr, MMU_PAGESIZE);
2225 2226
2226 2227 if (PP_ISFREE(pp)) {
2227 2228 panic("hat_memload: loading a mapping to free page %p",
2228 2229 (void *)pp);
2229 2230 }
2230 2231
2231 2232 ASSERT((hat == ksfmmup) || AS_LOCK_HELD(hat->sfmmu_as));
2232 2233
2233 2234 if (flags & ~SFMMU_LOAD_ALLFLAG)
2234 2235 cmn_err(CE_NOTE, "hat_memload: unsupported flags %d",
2235 2236 flags & ~SFMMU_LOAD_ALLFLAG);
2236 2237
2237 2238 if (hat->sfmmu_rmstat)
2238 2239 hat_resvstat(MMU_PAGESIZE, hat->sfmmu_as, addr);
2239 2240
2240 2241 #if defined(SF_ERRATA_57)
2241 2242 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) &&
2242 2243 (addr < errata57_limit) && (attr & PROT_EXEC) &&
2243 2244 !(flags & HAT_LOAD_SHARE)) {
2244 2245 cmn_err(CE_WARN, "hat_memload: illegal attempt to make user "
2245 2246 " page executable");
2246 2247 attr &= ~PROT_EXEC;
2247 2248 }
2248 2249 #endif
2249 2250
2250 2251 sfmmu_memtte(&tte, pp->p_pagenum, attr, TTE8K);
2251 2252 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, flags, rid);
2252 2253
2253 2254 /*
2254 2255 * Check TSB and TLB page sizes.
2255 2256 */
2256 2257 if ((flags & HAT_LOAD_SHARE) == 0) {
2257 2258 sfmmu_check_page_sizes(hat, 1);
2258 2259 }
2259 2260 }
2260 2261
2261 2262 /*
2262 2263 * hat_devload can be called to map real memory (e.g.
2263 2264  * /dev/kmem) and even though hat_devload will determine the pfn is
2264 2265  * for memory, it will be unable to get a shared lock on the
2265 2266  * page (because someone else has it exclusively) and will
2266 2267  * pass pp = NULL.  If tteload doesn't get a non-NULL
2267 2268 * page pointer it can't cache memory.
2268 2269 */
2269 2270 void
2270 2271 hat_devload(struct hat *hat, caddr_t addr, size_t len, pfn_t pfn,
2271 2272 uint_t attr, int flags)
2272 2273 {
2273 2274 tte_t tte;
2274 2275 struct page *pp = NULL;
2275 2276 int use_lgpg = 0;
2276 2277
2277 2278 ASSERT(hat != NULL);
2278 2279
2279 2280 ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG));
2280 2281 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR));
2281 2282 ASSERT((hat == ksfmmup) || AS_LOCK_HELD(hat->sfmmu_as));
2282 2283 if (len == 0)
2283 2284 panic("hat_devload: zero len");
2284 2285 if (flags & ~SFMMU_LOAD_ALLFLAG)
2285 2286 cmn_err(CE_NOTE, "hat_devload: unsupported flags %d",
2286 2287 flags & ~SFMMU_LOAD_ALLFLAG);
2287 2288
2288 2289 #if defined(SF_ERRATA_57)
2289 2290 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) &&
2290 2291 (addr < errata57_limit) && (attr & PROT_EXEC) &&
2291 2292 !(flags & HAT_LOAD_SHARE)) {
2292 2293 cmn_err(CE_WARN, "hat_devload: illegal attempt to make user "
2293 2294 " page executable");
2294 2295 attr &= ~PROT_EXEC;
2295 2296 }
2296 2297 #endif
2297 2298
2298 2299 /*
2299 2300 * If it's a memory page find its pp
2300 2301 */
2301 2302 if (!(flags & HAT_LOAD_NOCONSIST) && pf_is_memory(pfn)) {
2302 2303 pp = page_numtopp_nolock(pfn);
2303 2304 if (pp == NULL) {
2304 2305 flags |= HAT_LOAD_NOCONSIST;
2305 2306 } else {
2306 2307 if (PP_ISFREE(pp)) {
2307 2308 panic("hat_memload: loading "
2308 2309 "a mapping to free page %p",
2309 2310 (void *)pp);
2310 2311 }
2311 2312 if (!PAGE_LOCKED(pp) && !PP_ISNORELOC(pp)) {
2312 2313 panic("hat_memload: loading a mapping "
2313 2314 "to unlocked relocatable page %p",
2314 2315 (void *)pp);
2315 2316 }
2316 2317 ASSERT(len == MMU_PAGESIZE);
2317 2318 }
2318 2319 }
2319 2320
2320 2321 if (hat->sfmmu_rmstat)
2321 2322 hat_resvstat(len, hat->sfmmu_as, addr);
2322 2323
2323 2324 if (flags & HAT_LOAD_NOCONSIST) {
2324 2325 attr |= SFMMU_UNCACHEVTTE;
2325 2326 use_lgpg = 1;
2326 2327 }
2327 2328 if (!pf_is_memory(pfn)) {
2328 2329 attr |= SFMMU_UNCACHEPTTE | HAT_NOSYNC;
2329 2330 use_lgpg = 1;
2330 2331 switch (attr & HAT_ORDER_MASK) {
2331 2332 case HAT_STRICTORDER:
2332 2333 case HAT_UNORDERED_OK:
2333 2334 /*
2334 2335 * we set the side effect bit for all non
2335 2336 * memory mappings unless merging is ok
2336 2337 */
2337 2338 attr |= SFMMU_SIDEFFECT;
2338 2339 break;
2339 2340 case HAT_MERGING_OK:
2340 2341 case HAT_LOADCACHING_OK:
2341 2342 case HAT_STORECACHING_OK:
2342 2343 break;
2343 2344 default:
2344 2345 panic("hat_devload: bad attr");
2345 2346 break;
2346 2347 }
2347 2348 }
2348 2349 while (len) {
2349 2350 if (!use_lgpg) {
2350 2351 sfmmu_memtte(&tte, pfn, attr, TTE8K);
2351 2352 (void) sfmmu_tteload_array(hat, &tte, addr, &pp,
2352 2353 flags, SFMMU_INVALID_SHMERID);
2353 2354 len -= MMU_PAGESIZE;
2354 2355 addr += MMU_PAGESIZE;
2355 2356 pfn++;
2356 2357 continue;
2357 2358 }
2358 2359 /*
2359 2360 * try to use large pages, check va/pa alignments
2360 2361 * Note that 32M/256M page sizes are not (yet) supported.
2361 2362 */
2362 2363 if ((len >= MMU_PAGESIZE4M) &&
2363 2364 !((uintptr_t)addr & MMU_PAGEOFFSET4M) &&
2364 2365 !(disable_large_pages & (1 << TTE4M)) &&
2365 2366 !(mmu_ptob(pfn) & MMU_PAGEOFFSET4M)) {
2366 2367 sfmmu_memtte(&tte, pfn, attr, TTE4M);
2367 2368 (void) sfmmu_tteload_array(hat, &tte, addr, &pp,
2368 2369 flags, SFMMU_INVALID_SHMERID);
2369 2370 len -= MMU_PAGESIZE4M;
2370 2371 addr += MMU_PAGESIZE4M;
2371 2372 pfn += MMU_PAGESIZE4M / MMU_PAGESIZE;
2372 2373 } else if ((len >= MMU_PAGESIZE512K) &&
2373 2374 !((uintptr_t)addr & MMU_PAGEOFFSET512K) &&
2374 2375 !(disable_large_pages & (1 << TTE512K)) &&
2375 2376 !(mmu_ptob(pfn) & MMU_PAGEOFFSET512K)) {
2376 2377 sfmmu_memtte(&tte, pfn, attr, TTE512K);
2377 2378 (void) sfmmu_tteload_array(hat, &tte, addr, &pp,
2378 2379 flags, SFMMU_INVALID_SHMERID);
2379 2380 len -= MMU_PAGESIZE512K;
2380 2381 addr += MMU_PAGESIZE512K;
2381 2382 pfn += MMU_PAGESIZE512K / MMU_PAGESIZE;
2382 2383 } else if ((len >= MMU_PAGESIZE64K) &&
2383 2384 !((uintptr_t)addr & MMU_PAGEOFFSET64K) &&
2384 2385 !(disable_large_pages & (1 << TTE64K)) &&
2385 2386 !(mmu_ptob(pfn) & MMU_PAGEOFFSET64K)) {
2386 2387 sfmmu_memtte(&tte, pfn, attr, TTE64K);
2387 2388 (void) sfmmu_tteload_array(hat, &tte, addr, &pp,
2388 2389 flags, SFMMU_INVALID_SHMERID);
2389 2390 len -= MMU_PAGESIZE64K;
2390 2391 addr += MMU_PAGESIZE64K;
2391 2392 pfn += MMU_PAGESIZE64K / MMU_PAGESIZE;
2392 2393 } else {
2393 2394 sfmmu_memtte(&tte, pfn, attr, TTE8K);
2394 2395 (void) sfmmu_tteload_array(hat, &tte, addr, &pp,
2395 2396 flags, SFMMU_INVALID_SHMERID);
2396 2397 len -= MMU_PAGESIZE;
2397 2398 addr += MMU_PAGESIZE;
2398 2399 pfn++;
2399 2400 }
2400 2401 }
2401 2402
2402 2403 /*
2403 2404 * Check TSB and TLB page sizes.
2404 2405 */
2405 2406 if ((flags & HAT_LOAD_SHARE) == 0) {
2406 2407 sfmmu_check_page_sizes(hat, 1);
2407 2408 }
2408 2409 }
2409 2410
2410 2411 void
2411 2412 hat_memload_array(struct hat *hat, caddr_t addr, size_t len,
2412 2413 struct page **pps, uint_t attr, uint_t flags)
2413 2414 {
2414 2415 hat_do_memload_array(hat, addr, len, pps, attr, flags,
2415 2416 SFMMU_INVALID_SHMERID);
2416 2417 }
2417 2418
2418 2419 void
2419 2420 hat_memload_array_region(struct hat *hat, caddr_t addr, size_t len,
2420 2421 struct page **pps, uint_t attr, uint_t flags,
2421 2422 hat_region_cookie_t rcookie)
2422 2423 {
2423 2424 uint_t rid;
2424 2425 if (rcookie == HAT_INVALID_REGION_COOKIE) {
2425 2426 hat_do_memload_array(hat, addr, len, pps, attr, flags,
2426 2427 SFMMU_INVALID_SHMERID);
2427 2428 return;
2428 2429 }
2429 2430 rid = (uint_t)((uint64_t)rcookie);
2430 2431 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
2431 2432 hat_do_memload_array(hat, addr, len, pps, attr, flags, rid);
2432 2433 }
2433 2434
2434 2435 /*
2435 2436  * Map the largest extent possible out of the page array. The array may NOT
2436 2437  * be in order. The largest possible mapping a page can have
2437 2438  * is specified in the p_szc field. The p_szc field
2438 2439  * cannot change as long as there are any mappings (large or small)
2439 2440  * to any of the pages that make up the large page. (i.e. any
2440 2441  * promotion/demotion of page size is not up to the hat but up to
2441 2442  * the page free list manager). The array
2442 2443  * should consist of properly aligned contiguous pages that are
2443 2444  * part of a big page for a large mapping to be created.
2444 2445 */
2445 2446 static void
2446 2447 hat_do_memload_array(struct hat *hat, caddr_t addr, size_t len,
2447 2448 struct page **pps, uint_t attr, uint_t flags, uint_t rid)
2448 2449 {
2449 2450 int ttesz;
2450 2451 size_t mapsz;
2451 2452 pgcnt_t numpg, npgs;
2452 2453 tte_t tte;
2453 2454 page_t *pp;
2454 2455 uint_t large_pages_disable;
2455 2456
2456 2457 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET));
2457 2458 SFMMU_VALIDATE_HMERID(hat, rid, addr, len);
2458 2459
2459 2460 if (hat->sfmmu_rmstat)
2460 2461 hat_resvstat(len, hat->sfmmu_as, addr);
2461 2462
2462 2463 #if defined(SF_ERRATA_57)
2463 2464 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) &&
2464 2465 (addr < errata57_limit) && (attr & PROT_EXEC) &&
2465 2466 !(flags & HAT_LOAD_SHARE)) {
2466 2467 cmn_err(CE_WARN, "hat_memload_array: illegal attempt to make "
2467 2468 "user page executable");
2468 2469 attr &= ~PROT_EXEC;
2469 2470 }
2470 2471 #endif
2471 2472
2472 2473 /* Get number of pages */
2473 2474 npgs = len >> MMU_PAGESHIFT;
2474 2475
2475 2476 if (flags & HAT_LOAD_SHARE) {
2476 2477 large_pages_disable = disable_ism_large_pages;
2477 2478 } else {
2478 2479 large_pages_disable = disable_large_pages;
2479 2480 }
2480 2481
2481 2482 if (npgs < NHMENTS || large_pages_disable == LARGE_PAGES_OFF) {
2482 2483 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, npgs,
2483 2484 rid);
2484 2485 return;
2485 2486 }
2486 2487
2487 2488 while (npgs >= NHMENTS) {
2488 2489 pp = *pps;
2489 2490 for (ttesz = pp->p_szc; ttesz != TTE8K; ttesz--) {
2490 2491 /*
2491 2492 * Check if this page size is disabled.
2492 2493 */
2493 2494 if (large_pages_disable & (1 << ttesz))
2494 2495 continue;
2495 2496
2496 2497 numpg = TTEPAGES(ttesz);
2497 2498 mapsz = numpg << MMU_PAGESHIFT;
2498 2499 if ((npgs >= numpg) &&
2499 2500 IS_P2ALIGNED(addr, mapsz) &&
2500 2501 IS_P2ALIGNED(pp->p_pagenum, numpg)) {
2501 2502 /*
2502 2503 * At this point we have enough pages and
2503 2504 * we know the virtual address and the pfn
2504 2505 * are properly aligned. We still need
2505 2506 * to check for physical contiguity but since
2506 2507 * it is very likely that this is the case
2507 2508 * we will assume they are so and undo
2508 2509 * the request if necessary. It would
2509 2510 * be great if we could get a hint flag
2510 2511 * like HAT_CONTIG which would tell us
2511 2512 				 * the pages are contiguous for sure.
2512 2513 */
2513 2514 sfmmu_memtte(&tte, (*pps)->p_pagenum,
2514 2515 attr, ttesz);
2515 2516 if (!sfmmu_tteload_array(hat, &tte, addr,
2516 2517 pps, flags, rid)) {
2517 2518 break;
2518 2519 }
2519 2520 }
2520 2521 }
2521 2522 if (ttesz == TTE8K) {
2522 2523 /*
2523 2524 			 * We were not able to map the array using a large page;
2524 2525 			 * batch an hmeblk or a fraction of one at a time.
2525 2526 */
2526 2527 numpg = ((uintptr_t)addr >> MMU_PAGESHIFT)
2527 2528 & (NHMENTS-1);
2528 2529 numpg = NHMENTS - numpg;
2529 2530 ASSERT(numpg <= npgs);
2530 2531 mapsz = numpg * MMU_PAGESIZE;
2531 2532 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags,
2532 2533 numpg, rid);
2533 2534 }
2534 2535 addr += mapsz;
2535 2536 npgs -= numpg;
2536 2537 pps += numpg;
2537 2538 }
2538 2539
2539 2540 if (npgs) {
2540 2541 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, npgs,
2541 2542 rid);
2542 2543 }
2543 2544
2544 2545 /*
2545 2546 * Check TSB and TLB page sizes.
2546 2547 */
2547 2548 if ((flags & HAT_LOAD_SHARE) == 0) {
2548 2549 sfmmu_check_page_sizes(hat, 1);
2549 2550 }
2550 2551 }
2551 2552
2552 2553 /*
2553 2554 * Function tries to batch 8K pages into the same hme blk.
2554 2555 */
2555 2556 static void
2556 2557 sfmmu_memload_batchsmall(struct hat *hat, caddr_t vaddr, page_t **pps,
2557 2558 uint_t attr, uint_t flags, pgcnt_t npgs, uint_t rid)
2558 2559 {
2559 2560 tte_t tte;
2560 2561 page_t *pp;
2561 2562 struct hmehash_bucket *hmebp;
2562 2563 struct hme_blk *hmeblkp;
2563 2564 int index;
2564 2565
2565 2566 while (npgs) {
2566 2567 /*
2567 2568 * Acquire the hash bucket.
2568 2569 */
2569 2570 hmebp = sfmmu_tteload_acquire_hashbucket(hat, vaddr, TTE8K,
2570 2571 rid);
2571 2572 ASSERT(hmebp);
2572 2573
2573 2574 /*
2574 2575 * Find the hment block.
2575 2576 */
2576 2577 hmeblkp = sfmmu_tteload_find_hmeblk(hat, hmebp, vaddr,
2577 2578 TTE8K, flags, rid);
2578 2579 ASSERT(hmeblkp);
2579 2580
2580 2581 do {
2581 2582 /*
2582 2583 * Make the tte.
2583 2584 */
2584 2585 pp = *pps;
2585 2586 sfmmu_memtte(&tte, pp->p_pagenum, attr, TTE8K);
2586 2587
2587 2588 /*
2588 2589 * Add the translation.
2589 2590 */
2590 2591 (void) sfmmu_tteload_addentry(hat, hmeblkp, &tte,
2591 2592 vaddr, pps, flags, rid);
2592 2593
2593 2594 /*
2594 2595 * Goto next page.
2595 2596 */
2596 2597 pps++;
2597 2598 npgs--;
2598 2599
2599 2600 /*
2600 2601 * Goto next address.
2601 2602 */
2602 2603 vaddr += MMU_PAGESIZE;
2603 2604
2604 2605 /*
2605 2606 			 * Don't cross over into a different hmeblk.
2606 2607 */
2607 2608 index = (int)(((uintptr_t)vaddr >> MMU_PAGESHIFT) &
2608 2609 (NHMENTS-1));
2609 2610
2610 2611 } while (index != 0 && npgs != 0);
2611 2612
2612 2613 /*
2613 2614 * Release the hash bucket.
2614 2615 */
2615 2616
2616 2617 sfmmu_tteload_release_hashbucket(hmebp);
2617 2618 }
2618 2619 }
2619 2620
2620 2621 /*
2621 2622 * Construct a tte for a page:
2622 2623 *
2623 2624 * tte_valid = 1
2624 2625 * tte_size2 = size & TTE_SZ2_BITS (Panther and Olympus-C only)
2625 2626 * tte_size = size
2626 2627 * tte_nfo = attr & HAT_NOFAULT
2627 2628 * tte_ie = attr & HAT_STRUCTURE_LE
2628 2629 * tte_hmenum = hmenum
2629 2630 * tte_pahi = pp->p_pagenum >> TTE_PASHIFT;
2630 2631 * tte_palo = pp->p_pagenum & TTE_PALOMASK;
2631 2632 * tte_ref = 1 (optimization)
2632 2633 * tte_wr_perm = attr & PROT_WRITE;
2633 2634 * tte_no_sync = attr & HAT_NOSYNC
2634 2635 * tte_lock = attr & SFMMU_LOCKTTE
2635 2636 * tte_cp = !(attr & SFMMU_UNCACHEPTTE)
2636 2637 * tte_cv = !(attr & SFMMU_UNCACHEVTTE)
2637 2638 * tte_e = attr & SFMMU_SIDEFFECT
2638 2639 * tte_priv = !(attr & PROT_USER)
2639 2640 * tte_hwwr = if nosync is set and it is writable we set the mod bit (opt)
2640 2641 * tte_glb = 0
2641 2642 */
2642 2643 void
2643 2644 sfmmu_memtte(tte_t *ttep, pfn_t pfn, uint_t attr, int tte_sz)
2644 2645 {
2645 2646 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR));
2646 2647
2647 2648 ttep->tte_inthi = MAKE_TTE_INTHI(pfn, attr, tte_sz, 0 /* hmenum */);
2648 2649 ttep->tte_intlo = MAKE_TTE_INTLO(pfn, attr, tte_sz, 0 /* hmenum */);
2649 2650
2650 2651 if (TTE_IS_NOSYNC(ttep)) {
2651 2652 TTE_SET_REF(ttep);
2652 2653 if (TTE_IS_WRITABLE(ttep)) {
2653 2654 TTE_SET_MOD(ttep);
2654 2655 }
2655 2656 }
2656 2657 if (TTE_IS_NFO(ttep) && TTE_IS_EXECUTABLE(ttep)) {
2657 2658 panic("sfmmu_memtte: can't set both NFO and EXEC bits");
2658 2659 }
2659 2660 }
2660 2661
2661 2662 /*
2662 2663 * This function will add a translation to the hme_blk and allocate the
2663 2664 * hme_blk if one does not exist.
2664 2665 * If a page structure is specified then it will add the
2665 2666 * corresponding hment to the mapping list.
2666 2667 * It will also update the hmenum field for the tte.
2667 2668 *
2668 2669 * Currently this function is only used for kernel mappings.
2669 2670 * So pass invalid region to sfmmu_tteload_array().
2670 2671 */
2671 2672 void
2672 2673 sfmmu_tteload(struct hat *sfmmup, tte_t *ttep, caddr_t vaddr, page_t *pp,
2673 2674 uint_t flags)
2674 2675 {
2675 2676 ASSERT(sfmmup == ksfmmup);
2676 2677 (void) sfmmu_tteload_array(sfmmup, ttep, vaddr, &pp, flags,
2677 2678 SFMMU_INVALID_SHMERID);
2678 2679 }
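
/*
 * Illustrative (hypothetical) kernel-mapping sequence using the two routines
 * above; the page pointer, address, attributes and flags are examples only:
 *
 *	tte_t tte;
 *
 *	sfmmu_memtte(&tte, pp->p_pagenum, PROT_READ | PROT_WRITE, TTE8K);
 *	sfmmu_tteload(ksfmmup, &tte, vaddr, pp, HAT_LOAD_LOCK);
 *
 * hat_memload() above performs essentially this sequence for the general
 * case via sfmmu_tteload_array().
 */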
2679 2680
2680 2681 /*
2681 2682 * Load (ttep != NULL) or unload (ttep == NULL) one entry in the TSB.
2682 2683 * Assumes that a particular page size may only be resident in one TSB.
2683 2684 */
2684 2685 static void
2685 2686 sfmmu_mod_tsb(sfmmu_t *sfmmup, caddr_t vaddr, tte_t *ttep, int ttesz)
2686 2687 {
2687 2688 struct tsb_info *tsbinfop = NULL;
2688 2689 uint64_t tag;
2689 2690 struct tsbe *tsbe_addr;
2690 2691 uint64_t tsb_base;
2691 2692 uint_t tsb_size;
2692 2693 int vpshift = MMU_PAGESHIFT;
2693 2694 int phys = 0;
2694 2695
2695 2696 if (sfmmup == ksfmmup) { /* No support for 32/256M ksfmmu pages */
2696 2697 phys = ktsb_phys;
2697 2698 if (ttesz >= TTE4M) {
2698 2699 #ifndef sun4v
2699 2700 ASSERT((ttesz != TTE32M) && (ttesz != TTE256M));
2700 2701 #endif
2701 2702 tsb_base = (phys)? ktsb4m_pbase : (uint64_t)ktsb4m_base;
2702 2703 tsb_size = ktsb4m_szcode;
2703 2704 } else {
2704 2705 tsb_base = (phys)? ktsb_pbase : (uint64_t)ktsb_base;
2705 2706 tsb_size = ktsb_szcode;
2706 2707 }
2707 2708 } else {
2708 2709 SFMMU_GET_TSBINFO(tsbinfop, sfmmup, ttesz);
2709 2710
2710 2711 /*
2711 2712 * If there isn't a TSB for this page size, or the TSB is
2712 2713 * swapped out, there is nothing to do. Note that the latter
2713 2714 * case seems impossible but can occur if hat_pageunload()
2714 2715 * is called on an ISM mapping while the process is swapped
2715 2716 * out.
2716 2717 */
2717 2718 if (tsbinfop == NULL || (tsbinfop->tsb_flags & TSB_SWAPPED))
2718 2719 return;
2719 2720
2720 2721 /*
2721 2722 * If another thread is in the middle of relocating a TSB
2722 2723 * we can't unload the entry so set a flag so that the
2723 2724 * TSB will be flushed before it can be accessed by the
2724 2725 * process.
2725 2726 */
2726 2727 if ((tsbinfop->tsb_flags & TSB_RELOC_FLAG) != 0) {
2727 2728 if (ttep == NULL)
2728 2729 tsbinfop->tsb_flags |= TSB_FLUSH_NEEDED;
2729 2730 return;
2730 2731 }
2731 2732 #if defined(UTSB_PHYS)
2732 2733 phys = 1;
2733 2734 tsb_base = (uint64_t)tsbinfop->tsb_pa;
2734 2735 #else
2735 2736 tsb_base = (uint64_t)tsbinfop->tsb_va;
2736 2737 #endif
2737 2738 tsb_size = tsbinfop->tsb_szc;
2738 2739 }
2739 2740 if (ttesz >= TTE4M)
2740 2741 vpshift = MMU_PAGESHIFT4M;
2741 2742
2742 2743 tsbe_addr = sfmmu_get_tsbe(tsb_base, vaddr, vpshift, tsb_size);
2743 2744 tag = sfmmu_make_tsbtag(vaddr);
2744 2745
2745 2746 if (ttep == NULL) {
2746 2747 sfmmu_unload_tsbe(tsbe_addr, tag, phys);
2747 2748 } else {
2748 2749 if (ttesz >= TTE4M) {
2749 2750 SFMMU_STAT(sf_tsb_load4m);
2750 2751 } else {
2751 2752 SFMMU_STAT(sf_tsb_load8k);
2752 2753 }
2753 2754
2754 2755 sfmmu_load_tsbe(tsbe_addr, tag, ttep, phys);
2755 2756 }
2756 2757 }
2757 2758
2758 2759 /*
2759 2760 * Unmap all entries from [start, end) matching the given page size.
2760 2761 *
2761 2762 * This function is used primarily to unmap replicated 64K or 512K entries
2762 2763 * from the TSB that are inserted using the base page size TSB pointer, but
2763 2764 * it may also be called to unmap a range of addresses from the TSB.
2764 2765 */
2765 2766 void
2766 2767 sfmmu_unload_tsb_range(sfmmu_t *sfmmup, caddr_t start, caddr_t end, int ttesz)
2767 2768 {
2768 2769 struct tsb_info *tsbinfop;
2769 2770 uint64_t tag;
2770 2771 struct tsbe *tsbe_addr;
2771 2772 caddr_t vaddr;
2772 2773 uint64_t tsb_base;
2773 2774 int vpshift, vpgsz;
2774 2775 uint_t tsb_size;
2775 2776 int phys = 0;
2776 2777
2777 2778 /*
2778 2779 * Assumptions:
2779 2780 * If ttesz == 8K, 64K or 512K, we walk through the range 8K
2780 2781 * at a time shooting down any valid entries we encounter.
2781 2782 *
2782 2783 * If ttesz >= 4M we walk the range 4M at a time shooting
2783 2784 * down any valid mappings we find.
2784 2785 */
2785 2786 if (sfmmup == ksfmmup) {
2786 2787 phys = ktsb_phys;
2787 2788 if (ttesz >= TTE4M) {
2788 2789 #ifndef sun4v
2789 2790 ASSERT((ttesz != TTE32M) && (ttesz != TTE256M));
2790 2791 #endif
2791 2792 tsb_base = (phys)? ktsb4m_pbase : (uint64_t)ktsb4m_base;
2792 2793 tsb_size = ktsb4m_szcode;
2793 2794 } else {
2794 2795 tsb_base = (phys)? ktsb_pbase : (uint64_t)ktsb_base;
2795 2796 tsb_size = ktsb_szcode;
2796 2797 }
2797 2798 } else {
2798 2799 SFMMU_GET_TSBINFO(tsbinfop, sfmmup, ttesz);
2799 2800
2800 2801 /*
2801 2802 * If there isn't a TSB for this page size, or the TSB is
2802 2803 * swapped out, there is nothing to do. Note that the latter
2803 2804 * case seems impossible but can occur if hat_pageunload()
2804 2805 * is called on an ISM mapping while the process is swapped
2805 2806 * out.
2806 2807 */
2807 2808 if (tsbinfop == NULL || (tsbinfop->tsb_flags & TSB_SWAPPED))
2808 2809 return;
2809 2810
2810 2811 /*
2811 2812 * If another thread is in the middle of relocating a TSB
2812 2813 * we can't unload the entry so set a flag so that the
2813 2814 * TSB will be flushed before it can be accessed by the
2814 2815 * process.
2815 2816 */
2816 2817 if ((tsbinfop->tsb_flags & TSB_RELOC_FLAG) != 0) {
2817 2818 tsbinfop->tsb_flags |= TSB_FLUSH_NEEDED;
2818 2819 return;
2819 2820 }
2820 2821 #if defined(UTSB_PHYS)
2821 2822 phys = 1;
2822 2823 tsb_base = (uint64_t)tsbinfop->tsb_pa;
2823 2824 #else
2824 2825 tsb_base = (uint64_t)tsbinfop->tsb_va;
2825 2826 #endif
2826 2827 tsb_size = tsbinfop->tsb_szc;
2827 2828 }
2828 2829 if (ttesz >= TTE4M) {
2829 2830 vpshift = MMU_PAGESHIFT4M;
2830 2831 vpgsz = MMU_PAGESIZE4M;
2831 2832 } else {
2832 2833 vpshift = MMU_PAGESHIFT;
2833 2834 vpgsz = MMU_PAGESIZE;
2834 2835 }
2835 2836
2836 2837 for (vaddr = start; vaddr < end; vaddr += vpgsz) {
2837 2838 tag = sfmmu_make_tsbtag(vaddr);
2838 2839 tsbe_addr = sfmmu_get_tsbe(tsb_base, vaddr, vpshift, tsb_size);
2839 2840 sfmmu_unload_tsbe(tsbe_addr, tag, phys);
2840 2841 }
2841 2842 }
2842 2843
2843 2844 /*
2844 2845 * Select the optimum TSB size given the number of mappings
2845 2846 * that need to be cached.
2846 2847 */
2847 2848 static int
2848 2849 sfmmu_select_tsb_szc(pgcnt_t pgcnt)
2849 2850 {
2850 2851 int szc = 0;
2851 2852
2852 2853 #ifdef DEBUG
2853 2854 if (tsb_grow_stress) {
2854 2855 uint32_t randval = (uint32_t)gettick() >> 4;
2855 2856 return (randval % (tsb_max_growsize + 1));
2856 2857 }
2857 2858 #endif /* DEBUG */
2858 2859
2859 2860 while ((szc < tsb_max_growsize) && (pgcnt > SFMMU_RSS_TSBSIZE(szc)))
2860 2861 szc++;
2861 2862 return (szc);
2862 2863 }
2863 2864
2864 2865 /*
2865 2866 * This function will add a translation to the hme_blk and allocate the
2866 2867 * hme_blk if one does not exist.
2867 2868 * If a page structure is specified then it will add the
2868 2869 * corresponding hment to the mapping list.
2869 2870 * It will also update the hmenum field for the tte.
2870 2871 * Furthermore, it attempts to create a large page translation
2871 2872 * for <addr,hat> at page array pps. It assumes addr and first
2872 2873 * pp is correctly aligned. It returns 0 if successful and 1 otherwise.
2873 2874 */
2874 2875 static int
2875 2876 sfmmu_tteload_array(sfmmu_t *sfmmup, tte_t *ttep, caddr_t vaddr,
2876 2877 page_t **pps, uint_t flags, uint_t rid)
2877 2878 {
2878 2879 struct hmehash_bucket *hmebp;
2879 2880 struct hme_blk *hmeblkp;
2880 2881 int ret;
2881 2882 uint_t size;
2882 2883
2883 2884 /*
2884 2885 * Get mapping size.
2885 2886 */
2886 2887 size = TTE_CSZ(ttep);
2887 2888 ASSERT(!((uintptr_t)vaddr & TTE_PAGE_OFFSET(size)));
2888 2889
2889 2890 /*
2890 2891 * Acquire the hash bucket.
2891 2892 */
2892 2893 hmebp = sfmmu_tteload_acquire_hashbucket(sfmmup, vaddr, size, rid);
2893 2894 ASSERT(hmebp);
2894 2895
2895 2896 /*
2896 2897 * Find the hment block.
2897 2898 */
2898 2899 hmeblkp = sfmmu_tteload_find_hmeblk(sfmmup, hmebp, vaddr, size, flags,
2899 2900 rid);
2900 2901 ASSERT(hmeblkp);
2901 2902
2902 2903 /*
2903 2904 * Add the translation.
2904 2905 */
2905 2906 ret = sfmmu_tteload_addentry(sfmmup, hmeblkp, ttep, vaddr, pps, flags,
2906 2907 rid);
2907 2908
2908 2909 /*
2909 2910 * Release the hash bucket.
2910 2911 */
2911 2912 sfmmu_tteload_release_hashbucket(hmebp);
2912 2913
2913 2914 return (ret);
2914 2915 }
2915 2916
2916 2917 /*
2917 2918 * Function locks and returns a pointer to the hash bucket for vaddr and size.
2918 2919 */
2919 2920 static struct hmehash_bucket *
2920 2921 sfmmu_tteload_acquire_hashbucket(sfmmu_t *sfmmup, caddr_t vaddr, int size,
2921 2922 uint_t rid)
2922 2923 {
2923 2924 struct hmehash_bucket *hmebp;
2924 2925 int hmeshift;
2925 2926 void *htagid = sfmmutohtagid(sfmmup, rid);
2926 2927
2927 2928 ASSERT(htagid != NULL);
2928 2929
2929 2930 hmeshift = HME_HASH_SHIFT(size);
2930 2931
2931 2932 hmebp = HME_HASH_FUNCTION(htagid, vaddr, hmeshift);
2932 2933
2933 2934 SFMMU_HASH_LOCK(hmebp);
2934 2935
2935 2936 return (hmebp);
2936 2937 }
2937 2938
2938 2939 /*
2939 2940 * Function returns a pointer to an hmeblk in the hash bucket, hmebp. If the
2940 2941  * hmeblk doesn't exist for the [sfmmup, vaddr & size] signature, an hmeblk is
2941 2942 * allocated.
2942 2943 */
2943 2944 static struct hme_blk *
2944 2945 sfmmu_tteload_find_hmeblk(sfmmu_t *sfmmup, struct hmehash_bucket *hmebp,
2945 2946 caddr_t vaddr, uint_t size, uint_t flags, uint_t rid)
2946 2947 {
2947 2948 hmeblk_tag hblktag;
2948 2949 int hmeshift;
2949 2950 struct hme_blk *hmeblkp, *pr_hblk, *list = NULL;
2950 2951
2951 2952 SFMMU_VALIDATE_HMERID(sfmmup, rid, vaddr, TTEBYTES(size));
2952 2953
2953 2954 hblktag.htag_id = sfmmutohtagid(sfmmup, rid);
2954 2955 ASSERT(hblktag.htag_id != NULL);
2955 2956 hmeshift = HME_HASH_SHIFT(size);
2956 2957 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift);
2957 2958 hblktag.htag_rehash = HME_HASH_REHASH(size);
2958 2959 hblktag.htag_rid = rid;
2959 2960
2960 2961 ttearray_realloc:
2961 2962
2962 2963 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list);
2963 2964
2964 2965 /*
2965 2966 * We block until hblk_reserve_lock is released; it's held by
2966 2967 	 * the thread temporarily using hblk_reserve, until hblk_reserve is
2967 2968 	 * replaced by an hblk from sfmmu8_cache.
2968 2969 */
2969 2970 if (hmeblkp == (struct hme_blk *)hblk_reserve &&
2970 2971 hblk_reserve_thread != curthread) {
2971 2972 SFMMU_HASH_UNLOCK(hmebp);
2972 2973 mutex_enter(&hblk_reserve_lock);
2973 2974 mutex_exit(&hblk_reserve_lock);
2974 2975 SFMMU_STAT(sf_hblk_reserve_hit);
2975 2976 SFMMU_HASH_LOCK(hmebp);
2976 2977 goto ttearray_realloc;
2977 2978 }
2978 2979
2979 2980 if (hmeblkp == NULL) {
2980 2981 hmeblkp = sfmmu_hblk_alloc(sfmmup, vaddr, hmebp, size,
2981 2982 hblktag, flags, rid);
2982 2983 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || hmeblkp->hblk_shared);
2983 2984 ASSERT(SFMMU_IS_SHMERID_VALID(rid) || !hmeblkp->hblk_shared);
2984 2985 } else {
2985 2986 /*
2986 2987 * It is possible for 8k and 64k hblks to collide since they
2987 2988 * have the same rehash value. This is because we
2988 2989 * lazily free hblks and 8K/64K blks could be lingering.
2989 2990 		 * If we find a size mismatch, we free the block and try again.
2990 2991 */
2991 2992 if (get_hblk_ttesz(hmeblkp) != size) {
2992 2993 ASSERT(!hmeblkp->hblk_vcnt);
2993 2994 ASSERT(!hmeblkp->hblk_hmecnt);
2994 2995 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
2995 2996 &list, 0);
2996 2997 goto ttearray_realloc;
2997 2998 }
2998 2999 if (hmeblkp->hblk_shw_bit) {
2999 3000 /*
3000 3001 * if the hblk was previously used as a shadow hblk then
3001 3002 * we will change it to a normal hblk
3002 3003 */
3003 3004 ASSERT(!hmeblkp->hblk_shared);
3004 3005 if (hmeblkp->hblk_shw_mask) {
3005 3006 sfmmu_shadow_hcleanup(sfmmup, hmeblkp, hmebp);
3006 3007 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
3007 3008 goto ttearray_realloc;
3008 3009 } else {
3009 3010 hmeblkp->hblk_shw_bit = 0;
3010 3011 }
3011 3012 }
3012 3013 SFMMU_STAT(sf_hblk_hit);
3013 3014 }
3014 3015
3015 3016 /*
3016 3017 * hat_memload() should never call kmem_cache_free() for kernel hmeblks;
3017 3018 * see block comment showing the stacktrace in sfmmu_hblk_alloc();
3018 3019 * set the flag parameter to 1 so that sfmmu_hblks_list_purge() will
3019 3020 * just add these hmeblks to the per-cpu pending queue.
3020 3021 */
3021 3022 sfmmu_hblks_list_purge(&list, 1);
3022 3023
3023 3024 ASSERT(get_hblk_ttesz(hmeblkp) == size);
3024 3025 ASSERT(!hmeblkp->hblk_shw_bit);
3025 3026 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || hmeblkp->hblk_shared);
3026 3027 ASSERT(SFMMU_IS_SHMERID_VALID(rid) || !hmeblkp->hblk_shared);
3027 3028 ASSERT(hmeblkp->hblk_tag.htag_rid == rid);
3028 3029
3029 3030 return (hmeblkp);
3030 3031 }
3031 3032
3032 3033 /*
3033 3034 * Function adds a tte entry into the hmeblk. It returns 0 if successful and 1
3034 3035 * otherwise.
3035 3036 */
3036 3037 static int
3037 3038 sfmmu_tteload_addentry(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, tte_t *ttep,
3038 3039 caddr_t vaddr, page_t **pps, uint_t flags, uint_t rid)
3039 3040 {
3040 3041 page_t *pp = *pps;
3041 3042 int hmenum, size, remap;
3042 3043 tte_t tteold, flush_tte;
3043 3044 #ifdef DEBUG
3044 3045 tte_t orig_old;
3045 3046 #endif /* DEBUG */
3046 3047 struct sf_hment *sfhme;
3047 3048 kmutex_t *pml, *pmtx;
3048 3049 hatlock_t *hatlockp;
3049 3050 int myflt;
3050 3051
3051 3052 /*
3052 3053 * remove this panic when we decide to let user virtual address
3053 3054 * space be >= USERLIMIT.
3054 3055 */
3055 3056 if (!TTE_IS_PRIVILEGED(ttep) && vaddr >= (caddr_t)USERLIMIT)
3056 3057 panic("user addr %p in kernel space", (void *)vaddr);
3057 3058 #if defined(TTE_IS_GLOBAL)
3058 3059 if (TTE_IS_GLOBAL(ttep))
3059 3060 panic("sfmmu_tteload: creating global tte");
3060 3061 #endif
3061 3062
3062 3063 #ifdef DEBUG
3063 3064 if (pf_is_memory(sfmmu_ttetopfn(ttep, vaddr)) &&
3064 3065 !TTE_IS_PCACHEABLE(ttep) && !sfmmu_allow_nc_trans)
3065 3066 panic("sfmmu_tteload: non cacheable memory tte");
3066 3067 #endif /* DEBUG */
3067 3068
3068 3069 /* don't simulate dirty bit for writeable ISM/DISM mappings */
3069 3070 if ((flags & HAT_LOAD_SHARE) && TTE_IS_WRITABLE(ttep)) {
3070 3071 TTE_SET_REF(ttep);
3071 3072 TTE_SET_MOD(ttep);
3072 3073 }
3073 3074
3074 3075 if ((flags & HAT_LOAD_SHARE) || !TTE_IS_REF(ttep) ||
3075 3076 !TTE_IS_MOD(ttep)) {
3076 3077 /*
3077 3078 * Don't load TSB for dummy as in ISM. Also don't preload
3078 3079 * the TSB if the TTE isn't writable since we're likely to
3079 3080 * fault on it again -- preloading can be fairly expensive.
3080 3081 */
3081 3082 flags |= SFMMU_NO_TSBLOAD;
3082 3083 }
3083 3084
3084 3085 size = TTE_CSZ(ttep);
3085 3086 switch (size) {
3086 3087 case TTE8K:
3087 3088 SFMMU_STAT(sf_tteload8k);
3088 3089 break;
3089 3090 case TTE64K:
3090 3091 SFMMU_STAT(sf_tteload64k);
3091 3092 break;
3092 3093 case TTE512K:
3093 3094 SFMMU_STAT(sf_tteload512k);
3094 3095 break;
3095 3096 case TTE4M:
3096 3097 SFMMU_STAT(sf_tteload4m);
3097 3098 break;
3098 3099 case (TTE32M):
3099 3100 SFMMU_STAT(sf_tteload32m);
3100 3101 ASSERT(mmu_page_sizes == max_mmu_page_sizes);
3101 3102 break;
3102 3103 case (TTE256M):
3103 3104 SFMMU_STAT(sf_tteload256m);
3104 3105 ASSERT(mmu_page_sizes == max_mmu_page_sizes);
3105 3106 break;
3106 3107 }
3107 3108
3108 3109 ASSERT(!((uintptr_t)vaddr & TTE_PAGE_OFFSET(size)));
3109 3110 SFMMU_VALIDATE_HMERID(sfmmup, rid, vaddr, TTEBYTES(size));
3110 3111 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || hmeblkp->hblk_shared);
3111 3112 ASSERT(SFMMU_IS_SHMERID_VALID(rid) || !hmeblkp->hblk_shared);
3112 3113
3113 3114 HBLKTOHME_IDX(sfhme, hmeblkp, vaddr, hmenum);
3114 3115
3115 3116 /*
3116 3117 * Need to grab mlist lock here so that pageunload
3117 3118 * will not change tte behind us.
3118 3119 */
3119 3120 if (pp) {
3120 3121 pml = sfmmu_mlist_enter(pp);
3121 3122 }
3122 3123
3123 3124 sfmmu_copytte(&sfhme->hme_tte, &tteold);
3124 3125 /*
3125 3126 * Look for corresponding hment and if valid verify
3126 3127 * pfns are equal.
3127 3128 */
3128 3129 remap = TTE_IS_VALID(&tteold);
3129 3130 if (remap) {
3130 3131 pfn_t new_pfn, old_pfn;
3131 3132
3132 3133 old_pfn = TTE_TO_PFN(vaddr, &tteold);
3133 3134 new_pfn = TTE_TO_PFN(vaddr, ttep);
3134 3135
3135 3136 if (flags & HAT_LOAD_REMAP) {
3136 3137 /* make sure we are remapping same type of pages */
3137 3138 if (pf_is_memory(old_pfn) != pf_is_memory(new_pfn)) {
3138 3139 panic("sfmmu_tteload - tte remap io<->memory");
3139 3140 }
3140 3141 if (old_pfn != new_pfn &&
3141 3142 (pp != NULL || sfhme->hme_page != NULL)) {
3142 3143 panic("sfmmu_tteload - tte remap pp != NULL");
3143 3144 }
3144 3145 } else if (old_pfn != new_pfn) {
3145 3146 panic("sfmmu_tteload - tte remap, hmeblkp 0x%p",
3146 3147 (void *)hmeblkp);
3147 3148 }
3148 3149 ASSERT(TTE_CSZ(&tteold) == TTE_CSZ(ttep));
3149 3150 }
3150 3151
3151 3152 if (pp) {
3152 3153 if (size == TTE8K) {
3153 3154 #ifdef VAC
3154 3155 /*
3155 3156 * Handle VAC consistency
3156 3157 */
3157 3158 if (!remap && (cache & CACHE_VAC) && !PP_ISNC(pp)) {
3158 3159 sfmmu_vac_conflict(sfmmup, vaddr, pp);
3159 3160 }
3160 3161 #endif
3161 3162
3162 3163 if (TTE_IS_WRITABLE(ttep) && PP_ISRO(pp)) {
3163 3164 pmtx = sfmmu_page_enter(pp);
3164 3165 PP_CLRRO(pp);
3165 3166 sfmmu_page_exit(pmtx);
3166 3167 } else if (!PP_ISMAPPED(pp) &&
3167 3168 (!TTE_IS_WRITABLE(ttep)) && !(PP_ISMOD(pp))) {
3168 3169 pmtx = sfmmu_page_enter(pp);
3169 3170 if (!(PP_ISMOD(pp))) {
3170 3171 PP_SETRO(pp);
3171 3172 }
3172 3173 sfmmu_page_exit(pmtx);
3173 3174 }
3174 3175
3175 3176 } else if (sfmmu_pagearray_setup(vaddr, pps, ttep, remap)) {
3176 3177 /*
3177 3178 * sfmmu_pagearray_setup failed so return
3178 3179 */
3179 3180 sfmmu_mlist_exit(pml);
3180 3181 return (1);
3181 3182 }
3182 3183 }
3183 3184
3184 3185 /*
3185 3186 * Make sure hment is not on a mapping list.
3186 3187 */
3187 3188 ASSERT(remap || (sfhme->hme_page == NULL));
3188 3189
3189 3190 /* if it is not a remap then hme->next better be NULL */
3190 3191 ASSERT((!remap) ? sfhme->hme_next == NULL : 1);
3191 3192
3192 3193 if (flags & HAT_LOAD_LOCK) {
3193 3194 if ((hmeblkp->hblk_lckcnt + 1) >= MAX_HBLK_LCKCNT) {
3194 3195 panic("too high lckcnt-hmeblk %p",
3195 3196 (void *)hmeblkp);
3196 3197 }
3197 3198 atomic_inc_32(&hmeblkp->hblk_lckcnt);
3198 3199
3199 3200 HBLK_STACK_TRACE(hmeblkp, HBLK_LOCK);
3200 3201 }
3201 3202
3202 3203 #ifdef VAC
3203 3204 if (pp && PP_ISNC(pp)) {
3204 3205 /*
3205 3206 * If the physical page is marked to be uncacheable, like
3206 3207 * by a vac conflict, make sure the new mapping is also
3207 3208 * uncacheable.
3208 3209 */
3209 3210 TTE_CLR_VCACHEABLE(ttep);
3210 3211 ASSERT(PP_GET_VCOLOR(pp) == NO_VCOLOR);
3211 3212 }
3212 3213 #endif
3213 3214 ttep->tte_hmenum = hmenum;
3214 3215
3215 3216 #ifdef DEBUG
3216 3217 orig_old = tteold;
3217 3218 #endif /* DEBUG */
3218 3219
3219 3220 while (sfmmu_modifytte_try(&tteold, ttep, &sfhme->hme_tte) < 0) {
3220 3221 if ((sfmmup == KHATID) &&
3221 3222 (flags & (HAT_LOAD_LOCK | HAT_LOAD_REMAP))) {
3222 3223 sfmmu_copytte(&sfhme->hme_tte, &tteold);
3223 3224 }
3224 3225 #ifdef DEBUG
3225 3226 chk_tte(&orig_old, &tteold, ttep, hmeblkp);
3226 3227 #endif /* DEBUG */
3227 3228 }
3228 3229 ASSERT(TTE_IS_VALID(&sfhme->hme_tte));
3229 3230
3230 3231 if (!TTE_IS_VALID(&tteold)) {
3231 3232
3232 3233 atomic_inc_16(&hmeblkp->hblk_vcnt);
3233 3234 if (rid == SFMMU_INVALID_SHMERID) {
3234 3235 atomic_inc_ulong(&sfmmup->sfmmu_ttecnt[size]);
3235 3236 } else {
3236 3237 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
3237 3238 sf_region_t *rgnp = srdp->srd_hmergnp[rid];
3238 3239 /*
3239 3240 * We already accounted for region ttecnt's in sfmmu
3240 3241 * during hat_join_region() processing. Here we
3241 3242 * only update the ttecnt's in the region structure.
3242 3243 */
3243 3244 atomic_inc_ulong(&rgnp->rgn_ttecnt[size]);
3244 3245 }
3245 3246 }
3246 3247
3247 3248 myflt = (astosfmmu(curthread->t_procp->p_as) == sfmmup);
3248 3249 if (size > TTE8K && (flags & HAT_LOAD_SHARE) == 0 &&
3249 3250 sfmmup != ksfmmup) {
3250 3251 uchar_t tteflag = 1 << size;
3251 3252 if (rid == SFMMU_INVALID_SHMERID) {
3252 3253 if (!(sfmmup->sfmmu_tteflags & tteflag)) {
3253 3254 hatlockp = sfmmu_hat_enter(sfmmup);
3254 3255 sfmmup->sfmmu_tteflags |= tteflag;
3255 3256 sfmmu_hat_exit(hatlockp);
3256 3257 }
3257 3258 } else if (!(sfmmup->sfmmu_rtteflags & tteflag)) {
3258 3259 hatlockp = sfmmu_hat_enter(sfmmup);
3259 3260 sfmmup->sfmmu_rtteflags |= tteflag;
3260 3261 sfmmu_hat_exit(hatlockp);
3261 3262 }
3262 3263 /*
3263 3264 * Update the current CPU tsbmiss area, so the current thread
3264 3265 * won't need to take the tsbmiss for the new pagesize.
3265 3266 * The other threads in the process will update their tsb
3266 3267 * miss area lazily in sfmmu_tsbmiss_exception() when they
3267 3268 * fail to find the translation for a newly added pagesize.
3268 3269 */
3269 3270 if (size > TTE64K && myflt) {
3270 3271 struct tsbmiss *tsbmp;
3271 3272 kpreempt_disable();
3272 3273 tsbmp = &tsbmiss_area[CPU->cpu_id];
3273 3274 if (rid == SFMMU_INVALID_SHMERID) {
3274 3275 if (!(tsbmp->uhat_tteflags & tteflag)) {
3275 3276 tsbmp->uhat_tteflags |= tteflag;
3276 3277 }
3277 3278 } else {
3278 3279 if (!(tsbmp->uhat_rtteflags & tteflag)) {
3279 3280 tsbmp->uhat_rtteflags |= tteflag;
3280 3281 }
3281 3282 }
3282 3283 kpreempt_enable();
3283 3284 }
3284 3285 }
3285 3286
3286 3287 if (size >= TTE4M && (flags & HAT_LOAD_TEXT) &&
3287 3288 !SFMMU_FLAGS_ISSET(sfmmup, HAT_4MTEXT_FLAG)) {
3288 3289 hatlockp = sfmmu_hat_enter(sfmmup);
3289 3290 SFMMU_FLAGS_SET(sfmmup, HAT_4MTEXT_FLAG);
3290 3291 sfmmu_hat_exit(hatlockp);
3291 3292 }
3292 3293
3293 3294 flush_tte.tte_intlo = (tteold.tte_intlo ^ ttep->tte_intlo) &
3294 3295 hw_tte.tte_intlo;
3295 3296 flush_tte.tte_inthi = (tteold.tte_inthi ^ ttep->tte_inthi) &
3296 3297 hw_tte.tte_inthi;
3297 3298
3298 3299 if (remap && (flush_tte.tte_inthi || flush_tte.tte_intlo)) {
3299 3300 /*
3300 3301 * If this is a remap and the new tte differs from the old tte,
3301 3302 * we need to sync the mod bit and flush the TLB/TSB. We don't
3302 3303 * need to sync the ref bit because we currently always set the
3303 3304 * ref bit in tteload.
3304 3305 */
3305 3306 ASSERT(TTE_IS_REF(ttep));
3306 3307 if (TTE_IS_MOD(&tteold)) {
3307 3308 sfmmu_ttesync(sfmmup, vaddr, &tteold, pp);
3308 3309 }
3309 3310 /*
3310 3311 * hwtte bits shouldn't change for SRD hmeblks as long as SRD
3311 3312 * hmes are only used for read-only text. Adding this code for
3312 3313 * completeness and future use of shared hmeblks with writable
3313 3314 * mappings of VMODSORT vnodes.
3314 3315 */
3315 3316 if (hmeblkp->hblk_shared) {
3316 3317 cpuset_t cpuset = sfmmu_rgntlb_demap(vaddr,
3317 3318 sfmmup->sfmmu_srdp->srd_hmergnp[rid], hmeblkp, 1);
3318 3319 xt_sync(cpuset);
3319 3320 SFMMU_STAT_ADD(sf_region_remap_demap, 1);
3320 3321 } else {
3321 3322 sfmmu_tlb_demap(vaddr, sfmmup, hmeblkp, 0, 0);
3322 3323 xt_sync(sfmmup->sfmmu_cpusran);
3323 3324 }
3324 3325 }
3325 3326
3326 3327 if ((flags & SFMMU_NO_TSBLOAD) == 0) {
3327 3328 /*
3328 3329 * We only preload 8K and 4M mappings into the TSB, since
3329 3330 * 64K and 512K mappings are replicated and hence don't
3330 3331 * have a single, unique TSB entry. Ditto for 32M/256M.
3331 3332 */
3332 3333 if (size == TTE8K || size == TTE4M) {
3333 3334 sf_scd_t *scdp;
3334 3335 hatlockp = sfmmu_hat_enter(sfmmup);
3335 3336 /*
3336 3337 * Don't preload private TSB if the mapping is used
3337 3338 * by the shctx in the SCD.
3338 3339 */
3339 3340 scdp = sfmmup->sfmmu_scdp;
3340 3341 if (rid == SFMMU_INVALID_SHMERID || scdp == NULL ||
3341 3342 !SF_RGNMAP_TEST(scdp->scd_hmeregion_map, rid)) {
3342 3343 sfmmu_load_tsb(sfmmup, vaddr, &sfhme->hme_tte,
3343 3344 size);
3344 3345 }
3345 3346 sfmmu_hat_exit(hatlockp);
3346 3347 }
3347 3348 }
3348 3349 if (pp) {
3349 3350 if (!remap) {
3350 3351 HME_ADD(sfhme, pp);
3351 3352 atomic_inc_16(&hmeblkp->hblk_hmecnt);
3352 3353 ASSERT(hmeblkp->hblk_hmecnt > 0);
3353 3354
3354 3355 /*
3355 3356 * Cannot ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS)
3356 3357 * see pageunload() for comment.
3357 3358 */
3358 3359 }
3359 3360 sfmmu_mlist_exit(pml);
3360 3361 }
3361 3362
3362 3363 return (0);
3363 3364 }
3364 3365 /*
3365 3366 * Function unlocks hash bucket.
3366 3367 */
3367 3368 static void
3368 3369 sfmmu_tteload_release_hashbucket(struct hmehash_bucket *hmebp)
3369 3370 {
3370 3371 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
3371 3372 SFMMU_HASH_UNLOCK(hmebp);
3372 3373 }
3373 3374
3374 3375 /*
3375 3376 * Function which checks and sets up the page array for a large
3376 3377 * translation. Will set the p_vcolor, p_index, p_ro fields.
3377 3378 * Assumes the addr and pfnum of the first page are properly aligned.
3378 3379 * Will check for physical contiguity. If the check fails it returns
3379 3380 * nonzero.
3380 3381 */
3381 3382 static int
3382 3383 sfmmu_pagearray_setup(caddr_t addr, page_t **pps, tte_t *ttep, int remap)
3383 3384 {
3384 3385 int i, index, ttesz;
3385 3386 pfn_t pfnum;
3386 3387 pgcnt_t npgs;
3387 3388 page_t *pp, *pp1;
3388 3389 kmutex_t *pmtx;
3389 3390 #ifdef VAC
3390 3391 int osz;
3391 3392 int cflags = 0;
3392 3393 int vac_err = 0;
3393 3394 #endif
3394 3395 int newidx = 0;
3395 3396
3396 3397 ttesz = TTE_CSZ(ttep);
3397 3398
3398 3399 ASSERT(ttesz > TTE8K);
3399 3400
3400 3401 npgs = TTEPAGES(ttesz);
3401 3402 index = PAGESZ_TO_INDEX(ttesz);
3402 3403
3403 3404 pfnum = (*pps)->p_pagenum;
3404 3405 ASSERT(IS_P2ALIGNED(pfnum, npgs));
3405 3406
3406 3407 /*
3407 3408 * Save the first pp so we can do HAT_TMPNC at the end.
3408 3409 */
3409 3410 pp1 = *pps;
3410 3411 #ifdef VAC
3411 3412 osz = fnd_mapping_sz(pp1);
3412 3413 #endif
3413 3414
3414 3415 for (i = 0; i < npgs; i++, pps++) {
3415 3416 pp = *pps;
3416 3417 ASSERT(PAGE_LOCKED(pp));
3417 3418 ASSERT(pp->p_szc >= ttesz);
3418 3419 ASSERT(pp->p_szc == pp1->p_szc);
3419 3420 ASSERT(sfmmu_mlist_held(pp));
3420 3421
3421 3422 /*
3422 3423 * XXX is it possible to maintain P_RO on the root only?
3423 3424 */
3424 3425 if (TTE_IS_WRITABLE(ttep) && PP_ISRO(pp)) {
3425 3426 pmtx = sfmmu_page_enter(pp);
3426 3427 PP_CLRRO(pp);
3427 3428 sfmmu_page_exit(pmtx);
3428 3429 } else if (!PP_ISMAPPED(pp) && !TTE_IS_WRITABLE(ttep) &&
3429 3430 !PP_ISMOD(pp)) {
3430 3431 pmtx = sfmmu_page_enter(pp);
3431 3432 if (!(PP_ISMOD(pp))) {
3432 3433 PP_SETRO(pp);
3433 3434 }
3434 3435 sfmmu_page_exit(pmtx);
3435 3436 }
3436 3437
3437 3438 /*
3438 3439 * If this is a remap we skip vac & contiguity checks.
3439 3440 */
3440 3441 if (remap)
3441 3442 continue;
3442 3443
3443 3444 /*
3444 3445 * set p_vcolor and detect any vac conflicts.
3445 3446 */
3446 3447 #ifdef VAC
3447 3448 if (vac_err == 0) {
3448 3449 vac_err = sfmmu_vacconflict_array(addr, pp, &cflags);
3449 3450
3450 3451 }
3451 3452 #endif
3452 3453
3453 3454 /*
3454 3455 * Save current index in case we need to undo it.
3455 3456 * Note: "PAGESZ_TO_INDEX(sz) (1 << (sz))"
3456 3457 * "SFMMU_INDEX_SHIFT 6"
3457 3458 * "SFMMU_INDEX_MASK ((1 << SFMMU_INDEX_SHIFT) - 1)"
3458 3459 * "PP_MAPINDEX(p_index) (p_index & SFMMU_INDEX_MASK)"
3459 3460 *
3460 3461 * So: index = PAGESZ_TO_INDEX(ttesz);
3461 3462 * if ttesz == 1 then index = 0x2
3462 3463 * 2 then index = 0x4
3463 3464 * 3 then index = 0x8
3464 3465 * 4 then index = 0x10
3465 3466 * 5 then index = 0x20
3466 3467 * The code below checks if it's a new pagesize (i.e., newidx)
3467 3468 * in case we need to take it back out of p_index,
3468 3469 * and then ORs the new index into the existing index.
3469 3470 */
3470 3471 if ((PP_MAPINDEX(pp) & index) == 0)
3471 3472 newidx = 1;
3472 3473 pp->p_index = (PP_MAPINDEX(pp) | index);
3473 3474
3474 3475 /*
3475 3476 * contiguity check
3476 3477 */
3477 3478 if (pp->p_pagenum != pfnum) {
3478 3479 /*
3479 3480 * If we fail the contiguity test then
3480 3481 * the only thing we need to fix is the p_index field.
3481 3482 * We might get a few extra flushes but since this
3482 3483 * path is rare that is ok. The p_ro field will
3483 3484 * get automatically fixed on the next tteload to
3484 3485 * the page. NO TNC bit is set yet.
3485 3486 */
3486 3487 while (i >= 0) {
3487 3488 pp = *pps;
3488 3489 if (newidx)
3489 3490 pp->p_index = (PP_MAPINDEX(pp) &
3490 3491 ~index);
3491 3492 pps--;
3492 3493 i--;
3493 3494 }
3494 3495 return (1);
3495 3496 }
3496 3497 pfnum++;
3497 3498 addr += MMU_PAGESIZE;
3498 3499 }
3499 3500
3500 3501 #ifdef VAC
3501 3502 if (vac_err) {
3502 3503 if (ttesz > osz) {
3503 3504 /*
3504 3505 * There are some smaller mappings that cause vac
3505 3506 * conflicts. Convert all existing small mappings to
3506 3507 * TNC.
3507 3508 */
3508 3509 SFMMU_STAT_ADD(sf_uncache_conflict, npgs);
3509 3510 sfmmu_page_cache_array(pp1, HAT_TMPNC, CACHE_FLUSH,
3510 3511 npgs);
3511 3512 } else {
3512 3513 /* EMPTY */
3513 3514 /*
3514 3515 * If there exists a big page mapping,
3515 3516 * that means the whole existing big page
3516 3517 * has the TNC setting already. No need to convert to
3517 3518 * TNC again.
3518 3519 */
3519 3520 ASSERT(PP_ISTNC(pp1));
3520 3521 }
3521 3522 }
3522 3523 #endif /* VAC */
3523 3524
3524 3525 return (0);
3525 3526 }
3526 3527
3527 3528 #ifdef VAC
3528 3529 /*
3529 3530 * Routine that checks vac consistency for a large page. It also
3530 3531 * sets the virtual color for all pp's of this big mapping.
3531 3532 */
3532 3533 static int
3533 3534 sfmmu_vacconflict_array(caddr_t addr, page_t *pp, int *cflags)
3534 3535 {
3535 3536 int vcolor, ocolor;
3536 3537
3537 3538 ASSERT(sfmmu_mlist_held(pp));
3538 3539
3539 3540 if (PP_ISNC(pp)) {
3540 3541 return (HAT_TMPNC);
3541 3542 }
3542 3543
3543 3544 vcolor = addr_to_vcolor(addr);
3544 3545 if (PP_NEWPAGE(pp)) {
3545 3546 PP_SET_VCOLOR(pp, vcolor);
3546 3547 return (0);
3547 3548 }
3548 3549
3549 3550 ocolor = PP_GET_VCOLOR(pp);
3550 3551 if (ocolor == vcolor) {
3551 3552 return (0);
3552 3553 }
3553 3554
3554 3555 if (!PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp)) {
3555 3556 /*
3556 3557 * The previous user of the page had a different color
3557 3558 * but since there are no current users
3558 3559 * we just flush the cache and change the color.
3559 3560 * As an optimization for large pages we flush the
3560 3561 * entire cache of that color and set a flag.
3561 3562 */
3562 3563 SFMMU_STAT(sf_pgcolor_conflict);
3563 3564 if (!CacheColor_IsFlushed(*cflags, ocolor)) {
3564 3565 CacheColor_SetFlushed(*cflags, ocolor);
3565 3566 sfmmu_cache_flushcolor(ocolor, pp->p_pagenum);
3566 3567 }
3567 3568 PP_SET_VCOLOR(pp, vcolor);
3568 3569 return (0);
3569 3570 }
3570 3571
3571 3572 /*
3572 3573 * We got a real conflict with a current mapping.
3573 3574 * Set flags to start uncaching all mappings
3574 3575 * and return failure so we restart looping
3575 3576 * over the pp array from the beginning.
3576 3577 */
3577 3578 return (HAT_TMPNC);
3578 3579 }
3579 3580 #endif /* VAC */
3580 3581
3581 3582 /*
3582 3583 * creates a large page shadow hmeblk for a tte.
3583 3584 * The purpose of this routine is to allow us to do quick unloads because
3584 3585 * the vm layer can easily pass a very large but sparsely populated range.
3585 3586 */
3586 3587 static struct hme_blk *
3587 3588 sfmmu_shadow_hcreate(sfmmu_t *sfmmup, caddr_t vaddr, int ttesz, uint_t flags)
3588 3589 {
3589 3590 struct hmehash_bucket *hmebp;
3590 3591 hmeblk_tag hblktag;
3591 3592 int hmeshift, size, vshift;
3592 3593 uint_t shw_mask, newshw_mask;
3593 3594 struct hme_blk *hmeblkp;
3594 3595
3595 3596 ASSERT(sfmmup != KHATID);
3596 3597 if (mmu_page_sizes == max_mmu_page_sizes) {
3597 3598 ASSERT(ttesz < TTE256M);
3598 3599 } else {
3599 3600 ASSERT(ttesz < TTE4M);
3600 3601 ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0);
3601 3602 ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0);
3602 3603 }
3603 3604
3604 3605 if (ttesz == TTE8K) {
3605 3606 size = TTE512K;
3606 3607 } else {
3607 3608 size = ++ttesz;
3608 3609 }
3609 3610
3610 3611 hblktag.htag_id = sfmmup;
3611 3612 hmeshift = HME_HASH_SHIFT(size);
3612 3613 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift);
3613 3614 hblktag.htag_rehash = HME_HASH_REHASH(size);
3614 3615 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
3615 3616 hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift);
3616 3617
3617 3618 SFMMU_HASH_LOCK(hmebp);
3618 3619
3619 3620 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp);
3620 3621 ASSERT(hmeblkp != (struct hme_blk *)hblk_reserve);
3621 3622 if (hmeblkp == NULL) {
3622 3623 hmeblkp = sfmmu_hblk_alloc(sfmmup, vaddr, hmebp, size,
3623 3624 hblktag, flags, SFMMU_INVALID_SHMERID);
3624 3625 }
3625 3626 ASSERT(hmeblkp);
3626 3627 if (!hmeblkp->hblk_shw_mask) {
3627 3628 /*
3628 3629 * If this is an unused hblk it was just allocated or could
3629 3630 * potentially be a previous large page hblk so we need to
3630 3631 * set the shadow bit.
3631 3632 */
3632 3633 ASSERT(!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt);
3633 3634 hmeblkp->hblk_shw_bit = 1;
3634 3635 } else if (hmeblkp->hblk_shw_bit == 0) {
3635 3636 panic("sfmmu_shadow_hcreate: shw bit not set in hmeblkp 0x%p",
3636 3637 (void *)hmeblkp);
3637 3638 }
3638 3639 ASSERT(hmeblkp->hblk_shw_bit == 1);
3639 3640 ASSERT(!hmeblkp->hblk_shared);
3640 3641 vshift = vaddr_to_vshift(hblktag, vaddr, size);
3641 3642 ASSERT(vshift < 8);
3642 3643 /*
3643 3644 * Atomically set the shw mask bit; retry until atomic_cas_32() succeeds (returns the value we read)
3644 3645 */
3645 3646 do {
3646 3647 shw_mask = hmeblkp->hblk_shw_mask;
3647 3648 newshw_mask = shw_mask | (1 << vshift);
3648 3649 newshw_mask = atomic_cas_32(&hmeblkp->hblk_shw_mask, shw_mask,
3649 3650 newshw_mask);
3650 3651 } while (newshw_mask != shw_mask);
3651 3652
3652 3653 SFMMU_HASH_UNLOCK(hmebp);
3653 3654
3654 3655 return (hmeblkp);
3655 3656 }
3656 3657
3657 3658 /*
3658 3659 * This routine cleans up a previous shadow hmeblk and changes it to
3659 3660 * a regular hblk. This happens rarely but it is possible
3660 3661 * when a process wants to use large pages and there are hblks still
3661 3662 * lying around from the previous address space that used these hmeblks.
3662 3663 * The alternative was to clean up the shadow hblks at unload time
3663 3664 * but since so few user processes actually use large pages, it is
3664 3665 * better to be lazy and clean up at this time.
3665 3666 */
3666 3667 static void
3667 3668 sfmmu_shadow_hcleanup(sfmmu_t *sfmmup, struct hme_blk *hmeblkp,
3668 3669 struct hmehash_bucket *hmebp)
3669 3670 {
3670 3671 caddr_t addr, endaddr;
3671 3672 int hashno, size;
3672 3673
3673 3674 ASSERT(hmeblkp->hblk_shw_bit);
3674 3675 ASSERT(!hmeblkp->hblk_shared);
3675 3676
3676 3677 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
3677 3678
3678 3679 if (!hmeblkp->hblk_shw_mask) {
3679 3680 hmeblkp->hblk_shw_bit = 0;
3680 3681 return;
3681 3682 }
3682 3683 addr = (caddr_t)get_hblk_base(hmeblkp);
3683 3684 endaddr = get_hblk_endaddr(hmeblkp);
3684 3685 size = get_hblk_ttesz(hmeblkp);
3685 3686 hashno = size - 1;
3686 3687 ASSERT(hashno > 0);
3687 3688 SFMMU_HASH_UNLOCK(hmebp);
3688 3689
3689 3690 sfmmu_free_hblks(sfmmup, addr, endaddr, hashno);
3690 3691
3691 3692 SFMMU_HASH_LOCK(hmebp);
3692 3693 }
3693 3694
3694 3695 static void
3695 3696 sfmmu_free_hblks(sfmmu_t *sfmmup, caddr_t addr, caddr_t endaddr,
3696 3697 int hashno)
3697 3698 {
3698 3699 int hmeshift, shadow = 0;
3699 3700 hmeblk_tag hblktag;
3700 3701 struct hmehash_bucket *hmebp;
3701 3702 struct hme_blk *hmeblkp;
3702 3703 struct hme_blk *nx_hblk, *pr_hblk, *list = NULL;
3703 3704
3704 3705 ASSERT(hashno > 0);
3705 3706 hblktag.htag_id = sfmmup;
3706 3707 hblktag.htag_rehash = hashno;
3707 3708 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
3708 3709
3709 3710 hmeshift = HME_HASH_SHIFT(hashno);
3710 3711
3711 3712 while (addr < endaddr) {
3712 3713 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
3713 3714 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
3714 3715 SFMMU_HASH_LOCK(hmebp);
3715 3716 /* inline HME_HASH_SEARCH */
3716 3717 hmeblkp = hmebp->hmeblkp;
3717 3718 pr_hblk = NULL;
3718 3719 while (hmeblkp) {
3719 3720 if (HTAGS_EQ(hmeblkp->hblk_tag, hblktag)) {
3720 3721 /* found hme_blk */
3721 3722 ASSERT(!hmeblkp->hblk_shared);
3722 3723 if (hmeblkp->hblk_shw_bit) {
3723 3724 if (hmeblkp->hblk_shw_mask) {
3724 3725 shadow = 1;
3725 3726 sfmmu_shadow_hcleanup(sfmmup,
3726 3727 hmeblkp, hmebp);
3727 3728 break;
3728 3729 } else {
3729 3730 hmeblkp->hblk_shw_bit = 0;
3730 3731 }
3731 3732 }
3732 3733
3733 3734 /*
3734 3735 * Hblk_hmecnt and hblk_vcnt could be nonzero
3735 3736 * since hblk_unload() does not guarantee that.
3736 3737 *
3737 3738 * XXX - this could cause tteload() to spin
3738 3739 * where sfmmu_shadow_hcleanup() is called.
3739 3740 */
3740 3741 }
3741 3742
3742 3743 nx_hblk = hmeblkp->hblk_next;
3743 3744 if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
3744 3745 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
3745 3746 &list, 0);
3746 3747 } else {
3747 3748 pr_hblk = hmeblkp;
3748 3749 }
3749 3750 hmeblkp = nx_hblk;
3750 3751 }
3751 3752
3752 3753 SFMMU_HASH_UNLOCK(hmebp);
3753 3754
3754 3755 if (shadow) {
3755 3756 /*
3756 3757 * We found another shadow hblk and cleaned up its
3757 3758 * children. We need to go back and clean up
3758 3759 * the original hblk, so we don't advance
3759 3760 * addr.
3760 3761 */
3761 3762 shadow = 0;
3762 3763 } else {
3763 3764 addr = (caddr_t)roundup((uintptr_t)addr + 1,
3764 3765 (1 << hmeshift));
3765 3766 }
3766 3767 }
3767 3768 sfmmu_hblks_list_purge(&list, 0);
3768 3769 }
3769 3770
3770 3771 /*
3771 3772 * This routine's job is to delete stale invalid shared hmeregion hmeblks that
3772 3773 * may still linger on after pageunload.
3773 3774 */
3774 3775 static void
3775 3776 sfmmu_cleanup_rhblk(sf_srd_t *srdp, caddr_t addr, uint_t rid, int ttesz)
3776 3777 {
3777 3778 int hmeshift;
3778 3779 hmeblk_tag hblktag;
3779 3780 struct hmehash_bucket *hmebp;
3780 3781 struct hme_blk *hmeblkp;
3781 3782 struct hme_blk *pr_hblk;
3782 3783 struct hme_blk *list = NULL;
3783 3784
3784 3785 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
3785 3786 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
3786 3787
3787 3788 hmeshift = HME_HASH_SHIFT(ttesz);
3788 3789 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
3789 3790 hblktag.htag_rehash = ttesz;
3790 3791 hblktag.htag_rid = rid;
3791 3792 hblktag.htag_id = srdp;
3792 3793 hmebp = HME_HASH_FUNCTION(srdp, addr, hmeshift);
3793 3794
3794 3795 SFMMU_HASH_LOCK(hmebp);
3795 3796 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list);
3796 3797 if (hmeblkp != NULL) {
3797 3798 ASSERT(hmeblkp->hblk_shared);
3798 3799 ASSERT(!hmeblkp->hblk_shw_bit);
3799 3800 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) {
3800 3801 panic("sfmmu_cleanup_rhblk: valid hmeblk");
3801 3802 }
3802 3803 ASSERT(!hmeblkp->hblk_lckcnt);
3803 3804 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
3804 3805 &list, 0);
3805 3806 }
3806 3807 SFMMU_HASH_UNLOCK(hmebp);
3807 3808 sfmmu_hblks_list_purge(&list, 0);
3808 3809 }
3809 3810
3810 3811 /* ARGSUSED */
3811 3812 static void
3812 3813 sfmmu_rgn_cb_noop(caddr_t saddr, caddr_t eaddr, caddr_t r_saddr,
3813 3814 size_t r_size, void *r_obj, u_offset_t r_objoff)
3814 3815 {
3815 3816 }
3816 3817
3817 3818 /*
3818 3819 * Searches for an hmeblk which maps addr; if the hmeblk is found,
3819 3820 * unloads this mapping and updates *eaddrp.
3820 3821 */
3821 3822 static void
3822 3823 sfmmu_unload_hmeregion_va(sf_srd_t *srdp, uint_t rid, caddr_t addr,
3823 3824 caddr_t eaddr, int ttesz, caddr_t *eaddrp)
3824 3825 {
3825 3826 int hmeshift;
3826 3827 hmeblk_tag hblktag;
3827 3828 struct hmehash_bucket *hmebp;
3828 3829 struct hme_blk *hmeblkp;
3829 3830 struct hme_blk *pr_hblk;
3830 3831 struct hme_blk *list = NULL;
3831 3832
3832 3833 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
3833 3834 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
3834 3835 ASSERT(ttesz >= HBLK_MIN_TTESZ);
3835 3836
3836 3837 hmeshift = HME_HASH_SHIFT(ttesz);
3837 3838 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
3838 3839 hblktag.htag_rehash = ttesz;
3839 3840 hblktag.htag_rid = rid;
3840 3841 hblktag.htag_id = srdp;
3841 3842 hmebp = HME_HASH_FUNCTION(srdp, addr, hmeshift);
3842 3843
3843 3844 SFMMU_HASH_LOCK(hmebp);
3844 3845 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list);
3845 3846 if (hmeblkp != NULL) {
3846 3847 ASSERT(hmeblkp->hblk_shared);
3847 3848 ASSERT(!hmeblkp->hblk_lckcnt);
3848 3849 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) {
3849 3850 *eaddrp = sfmmu_hblk_unload(NULL, hmeblkp, addr,
3850 3851 eaddr, NULL, HAT_UNLOAD);
3851 3852 ASSERT(*eaddrp > addr);
3852 3853 }
3853 3854 ASSERT(!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt);
3854 3855 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
3855 3856 &list, 0);
3856 3857 }
3857 3858 SFMMU_HASH_UNLOCK(hmebp);
3858 3859 sfmmu_hblks_list_purge(&list, 0);
3859 3860 }
3860 3861
3861 3862 static void
3862 3863 sfmmu_unload_hmeregion(sf_srd_t *srdp, sf_region_t *rgnp)
3863 3864 {
3864 3865 int ttesz = rgnp->rgn_pgszc;
3865 3866 size_t rsz = rgnp->rgn_size;
3866 3867 caddr_t rsaddr = rgnp->rgn_saddr;
3867 3868 caddr_t readdr = rsaddr + rsz;
3868 3869 caddr_t rhsaddr;
3869 3870 caddr_t va;
3870 3871 uint_t rid = rgnp->rgn_id;
3871 3872 caddr_t cbsaddr;
3872 3873 caddr_t cbeaddr;
3873 3874 hat_rgn_cb_func_t rcbfunc;
3874 3875 ulong_t cnt;
3875 3876
3876 3877 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
3877 3878 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
3878 3879
3879 3880 ASSERT(IS_P2ALIGNED(rsaddr, TTEBYTES(ttesz)));
3880 3881 ASSERT(IS_P2ALIGNED(rsz, TTEBYTES(ttesz)));
3881 3882 if (ttesz < HBLK_MIN_TTESZ) {
3882 3883 ttesz = HBLK_MIN_TTESZ;
3883 3884 rhsaddr = (caddr_t)P2ALIGN((uintptr_t)rsaddr, HBLK_MIN_BYTES);
3884 3885 } else {
3885 3886 rhsaddr = rsaddr;
3886 3887 }
3887 3888
3888 3889 if ((rcbfunc = rgnp->rgn_cb_function) == NULL) {
3889 3890 rcbfunc = sfmmu_rgn_cb_noop;
3890 3891 }
3891 3892
3892 3893 while (ttesz >= HBLK_MIN_TTESZ) {
3893 3894 cbsaddr = rsaddr;
3894 3895 cbeaddr = rsaddr;
3895 3896 if (!(rgnp->rgn_hmeflags & (1 << ttesz))) {
3896 3897 ttesz--;
3897 3898 continue;
3898 3899 }
3899 3900 cnt = 0;
3900 3901 va = rsaddr;
3901 3902 while (va < readdr) {
3902 3903 ASSERT(va >= rhsaddr);
3903 3904 if (va != cbeaddr) {
3904 3905 if (cbeaddr != cbsaddr) {
3905 3906 ASSERT(cbeaddr > cbsaddr);
3906 3907 (*rcbfunc)(cbsaddr, cbeaddr,
3907 3908 rsaddr, rsz, rgnp->rgn_obj,
3908 3909 rgnp->rgn_objoff);
3909 3910 }
3910 3911 cbsaddr = va;
3911 3912 cbeaddr = va;
3912 3913 }
3913 3914 sfmmu_unload_hmeregion_va(srdp, rid, va, readdr,
3914 3915 ttesz, &cbeaddr);
3915 3916 cnt++;
3916 3917 va = rhsaddr + (cnt << TTE_PAGE_SHIFT(ttesz));
3917 3918 }
3918 3919 if (cbeaddr != cbsaddr) {
3919 3920 ASSERT(cbeaddr > cbsaddr);
3920 3921 (*rcbfunc)(cbsaddr, cbeaddr, rsaddr,
3921 3922 rsz, rgnp->rgn_obj,
3922 3923 rgnp->rgn_objoff);
3923 3924 }
3924 3925 ttesz--;
3925 3926 }
3926 3927 }
3927 3928
3928 3929 /*
3929 3930 * Release one hardware address translation lock on the given address range.
3930 3931 */
3931 3932 void
3932 3933 hat_unlock(struct hat *sfmmup, caddr_t addr, size_t len)
3933 3934 {
3934 3935 struct hmehash_bucket *hmebp;
3935 3936 hmeblk_tag hblktag;
3936 3937 int hmeshift, hashno = 1;
3937 3938 struct hme_blk *hmeblkp, *list = NULL;
3938 3939 caddr_t endaddr;
3939 3940
3940 3941 ASSERT(sfmmup != NULL);
3941 3942
3942 3943 ASSERT((sfmmup == ksfmmup) || AS_LOCK_HELD(sfmmup->sfmmu_as));
3943 3944 ASSERT((len & MMU_PAGEOFFSET) == 0);
3944 3945 endaddr = addr + len;
3945 3946 hblktag.htag_id = sfmmup;
3946 3947 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
3947 3948
3948 3949 /*
3949 3950 * Spitfire supports 4 page sizes.
3950 3951 * Most pages are expected to be of the smallest page size (8K) and
3951 3952 * these will not need to be rehashed. 64K pages also don't need to be
3952 3953 * rehashed because an hmeblk spans 64K of address space. 512K pages
3953 3954 * might need 1 rehash and 4M pages might need 2 rehashes.
3954 3955 */
3955 3956 while (addr < endaddr) {
3956 3957 hmeshift = HME_HASH_SHIFT(hashno);
3957 3958 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
3958 3959 hblktag.htag_rehash = hashno;
3959 3960 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
3960 3961
3961 3962 SFMMU_HASH_LOCK(hmebp);
3962 3963
3963 3964 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
3964 3965 if (hmeblkp != NULL) {
3965 3966 ASSERT(!hmeblkp->hblk_shared);
3966 3967 /*
3967 3968 * If we encounter a shadow hmeblk then
3968 3969 * we know there are no valid hmeblks mapping
3969 3970 * this address at this size or larger.
3970 3971 * Just increment address by the smallest
3971 3972 * page size.
3972 3973 */
3973 3974 if (hmeblkp->hblk_shw_bit) {
3974 3975 addr += MMU_PAGESIZE;
3975 3976 } else {
3976 3977 addr = sfmmu_hblk_unlock(hmeblkp, addr,
3977 3978 endaddr);
3978 3979 }
3979 3980 SFMMU_HASH_UNLOCK(hmebp);
3980 3981 hashno = 1;
3981 3982 continue;
3982 3983 }
3983 3984 SFMMU_HASH_UNLOCK(hmebp);
3984 3985
3985 3986 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) {
3986 3987 /*
3987 3988 * We have traversed the whole list and rehashed
3988 3989 * if necessary without finding the address to unlock,
3989 3990 * which should never happen.
3990 3991 */
3991 3992 panic("sfmmu_unlock: addr not found. "
3992 3993 "addr %p hat %p", (void *)addr, (void *)sfmmup);
3993 3994 } else {
3994 3995 hashno++;
3995 3996 }
3996 3997 }
3997 3998
3998 3999 sfmmu_hblks_list_purge(&list, 0);
3999 4000 }
4000 4001
4001 4002 void
4002 4003 hat_unlock_region(struct hat *sfmmup, caddr_t addr, size_t len,
4003 4004 hat_region_cookie_t rcookie)
4004 4005 {
4005 4006 sf_srd_t *srdp;
4006 4007 sf_region_t *rgnp;
4007 4008 int ttesz;
4008 4009 uint_t rid;
4009 4010 caddr_t eaddr;
4010 4011 caddr_t va;
4011 4012 int hmeshift;
4012 4013 hmeblk_tag hblktag;
4013 4014 struct hmehash_bucket *hmebp;
4014 4015 struct hme_blk *hmeblkp;
4015 4016 struct hme_blk *pr_hblk;
4016 4017 struct hme_blk *list;
4017 4018
4018 4019 if (rcookie == HAT_INVALID_REGION_COOKIE) {
4019 4020 hat_unlock(sfmmup, addr, len);
4020 4021 return;
4021 4022 }
4022 4023
4023 4024 ASSERT(sfmmup != NULL);
4024 4025 ASSERT(sfmmup != ksfmmup);
4025 4026
4026 4027 srdp = sfmmup->sfmmu_srdp;
4027 4028 rid = (uint_t)((uint64_t)rcookie);
4028 4029 VERIFY3U(rid, <, SFMMU_MAX_HME_REGIONS);
4029 4030 eaddr = addr + len;
4030 4031 va = addr;
4031 4032 list = NULL;
4032 4033 rgnp = srdp->srd_hmergnp[rid];
4033 4034 SFMMU_VALIDATE_HMERID(sfmmup, rid, addr, len);
4034 4035
4035 4036 ASSERT(IS_P2ALIGNED(addr, TTEBYTES(rgnp->rgn_pgszc)));
4036 4037 ASSERT(IS_P2ALIGNED(len, TTEBYTES(rgnp->rgn_pgszc)));
4037 4038 if (rgnp->rgn_pgszc < HBLK_MIN_TTESZ) {
4038 4039 ttesz = HBLK_MIN_TTESZ;
4039 4040 } else {
4040 4041 ttesz = rgnp->rgn_pgszc;
4041 4042 }
4042 4043 while (va < eaddr) {
4043 4044 while (ttesz < rgnp->rgn_pgszc &&
4044 4045 IS_P2ALIGNED(va, TTEBYTES(ttesz + 1))) {
4045 4046 ttesz++;
4046 4047 }
4047 4048 while (ttesz >= HBLK_MIN_TTESZ) {
4048 4049 if (!(rgnp->rgn_hmeflags & (1 << ttesz))) {
4049 4050 ttesz--;
4050 4051 continue;
4051 4052 }
4052 4053 hmeshift = HME_HASH_SHIFT(ttesz);
4053 4054 hblktag.htag_bspage = HME_HASH_BSPAGE(va, hmeshift);
4054 4055 hblktag.htag_rehash = ttesz;
4055 4056 hblktag.htag_rid = rid;
4056 4057 hblktag.htag_id = srdp;
4057 4058 hmebp = HME_HASH_FUNCTION(srdp, va, hmeshift);
4058 4059 SFMMU_HASH_LOCK(hmebp);
4059 4060 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk,
4060 4061 &list);
4061 4062 if (hmeblkp == NULL) {
4062 4063 SFMMU_HASH_UNLOCK(hmebp);
4063 4064 ttesz--;
4064 4065 continue;
4065 4066 }
4066 4067 ASSERT(hmeblkp->hblk_shared);
4067 4068 va = sfmmu_hblk_unlock(hmeblkp, va, eaddr);
4068 4069 ASSERT(va >= eaddr ||
4069 4070 IS_P2ALIGNED((uintptr_t)va, TTEBYTES(ttesz)));
4070 4071 SFMMU_HASH_UNLOCK(hmebp);
4071 4072 break;
4072 4073 }
4073 4074 if (ttesz < HBLK_MIN_TTESZ) {
4074 4075 panic("hat_unlock_region: addr not found "
4075 4076 "addr %p hat %p", (void *)va, (void *)sfmmup);
4076 4077 }
4077 4078 }
4078 4079 sfmmu_hblks_list_purge(&list, 0);
4079 4080 }
4080 4081
4081 4082 /*
4082 4083 * Function to unlock a range of addresses in an hmeblk. It returns the
4083 4084 * next address that needs to be unlocked.
4084 4085 * Should be called with the hash lock held.
4085 4086 */
4086 4087 static caddr_t
4087 4088 sfmmu_hblk_unlock(struct hme_blk *hmeblkp, caddr_t addr, caddr_t endaddr)
4088 4089 {
4089 4090 struct sf_hment *sfhme;
4090 4091 tte_t tteold, ttemod;
4091 4092 int ttesz, ret;
4092 4093
4093 4094 ASSERT(in_hblk_range(hmeblkp, addr));
4094 4095 ASSERT(hmeblkp->hblk_shw_bit == 0);
4095 4096
4096 4097 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
4097 4098 ttesz = get_hblk_ttesz(hmeblkp);
4098 4099
4099 4100 HBLKTOHME(sfhme, hmeblkp, addr);
4100 4101 while (addr < endaddr) {
4101 4102 readtte:
4102 4103 sfmmu_copytte(&sfhme->hme_tte, &tteold);
4103 4104 if (TTE_IS_VALID(&tteold)) {
4104 4105
4105 4106 ttemod = tteold;
4106 4107
4107 4108 ret = sfmmu_modifytte_try(&tteold, &ttemod,
4108 4109 &sfhme->hme_tte);
4109 4110
4110 4111 if (ret < 0)
4111 4112 goto readtte;
4112 4113
4113 4114 if (hmeblkp->hblk_lckcnt == 0)
4114 4115 panic("zero hblk lckcnt");
4115 4116
4116 4117 if (((uintptr_t)addr + TTEBYTES(ttesz)) >
4117 4118 (uintptr_t)endaddr)
4118 4119 panic("can't unlock large tte");
4119 4120
4120 4121 ASSERT(hmeblkp->hblk_lckcnt > 0);
4121 4122 atomic_dec_32(&hmeblkp->hblk_lckcnt);
4122 4123 HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK);
4123 4124 } else {
4124 4125 panic("sfmmu_hblk_unlock: invalid tte");
4125 4126 }
4126 4127 addr += TTEBYTES(ttesz);
4127 4128 sfhme++;
4128 4129 }
4129 4130 return (addr);
4130 4131 }
4131 4132
4132 4133 /*
4133 4134 * Physical Address Mapping Framework
4134 4135 *
4135 4136 * General rules:
4136 4137 *
4137 4138 * (1) Applies only to seg_kmem memory pages. To make things easier,
4138 4139 * seg_kpm addresses are also accepted by the routines, but nothing
4139 4140 * is done with them since by definition their PA mappings are static.
4140 4141 * (2) hat_add_callback() may only be called while holding the page lock
4141 4142 * SE_SHARED or SE_EXCL of the underlying page (e.g., as_pagelock()),
4142 4143 * or passing HAC_PAGELOCK flag.
4143 4144 * (3) prehandler() and posthandler() may not call hat_add_callback() or
4144 4145 * hat_delete_callback(), nor should they allocate memory. Post quiesce
4145 4146 * callbacks may not sleep or acquire adaptive mutex locks.
4146 4147 * (4) Either prehandler() or posthandler() (but not both) may be specified
4147 4148 * as being NULL. Specifying an errhandler() is optional.
4148 4149 *
4149 4150 * Details of using the framework:
4150 4151 *
4151 4152 * registering a callback (hat_register_callback())
4152 4153 *
4153 4154 * Pass prehandler, posthandler, errhandler addresses
4154 4155 * as described below. If the capture_cpus argument is nonzero,
4155 4156 * the suspend callback to the prehandler will occur with CPUs
4156 4157 * captured and executing xc_loop(), and the CPUs will remain
4157 4158 * captured until after the posthandler suspend callback
4158 4159 * occurs.
4159 4160 *
4160 4161 * adding a callback (hat_add_callback())
4161 4162 *
4162 4163 * as_pagelock();
4163 4164 * hat_add_callback();
4164 4165 * save returned pfn in private data structures or program registers;
4165 4166 * as_pageunlock();
4166 4167 *
4167 4168 * prehandler()
4168 4169 *
4169 4170 * Stop all accesses by physical address to this memory page.
4170 4171 * Called twice: the first, PRESUSPEND, is a context safe to acquire
4171 4172 * adaptive locks. The second, SUSPEND, is called at high PIL with
4172 4173 * CPUs captured so adaptive locks may NOT be acquired (and all spin
4173 4174 * locks must be XCALL_PIL or higher locks).
4174 4175 *
4175 4176 * May return the following errors:
4176 4177 * EIO: A fatal error has occurred. This will result in panic.
4177 4178 * EAGAIN: The page cannot be suspended. This will fail the
4178 4179 * relocation.
4179 4180 * 0: Success.
4180 4181 *
4181 4182 * posthandler()
4182 4183 *
4183 4184 * Save new pfn in private data structures or program registers;
4184 4185 * not allowed to fail (non-zero return values will result in panic).
4185 4186 *
4186 4187 * errhandler()
4187 4188 *
4188 4189 * Called when an error occurs related to the callback. Currently
4189 4190 * the only such error is HAT_CB_ERR_LEAKED which indicates that
4190 4191 * a page is being freed, but there are still outstanding callback(s)
4191 4192 * registered on the page.
4192 4193 *
4193 4194 * removing a callback (hat_delete_callback(); e.g., prior to freeing memory)
4194 4195 *
4195 4196 * stop using physical address
4196 4197 * hat_delete_callback();
4197 4198 *
4198 4199 */
4199 4200
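/*
 * A minimal usage sketch for the framework above. The MYDRV_CB_KEY
 * constant, the mydrv_* names, the handler parameter names and the trivial
 * handler bodies are assumptions made for illustration; only the
 * hat_register_callback(), hat_add_callback() and hat_delete_callback()
 * signatures and the HAC_* flags come from this file.
 *
 *	static id_t mydrv_cb_id;
 *
 *	The prehandler stops physical-address access to the page; it may
 *	return EAGAIN to refuse the relocation:
 *
 *	static int
 *	mydrv_prehandler(caddr_t addr, uint_t len, uint_t flags, void *pvt)
 *	{
 *		return (0);
 *	}
 *
 *	The posthandler records the new pfn and must not fail:
 *
 *	static int
 *	mydrv_posthandler(caddr_t addr, uint_t len, uint_t flags, void *pvt,
 *	    pfn_t newpfn)
 *	{
 *		return (0);
 *	}
 *
 *	Registration, adding a callback on a kernel buffer, and removing it
 *	once the physical address is no longer used:
 *
 *	static void
 *	mydrv_track(caddr_t buf, uint_t len, void *pvt)
 *	{
 *		pfn_t pfn;
 *		void *cookie;
 *
 *		mydrv_cb_id = hat_register_callback(MYDRV_CB_KEY,
 *		    mydrv_prehandler, mydrv_posthandler, NULL, 0);
 *		if (hat_add_callback(mydrv_cb_id, buf, len,
 *		    HAC_SLEEP | HAC_PAGELOCK, pvt, &pfn, &cookie) != 0)
 *			return;
 *		hat_delete_callback(buf, len, pvt, HAC_PAGELOCK, cookie);
 *	}
 */
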
4200 4201 /*
4201 4202 * Register a callback class. Each subsystem should do this once and
4202 4203 * cache the id_t returned for use in setting up and tearing down callbacks.
4203 4204 *
4204 4205 * There is no facility for removing callback IDs once they are created;
4205 4206 * the "key" should be unique for each module, so in case a module is unloaded
4206 4207 * and subsequently re-loaded, we can recycle the module's previous entry.
4207 4208 */
4208 4209 id_t
4209 4210 hat_register_callback(int key,
4210 4211 int (*prehandler)(caddr_t, uint_t, uint_t, void *),
4211 4212 int (*posthandler)(caddr_t, uint_t, uint_t, void *, pfn_t),
4212 4213 int (*errhandler)(caddr_t, uint_t, uint_t, void *),
4213 4214 int capture_cpus)
4214 4215 {
4215 4216 id_t id;
4216 4217
4217 4218 /*
4218 4219 * Search the table for a pre-existing callback associated with
4219 4220 * the identifier "key". If one exists, we re-use that entry in
4220 4221 * the table for this instance, otherwise we assign the next
4221 4222 * available table slot.
4222 4223 */
4223 4224 for (id = 0; id < sfmmu_max_cb_id; id++) {
4224 4225 if (sfmmu_cb_table[id].key == key)
4225 4226 break;
4226 4227 }
4227 4228
4228 4229 if (id == sfmmu_max_cb_id) {
4229 4230 id = sfmmu_cb_nextid++;
4230 4231 if (id >= sfmmu_max_cb_id)
4231 4232 panic("hat_register_callback: out of callback IDs");
4232 4233 }
4233 4234
4234 4235 ASSERT(prehandler != NULL || posthandler != NULL);
4235 4236
4236 4237 sfmmu_cb_table[id].key = key;
4237 4238 sfmmu_cb_table[id].prehandler = prehandler;
4238 4239 sfmmu_cb_table[id].posthandler = posthandler;
4239 4240 sfmmu_cb_table[id].errhandler = errhandler;
4240 4241 sfmmu_cb_table[id].capture_cpus = capture_cpus;
4241 4242
4242 4243 return (id);
4243 4244 }
4244 4245
4245 4246 #define HAC_COOKIE_NONE (void *)-1
4246 4247
4247 4248 /*
4248 4249 * Add relocation callbacks to the specified addr/len which will be called
4249 4250 * when relocating the associated page. See the description of pre and
4250 4251 * posthandler above for more details.
4251 4252 *
4252 4253 * If HAC_PAGELOCK is included in flags, the underlying memory page is
4253 4254 * locked internally so the caller must be able to deal with the callback
4254 4255 * running even before this function has returned. If HAC_PAGELOCK is not
4255 4256 * set, it is assumed that the underlying memory pages are locked.
4256 4257 *
4257 4258 * Since the caller must track the individual page boundaries anyway,
4258 4259 * we only allow a callback to be added to a single page (large
4259 4260 * or small). Thus [addr, addr + len) MUST be contained within a single
4260 4261 * page.
4261 4262 *
4262 4263 * Registering multiple callbacks on the same [addr, addr+len) is supported,
4263 4264 * _provided_that_ a unique parameter is specified for each callback.
4264 4265 * If multiple callbacks are registered on the same range the callback will
4265 4266 * be invoked with each unique parameter. Registering the same callback with
4266 4267 * the same argument more than once will result in corrupted kernel state.
4267 4268 *
4268 4269 * Returns the pfn of the underlying kernel page in *rpfn
4269 4270 * on success, or PFN_INVALID on failure.
4270 4271 *
4271 4272 * cookiep (if passed) provides storage space for an opaque cookie
4272 4273 * to return later to hat_delete_callback(). This cookie makes the callback
4273 4274 * deletion significantly quicker by avoiding a potentially lengthy hash
4274 4275 * search.
4275 4276 *
4276 4277 * Return values:
4277 4278 * 0: success
4278 4279 * ENOMEM: memory allocation failure (e.g. flags was passed as HAC_NOSLEEP)
4279 4280 * EINVAL: callback ID is not valid
4280 4281 * ENXIO: ["vaddr", "vaddr" + len) is not mapped in the kernel's address
4281 4282 * space
4282 4283 * ERANGE: ["vaddr", "vaddr" + len) crosses a page boundary
4283 4284 */
4284 4285 int
4285 4286 hat_add_callback(id_t callback_id, caddr_t vaddr, uint_t len, uint_t flags,
4286 4287 void *pvt, pfn_t *rpfn, void **cookiep)
4287 4288 {
4288 4289 struct hmehash_bucket *hmebp;
4289 4290 hmeblk_tag hblktag;
4290 4291 struct hme_blk *hmeblkp;
4291 4292 int hmeshift, hashno;
4292 4293 caddr_t saddr, eaddr, baseaddr;
4293 4294 struct pa_hment *pahmep;
4294 4295 struct sf_hment *sfhmep, *osfhmep;
4295 4296 kmutex_t *pml;
4296 4297 tte_t tte;
4297 4298 page_t *pp;
4298 4299 vnode_t *vp;
4299 4300 u_offset_t off;
4300 4301 pfn_t pfn;
4301 4302 int kmflags = (flags & HAC_SLEEP)? KM_SLEEP : KM_NOSLEEP;
4302 4303 int locked = 0;
4303 4304
4304 4305 /*
4305 4306 * For KPM mappings, just return the physical address since we
4306 4307 * don't need to register any callbacks.
4307 4308 */
4308 4309 if (IS_KPM_ADDR(vaddr)) {
4309 4310 uint64_t paddr;
4310 4311 SFMMU_KPM_VTOP(vaddr, paddr);
4311 4312 *rpfn = btop(paddr);
4312 4313 if (cookiep != NULL)
4313 4314 *cookiep = HAC_COOKIE_NONE;
4314 4315 return (0);
4315 4316 }
4316 4317
4317 4318 if (callback_id < (id_t)0 || callback_id >= sfmmu_cb_nextid) {
4318 4319 *rpfn = PFN_INVALID;
4319 4320 return (EINVAL);
4320 4321 }
4321 4322
4322 4323 if ((pahmep = kmem_cache_alloc(pa_hment_cache, kmflags)) == NULL) {
4323 4324 *rpfn = PFN_INVALID;
4324 4325 return (ENOMEM);
4325 4326 }
4326 4327
4327 4328 sfhmep = &pahmep->sfment;
4328 4329
4329 4330 saddr = (caddr_t)((uintptr_t)vaddr & MMU_PAGEMASK);
4330 4331 eaddr = saddr + len;
4331 4332
4332 4333 rehash:
4333 4334 /* Find the mapping(s) for this page */
4334 4335 for (hashno = TTE64K, hmeblkp = NULL;
4335 4336 hmeblkp == NULL && hashno <= mmu_hashcnt;
4336 4337 hashno++) {
4337 4338 hmeshift = HME_HASH_SHIFT(hashno);
4338 4339 hblktag.htag_id = ksfmmup;
4339 4340 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
4340 4341 hblktag.htag_bspage = HME_HASH_BSPAGE(saddr, hmeshift);
4341 4342 hblktag.htag_rehash = hashno;
4342 4343 hmebp = HME_HASH_FUNCTION(ksfmmup, saddr, hmeshift);
4343 4344
4344 4345 SFMMU_HASH_LOCK(hmebp);
4345 4346
4346 4347 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp);
4347 4348
4348 4349 if (hmeblkp == NULL)
4349 4350 SFMMU_HASH_UNLOCK(hmebp);
4350 4351 }
4351 4352
4352 4353 if (hmeblkp == NULL) {
4353 4354 kmem_cache_free(pa_hment_cache, pahmep);
4354 4355 *rpfn = PFN_INVALID;
4355 4356 return (ENXIO);
4356 4357 }
4357 4358
4358 4359 ASSERT(!hmeblkp->hblk_shared);
4359 4360
4360 4361 HBLKTOHME(osfhmep, hmeblkp, saddr);
4361 4362 sfmmu_copytte(&osfhmep->hme_tte, &tte);
4362 4363
4363 4364 if (!TTE_IS_VALID(&tte)) {
4364 4365 SFMMU_HASH_UNLOCK(hmebp);
4365 4366 kmem_cache_free(pa_hment_cache, pahmep);
4366 4367 *rpfn = PFN_INVALID;
4367 4368 return (ENXIO);
4368 4369 }
4369 4370
4370 4371 /*
4371 4372 * Make sure the boundaries for the callback fall within this
4372 4373 * single mapping.
4373 4374 */
4374 4375 baseaddr = (caddr_t)get_hblk_base(hmeblkp);
4375 4376 ASSERT(saddr >= baseaddr);
4376 4377 if (eaddr > saddr + TTEBYTES(TTE_CSZ(&tte))) {
4377 4378 SFMMU_HASH_UNLOCK(hmebp);
4378 4379 kmem_cache_free(pa_hment_cache, pahmep);
4379 4380 *rpfn = PFN_INVALID;
4380 4381 return (ERANGE);
4381 4382 }
4382 4383
4383 4384 pfn = sfmmu_ttetopfn(&tte, vaddr);
4384 4385
4385 4386 /*
4386 4387 * The pfn may not have a page_t underneath in which case we
4387 4388 * just return it. This can happen if we are doing I/O to a
4388 4389 * static portion of the kernel's address space, for instance.
4389 4390 */
4390 4391 pp = osfhmep->hme_page;
4391 4392 if (pp == NULL) {
4392 4393 SFMMU_HASH_UNLOCK(hmebp);
4393 4394 kmem_cache_free(pa_hment_cache, pahmep);
4394 4395 *rpfn = pfn;
4395 4396 if (cookiep)
4396 4397 *cookiep = HAC_COOKIE_NONE;
4397 4398 return (0);
4398 4399 }
4399 4400 ASSERT(pp == PP_PAGEROOT(pp));
4400 4401
4401 4402 vp = pp->p_vnode;
4402 4403 off = pp->p_offset;
4403 4404
4404 4405 pml = sfmmu_mlist_enter(pp);
4405 4406
4406 4407 if (flags & HAC_PAGELOCK) {
4407 4408 if (!page_trylock(pp, SE_SHARED)) {
4408 4409 /*
4409 4410 * Somebody is holding SE_EXCL lock. Might
4410 4411 * even be hat_page_relocate(). Drop all
4411 4412 * our locks, look up the page in &kvp, and
4412 4413 * retry. If it doesn't exist in &kvp and &zvp,
4413 4414 * then we must be dealing with a kernel mapped
4414 4415 * page which doesn't actually belong to
4415 4416 * segkmem so we punt.
4416 4417 */
4417 4418 sfmmu_mlist_exit(pml);
4418 4419 SFMMU_HASH_UNLOCK(hmebp);
4419 4420 pp = page_lookup(&kvp, (u_offset_t)saddr, SE_SHARED);
4420 4421
4421 4422 /* check zvp before giving up */
4422 4423 if (pp == NULL)
4423 4424 pp = page_lookup(&zvp, (u_offset_t)saddr,
4424 4425 SE_SHARED);
4425 4426
4426 4427 /* Okay, we didn't find it, give up */
4427 4428 if (pp == NULL) {
4428 4429 kmem_cache_free(pa_hment_cache, pahmep);
4429 4430 *rpfn = pfn;
4430 4431 if (cookiep)
4431 4432 *cookiep = HAC_COOKIE_NONE;
4432 4433 return (0);
4433 4434 }
4434 4435 page_unlock(pp);
4435 4436 goto rehash;
4436 4437 }
4437 4438 locked = 1;
4438 4439 }
4439 4440
4440 4441 if (!PAGE_LOCKED(pp) && !panicstr)
4441 4442 panic("hat_add_callback: page 0x%p not locked", (void *)pp);
4442 4443
4443 4444 if (osfhmep->hme_page != pp || pp->p_vnode != vp ||
4444 4445 pp->p_offset != off) {
4445 4446 /*
4446 4447 * The page moved before we got our hands on it. Drop
4447 4448 * all the locks and try again.
4448 4449 */
4449 4450 ASSERT((flags & HAC_PAGELOCK) != 0);
4450 4451 sfmmu_mlist_exit(pml);
4451 4452 SFMMU_HASH_UNLOCK(hmebp);
4452 4453 page_unlock(pp);
4453 4454 locked = 0;
4454 4455 goto rehash;
4455 4456 }
4456 4457
4457 4458 if (!VN_ISKAS(vp)) {
4458 4459 /*
4459 4460 * This is not a segkmem page but another page which
4460 4461 * has been kernel mapped. It had better have at least
4461 4462 * a share lock on it. Return the pfn.
4462 4463 */
4463 4464 sfmmu_mlist_exit(pml);
4464 4465 SFMMU_HASH_UNLOCK(hmebp);
4465 4466 if (locked)
4466 4467 page_unlock(pp);
4467 4468 kmem_cache_free(pa_hment_cache, pahmep);
4468 4469 ASSERT(PAGE_LOCKED(pp));
4469 4470 *rpfn = pfn;
4470 4471 if (cookiep)
4471 4472 *cookiep = HAC_COOKIE_NONE;
4472 4473 return (0);
4473 4474 }
4474 4475
4475 4476 /*
4476 4477 * Setup this pa_hment and link its embedded dummy sf_hment into
4477 4478 * the mapping list.
4478 4479 */
4479 4480 pp->p_share++;
4480 4481 pahmep->cb_id = callback_id;
4481 4482 pahmep->addr = vaddr;
4482 4483 pahmep->len = len;
4483 4484 pahmep->refcnt = 1;
4484 4485 pahmep->flags = 0;
4485 4486 pahmep->pvt = pvt;
4486 4487
4487 4488 sfhmep->hme_tte.ll = 0;
4488 4489 sfhmep->hme_data = pahmep;
4489 4490 sfhmep->hme_prev = osfhmep;
4490 4491 sfhmep->hme_next = osfhmep->hme_next;
4491 4492
4492 4493 if (osfhmep->hme_next)
4493 4494 osfhmep->hme_next->hme_prev = sfhmep;
4494 4495
4495 4496 osfhmep->hme_next = sfhmep;
4496 4497
4497 4498 sfmmu_mlist_exit(pml);
4498 4499 SFMMU_HASH_UNLOCK(hmebp);
4499 4500
4500 4501 if (locked)
4501 4502 page_unlock(pp);
4502 4503
4503 4504 *rpfn = pfn;
4504 4505 if (cookiep)
4505 4506 *cookiep = (void *)pahmep;
4506 4507
4507 4508 return (0);
4508 4509 }
4509 4510
4510 4511 /*
4511 4512 * Remove the relocation callbacks from the specified addr/len.
4512 4513 */
4513 4514 void
4514 4515 hat_delete_callback(caddr_t vaddr, uint_t len, void *pvt, uint_t flags,
4515 4516 void *cookie)
4516 4517 {
4517 4518 struct hmehash_bucket *hmebp;
4518 4519 hmeblk_tag hblktag;
4519 4520 struct hme_blk *hmeblkp;
4520 4521 int hmeshift, hashno;
4521 4522 caddr_t saddr;
4522 4523 struct pa_hment *pahmep;
4523 4524 struct sf_hment *sfhmep, *osfhmep;
4524 4525 kmutex_t *pml;
4525 4526 tte_t tte;
4526 4527 page_t *pp;
4527 4528 vnode_t *vp;
4528 4529 u_offset_t off;
4529 4530 int locked = 0;
4530 4531
4531 4532 /*
4532 4533 * If the cookie is HAC_COOKIE_NONE then there is no pa_hment to
4533 4534 * remove so just return.
4534 4535 */
4535 4536 if (cookie == HAC_COOKIE_NONE || IS_KPM_ADDR(vaddr))
4536 4537 return;
4537 4538
4538 4539 saddr = (caddr_t)((uintptr_t)vaddr & MMU_PAGEMASK);
4539 4540
4540 4541 rehash:
4541 4542 /* Find the mapping(s) for this page */
4542 4543 for (hashno = TTE64K, hmeblkp = NULL;
4543 4544 hmeblkp == NULL && hashno <= mmu_hashcnt;
4544 4545 hashno++) {
4545 4546 hmeshift = HME_HASH_SHIFT(hashno);
4546 4547 hblktag.htag_id = ksfmmup;
4547 4548 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
4548 4549 hblktag.htag_bspage = HME_HASH_BSPAGE(saddr, hmeshift);
4549 4550 hblktag.htag_rehash = hashno;
4550 4551 hmebp = HME_HASH_FUNCTION(ksfmmup, saddr, hmeshift);
4551 4552
4552 4553 SFMMU_HASH_LOCK(hmebp);
4553 4554
4554 4555 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp);
4555 4556
4556 4557 if (hmeblkp == NULL)
4557 4558 SFMMU_HASH_UNLOCK(hmebp);
4558 4559 }
4559 4560
4560 4561 if (hmeblkp == NULL)
4561 4562 return;
4562 4563
4563 4564 ASSERT(!hmeblkp->hblk_shared);
4564 4565
4565 4566 HBLKTOHME(osfhmep, hmeblkp, saddr);
4566 4567
4567 4568 sfmmu_copytte(&osfhmep->hme_tte, &tte);
4568 4569 if (!TTE_IS_VALID(&tte)) {
4569 4570 SFMMU_HASH_UNLOCK(hmebp);
4570 4571 return;
4571 4572 }
4572 4573
4573 4574 pp = osfhmep->hme_page;
4574 4575 if (pp == NULL) {
4575 4576 SFMMU_HASH_UNLOCK(hmebp);
4576 4577 ASSERT(cookie == NULL);
4577 4578 return;
4578 4579 }
4579 4580
4580 4581 vp = pp->p_vnode;
4581 4582 off = pp->p_offset;
4582 4583
4583 4584 pml = sfmmu_mlist_enter(pp);
4584 4585
4585 4586 if (flags & HAC_PAGELOCK) {
4586 4587 if (!page_trylock(pp, SE_SHARED)) {
4587 4588 /*
4588 4589 * Somebody is holding SE_EXCL lock. Might
4589 4590 * even be hat_page_relocate(). Drop all
4590 4591 * our locks, look up the page in &kvp, and
4591 4592 * retry. If it doesn't exist in &kvp and &zvp,
4592 4593 * then we must be dealing with a kernel mapped
4593 4594 * page which doesn't actually belong to
4594 4595 * segkmem so we punt.
4595 4596 */
4596 4597 sfmmu_mlist_exit(pml);
4597 4598 SFMMU_HASH_UNLOCK(hmebp);
4598 4599 pp = page_lookup(&kvp, (u_offset_t)saddr, SE_SHARED);
4599 4600 /* check zvp before giving up */
4600 4601 if (pp == NULL)
4601 4602 pp = page_lookup(&zvp, (u_offset_t)saddr,
4602 4603 SE_SHARED);
4603 4604
4604 4605 if (pp == NULL) {
4605 4606 ASSERT(cookie == NULL);
4606 4607 return;
4607 4608 }
4608 4609 page_unlock(pp);
4609 4610 goto rehash;
4610 4611 }
4611 4612 locked = 1;
4612 4613 }
4613 4614
4614 4615 ASSERT(PAGE_LOCKED(pp));
4615 4616
4616 4617 if (osfhmep->hme_page != pp || pp->p_vnode != vp ||
4617 4618 pp->p_offset != off) {
4618 4619 /*
4619 4620 * The page moved before we got our hands on it. Drop
4620 4621 * all the locks and try again.
4621 4622 */
4622 4623 ASSERT((flags & HAC_PAGELOCK) != 0);
4623 4624 sfmmu_mlist_exit(pml);
4624 4625 SFMMU_HASH_UNLOCK(hmebp);
4625 4626 page_unlock(pp);
4626 4627 locked = 0;
4627 4628 goto rehash;
4628 4629 }
4629 4630
4630 4631 if (!VN_ISKAS(vp)) {
4631 4632 /*
4632 4633 * This is not a segkmem page but another page which
4633 4634 * has been kernel mapped.
4634 4635 */
4635 4636 sfmmu_mlist_exit(pml);
4636 4637 SFMMU_HASH_UNLOCK(hmebp);
4637 4638 if (locked)
4638 4639 page_unlock(pp);
4639 4640 ASSERT(cookie == NULL);
4640 4641 return;
4641 4642 }
4642 4643
4643 4644 if (cookie != NULL) {
4644 4645 pahmep = (struct pa_hment *)cookie;
4645 4646 sfhmep = &pahmep->sfment;
4646 4647 } else {
4647 4648 for (sfhmep = pp->p_mapping; sfhmep != NULL;
4648 4649 sfhmep = sfhmep->hme_next) {
4649 4650
4650 4651 /*
4651 4652 * skip va<->pa mappings
4652 4653 */
4653 4654 if (!IS_PAHME(sfhmep))
4654 4655 continue;
4655 4656
4656 4657 pahmep = sfhmep->hme_data;
4657 4658 ASSERT(pahmep != NULL);
4658 4659
4659 4660 /*
4660 4661 * if pa_hment matches, remove it
4661 4662 */
4662 4663 if ((pahmep->pvt == pvt) &&
4663 4664 (pahmep->addr == vaddr) &&
4664 4665 (pahmep->len == len)) {
4665 4666 break;
4666 4667 }
4667 4668 }
4668 4669 }
4669 4670
4670 4671 if (sfhmep == NULL) {
4671 4672 if (!panicstr) {
4672 4673 panic("hat_delete_callback: pa_hment not found, pp %p",
4673 4674 (void *)pp);
4674 4675 }
4675 4676 return;
4676 4677 }
4677 4678
4678 4679 /*
4679 4680 * Note: at this point a valid kernel mapping must still be
4680 4681 * present on this page.
4681 4682 */
4682 4683 pp->p_share--;
4683 4684 if (pp->p_share <= 0)
4684 4685 panic("hat_delete_callback: zero p_share");
4685 4686
4686 4687 if (--pahmep->refcnt == 0) {
4687 4688 if (pahmep->flags != 0)
4688 4689 panic("hat_delete_callback: pa_hment is busy");
4689 4690
4690 4691 /*
4691 4692 * Remove sfhmep from the mapping list for the page.
4692 4693 */
4693 4694 if (sfhmep->hme_prev) {
4694 4695 sfhmep->hme_prev->hme_next = sfhmep->hme_next;
4695 4696 } else {
4696 4697 pp->p_mapping = sfhmep->hme_next;
4697 4698 }
4698 4699
4699 4700 if (sfhmep->hme_next)
4700 4701 sfhmep->hme_next->hme_prev = sfhmep->hme_prev;
4701 4702
4702 4703 sfmmu_mlist_exit(pml);
4703 4704 SFMMU_HASH_UNLOCK(hmebp);
4704 4705
4705 4706 if (locked)
4706 4707 page_unlock(pp);
4707 4708
4708 4709 kmem_cache_free(pa_hment_cache, pahmep);
4709 4710 return;
4710 4711 }
4711 4712
4712 4713 sfmmu_mlist_exit(pml);
4713 4714 SFMMU_HASH_UNLOCK(hmebp);
4714 4715 if (locked)
4715 4716 page_unlock(pp);
4716 4717 }
4717 4718
4718 4719 /*
4719 4720 * hat_probe returns 1 if the translation for the address 'addr' is
4720 4721 * loaded, zero otherwise.
4721 4722 *
4722 4723 * hat_probe should be used only for advisory purposes because it may
4723 4724 * occasionally return the wrong value. The implementation must guarantee that
4724 4725 * returning the wrong value is a very rare event. hat_probe is used
4725 4726 * to implement optimizations in the segment drivers.
4726 4727 *
4727 4728 */
4728 4729 int
4729 4730 hat_probe(struct hat *sfmmup, caddr_t addr)
4730 4731 {
4731 4732 pfn_t pfn;
4732 4733 tte_t tte;
4733 4734
4734 4735 ASSERT(sfmmup != NULL);
4735 4736
4736 4737 ASSERT((sfmmup == ksfmmup) || AS_LOCK_HELD(sfmmup->sfmmu_as));
4737 4738
4738 4739 if (sfmmup == ksfmmup) {
4739 4740 while ((pfn = sfmmu_vatopfn(addr, sfmmup, &tte))
4740 4741 == PFN_SUSPENDED) {
4741 4742 sfmmu_vatopfn_suspended(addr, sfmmup, &tte);
4742 4743 }
4743 4744 } else {
4744 4745 pfn = sfmmu_uvatopfn(addr, sfmmup, NULL);
4745 4746 }
4746 4747
4747 4748 if (pfn != PFN_INVALID)
4748 4749 return (1);
4749 4750 else
4750 4751 return (0);
4751 4752 }
4752 4753
4753 4754 ssize_t
4754 4755 hat_getpagesize(struct hat *sfmmup, caddr_t addr)
4755 4756 {
4756 4757 tte_t tte;
4757 4758
4758 4759 if (sfmmup == ksfmmup) {
4759 4760 if (sfmmu_vatopfn(addr, sfmmup, &tte) == PFN_INVALID) {
4760 4761 return (-1);
4761 4762 }
4762 4763 } else {
4763 4764 if (sfmmu_uvatopfn(addr, sfmmup, &tte) == PFN_INVALID) {
4764 4765 return (-1);
4765 4766 }
4766 4767 }
4767 4768
4768 4769 ASSERT(TTE_IS_VALID(&tte));
4769 4770 return (TTEBYTES(TTE_CSZ(&tte)));
4770 4771 }
4771 4772
4772 4773 uint_t
4773 4774 hat_getattr(struct hat *sfmmup, caddr_t addr, uint_t *attr)
4774 4775 {
4775 4776 tte_t tte;
4776 4777
4777 4778 if (sfmmup == ksfmmup) {
4778 4779 if (sfmmu_vatopfn(addr, sfmmup, &tte) == PFN_INVALID) {
4779 4780 tte.ll = 0;
4780 4781 }
4781 4782 } else {
4782 4783 if (sfmmu_uvatopfn(addr, sfmmup, &tte) == PFN_INVALID) {
4783 4784 tte.ll = 0;
4784 4785 }
4785 4786 }
4786 4787 if (TTE_IS_VALID(&tte)) {
4787 4788 *attr = sfmmu_ptov_attr(&tte);
4788 4789 return (0);
4789 4790 }
4790 4791 *attr = 0;
4791 4792 return ((uint_t)0xffffffff);
4792 4793 }
4793 4794
4794 4795 /*
4795 4796 * Enables more attributes on specified address range (ie. logical OR)
4796 4797 */
4797 4798 void
4798 4799 hat_setattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr)
4799 4800 {
4800 4801 ASSERT(hat->sfmmu_as != NULL);
4801 4802
4802 4803 sfmmu_chgattr(hat, addr, len, attr, SFMMU_SETATTR);
4803 4804 }
4804 4805
4805 4806 /*
4806 4807 * Assigns attributes to the specified address range. All the attributes
4807 4808 * are specified.
4808 4809 */
4809 4810 void
4810 4811 hat_chgattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr)
4811 4812 {
4812 4813 ASSERT(hat->sfmmu_as != NULL);
4813 4814
4814 4815 sfmmu_chgattr(hat, addr, len, attr, SFMMU_CHGATTR);
4815 4816 }
4816 4817
4817 4818 /*
4818 4819 * Remove attributes on the specified address range (ie. logical NAND)
4819 4820 */
4820 4821 void
4821 4822 hat_clrattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr)
4822 4823 {
4823 4824 ASSERT(hat->sfmmu_as != NULL);
4824 4825
4825 4826 sfmmu_chgattr(hat, addr, len, attr, SFMMU_CLRATTR);
4826 4827 }
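
The three entry points above differ only in the mode handed to sfmmu_chgattr(): SFMMU_SETATTR ORs attributes in, SFMMU_CHGATTR assigns them outright, and SFMMU_CLRATTR NANDs them away. A minimal standalone sketch of those three bit operations, using a plain integer and locally defined PROT_* values in place of the real TTE state (illustration only, not kernel code):

    /*
     * Standalone sketch of the set/chg/clr attribute semantics described
     * above.  The "attrs" word and the PROT_* values stand in for the
     * real TTE state; this is illustration only, not kernel code.
     */
    #include <stdio.h>

    #define PROT_READ   0x1
    #define PROT_WRITE  0x2
    #define PROT_EXEC   0x4

    int
    main(void)
    {
        unsigned int attrs = PROT_READ | PROT_EXEC;

        attrs |= PROT_WRITE;                    /* hat_setattr: logical OR */
        printf("after set: 0x%x\n", attrs);

        attrs = PROT_READ | PROT_WRITE;         /* hat_chgattr: assign all */
        printf("after chg: 0x%x\n", attrs);

        attrs &= ~PROT_WRITE;                   /* hat_clrattr: logical NAND */
        printf("after clr: 0x%x\n", attrs);
        return (0);
    }
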
4827 4828
4828 4829 /*
4829 4830 * Change attributes on an address range to that specified by attr and mode.
4830 4831 */
4831 4832 static void
4832 4833 sfmmu_chgattr(struct hat *sfmmup, caddr_t addr, size_t len, uint_t attr,
4833 4834 int mode)
4834 4835 {
4835 4836 struct hmehash_bucket *hmebp;
4836 4837 hmeblk_tag hblktag;
4837 4838 int hmeshift, hashno = 1;
4838 4839 struct hme_blk *hmeblkp, *list = NULL;
4839 4840 caddr_t endaddr;
4840 4841 cpuset_t cpuset;
4841 4842 demap_range_t dmr;
4842 4843
4843 4844 CPUSET_ZERO(cpuset);
4844 4845
4845 4846 ASSERT((sfmmup == ksfmmup) || AS_LOCK_HELD(sfmmup->sfmmu_as));
4846 4847 ASSERT((len & MMU_PAGEOFFSET) == 0);
4847 4848 ASSERT(((uintptr_t)addr & MMU_PAGEOFFSET) == 0);
4848 4849
4849 4850 if ((attr & PROT_USER) && (mode != SFMMU_CLRATTR) &&
4850 4851 ((addr + len) > (caddr_t)USERLIMIT)) {
4851 4852 panic("user addr %p in kernel space",
4852 4853 (void *)addr);
4853 4854 }
4854 4855
4855 4856 endaddr = addr + len;
4856 4857 hblktag.htag_id = sfmmup;
4857 4858 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
4858 4859 DEMAP_RANGE_INIT(sfmmup, &dmr);
4859 4860
4860 4861 while (addr < endaddr) {
4861 4862 hmeshift = HME_HASH_SHIFT(hashno);
4862 4863 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
4863 4864 hblktag.htag_rehash = hashno;
4864 4865 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
4865 4866
4866 4867 SFMMU_HASH_LOCK(hmebp);
4867 4868
4868 4869 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
4869 4870 if (hmeblkp != NULL) {
4870 4871 ASSERT(!hmeblkp->hblk_shared);
4871 4872 /*
4872 4873 * We've encountered a shadow hmeblk so skip the range
4873 4874 * of the next smaller mapping size.
4874 4875 */
4875 4876 if (hmeblkp->hblk_shw_bit) {
4876 4877 ASSERT(sfmmup != ksfmmup);
4877 4878 ASSERT(hashno > 1);
4878 4879 addr = (caddr_t)P2END((uintptr_t)addr,
4879 4880 TTEBYTES(hashno - 1));
4880 4881 } else {
4881 4882 addr = sfmmu_hblk_chgattr(sfmmup,
4882 4883 hmeblkp, addr, endaddr, &dmr, attr, mode);
4883 4884 }
4884 4885 SFMMU_HASH_UNLOCK(hmebp);
4885 4886 hashno = 1;
4886 4887 continue;
4887 4888 }
4888 4889 SFMMU_HASH_UNLOCK(hmebp);
4889 4890
4890 4891 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) {
4891 4892 /*
4892 4893 * We have traversed the whole list and rehashed
4893 4894 * if necessary without finding the address to chgattr.
4894 4895 * This is ok, so we increment the address by the
4895 4896 * smallest hmeblk range for kernel mappings or for
4896 4897 * user mappings with no large pages, and the largest
4897 4898 * hmeblk range, to account for shadow hmeblks, for
4898 4899 * user mappings with large pages and continue.
4899 4900 */
4900 4901 if (sfmmup == ksfmmup)
4901 4902 addr = (caddr_t)P2END((uintptr_t)addr,
4902 4903 TTEBYTES(1));
4903 4904 else
4904 4905 addr = (caddr_t)P2END((uintptr_t)addr,
4905 4906 TTEBYTES(hashno));
4906 4907 hashno = 1;
4907 4908 } else {
4908 4909 hashno++;
4909 4910 }
4910 4911 }
4911 4912
4912 4913 sfmmu_hblks_list_purge(&list, 0);
4913 4914 DEMAP_RANGE_FLUSH(&dmr);
4914 4915 cpuset = sfmmup->sfmmu_cpusran;
4915 4916 xt_sync(cpuset);
4916 4917 }
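
When no hmeblk is found, the loop above steps addr forward with P2END() to the next boundary of the current probe size, which moves past the block even when addr is already aligned. A hedged standalone sketch of that rounding, using a locally defined equivalent macro (the kernel's own P2END() comes from its sysmacros header):

    /*
     * Sketch of the P2END() stepping used above when no hmeblk is found.
     * MY_P2END is a local equivalent that is valid for power-of-two
     * alignments only; it is not the kernel macro.
     */
    #include <stdio.h>
    #include <stdint.h>

    #define MY_P2END(x, align)  (((x) | ((align) - 1)) + 1)

    int
    main(void)
    {
        uintptr_t addr = 0x10000;   /* already 64K aligned */

        /* steps past the current block even when addr is aligned */
        printf("0x%lx\n",
            (unsigned long)MY_P2END(addr, (uintptr_t)0x10000));   /* 0x20000 */
        printf("0x%lx\n",
            (unsigned long)MY_P2END(addr, (uintptr_t)0x400000));  /* 0x400000 */
        return (0);
    }
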
4917 4918
4918 4919 /*
4919 4920 * This function chgattr on a range of addresses in an hmeblk. It returns the
4920 4921 * next address that needs to be chgattr.
4921 4922 * It should be called with the hash lock held.
4922 4923 * XXX It should be possible to optimize chgattr by not flushing every time but
4923 4924 * on the other hand:
4924 4925 * 1. do one flush crosscall.
4925 4926 * 2. only flush if we are increasing permissions (make sure this will work)
4926 4927 */
4927 4928 static caddr_t
4928 4929 sfmmu_hblk_chgattr(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
4929 4930 caddr_t endaddr, demap_range_t *dmrp, uint_t attr, int mode)
4930 4931 {
4931 4932 tte_t tte, tteattr, tteflags, ttemod;
4932 4933 struct sf_hment *sfhmep;
4933 4934 int ttesz;
4934 4935 struct page *pp = NULL;
4935 4936 kmutex_t *pml, *pmtx;
4936 4937 int ret;
4937 4938 int use_demap_range;
4938 4939 #if defined(SF_ERRATA_57)
4939 4940 int check_exec;
4940 4941 #endif
4941 4942
4942 4943 ASSERT(in_hblk_range(hmeblkp, addr));
4943 4944 ASSERT(hmeblkp->hblk_shw_bit == 0);
4944 4945 ASSERT(!hmeblkp->hblk_shared);
4945 4946
4946 4947 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
4947 4948 ttesz = get_hblk_ttesz(hmeblkp);
4948 4949
4949 4950 /*
4950 4951 * Flush the current demap region if addresses have been
4951 4952 * skipped or the page size doesn't match.
4952 4953 */
4953 4954 use_demap_range = (TTEBYTES(ttesz) == DEMAP_RANGE_PGSZ(dmrp));
4954 4955 if (use_demap_range) {
4955 4956 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr);
4956 4957 } else if (dmrp != NULL) {
4957 4958 DEMAP_RANGE_FLUSH(dmrp);
4958 4959 }
4959 4960
4960 4961 tteattr.ll = sfmmu_vtop_attr(attr, mode, &tteflags);
4961 4962 #if defined(SF_ERRATA_57)
4962 4963 check_exec = (sfmmup != ksfmmup) &&
4963 4964 AS_TYPE_64BIT(sfmmup->sfmmu_as) &&
4964 4965 TTE_IS_EXECUTABLE(&tteattr);
4965 4966 #endif
4966 4967 HBLKTOHME(sfhmep, hmeblkp, addr);
4967 4968 while (addr < endaddr) {
4968 4969 sfmmu_copytte(&sfhmep->hme_tte, &tte);
4969 4970 if (TTE_IS_VALID(&tte)) {
4970 4971 if ((tte.ll & tteflags.ll) == tteattr.ll) {
4971 4972 /*
4972 4973 * if the new attr is the same as old
4973 4974 * continue
4974 4975 */
4975 4976 goto next_addr;
4976 4977 }
4977 4978 if (!TTE_IS_WRITABLE(&tteattr)) {
4978 4979 /*
4979 4980 * make sure we clear the hw modify bit if we are
4980 4981 * removing write protections
4981 4982 */
4982 4983 tteflags.tte_intlo |= TTE_HWWR_INT;
4983 4984 }
4984 4985
4985 4986 pml = NULL;
4986 4987 pp = sfhmep->hme_page;
4987 4988 if (pp) {
4988 4989 pml = sfmmu_mlist_enter(pp);
4989 4990 }
4990 4991
4991 4992 if (pp != sfhmep->hme_page) {
4992 4993 /*
4993 4994 * tte must have been unloaded.
4994 4995 */
4995 4996 ASSERT(pml);
4996 4997 sfmmu_mlist_exit(pml);
4997 4998 continue;
4998 4999 }
4999 5000
5000 5001 ASSERT(pp == NULL || sfmmu_mlist_held(pp));
5001 5002
5002 5003 ttemod = tte;
5003 5004 ttemod.ll = (ttemod.ll & ~tteflags.ll) | tteattr.ll;
5004 5005 ASSERT(TTE_TO_TTEPFN(&ttemod) == TTE_TO_TTEPFN(&tte));
5005 5006
5006 5007 #if defined(SF_ERRATA_57)
5007 5008 if (check_exec && addr < errata57_limit)
5008 5009 ttemod.tte_exec_perm = 0;
5009 5010 #endif
5010 5011 ret = sfmmu_modifytte_try(&tte, &ttemod,
5011 5012 &sfhmep->hme_tte);
5012 5013
5013 5014 if (ret < 0) {
5014 5015 /* tte changed underneath us */
5015 5016 if (pml) {
5016 5017 sfmmu_mlist_exit(pml);
5017 5018 }
5018 5019 continue;
5019 5020 }
5020 5021
5021 5022 if (tteflags.tte_intlo & TTE_HWWR_INT) {
5022 5023 /*
5023 5024 * need to sync if we are clearing modify bit.
5024 5025 */
5025 5026 sfmmu_ttesync(sfmmup, addr, &tte, pp);
5026 5027 }
5027 5028
5028 5029 if (pp && PP_ISRO(pp)) {
5029 5030 if (tteattr.tte_intlo & TTE_WRPRM_INT) {
5030 5031 pmtx = sfmmu_page_enter(pp);
5031 5032 PP_CLRRO(pp);
5032 5033 sfmmu_page_exit(pmtx);
5033 5034 }
5034 5035 }
5035 5036
5036 5037 if (ret > 0 && use_demap_range) {
5037 5038 DEMAP_RANGE_MARKPG(dmrp, addr);
5038 5039 } else if (ret > 0) {
5039 5040 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0);
5040 5041 }
5041 5042
5042 5043 if (pml) {
5043 5044 sfmmu_mlist_exit(pml);
5044 5045 }
5045 5046 }
5046 5047 next_addr:
5047 5048 addr += TTEBYTES(ttesz);
5048 5049 sfhmep++;
5049 5050 DEMAP_RANGE_NEXTPG(dmrp);
5050 5051 }
5051 5052 return (addr);
5052 5053 }
5053 5054
5054 5055 /*
5055 5056 * This routine converts virtual attributes to physical ones. It will
5056 5057 * update the tteflags field with the tte mask corresponding to the attributes
5057 5058 * affected and it returns the new attributes. It will also clear the modify
5058 5059 * bit if we are taking away write permission. This is necessary since the
5059 5060 * modify bit is the hardware permission bit and we need to clear it in order
5060 5061 * to detect write faults.
5061 5062 */
5062 5063 static uint64_t
5063 5064 sfmmu_vtop_attr(uint_t attr, int mode, tte_t *ttemaskp)
5064 5065 {
5065 5066 tte_t ttevalue;
5066 5067
5067 5068 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR));
5068 5069
5069 5070 switch (mode) {
5070 5071 case SFMMU_CHGATTR:
5071 5072 /* all attributes specified */
5072 5073 ttevalue.tte_inthi = MAKE_TTEATTR_INTHI(attr);
5073 5074 ttevalue.tte_intlo = MAKE_TTEATTR_INTLO(attr);
5074 5075 ttemaskp->tte_inthi = TTEINTHI_ATTR;
5075 5076 ttemaskp->tte_intlo = TTEINTLO_ATTR;
5076 5077 break;
5077 5078 case SFMMU_SETATTR:
5078 5079 ASSERT(!(attr & ~HAT_PROT_MASK));
5079 5080 ttemaskp->ll = 0;
5080 5081 ttevalue.ll = 0;
5081 5082 /*
5082 5083 * a valid tte implies exec and read for sfmmu
5083 5084 * so no need to do anything about them.
5084 5085 * since privileged access implies user access
5085 5086 * PROT_USER doesn't make sense either.
5086 5087 */
5087 5088 if (attr & PROT_WRITE) {
5088 5089 ttemaskp->tte_intlo |= TTE_WRPRM_INT;
5089 5090 ttevalue.tte_intlo |= TTE_WRPRM_INT;
5090 5091 }
5091 5092 break;
5092 5093 case SFMMU_CLRATTR:
5093 5094 /* attributes will be NANDed with the current ones */
5094 5095 if (attr & ~(PROT_WRITE | PROT_USER)) {
5095 5096 panic("sfmmu: attr %x not supported", attr);
5096 5097 }
5097 5098 ttemaskp->ll = 0;
5098 5099 ttevalue.ll = 0;
5099 5100 if (attr & PROT_WRITE) {
5100 5101 /* clear both writable and modify bit */
5101 5102 ttemaskp->tte_intlo |= TTE_WRPRM_INT | TTE_HWWR_INT;
5102 5103 }
5103 5104 if (attr & PROT_USER) {
5104 5105 ttemaskp->tte_intlo |= TTE_PRIV_INT;
5105 5106 ttevalue.tte_intlo |= TTE_PRIV_INT;
5106 5107 }
5107 5108 break;
5108 5109 default:
5109 5110 panic("sfmmu_vtop_attr: bad mode %x", mode);
5110 5111 }
5111 5112 ASSERT(TTE_TO_TTEPFN(&ttevalue) == 0);
5112 5113 return (ttevalue.ll);
5113 5114 }
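
sfmmu_vtop_attr() hands back a (mask, value) pair: ttemaskp selects the TTE bits being changed and the return value supplies their new settings, which sfmmu_hblk_chgattr() applies as (tte & ~mask) | value. A standalone demonstration of that update pattern with made-up bit positions:

    /*
     * Demonstration of the (mask, value) update used by
     * sfmmu_hblk_chgattr():  new = (old & ~mask) | value.  The bit
     * positions below are invented for illustration; they are not the
     * real TTE_WRPRM_INT/TTE_HWWR_INT values.
     */
    #include <stdio.h>
    #include <stdint.h>

    #define FAKE_WRPRM  0x02    /* stand-in for the write-permission bit */
    #define FAKE_HWWR   0x40    /* stand-in for the hw modify bit */

    int
    main(void)
    {
        uint64_t tte  = 0x42;                   /* writable and hw-modified */
        uint64_t mask = FAKE_WRPRM | FAKE_HWWR; /* bits being changed */
        uint64_t val  = 0;                      /* clear write permission */

        tte = (tte & ~mask) | val;
        printf("tte = 0x%llx\n", (unsigned long long)tte);  /* prints 0x0 */
        return (0);
    }
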
5114 5115
5115 5116 static uint_t
5116 5117 sfmmu_ptov_attr(tte_t *ttep)
5117 5118 {
5118 5119 uint_t attr;
5119 5120
5120 5121 ASSERT(TTE_IS_VALID(ttep));
5121 5122
5122 5123 attr = PROT_READ;
5123 5124
5124 5125 if (TTE_IS_WRITABLE(ttep)) {
5125 5126 attr |= PROT_WRITE;
5126 5127 }
5127 5128 if (TTE_IS_EXECUTABLE(ttep)) {
5128 5129 attr |= PROT_EXEC;
5129 5130 }
5130 5131 if (!TTE_IS_PRIVILEGED(ttep)) {
5131 5132 attr |= PROT_USER;
5132 5133 }
5133 5134 if (TTE_IS_NFO(ttep)) {
5134 5135 attr |= HAT_NOFAULT;
5135 5136 }
5136 5137 if (TTE_IS_NOSYNC(ttep)) {
5137 5138 attr |= HAT_NOSYNC;
5138 5139 }
5139 5140 if (TTE_IS_SIDEFFECT(ttep)) {
5140 5141 attr |= SFMMU_SIDEFFECT;
5141 5142 }
5142 5143 if (!TTE_IS_VCACHEABLE(ttep)) {
5143 5144 attr |= SFMMU_UNCACHEVTTE;
5144 5145 }
5145 5146 if (!TTE_IS_PCACHEABLE(ttep)) {
5146 5147 attr |= SFMMU_UNCACHEPTTE;
5147 5148 }
5148 5149 return (attr);
5149 5150 }
5150 5151
5151 5152 /*
5152 5153 * hat_chgprot is a deprecated hat call. New segment drivers
5153 5154 * should store all attributes and use hat_*attr calls.
5154 5155 *
5155 5156 * Change the protections in the virtual address range
5156 5157 * given to the specified virtual protection. If vprot is ~PROT_WRITE,
5157 5158 * then remove write permission, leaving the other
5158 5159 * permissions unchanged. If vprot is ~PROT_USER, remove user permissions.
5159 5160 *
5160 5161 */
5161 5162 void
5162 5163 hat_chgprot(struct hat *sfmmup, caddr_t addr, size_t len, uint_t vprot)
5163 5164 {
5164 5165 struct hmehash_bucket *hmebp;
5165 5166 hmeblk_tag hblktag;
5166 5167 int hmeshift, hashno = 1;
5167 5168 struct hme_blk *hmeblkp, *list = NULL;
5168 5169 caddr_t endaddr;
5169 5170 cpuset_t cpuset;
5170 5171 demap_range_t dmr;
5171 5172
5172 5173 ASSERT((len & MMU_PAGEOFFSET) == 0);
5173 5174 ASSERT(((uintptr_t)addr & MMU_PAGEOFFSET) == 0);
5174 5175
5175 5176 ASSERT(sfmmup->sfmmu_as != NULL);
5176 5177
5177 5178 CPUSET_ZERO(cpuset);
5178 5179
5179 5180 if ((vprot != (uint_t)~PROT_WRITE) && (vprot & PROT_USER) &&
5180 5181 ((addr + len) > (caddr_t)USERLIMIT)) {
5181 5182 panic("user addr %p vprot %x in kernel space",
5182 5183 (void *)addr, vprot);
5183 5184 }
5184 5185 endaddr = addr + len;
5185 5186 hblktag.htag_id = sfmmup;
5186 5187 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
5187 5188 DEMAP_RANGE_INIT(sfmmup, &dmr);
5188 5189
5189 5190 while (addr < endaddr) {
5190 5191 hmeshift = HME_HASH_SHIFT(hashno);
5191 5192 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
5192 5193 hblktag.htag_rehash = hashno;
5193 5194 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
5194 5195
5195 5196 SFMMU_HASH_LOCK(hmebp);
5196 5197
5197 5198 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
5198 5199 if (hmeblkp != NULL) {
5199 5200 ASSERT(!hmeblkp->hblk_shared);
5200 5201 /*
5201 5202 * We've encountered a shadow hmeblk so skip the range
5202 5203 * of the next smaller mapping size.
5203 5204 */
5204 5205 if (hmeblkp->hblk_shw_bit) {
5205 5206 ASSERT(sfmmup != ksfmmup);
5206 5207 ASSERT(hashno > 1);
5207 5208 addr = (caddr_t)P2END((uintptr_t)addr,
5208 5209 TTEBYTES(hashno - 1));
5209 5210 } else {
5210 5211 addr = sfmmu_hblk_chgprot(sfmmup, hmeblkp,
5211 5212 addr, endaddr, &dmr, vprot);
5212 5213 }
5213 5214 SFMMU_HASH_UNLOCK(hmebp);
5214 5215 hashno = 1;
5215 5216 continue;
5216 5217 }
5217 5218 SFMMU_HASH_UNLOCK(hmebp);
5218 5219
5219 5220 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) {
5220 5221 /*
5221 5222 * We have traversed the whole list and rehashed
5222 5223 * if necessary without finding the address to chgprot.
5223 5224 * This is ok so we increment the address by the
5224 5225 * smallest hmeblk range for kernel mappings and the
5225 5226 * largest hmeblk range, to account for shadow hmeblks,
5226 5227 * for user mappings and continue.
5227 5228 */
5228 5229 if (sfmmup == ksfmmup)
5229 5230 addr = (caddr_t)P2END((uintptr_t)addr,
5230 5231 TTEBYTES(1));
5231 5232 else
5232 5233 addr = (caddr_t)P2END((uintptr_t)addr,
5233 5234 TTEBYTES(hashno));
5234 5235 hashno = 1;
5235 5236 } else {
5236 5237 hashno++;
5237 5238 }
5238 5239 }
5239 5240
5240 5241 sfmmu_hblks_list_purge(&list, 0);
5241 5242 DEMAP_RANGE_FLUSH(&dmr);
5242 5243 cpuset = sfmmup->sfmmu_cpusran;
5243 5244 xt_sync(cpuset);
5244 5245 }
5245 5246
5246 5247 /*
5247 5248 * This function chgprots a range of addresses in an hmeblk. It returns the
5248 5249 * next address that needs to be chgprot.
5249 5250 * It should be called with the hash lock held.
5250 5251 * XXX It should be possible to optimize chgprot by not flushing every time but
5251 5252 * on the other hand:
5252 5253 * 1. do one flush crosscall.
5253 5254 * 2. only flush if we are increasing permissions (make sure this will work)
5254 5255 */
5255 5256 static caddr_t
5256 5257 sfmmu_hblk_chgprot(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
5257 5258 caddr_t endaddr, demap_range_t *dmrp, uint_t vprot)
5258 5259 {
5259 5260 uint_t pprot;
5260 5261 tte_t tte, ttemod;
5261 5262 struct sf_hment *sfhmep;
5262 5263 uint_t tteflags;
5263 5264 int ttesz;
5264 5265 struct page *pp = NULL;
5265 5266 kmutex_t *pml, *pmtx;
5266 5267 int ret;
5267 5268 int use_demap_range;
5268 5269 #if defined(SF_ERRATA_57)
5269 5270 int check_exec;
5270 5271 #endif
5271 5272
5272 5273 ASSERT(in_hblk_range(hmeblkp, addr));
5273 5274 ASSERT(hmeblkp->hblk_shw_bit == 0);
5274 5275 ASSERT(!hmeblkp->hblk_shared);
5275 5276
5276 5277 #ifdef DEBUG
5277 5278 if (get_hblk_ttesz(hmeblkp) != TTE8K &&
5278 5279 (endaddr < get_hblk_endaddr(hmeblkp))) {
5279 5280 panic("sfmmu_hblk_chgprot: partial chgprot of large page");
5280 5281 }
5281 5282 #endif /* DEBUG */
5282 5283
5283 5284 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
5284 5285 ttesz = get_hblk_ttesz(hmeblkp);
5285 5286
5286 5287 pprot = sfmmu_vtop_prot(vprot, &tteflags);
5287 5288 #if defined(SF_ERRATA_57)
5288 5289 check_exec = (sfmmup != ksfmmup) &&
5289 5290 AS_TYPE_64BIT(sfmmup->sfmmu_as) &&
5290 5291 ((vprot & PROT_EXEC) == PROT_EXEC);
5291 5292 #endif
5292 5293 HBLKTOHME(sfhmep, hmeblkp, addr);
5293 5294
5294 5295 /*
5295 5296 * Flush the current demap region if addresses have been
5296 5297 * skipped or the page size doesn't match.
5297 5298 */
5298 5299 use_demap_range = (TTEBYTES(ttesz) == MMU_PAGESIZE);
5299 5300 if (use_demap_range) {
5300 5301 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr);
5301 5302 } else if (dmrp != NULL) {
5302 5303 DEMAP_RANGE_FLUSH(dmrp);
5303 5304 }
5304 5305
5305 5306 while (addr < endaddr) {
5306 5307 sfmmu_copytte(&sfhmep->hme_tte, &tte);
5307 5308 if (TTE_IS_VALID(&tte)) {
5308 5309 if (TTE_GET_LOFLAGS(&tte, tteflags) == pprot) {
5309 5310 /*
5310 5311 * if the new protection is the same as old
5311 5312 * continue
5312 5313 */
5313 5314 goto next_addr;
5314 5315 }
5315 5316 pml = NULL;
5316 5317 pp = sfhmep->hme_page;
5317 5318 if (pp) {
5318 5319 pml = sfmmu_mlist_enter(pp);
5319 5320 }
5320 5321 if (pp != sfhmep->hme_page) {
5321 5322 /*
5322 5323 * tte must have been unloaded
5323 5324 * underneath us. Recheck
5324 5325 */
5325 5326 ASSERT(pml);
5326 5327 sfmmu_mlist_exit(pml);
5327 5328 continue;
5328 5329 }
5329 5330
5330 5331 ASSERT(pp == NULL || sfmmu_mlist_held(pp));
5331 5332
5332 5333 ttemod = tte;
5333 5334 TTE_SET_LOFLAGS(&ttemod, tteflags, pprot);
5334 5335 #if defined(SF_ERRATA_57)
5335 5336 if (check_exec && addr < errata57_limit)
5336 5337 ttemod.tte_exec_perm = 0;
5337 5338 #endif
5338 5339 ret = sfmmu_modifytte_try(&tte, &ttemod,
5339 5340 &sfhmep->hme_tte);
5340 5341
5341 5342 if (ret < 0) {
5342 5343 /* tte changed underneath us */
5343 5344 if (pml) {
5344 5345 sfmmu_mlist_exit(pml);
5345 5346 }
5346 5347 continue;
5347 5348 }
5348 5349
5349 5350 if (tteflags & TTE_HWWR_INT) {
5350 5351 /*
5351 5352 * need to sync if we are clearing modify bit.
5352 5353 */
5353 5354 sfmmu_ttesync(sfmmup, addr, &tte, pp);
5354 5355 }
5355 5356
5356 5357 if (pp && PP_ISRO(pp)) {
5357 5358 if (pprot & TTE_WRPRM_INT) {
5358 5359 pmtx = sfmmu_page_enter(pp);
5359 5360 PP_CLRRO(pp);
5360 5361 sfmmu_page_exit(pmtx);
5361 5362 }
5362 5363 }
5363 5364
5364 5365 if (ret > 0 && use_demap_range) {
5365 5366 DEMAP_RANGE_MARKPG(dmrp, addr);
5366 5367 } else if (ret > 0) {
5367 5368 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0);
5368 5369 }
5369 5370
5370 5371 if (pml) {
5371 5372 sfmmu_mlist_exit(pml);
5372 5373 }
5373 5374 }
5374 5375 next_addr:
5375 5376 addr += TTEBYTES(ttesz);
5376 5377 sfhmep++;
5377 5378 DEMAP_RANGE_NEXTPG(dmrp);
5378 5379 }
5379 5380 return (addr);
5380 5381 }
5381 5382
5382 5383 /*
5383 5384 * This routine is deprecated and should only be used by hat_chgprot.
5384 5385 * The correct routine is sfmmu_vtop_attr.
5385 5386 * This routine converts virtual page protections to physical ones. It will
5386 5387 * update the tteflags field with the tte mask corresponding to the protections
5387 5388 * affected and it returns the new protections. It will also clear the modify
5388 5389 * bit if we are taking away write permission. This is necessary since the
5389 5390 * modify bit is the hardware permission bit and we need to clear it in order
5390 5391 * to detect write faults.
5391 5392 * It accepts the following special protections:
5392 5393 * ~PROT_WRITE = remove write permissions.
5393 5394 * ~PROT_USER = remove user permissions.
5394 5395 */
5395 5396 static uint_t
5396 5397 sfmmu_vtop_prot(uint_t vprot, uint_t *tteflagsp)
5397 5398 {
5398 5399 if (vprot == (uint_t)~PROT_WRITE) {
5399 5400 *tteflagsp = TTE_WRPRM_INT | TTE_HWWR_INT;
5400 5401 return (0); /* will cause wrprm to be cleared */
5401 5402 }
5402 5403 if (vprot == (uint_t)~PROT_USER) {
5403 5404 *tteflagsp = TTE_PRIV_INT;
5404 5405 return (0); /* will cause privprm to be cleared */
5405 5406 }
5406 5407 if ((vprot == 0) || (vprot == PROT_USER) ||
5407 5408 ((vprot & PROT_ALL) != vprot)) {
5408 5409 panic("sfmmu_vtop_prot -- bad prot %x", vprot);
5409 5410 }
5410 5411
5411 5412 switch (vprot) {
5412 5413 case (PROT_READ):
5413 5414 case (PROT_EXEC):
5414 5415 case (PROT_EXEC | PROT_READ):
5415 5416 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT | TTE_HWWR_INT;
5416 5417 return (TTE_PRIV_INT); /* set prv and clr wrt */
5417 5418 case (PROT_WRITE):
5418 5419 case (PROT_WRITE | PROT_READ):
5419 5420 case (PROT_EXEC | PROT_WRITE):
5420 5421 case (PROT_EXEC | PROT_WRITE | PROT_READ):
5421 5422 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT;
5422 5423 return (TTE_PRIV_INT | TTE_WRPRM_INT); /* set prv and wrt */
5423 5424 case (PROT_USER | PROT_READ):
5424 5425 case (PROT_USER | PROT_EXEC):
5425 5426 case (PROT_USER | PROT_EXEC | PROT_READ):
5426 5427 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT | TTE_HWWR_INT;
5427 5428 return (0); /* clr prv and wrt */
5428 5429 case (PROT_USER | PROT_WRITE):
5429 5430 case (PROT_USER | PROT_WRITE | PROT_READ):
5430 5431 case (PROT_USER | PROT_EXEC | PROT_WRITE):
5431 5432 case (PROT_USER | PROT_EXEC | PROT_WRITE | PROT_READ):
5432 5433 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT;
5433 5434 return (TTE_WRPRM_INT); /* clr prv and set wrt */
5434 5435 default:
5435 5436 panic("sfmmu_vtop_prot -- bad prot %x", vprot);
5436 5437 }
5437 5438 return (0);
5438 5439 }
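
The ~PROT_WRITE and ~PROT_USER sentinels accepted here (and by hat_chgprot()) cannot collide with an ordinary protection word because the complement turns on all the high bits. A tiny standalone check of that property, with the PROT_* values defined locally for illustration:

    /*
     * The ~PROT_WRITE / ~PROT_USER sentinels cannot collide with a
     * normal protection word because the complement sets all the high
     * bits.  The PROT_* values are defined locally for illustration.
     */
    #include <stdio.h>

    #define PROT_READ   0x1
    #define PROT_WRITE  0x2
    #define PROT_EXEC   0x4
    #define PROT_USER   0x8
    #define PROT_ALL    (PROT_READ | PROT_WRITE | PROT_EXEC | PROT_USER)

    int
    main(void)
    {
        unsigned int vprot = ~PROT_WRITE;       /* "remove write" request */

        if (vprot == (unsigned int)~PROT_WRITE)
            printf("sentinel: strip write permission\n");
        else if ((vprot & PROT_ALL) == vprot)
            printf("ordinary protection value\n");
        return (0);
    }
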
5439 5440
5440 5441 /*
5441 5442 * Alternate unload for very large virtual ranges. With a true 64 bit VA,
5442 5443 * the normal algorithm would take too long for a very large VA range with
5443 5444 * few real mappings. This routine just walks thru all HMEs in the global
5444 5445 * hash table to find and remove mappings.
5445 5446 */
5446 5447 static void
5447 5448 hat_unload_large_virtual(
5448 5449 struct hat *sfmmup,
5449 5450 caddr_t startaddr,
5450 5451 size_t len,
5451 5452 uint_t flags,
5452 5453 hat_callback_t *callback)
5453 5454 {
5454 5455 struct hmehash_bucket *hmebp;
5455 5456 struct hme_blk *hmeblkp;
5456 5457 struct hme_blk *pr_hblk = NULL;
5457 5458 struct hme_blk *nx_hblk;
5458 5459 struct hme_blk *list = NULL;
5459 5460 int i;
5460 5461 demap_range_t dmr, *dmrp;
5461 5462 cpuset_t cpuset;
5462 5463 caddr_t endaddr = startaddr + len;
5463 5464 caddr_t sa;
5464 5465 caddr_t ea;
5465 5466 caddr_t cb_sa[MAX_CB_ADDR];
5466 5467 caddr_t cb_ea[MAX_CB_ADDR];
5467 5468 int addr_cnt = 0;
5468 5469 int a = 0;
5469 5470
5470 5471 if (sfmmup->sfmmu_free) {
5471 5472 dmrp = NULL;
5472 5473 } else {
5473 5474 dmrp = &dmr;
5474 5475 DEMAP_RANGE_INIT(sfmmup, dmrp);
5475 5476 }
5476 5477
5477 5478 /*
5478 5479 * Loop through all the hash buckets of HME blocks looking for matches.
5479 5480 */
5480 5481 for (i = 0; i <= UHMEHASH_SZ; i++) {
5481 5482 hmebp = &uhme_hash[i];
5482 5483 SFMMU_HASH_LOCK(hmebp);
5483 5484 hmeblkp = hmebp->hmeblkp;
5484 5485 pr_hblk = NULL;
5485 5486 while (hmeblkp) {
5486 5487 nx_hblk = hmeblkp->hblk_next;
5487 5488
5488 5489 /*
5489 5490 * skip if not this context, if a shadow block or
5490 5491 * if the mapping is not in the requested range
5491 5492 */
5492 5493 if (hmeblkp->hblk_tag.htag_id != sfmmup ||
5493 5494 hmeblkp->hblk_shw_bit ||
5494 5495 (sa = (caddr_t)get_hblk_base(hmeblkp)) >= endaddr ||
5495 5496 (ea = get_hblk_endaddr(hmeblkp)) <= startaddr) {
5496 5497 pr_hblk = hmeblkp;
5497 5498 goto next_block;
5498 5499 }
5499 5500
5500 5501 ASSERT(!hmeblkp->hblk_shared);
5501 5502 /*
5502 5503 * unload if there are any current valid mappings
5503 5504 */
5504 5505 if (hmeblkp->hblk_vcnt != 0 ||
5505 5506 hmeblkp->hblk_hmecnt != 0)
5506 5507 (void) sfmmu_hblk_unload(sfmmup, hmeblkp,
5507 5508 sa, ea, dmrp, flags);
5508 5509
5509 5510 /*
5510 5511 * on unmap we also release the HME block itself, once
5511 5512 * all mappings are gone.
5512 5513 */
5513 5514 if ((flags & HAT_UNLOAD_UNMAP) != 0 &&
5514 5515 !hmeblkp->hblk_vcnt &&
5515 5516 !hmeblkp->hblk_hmecnt) {
5516 5517 ASSERT(!hmeblkp->hblk_lckcnt);
5517 5518 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
5518 5519 &list, 0);
5519 5520 } else {
5520 5521 pr_hblk = hmeblkp;
5521 5522 }
5522 5523
5523 5524 if (callback == NULL)
5524 5525 goto next_block;
5525 5526
5526 5527 /*
5527 5528 * HME blocks may span more than one page, but we may be
5528 5529 * unmapping only one page, so check for a smaller range
5529 5530 * for the callback
5530 5531 */
5531 5532 if (sa < startaddr)
5532 5533 sa = startaddr;
5533 5534 if (--ea > endaddr)
5534 5535 ea = endaddr - 1;
5535 5536
5536 5537 cb_sa[addr_cnt] = sa;
5537 5538 cb_ea[addr_cnt] = ea;
5538 5539 if (++addr_cnt == MAX_CB_ADDR) {
5539 5540 if (dmrp != NULL) {
5540 5541 DEMAP_RANGE_FLUSH(dmrp);
5541 5542 cpuset = sfmmup->sfmmu_cpusran;
5542 5543 xt_sync(cpuset);
5543 5544 }
5544 5545
5545 5546 for (a = 0; a < MAX_CB_ADDR; ++a) {
5546 5547 callback->hcb_start_addr = cb_sa[a];
5547 5548 callback->hcb_end_addr = cb_ea[a];
5548 5549 callback->hcb_function(callback);
5549 5550 }
5550 5551 addr_cnt = 0;
5551 5552 }
5552 5553
5553 5554 next_block:
5554 5555 hmeblkp = nx_hblk;
5555 5556 }
5556 5557 SFMMU_HASH_UNLOCK(hmebp);
5557 5558 }
5558 5559
5559 5560 sfmmu_hblks_list_purge(&list, 0);
5560 5561 if (dmrp != NULL) {
5561 5562 DEMAP_RANGE_FLUSH(dmrp);
5562 5563 cpuset = sfmmup->sfmmu_cpusran;
5563 5564 xt_sync(cpuset);
5564 5565 }
5565 5566
5566 5567 for (a = 0; a < addr_cnt; ++a) {
5567 5568 callback->hcb_start_addr = cb_sa[a];
5568 5569 callback->hcb_end_addr = cb_ea[a];
5569 5570 callback->hcb_function(callback);
5570 5571 }
5571 5572
5572 5573 /*
5573 5574 * Check TSB and TLB page sizes if the process isn't exiting.
5574 5575 */
5575 5576 if (!sfmmup->sfmmu_free)
5576 5577 sfmmu_check_page_sizes(sfmmup, 0);
5577 5578 }
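
hat_unload_large_virtual() batches up to MAX_CB_ADDR start/end pairs before flushing the demap range and invoking the callback, so one xt_sync() crosscall covers many unloaded ranges. A standalone sketch of that batching pattern; MAX_BATCH and flush_batch() are hypothetical stand-ins for MAX_CB_ADDR and the xt_sync()-plus-callback sequence:

    /*
     * Standalone sketch of the callback batching used above: accumulate
     * ranges, drain the batch when the buffer fills, and drain the
     * remainder at the end.
     */
    #include <stdio.h>

    #define MAX_BATCH   8

    static void
    flush_batch(const int *sa, const int *ea, int n)
    {
        int i;

        for (i = 0; i < n; i++)
            printf("callback range [%d, %d]\n", sa[i], ea[i]);
    }

    int
    main(void)
    {
        int cb_sa[MAX_BATCH], cb_ea[MAX_BATCH];
        int cnt = 0;
        int r;

        for (r = 0; r < 20; r++) {      /* pretend hmeblk ranges */
            cb_sa[cnt] = r * 10;
            cb_ea[cnt] = r * 10 + 9;
            if (++cnt == MAX_BATCH) {   /* batch is full: drain it */
                flush_batch(cb_sa, cb_ea, cnt);
                cnt = 0;
            }
        }
        flush_batch(cb_sa, cb_ea, cnt); /* drain the leftovers */
        return (0);
    }
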
5578 5579
5579 5580 /*
5580 5581 * Unload all the mappings in the range [addr..addr+len). addr and len must
5581 5582 * be MMU_PAGESIZE aligned.
5582 5583 */
5583 5584
5584 5585 extern struct seg *segkmap;
5585 5586 #define ISSEGKMAP(sfmmup, addr) (sfmmup == ksfmmup && \
5586 5587 segkmap->s_base <= (addr) && (addr) < (segkmap->s_base + segkmap->s_size))
5587 5588
5588 5589
5589 5590 void
5590 5591 hat_unload_callback(
5591 5592 struct hat *sfmmup,
5592 5593 caddr_t addr,
5593 5594 size_t len,
5594 5595 uint_t flags,
5595 5596 hat_callback_t *callback)
5596 5597 {
5597 5598 struct hmehash_bucket *hmebp;
5598 5599 hmeblk_tag hblktag;
5599 5600 int hmeshift, hashno, iskernel;
5600 5601 struct hme_blk *hmeblkp, *pr_hblk, *list = NULL;
5601 5602 caddr_t endaddr;
5602 5603 cpuset_t cpuset;
5603 5604 int addr_count = 0;
5604 5605 int a;
5605 5606 caddr_t cb_start_addr[MAX_CB_ADDR];
5606 5607 caddr_t cb_end_addr[MAX_CB_ADDR];
5607 5608 int issegkmap = ISSEGKMAP(sfmmup, addr);
5608 5609 demap_range_t dmr, *dmrp;
5609 5610
5610 5611 ASSERT(sfmmup->sfmmu_as != NULL);
5611 5612
5612 5613 ASSERT((sfmmup == ksfmmup) || (flags & HAT_UNLOAD_OTHER) || \
5613 5614 AS_LOCK_HELD(sfmmup->sfmmu_as));
5614 5615
5615 5616 ASSERT(sfmmup != NULL);
5616 5617 ASSERT((len & MMU_PAGEOFFSET) == 0);
5617 5618 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET));
5618 5619
5619 5620 /*
5620 5621 * Probing through a large VA range (say 63 bits) will be slow, even
5621 5622 * at 4 Meg steps between the probes. So, when the virtual address range
5622 5623 * is very large, search the HME entries for what to unload.
5623 5624 *
5624 5625 * len >> TTE_PAGE_SHIFT(TTE4M) is the # of 4Meg probes we'd need
5625 5626 *
5626 5627 * UHMEHASH_SZ is number of hash buckets to examine
5627 5628 *
5628 5629 */
5629 5630 if (sfmmup != KHATID && (len >> TTE_PAGE_SHIFT(TTE4M)) > UHMEHASH_SZ) {
5630 5631 hat_unload_large_virtual(sfmmup, addr, len, flags, callback);
5631 5632 return;
5632 5633 }
5633 5634
5634 5635 CPUSET_ZERO(cpuset);
5635 5636
5636 5637 /*
5637 5638 * If the process is exiting, we can save a lot of fuss since
5638 5639 * we'll flush the TLB when we free the ctx anyway.
5639 5640 */
5640 5641 if (sfmmup->sfmmu_free) {
5641 5642 dmrp = NULL;
5642 5643 } else {
5643 5644 dmrp = &dmr;
5644 5645 DEMAP_RANGE_INIT(sfmmup, dmrp);
5645 5646 }
5646 5647
5647 5648 endaddr = addr + len;
5648 5649 hblktag.htag_id = sfmmup;
5649 5650 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
5650 5651
5651 5652 /*
5652 5653 * It is likely for the vm to call unload over a wide range of
5653 5654 * addresses that are actually very sparsely populated by
5654 5655 * translations. In order to speed this up the sfmmu hat supports
5655 5656 * the concept of shadow hmeblks. Dummy large page hmeblks that
5656 5657 * correspond to actual small translations are allocated at tteload
5657 5658 * time and are referred to as shadow hmeblks. Now, during unload
5658 5659 * time, we first check if we have a shadow hmeblk for that
5659 5660 * translation. The absence of one means the corresponding address
5660 5661 * range is empty and can be skipped.
5661 5662 *
5662 5663 * The kernel is an exception to the above statement and that is why
5663 5664 * we don't use shadow hmeblks and hash starting from the smallest
5664 5665 * page size.
5665 5666 */
5666 5667 if (sfmmup == KHATID) {
5667 5668 iskernel = 1;
5668 5669 hashno = TTE64K;
5669 5670 } else {
5670 5671 iskernel = 0;
5671 5672 if (mmu_page_sizes == max_mmu_page_sizes) {
5672 5673 hashno = TTE256M;
5673 5674 } else {
5674 5675 hashno = TTE4M;
5675 5676 }
5676 5677 }
5677 5678 while (addr < endaddr) {
5678 5679 hmeshift = HME_HASH_SHIFT(hashno);
5679 5680 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
5680 5681 hblktag.htag_rehash = hashno;
5681 5682 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
5682 5683
5683 5684 SFMMU_HASH_LOCK(hmebp);
5684 5685
5685 5686 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list);
5686 5687 if (hmeblkp == NULL) {
5687 5688 /*
5688 5689 * didn't find an hmeblk. skip the appropriate
5689 5690 * address range.
5690 5691 */
5691 5692 SFMMU_HASH_UNLOCK(hmebp);
5692 5693 if (iskernel) {
5693 5694 if (hashno < mmu_hashcnt) {
5694 5695 hashno++;
5695 5696 continue;
5696 5697 } else {
5697 5698 hashno = TTE64K;
5698 5699 addr = (caddr_t)roundup((uintptr_t)addr
5699 5700 + 1, MMU_PAGESIZE64K);
5700 5701 continue;
5701 5702 }
5702 5703 }
5703 5704 addr = (caddr_t)roundup((uintptr_t)addr + 1,
5704 5705 (1 << hmeshift));
5705 5706 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) {
5706 5707 ASSERT(hashno == TTE64K);
5707 5708 continue;
5708 5709 }
5709 5710 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) {
5710 5711 hashno = TTE512K;
5711 5712 continue;
5712 5713 }
5713 5714 if (mmu_page_sizes == max_mmu_page_sizes) {
5714 5715 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) {
5715 5716 hashno = TTE4M;
5716 5717 continue;
5717 5718 }
5718 5719 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) {
5719 5720 hashno = TTE32M;
5720 5721 continue;
5721 5722 }
5722 5723 hashno = TTE256M;
5723 5724 continue;
5724 5725 } else {
5725 5726 hashno = TTE4M;
5726 5727 continue;
5727 5728 }
5728 5729 }
5729 5730 ASSERT(hmeblkp);
5730 5731 ASSERT(!hmeblkp->hblk_shared);
5731 5732 if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
5732 5733 /*
5733 5734 * If the valid count is zero we can skip the range
5734 5735 * mapped by this hmeblk.
5735 5736 * We free hblks in the case of HAT_UNLOAD_UNMAP,
5736 5737 * which is used by segment drivers as a hint
5737 5738 * that the mapping resource won't be used any longer.
5738 5739 * The best example of this is during exit().
5739 5740 */
5740 5741 addr = (caddr_t)roundup((uintptr_t)addr + 1,
5741 5742 get_hblk_span(hmeblkp));
5742 5743 if ((flags & HAT_UNLOAD_UNMAP) ||
5743 5744 (iskernel && !issegkmap)) {
5744 5745 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
5745 5746 &list, 0);
5746 5747 }
5747 5748 SFMMU_HASH_UNLOCK(hmebp);
5748 5749
5749 5750 if (iskernel) {
5750 5751 hashno = TTE64K;
5751 5752 continue;
5752 5753 }
5753 5754 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) {
5754 5755 ASSERT(hashno == TTE64K);
5755 5756 continue;
5756 5757 }
5757 5758 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) {
5758 5759 hashno = TTE512K;
5759 5760 continue;
5760 5761 }
5761 5762 if (mmu_page_sizes == max_mmu_page_sizes) {
5762 5763 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) {
5763 5764 hashno = TTE4M;
5764 5765 continue;
5765 5766 }
5766 5767 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) {
5767 5768 hashno = TTE32M;
5768 5769 continue;
5769 5770 }
5770 5771 hashno = TTE256M;
5771 5772 continue;
5772 5773 } else {
5773 5774 hashno = TTE4M;
5774 5775 continue;
5775 5776 }
5776 5777 }
5777 5778 if (hmeblkp->hblk_shw_bit) {
5778 5779 /*
5779 5780 * If we encounter a shadow hmeblk we know there are
5780 5781 * smaller sized hmeblks mapping the same address space.
5781 5782 * Decrement the hash size and rehash.
5782 5783 */
5783 5784 ASSERT(sfmmup != KHATID);
5784 5785 hashno--;
5785 5786 SFMMU_HASH_UNLOCK(hmebp);
5786 5787 continue;
5787 5788 }
5788 5789
5789 5790 /*
5790 5791 * track callback address ranges.
5791 5792 * only start a new range when it's not contiguous
5792 5793 */
5793 5794 if (callback != NULL) {
5794 5795 if (addr_count > 0 &&
5795 5796 addr == cb_end_addr[addr_count - 1])
5796 5797 --addr_count;
5797 5798 else
5798 5799 cb_start_addr[addr_count] = addr;
5799 5800 }
5800 5801
5801 5802 addr = sfmmu_hblk_unload(sfmmup, hmeblkp, addr, endaddr,
5802 5803 dmrp, flags);
5803 5804
5804 5805 if (callback != NULL)
5805 5806 cb_end_addr[addr_count++] = addr;
5806 5807
5807 5808 if (((flags & HAT_UNLOAD_UNMAP) || (iskernel && !issegkmap)) &&
5808 5809 !hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
5809 5810 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, &list, 0);
5810 5811 }
5811 5812 SFMMU_HASH_UNLOCK(hmebp);
5812 5813
5813 5814 /*
5814 5815 * Notify our caller as to exactly which pages
5815 5816 * have been unloaded. We do these in clumps,
5816 5817 * to minimize the number of xt_sync()s that need to occur.
5817 5818 */
5818 5819 if (callback != NULL && addr_count == MAX_CB_ADDR) {
5819 5820 if (dmrp != NULL) {
5820 5821 DEMAP_RANGE_FLUSH(dmrp);
5821 5822 cpuset = sfmmup->sfmmu_cpusran;
5822 5823 xt_sync(cpuset);
5823 5824 }
5824 5825
5825 5826 for (a = 0; a < MAX_CB_ADDR; ++a) {
5826 5827 callback->hcb_start_addr = cb_start_addr[a];
5827 5828 callback->hcb_end_addr = cb_end_addr[a];
5828 5829 callback->hcb_function(callback);
5829 5830 }
5830 5831 addr_count = 0;
5831 5832 }
5832 5833 if (iskernel) {
5833 5834 hashno = TTE64K;
5834 5835 continue;
5835 5836 }
5836 5837 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) {
5837 5838 ASSERT(hashno == TTE64K);
5838 5839 continue;
5839 5840 }
5840 5841 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) {
5841 5842 hashno = TTE512K;
5842 5843 continue;
5843 5844 }
5844 5845 if (mmu_page_sizes == max_mmu_page_sizes) {
5845 5846 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) {
5846 5847 hashno = TTE4M;
5847 5848 continue;
5848 5849 }
5849 5850 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) {
5850 5851 hashno = TTE32M;
5851 5852 continue;
5852 5853 }
5853 5854 hashno = TTE256M;
5854 5855 } else {
5855 5856 hashno = TTE4M;
5856 5857 }
5857 5858 }
5858 5859
5859 5860 sfmmu_hblks_list_purge(&list, 0);
5860 5861 if (dmrp != NULL) {
5861 5862 DEMAP_RANGE_FLUSH(dmrp);
5862 5863 cpuset = sfmmup->sfmmu_cpusran;
5863 5864 xt_sync(cpuset);
5864 5865 }
5865 5866 if (callback && addr_count != 0) {
5866 5867 for (a = 0; a < addr_count; ++a) {
5867 5868 callback->hcb_start_addr = cb_start_addr[a];
5868 5869 callback->hcb_end_addr = cb_end_addr[a];
5869 5870 callback->hcb_function(callback);
5870 5871 }
5871 5872 }
5872 5873
5873 5874 /*
5874 5875 * Check TSB and TLB page sizes if the process isn't exiting.
5875 5876 */
5876 5877 if (!sfmmup->sfmmu_free)
5877 5878 sfmmu_check_page_sizes(sfmmup, 0);
5878 5879 }
5879 5880
5880 5881 /*
5881 5882 * Unload all the mappings in the range [addr..addr+len). addr and len must
5882 5883 * be MMU_PAGESIZE aligned.
5883 5884 */
5884 5885 void
5885 5886 hat_unload(struct hat *sfmmup, caddr_t addr, size_t len, uint_t flags)
5886 5887 {
5887 5888 hat_unload_callback(sfmmup, addr, len, flags, NULL);
5888 5889 }
5889 5890
5890 5891
5891 5892 /*
5892 5893 * Find the largest mapping size for this page.
5893 5894 */
5894 5895 int
5895 5896 fnd_mapping_sz(page_t *pp)
5896 5897 {
5897 5898 int sz;
5898 5899 int p_index;
5899 5900
5900 5901 p_index = PP_MAPINDEX(pp);
5901 5902
5902 5903 sz = 0;
5903 5904 p_index >>= 1; /* don't care about 8K bit */
5904 5905 for (; p_index; p_index >>= 1) {
5905 5906 sz++;
5906 5907 }
5907 5908
5908 5909 return (sz);
5909 5910 }
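
fnd_mapping_sz() scans the page's mapping-index bitmap: bit 0 is the 8K mapping and each higher bit marks a larger mapping size, so the position of the highest set bit is the largest size code. The same scan as a standalone program with a made-up index value:

    /*
     * Standalone version of the fnd_mapping_sz() bit scan.  p_index is
     * a made-up value here; in the kernel it comes from PP_MAPINDEX(pp).
     */
    #include <stdio.h>

    int
    main(void)
    {
        int p_index = 0x5;  /* pretend bits 0 and 2 are set */
        int sz = 0;

        p_index >>= 1;      /* don't care about the 8K bit */
        for (; p_index; p_index >>= 1)
            sz++;

        printf("largest mapping size code: %d\n", sz);  /* prints 2 */
        return (0);
    }
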
5910 5911
5911 5912 /*
5912 5913 * This function unloads a range of addresses for an hmeblk.
5913 5914 * It returns the next address to be unloaded.
5914 5915 * It should be called with the hash lock held.
5915 5916 */
5916 5917 static caddr_t
5917 5918 sfmmu_hblk_unload(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
5918 5919 caddr_t endaddr, demap_range_t *dmrp, uint_t flags)
5919 5920 {
5920 5921 tte_t tte, ttemod;
5921 5922 struct sf_hment *sfhmep;
5922 5923 int ttesz;
5923 5924 long ttecnt;
5924 5925 page_t *pp;
5925 5926 kmutex_t *pml;
5926 5927 int ret;
5927 5928 int use_demap_range;
5928 5929
5929 5930 ASSERT(in_hblk_range(hmeblkp, addr));
5930 5931 ASSERT(!hmeblkp->hblk_shw_bit);
5931 5932 ASSERT(sfmmup != NULL || hmeblkp->hblk_shared);
5932 5933 ASSERT(sfmmup == NULL || !hmeblkp->hblk_shared);
5933 5934 ASSERT(dmrp == NULL || !hmeblkp->hblk_shared);
5934 5935
5935 5936 #ifdef DEBUG
5936 5937 if (get_hblk_ttesz(hmeblkp) != TTE8K &&
5937 5938 (endaddr < get_hblk_endaddr(hmeblkp))) {
5938 5939 panic("sfmmu_hblk_unload: partial unload of large page");
5939 5940 }
5940 5941 #endif /* DEBUG */
5941 5942
5942 5943 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
5943 5944 ttesz = get_hblk_ttesz(hmeblkp);
5944 5945
5945 5946 use_demap_range = ((dmrp == NULL) ||
5946 5947 (TTEBYTES(ttesz) == DEMAP_RANGE_PGSZ(dmrp)));
5947 5948
5948 5949 if (use_demap_range) {
5949 5950 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr);
5950 5951 } else if (dmrp != NULL) {
5951 5952 DEMAP_RANGE_FLUSH(dmrp);
5952 5953 }
5953 5954 ttecnt = 0;
5954 5955 HBLKTOHME(sfhmep, hmeblkp, addr);
5955 5956
5956 5957 while (addr < endaddr) {
5957 5958 pml = NULL;
5958 5959 sfmmu_copytte(&sfhmep->hme_tte, &tte);
5959 5960 if (TTE_IS_VALID(&tte)) {
5960 5961 pp = sfhmep->hme_page;
5961 5962 if (pp != NULL) {
5962 5963 pml = sfmmu_mlist_enter(pp);
5963 5964 }
5964 5965
5965 5966 /*
5966 5967 * Verify if hme still points to 'pp' now that
5967 5968 * we have p_mapping lock.
5968 5969 */
5969 5970 if (sfhmep->hme_page != pp) {
5970 5971 if (pp != NULL && sfhmep->hme_page != NULL) {
5971 5972 ASSERT(pml != NULL);
5972 5973 sfmmu_mlist_exit(pml);
5973 5974 /* Re-start this iteration. */
5974 5975 continue;
5975 5976 }
5976 5977 ASSERT((pp != NULL) &&
5977 5978 (sfhmep->hme_page == NULL));
5978 5979 goto tte_unloaded;
5979 5980 }
5980 5981
5981 5982 /*
5982 5983 * This point on we have both HASH and p_mapping
5983 5984 * lock.
5984 5985 */
5985 5986 ASSERT(pp == sfhmep->hme_page);
5986 5987 ASSERT(pp == NULL || sfmmu_mlist_held(pp));
5987 5988
5988 5989 /*
5989 5990 * We need to loop on modify tte because it is
5990 5991 * possible for pagesync to come along and
5991 5992 * change the software bits beneath us.
5992 5993 *
5993 5994 * Page_unload can also invalidate the tte after
5994 5995 * we read tte outside of p_mapping lock.
5995 5996 */
5996 5997 again:
5997 5998 ttemod = tte;
5998 5999
5999 6000 TTE_SET_INVALID(&ttemod);
6000 6001 ret = sfmmu_modifytte_try(&tte, &ttemod,
6001 6002 &sfhmep->hme_tte);
6002 6003
6003 6004 if (ret <= 0) {
6004 6005 if (TTE_IS_VALID(&tte)) {
6005 6006 ASSERT(ret < 0);
6006 6007 goto again;
6007 6008 }
6008 6009 if (pp != NULL) {
6009 6010 panic("sfmmu_hblk_unload: pp = 0x%p "
6010 6011 "tte became invalid under mlist"
6011 6012 " lock = 0x%p", (void *)pp,
6012 6013 (void *)pml);
6013 6014 }
6014 6015 continue;
6015 6016 }
6016 6017
6017 6018 if (!(flags & HAT_UNLOAD_NOSYNC)) {
6018 6019 sfmmu_ttesync(sfmmup, addr, &tte, pp);
6019 6020 }
6020 6021
6021 6022 /*
6022 6023 * Ok- we invalidated the tte. Do the rest of the job.
6023 6024 */
6024 6025 ttecnt++;
6025 6026
6026 6027 if (flags & HAT_UNLOAD_UNLOCK) {
6027 6028 ASSERT(hmeblkp->hblk_lckcnt > 0);
6028 6029 atomic_dec_32(&hmeblkp->hblk_lckcnt);
6029 6030 HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK);
6030 6031 }
6031 6032
6032 6033 /*
6033 6034 * Normally we would need to flush the page
6034 6035 * from the virtual cache at this point in
6035 6036 * order to prevent a potential cache alias
6036 6037 * inconsistency.
6037 6038 * The particular scenario we need to worry
6038 6039 * about is:
6039 6040 * Given: va1 and va2 are two virtual addresses
6040 6041 * that alias and map the same physical
6041 6042 * address.
6042 6043 * 1. mapping exists from va1 to pa and data
6043 6044 * has been read into the cache.
6044 6045 * 2. unload va1.
6045 6046 * 3. load va2 and modify data using va2.
6046 6047 * 4. unload va2.
6047 6048 * 5. load va1 and reference data. Unless we
6048 6049 * flush the data cache when we unload we will
6049 6050 * get stale data.
6050 6051 * Fortunately, page coloring eliminates the
6051 6052 * above scenario by remembering the color a
6052 6053 * physical page was last or is currently
6053 6054 * mapped to. Now, we delay the flush until
6054 6055 * the loading of translations. Only when the
6055 6056 * new translation is of a different color
6056 6057 * are we forced to flush.
6057 6058 */
6058 6059 if (use_demap_range) {
6059 6060 /*
6060 6061 * Mark this page as needing a demap.
6061 6062 */
6062 6063 DEMAP_RANGE_MARKPG(dmrp, addr);
6063 6064 } else {
6064 6065 ASSERT(sfmmup != NULL);
6065 6066 ASSERT(!hmeblkp->hblk_shared);
6066 6067 sfmmu_tlb_demap(addr, sfmmup, hmeblkp,
6067 6068 sfmmup->sfmmu_free, 0);
6068 6069 }
6069 6070
6070 6071 if (pp) {
6071 6072 /*
6072 6073 * Remove the hment from the mapping list
6073 6074 */
6074 6075 ASSERT(hmeblkp->hblk_hmecnt > 0);
6075 6076
6076 6077 /*
6077 6078 * Again, we cannot
6078 6079 * ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS);
6079 6080 */
6080 6081 HME_SUB(sfhmep, pp);
6081 6082 membar_stst();
6082 6083 atomic_dec_16(&hmeblkp->hblk_hmecnt);
6083 6084 }
6084 6085
6085 6086 ASSERT(hmeblkp->hblk_vcnt > 0);
6086 6087 atomic_dec_16(&hmeblkp->hblk_vcnt);
6087 6088
6088 6089 ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt ||
6089 6090 !hmeblkp->hblk_lckcnt);
6090 6091
6091 6092 #ifdef VAC
6092 6093 if (pp && (pp->p_nrm & (P_KPMC | P_KPMS | P_TNC))) {
6093 6094 if (PP_ISTNC(pp)) {
6094 6095 /*
6095 6096 * If the page was temporarily
6096 6097 * uncached, try to recache
6097 6098 * it. Note that HME_SUB() was
6098 6099 * called above so p_index and
6099 6100 * mlist had been updated.
6100 6101 */
6101 6102 conv_tnc(pp, ttesz);
6102 6103 } else if (pp->p_mapping == NULL) {
6103 6104 ASSERT(kpm_enable);
6104 6105 /*
6105 6106 * Page is marked to be in VAC conflict
6106 6107 * to an existing kpm mapping and/or is
6107 6108 * kpm mapped using only the regular
6108 6109 * pagesize.
6109 6110 */
6110 6111 sfmmu_kpm_hme_unload(pp);
6111 6112 }
6112 6113 }
6113 6114 #endif /* VAC */
6114 6115 } else if ((pp = sfhmep->hme_page) != NULL) {
6115 6116 /*
6116 6117 * TTE is invalid but the hme
6117 6118 * still exists. let pageunload
6118 6119 * complete its job.
6119 6120 */
6120 6121 ASSERT(pml == NULL);
6121 6122 pml = sfmmu_mlist_enter(pp);
6122 6123 if (sfhmep->hme_page != NULL) {
6123 6124 sfmmu_mlist_exit(pml);
6124 6125 continue;
6125 6126 }
6126 6127 ASSERT(sfhmep->hme_page == NULL);
6127 6128 } else if (hmeblkp->hblk_hmecnt != 0) {
6128 6129 /*
6129 6130 * pageunload may not have finished decrementing
6130 6131 * hblk_vcnt and hblk_hmecnt. Find page_t if any and
6131 6132 * wait for pageunload to finish. Rely on pageunload
6132 6133 * to decrement hblk_hmecnt after hblk_vcnt.
6133 6134 */
6134 6135 pfn_t pfn = TTE_TO_TTEPFN(&tte);
6135 6136 ASSERT(pml == NULL);
6136 6137 if (pf_is_memory(pfn)) {
6137 6138 pp = page_numtopp_nolock(pfn);
6138 6139 if (pp != NULL) {
6139 6140 pml = sfmmu_mlist_enter(pp);
6140 6141 sfmmu_mlist_exit(pml);
6141 6142 pml = NULL;
6142 6143 }
6143 6144 }
6144 6145 }
6145 6146
6146 6147 tte_unloaded:
6147 6148 /*
6148 6149 * At this point, the tte we are looking at
6149 6150 * should be unloaded, and hme has been unlinked
6150 6151 * from page too. This is important because in
6151 6152 * pageunload, it does ttesync() then HME_SUB.
6152 6153 * We need to make sure HME_SUB has been completed
6153 6154 * so we know ttesync() has been completed. Otherwise,
6154 6155 * at exit time, after return from hat layer, VM will
6155 6156 * release the as structure which hat_setstat() (called
6156 6157 * by ttesync()) needs.
6157 6158 */
6158 6159 #ifdef DEBUG
6159 6160 {
6160 6161 tte_t dtte;
6161 6162
6162 6163 ASSERT(sfhmep->hme_page == NULL);
6163 6164
6164 6165 sfmmu_copytte(&sfhmep->hme_tte, &dtte);
6165 6166 ASSERT(!TTE_IS_VALID(&dtte));
6166 6167 }
6167 6168 #endif
6168 6169
6169 6170 if (pml) {
6170 6171 sfmmu_mlist_exit(pml);
6171 6172 }
6172 6173
6173 6174 addr += TTEBYTES(ttesz);
6174 6175 sfhmep++;
6175 6176 DEMAP_RANGE_NEXTPG(dmrp);
6176 6177 }
6177 6178 /*
6178 6179 * For shared hmeblks this routine is only called when region is freed
6179 6180 * and no longer referenced. So no need to decrement ttecnt
6180 6181 * in the region structure here.
6181 6182 */
6182 6183 if (ttecnt > 0 && sfmmup != NULL) {
6183 6184 atomic_add_long(&sfmmup->sfmmu_ttecnt[ttesz], -ttecnt);
6184 6185 }
6185 6186 return (addr);
6186 6187 }
6187 6188
6188 6189 /*
6189 6190 * Invalidate a virtual address range for the local CPU.
6190 6191 * For best performance ensure that the va range is completely
6191 6192 * mapped, otherwise the entire TLB will be flushed.
6192 6193 */
6193 6194 void
6194 6195 hat_flush_range(struct hat *sfmmup, caddr_t va, size_t size)
6195 6196 {
6196 6197 ssize_t sz;
6197 6198 caddr_t endva = va + size;
6198 6199
6199 6200 while (va < endva) {
6200 6201 sz = hat_getpagesize(sfmmup, va);
6201 6202 if (sz < 0) {
6202 6203 vtag_flushall();
6203 6204 break;
6204 6205 }
6205 6206 vtag_flushpage(va, (uint64_t)sfmmup);
6206 6207 va += sz;
6207 6208 }
6208 6209 }
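
hat_flush_range() walks the range by whatever page size backs each address and gives up in favor of a full TLB flush as soon as it finds an unmapped hole. A user-level sketch of the same walking pattern; lookup_pagesize() and the printf calls are stand-ins for hat_getpagesize(), vtag_flushpage() and vtag_flushall():

    /*
     * User-level sketch of the hat_flush_range() walk: advance by the
     * mapping size of each address and fall back to a full flush when
     * an unmapped hole is found.
     */
    #include <stdio.h>
    #include <sys/types.h>

    static ssize_t
    lookup_pagesize(size_t va)
    {
        /* pretend everything below 64K is mapped with 8K pages */
        return (va < 0x10000 ? 0x2000 : -1);
    }

    int
    main(void)
    {
        size_t va = 0, end = 0x20000;
        ssize_t sz;

        while (va < end) {
            sz = lookup_pagesize(va);
            if (sz < 0) {
                printf("hole at 0x%zx: flush everything\n", va);
                break;
            }
            printf("flush page at 0x%zx (size 0x%zx)\n", va, (size_t)sz);
            va += sz;
        }
        return (0);
    }
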
6209 6210
6210 6211 /*
6211 6212 * Synchronize all the mappings in the range [addr..addr+len).
6212 6213 * Can be called with clearflag having two states:
6213 6214 * HAT_SYNC_DONTZERO means just return the rm stats
6214 6215 * HAT_SYNC_ZERORM means zero rm bits in the tte and return the stats
6215 6216 */
6216 6217 void
6217 6218 hat_sync(struct hat *sfmmup, caddr_t addr, size_t len, uint_t clearflag)
6218 6219 {
6219 6220 struct hmehash_bucket *hmebp;
6220 6221 hmeblk_tag hblktag;
6221 6222 int hmeshift, hashno = 1;
6222 6223 struct hme_blk *hmeblkp, *list = NULL;
6223 6224 caddr_t endaddr;
6224 6225 cpuset_t cpuset;
6225 6226
6226 6227 ASSERT((sfmmup == ksfmmup) || AS_LOCK_HELD(sfmmup->sfmmu_as));
6227 6228 ASSERT((len & MMU_PAGEOFFSET) == 0);
6228 6229 ASSERT((clearflag == HAT_SYNC_DONTZERO) ||
6229 6230 (clearflag == HAT_SYNC_ZERORM));
6230 6231
6231 6232 CPUSET_ZERO(cpuset);
6232 6233
6233 6234 endaddr = addr + len;
6234 6235 hblktag.htag_id = sfmmup;
6235 6236 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
6236 6237
6237 6238 /*
6238 6239 * Spitfire supports 4 page sizes.
6239 6240 * Most pages are expected to be of the smallest page
6240 6241 * size (8K) and these will not need to be rehashed. 64K
6241 6242 * pages also don't need to be rehashed because the an hmeblk
6242 6243 * pages also don't need to be rehashed because an hmeblk
6243 6244 * spans 64K of address space. 512K pages might need 1 rehash
6244 6245 * and 4M pages 2 rehashes.
6245 6246 while (addr < endaddr) {
6246 6247 hmeshift = HME_HASH_SHIFT(hashno);
6247 6248 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
6248 6249 hblktag.htag_rehash = hashno;
6249 6250 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
6250 6251
6251 6252 SFMMU_HASH_LOCK(hmebp);
6252 6253
6253 6254 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
6254 6255 if (hmeblkp != NULL) {
6255 6256 ASSERT(!hmeblkp->hblk_shared);
6256 6257 /*
6257 6258 * We've encountered a shadow hmeblk so skip the range
6258 6259 * of the next smaller mapping size.
6259 6260 */
6260 6261 if (hmeblkp->hblk_shw_bit) {
6261 6262 ASSERT(sfmmup != ksfmmup);
6262 6263 ASSERT(hashno > 1);
6263 6264 addr = (caddr_t)P2END((uintptr_t)addr,
6264 6265 TTEBYTES(hashno - 1));
6265 6266 } else {
6266 6267 addr = sfmmu_hblk_sync(sfmmup, hmeblkp,
6267 6268 addr, endaddr, clearflag);
6268 6269 }
6269 6270 SFMMU_HASH_UNLOCK(hmebp);
6270 6271 hashno = 1;
6271 6272 continue;
6272 6273 }
6273 6274 SFMMU_HASH_UNLOCK(hmebp);
6274 6275
6275 6276 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) {
6276 6277 /*
6277 6278 * We have traversed the whole list and rehashed
6278 6279 * if necessary without finding the address to sync.
6279 6280 * This is ok so we increment the address by the
6280 6281 * smallest hmeblk range for kernel mappings and the
6281 6282 * largest hmeblk range, to account for shadow hmeblks,
6282 6283 * for user mappings and continue.
6283 6284 */
6284 6285 if (sfmmup == ksfmmup)
6285 6286 addr = (caddr_t)P2END((uintptr_t)addr,
6286 6287 TTEBYTES(1));
6287 6288 else
6288 6289 addr = (caddr_t)P2END((uintptr_t)addr,
6289 6290 TTEBYTES(hashno));
6290 6291 hashno = 1;
6291 6292 } else {
6292 6293 hashno++;
6293 6294 }
6294 6295 }
6295 6296 sfmmu_hblks_list_purge(&list, 0);
6296 6297 cpuset = sfmmup->sfmmu_cpusran;
6297 6298 xt_sync(cpuset);
6298 6299 }
6299 6300
6300 6301 static caddr_t
6301 6302 sfmmu_hblk_sync(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
6302 6303 caddr_t endaddr, int clearflag)
6303 6304 {
6304 6305 tte_t tte, ttemod;
6305 6306 struct sf_hment *sfhmep;
6306 6307 int ttesz;
6307 6308 struct page *pp;
6308 6309 kmutex_t *pml;
6309 6310 int ret;
6310 6311
6311 6312 ASSERT(hmeblkp->hblk_shw_bit == 0);
6312 6313 ASSERT(!hmeblkp->hblk_shared);
6313 6314
6314 6315 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
6315 6316
6316 6317 ttesz = get_hblk_ttesz(hmeblkp);
6317 6318 HBLKTOHME(sfhmep, hmeblkp, addr);
6318 6319
6319 6320 while (addr < endaddr) {
6320 6321 sfmmu_copytte(&sfhmep->hme_tte, &tte);
6321 6322 if (TTE_IS_VALID(&tte)) {
6322 6323 pml = NULL;
6323 6324 pp = sfhmep->hme_page;
6324 6325 if (pp) {
6325 6326 pml = sfmmu_mlist_enter(pp);
6326 6327 }
6327 6328 if (pp != sfhmep->hme_page) {
6328 6329 /*
6329 6330 * tte must have been unloaded
6330 6331 * underneath us. Recheck
6331 6332 */
6332 6333 ASSERT(pml);
6333 6334 sfmmu_mlist_exit(pml);
6334 6335 continue;
6335 6336 }
6336 6337
6337 6338 ASSERT(pp == NULL || sfmmu_mlist_held(pp));
6338 6339
6339 6340 if (clearflag == HAT_SYNC_ZERORM) {
6340 6341 ttemod = tte;
6341 6342 TTE_CLR_RM(&ttemod);
6342 6343 ret = sfmmu_modifytte_try(&tte, &ttemod,
6343 6344 &sfhmep->hme_tte);
6344 6345 if (ret < 0) {
6345 6346 if (pml) {
6346 6347 sfmmu_mlist_exit(pml);
6347 6348 }
6348 6349 continue;
6349 6350 }
6350 6351
6351 6352 if (ret > 0) {
6352 6353 sfmmu_tlb_demap(addr, sfmmup,
6353 6354 hmeblkp, 0, 0);
6354 6355 }
6355 6356 }
6356 6357 sfmmu_ttesync(sfmmup, addr, &tte, pp);
6357 6358 if (pml) {
6358 6359 sfmmu_mlist_exit(pml);
6359 6360 }
6360 6361 }
6361 6362 addr += TTEBYTES(ttesz);
6362 6363 sfhmep++;
6363 6364 }
6364 6365 return (addr);
6365 6366 }
6366 6367
6367 6368 /*
6368 6369 * This function will sync a tte to the page struct and it will
6369 6370 * update the hat stats. Currently it allows us to pass a NULL pp
6370 6371 * and we will simply update the stats. We may want to change this
6371 6372 * so we only keep stats for pages backed by pp's.
6372 6373 */
6373 6374 static void
6374 6375 sfmmu_ttesync(struct hat *sfmmup, caddr_t addr, tte_t *ttep, page_t *pp)
6375 6376 {
6376 6377 uint_t rm = 0;
6377 6378 int sz;
6378 6379 pgcnt_t npgs;
6379 6380
6380 6381 ASSERT(TTE_IS_VALID(ttep));
6381 6382
6382 6383 if (TTE_IS_NOSYNC(ttep)) {
6383 6384 return;
6384 6385 }
6385 6386
6386 6387 if (TTE_IS_REF(ttep)) {
6387 6388 rm = P_REF;
6388 6389 }
6389 6390 if (TTE_IS_MOD(ttep)) {
6390 6391 rm |= P_MOD;
6391 6392 }
6392 6393
6393 6394 if (rm == 0) {
6394 6395 return;
6395 6396 }
6396 6397
6397 6398 sz = TTE_CSZ(ttep);
6398 6399 if (sfmmup != NULL && sfmmup->sfmmu_rmstat) {
6399 6400 int i;
6400 6401 caddr_t vaddr = addr;
6401 6402
6402 6403 for (i = 0; i < TTEPAGES(sz); i++, vaddr += MMU_PAGESIZE) {
6403 6404 hat_setstat(sfmmup->sfmmu_as, vaddr, MMU_PAGESIZE, rm);
6404 6405 }
6405 6406
6406 6407 }
6407 6408
6408 6409 /*
6409 6410 * XXX I want to use cas to update nrm bits but they
6410 6411 * currently belong in common/vm and not in hat where
6411 6412 * they should be.
6412 6413 * The nrm bits are protected by the same mutex as
6413 6414 * the one that protects the page's mapping list.
6414 6415 */
6415 6416 if (!pp)
6416 6417 return;
6417 6418 ASSERT(sfmmu_mlist_held(pp));
6418 6419 /*
6419 6420 * If the tte is for a large page, we need to sync all the
6420 6421 * pages covered by the tte.
6421 6422 */
6422 6423 if (sz != TTE8K) {
6423 6424 ASSERT(pp->p_szc != 0);
6424 6425 pp = PP_GROUPLEADER(pp, sz);
6425 6426 ASSERT(sfmmu_mlist_held(pp));
6426 6427 }
6427 6428
6428 6429 /* Get number of pages from tte size. */
6429 6430 npgs = TTEPAGES(sz);
6430 6431
6431 6432 do {
6432 6433 ASSERT(pp);
6433 6434 ASSERT(sfmmu_mlist_held(pp));
6434 6435 if (((rm & P_REF) != 0 && !PP_ISREF(pp)) ||
6435 6436 ((rm & P_MOD) != 0 && !PP_ISMOD(pp)))
6436 6437 hat_page_setattr(pp, rm);
6437 6438
6438 6439 /*
6439 6440 * Are we done? If not, we must have a large mapping.
6440 6441 * For large mappings we need to sync the rest of the pages
6441 6442 * covered by this tte; goto the next page.
6442 6443 */
6443 6444 } while (--npgs > 0 && (pp = PP_PAGENEXT(pp)));
6444 6445 }
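
For a large-page TTE the ref/mod bits observed in the translation are applied to every constituent 8K page, starting from the group leader. A standalone sketch of that constituent walk; the flag values and the page array are local stand-ins for P_REF/P_MOD and the kernel's page_t chain:

    /*
     * Sketch of the constituent-page walk at the end of sfmmu_ttesync():
     * the ref/mod bits of a large mapping are applied to every 8K page
     * it covers.
     */
    #include <stdio.h>

    #define MY_P_REF    0x1
    #define MY_P_MOD    0x2
    #define NPGS_512K   (512 * 1024 / (8 * 1024))   /* like TTEPAGES(TTE512K) */

    int
    main(void)
    {
        unsigned int pages[NPGS_512K] = { 0 };  /* per-page ref/mod bits */
        unsigned int rm = MY_P_REF | MY_P_MOD;  /* bits seen in the tte */
        int i;

        for (i = 0; i < NPGS_512K; i++)
            pages[i] |= rm;                     /* like hat_page_setattr() */

        printf("propagated 0x%x to %d pages\n", rm, (int)NPGS_512K);
        return (0);
    }
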
6445 6446
6446 6447 /*
6447 6448 * Execute pre-callback handler of each pa_hment linked to pp
6448 6449 *
6449 6450 * Inputs:
6450 6451 * flag: either HAT_PRESUSPEND or HAT_SUSPEND.
6451 6452 * capture_cpus: pointer to return value (below)
6452 6453 *
6453 6454 * Returns:
6454 6455 * Propagates the subsystem callback return values back to the caller;
6455 6456 * returns 0 on success. If capture_cpus is non-NULL, the value returned
6456 6457 * is zero if all of the pa_hments are of a type that do not require
6457 6458 * capturing CPUs prior to suspending the mapping, else it is 1.
6458 6459 */
6459 6460 static int
6460 6461 hat_pageprocess_precallbacks(struct page *pp, uint_t flag, int *capture_cpus)
6461 6462 {
6462 6463 struct sf_hment *sfhmep;
6463 6464 struct pa_hment *pahmep;
6464 6465 int (*f)(caddr_t, uint_t, uint_t, void *);
6465 6466 int ret;
6466 6467 id_t id;
6467 6468 int locked = 0;
6468 6469 kmutex_t *pml;
6469 6470
6470 6471 ASSERT(PAGE_EXCL(pp));
6471 6472 if (!sfmmu_mlist_held(pp)) {
6472 6473 pml = sfmmu_mlist_enter(pp);
6473 6474 locked = 1;
6474 6475 }
6475 6476
6476 6477 if (capture_cpus)
6477 6478 *capture_cpus = 0;
6478 6479
6479 6480 top:
6480 6481 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) {
6481 6482 /*
6482 6483 * skip sf_hments corresponding to VA<->PA mappings;
6483 6484 * for pa_hment's, hme_tte.ll is zero
6484 6485 */
6485 6486 if (!IS_PAHME(sfhmep))
6486 6487 continue;
6487 6488
6488 6489 pahmep = sfhmep->hme_data;
6489 6490 ASSERT(pahmep != NULL);
6490 6491
6491 6492 /*
6492 6493 * skip if pre-handler has been called earlier in this loop
6493 6494 */
6494 6495 if (pahmep->flags & flag)
6495 6496 continue;
6496 6497
6497 6498 id = pahmep->cb_id;
6498 6499 ASSERT(id >= (id_t)0 && id < sfmmu_cb_nextid);
6499 6500 if (capture_cpus && sfmmu_cb_table[id].capture_cpus != 0)
6500 6501 *capture_cpus = 1;
6501 6502 if ((f = sfmmu_cb_table[id].prehandler) == NULL) {
6502 6503 pahmep->flags |= flag;
6503 6504 continue;
6504 6505 }
6505 6506
6506 6507 /*
6507 6508 * Drop the mapping list lock to avoid locking order issues.
6508 6509 */
6509 6510 if (locked)
6510 6511 sfmmu_mlist_exit(pml);
6511 6512
6512 6513 ret = f(pahmep->addr, pahmep->len, flag, pahmep->pvt);
6513 6514 if (ret != 0)
6514 6515 return (ret); /* caller must do the cleanup */
6515 6516
6516 6517 if (locked) {
6517 6518 pml = sfmmu_mlist_enter(pp);
6518 6519 pahmep->flags |= flag;
6519 6520 goto top;
6520 6521 }
6521 6522
6522 6523 pahmep->flags |= flag;
6523 6524 }
6524 6525
6525 6526 if (locked)
6526 6527 sfmmu_mlist_exit(pml);
6527 6528
6528 6529 return (0);
6529 6530 }
6530 6531
6531 6532 /*
6532 6533 * Execute post-callback handler of each pa_hment linked to pp
6533 6534 *
6534 6535 * Same overall assumptions and restrictions apply as for
6535 6536 * hat_pageprocess_precallbacks().
6536 6537 */
6537 6538 static void
6538 6539 hat_pageprocess_postcallbacks(struct page *pp, uint_t flag)
6539 6540 {
6540 6541 pfn_t pgpfn = pp->p_pagenum;
6541 6542 pfn_t pgmask = btop(page_get_pagesize(pp->p_szc)) - 1;
6542 6543 pfn_t newpfn;
6543 6544 struct sf_hment *sfhmep;
6544 6545 struct pa_hment *pahmep;
6545 6546 int (*f)(caddr_t, uint_t, uint_t, void *, pfn_t);
6546 6547 id_t id;
6547 6548 int locked = 0;
6548 6549 kmutex_t *pml;
6549 6550
6550 6551 ASSERT(PAGE_EXCL(pp));
6551 6552 if (!sfmmu_mlist_held(pp)) {
6552 6553 pml = sfmmu_mlist_enter(pp);
6553 6554 locked = 1;
6554 6555 }
6555 6556
6556 6557 top:
6557 6558 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) {
6558 6559 /*
6559 6560 * skip sf_hments corresponding to VA<->PA mappings;
6560 6561 * for pa_hment's, hme_tte.ll is zero
6561 6562 */
6562 6563 if (!IS_PAHME(sfhmep))
6563 6564 continue;
6564 6565
6565 6566 pahmep = sfhmep->hme_data;
6566 6567 ASSERT(pahmep != NULL);
6567 6568
6568 6569 if ((pahmep->flags & flag) == 0)
6569 6570 continue;
6570 6571
6571 6572 pahmep->flags &= ~flag;
6572 6573
6573 6574 id = pahmep->cb_id;
6574 6575 ASSERT(id >= (id_t)0 && id < sfmmu_cb_nextid);
6575 6576 if ((f = sfmmu_cb_table[id].posthandler) == NULL)
6576 6577 continue;
6577 6578
6578 6579 /*
6579 6580 * Convert the base page PFN into the constituent PFN
6580 6581 * which is needed by the callback handler.
6581 6582 */
6582 6583 newpfn = pgpfn | (btop((uintptr_t)pahmep->addr) & pgmask);
6583 6584
6584 6585 /*
6585 6586 * Drop the mapping list lock to avoid locking order issues.
6586 6587 */
6587 6588 if (locked)
6588 6589 sfmmu_mlist_exit(pml);
6589 6590
6590 6591 if (f(pahmep->addr, pahmep->len, flag, pahmep->pvt, newpfn)
6591 6592 != 0)
6592 6593 panic("sfmmu: posthandler failed");
6593 6594
6594 6595 if (locked) {
6595 6596 pml = sfmmu_mlist_enter(pp);
6596 6597 goto top;
6597 6598 }
6598 6599 }
6599 6600
6600 6601 if (locked)
6601 6602 sfmmu_mlist_exit(pml);
6602 6603 }
6603 6604
6604 6605 /*
6605 6606 * Suspend locked kernel mapping
6606 6607 */
6607 6608 void
6608 6609 hat_pagesuspend(struct page *pp)
6609 6610 {
6610 6611 struct sf_hment *sfhmep;
6611 6612 sfmmu_t *sfmmup;
6612 6613 tte_t tte, ttemod;
6613 6614 struct hme_blk *hmeblkp;
6614 6615 caddr_t addr;
6615 6616 int index, cons;
6616 6617 cpuset_t cpuset;
6617 6618
6618 6619 ASSERT(PAGE_EXCL(pp));
6619 6620 ASSERT(sfmmu_mlist_held(pp));
6620 6621
6621 6622 mutex_enter(&kpr_suspendlock);
6622 6623
6623 6624 /*
6624 6625 * We're about to suspend a kernel mapping so mark this thread as
6625 6626 * non-traceable by DTrace. This prevents us from running into issues
6626 6627 * with probe context trying to touch a suspended page
6627 6628 * in the relocation codepath itself.
6628 6629 */
6629 6630 curthread->t_flag |= T_DONTDTRACE;
6630 6631
6631 6632 index = PP_MAPINDEX(pp);
6632 6633 cons = TTE8K;
6633 6634
6634 6635 retry:
6635 6636 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) {
6636 6637
6637 6638 if (IS_PAHME(sfhmep))
6638 6639 continue;
6639 6640
6640 6641 if (get_hblk_ttesz(sfmmu_hmetohblk(sfhmep)) != cons)
6641 6642 continue;
6642 6643
6643 6644 /*
6644 6645 * Loop until we successfully set the suspend bit in
6645 6646 * the TTE.
6646 6647 */
6647 6648 again:
6648 6649 sfmmu_copytte(&sfhmep->hme_tte, &tte);
6649 6650 ASSERT(TTE_IS_VALID(&tte));
6650 6651
6651 6652 ttemod = tte;
6652 6653 TTE_SET_SUSPEND(&ttemod);
6653 6654 if (sfmmu_modifytte_try(&tte, &ttemod,
6654 6655 &sfhmep->hme_tte) < 0)
6655 6656 goto again;
6656 6657
6657 6658 /*
6658 6659 * Invalidate TSB entry
6659 6660 */
6660 6661 hmeblkp = sfmmu_hmetohblk(sfhmep);
6661 6662
6662 6663 sfmmup = hblktosfmmu(hmeblkp);
6663 6664 ASSERT(sfmmup == ksfmmup);
6664 6665 ASSERT(!hmeblkp->hblk_shared);
6665 6666
6666 6667 addr = tte_to_vaddr(hmeblkp, tte);
6667 6668
6668 6669 /*
6669 6670 * No need to make sure that the TSB for this sfmmu is
6670 6671 * not being relocated since it is ksfmmup and thus it
6671 6672 * will never be relocated.
6672 6673 */
6673 6674 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0);
6674 6675
6675 6676 /*
6676 6677 * Update xcall stats
6677 6678 */
6678 6679 cpuset = cpu_ready_set;
6679 6680 CPUSET_DEL(cpuset, CPU->cpu_id);
6680 6681
6681 6682 /* LINTED: constant in conditional context */
6682 6683 SFMMU_XCALL_STATS(ksfmmup);
6683 6684
6684 6685 /*
6685 6686 * Flush TLB entry on remote CPU's
6686 6687 */
6687 6688 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr,
6688 6689 (uint64_t)ksfmmup);
6689 6690 xt_sync(cpuset);
6690 6691
6691 6692 /*
6692 6693 * Flush TLB entry on local CPU
6693 6694 */
6694 6695 vtag_flushpage(addr, (uint64_t)ksfmmup);
6695 6696 }
6696 6697
6697 6698 while (index != 0) {
6698 6699 index = index >> 1;
6699 6700 if (index != 0)
6700 6701 cons++;
6701 6702 if (index & 0x1) {
6702 6703 pp = PP_GROUPLEADER(pp, cons);
6703 6704 goto retry;
6704 6705 }
6705 6706 }
6706 6707 }
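
The "again:" loop above is the TTE modify idiom used throughout this file: copy the TTE, build the desired value, and let sfmmu_modifytte_try() attempt the swap, retrying whenever it reports the TTE changed underneath us (a negative return). Below is a minimal user-level sketch of that retry idiom, using C11 atomics in place of the kernel's TTE primitives; the names and the suspend bit are purely illustrative assumptions.

    #include <stdatomic.h>
    #include <stdint.h>

    #define DEMO_SUSPEND_BIT	(1ULL << 63)	/* stand-in for the TTE suspend bit */

    /* Keep retrying until our modified value is installed, as hat_pagesuspend() does. */
    static void
    demo_set_suspend(_Atomic uint64_t *word)
    {
        uint64_t old, new;

        do {
            old = atomic_load(word);		/* like sfmmu_copytte() */
            new = old | DEMO_SUSPEND_BIT;	/* like TTE_SET_SUSPEND() */
            /* a failed exchange is like sfmmu_modifytte_try() < 0: go around again */
        } while (!atomic_compare_exchange_weak(word, &old, new));
    }
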
6707 6708
6708 6709 #ifdef DEBUG
6709 6710
6710 6711 #define N_PRLE 1024
6711 6712 struct prle {
6712 6713 page_t *targ;
6713 6714 page_t *repl;
6714 6715 int status;
6715 6716 int pausecpus;
6716 6717 hrtime_t whence;
6717 6718 };
6718 6719
6719 6720 static struct prle page_relocate_log[N_PRLE];
6720 6721 static int prl_entry;
6721 6722 static kmutex_t prl_mutex;
6722 6723
6723 6724 #define PAGE_RELOCATE_LOG(t, r, s, p) \
6724 6725 mutex_enter(&prl_mutex); \
6725 6726 page_relocate_log[prl_entry].targ = *(t); \
6726 6727 page_relocate_log[prl_entry].repl = *(r); \
6727 6728 page_relocate_log[prl_entry].status = (s); \
6728 6729 page_relocate_log[prl_entry].pausecpus = (p); \
6729 6730 page_relocate_log[prl_entry].whence = gethrtime(); \
6730 6731 prl_entry = (prl_entry == (N_PRLE - 1))? 0 : prl_entry + 1; \
6731 6732 mutex_exit(&prl_mutex);
6732 6733
6733 6734 #else /* !DEBUG */
6734 6735 #define PAGE_RELOCATE_LOG(t, r, s, p)
6735 6736 #endif
6736 6737
6737 6738 /*
6738 6739 * Core Kernel Page Relocation Algorithm
6739 6740 *
6740 6741 * Input:
6741 6742 *
6742 6743 * target : constituent pages are SE_EXCL locked.
6743 6744 * replacement: constituent pages are SE_EXCL locked.
6744 6745 *
6745 6746 * Output:
6746 6747 *
6747 6748 * nrelocp: number of pages relocated
6748 6749 */
6749 6750 int
6750 6751 hat_page_relocate(page_t **target, page_t **replacement, spgcnt_t *nrelocp)
6751 6752 {
6752 6753 page_t *targ, *repl;
6753 6754 page_t *tpp, *rpp;
6754 6755 kmutex_t *low, *high;
6755 6756 spgcnt_t npages, i;
6756 6757 page_t *pl = NULL;
6757 6758 int old_pil;
6758 6759 cpuset_t cpuset;
6759 6760 int cap_cpus;
6760 6761 int ret;
6761 6762 #ifdef VAC
6762 6763 int cflags = 0;
6763 6764 #endif
6764 6765
6765 6766 if (!kcage_on || PP_ISNORELOC(*target)) {
6766 6767 PAGE_RELOCATE_LOG(target, replacement, EAGAIN, -1);
6767 6768 return (EAGAIN);
6768 6769 }
6769 6770
6770 6771 mutex_enter(&kpr_mutex);
6771 6772 kreloc_thread = curthread;
6772 6773
6773 6774 targ = *target;
6774 6775 repl = *replacement;
6775 6776 ASSERT(repl != NULL);
6776 6777 ASSERT(targ->p_szc == repl->p_szc);
6777 6778
6778 6779 npages = page_get_pagecnt(targ->p_szc);
6779 6780
6780 6781 /*
6781 6782 * unload VA<->PA mappings that are not locked
6782 6783 */
6783 6784 tpp = targ;
6784 6785 for (i = 0; i < npages; i++) {
6785 6786 (void) hat_pageunload(tpp, SFMMU_KERNEL_RELOC);
6786 6787 tpp++;
6787 6788 }
6788 6789
6789 6790 /*
6790 6791 * Do "presuspend" callbacks, in a context from which we can still
6791 6792 * block as needed. Note that we don't hold the mapping list lock
6792 6793 * of "targ" at this point due to potential locking order issues;
6793 6794 * we assume that between the hat_pageunload() above and holding
6794 6795 * the SE_EXCL lock that the mapping list *cannot* change at this
6795 6796 * point.
6796 6797 */
6797 6798 ret = hat_pageprocess_precallbacks(targ, HAT_PRESUSPEND, &cap_cpus);
6798 6799 if (ret != 0) {
6799 6800 /*
6800 6801 * EIO translates to fatal error, for all others cleanup
6801 6802 * and return EAGAIN.
6802 6803 */
6803 6804 ASSERT(ret != EIO);
6804 6805 hat_pageprocess_postcallbacks(targ, HAT_POSTUNSUSPEND);
6805 6806 PAGE_RELOCATE_LOG(target, replacement, ret, -1);
6806 6807 kreloc_thread = NULL;
6807 6808 mutex_exit(&kpr_mutex);
6808 6809 return (EAGAIN);
6809 6810 }
6810 6811
6811 6812 /*
6812 6813 * acquire p_mapping list lock for both the target and replacement
6813 6814 * root pages.
6814 6815 *
6815 6816 * low and high refer to the need to grab the mlist locks in a
6816 6817 	 * specific order to prevent race conditions. Thus the
6817 6818 * lower lock must be grabbed before the higher lock.
6818 6819 *
6819 6820 * This will block hat_unload's accessing p_mapping list. Since
6820 6821 * we have SE_EXCL lock, hat_memload and hat_pageunload will be
6821 6822 * blocked. Thus, no one else will be accessing the p_mapping list
6822 6823 * while we suspend and reload the locked mapping below.
6823 6824 */
6824 6825 tpp = targ;
6825 6826 rpp = repl;
6826 6827 sfmmu_mlist_reloc_enter(tpp, rpp, &low, &high);
6827 6828
6828 6829 kpreempt_disable();
6829 6830
6830 6831 /*
6831 6832 * We raise our PIL to 13 so that we don't get captured by
6832 6833 * another CPU or pinned by an interrupt thread. We can't go to
6833 6834 * PIL 14 since the nexus driver(s) may need to interrupt at
6834 6835 * that level in the case of IOMMU pseudo mappings.
6835 6836 */
6836 6837 cpuset = cpu_ready_set;
6837 6838 CPUSET_DEL(cpuset, CPU->cpu_id);
6838 6839 if (!cap_cpus || CPUSET_ISNULL(cpuset)) {
6839 6840 old_pil = splr(XCALL_PIL);
6840 6841 } else {
6841 6842 old_pil = -1;
6842 6843 xc_attention(cpuset);
6843 6844 }
6844 6845 ASSERT(getpil() == XCALL_PIL);
6845 6846
6846 6847 /*
6847 6848 * Now do suspend callbacks. In the case of an IOMMU mapping
6848 6849 * this will suspend all DMA activity to the page while it is
6849 6850 * being relocated. Since we are well above LOCK_LEVEL and CPUs
6850 6851 * may be captured at this point we should have acquired any needed
6851 6852 * locks in the presuspend callback.
6852 6853 */
6853 6854 ret = hat_pageprocess_precallbacks(targ, HAT_SUSPEND, NULL);
6854 6855 if (ret != 0) {
6855 6856 repl = targ;
6856 6857 goto suspend_fail;
6857 6858 }
6858 6859
6859 6860 /*
6860 6861 * Raise the PIL yet again, this time to block all high-level
6861 6862 * interrupts on this CPU. This is necessary to prevent an
6862 6863 * interrupt routine from pinning the thread which holds the
6863 6864 * mapping suspended and then touching the suspended page.
6864 6865 *
6865 6866 * Once the page is suspended we also need to be careful to
6866 6867 * avoid calling any functions which touch any seg_kmem memory
6867 6868 * since that memory may be backed by the very page we are
6868 6869 * relocating in here!
6869 6870 */
6870 6871 hat_pagesuspend(targ);
6871 6872
6872 6873 /*
6873 6874 * Now that we are confident everybody has stopped using this page,
6874 6875 * copy the page contents. Note we use a physical copy to prevent
6875 6876 * locking issues and to avoid fpRAS because we can't handle it in
6876 6877 * this context.
6877 6878 */
6878 6879 for (i = 0; i < npages; i++, tpp++, rpp++) {
6879 6880 #ifdef VAC
6880 6881 /*
6881 6882 * If the replacement has a different vcolor than
6882 6883 		 * the one being replaced, we need to handle VAC
6883 6884 		 * consistency for it just as if we were setting up
6884 6885 * a new mapping to it.
6885 6886 */
6886 6887 if ((PP_GET_VCOLOR(rpp) != NO_VCOLOR) &&
6887 6888 (tpp->p_vcolor != rpp->p_vcolor) &&
6888 6889 !CacheColor_IsFlushed(cflags, PP_GET_VCOLOR(rpp))) {
6889 6890 CacheColor_SetFlushed(cflags, PP_GET_VCOLOR(rpp));
6890 6891 sfmmu_cache_flushcolor(PP_GET_VCOLOR(rpp),
6891 6892 rpp->p_pagenum);
6892 6893 }
6893 6894 #endif
6894 6895 /*
6895 6896 * Copy the contents of the page.
6896 6897 */
6897 6898 ppcopy_kernel(tpp, rpp);
6898 6899 }
6899 6900
6900 6901 tpp = targ;
6901 6902 rpp = repl;
6902 6903 for (i = 0; i < npages; i++, tpp++, rpp++) {
6903 6904 /*
6904 6905 * Copy attributes. VAC consistency was handled above,
6905 6906 * if required.
6906 6907 */
6907 6908 rpp->p_nrm = tpp->p_nrm;
6908 6909 tpp->p_nrm = 0;
6909 6910 rpp->p_index = tpp->p_index;
6910 6911 tpp->p_index = 0;
6911 6912 #ifdef VAC
6912 6913 rpp->p_vcolor = tpp->p_vcolor;
6913 6914 #endif
6914 6915 }
6915 6916
6916 6917 /*
6917 6918 * First, unsuspend the page, if we set the suspend bit, and transfer
6918 6919 * the mapping list from the target page to the replacement page.
6919 6920 * Next process postcallbacks; since pa_hment's are linked only to the
6920 6921 * p_mapping list of root page, we don't iterate over the constituent
6921 6922 * pages.
6922 6923 */
6923 6924 hat_pagereload(targ, repl);
6924 6925
6925 6926 suspend_fail:
6926 6927 hat_pageprocess_postcallbacks(repl, HAT_UNSUSPEND);
6927 6928
6928 6929 /*
6929 6930 * Now lower our PIL and release any captured CPUs since we
6930 6931 * are out of the "danger zone". After this it will again be
6931 6932 * safe to acquire adaptive mutex locks, or to drop them...
6932 6933 */
6933 6934 if (old_pil != -1) {
6934 6935 splx(old_pil);
6935 6936 } else {
6936 6937 xc_dismissed(cpuset);
6937 6938 }
6938 6939
6939 6940 kpreempt_enable();
6940 6941
6941 6942 sfmmu_mlist_reloc_exit(low, high);
6942 6943
6943 6944 /*
6944 6945 * Postsuspend callbacks should drop any locks held across
6945 6946 * the suspend callbacks. As before, we don't hold the mapping
6946 6947 * list lock at this point.. our assumption is that the mapping
6947 6948 * list still can't change due to our holding SE_EXCL lock and
6948 6949 * there being no unlocked mappings left. Hence the restriction
6949 6950 * on calling context to hat_delete_callback()
6950 6951 */
6951 6952 hat_pageprocess_postcallbacks(repl, HAT_POSTUNSUSPEND);
6952 6953 if (ret != 0) {
6953 6954 /*
6954 6955 * The second presuspend call failed: we got here through
6955 6956 * the suspend_fail label above.
6956 6957 */
6957 6958 ASSERT(ret != EIO);
6958 6959 PAGE_RELOCATE_LOG(target, replacement, ret, cap_cpus);
6959 6960 kreloc_thread = NULL;
6960 6961 mutex_exit(&kpr_mutex);
6961 6962 return (EAGAIN);
6962 6963 }
6963 6964
6964 6965 /*
6965 6966 * Now that we're out of the performance critical section we can
6966 6967 * take care of updating the hash table, since we still
6967 6968 * hold all the pages locked SE_EXCL at this point we
6968 6969 * needn't worry about things changing out from under us.
6969 6970 */
6970 6971 tpp = targ;
6971 6972 rpp = repl;
6972 6973 for (i = 0; i < npages; i++, tpp++, rpp++) {
6973 6974
6974 6975 /*
6975 6976 * replace targ with replacement in page_hash table
6976 6977 */
6977 6978 targ = tpp;
6978 6979 page_relocate_hash(rpp, targ);
6979 6980
6980 6981 /*
6981 6982 * concatenate target; caller of platform_page_relocate()
6982 6983 * expects target to be concatenated after returning.
6983 6984 */
6984 6985 ASSERT(targ->p_next == targ);
6985 6986 ASSERT(targ->p_prev == targ);
6986 6987 page_list_concat(&pl, &targ);
6987 6988 }
6988 6989
6989 6990 ASSERT(*target == pl);
6990 6991 *nrelocp = npages;
6991 6992 PAGE_RELOCATE_LOG(target, replacement, 0, cap_cpus);
6992 6993 kreloc_thread = NULL;
6993 6994 mutex_exit(&kpr_mutex);
6994 6995 return (0);
6995 6996 }
6996 6997
6997 6998 /*
6998 6999 * Called when stray pa_hments are found attached to a page which is
6999 7000 * being freed. Notify the subsystem which attached the pa_hment of
7000 7001 * the error if it registered a suitable handler, else panic.
7001 7002 */
7002 7003 static void
7003 7004 sfmmu_pahment_leaked(struct pa_hment *pahmep)
7004 7005 {
7005 7006 id_t cb_id = pahmep->cb_id;
7006 7007
7007 7008 ASSERT(cb_id >= (id_t)0 && cb_id < sfmmu_cb_nextid);
7008 7009 if (sfmmu_cb_table[cb_id].errhandler != NULL) {
7009 7010 if (sfmmu_cb_table[cb_id].errhandler(pahmep->addr, pahmep->len,
7010 7011 HAT_CB_ERR_LEAKED, pahmep->pvt) == 0)
7011 7012 return; /* non-fatal */
7012 7013 }
7013 7014 panic("pa_hment leaked: 0x%p", (void *)pahmep);
7014 7015 }
7015 7016
7016 7017 /*
7017 7018 * Remove all mappings to page 'pp'.
7018 7019 */
7019 7020 int
7020 7021 hat_pageunload(struct page *pp, uint_t forceflag)
7021 7022 {
7022 7023 struct page *origpp = pp;
7023 7024 struct sf_hment *sfhme, *tmphme;
7024 7025 struct hme_blk *hmeblkp;
7025 7026 kmutex_t *pml;
7026 7027 #ifdef VAC
7027 7028 kmutex_t *pmtx;
7028 7029 #endif
7029 7030 cpuset_t cpuset, tset;
7030 7031 int index, cons;
7031 7032 int pa_hments;
7032 7033
7033 7034 ASSERT(PAGE_EXCL(pp));
7034 7035
7035 7036 tmphme = NULL;
7036 7037 pa_hments = 0;
7037 7038 CPUSET_ZERO(cpuset);
7038 7039
7039 7040 pml = sfmmu_mlist_enter(pp);
7040 7041
7041 7042 #ifdef VAC
7042 7043 if (pp->p_kpmref)
7043 7044 sfmmu_kpm_pageunload(pp);
7044 7045 ASSERT(!PP_ISMAPPED_KPM(pp));
7045 7046 #endif
7046 7047 /*
7047 7048 * Clear vpm reference. Since the page is exclusively locked
7048 7049 * vpm cannot be referencing it.
7049 7050 */
7050 7051 if (vpm_enable) {
7051 7052 pp->p_vpmref = 0;
7052 7053 }
7053 7054
7054 7055 index = PP_MAPINDEX(pp);
7055 7056 cons = TTE8K;
7056 7057 retry:
7057 7058 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
7058 7059 tmphme = sfhme->hme_next;
7059 7060
7060 7061 if (IS_PAHME(sfhme)) {
7061 7062 ASSERT(sfhme->hme_data != NULL);
7062 7063 pa_hments++;
7063 7064 continue;
7064 7065 }
7065 7066
7066 7067 hmeblkp = sfmmu_hmetohblk(sfhme);
7067 7068
7068 7069 /*
7069 7070 * If there are kernel mappings don't unload them, they will
7070 7071 * be suspended.
7071 7072 */
7072 7073 if (forceflag == SFMMU_KERNEL_RELOC && hmeblkp->hblk_lckcnt &&
7073 7074 hmeblkp->hblk_tag.htag_id == ksfmmup)
7074 7075 continue;
7075 7076
7076 7077 tset = sfmmu_pageunload(pp, sfhme, cons);
7077 7078 CPUSET_OR(cpuset, tset);
7078 7079 }
7079 7080
7080 7081 while (index != 0) {
7081 7082 index = index >> 1;
7082 7083 if (index != 0)
7083 7084 cons++;
7084 7085 if (index & 0x1) {
7085 7086 /* Go to leading page */
7086 7087 pp = PP_GROUPLEADER(pp, cons);
7087 7088 ASSERT(sfmmu_mlist_held(pp));
7088 7089 goto retry;
7089 7090 }
7090 7091 }
7091 7092
7092 7093 /*
7093 7094 * cpuset may be empty if the page was only mapped by segkpm,
7094 7095 * in which case we won't actually cross-trap.
7095 7096 */
7096 7097 xt_sync(cpuset);
7097 7098
7098 7099 /*
7099 7100 * The page should have no mappings at this point, unless
7100 7101 * we were called from hat_page_relocate() in which case we
7101 7102 * leave the locked mappings which will be suspended later.
7102 7103 */
7103 7104 ASSERT(!PP_ISMAPPED(origpp) || pa_hments ||
7104 7105 (forceflag == SFMMU_KERNEL_RELOC));
7105 7106
7106 7107 #ifdef VAC
7107 7108 if (PP_ISTNC(pp)) {
7108 7109 if (cons == TTE8K) {
7109 7110 pmtx = sfmmu_page_enter(pp);
7110 7111 PP_CLRTNC(pp);
7111 7112 sfmmu_page_exit(pmtx);
7112 7113 } else {
7113 7114 conv_tnc(pp, cons);
7114 7115 }
7115 7116 }
7116 7117 #endif /* VAC */
7117 7118
7118 7119 if (pa_hments && forceflag != SFMMU_KERNEL_RELOC) {
7119 7120 /*
7120 7121 * Unlink any pa_hments and free them, calling back
7121 7122 * the responsible subsystem to notify it of the error.
7122 7123 * This can occur in situations such as drivers leaking
7123 7124 * DMA handles: naughty, but common enough that we'd like
7124 7125 * to keep the system running rather than bringing it
7125 7126 * down with an obscure error like "pa_hment leaked"
7126 7127 * which doesn't aid the user in debugging their driver.
7127 7128 */
7128 7129 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
7129 7130 tmphme = sfhme->hme_next;
7130 7131 if (IS_PAHME(sfhme)) {
7131 7132 struct pa_hment *pahmep = sfhme->hme_data;
7132 7133 sfmmu_pahment_leaked(pahmep);
7133 7134 HME_SUB(sfhme, pp);
7134 7135 kmem_cache_free(pa_hment_cache, pahmep);
7135 7136 }
7136 7137 }
7137 7138
7138 7139 ASSERT(!PP_ISMAPPED(origpp));
7139 7140 }
7140 7141
7141 7142 sfmmu_mlist_exit(pml);
7142 7143
7143 7144 return (0);
7144 7145 }
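
hat_pageunload(), like hat_pagesuspend() and hat_pagesync() below, finishes with a "while (index != 0)" walk that shifts the PP_MAPINDEX() bitmask and bumps "cons" so the group leader of every larger page size mapping the page gets the same treatment. The following is a standalone sketch of just that bit-walk arithmetic; the bitmask value and the bit-to-size assignment are assumptions for illustration only.

    #include <stdio.h>

    /* Toy stand-ins for the TTE size codes in <vm/hat_sfmmu.h>. */
    enum { TTE8K, TTE64K, TTE512K, TTE4M, TTE32M, TTE256M };
    static const char *szname[] = { "8K", "64K", "512K", "4M", "32M", "256M" };

    int
    main(void)
    {
        int index = (1 << 1) | (1 << 3);	/* pretend 64K and 4M mappings exist */
        int cons = TTE8K;

        /* same shape as the "while (index != 0)" loops in this file */
        while (index != 0) {
            index = index >> 1;
            if (index != 0)
                cons++;
            if (index & 0x1)
                printf("visit the %s group leader\n", szname[cons]);
        }
        return (0);
    }
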
7145 7146
7146 7147 cpuset_t
7147 7148 sfmmu_pageunload(page_t *pp, struct sf_hment *sfhme, int cons)
7148 7149 {
7149 7150 struct hme_blk *hmeblkp;
7150 7151 sfmmu_t *sfmmup;
7151 7152 tte_t tte, ttemod;
7152 7153 #ifdef DEBUG
7153 7154 tte_t orig_old;
7154 7155 #endif /* DEBUG */
7155 7156 caddr_t addr;
7156 7157 int ttesz;
7157 7158 int ret;
7158 7159 cpuset_t cpuset;
7159 7160
7160 7161 ASSERT(pp != NULL);
7161 7162 ASSERT(sfmmu_mlist_held(pp));
7162 7163 ASSERT(!PP_ISKAS(pp));
7163 7164
7164 7165 CPUSET_ZERO(cpuset);
7165 7166
7166 7167 hmeblkp = sfmmu_hmetohblk(sfhme);
7167 7168
7168 7169 readtte:
7169 7170 sfmmu_copytte(&sfhme->hme_tte, &tte);
7170 7171 if (TTE_IS_VALID(&tte)) {
7171 7172 sfmmup = hblktosfmmu(hmeblkp);
7172 7173 ttesz = get_hblk_ttesz(hmeblkp);
7173 7174 /*
7174 7175 * Only unload mappings of 'cons' size.
7175 7176 */
7176 7177 if (ttesz != cons)
7177 7178 return (cpuset);
7178 7179
7179 7180 /*
7180 7181 * Note that we have p_mapping lock, but no hash lock here.
7181 7182 * hblk_unload() has to have both hash lock AND p_mapping
7182 7183 * lock before it tries to modify tte. So, the tte could
7183 7184 * not become invalid in the sfmmu_modifytte_try() below.
7184 7185 */
7185 7186 ttemod = tte;
7186 7187 #ifdef DEBUG
7187 7188 orig_old = tte;
7188 7189 #endif /* DEBUG */
7189 7190
7190 7191 TTE_SET_INVALID(&ttemod);
7191 7192 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte);
7192 7193 if (ret < 0) {
7193 7194 #ifdef DEBUG
7194 7195 /* only R/M bits can change. */
7195 7196 chk_tte(&orig_old, &tte, &ttemod, hmeblkp);
7196 7197 #endif /* DEBUG */
7197 7198 goto readtte;
7198 7199 }
7199 7200
7200 7201 if (ret == 0) {
7201 7202 panic("pageunload: cas failed?");
7202 7203 }
7203 7204
7204 7205 addr = tte_to_vaddr(hmeblkp, tte);
7205 7206
7206 7207 if (hmeblkp->hblk_shared) {
7207 7208 sf_srd_t *srdp = (sf_srd_t *)sfmmup;
7208 7209 uint_t rid = hmeblkp->hblk_tag.htag_rid;
7209 7210 sf_region_t *rgnp;
7210 7211 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
7211 7212 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
7212 7213 ASSERT(srdp != NULL);
7213 7214 rgnp = srdp->srd_hmergnp[rid];
7214 7215 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid);
7215 7216 cpuset = sfmmu_rgntlb_demap(addr, rgnp, hmeblkp, 1);
7216 7217 sfmmu_ttesync(NULL, addr, &tte, pp);
7217 7218 ASSERT(rgnp->rgn_ttecnt[ttesz] > 0);
7218 7219 atomic_dec_ulong(&rgnp->rgn_ttecnt[ttesz]);
7219 7220 } else {
7220 7221 sfmmu_ttesync(sfmmup, addr, &tte, pp);
7221 7222 atomic_dec_ulong(&sfmmup->sfmmu_ttecnt[ttesz]);
7222 7223
7223 7224 /*
7224 7225 * We need to flush the page from the virtual cache
7225 7226 * in order to prevent a virtual cache alias
7226 7227 * inconsistency. The particular scenario we need
7227 7228 * to worry about is:
7228 7229 			 * Given: va1 and va2 are two virtual addresses that
7229 7230 * alias and will map the same physical address.
7230 7231 * 1. mapping exists from va1 to pa and data has
7231 7232 * been read into the cache.
7232 7233 * 2. unload va1.
7233 7234 * 3. load va2 and modify data using va2.
7234 7235 			 *	 4. unload va2.
7235 7236 * 5. load va1 and reference data. Unless we flush
7236 7237 * the data cache when we unload we will get
7237 7238 * stale data.
7238 7239 * This scenario is taken care of by using virtual
7239 7240 * page coloring.
7240 7241 */
7241 7242 if (sfmmup->sfmmu_ismhat) {
7242 7243 /*
7243 7244 * Flush TSBs, TLBs and caches
7244 7245 * of every process
7245 7246 * sharing this ism segment.
7246 7247 */
7247 7248 sfmmu_hat_lock_all();
7248 7249 mutex_enter(&ism_mlist_lock);
7249 7250 kpreempt_disable();
7250 7251 sfmmu_ismtlbcache_demap(addr, sfmmup, hmeblkp,
7251 7252 pp->p_pagenum, CACHE_NO_FLUSH);
7252 7253 kpreempt_enable();
7253 7254 mutex_exit(&ism_mlist_lock);
7254 7255 sfmmu_hat_unlock_all();
7255 7256 cpuset = cpu_ready_set;
7256 7257 } else {
7257 7258 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0);
7258 7259 cpuset = sfmmup->sfmmu_cpusran;
7259 7260 }
7260 7261 }
7261 7262
7262 7263 /*
7263 7264 * Hme_sub has to run after ttesync() and a_rss update.
7264 7265 * See hblk_unload().
7265 7266 */
7266 7267 HME_SUB(sfhme, pp);
7267 7268 membar_stst();
7268 7269
7269 7270 /*
7270 7271 * We can not make ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS)
7271 7272 * since pteload may have done a HME_ADD() right after
7272 7273 * we did the HME_SUB() above. Hmecnt is now maintained
7273 7274 		 * by cas only; no lock guarantees its value. The only
7274 7275 		 * guarantee we have is that the hmecnt should not be less than
7275 7276 		 * what it should be, so the hblk will not be taken away.
7276 7277 		 * It's also important that we decrement the hmecnt after
7277 7278 * we are done with hmeblkp so that this hmeblk won't be
7278 7279 * stolen.
7279 7280 */
7280 7281 ASSERT(hmeblkp->hblk_hmecnt > 0);
7281 7282 ASSERT(hmeblkp->hblk_vcnt > 0);
7282 7283 atomic_dec_16(&hmeblkp->hblk_vcnt);
7283 7284 atomic_dec_16(&hmeblkp->hblk_hmecnt);
7284 7285 /*
7285 7286 * This is bug 4063182.
7286 7287 * XXX: fixme
7287 7288 * ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt ||
7288 7289 * !hmeblkp->hblk_lckcnt);
7289 7290 */
7290 7291 } else {
7291 7292 panic("invalid tte? pp %p &tte %p",
7292 7293 (void *)pp, (void *)&tte);
7293 7294 }
7294 7295
7295 7296 return (cpuset);
7296 7297 }
7297 7298
7298 7299 /*
7299 7300 * While relocating a kernel page, this function will move the mappings
7300 7301 * from tpp to dpp and modify any associated data with these mappings.
7301 7302 * It also unsuspends the suspended kernel mapping.
7302 7303 */
7303 7304 static void
7304 7305 hat_pagereload(struct page *tpp, struct page *dpp)
7305 7306 {
7306 7307 struct sf_hment *sfhme;
7307 7308 tte_t tte, ttemod;
7308 7309 int index, cons;
7309 7310
7310 7311 ASSERT(getpil() == PIL_MAX);
7311 7312 ASSERT(sfmmu_mlist_held(tpp));
7312 7313 ASSERT(sfmmu_mlist_held(dpp));
7313 7314
7314 7315 index = PP_MAPINDEX(tpp);
7315 7316 cons = TTE8K;
7316 7317
7317 7318 /* Update real mappings to the page */
7318 7319 retry:
7319 7320 for (sfhme = tpp->p_mapping; sfhme != NULL; sfhme = sfhme->hme_next) {
7320 7321 if (IS_PAHME(sfhme))
7321 7322 continue;
7322 7323 sfmmu_copytte(&sfhme->hme_tte, &tte);
7323 7324 ttemod = tte;
7324 7325
7325 7326 /*
7326 7327 * replace old pfn with new pfn in TTE
7327 7328 */
7328 7329 PFN_TO_TTE(ttemod, dpp->p_pagenum);
7329 7330
7330 7331 /*
7331 7332 * clear suspend bit
7332 7333 */
7333 7334 ASSERT(TTE_IS_SUSPEND(&ttemod));
7334 7335 TTE_CLR_SUSPEND(&ttemod);
7335 7336
7336 7337 if (sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte) < 0)
7337 7338 panic("hat_pagereload(): sfmmu_modifytte_try() failed");
7338 7339
7339 7340 /*
7340 7341 * set hme_page point to new page
7341 7342 */
7342 7343 sfhme->hme_page = dpp;
7343 7344 }
7344 7345
7345 7346 /*
7346 7347 * move p_mapping list from old page to new page
7347 7348 */
7348 7349 dpp->p_mapping = tpp->p_mapping;
7349 7350 tpp->p_mapping = NULL;
7350 7351 dpp->p_share = tpp->p_share;
7351 7352 tpp->p_share = 0;
7352 7353
7353 7354 while (index != 0) {
7354 7355 index = index >> 1;
7355 7356 if (index != 0)
7356 7357 cons++;
7357 7358 if (index & 0x1) {
7358 7359 tpp = PP_GROUPLEADER(tpp, cons);
7359 7360 dpp = PP_GROUPLEADER(dpp, cons);
7360 7361 goto retry;
7361 7362 }
7362 7363 }
7363 7364
7364 7365 curthread->t_flag &= ~T_DONTDTRACE;
7365 7366 mutex_exit(&kpr_suspendlock);
7366 7367 }
7367 7368
7368 7369 uint_t
7369 7370 hat_pagesync(struct page *pp, uint_t clearflag)
7370 7371 {
7371 7372 struct sf_hment *sfhme, *tmphme = NULL;
7372 7373 struct hme_blk *hmeblkp;
7373 7374 kmutex_t *pml;
7374 7375 cpuset_t cpuset, tset;
7375 7376 int index, cons;
7376 7377 extern ulong_t po_share;
7377 7378 page_t *save_pp = pp;
7378 7379 int stop_on_sh = 0;
7379 7380 uint_t shcnt;
7380 7381
7381 7382 CPUSET_ZERO(cpuset);
7382 7383
7383 7384 if (PP_ISRO(pp) && (clearflag & HAT_SYNC_STOPON_MOD)) {
7384 7385 return (PP_GENERIC_ATTR(pp));
7385 7386 }
7386 7387
7387 7388 if ((clearflag & HAT_SYNC_ZERORM) == 0) {
7388 7389 if ((clearflag & HAT_SYNC_STOPON_REF) && PP_ISREF(pp)) {
7389 7390 return (PP_GENERIC_ATTR(pp));
7390 7391 }
7391 7392 if ((clearflag & HAT_SYNC_STOPON_MOD) && PP_ISMOD(pp)) {
7392 7393 return (PP_GENERIC_ATTR(pp));
7393 7394 }
7394 7395 if (clearflag & HAT_SYNC_STOPON_SHARED) {
7395 7396 if (pp->p_share > po_share) {
7396 7397 hat_page_setattr(pp, P_REF);
7397 7398 return (PP_GENERIC_ATTR(pp));
7398 7399 }
7399 7400 stop_on_sh = 1;
7400 7401 shcnt = 0;
7401 7402 }
7402 7403 }
7403 7404
7404 7405 clearflag &= ~HAT_SYNC_STOPON_SHARED;
7405 7406 pml = sfmmu_mlist_enter(pp);
7406 7407 index = PP_MAPINDEX(pp);
7407 7408 cons = TTE8K;
7408 7409 retry:
7409 7410 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
7410 7411 /*
7411 7412 * We need to save the next hment on the list since
7412 7413 * it is possible for pagesync to remove an invalid hment
7413 7414 * from the list.
7414 7415 */
7415 7416 tmphme = sfhme->hme_next;
7416 7417 if (IS_PAHME(sfhme))
7417 7418 continue;
7418 7419 /*
7419 7420 * If we are looking for large mappings and this hme doesn't
7420 7421 * reach the range we are seeking, just ignore it.
7421 7422 */
7422 7423 hmeblkp = sfmmu_hmetohblk(sfhme);
7423 7424
7424 7425 if (hme_size(sfhme) < cons)
7425 7426 continue;
7426 7427
7427 7428 if (stop_on_sh) {
7428 7429 if (hmeblkp->hblk_shared) {
7429 7430 sf_srd_t *srdp = hblktosrd(hmeblkp);
7430 7431 uint_t rid = hmeblkp->hblk_tag.htag_rid;
7431 7432 sf_region_t *rgnp;
7432 7433 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
7433 7434 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
7434 7435 ASSERT(srdp != NULL);
7435 7436 rgnp = srdp->srd_hmergnp[rid];
7436 7437 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp,
7437 7438 rgnp, rid);
7438 7439 shcnt += rgnp->rgn_refcnt;
7439 7440 } else {
7440 7441 shcnt++;
7441 7442 }
7442 7443 if (shcnt > po_share) {
7443 7444 /*
7444 7445 * tell the pager to spare the page this time
7445 7446 * around.
7446 7447 */
7447 7448 hat_page_setattr(save_pp, P_REF);
7448 7449 index = 0;
7449 7450 break;
7450 7451 }
7451 7452 }
7452 7453 tset = sfmmu_pagesync(pp, sfhme,
7453 7454 clearflag & ~HAT_SYNC_STOPON_RM);
7454 7455 CPUSET_OR(cpuset, tset);
7455 7456
7456 7457 /*
7457 7458 * If clearflag is HAT_SYNC_DONTZERO, break out as soon
7458 7459 * as the "ref" or "mod" is set or share cnt exceeds po_share.
7459 7460 */
7460 7461 if ((clearflag & ~HAT_SYNC_STOPON_RM) == HAT_SYNC_DONTZERO &&
7461 7462 (((clearflag & HAT_SYNC_STOPON_MOD) && PP_ISMOD(save_pp)) ||
7462 7463 ((clearflag & HAT_SYNC_STOPON_REF) && PP_ISREF(save_pp)))) {
7463 7464 index = 0;
7464 7465 break;
7465 7466 }
7466 7467 }
7467 7468
7468 7469 while (index) {
7469 7470 index = index >> 1;
7470 7471 cons++;
7471 7472 if (index & 0x1) {
7472 7473 /* Go to leading page */
7473 7474 pp = PP_GROUPLEADER(pp, cons);
7474 7475 goto retry;
7475 7476 }
7476 7477 }
7477 7478
7478 7479 xt_sync(cpuset);
7479 7480 sfmmu_mlist_exit(pml);
7480 7481 return (PP_GENERIC_ATTR(save_pp));
7481 7482 }
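
A hypothetical pageout-style caller of hat_pagesync(), sketched only to show how the HAT_SYNC_* stop conditions combine with the returned attribute bits; the helper itself is invented for illustration and assumes kernel context with <vm/hat.h> available.

    /*
     * Hypothetical helper: report whether any mapping has dirtied pp,
     * without clearing the ref/mod bits.
     */
    static int
    demo_page_is_dirty(page_t *pp)
    {
        uint_t attr;

        /* stop scanning mappings as soon as a modified TTE is found */
        attr = hat_pagesync(pp, HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD);
        return ((attr & P_MOD) != 0);
    }
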
7482 7483
7483 7484 /*
7484 7485 * Get all the hardware dependent attributes for a page struct
7485 7486 */
7486 7487 static cpuset_t
7487 7488 sfmmu_pagesync(struct page *pp, struct sf_hment *sfhme,
7488 7489 uint_t clearflag)
7489 7490 {
7490 7491 caddr_t addr;
7491 7492 tte_t tte, ttemod;
7492 7493 struct hme_blk *hmeblkp;
7493 7494 int ret;
7494 7495 sfmmu_t *sfmmup;
7495 7496 cpuset_t cpuset;
7496 7497
7497 7498 ASSERT(pp != NULL);
7498 7499 ASSERT(sfmmu_mlist_held(pp));
7499 7500 ASSERT((clearflag == HAT_SYNC_DONTZERO) ||
7500 7501 (clearflag == HAT_SYNC_ZERORM));
7501 7502
7502 7503 SFMMU_STAT(sf_pagesync);
7503 7504
7504 7505 CPUSET_ZERO(cpuset);
7505 7506
7506 7507 sfmmu_pagesync_retry:
7507 7508
7508 7509 sfmmu_copytte(&sfhme->hme_tte, &tte);
7509 7510 if (TTE_IS_VALID(&tte)) {
7510 7511 hmeblkp = sfmmu_hmetohblk(sfhme);
7511 7512 sfmmup = hblktosfmmu(hmeblkp);
7512 7513 addr = tte_to_vaddr(hmeblkp, tte);
7513 7514 if (clearflag == HAT_SYNC_ZERORM) {
7514 7515 ttemod = tte;
7515 7516 TTE_CLR_RM(&ttemod);
7516 7517 ret = sfmmu_modifytte_try(&tte, &ttemod,
7517 7518 &sfhme->hme_tte);
7518 7519 if (ret < 0) {
7519 7520 /*
7520 7521 * cas failed and the new value is not what
7521 7522 * we want.
7522 7523 */
7523 7524 goto sfmmu_pagesync_retry;
7524 7525 }
7525 7526
7526 7527 if (ret > 0) {
7527 7528 /* we win the cas */
7528 7529 if (hmeblkp->hblk_shared) {
7529 7530 sf_srd_t *srdp = (sf_srd_t *)sfmmup;
7530 7531 uint_t rid =
7531 7532 hmeblkp->hblk_tag.htag_rid;
7532 7533 sf_region_t *rgnp;
7533 7534 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
7534 7535 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
7535 7536 ASSERT(srdp != NULL);
7536 7537 rgnp = srdp->srd_hmergnp[rid];
7537 7538 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp,
7538 7539 srdp, rgnp, rid);
7539 7540 cpuset = sfmmu_rgntlb_demap(addr,
7540 7541 rgnp, hmeblkp, 1);
7541 7542 } else {
7542 7543 sfmmu_tlb_demap(addr, sfmmup, hmeblkp,
7543 7544 0, 0);
7544 7545 cpuset = sfmmup->sfmmu_cpusran;
7545 7546 }
7546 7547 }
7547 7548 }
7548 7549 sfmmu_ttesync(hmeblkp->hblk_shared ? NULL : sfmmup, addr,
7549 7550 &tte, pp);
7550 7551 }
7551 7552 return (cpuset);
7552 7553 }
7553 7554
7554 7555 /*
7555 7556  * Remove write permission from a mapping to a page, so that
7556 7557 * we can detect the next modification of it. This requires modifying
7557 7558 * the TTE then invalidating (demap) any TLB entry using that TTE.
7558 7559 * This code is similar to sfmmu_pagesync().
7559 7560 */
7560 7561 static cpuset_t
7561 7562 sfmmu_pageclrwrt(struct page *pp, struct sf_hment *sfhme)
7562 7563 {
7563 7564 caddr_t addr;
7564 7565 tte_t tte;
7565 7566 tte_t ttemod;
7566 7567 struct hme_blk *hmeblkp;
7567 7568 int ret;
7568 7569 sfmmu_t *sfmmup;
7569 7570 cpuset_t cpuset;
7570 7571
7571 7572 ASSERT(pp != NULL);
7572 7573 ASSERT(sfmmu_mlist_held(pp));
7573 7574
7574 7575 CPUSET_ZERO(cpuset);
7575 7576 SFMMU_STAT(sf_clrwrt);
7576 7577
7577 7578 retry:
7578 7579
7579 7580 sfmmu_copytte(&sfhme->hme_tte, &tte);
7580 7581 if (TTE_IS_VALID(&tte) && TTE_IS_WRITABLE(&tte)) {
7581 7582 hmeblkp = sfmmu_hmetohblk(sfhme);
7582 7583 sfmmup = hblktosfmmu(hmeblkp);
7583 7584 addr = tte_to_vaddr(hmeblkp, tte);
7584 7585
7585 7586 ttemod = tte;
7586 7587 TTE_CLR_WRT(&ttemod);
7587 7588 TTE_CLR_MOD(&ttemod);
7588 7589 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte);
7589 7590
7590 7591 /*
7591 7592 * if cas failed and the new value is not what
7592 7593 * we want retry
7593 7594 */
7594 7595 if (ret < 0)
7595 7596 goto retry;
7596 7597
7597 7598 /* we win the cas */
7598 7599 if (ret > 0) {
7599 7600 if (hmeblkp->hblk_shared) {
7600 7601 sf_srd_t *srdp = (sf_srd_t *)sfmmup;
7601 7602 uint_t rid = hmeblkp->hblk_tag.htag_rid;
7602 7603 sf_region_t *rgnp;
7603 7604 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
7604 7605 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
7605 7606 ASSERT(srdp != NULL);
7606 7607 rgnp = srdp->srd_hmergnp[rid];
7607 7608 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp,
7608 7609 srdp, rgnp, rid);
7609 7610 cpuset = sfmmu_rgntlb_demap(addr,
7610 7611 rgnp, hmeblkp, 1);
7611 7612 } else {
7612 7613 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0);
7613 7614 cpuset = sfmmup->sfmmu_cpusran;
7614 7615 }
7615 7616 }
7616 7617 }
7617 7618
7618 7619 return (cpuset);
7619 7620 }
7620 7621
7621 7622 /*
7622 7623 * Walk all mappings of a page, removing write permission and clearing the
7623 7624 * ref/mod bits. This code is similar to hat_pagesync()
7624 7625 */
7625 7626 static void
7626 7627 hat_page_clrwrt(page_t *pp)
7627 7628 {
7628 7629 struct sf_hment *sfhme;
7629 7630 struct sf_hment *tmphme = NULL;
7630 7631 kmutex_t *pml;
7631 7632 cpuset_t cpuset;
7632 7633 cpuset_t tset;
7633 7634 int index;
7634 7635 int cons;
7635 7636
7636 7637 CPUSET_ZERO(cpuset);
7637 7638
7638 7639 pml = sfmmu_mlist_enter(pp);
7639 7640 index = PP_MAPINDEX(pp);
7640 7641 cons = TTE8K;
7641 7642 retry:
7642 7643 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
7643 7644 tmphme = sfhme->hme_next;
7644 7645
7645 7646 /*
7646 7647 * If we are looking for large mappings and this hme doesn't
7647 7648 		 * reach the range we are seeking, just ignore it.
7648 7649 */
7649 7650
7650 7651 if (hme_size(sfhme) < cons)
7651 7652 continue;
7652 7653
7653 7654 tset = sfmmu_pageclrwrt(pp, sfhme);
7654 7655 CPUSET_OR(cpuset, tset);
7655 7656 }
7656 7657
7657 7658 while (index) {
7658 7659 index = index >> 1;
7659 7660 cons++;
7660 7661 if (index & 0x1) {
7661 7662 /* Go to leading page */
7662 7663 pp = PP_GROUPLEADER(pp, cons);
7663 7664 goto retry;
7664 7665 }
7665 7666 }
7666 7667
7667 7668 xt_sync(cpuset);
7668 7669 sfmmu_mlist_exit(pml);
7669 7670 }
7670 7671
7671 7672 /*
7672 7673 * Set the given REF/MOD/RO bits for the given page.
7673 7674 * For a vnode with a sorted v_pages list, we need to change
7674 7675 * the attributes and the v_pages list together under page_vnode_mutex.
7675 7676 */
7676 7677 void
7677 7678 hat_page_setattr(page_t *pp, uint_t flag)
7678 7679 {
7679 7680 vnode_t *vp = pp->p_vnode;
7680 7681 page_t **listp;
7681 7682 kmutex_t *pmtx;
7682 7683 kmutex_t *vphm = NULL;
7683 7684 int noshuffle;
7684 7685
7685 7686 noshuffle = flag & P_NSH;
7686 7687 flag &= ~P_NSH;
7687 7688
7688 7689 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO)));
7689 7690
7690 7691 /*
7691 7692 * nothing to do if attribute already set
7692 7693 */
7693 7694 if ((pp->p_nrm & flag) == flag)
7694 7695 return;
7695 7696
7696 7697 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp) &&
7697 7698 !noshuffle) {
7698 7699 vphm = page_vnode_mutex(vp);
7699 7700 mutex_enter(vphm);
7700 7701 }
7701 7702
7702 7703 pmtx = sfmmu_page_enter(pp);
7703 7704 pp->p_nrm |= flag;
7704 7705 sfmmu_page_exit(pmtx);
7705 7706
7706 7707 if (vphm != NULL) {
7707 7708 /*
7708 7709 		 * Some file systems examine v_pages for NULL without
7709 7710 * grabbing the vphm mutex. Must not let it become NULL when
7710 7711 * pp is the only page on the list.
7711 7712 */
7712 7713 if (pp->p_vpnext != pp) {
7713 7714 page_vpsub(&vp->v_pages, pp);
7714 7715 if (vp->v_pages != NULL)
7715 7716 listp = &vp->v_pages->p_vpprev->p_vpnext;
7716 7717 else
7717 7718 listp = &vp->v_pages;
7718 7719 page_vpadd(listp, pp);
7719 7720 }
7720 7721 mutex_exit(vphm);
7721 7722 }
7722 7723 }
7723 7724
7724 7725 void
7725 7726 hat_page_clrattr(page_t *pp, uint_t flag)
7726 7727 {
7727 7728 vnode_t *vp = pp->p_vnode;
7728 7729 kmutex_t *pmtx;
7729 7730
7730 7731 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO)));
7731 7732
7732 7733 pmtx = sfmmu_page_enter(pp);
7733 7734
7734 7735 /*
7735 7736 * Caller is expected to hold page's io lock for VMODSORT to work
7736 7737 * correctly with pvn_vplist_dirty() and pvn_getdirty() when mod
7737 7738 * bit is cleared.
7738 7739 	 * We don't have an assert here, to avoid tripping some existing third party
7739 7740 	 * code. The dirty page is moved back to the top of the v_pages list
7740 7741 * after IO is done in pvn_write_done().
7741 7742 */
7742 7743 pp->p_nrm &= ~flag;
7743 7744 sfmmu_page_exit(pmtx);
7744 7745
7745 7746 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) {
7746 7747
7747 7748 /*
7748 7749 * VMODSORT works by removing write permissions and getting
7749 7750 * a fault when a page is made dirty. At this point
7750 7751 * we need to remove write permission from all mappings
7751 7752 * to this page.
7752 7753 */
7753 7754 hat_page_clrwrt(pp);
7754 7755 }
7755 7756 }
7756 7757
7757 7758 uint_t
7758 7759 hat_page_getattr(page_t *pp, uint_t flag)
7759 7760 {
7760 7761 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO)));
7761 7762 return ((uint_t)(pp->p_nrm & flag));
7762 7763 }
7763 7764
7764 7765 /*
7765 7766 * DEBUG kernels: verify that a kernel va<->pa translation
7766 7767 * is safe by checking the underlying page_t is in a page
7767 7768 * relocation-safe state.
7768 7769 */
7769 7770 #ifdef DEBUG
7770 7771 void
7771 7772 sfmmu_check_kpfn(pfn_t pfn)
7772 7773 {
7773 7774 page_t *pp;
7774 7775 int index, cons;
7775 7776
7776 7777 if (hat_check_vtop == 0)
7777 7778 return;
7778 7779
7779 7780 if (kvseg.s_base == NULL || panicstr)
7780 7781 return;
7781 7782
7782 7783 pp = page_numtopp_nolock(pfn);
7783 7784 if (!pp)
7784 7785 return;
7785 7786
7786 7787 if (PAGE_LOCKED(pp) || PP_ISNORELOC(pp))
7787 7788 return;
7788 7789
7789 7790 /*
7790 7791 * Handed a large kernel page, we dig up the root page since we
7791 7792 * know the root page might have the lock also.
7792 7793 */
7793 7794 if (pp->p_szc != 0) {
7794 7795 index = PP_MAPINDEX(pp);
7795 7796 cons = TTE8K;
7796 7797 again:
7797 7798 while (index != 0) {
7798 7799 index >>= 1;
7799 7800 if (index != 0)
7800 7801 cons++;
7801 7802 if (index & 0x1) {
7802 7803 pp = PP_GROUPLEADER(pp, cons);
7803 7804 goto again;
7804 7805 }
7805 7806 }
7806 7807 }
7807 7808
7808 7809 if (PAGE_LOCKED(pp) || PP_ISNORELOC(pp))
7809 7810 return;
7810 7811
7811 7812 /*
7812 7813 * Pages need to be locked or allocated "permanent" (either from
7813 7814 * static_arena arena or explicitly setting PG_NORELOC when calling
7814 7815 * page_create_va()) for VA->PA translations to be valid.
7815 7816 */
7816 7817 if (!PP_ISNORELOC(pp))
7817 7818 panic("Illegal VA->PA translation, pp 0x%p not permanent",
7818 7819 (void *)pp);
7819 7820 else
7820 7821 panic("Illegal VA->PA translation, pp 0x%p not locked",
7821 7822 (void *)pp);
7822 7823 }
7823 7824 #endif /* DEBUG */
7824 7825
7825 7826 /*
7826 7827 * Returns a page frame number for a given virtual address.
7827 7828 * Returns PFN_INVALID to indicate an invalid mapping
7828 7829 */
7829 7830 pfn_t
7830 7831 hat_getpfnum(struct hat *hat, caddr_t addr)
7831 7832 {
7832 7833 pfn_t pfn;
7833 7834 tte_t tte;
7834 7835
7835 7836 /*
7836 7837 * We would like to
7837 7838 * ASSERT(AS_LOCK_HELD(as));
7838 7839 * but we can't because the iommu driver will call this
7839 7840 * routine at interrupt time and it can't grab the as lock
7840 7841 * or it will deadlock: A thread could have the as lock
7841 7842 * and be waiting for io. The io can't complete
7842 7843 * because the interrupt thread is blocked trying to grab
7843 7844 * the as lock.
7844 7845 */
7845 7846
7846 7847 if (hat == ksfmmup) {
7847 7848 if (IS_KMEM_VA_LARGEPAGE(addr)) {
7848 7849 ASSERT(segkmem_lpszc > 0);
7849 7850 pfn = sfmmu_kvaszc2pfn(addr, segkmem_lpszc);
7850 7851 if (pfn != PFN_INVALID) {
7851 7852 sfmmu_check_kpfn(pfn);
7852 7853 return (pfn);
7853 7854 }
7854 7855 } else if (segkpm && IS_KPM_ADDR(addr)) {
7855 7856 return (sfmmu_kpm_vatopfn(addr));
7856 7857 }
7857 7858 while ((pfn = sfmmu_vatopfn(addr, ksfmmup, &tte))
7858 7859 == PFN_SUSPENDED) {
7859 7860 sfmmu_vatopfn_suspended(addr, ksfmmup, &tte);
7860 7861 }
7861 7862 sfmmu_check_kpfn(pfn);
7862 7863 return (pfn);
7863 7864 } else {
7864 7865 return (sfmmu_uvatopfn(addr, hat, NULL));
7865 7866 }
7866 7867 }
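
A minimal sketch of a kernel caller of hat_getpfnum() on a kernel virtual address, of the kind a DMA setup path might use; the helper and its error handling are invented for illustration.

    /*
     * Hypothetical helper: translate a kernel VA to a page frame number.
     * kas, PFN_INVALID and cmn_err() come from the standard kernel headers.
     */
    static pfn_t
    demo_kva_to_pfn(caddr_t kva)
    {
        pfn_t pfn;

        pfn = hat_getpfnum(kas.a_hat, kva);
        if (pfn == PFN_INVALID)
            cmn_err(CE_WARN, "no valid mapping for %p", (void *)kva);
        return (pfn);
    }
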
7867 7868
7868 7869 /*
7869 7870 * This routine will return both pfn and tte for the vaddr.
7870 7871 */
7871 7872 static pfn_t
7872 7873 sfmmu_uvatopfn(caddr_t vaddr, struct hat *sfmmup, tte_t *ttep)
7873 7874 {
7874 7875 struct hmehash_bucket *hmebp;
7875 7876 hmeblk_tag hblktag;
7876 7877 int hmeshift, hashno = 1;
7877 7878 struct hme_blk *hmeblkp = NULL;
7878 7879 tte_t tte;
7879 7880
7880 7881 struct sf_hment *sfhmep;
7881 7882 pfn_t pfn;
7882 7883
7883 7884 /* support for ISM */
7884 7885 ism_map_t *ism_map;
7885 7886 ism_blk_t *ism_blkp;
7886 7887 int i;
7887 7888 sfmmu_t *ism_hatid = NULL;
7888 7889 sfmmu_t *locked_hatid = NULL;
7889 7890 sfmmu_t *sv_sfmmup = sfmmup;
7890 7891 caddr_t sv_vaddr = vaddr;
7891 7892 sf_srd_t *srdp;
7892 7893
7893 7894 if (ttep == NULL) {
7894 7895 ttep = &tte;
7895 7896 } else {
7896 7897 ttep->ll = 0;
7897 7898 }
7898 7899
7899 7900 ASSERT(sfmmup != ksfmmup);
7900 7901 SFMMU_STAT(sf_user_vtop);
7901 7902 /*
7902 7903 * Set ism_hatid if vaddr falls in a ISM segment.
7903 7904 */
7904 7905 ism_blkp = sfmmup->sfmmu_iblk;
7905 7906 if (ism_blkp != NULL) {
7906 7907 sfmmu_ismhat_enter(sfmmup, 0);
7907 7908 locked_hatid = sfmmup;
7908 7909 }
7909 7910 while (ism_blkp != NULL && ism_hatid == NULL) {
7910 7911 ism_map = ism_blkp->iblk_maps;
7911 7912 for (i = 0; ism_map[i].imap_ismhat && i < ISM_MAP_SLOTS; i++) {
7912 7913 if (vaddr >= ism_start(ism_map[i]) &&
7913 7914 vaddr < ism_end(ism_map[i])) {
7914 7915 sfmmup = ism_hatid = ism_map[i].imap_ismhat;
7915 7916 vaddr = (caddr_t)(vaddr -
7916 7917 ism_start(ism_map[i]));
7917 7918 break;
7918 7919 }
7919 7920 }
7920 7921 ism_blkp = ism_blkp->iblk_next;
7921 7922 }
7922 7923 if (locked_hatid) {
7923 7924 sfmmu_ismhat_exit(locked_hatid, 0);
7924 7925 }
7925 7926
7926 7927 hblktag.htag_id = sfmmup;
7927 7928 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
7928 7929 do {
7929 7930 hmeshift = HME_HASH_SHIFT(hashno);
7930 7931 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift);
7931 7932 hblktag.htag_rehash = hashno;
7932 7933 hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift);
7933 7934
7934 7935 SFMMU_HASH_LOCK(hmebp);
7935 7936
7936 7937 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp);
7937 7938 if (hmeblkp != NULL) {
7938 7939 ASSERT(!hmeblkp->hblk_shared);
7939 7940 HBLKTOHME(sfhmep, hmeblkp, vaddr);
7940 7941 sfmmu_copytte(&sfhmep->hme_tte, ttep);
7941 7942 SFMMU_HASH_UNLOCK(hmebp);
7942 7943 if (TTE_IS_VALID(ttep)) {
7943 7944 pfn = TTE_TO_PFN(vaddr, ttep);
7944 7945 return (pfn);
7945 7946 }
7946 7947 break;
7947 7948 }
7948 7949 SFMMU_HASH_UNLOCK(hmebp);
7949 7950 hashno++;
7950 7951 } while (HME_REHASH(sfmmup) && (hashno <= mmu_hashcnt));
7951 7952
7952 7953 if (SF_HMERGNMAP_ISNULL(sv_sfmmup)) {
7953 7954 return (PFN_INVALID);
7954 7955 }
7955 7956 srdp = sv_sfmmup->sfmmu_srdp;
7956 7957 ASSERT(srdp != NULL);
7957 7958 ASSERT(srdp->srd_refcnt != 0);
7958 7959 hblktag.htag_id = srdp;
7959 7960 hashno = 1;
7960 7961 do {
7961 7962 hmeshift = HME_HASH_SHIFT(hashno);
7962 7963 hblktag.htag_bspage = HME_HASH_BSPAGE(sv_vaddr, hmeshift);
7963 7964 hblktag.htag_rehash = hashno;
7964 7965 hmebp = HME_HASH_FUNCTION(srdp, sv_vaddr, hmeshift);
7965 7966
7966 7967 SFMMU_HASH_LOCK(hmebp);
7967 7968 for (hmeblkp = hmebp->hmeblkp; hmeblkp != NULL;
7968 7969 hmeblkp = hmeblkp->hblk_next) {
7969 7970 uint_t rid;
7970 7971 sf_region_t *rgnp;
7971 7972 caddr_t rsaddr;
7972 7973 caddr_t readdr;
7973 7974
7974 7975 if (!HTAGS_EQ_SHME(hmeblkp->hblk_tag, hblktag,
7975 7976 sv_sfmmup->sfmmu_hmeregion_map)) {
7976 7977 continue;
7977 7978 }
7978 7979 ASSERT(hmeblkp->hblk_shared);
7979 7980 rid = hmeblkp->hblk_tag.htag_rid;
7980 7981 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
7981 7982 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
7982 7983 rgnp = srdp->srd_hmergnp[rid];
7983 7984 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid);
7984 7985 HBLKTOHME(sfhmep, hmeblkp, sv_vaddr);
7985 7986 sfmmu_copytte(&sfhmep->hme_tte, ttep);
7986 7987 rsaddr = rgnp->rgn_saddr;
7987 7988 readdr = rsaddr + rgnp->rgn_size;
7988 7989 #ifdef DEBUG
7989 7990 if (TTE_IS_VALID(ttep) ||
7990 7991 get_hblk_ttesz(hmeblkp) > TTE8K) {
7991 7992 caddr_t eva = tte_to_evaddr(hmeblkp, ttep);
7992 7993 ASSERT(eva > sv_vaddr);
7993 7994 ASSERT(sv_vaddr >= rsaddr);
7994 7995 ASSERT(sv_vaddr < readdr);
7995 7996 ASSERT(eva <= readdr);
7996 7997 }
7997 7998 #endif /* DEBUG */
7998 7999 /*
7999 8000 * Continue the search if we
8000 8001 * found an invalid 8K tte outside of the area
8001 8002 * covered by this hmeblk's region.
8002 8003 */
8003 8004 if (TTE_IS_VALID(ttep)) {
8004 8005 SFMMU_HASH_UNLOCK(hmebp);
8005 8006 pfn = TTE_TO_PFN(sv_vaddr, ttep);
8006 8007 return (pfn);
8007 8008 } else if (get_hblk_ttesz(hmeblkp) > TTE8K ||
8008 8009 (sv_vaddr >= rsaddr && sv_vaddr < readdr)) {
8009 8010 SFMMU_HASH_UNLOCK(hmebp);
8010 8011 pfn = PFN_INVALID;
8011 8012 return (pfn);
8012 8013 }
8013 8014 }
8014 8015 SFMMU_HASH_UNLOCK(hmebp);
8015 8016 hashno++;
8016 8017 } while (hashno <= mmu_hashcnt);
8017 8018 return (PFN_INVALID);
8018 8019 }
8019 8020
8020 8021
8021 8022 /*
8022 8023  * For compatibility with AT&T and later optimizations
8023 8024 */
8024 8025 /* ARGSUSED */
8025 8026 void
8026 8027 hat_map(struct hat *hat, caddr_t addr, size_t len, uint_t flags)
8027 8028 {
8028 8029 ASSERT(hat != NULL);
8029 8030 }
8030 8031
8031 8032 /*
8032 8033 * Return the number of mappings to a particular page. This number is an
8033 8034 * approximation of the number of people sharing the page.
8034 8035 *
8035 8036 * shared hmeblks or ism hmeblks are counted as 1 mapping here.
8036 8037  * hat_page_checkshare() can be used to compare a threshold against a share
8037 8038  * count that reflects the number of region sharers, albeit at higher cost.
8038 8039 */
8039 8040 ulong_t
8040 8041 hat_page_getshare(page_t *pp)
8041 8042 {
8042 8043 page_t *spp = pp; /* start page */
8043 8044 kmutex_t *pml;
8044 8045 ulong_t cnt;
8045 8046 int index, sz = TTE64K;
8046 8047
8047 8048 /*
8048 8049 * We need to grab the mlist lock to make sure any outstanding
8049 8050 * load/unloads complete. Otherwise we could return zero
8050 8051 * even though the unload(s) hasn't finished yet.
8051 8052 */
8052 8053 pml = sfmmu_mlist_enter(spp);
8053 8054 cnt = spp->p_share;
8054 8055
8055 8056 #ifdef VAC
8056 8057 if (kpm_enable)
8057 8058 cnt += spp->p_kpmref;
8058 8059 #endif
8059 8060 if (vpm_enable && pp->p_vpmref) {
8060 8061 cnt += 1;
8061 8062 }
8062 8063
8063 8064 /*
8064 8065 * If we have any large mappings, we count the number of
8065 8066 * mappings that this large page is part of.
8066 8067 */
8067 8068 index = PP_MAPINDEX(spp);
8068 8069 index >>= 1;
8069 8070 while (index) {
8070 8071 pp = PP_GROUPLEADER(spp, sz);
8071 8072 if ((index & 0x1) && pp != spp) {
8072 8073 cnt += pp->p_share;
8073 8074 spp = pp;
8074 8075 }
8075 8076 index >>= 1;
8076 8077 sz++;
8077 8078 }
8078 8079 sfmmu_mlist_exit(pml);
8079 8080 return (cnt);
8080 8081 }
8081 8082
8082 8083 /*
8083 8084 * Return 1 if the number of mappings exceeds sh_thresh. Return 0
8084 8085 * otherwise. Count shared hmeblks by region's refcnt.
8085 8086 */
8086 8087 int
8087 8088 hat_page_checkshare(page_t *pp, ulong_t sh_thresh)
8088 8089 {
8089 8090 kmutex_t *pml;
8090 8091 ulong_t cnt = 0;
8091 8092 int index, sz = TTE8K;
8092 8093 struct sf_hment *sfhme, *tmphme = NULL;
8093 8094 struct hme_blk *hmeblkp;
8094 8095
8095 8096 pml = sfmmu_mlist_enter(pp);
8096 8097
8097 8098 #ifdef VAC
8098 8099 if (kpm_enable)
8099 8100 cnt = pp->p_kpmref;
8100 8101 #endif
8101 8102
8102 8103 if (vpm_enable && pp->p_vpmref) {
8103 8104 cnt += 1;
8104 8105 }
8105 8106
8106 8107 if (pp->p_share + cnt > sh_thresh) {
8107 8108 sfmmu_mlist_exit(pml);
8108 8109 return (1);
8109 8110 }
8110 8111
8111 8112 index = PP_MAPINDEX(pp);
8112 8113
8113 8114 again:
8114 8115 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
8115 8116 tmphme = sfhme->hme_next;
8116 8117 if (IS_PAHME(sfhme)) {
8117 8118 continue;
8118 8119 }
8119 8120
8120 8121 hmeblkp = sfmmu_hmetohblk(sfhme);
8121 8122 if (hme_size(sfhme) != sz) {
8122 8123 continue;
8123 8124 }
8124 8125
8125 8126 if (hmeblkp->hblk_shared) {
8126 8127 sf_srd_t *srdp = hblktosrd(hmeblkp);
8127 8128 uint_t rid = hmeblkp->hblk_tag.htag_rid;
8128 8129 sf_region_t *rgnp;
8129 8130 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
8130 8131 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
8131 8132 ASSERT(srdp != NULL);
8132 8133 rgnp = srdp->srd_hmergnp[rid];
8133 8134 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp,
8134 8135 rgnp, rid);
8135 8136 cnt += rgnp->rgn_refcnt;
8136 8137 } else {
8137 8138 cnt++;
8138 8139 }
8139 8140 if (cnt > sh_thresh) {
8140 8141 sfmmu_mlist_exit(pml);
8141 8142 return (1);
8142 8143 }
8143 8144 }
8144 8145
8145 8146 index >>= 1;
8146 8147 sz++;
8147 8148 while (index) {
8148 8149 pp = PP_GROUPLEADER(pp, sz);
8149 8150 ASSERT(sfmmu_mlist_held(pp));
8150 8151 if (index & 0x1) {
8151 8152 goto again;
8152 8153 }
8153 8154 index >>= 1;
8154 8155 sz++;
8155 8156 }
8156 8157 sfmmu_mlist_exit(pml);
8157 8158 return (0);
8158 8159 }
8159 8160
8160 8161 /*
8161 8162 * Unload all large mappings to the pp and reset the p_szc field of every
8162 8163 * constituent page according to the remaining mappings.
8163 8164 *
8164 8165 * pp must be locked SE_EXCL. Even though no other constituent pages are
8165 8166 * locked it's legal to unload the large mappings to the pp because all
8166 8167 * constituent pages of large locked mappings have to be locked SE_SHARED.
8167 8168 * This means if we have SE_EXCL lock on one of constituent pages none of the
8168 8169 * large mappings to pp are locked.
8169 8170 *
8170 8171 * Decrease p_szc field starting from the last constituent page and ending
8171 8172 * with the root page. This method is used because other threads rely on the
8172 8173  * root's p_szc to find the lock to synchronize on. After a root page_t's p_szc
8173 8174 * is demoted then other threads will succeed in sfmmu_mlspl_enter(). This
8174 8175 * ensures that p_szc changes of the constituent pages appears atomic for all
8175 8176 * threads that use sfmmu_mlspl_enter() to examine p_szc field.
8176 8177 *
8177 8178 * This mechanism is only used for file system pages where it's not always
8178 8179 * possible to get SE_EXCL locks on all constituent pages to demote the size
8179 8180 * code (as is done for anonymous or kernel large pages).
8180 8181 *
8181 8182 * See more comments in front of sfmmu_mlspl_enter().
8182 8183 */
8183 8184 void
8184 8185 hat_page_demote(page_t *pp)
8185 8186 {
8186 8187 int index;
8187 8188 int sz;
8188 8189 cpuset_t cpuset;
8189 8190 int sync = 0;
8190 8191 page_t *rootpp;
8191 8192 struct sf_hment *sfhme;
8192 8193 struct sf_hment *tmphme = NULL;
8193 - struct hme_blk *hmeblkp;
8194 8194 uint_t pszc;
8195 8195 page_t *lastpp;
8196 8196 cpuset_t tset;
8197 8197 pgcnt_t npgs;
8198 8198 kmutex_t *pml;
8199 8199 kmutex_t *pmtx = NULL;
8200 8200
8201 8201 ASSERT(PAGE_EXCL(pp));
8202 8202 ASSERT(!PP_ISFREE(pp));
8203 8203 ASSERT(!PP_ISKAS(pp));
8204 8204 ASSERT(page_szc_lock_assert(pp));
8205 8205 pml = sfmmu_mlist_enter(pp);
8206 8206
8207 8207 pszc = pp->p_szc;
8208 8208 if (pszc == 0) {
8209 8209 goto out;
8210 8210 }
8211 8211
8212 8212 index = PP_MAPINDEX(pp) >> 1;
8213 8213
8214 8214 if (index) {
8215 8215 CPUSET_ZERO(cpuset);
8216 8216 sz = TTE64K;
8217 8217 sync = 1;
8218 8218 }
8219 8219
8220 8220 while (index) {
8221 8221 if (!(index & 0x1)) {
8222 8222 index >>= 1;
8223 8223 sz++;
8224 8224 continue;
8225 8225 }
8226 8226 ASSERT(sz <= pszc);
8227 8227 rootpp = PP_GROUPLEADER(pp, sz);
8228 8228 for (sfhme = rootpp->p_mapping; sfhme; sfhme = tmphme) {
8229 8229 tmphme = sfhme->hme_next;
8230 8230 ASSERT(!IS_PAHME(sfhme));
8231 - hmeblkp = sfmmu_hmetohblk(sfhme);
8232 8231 if (hme_size(sfhme) != sz) {
8233 8232 continue;
8234 8233 }
8235 8234 tset = sfmmu_pageunload(rootpp, sfhme, sz);
8236 8235 CPUSET_OR(cpuset, tset);
8237 8236 }
8238 8237 if (index >>= 1) {
8239 8238 sz++;
8240 8239 }
8241 8240 }
8242 8241
8243 8242 ASSERT(!PP_ISMAPPED_LARGE(pp));
8244 8243
8245 8244 if (sync) {
8246 8245 xt_sync(cpuset);
8247 8246 #ifdef VAC
8248 8247 if (PP_ISTNC(pp)) {
8249 8248 conv_tnc(rootpp, sz);
8250 8249 }
8251 8250 #endif /* VAC */
8252 8251 }
8253 8252
8254 8253 pmtx = sfmmu_page_enter(pp);
8255 8254
8256 8255 ASSERT(pp->p_szc == pszc);
8257 8256 rootpp = PP_PAGEROOT(pp);
8258 8257 ASSERT(rootpp->p_szc == pszc);
8259 8258 lastpp = PP_PAGENEXT_N(rootpp, TTEPAGES(pszc) - 1);
8260 8259
8261 8260 while (lastpp != rootpp) {
8262 8261 sz = PP_MAPINDEX(lastpp) ? fnd_mapping_sz(lastpp) : 0;
8263 8262 ASSERT(sz < pszc);
8264 8263 npgs = (sz == 0) ? 1 : TTEPAGES(sz);
8265 8264 ASSERT(P2PHASE(lastpp->p_pagenum, npgs) == npgs - 1);
8266 8265 while (--npgs > 0) {
8267 8266 lastpp->p_szc = (uchar_t)sz;
8268 8267 lastpp = PP_PAGEPREV(lastpp);
8269 8268 }
8270 8269 if (sz) {
8271 8270 /*
8272 8271 * make sure before current root's pszc
8273 8272 * is updated all updates to constituent pages pszc
8274 8273 * fields are globally visible.
8275 8274 */
8276 8275 membar_producer();
8277 8276 }
8278 8277 lastpp->p_szc = sz;
8279 8278 ASSERT(IS_P2ALIGNED(lastpp->p_pagenum, TTEPAGES(sz)));
8280 8279 if (lastpp != rootpp) {
8281 8280 lastpp = PP_PAGEPREV(lastpp);
8282 8281 }
8283 8282 }
8284 8283 if (sz == 0) {
8285 8284 /* the loop above doesn't cover this case */
8286 8285 rootpp->p_szc = 0;
8287 8286 }
8288 8287 out:
8289 8288 ASSERT(pp->p_szc == 0);
8290 8289 if (pmtx != NULL) {
8291 8290 sfmmu_page_exit(pmtx);
8292 8291 }
8293 8292 sfmmu_mlist_exit(pml);
8294 8293 }
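
The demotion loop in hat_page_demote() publishes the change by storing the constituent p_szc values first and the root's p_szc last, with membar_producer() in between, so a thread that keys off the root sees the constituents already updated. A minimal, self-contained userland sketch of that publication order (all names hypothetical, C11 atomics standing in for the SPARC membar; not code from this file) could look like:

#include <stdatomic.h>
#include <stdio.h>

#define NPAGES  8

static _Atomic unsigned char szc[NPAGES];       /* stand-in for p_szc */

/*
 * Demote a group of constituent "pages" to size code 0: store the
 * trailing entries first, then publish the root entry with release
 * semantics (analogous to membar_producer() before the root update),
 * so a reader that keys off the root sees the constituents updated.
 */
static void
demote_group(void)
{
    for (int i = NPAGES - 1; i > 0; i--)
        atomic_store_explicit(&szc[i], 0, memory_order_relaxed);
    atomic_store_explicit(&szc[0], 0, memory_order_release);
}

int
main(void)
{
    for (int i = 0; i < NPAGES; i++)
        szc[i] = 3;
    demote_group();
    printf("root szc = %u\n", (unsigned)szc[0]);
    return (0);
}
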
8295 8294
8296 8295 /*
8297 8296 * Refresh the HAT ismttecnt[] element for size szc.
8298 8297 * Caller must have set ISM busy flag to prevent mapping
8299 8298 * lists from changing while we're traversing them.
8300 8299 */
8301 8300 pgcnt_t
8302 8301 ism_tsb_entries(sfmmu_t *sfmmup, int szc)
8303 8302 {
8304 8303 ism_blk_t *ism_blkp = sfmmup->sfmmu_iblk;
8305 8304 ism_map_t *ism_map;
8306 8305 pgcnt_t npgs = 0;
8307 8306 pgcnt_t npgs_scd = 0;
8308 8307 int j;
8309 8308 sf_scd_t *scdp;
8310 8309 uchar_t rid;
8311 8310
8312 8311 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY));
8313 8312 scdp = sfmmup->sfmmu_scdp;
8314 8313
8315 8314 for (; ism_blkp != NULL; ism_blkp = ism_blkp->iblk_next) {
8316 8315 ism_map = ism_blkp->iblk_maps;
8317 8316 for (j = 0; ism_map[j].imap_ismhat && j < ISM_MAP_SLOTS; j++) {
8318 8317 rid = ism_map[j].imap_rid;
8319 8318 ASSERT(rid == SFMMU_INVALID_ISMRID ||
8320 8319 rid < sfmmup->sfmmu_srdp->srd_next_ismrid);
8321 8320
8322 8321 if (scdp != NULL && rid != SFMMU_INVALID_ISMRID &&
8323 8322 SF_RGNMAP_TEST(scdp->scd_ismregion_map, rid)) {
8324 8323 /* ISM is in sfmmup's SCD */
8325 8324 npgs_scd +=
8326 8325 ism_map[j].imap_ismhat->sfmmu_ttecnt[szc];
8327 8326 } else {
8328 8327 /* ISM is not in the SCD */
8329 8328 npgs +=
8330 8329 ism_map[j].imap_ismhat->sfmmu_ttecnt[szc];
8331 8330 }
8332 8331 }
8333 8332 }
8334 8333 sfmmup->sfmmu_ismttecnt[szc] = npgs;
8335 8334 sfmmup->sfmmu_scdismttecnt[szc] = npgs_scd;
8336 8335 return (npgs);
8337 8336 }
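
ism_tsb_entries() walks every ISM map slot and splits the per-pagesize counts into an SCD total and a private total, depending on whether the slot's region id is set in the SCD's ISM region bitmap. A self-contained sketch of that split (hypothetical names, a plain word-sized bitmap standing in for SF_RGNMAP_TEST; not code from this file) might be:

#include <stdio.h>

#define NMAPS   4

int
main(void)
{
    unsigned long cnt[NMAPS] = { 10, 20, 30, 40 }; /* per-segment counts */
    int rid[NMAPS] = { 0, 1, 2, 3 };               /* region id per slot */
    unsigned long scd_map = 0x5;    /* bitmap: rids 0 and 2 are in the SCD */
    unsigned long npgs = 0, npgs_scd = 0;

    for (int j = 0; j < NMAPS; j++) {
        if (scd_map & (1UL << rid[j]))
            npgs_scd += cnt[j];     /* counted against the SCD */
        else
            npgs += cnt[j];         /* counted against the private hat */
    }
    printf("private %lu, scd %lu\n", npgs, npgs_scd);   /* private 60, scd 40 */
    return (0);
}
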
8338 8337
8339 8338 /*
8340 8339 * Yield the memory claim requirement for an address space.
8341 8340 *
8342 8341 * This is currently implemented as the number of bytes that have active
8343 8342 * hardware translations that have page structures. Therefore, it can
8344 8343 * underestimate the traditional resident set size, e.g., if the
8345 8344 * physical page is present and the hardware translation is missing;
8346 8345 * and it can overestimate the RSS, e.g., if there are active
8347 8346 * translations to a frame buffer with page structs.
8348 8347 * Also, it does not take sharing into account.
8349 8348 *
8350 8349 * Note that we don't acquire locks here since this function is most often
8351 8350 * called from the clock thread.
8352 8351 */
8353 8352 size_t
8354 8353 hat_get_mapped_size(struct hat *hat)
8355 8354 {
8356 8355 size_t assize = 0;
8357 8356 int i;
8358 8357
8359 8358 if (hat == NULL)
8360 8359 return (0);
8361 8360
8362 8361 for (i = 0; i < mmu_page_sizes; i++)
8363 8362 assize += ((pgcnt_t)hat->sfmmu_ttecnt[i] +
8364 8363 (pgcnt_t)hat->sfmmu_scdrttecnt[i]) * TTEBYTES(i);
8365 8364
8366 8365 if (hat->sfmmu_iblk == NULL)
8367 8366 return (assize);
8368 8367
8369 8368 for (i = 0; i < mmu_page_sizes; i++)
8370 8369 assize += ((pgcnt_t)hat->sfmmu_ismttecnt[i] +
8371 8370 (pgcnt_t)hat->sfmmu_scdismttecnt[i]) * TTEBYTES(i);
8372 8371
8373 8372 return (assize);
8374 8373 }
8375 8374
8376 8375 int
8377 8376 hat_stats_enable(struct hat *hat)
8378 8377 {
8379 8378 hatlock_t *hatlockp;
8380 8379
8381 8380 hatlockp = sfmmu_hat_enter(hat);
8382 8381 hat->sfmmu_rmstat++;
8383 8382 sfmmu_hat_exit(hatlockp);
8384 8383 return (1);
8385 8384 }
8386 8385
8387 8386 void
8388 8387 hat_stats_disable(struct hat *hat)
8389 8388 {
8390 8389 hatlock_t *hatlockp;
8391 8390
8392 8391 hatlockp = sfmmu_hat_enter(hat);
8393 8392 hat->sfmmu_rmstat--;
8394 8393 sfmmu_hat_exit(hatlockp);
8395 8394 }
8396 8395
8397 8396 /*
8398 8397 * Routines for entering or removing ourselves from the
8399 8398 * ism_hat's mapping list. This is used for both private and
8400 8399 * SCD hats.
8401 8400 */
8402 8401 static void
8403 8402 iment_add(struct ism_ment *iment, struct hat *ism_hat)
8404 8403 {
8405 8404 ASSERT(MUTEX_HELD(&ism_mlist_lock));
8406 8405
8407 8406 iment->iment_prev = NULL;
8408 8407 iment->iment_next = ism_hat->sfmmu_iment;
8409 8408 if (ism_hat->sfmmu_iment) {
8410 8409 ism_hat->sfmmu_iment->iment_prev = iment;
8411 8410 }
8412 8411 ism_hat->sfmmu_iment = iment;
8413 8412 }
8414 8413
8415 8414 static void
8416 8415 iment_sub(struct ism_ment *iment, struct hat *ism_hat)
8417 8416 {
8418 8417 ASSERT(MUTEX_HELD(&ism_mlist_lock));
8419 8418
8420 8419 if (ism_hat->sfmmu_iment == NULL) {
8421 8420 panic("ism map entry remove - no entries");
8422 8421 }
8423 8422
8424 8423 if (iment->iment_prev) {
8425 8424 ASSERT(ism_hat->sfmmu_iment != iment);
8426 8425 iment->iment_prev->iment_next = iment->iment_next;
8427 8426 } else {
8428 8427 ASSERT(ism_hat->sfmmu_iment == iment);
8429 8428 ism_hat->sfmmu_iment = iment->iment_next;
8430 8429 }
8431 8430
8432 8431 if (iment->iment_next) {
8433 8432 iment->iment_next->iment_prev = iment->iment_prev;
8434 8433 }
8435 8434
8436 8435 /*
8437 8436 * zero out the entry
8438 8437 */
8439 8438 iment->iment_next = NULL;
8440 8439 iment->iment_prev = NULL;
8441 8440 iment->iment_hat = NULL;
8442 8441 iment->iment_base_va = 0;
8443 8442 }
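
iment_add() and iment_sub() are a plain head-insert and unlink on a doubly linked list, serialized by ism_mlist_lock. A self-contained userland sketch of the same pointer manipulation (hypothetical names, no locking; not code from this file) is:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

typedef struct node {
    struct node *prev;
    struct node *next;
    int val;
} node_t;

/* Insert n at the head of the list rooted at *headp (cf. iment_add()). */
static void
list_add(node_t **headp, node_t *n)
{
    n->prev = NULL;
    n->next = *headp;
    if (*headp != NULL)
        (*headp)->prev = n;
    *headp = n;
}

/* Unlink n from the list rooted at *headp (cf. iment_sub()). */
static void
list_sub(node_t **headp, node_t *n)
{
    assert(*headp != NULL);
    if (n->prev != NULL)
        n->prev->next = n->next;
    else
        *headp = n->next;
    if (n->next != NULL)
        n->next->prev = n->prev;
    n->next = n->prev = NULL;   /* zero out the entry */
}

int
main(void)
{
    node_t a = { NULL, NULL, 1 }, b = { NULL, NULL, 2 };
    node_t *head = NULL;

    list_add(&head, &a);
    list_add(&head, &b);
    list_sub(&head, &b);
    printf("head->val = %d\n", head->val);  /* prints 1 */
    return (0);
}
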
8444 8443
8445 8444 /*
8446 8445 * Hat_share()/unshare() return a (non-zero) error
8447 8446 * when saddr and daddr are not properly aligned.
8448 8447 *
8449 8448 * The top level mapping element determines the alignment
8450 8449 * requirement for saddr and daddr, depending on different
8451 8450 * architectures.
8452 8451 *
8453 8452 * When hat_share()/unshare() are not supported,
8454 8453 * HATOP_SHARE()/UNSHARE() return 0
8455 8454 */
8456 8455 int
8457 8456 hat_share(struct hat *sfmmup, caddr_t addr,
8458 8457 struct hat *ism_hatid, caddr_t sptaddr, size_t len, uint_t ismszc)
8459 8458 {
8460 8459 ism_blk_t *ism_blkp;
8461 8460 ism_blk_t *new_iblk;
8462 8461 ism_map_t *ism_map;
8463 8462 ism_ment_t *ism_ment;
8464 8463 int i, added;
8465 8464 hatlock_t *hatlockp;
8466 8465 int reload_mmu = 0;
8467 8466 uint_t ismshift = page_get_shift(ismszc);
8468 8467 size_t ismpgsz = page_get_pagesize(ismszc);
8469 8468 uint_t ismmask = (uint_t)ismpgsz - 1;
8470 8469 size_t sh_size = ISM_SHIFT(ismshift, len);
8471 8470 ushort_t ismhatflag;
8472 8471 hat_region_cookie_t rcookie;
8473 8472 sf_scd_t *old_scdp;
8474 8473
8475 8474 #ifdef DEBUG
8476 8475 caddr_t eaddr = addr + len;
8477 8476 #endif /* DEBUG */
8478 8477
8479 8478 ASSERT(ism_hatid != NULL && sfmmup != NULL);
8480 8479 ASSERT(sptaddr == ISMID_STARTADDR);
8481 8480 /*
8482 8481 * Check the alignment.
8483 8482 */
8484 8483 if (!ISM_ALIGNED(ismshift, addr) || !ISM_ALIGNED(ismshift, sptaddr))
8485 8484 return (EINVAL);
8486 8485
8487 8486 /*
8488 8487 * Check size alignment.
8489 8488 */
8490 8489 if (!ISM_ALIGNED(ismshift, len))
8491 8490 return (EINVAL);
8492 8491
8493 8492 /*
8494 8493 * Allocate ism_ment for the ism_hat's mapping list, and an
8495 8494 * ism map blk in case we need one. We must do our
8496 8495 * allocations before acquiring locks to prevent a deadlock
8497 8496 * in the kmem allocator on the mapping list lock.
8498 8497 */
8499 8498 new_iblk = kmem_cache_alloc(ism_blk_cache, KM_SLEEP);
8500 8499 ism_ment = kmem_cache_alloc(ism_ment_cache, KM_SLEEP);
8501 8500
8502 8501 /*
8503 8502 * Serialize ISM mappings with the ISM busy flag, and also the
8504 8503 * trap handlers.
8505 8504 */
8506 8505 sfmmu_ismhat_enter(sfmmup, 0);
8507 8506
8508 8507 /*
8509 8508 * Allocate an ism map blk if necessary.
8510 8509 */
8511 8510 if (sfmmup->sfmmu_iblk == NULL) {
8512 8511 sfmmup->sfmmu_iblk = new_iblk;
8513 8512 bzero(new_iblk, sizeof (*new_iblk));
8514 8513 new_iblk->iblk_nextpa = (uint64_t)-1;
8515 8514 membar_stst(); /* make sure next ptr visible to all CPUs */
8516 8515 sfmmup->sfmmu_ismblkpa = va_to_pa((caddr_t)new_iblk);
8517 8516 reload_mmu = 1;
8518 8517 new_iblk = NULL;
8519 8518 }
8520 8519
8521 8520 #ifdef DEBUG
8522 8521 /*
8523 8522 * Make sure mapping does not already exist.
8524 8523 */
8525 8524 ism_blkp = sfmmup->sfmmu_iblk;
8526 8525 while (ism_blkp != NULL) {
8527 8526 ism_map = ism_blkp->iblk_maps;
8528 8527 for (i = 0; i < ISM_MAP_SLOTS && ism_map[i].imap_ismhat; i++) {
8529 8528 if ((addr >= ism_start(ism_map[i]) &&
8530 8529 addr < ism_end(ism_map[i])) ||
8531 8530 eaddr > ism_start(ism_map[i]) &&
8532 8531 eaddr <= ism_end(ism_map[i])) {
8533 8532 panic("sfmmu_share: Already mapped!");
8534 8533 }
8535 8534 }
8536 8535 ism_blkp = ism_blkp->iblk_next;
8537 8536 }
8538 8537 #endif /* DEBUG */
8539 8538
8540 8539 ASSERT(ismszc >= TTE4M);
8541 8540 if (ismszc == TTE4M) {
8542 8541 ismhatflag = HAT_4M_FLAG;
8543 8542 } else if (ismszc == TTE32M) {
8544 8543 ismhatflag = HAT_32M_FLAG;
8545 8544 } else if (ismszc == TTE256M) {
8546 8545 ismhatflag = HAT_256M_FLAG;
8547 8546 }
8548 8547 /*
8549 8548 * Add mapping to first available mapping slot.
8550 8549 */
8551 8550 ism_blkp = sfmmup->sfmmu_iblk;
8552 8551 added = 0;
8553 8552 while (!added) {
8554 8553 ism_map = ism_blkp->iblk_maps;
8555 8554 for (i = 0; i < ISM_MAP_SLOTS; i++) {
8556 8555 if (ism_map[i].imap_ismhat == NULL) {
8557 8556
8558 8557 ism_map[i].imap_ismhat = ism_hatid;
8559 8558 ism_map[i].imap_vb_shift = (uchar_t)ismshift;
8560 8559 ism_map[i].imap_rid = SFMMU_INVALID_ISMRID;
8561 8560 ism_map[i].imap_hatflags = ismhatflag;
8562 8561 ism_map[i].imap_sz_mask = ismmask;
8563 8562 /*
8564 8563 * imap_seg is checked in ISM_CHECK to see if
8565 8564 * it is non-NULL; if so, the other info is assumed valid.
8566 8565 */
8567 8566 membar_stst();
8568 8567 ism_map[i].imap_seg = (uintptr_t)addr | sh_size;
8569 8568 ism_map[i].imap_ment = ism_ment;
8570 8569
8571 8570 /*
8572 8571 * Now add ourselves to the ism_hat's
8573 8572 * mapping list.
8574 8573 */
8575 8574 ism_ment->iment_hat = sfmmup;
8576 8575 ism_ment->iment_base_va = addr;
8577 8576 ism_hatid->sfmmu_ismhat = 1;
8578 8577 mutex_enter(&ism_mlist_lock);
8579 8578 iment_add(ism_ment, ism_hatid);
8580 8579 mutex_exit(&ism_mlist_lock);
8581 8580 added = 1;
8582 8581 break;
8583 8582 }
8584 8583 }
8585 8584 if (!added && ism_blkp->iblk_next == NULL) {
8586 8585 ism_blkp->iblk_next = new_iblk;
8587 8586 new_iblk = NULL;
8588 8587 bzero(ism_blkp->iblk_next,
8589 8588 sizeof (*ism_blkp->iblk_next));
8590 8589 ism_blkp->iblk_next->iblk_nextpa = (uint64_t)-1;
8591 8590 membar_stst();
8592 8591 ism_blkp->iblk_nextpa =
8593 8592 va_to_pa((caddr_t)ism_blkp->iblk_next);
8594 8593 }
8595 8594 ism_blkp = ism_blkp->iblk_next;
8596 8595 }
8597 8596
8598 8597 /*
8599 8598 * After calling hat_join_region, sfmmup may join a new SCD or
8600 8599 * move from the old scd to a new scd, in which case, we want to
8601 8600 * shrink the sfmmup's private tsb size, i.e., pass shrink to
8602 8601 * sfmmu_check_page_sizes at the end of this routine.
8603 8602 */
8604 8603 old_scdp = sfmmup->sfmmu_scdp;
8605 8604
8606 8605 rcookie = hat_join_region(sfmmup, addr, len, (void *)ism_hatid, 0,
8607 8606 PROT_ALL, ismszc, NULL, HAT_REGION_ISM);
8608 8607 if (rcookie != HAT_INVALID_REGION_COOKIE) {
8609 8608 ism_map[i].imap_rid = (uchar_t)((uint64_t)rcookie);
8610 8609 }
8611 8610 /*
8612 8611 * Update our counters for this sfmmup's ism mappings.
8613 8612 */
8614 8613 for (i = 0; i <= ismszc; i++) {
8615 8614 if (!(disable_ism_large_pages & (1 << i)))
8616 8615 (void) ism_tsb_entries(sfmmup, i);
8617 8616 }
8618 8617
8619 8618 /*
8620 8619 * For ISM and DISM we do not support 512K pages, so we only
8621 8620 * search the 4M and 8K/64K hashes for 4 pagesize cpus, and search the
8622 8621 * 256M or 32M, and 4M and 8K/64K hashes for 6 pagesize cpus.
8623 8622 *
8624 8623 * Need to set 32M/256M ISM flags to make sure
8625 8624 * sfmmu_check_page_sizes() enables them on Panther.
8626 8625 */
8627 8626 ASSERT((disable_ism_large_pages & (1 << TTE512K)) != 0);
8628 8627
8629 8628 switch (ismszc) {
8630 8629 case TTE256M:
8631 8630 if (!SFMMU_FLAGS_ISSET(sfmmup, HAT_256M_ISM)) {
8632 8631 hatlockp = sfmmu_hat_enter(sfmmup);
8633 8632 SFMMU_FLAGS_SET(sfmmup, HAT_256M_ISM);
8634 8633 sfmmu_hat_exit(hatlockp);
8635 8634 }
8636 8635 break;
8637 8636 case TTE32M:
8638 8637 if (!SFMMU_FLAGS_ISSET(sfmmup, HAT_32M_ISM)) {
8639 8638 hatlockp = sfmmu_hat_enter(sfmmup);
8640 8639 SFMMU_FLAGS_SET(sfmmup, HAT_32M_ISM);
8641 8640 sfmmu_hat_exit(hatlockp);
8642 8641 }
8643 8642 break;
8644 8643 default:
8645 8644 break;
8646 8645 }
8647 8646
8648 8647 /*
8649 8648 * If we updated the ismblkpa for this HAT we must make
8650 8649 * sure all CPUs running this process reload their tsbmiss area.
8651 8650 * Otherwise they will fail to load the mappings in the tsbmiss
8652 8651 * handler and will loop calling pagefault().
8653 8652 */
8654 8653 if (reload_mmu) {
8655 8654 hatlockp = sfmmu_hat_enter(sfmmup);
8656 8655 sfmmu_sync_mmustate(sfmmup);
8657 8656 sfmmu_hat_exit(hatlockp);
8658 8657 }
8659 8658
8660 8659 sfmmu_ismhat_exit(sfmmup, 0);
8661 8660
8662 8661 /*
8663 8662 * Free up ismblk if we didn't use it.
8664 8663 */
8665 8664 if (new_iblk != NULL)
8666 8665 kmem_cache_free(ism_blk_cache, new_iblk);
8667 8666
8668 8667 /*
8669 8668 * Check TSB and TLB page sizes.
8670 8669 */
8671 8670 if (sfmmup->sfmmu_scdp != NULL && old_scdp != sfmmup->sfmmu_scdp) {
8672 8671 sfmmu_check_page_sizes(sfmmup, 0);
8673 8672 } else {
8674 8673 sfmmu_check_page_sizes(sfmmup, 1);
8675 8674 }
8676 8675 return (0);
8677 8676 }
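
hat_share() rejects requests whose address, sptaddr, or length is not a multiple of the ISM mapping granularity, using masks derived from the ISM page-size shift. A tiny sketch of that style of power-of-two alignment test (hypothetical macro and values, not the real ISM_ALIGNED() definition) is:

#include <stdint.h>
#include <stdio.h>

/* true when x is a multiple of 2^shift (cf. the ISM_ALIGNED() checks) */
#define ALIGNED(shift, x)   ((((uintptr_t)(x)) & ((1UL << (shift)) - 1)) == 0)

int
main(void)
{
    unsigned int shift = 22;            /* 4M ISM granularity */
    uintptr_t addr = 0x10400000;        /* 4M aligned */
    size_t len = 0x10001000;            /* not 4M aligned */

    printf("addr aligned: %d\n", ALIGNED(shift, addr));     /* 1 */
    printf("len aligned:  %d\n", ALIGNED(shift, len));      /* 0 */
    return (0);
}
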
8678 8677
8679 8678 /*
8680 8679 * hat_unshare removes exactly one ism_map from
8681 8680 * this process's as. It expects multiple calls
8682 8681 * to hat_unshare for multiple shm segments.
8683 8682 */
8684 8683 void
8685 8684 hat_unshare(struct hat *sfmmup, caddr_t addr, size_t len, uint_t ismszc)
8686 8685 {
8687 8686 ism_map_t *ism_map;
8688 8687 ism_ment_t *free_ment = NULL;
8689 8688 ism_blk_t *ism_blkp;
8690 8689 struct hat *ism_hatid;
8691 8690 int found, i;
8692 8691 hatlock_t *hatlockp;
8693 8692 struct tsb_info *tsbinfo;
8694 8693 uint_t ismshift = page_get_shift(ismszc);
8695 8694 size_t sh_size = ISM_SHIFT(ismshift, len);
8696 8695 uchar_t ism_rid;
8697 8696 sf_scd_t *old_scdp;
8698 8697
8699 8698 ASSERT(ISM_ALIGNED(ismshift, addr));
8700 8699 ASSERT(ISM_ALIGNED(ismshift, len));
8701 8700 ASSERT(sfmmup != NULL);
8702 8701 ASSERT(sfmmup != ksfmmup);
8703 8702
8704 8703 ASSERT(sfmmup->sfmmu_as != NULL);
8705 8704
8706 8705 /*
8707 8706 * Make sure that during the entire time ISM mappings are removed,
8708 8707 * the trap handlers serialize behind us, and that no one else
8709 8708 * can be mucking with ISM mappings. This also lets us get away
8710 8709 * with not doing expensive cross calls to flush the TLB -- we
8711 8710 * just discard the context, flush the entire TSB, and call it
8712 8711 * a day.
8713 8712 */
8714 8713 sfmmu_ismhat_enter(sfmmup, 0);
8715 8714
8716 8715 /*
8717 8716 * Remove the mapping.
8718 8717 *
8719 8718 * We can't have any holes in the ism map.
8720 8719 * The tsb miss code while searching the ism map will
8721 8720 * stop on an empty map slot. So we must move
8722 8721 * every entry past the hole up by one, if any.
8723 8722 *
8724 8723 * Also empty ism map blks are not freed until the
8725 8724 * process exits. This is to prevent an MT race condition
8726 8725 * between sfmmu_unshare() and sfmmu_tsbmiss_exception().
8727 8726 */
8728 8727 found = 0;
8729 8728 ism_blkp = sfmmup->sfmmu_iblk;
8730 8729 while (!found && ism_blkp != NULL) {
8731 8730 ism_map = ism_blkp->iblk_maps;
8732 8731 for (i = 0; i < ISM_MAP_SLOTS; i++) {
8733 8732 if (addr == ism_start(ism_map[i]) &&
8734 8733 sh_size == (size_t)(ism_size(ism_map[i]))) {
8735 8734 found = 1;
8736 8735 break;
8737 8736 }
8738 8737 }
8739 8738 if (!found)
8740 8739 ism_blkp = ism_blkp->iblk_next;
8741 8740 }
8742 8741
8743 8742 if (found) {
8744 8743 ism_hatid = ism_map[i].imap_ismhat;
8745 8744 ism_rid = ism_map[i].imap_rid;
8746 8745 ASSERT(ism_hatid != NULL);
8747 8746 ASSERT(ism_hatid->sfmmu_ismhat == 1);
8748 8747
8749 8748 /*
8750 8749 * After hat_leave_region, the sfmmup may leave SCD,
8751 8750 * in which case, we want to grow the private tsb size when
8752 8751 * calling sfmmu_check_page_sizes at the end of the routine.
8753 8752 */
8754 8753 old_scdp = sfmmup->sfmmu_scdp;
8755 8754 /*
8756 8755 * Then remove ourselves from the region.
8757 8756 */
8758 8757 if (ism_rid != SFMMU_INVALID_ISMRID) {
8759 8758 hat_leave_region(sfmmup, (void *)((uint64_t)ism_rid),
8760 8759 HAT_REGION_ISM);
8761 8760 }
8762 8761
8763 8762 /*
8764 8763 * And now guarantee that any other cpu
8765 8764 * that tries to process an ISM miss
8766 8765 * will go to tl=0.
8767 8766 */
8768 8767 hatlockp = sfmmu_hat_enter(sfmmup);
8769 8768 sfmmu_invalidate_ctx(sfmmup);
8770 8769 sfmmu_hat_exit(hatlockp);
8771 8770
8772 8771 /*
8773 8772 * Remove ourselves from the ism mapping list.
8774 8773 */
8775 8774 mutex_enter(&ism_mlist_lock);
8776 8775 iment_sub(ism_map[i].imap_ment, ism_hatid);
8777 8776 mutex_exit(&ism_mlist_lock);
8778 8777 free_ment = ism_map[i].imap_ment;
8779 8778
8780 8779 /*
8781 8780 * We delete the ism map by copying
8782 8781 * the next map over the current one.
8783 8782 * We will take the next one in the maps
8784 8783 * array or from the next ism_blk.
8785 8784 */
8786 8785 while (ism_blkp != NULL) {
8787 8786 ism_map = ism_blkp->iblk_maps;
8788 8787 while (i < (ISM_MAP_SLOTS - 1)) {
8789 8788 ism_map[i] = ism_map[i + 1];
8790 8789 i++;
8791 8790 }
8792 8791 /* i == (ISM_MAP_SLOTS - 1) */
8793 8792 ism_blkp = ism_blkp->iblk_next;
8794 8793 if (ism_blkp != NULL) {
8795 8794 ism_map[i] = ism_blkp->iblk_maps[0];
8796 8795 i = 0;
8797 8796 } else {
8798 8797 ism_map[i].imap_seg = 0;
8799 8798 ism_map[i].imap_vb_shift = 0;
8800 8799 ism_map[i].imap_rid = SFMMU_INVALID_ISMRID;
8801 8800 ism_map[i].imap_hatflags = 0;
8802 8801 ism_map[i].imap_sz_mask = 0;
8803 8802 ism_map[i].imap_ismhat = NULL;
8804 8803 ism_map[i].imap_ment = NULL;
8805 8804 }
8806 8805 }
8807 8806
8808 8807 /*
8809 8808 * Now flush entire TSB for the process, since
8810 8809 * demapping page by page can be too expensive.
8811 8810 * We don't have to flush the TLB here anymore
8812 8811 * since we switch to a new TLB ctx instead.
8813 8812 * Also, there is no need to flush if the process
8814 8813 * is exiting since the TSB will be freed later.
8815 8814 */
8816 8815 if (!sfmmup->sfmmu_free) {
8817 8816 hatlockp = sfmmu_hat_enter(sfmmup);
8818 8817 for (tsbinfo = sfmmup->sfmmu_tsb; tsbinfo != NULL;
8819 8818 tsbinfo = tsbinfo->tsb_next) {
8820 8819 if (tsbinfo->tsb_flags & TSB_SWAPPED)
8821 8820 continue;
8822 8821 if (tsbinfo->tsb_flags & TSB_RELOC_FLAG) {
8823 8822 tsbinfo->tsb_flags |=
8824 8823 TSB_FLUSH_NEEDED;
8825 8824 continue;
8826 8825 }
8827 8826
8828 8827 sfmmu_inv_tsb(tsbinfo->tsb_va,
8829 8828 TSB_BYTES(tsbinfo->tsb_szc));
8830 8829 }
8831 8830 sfmmu_hat_exit(hatlockp);
8832 8831 }
8833 8832 }
8834 8833
8835 8834 /*
8836 8835 * Update our counters for this sfmmup's ism mappings.
8837 8836 */
8838 8837 for (i = 0; i <= ismszc; i++) {
8839 8838 if (!(disable_ism_large_pages & (1 << i)))
8840 8839 (void) ism_tsb_entries(sfmmup, i);
8841 8840 }
8842 8841
8843 8842 sfmmu_ismhat_exit(sfmmup, 0);
8844 8843
8845 8844 /*
8846 8845 * We must do our freeing here after dropping locks
8847 8846 * to prevent a deadlock in the kmem allocator on the
8848 8847 * mapping list lock.
8849 8848 */
8850 8849 if (free_ment != NULL)
8851 8850 kmem_cache_free(ism_ment_cache, free_ment);
8852 8851
8853 8852 /*
8854 8853 * Check TSB and TLB page sizes if the process isn't exiting.
8855 8854 */
8856 8855 if (!sfmmup->sfmmu_free) {
8857 8856 if (found && old_scdp != NULL && sfmmup->sfmmu_scdp == NULL) {
8858 8857 sfmmu_check_page_sizes(sfmmup, 1);
8859 8858 } else {
8860 8859 sfmmu_check_page_sizes(sfmmup, 0);
8861 8860 }
8862 8861 }
8863 8862 }
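
The deletion path in hat_unshare() keeps the ism map free of holes by copying every later slot over the one being removed (and pulling in the first slot of the next ism_blk when it runs off the end), because the TSB miss code stops at the first empty slot. The single-block case reduces to the following self-contained sketch (hypothetical names; not code from this file):

#include <stdio.h>

#define SLOTS   4

/*
 * Remove the entry at index 'del' by copying each later entry over its
 * predecessor and clearing the last slot, so a scanner that stops at
 * the first empty (zero) slot never sees a hole.
 */
static void
compact(int map[SLOTS], int del)
{
    for (int i = del; i < SLOTS - 1; i++)
        map[i] = map[i + 1];
    map[SLOTS - 1] = 0;         /* 0 == empty slot */
}

int
main(void)
{
    int map[SLOTS] = { 11, 22, 33, 0 };

    compact(map, 1);
    for (int i = 0; i < SLOTS; i++)
        printf("%d ", map[i]);  /* prints: 11 33 0 0 */
    printf("\n");
    return (0);
}
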
8864 8863
8865 8864 /* ARGSUSED */
8866 8865 static int
8867 8866 sfmmu_idcache_constructor(void *buf, void *cdrarg, int kmflags)
8868 8867 {
8869 8868 /* void *buf is sfmmu_t pointer */
8870 8869 bzero(buf, sizeof (sfmmu_t));
8871 8870
8872 8871 return (0);
8873 8872 }
8874 8873
8875 8874 /* ARGSUSED */
8876 8875 static void
8877 8876 sfmmu_idcache_destructor(void *buf, void *cdrarg)
8878 8877 {
8879 8878 /* void *buf is sfmmu_t pointer */
8880 8879 }
8881 8880
8882 8881 /*
8883 8882 * setup kmem hmeblks by bzeroing all members and initializing the nextpa
8884 8883 * field to be the pa of this hmeblk
8885 8884 */
8886 8885 /* ARGSUSED */
8887 8886 static int
8888 8887 sfmmu_hblkcache_constructor(void *buf, void *cdrarg, int kmflags)
8889 8888 {
8890 8889 struct hme_blk *hmeblkp;
8891 8890
8892 8891 bzero(buf, (size_t)cdrarg);
8893 8892 hmeblkp = (struct hme_blk *)buf;
8894 8893 hmeblkp->hblk_nextpa = va_to_pa((caddr_t)hmeblkp);
8895 8894
8896 8895 #ifdef HBLK_TRACE
8897 8896 mutex_init(&hmeblkp->hblk_audit_lock, NULL, MUTEX_DEFAULT, NULL);
8898 8897 #endif /* HBLK_TRACE */
8899 8898
8900 8899 return (0);
8901 8900 }
8902 8901
8903 8902 /* ARGSUSED */
8904 8903 static void
8905 8904 sfmmu_hblkcache_destructor(void *buf, void *cdrarg)
8906 8905 {
8907 8906
8908 8907 #ifdef HBLK_TRACE
8909 8908
8910 8909 struct hme_blk *hmeblkp;
8911 8910
8912 8911 hmeblkp = (struct hme_blk *)buf;
8913 8912 mutex_destroy(&hmeblkp->hblk_audit_lock);
8914 8913
8915 8914 #endif /* HBLK_TRACE */
8916 8915 }
8917 8916
8918 8917 #define SFMMU_CACHE_RECLAIM_SCAN_RATIO 8
8919 8918 static int sfmmu_cache_reclaim_scan_ratio = SFMMU_CACHE_RECLAIM_SCAN_RATIO;
8920 8919 /*
8921 8920 * The kmem allocator will call back into our reclaim routine when the system
8922 8921 * is running low on memory. We traverse the hash and free up all unused but
8923 8922 * still cached hme_blks. We also traverse the free list and free them up
8924 8923 * as well.
8925 8924 */
8926 8925 /*ARGSUSED*/
8927 8926 static void
8928 8927 sfmmu_hblkcache_reclaim(void *cdrarg)
8929 8928 {
8930 8929 int i;
8931 8930 struct hmehash_bucket *hmebp;
8932 8931 struct hme_blk *hmeblkp, *nx_hblk, *pr_hblk = NULL;
8933 8932 static struct hmehash_bucket *uhmehash_reclaim_hand;
8934 8933 static struct hmehash_bucket *khmehash_reclaim_hand;
8935 8934 struct hme_blk *list = NULL, *last_hmeblkp;
8936 8935 cpuset_t cpuset = cpu_ready_set;
8937 8936 cpu_hme_pend_t *cpuhp;
8938 8937
8939 8938 /* Free up hmeblks on the cpu pending lists */
8940 8939 for (i = 0; i < NCPU; i++) {
8941 8940 cpuhp = &cpu_hme_pend[i];
8942 8941 if (cpuhp->chp_listp != NULL) {
8943 8942 mutex_enter(&cpuhp->chp_mutex);
8944 8943 if (cpuhp->chp_listp == NULL) {
8945 8944 mutex_exit(&cpuhp->chp_mutex);
8946 8945 continue;
8947 8946 }
8948 8947 for (last_hmeblkp = cpuhp->chp_listp;
8949 8948 last_hmeblkp->hblk_next != NULL;
8950 8949 last_hmeblkp = last_hmeblkp->hblk_next)
8951 8950 ;
8952 8951 last_hmeblkp->hblk_next = list;
8953 8952 list = cpuhp->chp_listp;
8954 8953 cpuhp->chp_listp = NULL;
8955 8954 cpuhp->chp_count = 0;
8956 8955 mutex_exit(&cpuhp->chp_mutex);
8957 8956 }
8958 8957
8959 8958 }
8960 8959
8961 8960 if (list != NULL) {
8962 8961 kpreempt_disable();
8963 8962 CPUSET_DEL(cpuset, CPU->cpu_id);
8964 8963 xt_sync(cpuset);
8965 8964 xt_sync(cpuset);
8966 8965 kpreempt_enable();
8967 8966 sfmmu_hblk_free(&list);
8968 8967 list = NULL;
8969 8968 }
8970 8969
8971 8970 hmebp = uhmehash_reclaim_hand;
8972 8971 if (hmebp == NULL || hmebp > &uhme_hash[UHMEHASH_SZ])
8973 8972 uhmehash_reclaim_hand = hmebp = uhme_hash;
8974 8973 uhmehash_reclaim_hand += UHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio;
8975 8974
8976 8975 for (i = UHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; i; i--) {
8977 8976 if (SFMMU_HASH_LOCK_TRYENTER(hmebp) != 0) {
8978 8977 hmeblkp = hmebp->hmeblkp;
8979 8978 pr_hblk = NULL;
8980 8979 while (hmeblkp) {
8981 8980 nx_hblk = hmeblkp->hblk_next;
8982 8981 if (!hmeblkp->hblk_vcnt &&
8983 8982 !hmeblkp->hblk_hmecnt) {
8984 8983 sfmmu_hblk_hash_rm(hmebp, hmeblkp,
8985 8984 pr_hblk, &list, 0);
8986 8985 } else {
8987 8986 pr_hblk = hmeblkp;
8988 8987 }
8989 8988 hmeblkp = nx_hblk;
8990 8989 }
8991 8990 SFMMU_HASH_UNLOCK(hmebp);
8992 8991 }
8993 8992 if (hmebp++ == &uhme_hash[UHMEHASH_SZ])
8994 8993 hmebp = uhme_hash;
8995 8994 }
8996 8995
8997 8996 hmebp = khmehash_reclaim_hand;
8998 8997 if (hmebp == NULL || hmebp > &khme_hash[KHMEHASH_SZ])
8999 8998 khmehash_reclaim_hand = hmebp = khme_hash;
9000 8999 khmehash_reclaim_hand += KHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio;
9001 9000
9002 9001 for (i = KHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; i; i--) {
9003 9002 if (SFMMU_HASH_LOCK_TRYENTER(hmebp) != 0) {
9004 9003 hmeblkp = hmebp->hmeblkp;
9005 9004 pr_hblk = NULL;
9006 9005 while (hmeblkp) {
9007 9006 nx_hblk = hmeblkp->hblk_next;
9008 9007 if (!hmeblkp->hblk_vcnt &&
9009 9008 !hmeblkp->hblk_hmecnt) {
9010 9009 sfmmu_hblk_hash_rm(hmebp, hmeblkp,
9011 9010 pr_hblk, &list, 0);
9012 9011 } else {
9013 9012 pr_hblk = hmeblkp;
9014 9013 }
9015 9014 hmeblkp = nx_hblk;
9016 9015 }
9017 9016 SFMMU_HASH_UNLOCK(hmebp);
9018 9017 }
9019 9018 if (hmebp++ == &khme_hash[KHMEHASH_SZ])
9020 9019 hmebp = khme_hash;
9021 9020 }
9022 9021 sfmmu_hblks_list_purge(&list, 0);
9023 9022 }
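
sfmmu_hblkcache_reclaim() scans only 1/sfmmu_cache_reclaim_scan_ratio of each hash per callback and remembers its position in a static "hand" pointer, so repeated low-memory callbacks sweep the whole table incrementally. A self-contained sketch of that resumable partial scan (hypothetical names; the per-bucket try-lock and hmeblk freeing are reduced to a stand-in; not code from this file) is:

#include <stdio.h>

#define HASH_SZ     64
#define SCAN_RATIO  8           /* scan 1/8th of the table per call */

static int table[HASH_SZ];      /* dummy bucket contents */
static int hand = -1;           /* resume point; persists across calls */

/*
 * Scan HASH_SZ / SCAN_RATIO buckets starting at the saved hand and
 * remember where to resume next time, so repeated callbacks sweep
 * the whole table incrementally.
 */
static void
reclaim_scan(void)
{
    int bucket;

    if (hand < 0 || hand >= HASH_SZ)
        hand = 0;
    bucket = hand;
    hand += HASH_SZ / SCAN_RATIO;

    for (int i = HASH_SZ / SCAN_RATIO; i; i--) {
        table[bucket] = 0;      /* stand-in for freeing idle hmeblks */
        bucket = (bucket + 1) % HASH_SZ;
    }
}

int
main(void)
{
    reclaim_scan();
    reclaim_scan();
    printf("next scan starts at bucket %d\n", hand);    /* 16 */
    return (0);
}
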
9024 9023
9025 9024 /*
9026 9025 * sfmmu_get_ppvcolor should become a vm_machdep or hatop interface.
9027 9026 * same goes for sfmmu_get_addrvcolor().
9028 9027 *
9029 9028 * This function will return the virtual color for the specified page. The
9030 9029 * virtual color corresponds to this page's current mapping or its last mapping.
9031 9030 * It is used by memory allocators to choose addresses with the correct
9032 9031 * alignment so vac consistency is automatically maintained. If the page
9033 9032 * has no color it returns -1.
9034 9033 */
9035 9034 /*ARGSUSED*/
9036 9035 int
9037 9036 sfmmu_get_ppvcolor(struct page *pp)
9038 9037 {
9039 9038 #ifdef VAC
9040 9039 int color;
9041 9040
9042 9041 if (!(cache & CACHE_VAC) || PP_NEWPAGE(pp)) {
9043 9042 return (-1);
9044 9043 }
9045 9044 color = PP_GET_VCOLOR(pp);
9046 9045 ASSERT(color < mmu_btop(shm_alignment));
9047 9046 return (color);
9048 9047 #else
9049 9048 return (-1);
9050 9049 #endif /* VAC */
9051 9050 }
9052 9051
9053 9052 /*
9054 9053 * This function will return the desired alignment for vac consistency
9055 9054 * (vac color) given a virtual address. If no vac is present it returns -1.
9056 9055 */
9057 9056 /*ARGSUSED*/
9058 9057 int
9059 9058 sfmmu_get_addrvcolor(caddr_t vaddr)
9060 9059 {
9061 9060 #ifdef VAC
9062 9061 if (cache & CACHE_VAC) {
9063 9062 return (addr_to_vcolor(vaddr));
9064 9063 } else {
9065 9064 return (-1);
9066 9065 }
9067 9066 #else
9068 9067 return (-1);
9069 9068 #endif /* VAC */
9070 9069 }
9071 9070
9072 9071 #ifdef VAC
9073 9072 /*
9074 9073 * Check for conflicts.
9075 9074 * A conflict exists if the new and existing mappings do not match in
9076 9075 * their shm_alignment fields. If conflicts exist, the existing mappings
9077 9076 * are flushed unless one of them is locked. If one of them is locked, then
9078 9077 * the mappings are flushed and converted to non-cacheable mappings.
9079 9078 */
9080 9079 static void
9081 9080 sfmmu_vac_conflict(struct hat *hat, caddr_t addr, page_t *pp)
9082 9081 {
9083 9082 struct hat *tmphat;
9084 9083 struct sf_hment *sfhmep, *tmphme = NULL;
9085 9084 struct hme_blk *hmeblkp;
9086 9085 int vcolor;
9087 9086 tte_t tte;
9088 9087
9089 9088 ASSERT(sfmmu_mlist_held(pp));
9090 9089 ASSERT(!PP_ISNC(pp)); /* page better be cacheable */
9091 9090
9092 9091 vcolor = addr_to_vcolor(addr);
9093 9092 if (PP_NEWPAGE(pp)) {
9094 9093 PP_SET_VCOLOR(pp, vcolor);
9095 9094 return;
9096 9095 }
9097 9096
9098 9097 if (PP_GET_VCOLOR(pp) == vcolor) {
9099 9098 return;
9100 9099 }
9101 9100
9102 9101 if (!PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp)) {
9103 9102 /*
9104 9103 * Previous user of page had a different color
9105 9104 * but since there are no current users
9106 9105 * we just flush the cache and change the color.
9107 9106 */
9108 9107 SFMMU_STAT(sf_pgcolor_conflict);
9109 9108 sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp));
9110 9109 PP_SET_VCOLOR(pp, vcolor);
9111 9110 return;
9112 9111 }
9113 9112
9114 9113 /*
9115 9114 * If we get here we have a vac conflict with a current
9116 9115 * mapping. VAC conflict policy is as follows.
9117 9116 * - The default is to unload the other mappings unless:
9118 9117 * - If we have a large mapping we uncache the page.
9119 9118 * We need to uncache the rest of the large page too.
9120 9119 * - If any of the mappings are locked we uncache the page.
9121 9120 * - If the requested mapping is inconsistent
9122 9121 * with another mapping and that mapping
9123 9122 * is in the same address space we have to
9124 9123 * make it non-cached. The default thing
9125 9124 * to do is unload the inconsistent mapping
9126 9125 * but if they are in the same address space
9127 9126 * we run the risk of unmapping the pc or the
9128 9127 * stack which we will use as we return to the user,
9129 9128 * in which case we can then fault on the thing
9130 9129 * we just unloaded and get into an infinite loop.
9131 9130 */
9132 9131 if (PP_ISMAPPED_LARGE(pp)) {
9133 9132 int sz;
9134 9133
9135 9134 /*
9136 9135 * Existing mapping is for big pages. We don't unload
9137 9136 * existing big mappings to satisfy new mappings.
9138 9137 * Always convert all mappings to TNC.
9139 9138 */
9140 9139 sz = fnd_mapping_sz(pp);
9141 9140 pp = PP_GROUPLEADER(pp, sz);
9142 9141 SFMMU_STAT_ADD(sf_uncache_conflict, TTEPAGES(sz));
9143 9142 sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH,
9144 9143 TTEPAGES(sz));
9145 9144
9146 9145 return;
9147 9146 }
9148 9147
9149 9148 /*
9150 9149 * Check if any mapping is in the same address space (as) or is locked,
9151 9150 * since in either case we need to uncache.
9152 9151 */
9153 9152 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) {
9154 9153 tmphme = sfhmep->hme_next;
9155 9154 if (IS_PAHME(sfhmep))
9156 9155 continue;
9157 9156 hmeblkp = sfmmu_hmetohblk(sfhmep);
9158 9157 tmphat = hblktosfmmu(hmeblkp);
9159 9158 sfmmu_copytte(&sfhmep->hme_tte, &tte);
9160 9159 ASSERT(TTE_IS_VALID(&tte));
9161 9160 if (hmeblkp->hblk_shared || tmphat == hat ||
9162 9161 hmeblkp->hblk_lckcnt) {
9163 9162 /*
9164 9163 * We have an uncache conflict
9165 9164 */
9166 9165 SFMMU_STAT(sf_uncache_conflict);
9167 9166 sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH, 1);
9168 9167 return;
9169 9168 }
9170 9169 }
9171 9170
9172 9171 /*
9173 9172 * We have an unload conflict
9174 9173 * We have already checked for LARGE mappings, therefore
9175 9174 * the remaining mapping(s) must be TTE8K.
9176 9175 */
9177 9176 SFMMU_STAT(sf_unload_conflict);
9178 9177
9179 9178 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) {
9180 9179 tmphme = sfhmep->hme_next;
9181 9180 if (IS_PAHME(sfhmep))
9182 9181 continue;
9183 9182 hmeblkp = sfmmu_hmetohblk(sfhmep);
9184 9183 ASSERT(!hmeblkp->hblk_shared);
9185 9184 (void) sfmmu_pageunload(pp, sfhmep, TTE8K);
9186 9185 }
9187 9186
9188 9187 if (PP_ISMAPPED_KPM(pp))
9189 9188 sfmmu_kpm_vac_unload(pp, addr);
9190 9189
9191 9190 /*
9192 9191 * Unloads only do TLB flushes so we need to flush the
9193 9192 * cache here.
9194 9193 */
9195 9194 sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp));
9196 9195 PP_SET_VCOLOR(pp, vcolor);
9197 9196 }
9198 9197
9199 9198 /*
9200 9199 * Whenever a mapping is unloaded and the page is in TNC state,
9201 9200 * we see if the page can be made cacheable again. 'pp' is
9202 9201 * the page that we just unloaded a mapping from, the size
9203 9202 * of mapping that was unloaded is 'ottesz'.
9204 9203 * Remark:
9205 9204 * The recache policy for mpss pages can leave a performance problem
9206 9205 * under the following circumstances:
9207 9206 * . A large page in uncached mode has just been unmapped.
9208 9207 * . All constituent pages are TNC due to a conflicting small mapping.
9209 9208 * . There are many other, non conflicting, small mappings around for
9210 9209 * a lot of the constituent pages.
9211 9210 * . We're called w/ the "old" groupleader page and the old ottesz,
9212 9211 * but this is irrelevant, since we're no longer "PP_ISMAPPED_LARGE", so
9213 9212 * we end up w/ TTE8K or npages == 1.
9214 9213 * . We call tst_tnc w/ the old groupleader only, and if there is no
9215 9214 * conflict, we re-cache only this page.
9216 9215 * . All other small mappings are not checked and will be left in TNC mode.
9217 9216 * The problem is not very serious because:
9218 9217 * . mpss is actually only defined for heap and stack, so the probability
9219 9218 * is not very high that a large page mapping exists in parallel to a small
9220 9219 * one (this is possible, but seems to be bad programming style in the
9221 9220 * appl).
9222 9221 * . The problem gets a little bit more serious, when those TNC pages
9223 9222 * have to be mapped into kernel space, e.g. for networking.
9224 9223 * . When VAC alias conflicts occur in applications, this is regarded
9225 9224 * as an application bug. So if kstat's show them, the appl should
9226 9225 * be changed anyway.
9227 9226 */
9228 9227 void
9229 9228 conv_tnc(page_t *pp, int ottesz)
9230 9229 {
9231 9230 int cursz, dosz;
9232 9231 pgcnt_t curnpgs, dopgs;
9233 9232 pgcnt_t pg64k;
9234 9233 page_t *pp2;
9235 9234
9236 9235 /*
9237 9236 * Determine how big a range we check for TNC and find
9238 9237 * leader page. cursz is the size of the biggest
9239 9238 * mapping that still exist on 'pp'.
9240 9239 */
9241 9240 if (PP_ISMAPPED_LARGE(pp)) {
9242 9241 cursz = fnd_mapping_sz(pp);
9243 9242 } else {
9244 9243 cursz = TTE8K;
9245 9244 }
9246 9245
9247 9246 if (ottesz >= cursz) {
9248 9247 dosz = ottesz;
9249 9248 pp2 = pp;
9250 9249 } else {
9251 9250 dosz = cursz;
9252 9251 pp2 = PP_GROUPLEADER(pp, dosz);
9253 9252 }
9254 9253
9255 9254 pg64k = TTEPAGES(TTE64K);
9256 9255 dopgs = TTEPAGES(dosz);
9257 9256
9258 9257 ASSERT(dopgs == 1 || ((dopgs & (pg64k - 1)) == 0));
9259 9258
9260 9259 while (dopgs != 0) {
9261 9260 curnpgs = TTEPAGES(cursz);
9262 9261 if (tst_tnc(pp2, curnpgs)) {
9263 9262 SFMMU_STAT_ADD(sf_recache, curnpgs);
9264 9263 sfmmu_page_cache_array(pp2, HAT_CACHE, CACHE_NO_FLUSH,
9265 9264 curnpgs);
9266 9265 }
9267 9266
9268 9267 ASSERT(dopgs >= curnpgs);
9269 9268 dopgs -= curnpgs;
9270 9269
9271 9270 if (dopgs == 0) {
9272 9271 break;
9273 9272 }
9274 9273
9275 9274 pp2 = PP_PAGENEXT_N(pp2, curnpgs);
9276 9275 if (((dopgs & (pg64k - 1)) == 0) && PP_ISMAPPED_LARGE(pp2)) {
9277 9276 cursz = fnd_mapping_sz(pp2);
9278 9277 } else {
9279 9278 cursz = TTE8K;
9280 9279 }
9281 9280 }
9282 9281 }
9283 9282
9284 9283 /*
9285 9284 * Returns 1 if page(s) can be converted from TNC to cacheable setting,
9286 9285 * returns 0 otherwise. Note that oaddr argument is valid for only
9287 9286 * 8k pages.
9288 9287 */
9289 9288 int
9290 9289 tst_tnc(page_t *pp, pgcnt_t npages)
9291 9290 {
9292 9291 struct sf_hment *sfhme;
9293 9292 struct hme_blk *hmeblkp;
9294 9293 tte_t tte;
9295 9294 caddr_t vaddr;
9296 9295 int clr_valid = 0;
9297 9296 int color, color1, bcolor;
9298 9297 int i, ncolors;
9299 9298
9300 9299 ASSERT(pp != NULL);
9301 9300 ASSERT(!(cache & CACHE_WRITEBACK));
9302 9301
9303 9302 if (npages > 1) {
9304 9303 ncolors = CACHE_NUM_COLOR;
9305 9304 }
9306 9305
9307 9306 for (i = 0; i < npages; i++) {
9308 9307 ASSERT(sfmmu_mlist_held(pp));
9309 9308 ASSERT(PP_ISTNC(pp));
9310 9309 ASSERT(PP_GET_VCOLOR(pp) == NO_VCOLOR);
9311 9310
9312 9311 if (PP_ISPNC(pp)) {
9313 9312 return (0);
9314 9313 }
9315 9314
9316 9315 clr_valid = 0;
9317 9316 if (PP_ISMAPPED_KPM(pp)) {
9318 9317 caddr_t kpmvaddr;
9319 9318
9320 9319 ASSERT(kpm_enable);
9321 9320 kpmvaddr = hat_kpm_page2va(pp, 1);
9322 9321 ASSERT(!(npages > 1 && IS_KPM_ALIAS_RANGE(kpmvaddr)));
9323 9322 color1 = addr_to_vcolor(kpmvaddr);
9324 9323 clr_valid = 1;
9325 9324 }
9326 9325
9327 9326 for (sfhme = pp->p_mapping; sfhme; sfhme = sfhme->hme_next) {
9328 9327 if (IS_PAHME(sfhme))
9329 9328 continue;
9330 9329 hmeblkp = sfmmu_hmetohblk(sfhme);
9331 9330
9332 9331 sfmmu_copytte(&sfhme->hme_tte, &tte);
9333 9332 ASSERT(TTE_IS_VALID(&tte));
9334 9333
9335 9334 vaddr = tte_to_vaddr(hmeblkp, tte);
9336 9335 color = addr_to_vcolor(vaddr);
9337 9336
9338 9337 if (npages > 1) {
9339 9338 /*
9340 9339 * If there is a big mapping, make sure
9341 9340 * 8K mapping is consistent with the big
9342 9341 * mapping.
9343 9342 */
9344 9343 bcolor = i % ncolors;
9345 9344 if (color != bcolor) {
9346 9345 return (0);
9347 9346 }
9348 9347 }
9349 9348 if (!clr_valid) {
9350 9349 clr_valid = 1;
9351 9350 color1 = color;
9352 9351 }
9353 9352
9354 9353 if (color1 != color) {
9355 9354 return (0);
9356 9355 }
9357 9356 }
9358 9357
9359 9358 pp = PP_PAGENEXT(pp);
9360 9359 }
9361 9360
9362 9361 return (1);
9363 9362 }
9364 9363
9365 9364 void
9366 9365 sfmmu_page_cache_array(page_t *pp, int flags, int cache_flush_flag,
9367 9366 pgcnt_t npages)
9368 9367 {
9369 9368 kmutex_t *pmtx;
9370 9369 int i, ncolors, bcolor;
9371 9370 kpm_hlk_t *kpmp;
9372 9371 cpuset_t cpuset;
9373 9372
9374 9373 ASSERT(pp != NULL);
9375 9374 ASSERT(!(cache & CACHE_WRITEBACK));
9376 9375
9377 9376 kpmp = sfmmu_kpm_kpmp_enter(pp, npages);
9378 9377 pmtx = sfmmu_page_enter(pp);
9379 9378
9380 9379 /*
9381 9380 * Fast path caching single unmapped page
9382 9381 */
9383 9382 if (npages == 1 && !PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp) &&
9384 9383 flags == HAT_CACHE) {
9385 9384 PP_CLRTNC(pp);
9386 9385 PP_CLRPNC(pp);
9387 9386 sfmmu_page_exit(pmtx);
9388 9387 sfmmu_kpm_kpmp_exit(kpmp);
9389 9388 return;
9390 9389 }
9391 9390
9392 9391 /*
9393 9392 * We need to capture all cpus in order to change cacheability
9394 9393 * because we can't allow one cpu to access the same physical
9395 9394 * page using a cacheable and a non-cacheable mapping at the same
9396 9395 * time. Since we may end up walking the ism mapping list we
9397 9396 * have to grab its lock now, since we can't do so after all the
9398 9397 * cpus have been captured.
9399 9398 */
9400 9399 sfmmu_hat_lock_all();
9401 9400 mutex_enter(&ism_mlist_lock);
9402 9401 kpreempt_disable();
9403 9402 cpuset = cpu_ready_set;
9404 9403 xc_attention(cpuset);
9405 9404
9406 9405 if (npages > 1) {
9407 9406 /*
9408 9407 * Make sure all colors are flushed since the
9409 9408 * sfmmu_page_cache() only flushes one color-
9410 9409 * it does not know big pages.
9411 9410 */
9412 9411 ncolors = CACHE_NUM_COLOR;
9413 9412 if (flags & HAT_TMPNC) {
9414 9413 for (i = 0; i < ncolors; i++) {
9415 9414 sfmmu_cache_flushcolor(i, pp->p_pagenum);
9416 9415 }
9417 9416 cache_flush_flag = CACHE_NO_FLUSH;
9418 9417 }
9419 9418 }
9420 9419
9421 9420 for (i = 0; i < npages; i++) {
9422 9421
9423 9422 ASSERT(sfmmu_mlist_held(pp));
9424 9423
9425 9424 if (!(flags == HAT_TMPNC && PP_ISTNC(pp))) {
9426 9425
9427 9426 if (npages > 1) {
9428 9427 bcolor = i % ncolors;
9429 9428 } else {
9430 9429 bcolor = NO_VCOLOR;
9431 9430 }
9432 9431
9433 9432 sfmmu_page_cache(pp, flags, cache_flush_flag,
9434 9433 bcolor);
9435 9434 }
9436 9435
9437 9436 pp = PP_PAGENEXT(pp);
9438 9437 }
9439 9438
9440 9439 xt_sync(cpuset);
9441 9440 xc_dismissed(cpuset);
9442 9441 mutex_exit(&ism_mlist_lock);
9443 9442 sfmmu_hat_unlock_all();
9444 9443 sfmmu_page_exit(pmtx);
9445 9444 sfmmu_kpm_kpmp_exit(kpmp);
9446 9445 kpreempt_enable();
9447 9446 }
9448 9447
9449 9448 /*
9450 9449 * This function changes the virtual cacheability of all mappings to a
9451 9450 * particular page. When changing from uncache to cacheable the mappings will
9452 9451 * only be changed if all of them have the same virtual color.
9453 9452 * We need to flush the cache in all cpus. It is possible that
9454 9453 * a process referenced a page as cacheable but has since exited
9455 9454 * and cleared the mapping list. We still need to flush it, but have no
9456 9455 * state, so flushing on all cpus is the only alternative.
9457 9456 */
9458 9457 static void
9459 9458 sfmmu_page_cache(page_t *pp, int flags, int cache_flush_flag, int bcolor)
9460 9459 {
9461 9460 struct sf_hment *sfhme;
9462 9461 struct hme_blk *hmeblkp;
9463 9462 sfmmu_t *sfmmup;
9464 9463 tte_t tte, ttemod;
9465 9464 caddr_t vaddr;
9466 9465 int ret, color;
9467 9466 pfn_t pfn;
9468 9467
9469 9468 color = bcolor;
9470 9469 pfn = pp->p_pagenum;
9471 9470
9472 9471 for (sfhme = pp->p_mapping; sfhme; sfhme = sfhme->hme_next) {
9473 9472
9474 9473 if (IS_PAHME(sfhme))
9475 9474 continue;
9476 9475 hmeblkp = sfmmu_hmetohblk(sfhme);
9477 9476
9478 9477 sfmmu_copytte(&sfhme->hme_tte, &tte);
9479 9478 ASSERT(TTE_IS_VALID(&tte));
9480 9479 vaddr = tte_to_vaddr(hmeblkp, tte);
9481 9480 color = addr_to_vcolor(vaddr);
9482 9481
9483 9482 #ifdef DEBUG
9484 9483 if ((flags & HAT_CACHE) && bcolor != NO_VCOLOR) {
9485 9484 ASSERT(color == bcolor);
9486 9485 }
9487 9486 #endif
9488 9487
9489 9488 ASSERT(flags != HAT_TMPNC || color == PP_GET_VCOLOR(pp));
9490 9489
9491 9490 ttemod = tte;
9492 9491 if (flags & (HAT_UNCACHE | HAT_TMPNC)) {
9493 9492 TTE_CLR_VCACHEABLE(&ttemod);
9494 9493 } else { /* flags & HAT_CACHE */
9495 9494 TTE_SET_VCACHEABLE(&ttemod);
9496 9495 }
9497 9496 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte);
9498 9497 if (ret < 0) {
9499 9498 /*
9500 9499 * Since all cpus are captured modifytte should not
9501 9500 * fail.
9502 9501 */
9503 9502 panic("sfmmu_page_cache: write to tte failed");
9504 9503 }
9505 9504
9506 9505 sfmmup = hblktosfmmu(hmeblkp);
9507 9506 if (cache_flush_flag == CACHE_FLUSH) {
9508 9507 /*
9509 9508 * Flush TSBs, TLBs and caches
9510 9509 */
9511 9510 if (hmeblkp->hblk_shared) {
9512 9511 sf_srd_t *srdp = (sf_srd_t *)sfmmup;
9513 9512 uint_t rid = hmeblkp->hblk_tag.htag_rid;
9514 9513 sf_region_t *rgnp;
9515 9514 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
9516 9515 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
9517 9516 ASSERT(srdp != NULL);
9518 9517 rgnp = srdp->srd_hmergnp[rid];
9519 9518 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp,
9520 9519 srdp, rgnp, rid);
9521 9520 (void) sfmmu_rgntlb_demap(vaddr, rgnp,
9522 9521 hmeblkp, 0);
9523 9522 sfmmu_cache_flush(pfn, addr_to_vcolor(vaddr));
9524 9523 } else if (sfmmup->sfmmu_ismhat) {
9525 9524 if (flags & HAT_CACHE) {
9526 9525 SFMMU_STAT(sf_ism_recache);
9527 9526 } else {
9528 9527 SFMMU_STAT(sf_ism_uncache);
9529 9528 }
9530 9529 sfmmu_ismtlbcache_demap(vaddr, sfmmup, hmeblkp,
9531 9530 pfn, CACHE_FLUSH);
9532 9531 } else {
9533 9532 sfmmu_tlbcache_demap(vaddr, sfmmup, hmeblkp,
9534 9533 pfn, 0, FLUSH_ALL_CPUS, CACHE_FLUSH, 1);
9535 9534 }
9536 9535
9537 9536 /*
9538 9537 * all cache entries belonging to this pfn are
9539 9538 * now flushed.
9540 9539 */
9541 9540 cache_flush_flag = CACHE_NO_FLUSH;
9542 9541 } else {
9543 9542 /*
9544 9543 * Flush only TSBs and TLBs.
9545 9544 */
9546 9545 if (hmeblkp->hblk_shared) {
9547 9546 sf_srd_t *srdp = (sf_srd_t *)sfmmup;
9548 9547 uint_t rid = hmeblkp->hblk_tag.htag_rid;
9549 9548 sf_region_t *rgnp;
9550 9549 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
9551 9550 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
9552 9551 ASSERT(srdp != NULL);
9553 9552 rgnp = srdp->srd_hmergnp[rid];
9554 9553 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp,
9555 9554 srdp, rgnp, rid);
9556 9555 (void) sfmmu_rgntlb_demap(vaddr, rgnp,
9557 9556 hmeblkp, 0);
9558 9557 } else if (sfmmup->sfmmu_ismhat) {
9559 9558 if (flags & HAT_CACHE) {
9560 9559 SFMMU_STAT(sf_ism_recache);
9561 9560 } else {
9562 9561 SFMMU_STAT(sf_ism_uncache);
9563 9562 }
9564 9563 sfmmu_ismtlbcache_demap(vaddr, sfmmup, hmeblkp,
9565 9564 pfn, CACHE_NO_FLUSH);
9566 9565 } else {
9567 9566 sfmmu_tlb_demap(vaddr, sfmmup, hmeblkp, 0, 1);
9568 9567 }
9569 9568 }
9570 9569 }
9571 9570
9572 9571 if (PP_ISMAPPED_KPM(pp))
9573 9572 sfmmu_kpm_page_cache(pp, flags, cache_flush_flag);
9574 9573
9575 9574 switch (flags) {
9576 9575
9577 9576 default:
9578 9577 panic("sfmmu_pagecache: unknown flags");
9579 9578 break;
9580 9579
9581 9580 case HAT_CACHE:
9582 9581 PP_CLRTNC(pp);
9583 9582 PP_CLRPNC(pp);
9584 9583 PP_SET_VCOLOR(pp, color);
9585 9584 break;
9586 9585
9587 9586 case HAT_TMPNC:
9588 9587 PP_SETTNC(pp);
9589 9588 PP_SET_VCOLOR(pp, NO_VCOLOR);
9590 9589 break;
9591 9590
9592 9591 case HAT_UNCACHE:
9593 9592 PP_SETPNC(pp);
9594 9593 PP_CLRTNC(pp);
9595 9594 PP_SET_VCOLOR(pp, NO_VCOLOR);
9596 9595 break;
9597 9596 }
9598 9597 }
9599 9598 #endif /* VAC */
9600 9599
9601 9600
9602 9601 /*
9603 9602 * Wrapper routine used to return a context.
9604 9603 *
9605 9604 * It's the responsibility of the caller to guarantee that the
9606 9605 * process serializes on calls here by taking the HAT lock for
9607 9606 * the hat.
9608 9607 *
9609 9608 */
9610 9609 static void
9611 9610 sfmmu_get_ctx(sfmmu_t *sfmmup)
9612 9611 {
9613 9612 mmu_ctx_t *mmu_ctxp;
9614 9613 uint_t pstate_save;
9615 9614 int ret;
9616 9615
9617 9616 ASSERT(sfmmu_hat_lock_held(sfmmup));
9618 9617 ASSERT(sfmmup != ksfmmup);
9619 9618
9620 9619 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_ALLCTX_INVALID)) {
9621 9620 sfmmu_setup_tsbinfo(sfmmup);
9622 9621 SFMMU_FLAGS_CLEAR(sfmmup, HAT_ALLCTX_INVALID);
9623 9622 }
9624 9623
9625 9624 kpreempt_disable();
9626 9625
9627 9626 mmu_ctxp = CPU_MMU_CTXP(CPU);
9628 9627 ASSERT(mmu_ctxp);
9629 9628 ASSERT(mmu_ctxp->mmu_idx < max_mmu_ctxdoms);
9630 9629 ASSERT(mmu_ctxp == mmu_ctxs_tbl[mmu_ctxp->mmu_idx]);
9631 9630
9632 9631 /*
9633 9632 * Do a wrap-around if cnum reaches the max # of cnums supported by an MMU.
9634 9633 */
9635 9634 if (mmu_ctxp->mmu_cnum == mmu_ctxp->mmu_nctxs)
9636 9635 sfmmu_ctx_wrap_around(mmu_ctxp, B_TRUE);
9637 9636
9638 9637 /*
9639 9638 * Let the MMU set up the page sizes to use for
9640 9639 * this context in the TLB. Don't program 2nd dtlb for ism hat.
9641 9640 */
9642 9641 if ((&mmu_set_ctx_page_sizes) && (sfmmup->sfmmu_ismhat == 0)) {
9643 9642 mmu_set_ctx_page_sizes(sfmmup);
9644 9643 }
9645 9644
9646 9645 /*
9647 9646 * sfmmu_alloc_ctx and sfmmu_load_mmustate will be performed with
9648 9647 * interrupts disabled to prevent a race condition with wrap-around
9649 9648 * ctx invalidation. In sun4v, ctx invalidation also involves
9650 9649 * a HV call to set the number of TSBs to 0. If interrupts are not
9651 9650 * disabled until after sfmmu_load_mmustate is complete TSBs may
9652 9651 * become assigned to INVALID_CONTEXT. This is not allowed.
9653 9652 */
9654 9653 pstate_save = sfmmu_disable_intrs();
9655 9654
9656 9655 if (sfmmu_alloc_ctx(sfmmup, 1, CPU, SFMMU_PRIVATE) &&
9657 9656 sfmmup->sfmmu_scdp != NULL) {
9658 9657 sf_scd_t *scdp = sfmmup->sfmmu_scdp;
9659 9658 sfmmu_t *scsfmmup = scdp->scd_sfmmup;
9660 9659 ret = sfmmu_alloc_ctx(scsfmmup, 1, CPU, SFMMU_SHARED);
9661 9660 /* debug purpose only */
9662 9661 ASSERT(!ret || scsfmmup->sfmmu_ctxs[CPU_MMU_IDX(CPU)].cnum
9663 9662 != INVALID_CONTEXT);
9664 9663 }
9665 9664 sfmmu_load_mmustate(sfmmup);
9666 9665
9667 9666 sfmmu_enable_intrs(pstate_save);
9668 9667
9669 9668 kpreempt_enable();
9670 9669 }
9671 9670
9672 9671 /*
9673 9672 * When all cnums are used up in a MMU, cnum will wrap around to the
9674 9673 * next generation and start from 2.
9675 9674 */
9676 9675 static void
9677 9676 sfmmu_ctx_wrap_around(mmu_ctx_t *mmu_ctxp, boolean_t reset_cnum)
9678 9677 {
9679 9678
9680 9679 /* caller must have disabled the preemption */
9681 9680 ASSERT(curthread->t_preempt >= 1);
9682 9681 ASSERT(mmu_ctxp != NULL);
9683 9682
9684 9683 /* acquire Per-MMU (PM) spin lock */
9685 9684 mutex_enter(&mmu_ctxp->mmu_lock);
9686 9685
9687 9686 /* re-check to see if wrap-around is needed */
9688 9687 if (mmu_ctxp->mmu_cnum < mmu_ctxp->mmu_nctxs)
9689 9688 goto done;
9690 9689
9691 9690 SFMMU_MMU_STAT(mmu_wrap_around);
9692 9691
9693 9692 /* update gnum */
9694 9693 ASSERT(mmu_ctxp->mmu_gnum != 0);
9695 9694 mmu_ctxp->mmu_gnum++;
9696 9695 if (mmu_ctxp->mmu_gnum == 0 ||
9697 9696 mmu_ctxp->mmu_gnum > MAX_SFMMU_GNUM_VAL) {
9698 9697 cmn_err(CE_PANIC, "mmu_gnum of mmu_ctx 0x%p is out of bound.",
9699 9698 (void *)mmu_ctxp);
9700 9699 }
9701 9700
9702 9701 if (mmu_ctxp->mmu_ncpus > 1) {
9703 9702 cpuset_t cpuset;
9704 9703
9705 9704 membar_enter(); /* make sure updated gnum visible */
9706 9705
9707 9706 SFMMU_XCALL_STATS(NULL);
9708 9707
9709 9708 /* xcall to others on the same MMU to invalidate ctx */
9710 9709 cpuset = mmu_ctxp->mmu_cpuset;
9711 9710 ASSERT(CPU_IN_SET(cpuset, CPU->cpu_id) || !reset_cnum);
9712 9711 CPUSET_DEL(cpuset, CPU->cpu_id);
9713 9712 CPUSET_AND(cpuset, cpu_ready_set);
9714 9713
9715 9714 /*
9716 9715 * Pass in INVALID_CONTEXT as the first parameter to
9717 9716 * sfmmu_raise_tsb_exception, which invalidates the context
9718 9717 * of any process running on the CPUs in the MMU.
9719 9718 */
9720 9719 xt_some(cpuset, sfmmu_raise_tsb_exception,
9721 9720 INVALID_CONTEXT, INVALID_CONTEXT);
9722 9721 xt_sync(cpuset);
9723 9722
9724 9723 SFMMU_MMU_STAT(mmu_tsb_raise_exception);
9725 9724 }
9726 9725
9727 9726 if (sfmmu_getctx_sec() != INVALID_CONTEXT) {
9728 9727 sfmmu_setctx_sec(INVALID_CONTEXT);
9729 9728 sfmmu_clear_utsbinfo();
9730 9729 }
9731 9730
9732 9731 /*
9733 9732 * No xcall is needed here. For sun4u systems all CPUs in context
9734 9733 * domain share a single physical MMU therefore it's enough to flush
9735 9734 * TLB on local CPU. On sun4v systems we use 1 global context
9736 9735 * domain and flush all remote TLBs in sfmmu_raise_tsb_exception
9737 9736 * handler. Note that vtag_flushall_uctxs() is called
9738 9737 * for Ultra II machines, where the equivalent flushall functionality
9739 9738 * is implemented in SW, and only user ctx TLB entries are flushed.
9740 9739 */
9741 9740 if (&vtag_flushall_uctxs != NULL) {
9742 9741 vtag_flushall_uctxs();
9743 9742 } else {
9744 9743 vtag_flushall();
9745 9744 }
9746 9745
9747 9746 /* reset mmu cnum, skips cnum 0 and 1 */
9748 9747 if (reset_cnum == B_TRUE)
9749 9748 mmu_ctxp->mmu_cnum = NUM_LOCKED_CTXS;
9750 9749
9751 9750 done:
9752 9751 mutex_exit(&mmu_ctxp->mmu_lock);
9753 9752 }
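
Context allocation here is generation-based: when the per-MMU cnum counter hits mmu_nctxs, the generation number (gnum) is bumped and cnum restarts at NUM_LOCKED_CTXS, which implicitly invalidates every context handed out under the previous generation. A stripped-down, self-contained sketch of that counter scheme (hypothetical names; the cross-calls, TLB flushes, and locking are omitted; not code from this file) is:

#include <stdio.h>

#define NCTXS       8   /* contexts per MMU */
#define NUM_LOCKED  2   /* cnums 0 and 1 are reserved */

struct ctxdom {
    unsigned long gnum;     /* generation, bumped on each wrap */
    unsigned int cnum;      /* next context number to hand out */
};

/*
 * Hand out (gnum, cnum) pairs; when cnum reaches NCTXS, bump the
 * generation and restart at NUM_LOCKED, which invalidates every
 * context issued under the previous generation.
 */
static void
alloc_ctx(struct ctxdom *dom, unsigned long *gnum, unsigned int *cnum)
{
    if (dom->cnum == NCTXS) {           /* wrap around */
        dom->gnum++;
        dom->cnum = NUM_LOCKED;
    }
    *gnum = dom->gnum;
    *cnum = dom->cnum++;
}

int
main(void)
{
    struct ctxdom dom = { 1, NUM_LOCKED };
    unsigned long g;
    unsigned int c;

    for (int i = 0; i < 10; i++) {
        alloc_ctx(&dom, &g, &c);
        printf("gen %lu cnum %u\n", g, c);
    }
    return (0);
}
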
9754 9753
9755 9754
9756 9755 /*
9757 9756 * For multi-threaded process, set the process context to INVALID_CONTEXT
9758 9757 * so that it faults and reloads the MMU state from TL=0. For single-threaded
9759 9758 * process, we can just load the MMU state directly without having to
9760 9759 * set context invalid. Caller must hold the hat lock since we don't
9761 9760 * acquire it here.
9762 9761 */
9763 9762 static void
9764 9763 sfmmu_sync_mmustate(sfmmu_t *sfmmup)
9765 9764 {
9766 9765 uint_t cnum;
9767 9766 uint_t pstate_save;
9768 9767
9769 9768 ASSERT(sfmmup != ksfmmup);
9770 9769 ASSERT(sfmmu_hat_lock_held(sfmmup));
9771 9770
9772 9771 kpreempt_disable();
9773 9772
9774 9773 /*
9775 9774 * We check whether the passed-in sfmmup is the same as the
9776 9775 * current running proc. This is to make sure the current proc
9777 9776 * stays single-threaded if it already is.
9778 9777 */
9779 9778 if ((sfmmup == curthread->t_procp->p_as->a_hat) &&
9780 9779 (curthread->t_procp->p_lwpcnt == 1)) {
9781 9780 /* single-thread */
9782 9781 cnum = sfmmup->sfmmu_ctxs[CPU_MMU_IDX(CPU)].cnum;
9783 9782 if (cnum != INVALID_CONTEXT) {
9784 9783 uint_t curcnum;
9785 9784 /*
9786 9785 * Disable interrupts to prevent race condition
9787 9786 * with sfmmu_ctx_wrap_around ctx invalidation.
9788 9787 * In sun4v, ctx invalidation involves setting
9789 9788 * TSB to NULL, hence, interrupts should be disabled
9790 9789 * until after sfmmu_load_mmustate is completed.
9791 9790 */
9792 9791 pstate_save = sfmmu_disable_intrs();
9793 9792 curcnum = sfmmu_getctx_sec();
9794 9793 if (curcnum == cnum)
9795 9794 sfmmu_load_mmustate(sfmmup);
9796 9795 sfmmu_enable_intrs(pstate_save);
9797 9796 ASSERT(curcnum == cnum || curcnum == INVALID_CONTEXT);
9798 9797 }
9799 9798 } else {
9800 9799 /*
9801 9800 * multi-thread
9802 9801 * or when sfmmup is not the same as the curproc.
9803 9802 */
9804 9803 sfmmu_invalidate_ctx(sfmmup);
9805 9804 }
9806 9805
9807 9806 kpreempt_enable();
9808 9807 }
9809 9808
9810 9809
9811 9810 /*
9812 9811 * Replace the specified TSB with a new TSB. This function gets called when
9813 9812 * we grow, shrink or swapin a TSB. When swapping in a TSB (TSB_SWAPIN), the
9814 9813 * TSB_FORCEALLOC flag may be used to force allocation of a minimum-sized TSB
9815 9814 * (8K).
9816 9815 *
9817 9816 * Caller must hold the HAT lock, but should assume any tsb_info
9818 9817 * pointers it has are no longer valid after calling this function.
9819 9818 *
9820 9819 * Return values:
9821 9820 * TSB_ALLOCFAIL Failed to allocate a TSB, due to memory constraints
9822 9821 * TSB_LOSTRACE HAT is busy, i.e. another thread is already doing
9823 9822 * something to this tsbinfo/TSB
9824 9823 * TSB_SUCCESS Operation succeeded
9825 9824 */
9826 9825 static tsb_replace_rc_t
9827 9826 sfmmu_replace_tsb(sfmmu_t *sfmmup, struct tsb_info *old_tsbinfo, uint_t szc,
9828 9827 hatlock_t *hatlockp, uint_t flags)
9829 9828 {
9830 9829 struct tsb_info *new_tsbinfo = NULL;
9831 9830 struct tsb_info *curtsb, *prevtsb;
9832 9831 uint_t tte_sz_mask;
9833 9832 int i;
9834 9833
9835 9834 ASSERT(sfmmup != ksfmmup);
9836 9835 ASSERT(sfmmup->sfmmu_ismhat == 0);
9837 9836 ASSERT(sfmmu_hat_lock_held(sfmmup));
9838 9837 ASSERT(szc <= tsb_max_growsize);
9839 9838
9840 9839 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_BUSY))
9841 9840 return (TSB_LOSTRACE);
9842 9841
9843 9842 /*
9844 9843 * Find the tsb_info ahead of this one in the list, and
9845 9844 * also make sure that the tsb_info passed in really
9846 9845 * exists!
9847 9846 */
9848 9847 for (prevtsb = NULL, curtsb = sfmmup->sfmmu_tsb;
9849 9848 curtsb != old_tsbinfo && curtsb != NULL;
9850 9849 prevtsb = curtsb, curtsb = curtsb->tsb_next)
9851 9850 ;
9852 9851 ASSERT(curtsb != NULL);
9853 9852
9854 9853 if (!(flags & TSB_SWAPIN) && SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
9855 9854 /*
9856 9855 * The process is swapped out, so just set the new size
9857 9856 * code. When it swaps back in, we'll allocate a new one
9858 9857 * of the new chosen size.
9859 9858 */
9860 9859 curtsb->tsb_szc = szc;
9861 9860 return (TSB_SUCCESS);
9862 9861 }
9863 9862 SFMMU_FLAGS_SET(sfmmup, HAT_BUSY);
9864 9863
9865 9864 tte_sz_mask = old_tsbinfo->tsb_ttesz_mask;
9866 9865
9867 9866 /*
9868 9867 * All initialization is done inside of sfmmu_tsbinfo_alloc().
9869 9868 * If we fail to allocate a TSB, exit.
9870 9869 *
9871 9870 * If tsb grows with new tsb size > 4M and old tsb size < 4M,
9872 9871 * then try 4M slab after the initial alloc fails.
9873 9872 *
9874 9873 * If tsb swapin with tsb size > 4M, then try 4M after the
9875 9874 * initial alloc fails.
9876 9875 */
9877 9876 sfmmu_hat_exit(hatlockp);
9878 9877 if (sfmmu_tsbinfo_alloc(&new_tsbinfo, szc,
9879 9878 tte_sz_mask, flags, sfmmup) &&
9880 9879 (!(flags & (TSB_GROW | TSB_SWAPIN)) || (szc <= TSB_4M_SZCODE) ||
9881 9880 (!(flags & TSB_SWAPIN) &&
9882 9881 (old_tsbinfo->tsb_szc >= TSB_4M_SZCODE)) ||
9883 9882 sfmmu_tsbinfo_alloc(&new_tsbinfo, TSB_4M_SZCODE,
9884 9883 tte_sz_mask, flags, sfmmup))) {
9885 9884 (void) sfmmu_hat_enter(sfmmup);
9886 9885 if (!(flags & TSB_SWAPIN))
9887 9886 SFMMU_STAT(sf_tsb_resize_failures);
9888 9887 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY);
9889 9888 return (TSB_ALLOCFAIL);
9890 9889 }
9891 9890 (void) sfmmu_hat_enter(sfmmup);
9892 9891
9893 9892 /*
9894 9893 * Re-check to make sure somebody else didn't muck with us while we
9895 9894 * didn't hold the HAT lock. If the process swapped out, fine, just
9896 9895 * exit; this can happen if we try to shrink the TSB from the context
9897 9896 * of another process (such as on an ISM unmap), though it is rare.
9898 9897 */
9899 9898 if (!(flags & TSB_SWAPIN) && SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
9900 9899 SFMMU_STAT(sf_tsb_resize_failures);
9901 9900 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY);
9902 9901 sfmmu_hat_exit(hatlockp);
9903 9902 sfmmu_tsbinfo_free(new_tsbinfo);
9904 9903 (void) sfmmu_hat_enter(sfmmup);
9905 9904 return (TSB_LOSTRACE);
9906 9905 }
9907 9906
9908 9907 #ifdef DEBUG
9909 9908 /* Reverify that the tsb_info still exists; for debugging only */
9910 9909 for (prevtsb = NULL, curtsb = sfmmup->sfmmu_tsb;
9911 9910 curtsb != old_tsbinfo && curtsb != NULL;
9912 9911 prevtsb = curtsb, curtsb = curtsb->tsb_next)
9913 9912 ;
9914 9913 ASSERT(curtsb != NULL);
9915 9914 #endif /* DEBUG */
9916 9915
9917 9916 /*
9918 9917 * Quiesce any CPUs running this process on their next TLB miss
9919 9918 * so they atomically see the new tsb_info. We temporarily set the
9920 9919 * context to the invalid context so new threads that come on processor
9921 9920 * after we do the xcall to cpusran will also serialize behind the
9922 9921 * HAT lock on TLB miss and will see the new TSB. Since this short
9923 9922 * race with a new thread coming on processor is relatively rare,
9924 9923 * this synchronization mechanism should be cheaper than always
9925 9924 * pausing all CPUs for the duration of the setup, which is what
9926 9925 * the old implementation did. This is particularly true if we are
9927 9926 * copying a huge chunk of memory around during that window.
9928 9927 *
9929 9928 * The memory barriers are to make sure things stay consistent
9930 9929 * with resume() since it does not hold the HAT lock while
9931 9930 * walking the list of tsb_info structures.
9932 9931 */
9933 9932 if ((flags & TSB_SWAPIN) != TSB_SWAPIN) {
9934 9933 /* The TSB is either growing or shrinking. */
9935 9934 sfmmu_invalidate_ctx(sfmmup);
9936 9935 } else {
9937 9936 /*
9938 9937 * It is illegal to swap in TSBs from a process other
9939 9938 * than a process being swapped in. This in turn
9940 9939 * implies we do not have a valid MMU context here
9941 9940 * since a process needs one to resolve translation
9942 9941 * misses.
9943 9942 */
9944 9943 ASSERT(curthread->t_procp->p_as->a_hat == sfmmup);
9945 9944 }
9946 9945
9947 9946 #ifdef DEBUG
9948 9947 ASSERT(max_mmu_ctxdoms > 0);
9949 9948
9950 9949 /*
9951 9950 * Process should have INVALID_CONTEXT on all MMUs
9952 9951 */
9953 9952 for (i = 0; i < max_mmu_ctxdoms; i++) {
9954 9953
9955 9954 ASSERT(sfmmup->sfmmu_ctxs[i].cnum == INVALID_CONTEXT);
9956 9955 }
9957 9956 #endif
9958 9957
9959 9958 new_tsbinfo->tsb_next = old_tsbinfo->tsb_next;
9960 9959 membar_stst(); /* strict ordering required */
9961 9960 if (prevtsb)
9962 9961 prevtsb->tsb_next = new_tsbinfo;
9963 9962 else
9964 9963 sfmmup->sfmmu_tsb = new_tsbinfo;
9965 9964 membar_enter(); /* make sure new TSB globally visible */
9966 9965
9967 9966 /*
9968 9967 * We need to migrate TSB entries from the old TSB to the new TSB
9969 9968 * if tsb_remap_ttes is set and the TSB is growing.
9970 9969 */
9971 9970 if (tsb_remap_ttes && ((flags & TSB_GROW) == TSB_GROW))
9972 9971 sfmmu_copy_tsb(old_tsbinfo, new_tsbinfo);
9973 9972
9974 9973 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY);
9975 9974
9976 9975 /*
9977 9976 * Drop the HAT lock to free our old tsb_info.
9978 9977 */
9979 9978 sfmmu_hat_exit(hatlockp);
9980 9979
9981 9980 if ((flags & TSB_GROW) == TSB_GROW) {
9982 9981 SFMMU_STAT(sf_tsb_grow);
9983 9982 } else if ((flags & TSB_SHRINK) == TSB_SHRINK) {
9984 9983 SFMMU_STAT(sf_tsb_shrink);
9985 9984 }
9986 9985
9987 9986 sfmmu_tsbinfo_free(old_tsbinfo);
9988 9987
9989 9988 (void) sfmmu_hat_enter(sfmmup);
9990 9989 return (TSB_SUCCESS);
9991 9990 }
9992 9991
9993 9992 /*
9994 9993 * This function will re-program hat pgsz array, and invalidate the
9995 9994 * process' context, forcing the process to switch to another
9996 9995 * context on the next TLB miss, and therefore start using the
9997 9996 * TLB that is reprogrammed for the new page sizes.
9998 9997 */
9999 9998 void
10000 9999 sfmmu_reprog_pgsz_arr(sfmmu_t *sfmmup, uint8_t *tmp_pgsz)
10001 10000 {
10002 10001 int i;
10003 10002 hatlock_t *hatlockp = NULL;
10004 10003
10005 10004 hatlockp = sfmmu_hat_enter(sfmmup);
10006 10005 /* USIII+-IV+ optimization, requires hat lock */
10007 10006 if (tmp_pgsz) {
10008 10007 for (i = 0; i < mmu_page_sizes; i++)
10009 10008 sfmmup->sfmmu_pgsz[i] = tmp_pgsz[i];
10010 10009 }
10011 10010 SFMMU_STAT(sf_tlb_reprog_pgsz);
10012 10011
10013 10012 sfmmu_invalidate_ctx(sfmmup);
10014 10013
10015 10014 sfmmu_hat_exit(hatlockp);
10016 10015 }
10017 10016
10018 10017 /*
10019 10018 * The scd_rttecnt field in the SCD must be updated to take account of the
10020 10019 * regions which it contains.
10021 10020 */
10022 10021 static void
10023 10022 sfmmu_set_scd_rttecnt(sf_srd_t *srdp, sf_scd_t *scdp)
10024 10023 {
10025 10024 uint_t rid;
10026 10025 uint_t i, j;
10027 10026 ulong_t w;
10028 10027 sf_region_t *rgnp;
10029 10028
10030 10029 ASSERT(srdp != NULL);
10031 10030
10032 10031 for (i = 0; i < SFMMU_HMERGNMAP_WORDS; i++) {
10033 10032 if ((w = scdp->scd_region_map.bitmap[i]) == 0) {
10034 10033 continue;
10035 10034 }
10036 10035
10037 10036 j = 0;
10038 10037 while (w) {
10039 10038 if (!(w & 0x1)) {
10040 10039 j++;
10041 10040 w >>= 1;
10042 10041 continue;
10043 10042 }
10044 10043 rid = (i << BT_ULSHIFT) | j;
10045 10044 j++;
10046 10045 w >>= 1;
10047 10046
10048 10047 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
10049 10048 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
10050 10049 rgnp = srdp->srd_hmergnp[rid];
10051 10050 ASSERT(rgnp->rgn_refcnt > 0);
10052 10051 ASSERT(rgnp->rgn_id == rid);
10053 10052
10054 10053 scdp->scd_rttecnt[rgnp->rgn_pgszc] +=
10055 10054 rgnp->rgn_size >> TTE_PAGE_SHIFT(rgnp->rgn_pgszc);
10056 10055
10057 10056 /*
10058 10057 * Maintain the tsb0 inflation cnt for the regions
10059 10058 * in the SCD.
10060 10059 */
10061 10060 if (rgnp->rgn_pgszc >= TTE4M) {
10062 10061 scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt +=
10063 10062 rgnp->rgn_size >>
10064 10063 (TTE_PAGE_SHIFT(TTE8K) + 2);
10065 10064 }
10066 10065 }
10067 10066 }
10068 10067 }
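
/*
 * A worked example of the arithmetic above (illustrative only, assuming
 * BT_ULSHIFT is 6 for 64-bit bitmap words and 8K/4M page shifts of 13/22):
 * bitmap word i == 1 with bit j == 3 set decodes to rid == (1 << 6) | 3 == 67.
 * A 32MB region of 4M pages then adds 32MB >> 22 == 8 ttes to
 * scd_rttecnt[TTE4M] and 32MB >> (13 + 2) == 1024 to sfmmu_tsb0_4minflcnt.
 */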
10069 10068
10070 10069 /*
10071 10070 * This function assumes that there are either four or six supported page
10072 10071 * sizes and at most two programmable TLBs, so we need to decide which
10073 10072 * page sizes are most important and then tell the MMU layer so it
10074 10073 * can adjust the TLB page sizes accordingly (if supported).
10075 10074 *
10076 10075 * If these assumptions change, this function will need to be
10077 10076 * updated to support whatever the new limits are.
10078 10077 *
10079 10078 * The growing flag is nonzero if we are growing the address space,
10080 10079 * and zero if it is shrinking. This allows us to decide whether
10081 10080 * to grow or shrink our TSB, depending upon available memory
10082 10081 * conditions.
10083 10082 */
10084 10083 static void
10085 10084 sfmmu_check_page_sizes(sfmmu_t *sfmmup, int growing)
10086 10085 {
10087 10086 uint64_t ttecnt[MMU_PAGE_SIZES];
10088 10087 uint64_t tte8k_cnt, tte4m_cnt;
10089 10088 uint8_t i;
10090 10089 int sectsb_thresh;
10091 10090
10092 10091 /*
10093 10092 * Kernel threads, processes with small address spaces not using
10094 10093 * large pages, and dummy ISM HATs need not apply.
10095 10094 */
10096 10095 if (sfmmup == ksfmmup || sfmmup->sfmmu_ismhat != NULL)
10097 10096 return;
10098 10097
10099 10098 if (!SFMMU_LGPGS_INUSE(sfmmup) &&
10100 10099 sfmmup->sfmmu_ttecnt[TTE8K] <= tsb_rss_factor)
10101 10100 return;
10102 10101
10103 10102 for (i = 0; i < mmu_page_sizes; i++) {
10104 10103 ttecnt[i] = sfmmup->sfmmu_ttecnt[i] +
10105 10104 sfmmup->sfmmu_ismttecnt[i];
10106 10105 }
10107 10106
10108 10107 /* Check pagesizes in use, and possibly reprogram DTLB. */
10109 10108 if (&mmu_check_page_sizes)
10110 10109 mmu_check_page_sizes(sfmmup, ttecnt);
10111 10110
10112 10111 /*
10113 10112 * Calculate the number of 8k ttes to represent the span of these
10114 10113 * pages.
10115 10114 */
10116 10115 tte8k_cnt = ttecnt[TTE8K] +
10117 10116 (ttecnt[TTE64K] << (MMU_PAGESHIFT64K - MMU_PAGESHIFT)) +
10118 10117 (ttecnt[TTE512K] << (MMU_PAGESHIFT512K - MMU_PAGESHIFT));
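	/*
	 * For example (assuming 8K/64K/512K page shifts of 13/16/19):
	 * 1000 8K ttes, 10 64K ttes and 2 512K ttes give
	 * 1000 + (10 << 3) + (2 << 6) == 1208 8K-equivalent ttes.
	 */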
10119 10118 if (mmu_page_sizes == max_mmu_page_sizes) {
10120 10119 tte4m_cnt = ttecnt[TTE4M] +
10121 10120 (ttecnt[TTE32M] << (MMU_PAGESHIFT32M - MMU_PAGESHIFT4M)) +
10122 10121 (ttecnt[TTE256M] << (MMU_PAGESHIFT256M - MMU_PAGESHIFT4M));
10123 10122 } else {
10124 10123 tte4m_cnt = ttecnt[TTE4M];
10125 10124 }
10126 10125
10127 10126 /*
10128 10127 * Inflate tte8k_cnt to allow for region large page allocation failure.
10129 10128 */
10130 10129 tte8k_cnt += sfmmup->sfmmu_tsb0_4minflcnt;
10131 10130
10132 10131 /*
10133 10132 * Inflate TSB sizes by a factor of 2 if this process
10134 10133 * uses 4M text pages to minimize extra conflict misses
10135 10134 * in the first TSB since without counting text pages
10136 10135 * 8K TSB may become too small.
10137 10136 *
10138 10137 * Also double the size of the second TSB to minimize
10139 10138 * extra conflict misses due to competition between 4M text pages
10140 10139 * and data pages.
10141 10140 *
10142 10141 * We need to adjust the second TSB allocation threshold by the
10143 10142 * inflation factor, since there is no point in creating a second
10144 10143 * TSB when we know all the mappings can fit in the I/D TLBs.
10145 10144 */
10146 10145 sectsb_thresh = tsb_sectsb_threshold;
10147 10146 if (sfmmup->sfmmu_flags & HAT_4MTEXT_FLAG) {
10148 10147 tte8k_cnt <<= 1;
10149 10148 tte4m_cnt <<= 1;
10150 10149 sectsb_thresh <<= 1;
10151 10150 }
10152 10151
10153 10152 /*
10154 10153 * Check to see if our TSB is the right size; we may need to
10155 10154 * grow or shrink it. If the process is small, our work is
10156 10155 * finished at this point.
10157 10156 */
10158 10157 if (tte8k_cnt <= tsb_rss_factor && tte4m_cnt <= sectsb_thresh) {
10159 10158 return;
10160 10159 }
10161 10160 sfmmu_size_tsb(sfmmup, growing, tte8k_cnt, tte4m_cnt, sectsb_thresh);
10162 10161 }
10163 10162
10164 10163 static void
10165 10164 sfmmu_size_tsb(sfmmu_t *sfmmup, int growing, uint64_t tte8k_cnt,
10166 10165 uint64_t tte4m_cnt, int sectsb_thresh)
10167 10166 {
10168 10167 int tsb_bits;
10169 10168 uint_t tsb_szc;
10170 10169 struct tsb_info *tsbinfop;
10171 10170 hatlock_t *hatlockp = NULL;
10172 10171
10173 10172 hatlockp = sfmmu_hat_enter(sfmmup);
10174 10173 ASSERT(hatlockp != NULL);
10175 10174 tsbinfop = sfmmup->sfmmu_tsb;
10176 10175 ASSERT(tsbinfop != NULL);
10177 10176
10178 10177 /*
10179 10178 * If we're growing, select the size based on RSS. If we're
10180 10179 * shrinking, leave some room so we don't have to turn around and
10181 10180 * grow again immediately.
10182 10181 */
10183 10182 if (growing)
10184 10183 tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt);
10185 10184 else
10186 10185 tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt << 1);
10187 10186
10188 10187 if (!growing && (tsb_szc < tsbinfop->tsb_szc) &&
10189 10188 (tsb_szc >= default_tsb_size) && TSB_OK_SHRINK()) {
10190 10189 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, tsb_szc,
10191 10190 hatlockp, TSB_SHRINK);
10192 10191 } else if (growing && tsb_szc > tsbinfop->tsb_szc && TSB_OK_GROW()) {
10193 10192 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, tsb_szc,
10194 10193 hatlockp, TSB_GROW);
10195 10194 }
10196 10195 tsbinfop = sfmmup->sfmmu_tsb;
10197 10196
10198 10197 /*
10199 10198 * With the TLB and first TSB out of the way, we need to see if
10200 10199 * we need a second TSB for 4M pages. If we managed to reprogram
10201 10200 * the TLB page sizes above, the process will start using this new
10202 10201 * TSB right away; otherwise, it will start using it on the next
10203 10202 * context switch. Either way, it's no big deal so there's no
10204 10203 * synchronization with the trap handlers here unless we grow the
10205 10204 * TSB (in which case it's required to prevent using the old one
10206 10205 * after it's freed). Note: second tsb is required for 32M/256M
10207 10206 * page sizes.
10208 10207 */
10209 10208 if (tte4m_cnt > sectsb_thresh) {
10210 10209 /*
10211 10210 * If we're growing, select the size based on RSS. If we're
10212 10211 * shrinking, leave some room so we don't have to turn
10213 10212 * around and grow again immediately.
10214 10213 */
10215 10214 if (growing)
10216 10215 tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt);
10217 10216 else
10218 10217 tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt << 1);
10219 10218 if (tsbinfop->tsb_next == NULL) {
10220 10219 struct tsb_info *newtsb;
10221 10220 int allocflags = SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)?
10222 10221 0 : TSB_ALLOC;
10223 10222
10224 10223 sfmmu_hat_exit(hatlockp);
10225 10224
10226 10225 /*
10227 10226 * Try to allocate a TSB for 4[32|256]M pages. If we
10228 10227 * can't get the size we want, retry with a minimum-sized
10229 10228 * TSB. If that still didn't work, give up; we can
10230 10229 * still run without one.
10231 10230 */
10232 10231 tsb_bits = (mmu_page_sizes == max_mmu_page_sizes)?
10233 10232 TSB4M|TSB32M|TSB256M:TSB4M;
10234 10233 if ((sfmmu_tsbinfo_alloc(&newtsb, tsb_szc, tsb_bits,
10235 10234 allocflags, sfmmup)) &&
10236 10235 (tsb_szc <= TSB_4M_SZCODE ||
10237 10236 sfmmu_tsbinfo_alloc(&newtsb, TSB_4M_SZCODE,
10238 10237 tsb_bits, allocflags, sfmmup)) &&
10239 10238 sfmmu_tsbinfo_alloc(&newtsb, TSB_MIN_SZCODE,
10240 10239 tsb_bits, allocflags, sfmmup)) {
10241 10240 return;
10242 10241 }
10243 10242
10244 10243 hatlockp = sfmmu_hat_enter(sfmmup);
10245 10244
10246 10245 sfmmu_invalidate_ctx(sfmmup);
10247 10246
10248 10247 if (sfmmup->sfmmu_tsb->tsb_next == NULL) {
10249 10248 sfmmup->sfmmu_tsb->tsb_next = newtsb;
10250 10249 SFMMU_STAT(sf_tsb_sectsb_create);
10251 10250 sfmmu_hat_exit(hatlockp);
10252 10251 return;
10253 10252 } else {
10254 10253 /*
10255 10254 * It's annoying, but possible for us
10256 10255 * to get here; we dropped the HAT lock
10257 10256 * because of locking order in the kmem
10258 10257 * allocator, and while we were off getting
10259 10258 * our memory, some other thread decided to
10260 10259 * do us a favor and won the race to get a
10261 10260 * second TSB for this process. Sigh.
10262 10261 */
10263 10262 sfmmu_hat_exit(hatlockp);
10264 10263 sfmmu_tsbinfo_free(newtsb);
10265 10264 return;
10266 10265 }
10267 10266 }
10268 10267
10269 10268 /*
10270 10269 * We have a second TSB, see if it's big enough.
10271 10270 */
10272 10271 tsbinfop = tsbinfop->tsb_next;
10273 10272
10274 10273 /*
10275 10274 * Check to see if our second TSB is the right size;
10276 10275 * we may need to grow or shrink it.
10277 10276 * To prevent thrashing (e.g. growing the TSB on a
10278 10277 * subsequent map operation), only try to shrink if
10279 10278 * the TSB reach exceeds twice the virtual address
10280 10279 * space size.
10281 10280 */
10282 10281 if (!growing && (tsb_szc < tsbinfop->tsb_szc) &&
10283 10282 (tsb_szc >= default_tsb_size) && TSB_OK_SHRINK()) {
10284 10283 (void) sfmmu_replace_tsb(sfmmup, tsbinfop,
10285 10284 tsb_szc, hatlockp, TSB_SHRINK);
10286 10285 } else if (growing && tsb_szc > tsbinfop->tsb_szc &&
10287 10286 TSB_OK_GROW()) {
10288 10287 (void) sfmmu_replace_tsb(sfmmup, tsbinfop,
10289 10288 tsb_szc, hatlockp, TSB_GROW);
10290 10289 }
10291 10290 }
10292 10291
10293 10292 sfmmu_hat_exit(hatlockp);
10294 10293 }
10295 10294
10296 10295 /*
10297 10296 * Free up a sfmmu
10298 10297 * Since the sfmmu is currently embedded in the hat struct we simply zero
10299 10298 * out our fields and free up the ism map blk list if any.
10300 10299 */
10301 10300 static void
10302 10301 sfmmu_free_sfmmu(sfmmu_t *sfmmup)
10303 10302 {
10304 10303 ism_blk_t *blkp, *nx_blkp;
10305 10304 #ifdef DEBUG
10306 10305 ism_map_t *map;
10307 10306 int i;
10308 10307 #endif
10309 10308
10310 10309 ASSERT(sfmmup->sfmmu_ttecnt[TTE8K] == 0);
10311 10310 ASSERT(sfmmup->sfmmu_ttecnt[TTE64K] == 0);
10312 10311 ASSERT(sfmmup->sfmmu_ttecnt[TTE512K] == 0);
10313 10312 ASSERT(sfmmup->sfmmu_ttecnt[TTE4M] == 0);
10314 10313 ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0);
10315 10314 ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0);
10316 10315 ASSERT(SF_RGNMAP_ISNULL(sfmmup));
10317 10316
10318 10317 sfmmup->sfmmu_free = 0;
10319 10318 sfmmup->sfmmu_ismhat = 0;
10320 10319
10321 10320 blkp = sfmmup->sfmmu_iblk;
10322 10321 sfmmup->sfmmu_iblk = NULL;
10323 10322
10324 10323 while (blkp) {
10325 10324 #ifdef DEBUG
10326 10325 map = blkp->iblk_maps;
10327 10326 for (i = 0; i < ISM_MAP_SLOTS; i++) {
10328 10327 ASSERT(map[i].imap_seg == 0);
10329 10328 ASSERT(map[i].imap_ismhat == NULL);
10330 10329 ASSERT(map[i].imap_ment == NULL);
10331 10330 }
10332 10331 #endif
10333 10332 nx_blkp = blkp->iblk_next;
10334 10333 blkp->iblk_next = NULL;
10335 10334 blkp->iblk_nextpa = (uint64_t)-1;
10336 10335 kmem_cache_free(ism_blk_cache, blkp);
10337 10336 blkp = nx_blkp;
10338 10337 }
10339 10338 }
10340 10339
10341 10340 /*
10342 10341 * Locking primitives accessed by HATLOCK macros
10343 10342 */
10344 10343
10345 10344 #define SFMMU_SPL_MTX (0x0)
10346 10345 #define SFMMU_ML_MTX (0x1)
10347 10346
10348 10347 #define SFMMU_MLSPL_MTX(type, pg) (((type) == SFMMU_SPL_MTX) ? \
10349 10348 SPL_HASH(pg) : MLIST_HASH(pg))
10350 10349
10351 10350 kmutex_t *
10352 10351 sfmmu_page_enter(struct page *pp)
10353 10352 {
10354 10353 return (sfmmu_mlspl_enter(pp, SFMMU_SPL_MTX));
10355 10354 }
10356 10355
10357 10356 void
10358 10357 sfmmu_page_exit(kmutex_t *spl)
10359 10358 {
10360 10359 mutex_exit(spl);
10361 10360 }
10362 10361
10363 10362 int
10364 10363 sfmmu_page_spl_held(struct page *pp)
10365 10364 {
10366 10365 return (sfmmu_mlspl_held(pp, SFMMU_SPL_MTX));
10367 10366 }
10368 10367
10369 10368 kmutex_t *
10370 10369 sfmmu_mlist_enter(struct page *pp)
10371 10370 {
10372 10371 return (sfmmu_mlspl_enter(pp, SFMMU_ML_MTX));
10373 10372 }
10374 10373
10375 10374 void
10376 10375 sfmmu_mlist_exit(kmutex_t *mml)
10377 10376 {
10378 10377 mutex_exit(mml);
10379 10378 }
10380 10379
10381 10380 int
10382 10381 sfmmu_mlist_held(struct page *pp)
10383 10382 {
10384 10383
10385 10384 return (sfmmu_mlspl_held(pp, SFMMU_ML_MTX));
10386 10385 }
10387 10386
10388 10387 /*
10389 10388 * Common code for sfmmu_mlist_enter() and sfmmu_page_enter(). For
10390 10389 * the sfmmu_mlist_enter() case the mml_table lock array is used, and for
10391 10390 * sfmmu_page_enter() the sfmmu_page_lock lock array is used.
10392 10391 *
10393 10392 * The lock is taken on a root page so that it protects an operation on all
10394 10393 * constituent pages of the large page that pp belongs to.
10395 10394 *
10396 10395 * The routine takes a lock from the appropriate array. The lock is determined
10397 10396 * by hashing the root page. After taking the lock this routine checks if the
10398 10397 * root page has the same size code that was used to determine the root (i.e.
10399 10398 * that the root hasn't changed). If the root page has the expected p_szc field we
10400 10399 * have the right lock and it's returned to the caller. If root's p_szc
10401 10400 * decreased we release the lock and retry from the beginning. This case can
10402 10401 * happen due to hat_page_demote() decreasing p_szc between our load of p_szc
10403 10402 * value and taking the lock. The number of retries due to p_szc decrease is
10404 10403 * limited by the maximum p_szc value. If p_szc is 0 we return the lock
10405 10404 * determined by hashing pp itself.
10406 10405 *
10407 10406 * If our caller doesn't hold a SE_SHARED or SE_EXCL lock on pp it's also
10408 10407 * possible that p_szc can increase. To increase p_szc a thread has to lock
10409 10408 * all constituent pages EXCL and do hat_pageunload() on all of them. All the
10410 10409 * callers that don't hold a page locked recheck if hmeblk through which pp
10411 10410 * was found still maps this pp. If it doesn't map it anymore returned lock
10412 10411 * is immediately dropped. Therefore if sfmmu_mlspl_enter() hits the case of
10413 10412 * p_szc increase after taking the lock it returns this lock without further
10414 10413 * retries because in this case the caller doesn't care about which lock was
10415 10414 * taken. The caller will drop it right away.
10416 10415 *
10417 10416 * After the routine returns it's guaranteed that hat_page_demote() can't
10418 10417 * change p_szc field of any of constituent pages of a large page pp belongs
10419 10418 * to as long as pp was either locked at least SHARED prior to this call or
10420 10419 * the caller finds that hment that pointed to this pp still references this
10421 10420 * pp (this also assumes that the caller holds hme hash bucket lock so that
10422 10421 * the same pp can't be remapped into the same hmeblk after it was unmapped by
10423 10422 * hat_pageunload()).
10424 10423 */
10425 10424 static kmutex_t *
10426 10425 sfmmu_mlspl_enter(struct page *pp, int type)
10427 10426 {
10428 10427 kmutex_t *mtx;
10429 10428 uint_t prev_rszc = UINT_MAX;
10430 10429 page_t *rootpp;
10431 10430 uint_t szc;
10432 10431 uint_t rszc;
10433 10432 uint_t pszc = pp->p_szc;
10434 10433
10435 10434 ASSERT(pp != NULL);
10436 10435
10437 10436 again:
10438 10437 if (pszc == 0) {
10439 10438 mtx = SFMMU_MLSPL_MTX(type, pp);
10440 10439 mutex_enter(mtx);
10441 10440 return (mtx);
10442 10441 }
10443 10442
10444 10443 /* The lock lives in the root page */
10445 10444 rootpp = PP_GROUPLEADER(pp, pszc);
10446 10445 mtx = SFMMU_MLSPL_MTX(type, rootpp);
10447 10446 mutex_enter(mtx);
10448 10447
10449 10448 /*
10450 10449 * Return mml in the following 3 cases:
10451 10450 *
10452 10451 * 1) If pp itself is root since if its p_szc decreased before we took
10453 10452 * the lock pp is still the root of smaller szc page. And if its p_szc
10454 10453 * increased it doesn't matter what lock we return (see comment in
10455 10454 * front of this routine).
10456 10455 *
10457 10456 * 2) If pp's not root but rootpp is the root of a rootpp->p_szc size
10458 10457 * large page we have the right lock since any previous potential
10459 10458 * hat_page_demote() is done demoting from greater than current root's
10460 10459 * p_szc because hat_page_demote() changes root's p_szc last. No
10461 10460 * further hat_page_demote() can start or be in progress since it
10462 10461 * would need the same lock we currently hold.
10463 10462 *
10464 10463 * 3) If rootpp's p_szc increased since previous iteration it doesn't
10465 10464 * matter what lock we return (see comment in front of this routine).
10466 10465 */
10467 10466 if (pp == rootpp || (rszc = rootpp->p_szc) == pszc ||
10468 10467 rszc >= prev_rszc) {
10469 10468 return (mtx);
10470 10469 }
10471 10470
10472 10471 /*
10473 10472 * hat_page_demote() could have decreased root's p_szc.
10474 10473 * In this case pp's p_szc must also be smaller than pszc.
10475 10474 * Retry.
10476 10475 */
10477 10476 if (rszc < pszc) {
10478 10477 szc = pp->p_szc;
10479 10478 if (szc < pszc) {
10480 10479 mutex_exit(mtx);
10481 10480 pszc = szc;
10482 10481 goto again;
10483 10482 }
10484 10483 /*
10485 10484 * pp's p_szc increased after it was decreased.
10486 10485 * page cannot be mapped. Return current lock. The caller
10487 10486 * will drop it right away.
10488 10487 */
10489 10488 return (mtx);
10490 10489 }
10491 10490
10492 10491 /*
10493 10492 * root's p_szc is greater than pp's p_szc.
10494 10493 * hat_page_demote() is not done with all pages
10495 10494 * yet. Wait for it to complete.
10496 10495 */
10497 10496 mutex_exit(mtx);
10498 10497 rootpp = PP_GROUPLEADER(rootpp, rszc);
10499 10498 mtx = SFMMU_MLSPL_MTX(type, rootpp);
10500 10499 mutex_enter(mtx);
10501 10500 mutex_exit(mtx);
10502 10501 prev_rszc = rszc;
10503 10502 goto again;
10504 10503 }
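
/*
 * Typical caller-side pattern for the mlist variant, as a minimal sketch
 * (it mirrors the use in sfmmu_hblk_swap() below; callers that don't hold the
 * page locked recheck the mapping and drop the lock right away if it's gone):
 *
 *	pml = sfmmu_mlist_enter(pp);
 *	if (sfhmep->hme_page != pp) {
 *		sfmmu_mlist_exit(pml);		(pp was unmapped; bail or retry)
 *	} else {
 *		(operate on pp's p_mapping list)
 *		sfmmu_mlist_exit(pml);
 *	}
 */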
10505 10504
10506 10505 static int
10507 10506 sfmmu_mlspl_held(struct page *pp, int type)
10508 10507 {
10509 10508 kmutex_t *mtx;
10510 10509
10511 10510 ASSERT(pp != NULL);
10512 10511 /* The lock lives in the root page */
10513 10512 pp = PP_PAGEROOT(pp);
10514 10513 ASSERT(pp != NULL);
10515 10514
10516 10515 mtx = SFMMU_MLSPL_MTX(type, pp);
10517 10516 return (MUTEX_HELD(mtx));
10518 10517 }
10519 10518
10520 10519 static uint_t
10521 10520 sfmmu_get_free_hblk(struct hme_blk **hmeblkpp, uint_t critical)
10522 10521 {
10523 10522 struct hme_blk *hblkp;
10524 10523
10525 10524
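	/*
	 * Quick check of freehblkp without the lock; the result is
	 * rechecked under freehblkp_lock below before a hblk is taken.
	 */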
10526 10525 if (freehblkp != NULL) {
10527 10526 mutex_enter(&freehblkp_lock);
10528 10527 if (freehblkp != NULL) {
10529 10528 /*
10530 10529 * If the current thread owns hblk_reserve, OR this is a
10531 10530 * critical request from sfmmu_hblk_steal(),
10532 10531 * let it succeed even if freehblkcnt is really low.
10533 10532 */
10534 10533 if (freehblkcnt <= HBLK_RESERVE_MIN && !critical) {
10535 10534 SFMMU_STAT(sf_get_free_throttle);
10536 10535 mutex_exit(&freehblkp_lock);
10537 10536 return (0);
10538 10537 }
10539 10538 freehblkcnt--;
10540 10539 *hmeblkpp = freehblkp;
10541 10540 hblkp = *hmeblkpp;
10542 10541 freehblkp = hblkp->hblk_next;
10543 10542 mutex_exit(&freehblkp_lock);
10544 10543 hblkp->hblk_next = NULL;
10545 10544 SFMMU_STAT(sf_get_free_success);
10546 10545
10547 10546 ASSERT(hblkp->hblk_hmecnt == 0);
10548 10547 ASSERT(hblkp->hblk_vcnt == 0);
10549 10548 ASSERT(hblkp->hblk_nextpa == va_to_pa((caddr_t)hblkp));
10550 10549
10551 10550 return (1);
10552 10551 }
10553 10552 mutex_exit(&freehblkp_lock);
10554 10553 }
10555 10554
10556 10555 /* Check cpu hblk pending queues */
10557 10556 if ((*hmeblkpp = sfmmu_check_pending_hblks(TTE8K)) != NULL) {
10558 10557 hblkp = *hmeblkpp;
10559 10558 hblkp->hblk_next = NULL;
10560 10559 hblkp->hblk_nextpa = va_to_pa((caddr_t)hblkp);
10561 10560
10562 10561 ASSERT(hblkp->hblk_hmecnt == 0);
10563 10562 ASSERT(hblkp->hblk_vcnt == 0);
10564 10563
10565 10564 return (1);
10566 10565 }
10567 10566
10568 10567 SFMMU_STAT(sf_get_free_fail);
10569 10568 return (0);
10570 10569 }
10571 10570
10572 10571 static uint_t
10573 10572 sfmmu_put_free_hblk(struct hme_blk *hmeblkp, uint_t critical)
10574 10573 {
10575 10574 struct hme_blk *hblkp;
10576 10575
10577 10576 ASSERT(hmeblkp->hblk_hmecnt == 0);
10578 10577 ASSERT(hmeblkp->hblk_vcnt == 0);
10579 10578 ASSERT(hmeblkp->hblk_nextpa == va_to_pa((caddr_t)hmeblkp));
10580 10579
10581 10580 /*
10582 10581 * If the current thread is mapping into kernel space,
10583 10582 * let it succeed even if freehblkcnt is max
10584 10583 * so that it will avoid freeing it to kmem.
10585 10584 * This will prevent stack overflow due to
10586 10585 * possible recursion since kmem_cache_free()
10587 10586 * might require creation of a slab which
10588 10587 * in turn needs an hmeblk to map that slab;
10589 10588 * let's break this vicious chain at the first
10590 10589 * opportunity.
10591 10590 */
10592 10591 if (freehblkcnt < HBLK_RESERVE_CNT || critical) {
10593 10592 mutex_enter(&freehblkp_lock);
10594 10593 if (freehblkcnt < HBLK_RESERVE_CNT || critical) {
10595 10594 SFMMU_STAT(sf_put_free_success);
10596 10595 freehblkcnt++;
10597 10596 hmeblkp->hblk_next = freehblkp;
10598 10597 freehblkp = hmeblkp;
10599 10598 mutex_exit(&freehblkp_lock);
10600 10599 return (1);
10601 10600 }
10602 10601 mutex_exit(&freehblkp_lock);
10603 10602 }
10604 10603
10605 10604 /*
10606 10605 * Bring down freehblkcnt to HBLK_RESERVE_CNT. We are here
10607 10606 * only if freehblkcnt is at least HBLK_RESERVE_CNT *and*
10608 10607 * we are not in the process of mapping into kernel space.
10609 10608 */
10610 10609 ASSERT(!critical);
10611 10610 while (freehblkcnt > HBLK_RESERVE_CNT) {
10612 10611 mutex_enter(&freehblkp_lock);
10613 10612 if (freehblkcnt > HBLK_RESERVE_CNT) {
10614 10613 freehblkcnt--;
10615 10614 hblkp = freehblkp;
10616 10615 freehblkp = hblkp->hblk_next;
10617 10616 mutex_exit(&freehblkp_lock);
10618 10617 ASSERT(get_hblk_cache(hblkp) == sfmmu8_cache);
10619 10618 kmem_cache_free(sfmmu8_cache, hblkp);
10620 10619 continue;
10621 10620 }
10622 10621 mutex_exit(&freehblkp_lock);
10623 10622 }
10624 10623 SFMMU_STAT(sf_put_free_fail);
10625 10624 return (0);
10626 10625 }
10627 10626
10628 10627 static void
10629 10628 sfmmu_hblk_swap(struct hme_blk *new)
10630 10629 {
10631 10630 struct hme_blk *old, *hblkp, *prev;
10632 10631 uint64_t newpa;
10633 10632 caddr_t base, vaddr, endaddr;
10634 10633 struct hmehash_bucket *hmebp;
10635 10634 struct sf_hment *osfhme, *nsfhme;
10636 10635 page_t *pp;
10637 10636 kmutex_t *pml;
10638 10637 tte_t tte;
10639 10638 struct hme_blk *list = NULL;
10640 10639
10641 10640 #ifdef DEBUG
10642 10641 hmeblk_tag hblktag;
10643 10642 struct hme_blk *found;
10644 10643 #endif
10645 10644 old = HBLK_RESERVE;
10646 10645 ASSERT(!old->hblk_shared);
10647 10646
10648 10647 /*
10649 10648 * save pa before bcopy clobbers it
10650 10649 */
10651 10650 newpa = new->hblk_nextpa;
10652 10651
10653 10652 base = (caddr_t)get_hblk_base(old);
10654 10653 endaddr = base + get_hblk_span(old);
10655 10654
10656 10655 /*
10657 10656 * acquire hash bucket lock.
10658 10657 */
10659 10658 hmebp = sfmmu_tteload_acquire_hashbucket(ksfmmup, base, TTE8K,
10660 10659 SFMMU_INVALID_SHMERID);
10661 10660
10662 10661 /*
10663 10662 * copy contents from old to new
10664 10663 */
10665 10664 bcopy((void *)old, (void *)new, HME8BLK_SZ);
10666 10665
10667 10666 /*
10668 10667 * add new to hash chain
10669 10668 */
10670 10669 sfmmu_hblk_hash_add(hmebp, new, newpa);
10671 10670
10672 10671 /*
10673 10672 * search hash chain for hblk_reserve; this needs to be performed
10674 10673 * after adding new, otherwise prev won't correspond to the hblk which
10675 10674 * is prior to old in hash chain when we call sfmmu_hblk_hash_rm to
10676 10675 * remove old later.
10677 10676 */
10678 10677 for (prev = NULL,
10679 10678 hblkp = hmebp->hmeblkp; hblkp != NULL && hblkp != old;
10680 10679 prev = hblkp, hblkp = hblkp->hblk_next)
10681 10680 ;
10682 10681
10683 10682 if (hblkp != old)
10684 10683 panic("sfmmu_hblk_swap: hblk_reserve not found");
10685 10684
10686 10685 /*
10687 10686 * p_mapping list is still pointing to hments in hblk_reserve;
10688 10687 * fix up p_mapping list so that they point to hments in new.
10689 10688 *
10690 10689 * Since all these mappings are created by hblk_reserve_thread
10691 10690 * on the way and it's using at least one of the buffers from each of
10692 10691 * the newly minted slabs, there is no danger of any of these
10693 10692 * mappings getting unloaded by another thread.
10694 10693 *
10695 10694 * tsbmiss could only modify ref/mod bits of hments in old/new.
10696 10695 * Since all of these hments hold mappings established by segkmem
10697 10696 * and mappings in segkmem are setup with HAT_NOSYNC, ref/mod bits
10698 10697 * have no meaning for the mappings in hblk_reserve. hments in
10699 10698 * old and new are identical except for ref/mod bits.
10700 10699 */
10701 10700 for (vaddr = base; vaddr < endaddr; vaddr += TTEBYTES(TTE8K)) {
10702 10701
10703 10702 HBLKTOHME(osfhme, old, vaddr);
10704 10703 sfmmu_copytte(&osfhme->hme_tte, &tte);
10705 10704
10706 10705 if (TTE_IS_VALID(&tte)) {
10707 10706 if ((pp = osfhme->hme_page) == NULL)
10708 10707 panic("sfmmu_hblk_swap: page not mapped");
10709 10708
10710 10709 pml = sfmmu_mlist_enter(pp);
10711 10710
10712 10711 if (pp != osfhme->hme_page)
10713 10712 panic("sfmmu_hblk_swap: mapping changed");
10714 10713
10715 10714 HBLKTOHME(nsfhme, new, vaddr);
10716 10715
10717 10716 HME_ADD(nsfhme, pp);
10718 10717 HME_SUB(osfhme, pp);
10719 10718
10720 10719 sfmmu_mlist_exit(pml);
10721 10720 }
10722 10721 }
10723 10722
10724 10723 /*
10725 10724 * remove old from hash chain
10726 10725 */
10727 10726 sfmmu_hblk_hash_rm(hmebp, old, prev, &list, 1);
10728 10727
10729 10728 #ifdef DEBUG
10730 10729
10731 10730 hblktag.htag_id = ksfmmup;
10732 10731 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
10733 10732 hblktag.htag_bspage = HME_HASH_BSPAGE(base, HME_HASH_SHIFT(TTE8K));
10734 10733 hblktag.htag_rehash = HME_HASH_REHASH(TTE8K);
10735 10734 HME_HASH_FAST_SEARCH(hmebp, hblktag, found);
10736 10735
10737 10736 if (found != new)
10738 10737 panic("sfmmu_hblk_swap: new hblk not found");
10739 10738 #endif
10740 10739
10741 10740 SFMMU_HASH_UNLOCK(hmebp);
10742 10741
10743 10742 /*
10744 10743 * Reset hblk_reserve
10745 10744 */
10746 10745 bzero((void *)old, HME8BLK_SZ);
10747 10746 old->hblk_nextpa = va_to_pa((caddr_t)old);
10748 10747 }
10749 10748
10750 10749 /*
10751 10750 * Grab the mlist mutex for both pages passed in.
10752 10751 *
10753 10752 * low and high will be returned as pointers to the mutexes for these pages.
10754 10753 * low refers to the mutex residing in the lower bin of the mlist hash, while
10755 10754 * high refers to the mutex residing in the higher bin of the mlist hash. This
10756 10755 * is due to the locking order restrictions on the same thread grabbing
10757 10756 * multiple mlist mutexes. The low lock must be acquired before the high lock.
10758 10757 *
10759 10758 * If both pages hash to the same mutex, only grab that single mutex, and
10760 10759 * high will be returned as NULL.
10761 10760 * If the pages hash to different bins in the hash, grab the lower addressed
10762 10761 * lock first and then the higher addressed lock in order to follow the locking
10763 10762 * rules involved with the same thread grabbing multiple mlist mutexes.
10764 10763 * low and high will both have non-NULL values.
10765 10764 */
10766 10765 static void
10767 10766 sfmmu_mlist_reloc_enter(struct page *targ, struct page *repl,
10768 10767 kmutex_t **low, kmutex_t **high)
10769 10768 {
10770 10769 kmutex_t *mml_targ, *mml_repl;
10771 10770
10772 10771 /*
10773 10772 * no need to do the dance around szc as in sfmmu_mlist_enter()
10774 10773 * because this routine is only called by hat_page_relocate() and all
10775 10774 * targ and repl pages are already locked EXCL so szc can't change.
10776 10775 */
10777 10776
10778 10777 mml_targ = MLIST_HASH(PP_PAGEROOT(targ));
10779 10778 mml_repl = MLIST_HASH(PP_PAGEROOT(repl));
10780 10779
10781 10780 if (mml_targ == mml_repl) {
10782 10781 *low = mml_targ;
10783 10782 *high = NULL;
10784 10783 } else {
10785 10784 if (mml_targ < mml_repl) {
10786 10785 *low = mml_targ;
10787 10786 *high = mml_repl;
10788 10787 } else {
10789 10788 *low = mml_repl;
10790 10789 *high = mml_targ;
10791 10790 }
10792 10791 }
10793 10792
10794 10793 mutex_enter(*low);
10795 10794 if (*high)
10796 10795 mutex_enter(*high);
10797 10796 }
10798 10797
10799 10798 static void
10800 10799 sfmmu_mlist_reloc_exit(kmutex_t *low, kmutex_t *high)
10801 10800 {
10802 10801 if (high)
10803 10802 mutex_exit(high);
10804 10803 mutex_exit(low);
10805 10804 }
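
/*
 * Intended pairing, as a minimal sketch (per the comment above,
 * hat_page_relocate() is the only caller and holds targ and repl SE_EXCL):
 *
 *	kmutex_t *low, *high;
 *
 *	sfmmu_mlist_reloc_enter(targ, repl, &low, &high);
 *	(move the mappings from targ to repl)
 *	sfmmu_mlist_reloc_exit(low, high);
 */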
10806 10805
10807 10806 static hatlock_t *
10808 10807 sfmmu_hat_enter(sfmmu_t *sfmmup)
10809 10808 {
10810 10809 hatlock_t *hatlockp;
10811 10810
10812 10811 if (sfmmup != ksfmmup) {
10813 10812 hatlockp = TSB_HASH(sfmmup);
10814 10813 mutex_enter(HATLOCK_MUTEXP(hatlockp));
10815 10814 return (hatlockp);
10816 10815 }
10817 10816 return (NULL);
10818 10817 }
10819 10818
10820 10819 static hatlock_t *
10821 10820 sfmmu_hat_tryenter(sfmmu_t *sfmmup)
10822 10821 {
10823 10822 hatlock_t *hatlockp;
10824 10823
10825 10824 if (sfmmup != ksfmmup) {
10826 10825 hatlockp = TSB_HASH(sfmmup);
10827 10826 if (mutex_tryenter(HATLOCK_MUTEXP(hatlockp)) == 0)
10828 10827 return (NULL);
10829 10828 return (hatlockp);
10830 10829 }
10831 10830 return (NULL);
10832 10831 }
10833 10832
10834 10833 static void
10835 10834 sfmmu_hat_exit(hatlock_t *hatlockp)
10836 10835 {
10837 10836 if (hatlockp != NULL)
10838 10837 mutex_exit(HATLOCK_MUTEXP(hatlockp));
10839 10838 }
10840 10839
10841 10840 static void
10842 10841 sfmmu_hat_lock_all(void)
10843 10842 {
10844 10843 int i;
10845 10844 for (i = 0; i < SFMMU_NUM_LOCK; i++)
10846 10845 mutex_enter(HATLOCK_MUTEXP(&hat_lock[i]));
10847 10846 }
10848 10847
10849 10848 static void
10850 10849 sfmmu_hat_unlock_all(void)
10851 10850 {
10852 10851 int i;
10853 10852 for (i = SFMMU_NUM_LOCK - 1; i >= 0; i--)
10854 10853 mutex_exit(HATLOCK_MUTEXP(&hat_lock[i]));
10855 10854 }
10856 10855
10857 10856 int
10858 10857 sfmmu_hat_lock_held(sfmmu_t *sfmmup)
10859 10858 {
10860 10859 ASSERT(sfmmup != ksfmmup);
10861 10860 return (MUTEX_HELD(HATLOCK_MUTEXP(TSB_HASH(sfmmup))));
10862 10861 }
10863 10862
10864 10863 /*
10865 10864 * Locking primitives to provide consistency between ISM unmap
10866 10865 * and other operations. Since ISM unmap can take a long time, we
10867 10866 * use HAT_ISMBUSY flag (protected by the hatlock) to avoid creating
10868 10867 * contention on the hatlock buckets while ISM segments are being
10869 10868 * unmapped. The tradeoff is that the flags don't prevent priority
10870 10869 * inversion from occurring, so we must request kernel priority in
10871 10870 * case we have to sleep to keep from getting buried while holding
10872 10871 * the HAT_ISMBUSY flag set, which in turn could block other kernel
10873 10872 * threads from running (for example, in sfmmu_uvatopfn()).
10874 10873 */
10875 10874 static void
10876 10875 sfmmu_ismhat_enter(sfmmu_t *sfmmup, int hatlock_held)
10877 10876 {
10878 10877 hatlock_t *hatlockp;
10879 10878
10880 10879 THREAD_KPRI_REQUEST();
10881 10880 if (!hatlock_held)
10882 10881 hatlockp = sfmmu_hat_enter(sfmmup);
10883 10882 while (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY))
10884 10883 cv_wait(&sfmmup->sfmmu_tsb_cv, HATLOCK_MUTEXP(hatlockp));
10885 10884 SFMMU_FLAGS_SET(sfmmup, HAT_ISMBUSY);
10886 10885 if (!hatlock_held)
10887 10886 sfmmu_hat_exit(hatlockp);
10888 10887 }
10889 10888
10890 10889 static void
10891 10890 sfmmu_ismhat_exit(sfmmu_t *sfmmup, int hatlock_held)
10892 10891 {
10893 10892 hatlock_t *hatlockp;
10894 10893
10895 10894 if (!hatlock_held)
10896 10895 hatlockp = sfmmu_hat_enter(sfmmup);
10897 10896 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY));
10898 10897 SFMMU_FLAGS_CLEAR(sfmmup, HAT_ISMBUSY);
10899 10898 cv_broadcast(&sfmmup->sfmmu_tsb_cv);
10900 10899 if (!hatlock_held)
10901 10900 sfmmu_hat_exit(hatlockp);
10902 10901 THREAD_KPRI_RELEASE();
10903 10902 }
10904 10903
10905 10904 /*
10906 10905 *
10907 10906 * Algorithm:
10908 10907 *
10909 10908 * (1) if segkmem is not ready, allocate hblk from an array of pre-alloc'ed
10910 10909 * hblks.
10911 10910 *
10912 10911 * (2) if we are allocating an hblk for mapping a slab in sfmmu_cache,
10913 10912 *
10914 10913 * (a) try to return an hblk from reserve pool of free hblks;
10915 10914 * (b) if the reserve pool is empty, acquire hblk_reserve_lock
10916 10915 * and return hblk_reserve.
10917 10916 *
10918 10917 * (3) call kmem_cache_alloc() to allocate hblk;
10919 10918 *
10920 10919 * (a) if hblk_reserve_lock is held by the current thread,
10921 10920 * atomically replace hblk_reserve by the hblk that is
10922 10921 * returned by kmem_cache_alloc; release hblk_reserve_lock
10923 10922 * and call kmem_cache_alloc() again.
10924 10923 * (b) if reserve pool is not full, add the hblk that is
10925 10924 * returned by kmem_cache_alloc to reserve pool and
10926 10925 * call kmem_cache_alloc again.
10927 10926 *
10928 10927 */
10929 10928 static struct hme_blk *
10930 10929 sfmmu_hblk_alloc(sfmmu_t *sfmmup, caddr_t vaddr,
10931 10930 struct hmehash_bucket *hmebp, uint_t size, hmeblk_tag hblktag,
10932 10931 uint_t flags, uint_t rid)
10933 10932 {
10934 10933 struct hme_blk *hmeblkp = NULL;
10935 10934 struct hme_blk *newhblkp;
10936 10935 struct hme_blk *shw_hblkp = NULL;
10937 10936 struct kmem_cache *sfmmu_cache = NULL;
10938 10937 uint64_t hblkpa;
10939 10938 ulong_t index;
10940 10939 uint_t owner; /* set to 1 if using hblk_reserve */
10941 10940 uint_t forcefree;
10942 10941 int sleep;
10943 10942 sf_srd_t *srdp;
10944 10943 sf_region_t *rgnp;
10945 10944
10946 10945 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
10947 10946 ASSERT(hblktag.htag_rid == rid);
10948 10947 SFMMU_VALIDATE_HMERID(sfmmup, rid, vaddr, TTEBYTES(size));
10949 10948 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) ||
10950 10949 IS_P2ALIGNED(vaddr, TTEBYTES(size)));
10951 10950
10952 10951 /*
10953 10952 * If segkmem is not created yet, allocate from static hmeblks
10954 10953 * created at the end of startup_modules(). See the block comment
10955 10954 * in startup_modules() describing how we estimate the number of
10956 10955 * static hmeblks that will be needed during re-map.
10957 10956 */
10958 10957 if (!hblk_alloc_dynamic) {
10959 10958
10960 10959 ASSERT(!SFMMU_IS_SHMERID_VALID(rid));
10961 10960
10962 10961 if (size == TTE8K) {
10963 10962 index = nucleus_hblk8.index;
10964 10963 if (index >= nucleus_hblk8.len) {
10965 10964 /*
10966 10965 * If we panic here, see startup_modules() to
10967 10966 * make sure that we are calculating the
10968 10967 * number of hblk8's that we need correctly.
10969 10968 */
10970 10969 prom_panic("no nucleus hblk8 to allocate");
10971 10970 }
10972 10971 hmeblkp =
10973 10972 (struct hme_blk *)&nucleus_hblk8.list[index];
10974 10973 nucleus_hblk8.index++;
10975 10974 SFMMU_STAT(sf_hblk8_nalloc);
10976 10975 } else {
10977 10976 index = nucleus_hblk1.index;
10978 10977 if (nucleus_hblk1.index >= nucleus_hblk1.len) {
10979 10978 /*
10980 10979 * If we panic here, see startup_modules().
10981 10980 * Most likely you need to update the
10982 10981 * calculation of the number of hblk1 elements
10983 10982 * that the kernel needs to boot.
10984 10983 */
10985 10984 prom_panic("no nucleus hblk1 to allocate");
10986 10985 }
10987 10986 hmeblkp =
10988 10987 (struct hme_blk *)&nucleus_hblk1.list[index];
10989 10988 nucleus_hblk1.index++;
10990 10989 SFMMU_STAT(sf_hblk1_nalloc);
10991 10990 }
10992 10991
10993 10992 goto hblk_init;
10994 10993 }
10995 10994
10996 10995 SFMMU_HASH_UNLOCK(hmebp);
10997 10996
10998 10997 if (sfmmup != KHATID && !SFMMU_IS_SHMERID_VALID(rid)) {
10999 10998 if (mmu_page_sizes == max_mmu_page_sizes) {
11000 10999 if (size < TTE256M)
11001 11000 shw_hblkp = sfmmu_shadow_hcreate(sfmmup, vaddr,
11002 11001 size, flags);
11003 11002 } else {
11004 11003 if (size < TTE4M)
11005 11004 shw_hblkp = sfmmu_shadow_hcreate(sfmmup, vaddr,
11006 11005 size, flags);
11007 11006 }
11008 11007 } else if (SFMMU_IS_SHMERID_VALID(rid)) {
11009 11008 /*
11010 11009 * Shared hmes use per region bitmaps in rgn_hmeflag
11011 11010 * rather than shadow hmeblks to keep track of the
11012 11011 * mapping sizes which have been allocated for the region.
11013 11012 * Here we clean up old invalid hmeblks with this rid,
11014 11013 * which may be left around by pageunload().
11015 11014 */
11016 11015 int ttesz;
11017 11016 caddr_t va;
11018 11017 caddr_t eva = vaddr + TTEBYTES(size);
11019 11018
11020 11019 ASSERT(sfmmup != KHATID);
11021 11020
11022 11021 srdp = sfmmup->sfmmu_srdp;
11023 11022 ASSERT(srdp != NULL && srdp->srd_refcnt != 0);
11024 11023 rgnp = srdp->srd_hmergnp[rid];
11025 11024 ASSERT(rgnp != NULL && rgnp->rgn_id == rid);
11026 11025 ASSERT(rgnp->rgn_refcnt != 0);
11027 11026 ASSERT(size <= rgnp->rgn_pgszc);
11028 11027
11029 11028 ttesz = HBLK_MIN_TTESZ;
11030 11029 do {
11031 11030 if (!(rgnp->rgn_hmeflags & (0x1 << ttesz))) {
11032 11031 continue;
11033 11032 }
11034 11033
11035 11034 if (ttesz > size && ttesz != HBLK_MIN_TTESZ) {
11036 11035 sfmmu_cleanup_rhblk(srdp, vaddr, rid, ttesz);
11037 11036 } else if (ttesz < size) {
11038 11037 for (va = vaddr; va < eva;
11039 11038 va += TTEBYTES(ttesz)) {
11040 11039 sfmmu_cleanup_rhblk(srdp, va, rid,
11041 11040 ttesz);
11042 11041 }
11043 11042 }
11044 11043 } while (++ttesz <= rgnp->rgn_pgszc);
11045 11044 }
11046 11045
11047 11046 fill_hblk:
11048 11047 owner = (hblk_reserve_thread == curthread) ? 1 : 0;
11049 11048
11050 11049 if (owner && size == TTE8K) {
11051 11050
11052 11051 ASSERT(!SFMMU_IS_SHMERID_VALID(rid));
11053 11052 /*
11054 11053 * We are really in a tight spot. We already own
11055 11054 * hblk_reserve and we need another hblk. In anticipation
11056 11055 * of this kind of scenario, we specifically set aside
11057 11056 * HBLK_RESERVE_MIN number of hblks to be used exclusively
11058 11057 * by owner of hblk_reserve.
11059 11058 */
11060 11059 SFMMU_STAT(sf_hblk_recurse_cnt);
11061 11060
11062 11061 if (!sfmmu_get_free_hblk(&hmeblkp, 1))
11063 11062 panic("sfmmu_hblk_alloc: reserve list is empty");
11064 11063
11065 11064 goto hblk_verify;
11066 11065 }
11067 11066
11068 11067 ASSERT(!owner);
11069 11068
11070 11069 if ((flags & HAT_NO_KALLOC) == 0) {
11071 11070
11072 11071 sfmmu_cache = ((size == TTE8K) ? sfmmu8_cache : sfmmu1_cache);
11073 11072 sleep = ((sfmmup == KHATID) ? KM_NOSLEEP : KM_SLEEP);
11074 11073
11075 11074 if ((hmeblkp = kmem_cache_alloc(sfmmu_cache, sleep)) == NULL) {
11076 11075 hmeblkp = sfmmu_hblk_steal(size);
11077 11076 } else {
11078 11077 /*
11079 11078 * if we are the owner of hblk_reserve,
11080 11079 * swap hblk_reserve with hmeblkp and
11081 11080 * start a fresh life. Hope things go
11082 11081 * better this time.
11083 11082 */
11084 11083 if (hblk_reserve_thread == curthread) {
11085 11084 ASSERT(sfmmu_cache == sfmmu8_cache);
11086 11085 sfmmu_hblk_swap(hmeblkp);
11087 11086 hblk_reserve_thread = NULL;
11088 11087 mutex_exit(&hblk_reserve_lock);
11089 11088 goto fill_hblk;
11090 11089 }
11091 11090 /*
11092 11091 * let's donate this hblk to our reserve list if
11093 11092 * we are not mapping kernel range
11094 11093 */
11095 11094 if (size == TTE8K && sfmmup != KHATID) {
11096 11095 if (sfmmu_put_free_hblk(hmeblkp, 0))
11097 11096 goto fill_hblk;
11098 11097 }
11099 11098 }
11100 11099 } else {
11101 11100 /*
11102 11101 * We are here to map the slab in sfmmu8_cache; let's
11103 11102 * check if we could tap our reserve list; if successful,
11104 11103 * this will avoid the pain of going thru sfmmu_hblk_swap
11105 11104 */
11106 11105 SFMMU_STAT(sf_hblk_slab_cnt);
11107 11106 if (!sfmmu_get_free_hblk(&hmeblkp, 0)) {
11108 11107 /*
11109 11108 * let's start hblk_reserve dance
11110 11109 */
11111 11110 SFMMU_STAT(sf_hblk_reserve_cnt);
11112 11111 owner = 1;
11113 11112 mutex_enter(&hblk_reserve_lock);
11114 11113 hmeblkp = HBLK_RESERVE;
11115 11114 hblk_reserve_thread = curthread;
11116 11115 }
11117 11116 }
11118 11117
11119 11118 hblk_verify:
11120 11119 ASSERT(hmeblkp != NULL);
11121 11120 set_hblk_sz(hmeblkp, size);
11122 11121 ASSERT(hmeblkp->hblk_nextpa == va_to_pa((caddr_t)hmeblkp));
11123 11122 SFMMU_HASH_LOCK(hmebp);
11124 11123 HME_HASH_FAST_SEARCH(hmebp, hblktag, newhblkp);
11125 11124 if (newhblkp != NULL) {
11126 11125 SFMMU_HASH_UNLOCK(hmebp);
11127 11126 if (hmeblkp != HBLK_RESERVE) {
11128 11127 /*
11129 11128 * This is really tricky!
11130 11129 *
11131 11130 * vmem_alloc(vmem_seg_arena)
11132 11131 * vmem_alloc(vmem_internal_arena)
11133 11132 * segkmem_alloc(heap_arena)
11134 11133 * vmem_alloc(heap_arena)
11135 11134 * page_create()
11136 11135 * hat_memload()
11137 11136 * kmem_cache_free()
11138 11137 * kmem_cache_alloc()
11139 11138 * kmem_slab_create()
11140 11139 * vmem_alloc(kmem_internal_arena)
11141 11140 * segkmem_alloc(heap_arena)
11142 11141 * vmem_alloc(heap_arena)
11143 11142 * page_create()
11144 11143 * hat_memload()
11145 11144 * kmem_cache_free()
11146 11145 * ...
11147 11146 *
11148 11147 * Thus, hat_memload() could call kmem_cache_free
11149 11148 * enough times that we could easily
11150 11149 * hit the bottom of the stack or run out of reserve
11151 11150 * list of vmem_seg structs. So, we must donate
11152 11151 * this hblk to reserve list if it's allocated
11153 11152 * from sfmmu8_cache *and* mapping kernel range.
11154 11153 * We don't need to worry about freeing hmeblk1's
11155 11154 * to kmem since they don't map any kmem slabs.
11156 11155 *
11157 11156 * Note: When segkmem supports largepages, we must
11158 11157 * free hmeblk1's to reserve list as well.
11159 11158 */
11160 11159 forcefree = (sfmmup == KHATID) ? 1 : 0;
11161 11160 if (size == TTE8K &&
11162 11161 sfmmu_put_free_hblk(hmeblkp, forcefree)) {
11163 11162 goto re_verify;
11164 11163 }
11165 11164 ASSERT(sfmmup != KHATID);
11166 11165 kmem_cache_free(get_hblk_cache(hmeblkp), hmeblkp);
11167 11166 } else {
11168 11167 /*
11169 11168 * Hey! we don't need hblk_reserve any more.
11170 11169 */
11171 11170 ASSERT(owner);
11172 11171 hblk_reserve_thread = NULL;
11173 11172 mutex_exit(&hblk_reserve_lock);
11174 11173 owner = 0;
11175 11174 }
11176 11175 re_verify:
11177 11176 /*
11178 11177 * let's check if the goodies are still present
11179 11178 */
11180 11179 SFMMU_HASH_LOCK(hmebp);
11181 11180 HME_HASH_FAST_SEARCH(hmebp, hblktag, newhblkp);
11182 11181 if (newhblkp != NULL) {
11183 11182 /*
11184 11183 * return newhblkp if it's not hblk_reserve;
11185 11184 * if newhblkp is hblk_reserve, return it
11186 11185 * _only if_ we are the owner of hblk_reserve.
11187 11186 */
11188 11187 if (newhblkp != HBLK_RESERVE || owner) {
11189 11188 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) ||
11190 11189 newhblkp->hblk_shared);
11191 11190 ASSERT(SFMMU_IS_SHMERID_VALID(rid) ||
11192 11191 !newhblkp->hblk_shared);
11193 11192 return (newhblkp);
11194 11193 } else {
11195 11194 /*
11196 11195 * we just hit hblk_reserve in the hash and
11197 11196 * we are not the owner of that;
11198 11197 *
11199 11198 * block until hblk_reserve_thread completes
11200 11199 * swapping hblk_reserve and try the dance
11201 11200 * once again.
11202 11201 */
11203 11202 SFMMU_HASH_UNLOCK(hmebp);
11204 11203 mutex_enter(&hblk_reserve_lock);
11205 11204 mutex_exit(&hblk_reserve_lock);
11206 11205 SFMMU_STAT(sf_hblk_reserve_hit);
11207 11206 goto fill_hblk;
11208 11207 }
11209 11208 } else {
11210 11209 /*
11211 11210 * it's no more! try the dance once again.
11212 11211 */
11213 11212 SFMMU_HASH_UNLOCK(hmebp);
11214 11213 goto fill_hblk;
11215 11214 }
11216 11215 }
11217 11216
11218 11217 hblk_init:
11219 11218 if (SFMMU_IS_SHMERID_VALID(rid)) {
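		/*
		 * Record this mapping size in the region's rgn_hmeflags
		 * bitmap (sizes below HBLK_MIN_TTESZ share the HBLK_MIN_TTESZ
		 * bit); shared regions track allocated mapping sizes this way
		 * instead of with shadow hmeblks.
		 */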
11220 11219 uint16_t tteflag = 0x1 <<
11221 11220 ((size < HBLK_MIN_TTESZ) ? HBLK_MIN_TTESZ : size);
11222 11221
11223 11222 if (!(rgnp->rgn_hmeflags & tteflag)) {
11224 11223 atomic_or_16(&rgnp->rgn_hmeflags, tteflag);
11225 11224 }
11226 11225 hmeblkp->hblk_shared = 1;
11227 11226 } else {
11228 11227 hmeblkp->hblk_shared = 0;
11229 11228 }
11230 11229 set_hblk_sz(hmeblkp, size);
11231 11230 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
11232 11231 hmeblkp->hblk_next = (struct hme_blk *)NULL;
11233 11232 hmeblkp->hblk_tag = hblktag;
11234 11233 hmeblkp->hblk_shadow = shw_hblkp;
11235 11234 hblkpa = hmeblkp->hblk_nextpa;
11236 11235 hmeblkp->hblk_nextpa = HMEBLK_ENDPA;
11237 11236
11238 11237 ASSERT(get_hblk_ttesz(hmeblkp) == size);
11239 11238 ASSERT(get_hblk_span(hmeblkp) == HMEBLK_SPAN(size));
11240 11239 ASSERT(hmeblkp->hblk_hmecnt == 0);
11241 11240 ASSERT(hmeblkp->hblk_vcnt == 0);
11242 11241 ASSERT(hmeblkp->hblk_lckcnt == 0);
11243 11242 ASSERT(hblkpa == va_to_pa((caddr_t)hmeblkp));
11244 11243 sfmmu_hblk_hash_add(hmebp, hmeblkp, hblkpa);
11245 11244 return (hmeblkp);
11246 11245 }
11247 11246
11248 11247 /*
11249 11248 * This function cleans up the hme_blk and returns it to the free list.
11250 11249 */
11251 11250 /* ARGSUSED */
11252 11251 static void
11253 11252 sfmmu_hblk_free(struct hme_blk **listp)
11254 11253 {
11255 11254 struct hme_blk *hmeblkp, *next_hmeblkp;
11256 11255 int size;
11257 11256 uint_t critical;
11258 11257 uint64_t hblkpa;
11259 11258
11260 11259 ASSERT(*listp != NULL);
11261 11260
11262 11261 hmeblkp = *listp;
11263 11262 while (hmeblkp != NULL) {
11264 11263 next_hmeblkp = hmeblkp->hblk_next;
11265 11264 ASSERT(!hmeblkp->hblk_hmecnt);
11266 11265 ASSERT(!hmeblkp->hblk_vcnt);
11267 11266 ASSERT(!hmeblkp->hblk_lckcnt);
11268 11267 ASSERT(hmeblkp != (struct hme_blk *)hblk_reserve);
11269 11268 ASSERT(hmeblkp->hblk_shared == 0);
11270 11269 ASSERT(hmeblkp->hblk_shw_bit == 0);
11271 11270 ASSERT(hmeblkp->hblk_shadow == NULL);
11272 11271
11273 11272 hblkpa = va_to_pa((caddr_t)hmeblkp);
11274 11273 ASSERT(hblkpa != (uint64_t)-1);
11275 11274 critical = (hblktosfmmu(hmeblkp) == KHATID) ? 1 : 0;
11276 11275
11277 11276 size = get_hblk_ttesz(hmeblkp);
11278 11277 hmeblkp->hblk_next = NULL;
11279 11278 hmeblkp->hblk_nextpa = hblkpa;
11280 11279
11281 11280 if (hmeblkp->hblk_nuc_bit == 0) {
11282 11281
11283 11282 if (size != TTE8K ||
11284 11283 !sfmmu_put_free_hblk(hmeblkp, critical))
11285 11284 kmem_cache_free(get_hblk_cache(hmeblkp),
11286 11285 hmeblkp);
11287 11286 }
11288 11287 hmeblkp = next_hmeblkp;
11289 11288 }
11290 11289 }
11291 11290
11292 11291 #define BUCKETS_TO_SEARCH_BEFORE_UNLOAD 30
11293 11292 #define SFMMU_HBLK_STEAL_THRESHOLD 5
11294 11293
11295 11294 static uint_t sfmmu_hblk_steal_twice;
11296 11295 static uint_t sfmmu_hblk_steal_count, sfmmu_hblk_steal_unload_count;
11297 11296
11298 11297 /*
11299 11298 * Steal a hmeblk from user or kernel hme hash lists.
11300 11299 * For an 8K tte, grab one from the reserve pool (freehblkp) before proceeding
11301 11300 * to steal, and if we fail to steal after SFMMU_HBLK_STEAL_THRESHOLD attempts,
11302 11301 * tap into the critical reserve of freehblkp.
11303 11302 * Note: We keep looping in this routine until we find one.
11304 11303 */
11305 11304 static struct hme_blk *
11306 11305 sfmmu_hblk_steal(int size)
11307 11306 {
11308 11307 static struct hmehash_bucket *uhmehash_steal_hand = NULL;
11309 11308 struct hmehash_bucket *hmebp;
11310 11309 struct hme_blk *hmeblkp = NULL, *pr_hblk;
11311 11310 uint64_t hblkpa;
11312 11311 int i;
11313 11312 uint_t loop_cnt = 0, critical;
11314 11313
11315 11314 for (;;) {
11316 11315 /* Check cpu hblk pending queues */
11317 11316 if ((hmeblkp = sfmmu_check_pending_hblks(size)) != NULL) {
11318 11317 hmeblkp->hblk_nextpa = va_to_pa((caddr_t)hmeblkp);
11319 11318 ASSERT(hmeblkp->hblk_hmecnt == 0);
11320 11319 ASSERT(hmeblkp->hblk_vcnt == 0);
11321 11320 return (hmeblkp);
11322 11321 }
11323 11322
11324 11323 if (size == TTE8K) {
11325 11324 critical =
11326 11325 (++loop_cnt > SFMMU_HBLK_STEAL_THRESHOLD) ? 1 : 0;
11327 11326 if (sfmmu_get_free_hblk(&hmeblkp, critical))
11328 11327 return (hmeblkp);
11329 11328 }
11330 11329
11331 11330 hmebp = (uhmehash_steal_hand == NULL) ? uhme_hash :
11332 11331 uhmehash_steal_hand;
11333 11332 ASSERT(hmebp >= uhme_hash && hmebp <= &uhme_hash[UHMEHASH_SZ]);
11334 11333
11335 11334 for (i = 0; hmeblkp == NULL && i <= UHMEHASH_SZ +
11336 11335 BUCKETS_TO_SEARCH_BEFORE_UNLOAD; i++) {
11337 11336 SFMMU_HASH_LOCK(hmebp);
11338 11337 hmeblkp = hmebp->hmeblkp;
11339 11338 hblkpa = hmebp->hmeh_nextpa;
11340 11339 pr_hblk = NULL;
11341 11340 while (hmeblkp) {
11342 11341 /*
11343 11342 * check if it is a hmeblk that is not locked
11344 11343 * and not shared. skip shadow hmeblks with
11345 11344 * shadow_mask set i.e valid count non zero.
11346 11345 */
11347 11346 if ((get_hblk_ttesz(hmeblkp) == size) &&
11348 11347 (hmeblkp->hblk_shw_bit == 0 ||
11349 11348 hmeblkp->hblk_vcnt == 0) &&
11350 11349 (hmeblkp->hblk_lckcnt == 0)) {
11351 11350 /*
11352 11351 * there is a high probability that we
11353 11352 * will find a free one. Search some
11354 11353 * buckets for a free hmeblk initially
11355 11354 * before unloading a valid hmeblk.
11356 11355 */
11357 11356 if ((hmeblkp->hblk_vcnt == 0 &&
11358 11357 hmeblkp->hblk_hmecnt == 0) || (i >=
11359 11358 BUCKETS_TO_SEARCH_BEFORE_UNLOAD)) {
11360 11359 if (sfmmu_steal_this_hblk(hmebp,
11361 11360 hmeblkp, hblkpa, pr_hblk)) {
11362 11361 /*
11363 11362 * Hblk is unloaded
11364 11363 * successfully
11365 11364 */
11366 11365 break;
11367 11366 }
11368 11367 }
11369 11368 }
11370 11369 pr_hblk = hmeblkp;
11371 11370 hblkpa = hmeblkp->hblk_nextpa;
11372 11371 hmeblkp = hmeblkp->hblk_next;
11373 11372 }
11374 11373
11375 11374 SFMMU_HASH_UNLOCK(hmebp);
11376 11375 if (hmebp++ == &uhme_hash[UHMEHASH_SZ])
11377 11376 hmebp = uhme_hash;
11378 11377 }
11379 11378 uhmehash_steal_hand = hmebp;
11380 11379
11381 11380 if (hmeblkp != NULL)
11382 11381 break;
11383 11382
11384 11383 /*
11385 11384 * in the worst case, look for a free one in the kernel
11386 11385 * hash table.
11387 11386 */
11388 11387 for (i = 0, hmebp = khme_hash; i <= KHMEHASH_SZ; i++) {
11389 11388 SFMMU_HASH_LOCK(hmebp);
11390 11389 hmeblkp = hmebp->hmeblkp;
11391 11390 hblkpa = hmebp->hmeh_nextpa;
11392 11391 pr_hblk = NULL;
11393 11392 while (hmeblkp) {
11394 11393 /*
11395 11394 * check if it is a free hmeblk
11396 11395 */
11397 11396 if ((get_hblk_ttesz(hmeblkp) == size) &&
11398 11397 (hmeblkp->hblk_lckcnt == 0) &&
11399 11398 (hmeblkp->hblk_vcnt == 0) &&
11400 11399 (hmeblkp->hblk_hmecnt == 0)) {
11401 11400 if (sfmmu_steal_this_hblk(hmebp,
11402 11401 hmeblkp, hblkpa, pr_hblk)) {
11403 11402 break;
11404 11403 } else {
11405 11404 /*
11406 11405 * Cannot fail since we have
11407 11406 * hash lock.
11408 11407 */
11409 11408 panic("fail to steal?");
11410 11409 }
11411 11410 }
11412 11411
11413 11412 pr_hblk = hmeblkp;
11414 11413 hblkpa = hmeblkp->hblk_nextpa;
11415 11414 hmeblkp = hmeblkp->hblk_next;
11416 11415 }
11417 11416
11418 11417 SFMMU_HASH_UNLOCK(hmebp);
11419 11418 if (hmebp++ == &khme_hash[KHMEHASH_SZ])
11420 11419 hmebp = khme_hash;
11421 11420 }
11422 11421
11423 11422 if (hmeblkp != NULL)
11424 11423 break;
11425 11424 sfmmu_hblk_steal_twice++;
11426 11425 }
11427 11426 return (hmeblkp);
11428 11427 }
11429 11428
11430 11429 /*
11431 11430  * This routine does the real work to prepare an hblk to be "stolen":
11432 11431  * unloading the mappings, updating shadow counts, etc.
11433 11432  * It returns 1 if the block is ready to be reused (stolen), or 0
11434 11433  * if the block cannot be stolen yet because pageunload is still
11435 11434  * working on this hblk.
11436 11435  */
11437 11436 static int
11438 11437 sfmmu_steal_this_hblk(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
11439 11438 uint64_t hblkpa, struct hme_blk *pr_hblk)
11440 11439 {
11441 11440 int shw_size, vshift;
11442 11441 struct hme_blk *shw_hblkp;
11443 11442 caddr_t vaddr;
11444 11443 uint_t shw_mask, newshw_mask;
11445 11444 struct hme_blk *list = NULL;
11446 11445
11447 11446 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
11448 11447
11449 11448 /*
11450 11449 * check if the hmeblk is free, unload if necessary
11451 11450 */
11452 11451 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) {
11453 11452 sfmmu_t *sfmmup;
11454 11453 demap_range_t dmr;
11455 11454
11456 11455 sfmmup = hblktosfmmu(hmeblkp);
11457 11456 if (hmeblkp->hblk_shared || sfmmup->sfmmu_ismhat) {
11458 11457 return (0);
11459 11458 }
11460 11459 DEMAP_RANGE_INIT(sfmmup, &dmr);
11461 11460 (void) sfmmu_hblk_unload(sfmmup, hmeblkp,
11462 11461 (caddr_t)get_hblk_base(hmeblkp),
11463 11462 get_hblk_endaddr(hmeblkp), &dmr, HAT_UNLOAD);
11464 11463 DEMAP_RANGE_FLUSH(&dmr);
11465 11464 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) {
11466 11465 /*
11467 11466 * Pageunload is working on the same hblk.
11468 11467 */
11469 11468 return (0);
11470 11469 }
11471 11470
11472 11471 sfmmu_hblk_steal_unload_count++;
11473 11472 }
11474 11473
11475 11474 ASSERT(hmeblkp->hblk_lckcnt == 0);
11476 11475 ASSERT(hmeblkp->hblk_vcnt == 0 && hmeblkp->hblk_hmecnt == 0);
11477 11476
11478 11477 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, &list, 1);
11479 11478 hmeblkp->hblk_nextpa = hblkpa;
11480 11479
11481 11480 shw_hblkp = hmeblkp->hblk_shadow;
11482 11481 if (shw_hblkp) {
11483 11482 ASSERT(!hmeblkp->hblk_shared);
11484 11483 shw_size = get_hblk_ttesz(shw_hblkp);
11485 11484 vaddr = (caddr_t)get_hblk_base(hmeblkp);
11486 11485 vshift = vaddr_to_vshift(shw_hblkp->hblk_tag, vaddr, shw_size);
11487 11486 ASSERT(vshift < 8);
11488 11487 /*
11489 11488 * Atomically clear shadow mask bit
11490 11489 */
11491 11490 do {
11492 11491 shw_mask = shw_hblkp->hblk_shw_mask;
11493 11492 ASSERT(shw_mask & (1 << vshift));
11494 11493 newshw_mask = shw_mask & ~(1 << vshift);
11495 11494 newshw_mask = atomic_cas_32(&shw_hblkp->hblk_shw_mask,
11496 11495 shw_mask, newshw_mask);
11497 11496 } while (newshw_mask != shw_mask);
11498 11497 hmeblkp->hblk_shadow = NULL;
11499 11498 }
11500 11499
11501 11500 /*
11502 11501 * remove shadow bit if we are stealing an unused shadow hmeblk.
11503 11502 	 * sfmmu_hblk_alloc needs it that way; it will set the shadow bit later if
11504 11503 * we are indeed allocating a shadow hmeblk.
11505 11504 */
11506 11505 hmeblkp->hblk_shw_bit = 0;
11507 11506
11508 11507 if (hmeblkp->hblk_shared) {
11509 11508 sf_srd_t *srdp;
11510 11509 sf_region_t *rgnp;
11511 11510 uint_t rid;
11512 11511
11513 11512 srdp = hblktosrd(hmeblkp);
11514 11513 ASSERT(srdp != NULL && srdp->srd_refcnt != 0);
11515 11514 rid = hmeblkp->hblk_tag.htag_rid;
11516 11515 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
11517 11516 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
11518 11517 rgnp = srdp->srd_hmergnp[rid];
11519 11518 ASSERT(rgnp != NULL);
11520 11519 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid);
11521 11520 hmeblkp->hblk_shared = 0;
11522 11521 }
11523 11522
11524 11523 sfmmu_hblk_steal_count++;
11525 11524 SFMMU_STAT(sf_steal_count);
11526 11525
11527 11526 return (1);
11528 11527 }
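
/*
 * Illustrative sketch, not part of the original source: the lock-free
 * compare-and-swap retry loop used by sfmmu_steal_this_hblk() above to
 * clear a single bit of hblk_shw_mask.  The helper name and the generic
 * uint32_t word are assumptions made for this example only; it relies on
 * atomic_cas_32() from <sys/atomic.h>, which this file already includes.
 */
static void
example_atomic_clear_bit(volatile uint32_t *maskp, uint_t bit)
{
	uint32_t omask, nmask;

	do {
		omask = *maskp;			/* snapshot the current mask */
		nmask = omask & ~(1U << bit);	/* clear the target bit */
		/* retry if another thread changed the mask in the meantime */
	} while (atomic_cas_32(maskp, omask, nmask) != omask);
}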
11529 11528
11530 11529 struct hme_blk *
11531 11530 sfmmu_hmetohblk(struct sf_hment *sfhme)
11532 11531 {
11533 11532 struct hme_blk *hmeblkp;
11534 11533 struct sf_hment *sfhme0;
11535 11534 struct hme_blk *hblk_dummy = 0;
11536 11535
11537 11536 /*
11538 11537 * No dummy sf_hments, please.
11539 11538 */
11540 11539 ASSERT(sfhme->hme_tte.ll != 0);
11541 11540
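	/*
	 * Back up to the first sf_hment of the block, then subtract the
	 * offset of hblk_hme[0] within struct hme_blk (computed via the
	 * NULL-based hblk_dummy pointer) to recover the enclosing hme_blk.
	 */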
11542 11541 sfhme0 = sfhme - sfhme->hme_tte.tte_hmenum;
11543 11542 hmeblkp = (struct hme_blk *)((uintptr_t)sfhme0 -
11544 11543 (uintptr_t)&hblk_dummy->hblk_hme[0]);
11545 11544
11546 11545 return (hmeblkp);
11547 11546 }
11548 11547
11549 11548 /*
11550 11549 * On swapin, get appropriately sized TSB(s) and clear the HAT_SWAPPED flag.
11551 11550 * If we can't get appropriately sized TSB(s), try for 8K TSB(s) using
11552 11551 * KM_SLEEP allocation.
11555 11554  */
11556 11555 static void
11557 11556 sfmmu_tsb_swapin(sfmmu_t *sfmmup, hatlock_t *hatlockp)
11558 11557 {
11559 11558 struct tsb_info *tsbinfop, *next;
11560 11559 tsb_replace_rc_t rc;
11561 11560 boolean_t gotfirst = B_FALSE;
11562 11561
11563 11562 ASSERT(sfmmup != ksfmmup);
11564 11563 ASSERT(sfmmu_hat_lock_held(sfmmup));
11565 11564
11566 11565 while (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPIN)) {
11567 11566 cv_wait(&sfmmup->sfmmu_tsb_cv, HATLOCK_MUTEXP(hatlockp));
11568 11567 }
11569 11568
11570 11569 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
11571 11570 SFMMU_FLAGS_SET(sfmmup, HAT_SWAPIN);
11572 11571 } else {
11573 11572 return;
11574 11573 }
11575 11574
11576 11575 ASSERT(sfmmup->sfmmu_tsb != NULL);
11577 11576
11578 11577 /*
11579 11578 * Loop over all tsbinfo's replacing them with ones that actually have
11580 11579 * a TSB. If any of the replacements ever fail, bail out of the loop.
11581 11580 */
11582 11581 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL; tsbinfop = next) {
11583 11582 ASSERT(tsbinfop->tsb_flags & TSB_SWAPPED);
11584 11583 next = tsbinfop->tsb_next;
11585 11584 rc = sfmmu_replace_tsb(sfmmup, tsbinfop, tsbinfop->tsb_szc,
11586 11585 hatlockp, TSB_SWAPIN);
11587 11586 if (rc != TSB_SUCCESS) {
11588 11587 break;
11589 11588 }
11590 11589 gotfirst = B_TRUE;
11591 11590 }
11592 11591
11593 11592 switch (rc) {
11594 11593 case TSB_SUCCESS:
11595 11594 SFMMU_FLAGS_CLEAR(sfmmup, HAT_SWAPPED|HAT_SWAPIN);
11596 11595 cv_broadcast(&sfmmup->sfmmu_tsb_cv);
11597 11596 return;
11598 11597 case TSB_LOSTRACE:
11599 11598 break;
11600 11599 case TSB_ALLOCFAIL:
11601 11600 break;
11602 11601 default:
11603 11602 panic("sfmmu_replace_tsb returned unrecognized failure code "
11604 11603 "%d", rc);
11605 11604 }
11606 11605
11607 11606 /*
11608 11607 * In this case, we failed to get one of our TSBs. If we failed to
11609 11608 * get the first TSB, get one of minimum size (8KB). Walk the list
11610 11609 * and throw away the tsbinfos, starting where the allocation failed;
11611 11610 * we can get by with just one TSB as long as we don't leave the
11612 11611 * SWAPPED tsbinfo structures lying around.
11613 11612 */
11614 11613 tsbinfop = sfmmup->sfmmu_tsb;
11615 11614 next = tsbinfop->tsb_next;
11616 11615 tsbinfop->tsb_next = NULL;
11617 11616
11618 11617 sfmmu_hat_exit(hatlockp);
11619 11618 for (tsbinfop = next; tsbinfop != NULL; tsbinfop = next) {
11620 11619 next = tsbinfop->tsb_next;
11621 11620 sfmmu_tsbinfo_free(tsbinfop);
11622 11621 }
11623 11622 hatlockp = sfmmu_hat_enter(sfmmup);
11624 11623
11625 11624 /*
11626 11625 * If we don't have any TSBs, get a single 8K TSB for 8K, 64K and 512K
11627 11626 * pages.
11628 11627 */
11629 11628 if (!gotfirst) {
11630 11629 tsbinfop = sfmmup->sfmmu_tsb;
11631 11630 rc = sfmmu_replace_tsb(sfmmup, tsbinfop, TSB_MIN_SZCODE,
11632 11631 hatlockp, TSB_SWAPIN | TSB_FORCEALLOC);
11633 11632 ASSERT(rc == TSB_SUCCESS);
11634 11633 }
11635 11634
11636 11635 SFMMU_FLAGS_CLEAR(sfmmup, HAT_SWAPPED|HAT_SWAPIN);
11637 11636 cv_broadcast(&sfmmup->sfmmu_tsb_cv);
11638 11637 }
11639 11638
11640 11639 static int
11641 11640 sfmmu_is_rgnva(sf_srd_t *srdp, caddr_t addr, ulong_t w, ulong_t bmw)
11642 11641 {
11643 11642 ulong_t bix = 0;
11644 11643 uint_t rid;
11645 11644 sf_region_t *rgnp;
11646 11645
11647 11646 ASSERT(srdp != NULL);
11648 11647 ASSERT(srdp->srd_refcnt != 0);
11649 11648
11650 11649 w <<= BT_ULSHIFT;
11651 11650 while (bmw) {
11652 11651 if (!(bmw & 0x1)) {
11653 11652 bix++;
11654 11653 bmw >>= 1;
11655 11654 continue;
11656 11655 }
11657 11656 rid = w | bix;
11658 11657 rgnp = srdp->srd_hmergnp[rid];
11659 11658 ASSERT(rgnp->rgn_refcnt > 0);
11660 11659 ASSERT(rgnp->rgn_id == rid);
11661 11660 if (addr < rgnp->rgn_saddr ||
11662 11661 addr >= (rgnp->rgn_saddr + rgnp->rgn_size)) {
11663 11662 bix++;
11664 11663 bmw >>= 1;
11665 11664 } else {
11666 11665 return (1);
11667 11666 }
11668 11667 }
11669 11668 return (0);
11670 11669 }
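
/*
 * Illustrative sketch, not part of the original source: the bitmap walk
 * performed by sfmmu_is_rgnva() above.  For word 'w' of an hme-region
 * bitmap, a set bit 'bix' denotes region id (w << BT_ULSHIFT) | bix in
 * srdp->srd_hmergnp[].  The helper name is an assumption for the example.
 */
static uint_t
example_count_rgns_in_word(ulong_t w, ulong_t bmw)
{
	ulong_t bix;
	uint_t nrgns = 0;

	for (bix = 0; bmw != 0; bix++, bmw >>= 1) {
		if (bmw & 0x1) {
			/* this bit maps to region id (w << BT_ULSHIFT) | bix */
			nrgns++;
		}
	}
	return (nrgns);
}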
11671 11670
11672 11671 /*
11673 11672 * Handle exceptions for low level tsb_handler.
11674 11673 *
11675 11674 * There are many scenarios that could land us here:
11676 11675 *
11677 11676  * If the context is invalid we land here. The context can be invalid
11678 11677  * for 3 reasons: 1) we couldn't allocate a new context and now need to
11679 11678  * perform a wrap-around operation in order to allocate a new context;
11680 11679  * 2) the context was invalidated to change pagesize programming; 3) the ISM
11681 11680  * or TSB configuration is changing for this process and we are forced in
11682 11681  * here to do a synchronization operation. If the context is valid we can
11683 11682  * be here from the window trap handler. In this case just call trap to
11684 11683  * handle the fault.
11685 11684 *
11686 11685 * Note that the process will run in INVALID_CONTEXT before
11687 11686 * faulting into here and subsequently loading the MMU registers
11688 11687 * (including the TSB base register) associated with this process.
11689 11688 * For this reason, the trap handlers must all test for
11690 11689 * INVALID_CONTEXT before attempting to access any registers other
11691 11690 * than the context registers.
11692 11691 */
11693 11692 void
11694 11693 sfmmu_tsbmiss_exception(struct regs *rp, uintptr_t tagaccess, uint_t traptype)
11695 11694 {
11696 11695 sfmmu_t *sfmmup, *shsfmmup;
11697 11696 uint_t ctxtype;
11698 11697 klwp_id_t lwp;
11699 11698 char lwp_save_state;
11700 11699 hatlock_t *hatlockp, *shatlockp;
11701 11700 struct tsb_info *tsbinfop;
11702 11701 struct tsbmiss *tsbmp;
11703 11702 sf_scd_t *scdp;
11704 11703
11705 11704 SFMMU_STAT(sf_tsb_exceptions);
11706 11705 SFMMU_MMU_STAT(mmu_tsb_exceptions);
11707 11706 sfmmup = astosfmmu(curthread->t_procp->p_as);
11708 11707 /*
11709 11708 	 * note that in sun4u, the tagaccess register contains the ctxnum
11710 11709 * while sun4v passes ctxtype in the tagaccess register.
11711 11710 */
11712 11711 ctxtype = tagaccess & TAGACC_CTX_MASK;
11713 11712
11714 11713 ASSERT(sfmmup != ksfmmup && ctxtype != KCONTEXT);
11715 11714 ASSERT(sfmmup->sfmmu_ismhat == 0);
11716 11715 ASSERT(!SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED) ||
11717 11716 ctxtype == INVALID_CONTEXT);
11718 11717
11719 11718 if (ctxtype != INVALID_CONTEXT && traptype != T_DATA_PROT) {
11720 11719 /*
11721 11720 * We may land here because shme bitmap and pagesize
11722 11721 * flags are updated lazily in tsbmiss area on other cpus.
11723 11722 		 * If we detect here that the tsbmiss area is out of sync
11724 11723 		 * with the sfmmu, update it and retry the trapped instruction.
11725 11724 * Otherwise call trap().
11726 11725 */
11727 11726 int ret = 0;
11728 11727 uchar_t tteflag_mask = (1 << TTE64K) | (1 << TTE8K);
11729 11728 caddr_t addr = (caddr_t)(tagaccess & TAGACC_VADDR_MASK);
11730 11729
11731 11730 /*
11732 11731 * Must set lwp state to LWP_SYS before
11733 11732 * trying to acquire any adaptive lock
11734 11733 */
11735 11734 lwp = ttolwp(curthread);
11736 11735 ASSERT(lwp);
11737 11736 lwp_save_state = lwp->lwp_state;
11738 11737 lwp->lwp_state = LWP_SYS;
11739 11738
11740 11739 hatlockp = sfmmu_hat_enter(sfmmup);
11741 11740 kpreempt_disable();
11742 11741 tsbmp = &tsbmiss_area[CPU->cpu_id];
11743 11742 ASSERT(sfmmup == tsbmp->usfmmup);
11744 11743 if (((tsbmp->uhat_tteflags ^ sfmmup->sfmmu_tteflags) &
11745 11744 ~tteflag_mask) ||
11746 11745 ((tsbmp->uhat_rtteflags ^ sfmmup->sfmmu_rtteflags) &
11747 11746 ~tteflag_mask)) {
11748 11747 tsbmp->uhat_tteflags = sfmmup->sfmmu_tteflags;
11749 11748 tsbmp->uhat_rtteflags = sfmmup->sfmmu_rtteflags;
11750 11749 ret = 1;
11751 11750 }
11752 11751 if (sfmmup->sfmmu_srdp != NULL) {
11753 11752 ulong_t *sm = sfmmup->sfmmu_hmeregion_map.bitmap;
11754 11753 ulong_t *tm = tsbmp->shmermap;
11755 11754 ulong_t i;
11756 11755 for (i = 0; i < SFMMU_HMERGNMAP_WORDS; i++) {
11757 11756 ulong_t d = tm[i] ^ sm[i];
11758 11757 if (d) {
11759 11758 if (d & sm[i]) {
11760 11759 if (!ret && sfmmu_is_rgnva(
11761 11760 sfmmup->sfmmu_srdp,
11762 11761 addr, i, d & sm[i])) {
11763 11762 ret = 1;
11764 11763 }
11765 11764 }
11766 11765 tm[i] = sm[i];
11767 11766 }
11768 11767 }
11769 11768 }
11770 11769 kpreempt_enable();
11771 11770 sfmmu_hat_exit(hatlockp);
11772 11771 lwp->lwp_state = lwp_save_state;
11773 11772 if (ret) {
11774 11773 return;
11775 11774 }
11776 11775 } else if (ctxtype == INVALID_CONTEXT) {
11777 11776 /*
11778 11777 * First, make sure we come out of here with a valid ctx,
11779 11778 * since if we don't get one we'll simply loop on the
11780 11779 * faulting instruction.
11781 11780 *
11782 11781 		 * If the ISM mappings are changing, the TSB is being
11783 11782 		 * relocated, the process is being swapped, or the process
11784 11783 		 * is joining or leaving an SCD or shared regions, we
11785 11784 		 * serialize behind the controlling thread with the hat
11786 11785 		 * lock, sfmmu_flags and the sfmmu_tsb_cv condition variable.
11787 11786 */
11788 11787
11789 11788 /*
11790 11789 * Must set lwp state to LWP_SYS before
11791 11790 * trying to acquire any adaptive lock
11792 11791 */
11793 11792 lwp = ttolwp(curthread);
11794 11793 ASSERT(lwp);
11795 11794 lwp_save_state = lwp->lwp_state;
11796 11795 lwp->lwp_state = LWP_SYS;
11797 11796
11798 11797 hatlockp = sfmmu_hat_enter(sfmmup);
11799 11798 retry:
11800 11799 if ((scdp = sfmmup->sfmmu_scdp) != NULL) {
11801 11800 shsfmmup = scdp->scd_sfmmup;
11802 11801 ASSERT(shsfmmup != NULL);
11803 11802
11804 11803 for (tsbinfop = shsfmmup->sfmmu_tsb; tsbinfop != NULL;
11805 11804 tsbinfop = tsbinfop->tsb_next) {
11806 11805 if (tsbinfop->tsb_flags & TSB_RELOC_FLAG) {
11807 11806 /* drop the private hat lock */
11808 11807 sfmmu_hat_exit(hatlockp);
11809 11808 /* acquire the shared hat lock */
11810 11809 shatlockp = sfmmu_hat_enter(shsfmmup);
11811 11810 /*
11812 11811 * recheck to see if anything changed
11813 11812 * after we drop the private hat lock.
11814 11813 */
11815 11814 if (sfmmup->sfmmu_scdp == scdp &&
11816 11815 shsfmmup == scdp->scd_sfmmup) {
11817 11816 sfmmu_tsb_chk_reloc(shsfmmup,
11818 11817 shatlockp);
11819 11818 }
11820 11819 sfmmu_hat_exit(shatlockp);
11821 11820 hatlockp = sfmmu_hat_enter(sfmmup);
11822 11821 goto retry;
11823 11822 }
11824 11823 }
11825 11824 }
11826 11825
11827 11826 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL;
11828 11827 tsbinfop = tsbinfop->tsb_next) {
11829 11828 if (tsbinfop->tsb_flags & TSB_RELOC_FLAG) {
11830 11829 cv_wait(&sfmmup->sfmmu_tsb_cv,
11831 11830 HATLOCK_MUTEXP(hatlockp));
11832 11831 goto retry;
11833 11832 }
11834 11833 }
11835 11834
11836 11835 /*
11837 11836 * Wait for ISM maps to be updated.
11838 11837 */
11839 11838 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) {
11840 11839 cv_wait(&sfmmup->sfmmu_tsb_cv,
11841 11840 HATLOCK_MUTEXP(hatlockp));
11842 11841 goto retry;
11843 11842 }
11844 11843
11845 11844 /* Is this process joining an SCD? */
11846 11845 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)) {
11847 11846 /*
11848 11847 * Flush private TSB and setup shared TSB.
11849 11848 * sfmmu_finish_join_scd() does not drop the
11850 11849 * hat lock.
11851 11850 */
11852 11851 sfmmu_finish_join_scd(sfmmup);
11853 11852 SFMMU_FLAGS_CLEAR(sfmmup, HAT_JOIN_SCD);
11854 11853 }
11855 11854
11856 11855 /*
11857 11856 * If we're swapping in, get TSB(s). Note that we must do
11858 11857 * this before we get a ctx or load the MMU state. Once
11859 11858 * we swap in we have to recheck to make sure the TSB(s) and
11860 11859 * ISM mappings didn't change while we slept.
11861 11860 */
11862 11861 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
11863 11862 sfmmu_tsb_swapin(sfmmup, hatlockp);
11864 11863 goto retry;
11865 11864 }
11866 11865
11867 11866 sfmmu_get_ctx(sfmmup);
11868 11867
11869 11868 sfmmu_hat_exit(hatlockp);
11870 11869 /*
11871 11870 * Must restore lwp_state if not calling
11872 11871 * trap() for further processing. Restore
11873 11872 * it anyway.
11874 11873 */
11875 11874 lwp->lwp_state = lwp_save_state;
11876 11875 return;
11877 11876 }
11878 11877 trap(rp, (caddr_t)tagaccess, traptype, 0);
11879 11878 }
11880 11879
11881 11880 static void
11882 11881 sfmmu_tsb_chk_reloc(sfmmu_t *sfmmup, hatlock_t *hatlockp)
11883 11882 {
11884 11883 struct tsb_info *tp;
11885 11884
11886 11885 ASSERT(sfmmu_hat_lock_held(sfmmup));
11887 11886
11888 11887 for (tp = sfmmup->sfmmu_tsb; tp != NULL; tp = tp->tsb_next) {
11889 11888 if (tp->tsb_flags & TSB_RELOC_FLAG) {
11890 11889 cv_wait(&sfmmup->sfmmu_tsb_cv,
11891 11890 HATLOCK_MUTEXP(hatlockp));
11892 11891 break;
11893 11892 }
11894 11893 }
11895 11894 }
11896 11895
11897 11896 /*
11898 11897  * sfmmu_vatopfn_suspended is called from GET_TTE when TL=0 and the
11899 11898  * TTE_SUSPENDED bit is set in the tte. We block on acquiring a page lock
11900 11899  * rather than spinning to avoid send-mondo timeouts with
11901 11900 * interrupts enabled. When the lock is acquired it is immediately
11902 11901 * released and we return back to sfmmu_vatopfn just after
11903 11902 * the GET_TTE call.
11904 11903 */
11905 11904 void
11906 11905 sfmmu_vatopfn_suspended(caddr_t vaddr, sfmmu_t *sfmmu, tte_t *ttep)
11907 11906 {
11908 11907 struct page **pp;
11909 11908
11910 11909 (void) as_pagelock(sfmmu->sfmmu_as, &pp, vaddr, TTE_CSZ(ttep), S_WRITE);
11911 11910 as_pageunlock(sfmmu->sfmmu_as, pp, vaddr, TTE_CSZ(ttep), S_WRITE);
11912 11911 }
11913 11912
11914 11913 /*
11915 11914 * sfmmu_tsbmiss_suspended is called from GET_TTE when TL>0 and
11916 11915  * the TTE_SUSPENDED bit is set in the tte. We do this so that we can handle
11917 11916 * cross traps which cannot be handled while spinning in the
11918 11917 * trap handlers. Simply enter and exit the kpr_suspendlock spin
11919 11918 * mutex, which is held by the holder of the suspend bit, and then
11920 11919 * retry the trapped instruction after unwinding.
11921 11920 */
11922 11921 /*ARGSUSED*/
11923 11922 void
11924 11923 sfmmu_tsbmiss_suspended(struct regs *rp, uintptr_t tagacc, uint_t traptype)
11925 11924 {
11926 11925 ASSERT(curthread != kreloc_thread);
11927 11926 mutex_enter(&kpr_suspendlock);
11928 11927 mutex_exit(&kpr_suspendlock);
11929 11928 }
11930 11929
11931 11930 /*
11932 11931 * This routine could be optimized to reduce the number of xcalls by flushing
11933 11932  * the entire TLB if the region reference count is above some threshold, but
11934 11933  * the tradeoff will depend on the size of the TLB. So for now flush the
11935 11934  * specific page one context at a time.
11936 11935 *
11937 11936 * If uselocks is 0 then it's called after all cpus were captured and all the
11938 11937 * hat locks were taken. In this case don't take the region lock by relying on
11939 11938 * the order of list region update operations in hat_join_region(),
11940 11939 * hat_leave_region() and hat_dup_region(). The ordering in those routines
11941 11940 * guarantees that list is always forward walkable and reaches active sfmmus
11942 11941 * regardless of where xc_attention() captures a cpu.
11943 11942 */
11944 11943 cpuset_t
11945 11944 sfmmu_rgntlb_demap(caddr_t addr, sf_region_t *rgnp,
11946 11945 struct hme_blk *hmeblkp, int uselocks)
11947 11946 {
11948 11947 sfmmu_t *sfmmup;
11949 11948 cpuset_t cpuset;
11950 11949 cpuset_t rcpuset;
11951 11950 hatlock_t *hatlockp;
11952 11951 uint_t rid = rgnp->rgn_id;
11953 11952 sf_rgn_link_t *rlink;
11954 11953 sf_scd_t *scdp;
11955 11954
11956 11955 ASSERT(hmeblkp->hblk_shared);
11957 11956 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
11958 11957 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
11959 11958
11960 11959 CPUSET_ZERO(rcpuset);
11961 11960 if (uselocks) {
11962 11961 mutex_enter(&rgnp->rgn_mutex);
11963 11962 }
11964 11963 sfmmup = rgnp->rgn_sfmmu_head;
11965 11964 while (sfmmup != NULL) {
11966 11965 if (uselocks) {
11967 11966 hatlockp = sfmmu_hat_enter(sfmmup);
11968 11967 }
11969 11968
11970 11969 /*
11971 11970 * When an SCD is created the SCD hat is linked on the sfmmu
11972 11971 * region lists for each hme region which is part of the
11973 11972 * SCD. If we find an SCD hat, when walking these lists,
11974 11973 * then we flush the shared TSBs, if we find a private hat,
11975 11974 * which is part of an SCD, but where the region
11976 11975 * is not part of the SCD then we flush the private TSBs.
11977 11976 */
11978 11977 if (!sfmmup->sfmmu_scdhat && sfmmup->sfmmu_scdp != NULL &&
11979 11978 !SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)) {
11980 11979 scdp = sfmmup->sfmmu_scdp;
11981 11980 if (SF_RGNMAP_TEST(scdp->scd_hmeregion_map, rid)) {
11982 11981 if (uselocks) {
11983 11982 sfmmu_hat_exit(hatlockp);
11984 11983 }
11985 11984 goto next;
11986 11985 }
11987 11986 }
11988 11987
11989 11988 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0);
11990 11989
11991 11990 kpreempt_disable();
11992 11991 cpuset = sfmmup->sfmmu_cpusran;
11993 11992 CPUSET_AND(cpuset, cpu_ready_set);
11994 11993 CPUSET_DEL(cpuset, CPU->cpu_id);
11995 11994 SFMMU_XCALL_STATS(sfmmup);
11996 11995 xt_some(cpuset, vtag_flushpage_tl1,
11997 11996 (uint64_t)addr, (uint64_t)sfmmup);
11998 11997 vtag_flushpage(addr, (uint64_t)sfmmup);
11999 11998 if (uselocks) {
12000 11999 sfmmu_hat_exit(hatlockp);
12001 12000 }
12002 12001 kpreempt_enable();
12003 12002 CPUSET_OR(rcpuset, cpuset);
12004 12003
12005 12004 next:
12006 12005 /* LINTED: constant in conditional context */
12007 12006 SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 0, 0);
12008 12007 ASSERT(rlink != NULL);
12009 12008 sfmmup = rlink->next;
12010 12009 }
12011 12010 if (uselocks) {
12012 12011 mutex_exit(&rgnp->rgn_mutex);
12013 12012 }
12014 12013 return (rcpuset);
12015 12014 }
12016 12015
12017 12016 /*
12018 12017  * This routine takes an sfmmu pointer and the va of an address in an
12019 12018 * ISM region as input and returns the corresponding region id in ism_rid.
12020 12019 * The return value of 1 indicates that a region has been found and ism_rid
12021 12020 * is valid, otherwise 0 is returned.
12022 12021 */
12023 12022 static int
12024 12023 find_ism_rid(sfmmu_t *sfmmup, sfmmu_t *ism_sfmmup, caddr_t va, uint_t *ism_rid)
12025 12024 {
12026 12025 ism_blk_t *ism_blkp;
12027 12026 int i;
12028 12027 ism_map_t *ism_map;
12029 12028 #ifdef DEBUG
12030 12029 struct hat *ism_hatid;
12031 12030 #endif
12032 12031 ASSERT(sfmmu_hat_lock_held(sfmmup));
12033 12032
12034 12033 ism_blkp = sfmmup->sfmmu_iblk;
12035 12034 while (ism_blkp != NULL) {
12036 12035 ism_map = ism_blkp->iblk_maps;
12037 12036 for (i = 0; i < ISM_MAP_SLOTS && ism_map[i].imap_ismhat; i++) {
12038 12037 if ((va >= ism_start(ism_map[i])) &&
12039 12038 (va < ism_end(ism_map[i]))) {
12040 12039
12041 12040 *ism_rid = ism_map[i].imap_rid;
12042 12041 #ifdef DEBUG
12043 12042 ism_hatid = ism_map[i].imap_ismhat;
12044 12043 ASSERT(ism_hatid == ism_sfmmup);
12045 12044 ASSERT(ism_hatid->sfmmu_ismhat);
12046 12045 #endif
12047 12046 return (1);
12048 12047 }
12049 12048 }
12050 12049 ism_blkp = ism_blkp->iblk_next;
12051 12050 }
12052 12051 return (0);
12053 12052 }
12054 12053
12055 12054 /*
12056 12055 * Special routine to flush out ism mappings- TSBs, TLBs and D-caches.
12057 12056 * This routine may be called with all cpu's captured. Therefore, the
12058 12057 * caller is responsible for holding all locks and disabling kernel
12059 12058 * preemption.
12060 12059 */
12061 12060 /* ARGSUSED */
12062 12061 static void
12063 12062 sfmmu_ismtlbcache_demap(caddr_t addr, sfmmu_t *ism_sfmmup,
12064 12063 struct hme_blk *hmeblkp, pfn_t pfnum, int cache_flush_flag)
12065 12064 {
12066 12065 cpuset_t cpuset;
12067 12066 caddr_t va;
12068 12067 ism_ment_t *ment;
12069 12068 sfmmu_t *sfmmup;
12070 12069 #ifdef VAC
12071 12070 int vcolor;
12072 12071 #endif
12073 12072
12074 12073 sf_scd_t *scdp;
12075 12074 uint_t ism_rid;
12076 12075
12077 12076 ASSERT(!hmeblkp->hblk_shared);
12078 12077 /*
12079 12078 * Walk the ism_hat's mapping list and flush the page
12080 12079 * from every hat sharing this ism_hat. This routine
12081 12080 * may be called while all cpu's have been captured.
12082 12081 * Therefore we can't attempt to grab any locks. For now
12083 12082 * this means we will protect the ism mapping list under
12084 12083 * a single lock which will be grabbed by the caller.
12085 12084 	 * If hat_share/unshare scalability becomes a performance
12086 12085 * problem then we may need to re-think ism mapping list locking.
12087 12086 */
12088 12087 ASSERT(ism_sfmmup->sfmmu_ismhat);
12089 12088 ASSERT(MUTEX_HELD(&ism_mlist_lock));
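	/* Convert addr into an offset within the ISM segment. */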
12090 12089 addr = addr - ISMID_STARTADDR;
12091 12090
12092 12091 for (ment = ism_sfmmup->sfmmu_iment; ment; ment = ment->iment_next) {
12093 12092
12094 12093 sfmmup = ment->iment_hat;
12095 12094
12096 12095 va = ment->iment_base_va;
12097 12096 va = (caddr_t)((uintptr_t)va + (uintptr_t)addr);
12098 12097
12099 12098 /*
12100 12099 * When an SCD is created the SCD hat is linked on the ism
12101 12100 * mapping lists for each ISM segment which is part of the
12102 12101 * SCD. If we find an SCD hat, when walking these lists,
12103 12102 * then we flush the shared TSBs, if we find a private hat,
12104 12103 * which is part of an SCD, but where the region
12105 12104 * corresponding to this va is not part of the SCD then we
12106 12105 * flush the private TSBs.
12107 12106 */
12108 12107 if (!sfmmup->sfmmu_scdhat && sfmmup->sfmmu_scdp != NULL &&
12109 12108 !SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD) &&
12110 12109 !SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) {
12111 12110 if (!find_ism_rid(sfmmup, ism_sfmmup, va,
12112 12111 &ism_rid)) {
12113 12112 cmn_err(CE_PANIC,
12114 12113 "can't find matching ISM rid!");
12115 12114 }
12116 12115
12117 12116 scdp = sfmmup->sfmmu_scdp;
12118 12117 if (SFMMU_IS_ISMRID_VALID(ism_rid) &&
12119 12118 SF_RGNMAP_TEST(scdp->scd_ismregion_map,
12120 12119 ism_rid)) {
12121 12120 continue;
12122 12121 }
12123 12122 }
12124 12123 SFMMU_UNLOAD_TSB(va, sfmmup, hmeblkp, 1);
12125 12124
12126 12125 cpuset = sfmmup->sfmmu_cpusran;
12127 12126 CPUSET_AND(cpuset, cpu_ready_set);
12128 12127 CPUSET_DEL(cpuset, CPU->cpu_id);
12129 12128 SFMMU_XCALL_STATS(sfmmup);
12130 12129 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)va,
12131 12130 (uint64_t)sfmmup);
12132 12131 vtag_flushpage(va, (uint64_t)sfmmup);
12133 12132
12134 12133 #ifdef VAC
12135 12134 /*
12136 12135 * Flush D$
12137 12136 * When flushing D$ we must flush all
12138 12137 * cpu's. See sfmmu_cache_flush().
12139 12138 */
12140 12139 if (cache_flush_flag == CACHE_FLUSH) {
12141 12140 cpuset = cpu_ready_set;
12142 12141 CPUSET_DEL(cpuset, CPU->cpu_id);
12143 12142
12144 12143 SFMMU_XCALL_STATS(sfmmup);
12145 12144 vcolor = addr_to_vcolor(va);
12146 12145 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor);
12147 12146 vac_flushpage(pfnum, vcolor);
12148 12147 }
12149 12148 #endif /* VAC */
12150 12149 }
12151 12150 }
12152 12151
12153 12152 /*
12154 12153 * Demaps the TSB, CPU caches, and flushes all TLBs on all CPUs of
12155 12154 * a particular virtual address and ctx. If noflush is set we do not
12156 12155 * flush the TLB/TSB. This function may or may not be called with the
12157 12156 * HAT lock held.
12158 12157 */
12159 12158 static void
12160 12159 sfmmu_tlbcache_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp,
12161 12160 pfn_t pfnum, int tlb_noflush, int cpu_flag, int cache_flush_flag,
12162 12161 int hat_lock_held)
12163 12162 {
12164 12163 #ifdef VAC
12165 12164 int vcolor;
12166 12165 #endif
12167 12166 cpuset_t cpuset;
12168 12167 hatlock_t *hatlockp;
12169 12168
12170 12169 ASSERT(!hmeblkp->hblk_shared);
12171 12170
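	/*
	 * The self-assignments below exist only to quiet lint about
	 * arguments that are unused when VAC is not defined.
	 */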
12172 12171 #if defined(lint) && !defined(VAC)
12173 12172 pfnum = pfnum;
12174 12173 cpu_flag = cpu_flag;
12175 12174 cache_flush_flag = cache_flush_flag;
12176 12175 #endif
12177 12176
12178 12177 /*
12179 12178 * There is no longer a need to protect against ctx being
12180 12179 * stolen here since we don't store the ctx in the TSB anymore.
12181 12180 */
12182 12181 #ifdef VAC
12183 12182 vcolor = addr_to_vcolor(addr);
12184 12183 #endif
12185 12184
12186 12185 /*
12187 12186 * We must hold the hat lock during the flush of TLB,
12188 12187 * to avoid a race with sfmmu_invalidate_ctx(), where
12189 12188 * sfmmu_cnum on a MMU could be set to INVALID_CONTEXT,
12190 12189 * causing TLB demap routine to skip flush on that MMU.
12191 12190 * If the context on a MMU has already been set to
12192 12191 * INVALID_CONTEXT, we just get an extra flush on
12193 12192 * that MMU.
12194 12193 */
12195 12194 if (!hat_lock_held && !tlb_noflush)
12196 12195 hatlockp = sfmmu_hat_enter(sfmmup);
12197 12196
12198 12197 kpreempt_disable();
12199 12198 if (!tlb_noflush) {
12200 12199 /*
12201 12200 * Flush the TSB and TLB.
12202 12201 */
12203 12202 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0);
12204 12203
12205 12204 cpuset = sfmmup->sfmmu_cpusran;
12206 12205 CPUSET_AND(cpuset, cpu_ready_set);
12207 12206 CPUSET_DEL(cpuset, CPU->cpu_id);
12208 12207
12209 12208 SFMMU_XCALL_STATS(sfmmup);
12210 12209
12211 12210 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr,
12212 12211 (uint64_t)sfmmup);
12213 12212
12214 12213 vtag_flushpage(addr, (uint64_t)sfmmup);
12215 12214 }
12216 12215
12217 12216 if (!hat_lock_held && !tlb_noflush)
12218 12217 sfmmu_hat_exit(hatlockp);
12219 12218
12220 12219 #ifdef VAC
12221 12220 /*
12222 12221 * Flush the D$
12223 12222 *
12224 12223 * Even if the ctx is stolen, we need to flush the
12225 12224 * cache. Our ctx stealer only flushes the TLBs.
12226 12225 */
12227 12226 if (cache_flush_flag == CACHE_FLUSH) {
12228 12227 if (cpu_flag & FLUSH_ALL_CPUS) {
12229 12228 cpuset = cpu_ready_set;
12230 12229 } else {
12231 12230 cpuset = sfmmup->sfmmu_cpusran;
12232 12231 CPUSET_AND(cpuset, cpu_ready_set);
12233 12232 }
12234 12233 CPUSET_DEL(cpuset, CPU->cpu_id);
12235 12234 SFMMU_XCALL_STATS(sfmmup);
12236 12235 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor);
12237 12236 vac_flushpage(pfnum, vcolor);
12238 12237 }
12239 12238 #endif /* VAC */
12240 12239 kpreempt_enable();
12241 12240 }
12242 12241
12243 12242 /*
12244 12243 * Demaps the TSB and flushes all TLBs on all cpus for a particular virtual
12245 12244 * address and ctx. If noflush is set we do not currently do anything.
12246 12245 * This function may or may not be called with the HAT lock held.
12247 12246 */
12248 12247 static void
12249 12248 sfmmu_tlb_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp,
12250 12249 int tlb_noflush, int hat_lock_held)
12251 12250 {
12252 12251 cpuset_t cpuset;
12253 12252 hatlock_t *hatlockp;
12254 12253
12255 12254 ASSERT(!hmeblkp->hblk_shared);
12256 12255
12257 12256 /*
12258 12257 * If the process is exiting we have nothing to do.
12259 12258 */
12260 12259 if (tlb_noflush)
12261 12260 return;
12262 12261
12263 12262 /*
12264 12263 * Flush TSB.
12265 12264 */
12266 12265 if (!hat_lock_held)
12267 12266 hatlockp = sfmmu_hat_enter(sfmmup);
12268 12267 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0);
12269 12268
12270 12269 kpreempt_disable();
12271 12270
12272 12271 cpuset = sfmmup->sfmmu_cpusran;
12273 12272 CPUSET_AND(cpuset, cpu_ready_set);
12274 12273 CPUSET_DEL(cpuset, CPU->cpu_id);
12275 12274
12276 12275 SFMMU_XCALL_STATS(sfmmup);
12277 12276 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr, (uint64_t)sfmmup);
12278 12277
12279 12278 vtag_flushpage(addr, (uint64_t)sfmmup);
12280 12279
12281 12280 if (!hat_lock_held)
12282 12281 sfmmu_hat_exit(hatlockp);
12283 12282
12284 12283 kpreempt_enable();
12285 12284
12286 12285 }
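
/*
 * Illustrative sketch, not part of the original source: the cross-call
 * pattern shared by the demap routines above -- restrict the target set
 * to ready CPUs on which this hat has run, exclude the local CPU, ask the
 * remote CPUs to flush, then flush locally.  The helper name is an
 * assumption; statistics and locking are omitted for brevity.
 */
static void
example_xcall_flushpage(sfmmu_t *sfmmup, caddr_t addr)
{
	cpuset_t cpuset;

	kpreempt_disable();
	cpuset = sfmmup->sfmmu_cpusran;		/* CPUs this hat has run on */
	CPUSET_AND(cpuset, cpu_ready_set);	/* only CPUs that are ready */
	CPUSET_DEL(cpuset, CPU->cpu_id);	/* the local CPU flushes below */
	xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr, (uint64_t)sfmmup);
	vtag_flushpage(addr, (uint64_t)sfmmup);
	kpreempt_enable();
}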
12287 12286
12288 12287 /*
12289 12288 * Special case of sfmmu_tlb_demap for MMU_PAGESIZE hblks. Use the xcall
12290 12289 * call handler that can flush a range of pages to save on xcalls.
12291 12290 */
12292 12291 static int sfmmu_xcall_save;
12293 12292
12294 12293 /*
12295 12294  * This routine is never used for demapping addresses backed by SRD hmeblks.
12296 12295 */
12297 12296 static void
12298 12297 sfmmu_tlb_range_demap(demap_range_t *dmrp)
12299 12298 {
12300 12299 sfmmu_t *sfmmup = dmrp->dmr_sfmmup;
12301 12300 hatlock_t *hatlockp;
12302 12301 cpuset_t cpuset;
12303 12302 uint64_t sfmmu_pgcnt;
12304 12303 pgcnt_t pgcnt = 0;
12305 12304 int pgunload = 0;
12306 12305 int dirtypg = 0;
12307 12306 caddr_t addr = dmrp->dmr_addr;
12308 12307 caddr_t eaddr;
12309 12308 uint64_t bitvec = dmrp->dmr_bitvec;
12310 12309
12311 12310 ASSERT(bitvec & 1);
12312 12311
12313 12312 /*
12314 12313 * Flush TSB and calculate number of pages to flush.
12315 12314 */
12316 12315 while (bitvec != 0) {
12317 12316 dirtypg = 0;
12318 12317 /*
12319 12318 * Find the first page to flush and then count how many
12320 12319 * pages there are after it that also need to be flushed.
12321 12320 * This way the number of TSB flushes is minimized.
12322 12321 */
12323 12322 while ((bitvec & 1) == 0) {
12324 12323 pgcnt++;
12325 12324 addr += MMU_PAGESIZE;
12326 12325 bitvec >>= 1;
12327 12326 }
12328 12327 while (bitvec & 1) {
12329 12328 dirtypg++;
12330 12329 bitvec >>= 1;
12331 12330 }
12332 12331 eaddr = addr + ptob(dirtypg);
12333 12332 hatlockp = sfmmu_hat_enter(sfmmup);
12334 12333 sfmmu_unload_tsb_range(sfmmup, addr, eaddr, TTE8K);
12335 12334 sfmmu_hat_exit(hatlockp);
12336 12335 pgunload += dirtypg;
12337 12336 addr = eaddr;
12338 12337 pgcnt += dirtypg;
12339 12338 }
12340 12339
12341 12340 ASSERT((pgcnt<<MMU_PAGESHIFT) <= dmrp->dmr_endaddr - dmrp->dmr_addr);
12342 12341 if (sfmmup->sfmmu_free == 0) {
12343 12342 addr = dmrp->dmr_addr;
12344 12343 bitvec = dmrp->dmr_bitvec;
12345 12344
12346 12345 /*
12347 12346 * make sure it has SFMMU_PGCNT_SHIFT bits only,
12348 12347 * as it will be used to pack argument for xt_some
12349 12348 */
12350 12349 ASSERT((pgcnt > 0) &&
12351 12350 (pgcnt <= (1 << SFMMU_PGCNT_SHIFT)));
12352 12351
12353 12352 /*
12354 12353 		 * Encode pgcnt as (pgcnt - 1), and pass (pgcnt - 1) in
12355 12354 		 * the low 6 bits of sfmmup. This is doable since pgcnt
12356 12355 		 * is always >= 1.
12357 12356 */
12358 12357 ASSERT(!((uint64_t)sfmmup & SFMMU_PGCNT_MASK));
12359 12358 sfmmu_pgcnt = (uint64_t)sfmmup |
12360 12359 ((pgcnt - 1) & SFMMU_PGCNT_MASK);
12361 12360
12362 12361 /*
12363 12362 * We must hold the hat lock during the flush of TLB,
12364 12363 * to avoid a race with sfmmu_invalidate_ctx(), where
12365 12364 * sfmmu_cnum on a MMU could be set to INVALID_CONTEXT,
12366 12365 * causing TLB demap routine to skip flush on that MMU.
12367 12366 * If the context on a MMU has already been set to
12368 12367 * INVALID_CONTEXT, we just get an extra flush on
12369 12368 * that MMU.
12370 12369 */
12371 12370 hatlockp = sfmmu_hat_enter(sfmmup);
12372 12371 kpreempt_disable();
12373 12372
12374 12373 cpuset = sfmmup->sfmmu_cpusran;
12375 12374 CPUSET_AND(cpuset, cpu_ready_set);
12376 12375 CPUSET_DEL(cpuset, CPU->cpu_id);
12377 12376
12378 12377 SFMMU_XCALL_STATS(sfmmup);
12379 12378 xt_some(cpuset, vtag_flush_pgcnt_tl1, (uint64_t)addr,
12380 12379 sfmmu_pgcnt);
12381 12380
12382 12381 for (; bitvec != 0; bitvec >>= 1) {
12383 12382 if (bitvec & 1)
12384 12383 vtag_flushpage(addr, (uint64_t)sfmmup);
12385 12384 addr += MMU_PAGESIZE;
12386 12385 }
12387 12386 kpreempt_enable();
12388 12387 sfmmu_hat_exit(hatlockp);
12389 12388
12390 12389 sfmmu_xcall_save += (pgunload-1);
12391 12390 }
12392 12391 dmrp->dmr_bitvec = 0;
12393 12392 }
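
/*
 * Illustrative sketch, not part of the original source: how
 * sfmmu_tlb_range_demap() above packs the page count into the low bits of
 * the sfmmu pointer handed to vtag_flush_pgcnt_tl1, and how a receiver
 * could unpack it.  The helper names are assumptions for the example only.
 */
static uint64_t
example_pack_pgcnt(sfmmu_t *sfmmup, pgcnt_t pgcnt)
{
	/* the pointer is aligned, so its low SFMMU_PGCNT_SHIFT bits are 0 */
	ASSERT(((uint64_t)sfmmup & SFMMU_PGCNT_MASK) == 0);
	ASSERT(pgcnt >= 1 && pgcnt <= (1 << SFMMU_PGCNT_SHIFT));
	return ((uint64_t)sfmmup | ((pgcnt - 1) & SFMMU_PGCNT_MASK));
}

static void
example_unpack_pgcnt(uint64_t arg, sfmmu_t **sfmmupp, pgcnt_t *pgcntp)
{
	*sfmmupp = (sfmmu_t *)(arg & ~(uint64_t)SFMMU_PGCNT_MASK);
	*pgcntp = (pgcnt_t)(arg & SFMMU_PGCNT_MASK) + 1;	/* stored as n - 1 */
}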
12394 12393
12395 12394 /*
12396 12395 * In cases where we need to synchronize with TLB/TSB miss trap
12397 12396 * handlers, _and_ need to flush the TLB, it's a lot easier to
12398 12397 * throw away the context from the process than to do a
12399 12398 * special song and dance to keep things consistent for the
12400 12399 * handlers.
12401 12400 *
12402 12401 * Since the process suddenly ends up without a context and our caller
12403 12402 * holds the hat lock, threads that fault after this function is called
12404 12403 * will pile up on the lock. We can then do whatever we need to
12405 12404 * atomically from the context of the caller. The first blocked thread
12406 12405 * to resume executing will get the process a new context, and the
12407 12406 * process will resume executing.
12408 12407 *
12409 12408 * One added advantage of this approach is that on MMUs that
12410 12409 * support a "flush all" operation, we will delay the flush until
12411 12410 * cnum wrap-around, and then flush the TLB one time. This
12412 12411 * is rather rare, so it's a lot less expensive than making 8000
12413 12412 * x-calls to flush the TLB 8000 times.
12414 12413 *
12415 12414 * A per-process (PP) lock is used to synchronize ctx allocations in
12416 12415 * resume() and ctx invalidations here.
12417 12416 */
12418 12417 static void
12419 12418 sfmmu_invalidate_ctx(sfmmu_t *sfmmup)
12420 12419 {
12421 12420 cpuset_t cpuset;
12422 12421 int cnum, currcnum;
12423 12422 mmu_ctx_t *mmu_ctxp;
12424 12423 int i;
12425 12424 uint_t pstate_save;
12426 12425
12427 12426 SFMMU_STAT(sf_ctx_inv);
12428 12427
12429 12428 ASSERT(sfmmu_hat_lock_held(sfmmup));
12430 12429 ASSERT(sfmmup != ksfmmup);
12431 12430
12432 12431 kpreempt_disable();
12433 12432
12434 12433 mmu_ctxp = CPU_MMU_CTXP(CPU);
12435 12434 ASSERT(mmu_ctxp);
12436 12435 ASSERT(mmu_ctxp->mmu_idx < max_mmu_ctxdoms);
12437 12436 ASSERT(mmu_ctxp == mmu_ctxs_tbl[mmu_ctxp->mmu_idx]);
12438 12437
12439 12438 currcnum = sfmmup->sfmmu_ctxs[mmu_ctxp->mmu_idx].cnum;
12440 12439
12441 12440 pstate_save = sfmmu_disable_intrs();
12442 12441
12443 12442 lock_set(&sfmmup->sfmmu_ctx_lock); /* acquire PP lock */
12444 12443 /* set HAT cnum invalid across all context domains. */
12445 12444 for (i = 0; i < max_mmu_ctxdoms; i++) {
12446 12445
12447 12446 cnum = sfmmup->sfmmu_ctxs[i].cnum;
12448 12447 if (cnum == INVALID_CONTEXT) {
12449 12448 continue;
12450 12449 }
12451 12450
12452 12451 sfmmup->sfmmu_ctxs[i].cnum = INVALID_CONTEXT;
12453 12452 }
12454 12453 membar_enter(); /* make sure globally visible to all CPUs */
12455 12454 lock_clear(&sfmmup->sfmmu_ctx_lock); /* release PP lock */
12456 12455
12457 12456 sfmmu_enable_intrs(pstate_save);
12458 12457
12459 12458 cpuset = sfmmup->sfmmu_cpusran;
12460 12459 CPUSET_DEL(cpuset, CPU->cpu_id);
12461 12460 CPUSET_AND(cpuset, cpu_ready_set);
12462 12461 if (!CPUSET_ISNULL(cpuset)) {
12463 12462 SFMMU_XCALL_STATS(sfmmup);
12464 12463 xt_some(cpuset, sfmmu_raise_tsb_exception,
12465 12464 (uint64_t)sfmmup, INVALID_CONTEXT);
12466 12465 xt_sync(cpuset);
12467 12466 SFMMU_STAT(sf_tsb_raise_exception);
12468 12467 SFMMU_MMU_STAT(mmu_tsb_raise_exception);
12469 12468 }
12470 12469
12471 12470 /*
12472 12471 * If the hat to-be-invalidated is the same as the current
12473 12472 * process on local CPU we need to invalidate
12474 12473 * this CPU context as well.
12475 12474 */
12476 12475 if ((sfmmu_getctx_sec() == currcnum) &&
12477 12476 (currcnum != INVALID_CONTEXT)) {
12478 12477 /* sets shared context to INVALID too */
12479 12478 sfmmu_setctx_sec(INVALID_CONTEXT);
12480 12479 sfmmu_clear_utsbinfo();
12481 12480 }
12482 12481
12483 12482 SFMMU_FLAGS_SET(sfmmup, HAT_ALLCTX_INVALID);
12484 12483
12485 12484 kpreempt_enable();
12486 12485
12487 12486 /*
12488 12487 * we hold the hat lock, so nobody should allocate a context
12489 12488 * for us yet
12490 12489 */
12491 12490 ASSERT(sfmmup->sfmmu_ctxs[mmu_ctxp->mmu_idx].cnum == INVALID_CONTEXT);
12492 12491 }
12493 12492
12494 12493 #ifdef VAC
12495 12494 /*
12496 12495  * We need to flush the cache on all cpus. It is possible that
12497 12496  * a process referenced a page as cacheable but has since exited
12498 12497  * and cleared the mapping list. We still need to flush it but have no
12499 12498  * state, so flushing on all cpus is the only alternative.
12500 12499 */
12501 12500 void
12502 12501 sfmmu_cache_flush(pfn_t pfnum, int vcolor)
12503 12502 {
12504 12503 cpuset_t cpuset;
12505 12504
12506 12505 kpreempt_disable();
12507 12506 cpuset = cpu_ready_set;
12508 12507 CPUSET_DEL(cpuset, CPU->cpu_id);
12509 12508 SFMMU_XCALL_STATS(NULL); /* account to any ctx */
12510 12509 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor);
12511 12510 xt_sync(cpuset);
12512 12511 vac_flushpage(pfnum, vcolor);
12513 12512 kpreempt_enable();
12514 12513 }
12515 12514
12516 12515 void
12517 12516 sfmmu_cache_flushcolor(int vcolor, pfn_t pfnum)
12518 12517 {
12519 12518 cpuset_t cpuset;
12520 12519
12521 12520 ASSERT(vcolor >= 0);
12522 12521
12523 12522 kpreempt_disable();
12524 12523 cpuset = cpu_ready_set;
12525 12524 CPUSET_DEL(cpuset, CPU->cpu_id);
12526 12525 SFMMU_XCALL_STATS(NULL); /* account to any ctx */
12527 12526 xt_some(cpuset, vac_flushcolor_tl1, vcolor, pfnum);
12528 12527 xt_sync(cpuset);
12529 12528 vac_flushcolor(vcolor, pfnum);
12530 12529 kpreempt_enable();
12531 12530 }
12532 12531 #endif /* VAC */
12533 12532
12534 12533 /*
12535 12534 * We need to prevent processes from accessing the TSB using a cached physical
12536 12535 * address. It's alright if they try to access the TSB via virtual address
12537 12536 * since they will just fault on that virtual address once the mapping has
12538 12537 * been suspended.
12539 12538 */
12540 12539 #pragma weak sendmondo_in_recover
12541 12540
12542 12541 /* ARGSUSED */
12543 12542 static int
12544 12543 sfmmu_tsb_pre_relocator(caddr_t va, uint_t tsbsz, uint_t flags, void *tsbinfo)
12545 12544 {
12546 12545 struct tsb_info *tsbinfop = (struct tsb_info *)tsbinfo;
12547 12546 sfmmu_t *sfmmup = tsbinfop->tsb_sfmmu;
12548 12547 hatlock_t *hatlockp;
12549 12548 sf_scd_t *scdp;
12550 12549
12551 12550 if (flags != HAT_PRESUSPEND)
12552 12551 return (0);
12553 12552
12554 12553 /*
12555 12554 	 * If the tsb is a shared TSB with TSB_SHAREDCTX set, sfmmup must
12556 12555 	 * be a shared hat; in that case set the SCD's tsbinfo flag.
12557 12556 	 * If the tsb is not shared, sfmmup is a private hat; set
12558 12557 	 * its private tsbinfo flag.
12559 12558 */
12560 12559 hatlockp = sfmmu_hat_enter(sfmmup);
12561 12560 tsbinfop->tsb_flags |= TSB_RELOC_FLAG;
12562 12561
12563 12562 if (!(tsbinfop->tsb_flags & TSB_SHAREDCTX)) {
12564 12563 sfmmu_tsb_inv_ctx(sfmmup);
12565 12564 sfmmu_hat_exit(hatlockp);
12566 12565 } else {
12567 12566 /* release lock on the shared hat */
12568 12567 sfmmu_hat_exit(hatlockp);
12569 12568 /* sfmmup is a shared hat */
12570 12569 ASSERT(sfmmup->sfmmu_scdhat);
12571 12570 scdp = sfmmup->sfmmu_scdp;
12572 12571 ASSERT(scdp != NULL);
12573 12572 /* get private hat from the scd list */
12574 12573 mutex_enter(&scdp->scd_mutex);
12575 12574 sfmmup = scdp->scd_sf_list;
12576 12575 while (sfmmup != NULL) {
12577 12576 hatlockp = sfmmu_hat_enter(sfmmup);
12578 12577 /*
12579 12578 * We do not call sfmmu_tsb_inv_ctx here because
12580 12579 * sendmondo_in_recover check is only needed for
12581 12580 * sun4u.
12582 12581 */
12583 12582 sfmmu_invalidate_ctx(sfmmup);
12584 12583 sfmmu_hat_exit(hatlockp);
12585 12584 sfmmup = sfmmup->sfmmu_scd_link.next;
12586 12585
12587 12586 }
12588 12587 mutex_exit(&scdp->scd_mutex);
12589 12588 }
12590 12589 return (0);
12591 12590 }
12592 12591
12593 12592 static void
12594 12593 sfmmu_tsb_inv_ctx(sfmmu_t *sfmmup)
12595 12594 {
12596 12595 extern uint32_t sendmondo_in_recover;
12597 12596
12598 12597 ASSERT(sfmmu_hat_lock_held(sfmmup));
12599 12598
12600 12599 /*
12601 12600 * For Cheetah+ Erratum 25:
12602 12601 * Wait for any active recovery to finish. We can't risk
12603 12602 * relocating the TSB of the thread running mondo_recover_proc()
12604 12603 * since, if we did that, we would deadlock. The scenario we are
12605 12604 * trying to avoid is as follows:
12606 12605 *
12607 12606 * THIS CPU RECOVER CPU
12608 12607 * -------- -----------
12609 12608 * Begins recovery, walking through TSB
12610 12609 * hat_pagesuspend() TSB TTE
12611 12610 * TLB miss on TSB TTE, spins at TL1
12612 12611 * xt_sync()
12613 12612 * send_mondo_timeout()
12614 12613 * mondo_recover_proc()
12615 12614 * ((deadlocked))
12616 12615 *
12617 12616 * The second half of the workaround is that mondo_recover_proc()
12618 12617 * checks to see if the tsb_info has the RELOC flag set, and if it
12619 12618 * does, it skips over that TSB without ever touching tsbinfop->tsb_va
12620 12619 * and hence avoiding the TLB miss that could result in a deadlock.
12621 12620 */
12622 12621 if (&sendmondo_in_recover) {
12623 12622 membar_enter(); /* make sure RELOC flag visible */
12624 12623 while (sendmondo_in_recover) {
12625 12624 drv_usecwait(1);
12626 12625 membar_consumer();
12627 12626 }
12628 12627 }
12629 12628
12630 12629 sfmmu_invalidate_ctx(sfmmup);
12631 12630 }
12632 12631
12633 12632 /* ARGSUSED */
12634 12633 static int
12635 12634 sfmmu_tsb_post_relocator(caddr_t va, uint_t tsbsz, uint_t flags,
12636 12635 void *tsbinfo, pfn_t newpfn)
12637 12636 {
12638 12637 hatlock_t *hatlockp;
12639 12638 struct tsb_info *tsbinfop = (struct tsb_info *)tsbinfo;
12640 12639 sfmmu_t *sfmmup = tsbinfop->tsb_sfmmu;
12641 12640
12642 12641 if (flags != HAT_POSTUNSUSPEND)
12643 12642 return (0);
12644 12643
12645 12644 hatlockp = sfmmu_hat_enter(sfmmup);
12646 12645
12647 12646 SFMMU_STAT(sf_tsb_reloc);
12648 12647
12649 12648 /*
12650 12649 * The process may have swapped out while we were relocating one
12651 12650 * of its TSBs. If so, don't bother doing the setup since the
12652 12651 * process can't be using the memory anymore.
12653 12652 */
12654 12653 if ((tsbinfop->tsb_flags & TSB_SWAPPED) == 0) {
12655 12654 ASSERT(va == tsbinfop->tsb_va);
12656 12655 sfmmu_tsbinfo_setup_phys(tsbinfop, newpfn);
12657 12656
12658 12657 if (tsbinfop->tsb_flags & TSB_FLUSH_NEEDED) {
12659 12658 sfmmu_inv_tsb(tsbinfop->tsb_va,
12660 12659 TSB_BYTES(tsbinfop->tsb_szc));
12661 12660 tsbinfop->tsb_flags &= ~TSB_FLUSH_NEEDED;
12662 12661 }
12663 12662 }
12664 12663
12665 12664 membar_exit();
12666 12665 tsbinfop->tsb_flags &= ~TSB_RELOC_FLAG;
12667 12666 cv_broadcast(&sfmmup->sfmmu_tsb_cv);
12668 12667
12669 12668 sfmmu_hat_exit(hatlockp);
12670 12669
12671 12670 return (0);
12672 12671 }
12673 12672
12674 12673 /*
12675 12674 * Allocate and initialize a tsb_info structure. Note that we may or may not
12676 12675 * allocate a TSB here, depending on the flags passed in.
12677 12676 */
12678 12677 static int
12679 12678 sfmmu_tsbinfo_alloc(struct tsb_info **tsbinfopp, int tsb_szc, int tte_sz_mask,
12680 12679 uint_t flags, sfmmu_t *sfmmup)
12681 12680 {
12682 12681 int err;
12683 12682
12684 12683 *tsbinfopp = (struct tsb_info *)kmem_cache_alloc(
12685 12684 sfmmu_tsbinfo_cache, KM_SLEEP);
12686 12685
12687 12686 if ((err = sfmmu_init_tsbinfo(*tsbinfopp, tte_sz_mask,
12688 12687 tsb_szc, flags, sfmmup)) != 0) {
12689 12688 kmem_cache_free(sfmmu_tsbinfo_cache, *tsbinfopp);
12690 12689 SFMMU_STAT(sf_tsb_allocfail);
12691 12690 *tsbinfopp = NULL;
12692 12691 return (err);
12693 12692 }
12694 12693 SFMMU_STAT(sf_tsb_alloc);
12695 12694
12696 12695 /*
12697 12696 * Bump the TSB size counters for this TSB size.
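	 * (sfmmu_tsbsize_stat is a group of int counters treated as an
	 * array indexed by the TSB size code in the statement below.)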
12698 12697 */
12699 12698 (*(((int *)&sfmmu_tsbsize_stat) + tsb_szc))++;
12700 12699 return (0);
12701 12700 }
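
/*
 * Illustrative sketch, not part of the original source: one way a caller
 * could act on the return values of sfmmu_tsbinfo_alloc() -- back off to
 * smaller size codes on EAGAIN and force a minimum-size TSB otherwise.
 * The helper name and the back-off policy are assumptions for the example.
 */
static int
example_tsbinfo_alloc_backoff(struct tsb_info **tsbinfopp, int szc,
    int tte_sz_mask, sfmmu_t *sfmmup)
{
	int err;

	while ((err = sfmmu_tsbinfo_alloc(tsbinfopp, szc, tte_sz_mask,
	    TSB_ALLOC, sfmmup)) == EAGAIN && szc > TSB_MIN_SZCODE) {
		szc--;		/* retry with the next smaller TSB size */
	}
	if (err != 0) {
		/* last resort: minimum size; may sleep but will not fail */
		err = sfmmu_tsbinfo_alloc(tsbinfopp, TSB_MIN_SZCODE,
		    tte_sz_mask, TSB_FORCEALLOC, sfmmup);
	}
	return (err);
}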
12702 12701
12703 12702 static void
12704 12703 sfmmu_tsb_free(struct tsb_info *tsbinfo)
12705 12704 {
12706 12705 caddr_t tsbva = tsbinfo->tsb_va;
12707 12706 uint_t tsb_size = TSB_BYTES(tsbinfo->tsb_szc);
12708 12707 struct kmem_cache *kmem_cachep = tsbinfo->tsb_cache;
12709 12708 vmem_t *vmp = tsbinfo->tsb_vmp;
12710 12709
12711 12710 /*
12712 12711 * If we allocated this TSB from relocatable kernel memory, then we
12713 12712 * need to uninstall the callback handler.
12714 12713 */
12715 12714 if (tsbinfo->tsb_cache != sfmmu_tsb8k_cache) {
12716 12715 uintptr_t slab_mask;
12717 12716 caddr_t slab_vaddr;
12718 12717 page_t **ppl;
12719 12718 int ret;
12720 12719
12721 12720 ASSERT(tsb_size <= MMU_PAGESIZE4M || use_bigtsb_arena);
12722 12721 if (tsb_size > MMU_PAGESIZE4M)
12723 12722 slab_mask = ~((uintptr_t)bigtsb_slab_mask) << PAGESHIFT;
12724 12723 else
12725 12724 slab_mask = ~((uintptr_t)tsb_slab_mask) << PAGESHIFT;
12726 12725 slab_vaddr = (caddr_t)((uintptr_t)tsbva & slab_mask);
12727 12726
12728 12727 ret = as_pagelock(&kas, &ppl, slab_vaddr, PAGESIZE, S_WRITE);
12729 12728 ASSERT(ret == 0);
12730 12729 hat_delete_callback(tsbva, (uint_t)tsb_size, (void *)tsbinfo,
12731 12730 0, NULL);
12732 12731 as_pageunlock(&kas, ppl, slab_vaddr, PAGESIZE, S_WRITE);
12733 12732 }
12734 12733
12735 12734 if (kmem_cachep != NULL) {
12736 12735 kmem_cache_free(kmem_cachep, tsbva);
12737 12736 } else {
12738 12737 vmem_xfree(vmp, (void *)tsbva, tsb_size);
12739 12738 }
12740 12739 tsbinfo->tsb_va = (caddr_t)0xbad00bad;
12741 12740 atomic_add_64(&tsb_alloc_bytes, -(int64_t)tsb_size);
12742 12741 }
12743 12742
12744 12743 static void
12745 12744 sfmmu_tsbinfo_free(struct tsb_info *tsbinfo)
12746 12745 {
12747 12746 if ((tsbinfo->tsb_flags & TSB_SWAPPED) == 0) {
12748 12747 sfmmu_tsb_free(tsbinfo);
12749 12748 }
12750 12749 kmem_cache_free(sfmmu_tsbinfo_cache, tsbinfo);
12751 12750
12752 12751 }
12753 12752
12754 12753 /*
12755 12754  * Set up all the references to physical memory for this tsbinfo.
12756 12755 * The underlying page(s) must be locked.
12757 12756 */
12758 12757 static void
12759 12758 sfmmu_tsbinfo_setup_phys(struct tsb_info *tsbinfo, pfn_t pfn)
12760 12759 {
12761 12760 ASSERT(pfn != PFN_INVALID);
12762 12761 ASSERT(pfn == va_to_pfn(tsbinfo->tsb_va));
12763 12762
12764 12763 #ifndef sun4v
12765 12764 if (tsbinfo->tsb_szc == 0) {
12766 12765 sfmmu_memtte(&tsbinfo->tsb_tte, pfn,
12767 12766 PROT_WRITE|PROT_READ, TTE8K);
12768 12767 } else {
12769 12768 /*
12770 12769 * Round down PA and use a large mapping; the handlers will
12771 12770 * compute the TSB pointer at the correct offset into the
12772 12771 * big virtual page. NOTE: this assumes all TSBs larger
12773 12772 * than 8K must come from physically contiguous slabs of
12774 12773 * size tsb_slab_size.
12775 12774 */
12776 12775 sfmmu_memtte(&tsbinfo->tsb_tte, pfn & ~tsb_slab_mask,
12777 12776 PROT_WRITE|PROT_READ, tsb_slab_ttesz);
12778 12777 }
12779 12778 tsbinfo->tsb_pa = ptob(pfn);
12780 12779
12781 12780 TTE_SET_LOCKED(&tsbinfo->tsb_tte); /* lock the tte into dtlb */
12782 12781 TTE_SET_MOD(&tsbinfo->tsb_tte); /* enable writes */
12783 12782
12784 12783 ASSERT(TTE_IS_PRIVILEGED(&tsbinfo->tsb_tte));
12785 12784 ASSERT(TTE_IS_LOCKED(&tsbinfo->tsb_tte));
12786 12785 #else /* sun4v */
12787 12786 tsbinfo->tsb_pa = ptob(pfn);
12788 12787 #endif /* sun4v */
12789 12788 }
12790 12789
12791 12790
12792 12791 /*
12793 12792 * Returns zero on success, ENOMEM if over the high water mark,
12794 12793 * or EAGAIN if the caller needs to retry with a smaller TSB
12795 12794 * size (or specify TSB_FORCEALLOC if the allocation can't fail).
12796 12795 *
12797 12796 * This call cannot fail to allocate a TSB if TSB_FORCEALLOC
12798 12797 * is specified and the TSB requested is PAGESIZE, though it
12799 12798 * may sleep waiting for memory if sufficient memory is not
12800 12799 * available.
12801 12800 */
12802 12801 static int
12803 12802 sfmmu_init_tsbinfo(struct tsb_info *tsbinfo, int tteszmask,
12804 12803 int tsbcode, uint_t flags, sfmmu_t *sfmmup)
12805 12804 {
12806 12805 caddr_t vaddr = NULL;
12807 12806 caddr_t slab_vaddr;
12808 12807 uintptr_t slab_mask;
12809 12808 int tsbbytes = TSB_BYTES(tsbcode);
12810 12809 int lowmem = 0;
12811 12810 struct kmem_cache *kmem_cachep = NULL;
12812 12811 vmem_t *vmp = NULL;
12813 12812 lgrp_id_t lgrpid = LGRP_NONE;
12814 12813 pfn_t pfn;
12815 12814 uint_t cbflags = HAC_SLEEP;
12816 12815 page_t **pplist;
12817 12816 int ret;
12818 12817
12819 12818 ASSERT(tsbbytes <= MMU_PAGESIZE4M || use_bigtsb_arena);
12820 12819 if (tsbbytes > MMU_PAGESIZE4M)
12821 12820 slab_mask = ~((uintptr_t)bigtsb_slab_mask) << PAGESHIFT;
12822 12821 else
12823 12822 slab_mask = ~((uintptr_t)tsb_slab_mask) << PAGESHIFT;
12824 12823
12825 12824 if (flags & (TSB_FORCEALLOC | TSB_SWAPIN | TSB_GROW | TSB_SHRINK))
12826 12825 flags |= TSB_ALLOC;
12827 12826
12828 12827 ASSERT((flags & TSB_FORCEALLOC) == 0 || tsbcode == TSB_MIN_SZCODE);
12829 12828
12830 12829 tsbinfo->tsb_sfmmu = sfmmup;
12831 12830
12832 12831 /*
12833 12832 * If not allocating a TSB, set up the tsbinfo, set TSB_SWAPPED, and
12834 12833 * return.
12835 12834 */
12836 12835 if ((flags & TSB_ALLOC) == 0) {
12837 12836 tsbinfo->tsb_szc = tsbcode;
12838 12837 tsbinfo->tsb_ttesz_mask = tteszmask;
12839 12838 tsbinfo->tsb_va = (caddr_t)0xbadbadbeef;
12840 12839 tsbinfo->tsb_pa = -1;
12841 12840 tsbinfo->tsb_tte.ll = 0;
12842 12841 tsbinfo->tsb_next = NULL;
12843 12842 tsbinfo->tsb_flags = TSB_SWAPPED;
12844 12843 tsbinfo->tsb_cache = NULL;
12845 12844 tsbinfo->tsb_vmp = NULL;
12846 12845 return (0);
12847 12846 }
12848 12847
12849 12848 #ifdef DEBUG
12850 12849 /*
12851 12850 * For debugging:
12852 12851 * Randomly force allocation failures every tsb_alloc_mtbf
12853 12852 * tries if TSB_FORCEALLOC is not specified. This will
12854 12853 * return ENOMEM if tsb_alloc_mtbf is odd, or EAGAIN if
12855 12854 * it is even, to allow testing of both failure paths...
12856 12855 */
12857 12856 if (tsb_alloc_mtbf && ((flags & TSB_FORCEALLOC) == 0) &&
12858 12857 (tsb_alloc_count++ == tsb_alloc_mtbf)) {
12859 12858 tsb_alloc_count = 0;
12860 12859 tsb_alloc_fail_mtbf++;
12861 12860 return ((tsb_alloc_mtbf & 1)? ENOMEM : EAGAIN);
12862 12861 }
12863 12862 #endif /* DEBUG */
12864 12863
12865 12864 /*
12866 12865 * Enforce high water mark if we are not doing a forced allocation
12867 12866 * and are not shrinking a process' TSB.
12868 12867 */
12869 12868 if ((flags & TSB_SHRINK) == 0 &&
12870 12869 (tsbbytes + tsb_alloc_bytes) > tsb_alloc_hiwater) {
12871 12870 if ((flags & TSB_FORCEALLOC) == 0)
12872 12871 return (ENOMEM);
12873 12872 lowmem = 1;
12874 12873 }
12875 12874
12876 12875 /*
12877 12876 * Allocate from the correct location based upon the size of the TSB
12878 12877 * compared to the base page size, and what memory conditions dictate.
12879 12878 * Note we always do nonblocking allocations from the TSB arena since
12880 12879 * we don't want memory fragmentation to cause processes to block
12881 12880 * indefinitely waiting for memory; until the kernel algorithms that
12882 12881 * coalesce large pages are improved this is our best option.
12883 12882 *
12884 12883 * Algorithm:
12885 12884 * If allocating a "large" TSB (>8K), allocate from the
12886 12885 * appropriate kmem_tsb_default_arena vmem arena
12887 12886 * else if low on memory or the TSB_FORCEALLOC flag is set or
12888 12887 * tsb_forceheap is set
12889 12888 * Allocate from kernel heap via sfmmu_tsb8k_cache with
12890 12889 * KM_SLEEP (never fails)
12891 12890 * else
12892 12891 * Allocate from appropriate sfmmu_tsb_cache with
12893 12892 * KM_NOSLEEP
12894 12893 * endif
12895 12894 */
12896 12895 if (tsb_lgrp_affinity)
12897 12896 lgrpid = lgrp_home_id(curthread);
12898 12897 if (lgrpid == LGRP_NONE)
12899 12898 lgrpid = 0; /* use lgrp of boot CPU */
12900 12899
12901 12900 if (tsbbytes > MMU_PAGESIZE) {
12902 12901 if (tsbbytes > MMU_PAGESIZE4M) {
12903 12902 vmp = kmem_bigtsb_default_arena[lgrpid];
12904 12903 vaddr = (caddr_t)vmem_xalloc(vmp, tsbbytes, tsbbytes,
12905 12904 0, 0, NULL, NULL, VM_NOSLEEP);
12906 12905 } else {
12907 12906 vmp = kmem_tsb_default_arena[lgrpid];
12908 12907 vaddr = (caddr_t)vmem_xalloc(vmp, tsbbytes, tsbbytes,
12909 12908 0, 0, NULL, NULL, VM_NOSLEEP);
12910 12909 }
12911 12910 #ifdef DEBUG
12912 12911 } else if (lowmem || (flags & TSB_FORCEALLOC) || tsb_forceheap) {
12913 12912 #else /* !DEBUG */
12914 12913 } else if (lowmem || (flags & TSB_FORCEALLOC)) {
12915 12914 #endif /* DEBUG */
12916 12915 kmem_cachep = sfmmu_tsb8k_cache;
12917 12916 vaddr = (caddr_t)kmem_cache_alloc(kmem_cachep, KM_SLEEP);
12918 12917 ASSERT(vaddr != NULL);
12919 12918 } else {
12920 12919 kmem_cachep = sfmmu_tsb_cache[lgrpid];
12921 12920 vaddr = (caddr_t)kmem_cache_alloc(kmem_cachep, KM_NOSLEEP);
12922 12921 }
12923 12922
12924 12923 tsbinfo->tsb_cache = kmem_cachep;
12925 12924 tsbinfo->tsb_vmp = vmp;
12926 12925
12927 12926 if (vaddr == NULL) {
12928 12927 return (EAGAIN);
12929 12928 }
12930 12929
12931 12930 atomic_add_64(&tsb_alloc_bytes, (int64_t)tsbbytes);
12932 12931 kmem_cachep = tsbinfo->tsb_cache;
12933 12932
12934 12933 /*
12935 12934 * If we are allocating from outside the cage, then we need to
12936 12935 * register a relocation callback handler. Note that for now
12937 12936 * since pseudo mappings always hang off of the slab's root page,
12938 12937 * we need only lock the first 8K of the TSB slab. This is a bit
12939 12938 * hacky but it is good for performance.
12940 12939 */
12941 12940 if (kmem_cachep != sfmmu_tsb8k_cache) {
12942 12941 slab_vaddr = (caddr_t)((uintptr_t)vaddr & slab_mask);
12943 12942 ret = as_pagelock(&kas, &pplist, slab_vaddr, PAGESIZE, S_WRITE);
12944 12943 ASSERT(ret == 0);
12945 12944 ret = hat_add_callback(sfmmu_tsb_cb_id, vaddr, (uint_t)tsbbytes,
12946 12945 cbflags, (void *)tsbinfo, &pfn, NULL);
12947 12946
12948 12947 /*
12949 12948 * Need to free up resources if we could not successfully
12950 12949 * add the callback function and return an error condition.
12951 12950 */
12952 12951 if (ret != 0) {
12953 12952 if (kmem_cachep) {
12954 12953 kmem_cache_free(kmem_cachep, vaddr);
12955 12954 } else {
12956 12955 vmem_xfree(vmp, (void *)vaddr, tsbbytes);
12957 12956 }
12958 12957 as_pageunlock(&kas, pplist, slab_vaddr, PAGESIZE,
12959 12958 S_WRITE);
12960 12959 return (EAGAIN);
12961 12960 }
12962 12961 } else {
12963 12962 /*
12964 12963 * Since allocation of 8K TSBs from heap is rare and occurs
12965 12964 * during memory pressure we allocate them from permanent
12966 12965 * memory rather than using callbacks to get the PFN.
12967 12966 */
12968 12967 pfn = hat_getpfnum(kas.a_hat, vaddr);
12969 12968 }
12970 12969
12971 12970 tsbinfo->tsb_va = vaddr;
12972 12971 tsbinfo->tsb_szc = tsbcode;
12973 12972 tsbinfo->tsb_ttesz_mask = tteszmask;
12974 12973 tsbinfo->tsb_next = NULL;
12975 12974 tsbinfo->tsb_flags = 0;
12976 12975
12977 12976 sfmmu_tsbinfo_setup_phys(tsbinfo, pfn);
12978 12977
12979 12978 sfmmu_inv_tsb(vaddr, tsbbytes);
12980 12979
12981 12980 if (kmem_cachep != sfmmu_tsb8k_cache) {
12982 12981 as_pageunlock(&kas, pplist, slab_vaddr, PAGESIZE, S_WRITE);
12983 12982 }
12984 12983
12985 12984 return (0);
12986 12985 }
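
/*
 * Editor's note: the block below is an illustrative, standalone sketch of the
 * arena-selection policy spelled out in the "Algorithm" comment above; it is
 * not part of hat_sfmmu.c.  The names (tsb_source_t, pick_tsb_source()) and
 * the hard-coded 8K/4M thresholds are hypothetical stand-ins for
 * MMU_PAGESIZE, MMU_PAGESIZE4M, the per-lgroup kmem/vmem arenas and the
 * TSB_FORCEALLOC handling; the DEBUG-only tsb_forceheap knob is omitted.
 */
#include <stddef.h>

typedef enum {
        TSB_SRC_BIGTSB_ARENA,   /* per-lgroup vmem arena for TSBs > 4M */
        TSB_SRC_TSB_ARENA,      /* per-lgroup vmem arena for 8K < size <= 4M */
        TSB_SRC_8K_HEAP,        /* kernel heap cache, sleeping alloc, never fails */
        TSB_SRC_8K_CACHE        /* per-lgroup kmem cache, nonblocking alloc */
} tsb_source_t;

tsb_source_t
pick_tsb_source(size_t tsbbytes, int lowmem, int forcealloc)
{
        const size_t pagesize = 8192;              /* stands in for MMU_PAGESIZE */
        const size_t pagesize4m = 4 * 1024 * 1024; /* stands in for MMU_PAGESIZE4M */

        if (tsbbytes > pagesize) {
                /* "large" TSBs always come from a vmem arena, nonblocking */
                return (tsbbytes > pagesize4m ?
                    TSB_SRC_BIGTSB_ARENA : TSB_SRC_TSB_ARENA);
        }
        if (lowmem || forcealloc) {
                /* must not fail: fall back to the kernel heap, sleeping */
                return (TSB_SRC_8K_HEAP);
        }
        /* common case: nonblocking allocation from the per-lgroup 8K cache */
        return (TSB_SRC_8K_CACHE);
}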
12987 12986
12988 12987 /*
12989 12988 * Initialize per cpu tsb and per cpu tsbmiss_area
12990 12989 */
12991 12990 void
12992 12991 sfmmu_init_tsbs(void)
12993 12992 {
12994 12993 int i;
12995 12994 struct tsbmiss *tsbmissp;
12996 12995 struct kpmtsbm *kpmtsbmp;
12997 12996 #ifndef sun4v
12998 12997 extern int dcache_line_mask;
12999 12998 #endif /* sun4v */
13000 12999 extern uint_t vac_colors;
13001 13000
13002 13001 /*
13003 13002 * Init. tsb miss area.
13004 13003 */
13005 13004 tsbmissp = tsbmiss_area;
13006 13005
13007 13006 for (i = 0; i < NCPU; tsbmissp++, i++) {
13008 13007 /*
13009 13008 * initialize the tsbmiss area.
13010 13009 * Do this for all possible CPUs as some may be added
13011 13010 * while the system is running. There is no cost to this.
13012 13011 */
13013 13012 tsbmissp->ksfmmup = ksfmmup;
13014 13013 #ifndef sun4v
13015 13014 tsbmissp->dcache_line_mask = (uint16_t)dcache_line_mask;
13016 13015 #endif /* sun4v */
13017 13016 tsbmissp->khashstart =
13018 13017 (struct hmehash_bucket *)va_to_pa((caddr_t)khme_hash);
13019 13018 tsbmissp->uhashstart =
13020 13019 (struct hmehash_bucket *)va_to_pa((caddr_t)uhme_hash);
13021 13020 tsbmissp->khashsz = khmehash_num;
13022 13021 tsbmissp->uhashsz = uhmehash_num;
13023 13022 }
13024 13023
13025 13024 sfmmu_tsb_cb_id = hat_register_callback('T'<<16 | 'S' << 8 | 'B',
13026 13025 sfmmu_tsb_pre_relocator, sfmmu_tsb_post_relocator, NULL, 0);
13027 13026
13028 13027 if (kpm_enable == 0)
13029 13028 return;
13030 13029
13031 13030 /* -- Begin KPM specific init -- */
13032 13031
13033 13032 if (kpm_smallpages) {
13034 13033 /*
13035 13034 * If we're using base pagesize pages for seg_kpm
13036 13035 * mappings, we use the kernel TSB since we can't afford
13037 13036 * to allocate a second huge TSB for these mappings.
13038 13037 */
13039 13038 kpm_tsbbase = ktsb_phys? ktsb_pbase : (uint64_t)ktsb_base;
13040 13039 kpm_tsbsz = ktsb_szcode;
13041 13040 kpmsm_tsbbase = kpm_tsbbase;
13042 13041 kpmsm_tsbsz = kpm_tsbsz;
13043 13042 } else {
13044 13043 /*
13045 13044 * In VAC conflict case, just put the entries in the
13046 13045 * kernel 8K indexed TSB for now so we can find them.
13047 13046 * This could really be changed in the future if we feel
13048 13047 * the need...
13049 13048 */
13050 13049 kpmsm_tsbbase = ktsb_phys? ktsb_pbase : (uint64_t)ktsb_base;
13051 13050 kpmsm_tsbsz = ktsb_szcode;
13052 13051 kpm_tsbbase = ktsb_phys? ktsb4m_pbase : (uint64_t)ktsb4m_base;
13053 13052 kpm_tsbsz = ktsb4m_szcode;
13054 13053 }
13055 13054
13056 13055 kpmtsbmp = kpmtsbm_area;
13057 13056 for (i = 0; i < NCPU; kpmtsbmp++, i++) {
13058 13057 /*
13059 13058 * Initialize the kpmtsbm area.
13060 13059 * Do this for all possible CPUs as some may be added
13061 13060 * while the system is running. There is no cost to this.
13062 13061 */
13063 13062 kpmtsbmp->vbase = kpm_vbase;
13064 13063 kpmtsbmp->vend = kpm_vbase + kpm_size * vac_colors;
13065 13064 kpmtsbmp->sz_shift = kpm_size_shift;
13066 13065 kpmtsbmp->kpmp_shift = kpmp_shift;
13067 13066 kpmtsbmp->kpmp2pshft = (uchar_t)kpmp2pshft;
13068 13067 if (kpm_smallpages == 0) {
13069 13068 kpmtsbmp->kpmp_table_sz = kpmp_table_sz;
13070 13069 kpmtsbmp->kpmp_tablepa = va_to_pa(kpmp_table);
13071 13070 } else {
13072 13071 kpmtsbmp->kpmp_table_sz = kpmp_stable_sz;
13073 13072 kpmtsbmp->kpmp_tablepa = va_to_pa(kpmp_stable);
13074 13073 }
13075 13074 kpmtsbmp->msegphashpa = va_to_pa(memseg_phash);
13076 13075 kpmtsbmp->flags = KPMTSBM_ENABLE_FLAG;
13077 13076 #ifdef DEBUG
13078 13077 kpmtsbmp->flags |= (kpm_tsbmtl) ? KPMTSBM_TLTSBM_FLAG : 0;
13079 13078 #endif /* DEBUG */
13080 13079 if (ktsb_phys)
13081 13080 kpmtsbmp->flags |= KPMTSBM_TSBPHYS_FLAG;
13082 13081 }
13083 13082
13084 13083 /* -- End KPM specific init -- */
13085 13084 }
13086 13085
13087 13086 /* Avoid using sfmmu_tsbinfo_alloc() to avoid kmem_alloc - no real reason */
13088 13087 struct tsb_info ktsb_info[2];
13089 13088
13090 13089 /*
13091 13090 * Called from hat_kern_setup() to setup the tsb_info for ksfmmup.
13092 13091 */
13093 13092 void
13094 13093 sfmmu_init_ktsbinfo()
13095 13094 {
13096 13095 ASSERT(ksfmmup != NULL);
13097 13096 ASSERT(ksfmmup->sfmmu_tsb == NULL);
13098 13097 /*
13099 13098 * Allocate tsbinfos for kernel and copy in data
13100 13099 * to make debugging and sun4v setup easier.
13101 13100 */
13102 13101 ktsb_info[0].tsb_sfmmu = ksfmmup;
13103 13102 ktsb_info[0].tsb_szc = ktsb_szcode;
13104 13103 ktsb_info[0].tsb_ttesz_mask = TSB8K|TSB64K|TSB512K;
13105 13104 ktsb_info[0].tsb_va = ktsb_base;
13106 13105 ktsb_info[0].tsb_pa = ktsb_pbase;
13107 13106 ktsb_info[0].tsb_flags = 0;
13108 13107 ktsb_info[0].tsb_tte.ll = 0;
13109 13108 ktsb_info[0].tsb_cache = NULL;
13110 13109
13111 13110 ktsb_info[1].tsb_sfmmu = ksfmmup;
13112 13111 ktsb_info[1].tsb_szc = ktsb4m_szcode;
13113 13112 ktsb_info[1].tsb_ttesz_mask = TSB4M;
13114 13113 ktsb_info[1].tsb_va = ktsb4m_base;
13115 13114 ktsb_info[1].tsb_pa = ktsb4m_pbase;
13116 13115 ktsb_info[1].tsb_flags = 0;
13117 13116 ktsb_info[1].tsb_tte.ll = 0;
13118 13117 ktsb_info[1].tsb_cache = NULL;
13119 13118
13120 13119 /* Link them into ksfmmup. */
13121 13120 ktsb_info[0].tsb_next = &ktsb_info[1];
13122 13121 ktsb_info[1].tsb_next = NULL;
13123 13122 ksfmmup->sfmmu_tsb = &ktsb_info[0];
13124 13123
13125 13124 sfmmu_setup_tsbinfo(ksfmmup);
13126 13125 }
13127 13126
13128 13127 /*
13129 13128 * Cache the last value returned from va_to_pa(). If the VA specified
13130 13129 * in the current call to cached_va_to_pa() maps to the same Page (as the
13131 13130 * previous call to cached_va_to_pa()), then compute the PA using
13132 13131 * cached info, else call va_to_pa().
13133 13132 *
13134 13133 * Note: this function is neither MT-safe nor consistent in the presence
13135 13134 * of multiple, interleaved threads. This function was created to enable
13136 13135 * an optimization used during boot (at a point when there's only one thread
13137 13136 * executing on the "boot CPU", and before startup_vm() has been called).
13138 13137 */
13139 13138 static uint64_t
13140 13139 cached_va_to_pa(void *vaddr)
13141 13140 {
13142 13141 static uint64_t prev_vaddr_base = 0;
13143 13142 static uint64_t prev_pfn = 0;
13144 13143
13145 13144 if ((((uint64_t)vaddr) & MMU_PAGEMASK) == prev_vaddr_base) {
13146 13145 return (prev_pfn | ((uint64_t)vaddr & MMU_PAGEOFFSET));
13147 13146 } else {
13148 13147 uint64_t pa = va_to_pa(vaddr);
13149 13148
13150 13149 if (pa != ((uint64_t)-1)) {
13151 13150 /*
13152 13151 * Computed physical address is valid. Cache its
13153 13152 * related info for the next cached_va_to_pa() call.
13154 13153 */
13155 13154 prev_pfn = pa & MMU_PAGEMASK;
13156 13155 prev_vaddr_base = ((uint64_t)vaddr) & MMU_PAGEMASK;
13157 13156 }
13158 13157
13159 13158 return (pa);
13160 13159 }
13161 13160 }
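
/*
 * Editor's note: an illustrative, standalone sketch (not part of hat_sfmmu.c)
 * of the one-entry, page-granular memoization used by cached_va_to_pa()
 * above.  translate_page() is a hypothetical stand-in for va_to_pa() (here an
 * identity mapping so the sketch stays self-contained), and the SK_PAGE*
 * macros play the role of MMU_PAGEMASK/MMU_PAGEOFFSET.  Like the original,
 * this is deliberately not MT-safe; unlike the original, it does not handle a
 * failed translation (va_to_pa() returning -1), which the real code simply
 * declines to cache.
 */
#include <stdint.h>

#define SK_PAGESHIFT    13
#define SK_PAGEOFFSET   ((1ULL << SK_PAGESHIFT) - 1)
#define SK_PAGEMASK     (~SK_PAGEOFFSET)

static uint64_t
translate_page(uint64_t va_base)        /* hypothetical translation routine */
{
        return (va_base);
}

uint64_t
cached_translate(uint64_t va)
{
        static uint64_t prev_va_base = 0;
        static uint64_t prev_pa_base = 0;

        /* hit: same page as the previous call, reuse the cached base */
        if (prev_va_base != 0 && (va & SK_PAGEMASK) == prev_va_base)
                return (prev_pa_base | (va & SK_PAGEOFFSET));

        /* miss: translate the page base and remember it for the next call */
        prev_va_base = va & SK_PAGEMASK;
        prev_pa_base = translate_page(prev_va_base) & SK_PAGEMASK;
        return (prev_pa_base | (va & SK_PAGEOFFSET));
}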
13162 13161
13163 13162 /*
13164 13163 * Carve up our nucleus hblk region. We may allocate more hblks than
13165 13164 * asked due to rounding errors but we are guaranteed to have at least
13166 13165 * enough space to allocate the requested number of hblk8's and hblk1's.
13167 13166 */
13168 13167 void
13169 13168 sfmmu_init_nucleus_hblks(caddr_t addr, size_t size, int nhblk8, int nhblk1)
13170 13169 {
13171 13170 struct hme_blk *hmeblkp;
13172 13171 size_t hme8blk_sz, hme1blk_sz;
13173 13172 size_t i;
13174 13173 size_t hblk8_bound;
13175 13174 ulong_t j = 0, k = 0;
13176 13175
13177 13176 ASSERT(addr != NULL && size != 0);
13178 13177
13179 13178 /* Need to use proper structure alignment */
13180 13179 hme8blk_sz = roundup(HME8BLK_SZ, sizeof (int64_t));
13181 13180 hme1blk_sz = roundup(HME1BLK_SZ, sizeof (int64_t));
13182 13181
13183 13182 nucleus_hblk8.list = (void *)addr;
13184 13183 nucleus_hblk8.index = 0;
13185 13184
13186 13185 /*
13187 13186 * Use as much memory as possible for hblk8's since we
13188 13187 * expect all bop_alloc'ed memory to be allocated in 8k chunks.
13189 13188 * We need to hold back enough space for the hblk1's which
13190 13189 * we'll allocate next.
13191 13190 */
13192 13191 hblk8_bound = size - (nhblk1 * hme1blk_sz) - hme8blk_sz;
13193 13192 for (i = 0; i <= hblk8_bound; i += hme8blk_sz, j++) {
13194 13193 hmeblkp = (struct hme_blk *)addr;
13195 13194 addr += hme8blk_sz;
13196 13195 hmeblkp->hblk_nuc_bit = 1;
13197 13196 hmeblkp->hblk_nextpa = cached_va_to_pa((caddr_t)hmeblkp);
13198 13197 }
13199 13198 nucleus_hblk8.len = j;
13200 13199 ASSERT(j >= nhblk8);
13201 13200 SFMMU_STAT_ADD(sf_hblk8_ncreate, j);
13202 13201
13203 13202 nucleus_hblk1.list = (void *)addr;
13204 13203 nucleus_hblk1.index = 0;
13205 13204 for (; i <= (size - hme1blk_sz); i += hme1blk_sz, k++) {
13206 13205 hmeblkp = (struct hme_blk *)addr;
13207 13206 addr += hme1blk_sz;
13208 13207 hmeblkp->hblk_nuc_bit = 1;
13209 13208 hmeblkp->hblk_nextpa = cached_va_to_pa((caddr_t)hmeblkp);
13210 13209 }
13211 13210 ASSERT(k >= nhblk1);
13212 13211 nucleus_hblk1.len = k;
13213 13212 SFMMU_STAT_ADD(sf_hblk1_ncreate, k);
13214 13213 }
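
/*
 * Editor's note: an illustrative, standalone sketch (not part of hat_sfmmu.c)
 * of the carving scheme used by sfmmu_init_nucleus_hblks() above: fill the
 * region with big blocks while reserving enough tail space for the requested
 * number of small blocks, then carve whatever remains into small blocks.
 * carve_region() and its parameters are hypothetical; the real code also
 * rounds the block sizes up to int64_t alignment, tags each block as
 * nucleus-resident and records its physical address.  The sketch assumes
 * 'size' is large enough for at least one big block plus the reserved small
 * blocks, as the caller guarantees in the real code.
 */
#include <stddef.h>

void
carve_region(size_t size, size_t bigsz, size_t smallsz, size_t nsmall,
    size_t *nbigp, size_t *nsmallp)
{
        size_t off, big_bound;
        size_t nbig = 0, nsm = 0;

        /* leave room at the tail for the requested number of small blocks */
        big_bound = size - (nsmall * smallsz) - bigsz;
        for (off = 0; off <= big_bound; off += bigsz)
                nbig++;                 /* a big block lives at offset 'off' */

        /* carve the remainder into small blocks */
        for (; off <= size - smallsz; off += smallsz)
                nsm++;                  /* a small block lives at offset 'off' */

        *nbigp = nbig;                  /* may exceed the number asked for */
        *nsmallp = nsm;                 /* guaranteed to be >= nsmall */
}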
13215 13214
13216 13215 /*
13217 13216 * This function is currently not supported on this platform. For what
13218 13217 * it's supposed to do, see hat.c and hat_srmmu.c
13219 13218 */
13220 13219 /* ARGSUSED */
13221 13220 faultcode_t
13222 13221 hat_softlock(struct hat *hat, caddr_t addr, size_t *lenp, page_t **ppp,
13223 13222 uint_t flags)
13224 13223 {
13225 13224 return (FC_NOSUPPORT);
13226 13225 }
13227 13226
13228 13227 /*
13229 13228 * Searches the mapping list of the page for a mapping of the same size. If not
13230 13229 * found, the corresponding bit is cleared in the p_index field. When large
13231 13230 * pages are more prevalent in the system, we can maintain the mapping list
13232 13231 * in order and we don't have to traverse the list each time. Just check the
13233 13232 * next and prev entries, and if both are of different size, we clear the bit.
13234 13233 */
13235 13234 static void
13236 13235 sfmmu_rm_large_mappings(page_t *pp, int ttesz)
13237 13236 {
13238 13237 struct sf_hment *sfhmep;
13239 - struct hme_blk *hmeblkp;
13240 13238 int index;
13241 13239 pgcnt_t npgs;
13242 13240
13243 13241 ASSERT(ttesz > TTE8K);
13244 13242
13245 13243 ASSERT(sfmmu_mlist_held(pp));
13246 13244
13247 13245 ASSERT(PP_ISMAPPED_LARGE(pp));
13248 13246
13249 13247 /*
13250 13248 * Traverse the mapping list looking for another mapping of the same size,
13251 13249 * since we only want to clear the index field if all mappings of
13252 13250 * that size are gone.
13253 13251 */
13254 13252
13255 13253 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) {
13256 13254 if (IS_PAHME(sfhmep))
13257 13255 continue;
13258 - hmeblkp = sfmmu_hmetohblk(sfhmep);
13259 13256 if (hme_size(sfhmep) == ttesz) {
13260 13257 /*
13261 13258 * another mapping of the same size. don't clear index.
13262 13259 */
13263 13260 return;
13264 13261 }
13265 13262 }
13266 13263
13267 13264 /*
13268 13265 * Clear the p_index bit for large page.
13269 13266 */
13270 13267 index = PAGESZ_TO_INDEX(ttesz);
13271 13268 npgs = TTEPAGES(ttesz);
13272 13269 while (npgs-- > 0) {
13273 13270 ASSERT(pp->p_index & index);
13274 13271 pp->p_index &= ~index;
13275 13272 pp = PP_PAGENEXT(pp);
13276 13273 }
13277 13274 }
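
/*
 * Editor's note: an illustrative, standalone sketch (not part of hat_sfmmu.c)
 * of the bookkeeping performed by sfmmu_rm_large_mappings() above.  Each base
 * page carries a bitmask with one bit per supported mapping size; once the
 * last mapping of a given large size is gone, that size's bit is cleared on
 * every constituent base page.  struct sk_page, pages_per_szc() and
 * clear_large_index() are hypothetical stand-ins for page_t.p_index,
 * TTEPAGES() and the clearing loop, assuming the usual sun4u progression of
 * one size code per factor of eight (8K, 64K, 512K, 4M, ...).
 */
#include <stdint.h>

struct sk_page {
        uint16_t        index;          /* plays the role of page_t.p_index */
};

/* number of 8K base pages covered by one mapping of size code 'szc' */
static unsigned long
pages_per_szc(int szc)
{
        return (1UL << (3 * szc));
}

void
clear_large_index(struct sk_page *pp, int szc)
{
        uint16_t bit = (uint16_t)(1U << szc);   /* plays PAGESZ_TO_INDEX() */
        unsigned long npgs = pages_per_szc(szc);

        while (npgs-- > 0) {
                pp->index &= (uint16_t)~bit;
                pp++;                   /* next constituent base page */
        }
}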
13278 13275
13279 13276 /*
13280 13277 * return supported features
13281 13278 */
13282 13279 /* ARGSUSED */
13283 13280 int
13284 13281 hat_supported(enum hat_features feature, void *arg)
13285 13282 {
13286 13283 switch (feature) {
13287 13284 case HAT_SHARED_PT:
13288 13285 case HAT_DYNAMIC_ISM_UNMAP:
13289 13286 case HAT_VMODSORT:
13290 13287 return (1);
13291 13288 case HAT_SHARED_REGIONS:
13292 13289 if (shctx_on)
13293 13290 return (1);
13294 13291 else
13295 13292 return (0);
13296 13293 default:
13297 13294 return (0);
13298 13295 }
13299 13296 }
13300 13297
13301 13298 void
13302 13299 hat_enter(struct hat *hat)
13303 13300 {
13304 13301 hatlock_t *hatlockp;
13305 13302
13306 13303 if (hat != ksfmmup) {
13307 13304 hatlockp = TSB_HASH(hat);
13308 13305 mutex_enter(HATLOCK_MUTEXP(hatlockp));
13309 13306 }
13310 13307 }
13311 13308
13312 13309 void
13313 13310 hat_exit(struct hat *hat)
13314 13311 {
13315 13312 hatlock_t *hatlockp;
13316 13313
13317 13314 if (hat != ksfmmup) {
13318 13315 hatlockp = TSB_HASH(hat);
13319 13316 mutex_exit(HATLOCK_MUTEXP(hatlockp));
13320 13317 }
13321 13318 }
13322 13319
13323 13320 /*ARGSUSED*/
13324 13321 void
13325 13322 hat_reserve(struct as *as, caddr_t addr, size_t len)
13326 13323 {
13327 13324 }
13328 13325
13329 13326 static void
13330 13327 hat_kstat_init(void)
13331 13328 {
13332 13329 kstat_t *ksp;
13333 13330
13334 13331 ksp = kstat_create("unix", 0, "sfmmu_global_stat", "hat",
13335 13332 KSTAT_TYPE_RAW, sizeof (struct sfmmu_global_stat),
13336 13333 KSTAT_FLAG_VIRTUAL);
13337 13334 if (ksp) {
13338 13335 ksp->ks_data = (void *) &sfmmu_global_stat;
13339 13336 kstat_install(ksp);
13340 13337 }
13341 13338 ksp = kstat_create("unix", 0, "sfmmu_tsbsize_stat", "hat",
13342 13339 KSTAT_TYPE_RAW, sizeof (struct sfmmu_tsbsize_stat),
13343 13340 KSTAT_FLAG_VIRTUAL);
13344 13341 if (ksp) {
13345 13342 ksp->ks_data = (void *) &sfmmu_tsbsize_stat;
13346 13343 kstat_install(ksp);
13347 13344 }
13348 13345 ksp = kstat_create("unix", 0, "sfmmu_percpu_stat", "hat",
13349 13346 KSTAT_TYPE_RAW, sizeof (struct sfmmu_percpu_stat) * NCPU,
13350 13347 KSTAT_FLAG_WRITABLE);
13351 13348 if (ksp) {
13352 13349 ksp->ks_update = sfmmu_kstat_percpu_update;
13353 13350 kstat_install(ksp);
13354 13351 }
13355 13352 }
13356 13353
13357 13354 /* ARGSUSED */
13358 13355 static int
13359 13356 sfmmu_kstat_percpu_update(kstat_t *ksp, int rw)
13360 13357 {
13361 13358 struct sfmmu_percpu_stat *cpu_kstat = ksp->ks_data;
13362 13359 struct tsbmiss *tsbm = tsbmiss_area;
13363 13360 struct kpmtsbm *kpmtsbm = kpmtsbm_area;
13364 13361 int i;
13365 13362
13366 13363 ASSERT(cpu_kstat);
13367 13364 if (rw == KSTAT_READ) {
13368 13365 for (i = 0; i < NCPU; cpu_kstat++, tsbm++, kpmtsbm++, i++) {
13369 13366 cpu_kstat->sf_itlb_misses = 0;
13370 13367 cpu_kstat->sf_dtlb_misses = 0;
13371 13368 cpu_kstat->sf_utsb_misses = tsbm->utsb_misses -
13372 13369 tsbm->uprot_traps;
13373 13370 cpu_kstat->sf_ktsb_misses = tsbm->ktsb_misses +
13374 13371 kpmtsbm->kpm_tsb_misses - tsbm->kprot_traps;
13375 13372 cpu_kstat->sf_tsb_hits = 0;
13376 13373 cpu_kstat->sf_umod_faults = tsbm->uprot_traps;
13377 13374 cpu_kstat->sf_kmod_faults = tsbm->kprot_traps;
13378 13375 }
13379 13376 } else {
13380 13377 /* KSTAT_WRITE is used to clear stats */
13381 13378 for (i = 0; i < NCPU; tsbm++, kpmtsbm++, i++) {
13382 13379 tsbm->utsb_misses = 0;
13383 13380 tsbm->ktsb_misses = 0;
13384 13381 tsbm->uprot_traps = 0;
13385 13382 tsbm->kprot_traps = 0;
13386 13383 kpmtsbm->kpm_dtlb_misses = 0;
13387 13384 kpmtsbm->kpm_tsb_misses = 0;
13388 13385 }
13389 13386 }
13390 13387 return (0);
13391 13388 }
13392 13389
13393 13390 #ifdef DEBUG
13394 13391
13395 13392 tte_t *gorig[NCPU], *gcur[NCPU], *gnew[NCPU];
13396 13393
13397 13394 /*
13398 13395 * A tte checker. *orig_old is the value we read before cas.
13399 13396 * *cur is the value returned by cas.
13400 13397 * *new is the desired value when we do the cas.
13401 13398 *
13402 13399 * *hmeblkp is currently unused.
13403 13400 */
13404 13401
13405 13402 /* ARGSUSED */
13406 13403 void
13407 13404 chk_tte(tte_t *orig_old, tte_t *cur, tte_t *new, struct hme_blk *hmeblkp)
13408 13405 {
13409 13406 pfn_t i, j, k;
13410 13407 int cpuid = CPU->cpu_id;
13411 13408
13412 13409 gorig[cpuid] = orig_old;
13413 13410 gcur[cpuid] = cur;
13414 13411 gnew[cpuid] = new;
13415 13412
13416 13413 #ifdef lint
13417 13414 hmeblkp = hmeblkp;
13418 13415 #endif
13419 13416
13420 13417 if (TTE_IS_VALID(orig_old)) {
13421 13418 if (TTE_IS_VALID(cur)) {
13422 13419 i = TTE_TO_TTEPFN(orig_old);
13423 13420 j = TTE_TO_TTEPFN(cur);
13424 13421 k = TTE_TO_TTEPFN(new);
13425 13422 if (i != j) {
13426 13423 /* remap error? */
13427 13424 panic("chk_tte: bad pfn, 0x%lx, 0x%lx", i, j);
13428 13425 }
13429 13426
13430 13427 if (i != k) {
13431 13428 /* remap error? */
13432 13429 panic("chk_tte: bad pfn2, 0x%lx, 0x%lx", i, k);
13433 13430 }
13434 13431 } else {
13435 13432 if (TTE_IS_VALID(new)) {
13436 13433 panic("chk_tte: invalid cur? ");
13437 13434 }
13438 13435
13439 13436 i = TTE_TO_TTEPFN(orig_old);
13440 13437 k = TTE_TO_TTEPFN(new);
13441 13438 if (i != k) {
13442 13439 panic("chk_tte: bad pfn3, 0x%lx, 0x%lx", i, k);
13443 13440 }
13444 13441 }
13445 13442 } else {
13446 13443 if (TTE_IS_VALID(cur)) {
13447 13444 j = TTE_TO_TTEPFN(cur);
13448 13445 if (TTE_IS_VALID(new)) {
13449 13446 k = TTE_TO_TTEPFN(new);
13450 13447 if (j != k) {
13451 13448 panic("chk_tte: bad pfn4, 0x%lx, 0x%lx",
13452 13449 j, k);
13453 13450 }
13454 13451 } else {
13455 13452 panic("chk_tte: why here?");
13456 13453 }
13457 13454 } else {
13458 13455 if (!TTE_IS_VALID(new)) {
13459 13456 panic("chk_tte: why here2 ?");
13460 13457 }
13461 13458 }
13462 13459 }
13463 13460 }
13464 13461
13465 13462 #endif /* DEBUG */
13466 13463
13467 13464 extern void prefetch_tsbe_read(struct tsbe *);
13468 13465 extern void prefetch_tsbe_write(struct tsbe *);
13469 13466
13470 13467
13471 13468 /*
13472 13469 * We want to prefetch 7 cache lines ahead for our read prefetch. This gives
13473 13470 * us optimal performance on Cheetah+. You can only have 8 outstanding
13474 13471 * prefetches at any one time, so we opted for 7 read prefetches and 1 write
13475 13472 * prefetch to make the most utilization of the prefetch capability.
13476 13473 */
13477 13474 #define TSBE_PREFETCH_STRIDE (7)
13478 13475
13479 13476 void
13480 13477 sfmmu_copy_tsb(struct tsb_info *old_tsbinfo, struct tsb_info *new_tsbinfo)
13481 13478 {
13482 13479 int old_bytes = TSB_BYTES(old_tsbinfo->tsb_szc);
13483 13480 int new_bytes = TSB_BYTES(new_tsbinfo->tsb_szc);
13484 13481 int old_entries = TSB_ENTRIES(old_tsbinfo->tsb_szc);
13485 13482 int new_entries = TSB_ENTRIES(new_tsbinfo->tsb_szc);
13486 13483 struct tsbe *old;
13487 13484 struct tsbe *new;
13488 13485 struct tsbe *new_base = (struct tsbe *)new_tsbinfo->tsb_va;
13489 13486 uint64_t va;
13490 13487 int new_offset;
13491 13488 int i;
13492 13489 int vpshift;
13493 13490 int last_prefetch;
13494 13491
13495 13492 if (old_bytes == new_bytes) {
13496 13493 bcopy(old_tsbinfo->tsb_va, new_tsbinfo->tsb_va, new_bytes);
13497 13494 } else {
13498 13495
13499 13496 /*
13500 13497 * A TSBE is 16 bytes which means there are four TSBE's per
13501 13498 * P$ line (64 bytes), thus every 4 TSBE's we prefetch.
13502 13499 */
13503 13500 old = (struct tsbe *)old_tsbinfo->tsb_va;
13504 13501 last_prefetch = old_entries - (4*(TSBE_PREFETCH_STRIDE+1));
13505 13502 for (i = 0; i < old_entries; i++, old++) {
13506 13503 if (((i & (4-1)) == 0) && (i < last_prefetch))
13507 13504 prefetch_tsbe_read(old);
13508 13505 if (!old->tte_tag.tag_invalid) {
13509 13506 /*
13510 13507 * We have a valid TTE to remap. Check the
13511 13508 * size. We won't remap 64K or 512K TTEs
13512 13509 * because they span more than one TSB entry
13513 13510 * and are indexed using an 8K virt. page.
13514 13511 * Ditto for 32M and 256M TTEs.
13515 13512 */
13516 13513 if (TTE_CSZ(&old->tte_data) == TTE64K ||
13517 13514 TTE_CSZ(&old->tte_data) == TTE512K)
13518 13515 continue;
13519 13516 if (mmu_page_sizes == max_mmu_page_sizes) {
13520 13517 if (TTE_CSZ(&old->tte_data) == TTE32M ||
13521 13518 TTE_CSZ(&old->tte_data) == TTE256M)
13522 13519 continue;
13523 13520 }
13524 13521
13525 13522 /* clear the lower 22 bits of the va */
13526 13523 va = *(uint64_t *)old << 22;
13527 13524 /* turn va into a virtual pfn */
13528 13525 va >>= 22 - TSB_START_SIZE;
13529 13526 /*
13530 13527 * or in bits from the offset in the tsb
13531 13528 * to get the real virtual pfn. These
13532 13529 * correspond to bits [21:13] in the va
13533 13530 */
13534 13531 vpshift =
13535 13532 TTE_BSZS_SHIFT(TTE_CSZ(&old->tte_data)) &
13536 13533 0x1ff;
13537 13534 va |= (i << vpshift);
13538 13535 va >>= vpshift;
13539 13536 new_offset = va & (new_entries - 1);
13540 13537 new = new_base + new_offset;
13541 13538 prefetch_tsbe_write(new);
13542 13539 *new = *old;
13543 13540 }
13544 13541 }
13545 13542 }
13546 13543 }
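
/*
 * Editor's note: an illustrative, standalone sketch (not part of hat_sfmmu.c)
 * of the read-prefetch scheduling used in sfmmu_copy_tsb() above.  With
 * 16-byte TSB entries and 64-byte cache lines there are four entries per
 * line, so a prefetch is issued on every fourth entry, and prefetching stops
 * 4 * (STRIDE + 1) entries before the end so nothing is fetched past the
 * source TSB.  prefetch_line() is a hypothetical stand-in for the assembly
 * routine prefetch_tsbe_read(); that the prefetch lands STRIDE lines ahead of
 * its argument is an assumption implied by the cutoff, not something this
 * file states explicitly.
 */
#include <stdio.h>

#define ENTRIES_PER_LINE        4       /* 64-byte line / 16-byte TSB entry */
#define PREFETCH_STRIDE         7       /* lines ahead of the copy loop */

static void
prefetch_line(int entry)
{
        printf("prefetch the line holding entry %d\n",
            entry + ENTRIES_PER_LINE * PREFETCH_STRIDE);
}

int
main(void)
{
        int old_entries = 64;           /* a tiny TSB, for demonstration only */
        int last_prefetch = old_entries -
            ENTRIES_PER_LINE * (PREFETCH_STRIDE + 1);
        int i;

        for (i = 0; i < old_entries; i++) {
                if ((i & (ENTRIES_PER_LINE - 1)) == 0 && i < last_prefetch)
                        prefetch_line(i);
                /* ... copy/remap entry i here ... */
        }
        return (0);
}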
13547 13544
13548 13545 /*
13549 13546 * unused in sfmmu
13550 13547 */
13551 13548 void
13552 13549 hat_dump(void)
13553 13550 {
13554 13551 }
13555 13552
13556 13553 /*
13557 13554 * Called when a thread is exiting and we have switched to the kernel address
13558 13555 * space. Perform the same VM initialization resume() uses when switching
13559 13556 * processes.
13560 13557 *
13561 13558 * Note that sfmmu_load_mmustate() is currently a no-op for kernel threads, but
13562 13559 * we call it anyway in case the semantics change in the future.
13563 13560 */
13564 13561 /*ARGSUSED*/
13565 13562 void
13566 13563 hat_thread_exit(kthread_t *thd)
13567 13564 {
13568 13565 uint_t pgsz_cnum;
13569 13566 uint_t pstate_save;
13570 13567
13571 13568 ASSERT(thd->t_procp->p_as == &kas);
13572 13569
13573 13570 pgsz_cnum = KCONTEXT;
13574 13571 #ifdef sun4u
13575 13572 pgsz_cnum |= (ksfmmup->sfmmu_cext << CTXREG_EXT_SHIFT);
13576 13573 #endif
13577 13574
13578 13575 /*
13579 13576 * Note that sfmmu_load_mmustate() is currently a no-op for
13580 13577 * kernel threads. We need to disable interrupts here,
13581 13578 * simply because otherwise sfmmu_load_mmustate() would panic
13582 13579 * if the caller does not disable interrupts.
13583 13580 */
13584 13581 pstate_save = sfmmu_disable_intrs();
13585 13582
13586 13583 /* Compatibility Note: hw takes care of MMU_SCONTEXT1 */
13587 13584 sfmmu_setctx_sec(pgsz_cnum);
13588 13585 sfmmu_load_mmustate(ksfmmup);
13589 13586 sfmmu_enable_intrs(pstate_save);
13590 13587 }
13591 13588
13592 13589
13593 13590 /*
13594 13591 * SRD support
13595 13592 */
13596 13593 #define SRD_HASH_FUNCTION(vp) (((((uintptr_t)(vp)) >> 4) ^ \
13597 13594 (((uintptr_t)(vp)) >> 11)) & \
13598 13595 srd_hashmask)
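
/*
 * Editor's note: an illustrative, standalone sketch (not part of hat_sfmmu.c)
 * of the pointer hash used by SRD_HASH_FUNCTION above (and by
 * RGN_HASH_FUNCTION further down).  Shifting right by 4 drops low-order bits
 * that are identical for aligned, heap-allocated objects; xoring in a copy
 * shifted by 11 mixes in higher-order bits before masking down to a
 * power-of-two bucket count.  The bucket count and names here are
 * hypothetical stand-ins for srd_hashmask and the SRD bucket array.
 */
#include <stdint.h>

#define SK_NBUCKETS     128                     /* must be a power of two */
#define SK_HASHMASK     (SK_NBUCKETS - 1)       /* plays srd_hashmask */

unsigned int
sk_ptr_hash(const void *obj)
{
        uintptr_t p = (uintptr_t)obj;

        return ((unsigned int)(((p >> 4) ^ (p >> 11)) & SK_HASHMASK));
}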
13599 13596
13600 13597 /*
13601 13598 * Attach the process to the srd struct associated with the exec vnode
13602 13599 * from which the process is started.
13603 13600 */
13604 13601 void
13605 13602 hat_join_srd(struct hat *sfmmup, vnode_t *evp)
13606 13603 {
13607 13604 uint_t hash = SRD_HASH_FUNCTION(evp);
13608 13605 sf_srd_t *srdp;
13609 13606 sf_srd_t *newsrdp;
13610 13607
13611 13608 ASSERT(sfmmup != ksfmmup);
13612 13609 ASSERT(sfmmup->sfmmu_srdp == NULL);
13613 13610
13614 13611 if (!shctx_on) {
13615 13612 return;
13616 13613 }
13617 13614
13618 13615 VN_HOLD(evp);
13619 13616
13620 13617 if (srd_buckets[hash].srdb_srdp != NULL) {
13621 13618 mutex_enter(&srd_buckets[hash].srdb_lock);
13622 13619 for (srdp = srd_buckets[hash].srdb_srdp; srdp != NULL;
13623 13620 srdp = srdp->srd_hash) {
13624 13621 if (srdp->srd_evp == evp) {
13625 13622 ASSERT(srdp->srd_refcnt >= 0);
13626 13623 sfmmup->sfmmu_srdp = srdp;
13627 13624 atomic_inc_32(
13628 13625 (volatile uint_t *)&srdp->srd_refcnt);
13629 13626 mutex_exit(&srd_buckets[hash].srdb_lock);
13630 13627 return;
13631 13628 }
13632 13629 }
13633 13630 mutex_exit(&srd_buckets[hash].srdb_lock);
13634 13631 }
13635 13632 newsrdp = kmem_cache_alloc(srd_cache, KM_SLEEP);
13636 13633 ASSERT(newsrdp->srd_next_ismrid == 0 && newsrdp->srd_next_hmerid == 0);
13637 13634
13638 13635 newsrdp->srd_evp = evp;
13639 13636 newsrdp->srd_refcnt = 1;
13640 13637 newsrdp->srd_hmergnfree = NULL;
13641 13638 newsrdp->srd_ismrgnfree = NULL;
13642 13639
13643 13640 mutex_enter(&srd_buckets[hash].srdb_lock);
13644 13641 for (srdp = srd_buckets[hash].srdb_srdp; srdp != NULL;
13645 13642 srdp = srdp->srd_hash) {
13646 13643 if (srdp->srd_evp == evp) {
13647 13644 ASSERT(srdp->srd_refcnt >= 0);
13648 13645 sfmmup->sfmmu_srdp = srdp;
13649 13646 atomic_inc_32((volatile uint_t *)&srdp->srd_refcnt);
13650 13647 mutex_exit(&srd_buckets[hash].srdb_lock);
13651 13648 kmem_cache_free(srd_cache, newsrdp);
13652 13649 return;
13653 13650 }
13654 13651 }
13655 13652 newsrdp->srd_hash = srd_buckets[hash].srdb_srdp;
13656 13653 srd_buckets[hash].srdb_srdp = newsrdp;
13657 13654 sfmmup->sfmmu_srdp = newsrdp;
13658 13655
13659 13656 mutex_exit(&srd_buckets[hash].srdb_lock);
13660 13657
13661 13658 }
13662 13659
13663 13660 static void
13664 13661 sfmmu_leave_srd(sfmmu_t *sfmmup)
13665 13662 {
13666 13663 vnode_t *evp;
13667 13664 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
13668 13665 uint_t hash;
13669 13666 sf_srd_t **prev_srdpp;
13670 13667 sf_region_t *rgnp;
13671 13668 sf_region_t *nrgnp;
13672 13669 #ifdef DEBUG
13673 13670 int rgns = 0;
13674 13671 #endif
13675 13672 int i;
13676 13673
13677 13674 ASSERT(sfmmup != ksfmmup);
13678 13675 ASSERT(srdp != NULL);
13679 13676 ASSERT(srdp->srd_refcnt > 0);
13680 13677 ASSERT(sfmmup->sfmmu_scdp == NULL);
13681 13678 ASSERT(sfmmup->sfmmu_free == 1);
13682 13679
13683 13680 sfmmup->sfmmu_srdp = NULL;
13684 13681 evp = srdp->srd_evp;
13685 13682 ASSERT(evp != NULL);
13686 13683 if (atomic_dec_32_nv((volatile uint_t *)&srdp->srd_refcnt)) {
13687 13684 VN_RELE(evp);
13688 13685 return;
13689 13686 }
13690 13687
13691 13688 hash = SRD_HASH_FUNCTION(evp);
13692 13689 mutex_enter(&srd_buckets[hash].srdb_lock);
13693 13690 for (prev_srdpp = &srd_buckets[hash].srdb_srdp;
13694 13691 (srdp = *prev_srdpp) != NULL; prev_srdpp = &srdp->srd_hash) {
13695 13692 if (srdp->srd_evp == evp) {
13696 13693 break;
13697 13694 }
13698 13695 }
13699 13696 if (srdp == NULL || srdp->srd_refcnt) {
13700 13697 mutex_exit(&srd_buckets[hash].srdb_lock);
13701 13698 VN_RELE(evp);
13702 13699 return;
13703 13700 }
13704 13701 *prev_srdpp = srdp->srd_hash;
13705 13702 mutex_exit(&srd_buckets[hash].srdb_lock);
13706 13703
13707 13704 ASSERT(srdp->srd_refcnt == 0);
13708 13705 VN_RELE(evp);
13709 13706
13710 13707 #ifdef DEBUG
13711 13708 for (i = 0; i < SFMMU_MAX_REGION_BUCKETS; i++) {
13712 13709 ASSERT(srdp->srd_rgnhash[i] == NULL);
13713 13710 }
13714 13711 #endif /* DEBUG */
13715 13712
13716 13713 /* free each hme regions in the srd */
13717 13714 for (rgnp = srdp->srd_hmergnfree; rgnp != NULL; rgnp = nrgnp) {
13718 13715 nrgnp = rgnp->rgn_next;
13719 13716 ASSERT(rgnp->rgn_id < srdp->srd_next_hmerid);
13720 13717 ASSERT(rgnp->rgn_refcnt == 0);
13721 13718 ASSERT(rgnp->rgn_sfmmu_head == NULL);
13722 13719 ASSERT(rgnp->rgn_flags & SFMMU_REGION_FREE);
13723 13720 ASSERT(rgnp->rgn_hmeflags == 0);
13724 13721 ASSERT(srdp->srd_hmergnp[rgnp->rgn_id] == rgnp);
13725 13722 #ifdef DEBUG
13726 13723 for (i = 0; i < MMU_PAGE_SIZES; i++) {
13727 13724 ASSERT(rgnp->rgn_ttecnt[i] == 0);
13728 13725 }
13729 13726 rgns++;
13730 13727 #endif /* DEBUG */
13731 13728 kmem_cache_free(region_cache, rgnp);
13732 13729 }
13733 13730 ASSERT(rgns == srdp->srd_next_hmerid);
13734 13731
13735 13732 #ifdef DEBUG
13736 13733 rgns = 0;
13737 13734 #endif
13738 13735 /* free each ism rgns in the srd */
13739 13736 for (rgnp = srdp->srd_ismrgnfree; rgnp != NULL; rgnp = nrgnp) {
13740 13737 nrgnp = rgnp->rgn_next;
13741 13738 ASSERT(rgnp->rgn_id < srdp->srd_next_ismrid);
13742 13739 ASSERT(rgnp->rgn_refcnt == 0);
13743 13740 ASSERT(rgnp->rgn_sfmmu_head == NULL);
13744 13741 ASSERT(rgnp->rgn_flags & SFMMU_REGION_FREE);
13745 13742 ASSERT(srdp->srd_ismrgnp[rgnp->rgn_id] == rgnp);
13746 13743 #ifdef DEBUG
13747 13744 for (i = 0; i < MMU_PAGE_SIZES; i++) {
13748 13745 ASSERT(rgnp->rgn_ttecnt[i] == 0);
13749 13746 }
13750 13747 rgns++;
13751 13748 #endif /* DEBUG */
13752 13749 kmem_cache_free(region_cache, rgnp);
13753 13750 }
13754 13751 ASSERT(rgns == srdp->srd_next_ismrid);
13755 13752 ASSERT(srdp->srd_ismbusyrgns == 0);
13756 13753 ASSERT(srdp->srd_hmebusyrgns == 0);
13757 13754
13758 13755 srdp->srd_next_ismrid = 0;
13759 13756 srdp->srd_next_hmerid = 0;
13760 13757
13761 13758 bzero((void *)srdp->srd_ismrgnp,
13762 13759 sizeof (sf_region_t *) * SFMMU_MAX_ISM_REGIONS);
13763 13760 bzero((void *)srdp->srd_hmergnp,
13764 13761 sizeof (sf_region_t *) * SFMMU_MAX_HME_REGIONS);
13765 13762
13766 13763 ASSERT(srdp->srd_scdp == NULL);
13767 13764 kmem_cache_free(srd_cache, srdp);
13768 13765 }
13769 13766
13770 13767 /* ARGSUSED */
13771 13768 static int
13772 13769 sfmmu_srdcache_constructor(void *buf, void *cdrarg, int kmflags)
13773 13770 {
13774 13771 sf_srd_t *srdp = (sf_srd_t *)buf;
13775 13772 bzero(buf, sizeof (*srdp));
13776 13773
13777 13774 mutex_init(&srdp->srd_mutex, NULL, MUTEX_DEFAULT, NULL);
13778 13775 mutex_init(&srdp->srd_scd_mutex, NULL, MUTEX_DEFAULT, NULL);
13779 13776 return (0);
13780 13777 }
13781 13778
13782 13779 /* ARGSUSED */
13783 13780 static void
13784 13781 sfmmu_srdcache_destructor(void *buf, void *cdrarg)
13785 13782 {
13786 13783 sf_srd_t *srdp = (sf_srd_t *)buf;
13787 13784
13788 13785 mutex_destroy(&srdp->srd_mutex);
13789 13786 mutex_destroy(&srdp->srd_scd_mutex);
13790 13787 }
13791 13788
13792 13789 /*
13793 13790 * The caller makes sure hat_join_region()/hat_leave_region() can't be called
13794 13791 * at the same time for the same process and address range. This is ensured by
13795 13792 * the fact that address space is locked as writer when a process joins the
13796 13793 * regions. Therefore there's no need to hold an srd lock during the entire
13797 13794 * execution of hat_join_region()/hat_leave_region().
13798 13795 */
13799 13796
13800 13797 #define RGN_HASH_FUNCTION(obj) (((((uintptr_t)(obj)) >> 4) ^ \
13801 13798 (((uintptr_t)(obj)) >> 11)) & \
13802 13799 srd_rgn_hashmask)
13803 13800 /*
13804 13801 * This routine implements the shared context functionality required when
13805 13802 * attaching a segment to an address space. It must be called from
13806 13803 * hat_share() for D(ISM) segments and from segvn_create() for segments
13807 13804 * with the MAP_PRIVATE and MAP_TEXT flags set. It returns a region_cookie
13808 13805 * which is saved in the private segment data for hme segments and
13809 13806 * the ism_map structure for ism segments.
13810 13807 */
13811 13808 hat_region_cookie_t
13812 13809 hat_join_region(struct hat *sfmmup,
13813 13810 caddr_t r_saddr,
13814 13811 size_t r_size,
13815 13812 void *r_obj,
13816 13813 u_offset_t r_objoff,
13817 13814 uchar_t r_perm,
13818 13815 uchar_t r_pgszc,
13819 13816 hat_rgn_cb_func_t r_cb_function,
13820 13817 uint_t flags)
13821 13818 {
13822 13819 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
13823 13820 uint_t rhash;
13824 13821 uint_t rid;
13825 13822 hatlock_t *hatlockp;
13826 13823 sf_region_t *rgnp;
13827 13824 sf_region_t *new_rgnp = NULL;
13828 13825 int i;
13829 13826 uint16_t *nextidp;
13830 13827 sf_region_t **freelistp;
13831 13828 int maxids;
13832 13829 sf_region_t **rarrp;
13833 13830 uint16_t *busyrgnsp;
13834 13831 ulong_t rttecnt;
13835 13832 uchar_t tteflag;
13836 13833 uchar_t r_type = flags & HAT_REGION_TYPE_MASK;
13837 13834 int text = (r_type == HAT_REGION_TEXT);
13838 13835
13839 13836 if (srdp == NULL || r_size == 0) {
13840 13837 return (HAT_INVALID_REGION_COOKIE);
13841 13838 }
13842 13839
13843 13840 ASSERT(sfmmup != ksfmmup);
13844 13841 ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as));
13845 13842 ASSERT(srdp->srd_refcnt > 0);
13846 13843 ASSERT(!(flags & ~HAT_REGION_TYPE_MASK));
13847 13844 ASSERT(flags == HAT_REGION_TEXT || flags == HAT_REGION_ISM);
13848 13845 ASSERT(r_pgszc < mmu_page_sizes);
13849 13846 if (!IS_P2ALIGNED(r_saddr, TTEBYTES(r_pgszc)) ||
13850 13847 !IS_P2ALIGNED(r_size, TTEBYTES(r_pgszc))) {
13851 13848 panic("hat_join_region: region addr or size is not aligned\n");
13852 13849 }
13853 13850
13854 13851
13855 13852 r_type = (r_type == HAT_REGION_ISM) ? SFMMU_REGION_ISM :
13856 13853 SFMMU_REGION_HME;
13857 13854 /*
13858 13855 * Currently only support shared hmes for the read only main text
13859 13856 * region.
13860 13857 */
13861 13858 if (r_type == SFMMU_REGION_HME && ((r_obj != srdp->srd_evp) ||
13862 13859 (r_perm & PROT_WRITE))) {
13863 13860 return (HAT_INVALID_REGION_COOKIE);
13864 13861 }
13865 13862
13866 13863 rhash = RGN_HASH_FUNCTION(r_obj);
13867 13864
13868 13865 if (r_type == SFMMU_REGION_ISM) {
13869 13866 nextidp = &srdp->srd_next_ismrid;
13870 13867 freelistp = &srdp->srd_ismrgnfree;
13871 13868 maxids = SFMMU_MAX_ISM_REGIONS;
13872 13869 rarrp = srdp->srd_ismrgnp;
13873 13870 busyrgnsp = &srdp->srd_ismbusyrgns;
13874 13871 } else {
13875 13872 nextidp = &srdp->srd_next_hmerid;
13876 13873 freelistp = &srdp->srd_hmergnfree;
13877 13874 maxids = SFMMU_MAX_HME_REGIONS;
13878 13875 rarrp = srdp->srd_hmergnp;
13879 13876 busyrgnsp = &srdp->srd_hmebusyrgns;
13880 13877 }
13881 13878
13882 13879 mutex_enter(&srdp->srd_mutex);
13883 13880
13884 13881 for (rgnp = srdp->srd_rgnhash[rhash]; rgnp != NULL;
13885 13882 rgnp = rgnp->rgn_hash) {
13886 13883 if (rgnp->rgn_saddr == r_saddr && rgnp->rgn_size == r_size &&
13887 13884 rgnp->rgn_obj == r_obj && rgnp->rgn_objoff == r_objoff &&
13888 13885 rgnp->rgn_perm == r_perm && rgnp->rgn_pgszc == r_pgszc) {
13889 13886 break;
13890 13887 }
13891 13888 }
13892 13889
13893 13890 rfound:
13894 13891 if (rgnp != NULL) {
13895 13892 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == r_type);
13896 13893 ASSERT(rgnp->rgn_cb_function == r_cb_function);
13897 13894 ASSERT(rgnp->rgn_refcnt >= 0);
13898 13895 rid = rgnp->rgn_id;
13899 13896 ASSERT(rid < maxids);
13900 13897 ASSERT(rarrp[rid] == rgnp);
13901 13898 ASSERT(rid < *nextidp);
13902 13899 atomic_inc_32((volatile uint_t *)&rgnp->rgn_refcnt);
13903 13900 mutex_exit(&srdp->srd_mutex);
13904 13901 if (new_rgnp != NULL) {
13905 13902 kmem_cache_free(region_cache, new_rgnp);
13906 13903 }
13907 13904 if (r_type == SFMMU_REGION_HME) {
13908 13905 int myjoin =
13909 13906 (sfmmup == astosfmmu(curthread->t_procp->p_as));
13910 13907
13911 13908 sfmmu_link_to_hmeregion(sfmmup, rgnp);
13912 13909 /*
13913 13910 * bitmap should be updated after linking sfmmu on
13914 13911 * region list so that pageunload() doesn't skip
13915 13912 * TSB/TLB flush. As soon as bitmap is updated another
13916 13913 * thread in this process can already start accessing
13917 13914 * this region.
13918 13915 */
13919 13916 /*
13920 13917 * Normally ttecnt accounting is done as part of
13921 13918 * pagefault handling. But a process may not take any
13922 13919 * pagefaults on shared hmeblks created by some other
13923 13920 * process. To compensate for this assume that the
13924 13921 * entire region will end up faulted in using
13925 13922 * the region's pagesize.
13926 13923 *
13927 13924 */
13928 13925 if (r_pgszc > TTE8K) {
13929 13926 tteflag = 1 << r_pgszc;
13930 13927 if (disable_large_pages & tteflag) {
13931 13928 tteflag = 0;
13932 13929 }
13933 13930 } else {
13934 13931 tteflag = 0;
13935 13932 }
13936 13933 if (tteflag && !(sfmmup->sfmmu_rtteflags & tteflag)) {
13937 13934 hatlockp = sfmmu_hat_enter(sfmmup);
13938 13935 sfmmup->sfmmu_rtteflags |= tteflag;
13939 13936 sfmmu_hat_exit(hatlockp);
13940 13937 }
13941 13938 hatlockp = sfmmu_hat_enter(sfmmup);
13942 13939
13943 13940 /*
13944 13941 * Preallocate 1/4 of ttecnt's in 8K TSB for >= 4M
13945 13942 * region to allow for large page allocation failure.
13946 13943 */
13947 13944 if (r_pgszc >= TTE4M) {
13948 13945 sfmmup->sfmmu_tsb0_4minflcnt +=
13949 13946 r_size >> (TTE_PAGE_SHIFT(TTE8K) + 2);
13950 13947 }
13951 13948
13952 13949 /* update sfmmu_ttecnt with the shme rgn ttecnt */
13953 13950 rttecnt = r_size >> TTE_PAGE_SHIFT(r_pgszc);
13954 13951 atomic_add_long(&sfmmup->sfmmu_ttecnt[r_pgszc],
13955 13952 rttecnt);
13956 13953
13957 13954 if (text && r_pgszc >= TTE4M &&
13958 13955 (tteflag || ((disable_large_pages >> TTE4M) &
13959 13956 ((1 << (r_pgszc - TTE4M + 1)) - 1))) &&
13960 13957 !SFMMU_FLAGS_ISSET(sfmmup, HAT_4MTEXT_FLAG)) {
13961 13958 SFMMU_FLAGS_SET(sfmmup, HAT_4MTEXT_FLAG);
13962 13959 }
13963 13960
13964 13961 sfmmu_hat_exit(hatlockp);
13965 13962 /*
13966 13963 * On Panther we need to make sure TLB is programmed
13967 13964 * to accept 32M/256M pages. Call
13968 13965 * sfmmu_check_page_sizes() now to make sure TLB is
13969 13966 * setup before making hmeregions visible to other
13970 13967 * threads.
13971 13968 */
13972 13969 sfmmu_check_page_sizes(sfmmup, 1);
13973 13970 hatlockp = sfmmu_hat_enter(sfmmup);
13974 13971 SF_RGNMAP_ADD(sfmmup->sfmmu_hmeregion_map, rid);
13975 13972
13976 13973 /*
13977 13974 * if context is invalid tsb miss exception code will
13978 13975 * call sfmmu_check_page_sizes() and update tsbmiss
13979 13976 * area later.
13980 13977 */
13981 13978 kpreempt_disable();
13982 13979 if (myjoin &&
13983 13980 (sfmmup->sfmmu_ctxs[CPU_MMU_IDX(CPU)].cnum
13984 13981 != INVALID_CONTEXT)) {
13985 13982 struct tsbmiss *tsbmp;
13986 13983
13987 13984 tsbmp = &tsbmiss_area[CPU->cpu_id];
13988 13985 ASSERT(sfmmup == tsbmp->usfmmup);
13989 13986 BT_SET(tsbmp->shmermap, rid);
13990 13987 if (r_pgszc > TTE64K) {
13991 13988 tsbmp->uhat_rtteflags |= tteflag;
13992 13989 }
13993 13990
13994 13991 }
13995 13992 kpreempt_enable();
13996 13993
13997 13994 sfmmu_hat_exit(hatlockp);
13998 13995 ASSERT((hat_region_cookie_t)((uint64_t)rid) !=
13999 13996 HAT_INVALID_REGION_COOKIE);
14000 13997 } else {
14001 13998 hatlockp = sfmmu_hat_enter(sfmmup);
14002 13999 SF_RGNMAP_ADD(sfmmup->sfmmu_ismregion_map, rid);
14003 14000 sfmmu_hat_exit(hatlockp);
14004 14001 }
14005 14002 ASSERT(rid < maxids);
14006 14003
14007 14004 if (r_type == SFMMU_REGION_ISM) {
14008 14005 sfmmu_find_scd(sfmmup);
14009 14006 }
14010 14007 return ((hat_region_cookie_t)((uint64_t)rid));
14011 14008 }
14012 14009
14013 14010 ASSERT(new_rgnp == NULL);
14014 14011
14015 14012 if (*busyrgnsp >= maxids) {
14016 14013 mutex_exit(&srdp->srd_mutex);
14017 14014 return (HAT_INVALID_REGION_COOKIE);
14018 14015 }
14019 14016
14020 14017 ASSERT(MUTEX_HELD(&srdp->srd_mutex));
14021 14018 if (*freelistp != NULL) {
14022 14019 rgnp = *freelistp;
14023 14020 *freelistp = rgnp->rgn_next;
14024 14021 ASSERT(rgnp->rgn_id < *nextidp);
14025 14022 ASSERT(rgnp->rgn_id < maxids);
14026 14023 ASSERT(rgnp->rgn_flags & SFMMU_REGION_FREE);
14027 14024 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK)
14028 14025 == r_type);
14029 14026 ASSERT(rarrp[rgnp->rgn_id] == rgnp);
14030 14027 ASSERT(rgnp->rgn_hmeflags == 0);
14031 14028 } else {
14032 14029 /*
14033 14030 * release local locks before memory allocation.
14034 14031 */
14035 14032 mutex_exit(&srdp->srd_mutex);
14036 14033
14037 14034 new_rgnp = kmem_cache_alloc(region_cache, KM_SLEEP);
14038 14035
14039 14036 mutex_enter(&srdp->srd_mutex);
14040 14037 for (rgnp = srdp->srd_rgnhash[rhash]; rgnp != NULL;
14041 14038 rgnp = rgnp->rgn_hash) {
14042 14039 if (rgnp->rgn_saddr == r_saddr &&
14043 14040 rgnp->rgn_size == r_size &&
14044 14041 rgnp->rgn_obj == r_obj &&
14045 14042 rgnp->rgn_objoff == r_objoff &&
14046 14043 rgnp->rgn_perm == r_perm &&
14047 14044 rgnp->rgn_pgszc == r_pgszc) {
14048 14045 break;
14049 14046 }
14050 14047 }
14051 14048 if (rgnp != NULL) {
14052 14049 goto rfound;
14053 14050 }
14054 14051
14055 14052 if (*nextidp >= maxids) {
14056 14053 mutex_exit(&srdp->srd_mutex);
14057 14054 goto fail;
14058 14055 }
14059 14056 rgnp = new_rgnp;
14060 14057 new_rgnp = NULL;
14061 14058 rgnp->rgn_id = (*nextidp)++;
14062 14059 ASSERT(rgnp->rgn_id < maxids);
14063 14060 ASSERT(rarrp[rgnp->rgn_id] == NULL);
14064 14061 rarrp[rgnp->rgn_id] = rgnp;
14065 14062 }
14066 14063
14067 14064 ASSERT(rgnp->rgn_sfmmu_head == NULL);
14068 14065 ASSERT(rgnp->rgn_hmeflags == 0);
14069 14066 #ifdef DEBUG
14070 14067 for (i = 0; i < MMU_PAGE_SIZES; i++) {
14071 14068 ASSERT(rgnp->rgn_ttecnt[i] == 0);
14072 14069 }
14073 14070 #endif
14074 14071 rgnp->rgn_saddr = r_saddr;
14075 14072 rgnp->rgn_size = r_size;
14076 14073 rgnp->rgn_obj = r_obj;
14077 14074 rgnp->rgn_objoff = r_objoff;
14078 14075 rgnp->rgn_perm = r_perm;
14079 14076 rgnp->rgn_pgszc = r_pgszc;
14080 14077 rgnp->rgn_flags = r_type;
14081 14078 rgnp->rgn_refcnt = 0;
14082 14079 rgnp->rgn_cb_function = r_cb_function;
14083 14080 rgnp->rgn_hash = srdp->srd_rgnhash[rhash];
14084 14081 srdp->srd_rgnhash[rhash] = rgnp;
14085 14082 (*busyrgnsp)++;
14086 14083 ASSERT(*busyrgnsp <= maxids);
14087 14084 goto rfound;
14088 14085
14089 14086 fail:
14090 14087 ASSERT(new_rgnp != NULL);
14091 14088 kmem_cache_free(region_cache, new_rgnp);
14092 14089 return (HAT_INVALID_REGION_COOKIE);
14093 14090 }
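
/*
 * Editor's note: an illustrative, standalone sketch (not part of hat_sfmmu.c)
 * of the lookup/insert pattern hat_join_region() uses above: search the hash
 * under the lock; if nothing matches, drop the lock for the sleeping
 * allocation, retake the lock and search again (another thread may have
 * inserted a matching entry in the meantime), and only insert the new node if
 * the second search still comes up empty, otherwise discard it.  All names
 * are hypothetical; the real code additionally recycles nodes from a per-SRD
 * free list and manages region reference counts.  malloc() stands in for a
 * KM_SLEEP kmem_cache_alloc().
 */
#include <stdlib.h>
#include <pthread.h>

struct node {
        struct node     *next;
        int             key;
};

static pthread_mutex_t  tbl_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node      *tbl_head;

static struct node *
lookup_locked(int key)
{
        struct node *np;

        for (np = tbl_head; np != NULL; np = np->next)
                if (np->key == key)
                        return (np);
        return (NULL);
}

struct node *
find_or_insert(int key)
{
        struct node *np, *newnp;

        pthread_mutex_lock(&tbl_lock);
        if ((np = lookup_locked(key)) != NULL) {
                pthread_mutex_unlock(&tbl_lock);
                return (np);
        }
        pthread_mutex_unlock(&tbl_lock);

        /* allocate with the lock dropped, since the allocation may block */
        if ((newnp = malloc(sizeof (*newnp))) == NULL)
                return (NULL);
        newnp->key = key;

        pthread_mutex_lock(&tbl_lock);
        if ((np = lookup_locked(key)) != NULL) {
                /* lost the race: another thread inserted it; discard ours */
                pthread_mutex_unlock(&tbl_lock);
                free(newnp);
                return (np);
        }
        newnp->next = tbl_head;
        tbl_head = newnp;
        pthread_mutex_unlock(&tbl_lock);
        return (newnp);
}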
14094 14091
14095 14092 /*
14096 14093 * This function implements the shared context functionality required
14097 14094 * when detaching a segment from an address space. It must be called
14098 14095 * from hat_unshare() for all D(ISM) segments and from segvn_unmap(),
14099 14096 * for segments with a valid region_cookie.
14100 14097 * It will also be called from all seg_vn routines which change a
14101 14098 * segment's attributes such as segvn_setprot(), segvn_setpagesize(),
14102 14099 * segvn_clrszc() & segvn_advise(), as well as in the case of COW fault
14103 14100 * from segvn_fault().
14104 14101 */
14105 14102 void
14106 14103 hat_leave_region(struct hat *sfmmup, hat_region_cookie_t rcookie, uint_t flags)
14107 14104 {
14108 14105 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
14109 14106 sf_scd_t *scdp;
14110 14107 uint_t rhash;
14111 14108 uint_t rid = (uint_t)((uint64_t)rcookie);
14112 14109 hatlock_t *hatlockp = NULL;
14113 14110 sf_region_t *rgnp;
14114 14111 sf_region_t **prev_rgnpp;
14115 14112 sf_region_t *cur_rgnp;
14116 14113 void *r_obj;
14117 14114 int i;
14118 14115 caddr_t r_saddr;
14119 14116 caddr_t r_eaddr;
14120 14117 size_t r_size;
14121 14118 uchar_t r_pgszc;
14122 14119 uchar_t r_type = flags & HAT_REGION_TYPE_MASK;
14123 14120
14124 14121 ASSERT(sfmmup != ksfmmup);
14125 14122 ASSERT(srdp != NULL);
14126 14123 ASSERT(srdp->srd_refcnt > 0);
14127 14124 ASSERT(!(flags & ~HAT_REGION_TYPE_MASK));
14128 14125 ASSERT(flags == HAT_REGION_TEXT || flags == HAT_REGION_ISM);
14129 14126 ASSERT(!sfmmup->sfmmu_free || sfmmup->sfmmu_scdp == NULL);
14130 14127
14131 14128 r_type = (r_type == HAT_REGION_ISM) ? SFMMU_REGION_ISM :
14132 14129 SFMMU_REGION_HME;
14133 14130
14134 14131 if (r_type == SFMMU_REGION_ISM) {
14135 14132 ASSERT(SFMMU_IS_ISMRID_VALID(rid));
14136 14133 ASSERT(rid < SFMMU_MAX_ISM_REGIONS);
14137 14134 rgnp = srdp->srd_ismrgnp[rid];
14138 14135 } else {
14139 14136 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
14140 14137 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
14141 14138 rgnp = srdp->srd_hmergnp[rid];
14142 14139 }
14143 14140 ASSERT(rgnp != NULL);
14144 14141 ASSERT(rgnp->rgn_id == rid);
14145 14142 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == r_type);
14146 14143 ASSERT(!(rgnp->rgn_flags & SFMMU_REGION_FREE));
14147 14144 ASSERT(AS_LOCK_HELD(sfmmup->sfmmu_as));
14148 14145
14149 14146 if (sfmmup->sfmmu_free) {
14150 14147 ulong_t rttecnt;
14151 14148 r_pgszc = rgnp->rgn_pgszc;
14152 14149 r_size = rgnp->rgn_size;
14153 14150
14154 14151 ASSERT(sfmmup->sfmmu_scdp == NULL);
14155 14152 if (r_type == SFMMU_REGION_ISM) {
14156 14153 SF_RGNMAP_DEL(sfmmup->sfmmu_ismregion_map, rid);
14157 14154 } else {
14158 14155 /* update shme rgns ttecnt in sfmmu_ttecnt */
14159 14156 rttecnt = r_size >> TTE_PAGE_SHIFT(r_pgszc);
14160 14157 ASSERT(sfmmup->sfmmu_ttecnt[r_pgszc] >= rttecnt);
14161 14158
14162 14159 atomic_add_long(&sfmmup->sfmmu_ttecnt[r_pgszc],
14163 14160 -rttecnt);
14164 14161
14165 14162 SF_RGNMAP_DEL(sfmmup->sfmmu_hmeregion_map, rid);
14166 14163 }
14167 14164 } else if (r_type == SFMMU_REGION_ISM) {
14168 14165 hatlockp = sfmmu_hat_enter(sfmmup);
14169 14166 ASSERT(rid < srdp->srd_next_ismrid);
14170 14167 SF_RGNMAP_DEL(sfmmup->sfmmu_ismregion_map, rid);
14171 14168 scdp = sfmmup->sfmmu_scdp;
14172 14169 if (scdp != NULL &&
14173 14170 SF_RGNMAP_TEST(scdp->scd_ismregion_map, rid)) {
14174 14171 sfmmu_leave_scd(sfmmup, r_type);
14175 14172 ASSERT(sfmmu_hat_lock_held(sfmmup));
14176 14173 }
14177 14174 sfmmu_hat_exit(hatlockp);
14178 14175 } else {
14179 14176 ulong_t rttecnt;
14180 14177 r_pgszc = rgnp->rgn_pgszc;
14181 14178 r_saddr = rgnp->rgn_saddr;
14182 14179 r_size = rgnp->rgn_size;
14183 14180 r_eaddr = r_saddr + r_size;
14184 14181
14185 14182 ASSERT(r_type == SFMMU_REGION_HME);
14186 14183 hatlockp = sfmmu_hat_enter(sfmmup);
14187 14184 ASSERT(rid < srdp->srd_next_hmerid);
14188 14185 SF_RGNMAP_DEL(sfmmup->sfmmu_hmeregion_map, rid);
14189 14186
14190 14187 /*
14191 14188 * If region is part of an SCD call sfmmu_leave_scd().
14192 14189 * Otherwise if process is not exiting and has valid context
14193 14190 * just drop the context on the floor to lose stale TLB
14194 14191 * entries and force the update of tsb miss area to reflect
14195 14192 * the new region map. After that clean our TSB entries.
14196 14193 */
14197 14194 scdp = sfmmup->sfmmu_scdp;
14198 14195 if (scdp != NULL &&
14199 14196 SF_RGNMAP_TEST(scdp->scd_hmeregion_map, rid)) {
14200 14197 sfmmu_leave_scd(sfmmup, r_type);
14201 14198 ASSERT(sfmmu_hat_lock_held(sfmmup));
14202 14199 }
14203 14200 sfmmu_invalidate_ctx(sfmmup);
14204 14201
14205 14202 i = TTE8K;
14206 14203 while (i < mmu_page_sizes) {
14207 14204 if (rgnp->rgn_ttecnt[i] != 0) {
14208 14205 sfmmu_unload_tsb_range(sfmmup, r_saddr,
14209 14206 r_eaddr, i);
14210 14207 if (i < TTE4M) {
14211 14208 i = TTE4M;
14212 14209 continue;
14213 14210 } else {
14214 14211 break;
14215 14212 }
14216 14213 }
14217 14214 i++;
14218 14215 }
14219 14216 /* Remove the preallocated 1/4 8k ttecnt for 4M regions. */
14220 14217 if (r_pgszc >= TTE4M) {
14221 14218 rttecnt = r_size >> (TTE_PAGE_SHIFT(TTE8K) + 2);
14222 14219 ASSERT(sfmmup->sfmmu_tsb0_4minflcnt >=
14223 14220 rttecnt);
14224 14221 sfmmup->sfmmu_tsb0_4minflcnt -= rttecnt;
14225 14222 }
14226 14223
14227 14224 /* update shme rgns ttecnt in sfmmu_ttecnt */
14228 14225 rttecnt = r_size >> TTE_PAGE_SHIFT(r_pgszc);
14229 14226 ASSERT(sfmmup->sfmmu_ttecnt[r_pgszc] >= rttecnt);
14230 14227 atomic_add_long(&sfmmup->sfmmu_ttecnt[r_pgszc], -rttecnt);
14231 14228
14232 14229 sfmmu_hat_exit(hatlockp);
14233 14230 if (scdp != NULL && sfmmup->sfmmu_scdp == NULL) {
14234 14231 /* sfmmup left the scd, grow private tsb */
14235 14232 sfmmu_check_page_sizes(sfmmup, 1);
14236 14233 } else {
14237 14234 sfmmu_check_page_sizes(sfmmup, 0);
14238 14235 }
14239 14236 }
14240 14237
14241 14238 if (r_type == SFMMU_REGION_HME) {
14242 14239 sfmmu_unlink_from_hmeregion(sfmmup, rgnp);
14243 14240 }
14244 14241
14245 14242 r_obj = rgnp->rgn_obj;
14246 14243 if (atomic_dec_32_nv((volatile uint_t *)&rgnp->rgn_refcnt)) {
14247 14244 return;
14248 14245 }
14249 14246
14250 14247 /*
14251 14248 * looks like nobody uses this region anymore. Free it.
14252 14249 */
14253 14250 rhash = RGN_HASH_FUNCTION(r_obj);
14254 14251 mutex_enter(&srdp->srd_mutex);
14255 14252 for (prev_rgnpp = &srdp->srd_rgnhash[rhash];
14256 14253 (cur_rgnp = *prev_rgnpp) != NULL;
14257 14254 prev_rgnpp = &cur_rgnp->rgn_hash) {
14258 14255 if (cur_rgnp == rgnp && cur_rgnp->rgn_refcnt == 0) {
14259 14256 break;
14260 14257 }
14261 14258 }
14262 14259
14263 14260 if (cur_rgnp == NULL) {
14264 14261 mutex_exit(&srdp->srd_mutex);
14265 14262 return;
14266 14263 }
14267 14264
14268 14265 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == r_type);
14269 14266 *prev_rgnpp = rgnp->rgn_hash;
14270 14267 if (r_type == SFMMU_REGION_ISM) {
14271 14268 rgnp->rgn_flags |= SFMMU_REGION_FREE;
14272 14269 ASSERT(rid < srdp->srd_next_ismrid);
14273 14270 rgnp->rgn_next = srdp->srd_ismrgnfree;
14274 14271 srdp->srd_ismrgnfree = rgnp;
14275 14272 ASSERT(srdp->srd_ismbusyrgns > 0);
14276 14273 srdp->srd_ismbusyrgns--;
14277 14274 mutex_exit(&srdp->srd_mutex);
14278 14275 return;
14279 14276 }
14280 14277 mutex_exit(&srdp->srd_mutex);
14281 14278
14282 14279 /*
14283 14280 * Destroy region's hmeblks.
14284 14281 */
14285 14282 sfmmu_unload_hmeregion(srdp, rgnp);
14286 14283
14287 14284 rgnp->rgn_hmeflags = 0;
14288 14285
14289 14286 ASSERT(rgnp->rgn_sfmmu_head == NULL);
14290 14287 ASSERT(rgnp->rgn_id == rid);
14291 14288 for (i = 0; i < MMU_PAGE_SIZES; i++) {
14292 14289 rgnp->rgn_ttecnt[i] = 0;
14293 14290 }
14294 14291 rgnp->rgn_flags |= SFMMU_REGION_FREE;
14295 14292 mutex_enter(&srdp->srd_mutex);
14296 14293 ASSERT(rid < srdp->srd_next_hmerid);
14297 14294 rgnp->rgn_next = srdp->srd_hmergnfree;
14298 14295 srdp->srd_hmergnfree = rgnp;
14299 14296 ASSERT(srdp->srd_hmebusyrgns > 0);
14300 14297 srdp->srd_hmebusyrgns--;
14301 14298 mutex_exit(&srdp->srd_mutex);
14302 14299 }
14303 14300
14304 14301 /*
14305 14302 * For now only called for hmeblk regions and not for ISM regions.
14306 14303 */
14307 14304 void
14308 14305 hat_dup_region(struct hat *sfmmup, hat_region_cookie_t rcookie)
14309 14306 {
14310 14307 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
14311 14308 uint_t rid = (uint_t)((uint64_t)rcookie);
14312 14309 sf_region_t *rgnp;
14313 14310 sf_rgn_link_t *rlink;
14314 14311 sf_rgn_link_t *hrlink;
14315 14312 ulong_t rttecnt;
14316 14313
14317 14314 ASSERT(sfmmup != ksfmmup);
14318 14315 ASSERT(srdp != NULL);
14319 14316 ASSERT(srdp->srd_refcnt > 0);
14320 14317
14321 14318 ASSERT(rid < srdp->srd_next_hmerid);
14322 14319 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
14323 14320 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
14324 14321
14325 14322 rgnp = srdp->srd_hmergnp[rid];
14326 14323 ASSERT(rgnp->rgn_refcnt > 0);
14327 14324 ASSERT(rgnp->rgn_id == rid);
14328 14325 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == SFMMU_REGION_HME);
14329 14326 ASSERT(!(rgnp->rgn_flags & SFMMU_REGION_FREE));
14330 14327
14331 14328 atomic_inc_32((volatile uint_t *)&rgnp->rgn_refcnt);
14332 14329
14333 14330 /* LINTED: constant in conditional context */
14334 14331 SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 1, 0);
14335 14332 ASSERT(rlink != NULL);
14336 14333 mutex_enter(&rgnp->rgn_mutex);
14337 14334 ASSERT(rgnp->rgn_sfmmu_head != NULL);
14338 14335 /* LINTED: constant in conditional context */
14339 14336 SFMMU_HMERID2RLINKP(rgnp->rgn_sfmmu_head, rid, hrlink, 0, 0);
14340 14337 ASSERT(hrlink != NULL);
14341 14338 ASSERT(hrlink->prev == NULL);
14342 14339 rlink->next = rgnp->rgn_sfmmu_head;
14343 14340 rlink->prev = NULL;
14344 14341 hrlink->prev = sfmmup;
14345 14342 /*
14346 14343 * make sure rlink's next field is correct
14347 14344 * before making this link visible.
14348 14345 */
14349 14346 membar_stst();
14350 14347 rgnp->rgn_sfmmu_head = sfmmup;
14351 14348 mutex_exit(&rgnp->rgn_mutex);
14352 14349
14353 14350 /* update sfmmu_ttecnt with the shme rgn ttecnt */
14354 14351 rttecnt = rgnp->rgn_size >> TTE_PAGE_SHIFT(rgnp->rgn_pgszc);
14355 14352 atomic_add_long(&sfmmup->sfmmu_ttecnt[rgnp->rgn_pgszc], rttecnt);
14356 14353 /* update tsb0 inflation count */
14357 14354 if (rgnp->rgn_pgszc >= TTE4M) {
14358 14355 sfmmup->sfmmu_tsb0_4minflcnt +=
14359 14356 rgnp->rgn_size >> (TTE_PAGE_SHIFT(TTE8K) + 2);
14360 14357 }
14361 14358 /*
14362 14359 * Update regionid bitmask without hat lock since no other thread
14363 14360 * can update this region bitmask right now.
14364 14361 */
14365 14362 SF_RGNMAP_ADD(sfmmup->sfmmu_hmeregion_map, rid);
14366 14363 }
14367 14364
14368 14365 /* ARGSUSED */
14369 14366 static int
14370 14367 sfmmu_rgncache_constructor(void *buf, void *cdrarg, int kmflags)
14371 14368 {
14372 14369 sf_region_t *rgnp = (sf_region_t *)buf;
14373 14370 bzero(buf, sizeof (*rgnp));
14374 14371
14375 14372 mutex_init(&rgnp->rgn_mutex, NULL, MUTEX_DEFAULT, NULL);
14376 14373
14377 14374 return (0);
14378 14375 }
14379 14376
14380 14377 /* ARGSUSED */
14381 14378 static void
14382 14379 sfmmu_rgncache_destructor(void *buf, void *cdrarg)
14383 14380 {
14384 14381 sf_region_t *rgnp = (sf_region_t *)buf;
14385 14382 mutex_destroy(&rgnp->rgn_mutex);
14386 14383 }
14387 14384
14388 14385 static int
14389 14386 sfrgnmap_isnull(sf_region_map_t *map)
14390 14387 {
14391 14388 int i;
14392 14389
14393 14390 for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) {
14394 14391 if (map->bitmap[i] != 0) {
14395 14392 return (0);
14396 14393 }
14397 14394 }
14398 14395 return (1);
14399 14396 }
14400 14397
14401 14398 static int
14402 14399 sfhmergnmap_isnull(sf_hmeregion_map_t *map)
14403 14400 {
14404 14401 int i;
14405 14402
14406 14403 for (i = 0; i < SFMMU_HMERGNMAP_WORDS; i++) {
14407 14404 if (map->bitmap[i] != 0) {
14408 14405 return (0);
14409 14406 }
14410 14407 }
14411 14408 return (1);
14412 14409 }
14413 14410
14414 14411 #ifdef DEBUG
14415 14412 static void
14416 14413 check_scd_sfmmu_list(sfmmu_t **headp, sfmmu_t *sfmmup, int onlist)
14417 14414 {
14418 14415 sfmmu_t *sp;
14419 14416 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
14420 14417
14421 14418 for (sp = *headp; sp != NULL; sp = sp->sfmmu_scd_link.next) {
14422 14419 ASSERT(srdp == sp->sfmmu_srdp);
14423 14420 if (sp == sfmmup) {
14424 14421 if (onlist) {
14425 14422 return;
14426 14423 } else {
14427 14424 panic("shctx: sfmmu 0x%p found on scd"
14428 14425 "list 0x%p", (void *)sfmmup,
14429 14426 (void *)*headp);
14430 14427 }
14431 14428 }
14432 14429 }
14433 14430 if (onlist) {
14434 14431 panic("shctx: sfmmu 0x%p not found on scd list 0x%p",
14435 14432 (void *)sfmmup, (void *)*headp);
14436 14433 } else {
14437 14434 return;
14438 14435 }
14439 14436 }
14440 14437 #else /* DEBUG */
14441 14438 #define check_scd_sfmmu_list(headp, sfmmup, onlist)
14442 14439 #endif /* DEBUG */
14443 14440
14444 14441 /*
14445 14442 * Removes an sfmmu from the SCD sfmmu list.
14446 14443 */
14447 14444 static void
14448 14445 sfmmu_from_scd_list(sfmmu_t **headp, sfmmu_t *sfmmup)
14449 14446 {
14450 14447 ASSERT(sfmmup->sfmmu_srdp != NULL);
14451 14448 check_scd_sfmmu_list(headp, sfmmup, 1);
14452 14449 if (sfmmup->sfmmu_scd_link.prev != NULL) {
14453 14450 ASSERT(*headp != sfmmup);
14454 14451 sfmmup->sfmmu_scd_link.prev->sfmmu_scd_link.next =
14455 14452 sfmmup->sfmmu_scd_link.next;
14456 14453 } else {
14457 14454 ASSERT(*headp == sfmmup);
14458 14455 *headp = sfmmup->sfmmu_scd_link.next;
14459 14456 }
14460 14457 if (sfmmup->sfmmu_scd_link.next != NULL) {
14461 14458 sfmmup->sfmmu_scd_link.next->sfmmu_scd_link.prev =
14462 14459 sfmmup->sfmmu_scd_link.prev;
14463 14460 }
14464 14461 }
14465 14462
14466 14463
14467 14464 /*
14468 14465 * Adds an sfmmu to the start of the queue.
14469 14466 */
14470 14467 static void
14471 14468 sfmmu_to_scd_list(sfmmu_t **headp, sfmmu_t *sfmmup)
14472 14469 {
14473 14470 check_scd_sfmmu_list(headp, sfmmup, 0);
14474 14471 sfmmup->sfmmu_scd_link.prev = NULL;
14475 14472 sfmmup->sfmmu_scd_link.next = *headp;
14476 14473 if (*headp != NULL)
14477 14474 (*headp)->sfmmu_scd_link.prev = sfmmup;
14478 14475 *headp = sfmmup;
14479 14476 }
14480 14477
14481 14478 /*
14482 14479 * Remove an scd from the start of the queue.
14483 14480 */
14484 14481 static void
14485 14482 sfmmu_remove_scd(sf_scd_t **headp, sf_scd_t *scdp)
14486 14483 {
14487 14484 if (scdp->scd_prev != NULL) {
14488 14485 ASSERT(*headp != scdp);
14489 14486 scdp->scd_prev->scd_next = scdp->scd_next;
14490 14487 } else {
14491 14488 ASSERT(*headp == scdp);
14492 14489 *headp = scdp->scd_next;
14493 14490 }
14494 14491
14495 14492 if (scdp->scd_next != NULL) {
14496 14493 scdp->scd_next->scd_prev = scdp->scd_prev;
14497 14494 }
14498 14495 }
14499 14496
14500 14497 /*
14501 14498 * Add an scd to the start of the queue.
14502 14499 */
14503 14500 static void
14504 14501 sfmmu_add_scd(sf_scd_t **headp, sf_scd_t *scdp)
14505 14502 {
14506 14503 scdp->scd_prev = NULL;
14507 14504 scdp->scd_next = *headp;
14508 14505 if (*headp != NULL) {
14509 14506 (*headp)->scd_prev = scdp;
14510 14507 }
14511 14508 *headp = scdp;
14512 14509 }
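
Editor's note: the four helpers above maintain intrusive doubly-linked lists through a head pointer, with a NULL prev marking the head element. Below is a minimal user-space sketch of the same add-to-head / unlink pattern; node_t and the helper names are illustrative stand-ins, not part of hat_sfmmu.c.

	#include <assert.h>
	#include <stddef.h>

	typedef struct node {
		struct node *prev;
		struct node *next;
	} node_t;

	static void
	list_add_head(node_t **headp, node_t *np)
	{
		np->prev = NULL;
		np->next = *headp;
		if (*headp != NULL)
			(*headp)->prev = np;
		*headp = np;
	}

	static void
	list_remove(node_t **headp, node_t *np)
	{
		if (np->prev != NULL) {
			assert(*headp != np);
			np->prev->next = np->next;
		} else {
			assert(*headp == np);
			*headp = np->next;
		}
		if (np->next != NULL)
			np->next->prev = np->prev;
	}

	int
	main(void)
	{
		node_t a, b, *head = NULL;

		list_add_head(&head, &a);
		list_add_head(&head, &b);	/* head -> b -> a */
		list_remove(&head, &a);		/* head -> b */
		assert(head == &b && b.next == NULL && b.prev == NULL);
		return (0);
	}
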
14513 14510
14514 14511 static int
14515 14512 sfmmu_alloc_scd_tsbs(sf_srd_t *srdp, sf_scd_t *scdp)
14516 14513 {
14517 14514 uint_t rid;
14518 14515 uint_t i;
14519 14516 uint_t j;
14520 14517 ulong_t w;
14521 14518 sf_region_t *rgnp;
14522 14519 ulong_t tte8k_cnt = 0;
14523 14520 ulong_t tte4m_cnt = 0;
14524 14521 uint_t tsb_szc;
14525 14522 sfmmu_t *scsfmmup = scdp->scd_sfmmup;
14526 14523 sfmmu_t *ism_hatid;
14527 14524 struct tsb_info *newtsb;
14528 14525 int szc;
14529 14526
14530 14527 ASSERT(srdp != NULL);
14531 14528
14532 14529 for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) {
14533 14530 if ((w = scdp->scd_region_map.bitmap[i]) == 0) {
14534 14531 continue;
14535 14532 }
14536 14533 j = 0;
14537 14534 while (w) {
14538 14535 if (!(w & 0x1)) {
14539 14536 j++;
14540 14537 w >>= 1;
14541 14538 continue;
14542 14539 }
14543 14540 rid = (i << BT_ULSHIFT) | j;
14544 14541 j++;
14545 14542 w >>= 1;
14546 14543
14547 14544 if (rid < SFMMU_MAX_HME_REGIONS) {
14548 14545 rgnp = srdp->srd_hmergnp[rid];
14549 14546 ASSERT(rgnp->rgn_id == rid);
14550 14547 ASSERT(rgnp->rgn_refcnt > 0);
14551 14548
14552 14549 if (rgnp->rgn_pgszc < TTE4M) {
14553 14550 tte8k_cnt += rgnp->rgn_size >>
14554 14551 TTE_PAGE_SHIFT(TTE8K);
14555 14552 } else {
14556 14553 ASSERT(rgnp->rgn_pgszc >= TTE4M);
14557 14554 tte4m_cnt += rgnp->rgn_size >>
14558 14555 TTE_PAGE_SHIFT(TTE4M);
14559 14556 /*
14560 14557 * Inflate SCD tsb0 by preallocating
14561 14558 * 1/4 8k ttecnt for 4M regions to
14562 14559 * allow for lgpg alloc failure.
14563 14560 */
14564 14561 tte8k_cnt += rgnp->rgn_size >>
14565 14562 (TTE_PAGE_SHIFT(TTE8K) + 2);
14566 14563 }
14567 14564 } else {
14568 14565 rid -= SFMMU_MAX_HME_REGIONS;
14569 14566 rgnp = srdp->srd_ismrgnp[rid];
14570 14567 ASSERT(rgnp->rgn_id == rid);
14571 14568 ASSERT(rgnp->rgn_refcnt > 0);
14572 14569
14573 14570 ism_hatid = (sfmmu_t *)rgnp->rgn_obj;
14574 14571 ASSERT(ism_hatid->sfmmu_ismhat);
14575 14572
14576 14573 for (szc = 0; szc < TTE4M; szc++) {
14577 14574 tte8k_cnt +=
14578 14575 ism_hatid->sfmmu_ttecnt[szc] <<
14579 14576 TTE_BSZS_SHIFT(szc);
14580 14577 }
14581 14578
14582 14579 ASSERT(rgnp->rgn_pgszc >= TTE4M);
14583 14580 if (rgnp->rgn_pgszc >= TTE4M) {
14584 14581 tte4m_cnt += rgnp->rgn_size >>
14585 14582 TTE_PAGE_SHIFT(TTE4M);
14586 14583 }
14587 14584 }
14588 14585 }
14589 14586 }
14590 14587
14591 14588 tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt);
14592 14589
14593 14590 /* Allocate both the SCD TSBs here. */
14594 14591 if (sfmmu_tsbinfo_alloc(&scsfmmup->sfmmu_tsb,
14595 14592 tsb_szc, TSB8K|TSB64K|TSB512K, TSB_ALLOC, scsfmmup) &&
14596 14593 (tsb_szc <= TSB_4M_SZCODE ||
14597 14594 sfmmu_tsbinfo_alloc(&scsfmmup->sfmmu_tsb,
14598 14595 TSB_4M_SZCODE, TSB8K|TSB64K|TSB512K,
14599 14596 TSB_ALLOC, scsfmmup))) {
14600 14597
14601 14598 SFMMU_STAT(sf_scd_1sttsb_allocfail);
14602 14599 return (TSB_ALLOCFAIL);
14603 14600 } else {
14604 14601 scsfmmup->sfmmu_tsb->tsb_flags |= TSB_SHAREDCTX;
14605 14602
14606 14603 if (tte4m_cnt) {
14607 14604 tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt);
14608 14605 if (sfmmu_tsbinfo_alloc(&newtsb, tsb_szc,
14609 14606 TSB4M|TSB32M|TSB256M, TSB_ALLOC, scsfmmup) &&
14610 14607 (tsb_szc <= TSB_4M_SZCODE ||
14611 14608 sfmmu_tsbinfo_alloc(&newtsb, TSB_4M_SZCODE,
14612 14609 TSB4M|TSB32M|TSB256M,
14613 14610 TSB_ALLOC, scsfmmup))) {
14614 14611 /*
14615 14612 * If we fail to allocate the 2nd shared tsb,
14616 14613 * just free the 1st tsb and return failure.
14617 14614 */
14618 14615 sfmmu_tsbinfo_free(scsfmmup->sfmmu_tsb);
14619 14616 SFMMU_STAT(sf_scd_2ndtsb_allocfail);
14620 14617 return (TSB_ALLOCFAIL);
14621 14618 } else {
14622 14619 ASSERT(scsfmmup->sfmmu_tsb->tsb_next == NULL);
14623 14620 newtsb->tsb_flags |= TSB_SHAREDCTX;
14624 14621 scsfmmup->sfmmu_tsb->tsb_next = newtsb;
14625 14622 SFMMU_STAT(sf_scd_2ndtsb_alloc);
14626 14623 }
14627 14624 }
14628 14625 SFMMU_STAT(sf_scd_1sttsb_alloc);
14629 14626 }
14630 14627 return (TSB_SUCCESS);
14631 14628 }
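
Editor's note: a minimal sketch of the ttecnt arithmetic performed in the bitmap loop above, including the 1/4 inflation of the 8K count for 4M regions described in the inline comment. The page shifts correspond to the 8K and 4M page sizes; the region sizes are made-up sample inputs and none of the names below come from the kernel.

	#include <stdio.h>

	#define	PAGESHIFT_8K	13		/* 8K page */
	#define	PAGESHIFT_4M	22		/* 4M page */

	int
	main(void)
	{
		/* hypothetical region sizes in bytes */
		unsigned long small_rgn = 64UL << 20;	/* mapped with 8K pages */
		unsigned long large_rgn = 256UL << 20;	/* mapped with 4M pages */
		unsigned long tte8k_cnt = 0, tte4m_cnt = 0;

		tte8k_cnt += small_rgn >> PAGESHIFT_8K;

		tte4m_cnt += large_rgn >> PAGESHIFT_4M;
		/*
		 * Preallocate 1/4 of the region's 8K count in case large
		 * pages cannot be allocated and the region falls back to
		 * 8K mappings.
		 */
		tte8k_cnt += large_rgn >> (PAGESHIFT_8K + 2);

		(void) printf("tte8k_cnt=%lu tte4m_cnt=%lu\n",
		    tte8k_cnt, tte4m_cnt);
		return (0);
	}
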
14632 14629
14633 14630 static void
14634 14631 sfmmu_free_scd_tsbs(sfmmu_t *scd_sfmmu)
14635 14632 {
14636 14633 while (scd_sfmmu->sfmmu_tsb != NULL) {
14637 14634 struct tsb_info *next = scd_sfmmu->sfmmu_tsb->tsb_next;
14638 14635 sfmmu_tsbinfo_free(scd_sfmmu->sfmmu_tsb);
14639 14636 scd_sfmmu->sfmmu_tsb = next;
14640 14637 }
14641 14638 }
14642 14639
14643 14640 /*
14644 14641 * Link the sfmmu onto the hme region list.
14645 14642 */
14646 14643 void
14647 14644 sfmmu_link_to_hmeregion(sfmmu_t *sfmmup, sf_region_t *rgnp)
14648 14645 {
14649 14646 uint_t rid;
14650 14647 sf_rgn_link_t *rlink;
14651 14648 sfmmu_t *head;
14652 14649 sf_rgn_link_t *hrlink;
14653 14650
14654 14651 rid = rgnp->rgn_id;
14655 14652 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
14656 14653
14657 14654 /* LINTED: constant in conditional context */
14658 14655 SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 1, 1);
14659 14656 ASSERT(rlink != NULL);
14660 14657 mutex_enter(&rgnp->rgn_mutex);
14661 14658 if ((head = rgnp->rgn_sfmmu_head) == NULL) {
14662 14659 rlink->next = NULL;
14663 14660 rlink->prev = NULL;
14664 14661 /*
14665 14662 * make sure rlink's next field is NULL
14666 14663 * before making this link visible.
14667 14664 */
14668 14665 membar_stst();
14669 14666 rgnp->rgn_sfmmu_head = sfmmup;
14670 14667 } else {
14671 14668 /* LINTED: constant in conditional context */
14672 14669 SFMMU_HMERID2RLINKP(head, rid, hrlink, 0, 0);
14673 14670 ASSERT(hrlink != NULL);
14674 14671 ASSERT(hrlink->prev == NULL);
14675 14672 rlink->next = head;
14676 14673 rlink->prev = NULL;
14677 14674 hrlink->prev = sfmmup;
14678 14675 /*
14679 14676 * make sure rlink's next field is correct
14680 14677 * before making this link visible.
14681 14678 */
14682 14679 membar_stst();
14683 14680 rgnp->rgn_sfmmu_head = sfmmup;
14684 14681 }
14685 14682 mutex_exit(&rgnp->rgn_mutex);
14686 14683 }
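
Editor's note: a user-space analogue of the publication ordering enforced by membar_stst() above, using C11 release/acquire atomics in place of the SPARC barrier. It assumes writers are serialized by some lock (the role rgn_mutex plays here); only the reader side is lock-free. All names below are illustrative.

	#include <stdatomic.h>
	#include <stddef.h>

	typedef struct elem {
		struct elem *next;
	} elem_t;

	static _Atomic(elem_t *) list_head;

	/* caller is assumed to hold a writer-side lock, like rgn_mutex above */
	static void
	publish(elem_t *ep)
	{
		/* fully initialize the element's links first ... */
		ep->next = atomic_load_explicit(&list_head, memory_order_relaxed);
		/*
		 * ... then make it visible; the release store keeps the
		 * initialization ordered before the new head becomes
		 * visible, which is what membar_stst() guarantees for
		 * lock-free readers.
		 */
		atomic_store_explicit(&list_head, ep, memory_order_release);
	}

	int
	main(void)
	{
		static elem_t a, b;
		elem_t *p;

		publish(&a);
		publish(&b);
		/* reader side: the acquire load pairs with the release store */
		p = atomic_load_explicit(&list_head, memory_order_acquire);
		return ((p == &b && p->next == &a) ? 0 : 1);
	}
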
14687 14684
14688 14685 /*
14689 14686 * Unlink the sfmmu from the hme region list.
14690 14687 */
14691 14688 void
14692 14689 sfmmu_unlink_from_hmeregion(sfmmu_t *sfmmup, sf_region_t *rgnp)
14693 14690 {
14694 14691 uint_t rid;
14695 14692 sf_rgn_link_t *rlink;
14696 14693
14697 14694 rid = rgnp->rgn_id;
14698 14695 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
14699 14696
14700 14697 /* LINTED: constant in conditional context */
14701 14698 SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 0, 0);
14702 14699 ASSERT(rlink != NULL);
14703 14700 mutex_enter(&rgnp->rgn_mutex);
14704 14701 if (rgnp->rgn_sfmmu_head == sfmmup) {
14705 14702 sfmmu_t *next = rlink->next;
14706 14703 rgnp->rgn_sfmmu_head = next;
14707 14704 /*
14708 14705 * if we are stopped by xc_attention() after this
14709 14706 * point the forward link walking in
14710 14707 * sfmmu_rgntlb_demap() will work correctly since the
14711 14708 * head correctly points to the next element.
14712 14709 */
14713 14710 membar_stst();
14714 14711 rlink->next = NULL;
14715 14712 ASSERT(rlink->prev == NULL);
14716 14713 if (next != NULL) {
14717 14714 sf_rgn_link_t *nrlink;
14718 14715 /* LINTED: constant in conditional context */
14719 14716 SFMMU_HMERID2RLINKP(next, rid, nrlink, 0, 0);
14720 14717 ASSERT(nrlink != NULL);
14721 14718 ASSERT(nrlink->prev == sfmmup);
14722 14719 nrlink->prev = NULL;
14723 14720 }
14724 14721 } else {
14725 14722 sfmmu_t *next = rlink->next;
14726 14723 sfmmu_t *prev = rlink->prev;
14727 14724 sf_rgn_link_t *prlink;
14728 14725
14729 14726 ASSERT(prev != NULL);
14730 14727 /* LINTED: constant in conditional context */
14731 14728 SFMMU_HMERID2RLINKP(prev, rid, prlink, 0, 0);
14732 14729 ASSERT(prlink != NULL);
14733 14730 ASSERT(prlink->next == sfmmup);
14734 14731 prlink->next = next;
14735 14732 /*
14736 14733 * if we are stopped by xc_attention()
14737 14734 * after this point the forward link walking
14738 14735 * will work correctly since the prev element
14739 14736 * correctly points to the next element.
14740 14737 */
14741 14738 membar_stst();
14742 14739 rlink->next = NULL;
14743 14740 rlink->prev = NULL;
14744 14741 if (next != NULL) {
14745 14742 sf_rgn_link_t *nrlink;
14746 14743 /* LINTED: constant in conditional context */
14747 14744 SFMMU_HMERID2RLINKP(next, rid, nrlink, 0, 0);
14748 14745 ASSERT(nrlink != NULL);
14749 14746 ASSERT(nrlink->prev == sfmmup);
14750 14747 nrlink->prev = prev;
14751 14748 }
14752 14749 }
14753 14750 mutex_exit(&rgnp->rgn_mutex);
14754 14751 }
14755 14752
14756 14753 /*
14757 14754 * Link scd sfmmu onto ism or hme region list for each region in the
14758 14755 * scd region map.
14759 14756 */
14760 14757 void
14761 14758 sfmmu_link_scd_to_regions(sf_srd_t *srdp, sf_scd_t *scdp)
14762 14759 {
14763 14760 uint_t rid;
14764 14761 uint_t i;
14765 14762 uint_t j;
14766 14763 ulong_t w;
14767 14764 sf_region_t *rgnp;
14768 14765 sfmmu_t *scsfmmup;
14769 14766
14770 14767 scsfmmup = scdp->scd_sfmmup;
14771 14768 ASSERT(scsfmmup->sfmmu_scdhat);
14772 14769 for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) {
14773 14770 if ((w = scdp->scd_region_map.bitmap[i]) == 0) {
14774 14771 continue;
14775 14772 }
14776 14773 j = 0;
14777 14774 while (w) {
14778 14775 if (!(w & 0x1)) {
14779 14776 j++;
14780 14777 w >>= 1;
14781 14778 continue;
14782 14779 }
14783 14780 rid = (i << BT_ULSHIFT) | j;
14784 14781 j++;
14785 14782 w >>= 1;
14786 14783
14787 14784 if (rid < SFMMU_MAX_HME_REGIONS) {
14788 14785 rgnp = srdp->srd_hmergnp[rid];
14789 14786 ASSERT(rgnp->rgn_id == rid);
14790 14787 ASSERT(rgnp->rgn_refcnt > 0);
14791 14788 sfmmu_link_to_hmeregion(scsfmmup, rgnp);
14792 14789 } else {
14793 14790 sfmmu_t *ism_hatid = NULL;
14794 14791 ism_ment_t *ism_ment;
14795 14792 rid -= SFMMU_MAX_HME_REGIONS;
14796 14793 rgnp = srdp->srd_ismrgnp[rid];
14797 14794 ASSERT(rgnp->rgn_id == rid);
14798 14795 ASSERT(rgnp->rgn_refcnt > 0);
14799 14796
14800 14797 ism_hatid = (sfmmu_t *)rgnp->rgn_obj;
14801 14798 ASSERT(ism_hatid->sfmmu_ismhat);
14802 14799 ism_ment = &scdp->scd_ism_links[rid];
14803 14800 ism_ment->iment_hat = scsfmmup;
14804 14801 ism_ment->iment_base_va = rgnp->rgn_saddr;
14805 14802 mutex_enter(&ism_mlist_lock);
14806 14803 iment_add(ism_ment, ism_hatid);
14807 14804 mutex_exit(&ism_mlist_lock);
14808 14805
14809 14806 }
14810 14807 }
14811 14808 }
14812 14809 }
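
Editor's note: sfmmu_link_scd_to_regions(), sfmmu_unlink_scd_from_regions(), and sfmmu_alloc_scd_tsbs() all walk the SCD region map the same way, turning each set bit into a region id via rid = (word << BT_ULSHIFT) | bit. Below is a self-contained sketch of that walk, assuming 64-bit bitmap words; the map contents and callback are invented.

	#include <stdio.h>

	#define	MAP_WORDS	4
	#define	BT_ULSHIFT	6		/* log2(64) for 64-bit words */

	static void
	walk_rgnmap(const unsigned long *bitmap, void (*cb)(unsigned int rid))
	{
		unsigned int i, j, rid;
		unsigned long w;

		for (i = 0; i < MAP_WORDS; i++) {
			if ((w = bitmap[i]) == 0)
				continue;
			j = 0;
			while (w) {
				if (w & 0x1) {
					rid = (i << BT_ULSHIFT) | j;
					cb(rid);
				}
				j++;
				w >>= 1;
			}
		}
	}

	static void
	print_rid(unsigned int rid)
	{
		(void) printf("rid %u\n", rid);
	}

	int
	main(void)
	{
		unsigned long map[MAP_WORDS] = { 0x5, 0x0, 0x1, 0x0 };

		walk_rgnmap(map, print_rid);	/* prints 0, 2, 128 */
		return (0);
	}
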
14813 14810 /*
14814 14811 * Unlink scd sfmmu from ism or hme region list for each region in the
14815 14812 * scd region map.
14816 14813 */
14817 14814 void
14818 14815 sfmmu_unlink_scd_from_regions(sf_srd_t *srdp, sf_scd_t *scdp)
14819 14816 {
14820 14817 uint_t rid;
14821 14818 uint_t i;
14822 14819 uint_t j;
14823 14820 ulong_t w;
14824 14821 sf_region_t *rgnp;
14825 14822 sfmmu_t *scsfmmup;
14826 14823
14827 14824 scsfmmup = scdp->scd_sfmmup;
14828 14825 for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) {
14829 14826 if ((w = scdp->scd_region_map.bitmap[i]) == 0) {
14830 14827 continue;
14831 14828 }
14832 14829 j = 0;
14833 14830 while (w) {
14834 14831 if (!(w & 0x1)) {
14835 14832 j++;
14836 14833 w >>= 1;
14837 14834 continue;
14838 14835 }
14839 14836 rid = (i << BT_ULSHIFT) | j;
14840 14837 j++;
14841 14838 w >>= 1;
14842 14839
14843 14840 if (rid < SFMMU_MAX_HME_REGIONS) {
14844 14841 rgnp = srdp->srd_hmergnp[rid];
14845 14842 ASSERT(rgnp->rgn_id == rid);
14846 14843 ASSERT(rgnp->rgn_refcnt > 0);
14847 14844 sfmmu_unlink_from_hmeregion(scsfmmup,
14848 14845 rgnp);
14849 14846
14850 14847 } else {
14851 14848 sfmmu_t *ism_hatid = NULL;
14852 14849 ism_ment_t *ism_ment;
14853 14850 rid -= SFMMU_MAX_HME_REGIONS;
14854 14851 rgnp = srdp->srd_ismrgnp[rid];
14855 14852 ASSERT(rgnp->rgn_id == rid);
14856 14853 ASSERT(rgnp->rgn_refcnt > 0);
14857 14854
14858 14855 ism_hatid = (sfmmu_t *)rgnp->rgn_obj;
14859 14856 ASSERT(ism_hatid->sfmmu_ismhat);
14860 14857 ism_ment = &scdp->scd_ism_links[rid];
14861 14858 ASSERT(ism_ment->iment_hat == scdp->scd_sfmmup);
14862 14859 ASSERT(ism_ment->iment_base_va ==
14863 14860 rgnp->rgn_saddr);
14864 14861 mutex_enter(&ism_mlist_lock);
14865 14862 iment_sub(ism_ment, ism_hatid);
14866 14863 mutex_exit(&ism_mlist_lock);
14867 14864
14868 14865 }
14869 14866 }
14870 14867 }
14871 14868 }
14872 14869 /*
14873 14870 * Allocates and initialises a new SCD structure. It is called with
14874 14871 * the srd_scd_mutex held and returns with the reference count
14875 14872 * initialised to 1.
14876 14873 */
14877 14874 static sf_scd_t *
14878 14875 sfmmu_alloc_scd(sf_srd_t *srdp, sf_region_map_t *new_map)
14879 14876 {
14880 14877 sf_scd_t *new_scdp;
14881 14878 sfmmu_t *scsfmmup;
14882 14879 int i;
14883 14880
14884 14881 ASSERT(MUTEX_HELD(&srdp->srd_scd_mutex));
14885 14882 new_scdp = kmem_cache_alloc(scd_cache, KM_SLEEP);
14886 14883
14887 14884 scsfmmup = kmem_cache_alloc(sfmmuid_cache, KM_SLEEP);
14888 14885 new_scdp->scd_sfmmup = scsfmmup;
14889 14886 scsfmmup->sfmmu_srdp = srdp;
14890 14887 scsfmmup->sfmmu_scdp = new_scdp;
14891 14888 scsfmmup->sfmmu_tsb0_4minflcnt = 0;
14892 14889 scsfmmup->sfmmu_scdhat = 1;
14893 14890 CPUSET_ALL(scsfmmup->sfmmu_cpusran);
14894 14891 bzero(scsfmmup->sfmmu_hmeregion_links, SFMMU_L1_HMERLINKS_SIZE);
14895 14892
14896 14893 ASSERT(max_mmu_ctxdoms > 0);
14897 14894 for (i = 0; i < max_mmu_ctxdoms; i++) {
14898 14895 scsfmmup->sfmmu_ctxs[i].cnum = INVALID_CONTEXT;
14899 14896 scsfmmup->sfmmu_ctxs[i].gnum = 0;
14900 14897 }
14901 14898
14902 14899 for (i = 0; i < MMU_PAGE_SIZES; i++) {
14903 14900 new_scdp->scd_rttecnt[i] = 0;
14904 14901 }
14905 14902
14906 14903 new_scdp->scd_region_map = *new_map;
14907 14904 new_scdp->scd_refcnt = 1;
14908 14905 if (sfmmu_alloc_scd_tsbs(srdp, new_scdp) != TSB_SUCCESS) {
14909 14906 kmem_cache_free(scd_cache, new_scdp);
14910 14907 kmem_cache_free(sfmmuid_cache, scsfmmup);
14911 14908 return (NULL);
14912 14909 }
14913 14910 if (&mmu_init_scd) {
14914 14911 mmu_init_scd(new_scdp);
14915 14912 }
14916 14913 return (new_scdp);
14917 14914 }
14918 14915
14919 14916 /*
14920 14917 * The first phase of a process joining an SCD. The hat structure is
14921 14918 * linked to the SCD queue and then the HAT_JOIN_SCD sfmmu flag is set
14922 14919 * and a cross-call with context invalidation is used to cause the
14923 14920 * remaining work to be carried out in the sfmmu_tsbmiss_exception()
14924 14921 * routine.
14925 14922 */
14926 14923 static void
14927 14924 sfmmu_join_scd(sf_scd_t *scdp, sfmmu_t *sfmmup)
14928 14925 {
14929 14926 hatlock_t *hatlockp;
14930 14927 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
14931 14928 int i;
14932 14929 sf_scd_t *old_scdp;
14933 14930
14934 14931 ASSERT(srdp != NULL);
14935 14932 ASSERT(scdp != NULL);
14936 14933 ASSERT(scdp->scd_refcnt > 0);
14937 14934 ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as));
14938 14935
14939 14936 if ((old_scdp = sfmmup->sfmmu_scdp) != NULL) {
14940 14937 ASSERT(old_scdp != scdp);
14941 14938
14942 14939 mutex_enter(&old_scdp->scd_mutex);
14943 14940 sfmmu_from_scd_list(&old_scdp->scd_sf_list, sfmmup);
14944 14941 mutex_exit(&old_scdp->scd_mutex);
14945 14942 /*
14946 14943 * sfmmup leaves the old scd. Update sfmmu_ttecnt to
14947 14944 * include the shme rgn ttecnt for rgns that
14948 14945 * were in the old SCD
14949 14946 */
14950 14947 for (i = 0; i < mmu_page_sizes; i++) {
14951 14948 ASSERT(sfmmup->sfmmu_scdrttecnt[i] ==
14952 14949 old_scdp->scd_rttecnt[i]);
14953 14950 atomic_add_long(&sfmmup->sfmmu_ttecnt[i],
14954 14951 sfmmup->sfmmu_scdrttecnt[i]);
14955 14952 }
14956 14953 }
14957 14954
14958 14955 /*
14959 14956 * Move sfmmu to the scd lists.
14960 14957 */
14961 14958 mutex_enter(&scdp->scd_mutex);
14962 14959 sfmmu_to_scd_list(&scdp->scd_sf_list, sfmmup);
14963 14960 mutex_exit(&scdp->scd_mutex);
14964 14961 SF_SCD_INCR_REF(scdp);
14965 14962
14966 14963 hatlockp = sfmmu_hat_enter(sfmmup);
14967 14964 /*
14968 14965 * For a multi-thread process, we must stop
14969 14966 * all the other threads before joining the scd.
14970 14967 */
14971 14968
14972 14969 SFMMU_FLAGS_SET(sfmmup, HAT_JOIN_SCD);
14973 14970
14974 14971 sfmmu_invalidate_ctx(sfmmup);
14975 14972 sfmmup->sfmmu_scdp = scdp;
14976 14973
14977 14974 /*
14978 14975 * Copy scd_rttecnt into sfmmup's sfmmu_scdrttecnt, and update
14979 14976 * sfmmu_ttecnt to not include the rgn ttecnt just joined in SCD.
14980 14977 */
14981 14978 for (i = 0; i < mmu_page_sizes; i++) {
14982 14979 sfmmup->sfmmu_scdrttecnt[i] = scdp->scd_rttecnt[i];
14983 14980 ASSERT(sfmmup->sfmmu_ttecnt[i] >= scdp->scd_rttecnt[i]);
14984 14981 atomic_add_long(&sfmmup->sfmmu_ttecnt[i],
14985 14982 -sfmmup->sfmmu_scdrttecnt[i]);
14986 14983 }
14987 14984 /* update tsb0 inflation count */
14988 14985 if (old_scdp != NULL) {
14989 14986 sfmmup->sfmmu_tsb0_4minflcnt +=
14990 14987 old_scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt;
14991 14988 }
14992 14989 ASSERT(sfmmup->sfmmu_tsb0_4minflcnt >=
14993 14990 scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt);
14994 14991 sfmmup->sfmmu_tsb0_4minflcnt -= scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt;
14995 14992
14996 14993 sfmmu_hat_exit(hatlockp);
14997 14994
14998 14995 if (old_scdp != NULL) {
14999 14996 SF_SCD_DECR_REF(srdp, old_scdp);
15000 14997 }
15001 14998
15002 14999 }
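
Editor's note: a simplified sketch of the per-pagesize accounting above. A hat's private ttecnt excludes whatever is charged to its current SCD, so leaving an SCD credits those counts back and joining one debits them. NPAGESIZES and the sample values are invented for illustration only.

	#include <assert.h>
	#include <stddef.h>

	#define	NPAGESIZES	6

	static void
	switch_scd(unsigned long ttecnt[], unsigned long scd_rttecnt[],
	    const unsigned long old_scd[], const unsigned long new_scd[])
	{
		int i;

		for (i = 0; i < NPAGESIZES; i++) {
			if (old_scd != NULL) {
				/* reclaim counts carried by the old SCD */
				assert(scd_rttecnt[i] == old_scd[i]);
				ttecnt[i] += old_scd[i];
			}
			/* hand the new SCD's shared counts to the SCD hat */
			scd_rttecnt[i] = new_scd[i];
			assert(ttecnt[i] >= new_scd[i]);
			ttecnt[i] -= new_scd[i];
		}
	}

	int
	main(void)
	{
		unsigned long ttecnt[NPAGESIZES] = { 100, 8, 0, 4, 0, 0 };
		unsigned long scd_rttecnt[NPAGESIZES] = { 0 };
		unsigned long new_scd[NPAGESIZES] = { 40, 2, 0, 4, 0, 0 };

		switch_scd(ttecnt, scd_rttecnt, NULL, new_scd);
		assert(ttecnt[0] == 60 && scd_rttecnt[0] == 40);
		return (0);
	}
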
15003 15000
15004 15001 /*
15005 15002 * This routine is called by a process to become part of an SCD. It is called
15006 15003 * from sfmmu_tsbmiss_exception() once most of the initial work has been
15007 15004 * done by sfmmu_join_scd(). This routine must not drop the hat lock.
15008 15005 */
15009 15006 static void
15010 15007 sfmmu_finish_join_scd(sfmmu_t *sfmmup)
15011 15008 {
15012 15009 struct tsb_info *tsbinfop;
15013 15010
15014 15011 ASSERT(sfmmu_hat_lock_held(sfmmup));
15015 15012 ASSERT(sfmmup->sfmmu_scdp != NULL);
15016 15013 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD));
15017 15014 ASSERT(!SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY));
15018 15015 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ALLCTX_INVALID));
15019 15016
15020 15017 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL;
15021 15018 tsbinfop = tsbinfop->tsb_next) {
15022 15019 if (tsbinfop->tsb_flags & TSB_SWAPPED) {
15023 15020 continue;
15024 15021 }
15025 15022 ASSERT(!(tsbinfop->tsb_flags & TSB_RELOC_FLAG));
15026 15023
15027 15024 sfmmu_inv_tsb(tsbinfop->tsb_va,
15028 15025 TSB_BYTES(tsbinfop->tsb_szc));
15029 15026 }
15030 15027
15031 15028 /* Set HAT_CTX1_FLAG for all SCD ISMs */
15032 15029 sfmmu_ism_hatflags(sfmmup, 1);
15033 15030
15034 15031 SFMMU_STAT(sf_join_scd);
15035 15032 }
15036 15033
15037 15034 /*
15038 15035 * This routine checks whether there is an SCD that matches the process's
15039 15036 * region map; if not, a new SCD may be created.
15040 15037 */
15041 15038 static void
15042 15039 sfmmu_find_scd(sfmmu_t *sfmmup)
15043 15040 {
15044 15041 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
15045 15042 sf_scd_t *scdp, *new_scdp;
15046 15043 int ret;
15047 15044
15048 15045 ASSERT(srdp != NULL);
15049 15046 ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as));
15050 15047
15051 15048 mutex_enter(&srdp->srd_scd_mutex);
15052 15049 for (scdp = srdp->srd_scdp; scdp != NULL;
15053 15050 scdp = scdp->scd_next) {
15054 15051 SF_RGNMAP_EQUAL(&scdp->scd_region_map,
15055 15052 &sfmmup->sfmmu_region_map, ret);
15056 15053 if (ret == 1) {
15057 15054 SF_SCD_INCR_REF(scdp);
15058 15055 mutex_exit(&srdp->srd_scd_mutex);
15059 15056 sfmmu_join_scd(scdp, sfmmup);
15060 15057 ASSERT(scdp->scd_refcnt >= 2);
15061 15058 atomic_dec_32((volatile uint32_t *)&scdp->scd_refcnt);
15062 15059 return;
15063 15060 } else {
15064 15061 /*
15065 15062 * If the sfmmu region map is a subset of the scd
15066 15063 * region map, then the assumption is that this process
15067 15064 * will continue attaching to ISM segments until the
15068 15065 * region maps are equal.
15069 15066 */
15070 15067 SF_RGNMAP_IS_SUBSET(&scdp->scd_region_map,
15071 15068 &sfmmup->sfmmu_region_map, ret);
15072 15069 if (ret == 1) {
15073 15070 mutex_exit(&srdp->srd_scd_mutex);
15074 15071 return;
15075 15072 }
15076 15073 }
15077 15074 }
15078 15075
15079 15076 ASSERT(scdp == NULL);
15080 15077 /*
15081 15078 * No matching SCD has been found, create a new one.
15082 15079 */
15083 15080 if ((new_scdp = sfmmu_alloc_scd(srdp, &sfmmup->sfmmu_region_map)) ==
15084 15081 NULL) {
15085 15082 mutex_exit(&srdp->srd_scd_mutex);
15086 15083 return;
15087 15084 }
15088 15085
15089 15086 /*
15090 15087 * sfmmu_alloc_scd() returns with a ref count of 1 on the scd.
15091 15088 */
15092 15089
15093 15090 /* Set scd_rttecnt for shme rgns in SCD */
15094 15091 sfmmu_set_scd_rttecnt(srdp, new_scdp);
15095 15092
15096 15093 /*
15097 15094 * Link scd onto srd_scdp list and scd sfmmu onto region/iment lists.
15098 15095 */
15099 15096 sfmmu_link_scd_to_regions(srdp, new_scdp);
15100 15097 sfmmu_add_scd(&srdp->srd_scdp, new_scdp);
15101 15098 SFMMU_STAT_ADD(sf_create_scd, 1);
15102 15099
15103 15100 mutex_exit(&srdp->srd_scd_mutex);
15104 15101 sfmmu_join_scd(new_scdp, sfmmup);
15105 15102 ASSERT(new_scdp->scd_refcnt >= 2);
15106 15103 atomic_dec_32((volatile uint32_t *)&new_scdp->scd_refcnt);
15107 15104 }
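
Editor's note: a stand-alone sketch of the two bitmap tests this lookup relies on (SF_RGNMAP_EQUAL and SF_RGNMAP_IS_SUBSET): exact equality, and "every bit of the candidate map is also set in the other map" for the subset case. The word count and values are illustrative, and the kernel macros' argument order may differ from the helpers below.

	#include <assert.h>

	#define	MAP_WORDS	4

	static int
	rgnmap_equal(const unsigned long *a, const unsigned long *b)
	{
		int i;

		for (i = 0; i < MAP_WORDS; i++) {
			if (a[i] != b[i])
				return (0);
		}
		return (1);
	}

	static int
	rgnmap_is_subset(const unsigned long *sub, const unsigned long *super)
	{
		int i;

		for (i = 0; i < MAP_WORDS; i++) {
			if (sub[i] & ~super[i])
				return (0);
		}
		return (1);
	}

	int
	main(void)
	{
		unsigned long proc[MAP_WORDS] = { 0x3, 0, 0, 0 };
		unsigned long scd[MAP_WORDS] = { 0x7, 0, 0, 0 };

		assert(!rgnmap_equal(proc, scd));
		assert(rgnmap_is_subset(proc, scd));
		return (0);
	}
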
15108 15105
15109 15106 /*
15110 15107 * This routine is called by a process to remove itself from an SCD. It is
15111 15108 * either called when the process has detached from a segment or from
15112 15109 * hat_free_start() as a result of calling exit.
15113 15110 */
15114 15111 static void
15115 15112 sfmmu_leave_scd(sfmmu_t *sfmmup, uchar_t r_type)
15116 15113 {
15117 15114 sf_scd_t *scdp = sfmmup->sfmmu_scdp;
15118 15115 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
15119 15116 hatlock_t *hatlockp = TSB_HASH(sfmmup);
15120 15117 int i;
15121 15118
15122 15119 ASSERT(scdp != NULL);
15123 15120 ASSERT(srdp != NULL);
15124 15121
15125 15122 if (sfmmup->sfmmu_free) {
15126 15123 /*
15127 15124 * If the process is part of an SCD the sfmmu is unlinked
15128 15125 * from scd_sf_list.
15129 15126 */
15130 15127 mutex_enter(&scdp->scd_mutex);
15131 15128 sfmmu_from_scd_list(&scdp->scd_sf_list, sfmmup);
15132 15129 mutex_exit(&scdp->scd_mutex);
15133 15130 /*
15134 15131 * Update sfmmu_ttecnt to include the rgn ttecnt for rgns that
15135 15132 * are about to leave the SCD
15136 15133 */
15137 15134 for (i = 0; i < mmu_page_sizes; i++) {
15138 15135 ASSERT(sfmmup->sfmmu_scdrttecnt[i] ==
15139 15136 scdp->scd_rttecnt[i]);
15140 15137 atomic_add_long(&sfmmup->sfmmu_ttecnt[i],
15141 15138 sfmmup->sfmmu_scdrttecnt[i]);
15142 15139 sfmmup->sfmmu_scdrttecnt[i] = 0;
15143 15140 }
15144 15141 sfmmup->sfmmu_scdp = NULL;
15145 15142
15146 15143 SF_SCD_DECR_REF(srdp, scdp);
15147 15144 return;
15148 15145 }
15149 15146
15150 15147 ASSERT(r_type != SFMMU_REGION_ISM ||
15151 15148 SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY));
15152 15149 ASSERT(scdp->scd_refcnt);
15153 15150 ASSERT(!sfmmup->sfmmu_free);
15154 15151 ASSERT(sfmmu_hat_lock_held(sfmmup));
15155 15152 ASSERT(AS_LOCK_HELD(sfmmup->sfmmu_as));
15156 15153
15157 15154 /*
15158 15155 * Wait for ISM maps to be updated.
15159 15156 */
15160 15157 if (r_type != SFMMU_REGION_ISM) {
15161 15158 while (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY) &&
15162 15159 sfmmup->sfmmu_scdp != NULL) {
15163 15160 cv_wait(&sfmmup->sfmmu_tsb_cv,
15164 15161 HATLOCK_MUTEXP(hatlockp));
15165 15162 }
15166 15163
15167 15164 if (sfmmup->sfmmu_scdp == NULL) {
15168 15165 sfmmu_hat_exit(hatlockp);
15169 15166 return;
15170 15167 }
15171 15168 SFMMU_FLAGS_SET(sfmmup, HAT_ISMBUSY);
15172 15169 }
15173 15170
15174 15171 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)) {
15175 15172 SFMMU_FLAGS_CLEAR(sfmmup, HAT_JOIN_SCD);
15176 15173 /*
15177 15174 * Since HAT_JOIN_SCD was set our context
15178 15175 * is still invalid.
15179 15176 */
15180 15177 } else {
15181 15178 /*
15182 15179 * For a multi-thread process, we must stop
15183 15180 * all the other threads before leaving the scd.
15184 15181 */
15185 15182
15186 15183 sfmmu_invalidate_ctx(sfmmup);
15187 15184 }
15188 15185
15189 15186 /* Clear all the rid's for ISM, delete flags, etc */
15190 15187 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY));
15191 15188 sfmmu_ism_hatflags(sfmmup, 0);
15192 15189
15193 15190 /*
15194 15191 * Update sfmmu_ttecnt to include the rgn ttecnt for rgns that
15195 15192 * are in SCD before this sfmmup leaves the SCD.
15196 15193 */
15197 15194 for (i = 0; i < mmu_page_sizes; i++) {
15198 15195 ASSERT(sfmmup->sfmmu_scdrttecnt[i] ==
15199 15196 scdp->scd_rttecnt[i]);
15200 15197 atomic_add_long(&sfmmup->sfmmu_ttecnt[i],
15201 15198 sfmmup->sfmmu_scdrttecnt[i]);
15202 15199 sfmmup->sfmmu_scdrttecnt[i] = 0;
15203 15200 /* update ismttecnt to include SCD ism before hat leaves SCD */
15204 15201 sfmmup->sfmmu_ismttecnt[i] += sfmmup->sfmmu_scdismttecnt[i];
15205 15202 sfmmup->sfmmu_scdismttecnt[i] = 0;
15206 15203 }
15207 15204 /* update tsb0 inflation count */
15208 15205 sfmmup->sfmmu_tsb0_4minflcnt += scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt;
15209 15206
15210 15207 if (r_type != SFMMU_REGION_ISM) {
15211 15208 SFMMU_FLAGS_CLEAR(sfmmup, HAT_ISMBUSY);
15212 15209 }
15213 15210 sfmmup->sfmmu_scdp = NULL;
15214 15211
15215 15212 sfmmu_hat_exit(hatlockp);
15216 15213
15217 15214 /*
15218 15215 * Unlink sfmmu from scd_sf_list; this can be done without holding
15219 15216 * the hat lock as we hold the sfmmu_as lock which prevents
15220 15217 * hat_join_region from adding this thread to the scd again. Other
15221 15218 * threads check if sfmmu_scdp is NULL under hat lock and if it's NULL
15222 15219 * they won't get here, since sfmmu_leave_scd() clears sfmmu_scdp
15223 15220 * while holding the hat lock.
15224 15221 */
15225 15222 mutex_enter(&scdp->scd_mutex);
15226 15223 sfmmu_from_scd_list(&scdp->scd_sf_list, sfmmup);
15227 15224 mutex_exit(&scdp->scd_mutex);
15228 15225 SFMMU_STAT(sf_leave_scd);
15229 15226
15230 15227 SF_SCD_DECR_REF(srdp, scdp);
15231 15228 hatlockp = sfmmu_hat_enter(sfmmup);
15232 15229
15233 15230 }
15234 15231
15235 15232 /*
15236 15233 * Unlink and free up an SCD structure with a reference count of 0.
15237 15234 */
15238 15235 static void
15239 15236 sfmmu_destroy_scd(sf_srd_t *srdp, sf_scd_t *scdp, sf_region_map_t *scd_rmap)
15240 15237 {
15241 15238 sfmmu_t *scsfmmup;
15242 15239 sf_scd_t *sp;
15243 15240 hatlock_t *shatlockp;
15244 15241 int i, ret;
15245 15242
15246 15243 mutex_enter(&srdp->srd_scd_mutex);
15247 15244 for (sp = srdp->srd_scdp; sp != NULL; sp = sp->scd_next) {
15248 15245 if (sp == scdp)
15249 15246 break;
15250 15247 }
15251 15248 if (sp == NULL || sp->scd_refcnt) {
15252 15249 mutex_exit(&srdp->srd_scd_mutex);
15253 15250 return;
15254 15251 }
15255 15252
15256 15253 /*
15257 15254 * It is possible that the scd has been freed and reallocated with a
15258 15255 * different region map while we've been waiting for the srd_scd_mutex.
15259 15256 */
15260 15257 SF_RGNMAP_EQUAL(scd_rmap, &sp->scd_region_map, ret);
15261 15258 if (ret != 1) {
15262 15259 mutex_exit(&srdp->srd_scd_mutex);
15263 15260 return;
15264 15261 }
15265 15262
15266 15263 ASSERT(scdp->scd_sf_list == NULL);
15267 15264 /*
15268 15265 * Unlink scd from srd_scdp list.
15269 15266 */
15270 15267 sfmmu_remove_scd(&srdp->srd_scdp, scdp);
15271 15268 mutex_exit(&srdp->srd_scd_mutex);
15272 15269
15273 15270 sfmmu_unlink_scd_from_regions(srdp, scdp);
15274 15271
15275 15272 /* Clear shared context tsb and release ctx */
15276 15273 scsfmmup = scdp->scd_sfmmup;
15277 15274
15278 15275 /*
15279 15276 * create a barrier so that scd will not be destroyed
15280 15277 * if another thread still holds the same shared hat lock.
15281 15278 * E.g., sfmmu_tsbmiss_exception() needs to acquire the
15282 15279 * shared hat lock before checking the shared tsb reloc flag.
15283 15280 */
15284 15281 shatlockp = sfmmu_hat_enter(scsfmmup);
15285 15282 sfmmu_hat_exit(shatlockp);
15286 15283
15287 15284 sfmmu_free_scd_tsbs(scsfmmup);
15288 15285
15289 15286 for (i = 0; i < SFMMU_L1_HMERLINKS; i++) {
15290 15287 if (scsfmmup->sfmmu_hmeregion_links[i] != NULL) {
15291 15288 kmem_free(scsfmmup->sfmmu_hmeregion_links[i],
15292 15289 SFMMU_L2_HMERLINKS_SIZE);
15293 15290 scsfmmup->sfmmu_hmeregion_links[i] = NULL;
15294 15291 }
15295 15292 }
15296 15293 kmem_cache_free(sfmmuid_cache, scsfmmup);
15297 15294 kmem_cache_free(scd_cache, scdp);
15298 15295 SFMMU_STAT(sf_destroy_scd);
15299 15296 }
15300 15297
15301 15298 /*
15302 15299 * Modifies the HAT_CTX1_FLAG for each of the ISM segments which correspond to
15303 15300 * bits which are set in the SCD's ISM region map. This flag indicates to
15304 15301 * the tsbmiss handler that mappings for these segments should be loaded using
15305 15302 * the shared context.
15306 15303 */
15307 15304 static void
15308 15305 sfmmu_ism_hatflags(sfmmu_t *sfmmup, int addflag)
15309 15306 {
15310 15307 sf_scd_t *scdp = sfmmup->sfmmu_scdp;
15311 15308 ism_blk_t *ism_blkp;
15312 15309 ism_map_t *ism_map;
15313 15310 int i, rid;
15314 15311
15315 15312 ASSERT(sfmmup->sfmmu_iblk != NULL);
15316 15313 ASSERT(scdp != NULL);
15317 15314 /*
15318 15315 * Note that the caller either set HAT_ISMBUSY flag or checked
15319 15316 * under hat lock that HAT_ISMBUSY was not set by another thread.
15320 15317 */
15321 15318 ASSERT(sfmmu_hat_lock_held(sfmmup));
15322 15319
15323 15320 ism_blkp = sfmmup->sfmmu_iblk;
15324 15321 while (ism_blkp != NULL) {
15325 15322 ism_map = ism_blkp->iblk_maps;
15326 15323 for (i = 0; i < ISM_MAP_SLOTS && ism_map[i].imap_ismhat; i++) {
15327 15324 rid = ism_map[i].imap_rid;
15328 15325 if (rid == SFMMU_INVALID_ISMRID) {
15329 15326 continue;
15330 15327 }
15331 15328 ASSERT(rid >= 0 && rid < SFMMU_MAX_ISM_REGIONS);
15332 15329 if (SF_RGNMAP_TEST(scdp->scd_ismregion_map, rid) &&
15333 15330 addflag) {
15334 15331 ism_map[i].imap_hatflags |=
15335 15332 HAT_CTX1_FLAG;
15336 15333 } else {
15337 15334 ism_map[i].imap_hatflags &=
15338 15335 ~HAT_CTX1_FLAG;
15339 15336 }
15340 15337 }
15341 15338 ism_blkp = ism_blkp->iblk_next;
15342 15339 }
15343 15340 }
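
Editor's note: a minimal user-space sketch of the flag pass above: walk a chain of fixed-size map blocks and set or clear a per-entry flag depending on whether the entry's region id is present in a bitmap. The types, sizes, and names are illustrative stand-ins, not the kernel's ism_blk_t/ism_map_t.

	#define	MAP_SLOTS	4
	#define	CTX1_FLAG	0x1
	#define	INVALID_RID	(~0U)

	typedef struct map_ent {
		unsigned int rid;
		unsigned int flags;
	} map_ent_t;

	typedef struct map_blk {
		map_ent_t ents[MAP_SLOTS];
		struct map_blk *next;
	} map_blk_t;

	static int
	rid_in_map(const unsigned long *bitmap, unsigned int rid)
	{
		return ((bitmap[rid / 64] >> (rid % 64)) & 1);
	}

	static void
	set_ctx1_flags(map_blk_t *blkp, const unsigned long *bitmap, int addflag)
	{
		int i;

		for (; blkp != NULL; blkp = blkp->next) {
			for (i = 0; i < MAP_SLOTS; i++) {
				unsigned int rid = blkp->ents[i].rid;

				if (rid == INVALID_RID)
					continue;
				if (addflag && rid_in_map(bitmap, rid))
					blkp->ents[i].flags |= CTX1_FLAG;
				else
					blkp->ents[i].flags &= ~CTX1_FLAG;
			}
		}
	}

	int
	main(void)
	{
		unsigned long bitmap[2] = { 0x2, 0 };	/* only rid 1 is shared */
		map_blk_t blk = { { { 0, 0 }, { 1, 0 }, { INVALID_RID, 0 },
		    { INVALID_RID, 0 } }, NULL };

		set_ctx1_flags(&blk, bitmap, 1);
		return ((blk.ents[1].flags == CTX1_FLAG &&
		    blk.ents[0].flags == 0) ? 0 : 1);
	}
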
15344 15341
15345 15342 static int
15346 15343 sfmmu_srd_lock_held(sf_srd_t *srdp)
15347 15344 {
15348 15345 return (MUTEX_HELD(&srdp->srd_mutex));
15349 15346 }
15350 15347
15351 15348 /* ARGSUSED */
15352 15349 static int
15353 15350 sfmmu_scdcache_constructor(void *buf, void *cdrarg, int kmflags)
15354 15351 {
15355 15352 sf_scd_t *scdp = (sf_scd_t *)buf;
15356 15353
15357 15354 bzero(buf, sizeof (sf_scd_t));
15358 15355 mutex_init(&scdp->scd_mutex, NULL, MUTEX_DEFAULT, NULL);
15359 15356 return (0);
15360 15357 }
15361 15358
15362 15359 /* ARGSUSED */
15363 15360 static void
15364 15361 sfmmu_scdcache_destructor(void *buf, void *cdrarg)
15365 15362 {
15366 15363 sf_scd_t *scdp = (sf_scd_t *)buf;
15367 15364
15368 15365 mutex_destroy(&scdp->scd_mutex);
15369 15366 }
15370 15367
15371 15368 /*
15372 15369 * The listp parameter is a pointer to a list of hmeblks which are partially
15373 15370 * freed as a result of calling sfmmu_hblk_hash_rm(). The last phase of the
15374 15371 * freeing process is to cross-call all cpus to ensure that there are no
15375 15372 * remaining cached references.
15376 15373 *
15377 15374 * If the local generation number is less than the global then we can free
15378 15375 * hmeblks which are already on the pending queue as another cpu has completed
15379 15376 * the cross-call.
15380 15377 *
15381 15378 * We cross-call to make sure that there are no threads on other cpus accessing
15382 15379 * these hmblks and then complete the process of freeing them under the
15383 15380 * following conditions:
15384 15381 * The total number of pending hmeblks is greater than the threshold
15385 15382 * The reserve list has fewer than HBLK_RESERVE_CNT hmeblks
15386 15383 * It is at least 1 second since the last time we cross-called
15387 15384 *
15388 15385 * Otherwise, we add the hmeblks to the per-cpu pending queue.
15389 15386 */
15390 15387 static void
15391 15388 sfmmu_hblks_list_purge(struct hme_blk **listp, int dontfree)
15392 15389 {
15393 15390 struct hme_blk *hblkp, *pr_hblkp = NULL;
15394 15391 int count = 0;
15395 15392 cpuset_t cpuset = cpu_ready_set;
15396 15393 cpu_hme_pend_t *cpuhp;
15397 15394 timestruc_t now;
15398 15395 int one_second_expired = 0;
15399 15396
15400 15397 gethrestime_lasttick(&now);
15401 15398
15402 15399 for (hblkp = *listp; hblkp != NULL; hblkp = hblkp->hblk_next) {
15403 15400 ASSERT(hblkp->hblk_shw_bit == 0);
15404 15401 ASSERT(hblkp->hblk_shared == 0);
15405 15402 count++;
15406 15403 pr_hblkp = hblkp;
15407 15404 }
15408 15405
15409 15406 cpuhp = &cpu_hme_pend[CPU->cpu_seqid];
15410 15407 mutex_enter(&cpuhp->chp_mutex);
15411 15408
15412 15409 if ((cpuhp->chp_count + count) == 0) {
15413 15410 mutex_exit(&cpuhp->chp_mutex);
15414 15411 return;
15415 15412 }
15416 15413
15417 15414 if ((now.tv_sec - cpuhp->chp_timestamp) > 1) {
15418 15415 one_second_expired = 1;
15419 15416 }
15420 15417
15421 15418 if (!dontfree && (freehblkcnt < HBLK_RESERVE_CNT ||
15422 15419 (cpuhp->chp_count + count) > cpu_hme_pend_thresh ||
15423 15420 one_second_expired)) {
15424 15421 /* Append global list to local */
15425 15422 if (pr_hblkp == NULL) {
15426 15423 *listp = cpuhp->chp_listp;
15427 15424 } else {
15428 15425 pr_hblkp->hblk_next = cpuhp->chp_listp;
15429 15426 }
15430 15427 cpuhp->chp_listp = NULL;
15431 15428 cpuhp->chp_count = 0;
15432 15429 cpuhp->chp_timestamp = now.tv_sec;
15433 15430 mutex_exit(&cpuhp->chp_mutex);
15434 15431
15435 15432 kpreempt_disable();
15436 15433 CPUSET_DEL(cpuset, CPU->cpu_id);
15437 15434 xt_sync(cpuset);
15438 15435 xt_sync(cpuset);
15439 15436 kpreempt_enable();
15440 15437
15441 15438 /*
15442 15439 * At this stage we know that no trap handlers on other
15443 15440 * cpus can have references to hmeblks on the list.
15444 15441 */
15445 15442 sfmmu_hblk_free(listp);
15446 15443 } else if (*listp != NULL) {
15447 15444 pr_hblkp->hblk_next = cpuhp->chp_listp;
15448 15445 cpuhp->chp_listp = *listp;
15449 15446 cpuhp->chp_count += count;
15450 15447 *listp = NULL;
15451 15448 mutex_exit(&cpuhp->chp_mutex);
15452 15449 } else {
15453 15450 mutex_exit(&cpuhp->chp_mutex);
15454 15451 }
15455 15452 }
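
Editor's note: a small sketch of the flush policy spelled out in the block comment above: free the pending hmeblks only when the queue is long, the reserve pool is low, or more than a second has passed since the last cross-call; otherwise just queue them. The threshold, reserve floor, and sample values below are invented; the kernel's actual knobs are cpu_hme_pend_thresh and HBLK_RESERVE_CNT.

	#include <stdio.h>
	#include <time.h>

	#define	PEND_THRESH	128
	#define	RESERVE_MIN	32

	static int
	should_flush(int pending, int incoming, int reserve,
	    time_t last, time_t now)
	{
		return (pending + incoming > PEND_THRESH ||
		    reserve < RESERVE_MIN ||
		    now - last > 1);
	}

	int
	main(void)
	{
		time_t now = time(NULL);

		/* long queue: flush */
		(void) printf("%d\n", should_flush(120, 16, 100, now, now));
		/* short queue, plenty of reserve, flushed recently: defer */
		(void) printf("%d\n", should_flush(4, 2, 100, now, now));
		/* stale: more than a second since the last cross-call */
		(void) printf("%d\n", should_flush(4, 2, 100, now - 5, now));
		return (0);
	}
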
15456 15453
15457 15454 /*
15458 15455 * Add an hmeblk to the hash list.
15459 15456 */
15460 15457 void
15461 15458 sfmmu_hblk_hash_add(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
15462 15459 uint64_t hblkpa)
15463 15460 {
15464 15461 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
15465 15462 #ifdef DEBUG
15466 15463 if (hmebp->hmeblkp == NULL) {
15467 15464 ASSERT(hmebp->hmeh_nextpa == HMEBLK_ENDPA);
15468 15465 }
15469 15466 #endif /* DEBUG */
15470 15467
15471 15468 hmeblkp->hblk_nextpa = hmebp->hmeh_nextpa;
15472 15469 /*
15473 15470 * Since the TSB miss handler now does not lock the hash chain before
15474 15471 * walking it, make sure that the hmeblk's nextpa is globally visible
15475 15472 * before we make the hmeblk globally visible by updating the chain root
15476 15473 * pointer in the hash bucket.
15477 15474 */
15478 15475 membar_producer();
15479 15476 hmebp->hmeh_nextpa = hblkpa;
15480 15477 hmeblkp->hblk_next = hmebp->hmeblkp;
15481 15478 hmebp->hmeblkp = hmeblkp;
15482 15479
15483 15480 }
15484 15481
15485 15482 /*
15486 15483 * This function is the first part of a 2 part process to remove an hmeblk
15487 15484 * from the hash chain. In this phase we unlink the hmeblk from the hash chain
15488 15485 * but leave the next physical pointer unchanged. The hmeblk is then linked onto
15489 15486 * a per-cpu pending list using the virtual address pointer.
15490 15487 *
15491 15488 * TSB miss trap handlers that start after this phase will no longer see
15492 15489 * this hmeblk. TSB miss handlers that still cache this hmeblk in a register
15493 15490 * can still use it for further chain traversal because we haven't yet modified
15494 15491 * the next physical pointer or freed it.
15495 15492 *
15496 15493 * In the second phase of hmeblk removal we'll issue a barrier xcall before
15497 15494 * we reuse or free this hmeblk. This will make sure all lingering references to
15498 15495 * the hmeblk after first phase disappear before we finally reclaim it.
15499 15496 * This scheme eliminates the need for TSB miss handlers to lock hmeblk chains
15500 15497 * during their traversal.
15501 15498 *
15502 15499 * The hmehash_mutex must be held when calling this function.
15503 15500 *
15504 15501 * Input:
15505 15502 * hmebp - hme hash bucket pointer
15506 15503 * hmeblkp - address of hmeblk to be removed
15507 15504 * pr_hblk - virtual address of previous hmeblkp
15508 15505 * listp - pointer to list of hmeblks linked by virtual address
15509 15506 * free_now flag - indicates that a complete removal from the hash chains
15510 15507 * is necessary.
15511 15508 *
15512 15509 * It is inefficient to use the free_now flag as a cross-call is required to
15513 15510 * remove a single hmeblk from the hash chain, but it is necessary when
15514 15511 * hmeblks are in short supply.
15515 15512 */
15516 15513 void
15517 15514 sfmmu_hblk_hash_rm(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
15518 15515 struct hme_blk *pr_hblk, struct hme_blk **listp,
15519 15516 int free_now)
15520 15517 {
15521 15518 int shw_size, vshift;
15522 15519 struct hme_blk *shw_hblkp;
15523 15520 uint_t shw_mask, newshw_mask;
15524 15521 caddr_t vaddr;
15525 15522 int size;
15526 15523 cpuset_t cpuset = cpu_ready_set;
15527 15524
15528 15525 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
15529 15526
15530 15527 if (hmebp->hmeblkp == hmeblkp) {
15531 15528 hmebp->hmeh_nextpa = hmeblkp->hblk_nextpa;
15532 15529 hmebp->hmeblkp = hmeblkp->hblk_next;
15533 15530 } else {
15534 15531 pr_hblk->hblk_nextpa = hmeblkp->hblk_nextpa;
15535 15532 pr_hblk->hblk_next = hmeblkp->hblk_next;
15536 15533 }
15537 15534
15538 15535 size = get_hblk_ttesz(hmeblkp);
15539 15536 shw_hblkp = hmeblkp->hblk_shadow;
15540 15537 if (shw_hblkp) {
15541 15538 ASSERT(hblktosfmmu(hmeblkp) != KHATID);
15542 15539 ASSERT(!hmeblkp->hblk_shared);
15543 15540 #ifdef DEBUG
15544 15541 if (mmu_page_sizes == max_mmu_page_sizes) {
15545 15542 ASSERT(size < TTE256M);
15546 15543 } else {
15547 15544 ASSERT(size < TTE4M);
15548 15545 }
15549 15546 #endif /* DEBUG */
15550 15547
15551 15548 shw_size = get_hblk_ttesz(shw_hblkp);
15552 15549 vaddr = (caddr_t)get_hblk_base(hmeblkp);
15553 15550 vshift = vaddr_to_vshift(shw_hblkp->hblk_tag, vaddr, shw_size);
15554 15551 ASSERT(vshift < 8);
15555 15552 /*
15556 15553 * Atomically clear shadow mask bit
15557 15554 */
15558 15555 do {
15559 15556 shw_mask = shw_hblkp->hblk_shw_mask;
15560 15557 ASSERT(shw_mask & (1 << vshift));
15561 15558 newshw_mask = shw_mask & ~(1 << vshift);
15562 15559 newshw_mask = atomic_cas_32(&shw_hblkp->hblk_shw_mask,
15563 15560 shw_mask, newshw_mask);
15564 15561 } while (newshw_mask != shw_mask);
15565 15562 hmeblkp->hblk_shadow = NULL;
15566 15563 }
15567 15564 hmeblkp->hblk_shw_bit = 0;
15568 15565
15569 15566 if (hmeblkp->hblk_shared) {
15570 15567 #ifdef DEBUG
15571 15568 sf_srd_t *srdp;
15572 15569 sf_region_t *rgnp;
15573 15570 uint_t rid;
15574 15571
15575 15572 srdp = hblktosrd(hmeblkp);
15576 15573 ASSERT(srdp != NULL && srdp->srd_refcnt != 0);
15577 15574 rid = hmeblkp->hblk_tag.htag_rid;
15578 15575 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
15579 15576 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
15580 15577 rgnp = srdp->srd_hmergnp[rid];
15581 15578 ASSERT(rgnp != NULL);
15582 15579 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid);
15583 15580 #endif /* DEBUG */
15584 15581 hmeblkp->hblk_shared = 0;
15585 15582 }
15586 15583 if (free_now) {
15587 15584 kpreempt_disable();
15588 15585 CPUSET_DEL(cpuset, CPU->cpu_id);
15589 15586 xt_sync(cpuset);
15590 15587 xt_sync(cpuset);
15591 15588 kpreempt_enable();
15592 15589
15593 15590 hmeblkp->hblk_nextpa = HMEBLK_ENDPA;
15594 15591 hmeblkp->hblk_next = NULL;
15595 15592 } else {
15596 15593 /* Append hmeblkp to listp for processing later. */
15597 15594 hmeblkp->hblk_next = *listp;
15598 15595 *listp = hmeblkp;
15599 15596 }
15600 15597 }
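
Editor's note: a single-threaded sketch of the two-phase scheme described above. Phase one unlinks a block from the chain readers walk but keeps the block's own forward link usable; phase two frees the queued blocks only after a barrier has drained any readers, here a no-op stand-in for the pair of xt_sync() cross-calls. All types and helpers below are illustrative.

	#include <stdlib.h>

	typedef struct blk {
		struct blk *next;	/* chain link that readers traverse */
		struct blk *pend_next;	/* pending-list link, writer-side only */
	} blk_t;

	static blk_t *pending;

	static void
	wait_for_readers(void)
	{
		/* stand-in for the xt_sync() barrier used by the kernel */
	}

	/*
	 * Phase 1: unlink blk from the chain but leave blk->next alone, so a
	 * reader that already fetched blk can keep walking; queue it for later.
	 */
	static void
	hash_rm(blk_t **chainp, blk_t *prev, blk_t *blk)
	{
		if (prev == NULL)
			*chainp = blk->next;
		else
			prev->next = blk->next;
		blk->pend_next = pending;
		pending = blk;
	}

	/*
	 * Phase 2: once the barrier completes no reader can still reference
	 * the queued blocks, so they can finally be freed.
	 */
	static void
	purge_pending(void)
	{
		blk_t *blkp, *next;

		wait_for_readers();
		for (blkp = pending; blkp != NULL; blkp = next) {
			next = blkp->pend_next;
			free(blkp);
		}
		pending = NULL;
	}

	int
	main(void)
	{
		blk_t *a = calloc(1, sizeof (blk_t));
		blk_t *b = calloc(1, sizeof (blk_t));
		blk_t *chain;

		b->next = NULL;
		a->next = b;
		chain = a;			/* chain: a -> b */

		hash_rm(&chain, a, b);		/* phase 1: unlink b */
		hash_rm(&chain, NULL, a);	/* phase 1: unlink a (head) */
		purge_pending();		/* phase 2: barrier, then free */
		return (chain == NULL ? 0 : 1);
	}
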
15601 15598
15602 15599 /*
15603 15600 * This routine is called when memory is in short supply and returns a free
15604 15601 * hmeblk of the requested size from the cpu pending lists.
15605 15602 */
15606 15603 static struct hme_blk *
15607 15604 sfmmu_check_pending_hblks(int size)
15608 15605 {
15609 15606 int i;
15610 15607 struct hme_blk *hmeblkp = NULL, *last_hmeblkp;
15611 15608 int found_hmeblk;
15612 15609 cpuset_t cpuset = cpu_ready_set;
15613 15610 cpu_hme_pend_t *cpuhp;
15614 15611
15615 15612 /* Flush cpu hblk pending queues */
15616 15613 for (i = 0; i < NCPU; i++) {
15617 15614 cpuhp = &cpu_hme_pend[i];
15618 15615 if (cpuhp->chp_listp != NULL) {
15619 15616 mutex_enter(&cpuhp->chp_mutex);
15620 15617 if (cpuhp->chp_listp == NULL) {
15621 15618 mutex_exit(&cpuhp->chp_mutex);
15622 15619 continue;
15623 15620 }
15624 15621 found_hmeblk = 0;
15625 15622 last_hmeblkp = NULL;
15626 15623 for (hmeblkp = cpuhp->chp_listp; hmeblkp != NULL;
15627 15624 hmeblkp = hmeblkp->hblk_next) {
15628 15625 if (get_hblk_ttesz(hmeblkp) == size) {
15629 15626 if (last_hmeblkp == NULL) {
15630 15627 cpuhp->chp_listp =
15631 15628 hmeblkp->hblk_next;
15632 15629 } else {
15633 15630 last_hmeblkp->hblk_next =
15634 15631 hmeblkp->hblk_next;
15635 15632 }
15636 15633 ASSERT(cpuhp->chp_count > 0);
15637 15634 cpuhp->chp_count--;
15638 15635 found_hmeblk = 1;
15639 15636 break;
15640 15637 } else {
15641 15638 last_hmeblkp = hmeblkp;
15642 15639 }
15643 15640 }
15644 15641 mutex_exit(&cpuhp->chp_mutex);
15645 15642
15646 15643 if (found_hmeblk) {
15647 15644 kpreempt_disable();
15648 15645 CPUSET_DEL(cpuset, CPU->cpu_id);
15649 15646 xt_sync(cpuset);
15650 15647 xt_sync(cpuset);
15651 15648 kpreempt_enable();
15652 15649 return (hmeblkp);
15653 15650 }
15654 15651 }
15655 15652 }
15656 15653 return (NULL);
15657 15654 }
↓ open down ↓ |
2389 lines elided |
↑ open up ↑ |