OS-7753 THREAD_KPRI_RELEASE does nothing of the sort
Reviewed by: Bryan Cantrill <bryan@joyent.com>
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
--- old/usr/src/uts/sfmmu/vm/hat_sfmmu.c
+++ new/usr/src/uts/sfmmu/vm/hat_sfmmu.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24 /*
25 25 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
26 26 * Copyright 2016 Gary Mills
27 + * Copyright 2019 Joyent, Inc.
27 28 */
28 29
29 30 /*
30 31 * VM - Hardware Address Translation management for Spitfire MMU.
31 32 *
32 33 * This file implements the machine specific hardware translation
33 34 * needed by the VM system. The machine independent interface is
34 35 * described in <vm/hat.h> while the machine dependent interface
35 36 * and data structures are described in <vm/hat_sfmmu.h>.
36 37 *
37 38 * The hat layer manages the address translation hardware as a cache
38 39 * driven by calls from the higher levels in the VM system.
39 40 */
40 41
41 42 #include <sys/types.h>
42 43 #include <sys/kstat.h>
43 44 #include <vm/hat.h>
44 45 #include <vm/hat_sfmmu.h>
45 46 #include <vm/page.h>
46 47 #include <sys/pte.h>
47 48 #include <sys/systm.h>
48 49 #include <sys/mman.h>
49 50 #include <sys/sysmacros.h>
50 51 #include <sys/machparam.h>
51 52 #include <sys/vtrace.h>
52 53 #include <sys/kmem.h>
53 54 #include <sys/mmu.h>
54 55 #include <sys/cmn_err.h>
55 56 #include <sys/cpu.h>
56 57 #include <sys/cpuvar.h>
57 58 #include <sys/debug.h>
58 59 #include <sys/lgrp.h>
59 60 #include <sys/archsystm.h>
60 61 #include <sys/machsystm.h>
61 62 #include <sys/vmsystm.h>
62 63 #include <vm/as.h>
63 64 #include <vm/seg.h>
64 65 #include <vm/seg_kp.h>
65 66 #include <vm/seg_kmem.h>
66 67 #include <vm/seg_kpm.h>
67 68 #include <vm/rm.h>
68 69 #include <sys/t_lock.h>
69 70 #include <sys/obpdefs.h>
70 71 #include <sys/vm_machparam.h>
71 72 #include <sys/var.h>
72 73 #include <sys/trap.h>
73 74 #include <sys/machtrap.h>
74 75 #include <sys/scb.h>
75 76 #include <sys/bitmap.h>
76 77 #include <sys/machlock.h>
77 78 #include <sys/membar.h>
78 79 #include <sys/atomic.h>
79 80 #include <sys/cpu_module.h>
80 81 #include <sys/prom_debug.h>
81 82 #include <sys/ksynch.h>
82 83 #include <sys/mem_config.h>
83 84 #include <sys/mem_cage.h>
84 85 #include <vm/vm_dep.h>
85 86 #include <sys/fpu/fpusystm.h>
86 87 #include <vm/mach_kpm.h>
87 88 #include <sys/callb.h>
88 89
89 90 #ifdef DEBUG
90 91 #define SFMMU_VALIDATE_HMERID(hat, rid, saddr, len) \
91 92 if (SFMMU_IS_SHMERID_VALID(rid)) { \
92 93 caddr_t _eaddr = (saddr) + (len); \
93 94 sf_srd_t *_srdp; \
94 95 sf_region_t *_rgnp; \
95 96 ASSERT((rid) < SFMMU_MAX_HME_REGIONS); \
96 97 ASSERT(SF_RGNMAP_TEST(hat->sfmmu_hmeregion_map, rid)); \
97 98 ASSERT((hat) != ksfmmup); \
98 99 _srdp = (hat)->sfmmu_srdp; \
99 100 ASSERT(_srdp != NULL); \
100 101 ASSERT(_srdp->srd_refcnt != 0); \
101 102 _rgnp = _srdp->srd_hmergnp[(rid)]; \
102 103 ASSERT(_rgnp != NULL && _rgnp->rgn_id == rid); \
103 104 ASSERT(_rgnp->rgn_refcnt != 0); \
104 105 ASSERT(!(_rgnp->rgn_flags & SFMMU_REGION_FREE)); \
105 106 ASSERT((_rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == \
106 107 SFMMU_REGION_HME); \
107 108 ASSERT((saddr) >= _rgnp->rgn_saddr); \
108 109 ASSERT((saddr) < _rgnp->rgn_saddr + _rgnp->rgn_size); \
109 110 ASSERT(_eaddr > _rgnp->rgn_saddr); \
110 111 ASSERT(_eaddr <= _rgnp->rgn_saddr + _rgnp->rgn_size); \
111 112 }
112 113
113 -#define SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid) \
114 -{ \
115 - caddr_t _hsva; \
116 - caddr_t _heva; \
117 - caddr_t _rsva; \
118 - caddr_t _reva; \
119 - int _ttesz = get_hblk_ttesz(hmeblkp); \
120 - int _flagtte; \
121 - ASSERT((srdp)->srd_refcnt != 0); \
122 - ASSERT((rid) < SFMMU_MAX_HME_REGIONS); \
123 - ASSERT((rgnp)->rgn_id == rid); \
124 - ASSERT(!((rgnp)->rgn_flags & SFMMU_REGION_FREE)); \
125 - ASSERT(((rgnp)->rgn_flags & SFMMU_REGION_TYPE_MASK) == \
126 - SFMMU_REGION_HME); \
127 - ASSERT(_ttesz <= (rgnp)->rgn_pgszc); \
128 - _hsva = (caddr_t)get_hblk_base(hmeblkp); \
129 - _heva = get_hblk_endaddr(hmeblkp); \
130 - _rsva = (caddr_t)P2ALIGN( \
131 - (uintptr_t)(rgnp)->rgn_saddr, HBLK_MIN_BYTES); \
132 - _reva = (caddr_t)P2ROUNDUP( \
133 - (uintptr_t)((rgnp)->rgn_saddr + (rgnp)->rgn_size), \
134 - HBLK_MIN_BYTES); \
135 - ASSERT(_hsva >= _rsva); \
136 - ASSERT(_hsva < _reva); \
137 - ASSERT(_heva > _rsva); \
138 - ASSERT(_heva <= _reva); \
139 - _flagtte = (_ttesz < HBLK_MIN_TTESZ) ? HBLK_MIN_TTESZ : \
140 - _ttesz; \
141 - ASSERT(rgnp->rgn_hmeflags & (0x1 << _flagtte)); \
114 +#define SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid) \
115 +{ \
116 + caddr_t _hsva; \
117 + caddr_t _heva; \
118 + caddr_t _rsva; \
119 + caddr_t _reva; \
120 + int _ttesz = get_hblk_ttesz(hmeblkp); \
121 + int _flagtte; \
122 + ASSERT((srdp)->srd_refcnt != 0); \
123 + ASSERT((rid) < SFMMU_MAX_HME_REGIONS); \
124 + ASSERT((rgnp)->rgn_id == rid); \
125 + ASSERT(!((rgnp)->rgn_flags & SFMMU_REGION_FREE)); \
126 + ASSERT(((rgnp)->rgn_flags & SFMMU_REGION_TYPE_MASK) == \
127 + SFMMU_REGION_HME); \
128 + ASSERT(_ttesz <= (rgnp)->rgn_pgszc); \
129 + _hsva = (caddr_t)get_hblk_base(hmeblkp); \
130 + _heva = get_hblk_endaddr(hmeblkp); \
131 + _rsva = (caddr_t)P2ALIGN( \
132 + (uintptr_t)(rgnp)->rgn_saddr, HBLK_MIN_BYTES); \
133 + _reva = (caddr_t)P2ROUNDUP( \
134 + (uintptr_t)((rgnp)->rgn_saddr + (rgnp)->rgn_size), \
135 + HBLK_MIN_BYTES); \
136 + ASSERT(_hsva >= _rsva); \
137 + ASSERT(_hsva < _reva); \
138 + ASSERT(_heva > _rsva); \
139 + ASSERT(_heva <= _reva); \
140 + _flagtte = (_ttesz < HBLK_MIN_TTESZ) ? HBLK_MIN_TTESZ : \
141 + _ttesz; \
142 + ASSERT(rgnp->rgn_hmeflags & (0x1 << _flagtte)); \
142 143 }
143 144
144 145 #else /* DEBUG */
145 146 #define SFMMU_VALIDATE_HMERID(hat, rid, addr, len)
146 147 #define SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid)
147 148 #endif /* DEBUG */
148 149
149 150 #if defined(SF_ERRATA_57)
150 151 extern caddr_t errata57_limit;
151 152 #endif
152 153
153 154 #define HME8BLK_SZ_RND ((roundup(HME8BLK_SZ, sizeof (int64_t))) / \
154 155 (sizeof (int64_t)))
155 156 #define HBLK_RESERVE ((struct hme_blk *)hblk_reserve)
156 157
157 158 #define HBLK_RESERVE_CNT 128
158 159 #define HBLK_RESERVE_MIN 20
159 160
160 161 static struct hme_blk *freehblkp;
161 162 static kmutex_t freehblkp_lock;
162 163 static int freehblkcnt;
163 164
164 165 static int64_t hblk_reserve[HME8BLK_SZ_RND];
165 166 static kmutex_t hblk_reserve_lock;
166 167 static kthread_t *hblk_reserve_thread;
167 168
168 169 static nucleus_hblk8_info_t nucleus_hblk8;
169 170 static nucleus_hblk1_info_t nucleus_hblk1;
170 171
171 172 /*
172 173 * Data to manage per-cpu hmeblk pending queues, hmeblks are queued here
173 174 * after the initial phase of removing an hmeblk from the hash chain, see
174 175 * the detailed comment in sfmmu_hblk_hash_rm() for further details.
175 176 */
176 177 static cpu_hme_pend_t *cpu_hme_pend;
177 178 static uint_t cpu_hme_pend_thresh;
178 179 /*
179 180 * SFMMU specific hat functions
180 181 */
181 182 void hat_pagecachectl(struct page *, int);
182 183
183 184 /* flags for hat_pagecachectl */
184 185 #define HAT_CACHE 0x1
185 186 #define HAT_UNCACHE 0x2
186 187 #define HAT_TMPNC 0x4
187 188
188 189 /*
189 190 * Flag to allow the creation of non-cacheable translations
190 191 * to system memory. It is off by default. At the moment this
191 192 * flag is used by the ecache error injector. The error injector
192 193 * will turn it on when creating such a translation then shut it
193 194 * off when it's finished.
194 195 */
195 196
196 197 int sfmmu_allow_nc_trans = 0;
197 198
198 199 /*
199 200 * Flag to disable large page support.
200 - * value of 1 => disable all large pages.
201 + * value of 1 => disable all large pages.
201 202 * bits 1, 2, and 3 are to disable 64K, 512K and 4M pages respectively.
202 203 *
203 204 * For example, use the value 0x4 to disable 512K pages.
204 205 *
205 206 */
206 207 #define LARGE_PAGES_OFF 0x1
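For illustration (not part of this change): since the bit for a given page size is (1 << TTEszc), a setting of (1 << TTE64K) | (1 << TTE4M) == 0xa would disable both 64K and 4M pages while leaving 512K pages available.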
207 208
208 209 /*
209 210 * The disable_large_pages and disable_ism_large_pages variables control
210 211 * hat_memload_array and the page sizes to be used by ISM and the kernel.
211 212 *
212 213 * The disable_auto_data_large_pages and disable_auto_text_large_pages variables
213 214 * are only used to control which OOB pages to use at upper VM segment creation
214 215 * time, and are set in hat_init_pagesizes and used in the map_pgsz* routines.
215 216 * Their values may come from platform or CPU specific code to disable page
216 217 * sizes that should not be used.
217 218 *
218 219 * WARNING: 512K pages are currently not supported for ISM/DISM.
219 220 */
220 221 uint_t disable_large_pages = 0;
221 222 uint_t disable_ism_large_pages = (1 << TTE512K);
222 223 uint_t disable_auto_data_large_pages = 0;
223 224 uint_t disable_auto_text_large_pages = 0;
224 225
225 226 /*
226 227 * Private sfmmu data structures for hat management
227 228 */
228 229 static struct kmem_cache *sfmmuid_cache;
229 230 static struct kmem_cache *mmuctxdom_cache;
230 231
231 232 /*
232 233 * Private sfmmu data structures for tsb management
233 234 */
234 235 static struct kmem_cache *sfmmu_tsbinfo_cache;
235 236 static struct kmem_cache *sfmmu_tsb8k_cache;
236 237 static struct kmem_cache *sfmmu_tsb_cache[NLGRPS_MAX];
237 238 static vmem_t *kmem_bigtsb_arena;
238 239 static vmem_t *kmem_tsb_arena;
239 240
240 241 /*
241 242 * sfmmu static variables for hmeblk resource management.
242 243 */
243 244 static vmem_t *hat_memload1_arena; /* HAT translation arena for sfmmu1_cache */
244 245 static struct kmem_cache *sfmmu8_cache;
245 246 static struct kmem_cache *sfmmu1_cache;
246 247 static struct kmem_cache *pa_hment_cache;
247 248
248 -static kmutex_t ism_mlist_lock; /* mutex for ism mapping list */
249 +static kmutex_t ism_mlist_lock; /* mutex for ism mapping list */
249 250 /*
250 251 * private data for ism
251 252 */
252 253 static struct kmem_cache *ism_blk_cache;
253 254 static struct kmem_cache *ism_ment_cache;
254 255 #define ISMID_STARTADDR NULL
255 256
256 257 /*
257 258 * Region management data structures and function declarations.
258 259 */
259 260
260 261 static void sfmmu_leave_srd(sfmmu_t *);
261 262 static int sfmmu_srdcache_constructor(void *, void *, int);
262 263 static void sfmmu_srdcache_destructor(void *, void *);
263 264 static int sfmmu_rgncache_constructor(void *, void *, int);
264 265 static void sfmmu_rgncache_destructor(void *, void *);
265 266 static int sfrgnmap_isnull(sf_region_map_t *);
266 267 static int sfhmergnmap_isnull(sf_hmeregion_map_t *);
267 268 static int sfmmu_scdcache_constructor(void *, void *, int);
268 269 static void sfmmu_scdcache_destructor(void *, void *);
269 270 static void sfmmu_rgn_cb_noop(caddr_t, caddr_t, caddr_t,
270 271 size_t, void *, u_offset_t);
271 272
272 273 static uint_t srd_hashmask = SFMMU_MAX_SRD_BUCKETS - 1;
273 274 static sf_srd_bucket_t *srd_buckets;
274 275 static struct kmem_cache *srd_cache;
275 276 static uint_t srd_rgn_hashmask = SFMMU_MAX_REGION_BUCKETS - 1;
276 277 static struct kmem_cache *region_cache;
277 278 static struct kmem_cache *scd_cache;
278 279
279 280 #ifdef sun4v
280 281 int use_bigtsb_arena = 1;
281 282 #else
282 283 int use_bigtsb_arena = 0;
283 284 #endif
284 285
285 286 /* External /etc/system tunable, for turning on&off the shctx support */
286 287 int disable_shctx = 0;
287 288 /* Internal variable, set by MD if the HW supports shctx feature */
288 289 int shctx_on = 0;
289 290
290 291 #ifdef DEBUG
291 292 static void check_scd_sfmmu_list(sfmmu_t **, sfmmu_t *, int);
292 293 #endif
293 294 static void sfmmu_to_scd_list(sfmmu_t **, sfmmu_t *);
294 295 static void sfmmu_from_scd_list(sfmmu_t **, sfmmu_t *);
295 296
296 297 static sf_scd_t *sfmmu_alloc_scd(sf_srd_t *, sf_region_map_t *);
297 298 static void sfmmu_find_scd(sfmmu_t *);
298 299 static void sfmmu_join_scd(sf_scd_t *, sfmmu_t *);
299 300 static void sfmmu_finish_join_scd(sfmmu_t *);
300 301 static void sfmmu_leave_scd(sfmmu_t *, uchar_t);
301 302 static void sfmmu_destroy_scd(sf_srd_t *, sf_scd_t *, sf_region_map_t *);
302 303 static int sfmmu_alloc_scd_tsbs(sf_srd_t *, sf_scd_t *);
303 304 static void sfmmu_free_scd_tsbs(sfmmu_t *);
304 305 static void sfmmu_tsb_inv_ctx(sfmmu_t *);
305 306 static int find_ism_rid(sfmmu_t *, sfmmu_t *, caddr_t, uint_t *);
306 307 static void sfmmu_ism_hatflags(sfmmu_t *, int);
307 308 static int sfmmu_srd_lock_held(sf_srd_t *);
308 309 static void sfmmu_remove_scd(sf_scd_t **, sf_scd_t *);
309 310 static void sfmmu_add_scd(sf_scd_t **headp, sf_scd_t *);
310 311 static void sfmmu_link_scd_to_regions(sf_srd_t *, sf_scd_t *);
311 312 static void sfmmu_unlink_scd_from_regions(sf_srd_t *, sf_scd_t *);
312 313 static void sfmmu_link_to_hmeregion(sfmmu_t *, sf_region_t *);
313 314 static void sfmmu_unlink_from_hmeregion(sfmmu_t *, sf_region_t *);
314 315
315 316 /*
316 317 * ``hat_lock'' is a hashed mutex lock for protecting sfmmu TSB lists,
317 318 * HAT flags, synchronizing TLB/TSB coherency, and context management.
318 319 * The lock is hashed on the sfmmup since the case where we need to lock
319 320 * all processes is rare but does occur (e.g. we need to unload a shared
320 321 * mapping from all processes using the mapping). We have a lot of buckets,
321 322 * and each slab of sfmmu_t's can use about a quarter of them, giving us
322 323 * a fairly good distribution without wasting too much space and overhead
323 324 * when we have to grab them all.
324 325 */
325 326 #define SFMMU_NUM_LOCK 128 /* must be power of two */
326 327 hatlock_t hat_lock[SFMMU_NUM_LOCK];
327 328
328 329 /*
329 330 * Hash algorithm optimized for a small number of slabs.
330 331 * 7 is (highbit((sizeof sfmmu_t)) - 1)
331 332 * This hash algorithm is based upon the knowledge that sfmmu_t's come from a
332 333 * kmem_cache, and thus they will be sequential within that cache. In
333 334 * addition, each new slab will have a different "color" up to cache_maxcolor
334 335 * which will skew the hashing for each successive slab which is allocated.
335 336 * If the size of sfmmu_t changed to a larger size, this algorithm may need
336 337 * to be revisited.
337 338 */
338 339 #define TSB_HASH_SHIFT_BITS (7)
339 340 #define PTR_HASH(x) ((uintptr_t)x >> TSB_HASH_SHIFT_BITS)
340 341
341 342 #ifdef DEBUG
342 343 int tsb_hash_debug = 0;
343 344 #define TSB_HASH(sfmmup) \
344 345 (tsb_hash_debug ? &hat_lock[0] : \
345 346 &hat_lock[PTR_HASH(sfmmup) & (SFMMU_NUM_LOCK-1)])
346 347 #else /* DEBUG */
347 348 #define TSB_HASH(sfmmup) &hat_lock[PTR_HASH(sfmmup) & (SFMMU_NUM_LOCK-1)]
348 349 #endif /* DEBUG */
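
For illustration only (not part of this change), a standalone sketch of how PTR_HASH spreads roughly-sequential sfmmu_t addresses across the 128 hat_lock buckets. The constants mirror the macros above; the addresses are made up.

        #include <stdio.h>
        #include <stdint.h>

        #define DEMO_NUM_LOCK   128     /* power of two, as SFMMU_NUM_LOCK */
        #define DEMO_HASH_SHIFT 7       /* highbit(sizeof (sfmmu_t)) - 1 */
        #define DEMO_PTR_HASH(x)        ((uintptr_t)(x) >> DEMO_HASH_SHIFT)

        int
        main(void)
        {
                /* made-up addresses standing in for sfmmu_t's from one slab */
                uintptr_t sfmmus[] = { 0x30001d40000, 0x30001d400c0, 0x30001d40180 };
                int i;

                for (i = 0; i < 3; i++) {
                        printf("sfmmu %lx -> hat_lock[%lu]\n",
                            (unsigned long)sfmmus[i],
                            (unsigned long)(DEMO_PTR_HASH(sfmmus[i]) &
                            (DEMO_NUM_LOCK - 1)));
                }
                return (0);
        }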
349 350
350 351
351 352 /* sfmmu_replace_tsb() return codes. */
352 353 typedef enum tsb_replace_rc {
353 354 TSB_SUCCESS,
354 355 TSB_ALLOCFAIL,
355 356 TSB_LOSTRACE,
356 357 TSB_ALREADY_SWAPPED,
357 358 TSB_CANTGROW
358 359 } tsb_replace_rc_t;
359 360
360 361 /*
361 362 * Flags for TSB allocation routines.
362 363 */
363 364 #define TSB_ALLOC 0x01
364 365 #define TSB_FORCEALLOC 0x02
365 366 #define TSB_GROW 0x04
366 367 #define TSB_SHRINK 0x08
367 368 #define TSB_SWAPIN 0x10
368 369
369 370 /*
370 371 * Support for HAT callbacks.
371 372 */
372 373 #define SFMMU_MAX_RELOC_CALLBACKS 10
373 374 int sfmmu_max_cb_id = SFMMU_MAX_RELOC_CALLBACKS;
374 375 static id_t sfmmu_cb_nextid = 0;
375 376 static id_t sfmmu_tsb_cb_id;
376 377 struct sfmmu_callback *sfmmu_cb_table;
377 378
378 379 kmutex_t kpr_mutex;
379 380 kmutex_t kpr_suspendlock;
380 381 kthread_t *kreloc_thread;
381 382
382 383 /*
383 384 * Enable VA->PA translation sanity checking on DEBUG kernels.
↓ open down ↓ |
125 lines elided |
↑ open up ↑ |
384 385 * Disabled by default. This is incompatible with some
385 386 * drivers (error injector, RSM) so if it breaks you get
386 387 * to keep both pieces.
387 388 */
388 389 int hat_check_vtop = 0;
389 390
390 391 /*
391 392 * Private sfmmu routines (prototypes)
392 393 */
393 394 static struct hme_blk *sfmmu_shadow_hcreate(sfmmu_t *, caddr_t, int, uint_t);
394 -static struct hme_blk *sfmmu_hblk_alloc(sfmmu_t *, caddr_t,
395 +static struct hme_blk *sfmmu_hblk_alloc(sfmmu_t *, caddr_t,
395 396 struct hmehash_bucket *, uint_t, hmeblk_tag, uint_t,
396 397 uint_t);
397 398 static caddr_t sfmmu_hblk_unload(struct hat *, struct hme_blk *, caddr_t,
398 399 caddr_t, demap_range_t *, uint_t);
399 400 static caddr_t sfmmu_hblk_sync(struct hat *, struct hme_blk *, caddr_t,
400 401 caddr_t, int);
401 402 static void sfmmu_hblk_free(struct hme_blk **);
402 403 static void sfmmu_hblks_list_purge(struct hme_blk **, int);
403 404 static uint_t sfmmu_get_free_hblk(struct hme_blk **, uint_t);
404 405 static uint_t sfmmu_put_free_hblk(struct hme_blk *, uint_t);
405 406 static struct hme_blk *sfmmu_hblk_steal(int);
406 407 static int sfmmu_steal_this_hblk(struct hmehash_bucket *,
407 408 struct hme_blk *, uint64_t, struct hme_blk *);
408 409 static caddr_t sfmmu_hblk_unlock(struct hme_blk *, caddr_t, caddr_t);
409 410
410 411 static void hat_do_memload_array(struct hat *, caddr_t, size_t,
411 412 struct page **, uint_t, uint_t, uint_t);
412 413 static void hat_do_memload(struct hat *, caddr_t, struct page *,
413 414 uint_t, uint_t, uint_t);
414 415 static void sfmmu_memload_batchsmall(struct hat *, caddr_t, page_t **,
415 416 uint_t, uint_t, pgcnt_t, uint_t);
416 417 void sfmmu_tteload(struct hat *, tte_t *, caddr_t, page_t *,
417 418 uint_t);
418 419 static int sfmmu_tteload_array(sfmmu_t *, tte_t *, caddr_t, page_t **,
419 420 uint_t, uint_t);
420 421 static struct hmehash_bucket *sfmmu_tteload_acquire_hashbucket(sfmmu_t *,
421 422 caddr_t, int, uint_t);
422 423 static struct hme_blk *sfmmu_tteload_find_hmeblk(sfmmu_t *,
423 424 struct hmehash_bucket *, caddr_t, uint_t, uint_t,
424 425 uint_t);
425 426 static int sfmmu_tteload_addentry(sfmmu_t *, struct hme_blk *, tte_t *,
426 427 caddr_t, page_t **, uint_t, uint_t);
427 428 static void sfmmu_tteload_release_hashbucket(struct hmehash_bucket *);
428 429
429 430 static int sfmmu_pagearray_setup(caddr_t, page_t **, tte_t *, int);
430 431 static pfn_t sfmmu_uvatopfn(caddr_t, sfmmu_t *, tte_t *);
431 432 void sfmmu_memtte(tte_t *, pfn_t, uint_t, int);
432 433 #ifdef VAC
433 434 static void sfmmu_vac_conflict(struct hat *, caddr_t, page_t *);
434 435 static int sfmmu_vacconflict_array(caddr_t, page_t *, int *);
435 436 int tst_tnc(page_t *pp, pgcnt_t);
436 437 void conv_tnc(page_t *pp, int);
437 438 #endif
438 439
439 440 static void sfmmu_get_ctx(sfmmu_t *);
440 441 static void sfmmu_free_sfmmu(sfmmu_t *);
441 442
442 443 static void sfmmu_ttesync(struct hat *, caddr_t, tte_t *, page_t *);
443 444 static void sfmmu_chgattr(struct hat *, caddr_t, size_t, uint_t, int);
444 445
445 446 cpuset_t sfmmu_pageunload(page_t *, struct sf_hment *, int);
446 447 static void hat_pagereload(struct page *, struct page *);
447 448 static cpuset_t sfmmu_pagesync(page_t *, struct sf_hment *, uint_t);
448 449 #ifdef VAC
449 450 void sfmmu_page_cache_array(page_t *, int, int, pgcnt_t);
450 451 static void sfmmu_page_cache(page_t *, int, int, int);
451 452 #endif
452 453
453 454 cpuset_t sfmmu_rgntlb_demap(caddr_t, sf_region_t *,
454 455 struct hme_blk *, int);
455 456 static void sfmmu_tlbcache_demap(caddr_t, sfmmu_t *, struct hme_blk *,
456 457 pfn_t, int, int, int, int);
457 458 static void sfmmu_ismtlbcache_demap(caddr_t, sfmmu_t *, struct hme_blk *,
458 459 pfn_t, int);
459 460 static void sfmmu_tlb_demap(caddr_t, sfmmu_t *, struct hme_blk *, int, int);
460 461 static void sfmmu_tlb_range_demap(demap_range_t *);
461 462 static void sfmmu_invalidate_ctx(sfmmu_t *);
462 463 static void sfmmu_sync_mmustate(sfmmu_t *);
463 464
464 -static void sfmmu_tsbinfo_setup_phys(struct tsb_info *, pfn_t);
465 +static void sfmmu_tsbinfo_setup_phys(struct tsb_info *, pfn_t);
465 466 static int sfmmu_tsbinfo_alloc(struct tsb_info **, int, int, uint_t,
466 467 sfmmu_t *);
467 468 static void sfmmu_tsb_free(struct tsb_info *);
468 469 static void sfmmu_tsbinfo_free(struct tsb_info *);
469 470 static int sfmmu_init_tsbinfo(struct tsb_info *, int, int, uint_t,
470 471 sfmmu_t *);
471 472 static void sfmmu_tsb_chk_reloc(sfmmu_t *, hatlock_t *);
472 473 static void sfmmu_tsb_swapin(sfmmu_t *, hatlock_t *);
473 474 static int sfmmu_select_tsb_szc(pgcnt_t);
474 475 static void sfmmu_mod_tsb(sfmmu_t *, caddr_t, tte_t *, int);
475 476 #define sfmmu_load_tsb(sfmmup, vaddr, tte, szc) \
476 477 sfmmu_mod_tsb(sfmmup, vaddr, tte, szc)
477 478 #define sfmmu_unload_tsb(sfmmup, vaddr, szc) \
478 479 sfmmu_mod_tsb(sfmmup, vaddr, NULL, szc)
479 480 static void sfmmu_copy_tsb(struct tsb_info *, struct tsb_info *);
480 481 static tsb_replace_rc_t sfmmu_replace_tsb(sfmmu_t *, struct tsb_info *, uint_t,
481 482 hatlock_t *, uint_t);
482 483 static void sfmmu_size_tsb(sfmmu_t *, int, uint64_t, uint64_t, int);
483 484
484 485 #ifdef VAC
485 486 void sfmmu_cache_flush(pfn_t, int);
486 487 void sfmmu_cache_flushcolor(int, pfn_t);
487 488 #endif
488 489 static caddr_t sfmmu_hblk_chgattr(sfmmu_t *, struct hme_blk *, caddr_t,
489 490 caddr_t, demap_range_t *, uint_t, int);
490 491
491 492 static uint64_t sfmmu_vtop_attr(uint_t, int mode, tte_t *);
492 493 static uint_t sfmmu_ptov_attr(tte_t *);
493 494 static caddr_t sfmmu_hblk_chgprot(sfmmu_t *, struct hme_blk *, caddr_t,
494 495 caddr_t, demap_range_t *, uint_t);
495 496 static uint_t sfmmu_vtop_prot(uint_t, uint_t *);
496 497 static int sfmmu_idcache_constructor(void *, void *, int);
497 498 static void sfmmu_idcache_destructor(void *, void *);
498 499 static int sfmmu_hblkcache_constructor(void *, void *, int);
499 500 static void sfmmu_hblkcache_destructor(void *, void *);
500 501 static void sfmmu_hblkcache_reclaim(void *);
501 502 static void sfmmu_shadow_hcleanup(sfmmu_t *, struct hme_blk *,
502 503 struct hmehash_bucket *);
503 504 static void sfmmu_hblk_hash_rm(struct hmehash_bucket *, struct hme_blk *,
504 505 struct hme_blk *, struct hme_blk **, int);
505 506 static void sfmmu_hblk_hash_add(struct hmehash_bucket *, struct hme_blk *,
506 507 uint64_t);
507 508 static struct hme_blk *sfmmu_check_pending_hblks(int);
508 509 static void sfmmu_free_hblks(sfmmu_t *, caddr_t, caddr_t, int);
509 510 static void sfmmu_cleanup_rhblk(sf_srd_t *, caddr_t, uint_t, int);
510 511 static void sfmmu_unload_hmeregion_va(sf_srd_t *, uint_t, caddr_t, caddr_t,
511 512 int, caddr_t *);
512 513 static void sfmmu_unload_hmeregion(sf_srd_t *, sf_region_t *);
513 514
514 515 static void sfmmu_rm_large_mappings(page_t *, int);
515 516
516 517 static void hat_lock_init(void);
517 518 static void hat_kstat_init(void);
518 519 static int sfmmu_kstat_percpu_update(kstat_t *ksp, int rw);
519 520 static void sfmmu_set_scd_rttecnt(sf_srd_t *, sf_scd_t *);
520 521 static int sfmmu_is_rgnva(sf_srd_t *, caddr_t, ulong_t, ulong_t);
521 522 static void sfmmu_check_page_sizes(sfmmu_t *, int);
522 523 int fnd_mapping_sz(page_t *);
523 524 static void iment_add(struct ism_ment *, struct hat *);
524 525 static void iment_sub(struct ism_ment *, struct hat *);
525 526 static pgcnt_t ism_tsb_entries(sfmmu_t *, int szc);
526 527 extern void sfmmu_setup_tsbinfo(sfmmu_t *);
527 528 extern void sfmmu_clear_utsbinfo(void);
528 529
529 530 static void sfmmu_ctx_wrap_around(mmu_ctx_t *, boolean_t);
530 531
531 532 extern int vpm_enable;
532 533
533 534 /* kpm globals */
534 535 #ifdef DEBUG
535 536 /*
536 537 * Enable trap level tsbmiss handling
537 538 */
538 539 int kpm_tsbmtl = 1;
539 540
540 541 /*
541 542 * Flush the TLB on kpm mapout. Note: Xcalls are used (again) for the
542 543 * required TLB shootdowns in this case, so handle w/ care. Off by default.
543 544 */
544 545 int kpm_tlb_flush;
545 546 #endif /* DEBUG */
546 547
547 548 static void *sfmmu_vmem_xalloc_aligned_wrapper(vmem_t *, size_t, int);
548 549
549 550 #ifdef DEBUG
550 551 static void sfmmu_check_hblk_flist();
551 552 #endif
552 553
553 554 /*
554 555 * Semi-private sfmmu data structures. Some of them are initialize in
555 556 * startup or in hat_init. Some of them are private but accessed by
556 557 * assembly code or mach_sfmmu.c
557 558 */
558 559 struct hmehash_bucket *uhme_hash; /* user hmeblk hash table */
559 560 struct hmehash_bucket *khme_hash; /* kernel hmeblk hash table */
560 561 uint64_t uhme_hash_pa; /* PA of uhme_hash */
561 562 uint64_t khme_hash_pa; /* PA of khme_hash */
562 -int uhmehash_num; /* # of buckets in user hash table */
563 -int khmehash_num; /* # of buckets in kernel hash table */
563 +int uhmehash_num; /* # of buckets in user hash table */
564 +int khmehash_num; /* # of buckets in kernel hash table */
564 565
565 566 uint_t max_mmu_ctxdoms = 0; /* max context domains in the system */
566 567 mmu_ctx_t **mmu_ctxs_tbl; /* global array of context domains */
567 568 uint64_t mmu_saved_gnum = 0; /* to init incoming MMUs' gnums */
568 569
569 570 #define DEFAULT_NUM_CTXS_PER_MMU 8192
570 571 static uint_t nctxs = DEFAULT_NUM_CTXS_PER_MMU;
571 572
572 573 int cache; /* describes system cache */
573 574
574 575 caddr_t ktsb_base; /* kernel 8k-indexed tsb base address */
575 576 uint64_t ktsb_pbase; /* kernel 8k-indexed tsb phys address */
576 577 int ktsb_szcode; /* kernel 8k-indexed tsb size code */
577 578 int ktsb_sz; /* kernel 8k-indexed tsb size */
578 579
579 580 caddr_t ktsb4m_base; /* kernel 4m-indexed tsb base address */
580 581 uint64_t ktsb4m_pbase; /* kernel 4m-indexed tsb phys address */
581 582 int ktsb4m_szcode; /* kernel 4m-indexed tsb size code */
582 583 int ktsb4m_sz; /* kernel 4m-indexed tsb size */
583 584
584 585 uint64_t kpm_tsbbase; /* kernel seg_kpm 4M TSB base address */
585 586 int kpm_tsbsz; /* kernel seg_kpm 4M TSB size code */
586 587 uint64_t kpmsm_tsbbase; /* kernel seg_kpm 8K TSB base address */
587 588 int kpmsm_tsbsz; /* kernel seg_kpm 8K TSB size code */
588 589
589 590 #ifndef sun4v
590 591 int utsb_dtlb_ttenum = -1; /* index in TLB for utsb locked TTE */
591 592 int utsb4m_dtlb_ttenum = -1; /* index in TLB for 4M TSB TTE */
592 593 int dtlb_resv_ttenum; /* index in TLB of first reserved TTE */
593 594 caddr_t utsb_vabase; /* reserved kernel virtual memory */
594 595 caddr_t utsb4m_vabase; /* for trap handler TSB accesses */
595 596 #endif /* sun4v */
596 597 uint64_t tsb_alloc_bytes = 0; /* bytes allocated to TSBs */
597 598 vmem_t *kmem_tsb_default_arena[NLGRPS_MAX]; /* For dynamic TSBs */
598 599 vmem_t *kmem_bigtsb_default_arena[NLGRPS_MAX]; /* dynamic 256M TSBs */
599 600
600 601 /*
601 602 * Size to use for TSB slabs. Future platforms that support page sizes
602 603 * larger than 4M may wish to change these values, and provide their own
603 604 * assembly macros for building and decoding the TSB base register contents.
604 605 * Note disable_large_pages will override the value set here.
605 606 */
606 607 static uint_t tsb_slab_ttesz = TTE4M;
607 608 size_t tsb_slab_size = MMU_PAGESIZE4M;
608 609 uint_t tsb_slab_shift = MMU_PAGESHIFT4M;
609 610 /* PFN mask for TTE */
610 611 size_t tsb_slab_mask = MMU_PAGEOFFSET4M >> MMU_PAGESHIFT;
611 612
612 613 /*
613 614 * Size to use for TSB slabs. These are used only when 256M tsb arenas
614 615 * exist.
615 616 */
616 617 static uint_t bigtsb_slab_ttesz = TTE256M;
617 618 static size_t bigtsb_slab_size = MMU_PAGESIZE256M;
618 619 static uint_t bigtsb_slab_shift = MMU_PAGESHIFT256M;
619 620 /* 256M page alignment for 8K pfn */
620 621 static size_t bigtsb_slab_mask = MMU_PAGEOFFSET256M >> MMU_PAGESHIFT;
621 622
622 623 /* largest TSB size to grow to, will be smaller on smaller memory systems */
623 624 static int tsb_max_growsize = 0;
624 625
625 626 /*
626 627 * Tunable parameters dealing with TSB policies.
627 628 */
628 629
629 630 /*
630 631 * This undocumented tunable forces all 8K TSBs to be allocated from
631 632 * the kernel heap rather than from the kmem_tsb_default_arena arenas.
632 633 */
633 634 #ifdef DEBUG
634 635 int tsb_forceheap = 0;
635 636 #endif /* DEBUG */
636 637
637 638 /*
638 639 * Decide whether to use per-lgroup arenas, or one global set of
639 640 * TSB arenas. The default is not to break up per-lgroup, since
640 641 * most platforms don't recognize any tangible benefit from it.
641 642 */
642 643 int tsb_lgrp_affinity = 0;
643 644
644 645 /*
645 646 * Used for growing the TSB based on the process RSS.
646 647 * tsb_rss_factor is based on the smallest TSB, and is
647 648 * shifted by the TSB size to determine if we need to grow.
648 649 * The default will grow the TSB if the number of TTEs for
649 650 * this page size exceeds 75% of the number of TSB entries,
650 651 * which should _almost_ eliminate all conflict misses
651 652 * (at the expense of using up lots and lots of memory).
652 653 */
653 654 #define TSB_RSS_FACTOR (TSB_ENTRIES(TSB_MIN_SZCODE) * 0.75)
654 655 #define SFMMU_RSS_TSBSIZE(tsbszc) (tsb_rss_factor << tsbszc)
655 656 #define SELECT_TSB_SIZECODE(pgcnt) ( \
656 657 (enable_tsb_rss_sizing)? sfmmu_select_tsb_szc(pgcnt) : \
657 658 default_tsb_size)
658 659 #define TSB_OK_SHRINK() \
659 660 (tsb_alloc_bytes > tsb_alloc_hiwater || freemem < desfree)
660 661 #define TSB_OK_GROW() \
661 662 (tsb_alloc_bytes < tsb_alloc_hiwater && freemem > desfree)
662 663
663 664 int enable_tsb_rss_sizing = 1;
664 665 int tsb_rss_factor = (int)TSB_RSS_FACTOR;
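
For illustration only (not part of this change), a hedged sketch of the RSS sizing above, assuming the minimum TSB holds 512 entries so that tsb_rss_factor defaults to 384. The selection loop only mimics the effect of sfmmu_select_tsb_szc(); it is not that function.

        #include <stdio.h>

        #define DEMO_RSS_FACTOR         384     /* 512 entries * 0.75, assumed */
        #define DEMO_RSS_TSBSIZE(szc)   ((unsigned long)DEMO_RSS_FACTOR << (szc))

        int
        main(void)
        {
                unsigned long rss = 5000;       /* TTEs resident at this page size */
                int szc = 0;

                /* pick the smallest size code whose 75% threshold covers rss */
                while (DEMO_RSS_TSBSIZE(szc) < rss)
                        szc++;
                printf("rss %lu -> size code %d (threshold %lu entries)\n",
                    rss, szc, DEMO_RSS_TSBSIZE(szc));
                return (0);
        }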
665 666
666 667 /* which TSB size code to use for new address spaces or if rss sizing off */
667 668 int default_tsb_size = TSB_8K_SZCODE;
668 669
669 670 static uint64_t tsb_alloc_hiwater; /* limit TSB reserved memory */
670 671 uint64_t tsb_alloc_hiwater_factor; /* tsb_alloc_hiwater = physmem / this */
671 672 #define TSB_ALLOC_HIWATER_FACTOR_DEFAULT 32
672 673
673 674 #ifdef DEBUG
674 675 static int tsb_random_size = 0; /* set to 1 to test random tsb sizes on alloc */
675 676 static int tsb_grow_stress = 0; /* if set to 1, keep replacing TSB w/ random */
676 677 static int tsb_alloc_mtbf = 0; /* fail allocation every n attempts */
677 678 static int tsb_alloc_fail_mtbf = 0;
678 679 static int tsb_alloc_count = 0;
679 680 #endif /* DEBUG */
680 681
681 682 /* if set to 1, will remap valid TTEs when growing TSB. */
682 683 int tsb_remap_ttes = 1;
683 684
684 685 /*
685 686 * If we have more than this many mappings, allocate a second TSB.
686 687 * This default is chosen because the I/D fully associative TLBs are
687 688 * assumed to have at least 8 available entries. Platforms with a
688 689 * larger fully-associative TLB could probably override the default.
689 690 */
690 691
691 692 #ifdef sun4v
692 693 int tsb_sectsb_threshold = 0;
693 694 #else
694 695 int tsb_sectsb_threshold = 8;
695 696 #endif
696 697
697 698 /*
698 699 * kstat data
699 700 */
700 701 struct sfmmu_global_stat sfmmu_global_stat;
701 702 struct sfmmu_tsbsize_stat sfmmu_tsbsize_stat;
702 703
703 704 /*
704 705 * Global data
705 706 */
706 -sfmmu_t *ksfmmup; /* kernel's hat id */
707 +sfmmu_t *ksfmmup; /* kernel's hat id */
707 708
708 709 #ifdef DEBUG
709 710 static void chk_tte(tte_t *, tte_t *, tte_t *, struct hme_blk *);
710 711 #endif
711 712
712 713 /* sfmmu locking operations */
713 714 static kmutex_t *sfmmu_mlspl_enter(struct page *, int);
714 715 static int sfmmu_mlspl_held(struct page *, int);
715 716
716 717 kmutex_t *sfmmu_page_enter(page_t *);
717 718 void sfmmu_page_exit(kmutex_t *);
718 719 int sfmmu_page_spl_held(struct page *);
719 720
720 721 /* sfmmu internal locking operations - accessed directly */
721 722 static void sfmmu_mlist_reloc_enter(page_t *, page_t *,
722 723 kmutex_t **, kmutex_t **);
723 724 static void sfmmu_mlist_reloc_exit(kmutex_t *, kmutex_t *);
724 725 static hatlock_t *
725 726 sfmmu_hat_enter(sfmmu_t *);
726 727 static hatlock_t *
727 728 sfmmu_hat_tryenter(sfmmu_t *);
728 729 static void sfmmu_hat_exit(hatlock_t *);
729 730 static void sfmmu_hat_lock_all(void);
730 731 static void sfmmu_hat_unlock_all(void);
731 732 static void sfmmu_ismhat_enter(sfmmu_t *, int);
732 733 static void sfmmu_ismhat_exit(sfmmu_t *, int);
733 734
734 735 kpm_hlk_t *kpmp_table;
735 736 uint_t kpmp_table_sz; /* must be a power of 2 */
736 737 uchar_t kpmp_shift;
737 738
738 739 kpm_shlk_t *kpmp_stable;
739 740 uint_t kpmp_stable_sz; /* must be a power of 2 */
740 741
741 742 /*
742 743 * SPL_TABLE_SIZE is 2 * NCPU, but no smaller than 128.
743 744 * SPL_SHIFT is log2(SPL_TABLE_SIZE).
744 745 */
745 746 #if ((2*NCPU_P2) > 128)
746 747 #define SPL_SHIFT ((unsigned)(NCPU_LOG2 + 1))
747 748 #else
748 749 #define SPL_SHIFT 7U
749 750 #endif
750 751 #define SPL_TABLE_SIZE (1U << SPL_SHIFT)
751 752 #define SPL_MASK (SPL_TABLE_SIZE - 1)
752 753
753 754 /*
754 755 * We shift by PP_SHIFT to take care of the low-order 0 bits of a page_t
755 756 * and by multiples of SPL_SHIFT to get as many varied bits as we can.
756 757 */
757 758 #define SPL_INDEX(pp) \
758 759 ((((uintptr_t)(pp) >> PP_SHIFT) ^ \
759 760 ((uintptr_t)(pp) >> (PP_SHIFT + SPL_SHIFT)) ^ \
760 761 ((uintptr_t)(pp) >> (PP_SHIFT + SPL_SHIFT * 2)) ^ \
761 762 ((uintptr_t)(pp) >> (PP_SHIFT + SPL_SHIFT * 3))) & \
762 763 SPL_MASK)
763 764
764 765 #define SPL_HASH(pp) \
765 766 (&sfmmu_page_lock[SPL_INDEX(pp)].pad_mutex)
766 767
767 768 static pad_mutex_t sfmmu_page_lock[SPL_TABLE_SIZE];
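
For illustration only (not part of this change), a standalone version of the SPL_INDEX fold above: successive SPL_SHIFT-wide slices of the page_t address (above its naturally-zero low bits) are XORed together and masked into the table. DEMO_PP_SHIFT and the address are stand-ins.

        #include <stdio.h>
        #include <stdint.h>

        #define DEMO_SPL_SHIFT  7
        #define DEMO_SPL_MASK   ((1U << DEMO_SPL_SHIFT) - 1)
        #define DEMO_PP_SHIFT   7       /* assumed low-order zero bits of a page_t */

        #define DEMO_SPL_INDEX(pp)                                              \
                ((((uintptr_t)(pp) >> DEMO_PP_SHIFT) ^                          \
                ((uintptr_t)(pp) >> (DEMO_PP_SHIFT + DEMO_SPL_SHIFT)) ^         \
                ((uintptr_t)(pp) >> (DEMO_PP_SHIFT + DEMO_SPL_SHIFT * 2)) ^     \
                ((uintptr_t)(pp) >> (DEMO_PP_SHIFT + DEMO_SPL_SHIFT * 3))) &    \
                DEMO_SPL_MASK)

        int
        main(void)
        {
                uintptr_t pp = 0x70001fe2c80;   /* made-up page_t address */

                printf("page_t %lx -> sfmmu_page_lock[%lu]\n",
                    (unsigned long)pp, (unsigned long)DEMO_SPL_INDEX(pp));
                return (0);
        }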
768 769
769 770 /* Array of mutexes protecting a page's mapping list and p_nrm field. */
770 771
771 772 #define MML_TABLE_SIZE SPL_TABLE_SIZE
772 773 #define MLIST_HASH(pp) (&mml_table[SPL_INDEX(pp)].pad_mutex)
773 774
774 775 static pad_mutex_t mml_table[MML_TABLE_SIZE];
775 776
776 777 /*
777 778 * hat_unload_callback() will group together callbacks in order
778 779 * to avoid xt_sync() calls. This is the maximum size of the group.
779 780 */
780 781 #define MAX_CB_ADDR 32
781 782
782 783 tte_t hw_tte;
783 784 static ulong_t sfmmu_dmr_maxbit = DMR_MAXBIT;
784 785
785 786 static char *mmu_ctx_kstat_names[] = {
786 787 "mmu_ctx_tsb_exceptions",
787 788 "mmu_ctx_tsb_raise_exception",
788 789 "mmu_ctx_wrap_around",
789 790 };
790 791
791 792 /*
792 793 * Wrapper for vmem_xalloc since vmem_create only allows limited
793 794 * parameters for vm_source_alloc functions. This function allows us
794 795 * to specify alignment consistent with the size of the object being
795 796 * allocated.
796 797 */
797 798 static void *
798 799 sfmmu_vmem_xalloc_aligned_wrapper(vmem_t *vmp, size_t size, int vmflag)
799 800 {
800 801 return (vmem_xalloc(vmp, size, size, 0, 0, NULL, NULL, vmflag));
801 802 }
802 803
803 804 /* Common code for setting tsb_alloc_hiwater. */
804 805 #define SFMMU_SET_TSB_ALLOC_HIWATER(pages) tsb_alloc_hiwater = \
805 806 ptob(pages) / tsb_alloc_hiwater_factor
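
Worked example (numbers illustrative): with the default factor of 32 and physmem of 262144 8K pages (2 GB), tsb_alloc_hiwater becomes 2 GB / 32 = 64 MB; at most that much memory is tied up in dynamically allocated TSB slabs before TSB allocations fall back to the kernel heap.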
806 807
807 808 /*
808 809 * Set tsb_max_growsize to allow at most all of physical memory to be mapped by
809 810 * a single TSB. physmem is the number of physical pages so we need physmem 8K
810 811 * TTEs to represent all those physical pages. We round this up by using
811 812 * 1<<highbit(). To figure out which size code to use, remember that the size
812 813 * code is just an amount to shift the smallest TSB size to get the size of
813 814 * this TSB. So we subtract that size, TSB_START_SIZE, from highbit() (or
814 815 * highbit() - 1) to get the size code for the smallest TSB that can represent
815 816 * all of physical memory, while erring on the side of too much.
816 817 *
817 818 * Restrict tsb_max_growsize to make sure that:
818 819 * 1) TSBs can't grow larger than the TSB slab size
819 820 * 2) TSBs can't grow larger than UTSB_MAX_SZCODE.
820 821 */
821 822 #define SFMMU_SET_TSB_MAX_GROWSIZE(pages) { \
822 823 int _i, _szc, _slabszc, _tsbszc; \
823 824 \
824 825 _i = highbit(pages); \
825 826 if ((1 << (_i - 1)) == (pages)) \
826 827 _i--; /* 2^n case, round down */ \
827 828 _szc = _i - TSB_START_SIZE; \
828 829 _slabszc = bigtsb_slab_shift - (TSB_START_SIZE + TSB_ENTRY_SHIFT); \
829 830 _tsbszc = MIN(_szc, _slabszc); \
830 831 tsb_max_growsize = MIN(_tsbszc, UTSB_MAX_SZCODE); \
831 832 }
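
Worked example (sizes hedged): for physmem = 2^21 pages (16 GB of 8K pages), highbit(pages) is 22; since pages is an exact power of two, _i is decremented to 21, giving _szc = 21 - TSB_START_SIZE. That value is then clamped to the slab-derived code (bigtsb_slab_shift - (TSB_START_SIZE + TSB_ENTRY_SHIFT)) and to UTSB_MAX_SZCODE, so a single TSB can never outgrow one TSB slab.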
832 833
833 834 /*
834 835 * Given a pointer to an sfmmu and a TTE size code, return a pointer to the
835 836 * tsb_info which handles that TTE size.
836 837 */
837 838 #define SFMMU_GET_TSBINFO(tsbinfop, sfmmup, tte_szc) { \
838 839 (tsbinfop) = (sfmmup)->sfmmu_tsb; \
839 840 ASSERT(((tsbinfop)->tsb_flags & TSB_SHAREDCTX) || \
840 841 sfmmu_hat_lock_held(sfmmup)); \
841 842 if ((tte_szc) >= TTE4M) { \
842 843 ASSERT((tsbinfop) != NULL); \
843 844 (tsbinfop) = (tsbinfop)->tsb_next; \
844 845 } \
845 846 }
846 847
847 848 /*
↓ open down ↓ |
131 lines elided |
↑ open up ↑ |
848 849 * Macro to use to unload entries from the TSB.
849 850 * It has knowledge of which page sizes get replicated in the TSB
850 851 * and will call the appropriate unload routine for the appropriate size.
851 852 */
852 853 #define SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, ismhat) \
853 854 { \
854 855 int ttesz = get_hblk_ttesz(hmeblkp); \
855 856 if (ttesz == TTE8K || ttesz == TTE4M) { \
856 857 sfmmu_unload_tsb(sfmmup, addr, ttesz); \
857 858 } else { \
858 - caddr_t sva = ismhat ? addr : \
859 + caddr_t sva = ismhat ? addr : \
859 860 (caddr_t)get_hblk_base(hmeblkp); \
860 861 caddr_t eva = sva + get_hblk_span(hmeblkp); \
861 862 ASSERT(addr >= sva && addr < eva); \
862 863 sfmmu_unload_tsb_range(sfmmup, sva, eva, ttesz); \
863 864 } \
864 865 }
865 866
866 867
867 868 /* Update tsb_alloc_hiwater after memory is configured. */
868 869 /*ARGSUSED*/
869 870 static void
870 871 sfmmu_update_post_add(void *arg, pgcnt_t delta_pages)
871 872 {
872 873 /* Assumes physmem has already been updated. */
873 874 SFMMU_SET_TSB_ALLOC_HIWATER(physmem);
874 875 SFMMU_SET_TSB_MAX_GROWSIZE(physmem);
875 876 }
876 877
877 878 /*
878 879 * Update tsb_alloc_hiwater before memory is deleted. We'll do nothing here
879 880 * and update tsb_alloc_hiwater and tsb_max_growsize after the memory is
880 881 * deleted.
881 882 */
882 883 /*ARGSUSED*/
883 884 static int
884 885 sfmmu_update_pre_del(void *arg, pgcnt_t delta_pages)
885 886 {
886 887 return (0);
887 888 }
888 889
889 890 /* Update tsb_alloc_hiwater after memory fails to be unconfigured. */
890 891 /*ARGSUSED*/
891 892 static void
892 893 sfmmu_update_post_del(void *arg, pgcnt_t delta_pages, int cancelled)
893 894 {
894 895 /*
895 896 * Whether the delete was cancelled or not, just go ahead and update
896 897 * tsb_alloc_hiwater and tsb_max_growsize.
897 898 */
898 899 SFMMU_SET_TSB_ALLOC_HIWATER(physmem);
899 900 SFMMU_SET_TSB_MAX_GROWSIZE(physmem);
900 901 }
901 902
902 903 static kphysm_setup_vector_t sfmmu_update_vec = {
903 904 KPHYSM_SETUP_VECTOR_VERSION, /* version */
904 905 sfmmu_update_post_add, /* post_add */
905 906 sfmmu_update_pre_del, /* pre_del */
906 907 sfmmu_update_post_del /* post_del */
907 908 };
908 909
909 910
910 911 /*
911 912 * HME_BLK HASH PRIMITIVES
912 913 */
913 914
914 915 /*
915 916 * Enter a hme on the mapping list for page pp.
916 917 * When large pages are more prevalent in the system we might want to
917 918 * keep the mapping list in ascending order by the hment size. For now,
918 919 * small pages are more frequent, so don't slow it down.
919 920 */
920 921 #define HME_ADD(hme, pp) \
921 922 { \
922 923 ASSERT(sfmmu_mlist_held(pp)); \
923 924 \
924 925 hme->hme_prev = NULL; \
925 926 hme->hme_next = pp->p_mapping; \
926 927 hme->hme_page = pp; \
927 928 if (pp->p_mapping) { \
928 929 ((struct sf_hment *)(pp->p_mapping))->hme_prev = hme;\
929 930 ASSERT(pp->p_share > 0); \
930 931 } else { \
931 932 /* EMPTY */ \
932 933 ASSERT(pp->p_share == 0); \
933 934 } \
934 935 pp->p_mapping = hme; \
935 936 pp->p_share++; \
936 937 }
937 938
938 939 /*
939 940	 * Remove a hme from the mapping list for page pp.
940 941	 * If we are unmapping a large translation, we need to make sure that the
941 942	 * change is reflected in the corresponding bit of the p_index field.
942 943 */
943 944 #define HME_SUB(hme, pp) \
944 945 { \
945 946 ASSERT(sfmmu_mlist_held(pp)); \
946 947 ASSERT(hme->hme_page == pp || IS_PAHME(hme)); \
947 948 \
948 949 if (pp->p_mapping == NULL) { \
949 950 panic("hme_remove - no mappings"); \
950 951 } \
951 952 \
952 953 membar_stst(); /* ensure previous stores finish */ \
953 954 \
954 955 ASSERT(pp->p_share > 0); \
955 956 pp->p_share--; \
956 957 \
957 958 if (hme->hme_prev) { \
958 959 ASSERT(pp->p_mapping != hme); \
959 960 ASSERT(hme->hme_prev->hme_page == pp || \
960 961 IS_PAHME(hme->hme_prev)); \
961 962 hme->hme_prev->hme_next = hme->hme_next; \
962 963 } else { \
963 964 ASSERT(pp->p_mapping == hme); \
964 965 pp->p_mapping = hme->hme_next; \
965 966 ASSERT((pp->p_mapping == NULL) ? \
966 967 (pp->p_share == 0) : 1); \
967 968 } \
968 969 \
969 970 if (hme->hme_next) { \
970 971 ASSERT(hme->hme_next->hme_page == pp || \
971 972 IS_PAHME(hme->hme_next)); \
972 973 hme->hme_next->hme_prev = hme->hme_prev; \
973 974 } \
974 975 \
975 976 /* zero out the entry */ \
976 977 hme->hme_next = NULL; \
977 978 hme->hme_prev = NULL; \
978 979 hme->hme_page = NULL; \
979 980 \
980 981 if (hme_size(hme) > TTE8K) { \
981 982 /* remove mappings for remainder of large pg */ \
982 983 sfmmu_rm_large_mappings(pp, hme_size(hme)); \
983 984 } \
984 985 }
985 986
986 987 /*
987 988 * This function returns the hment given the hme_blk and a vaddr.
988 989 * It assumes addr has already been checked to belong to hme_blk's
989 990 * range.
990 991 */
991 992 #define HBLKTOHME(hment, hmeblkp, addr) \
992 993 { \
993 994 int index; \
994 995 HBLKTOHME_IDX(hment, hmeblkp, addr, index) \
995 996 }
996 997
997 998 /*
998 999 * Version of HBLKTOHME that also returns the index in hmeblkp
999 1000 * of the hment.
1000 1001 */
1001 1002 #define HBLKTOHME_IDX(hment, hmeblkp, addr, idx) \
1002 1003 { \
1003 1004 ASSERT(in_hblk_range((hmeblkp), (addr))); \
1004 1005 \
1005 1006 if (get_hblk_ttesz(hmeblkp) == TTE8K) { \
1006 1007 idx = (((uintptr_t)(addr) >> MMU_PAGESHIFT) & (NHMENTS-1)); \
1007 1008 } else \
1008 1009 idx = 0; \
1009 1010 \
1010 1011 (hment) = &(hmeblkp)->hblk_hme[idx]; \
1011 1012 }
1012 1013
1013 1014 /*
1014 1015 * Disable any page sizes not supported by the CPU
1015 1016 */
1016 1017 void
1017 1018 hat_init_pagesizes()
1018 1019 {
1019 - int i;
1020 + int i;
1020 1021
1021 1022 mmu_exported_page_sizes = 0;
1022 1023 for (i = TTE8K; i < max_mmu_page_sizes; i++) {
1023 1024
1024 1025 szc_2_userszc[i] = (uint_t)-1;
1025 1026 userszc_2_szc[i] = (uint_t)-1;
1026 1027
1027 1028 if ((mmu_exported_pagesize_mask & (1 << i)) == 0) {
1028 1029 disable_large_pages |= (1 << i);
1029 1030 } else {
1030 1031 szc_2_userszc[i] = mmu_exported_page_sizes;
1031 1032 userszc_2_szc[mmu_exported_page_sizes] = i;
1032 1033 mmu_exported_page_sizes++;
1033 1034 }
1034 1035 }
1035 1036
1036 1037 disable_ism_large_pages |= disable_large_pages;
1037 1038 disable_auto_data_large_pages = disable_large_pages;
1038 1039 disable_auto_text_large_pages = disable_large_pages;
1039 1040
1040 1041 /*
1041 1042 * Initialize mmu-specific large page sizes.
1042 1043 */
1043 1044 if (&mmu_large_pages_disabled) {
1044 1045 disable_large_pages |= mmu_large_pages_disabled(HAT_LOAD);
1045 1046 disable_ism_large_pages |=
1046 1047 mmu_large_pages_disabled(HAT_LOAD_SHARE);
1047 1048 disable_auto_data_large_pages |=
1048 1049 mmu_large_pages_disabled(HAT_AUTO_DATA);
1049 1050 disable_auto_text_large_pages |=
1050 1051 mmu_large_pages_disabled(HAT_AUTO_TEXT);
1051 1052 }
1052 1053 }
1053 1054
1054 1055 /*
1055 1056 * Initialize the hardware address translation structures.
1056 1057 */
1057 1058 void
1058 1059 hat_init(void)
1059 1060 {
1060 - int i;
1061 + int i;
1061 1062 uint_t sz;
1062 1063 size_t size;
1063 1064
1064 1065 hat_lock_init();
1065 1066 hat_kstat_init();
1066 1067
1067 1068 /*
1068 1069 * Hardware-only bits in a TTE
1069 1070 */
1070 1071 MAKE_TTE_MASK(&hw_tte);
1071 1072
1072 1073 hat_init_pagesizes();
1073 1074
1074 1075 /* Initialize the hash locks */
1075 1076 for (i = 0; i < khmehash_num; i++) {
1076 1077 mutex_init(&khme_hash[i].hmehash_mutex, NULL,
1077 1078 MUTEX_DEFAULT, NULL);
1078 1079 khme_hash[i].hmeh_nextpa = HMEBLK_ENDPA;
1079 1080 }
1080 1081 for (i = 0; i < uhmehash_num; i++) {
1081 1082 mutex_init(&uhme_hash[i].hmehash_mutex, NULL,
1082 1083 MUTEX_DEFAULT, NULL);
1083 1084 uhme_hash[i].hmeh_nextpa = HMEBLK_ENDPA;
1084 1085 }
1085 1086 khmehash_num--; /* make sure counter starts from 0 */
1086 1087 uhmehash_num--; /* make sure counter starts from 0 */
1087 1088
1088 1089 /*
1089 1090 * Allocate context domain structures.
1090 1091 *
1091 1092 * A platform may choose to modify max_mmu_ctxdoms in
1092 1093 * set_platform_defaults(). If a platform does not define
1093 1094 * a set_platform_defaults() or does not choose to modify
1094 1095 * max_mmu_ctxdoms, it gets one MMU context domain for every CPU.
1095 1096 *
1096 1097 * For all platforms that have CPUs sharing MMUs, this
1097 1098 * value must be defined.
1098 1099 */
1099 1100 if (max_mmu_ctxdoms == 0)
1100 1101 max_mmu_ctxdoms = max_ncpus;
1101 1102
1102 1103 size = max_mmu_ctxdoms * sizeof (mmu_ctx_t *);
1103 1104 mmu_ctxs_tbl = kmem_zalloc(size, KM_SLEEP);
1104 1105
1105 1106 /* mmu_ctx_t is 64 bytes aligned */
1106 1107 mmuctxdom_cache = kmem_cache_create("mmuctxdom_cache",
1107 1108 sizeof (mmu_ctx_t), 64, NULL, NULL, NULL, NULL, NULL, 0);
1108 1109 /*
1109 1110 * MMU context domain initialization for the Boot CPU.
1110 1111 * This needs the context domains array allocated above.
1111 1112 */
1112 1113 mutex_enter(&cpu_lock);
1113 1114 sfmmu_cpu_init(CPU);
1114 1115 mutex_exit(&cpu_lock);
1115 1116
1116 1117 /*
1117 1118	 * Initialize ism mapping list lock.
1118 1119 */
1119 1120
1120 1121 mutex_init(&ism_mlist_lock, NULL, MUTEX_DEFAULT, NULL);
1121 1122
1122 1123 /*
1123 1124 * Each sfmmu structure carries an array of MMU context info
1124 1125 * structures, one per context domain. The size of this array depends
1125 1126 * on the maximum number of context domains. So, the size of the
1126 1127 * sfmmu structure varies per platform.
1127 1128 *
1128 1129 * sfmmu is allocated from static arena, because trap
1129 1130 * handler at TL > 0 is not allowed to touch kernel relocatable
1130 1131 * memory. sfmmu's alignment is changed to 64 bytes from
1131 1132 * default 8 bytes, as the lower 6 bits will be used to pass
1132 1133 * pgcnt to vtag_flush_pgcnt_tl1.
1133 1134 */
1134 1135 size = sizeof (sfmmu_t) + sizeof (sfmmu_ctx_t) * (max_mmu_ctxdoms - 1);
1135 1136
1136 1137 sfmmuid_cache = kmem_cache_create("sfmmuid_cache", size,
1137 1138 64, sfmmu_idcache_constructor, sfmmu_idcache_destructor,
1138 1139 NULL, NULL, static_arena, 0);
1139 1140
1140 1141 sfmmu_tsbinfo_cache = kmem_cache_create("sfmmu_tsbinfo_cache",
1141 1142 sizeof (struct tsb_info), 0, NULL, NULL, NULL, NULL, NULL, 0);
1142 1143
1143 1144 /*
1144 1145 * Since we only use the tsb8k cache to "borrow" pages for TSBs
1145 1146 * from the heap when low on memory or when TSB_FORCEALLOC is
1146 1147 * specified, don't use magazines to cache them--we want to return
1147 1148 * them to the system as quickly as possible.
1148 1149 */
1149 1150 sfmmu_tsb8k_cache = kmem_cache_create("sfmmu_tsb8k_cache",
1150 1151 MMU_PAGESIZE, MMU_PAGESIZE, NULL, NULL, NULL, NULL,
1151 1152 static_arena, KMC_NOMAGAZINE);
1152 1153
1153 1154 /*
1154 1155 * Set tsb_alloc_hiwater to 1/tsb_alloc_hiwater_factor of physical
1155 1156 * memory, which corresponds to the old static reserve for TSBs.
1156 1157 * tsb_alloc_hiwater_factor defaults to 32. This caps the amount of
1157 1158 * memory we'll allocate for TSB slabs; beyond this point TSB
1158 1159 * allocations will be taken from the kernel heap (via
1159 1160 * sfmmu_tsb8k_cache) and will be throttled as would any other kmem
1160 1161 * consumer.
1161 1162 */
1162 1163 if (tsb_alloc_hiwater_factor == 0) {
1163 1164 tsb_alloc_hiwater_factor = TSB_ALLOC_HIWATER_FACTOR_DEFAULT;
1164 1165 }
1165 1166 SFMMU_SET_TSB_ALLOC_HIWATER(physmem);
1166 1167
1167 1168 for (sz = tsb_slab_ttesz; sz > 0; sz--) {
1168 1169 if (!(disable_large_pages & (1 << sz)))
1169 1170 break;
1170 1171 }
1171 1172
1172 1173 if (sz < tsb_slab_ttesz) {
1173 1174 tsb_slab_ttesz = sz;
1174 1175 tsb_slab_shift = MMU_PAGESHIFT + (sz << 1) + sz;
1175 1176 tsb_slab_size = 1 << tsb_slab_shift;
1176 1177 tsb_slab_mask = (1 << (tsb_slab_shift - MMU_PAGESHIFT)) - 1;
1177 1178 use_bigtsb_arena = 0;
1178 1179 } else if (use_bigtsb_arena &&
1179 1180 (disable_large_pages & (1 << bigtsb_slab_ttesz))) {
1180 1181 use_bigtsb_arena = 0;
1181 1182 }
1182 1183
1183 1184 if (!use_bigtsb_arena) {
1184 1185 bigtsb_slab_shift = tsb_slab_shift;
1185 1186 }
1186 1187 SFMMU_SET_TSB_MAX_GROWSIZE(physmem);
1187 1188
1188 1189 /*
1189 1190 * On smaller memory systems, allocate TSB memory in smaller chunks
1190 1191 * than the default 4M slab size. We also honor disable_large_pages
1191 1192 * here.
1192 1193 *
1193 1194 * The trap handlers need to be patched with the final slab shift,
1194 1195 * since they need to be able to construct the TSB pointer at runtime.
1195 1196 */
1196 1197 if ((tsb_max_growsize <= TSB_512K_SZCODE) &&
1197 1198 !(disable_large_pages & (1 << TTE512K))) {
1198 1199 tsb_slab_ttesz = TTE512K;
1199 1200 tsb_slab_shift = MMU_PAGESHIFT512K;
1200 1201 tsb_slab_size = MMU_PAGESIZE512K;
1201 1202 tsb_slab_mask = MMU_PAGEOFFSET512K >> MMU_PAGESHIFT;
1202 1203 use_bigtsb_arena = 0;
1203 1204 }
1204 1205
1205 1206 if (!use_bigtsb_arena) {
1206 1207 bigtsb_slab_ttesz = tsb_slab_ttesz;
1207 1208 bigtsb_slab_shift = tsb_slab_shift;
1208 1209 bigtsb_slab_size = tsb_slab_size;
1209 1210 bigtsb_slab_mask = tsb_slab_mask;
1210 1211 }
1211 1212
1212 1213
1213 1214 /*
1214 1215 * Set up memory callback to update tsb_alloc_hiwater and
1215 1216 * tsb_max_growsize.
1216 1217 */
1217 1218 i = kphysm_setup_func_register(&sfmmu_update_vec, (void *) 0);
1218 1219 ASSERT(i == 0);
1219 1220
1220 1221 /*
1221 1222 * kmem_tsb_arena is the source from which large TSB slabs are
1222 1223 * drawn. The quantum of this arena corresponds to the largest
1223 1224 * TSB size we can dynamically allocate for user processes.
1224 1225 * Currently it must also be a supported page size since we
1225 1226 * use exactly one translation entry to map each slab page.
1226 1227 *
1227 1228 * The per-lgroup kmem_tsb_default_arena arenas are the arenas from
1228 1229 * which most TSBs are allocated. Since most TSB allocations are
1229 1230 * typically 8K we have a kmem cache we stack on top of each
1230 1231 * kmem_tsb_default_arena to speed up those allocations.
1231 1232 *
1232 1233 * Note the two-level scheme of arenas is required only
1233 1234 * because vmem_create doesn't allow us to specify alignment
1234 1235 * requirements. If this ever changes the code could be
1235 1236 * simplified to use only one level of arenas.
1236 1237 *
1237 1238 * If 256M page support exists on sun4v, 256MB kmem_bigtsb_arena
1238 1239 * will be provided in addition to the 4M kmem_tsb_arena.
1239 1240 */
1240 1241 if (use_bigtsb_arena) {
1241 1242 kmem_bigtsb_arena = vmem_create("kmem_bigtsb", NULL, 0,
1242 1243 bigtsb_slab_size, sfmmu_vmem_xalloc_aligned_wrapper,
1243 1244 vmem_xfree, heap_arena, 0, VM_SLEEP);
1244 1245 }
1245 1246
1246 1247 kmem_tsb_arena = vmem_create("kmem_tsb", NULL, 0, tsb_slab_size,
1247 1248 sfmmu_vmem_xalloc_aligned_wrapper,
1248 1249 vmem_xfree, heap_arena, 0, VM_SLEEP);
1249 1250
1250 1251 if (tsb_lgrp_affinity) {
1251 1252 char s[50];
1252 1253 for (i = 0; i < NLGRPS_MAX; i++) {
1253 1254 if (use_bigtsb_arena) {
1254 1255 (void) sprintf(s, "kmem_bigtsb_lgrp%d", i);
1255 1256 kmem_bigtsb_default_arena[i] = vmem_create(s,
1256 1257 NULL, 0, 2 * tsb_slab_size,
1257 1258 sfmmu_tsb_segkmem_alloc,
1258 1259 sfmmu_tsb_segkmem_free, kmem_bigtsb_arena,
1259 1260 0, VM_SLEEP | VM_BESTFIT);
1260 1261 }
1261 1262
1262 1263 (void) sprintf(s, "kmem_tsb_lgrp%d", i);
1263 1264 kmem_tsb_default_arena[i] = vmem_create(s,
1264 1265 NULL, 0, PAGESIZE, sfmmu_tsb_segkmem_alloc,
1265 1266 sfmmu_tsb_segkmem_free, kmem_tsb_arena, 0,
1266 1267 VM_SLEEP | VM_BESTFIT);
1267 1268
1268 1269 (void) sprintf(s, "sfmmu_tsb_lgrp%d_cache", i);
1269 1270 sfmmu_tsb_cache[i] = kmem_cache_create(s,
1270 1271 PAGESIZE, PAGESIZE, NULL, NULL, NULL, NULL,
1271 1272 kmem_tsb_default_arena[i], 0);
1272 1273 }
1273 1274 } else {
1274 1275 if (use_bigtsb_arena) {
1275 1276 kmem_bigtsb_default_arena[0] =
1276 1277 vmem_create("kmem_bigtsb_default", NULL, 0,
1277 1278 2 * tsb_slab_size, sfmmu_tsb_segkmem_alloc,
1278 1279 sfmmu_tsb_segkmem_free, kmem_bigtsb_arena, 0,
1279 1280 VM_SLEEP | VM_BESTFIT);
1280 1281 }
1281 1282
1282 1283 kmem_tsb_default_arena[0] = vmem_create("kmem_tsb_default",
1283 1284 NULL, 0, PAGESIZE, sfmmu_tsb_segkmem_alloc,
1284 1285 sfmmu_tsb_segkmem_free, kmem_tsb_arena, 0,
1285 1286 VM_SLEEP | VM_BESTFIT);
1286 1287 sfmmu_tsb_cache[0] = kmem_cache_create("sfmmu_tsb_cache",
1287 1288 PAGESIZE, PAGESIZE, NULL, NULL, NULL, NULL,
1288 1289 kmem_tsb_default_arena[0], 0);
1289 1290 }
1290 1291
1291 1292 sfmmu8_cache = kmem_cache_create("sfmmu8_cache", HME8BLK_SZ,
1292 1293 HMEBLK_ALIGN, sfmmu_hblkcache_constructor,
1293 1294 sfmmu_hblkcache_destructor,
1294 1295 sfmmu_hblkcache_reclaim, (void *)HME8BLK_SZ,
1295 1296 hat_memload_arena, KMC_NOHASH);
1296 1297
1297 1298 hat_memload1_arena = vmem_create("hat_memload1", NULL, 0, PAGESIZE,
1298 1299 segkmem_alloc_permanent, segkmem_free, heap_arena, 0,
1299 1300 VMC_DUMPSAFE | VM_SLEEP);
1300 1301
1301 1302 sfmmu1_cache = kmem_cache_create("sfmmu1_cache", HME1BLK_SZ,
1302 1303 HMEBLK_ALIGN, sfmmu_hblkcache_constructor,
1303 1304 sfmmu_hblkcache_destructor,
1304 1305 NULL, (void *)HME1BLK_SZ,
1305 1306 hat_memload1_arena, KMC_NOHASH);
1306 1307
1307 1308 pa_hment_cache = kmem_cache_create("pa_hment_cache", PAHME_SZ,
1308 1309 0, NULL, NULL, NULL, NULL, static_arena, KMC_NOHASH);
1309 1310
1310 1311 ism_blk_cache = kmem_cache_create("ism_blk_cache",
1311 1312 sizeof (ism_blk_t), ecache_alignsize, NULL, NULL,
1312 1313 NULL, NULL, static_arena, KMC_NOHASH);
1313 1314
1314 1315 ism_ment_cache = kmem_cache_create("ism_ment_cache",
1315 1316 sizeof (ism_ment_t), 0, NULL, NULL,
1316 1317 NULL, NULL, NULL, 0);
1317 1318
1318 1319 /*
1319 1320 * We grab the first hat for the kernel,
1320 1321 */
1321 1322 AS_LOCK_ENTER(&kas, RW_WRITER);
1322 1323 kas.a_hat = hat_alloc(&kas);
1323 1324 AS_LOCK_EXIT(&kas);
1324 1325
1325 1326 /*
1326 1327 * Initialize hblk_reserve.
1327 1328 */
1328 1329 ((struct hme_blk *)hblk_reserve)->hblk_nextpa =
1329 1330 va_to_pa((caddr_t)hblk_reserve);
1330 1331
1331 1332 #ifndef UTSB_PHYS
1332 1333 /*
1333 1334 * Reserve some kernel virtual address space for the locked TTEs
1334 1335 * that allow us to probe the TSB from TL>0.
1335 1336 */
1336 1337 utsb_vabase = vmem_xalloc(heap_arena, tsb_slab_size, tsb_slab_size,
1337 1338 0, 0, NULL, NULL, VM_SLEEP);
1338 1339 utsb4m_vabase = vmem_xalloc(heap_arena, tsb_slab_size, tsb_slab_size,
1339 1340 0, 0, NULL, NULL, VM_SLEEP);
1340 1341 #endif
1341 1342
1342 1343 #ifdef VAC
1343 1344 /*
1344 1345 * The big page VAC handling code assumes VAC
1345 1346 * will not be bigger than the smallest big
1346 1347 * page- which is 64K.
1347 1348 */
1348 1349 if (TTEPAGES(TTE64K) < CACHE_NUM_COLOR) {
1349 1350 cmn_err(CE_PANIC, "VAC too big!");
1350 1351 }
1351 1352 #endif
1352 1353
1353 1354 uhme_hash_pa = va_to_pa(uhme_hash);
1354 1355 khme_hash_pa = va_to_pa(khme_hash);
1355 1356
1356 1357 /*
1357 1358 * Initialize relocation locks. kpr_suspendlock is held
1358 1359 * at PIL_MAX to prevent interrupts from pinning the holder
1359 1360 * of a suspended TTE which may access it leading to a
1360 1361 * deadlock condition.
1361 1362 */
1362 1363 mutex_init(&kpr_mutex, NULL, MUTEX_DEFAULT, NULL);
1363 1364 mutex_init(&kpr_suspendlock, NULL, MUTEX_SPIN, (void *)PIL_MAX);
1364 1365
1365 1366 /*
1366 1367	 * If shared context support is disabled via /etc/system,
1367 1368	 * set shctx_on to 0 here if it was set to 1 earlier in the
1368 1369	 * boot sequence by the cpu module initialization code.
1369 1370 */
1370 1371 if (shctx_on && disable_shctx) {
1371 1372 shctx_on = 0;
1372 1373 }
1373 1374
1374 1375 if (shctx_on) {
1375 1376 srd_buckets = kmem_zalloc(SFMMU_MAX_SRD_BUCKETS *
1376 1377 sizeof (srd_buckets[0]), KM_SLEEP);
1377 1378 for (i = 0; i < SFMMU_MAX_SRD_BUCKETS; i++) {
1378 1379 mutex_init(&srd_buckets[i].srdb_lock, NULL,
1379 1380 MUTEX_DEFAULT, NULL);
1380 1381 }
1381 1382
1382 1383 srd_cache = kmem_cache_create("srd_cache", sizeof (sf_srd_t),
1383 1384 0, sfmmu_srdcache_constructor, sfmmu_srdcache_destructor,
1384 1385 NULL, NULL, NULL, 0);
1385 1386 region_cache = kmem_cache_create("region_cache",
1386 1387 sizeof (sf_region_t), 0, sfmmu_rgncache_constructor,
1387 1388 sfmmu_rgncache_destructor, NULL, NULL, NULL, 0);
1388 1389 scd_cache = kmem_cache_create("scd_cache", sizeof (sf_scd_t),
1389 1390 0, sfmmu_scdcache_constructor, sfmmu_scdcache_destructor,
1390 1391 NULL, NULL, NULL, 0);
1391 1392 }
1392 1393
1393 1394 /*
1394 1395 * Pre-allocate hrm_hashtab before enabling the collection of
1395 1396	 * refmod statistics. Allocating on the fly would mean
1396 1397	 * running the risk of recursive mutex enters or
1397 1398	 * deadlocks.
1398 1399 */
1399 1400 hrm_hashtab = kmem_zalloc(HRM_HASHSIZE * sizeof (struct hrmstat *),
1400 1401 KM_SLEEP);
1401 1402
1402 1403 /* Allocate per-cpu pending freelist of hmeblks */
1403 1404 cpu_hme_pend = kmem_zalloc((NCPU * sizeof (cpu_hme_pend_t)) + 64,
1404 1405 KM_SLEEP);
1405 1406 cpu_hme_pend = (cpu_hme_pend_t *)P2ROUNDUP(
1406 1407 (uintptr_t)cpu_hme_pend, 64);
1407 1408
1408 1409 for (i = 0; i < NCPU; i++) {
1409 1410 mutex_init(&cpu_hme_pend[i].chp_mutex, NULL, MUTEX_DEFAULT,
1410 1411 NULL);
1411 1412 }
1412 1413
1413 1414 if (cpu_hme_pend_thresh == 0) {
1414 1415 cpu_hme_pend_thresh = CPU_HME_PEND_THRESH;
1415 1416 }
1416 1417 }
1417 1418
1418 1419 /*
1419 1420 * Initialize locking for the hat layer, called early during boot.
1420 1421 */
1421 1422 static void
1422 1423 hat_lock_init()
1423 1424 {
1424 1425 int i;
1425 1426
1426 1427 /*
1427 1428 * initialize the array of mutexes protecting a page's mapping
1428 1429 * list and p_nrm field.
1429 1430 */
1430 1431 for (i = 0; i < MML_TABLE_SIZE; i++)
1431 1432 mutex_init(&mml_table[i].pad_mutex, NULL, MUTEX_DEFAULT, NULL);
1432 1433
1433 1434 if (kpm_enable) {
1434 1435 for (i = 0; i < kpmp_table_sz; i++) {
1435 1436 mutex_init(&kpmp_table[i].khl_mutex, NULL,
1436 1437 MUTEX_DEFAULT, NULL);
1437 1438 }
1438 1439 }
1439 1440
1440 1441 /*
1441 1442 * Initialize array of mutex locks that protects sfmmu fields and
1442 1443 * TSB lists.
1443 1444 */
1444 1445 for (i = 0; i < SFMMU_NUM_LOCK; i++)
1445 1446 mutex_init(HATLOCK_MUTEXP(&hat_lock[i]), NULL, MUTEX_DEFAULT,
1446 1447 NULL);
1447 1448 }
1448 1449
1449 1450 #define SFMMU_KERNEL_MAXVA \
1450 1451 (kmem64_base ? (uintptr_t)kmem64_end : (SYSLIMIT))
1451 1452
1452 1453 /*
1453 1454 * Allocate a hat structure.
1454 1455 * Called when an address space first uses a hat.
1455 1456 */
1456 1457 struct hat *
1457 1458 hat_alloc(struct as *as)
1458 1459 {
1459 1460 sfmmu_t *sfmmup;
1460 1461 int i;
1461 1462 uint64_t cnum;
1462 1463 extern uint_t get_color_start(struct as *);
1463 1464
1464 1465 ASSERT(AS_WRITE_HELD(as));
1465 1466 sfmmup = kmem_cache_alloc(sfmmuid_cache, KM_SLEEP);
1466 1467 sfmmup->sfmmu_as = as;
1467 1468 sfmmup->sfmmu_flags = 0;
1468 1469 sfmmup->sfmmu_tteflags = 0;
1469 1470 sfmmup->sfmmu_rtteflags = 0;
1470 1471 LOCK_INIT_CLEAR(&sfmmup->sfmmu_ctx_lock);
1471 1472
1472 1473 if (as == &kas) {
1473 1474 ksfmmup = sfmmup;
1474 1475 sfmmup->sfmmu_cext = 0;
1475 1476 cnum = KCONTEXT;
1476 1477
1477 1478 sfmmup->sfmmu_clrstart = 0;
1478 1479 sfmmup->sfmmu_tsb = NULL;
1479 1480 /*
1480 1481 * hat_kern_setup() will call sfmmu_init_ktsbinfo()
1481 1482 * to setup tsb_info for ksfmmup.
1482 1483 */
1483 1484 } else {
1484 1485
1485 1486 /*
1486 1487 * Just set to invalid ctx. When it faults, it will
1487 1488		 * get a valid ctx. This avoids the situation
1488 1489 * where we get a ctx, but it gets stolen and then
1489 1490 * we fault when we try to run and so have to get
1490 1491 * another ctx.
1491 1492 */
1492 1493 sfmmup->sfmmu_cext = 0;
1493 1494 cnum = INVALID_CONTEXT;
1494 1495
1495 1496 /* initialize original physical page coloring bin */
1496 1497 sfmmup->sfmmu_clrstart = get_color_start(as);
1497 1498 #ifdef DEBUG
1498 1499 if (tsb_random_size) {
1499 1500 uint32_t randval = (uint32_t)gettick() >> 4;
1500 1501 int size = randval % (tsb_max_growsize + 1);
1501 1502
1502 1503			/* choose a random tsb size for stress testing */
1503 1504 (void) sfmmu_tsbinfo_alloc(&sfmmup->sfmmu_tsb, size,
1504 1505 TSB8K|TSB64K|TSB512K, 0, sfmmup);
1505 1506 } else
1506 1507 #endif /* DEBUG */
1507 1508 (void) sfmmu_tsbinfo_alloc(&sfmmup->sfmmu_tsb,
1508 1509 default_tsb_size,
1509 1510 TSB8K|TSB64K|TSB512K, 0, sfmmup);
1510 1511 sfmmup->sfmmu_flags = HAT_SWAPPED | HAT_ALLCTX_INVALID;
1511 1512 ASSERT(sfmmup->sfmmu_tsb != NULL);
1512 1513 }
1513 1514
1514 1515 ASSERT(max_mmu_ctxdoms > 0);
1515 1516 for (i = 0; i < max_mmu_ctxdoms; i++) {
1516 1517 sfmmup->sfmmu_ctxs[i].cnum = cnum;
1517 1518 sfmmup->sfmmu_ctxs[i].gnum = 0;
1518 1519 }
1519 1520
1520 1521 for (i = 0; i < max_mmu_page_sizes; i++) {
1521 1522 sfmmup->sfmmu_ttecnt[i] = 0;
1522 1523 sfmmup->sfmmu_scdrttecnt[i] = 0;
1523 1524 sfmmup->sfmmu_ismttecnt[i] = 0;
1524 1525 sfmmup->sfmmu_scdismttecnt[i] = 0;
1525 1526 sfmmup->sfmmu_pgsz[i] = TTE8K;
1526 1527 }
1527 1528 sfmmup->sfmmu_tsb0_4minflcnt = 0;
1528 1529 sfmmup->sfmmu_iblk = NULL;
1529 1530 sfmmup->sfmmu_ismhat = 0;
1530 1531 sfmmup->sfmmu_scdhat = 0;
1531 1532 sfmmup->sfmmu_ismblkpa = (uint64_t)-1;
1532 1533 if (sfmmup == ksfmmup) {
1533 1534 CPUSET_ALL(sfmmup->sfmmu_cpusran);
1534 1535 } else {
1535 1536 CPUSET_ZERO(sfmmup->sfmmu_cpusran);
1536 1537 }
1537 1538 sfmmup->sfmmu_free = 0;
1538 1539 sfmmup->sfmmu_rmstat = 0;
1539 1540 sfmmup->sfmmu_clrbin = sfmmup->sfmmu_clrstart;
1540 1541 cv_init(&sfmmup->sfmmu_tsb_cv, NULL, CV_DEFAULT, NULL);
1541 1542 sfmmup->sfmmu_srdp = NULL;
1542 1543 SF_RGNMAP_ZERO(sfmmup->sfmmu_region_map);
1543 1544 bzero(sfmmup->sfmmu_hmeregion_links, SFMMU_L1_HMERLINKS_SIZE);
1544 1545 sfmmup->sfmmu_scdp = NULL;
1545 1546 sfmmup->sfmmu_scd_link.next = NULL;
1546 1547 sfmmup->sfmmu_scd_link.prev = NULL;
1547 1548 return (sfmmup);
1548 1549 }
1549 1550
1550 1551 /*
1551 1552 * Create per-MMU context domain kstats for a given MMU ctx.
1552 1553 */
1553 1554 static void
1554 1555 sfmmu_mmu_kstat_create(mmu_ctx_t *mmu_ctxp)
1555 1556 {
1556 1557 mmu_ctx_stat_t stat;
1557 1558 kstat_t *mmu_kstat;
1558 1559
1559 1560 ASSERT(MUTEX_HELD(&cpu_lock));
1560 1561 ASSERT(mmu_ctxp->mmu_kstat == NULL);
1561 1562
1562 1563 mmu_kstat = kstat_create("unix", mmu_ctxp->mmu_idx, "mmu_ctx",
1563 1564 "hat", KSTAT_TYPE_NAMED, MMU_CTX_NUM_STATS, KSTAT_FLAG_VIRTUAL);
1564 1565
1565 1566 if (mmu_kstat == NULL) {
1566 1567 cmn_err(CE_WARN, "kstat_create for MMU %d failed",
1567 1568 mmu_ctxp->mmu_idx);
1568 1569 } else {
1569 1570 mmu_kstat->ks_data = mmu_ctxp->mmu_kstat_data;
1570 1571 for (stat = 0; stat < MMU_CTX_NUM_STATS; stat++)
1571 1572 kstat_named_init(&mmu_ctxp->mmu_kstat_data[stat],
1572 1573 mmu_ctx_kstat_names[stat], KSTAT_DATA_INT64);
1573 1574 mmu_ctxp->mmu_kstat = mmu_kstat;
1574 1575 kstat_install(mmu_kstat);
1575 1576 }
1576 1577 }
1577 1578
1578 1579 /*
1579 1580 * plat_cpuid_to_mmu_ctx_info() is a platform interface that returns MMU
1580 1581 * context domain information for a given CPU. If a platform does not
1581 1582 * specify that interface, then the function below is used instead to return
1582 1583 * default information. The defaults are as follows:
1583 1584 *
1584 1585 * - The number of MMU context IDs supported on any CPU in the
1585 1586 * system is 8K.
1586 1587 * - There is one MMU context domain per CPU.
1587 1588 */
1588 1589 /*ARGSUSED*/
1589 1590 static void
1590 1591 sfmmu_cpuid_to_mmu_ctx_info(processorid_t cpuid, mmu_ctx_info_t *infop)
1591 1592 {
1592 1593 infop->mmu_nctxs = nctxs;
1593 1594 infop->mmu_idx = cpu[cpuid]->cpu_seqid;
1594 1595 }
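
For illustration only: a platform that wants to group CPUs into shared MMU context domains (rather than the one-domain-per-CPU default above) could supply its own plat_cpuid_to_mmu_ctx_info(). This is a minimal sketch, not code from any platform module; cpuid_to_chipid() is a hypothetical helper standing in for whatever chip-topology lookup the platform actually provides.

/*
 * Hypothetical platform override: one MMU context domain per chip.
 * cpuid_to_chipid() is an assumed helper, not a real platmod call.
 */
void
plat_cpuid_to_mmu_ctx_info(processorid_t cpuid, mmu_ctx_info_t *infop)
{
	infop->mmu_nctxs = nctxs;			/* same context ID space */
	infop->mmu_idx = cpuid_to_chipid(cpuid);	/* group CPUs by chip */
}
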
1595 1596
1596 1597 /*
1597 1598 * Called during CPU initialization to set the MMU context-related information
1598 1599 * for a CPU.
1599 1600 *
1600 1601 * cpu_lock serializes accesses to mmu_ctxs and mmu_saved_gnum.
1601 1602 */
1602 1603 void
1603 1604 sfmmu_cpu_init(cpu_t *cp)
1604 1605 {
1605 1606 mmu_ctx_info_t info;
1606 1607 mmu_ctx_t *mmu_ctxp;
1607 1608
1608 1609 ASSERT(MUTEX_HELD(&cpu_lock));
1609 1610
1610 1611 if (&plat_cpuid_to_mmu_ctx_info == NULL)
1611 1612 sfmmu_cpuid_to_mmu_ctx_info(cp->cpu_id, &info);
1612 1613 else
1613 1614 plat_cpuid_to_mmu_ctx_info(cp->cpu_id, &info);
1614 1615
1615 1616 ASSERT(info.mmu_idx < max_mmu_ctxdoms);
1616 1617
1617 1618 if ((mmu_ctxp = mmu_ctxs_tbl[info.mmu_idx]) == NULL) {
1618 1619 /* Each mmu_ctx is cacheline aligned. */
1619 1620 mmu_ctxp = kmem_cache_alloc(mmuctxdom_cache, KM_SLEEP);
1620 1621 bzero(mmu_ctxp, sizeof (mmu_ctx_t));
1621 1622
1622 1623 mutex_init(&mmu_ctxp->mmu_lock, NULL, MUTEX_SPIN,
1623 1624 (void *)ipltospl(DISP_LEVEL));
1624 1625 mmu_ctxp->mmu_idx = info.mmu_idx;
1625 1626 mmu_ctxp->mmu_nctxs = info.mmu_nctxs;
1626 1627 /*
1627 1628		 * Globally, for the lifetime of a system,
1628 1629 * gnum must always increase.
1629 1630 * mmu_saved_gnum is protected by the cpu_lock.
1630 1631 */
1631 1632 mmu_ctxp->mmu_gnum = mmu_saved_gnum + 1;
1632 1633 mmu_ctxp->mmu_cnum = NUM_LOCKED_CTXS;
1633 1634
1634 1635 sfmmu_mmu_kstat_create(mmu_ctxp);
1635 1636
1636 1637 mmu_ctxs_tbl[info.mmu_idx] = mmu_ctxp;
1637 1638 } else {
1638 1639 ASSERT(mmu_ctxp->mmu_idx == info.mmu_idx);
1639 1640 ASSERT(mmu_ctxp->mmu_nctxs <= info.mmu_nctxs);
1640 1641 }
1641 1642
1642 1643 /*
1643 1644 * The mmu_lock is acquired here to prevent races with
1644 1645 * the wrap-around code.
1645 1646 */
1646 1647 mutex_enter(&mmu_ctxp->mmu_lock);
1647 1648
1648 1649
1649 1650 mmu_ctxp->mmu_ncpus++;
1650 1651 CPUSET_ADD(mmu_ctxp->mmu_cpuset, cp->cpu_id);
1651 1652 CPU_MMU_IDX(cp) = info.mmu_idx;
1652 1653 CPU_MMU_CTXP(cp) = mmu_ctxp;
1653 1654
1654 1655 mutex_exit(&mmu_ctxp->mmu_lock);
1655 1656 }
1656 1657
1657 1658 static void
1658 1659 sfmmu_ctxdom_free(mmu_ctx_t *mmu_ctxp)
1659 1660 {
1660 1661 ASSERT(MUTEX_HELD(&cpu_lock));
1661 1662 ASSERT(!MUTEX_HELD(&mmu_ctxp->mmu_lock));
1662 1663
1663 1664 mutex_destroy(&mmu_ctxp->mmu_lock);
1664 1665
1665 1666 if (mmu_ctxp->mmu_kstat)
1666 1667 kstat_delete(mmu_ctxp->mmu_kstat);
1667 1668
1668 1669 /* mmu_saved_gnum is protected by the cpu_lock. */
1669 1670 if (mmu_saved_gnum < mmu_ctxp->mmu_gnum)
1670 1671 mmu_saved_gnum = mmu_ctxp->mmu_gnum;
1671 1672
1672 1673 kmem_cache_free(mmuctxdom_cache, mmu_ctxp);
1673 1674 }
1674 1675
1675 1676 /*
1676 1677 * Called to perform MMU context-related cleanup for a CPU.
1677 1678 */
1678 1679 void
1679 1680 sfmmu_cpu_cleanup(cpu_t *cp)
1680 1681 {
1681 1682 mmu_ctx_t *mmu_ctxp;
1682 1683
1683 1684 ASSERT(MUTEX_HELD(&cpu_lock));
1684 1685
1685 1686 mmu_ctxp = CPU_MMU_CTXP(cp);
1686 1687 ASSERT(mmu_ctxp != NULL);
1687 1688
1688 1689 /*
1689 1690 * The mmu_lock is acquired here to prevent races with
1690 1691 * the wrap-around code.
1691 1692 */
1692 1693 mutex_enter(&mmu_ctxp->mmu_lock);
1693 1694
1694 1695 CPU_MMU_CTXP(cp) = NULL;
1695 1696
1696 1697 CPUSET_DEL(mmu_ctxp->mmu_cpuset, cp->cpu_id);
1697 1698 if (--mmu_ctxp->mmu_ncpus == 0) {
1698 1699 mmu_ctxs_tbl[mmu_ctxp->mmu_idx] = NULL;
1699 1700 mutex_exit(&mmu_ctxp->mmu_lock);
1700 1701 sfmmu_ctxdom_free(mmu_ctxp);
1701 1702 return;
1702 1703 }
1703 1704
1704 1705 mutex_exit(&mmu_ctxp->mmu_lock);
1705 1706 }
1706 1707
1707 1708 uint_t
1708 1709 sfmmu_ctxdom_nctxs(int idx)
1709 1710 {
1710 1711 return (mmu_ctxs_tbl[idx]->mmu_nctxs);
1711 1712 }
1712 1713
1713 1714 #ifdef sun4v
1714 1715 /*
1715 1716 * sfmmu_ctxdoms_* is an interface provided to help keep context domains
1716 1717 * consistent after suspend/resume on a system that can resume on
1717 1718 * different hardware than it was suspended on.
1718 1719 *
1719 1720 * sfmmu_ctxdoms_lock(void) locks all context domains and prevents new contexts
1720 1721 * from being allocated. It acquires all hat_locks, which blocks most access to
1721 1722 * context data, except for a few cases that are handled separately or are
1722 1723 * harmless. It wraps each domain to increment gnum and invalidate on-CPU
1723 1724 * contexts, and forces cnum to its max. As a result of this call, all user
1724 1725 * threads that are running on CPUs trap and try to perform wrap-around but
1725 1726 * can't because the hat_locks are taken. Threads that were not on CPUs but are
1726 1727 * started by the scheduler go to sfmmu_alloc_ctx() to acquire a context without
1727 1728 * checking hat_lock, but fail because cnum == nctxs, and therefore also trap and
1728 1729 * block on hat_lock trying to wrap. sfmmu_ctxdoms_lock() must be called before
1729 1730 * CPUs are paused, else it could deadlock acquiring locks held by paused CPUs.
1730 1731 *
1731 1732 * sfmmu_ctxdoms_remove() removes context domains from every CPU and records
1732 1733 * the CPUs that had them. It must be called after CPUs have been paused. This
1733 1734 * ensures that no threads are in sfmmu_alloc_ctx() accessing domain data,
1734 1735 * because pause_cpus sends a mondo interrupt to every CPU, and sfmmu_alloc_ctx
1735 1736 * runs with interrupts disabled. When CPUs are later resumed, they may enter
1736 1737 * sfmmu_alloc_ctx, but it will check for CPU_MMU_CTXP = NULL and immediately
1737 1738 * return failure. Or, they will be blocked trying to acquire hat_lock. Thus
1738 1739 * after sfmmu_ctxdoms_remove returns, we are guaranteed that no one is
1739 1740 * accessing the old context domains.
1740 1741 *
1741 1742 * sfmmu_ctxdoms_update(void) frees space used by old context domains and
1742 1743 * allocates new context domains based on hardware layout. It initializes
1743 1744 * every CPU that had a context domain before migration to have one again.
1744 1745 * sfmmu_ctxdoms_update must be called after CPUs are resumed, else it
1745 1746 * could deadlock acquiring locks held by paused CPUs.
1746 1747 *
1747 1748 * sfmmu_ctxdoms_unlock(void) releases all hat_locks after which user threads
1748 1749 * acquire new context ids and continue execution.
1749 1750 *
1750 1751 * Therefore, the functions should be called in the following order:
1751 1752 *	suspend_routine()
1752 1753 *		sfmmu_ctxdoms_lock()
1753 1754 *		pause_cpus()
1754 1755 *		suspend()
1755 1756 *			if (suspend failed)
1756 1757 *				sfmmu_ctxdoms_unlock()
1757 1758 *		...
1758 1759 *		sfmmu_ctxdoms_remove()
1759 1760 *		resume_cpus()
1760 1761 *		sfmmu_ctxdoms_update()
1761 1762 *		sfmmu_ctxdoms_unlock()
1762 1763 */
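
A sketch of the calling order described in the comment above, purely illustrative: suspend_routine(), suspend(), pause_cpus(), and resume_cpus() are stand-ins for whatever the platform suspend/resume code actually provides; only the sfmmu_ctxdoms_*() calls are the interface defined here.

/*
 * Illustrative only -- mirrors the ordering in the comment above.
 * suspend(), pause_cpus() and resume_cpus() are assumed platform
 * primitives, not the exact kernel interfaces.
 */
static int
suspend_routine(void)
{
	sfmmu_ctxdoms_lock();		/* block new context allocation */
	pause_cpus();
	if (suspend() != 0) {
		resume_cpus();
		sfmmu_ctxdoms_unlock();	/* back out on failure */
		return (-1);
	}
	sfmmu_ctxdoms_remove();		/* strip domains from paused CPUs */
	resume_cpus();
	sfmmu_ctxdoms_update();		/* rebuild domains for new hardware */
	sfmmu_ctxdoms_unlock();		/* let threads allocate contexts */
	return (0);
}
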
1763 1764 static cpuset_t sfmmu_ctxdoms_pset;
1764 1765
1765 1766 void
1766 1767 sfmmu_ctxdoms_remove()
1767 1768 {
1768 1769 processorid_t id;
1769 1770 cpu_t *cp;
1770 1771
1771 1772 /*
1772 1773 * Record the CPUs that have domains in sfmmu_ctxdoms_pset, so they can
1773 1774 * be restored post-migration. A CPU may be powered off and not have a
1774 1775 * domain, for example.
1775 1776 */
1776 1777 CPUSET_ZERO(sfmmu_ctxdoms_pset);
1777 1778
1778 1779 for (id = 0; id < NCPU; id++) {
1779 1780 if ((cp = cpu[id]) != NULL && CPU_MMU_CTXP(cp) != NULL) {
1780 1781 CPUSET_ADD(sfmmu_ctxdoms_pset, id);
1781 1782 CPU_MMU_CTXP(cp) = NULL;
1782 1783 }
1783 1784 }
1784 1785 }
1785 1786
1786 1787 void
1787 1788 sfmmu_ctxdoms_lock(void)
1788 1789 {
1789 1790 int idx;
1790 1791 mmu_ctx_t *mmu_ctxp;
1791 1792
1792 1793 sfmmu_hat_lock_all();
1793 1794
1794 1795 /*
1795 1796 * At this point, no thread can be in sfmmu_ctx_wrap_around, because
1796 1797 * hat_lock is always taken before calling it.
1797 1798 *
1798 1799 * For each domain, set mmu_cnum to max so no more contexts can be
1799 1800 * allocated, and wrap to flush on-CPU contexts and force threads to
1800 1801 * acquire a new context when we later drop hat_lock after migration.
1801 1802 * Setting mmu_cnum may race with sfmmu_alloc_ctx which also sets cnum,
1802 1803 * but the latter uses CAS and will miscompare and not overwrite it.
1803 1804 */
1804 1805 kpreempt_disable(); /* required by sfmmu_ctx_wrap_around */
1805 1806 for (idx = 0; idx < max_mmu_ctxdoms; idx++) {
1806 1807 if ((mmu_ctxp = mmu_ctxs_tbl[idx]) != NULL) {
1807 1808 mutex_enter(&mmu_ctxp->mmu_lock);
1808 1809 mmu_ctxp->mmu_cnum = mmu_ctxp->mmu_nctxs;
1809 1810 /* make sure updated cnum visible */
1810 1811 membar_enter();
1811 1812 mutex_exit(&mmu_ctxp->mmu_lock);
1812 1813 sfmmu_ctx_wrap_around(mmu_ctxp, B_FALSE);
1813 1814 }
1814 1815 }
1815 1816 kpreempt_enable();
1816 1817 }
1817 1818
1818 1819 void
1819 1820 sfmmu_ctxdoms_unlock(void)
1820 1821 {
1821 1822 sfmmu_hat_unlock_all();
1822 1823 }
1823 1824
1824 1825 void
1825 1826 sfmmu_ctxdoms_update(void)
1826 1827 {
1827 1828 processorid_t id;
1828 1829 cpu_t *cp;
1829 1830 uint_t idx;
1830 1831 mmu_ctx_t *mmu_ctxp;
1831 1832
1832 1833 /*
1833 1834	 * Free all context domains. As a side effect, this increases
1834 1835 * mmu_saved_gnum to the maximum gnum over all domains, which is used to
1835 1836 * init gnum in the new domains, which therefore will be larger than the
1836 1837 * sfmmu gnum for any process, guaranteeing that every process will see
1837 1838 * a new generation and allocate a new context regardless of what new
1838 1839 * domain it runs in.
1839 1840 */
1840 1841 mutex_enter(&cpu_lock);
1841 1842
1842 1843 for (idx = 0; idx < max_mmu_ctxdoms; idx++) {
1843 1844 if (mmu_ctxs_tbl[idx] != NULL) {
1844 1845 mmu_ctxp = mmu_ctxs_tbl[idx];
1845 1846 mmu_ctxs_tbl[idx] = NULL;
1846 1847 sfmmu_ctxdom_free(mmu_ctxp);
1847 1848 }
1848 1849 }
1849 1850
1850 1851 for (id = 0; id < NCPU; id++) {
1851 1852 if (CPU_IN_SET(sfmmu_ctxdoms_pset, id) &&
1852 1853 (cp = cpu[id]) != NULL)
1853 1854 sfmmu_cpu_init(cp);
1854 1855 }
1855 1856 mutex_exit(&cpu_lock);
1856 1857 }
1857 1858 #endif
1858 1859
1859 1860 /*
1860 1861 * hat_setup() makes an address space context the currently active one.
1861 1862 * In sfmmu this translates to setting the secondary context with the
1862 1863 * corresponding context.
1863 1864 */
1864 1865 void
1865 1866 hat_setup(struct hat *sfmmup, int allocflag)
1866 1867 {
1867 1868 hatlock_t *hatlockp;
1868 1869
1869 1870 /* Init needs some special treatment. */
1870 1871 if (allocflag == HAT_INIT) {
1871 1872 /*
1872 1873 * Make sure that we have
1873 1874 * 1. a TSB
1874 1875 * 2. a valid ctx that doesn't get stolen after this point.
1875 1876 */
1876 1877 hatlockp = sfmmu_hat_enter(sfmmup);
1877 1878
1878 1879 /*
1879 1880 * Swap in the TSB. hat_init() allocates tsbinfos without
1880 1881 * TSBs, but we need one for init, since the kernel does some
1881 1882 * special things to set up its stack and needs the TSB to
1882 1883 * resolve page faults.
1883 1884 */
1884 1885 sfmmu_tsb_swapin(sfmmup, hatlockp);
1885 1886
1886 1887 sfmmu_get_ctx(sfmmup);
1887 1888
1888 1889 sfmmu_hat_exit(hatlockp);
1889 1890 } else {
1890 1891 ASSERT(allocflag == HAT_ALLOC);
1891 1892
1892 1893 hatlockp = sfmmu_hat_enter(sfmmup);
1893 1894 kpreempt_disable();
1894 1895
1895 1896 CPUSET_ADD(sfmmup->sfmmu_cpusran, CPU->cpu_id);
1896 1897 /*
1897 1898		 * sfmmu_setctx_sec takes <pgsz|cnum> as a parameter; the
1898 1899		 * pagesize bits don't matter in this case since we are passing
1899 1900 * INVALID_CONTEXT to it.
1900 1901 * Compatibility Note: hw takes care of MMU_SCONTEXT1
1901 1902 */
1902 1903 sfmmu_setctx_sec(INVALID_CONTEXT);
1903 1904 sfmmu_clear_utsbinfo();
1904 1905
1905 1906 kpreempt_enable();
1906 1907 sfmmu_hat_exit(hatlockp);
1907 1908 }
1908 1909 }
1909 1910
1910 1911 /*
1911 1912 * Free all the translation resources for the specified address space.
1912 1913 * Called from as_free when an address space is being destroyed.
1913 1914 */
1914 1915 void
1915 1916 hat_free_start(struct hat *sfmmup)
1916 1917 {
1917 1918 ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as));
1918 1919 ASSERT(sfmmup != ksfmmup);
1919 1920
1920 1921 sfmmup->sfmmu_free = 1;
1921 1922 if (sfmmup->sfmmu_scdp != NULL) {
1922 1923 sfmmu_leave_scd(sfmmup, 0);
1923 1924 }
1924 1925
1925 1926 ASSERT(sfmmup->sfmmu_scdp == NULL);
1926 1927 }
1927 1928
1928 1929 void
1929 1930 hat_free_end(struct hat *sfmmup)
1930 1931 {
1931 1932 int i;
1932 1933
1933 1934 ASSERT(sfmmup->sfmmu_free == 1);
1934 1935 ASSERT(sfmmup->sfmmu_ttecnt[TTE8K] == 0);
1935 1936 ASSERT(sfmmup->sfmmu_ttecnt[TTE64K] == 0);
1936 1937 ASSERT(sfmmup->sfmmu_ttecnt[TTE512K] == 0);
1937 1938 ASSERT(sfmmup->sfmmu_ttecnt[TTE4M] == 0);
1938 1939 ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0);
1939 1940 ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0);
1940 1941
1941 1942 if (sfmmup->sfmmu_rmstat) {
1942 1943 hat_freestat(sfmmup->sfmmu_as, NULL);
1943 1944 }
1944 1945
1945 1946 while (sfmmup->sfmmu_tsb != NULL) {
1946 1947 struct tsb_info *next = sfmmup->sfmmu_tsb->tsb_next;
1947 1948 sfmmu_tsbinfo_free(sfmmup->sfmmu_tsb);
1948 1949 sfmmup->sfmmu_tsb = next;
1949 1950 }
1950 1951
1951 1952 if (sfmmup->sfmmu_srdp != NULL) {
1952 1953 sfmmu_leave_srd(sfmmup);
1953 1954 ASSERT(sfmmup->sfmmu_srdp == NULL);
1954 1955 for (i = 0; i < SFMMU_L1_HMERLINKS; i++) {
1955 1956 if (sfmmup->sfmmu_hmeregion_links[i] != NULL) {
1956 1957 kmem_free(sfmmup->sfmmu_hmeregion_links[i],
1957 1958 SFMMU_L2_HMERLINKS_SIZE);
1958 1959 sfmmup->sfmmu_hmeregion_links[i] = NULL;
1959 1960 }
1960 1961 }
1961 1962 }
1962 1963 sfmmu_free_sfmmu(sfmmup);
1963 1964
1964 1965 #ifdef DEBUG
1965 1966 for (i = 0; i < SFMMU_L1_HMERLINKS; i++) {
1966 1967 ASSERT(sfmmup->sfmmu_hmeregion_links[i] == NULL);
1967 1968 }
1968 1969 #endif
1969 1970
1970 1971 kmem_cache_free(sfmmuid_cache, sfmmup);
1971 1972 }
1972 1973
1973 1974 /*
1974 1975 * Set up any translation structures, for the specified address space,
1975 1976 * that are needed or preferred when the process is being swapped in.
1976 1977 */
1977 1978 /* ARGSUSED */
1978 1979 void
1979 1980 hat_swapin(struct hat *hat)
1980 1981 {
1981 1982 }
1982 1983
1983 1984 /*
1984 1985 * Free all of the translation resources, for the specified address space,
1985 1986 * that can be freed while the process is swapped out. Called from as_swapout.
1986 1987 * Also, free up the ctx that this process was using.
1987 1988 */
1988 1989 void
1989 1990 hat_swapout(struct hat *sfmmup)
1990 1991 {
1991 1992 struct hmehash_bucket *hmebp;
1992 1993 struct hme_blk *hmeblkp;
1993 1994 struct hme_blk *pr_hblk = NULL;
1994 1995 struct hme_blk *nx_hblk;
1995 1996 int i;
1996 1997 struct hme_blk *list = NULL;
1997 1998 hatlock_t *hatlockp;
1998 1999 struct tsb_info *tsbinfop;
1999 2000 struct free_tsb {
2000 2001 struct free_tsb *next;
2001 2002 struct tsb_info *tsbinfop;
2002 2003 }; /* free list of TSBs */
2003 2004 struct free_tsb *freelist, *last, *next;
2004 2005
2005 2006 SFMMU_STAT(sf_swapout);
2006 2007
2007 2008 /*
2008 2009 * There is no way to go from an as to all its translations in sfmmu.
2009 2010 * Here is one of the times when we take the big hit and traverse
2010 2011 * the hash looking for hme_blks to free up. Not only do we free up
2011 2012	 * this as's hme_blks but all those that are free. We are obviously
2012 2013 * swapping because we need memory so let's free up as much
2013 2014 * as we can.
2014 2015 *
2015 2016 * Note that we don't flush TLB/TSB here -- it's not necessary
2016 2017 * because:
2017 2018 * 1) we free the ctx we're using and throw away the TSB(s);
2018 2019 * 2) processes aren't runnable while being swapped out.
2019 2020 */
2020 2021 ASSERT(sfmmup != KHATID);
2021 2022 for (i = 0; i <= UHMEHASH_SZ; i++) {
2022 2023 hmebp = &uhme_hash[i];
2023 2024 SFMMU_HASH_LOCK(hmebp);
2024 2025 hmeblkp = hmebp->hmeblkp;
2025 2026 pr_hblk = NULL;
2026 2027 while (hmeblkp) {
2027 2028
2028 2029 if ((hmeblkp->hblk_tag.htag_id == sfmmup) &&
2029 2030 !hmeblkp->hblk_shw_bit && !hmeblkp->hblk_lckcnt) {
2030 2031 ASSERT(!hmeblkp->hblk_shared);
2031 2032 (void) sfmmu_hblk_unload(sfmmup, hmeblkp,
2032 2033 (caddr_t)get_hblk_base(hmeblkp),
2033 2034 get_hblk_endaddr(hmeblkp),
2034 2035 NULL, HAT_UNLOAD);
2035 2036 }
2036 2037 nx_hblk = hmeblkp->hblk_next;
2037 2038 if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
2038 2039 ASSERT(!hmeblkp->hblk_lckcnt);
2039 2040 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
2040 2041 &list, 0);
2041 2042 } else {
2042 2043 pr_hblk = hmeblkp;
2043 2044 }
2044 2045 hmeblkp = nx_hblk;
2045 2046 }
2046 2047 SFMMU_HASH_UNLOCK(hmebp);
2047 2048 }
2048 2049
2049 2050 sfmmu_hblks_list_purge(&list, 0);
2050 2051
2051 2052 /*
2052 2053 * Now free up the ctx so that others can reuse it.
2053 2054 */
2054 2055 hatlockp = sfmmu_hat_enter(sfmmup);
2055 2056
2056 2057 sfmmu_invalidate_ctx(sfmmup);
2057 2058
2058 2059 /*
2059 2060 * Free TSBs, but not tsbinfos, and set SWAPPED flag.
2060 2061 * If TSBs were never swapped in, just return.
2061 2062 * This implies that we don't support partial swapping
2062 2063 * of TSBs -- either all are swapped out, or none are.
2063 2064 *
2064 2065 * We must hold the HAT lock here to prevent racing with another
2065 2066 * thread trying to unmap TTEs from the TSB or running the post-
2066 2067 * relocator after relocating the TSB's memory. Unfortunately, we
2067 2068 * can't free memory while holding the HAT lock or we could
2068 2069 * deadlock, so we build a list of TSBs to be freed after marking
2069 2070 * the tsbinfos as swapped out and free them after dropping the
2070 2071 * lock.
2071 2072 */
2072 2073 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
2073 2074 sfmmu_hat_exit(hatlockp);
2074 2075 return;
2075 2076 }
2076 2077
2077 2078 SFMMU_FLAGS_SET(sfmmup, HAT_SWAPPED);
2078 2079 last = freelist = NULL;
2079 2080 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL;
2080 2081 tsbinfop = tsbinfop->tsb_next) {
2081 2082 ASSERT((tsbinfop->tsb_flags & TSB_SWAPPED) == 0);
2082 2083
2083 2084 /*
2084 2085 * Cast the TSB into a struct free_tsb and put it on the free
2085 2086 * list.
2086 2087 */
2087 2088 if (freelist == NULL) {
2088 2089 last = freelist = (struct free_tsb *)tsbinfop->tsb_va;
2089 2090 } else {
2090 2091 last->next = (struct free_tsb *)tsbinfop->tsb_va;
2091 2092 last = last->next;
2092 2093 }
2093 2094 last->next = NULL;
2094 2095 last->tsbinfop = tsbinfop;
2095 2096 tsbinfop->tsb_flags |= TSB_SWAPPED;
2096 2097 /*
2097 2098 * Zero out the TTE to clear the valid bit.
2098 2099 * Note we can't use a value like 0xbad because we want to
2099 2100 * ensure diagnostic bits are NEVER set on TTEs that might
2100 2101 * be loaded. The intent is to catch any invalid access
2101 2102 * to the swapped TSB, such as a thread running with a valid
2102 2103 * context without first calling sfmmu_tsb_swapin() to
2103 2104 * allocate TSB memory.
2104 2105 */
2105 2106 tsbinfop->tsb_tte.ll = 0;
2106 2107 }
2107 2108
2108 2109 /* Now we can drop the lock and free the TSB memory. */
2109 2110 sfmmu_hat_exit(hatlockp);
2110 2111 for (; freelist != NULL; freelist = next) {
2111 2112 next = freelist->next;
2112 2113 sfmmu_tsb_free(freelist->tsbinfop);
2113 2114 }
2114 2115 }
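
The freelist trick above (reusing the TSB's own memory as the list node while the HAT lock is held, then freeing after the lock is dropped) generalizes to any "can't free under this lock" situation. A minimal stand-alone sketch under stated assumptions: buf_lock, buf_free(), and the buffer layout are hypothetical, and each buffer is assumed large enough to hold a defer_node.

/*
 * Sketch of the defer-free pattern used in hat_swapout(): thread the
 * buffers themselves into a list under the lock, free them after the
 * lock is dropped.  buf_lock and buf_free() are hypothetical.
 */
static kmutex_t buf_lock;		/* assumed initialized elsewhere */
extern void buf_free(void *);		/* hypothetical free routine */

struct defer_node {
	struct defer_node *next;
	void *payload;
};

static void
defer_free_example(void **bufs, int n)
{
	struct defer_node *head = NULL, *dn;
	int i;

	mutex_enter(&buf_lock);
	for (i = 0; i < n; i++) {
		dn = (struct defer_node *)bufs[i];	/* reuse the buffer */
		dn->payload = bufs[i];
		dn->next = head;
		head = dn;
	}
	mutex_exit(&buf_lock);

	while (head != NULL) {			/* free outside the lock */
		dn = head;
		head = dn->next;
		buf_free(dn->payload);
	}
}
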
2115 2116
2116 2117 /*
2117 2118 * Duplicate the translations of an as into another as (newhat).
2118 2119 */
2119 2120 /* ARGSUSED */
2120 2121 int
2121 2122 hat_dup(struct hat *hat, struct hat *newhat, caddr_t addr, size_t len,
2122 - uint_t flag)
2123 + uint_t flag)
2123 2124 {
2124 2125 sf_srd_t *srdp;
2125 2126 sf_scd_t *scdp;
2126 2127 int i;
2127 2128 extern uint_t get_color_start(struct as *);
2128 2129
2129 2130 ASSERT((flag == 0) || (flag == HAT_DUP_ALL) || (flag == HAT_DUP_COW) ||
2130 2131 (flag == HAT_DUP_SRD));
2131 2132 ASSERT(hat != ksfmmup);
2132 2133 ASSERT(newhat != ksfmmup);
2133 2134 ASSERT(flag != HAT_DUP_ALL || hat->sfmmu_srdp == newhat->sfmmu_srdp);
2134 2135
2135 2136 if (flag == HAT_DUP_COW) {
2136 2137 panic("hat_dup: HAT_DUP_COW not supported");
2137 2138 }
2138 2139
2139 2140 if (flag == HAT_DUP_SRD && ((srdp = hat->sfmmu_srdp) != NULL)) {
2140 2141 ASSERT(srdp->srd_evp != NULL);
2141 2142 VN_HOLD(srdp->srd_evp);
2142 2143 ASSERT(srdp->srd_refcnt > 0);
2143 2144 newhat->sfmmu_srdp = srdp;
2144 2145 atomic_inc_32((volatile uint_t *)&srdp->srd_refcnt);
2145 2146 }
2146 2147
2147 2148 /*
2148 2149 * HAT_DUP_ALL flag is used after as duplication is done.
2149 2150 */
2150 2151 if (flag == HAT_DUP_ALL && ((srdp = newhat->sfmmu_srdp) != NULL)) {
2151 2152 ASSERT(newhat->sfmmu_srdp->srd_refcnt >= 2);
2152 2153 newhat->sfmmu_rtteflags = hat->sfmmu_rtteflags;
2153 2154 if (hat->sfmmu_flags & HAT_4MTEXT_FLAG) {
2154 2155 newhat->sfmmu_flags |= HAT_4MTEXT_FLAG;
2155 2156 }
2156 2157
2157 2158 /* check if need to join scd */
2158 2159 if ((scdp = hat->sfmmu_scdp) != NULL &&
2159 2160 newhat->sfmmu_scdp != scdp) {
2160 2161 int ret;
2161 2162 SF_RGNMAP_IS_SUBSET(&newhat->sfmmu_region_map,
2162 2163 &scdp->scd_region_map, ret);
2163 2164 ASSERT(ret);
2164 2165 sfmmu_join_scd(scdp, newhat);
2165 2166 ASSERT(newhat->sfmmu_scdp == scdp &&
2166 2167 scdp->scd_refcnt >= 2);
2167 2168 for (i = 0; i < max_mmu_page_sizes; i++) {
2168 2169 newhat->sfmmu_ismttecnt[i] =
2169 2170 hat->sfmmu_ismttecnt[i];
2170 2171 newhat->sfmmu_scdismttecnt[i] =
2171 2172 hat->sfmmu_scdismttecnt[i];
2172 2173 }
2173 2174 }
2174 2175
2175 2176 sfmmu_check_page_sizes(newhat, 1);
2176 2177 }
2177 2178
2178 2179 if (flag == HAT_DUP_ALL && consistent_coloring == 0 &&
2179 2180 update_proc_pgcolorbase_after_fork != 0) {
2180 2181 hat->sfmmu_clrbin = get_color_start(hat->sfmmu_as);
2181 2182 }
2182 2183 return (0);
2183 2184 }
2184 2185
2185 2186 void
2186 2187 hat_memload(struct hat *hat, caddr_t addr, struct page *pp,
2187 - uint_t attr, uint_t flags)
2188 + uint_t attr, uint_t flags)
2188 2189 {
2189 2190 hat_do_memload(hat, addr, pp, attr, flags,
2190 2191 SFMMU_INVALID_SHMERID);
2191 2192 }
2192 2193
2193 2194 void
2194 2195 hat_memload_region(struct hat *hat, caddr_t addr, struct page *pp,
2195 - uint_t attr, uint_t flags, hat_region_cookie_t rcookie)
2196 + uint_t attr, uint_t flags, hat_region_cookie_t rcookie)
2196 2197 {
2197 2198 uint_t rid;
2198 2199 if (rcookie == HAT_INVALID_REGION_COOKIE) {
2199 2200 hat_do_memload(hat, addr, pp, attr, flags,
2200 2201 SFMMU_INVALID_SHMERID);
2201 2202 return;
2202 2203 }
2203 2204 rid = (uint_t)((uint64_t)rcookie);
2204 2205 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
2205 2206 hat_do_memload(hat, addr, pp, attr, flags, rid);
2206 2207 }
2207 2208
2208 2209 /*
2209 2210 * Set up addr to map to page pp with protection prot.
2210 2211 * As an optimization we also load the TSB with the
2211 2212 * corresponding tte but it is no big deal if the tte gets kicked out.
2212 2213 */
2213 2214 static void
2214 2215 hat_do_memload(struct hat *hat, caddr_t addr, struct page *pp,
2215 - uint_t attr, uint_t flags, uint_t rid)
2216 + uint_t attr, uint_t flags, uint_t rid)
2216 2217 {
2217 2218 tte_t tte;
2218 2219
2219 2220
2220 2221 ASSERT(hat != NULL);
2221 2222 ASSERT(PAGE_LOCKED(pp));
2222 2223 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET));
2223 2224 ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG));
2224 2225 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR));
2225 2226 SFMMU_VALIDATE_HMERID(hat, rid, addr, MMU_PAGESIZE);
2226 2227
2227 2228 if (PP_ISFREE(pp)) {
2228 2229 panic("hat_memload: loading a mapping to free page %p",
2229 2230 (void *)pp);
2230 2231 }
2231 2232
2232 2233 ASSERT((hat == ksfmmup) || AS_LOCK_HELD(hat->sfmmu_as));
2233 2234
2234 2235 if (flags & ~SFMMU_LOAD_ALLFLAG)
2235 2236 cmn_err(CE_NOTE, "hat_memload: unsupported flags %d",
2236 2237 flags & ~SFMMU_LOAD_ALLFLAG);
2237 2238
2238 2239 if (hat->sfmmu_rmstat)
2239 2240 hat_resvstat(MMU_PAGESIZE, hat->sfmmu_as, addr);
2240 2241
2241 2242 #if defined(SF_ERRATA_57)
2242 2243 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) &&
2243 2244 (addr < errata57_limit) && (attr & PROT_EXEC) &&
2244 2245 !(flags & HAT_LOAD_SHARE)) {
2245 2246 cmn_err(CE_WARN, "hat_memload: illegal attempt to make user "
2246 2247 " page executable");
2247 2248 attr &= ~PROT_EXEC;
2248 2249 }
2249 2250 #endif
2250 2251
2251 2252 sfmmu_memtte(&tte, pp->p_pagenum, attr, TTE8K);
2252 2253 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, flags, rid);
2253 2254
2254 2255 /*
2255 2256 * Check TSB and TLB page sizes.
2256 2257 */
2257 2258 if ((flags & HAT_LOAD_SHARE) == 0) {
2258 2259 sfmmu_check_page_sizes(hat, 1);
2259 2260 }
2260 2261 }
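
As a usage sketch only (not taken from this file): a kernel caller that already holds a locked, page-aligned page and the address-space lock might establish a single 8K mapping as shown below; the helper name and the choice of attr/flags are assumptions.

/*
 * Illustrative caller of hat_memload(): map one locked page at addr
 * in address space 'as'.  'pp' is assumed PAGE_LOCKED and 'addr' page
 * aligned, as the ASSERTs in hat_do_memload() require.
 */
static void
map_one_page(struct as *as, caddr_t addr, struct page *pp)
{
	hat_memload(as->a_hat, addr, pp, PROT_READ | PROT_WRITE, HAT_LOAD);
}
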
2261 2262
2262 2263 /*
2263 2264 * hat_devload can be called to map real memory (e.g.
2264 2265 * /dev/kmem) and even though hat_devload will determine pf is
2265 2266 * for memory, it will be unable to get a shared lock on the
2266 2267 * page (because someone else has it exclusively) and will
2267 2268 * pass dp = NULL. If tteload doesn't get a non-NULL
2268 2269 * page pointer it can't cache memory.
2269 2270 */
2270 2271 void
2271 2272 hat_devload(struct hat *hat, caddr_t addr, size_t len, pfn_t pfn,
2272 - uint_t attr, int flags)
2273 + uint_t attr, int flags)
2273 2274 {
2274 2275 tte_t tte;
2275 2276 struct page *pp = NULL;
2276 2277 int use_lgpg = 0;
2277 2278
2278 2279 ASSERT(hat != NULL);
2279 2280
2280 2281 ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG));
2281 2282 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR));
2282 2283 ASSERT((hat == ksfmmup) || AS_LOCK_HELD(hat->sfmmu_as));
2283 2284 if (len == 0)
2284 2285 panic("hat_devload: zero len");
2285 2286 if (flags & ~SFMMU_LOAD_ALLFLAG)
2286 2287 cmn_err(CE_NOTE, "hat_devload: unsupported flags %d",
2287 2288 flags & ~SFMMU_LOAD_ALLFLAG);
2288 2289
2289 2290 #if defined(SF_ERRATA_57)
2290 2291 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) &&
2291 2292 (addr < errata57_limit) && (attr & PROT_EXEC) &&
2292 2293 !(flags & HAT_LOAD_SHARE)) {
2293 2294 cmn_err(CE_WARN, "hat_devload: illegal attempt to make user "
2294 2295 " page executable");
2295 2296 attr &= ~PROT_EXEC;
2296 2297 }
2297 2298 #endif
2298 2299
2299 2300 /*
2300 2301 * If it's a memory page find its pp
2301 2302 */
2302 2303 if (!(flags & HAT_LOAD_NOCONSIST) && pf_is_memory(pfn)) {
2303 2304 pp = page_numtopp_nolock(pfn);
2304 2305 if (pp == NULL) {
2305 2306 flags |= HAT_LOAD_NOCONSIST;
2306 2307 } else {
2307 2308 if (PP_ISFREE(pp)) {
2308 2309 panic("hat_memload: loading "
2309 2310 "a mapping to free page %p",
2310 2311 (void *)pp);
2311 2312 }
2312 2313 if (!PAGE_LOCKED(pp) && !PP_ISNORELOC(pp)) {
2313 2314 panic("hat_memload: loading a mapping "
2314 2315 "to unlocked relocatable page %p",
2315 2316 (void *)pp);
2316 2317 }
2317 2318 ASSERT(len == MMU_PAGESIZE);
2318 2319 }
2319 2320 }
2320 2321
2321 2322 if (hat->sfmmu_rmstat)
2322 2323 hat_resvstat(len, hat->sfmmu_as, addr);
2323 2324
2324 2325 if (flags & HAT_LOAD_NOCONSIST) {
2325 2326 attr |= SFMMU_UNCACHEVTTE;
2326 2327 use_lgpg = 1;
2327 2328 }
2328 2329 if (!pf_is_memory(pfn)) {
2329 2330 attr |= SFMMU_UNCACHEPTTE | HAT_NOSYNC;
2330 2331 use_lgpg = 1;
2331 2332 switch (attr & HAT_ORDER_MASK) {
2332 2333 case HAT_STRICTORDER:
2333 2334 case HAT_UNORDERED_OK:
2334 2335 /*
2335 2336			 * we set the side effect bit for all
2336 2337			 * non-memory mappings unless merging is ok
2337 2338 */
2338 2339 attr |= SFMMU_SIDEFFECT;
2339 2340 break;
2340 2341 case HAT_MERGING_OK:
2341 2342 case HAT_LOADCACHING_OK:
2342 2343 case HAT_STORECACHING_OK:
2343 2344 break;
2344 2345 default:
2345 2346 panic("hat_devload: bad attr");
2346 2347 break;
2347 2348 }
2348 2349 }
2349 2350 while (len) {
2350 2351 if (!use_lgpg) {
2351 2352 sfmmu_memtte(&tte, pfn, attr, TTE8K);
2352 2353 (void) sfmmu_tteload_array(hat, &tte, addr, &pp,
2353 2354 flags, SFMMU_INVALID_SHMERID);
2354 2355 len -= MMU_PAGESIZE;
2355 2356 addr += MMU_PAGESIZE;
2356 2357 pfn++;
2357 2358 continue;
2358 2359 }
2359 2360 /*
2360 2361		 * Try to use large pages; check va/pa alignments.
2361 2362 * Note that 32M/256M page sizes are not (yet) supported.
2362 2363 */
2363 2364 if ((len >= MMU_PAGESIZE4M) &&
2364 2365 !((uintptr_t)addr & MMU_PAGEOFFSET4M) &&
2365 2366 !(disable_large_pages & (1 << TTE4M)) &&
2366 2367 !(mmu_ptob(pfn) & MMU_PAGEOFFSET4M)) {
2367 2368 sfmmu_memtte(&tte, pfn, attr, TTE4M);
2368 2369 (void) sfmmu_tteload_array(hat, &tte, addr, &pp,
2369 2370 flags, SFMMU_INVALID_SHMERID);
2370 2371 len -= MMU_PAGESIZE4M;
2371 2372 addr += MMU_PAGESIZE4M;
2372 2373 pfn += MMU_PAGESIZE4M / MMU_PAGESIZE;
2373 2374 } else if ((len >= MMU_PAGESIZE512K) &&
2374 2375 !((uintptr_t)addr & MMU_PAGEOFFSET512K) &&
2375 2376 !(disable_large_pages & (1 << TTE512K)) &&
2376 2377 !(mmu_ptob(pfn) & MMU_PAGEOFFSET512K)) {
2377 2378 sfmmu_memtte(&tte, pfn, attr, TTE512K);
2378 2379 (void) sfmmu_tteload_array(hat, &tte, addr, &pp,
2379 2380 flags, SFMMU_INVALID_SHMERID);
2380 2381 len -= MMU_PAGESIZE512K;
2381 2382 addr += MMU_PAGESIZE512K;
2382 2383 pfn += MMU_PAGESIZE512K / MMU_PAGESIZE;
2383 2384 } else if ((len >= MMU_PAGESIZE64K) &&
2384 2385 !((uintptr_t)addr & MMU_PAGEOFFSET64K) &&
2385 2386 !(disable_large_pages & (1 << TTE64K)) &&
2386 2387 !(mmu_ptob(pfn) & MMU_PAGEOFFSET64K)) {
2387 2388 sfmmu_memtte(&tte, pfn, attr, TTE64K);
2388 2389 (void) sfmmu_tteload_array(hat, &tte, addr, &pp,
2389 2390 flags, SFMMU_INVALID_SHMERID);
2390 2391 len -= MMU_PAGESIZE64K;
2391 2392 addr += MMU_PAGESIZE64K;
2392 2393 pfn += MMU_PAGESIZE64K / MMU_PAGESIZE;
2393 2394 } else {
2394 2395 sfmmu_memtte(&tte, pfn, attr, TTE8K);
2395 2396 (void) sfmmu_tteload_array(hat, &tte, addr, &pp,
2396 2397 flags, SFMMU_INVALID_SHMERID);
2397 2398 len -= MMU_PAGESIZE;
2398 2399 addr += MMU_PAGESIZE;
2399 2400 pfn++;
2400 2401 }
2401 2402 }
2402 2403
2403 2404 /*
2404 2405 * Check TSB and TLB page sizes.
2405 2406 */
2406 2407 if ((flags & HAT_LOAD_SHARE) == 0) {
2407 2408 sfmmu_check_page_sizes(hat, 1);
2408 2409 }
2409 2410 }
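
A condensed restatement of the large-page selection test used in the loop above, for readers skimming the diff: it is equivalent in intent to the in-line 4M check, not a replacement for it, and the helper name is invented for illustration.

/*
 * Sketch: a 4M TTE can be used only when enough length remains and
 * both the virtual address and the physical address are 4M aligned
 * (and the 4M size isn't administratively disabled).
 */
static int
can_use_4m_tte(caddr_t addr, pfn_t pfn, size_t len)
{
	return (len >= MMU_PAGESIZE4M &&
	    ((uintptr_t)addr & MMU_PAGEOFFSET4M) == 0 &&
	    (mmu_ptob(pfn) & MMU_PAGEOFFSET4M) == 0 &&
	    !(disable_large_pages & (1 << TTE4M)));
}
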
2410 2411
2411 2412 void
2412 2413 hat_memload_array(struct hat *hat, caddr_t addr, size_t len,
2413 - struct page **pps, uint_t attr, uint_t flags)
2414 + struct page **pps, uint_t attr, uint_t flags)
2414 2415 {
2415 2416 hat_do_memload_array(hat, addr, len, pps, attr, flags,
2416 2417 SFMMU_INVALID_SHMERID);
2417 2418 }
2418 2419
2419 2420 void
2420 2421 hat_memload_array_region(struct hat *hat, caddr_t addr, size_t len,
2421 - struct page **pps, uint_t attr, uint_t flags,
2422 - hat_region_cookie_t rcookie)
2422 + struct page **pps, uint_t attr, uint_t flags,
2423 + hat_region_cookie_t rcookie)
2423 2424 {
2424 2425 uint_t rid;
2425 2426 if (rcookie == HAT_INVALID_REGION_COOKIE) {
2426 2427 hat_do_memload_array(hat, addr, len, pps, attr, flags,
2427 2428 SFMMU_INVALID_SHMERID);
2428 2429 return;
2429 2430 }
2430 2431 rid = (uint_t)((uint64_t)rcookie);
2431 2432 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
2432 2433 hat_do_memload_array(hat, addr, len, pps, attr, flags, rid);
2433 2434 }
2434 2435
2435 2436 /*
2436 2437 * Map the largest extent possible out of the page array. The array may NOT
2437 2438 * be in order. The largest possible mapping a page can have
2438 2439 * is specified in the p_szc field. The p_szc field
2439 2440 * cannot change as long as there any mappings (large or small)
2440 2441 * to any of the pages that make up the large page. (ie. any
2441 2442 * promotion/demotion of page size is not up to the hat but up to
2442 2443 * the page free list manager). The array
2443 2444 * should consist of properly aligned contiguous pages that are
2444 2445 * part of a big page for a large mapping to be created.
2445 2446 */
2446 2447 static void
2447 2448 hat_do_memload_array(struct hat *hat, caddr_t addr, size_t len,
2448 - struct page **pps, uint_t attr, uint_t flags, uint_t rid)
2449 + struct page **pps, uint_t attr, uint_t flags, uint_t rid)
2449 2450 {
2450 2451 int ttesz;
2451 2452 size_t mapsz;
2452 2453 pgcnt_t numpg, npgs;
2453 2454 tte_t tte;
2454 2455 page_t *pp;
2455 2456 uint_t large_pages_disable;
2456 2457
2457 2458 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET));
2458 2459 SFMMU_VALIDATE_HMERID(hat, rid, addr, len);
2459 2460
2460 2461 if (hat->sfmmu_rmstat)
2461 2462 hat_resvstat(len, hat->sfmmu_as, addr);
2462 2463
2463 2464 #if defined(SF_ERRATA_57)
2464 2465 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) &&
2465 2466 (addr < errata57_limit) && (attr & PROT_EXEC) &&
2466 2467 !(flags & HAT_LOAD_SHARE)) {
2467 2468 cmn_err(CE_WARN, "hat_memload_array: illegal attempt to make "
2468 2469 "user page executable");
2469 2470 attr &= ~PROT_EXEC;
2470 2471 }
2471 2472 #endif
2472 2473
2473 2474 /* Get number of pages */
2474 2475 npgs = len >> MMU_PAGESHIFT;
2475 2476
2476 2477 if (flags & HAT_LOAD_SHARE) {
2477 2478 large_pages_disable = disable_ism_large_pages;
2478 2479 } else {
2479 2480 large_pages_disable = disable_large_pages;
2480 2481 }
2481 2482
2482 2483 if (npgs < NHMENTS || large_pages_disable == LARGE_PAGES_OFF) {
2483 2484 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, npgs,
2484 2485 rid);
2485 2486 return;
2486 2487 }
2487 2488
2488 2489 while (npgs >= NHMENTS) {
2489 2490 pp = *pps;
2490 2491 for (ttesz = pp->p_szc; ttesz != TTE8K; ttesz--) {
2491 2492 /*
2492 2493 * Check if this page size is disabled.
2493 2494 */
2494 2495 if (large_pages_disable & (1 << ttesz))
2495 2496 continue;
2496 2497
2497 2498 numpg = TTEPAGES(ttesz);
2498 2499 mapsz = numpg << MMU_PAGESHIFT;
2499 2500 if ((npgs >= numpg) &&
2500 2501 IS_P2ALIGNED(addr, mapsz) &&
2501 2502 IS_P2ALIGNED(pp->p_pagenum, numpg)) {
2502 2503 /*
2503 2504 * At this point we have enough pages and
2504 2505 * we know the virtual address and the pfn
2505 2506 * are properly aligned. We still need
2506 2507 * to check for physical contiguity but since
2507 2508 * it is very likely that this is the case
2508 2509 * we will assume they are so and undo
2509 2510 * the request if necessary. It would
2510 2511 * be great if we could get a hint flag
2511 2512 * like HAT_CONTIG which would tell us
2512 2513				 * the pages are contiguous for sure.
2513 2514 */
2514 2515 sfmmu_memtte(&tte, (*pps)->p_pagenum,
2515 2516 attr, ttesz);
2516 2517 if (!sfmmu_tteload_array(hat, &tte, addr,
2517 2518 pps, flags, rid)) {
2518 2519 break;
2519 2520 }
2520 2521 }
2521 2522 }
2522 2523 if (ttesz == TTE8K) {
2523 2524 /*
2524 2525			 * We were not able to map the array using a large page;
2525 2526			 * batch an hmeblk, or a fraction of one, at a time.
2526 2527 */
2527 2528 numpg = ((uintptr_t)addr >> MMU_PAGESHIFT)
2528 2529 & (NHMENTS-1);
2529 2530 numpg = NHMENTS - numpg;
2530 2531 ASSERT(numpg <= npgs);
2531 2532 mapsz = numpg * MMU_PAGESIZE;
2532 2533 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags,
2533 2534 numpg, rid);
2534 2535 }
2535 2536 addr += mapsz;
2536 2537 npgs -= numpg;
2537 2538 pps += numpg;
2538 2539 }
2539 2540
2540 2541 if (npgs) {
2541 2542 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, npgs,
2542 2543 rid);
2543 2544 }
2544 2545
2545 2546 /*
2546 2547 * Check TSB and TLB page sizes.
2547 2548 */
2548 2549 if ((flags & HAT_LOAD_SHARE) == 0) {
2549 2550 sfmmu_check_page_sizes(hat, 1);
2550 2551 }
2551 2552 }
2552 2553
2553 2554 /*
2554 2555 * Function tries to batch 8K pages into the same hme blk.
2555 2556 */
2556 2557 static void
2557 2558 sfmmu_memload_batchsmall(struct hat *hat, caddr_t vaddr, page_t **pps,
2558 - uint_t attr, uint_t flags, pgcnt_t npgs, uint_t rid)
2559 + uint_t attr, uint_t flags, pgcnt_t npgs, uint_t rid)
2559 2560 {
2560 2561 tte_t tte;
2561 2562 page_t *pp;
2562 2563 struct hmehash_bucket *hmebp;
2563 2564 struct hme_blk *hmeblkp;
2564 2565 int index;
2565 2566
2566 2567 while (npgs) {
2567 2568 /*
2568 2569 * Acquire the hash bucket.
2569 2570 */
2570 2571 hmebp = sfmmu_tteload_acquire_hashbucket(hat, vaddr, TTE8K,
2571 2572 rid);
2572 2573 ASSERT(hmebp);
2573 2574
2574 2575 /*
2575 2576 * Find the hment block.
2576 2577 */
2577 2578 hmeblkp = sfmmu_tteload_find_hmeblk(hat, hmebp, vaddr,
2578 2579 TTE8K, flags, rid);
2579 2580 ASSERT(hmeblkp);
2580 2581
2581 2582 do {
2582 2583 /*
2583 2584 * Make the tte.
2584 2585 */
2585 2586 pp = *pps;
2586 2587 sfmmu_memtte(&tte, pp->p_pagenum, attr, TTE8K);
2587 2588
2588 2589 /*
2589 2590 * Add the translation.
2590 2591 */
2591 2592 (void) sfmmu_tteload_addentry(hat, hmeblkp, &tte,
2592 2593 vaddr, pps, flags, rid);
2593 2594
2594 2595 /*
2595 2596 * Goto next page.
2596 2597 */
2597 2598 pps++;
2598 2599 npgs--;
2599 2600
2600 2601 /*
2601 2602 * Goto next address.
2602 2603 */
2603 2604 vaddr += MMU_PAGESIZE;
2604 2605
2605 2606 /*
2606 2607			 * Don't cross over into a different hme_blk.
2607 2608 */
2608 2609 index = (int)(((uintptr_t)vaddr >> MMU_PAGESHIFT) &
2609 2610 (NHMENTS-1));
2610 2611
2611 2612 } while (index != 0 && npgs != 0);
2612 2613
2613 2614 /*
2614 2615 * Release the hash bucket.
2615 2616 */
2616 2617
2617 2618 sfmmu_tteload_release_hashbucket(hmebp);
2618 2619 }
2619 2620 }
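
The boundary test in the inner loop above can be read as "stop when vaddr reaches the next hme_blk boundary", since an hme_blk covers NHMENTS consecutive 8K pages. A hedged sketch of the same arithmetic, with an invented helper name:

/*
 * Sketch: number of 8K pages that can still be batched into the
 * current hme_blk starting at vaddr.
 */
static pgcnt_t
pages_left_in_hmeblk(caddr_t vaddr)
{
	uint_t index = (uint_t)(((uintptr_t)vaddr >> MMU_PAGESHIFT) &
	    (NHMENTS - 1));

	return (NHMENTS - index);
}
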
2620 2621
2621 2622 /*
2622 2623 * Construct a tte for a page:
2623 2624 *
2624 2625 * tte_valid = 1
2625 2626 * tte_size2 = size & TTE_SZ2_BITS (Panther and Olympus-C only)
2626 2627 * tte_size = size
2627 2628 * tte_nfo = attr & HAT_NOFAULT
2628 2629 * tte_ie = attr & HAT_STRUCTURE_LE
2629 2630 * tte_hmenum = hmenum
2630 2631 * tte_pahi = pp->p_pagenum >> TTE_PASHIFT;
2631 2632 * tte_palo = pp->p_pagenum & TTE_PALOMASK;
2632 2633 * tte_ref = 1 (optimization)
2633 2634 * tte_wr_perm = attr & PROT_WRITE;
2634 2635 * tte_no_sync = attr & HAT_NOSYNC
2635 2636 * tte_lock = attr & SFMMU_LOCKTTE
2636 2637 * tte_cp = !(attr & SFMMU_UNCACHEPTTE)
2637 2638 * tte_cv = !(attr & SFMMU_UNCACHEVTTE)
2638 2639 * tte_e = attr & SFMMU_SIDEFFECT
2639 2640 * tte_priv = !(attr & PROT_USER)
2640 2641 * tte_hwwr = if nosync is set and it is writable we set the mod bit (opt)
2641 2642 * tte_glb = 0
2642 2643 */
2643 2644 void
2644 2645 sfmmu_memtte(tte_t *ttep, pfn_t pfn, uint_t attr, int tte_sz)
2645 2646 {
2646 2647 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR));
2647 2648
2648 2649 ttep->tte_inthi = MAKE_TTE_INTHI(pfn, attr, tte_sz, 0 /* hmenum */);
2649 2650 ttep->tte_intlo = MAKE_TTE_INTLO(pfn, attr, tte_sz, 0 /* hmenum */);
2650 2651
2651 2652 if (TTE_IS_NOSYNC(ttep)) {
2652 2653 TTE_SET_REF(ttep);
2653 2654 if (TTE_IS_WRITABLE(ttep)) {
2654 2655 TTE_SET_MOD(ttep);
2655 2656 }
2656 2657 }
2657 2658 if (TTE_IS_NFO(ttep) && TTE_IS_EXECUTABLE(ttep)) {
2658 2659 panic("sfmmu_memtte: can't set both NFO and EXEC bits");
2659 2660 }
2660 2661 }
2661 2662
2662 2663 /*
2663 2664 * This function will add a translation to the hme_blk and allocate the
2664 2665 * hme_blk if one does not exist.
2665 2666 * If a page structure is specified then it will add the
2666 2667 * corresponding hment to the mapping list.
2667 2668 * It will also update the hmenum field for the tte.
2668 2669 *
2669 2670 * Currently this function is only used for kernel mappings.
2670 2671 * So pass invalid region to sfmmu_tteload_array().
2671 2672 */
2672 2673 void
2673 2674 sfmmu_tteload(struct hat *sfmmup, tte_t *ttep, caddr_t vaddr, page_t *pp,
2674 - uint_t flags)
2675 + uint_t flags)
2675 2676 {
2676 2677 ASSERT(sfmmup == ksfmmup);
2677 2678 (void) sfmmu_tteload_array(sfmmup, ttep, vaddr, &pp, flags,
2678 2679 SFMMU_INVALID_SHMERID);
2679 2680 }
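
A hedged usage sketch tying sfmmu_memtte() and sfmmu_tteload() together for a kernel mapping; the helper name and the particular attr/flags shown are assumptions, chosen only to illustrate the call sequence.

/*
 * Illustrative only: build an 8K kernel TTE for a locked page and
 * load it.  sfmmu_tteload() is kernel-only (it asserts
 * sfmmup == ksfmmup).
 */
static void
kernel_map_one_page(caddr_t vaddr, struct page *pp)
{
	tte_t tte;

	sfmmu_memtte(&tte, pp->p_pagenum, PROT_READ | PROT_WRITE, TTE8K);
	sfmmu_tteload(ksfmmup, &tte, vaddr, pp, HAT_LOAD);
}
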
2680 2681
2681 2682 /*
2682 2683 * Load (ttep != NULL) or unload (ttep == NULL) one entry in the TSB.
2683 2684 * Assumes that a particular page size may only be resident in one TSB.
2684 2685 */
2685 2686 static void
2686 2687 sfmmu_mod_tsb(sfmmu_t *sfmmup, caddr_t vaddr, tte_t *ttep, int ttesz)
2687 2688 {
2688 2689 struct tsb_info *tsbinfop = NULL;
2689 2690 uint64_t tag;
2690 2691 struct tsbe *tsbe_addr;
2691 2692 uint64_t tsb_base;
2692 2693 uint_t tsb_size;
2693 2694 int vpshift = MMU_PAGESHIFT;
2694 2695 int phys = 0;
2695 2696
2696 2697 if (sfmmup == ksfmmup) { /* No support for 32/256M ksfmmu pages */
2697 2698 phys = ktsb_phys;
2698 2699 if (ttesz >= TTE4M) {
2699 2700 #ifndef sun4v
2700 2701 ASSERT((ttesz != TTE32M) && (ttesz != TTE256M));
2701 2702 #endif
2702 2703 tsb_base = (phys)? ktsb4m_pbase : (uint64_t)ktsb4m_base;
2703 2704 tsb_size = ktsb4m_szcode;
2704 2705 } else {
2705 2706 tsb_base = (phys)? ktsb_pbase : (uint64_t)ktsb_base;
2706 2707 tsb_size = ktsb_szcode;
2707 2708 }
2708 2709 } else {
2709 2710 SFMMU_GET_TSBINFO(tsbinfop, sfmmup, ttesz);
2710 2711
2711 2712 /*
2712 2713 * If there isn't a TSB for this page size, or the TSB is
2713 2714 * swapped out, there is nothing to do. Note that the latter
2714 2715 * case seems impossible but can occur if hat_pageunload()
2715 2716 * is called on an ISM mapping while the process is swapped
2716 2717 * out.
2717 2718 */
2718 2719 if (tsbinfop == NULL || (tsbinfop->tsb_flags & TSB_SWAPPED))
2719 2720 return;
2720 2721
2721 2722 /*
2722 2723 * If another thread is in the middle of relocating a TSB
2723 2724 * we can't unload the entry so set a flag so that the
2724 2725 * TSB will be flushed before it can be accessed by the
2725 2726 * process.
2726 2727 */
2727 2728 if ((tsbinfop->tsb_flags & TSB_RELOC_FLAG) != 0) {
2728 2729 if (ttep == NULL)
2729 2730 tsbinfop->tsb_flags |= TSB_FLUSH_NEEDED;
2730 2731 return;
2731 2732 }
2732 2733 #if defined(UTSB_PHYS)
2733 2734 phys = 1;
2734 2735 tsb_base = (uint64_t)tsbinfop->tsb_pa;
2735 2736 #else
2736 2737 tsb_base = (uint64_t)tsbinfop->tsb_va;
2737 2738 #endif
2738 2739 tsb_size = tsbinfop->tsb_szc;
2739 2740 }
2740 2741 if (ttesz >= TTE4M)
2741 2742 vpshift = MMU_PAGESHIFT4M;
2742 2743
2743 2744 tsbe_addr = sfmmu_get_tsbe(tsb_base, vaddr, vpshift, tsb_size);
2744 2745 tag = sfmmu_make_tsbtag(vaddr);
2745 2746
2746 2747 if (ttep == NULL) {
2747 2748 sfmmu_unload_tsbe(tsbe_addr, tag, phys);
2748 2749 } else {
2749 2750 if (ttesz >= TTE4M) {
2750 2751 SFMMU_STAT(sf_tsb_load4m);
2751 2752 } else {
2752 2753 SFMMU_STAT(sf_tsb_load8k);
2753 2754 }
2754 2755
2755 2756 sfmmu_load_tsbe(tsbe_addr, tag, ttep, phys);
2756 2757 }
2757 2758 }
2758 2759
2759 2760 /*
2760 2761 * Unmap all entries from [start, end) matching the given page size.
2761 2762 *
2762 2763 * This function is used primarily to unmap replicated 64K or 512K entries
2763 2764 * from the TSB that are inserted using the base page size TSB pointer, but
2764 2765 * it may also be called to unmap a range of addresses from the TSB.
2765 2766 */
2766 2767 void
2767 2768 sfmmu_unload_tsb_range(sfmmu_t *sfmmup, caddr_t start, caddr_t end, int ttesz)
2768 2769 {
2769 2770 struct tsb_info *tsbinfop;
2770 2771 uint64_t tag;
2771 2772 struct tsbe *tsbe_addr;
2772 2773 caddr_t vaddr;
2773 2774 uint64_t tsb_base;
2774 2775 int vpshift, vpgsz;
2775 2776 uint_t tsb_size;
2776 2777 int phys = 0;
2777 2778
2778 2779 /*
2779 2780 * Assumptions:
2780 2781 * If ttesz == 8K, 64K or 512K, we walk through the range 8K
2781 2782 * at a time shooting down any valid entries we encounter.
2782 2783 *
2783 2784 * If ttesz >= 4M we walk the range 4M at a time shooting
2784 2785 * down any valid mappings we find.
2785 2786 */
2786 2787 if (sfmmup == ksfmmup) {
2787 2788 phys = ktsb_phys;
2788 2789 if (ttesz >= TTE4M) {
2789 2790 #ifndef sun4v
2790 2791 ASSERT((ttesz != TTE32M) && (ttesz != TTE256M));
2791 2792 #endif
2792 2793 tsb_base = (phys)? ktsb4m_pbase : (uint64_t)ktsb4m_base;
2793 2794 tsb_size = ktsb4m_szcode;
2794 2795 } else {
2795 2796 tsb_base = (phys)? ktsb_pbase : (uint64_t)ktsb_base;
2796 2797 tsb_size = ktsb_szcode;
2797 2798 }
2798 2799 } else {
2799 2800 SFMMU_GET_TSBINFO(tsbinfop, sfmmup, ttesz);
2800 2801
2801 2802 /*
2802 2803 * If there isn't a TSB for this page size, or the TSB is
2803 2804 * swapped out, there is nothing to do. Note that the latter
2804 2805 * case seems impossible but can occur if hat_pageunload()
2805 2806 * is called on an ISM mapping while the process is swapped
2806 2807 * out.
2807 2808 */
2808 2809 if (tsbinfop == NULL || (tsbinfop->tsb_flags & TSB_SWAPPED))
2809 2810 return;
2810 2811
2811 2812 /*
2812 2813 * If another thread is in the middle of relocating a TSB
2813 2814 * we can't unload the entry so set a flag so that the
2814 2815 * TSB will be flushed before it can be accessed by the
2815 2816 * process.
2816 2817 */
2817 2818 if ((tsbinfop->tsb_flags & TSB_RELOC_FLAG) != 0) {
2818 2819 tsbinfop->tsb_flags |= TSB_FLUSH_NEEDED;
2819 2820 return;
2820 2821 }
2821 2822 #if defined(UTSB_PHYS)
2822 2823 phys = 1;
2823 2824 tsb_base = (uint64_t)tsbinfop->tsb_pa;
2824 2825 #else
2825 2826 tsb_base = (uint64_t)tsbinfop->tsb_va;
2826 2827 #endif
2827 2828 tsb_size = tsbinfop->tsb_szc;
2828 2829 }
2829 2830 if (ttesz >= TTE4M) {
2830 2831 vpshift = MMU_PAGESHIFT4M;
2831 2832 vpgsz = MMU_PAGESIZE4M;
2832 2833 } else {
2833 2834 vpshift = MMU_PAGESHIFT;
2834 2835 vpgsz = MMU_PAGESIZE;
2835 2836 }
2836 2837
2837 2838 for (vaddr = start; vaddr < end; vaddr += vpgsz) {
2838 2839 tag = sfmmu_make_tsbtag(vaddr);
2839 2840 tsbe_addr = sfmmu_get_tsbe(tsb_base, vaddr, vpshift, tsb_size);
2840 2841 sfmmu_unload_tsbe(tsbe_addr, tag, phys);
2841 2842 }
2842 2843 }
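
A hedged usage sketch of the range unload described above: removing the 8K-indexed TSB entries that replicate a single 64K mapping. The helper name is invented and the start address is assumed 64K aligned.

/*
 * Illustrative only: knock out the 8K-replicated TSB entries that
 * back one 64K mapping at 'addr' (addr assumed 64K aligned).
 */
static void
unload_64k_replicas(sfmmu_t *sfmmup, caddr_t addr)
{
	sfmmu_unload_tsb_range(sfmmup, addr, addr + MMU_PAGESIZE64K, TTE8K);
}
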
2843 2844
2844 2845 /*
2845 2846 * Select the optimum TSB size given the number of mappings
2846 2847 * that need to be cached.
2847 2848 */
2848 2849 static int
2849 2850 sfmmu_select_tsb_szc(pgcnt_t pgcnt)
2850 2851 {
2851 2852 int szc = 0;
2852 2853
2853 2854 #ifdef DEBUG
2854 2855 if (tsb_grow_stress) {
2855 2856 uint32_t randval = (uint32_t)gettick() >> 4;
2856 2857 return (randval % (tsb_max_growsize + 1));
2857 2858 }
2858 2859 #endif /* DEBUG */
2859 2860
2860 2861 while ((szc < tsb_max_growsize) && (pgcnt > SFMMU_RSS_TSBSIZE(szc)))
2861 2862 szc++;
2862 2863 return (szc);
2863 2864 }
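
For intuition, the selection loop above can be restated as follows; this is an equivalent formulation for readability (the alternate function name is invented), not new behavior: walk szc upward until a TSB of that size code is expected to hold pgcnt mappings, capping at tsb_max_growsize.

/*
 * Equivalent formulation of sfmmu_select_tsb_szc();
 * SFMMU_RSS_TSBSIZE(szc) is the mapping count a TSB of size code szc
 * is sized to cache.
 */
static int
select_tsb_szc_alt(pgcnt_t pgcnt)
{
	int szc;

	for (szc = 0; szc < tsb_max_growsize; szc++) {
		if (pgcnt <= SFMMU_RSS_TSBSIZE(szc))
			break;
	}
	return (szc);
}
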
2864 2865
2865 2866 /*
2866 2867 * This function will add a translation to the hme_blk and allocate the
2867 2868 * hme_blk if one does not exist.
2868 2869 * If a page structure is specified then it will add the
2869 2870 * corresponding hment to the mapping list.
2870 2871 * It will also update the hmenum field for the tte.
2871 2872 * Furthermore, it attempts to create a large page translation
2872 2873 * for <addr,hat> at page array pps. It assumes addr and first
2873 2874 * pp is correctly aligned. It returns 0 if successful and 1 otherwise.
2874 2875 */
2875 2876 static int
2876 2877 sfmmu_tteload_array(sfmmu_t *sfmmup, tte_t *ttep, caddr_t vaddr,
2877 - page_t **pps, uint_t flags, uint_t rid)
2878 + page_t **pps, uint_t flags, uint_t rid)
2878 2879 {
2879 2880 struct hmehash_bucket *hmebp;
2880 2881 struct hme_blk *hmeblkp;
2881 - int ret;
2882 + int ret;
2882 2883 uint_t size;
2883 2884
2884 2885 /*
2885 2886 * Get mapping size.
2886 2887 */
2887 2888 size = TTE_CSZ(ttep);
2888 2889 ASSERT(!((uintptr_t)vaddr & TTE_PAGE_OFFSET(size)));
2889 2890
2890 2891 /*
2891 2892 * Acquire the hash bucket.
2892 2893 */
2893 2894 hmebp = sfmmu_tteload_acquire_hashbucket(sfmmup, vaddr, size, rid);
2894 2895 ASSERT(hmebp);
2895 2896
2896 2897 /*
2897 2898 * Find the hment block.
2898 2899 */
2899 2900 hmeblkp = sfmmu_tteload_find_hmeblk(sfmmup, hmebp, vaddr, size, flags,
2900 2901 rid);
2901 2902 ASSERT(hmeblkp);
2902 2903
2903 2904 /*
2904 2905 * Add the translation.
2905 2906 */
2906 2907 ret = sfmmu_tteload_addentry(sfmmup, hmeblkp, ttep, vaddr, pps, flags,
2907 2908 rid);
2908 2909
2909 2910 /*
2910 2911 * Release the hash bucket.
2911 2912 */
2912 2913 sfmmu_tteload_release_hashbucket(hmebp);
2913 2914
2914 2915 return (ret);
2915 2916 }
2916 2917
2917 2918 /*
2918 2919 * Function locks and returns a pointer to the hash bucket for vaddr and size.
2919 2920 */
2920 2921 static struct hmehash_bucket *
2921 2922 sfmmu_tteload_acquire_hashbucket(sfmmu_t *sfmmup, caddr_t vaddr, int size,
2922 2923 uint_t rid)
2923 2924 {
2924 2925 struct hmehash_bucket *hmebp;
2925 2926 int hmeshift;
2926 2927 void *htagid = sfmmutohtagid(sfmmup, rid);
2927 2928
2928 2929 ASSERT(htagid != NULL);
2929 2930
2930 2931 hmeshift = HME_HASH_SHIFT(size);
2931 2932
2932 2933 hmebp = HME_HASH_FUNCTION(htagid, vaddr, hmeshift);
2933 2934
2934 2935 SFMMU_HASH_LOCK(hmebp);
2935 2936
2936 2937 return (hmebp);
2937 2938 }
2938 2939
2939 2940 /*
2940 2941 * Function returns a pointer to an hmeblk in the hash bucket, hmebp. If the
2941 2942 * hmeblk doesn't exist for the [sfmmup, vaddr & size] signature, an hmeblk is
2942 2943 * allocated.
2943 2944 */
2944 2945 static struct hme_blk *
2945 2946 sfmmu_tteload_find_hmeblk(sfmmu_t *sfmmup, struct hmehash_bucket *hmebp,
2946 - caddr_t vaddr, uint_t size, uint_t flags, uint_t rid)
2947 + caddr_t vaddr, uint_t size, uint_t flags, uint_t rid)
2947 2948 {
2948 2949 hmeblk_tag hblktag;
2949 2950 int hmeshift;
2950 2951 struct hme_blk *hmeblkp, *pr_hblk, *list = NULL;
2951 2952
2952 2953 SFMMU_VALIDATE_HMERID(sfmmup, rid, vaddr, TTEBYTES(size));
2953 2954
2954 2955 hblktag.htag_id = sfmmutohtagid(sfmmup, rid);
2955 2956 ASSERT(hblktag.htag_id != NULL);
2956 2957 hmeshift = HME_HASH_SHIFT(size);
2957 2958 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift);
2958 2959 hblktag.htag_rehash = HME_HASH_REHASH(size);
2959 2960 hblktag.htag_rid = rid;
2960 2961
2961 2962 ttearray_realloc:
2962 2963
2963 2964 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list);
2964 2965
2965 2966 /*
2966 2967 * We block until hblk_reserve_lock is released; it's held by
2967 2968 * the thread temporarily using hblk_reserve, until hblk_reserve is
2968 2969 * replaced by an hblk from sfmmu8_cache.
2969 2970 */
2970 2971 if (hmeblkp == (struct hme_blk *)hblk_reserve &&
2971 2972 hblk_reserve_thread != curthread) {
2972 2973 SFMMU_HASH_UNLOCK(hmebp);
2973 2974 mutex_enter(&hblk_reserve_lock);
2974 2975 mutex_exit(&hblk_reserve_lock);
2975 2976 SFMMU_STAT(sf_hblk_reserve_hit);
2976 2977 SFMMU_HASH_LOCK(hmebp);
2977 2978 goto ttearray_realloc;
2978 2979 }
2979 2980
2980 2981 if (hmeblkp == NULL) {
2981 2982 hmeblkp = sfmmu_hblk_alloc(sfmmup, vaddr, hmebp, size,
2982 2983 hblktag, flags, rid);
2983 2984 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || hmeblkp->hblk_shared);
2984 2985 ASSERT(SFMMU_IS_SHMERID_VALID(rid) || !hmeblkp->hblk_shared);
2985 2986 } else {
2986 2987 /*
2987 2988 * It is possible for 8k and 64k hblks to collide since they
2988 2989 * have the same rehash value. This is because we
2989 2990 * lazily free hblks and 8K/64K blks could be lingering.
2990 2991 * If we find a size mismatch we free the block and try again.
2991 2992 */
2992 2993 if (get_hblk_ttesz(hmeblkp) != size) {
2993 2994 ASSERT(!hmeblkp->hblk_vcnt);
2994 2995 ASSERT(!hmeblkp->hblk_hmecnt);
2995 2996 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
2996 2997 &list, 0);
2997 2998 goto ttearray_realloc;
2998 2999 }
2999 3000 if (hmeblkp->hblk_shw_bit) {
3000 3001 /*
3001 3002 * if the hblk was previously used as a shadow hblk then
3002 3003 * we will change it to a normal hblk
3003 3004 */
3004 3005 ASSERT(!hmeblkp->hblk_shared);
3005 3006 if (hmeblkp->hblk_shw_mask) {
3006 3007 sfmmu_shadow_hcleanup(sfmmup, hmeblkp, hmebp);
3007 3008 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
3008 3009 goto ttearray_realloc;
3009 3010 } else {
3010 3011 hmeblkp->hblk_shw_bit = 0;
3011 3012 }
3012 3013 }
3013 3014 SFMMU_STAT(sf_hblk_hit);
3014 3015 }
3015 3016
3016 3017 /*
3017 3018 * hat_memload() should never call kmem_cache_free() for kernel hmeblks;
3018 3019 * see block comment showing the stacktrace in sfmmu_hblk_alloc();
3019 3020 * set the flag parameter to 1 so that sfmmu_hblks_list_purge() will
3020 3021 * just add these hmeblks to the per-cpu pending queue.
3021 3022 */
3022 3023 sfmmu_hblks_list_purge(&list, 1);
3023 3024
3024 3025 ASSERT(get_hblk_ttesz(hmeblkp) == size);
3025 3026 ASSERT(!hmeblkp->hblk_shw_bit);
3026 3027 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || hmeblkp->hblk_shared);
3027 3028 ASSERT(SFMMU_IS_SHMERID_VALID(rid) || !hmeblkp->hblk_shared);
3028 3029 ASSERT(hmeblkp->hblk_tag.htag_rid == rid);
3029 3030
3030 3031 return (hmeblkp);
3031 3032 }
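/*
 * The hblk_reserve handling above waits for the reserving thread by
 * acquiring and immediately dropping the lock that thread holds, then
 * retrying the hash search.  A hedged pthread sketch of that
 * "lock/unlock as a barrier, then retry" idiom; the lookup function and
 * the reserve sentinel are illustrative stand-ins, not kernel interfaces.
 */
#include <pthread.h>

extern pthread_mutex_t reserve_lock;	/* held while the reserve is in use */
extern void *reserve_marker;		/* sentinel the search can return */
extern void *lookup_locked(void);	/* one hash search, illustrative */

static void *
find_or_wait(void)
{
	void *blk;

	for (;;) {
		blk = lookup_locked();
		if (blk != reserve_marker)
			return (blk);
		/* Block until the reserving thread drops the lock. */
		(void) pthread_mutex_lock(&reserve_lock);
		(void) pthread_mutex_unlock(&reserve_lock);
		/* The reserve has been replaced; search again. */
	}
}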
3032 3033
3033 3034 /*
3034 3035 * Function adds a tte entry into the hmeblk. It returns 0 if successful and 1
3035 3036 * otherwise.
3036 3037 */
3037 3038 static int
3038 3039 sfmmu_tteload_addentry(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, tte_t *ttep,
3039 - caddr_t vaddr, page_t **pps, uint_t flags, uint_t rid)
3040 + caddr_t vaddr, page_t **pps, uint_t flags, uint_t rid)
3040 3041 {
3041 3042 page_t *pp = *pps;
3042 3043 int hmenum, size, remap;
3043 3044 tte_t tteold, flush_tte;
3044 3045 #ifdef DEBUG
3045 3046 tte_t orig_old;
3046 3047 #endif /* DEBUG */
3047 3048 struct sf_hment *sfhme;
3048 3049 kmutex_t *pml, *pmtx;
3049 3050 hatlock_t *hatlockp;
3050 3051 int myflt;
3051 3052
3052 3053 /*
3053 3054 * remove this panic when we decide to let user virtual address
3054 3055 * space be >= USERLIMIT.
3055 3056 */
3056 3057 if (!TTE_IS_PRIVILEGED(ttep) && vaddr >= (caddr_t)USERLIMIT)
3057 3058 panic("user addr %p in kernel space", (void *)vaddr);
3058 3059 #if defined(TTE_IS_GLOBAL)
3059 3060 if (TTE_IS_GLOBAL(ttep))
3060 3061 panic("sfmmu_tteload: creating global tte");
3061 3062 #endif
3062 3063
3063 3064 #ifdef DEBUG
3064 3065 if (pf_is_memory(sfmmu_ttetopfn(ttep, vaddr)) &&
3065 3066 !TTE_IS_PCACHEABLE(ttep) && !sfmmu_allow_nc_trans)
3066 3067 panic("sfmmu_tteload: non cacheable memory tte");
3067 3068 #endif /* DEBUG */
3068 3069
3069 3070 /* don't simulate dirty bit for writeable ISM/DISM mappings */
3070 3071 if ((flags & HAT_LOAD_SHARE) && TTE_IS_WRITABLE(ttep)) {
3071 3072 TTE_SET_REF(ttep);
3072 3073 TTE_SET_MOD(ttep);
3073 3074 }
3074 3075
3075 3076 if ((flags & HAT_LOAD_SHARE) || !TTE_IS_REF(ttep) ||
3076 3077 !TTE_IS_MOD(ttep)) {
3077 3078 /*
3078 3079 * Don't load TSB for dummy as in ISM. Also don't preload
3079 3080 * the TSB if the TTE isn't writable since we're likely to
3080 3081 * fault on it again -- preloading can be fairly expensive.
3081 3082 */
3082 3083 flags |= SFMMU_NO_TSBLOAD;
3083 3084 }
3084 3085
3085 3086 size = TTE_CSZ(ttep);
3086 3087 switch (size) {
3087 3088 case TTE8K:
3088 3089 SFMMU_STAT(sf_tteload8k);
3089 3090 break;
3090 3091 case TTE64K:
3091 3092 SFMMU_STAT(sf_tteload64k);
3092 3093 break;
3093 3094 case TTE512K:
3094 3095 SFMMU_STAT(sf_tteload512k);
3095 3096 break;
3096 3097 case TTE4M:
3097 3098 SFMMU_STAT(sf_tteload4m);
3098 3099 break;
3099 3100 case (TTE32M):
3100 3101 SFMMU_STAT(sf_tteload32m);
3101 3102 ASSERT(mmu_page_sizes == max_mmu_page_sizes);
3102 3103 break;
3103 3104 case (TTE256M):
3104 3105 SFMMU_STAT(sf_tteload256m);
3105 3106 ASSERT(mmu_page_sizes == max_mmu_page_sizes);
3106 3107 break;
3107 3108 }
3108 3109
3109 3110 ASSERT(!((uintptr_t)vaddr & TTE_PAGE_OFFSET(size)));
3110 3111 SFMMU_VALIDATE_HMERID(sfmmup, rid, vaddr, TTEBYTES(size));
3111 3112 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || hmeblkp->hblk_shared);
3112 3113 ASSERT(SFMMU_IS_SHMERID_VALID(rid) || !hmeblkp->hblk_shared);
3113 3114
3114 3115 HBLKTOHME_IDX(sfhme, hmeblkp, vaddr, hmenum);
3115 3116
3116 3117 /*
3117 3118 * Need to grab mlist lock here so that pageunload
3118 3119 * will not change tte behind us.
3119 3120 */
3120 3121 if (pp) {
3121 3122 pml = sfmmu_mlist_enter(pp);
3122 3123 }
3123 3124
3124 3125 sfmmu_copytte(&sfhme->hme_tte, &tteold);
3125 3126 /*
3126 3127 * Look for corresponding hment and if valid verify
3127 3128 * pfns are equal.
3128 3129 */
3129 3130 remap = TTE_IS_VALID(&tteold);
3130 3131 if (remap) {
3131 3132 pfn_t new_pfn, old_pfn;
3132 3133
3133 3134 old_pfn = TTE_TO_PFN(vaddr, &tteold);
3134 3135 new_pfn = TTE_TO_PFN(vaddr, ttep);
3135 3136
3136 3137 if (flags & HAT_LOAD_REMAP) {
3137 3138 /* make sure we are remapping same type of pages */
3138 3139 if (pf_is_memory(old_pfn) != pf_is_memory(new_pfn)) {
3139 3140 panic("sfmmu_tteload - tte remap io<->memory");
3140 3141 }
3141 3142 if (old_pfn != new_pfn &&
3142 3143 (pp != NULL || sfhme->hme_page != NULL)) {
3143 3144 panic("sfmmu_tteload - tte remap pp != NULL");
3144 3145 }
3145 3146 } else if (old_pfn != new_pfn) {
3146 3147 panic("sfmmu_tteload - tte remap, hmeblkp 0x%p",
3147 3148 (void *)hmeblkp);
3148 3149 }
3149 3150 ASSERT(TTE_CSZ(&tteold) == TTE_CSZ(ttep));
3150 3151 }
3151 3152
3152 3153 if (pp) {
3153 3154 if (size == TTE8K) {
3154 3155 #ifdef VAC
3155 3156 /*
3156 3157 * Handle VAC consistency
3157 3158 */
3158 3159 if (!remap && (cache & CACHE_VAC) && !PP_ISNC(pp)) {
3159 3160 sfmmu_vac_conflict(sfmmup, vaddr, pp);
3160 3161 }
3161 3162 #endif
3162 3163
3163 3164 if (TTE_IS_WRITABLE(ttep) && PP_ISRO(pp)) {
3164 3165 pmtx = sfmmu_page_enter(pp);
3165 3166 PP_CLRRO(pp);
3166 3167 sfmmu_page_exit(pmtx);
3167 3168 } else if (!PP_ISMAPPED(pp) &&
3168 3169 (!TTE_IS_WRITABLE(ttep)) && !(PP_ISMOD(pp))) {
3169 3170 pmtx = sfmmu_page_enter(pp);
3170 3171 if (!(PP_ISMOD(pp))) {
3171 3172 PP_SETRO(pp);
3172 3173 }
3173 3174 sfmmu_page_exit(pmtx);
3174 3175 }
3175 3176
3176 3177 } else if (sfmmu_pagearray_setup(vaddr, pps, ttep, remap)) {
3177 3178 /*
3178 3179 * sfmmu_pagearray_setup failed so return
3179 3180 */
3180 3181 sfmmu_mlist_exit(pml);
3181 3182 return (1);
3182 3183 }
3183 3184 }
3184 3185
3185 3186 /*
3186 3187 * Make sure hment is not on a mapping list.
3187 3188 */
3188 3189 ASSERT(remap || (sfhme->hme_page == NULL));
3189 3190
3190 3191 /* if it is not a remap then hme->next better be NULL */
3191 3192 ASSERT((!remap) ? sfhme->hme_next == NULL : 1);
3192 3193
3193 3194 if (flags & HAT_LOAD_LOCK) {
3194 3195 if ((hmeblkp->hblk_lckcnt + 1) >= MAX_HBLK_LCKCNT) {
3195 3196 panic("too high lckcnt-hmeblk %p",
3196 3197 (void *)hmeblkp);
3197 3198 }
3198 3199 atomic_inc_32(&hmeblkp->hblk_lckcnt);
3199 3200
3200 3201 HBLK_STACK_TRACE(hmeblkp, HBLK_LOCK);
3201 3202 }
3202 3203
3203 3204 #ifdef VAC
3204 3205 if (pp && PP_ISNC(pp)) {
3205 3206 /*
3206 3207 * If the physical page is marked to be uncacheable, like
3207 3208 * by a vac conflict, make sure the new mapping is also
3208 3209 * uncacheable.
3209 3210 */
3210 3211 TTE_CLR_VCACHEABLE(ttep);
3211 3212 ASSERT(PP_GET_VCOLOR(pp) == NO_VCOLOR);
3212 3213 }
3213 3214 #endif
3214 3215 ttep->tte_hmenum = hmenum;
3215 3216
3216 3217 #ifdef DEBUG
3217 3218 orig_old = tteold;
3218 3219 #endif /* DEBUG */
3219 3220
3220 3221 while (sfmmu_modifytte_try(&tteold, ttep, &sfhme->hme_tte) < 0) {
3221 3222 if ((sfmmup == KHATID) &&
3222 3223 (flags & (HAT_LOAD_LOCK | HAT_LOAD_REMAP))) {
3223 3224 sfmmu_copytte(&sfhme->hme_tte, &tteold);
3224 3225 }
3225 3226 #ifdef DEBUG
3226 3227 chk_tte(&orig_old, &tteold, ttep, hmeblkp);
3227 3228 #endif /* DEBUG */
3228 3229 }
3229 3230 ASSERT(TTE_IS_VALID(&sfhme->hme_tte));
3230 3231
3231 3232 if (!TTE_IS_VALID(&tteold)) {
3232 3233
3233 3234 atomic_inc_16(&hmeblkp->hblk_vcnt);
3234 3235 if (rid == SFMMU_INVALID_SHMERID) {
3235 3236 atomic_inc_ulong(&sfmmup->sfmmu_ttecnt[size]);
3236 3237 } else {
3237 3238 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
3238 3239 sf_region_t *rgnp = srdp->srd_hmergnp[rid];
3239 3240 /*
3240 3241 * We already accounted for region ttecnt's in sfmmu
3241 3242 * during hat_join_region() processing. Here we
3242 3243 * only update ttecnt's in the region structure.
3243 3244 */
3244 3245 atomic_inc_ulong(&rgnp->rgn_ttecnt[size]);
3245 3246 }
3246 3247 }
3247 3248
3248 3249 myflt = (astosfmmu(curthread->t_procp->p_as) == sfmmup);
3249 3250 if (size > TTE8K && (flags & HAT_LOAD_SHARE) == 0 &&
3250 3251 sfmmup != ksfmmup) {
3251 3252 uchar_t tteflag = 1 << size;
3252 3253 if (rid == SFMMU_INVALID_SHMERID) {
3253 3254 if (!(sfmmup->sfmmu_tteflags & tteflag)) {
3254 3255 hatlockp = sfmmu_hat_enter(sfmmup);
3255 3256 sfmmup->sfmmu_tteflags |= tteflag;
3256 3257 sfmmu_hat_exit(hatlockp);
3257 3258 }
3258 3259 } else if (!(sfmmup->sfmmu_rtteflags & tteflag)) {
3259 3260 hatlockp = sfmmu_hat_enter(sfmmup);
3260 3261 sfmmup->sfmmu_rtteflags |= tteflag;
3261 3262 sfmmu_hat_exit(hatlockp);
3262 3263 }
3263 3264 /*
3264 3265 * Update the current CPU tsbmiss area, so the current thread
3265 3266 * won't need to take the tsbmiss for the new pagesize.
3266 3267 * The other threads in the process will update their tsb
3267 3268 * miss area lazily in sfmmu_tsbmiss_exception() when they
3268 3269 * fail to find the translation for a newly added pagesize.
3269 3270 */
3270 3271 if (size > TTE64K && myflt) {
3271 3272 struct tsbmiss *tsbmp;
3272 3273 kpreempt_disable();
3273 3274 tsbmp = &tsbmiss_area[CPU->cpu_id];
3274 3275 if (rid == SFMMU_INVALID_SHMERID) {
3275 3276 if (!(tsbmp->uhat_tteflags & tteflag)) {
3276 3277 tsbmp->uhat_tteflags |= tteflag;
3277 3278 }
3278 3279 } else {
3279 3280 if (!(tsbmp->uhat_rtteflags & tteflag)) {
3280 3281 tsbmp->uhat_rtteflags |= tteflag;
3281 3282 }
3282 3283 }
3283 3284 kpreempt_enable();
3284 3285 }
3285 3286 }
3286 3287
3287 3288 if (size >= TTE4M && (flags & HAT_LOAD_TEXT) &&
3288 3289 !SFMMU_FLAGS_ISSET(sfmmup, HAT_4MTEXT_FLAG)) {
3289 3290 hatlockp = sfmmu_hat_enter(sfmmup);
3290 3291 SFMMU_FLAGS_SET(sfmmup, HAT_4MTEXT_FLAG);
3291 3292 sfmmu_hat_exit(hatlockp);
3292 3293 }
3293 3294
3294 3295 flush_tte.tte_intlo = (tteold.tte_intlo ^ ttep->tte_intlo) &
3295 3296 hw_tte.tte_intlo;
3296 3297 flush_tte.tte_inthi = (tteold.tte_inthi ^ ttep->tte_inthi) &
3297 3298 hw_tte.tte_inthi;
3298 3299
3299 3300 if (remap && (flush_tte.tte_inthi || flush_tte.tte_intlo)) {
3300 3301 /*
3301 3302 * If remap and new tte differs from old tte we need
3302 3303 * to sync the mod bit and flush TLB/TSB. We don't
3303 3304 * need to sync ref bit because we currently always set
3304 3305 * ref bit in tteload.
3305 3306 */
3306 3307 ASSERT(TTE_IS_REF(ttep));
3307 3308 if (TTE_IS_MOD(&tteold)) {
3308 3309 sfmmu_ttesync(sfmmup, vaddr, &tteold, pp);
3309 3310 }
3310 3311 /*
3311 3312 * hwtte bits shouldn't change for SRD hmeblks as long as SRD
3312 3313 * hmes are only used for read only text. Adding this code for
3313 3314 * completeness and future use of shared hmeblks with writable
3314 3315 * mappings of VMODSORT vnodes.
3315 3316 */
3316 3317 if (hmeblkp->hblk_shared) {
3317 3318 cpuset_t cpuset = sfmmu_rgntlb_demap(vaddr,
3318 3319 sfmmup->sfmmu_srdp->srd_hmergnp[rid], hmeblkp, 1);
3319 3320 xt_sync(cpuset);
3320 3321 SFMMU_STAT_ADD(sf_region_remap_demap, 1);
3321 3322 } else {
3322 3323 sfmmu_tlb_demap(vaddr, sfmmup, hmeblkp, 0, 0);
3323 3324 xt_sync(sfmmup->sfmmu_cpusran);
3324 3325 }
3325 3326 }
3326 3327
3327 3328 if ((flags & SFMMU_NO_TSBLOAD) == 0) {
3328 3329 /*
3329 3330 * We only preload 8K and 4M mappings into the TSB, since
3330 3331 * 64K and 512K mappings are replicated and hence don't
3331 3332 * have a single, unique TSB entry. Ditto for 32M/256M.
3332 3333 */
3333 3334 if (size == TTE8K || size == TTE4M) {
3334 3335 sf_scd_t *scdp;
3335 3336 hatlockp = sfmmu_hat_enter(sfmmup);
3336 3337 /*
3337 3338 * Don't preload private TSB if the mapping is used
3338 3339 * by the shctx in the SCD.
3339 3340 */
3340 3341 scdp = sfmmup->sfmmu_scdp;
3341 3342 if (rid == SFMMU_INVALID_SHMERID || scdp == NULL ||
3342 3343 !SF_RGNMAP_TEST(scdp->scd_hmeregion_map, rid)) {
3343 3344 sfmmu_load_tsb(sfmmup, vaddr, &sfhme->hme_tte,
3344 3345 size);
3345 3346 }
3346 3347 sfmmu_hat_exit(hatlockp);
3347 3348 }
3348 3349 }
3349 3350 if (pp) {
3350 3351 if (!remap) {
3351 3352 HME_ADD(sfhme, pp);
3352 3353 atomic_inc_16(&hmeblkp->hblk_hmecnt);
3353 3354 ASSERT(hmeblkp->hblk_hmecnt > 0);
3354 3355
3355 3356 /*
3356 3357 * Cannot ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS)
3357 3358 * see pageunload() for comment.
3358 3359 */
3359 3360 }
3360 3361 sfmmu_mlist_exit(pml);
3361 3362 }
3362 3363
3363 3364 return (0);
3364 3365 }
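/*
 * The remap path above decides whether a TLB/TSB demap is needed by XORing
 * the old and new TTE words and masking with the bits the hardware actually
 * interprets (hw_tte).  A hedged standalone sketch of that change-detection
 * idiom; the 64-bit representation here is illustrative.
 */
#include <stdbool.h>
#include <stdint.h>

static bool
hw_bits_changed(uint64_t old_tte, uint64_t new_tte, uint64_t hw_mask)
{
	/* Only differences in hardware-visible bits force a flush. */
	return (((old_tte ^ new_tte) & hw_mask) != 0);
}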
3365 3366 /*
3366 3367 * Function unlocks hash bucket.
3367 3368 */
3368 3369 static void
3369 3370 sfmmu_tteload_release_hashbucket(struct hmehash_bucket *hmebp)
3370 3371 {
3371 3372 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
3372 3373 SFMMU_HASH_UNLOCK(hmebp);
3373 3374 }
3374 3375
3375 3376 /*
3376 3377 * Function that checks and sets up the page array for a large
3377 3378 * page translation. Will set the p_vcolor, p_index, p_ro fields.
3378 3379 * Assumes addr and pfnum of the first page are properly aligned.
3379 3380 * Will check for physical contiguity. If the check fails it returns
3380 3381 * nonzero.
3381 3382 */
3382 3383 static int
3383 3384 sfmmu_pagearray_setup(caddr_t addr, page_t **pps, tte_t *ttep, int remap)
3384 3385 {
3385 - int i, index, ttesz;
3386 + int i, index, ttesz;
3386 3387 pfn_t pfnum;
3387 3388 pgcnt_t npgs;
3388 3389 page_t *pp, *pp1;
3389 3390 kmutex_t *pmtx;
3390 3391 #ifdef VAC
3391 3392 int osz;
3392 3393 int cflags = 0;
3393 3394 int vac_err = 0;
3394 3395 #endif
3395 3396 int newidx = 0;
3396 3397
3397 3398 ttesz = TTE_CSZ(ttep);
3398 3399
3399 3400 ASSERT(ttesz > TTE8K);
3400 3401
3401 3402 npgs = TTEPAGES(ttesz);
3402 3403 index = PAGESZ_TO_INDEX(ttesz);
3403 3404
3404 3405 pfnum = (*pps)->p_pagenum;
3405 3406 ASSERT(IS_P2ALIGNED(pfnum, npgs));
3406 3407
3407 3408 /*
3408 3409 * Save the first pp so we can do HAT_TMPNC at the end.
3409 3410 */
3410 3411 pp1 = *pps;
3411 3412 #ifdef VAC
3412 3413 osz = fnd_mapping_sz(pp1);
3413 3414 #endif
3414 3415
3415 3416 for (i = 0; i < npgs; i++, pps++) {
3416 3417 pp = *pps;
3417 3418 ASSERT(PAGE_LOCKED(pp));
3418 3419 ASSERT(pp->p_szc >= ttesz);
3419 3420 ASSERT(pp->p_szc == pp1->p_szc);
3420 3421 ASSERT(sfmmu_mlist_held(pp));
3421 3422
3422 3423 /*
3423 3424 * XXX is it possible to maintain P_RO on the root only?
3424 3425 */
3425 3426 if (TTE_IS_WRITABLE(ttep) && PP_ISRO(pp)) {
3426 3427 pmtx = sfmmu_page_enter(pp);
3427 3428 PP_CLRRO(pp);
3428 3429 sfmmu_page_exit(pmtx);
3429 3430 } else if (!PP_ISMAPPED(pp) && !TTE_IS_WRITABLE(ttep) &&
3430 3431 !PP_ISMOD(pp)) {
3431 3432 pmtx = sfmmu_page_enter(pp);
3432 3433 if (!(PP_ISMOD(pp))) {
3433 3434 PP_SETRO(pp);
3434 3435 }
3435 3436 sfmmu_page_exit(pmtx);
3436 3437 }
3437 3438
3438 3439 /*
3439 3440 * If this is a remap we skip vac & contiguity checks.
3440 3441 */
3441 3442 if (remap)
3442 3443 continue;
3443 3444
3444 3445 /*
3445 3446 * set p_vcolor and detect any vac conflicts.
3446 3447 */
3447 3448 #ifdef VAC
3448 3449 if (vac_err == 0) {
3449 3450 vac_err = sfmmu_vacconflict_array(addr, pp, &cflags);
3450 3451
3451 3452 }
3452 3453 #endif
3453 3454
3454 3455 /*
3455 3456 * Save current index in case we need to undo it.
3456 3457 * Note: "PAGESZ_TO_INDEX(sz) (1 << (sz))"
3457 3458 * "SFMMU_INDEX_SHIFT 6"
3458 3459 * "SFMMU_INDEX_MASK ((1 << SFMMU_INDEX_SHIFT) - 1)"
3459 3460 * "PP_MAPINDEX(p_index) (p_index & SFMMU_INDEX_MASK)"
3460 3461 *
3461 3462 * So: index = PAGESZ_TO_INDEX(ttesz);
3462 3463 * if ttesz == 1 then index = 0x2
3463 3464 * 2 then index = 0x4
3464 3465 * 3 then index = 0x8
3465 3466 * 4 then index = 0x10
3466 3467 * 5 then index = 0x20
3467 3468 * The code below checks if it's a new pagesize (ie, newidx)
3468 3469 * in case we need to take it back out of p_index,
3469 3470 * and then or's the new index into the existing index.
3470 3471 */
3471 3472 if ((PP_MAPINDEX(pp) & index) == 0)
3472 3473 newidx = 1;
3473 3474 pp->p_index = (PP_MAPINDEX(pp) | index);
3474 3475
3475 3476 /*
3476 3477 * contiguity check
3477 3478 */
3478 3479 if (pp->p_pagenum != pfnum) {
3479 3480 /*
3480 3481 * If we fail the contiguity test then
3481 3482 * the only thing we need to fix is the p_index field.
3482 3483 * We might get a few extra flushes but since this
3483 3484 * path is rare that is ok. The p_ro field will
3484 3485 * get automatically fixed on the next tteload to
3485 3486 * the page. NO TNC bit is set yet.
3486 3487 */
3487 3488 while (i >= 0) {
3488 3489 pp = *pps;
3489 3490 if (newidx)
3490 3491 pp->p_index = (PP_MAPINDEX(pp) &
3491 3492 ~index);
3492 3493 pps--;
3493 3494 i--;
3494 3495 }
3495 3496 return (1);
3496 3497 }
3497 3498 pfnum++;
3498 3499 addr += MMU_PAGESIZE;
3499 3500 }
3500 3501
3501 3502 #ifdef VAC
3502 3503 if (vac_err) {
3503 3504 if (ttesz > osz) {
3504 3505 /*
3505 3506 * There are some smaller mappings that cause vac
3506 3507 * conflicts. Convert all existing small mappings to
3507 3508 * TNC.
3508 3509 */
3509 3510 SFMMU_STAT_ADD(sf_uncache_conflict, npgs);
3510 3511 sfmmu_page_cache_array(pp1, HAT_TMPNC, CACHE_FLUSH,
3511 3512 npgs);
3512 3513 } else {
3513 3514 /* EMPTY */
3514 3515 /*
3515 3516 * If there exists a big page mapping,
3516 3517 * that means the whole existing big page
3517 3518 * has the TNC setting already. No need to convert to
3518 3519 * TNC again.
3519 3520 */
3520 3521 ASSERT(PP_ISTNC(pp1));
3521 3522 }
3522 3523 }
3523 3524 #endif /* VAC */
3524 3525
3525 3526 return (0);
3526 3527 }
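/*
 * A hedged sketch of the p_index bookkeeping described in the comment
 * above: each large page size owns one bit (1 << szc), the bit is OR-ed in
 * tentatively for every constituent page, and it is cleared again on the
 * pages already visited if the contiguity check fails.  The undo here is
 * simplified (it always clears the bit), and the types are illustrative.
 */
#include <stdint.h>

#define	SKETCH_PAGESZ_TO_INDEX(szc)	((uint16_t)(1u << (szc)))

static int
tag_large_page(uint16_t *p_index, const uint64_t *pfns, int npgs, int szc)
{
	uint16_t bit = SKETCH_PAGESZ_TO_INDEX(szc);
	int i;

	for (i = 0; i < npgs; i++) {
		p_index[i] |= bit;
		/* Contiguity check: frame numbers must follow the first. */
		if (pfns[i] != pfns[0] + (uint64_t)i) {
			for (; i >= 0; i--)
				p_index[i] &= ~bit;
			return (1);
		}
	}
	return (0);
}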
3527 3528
3528 3529 #ifdef VAC
3529 3530 /*
3530 3531 * Routine that checks vac consistency for a large page. It also
3531 3532 * sets virtual color for all pp's for this big mapping.
3532 3533 */
3533 3534 static int
3534 3535 sfmmu_vacconflict_array(caddr_t addr, page_t *pp, int *cflags)
3535 3536 {
3536 3537 int vcolor, ocolor;
3537 3538
3538 3539 ASSERT(sfmmu_mlist_held(pp));
3539 3540
3540 3541 if (PP_ISNC(pp)) {
3541 3542 return (HAT_TMPNC);
3542 3543 }
3543 3544
3544 3545 vcolor = addr_to_vcolor(addr);
3545 3546 if (PP_NEWPAGE(pp)) {
3546 3547 PP_SET_VCOLOR(pp, vcolor);
3547 3548 return (0);
3548 3549 }
3549 3550
3550 3551 ocolor = PP_GET_VCOLOR(pp);
3551 3552 if (ocolor == vcolor) {
3552 3553 return (0);
3553 3554 }
3554 3555
3555 3556 if (!PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp)) {
3556 3557 /*
3557 3558 * Previous user of page had a different color
3558 3559 * but since there are no current users
3559 3560 * we just flush the cache and change the color.
3560 3561 * As an optimization for large pages we flush the
3561 3562 * entire cache of that color and set a flag.
3562 3563 */
3563 3564 SFMMU_STAT(sf_pgcolor_conflict);
3564 3565 if (!CacheColor_IsFlushed(*cflags, ocolor)) {
3565 3566 CacheColor_SetFlushed(*cflags, ocolor);
3566 3567 sfmmu_cache_flushcolor(ocolor, pp->p_pagenum);
3567 3568 }
3568 3569 PP_SET_VCOLOR(pp, vcolor);
3569 3570 return (0);
3570 3571 }
3571 3572
3572 3573 /*
3573 3574 * We got a real conflict with a current mapping.
3574 3575 * Set flags to start uncaching all mappings
3575 3576 * and return failure so we restart looping over
3576 3577 * the pp array from the beginning.
3577 3578 */
3578 3579 return (HAT_TMPNC);
3579 3580 }
3580 3581 #endif /* VAC */
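/*
 * A hedged sketch of the "flush each cache color at most once" optimization
 * used above, tracking already-flushed colors in a bitmask the way
 * CacheColor_IsFlushed()/CacheColor_SetFlushed() do.  flush_color() is an
 * illustrative stand-in, and at most 32 colors are assumed.
 */
#include <stdint.h>

extern void flush_color(int color);	/* illustrative cache-flush hook */

static void
flush_color_once(uint32_t *flushed_mask, int color)
{
	if ((*flushed_mask & (1u << color)) == 0) {
		*flushed_mask |= (1u << color);
		flush_color(color);
	}
}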
3581 3582
3582 3583 /*
3583 3584 * creates a large page shadow hmeblk for a tte.
3584 3585 * The purpose of this routine is to allow us to do quick unloads because
3585 3586 * the vm layer can easily pass a very large but sparsely populated range.
3586 3587 */
3587 3588 static struct hme_blk *
3588 3589 sfmmu_shadow_hcreate(sfmmu_t *sfmmup, caddr_t vaddr, int ttesz, uint_t flags)
3589 3590 {
3590 3591 struct hmehash_bucket *hmebp;
3591 3592 hmeblk_tag hblktag;
3592 3593 int hmeshift, size, vshift;
3593 3594 uint_t shw_mask, newshw_mask;
3594 3595 struct hme_blk *hmeblkp;
3595 3596
3596 3597 ASSERT(sfmmup != KHATID);
3597 3598 if (mmu_page_sizes == max_mmu_page_sizes) {
3598 3599 ASSERT(ttesz < TTE256M);
3599 3600 } else {
3600 3601 ASSERT(ttesz < TTE4M);
3601 3602 ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0);
3602 3603 ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0);
3603 3604 }
3604 3605
3605 3606 if (ttesz == TTE8K) {
3606 3607 size = TTE512K;
3607 3608 } else {
3608 3609 size = ++ttesz;
3609 3610 }
3610 3611
3611 3612 hblktag.htag_id = sfmmup;
3612 3613 hmeshift = HME_HASH_SHIFT(size);
3613 3614 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift);
3614 3615 hblktag.htag_rehash = HME_HASH_REHASH(size);
3615 3616 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
3616 3617 hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift);
3617 3618
3618 3619 SFMMU_HASH_LOCK(hmebp);
3619 3620
3620 3621 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp);
3621 3622 ASSERT(hmeblkp != (struct hme_blk *)hblk_reserve);
3622 3623 if (hmeblkp == NULL) {
3623 3624 hmeblkp = sfmmu_hblk_alloc(sfmmup, vaddr, hmebp, size,
3624 3625 hblktag, flags, SFMMU_INVALID_SHMERID);
3625 3626 }
3626 3627 ASSERT(hmeblkp);
3627 3628 if (!hmeblkp->hblk_shw_mask) {
3628 3629 /*
3629 3630 * if this is an unused hblk it was just allocated or could
3630 3631 * potentially be a previous large page hblk so we need to
3631 3632 * set the shadow bit.
3632 3633 */
3633 3634 ASSERT(!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt);
3634 3635 hmeblkp->hblk_shw_bit = 1;
3635 3636 } else if (hmeblkp->hblk_shw_bit == 0) {
3636 3637 panic("sfmmu_shadow_hcreate: shw bit not set in hmeblkp 0x%p",
3637 3638 (void *)hmeblkp);
3638 3639 }
3639 3640 ASSERT(hmeblkp->hblk_shw_bit == 1);
3640 3641 ASSERT(!hmeblkp->hblk_shared);
3641 3642 vshift = vaddr_to_vshift(hblktag, vaddr, size);
3642 3643 ASSERT(vshift < 8);
3643 3644 /*
3644 3645 * Atomically set shw mask bit
3645 3646 */
3646 3647 do {
3647 3648 shw_mask = hmeblkp->hblk_shw_mask;
3648 3649 newshw_mask = shw_mask | (1 << vshift);
3649 3650 newshw_mask = atomic_cas_32(&hmeblkp->hblk_shw_mask, shw_mask,
3650 3651 newshw_mask);
3651 3652 } while (newshw_mask != shw_mask);
3652 3653
3653 3654 SFMMU_HASH_UNLOCK(hmebp);
3654 3655
3655 3656 return (hmeblkp);
3656 3657 }
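/*
 * The shw_mask update above is a compare-and-swap retry loop.  A hedged C11
 * user-space equivalent of atomically setting one bit in a 32-bit mask; in
 * the kernel this is expressed with atomic_cas_32().  (atomic_fetch_or()
 * would also work; the loop form mirrors the code above.)
 */
#include <stdatomic.h>
#include <stdint.h>

static void
atomic_set_bit(_Atomic uint32_t *maskp, int bit)
{
	uint32_t oldv, newv;

	do {
		oldv = atomic_load(maskp);
		newv = oldv | (1u << bit);
	} while (!atomic_compare_exchange_weak(maskp, &oldv, newv));
}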
3657 3658
3658 3659 /*
3659 3660 * This routine cleans up a previous shadow hmeblk and changes it to
3660 3661 * a regular hblk. This happens rarely but it is possible
3661 3662 * when a process wants to use large pages and there are hblks still
3662 3663 * lying around from the previous address space (as) that used these hmeblks.
3663 3664 * The alternative was to clean up the shadow hblks at unload time
3664 3665 * but since so few user processes actually use large pages, it is
3665 3666 * better to be lazy and clean up at this time.
3666 3667 */
3667 3668 static void
3668 3669 sfmmu_shadow_hcleanup(sfmmu_t *sfmmup, struct hme_blk *hmeblkp,
3669 - struct hmehash_bucket *hmebp)
3670 + struct hmehash_bucket *hmebp)
3670 3671 {
3671 3672 caddr_t addr, endaddr;
3672 3673 int hashno, size;
3673 3674
3674 3675 ASSERT(hmeblkp->hblk_shw_bit);
3675 3676 ASSERT(!hmeblkp->hblk_shared);
3676 3677
3677 3678 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
3678 3679
3679 3680 if (!hmeblkp->hblk_shw_mask) {
3680 3681 hmeblkp->hblk_shw_bit = 0;
3681 3682 return;
3682 3683 }
3683 3684 addr = (caddr_t)get_hblk_base(hmeblkp);
3684 3685 endaddr = get_hblk_endaddr(hmeblkp);
3685 3686 size = get_hblk_ttesz(hmeblkp);
3686 3687 hashno = size - 1;
3687 3688 ASSERT(hashno > 0);
3688 3689 SFMMU_HASH_UNLOCK(hmebp);
3689 3690
3690 3691 sfmmu_free_hblks(sfmmup, addr, endaddr, hashno);
3691 3692
3692 3693 SFMMU_HASH_LOCK(hmebp);
3693 3694 }
3694 3695
3695 3696 static void
3696 3697 sfmmu_free_hblks(sfmmu_t *sfmmup, caddr_t addr, caddr_t endaddr,
3697 - int hashno)
3698 + int hashno)
3698 3699 {
3699 3700 int hmeshift, shadow = 0;
3700 3701 hmeblk_tag hblktag;
3701 3702 struct hmehash_bucket *hmebp;
3702 3703 struct hme_blk *hmeblkp;
3703 3704 struct hme_blk *nx_hblk, *pr_hblk, *list = NULL;
3704 3705
3705 3706 ASSERT(hashno > 0);
3706 3707 hblktag.htag_id = sfmmup;
3707 3708 hblktag.htag_rehash = hashno;
3708 3709 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
3709 3710
3710 3711 hmeshift = HME_HASH_SHIFT(hashno);
3711 3712
3712 3713 while (addr < endaddr) {
3713 3714 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
3714 3715 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
3715 3716 SFMMU_HASH_LOCK(hmebp);
3716 3717 /* inline HME_HASH_SEARCH */
3717 3718 hmeblkp = hmebp->hmeblkp;
3718 3719 pr_hblk = NULL;
3719 3720 while (hmeblkp) {
3720 3721 if (HTAGS_EQ(hmeblkp->hblk_tag, hblktag)) {
3721 3722 /* found hme_blk */
3722 3723 ASSERT(!hmeblkp->hblk_shared);
3723 3724 if (hmeblkp->hblk_shw_bit) {
3724 3725 if (hmeblkp->hblk_shw_mask) {
3725 3726 shadow = 1;
3726 3727 sfmmu_shadow_hcleanup(sfmmup,
3727 3728 hmeblkp, hmebp);
3728 3729 break;
3729 3730 } else {
3730 3731 hmeblkp->hblk_shw_bit = 0;
3731 3732 }
3732 3733 }
3733 3734
3734 3735 /*
3735 3736 * Hblk_hmecnt and hblk_vcnt could be nonzero
3736 3737 * since hblk_unload() does not guarantee that.
3737 3738 *
3738 3739 * XXX - this could cause tteload() to spin
3739 3740 * where sfmmu_shadow_hcleanup() is called.
3740 3741 */
3741 3742 }
3742 3743
3743 3744 nx_hblk = hmeblkp->hblk_next;
3744 3745 if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
3745 3746 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
3746 3747 &list, 0);
3747 3748 } else {
3748 3749 pr_hblk = hmeblkp;
3749 3750 }
3750 3751 hmeblkp = nx_hblk;
3751 3752 }
3752 3753
3753 3754 SFMMU_HASH_UNLOCK(hmebp);
3754 3755
3755 3756 if (shadow) {
3756 3757 /*
3757 3758 * We found another shadow hblk so cleaned its
3758 3759 * children. We need to go back and cleanup
3759 3760 * the original hblk so we don't change the
3760 3761 * addr.
3761 3762 */
3762 3763 shadow = 0;
3763 3764 } else {
3764 3765 addr = (caddr_t)roundup((uintptr_t)addr + 1,
3765 3766 (1 << hmeshift));
3766 3767 }
3767 3768 }
3768 3769 sfmmu_hblks_list_purge(&list, 0);
3769 3770 }
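/*
 * The bucket walk above unlinks empty blocks while iterating, carrying a
 * trailing "previous" pointer so removal does not lose its place in the
 * chain.  A hedged singly-linked-list sketch of that pattern; the node
 * layout and the emptiness test are illustrative.
 */
struct sketch_node {
	struct sketch_node *next;
	int refcnt;
};

static void
prune_empty(struct sketch_node **headp)
{
	struct sketch_node *cur = *headp, *prev = NULL, *next;

	while (cur != NULL) {
		next = cur->next;
		if (cur->refcnt == 0) {
			if (prev == NULL)
				*headp = next;		/* removing the head */
			else
				prev->next = next;	/* splice around cur */
			/* cur would be queued for freeing here */
		} else {
			prev = cur;			/* keep and advance */
		}
		cur = next;
	}
}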
3770 3771
3771 3772 /*
3772 3773 * This routine's job is to delete stale invalid shared hmeregion hmeblks that
3773 3774 * may still linger on after pageunload.
3774 3775 */
3775 3776 static void
3776 3777 sfmmu_cleanup_rhblk(sf_srd_t *srdp, caddr_t addr, uint_t rid, int ttesz)
3777 3778 {
3778 3779 int hmeshift;
3779 3780 hmeblk_tag hblktag;
3780 3781 struct hmehash_bucket *hmebp;
3781 3782 struct hme_blk *hmeblkp;
3782 3783 struct hme_blk *pr_hblk;
3783 3784 struct hme_blk *list = NULL;
3784 3785
3785 3786 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
3786 3787 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
3787 3788
3788 3789 hmeshift = HME_HASH_SHIFT(ttesz);
3789 3790 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
3790 3791 hblktag.htag_rehash = ttesz;
3791 3792 hblktag.htag_rid = rid;
3792 3793 hblktag.htag_id = srdp;
3793 3794 hmebp = HME_HASH_FUNCTION(srdp, addr, hmeshift);
3794 3795
3795 3796 SFMMU_HASH_LOCK(hmebp);
3796 3797 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list);
3797 3798 if (hmeblkp != NULL) {
3798 3799 ASSERT(hmeblkp->hblk_shared);
3799 3800 ASSERT(!hmeblkp->hblk_shw_bit);
3800 3801 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) {
3801 3802 panic("sfmmu_cleanup_rhblk: valid hmeblk");
3802 3803 }
3803 3804 ASSERT(!hmeblkp->hblk_lckcnt);
3804 3805 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
3805 3806 &list, 0);
3806 3807 }
3807 3808 SFMMU_HASH_UNLOCK(hmebp);
3808 3809 sfmmu_hblks_list_purge(&list, 0);
3809 3810 }
3810 3811
3811 3812 /* ARGSUSED */
3812 3813 static void
3813 3814 sfmmu_rgn_cb_noop(caddr_t saddr, caddr_t eaddr, caddr_t r_saddr,
3814 3815 size_t r_size, void *r_obj, u_offset_t r_objoff)
3815 3816 {
3816 3817 }
3817 3818
3818 3819 /*
3819 3820 * Searches for an hmeblk which maps addr, then unloads this mapping
3820 3821 * and updates *eaddrp, if the hmeblk is found.
3821 3822 */
3822 3823 static void
3823 3824 sfmmu_unload_hmeregion_va(sf_srd_t *srdp, uint_t rid, caddr_t addr,
3824 3825 caddr_t eaddr, int ttesz, caddr_t *eaddrp)
3825 3826 {
3826 3827 int hmeshift;
3827 3828 hmeblk_tag hblktag;
3828 3829 struct hmehash_bucket *hmebp;
3829 3830 struct hme_blk *hmeblkp;
3830 3831 struct hme_blk *pr_hblk;
3831 3832 struct hme_blk *list = NULL;
3832 3833
3833 3834 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
3834 3835 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
3835 3836 ASSERT(ttesz >= HBLK_MIN_TTESZ);
3836 3837
3837 3838 hmeshift = HME_HASH_SHIFT(ttesz);
3838 3839 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
3839 3840 hblktag.htag_rehash = ttesz;
3840 3841 hblktag.htag_rid = rid;
3841 3842 hblktag.htag_id = srdp;
3842 3843 hmebp = HME_HASH_FUNCTION(srdp, addr, hmeshift);
3843 3844
3844 3845 SFMMU_HASH_LOCK(hmebp);
3845 3846 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list);
3846 3847 if (hmeblkp != NULL) {
3847 3848 ASSERT(hmeblkp->hblk_shared);
3848 3849 ASSERT(!hmeblkp->hblk_lckcnt);
3849 3850 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) {
3850 3851 *eaddrp = sfmmu_hblk_unload(NULL, hmeblkp, addr,
3851 3852 eaddr, NULL, HAT_UNLOAD);
3852 3853 ASSERT(*eaddrp > addr);
3853 3854 }
3854 3855 ASSERT(!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt);
3855 3856 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
3856 3857 &list, 0);
3857 3858 }
3858 3859 SFMMU_HASH_UNLOCK(hmebp);
3859 3860 sfmmu_hblks_list_purge(&list, 0);
3860 3861 }
3861 3862
3862 3863 static void
3863 3864 sfmmu_unload_hmeregion(sf_srd_t *srdp, sf_region_t *rgnp)
3864 3865 {
3865 3866 int ttesz = rgnp->rgn_pgszc;
3866 3867 size_t rsz = rgnp->rgn_size;
3867 3868 caddr_t rsaddr = rgnp->rgn_saddr;
3868 3869 caddr_t readdr = rsaddr + rsz;
3869 3870 caddr_t rhsaddr;
3870 3871 caddr_t va;
3871 3872 uint_t rid = rgnp->rgn_id;
3872 3873 caddr_t cbsaddr;
3873 3874 caddr_t cbeaddr;
3874 3875 hat_rgn_cb_func_t rcbfunc;
3875 3876 ulong_t cnt;
3876 3877
3877 3878 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
3878 3879 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
3879 3880
3880 3881 ASSERT(IS_P2ALIGNED(rsaddr, TTEBYTES(ttesz)));
3881 3882 ASSERT(IS_P2ALIGNED(rsz, TTEBYTES(ttesz)));
3882 3883 if (ttesz < HBLK_MIN_TTESZ) {
3883 3884 ttesz = HBLK_MIN_TTESZ;
3884 3885 rhsaddr = (caddr_t)P2ALIGN((uintptr_t)rsaddr, HBLK_MIN_BYTES);
3885 3886 } else {
3886 3887 rhsaddr = rsaddr;
3887 3888 }
3888 3889
3889 3890 if ((rcbfunc = rgnp->rgn_cb_function) == NULL) {
3890 3891 rcbfunc = sfmmu_rgn_cb_noop;
3891 3892 }
3892 3893
3893 3894 while (ttesz >= HBLK_MIN_TTESZ) {
3894 3895 cbsaddr = rsaddr;
3895 3896 cbeaddr = rsaddr;
3896 3897 if (!(rgnp->rgn_hmeflags & (1 << ttesz))) {
3897 3898 ttesz--;
3898 3899 continue;
3899 3900 }
3900 3901 cnt = 0;
3901 3902 va = rsaddr;
3902 3903 while (va < readdr) {
3903 3904 ASSERT(va >= rhsaddr);
3904 3905 if (va != cbeaddr) {
3905 3906 if (cbeaddr != cbsaddr) {
3906 3907 ASSERT(cbeaddr > cbsaddr);
3907 3908 (*rcbfunc)(cbsaddr, cbeaddr,
3908 3909 rsaddr, rsz, rgnp->rgn_obj,
3909 3910 rgnp->rgn_objoff);
3910 3911 }
3911 3912 cbsaddr = va;
3912 3913 cbeaddr = va;
3913 3914 }
3914 3915 sfmmu_unload_hmeregion_va(srdp, rid, va, readdr,
3915 3916 ttesz, &cbeaddr);
3916 3917 cnt++;
3917 3918 va = rhsaddr + (cnt << TTE_PAGE_SHIFT(ttesz));
3918 3919 }
3919 3920 if (cbeaddr != cbsaddr) {
3920 3921 ASSERT(cbeaddr > cbsaddr);
3921 3922 (*rcbfunc)(cbsaddr, cbeaddr, rsaddr,
3922 3923 rsz, rgnp->rgn_obj,
3923 3924 rgnp->rgn_objoff);
3924 3925 }
3925 3926 ttesz--;
3926 3927 }
3927 3928 }
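/*
 * The unload loop above batches contiguous unloaded ranges and invokes the
 * region callback once per batch (cbsaddr/cbeaddr) instead of once per
 * block.  A hedged sketch of that coalescing pattern over an array of
 * [start, end) ranges; the callback type is illustrative.
 */
#include <stddef.h>
#include <stdint.h>

typedef void (*range_cb_t)(uintptr_t start, uintptr_t end);

static void
coalesce_ranges(const uintptr_t (*r)[2], size_t n, range_cb_t cb)
{
	size_t i;
	uintptr_t cs, ce;

	if (n == 0)
		return;
	cs = r[0][0];
	ce = r[0][1];
	for (i = 1; i < n; i++) {
		if (r[i][0] == ce) {
			ce = r[i][1];		/* adjacent: extend the batch */
		} else {
			cb(cs, ce);		/* gap: flush current batch */
			cs = r[i][0];
			ce = r[i][1];
		}
	}
	cb(cs, ce);				/* flush the final batch */
}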
3928 3929
3929 3930 /*
3930 3931 * Release one hardware address translation lock on the given address range.
3931 3932 */
3932 3933 void
3933 3934 hat_unlock(struct hat *sfmmup, caddr_t addr, size_t len)
3934 3935 {
3935 3936 struct hmehash_bucket *hmebp;
3936 3937 hmeblk_tag hblktag;
3937 3938 int hmeshift, hashno = 1;
3938 3939 struct hme_blk *hmeblkp, *list = NULL;
3939 3940 caddr_t endaddr;
3940 3941
3941 3942 ASSERT(sfmmup != NULL);
3942 3943
3943 3944 ASSERT((sfmmup == ksfmmup) || AS_LOCK_HELD(sfmmup->sfmmu_as));
3944 3945 ASSERT((len & MMU_PAGEOFFSET) == 0);
3945 3946 endaddr = addr + len;
3946 3947 hblktag.htag_id = sfmmup;
3947 3948 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
3948 3949
3949 3950 /*
3950 3951 * Spitfire supports 4 page sizes.
3951 3952 * Most pages are expected to be of the smallest page size (8K) and
3952 3953 * these will not need to be rehashed. 64K pages also don't need to be
3953 3954 * rehashed because an hmeblk spans 64K of address space. 512K pages
3954 3955 * might need 1 rehash and 4M pages might need 2 rehashes.
3955 3956 */
3956 3957 while (addr < endaddr) {
3957 3958 hmeshift = HME_HASH_SHIFT(hashno);
3958 3959 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
3959 3960 hblktag.htag_rehash = hashno;
3960 3961 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
3961 3962
3962 3963 SFMMU_HASH_LOCK(hmebp);
3963 3964
3964 3965 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
3965 3966 if (hmeblkp != NULL) {
3966 3967 ASSERT(!hmeblkp->hblk_shared);
3967 3968 /*
3968 3969 * If we encounter a shadow hmeblk then
3969 3970 * we know there are no valid hmeblks mapping
3970 3971 * this address at this size or larger.
3971 3972 * Just increment address by the smallest
3972 3973 * page size.
3973 3974 */
3974 3975 if (hmeblkp->hblk_shw_bit) {
3975 3976 addr += MMU_PAGESIZE;
3976 3977 } else {
3977 3978 addr = sfmmu_hblk_unlock(hmeblkp, addr,
3978 3979 endaddr);
3979 3980 }
3980 3981 SFMMU_HASH_UNLOCK(hmebp);
3981 3982 hashno = 1;
3982 3983 continue;
3983 3984 }
3984 3985 SFMMU_HASH_UNLOCK(hmebp);
3985 3986
3986 3987 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) {
3987 3988 /*
3988 3989 * We have traversed the whole list and rehashed
3989 3990 * if necessary without finding the address to unlock
3990 3991 * which should never happen.
3991 3992 */
3992 3993 panic("sfmmu_unlock: addr not found. "
3993 3994 "addr %p hat %p", (void *)addr, (void *)sfmmup);
3994 3995 } else {
3995 3996 hashno++;
3996 3997 }
3997 3998 }
3998 3999
3999 4000 sfmmu_hblks_list_purge(&list, 0);
4000 4001 }
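/*
 * hat_unlock() above probes the hash levels in order, restarting at the
 * smallest level after each hit, because most mappings use the smallest
 * page size.  A hedged sketch of that escalating-rehash lookup;
 * lookup_level() is an illustrative stand-in for one hash probe.
 */
#include <stdint.h>

extern void *lookup_level(uintptr_t addr, int level);	/* illustrative */

static void *
lookup_any_level(uintptr_t addr, int max_level)
{
	int level;
	void *p;

	for (level = 1; level <= max_level; level++) {
		if ((p = lookup_level(addr, level)) != NULL)
			return (p);	/* found at this page-size level */
	}
	return (NULL);			/* not mapped at any level */
}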
4001 4002
4002 4003 void
4003 4004 hat_unlock_region(struct hat *sfmmup, caddr_t addr, size_t len,
4004 4005 hat_region_cookie_t rcookie)
4005 4006 {
4006 4007 sf_srd_t *srdp;
4007 4008 sf_region_t *rgnp;
4008 4009 int ttesz;
4009 4010 uint_t rid;
4010 4011 caddr_t eaddr;
4011 4012 caddr_t va;
4012 4013 int hmeshift;
4013 4014 hmeblk_tag hblktag;
4014 4015 struct hmehash_bucket *hmebp;
4015 4016 struct hme_blk *hmeblkp;
4016 4017 struct hme_blk *pr_hblk;
4017 4018 struct hme_blk *list;
4018 4019
4019 4020 if (rcookie == HAT_INVALID_REGION_COOKIE) {
4020 4021 hat_unlock(sfmmup, addr, len);
4021 4022 return;
4022 4023 }
4023 4024
4024 4025 ASSERT(sfmmup != NULL);
4025 4026 ASSERT(sfmmup != ksfmmup);
4026 4027
4027 4028 srdp = sfmmup->sfmmu_srdp;
4028 4029 rid = (uint_t)((uint64_t)rcookie);
4029 4030 VERIFY3U(rid, <, SFMMU_MAX_HME_REGIONS);
4030 4031 eaddr = addr + len;
4031 4032 va = addr;
4032 4033 list = NULL;
4033 4034 rgnp = srdp->srd_hmergnp[rid];
4034 4035 SFMMU_VALIDATE_HMERID(sfmmup, rid, addr, len);
4035 4036
4036 4037 ASSERT(IS_P2ALIGNED(addr, TTEBYTES(rgnp->rgn_pgszc)));
4037 4038 ASSERT(IS_P2ALIGNED(len, TTEBYTES(rgnp->rgn_pgszc)));
4038 4039 if (rgnp->rgn_pgszc < HBLK_MIN_TTESZ) {
4039 4040 ttesz = HBLK_MIN_TTESZ;
4040 4041 } else {
4041 4042 ttesz = rgnp->rgn_pgszc;
4042 4043 }
4043 4044 while (va < eaddr) {
4044 4045 while (ttesz < rgnp->rgn_pgszc &&
4045 4046 IS_P2ALIGNED(va, TTEBYTES(ttesz + 1))) {
4046 4047 ttesz++;
4047 4048 }
4048 4049 while (ttesz >= HBLK_MIN_TTESZ) {
4049 4050 if (!(rgnp->rgn_hmeflags & (1 << ttesz))) {
4050 4051 ttesz--;
4051 4052 continue;
4052 4053 }
4053 4054 hmeshift = HME_HASH_SHIFT(ttesz);
4054 4055 hblktag.htag_bspage = HME_HASH_BSPAGE(va, hmeshift);
4055 4056 hblktag.htag_rehash = ttesz;
4056 4057 hblktag.htag_rid = rid;
4057 4058 hblktag.htag_id = srdp;
4058 4059 hmebp = HME_HASH_FUNCTION(srdp, va, hmeshift);
4059 4060 SFMMU_HASH_LOCK(hmebp);
4060 4061 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk,
4061 4062 &list);
4062 4063 if (hmeblkp == NULL) {
4063 4064 SFMMU_HASH_UNLOCK(hmebp);
4064 4065 ttesz--;
4065 4066 continue;
4066 4067 }
4067 4068 ASSERT(hmeblkp->hblk_shared);
4068 4069 va = sfmmu_hblk_unlock(hmeblkp, va, eaddr);
4069 4070 ASSERT(va >= eaddr ||
4070 4071 IS_P2ALIGNED((uintptr_t)va, TTEBYTES(ttesz)));
4071 4072 SFMMU_HASH_UNLOCK(hmebp);
4072 4073 break;
4073 4074 }
4074 4075 if (ttesz < HBLK_MIN_TTESZ) {
4075 4076 panic("hat_unlock_region: addr not found "
4076 4077 "addr %p hat %p", (void *)va, (void *)sfmmup);
4077 4078 }
4078 4079 }
4079 4080 sfmmu_hblks_list_purge(&list, 0);
4080 4081 }
4081 4082
4082 4083 /*
4083 4084 * Function to unlock a range of addresses in an hmeblk. It returns the
4084 4085 * next address that needs to be unlocked.
4085 4086 * Should be called with the hash lock held.
4086 4087 */
4087 4088 static caddr_t
4088 4089 sfmmu_hblk_unlock(struct hme_blk *hmeblkp, caddr_t addr, caddr_t endaddr)
4089 4090 {
4090 4091 struct sf_hment *sfhme;
4091 4092 tte_t tteold, ttemod;
4092 4093 int ttesz, ret;
4093 4094
4094 4095 ASSERT(in_hblk_range(hmeblkp, addr));
4095 4096 ASSERT(hmeblkp->hblk_shw_bit == 0);
4096 4097
4097 4098 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
4098 4099 ttesz = get_hblk_ttesz(hmeblkp);
4099 4100
4100 4101 HBLKTOHME(sfhme, hmeblkp, addr);
4101 4102 while (addr < endaddr) {
4102 4103 readtte:
4103 4104 sfmmu_copytte(&sfhme->hme_tte, &tteold);
4104 4105 if (TTE_IS_VALID(&tteold)) {
4105 4106
4106 4107 ttemod = tteold;
4107 4108
4108 4109 ret = sfmmu_modifytte_try(&tteold, &ttemod,
4109 4110 &sfhme->hme_tte);
4110 4111
4111 4112 if (ret < 0)
4112 4113 goto readtte;
4113 4114
4114 4115 if (hmeblkp->hblk_lckcnt == 0)
4115 4116 panic("zero hblk lckcnt");
4116 4117
4117 4118 if (((uintptr_t)addr + TTEBYTES(ttesz)) >
4118 4119 (uintptr_t)endaddr)
4119 4120 panic("can't unlock large tte");
4120 4121
4121 4122 ASSERT(hmeblkp->hblk_lckcnt > 0);
4122 4123 atomic_dec_32(&hmeblkp->hblk_lckcnt);
4123 4124 HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK);
4124 4125 } else {
4125 4126 panic("sfmmu_hblk_unlock: invalid tte");
4126 4127 }
4127 4128 addr += TTEBYTES(ttesz);
4128 4129 sfhme++;
4129 4130 }
4130 4131 return (addr);
4131 4132 }
4132 4133
4133 4134 /*
4134 4135 * Physical Address Mapping Framework
4135 4136 *
4136 4137 * General rules:
4137 4138 *
4138 4139 * (1) Applies only to seg_kmem memory pages. To make things easier,
4139 4140 * seg_kpm addresses are also accepted by the routines, but nothing
4140 4141 * is done with them since by definition their PA mappings are static.
4141 4142 * (2) hat_add_callback() may only be called while holding the page lock
4142 4143 * SE_SHARED or SE_EXCL of the underlying page (e.g., as_pagelock()),
4143 4144 * or passing HAC_PAGELOCK flag.
4144 4145 * (3) prehandler() and posthandler() may not call hat_add_callback() or
4145 4146 * hat_delete_callback(), nor should they allocate memory. Post quiesce
4146 4147 * callbacks may not sleep or acquire adaptive mutex locks.
4147 4148 * (4) Either prehandler() or posthandler() (but not both) may be specified
4148 4149 * as being NULL. Specifying an errhandler() is optional.
4149 4150 *
4150 4151 * Details of using the framework:
4151 4152 *
4152 4153 * registering a callback (hat_register_callback())
4153 4154 *
4154 4155 * Pass prehandler, posthandler, errhandler addresses
4155 4156 * as described below. If capture_cpus argument is nonzero,
4156 4157 * suspend callback to the prehandler will occur with CPUs
4157 4158 * captured and executing xc_loop() and CPUs will remain
4158 4159 * captured until after the posthandler suspend callback
4159 4160 * occurs.
4160 4161 *
4161 4162 * adding a callback (hat_add_callback())
4162 4163 *
4163 4164 * as_pagelock();
4164 4165 * hat_add_callback();
4165 4166 * save returned pfn in private data structures or program registers;
4166 4167 * as_pageunlock();
4167 4168 *
4168 4169 * prehandler()
4169 4170 *
4170 4171 * Stop all accesses by physical address to this memory page.
4171 4172 * Called twice: the first, PRESUSPEND, is a context safe to acquire
4172 4173 * adaptive locks. The second, SUSPEND, is called at high PIL with
4173 4174 * CPUs captured so adaptive locks may NOT be acquired (and all spin
4174 4175 * locks must be XCALL_PIL or higher locks).
4175 4176 *
4176 4177 * May return the following errors:
4177 4178 * EIO: A fatal error has occurred. This will result in panic.
4178 4179 * EAGAIN: The page cannot be suspended. This will fail the
4179 4180 * relocation.
4180 4181 * 0: Success.
4181 4182 *
4182 4183 * posthandler()
4183 4184 *
4184 4185 * Save new pfn in private data structures or program registers;
4185 4186 * not allowed to fail (non-zero return values will result in panic).
4186 4187 *
4187 4188 * errhandler()
4188 4189 *
4189 4190 * called when an error occurs related to the callback. Currently
4190 4191 * the only such error is HAT_CB_ERR_LEAKED which indicates that
4191 4192 * a page is being freed, but there are still outstanding callback(s)
4192 4193 * registered on the page.
4193 4194 *
4194 4195 * removing a callback (hat_delete_callback(); e.g., prior to freeing memory)
4195 4196 *
4196 4197 * stop using physical address
4197 4198 * hat_delete_callback();
4198 4199 *
4199 4200 */
4200 4201
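/*
 * A hedged sketch of a client of this framework, following the protocol
 * described above.  The handler bodies, my_cb_state, and the callback key
 * are illustrative; only the hat_register_callback() signature is taken
 * from this file, and the sketch assumes the usual sfmmu/hat headers.
 */
typedef struct my_cb_state {
	pfn_t	mcs_pfn;		/* pfn currently in use for DMA/PIO */
} my_cb_state_t;

static id_t my_cb_id;

static int
my_prehandler(caddr_t addr, uint_t a1, uint_t a2, void *pvt)
{
	/* Stop all physical-address access to the page, per the rules above. */
	return (0);
}

static int
my_posthandler(caddr_t addr, uint_t a1, uint_t a2, void *pvt, pfn_t newpfn)
{
	/* Adopt the new pfn; this handler is not allowed to fail. */
	((my_cb_state_t *)pvt)->mcs_pfn = newpfn;
	return (0);
}

static void
my_module_init(void)
{
	/* Register once per module; the key should be unique to the module. */
	my_cb_id = hat_register_callback(0x4d594d44, my_prehandler,
	    my_posthandler, NULL, 1);
}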
4201 4202 /*
4202 4203 * Register a callback class. Each subsystem should do this once and
4203 4204 * cache the id_t returned for use in setting up and tearing down callbacks.
4204 4205 *
4205 4206 * There is no facility for removing callback IDs once they are created;
4206 4207 * the "key" should be unique for each module, so in case a module is unloaded
4207 4208 * and subsequently re-loaded, we can recycle the module's previous entry.
4208 4209 */
4209 4210 id_t
4210 4211 hat_register_callback(int key,
4211 - int (*prehandler)(caddr_t, uint_t, uint_t, void *),
4212 - int (*posthandler)(caddr_t, uint_t, uint_t, void *, pfn_t),
4213 - int (*errhandler)(caddr_t, uint_t, uint_t, void *),
4214 - int capture_cpus)
4212 + int (*prehandler)(caddr_t, uint_t, uint_t, void *),
4213 + int (*posthandler)(caddr_t, uint_t, uint_t, void *, pfn_t),
4214 + int (*errhandler)(caddr_t, uint_t, uint_t, void *),
4215 + int capture_cpus)
4215 4216 {
4216 4217 id_t id;
4217 4218
4218 4219 /*
4219 4220 * Search the table for a pre-existing callback associated with
4220 4221 * the identifier "key". If one exists, we re-use that entry in
4221 4222 * the table for this instance, otherwise we assign the next
4222 4223 * available table slot.
4223 4224 */
4224 4225 for (id = 0; id < sfmmu_max_cb_id; id++) {
4225 4226 if (sfmmu_cb_table[id].key == key)
4226 4227 break;
4227 4228 }
4228 4229
4229 4230 if (id == sfmmu_max_cb_id) {
4230 4231 id = sfmmu_cb_nextid++;
4231 4232 if (id >= sfmmu_max_cb_id)
4232 4233 panic("hat_register_callback: out of callback IDs");
4233 4234 }
4234 4235
4235 4236 ASSERT(prehandler != NULL || posthandler != NULL);
4236 4237
4237 4238 sfmmu_cb_table[id].key = key;
4238 4239 sfmmu_cb_table[id].prehandler = prehandler;
4239 4240 sfmmu_cb_table[id].posthandler = posthandler;
4240 4241 sfmmu_cb_table[id].errhandler = errhandler;
4241 4242 sfmmu_cb_table[id].capture_cpus = capture_cpus;
4242 4243
4243 4244 return (id);
4244 4245 }
4245 4246
4246 4247 #define HAC_COOKIE_NONE (void *)-1
4247 4248
4248 4249 /*
4249 4250 * Add relocation callbacks to the specified addr/len which will be called
4250 4251 * when relocating the associated page. See the description of pre and
4251 4252 * posthandler above for more details.
4252 4253 *
4253 4254 * If HAC_PAGELOCK is included in flags, the underlying memory page is
4254 4255 * locked internally so the caller must be able to deal with the callback
4255 4256 * running even before this function has returned. If HAC_PAGELOCK is not
4256 4257 * set, it is assumed that the underlying memory pages are locked.
4257 4258 *
4258 4259 * Since the caller must track the individual page boundaries anyway,
4259 4260 * we only allow a callback to be added to a single page (large
4260 4261 * or small). Thus [addr, addr + len) MUST be contained within a single
4261 4262 * page.
4262 4263 *
4263 4264 * Registering multiple callbacks on the same [addr, addr+len) is supported,
4264 4265 * _provided_that_ a unique parameter is specified for each callback.
4265 4266 * If multiple callbacks are registered on the same range the callback will
4266 4267 * be invoked with each unique parameter. Registering the same callback with
4267 4268 * the same argument more than once will result in corrupted kernel state.
4268 4269 *
4269 4270 * Returns the pfn of the underlying kernel page in *rpfn
4270 4271 * on success, or PFN_INVALID on failure.
4271 4272 *
4272 4273 * cookiep (if passed) provides storage space for an opaque cookie
4273 4274 * to return later to hat_delete_callback(). This cookie makes the callback
4274 4275 * deletion significantly quicker by avoiding a potentially lengthy hash
4275 4276 * search.
4276 4277 *
4277 4278 * Returns values:
4278 4279 * 0: success
4279 4280 * ENOMEM: memory allocation failure (e.g. flags was passed as HAC_NOSLEEP)
4280 4281 * EINVAL: callback ID is not valid
4281 4282 * ENXIO: ["vaddr", "vaddr" + len) is not mapped in the kernel's address
4282 4283 * space
4283 4284 * ERANGE: ["vaddr", "vaddr" + len) crosses a page boundary
4284 4285 */
4285 4286 int
4286 4287 hat_add_callback(id_t callback_id, caddr_t vaddr, uint_t len, uint_t flags,
4287 - void *pvt, pfn_t *rpfn, void **cookiep)
4288 + void *pvt, pfn_t *rpfn, void **cookiep)
4288 4289 {
4289 - struct hmehash_bucket *hmebp;
4290 - hmeblk_tag hblktag;
4290 + struct hmehash_bucket *hmebp;
4291 + hmeblk_tag hblktag;
4291 4292 struct hme_blk *hmeblkp;
4292 - int hmeshift, hashno;
4293 - caddr_t saddr, eaddr, baseaddr;
4293 + int hmeshift, hashno;
4294 + caddr_t saddr, eaddr, baseaddr;
4294 4295 struct pa_hment *pahmep;
4295 4296 struct sf_hment *sfhmep, *osfhmep;
4296 4297 kmutex_t *pml;
4297 - tte_t tte;
4298 + tte_t tte;
4298 4299 page_t *pp;
4299 4300 vnode_t *vp;
4300 4301 u_offset_t off;
4301 4302 pfn_t pfn;
4302 4303 int kmflags = (flags & HAC_SLEEP)? KM_SLEEP : KM_NOSLEEP;
4303 4304 int locked = 0;
4304 4305
4305 4306 /*
4306 4307 * For KPM mappings, just return the physical address since we
4307 4308 * don't need to register any callbacks.
4308 4309 */
4309 4310 if (IS_KPM_ADDR(vaddr)) {
4310 4311 uint64_t paddr;
4311 4312 SFMMU_KPM_VTOP(vaddr, paddr);
4312 4313 *rpfn = btop(paddr);
4313 4314 if (cookiep != NULL)
4314 4315 *cookiep = HAC_COOKIE_NONE;
4315 4316 return (0);
4316 4317 }
4317 4318
4318 4319 if (callback_id < (id_t)0 || callback_id >= sfmmu_cb_nextid) {
4319 4320 *rpfn = PFN_INVALID;
4320 4321 return (EINVAL);
4321 4322 }
4322 4323
4323 4324 if ((pahmep = kmem_cache_alloc(pa_hment_cache, kmflags)) == NULL) {
4324 4325 *rpfn = PFN_INVALID;
4325 4326 return (ENOMEM);
4326 4327 }
4327 4328
4328 4329 sfhmep = &pahmep->sfment;
4329 4330
4330 4331 saddr = (caddr_t)((uintptr_t)vaddr & MMU_PAGEMASK);
4331 4332 eaddr = saddr + len;
4332 4333
4333 4334 rehash:
4334 4335 /* Find the mapping(s) for this page */
4335 4336 for (hashno = TTE64K, hmeblkp = NULL;
4336 4337 hmeblkp == NULL && hashno <= mmu_hashcnt;
4337 4338 hashno++) {
4338 4339 hmeshift = HME_HASH_SHIFT(hashno);
4339 4340 hblktag.htag_id = ksfmmup;
4340 4341 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
4341 4342 hblktag.htag_bspage = HME_HASH_BSPAGE(saddr, hmeshift);
4342 4343 hblktag.htag_rehash = hashno;
4343 4344 hmebp = HME_HASH_FUNCTION(ksfmmup, saddr, hmeshift);
4344 4345
4345 4346 SFMMU_HASH_LOCK(hmebp);
4346 4347
4347 4348 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp);
4348 4349
4349 4350 if (hmeblkp == NULL)
4350 4351 SFMMU_HASH_UNLOCK(hmebp);
4351 4352 }
4352 4353
4353 4354 if (hmeblkp == NULL) {
4354 4355 kmem_cache_free(pa_hment_cache, pahmep);
4355 4356 *rpfn = PFN_INVALID;
4356 4357 return (ENXIO);
4357 4358 }
4358 4359
4359 4360 ASSERT(!hmeblkp->hblk_shared);
4360 4361
4361 4362 HBLKTOHME(osfhmep, hmeblkp, saddr);
4362 4363 sfmmu_copytte(&osfhmep->hme_tte, &tte);
4363 4364
4364 4365 if (!TTE_IS_VALID(&tte)) {
4365 4366 SFMMU_HASH_UNLOCK(hmebp);
4366 4367 kmem_cache_free(pa_hment_cache, pahmep);
4367 4368 *rpfn = PFN_INVALID;
4368 4369 return (ENXIO);
4369 4370 }
4370 4371
4371 4372 /*
4372 4373 * Make sure the boundaries for the callback fall within this
4373 4374 * single mapping.
4374 4375 */
4375 4376 baseaddr = (caddr_t)get_hblk_base(hmeblkp);
4376 4377 ASSERT(saddr >= baseaddr);
4377 4378 if (eaddr > saddr + TTEBYTES(TTE_CSZ(&tte))) {
4378 4379 SFMMU_HASH_UNLOCK(hmebp);
4379 4380 kmem_cache_free(pa_hment_cache, pahmep);
4380 4381 *rpfn = PFN_INVALID;
4381 4382 return (ERANGE);
4382 4383 }
4383 4384
4384 4385 pfn = sfmmu_ttetopfn(&tte, vaddr);
4385 4386
4386 4387 /*
4387 4388 * The pfn may not have a page_t underneath in which case we
4388 4389 * just return it. This can happen if we are doing I/O to a
4389 4390 * static portion of the kernel's address space, for instance.
4390 4391 */
4391 4392 pp = osfhmep->hme_page;
4392 4393 if (pp == NULL) {
4393 4394 SFMMU_HASH_UNLOCK(hmebp);
4394 4395 kmem_cache_free(pa_hment_cache, pahmep);
4395 4396 *rpfn = pfn;
4396 4397 if (cookiep)
4397 4398 *cookiep = HAC_COOKIE_NONE;
4398 4399 return (0);
4399 4400 }
4400 4401 ASSERT(pp == PP_PAGEROOT(pp));
4401 4402
4402 4403 vp = pp->p_vnode;
4403 4404 off = pp->p_offset;
4404 4405
4405 4406 pml = sfmmu_mlist_enter(pp);
4406 4407
4407 4408 if (flags & HAC_PAGELOCK) {
4408 4409 if (!page_trylock(pp, SE_SHARED)) {
4409 4410 /*
4410 4411 * Somebody is holding SE_EXCL lock. Might
4411 4412 * even be hat_page_relocate(). Drop all
4412 4413 * our locks, lookup the page in &kvp, and
4413 4414 * retry. If it doesn't exist in &kvp and &zvp,
4414 4415 * then we must be dealing with a kernel mapped
4415 4416 * page which doesn't actually belong to
4416 4417 * segkmem so we punt.
4417 4418 */
4418 4419 sfmmu_mlist_exit(pml);
4419 4420 SFMMU_HASH_UNLOCK(hmebp);
4420 4421 pp = page_lookup(&kvp, (u_offset_t)saddr, SE_SHARED);
4421 4422
4422 4423 /* check zvp before giving up */
4423 4424 if (pp == NULL)
4424 4425 pp = page_lookup(&zvp, (u_offset_t)saddr,
4425 4426 SE_SHARED);
4426 4427
4427 4428 /* Okay, we didn't find it, give up */
4428 4429 if (pp == NULL) {
4429 4430 kmem_cache_free(pa_hment_cache, pahmep);
4430 4431 *rpfn = pfn;
4431 4432 if (cookiep)
4432 4433 *cookiep = HAC_COOKIE_NONE;
4433 4434 return (0);
4434 4435 }
4435 4436 page_unlock(pp);
4436 4437 goto rehash;
4437 4438 }
4438 4439 locked = 1;
4439 4440 }
4440 4441
4441 4442 if (!PAGE_LOCKED(pp) && !panicstr)
4442 4443 panic("hat_add_callback: page 0x%p not locked", (void *)pp);
4443 4444
4444 4445 if (osfhmep->hme_page != pp || pp->p_vnode != vp ||
4445 4446 pp->p_offset != off) {
4446 4447 /*
4447 4448 * The page moved before we got our hands on it. Drop
4448 4449 * all the locks and try again.
4449 4450 */
4450 4451 ASSERT((flags & HAC_PAGELOCK) != 0);
4451 4452 sfmmu_mlist_exit(pml);
4452 4453 SFMMU_HASH_UNLOCK(hmebp);
4453 4454 page_unlock(pp);
4454 4455 locked = 0;
4455 4456 goto rehash;
4456 4457 }
4457 4458
4458 4459 if (!VN_ISKAS(vp)) {
4459 4460 /*
4460 4461 * This is not a segkmem page but another page which
4461 4462 * has been kernel mapped. It had better have at least
4462 4463 * a share lock on it. Return the pfn.
4463 4464 */
4464 4465 sfmmu_mlist_exit(pml);
4465 4466 SFMMU_HASH_UNLOCK(hmebp);
4466 4467 if (locked)
4467 4468 page_unlock(pp);
4468 4469 kmem_cache_free(pa_hment_cache, pahmep);
4469 4470 ASSERT(PAGE_LOCKED(pp));
4470 4471 *rpfn = pfn;
4471 4472 if (cookiep)
4472 4473 *cookiep = HAC_COOKIE_NONE;
4473 4474 return (0);
4474 4475 }
4475 4476
4476 4477 /*
4477 4478 * Setup this pa_hment and link its embedded dummy sf_hment into
4478 4479 * the mapping list.
4479 4480 */
4480 4481 pp->p_share++;
4481 4482 pahmep->cb_id = callback_id;
4482 4483 pahmep->addr = vaddr;
4483 4484 pahmep->len = len;
4484 4485 pahmep->refcnt = 1;
4485 4486 pahmep->flags = 0;
4486 4487 pahmep->pvt = pvt;
4487 4488
4488 4489 sfhmep->hme_tte.ll = 0;
4489 4490 sfhmep->hme_data = pahmep;
4490 4491 sfhmep->hme_prev = osfhmep;
4491 4492 sfhmep->hme_next = osfhmep->hme_next;
4492 4493
4493 4494 if (osfhmep->hme_next)
4494 4495 osfhmep->hme_next->hme_prev = sfhmep;
4495 4496
4496 4497 osfhmep->hme_next = sfhmep;
4497 4498
4498 4499 sfmmu_mlist_exit(pml);
4499 4500 SFMMU_HASH_UNLOCK(hmebp);
4500 4501
4501 4502 if (locked)
4502 4503 page_unlock(pp);
4503 4504
4504 4505 *rpfn = pfn;
4505 4506 if (cookiep)
4506 4507 *cookiep = (void *)pahmep;
4507 4508
4508 4509 return (0);
4509 4510 }
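/*
 * A hedged sketch of adding and later removing a callback on a kernel
 * mapping, handling the return values documented above.  my_cb_id refers to
 * the registration sketch earlier; the wrapper names are illustrative.
 */
static int
my_watch_buffer(caddr_t va, uint_t len, void *pvt, pfn_t *pfnp, void **ckp)
{
	int err;

	/* HAC_PAGELOCK lets the framework lock the underlying page itself. */
	err = hat_add_callback(my_cb_id, va, len, HAC_PAGELOCK | HAC_SLEEP,
	    pvt, pfnp, ckp);
	if (err != 0)
		return (err);	/* ENOMEM, EINVAL, ENXIO or ERANGE */
	ASSERT(*pfnp != PFN_INVALID);
	return (0);
}

static void
my_unwatch_buffer(caddr_t va, uint_t len, void *pvt, void *cookie)
{
	/* Stop using the physical address first, then remove the callback. */
	hat_delete_callback(va, len, pvt, HAC_PAGELOCK, cookie);
}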
4510 4511
4511 4512 /*
4512 4513 * Remove the relocation callbacks from the specified addr/len.
4513 4514 */
4514 4515 void
4515 4516 hat_delete_callback(caddr_t vaddr, uint_t len, void *pvt, uint_t flags,
4516 - void *cookie)
4517 + void *cookie)
4517 4518 {
4518 4519 struct hmehash_bucket *hmebp;
4519 4520 hmeblk_tag hblktag;
4520 4521 struct hme_blk *hmeblkp;
4521 4522 int hmeshift, hashno;
4522 4523 caddr_t saddr;
4523 4524 struct pa_hment *pahmep;
4524 4525 struct sf_hment *sfhmep, *osfhmep;
4525 4526 kmutex_t *pml;
4526 4527 tte_t tte;
4527 4528 page_t *pp;
4528 4529 vnode_t *vp;
4529 4530 u_offset_t off;
4530 4531 int locked = 0;
4531 4532
4532 4533 /*
4533 4534 * If the cookie is HAC_COOKIE_NONE then there is no pa_hment to
4534 4535 * remove so just return.
4535 4536 */
4536 4537 if (cookie == HAC_COOKIE_NONE || IS_KPM_ADDR(vaddr))
4537 4538 return;
4538 4539
4539 4540 saddr = (caddr_t)((uintptr_t)vaddr & MMU_PAGEMASK);
4540 4541
4541 4542 rehash:
4542 4543 /* Find the mapping(s) for this page */
4543 4544 for (hashno = TTE64K, hmeblkp = NULL;
4544 4545 hmeblkp == NULL && hashno <= mmu_hashcnt;
4545 4546 hashno++) {
4546 4547 hmeshift = HME_HASH_SHIFT(hashno);
4547 4548 hblktag.htag_id = ksfmmup;
4548 4549 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
4549 4550 hblktag.htag_bspage = HME_HASH_BSPAGE(saddr, hmeshift);
4550 4551 hblktag.htag_rehash = hashno;
4551 4552 hmebp = HME_HASH_FUNCTION(ksfmmup, saddr, hmeshift);
4552 4553
4553 4554 SFMMU_HASH_LOCK(hmebp);
4554 4555
4555 4556 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp);
4556 4557
4557 4558 if (hmeblkp == NULL)
4558 4559 SFMMU_HASH_UNLOCK(hmebp);
4559 4560 }
4560 4561
4561 4562 if (hmeblkp == NULL)
4562 4563 return;
4563 4564
4564 4565 ASSERT(!hmeblkp->hblk_shared);
4565 4566
4566 4567 HBLKTOHME(osfhmep, hmeblkp, saddr);
4567 4568
4568 4569 sfmmu_copytte(&osfhmep->hme_tte, &tte);
4569 4570 if (!TTE_IS_VALID(&tte)) {
4570 4571 SFMMU_HASH_UNLOCK(hmebp);
4571 4572 return;
4572 4573 }
4573 4574
4574 4575 pp = osfhmep->hme_page;
4575 4576 if (pp == NULL) {
4576 4577 SFMMU_HASH_UNLOCK(hmebp);
4577 4578 ASSERT(cookie == NULL);
4578 4579 return;
4579 4580 }
4580 4581
4581 4582 vp = pp->p_vnode;
4582 4583 off = pp->p_offset;
4583 4584
4584 4585 pml = sfmmu_mlist_enter(pp);
4585 4586
4586 4587 if (flags & HAC_PAGELOCK) {
4587 4588 if (!page_trylock(pp, SE_SHARED)) {
4588 4589 /*
4589 4590 * Somebody is holding SE_EXCL lock. Might
4590 4591 * even be hat_page_relocate(). Drop all
4591 4592 * our locks, lookup the page in &kvp, and
4592 4593 * retry. If it doesn't exist in &kvp and &zvp,
4593 4594 * then we must be dealing with a kernel mapped
4594 4595 * page which doesn't actually belong to
4595 4596 * segkmem so we punt.
4596 4597 */
4597 4598 sfmmu_mlist_exit(pml);
4598 4599 SFMMU_HASH_UNLOCK(hmebp);
4599 4600 pp = page_lookup(&kvp, (u_offset_t)saddr, SE_SHARED);
4600 4601 /* check zvp before giving up */
4601 4602 if (pp == NULL)
4602 4603 pp = page_lookup(&zvp, (u_offset_t)saddr,
4603 4604 SE_SHARED);
4604 4605
4605 4606 if (pp == NULL) {
4606 4607 ASSERT(cookie == NULL);
4607 4608 return;
4608 4609 }
4609 4610 page_unlock(pp);
4610 4611 goto rehash;
4611 4612 }
4612 4613 locked = 1;
4613 4614 }
4614 4615
4615 4616 ASSERT(PAGE_LOCKED(pp));
4616 4617
4617 4618 if (osfhmep->hme_page != pp || pp->p_vnode != vp ||
4618 4619 pp->p_offset != off) {
4619 4620 /*
4620 4621 * The page moved before we got our hands on it. Drop
4621 4622 * all the locks and try again.
4622 4623 */
4623 4624 ASSERT((flags & HAC_PAGELOCK) != 0);
4624 4625 sfmmu_mlist_exit(pml);
4625 4626 SFMMU_HASH_UNLOCK(hmebp);
4626 4627 page_unlock(pp);
4627 4628 locked = 0;
4628 4629 goto rehash;
4629 4630 }
4630 4631
4631 4632 if (!VN_ISKAS(vp)) {
4632 4633 /*
4633 4634 * This is not a segkmem page but another page which
4634 4635 * has been kernel mapped.
4635 4636 */
4636 4637 sfmmu_mlist_exit(pml);
4637 4638 SFMMU_HASH_UNLOCK(hmebp);
4638 4639 if (locked)
4639 4640 page_unlock(pp);
4640 4641 ASSERT(cookie == NULL);
4641 4642 return;
4642 4643 }
4643 4644
4644 4645 if (cookie != NULL) {
4645 4646 pahmep = (struct pa_hment *)cookie;
4646 4647 sfhmep = &pahmep->sfment;
4647 4648 } else {
4648 4649 for (sfhmep = pp->p_mapping; sfhmep != NULL;
4649 4650 sfhmep = sfhmep->hme_next) {
4650 4651
4651 4652 /*
4652 4653 * skip va<->pa mappings
4653 4654 */
4654 4655 if (!IS_PAHME(sfhmep))
4655 4656 continue;
4656 4657
4657 4658 pahmep = sfhmep->hme_data;
4658 4659 ASSERT(pahmep != NULL);
4659 4660
4660 4661 /*
4661 4662 * if pa_hment matches, remove it
4662 4663 */
4663 4664 if ((pahmep->pvt == pvt) &&
4664 4665 (pahmep->addr == vaddr) &&
4665 4666 (pahmep->len == len)) {
4666 4667 break;
4667 4668 }
4668 4669 }
4669 4670 }
4670 4671
4671 4672 if (sfhmep == NULL) {
4672 4673 if (!panicstr) {
4673 4674 panic("hat_delete_callback: pa_hment not found, pp %p",
4674 4675 (void *)pp);
4675 4676 }
4676 4677 return;
4677 4678 }
4678 4679
4679 4680 /*
4680 4681 * Note: at this point a valid kernel mapping must still be
4681 4682 * present on this page.
4682 4683 */
4683 4684 pp->p_share--;
4684 4685 if (pp->p_share <= 0)
4685 4686 panic("hat_delete_callback: zero p_share");
4686 4687
4687 4688 if (--pahmep->refcnt == 0) {
4688 4689 if (pahmep->flags != 0)
4689 4690 panic("hat_delete_callback: pa_hment is busy");
4690 4691
4691 4692 /*
4692 4693 * Remove sfhmep from the mapping list for the page.
4693 4694 */
4694 4695 if (sfhmep->hme_prev) {
4695 4696 sfhmep->hme_prev->hme_next = sfhmep->hme_next;
4696 4697 } else {
4697 4698 pp->p_mapping = sfhmep->hme_next;
4698 4699 }
4699 4700
4700 4701 if (sfhmep->hme_next)
4701 4702 sfhmep->hme_next->hme_prev = sfhmep->hme_prev;
4702 4703
4703 4704 sfmmu_mlist_exit(pml);
4704 4705 SFMMU_HASH_UNLOCK(hmebp);
4705 4706
4706 4707 if (locked)
4707 4708 page_unlock(pp);
4708 4709
4709 4710 kmem_cache_free(pa_hment_cache, pahmep);
4710 4711 return;
4711 4712 }
4712 4713
4713 4714 sfmmu_mlist_exit(pml);
4714 4715 SFMMU_HASH_UNLOCK(hmebp);
4715 4716 if (locked)
4716 4717 page_unlock(pp);
4717 4718 }
4718 4719
4719 4720 /*
4720 4721 * hat_probe returns 1 if the translation for the address 'addr' is
4721 4722 * loaded, zero otherwise.
4722 4723 *
4723 4724  * hat_probe should be used only for advisory purposes because it may
4724 4725 * occasionally return the wrong value. The implementation must guarantee that
4725 4726 * returning the wrong value is a very rare event. hat_probe is used
4726 4727 * to implement optimizations in the segment drivers.
4727 4728 *
4728 4729 */
4729 4730 int
4730 4731 hat_probe(struct hat *sfmmup, caddr_t addr)
4731 4732 {
4732 4733 pfn_t pfn;
4733 4734 tte_t tte;
4734 4735
4735 4736 ASSERT(sfmmup != NULL);
4736 4737
4737 4738 ASSERT((sfmmup == ksfmmup) || AS_LOCK_HELD(sfmmup->sfmmu_as));
4738 4739
4739 4740 if (sfmmup == ksfmmup) {
4740 4741 while ((pfn = sfmmu_vatopfn(addr, sfmmup, &tte))
4741 4742 == PFN_SUSPENDED) {
4742 4743 sfmmu_vatopfn_suspended(addr, sfmmup, &tte);
4743 4744 }
4744 4745 } else {
4745 4746 pfn = sfmmu_uvatopfn(addr, sfmmup, NULL);
4746 4747 }
4747 4748
4748 4749 if (pfn != PFN_INVALID)
4749 4750 return (1);
4750 4751 else
4751 4752 return (0);
4752 4753 }
4753 4754
4754 4755 ssize_t
4755 4756 hat_getpagesize(struct hat *sfmmup, caddr_t addr)
4756 4757 {
4757 4758 tte_t tte;
4758 4759
4759 4760 if (sfmmup == ksfmmup) {
4760 4761 if (sfmmu_vatopfn(addr, sfmmup, &tte) == PFN_INVALID) {
4761 4762 return (-1);
4762 4763 }
4763 4764 } else {
4764 4765 if (sfmmu_uvatopfn(addr, sfmmup, &tte) == PFN_INVALID) {
4765 4766 return (-1);
4766 4767 }
4767 4768 }
4768 4769
4769 4770 ASSERT(TTE_IS_VALID(&tte));
4770 4771 return (TTEBYTES(TTE_CSZ(&tte)));
4771 4772 }
4772 4773
4773 4774 uint_t
4774 4775 hat_getattr(struct hat *sfmmup, caddr_t addr, uint_t *attr)
4775 4776 {
4776 4777 tte_t tte;
4777 4778
4778 4779 if (sfmmup == ksfmmup) {
4779 4780 if (sfmmu_vatopfn(addr, sfmmup, &tte) == PFN_INVALID) {
4780 4781 tte.ll = 0;
4781 4782 }
4782 4783 } else {
4783 4784 if (sfmmu_uvatopfn(addr, sfmmup, &tte) == PFN_INVALID) {
4784 4785 tte.ll = 0;
4785 4786 }
4786 4787 }
4787 4788 if (TTE_IS_VALID(&tte)) {
4788 4789 *attr = sfmmu_ptov_attr(&tte);
4789 4790 return (0);
4790 4791 }
4791 4792 *attr = 0;
4792 4793 return ((uint_t)0xffffffff);
4793 4794 }
4794 4795
4795 4796 /*
4796 4797  * Enables more attributes on the specified address range (i.e. logical OR)
4797 4798 */
4798 4799 void
4799 4800 hat_setattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr)
4800 4801 {
4801 4802 ASSERT(hat->sfmmu_as != NULL);
4802 4803
4803 4804 sfmmu_chgattr(hat, addr, len, attr, SFMMU_SETATTR);
4804 4805 }
4805 4806
4806 4807 /*
4807 4808 * Assigns attributes to the specified address range. All the attributes
4808 4809 * are specified.
4809 4810 */
4810 4811 void
4811 4812 hat_chgattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr)
4812 4813 {
4813 4814 ASSERT(hat->sfmmu_as != NULL);
4814 4815
4815 4816 sfmmu_chgattr(hat, addr, len, attr, SFMMU_CHGATTR);
4816 4817 }
4817 4818
4818 4819 /*
4819 4820  * Remove attributes on the specified address range (i.e. logical NAND)
4820 4821 */
4821 4822 void
4822 4823 hat_clrattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr)
4823 4824 {
4824 4825 ASSERT(hat->sfmmu_as != NULL);
4825 4826
4826 4827 sfmmu_chgattr(hat, addr, len, attr, SFMMU_CLRATTR);
4827 4828 }
4828 4829
4829 4830 /*
4830 4831 * Change attributes on an address range to that specified by attr and mode.
4831 4832 */
4832 4833 static void
4833 4834 sfmmu_chgattr(struct hat *sfmmup, caddr_t addr, size_t len, uint_t attr,
4834 - int mode)
4835 + int mode)
4835 4836 {
4836 4837 struct hmehash_bucket *hmebp;
4837 4838 hmeblk_tag hblktag;
4838 4839 int hmeshift, hashno = 1;
4839 4840 struct hme_blk *hmeblkp, *list = NULL;
4840 4841 caddr_t endaddr;
4841 4842 cpuset_t cpuset;
4842 4843 demap_range_t dmr;
4843 4844
4844 4845 CPUSET_ZERO(cpuset);
4845 4846
4846 4847 ASSERT((sfmmup == ksfmmup) || AS_LOCK_HELD(sfmmup->sfmmu_as));
4847 4848 ASSERT((len & MMU_PAGEOFFSET) == 0);
4848 4849 ASSERT(((uintptr_t)addr & MMU_PAGEOFFSET) == 0);
4849 4850
4850 4851 if ((attr & PROT_USER) && (mode != SFMMU_CLRATTR) &&
4851 4852 ((addr + len) > (caddr_t)USERLIMIT)) {
4852 4853 panic("user addr %p in kernel space",
4853 4854 (void *)addr);
4854 4855 }
4855 4856
4856 4857 endaddr = addr + len;
4857 4858 hblktag.htag_id = sfmmup;
4858 4859 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
4859 4860 DEMAP_RANGE_INIT(sfmmup, &dmr);
4860 4861
4861 4862 while (addr < endaddr) {
4862 4863 hmeshift = HME_HASH_SHIFT(hashno);
4863 4864 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
4864 4865 hblktag.htag_rehash = hashno;
4865 4866 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
4866 4867
4867 4868 SFMMU_HASH_LOCK(hmebp);
4868 4869
4869 4870 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
4870 4871 if (hmeblkp != NULL) {
4871 4872 ASSERT(!hmeblkp->hblk_shared);
4872 4873 /*
4873 4874 * We've encountered a shadow hmeblk so skip the range
4874 4875 * of the next smaller mapping size.
4875 4876 */
4876 4877 if (hmeblkp->hblk_shw_bit) {
4877 4878 ASSERT(sfmmup != ksfmmup);
4878 4879 ASSERT(hashno > 1);
4879 4880 addr = (caddr_t)P2END((uintptr_t)addr,
4880 4881 TTEBYTES(hashno - 1));
4881 4882 } else {
4882 4883 addr = sfmmu_hblk_chgattr(sfmmup,
4883 4884 hmeblkp, addr, endaddr, &dmr, attr, mode);
4884 4885 }
4885 4886 SFMMU_HASH_UNLOCK(hmebp);
4886 4887 hashno = 1;
4887 4888 continue;
4888 4889 }
4889 4890 SFMMU_HASH_UNLOCK(hmebp);
4890 4891
4891 4892 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) {
4892 4893 /*
4893 4894 * We have traversed the whole list and rehashed
4894 4895 * if necessary without finding the address to chgattr.
4895 4896 * This is ok, so we increment the address by the
4896 4897 * smallest hmeblk range for kernel mappings or for
4897 4898 * user mappings with no large pages, and the largest
4898 4899 * hmeblk range, to account for shadow hmeblks, for
4899 4900 * user mappings with large pages and continue.
4900 4901 */
4901 4902 if (sfmmup == ksfmmup)
4902 4903 addr = (caddr_t)P2END((uintptr_t)addr,
4903 4904 TTEBYTES(1));
4904 4905 else
4905 4906 addr = (caddr_t)P2END((uintptr_t)addr,
4906 4907 TTEBYTES(hashno));
4907 4908 hashno = 1;
4908 4909 } else {
4909 4910 hashno++;
4910 4911 }
4911 4912 }
4912 4913
4913 4914 sfmmu_hblks_list_purge(&list, 0);
4914 4915 DEMAP_RANGE_FLUSH(&dmr);
4915 4916 cpuset = sfmmup->sfmmu_cpusran;
4916 4917 xt_sync(cpuset);
4917 4918 }
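As a rough user-space sketch of the skip idiom used above when no hmeblk is found, assuming only that P2END(x, align) yields the next align-aligned boundary strictly above x; the helper and the 64K span below are illustrative stand-ins, not kernel code:

#include <stdio.h>
#include <stdint.h>

/* local stand-in for the assumed P2END() semantics */
static uintptr_t
p2end(uintptr_t x, uintptr_t align)
{
	return ((x + align) & ~(align - 1));
}

int
main(void)
{
	uintptr_t addr = 0x12345;
	uintptr_t span = 64 * 1024;	/* e.g. the span of an 8K-tte hmeblk */

	/* no hmeblk found: skip to the end of the current span */
	printf("0x%lx -> 0x%lx\n", (unsigned long)addr,
	    (unsigned long)p2end(addr, span));
	/* already on a boundary: still advances one full span */
	printf("0x%lx -> 0x%lx\n", (unsigned long)(2 * span),
	    (unsigned long)p2end(2 * span, span));
	return (0);
}

Rounding past the end of the current candidate span is what lets the loop cross sparse regions in large steps instead of probing every page.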
4918 4919
4919 4920 /*
4920 4921  * This function chgattrs a range of addresses in an hmeblk. It returns the
4921 4922  * next address that needs to be chgattr.
4922 4923 * It should be called with the hash lock held.
4923 4924 * XXX It should be possible to optimize chgattr by not flushing every time but
4924 4925 * on the other hand:
4925 4926 * 1. do one flush crosscall.
4926 4927 * 2. only flush if we are increasing permissions (make sure this will work)
4927 4928 */
4928 4929 static caddr_t
4929 4930 sfmmu_hblk_chgattr(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
4930 - caddr_t endaddr, demap_range_t *dmrp, uint_t attr, int mode)
4931 + caddr_t endaddr, demap_range_t *dmrp, uint_t attr, int mode)
4931 4932 {
4932 4933 tte_t tte, tteattr, tteflags, ttemod;
4933 4934 struct sf_hment *sfhmep;
4934 4935 int ttesz;
4935 4936 struct page *pp = NULL;
4936 4937 kmutex_t *pml, *pmtx;
4937 4938 int ret;
4938 4939 int use_demap_range;
4939 4940 #if defined(SF_ERRATA_57)
4940 4941 int check_exec;
4941 4942 #endif
4942 4943
4943 4944 ASSERT(in_hblk_range(hmeblkp, addr));
4944 4945 ASSERT(hmeblkp->hblk_shw_bit == 0);
4945 4946 ASSERT(!hmeblkp->hblk_shared);
4946 4947
4947 4948 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
4948 4949 ttesz = get_hblk_ttesz(hmeblkp);
4949 4950
4950 4951 /*
4951 4952 * Flush the current demap region if addresses have been
4952 4953 * skipped or the page size doesn't match.
4953 4954 */
4954 4955 use_demap_range = (TTEBYTES(ttesz) == DEMAP_RANGE_PGSZ(dmrp));
4955 4956 if (use_demap_range) {
4956 4957 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr);
4957 4958 } else if (dmrp != NULL) {
4958 4959 DEMAP_RANGE_FLUSH(dmrp);
4959 4960 }
4960 4961
4961 4962 tteattr.ll = sfmmu_vtop_attr(attr, mode, &tteflags);
4962 4963 #if defined(SF_ERRATA_57)
4963 4964 check_exec = (sfmmup != ksfmmup) &&
4964 4965 AS_TYPE_64BIT(sfmmup->sfmmu_as) &&
4965 4966 TTE_IS_EXECUTABLE(&tteattr);
4966 4967 #endif
4967 4968 HBLKTOHME(sfhmep, hmeblkp, addr);
4968 4969 while (addr < endaddr) {
4969 4970 sfmmu_copytte(&sfhmep->hme_tte, &tte);
4970 4971 if (TTE_IS_VALID(&tte)) {
4971 4972 if ((tte.ll & tteflags.ll) == tteattr.ll) {
4972 4973 /*
4973 4974 * if the new attr is the same as old
4974 4975 * continue
4975 4976 */
4976 4977 goto next_addr;
4977 4978 }
4978 4979 if (!TTE_IS_WRITABLE(&tteattr)) {
4979 4980 /*
4980 4981  				 * make sure we clear the hw modify bit if we are
4981 4982  				 * removing write protections
4982 4983 */
4983 4984 tteflags.tte_intlo |= TTE_HWWR_INT;
4984 4985 }
4985 4986
4986 4987 pml = NULL;
4987 4988 pp = sfhmep->hme_page;
4988 4989 if (pp) {
4989 4990 pml = sfmmu_mlist_enter(pp);
4990 4991 }
4991 4992
4992 4993 if (pp != sfhmep->hme_page) {
4993 4994 /*
4994 4995 * tte must have been unloaded.
4995 4996 */
4996 4997 ASSERT(pml);
4997 4998 sfmmu_mlist_exit(pml);
4998 4999 continue;
4999 5000 }
5000 5001
5001 5002 ASSERT(pp == NULL || sfmmu_mlist_held(pp));
5002 5003
5003 5004 ttemod = tte;
5004 5005 ttemod.ll = (ttemod.ll & ~tteflags.ll) | tteattr.ll;
5005 5006 ASSERT(TTE_TO_TTEPFN(&ttemod) == TTE_TO_TTEPFN(&tte));
5006 5007
5007 5008 #if defined(SF_ERRATA_57)
5008 5009 if (check_exec && addr < errata57_limit)
5009 5010 ttemod.tte_exec_perm = 0;
5010 5011 #endif
5011 5012 ret = sfmmu_modifytte_try(&tte, &ttemod,
5012 5013 &sfhmep->hme_tte);
5013 5014
5014 5015 if (ret < 0) {
5015 5016 /* tte changed underneath us */
5016 5017 if (pml) {
5017 5018 sfmmu_mlist_exit(pml);
5018 5019 }
5019 5020 continue;
5020 5021 }
5021 5022
5022 5023 if (tteflags.tte_intlo & TTE_HWWR_INT) {
5023 5024 /*
5024 5025 * need to sync if we are clearing modify bit.
5025 5026 */
5026 5027 sfmmu_ttesync(sfmmup, addr, &tte, pp);
5027 5028 }
5028 5029
5029 5030 if (pp && PP_ISRO(pp)) {
5030 5031 if (tteattr.tte_intlo & TTE_WRPRM_INT) {
5031 5032 pmtx = sfmmu_page_enter(pp);
5032 5033 PP_CLRRO(pp);
5033 5034 sfmmu_page_exit(pmtx);
5034 5035 }
5035 5036 }
5036 5037
5037 5038 if (ret > 0 && use_demap_range) {
5038 5039 DEMAP_RANGE_MARKPG(dmrp, addr);
5039 5040 } else if (ret > 0) {
5040 5041 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0);
5041 5042 }
5042 5043
5043 5044 if (pml) {
5044 5045 sfmmu_mlist_exit(pml);
5045 5046 }
5046 5047 }
5047 5048 next_addr:
5048 5049 addr += TTEBYTES(ttesz);
5049 5050 sfhmep++;
5050 5051 DEMAP_RANGE_NEXTPG(dmrp);
5051 5052 }
5052 5053 return (addr);
5053 5054 }
5054 5055
5055 5056 /*
5056 5057 * This routine converts virtual attributes to physical ones. It will
5057 5058 * update the tteflags field with the tte mask corresponding to the attributes
5058 5059 * affected and it returns the new attributes. It will also clear the modify
5059 5060 * bit if we are taking away write permission. This is necessary since the
5060 5061 * modify bit is the hardware permission bit and we need to clear it in order
5061 5062 * to detect write faults.
5062 5063 */
5063 5064 static uint64_t
5064 5065 sfmmu_vtop_attr(uint_t attr, int mode, tte_t *ttemaskp)
5065 5066 {
5066 5067 tte_t ttevalue;
5067 5068
5068 5069 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR));
5069 5070
5070 5071 switch (mode) {
5071 5072 case SFMMU_CHGATTR:
5072 5073 /* all attributes specified */
5073 5074 ttevalue.tte_inthi = MAKE_TTEATTR_INTHI(attr);
5074 5075 ttevalue.tte_intlo = MAKE_TTEATTR_INTLO(attr);
5075 5076 ttemaskp->tte_inthi = TTEINTHI_ATTR;
5076 5077 ttemaskp->tte_intlo = TTEINTLO_ATTR;
5077 5078 break;
5078 5079 case SFMMU_SETATTR:
5079 5080 ASSERT(!(attr & ~HAT_PROT_MASK));
5080 5081 ttemaskp->ll = 0;
5081 5082 ttevalue.ll = 0;
5082 5083 /*
5083 5084 * a valid tte implies exec and read for sfmmu
5084 5085 * so no need to do anything about them.
5085 5086  		 * since privileged access implies user access
5086 5087 * PROT_USER doesn't make sense either.
5087 5088 */
5088 5089 if (attr & PROT_WRITE) {
5089 5090 ttemaskp->tte_intlo |= TTE_WRPRM_INT;
5090 5091 ttevalue.tte_intlo |= TTE_WRPRM_INT;
5091 5092 }
5092 5093 break;
5093 5094 case SFMMU_CLRATTR:
5094 5095 /* attributes will be nand with current ones */
5095 5096 if (attr & ~(PROT_WRITE | PROT_USER)) {
5096 5097 panic("sfmmu: attr %x not supported", attr);
5097 5098 }
5098 5099 ttemaskp->ll = 0;
5099 5100 ttevalue.ll = 0;
5100 5101 if (attr & PROT_WRITE) {
5101 5102 /* clear both writable and modify bit */
5102 5103 ttemaskp->tte_intlo |= TTE_WRPRM_INT | TTE_HWWR_INT;
5103 5104 }
5104 5105 if (attr & PROT_USER) {
5105 5106 ttemaskp->tte_intlo |= TTE_PRIV_INT;
5106 5107 ttevalue.tte_intlo |= TTE_PRIV_INT;
5107 5108 }
5108 5109 break;
5109 5110 default:
5110 5111 panic("sfmmu_vtop_attr: bad mode %x", mode);
5111 5112 }
5112 5113 ASSERT(TTE_TO_TTEPFN(&ttevalue) == 0);
5113 5114 return (ttevalue.ll);
5114 5115 }
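A minimal user-space sketch of how a caller applies the (mask, value) pair built here, mirroring the (ttemod.ll & ~tteflags.ll) | tteattr.ll step in sfmmu_hblk_chgattr() above; the bit values below are made-up stand-ins for TTE_WRPRM_INT and TTE_HWWR_INT, not the real TTE layout:

#include <stdio.h>
#include <inttypes.h>

#define	EX_WRPRM	0x1ULL	/* stand-in for TTE_WRPRM_INT (writable) */
#define	EX_HWWR		0x2ULL	/* stand-in for TTE_HWWR_INT (hw modify) */

int
main(void)
{
	uint64_t old = EX_WRPRM | EX_HWWR;	/* writable and hw-modified */
	/* SFMMU_CLRATTR of PROT_WRITE: mask covers both bits, value is 0 */
	uint64_t mask = EX_WRPRM | EX_HWWR;
	uint64_t value = 0;
	uint64_t newtte = (old & ~mask) | value;

	printf("old 0x%" PRIx64 " -> new 0x%" PRIx64 "\n", old, newtte);
	return (0);
}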
5115 5116
5116 5117 static uint_t
5117 5118 sfmmu_ptov_attr(tte_t *ttep)
5118 5119 {
5119 5120 uint_t attr;
5120 5121
5121 5122 ASSERT(TTE_IS_VALID(ttep));
5122 5123
5123 5124 attr = PROT_READ;
5124 5125
5125 5126 if (TTE_IS_WRITABLE(ttep)) {
5126 5127 attr |= PROT_WRITE;
5127 5128 }
5128 5129 if (TTE_IS_EXECUTABLE(ttep)) {
5129 5130 attr |= PROT_EXEC;
5130 5131 }
5131 5132 if (!TTE_IS_PRIVILEGED(ttep)) {
5132 5133 attr |= PROT_USER;
5133 5134 }
5134 5135 if (TTE_IS_NFO(ttep)) {
5135 5136 attr |= HAT_NOFAULT;
5136 5137 }
5137 5138 if (TTE_IS_NOSYNC(ttep)) {
5138 5139 attr |= HAT_NOSYNC;
5139 5140 }
5140 5141 if (TTE_IS_SIDEFFECT(ttep)) {
5141 5142 attr |= SFMMU_SIDEFFECT;
5142 5143 }
5143 5144 if (!TTE_IS_VCACHEABLE(ttep)) {
5144 5145 attr |= SFMMU_UNCACHEVTTE;
5145 5146 }
5146 5147 if (!TTE_IS_PCACHEABLE(ttep)) {
5147 5148 attr |= SFMMU_UNCACHEPTTE;
5148 5149 }
5149 5150 return (attr);
5150 5151 }
5151 5152
5152 5153 /*
5153 5154 * hat_chgprot is a deprecated hat call. New segment drivers
5154 5155 * should store all attributes and use hat_*attr calls.
5155 5156 *
5156 5157 * Change the protections in the virtual address range
5157 5158 * given to the specified virtual protection. If vprot is ~PROT_WRITE,
5158 5159 * then remove write permission, leaving the other
5159 5160 * permissions unchanged. If vprot is ~PROT_USER, remove user permissions.
5160 5161 *
5161 5162 */
5162 5163 void
5163 5164 hat_chgprot(struct hat *sfmmup, caddr_t addr, size_t len, uint_t vprot)
5164 5165 {
5165 5166 struct hmehash_bucket *hmebp;
5166 5167 hmeblk_tag hblktag;
5167 5168 int hmeshift, hashno = 1;
5168 5169 struct hme_blk *hmeblkp, *list = NULL;
5169 5170 caddr_t endaddr;
5170 5171 cpuset_t cpuset;
5171 5172 demap_range_t dmr;
5172 5173
5173 5174 ASSERT((len & MMU_PAGEOFFSET) == 0);
5174 5175 ASSERT(((uintptr_t)addr & MMU_PAGEOFFSET) == 0);
5175 5176
5176 5177 ASSERT(sfmmup->sfmmu_as != NULL);
5177 5178
5178 5179 CPUSET_ZERO(cpuset);
5179 5180
5180 5181 if ((vprot != (uint_t)~PROT_WRITE) && (vprot & PROT_USER) &&
5181 5182 ((addr + len) > (caddr_t)USERLIMIT)) {
5182 5183 panic("user addr %p vprot %x in kernel space",
5183 5184 (void *)addr, vprot);
5184 5185 }
5185 5186 endaddr = addr + len;
5186 5187 hblktag.htag_id = sfmmup;
5187 5188 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
5188 5189 DEMAP_RANGE_INIT(sfmmup, &dmr);
5189 5190
5190 5191 while (addr < endaddr) {
5191 5192 hmeshift = HME_HASH_SHIFT(hashno);
5192 5193 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
5193 5194 hblktag.htag_rehash = hashno;
5194 5195 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
5195 5196
5196 5197 SFMMU_HASH_LOCK(hmebp);
5197 5198
5198 5199 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
5199 5200 if (hmeblkp != NULL) {
5200 5201 ASSERT(!hmeblkp->hblk_shared);
5201 5202 /*
5202 5203 * We've encountered a shadow hmeblk so skip the range
5203 5204 * of the next smaller mapping size.
5204 5205 */
5205 5206 if (hmeblkp->hblk_shw_bit) {
5206 5207 ASSERT(sfmmup != ksfmmup);
5207 5208 ASSERT(hashno > 1);
5208 5209 addr = (caddr_t)P2END((uintptr_t)addr,
5209 5210 TTEBYTES(hashno - 1));
5210 5211 } else {
5211 5212 addr = sfmmu_hblk_chgprot(sfmmup, hmeblkp,
5212 5213 addr, endaddr, &dmr, vprot);
5213 5214 }
5214 5215 SFMMU_HASH_UNLOCK(hmebp);
5215 5216 hashno = 1;
5216 5217 continue;
5217 5218 }
5218 5219 SFMMU_HASH_UNLOCK(hmebp);
5219 5220
5220 5221 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) {
5221 5222 /*
5222 5223 * We have traversed the whole list and rehashed
5223 5224 * if necessary without finding the address to chgprot.
5224 5225 * This is ok so we increment the address by the
5225 5226 * smallest hmeblk range for kernel mappings and the
5226 5227 * largest hmeblk range, to account for shadow hmeblks,
5227 5228 * for user mappings and continue.
5228 5229 */
5229 5230 if (sfmmup == ksfmmup)
5230 5231 addr = (caddr_t)P2END((uintptr_t)addr,
5231 5232 TTEBYTES(1));
5232 5233 else
5233 5234 addr = (caddr_t)P2END((uintptr_t)addr,
5234 5235 TTEBYTES(hashno));
5235 5236 hashno = 1;
5236 5237 } else {
5237 5238 hashno++;
5238 5239 }
5239 5240 }
5240 5241
5241 5242 sfmmu_hblks_list_purge(&list, 0);
5242 5243 DEMAP_RANGE_FLUSH(&dmr);
5243 5244 cpuset = sfmmup->sfmmu_cpusran;
5244 5245 xt_sync(cpuset);
5245 5246 }
5246 5247
5247 5248 /*
5248 5249 * This function chgprots a range of addresses in an hmeblk. It returns the
5249 5250  * next address that needs to be chgprot.
5250 5251 * It should be called with the hash lock held.
5251 5252  * XXX It should be possible to optimize chgprot by not flushing every time but
5252 5253 * on the other hand:
5253 5254 * 1. do one flush crosscall.
5254 5255 * 2. only flush if we are increasing permissions (make sure this will work)
5255 5256 */
5256 5257 static caddr_t
5257 5258 sfmmu_hblk_chgprot(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
5258 - caddr_t endaddr, demap_range_t *dmrp, uint_t vprot)
5259 + caddr_t endaddr, demap_range_t *dmrp, uint_t vprot)
5259 5260 {
5260 5261 uint_t pprot;
5261 5262 tte_t tte, ttemod;
5262 5263 struct sf_hment *sfhmep;
5263 5264 uint_t tteflags;
5264 5265 int ttesz;
5265 5266 struct page *pp = NULL;
5266 5267 kmutex_t *pml, *pmtx;
5267 5268 int ret;
5268 5269 int use_demap_range;
5269 5270 #if defined(SF_ERRATA_57)
5270 5271 int check_exec;
5271 5272 #endif
5272 5273
5273 5274 ASSERT(in_hblk_range(hmeblkp, addr));
5274 5275 ASSERT(hmeblkp->hblk_shw_bit == 0);
5275 5276 ASSERT(!hmeblkp->hblk_shared);
5276 5277
5277 5278 #ifdef DEBUG
5278 5279 if (get_hblk_ttesz(hmeblkp) != TTE8K &&
5279 5280 (endaddr < get_hblk_endaddr(hmeblkp))) {
5280 5281 panic("sfmmu_hblk_chgprot: partial chgprot of large page");
5281 5282 }
5282 5283 #endif /* DEBUG */
5283 5284
5284 5285 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
5285 5286 ttesz = get_hblk_ttesz(hmeblkp);
5286 5287
5287 5288 pprot = sfmmu_vtop_prot(vprot, &tteflags);
5288 5289 #if defined(SF_ERRATA_57)
5289 5290 check_exec = (sfmmup != ksfmmup) &&
5290 5291 AS_TYPE_64BIT(sfmmup->sfmmu_as) &&
5291 5292 ((vprot & PROT_EXEC) == PROT_EXEC);
5292 5293 #endif
5293 5294 HBLKTOHME(sfhmep, hmeblkp, addr);
5294 5295
5295 5296 /*
5296 5297 * Flush the current demap region if addresses have been
5297 5298 * skipped or the page size doesn't match.
5298 5299 */
5299 5300 use_demap_range = (TTEBYTES(ttesz) == MMU_PAGESIZE);
5300 5301 if (use_demap_range) {
5301 5302 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr);
5302 5303 } else if (dmrp != NULL) {
5303 5304 DEMAP_RANGE_FLUSH(dmrp);
5304 5305 }
5305 5306
5306 5307 while (addr < endaddr) {
5307 5308 sfmmu_copytte(&sfhmep->hme_tte, &tte);
5308 5309 if (TTE_IS_VALID(&tte)) {
5309 5310 if (TTE_GET_LOFLAGS(&tte, tteflags) == pprot) {
5310 5311 /*
5311 5312 * if the new protection is the same as old
5312 5313 * continue
5313 5314 */
5314 5315 goto next_addr;
5315 5316 }
5316 5317 pml = NULL;
5317 5318 pp = sfhmep->hme_page;
5318 5319 if (pp) {
5319 5320 pml = sfmmu_mlist_enter(pp);
5320 5321 }
5321 5322 if (pp != sfhmep->hme_page) {
5322 5323 /*
5323 5324  				 * tte must have been unloaded
5324 5325 * underneath us. Recheck
5325 5326 */
5326 5327 ASSERT(pml);
5327 5328 sfmmu_mlist_exit(pml);
5328 5329 continue;
5329 5330 }
5330 5331
5331 5332 ASSERT(pp == NULL || sfmmu_mlist_held(pp));
5332 5333
5333 5334 ttemod = tte;
5334 5335 TTE_SET_LOFLAGS(&ttemod, tteflags, pprot);
5335 5336 #if defined(SF_ERRATA_57)
5336 5337 if (check_exec && addr < errata57_limit)
5337 5338 ttemod.tte_exec_perm = 0;
5338 5339 #endif
5339 5340 ret = sfmmu_modifytte_try(&tte, &ttemod,
5340 5341 &sfhmep->hme_tte);
5341 5342
5342 5343 if (ret < 0) {
5343 5344 /* tte changed underneath us */
5344 5345 if (pml) {
5345 5346 sfmmu_mlist_exit(pml);
5346 5347 }
5347 5348 continue;
5348 5349 }
5349 5350
5350 5351 if (tteflags & TTE_HWWR_INT) {
5351 5352 /*
5352 5353 * need to sync if we are clearing modify bit.
5353 5354 */
5354 5355 sfmmu_ttesync(sfmmup, addr, &tte, pp);
5355 5356 }
5356 5357
5357 5358 if (pp && PP_ISRO(pp)) {
5358 5359 if (pprot & TTE_WRPRM_INT) {
5359 5360 pmtx = sfmmu_page_enter(pp);
5360 5361 PP_CLRRO(pp);
5361 5362 sfmmu_page_exit(pmtx);
5362 5363 }
5363 5364 }
5364 5365
5365 5366 if (ret > 0 && use_demap_range) {
5366 5367 DEMAP_RANGE_MARKPG(dmrp, addr);
5367 5368 } else if (ret > 0) {
5368 5369 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0);
5369 5370 }
5370 5371
5371 5372 if (pml) {
5372 5373 sfmmu_mlist_exit(pml);
5373 5374 }
5374 5375 }
5375 5376 next_addr:
5376 5377 addr += TTEBYTES(ttesz);
5377 5378 sfhmep++;
5378 5379 DEMAP_RANGE_NEXTPG(dmrp);
5379 5380 }
5380 5381 return (addr);
5381 5382 }
5382 5383
5383 5384 /*
5384 5385 * This routine is deprecated and should only be used by hat_chgprot.
5385 5386 * The correct routine is sfmmu_vtop_attr.
5386 5387 * This routine converts virtual page protections to physical ones. It will
5387 5388 * update the tteflags field with the tte mask corresponding to the protections
5388 5389 * affected and it returns the new protections. It will also clear the modify
5389 5390 * bit if we are taking away write permission. This is necessary since the
5390 5391 * modify bit is the hardware permission bit and we need to clear it in order
5391 5392 * to detect write faults.
5392 5393 * It accepts the following special protections:
5393 5394 * ~PROT_WRITE = remove write permissions.
5394 5395 * ~PROT_USER = remove user permissions.
5395 5396 */
5396 5397 static uint_t
5397 5398 sfmmu_vtop_prot(uint_t vprot, uint_t *tteflagsp)
5398 5399 {
5399 5400 if (vprot == (uint_t)~PROT_WRITE) {
5400 5401 *tteflagsp = TTE_WRPRM_INT | TTE_HWWR_INT;
5401 5402 return (0); /* will cause wrprm to be cleared */
5402 5403 }
5403 5404 if (vprot == (uint_t)~PROT_USER) {
5404 5405 *tteflagsp = TTE_PRIV_INT;
5405 5406 return (0); /* will cause privprm to be cleared */
5406 5407 }
5407 5408 if ((vprot == 0) || (vprot == PROT_USER) ||
5408 5409 ((vprot & PROT_ALL) != vprot)) {
5409 5410 panic("sfmmu_vtop_prot -- bad prot %x", vprot);
5410 5411 }
5411 5412
5412 5413 switch (vprot) {
5413 5414 case (PROT_READ):
5414 5415 case (PROT_EXEC):
5415 5416 case (PROT_EXEC | PROT_READ):
5416 5417 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT | TTE_HWWR_INT;
5417 - return (TTE_PRIV_INT); /* set prv and clr wrt */
5418 + return (TTE_PRIV_INT); /* set prv and clr wrt */
5418 5419 case (PROT_WRITE):
5419 5420 case (PROT_WRITE | PROT_READ):
5420 5421 case (PROT_EXEC | PROT_WRITE):
5421 5422 case (PROT_EXEC | PROT_WRITE | PROT_READ):
5422 5423 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT;
5423 - return (TTE_PRIV_INT | TTE_WRPRM_INT); /* set prv and wrt */
5424 + return (TTE_PRIV_INT | TTE_WRPRM_INT); /* set prv and wrt */
5424 5425 case (PROT_USER | PROT_READ):
5425 5426 case (PROT_USER | PROT_EXEC):
5426 5427 case (PROT_USER | PROT_EXEC | PROT_READ):
5427 5428 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT | TTE_HWWR_INT;
5428 - return (0); /* clr prv and wrt */
5429 + return (0); /* clr prv and wrt */
5429 5430 case (PROT_USER | PROT_WRITE):
5430 5431 case (PROT_USER | PROT_WRITE | PROT_READ):
5431 5432 case (PROT_USER | PROT_EXEC | PROT_WRITE):
5432 5433 case (PROT_USER | PROT_EXEC | PROT_WRITE | PROT_READ):
5433 5434 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT;
5434 - return (TTE_WRPRM_INT); /* clr prv and set wrt */
5435 + return (TTE_WRPRM_INT); /* clr prv and set wrt */
5435 5436 default:
5436 5437 panic("sfmmu_vtop_prot -- bad prot %x", vprot);
5437 5438 }
5438 5439 return (0);
5439 5440 }
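A small stand-alone sketch of the ~PROT_WRITE / ~PROT_USER sentinel convention described above; the flag values are stand-ins and only the dispatch shape mirrors this routine:

#include <stdio.h>

#define	EX_PROT_WRITE	0x2u	/* stand-in flag values */
#define	EX_PROT_USER	0x8u

static const char *
describe(unsigned int vprot)
{
	if (vprot == (unsigned int)~EX_PROT_WRITE)
		return ("sentinel: remove write permission");
	if (vprot == (unsigned int)~EX_PROT_USER)
		return ("sentinel: remove user permission");
	return ("plain PROT_* combination");
}

int
main(void)
{
	printf("%s\n", describe((unsigned int)~EX_PROT_WRITE));
	printf("%s\n", describe((unsigned int)~EX_PROT_USER));
	printf("%s\n", describe(EX_PROT_WRITE | 0x1u));
	return (0);
}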
5440 5441
5441 5442 /*
5442 5443 * Alternate unload for very large virtual ranges. With a true 64 bit VA,
5443 5444 * the normal algorithm would take too long for a very large VA range with
5444 5445 * few real mappings. This routine just walks thru all HMEs in the global
5445 5446 * hash table to find and remove mappings.
5446 5447 */
5447 5448 static void
5448 -hat_unload_large_virtual(
5449 - struct hat *sfmmup,
5450 - caddr_t startaddr,
5451 - size_t len,
5452 - uint_t flags,
5453 - hat_callback_t *callback)
5449 +hat_unload_large_virtual(struct hat *sfmmup, caddr_t startaddr, size_t len,
5450 + uint_t flags, hat_callback_t *callback)
5454 5451 {
5455 5452 struct hmehash_bucket *hmebp;
5456 5453 struct hme_blk *hmeblkp;
5457 5454 struct hme_blk *pr_hblk = NULL;
5458 5455 struct hme_blk *nx_hblk;
5459 5456 struct hme_blk *list = NULL;
5460 5457 int i;
5461 5458 demap_range_t dmr, *dmrp;
5462 5459 cpuset_t cpuset;
5463 5460 caddr_t endaddr = startaddr + len;
5464 5461 caddr_t sa;
5465 5462 caddr_t ea;
5466 5463 caddr_t cb_sa[MAX_CB_ADDR];
5467 5464 caddr_t cb_ea[MAX_CB_ADDR];
5468 5465 int addr_cnt = 0;
5469 5466 int a = 0;
5470 5467
5471 5468 if (sfmmup->sfmmu_free) {
5472 5469 dmrp = NULL;
5473 5470 } else {
5474 5471 dmrp = &dmr;
5475 5472 DEMAP_RANGE_INIT(sfmmup, dmrp);
5476 5473 }
5477 5474
5478 5475 /*
5479 5476 * Loop through all the hash buckets of HME blocks looking for matches.
5480 5477 */
5481 5478 for (i = 0; i <= UHMEHASH_SZ; i++) {
5482 5479 hmebp = &uhme_hash[i];
5483 5480 SFMMU_HASH_LOCK(hmebp);
5484 5481 hmeblkp = hmebp->hmeblkp;
5485 5482 pr_hblk = NULL;
5486 5483 while (hmeblkp) {
5487 5484 nx_hblk = hmeblkp->hblk_next;
5488 5485
5489 5486 /*
5490 5487 * skip if not this context, if a shadow block or
5491 5488 * if the mapping is not in the requested range
5492 5489 */
5493 5490 if (hmeblkp->hblk_tag.htag_id != sfmmup ||
5494 5491 hmeblkp->hblk_shw_bit ||
5495 5492 (sa = (caddr_t)get_hblk_base(hmeblkp)) >= endaddr ||
5496 5493 (ea = get_hblk_endaddr(hmeblkp)) <= startaddr) {
5497 5494 pr_hblk = hmeblkp;
5498 5495 goto next_block;
5499 5496 }
5500 5497
5501 5498 ASSERT(!hmeblkp->hblk_shared);
5502 5499 /*
5503 5500 * unload if there are any current valid mappings
5504 5501 */
5505 5502 if (hmeblkp->hblk_vcnt != 0 ||
5506 5503 hmeblkp->hblk_hmecnt != 0)
5507 5504 (void) sfmmu_hblk_unload(sfmmup, hmeblkp,
5508 5505 sa, ea, dmrp, flags);
5509 5506
5510 5507 /*
5511 5508 * on unmap we also release the HME block itself, once
5512 5509 * all mappings are gone.
5513 5510 */
5514 5511 if ((flags & HAT_UNLOAD_UNMAP) != 0 &&
5515 5512 !hmeblkp->hblk_vcnt &&
5516 5513 !hmeblkp->hblk_hmecnt) {
5517 5514 ASSERT(!hmeblkp->hblk_lckcnt);
5518 5515 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
5519 5516 &list, 0);
5520 5517 } else {
5521 5518 pr_hblk = hmeblkp;
5522 5519 }
5523 5520
5524 5521 if (callback == NULL)
5525 5522 goto next_block;
5526 5523
5527 5524 /*
5528 5525 * HME blocks may span more than one page, but we may be
5529 5526 * unmapping only one page, so check for a smaller range
5530 5527 * for the callback
5531 5528 */
5532 5529 if (sa < startaddr)
5533 5530 sa = startaddr;
5534 5531 if (--ea > endaddr)
5535 5532 ea = endaddr - 1;
5536 5533
5537 5534 cb_sa[addr_cnt] = sa;
5538 5535 cb_ea[addr_cnt] = ea;
5539 5536 if (++addr_cnt == MAX_CB_ADDR) {
5540 5537 if (dmrp != NULL) {
5541 5538 DEMAP_RANGE_FLUSH(dmrp);
5542 5539 cpuset = sfmmup->sfmmu_cpusran;
5543 5540 xt_sync(cpuset);
5544 5541 }
5545 5542
5546 5543 for (a = 0; a < MAX_CB_ADDR; ++a) {
5547 5544 callback->hcb_start_addr = cb_sa[a];
5548 5545 callback->hcb_end_addr = cb_ea[a];
5549 5546 callback->hcb_function(callback);
5550 5547 }
5551 5548 addr_cnt = 0;
5552 5549 }
5553 5550
5554 5551 next_block:
5555 5552 hmeblkp = nx_hblk;
5556 5553 }
5557 5554 SFMMU_HASH_UNLOCK(hmebp);
5558 5555 }
5559 5556
5560 5557 sfmmu_hblks_list_purge(&list, 0);
5561 5558 if (dmrp != NULL) {
5562 5559 DEMAP_RANGE_FLUSH(dmrp);
5563 5560 cpuset = sfmmup->sfmmu_cpusran;
5564 5561 xt_sync(cpuset);
5565 5562 }
5566 5563
5567 5564 for (a = 0; a < addr_cnt; ++a) {
5568 5565 callback->hcb_start_addr = cb_sa[a];
5569 5566 callback->hcb_end_addr = cb_ea[a];
5570 5567 callback->hcb_function(callback);
5571 5568 }
5572 5569
5573 5570 /*
5574 5571 * Check TSB and TLB page sizes if the process isn't exiting.
5575 5572 */
5576 5573 if (!sfmmup->sfmmu_free)
5577 5574 sfmmu_check_page_sizes(sfmmup, 0);
5578 5575 }
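The callback clumping above (batch up to MAX_CB_ADDR ranges, pay for one flush/xt_sync per clump, then deliver the callbacks) reduced to a stand-alone user-space form; the batch size and helper names are stand-ins:

#include <stdio.h>

#define	EX_MAX_CB_ADDR	32		/* stand-in batch size */

typedef void (*cb_fn)(void *, void *);

struct cb_batch {
	void	*sa[EX_MAX_CB_ADDR];
	void	*ea[EX_MAX_CB_ADDR];
	int	cnt;
};

static void
batch_flush(struct cb_batch *b, cb_fn fn)
{
	int a;

	/* one expensive sync per clump would go here (xt_sync() in the kernel) */
	for (a = 0; a < b->cnt; a++)
		fn(b->sa[a], b->ea[a]);
	b->cnt = 0;
}

static void
batch_add(struct cb_batch *b, cb_fn fn, void *sa, void *ea)
{
	b->sa[b->cnt] = sa;
	b->ea[b->cnt] = ea;
	if (++b->cnt == EX_MAX_CB_ADDR)
		batch_flush(b, fn);
}

static void
report(void *start, void *end)
{
	printf("unloaded [%p, %p]\n", start, end);
}

int
main(void)
{
	struct cb_batch b = { .cnt = 0 };
	char buf[4];

	batch_add(&b, report, &buf[0], &buf[1]);
	batch_add(&b, report, &buf[2], &buf[3]);
	batch_flush(&b, report);	/* drain the tail of the batch */
	return (0);
}

The tail drain corresponds to the loop over addr_cnt after the bucket walk above, so no more than one sync is paid per MAX_CB_ADDR ranges plus one for whatever is left over.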
5579 5576
5580 5577 /*
5581 5578 * Unload all the mappings in the range [addr..addr+len). addr and len must
5582 5579 * be MMU_PAGESIZE aligned.
5583 5580 */
5584 5581
5585 5582 extern struct seg *segkmap;
5586 5583 #define ISSEGKMAP(sfmmup, addr) (sfmmup == ksfmmup && \
5587 5584 segkmap->s_base <= (addr) && (addr) < (segkmap->s_base + segkmap->s_size))
5588 5585
5589 5586
5590 5587 void
5591 -hat_unload_callback(
5592 - struct hat *sfmmup,
5593 - caddr_t addr,
5594 - size_t len,
5595 - uint_t flags,
5596 - hat_callback_t *callback)
5588 +hat_unload_callback(struct hat *sfmmup, caddr_t addr, size_t len, uint_t flags,
5589 + hat_callback_t *callback)
5597 5590 {
5598 5591 struct hmehash_bucket *hmebp;
5599 5592 hmeblk_tag hblktag;
5600 5593 int hmeshift, hashno, iskernel;
5601 5594 struct hme_blk *hmeblkp, *pr_hblk, *list = NULL;
5602 5595 caddr_t endaddr;
5603 5596 cpuset_t cpuset;
5604 5597 int addr_count = 0;
5605 5598 int a;
5606 5599 caddr_t cb_start_addr[MAX_CB_ADDR];
5607 5600 caddr_t cb_end_addr[MAX_CB_ADDR];
5608 5601 int issegkmap = ISSEGKMAP(sfmmup, addr);
5609 5602 demap_range_t dmr, *dmrp;
5610 5603
5611 5604 ASSERT(sfmmup->sfmmu_as != NULL);
5612 5605
5613 5606 ASSERT((sfmmup == ksfmmup) || (flags & HAT_UNLOAD_OTHER) || \
5614 5607 AS_LOCK_HELD(sfmmup->sfmmu_as));
5615 5608
5616 5609 ASSERT(sfmmup != NULL);
5617 5610 ASSERT((len & MMU_PAGEOFFSET) == 0);
5618 5611 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET));
5619 5612
5620 5613 /*
5621 5614 * Probing through a large VA range (say 63 bits) will be slow, even
5622 5615 * at 4 Meg steps between the probes. So, when the virtual address range
5623 5616 * is very large, search the HME entries for what to unload.
5624 5617 *
5625 5618 * len >> TTE_PAGE_SHIFT(TTE4M) is the # of 4Meg probes we'd need
5626 5619 *
5627 5620 * UHMEHASH_SZ is number of hash buckets to examine
5628 5621 *
5629 5622 */
5630 5623 if (sfmmup != KHATID && (len >> TTE_PAGE_SHIFT(TTE4M)) > UHMEHASH_SZ) {
5631 5624 hat_unload_large_virtual(sfmmup, addr, len, flags, callback);
5632 5625 return;
5633 5626 }
5634 5627
5635 5628 CPUSET_ZERO(cpuset);
5636 5629
5637 5630 /*
5638 5631 * If the process is exiting, we can save a lot of fuss since
5639 5632 * we'll flush the TLB when we free the ctx anyway.
5640 5633 */
5641 5634 if (sfmmup->sfmmu_free) {
5642 5635 dmrp = NULL;
5643 5636 } else {
5644 5637 dmrp = &dmr;
5645 5638 DEMAP_RANGE_INIT(sfmmup, dmrp);
5646 5639 }
5647 5640
5648 5641 endaddr = addr + len;
5649 5642 hblktag.htag_id = sfmmup;
5650 5643 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
5651 5644
5652 5645 /*
5653 5646 * It is likely for the vm to call unload over a wide range of
5654 5647 * addresses that are actually very sparsely populated by
5655 5648 * translations. In order to speed this up the sfmmu hat supports
5656 5649 * the concept of shadow hmeblks. Dummy large page hmeblks that
5657 5650 * correspond to actual small translations are allocated at tteload
5658 5651 * time and are referred to as shadow hmeblks. Now, during unload
5659 5652 * time, we first check if we have a shadow hmeblk for that
5660 5653 * translation. The absence of one means the corresponding address
5661 5654 * range is empty and can be skipped.
5662 5655 *
5663 5656  	 * The kernel is an exception to the above statement and that is why
5664 5657 * we don't use shadow hmeblks and hash starting from the smallest
5665 5658 * page size.
5666 5659 */
5667 5660 if (sfmmup == KHATID) {
5668 5661 iskernel = 1;
5669 5662 hashno = TTE64K;
5670 5663 } else {
5671 5664 iskernel = 0;
5672 5665 if (mmu_page_sizes == max_mmu_page_sizes) {
5673 5666 hashno = TTE256M;
5674 5667 } else {
5675 5668 hashno = TTE4M;
5676 5669 }
5677 5670 }
5678 5671 while (addr < endaddr) {
5679 5672 hmeshift = HME_HASH_SHIFT(hashno);
5680 5673 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
5681 5674 hblktag.htag_rehash = hashno;
5682 5675 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
5683 5676
5684 5677 SFMMU_HASH_LOCK(hmebp);
5685 5678
5686 5679 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list);
5687 5680 if (hmeblkp == NULL) {
5688 5681 /*
5689 5682  			 * didn't find an hmeblk. skip the appropriate
5690 5683 * address range.
5691 5684 */
5692 5685 SFMMU_HASH_UNLOCK(hmebp);
5693 5686 if (iskernel) {
5694 5687 if (hashno < mmu_hashcnt) {
5695 5688 hashno++;
5696 5689 continue;
5697 5690 } else {
5698 5691 hashno = TTE64K;
5699 5692 addr = (caddr_t)roundup((uintptr_t)addr
5700 5693 + 1, MMU_PAGESIZE64K);
5701 5694 continue;
5702 5695 }
5703 5696 }
5704 5697 addr = (caddr_t)roundup((uintptr_t)addr + 1,
5705 5698 (1 << hmeshift));
5706 5699 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) {
5707 5700 ASSERT(hashno == TTE64K);
5708 5701 continue;
5709 5702 }
5710 5703 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) {
5711 5704 hashno = TTE512K;
5712 5705 continue;
5713 5706 }
5714 5707 if (mmu_page_sizes == max_mmu_page_sizes) {
5715 5708 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) {
5716 5709 hashno = TTE4M;
5717 5710 continue;
5718 5711 }
5719 5712 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) {
5720 5713 hashno = TTE32M;
5721 5714 continue;
5722 5715 }
5723 5716 hashno = TTE256M;
5724 5717 continue;
5725 5718 } else {
5726 5719 hashno = TTE4M;
5727 5720 continue;
5728 5721 }
5729 5722 }
5730 5723 ASSERT(hmeblkp);
5731 5724 ASSERT(!hmeblkp->hblk_shared);
5732 5725 if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
5733 5726 /*
5734 5727 * If the valid count is zero we can skip the range
5735 5728 * mapped by this hmeblk.
5736 5729 * We free hblks in the case of HAT_UNMAP. HAT_UNMAP
5737 5730 * is used by segment drivers as a hint
5738 5731 * that the mapping resource won't be used any longer.
5739 5732 * The best example of this is during exit().
5740 5733 */
5741 5734 addr = (caddr_t)roundup((uintptr_t)addr + 1,
5742 5735 get_hblk_span(hmeblkp));
5743 5736 if ((flags & HAT_UNLOAD_UNMAP) ||
5744 5737 (iskernel && !issegkmap)) {
5745 5738 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
5746 5739 &list, 0);
5747 5740 }
5748 5741 SFMMU_HASH_UNLOCK(hmebp);
5749 5742
5750 5743 if (iskernel) {
5751 5744 hashno = TTE64K;
5752 5745 continue;
5753 5746 }
5754 5747 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) {
5755 5748 ASSERT(hashno == TTE64K);
5756 5749 continue;
5757 5750 }
5758 5751 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) {
5759 5752 hashno = TTE512K;
5760 5753 continue;
5761 5754 }
5762 5755 if (mmu_page_sizes == max_mmu_page_sizes) {
5763 5756 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) {
5764 5757 hashno = TTE4M;
5765 5758 continue;
5766 5759 }
5767 5760 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) {
5768 5761 hashno = TTE32M;
5769 5762 continue;
5770 5763 }
5771 5764 hashno = TTE256M;
5772 5765 continue;
5773 5766 } else {
5774 5767 hashno = TTE4M;
5775 5768 continue;
5776 5769 }
5777 5770 }
5778 5771 if (hmeblkp->hblk_shw_bit) {
5779 5772 /*
5780 5773  			 * If we encounter a shadow hmeblk we know there are
5781 5774 * smaller sized hmeblks mapping the same address space.
5782 5775 * Decrement the hash size and rehash.
5783 5776 */
5784 5777 ASSERT(sfmmup != KHATID);
5785 5778 hashno--;
5786 5779 SFMMU_HASH_UNLOCK(hmebp);
5787 5780 continue;
5788 5781 }
5789 5782
5790 5783 /*
5791 5784 * track callback address ranges.
5792 5785 * only start a new range when it's not contiguous
5793 5786 */
5794 5787 if (callback != NULL) {
5795 5788 if (addr_count > 0 &&
5796 5789 addr == cb_end_addr[addr_count - 1])
5797 5790 --addr_count;
5798 5791 else
5799 5792 cb_start_addr[addr_count] = addr;
5800 5793 }
5801 5794
5802 5795 addr = sfmmu_hblk_unload(sfmmup, hmeblkp, addr, endaddr,
5803 5796 dmrp, flags);
5804 5797
5805 5798 if (callback != NULL)
5806 5799 cb_end_addr[addr_count++] = addr;
5807 5800
5808 5801 if (((flags & HAT_UNLOAD_UNMAP) || (iskernel && !issegkmap)) &&
5809 5802 !hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
5810 5803 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, &list, 0);
5811 5804 }
5812 5805 SFMMU_HASH_UNLOCK(hmebp);
5813 5806
5814 5807 /*
5815 5808 * Notify our caller as to exactly which pages
5816 5809 * have been unloaded. We do these in clumps,
5817 5810 * to minimize the number of xt_sync()s that need to occur.
5818 5811 */
5819 5812 if (callback != NULL && addr_count == MAX_CB_ADDR) {
5820 5813 if (dmrp != NULL) {
5821 5814 DEMAP_RANGE_FLUSH(dmrp);
5822 5815 cpuset = sfmmup->sfmmu_cpusran;
5823 5816 xt_sync(cpuset);
5824 5817 }
5825 5818
5826 5819 for (a = 0; a < MAX_CB_ADDR; ++a) {
5827 5820 callback->hcb_start_addr = cb_start_addr[a];
5828 5821 callback->hcb_end_addr = cb_end_addr[a];
5829 5822 callback->hcb_function(callback);
5830 5823 }
5831 5824 addr_count = 0;
5832 5825 }
5833 5826 if (iskernel) {
5834 5827 hashno = TTE64K;
5835 5828 continue;
5836 5829 }
5837 5830 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) {
5838 5831 ASSERT(hashno == TTE64K);
5839 5832 continue;
5840 5833 }
5841 5834 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) {
5842 5835 hashno = TTE512K;
5843 5836 continue;
5844 5837 }
5845 5838 if (mmu_page_sizes == max_mmu_page_sizes) {
5846 5839 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) {
5847 5840 hashno = TTE4M;
5848 5841 continue;
5849 5842 }
5850 5843 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) {
5851 5844 hashno = TTE32M;
5852 5845 continue;
5853 5846 }
5854 5847 hashno = TTE256M;
5855 5848 } else {
5856 5849 hashno = TTE4M;
5857 5850 }
5858 5851 }
5859 5852
5860 5853 sfmmu_hblks_list_purge(&list, 0);
5861 5854 if (dmrp != NULL) {
5862 5855 DEMAP_RANGE_FLUSH(dmrp);
5863 5856 cpuset = sfmmup->sfmmu_cpusran;
5864 5857 xt_sync(cpuset);
5865 5858 }
5866 5859 if (callback && addr_count != 0) {
5867 5860 for (a = 0; a < addr_count; ++a) {
5868 5861 callback->hcb_start_addr = cb_start_addr[a];
5869 5862 callback->hcb_end_addr = cb_end_addr[a];
5870 5863 callback->hcb_function(callback);
5871 5864 }
5872 5865 }
5873 5866
5874 5867 /*
5875 5868 * Check TSB and TLB page sizes if the process isn't exiting.
5876 5869 */
5877 5870 if (!sfmmup->sfmmu_free)
5878 5871 sfmmu_check_page_sizes(sfmmup, 0);
5879 5872 }
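The probe-count heuristic near the top of hat_unload_callback() in rough numbers: a 4 Meg probe step corresponds to a shift by 22, and UHMEHASH_SZ is treated here as an opaque bucket count (the values below are illustrative, not the kernel's tunables):

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint64_t len = 1ULL << 50;	/* example: a huge, sparse VA range */
	uint64_t probes_4m = len >> 22;	/* # of 4 Meg probe steps */
	uint64_t nbuckets = 1ULL << 20;	/* made-up stand-in for UHMEHASH_SZ */

	printf("%llu probes vs %llu buckets -> %s\n",
	    (unsigned long long)probes_4m, (unsigned long long)nbuckets,
	    (probes_4m > nbuckets) ?
	    "walk the hash buckets" : "probe address by address");
	return (0);
}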
5880 5873
5881 5874 /*
5882 5875 * Unload all the mappings in the range [addr..addr+len). addr and len must
5883 5876 * be MMU_PAGESIZE aligned.
5884 5877 */
5885 5878 void
5886 5879 hat_unload(struct hat *sfmmup, caddr_t addr, size_t len, uint_t flags)
5887 5880 {
5888 5881 hat_unload_callback(sfmmup, addr, len, flags, NULL);
5889 5882 }
5890 5883
5891 5884
5892 5885 /*
5893 5886 * Find the largest mapping size for this page.
5894 5887 */
5895 5888 int
5896 5889 fnd_mapping_sz(page_t *pp)
5897 5890 {
5898 5891 int sz;
5899 5892 int p_index;
5900 5893
5901 5894 p_index = PP_MAPINDEX(pp);
5902 5895
5903 5896 sz = 0;
5904 5897 p_index >>= 1; /* don't care about 8K bit */
5905 5898 for (; p_index; p_index >>= 1) {
5906 5899 sz++;
5907 5900 }
5908 5901
5909 5902 return (sz);
5910 5903 }
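A worked example of the fnd_mapping_sz() bit scan above: drop the 8K bit, then the number of shifts needed to clear the index is the largest size code present (the bit layout shown in the comments below is an assumed stand-in):

#include <stdio.h>

static int
largest_sz(int p_index)
{
	int sz = 0;

	p_index >>= 1;			/* don't care about the 8K bit */
	for (; p_index; p_index >>= 1)
		sz++;
	return (sz);
}

int
main(void)
{
	/* assumed layout: bit 0 = 8K, bit 1 = 64K, bit 2 = 512K, bit 3 = 4M */
	printf("%d\n", largest_sz(0x1));	/* 8K only  -> 0 */
	printf("%d\n", largest_sz(0x3));	/* 8K + 64K -> 1 */
	printf("%d\n", largest_sz(0x9));	/* 8K + 4M  -> 3 */
	return (0);
}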
5911 5904
5912 5905 /*
5913 5906 * This function unloads a range of addresses for an hmeblk.
5914 5907 * It returns the next address to be unloaded.
5915 5908 * It should be called with the hash lock held.
5916 5909 */
5917 5910 static caddr_t
5918 5911 sfmmu_hblk_unload(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
5919 - caddr_t endaddr, demap_range_t *dmrp, uint_t flags)
5912 + caddr_t endaddr, demap_range_t *dmrp, uint_t flags)
5920 5913 {
5921 5914 tte_t tte, ttemod;
5922 5915 struct sf_hment *sfhmep;
5923 5916 int ttesz;
5924 5917 long ttecnt;
5925 5918 page_t *pp;
5926 5919 kmutex_t *pml;
5927 5920 int ret;
5928 5921 int use_demap_range;
5929 5922
5930 5923 ASSERT(in_hblk_range(hmeblkp, addr));
5931 5924 ASSERT(!hmeblkp->hblk_shw_bit);
5932 5925 ASSERT(sfmmup != NULL || hmeblkp->hblk_shared);
5933 5926 ASSERT(sfmmup == NULL || !hmeblkp->hblk_shared);
5934 5927 ASSERT(dmrp == NULL || !hmeblkp->hblk_shared);
5935 5928
5936 5929 #ifdef DEBUG
5937 5930 if (get_hblk_ttesz(hmeblkp) != TTE8K &&
5938 5931 (endaddr < get_hblk_endaddr(hmeblkp))) {
5939 5932 panic("sfmmu_hblk_unload: partial unload of large page");
5940 5933 }
5941 5934 #endif /* DEBUG */
5942 5935
5943 5936 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
5944 5937 ttesz = get_hblk_ttesz(hmeblkp);
5945 5938
5946 5939 use_demap_range = ((dmrp == NULL) ||
5947 5940 (TTEBYTES(ttesz) == DEMAP_RANGE_PGSZ(dmrp)));
5948 5941
5949 5942 if (use_demap_range) {
5950 5943 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr);
5951 5944 } else if (dmrp != NULL) {
5952 5945 DEMAP_RANGE_FLUSH(dmrp);
5953 5946 }
5954 5947 ttecnt = 0;
5955 5948 HBLKTOHME(sfhmep, hmeblkp, addr);
5956 5949
5957 5950 while (addr < endaddr) {
5958 5951 pml = NULL;
5959 5952 sfmmu_copytte(&sfhmep->hme_tte, &tte);
5960 5953 if (TTE_IS_VALID(&tte)) {
5961 5954 pp = sfhmep->hme_page;
5962 5955 if (pp != NULL) {
5963 5956 pml = sfmmu_mlist_enter(pp);
5964 5957 }
5965 5958
5966 5959 /*
5967 5960 * Verify if hme still points to 'pp' now that
5968 5961 * we have p_mapping lock.
5969 5962 */
5970 5963 if (sfhmep->hme_page != pp) {
5971 5964 if (pp != NULL && sfhmep->hme_page != NULL) {
5972 5965 ASSERT(pml != NULL);
5973 5966 sfmmu_mlist_exit(pml);
5974 5967 /* Re-start this iteration. */
5975 5968 continue;
5976 5969 }
5977 5970 ASSERT((pp != NULL) &&
5978 5971 (sfhmep->hme_page == NULL));
5979 5972 goto tte_unloaded;
5980 5973 }
5981 5974
5982 5975 /*
5983 5976 * This point on we have both HASH and p_mapping
5984 5977 * lock.
5985 5978 */
5986 5979 ASSERT(pp == sfhmep->hme_page);
5987 5980 ASSERT(pp == NULL || sfmmu_mlist_held(pp));
5988 5981
5989 5982 /*
5990 5983 * We need to loop on modify tte because it is
5991 5984 * possible for pagesync to come along and
5992 5985 * change the software bits beneath us.
5993 5986 *
5994 5987 * Page_unload can also invalidate the tte after
5995 5988 * we read tte outside of p_mapping lock.
5996 5989 */
5997 5990 again:
5998 5991 ttemod = tte;
5999 5992
6000 5993 TTE_SET_INVALID(&ttemod);
6001 5994 ret = sfmmu_modifytte_try(&tte, &ttemod,
6002 5995 &sfhmep->hme_tte);
6003 5996
6004 5997 if (ret <= 0) {
6005 5998 if (TTE_IS_VALID(&tte)) {
6006 5999 ASSERT(ret < 0);
6007 6000 goto again;
6008 6001 }
6009 6002 if (pp != NULL) {
6010 6003 panic("sfmmu_hblk_unload: pp = 0x%p "
6011 6004 "tte became invalid under mlist"
6012 6005 " lock = 0x%p", (void *)pp,
6013 6006 (void *)pml);
6014 6007 }
6015 6008 continue;
6016 6009 }
6017 6010
6018 6011 if (!(flags & HAT_UNLOAD_NOSYNC)) {
6019 6012 sfmmu_ttesync(sfmmup, addr, &tte, pp);
6020 6013 }
6021 6014
6022 6015 /*
6023 6016 * Ok- we invalidated the tte. Do the rest of the job.
6024 6017 */
6025 6018 ttecnt++;
6026 6019
6027 6020 if (flags & HAT_UNLOAD_UNLOCK) {
6028 6021 ASSERT(hmeblkp->hblk_lckcnt > 0);
6029 6022 atomic_dec_32(&hmeblkp->hblk_lckcnt);
6030 6023 HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK);
6031 6024 }
6032 6025
6033 6026 /*
6034 6027 * Normally we would need to flush the page
6035 6028 * from the virtual cache at this point in
6036 6029 * order to prevent a potential cache alias
6037 6030 * inconsistency.
6038 6031 * The particular scenario we need to worry
6039 6032 * about is:
6040 6033 * Given: va1 and va2 are two virtual address
6041 6034 * that alias and map the same physical
6042 6035 * address.
6043 6036 * 1. mapping exists from va1 to pa and data
6044 6037 * has been read into the cache.
6045 6038 * 2. unload va1.
6046 6039 * 3. load va2 and modify data using va2.
6047 6040  			 * 4. unload va2.
6048 6041 * 5. load va1 and reference data. Unless we
6049 6042 * flush the data cache when we unload we will
6050 6043 * get stale data.
6051 6044 * Fortunately, page coloring eliminates the
6052 6045 * above scenario by remembering the color a
6053 6046 * physical page was last or is currently
6054 6047 * mapped to. Now, we delay the flush until
6055 6048 * the loading of translations. Only when the
6056 6049 * new translation is of a different color
6057 6050 * are we forced to flush.
6058 6051 */
6059 6052 if (use_demap_range) {
6060 6053 /*
6061 6054 * Mark this page as needing a demap.
6062 6055 */
6063 6056 DEMAP_RANGE_MARKPG(dmrp, addr);
6064 6057 } else {
6065 6058 ASSERT(sfmmup != NULL);
6066 6059 ASSERT(!hmeblkp->hblk_shared);
6067 6060 sfmmu_tlb_demap(addr, sfmmup, hmeblkp,
6068 6061 sfmmup->sfmmu_free, 0);
6069 6062 }
6070 6063
6071 6064 if (pp) {
6072 6065 /*
6073 6066 * Remove the hment from the mapping list
6074 6067 */
6075 6068 ASSERT(hmeblkp->hblk_hmecnt > 0);
6076 6069
6077 6070 /*
6078 6071 * Again, we cannot
6079 6072 * ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS);
6080 6073 */
6081 6074 HME_SUB(sfhmep, pp);
6082 6075 membar_stst();
6083 6076 atomic_dec_16(&hmeblkp->hblk_hmecnt);
6084 6077 }
6085 6078
6086 6079 ASSERT(hmeblkp->hblk_vcnt > 0);
6087 6080 atomic_dec_16(&hmeblkp->hblk_vcnt);
6088 6081
6089 6082 ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt ||
6090 6083 !hmeblkp->hblk_lckcnt);
6091 6084
6092 6085 #ifdef VAC
6093 6086 if (pp && (pp->p_nrm & (P_KPMC | P_KPMS | P_TNC))) {
6094 6087 if (PP_ISTNC(pp)) {
6095 6088 /*
6096 6089  					/*
					 * If page was temporarily
6097 6090 * uncached, try to recache
6098 6091 * it. Note that HME_SUB() was
6099 6092 * called above so p_index and
6100 6093 * mlist had been updated.
6101 6094 */
6102 6095 conv_tnc(pp, ttesz);
6103 6096 } else if (pp->p_mapping == NULL) {
6104 6097 ASSERT(kpm_enable);
6105 6098 /*
6106 6099 * Page is marked to be in VAC conflict
6107 6100 * to an existing kpm mapping and/or is
6108 6101 * kpm mapped using only the regular
6109 6102 * pagesize.
6110 6103 */
6111 6104 sfmmu_kpm_hme_unload(pp);
6112 6105 }
6113 6106 }
6114 6107 #endif /* VAC */
6115 6108 } else if ((pp = sfhmep->hme_page) != NULL) {
6116 6109 /*
6117 6110 * TTE is invalid but the hme
6118 6111 * still exists. let pageunload
6119 6112 * complete its job.
6120 6113 */
6121 6114 ASSERT(pml == NULL);
6122 6115 pml = sfmmu_mlist_enter(pp);
6123 6116 if (sfhmep->hme_page != NULL) {
6124 6117 sfmmu_mlist_exit(pml);
6125 6118 continue;
6126 6119 }
6127 6120 ASSERT(sfhmep->hme_page == NULL);
6128 6121 } else if (hmeblkp->hblk_hmecnt != 0) {
6129 6122 /*
6130 6123  			 * pageunload may not have finished decrementing
6131 6124 * hblk_vcnt and hblk_hmecnt. Find page_t if any and
6132 6125 * wait for pageunload to finish. Rely on pageunload
6133 6126 * to decrement hblk_hmecnt after hblk_vcnt.
6134 6127 */
6135 6128 pfn_t pfn = TTE_TO_TTEPFN(&tte);
6136 6129 ASSERT(pml == NULL);
6137 6130 if (pf_is_memory(pfn)) {
6138 6131 pp = page_numtopp_nolock(pfn);
6139 6132 if (pp != NULL) {
6140 6133 pml = sfmmu_mlist_enter(pp);
6141 6134 sfmmu_mlist_exit(pml);
6142 6135 pml = NULL;
6143 6136 }
6144 6137 }
6145 6138 }
6146 6139
6147 6140 tte_unloaded:
6148 6141 /*
6149 6142 * At this point, the tte we are looking at
6150 6143 * should be unloaded, and hme has been unlinked
6151 6144 * from page too. This is important because in
6152 6145 * pageunload, it does ttesync() then HME_SUB.
6153 6146 * We need to make sure HME_SUB has been completed
6154 6147 * so we know ttesync() has been completed. Otherwise,
6155 6148 * at exit time, after return from hat layer, VM will
6156 6149  		 * release the as structure which hat_setstat() (called
6157 6150 * by ttesync()) needs.
6158 6151 */
6159 6152 #ifdef DEBUG
6160 6153 {
6161 6154 tte_t dtte;
6162 6155
6163 6156 ASSERT(sfhmep->hme_page == NULL);
6164 6157
6165 6158 sfmmu_copytte(&sfhmep->hme_tte, &dtte);
6166 6159 ASSERT(!TTE_IS_VALID(&dtte));
6167 6160 }
6168 6161 #endif
6169 6162
6170 6163 if (pml) {
6171 6164 sfmmu_mlist_exit(pml);
6172 6165 }
6173 6166
6174 6167 addr += TTEBYTES(ttesz);
6175 6168 sfhmep++;
6176 6169 DEMAP_RANGE_NEXTPG(dmrp);
6177 6170 }
6178 6171 /*
6179 6172 * For shared hmeblks this routine is only called when region is freed
6180 6173 * and no longer referenced. So no need to decrement ttecnt
6181 6174 * in the region structure here.
6182 6175 */
6183 6176 if (ttecnt > 0 && sfmmup != NULL) {
6184 6177 atomic_add_long(&sfmmup->sfmmu_ttecnt[ttesz], -ttecnt);
6185 6178 }
6186 6179 return (addr);
6187 6180 }
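A user-space analogue of the modify-and-retry idiom used with sfmmu_modifytte_try() above (re-read the tte and retry whenever it changed underneath us); C11 atomics and the bit value are stand-ins, not the kernel's TTE primitives:

#include <stdio.h>
#include <stdint.h>
#include <stdatomic.h>

#define	EX_VALID_BIT	0x8000000000000000ULL	/* stand-in "valid" bit */

/* retry until the cleared value lands on an unchanged tte */
static void
clear_valid(_Atomic uint64_t *ttep)
{
	uint64_t tte = atomic_load(ttep);
	uint64_t ttemod;

	do {
		ttemod = tte & ~EX_VALID_BIT;	/* TTE_SET_INVALID() analogue */
	} while (!atomic_compare_exchange_weak(ttep, &tte, ttemod));
}

int
main(void)
{
	_Atomic uint64_t tte = EX_VALID_BIT | 0x1234ULL;

	clear_valid(&tte);
	printf("0x%llx\n", (unsigned long long)atomic_load(&tte));
	return (0);
}

The ret < 0 "tte changed underneath us" paths above play the same role as the failed compare-exchange here.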
6188 6181
6189 6182 /*
6190 6183 * Invalidate a virtual address range for the local CPU.
6191 6184 * For best performance ensure that the va range is completely
6192 6185 * mapped, otherwise the entire TLB will be flushed.
6193 6186 */
6194 6187 void
6195 6188 hat_flush_range(struct hat *sfmmup, caddr_t va, size_t size)
6196 6189 {
6197 6190 ssize_t sz;
6198 6191 caddr_t endva = va + size;
6199 6192
6200 6193 while (va < endva) {
6201 6194 sz = hat_getpagesize(sfmmup, va);
6202 6195 if (sz < 0) {
6203 6196 vtag_flushall();
6204 6197 break;
6205 6198 }
6206 6199 vtag_flushpage(va, (uint64_t)sfmmup);
6207 6200 va += sz;
6208 6201 }
6209 6202 }
6210 6203
6211 6204 /*
6212 6205 * Synchronize all the mappings in the range [addr..addr+len).
6213 6206 * Can be called with clearflag having two states:
6214 6207 * HAT_SYNC_DONTZERO means just return the rm stats
6215 6208 * HAT_SYNC_ZERORM means zero rm bits in the tte and return the stats
6216 6209 */
6217 6210 void
6218 6211 hat_sync(struct hat *sfmmup, caddr_t addr, size_t len, uint_t clearflag)
6219 6212 {
6220 6213 struct hmehash_bucket *hmebp;
6221 6214 hmeblk_tag hblktag;
6222 6215 int hmeshift, hashno = 1;
6223 6216 struct hme_blk *hmeblkp, *list = NULL;
6224 6217 caddr_t endaddr;
6225 6218 cpuset_t cpuset;
6226 6219
6227 6220 ASSERT((sfmmup == ksfmmup) || AS_LOCK_HELD(sfmmup->sfmmu_as));
6228 6221 ASSERT((len & MMU_PAGEOFFSET) == 0);
6229 6222 ASSERT((clearflag == HAT_SYNC_DONTZERO) ||
6230 6223 (clearflag == HAT_SYNC_ZERORM));
6231 6224
6232 6225 CPUSET_ZERO(cpuset);
6233 6226
6234 6227 endaddr = addr + len;
6235 6228 hblktag.htag_id = sfmmup;
6236 6229 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
6237 6230
6238 6231 /*
6239 6232 * Spitfire supports 4 page sizes.
6240 6233 * Most pages are expected to be of the smallest page
6241 6234 * size (8K) and these will not need to be rehashed. 64K
6242 6235  	 * pages also don't need to be rehashed because an hmeblk
6243 6236  	 * spans 64K of address space. 512K pages might need 1 rehash
6244 6237  	 * and 4M pages 2 rehashes.
6245 6238 */
6246 6239 while (addr < endaddr) {
6247 6240 hmeshift = HME_HASH_SHIFT(hashno);
6248 6241 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
6249 6242 hblktag.htag_rehash = hashno;
6250 6243 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
6251 6244
6252 6245 SFMMU_HASH_LOCK(hmebp);
6253 6246
6254 6247 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
6255 6248 if (hmeblkp != NULL) {
6256 6249 ASSERT(!hmeblkp->hblk_shared);
6257 6250 /*
6258 6251 * We've encountered a shadow hmeblk so skip the range
6259 6252 * of the next smaller mapping size.
6260 6253 */
6261 6254 if (hmeblkp->hblk_shw_bit) {
6262 6255 ASSERT(sfmmup != ksfmmup);
6263 6256 ASSERT(hashno > 1);
6264 6257 addr = (caddr_t)P2END((uintptr_t)addr,
6265 6258 TTEBYTES(hashno - 1));
6266 6259 } else {
6267 6260 addr = sfmmu_hblk_sync(sfmmup, hmeblkp,
6268 6261 addr, endaddr, clearflag);
6269 6262 }
6270 6263 SFMMU_HASH_UNLOCK(hmebp);
6271 6264 hashno = 1;
6272 6265 continue;
6273 6266 }
6274 6267 SFMMU_HASH_UNLOCK(hmebp);
6275 6268
6276 6269 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) {
6277 6270 /*
6278 6271 * We have traversed the whole list and rehashed
6279 6272 * if necessary without finding the address to sync.
6280 6273 * This is ok so we increment the address by the
6281 6274 * smallest hmeblk range for kernel mappings and the
6282 6275 * largest hmeblk range, to account for shadow hmeblks,
6283 6276 * for user mappings and continue.
6284 6277 */
6285 6278 if (sfmmup == ksfmmup)
6286 6279 addr = (caddr_t)P2END((uintptr_t)addr,
6287 6280 TTEBYTES(1));
6288 6281 else
6289 6282 addr = (caddr_t)P2END((uintptr_t)addr,
6290 6283 TTEBYTES(hashno));
6291 6284 hashno = 1;
6292 6285 } else {
6293 6286 hashno++;
6294 6287 }
6295 6288 }
6296 6289 sfmmu_hblks_list_purge(&list, 0);
6297 6290 cpuset = sfmmup->sfmmu_cpusran;
6298 6291 xt_sync(cpuset);
6299 6292 }
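For reference, a minimal hypothetical caller sketch of the interface above (the function and variable names are illustrative, not part of this change): gather the rm stats for a page-aligned user range first, then clear the hardware ref/mod bits on a second pass.

/*
 * Illustrative only: exercise both clearflag modes of hat_sync().
 */
static void
example_sync_range(struct as *as, caddr_t addr, size_t len)
{
	ASSERT((len & MMU_PAGEOFFSET) == 0);

	AS_LOCK_ENTER(as, RW_READER);
	/* Fold the current ref/mod state into the page structs/stats. */
	hat_sync(as->a_hat, addr, len, HAT_SYNC_DONTZERO);
	/* Same walk, but also clear the rm bits in the TTEs. */
	hat_sync(as->a_hat, addr, len, HAT_SYNC_ZERORM);
	AS_LOCK_EXIT(as);
}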
6300 6293
6301 6294 static caddr_t
6302 6295 sfmmu_hblk_sync(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
6303 - caddr_t endaddr, int clearflag)
6296 + caddr_t endaddr, int clearflag)
6304 6297 {
6305 6298 tte_t tte, ttemod;
6306 6299 struct sf_hment *sfhmep;
6307 6300 int ttesz;
6308 6301 struct page *pp;
6309 6302 kmutex_t *pml;
6310 6303 int ret;
6311 6304
6312 6305 ASSERT(hmeblkp->hblk_shw_bit == 0);
6313 6306 ASSERT(!hmeblkp->hblk_shared);
6314 6307
6315 6308 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
6316 6309
6317 6310 ttesz = get_hblk_ttesz(hmeblkp);
6318 6311 HBLKTOHME(sfhmep, hmeblkp, addr);
6319 6312
6320 6313 while (addr < endaddr) {
6321 6314 sfmmu_copytte(&sfhmep->hme_tte, &tte);
6322 6315 if (TTE_IS_VALID(&tte)) {
6323 6316 pml = NULL;
6324 6317 pp = sfhmep->hme_page;
6325 6318 if (pp) {
6326 6319 pml = sfmmu_mlist_enter(pp);
6327 6320 }
6328 6321 if (pp != sfhmep->hme_page) {
6329 6322 /*
6330 6323 				 * tte must have been unloaded
6331 6324 * underneath us. Recheck
6332 6325 */
6333 6326 ASSERT(pml);
6334 6327 sfmmu_mlist_exit(pml);
6335 6328 continue;
6336 6329 }
6337 6330
6338 6331 ASSERT(pp == NULL || sfmmu_mlist_held(pp));
6339 6332
6340 6333 if (clearflag == HAT_SYNC_ZERORM) {
6341 6334 ttemod = tte;
6342 6335 TTE_CLR_RM(&ttemod);
6343 6336 ret = sfmmu_modifytte_try(&tte, &ttemod,
6344 6337 &sfhmep->hme_tte);
6345 6338 if (ret < 0) {
6346 6339 if (pml) {
6347 6340 sfmmu_mlist_exit(pml);
6348 6341 }
6349 6342 continue;
6350 6343 }
6351 6344
6352 6345 if (ret > 0) {
6353 6346 sfmmu_tlb_demap(addr, sfmmup,
6354 6347 hmeblkp, 0, 0);
6355 6348 }
6356 6349 }
6357 6350 sfmmu_ttesync(sfmmup, addr, &tte, pp);
6358 6351 if (pml) {
6359 6352 sfmmu_mlist_exit(pml);
6360 6353 }
6361 6354 }
6362 6355 addr += TTEBYTES(ttesz);
6363 6356 sfhmep++;
6364 6357 }
6365 6358 return (addr);
6366 6359 }
6367 6360
6368 6361 /*
6369 6362 * This function will sync a tte to the page struct and it will
6370 6363 * update the hat stats. Currently it allows us to pass a NULL pp
6371 6364 * and we will simply update the stats. We may want to change this
6372 6365 * so we only keep stats for pages backed by pp's.
6373 6366 */
6374 6367 static void
6375 6368 sfmmu_ttesync(struct hat *sfmmup, caddr_t addr, tte_t *ttep, page_t *pp)
6376 6369 {
6377 6370 uint_t rm = 0;
6378 - int sz;
6371 + int sz;
6379 6372 pgcnt_t npgs;
6380 6373
6381 6374 ASSERT(TTE_IS_VALID(ttep));
6382 6375
6383 6376 if (TTE_IS_NOSYNC(ttep)) {
6384 6377 return;
6385 6378 }
6386 6379
6387 6380 if (TTE_IS_REF(ttep)) {
6388 6381 rm = P_REF;
6389 6382 }
6390 6383 if (TTE_IS_MOD(ttep)) {
6391 6384 rm |= P_MOD;
6392 6385 }
6393 6386
6394 6387 if (rm == 0) {
6395 6388 return;
6396 6389 }
6397 6390
6398 6391 sz = TTE_CSZ(ttep);
6399 6392 if (sfmmup != NULL && sfmmup->sfmmu_rmstat) {
6400 6393 int i;
6401 6394 caddr_t vaddr = addr;
6402 6395
6403 6396 for (i = 0; i < TTEPAGES(sz); i++, vaddr += MMU_PAGESIZE) {
6404 6397 hat_setstat(sfmmup->sfmmu_as, vaddr, MMU_PAGESIZE, rm);
6405 6398 }
6406 6399
6407 6400 }
6408 6401
6409 6402 /*
6410 6403 * XXX I want to use cas to update nrm bits but they
6411 6404 * currently belong in common/vm and not in hat where
6412 6405 * they should be.
6413 6406 * The nrm bits are protected by the same mutex as
6414 6407 * the one that protects the page's mapping list.
6415 6408 */
6416 6409 if (!pp)
6417 6410 return;
6418 6411 ASSERT(sfmmu_mlist_held(pp));
6419 6412 /*
6420 6413 * If the tte is for a large page, we need to sync all the
6421 6414 * pages covered by the tte.
6422 6415 */
6423 6416 if (sz != TTE8K) {
6424 6417 ASSERT(pp->p_szc != 0);
6425 6418 pp = PP_GROUPLEADER(pp, sz);
6426 6419 ASSERT(sfmmu_mlist_held(pp));
6427 6420 }
6428 6421
6429 6422 /* Get number of pages from tte size. */
6430 6423 npgs = TTEPAGES(sz);
6431 6424
6432 6425 do {
6433 6426 ASSERT(pp);
6434 6427 ASSERT(sfmmu_mlist_held(pp));
6435 6428 if (((rm & P_REF) != 0 && !PP_ISREF(pp)) ||
6436 6429 ((rm & P_MOD) != 0 && !PP_ISMOD(pp)))
6437 6430 hat_page_setattr(pp, rm);
6438 6431
6439 6432 /*
6440 6433 * Are we done? If not, we must have a large mapping.
6441 6434 * For large mappings we need to sync the rest of the pages
6442 6435 * covered by this tte; goto the next page.
6443 6436 */
6444 6437 } while (--npgs > 0 && (pp = PP_PAGENEXT(pp)));
6445 6438 }
6446 6439
6447 6440 /*
6448 6441 * Execute pre-callback handler of each pa_hment linked to pp
6449 6442 *
6450 6443 * Inputs:
6451 6444 * flag: either HAT_PRESUSPEND or HAT_SUSPEND.
6452 6445 * capture_cpus: pointer to return value (below)
6453 6446 *
6454 6447 * Returns:
6455 6448 * Propagates the subsystem callback return values back to the caller;
6456 6449 * returns 0 on success. If capture_cpus is non-NULL, the value returned
6457 6450 * is zero if all of the pa_hments are of a type that do not require
6458 6451 * capturing CPUs prior to suspending the mapping, else it is 1.
6459 6452 */
6460 6453 static int
6461 6454 hat_pageprocess_precallbacks(struct page *pp, uint_t flag, int *capture_cpus)
6462 6455 {
6463 6456 struct sf_hment *sfhmep;
6464 6457 struct pa_hment *pahmep;
6465 6458 int (*f)(caddr_t, uint_t, uint_t, void *);
6466 6459 int ret;
6467 6460 id_t id;
6468 6461 int locked = 0;
6469 6462 kmutex_t *pml;
6470 6463
6471 6464 ASSERT(PAGE_EXCL(pp));
6472 6465 if (!sfmmu_mlist_held(pp)) {
6473 6466 pml = sfmmu_mlist_enter(pp);
6474 6467 locked = 1;
6475 6468 }
6476 6469
6477 6470 if (capture_cpus)
6478 6471 *capture_cpus = 0;
6479 6472
6480 6473 top:
6481 6474 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) {
6482 6475 /*
6483 6476 * skip sf_hments corresponding to VA<->PA mappings;
6484 6477 * for pa_hment's, hme_tte.ll is zero
6485 6478 */
6486 6479 if (!IS_PAHME(sfhmep))
6487 6480 continue;
6488 6481
6489 6482 pahmep = sfhmep->hme_data;
6490 6483 ASSERT(pahmep != NULL);
6491 6484
6492 6485 /*
6493 6486 * skip if pre-handler has been called earlier in this loop
6494 6487 */
6495 6488 if (pahmep->flags & flag)
6496 6489 continue;
6497 6490
6498 6491 id = pahmep->cb_id;
6499 6492 ASSERT(id >= (id_t)0 && id < sfmmu_cb_nextid);
6500 6493 if (capture_cpus && sfmmu_cb_table[id].capture_cpus != 0)
6501 6494 *capture_cpus = 1;
6502 6495 if ((f = sfmmu_cb_table[id].prehandler) == NULL) {
6503 6496 pahmep->flags |= flag;
6504 6497 continue;
6505 6498 }
6506 6499
6507 6500 /*
6508 6501 * Drop the mapping list lock to avoid locking order issues.
6509 6502 */
6510 6503 if (locked)
6511 6504 sfmmu_mlist_exit(pml);
6512 6505
6513 6506 ret = f(pahmep->addr, pahmep->len, flag, pahmep->pvt);
6514 6507 if (ret != 0)
6515 6508 return (ret); /* caller must do the cleanup */
6516 6509
6517 6510 if (locked) {
6518 6511 pml = sfmmu_mlist_enter(pp);
6519 6512 pahmep->flags |= flag;
6520 6513 goto top;
6521 6514 }
6522 6515
6523 6516 pahmep->flags |= flag;
6524 6517 }
6525 6518
6526 6519 if (locked)
6527 6520 sfmmu_mlist_exit(pml);
6528 6521
6529 6522 return (0);
6530 6523 }
6531 6524
6532 6525 /*
6533 6526 * Execute post-callback handler of each pa_hment linked to pp
6534 6527 *
6535 6528 * Same overall assumptions and restrictions apply as for
6536 6529 * hat_pageprocess_precallbacks().
6537 6530 */
6538 6531 static void
6539 6532 hat_pageprocess_postcallbacks(struct page *pp, uint_t flag)
6540 6533 {
6541 6534 pfn_t pgpfn = pp->p_pagenum;
6542 6535 pfn_t pgmask = btop(page_get_pagesize(pp->p_szc)) - 1;
6543 6536 pfn_t newpfn;
6544 6537 struct sf_hment *sfhmep;
6545 6538 struct pa_hment *pahmep;
6546 6539 int (*f)(caddr_t, uint_t, uint_t, void *, pfn_t);
6547 6540 id_t id;
6548 6541 int locked = 0;
6549 6542 kmutex_t *pml;
6550 6543
6551 6544 ASSERT(PAGE_EXCL(pp));
6552 6545 if (!sfmmu_mlist_held(pp)) {
6553 6546 pml = sfmmu_mlist_enter(pp);
6554 6547 locked = 1;
6555 6548 }
6556 6549
6557 6550 top:
6558 6551 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) {
6559 6552 /*
6560 6553 * skip sf_hments corresponding to VA<->PA mappings;
6561 6554 * for pa_hment's, hme_tte.ll is zero
6562 6555 */
6563 6556 if (!IS_PAHME(sfhmep))
6564 6557 continue;
6565 6558
6566 6559 pahmep = sfhmep->hme_data;
6567 6560 ASSERT(pahmep != NULL);
6568 6561
6569 6562 if ((pahmep->flags & flag) == 0)
6570 6563 continue;
6571 6564
6572 6565 pahmep->flags &= ~flag;
6573 6566
6574 6567 id = pahmep->cb_id;
6575 6568 ASSERT(id >= (id_t)0 && id < sfmmu_cb_nextid);
6576 6569 if ((f = sfmmu_cb_table[id].posthandler) == NULL)
6577 6570 continue;
6578 6571
6579 6572 /*
6580 6573 * Convert the base page PFN into the constituent PFN
6581 6574 * which is needed by the callback handler.
6582 6575 */
6583 6576 newpfn = pgpfn | (btop((uintptr_t)pahmep->addr) & pgmask);
6584 6577
6585 6578 /*
6586 6579 * Drop the mapping list lock to avoid locking order issues.
6587 6580 */
6588 6581 if (locked)
6589 6582 sfmmu_mlist_exit(pml);
6590 6583
6591 6584 if (f(pahmep->addr, pahmep->len, flag, pahmep->pvt, newpfn)
6592 6585 != 0)
6593 6586 panic("sfmmu: posthandler failed");
6594 6587
6595 6588 if (locked) {
6596 6589 pml = sfmmu_mlist_enter(pp);
6597 6590 goto top;
6598 6591 }
6599 6592 }
6600 6593
6601 6594 if (locked)
6602 6595 sfmmu_mlist_exit(pml);
6603 6596 }
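To make the callback contract above concrete, here is a hypothetical consumer sketch. The handler shapes match the prehandler and posthandler function-pointer types invoked by the two routines above; the registration step and the pvt payload are assumed and not shown.

/*
 * Hypothetical pa_hment consumer handlers (illustrative only).
 */
/*ARGSUSED*/
static int
example_prehandler(caddr_t addr, uint_t len, uint_t flag, void *pvt)
{
	if (flag == HAT_PRESUSPEND) {
		/* May block: quiesce device access to [addr, addr + len). */
		return (0);
	}

	/* HAT_SUSPEND: CPUs may be captured; must not block here. */
	return (0);
}

/*ARGSUSED*/
static int
example_posthandler(caddr_t addr, uint_t len, uint_t flag, void *pvt,
    pfn_t newpfn)
{
	/* Retarget the device at the constituent PFN of the new page. */
	return (0);
}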
6604 6597
6605 6598 /*
6606 6599 * Suspend locked kernel mapping
6607 6600 */
6608 6601 void
6609 6602 hat_pagesuspend(struct page *pp)
6610 6603 {
6611 6604 struct sf_hment *sfhmep;
6612 6605 sfmmu_t *sfmmup;
6613 6606 tte_t tte, ttemod;
6614 6607 struct hme_blk *hmeblkp;
6615 6608 caddr_t addr;
6616 6609 int index, cons;
6617 6610 cpuset_t cpuset;
6618 6611
6619 6612 ASSERT(PAGE_EXCL(pp));
6620 6613 ASSERT(sfmmu_mlist_held(pp));
6621 6614
6622 6615 mutex_enter(&kpr_suspendlock);
6623 6616
6624 6617 /*
6625 6618 * We're about to suspend a kernel mapping so mark this thread as
6626 6619 * non-traceable by DTrace. This prevents us from running into issues
6627 6620 * with probe context trying to touch a suspended page
6628 6621 * in the relocation codepath itself.
6629 6622 */
6630 6623 curthread->t_flag |= T_DONTDTRACE;
6631 6624
6632 6625 index = PP_MAPINDEX(pp);
6633 6626 cons = TTE8K;
6634 6627
6635 6628 retry:
6636 6629 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) {
6637 6630
6638 6631 if (IS_PAHME(sfhmep))
6639 6632 continue;
6640 6633
6641 6634 if (get_hblk_ttesz(sfmmu_hmetohblk(sfhmep)) != cons)
6642 6635 continue;
6643 6636
6644 6637 /*
6645 6638 * Loop until we successfully set the suspend bit in
6646 6639 * the TTE.
6647 6640 */
6648 6641 again:
6649 6642 sfmmu_copytte(&sfhmep->hme_tte, &tte);
6650 6643 ASSERT(TTE_IS_VALID(&tte));
6651 6644
6652 6645 ttemod = tte;
6653 6646 TTE_SET_SUSPEND(&ttemod);
6654 6647 if (sfmmu_modifytte_try(&tte, &ttemod,
6655 6648 &sfhmep->hme_tte) < 0)
6656 6649 goto again;
6657 6650
6658 6651 /*
6659 6652 * Invalidate TSB entry
6660 6653 */
6661 6654 hmeblkp = sfmmu_hmetohblk(sfhmep);
6662 6655
6663 6656 sfmmup = hblktosfmmu(hmeblkp);
6664 6657 ASSERT(sfmmup == ksfmmup);
6665 6658 ASSERT(!hmeblkp->hblk_shared);
6666 6659
6667 6660 addr = tte_to_vaddr(hmeblkp, tte);
6668 6661
6669 6662 /*
6670 6663 * No need to make sure that the TSB for this sfmmu is
6671 6664 * not being relocated since it is ksfmmup and thus it
6672 6665 * will never be relocated.
6673 6666 */
6674 6667 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0);
6675 6668
6676 6669 /*
6677 6670 * Update xcall stats
6678 6671 */
6679 6672 cpuset = cpu_ready_set;
6680 6673 CPUSET_DEL(cpuset, CPU->cpu_id);
6681 6674
6682 6675 /* LINTED: constant in conditional context */
6683 6676 SFMMU_XCALL_STATS(ksfmmup);
6684 6677
6685 6678 /*
6686 6679 		 * Flush TLB entry on remote CPUs
6687 6680 */
6688 6681 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr,
6689 6682 (uint64_t)ksfmmup);
6690 6683 xt_sync(cpuset);
6691 6684
6692 6685 /*
6693 6686 * Flush TLB entry on local CPU
6694 6687 */
6695 6688 vtag_flushpage(addr, (uint64_t)ksfmmup);
6696 6689 }
6697 6690
6698 6691 while (index != 0) {
6699 6692 index = index >> 1;
6700 6693 if (index != 0)
6701 6694 cons++;
6702 6695 if (index & 0x1) {
6703 6696 pp = PP_GROUPLEADER(pp, cons);
6704 6697 goto retry;
6705 6698 }
6706 6699 }
6707 6700 }
6708 6701
6709 6702 #ifdef DEBUG
6710 6703
6711 6704 #define N_PRLE 1024
6712 6705 struct prle {
6713 6706 page_t *targ;
6714 6707 page_t *repl;
6715 6708 int status;
6716 6709 int pausecpus;
6717 6710 hrtime_t whence;
6718 6711 };
6719 6712
6720 6713 static struct prle page_relocate_log[N_PRLE];
6721 6714 static int prl_entry;
6722 6715 static kmutex_t prl_mutex;
6723 6716
6724 6717 #define PAGE_RELOCATE_LOG(t, r, s, p) \
6725 6718 mutex_enter(&prl_mutex); \
6726 6719 page_relocate_log[prl_entry].targ = *(t); \
6727 6720 page_relocate_log[prl_entry].repl = *(r); \
6728 6721 page_relocate_log[prl_entry].status = (s); \
6729 6722 page_relocate_log[prl_entry].pausecpus = (p); \
6730 6723 page_relocate_log[prl_entry].whence = gethrtime(); \
6731 6724 prl_entry = (prl_entry == (N_PRLE - 1))? 0 : prl_entry + 1; \
6732 6725 mutex_exit(&prl_mutex);
6733 6726
6734 6727 #else /* !DEBUG */
6735 6728 #define PAGE_RELOCATE_LOG(t, r, s, p)
6736 6729 #endif
6737 6730
6738 6731 /*
6739 6732 * Core Kernel Page Relocation Algorithm
6740 6733 *
6741 6734 * Input:
6742 6735 *
6743 - * target : constituent pages are SE_EXCL locked.
6736 + * target : constituent pages are SE_EXCL locked.
6744 6737 * replacement: constituent pages are SE_EXCL locked.
6745 6738 *
6746 6739 * Output:
6747 6740 *
6748 6741 * nrelocp: number of pages relocated
6749 6742 */
6750 6743 int
6751 6744 hat_page_relocate(page_t **target, page_t **replacement, spgcnt_t *nrelocp)
6752 6745 {
6753 6746 page_t *targ, *repl;
6754 6747 page_t *tpp, *rpp;
6755 6748 kmutex_t *low, *high;
6756 6749 spgcnt_t npages, i;
6757 6750 page_t *pl = NULL;
6758 6751 int old_pil;
6759 6752 cpuset_t cpuset;
6760 6753 int cap_cpus;
6761 6754 int ret;
6762 6755 #ifdef VAC
6763 6756 int cflags = 0;
6764 6757 #endif
6765 6758
6766 6759 if (!kcage_on || PP_ISNORELOC(*target)) {
6767 6760 PAGE_RELOCATE_LOG(target, replacement, EAGAIN, -1);
6768 6761 return (EAGAIN);
6769 6762 }
6770 6763
6771 6764 mutex_enter(&kpr_mutex);
6772 6765 kreloc_thread = curthread;
6773 6766
6774 6767 targ = *target;
6775 6768 repl = *replacement;
6776 6769 ASSERT(repl != NULL);
6777 6770 ASSERT(targ->p_szc == repl->p_szc);
6778 6771
6779 6772 npages = page_get_pagecnt(targ->p_szc);
6780 6773
6781 6774 /*
6782 6775 * unload VA<->PA mappings that are not locked
6783 6776 */
6784 6777 tpp = targ;
6785 6778 for (i = 0; i < npages; i++) {
6786 6779 (void) hat_pageunload(tpp, SFMMU_KERNEL_RELOC);
6787 6780 tpp++;
6788 6781 }
6789 6782
6790 6783 /*
6791 6784 * Do "presuspend" callbacks, in a context from which we can still
6792 6785 * block as needed. Note that we don't hold the mapping list lock
6793 6786 * of "targ" at this point due to potential locking order issues;
6794 6787 * we assume that between the hat_pageunload() above and holding
6795 6788 * the SE_EXCL lock that the mapping list *cannot* change at this
6796 6789 * point.
6797 6790 */
6798 6791 ret = hat_pageprocess_precallbacks(targ, HAT_PRESUSPEND, &cap_cpus);
6799 6792 if (ret != 0) {
6800 6793 /*
6801 6794 * EIO translates to fatal error, for all others cleanup
6802 6795 * and return EAGAIN.
6803 6796 */
6804 6797 ASSERT(ret != EIO);
6805 6798 hat_pageprocess_postcallbacks(targ, HAT_POSTUNSUSPEND);
6806 6799 PAGE_RELOCATE_LOG(target, replacement, ret, -1);
6807 6800 kreloc_thread = NULL;
6808 6801 mutex_exit(&kpr_mutex);
6809 6802 return (EAGAIN);
6810 6803 }
6811 6804
6812 6805 /*
6813 6806 * acquire p_mapping list lock for both the target and replacement
6814 6807 * root pages.
6815 6808 *
6816 6809 * low and high refer to the need to grab the mlist locks in a
6817 6810 	 * low and high refer to the need to grab the mlist locks in a
6818 6811 	 * specific order to prevent race conditions. Thus the
6819 6812 *
6820 6813 	 * This will block hat_unload from accessing the p_mapping list. Since
6821 6814 * we have SE_EXCL lock, hat_memload and hat_pageunload will be
6822 6815 * blocked. Thus, no one else will be accessing the p_mapping list
6823 6816 * while we suspend and reload the locked mapping below.
6824 6817 */
6825 6818 tpp = targ;
6826 6819 rpp = repl;
6827 6820 sfmmu_mlist_reloc_enter(tpp, rpp, &low, &high);
6828 6821
6829 6822 kpreempt_disable();
6830 6823
6831 6824 /*
6832 6825 * We raise our PIL to 13 so that we don't get captured by
6833 6826 * another CPU or pinned by an interrupt thread. We can't go to
6834 6827 * PIL 14 since the nexus driver(s) may need to interrupt at
6835 6828 * that level in the case of IOMMU pseudo mappings.
6836 6829 */
6837 6830 cpuset = cpu_ready_set;
6838 6831 CPUSET_DEL(cpuset, CPU->cpu_id);
6839 6832 if (!cap_cpus || CPUSET_ISNULL(cpuset)) {
6840 6833 old_pil = splr(XCALL_PIL);
6841 6834 } else {
6842 6835 old_pil = -1;
6843 6836 xc_attention(cpuset);
6844 6837 }
6845 6838 ASSERT(getpil() == XCALL_PIL);
6846 6839
6847 6840 /*
6848 6841 * Now do suspend callbacks. In the case of an IOMMU mapping
6849 6842 * this will suspend all DMA activity to the page while it is
6850 6843 * being relocated. Since we are well above LOCK_LEVEL and CPUs
6851 6844 * may be captured at this point we should have acquired any needed
6852 6845 * locks in the presuspend callback.
6853 6846 */
6854 6847 ret = hat_pageprocess_precallbacks(targ, HAT_SUSPEND, NULL);
6855 6848 if (ret != 0) {
6856 6849 repl = targ;
6857 6850 goto suspend_fail;
6858 6851 }
6859 6852
6860 6853 /*
6861 6854 * Raise the PIL yet again, this time to block all high-level
6862 6855 * interrupts on this CPU. This is necessary to prevent an
6863 6856 * interrupt routine from pinning the thread which holds the
6864 6857 * mapping suspended and then touching the suspended page.
6865 6858 *
6866 6859 * Once the page is suspended we also need to be careful to
6867 6860 * avoid calling any functions which touch any seg_kmem memory
6868 6861 * since that memory may be backed by the very page we are
6869 6862 * relocating in here!
6870 6863 */
6871 6864 hat_pagesuspend(targ);
6872 6865
6873 6866 /*
6874 6867 * Now that we are confident everybody has stopped using this page,
6875 6868 * copy the page contents. Note we use a physical copy to prevent
6876 6869 * locking issues and to avoid fpRAS because we can't handle it in
6877 6870 * this context.
6878 6871 */
6879 6872 for (i = 0; i < npages; i++, tpp++, rpp++) {
6880 6873 #ifdef VAC
6881 6874 /*
6882 6875 * If the replacement has a different vcolor than
6883 6876 		 * the one being replaced, we need to handle VAC
6884 6877 		 * consistency for it just as if we were setting up
6885 6878 * a new mapping to it.
6886 6879 */
6887 6880 if ((PP_GET_VCOLOR(rpp) != NO_VCOLOR) &&
6888 6881 (tpp->p_vcolor != rpp->p_vcolor) &&
6889 6882 !CacheColor_IsFlushed(cflags, PP_GET_VCOLOR(rpp))) {
6890 6883 CacheColor_SetFlushed(cflags, PP_GET_VCOLOR(rpp));
6891 6884 sfmmu_cache_flushcolor(PP_GET_VCOLOR(rpp),
6892 6885 rpp->p_pagenum);
6893 6886 }
6894 6887 #endif
6895 6888 /*
6896 6889 * Copy the contents of the page.
6897 6890 */
6898 6891 ppcopy_kernel(tpp, rpp);
6899 6892 }
6900 6893
6901 6894 tpp = targ;
6902 6895 rpp = repl;
6903 6896 for (i = 0; i < npages; i++, tpp++, rpp++) {
6904 6897 /*
6905 6898 * Copy attributes. VAC consistency was handled above,
6906 6899 * if required.
6907 6900 */
6908 6901 rpp->p_nrm = tpp->p_nrm;
6909 6902 tpp->p_nrm = 0;
6910 6903 rpp->p_index = tpp->p_index;
6911 6904 tpp->p_index = 0;
6912 6905 #ifdef VAC
6913 6906 rpp->p_vcolor = tpp->p_vcolor;
6914 6907 #endif
6915 6908 }
6916 6909
6917 6910 /*
6918 6911 * First, unsuspend the page, if we set the suspend bit, and transfer
6919 6912 * the mapping list from the target page to the replacement page.
6920 6913 * Next process postcallbacks; since pa_hment's are linked only to the
6921 6914 * p_mapping list of root page, we don't iterate over the constituent
6922 6915 * pages.
6923 6916 */
6924 6917 hat_pagereload(targ, repl);
6925 6918
6926 6919 suspend_fail:
6927 6920 hat_pageprocess_postcallbacks(repl, HAT_UNSUSPEND);
6928 6921
6929 6922 /*
6930 6923 * Now lower our PIL and release any captured CPUs since we
6931 6924 * are out of the "danger zone". After this it will again be
6932 6925 * safe to acquire adaptive mutex locks, or to drop them...
6933 6926 */
6934 6927 if (old_pil != -1) {
6935 6928 splx(old_pil);
6936 6929 } else {
6937 6930 xc_dismissed(cpuset);
6938 6931 }
6939 6932
6940 6933 kpreempt_enable();
6941 6934
6942 6935 sfmmu_mlist_reloc_exit(low, high);
6943 6936
6944 6937 /*
6945 6938 * Postsuspend callbacks should drop any locks held across
6946 6939 * the suspend callbacks. As before, we don't hold the mapping
6947 6940 	 * list lock at this point. Our assumption is that the mapping
6948 6941 * list still can't change due to our holding SE_EXCL lock and
6949 6942 * there being no unlocked mappings left. Hence the restriction
6950 6943 * on calling context to hat_delete_callback()
6951 6944 */
6952 6945 hat_pageprocess_postcallbacks(repl, HAT_POSTUNSUSPEND);
6953 6946 if (ret != 0) {
6954 6947 /*
6955 6948 * The second presuspend call failed: we got here through
6956 6949 * the suspend_fail label above.
6957 6950 */
6958 6951 ASSERT(ret != EIO);
6959 6952 PAGE_RELOCATE_LOG(target, replacement, ret, cap_cpus);
6960 6953 kreloc_thread = NULL;
6961 6954 mutex_exit(&kpr_mutex);
6962 6955 return (EAGAIN);
6963 6956 }
6964 6957
6965 6958 /*
6966 6959 * Now that we're out of the performance critical section we can
6967 6960 	 * take care of updating the hash table. Since we still
6968 6961 	 * hold all the pages locked SE_EXCL at this point, we
6969 6962 * needn't worry about things changing out from under us.
6970 6963 */
6971 6964 tpp = targ;
6972 6965 rpp = repl;
6973 6966 for (i = 0; i < npages; i++, tpp++, rpp++) {
6974 6967
6975 6968 /*
6976 6969 * replace targ with replacement in page_hash table
6977 6970 */
6978 6971 targ = tpp;
6979 6972 page_relocate_hash(rpp, targ);
6980 6973
6981 6974 /*
6982 6975 * concatenate target; caller of platform_page_relocate()
6983 6976 * expects target to be concatenated after returning.
6984 6977 */
6985 6978 ASSERT(targ->p_next == targ);
6986 6979 ASSERT(targ->p_prev == targ);
6987 6980 page_list_concat(&pl, &targ);
6988 6981 }
6989 6982
6990 6983 ASSERT(*target == pl);
6991 6984 *nrelocp = npages;
6992 6985 PAGE_RELOCATE_LOG(target, replacement, 0, cap_cpus);
6993 6986 kreloc_thread = NULL;
6994 6987 mutex_exit(&kpr_mutex);
6995 6988 return (0);
6996 6989 }
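A minimal hypothetical caller sketch of the contract documented above: both the target and replacement constituent pages are already held SE_EXCL, and EAGAIN simply means the relocation should be retried or abandoned (the helper name is illustrative).

/*
 * Illustrative only: relocate one (possibly large) kernel page.
 */
static int
example_relocate(page_t *targ, page_t *repl)
{
	spgcnt_t nreloc;
	int err;

	/* Constituent pages of targ and repl are SE_EXCL locked here. */
	err = hat_page_relocate(&targ, &repl, &nreloc);
	if (err != 0)
		return (err);		/* EAGAIN: not relocatable now */

	ASSERT(nreloc > 0);
	return (0);
}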
6997 6990
6998 6991 /*
6999 6992 * Called when stray pa_hments are found attached to a page which is
7000 6993 * being freed. Notify the subsystem which attached the pa_hment of
7001 6994 * the error if it registered a suitable handler, else panic.
7002 6995 */
7003 6996 static void
7004 6997 sfmmu_pahment_leaked(struct pa_hment *pahmep)
7005 6998 {
7006 6999 id_t cb_id = pahmep->cb_id;
7007 7000
7008 7001 ASSERT(cb_id >= (id_t)0 && cb_id < sfmmu_cb_nextid);
7009 7002 if (sfmmu_cb_table[cb_id].errhandler != NULL) {
7010 7003 if (sfmmu_cb_table[cb_id].errhandler(pahmep->addr, pahmep->len,
7011 7004 HAT_CB_ERR_LEAKED, pahmep->pvt) == 0)
7012 7005 return; /* non-fatal */
7013 7006 }
7014 7007 panic("pa_hment leaked: 0x%p", (void *)pahmep);
7015 7008 }
7016 7009
7017 7010 /*
7018 7011 * Remove all mappings to page 'pp'.
7019 7012 */
7020 7013 int
7021 7014 hat_pageunload(struct page *pp, uint_t forceflag)
7022 7015 {
7023 7016 struct page *origpp = pp;
7024 7017 struct sf_hment *sfhme, *tmphme;
7025 7018 struct hme_blk *hmeblkp;
7026 7019 kmutex_t *pml;
7027 7020 #ifdef VAC
7028 7021 kmutex_t *pmtx;
7029 7022 #endif
7030 7023 cpuset_t cpuset, tset;
7031 7024 int index, cons;
7032 7025 int pa_hments;
7033 7026
7034 7027 ASSERT(PAGE_EXCL(pp));
7035 7028
7036 7029 tmphme = NULL;
7037 7030 pa_hments = 0;
7038 7031 CPUSET_ZERO(cpuset);
7039 7032
7040 7033 pml = sfmmu_mlist_enter(pp);
7041 7034
7042 7035 #ifdef VAC
7043 7036 if (pp->p_kpmref)
7044 7037 sfmmu_kpm_pageunload(pp);
7045 7038 ASSERT(!PP_ISMAPPED_KPM(pp));
7046 7039 #endif
7047 7040 /*
7048 7041 * Clear vpm reference. Since the page is exclusively locked
7049 7042 * vpm cannot be referencing it.
7050 7043 */
7051 7044 if (vpm_enable) {
7052 7045 pp->p_vpmref = 0;
7053 7046 }
7054 7047
7055 7048 index = PP_MAPINDEX(pp);
7056 7049 cons = TTE8K;
7057 7050 retry:
7058 7051 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
7059 7052 tmphme = sfhme->hme_next;
7060 7053
7061 7054 if (IS_PAHME(sfhme)) {
7062 7055 ASSERT(sfhme->hme_data != NULL);
7063 7056 pa_hments++;
7064 7057 continue;
7065 7058 }
7066 7059
7067 7060 hmeblkp = sfmmu_hmetohblk(sfhme);
7068 7061
7069 7062 /*
7070 7063 		 * If there are kernel mappings, don't unload them; they will
7071 7064 * be suspended.
7072 7065 */
7073 7066 if (forceflag == SFMMU_KERNEL_RELOC && hmeblkp->hblk_lckcnt &&
7074 7067 hmeblkp->hblk_tag.htag_id == ksfmmup)
7075 7068 continue;
7076 7069
7077 7070 tset = sfmmu_pageunload(pp, sfhme, cons);
7078 7071 CPUSET_OR(cpuset, tset);
7079 7072 }
7080 7073
7081 7074 while (index != 0) {
7082 7075 index = index >> 1;
7083 7076 if (index != 0)
7084 7077 cons++;
7085 7078 if (index & 0x1) {
7086 7079 /* Go to leading page */
7087 7080 pp = PP_GROUPLEADER(pp, cons);
7088 7081 ASSERT(sfmmu_mlist_held(pp));
7089 7082 goto retry;
7090 7083 }
7091 7084 }
7092 7085
7093 7086 /*
7094 7087 * cpuset may be empty if the page was only mapped by segkpm,
7095 7088 * in which case we won't actually cross-trap.
7096 7089 */
7097 7090 xt_sync(cpuset);
7098 7091
7099 7092 /*
7100 7093 * The page should have no mappings at this point, unless
7101 7094 * we were called from hat_page_relocate() in which case we
7102 7095 * leave the locked mappings which will be suspended later.
7103 7096 */
7104 7097 ASSERT(!PP_ISMAPPED(origpp) || pa_hments ||
7105 7098 (forceflag == SFMMU_KERNEL_RELOC));
7106 7099
7107 7100 #ifdef VAC
7108 7101 if (PP_ISTNC(pp)) {
7109 7102 if (cons == TTE8K) {
7110 7103 pmtx = sfmmu_page_enter(pp);
7111 7104 PP_CLRTNC(pp);
7112 7105 sfmmu_page_exit(pmtx);
7113 7106 } else {
7114 7107 conv_tnc(pp, cons);
7115 7108 }
7116 7109 }
7117 7110 #endif /* VAC */
7118 7111
7119 7112 if (pa_hments && forceflag != SFMMU_KERNEL_RELOC) {
7120 7113 /*
7121 7114 * Unlink any pa_hments and free them, calling back
7122 7115 * the responsible subsystem to notify it of the error.
7123 7116 * This can occur in situations such as drivers leaking
7124 7117 * DMA handles: naughty, but common enough that we'd like
7125 7118 * to keep the system running rather than bringing it
7126 7119 * down with an obscure error like "pa_hment leaked"
7127 7120 * which doesn't aid the user in debugging their driver.
7128 7121 */
7129 7122 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
7130 7123 tmphme = sfhme->hme_next;
7131 7124 if (IS_PAHME(sfhme)) {
7132 7125 struct pa_hment *pahmep = sfhme->hme_data;
7133 7126 sfmmu_pahment_leaked(pahmep);
7134 7127 HME_SUB(sfhme, pp);
7135 7128 kmem_cache_free(pa_hment_cache, pahmep);
7136 7129 }
7137 7130 }
7138 7131
7139 7132 ASSERT(!PP_ISMAPPED(origpp));
7140 7133 }
7141 7134
7142 7135 sfmmu_mlist_exit(pml);
7143 7136
7144 7137 return (0);
7145 7138 }
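A hypothetical caller sketch for the routine above. HAT_FORCE_PGUNLOAD is assumed here to be the generic HAT forceflag passed by the VM layer when a page is being torn down; the helper name is illustrative.

/*
 * Illustrative only: strip every remaining mapping from a page that
 * is about to be freed or renamed.
 */
static void
example_strip_mappings(page_t *pp)
{
	ASSERT(PAGE_EXCL(pp));
	(void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
	ASSERT(!PP_ISMAPPED(pp));
}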
7146 7139
7147 7140 cpuset_t
7148 7141 sfmmu_pageunload(page_t *pp, struct sf_hment *sfhme, int cons)
7149 7142 {
7150 7143 struct hme_blk *hmeblkp;
7151 7144 sfmmu_t *sfmmup;
7152 7145 tte_t tte, ttemod;
7153 7146 #ifdef DEBUG
7154 7147 tte_t orig_old;
7155 7148 #endif /* DEBUG */
7156 7149 caddr_t addr;
7157 7150 int ttesz;
7158 7151 int ret;
7159 7152 cpuset_t cpuset;
7160 7153
7161 7154 ASSERT(pp != NULL);
7162 7155 ASSERT(sfmmu_mlist_held(pp));
7163 7156 ASSERT(!PP_ISKAS(pp));
7164 7157
7165 7158 CPUSET_ZERO(cpuset);
7166 7159
7167 7160 hmeblkp = sfmmu_hmetohblk(sfhme);
7168 7161
7169 7162 readtte:
7170 7163 sfmmu_copytte(&sfhme->hme_tte, &tte);
7171 7164 if (TTE_IS_VALID(&tte)) {
7172 7165 sfmmup = hblktosfmmu(hmeblkp);
7173 7166 ttesz = get_hblk_ttesz(hmeblkp);
7174 7167 /*
7175 7168 * Only unload mappings of 'cons' size.
7176 7169 */
7177 7170 if (ttesz != cons)
7178 7171 return (cpuset);
7179 7172
7180 7173 /*
7181 7174 * Note that we have p_mapping lock, but no hash lock here.
7182 7175 * hblk_unload() has to have both hash lock AND p_mapping
7183 7176 * lock before it tries to modify tte. So, the tte could
7184 7177 * not become invalid in the sfmmu_modifytte_try() below.
7185 7178 */
7186 7179 ttemod = tte;
7187 7180 #ifdef DEBUG
7188 7181 orig_old = tte;
7189 7182 #endif /* DEBUG */
7190 7183
7191 7184 TTE_SET_INVALID(&ttemod);
7192 7185 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte);
7193 7186 if (ret < 0) {
7194 7187 #ifdef DEBUG
7195 7188 /* only R/M bits can change. */
7196 7189 chk_tte(&orig_old, &tte, &ttemod, hmeblkp);
7197 7190 #endif /* DEBUG */
7198 7191 goto readtte;
7199 7192 }
7200 7193
7201 7194 if (ret == 0) {
7202 7195 panic("pageunload: cas failed?");
7203 7196 }
7204 7197
7205 7198 addr = tte_to_vaddr(hmeblkp, tte);
7206 7199
7207 7200 if (hmeblkp->hblk_shared) {
7208 7201 sf_srd_t *srdp = (sf_srd_t *)sfmmup;
7209 7202 uint_t rid = hmeblkp->hblk_tag.htag_rid;
7210 7203 sf_region_t *rgnp;
7211 7204 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
7212 7205 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
7213 7206 ASSERT(srdp != NULL);
7214 7207 rgnp = srdp->srd_hmergnp[rid];
7215 7208 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid);
7216 7209 cpuset = sfmmu_rgntlb_demap(addr, rgnp, hmeblkp, 1);
7217 7210 sfmmu_ttesync(NULL, addr, &tte, pp);
7218 7211 ASSERT(rgnp->rgn_ttecnt[ttesz] > 0);
7219 7212 atomic_dec_ulong(&rgnp->rgn_ttecnt[ttesz]);
7220 7213 } else {
7221 7214 sfmmu_ttesync(sfmmup, addr, &tte, pp);
7222 7215 atomic_dec_ulong(&sfmmup->sfmmu_ttecnt[ttesz]);
7223 7216
7224 7217 /*
7225 7218 * We need to flush the page from the virtual cache
7226 7219 * in order to prevent a virtual cache alias
7227 7220 * inconsistency. The particular scenario we need
7228 7221 * to worry about is:
7229 7222 			 * Given: va1 and va2 are two virtual addresses that
7230 7223 * alias and will map the same physical address.
7231 7224 * 1. mapping exists from va1 to pa and data has
7232 7225 * been read into the cache.
7233 7226 * 2. unload va1.
7234 7227 * 3. load va2 and modify data using va2.
7235 7228 			 * 4. unload va2.
7236 7229 * 5. load va1 and reference data. Unless we flush
7237 7230 * the data cache when we unload we will get
7238 7231 * stale data.
7239 7232 * This scenario is taken care of by using virtual
7240 7233 * page coloring.
7241 7234 */
7242 7235 if (sfmmup->sfmmu_ismhat) {
7243 7236 /*
7244 7237 * Flush TSBs, TLBs and caches
7245 7238 * of every process
7246 7239 * sharing this ism segment.
7247 7240 */
7248 7241 sfmmu_hat_lock_all();
7249 7242 mutex_enter(&ism_mlist_lock);
7250 7243 kpreempt_disable();
7251 7244 sfmmu_ismtlbcache_demap(addr, sfmmup, hmeblkp,
7252 7245 pp->p_pagenum, CACHE_NO_FLUSH);
7253 7246 kpreempt_enable();
7254 7247 mutex_exit(&ism_mlist_lock);
7255 7248 sfmmu_hat_unlock_all();
7256 7249 cpuset = cpu_ready_set;
7257 7250 } else {
7258 7251 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0);
7259 7252 cpuset = sfmmup->sfmmu_cpusran;
7260 7253 }
7261 7254 }
7262 7255
7263 7256 /*
7264 7257 * Hme_sub has to run after ttesync() and a_rss update.
7265 7258 * See hblk_unload().
7266 7259 */
7267 7260 HME_SUB(sfhme, pp);
7268 7261 membar_stst();
7269 7262
7270 7263 /*
7271 7264 * We can not make ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS)
7272 7265 * since pteload may have done a HME_ADD() right after
7273 7266 * we did the HME_SUB() above. Hmecnt is now maintained
7274 7267 		 * by cas only; no lock guarantees its value. The only
7275 7268 		 * guarantee we have is the hmecnt should not be less than
7276 7269 * what it should be so the hblk will not be taken away.
7277 7270 * It's also important that we decremented the hmecnt after
7278 7271 * we are done with hmeblkp so that this hmeblk won't be
7279 7272 * stolen.
7280 7273 */
7281 7274 ASSERT(hmeblkp->hblk_hmecnt > 0);
7282 7275 ASSERT(hmeblkp->hblk_vcnt > 0);
7283 7276 atomic_dec_16(&hmeblkp->hblk_vcnt);
7284 7277 atomic_dec_16(&hmeblkp->hblk_hmecnt);
7285 7278 /*
7286 7279 * This is bug 4063182.
7287 7280 * XXX: fixme
7288 7281 * ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt ||
7289 7282 * !hmeblkp->hblk_lckcnt);
7290 7283 */
7291 7284 } else {
7292 7285 panic("invalid tte? pp %p &tte %p",
7293 7286 (void *)pp, (void *)&tte);
7294 7287 }
7295 7288
7296 7289 return (cpuset);
7297 7290 }
7298 7291
7299 7292 /*
7300 7293 * While relocating a kernel page, this function will move the mappings
7301 7294 * from tpp to dpp and modify any associated data with these mappings.
7302 7295 * It also unsuspends the suspended kernel mapping.
7303 7296 */
7304 7297 static void
7305 7298 hat_pagereload(struct page *tpp, struct page *dpp)
7306 7299 {
7307 7300 struct sf_hment *sfhme;
7308 7301 tte_t tte, ttemod;
7309 7302 int index, cons;
7310 7303
7311 7304 ASSERT(getpil() == PIL_MAX);
7312 7305 ASSERT(sfmmu_mlist_held(tpp));
7313 7306 ASSERT(sfmmu_mlist_held(dpp));
7314 7307
7315 7308 index = PP_MAPINDEX(tpp);
7316 7309 cons = TTE8K;
7317 7310
7318 7311 /* Update real mappings to the page */
7319 7312 retry:
7320 7313 for (sfhme = tpp->p_mapping; sfhme != NULL; sfhme = sfhme->hme_next) {
7321 7314 if (IS_PAHME(sfhme))
7322 7315 continue;
7323 7316 sfmmu_copytte(&sfhme->hme_tte, &tte);
7324 7317 ttemod = tte;
7325 7318
7326 7319 /*
7327 7320 * replace old pfn with new pfn in TTE
7328 7321 */
7329 7322 PFN_TO_TTE(ttemod, dpp->p_pagenum);
7330 7323
7331 7324 /*
7332 7325 * clear suspend bit
7333 7326 */
7334 7327 ASSERT(TTE_IS_SUSPEND(&ttemod));
7335 7328 TTE_CLR_SUSPEND(&ttemod);
7336 7329
7337 7330 if (sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte) < 0)
7338 7331 panic("hat_pagereload(): sfmmu_modifytte_try() failed");
7339 7332
7340 7333 /*
7341 7334 * set hme_page point to new page
7342 7335 */
7343 7336 sfhme->hme_page = dpp;
7344 7337 }
7345 7338
7346 7339 /*
7347 7340 * move p_mapping list from old page to new page
7348 7341 */
7349 7342 dpp->p_mapping = tpp->p_mapping;
7350 7343 tpp->p_mapping = NULL;
7351 7344 dpp->p_share = tpp->p_share;
7352 7345 tpp->p_share = 0;
7353 7346
7354 7347 while (index != 0) {
7355 7348 index = index >> 1;
7356 7349 if (index != 0)
7357 7350 cons++;
7358 7351 if (index & 0x1) {
7359 7352 tpp = PP_GROUPLEADER(tpp, cons);
7360 7353 dpp = PP_GROUPLEADER(dpp, cons);
7361 7354 goto retry;
7362 7355 }
7363 7356 }
7364 7357
7365 7358 curthread->t_flag &= ~T_DONTDTRACE;
7366 7359 mutex_exit(&kpr_suspendlock);
7367 7360 }
7368 7361
7369 7362 uint_t
7370 7363 hat_pagesync(struct page *pp, uint_t clearflag)
7371 7364 {
7372 7365 struct sf_hment *sfhme, *tmphme = NULL;
7373 7366 struct hme_blk *hmeblkp;
7374 7367 kmutex_t *pml;
7375 7368 cpuset_t cpuset, tset;
7376 7369 int index, cons;
7377 7370 extern ulong_t po_share;
7378 7371 page_t *save_pp = pp;
7379 7372 int stop_on_sh = 0;
7380 7373 uint_t shcnt;
7381 7374
7382 7375 CPUSET_ZERO(cpuset);
7383 7376
7384 7377 if (PP_ISRO(pp) && (clearflag & HAT_SYNC_STOPON_MOD)) {
7385 7378 return (PP_GENERIC_ATTR(pp));
7386 7379 }
7387 7380
7388 7381 if ((clearflag & HAT_SYNC_ZERORM) == 0) {
7389 7382 if ((clearflag & HAT_SYNC_STOPON_REF) && PP_ISREF(pp)) {
7390 7383 return (PP_GENERIC_ATTR(pp));
7391 7384 }
7392 7385 if ((clearflag & HAT_SYNC_STOPON_MOD) && PP_ISMOD(pp)) {
7393 7386 return (PP_GENERIC_ATTR(pp));
7394 7387 }
7395 7388 if (clearflag & HAT_SYNC_STOPON_SHARED) {
7396 7389 if (pp->p_share > po_share) {
7397 7390 hat_page_setattr(pp, P_REF);
7398 7391 return (PP_GENERIC_ATTR(pp));
7399 7392 }
7400 7393 stop_on_sh = 1;
7401 7394 shcnt = 0;
7402 7395 }
7403 7396 }
7404 7397
7405 7398 clearflag &= ~HAT_SYNC_STOPON_SHARED;
7406 7399 pml = sfmmu_mlist_enter(pp);
7407 7400 index = PP_MAPINDEX(pp);
7408 7401 cons = TTE8K;
7409 7402 retry:
7410 7403 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
7411 7404 /*
7412 7405 * We need to save the next hment on the list since
7413 7406 * it is possible for pagesync to remove an invalid hment
7414 7407 * from the list.
7415 7408 */
7416 7409 tmphme = sfhme->hme_next;
7417 7410 if (IS_PAHME(sfhme))
7418 7411 continue;
7419 7412 /*
7420 7413 * If we are looking for large mappings and this hme doesn't
7421 7414 * reach the range we are seeking, just ignore it.
7422 7415 */
7423 7416 hmeblkp = sfmmu_hmetohblk(sfhme);
7424 7417
7425 7418 if (hme_size(sfhme) < cons)
7426 7419 continue;
7427 7420
7428 7421 if (stop_on_sh) {
7429 7422 if (hmeblkp->hblk_shared) {
7430 7423 sf_srd_t *srdp = hblktosrd(hmeblkp);
7431 7424 uint_t rid = hmeblkp->hblk_tag.htag_rid;
7432 7425 sf_region_t *rgnp;
7433 7426 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
7434 7427 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
7435 7428 ASSERT(srdp != NULL);
7436 7429 rgnp = srdp->srd_hmergnp[rid];
7437 7430 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp,
7438 7431 rgnp, rid);
7439 7432 shcnt += rgnp->rgn_refcnt;
7440 7433 } else {
7441 7434 shcnt++;
7442 7435 }
7443 7436 if (shcnt > po_share) {
7444 7437 /*
7445 7438 * tell the pager to spare the page this time
7446 7439 * around.
7447 7440 */
7448 7441 hat_page_setattr(save_pp, P_REF);
7449 7442 index = 0;
7450 7443 break;
7451 7444 }
7452 7445 }
7453 7446 tset = sfmmu_pagesync(pp, sfhme,
7454 7447 clearflag & ~HAT_SYNC_STOPON_RM);
7455 7448 CPUSET_OR(cpuset, tset);
7456 7449
7457 7450 /*
7458 7451 * If clearflag is HAT_SYNC_DONTZERO, break out as soon
7459 7452 * as the "ref" or "mod" is set or share cnt exceeds po_share.
7460 7453 */
7461 7454 if ((clearflag & ~HAT_SYNC_STOPON_RM) == HAT_SYNC_DONTZERO &&
7462 7455 (((clearflag & HAT_SYNC_STOPON_MOD) && PP_ISMOD(save_pp)) ||
7463 7456 ((clearflag & HAT_SYNC_STOPON_REF) && PP_ISREF(save_pp)))) {
7464 7457 index = 0;
7465 7458 break;
7466 7459 }
7467 7460 }
7468 7461
7469 7462 while (index) {
7470 7463 index = index >> 1;
7471 7464 cons++;
7472 7465 if (index & 0x1) {
7473 7466 /* Go to leading page */
7474 7467 pp = PP_GROUPLEADER(pp, cons);
7475 7468 goto retry;
7476 7469 }
7477 7470 }
7478 7471
7479 7472 xt_sync(cpuset);
7480 7473 sfmmu_mlist_exit(pml);
7481 7474 return (PP_GENERIC_ATTR(save_pp));
7482 7475 }
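A hypothetical sketch of how a caller might use hat_pagesync() above to test for modification without disturbing the hardware ref/mod state (the helper name is illustrative).

/*
 * Illustrative only: has any mapping dirtied this page?
 */
static int
example_page_is_dirty(page_t *pp)
{
	uint_t attr;

	attr = hat_pagesync(pp, HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD);
	return ((attr & P_MOD) != 0);
}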
7483 7476
7484 7477 /*
7485 7478 * Get all the hardware dependent attributes for a page struct
7486 7479 */
7487 7480 static cpuset_t
7488 7481 sfmmu_pagesync(struct page *pp, struct sf_hment *sfhme,
7489 - uint_t clearflag)
7482 + uint_t clearflag)
7490 7483 {
7491 7484 caddr_t addr;
7492 7485 tte_t tte, ttemod;
7493 7486 struct hme_blk *hmeblkp;
7494 7487 int ret;
7495 7488 sfmmu_t *sfmmup;
7496 7489 cpuset_t cpuset;
7497 7490
7498 7491 ASSERT(pp != NULL);
7499 7492 ASSERT(sfmmu_mlist_held(pp));
7500 7493 ASSERT((clearflag == HAT_SYNC_DONTZERO) ||
7501 7494 (clearflag == HAT_SYNC_ZERORM));
7502 7495
7503 7496 SFMMU_STAT(sf_pagesync);
7504 7497
7505 7498 CPUSET_ZERO(cpuset);
7506 7499
7507 7500 sfmmu_pagesync_retry:
7508 7501
7509 7502 sfmmu_copytte(&sfhme->hme_tte, &tte);
7510 7503 if (TTE_IS_VALID(&tte)) {
7511 7504 hmeblkp = sfmmu_hmetohblk(sfhme);
7512 7505 sfmmup = hblktosfmmu(hmeblkp);
7513 7506 addr = tte_to_vaddr(hmeblkp, tte);
7514 7507 if (clearflag == HAT_SYNC_ZERORM) {
7515 7508 ttemod = tte;
7516 7509 TTE_CLR_RM(&ttemod);
7517 7510 ret = sfmmu_modifytte_try(&tte, &ttemod,
7518 7511 &sfhme->hme_tte);
7519 7512 if (ret < 0) {
7520 7513 /*
7521 7514 * cas failed and the new value is not what
7522 7515 * we want.
7523 7516 */
7524 7517 goto sfmmu_pagesync_retry;
7525 7518 }
7526 7519
7527 7520 if (ret > 0) {
7528 7521 /* we win the cas */
7529 7522 if (hmeblkp->hblk_shared) {
7530 7523 sf_srd_t *srdp = (sf_srd_t *)sfmmup;
7531 7524 uint_t rid =
7532 7525 hmeblkp->hblk_tag.htag_rid;
7533 7526 sf_region_t *rgnp;
7534 7527 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
7535 7528 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
7536 7529 ASSERT(srdp != NULL);
7537 7530 rgnp = srdp->srd_hmergnp[rid];
7538 7531 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp,
7539 7532 srdp, rgnp, rid);
7540 7533 cpuset = sfmmu_rgntlb_demap(addr,
7541 7534 rgnp, hmeblkp, 1);
7542 7535 } else {
7543 7536 sfmmu_tlb_demap(addr, sfmmup, hmeblkp,
7544 7537 0, 0);
7545 7538 cpuset = sfmmup->sfmmu_cpusran;
7546 7539 }
7547 7540 }
7548 7541 }
7549 7542 sfmmu_ttesync(hmeblkp->hblk_shared ? NULL : sfmmup, addr,
7550 7543 &tte, pp);
7551 7544 }
7552 7545 return (cpuset);
7553 7546 }
7554 7547
7555 7548 /*
7556 7549  * Remove write permission from a mapping to a page, so that
7557 7550 * we can detect the next modification of it. This requires modifying
7558 7551 * the TTE then invalidating (demap) any TLB entry using that TTE.
7559 7552 * This code is similar to sfmmu_pagesync().
7560 7553 */
7561 7554 static cpuset_t
7562 7555 sfmmu_pageclrwrt(struct page *pp, struct sf_hment *sfhme)
7563 7556 {
7564 7557 caddr_t addr;
7565 7558 tte_t tte;
7566 7559 tte_t ttemod;
7567 7560 struct hme_blk *hmeblkp;
7568 7561 int ret;
7569 7562 sfmmu_t *sfmmup;
7570 7563 cpuset_t cpuset;
7571 7564
7572 7565 ASSERT(pp != NULL);
7573 7566 ASSERT(sfmmu_mlist_held(pp));
7574 7567
7575 7568 CPUSET_ZERO(cpuset);
7576 7569 SFMMU_STAT(sf_clrwrt);
7577 7570
7578 7571 retry:
7579 7572
7580 7573 sfmmu_copytte(&sfhme->hme_tte, &tte);
7581 7574 if (TTE_IS_VALID(&tte) && TTE_IS_WRITABLE(&tte)) {
7582 7575 hmeblkp = sfmmu_hmetohblk(sfhme);
7583 7576 sfmmup = hblktosfmmu(hmeblkp);
7584 7577 addr = tte_to_vaddr(hmeblkp, tte);
7585 7578
7586 7579 ttemod = tte;
7587 7580 TTE_CLR_WRT(&ttemod);
7588 7581 TTE_CLR_MOD(&ttemod);
7589 7582 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte);
7590 7583
7591 7584 /*
7592 7585 * if cas failed and the new value is not what
7593 7586 		 * we want, retry.
7594 7587 */
7595 7588 if (ret < 0)
7596 7589 goto retry;
7597 7590
7598 7591 /* we win the cas */
7599 7592 if (ret > 0) {
7600 7593 if (hmeblkp->hblk_shared) {
7601 7594 sf_srd_t *srdp = (sf_srd_t *)sfmmup;
7602 7595 uint_t rid = hmeblkp->hblk_tag.htag_rid;
7603 7596 sf_region_t *rgnp;
7604 7597 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
7605 7598 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
7606 7599 ASSERT(srdp != NULL);
7607 7600 rgnp = srdp->srd_hmergnp[rid];
7608 7601 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp,
7609 7602 srdp, rgnp, rid);
7610 7603 cpuset = sfmmu_rgntlb_demap(addr,
7611 7604 rgnp, hmeblkp, 1);
7612 7605 } else {
7613 7606 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0);
7614 7607 cpuset = sfmmup->sfmmu_cpusran;
7615 7608 }
7616 7609 }
7617 7610 }
7618 7611
7619 7612 return (cpuset);
7620 7613 }
7621 7614
7622 7615 /*
7623 7616 * Walk all mappings of a page, removing write permission and clearing the
7624 7617 * ref/mod bits. This code is similar to hat_pagesync()
7625 7618 */
7626 7619 static void
7627 7620 hat_page_clrwrt(page_t *pp)
7628 7621 {
7629 7622 struct sf_hment *sfhme;
7630 7623 struct sf_hment *tmphme = NULL;
7631 7624 kmutex_t *pml;
7632 7625 cpuset_t cpuset;
7633 7626 cpuset_t tset;
7634 7627 int index;
7635 7628 int cons;
7636 7629
7637 7630 CPUSET_ZERO(cpuset);
7638 7631
7639 7632 pml = sfmmu_mlist_enter(pp);
7640 7633 index = PP_MAPINDEX(pp);
7641 7634 cons = TTE8K;
7642 7635 retry:
7643 7636 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
7644 7637 tmphme = sfhme->hme_next;
7645 7638
7646 7639 /*
7647 7640 * If we are looking for large mappings and this hme doesn't
7648 7641 		 * reach the range we are seeking, just ignore it.
7649 7642 */
7650 7643
7651 7644 if (hme_size(sfhme) < cons)
7652 7645 continue;
7653 7646
7654 7647 tset = sfmmu_pageclrwrt(pp, sfhme);
7655 7648 CPUSET_OR(cpuset, tset);
7656 7649 }
7657 7650
7658 7651 while (index) {
7659 7652 index = index >> 1;
7660 7653 cons++;
7661 7654 if (index & 0x1) {
7662 7655 /* Go to leading page */
7663 7656 pp = PP_GROUPLEADER(pp, cons);
7664 7657 goto retry;
7665 7658 }
7666 7659 }
7667 7660
7668 7661 xt_sync(cpuset);
7669 7662 sfmmu_mlist_exit(pml);
7670 7663 }
7671 7664
7672 7665 /*
7673 7666 * Set the given REF/MOD/RO bits for the given page.
7674 7667 * For a vnode with a sorted v_pages list, we need to change
7675 7668 * the attributes and the v_pages list together under page_vnode_mutex.
7676 7669 */
7677 7670 void
7678 7671 hat_page_setattr(page_t *pp, uint_t flag)
7679 7672 {
7680 7673 vnode_t *vp = pp->p_vnode;
7681 7674 page_t **listp;
7682 7675 kmutex_t *pmtx;
7683 7676 kmutex_t *vphm = NULL;
7684 7677 int noshuffle;
7685 7678
7686 7679 noshuffle = flag & P_NSH;
7687 7680 flag &= ~P_NSH;
7688 7681
7689 7682 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO)));
7690 7683
7691 7684 /*
7692 7685 * nothing to do if attribute already set
7693 7686 */
7694 7687 if ((pp->p_nrm & flag) == flag)
7695 7688 return;
7696 7689
7697 7690 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp) &&
7698 7691 !noshuffle) {
7699 7692 vphm = page_vnode_mutex(vp);
7700 7693 mutex_enter(vphm);
7701 7694 }
7702 7695
7703 7696 pmtx = sfmmu_page_enter(pp);
7704 7697 pp->p_nrm |= flag;
7705 7698 sfmmu_page_exit(pmtx);
7706 7699
7707 7700 if (vphm != NULL) {
7708 7701 /*
7709 7702 * Some File Systems examine v_pages for NULL w/o
7710 7703 * grabbing the vphm mutex. Must not let it become NULL when
7711 7704 * pp is the only page on the list.
7712 7705 */
7713 7706 if (pp->p_vpnext != pp) {
7714 7707 page_vpsub(&vp->v_pages, pp);
7715 7708 if (vp->v_pages != NULL)
7716 7709 listp = &vp->v_pages->p_vpprev->p_vpnext;
7717 7710 else
7718 7711 listp = &vp->v_pages;
7719 7712 page_vpadd(listp, pp);
7720 7713 }
7721 7714 mutex_exit(vphm);
7722 7715 }
7723 7716 }
7724 7717
7725 7718 void
7726 7719 hat_page_clrattr(page_t *pp, uint_t flag)
7727 7720 {
7728 7721 vnode_t *vp = pp->p_vnode;
7729 7722 kmutex_t *pmtx;
7730 7723
7731 7724 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO)));
7732 7725
7733 7726 pmtx = sfmmu_page_enter(pp);
7734 7727
7735 7728 /*
7736 7729 * Caller is expected to hold page's io lock for VMODSORT to work
7737 7730 * correctly with pvn_vplist_dirty() and pvn_getdirty() when mod
7738 7731 * bit is cleared.
7739 7732 	 * We don't have an assert, to avoid tripping some existing third-party
7740 7733 * code. The dirty page is moved back to top of the v_page list
7741 7734 * after IO is done in pvn_write_done().
7742 7735 */
7743 7736 pp->p_nrm &= ~flag;
7744 7737 sfmmu_page_exit(pmtx);
7745 7738
7746 7739 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) {
7747 7740
7748 7741 /*
7749 7742 * VMODSORT works by removing write permissions and getting
7750 7743 * a fault when a page is made dirty. At this point
7751 7744 * we need to remove write permission from all mappings
7752 7745 * to this page.
7753 7746 */
7754 7747 hat_page_clrwrt(pp);
7755 7748 }
7756 7749 }
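A hypothetical sketch of the calling convention described in the comment above: take the page's io lock around clearing P_MOD so VMODSORT stays consistent (the helper name is illustrative).

/*
 * Illustrative only: clear the mod bit under the page io lock.
 */
static void
example_clear_mod(page_t *pp)
{
	page_io_lock(pp);
	hat_page_clrattr(pp, P_MOD);
	page_io_unlock(pp);
}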
7757 7750
7758 7751 uint_t
7759 7752 hat_page_getattr(page_t *pp, uint_t flag)
7760 7753 {
7761 7754 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO)));
7762 7755 return ((uint_t)(pp->p_nrm & flag));
7763 7756 }
7764 7757
7765 7758 /*
7766 7759 * DEBUG kernels: verify that a kernel va<->pa translation
7767 7760 * is safe by checking the underlying page_t is in a page
7768 7761 * relocation-safe state.
7769 7762 */
7770 7763 #ifdef DEBUG
7771 7764 void
7772 7765 sfmmu_check_kpfn(pfn_t pfn)
7773 7766 {
7774 7767 page_t *pp;
7775 7768 int index, cons;
7776 7769
7777 7770 if (hat_check_vtop == 0)
7778 7771 return;
7779 7772
7780 7773 if (kvseg.s_base == NULL || panicstr)
7781 7774 return;
7782 7775
7783 7776 pp = page_numtopp_nolock(pfn);
7784 7777 if (!pp)
7785 7778 return;
7786 7779
7787 7780 if (PAGE_LOCKED(pp) || PP_ISNORELOC(pp))
7788 7781 return;
7789 7782
7790 7783 /*
7791 7784 * Handed a large kernel page, we dig up the root page since we
7792 7785 * know the root page might have the lock also.
7793 7786 */
7794 7787 if (pp->p_szc != 0) {
7795 7788 index = PP_MAPINDEX(pp);
7796 7789 cons = TTE8K;
7797 7790 again:
7798 7791 while (index != 0) {
7799 7792 index >>= 1;
7800 7793 if (index != 0)
7801 7794 cons++;
7802 7795 if (index & 0x1) {
7803 7796 pp = PP_GROUPLEADER(pp, cons);
7804 7797 goto again;
7805 7798 }
7806 7799 }
7807 7800 }
7808 7801
7809 7802 if (PAGE_LOCKED(pp) || PP_ISNORELOC(pp))
7810 7803 return;
7811 7804
7812 7805 /*
7813 7806 * Pages need to be locked or allocated "permanent" (either from
7814 7807 * static_arena arena or explicitly setting PG_NORELOC when calling
7815 7808 * page_create_va()) for VA->PA translations to be valid.
7816 7809 */
7817 7810 if (!PP_ISNORELOC(pp))
7818 7811 panic("Illegal VA->PA translation, pp 0x%p not permanent",
7819 7812 (void *)pp);
7820 7813 else
7821 7814 panic("Illegal VA->PA translation, pp 0x%p not locked",
7822 7815 (void *)pp);
7823 7816 }
7824 7817 #endif /* DEBUG */
7825 7818
7826 7819 /*
7827 7820 * Returns a page frame number for a given virtual address.
7828 7821 * Returns PFN_INVALID to indicate an invalid mapping
7829 7822 */
7830 7823 pfn_t
7831 7824 hat_getpfnum(struct hat *hat, caddr_t addr)
7832 7825 {
7833 7826 pfn_t pfn;
7834 7827 tte_t tte;
7835 7828
7836 7829 /*
7837 7830 * We would like to
7838 7831 * ASSERT(AS_LOCK_HELD(as));
7839 7832 * but we can't because the iommu driver will call this
7840 7833 * routine at interrupt time and it can't grab the as lock
7841 7834 * or it will deadlock: A thread could have the as lock
7842 7835 * and be waiting for io. The io can't complete
7843 7836 * because the interrupt thread is blocked trying to grab
7844 7837 * the as lock.
7845 7838 */
7846 7839
7847 7840 if (hat == ksfmmup) {
7848 7841 if (IS_KMEM_VA_LARGEPAGE(addr)) {
7849 7842 ASSERT(segkmem_lpszc > 0);
7850 7843 pfn = sfmmu_kvaszc2pfn(addr, segkmem_lpszc);
7851 7844 if (pfn != PFN_INVALID) {
7852 7845 sfmmu_check_kpfn(pfn);
7853 7846 return (pfn);
7854 7847 }
7855 7848 } else if (segkpm && IS_KPM_ADDR(addr)) {
7856 7849 return (sfmmu_kpm_vatopfn(addr));
7857 7850 }
7858 7851 while ((pfn = sfmmu_vatopfn(addr, ksfmmup, &tte))
7859 7852 == PFN_SUSPENDED) {
7860 7853 sfmmu_vatopfn_suspended(addr, ksfmmup, &tte);
7861 7854 }
7862 7855 sfmmu_check_kpfn(pfn);
7863 7856 return (pfn);
7864 7857 } else {
7865 7858 return (sfmmu_uvatopfn(addr, hat, NULL));
7866 7859 }
7867 7860 }
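A hypothetical caller sketch for the translation routine above, using the kernel hat; the helper name is illustrative.

/*
 * Illustrative only: translate a kernel virtual address to a PFN.
 */
static pfn_t
example_kva_to_pfn(caddr_t addr)
{
	pfn_t pfn;

	pfn = hat_getpfnum(kas.a_hat, addr);
	/* PFN_INVALID means there is no valid mapping at 'addr'. */
	return (pfn);
}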
7868 7861
7869 7862 /*
7870 7863 * This routine will return both pfn and tte for the vaddr.
7871 7864 */
7872 7865 static pfn_t
7873 7866 sfmmu_uvatopfn(caddr_t vaddr, struct hat *sfmmup, tte_t *ttep)
7874 7867 {
7875 7868 struct hmehash_bucket *hmebp;
7876 7869 hmeblk_tag hblktag;
7877 7870 int hmeshift, hashno = 1;
7878 7871 struct hme_blk *hmeblkp = NULL;
7879 7872 tte_t tte;
7880 7873
7881 7874 struct sf_hment *sfhmep;
7882 7875 pfn_t pfn;
7883 7876
7884 7877 /* support for ISM */
7885 7878 ism_map_t *ism_map;
7886 7879 ism_blk_t *ism_blkp;
7887 7880 int i;
7888 7881 sfmmu_t *ism_hatid = NULL;
7889 7882 sfmmu_t *locked_hatid = NULL;
7890 7883 sfmmu_t *sv_sfmmup = sfmmup;
7891 7884 caddr_t sv_vaddr = vaddr;
7892 7885 sf_srd_t *srdp;
7893 7886
7894 7887 if (ttep == NULL) {
7895 7888 ttep = &tte;
7896 7889 } else {
7897 7890 ttep->ll = 0;
7898 7891 }
7899 7892
7900 7893 ASSERT(sfmmup != ksfmmup);
7901 7894 SFMMU_STAT(sf_user_vtop);
7902 7895 /*
7903 7896 * Set ism_hatid if vaddr falls in a ISM segment.
7904 7897 */
7905 7898 ism_blkp = sfmmup->sfmmu_iblk;
7906 7899 if (ism_blkp != NULL) {
7907 7900 sfmmu_ismhat_enter(sfmmup, 0);
7908 7901 locked_hatid = sfmmup;
7909 7902 }
7910 7903 while (ism_blkp != NULL && ism_hatid == NULL) {
7911 7904 ism_map = ism_blkp->iblk_maps;
7912 7905 for (i = 0; ism_map[i].imap_ismhat && i < ISM_MAP_SLOTS; i++) {
7913 7906 if (vaddr >= ism_start(ism_map[i]) &&
7914 7907 vaddr < ism_end(ism_map[i])) {
7915 7908 sfmmup = ism_hatid = ism_map[i].imap_ismhat;
7916 7909 vaddr = (caddr_t)(vaddr -
7917 7910 ism_start(ism_map[i]));
7918 7911 break;
7919 7912 }
7920 7913 }
7921 7914 ism_blkp = ism_blkp->iblk_next;
7922 7915 }
7923 7916 if (locked_hatid) {
7924 7917 sfmmu_ismhat_exit(locked_hatid, 0);
7925 7918 }
7926 7919
7927 7920 hblktag.htag_id = sfmmup;
7928 7921 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
7929 7922 do {
7930 7923 hmeshift = HME_HASH_SHIFT(hashno);
7931 7924 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift);
7932 7925 hblktag.htag_rehash = hashno;
7933 7926 hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift);
7934 7927
7935 7928 SFMMU_HASH_LOCK(hmebp);
7936 7929
7937 7930 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp);
7938 7931 if (hmeblkp != NULL) {
7939 7932 ASSERT(!hmeblkp->hblk_shared);
7940 7933 HBLKTOHME(sfhmep, hmeblkp, vaddr);
7941 7934 sfmmu_copytte(&sfhmep->hme_tte, ttep);
7942 7935 SFMMU_HASH_UNLOCK(hmebp);
7943 7936 if (TTE_IS_VALID(ttep)) {
7944 7937 pfn = TTE_TO_PFN(vaddr, ttep);
7945 7938 return (pfn);
7946 7939 }
7947 7940 break;
7948 7941 }
7949 7942 SFMMU_HASH_UNLOCK(hmebp);
7950 7943 hashno++;
7951 7944 } while (HME_REHASH(sfmmup) && (hashno <= mmu_hashcnt));
7952 7945
7953 7946 if (SF_HMERGNMAP_ISNULL(sv_sfmmup)) {
7954 7947 return (PFN_INVALID);
7955 7948 }
7956 7949 srdp = sv_sfmmup->sfmmu_srdp;
7957 7950 ASSERT(srdp != NULL);
7958 7951 ASSERT(srdp->srd_refcnt != 0);
7959 7952 hblktag.htag_id = srdp;
7960 7953 hashno = 1;
7961 7954 do {
7962 7955 hmeshift = HME_HASH_SHIFT(hashno);
7963 7956 hblktag.htag_bspage = HME_HASH_BSPAGE(sv_vaddr, hmeshift);
7964 7957 hblktag.htag_rehash = hashno;
7965 7958 hmebp = HME_HASH_FUNCTION(srdp, sv_vaddr, hmeshift);
7966 7959
7967 7960 SFMMU_HASH_LOCK(hmebp);
7968 7961 for (hmeblkp = hmebp->hmeblkp; hmeblkp != NULL;
7969 7962 hmeblkp = hmeblkp->hblk_next) {
7970 7963 uint_t rid;
7971 7964 sf_region_t *rgnp;
7972 7965 caddr_t rsaddr;
7973 7966 caddr_t readdr;
7974 7967
7975 7968 if (!HTAGS_EQ_SHME(hmeblkp->hblk_tag, hblktag,
7976 7969 sv_sfmmup->sfmmu_hmeregion_map)) {
7977 7970 continue;
7978 7971 }
7979 7972 ASSERT(hmeblkp->hblk_shared);
7980 7973 rid = hmeblkp->hblk_tag.htag_rid;
7981 7974 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
7982 7975 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
7983 7976 rgnp = srdp->srd_hmergnp[rid];
7984 7977 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid);
7985 7978 HBLKTOHME(sfhmep, hmeblkp, sv_vaddr);
7986 7979 sfmmu_copytte(&sfhmep->hme_tte, ttep);
7987 7980 rsaddr = rgnp->rgn_saddr;
7988 7981 readdr = rsaddr + rgnp->rgn_size;
7989 7982 #ifdef DEBUG
7990 7983 if (TTE_IS_VALID(ttep) ||
7991 7984 get_hblk_ttesz(hmeblkp) > TTE8K) {
7992 7985 caddr_t eva = tte_to_evaddr(hmeblkp, ttep);
7993 7986 ASSERT(eva > sv_vaddr);
7994 7987 ASSERT(sv_vaddr >= rsaddr);
7995 7988 ASSERT(sv_vaddr < readdr);
7996 7989 ASSERT(eva <= readdr);
7997 7990 }
7998 7991 #endif /* DEBUG */
7999 7992 /*
8000 7993 * Continue the search if we
8001 7994 * found an invalid 8K tte outside of the area
8002 7995 * covered by this hmeblk's region.
8003 7996 */
8004 7997 if (TTE_IS_VALID(ttep)) {
8005 7998 SFMMU_HASH_UNLOCK(hmebp);
8006 7999 pfn = TTE_TO_PFN(sv_vaddr, ttep);
8007 8000 return (pfn);
8008 8001 } else if (get_hblk_ttesz(hmeblkp) > TTE8K ||
8009 8002 (sv_vaddr >= rsaddr && sv_vaddr < readdr)) {
8010 8003 SFMMU_HASH_UNLOCK(hmebp);
8011 8004 pfn = PFN_INVALID;
8012 8005 return (pfn);
8013 8006 }
8014 8007 }
8015 8008 SFMMU_HASH_UNLOCK(hmebp);
8016 8009 hashno++;
8017 8010 } while (hashno <= mmu_hashcnt);
8018 8011 return (PFN_INVALID);
8019 8012 }
8020 8013
8021 8014
8022 8015 /*
8023 8016  * For compatibility with AT&T and later optimizations
8024 8017 */
8025 8018 /* ARGSUSED */
8026 8019 void
8027 8020 hat_map(struct hat *hat, caddr_t addr, size_t len, uint_t flags)
8028 8021 {
8029 8022 ASSERT(hat != NULL);
8030 8023 }
8031 8024
8032 8025 /*
8033 8026 * Return the number of mappings to a particular page. This number is an
8034 8027 * approximation of the number of people sharing the page.
8035 8028 *
8036 8029 * shared hmeblks or ism hmeblks are counted as 1 mapping here.
8037 8030  * hat_page_checkshare() can be used to compare a threshold to a share
8038 8031  * count that reflects the number of region sharers, albeit at higher cost.
8039 8032 */
8040 8033 ulong_t
8041 8034 hat_page_getshare(page_t *pp)
8042 8035 {
8043 8036 page_t *spp = pp; /* start page */
8044 8037 kmutex_t *pml;
8045 8038 ulong_t cnt;
8046 8039 int index, sz = TTE64K;
8047 8040
8048 8041 /*
8049 8042 * We need to grab the mlist lock to make sure any outstanding
8050 8043 * load/unloads complete. Otherwise we could return zero
8051 8044 	 * even though the unload(s) haven't finished yet.
8052 8045 */
8053 8046 pml = sfmmu_mlist_enter(spp);
8054 8047 cnt = spp->p_share;
8055 8048
8056 8049 #ifdef VAC
8057 8050 if (kpm_enable)
8058 8051 cnt += spp->p_kpmref;
8059 8052 #endif
8060 8053 if (vpm_enable && pp->p_vpmref) {
8061 8054 cnt += 1;
8062 8055 }
8063 8056
8064 8057 /*
8065 8058 * If we have any large mappings, we count the number of
8066 8059 * mappings that this large page is part of.
8067 8060 */
8068 8061 index = PP_MAPINDEX(spp);
8069 8062 index >>= 1;
8070 8063 while (index) {
8071 8064 pp = PP_GROUPLEADER(spp, sz);
8072 8065 if ((index & 0x1) && pp != spp) {
8073 8066 cnt += pp->p_share;
8074 8067 spp = pp;
8075 8068 }
8076 8069 index >>= 1;
8077 8070 sz++;
8078 8071 }
8079 8072 sfmmu_mlist_exit(pml);
8080 8073 return (cnt);
8081 8074 }
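
The mapping-index walk in hat_page_getshare() above can be hard to picture from the macros alone. The following user-land sketch (hypothetical share counts; it does not use the real PP_MAPINDEX/PP_GROUPLEADER macros) shows the idea: bit k of the index (k >= 1) means the page participates in a mapping of the k-th page size, and the group leader's share count for each set bit is accumulated.

	#include <stdio.h>

	#define	NSIZES	6

	/* Hypothetical per-size-code share counts of the group-leader pages. */
	static unsigned long leader_share[NSIZES] = { 5, 2, 0, 1, 0, 0 };

	static unsigned long
	count_share(unsigned int mapindex)
	{
		unsigned long cnt = leader_share[0];	/* 8K shares of the page itself */
		int sz = 1;				/* first bit after the shift: 64K class */

		mapindex >>= 1;				/* skip the 8K bit, as the HAT code does */
		while (mapindex) {
			if (mapindex & 0x1)
				cnt += leader_share[sz];
			mapindex >>= 1;
			sz++;
		}
		return (cnt);
	}

	int
	main(void)
	{
		/* Page participates in a 64K-class (bit 1) and a 4M-class (bit 3) mapping. */
		printf("%lu\n", count_share(0x2 | 0x8));	/* prints 8 = 5 + 2 + 1 */
		return (0);
	}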
8082 8075
8083 8076 /*
8084 8077 * Return 1 if the number of mappings exceeds sh_thresh. Return 0
8085 8078 * otherwise. Count shared hmeblks by region's refcnt.
8086 8079 */
8087 8080 int
8088 8081 hat_page_checkshare(page_t *pp, ulong_t sh_thresh)
8089 8082 {
8090 8083 kmutex_t *pml;
8091 8084 ulong_t cnt = 0;
8092 8085 int index, sz = TTE8K;
8093 8086 struct sf_hment *sfhme, *tmphme = NULL;
8094 8087 struct hme_blk *hmeblkp;
8095 8088
8096 8089 pml = sfmmu_mlist_enter(pp);
8097 8090
8098 8091 #ifdef VAC
8099 8092 if (kpm_enable)
8100 8093 cnt = pp->p_kpmref;
8101 8094 #endif
8102 8095
8103 8096 if (vpm_enable && pp->p_vpmref) {
8104 8097 cnt += 1;
8105 8098 }
8106 8099
8107 8100 if (pp->p_share + cnt > sh_thresh) {
8108 8101 sfmmu_mlist_exit(pml);
8109 8102 return (1);
8110 8103 }
8111 8104
8112 8105 index = PP_MAPINDEX(pp);
8113 8106
8114 8107 again:
8115 8108 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
8116 8109 tmphme = sfhme->hme_next;
8117 8110 if (IS_PAHME(sfhme)) {
8118 8111 continue;
8119 8112 }
8120 8113
8121 8114 hmeblkp = sfmmu_hmetohblk(sfhme);
8122 8115 if (hme_size(sfhme) != sz) {
8123 8116 continue;
8124 8117 }
8125 8118
8126 8119 if (hmeblkp->hblk_shared) {
8127 8120 sf_srd_t *srdp = hblktosrd(hmeblkp);
8128 8121 uint_t rid = hmeblkp->hblk_tag.htag_rid;
8129 8122 sf_region_t *rgnp;
8130 8123 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
8131 8124 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
8132 8125 ASSERT(srdp != NULL);
8133 8126 rgnp = srdp->srd_hmergnp[rid];
8134 8127 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp,
8135 8128 rgnp, rid);
8136 8129 cnt += rgnp->rgn_refcnt;
8137 8130 } else {
8138 8131 cnt++;
8139 8132 }
8140 8133 if (cnt > sh_thresh) {
8141 8134 sfmmu_mlist_exit(pml);
8142 8135 return (1);
8143 8136 }
8144 8137 }
8145 8138
8146 8139 index >>= 1;
8147 8140 sz++;
8148 8141 while (index) {
8149 8142 pp = PP_GROUPLEADER(pp, sz);
8150 8143 ASSERT(sfmmu_mlist_held(pp));
8151 8144 if (index & 0x1) {
8152 8145 goto again;
8153 8146 }
8154 8147 index >>= 1;
8155 8148 sz++;
8156 8149 }
8157 8150 sfmmu_mlist_exit(pml);
8158 8151 return (0);
8159 8152 }
8160 8153
8161 8154 /*
8162 8155 * Unload all large mappings to the pp and reset the p_szc field of every
8163 8156 * constituent page according to the remaining mappings.
8164 8157 *
8165 8158 * pp must be locked SE_EXCL. Even though no other constituent pages are
8166 8159 * locked it's legal to unload the large mappings to the pp because all
8167 8160 * constituent pages of large locked mappings have to be locked SE_SHARED.
8168 8161 * This means if we have SE_EXCL lock on one of constituent pages none of the
8169 8162 * large mappings to pp are locked.
8170 8163 *
8171 8164 * Decrease p_szc field starting from the last constituent page and ending
8172 8165 * with the root page. This method is used because other threads rely on the
8173 8166 * root's p_szc to find the lock to synchronize on. After a root page_t's p_szc
8174 8167 * is demoted, other threads will succeed in sfmmu_mlspl_enter(). This
8175 8168 * ensures that p_szc changes of the constituent pages appear atomic to all
8176 8169 * threads that use sfmmu_mlspl_enter() to examine the p_szc field.
8177 8170 *
8178 8171 * This mechanism is only used for file system pages where it's not always
8179 8172 * possible to get SE_EXCL locks on all constituent pages to demote the size
8180 8173 * code (as is done for anonymous or kernel large pages).
8181 8174 *
8182 8175 * See more comments in front of sfmmu_mlspl_enter().
8183 8176 */
8184 8177 void
8185 8178 hat_page_demote(page_t *pp)
8186 8179 {
8187 8180 int index;
8188 8181 int sz;
8189 8182 cpuset_t cpuset;
8190 8183 int sync = 0;
8191 8184 page_t *rootpp;
8192 8185 struct sf_hment *sfhme;
8193 8186 struct sf_hment *tmphme = NULL;
8194 8187 uint_t pszc;
8195 8188 page_t *lastpp;
8196 8189 cpuset_t tset;
8197 8190 pgcnt_t npgs;
8198 8191 kmutex_t *pml;
8199 8192 kmutex_t *pmtx = NULL;
8200 8193
8201 8194 ASSERT(PAGE_EXCL(pp));
8202 8195 ASSERT(!PP_ISFREE(pp));
8203 8196 ASSERT(!PP_ISKAS(pp));
8204 8197 ASSERT(page_szc_lock_assert(pp));
8205 8198 pml = sfmmu_mlist_enter(pp);
8206 8199
8207 8200 pszc = pp->p_szc;
8208 8201 if (pszc == 0) {
8209 8202 goto out;
8210 8203 }
8211 8204
8212 8205 index = PP_MAPINDEX(pp) >> 1;
8213 8206
8214 8207 if (index) {
8215 8208 CPUSET_ZERO(cpuset);
8216 8209 sz = TTE64K;
8217 8210 sync = 1;
8218 8211 }
8219 8212
8220 8213 while (index) {
8221 8214 if (!(index & 0x1)) {
8222 8215 index >>= 1;
8223 8216 sz++;
8224 8217 continue;
8225 8218 }
8226 8219 ASSERT(sz <= pszc);
8227 8220 rootpp = PP_GROUPLEADER(pp, sz);
8228 8221 for (sfhme = rootpp->p_mapping; sfhme; sfhme = tmphme) {
8229 8222 tmphme = sfhme->hme_next;
8230 8223 ASSERT(!IS_PAHME(sfhme));
8231 8224 if (hme_size(sfhme) != sz) {
8232 8225 continue;
8233 8226 }
8234 8227 tset = sfmmu_pageunload(rootpp, sfhme, sz);
8235 8228 CPUSET_OR(cpuset, tset);
8236 8229 }
8237 8230 if (index >>= 1) {
8238 8231 sz++;
8239 8232 }
8240 8233 }
8241 8234
8242 8235 ASSERT(!PP_ISMAPPED_LARGE(pp));
8243 8236
8244 8237 if (sync) {
8245 8238 xt_sync(cpuset);
8246 8239 #ifdef VAC
8247 8240 if (PP_ISTNC(pp)) {
8248 8241 conv_tnc(rootpp, sz);
8249 8242 }
8250 8243 #endif /* VAC */
8251 8244 }
8252 8245
8253 8246 pmtx = sfmmu_page_enter(pp);
8254 8247
8255 8248 ASSERT(pp->p_szc == pszc);
8256 8249 rootpp = PP_PAGEROOT(pp);
8257 8250 ASSERT(rootpp->p_szc == pszc);
8258 8251 lastpp = PP_PAGENEXT_N(rootpp, TTEPAGES(pszc) - 1);
8259 8252
8260 8253 while (lastpp != rootpp) {
8261 8254 sz = PP_MAPINDEX(lastpp) ? fnd_mapping_sz(lastpp) : 0;
8262 8255 ASSERT(sz < pszc);
8263 8256 npgs = (sz == 0) ? 1 : TTEPAGES(sz);
8264 8257 ASSERT(P2PHASE(lastpp->p_pagenum, npgs) == npgs - 1);
8265 8258 while (--npgs > 0) {
8266 8259 lastpp->p_szc = (uchar_t)sz;
8267 8260 lastpp = PP_PAGEPREV(lastpp);
8268 8261 }
8269 8262 if (sz) {
8270 8263 /*
8271 8264 * make sure before current root's pszc
8272 8265 * is updated all updates to constituent pages pszc
8273 8266 * fields are globally visible.
8274 8267 */
8275 8268 membar_producer();
8276 8269 }
8277 8270 lastpp->p_szc = sz;
8278 8271 ASSERT(IS_P2ALIGNED(lastpp->p_pagenum, TTEPAGES(sz)));
8279 8272 if (lastpp != rootpp) {
8280 8273 lastpp = PP_PAGEPREV(lastpp);
8281 8274 }
8282 8275 }
8283 8276 if (sz == 0) {
8284 8277 /* the loop above doesn't cover this case */
8285 8278 rootpp->p_szc = 0;
8286 8279 }
8287 8280 out:
8288 8281 ASSERT(pp->p_szc == 0);
8289 8282 if (pmtx != NULL) {
8290 8283 sfmmu_page_exit(pmtx);
8291 8284 }
8292 8285 sfmmu_mlist_exit(pml);
8293 8286 }
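
The ordering argument in the hat_page_demote() block comment above boils down to a publish pattern: constituent size codes are written back-to-front, and a store barrier is issued before the root's size code is published, so a reader that observes the demoted root also observes the constituents' new values. A minimal sketch of that pattern in plain C11 (atomic_thread_fence stands in for the kernel's membar_producer(); the array and sizes are hypothetical):

	#include <stdatomic.h>
	#include <stdint.h>

	#define	NPAGES	8

	/* Hypothetical stand-in for the constituent pages' size codes. */
	static uint8_t szc[NPAGES];

	static void
	demote_group(uint8_t newsz)
	{
		int i;

		/* Update constituents from the last page down to (but not) the root. */
		for (i = NPAGES - 1; i > 0; i--)
			szc[i] = newsz;

		/* Equivalent in spirit to membar_producer() in the HAT code. */
		atomic_thread_fence(memory_order_release);

		/* Publish the root's size code last. */
		szc[0] = newsz;
	}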
8294 8287
8295 8288 /*
8296 8289 * Refresh the HAT ismttecnt[] element for size szc.
8297 8290 * Caller must have set ISM busy flag to prevent mapping
8298 8291 * lists from changing while we're traversing them.
8299 8292 */
8300 8293 pgcnt_t
8301 8294 ism_tsb_entries(sfmmu_t *sfmmup, int szc)
8302 8295 {
8303 8296 ism_blk_t *ism_blkp = sfmmup->sfmmu_iblk;
8304 8297 ism_map_t *ism_map;
8305 8298 pgcnt_t npgs = 0;
8306 8299 pgcnt_t npgs_scd = 0;
8307 8300 int j;
8308 8301 sf_scd_t *scdp;
8309 8302 uchar_t rid;
8310 8303
8311 8304 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY));
8312 8305 scdp = sfmmup->sfmmu_scdp;
8313 8306
8314 8307 for (; ism_blkp != NULL; ism_blkp = ism_blkp->iblk_next) {
8315 8308 ism_map = ism_blkp->iblk_maps;
8316 8309 for (j = 0; ism_map[j].imap_ismhat && j < ISM_MAP_SLOTS; j++) {
8317 8310 rid = ism_map[j].imap_rid;
8318 8311 ASSERT(rid == SFMMU_INVALID_ISMRID ||
8319 8312 rid < sfmmup->sfmmu_srdp->srd_next_ismrid);
8320 8313
8321 8314 if (scdp != NULL && rid != SFMMU_INVALID_ISMRID &&
8322 8315 SF_RGNMAP_TEST(scdp->scd_ismregion_map, rid)) {
8323 8316 /* ISM is in sfmmup's SCD */
8324 8317 npgs_scd +=
8325 8318 ism_map[j].imap_ismhat->sfmmu_ttecnt[szc];
8326 8319 } else {
8327 8320 /* ISM is not in sfmmup's SCD */
8328 8321 npgs +=
8329 8322 ism_map[j].imap_ismhat->sfmmu_ttecnt[szc];
8330 8323 }
8331 8324 }
8332 8325 }
8333 8326 sfmmup->sfmmu_ismttecnt[szc] = npgs;
8334 8327 sfmmup->sfmmu_scdismttecnt[szc] = npgs_scd;
8335 8328 return (npgs);
8336 8329 }
8337 8330
8338 8331 /*
8339 8332 * Yield the memory claim requirement for an address space.
8340 8333 *
8341 8334 * This is currently implemented as the number of bytes that have active
8342 8335 * hardware translations that have page structures. Therefore, it can
8343 8336 * underestimate the traditional resident set size, eg, if the
8344 8337 * physical page is present and the hardware translation is missing;
8345 8338 * and it can overestimate the rss, eg, if there are active
8346 8339 * translations to a frame buffer with page structs.
8347 8340 * Also, it does not take sharing into account.
8348 8341 *
8349 8342 * Note that we don't acquire locks here since this function is most often
8350 8343 * called from the clock thread.
8351 8344 */
8352 8345 size_t
8353 8346 hat_get_mapped_size(struct hat *hat)
8354 8347 {
8355 8348 size_t assize = 0;
8356 - int i;
8349 + int i;
8357 8350
8358 8351 if (hat == NULL)
8359 8352 return (0);
8360 8353
8361 8354 for (i = 0; i < mmu_page_sizes; i++)
8362 8355 assize += ((pgcnt_t)hat->sfmmu_ttecnt[i] +
8363 8356 (pgcnt_t)hat->sfmmu_scdrttecnt[i]) * TTEBYTES(i);
8364 8357
8365 8358 if (hat->sfmmu_iblk == NULL)
8366 8359 return (assize);
8367 8360
8368 8361 for (i = 0; i < mmu_page_sizes; i++)
8369 8362 assize += ((pgcnt_t)hat->sfmmu_ismttecnt[i] +
8370 8363 (pgcnt_t)hat->sfmmu_scdismttecnt[i]) * TTEBYTES(i);
8371 8364
8372 8365 return (assize);
8373 8366 }
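
As a worked example of the sum that hat_get_mapped_size() computes above, assuming the usual sun4u page-size progression of 8K, 64K, 512K, 4M, 32M, 256M (each size 8x the previous; this progression and the counts below are assumptions for illustration, not something the diff states):

	#include <stdio.h>
	#include <stddef.h>

	int
	main(void)
	{
		/* Assumed page sizes: 8K << (3 * i) for i = 0..5. */
		size_t ttebytes[6];
		/* Hypothetical per-size translation counts for one address space. */
		size_t ttecnt[6] = { 1000, 16, 0, 2, 0, 0 };
		size_t assize = 0;
		int i;

		for (i = 0; i < 6; i++) {
			ttebytes[i] = (size_t)8192 << (3 * i);
			assize += ttecnt[i] * ttebytes[i];
		}
		/* 1000*8K + 16*64K + 2*4M = 8192000 + 1048576 + 8388608 bytes. */
		printf("%zu\n", assize);
		return (0);
	}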
8374 8367
8375 8368 int
8376 8369 hat_stats_enable(struct hat *hat)
8377 8370 {
8378 8371 hatlock_t *hatlockp;
8379 8372
8380 8373 hatlockp = sfmmu_hat_enter(hat);
8381 8374 hat->sfmmu_rmstat++;
8382 8375 sfmmu_hat_exit(hatlockp);
8383 8376 return (1);
8384 8377 }
8385 8378
8386 8379 void
8387 8380 hat_stats_disable(struct hat *hat)
8388 8381 {
8389 8382 hatlock_t *hatlockp;
8390 8383
8391 8384 hatlockp = sfmmu_hat_enter(hat);
8392 8385 hat->sfmmu_rmstat--;
8393 8386 sfmmu_hat_exit(hatlockp);
8394 8387 }
8395 8388
8396 8389 /*
8397 8390 * Routines for entering or removing ourselves from the
8398 8391 * ism_hat's mapping list. This is used for both private and
8399 8392 * SCD hats.
8400 8393 */
8401 8394 static void
8402 8395 iment_add(struct ism_ment *iment, struct hat *ism_hat)
8403 8396 {
8404 8397 ASSERT(MUTEX_HELD(&ism_mlist_lock));
8405 8398
8406 8399 iment->iment_prev = NULL;
8407 8400 iment->iment_next = ism_hat->sfmmu_iment;
8408 8401 if (ism_hat->sfmmu_iment) {
8409 8402 ism_hat->sfmmu_iment->iment_prev = iment;
8410 8403 }
8411 8404 ism_hat->sfmmu_iment = iment;
8412 8405 }
8413 8406
8414 8407 static void
8415 8408 iment_sub(struct ism_ment *iment, struct hat *ism_hat)
8416 8409 {
8417 8410 ASSERT(MUTEX_HELD(&ism_mlist_lock));
8418 8411
8419 8412 if (ism_hat->sfmmu_iment == NULL) {
8420 8413 panic("ism map entry remove - no entries");
8421 8414 }
8422 8415
8423 8416 if (iment->iment_prev) {
8424 8417 ASSERT(ism_hat->sfmmu_iment != iment);
8425 8418 iment->iment_prev->iment_next = iment->iment_next;
8426 8419 } else {
8427 8420 ASSERT(ism_hat->sfmmu_iment == iment);
8428 8421 ism_hat->sfmmu_iment = iment->iment_next;
8429 8422 }
8430 8423
8431 8424 if (iment->iment_next) {
8432 8425 iment->iment_next->iment_prev = iment->iment_prev;
8433 8426 }
8434 8427
8435 8428 /*
8436 8429 * zero out the entry
8437 8430 */
8438 8431 iment->iment_next = NULL;
8439 8432 iment->iment_prev = NULL;
8440 8433 iment->iment_hat = NULL;
8441 8434 iment->iment_base_va = 0;
8442 8435 }
8443 8436
8444 8437 /*
8445 8438 * Hat_share()/unshare() return a (non-zero) error
8446 8439 * when saddr and daddr are not properly aligned.
8447 8440 *
8448 8441 * The top level mapping element determines the alignment
8449 8442 * requirement for saddr and daddr, depending on different
8450 8443 * architectures.
8451 8444 *
8452 8445 * When hat_share()/unshare() are not supported,
8453 8446 * HATOP_SHARE()/UNSHARE() return 0
8454 8447 */
8455 8448 int
8456 -hat_share(struct hat *sfmmup, caddr_t addr,
8457 - struct hat *ism_hatid, caddr_t sptaddr, size_t len, uint_t ismszc)
8449 +hat_share(struct hat *sfmmup, caddr_t addr, struct hat *ism_hatid,
8450 + caddr_t sptaddr, size_t len, uint_t ismszc)
8458 8451 {
8459 8452 ism_blk_t *ism_blkp;
8460 8453 ism_blk_t *new_iblk;
8461 - ism_map_t *ism_map;
8454 + ism_map_t *ism_map;
8462 8455 ism_ment_t *ism_ment;
8463 8456 int i, added;
8464 8457 hatlock_t *hatlockp;
8465 8458 int reload_mmu = 0;
8466 8459 uint_t ismshift = page_get_shift(ismszc);
8467 8460 size_t ismpgsz = page_get_pagesize(ismszc);
8468 8461 uint_t ismmask = (uint_t)ismpgsz - 1;
8469 8462 size_t sh_size = ISM_SHIFT(ismshift, len);
8470 8463 ushort_t ismhatflag;
8471 8464 hat_region_cookie_t rcookie;
8472 8465 sf_scd_t *old_scdp;
8473 8466
8474 8467 #ifdef DEBUG
8475 8468 caddr_t eaddr = addr + len;
8476 8469 #endif /* DEBUG */
8477 8470
8478 8471 ASSERT(ism_hatid != NULL && sfmmup != NULL);
8479 8472 ASSERT(sptaddr == ISMID_STARTADDR);
8480 8473 /*
8481 8474 * Check the alignment.
8482 8475 */
8483 8476 if (!ISM_ALIGNED(ismshift, addr) || !ISM_ALIGNED(ismshift, sptaddr))
8484 8477 return (EINVAL);
8485 8478
8486 8479 /*
8487 8480 * Check size alignment.
8488 8481 */
8489 8482 if (!ISM_ALIGNED(ismshift, len))
8490 8483 return (EINVAL);
8491 8484
8492 8485 /*
8493 8486 * Allocate ism_ment for the ism_hat's mapping list, and an
8494 8487 * ism map blk in case we need one. We must do our
8495 8488 * allocations before acquiring locks to prevent a deadlock
8496 8489 * in the kmem allocator on the mapping list lock.
8497 8490 */
8498 8491 new_iblk = kmem_cache_alloc(ism_blk_cache, KM_SLEEP);
8499 8492 ism_ment = kmem_cache_alloc(ism_ment_cache, KM_SLEEP);
8500 8493
8501 8494 /*
8502 8495 * Serialize ISM mappings with the ISM busy flag, and also the
8503 8496 * trap handlers.
8504 8497 */
8505 8498 sfmmu_ismhat_enter(sfmmup, 0);
8506 8499
8507 8500 /*
8508 8501 * Allocate an ism map blk if necessary.
8509 8502 */
8510 8503 if (sfmmup->sfmmu_iblk == NULL) {
8511 8504 sfmmup->sfmmu_iblk = new_iblk;
8512 8505 bzero(new_iblk, sizeof (*new_iblk));
8513 8506 new_iblk->iblk_nextpa = (uint64_t)-1;
8514 8507 membar_stst(); /* make sure next ptr visible to all CPUs */
8515 8508 sfmmup->sfmmu_ismblkpa = va_to_pa((caddr_t)new_iblk);
8516 8509 reload_mmu = 1;
8517 8510 new_iblk = NULL;
8518 8511 }
8519 8512
8520 8513 #ifdef DEBUG
8521 8514 /*
8522 8515 * Make sure mapping does not already exist.
8523 8516 */
8524 8517 ism_blkp = sfmmup->sfmmu_iblk;
8525 8518 while (ism_blkp != NULL) {
8526 8519 ism_map = ism_blkp->iblk_maps;
8527 8520 for (i = 0; i < ISM_MAP_SLOTS && ism_map[i].imap_ismhat; i++) {
8528 8521 if ((addr >= ism_start(ism_map[i]) &&
8529 8522 addr < ism_end(ism_map[i])) ||
8530 8523 eaddr > ism_start(ism_map[i]) &&
8531 8524 eaddr <= ism_end(ism_map[i])) {
8532 8525 panic("sfmmu_share: Already mapped!");
8533 8526 }
8534 8527 }
8535 8528 ism_blkp = ism_blkp->iblk_next;
8536 8529 }
8537 8530 #endif /* DEBUG */
8538 8531
8539 8532 ASSERT(ismszc >= TTE4M);
8540 8533 if (ismszc == TTE4M) {
8541 8534 ismhatflag = HAT_4M_FLAG;
8542 8535 } else if (ismszc == TTE32M) {
8543 8536 ismhatflag = HAT_32M_FLAG;
8544 8537 } else if (ismszc == TTE256M) {
8545 8538 ismhatflag = HAT_256M_FLAG;
8546 8539 }
8547 8540 /*
8548 8541 * Add mapping to first available mapping slot.
8549 8542 */
8550 8543 ism_blkp = sfmmup->sfmmu_iblk;
8551 8544 added = 0;
8552 8545 while (!added) {
8553 8546 ism_map = ism_blkp->iblk_maps;
8554 8547 for (i = 0; i < ISM_MAP_SLOTS; i++) {
8555 8548 if (ism_map[i].imap_ismhat == NULL) {
8556 8549
8557 8550 ism_map[i].imap_ismhat = ism_hatid;
8558 8551 ism_map[i].imap_vb_shift = (uchar_t)ismshift;
8559 8552 ism_map[i].imap_rid = SFMMU_INVALID_ISMRID;
8560 8553 ism_map[i].imap_hatflags = ismhatflag;
8561 8554 ism_map[i].imap_sz_mask = ismmask;
8562 8555 /*
8563 8556 * imap_seg is checked in ISM_CHECK to see if
8564 8557 * non-NULL, then other info assumed valid.
8565 8558 */
8566 8559 membar_stst();
8567 8560 ism_map[i].imap_seg = (uintptr_t)addr | sh_size;
8568 8561 ism_map[i].imap_ment = ism_ment;
8569 8562
8570 8563 /*
8571 8564 * Now add ourselves to the ism_hat's
8572 8565 * mapping list.
8573 8566 */
8574 8567 ism_ment->iment_hat = sfmmup;
8575 8568 ism_ment->iment_base_va = addr;
8576 8569 ism_hatid->sfmmu_ismhat = 1;
8577 8570 mutex_enter(&ism_mlist_lock);
8578 8571 iment_add(ism_ment, ism_hatid);
8579 8572 mutex_exit(&ism_mlist_lock);
8580 8573 added = 1;
8581 8574 break;
8582 8575 }
8583 8576 }
8584 8577 if (!added && ism_blkp->iblk_next == NULL) {
8585 8578 ism_blkp->iblk_next = new_iblk;
8586 8579 new_iblk = NULL;
8587 8580 bzero(ism_blkp->iblk_next,
8588 8581 sizeof (*ism_blkp->iblk_next));
8589 8582 ism_blkp->iblk_next->iblk_nextpa = (uint64_t)-1;
8590 8583 membar_stst();
8591 8584 ism_blkp->iblk_nextpa =
8592 8585 va_to_pa((caddr_t)ism_blkp->iblk_next);
8593 8586 }
8594 8587 ism_blkp = ism_blkp->iblk_next;
8595 8588 }
8596 8589
8597 8590 /*
8598 8591 * After calling hat_join_region, sfmmup may join a new SCD or
8599 8592 * move from the old scd to a new scd, in which case, we want to
8600 8593 * shrink the sfmmup's private tsb size, i.e., pass shrink to
8601 8594 * sfmmu_check_page_sizes at the end of this routine.
8602 8595 */
8603 8596 old_scdp = sfmmup->sfmmu_scdp;
8604 8597
8605 8598 rcookie = hat_join_region(sfmmup, addr, len, (void *)ism_hatid, 0,
8606 8599 PROT_ALL, ismszc, NULL, HAT_REGION_ISM);
8607 8600 if (rcookie != HAT_INVALID_REGION_COOKIE) {
8608 8601 ism_map[i].imap_rid = (uchar_t)((uint64_t)rcookie);
8609 8602 }
8610 8603 /*
8611 8604 * Update our counters for this sfmmup's ism mappings.
8612 8605 */
8613 8606 for (i = 0; i <= ismszc; i++) {
8614 8607 if (!(disable_ism_large_pages & (1 << i)))
8615 8608 (void) ism_tsb_entries(sfmmup, i);
8616 8609 }
8617 8610
8618 8611 /*
8619 8612 * For ISM and DISM we do not support 512K pages, so we only
8620 8613 * search the 4M and 8K/64K hashes for 4 pagesize cpus, and search the
8621 8614 * 256M or 32M, and 4M and 8K/64K hashes for 6 pagesize cpus.
8622 8615 *
8623 8616 * Need to set 32M/256M ISM flags to make sure
8624 8617 * sfmmu_check_page_sizes() enables them on Panther.
8625 8618 */
8626 8619 ASSERT((disable_ism_large_pages & (1 << TTE512K)) != 0);
8627 8620
8628 8621 switch (ismszc) {
8629 8622 case TTE256M:
8630 8623 if (!SFMMU_FLAGS_ISSET(sfmmup, HAT_256M_ISM)) {
8631 8624 hatlockp = sfmmu_hat_enter(sfmmup);
8632 8625 SFMMU_FLAGS_SET(sfmmup, HAT_256M_ISM);
8633 8626 sfmmu_hat_exit(hatlockp);
8634 8627 }
8635 8628 break;
8636 8629 case TTE32M:
8637 8630 if (!SFMMU_FLAGS_ISSET(sfmmup, HAT_32M_ISM)) {
8638 8631 hatlockp = sfmmu_hat_enter(sfmmup);
8639 8632 SFMMU_FLAGS_SET(sfmmup, HAT_32M_ISM);
8640 8633 sfmmu_hat_exit(hatlockp);
8641 8634 }
8642 8635 break;
8643 8636 default:
8644 8637 break;
8645 8638 }
8646 8639
8647 8640 /*
8648 8641 * If we updated the ismblkpa for this HAT we must make
8649 8642 * sure all CPUs running this process reload their tsbmiss area.
8650 8643 * Otherwise they will fail to load the mappings in the tsbmiss
8651 8644 * handler and will loop calling pagefault().
8652 8645 */
8653 8646 if (reload_mmu) {
8654 8647 hatlockp = sfmmu_hat_enter(sfmmup);
8655 8648 sfmmu_sync_mmustate(sfmmup);
8656 8649 sfmmu_hat_exit(hatlockp);
8657 8650 }
8658 8651
8659 8652 sfmmu_ismhat_exit(sfmmup, 0);
8660 8653
8661 8654 /*
8662 8655 * Free up ismblk if we didn't use it.
8663 8656 */
8664 8657 if (new_iblk != NULL)
8665 8658 kmem_cache_free(ism_blk_cache, new_iblk);
8666 8659
8667 8660 /*
8668 8661 * Check TSB and TLB page sizes.
8669 8662 */
8670 8663 if (sfmmup->sfmmu_scdp != NULL && old_scdp != sfmmup->sfmmu_scdp) {
8671 8664 sfmmu_check_page_sizes(sfmmup, 0);
8672 8665 } else {
8673 8666 sfmmu_check_page_sizes(sfmmup, 1);
8674 8667 }
8675 8668 return (0);
8676 8669 }
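
The EINVAL paths near the top of hat_share() hinge on power-of-two alignment checks of the shared address and length against the ISM granularity. A minimal, generic illustration of that kind of check (this is not the kernel's ISM_ALIGNED macro; the name and shift value are illustrative):

	#include <stdint.h>

	/* Nonzero when va is aligned to the 2^shift granularity. */
	static int
	ism_aligned_sketch(uintptr_t va, unsigned int shift)
	{
		return ((va & (((uintptr_t)1 << shift) - 1)) == 0);
	}

	/* e.g. ism_aligned_sketch(0x400000, 22) is true for a 4M granularity. */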
8677 8670
8678 8671 /*
8679 8672 * hat_unshare removes exactly one ism_map from
8680 8673 * this process's as. It expects multiple calls
8681 8674 * to hat_unshare for multiple shm segments.
8682 8675 */
8683 8676 void
8684 8677 hat_unshare(struct hat *sfmmup, caddr_t addr, size_t len, uint_t ismszc)
8685 8678 {
8686 - ism_map_t *ism_map;
8679 + ism_map_t *ism_map;
8687 8680 ism_ment_t *free_ment = NULL;
8688 8681 ism_blk_t *ism_blkp;
8689 8682 struct hat *ism_hatid;
8690 - int found, i;
8683 + int found, i;
8691 8684 hatlock_t *hatlockp;
8692 8685 struct tsb_info *tsbinfo;
8693 8686 uint_t ismshift = page_get_shift(ismszc);
8694 8687 size_t sh_size = ISM_SHIFT(ismshift, len);
8695 8688 uchar_t ism_rid;
8696 8689 sf_scd_t *old_scdp;
8697 8690
8698 8691 ASSERT(ISM_ALIGNED(ismshift, addr));
8699 8692 ASSERT(ISM_ALIGNED(ismshift, len));
8700 8693 ASSERT(sfmmup != NULL);
8701 8694 ASSERT(sfmmup != ksfmmup);
8702 8695
8703 8696 ASSERT(sfmmup->sfmmu_as != NULL);
8704 8697
8705 8698 /*
8706 8699 * Make sure that during the entire time ISM mappings are removed,
8707 8700 * the trap handlers serialize behind us, and that no one else
8708 8701 * can be mucking with ISM mappings. This also lets us get away
8709 8702 * with not doing expensive cross calls to flush the TLB -- we
8710 8703 * just discard the context, flush the entire TSB, and call it
8711 8704 * a day.
8712 8705 */
8713 8706 sfmmu_ismhat_enter(sfmmup, 0);
8714 8707
8715 8708 /*
8716 8709 * Remove the mapping.
8717 8710 *
8718 8711 * We can't have any holes in the ism map.
8719 8712 * The tsb miss code while searching the ism map will
8721 8714 * stop on an empty map slot. So we must move
8722 8715 * every entry past the hole up by one, if any.
8722 8715 *
8723 8716 * Also empty ism map blks are not freed until the
8724 8717 * process exits. This is to prevent a MT race condition
8725 8718 * between sfmmu_unshare() and sfmmu_tsbmiss_exception().
8726 8719 */
8727 8720 found = 0;
8728 8721 ism_blkp = sfmmup->sfmmu_iblk;
8729 8722 while (!found && ism_blkp != NULL) {
8730 8723 ism_map = ism_blkp->iblk_maps;
8731 8724 for (i = 0; i < ISM_MAP_SLOTS; i++) {
8732 8725 if (addr == ism_start(ism_map[i]) &&
8733 8726 sh_size == (size_t)(ism_size(ism_map[i]))) {
8734 8727 found = 1;
8735 8728 break;
8736 8729 }
8737 8730 }
8738 8731 if (!found)
8739 8732 ism_blkp = ism_blkp->iblk_next;
8740 8733 }
8741 8734
8742 8735 if (found) {
8743 8736 ism_hatid = ism_map[i].imap_ismhat;
8744 8737 ism_rid = ism_map[i].imap_rid;
8745 8738 ASSERT(ism_hatid != NULL);
8746 8739 ASSERT(ism_hatid->sfmmu_ismhat == 1);
8747 8740
8748 8741 /*
8749 8742 * After hat_leave_region, the sfmmup may leave SCD,
8750 8743 * in which case, we want to grow the private tsb size when
8751 8744 * calling sfmmu_check_page_sizes at the end of the routine.
8752 8745 */
8753 8746 old_scdp = sfmmup->sfmmu_scdp;
8754 8747 /*
8755 8748 * Then remove ourselves from the region.
8756 8749 */
8757 8750 if (ism_rid != SFMMU_INVALID_ISMRID) {
8758 8751 hat_leave_region(sfmmup, (void *)((uint64_t)ism_rid),
8759 8752 HAT_REGION_ISM);
8760 8753 }
8761 8754
8762 8755 /*
8763 8756 * And now guarantee that any other cpu
8764 8757 * that tries to process an ISM miss
8765 8758 * will go to tl=0.
8766 8759 */
8767 8760 hatlockp = sfmmu_hat_enter(sfmmup);
8768 8761 sfmmu_invalidate_ctx(sfmmup);
8769 8762 sfmmu_hat_exit(hatlockp);
8770 8763
8771 8764 /*
8772 8765 * Remove ourselves from the ism mapping list.
8773 8766 */
8774 8767 mutex_enter(&ism_mlist_lock);
8775 8768 iment_sub(ism_map[i].imap_ment, ism_hatid);
8776 8769 mutex_exit(&ism_mlist_lock);
8777 8770 free_ment = ism_map[i].imap_ment;
8778 8771
8779 8772 /*
8780 8773 * We delete the ism map by copying
8781 8774 * the next map over the current one.
8782 8775 * We will take the next one in the maps
8783 8776 * array or from the next ism_blk.
8784 8777 */
8785 8778 while (ism_blkp != NULL) {
8786 8779 ism_map = ism_blkp->iblk_maps;
8787 8780 while (i < (ISM_MAP_SLOTS - 1)) {
8788 8781 ism_map[i] = ism_map[i + 1];
8789 8782 i++;
8790 8783 }
8791 8784 /* i == (ISM_MAP_SLOTS - 1) */
8792 8785 ism_blkp = ism_blkp->iblk_next;
8793 8786 if (ism_blkp != NULL) {
8794 8787 ism_map[i] = ism_blkp->iblk_maps[0];
8795 8788 i = 0;
8796 8789 } else {
8797 8790 ism_map[i].imap_seg = 0;
8798 8791 ism_map[i].imap_vb_shift = 0;
8799 8792 ism_map[i].imap_rid = SFMMU_INVALID_ISMRID;
8800 8793 ism_map[i].imap_hatflags = 0;
8801 8794 ism_map[i].imap_sz_mask = 0;
8802 8795 ism_map[i].imap_ismhat = NULL;
8803 8796 ism_map[i].imap_ment = NULL;
8804 8797 }
8805 8798 }
8806 8799
8807 8800 /*
8808 8801 * Now flush entire TSB for the process, since
8809 8802 * demapping page by page can be too expensive.
8810 8803 * We don't have to flush the TLB here anymore
8811 8804 * since we switch to a new TLB ctx instead.
8812 8805 * Also, there is no need to flush if the process
8813 8806 * is exiting since the TSB will be freed later.
8814 8807 */
8815 8808 if (!sfmmup->sfmmu_free) {
8816 8809 hatlockp = sfmmu_hat_enter(sfmmup);
8817 8810 for (tsbinfo = sfmmup->sfmmu_tsb; tsbinfo != NULL;
8818 8811 tsbinfo = tsbinfo->tsb_next) {
8819 8812 if (tsbinfo->tsb_flags & TSB_SWAPPED)
8820 8813 continue;
8821 8814 if (tsbinfo->tsb_flags & TSB_RELOC_FLAG) {
8822 8815 tsbinfo->tsb_flags |=
8823 8816 TSB_FLUSH_NEEDED;
8824 8817 continue;
8825 8818 }
8826 8819
8827 8820 sfmmu_inv_tsb(tsbinfo->tsb_va,
8828 8821 TSB_BYTES(tsbinfo->tsb_szc));
8829 8822 }
8830 8823 sfmmu_hat_exit(hatlockp);
8831 8824 }
8832 8825 }
8833 8826
8834 8827 /*
8835 8828 * Update our counters for this sfmmup's ism mappings.
8836 8829 */
8837 8830 for (i = 0; i <= ismszc; i++) {
8838 8831 if (!(disable_ism_large_pages & (1 << i)))
8839 8832 (void) ism_tsb_entries(sfmmup, i);
8840 8833 }
8841 8834
8842 8835 sfmmu_ismhat_exit(sfmmup, 0);
8843 8836
8844 8837 /*
8845 8838 * We must do our freeing here after dropping locks
8846 8839 * to prevent a deadlock in the kmem allocator on the
8847 8840 * mapping list lock.
8848 8841 */
8849 8842 if (free_ment != NULL)
8850 8843 kmem_cache_free(ism_ment_cache, free_ment);
8851 8844
8852 8845 /*
8853 8846 * Check TSB and TLB page sizes if the process isn't exiting.
8854 8847 */
8855 8848 if (!sfmmup->sfmmu_free) {
8856 8849 if (found && old_scdp != NULL && sfmmup->sfmmu_scdp == NULL) {
8857 8850 sfmmu_check_page_sizes(sfmmup, 1);
8858 8851 } else {
8859 8852 sfmmu_check_page_sizes(sfmmup, 0);
8860 8853 }
8861 8854 }
8862 8855 }
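
The hole-closing copy loop in hat_unshare() above ("We can't have any holes in the ism map") is an instance of a simple compaction pattern: after removing slot i, shift the remaining entries left so a scanner that stops at the first empty slot never misses a live entry. A standalone sketch over a plain array (the slot type and size are hypothetical; the real code additionally chains into the next ism_blk):

	#include <stddef.h>

	#define	SLOTS	4

	/* Hypothetical map entry; live == 0 means the slot is empty. */
	typedef struct {
		int	live;
	} slot_t;

	/* Close the hole at index i so no empty slot precedes a live one. */
	static void
	compact_slots(slot_t *map, size_t i)
	{
		while (i < SLOTS - 1) {
			map[i] = map[i + 1];
			i++;
		}
		map[SLOTS - 1].live = 0;	/* last slot becomes the new hole */
	}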
8863 8856
8864 8857 /* ARGSUSED */
8865 8858 static int
8866 8859 sfmmu_idcache_constructor(void *buf, void *cdrarg, int kmflags)
8867 8860 {
8868 8861 /* void *buf is sfmmu_t pointer */
8869 8862 bzero(buf, sizeof (sfmmu_t));
8870 8863
8871 8864 return (0);
8872 8865 }
8873 8866
8874 8867 /* ARGSUSED */
8875 8868 static void
8876 8869 sfmmu_idcache_destructor(void *buf, void *cdrarg)
8877 8870 {
8878 8871 /* void *buf is sfmmu_t pointer */
8879 8872 }
8880 8873
8881 8874 /*
8882 8875 * setup kmem hmeblks by bzeroing all members and initializing the nextpa
8883 8876 * field to be the pa of this hmeblk
8884 8877 */
8885 8878 /* ARGSUSED */
8886 8879 static int
8887 8880 sfmmu_hblkcache_constructor(void *buf, void *cdrarg, int kmflags)
8888 8881 {
8889 8882 struct hme_blk *hmeblkp;
8890 8883
8891 8884 bzero(buf, (size_t)cdrarg);
8892 8885 hmeblkp = (struct hme_blk *)buf;
8893 8886 hmeblkp->hblk_nextpa = va_to_pa((caddr_t)hmeblkp);
8894 8887
8895 8888 #ifdef HBLK_TRACE
8896 8889 mutex_init(&hmeblkp->hblk_audit_lock, NULL, MUTEX_DEFAULT, NULL);
8897 8890 #endif /* HBLK_TRACE */
8898 8891
8899 8892 return (0);
8900 8893 }
8901 8894
8902 8895 /* ARGSUSED */
8903 8896 static void
8904 8897 sfmmu_hblkcache_destructor(void *buf, void *cdrarg)
8905 8898 {
8906 8899
8907 8900 #ifdef HBLK_TRACE
8908 8901
8909 8902 struct hme_blk *hmeblkp;
8910 8903
8911 8904 hmeblkp = (struct hme_blk *)buf;
8912 8905 mutex_destroy(&hmeblkp->hblk_audit_lock);
8913 8906
8914 8907 #endif /* HBLK_TRACE */
8915 8908 }
8916 8909
8917 8910 #define SFMMU_CACHE_RECLAIM_SCAN_RATIO 8
8918 8911 static int sfmmu_cache_reclaim_scan_ratio = SFMMU_CACHE_RECLAIM_SCAN_RATIO;
8919 8912 /*
8920 8913 * The kmem allocator will callback into our reclaim routine when the system
8921 8914 * is running low in memory. We traverse the hash and free up all unused but
8922 8915 * still cached hme_blks. We also traverse the free list and free them up
8923 8916 * as well.
8924 8917 */
8925 8918 /*ARGSUSED*/
8926 8919 static void
8927 8920 sfmmu_hblkcache_reclaim(void *cdrarg)
8928 8921 {
8929 8922 int i;
8930 8923 struct hmehash_bucket *hmebp;
8931 8924 struct hme_blk *hmeblkp, *nx_hblk, *pr_hblk = NULL;
8932 8925 static struct hmehash_bucket *uhmehash_reclaim_hand;
8933 8926 static struct hmehash_bucket *khmehash_reclaim_hand;
8934 8927 struct hme_blk *list = NULL, *last_hmeblkp;
8935 8928 cpuset_t cpuset = cpu_ready_set;
8936 8929 cpu_hme_pend_t *cpuhp;
8937 8930
8938 8931 /* Free up hmeblks on the cpu pending lists */
8939 8932 for (i = 0; i < NCPU; i++) {
8940 8933 cpuhp = &cpu_hme_pend[i];
8941 8934 if (cpuhp->chp_listp != NULL) {
8942 8935 mutex_enter(&cpuhp->chp_mutex);
8943 8936 if (cpuhp->chp_listp == NULL) {
8944 8937 mutex_exit(&cpuhp->chp_mutex);
8945 8938 continue;
8946 8939 }
8947 8940 for (last_hmeblkp = cpuhp->chp_listp;
8948 8941 last_hmeblkp->hblk_next != NULL;
8949 8942 last_hmeblkp = last_hmeblkp->hblk_next)
8950 8943 ;
8951 8944 last_hmeblkp->hblk_next = list;
8952 8945 list = cpuhp->chp_listp;
8953 8946 cpuhp->chp_listp = NULL;
8954 8947 cpuhp->chp_count = 0;
8955 8948 mutex_exit(&cpuhp->chp_mutex);
8956 8949 }
8957 8950
8958 8951 }
8959 8952
8960 8953 if (list != NULL) {
8961 8954 kpreempt_disable();
8962 8955 CPUSET_DEL(cpuset, CPU->cpu_id);
8963 8956 xt_sync(cpuset);
8964 8957 xt_sync(cpuset);
8965 8958 kpreempt_enable();
8966 8959 sfmmu_hblk_free(&list);
8967 8960 list = NULL;
8968 8961 }
8969 8962
8970 8963 hmebp = uhmehash_reclaim_hand;
8971 8964 if (hmebp == NULL || hmebp > &uhme_hash[UHMEHASH_SZ])
8972 8965 uhmehash_reclaim_hand = hmebp = uhme_hash;
8973 8966 uhmehash_reclaim_hand += UHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio;
8974 8967
8975 8968 for (i = UHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; i; i--) {
8976 8969 if (SFMMU_HASH_LOCK_TRYENTER(hmebp) != 0) {
8977 8970 hmeblkp = hmebp->hmeblkp;
8978 8971 pr_hblk = NULL;
8979 8972 while (hmeblkp) {
8980 8973 nx_hblk = hmeblkp->hblk_next;
8981 8974 if (!hmeblkp->hblk_vcnt &&
8982 8975 !hmeblkp->hblk_hmecnt) {
8983 8976 sfmmu_hblk_hash_rm(hmebp, hmeblkp,
8984 8977 pr_hblk, &list, 0);
8985 8978 } else {
8986 8979 pr_hblk = hmeblkp;
8987 8980 }
8988 8981 hmeblkp = nx_hblk;
8989 8982 }
8990 8983 SFMMU_HASH_UNLOCK(hmebp);
8991 8984 }
8992 8985 if (hmebp++ == &uhme_hash[UHMEHASH_SZ])
8993 8986 hmebp = uhme_hash;
8994 8987 }
8995 8988
8996 8989 hmebp = khmehash_reclaim_hand;
8997 8990 if (hmebp == NULL || hmebp > &khme_hash[KHMEHASH_SZ])
8998 8991 khmehash_reclaim_hand = hmebp = khme_hash;
8999 8992 khmehash_reclaim_hand += KHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio;
9000 8993
9001 8994 for (i = KHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; i; i--) {
9002 8995 if (SFMMU_HASH_LOCK_TRYENTER(hmebp) != 0) {
9003 8996 hmeblkp = hmebp->hmeblkp;
9004 8997 pr_hblk = NULL;
9005 8998 while (hmeblkp) {
9006 8999 nx_hblk = hmeblkp->hblk_next;
9007 9000 if (!hmeblkp->hblk_vcnt &&
9008 9001 !hmeblkp->hblk_hmecnt) {
9009 9002 sfmmu_hblk_hash_rm(hmebp, hmeblkp,
9010 9003 pr_hblk, &list, 0);
9011 9004 } else {
9012 9005 pr_hblk = hmeblkp;
9013 9006 }
9014 9007 hmeblkp = nx_hblk;
9015 9008 }
9016 9009 SFMMU_HASH_UNLOCK(hmebp);
9017 9010 }
9018 9011 if (hmebp++ == &khme_hash[KHMEHASH_SZ])
9019 9012 hmebp = khme_hash;
9020 9013 }
9021 9014 sfmmu_hblks_list_purge(&list, 0);
9022 9015 }
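
sfmmu_hblkcache_reclaim() above scans only 1/sfmmu_cache_reclaim_scan_ratio of each hash per callback and remembers where it stopped in a static "hand" so successive callbacks eventually cover the whole table. A compact sketch of that clock-hand pattern over a generic table (table size, ratio, and the visit callback are illustrative, not the kernel's types):

	#include <stddef.h>

	#define	TABLE_SZ	1024
	#define	SCAN_RATIO	8

	/* Visit a 1/SCAN_RATIO slice of the table per call, resuming at 'hand'. */
	static void
	reclaim_slice(void (*visit)(size_t idx))
	{
		static size_t hand;
		size_t i;

		for (i = 0; i < TABLE_SZ / SCAN_RATIO; i++) {
			visit(hand);
			if (++hand >= TABLE_SZ)
				hand = 0;
		}
	}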
9023 9016
9024 9017 /*
9025 9018 * sfmmu_get_ppvcolor should become a vm_machdep or hatop interface.
9026 9019 * same goes for sfmmu_get_addrvcolor().
9027 9020 *
9028 9021 * This function will return the virtual color for the specified page. The
9029 9022 * virtual color corresponds to this page's current mapping or its last mapping.
9030 9023 * It is used by memory allocators to choose addresses with the correct
9031 9024 * alignment so vac consistency is automatically maintained. If the page
9032 9025 * has no color it returns -1.
9033 9026 */
9034 9027 /*ARGSUSED*/
9035 9028 int
9036 9029 sfmmu_get_ppvcolor(struct page *pp)
9037 9030 {
9038 9031 #ifdef VAC
9039 9032 int color;
9040 9033
9041 9034 if (!(cache & CACHE_VAC) || PP_NEWPAGE(pp)) {
9042 9035 return (-1);
9043 9036 }
9044 9037 color = PP_GET_VCOLOR(pp);
9045 9038 ASSERT(color < mmu_btop(shm_alignment));
9046 9039 return (color);
9047 9040 #else
9048 9041 return (-1);
9049 9042 #endif /* VAC */
9050 9043 }
9051 9044
9052 9045 /*
9053 9046 * This function will return the desired alignment for vac consistency
9054 9047 * (vac color) given a virtual address. If no vac is present it returns -1.
9055 9048 */
9056 9049 /*ARGSUSED*/
9057 9050 int
9058 9051 sfmmu_get_addrvcolor(caddr_t vaddr)
9059 9052 {
9060 9053 #ifdef VAC
9061 9054 if (cache & CACHE_VAC) {
9062 9055 return (addr_to_vcolor(vaddr));
9063 9056 } else {
9064 9057 return (-1);
9065 9058 }
9066 9059 #else
9067 9060 return (-1);
9068 9061 #endif /* VAC */
9069 9062 }
9070 9063
9071 9064 #ifdef VAC
9072 9065 /*
9073 9066 * Check for conflicts.
9074 9067 * A conflict exists if the new and existing mappings do not match in
9075 9068 * their shm_alignment fields. If conflicts exist, the existing mappings
9076 9069 * are flushed unless one of them is locked. If one of them is locked, then
9077 9070 * the mappings are flushed and converted to non-cacheable mappings.
9078 9071 */
9079 9072 static void
9080 9073 sfmmu_vac_conflict(struct hat *hat, caddr_t addr, page_t *pp)
9081 9074 {
9082 9075 struct hat *tmphat;
9083 9076 struct sf_hment *sfhmep, *tmphme = NULL;
9084 9077 struct hme_blk *hmeblkp;
9085 9078 int vcolor;
9086 9079 tte_t tte;
9087 9080
9088 9081 ASSERT(sfmmu_mlist_held(pp));
9089 9082 ASSERT(!PP_ISNC(pp)); /* page better be cacheable */
9090 9083
9091 9084 vcolor = addr_to_vcolor(addr);
9092 9085 if (PP_NEWPAGE(pp)) {
9093 9086 PP_SET_VCOLOR(pp, vcolor);
9094 9087 return;
9095 9088 }
9096 9089
9097 9090 if (PP_GET_VCOLOR(pp) == vcolor) {
9098 9091 return;
9099 9092 }
9100 9093
9101 9094 if (!PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp)) {
9102 9095 /*
9103 9096 * Previous user of page had a different color
9104 9097 * but since there are no current users
9105 9098 * we just flush the cache and change the color.
9106 9099 */
9107 9100 SFMMU_STAT(sf_pgcolor_conflict);
9108 9101 sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp));
9109 9102 PP_SET_VCOLOR(pp, vcolor);
9110 9103 return;
9111 9104 }
9112 9105
9113 9106 /*
9114 9107 * If we get here we have a vac conflict with a current
9115 9108 * mapping. VAC conflict policy is as follows.
9116 9109 * - The default is to unload the other mappings unless:
9117 9110 * - If we have a large mapping we uncache the page.
9118 9111 * We need to uncache the rest of the large page too.
9119 9112 * - If any of the mappings are locked we uncache the page.
9120 9113 * - If the requested mapping is inconsistent
9121 9114 * with another mapping and that mapping
9122 9115 * is in the same address space we have to
9123 9116 * make it non-cached. The default thing
9124 9117 * to do is unload the inconsistent mapping
9125 9118 * but if they are in the same address space
9126 9119 * we run the risk of unmapping the pc or the
9127 9120 * stack which we will use as we return to the user,
9128 9121 * in which case we can then fault on the thing
9129 9122 * we just unloaded and get into an infinite loop.
9130 9123 */
9131 9124 if (PP_ISMAPPED_LARGE(pp)) {
9132 9125 int sz;
9133 9126
9134 9127 /*
9135 9128 * Existing mapping is for big pages. We don't unload
9136 9129 * existing big mappings to satisfy new mappings.
9137 9130 * Always convert all mappings to TNC.
9138 9131 */
9139 9132 sz = fnd_mapping_sz(pp);
9140 9133 pp = PP_GROUPLEADER(pp, sz);
9141 9134 SFMMU_STAT_ADD(sf_uncache_conflict, TTEPAGES(sz));
9142 9135 sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH,
9143 9136 TTEPAGES(sz));
9144 9137
9145 9138 return;
9146 9139 }
9147 9140
9148 9141 /*
9149 9142 * Check if any mapping is in the same address space (as) or is locked,
9150 9143 * since in that case we need to uncache.
9151 9144 */
9152 9145 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) {
9153 9146 tmphme = sfhmep->hme_next;
9154 9147 if (IS_PAHME(sfhmep))
9155 9148 continue;
9156 9149 hmeblkp = sfmmu_hmetohblk(sfhmep);
9157 9150 tmphat = hblktosfmmu(hmeblkp);
9158 9151 sfmmu_copytte(&sfhmep->hme_tte, &tte);
9159 9152 ASSERT(TTE_IS_VALID(&tte));
9160 9153 if (hmeblkp->hblk_shared || tmphat == hat ||
9161 9154 hmeblkp->hblk_lckcnt) {
9162 9155 /*
9163 9156 * We have an uncache conflict
9164 9157 */
9165 9158 SFMMU_STAT(sf_uncache_conflict);
9166 9159 sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH, 1);
9167 9160 return;
9168 9161 }
9169 9162 }
9170 9163
9171 9164 /*
9172 9165 * We have an unload conflict
9173 9166 * We have already checked for LARGE mappings, therefore
9174 9167 * the remaining mapping(s) must be TTE8K.
9175 9168 */
9176 9169 SFMMU_STAT(sf_unload_conflict);
9177 9170
9178 9171 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) {
9179 9172 tmphme = sfhmep->hme_next;
9180 9173 if (IS_PAHME(sfhmep))
9181 9174 continue;
9182 9175 hmeblkp = sfmmu_hmetohblk(sfhmep);
9183 9176 ASSERT(!hmeblkp->hblk_shared);
9184 9177 (void) sfmmu_pageunload(pp, sfhmep, TTE8K);
9185 9178 }
9186 9179
9187 9180 if (PP_ISMAPPED_KPM(pp))
9188 9181 sfmmu_kpm_vac_unload(pp, addr);
9189 9182
9190 9183 /*
9191 9184 * Unloads only do TLB flushes so we need to flush the
9192 9185 * cache here.
9193 9186 */
9194 9187 sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp));
9195 9188 PP_SET_VCOLOR(pp, vcolor);
9196 9189 }
9197 9190
9198 9191 /*
9199 9192 * Whenever a mapping is unloaded and the page is in TNC state,
9200 9193 * we see if the page can be made cacheable again. 'pp' is
9201 9194 * the page that we just unloaded a mapping from; the size
9202 9195 * of the mapping that was unloaded is 'ottesz'.
9203 9196 * Remark:
9204 9197 * The recache policy for mpss pages can leave a performance problem
9205 9198 * under the following circumstances:
9206 9199 * . A large page in uncached mode has just been unmapped.
9207 9200 * . All constituent pages are TNC due to a conflicting small mapping.
9208 9201 * . There are many other, non conflicting, small mappings around for
9209 9202 * a lot of the constituent pages.
9210 9203 * . We're called w/ the "old" groupleader page and the old ottesz,
9211 9204 * but this is irrelevant, since we're no longer "PP_ISMAPPED_LARGE", so
9212 9205 * we end up w/ TTE8K or npages == 1.
9213 9206 * . We call tst_tnc w/ the old groupleader only, and if there is no
9214 9207 * conflict, we re-cache only this page.
9215 9208 * . All other small mappings are not checked and will be left in TNC mode.
9216 9209 * The problem is not very serious because:
9217 9210 * . mpss is actually only defined for heap and stack, so the probability
9218 9211 * is not very high that a large page mapping exists in parallel to a small
9219 9212 * one (this is possible, but seems to be bad programming style in the
9220 9213 * appl).
9221 9214 * . The problem gets a little bit more serious, when those TNC pages
9222 9215 * have to be mapped into kernel space, e.g. for networking.
9223 9216 * . When VAC alias conflicts occur in applications, this is regarded
9224 9217 * as an application bug. So if kstat's show them, the appl should
9225 9218 * be changed anyway.
9226 9219 */
9227 9220 void
9228 9221 conv_tnc(page_t *pp, int ottesz)
9229 9222 {
9230 9223 int cursz, dosz;
9231 9224 pgcnt_t curnpgs, dopgs;
9232 9225 pgcnt_t pg64k;
9233 9226 page_t *pp2;
9234 9227
9235 9228 /*
9236 9229 * Determine how big a range we check for TNC and find
9237 9230 * leader page. cursz is the size of the biggest
9238 9231 * mapping that still exists on 'pp'.
9239 9232 */
9240 9233 if (PP_ISMAPPED_LARGE(pp)) {
9241 9234 cursz = fnd_mapping_sz(pp);
9242 9235 } else {
9243 9236 cursz = TTE8K;
9244 9237 }
9245 9238
9246 9239 if (ottesz >= cursz) {
9247 9240 dosz = ottesz;
9248 9241 pp2 = pp;
9249 9242 } else {
9250 9243 dosz = cursz;
9251 9244 pp2 = PP_GROUPLEADER(pp, dosz);
9252 9245 }
9253 9246
9254 9247 pg64k = TTEPAGES(TTE64K);
9255 9248 dopgs = TTEPAGES(dosz);
9256 9249
9257 9250 ASSERT(dopgs == 1 || ((dopgs & (pg64k - 1)) == 0));
9258 9251
9259 9252 while (dopgs != 0) {
9260 9253 curnpgs = TTEPAGES(cursz);
9261 9254 if (tst_tnc(pp2, curnpgs)) {
9262 9255 SFMMU_STAT_ADD(sf_recache, curnpgs);
9263 9256 sfmmu_page_cache_array(pp2, HAT_CACHE, CACHE_NO_FLUSH,
9264 9257 curnpgs);
9265 9258 }
9266 9259
9267 9260 ASSERT(dopgs >= curnpgs);
9268 9261 dopgs -= curnpgs;
9269 9262
9270 9263 if (dopgs == 0) {
9271 9264 break;
9272 9265 }
9273 9266
9274 9267 pp2 = PP_PAGENEXT_N(pp2, curnpgs);
9275 9268 if (((dopgs & (pg64k - 1)) == 0) && PP_ISMAPPED_LARGE(pp2)) {
9276 9269 cursz = fnd_mapping_sz(pp2);
9277 9270 } else {
9278 9271 cursz = TTE8K;
9279 9272 }
9280 9273 }
9281 9274 }
9282 9275
9283 9276 /*
9284 9277 * Returns 1 if page(s) can be converted from TNC to cacheable setting,
9285 9278 * returns 0 otherwise. Note that oaddr argument is valid for only
9286 9279 * 8k pages.
9287 9280 */
9288 9281 int
9289 9282 tst_tnc(page_t *pp, pgcnt_t npages)
9290 9283 {
9291 9284 struct sf_hment *sfhme;
9292 9285 struct hme_blk *hmeblkp;
9293 9286 tte_t tte;
9294 9287 caddr_t vaddr;
9295 9288 int clr_valid = 0;
9296 - int color, color1, bcolor;
9289 + int color, color1, bcolor;
9297 9290 int i, ncolors;
9298 9291
9299 9292 ASSERT(pp != NULL);
9300 9293 ASSERT(!(cache & CACHE_WRITEBACK));
9301 9294
9302 9295 if (npages > 1) {
9303 9296 ncolors = CACHE_NUM_COLOR;
9304 9297 }
9305 9298
9306 9299 for (i = 0; i < npages; i++) {
9307 9300 ASSERT(sfmmu_mlist_held(pp));
9308 9301 ASSERT(PP_ISTNC(pp));
9309 9302 ASSERT(PP_GET_VCOLOR(pp) == NO_VCOLOR);
9310 9303
9311 9304 if (PP_ISPNC(pp)) {
9312 9305 return (0);
9313 9306 }
9314 9307
9315 9308 clr_valid = 0;
9316 9309 if (PP_ISMAPPED_KPM(pp)) {
9317 9310 caddr_t kpmvaddr;
9318 9311
9319 9312 ASSERT(kpm_enable);
9320 9313 kpmvaddr = hat_kpm_page2va(pp, 1);
9321 9314 ASSERT(!(npages > 1 && IS_KPM_ALIAS_RANGE(kpmvaddr)));
9322 9315 color1 = addr_to_vcolor(kpmvaddr);
9323 9316 clr_valid = 1;
9324 9317 }
9325 9318
9326 9319 for (sfhme = pp->p_mapping; sfhme; sfhme = sfhme->hme_next) {
9327 9320 if (IS_PAHME(sfhme))
9328 9321 continue;
9329 9322 hmeblkp = sfmmu_hmetohblk(sfhme);
9330 9323
9331 9324 sfmmu_copytte(&sfhme->hme_tte, &tte);
9332 9325 ASSERT(TTE_IS_VALID(&tte));
9333 9326
9334 9327 vaddr = tte_to_vaddr(hmeblkp, tte);
9335 9328 color = addr_to_vcolor(vaddr);
9336 9329
9337 9330 if (npages > 1) {
9338 9331 /*
9339 9332 * If there is a big mapping, make sure
9340 9333 * 8K mapping is consistent with the big
9341 9334 * mapping.
9342 9335 */
9343 9336 bcolor = i % ncolors;
9344 9337 if (color != bcolor) {
9345 9338 return (0);
9346 9339 }
9347 9340 }
9348 9341 if (!clr_valid) {
9349 9342 clr_valid = 1;
9350 9343 color1 = color;
9351 9344 }
9352 9345
9353 9346 if (color1 != color) {
9354 9347 return (0);
9355 9348 }
9356 9349 }
9357 9350
9358 9351 pp = PP_PAGENEXT(pp);
9359 9352 }
9360 9353
9361 9354 return (1);
9362 9355 }
9363 9356
9364 9357 void
9365 9358 sfmmu_page_cache_array(page_t *pp, int flags, int cache_flush_flag,
9366 - pgcnt_t npages)
9359 + pgcnt_t npages)
9367 9360 {
9368 9361 kmutex_t *pmtx;
9369 9362 int i, ncolors, bcolor;
9370 9363 kpm_hlk_t *kpmp;
9371 9364 cpuset_t cpuset;
9372 9365
9373 9366 ASSERT(pp != NULL);
9374 9367 ASSERT(!(cache & CACHE_WRITEBACK));
9375 9368
9376 9369 kpmp = sfmmu_kpm_kpmp_enter(pp, npages);
9377 9370 pmtx = sfmmu_page_enter(pp);
9378 9371
9379 9372 /*
9380 9373 * Fast path caching single unmapped page
9381 9374 */
9382 9375 if (npages == 1 && !PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp) &&
9383 9376 flags == HAT_CACHE) {
9384 9377 PP_CLRTNC(pp);
9385 9378 PP_CLRPNC(pp);
9386 9379 sfmmu_page_exit(pmtx);
9387 9380 sfmmu_kpm_kpmp_exit(kpmp);
9388 9381 return;
9389 9382 }
9390 9383
9391 9384 /*
9392 9385 * We need to capture all cpus in order to change cacheability
9393 9386 * because we can't allow one cpu to access the same physical
9394 9387 * page using a cacheable and a non-cacheable mapping at the same
9395 9388 * time. Since we may end up walking the ism mapping list, we
9396 9389 * have to grab its lock now, since we can't after all the
9397 9390 * cpus have been captured.
9398 9391 */
9399 9392 sfmmu_hat_lock_all();
9400 9393 mutex_enter(&ism_mlist_lock);
9401 9394 kpreempt_disable();
9402 9395 cpuset = cpu_ready_set;
9403 9396 xc_attention(cpuset);
9404 9397
9405 9398 if (npages > 1) {
9406 9399 /*
9407 9400 * Make sure all colors are flushed since the
9408 9401 * sfmmu_page_cache() only flushes one color;
9409 9402 * it does not know about big pages.
9410 9403 */
9411 9404 ncolors = CACHE_NUM_COLOR;
9412 9405 if (flags & HAT_TMPNC) {
9413 9406 for (i = 0; i < ncolors; i++) {
9414 9407 sfmmu_cache_flushcolor(i, pp->p_pagenum);
9415 9408 }
9416 9409 cache_flush_flag = CACHE_NO_FLUSH;
9417 9410 }
9418 9411 }
9419 9412
9420 9413 for (i = 0; i < npages; i++) {
9421 9414
9422 9415 ASSERT(sfmmu_mlist_held(pp));
9423 9416
9424 9417 if (!(flags == HAT_TMPNC && PP_ISTNC(pp))) {
9425 9418
9426 9419 if (npages > 1) {
9427 9420 bcolor = i % ncolors;
9428 9421 } else {
9429 9422 bcolor = NO_VCOLOR;
9430 9423 }
9431 9424
9432 9425 sfmmu_page_cache(pp, flags, cache_flush_flag,
9433 9426 bcolor);
9434 9427 }
9435 9428
9436 9429 pp = PP_PAGENEXT(pp);
9437 9430 }
9438 9431
9439 9432 xt_sync(cpuset);
9440 9433 xc_dismissed(cpuset);
9441 9434 mutex_exit(&ism_mlist_lock);
9442 9435 sfmmu_hat_unlock_all();
9443 9436 sfmmu_page_exit(pmtx);
9444 9437 sfmmu_kpm_kpmp_exit(kpmp);
9445 9438 kpreempt_enable();
9446 9439 }
9447 9440
9448 9441 /*
9449 9442 * This function changes the virtual cacheability of all mappings to a
9450 9443 * particular page. When changing from uncache to cacheable the mappings will
9451 9444 * only be changed if all of them have the same virtual color.
9452 9445 * We need to flush the cache in all cpus. It is possible that
9453 9446 * a process referenced a page as cacheable but has since exited
9454 9447 * and cleared the mapping list. We still need to flush it, but have no
9455 9448 * state, so flushing on all cpus is the only alternative.
9456 9449 */
9457 9450 static void
9458 9451 sfmmu_page_cache(page_t *pp, int flags, int cache_flush_flag, int bcolor)
9459 9452 {
9460 9453 struct sf_hment *sfhme;
9461 9454 struct hme_blk *hmeblkp;
9462 9455 sfmmu_t *sfmmup;
9463 9456 tte_t tte, ttemod;
9464 9457 caddr_t vaddr;
9465 9458 int ret, color;
9466 9459 pfn_t pfn;
9467 9460
9468 9461 color = bcolor;
9469 9462 pfn = pp->p_pagenum;
9470 9463
9471 9464 for (sfhme = pp->p_mapping; sfhme; sfhme = sfhme->hme_next) {
9472 9465
9473 9466 if (IS_PAHME(sfhme))
9474 9467 continue;
9475 9468 hmeblkp = sfmmu_hmetohblk(sfhme);
9476 9469
9477 9470 sfmmu_copytte(&sfhme->hme_tte, &tte);
9478 9471 ASSERT(TTE_IS_VALID(&tte));
9479 9472 vaddr = tte_to_vaddr(hmeblkp, tte);
9480 9473 color = addr_to_vcolor(vaddr);
9481 9474
9482 9475 #ifdef DEBUG
9483 9476 if ((flags & HAT_CACHE) && bcolor != NO_VCOLOR) {
9484 9477 ASSERT(color == bcolor);
9485 9478 }
9486 9479 #endif
9487 9480
9488 9481 ASSERT(flags != HAT_TMPNC || color == PP_GET_VCOLOR(pp));
9489 9482
9490 9483 ttemod = tte;
9491 9484 if (flags & (HAT_UNCACHE | HAT_TMPNC)) {
9492 9485 TTE_CLR_VCACHEABLE(&ttemod);
9493 9486 } else { /* flags & HAT_CACHE */
9494 9487 TTE_SET_VCACHEABLE(&ttemod);
9495 9488 }
9496 9489 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte);
9497 9490 if (ret < 0) {
9498 9491 /*
9499 9492 * Since all cpus are captured modifytte should not
9500 9493 * fail.
9501 9494 */
9502 9495 panic("sfmmu_page_cache: write to tte failed");
9503 9496 }
9504 9497
9505 9498 sfmmup = hblktosfmmu(hmeblkp);
9506 9499 if (cache_flush_flag == CACHE_FLUSH) {
9507 9500 /*
9508 9501 * Flush TSBs, TLBs and caches
9509 9502 */
9510 9503 if (hmeblkp->hblk_shared) {
9511 9504 sf_srd_t *srdp = (sf_srd_t *)sfmmup;
9512 9505 uint_t rid = hmeblkp->hblk_tag.htag_rid;
9513 9506 sf_region_t *rgnp;
9514 9507 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
9515 9508 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
9516 9509 ASSERT(srdp != NULL);
9517 9510 rgnp = srdp->srd_hmergnp[rid];
9518 9511 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp,
9519 9512 srdp, rgnp, rid);
9520 9513 (void) sfmmu_rgntlb_demap(vaddr, rgnp,
9521 9514 hmeblkp, 0);
9522 9515 sfmmu_cache_flush(pfn, addr_to_vcolor(vaddr));
9523 9516 } else if (sfmmup->sfmmu_ismhat) {
9524 9517 if (flags & HAT_CACHE) {
9525 9518 SFMMU_STAT(sf_ism_recache);
9526 9519 } else {
9527 9520 SFMMU_STAT(sf_ism_uncache);
9528 9521 }
9529 9522 sfmmu_ismtlbcache_demap(vaddr, sfmmup, hmeblkp,
9530 9523 pfn, CACHE_FLUSH);
9531 9524 } else {
9532 9525 sfmmu_tlbcache_demap(vaddr, sfmmup, hmeblkp,
9533 9526 pfn, 0, FLUSH_ALL_CPUS, CACHE_FLUSH, 1);
9534 9527 }
9535 9528
9536 9529 /*
9537 9530 * all cache entries belonging to this pfn are
9538 9531 * now flushed.
9539 9532 */
9540 9533 cache_flush_flag = CACHE_NO_FLUSH;
9541 9534 } else {
9542 9535 /*
9543 9536 * Flush only TSBs and TLBs.
9544 9537 */
9545 9538 if (hmeblkp->hblk_shared) {
9546 9539 sf_srd_t *srdp = (sf_srd_t *)sfmmup;
9547 9540 uint_t rid = hmeblkp->hblk_tag.htag_rid;
9548 9541 sf_region_t *rgnp;
9549 9542 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
9550 9543 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
9551 9544 ASSERT(srdp != NULL);
9552 9545 rgnp = srdp->srd_hmergnp[rid];
9553 9546 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp,
9554 9547 srdp, rgnp, rid);
9555 9548 (void) sfmmu_rgntlb_demap(vaddr, rgnp,
9556 9549 hmeblkp, 0);
9557 9550 } else if (sfmmup->sfmmu_ismhat) {
9558 9551 if (flags & HAT_CACHE) {
9559 9552 SFMMU_STAT(sf_ism_recache);
9560 9553 } else {
9561 9554 SFMMU_STAT(sf_ism_uncache);
9562 9555 }
9563 9556 sfmmu_ismtlbcache_demap(vaddr, sfmmup, hmeblkp,
9564 9557 pfn, CACHE_NO_FLUSH);
9565 9558 } else {
9566 9559 sfmmu_tlb_demap(vaddr, sfmmup, hmeblkp, 0, 1);
9567 9560 }
9568 9561 }
9569 9562 }
9570 9563
9571 9564 if (PP_ISMAPPED_KPM(pp))
9572 9565 sfmmu_kpm_page_cache(pp, flags, cache_flush_flag);
9573 9566
9574 9567 switch (flags) {
9575 9568
9576 9569 default:
9577 9570 panic("sfmmu_pagecache: unknown flags");
9578 9571 break;
9579 9572
9580 9573 case HAT_CACHE:
9581 9574 PP_CLRTNC(pp);
9582 9575 PP_CLRPNC(pp);
9583 9576 PP_SET_VCOLOR(pp, color);
9584 9577 break;
9585 9578
9586 9579 case HAT_TMPNC:
9587 9580 PP_SETTNC(pp);
9588 9581 PP_SET_VCOLOR(pp, NO_VCOLOR);
9589 9582 break;
9590 9583
9591 9584 case HAT_UNCACHE:
9592 9585 PP_SETPNC(pp);
9593 9586 PP_CLRTNC(pp);
9594 9587 PP_SET_VCOLOR(pp, NO_VCOLOR);
9595 9588 break;
9596 9589 }
9597 9590 }
9598 9591 #endif /* VAC */
9599 9592
9600 9593
9601 9594 /*
9602 9595 * Wrapper routine used to return a context.
9603 9596 *
9604 9597 * It's the responsibility of the caller to guarantee that the
9605 9598 * process serializes on calls here by taking the HAT lock for
9606 9599 * the hat.
9607 9600 *
9608 9601 */
9609 9602 static void
9610 9603 sfmmu_get_ctx(sfmmu_t *sfmmup)
9611 9604 {
9612 9605 mmu_ctx_t *mmu_ctxp;
9613 9606 uint_t pstate_save;
9614 9607 int ret;
9615 9608
9616 9609 ASSERT(sfmmu_hat_lock_held(sfmmup));
9617 9610 ASSERT(sfmmup != ksfmmup);
9618 9611
9619 9612 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_ALLCTX_INVALID)) {
9620 9613 sfmmu_setup_tsbinfo(sfmmup);
9621 9614 SFMMU_FLAGS_CLEAR(sfmmup, HAT_ALLCTX_INVALID);
9622 9615 }
9623 9616
9624 9617 kpreempt_disable();
9625 9618
9626 9619 mmu_ctxp = CPU_MMU_CTXP(CPU);
9627 9620 ASSERT(mmu_ctxp);
9628 9621 ASSERT(mmu_ctxp->mmu_idx < max_mmu_ctxdoms);
9629 9622 ASSERT(mmu_ctxp == mmu_ctxs_tbl[mmu_ctxp->mmu_idx]);
9630 9623
9631 9624 /*
9632 9625 * Do a wrap-around if cnum reaches the max # of cnums supported by an MMU.
9633 9626 */
9634 9627 if (mmu_ctxp->mmu_cnum == mmu_ctxp->mmu_nctxs)
9635 9628 sfmmu_ctx_wrap_around(mmu_ctxp, B_TRUE);
9636 9629
9637 9630 /*
9638 9631 * Let the MMU set up the page sizes to use for
9639 9632 * this context in the TLB. Don't program 2nd dtlb for ism hat.
9640 9633 */
9641 9634 if ((&mmu_set_ctx_page_sizes) && (sfmmup->sfmmu_ismhat == 0)) {
9642 9635 mmu_set_ctx_page_sizes(sfmmup);
9643 9636 }
9644 9637
9645 9638 /*
9646 9639 * sfmmu_alloc_ctx and sfmmu_load_mmustate will be performed with
9647 9640 * interrupts disabled to prevent a race condition with wrap-around
9648 9641 * ctx invalidation. In sun4v, ctx invalidation also involves
9649 9642 * an HV call to set the number of TSBs to 0. If interrupts are not
9650 9643 * disabled until after sfmmu_load_mmustate is complete, TSBs may
9651 9644 * become assigned to INVALID_CONTEXT. This is not allowed.
9652 9645 */
9653 9646 pstate_save = sfmmu_disable_intrs();
9654 9647
9655 9648 if (sfmmu_alloc_ctx(sfmmup, 1, CPU, SFMMU_PRIVATE) &&
9656 9649 sfmmup->sfmmu_scdp != NULL) {
9657 9650 sf_scd_t *scdp = sfmmup->sfmmu_scdp;
9658 9651 sfmmu_t *scsfmmup = scdp->scd_sfmmup;
9659 9652 ret = sfmmu_alloc_ctx(scsfmmup, 1, CPU, SFMMU_SHARED);
9660 9653 /* debug purpose only */
9661 9654 ASSERT(!ret || scsfmmup->sfmmu_ctxs[CPU_MMU_IDX(CPU)].cnum
9662 9655 != INVALID_CONTEXT);
9663 9656 }
9664 9657 sfmmu_load_mmustate(sfmmup);
9665 9658
9666 9659 sfmmu_enable_intrs(pstate_save);
9667 9660
9668 9661 kpreempt_enable();
9669 9662 }
9670 9663
9671 9664 /*
9672 9665 * When all cnums are used up in a MMU, cnum will wrap around to the
9673 9666 * next generation and start from 2.
9674 9667 */
9675 9668 static void
9676 9669 sfmmu_ctx_wrap_around(mmu_ctx_t *mmu_ctxp, boolean_t reset_cnum)
9677 9670 {
9678 9671
9679 9672 /* caller must have disabled the preemption */
9680 9673 ASSERT(curthread->t_preempt >= 1);
9681 9674 ASSERT(mmu_ctxp != NULL);
9682 9675
9683 9676 /* acquire Per-MMU (PM) spin lock */
9684 9677 mutex_enter(&mmu_ctxp->mmu_lock);
9685 9678
9686 9679 /* re-check to see if wrap-around is needed */
9687 9680 if (mmu_ctxp->mmu_cnum < mmu_ctxp->mmu_nctxs)
9688 9681 goto done;
9689 9682
9690 9683 SFMMU_MMU_STAT(mmu_wrap_around);
9691 9684
9692 9685 /* update gnum */
9693 9686 ASSERT(mmu_ctxp->mmu_gnum != 0);
9694 9687 mmu_ctxp->mmu_gnum++;
9695 9688 if (mmu_ctxp->mmu_gnum == 0 ||
9696 9689 mmu_ctxp->mmu_gnum > MAX_SFMMU_GNUM_VAL) {
9697 9690 cmn_err(CE_PANIC, "mmu_gnum of mmu_ctx 0x%p is out of bound.",
9698 9691 (void *)mmu_ctxp);
9699 9692 }
9700 9693
9701 9694 if (mmu_ctxp->mmu_ncpus > 1) {
9702 9695 cpuset_t cpuset;
9703 9696
9704 9697 membar_enter(); /* make sure updated gnum visible */
9705 9698
9706 9699 SFMMU_XCALL_STATS(NULL);
9707 9700
9708 9701 /* xcall to others on the same MMU to invalidate ctx */
9709 9702 cpuset = mmu_ctxp->mmu_cpuset;
9710 9703 ASSERT(CPU_IN_SET(cpuset, CPU->cpu_id) || !reset_cnum);
9711 9704 CPUSET_DEL(cpuset, CPU->cpu_id);
9712 9705 CPUSET_AND(cpuset, cpu_ready_set);
9713 9706
9714 9707 /*
9715 9708 * Pass in INVALID_CONTEXT as the first parameter to
9716 9709 * sfmmu_raise_tsb_exception, which invalidates the context
9717 9710 * of any process running on the CPUs in the MMU.
9718 9711 */
9719 9712 xt_some(cpuset, sfmmu_raise_tsb_exception,
9720 9713 INVALID_CONTEXT, INVALID_CONTEXT);
9721 9714 xt_sync(cpuset);
9722 9715
9723 9716 SFMMU_MMU_STAT(mmu_tsb_raise_exception);
9724 9717 }
9725 9718
9726 9719 if (sfmmu_getctx_sec() != INVALID_CONTEXT) {
9727 9720 sfmmu_setctx_sec(INVALID_CONTEXT);
9728 9721 sfmmu_clear_utsbinfo();
9729 9722 }
9730 9723
9731 9724 /*
9732 9725 * No xcall is needed here. For sun4u systems all CPUs in context
9733 9726 * domain share a single physical MMU therefore it's enough to flush
9734 9727 * TLB on local CPU. On sun4v systems we use 1 global context
9735 9728 * domain and flush all remote TLBs in sfmmu_raise_tsb_exception
9736 9729 * handler. Note that vtag_flushall_uctxs() is called
9737 9730 * for Ultra II machine, where the equivalent flushall functionality
9738 9731 * is implemented in SW, and only user ctx TLB entries are flushed.
9739 9732 */
9740 9733 if (&vtag_flushall_uctxs != NULL) {
9741 9734 vtag_flushall_uctxs();
9742 9735 } else {
9743 9736 vtag_flushall();
9744 9737 }
9745 9738
9746 9739 /* reset mmu cnum, skips cnum 0 and 1 */
9747 9740 if (reset_cnum == B_TRUE)
9748 9741 mmu_ctxp->mmu_cnum = NUM_LOCKED_CTXS;
9749 9742
9750 9743 done:
9751 9744 mutex_exit(&mmu_ctxp->mmu_lock);
9752 9745 }
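
Conceptually, the wrap-around above pairs a generation number (gnum) with the per-MMU context number (cnum): when cnum is exhausted, gnum is bumped and cnum restarts past the locked contexts, which implicitly invalidates every (old gnum, cnum) pair still cached by a process. A minimal sketch of that allocation scheme (hypothetical constants and field widths, no locking or overflow checks):

	#include <stdint.h>

	#define	NUM_LOCKED	2	/* cnums 0 and 1 are reserved */
	#define	NCTXS		8192	/* hypothetical contexts per MMU */

	typedef struct {
		uint64_t	gnum;	/* generation, bumped on wrap */
		uint16_t	cnum;	/* next context number to hand out */
	} mmu_ctx_sketch_t;

	/* Return a (gnum, cnum) pair; a wrap bumps gnum and restarts cnum. */
	static uint64_t
	alloc_ctx(mmu_ctx_sketch_t *m)
	{
		if (m->cnum == NCTXS) {
			m->gnum++;
			m->cnum = NUM_LOCKED;
		}
		return (((uint64_t)m->gnum << 16) | m->cnum++);
	}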
9753 9746
9754 9747
9755 9748 /*
9756 9749 * For multi-threaded process, set the process context to INVALID_CONTEXT
9757 9750 * so that it faults and reloads the MMU state from TL=0. For single-threaded
9758 9751 * process, we can just load the MMU state directly without having to
9759 9752 * set context invalid. Caller must hold the hat lock since we don't
9760 9753 * acquire it here.
9761 9754 */
9762 9755 static void
9763 9756 sfmmu_sync_mmustate(sfmmu_t *sfmmup)
9764 9757 {
9765 9758 uint_t cnum;
9766 9759 uint_t pstate_save;
9767 9760
9768 9761 ASSERT(sfmmup != ksfmmup);
9769 9762 ASSERT(sfmmu_hat_lock_held(sfmmup));
9770 9763
9771 9764 kpreempt_disable();
9772 9765
9773 9766 /*
9774 9767 		 * We check whether the passed-in sfmmup is the same as the
9775 9768 		 * currently running proc. This is to make sure the current proc
9776 9769 * stays single-threaded if it already is.
9777 9770 */
9778 9771 if ((sfmmup == curthread->t_procp->p_as->a_hat) &&
9779 9772 (curthread->t_procp->p_lwpcnt == 1)) {
9780 9773 /* single-thread */
9781 9774 cnum = sfmmup->sfmmu_ctxs[CPU_MMU_IDX(CPU)].cnum;
9782 9775 if (cnum != INVALID_CONTEXT) {
9783 9776 uint_t curcnum;
9784 9777 /*
9785 9778 			 * Disable interrupts to prevent a race condition
9786 9779 * with sfmmu_ctx_wrap_around ctx invalidation.
9787 9780 * In sun4v, ctx invalidation involves setting
9788 9781 * TSB to NULL, hence, interrupts should be disabled
9789 9782 			 * until after sfmmu_load_mmustate is completed.
9790 9783 */
9791 9784 pstate_save = sfmmu_disable_intrs();
9792 9785 curcnum = sfmmu_getctx_sec();
9793 9786 if (curcnum == cnum)
9794 9787 sfmmu_load_mmustate(sfmmup);
9795 9788 sfmmu_enable_intrs(pstate_save);
9796 9789 ASSERT(curcnum == cnum || curcnum == INVALID_CONTEXT);
9797 9790 }
9798 9791 } else {
9799 9792 /*
9800 9793 * multi-thread
9801 9794 		 * multi-threaded process,
9802 9795 		 * or when sfmmup is not the same as the curproc's hat.
9803 9796 sfmmu_invalidate_ctx(sfmmup);
9804 9797 }
9805 9798
9806 9799 kpreempt_enable();
9807 9800 }
9808 9801
9809 9802
9810 9803 /*
9811 9804 * Replace the specified TSB with a new TSB. This function gets called when
9812 9805 * we grow, shrink or swapin a TSB. When swapping in a TSB (TSB_SWAPIN), the
9813 9806 * TSB_FORCEALLOC flag may be used to force allocation of a minimum-sized TSB
9814 9807 * (8K).
9815 9808 *
9816 9809 * Caller must hold the HAT lock, but should assume any tsb_info
9817 9810 * pointers it has are no longer valid after calling this function.
9818 9811 *
9819 9812 * Return values:
9820 9813 * TSB_ALLOCFAIL Failed to allocate a TSB, due to memory constraints
9821 9814 * TSB_LOSTRACE HAT is busy, i.e. another thread is already doing
9822 9815 * something to this tsbinfo/TSB
9823 9816 * TSB_SUCCESS Operation succeeded
9824 9817 */
9825 9818 static tsb_replace_rc_t
9826 9819 sfmmu_replace_tsb(sfmmu_t *sfmmup, struct tsb_info *old_tsbinfo, uint_t szc,
9827 9820 hatlock_t *hatlockp, uint_t flags)
9828 9821 {
9829 9822 struct tsb_info *new_tsbinfo = NULL;
9830 9823 struct tsb_info *curtsb, *prevtsb;
9831 9824 uint_t tte_sz_mask;
9832 9825 int i;
9833 9826
9834 9827 ASSERT(sfmmup != ksfmmup);
9835 9828 ASSERT(sfmmup->sfmmu_ismhat == 0);
9836 9829 ASSERT(sfmmu_hat_lock_held(sfmmup));
9837 9830 ASSERT(szc <= tsb_max_growsize);
9838 9831
9839 9832 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_BUSY))
9840 9833 return (TSB_LOSTRACE);
9841 9834
9842 9835 /*
9843 9836 * Find the tsb_info ahead of this one in the list, and
9844 9837 * also make sure that the tsb_info passed in really
9845 9838 * exists!
9846 9839 */
9847 9840 for (prevtsb = NULL, curtsb = sfmmup->sfmmu_tsb;
9848 9841 curtsb != old_tsbinfo && curtsb != NULL;
9849 9842 prevtsb = curtsb, curtsb = curtsb->tsb_next)
9850 9843 ;
9851 9844 ASSERT(curtsb != NULL);
9852 9845
9853 9846 if (!(flags & TSB_SWAPIN) && SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
9854 9847 /*
9855 9848 * The process is swapped out, so just set the new size
9856 9849 * code. When it swaps back in, we'll allocate a new one
9857 9850 * of the new chosen size.
9858 9851 */
9859 9852 curtsb->tsb_szc = szc;
9860 9853 return (TSB_SUCCESS);
9861 9854 }
9862 9855 SFMMU_FLAGS_SET(sfmmup, HAT_BUSY);
9863 9856
9864 9857 tte_sz_mask = old_tsbinfo->tsb_ttesz_mask;
9865 9858
9866 9859 /*
9867 9860 * All initialization is done inside of sfmmu_tsbinfo_alloc().
9868 9861 * If we fail to allocate a TSB, exit.
9869 9862 *
9870 9863 * If tsb grows with new tsb size > 4M and old tsb size < 4M,
9871 9864 * then try 4M slab after the initial alloc fails.
9872 9865 *
9873 9866 * If tsb swapin with tsb size > 4M, then try 4M after the
9874 9867 * initial alloc fails.
9875 9868 */
9876 9869 sfmmu_hat_exit(hatlockp);
9877 9870 if (sfmmu_tsbinfo_alloc(&new_tsbinfo, szc,
9878 9871 tte_sz_mask, flags, sfmmup) &&
9879 9872 (!(flags & (TSB_GROW | TSB_SWAPIN)) || (szc <= TSB_4M_SZCODE) ||
9880 9873 (!(flags & TSB_SWAPIN) &&
9881 9874 (old_tsbinfo->tsb_szc >= TSB_4M_SZCODE)) ||
9882 9875 sfmmu_tsbinfo_alloc(&new_tsbinfo, TSB_4M_SZCODE,
9883 9876 tte_sz_mask, flags, sfmmup))) {
9884 9877 (void) sfmmu_hat_enter(sfmmup);
9885 9878 if (!(flags & TSB_SWAPIN))
9886 9879 SFMMU_STAT(sf_tsb_resize_failures);
9887 9880 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY);
9888 9881 return (TSB_ALLOCFAIL);
9889 9882 }
9890 9883 (void) sfmmu_hat_enter(sfmmup);
9891 9884
9892 9885 /*
9893 9886 * Re-check to make sure somebody else didn't muck with us while we
9894 9887 * didn't hold the HAT lock. If the process swapped out, fine, just
9895 9888 * exit; this can happen if we try to shrink the TSB from the context
9896 9889 * of another process (such as on an ISM unmap), though it is rare.
9897 9890 */
9898 9891 if (!(flags & TSB_SWAPIN) && SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
9899 9892 SFMMU_STAT(sf_tsb_resize_failures);
9900 9893 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY);
9901 9894 sfmmu_hat_exit(hatlockp);
9902 9895 sfmmu_tsbinfo_free(new_tsbinfo);
9903 9896 (void) sfmmu_hat_enter(sfmmup);
9904 9897 return (TSB_LOSTRACE);
9905 9898 }
9906 9899
9907 9900 #ifdef DEBUG
9908 9901 /* Reverify that the tsb_info still exists.. for debugging only */
9909 9902 for (prevtsb = NULL, curtsb = sfmmup->sfmmu_tsb;
9910 9903 curtsb != old_tsbinfo && curtsb != NULL;
9911 9904 prevtsb = curtsb, curtsb = curtsb->tsb_next)
9912 9905 ;
9913 9906 ASSERT(curtsb != NULL);
9914 9907 #endif /* DEBUG */
9915 9908
9916 9909 /*
9917 9910 * Quiesce any CPUs running this process on their next TLB miss
9918 9911 * so they atomically see the new tsb_info. We temporarily set the
9919 9912 * context to invalid context so new threads that come on processor
9920 9913 * after we do the xcall to cpusran will also serialize behind the
9921 9914 * HAT lock on TLB miss and will see the new TSB. Since this short
9922 9915 * race with a new thread coming on processor is relatively rare,
9923 9916 * this synchronization mechanism should be cheaper than always
9924 9917 * pausing all CPUs for the duration of the setup, which is what
9925 9918 	 * the old implementation did. This is particularly true if we are
9926 9919 * copying a huge chunk of memory around during that window.
9927 9920 *
9928 9921 * The memory barriers are to make sure things stay consistent
9929 9922 * with resume() since it does not hold the HAT lock while
9930 9923 * walking the list of tsb_info structures.
9931 9924 */
9932 9925 if ((flags & TSB_SWAPIN) != TSB_SWAPIN) {
9933 9926 /* The TSB is either growing or shrinking. */
9934 9927 sfmmu_invalidate_ctx(sfmmup);
9935 9928 } else {
9936 9929 /*
9937 9930 * It is illegal to swap in TSBs from a process other
9938 9931 * than a process being swapped in. This in turn
9939 9932 * implies we do not have a valid MMU context here
9940 9933 * since a process needs one to resolve translation
9941 9934 * misses.
9942 9935 */
9943 9936 ASSERT(curthread->t_procp->p_as->a_hat == sfmmup);
9944 9937 }
9945 9938
9946 9939 #ifdef DEBUG
9947 9940 ASSERT(max_mmu_ctxdoms > 0);
9948 9941
9949 9942 /*
9950 9943 * Process should have INVALID_CONTEXT on all MMUs
9951 9944 */
9952 9945 for (i = 0; i < max_mmu_ctxdoms; i++) {
9953 9946
9954 9947 ASSERT(sfmmup->sfmmu_ctxs[i].cnum == INVALID_CONTEXT);
9955 9948 }
9956 9949 #endif
9957 9950
9958 9951 new_tsbinfo->tsb_next = old_tsbinfo->tsb_next;
9959 9952 membar_stst(); /* strict ordering required */
9960 9953 if (prevtsb)
9961 9954 prevtsb->tsb_next = new_tsbinfo;
9962 9955 else
9963 9956 sfmmup->sfmmu_tsb = new_tsbinfo;
9964 9957 membar_enter(); /* make sure new TSB globally visible */
9965 9958
9966 9959 /*
9967 9960 * We need to migrate TSB entries from the old TSB to the new TSB
9968 9961 * if tsb_remap_ttes is set and the TSB is growing.
9969 9962 */
9970 9963 if (tsb_remap_ttes && ((flags & TSB_GROW) == TSB_GROW))
9971 9964 sfmmu_copy_tsb(old_tsbinfo, new_tsbinfo);
9972 9965
9973 9966 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY);
9974 9967
9975 9968 /*
9976 9969 * Drop the HAT lock to free our old tsb_info.
9977 9970 */
9978 9971 sfmmu_hat_exit(hatlockp);
9979 9972
9980 9973 if ((flags & TSB_GROW) == TSB_GROW) {
9981 9974 SFMMU_STAT(sf_tsb_grow);
9982 9975 } else if ((flags & TSB_SHRINK) == TSB_SHRINK) {
9983 9976 SFMMU_STAT(sf_tsb_shrink);
9984 9977 }
9985 9978
9986 9979 sfmmu_tsbinfo_free(old_tsbinfo);
9987 9980
9988 9981 (void) sfmmu_hat_enter(sfmmup);
9989 9982 return (TSB_SUCCESS);
9990 9983 }
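
The return codes documented above sfmmu_replace_tsb() can be consumed as in this hedged sketch. It is a hypothetical caller (new_szc stands in for whatever size code the caller computed); the real callers in this file simply cast the result to void when failure is tolerable.

switch (sfmmu_replace_tsb(sfmmup, tsbinfop, new_szc, hatlockp, TSB_GROW)) {
case TSB_SUCCESS:
	/* the new tsb_info has replaced the old one in sfmmup->sfmmu_tsb */
	break;
case TSB_LOSTRACE:
	/* another thread is already working on this tsbinfo; back off */
	break;
case TSB_ALLOCFAIL:
	/* memory is tight; keep running with the existing TSB */
	break;
}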
9991 9984
9992 9985 /*
9993 9986 * This function will re-program hat pgsz array, and invalidate the
9994 9987 * process' context, forcing the process to switch to another
9995 9988 * context on the next TLB miss, and therefore start using the
9996 9989 * TLB that is reprogrammed for the new page sizes.
9997 9990 */
9998 9991 void
9999 9992 sfmmu_reprog_pgsz_arr(sfmmu_t *sfmmup, uint8_t *tmp_pgsz)
10000 9993 {
10001 9994 int i;
10002 9995 hatlock_t *hatlockp = NULL;
10003 9996
10004 9997 hatlockp = sfmmu_hat_enter(sfmmup);
10005 9998 /* USIII+-IV+ optimization, requires hat lock */
10006 9999 if (tmp_pgsz) {
10007 10000 for (i = 0; i < mmu_page_sizes; i++)
10008 10001 sfmmup->sfmmu_pgsz[i] = tmp_pgsz[i];
10009 10002 }
10010 10003 SFMMU_STAT(sf_tlb_reprog_pgsz);
10011 10004
10012 10005 sfmmu_invalidate_ctx(sfmmup);
10013 10006
10014 10007 sfmmu_hat_exit(hatlockp);
10015 10008 }
10016 10009
10017 10010 /*
10018 10011 * The scd_rttecnt field in the SCD must be updated to take account of the
10019 10012 * regions which it contains.
10020 10013 */
10021 10014 static void
10022 10015 sfmmu_set_scd_rttecnt(sf_srd_t *srdp, sf_scd_t *scdp)
10023 10016 {
10024 10017 uint_t rid;
10025 10018 uint_t i, j;
10026 10019 ulong_t w;
10027 10020 sf_region_t *rgnp;
10028 10021
10029 10022 ASSERT(srdp != NULL);
10030 10023
10031 10024 for (i = 0; i < SFMMU_HMERGNMAP_WORDS; i++) {
10032 10025 if ((w = scdp->scd_region_map.bitmap[i]) == 0) {
10033 10026 continue;
10034 10027 }
10035 10028
10036 10029 j = 0;
10037 10030 while (w) {
10038 10031 if (!(w & 0x1)) {
10039 10032 j++;
10040 10033 w >>= 1;
10041 10034 continue;
10042 10035 }
10043 10036 rid = (i << BT_ULSHIFT) | j;
10044 10037 j++;
10045 10038 w >>= 1;
10046 10039
10047 10040 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
10048 10041 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
10049 10042 rgnp = srdp->srd_hmergnp[rid];
10050 10043 ASSERT(rgnp->rgn_refcnt > 0);
10051 10044 ASSERT(rgnp->rgn_id == rid);
10052 10045
10053 10046 scdp->scd_rttecnt[rgnp->rgn_pgszc] +=
10054 10047 rgnp->rgn_size >> TTE_PAGE_SHIFT(rgnp->rgn_pgszc);
10055 10048
10056 10049 /*
10057 10050 * Maintain the tsb0 inflation cnt for the regions
10058 10051 * in the SCD.
10059 10052 */
10060 10053 if (rgnp->rgn_pgszc >= TTE4M) {
10061 10054 scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt +=
10062 10055 rgnp->rgn_size >>
10063 10056 (TTE_PAGE_SHIFT(TTE8K) + 2);
10064 10057 }
10065 10058 }
10066 10059 }
10067 10060 }
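
A worked decode of the bitmap walk above, assuming a 64-bit ulong_t so that BT_ULSHIFT is 6 (that value is not visible in this hunk):

/*
 * Illustration only: a set bit at word i = 1, bit position j = 3 of
 * scd_region_map decodes to rid = (1 << 6) | 3 = 67, so HME region 67
 * of the SRD contributes rgn_size >> TTE_PAGE_SHIFT(rgn_pgszc) ttes to
 * scd_rttecnt[rgn_pgszc].
 */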
10068 10061
10069 10062 /*
10070 10063 * This function assumes that there are either four or six supported page
10071 10064 * sizes and at most two programmable TLBs, so we need to decide which
10072 10065 * page sizes are most important and then tell the MMU layer so it
10073 10066 * can adjust the TLB page sizes accordingly (if supported).
10074 10067 *
10075 10068 * If these assumptions change, this function will need to be
10076 10069 * updated to support whatever the new limits are.
10077 10070 *
10078 10071 * The growing flag is nonzero if we are growing the address space,
10079 10072 * and zero if it is shrinking. This allows us to decide whether
10080 10073 * to grow or shrink our TSB, depending upon available memory
10081 10074 * conditions.
10082 10075 */
10083 10076 static void
10084 10077 sfmmu_check_page_sizes(sfmmu_t *sfmmup, int growing)
10085 10078 {
10086 10079 uint64_t ttecnt[MMU_PAGE_SIZES];
10087 10080 uint64_t tte8k_cnt, tte4m_cnt;
10088 10081 uint8_t i;
10089 10082 int sectsb_thresh;
10090 10083
10091 10084 /*
10092 10085 * Kernel threads, processes with small address spaces not using
10093 10086 * large pages, and dummy ISM HATs need not apply.
10094 10087 */
10095 10088 if (sfmmup == ksfmmup || sfmmup->sfmmu_ismhat != NULL)
10096 10089 return;
10097 10090
10098 10091 if (!SFMMU_LGPGS_INUSE(sfmmup) &&
10099 10092 sfmmup->sfmmu_ttecnt[TTE8K] <= tsb_rss_factor)
10100 10093 return;
10101 10094
10102 10095 for (i = 0; i < mmu_page_sizes; i++) {
10103 10096 ttecnt[i] = sfmmup->sfmmu_ttecnt[i] +
10104 10097 sfmmup->sfmmu_ismttecnt[i];
10105 10098 }
10106 10099
10107 10100 /* Check pagesizes in use, and possibly reprogram DTLB. */
10108 10101 if (&mmu_check_page_sizes)
10109 10102 mmu_check_page_sizes(sfmmup, ttecnt);
10110 10103
10111 10104 /*
10112 10105 * Calculate the number of 8k ttes to represent the span of these
10113 10106 * pages.
10114 10107 */
10115 10108 tte8k_cnt = ttecnt[TTE8K] +
10116 10109 (ttecnt[TTE64K] << (MMU_PAGESHIFT64K - MMU_PAGESHIFT)) +
10117 10110 (ttecnt[TTE512K] << (MMU_PAGESHIFT512K - MMU_PAGESHIFT));
10118 10111 if (mmu_page_sizes == max_mmu_page_sizes) {
10119 10112 tte4m_cnt = ttecnt[TTE4M] +
10120 10113 (ttecnt[TTE32M] << (MMU_PAGESHIFT32M - MMU_PAGESHIFT4M)) +
10121 10114 (ttecnt[TTE256M] << (MMU_PAGESHIFT256M - MMU_PAGESHIFT4M));
10122 10115 } else {
10123 10116 tte4m_cnt = ttecnt[TTE4M];
10124 10117 }
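	/*
	 * Worked example (illustrative, assuming the usual sun4 page
	 * shifts): MMU_PAGESHIFT is 13 (8K), MMU_PAGESHIFT64K is 16 and
	 * MMU_PAGESHIFT512K is 19, so one 64K tte counts as 1 << 3 = 8
	 * and one 512K tte as 1 << 6 = 64 8K-equivalents.  A process
	 * with 100 8K, 10 64K and 2 512K ttes therefore has
	 * tte8k_cnt = 100 + 80 + 128 = 308.  Likewise one 32M tte
	 * (shift 25) adds 1 << 3 = 8 4M-equivalents (shift 22) to
	 * tte4m_cnt.
	 */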
10125 10118
10126 10119 /*
10127 10120 * Inflate tte8k_cnt to allow for region large page allocation failure.
10128 10121 */
10129 10122 tte8k_cnt += sfmmup->sfmmu_tsb0_4minflcnt;
10130 10123
10131 10124 /*
10132 10125 * Inflate TSB sizes by a factor of 2 if this process
10133 10126 * uses 4M text pages to minimize extra conflict misses
10134 10127 * in the first TSB since without counting text pages
10135 10128 * 8K TSB may become too small.
10136 10129 *
10137 10130 * Also double the size of the second TSB to minimize
10138 10131 * extra conflict misses due to competition between 4M text pages
10139 10132 * and data pages.
10140 10133 *
10141 10134 * We need to adjust the second TSB allocation threshold by the
10142 10135 * inflation factor, since there is no point in creating a second
10143 10136 * TSB when we know all the mappings can fit in the I/D TLBs.
10144 10137 */
10145 10138 sectsb_thresh = tsb_sectsb_threshold;
10146 10139 if (sfmmup->sfmmu_flags & HAT_4MTEXT_FLAG) {
10147 10140 tte8k_cnt <<= 1;
10148 10141 tte4m_cnt <<= 1;
10149 10142 sectsb_thresh <<= 1;
10150 10143 }
10151 10144
10152 10145 /*
10153 10146 * Check to see if our TSB is the right size; we may need to
10154 10147 * grow or shrink it. If the process is small, our work is
10155 10148 * finished at this point.
10156 10149 */
10157 10150 if (tte8k_cnt <= tsb_rss_factor && tte4m_cnt <= sectsb_thresh) {
10158 10151 return;
10159 10152 }
10160 10153 sfmmu_size_tsb(sfmmup, growing, tte8k_cnt, tte4m_cnt, sectsb_thresh);
10161 10154 }
10162 10155
10163 10156 static void
10164 10157 sfmmu_size_tsb(sfmmu_t *sfmmup, int growing, uint64_t tte8k_cnt,
10165 - uint64_t tte4m_cnt, int sectsb_thresh)
10158 + uint64_t tte4m_cnt, int sectsb_thresh)
10166 10159 {
10167 10160 int tsb_bits;
10168 10161 uint_t tsb_szc;
10169 10162 struct tsb_info *tsbinfop;
10170 10163 hatlock_t *hatlockp = NULL;
10171 10164
10172 10165 hatlockp = sfmmu_hat_enter(sfmmup);
10173 10166 ASSERT(hatlockp != NULL);
10174 10167 tsbinfop = sfmmup->sfmmu_tsb;
10175 10168 ASSERT(tsbinfop != NULL);
10176 10169
10177 10170 /*
10178 10171 * If we're growing, select the size based on RSS. If we're
10179 10172 * shrinking, leave some room so we don't have to turn around and
10180 10173 * grow again immediately.
10181 10174 */
10182 10175 if (growing)
10183 10176 tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt);
10184 10177 else
10185 10178 tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt << 1);
10186 10179
10187 10180 if (!growing && (tsb_szc < tsbinfop->tsb_szc) &&
10188 10181 (tsb_szc >= default_tsb_size) && TSB_OK_SHRINK()) {
10189 10182 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, tsb_szc,
10190 10183 hatlockp, TSB_SHRINK);
10191 10184 } else if (growing && tsb_szc > tsbinfop->tsb_szc && TSB_OK_GROW()) {
10192 10185 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, tsb_szc,
10193 10186 hatlockp, TSB_GROW);
10194 10187 }
10195 10188 tsbinfop = sfmmup->sfmmu_tsb;
10196 10189
10197 10190 /*
10198 10191 * With the TLB and first TSB out of the way, we need to see if
10199 10192 * we need a second TSB for 4M pages. If we managed to reprogram
10200 10193 * the TLB page sizes above, the process will start using this new
10201 10194 * TSB right away; otherwise, it will start using it on the next
10202 10195 * context switch. Either way, it's no big deal so there's no
10203 10196 * synchronization with the trap handlers here unless we grow the
10204 10197 * TSB (in which case it's required to prevent using the old one
10205 10198 * after it's freed). Note: second tsb is required for 32M/256M
10206 10199 * page sizes.
10207 10200 */
10208 10201 if (tte4m_cnt > sectsb_thresh) {
10209 10202 /*
10210 10203 * If we're growing, select the size based on RSS. If we're
10211 10204 * shrinking, leave some room so we don't have to turn
10212 10205 * around and grow again immediately.
10213 10206 */
10214 10207 if (growing)
10215 10208 tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt);
10216 10209 else
10217 10210 tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt << 1);
10218 10211 if (tsbinfop->tsb_next == NULL) {
10219 10212 struct tsb_info *newtsb;
10220 10213 int allocflags = SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)?
10221 10214 0 : TSB_ALLOC;
10222 10215
10223 10216 sfmmu_hat_exit(hatlockp);
10224 10217
10225 10218 /*
10226 10219 * Try to allocate a TSB for 4[32|256]M pages. If we
10227 10220 * can't get the size we want, retry w/a minimum sized
10228 10221 			 * can't get the size we want, retry with a minimum-sized
10229 10222 			 * TSB. If that still doesn't work, give up; we can
10230 10223 */
10231 10224 tsb_bits = (mmu_page_sizes == max_mmu_page_sizes)?
10232 10225 TSB4M|TSB32M|TSB256M:TSB4M;
10233 10226 if ((sfmmu_tsbinfo_alloc(&newtsb, tsb_szc, tsb_bits,
10234 10227 allocflags, sfmmup)) &&
10235 10228 (tsb_szc <= TSB_4M_SZCODE ||
10236 10229 sfmmu_tsbinfo_alloc(&newtsb, TSB_4M_SZCODE,
10237 10230 tsb_bits, allocflags, sfmmup)) &&
10238 10231 sfmmu_tsbinfo_alloc(&newtsb, TSB_MIN_SZCODE,
10239 10232 tsb_bits, allocflags, sfmmup)) {
10240 10233 return;
10241 10234 }
10242 10235
10243 10236 hatlockp = sfmmu_hat_enter(sfmmup);
10244 10237
10245 10238 sfmmu_invalidate_ctx(sfmmup);
10246 10239
10247 10240 if (sfmmup->sfmmu_tsb->tsb_next == NULL) {
10248 10241 sfmmup->sfmmu_tsb->tsb_next = newtsb;
10249 10242 SFMMU_STAT(sf_tsb_sectsb_create);
10250 10243 sfmmu_hat_exit(hatlockp);
10251 10244 return;
10252 10245 } else {
10253 10246 /*
10254 10247 * It's annoying, but possible for us
10255 10248 				 * to get here... we dropped the HAT lock
10256 10249 * because of locking order in the kmem
10257 10250 * allocator, and while we were off getting
10258 10251 * our memory, some other thread decided to
10259 10252 * do us a favor and won the race to get a
10260 10253 * second TSB for this process. Sigh.
10261 10254 */
10262 10255 sfmmu_hat_exit(hatlockp);
10263 10256 sfmmu_tsbinfo_free(newtsb);
10264 10257 return;
10265 10258 }
10266 10259 }
10267 10260
10268 10261 /*
10269 10262 * We have a second TSB, see if it's big enough.
10270 10263 */
10271 10264 tsbinfop = tsbinfop->tsb_next;
10272 10265
10273 10266 /*
10274 10267 * Check to see if our second TSB is the right size;
10275 10268 * we may need to grow or shrink it.
10276 10269 * To prevent thrashing (e.g. growing the TSB on a
10277 10270 * subsequent map operation), only try to shrink if
10278 10271 * the TSB reach exceeds twice the virtual address
10279 10272 * space size.
10280 10273 */
10281 10274 if (!growing && (tsb_szc < tsbinfop->tsb_szc) &&
10282 10275 (tsb_szc >= default_tsb_size) && TSB_OK_SHRINK()) {
10283 10276 (void) sfmmu_replace_tsb(sfmmup, tsbinfop,
10284 10277 tsb_szc, hatlockp, TSB_SHRINK);
10285 10278 } else if (growing && tsb_szc > tsbinfop->tsb_szc &&
10286 10279 TSB_OK_GROW()) {
10287 10280 (void) sfmmu_replace_tsb(sfmmup, tsbinfop,
10288 10281 tsb_szc, hatlockp, TSB_GROW);
10289 10282 }
10290 10283 }
10291 10284
10292 10285 sfmmu_hat_exit(hatlockp);
10293 10286 }
10294 10287
10295 10288 /*
10296 10289 * Free up a sfmmu
10297 10290 * Since the sfmmu is currently embedded in the hat struct we simply zero
10298 10291 * out our fields and free up the ism map blk list if any.
10299 10292 */
10300 10293 static void
10301 10294 sfmmu_free_sfmmu(sfmmu_t *sfmmup)
10302 10295 {
10303 10296 ism_blk_t *blkp, *nx_blkp;
10304 10297 #ifdef DEBUG
10305 10298 ism_map_t *map;
10306 - int i;
10299 + int i;
10307 10300 #endif
10308 10301
10309 10302 ASSERT(sfmmup->sfmmu_ttecnt[TTE8K] == 0);
10310 10303 ASSERT(sfmmup->sfmmu_ttecnt[TTE64K] == 0);
10311 10304 ASSERT(sfmmup->sfmmu_ttecnt[TTE512K] == 0);
10312 10305 ASSERT(sfmmup->sfmmu_ttecnt[TTE4M] == 0);
10313 10306 ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0);
10314 10307 ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0);
10315 10308 ASSERT(SF_RGNMAP_ISNULL(sfmmup));
10316 10309
10317 10310 sfmmup->sfmmu_free = 0;
10318 10311 sfmmup->sfmmu_ismhat = 0;
10319 10312
10320 10313 blkp = sfmmup->sfmmu_iblk;
10321 10314 sfmmup->sfmmu_iblk = NULL;
10322 10315
10323 10316 while (blkp) {
10324 10317 #ifdef DEBUG
10325 10318 map = blkp->iblk_maps;
10326 10319 for (i = 0; i < ISM_MAP_SLOTS; i++) {
10327 10320 ASSERT(map[i].imap_seg == 0);
10328 10321 ASSERT(map[i].imap_ismhat == NULL);
10329 10322 ASSERT(map[i].imap_ment == NULL);
10330 10323 }
10331 10324 #endif
10332 10325 nx_blkp = blkp->iblk_next;
10333 10326 blkp->iblk_next = NULL;
10334 10327 blkp->iblk_nextpa = (uint64_t)-1;
10335 10328 kmem_cache_free(ism_blk_cache, blkp);
10336 10329 blkp = nx_blkp;
10337 10330 }
10338 10331 }
10339 10332
10340 10333 /*
10341 10334  * Locking primitives accessed by HATLOCK macros
10342 10335 */
10343 10336
10344 10337 #define SFMMU_SPL_MTX (0x0)
10345 10338 #define SFMMU_ML_MTX (0x1)
10346 10339
10347 10340 #define SFMMU_MLSPL_MTX(type, pg) (((type) == SFMMU_SPL_MTX) ? \
10348 10341 SPL_HASH(pg) : MLIST_HASH(pg))
10349 10342
10350 10343 kmutex_t *
10351 10344 sfmmu_page_enter(struct page *pp)
10352 10345 {
10353 10346 return (sfmmu_mlspl_enter(pp, SFMMU_SPL_MTX));
10354 10347 }
10355 10348
10356 10349 void
10357 10350 sfmmu_page_exit(kmutex_t *spl)
10358 10351 {
10359 10352 mutex_exit(spl);
10360 10353 }
10361 10354
10362 10355 int
10363 10356 sfmmu_page_spl_held(struct page *pp)
10364 10357 {
10365 10358 return (sfmmu_mlspl_held(pp, SFMMU_SPL_MTX));
10366 10359 }
10367 10360
10368 10361 kmutex_t *
10369 10362 sfmmu_mlist_enter(struct page *pp)
10370 10363 {
10371 10364 return (sfmmu_mlspl_enter(pp, SFMMU_ML_MTX));
10372 10365 }
10373 10366
10374 10367 void
10375 10368 sfmmu_mlist_exit(kmutex_t *mml)
10376 10369 {
10377 10370 mutex_exit(mml);
10378 10371 }
10379 10372
10380 10373 int
10381 10374 sfmmu_mlist_held(struct page *pp)
10382 10375 {
10383 10376
10384 10377 return (sfmmu_mlspl_held(pp, SFMMU_ML_MTX));
10385 10378 }
10386 10379
10387 10380 /*
10388 10381 * Common code for sfmmu_mlist_enter() and sfmmu_page_enter(). For
10389 10382  * sfmmu_mlist_enter() the mml_table lock array is used, and for
10390 10383  * sfmmu_page_enter() the sfmmu_page_lock lock array is used.
10391 10384 *
10392 10385 * The lock is taken on a root page so that it protects an operation on all
10393 10386 * constituent pages of a large page pp belongs to.
10394 10387 *
10395 10388 * The routine takes a lock from the appropriate array. The lock is determined
10396 10389 * by hashing the root page. After taking the lock this routine checks if the
10397 10390  * root page has the same size code that was used to determine the root (i.e.
10398 10391  * that root hasn't changed). If the root page has the expected p_szc field we
10399 10392 * have the right lock and it's returned to the caller. If root's p_szc
10400 10393 * decreased we release the lock and retry from the beginning. This case can
10401 10394 * happen due to hat_page_demote() decreasing p_szc between our load of p_szc
10402 10395 * value and taking the lock. The number of retries due to p_szc decrease is
10403 10396 * limited by the maximum p_szc value. If p_szc is 0 we return the lock
10404 10397 * determined by hashing pp itself.
10405 10398 *
10406 10399 * If our caller doesn't hold a SE_SHARED or SE_EXCL lock on pp it's also
10407 10400 * possible that p_szc can increase. To increase p_szc a thread has to lock
10408 10401 * all constituent pages EXCL and do hat_pageunload() on all of them. All the
10409 10402 * callers that don't hold a page locked recheck if hmeblk through which pp
10410 10403 * was found still maps this pp. If it doesn't map it anymore returned lock
10411 10404 * is immediately dropped. Therefore if sfmmu_mlspl_enter() hits the case of
10412 10405 * p_szc increase after taking the lock it returns this lock without further
10413 10406 * retries because in this case the caller doesn't care about which lock was
10414 10407 * taken. The caller will drop it right away.
10415 10408 *
10416 10409 * After the routine returns it's guaranteed that hat_page_demote() can't
10417 10410 * change p_szc field of any of constituent pages of a large page pp belongs
10418 10411 * to as long as pp was either locked at least SHARED prior to this call or
10419 10412 * the caller finds that hment that pointed to this pp still references this
10420 10413 * pp (this also assumes that the caller holds hme hash bucket lock so that
10421 10414 * the same pp can't be remapped into the same hmeblk after it was unmapped by
10422 10415 * hat_pageunload()).
10423 10416 */
10424 10417 static kmutex_t *
10425 10418 sfmmu_mlspl_enter(struct page *pp, int type)
10426 10419 {
10427 10420 kmutex_t *mtx;
10428 10421 uint_t prev_rszc = UINT_MAX;
10429 10422 page_t *rootpp;
10430 10423 uint_t szc;
10431 10424 uint_t rszc;
10432 10425 uint_t pszc = pp->p_szc;
10433 10426
10434 10427 ASSERT(pp != NULL);
10435 10428
10436 10429 again:
10437 10430 if (pszc == 0) {
10438 10431 mtx = SFMMU_MLSPL_MTX(type, pp);
10439 10432 mutex_enter(mtx);
10440 10433 return (mtx);
10441 10434 }
10442 10435
10443 10436 /* The lock lives in the root page */
10444 10437 rootpp = PP_GROUPLEADER(pp, pszc);
10445 10438 mtx = SFMMU_MLSPL_MTX(type, rootpp);
10446 10439 mutex_enter(mtx);
10447 10440
10448 10441 /*
10449 10442 * Return mml in the following 3 cases:
10450 10443 *
10451 10444 * 1) If pp itself is root since if its p_szc decreased before we took
10452 10445 * the lock pp is still the root of smaller szc page. And if its p_szc
10453 10446 * increased it doesn't matter what lock we return (see comment in
10454 10447 * front of this routine).
10455 10448 *
10456 10449 * 2) If pp's not root but rootpp is the root of a rootpp->p_szc size
10457 10450 * large page we have the right lock since any previous potential
10458 10451 * hat_page_demote() is done demoting from greater than current root's
10459 10452 * p_szc because hat_page_demote() changes root's p_szc last. No
10460 10453 * further hat_page_demote() can start or be in progress since it
10461 10454 * would need the same lock we currently hold.
10462 10455 *
10463 10456 * 3) If rootpp's p_szc increased since previous iteration it doesn't
10464 10457 * matter what lock we return (see comment in front of this routine).
10465 10458 */
10466 10459 if (pp == rootpp || (rszc = rootpp->p_szc) == pszc ||
10467 10460 rszc >= prev_rszc) {
10468 10461 return (mtx);
10469 10462 }
10470 10463
10471 10464 /*
10472 10465 * hat_page_demote() could have decreased root's p_szc.
10473 10466 * In this case pp's p_szc must also be smaller than pszc.
10474 10467 * Retry.
10475 10468 */
10476 10469 if (rszc < pszc) {
10477 10470 szc = pp->p_szc;
10478 10471 if (szc < pszc) {
10479 10472 mutex_exit(mtx);
10480 10473 pszc = szc;
10481 10474 goto again;
10482 10475 }
10483 10476 /*
10484 10477 * pp's p_szc increased after it was decreased.
10485 10478 * page cannot be mapped. Return current lock. The caller
10486 10479 * will drop it right away.
10487 10480 */
10488 10481 return (mtx);
10489 10482 }
10490 10483
10491 10484 /*
10492 10485 * root's p_szc is greater than pp's p_szc.
10493 10486 * hat_page_demote() is not done with all pages
10494 10487 * yet. Wait for it to complete.
10495 10488 */
10496 10489 mutex_exit(mtx);
10497 10490 rootpp = PP_GROUPLEADER(rootpp, rszc);
10498 10491 mtx = SFMMU_MLSPL_MTX(type, rootpp);
10499 10492 mutex_enter(mtx);
10500 10493 mutex_exit(mtx);
10501 10494 prev_rszc = rszc;
10502 10495 goto again;
10503 10496 }
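
The caller-side recheck described in the block comment above sfmmu_mlspl_enter() looks roughly like the sketch below. It is hypothetical (sfhmep stands for whatever hment the caller walked through to find pp) and only illustrates the "drop the lock if the mapping went away" rule.

kmutex_t *pml = sfmmu_mlist_enter(pp);

/*
 * Illustration only: without an SE_SHARED/SE_EXCL lock on pp, its p_szc
 * may have increased while we slept, so verify that the hment we came
 * through still maps this page before trusting the returned lock.
 */
if (sfhmep->hme_page != pp) {
	sfmmu_mlist_exit(pml);
	return;		/* mapping is gone; nothing to do */
}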
10504 10497
10505 10498 static int
10506 10499 sfmmu_mlspl_held(struct page *pp, int type)
10507 10500 {
10508 10501 kmutex_t *mtx;
10509 10502
10510 10503 ASSERT(pp != NULL);
10511 10504 /* The lock lives in the root page */
10512 10505 pp = PP_PAGEROOT(pp);
10513 10506 ASSERT(pp != NULL);
10514 10507
10515 10508 mtx = SFMMU_MLSPL_MTX(type, pp);
10516 10509 return (MUTEX_HELD(mtx));
10517 10510 }
10518 10511
10519 10512 static uint_t
10520 10513 sfmmu_get_free_hblk(struct hme_blk **hmeblkpp, uint_t critical)
10521 10514 {
10522 10515 struct hme_blk *hblkp;
10523 10516
10524 10517
10525 10518 if (freehblkp != NULL) {
10526 10519 mutex_enter(&freehblkp_lock);
10527 10520 if (freehblkp != NULL) {
10528 10521 /*
10529 10522 			 * If the current thread owns hblk_reserve OR this is a
10530 10523 			 * critical request from sfmmu_hblk_steal(),
10531 10524 			 * let it succeed even if freehblkcnt is really low.
10532 10525 */
10533 10526 if (freehblkcnt <= HBLK_RESERVE_MIN && !critical) {
10534 10527 SFMMU_STAT(sf_get_free_throttle);
10535 10528 mutex_exit(&freehblkp_lock);
10536 10529 return (0);
10537 10530 }
10538 10531 freehblkcnt--;
10539 10532 *hmeblkpp = freehblkp;
10540 10533 hblkp = *hmeblkpp;
10541 10534 freehblkp = hblkp->hblk_next;
10542 10535 mutex_exit(&freehblkp_lock);
10543 10536 hblkp->hblk_next = NULL;
10544 10537 SFMMU_STAT(sf_get_free_success);
10545 10538
10546 10539 ASSERT(hblkp->hblk_hmecnt == 0);
10547 10540 ASSERT(hblkp->hblk_vcnt == 0);
10548 10541 ASSERT(hblkp->hblk_nextpa == va_to_pa((caddr_t)hblkp));
10549 10542
10550 10543 return (1);
10551 10544 }
10552 10545 mutex_exit(&freehblkp_lock);
10553 10546 }
10554 10547
10555 10548 /* Check cpu hblk pending queues */
10556 10549 if ((*hmeblkpp = sfmmu_check_pending_hblks(TTE8K)) != NULL) {
10557 10550 hblkp = *hmeblkpp;
10558 10551 hblkp->hblk_next = NULL;
10559 10552 hblkp->hblk_nextpa = va_to_pa((caddr_t)hblkp);
10560 10553
10561 10554 ASSERT(hblkp->hblk_hmecnt == 0);
10562 10555 ASSERT(hblkp->hblk_vcnt == 0);
10563 10556
10564 10557 return (1);
10565 10558 }
10566 10559
10567 10560 SFMMU_STAT(sf_get_free_fail);
10568 10561 return (0);
10569 10562 }
10570 10563
10571 10564 static uint_t
10572 10565 sfmmu_put_free_hblk(struct hme_blk *hmeblkp, uint_t critical)
10573 10566 {
10574 10567 struct hme_blk *hblkp;
10575 10568
10576 10569 ASSERT(hmeblkp->hblk_hmecnt == 0);
10577 10570 ASSERT(hmeblkp->hblk_vcnt == 0);
10578 10571 ASSERT(hmeblkp->hblk_nextpa == va_to_pa((caddr_t)hmeblkp));
10579 10572
10580 10573 /*
10581 10574 * If the current thread is mapping into kernel space,
10582 10575 	 * let it succeed even if freehblkcnt is at its max
10583 10576 * so that it will avoid freeing it to kmem.
10584 10577 * This will prevent stack overflow due to
10585 10578 * possible recursion since kmem_cache_free()
10586 10579 * might require creation of a slab which
10587 10580 * in turn needs an hmeblk to map that slab;
10588 10581 * let's break this vicious chain at the first
10589 10582 * opportunity.
10590 10583 */
10591 10584 if (freehblkcnt < HBLK_RESERVE_CNT || critical) {
10592 10585 mutex_enter(&freehblkp_lock);
10593 10586 if (freehblkcnt < HBLK_RESERVE_CNT || critical) {
10594 10587 SFMMU_STAT(sf_put_free_success);
10595 10588 freehblkcnt++;
10596 10589 hmeblkp->hblk_next = freehblkp;
10597 10590 freehblkp = hmeblkp;
10598 10591 mutex_exit(&freehblkp_lock);
10599 10592 return (1);
10600 10593 }
10601 10594 mutex_exit(&freehblkp_lock);
10602 10595 }
10603 10596
10604 10597 /*
10605 10598 * Bring down freehblkcnt to HBLK_RESERVE_CNT. We are here
10606 10599 * only if freehblkcnt is at least HBLK_RESERVE_CNT *and*
10607 10600 * we are not in the process of mapping into kernel space.
10608 10601 */
10609 10602 ASSERT(!critical);
10610 10603 while (freehblkcnt > HBLK_RESERVE_CNT) {
10611 10604 mutex_enter(&freehblkp_lock);
10612 10605 if (freehblkcnt > HBLK_RESERVE_CNT) {
10613 10606 freehblkcnt--;
10614 10607 hblkp = freehblkp;
10615 10608 freehblkp = hblkp->hblk_next;
10616 10609 mutex_exit(&freehblkp_lock);
10617 10610 ASSERT(get_hblk_cache(hblkp) == sfmmu8_cache);
10618 10611 kmem_cache_free(sfmmu8_cache, hblkp);
10619 10612 continue;
10620 10613 }
10621 10614 mutex_exit(&freehblkp_lock);
10622 10615 }
10623 10616 SFMMU_STAT(sf_put_free_fail);
10624 10617 return (0);
10625 10618 }
10626 10619
10627 10620 static void
10628 10621 sfmmu_hblk_swap(struct hme_blk *new)
10629 10622 {
10630 10623 struct hme_blk *old, *hblkp, *prev;
10631 10624 uint64_t newpa;
10632 10625 caddr_t base, vaddr, endaddr;
10633 10626 struct hmehash_bucket *hmebp;
10634 10627 struct sf_hment *osfhme, *nsfhme;
10635 10628 page_t *pp;
10636 10629 kmutex_t *pml;
10637 10630 tte_t tte;
10638 10631 struct hme_blk *list = NULL;
10639 10632
10640 10633 #ifdef DEBUG
10641 10634 hmeblk_tag hblktag;
10642 10635 struct hme_blk *found;
10643 10636 #endif
10644 10637 old = HBLK_RESERVE;
10645 10638 ASSERT(!old->hblk_shared);
10646 10639
10647 10640 /*
10648 10641 * save pa before bcopy clobbers it
10649 10642 */
10650 10643 newpa = new->hblk_nextpa;
10651 10644
10652 10645 base = (caddr_t)get_hblk_base(old);
10653 10646 endaddr = base + get_hblk_span(old);
10654 10647
10655 10648 /*
10656 10649 * acquire hash bucket lock.
10657 10650 */
10658 10651 hmebp = sfmmu_tteload_acquire_hashbucket(ksfmmup, base, TTE8K,
10659 10652 SFMMU_INVALID_SHMERID);
10660 10653
10661 10654 /*
10662 10655 * copy contents from old to new
10663 10656 */
10664 10657 bcopy((void *)old, (void *)new, HME8BLK_SZ);
10665 10658
10666 10659 /*
10667 10660 * add new to hash chain
10668 10661 */
10669 10662 sfmmu_hblk_hash_add(hmebp, new, newpa);
10670 10663
10671 10664 /*
10672 10665 * search hash chain for hblk_reserve; this needs to be performed
10673 10666 * after adding new, otherwise prev won't correspond to the hblk which
10674 10667 * is prior to old in hash chain when we call sfmmu_hblk_hash_rm to
10675 10668 * remove old later.
10676 10669 */
10677 10670 for (prev = NULL,
10678 10671 hblkp = hmebp->hmeblkp; hblkp != NULL && hblkp != old;
10679 10672 prev = hblkp, hblkp = hblkp->hblk_next)
10680 10673 ;
10681 10674
10682 10675 if (hblkp != old)
10683 10676 panic("sfmmu_hblk_swap: hblk_reserve not found");
10684 10677
10685 10678 /*
10686 10679 * p_mapping list is still pointing to hments in hblk_reserve;
10687 10680 * fix up p_mapping list so that they point to hments in new.
10688 10681 *
10689 10682 * Since all these mappings are created by hblk_reserve_thread
10690 10683 * on the way and it's using at least one of the buffers from each of
10691 10684 	 * along the way and it's using at least one of the buffers from each of
10692 10685 * mappings getting unloaded by another thread.
10693 10686 *
10694 10687 * tsbmiss could only modify ref/mod bits of hments in old/new.
10695 10688 * Since all of these hments hold mappings established by segkmem
10696 10689 * and mappings in segkmem are setup with HAT_NOSYNC, ref/mod bits
10697 10690 * have no meaning for the mappings in hblk_reserve. hments in
10698 10691 * old and new are identical except for ref/mod bits.
10699 10692 */
10700 10693 for (vaddr = base; vaddr < endaddr; vaddr += TTEBYTES(TTE8K)) {
10701 10694
10702 10695 HBLKTOHME(osfhme, old, vaddr);
10703 10696 sfmmu_copytte(&osfhme->hme_tte, &tte);
10704 10697
10705 10698 if (TTE_IS_VALID(&tte)) {
10706 10699 if ((pp = osfhme->hme_page) == NULL)
10707 10700 panic("sfmmu_hblk_swap: page not mapped");
10708 10701
10709 10702 pml = sfmmu_mlist_enter(pp);
10710 10703
10711 10704 if (pp != osfhme->hme_page)
10712 10705 panic("sfmmu_hblk_swap: mapping changed");
10713 10706
10714 10707 HBLKTOHME(nsfhme, new, vaddr);
10715 10708
10716 10709 HME_ADD(nsfhme, pp);
10717 10710 HME_SUB(osfhme, pp);
10718 10711
10719 10712 sfmmu_mlist_exit(pml);
10720 10713 }
10721 10714 }
10722 10715
10723 10716 /*
10724 10717 * remove old from hash chain
10725 10718 */
10726 10719 sfmmu_hblk_hash_rm(hmebp, old, prev, &list, 1);
10727 10720
10728 10721 #ifdef DEBUG
10729 10722
10730 10723 hblktag.htag_id = ksfmmup;
10731 10724 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
10732 10725 hblktag.htag_bspage = HME_HASH_BSPAGE(base, HME_HASH_SHIFT(TTE8K));
10733 10726 hblktag.htag_rehash = HME_HASH_REHASH(TTE8K);
10734 10727 HME_HASH_FAST_SEARCH(hmebp, hblktag, found);
10735 10728
10736 10729 if (found != new)
10737 10730 panic("sfmmu_hblk_swap: new hblk not found");
10738 10731 #endif
10739 10732
10740 10733 SFMMU_HASH_UNLOCK(hmebp);
10741 10734
10742 10735 /*
10743 10736 * Reset hblk_reserve
10744 10737 */
10745 10738 bzero((void *)old, HME8BLK_SZ);
10746 10739 old->hblk_nextpa = va_to_pa((caddr_t)old);
10747 10740 }
10748 10741
10749 10742 /*
10750 10743 * Grab the mlist mutex for both pages passed in.
10751 10744 *
10752 10745 * low and high will be returned as pointers to the mutexes for these pages.
10753 10746 * low refers to the mutex residing in the lower bin of the mlist hash, while
10754 10747 * high refers to the mutex residing in the higher bin of the mlist hash. This
10755 10748 * is due to the locking order restrictions on the same thread grabbing
10756 10749 * multiple mlist mutexes. The low lock must be acquired before the high lock.
10757 10750 *
10758 10751 * If both pages hash to the same mutex, only grab that single mutex, and
10759 10752 * high will be returned as NULL
10760 10753 * If the pages hash to different bins in the hash, grab the lower addressed
10761 10754 * lock first and then the higher addressed lock in order to follow the locking
10762 10755 * rules involved with the same thread grabbing multiple mlist mutexes.
10763 10756 * low and high will both have non-NULL values.
10764 10757 */
10765 10758 static void
10766 10759 sfmmu_mlist_reloc_enter(struct page *targ, struct page *repl,
10767 10760 kmutex_t **low, kmutex_t **high)
10768 10761 {
10769 10762 kmutex_t *mml_targ, *mml_repl;
10770 10763
10771 10764 /*
10772 10765 * no need to do the dance around szc as in sfmmu_mlist_enter()
10773 10766 * because this routine is only called by hat_page_relocate() and all
10774 10767 * targ and repl pages are already locked EXCL so szc can't change.
10775 10768 */
10776 10769
10777 10770 mml_targ = MLIST_HASH(PP_PAGEROOT(targ));
10778 10771 mml_repl = MLIST_HASH(PP_PAGEROOT(repl));
10779 10772
10780 10773 if (mml_targ == mml_repl) {
10781 10774 *low = mml_targ;
10782 10775 *high = NULL;
10783 10776 } else {
10784 10777 if (mml_targ < mml_repl) {
10785 10778 *low = mml_targ;
10786 10779 *high = mml_repl;
10787 10780 } else {
10788 10781 *low = mml_repl;
10789 10782 *high = mml_targ;
10790 10783 }
10791 10784 }
10792 10785
10793 10786 mutex_enter(*low);
10794 10787 if (*high)
10795 10788 mutex_enter(*high);
10796 10789 }
10797 10790
10798 10791 static void
10799 10792 sfmmu_mlist_reloc_exit(kmutex_t *low, kmutex_t *high)
10800 10793 {
10801 10794 if (high)
10802 10795 mutex_exit(high);
10803 10796 mutex_exit(low);
10804 10797 }
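
A hedged usage sketch of the pair above (hypothetical caller; per the comment, the real consumer is hat_page_relocate(), which already holds both pages EXCL):

kmutex_t *low, *high;

sfmmu_mlist_reloc_enter(targ, repl, &low, &high);
/* ... move mappings from the target page to its replacement ... */
sfmmu_mlist_reloc_exit(low, high);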
10805 10798
10806 10799 static hatlock_t *
10807 10800 sfmmu_hat_enter(sfmmu_t *sfmmup)
10808 10801 {
10809 10802 hatlock_t *hatlockp;
10810 10803
10811 10804 if (sfmmup != ksfmmup) {
10812 10805 hatlockp = TSB_HASH(sfmmup);
10813 10806 mutex_enter(HATLOCK_MUTEXP(hatlockp));
10814 10807 return (hatlockp);
10815 10808 }
10816 10809 return (NULL);
10817 10810 }
10818 10811
10819 10812 static hatlock_t *
10820 10813 sfmmu_hat_tryenter(sfmmu_t *sfmmup)
10821 10814 {
10822 10815 hatlock_t *hatlockp;
10823 10816
10824 10817 if (sfmmup != ksfmmup) {
10825 10818 hatlockp = TSB_HASH(sfmmup);
10826 10819 if (mutex_tryenter(HATLOCK_MUTEXP(hatlockp)) == 0)
10827 10820 return (NULL);
10828 10821 return (hatlockp);
10829 10822 }
10830 10823 return (NULL);
10831 10824 }
10832 10825
10833 10826 static void
10834 10827 sfmmu_hat_exit(hatlock_t *hatlockp)
10835 10828 {
10836 10829 if (hatlockp != NULL)
10837 10830 mutex_exit(HATLOCK_MUTEXP(hatlockp));
10838 10831 }
10839 10832
10840 10833 static void
10841 10834 sfmmu_hat_lock_all(void)
10842 10835 {
10843 10836 int i;
10844 10837 for (i = 0; i < SFMMU_NUM_LOCK; i++)
10845 10838 mutex_enter(HATLOCK_MUTEXP(&hat_lock[i]));
10846 10839 }
10847 10840
10848 10841 static void
10849 10842 sfmmu_hat_unlock_all(void)
10850 10843 {
10851 10844 int i;
10852 10845 for (i = SFMMU_NUM_LOCK - 1; i >= 0; i--)
10853 10846 mutex_exit(HATLOCK_MUTEXP(&hat_lock[i]));
10854 10847 }
10855 10848
10856 10849 int
10857 10850 sfmmu_hat_lock_held(sfmmu_t *sfmmup)
10858 10851 {
10859 10852 ASSERT(sfmmup != ksfmmup);
10860 10853 return (MUTEX_HELD(HATLOCK_MUTEXP(TSB_HASH(sfmmup))));
10861 10854 }
10862 10855
10863 10856 /*
10864 10857 * Locking primitives to provide consistency between ISM unmap
10865 10858 * and other operations. Since ISM unmap can take a long time, we
10866 10859 * use HAT_ISMBUSY flag (protected by the hatlock) to avoid creating
10867 10860 * contention on the hatlock buckets while ISM segments are being
10868 10861 * unmapped. The tradeoff is that the flags don't prevent priority
10869 10862 * inversion from occurring, so we must request kernel priority in
10870 10863 * case we have to sleep to keep from getting buried while holding
10871 10864 * the HAT_ISMBUSY flag set, which in turn could block other kernel
10872 10865 * threads from running (for example, in sfmmu_uvatopfn()).
10873 10866 */
10874 10867 static void
10875 10868 sfmmu_ismhat_enter(sfmmu_t *sfmmup, int hatlock_held)
10876 10869 {
10877 10870 hatlock_t *hatlockp;
10878 10871
10879 - THREAD_KPRI_REQUEST();
10880 10872 if (!hatlock_held)
10881 10873 hatlockp = sfmmu_hat_enter(sfmmup);
10882 10874 while (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY))
10883 10875 cv_wait(&sfmmup->sfmmu_tsb_cv, HATLOCK_MUTEXP(hatlockp));
10884 10876 SFMMU_FLAGS_SET(sfmmup, HAT_ISMBUSY);
10885 10877 if (!hatlock_held)
10886 10878 sfmmu_hat_exit(hatlockp);
10887 10879 }
10888 10880
10889 10881 static void
10890 10882 sfmmu_ismhat_exit(sfmmu_t *sfmmup, int hatlock_held)
10891 10883 {
10892 10884 hatlock_t *hatlockp;
10893 10885
10894 10886 if (!hatlock_held)
10895 10887 hatlockp = sfmmu_hat_enter(sfmmup);
10896 10888 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY));
10897 10889 SFMMU_FLAGS_CLEAR(sfmmup, HAT_ISMBUSY);
10898 10890 cv_broadcast(&sfmmup->sfmmu_tsb_cv);
10899 10891 if (!hatlock_held)
10900 10892 sfmmu_hat_exit(hatlockp);
10901 - THREAD_KPRI_RELEASE();
10902 10893 }
10903 10894
10904 10895 /*
10905 10896 *
10906 10897 * Algorithm:
10907 10898 *
10908 10899 * (1) if segkmem is not ready, allocate hblk from an array of pre-alloc'ed
10909 10900 * hblks.
10910 10901 *
10911 10902 * (2) if we are allocating an hblk for mapping a slab in sfmmu_cache,
10912 10903 *
10913 - * (a) try to return an hblk from reserve pool of free hblks;
10904 + * (a) try to return an hblk from reserve pool of free hblks;
10914 10905 * (b) if the reserve pool is empty, acquire hblk_reserve_lock
10915 10906 * and return hblk_reserve.
10916 10907 *
10917 10908 * (3) call kmem_cache_alloc() to allocate hblk;
10918 10909 *
10919 10910 * (a) if hblk_reserve_lock is held by the current thread,
10920 10911 * atomically replace hblk_reserve by the hblk that is
10921 10912 * returned by kmem_cache_alloc; release hblk_reserve_lock
10922 10913 * and call kmem_cache_alloc() again.
10923 10914 * (b) if reserve pool is not full, add the hblk that is
10924 10915 * returned by kmem_cache_alloc to reserve pool and
10925 10916 * call kmem_cache_alloc again.
10926 10917 *
10927 10918 */
10928 10919 static struct hme_blk *
10929 10920 sfmmu_hblk_alloc(sfmmu_t *sfmmup, caddr_t vaddr,
10930 - struct hmehash_bucket *hmebp, uint_t size, hmeblk_tag hblktag,
10931 - uint_t flags, uint_t rid)
10921 + struct hmehash_bucket *hmebp, uint_t size, hmeblk_tag hblktag,
10922 + uint_t flags, uint_t rid)
10932 10923 {
10933 10924 struct hme_blk *hmeblkp = NULL;
10934 10925 struct hme_blk *newhblkp;
10935 10926 struct hme_blk *shw_hblkp = NULL;
10936 10927 struct kmem_cache *sfmmu_cache = NULL;
10937 10928 uint64_t hblkpa;
10938 10929 ulong_t index;
10939 10930 uint_t owner; /* set to 1 if using hblk_reserve */
10940 10931 uint_t forcefree;
10941 10932 int sleep;
10942 10933 sf_srd_t *srdp;
10943 10934 sf_region_t *rgnp;
10944 10935
10945 10936 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
10946 10937 ASSERT(hblktag.htag_rid == rid);
10947 10938 SFMMU_VALIDATE_HMERID(sfmmup, rid, vaddr, TTEBYTES(size));
10948 10939 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) ||
10949 10940 IS_P2ALIGNED(vaddr, TTEBYTES(size)));
10950 10941
10951 10942 /*
10952 10943 * If segkmem is not created yet, allocate from static hmeblks
10953 10944 * created at the end of startup_modules(). See the block comment
10954 10945 * in startup_modules() describing how we estimate the number of
10955 10946 * static hmeblks that will be needed during re-map.
10956 10947 */
10957 10948 if (!hblk_alloc_dynamic) {
10958 10949
10959 10950 ASSERT(!SFMMU_IS_SHMERID_VALID(rid));
10960 10951
10961 10952 if (size == TTE8K) {
10962 10953 index = nucleus_hblk8.index;
10963 10954 if (index >= nucleus_hblk8.len) {
10964 10955 /*
10965 10956 * If we panic here, see startup_modules() to
10966 10957 * make sure that we are calculating the
10967 10958 * number of hblk8's that we need correctly.
10968 10959 */
10969 10960 prom_panic("no nucleus hblk8 to allocate");
10970 10961 }
10971 10962 hmeblkp =
10972 10963 (struct hme_blk *)&nucleus_hblk8.list[index];
10973 10964 nucleus_hblk8.index++;
10974 10965 SFMMU_STAT(sf_hblk8_nalloc);
10975 10966 } else {
10976 10967 index = nucleus_hblk1.index;
10977 10968 if (nucleus_hblk1.index >= nucleus_hblk1.len) {
10978 10969 /*
10979 10970 * If we panic here, see startup_modules().
10980 10971 * Most likely you need to update the
10981 10972 * calculation of the number of hblk1 elements
10982 10973 * that the kernel needs to boot.
10983 10974 */
10984 10975 prom_panic("no nucleus hblk1 to allocate");
10985 10976 }
10986 10977 hmeblkp =
10987 10978 (struct hme_blk *)&nucleus_hblk1.list[index];
10988 10979 nucleus_hblk1.index++;
10989 10980 SFMMU_STAT(sf_hblk1_nalloc);
10990 10981 }
10991 10982
10992 10983 goto hblk_init;
10993 10984 }
10994 10985
10995 10986 SFMMU_HASH_UNLOCK(hmebp);
10996 10987
10997 10988 if (sfmmup != KHATID && !SFMMU_IS_SHMERID_VALID(rid)) {
10998 10989 if (mmu_page_sizes == max_mmu_page_sizes) {
10999 10990 if (size < TTE256M)
11000 10991 shw_hblkp = sfmmu_shadow_hcreate(sfmmup, vaddr,
11001 10992 size, flags);
11002 10993 } else {
11003 10994 if (size < TTE4M)
11004 10995 shw_hblkp = sfmmu_shadow_hcreate(sfmmup, vaddr,
11005 10996 size, flags);
11006 10997 }
11007 10998 } else if (SFMMU_IS_SHMERID_VALID(rid)) {
11008 10999 /*
11009 11000 		 * Shared hmes use per-region bitmaps in rgn_hmeflag
11010 11001 * rather than shadow hmeblks to keep track of the
11011 11002 * mapping sizes which have been allocated for the region.
11012 11003 		 * Here we clean up old invalid hmeblks with this rid,
11013 11004 * which may be left around by pageunload().
11014 11005 */
11015 11006 int ttesz;
11016 11007 caddr_t va;
11017 11008 caddr_t eva = vaddr + TTEBYTES(size);
11018 11009
11019 11010 ASSERT(sfmmup != KHATID);
11020 11011
11021 11012 srdp = sfmmup->sfmmu_srdp;
11022 11013 ASSERT(srdp != NULL && srdp->srd_refcnt != 0);
11023 11014 rgnp = srdp->srd_hmergnp[rid];
11024 11015 ASSERT(rgnp != NULL && rgnp->rgn_id == rid);
11025 11016 ASSERT(rgnp->rgn_refcnt != 0);
11026 11017 ASSERT(size <= rgnp->rgn_pgszc);
11027 11018
11028 11019 ttesz = HBLK_MIN_TTESZ;
11029 11020 do {
11030 11021 if (!(rgnp->rgn_hmeflags & (0x1 << ttesz))) {
11031 11022 continue;
11032 11023 }
11033 11024
11034 11025 if (ttesz > size && ttesz != HBLK_MIN_TTESZ) {
11035 11026 sfmmu_cleanup_rhblk(srdp, vaddr, rid, ttesz);
11036 11027 } else if (ttesz < size) {
11037 11028 for (va = vaddr; va < eva;
11038 11029 va += TTEBYTES(ttesz)) {
11039 11030 sfmmu_cleanup_rhblk(srdp, va, rid,
11040 11031 ttesz);
11041 11032 }
11042 11033 }
11043 11034 } while (++ttesz <= rgnp->rgn_pgszc);
11044 11035 }
11045 11036
11046 11037 fill_hblk:
11047 11038 owner = (hblk_reserve_thread == curthread) ? 1 : 0;
11048 11039
11049 11040 if (owner && size == TTE8K) {
11050 11041
11051 11042 ASSERT(!SFMMU_IS_SHMERID_VALID(rid));
11052 11043 /*
11053 11044 * We are really in a tight spot. We already own
11054 11045 * hblk_reserve and we need another hblk. In anticipation
11055 11046 * of this kind of scenario, we specifically set aside
11056 11047 * HBLK_RESERVE_MIN number of hblks to be used exclusively
11057 11048 * by owner of hblk_reserve.
11058 11049 		 * by the owner of hblk_reserve.
11059 11050 SFMMU_STAT(sf_hblk_recurse_cnt);
11060 11051
11061 11052 if (!sfmmu_get_free_hblk(&hmeblkp, 1))
11062 11053 panic("sfmmu_hblk_alloc: reserve list is empty");
11063 11054
11064 11055 goto hblk_verify;
11065 11056 }
11066 11057
11067 11058 ASSERT(!owner);
11068 11059
11069 11060 if ((flags & HAT_NO_KALLOC) == 0) {
11070 11061
11071 11062 sfmmu_cache = ((size == TTE8K) ? sfmmu8_cache : sfmmu1_cache);
11072 11063 sleep = ((sfmmup == KHATID) ? KM_NOSLEEP : KM_SLEEP);
11073 11064
11074 11065 if ((hmeblkp = kmem_cache_alloc(sfmmu_cache, sleep)) == NULL) {
11075 11066 hmeblkp = sfmmu_hblk_steal(size);
11076 11067 } else {
11077 11068 /*
11078 11069 * if we are the owner of hblk_reserve,
11079 11070 * swap hblk_reserve with hmeblkp and
11080 11071 * start a fresh life. Hope things go
11081 11072 * better this time.
11082 11073 */
11083 11074 if (hblk_reserve_thread == curthread) {
11084 11075 ASSERT(sfmmu_cache == sfmmu8_cache);
11085 11076 sfmmu_hblk_swap(hmeblkp);
11086 11077 hblk_reserve_thread = NULL;
11087 11078 mutex_exit(&hblk_reserve_lock);
11088 11079 goto fill_hblk;
11089 11080 }
11090 11081 /*
11091 11082 * let's donate this hblk to our reserve list if
11092 11083 * we are not mapping kernel range
11093 11084 */
11094 11085 if (size == TTE8K && sfmmup != KHATID) {
11095 11086 if (sfmmu_put_free_hblk(hmeblkp, 0))
11096 11087 goto fill_hblk;
11097 11088 }
11098 11089 }
11099 11090 } else {
11100 11091 /*
11101 11092 * We are here to map the slab in sfmmu8_cache; let's
11102 11093 * check if we could tap our reserve list; if successful,
11103 11094 * this will avoid the pain of going thru sfmmu_hblk_swap
11104 11095 */
11105 11096 SFMMU_STAT(sf_hblk_slab_cnt);
11106 11097 if (!sfmmu_get_free_hblk(&hmeblkp, 0)) {
11107 11098 /*
11108 11099 * let's start hblk_reserve dance
11109 11100 */
11110 11101 SFMMU_STAT(sf_hblk_reserve_cnt);
11111 11102 owner = 1;
11112 11103 mutex_enter(&hblk_reserve_lock);
11113 11104 hmeblkp = HBLK_RESERVE;
11114 11105 hblk_reserve_thread = curthread;
11115 11106 }
11116 11107 }
11117 11108
11118 11109 hblk_verify:
11119 11110 ASSERT(hmeblkp != NULL);
11120 11111 set_hblk_sz(hmeblkp, size);
11121 11112 ASSERT(hmeblkp->hblk_nextpa == va_to_pa((caddr_t)hmeblkp));
11122 11113 SFMMU_HASH_LOCK(hmebp);
11123 11114 HME_HASH_FAST_SEARCH(hmebp, hblktag, newhblkp);
11124 11115 if (newhblkp != NULL) {
11125 11116 SFMMU_HASH_UNLOCK(hmebp);
11126 11117 if (hmeblkp != HBLK_RESERVE) {
11127 11118 /*
11128 11119 * This is really tricky!
11129 11120 *
11130 11121 * vmem_alloc(vmem_seg_arena)
11131 11122 * vmem_alloc(vmem_internal_arena)
11132 11123 * segkmem_alloc(heap_arena)
11133 11124 * vmem_alloc(heap_arena)
11134 11125 * page_create()
11135 11126 * hat_memload()
11136 11127 * kmem_cache_free()
11137 11128 * kmem_cache_alloc()
11138 11129 * kmem_slab_create()
11139 11130 * vmem_alloc(kmem_internal_arena)
11140 11131 * segkmem_alloc(heap_arena)
11141 11132 * vmem_alloc(heap_arena)
11142 11133 * page_create()
11143 11134 * hat_memload()
11144 11135 * kmem_cache_free()
11145 11136 * ...
11146 11137 *
11147 11138 * Thus, hat_memload() could call kmem_cache_free
11148 11139 			 * enough times that we could easily
11149 11140 			 * hit the bottom of the stack or run out of the reserve
11150 11141 * list of vmem_seg structs. So, we must donate
11151 11142 * this hblk to reserve list if it's allocated
11152 11143 * from sfmmu8_cache *and* mapping kernel range.
11153 11144 * We don't need to worry about freeing hmeblk1's
11154 11145 * to kmem since they don't map any kmem slabs.
11155 11146 *
11156 11147 * Note: When segkmem supports largepages, we must
11157 11148 * free hmeblk1's to reserve list as well.
11158 11149 */
11159 11150 forcefree = (sfmmup == KHATID) ? 1 : 0;
11160 11151 if (size == TTE8K &&
11161 11152 sfmmu_put_free_hblk(hmeblkp, forcefree)) {
11162 11153 goto re_verify;
11163 11154 }
11164 11155 ASSERT(sfmmup != KHATID);
11165 11156 kmem_cache_free(get_hblk_cache(hmeblkp), hmeblkp);
11166 11157 } else {
11167 11158 /*
11168 11159 * Hey! we don't need hblk_reserve any more.
11169 11160 */
11170 11161 ASSERT(owner);
11171 11162 hblk_reserve_thread = NULL;
11172 11163 mutex_exit(&hblk_reserve_lock);
11173 11164 owner = 0;
11174 11165 }
11175 11166 re_verify:
11176 11167 /*
11177 11168 * let's check if the goodies are still present
11178 11169 */
11179 11170 SFMMU_HASH_LOCK(hmebp);
11180 11171 HME_HASH_FAST_SEARCH(hmebp, hblktag, newhblkp);
11181 11172 if (newhblkp != NULL) {
11182 11173 /*
11183 11174 * return newhblkp if it's not hblk_reserve;
11184 11175 * if newhblkp is hblk_reserve, return it
11185 11176 * _only if_ we are the owner of hblk_reserve.
11186 11177 */
11187 11178 if (newhblkp != HBLK_RESERVE || owner) {
11188 11179 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) ||
11189 11180 newhblkp->hblk_shared);
11190 11181 ASSERT(SFMMU_IS_SHMERID_VALID(rid) ||
11191 11182 !newhblkp->hblk_shared);
11192 11183 return (newhblkp);
11193 11184 } else {
11194 11185 /*
11195 11186 * we just hit hblk_reserve in the hash and
11196 11187 * we are not the owner of that;
11197 11188 *
11198 11189 * block until hblk_reserve_thread completes
11199 11190 * swapping hblk_reserve and try the dance
11200 11191 * once again.
11201 11192 */
11202 11193 SFMMU_HASH_UNLOCK(hmebp);
11203 11194 mutex_enter(&hblk_reserve_lock);
11204 11195 mutex_exit(&hblk_reserve_lock);
11205 11196 SFMMU_STAT(sf_hblk_reserve_hit);
11206 11197 goto fill_hblk;
11207 11198 }
11208 11199 } else {
11209 11200 /*
11210 11201 * it's no more! try the dance once again.
11211 11202 */
11212 11203 SFMMU_HASH_UNLOCK(hmebp);
11213 11204 goto fill_hblk;
11214 11205 }
11215 11206 }
11216 11207
11217 11208 hblk_init:
11218 11209 if (SFMMU_IS_SHMERID_VALID(rid)) {
11219 11210 uint16_t tteflag = 0x1 <<
11220 11211 ((size < HBLK_MIN_TTESZ) ? HBLK_MIN_TTESZ : size);
11221 11212
11222 11213 if (!(rgnp->rgn_hmeflags & tteflag)) {
11223 11214 atomic_or_16(&rgnp->rgn_hmeflags, tteflag);
11224 11215 }
11225 11216 hmeblkp->hblk_shared = 1;
11226 11217 } else {
11227 11218 hmeblkp->hblk_shared = 0;
11228 11219 }
11229 11220 set_hblk_sz(hmeblkp, size);
11230 11221 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
11231 11222 hmeblkp->hblk_next = (struct hme_blk *)NULL;
11232 11223 hmeblkp->hblk_tag = hblktag;
11233 11224 hmeblkp->hblk_shadow = shw_hblkp;
11234 11225 hblkpa = hmeblkp->hblk_nextpa;
11235 11226 hmeblkp->hblk_nextpa = HMEBLK_ENDPA;
11236 11227
11237 11228 ASSERT(get_hblk_ttesz(hmeblkp) == size);
11238 11229 ASSERT(get_hblk_span(hmeblkp) == HMEBLK_SPAN(size));
11239 11230 ASSERT(hmeblkp->hblk_hmecnt == 0);
11240 11231 ASSERT(hmeblkp->hblk_vcnt == 0);
11241 11232 ASSERT(hmeblkp->hblk_lckcnt == 0);
11242 11233 ASSERT(hblkpa == va_to_pa((caddr_t)hmeblkp));
11243 11234 sfmmu_hblk_hash_add(hmebp, hmeblkp, hblkpa);
11244 11235 return (hmeblkp);
11245 11236 }
11246 11237
11247 11238 /*
11248 11239 * This function cleans up the hme_blk and returns it to the free list.
11249 11240 */
11250 11241 /* ARGSUSED */
11251 11242 static void
11252 11243 sfmmu_hblk_free(struct hme_blk **listp)
11253 11244 {
11254 11245 struct hme_blk *hmeblkp, *next_hmeblkp;
11255 11246 int size;
11256 11247 uint_t critical;
11257 11248 uint64_t hblkpa;
11258 11249
11259 11250 ASSERT(*listp != NULL);
11260 11251
11261 11252 hmeblkp = *listp;
11262 11253 while (hmeblkp != NULL) {
11263 11254 next_hmeblkp = hmeblkp->hblk_next;
11264 11255 ASSERT(!hmeblkp->hblk_hmecnt);
11265 11256 ASSERT(!hmeblkp->hblk_vcnt);
11266 11257 ASSERT(!hmeblkp->hblk_lckcnt);
11267 11258 ASSERT(hmeblkp != (struct hme_blk *)hblk_reserve);
11268 11259 ASSERT(hmeblkp->hblk_shared == 0);
11269 11260 ASSERT(hmeblkp->hblk_shw_bit == 0);
11270 11261 ASSERT(hmeblkp->hblk_shadow == NULL);
11271 11262
11272 11263 hblkpa = va_to_pa((caddr_t)hmeblkp);
11273 11264 ASSERT(hblkpa != (uint64_t)-1);
11274 11265 critical = (hblktosfmmu(hmeblkp) == KHATID) ? 1 : 0;
11275 11266
11276 11267 size = get_hblk_ttesz(hmeblkp);
11277 11268 hmeblkp->hblk_next = NULL;
11278 11269 hmeblkp->hblk_nextpa = hblkpa;
11279 11270
11280 11271 if (hmeblkp->hblk_nuc_bit == 0) {
11281 11272
11282 11273 if (size != TTE8K ||
11283 11274 !sfmmu_put_free_hblk(hmeblkp, critical))
11284 11275 kmem_cache_free(get_hblk_cache(hmeblkp),
11285 11276 hmeblkp);
11286 11277 }
11287 11278 hmeblkp = next_hmeblkp;
11288 11279 }
11289 11280 }
11290 11281
11291 11282 #define BUCKETS_TO_SEARCH_BEFORE_UNLOAD 30
11292 11283 #define SFMMU_HBLK_STEAL_THRESHOLD 5
11293 11284
11294 11285 static uint_t sfmmu_hblk_steal_twice;
11295 11286 static uint_t sfmmu_hblk_steal_count, sfmmu_hblk_steal_unload_count;
11296 11287
11297 11288 /*
11298 11289 * Steal a hmeblk from user or kernel hme hash lists.
11299 11290  * For 8K ttes grab one from the reserve pool (freehblkp) before proceeding
11300 11291  * to steal, and if we fail to steal after SFMMU_HBLK_STEAL_THRESHOLD
11301 11292  * attempts, tap into the critical reserve of freehblkp.
11302 11293  * Note: We keep looping in this routine until we find one.
11303 11294 */
11304 11295 static struct hme_blk *
11305 11296 sfmmu_hblk_steal(int size)
11306 11297 {
11307 11298 static struct hmehash_bucket *uhmehash_steal_hand = NULL;
11308 11299 struct hmehash_bucket *hmebp;
11309 11300 struct hme_blk *hmeblkp = NULL, *pr_hblk;
11310 11301 uint64_t hblkpa;
11311 11302 int i;
11312 11303 uint_t loop_cnt = 0, critical;
11313 11304
11314 11305 for (;;) {
11315 11306 /* Check cpu hblk pending queues */
11316 11307 if ((hmeblkp = sfmmu_check_pending_hblks(size)) != NULL) {
11317 11308 hmeblkp->hblk_nextpa = va_to_pa((caddr_t)hmeblkp);
11318 11309 ASSERT(hmeblkp->hblk_hmecnt == 0);
11319 11310 ASSERT(hmeblkp->hblk_vcnt == 0);
11320 11311 return (hmeblkp);
11321 11312 }
11322 11313
11323 11314 if (size == TTE8K) {
11324 11315 critical =
11325 11316 (++loop_cnt > SFMMU_HBLK_STEAL_THRESHOLD) ? 1 : 0;
11326 11317 if (sfmmu_get_free_hblk(&hmeblkp, critical))
11327 11318 return (hmeblkp);
11328 11319 }
11329 11320
11330 11321 hmebp = (uhmehash_steal_hand == NULL) ? uhme_hash :
11331 11322 uhmehash_steal_hand;
11332 11323 ASSERT(hmebp >= uhme_hash && hmebp <= &uhme_hash[UHMEHASH_SZ]);
11333 11324
11334 11325 for (i = 0; hmeblkp == NULL && i <= UHMEHASH_SZ +
11335 11326 BUCKETS_TO_SEARCH_BEFORE_UNLOAD; i++) {
11336 11327 SFMMU_HASH_LOCK(hmebp);
11337 11328 hmeblkp = hmebp->hmeblkp;
11338 11329 hblkpa = hmebp->hmeh_nextpa;
11339 11330 pr_hblk = NULL;
11340 11331 while (hmeblkp) {
11341 11332 /*
11342 11333 * check if it is a hmeblk that is not locked
11343 11334      * and not shared. Skip shadow hmeblks with
11344 11335      * shadow_mask set, i.e. valid count non-zero.
11345 11336 */
11346 11337 if ((get_hblk_ttesz(hmeblkp) == size) &&
11347 11338 (hmeblkp->hblk_shw_bit == 0 ||
11348 11339 hmeblkp->hblk_vcnt == 0) &&
11349 11340 (hmeblkp->hblk_lckcnt == 0)) {
11350 11341 /*
11351 11342 * there is a high probability that we
11352 11343 * will find a free one. search some
11353 11344 * buckets for a free hmeblk initially
11354 11345 * before unloading a valid hmeblk.
11355 11346 */
11356 11347 if ((hmeblkp->hblk_vcnt == 0 &&
11357 11348 hmeblkp->hblk_hmecnt == 0) || (i >=
11358 11349 BUCKETS_TO_SEARCH_BEFORE_UNLOAD)) {
11359 11350 if (sfmmu_steal_this_hblk(hmebp,
11360 11351 hmeblkp, hblkpa, pr_hblk)) {
11361 11352 /*
11362 11353 * Hblk is unloaded
11363 11354 * successfully
11364 11355 */
11365 11356 break;
11366 11357 }
11367 11358 }
11368 11359 }
11369 11360 pr_hblk = hmeblkp;
11370 11361 hblkpa = hmeblkp->hblk_nextpa;
11371 11362 hmeblkp = hmeblkp->hblk_next;
11372 11363 }
11373 11364
11374 11365 SFMMU_HASH_UNLOCK(hmebp);
11375 11366 if (hmebp++ == &uhme_hash[UHMEHASH_SZ])
11376 11367 hmebp = uhme_hash;
11377 11368 }
11378 11369 uhmehash_steal_hand = hmebp;
11379 11370
11380 11371 if (hmeblkp != NULL)
11381 11372 break;
11382 11373
11383 11374 /*
11384 11375 * in the worst case, look for a free one in the kernel
11385 11376 * hash table.
11386 11377 */
11387 11378 for (i = 0, hmebp = khme_hash; i <= KHMEHASH_SZ; i++) {
11388 11379 SFMMU_HASH_LOCK(hmebp);
11389 11380 hmeblkp = hmebp->hmeblkp;
11390 11381 hblkpa = hmebp->hmeh_nextpa;
11391 11382 pr_hblk = NULL;
11392 11383 while (hmeblkp) {
11393 11384 /*
11394 11385      * check if it is a free hmeblk
11395 11386 */
11396 11387 if ((get_hblk_ttesz(hmeblkp) == size) &&
11397 11388 (hmeblkp->hblk_lckcnt == 0) &&
11398 11389 (hmeblkp->hblk_vcnt == 0) &&
11399 11390 (hmeblkp->hblk_hmecnt == 0)) {
11400 11391 if (sfmmu_steal_this_hblk(hmebp,
11401 11392 hmeblkp, hblkpa, pr_hblk)) {
11402 11393 break;
11403 11394 } else {
11404 11395 /*
11405 11396 * Cannot fail since we have
11406 11397 * hash lock.
11407 11398 */
11408 11399 panic("fail to steal?");
11409 11400 }
11410 11401 }
11411 11402
11412 11403 pr_hblk = hmeblkp;
11413 11404 hblkpa = hmeblkp->hblk_nextpa;
11414 11405 hmeblkp = hmeblkp->hblk_next;
11415 11406 }
11416 11407
11417 11408 SFMMU_HASH_UNLOCK(hmebp);
11418 11409 if (hmebp++ == &khme_hash[KHMEHASH_SZ])
11419 11410 hmebp = khme_hash;
11420 11411 }
11421 11412
11422 11413 if (hmeblkp != NULL)
11423 11414 break;
11424 11415 sfmmu_hblk_steal_twice++;
11425 11416 }
11426 11417 return (hmeblkp);
11427 11418 }
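The user-hash scan above keeps a rotating starting point, uhmehash_steal_hand, so successive steals resume where the previous one stopped instead of always hammering the first buckets. Below is a minimal user-space sketch of that clock-hand scan; the types, names and sizes are toys standing in for the real hash and are illustrative only.

#include <stddef.h>

#define	NBUCKETS	128

struct bucket {
	int	reclaimable;		/* stands in for a stealable hmeblk */
};

static struct bucket	table[NBUCKETS];
static struct bucket	*steal_hand;	/* where the last scan stopped */

/* Scan at most NBUCKETS buckets, starting at the saved hand. */
static struct bucket *
steal_one(void)
{
	struct bucket *bp = (steal_hand == NULL) ? table : steal_hand;
	struct bucket *found = NULL;
	int i;

	for (i = 0; found == NULL && i < NBUCKETS; i++) {
		if (bp->reclaimable)
			found = bp;
		/* wrap around at the end of the table, like uhme_hash */
		if (bp++ == &table[NBUCKETS - 1])
			bp = table;
	}
	steal_hand = bp;		/* resume here on the next steal */
	return (found);
}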
11428 11419
11429 11420 /*
11430 11421 * This routine does real work to prepare a hblk to be "stolen" by
11431 11422 * unloading the mappings, updating shadow counts ....
11432 11423  * It returns 1 if the block is ready to be reused (stolen), or 0
11433 11424  * if the block cannot be stolen yet - pageunload is still working
11434 11425  * on this hblk.
11435 11426 */
11436 11427 static int
11437 11428 sfmmu_steal_this_hblk(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
11438 - uint64_t hblkpa, struct hme_blk *pr_hblk)
11429 + uint64_t hblkpa, struct hme_blk *pr_hblk)
11439 11430 {
11440 11431 int shw_size, vshift;
11441 11432 struct hme_blk *shw_hblkp;
11442 11433 caddr_t vaddr;
11443 11434 uint_t shw_mask, newshw_mask;
11444 11435 struct hme_blk *list = NULL;
11445 11436
11446 11437 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
11447 11438
11448 11439 /*
11449 11440 * check if the hmeblk is free, unload if necessary
11450 11441 */
11451 11442 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) {
11452 11443 sfmmu_t *sfmmup;
11453 11444 demap_range_t dmr;
11454 11445
11455 11446 sfmmup = hblktosfmmu(hmeblkp);
11456 11447 if (hmeblkp->hblk_shared || sfmmup->sfmmu_ismhat) {
11457 11448 return (0);
11458 11449 }
11459 11450 DEMAP_RANGE_INIT(sfmmup, &dmr);
11460 11451 (void) sfmmu_hblk_unload(sfmmup, hmeblkp,
11461 11452 (caddr_t)get_hblk_base(hmeblkp),
11462 11453 get_hblk_endaddr(hmeblkp), &dmr, HAT_UNLOAD);
11463 11454 DEMAP_RANGE_FLUSH(&dmr);
11464 11455 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) {
11465 11456 /*
11466 11457 * Pageunload is working on the same hblk.
11467 11458 */
11468 11459 return (0);
11469 11460 }
11470 11461
11471 11462 sfmmu_hblk_steal_unload_count++;
11472 11463 }
11473 11464
11474 11465 ASSERT(hmeblkp->hblk_lckcnt == 0);
11475 11466 ASSERT(hmeblkp->hblk_vcnt == 0 && hmeblkp->hblk_hmecnt == 0);
11476 11467
11477 11468 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, &list, 1);
11478 11469 hmeblkp->hblk_nextpa = hblkpa;
11479 11470
11480 11471 shw_hblkp = hmeblkp->hblk_shadow;
11481 11472 if (shw_hblkp) {
11482 11473 ASSERT(!hmeblkp->hblk_shared);
11483 11474 shw_size = get_hblk_ttesz(shw_hblkp);
11484 11475 vaddr = (caddr_t)get_hblk_base(hmeblkp);
11485 11476 vshift = vaddr_to_vshift(shw_hblkp->hblk_tag, vaddr, shw_size);
11486 11477 ASSERT(vshift < 8);
11487 11478 /*
11488 11479 * Atomically clear shadow mask bit
11489 11480 */
11490 11481 do {
11491 11482 shw_mask = shw_hblkp->hblk_shw_mask;
11492 11483 ASSERT(shw_mask & (1 << vshift));
11493 11484 newshw_mask = shw_mask & ~(1 << vshift);
11494 11485 newshw_mask = atomic_cas_32(&shw_hblkp->hblk_shw_mask,
11495 11486 shw_mask, newshw_mask);
11496 11487 } while (newshw_mask != shw_mask);
11497 11488 hmeblkp->hblk_shadow = NULL;
11498 11489 }
11499 11490
11500 11491 /*
11501 11492 * remove shadow bit if we are stealing an unused shadow hmeblk.
11502 11493 * sfmmu_hblk_alloc needs it that way, will set shadow bit later if
11503 11494 * we are indeed allocating a shadow hmeblk.
11504 11495 */
11505 11496 hmeblkp->hblk_shw_bit = 0;
11506 11497
11507 11498 if (hmeblkp->hblk_shared) {
11508 11499 sf_srd_t *srdp;
11509 11500 sf_region_t *rgnp;
11510 11501 uint_t rid;
11511 11502
11512 11503 srdp = hblktosrd(hmeblkp);
11513 11504 ASSERT(srdp != NULL && srdp->srd_refcnt != 0);
11514 11505 rid = hmeblkp->hblk_tag.htag_rid;
11515 11506 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
11516 11507 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
11517 11508 rgnp = srdp->srd_hmergnp[rid];
11518 11509 ASSERT(rgnp != NULL);
11519 11510 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid);
11520 11511 hmeblkp->hblk_shared = 0;
11521 11512 }
11522 11513
11523 11514 sfmmu_hblk_steal_count++;
11524 11515 SFMMU_STAT(sf_steal_count);
11525 11516
11526 11517 return (1);
11527 11518 }
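The shadow-mask update in sfmmu_steal_this_hblk() is the usual compare-and-swap retry loop: take a fresh snapshot, compute the new mask, and retry until the CAS lands. A stand-alone C11 rendering of the same pattern follows; it is illustrative only - the kernel code uses atomic_cas_32, not <stdatomic.h>.

#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>

/* Atomically clear bit 'vshift' in *maskp, as the loop above does. */
static void
clear_mask_bit(_Atomic uint32_t *maskp, int vshift)
{
	uint32_t oldmask, newmask;

	do {
		oldmask = atomic_load(maskp);
		assert(oldmask & (1U << vshift));	/* bit must be set */
		newmask = oldmask & ~(1U << vshift);
	} while (!atomic_compare_exchange_weak(maskp, &oldmask, newmask));
}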
11528 11519
11529 11520 struct hme_blk *
11530 11521 sfmmu_hmetohblk(struct sf_hment *sfhme)
11531 11522 {
11532 11523 struct hme_blk *hmeblkp;
11533 11524 struct sf_hment *sfhme0;
11534 11525 struct hme_blk *hblk_dummy = 0;
11535 11526
11536 11527 /*
11537 11528 * No dummy sf_hments, please.
11538 11529 */
11539 11530 ASSERT(sfhme->hme_tte.ll != 0);
11540 11531
11541 11532 sfhme0 = sfhme - sfhme->hme_tte.tte_hmenum;
11542 11533 hmeblkp = (struct hme_blk *)((uintptr_t)sfhme0 -
11543 11534 (uintptr_t)&hblk_dummy->hblk_hme[0]);
11544 11535
11545 11536 return (hmeblkp);
11546 11537 }
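sfmmu_hmetohblk() recovers the enclosing hme_blk from an embedded sf_hment by stepping back to hblk_hme[0] and then subtracting the member's offset, computed here through a NULL dummy pointer. The same back-pointer idiom, written with offsetof() and toy structure names (a sketch, not the kernel's types):

#include <stddef.h>
#include <stdint.h>

struct member {
	int	m;
};

struct container {
	int		other;
	struct member	members[4];	/* plays the role of hblk_hme[] */
};

/* Given a pointer to members[idx], recover the containing structure. */
static struct container *
member_to_container(struct member *mp, int idx)
{
	struct member *first = mp - idx;	/* back to members[0] */

	return ((struct container *)((uintptr_t)first -
	    offsetof(struct container, members)));
}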
11547 11538
11548 11539 /*
11549 11540 * On swapin, get appropriately sized TSB(s) and clear the HAT_SWAPPED flag.
11550 11541 * If we can't get appropriately sized TSB(s), try for 8K TSB(s) using
11551 11542 * KM_SLEEP allocation.
11552 11543 *
11553 11544  * The 8K fallback uses TSB_FORCEALLOC, so it cannot fail.
11554 11545 */
11555 11546 static void
11556 11547 sfmmu_tsb_swapin(sfmmu_t *sfmmup, hatlock_t *hatlockp)
11557 11548 {
11558 11549 struct tsb_info *tsbinfop, *next;
11559 11550 tsb_replace_rc_t rc;
11560 11551 boolean_t gotfirst = B_FALSE;
11561 11552
11562 11553 ASSERT(sfmmup != ksfmmup);
11563 11554 ASSERT(sfmmu_hat_lock_held(sfmmup));
11564 11555
11565 11556 while (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPIN)) {
11566 11557 cv_wait(&sfmmup->sfmmu_tsb_cv, HATLOCK_MUTEXP(hatlockp));
11567 11558 }
11568 11559
11569 11560 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
11570 11561 SFMMU_FLAGS_SET(sfmmup, HAT_SWAPIN);
11571 11562 } else {
11572 11563 return;
11573 11564 }
11574 11565
11575 11566 ASSERT(sfmmup->sfmmu_tsb != NULL);
11576 11567
11577 11568 /*
11578 11569 * Loop over all tsbinfo's replacing them with ones that actually have
11579 11570 * a TSB. If any of the replacements ever fail, bail out of the loop.
11580 11571 */
11581 11572 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL; tsbinfop = next) {
11582 11573 ASSERT(tsbinfop->tsb_flags & TSB_SWAPPED);
11583 11574 next = tsbinfop->tsb_next;
11584 11575 rc = sfmmu_replace_tsb(sfmmup, tsbinfop, tsbinfop->tsb_szc,
11585 11576 hatlockp, TSB_SWAPIN);
11586 11577 if (rc != TSB_SUCCESS) {
11587 11578 break;
11588 11579 }
11589 11580 gotfirst = B_TRUE;
11590 11581 }
11591 11582
11592 11583 switch (rc) {
11593 11584 case TSB_SUCCESS:
11594 11585 SFMMU_FLAGS_CLEAR(sfmmup, HAT_SWAPPED|HAT_SWAPIN);
11595 11586 cv_broadcast(&sfmmup->sfmmu_tsb_cv);
11596 11587 return;
11597 11588 case TSB_LOSTRACE:
11598 11589 break;
11599 11590 case TSB_ALLOCFAIL:
11600 11591 break;
11601 11592 default:
11602 11593 panic("sfmmu_replace_tsb returned unrecognized failure code "
11603 11594 "%d", rc);
11604 11595 }
11605 11596
11606 11597 /*
11607 11598 * In this case, we failed to get one of our TSBs. If we failed to
11608 11599 * get the first TSB, get one of minimum size (8KB). Walk the list
11609 11600 * and throw away the tsbinfos, starting where the allocation failed;
11610 11601 * we can get by with just one TSB as long as we don't leave the
11611 11602 * SWAPPED tsbinfo structures lying around.
11612 11603 */
11613 11604 tsbinfop = sfmmup->sfmmu_tsb;
11614 11605 next = tsbinfop->tsb_next;
11615 11606 tsbinfop->tsb_next = NULL;
11616 11607
11617 11608 sfmmu_hat_exit(hatlockp);
11618 11609 for (tsbinfop = next; tsbinfop != NULL; tsbinfop = next) {
11619 11610 next = tsbinfop->tsb_next;
11620 11611 sfmmu_tsbinfo_free(tsbinfop);
11621 11612 }
11622 11613 hatlockp = sfmmu_hat_enter(sfmmup);
11623 11614
11624 11615 /*
11625 11616 * If we don't have any TSBs, get a single 8K TSB for 8K, 64K and 512K
11626 11617 * pages.
11627 11618 */
11628 11619 if (!gotfirst) {
11629 11620 tsbinfop = sfmmup->sfmmu_tsb;
11630 11621 rc = sfmmu_replace_tsb(sfmmup, tsbinfop, TSB_MIN_SZCODE,
11631 11622 hatlockp, TSB_SWAPIN | TSB_FORCEALLOC);
11632 11623 ASSERT(rc == TSB_SUCCESS);
11633 11624 }
11634 11625
11635 11626 SFMMU_FLAGS_CLEAR(sfmmup, HAT_SWAPPED|HAT_SWAPIN);
11636 11627 cv_broadcast(&sfmmup->sfmmu_tsb_cv);
11637 11628 }
11638 11629
11639 11630 static int
11640 11631 sfmmu_is_rgnva(sf_srd_t *srdp, caddr_t addr, ulong_t w, ulong_t bmw)
11641 11632 {
11642 11633 ulong_t bix = 0;
11643 11634 uint_t rid;
11644 11635 sf_region_t *rgnp;
11645 11636
11646 11637 ASSERT(srdp != NULL);
11647 11638 ASSERT(srdp->srd_refcnt != 0);
11648 11639
11649 11640 w <<= BT_ULSHIFT;
11650 11641 while (bmw) {
11651 11642 if (!(bmw & 0x1)) {
11652 11643 bix++;
11653 11644 bmw >>= 1;
11654 11645 continue;
11655 11646 }
11656 11647 rid = w | bix;
11657 11648 rgnp = srdp->srd_hmergnp[rid];
11658 11649 ASSERT(rgnp->rgn_refcnt > 0);
11659 11650 ASSERT(rgnp->rgn_id == rid);
11660 11651 if (addr < rgnp->rgn_saddr ||
11661 11652 addr >= (rgnp->rgn_saddr + rgnp->rgn_size)) {
11662 11653 bix++;
11663 11654 bmw >>= 1;
11664 11655 } else {
11665 11656 return (1);
11666 11657 }
11667 11658 }
11668 11659 return (0);
11669 11660 }
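In sfmmu_is_rgnva() the word index supplies the high bits of the region id and the bit position within the bitmap word supplies the low bits, which is why the routine shifts w by BT_ULSHIFT and then ORs in bix. Here is a toy walk over one bitmap word, assuming 64-bit words so the shift is 6; names are illustrative.

#include <stdint.h>
#include <stdio.h>

#define	WORD_SHIFT	6	/* log2 of bits per 64-bit bitmap word */

/* Visit every id whose bit is set in word 'w' of a bitmap. */
static void
walk_bitmap_word(uint64_t w, uint64_t bmw)
{
	uint64_t bix = 0;

	w <<= WORD_SHIFT;		/* high bits of the id */
	while (bmw) {
		if (bmw & 1)
			(void) printf("id %llu is set\n",
			    (unsigned long long)(w | bix));
		bix++;
		bmw >>= 1;
	}
}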
11670 11661
11671 11662 /*
11672 11663 * Handle exceptions for low level tsb_handler.
11673 11664 *
11674 11665 * There are many scenarios that could land us here:
11675 11666 *
11676 11667 * If the context is invalid we land here. The context can be invalid
11677 11668 * for 3 reasons: 1) we couldn't allocate a new context and now need to
11678 11669 * perform a wrap around operation in order to allocate a new context.
11679 11670  * 2) Context was invalidated to change pagesize programming 3) ISM or
11680 11671  * TSB configuration is changing for this process and we are forced into
11681 11672  * here to do a synchronization operation. If the context is valid we can
11682 11673  * be here from the window trap handler. In this case just call trap to handle
11683 11674 * the fault.
11684 11675 *
11685 11676 * Note that the process will run in INVALID_CONTEXT before
11686 11677 * faulting into here and subsequently loading the MMU registers
11687 11678 * (including the TSB base register) associated with this process.
11688 11679 * For this reason, the trap handlers must all test for
11689 11680 * INVALID_CONTEXT before attempting to access any registers other
11690 11681 * than the context registers.
11691 11682 */
11692 11683 void
11693 11684 sfmmu_tsbmiss_exception(struct regs *rp, uintptr_t tagaccess, uint_t traptype)
11694 11685 {
11695 11686 sfmmu_t *sfmmup, *shsfmmup;
11696 11687 uint_t ctxtype;
11697 11688 klwp_id_t lwp;
11698 11689 char lwp_save_state;
11699 11690 hatlock_t *hatlockp, *shatlockp;
11700 11691 struct tsb_info *tsbinfop;
11701 11692 struct tsbmiss *tsbmp;
11702 11693 sf_scd_t *scdp;
11703 11694
11704 11695 SFMMU_STAT(sf_tsb_exceptions);
11705 11696 SFMMU_MMU_STAT(mmu_tsb_exceptions);
11706 11697 sfmmup = astosfmmu(curthread->t_procp->p_as);
11707 11698 /*
11708 11699  * note that in sun4u, the tagaccess register contains the ctxnum
11709 11700 * while sun4v passes ctxtype in the tagaccess register.
11710 11701 */
11711 11702 ctxtype = tagaccess & TAGACC_CTX_MASK;
11712 11703
11713 11704 ASSERT(sfmmup != ksfmmup && ctxtype != KCONTEXT);
11714 11705 ASSERT(sfmmup->sfmmu_ismhat == 0);
11715 11706 ASSERT(!SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED) ||
11716 11707 ctxtype == INVALID_CONTEXT);
11717 11708
11718 11709 if (ctxtype != INVALID_CONTEXT && traptype != T_DATA_PROT) {
11719 11710 /*
11720 11711 * We may land here because shme bitmap and pagesize
11721 11712 * flags are updated lazily in tsbmiss area on other cpus.
11722 11713  * If we detect here that the tsbmiss area is out of sync with
11723 11714  * the sfmmu, update it and retry the trapped instruction.
11724 11715 * Otherwise call trap().
11725 11716 */
11726 11717 int ret = 0;
11727 11718 uchar_t tteflag_mask = (1 << TTE64K) | (1 << TTE8K);
11728 11719 caddr_t addr = (caddr_t)(tagaccess & TAGACC_VADDR_MASK);
11729 11720
11730 11721 /*
11731 11722 * Must set lwp state to LWP_SYS before
11732 11723 * trying to acquire any adaptive lock
11733 11724 */
11734 11725 lwp = ttolwp(curthread);
11735 11726 ASSERT(lwp);
11736 11727 lwp_save_state = lwp->lwp_state;
11737 11728 lwp->lwp_state = LWP_SYS;
11738 11729
11739 11730 hatlockp = sfmmu_hat_enter(sfmmup);
11740 11731 kpreempt_disable();
11741 11732 tsbmp = &tsbmiss_area[CPU->cpu_id];
11742 11733 ASSERT(sfmmup == tsbmp->usfmmup);
11743 11734 if (((tsbmp->uhat_tteflags ^ sfmmup->sfmmu_tteflags) &
11744 11735 ~tteflag_mask) ||
11745 11736 ((tsbmp->uhat_rtteflags ^ sfmmup->sfmmu_rtteflags) &
11746 11737 ~tteflag_mask)) {
11747 11738 tsbmp->uhat_tteflags = sfmmup->sfmmu_tteflags;
11748 11739 tsbmp->uhat_rtteflags = sfmmup->sfmmu_rtteflags;
11749 11740 ret = 1;
11750 11741 }
11751 11742 if (sfmmup->sfmmu_srdp != NULL) {
11752 11743 ulong_t *sm = sfmmup->sfmmu_hmeregion_map.bitmap;
11753 11744 ulong_t *tm = tsbmp->shmermap;
11754 11745 ulong_t i;
11755 11746 for (i = 0; i < SFMMU_HMERGNMAP_WORDS; i++) {
11756 11747 ulong_t d = tm[i] ^ sm[i];
11757 11748 if (d) {
11758 11749 if (d & sm[i]) {
11759 11750 if (!ret && sfmmu_is_rgnva(
11760 11751 sfmmup->sfmmu_srdp,
11761 11752 addr, i, d & sm[i])) {
11762 11753 ret = 1;
11763 11754 }
11764 11755 }
11765 11756 tm[i] = sm[i];
11766 11757 }
11767 11758 }
11768 11759 }
11769 11760 kpreempt_enable();
11770 11761 sfmmu_hat_exit(hatlockp);
11771 11762 lwp->lwp_state = lwp_save_state;
11772 11763 if (ret) {
11773 11764 return;
11774 11765 }
11775 11766 } else if (ctxtype == INVALID_CONTEXT) {
11776 11767 /*
11777 11768 * First, make sure we come out of here with a valid ctx,
11778 11769 * since if we don't get one we'll simply loop on the
11779 11770 * faulting instruction.
11780 11771 *
11781 11772  * If the ISM mappings are changing, the TSB is being relocated,
11782 11773  * the process is being swapped, or the process is joining or
11783 11774  * leaving an SCD or shared regions, we serialize behind the
11784 11775  * controlling thread with the hat lock, sfmmu_flags and the
11785 11776  * sfmmu_tsb_cv condition variable.
11786 11777 */
11787 11778
11788 11779 /*
11789 11780 * Must set lwp state to LWP_SYS before
11790 11781 * trying to acquire any adaptive lock
11791 11782 */
11792 11783 lwp = ttolwp(curthread);
11793 11784 ASSERT(lwp);
11794 11785 lwp_save_state = lwp->lwp_state;
11795 11786 lwp->lwp_state = LWP_SYS;
11796 11787
11797 11788 hatlockp = sfmmu_hat_enter(sfmmup);
11798 11789 retry:
11799 11790 if ((scdp = sfmmup->sfmmu_scdp) != NULL) {
11800 11791 shsfmmup = scdp->scd_sfmmup;
11801 11792 ASSERT(shsfmmup != NULL);
11802 11793
11803 11794 for (tsbinfop = shsfmmup->sfmmu_tsb; tsbinfop != NULL;
11804 11795 tsbinfop = tsbinfop->tsb_next) {
11805 11796 if (tsbinfop->tsb_flags & TSB_RELOC_FLAG) {
11806 11797 /* drop the private hat lock */
11807 11798 sfmmu_hat_exit(hatlockp);
11808 11799 /* acquire the shared hat lock */
11809 11800 shatlockp = sfmmu_hat_enter(shsfmmup);
11810 11801 /*
11811 11802 * recheck to see if anything changed
11812 11803 * after we drop the private hat lock.
11813 11804 */
11814 11805 if (sfmmup->sfmmu_scdp == scdp &&
11815 11806 shsfmmup == scdp->scd_sfmmup) {
11816 11807 sfmmu_tsb_chk_reloc(shsfmmup,
11817 11808 shatlockp);
11818 11809 }
11819 11810 sfmmu_hat_exit(shatlockp);
11820 11811 hatlockp = sfmmu_hat_enter(sfmmup);
11821 11812 goto retry;
11822 11813 }
11823 11814 }
11824 11815 }
11825 11816
11826 11817 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL;
11827 11818 tsbinfop = tsbinfop->tsb_next) {
11828 11819 if (tsbinfop->tsb_flags & TSB_RELOC_FLAG) {
11829 11820 cv_wait(&sfmmup->sfmmu_tsb_cv,
11830 11821 HATLOCK_MUTEXP(hatlockp));
11831 11822 goto retry;
11832 11823 }
11833 11824 }
11834 11825
11835 11826 /*
11836 11827 * Wait for ISM maps to be updated.
11837 11828 */
11838 11829 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) {
11839 11830 cv_wait(&sfmmup->sfmmu_tsb_cv,
11840 11831 HATLOCK_MUTEXP(hatlockp));
11841 11832 goto retry;
11842 11833 }
11843 11834
11844 11835 /* Is this process joining an SCD? */
11845 11836 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)) {
11846 11837 /*
11847 11838 * Flush private TSB and setup shared TSB.
11848 11839 * sfmmu_finish_join_scd() does not drop the
11849 11840 * hat lock.
11850 11841 */
11851 11842 sfmmu_finish_join_scd(sfmmup);
11852 11843 SFMMU_FLAGS_CLEAR(sfmmup, HAT_JOIN_SCD);
11853 11844 }
11854 11845
11855 11846 /*
11856 11847 * If we're swapping in, get TSB(s). Note that we must do
11857 11848 * this before we get a ctx or load the MMU state. Once
11858 11849 * we swap in we have to recheck to make sure the TSB(s) and
11859 11850 * ISM mappings didn't change while we slept.
11860 11851 */
11861 11852 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
11862 11853 sfmmu_tsb_swapin(sfmmup, hatlockp);
11863 11854 goto retry;
11864 11855 }
11865 11856
11866 11857 sfmmu_get_ctx(sfmmup);
11867 11858
11868 11859 sfmmu_hat_exit(hatlockp);
11869 11860 /*
11870 11861 * Must restore lwp_state if not calling
11871 11862 * trap() for further processing. Restore
11872 11863 * it anyway.
11873 11864 */
11874 11865 lwp->lwp_state = lwp_save_state;
11875 11866 return;
11876 11867 }
11877 11868 trap(rp, (caddr_t)tagaccess, traptype, 0);
11878 11869 }
11879 11870
11880 11871 static void
11881 11872 sfmmu_tsb_chk_reloc(sfmmu_t *sfmmup, hatlock_t *hatlockp)
11882 11873 {
11883 11874 struct tsb_info *tp;
11884 11875
11885 11876 ASSERT(sfmmu_hat_lock_held(sfmmup));
11886 11877
11887 11878 for (tp = sfmmup->sfmmu_tsb; tp != NULL; tp = tp->tsb_next) {
11888 11879 if (tp->tsb_flags & TSB_RELOC_FLAG) {
11889 11880 cv_wait(&sfmmup->sfmmu_tsb_cv,
11890 11881 HATLOCK_MUTEXP(hatlockp));
11891 11882 break;
11892 11883 }
11893 11884 }
11894 11885 }
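Both sfmmu_tsb_chk_reloc() and the retry loops in sfmmu_tsbmiss_exception() follow the classic condition-variable discipline: test the predicate under the lock, cv_wait if it holds, and re-evaluate everything from the top after waking up rather than assuming the wakeup means progress. A minimal pthread sketch of that wait-and-recheck loop (toy names; the flag stands in for TSB_RELOC_FLAG):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t	lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t	cv = PTHREAD_COND_INITIALIZER;
static bool		relocating;	/* stands in for TSB_RELOC_FLAG */

/* Block until no relocation is in progress, rechecking after each wakeup. */
static void
wait_for_reloc_done(void)
{
	(void) pthread_mutex_lock(&lock);
	while (relocating)
		(void) pthread_cond_wait(&cv, &lock);
	(void) pthread_mutex_unlock(&lock);
}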
11895 11886
11896 11887 /*
11897 11888  * sfmmu_vatopfn_suspended is called from GET_TTE when TL=0 and the
11898 11889  * TTE_SUSPENDED bit is set in the tte. We block on acquiring a page lock
11899 11890  * rather than spinning to avoid send mondo timeouts with
11900 11891 * interrupts enabled. When the lock is acquired it is immediately
11901 11892 * released and we return back to sfmmu_vatopfn just after
11902 11893 * the GET_TTE call.
11903 11894 */
11904 11895 void
11905 11896 sfmmu_vatopfn_suspended(caddr_t vaddr, sfmmu_t *sfmmu, tte_t *ttep)
11906 11897 {
11907 11898 struct page **pp;
11908 11899
11909 11900 (void) as_pagelock(sfmmu->sfmmu_as, &pp, vaddr, TTE_CSZ(ttep), S_WRITE);
11910 11901 as_pageunlock(sfmmu->sfmmu_as, pp, vaddr, TTE_CSZ(ttep), S_WRITE);
11911 11902 }
11912 11903
11913 11904 /*
11914 11905 * sfmmu_tsbmiss_suspended is called from GET_TTE when TL>0 and
11915 11906 * TTE_SUSPENDED bit set in tte. We do this so that we can handle
11916 11907 * cross traps which cannot be handled while spinning in the
11917 11908 * trap handlers. Simply enter and exit the kpr_suspendlock spin
11918 11909 * mutex, which is held by the holder of the suspend bit, and then
11919 11910 * retry the trapped instruction after unwinding.
11920 11911 */
11921 11912 /*ARGSUSED*/
11922 11913 void
11923 11914 sfmmu_tsbmiss_suspended(struct regs *rp, uintptr_t tagacc, uint_t traptype)
11924 11915 {
11925 11916 ASSERT(curthread != kreloc_thread);
11926 11917 mutex_enter(&kpr_suspendlock);
11927 11918 mutex_exit(&kpr_suspendlock);
11928 11919 }
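sfmmu_tsbmiss_suspended() (and sfmmu_vatopfn_suspended() above it) use a lock purely as a barrier: entering and immediately exiting kpr_suspendlock parks the thread until whoever set the suspend bit drops the lock, after which the trapped instruction is simply retried. A minimal pthread analogue of that acquire-and-release barrier (illustrative only):

#include <pthread.h>

static pthread_mutex_t	suspend_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Wait for the current holder of suspend_lock to finish. Nothing is done
 * inside the critical section; the lock round-trip itself is the point.
 */
static void
wait_for_suspender(void)
{
	(void) pthread_mutex_lock(&suspend_lock);
	(void) pthread_mutex_unlock(&suspend_lock);
}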
11929 11920
11930 11921 /*
11931 11922 * This routine could be optimized to reduce the number of xcalls by flushing
11932 11923  * the entire TLB if the region reference count is above some threshold, but
11933 11924  * the tradeoff will depend on the size of the TLB. So for now flush the
11934 11925  * specific page one context at a time.
11935 11926 *
11936 11927 * If uselocks is 0 then it's called after all cpus were captured and all the
11937 11928 * hat locks were taken. In this case don't take the region lock by relying on
11938 11929 * the order of list region update operations in hat_join_region(),
11939 11930 * hat_leave_region() and hat_dup_region(). The ordering in those routines
11940 11931  * guarantees that the list is always forward-walkable and reaches active sfmmus
11941 11932 * regardless of where xc_attention() captures a cpu.
11942 11933 */
11943 11934 cpuset_t
11944 11935 sfmmu_rgntlb_demap(caddr_t addr, sf_region_t *rgnp,
11945 11936 struct hme_blk *hmeblkp, int uselocks)
11946 11937 {
11947 11938 sfmmu_t *sfmmup;
11948 11939 cpuset_t cpuset;
11949 11940 cpuset_t rcpuset;
11950 11941 hatlock_t *hatlockp;
11951 11942 uint_t rid = rgnp->rgn_id;
11952 11943 sf_rgn_link_t *rlink;
11953 11944 sf_scd_t *scdp;
11954 11945
11955 11946 ASSERT(hmeblkp->hblk_shared);
11956 11947 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
11957 11948 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
11958 11949
11959 11950 CPUSET_ZERO(rcpuset);
11960 11951 if (uselocks) {
11961 11952 mutex_enter(&rgnp->rgn_mutex);
11962 11953 }
11963 11954 sfmmup = rgnp->rgn_sfmmu_head;
11964 11955 while (sfmmup != NULL) {
11965 11956 if (uselocks) {
11966 11957 hatlockp = sfmmu_hat_enter(sfmmup);
11967 11958 }
11968 11959
11969 11960 /*
11970 11961 * When an SCD is created the SCD hat is linked on the sfmmu
11971 11962 * region lists for each hme region which is part of the
11972 11963 * SCD. If we find an SCD hat, when walking these lists,
11973 11964 * then we flush the shared TSBs, if we find a private hat,
11974 11965 * which is part of an SCD, but where the region
11975 11966 * is not part of the SCD then we flush the private TSBs.
11976 11967 */
11977 11968 if (!sfmmup->sfmmu_scdhat && sfmmup->sfmmu_scdp != NULL &&
11978 11969 !SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)) {
11979 11970 scdp = sfmmup->sfmmu_scdp;
11980 11971 if (SF_RGNMAP_TEST(scdp->scd_hmeregion_map, rid)) {
11981 11972 if (uselocks) {
11982 11973 sfmmu_hat_exit(hatlockp);
11983 11974 }
11984 11975 goto next;
11985 11976 }
11986 11977 }
11987 11978
11988 11979 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0);
11989 11980
11990 11981 kpreempt_disable();
11991 11982 cpuset = sfmmup->sfmmu_cpusran;
11992 11983 CPUSET_AND(cpuset, cpu_ready_set);
11993 11984 CPUSET_DEL(cpuset, CPU->cpu_id);
11994 11985 SFMMU_XCALL_STATS(sfmmup);
11995 11986 xt_some(cpuset, vtag_flushpage_tl1,
11996 11987 (uint64_t)addr, (uint64_t)sfmmup);
11997 11988 vtag_flushpage(addr, (uint64_t)sfmmup);
11998 11989 if (uselocks) {
11999 11990 sfmmu_hat_exit(hatlockp);
12000 11991 }
12001 11992 kpreempt_enable();
12002 11993 CPUSET_OR(rcpuset, cpuset);
12003 11994
12004 11995 next:
12005 11996 /* LINTED: constant in conditional context */
12006 11997 SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 0, 0);
12007 11998 ASSERT(rlink != NULL);
12008 11999 sfmmup = rlink->next;
12009 12000 }
12010 12001 if (uselocks) {
12011 12002 mutex_exit(&rgnp->rgn_mutex);
12012 12003 }
12013 12004 return (rcpuset);
12014 12005 }
12015 12006
12016 12007 /*
12017 12008  * This routine takes an sfmmu pointer and the va for an address in an
12018 12009 * ISM region as input and returns the corresponding region id in ism_rid.
12019 12010 * The return value of 1 indicates that a region has been found and ism_rid
12020 12011 * is valid, otherwise 0 is returned.
12021 12012 */
12022 12013 static int
12023 12014 find_ism_rid(sfmmu_t *sfmmup, sfmmu_t *ism_sfmmup, caddr_t va, uint_t *ism_rid)
12024 12015 {
12025 12016 ism_blk_t *ism_blkp;
12026 12017 int i;
12027 12018 ism_map_t *ism_map;
12028 12019 #ifdef DEBUG
12029 12020 struct hat *ism_hatid;
12030 12021 #endif
12031 12022 ASSERT(sfmmu_hat_lock_held(sfmmup));
12032 12023
12033 12024 ism_blkp = sfmmup->sfmmu_iblk;
12034 12025 while (ism_blkp != NULL) {
12035 12026 ism_map = ism_blkp->iblk_maps;
12036 12027 for (i = 0; i < ISM_MAP_SLOTS && ism_map[i].imap_ismhat; i++) {
12037 12028 if ((va >= ism_start(ism_map[i])) &&
12038 12029 (va < ism_end(ism_map[i]))) {
12039 12030
12040 12031 *ism_rid = ism_map[i].imap_rid;
12041 12032 #ifdef DEBUG
12042 12033 ism_hatid = ism_map[i].imap_ismhat;
12043 12034 ASSERT(ism_hatid == ism_sfmmup);
12044 12035 ASSERT(ism_hatid->sfmmu_ismhat);
12045 12036 #endif
12046 12037 return (1);
12047 12038 }
12048 12039 }
12049 12040 ism_blkp = ism_blkp->iblk_next;
12050 12041 }
12051 12042 return (0);
12052 12043 }
12053 12044
12054 12045 /*
12055 12046 * Special routine to flush out ism mappings- TSBs, TLBs and D-caches.
12056 12047 * This routine may be called with all cpu's captured. Therefore, the
12057 12048 * caller is responsible for holding all locks and disabling kernel
12058 12049 * preemption.
12059 12050 */
12060 12051 /* ARGSUSED */
12061 12052 static void
12062 12053 sfmmu_ismtlbcache_demap(caddr_t addr, sfmmu_t *ism_sfmmup,
12063 - struct hme_blk *hmeblkp, pfn_t pfnum, int cache_flush_flag)
12054 + struct hme_blk *hmeblkp, pfn_t pfnum, int cache_flush_flag)
12064 12055 {
12065 - cpuset_t cpuset;
12066 - caddr_t va;
12056 + cpuset_t cpuset;
12057 + caddr_t va;
12067 12058 ism_ment_t *ment;
12068 12059 sfmmu_t *sfmmup;
12069 12060 #ifdef VAC
12070 - int vcolor;
12061 + int vcolor;
12071 12062 #endif
12072 12063
12073 12064 sf_scd_t *scdp;
12074 12065 uint_t ism_rid;
12075 12066
12076 12067 ASSERT(!hmeblkp->hblk_shared);
12077 12068 /*
12078 12069 * Walk the ism_hat's mapping list and flush the page
12079 12070 * from every hat sharing this ism_hat. This routine
12080 12071 * may be called while all cpu's have been captured.
12081 12072 * Therefore we can't attempt to grab any locks. For now
12082 12073 * this means we will protect the ism mapping list under
12083 12074 * a single lock which will be grabbed by the caller.
12084 12075  * If hat_share/unshare scalability becomes a performance
12085 12076 * problem then we may need to re-think ism mapping list locking.
12086 12077 */
12087 12078 ASSERT(ism_sfmmup->sfmmu_ismhat);
12088 12079 ASSERT(MUTEX_HELD(&ism_mlist_lock));
12089 12080 addr = addr - ISMID_STARTADDR;
12090 12081
12091 12082 for (ment = ism_sfmmup->sfmmu_iment; ment; ment = ment->iment_next) {
12092 12083
12093 12084 sfmmup = ment->iment_hat;
12094 12085
12095 12086 va = ment->iment_base_va;
12096 12087 va = (caddr_t)((uintptr_t)va + (uintptr_t)addr);
12097 12088
12098 12089 /*
12099 12090 * When an SCD is created the SCD hat is linked on the ism
12100 12091 * mapping lists for each ISM segment which is part of the
12101 12092 * SCD. If we find an SCD hat, when walking these lists,
12102 12093 * then we flush the shared TSBs, if we find a private hat,
12103 12094 * which is part of an SCD, but where the region
12104 12095 * corresponding to this va is not part of the SCD then we
12105 12096 * flush the private TSBs.
12106 12097 */
12107 12098 if (!sfmmup->sfmmu_scdhat && sfmmup->sfmmu_scdp != NULL &&
12108 12099 !SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD) &&
12109 12100 !SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) {
12110 12101 if (!find_ism_rid(sfmmup, ism_sfmmup, va,
12111 12102 &ism_rid)) {
12112 12103 cmn_err(CE_PANIC,
12113 12104 "can't find matching ISM rid!");
12114 12105 }
12115 12106
12116 12107 scdp = sfmmup->sfmmu_scdp;
12117 12108 if (SFMMU_IS_ISMRID_VALID(ism_rid) &&
12118 12109 SF_RGNMAP_TEST(scdp->scd_ismregion_map,
12119 12110 ism_rid)) {
12120 12111 continue;
12121 12112 }
12122 12113 }
12123 12114 SFMMU_UNLOAD_TSB(va, sfmmup, hmeblkp, 1);
12124 12115
12125 12116 cpuset = sfmmup->sfmmu_cpusran;
12126 12117 CPUSET_AND(cpuset, cpu_ready_set);
12127 12118 CPUSET_DEL(cpuset, CPU->cpu_id);
12128 12119 SFMMU_XCALL_STATS(sfmmup);
12129 12120 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)va,
12130 12121 (uint64_t)sfmmup);
12131 12122 vtag_flushpage(va, (uint64_t)sfmmup);
12132 12123
12133 12124 #ifdef VAC
12134 12125 /*
12135 12126 * Flush D$
12136 12127 * When flushing D$ we must flush all
12137 12128 * cpu's. See sfmmu_cache_flush().
12138 12129 */
12139 12130 if (cache_flush_flag == CACHE_FLUSH) {
12140 12131 cpuset = cpu_ready_set;
12141 12132 CPUSET_DEL(cpuset, CPU->cpu_id);
12142 12133
12143 12134 SFMMU_XCALL_STATS(sfmmup);
12144 12135 vcolor = addr_to_vcolor(va);
12145 12136 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor);
12146 12137 vac_flushpage(pfnum, vcolor);
12147 12138 }
12148 12139 #endif /* VAC */
12149 12140 }
12150 12141 }
12151 12142
12152 12143 /*
12153 12144 * Demaps the TSB, CPU caches, and flushes all TLBs on all CPUs of
12154 12145 * a particular virtual address and ctx. If noflush is set we do not
12155 12146 * flush the TLB/TSB. This function may or may not be called with the
12156 12147 * HAT lock held.
12157 12148 */
12158 12149 static void
12159 12150 sfmmu_tlbcache_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp,
12160 - pfn_t pfnum, int tlb_noflush, int cpu_flag, int cache_flush_flag,
12161 - int hat_lock_held)
12151 + pfn_t pfnum, int tlb_noflush, int cpu_flag, int cache_flush_flag,
12152 + int hat_lock_held)
12162 12153 {
12163 12154 #ifdef VAC
12164 12155 int vcolor;
12165 12156 #endif
12166 12157 cpuset_t cpuset;
12167 12158 hatlock_t *hatlockp;
12168 12159
12169 12160 ASSERT(!hmeblkp->hblk_shared);
12170 12161
12171 12162 #if defined(lint) && !defined(VAC)
12172 12163 pfnum = pfnum;
12173 12164 cpu_flag = cpu_flag;
12174 12165 cache_flush_flag = cache_flush_flag;
12175 12166 #endif
12176 12167
12177 12168 /*
12178 12169 * There is no longer a need to protect against ctx being
12179 12170 * stolen here since we don't store the ctx in the TSB anymore.
12180 12171 */
12181 12172 #ifdef VAC
12182 12173 vcolor = addr_to_vcolor(addr);
12183 12174 #endif
12184 12175
12185 12176 /*
12186 12177 * We must hold the hat lock during the flush of TLB,
12187 12178 * to avoid a race with sfmmu_invalidate_ctx(), where
12188 12179 * sfmmu_cnum on a MMU could be set to INVALID_CONTEXT,
12189 12180 * causing TLB demap routine to skip flush on that MMU.
12190 12181 * If the context on a MMU has already been set to
12191 12182 * INVALID_CONTEXT, we just get an extra flush on
12192 12183 * that MMU.
12193 12184 */
12194 12185 if (!hat_lock_held && !tlb_noflush)
12195 12186 hatlockp = sfmmu_hat_enter(sfmmup);
12196 12187
12197 12188 kpreempt_disable();
12198 12189 if (!tlb_noflush) {
12199 12190 /*
12200 12191 * Flush the TSB and TLB.
12201 12192 */
12202 12193 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0);
12203 12194
12204 12195 cpuset = sfmmup->sfmmu_cpusran;
12205 12196 CPUSET_AND(cpuset, cpu_ready_set);
12206 12197 CPUSET_DEL(cpuset, CPU->cpu_id);
12207 12198
12208 12199 SFMMU_XCALL_STATS(sfmmup);
12209 12200
12210 12201 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr,
12211 12202 (uint64_t)sfmmup);
12212 12203
12213 12204 vtag_flushpage(addr, (uint64_t)sfmmup);
12214 12205 }
12215 12206
12216 12207 if (!hat_lock_held && !tlb_noflush)
12217 12208 sfmmu_hat_exit(hatlockp);
12218 12209
12219 12210 #ifdef VAC
12220 12211 /*
12221 12212 * Flush the D$
12222 12213 *
12223 12214 * Even if the ctx is stolen, we need to flush the
12224 12215 * cache. Our ctx stealer only flushes the TLBs.
12225 12216 */
12226 12217 if (cache_flush_flag == CACHE_FLUSH) {
12227 12218 if (cpu_flag & FLUSH_ALL_CPUS) {
12228 12219 cpuset = cpu_ready_set;
12229 12220 } else {
12230 12221 cpuset = sfmmup->sfmmu_cpusran;
12231 12222 CPUSET_AND(cpuset, cpu_ready_set);
12232 12223 }
12233 12224 CPUSET_DEL(cpuset, CPU->cpu_id);
12234 12225 SFMMU_XCALL_STATS(sfmmup);
12235 12226 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor);
12236 12227 vac_flushpage(pfnum, vcolor);
12237 12228 }
12238 12229 #endif /* VAC */
12239 12230 kpreempt_enable();
12240 12231 }
12241 12232
12242 12233 /*
12243 12234 * Demaps the TSB and flushes all TLBs on all cpus for a particular virtual
12244 12235 * address and ctx. If noflush is set we do not currently do anything.
12245 12236 * This function may or may not be called with the HAT lock held.
12246 12237 */
12247 12238 static void
12248 12239 sfmmu_tlb_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp,
12249 - int tlb_noflush, int hat_lock_held)
12240 + int tlb_noflush, int hat_lock_held)
12250 12241 {
12251 12242 cpuset_t cpuset;
12252 12243 hatlock_t *hatlockp;
12253 12244
12254 12245 ASSERT(!hmeblkp->hblk_shared);
12255 12246
12256 12247 /*
12257 12248 * If the process is exiting we have nothing to do.
12258 12249 */
12259 12250 if (tlb_noflush)
12260 12251 return;
12261 12252
12262 12253 /*
12263 12254 * Flush TSB.
12264 12255 */
12265 12256 if (!hat_lock_held)
12266 12257 hatlockp = sfmmu_hat_enter(sfmmup);
12267 12258 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0);
12268 12259
12269 12260 kpreempt_disable();
12270 12261
12271 12262 cpuset = sfmmup->sfmmu_cpusran;
12272 12263 CPUSET_AND(cpuset, cpu_ready_set);
12273 12264 CPUSET_DEL(cpuset, CPU->cpu_id);
12274 12265
12275 12266 SFMMU_XCALL_STATS(sfmmup);
12276 12267 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr, (uint64_t)sfmmup);
12277 12268
12278 12269 vtag_flushpage(addr, (uint64_t)sfmmup);
12279 12270
12280 12271 if (!hat_lock_held)
12281 12272 sfmmu_hat_exit(hatlockp);
12282 12273
12283 12274 kpreempt_enable();
12284 12275
12285 12276 }
12286 12277
12287 12278 /*
12288 12279 * Special case of sfmmu_tlb_demap for MMU_PAGESIZE hblks. Use the xcall
12289 12280 * call handler that can flush a range of pages to save on xcalls.
12290 12281 */
12291 12282 static int sfmmu_xcall_save;
12292 12283
12293 12284 /*
12294 12285  * This routine is never used for demapping addresses backed by SRD hmeblks.
12295 12286 */
12296 12287 static void
12297 12288 sfmmu_tlb_range_demap(demap_range_t *dmrp)
12298 12289 {
12299 12290 sfmmu_t *sfmmup = dmrp->dmr_sfmmup;
12300 12291 hatlock_t *hatlockp;
12301 12292 cpuset_t cpuset;
12302 12293 uint64_t sfmmu_pgcnt;
12303 12294 pgcnt_t pgcnt = 0;
12304 12295 int pgunload = 0;
12305 12296 int dirtypg = 0;
12306 12297 caddr_t addr = dmrp->dmr_addr;
12307 12298 caddr_t eaddr;
12308 12299 uint64_t bitvec = dmrp->dmr_bitvec;
12309 12300
12310 12301 ASSERT(bitvec & 1);
12311 12302
12312 12303 /*
12313 12304 * Flush TSB and calculate number of pages to flush.
12314 12305 */
12315 12306 while (bitvec != 0) {
12316 12307 dirtypg = 0;
12317 12308 /*
12318 12309 * Find the first page to flush and then count how many
12319 12310 * pages there are after it that also need to be flushed.
12320 12311 * This way the number of TSB flushes is minimized.
12321 12312 */
12322 12313 while ((bitvec & 1) == 0) {
12323 12314 pgcnt++;
12324 12315 addr += MMU_PAGESIZE;
12325 12316 bitvec >>= 1;
12326 12317 }
12327 12318 while (bitvec & 1) {
12328 12319 dirtypg++;
12329 12320 bitvec >>= 1;
12330 12321 }
12331 12322 eaddr = addr + ptob(dirtypg);
12332 12323 hatlockp = sfmmu_hat_enter(sfmmup);
12333 12324 sfmmu_unload_tsb_range(sfmmup, addr, eaddr, TTE8K);
12334 12325 sfmmu_hat_exit(hatlockp);
12335 12326 pgunload += dirtypg;
12336 12327 addr = eaddr;
12337 12328 pgcnt += dirtypg;
12338 12329 }
12339 12330
12340 12331 ASSERT((pgcnt<<MMU_PAGESHIFT) <= dmrp->dmr_endaddr - dmrp->dmr_addr);
12341 12332 if (sfmmup->sfmmu_free == 0) {
12342 12333 addr = dmrp->dmr_addr;
12343 12334 bitvec = dmrp->dmr_bitvec;
12344 12335
12345 12336 /*
12346 12337 * make sure it has SFMMU_PGCNT_SHIFT bits only,
12347 12338 * as it will be used to pack argument for xt_some
12348 12339 */
12349 12340 ASSERT((pgcnt > 0) &&
12350 12341 (pgcnt <= (1 << SFMMU_PGCNT_SHIFT)));
12351 12342
12352 12343 /*
12353 12344  * Encode pgcnt as (pgcnt - 1), and pass (pgcnt - 1) in
12354 12345  * the low 6 bits of sfmmup. This is doable since pgcnt is
12355 12346  * always >= 1.
12356 12347 */
12357 12348 ASSERT(!((uint64_t)sfmmup & SFMMU_PGCNT_MASK));
12358 12349 sfmmu_pgcnt = (uint64_t)sfmmup |
12359 12350 ((pgcnt - 1) & SFMMU_PGCNT_MASK);
12360 12351
12361 12352 /*
12362 12353 * We must hold the hat lock during the flush of TLB,
12363 12354 * to avoid a race with sfmmu_invalidate_ctx(), where
12364 12355 * sfmmu_cnum on a MMU could be set to INVALID_CONTEXT,
12365 12356 * causing TLB demap routine to skip flush on that MMU.
12366 12357 * If the context on a MMU has already been set to
12367 12358 * INVALID_CONTEXT, we just get an extra flush on
12368 12359 * that MMU.
12369 12360 */
12370 12361 hatlockp = sfmmu_hat_enter(sfmmup);
12371 12362 kpreempt_disable();
12372 12363
12373 12364 cpuset = sfmmup->sfmmu_cpusran;
12374 12365 CPUSET_AND(cpuset, cpu_ready_set);
12375 12366 CPUSET_DEL(cpuset, CPU->cpu_id);
12376 12367
12377 12368 SFMMU_XCALL_STATS(sfmmup);
12378 12369 xt_some(cpuset, vtag_flush_pgcnt_tl1, (uint64_t)addr,
12379 12370 sfmmu_pgcnt);
12380 12371
12381 12372 for (; bitvec != 0; bitvec >>= 1) {
12382 12373 if (bitvec & 1)
12383 12374 vtag_flushpage(addr, (uint64_t)sfmmup);
12384 12375 addr += MMU_PAGESIZE;
12385 12376 }
12386 12377 kpreempt_enable();
12387 12378 sfmmu_hat_exit(hatlockp);
12388 12379
12389 12380 sfmmu_xcall_save += (pgunload-1);
12390 12381 }
12391 12382 dmrp->dmr_bitvec = 0;
12392 12383 }
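sfmmu_tlb_range_demap() ships both the hat pointer and the page count in one 64-bit cross-call argument by storing (pgcnt - 1) in the low SFMMU_PGCNT_SHIFT bits of sfmmup, which works because the structure's alignment keeps those bits zero. A stand-alone sketch of the pack/unpack arithmetic follows; the 6-bit field and 64-byte alignment are assumptions for the example.

#include <assert.h>
#include <stdint.h>

#define	CNT_SHIFT	6
#define	CNT_MASK	((1ULL << CNT_SHIFT) - 1)

/* Pack a 1..64 page count into the low bits of a 64-byte-aligned pointer. */
static uint64_t
pack_ptr_cnt(void *ptr, uint64_t pgcnt)
{
	uint64_t p = (uint64_t)(uintptr_t)ptr;

	assert((p & CNT_MASK) == 0);	/* alignment provides the free bits */
	assert(pgcnt >= 1 && pgcnt <= (1ULL << CNT_SHIFT));
	return (p | ((pgcnt - 1) & CNT_MASK));
}

/* Recover the pointer and the page count on the receiving side. */
static void
unpack_ptr_cnt(uint64_t arg, void **ptrp, uint64_t *pgcntp)
{
	*ptrp = (void *)(uintptr_t)(arg & ~CNT_MASK);
	*pgcntp = (arg & CNT_MASK) + 1;
}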
12393 12384
12394 12385 /*
12395 12386 * In cases where we need to synchronize with TLB/TSB miss trap
12396 12387 * handlers, _and_ need to flush the TLB, it's a lot easier to
12397 12388 * throw away the context from the process than to do a
12398 12389 * special song and dance to keep things consistent for the
12399 12390 * handlers.
12400 12391 *
12401 12392 * Since the process suddenly ends up without a context and our caller
12402 12393 * holds the hat lock, threads that fault after this function is called
12403 12394 * will pile up on the lock. We can then do whatever we need to
12404 12395 * atomically from the context of the caller. The first blocked thread
12405 12396 * to resume executing will get the process a new context, and the
12406 12397 * process will resume executing.
12407 12398 *
12408 12399 * One added advantage of this approach is that on MMUs that
12409 12400 * support a "flush all" operation, we will delay the flush until
12410 12401 * cnum wrap-around, and then flush the TLB one time. This
12411 12402 * is rather rare, so it's a lot less expensive than making 8000
12412 12403 * x-calls to flush the TLB 8000 times.
12413 12404 *
12414 12405 * A per-process (PP) lock is used to synchronize ctx allocations in
12415 12406 * resume() and ctx invalidations here.
12416 12407 */
12417 12408 static void
12418 12409 sfmmu_invalidate_ctx(sfmmu_t *sfmmup)
12419 12410 {
12420 12411 cpuset_t cpuset;
12421 12412 int cnum, currcnum;
12422 12413 mmu_ctx_t *mmu_ctxp;
12423 12414 int i;
12424 12415 uint_t pstate_save;
12425 12416
12426 12417 SFMMU_STAT(sf_ctx_inv);
12427 12418
12428 12419 ASSERT(sfmmu_hat_lock_held(sfmmup));
12429 12420 ASSERT(sfmmup != ksfmmup);
12430 12421
12431 12422 kpreempt_disable();
12432 12423
12433 12424 mmu_ctxp = CPU_MMU_CTXP(CPU);
12434 12425 ASSERT(mmu_ctxp);
12435 12426 ASSERT(mmu_ctxp->mmu_idx < max_mmu_ctxdoms);
12436 12427 ASSERT(mmu_ctxp == mmu_ctxs_tbl[mmu_ctxp->mmu_idx]);
12437 12428
12438 12429 currcnum = sfmmup->sfmmu_ctxs[mmu_ctxp->mmu_idx].cnum;
12439 12430
12440 12431 pstate_save = sfmmu_disable_intrs();
12441 12432
12442 12433 lock_set(&sfmmup->sfmmu_ctx_lock); /* acquire PP lock */
12443 12434 /* set HAT cnum invalid across all context domains. */
12444 12435 for (i = 0; i < max_mmu_ctxdoms; i++) {
12445 12436
12446 - cnum = sfmmup->sfmmu_ctxs[i].cnum;
12437 + cnum = sfmmup->sfmmu_ctxs[i].cnum;
12447 12438 if (cnum == INVALID_CONTEXT) {
12448 12439 continue;
12449 12440 }
12450 12441
12451 12442 sfmmup->sfmmu_ctxs[i].cnum = INVALID_CONTEXT;
12452 12443 }
12453 12444 membar_enter(); /* make sure globally visible to all CPUs */
12454 12445 lock_clear(&sfmmup->sfmmu_ctx_lock); /* release PP lock */
12455 12446
12456 12447 sfmmu_enable_intrs(pstate_save);
12457 12448
12458 12449 cpuset = sfmmup->sfmmu_cpusran;
12459 12450 CPUSET_DEL(cpuset, CPU->cpu_id);
12460 12451 CPUSET_AND(cpuset, cpu_ready_set);
12461 12452 if (!CPUSET_ISNULL(cpuset)) {
12462 12453 SFMMU_XCALL_STATS(sfmmup);
12463 12454 xt_some(cpuset, sfmmu_raise_tsb_exception,
12464 12455 (uint64_t)sfmmup, INVALID_CONTEXT);
12465 12456 xt_sync(cpuset);
12466 12457 SFMMU_STAT(sf_tsb_raise_exception);
12467 12458 SFMMU_MMU_STAT(mmu_tsb_raise_exception);
12468 12459 }
12469 12460
12470 12461 /*
12471 12462 * If the hat to-be-invalidated is the same as the current
12472 12463 * process on local CPU we need to invalidate
12473 12464 * this CPU context as well.
12474 12465 */
12475 12466 if ((sfmmu_getctx_sec() == currcnum) &&
12476 12467 (currcnum != INVALID_CONTEXT)) {
12477 12468 /* sets shared context to INVALID too */
12478 12469 sfmmu_setctx_sec(INVALID_CONTEXT);
12479 12470 sfmmu_clear_utsbinfo();
12480 12471 }
12481 12472
12482 12473 SFMMU_FLAGS_SET(sfmmup, HAT_ALLCTX_INVALID);
12483 12474
12484 12475 kpreempt_enable();
12485 12476
12486 12477 /*
12487 12478 * we hold the hat lock, so nobody should allocate a context
12488 12479 * for us yet
12489 12480 */
12490 12481 ASSERT(sfmmup->sfmmu_ctxs[mmu_ctxp->mmu_idx].cnum == INVALID_CONTEXT);
12491 12482 }
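The invalidation above follows a fixed recipe: take the per-process (PP) lock so resume() cannot hand out a context concurrently, write INVALID_CONTEXT into every context-domain slot, publish the stores with a memory barrier, release the lock, and only then cross-call the other CPUs. A simplified user-space model of the slot-invalidation step, using C11 atomics in place of the PP spin lock and membar_enter() (illustrative only):

#include <stdatomic.h>
#include <stdint.h>

#define	MAX_DOMS	8
#define	INVALID_CTX	0

static atomic_flag	pp_lock = ATOMIC_FLAG_INIT;	/* per-process lock */
static _Atomic uint32_t	cnum[MAX_DOMS];		/* per-domain context numbers */

static void
invalidate_all_ctxs(void)
{
	int i;

	/* spin until we own the PP lock */
	while (atomic_flag_test_and_set_explicit(&pp_lock,
	    memory_order_acquire))
		;

	for (i = 0; i < MAX_DOMS; i++) {
		if (atomic_load(&cnum[i]) == INVALID_CTX)
			continue;
		atomic_store(&cnum[i], INVALID_CTX);
	}
	atomic_thread_fence(memory_order_seq_cst);	/* like membar_enter() */

	atomic_flag_clear_explicit(&pp_lock, memory_order_release);
	/* cross-calls to the other CPUs would follow here */
}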
12492 12483
12493 12484 #ifdef VAC
12494 12485 /*
12495 12486 * We need to flush the cache in all cpus. It is possible that
12496 12487  * a process referenced a page as cacheable but has since exited
12497 12488  * and cleared the mapping list. We still need to flush it but have no
12498 12489  * state, so flushing on all cpus is the only alternative.
12499 12490 */
12500 12491 void
12501 12492 sfmmu_cache_flush(pfn_t pfnum, int vcolor)
12502 12493 {
12503 12494 cpuset_t cpuset;
12504 12495
12505 12496 kpreempt_disable();
12506 12497 cpuset = cpu_ready_set;
12507 12498 CPUSET_DEL(cpuset, CPU->cpu_id);
12508 12499 SFMMU_XCALL_STATS(NULL); /* account to any ctx */
12509 12500 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor);
12510 12501 xt_sync(cpuset);
12511 12502 vac_flushpage(pfnum, vcolor);
12512 12503 kpreempt_enable();
12513 12504 }
12514 12505
12515 12506 void
12516 12507 sfmmu_cache_flushcolor(int vcolor, pfn_t pfnum)
12517 12508 {
12518 12509 cpuset_t cpuset;
12519 12510
12520 12511 ASSERT(vcolor >= 0);
12521 12512
12522 12513 kpreempt_disable();
12523 12514 cpuset = cpu_ready_set;
12524 12515 CPUSET_DEL(cpuset, CPU->cpu_id);
12525 12516 SFMMU_XCALL_STATS(NULL); /* account to any ctx */
12526 12517 xt_some(cpuset, vac_flushcolor_tl1, vcolor, pfnum);
12527 12518 xt_sync(cpuset);
12528 12519 vac_flushcolor(vcolor, pfnum);
12529 12520 kpreempt_enable();
12530 12521 }
12531 12522 #endif /* VAC */
12532 12523
12533 12524 /*
12534 12525 * We need to prevent processes from accessing the TSB using a cached physical
12535 12526 * address. It's alright if they try to access the TSB via virtual address
12536 12527 * since they will just fault on that virtual address once the mapping has
12537 12528 * been suspended.
12538 12529 */
12539 12530 #pragma weak sendmondo_in_recover
12540 12531
12541 12532 /* ARGSUSED */
12542 12533 static int
12543 12534 sfmmu_tsb_pre_relocator(caddr_t va, uint_t tsbsz, uint_t flags, void *tsbinfo)
12544 12535 {
12545 12536 struct tsb_info *tsbinfop = (struct tsb_info *)tsbinfo;
12546 12537 sfmmu_t *sfmmup = tsbinfop->tsb_sfmmu;
12547 12538 hatlock_t *hatlockp;
12548 12539 sf_scd_t *scdp;
12549 12540
12550 12541 if (flags != HAT_PRESUSPEND)
12551 12542 return (0);
12552 12543
12553 12544 /*
12554 12545  * If the tsb is a shared TSB with TSB_SHAREDCTX set, sfmmup must
12555 12546  * be a shared hat; in that case set the SCD's tsbinfo flag.
12556 12547  * If the tsb is not shared, sfmmup is a private hat; set
12557 12548  * its private tsbinfo flag.
12558 12549 */
12559 12550 hatlockp = sfmmu_hat_enter(sfmmup);
12560 12551 tsbinfop->tsb_flags |= TSB_RELOC_FLAG;
12561 12552
12562 12553 if (!(tsbinfop->tsb_flags & TSB_SHAREDCTX)) {
12563 12554 sfmmu_tsb_inv_ctx(sfmmup);
12564 12555 sfmmu_hat_exit(hatlockp);
12565 12556 } else {
12566 12557 /* release lock on the shared hat */
12567 12558 sfmmu_hat_exit(hatlockp);
12568 12559 /* sfmmup is a shared hat */
12569 12560 ASSERT(sfmmup->sfmmu_scdhat);
12570 12561 scdp = sfmmup->sfmmu_scdp;
12571 12562 ASSERT(scdp != NULL);
12572 12563 /* get private hat from the scd list */
12573 12564 mutex_enter(&scdp->scd_mutex);
12574 12565 sfmmup = scdp->scd_sf_list;
12575 12566 while (sfmmup != NULL) {
12576 12567 hatlockp = sfmmu_hat_enter(sfmmup);
12577 12568 /*
12578 12569 * We do not call sfmmu_tsb_inv_ctx here because
12579 12570 * sendmondo_in_recover check is only needed for
12580 12571 * sun4u.
12581 12572 */
12582 12573 sfmmu_invalidate_ctx(sfmmup);
12583 12574 sfmmu_hat_exit(hatlockp);
12584 12575 sfmmup = sfmmup->sfmmu_scd_link.next;
12585 12576
12586 12577 }
12587 12578 mutex_exit(&scdp->scd_mutex);
12588 12579 }
12589 12580 return (0);
12590 12581 }
12591 12582
12592 12583 static void
12593 12584 sfmmu_tsb_inv_ctx(sfmmu_t *sfmmup)
12594 12585 {
12595 12586 extern uint32_t sendmondo_in_recover;
12596 12587
12597 12588 ASSERT(sfmmu_hat_lock_held(sfmmup));
12598 12589
12599 12590 /*
12600 12591 * For Cheetah+ Erratum 25:
12601 12592 * Wait for any active recovery to finish. We can't risk
12602 12593 * relocating the TSB of the thread running mondo_recover_proc()
12603 12594 * since, if we did that, we would deadlock. The scenario we are
12604 12595 * trying to avoid is as follows:
12605 12596 *
12606 12597 * THIS CPU RECOVER CPU
12607 12598 * -------- -----------
12608 12599 * Begins recovery, walking through TSB
12609 12600 * hat_pagesuspend() TSB TTE
12610 12601 * TLB miss on TSB TTE, spins at TL1
12611 12602 * xt_sync()
12612 12603 * send_mondo_timeout()
12613 12604 * mondo_recover_proc()
12614 12605 * ((deadlocked))
12615 12606 *
12616 12607 * The second half of the workaround is that mondo_recover_proc()
12617 12608 * checks to see if the tsb_info has the RELOC flag set, and if it
12618 12609 * does, it skips over that TSB without ever touching tsbinfop->tsb_va
12619 12610 * and hence avoiding the TLB miss that could result in a deadlock.
12620 12611 */
12621 12612 if (&sendmondo_in_recover) {
12622 12613 membar_enter(); /* make sure RELOC flag visible */
12623 12614 while (sendmondo_in_recover) {
12624 12615 drv_usecwait(1);
12625 12616 membar_consumer();
12626 12617 }
12627 12618 }
12628 12619
12629 12620 sfmmu_invalidate_ctx(sfmmup);
12630 12621 }
12631 12622
12632 12623 /* ARGSUSED */
12633 12624 static int
12634 12625 sfmmu_tsb_post_relocator(caddr_t va, uint_t tsbsz, uint_t flags,
12635 - void *tsbinfo, pfn_t newpfn)
12626 + void *tsbinfo, pfn_t newpfn)
12636 12627 {
12637 12628 hatlock_t *hatlockp;
12638 12629 struct tsb_info *tsbinfop = (struct tsb_info *)tsbinfo;
12639 12630 sfmmu_t *sfmmup = tsbinfop->tsb_sfmmu;
12640 12631
12641 12632 if (flags != HAT_POSTUNSUSPEND)
12642 12633 return (0);
12643 12634
12644 12635 hatlockp = sfmmu_hat_enter(sfmmup);
12645 12636
12646 12637 SFMMU_STAT(sf_tsb_reloc);
12647 12638
12648 12639 /*
12649 12640 * The process may have swapped out while we were relocating one
12650 12641 * of its TSBs. If so, don't bother doing the setup since the
12651 12642 * process can't be using the memory anymore.
12652 12643 */
12653 12644 if ((tsbinfop->tsb_flags & TSB_SWAPPED) == 0) {
12654 12645 ASSERT(va == tsbinfop->tsb_va);
12655 12646 sfmmu_tsbinfo_setup_phys(tsbinfop, newpfn);
12656 12647
12657 12648 if (tsbinfop->tsb_flags & TSB_FLUSH_NEEDED) {
12658 12649 sfmmu_inv_tsb(tsbinfop->tsb_va,
12659 12650 TSB_BYTES(tsbinfop->tsb_szc));
12660 12651 tsbinfop->tsb_flags &= ~TSB_FLUSH_NEEDED;
12661 12652 }
12662 12653 }
12663 12654
12664 12655 membar_exit();
12665 12656 tsbinfop->tsb_flags &= ~TSB_RELOC_FLAG;
12666 12657 cv_broadcast(&sfmmup->sfmmu_tsb_cv);
12667 12658
12668 12659 sfmmu_hat_exit(hatlockp);
12669 12660
12670 12661 return (0);
12671 12662 }
12672 12663
12673 12664 /*
12674 12665 * Allocate and initialize a tsb_info structure. Note that we may or may not
12675 12666 * allocate a TSB here, depending on the flags passed in.
12676 12667 */
12677 12668 static int
12678 12669 sfmmu_tsbinfo_alloc(struct tsb_info **tsbinfopp, int tsb_szc, int tte_sz_mask,
12679 - uint_t flags, sfmmu_t *sfmmup)
12670 + uint_t flags, sfmmu_t *sfmmup)
12680 12671 {
12681 12672 int err;
12682 12673
12683 12674 *tsbinfopp = (struct tsb_info *)kmem_cache_alloc(
12684 12675 sfmmu_tsbinfo_cache, KM_SLEEP);
12685 12676
12686 12677 if ((err = sfmmu_init_tsbinfo(*tsbinfopp, tte_sz_mask,
12687 12678 tsb_szc, flags, sfmmup)) != 0) {
12688 12679 kmem_cache_free(sfmmu_tsbinfo_cache, *tsbinfopp);
12689 12680 SFMMU_STAT(sf_tsb_allocfail);
12690 12681 *tsbinfopp = NULL;
12691 12682 return (err);
12692 12683 }
12693 12684 SFMMU_STAT(sf_tsb_alloc);
12694 12685
12695 12686 /*
12696 12687 * Bump the TSB size counters for this TSB size.
12697 12688 */
12698 12689 (*(((int *)&sfmmu_tsbsize_stat) + tsb_szc))++;
12699 12690 return (0);
12700 12691 }
12701 12692
12702 12693 static void
12703 12694 sfmmu_tsb_free(struct tsb_info *tsbinfo)
12704 12695 {
12705 12696 caddr_t tsbva = tsbinfo->tsb_va;
12706 12697 uint_t tsb_size = TSB_BYTES(tsbinfo->tsb_szc);
12707 12698 struct kmem_cache *kmem_cachep = tsbinfo->tsb_cache;
12708 12699 vmem_t *vmp = tsbinfo->tsb_vmp;
12709 12700
12710 12701 /*
12711 12702 * If we allocated this TSB from relocatable kernel memory, then we
12712 12703 * need to uninstall the callback handler.
12713 12704 */
12714 12705 if (tsbinfo->tsb_cache != sfmmu_tsb8k_cache) {
12715 12706 uintptr_t slab_mask;
12716 12707 caddr_t slab_vaddr;
12717 12708 page_t **ppl;
12718 12709 int ret;
12719 12710
12720 12711 ASSERT(tsb_size <= MMU_PAGESIZE4M || use_bigtsb_arena);
12721 12712 if (tsb_size > MMU_PAGESIZE4M)
12722 12713 slab_mask = ~((uintptr_t)bigtsb_slab_mask) << PAGESHIFT;
12723 12714 else
12724 12715 slab_mask = ~((uintptr_t)tsb_slab_mask) << PAGESHIFT;
12725 12716 slab_vaddr = (caddr_t)((uintptr_t)tsbva & slab_mask);
12726 12717
12727 12718 ret = as_pagelock(&kas, &ppl, slab_vaddr, PAGESIZE, S_WRITE);
12728 12719 ASSERT(ret == 0);
12729 12720 hat_delete_callback(tsbva, (uint_t)tsb_size, (void *)tsbinfo,
12730 12721 0, NULL);
12731 12722 as_pageunlock(&kas, ppl, slab_vaddr, PAGESIZE, S_WRITE);
12732 12723 }
12733 12724
12734 12725 if (kmem_cachep != NULL) {
12735 12726 kmem_cache_free(kmem_cachep, tsbva);
12736 12727 } else {
12737 12728 vmem_xfree(vmp, (void *)tsbva, tsb_size);
12738 12729 }
12739 12730 tsbinfo->tsb_va = (caddr_t)0xbad00bad;
12740 12731 atomic_add_64(&tsb_alloc_bytes, -(int64_t)tsb_size);
12741 12732 }
12742 12733
12743 12734 static void
12744 12735 sfmmu_tsbinfo_free(struct tsb_info *tsbinfo)
12745 12736 {
12746 12737 if ((tsbinfo->tsb_flags & TSB_SWAPPED) == 0) {
12747 12738 sfmmu_tsb_free(tsbinfo);
12748 12739 }
12749 12740 kmem_cache_free(sfmmu_tsbinfo_cache, tsbinfo);
12750 12741
12751 12742 }
12752 12743
12753 12744 /*
12754 12745 * Setup all the references to physical memory for this tsbinfo.
12755 12746 * The underlying page(s) must be locked.
12756 12747 */
12757 12748 static void
12758 12749 sfmmu_tsbinfo_setup_phys(struct tsb_info *tsbinfo, pfn_t pfn)
12759 12750 {
12760 12751 ASSERT(pfn != PFN_INVALID);
12761 12752 ASSERT(pfn == va_to_pfn(tsbinfo->tsb_va));
12762 12753
12763 12754 #ifndef sun4v
12764 12755 if (tsbinfo->tsb_szc == 0) {
12765 12756 sfmmu_memtte(&tsbinfo->tsb_tte, pfn,
12766 12757 PROT_WRITE|PROT_READ, TTE8K);
12767 12758 } else {
12768 12759 /*
12769 12760 * Round down PA and use a large mapping; the handlers will
12770 12761 * compute the TSB pointer at the correct offset into the
12771 12762 * big virtual page. NOTE: this assumes all TSBs larger
12772 12763 * than 8K must come from physically contiguous slabs of
12773 12764 * size tsb_slab_size.
12774 12765 */
12775 12766 sfmmu_memtte(&tsbinfo->tsb_tte, pfn & ~tsb_slab_mask,
12776 12767 PROT_WRITE|PROT_READ, tsb_slab_ttesz);
12777 12768 }
12778 12769 tsbinfo->tsb_pa = ptob(pfn);
12779 12770
12780 12771 TTE_SET_LOCKED(&tsbinfo->tsb_tte); /* lock the tte into dtlb */
12781 12772 TTE_SET_MOD(&tsbinfo->tsb_tte); /* enable writes */
12782 12773
12783 12774 ASSERT(TTE_IS_PRIVILEGED(&tsbinfo->tsb_tte));
12784 12775 ASSERT(TTE_IS_LOCKED(&tsbinfo->tsb_tte));
12785 12776 #else /* sun4v */
12786 12777 tsbinfo->tsb_pa = ptob(pfn);
12787 12778 #endif /* sun4v */
12788 12779 }
12789 12780
12790 12781
12791 12782 /*
12792 12783 * Returns zero on success, ENOMEM if over the high water mark,
12793 12784 * or EAGAIN if the caller needs to retry with a smaller TSB
12794 12785 * size (or specify TSB_FORCEALLOC if the allocation can't fail).
12795 12786 *
12796 12787 * This call cannot fail to allocate a TSB if TSB_FORCEALLOC
12797 12788 * is specified and the TSB requested is PAGESIZE, though it
12798 12789 * may sleep waiting for memory if sufficient memory is not
12799 12790 * available.
12800 12791 */
12801 12792 static int
12802 12793 sfmmu_init_tsbinfo(struct tsb_info *tsbinfo, int tteszmask,
12803 12794 int tsbcode, uint_t flags, sfmmu_t *sfmmup)
12804 12795 {
12805 12796 caddr_t vaddr = NULL;
12806 12797 caddr_t slab_vaddr;
12807 12798 uintptr_t slab_mask;
12808 12799 int tsbbytes = TSB_BYTES(tsbcode);
12809 12800 int lowmem = 0;
12810 12801 struct kmem_cache *kmem_cachep = NULL;
12811 12802 vmem_t *vmp = NULL;
12812 12803 lgrp_id_t lgrpid = LGRP_NONE;
12813 12804 pfn_t pfn;
12814 12805 uint_t cbflags = HAC_SLEEP;
12815 12806 page_t **pplist;
12816 12807 int ret;
12817 12808
12818 12809 ASSERT(tsbbytes <= MMU_PAGESIZE4M || use_bigtsb_arena);
12819 12810 if (tsbbytes > MMU_PAGESIZE4M)
12820 12811 slab_mask = ~((uintptr_t)bigtsb_slab_mask) << PAGESHIFT;
12821 12812 else
12822 12813 slab_mask = ~((uintptr_t)tsb_slab_mask) << PAGESHIFT;
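	/*
	 * slab_mask now clears the offset within a TSB slab, so that
	 * (vaddr & slab_mask) below yields the base of the slab containing
	 * vaddr.  A worked example, assuming 8K base pages (PAGESHIFT == 13)
	 * and 4M slabs (i.e. tsb_slab_mask == 0x1ff):
	 *
	 *	slab_mask == ~(uintptr_t)0x3fffff
	 *	0x2f654000 & slab_mask == 0x2f400000	(the slab base)
	 */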
12823 12814
12824 12815 if (flags & (TSB_FORCEALLOC | TSB_SWAPIN | TSB_GROW | TSB_SHRINK))
12825 12816 flags |= TSB_ALLOC;
12826 12817
12827 12818 ASSERT((flags & TSB_FORCEALLOC) == 0 || tsbcode == TSB_MIN_SZCODE);
12828 12819
12829 12820 tsbinfo->tsb_sfmmu = sfmmup;
12830 12821
12831 12822 /*
12832 12823 * If not allocating a TSB, set up the tsbinfo, set TSB_SWAPPED, and
12833 12824 * return.
12834 12825 */
12835 12826 if ((flags & TSB_ALLOC) == 0) {
12836 12827 tsbinfo->tsb_szc = tsbcode;
12837 12828 tsbinfo->tsb_ttesz_mask = tteszmask;
12838 12829 tsbinfo->tsb_va = (caddr_t)0xbadbadbeef;
12839 12830 tsbinfo->tsb_pa = -1;
12840 12831 tsbinfo->tsb_tte.ll = 0;
12841 12832 tsbinfo->tsb_next = NULL;
12842 12833 tsbinfo->tsb_flags = TSB_SWAPPED;
12843 12834 tsbinfo->tsb_cache = NULL;
12844 12835 tsbinfo->tsb_vmp = NULL;
12845 12836 return (0);
12846 12837 }
12847 12838
12848 12839 #ifdef DEBUG
12849 12840 /*
12850 12841 * For debugging:
12851 12842 * Randomly force allocation failures every tsb_alloc_mtbf
12852 12843 * tries if TSB_FORCEALLOC is not specified. This will
12853 12844 * return ENOMEM if tsb_alloc_mtbf is odd, or EAGAIN if
12854 12845 * it is even, to allow testing of both failure paths...
12855 12846 */
12856 12847 if (tsb_alloc_mtbf && ((flags & TSB_FORCEALLOC) == 0) &&
12857 12848 (tsb_alloc_count++ == tsb_alloc_mtbf)) {
12858 12849 tsb_alloc_count = 0;
12859 12850 tsb_alloc_fail_mtbf++;
12860 12851 return ((tsb_alloc_mtbf & 1)? ENOMEM : EAGAIN);
12861 12852 }
12862 12853 #endif /* DEBUG */
12863 12854
12864 12855 /*
12865 12856 * Enforce high water mark if we are not doing a forced allocation
12866 12857 * and are not shrinking a process' TSB.
12867 12858 */
12868 12859 if ((flags & TSB_SHRINK) == 0 &&
12869 12860 (tsbbytes + tsb_alloc_bytes) > tsb_alloc_hiwater) {
12870 12861 if ((flags & TSB_FORCEALLOC) == 0)
12871 12862 return (ENOMEM);
12872 12863 lowmem = 1;
12873 12864 }
12874 12865
12875 12866 /*
12876 12867 * Allocate from the correct location based upon the size of the TSB
12877 12868 * compared to the base page size, and what memory conditions dictate.
12878 12869 * Note we always do nonblocking allocations from the TSB arena since
12879 12870 * we don't want memory fragmentation to cause processes to block
12880 12871 * indefinitely waiting for memory; until the kernel algorithms that
12881 12872 * coalesce large pages are improved this is our best option.
12882 12873 *
12883 12874 * Algorithm:
12884 12875 * If allocating a "large" TSB (>8K), allocate from the
12885 12876 * appropriate kmem_tsb_default_arena vmem arena
12886 12877 * else if low on memory or the TSB_FORCEALLOC flag is set or
12887 12878 * tsb_forceheap is set
12888 12879 * Allocate from kernel heap via sfmmu_tsb8k_cache with
12889 12880 * KM_SLEEP (never fails)
12890 12881 * else
12891 12882 * Allocate from appropriate sfmmu_tsb_cache with
12892 12883 * KM_NOSLEEP
12893 12884 * endif
12894 12885 */
12895 12886 if (tsb_lgrp_affinity)
12896 12887 lgrpid = lgrp_home_id(curthread);
12897 12888 if (lgrpid == LGRP_NONE)
12898 12889 lgrpid = 0; /* use lgrp of boot CPU */
12899 12890
12900 12891 if (tsbbytes > MMU_PAGESIZE) {
12901 12892 if (tsbbytes > MMU_PAGESIZE4M) {
12902 12893 vmp = kmem_bigtsb_default_arena[lgrpid];
12903 12894 vaddr = (caddr_t)vmem_xalloc(vmp, tsbbytes, tsbbytes,
12904 12895 0, 0, NULL, NULL, VM_NOSLEEP);
12905 12896 } else {
12906 12897 vmp = kmem_tsb_default_arena[lgrpid];
12907 12898 vaddr = (caddr_t)vmem_xalloc(vmp, tsbbytes, tsbbytes,
12908 12899 0, 0, NULL, NULL, VM_NOSLEEP);
12909 12900 }
12910 12901 #ifdef DEBUG
12911 12902 } else if (lowmem || (flags & TSB_FORCEALLOC) || tsb_forceheap) {
12912 12903 #else /* !DEBUG */
12913 12904 } else if (lowmem || (flags & TSB_FORCEALLOC)) {
12914 12905 #endif /* DEBUG */
12915 12906 kmem_cachep = sfmmu_tsb8k_cache;
12916 12907 vaddr = (caddr_t)kmem_cache_alloc(kmem_cachep, KM_SLEEP);
12917 12908 ASSERT(vaddr != NULL);
12918 12909 } else {
12919 12910 kmem_cachep = sfmmu_tsb_cache[lgrpid];
12920 12911 vaddr = (caddr_t)kmem_cache_alloc(kmem_cachep, KM_NOSLEEP);
12921 12912 }
12922 12913
12923 12914 tsbinfo->tsb_cache = kmem_cachep;
12924 12915 tsbinfo->tsb_vmp = vmp;
12925 12916
12926 12917 if (vaddr == NULL) {
12927 12918 return (EAGAIN);
12928 12919 }
12929 12920
12930 12921 atomic_add_64(&tsb_alloc_bytes, (int64_t)tsbbytes);
12931 12922 kmem_cachep = tsbinfo->tsb_cache;
12932 12923
12933 12924 /*
12934 12925 * If we are allocating from outside the cage, then we need to
12935 12926 * register a relocation callback handler. Note that for now
12936 12927 * since pseudo mappings always hang off of the slab's root page,
12937 12928 * we need only lock the first 8K of the TSB slab. This is a bit
12938 12929 * hacky but it is good for performance.
12939 12930 */
12940 12931 if (kmem_cachep != sfmmu_tsb8k_cache) {
12941 12932 slab_vaddr = (caddr_t)((uintptr_t)vaddr & slab_mask);
12942 12933 ret = as_pagelock(&kas, &pplist, slab_vaddr, PAGESIZE, S_WRITE);
12943 12934 ASSERT(ret == 0);
12944 12935 ret = hat_add_callback(sfmmu_tsb_cb_id, vaddr, (uint_t)tsbbytes,
12945 12936 cbflags, (void *)tsbinfo, &pfn, NULL);
12946 12937
12947 12938 /*
12948 12939 		 * If we could not successfully add the callback function,
12949 12940 		 * free up the resources and return an error condition.
12950 12941 */
12951 12942 if (ret != 0) {
12952 12943 if (kmem_cachep) {
12953 12944 kmem_cache_free(kmem_cachep, vaddr);
12954 12945 } else {
12955 12946 vmem_xfree(vmp, (void *)vaddr, tsbbytes);
12956 12947 }
12957 12948 as_pageunlock(&kas, pplist, slab_vaddr, PAGESIZE,
12958 12949 S_WRITE);
12959 12950 return (EAGAIN);
12960 12951 }
12961 12952 } else {
12962 12953 /*
12963 12954 * Since allocation of 8K TSBs from heap is rare and occurs
12964 12955 * during memory pressure we allocate them from permanent
12965 12956 * memory rather than using callbacks to get the PFN.
12966 12957 */
12967 12958 pfn = hat_getpfnum(kas.a_hat, vaddr);
12968 12959 }
12969 12960
12970 12961 tsbinfo->tsb_va = vaddr;
12971 12962 tsbinfo->tsb_szc = tsbcode;
12972 12963 tsbinfo->tsb_ttesz_mask = tteszmask;
12973 12964 tsbinfo->tsb_next = NULL;
12974 12965 tsbinfo->tsb_flags = 0;
12975 12966
12976 12967 sfmmu_tsbinfo_setup_phys(tsbinfo, pfn);
12977 12968
12978 12969 sfmmu_inv_tsb(vaddr, tsbbytes);
12979 12970
12980 12971 if (kmem_cachep != sfmmu_tsb8k_cache) {
12981 12972 as_pageunlock(&kas, pplist, slab_vaddr, PAGESIZE, S_WRITE);
12982 12973 }
12983 12974
12984 12975 return (0);
12985 12976 }
12986 12977
12987 12978 /*
12988 12979 * Initialize per cpu tsb and per cpu tsbmiss_area
12989 12980 */
12990 12981 void
12991 12982 sfmmu_init_tsbs(void)
12992 12983 {
12993 12984 int i;
12994 12985 struct tsbmiss *tsbmissp;
12995 12986 struct kpmtsbm *kpmtsbmp;
12996 12987 #ifndef sun4v
12997 12988 extern int dcache_line_mask;
12998 12989 #endif /* sun4v */
12999 12990 extern uint_t vac_colors;
13000 12991
13001 12992 /*
13002 12993 * Init. tsb miss area.
13003 12994 */
13004 12995 tsbmissp = tsbmiss_area;
13005 12996
13006 12997 for (i = 0; i < NCPU; tsbmissp++, i++) {
13007 12998 /*
13008 12999 * initialize the tsbmiss area.
13009 13000 * Do this for all possible CPUs as some may be added
13010 13001 * while the system is running. There is no cost to this.
13011 13002 */
13012 13003 tsbmissp->ksfmmup = ksfmmup;
13013 13004 #ifndef sun4v
13014 13005 tsbmissp->dcache_line_mask = (uint16_t)dcache_line_mask;
13015 13006 #endif /* sun4v */
13016 13007 tsbmissp->khashstart =
13017 13008 (struct hmehash_bucket *)va_to_pa((caddr_t)khme_hash);
13018 13009 tsbmissp->uhashstart =
13019 13010 (struct hmehash_bucket *)va_to_pa((caddr_t)uhme_hash);
13020 13011 tsbmissp->khashsz = khmehash_num;
13021 13012 tsbmissp->uhashsz = uhmehash_num;
13022 13013 }
13023 13014
13024 13015 sfmmu_tsb_cb_id = hat_register_callback('T'<<16 | 'S' << 8 | 'B',
13025 13016 sfmmu_tsb_pre_relocator, sfmmu_tsb_post_relocator, NULL, 0);
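	/*
	 * The first argument above simply packs the ASCII characters 'TSB'
	 * into an int.
	 */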
13026 13017
13027 13018 if (kpm_enable == 0)
13028 13019 return;
13029 13020
13030 13021 /* -- Begin KPM specific init -- */
13031 13022
13032 13023 if (kpm_smallpages) {
13033 13024 /*
13034 13025 * If we're using base pagesize pages for seg_kpm
13035 13026 * mappings, we use the kernel TSB since we can't afford
13036 13027 * to allocate a second huge TSB for these mappings.
13037 13028 */
13038 13029 kpm_tsbbase = ktsb_phys? ktsb_pbase : (uint64_t)ktsb_base;
13039 13030 kpm_tsbsz = ktsb_szcode;
13040 13031 kpmsm_tsbbase = kpm_tsbbase;
13041 13032 kpmsm_tsbsz = kpm_tsbsz;
13042 13033 } else {
13043 13034 /*
13044 13035 * In VAC conflict case, just put the entries in the
13045 13036 * kernel 8K indexed TSB for now so we can find them.
13046 13037 * This could really be changed in the future if we feel
13047 13038 * the need...
13048 13039 */
13049 13040 kpmsm_tsbbase = ktsb_phys? ktsb_pbase : (uint64_t)ktsb_base;
13050 13041 kpmsm_tsbsz = ktsb_szcode;
13051 13042 kpm_tsbbase = ktsb_phys? ktsb4m_pbase : (uint64_t)ktsb4m_base;
13052 13043 kpm_tsbsz = ktsb4m_szcode;
13053 13044 }
13054 13045
13055 13046 kpmtsbmp = kpmtsbm_area;
13056 13047 for (i = 0; i < NCPU; kpmtsbmp++, i++) {
13057 13048 /*
13058 13049 * Initialize the kpmtsbm area.
13059 13050 * Do this for all possible CPUs as some may be added
13060 13051 * while the system is running. There is no cost to this.
13061 13052 */
13062 13053 kpmtsbmp->vbase = kpm_vbase;
13063 13054 kpmtsbmp->vend = kpm_vbase + kpm_size * vac_colors;
13064 13055 kpmtsbmp->sz_shift = kpm_size_shift;
13065 13056 kpmtsbmp->kpmp_shift = kpmp_shift;
13066 13057 kpmtsbmp->kpmp2pshft = (uchar_t)kpmp2pshft;
13067 13058 if (kpm_smallpages == 0) {
13068 13059 kpmtsbmp->kpmp_table_sz = kpmp_table_sz;
13069 13060 kpmtsbmp->kpmp_tablepa = va_to_pa(kpmp_table);
13070 13061 } else {
13071 13062 kpmtsbmp->kpmp_table_sz = kpmp_stable_sz;
13072 13063 kpmtsbmp->kpmp_tablepa = va_to_pa(kpmp_stable);
13073 13064 }
13074 13065 kpmtsbmp->msegphashpa = va_to_pa(memseg_phash);
13075 13066 kpmtsbmp->flags = KPMTSBM_ENABLE_FLAG;
13076 13067 #ifdef DEBUG
13077 13068 kpmtsbmp->flags |= (kpm_tsbmtl) ? KPMTSBM_TLTSBM_FLAG : 0;
13078 13069 #endif /* DEBUG */
13079 13070 if (ktsb_phys)
13080 13071 kpmtsbmp->flags |= KPMTSBM_TSBPHYS_FLAG;
13081 13072 }
13082 13073
13083 13074 /* -- End KPM specific init -- */
13084 13075 }
13085 13076
13086 13077 /* Avoid using sfmmu_tsbinfo_alloc() to avoid kmem_alloc - no real reason */
13087 13078 struct tsb_info ktsb_info[2];
13088 13079
13089 13080 /*
13090 13081 * Called from hat_kern_setup() to setup the tsb_info for ksfmmup.
13091 13082 */
13092 13083 void
13093 13084 sfmmu_init_ktsbinfo()
13094 13085 {
13095 13086 ASSERT(ksfmmup != NULL);
13096 13087 ASSERT(ksfmmup->sfmmu_tsb == NULL);
13097 13088 /*
13098 13089 * Allocate tsbinfos for kernel and copy in data
13099 13090 * to make debug easier and sun4v setup easier.
13100 13091 */
13101 13092 ktsb_info[0].tsb_sfmmu = ksfmmup;
13102 13093 ktsb_info[0].tsb_szc = ktsb_szcode;
13103 13094 ktsb_info[0].tsb_ttesz_mask = TSB8K|TSB64K|TSB512K;
13104 13095 ktsb_info[0].tsb_va = ktsb_base;
13105 13096 ktsb_info[0].tsb_pa = ktsb_pbase;
13106 13097 ktsb_info[0].tsb_flags = 0;
13107 13098 ktsb_info[0].tsb_tte.ll = 0;
13108 13099 ktsb_info[0].tsb_cache = NULL;
13109 13100
13110 13101 ktsb_info[1].tsb_sfmmu = ksfmmup;
13111 13102 ktsb_info[1].tsb_szc = ktsb4m_szcode;
13112 13103 ktsb_info[1].tsb_ttesz_mask = TSB4M;
13113 13104 ktsb_info[1].tsb_va = ktsb4m_base;
13114 13105 ktsb_info[1].tsb_pa = ktsb4m_pbase;
13115 13106 ktsb_info[1].tsb_flags = 0;
13116 13107 ktsb_info[1].tsb_tte.ll = 0;
13117 13108 ktsb_info[1].tsb_cache = NULL;
13118 13109
13119 13110 /* Link them into ksfmmup. */
13120 13111 ktsb_info[0].tsb_next = &ktsb_info[1];
13121 13112 ktsb_info[1].tsb_next = NULL;
13122 13113 ksfmmup->sfmmu_tsb = &ktsb_info[0];
13123 13114
13124 13115 sfmmu_setup_tsbinfo(ksfmmup);
13125 13116 }
13126 13117
13127 13118 /*
13128 13119 * Cache the last value returned from va_to_pa(). If the VA specified
13129 13120 * in the current call to cached_va_to_pa() maps to the same Page (as the
13130 13121  * in the current call to cached_va_to_pa() maps to the same page as the
13131 13122  * previous call to cached_va_to_pa(), then compute the PA using
13132 13123 *
13133 13124 * Note: this function is neither MT-safe nor consistent in the presence
13134 13125 * of multiple, interleaved threads. This function was created to enable
13135 13126 * an optimization used during boot (at a point when there's only one thread
13136 13127 * executing on the "boot CPU", and before startup_vm() has been called).
13137 13128 */
13138 13129 static uint64_t
13139 13130 cached_va_to_pa(void *vaddr)
13140 13131 {
13141 13132 static uint64_t prev_vaddr_base = 0;
13142 13133 static uint64_t prev_pfn = 0;
13143 13134
13144 13135 if ((((uint64_t)vaddr) & MMU_PAGEMASK) == prev_vaddr_base) {
13145 13136 return (prev_pfn | ((uint64_t)vaddr & MMU_PAGEOFFSET));
13146 13137 } else {
13147 13138 uint64_t pa = va_to_pa(vaddr);
13148 13139
13149 13140 if (pa != ((uint64_t)-1)) {
13150 13141 /*
13151 13142 * Computed physical address is valid. Cache its
13152 13143 * related info for the next cached_va_to_pa() call.
13153 13144 */
13154 13145 prev_pfn = pa & MMU_PAGEMASK;
13155 13146 prev_vaddr_base = ((uint64_t)vaddr) & MMU_PAGEMASK;
13156 13147 }
13157 13148
13158 13149 return (pa);
13159 13150 }
13160 13151 }
13161 13152
13162 13153 /*
13163 13154 * Carve up our nucleus hblk region. We may allocate more hblks than
13164 13155 * asked due to rounding errors but we are guaranteed to have at least
13165 13156 * enough space to allocate the requested number of hblk8's and hblk1's.
13166 13157 */
13167 13158 void
13168 13159 sfmmu_init_nucleus_hblks(caddr_t addr, size_t size, int nhblk8, int nhblk1)
13169 13160 {
13170 13161 struct hme_blk *hmeblkp;
13171 13162 size_t hme8blk_sz, hme1blk_sz;
13172 13163 size_t i;
13173 13164 size_t hblk8_bound;
13174 13165 ulong_t j = 0, k = 0;
13175 13166
13176 13167 ASSERT(addr != NULL && size != 0);
13177 13168
13178 13169 /* Need to use proper structure alignment */
13179 13170 hme8blk_sz = roundup(HME8BLK_SZ, sizeof (int64_t));
13180 13171 hme1blk_sz = roundup(HME1BLK_SZ, sizeof (int64_t));
13181 13172
13182 13173 nucleus_hblk8.list = (void *)addr;
13183 13174 nucleus_hblk8.index = 0;
13184 13175
13185 13176 /*
13186 13177 * Use as much memory as possible for hblk8's since we
13187 13178 * expect all bop_alloc'ed memory to be allocated in 8k chunks.
13188 13179 * We need to hold back enough space for the hblk1's which
13189 13180 * we'll allocate next.
13190 13181 */
13191 13182 hblk8_bound = size - (nhblk1 * hme1blk_sz) - hme8blk_sz;
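	/*
	 * hblk8_bound leaves room at the end of the region for the nhblk1
	 * hme1blk's carved out below, plus one hme8blk so that the final
	 * loop iteration (i == hblk8_bound) still fits.
	 */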
13192 13183 for (i = 0; i <= hblk8_bound; i += hme8blk_sz, j++) {
13193 13184 hmeblkp = (struct hme_blk *)addr;
13194 13185 addr += hme8blk_sz;
13195 13186 hmeblkp->hblk_nuc_bit = 1;
13196 13187 hmeblkp->hblk_nextpa = cached_va_to_pa((caddr_t)hmeblkp);
13197 13188 }
13198 13189 nucleus_hblk8.len = j;
13199 13190 ASSERT(j >= nhblk8);
13200 13191 SFMMU_STAT_ADD(sf_hblk8_ncreate, j);
13201 13192
13202 13193 nucleus_hblk1.list = (void *)addr;
13203 13194 nucleus_hblk1.index = 0;
13204 13195 for (; i <= (size - hme1blk_sz); i += hme1blk_sz, k++) {
13205 13196 hmeblkp = (struct hme_blk *)addr;
13206 13197 addr += hme1blk_sz;
13207 13198 hmeblkp->hblk_nuc_bit = 1;
13208 13199 hmeblkp->hblk_nextpa = cached_va_to_pa((caddr_t)hmeblkp);
13209 13200 }
13210 13201 ASSERT(k >= nhblk1);
13211 13202 nucleus_hblk1.len = k;
13212 13203 SFMMU_STAT_ADD(sf_hblk1_ncreate, k);
13213 13204 }
13214 13205
13215 13206 /*
13216 13207 * This function is currently not supported on this platform. For what
13217 13208 * it's supposed to do, see hat.c and hat_srmmu.c
13218 13209 */
13219 13210 /* ARGSUSED */
13220 13211 faultcode_t
13221 13212 hat_softlock(struct hat *hat, caddr_t addr, size_t *lenp, page_t **ppp,
13222 13213 uint_t flags)
13223 13214 {
13224 13215 return (FC_NOSUPPORT);
13225 13216 }
13226 13217
13227 13218 /*
13228 13219  * Searches the mapping list of the page for a mapping of the same size. If none
13229 13220  * is found, the corresponding bit is cleared in the p_index field. When large
13230 13221 * pages are more prevalent in the system, we can maintain the mapping list
13231 13222 * in order and we don't have to traverse the list each time. Just check the
13232 13223 * next and prev entries, and if both are of different size, we clear the bit.
13233 13224 */
13234 13225 static void
13235 13226 sfmmu_rm_large_mappings(page_t *pp, int ttesz)
13236 13227 {
13237 13228 struct sf_hment *sfhmep;
13238 13229 int index;
13239 13230 pgcnt_t npgs;
13240 13231
13241 13232 ASSERT(ttesz > TTE8K);
13242 13233
13243 13234 ASSERT(sfmmu_mlist_held(pp));
13244 13235
13245 13236 ASSERT(PP_ISMAPPED_LARGE(pp));
13246 13237
13247 13238 /*
13248 13239 	 * Traverse the mapping list looking for another mapping of the same
13249 13240 	 * size, since we only want to clear the index field if all mappings
13250 13241 	 * of that size are gone.
13251 13242 */
13252 13243
13253 13244 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) {
13254 13245 if (IS_PAHME(sfhmep))
13255 13246 continue;
13256 13247 if (hme_size(sfhmep) == ttesz) {
13257 13248 /*
13258 13249 * another mapping of the same size. don't clear index.
13259 13250 */
13260 13251 return;
13261 13252 }
13262 13253 }
13263 13254
13264 13255 /*
13265 13256 * Clear the p_index bit for large page.
13266 13257 */
13267 13258 index = PAGESZ_TO_INDEX(ttesz);
13268 13259 npgs = TTEPAGES(ttesz);
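	/*
	 * For example, a 4M mapping on 8K base pages spans
	 * TTEPAGES(TTE4M) == 512 constituent pages, and the loop below
	 * clears the TTE4M index bit in each of their page_t's.
	 */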
13269 13260 while (npgs-- > 0) {
13270 13261 ASSERT(pp->p_index & index);
13271 13262 pp->p_index &= ~index;
13272 13263 pp = PP_PAGENEXT(pp);
13273 13264 }
13274 13265 }
13275 13266
13276 13267 /*
13277 13268 * return supported features
13278 13269 */
13279 13270 /* ARGSUSED */
13280 13271 int
13281 13272 hat_supported(enum hat_features feature, void *arg)
13282 13273 {
13283 13274 switch (feature) {
13284 13275 case HAT_SHARED_PT:
13285 13276 case HAT_DYNAMIC_ISM_UNMAP:
13286 13277 case HAT_VMODSORT:
13287 13278 return (1);
13288 13279 case HAT_SHARED_REGIONS:
13289 13280 if (shctx_on)
13290 13281 return (1);
13291 13282 else
13292 13283 return (0);
13293 13284 default:
13294 13285 return (0);
13295 13286 }
13296 13287 }
13297 13288
13298 13289 void
13299 13290 hat_enter(struct hat *hat)
13300 13291 {
13301 13292 hatlock_t *hatlockp;
13302 13293
13303 13294 if (hat != ksfmmup) {
13304 13295 hatlockp = TSB_HASH(hat);
13305 13296 mutex_enter(HATLOCK_MUTEXP(hatlockp));
13306 13297 }
13307 13298 }
13308 13299
13309 13300 void
13310 13301 hat_exit(struct hat *hat)
13311 13302 {
13312 13303 hatlock_t *hatlockp;
13313 13304
13314 13305 if (hat != ksfmmup) {
13315 13306 hatlockp = TSB_HASH(hat);
13316 13307 mutex_exit(HATLOCK_MUTEXP(hatlockp));
13317 13308 }
13318 13309 }
13319 13310
13320 13311 /*ARGSUSED*/
13321 13312 void
13322 13313 hat_reserve(struct as *as, caddr_t addr, size_t len)
13323 13314 {
13324 13315 }
13325 13316
13326 13317 static void
13327 13318 hat_kstat_init(void)
13328 13319 {
13329 13320 kstat_t *ksp;
13330 13321
13331 13322 ksp = kstat_create("unix", 0, "sfmmu_global_stat", "hat",
13332 13323 KSTAT_TYPE_RAW, sizeof (struct sfmmu_global_stat),
13333 13324 KSTAT_FLAG_VIRTUAL);
13334 13325 if (ksp) {
13335 13326 ksp->ks_data = (void *) &sfmmu_global_stat;
13336 13327 kstat_install(ksp);
13337 13328 }
13338 13329 ksp = kstat_create("unix", 0, "sfmmu_tsbsize_stat", "hat",
13339 13330 KSTAT_TYPE_RAW, sizeof (struct sfmmu_tsbsize_stat),
13340 13331 KSTAT_FLAG_VIRTUAL);
13341 13332 if (ksp) {
13342 13333 ksp->ks_data = (void *) &sfmmu_tsbsize_stat;
13343 13334 kstat_install(ksp);
13344 13335 }
13345 13336 ksp = kstat_create("unix", 0, "sfmmu_percpu_stat", "hat",
13346 13337 KSTAT_TYPE_RAW, sizeof (struct sfmmu_percpu_stat) * NCPU,
13347 13338 KSTAT_FLAG_WRITABLE);
13348 13339 if (ksp) {
13349 13340 ksp->ks_update = sfmmu_kstat_percpu_update;
13350 13341 kstat_install(ksp);
13351 13342 }
13352 13343 }
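
/*
 * A minimal userland sketch (not part of this file) of how the raw kstats
 * created above could be read with libkstat; interpreting ks_data further
 * requires the kernel's struct sfmmu_global_stat definition.
 *
 *	#include <kstat.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		kstat_ctl_t *kc = kstat_open();
 *		kstat_t *ksp;
 *
 *		if (kc == NULL)
 *			return (1);
 *		ksp = kstat_lookup(kc, "unix", 0, "sfmmu_global_stat");
 *		if (ksp != NULL && kstat_read(kc, ksp, NULL) != -1)
 *			(void) printf("%lu bytes of raw data\n",
 *			    (ulong_t)ksp->ks_data_size);
 *		(void) kstat_close(kc);
 *		return (0);
 *	}
 */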
13353 13344
13354 13345 /* ARGSUSED */
13355 13346 static int
13356 13347 sfmmu_kstat_percpu_update(kstat_t *ksp, int rw)
13357 13348 {
13358 13349 struct sfmmu_percpu_stat *cpu_kstat = ksp->ks_data;
13359 13350 struct tsbmiss *tsbm = tsbmiss_area;
13360 13351 struct kpmtsbm *kpmtsbm = kpmtsbm_area;
13361 13352 int i;
13362 13353
13363 13354 ASSERT(cpu_kstat);
13364 13355 if (rw == KSTAT_READ) {
13365 13356 for (i = 0; i < NCPU; cpu_kstat++, tsbm++, kpmtsbm++, i++) {
13366 13357 cpu_kstat->sf_itlb_misses = 0;
13367 13358 cpu_kstat->sf_dtlb_misses = 0;
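			/*
			 * Protection traps are reported separately below as
			 * sf_umod_faults / sf_kmod_faults, so they are
			 * subtracted out of the TSB miss totals computed next.
			 */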
13368 13359 cpu_kstat->sf_utsb_misses = tsbm->utsb_misses -
13369 13360 tsbm->uprot_traps;
13370 13361 cpu_kstat->sf_ktsb_misses = tsbm->ktsb_misses +
13371 13362 kpmtsbm->kpm_tsb_misses - tsbm->kprot_traps;
13372 13363 cpu_kstat->sf_tsb_hits = 0;
13373 13364 cpu_kstat->sf_umod_faults = tsbm->uprot_traps;
13374 13365 cpu_kstat->sf_kmod_faults = tsbm->kprot_traps;
13375 13366 }
13376 13367 } else {
13377 13368 /* KSTAT_WRITE is used to clear stats */
13378 13369 for (i = 0; i < NCPU; tsbm++, kpmtsbm++, i++) {
13379 13370 tsbm->utsb_misses = 0;
13380 13371 tsbm->ktsb_misses = 0;
13381 13372 tsbm->uprot_traps = 0;
13382 13373 tsbm->kprot_traps = 0;
13383 13374 kpmtsbm->kpm_dtlb_misses = 0;
13384 13375 kpmtsbm->kpm_tsb_misses = 0;
13385 13376 }
13386 13377 }
13387 13378 return (0);
13388 13379 }
13389 13380
13390 13381 #ifdef DEBUG
13391 13382
13392 13383 tte_t *gorig[NCPU], *gcur[NCPU], *gnew[NCPU];
13393 13384
13394 13385 /*
13395 13386 * A tte checker. *orig_old is the value we read before cas.
13396 13387 * *cur is the value returned by cas.
13397 13388 * *new is the desired value when we do the cas.
13398 13389 *
13399 13390 * *hmeblkp is currently unused.
13400 13391 */
13401 13392
13402 13393 /* ARGSUSED */
13403 13394 void
13404 13395 chk_tte(tte_t *orig_old, tte_t *cur, tte_t *new, struct hme_blk *hmeblkp)
13405 13396 {
13406 13397 pfn_t i, j, k;
13407 13398 int cpuid = CPU->cpu_id;
13408 13399
13409 13400 gorig[cpuid] = orig_old;
13410 13401 gcur[cpuid] = cur;
13411 13402 gnew[cpuid] = new;
13412 13403
13413 13404 #ifdef lint
13414 13405 hmeblkp = hmeblkp;
13415 13406 #endif
13416 13407
13417 13408 if (TTE_IS_VALID(orig_old)) {
13418 13409 if (TTE_IS_VALID(cur)) {
13419 13410 i = TTE_TO_TTEPFN(orig_old);
13420 13411 j = TTE_TO_TTEPFN(cur);
13421 13412 k = TTE_TO_TTEPFN(new);
13422 13413 if (i != j) {
13423 13414 /* remap error? */
13424 13415 panic("chk_tte: bad pfn, 0x%lx, 0x%lx", i, j);
13425 13416 }
13426 13417
13427 13418 if (i != k) {
13428 13419 /* remap error? */
13429 13420 panic("chk_tte: bad pfn2, 0x%lx, 0x%lx", i, k);
13430 13421 }
13431 13422 } else {
13432 13423 if (TTE_IS_VALID(new)) {
13433 13424 panic("chk_tte: invalid cur? ");
13434 13425 }
13435 13426
13436 13427 i = TTE_TO_TTEPFN(orig_old);
13437 13428 k = TTE_TO_TTEPFN(new);
13438 13429 if (i != k) {
13439 13430 panic("chk_tte: bad pfn3, 0x%lx, 0x%lx", i, k);
13440 13431 }
13441 13432 }
13442 13433 } else {
13443 13434 if (TTE_IS_VALID(cur)) {
13444 13435 j = TTE_TO_TTEPFN(cur);
13445 13436 if (TTE_IS_VALID(new)) {
13446 13437 k = TTE_TO_TTEPFN(new);
13447 13438 if (j != k) {
13448 13439 panic("chk_tte: bad pfn4, 0x%lx, 0x%lx",
13449 13440 j, k);
13450 13441 }
13451 13442 } else {
13452 13443 panic("chk_tte: why here?");
13453 13444 }
13454 13445 } else {
13455 13446 if (!TTE_IS_VALID(new)) {
13456 13447 panic("chk_tte: why here2 ?");
13457 13448 }
13458 13449 }
13459 13450 }
13460 13451 }
13461 13452
13462 13453 #endif /* DEBUG */
13463 13454
13464 13455 extern void prefetch_tsbe_read(struct tsbe *);
13465 13456 extern void prefetch_tsbe_write(struct tsbe *);
13466 13457
13467 13458
13468 13459 /*
13469 13460 * We want to prefetch 7 cache lines ahead for our read prefetch. This gives
13470 13461 * us optimal performance on Cheetah+. You can only have 8 outstanding
13471 13462 * prefetches at any one time, so we opted for 7 read prefetches and 1 write
13472 13463  * prefetch to make the best use of the prefetch capability.
13473 13464 */
13474 13465 #define TSBE_PREFETCH_STRIDE (7)
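/*
 * sfmmu_copy_tsb() below issues a read prefetch on every fourth entry
 * (one per 64-byte P$ line, since a TSBE is 16 bytes) and stops issuing
 * prefetches for the final 4 * (TSBE_PREFETCH_STRIDE + 1) == 32 entries,
 * presumably so that prefetches issued 7 lines ahead never run past the
 * end of the old TSB.
 */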
13475 13466
13476 13467 void
13477 13468 sfmmu_copy_tsb(struct tsb_info *old_tsbinfo, struct tsb_info *new_tsbinfo)
13478 13469 {
13479 13470 int old_bytes = TSB_BYTES(old_tsbinfo->tsb_szc);
13480 13471 int new_bytes = TSB_BYTES(new_tsbinfo->tsb_szc);
13481 13472 int old_entries = TSB_ENTRIES(old_tsbinfo->tsb_szc);
13482 13473 int new_entries = TSB_ENTRIES(new_tsbinfo->tsb_szc);
13483 13474 struct tsbe *old;
13484 13475 struct tsbe *new;
13485 13476 struct tsbe *new_base = (struct tsbe *)new_tsbinfo->tsb_va;
13486 13477 uint64_t va;
13487 13478 int new_offset;
13488 13479 int i;
13489 13480 int vpshift;
13490 13481 int last_prefetch;
13491 13482
13492 13483 if (old_bytes == new_bytes) {
13493 13484 bcopy(old_tsbinfo->tsb_va, new_tsbinfo->tsb_va, new_bytes);
13494 13485 } else {
13495 13486
13496 13487 /*
13497 13488 * A TSBE is 16 bytes which means there are four TSBE's per
13498 13489 * P$ line (64 bytes), thus every 4 TSBE's we prefetch.
13499 13490 */
13500 13491 old = (struct tsbe *)old_tsbinfo->tsb_va;
13501 13492 last_prefetch = old_entries - (4*(TSBE_PREFETCH_STRIDE+1));
13502 13493 for (i = 0; i < old_entries; i++, old++) {
13503 13494 if (((i & (4-1)) == 0) && (i < last_prefetch))
13504 13495 prefetch_tsbe_read(old);
13505 13496 if (!old->tte_tag.tag_invalid) {
13506 13497 /*
13507 13498 * We have a valid TTE to remap. Check the
13508 13499 * size. We won't remap 64K or 512K TTEs
13509 13500 * because they span more than one TSB entry
13510 13501 * and are indexed using an 8K virt. page.
13511 13502 * Ditto for 32M and 256M TTEs.
13512 13503 */
13513 13504 if (TTE_CSZ(&old->tte_data) == TTE64K ||
13514 13505 TTE_CSZ(&old->tte_data) == TTE512K)
13515 13506 continue;
13516 13507 if (mmu_page_sizes == max_mmu_page_sizes) {
13517 13508 if (TTE_CSZ(&old->tte_data) == TTE32M ||
13518 13509 TTE_CSZ(&old->tte_data) == TTE256M)
13519 13510 continue;
13520 13511 }
13521 13512
13522 13513 /* clear the lower 22 bits of the va */
13523 13514 va = *(uint64_t *)old << 22;
13524 13515 /* turn va into a virtual pfn */
13525 13516 va >>= 22 - TSB_START_SIZE;
13526 13517 /*
13527 13518 * or in bits from the offset in the tsb
13528 13519 * to get the real virtual pfn. These
13529 13520 * correspond to bits [21:13] in the va
13530 13521 */
13531 13522 vpshift =
13532 13523 TTE_BSZS_SHIFT(TTE_CSZ(&old->tte_data)) &
13533 13524 0x1ff;
13534 13525 va |= (i << vpshift);
13535 13526 va >>= vpshift;
13536 13527 new_offset = va & (new_entries - 1);
13537 13528 new = new_base + new_offset;
13538 13529 prefetch_tsbe_write(new);
13539 13530 *new = *old;
13540 13531 }
13541 13532 }
13542 13533 }
13543 13534 }
13544 13535
13545 13536 /*
13546 13537 * unused in sfmmu
13547 13538 */
13548 13539 void
13549 13540 hat_dump(void)
13550 13541 {
13551 13542 }
13552 13543
13553 13544 /*
13554 13545 * Called when a thread is exiting and we have switched to the kernel address
13555 13546 * space. Perform the same VM initialization resume() uses when switching
13556 13547 * processes.
13557 13548 *
13558 13549 * Note that sfmmu_load_mmustate() is currently a no-op for kernel threads, but
13559 13550 * we call it anyway in case the semantics change in the future.
13560 13551 */
13561 13552 /*ARGSUSED*/
13562 13553 void
13563 13554 hat_thread_exit(kthread_t *thd)
13564 13555 {
13565 13556 uint_t pgsz_cnum;
13566 13557 uint_t pstate_save;
13567 13558
13568 13559 ASSERT(thd->t_procp->p_as == &kas);
13569 13560
13570 13561 pgsz_cnum = KCONTEXT;
13571 13562 #ifdef sun4u
13572 13563 pgsz_cnum |= (ksfmmup->sfmmu_cext << CTXREG_EXT_SHIFT);
13573 13564 #endif
13574 13565
13575 13566 /*
13576 13567 * Note that sfmmu_load_mmustate() is currently a no-op for
13577 13568 * kernel threads. We need to disable interrupts here,
13578 13569 * simply because otherwise sfmmu_load_mmustate() would panic
13579 13570 * if the caller does not disable interrupts.
13580 13571 */
13581 13572 pstate_save = sfmmu_disable_intrs();
13582 13573
13583 13574 /* Compatibility Note: hw takes care of MMU_SCONTEXT1 */
13584 13575 sfmmu_setctx_sec(pgsz_cnum);
13585 13576 sfmmu_load_mmustate(ksfmmup);
13586 13577 sfmmu_enable_intrs(pstate_save);
13587 13578 }
13588 13579
13589 13580
13590 13581 /*
13591 13582 * SRD support
13592 13583 */
13593 13584 #define SRD_HASH_FUNCTION(vp) (((((uintptr_t)(vp)) >> 4) ^ \
13594 13585 (((uintptr_t)(vp)) >> 11)) & \
13595 13586 srd_hashmask)
13596 13587
13597 13588 /*
13598 13589 * Attach the process to the srd struct associated with the exec vnode
13599 13590 * from which the process is started.
13600 13591 */
13601 13592 void
13602 13593 hat_join_srd(struct hat *sfmmup, vnode_t *evp)
13603 13594 {
13604 13595 uint_t hash = SRD_HASH_FUNCTION(evp);
13605 13596 sf_srd_t *srdp;
13606 13597 sf_srd_t *newsrdp;
13607 13598
13608 13599 ASSERT(sfmmup != ksfmmup);
13609 13600 ASSERT(sfmmup->sfmmu_srdp == NULL);
13610 13601
13611 13602 if (!shctx_on) {
13612 13603 return;
13613 13604 }
13614 13605
13615 13606 VN_HOLD(evp);
13616 13607
13617 13608 if (srd_buckets[hash].srdb_srdp != NULL) {
13618 13609 mutex_enter(&srd_buckets[hash].srdb_lock);
13619 13610 for (srdp = srd_buckets[hash].srdb_srdp; srdp != NULL;
13620 13611 srdp = srdp->srd_hash) {
13621 13612 if (srdp->srd_evp == evp) {
13622 13613 ASSERT(srdp->srd_refcnt >= 0);
13623 13614 sfmmup->sfmmu_srdp = srdp;
13624 13615 atomic_inc_32(
13625 13616 (volatile uint_t *)&srdp->srd_refcnt);
13626 13617 mutex_exit(&srd_buckets[hash].srdb_lock);
13627 13618 return;
13628 13619 }
13629 13620 }
13630 13621 mutex_exit(&srd_buckets[hash].srdb_lock);
13631 13622 }
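	/*
	 * No matching srd was found (or the bucket was empty).  Allocate a
	 * candidate srd outside the bucket lock, since KM_SLEEP may block,
	 * then re-scan the bucket under the lock and discard the candidate
	 * if another thread inserted a matching srd in the meantime.
	 */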
13632 13623 newsrdp = kmem_cache_alloc(srd_cache, KM_SLEEP);
13633 13624 ASSERT(newsrdp->srd_next_ismrid == 0 && newsrdp->srd_next_hmerid == 0);
13634 13625
13635 13626 newsrdp->srd_evp = evp;
13636 13627 newsrdp->srd_refcnt = 1;
13637 13628 newsrdp->srd_hmergnfree = NULL;
13638 13629 newsrdp->srd_ismrgnfree = NULL;
13639 13630
13640 13631 mutex_enter(&srd_buckets[hash].srdb_lock);
13641 13632 for (srdp = srd_buckets[hash].srdb_srdp; srdp != NULL;
13642 13633 srdp = srdp->srd_hash) {
13643 13634 if (srdp->srd_evp == evp) {
13644 13635 ASSERT(srdp->srd_refcnt >= 0);
13645 13636 sfmmup->sfmmu_srdp = srdp;
13646 13637 atomic_inc_32((volatile uint_t *)&srdp->srd_refcnt);
13647 13638 mutex_exit(&srd_buckets[hash].srdb_lock);
13648 13639 kmem_cache_free(srd_cache, newsrdp);
13649 13640 return;
13650 13641 }
13651 13642 }
13652 13643 newsrdp->srd_hash = srd_buckets[hash].srdb_srdp;
13653 13644 srd_buckets[hash].srdb_srdp = newsrdp;
13654 13645 sfmmup->sfmmu_srdp = newsrdp;
13655 13646
13656 13647 mutex_exit(&srd_buckets[hash].srdb_lock);
13657 13648
13658 13649 }
13659 13650
13660 13651 static void
13661 13652 sfmmu_leave_srd(sfmmu_t *sfmmup)
13662 13653 {
13663 13654 vnode_t *evp;
13664 13655 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
13665 13656 uint_t hash;
13666 13657 sf_srd_t **prev_srdpp;
13667 13658 sf_region_t *rgnp;
13668 13659 sf_region_t *nrgnp;
13669 13660 #ifdef DEBUG
13670 13661 int rgns = 0;
13671 13662 #endif
13672 13663 int i;
13673 13664
13674 13665 ASSERT(sfmmup != ksfmmup);
13675 13666 ASSERT(srdp != NULL);
13676 13667 ASSERT(srdp->srd_refcnt > 0);
13677 13668 ASSERT(sfmmup->sfmmu_scdp == NULL);
13678 13669 ASSERT(sfmmup->sfmmu_free == 1);
13679 13670
13680 13671 sfmmup->sfmmu_srdp = NULL;
13681 13672 evp = srdp->srd_evp;
13682 13673 ASSERT(evp != NULL);
13683 13674 if (atomic_dec_32_nv((volatile uint_t *)&srdp->srd_refcnt)) {
13684 13675 VN_RELE(evp);
13685 13676 return;
13686 13677 }
13687 13678
13688 13679 hash = SRD_HASH_FUNCTION(evp);
13689 13680 mutex_enter(&srd_buckets[hash].srdb_lock);
13690 13681 for (prev_srdpp = &srd_buckets[hash].srdb_srdp;
13691 13682 (srdp = *prev_srdpp) != NULL; prev_srdpp = &srdp->srd_hash) {
13692 13683 if (srdp->srd_evp == evp) {
13693 13684 break;
13694 13685 }
13695 13686 }
13696 13687 if (srdp == NULL || srdp->srd_refcnt) {
13697 13688 mutex_exit(&srd_buckets[hash].srdb_lock);
13698 13689 VN_RELE(evp);
13699 13690 return;
13700 13691 }
13701 13692 *prev_srdpp = srdp->srd_hash;
13702 13693 mutex_exit(&srd_buckets[hash].srdb_lock);
13703 13694
13704 13695 ASSERT(srdp->srd_refcnt == 0);
13705 13696 VN_RELE(evp);
13706 13697
13707 13698 #ifdef DEBUG
13708 13699 for (i = 0; i < SFMMU_MAX_REGION_BUCKETS; i++) {
13709 13700 ASSERT(srdp->srd_rgnhash[i] == NULL);
13710 13701 }
13711 13702 #endif /* DEBUG */
13712 13703
13713 13704 	/* free each hme region in the srd */
13714 13705 for (rgnp = srdp->srd_hmergnfree; rgnp != NULL; rgnp = nrgnp) {
13715 13706 nrgnp = rgnp->rgn_next;
13716 13707 ASSERT(rgnp->rgn_id < srdp->srd_next_hmerid);
13717 13708 ASSERT(rgnp->rgn_refcnt == 0);
13718 13709 ASSERT(rgnp->rgn_sfmmu_head == NULL);
13719 13710 ASSERT(rgnp->rgn_flags & SFMMU_REGION_FREE);
13720 13711 ASSERT(rgnp->rgn_hmeflags == 0);
13721 13712 ASSERT(srdp->srd_hmergnp[rgnp->rgn_id] == rgnp);
13722 13713 #ifdef DEBUG
13723 13714 for (i = 0; i < MMU_PAGE_SIZES; i++) {
13724 13715 ASSERT(rgnp->rgn_ttecnt[i] == 0);
13725 13716 }
13726 13717 rgns++;
13727 13718 #endif /* DEBUG */
13728 13719 kmem_cache_free(region_cache, rgnp);
13729 13720 }
13730 13721 ASSERT(rgns == srdp->srd_next_hmerid);
13731 13722
13732 13723 #ifdef DEBUG
13733 13724 rgns = 0;
13734 13725 #endif
13735 13726 	/* free each ism region in the srd */
13736 13727 for (rgnp = srdp->srd_ismrgnfree; rgnp != NULL; rgnp = nrgnp) {
13737 13728 nrgnp = rgnp->rgn_next;
13738 13729 ASSERT(rgnp->rgn_id < srdp->srd_next_ismrid);
13739 13730 ASSERT(rgnp->rgn_refcnt == 0);
13740 13731 ASSERT(rgnp->rgn_sfmmu_head == NULL);
13741 13732 ASSERT(rgnp->rgn_flags & SFMMU_REGION_FREE);
13742 13733 ASSERT(srdp->srd_ismrgnp[rgnp->rgn_id] == rgnp);
13743 13734 #ifdef DEBUG
13744 13735 for (i = 0; i < MMU_PAGE_SIZES; i++) {
13745 13736 ASSERT(rgnp->rgn_ttecnt[i] == 0);
13746 13737 }
13747 13738 rgns++;
13748 13739 #endif /* DEBUG */
13749 13740 kmem_cache_free(region_cache, rgnp);
13750 13741 }
13751 13742 ASSERT(rgns == srdp->srd_next_ismrid);
13752 13743 ASSERT(srdp->srd_ismbusyrgns == 0);
13753 13744 ASSERT(srdp->srd_hmebusyrgns == 0);
13754 13745
13755 13746 srdp->srd_next_ismrid = 0;
13756 13747 srdp->srd_next_hmerid = 0;
13757 13748
13758 13749 bzero((void *)srdp->srd_ismrgnp,
13759 13750 sizeof (sf_region_t *) * SFMMU_MAX_ISM_REGIONS);
13760 13751 bzero((void *)srdp->srd_hmergnp,
13761 13752 sizeof (sf_region_t *) * SFMMU_MAX_HME_REGIONS);
13762 13753
13763 13754 ASSERT(srdp->srd_scdp == NULL);
13764 13755 kmem_cache_free(srd_cache, srdp);
13765 13756 }
13766 13757
13767 13758 /* ARGSUSED */
13768 13759 static int
13769 13760 sfmmu_srdcache_constructor(void *buf, void *cdrarg, int kmflags)
13770 13761 {
13771 13762 sf_srd_t *srdp = (sf_srd_t *)buf;
13772 13763 bzero(buf, sizeof (*srdp));
13773 13764
13774 13765 mutex_init(&srdp->srd_mutex, NULL, MUTEX_DEFAULT, NULL);
13775 13766 mutex_init(&srdp->srd_scd_mutex, NULL, MUTEX_DEFAULT, NULL);
13776 13767 return (0);
13777 13768 }
13778 13769
13779 13770 /* ARGSUSED */
13780 13771 static void
13781 13772 sfmmu_srdcache_destructor(void *buf, void *cdrarg)
13782 13773 {
13783 13774 sf_srd_t *srdp = (sf_srd_t *)buf;
13784 13775
13785 13776 mutex_destroy(&srdp->srd_mutex);
13786 13777 mutex_destroy(&srdp->srd_scd_mutex);
13787 13778 }
13788 13779
13789 13780 /*
13790 13781 * The caller makes sure hat_join_region()/hat_leave_region() can't be called
13791 13782 * at the same time for the same process and address range. This is ensured by
13792 13783 * the fact that address space is locked as writer when a process joins the
13793 13784 * regions. Therefore there's no need to hold an srd lock during the entire
13794 13785 * execution of hat_join_region()/hat_leave_region().
13795 13786 */
13796 13787
13797 13788 #define RGN_HASH_FUNCTION(obj) (((((uintptr_t)(obj)) >> 4) ^ \
13798 13789 (((uintptr_t)(obj)) >> 11)) & \
13799 13790 srd_rgn_hashmask)
13800 13791 /*
13801 13792 * This routine implements the shared context functionality required when
13802 13793 * attaching a segment to an address space. It must be called from
13803 13794 * hat_share() for D(ISM) segments and from segvn_create() for segments
13804 13795 * with the MAP_PRIVATE and MAP_TEXT flags set. It returns a region_cookie
13805 13796 * which is saved in the private segment data for hme segments and
13806 13797 * the ism_map structure for ism segments.
13807 13798 */
13808 13799 hat_region_cookie_t
13809 -hat_join_region(struct hat *sfmmup,
13810 - caddr_t r_saddr,
13811 - size_t r_size,
13812 - void *r_obj,
13813 - u_offset_t r_objoff,
13814 - uchar_t r_perm,
13815 - uchar_t r_pgszc,
13816 - hat_rgn_cb_func_t r_cb_function,
13817 - uint_t flags)
13800 +hat_join_region(struct hat *sfmmup, caddr_t r_saddr, size_t r_size,
13801 + void *r_obj, u_offset_t r_objoff, uchar_t r_perm, uchar_t r_pgszc,
13802 + hat_rgn_cb_func_t r_cb_function, uint_t flags)
13818 13803 {
13819 13804 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
13820 13805 uint_t rhash;
13821 13806 uint_t rid;
13822 13807 hatlock_t *hatlockp;
13823 13808 sf_region_t *rgnp;
13824 13809 sf_region_t *new_rgnp = NULL;
13825 13810 int i;
13826 13811 uint16_t *nextidp;
13827 13812 sf_region_t **freelistp;
13828 13813 int maxids;
13829 13814 sf_region_t **rarrp;
13830 13815 uint16_t *busyrgnsp;
13831 13816 ulong_t rttecnt;
13832 13817 uchar_t tteflag;
13833 13818 uchar_t r_type = flags & HAT_REGION_TYPE_MASK;
13834 13819 int text = (r_type == HAT_REGION_TEXT);
13835 13820
13836 13821 if (srdp == NULL || r_size == 0) {
13837 13822 return (HAT_INVALID_REGION_COOKIE);
13838 13823 }
13839 13824
13840 13825 ASSERT(sfmmup != ksfmmup);
13841 13826 ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as));
13842 13827 ASSERT(srdp->srd_refcnt > 0);
13843 13828 ASSERT(!(flags & ~HAT_REGION_TYPE_MASK));
13844 13829 ASSERT(flags == HAT_REGION_TEXT || flags == HAT_REGION_ISM);
13845 13830 ASSERT(r_pgszc < mmu_page_sizes);
13846 13831 if (!IS_P2ALIGNED(r_saddr, TTEBYTES(r_pgszc)) ||
13847 13832 !IS_P2ALIGNED(r_size, TTEBYTES(r_pgszc))) {
13848 13833 panic("hat_join_region: region addr or size is not aligned\n");
13849 13834 }
13850 13835
13851 13836
13852 13837 r_type = (r_type == HAT_REGION_ISM) ? SFMMU_REGION_ISM :
13853 13838 SFMMU_REGION_HME;
13854 13839 /*
13855 13840  * Currently we only support shared hmes for the read-only main text
13856 13841 * region.
13857 13842 */
13858 13843 if (r_type == SFMMU_REGION_HME && ((r_obj != srdp->srd_evp) ||
13859 13844 (r_perm & PROT_WRITE))) {
13860 13845 return (HAT_INVALID_REGION_COOKIE);
13861 13846 }
13862 13847
13863 13848 rhash = RGN_HASH_FUNCTION(r_obj);
13864 13849
13865 13850 if (r_type == SFMMU_REGION_ISM) {
13866 13851 nextidp = &srdp->srd_next_ismrid;
13867 13852 freelistp = &srdp->srd_ismrgnfree;
13868 13853 maxids = SFMMU_MAX_ISM_REGIONS;
13869 13854 rarrp = srdp->srd_ismrgnp;
13870 13855 busyrgnsp = &srdp->srd_ismbusyrgns;
13871 13856 } else {
13872 13857 nextidp = &srdp->srd_next_hmerid;
13873 13858 freelistp = &srdp->srd_hmergnfree;
13874 13859 maxids = SFMMU_MAX_HME_REGIONS;
13875 13860 rarrp = srdp->srd_hmergnp;
13876 13861 busyrgnsp = &srdp->srd_hmebusyrgns;
13877 13862 }
13878 13863
13879 13864 mutex_enter(&srdp->srd_mutex);
13880 13865
13881 13866 for (rgnp = srdp->srd_rgnhash[rhash]; rgnp != NULL;
13882 13867 rgnp = rgnp->rgn_hash) {
13883 13868 if (rgnp->rgn_saddr == r_saddr && rgnp->rgn_size == r_size &&
13884 13869 rgnp->rgn_obj == r_obj && rgnp->rgn_objoff == r_objoff &&
13885 13870 rgnp->rgn_perm == r_perm && rgnp->rgn_pgszc == r_pgszc) {
13886 13871 break;
13887 13872 }
13888 13873 }
13889 13874
13890 13875 rfound:
13891 13876 if (rgnp != NULL) {
13892 13877 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == r_type);
13893 13878 ASSERT(rgnp->rgn_cb_function == r_cb_function);
13894 13879 ASSERT(rgnp->rgn_refcnt >= 0);
13895 13880 rid = rgnp->rgn_id;
13896 13881 ASSERT(rid < maxids);
13897 13882 ASSERT(rarrp[rid] == rgnp);
13898 13883 ASSERT(rid < *nextidp);
13899 13884 atomic_inc_32((volatile uint_t *)&rgnp->rgn_refcnt);
13900 13885 mutex_exit(&srdp->srd_mutex);
13901 13886 if (new_rgnp != NULL) {
13902 13887 kmem_cache_free(region_cache, new_rgnp);
13903 13888 }
13904 13889 if (r_type == SFMMU_REGION_HME) {
13905 13890 int myjoin =
13906 13891 (sfmmup == astosfmmu(curthread->t_procp->p_as));
13907 13892
13908 13893 sfmmu_link_to_hmeregion(sfmmup, rgnp);
13909 13894 /*
13910 13895 * bitmap should be updated after linking sfmmu on
13911 13896 * region list so that pageunload() doesn't skip
13912 13897 			 * TSB/TLB flush. As soon as the bitmap is updated,
13913 13898 			 * another thread in this process can start accessing
13914 13899 			 * this region.
13915 13900 */
13916 13901 /*
13917 13902 * Normally ttecnt accounting is done as part of
13918 13903 * pagefault handling. But a process may not take any
13919 13904 * pagefaults on shared hmeblks created by some other
13920 13905 			 * process. To compensate for this, assume that the
13921 13906 * entire region will end up faulted in using
13922 13907 * the region's pagesize.
13923 13908 *
13924 13909 */
13925 13910 if (r_pgszc > TTE8K) {
13926 13911 tteflag = 1 << r_pgszc;
13927 13912 if (disable_large_pages & tteflag) {
13928 13913 tteflag = 0;
13929 13914 }
13930 13915 } else {
13931 13916 tteflag = 0;
13932 13917 }
13933 13918 if (tteflag && !(sfmmup->sfmmu_rtteflags & tteflag)) {
13934 13919 hatlockp = sfmmu_hat_enter(sfmmup);
13935 13920 sfmmup->sfmmu_rtteflags |= tteflag;
13936 13921 sfmmu_hat_exit(hatlockp);
13937 13922 }
13938 13923 hatlockp = sfmmu_hat_enter(sfmmup);
13939 13924
13940 13925 /*
13941 13926 * Preallocate 1/4 of ttecnt's in 8K TSB for >= 4M
13942 13927 * region to allow for large page allocation failure.
13943 13928 */
13944 13929 if (r_pgszc >= TTE4M) {
13945 13930 sfmmup->sfmmu_tsb0_4minflcnt +=
13946 13931 r_size >> (TTE_PAGE_SHIFT(TTE8K) + 2);
13947 13932 }
13948 13933
13949 13934 /* update sfmmu_ttecnt with the shme rgn ttecnt */
13950 13935 rttecnt = r_size >> TTE_PAGE_SHIFT(r_pgszc);
13951 13936 atomic_add_long(&sfmmup->sfmmu_ttecnt[r_pgszc],
13952 13937 rttecnt);
13953 13938
13954 13939 if (text && r_pgszc >= TTE4M &&
13955 13940 (tteflag || ((disable_large_pages >> TTE4M) &
13956 13941 ((1 << (r_pgszc - TTE4M + 1)) - 1))) &&
13957 13942 !SFMMU_FLAGS_ISSET(sfmmup, HAT_4MTEXT_FLAG)) {
13958 13943 SFMMU_FLAGS_SET(sfmmup, HAT_4MTEXT_FLAG);
13959 13944 }
13960 13945
13961 13946 sfmmu_hat_exit(hatlockp);
13962 13947 /*
13963 13948 * On Panther we need to make sure TLB is programmed
13964 13949 * to accept 32M/256M pages. Call
13965 13950 * sfmmu_check_page_sizes() now to make sure TLB is
13966 13951 * setup before making hmeregions visible to other
13967 13952 * threads.
13968 13953 */
13969 13954 sfmmu_check_page_sizes(sfmmup, 1);
13970 13955 hatlockp = sfmmu_hat_enter(sfmmup);
13971 13956 SF_RGNMAP_ADD(sfmmup->sfmmu_hmeregion_map, rid);
13972 13957
13973 13958 /*
13974 13959 			 * If the context is invalid, the tsb miss exception
13975 13960 			 * code will call sfmmu_check_page_sizes() and update
13976 13961 			 * the tsbmiss area later.
13977 13962 */
13978 13963 kpreempt_disable();
13979 13964 if (myjoin &&
13980 13965 (sfmmup->sfmmu_ctxs[CPU_MMU_IDX(CPU)].cnum
13981 13966 != INVALID_CONTEXT)) {
13982 13967 struct tsbmiss *tsbmp;
13983 13968
13984 13969 tsbmp = &tsbmiss_area[CPU->cpu_id];
13985 13970 ASSERT(sfmmup == tsbmp->usfmmup);
13986 13971 BT_SET(tsbmp->shmermap, rid);
13987 13972 if (r_pgszc > TTE64K) {
13988 13973 tsbmp->uhat_rtteflags |= tteflag;
13989 13974 }
13990 13975
13991 13976 }
13992 13977 kpreempt_enable();
13993 13978
13994 13979 sfmmu_hat_exit(hatlockp);
13995 13980 ASSERT((hat_region_cookie_t)((uint64_t)rid) !=
13996 13981 HAT_INVALID_REGION_COOKIE);
13997 13982 } else {
13998 13983 hatlockp = sfmmu_hat_enter(sfmmup);
13999 13984 SF_RGNMAP_ADD(sfmmup->sfmmu_ismregion_map, rid);
14000 13985 sfmmu_hat_exit(hatlockp);
14001 13986 }
14002 13987 ASSERT(rid < maxids);
14003 13988
14004 13989 if (r_type == SFMMU_REGION_ISM) {
14005 13990 sfmmu_find_scd(sfmmup);
14006 13991 }
14007 13992 return ((hat_region_cookie_t)((uint64_t)rid));
14008 13993 }
14009 13994
14010 13995 ASSERT(new_rgnp == NULL);
14011 13996
14012 13997 if (*busyrgnsp >= maxids) {
14013 13998 mutex_exit(&srdp->srd_mutex);
14014 13999 return (HAT_INVALID_REGION_COOKIE);
14015 14000 }
14016 14001
14017 14002 ASSERT(MUTEX_HELD(&srdp->srd_mutex));
14018 14003 if (*freelistp != NULL) {
14019 14004 rgnp = *freelistp;
14020 14005 *freelistp = rgnp->rgn_next;
14021 14006 ASSERT(rgnp->rgn_id < *nextidp);
14022 14007 ASSERT(rgnp->rgn_id < maxids);
14023 14008 ASSERT(rgnp->rgn_flags & SFMMU_REGION_FREE);
14024 14009 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK)
14025 14010 == r_type);
14026 14011 ASSERT(rarrp[rgnp->rgn_id] == rgnp);
14027 14012 ASSERT(rgnp->rgn_hmeflags == 0);
14028 14013 } else {
14029 14014 /*
14030 14015 * release local locks before memory allocation.
14031 14016 */
14032 14017 mutex_exit(&srdp->srd_mutex);
14033 14018
14034 14019 new_rgnp = kmem_cache_alloc(region_cache, KM_SLEEP);
14035 14020
14036 14021 mutex_enter(&srdp->srd_mutex);
14037 14022 for (rgnp = srdp->srd_rgnhash[rhash]; rgnp != NULL;
14038 14023 rgnp = rgnp->rgn_hash) {
14039 14024 if (rgnp->rgn_saddr == r_saddr &&
14040 14025 rgnp->rgn_size == r_size &&
14041 14026 rgnp->rgn_obj == r_obj &&
14042 14027 rgnp->rgn_objoff == r_objoff &&
14043 14028 rgnp->rgn_perm == r_perm &&
14044 14029 rgnp->rgn_pgszc == r_pgszc) {
14045 14030 break;
14046 14031 }
14047 14032 }
14048 14033 if (rgnp != NULL) {
14049 14034 goto rfound;
14050 14035 }
14051 14036
14052 14037 if (*nextidp >= maxids) {
14053 14038 mutex_exit(&srdp->srd_mutex);
14054 14039 goto fail;
14055 14040 }
14056 14041 rgnp = new_rgnp;
14057 14042 new_rgnp = NULL;
14058 14043 rgnp->rgn_id = (*nextidp)++;
14059 14044 ASSERT(rgnp->rgn_id < maxids);
14060 14045 ASSERT(rarrp[rgnp->rgn_id] == NULL);
14061 14046 rarrp[rgnp->rgn_id] = rgnp;
14062 14047 }
14063 14048
14064 14049 ASSERT(rgnp->rgn_sfmmu_head == NULL);
14065 14050 ASSERT(rgnp->rgn_hmeflags == 0);
14066 14051 #ifdef DEBUG
14067 14052 for (i = 0; i < MMU_PAGE_SIZES; i++) {
14068 14053 ASSERT(rgnp->rgn_ttecnt[i] == 0);
14069 14054 }
14070 14055 #endif
14071 14056 rgnp->rgn_saddr = r_saddr;
14072 14057 rgnp->rgn_size = r_size;
14073 14058 rgnp->rgn_obj = r_obj;
14074 14059 rgnp->rgn_objoff = r_objoff;
14075 14060 rgnp->rgn_perm = r_perm;
14076 14061 rgnp->rgn_pgszc = r_pgszc;
14077 14062 rgnp->rgn_flags = r_type;
14078 14063 rgnp->rgn_refcnt = 0;
14079 14064 rgnp->rgn_cb_function = r_cb_function;
14080 14065 rgnp->rgn_hash = srdp->srd_rgnhash[rhash];
14081 14066 srdp->srd_rgnhash[rhash] = rgnp;
14082 14067 (*busyrgnsp)++;
14083 14068 ASSERT(*busyrgnsp <= maxids);
14084 14069 goto rfound;
14085 14070
14086 14071 fail:
14087 14072 ASSERT(new_rgnp != NULL);
14088 14073 kmem_cache_free(region_cache, new_rgnp);
14089 14074 return (HAT_INVALID_REGION_COOKIE);
14090 14075 }
14091 14076
14092 14077 /*
14093 14078 * This function implements the shared context functionality required
14094 14079 * when detaching a segment from an address space. It must be called
14095 14080 * from hat_unshare() for all D(ISM) segments and from segvn_unmap(),
14096 14081 * for segments with a valid region_cookie.
14097 14082 * It will also be called from all seg_vn routines which change a
14098 14083 * segment's attributes such as segvn_setprot(), segvn_setpagesize(),
14099 14084 * segvn_clrszc() & segvn_advise(), as well as in the case of COW fault
14100 14085 * from segvn_fault().
14101 14086 */
14102 14087 void
14103 14088 hat_leave_region(struct hat *sfmmup, hat_region_cookie_t rcookie, uint_t flags)
14104 14089 {
14105 14090 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
14106 14091 sf_scd_t *scdp;
14107 14092 uint_t rhash;
14108 14093 uint_t rid = (uint_t)((uint64_t)rcookie);
14109 14094 hatlock_t *hatlockp = NULL;
14110 14095 sf_region_t *rgnp;
14111 14096 sf_region_t **prev_rgnpp;
14112 14097 sf_region_t *cur_rgnp;
14113 14098 void *r_obj;
14114 14099 int i;
14115 14100 caddr_t r_saddr;
14116 14101 caddr_t r_eaddr;
14117 14102 size_t r_size;
14118 14103 uchar_t r_pgszc;
14119 14104 uchar_t r_type = flags & HAT_REGION_TYPE_MASK;
14120 14105
14121 14106 ASSERT(sfmmup != ksfmmup);
14122 14107 ASSERT(srdp != NULL);
14123 14108 ASSERT(srdp->srd_refcnt > 0);
14124 14109 ASSERT(!(flags & ~HAT_REGION_TYPE_MASK));
14125 14110 ASSERT(flags == HAT_REGION_TEXT || flags == HAT_REGION_ISM);
14126 14111 ASSERT(!sfmmup->sfmmu_free || sfmmup->sfmmu_scdp == NULL);
14127 14112
14128 14113 r_type = (r_type == HAT_REGION_ISM) ? SFMMU_REGION_ISM :
14129 14114 SFMMU_REGION_HME;
14130 14115
14131 14116 if (r_type == SFMMU_REGION_ISM) {
14132 14117 ASSERT(SFMMU_IS_ISMRID_VALID(rid));
14133 14118 ASSERT(rid < SFMMU_MAX_ISM_REGIONS);
14134 14119 rgnp = srdp->srd_ismrgnp[rid];
14135 14120 } else {
14136 14121 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
14137 14122 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
14138 14123 rgnp = srdp->srd_hmergnp[rid];
14139 14124 }
14140 14125 ASSERT(rgnp != NULL);
14141 14126 ASSERT(rgnp->rgn_id == rid);
14142 14127 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == r_type);
14143 14128 ASSERT(!(rgnp->rgn_flags & SFMMU_REGION_FREE));
14144 14129 ASSERT(AS_LOCK_HELD(sfmmup->sfmmu_as));
14145 14130
14146 14131 if (sfmmup->sfmmu_free) {
14147 14132 ulong_t rttecnt;
14148 14133 r_pgszc = rgnp->rgn_pgszc;
14149 14134 r_size = rgnp->rgn_size;
14150 14135
14151 14136 ASSERT(sfmmup->sfmmu_scdp == NULL);
14152 14137 if (r_type == SFMMU_REGION_ISM) {
14153 14138 SF_RGNMAP_DEL(sfmmup->sfmmu_ismregion_map, rid);
14154 14139 } else {
14155 14140 /* update shme rgns ttecnt in sfmmu_ttecnt */
14156 14141 rttecnt = r_size >> TTE_PAGE_SHIFT(r_pgszc);
14157 14142 ASSERT(sfmmup->sfmmu_ttecnt[r_pgszc] >= rttecnt);
14158 14143
14159 14144 atomic_add_long(&sfmmup->sfmmu_ttecnt[r_pgszc],
14160 14145 -rttecnt);
14161 14146
14162 14147 SF_RGNMAP_DEL(sfmmup->sfmmu_hmeregion_map, rid);
14163 14148 }
14164 14149 } else if (r_type == SFMMU_REGION_ISM) {
14165 14150 hatlockp = sfmmu_hat_enter(sfmmup);
14166 14151 ASSERT(rid < srdp->srd_next_ismrid);
14167 14152 SF_RGNMAP_DEL(sfmmup->sfmmu_ismregion_map, rid);
14168 14153 scdp = sfmmup->sfmmu_scdp;
14169 14154 if (scdp != NULL &&
14170 14155 SF_RGNMAP_TEST(scdp->scd_ismregion_map, rid)) {
14171 14156 sfmmu_leave_scd(sfmmup, r_type);
14172 14157 ASSERT(sfmmu_hat_lock_held(sfmmup));
14173 14158 }
14174 14159 sfmmu_hat_exit(hatlockp);
14175 14160 } else {
14176 14161 ulong_t rttecnt;
14177 14162 r_pgszc = rgnp->rgn_pgszc;
14178 14163 r_saddr = rgnp->rgn_saddr;
14179 14164 r_size = rgnp->rgn_size;
14180 14165 r_eaddr = r_saddr + r_size;
14181 14166
14182 14167 ASSERT(r_type == SFMMU_REGION_HME);
14183 14168 hatlockp = sfmmu_hat_enter(sfmmup);
14184 14169 ASSERT(rid < srdp->srd_next_hmerid);
14185 14170 SF_RGNMAP_DEL(sfmmup->sfmmu_hmeregion_map, rid);
14186 14171
14187 14172 /*
14188 14173 		 * If the region is part of an SCD, call sfmmu_leave_scd().
14189 14174 		 * Otherwise, if the process is not exiting and has a valid
14190 14175 		 * context, just drop the context on the floor to lose stale
14191 14176 		 * TLB entries and force the update of the tsb miss area to
14192 14177 		 * reflect the new region map. After that, clean our TSB entries.
14193 14178 */
14194 14179 scdp = sfmmup->sfmmu_scdp;
14195 14180 if (scdp != NULL &&
14196 14181 SF_RGNMAP_TEST(scdp->scd_hmeregion_map, rid)) {
14197 14182 sfmmu_leave_scd(sfmmup, r_type);
14198 14183 ASSERT(sfmmu_hat_lock_held(sfmmup));
14199 14184 }
14200 14185 sfmmu_invalidate_ctx(sfmmup);
14201 14186
14202 14187 i = TTE8K;
14203 14188 while (i < mmu_page_sizes) {
14204 14189 if (rgnp->rgn_ttecnt[i] != 0) {
14205 14190 sfmmu_unload_tsb_range(sfmmup, r_saddr,
14206 14191 r_eaddr, i);
14207 14192 if (i < TTE4M) {
14208 14193 i = TTE4M;
14209 14194 continue;
14210 14195 } else {
14211 14196 break;
14212 14197 }
14213 14198 }
14214 14199 i++;
14215 14200 }
14216 14201 /* Remove the preallocated 1/4 8k ttecnt for 4M regions. */
14217 14202 if (r_pgszc >= TTE4M) {
14218 14203 rttecnt = r_size >> (TTE_PAGE_SHIFT(TTE8K) + 2);
14219 14204 ASSERT(sfmmup->sfmmu_tsb0_4minflcnt >=
14220 14205 rttecnt);
14221 14206 sfmmup->sfmmu_tsb0_4minflcnt -= rttecnt;
14222 14207 }
14223 14208
14224 14209 /* update shme rgns ttecnt in sfmmu_ttecnt */
14225 14210 rttecnt = r_size >> TTE_PAGE_SHIFT(r_pgszc);
14226 14211 ASSERT(sfmmup->sfmmu_ttecnt[r_pgszc] >= rttecnt);
14227 14212 atomic_add_long(&sfmmup->sfmmu_ttecnt[r_pgszc], -rttecnt);
14228 14213
14229 14214 sfmmu_hat_exit(hatlockp);
14230 14215 if (scdp != NULL && sfmmup->sfmmu_scdp == NULL) {
14231 14216 /* sfmmup left the scd, grow private tsb */
14232 14217 sfmmu_check_page_sizes(sfmmup, 1);
14233 14218 } else {
14234 14219 sfmmu_check_page_sizes(sfmmup, 0);
14235 14220 }
14236 14221 }
14237 14222
14238 14223 if (r_type == SFMMU_REGION_HME) {
14239 14224 sfmmu_unlink_from_hmeregion(sfmmup, rgnp);
14240 14225 }
14241 14226
14242 14227 r_obj = rgnp->rgn_obj;
14243 14228 if (atomic_dec_32_nv((volatile uint_t *)&rgnp->rgn_refcnt)) {
14244 14229 return;
14245 14230 }
14246 14231
14247 14232 /*
14248 14233 * looks like nobody uses this region anymore. Free it.
14249 14234 */
14250 14235 rhash = RGN_HASH_FUNCTION(r_obj);
14251 14236 mutex_enter(&srdp->srd_mutex);
14252 14237 for (prev_rgnpp = &srdp->srd_rgnhash[rhash];
14253 14238 (cur_rgnp = *prev_rgnpp) != NULL;
14254 14239 prev_rgnpp = &cur_rgnp->rgn_hash) {
14255 14240 if (cur_rgnp == rgnp && cur_rgnp->rgn_refcnt == 0) {
14256 14241 break;
14257 14242 }
14258 14243 }
14259 14244
14260 14245 if (cur_rgnp == NULL) {
14261 14246 mutex_exit(&srdp->srd_mutex);
14262 14247 return;
14263 14248 }
14264 14249
14265 14250 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == r_type);
14266 14251 *prev_rgnpp = rgnp->rgn_hash;
14267 14252 if (r_type == SFMMU_REGION_ISM) {
14268 14253 rgnp->rgn_flags |= SFMMU_REGION_FREE;
14269 14254 ASSERT(rid < srdp->srd_next_ismrid);
14270 14255 rgnp->rgn_next = srdp->srd_ismrgnfree;
14271 14256 srdp->srd_ismrgnfree = rgnp;
14272 14257 ASSERT(srdp->srd_ismbusyrgns > 0);
14273 14258 srdp->srd_ismbusyrgns--;
14274 14259 mutex_exit(&srdp->srd_mutex);
14275 14260 return;
14276 14261 }
14277 14262 mutex_exit(&srdp->srd_mutex);
14278 14263
14279 14264 /*
14280 14265 * Destroy region's hmeblks.
14281 14266 */
14282 14267 sfmmu_unload_hmeregion(srdp, rgnp);
14283 14268
14284 14269 rgnp->rgn_hmeflags = 0;
14285 14270
14286 14271 ASSERT(rgnp->rgn_sfmmu_head == NULL);
14287 14272 ASSERT(rgnp->rgn_id == rid);
14288 14273 for (i = 0; i < MMU_PAGE_SIZES; i++) {
14289 14274 rgnp->rgn_ttecnt[i] = 0;
14290 14275 }
14291 14276 rgnp->rgn_flags |= SFMMU_REGION_FREE;
14292 14277 mutex_enter(&srdp->srd_mutex);
14293 14278 ASSERT(rid < srdp->srd_next_hmerid);
14294 14279 rgnp->rgn_next = srdp->srd_hmergnfree;
14295 14280 srdp->srd_hmergnfree = rgnp;
14296 14281 ASSERT(srdp->srd_hmebusyrgns > 0);
14297 14282 srdp->srd_hmebusyrgns--;
14298 14283 mutex_exit(&srdp->srd_mutex);
14299 14284 }
14300 14285
14301 14286 /*
14302 14287 * For now only called for hmeblk regions and not for ISM regions.
14303 14288 */
14304 14289 void
14305 14290 hat_dup_region(struct hat *sfmmup, hat_region_cookie_t rcookie)
14306 14291 {
14307 14292 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
14308 14293 uint_t rid = (uint_t)((uint64_t)rcookie);
14309 14294 sf_region_t *rgnp;
14310 14295 sf_rgn_link_t *rlink;
14311 14296 sf_rgn_link_t *hrlink;
14312 14297 ulong_t rttecnt;
14313 14298
14314 14299 ASSERT(sfmmup != ksfmmup);
14315 14300 ASSERT(srdp != NULL);
14316 14301 ASSERT(srdp->srd_refcnt > 0);
14317 14302
14318 14303 ASSERT(rid < srdp->srd_next_hmerid);
14319 14304 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
14320 14305 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
14321 14306
14322 14307 rgnp = srdp->srd_hmergnp[rid];
14323 14308 ASSERT(rgnp->rgn_refcnt > 0);
14324 14309 ASSERT(rgnp->rgn_id == rid);
14325 14310 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == SFMMU_REGION_HME);
14326 14311 ASSERT(!(rgnp->rgn_flags & SFMMU_REGION_FREE));
14327 14312
14328 14313 atomic_inc_32((volatile uint_t *)&rgnp->rgn_refcnt);
14329 14314
14330 14315 /* LINTED: constant in conditional context */
14331 14316 SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 1, 0);
14332 14317 ASSERT(rlink != NULL);
14333 14318 mutex_enter(&rgnp->rgn_mutex);
14334 14319 ASSERT(rgnp->rgn_sfmmu_head != NULL);
14335 14320 /* LINTED: constant in conditional context */
14336 14321 SFMMU_HMERID2RLINKP(rgnp->rgn_sfmmu_head, rid, hrlink, 0, 0);
14337 14322 ASSERT(hrlink != NULL);
14338 14323 ASSERT(hrlink->prev == NULL);
14339 14324 rlink->next = rgnp->rgn_sfmmu_head;
14340 14325 rlink->prev = NULL;
14341 14326 hrlink->prev = sfmmup;
14342 14327 /*
14343 14328 * make sure rlink's next field is correct
14344 14329 * before making this link visible.
14345 14330 */
14346 14331 membar_stst();
14347 14332 rgnp->rgn_sfmmu_head = sfmmup;
14348 14333 mutex_exit(&rgnp->rgn_mutex);
14349 14334
14350 14335 /* update sfmmu_ttecnt with the shme rgn ttecnt */
14351 14336 rttecnt = rgnp->rgn_size >> TTE_PAGE_SHIFT(rgnp->rgn_pgszc);
14352 14337 atomic_add_long(&sfmmup->sfmmu_ttecnt[rgnp->rgn_pgszc], rttecnt);
14353 14338 /* update tsb0 inflation count */
14354 14339 if (rgnp->rgn_pgszc >= TTE4M) {
14355 14340 sfmmup->sfmmu_tsb0_4minflcnt +=
14356 14341 rgnp->rgn_size >> (TTE_PAGE_SHIFT(TTE8K) + 2);
14357 14342 }
14358 14343 /*
14359 14344 * Update regionid bitmask without hat lock since no other thread
14360 14345 * can update this region bitmask right now.
14361 14346 */
14362 14347 SF_RGNMAP_ADD(sfmmup->sfmmu_hmeregion_map, rid);
14363 14348 }
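
The region accounting above is all shift arithmetic: a region contributes r_size >> TTE_PAGE_SHIFT(pgszc) translations at its own page size, and a region of 4M or larger additionally reserves one quarter of its 8K page count against tsb0 in case large-page allocation fails. Below is a minimal standalone sketch of those two computations for a hypothetical 64MB region mapped with 4M pages; the page shifts (13 for 8K, 22 for 4M) and all names here are illustrative assumptions, not the kernel's definitions.

#include <stdio.h>
#include <stdint.h>

#define PAGESHIFT_8K	13	/* assumed value of TTE_PAGE_SHIFT(TTE8K) */
#define PAGESHIFT_4M	22	/* assumed value of TTE_PAGE_SHIFT(TTE4M) */

int
main(void)
{
	uint64_t r_size = 64ULL << 20;		/* 64MB region mapped with 4M pages */

	/* translations charged at the region's own page size: 16 x 4M */
	uint64_t rttecnt_4m = r_size >> PAGESHIFT_4M;

	/*
	 * tsb0 inflation reserve: 1/4 of the region's 8K page count, kept
	 * in case large-page allocation fails and the region ends up
	 * mapped with 8K pages instead.
	 */
	uint64_t tsb0_inflate = r_size >> (PAGESHIFT_8K + 2);

	printf("4M ttecnt = %llu, tsb0 inflation = %llu 8K entries\n",
	    (unsigned long long)rttecnt_4m,
	    (unsigned long long)tsb0_inflate);
	return (0);
}

For the 64MB example this prints 16 and 2048, matching the shifts used by hat_dup_region() above.
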
14364 14349
14365 14350 /* ARGSUSED */
14366 14351 static int
14367 14352 sfmmu_rgncache_constructor(void *buf, void *cdrarg, int kmflags)
14368 14353 {
14369 14354 sf_region_t *rgnp = (sf_region_t *)buf;
14370 14355 bzero(buf, sizeof (*rgnp));
14371 14356
14372 14357 mutex_init(&rgnp->rgn_mutex, NULL, MUTEX_DEFAULT, NULL);
14373 14358
14374 14359 return (0);
14375 14360 }
14376 14361
14377 14362 /* ARGSUSED */
14378 14363 static void
14379 14364 sfmmu_rgncache_destructor(void *buf, void *cdrarg)
14380 14365 {
14381 14366 sf_region_t *rgnp = (sf_region_t *)buf;
14382 14367 mutex_destroy(&rgnp->rgn_mutex);
14383 14368 }
14384 14369
14385 14370 static int
14386 14371 sfrgnmap_isnull(sf_region_map_t *map)
14387 14372 {
14388 14373 int i;
14389 14374
14390 14375 for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) {
14391 14376 if (map->bitmap[i] != 0) {
14392 14377 return (0);
14393 14378 }
14394 14379 }
14395 14380 return (1);
14396 14381 }
14397 14382
14398 14383 static int
14399 14384 sfhmergnmap_isnull(sf_hmeregion_map_t *map)
14400 14385 {
14401 14386 int i;
14402 14387
14403 14388 for (i = 0; i < SFMMU_HMERGNMAP_WORDS; i++) {
14404 14389 if (map->bitmap[i] != 0) {
14405 14390 return (0);
14406 14391 }
14407 14392 }
14408 14393 return (1);
14409 14394 }
14410 14395
14411 14396 #ifdef DEBUG
14412 14397 static void
14413 14398 check_scd_sfmmu_list(sfmmu_t **headp, sfmmu_t *sfmmup, int onlist)
14414 14399 {
14415 14400 sfmmu_t *sp;
14416 14401 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
14417 14402
14418 14403 for (sp = *headp; sp != NULL; sp = sp->sfmmu_scd_link.next) {
14419 14404 ASSERT(srdp == sp->sfmmu_srdp);
14420 14405 if (sp == sfmmup) {
14421 14406 if (onlist) {
14422 14407 return;
14423 14408 } else {
14424 14409 				panic("shctx: sfmmu 0x%p found on scd "
14425 14410 "list 0x%p", (void *)sfmmup,
14426 14411 (void *)*headp);
14427 14412 }
14428 14413 }
14429 14414 }
14430 14415 if (onlist) {
14431 14416 panic("shctx: sfmmu 0x%p not found on scd list 0x%p",
14432 14417 (void *)sfmmup, (void *)*headp);
14433 14418 } else {
14434 14419 return;
14435 14420 }
14436 14421 }
14437 14422 #else /* DEBUG */
14438 14423 #define check_scd_sfmmu_list(headp, sfmmup, onlist)
14439 14424 #endif /* DEBUG */
14440 14425
14441 14426 /*
14442 14427 * Removes an sfmmu from the SCD sfmmu list.
14443 14428 */
14444 14429 static void
14445 14430 sfmmu_from_scd_list(sfmmu_t **headp, sfmmu_t *sfmmup)
14446 14431 {
14447 14432 ASSERT(sfmmup->sfmmu_srdp != NULL);
14448 14433 check_scd_sfmmu_list(headp, sfmmup, 1);
14449 14434 if (sfmmup->sfmmu_scd_link.prev != NULL) {
14450 14435 ASSERT(*headp != sfmmup);
14451 14436 sfmmup->sfmmu_scd_link.prev->sfmmu_scd_link.next =
14452 14437 sfmmup->sfmmu_scd_link.next;
14453 14438 } else {
14454 14439 ASSERT(*headp == sfmmup);
14455 14440 *headp = sfmmup->sfmmu_scd_link.next;
14456 14441 }
14457 14442 if (sfmmup->sfmmu_scd_link.next != NULL) {
14458 14443 sfmmup->sfmmu_scd_link.next->sfmmu_scd_link.prev =
14459 14444 sfmmup->sfmmu_scd_link.prev;
14460 14445 }
14461 14446 }
14462 14447
14463 14448
14464 14449 /*
14465 14450 * Adds an sfmmu to the start of the queue.
14466 14451 */
14467 14452 static void
14468 14453 sfmmu_to_scd_list(sfmmu_t **headp, sfmmu_t *sfmmup)
14469 14454 {
14470 14455 check_scd_sfmmu_list(headp, sfmmup, 0);
14471 14456 sfmmup->sfmmu_scd_link.prev = NULL;
14472 14457 sfmmup->sfmmu_scd_link.next = *headp;
14473 14458 if (*headp != NULL)
14474 14459 (*headp)->sfmmu_scd_link.prev = sfmmup;
14475 14460 *headp = sfmmup;
14476 14461 }
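
sfmmu_from_scd_list() and sfmmu_to_scd_list() above manage a doubly-linked list whose prev/next pointers are embedded in the sfmmu itself (sfmmu_scd_link) rather than in separately allocated nodes, so list membership costs no allocation and removal needs no search. A generic user-level sketch of the same embedded-link pattern, using hypothetical types that only stand in for the kernel's:

#include <stddef.h>
#include <assert.h>

struct node;

struct link {
	struct node *next;
	struct node *prev;
};

struct node {
	int	    id;
	struct link scd_link;	/* embedded link, analogous to sfmmu_scd_link */
};

/* Push a node onto the head of the list, like sfmmu_to_scd_list(). */
static void
list_push(struct node **headp, struct node *np)
{
	np->scd_link.prev = NULL;
	np->scd_link.next = *headp;
	if (*headp != NULL)
		(*headp)->scd_link.prev = np;
	*headp = np;
}

/* Unlink a node from anywhere in the list, like sfmmu_from_scd_list(). */
static void
list_remove(struct node **headp, struct node *np)
{
	if (np->scd_link.prev != NULL) {
		assert(*headp != np);
		np->scd_link.prev->scd_link.next = np->scd_link.next;
	} else {
		assert(*headp == np);
		*headp = np->scd_link.next;
	}
	if (np->scd_link.next != NULL)
		np->scd_link.next->scd_link.prev = np->scd_link.prev;
}
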
14477 14462
14478 14463 /*
14479 14464  * Remove an scd from the queue.
14480 14465 */
14481 14466 static void
14482 14467 sfmmu_remove_scd(sf_scd_t **headp, sf_scd_t *scdp)
14483 14468 {
14484 14469 if (scdp->scd_prev != NULL) {
14485 14470 ASSERT(*headp != scdp);
14486 14471 scdp->scd_prev->scd_next = scdp->scd_next;
14487 14472 } else {
14488 14473 ASSERT(*headp == scdp);
14489 14474 *headp = scdp->scd_next;
14490 14475 }
14491 14476
14492 14477 if (scdp->scd_next != NULL) {
14493 14478 scdp->scd_next->scd_prev = scdp->scd_prev;
14494 14479 }
14495 14480 }
14496 14481
14497 14482 /*
14498 14483 * Add an scd to the start of the queue.
14499 14484 */
14500 14485 static void
14501 14486 sfmmu_add_scd(sf_scd_t **headp, sf_scd_t *scdp)
14502 14487 {
14503 14488 scdp->scd_prev = NULL;
14504 14489 scdp->scd_next = *headp;
14505 14490 if (*headp != NULL) {
14506 14491 (*headp)->scd_prev = scdp;
14507 14492 }
14508 14493 *headp = scdp;
14509 14494 }
14510 14495
14511 14496 static int
14512 14497 sfmmu_alloc_scd_tsbs(sf_srd_t *srdp, sf_scd_t *scdp)
14513 14498 {
14514 14499 uint_t rid;
14515 14500 uint_t i;
14516 14501 uint_t j;
14517 14502 ulong_t w;
14518 14503 sf_region_t *rgnp;
14519 14504 ulong_t tte8k_cnt = 0;
14520 14505 ulong_t tte4m_cnt = 0;
14521 14506 uint_t tsb_szc;
14522 14507 sfmmu_t *scsfmmup = scdp->scd_sfmmup;
14523 14508 sfmmu_t *ism_hatid;
14524 14509 struct tsb_info *newtsb;
14525 14510 int szc;
14526 14511
14527 14512 ASSERT(srdp != NULL);
14528 14513
14529 14514 for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) {
14530 14515 if ((w = scdp->scd_region_map.bitmap[i]) == 0) {
14531 14516 continue;
14532 14517 }
14533 14518 j = 0;
14534 14519 while (w) {
14535 14520 if (!(w & 0x1)) {
14536 14521 j++;
14537 14522 w >>= 1;
14538 14523 continue;
14539 14524 }
14540 14525 rid = (i << BT_ULSHIFT) | j;
14541 14526 j++;
14542 14527 w >>= 1;
14543 14528
14544 14529 if (rid < SFMMU_MAX_HME_REGIONS) {
14545 14530 rgnp = srdp->srd_hmergnp[rid];
14546 14531 ASSERT(rgnp->rgn_id == rid);
14547 14532 ASSERT(rgnp->rgn_refcnt > 0);
14548 14533
14549 14534 if (rgnp->rgn_pgszc < TTE4M) {
14550 14535 tte8k_cnt += rgnp->rgn_size >>
14551 14536 TTE_PAGE_SHIFT(TTE8K);
14552 14537 } else {
14553 14538 ASSERT(rgnp->rgn_pgszc >= TTE4M);
14554 14539 tte4m_cnt += rgnp->rgn_size >>
14555 14540 TTE_PAGE_SHIFT(TTE4M);
14556 14541 /*
14557 14542 * Inflate SCD tsb0 by preallocating
14558 14543 * 1/4 8k ttecnt for 4M regions to
14559 14544 * allow for lgpg alloc failure.
14560 14545 */
14561 14546 tte8k_cnt += rgnp->rgn_size >>
14562 14547 (TTE_PAGE_SHIFT(TTE8K) + 2);
14563 14548 }
14564 14549 } else {
14565 14550 rid -= SFMMU_MAX_HME_REGIONS;
14566 14551 rgnp = srdp->srd_ismrgnp[rid];
14567 14552 ASSERT(rgnp->rgn_id == rid);
14568 14553 ASSERT(rgnp->rgn_refcnt > 0);
14569 14554
14570 14555 ism_hatid = (sfmmu_t *)rgnp->rgn_obj;
14571 14556 ASSERT(ism_hatid->sfmmu_ismhat);
14572 14557
14573 14558 for (szc = 0; szc < TTE4M; szc++) {
14574 14559 tte8k_cnt +=
14575 14560 ism_hatid->sfmmu_ttecnt[szc] <<
14576 14561 TTE_BSZS_SHIFT(szc);
14577 14562 }
14578 14563
14579 14564 ASSERT(rgnp->rgn_pgszc >= TTE4M);
14580 14565 if (rgnp->rgn_pgszc >= TTE4M) {
14581 14566 tte4m_cnt += rgnp->rgn_size >>
14582 14567 TTE_PAGE_SHIFT(TTE4M);
14583 14568 }
14584 14569 }
14585 14570 }
14586 14571 }
14587 14572
14588 14573 tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt);
14589 14574
14590 14575 /* Allocate both the SCD TSBs here. */
14591 14576 if (sfmmu_tsbinfo_alloc(&scsfmmup->sfmmu_tsb,
14592 14577 tsb_szc, TSB8K|TSB64K|TSB512K, TSB_ALLOC, scsfmmup) &&
14593 14578 (tsb_szc <= TSB_4M_SZCODE ||
14594 14579 sfmmu_tsbinfo_alloc(&scsfmmup->sfmmu_tsb,
14595 14580 TSB_4M_SZCODE, TSB8K|TSB64K|TSB512K,
14596 14581 TSB_ALLOC, scsfmmup))) {
14597 14582
14598 14583 SFMMU_STAT(sf_scd_1sttsb_allocfail);
14599 14584 return (TSB_ALLOCFAIL);
14600 14585 } else {
14601 14586 scsfmmup->sfmmu_tsb->tsb_flags |= TSB_SHAREDCTX;
14602 14587
14603 14588 if (tte4m_cnt) {
14604 14589 tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt);
14605 14590 if (sfmmu_tsbinfo_alloc(&newtsb, tsb_szc,
14606 14591 TSB4M|TSB32M|TSB256M, TSB_ALLOC, scsfmmup) &&
14607 14592 (tsb_szc <= TSB_4M_SZCODE ||
14608 14593 sfmmu_tsbinfo_alloc(&newtsb, TSB_4M_SZCODE,
14609 14594 TSB4M|TSB32M|TSB256M,
14610 14595 TSB_ALLOC, scsfmmup))) {
14611 14596 /*
14612 14597 * If we fail to allocate the 2nd shared tsb,
14613 14598 * just free the 1st tsb, return failure.
14614 14599 */
14615 14600 sfmmu_tsbinfo_free(scsfmmup->sfmmu_tsb);
14616 14601 SFMMU_STAT(sf_scd_2ndtsb_allocfail);
14617 14602 return (TSB_ALLOCFAIL);
14618 14603 } else {
14619 14604 ASSERT(scsfmmup->sfmmu_tsb->tsb_next == NULL);
14620 14605 newtsb->tsb_flags |= TSB_SHAREDCTX;
14621 14606 scsfmmup->sfmmu_tsb->tsb_next = newtsb;
14622 14607 SFMMU_STAT(sf_scd_2ndtsb_alloc);
14623 14608 }
14624 14609 }
14625 14610 SFMMU_STAT(sf_scd_1sttsb_alloc);
14626 14611 }
14627 14612 return (TSB_SUCCESS);
14628 14613 }
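
sfmmu_alloc_scd_tsbs() sizes the two shared TSBs by walking the SCD region bitmap one word at a time, peeling set bits off the low end and reconstructing each region id as (word_index << BT_ULSHIFT) | bit_index; the same loop shape reappears in sfmmu_link_scd_to_regions() and sfmmu_unlink_scd_from_regions() further down. A standalone sketch of that bit-extraction idiom, assuming 64-bit words (so a BT_ULSHIFT of 6) and a hypothetical visit_rid() callback:

#include <stdio.h>

#define RGNMAP_WORDS	4	/* stand-in for SFMMU_RGNMAP_WORDS */
#define BT_ULSHIFT	6	/* log2(bits per word), assuming 64-bit ulong_t */

static void
visit_rid(unsigned int rid)
{
	printf("region id %u is in the map\n", rid);
}

static void
walk_region_map(const unsigned long *bitmap)
{
	unsigned int i, j, rid;
	unsigned long w;

	for (i = 0; i < RGNMAP_WORDS; i++) {
		if ((w = bitmap[i]) == 0)
			continue;
		j = 0;
		while (w) {
			if (!(w & 0x1)) {
				j++;
				w >>= 1;
				continue;
			}
			rid = (i << BT_ULSHIFT) | j;
			visit_rid(rid);
			j++;
			w >>= 1;
		}
	}
}

int
main(void)
{
	unsigned long map[RGNMAP_WORDS] = { 0x5, 0x0, 0x80, 0x0 };

	walk_region_map(map);	/* visits region ids 0, 2 and 135 */
	return (0);
}
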
14629 14614
14630 14615 static void
14631 14616 sfmmu_free_scd_tsbs(sfmmu_t *scd_sfmmu)
14632 14617 {
14633 14618 while (scd_sfmmu->sfmmu_tsb != NULL) {
14634 14619 struct tsb_info *next = scd_sfmmu->sfmmu_tsb->tsb_next;
14635 14620 sfmmu_tsbinfo_free(scd_sfmmu->sfmmu_tsb);
14636 14621 scd_sfmmu->sfmmu_tsb = next;
14637 14622 }
14638 14623 }
14639 14624
14640 14625 /*
14641 14626 * Link the sfmmu onto the hme region list.
14642 14627 */
14643 14628 void
14644 14629 sfmmu_link_to_hmeregion(sfmmu_t *sfmmup, sf_region_t *rgnp)
14645 14630 {
14646 14631 uint_t rid;
14647 14632 sf_rgn_link_t *rlink;
14648 14633 sfmmu_t *head;
14649 14634 sf_rgn_link_t *hrlink;
14650 14635
14651 14636 rid = rgnp->rgn_id;
14652 14637 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
14653 14638
14654 14639 /* LINTED: constant in conditional context */
14655 14640 SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 1, 1);
14656 14641 ASSERT(rlink != NULL);
14657 14642 mutex_enter(&rgnp->rgn_mutex);
14658 14643 if ((head = rgnp->rgn_sfmmu_head) == NULL) {
14659 14644 rlink->next = NULL;
14660 14645 rlink->prev = NULL;
14661 14646 /*
14662 14647 * make sure rlink's next field is NULL
14663 14648 * before making this link visible.
14664 14649 */
14665 14650 membar_stst();
14666 14651 rgnp->rgn_sfmmu_head = sfmmup;
14667 14652 } else {
14668 14653 /* LINTED: constant in conditional context */
14669 14654 SFMMU_HMERID2RLINKP(head, rid, hrlink, 0, 0);
14670 14655 ASSERT(hrlink != NULL);
14671 14656 ASSERT(hrlink->prev == NULL);
14672 14657 rlink->next = head;
14673 14658 rlink->prev = NULL;
14674 14659 hrlink->prev = sfmmup;
14675 14660 /*
14676 14661 * make sure rlink's next field is correct
14677 14662 * before making this link visible.
14678 14663 */
14679 14664 membar_stst();
14680 14665 rgnp->rgn_sfmmu_head = sfmmup;
14681 14666 }
14682 14667 mutex_exit(&rgnp->rgn_mutex);
14683 14668 }
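
The membar_stst() above is what lets readers such as sfmmu_rgntlb_demap() walk the region's sfmmu list without taking rgn_mutex: the new link's next field becomes globally visible before the store that publishes it through rgn_sfmmu_head, so a lock-free walker never observes a half-initialized entry. A user-level sketch of the same single-writer publish pattern using C11 release/acquire ordering (hypothetical types; in the kernel code writers are additionally serialized by rgn_mutex):

#include <stdatomic.h>
#include <stddef.h>

struct item {
	int		 value;
	struct item	*next;
};

static _Atomic(struct item *) list_head;

/* Writer: fully initialize the node, then publish it with release order. */
void
publish(struct item *ip, int value)
{
	ip->value = value;
	ip->next = atomic_load_explicit(&list_head, memory_order_relaxed);
	/* the release store plays the role of membar_stst() + head update */
	atomic_store_explicit(&list_head, ip, memory_order_release);
}

/* Lock-free reader: the acquire load pairs with the writer's release store. */
int
sum_list(void)
{
	int sum = 0;
	struct item *ip = atomic_load_explicit(&list_head, memory_order_acquire);

	for (; ip != NULL; ip = ip->next)
		sum += ip->value;
	return (sum);
}
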
14684 14669
14685 14670 /*
14686 14671 * Unlink the sfmmu from the hme region list.
14687 14672 */
14688 14673 void
14689 14674 sfmmu_unlink_from_hmeregion(sfmmu_t *sfmmup, sf_region_t *rgnp)
14690 14675 {
14691 14676 uint_t rid;
14692 14677 sf_rgn_link_t *rlink;
14693 14678
14694 14679 rid = rgnp->rgn_id;
14695 14680 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
14696 14681
14697 14682 /* LINTED: constant in conditional context */
14698 14683 SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 0, 0);
14699 14684 ASSERT(rlink != NULL);
14700 14685 mutex_enter(&rgnp->rgn_mutex);
14701 14686 if (rgnp->rgn_sfmmu_head == sfmmup) {
14702 14687 sfmmu_t *next = rlink->next;
14703 14688 rgnp->rgn_sfmmu_head = next;
14704 14689 /*
14705 14690 * if we are stopped by xc_attention() after this
14706 14691 * point the forward link walking in
14707 14692 * sfmmu_rgntlb_demap() will work correctly since the
14708 14693 * head correctly points to the next element.
14709 14694 */
14710 14695 membar_stst();
14711 14696 rlink->next = NULL;
14712 14697 ASSERT(rlink->prev == NULL);
14713 14698 if (next != NULL) {
14714 14699 sf_rgn_link_t *nrlink;
14715 14700 /* LINTED: constant in conditional context */
14716 14701 SFMMU_HMERID2RLINKP(next, rid, nrlink, 0, 0);
14717 14702 ASSERT(nrlink != NULL);
14718 14703 ASSERT(nrlink->prev == sfmmup);
14719 14704 nrlink->prev = NULL;
14720 14705 }
14721 14706 } else {
14722 14707 sfmmu_t *next = rlink->next;
14723 14708 sfmmu_t *prev = rlink->prev;
14724 14709 sf_rgn_link_t *prlink;
14725 14710
14726 14711 ASSERT(prev != NULL);
14727 14712 /* LINTED: constant in conditional context */
14728 14713 SFMMU_HMERID2RLINKP(prev, rid, prlink, 0, 0);
14729 14714 ASSERT(prlink != NULL);
14730 14715 ASSERT(prlink->next == sfmmup);
14731 14716 prlink->next = next;
14732 14717 /*
14733 14718 * if we are stopped by xc_attention()
14734 14719 * after this point the forward link walking
14735 14720 * will work correctly since the prev element
14736 14721 * correctly points to the next element.
14737 14722 */
14738 14723 membar_stst();
14739 14724 rlink->next = NULL;
14740 14725 rlink->prev = NULL;
14741 14726 if (next != NULL) {
14742 14727 sf_rgn_link_t *nrlink;
14743 14728 /* LINTED: constant in conditional context */
14744 14729 SFMMU_HMERID2RLINKP(next, rid, nrlink, 0, 0);
14745 14730 ASSERT(nrlink != NULL);
14746 14731 ASSERT(nrlink->prev == sfmmup);
14747 14732 nrlink->prev = prev;
14748 14733 }
14749 14734 }
14750 14735 mutex_exit(&rgnp->rgn_mutex);
14751 14736 }
14752 14737
14753 14738 /*
14754 14739 * Link scd sfmmu onto ism or hme region list for each region in the
14755 14740 * scd region map.
14756 14741 */
14757 14742 void
14758 14743 sfmmu_link_scd_to_regions(sf_srd_t *srdp, sf_scd_t *scdp)
14759 14744 {
14760 14745 uint_t rid;
14761 14746 uint_t i;
14762 14747 uint_t j;
14763 14748 ulong_t w;
14764 14749 sf_region_t *rgnp;
14765 14750 sfmmu_t *scsfmmup;
14766 14751
14767 14752 scsfmmup = scdp->scd_sfmmup;
14768 14753 ASSERT(scsfmmup->sfmmu_scdhat);
14769 14754 for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) {
14770 14755 if ((w = scdp->scd_region_map.bitmap[i]) == 0) {
14771 14756 continue;
14772 14757 }
14773 14758 j = 0;
14774 14759 while (w) {
14775 14760 if (!(w & 0x1)) {
14776 14761 j++;
14777 14762 w >>= 1;
14778 14763 continue;
14779 14764 }
14780 14765 rid = (i << BT_ULSHIFT) | j;
14781 14766 j++;
14782 14767 w >>= 1;
14783 14768
14784 14769 if (rid < SFMMU_MAX_HME_REGIONS) {
14785 14770 rgnp = srdp->srd_hmergnp[rid];
14786 14771 ASSERT(rgnp->rgn_id == rid);
14787 14772 ASSERT(rgnp->rgn_refcnt > 0);
14788 14773 sfmmu_link_to_hmeregion(scsfmmup, rgnp);
14789 14774 } else {
14790 14775 sfmmu_t *ism_hatid = NULL;
14791 14776 ism_ment_t *ism_ment;
14792 14777 rid -= SFMMU_MAX_HME_REGIONS;
14793 14778 rgnp = srdp->srd_ismrgnp[rid];
14794 14779 ASSERT(rgnp->rgn_id == rid);
14795 14780 ASSERT(rgnp->rgn_refcnt > 0);
14796 14781
14797 14782 ism_hatid = (sfmmu_t *)rgnp->rgn_obj;
14798 14783 ASSERT(ism_hatid->sfmmu_ismhat);
14799 14784 ism_ment = &scdp->scd_ism_links[rid];
14800 14785 ism_ment->iment_hat = scsfmmup;
14801 14786 ism_ment->iment_base_va = rgnp->rgn_saddr;
14802 14787 mutex_enter(&ism_mlist_lock);
14803 14788 iment_add(ism_ment, ism_hatid);
14804 14789 mutex_exit(&ism_mlist_lock);
14805 14790
14806 14791 }
14807 14792 }
14808 14793 }
14809 14794 }
14810 14795 /*
14811 14796 * Unlink scd sfmmu from ism or hme region list for each region in the
14812 14797 * scd region map.
14813 14798 */
14814 14799 void
14815 14800 sfmmu_unlink_scd_from_regions(sf_srd_t *srdp, sf_scd_t *scdp)
14816 14801 {
14817 14802 uint_t rid;
14818 14803 uint_t i;
14819 14804 uint_t j;
14820 14805 ulong_t w;
14821 14806 sf_region_t *rgnp;
14822 14807 sfmmu_t *scsfmmup;
14823 14808
14824 14809 scsfmmup = scdp->scd_sfmmup;
14825 14810 for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) {
14826 14811 if ((w = scdp->scd_region_map.bitmap[i]) == 0) {
14827 14812 continue;
14828 14813 }
14829 14814 j = 0;
14830 14815 while (w) {
14831 14816 if (!(w & 0x1)) {
14832 14817 j++;
14833 14818 w >>= 1;
14834 14819 continue;
14835 14820 }
14836 14821 rid = (i << BT_ULSHIFT) | j;
14837 14822 j++;
14838 14823 w >>= 1;
14839 14824
14840 14825 if (rid < SFMMU_MAX_HME_REGIONS) {
14841 14826 rgnp = srdp->srd_hmergnp[rid];
14842 14827 ASSERT(rgnp->rgn_id == rid);
14843 14828 ASSERT(rgnp->rgn_refcnt > 0);
14844 14829 sfmmu_unlink_from_hmeregion(scsfmmup,
14845 14830 rgnp);
14846 14831
14847 14832 } else {
14848 14833 sfmmu_t *ism_hatid = NULL;
14849 14834 ism_ment_t *ism_ment;
14850 14835 rid -= SFMMU_MAX_HME_REGIONS;
14851 14836 rgnp = srdp->srd_ismrgnp[rid];
14852 14837 ASSERT(rgnp->rgn_id == rid);
14853 14838 ASSERT(rgnp->rgn_refcnt > 0);
14854 14839
14855 14840 ism_hatid = (sfmmu_t *)rgnp->rgn_obj;
14856 14841 ASSERT(ism_hatid->sfmmu_ismhat);
14857 14842 ism_ment = &scdp->scd_ism_links[rid];
14858 14843 ASSERT(ism_ment->iment_hat == scdp->scd_sfmmup);
14859 14844 ASSERT(ism_ment->iment_base_va ==
14860 14845 rgnp->rgn_saddr);
14861 14846 mutex_enter(&ism_mlist_lock);
14862 14847 iment_sub(ism_ment, ism_hatid);
14863 14848 mutex_exit(&ism_mlist_lock);
14864 14849
14865 14850 }
14866 14851 }
14867 14852 }
14868 14853 }
14869 14854 /*
14870 14855  * Allocates and initialises a new SCD structure. This is called with
14871 14856 * the srd_scd_mutex held and returns with the reference count
14872 14857 * initialised to 1.
14873 14858 */
14874 14859 static sf_scd_t *
14875 14860 sfmmu_alloc_scd(sf_srd_t *srdp, sf_region_map_t *new_map)
14876 14861 {
14877 14862 sf_scd_t *new_scdp;
14878 14863 sfmmu_t *scsfmmup;
14879 14864 int i;
14880 14865
14881 14866 ASSERT(MUTEX_HELD(&srdp->srd_scd_mutex));
14882 14867 new_scdp = kmem_cache_alloc(scd_cache, KM_SLEEP);
14883 14868
14884 14869 scsfmmup = kmem_cache_alloc(sfmmuid_cache, KM_SLEEP);
14885 14870 new_scdp->scd_sfmmup = scsfmmup;
14886 14871 scsfmmup->sfmmu_srdp = srdp;
14887 14872 scsfmmup->sfmmu_scdp = new_scdp;
14888 14873 scsfmmup->sfmmu_tsb0_4minflcnt = 0;
14889 14874 scsfmmup->sfmmu_scdhat = 1;
14890 14875 CPUSET_ALL(scsfmmup->sfmmu_cpusran);
14891 14876 bzero(scsfmmup->sfmmu_hmeregion_links, SFMMU_L1_HMERLINKS_SIZE);
14892 14877
14893 14878 ASSERT(max_mmu_ctxdoms > 0);
14894 14879 for (i = 0; i < max_mmu_ctxdoms; i++) {
14895 14880 scsfmmup->sfmmu_ctxs[i].cnum = INVALID_CONTEXT;
14896 14881 scsfmmup->sfmmu_ctxs[i].gnum = 0;
14897 14882 }
14898 14883
14899 14884 for (i = 0; i < MMU_PAGE_SIZES; i++) {
14900 14885 new_scdp->scd_rttecnt[i] = 0;
14901 14886 }
14902 14887
14903 14888 new_scdp->scd_region_map = *new_map;
14904 14889 new_scdp->scd_refcnt = 1;
14905 14890 if (sfmmu_alloc_scd_tsbs(srdp, new_scdp) != TSB_SUCCESS) {
14906 14891 kmem_cache_free(scd_cache, new_scdp);
14907 14892 kmem_cache_free(sfmmuid_cache, scsfmmup);
14908 14893 return (NULL);
14909 14894 }
14910 14895 if (&mmu_init_scd) {
14911 14896 mmu_init_scd(new_scdp);
14912 14897 }
14913 14898 return (new_scdp);
14914 14899 }
14915 14900
14916 14901 /*
14917 14902 * The first phase of a process joining an SCD. The hat structure is
14918 14903 * linked to the SCD queue and then the HAT_JOIN_SCD sfmmu flag is set
14919 14904 * and a cross-call with context invalidation is used to cause the
14920 14905 * remaining work to be carried out in the sfmmu_tsbmiss_exception()
14921 14906 * routine.
14922 14907 */
14923 14908 static void
14924 14909 sfmmu_join_scd(sf_scd_t *scdp, sfmmu_t *sfmmup)
14925 14910 {
14926 14911 hatlock_t *hatlockp;
14927 14912 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
14928 14913 int i;
14929 14914 sf_scd_t *old_scdp;
14930 14915
14931 14916 ASSERT(srdp != NULL);
14932 14917 ASSERT(scdp != NULL);
14933 14918 ASSERT(scdp->scd_refcnt > 0);
14934 14919 ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as));
14935 14920
14936 14921 if ((old_scdp = sfmmup->sfmmu_scdp) != NULL) {
14937 14922 ASSERT(old_scdp != scdp);
14938 14923
14939 14924 mutex_enter(&old_scdp->scd_mutex);
14940 14925 sfmmu_from_scd_list(&old_scdp->scd_sf_list, sfmmup);
14941 14926 mutex_exit(&old_scdp->scd_mutex);
14942 14927 /*
14943 14928 * sfmmup leaves the old scd. Update sfmmu_ttecnt to
14944 14929 * include the shme rgn ttecnt for rgns that
14945 14930 * were in the old SCD
14946 14931 */
14947 14932 for (i = 0; i < mmu_page_sizes; i++) {
14948 14933 ASSERT(sfmmup->sfmmu_scdrttecnt[i] ==
14949 14934 old_scdp->scd_rttecnt[i]);
14950 14935 atomic_add_long(&sfmmup->sfmmu_ttecnt[i],
14951 14936 sfmmup->sfmmu_scdrttecnt[i]);
14952 14937 }
14953 14938 }
14954 14939
14955 14940 /*
14956 14941 * Move sfmmu to the scd lists.
14957 14942 */
14958 14943 mutex_enter(&scdp->scd_mutex);
14959 14944 sfmmu_to_scd_list(&scdp->scd_sf_list, sfmmup);
14960 14945 mutex_exit(&scdp->scd_mutex);
14961 14946 SF_SCD_INCR_REF(scdp);
14962 14947
14963 14948 hatlockp = sfmmu_hat_enter(sfmmup);
14964 14949 /*
14965 14950 * For a multi-thread process, we must stop
14966 14951 * all the other threads before joining the scd.
14967 14952 */
14968 14953
14969 14954 SFMMU_FLAGS_SET(sfmmup, HAT_JOIN_SCD);
14970 14955
14971 14956 sfmmu_invalidate_ctx(sfmmup);
14972 14957 sfmmup->sfmmu_scdp = scdp;
14973 14958
14974 14959 /*
14975 14960 * Copy scd_rttecnt into sfmmup's sfmmu_scdrttecnt, and update
14976 14961 * sfmmu_ttecnt to not include the rgn ttecnt just joined in SCD.
14977 14962 */
14978 14963 for (i = 0; i < mmu_page_sizes; i++) {
14979 14964 sfmmup->sfmmu_scdrttecnt[i] = scdp->scd_rttecnt[i];
14980 14965 ASSERT(sfmmup->sfmmu_ttecnt[i] >= scdp->scd_rttecnt[i]);
14981 14966 atomic_add_long(&sfmmup->sfmmu_ttecnt[i],
14982 14967 -sfmmup->sfmmu_scdrttecnt[i]);
14983 14968 }
14984 14969 /* update tsb0 inflation count */
14985 14970 if (old_scdp != NULL) {
14986 14971 sfmmup->sfmmu_tsb0_4minflcnt +=
14987 14972 old_scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt;
14988 14973 }
14989 14974 ASSERT(sfmmup->sfmmu_tsb0_4minflcnt >=
14990 14975 scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt);
14991 14976 sfmmup->sfmmu_tsb0_4minflcnt -= scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt;
14992 14977
14993 14978 sfmmu_hat_exit(hatlockp);
14994 14979
14995 14980 if (old_scdp != NULL) {
14996 14981 SF_SCD_DECR_REF(srdp, old_scdp);
14997 14982 }
14998 14983
14999 14984 }
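
The per-page-size bookkeeping in sfmmu_join_scd() is a pure transfer: the translation counts for the shared regions are remembered in sfmmu_scdrttecnt and subtracted from sfmmu_ttecnt when joining, and sfmmu_leave_scd() below adds the same amounts back, so the combined total never changes. A toy, single-threaded sketch of that transfer (the kernel updates these counters with atomic_add_long() under the hat lock, and the field names below only mimic the real ones):

#include <stdio.h>

#define NPAGESIZES	6	/* assumed mmu_page_sizes */

struct toy_hat {
	unsigned long ttecnt[NPAGESIZES];	/* counted against private TSBs */
	unsigned long scdrttecnt[NPAGESIZES];	/* portion accounted to the SCD */
};

static void
toy_join_scd(struct toy_hat *hat, const unsigned long scd_rttecnt[NPAGESIZES])
{
	for (int i = 0; i < NPAGESIZES; i++) {
		hat->scdrttecnt[i] = scd_rttecnt[i];
		hat->ttecnt[i] -= scd_rttecnt[i];	/* now sized by the SCD TSBs */
	}
}

static void
toy_leave_scd(struct toy_hat *hat)
{
	for (int i = 0; i < NPAGESIZES; i++) {
		hat->ttecnt[i] += hat->scdrttecnt[i];	/* back to the private TSBs */
		hat->scdrttecnt[i] = 0;
	}
}

int
main(void)
{
	struct toy_hat hat = { .ttecnt = { 10000, 0, 0, 200, 0, 0 } };
	unsigned long scd_rttecnt[NPAGESIZES] = { 8000, 0, 0, 200, 0, 0 };

	toy_join_scd(&hat, scd_rttecnt);
	printf("after join:  private 8K count = %lu\n", hat.ttecnt[0]);
	toy_leave_scd(&hat);
	printf("after leave: private 8K count = %lu\n", hat.ttecnt[0]);
	return (0);
}
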
15000 14985
15001 14986 /*
15002 14987 * This routine is called by a process to become part of an SCD. It is called
15003 14988 * from sfmmu_tsbmiss_exception() once most of the initial work has been
15004 14989 * done by sfmmu_join_scd(). This routine must not drop the hat lock.
15005 14990 */
15006 14991 static void
15007 14992 sfmmu_finish_join_scd(sfmmu_t *sfmmup)
15008 14993 {
15009 14994 struct tsb_info *tsbinfop;
15010 14995
15011 14996 ASSERT(sfmmu_hat_lock_held(sfmmup));
15012 14997 ASSERT(sfmmup->sfmmu_scdp != NULL);
15013 14998 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD));
15014 14999 ASSERT(!SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY));
15015 15000 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ALLCTX_INVALID));
15016 15001
15017 15002 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL;
15018 15003 tsbinfop = tsbinfop->tsb_next) {
15019 15004 if (tsbinfop->tsb_flags & TSB_SWAPPED) {
15020 15005 continue;
15021 15006 }
15022 15007 ASSERT(!(tsbinfop->tsb_flags & TSB_RELOC_FLAG));
15023 15008
15024 15009 sfmmu_inv_tsb(tsbinfop->tsb_va,
15025 15010 TSB_BYTES(tsbinfop->tsb_szc));
15026 15011 }
15027 15012
15028 15013 /* Set HAT_CTX1_FLAG for all SCD ISMs */
15029 15014 sfmmu_ism_hatflags(sfmmup, 1);
15030 15015
15031 15016 SFMMU_STAT(sf_join_scd);
15032 15017 }
15033 15018
15034 15019 /*
15035 15020 * This routine is called in order to check if there is an SCD which matches
15036 15021  * the process's region map; if not, a new SCD may be created.
15037 15022 */
15038 15023 static void
15039 15024 sfmmu_find_scd(sfmmu_t *sfmmup)
15040 15025 {
15041 15026 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
15042 15027 sf_scd_t *scdp, *new_scdp;
15043 15028 int ret;
15044 15029
15045 15030 ASSERT(srdp != NULL);
15046 15031 ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as));
15047 15032
15048 15033 mutex_enter(&srdp->srd_scd_mutex);
15049 15034 for (scdp = srdp->srd_scdp; scdp != NULL;
15050 15035 scdp = scdp->scd_next) {
15051 15036 SF_RGNMAP_EQUAL(&scdp->scd_region_map,
15052 15037 &sfmmup->sfmmu_region_map, ret);
15053 15038 if (ret == 1) {
15054 15039 SF_SCD_INCR_REF(scdp);
15055 15040 mutex_exit(&srdp->srd_scd_mutex);
15056 15041 sfmmu_join_scd(scdp, sfmmup);
15057 15042 ASSERT(scdp->scd_refcnt >= 2);
15058 15043 atomic_dec_32((volatile uint32_t *)&scdp->scd_refcnt);
15059 15044 return;
15060 15045 } else {
15061 15046 /*
15062 15047 * If the sfmmu region map is a subset of the scd
15063 15048 * region map, then the assumption is that this process
15064 15049 * will continue attaching to ISM segments until the
15065 15050 * region maps are equal.
15066 15051 */
15067 15052 SF_RGNMAP_IS_SUBSET(&scdp->scd_region_map,
15068 15053 &sfmmup->sfmmu_region_map, ret);
15069 15054 if (ret == 1) {
15070 15055 mutex_exit(&srdp->srd_scd_mutex);
15071 15056 return;
15072 15057 }
15073 15058 }
15074 15059 }
15075 15060
15076 15061 ASSERT(scdp == NULL);
15077 15062 /*
15078 15063 * No matching SCD has been found, create a new one.
15079 15064 */
15080 15065 if ((new_scdp = sfmmu_alloc_scd(srdp, &sfmmup->sfmmu_region_map)) ==
15081 15066 NULL) {
15082 15067 mutex_exit(&srdp->srd_scd_mutex);
15083 15068 return;
15084 15069 }
15085 15070
15086 15071 /*
15087 15072 * sfmmu_alloc_scd() returns with a ref count of 1 on the scd.
15088 15073 */
15089 15074
15090 15075 /* Set scd_rttecnt for shme rgns in SCD */
15091 15076 sfmmu_set_scd_rttecnt(srdp, new_scdp);
15092 15077
15093 15078 /*
15094 15079 * Link scd onto srd_scdp list and scd sfmmu onto region/iment lists.
15095 15080 */
15096 15081 sfmmu_link_scd_to_regions(srdp, new_scdp);
15097 15082 sfmmu_add_scd(&srdp->srd_scdp, new_scdp);
15098 15083 SFMMU_STAT_ADD(sf_create_scd, 1);
15099 15084
15100 15085 mutex_exit(&srdp->srd_scd_mutex);
15101 15086 sfmmu_join_scd(new_scdp, sfmmup);
15102 15087 ASSERT(new_scdp->scd_refcnt >= 2);
15103 15088 atomic_dec_32((volatile uint32_t *)&new_scdp->scd_refcnt);
15104 15089 }
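
sfmmu_find_scd() depends on two bitmap predicates: exact equality of region maps to join an existing SCD, and a subset test that lets a process keep attaching segments until its map grows into a match. The sketch below only illustrates the semantics those tests rely on; the real SF_RGNMAP_EQUAL and SF_RGNMAP_IS_SUBSET macros live in hat_sfmmu.h, return their result through an output parameter, and fix the argument order, so treat these helpers as hypothetical:

#include <stdbool.h>
#include <stddef.h>

#define RGNMAP_WORDS	4	/* stand-in for SFMMU_RGNMAP_WORDS */

typedef struct {
	unsigned long bitmap[RGNMAP_WORDS];
} rgn_map_t;

/* true iff both maps contain exactly the same region ids */
static bool
rgnmap_equal(const rgn_map_t *a, const rgn_map_t *b)
{
	for (size_t i = 0; i < RGNMAP_WORDS; i++) {
		if (a->bitmap[i] != b->bitmap[i])
			return (false);
	}
	return (true);
}

/* true iff every region id set in 'a' is also set in 'b' */
static bool
rgnmap_is_subset(const rgn_map_t *a, const rgn_map_t *b)
{
	for (size_t i = 0; i < RGNMAP_WORDS; i++) {
		if ((a->bitmap[i] & ~b->bitmap[i]) != 0)
			return (false);
	}
	return (true);
}
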
15105 15090
15106 15091 /*
15107 15092 * This routine is called by a process to remove itself from an SCD. It is
15108 15093  * called either when the process has detached from a segment or from
15109 15094 * hat_free_start() as a result of calling exit.
15110 15095 */
15111 15096 static void
15112 15097 sfmmu_leave_scd(sfmmu_t *sfmmup, uchar_t r_type)
15113 15098 {
15114 15099 sf_scd_t *scdp = sfmmup->sfmmu_scdp;
15115 15100 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
15116 15101 hatlock_t *hatlockp = TSB_HASH(sfmmup);
15117 15102 int i;
15118 15103
15119 15104 ASSERT(scdp != NULL);
15120 15105 ASSERT(srdp != NULL);
15121 15106
15122 15107 if (sfmmup->sfmmu_free) {
15123 15108 /*
15124 15109 * If the process is part of an SCD the sfmmu is unlinked
15125 15110 * from scd_sf_list.
15126 15111 */
15127 15112 mutex_enter(&scdp->scd_mutex);
15128 15113 sfmmu_from_scd_list(&scdp->scd_sf_list, sfmmup);
15129 15114 mutex_exit(&scdp->scd_mutex);
15130 15115 /*
15131 15116 * Update sfmmu_ttecnt to include the rgn ttecnt for rgns that
15132 15117 * are about to leave the SCD
15133 15118 */
15134 15119 for (i = 0; i < mmu_page_sizes; i++) {
15135 15120 ASSERT(sfmmup->sfmmu_scdrttecnt[i] ==
15136 15121 scdp->scd_rttecnt[i]);
15137 15122 atomic_add_long(&sfmmup->sfmmu_ttecnt[i],
15138 15123 sfmmup->sfmmu_scdrttecnt[i]);
15139 15124 sfmmup->sfmmu_scdrttecnt[i] = 0;
15140 15125 }
15141 15126 sfmmup->sfmmu_scdp = NULL;
15142 15127
15143 15128 SF_SCD_DECR_REF(srdp, scdp);
15144 15129 return;
15145 15130 }
15146 15131
15147 15132 ASSERT(r_type != SFMMU_REGION_ISM ||
15148 15133 SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY));
15149 15134 ASSERT(scdp->scd_refcnt);
15150 15135 ASSERT(!sfmmup->sfmmu_free);
15151 15136 ASSERT(sfmmu_hat_lock_held(sfmmup));
15152 15137 ASSERT(AS_LOCK_HELD(sfmmup->sfmmu_as));
15153 15138
15154 15139 /*
15155 15140 * Wait for ISM maps to be updated.
15156 15141 */
15157 15142 if (r_type != SFMMU_REGION_ISM) {
15158 15143 while (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY) &&
15159 15144 sfmmup->sfmmu_scdp != NULL) {
15160 15145 cv_wait(&sfmmup->sfmmu_tsb_cv,
15161 15146 HATLOCK_MUTEXP(hatlockp));
15162 15147 }
15163 15148
15164 15149 if (sfmmup->sfmmu_scdp == NULL) {
15165 15150 sfmmu_hat_exit(hatlockp);
15166 15151 return;
15167 15152 }
15168 15153 SFMMU_FLAGS_SET(sfmmup, HAT_ISMBUSY);
15169 15154 }
15170 15155
15171 15156 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)) {
15172 15157 SFMMU_FLAGS_CLEAR(sfmmup, HAT_JOIN_SCD);
15173 15158 /*
15174 15159 * Since HAT_JOIN_SCD was set our context
15175 15160 * is still invalid.
15176 15161 */
15177 15162 } else {
15178 15163 /*
15179 15164 * For a multi-thread process, we must stop
15180 15165 * all the other threads before leaving the scd.
15181 15166 */
15182 15167
15183 15168 sfmmu_invalidate_ctx(sfmmup);
15184 15169 }
15185 15170
15186 15171 /* Clear all the rid's for ISM, delete flags, etc */
15187 15172 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY));
15188 15173 sfmmu_ism_hatflags(sfmmup, 0);
15189 15174
15190 15175 /*
15191 15176 * Update sfmmu_ttecnt to include the rgn ttecnt for rgns that
15192 15177 * are in SCD before this sfmmup leaves the SCD.
15193 15178 */
15194 15179 for (i = 0; i < mmu_page_sizes; i++) {
15195 15180 ASSERT(sfmmup->sfmmu_scdrttecnt[i] ==
15196 15181 scdp->scd_rttecnt[i]);
15197 15182 atomic_add_long(&sfmmup->sfmmu_ttecnt[i],
15198 15183 sfmmup->sfmmu_scdrttecnt[i]);
15199 15184 sfmmup->sfmmu_scdrttecnt[i] = 0;
15200 15185 /* update ismttecnt to include SCD ism before hat leaves SCD */
15201 15186 sfmmup->sfmmu_ismttecnt[i] += sfmmup->sfmmu_scdismttecnt[i];
15202 15187 sfmmup->sfmmu_scdismttecnt[i] = 0;
15203 15188 }
15204 15189 /* update tsb0 inflation count */
15205 15190 sfmmup->sfmmu_tsb0_4minflcnt += scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt;
15206 15191
15207 15192 if (r_type != SFMMU_REGION_ISM) {
15208 15193 SFMMU_FLAGS_CLEAR(sfmmup, HAT_ISMBUSY);
15209 15194 }
15210 15195 sfmmup->sfmmu_scdp = NULL;
15211 15196
15212 15197 sfmmu_hat_exit(hatlockp);
15213 15198
15214 15199 /*
15215 15200  * Unlink sfmmu from scd_sf_list; this can be done without holding
15216 15201 * the hat lock as we hold the sfmmu_as lock which prevents
15217 15202 * hat_join_region from adding this thread to the scd again. Other
15218 15203 * threads check if sfmmu_scdp is NULL under hat lock and if it's NULL
15219 15204 * they won't get here, since sfmmu_leave_scd() clears sfmmu_scdp
15220 15205 * while holding the hat lock.
15221 15206 */
15222 15207 mutex_enter(&scdp->scd_mutex);
15223 15208 sfmmu_from_scd_list(&scdp->scd_sf_list, sfmmup);
15224 15209 mutex_exit(&scdp->scd_mutex);
15225 15210 SFMMU_STAT(sf_leave_scd);
15226 15211
15227 15212 SF_SCD_DECR_REF(srdp, scdp);
15228 15213 hatlockp = sfmmu_hat_enter(sfmmup);
15229 15214
15230 15215 }
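
The "Wait for ISM maps to be updated" loop above is the standard condition-variable idiom: the predicate is re-tested every time cv_wait() returns, because the hat lock is dropped while sleeping and both HAT_ISMBUSY and sfmmu_scdp can change before this thread runs again. A user-level sketch of the same pattern with POSIX threads (the structure and field names are hypothetical):

#include <pthread.h>
#include <stdbool.h>

struct waiter {
	pthread_mutex_t	lock;	/* plays the role of the hat lock */
	pthread_cond_t	cv;	/* plays the role of sfmmu_tsb_cv */
	bool		busy;	/* analogous to HAT_ISMBUSY */
	void		*scdp;	/* analogous to sfmmu_scdp */
};

void
wait_until_not_busy(struct waiter *w)
{
	pthread_mutex_lock(&w->lock);
	/* always re-check: wakeups may be spurious or stale */
	while (w->busy && w->scdp != NULL)
		pthread_cond_wait(&w->cv, &w->lock);
	/* ... proceed while still holding the lock ... */
	pthread_mutex_unlock(&w->lock);
}
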
15231 15216
15232 15217 /*
15233 15218 * Unlink and free up an SCD structure with a reference count of 0.
15234 15219 */
15235 15220 static void
15236 15221 sfmmu_destroy_scd(sf_srd_t *srdp, sf_scd_t *scdp, sf_region_map_t *scd_rmap)
15237 15222 {
15238 15223 sfmmu_t *scsfmmup;
15239 15224 sf_scd_t *sp;
15240 15225 hatlock_t *shatlockp;
15241 15226 int i, ret;
15242 15227
15243 15228 mutex_enter(&srdp->srd_scd_mutex);
15244 15229 for (sp = srdp->srd_scdp; sp != NULL; sp = sp->scd_next) {
15245 15230 if (sp == scdp)
15246 15231 break;
15247 15232 }
15248 15233 if (sp == NULL || sp->scd_refcnt) {
15249 15234 mutex_exit(&srdp->srd_scd_mutex);
15250 15235 return;
15251 15236 }
15252 15237
15253 15238 /*
15254 15239 * It is possible that the scd has been freed and reallocated with a
15255 15240 * different region map while we've been waiting for the srd_scd_mutex.
15256 15241 */
15257 15242 SF_RGNMAP_EQUAL(scd_rmap, &sp->scd_region_map, ret);
15258 15243 if (ret != 1) {
15259 15244 mutex_exit(&srdp->srd_scd_mutex);
15260 15245 return;
15261 15246 }
15262 15247
15263 15248 ASSERT(scdp->scd_sf_list == NULL);
15264 15249 /*
15265 15250 * Unlink scd from srd_scdp list.
15266 15251 */
15267 15252 sfmmu_remove_scd(&srdp->srd_scdp, scdp);
15268 15253 mutex_exit(&srdp->srd_scd_mutex);
15269 15254
15270 15255 sfmmu_unlink_scd_from_regions(srdp, scdp);
15271 15256
15272 15257 /* Clear shared context tsb and release ctx */
15273 15258 scsfmmup = scdp->scd_sfmmup;
15274 15259
15275 15260 /*
15276 15261  * create a barrier so that the scd will not be destroyed
15277 15262  * if another thread still holds the same shared hat lock.
15278 15263 * E.g., sfmmu_tsbmiss_exception() needs to acquire the
15279 15264 * shared hat lock before checking the shared tsb reloc flag.
15280 15265 */
15281 15266 shatlockp = sfmmu_hat_enter(scsfmmup);
15282 15267 sfmmu_hat_exit(shatlockp);
15283 15268
15284 15269 sfmmu_free_scd_tsbs(scsfmmup);
15285 15270
15286 15271 for (i = 0; i < SFMMU_L1_HMERLINKS; i++) {
15287 15272 if (scsfmmup->sfmmu_hmeregion_links[i] != NULL) {
15288 15273 kmem_free(scsfmmup->sfmmu_hmeregion_links[i],
15289 15274 SFMMU_L2_HMERLINKS_SIZE);
15290 15275 scsfmmup->sfmmu_hmeregion_links[i] = NULL;
15291 15276 }
15292 15277 }
15293 15278 kmem_cache_free(sfmmuid_cache, scsfmmup);
15294 15279 kmem_cache_free(scd_cache, scdp);
15295 15280 SFMMU_STAT(sf_destroy_scd);
15296 15281 }
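
The hat-lock enter/exit pair above is used purely as a barrier: once sfmmu_hat_enter() returns, any thread that grabbed the shared hat lock earlier (such as sfmmu_tsbmiss_exception() checking the TSB reloc flag) has already released it, so the SCD can be torn down without pulling the lock out from under a holder. A one-function user-level sketch of that lock-as-barrier idiom:

#include <pthread.h>

/*
 * Take and immediately drop the lock: this returns only after every thread
 * that acquired the lock before us has left its critical section, which is
 * all the caller needs before freeing the object the lock protects.
 */
void
drain_lock_holders(pthread_mutex_t *lockp)
{
	pthread_mutex_lock(lockp);
	pthread_mutex_unlock(lockp);
}
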
15297 15282
15298 15283 /*
15299 15284 * Modifies the HAT_CTX1_FLAG for each of the ISM segments which correspond to
15300 15285 * bits which are set in the ism_region_map parameter. This flag indicates to
15301 15286 * the tsbmiss handler that mapping for these segments should be loaded using
15302 15287 * the shared context.
15303 15288 */
15304 15289 static void
15305 15290 sfmmu_ism_hatflags(sfmmu_t *sfmmup, int addflag)
15306 15291 {
15307 15292 sf_scd_t *scdp = sfmmup->sfmmu_scdp;
15308 15293 ism_blk_t *ism_blkp;
15309 15294 ism_map_t *ism_map;
15310 15295 int i, rid;
15311 15296
15312 15297 ASSERT(sfmmup->sfmmu_iblk != NULL);
15313 15298 ASSERT(scdp != NULL);
15314 15299 /*
15315 15300 * Note that the caller either set HAT_ISMBUSY flag or checked
15316 15301 * under hat lock that HAT_ISMBUSY was not set by another thread.
15317 15302 */
15318 15303 ASSERT(sfmmu_hat_lock_held(sfmmup));
15319 15304
15320 15305 ism_blkp = sfmmup->sfmmu_iblk;
15321 15306 while (ism_blkp != NULL) {
15322 15307 ism_map = ism_blkp->iblk_maps;
15323 15308 for (i = 0; ism_map[i].imap_ismhat && i < ISM_MAP_SLOTS; i++) {
15324 15309 rid = ism_map[i].imap_rid;
15325 15310 if (rid == SFMMU_INVALID_ISMRID) {
15326 15311 continue;
15327 15312 }
15328 15313 ASSERT(rid >= 0 && rid < SFMMU_MAX_ISM_REGIONS);
15329 15314 if (SF_RGNMAP_TEST(scdp->scd_ismregion_map, rid) &&
15330 15315 addflag) {
15331 15316 ism_map[i].imap_hatflags |=
15332 15317 HAT_CTX1_FLAG;
15333 15318 } else {
15334 15319 ism_map[i].imap_hatflags &=
15335 15320 ~HAT_CTX1_FLAG;
15336 15321 }
15337 15322 }
15338 15323 ism_blkp = ism_blkp->iblk_next;
15339 15324 }
15340 15325 }
15341 15326
15342 15327 static int
15343 15328 sfmmu_srd_lock_held(sf_srd_t *srdp)
15344 15329 {
15345 15330 return (MUTEX_HELD(&srdp->srd_mutex));
15346 15331 }
15347 15332
15348 15333 /* ARGSUSED */
15349 15334 static int
15350 15335 sfmmu_scdcache_constructor(void *buf, void *cdrarg, int kmflags)
15351 15336 {
15352 15337 sf_scd_t *scdp = (sf_scd_t *)buf;
15353 15338
15354 15339 bzero(buf, sizeof (sf_scd_t));
15355 15340 mutex_init(&scdp->scd_mutex, NULL, MUTEX_DEFAULT, NULL);
15356 15341 return (0);
15357 15342 }
15358 15343
15359 15344 /* ARGSUSED */
15360 15345 static void
15361 15346 sfmmu_scdcache_destructor(void *buf, void *cdrarg)
15362 15347 {
15363 15348 sf_scd_t *scdp = (sf_scd_t *)buf;
15364 15349
15365 15350 mutex_destroy(&scdp->scd_mutex);
15366 15351 }
15367 15352
15368 15353 /*
15369 15354 * The listp parameter is a pointer to a list of hmeblks which are partially
15370 15355  * freed as a result of calling sfmmu_hblk_hash_rm(). The last phase of the
15371 15356 * freeing process is to cross-call all cpus to ensure that there are no
15372 15357 * remaining cached references.
15373 15358 *
15374 15359 * If the local generation number is less than the global then we can free
15375 15360 * hmeblks which are already on the pending queue as another cpu has completed
15376 15361 * the cross-call.
15377 15362 *
15378 15363 * We cross-call to make sure that there are no threads on other cpus accessing
15379 15364  * these hmeblks and then complete the process of freeing them under the
15380 15365 * following conditions:
15381 - * The total number of pending hmeblks is greater than the threshold
15366 + * The total number of pending hmeblks is greater than the threshold
15382 15367 * The reserve list has fewer than HBLK_RESERVE_CNT hmeblks
15383 15368 * It is at least 1 second since the last time we cross-called
15384 15369 *
15385 15370 * Otherwise, we add the hmeblks to the per-cpu pending queue.
15386 15371 */
15387 15372 static void
15388 15373 sfmmu_hblks_list_purge(struct hme_blk **listp, int dontfree)
15389 15374 {
15390 15375 struct hme_blk *hblkp, *pr_hblkp = NULL;
15391 15376 int count = 0;
15392 15377 cpuset_t cpuset = cpu_ready_set;
15393 15378 cpu_hme_pend_t *cpuhp;
15394 15379 timestruc_t now;
15395 15380 int one_second_expired = 0;
15396 15381
15397 15382 gethrestime_lasttick(&now);
15398 15383
15399 15384 for (hblkp = *listp; hblkp != NULL; hblkp = hblkp->hblk_next) {
15400 15385 ASSERT(hblkp->hblk_shw_bit == 0);
15401 15386 ASSERT(hblkp->hblk_shared == 0);
15402 15387 count++;
15403 15388 pr_hblkp = hblkp;
15404 15389 }
15405 15390
15406 15391 cpuhp = &cpu_hme_pend[CPU->cpu_seqid];
15407 15392 mutex_enter(&cpuhp->chp_mutex);
15408 15393
15409 15394 if ((cpuhp->chp_count + count) == 0) {
15410 15395 mutex_exit(&cpuhp->chp_mutex);
15411 15396 return;
15412 15397 }
15413 15398
15414 15399 if ((now.tv_sec - cpuhp->chp_timestamp) > 1) {
15415 15400 one_second_expired = 1;
15416 15401 }
15417 15402
15418 15403 if (!dontfree && (freehblkcnt < HBLK_RESERVE_CNT ||
15419 15404 (cpuhp->chp_count + count) > cpu_hme_pend_thresh ||
15420 15405 one_second_expired)) {
15421 15406 /* Append global list to local */
15422 15407 if (pr_hblkp == NULL) {
15423 15408 *listp = cpuhp->chp_listp;
15424 15409 } else {
15425 15410 pr_hblkp->hblk_next = cpuhp->chp_listp;
15426 15411 }
15427 15412 cpuhp->chp_listp = NULL;
15428 15413 cpuhp->chp_count = 0;
15429 15414 cpuhp->chp_timestamp = now.tv_sec;
15430 15415 mutex_exit(&cpuhp->chp_mutex);
15431 15416
15432 15417 kpreempt_disable();
15433 15418 CPUSET_DEL(cpuset, CPU->cpu_id);
15434 15419 xt_sync(cpuset);
15435 15420 xt_sync(cpuset);
15436 15421 kpreempt_enable();
15437 15422
15438 15423 /*
15439 15424 * At this stage we know that no trap handlers on other
15440 15425 * cpus can have references to hmeblks on the list.
15441 15426 */
15442 15427 sfmmu_hblk_free(listp);
15443 15428 } else if (*listp != NULL) {
15444 15429 pr_hblkp->hblk_next = cpuhp->chp_listp;
15445 15430 cpuhp->chp_listp = *listp;
15446 15431 cpuhp->chp_count += count;
15447 15432 *listp = NULL;
15448 15433 mutex_exit(&cpuhp->chp_mutex);
15449 15434 } else {
15450 15435 mutex_exit(&cpuhp->chp_mutex);
15451 15436 }
15452 15437 }
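
The block comment above lists three triggers for flushing the per-cpu pending queue instead of appending to it: the backlog exceeds the threshold, the hblk reserve pool is running low, or more than a second has passed since the last flush. A toy predicate capturing that policy (the HBLK_RESERVE_CNT value here is an assumption, and the real routine additionally honors the dontfree flag, which forces queueing):

#include <stdbool.h>
#include <time.h>

#define HBLK_RESERVE_CNT	128	/* assumed reserve-pool target */

static bool
should_flush_pending(unsigned long pending, unsigned long incoming,
    unsigned long reserve_free, unsigned long pending_thresh,
    time_t last_flush, time_t now)
{
	if (reserve_free < HBLK_RESERVE_CNT)
		return (true);			/* reserve running low */
	if (pending + incoming > pending_thresh)
		return (true);			/* backlog over threshold */
	if (now - last_flush > 1)
		return (true);			/* at least 1 second elapsed */
	return (false);
}
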
15453 15438
15454 15439 /*
15455 15440  * Add an hmeblk to the hash list.
15456 15441 */
15457 15442 void
15458 15443 sfmmu_hblk_hash_add(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
15459 - uint64_t hblkpa)
15444 + uint64_t hblkpa)
15460 15445 {
15461 15446 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
15462 15447 #ifdef DEBUG
15463 15448 if (hmebp->hmeblkp == NULL) {
15464 15449 ASSERT(hmebp->hmeh_nextpa == HMEBLK_ENDPA);
15465 15450 }
15466 15451 #endif /* DEBUG */
15467 15452
15468 15453 hmeblkp->hblk_nextpa = hmebp->hmeh_nextpa;
15469 15454 /*
15470 15455 * Since the TSB miss handler now does not lock the hash chain before
15471 15456  * walking it, make sure that the hmeblk's nextpa is globally visible
15472 15457 * before we make the hmeblk globally visible by updating the chain root
15473 15458 * pointer in the hash bucket.
15474 15459 */
15475 15460 membar_producer();
15476 15461 hmebp->hmeh_nextpa = hblkpa;
15477 15462 hmeblkp->hblk_next = hmebp->hmeblkp;
15478 15463 hmebp->hmeblkp = hmeblkp;
15479 15464
15480 15465 }
15481 15466
15482 15467 /*
15483 15468 * This function is the first part of a 2 part process to remove an hmeblk
15484 15469 * from the hash chain. In this phase we unlink the hmeblk from the hash chain
15485 15470 * but leave the next physical pointer unchanged. The hmeblk is then linked onto
15486 15471 * a per-cpu pending list using the virtual address pointer.
15487 15472 *
15488 15473 * TSB miss trap handlers that start after this phase will no longer see
15489 15474 * this hmeblk. TSB miss handlers that still cache this hmeblk in a register
15490 15475  * can still use it for further chain traversal because we haven't yet modified
15491 15476 * the next physical pointer or freed it.
15492 15477 *
15493 15478 * In the second phase of hmeblk removal we'll issue a barrier xcall before
15494 15479 * we reuse or free this hmeblk. This will make sure all lingering references to
15495 15480 * the hmeblk after first phase disappear before we finally reclaim it.
15496 15481 * This scheme eliminates the need for TSB miss handlers to lock hmeblk chains
15497 15482 * during their traversal.
15498 15483 *
15499 15484 * The hmehash_mutex must be held when calling this function.
15500 15485 *
15501 15486 * Input:
15502 15487 * hmebp - hme hash bucket pointer
15503 15488 * hmeblkp - address of hmeblk to be removed
15504 15489 * pr_hblk - virtual address of previous hmeblkp
15505 15490 * listp - pointer to list of hmeblks linked by virtual address
15506 15491 * free_now flag - indicates that a complete removal from the hash chains
15507 15492 * is necessary.
15508 15493 *
15509 15494 * It is inefficient to use the free_now flag as a cross-call is required to
15510 15495 * remove a single hmeblk from the hash chain but is necessary when hmeblks are
15511 15496  * remove a single hmeblk from the hash chain, but it is necessary when hmeblks are
15512 15497 */
15513 15498 void
15514 15499 sfmmu_hblk_hash_rm(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
15515 - struct hme_blk *pr_hblk, struct hme_blk **listp,
15516 - int free_now)
15500 + struct hme_blk *pr_hblk, struct hme_blk **listp, int free_now)
15517 15501 {
15518 15502 int shw_size, vshift;
15519 15503 struct hme_blk *shw_hblkp;
15520 15504 uint_t shw_mask, newshw_mask;
15521 15505 caddr_t vaddr;
15522 15506 int size;
15523 15507 cpuset_t cpuset = cpu_ready_set;
15524 15508
15525 15509 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
15526 15510
15527 15511 if (hmebp->hmeblkp == hmeblkp) {
15528 15512 hmebp->hmeh_nextpa = hmeblkp->hblk_nextpa;
15529 15513 hmebp->hmeblkp = hmeblkp->hblk_next;
15530 15514 } else {
15531 15515 pr_hblk->hblk_nextpa = hmeblkp->hblk_nextpa;
15532 15516 pr_hblk->hblk_next = hmeblkp->hblk_next;
15533 15517 }
15534 15518
15535 15519 size = get_hblk_ttesz(hmeblkp);
15536 15520 shw_hblkp = hmeblkp->hblk_shadow;
15537 15521 if (shw_hblkp) {
15538 15522 ASSERT(hblktosfmmu(hmeblkp) != KHATID);
15539 15523 ASSERT(!hmeblkp->hblk_shared);
15540 15524 #ifdef DEBUG
15541 15525 if (mmu_page_sizes == max_mmu_page_sizes) {
15542 15526 ASSERT(size < TTE256M);
15543 15527 } else {
15544 15528 ASSERT(size < TTE4M);
15545 15529 }
15546 15530 #endif /* DEBUG */
15547 15531
15548 15532 shw_size = get_hblk_ttesz(shw_hblkp);
15549 15533 vaddr = (caddr_t)get_hblk_base(hmeblkp);
15550 15534 vshift = vaddr_to_vshift(shw_hblkp->hblk_tag, vaddr, shw_size);
15551 15535 ASSERT(vshift < 8);
15552 15536 /*
15553 15537 * Atomically clear shadow mask bit
15554 15538 */
15555 15539 do {
15556 15540 shw_mask = shw_hblkp->hblk_shw_mask;
15557 15541 ASSERT(shw_mask & (1 << vshift));
15558 15542 newshw_mask = shw_mask & ~(1 << vshift);
15559 15543 newshw_mask = atomic_cas_32(&shw_hblkp->hblk_shw_mask,
15560 15544 shw_mask, newshw_mask);
15561 15545 } while (newshw_mask != shw_mask);
15562 15546 hmeblkp->hblk_shadow = NULL;
15563 15547 }
15564 15548 hmeblkp->hblk_shw_bit = 0;
15565 15549
15566 15550 if (hmeblkp->hblk_shared) {
15567 15551 #ifdef DEBUG
15568 15552 sf_srd_t *srdp;
15569 15553 sf_region_t *rgnp;
15570 15554 uint_t rid;
15571 15555
15572 15556 srdp = hblktosrd(hmeblkp);
15573 15557 ASSERT(srdp != NULL && srdp->srd_refcnt != 0);
15574 15558 rid = hmeblkp->hblk_tag.htag_rid;
15575 15559 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
15576 15560 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
15577 15561 rgnp = srdp->srd_hmergnp[rid];
15578 15562 ASSERT(rgnp != NULL);
15579 15563 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid);
15580 15564 #endif /* DEBUG */
15581 15565 hmeblkp->hblk_shared = 0;
15582 15566 }
15583 15567 if (free_now) {
15584 15568 kpreempt_disable();
15585 15569 CPUSET_DEL(cpuset, CPU->cpu_id);
15586 15570 xt_sync(cpuset);
15587 15571 xt_sync(cpuset);
15588 15572 kpreempt_enable();
15589 15573
15590 15574 hmeblkp->hblk_nextpa = HMEBLK_ENDPA;
15591 15575 hmeblkp->hblk_next = NULL;
15592 15576 } else {
15593 15577 /* Append hmeblkp to listp for processing later. */
15594 15578 hmeblkp->hblk_next = *listp;
15595 15579 *listp = hmeblkp;
15596 15580 }
15597 15581 }
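
Clearing the shadow-mask bit above uses the usual compare-and-swap retry loop: recompute the new value from the freshly observed old value and retry until no other CPU has modified the word in between, which avoids taking a lock for a single-bit update. A user-level sketch of the same loop with C11 atomics (the kernel uses atomic_cas_32() directly):

#include <stdatomic.h>
#include <stdint.h>

static void
atomic_clear_bit(_Atomic uint32_t *maskp, unsigned int bit)
{
	uint32_t oldval, newval;

	oldval = atomic_load_explicit(maskp, memory_order_relaxed);
	do {
		newval = oldval & ~(1u << bit);
		/* on failure, oldval is reloaded with the current contents */
	} while (!atomic_compare_exchange_weak(maskp, &oldval, newval));
}
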
15598 15582
15599 15583 /*
15600 15584 * This routine is called when memory is in short supply and returns a free
15601 15585 * hmeblk of the requested size from the cpu pending lists.
15602 15586 */
15603 15587 static struct hme_blk *
15604 15588 sfmmu_check_pending_hblks(int size)
15605 15589 {
15606 15590 int i;
15607 15591 struct hme_blk *hmeblkp = NULL, *last_hmeblkp;
15608 15592 int found_hmeblk;
15609 15593 cpuset_t cpuset = cpu_ready_set;
15610 15594 cpu_hme_pend_t *cpuhp;
15611 15595
15612 15596 /* Flush cpu hblk pending queues */
15613 15597 for (i = 0; i < NCPU; i++) {
15614 15598 cpuhp = &cpu_hme_pend[i];
15615 15599 if (cpuhp->chp_listp != NULL) {
15616 15600 mutex_enter(&cpuhp->chp_mutex);
15617 15601 if (cpuhp->chp_listp == NULL) {
15618 15602 mutex_exit(&cpuhp->chp_mutex);
15619 15603 continue;
15620 15604 }
15621 15605 found_hmeblk = 0;
15622 15606 last_hmeblkp = NULL;
15623 15607 for (hmeblkp = cpuhp->chp_listp; hmeblkp != NULL;
15624 15608 hmeblkp = hmeblkp->hblk_next) {
15625 15609 if (get_hblk_ttesz(hmeblkp) == size) {
15626 15610 if (last_hmeblkp == NULL) {
15627 15611 cpuhp->chp_listp =
15628 15612 hmeblkp->hblk_next;
15629 15613 } else {
15630 15614 last_hmeblkp->hblk_next =
15631 15615 hmeblkp->hblk_next;
15632 15616 }
15633 15617 ASSERT(cpuhp->chp_count > 0);
15634 15618 cpuhp->chp_count--;
15635 15619 found_hmeblk = 1;
15636 15620 break;
15637 15621 } else {
15638 15622 last_hmeblkp = hmeblkp;
15639 15623 }
15640 15624 }
15641 15625 mutex_exit(&cpuhp->chp_mutex);
15642 15626
15643 15627 if (found_hmeblk) {
15644 15628 kpreempt_disable();
15645 15629 CPUSET_DEL(cpuset, CPU->cpu_id);
15646 15630 xt_sync(cpuset);
15647 15631 xt_sync(cpuset);
15648 15632 kpreempt_enable();
15649 15633 return (hmeblkp);
15650 15634 }
15651 15635 }
15652 15636 }
15653 15637 return (NULL);
15654 15638 }