de-linting of .s files
--- old/usr/src/uts/sfmmu/ml/sfmmu_asm.s
+++ new/usr/src/uts/sfmmu/ml/sfmmu_asm.s
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
21 lines elided
22 22 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 * Copyright (c) 2016 by Delphix. All rights reserved.
25 25 */
26 26
27 27 /*
28 28 * SFMMU primitives. These primitives should only be used by sfmmu
29 29 * routines.
30 30 */
31 31
32 -#if defined(lint)
33 -#include <sys/types.h>
34 -#else /* lint */
35 32 #include "assym.h"
36 -#endif /* lint */
37 33
38 34 #include <sys/asm_linkage.h>
39 35 #include <sys/machtrap.h>
40 36 #include <sys/machasi.h>
41 37 #include <sys/sun4asi.h>
42 38 #include <sys/pte.h>
43 39 #include <sys/mmu.h>
44 40 #include <vm/hat_sfmmu.h>
45 41 #include <vm/seg_spt.h>
46 42 #include <sys/machparam.h>
47 43 #include <sys/privregs.h>
48 44 #include <sys/scb.h>
49 45 #include <sys/intreg.h>
50 46 #include <sys/machthread.h>
51 47 #include <sys/intr.h>
52 48 #include <sys/clock.h>
53 49 #include <sys/trapstat.h>
54 50
55 51 #ifdef TRAPTRACE
56 52 #include <sys/traptrace.h>
57 53
58 54 /*
59 55 * Tracing macro. Adds two instructions if TRAPTRACE is defined.
13 lines elided
60 56 */
61 57 #define TT_TRACE(label) \
62 58 ba label ;\
63 59 rd %pc, %g7
64 60 #else
65 61
66 62 #define TT_TRACE(label)
67 63
68 64 #endif /* TRAPTRACE */
69 65
70 -#ifndef lint
71 -
72 66 #if (TTE_SUSPEND_SHIFT > 0)
73 67 #define TTE_SUSPEND_INT_SHIFT(reg) \
74 68 sllx reg, TTE_SUSPEND_SHIFT, reg
75 69 #else
76 70 #define TTE_SUSPEND_INT_SHIFT(reg)
77 71 #endif
78 72
79 -#endif /* lint */
80 -
81 -#ifndef lint
82 -
83 73 /*
84 74 * Assumes TSBE_TAG is 0
85 75 * Assumes TSBE_INTHI is 0
86 76 * Assumes TSBREG.split is 0
87 77 */
88 78
89 79 #if TSBE_TAG != 0
90 80 #error "TSB_UPDATE and TSB_INVALIDATE assume TSBE_TAG = 0"
91 81 #endif
92 82
93 83 #if TSBTAG_INTHI != 0
94 84 #error "TSB_UPDATE and TSB_INVALIDATE assume TSBTAG_INTHI = 0"
95 85 #endif
96 86
97 87 /*
98 88 * The following code assumes the tsb is not split.
99 89 *
100 90 * With TSBs no longer shared between processes, it's no longer
101 91 * necessary to hash the context bits into the tsb index to get
102 92 * tsb coloring; the new implementation treats the TSB as a
103 93 * direct-mapped, virtually-addressed cache.
104 94 *
105 95 * In:
106 96 * vpshift = virtual page shift; e.g. 13 for 8K TTEs (constant or ro)
107 97 * tsbbase = base address of TSB (clobbered)
108 98 * tagacc = tag access register (clobbered)
109 99 * szc = size code of TSB (ro)
110 100 * tmp = scratch reg
111 101 * Out:
112 102 * tsbbase = pointer to entry in TSB
113 103 */
114 104 #define GET_TSBE_POINTER(vpshift, tsbbase, tagacc, szc, tmp) \
115 105 mov TSB_ENTRIES(0), tmp /* nentries in TSB size 0 */ ;\
116 106 srlx tagacc, vpshift, tagacc ;\
117 107 sllx tmp, szc, tmp /* tmp = nentries in TSB */ ;\
118 108 sub tmp, 1, tmp /* mask = nentries - 1 */ ;\
119 109 and tagacc, tmp, tmp /* tsbent = virtpage & mask */ ;\
120 110 sllx tmp, TSB_ENTRY_SHIFT, tmp /* entry num --> ptr */ ;\
121 111 add tsbbase, tmp, tsbbase /* add entry offset to TSB base */
122 112
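For readers following the new direct-mapped indexing, the arithmetic in GET_TSBE_POINTER can be read as the C sketch below. This is illustrative only: TSB0_ENTRIES stands in for TSB_ENTRIES(0), TSB_ENTRY_SHIFT is the assym constant, and the fixed-width types are assumed from the usual kernel headers.

	/*
	 * Illustrative sketch of the GET_TSBE_POINTER index math; not kernel code.
	 */
	static inline uintptr_t
	tsbe_pointer(uintptr_t tsbbase, uint64_t tagacc, int vpshift, int szc)
	{
		uint64_t nentries = (uint64_t)TSB0_ENTRIES << szc;	/* entries in this TSB */
		uint64_t idx = (tagacc >> vpshift) & (nentries - 1);	/* direct-mapped index */

		return (tsbbase + (idx << TSB_ENTRY_SHIFT));		/* entry number -> pointer */
	}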
123 113 /*
124 114 * When the kpm TSB is used it is assumed that it is direct mapped
125 115 * using (vaddr>>vpshift)%tsbsz as the index.
126 116 *
127 117 * Note that, for now, the kpm TSB and kernel TSB are the same for
128 118 * each mapping size. However that need not always be the case. If
129 119 * the trap handlers are updated to search a different TSB for kpm
130 120 * addresses than for kernel addresses then kpm_tsbbase and kpm_tsbsz
131 121 * (and/or kpmsm_tsbbase/kpmsm_tsbsz) may be entirely independent.
132 122 *
133 123 * In:
134 124 * vpshift = virtual page shift; e.g. 13 for 8K TTEs (constant or ro)
135 125 * vaddr = virtual address (clobbered)
136 126 * tsbp, szc, tmp = scratch
137 127 * Out:
138 128 * tsbp = pointer to entry in TSB
139 129 */
140 130 #define GET_KPM_TSBE_POINTER(vpshift, tsbp, vaddr, szc, tmp) \
141 131 cmp vpshift, MMU_PAGESHIFT ;\
142 132 bne,pn %icc, 1f /* branch if large case */ ;\
143 133 sethi %hi(kpmsm_tsbsz), szc ;\
144 134 sethi %hi(kpmsm_tsbbase), tsbp ;\
145 135 ld [szc + %lo(kpmsm_tsbsz)], szc ;\
146 136 ldx [tsbp + %lo(kpmsm_tsbbase)], tsbp ;\
147 137 ba,pt %icc, 2f ;\
148 138 nop ;\
149 139 1: sethi %hi(kpm_tsbsz), szc ;\
150 140 sethi %hi(kpm_tsbbase), tsbp ;\
151 141 ld [szc + %lo(kpm_tsbsz)], szc ;\
152 142 ldx [tsbp + %lo(kpm_tsbbase)], tsbp ;\
153 143 2: GET_TSBE_POINTER(vpshift, tsbp, vaddr, szc, tmp)
154 144
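The kpm variant only adds the TSB selection step; a short sketch, reusing the hypothetical tsbe_pointer() helper above (the kpmsm_*/kpm_* globals are the ones loaded by the assembly):

	/*
	 * Sketch only: pick the small-page or large-page kpm TSB, then reuse the
	 * same direct-mapped index computation as the kernel TSB.
	 */
	static inline uintptr_t
	kpm_tsbe_pointer(uint64_t vaddr, int vpshift)
	{
		uintptr_t base = (vpshift == MMU_PAGESHIFT) ? kpmsm_tsbbase : kpm_tsbbase;
		int	  szc  = (vpshift == MMU_PAGESHIFT) ? kpmsm_tsbsz : kpm_tsbsz;

		return (tsbe_pointer(base, vaddr, vpshift, szc));
	}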
155 145 /*
156 146 * Lock the TSBE at virtual address tsbep.
157 147 *
158 148 * tsbep = TSBE va (ro)
159 149 * tmp1, tmp2 = scratch registers (clobbered)
160 150 * label = label to jump to if we fail to lock the tsb entry
161 151 * %asi = ASI to use for TSB access
162 152 *
163 153 * NOTE that we flush the TSB using fast VIS instructions that
164 154 * set all 1's in the TSB tag, so TSBTAG_LOCKED|TSBTAG_INVALID must
165 155 * not be treated as a locked entry or we'll get stuck spinning on
166 156 * an entry that isn't locked but really invalid.
167 157 */
168 158
169 159 #if defined(UTSB_PHYS)
170 160
171 161 #define TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label) \
172 162 lda [tsbep]ASI_MEM, tmp1 ;\
173 163 sethi %hi(TSBTAG_LOCKED), tmp2 ;\
174 164 cmp tmp1, tmp2 ;\
175 165 be,a,pn %icc, label /* if locked ignore */ ;\
176 166 nop ;\
177 167 casa [tsbep]ASI_MEM, tmp1, tmp2 ;\
178 168 cmp tmp1, tmp2 ;\
179 169 bne,a,pn %icc, label /* didn't lock so ignore */ ;\
180 170 nop ;\
181 171 /* tsbe lock acquired */ ;\
182 172 membar #StoreStore
183 173
184 174 #else /* UTSB_PHYS */
185 175
186 176 #define TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label) \
187 177 lda [tsbep]%asi, tmp1 ;\
188 178 sethi %hi(TSBTAG_LOCKED), tmp2 ;\
189 179 cmp tmp1, tmp2 ;\
190 180 be,a,pn %icc, label /* if locked ignore */ ;\
191 181 nop ;\
192 182 casa [tsbep]%asi, tmp1, tmp2 ;\
193 183 cmp tmp1, tmp2 ;\
194 184 bne,a,pn %icc, label /* didn't lock so ignore */ ;\
195 185 nop ;\
196 186 /* tsbe lock acquired */ ;\
197 187 membar #StoreStore
198 188
199 189 #endif /* UTSB_PHYS */
200 190
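The lock protocol above amounts to a compare-and-swap on the 32-bit tag word. A hedged C sketch follows, using a GCC atomic builtin in place of casa and returning a status instead of branching to the caller's label:

	/*
	 * Sketch of TSB_LOCK_ENTRY: give up if the entry is already locked or if
	 * the cas loses a race; on success the tag word now reads TSBTAG_LOCKED
	 * and the real macro issues a #StoreStore barrier before later stores.
	 */
	static int
	tsbe_trylock(volatile uint32_t *tag_hi)
	{
		uint32_t old = *tag_hi;

		if (old == TSBTAG_LOCKED)
			return (0);		/* already locked: ignore */
		if (__sync_val_compare_and_swap(tag_hi, old, TSBTAG_LOCKED) != old)
			return (0);		/* didn't lock: ignore */
		return (1);			/* tsbe lock acquired */
	}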
201 191 /*
202 192 * Atomically write TSBE at virtual address tsbep.
203 193 *
204 194 * tsbep = TSBE va (ro)
205 195 * tte = TSBE TTE (ro)
206 196 * tagtarget = TSBE tag (ro)
207 197 * %asi = ASI to use for TSB access
208 198 */
209 199
210 200 #if defined(UTSB_PHYS)
211 201
212 202 #define TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1) \
213 203 add tsbep, TSBE_TTE, tmp1 ;\
214 204 stxa tte, [tmp1]ASI_MEM /* write tte data */ ;\
215 205 membar #StoreStore ;\
216 206 add tsbep, TSBE_TAG, tmp1 ;\
217 207 stxa tagtarget, [tmp1]ASI_MEM /* write tte tag & unlock */
218 208
219 209 #else /* UTSB_PHYS */
220 210
221 211 #define TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget,tmp1) \
222 212 stxa tte, [tsbep + TSBE_TTE]%asi /* write tte data */ ;\
223 213 membar #StoreStore ;\
224 214 stxa tagtarget, [tsbep + TSBE_TAG]%asi /* write tte tag & unlock */
225 215
226 216 #endif /* UTSB_PHYS */
227 217
228 218 /*
229 219 * Load an entry into the TSB at TL > 0.
230 220 *
231 221 * tsbep = pointer to the TSBE to load as va (ro)
232 222 * tte = value of the TTE retrieved and loaded (wo)
233 223 * tagtarget = tag target register. To get TSBE tag to load,
234 224 * we need to mask off the context and leave only the va (clobbered)
235 225 * ttepa = pointer to the TTE to retrieve/load as pa (ro)
236 226 * tmp1, tmp2 = scratch registers
237 227 * label = label to jump to if we fail to lock the tsb entry
238 228 * %asi = ASI to use for TSB access
239 229 */
240 230
241 231 #if defined(UTSB_PHYS)
242 232
243 233 #define TSB_UPDATE_TL(tsbep, tte, tagtarget, ttepa, tmp1, tmp2, label) \
244 234 TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label) ;\
245 235 /* ;\
246 236 * I don't need to update the TSB then check for the valid tte. ;\
247 237 * TSB invalidate will spin till the entry is unlocked. Note, ;\
248 238 * we always invalidate the hash table before we unload the TSB.;\
249 239 */ ;\
250 240 sllx tagtarget, TTARGET_VA_SHIFT, tagtarget ;\
251 241 ldxa [ttepa]ASI_MEM, tte ;\
252 242 srlx tagtarget, TTARGET_VA_SHIFT, tagtarget ;\
253 243 sethi %hi(TSBTAG_INVALID), tmp2 ;\
254 244 add tsbep, TSBE_TAG, tmp1 ;\
255 245 brgez,a,pn tte, label ;\
256 246 sta tmp2, [tmp1]ASI_MEM /* unlock */ ;\
257 247 TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1) ;\
258 248 label:
259 249
260 250 #else /* UTSB_PHYS */
261 251
262 252 #define TSB_UPDATE_TL(tsbep, tte, tagtarget, ttepa, tmp1, tmp2, label) \
263 253 TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label) ;\
264 254 /* ;\
265 255 * I don't need to update the TSB then check for the valid tte. ;\
266 256 * TSB invalidate will spin till the entry is unlocked. Note, ;\
267 257 * we always invalidate the hash table before we unload the TSB.;\
268 258 */ ;\
269 259 sllx tagtarget, TTARGET_VA_SHIFT, tagtarget ;\
270 260 ldxa [ttepa]ASI_MEM, tte ;\
271 261 srlx tagtarget, TTARGET_VA_SHIFT, tagtarget ;\
272 262 sethi %hi(TSBTAG_INVALID), tmp2 ;\
273 263 brgez,a,pn tte, label ;\
274 264 sta tmp2, [tsbep + TSBE_TAG]%asi /* unlock */ ;\
275 265 TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1) ;\
276 266 label:
277 267
278 268 #endif /* UTSB_PHYS */
279 269
280 270 /*
281 271 * Load a 32M/256M Panther TSB entry into the TSB at TL > 0,
282 272 * for ITLB synthesis.
283 273 *
284 274 * tsbep = pointer to the TSBE to load as va (ro)
285 275 * tte = 4M pfn offset (in), value of the TTE retrieved and loaded (out)
286 276 * with exec_perm turned off and exec_synth turned on
287 277 * tagtarget = tag target register. To get TSBE tag to load,
288 278 * we need to mask off the context and leave only the va (clobbered)
289 279 * ttepa = pointer to the TTE to retrieve/load as pa (ro)
290 280 * tmp1, tmp2 = scratch registers
291 281 * label = label to use for branch (text)
292 282 * %asi = ASI to use for TSB access
293 283 */
294 284
295 285 #define TSB_UPDATE_TL_PN(tsbep, tte, tagtarget, ttepa, tmp1, tmp2, label) \
296 286 TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label) ;\
297 287 /* ;\
298 288 * I don't need to update the TSB then check for the valid tte. ;\
299 289 * TSB invalidate will spin till the entry is unlocked. Note, ;\
300 290 * we always invalidate the hash table before we unload the TSB.;\
301 291 * Or in 4M pfn offset to TTE and set the exec_perm bit to 0 ;\
302 292 * and exec_synth bit to 1. ;\
303 293 */ ;\
304 294 sllx tagtarget, TTARGET_VA_SHIFT, tagtarget ;\
305 295 mov tte, tmp1 ;\
306 296 ldxa [ttepa]ASI_MEM, tte ;\
307 297 srlx tagtarget, TTARGET_VA_SHIFT, tagtarget ;\
308 298 sethi %hi(TSBTAG_INVALID), tmp2 ;\
309 299 brgez,a,pn tte, label ;\
310 300 sta tmp2, [tsbep + TSBE_TAG]%asi /* unlock */ ;\
311 301 or tte, tmp1, tte ;\
312 302 andn tte, TTE_EXECPRM_INT, tte ;\
313 303 or tte, TTE_E_SYNTH_INT, tte ;\
314 304 TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1) ;\
315 305 label:
316 306
317 307 /*
318 308 * Build a 4M pfn offset for a Panther 32M/256M page, for ITLB synthesis.
319 309 *
320 310 * tte = value of the TTE, used to get tte_size bits (ro)
321 311 * tagaccess = tag access register, used to get 4M pfn bits (ro)
322 312 * pfn = 4M pfn bits shifted to offset for tte (out)
323 313 * tmp1 = scratch register
324 314 * label = label to use for branch (text)
325 315 */
326 316
327 317 #define GET_4M_PFN_OFF(tte, tagaccess, pfn, tmp, label) \
328 318 /* ;\
329 319 * Get 4M bits from tagaccess for 32M, 256M pagesizes. ;\
330 320 * Return them, shifted, in pfn. ;\
331 321 */ ;\
332 322 srlx tagaccess, MMU_PAGESHIFT4M, tagaccess ;\
333 323 srlx tte, TTE_SZ_SHFT, tmp /* isolate the */ ;\
334 324 andcc tmp, TTE_SZ_BITS, %g0 /* tte_size bits */ ;\
335 325 bz,a,pt %icc, label/**/f /* if 0, is */ ;\
336 326 and tagaccess, 0x7, tagaccess /* 32M page size */ ;\
337 327 and tagaccess, 0x3f, tagaccess /* else 256M page size */ ;\
338 328 label: ;\
339 329 sllx tagaccess, MMU_PAGESHIFT4M, pfn
340 330
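The bit selection above is easier to read in C; this sketch assumes the TTE_SZ_* and MMU_PAGESHIFT4M constants from the headers and is not part of the change:

	/*
	 * Sketch of GET_4M_PFN_OFF: take the 4M-page index inside a Panther 32M
	 * (3 bits) or 256M (6 bits) page from the tag access register and shift
	 * it back into position so it can be or'ed into the synthesized TTE.
	 */
	static uint64_t
	get_4m_pfn_off(uint64_t tte, uint64_t tagaccess)
	{
		uint64_t idx = tagaccess >> MMU_PAGESHIFT4M;
		int	 is32m = (((tte >> TTE_SZ_SHFT) & TTE_SZ_BITS) == 0);

		idx &= is32m ? 0x7 : 0x3f;	/* 8 or 64 4M pages per large page */
		return (idx << MMU_PAGESHIFT4M);
	}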
341 331 /*
342 332 * Add 4M TTE size code to a tte for a Panther 32M/256M page,
343 333 * for ITLB synthesis.
344 334 *
345 335 * tte = value of the TTE, used to get tte_size bits (rw)
346 336 * tmp1 = scratch register
347 337 */
348 338
349 339 #define SET_TTE4M_PN(tte, tmp) \
350 340 /* ;\
351 341 * Set 4M pagesize tte bits. ;\
352 342 */ ;\
353 343 set TTE4M, tmp ;\
354 344 sllx tmp, TTE_SZ_SHFT, tmp ;\
355 345 or tte, tmp, tte
356 346
357 347 /*
358 348 * Load an entry into the TSB at TL=0.
359 349 *
360 350 * tsbep = pointer to the TSBE to load as va (ro)
361 351 * tteva = pointer to the TTE to load as va (ro)
362 352 * tagtarget = TSBE tag to load (which contains no context), synthesized
363 353 * to match va of MMU tag target register only (ro)
364 354 * tmp1, tmp2 = scratch registers (clobbered)
365 355 * label = label to use for branches (text)
366 356 * %asi = ASI to use for TSB access
367 357 */
368 358
369 359 #if defined(UTSB_PHYS)
370 360
371 361 #define TSB_UPDATE(tsbep, tteva, tagtarget, tmp1, tmp2, label) \
372 362 /* can't rd tteva after locking tsb because it can tlb miss */ ;\
373 363 ldx [tteva], tteva /* load tte */ ;\
374 364 TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label) ;\
375 365 sethi %hi(TSBTAG_INVALID), tmp2 ;\
376 366 add tsbep, TSBE_TAG, tmp1 ;\
377 367 brgez,a,pn tteva, label ;\
378 368 sta tmp2, [tmp1]ASI_MEM /* unlock */ ;\
379 369 TSB_INSERT_UNLOCK_ENTRY(tsbep, tteva, tagtarget, tmp1) ;\
380 370 label:
381 371
382 372 #else /* UTSB_PHYS */
383 373
384 374 #define TSB_UPDATE(tsbep, tteva, tagtarget, tmp1, tmp2, label) \
385 375 /* can't rd tteva after locking tsb because it can tlb miss */ ;\
386 376 ldx [tteva], tteva /* load tte */ ;\
387 377 TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label) ;\
388 378 sethi %hi(TSBTAG_INVALID), tmp2 ;\
389 379 brgez,a,pn tteva, label ;\
390 380 sta tmp2, [tsbep + TSBE_TAG]%asi /* unlock */ ;\
391 381 TSB_INSERT_UNLOCK_ENTRY(tsbep, tteva, tagtarget, tmp1) ;\
392 382 label:
393 383
394 384 #endif /* UTSB_PHYS */
395 385
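Putting the pieces together, the TL=0 update path reads roughly as the sketch below. The tsbe_sketch layout (32-bit tag words followed by the TTE) and tsbe_trylock() are the illustrative names introduced earlier, not the kernel's definitions, and the real code publishes the whole 64-bit tag with a single store rather than two word stores.

	typedef struct tsbe_sketch {
		volatile uint32_t tag_hi;	/* high tag word: lock/invalid encodings */
		uint32_t tag_lo;
		uint64_t tte;
	} tsbe_sketch_t;

	static void
	tsb_update_sketch(tsbe_sketch_t *tsbep, uint64_t tte, uint64_t tag)
	{
		if (!tsbe_trylock(&tsbep->tag_hi))
			return;				/* locked or raced: skip */
		if ((int64_t)tte >= 0) {		/* TTE no longer valid */
			tsbep->tag_hi = TSBTAG_INVALID;	/* back out: unlock as invalid */
			return;
		}
		tsbep->tte = tte;			/* data first; #StoreStore ... */
		tsbep->tag_lo = (uint32_t)tag;
		tsbep->tag_hi = (uint32_t)(tag >> 32);	/* ... then tag, which unlocks */
	}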
396 386 /*
397 387 * Invalidate a TSB entry in the TSB.
398 388 *
399 389 * NOTE: TSBE_TAG is assumed to be zero. There is a compile time check
400 390 * about this earlier to ensure this is true. Thus when we are
401 391 * directly referencing tsbep below, we are referencing the tte_tag
402 392 * field of the TSBE. If this offset ever changes, the code below
403 393 * will need to be modified.
404 394 *
405 395 * tsbep = pointer to TSBE as va (ro)
406 396 * tag = invalidation is done if this matches the TSBE tag (ro)
407 397 * tmp1 - tmp3 = scratch registers (clobbered)
408 398 * label = label name to use for branches (text)
409 399 * %asi = ASI to use for TSB access
410 400 */
411 401
412 402 #if defined(UTSB_PHYS)
413 403
414 404 #define TSB_INVALIDATE(tsbep, tag, tmp1, tmp2, tmp3, label) \
415 405 lda [tsbep]ASI_MEM, tmp1 /* tmp1 = tsbe tag */ ;\
416 406 sethi %hi(TSBTAG_LOCKED), tmp2 ;\
417 407 label/**/1: ;\
418 408 cmp tmp1, tmp2 /* see if tsbe is locked, if */ ;\
419 409 be,a,pn %icc, label/**/1 /* so, loop until unlocked */ ;\
420 410 lda [tsbep]ASI_MEM, tmp1 /* reloading value each time */ ;\
421 411 ldxa [tsbep]ASI_MEM, tmp3 /* tmp3 = tsbe tag */ ;\
422 412 cmp tag, tmp3 /* compare tags */ ;\
423 413 bne,pt %xcc, label/**/2 /* if different, do nothing */ ;\
424 414 sethi %hi(TSBTAG_INVALID), tmp3 ;\
425 415 casa [tsbep]ASI_MEM, tmp1, tmp3 /* try to set tag invalid */ ;\
426 416 cmp tmp1, tmp3 /* if not successful */ ;\
427 417 bne,a,pn %icc, label/**/1 /* start over from the top */ ;\
428 418 lda [tsbep]ASI_MEM, tmp1 /* reloading tsbe tag */ ;\
429 419 label/**/2:
430 420
431 421 #else /* UTSB_PHYS */
432 422
433 423 #define TSB_INVALIDATE(tsbep, tag, tmp1, tmp2, tmp3, label) \
434 424 lda [tsbep]%asi, tmp1 /* tmp1 = tsbe tag */ ;\
435 425 sethi %hi(TSBTAG_LOCKED), tmp2 ;\
436 426 label/**/1: ;\
437 427 cmp tmp1, tmp2 /* see if tsbe is locked, if */ ;\
438 428 be,a,pn %icc, label/**/1 /* so, loop until unlocked */ ;\
439 429 lda [tsbep]%asi, tmp1 /* reloading value each time */ ;\
440 430 ldxa [tsbep]%asi, tmp3 /* tmp3 = tsbe tag */ ;\
441 431 cmp tag, tmp3 /* compare tags */ ;\
442 432 bne,pt %xcc, label/**/2 /* if different, do nothing */ ;\
443 433 sethi %hi(TSBTAG_INVALID), tmp3 ;\
444 434 casa [tsbep]%asi, tmp1, tmp3 /* try to set tag invalid */ ;\
445 435 cmp tmp1, tmp3 /* if not successful */ ;\
446 436 bne,a,pn %icc, label/**/1 /* start over from the top */ ;\
447 437 lda [tsbep]%asi, tmp1 /* reloading tsbe tag */ ;\
448 438 label/**/2:
449 439
450 440 #endif /* UTSB_PHYS */
451 441
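The invalidation loop above, sketched with the same hypothetical tag-word layout: the full 64-bit tag is compared, but locking and invalidation go through the 32-bit high word.

	/*
	 * Sketch of TSB_INVALIDATE: wait out a locked entry, leave entries whose
	 * tag doesn't match, and retry if the tag word changes under the cas.
	 */
	static void
	tsbe_invalidate(volatile uint32_t *tag_hi, volatile uint64_t *tagp, uint64_t tag)
	{
		for (;;) {
			uint32_t old;

			while ((old = *tag_hi) == TSBTAG_LOCKED)
				;			/* spin until unlocked */
			if (*tagp != tag)
				return;			/* different mapping: do nothing */
			if (__sync_val_compare_and_swap(tag_hi, old, TSBTAG_INVALID) == old)
				return;			/* tag set invalid */
			/* cas failed: start over from the top */
		}
	}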
452 442 #if TSB_SOFTSZ_MASK < TSB_SZ_MASK
453 443 #error - TSB_SOFTSZ_MASK too small
454 444 #endif
455 445
456 446
457 447 /*
458 448 * An implementation of setx which will be hot patched at run time.
 459 449	 * Since it is being hot patched, there is no value passed in.
460 450 * Thus, essentially we are implementing
461 451 * setx value, tmp, dest
462 452 * where value is RUNTIME_PATCH (aka 0) in this case.
370 lines elided
463 453 */
464 454 #define RUNTIME_PATCH_SETX(dest, tmp) \
465 455 sethi %hh(RUNTIME_PATCH), tmp ;\
466 456 sethi %lm(RUNTIME_PATCH), dest ;\
467 457 or tmp, %hm(RUNTIME_PATCH), tmp ;\
468 458 or dest, %lo(RUNTIME_PATCH), dest ;\
469 459 sllx tmp, 32, tmp ;\
470 460 nop /* for perf reasons */ ;\
471 461 or tmp, dest, dest /* contents of patched value */
472 462
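The four immediates patched into this sequence (later filled in by sfmmu_fixup_setx() in this file) carve a 64-bit value into %hh/%hm/%lm/%lo pieces. A sketch of the split, with illustrative field names only:

	struct setx_fields {
		uint32_t hh;	/* bits 63:42 -> imm22 of the first sethi */
		uint32_t lm;	/* bits 31:10 -> imm22 of the second sethi */
		uint32_t hm;	/* bits 41:32 -> imm13 of the first or */
		uint32_t lo;	/* bits  9:0  -> imm13 of the second or */
	};

	static struct setx_fields
	setx_split(uint64_t val)
	{
		struct setx_fields f;

		f.hh = (uint32_t)(val >> 42);
		f.lm = (uint32_t)((val << 32) >> 42);	/* isolate bits 31:10 */
		f.hm = (uint32_t)((val >> 32) & 0x3ff);
		f.lo = (uint32_t)(val & 0x3ff);
		return (f);
	}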
473 -#endif /* lint */
474 463
475 -
476 -#if defined (lint)
477 -
478 -/*
479 - * sfmmu related subroutines
480 - */
481 -uint_t
482 -sfmmu_disable_intrs()
483 -{ return(0); }
484 -
485 -/* ARGSUSED */
486 -void
487 -sfmmu_enable_intrs(uint_t pstate_save)
488 -{}
489 -
490 -/* ARGSUSED */
491 -int
492 -sfmmu_alloc_ctx(sfmmu_t *sfmmup, int allocflag, struct cpu *cp, int shflag)
493 -{ return(0); }
494 -
495 -/*
496 - * Use cas, if tte has changed underneath us then reread and try again.
497 - * In the case of a retry, it will update sttep with the new original.
498 - */
499 -/* ARGSUSED */
500 -int
501 -sfmmu_modifytte(tte_t *sttep, tte_t *stmodttep, tte_t *dttep)
502 -{ return(0); }
503 -
504 -/*
505 - * Use cas, if tte has changed underneath us then return 1, else return 0
506 - */
507 -/* ARGSUSED */
508 -int
509 -sfmmu_modifytte_try(tte_t *sttep, tte_t *stmodttep, tte_t *dttep)
510 -{ return(0); }
511 -
512 -/* ARGSUSED */
513 -void
514 -sfmmu_copytte(tte_t *sttep, tte_t *dttep)
515 -{}
516 -
517 -/*ARGSUSED*/
518 -struct tsbe *
519 -sfmmu_get_tsbe(uint64_t tsbeptr, caddr_t vaddr, int vpshift, int tsb_szc)
520 -{ return(0); }
521 -
522 -/*ARGSUSED*/
523 -uint64_t
524 -sfmmu_make_tsbtag(caddr_t va)
525 -{ return(0); }
526 -
527 -#else /* lint */
528 -
529 464 .seg ".data"
530 465 .global sfmmu_panic1
531 466 sfmmu_panic1:
532 467 .asciz "sfmmu_asm: interrupts already disabled"
533 468
534 469 .global sfmmu_panic3
535 470 sfmmu_panic3:
536 471 .asciz "sfmmu_asm: sfmmu_vatopfn called for user"
537 472
538 473 .global sfmmu_panic4
539 474 sfmmu_panic4:
540 475 .asciz "sfmmu_asm: 4M tsb pointer mis-match"
541 476
542 477 .global sfmmu_panic5
543 478 sfmmu_panic5:
544 479 .asciz "sfmmu_asm: no unlocked TTEs in TLB 0"
545 480
546 481 .global sfmmu_panic6
547 482 sfmmu_panic6:
548 483 .asciz "sfmmu_asm: interrupts not disabled"
549 484
550 485 .global sfmmu_panic7
551 486 sfmmu_panic7:
552 487 .asciz "sfmmu_asm: kernel as"
553 488
554 489 .global sfmmu_panic8
555 490 sfmmu_panic8:
556 491 .asciz "sfmmu_asm: gnum is zero"
557 492
558 493 .global sfmmu_panic9
559 494 sfmmu_panic9:
560 495 .asciz "sfmmu_asm: cnum is greater than MAX_SFMMU_CTX_VAL"
561 496
562 497 .global sfmmu_panic10
563 498 sfmmu_panic10:
564 499 .asciz "sfmmu_asm: valid SCD with no 3rd scd TSB"
565 500
566 501 .global sfmmu_panic11
567 502 sfmmu_panic11:
568 503 .asciz "sfmmu_asm: ktsb_phys must not be 0 on a sun4v platform"
569 504
570 505 ENTRY(sfmmu_disable_intrs)
571 506 rdpr %pstate, %o0
572 507 #ifdef DEBUG
573 508 PANIC_IF_INTR_DISABLED_PSTR(%o0, sfmmu_di_l0, %g1)
574 509 #endif /* DEBUG */
575 510 retl
576 511 wrpr %o0, PSTATE_IE, %pstate
577 512 SET_SIZE(sfmmu_disable_intrs)
578 513
579 514 ENTRY(sfmmu_enable_intrs)
580 515 retl
581 516 wrpr %g0, %o0, %pstate
582 517 SET_SIZE(sfmmu_enable_intrs)
583 518
584 519 /*
585 520 * This routine is called both by resume() and sfmmu_get_ctx() to
586 521 * allocate a new context for the process on a MMU.
587 522 * if allocflag == 1, then alloc ctx when HAT mmu cnum == INVALID .
588 523 * if allocflag == 0, then do not alloc ctx if HAT mmu cnum == INVALID, which
589 524 * is the case when sfmmu_alloc_ctx is called from resume().
590 525 *
591 526 * The caller must disable interrupts before entering this routine.
592 527 * To reduce ctx switch overhead, the code contains both 'fast path' and
593 528 * 'slow path' code. The fast path code covers the common case where only
594 529 * a quick check is needed and the real ctx allocation is not required.
595 530 * It can be done without holding the per-process (PP) lock.
596 531 * The 'slow path' code must be protected by the PP Lock and performs ctx
597 532 * allocation.
598 533 * Hardware context register and HAT mmu cnum are updated accordingly.
599 534 *
600 535 * %o0 - sfmmup
601 536 * %o1 - allocflag
602 537 * %o2 - CPU
603 538 * %o3 - sfmmu private/shared flag
604 539 *
605 540 * ret - 0: no ctx is allocated
606 541 * 1: a ctx is allocated
607 542 */
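Before the assembly, a C-level outline of the fast/slow path may help. The helpers read_hat_ctx(), write_hat_ctx(), atomic_bump_cnum(), pp_lock()/pp_unlock() and set_secondary_ctx() are hypothetical stand-ins for inlined code, and the sketch elides the DEBUG checks and the sun4v context-domain case.

	static int
	alloc_ctx_sketch(sfmmu_t *sfmmup, int allocflag, mmu_ctx_t *mmu_ctxp, int mmuid)
	{
		uint64_t gnum;
		uint_t cnum;
		int ret;

		/* fast path: quick check without the PP lock */
		read_hat_ctx(sfmmup, mmuid, &gnum, &cnum);
		if (cnum == INVALID_CONTEXT && allocflag == 0) {
			ret = 0;			/* resume(): don't allocate */
			goto done;
		}
		if (cnum != INVALID_CONTEXT && gnum == mmu_ctxp->gnum) {
			ret = 1;			/* HAT cnum still current */
			goto done;
		}

		/* slow path: re-check and allocate under the PP spinlock */
		pp_lock(sfmmup);
		read_hat_ctx(sfmmup, mmuid, &gnum, &cnum);
		if ((cnum == INVALID_CONTEXT && allocflag == 0) ||
		    (cnum != INVALID_CONTEXT && gnum == mmu_ctxp->gnum)) {
			ret = (cnum != INVALID_CONTEXT);
		} else if ((cnum = atomic_bump_cnum(mmu_ctxp)) == INVALID_CONTEXT) {
			ret = 0;			/* cnums exhausted; wrap around later */
		} else {
			write_hat_ctx(sfmmup, mmuid, mmu_ctxp->gnum, cnum);
			ret = 1;
		}
		pp_unlock(sfmmup);
	done:
		set_secondary_ctx(cnum);		/* program the secondary context register */
		return (ret);
	}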
608 543 ENTRY_NP(sfmmu_alloc_ctx)
609 544
610 545 #ifdef DEBUG
611 546 sethi %hi(ksfmmup), %g1
612 547 ldx [%g1 + %lo(ksfmmup)], %g1
613 548 cmp %g1, %o0
614 549 bne,pt %xcc, 0f
615 550 nop
616 551
617 552 sethi %hi(panicstr), %g1 ! if kernel as, panic
618 553 ldx [%g1 + %lo(panicstr)], %g1
619 554 tst %g1
620 555 bnz,pn %icc, 7f
621 556 nop
622 557
623 558 sethi %hi(sfmmu_panic7), %o0
624 559 call panic
625 560 or %o0, %lo(sfmmu_panic7), %o0
626 561
627 562 7:
628 563 retl
629 564 mov %g0, %o0 ! %o0 = ret = 0
630 565
631 566 0:
632 567 PANIC_IF_INTR_ENABLED_PSTR(sfmmu_ei_l1, %g1)
633 568 #endif /* DEBUG */
634 569
635 570 mov %o3, %g1 ! save sfmmu pri/sh flag in %g1
636 571
637 572 ! load global mmu_ctxp info
638 573 ldx [%o2 + CPU_MMU_CTXP], %o3 ! %o3 = mmu_ctx_t ptr
639 574
640 575 #ifdef sun4v
 641 576	/* During suspend on sun4v, context domains can be temporarily removed */
642 577 brz,a,pn %o3, 0f
643 578 nop
644 579 #endif
645 580
646 581 lduw [%o2 + CPU_MMU_IDX], %g2 ! %g2 = mmu index
647 582
648 583 ! load global mmu_ctxp gnum
649 584 ldx [%o3 + MMU_CTX_GNUM], %o4 ! %o4 = mmu_ctxp->gnum
650 585
651 586 #ifdef DEBUG
652 587 cmp %o4, %g0 ! mmu_ctxp->gnum should never be 0
653 588 bne,pt %xcc, 3f
654 589 nop
655 590
656 591 sethi %hi(panicstr), %g1 ! test if panicstr is already set
657 592 ldx [%g1 + %lo(panicstr)], %g1
658 593 tst %g1
659 594 bnz,pn %icc, 1f
660 595 nop
661 596
662 597 sethi %hi(sfmmu_panic8), %o0
663 598 call panic
664 599 or %o0, %lo(sfmmu_panic8), %o0
665 600 1:
666 601 retl
667 602 mov %g0, %o0 ! %o0 = ret = 0
668 603 3:
669 604 #endif
670 605
671 606 ! load HAT sfmmu_ctxs[mmuid] gnum, cnum
672 607
673 608 sllx %g2, SFMMU_MMU_CTX_SHIFT, %g2
674 609 add %o0, %g2, %g2 ! %g2 = &sfmmu_ctxs[mmuid] - SFMMU_CTXS
675 610
676 611 /*
677 612 * %g5 = sfmmu gnum returned
678 613 * %g6 = sfmmu cnum returned
679 614 * %g2 = &sfmmu_ctxs[mmuid] - SFMMU_CTXS
680 615 * %g4 = scratch
681 616 *
682 617 * Fast path code, do a quick check.
683 618 */
684 619 SFMMU_MMUID_GNUM_CNUM(%g2, %g5, %g6, %g4)
685 620
686 621 cmp %g6, INVALID_CONTEXT ! hat cnum == INVALID ??
687 622 bne,pt %icc, 1f ! valid hat cnum, check gnum
688 623 nop
689 624
690 625 ! cnum == INVALID, check allocflag
691 626 mov %g0, %g4 ! %g4 = ret = 0
692 627 brz,pt %o1, 8f ! allocflag == 0, skip ctx allocation, bail
693 628 mov %g6, %o1
694 629
695 630 ! (invalid HAT cnum) && (allocflag == 1)
696 631 ba,pt %icc, 2f
697 632 nop
698 633 #ifdef sun4v
699 634 0:
700 635 set INVALID_CONTEXT, %o1
701 636 membar #LoadStore|#StoreStore
702 637 ba,pt %icc, 8f
703 638 mov %g0, %g4 ! %g4 = ret = 0
704 639 #endif
705 640 1:
706 641 ! valid HAT cnum, check gnum
707 642 cmp %g5, %o4
708 643 mov 1, %g4 !%g4 = ret = 1
709 644 be,a,pt %icc, 8f ! gnum unchanged, go to done
710 645 mov %g6, %o1
711 646
712 647 2:
713 648 /*
714 649 * Grab per process (PP) sfmmu_ctx_lock spinlock,
715 650 * followed by the 'slow path' code.
716 651 */
717 652 ldstub [%o0 + SFMMU_CTX_LOCK], %g3 ! %g3 = per process (PP) lock
718 653 3:
719 654 brz %g3, 5f
720 655 nop
721 656 4:
722 657 brnz,a,pt %g3, 4b ! spin if lock is 1
723 658 ldub [%o0 + SFMMU_CTX_LOCK], %g3
724 659 ba %xcc, 3b ! retry the lock
725 660 ldstub [%o0 + SFMMU_CTX_LOCK], %g3 ! %g3 = PP lock
726 661
727 662 5:
728 663 membar #LoadLoad
729 664 /*
730 665 * %g5 = sfmmu gnum returned
731 666 * %g6 = sfmmu cnum returned
732 667 * %g2 = &sfmmu_ctxs[mmuid] - SFMMU_CTXS
733 668 * %g4 = scratch
734 669 */
735 670 SFMMU_MMUID_GNUM_CNUM(%g2, %g5, %g6, %g4)
736 671
737 672 cmp %g6, INVALID_CONTEXT ! hat cnum == INVALID ??
738 673 bne,pt %icc, 1f ! valid hat cnum, check gnum
739 674 nop
740 675
741 676 ! cnum == INVALID, check allocflag
742 677 mov %g0, %g4 ! %g4 = ret = 0
743 678 brz,pt %o1, 2f ! allocflag == 0, called from resume, set hw
744 679 mov %g6, %o1
745 680
746 681 ! (invalid HAT cnum) && (allocflag == 1)
747 682 ba,pt %icc, 6f
748 683 nop
749 684 1:
750 685 ! valid HAT cnum, check gnum
751 686 cmp %g5, %o4
752 687 mov 1, %g4 ! %g4 = ret = 1
753 688 be,a,pt %icc, 2f ! gnum unchanged, go to done
754 689 mov %g6, %o1
755 690
756 691 ba,pt %icc, 6f
757 692 nop
758 693 2:
759 694 membar #LoadStore|#StoreStore
760 695 ba,pt %icc, 8f
761 696 clrb [%o0 + SFMMU_CTX_LOCK]
762 697 6:
763 698 /*
764 699 * We get here if we do not have a valid context, or
765 700 * the HAT gnum does not match global gnum. We hold
766 701 * sfmmu_ctx_lock spinlock. Allocate that context.
767 702 *
768 703 * %o3 = mmu_ctxp
769 704 */
770 705 add %o3, MMU_CTX_CNUM, %g3
771 706 ld [%o3 + MMU_CTX_NCTXS], %g4
772 707
773 708 /*
774 709 * %g2 = &sfmmu_ctx_t[mmuid] - SFMMU_CTXS;
775 710 * %g3 = mmu cnum address
776 711 * %g4 = mmu nctxs
777 712 *
778 713 * %o0 = sfmmup
779 714 * %o1 = mmu current cnum value (used as new cnum)
780 715 * %o4 = mmu gnum
781 716 *
782 717 * %o5 = scratch
783 718 */
784 719 ld [%g3], %o1
785 720 0:
786 721 cmp %o1, %g4
787 722 bl,a,pt %icc, 1f
788 723 add %o1, 1, %o5 ! %o5 = mmu_ctxp->cnum + 1
789 724
790 725 /*
 791 726	 * cnum reaches max, bail, so wrap around can be performed later.
792 727 */
793 728 set INVALID_CONTEXT, %o1
794 729 mov %g0, %g4 ! %g4 = ret = 0
795 730
796 731 membar #LoadStore|#StoreStore
797 732 ba,pt %icc, 8f
798 733 clrb [%o0 + SFMMU_CTX_LOCK]
799 734 1:
800 735 ! %g3 = addr of mmu_ctxp->cnum
801 736 ! %o5 = mmu_ctxp->cnum + 1
802 737 cas [%g3], %o1, %o5
803 738 cmp %o1, %o5
804 739 bne,a,pn %xcc, 0b ! cas failed
805 740 ld [%g3], %o1
806 741
807 742 #ifdef DEBUG
808 743 set MAX_SFMMU_CTX_VAL, %o5
809 744 cmp %o1, %o5
810 745 ble,pt %icc, 2f
811 746 nop
812 747
813 748 sethi %hi(sfmmu_panic9), %o0
814 749 call panic
815 750 or %o0, %lo(sfmmu_panic9), %o0
816 751 2:
817 752 #endif
818 753 ! update hat gnum and cnum
819 754 sllx %o4, SFMMU_MMU_GNUM_RSHIFT, %o4
820 755 or %o4, %o1, %o4
821 756 stx %o4, [%g2 + SFMMU_CTXS]
822 757
823 758 membar #LoadStore|#StoreStore
824 759 clrb [%o0 + SFMMU_CTX_LOCK]
825 760
826 761 mov 1, %g4 ! %g4 = ret = 1
827 762 8:
828 763 /*
829 764 * program the secondary context register
830 765 *
831 766 * %o1 = cnum
832 767 * %g1 = sfmmu private/shared flag (0:private, 1:shared)
833 768 */
834 769
835 770 /*
836 771 * When we come here and context is invalid, we want to set both
837 772 * private and shared ctx regs to INVALID. In order to
838 773 * do so, we set the sfmmu priv/shared flag to 'private' regardless
839 774 * so that private ctx reg will be set to invalid.
840 775 * Note that on sun4v values written to private context register are
841 776 * automatically written to corresponding shared context register as
842 777 * well. On sun4u SET_SECCTX() will invalidate shared context register
843 778 * when it sets a private secondary context register.
844 779 */
845 780
846 781 cmp %o1, INVALID_CONTEXT
847 782 be,a,pn %icc, 9f
848 783 clr %g1
849 784 9:
850 785
851 786 #ifdef sun4u
852 787 ldub [%o0 + SFMMU_CEXT], %o2
853 788 sll %o2, CTXREG_EXT_SHIFT, %o2
854 789 or %o1, %o2, %o1
855 790 #endif /* sun4u */
856 791
857 792 SET_SECCTX(%o1, %g1, %o4, %o5, alloc_ctx_lbl1)
858 793
859 794 retl
860 795 mov %g4, %o0 ! %o0 = ret
861 796
862 797 SET_SIZE(sfmmu_alloc_ctx)
863 798
864 799 ENTRY_NP(sfmmu_modifytte)
865 800 ldx [%o2], %g3 /* current */
866 801 ldx [%o0], %g1 /* original */
867 802 2:
868 803 ldx [%o1], %g2 /* modified */
869 804 cmp %g2, %g3 /* is modified = current? */
870 805 be,a,pt %xcc,1f /* yes, don't write */
871 806 stx %g3, [%o0] /* update new original */
872 807 casx [%o2], %g1, %g2
873 808 cmp %g1, %g2
874 809 be,pt %xcc, 1f /* cas succeeded - return */
875 810 nop
876 811 ldx [%o2], %g3 /* new current */
877 812 stx %g3, [%o0] /* save as new original */
878 813 ba,pt %xcc, 2b
879 814 mov %g3, %g1
880 815 1: retl
881 816 membar #StoreLoad
882 817 SET_SIZE(sfmmu_modifytte)
883 818
884 819 ENTRY_NP(sfmmu_modifytte_try)
885 820 ldx [%o1], %g2 /* modified */
886 821 ldx [%o2], %g3 /* current */
887 822 ldx [%o0], %g1 /* original */
888 823 cmp %g3, %g2 /* is modified = current? */
889 824 be,a,pn %xcc,1f /* yes, don't write */
890 825 mov 0, %o1 /* as if cas failed. */
891 826
892 827 casx [%o2], %g1, %g2
893 828 membar #StoreLoad
894 829 cmp %g1, %g2
895 830 movne %xcc, -1, %o1 /* cas failed. */
896 831 move %xcc, 1, %o1 /* cas succeeded. */
897 832 1:
898 833 stx %g2, [%o0] /* report "current" value */
899 834 retl
900 835 mov %o1, %o0
901 836 SET_SIZE(sfmmu_modifytte_try)
902 837
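The two cas helpers above have straightforward C equivalents. This sketch mirrors sfmmu_modifytte_try()'s return convention (1 = cas succeeded, -1 = TTE changed underneath, 0 = no write was needed), omits the membar, and uses a GCC builtin in place of casx:

	static int
	modifytte_try_sketch(uint64_t *sttep, uint64_t *stmodttep, volatile uint64_t *dttep)
	{
		uint64_t orig = *sttep;
		uint64_t mod = *stmodttep;
		uint64_t cur = *dttep;
		int ret;

		if (cur == mod) {
			ret = 0;			/* already what we want: no cas */
		} else {
			cur = __sync_val_compare_and_swap(dttep, orig, mod);
			ret = (cur == orig) ? 1 : -1;	/* -1: tte changed underneath us */
		}
		*sttep = cur;				/* report the "current" value */
		return (ret);
	}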
903 838 ENTRY_NP(sfmmu_copytte)
904 839 ldx [%o0], %g1
905 840 retl
906 841 stx %g1, [%o1]
907 842 SET_SIZE(sfmmu_copytte)
908 843
909 844
910 845 /*
911 846 * Calculate a TSB entry pointer for the given TSB, va, pagesize.
912 847 * %o0 = TSB base address (in), pointer to TSB entry (out)
913 848 * %o1 = vaddr (in)
914 849 * %o2 = vpshift (in)
915 850 * %o3 = tsb size code (in)
916 851 * %o4 = scratch register
917 852 */
918 853 ENTRY_NP(sfmmu_get_tsbe)
919 854 GET_TSBE_POINTER(%o2, %o0, %o1, %o3, %o4)
920 855 retl
921 856 nop
922 857 SET_SIZE(sfmmu_get_tsbe)
923 858
385 lines elided
924 859 /*
925 860 * Return a TSB tag for the given va.
926 861 * %o0 = va (in/clobbered)
927 862 * %o0 = va shifted to be in tsb tag format (with no context) (out)
928 863 */
929 864 ENTRY_NP(sfmmu_make_tsbtag)
930 865 retl
931 866 srln %o0, TTARGET_VA_SHIFT, %o0
932 867 SET_SIZE(sfmmu_make_tsbtag)
933 868
934 -#endif /* lint */
935 -
936 869 /*
937 870 * Other sfmmu primitives
938 871 */
939 872
940 873
941 -#if defined (lint)
942 -void
943 -sfmmu_patch_ktsb(void)
944 -{
945 -}
946 -
947 -void
948 -sfmmu_kpm_patch_tlbm(void)
949 -{
950 -}
951 -
952 -void
953 -sfmmu_kpm_patch_tsbm(void)
954 -{
955 -}
956 -
957 -void
958 -sfmmu_patch_shctx(void)
959 -{
960 -}
961 -
962 -/* ARGSUSED */
963 -void
964 -sfmmu_load_tsbe(struct tsbe *tsbep, uint64_t vaddr, tte_t *ttep, int phys)
965 -{
966 -}
967 -
968 -/* ARGSUSED */
969 -void
970 -sfmmu_unload_tsbe(struct tsbe *tsbep, uint64_t vaddr, int phys)
971 -{
972 -}
973 -
974 -/* ARGSUSED */
975 -void
976 -sfmmu_kpm_load_tsb(caddr_t addr, tte_t *ttep, int vpshift)
977 -{
978 -}
979 -
980 -/* ARGSUSED */
981 -void
982 -sfmmu_kpm_unload_tsb(caddr_t addr, int vpshift)
983 -{
984 -}
985 -
986 -#else /* lint */
987 -
988 874 #define I_SIZE 4
989 875
990 876 ENTRY_NP(sfmmu_fix_ktlb_traptable)
991 877 /*
992 878 * %o0 = start of patch area
993 879 * %o1 = size code of TSB to patch
994 880 * %o3 = scratch
995 881 */
996 882 /* fix sll */
997 883 ld [%o0], %o3 /* get sll */
998 884 sub %o3, %o1, %o3 /* decrease shift by tsb szc */
999 885 st %o3, [%o0] /* write sll */
1000 886 flush %o0
1001 887 /* fix srl */
1002 888 add %o0, I_SIZE, %o0 /* goto next instr. */
1003 889 ld [%o0], %o3 /* get srl */
1004 890 sub %o3, %o1, %o3 /* decrease shift by tsb szc */
1005 891 st %o3, [%o0] /* write srl */
1006 892 retl
1007 893 flush %o0
1008 894 SET_SIZE(sfmmu_fix_ktlb_traptable)
1009 895
1010 896 ENTRY_NP(sfmmu_fixup_ktsbbase)
1011 897 /*
1012 898 * %o0 = start of patch area
1013 899 * %o5 = kernel virtual or physical tsb base address
1014 900 * %o2, %o3 are used as scratch registers.
1015 901 */
1016 902 /* fixup sethi instruction */
1017 903 ld [%o0], %o3
1018 904 srl %o5, 10, %o2 ! offset is bits 32:10
1019 905 or %o3, %o2, %o3 ! set imm22
1020 906 st %o3, [%o0]
1021 907 /* fixup offset of lduw/ldx */
1022 908 add %o0, I_SIZE, %o0 ! next instr
1023 909 ld [%o0], %o3
1024 910 and %o5, 0x3ff, %o2 ! set imm13 to bits 9:0
1025 911 or %o3, %o2, %o3
1026 912 st %o3, [%o0]
1027 913 retl
1028 914 flush %o0
1029 915 SET_SIZE(sfmmu_fixup_ktsbbase)
1030 916
1031 917 ENTRY_NP(sfmmu_fixup_setx)
1032 918 /*
1033 919 * %o0 = start of patch area
1034 920 * %o4 = 64 bit value to patch
1035 921 * %o2, %o3 are used as scratch registers.
1036 922 *
1037 923 * Note: Assuming that all parts of the instructions which need to be
1038 924 * patched correspond to RUNTIME_PATCH (aka 0)
1039 925 *
1040 926 * Note the implementation of setx which is being patched is as follows:
1041 927 *
1042 928 * sethi %hh(RUNTIME_PATCH), tmp
1043 929 * sethi %lm(RUNTIME_PATCH), dest
1044 930 * or tmp, %hm(RUNTIME_PATCH), tmp
1045 931 * or dest, %lo(RUNTIME_PATCH), dest
1046 932 * sllx tmp, 32, tmp
1047 933 * nop
1048 934 * or tmp, dest, dest
1049 935 *
1050 936 * which differs from the implementation in the
1051 937 * "SPARC Architecture Manual"
1052 938 */
1053 939 /* fixup sethi instruction */
1054 940 ld [%o0], %o3
1055 941 srlx %o4, 42, %o2 ! bits [63:42]
1056 942 or %o3, %o2, %o3 ! set imm22
1057 943 st %o3, [%o0]
1058 944 /* fixup sethi instruction */
1059 945 add %o0, I_SIZE, %o0 ! next instr
1060 946 ld [%o0], %o3
1061 947 sllx %o4, 32, %o2 ! clear upper bits
1062 948 srlx %o2, 42, %o2 ! bits [31:10]
1063 949 or %o3, %o2, %o3 ! set imm22
1064 950 st %o3, [%o0]
1065 951 /* fixup or instruction */
1066 952 add %o0, I_SIZE, %o0 ! next instr
1067 953 ld [%o0], %o3
1068 954 srlx %o4, 32, %o2 ! bits [63:32]
1069 955 and %o2, 0x3ff, %o2 ! bits [41:32]
1070 956 or %o3, %o2, %o3 ! set imm
1071 957 st %o3, [%o0]
1072 958 /* fixup or instruction */
1073 959 add %o0, I_SIZE, %o0 ! next instr
1074 960 ld [%o0], %o3
1075 961 and %o4, 0x3ff, %o2 ! bits [9:0]
1076 962 or %o3, %o2, %o3 ! set imm
1077 963 st %o3, [%o0]
1078 964 retl
1079 965 flush %o0
1080 966 SET_SIZE(sfmmu_fixup_setx)
1081 967
1082 968 ENTRY_NP(sfmmu_fixup_or)
1083 969 /*
1084 970 * %o0 = start of patch area
1085 971 * %o4 = 32 bit value to patch
1086 972 * %o2, %o3 are used as scratch registers.
1087 973 * Note: Assuming that all parts of the instructions which need to be
1088 974 * patched correspond to RUNTIME_PATCH (aka 0)
1089 975 */
1090 976 ld [%o0], %o3
1091 977 and %o4, 0x3ff, %o2 ! bits [9:0]
1092 978 or %o3, %o2, %o3 ! set imm
1093 979 st %o3, [%o0]
1094 980 retl
1095 981 flush %o0
1096 982 SET_SIZE(sfmmu_fixup_or)
1097 983
1098 984 ENTRY_NP(sfmmu_fixup_shiftx)
1099 985 /*
1100 986 * %o0 = start of patch area
1101 987 * %o4 = signed int immediate value to add to sllx/srlx imm field
1102 988 * %o2, %o3 are used as scratch registers.
1103 989 *
1104 990 * sllx/srlx store the 6 bit immediate value in the lowest order bits
1105 991 * so we do a simple add. The caller must be careful to prevent
1106 992 * overflow, which could easily occur if the initial value is nonzero!
1107 993 */
1108 994 ld [%o0], %o3 ! %o3 = instruction to patch
1109 995 and %o3, 0x3f, %o2 ! %o2 = existing imm value
1110 996 add %o2, %o4, %o2 ! %o2 = new imm value
1111 997 andn %o3, 0x3f, %o3 ! clear old imm value
1112 998 and %o2, 0x3f, %o2 ! truncate new imm value
1113 999 or %o3, %o2, %o3 ! set new imm value
1114 1000 st %o3, [%o0] ! store updated instruction
1115 1001 retl
1116 1002 flush %o0
1117 1003 SET_SIZE(sfmmu_fixup_shiftx)
1118 1004
1119 1005 ENTRY_NP(sfmmu_fixup_mmu_asi)
1120 1006 /*
1121 1007 * Patch imm_asi of all ldda instructions in the MMU
1122 1008 * trap handlers. We search MMU_PATCH_INSTR instructions
1123 1009 * starting from the itlb miss handler (trap 0x64).
1124 1010 * %o0 = address of tt[0,1]_itlbmiss
1125 1011 * %o1 = imm_asi to setup, shifted by appropriate offset.
1126 1012 * %o3 = number of instructions to search
1127 1013 * %o4 = reserved by caller: called from leaf routine
1128 1014 */
1129 1015 1: ldsw [%o0], %o2 ! load instruction to %o2
1130 1016 brgez,pt %o2, 2f
1131 1017 srl %o2, 30, %o5
1132 1018 btst 1, %o5 ! test bit 30; skip if not set
1133 1019 bz,pt %icc, 2f
1134 1020 sllx %o2, 39, %o5 ! bit 24 -> bit 63
1135 1021 srlx %o5, 58, %o5 ! isolate op3 part of opcode
1136 1022 xor %o5, 0x13, %o5 ! 01 0011 binary == ldda
1137 1023 brnz,pt %o5, 2f ! skip if not a match
1138 1024 or %o2, %o1, %o2 ! or in imm_asi
1139 1025 st %o2, [%o0] ! write patched instruction
1140 1026 2: dec %o3
1141 1027 brnz,a,pt %o3, 1b ! loop until we're done
1142 1028 add %o0, I_SIZE, %o0
1143 1029 retl
1144 1030 flush %o0
1145 1031 SET_SIZE(sfmmu_fixup_mmu_asi)
1146 1032
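For reference, the per-instruction test can be sketched in C. The real routine patches in place and flushes each word; the constants (op == 3 in bits 31:30, op3 == 0x13 in bits 24:19, imm_asi in bits 12:5) come from the SPARC instruction encoding, and the caller passes the ASI already shifted into position.

	/*
	 * Sketch of sfmmu_fixup_mmu_asi()'s check: only format-3 ldda
	 * instructions get the caller's pre-shifted imm_asi or'ed in.
	 */
	static void
	patch_ldda_asi(uint32_t *instrs, int count, uint32_t asi_shifted)
	{
		int i;

		for (i = 0; i < count; i++) {
			uint32_t in = instrs[i];

			if ((in >> 30) != 3)			/* not a format-3 (load/store) op */
				continue;
			if (((in >> 19) & 0x3f) != 0x13)	/* op3 is not ldda */
				continue;
			instrs[i] = in | asi_shifted;		/* or in imm_asi (bits 12:5) */
		}
	}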
1147 1033 /*
1148 1034 * Patch immediate ASI used to access the TSB in the
1149 1035 * trap table.
1150 1036 * inputs: %o0 = value of ktsb_phys
1151 1037 */
1152 1038 ENTRY_NP(sfmmu_patch_mmu_asi)
1153 1039 mov %o7, %o4 ! save return pc in %o4
1154 1040 mov ASI_QUAD_LDD_PHYS, %o3 ! set QUAD_LDD_PHYS by default
1155 1041
1156 1042 #ifdef sun4v
1157 1043
1158 1044 /*
1159 1045 * Check ktsb_phys. It must be non-zero for sun4v, panic if not.
1160 1046 */
1161 1047
1162 1048 brnz,pt %o0, do_patch
1163 1049 nop
1164 1050
1165 1051 sethi %hi(sfmmu_panic11), %o0
1166 1052 call panic
1167 1053 or %o0, %lo(sfmmu_panic11), %o0
1168 1054 do_patch:
1169 1055
1170 1056 #else /* sun4v */
1171 1057 /*
1172 1058 * Some non-sun4v platforms deploy virtual ktsb (ktsb_phys==0).
1173 1059 * Note that ASI_NQUAD_LD is not defined/used for sun4v
1174 1060 */
1175 1061 movrz %o0, ASI_NQUAD_LD, %o3
1176 1062
1177 1063 #endif /* sun4v */
1178 1064
1179 1065 sll %o3, 5, %o1 ! imm_asi offset
1180 1066 mov 6, %o3 ! number of instructions
1181 1067 sethi %hi(dktsb), %o0 ! to search
1182 1068 call sfmmu_fixup_mmu_asi ! patch kdtlb miss
1183 1069 or %o0, %lo(dktsb), %o0
1184 1070 mov 6, %o3 ! number of instructions
1185 1071 sethi %hi(dktsb4m), %o0 ! to search
1186 1072 call sfmmu_fixup_mmu_asi ! patch kdtlb4m miss
1187 1073 or %o0, %lo(dktsb4m), %o0
1188 1074 mov 6, %o3 ! number of instructions
1189 1075 sethi %hi(iktsb), %o0 ! to search
1190 1076 call sfmmu_fixup_mmu_asi ! patch kitlb miss
1191 1077 or %o0, %lo(iktsb), %o0
1192 1078 mov 6, %o3 ! number of instructions
1193 1079 sethi %hi(iktsb4m), %o0 ! to search
1194 1080 call sfmmu_fixup_mmu_asi ! patch kitlb4m miss
1195 1081 or %o0, %lo(iktsb4m), %o0
1196 1083 	mov	%o4, %o7		! restore return pc -- leaf
1197 1083 retl
1198 1084 nop
1199 1085 SET_SIZE(sfmmu_patch_mmu_asi)
1200 1086
1201 1087
1202 1088 ENTRY_NP(sfmmu_patch_ktsb)
1203 1089 /*
1204 1090 * We need to fix iktsb, dktsb, et. al.
1205 1091 */
1206 1092 save %sp, -SA(MINFRAME), %sp
1207 1093 set ktsb_phys, %o1
1208 1094 ld [%o1], %o4
1209 1095 set ktsb_base, %o5
1210 1096 set ktsb4m_base, %l1
1211 1097 brz,pt %o4, 1f
1212 1098 nop
1213 1099 set ktsb_pbase, %o5
1214 1100 set ktsb4m_pbase, %l1
1215 1101 1:
1216 1102 sethi %hi(ktsb_szcode), %o1
1217 1103 ld [%o1 + %lo(ktsb_szcode)], %o1 /* %o1 = ktsb size code */
1218 1104
1219 1105 sethi %hi(iktsb), %o0
1220 1106 call sfmmu_fix_ktlb_traptable
1221 1107 or %o0, %lo(iktsb), %o0
1222 1108
1223 1109 sethi %hi(dktsb), %o0
1224 1110 call sfmmu_fix_ktlb_traptable
1225 1111 or %o0, %lo(dktsb), %o0
1226 1112
1227 1113 sethi %hi(ktsb4m_szcode), %o1
1228 1114 ld [%o1 + %lo(ktsb4m_szcode)], %o1 /* %o1 = ktsb4m size code */
1229 1115
1230 1116 sethi %hi(iktsb4m), %o0
1231 1117 call sfmmu_fix_ktlb_traptable
1232 1118 or %o0, %lo(iktsb4m), %o0
1233 1119
1234 1120 sethi %hi(dktsb4m), %o0
1235 1121 call sfmmu_fix_ktlb_traptable
1236 1122 or %o0, %lo(dktsb4m), %o0
1237 1123
1238 1124 #ifndef sun4v
1239 1125 mov ASI_N, %o2
1240 1126 movrnz %o4, ASI_MEM, %o2 ! setup kernel 32bit ASI to patch
1241 1127 mov %o2, %o4 ! sfmmu_fixup_or needs this in %o4
1242 1128 sethi %hi(tsb_kernel_patch_asi), %o0
1243 1129 call sfmmu_fixup_or
1244 1130 or %o0, %lo(tsb_kernel_patch_asi), %o0
1245 1131 #endif /* !sun4v */
1246 1132
1247 1133 ldx [%o5], %o4 ! load ktsb base addr (VA or PA)
1248 1134
1249 1135 sethi %hi(dktsbbase), %o0
1250 1136 call sfmmu_fixup_setx ! patch value of ktsb base addr
1251 1137 or %o0, %lo(dktsbbase), %o0
1252 1138
1253 1139 sethi %hi(iktsbbase), %o0
1254 1140 call sfmmu_fixup_setx ! patch value of ktsb base addr
1255 1141 or %o0, %lo(iktsbbase), %o0
1256 1142
1257 1143 sethi %hi(sfmmu_kprot_patch_ktsb_base), %o0
1258 1144 call sfmmu_fixup_setx ! patch value of ktsb base addr
1259 1145 or %o0, %lo(sfmmu_kprot_patch_ktsb_base), %o0
1260 1146
1261 1147 #ifdef sun4v
1262 1148 sethi %hi(sfmmu_dslow_patch_ktsb_base), %o0
1263 1149 call sfmmu_fixup_setx ! patch value of ktsb base addr
1264 1150 or %o0, %lo(sfmmu_dslow_patch_ktsb_base), %o0
1265 1151 #endif /* sun4v */
1266 1152
1267 1153 ldx [%l1], %o4 ! load ktsb4m base addr (VA or PA)
1268 1154
1269 1155 sethi %hi(dktsb4mbase), %o0
1270 1156 call sfmmu_fixup_setx ! patch value of ktsb4m base addr
1271 1157 or %o0, %lo(dktsb4mbase), %o0
1272 1158
1273 1159 sethi %hi(iktsb4mbase), %o0
1274 1160 call sfmmu_fixup_setx ! patch value of ktsb4m base addr
1275 1161 or %o0, %lo(iktsb4mbase), %o0
1276 1162
1277 1163 sethi %hi(sfmmu_kprot_patch_ktsb4m_base), %o0
1278 1164 call sfmmu_fixup_setx ! patch value of ktsb4m base addr
1279 1165 or %o0, %lo(sfmmu_kprot_patch_ktsb4m_base), %o0
1280 1166
1281 1167 #ifdef sun4v
1282 1168 sethi %hi(sfmmu_dslow_patch_ktsb4m_base), %o0
1283 1169 call sfmmu_fixup_setx ! patch value of ktsb4m base addr
1284 1170 or %o0, %lo(sfmmu_dslow_patch_ktsb4m_base), %o0
1285 1171 #endif /* sun4v */
1286 1172
1287 1173 set ktsb_szcode, %o4
1288 1174 ld [%o4], %o4
1289 1175 sethi %hi(sfmmu_kprot_patch_ktsb_szcode), %o0
1290 1176 call sfmmu_fixup_or ! patch value of ktsb_szcode
1291 1177 or %o0, %lo(sfmmu_kprot_patch_ktsb_szcode), %o0
1292 1178
1293 1179 #ifdef sun4v
1294 1180 sethi %hi(sfmmu_dslow_patch_ktsb_szcode), %o0
1295 1181 call sfmmu_fixup_or ! patch value of ktsb_szcode
1296 1182 or %o0, %lo(sfmmu_dslow_patch_ktsb_szcode), %o0
1297 1183 #endif /* sun4v */
1298 1184
1299 1185 set ktsb4m_szcode, %o4
1300 1186 ld [%o4], %o4
1301 1187 sethi %hi(sfmmu_kprot_patch_ktsb4m_szcode), %o0
1302 1188 call sfmmu_fixup_or ! patch value of ktsb4m_szcode
1303 1189 or %o0, %lo(sfmmu_kprot_patch_ktsb4m_szcode), %o0
1304 1190
1305 1191 #ifdef sun4v
1306 1192 sethi %hi(sfmmu_dslow_patch_ktsb4m_szcode), %o0
1307 1193 call sfmmu_fixup_or ! patch value of ktsb4m_szcode
1308 1194 or %o0, %lo(sfmmu_dslow_patch_ktsb4m_szcode), %o0
1309 1195 #endif /* sun4v */
1310 1196
1311 1197 ret
1312 1198 restore
1313 1199 SET_SIZE(sfmmu_patch_ktsb)
1314 1200
1315 1201 ENTRY_NP(sfmmu_kpm_patch_tlbm)
1316 1202 /*
1317 1203 * Fixup trap handlers in common segkpm case. This is reserved
1318 1204 * for future use should kpm TSB be changed to be other than the
1319 1205 * kernel TSB.
1320 1206 */
1321 1207 retl
1322 1208 nop
1323 1209 SET_SIZE(sfmmu_kpm_patch_tlbm)
1324 1210
1325 1211 ENTRY_NP(sfmmu_kpm_patch_tsbm)
1326 1212 /*
1327 1213 * nop the branch to sfmmu_kpm_dtsb_miss_small
1328 1214 * in the case where we are using large pages for
1329 1215 * seg_kpm (and hence must probe the second TSB for
1330 1216 * seg_kpm VAs)
1331 1217 */
1332 1218 set dktsb4m_kpmcheck_small, %o0
1333 1219 MAKE_NOP_INSTR(%o1)
1334 1220 st %o1, [%o0]
1335 1221 flush %o0
1336 1222 retl
1337 1223 nop
1338 1224 SET_SIZE(sfmmu_kpm_patch_tsbm)
1339 1225
1340 1226 ENTRY_NP(sfmmu_patch_utsb)
1341 1227 #ifdef UTSB_PHYS
1342 1228 retl
1343 1229 nop
1344 1230 #else /* UTSB_PHYS */
1345 1231 /*
1346 1232 * We need to hot patch utsb_vabase and utsb4m_vabase
1347 1233 */
1348 1234 save %sp, -SA(MINFRAME), %sp
1349 1235
1350 1236 /* patch value of utsb_vabase */
1351 1237 set utsb_vabase, %o1
1352 1238 ldx [%o1], %o4
1353 1239 sethi %hi(sfmmu_uprot_get_1st_tsbe_ptr), %o0
1354 1240 call sfmmu_fixup_setx
1355 1241 or %o0, %lo(sfmmu_uprot_get_1st_tsbe_ptr), %o0
1356 1242 sethi %hi(sfmmu_uitlb_get_1st_tsbe_ptr), %o0
1357 1243 call sfmmu_fixup_setx
1358 1244 or %o0, %lo(sfmmu_uitlb_get_1st_tsbe_ptr), %o0
1359 1245 sethi %hi(sfmmu_udtlb_get_1st_tsbe_ptr), %o0
1360 1246 call sfmmu_fixup_setx
1361 1247 or %o0, %lo(sfmmu_udtlb_get_1st_tsbe_ptr), %o0
1362 1248
1363 1249 /* patch value of utsb4m_vabase */
1364 1250 set utsb4m_vabase, %o1
1365 1251 ldx [%o1], %o4
1366 1252 sethi %hi(sfmmu_uprot_get_2nd_tsb_base), %o0
1367 1253 call sfmmu_fixup_setx
1368 1254 or %o0, %lo(sfmmu_uprot_get_2nd_tsb_base), %o0
1369 1255 sethi %hi(sfmmu_uitlb_get_2nd_tsb_base), %o0
1370 1256 call sfmmu_fixup_setx
1371 1257 or %o0, %lo(sfmmu_uitlb_get_2nd_tsb_base), %o0
1372 1258 sethi %hi(sfmmu_udtlb_get_2nd_tsb_base), %o0
1373 1259 call sfmmu_fixup_setx
1374 1260 or %o0, %lo(sfmmu_udtlb_get_2nd_tsb_base), %o0
1375 1261
1376 1262 /*
1377 1263 * Patch TSB base register masks and shifts if needed.
1378 1264 * By default the TSB base register contents are set up for 4M slab.
1379 1265 * If we're using a smaller slab size and reserved VA range we need
1380 1266 * to patch up those values here.
1381 1267 */
1382 1268 set tsb_slab_shift, %o1
1383 1269 set MMU_PAGESHIFT4M, %o4
1384 1270 lduw [%o1], %o3
1385 1271 subcc %o4, %o3, %o4
1386 1272 bz,pt %icc, 1f
1387 1273 /* delay slot safe */
1388 1274
1389 1275 /* patch reserved VA range size if needed. */
1390 1276 sethi %hi(sfmmu_tsb_1st_resv_offset), %o0
1391 1277 call sfmmu_fixup_shiftx
1392 1278 or %o0, %lo(sfmmu_tsb_1st_resv_offset), %o0
1393 1279 call sfmmu_fixup_shiftx
1394 1280 add %o0, I_SIZE, %o0
1395 1281 sethi %hi(sfmmu_tsb_2nd_resv_offset), %o0
1396 1282 call sfmmu_fixup_shiftx
1397 1283 or %o0, %lo(sfmmu_tsb_2nd_resv_offset), %o0
1398 1284 call sfmmu_fixup_shiftx
1399 1285 add %o0, I_SIZE, %o0
1400 1286 1:
1401 1287 /* patch TSBREG_VAMASK used to set up TSB base register */
1402 1288 set tsb_slab_mask, %o1
1403 1289 ldx [%o1], %o4
1404 1290 sethi %hi(sfmmu_tsb_1st_tsbreg_vamask), %o0
1405 1291 call sfmmu_fixup_or
1406 1292 or %o0, %lo(sfmmu_tsb_1st_tsbreg_vamask), %o0
1407 1293 sethi %hi(sfmmu_tsb_2nd_tsbreg_vamask), %o0
1408 1294 call sfmmu_fixup_or
1409 1295 or %o0, %lo(sfmmu_tsb_2nd_tsbreg_vamask), %o0
1410 1296
1411 1297 ret
1412 1298 restore
1413 1299 #endif /* UTSB_PHYS */
1414 1300 SET_SIZE(sfmmu_patch_utsb)
1415 1301
1416 1302 ENTRY_NP(sfmmu_patch_shctx)
1417 1303 #ifdef sun4u
1418 1304 retl
1419 1305 nop
1420 1306 #else /* sun4u */
1421 1307 set sfmmu_shctx_cpu_mondo_patch, %o0
1422 1308 MAKE_JMP_INSTR(5, %o1, %o2) ! jmp %g5
1423 1309 st %o1, [%o0]
1424 1310 flush %o0
1425 1311 MAKE_NOP_INSTR(%o1)
1426 1312 add %o0, I_SIZE, %o0 ! next instr
1427 1313 st %o1, [%o0]
1428 1314 flush %o0
1429 1315
1430 1316 set sfmmu_shctx_user_rtt_patch, %o0
1431 1317 st %o1, [%o0] ! nop 1st instruction
1432 1318 flush %o0
1433 1319 add %o0, I_SIZE, %o0
1434 1320 st %o1, [%o0] ! nop 2nd instruction
1435 1321 flush %o0
1436 1322 add %o0, I_SIZE, %o0
1437 1323 st %o1, [%o0] ! nop 3rd instruction
1438 1324 flush %o0
1439 1325 add %o0, I_SIZE, %o0
1440 1326 st %o1, [%o0] ! nop 4th instruction
1441 1327 flush %o0
1442 1328 add %o0, I_SIZE, %o0
1443 1329 st %o1, [%o0] ! nop 5th instruction
1444 1330 flush %o0
1445 1331 add %o0, I_SIZE, %o0
1446 1332 st %o1, [%o0] ! nop 6th instruction
1447 1333 retl
1448 1334 flush %o0
1449 1335 #endif /* sun4u */
1450 1336 SET_SIZE(sfmmu_patch_shctx)
1451 1337
1452 1338 /*
1453 1339 * Routine that loads an entry into a tsb using virtual addresses.
1454 1340 * Locking is required since all cpus can use the same TSB.
1455 1341 * Note that it is no longer required to have a valid context
1456 1342 * when calling this function.
1457 1343 */
1458 1344 ENTRY_NP(sfmmu_load_tsbe)
1459 1345 /*
1460 1346 * %o0 = pointer to tsbe to load
1461 1347 * %o1 = tsb tag
1462 1348 * %o2 = virtual pointer to TTE
1463 1349 * %o3 = 1 if physical address in %o0 else 0
1464 1350 */
1465 1351 rdpr %pstate, %o5
1466 1352 #ifdef DEBUG
1467 1353 PANIC_IF_INTR_DISABLED_PSTR(%o5, sfmmu_di_l2, %g1)
1468 1354 #endif /* DEBUG */
1469 1355
1470 1356 wrpr %o5, PSTATE_IE, %pstate /* disable interrupts */
1471 1357
1472 1358 SETUP_TSB_ASI(%o3, %g3)
1473 1359 TSB_UPDATE(%o0, %o2, %o1, %g1, %g2, locked_tsb_l8)
1474 1360
1475 1361 wrpr %g0, %o5, %pstate /* enable interrupts */
1476 1362
1477 1363 retl
1478 1364 membar #StoreStore|#StoreLoad
1479 1365 SET_SIZE(sfmmu_load_tsbe)
1480 1366
1481 1367 /*
1482 1368 * Flush TSB of a given entry if the tag matches.
1483 1369 */
1484 1370 ENTRY(sfmmu_unload_tsbe)
1485 1371 /*
1486 1372 * %o0 = pointer to tsbe to be flushed
1487 1373 * %o1 = tag to match
1488 1374 * %o2 = 1 if physical address in %o0 else 0
1489 1375 */
1490 1376 SETUP_TSB_ASI(%o2, %g1)
1491 1377 TSB_INVALIDATE(%o0, %o1, %g1, %o2, %o3, unload_tsbe)
1492 1378 retl
1493 1379 membar #StoreStore|#StoreLoad
1494 1380 SET_SIZE(sfmmu_unload_tsbe)
1495 1381
1496 1382 /*
1497 1383 * Routine that loads a TTE into the kpm TSB from C code.
1498 1384 * Locking is required since kpm TSB is shared among all CPUs.
1499 1385 */
1500 1386 ENTRY_NP(sfmmu_kpm_load_tsb)
1501 1387 /*
1502 1388 * %o0 = vaddr
1503 1389 * %o1 = ttep
1504 1390 * %o2 = virtpg to TSB index shift (e.g. TTE pagesize shift)
1505 1391 */
1506 1392 rdpr %pstate, %o5 ! %o5 = saved pstate
1507 1393 #ifdef DEBUG
1508 1394 PANIC_IF_INTR_DISABLED_PSTR(%o5, sfmmu_di_l3, %g1)
1509 1395 #endif /* DEBUG */
1510 1396 wrpr %o5, PSTATE_IE, %pstate ! disable interrupts
1511 1397
1512 1398 #ifndef sun4v
1513 1399 sethi %hi(ktsb_phys), %o4
1514 1400 mov ASI_N, %o3
1515 1401 ld [%o4 + %lo(ktsb_phys)], %o4
1516 1402 movrnz %o4, ASI_MEM, %o3
1517 1403 mov %o3, %asi
1518 1404 #endif /* !sun4v */
1519 1405 mov %o0, %g1 ! %g1 = vaddr
1520 1406
1521 1407 /* GET_KPM_TSBE_POINTER(vpshift, tsbp, vaddr (clobbers), tmp1, tmp2) */
1522 1408 GET_KPM_TSBE_POINTER(%o2, %g2, %g1, %o3, %o4)
1523 1409 /* %g2 = tsbep, %g1 clobbered */
1524 1410
1525 1411 srlx %o0, TTARGET_VA_SHIFT, %g1; ! %g1 = tag target
1526 1412 /* TSB_UPDATE(tsbep, tteva, tagtarget, tmp1, tmp2, label) */
1527 1413 TSB_UPDATE(%g2, %o1, %g1, %o3, %o4, locked_tsb_l9)
1528 1414
1529 1415 wrpr %g0, %o5, %pstate ! enable interrupts
1530 1416 retl
1531 1417 membar #StoreStore|#StoreLoad
1532 1418 SET_SIZE(sfmmu_kpm_load_tsb)
1533 1419
1534 1420 /*
1535 1421 * Routine that shoots down a TTE in the kpm TSB or in the
1536 1422 * kernel TSB depending on virtpg. Locking is required since
1537 1423 * kpm/kernel TSB is shared among all CPUs.
1538 1424 */
1539 1425 ENTRY_NP(sfmmu_kpm_unload_tsb)
1540 1426 /*
1541 1427 * %o0 = vaddr
1542 1428 * %o1 = virtpg to TSB index shift (e.g. TTE page shift)
1543 1429 */
1544 1430 #ifndef sun4v
1545 1431 sethi %hi(ktsb_phys), %o4
1546 1432 mov ASI_N, %o3
1547 1433 ld [%o4 + %lo(ktsb_phys)], %o4
1548 1434 movrnz %o4, ASI_MEM, %o3
1549 1435 mov %o3, %asi
1550 1436 #endif /* !sun4v */
1551 1437 mov %o0, %g1 ! %g1 = vaddr
1552 1438
1553 1439 /* GET_KPM_TSBE_POINTER(vpshift, tsbp, vaddr (clobbers), tmp1, tmp2) */
1554 1440 GET_KPM_TSBE_POINTER(%o1, %g2, %g1, %o3, %o4)
557 lines elided
1555 1441 /* %g2 = tsbep, %g1 clobbered */
1556 1442
1557 1443 srlx %o0, TTARGET_VA_SHIFT, %g1; ! %g1 = tag target
1558 1444 /* TSB_INVALIDATE(tsbep, tag, tmp1, tmp2, tmp3, label) */
1559 1445 TSB_INVALIDATE(%g2, %g1, %o3, %o4, %o1, kpm_tsbinval)
1560 1446
1561 1447 retl
1562 1448 membar #StoreStore|#StoreLoad
1563 1449 SET_SIZE(sfmmu_kpm_unload_tsb)
1564 1450
1565 -#endif /* lint */
1566 1451
1567 -
1568 -#if defined (lint)
1569 -
1570 -/*ARGSUSED*/
1571 -pfn_t
1572 -sfmmu_ttetopfn(tte_t *tte, caddr_t vaddr)
1573 -{ return(0); }
1574 -
1575 -#else /* lint */
1576 -
1577 1452 ENTRY_NP(sfmmu_ttetopfn)
1578 1453 ldx [%o0], %g1 /* read tte */
1579 1454 TTETOPFN(%g1, %o1, sfmmu_ttetopfn_l1, %g2, %g3, %g4)
1580 1455 /*
1581 1456 * g1 = pfn
1582 1457 */
1583 1458 retl
1584 1459 mov %g1, %o0
1585 1460 SET_SIZE(sfmmu_ttetopfn)
1586 1461
1587 -#endif /* !lint */
1588 -
1589 1462 /*
1590 1463 * These macros are used to update global sfmmu hme hash statistics
1591 1464 * in perf critical paths. It is only enabled in debug kernels or
1592 1465 * if SFMMU_STAT_GATHER is defined
1593 1466 */
1594 1467 #if defined(DEBUG) || defined(SFMMU_STAT_GATHER)
1595 1468 #define HAT_HSEARCH_DBSTAT(hatid, tsbarea, tmp1, tmp2) \
1596 1469 ldn [tsbarea + TSBMISS_KHATID], tmp1 ;\
1597 1470 mov HATSTAT_KHASH_SEARCH, tmp2 ;\
1598 1471 cmp tmp1, hatid ;\
1599 1472 movne %ncc, HATSTAT_UHASH_SEARCH, tmp2 ;\
1600 1473 set sfmmu_global_stat, tmp1 ;\
1601 1474 add tmp1, tmp2, tmp1 ;\
1602 1475 ld [tmp1], tmp2 ;\
1603 1476 inc tmp2 ;\
1604 1477 st tmp2, [tmp1]
1605 1478
1606 1479 #define HAT_HLINK_DBSTAT(hatid, tsbarea, tmp1, tmp2) \
1607 1480 ldn [tsbarea + TSBMISS_KHATID], tmp1 ;\
1608 1481 mov HATSTAT_KHASH_LINKS, tmp2 ;\
1609 1482 cmp tmp1, hatid ;\
1610 1483 movne %ncc, HATSTAT_UHASH_LINKS, tmp2 ;\
1611 1484 set sfmmu_global_stat, tmp1 ;\
1612 1485 add tmp1, tmp2, tmp1 ;\
1613 1486 ld [tmp1], tmp2 ;\
1614 1487 inc tmp2 ;\
1615 1488 st tmp2, [tmp1]
1616 1489
1617 1490
1618 1491 #else /* DEBUG || SFMMU_STAT_GATHER */
1619 1492
1620 1493 #define HAT_HSEARCH_DBSTAT(hatid, tsbarea, tmp1, tmp2)
1621 1494
1622 1495 #define HAT_HLINK_DBSTAT(hatid, tsbarea, tmp1, tmp2)
1623 1496
1624 1497 #endif /* DEBUG || SFMMU_STAT_GATHER */
1625 1498
1626 1499 /*
1627 1500 * This macro is used to update global sfmmu kstats in non
1628 1501 * perf critical areas so they are enabled all the time
1629 1502 */
1630 1503 #define HAT_GLOBAL_STAT(statname, tmp1, tmp2) \
1631 1504 sethi %hi(sfmmu_global_stat), tmp1 ;\
1632 1505 add tmp1, statname, tmp1 ;\
1633 1506 ld [tmp1 + %lo(sfmmu_global_stat)], tmp2 ;\
1634 1507 inc tmp2 ;\
1635 1508 st tmp2, [tmp1 + %lo(sfmmu_global_stat)]
1636 1509
1637 1510 /*
1638 1511 * These macros are used to update per cpu stats in non perf
1639 1512 * critical areas so they are enabled all the time
1640 1513 */
1641 1514 #define HAT_PERCPU_STAT32(tsbarea, stat, tmp1) \
1642 1515 ld [tsbarea + stat], tmp1 ;\
1643 1516 inc tmp1 ;\
1644 1517 st tmp1, [tsbarea + stat]
1645 1518
1646 1519 /*
1647 1520 * These macros are used to update per cpu stats in non perf
1648 1521 * critical areas so they are enabled all the time
1649 1522 */
1650 1523 #define HAT_PERCPU_STAT16(tsbarea, stat, tmp1) \
1651 1524 lduh [tsbarea + stat], tmp1 ;\
1652 1525 inc tmp1 ;\
1653 1526 stuh tmp1, [tsbarea + stat]
1654 1527
1655 1528 #if defined(KPM_TLBMISS_STATS_GATHER)
1656 1529 /*
1657 1530 * Count kpm dtlb misses separately to allow a different
1658 1531 * evaluation of hme and kpm tlbmisses. kpm tsb hits can
1659 1532 * be calculated by (kpm_dtlb_misses - kpm_tsb_misses).
1660 1533 */
1661 1534 #define KPM_TLBMISS_STAT_INCR(tagacc, val, tsbma, tmp1, label) \
1662 1535 brgez tagacc, label /* KPM VA? */ ;\
1663 1536 nop ;\
1664 1537 CPU_INDEX(tmp1, tsbma) ;\
1665 1538 sethi %hi(kpmtsbm_area), tsbma ;\
1666 1539 sllx tmp1, KPMTSBM_SHIFT, tmp1 ;\
1667 1540 or tsbma, %lo(kpmtsbm_area), tsbma ;\
1668 1541 add tsbma, tmp1, tsbma /* kpmtsbm area */ ;\
1669 1542 /* VA range check */ ;\
1670 1543 ldx [tsbma + KPMTSBM_VBASE], val ;\
1671 1544 cmp tagacc, val ;\
1672 1545 blu,pn %xcc, label ;\
1673 1546 ldx [tsbma + KPMTSBM_VEND], tmp1 ;\
75 lines elided
1674 1547 cmp tagacc, tmp1 ;\
1675 1548 bgeu,pn %xcc, label ;\
1676 1549 lduw [tsbma + KPMTSBM_DTLBMISS], val ;\
1677 1550 inc val ;\
1678 1551 st val, [tsbma + KPMTSBM_DTLBMISS] ;\
1679 1552 label:
1680 1553 #else
1681 1554 #define KPM_TLBMISS_STAT_INCR(tagacc, val, tsbma, tmp1, label)
1682 1555 #endif /* KPM_TLBMISS_STATS_GATHER */
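
A hedged C sketch of KPM_TLBMISS_STAT_INCR. The per-CPU area lookup via
CPU_INDEX/KPMTSBM_SHIFT is folded into the pointer argument, and the struct only
mirrors the three fields the macro touches; the real per-CPU kpmtsbm area is larger.

    #include <stdint.h>

    struct kpmtsbm_sketch {
            uint64_t vbase;                 /* KPMTSBM_VBASE */
            uint64_t vend;                  /* KPMTSBM_VEND */
            uint32_t kpm_dtlb_misses;       /* KPMTSBM_DTLBMISS */
    };

    static void
    kpm_tlbmiss_stat_incr(uint64_t tagacc, struct kpmtsbm_sketch *k)
    {
            if ((int64_t)tagacc >= 0)       /* MSB clear: not a kpm VA */
                    return;
            if (tagacc < k->vbase || tagacc >= k->vend)
                    return;                 /* outside the kpm VA range */
            k->kpm_dtlb_misses++;           /* per-CPU dtlb miss count */
    }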
1683 1556
1684 -#if defined (lint)
1685 -/*
1686 - * The following routines are jumped to from the mmu trap handlers to do
1687 - * the setting up to call systrap. They are separate routines instead of
1688 - * being part of the handlers because the handlers would exceed 32
1689 - * instructions and since this is part of the slow path the jump
1690 - * cost is irrelevant.
1691 - */
1692 -void
1693 -sfmmu_pagefault(void)
1694 -{
1695 -}
1696 -
1697 -void
1698 -sfmmu_mmu_trap(void)
1699 -{
1700 -}
1701 -
1702 -void
1703 -sfmmu_window_trap(void)
1704 -{
1705 -}
1706 -
1707 -void
1708 -sfmmu_kpm_exception(void)
1709 -{
1710 -}
1711 -
1712 -#else /* lint */
1713 -
1714 1557 #ifdef PTL1_PANIC_DEBUG
1715 1558 .seg ".data"
1716 1559 .global test_ptl1_panic
1717 1560 test_ptl1_panic:
1718 1561 .word 0
1719 1562 .align 8
1720 1563
1721 1564 .seg ".text"
1722 1565 .align 4
1723 1566 #endif /* PTL1_PANIC_DEBUG */
1724 1567
1725 1568
1726 1569 ENTRY_NP(sfmmu_pagefault)
1727 1570 SET_GL_REG(1)
1728 1571 USE_ALTERNATE_GLOBALS(%g5)
1729 1572 GET_MMU_BOTH_TAGACC(%g5 /*dtag*/, %g2 /*itag*/, %g6, %g4)
1730 1573 rdpr %tt, %g6
1731 1574 cmp %g6, FAST_IMMU_MISS_TT
1732 1575 be,a,pn %icc, 1f
1733 1576 mov T_INSTR_MMU_MISS, %g3
1734 1577 cmp %g6, T_INSTR_MMU_MISS
1735 1578 be,a,pn %icc, 1f
1736 1579 mov T_INSTR_MMU_MISS, %g3
1737 1580 mov %g5, %g2
1738 1581 mov T_DATA_PROT, %g3 /* arg2 = traptype */
1739 1582 cmp %g6, FAST_DMMU_MISS_TT
1740 1583 move %icc, T_DATA_MMU_MISS, %g3 /* arg2 = traptype */
1741 1584 cmp %g6, T_DATA_MMU_MISS
1742 1585 move %icc, T_DATA_MMU_MISS, %g3 /* arg2 = traptype */
1743 1586
1744 1587 #ifdef PTL1_PANIC_DEBUG
1745 1588 /* check if we want to test the tl1 panic */
1746 1589 sethi %hi(test_ptl1_panic), %g4
1747 1590 ld [%g4 + %lo(test_ptl1_panic)], %g1
1748 1591 st %g0, [%g4 + %lo(test_ptl1_panic)]
1749 1592 cmp %g1, %g0
1750 1593 bne,a,pn %icc, ptl1_panic
1751 1594 or %g0, PTL1_BAD_DEBUG, %g1
1752 1595 #endif /* PTL1_PANIC_DEBUG */
1753 1596 1:
1754 1597 HAT_GLOBAL_STAT(HATSTAT_PAGEFAULT, %g6, %g4)
1755 1598 /*
1756 1599 * g2 = tag access reg
1757 1600 * g3.l = type
1758 1601 * g3.h = 0
1759 1602 */
1760 1603 sethi %hi(trap), %g1
1761 1604 or %g1, %lo(trap), %g1
1762 1605 2:
1763 1606 ba,pt %xcc, sys_trap
1764 1607 mov -1, %g4
1765 1608 SET_SIZE(sfmmu_pagefault)
1766 1609
1767 1610 ENTRY_NP(sfmmu_mmu_trap)
1768 1611 SET_GL_REG(1)
1769 1612 USE_ALTERNATE_GLOBALS(%g5)
1770 1613 GET_MMU_BOTH_TAGACC(%g5 /*dtag*/, %g2 /*itag*/, %g4, %g6)
1771 1614 rdpr %tt, %g6
1772 1615 cmp %g6, FAST_IMMU_MISS_TT
1773 1616 be,a,pn %icc, 1f
1774 1617 mov T_INSTR_MMU_MISS, %g3
1775 1618 cmp %g6, T_INSTR_MMU_MISS
1776 1619 be,a,pn %icc, 1f
1777 1620 mov T_INSTR_MMU_MISS, %g3
1778 1621 mov %g5, %g2
1779 1622 mov T_DATA_PROT, %g3 /* arg2 = traptype */
1780 1623 cmp %g6, FAST_DMMU_MISS_TT
1781 1624 move %icc, T_DATA_MMU_MISS, %g3 /* arg2 = traptype */
1782 1625 cmp %g6, T_DATA_MMU_MISS
1783 1626 move %icc, T_DATA_MMU_MISS, %g3 /* arg2 = traptype */
1784 1627 1:
1785 1628 /*
1786 1629 * g2 = tag access reg
1787 1630 * g3 = type
1788 1631 */
1789 1632 sethi %hi(sfmmu_tsbmiss_exception), %g1
1790 1633 or %g1, %lo(sfmmu_tsbmiss_exception), %g1
1791 1634 ba,pt %xcc, sys_trap
1792 1635 mov -1, %g4
1793 1636 /*NOTREACHED*/
1794 1637 SET_SIZE(sfmmu_mmu_trap)
1795 1638
1796 1639 ENTRY_NP(sfmmu_suspend_tl)
1797 1640 SET_GL_REG(1)
1798 1641 USE_ALTERNATE_GLOBALS(%g5)
1799 1642 GET_MMU_BOTH_TAGACC(%g5 /*dtag*/, %g2 /*itag*/, %g4, %g3)
1800 1643 rdpr %tt, %g6
1801 1644 cmp %g6, FAST_IMMU_MISS_TT
1802 1645 be,a,pn %icc, 1f
1803 1646 mov T_INSTR_MMU_MISS, %g3
1804 1647 mov %g5, %g2
1805 1648 cmp %g6, FAST_DMMU_MISS_TT
1806 1649 move %icc, T_DATA_MMU_MISS, %g3
1807 1650 movne %icc, T_DATA_PROT, %g3
1808 1651 1:
1809 1652 sethi %hi(sfmmu_tsbmiss_suspended), %g1
1810 1653 or %g1, %lo(sfmmu_tsbmiss_suspended), %g1
1811 1654 /* g1 = TL0 handler, g2 = tagacc, g3 = trap type */
1812 1655 ba,pt %xcc, sys_trap
1813 1656 mov PIL_15, %g4
1814 1657 /*NOTREACHED*/
1815 1658 SET_SIZE(sfmmu_suspend_tl)
1816 1659
1817 1660 /*
1818 1661 * No %g registers in use at this point.
1819 1662 */
1820 1663 ENTRY_NP(sfmmu_window_trap)
1821 1664 rdpr %tpc, %g1
1822 1665 #ifdef sun4v
1823 1666 #ifdef DEBUG
1824 1667 /* We assume previous %gl was 1 */
1825 1668 rdpr %tstate, %g4
1826 1669 srlx %g4, TSTATE_GL_SHIFT, %g4
1827 1670 and %g4, TSTATE_GL_MASK, %g4
1828 1671 cmp %g4, 1
1829 1672 bne,a,pn %icc, ptl1_panic
1830 1673 mov PTL1_BAD_WTRAP, %g1
1831 1674 #endif /* DEBUG */
1832 1675 /* user miss at tl>1. better be the window handler or user_rtt */
1833 1676 /* in user_rtt? */
1834 1677 set rtt_fill_start, %g4
1835 1678 cmp %g1, %g4
1836 1679 blu,pn %xcc, 6f
1837 1680 .empty
1838 1681 set rtt_fill_end, %g4
1839 1682 cmp %g1, %g4
1840 1683 bgeu,pn %xcc, 6f
1841 1684 nop
1842 1685 set fault_rtt_fn1, %g1
1843 1686 wrpr %g0, %g1, %tnpc
1844 1687 ba,a 7f
1845 1688 6:
1846 1689 ! must save this trap level before descending trap stack
1847 1690 ! no need to save %tnpc, either overwritten or discarded
1848 1691 ! already got it: rdpr %tpc, %g1
1849 1692 rdpr %tstate, %g6
1850 1693 rdpr %tt, %g7
1851 1694 ! trap level saved, go get underlying trap type
1852 1695 rdpr %tl, %g5
1853 1696 sub %g5, 1, %g3
1854 1697 wrpr %g3, %tl
1855 1698 rdpr %tt, %g2
1856 1699 wrpr %g5, %tl
1857 1700 ! restore saved trap level
1858 1701 wrpr %g1, %tpc
1859 1702 wrpr %g6, %tstate
1860 1703 wrpr %g7, %tt
1861 1704 #else /* sun4v */
1862 1705 /* user miss at tl>1. better be the window handler */
1863 1706 rdpr %tl, %g5
1864 1707 sub %g5, 1, %g3
1865 1708 wrpr %g3, %tl
1866 1709 rdpr %tt, %g2
1867 1710 wrpr %g5, %tl
1868 1711 #endif /* sun4v */
1869 1712 and %g2, WTRAP_TTMASK, %g4
1870 1713 cmp %g4, WTRAP_TYPE
1871 1714 bne,pn %xcc, 1f
1872 1715 nop
1873 1716 /* tpc should be in the trap table */
1874 1717 set trap_table, %g4
1875 1718 cmp %g1, %g4
1876 1719 blt,pn %xcc, 1f
1877 1720 .empty
1878 1721 set etrap_table, %g4
1879 1722 cmp %g1, %g4
1880 1723 bge,pn %xcc, 1f
1881 1724 .empty
1882 1725 andn %g1, WTRAP_ALIGN, %g1 /* 128 byte aligned */
1883 1726 add %g1, WTRAP_FAULTOFF, %g1
1884 1727 wrpr %g0, %g1, %tnpc
1885 1728 7:
1886 1729 /*
1887 1730 	 * Some wbuf handlers will call systrap to resolve the fault.
1888 1731 	 * We pass the trap type so they can figure out the correct parameters.
1889 1732 * g5 = trap type, g6 = tag access reg
1890 1733 */
1891 1734
1892 1735 /*
1893 1736 * only use g5, g6, g7 registers after we have switched to alternate
1894 1737 * globals.
1895 1738 */
1896 1739 SET_GL_REG(1)
1897 1740 USE_ALTERNATE_GLOBALS(%g5)
1898 1741 GET_MMU_D_TAGACC(%g6 /*dtag*/, %g5 /*scratch*/)
1899 1742 rdpr %tt, %g7
1900 1743 cmp %g7, FAST_IMMU_MISS_TT
1901 1744 be,a,pn %icc, ptl1_panic
1902 1745 mov PTL1_BAD_WTRAP, %g1
1903 1746 cmp %g7, T_INSTR_MMU_MISS
1904 1747 be,a,pn %icc, ptl1_panic
1905 1748 mov PTL1_BAD_WTRAP, %g1
1906 1749 mov T_DATA_PROT, %g5
1907 1750 cmp %g7, FAST_DMMU_MISS_TT
1908 1751 move %icc, T_DATA_MMU_MISS, %g5
1909 1752 cmp %g7, T_DATA_MMU_MISS
1910 1753 move %icc, T_DATA_MMU_MISS, %g5
1911 1754 ! XXXQ AGS re-check out this one
1912 1755 done
1913 1756 1:
1914 1757 CPU_PADDR(%g1, %g4)
1915 1758 add %g1, CPU_TL1_HDLR, %g1
1916 1759 lda [%g1]ASI_MEM, %g4
1917 1760 brnz,a,pt %g4, sfmmu_mmu_trap
1918 1761 sta %g0, [%g1]ASI_MEM
1919 1762 ba,pt %icc, ptl1_panic
1920 1763 mov PTL1_BAD_TRAP, %g1
1921 1764 SET_SIZE(sfmmu_window_trap)
1922 1765
1923 1766 ENTRY_NP(sfmmu_kpm_exception)
1924 1767 /*
1925 1768 * We have accessed an unmapped segkpm address or a legal segkpm
1926 1769 * address which is involved in a VAC alias conflict prevention.
1927 1770 * Before we go to trap(), check to see if CPU_DTRACE_NOFAULT is
1928 1771 * set. If it is, we will instead note that a fault has occurred
1929 1772 * by setting CPU_DTRACE_BADADDR and issue a "done" (instead of
1930 1773 * a "retry"). This will step over the faulting instruction.
1931 1774 * Note that this means that a legal segkpm address involved in
1932 1775 * a VAC alias conflict prevention (a rare case to begin with)
1933 1776 * cannot be used in DTrace.
1934 1777 */
1935 1778 CPU_INDEX(%g1, %g2)
1936 1779 set cpu_core, %g2
1937 1780 sllx %g1, CPU_CORE_SHIFT, %g1
1938 1781 add %g1, %g2, %g1
1939 1782 lduh [%g1 + CPUC_DTRACE_FLAGS], %g2
1940 1783 andcc %g2, CPU_DTRACE_NOFAULT, %g0
1941 1784 bz 0f
1942 1785 or %g2, CPU_DTRACE_BADADDR, %g2
1943 1786 stuh %g2, [%g1 + CPUC_DTRACE_FLAGS]
1944 1787 GET_MMU_D_ADDR(%g3, /*scratch*/ %g4)
1945 1788 stx %g3, [%g1 + CPUC_DTRACE_ILLVAL]
1946 1789 done
1947 1790 0:
1948 1791 TSTAT_CHECK_TL1(1f, %g1, %g2)
1949 1792 1:
1950 1793 SET_GL_REG(1)
1951 1794 USE_ALTERNATE_GLOBALS(%g5)
1952 1795 GET_MMU_D_TAGACC(%g2 /* tagacc */, %g4 /*scratch*/)
229 lines elided
1953 1796 mov T_DATA_MMU_MISS, %g3 /* arg2 = traptype */
1954 1797 /*
1955 1798 * g2=tagacc g3.l=type g3.h=0
1956 1799 */
1957 1800 sethi %hi(trap), %g1
1958 1801 or %g1, %lo(trap), %g1
1959 1802 ba,pt %xcc, sys_trap
1960 1803 mov -1, %g4
1961 1804 SET_SIZE(sfmmu_kpm_exception)
1962 1805
1963 -#endif /* lint */
1964 -
1965 -#if defined (lint)
1966 -
1967 -void
1968 -sfmmu_tsb_miss(void)
1969 -{
1970 -}
1971 -
1972 -void
1973 -sfmmu_kpm_dtsb_miss(void)
1974 -{
1975 -}
1976 -
1977 -void
1978 -sfmmu_kpm_dtsb_miss_small(void)
1979 -{
1980 -}
1981 -
1982 -#else /* lint */
1983 -
1984 1806 #if (IMAP_SEG != 0)
1985 1807 #error - ism_map->ism_seg offset is not zero
1986 1808 #endif
1987 1809
1988 1810 /*
1989 1811 	 * Copies the ISM mapping for this ctx into param "ismseg" if this is
1990 1812 	 * an ISM tlb miss and branches to label "ismhit". If this is not an
1991 1813 	 * ISM process or not an ISM tlb miss, it falls through.
1992 1814 *
1993 1815 * Checks to see if the vaddr passed in via tagacc is in an ISM segment for
1994 1816 * this process.
1995 1817 * If so, it will branch to label "ismhit". If not, it will fall through.
1996 1818 *
1997 1819 * Also hat_unshare() will set the context for this process to INVALID_CONTEXT
1998 1820 * so that any other threads of this process will not try and walk the ism
1999 1821 * maps while they are being changed.
2000 1822 *
2001 1823 * NOTE: We will never have any holes in our ISM maps. sfmmu_share/unshare
2002 1824 * will make sure of that. This means we can terminate our search on
2003 1825 * the first zero mapping we find.
2004 1826 *
2005 1827 * Parameters:
2006 1828 * tagacc = (pseudo-)tag access register (vaddr + ctx) (in)
2007 1829 * tsbmiss = address of tsb miss area (in)
2008 1830 * ismseg = contents of ism_seg for this ism map (out)
2009 1831 * ismhat = physical address of imap_ismhat for this ism map (out)
2010 1832 * tmp1 = scratch reg (CLOBBERED)
2011 1833 * tmp2 = scratch reg (CLOBBERED)
2012 1834 * tmp3 = scratch reg (CLOBBERED)
2013 1835 * label: temporary labels
2014 1836 	 * ismhit: label to jump to on an ism dtlb miss
2015 1837 	 * exitlabel: label to jump to if the hat is busy due to hat_unshare.
2016 1838 */
2017 1839 #define ISM_CHECK(tagacc, tsbmiss, ismseg, ismhat, tmp1, tmp2, tmp3, \
2018 1840 label, ismhit) \
2019 1841 ldx [tsbmiss + TSBMISS_ISMBLKPA], tmp1 /* tmp1 = &ismblk */ ;\
2020 1842 brlz,pt tmp1, label/**/3 /* exit if -1 */ ;\
2021 1843 add tmp1, IBLK_MAPS, ismhat /* ismhat = &ismblk.map[0] */ ;\
2022 1844 label/**/1: ;\
2023 1845 ldxa [ismhat]ASI_MEM, ismseg /* ismblk.map[0].ism_seg */ ;\
2024 1846 mov tmp1, tmp3 /* update current ismblkpa head */ ;\
2025 1847 label/**/2: ;\
2026 1848 brz,pt ismseg, label/**/3 /* no mapping */ ;\
2027 1849 add ismhat, IMAP_VB_SHIFT, tmp1 /* tmp1 = vb_shift addr */ ;\
2028 1850 lduba [tmp1]ASI_MEM, tmp1 /* tmp1 = vb shift*/ ;\
2029 1851 srlx ismseg, tmp1, tmp2 /* tmp2 = vbase */ ;\
2030 1852 srlx tagacc, tmp1, tmp1 /* tmp1 = va seg*/ ;\
2031 1853 sub tmp1, tmp2, tmp2 /* tmp2 = va - vbase */ ;\
2032 1854 add ismhat, IMAP_SZ_MASK, tmp1 /* tmp1 = sz_mask addr */ ;\
2033 1855 lda [tmp1]ASI_MEM, tmp1 /* tmp1 = sz_mask */ ;\
2034 1856 and ismseg, tmp1, tmp1 /* tmp1 = size */ ;\
2035 1857 cmp tmp2, tmp1 /* check va <= offset*/ ;\
2036 1858 blu,a,pt %xcc, ismhit /* ism hit */ ;\
2037 1859 add ismhat, IMAP_ISMHAT, ismhat /* ismhat = &ism_sfmmu*/ ;\
2038 1860 ;\
2039 1861 add ismhat, ISM_MAP_SZ, ismhat /* ismhat += sizeof(map) */ ;\
2040 1862 add tmp3, (IBLK_MAPS + ISM_MAP_SLOTS * ISM_MAP_SZ), tmp1 ;\
2041 1863 cmp ismhat, tmp1 ;\
2042 1864 bl,pt %xcc, label/**/2 /* keep looking */ ;\
2043 1865 ldxa [ismhat]ASI_MEM, ismseg /* ismseg = map[ismhat] */ ;\
2044 1866 ;\
2045 1867 add tmp3, IBLK_NEXTPA, tmp1 ;\
2046 1868 ldxa [tmp1]ASI_MEM, tmp1 /* check blk->nextpa */ ;\
2047 1869 brgez,pt tmp1, label/**/1 /* continue if not -1*/ ;\
2048 1870 add tmp1, IBLK_MAPS, ismhat /* ismhat = &ismblk.map[0]*/ ;\
2049 1871 label/**/3:
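
For reference, a rough C rendering of the ISM_CHECK walk. The structures are
simplified stand-ins for the real ism_blk/ism_map definitions, the physical chaining
through ASI_MEM is modeled with ordinary pointers, and N_ISM_SLOTS is a placeholder
for ISM_MAP_SLOTS:

    #include <stdint.h>
    #include <stddef.h>

    #define N_ISM_SLOTS 4                   /* placeholder for ISM_MAP_SLOTS */

    struct ism_map_sketch {
            uint64_t seg;           /* ism_seg: vbase and size packed together */
            uint8_t  vb_shift;      /* imap_vb_shift */
            uint32_t sz_mask;       /* imap_sz_mask */
            void    *ismhat;        /* imap_ismhat: the ISM sfmmu */
    };

    struct ism_blk_sketch {
            struct ism_map_sketch  maps[N_ISM_SLOTS];
            struct ism_blk_sketch *next;    /* iblk_nextpa (a PA in the real code) */
    };

    /* Returns the ISM hat for va, or NULL if va is not in any ISM segment. */
    static void *
    ism_check(uint64_t va, struct ism_blk_sketch *blk)
    {
            for (; blk != NULL; blk = blk->next) {
                    for (int i = 0; i < N_ISM_SLOTS; i++) {
                            const struct ism_map_sketch *m = &blk->maps[i];

                            if (m->seg == 0)        /* no holes: end of list */
                                    return (NULL);
                            if (((va >> m->vb_shift) - (m->seg >> m->vb_shift)) <
                                (m->seg & m->sz_mask))
                                    return (m->ismhat);     /* ISM hit */
                    }
            }
            return (NULL);
    }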
2050 1872
2051 1873 /*
2052 1874 	 * Returns the hme hash bucket (hmebp) given the vaddr and the hatid.
2053 1875 	 * It also returns the virtual pg for vaddr (i.e. vaddr >> hmeshift).
2054 1876 * Parameters:
2055 1877 * tagacc = reg containing virtual address
2056 1878 * hatid = reg containing sfmmu pointer
2057 1879 * hmeshift = constant/register to shift vaddr to obtain vapg
2058 1880 * hmebp = register where bucket pointer will be stored
2059 1881 * vapg = register where virtual page will be stored
2060 1882 * tmp1, tmp2 = tmp registers
2061 1883 */
2062 1884
2063 1885
2064 1886 #define HMEHASH_FUNC_ASM(tagacc, hatid, tsbarea, hmeshift, hmebp, \
2065 1887 vapg, label, tmp1, tmp2) \
2066 1888 sllx tagacc, TAGACC_CTX_LSHIFT, tmp1 ;\
2067 1889 brnz,a,pt tmp1, label/**/1 ;\
2068 1890 ld [tsbarea + TSBMISS_UHASHSZ], hmebp ;\
2069 1891 ld [tsbarea + TSBMISS_KHASHSZ], hmebp ;\
2070 1892 ba,pt %xcc, label/**/2 ;\
2071 1893 ldx [tsbarea + TSBMISS_KHASHSTART], tmp1 ;\
2072 1894 label/**/1: ;\
2073 1895 ldx [tsbarea + TSBMISS_UHASHSTART], tmp1 ;\
2074 1896 label/**/2: ;\
2075 1897 srlx tagacc, hmeshift, vapg ;\
2076 1898 xor vapg, hatid, tmp2 /* hatid ^ (vaddr >> shift) */ ;\
2077 1899 and tmp2, hmebp, hmebp /* index into hme_hash */ ;\
2078 1900 mulx hmebp, HMEBUCK_SIZE, hmebp ;\
2079 1901 add hmebp, tmp1, hmebp
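
In C terms the bucket computation above reduces to the following sketch. It assumes,
as the assembly does, that the *HASHSZ value loaded from the tsbmiss area can be used
directly as an AND mask, and that the caller has already chosen the kernel or user
hash (context bits of tagacc zero vs. non-zero):

    #include <stdint.h>

    static uint64_t
    hmehash_bucket_addr(uint64_t vaddr, uint64_t hatid, unsigned hmeshift,
        uint64_t hashmask, uint64_t hashstart, uint64_t hmebuck_size,
        uint64_t *vapgp)
    {
            uint64_t vapg = vaddr >> hmeshift;      /* virtual page for vaddr */

            *vapgp = vapg;
            /* hatid ^ (vaddr >> shift), masked, scaled to a bucket address */
            return (hashstart + ((vapg ^ hatid) & hashmask) * hmebuck_size);
    }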
2080 1902
2081 1903 /*
2082 1904 * hashtag includes bspage + hashno (64 bits).
2083 1905 */
2084 1906
2085 1907 #define MAKE_HASHTAG(vapg, hatid, hmeshift, hashno, hblktag) \
2086 1908 sllx vapg, hmeshift, vapg ;\
2087 1909 mov hashno, hblktag ;\
2088 1910 sllx hblktag, HTAG_REHASH_SHIFT, hblktag ;\
2089 1911 or vapg, hblktag, hblktag
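
The corresponding C expression for MAKE_HASHTAG, with the rehash shift passed in
rather than taken from the headers (illustrative only):

    #include <stdint.h>

    static uint64_t
    make_hashtag(uint64_t vapg, unsigned hmeshift, uint64_t hashno,
        unsigned rehash_shift /* HTAG_REHASH_SHIFT */)
    {
            /* bspage in the low bits, rehash number above it */
            return ((vapg << hmeshift) | (hashno << rehash_shift));
    }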
2090 1912
2091 1913 /*
2092 1914 * Function to traverse hmeblk hash link list and find corresponding match.
2093 1915 * The search is done using physical pointers. It returns the physical address
2094 1916 * pointer to the hmeblk that matches with the tag provided.
2095 1917 * Parameters:
2096 1918 * hmebp = register that points to hme hash bucket, also used as
2097 1919 * tmp reg (clobbered)
2098 1920 * hmeblktag = register with hmeblk tag match
2099 1921 * hatid = register with hatid
2100 1922 * hmeblkpa = register where physical ptr will be stored
2101 1923 * tmp1 = tmp reg
2102 1924 * label: temporary label
2103 1925 */
2104 1926
2105 1927 #define HMEHASH_SEARCH(hmebp, hmeblktag, hatid, hmeblkpa, tsbarea, \
2106 1928 tmp1, label) \
2107 1929 add hmebp, HMEBUCK_NEXTPA, hmeblkpa ;\
2108 1930 ldxa [hmeblkpa]ASI_MEM, hmeblkpa ;\
2109 1931 HAT_HSEARCH_DBSTAT(hatid, tsbarea, hmebp, tmp1) ;\
2110 1932 label/**/1: ;\
2111 1933 cmp hmeblkpa, HMEBLK_ENDPA ;\
2112 1934 be,pn %xcc, label/**/2 ;\
2113 1935 HAT_HLINK_DBSTAT(hatid, tsbarea, hmebp, tmp1) ;\
2114 1936 add hmeblkpa, HMEBLK_TAG, hmebp ;\
2115 1937 ldxa [hmebp]ASI_MEM, tmp1 /* read 1st part of tag */ ;\
2116 1938 add hmebp, CLONGSIZE, hmebp ;\
2117 1939 ldxa [hmebp]ASI_MEM, hmebp /* read 2nd part of tag */ ;\
2118 1940 xor tmp1, hmeblktag, tmp1 ;\
2119 1941 xor hmebp, hatid, hmebp ;\
2120 1942 or hmebp, tmp1, hmebp ;\
2121 1943 brz,pn hmebp, label/**/2 /* branch on hit */ ;\
2122 1944 add hmeblkpa, HMEBLK_NEXTPA, hmebp ;\
2123 1945 ba,pt %xcc, label/**/1 ;\
2124 1946 ldxa [hmebp]ASI_MEM, hmeblkpa /* hmeblk ptr pa */ ;\
2125 1947 label/**/2:
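
Logically HMEHASH_SEARCH is the loop below. The real macro chases physical addresses
with ldxa/ASI_MEM and terminates on HMEBLK_ENDPA; this sketch models that with
ordinary pointers and NULL, and spells out the two-word tag compare that the assembly
performs as (tag1 ^ hblktag) | (tag2 ^ hatid) == 0:

    #include <stdint.h>
    #include <stddef.h>

    struct hme_blk_sketch {
            uint64_t tag_bspage;            /* first word of hblk_tag */
            uint64_t tag_hatid;             /* second word of hblk_tag */
            struct hme_blk_sketch *next;    /* hblk_nextpa */
    };

    static struct hme_blk_sketch *
    hmehash_search(struct hme_blk_sketch *head, uint64_t hblktag, uint64_t hatid)
    {
            struct hme_blk_sketch *hblk;

            for (hblk = head; hblk != NULL; hblk = hblk->next) {
                    if (hblk->tag_bspage == hblktag &&
                        hblk->tag_hatid == hatid)
                            return (hblk);          /* hit */
            }
            return (NULL);                          /* not found */
    }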
2126 1948
2127 1949 /*
2128 1950 * Function to traverse hmeblk hash link list and find corresponding match.
2129 1951 * The search is done using physical pointers. It returns the physical address
2130 1952 * pointer to the hmeblk that matches with the tag
2131 1953 * provided.
2132 1954 * Parameters:
2133 1955 * hmeblktag = register with hmeblk tag match (rid field is 0)
2134 1956 * hatid = register with hatid (pointer to SRD)
2135 1957 * hmeblkpa = register where physical ptr will be stored
2136 1958 * tmp1 = tmp reg
2137 1959 * tmp2 = tmp reg
2138 1960 * label: temporary label
2139 1961 */
2140 1962
2141 1963 #define HMEHASH_SEARCH_SHME(hmeblktag, hatid, hmeblkpa, tsbarea, \
2142 1964 tmp1, tmp2, label) \
2143 1965 label/**/1: ;\
2144 1966 cmp hmeblkpa, HMEBLK_ENDPA ;\
2145 1967 be,pn %xcc, label/**/4 ;\
2146 1968 HAT_HLINK_DBSTAT(hatid, tsbarea, tmp1, tmp2) ;\
2147 1969 add hmeblkpa, HMEBLK_TAG, tmp2 ;\
2148 1970 ldxa [tmp2]ASI_MEM, tmp1 /* read 1st part of tag */ ;\
2149 1971 add tmp2, CLONGSIZE, tmp2 ;\
2150 1972 ldxa [tmp2]ASI_MEM, tmp2 /* read 2nd part of tag */ ;\
2151 1973 xor tmp1, hmeblktag, tmp1 ;\
2152 1974 xor tmp2, hatid, tmp2 ;\
2153 1975 brz,pn tmp2, label/**/3 /* branch on hit */ ;\
2154 1976 add hmeblkpa, HMEBLK_NEXTPA, tmp2 ;\
2155 1977 label/**/2: ;\
2156 1978 ba,pt %xcc, label/**/1 ;\
2157 1979 ldxa [tmp2]ASI_MEM, hmeblkpa /* hmeblk ptr pa */ ;\
2158 1980 label/**/3: ;\
2159 1981 cmp tmp1, SFMMU_MAX_HME_REGIONS ;\
2160 1982 bgeu,pt %xcc, label/**/2 ;\
2161 1983 add hmeblkpa, HMEBLK_NEXTPA, tmp2 ;\
2162 1984 and tmp1, BT_ULMASK, tmp2 ;\
2163 1985 srlx tmp1, BT_ULSHIFT, tmp1 ;\
2164 1986 sllx tmp1, CLONGSHIFT, tmp1 ;\
2165 1987 add tsbarea, tmp1, tmp1 ;\
2166 1988 ldx [tmp1 + TSBMISS_SHMERMAP], tmp1 ;\
2167 1989 srlx tmp1, tmp2, tmp1 ;\
2168 1990 btst 0x1, tmp1 ;\
2169 1991 bz,pn %xcc, label/**/2 ;\
2170 1992 add hmeblkpa, HMEBLK_NEXTPA, tmp2 ;\
2171 1993 label/**/4:
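
The only difference from HMEHASH_SEARCH is the extra region check made after a hatid
match: the leftover bits of the first tag word are the region id, which must be below
SFMMU_MAX_HME_REGIONS and set in the shared-region bitmap kept in the tsbmiss area. A
sketch of just that check, assuming 64-bit bitmap words (i.e. BT_ULSHIFT == 6):

    #include <stdint.h>

    #define RID_WORD_SHIFT  6               /* BT_ULSHIFT, 64-bit words assumed */
    #define RID_BIT_MASK    ((1u << RID_WORD_SHIFT) - 1)

    static int
    shme_rid_valid(uint64_t rid, uint64_t max_rid, const uint64_t *shmermap)
    {
            if (rid >= max_rid)             /* leftover bits are not a usable rid */
                    return (0);
            return ((shmermap[rid >> RID_WORD_SHIFT] >>
                (rid & RID_BIT_MASK)) & 1);
    }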
2172 1994
2173 1995 #if ((1 << SFHME_SHIFT) != SFHME_SIZE)
2174 1996 #error HMEBLK_TO_HMENT assumes sf_hment is power of 2 in size
2175 1997 #endif
2176 1998
2177 1999 /*
2178 2000 * HMEBLK_TO_HMENT is a macro that given an hmeblk and a vaddr returns
2179 2001 * the offset for the corresponding hment.
2180 2002 * Parameters:
2181 2003 * In:
2182 2004 * vaddr = register with virtual address
2183 2005 * hmeblkpa = physical pointer to hme_blk
2184 2006 * Out:
2185 2007 * hmentoff = register where hment offset will be stored
2186 2008 * hmemisc = hblk_misc
2187 2009 * Scratch:
2188 2010 * tmp1
2189 2011 */
2190 2012 #define HMEBLK_TO_HMENT(vaddr, hmeblkpa, hmentoff, hmemisc, tmp1, label1)\
2191 2013 add hmeblkpa, HMEBLK_MISC, hmentoff ;\
2192 2014 lda [hmentoff]ASI_MEM, hmemisc ;\
2193 2015 andcc hmemisc, HBLK_SZMASK, %g0 ;\
2194 2016 bnz,a,pn %icc, label1 /* if sz != TTE8K branch */ ;\
2195 2017 or %g0, HMEBLK_HME1, hmentoff ;\
2196 2018 srl vaddr, MMU_PAGESHIFT, tmp1 ;\
2197 2019 and tmp1, NHMENTS - 1, tmp1 /* tmp1 = index */ ;\
2198 2020 sllx tmp1, SFHME_SHIFT, tmp1 ;\
2199 2021 add tmp1, HMEBLK_HME1, hmentoff ;\
2200 2022 label1:
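
A hedged C sketch of the offset computation HMEBLK_TO_HMENT performs; the constants
are passed as parameters here, but in the real code they are MMU_PAGESHIFT, NHMENTS,
SFHME_SHIFT and the HMEBLK_HME1 offset from assym.h. For an 8K hmeblk the hment index
is the 8K page number within the block; larger page sizes always use hment 0:

    #include <stdint.h>

    static uint64_t
    hmeblk_to_hment_off(uint64_t vaddr, uint32_t hblk_misc, uint32_t szmask,
        unsigned pageshift, unsigned nhments, unsigned sfhme_shift,
        uint64_t hme1_off)
    {
            uint64_t idx = 0;

            if ((hblk_misc & szmask) == 0)          /* sz == TTE8K */
                    idx = (vaddr >> pageshift) & (nhments - 1);
            return (hme1_off + (idx << sfhme_shift));
    }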
2201 2023
2202 2024 /*
2203 2025 * GET_TTE is a macro that returns a TTE given a tag and hatid.
2204 2026 *
2205 2027 * tagacc = (pseudo-)tag access register (in)
2206 2028 * hatid = sfmmu pointer for TSB miss (in)
2207 2029 * tte = tte for TLB miss if found, otherwise clobbered (out)
2208 2030 * hmeblkpa = PA of hment if found, otherwise clobbered (out)
2209 2031 * tsbarea = pointer to the tsbmiss area for this cpu. (in)
2210 2032 * hmemisc = hblk_misc if TTE is found (out), otherwise clobbered
2211 2033 * hmeshift = constant/register to shift VA to obtain the virtual pfn
2212 2034 * for this page size.
2213 2035 * hashno = constant/register hash number
2214 2036 * tmp = temp value - clobbered
2215 2037 * label = temporary label for branching within macro.
2216 2038 * foundlabel = label to jump to when tte is found.
2217 2039 * suspendlabel= label to jump to when tte is suspended.
2218 2040 * exitlabel = label to jump to when tte is not found.
2219 2041 *
2220 2042 */
2221 2043 #define GET_TTE(tagacc, hatid, tte, hmeblkpa, tsbarea, hmemisc, hmeshift, \
2222 2044 hashno, tmp, label, foundlabel, suspendlabel, exitlabel) \
2223 2045 ;\
2224 2046 stn tagacc, [tsbarea + (TSBMISS_SCRATCH + TSB_TAGACC)] ;\
2225 2047 stn hatid, [tsbarea + (TSBMISS_SCRATCH + TSBMISS_HATID)] ;\
2226 2048 HMEHASH_FUNC_ASM(tagacc, hatid, tsbarea, hmeshift, tte, \
2227 2049 hmeblkpa, label/**/5, hmemisc, tmp) ;\
2228 2050 ;\
2229 2051 /* ;\
2230 2052 * tagacc = tagacc ;\
2231 2053 * hatid = hatid ;\
2232 2054 * tsbarea = tsbarea ;\
2233 2055 * tte = hmebp (hme bucket pointer) ;\
2234 2056 * hmeblkpa = vapg (virtual page) ;\
2235 2057 * hmemisc, tmp = scratch ;\
2236 2058 */ ;\
2237 2059 MAKE_HASHTAG(hmeblkpa, hatid, hmeshift, hashno, hmemisc) ;\
2238 2060 or hmemisc, SFMMU_INVALID_SHMERID, hmemisc ;\
2239 2061 ;\
2240 2062 /* ;\
2241 2063 * tagacc = tagacc ;\
2242 2064 * hatid = hatid ;\
2243 2065 * tte = hmebp ;\
2244 2066 * hmeblkpa = CLOBBERED ;\
2245 2067 * hmemisc = htag_bspage+hashno+invalid_rid ;\
2246 2068 * tmp = scratch ;\
2247 2069 */ ;\
2248 2070 stn tte, [tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)] ;\
2249 2071 HMEHASH_SEARCH(tte, hmemisc, hatid, hmeblkpa, \
2250 2072 tsbarea, tagacc, label/**/1) ;\
2251 2073 /* ;\
2252 2074 * tagacc = CLOBBERED ;\
2253 2075 * tte = CLOBBERED ;\
2254 2076 * hmeblkpa = hmeblkpa ;\
2255 2077 * tmp = scratch ;\
2256 2078 */ ;\
2257 2079 cmp hmeblkpa, HMEBLK_ENDPA ;\
2258 2080 bne,pn %xcc, label/**/4 /* branch if hmeblk found */ ;\
2259 2081 ldn [tsbarea + (TSBMISS_SCRATCH + TSB_TAGACC)], tagacc ;\
2260 2082 ba,pt %xcc, exitlabel /* exit if hblk not found */ ;\
2261 2083 nop ;\
2262 2084 label/**/4: ;\
2263 2085 /* ;\
2264 2086 * We have found the hmeblk containing the hment. ;\
2265 2087 * Now we calculate the corresponding tte. ;\
2266 2088 * ;\
2267 2089 * tagacc = tagacc ;\
2268 2090 * hatid = hatid ;\
2269 2091 * tte = clobbered ;\
2270 2092 * hmeblkpa = hmeblkpa ;\
2271 2093 * hmemisc = hblktag ;\
2272 2094 * tmp = scratch ;\
2273 2095 */ ;\
2274 2096 HMEBLK_TO_HMENT(tagacc, hmeblkpa, hatid, hmemisc, tte, \
2275 2097 label/**/2) ;\
2276 2098 ;\
2277 2099 /* ;\
2278 2100 * tagacc = tagacc ;\
2279 2101 * hatid = hmentoff ;\
2280 2102 * tte = clobbered ;\
2281 2103 * hmeblkpa = hmeblkpa ;\
2282 2104 * hmemisc = hblk_misc ;\
2283 2105 * tmp = scratch ;\
2284 2106 */ ;\
2285 2107 ;\
2286 2108 add hatid, SFHME_TTE, hatid ;\
2287 2109 add hmeblkpa, hatid, hmeblkpa ;\
2288 2110 ldxa [hmeblkpa]ASI_MEM, tte /* MMU_READTTE through pa */ ;\
2289 2111 ldn [tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)], hatid ;\
2290 2112 set TTE_SUSPEND, hatid ;\
2291 2113 TTE_SUSPEND_INT_SHIFT(hatid) ;\
2292 2114 btst tte, hatid ;\
2293 2115 bz,pt %xcc, foundlabel ;\
2294 2116 ldn [tsbarea + (TSBMISS_SCRATCH + TSBMISS_HATID)], hatid ;\
2295 2117 ;\
2296 2118 /* ;\
2297 2119 * Mapping is suspended, so goto suspend label. ;\
2298 2120 */ ;\
2299 2121 ba,pt %xcc, suspendlabel ;\
2300 2122 nop
2301 2123
2302 2124 /*
2303 2125 	 * GET_SHME_TTE is similar to GET_TTE() except that it searches
2304 2126 	 * shared hmeblks via the HMEHASH_SEARCH_SHME() macro.
2305 2127 	 * If a valid tte is found, hmemisc = shctx flag, i.e., shme is
2306 2128 * either 0 (not part of scd) or 1 (part of scd).
2307 2129 */
2308 2130 #define GET_SHME_TTE(tagacc, hatid, tte, hmeblkpa, tsbarea, hmemisc, \
2309 2131 hmeshift, hashno, tmp, label, foundlabel, \
2310 2132 suspendlabel, exitlabel) \
2311 2133 ;\
2312 2134 stn tagacc, [tsbarea + (TSBMISS_SCRATCH + TSB_TAGACC)] ;\
2313 2135 stn hatid, [tsbarea + (TSBMISS_SCRATCH + TSBMISS_HATID)] ;\
2314 2136 HMEHASH_FUNC_ASM(tagacc, hatid, tsbarea, hmeshift, tte, \
2315 2137 hmeblkpa, label/**/5, hmemisc, tmp) ;\
2316 2138 ;\
2317 2139 /* ;\
2318 2140 * tagacc = tagacc ;\
2319 2141 * hatid = hatid ;\
2320 2142 * tsbarea = tsbarea ;\
2321 2143 * tte = hmebp (hme bucket pointer) ;\
2322 2144 * hmeblkpa = vapg (virtual page) ;\
2323 2145 * hmemisc, tmp = scratch ;\
2324 2146 */ ;\
2325 2147 MAKE_HASHTAG(hmeblkpa, hatid, hmeshift, hashno, hmemisc) ;\
2326 2148 ;\
2327 2149 /* ;\
2328 2150 * tagacc = tagacc ;\
2329 2151 * hatid = hatid ;\
2330 2152 * tsbarea = tsbarea ;\
2331 2153 * tte = hmebp ;\
2332 2154 * hmemisc = htag_bspage + hashno + 0 (for rid) ;\
2333 2155 * hmeblkpa = CLOBBERED ;\
2334 2156 * tmp = scratch ;\
2335 2157 */ ;\
2336 2158 stn tte, [tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)] ;\
2337 2159 ;\
2338 2160 add tte, HMEBUCK_NEXTPA, hmeblkpa ;\
2339 2161 ldxa [hmeblkpa]ASI_MEM, hmeblkpa ;\
2340 2162 HAT_HSEARCH_DBSTAT(hatid, tsbarea, tagacc, tte) ;\
2341 2163 ;\
2342 2164 label/**/8: ;\
2343 2165 HMEHASH_SEARCH_SHME(hmemisc, hatid, hmeblkpa, \
2344 2166 tsbarea, tagacc, tte, label/**/1) ;\
2345 2167 /* ;\
2346 2168 * tagacc = CLOBBERED ;\
2347 2169 * tte = CLOBBERED ;\
2348 2170 * hmeblkpa = hmeblkpa ;\
2349 2171 * tmp = scratch ;\
2350 2172 */ ;\
2351 2173 cmp hmeblkpa, HMEBLK_ENDPA ;\
2352 2174 bne,pn %xcc, label/**/4 /* branch if hmeblk found */ ;\
2353 2175 ldn [tsbarea + (TSBMISS_SCRATCH + TSB_TAGACC)], tagacc ;\
2354 2176 ba,pt %xcc, exitlabel /* exit if hblk not found */ ;\
2355 2177 nop ;\
2356 2178 label/**/4: ;\
2357 2179 /* ;\
2358 2180 * We have found the hmeblk containing the hment. ;\
2359 2181 * Now we calculate the corresponding tte. ;\
2360 2182 * ;\
2361 2183 * tagacc = tagacc ;\
2362 2184 * hatid = hatid ;\
2363 2185 * tte = clobbered ;\
2364 2186 * hmeblkpa = hmeblkpa ;\
2365 2187 * hmemisc = hblktag ;\
2366 2188 * tsbarea = tsbmiss area ;\
2367 2189 * tmp = scratch ;\
2368 2190 */ ;\
2369 2191 HMEBLK_TO_HMENT(tagacc, hmeblkpa, hatid, hmemisc, tte, \
2370 2192 label/**/2) ;\
2371 2193 ;\
2372 2194 /* ;\
2373 2195 * tagacc = tagacc ;\
2374 2196 * hatid = hmentoff ;\
2375 2197 * tte = clobbered ;\
2376 2198 * hmeblkpa = hmeblkpa ;\
2377 2199 * hmemisc = hblk_misc ;\
2378 2200 * tsbarea = tsbmiss area ;\
2379 2201 * tmp = scratch ;\
2380 2202 */ ;\
2381 2203 ;\
2382 2204 add hatid, SFHME_TTE, hatid ;\
2383 2205 add hmeblkpa, hatid, hmeblkpa ;\
2384 2206 ldxa [hmeblkpa]ASI_MEM, tte /* MMU_READTTE through pa */ ;\
2385 2207 brlz,pt tte, label/**/6 ;\
2386 2208 nop ;\
2387 2209 btst HBLK_SZMASK, hmemisc ;\
2388 2210 bnz,a,pt %icc, label/**/7 ;\
2389 2211 ldn [tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)], hatid ;\
2390 2212 ;\
2391 2213 /* ;\
2392 2214 * We found an invalid 8K tte in shme. ;\
2393 2215 * it may not belong to shme's region since ;\
2394 2216 * region size/alignment granularity is 8K but different ;\
2395 2217 * regions don't share hmeblks. Continue the search. ;\
2396 2218 */ ;\
2397 2219 sub hmeblkpa, hatid, hmeblkpa ;\
2398 2220 ldn [tsbarea + (TSBMISS_SCRATCH + TSBMISS_HATID)], hatid ;\
2399 2221 srlx tagacc, hmeshift, tte ;\
2400 2222 add hmeblkpa, HMEBLK_NEXTPA, hmeblkpa ;\
2401 2223 ldxa [hmeblkpa]ASI_MEM, hmeblkpa ;\
2402 2224 MAKE_HASHTAG(tte, hatid, hmeshift, hashno, hmemisc) ;\
2403 2225 ba,a,pt %xcc, label/**/8 ;\
2404 2226 label/**/6: ;\
2405 2227 GET_SCDSHMERMAP(tsbarea, hmeblkpa, hatid, hmemisc) ;\
2406 2228 ldn [tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)], hatid ;\
2407 2229 label/**/7: ;\
2408 2230 set TTE_SUSPEND, hatid ;\
2409 2231 TTE_SUSPEND_INT_SHIFT(hatid) ;\
2410 2232 btst tte, hatid ;\
2411 2233 bz,pt %xcc, foundlabel ;\
2412 2234 ldn [tsbarea + (TSBMISS_SCRATCH + TSBMISS_HATID)], hatid ;\
2413 2235 ;\
2414 2236 /* ;\
2415 2237 * Mapping is suspended, so goto suspend label. ;\
2416 2238 */ ;\
2417 2239 ba,pt %xcc, suspendlabel ;\
2418 2240 nop
2419 2241
2420 2242 /*
2421 2243 * KERNEL PROTECTION HANDLER
2422 2244 *
2423 2245 * g1 = tsb8k pointer register (clobbered)
2424 2246 * g2 = tag access register (ro)
2425 2247 * g3 - g7 = scratch registers
2426 2248 *
2427 2249 * Note: This function is patched at runtime for performance reasons.
2428 2250 	 * Any changes here require sfmmu_patch_ktsb to be updated as well.
2429 2251 */
2430 2252 ENTRY_NP(sfmmu_kprot_trap)
2431 2253 mov %g2, %g7 ! TSB pointer macro clobbers tagacc
2432 2254 sfmmu_kprot_patch_ktsb_base:
2433 2255 RUNTIME_PATCH_SETX(%g1, %g6)
2434 2256 /* %g1 = contents of ktsb_base or ktsb_pbase */
2435 2257 sfmmu_kprot_patch_ktsb_szcode:
2436 2258 or %g0, RUNTIME_PATCH, %g3 ! ktsb_szcode (hot patched)
2437 2259
2438 2260 GET_TSBE_POINTER(MMU_PAGESHIFT, %g1, %g7, %g3, %g5)
2439 2261 ! %g1 = First TSB entry pointer, as TSB miss handler expects
2440 2262
2441 2263 mov %g2, %g7 ! TSB pointer macro clobbers tagacc
2442 2264 sfmmu_kprot_patch_ktsb4m_base:
2443 2265 RUNTIME_PATCH_SETX(%g3, %g6)
2444 2266 /* %g3 = contents of ktsb4m_base or ktsb4m_pbase */
2445 2267 sfmmu_kprot_patch_ktsb4m_szcode:
2446 2268 or %g0, RUNTIME_PATCH, %g6 ! ktsb4m_szcode (hot patched)
2447 2269
2448 2270 GET_TSBE_POINTER(MMU_PAGESHIFT4M, %g3, %g7, %g6, %g5)
2449 2271 ! %g3 = 4M tsb entry pointer, as TSB miss handler expects
2450 2272
2451 2273 CPU_TSBMISS_AREA(%g6, %g7)
2452 2274 HAT_PERCPU_STAT16(%g6, TSBMISS_KPROTS, %g7)
2453 2275 ba,pt %xcc, sfmmu_tsb_miss_tt
2454 2276 nop
2455 2277
2456 2278 /*
2457 2279 * USER PROTECTION HANDLER
2458 2280 *
2459 2281 * g1 = tsb8k pointer register (ro)
2460 2282 * g2 = tag access register (ro)
2461 2283 * g3 = faulting context (clobbered, currently not used)
2462 2284 * g4 - g7 = scratch registers
2463 2285 */
2464 2286 ALTENTRY(sfmmu_uprot_trap)
2465 2287 #ifdef sun4v
2466 2288 GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)
2467 2289 /* %g1 = first TSB entry ptr now, %g2 preserved */
2468 2290
2469 2291 GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3) /* get 2nd utsbreg */
2470 2292 brlz,pt %g3, 9f /* check for 2nd TSB */
2471 2293 nop
2472 2294
2473 2295 GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
2474 2296 /* %g3 = second TSB entry ptr now, %g2 preserved */
2475 2297
2476 2298 #else /* sun4v */
2477 2299 #ifdef UTSB_PHYS
2478 2300 /* g1 = first TSB entry ptr */
2479 2301 GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)
2480 2302 brlz,pt %g3, 9f /* check for 2nd TSB */
2481 2303 nop
2482 2304
2483 2305 GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
2484 2306 /* %g3 = second TSB entry ptr now, %g2 preserved */
2485 2307 #else /* UTSB_PHYS */
2486 2308 brgez,pt %g1, 9f /* check for 2nd TSB */
2487 2309 mov -1, %g3 /* set second tsbe ptr to -1 */
2488 2310
2489 2311 mov %g2, %g7
2490 2312 GET_2ND_TSBE_PTR(%g7, %g1, %g3, %g4, %g5, sfmmu_uprot)
2491 2313 /* %g3 = second TSB entry ptr now, %g7 clobbered */
2492 2314 mov %g1, %g7
2493 2315 GET_1ST_TSBE_PTR(%g7, %g1, %g5, sfmmu_uprot)
2494 2316 #endif /* UTSB_PHYS */
2495 2317 #endif /* sun4v */
2496 2318 9:
2497 2319 CPU_TSBMISS_AREA(%g6, %g7)
2498 2320 HAT_PERCPU_STAT16(%g6, TSBMISS_UPROTS, %g7)
2499 2321 ba,pt %xcc, sfmmu_tsb_miss_tt /* branch TSB miss handler */
2500 2322 nop
2501 2323
2502 2324 /*
2503 2325 * Kernel 8K page iTLB miss. We also get here if we took a
2504 2326 * fast instruction access mmu miss trap while running in
2505 2327 * invalid context.
2506 2328 *
2507 2329 * %g1 = 8K TSB pointer register (not used, clobbered)
2508 2330 * %g2 = tag access register (used)
2509 2331 * %g3 = faulting context id (used)
2510 2332 * %g7 = TSB tag to match (used)
2511 2333 */
2512 2334 .align 64
2513 2335 ALTENTRY(sfmmu_kitlb_miss)
2514 2336 brnz,pn %g3, tsb_tl0_noctxt
2515 2337 nop
2516 2338
2517 2339 /* kernel miss */
2518 2340 /* get kernel tsb pointer */
2519 2341 /* we patch the next set of instructions at run time */
2520 2342 	/* NOTE: any changes here require sfmmu_patch_ktsb to be updated */
2521 2343 iktsbbase:
2522 2344 RUNTIME_PATCH_SETX(%g4, %g5)
2523 2345 /* %g4 = contents of ktsb_base or ktsb_pbase */
2524 2346
2525 2347 iktsb: sllx %g2, 64-(TAGACC_SHIFT + TSB_START_SIZE + RUNTIME_PATCH), %g1
2526 2348 srlx %g1, 64-(TSB_START_SIZE + TSB_ENTRY_SHIFT + RUNTIME_PATCH), %g1
2527 2349 or %g4, %g1, %g1 ! form tsb ptr
2528 2350 ldda [%g1]RUNTIME_PATCH, %g4 ! %g4 = tag, %g5 = data
2529 2351 cmp %g4, %g7
2530 2352 bne,pn %xcc, iktsb4mbase ! check 4m ktsb
2531 2353 srlx %g2, MMU_PAGESHIFT4M, %g3 ! use 4m virt-page as TSB index
2532 2354
2533 2355 andcc %g5, TTE_EXECPRM_INT, %g0 ! check exec bit
2534 2356 bz,pn %icc, exec_fault
2535 2357 nop
2536 2358 TT_TRACE(trace_tsbhit) ! 2 instr traptrace
2537 2359 ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2538 2360 retry
2539 2361
2540 2362 iktsb4mbase:
2541 2363 RUNTIME_PATCH_SETX(%g4, %g6)
2542 2364 /* %g4 = contents of ktsb4m_base or ktsb4m_pbase */
2543 2365 iktsb4m:
2544 2366 sllx %g3, 64-(TSB_START_SIZE + RUNTIME_PATCH), %g3
2545 2367 srlx %g3, 64-(TSB_START_SIZE + TSB_ENTRY_SHIFT + RUNTIME_PATCH), %g3
2546 2368 add %g4, %g3, %g3 ! %g3 = 4m tsbe ptr
2547 2369 ldda [%g3]RUNTIME_PATCH, %g4 ! %g4 = tag, %g5 = data
2548 2370 cmp %g4, %g7
2549 2371 bne,pn %xcc, sfmmu_tsb_miss_tt ! branch on miss
2550 2372 andcc %g5, TTE_EXECPRM_INT, %g0 ! check exec bit
2551 2373 bz,pn %icc, exec_fault
2552 2374 nop
2553 2375 TT_TRACE(trace_tsbhit) ! 2 instr traptrace
2554 2376 ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2555 2377 retry
2556 2378
2557 2379 /*
2558 2380 * Kernel dTLB miss. We also get here if we took a fast data
2559 2381 * access mmu miss trap while running in invalid context.
2560 2382 *
2561 2383 * Note: for now we store kpm TTEs in the kernel TSB as usual.
2562 2384 * We select the TSB miss handler to branch to depending on
2563 2385 * the virtual address of the access. In the future it may
2564 2386 * be desirable to separate kpm TTEs into their own TSB,
2565 2387 * in which case all that needs to be done is to set
2566 2388 * kpm_tsbbase/kpm_tsbsz to point to the new TSB and branch
2567 2389 * early in the miss if we detect a kpm VA to a new handler.
2568 2390 *
2569 2391 * %g1 = 8K TSB pointer register (not used, clobbered)
2570 2392 * %g2 = tag access register (used)
2571 2393 * %g3 = faulting context id (used)
2572 2394 */
2573 2395 .align 64
2574 2396 ALTENTRY(sfmmu_kdtlb_miss)
2575 2397 brnz,pn %g3, tsb_tl0_noctxt /* invalid context? */
2576 2398 nop
2577 2399
2578 2400 /* Gather some stats for kpm misses in the TLB. */
2579 2401 /* KPM_TLBMISS_STAT_INCR(tagacc, val, tsbma, tmp1, label) */
2580 2402 KPM_TLBMISS_STAT_INCR(%g2, %g4, %g5, %g6, kpmtlbm_stat_out)
2581 2403
2582 2404 /*
2583 2405 * Get first TSB offset and look for 8K/64K/512K mapping
2584 2406 * using the 8K virtual page as the index.
2585 2407 *
2586 2408 * We patch the next set of instructions at run time;
2587 2409 * any changes here require sfmmu_patch_ktsb changes too.
2588 2410 */
2589 2411 dktsbbase:
2590 2412 RUNTIME_PATCH_SETX(%g7, %g6)
2591 2413 /* %g7 = contents of ktsb_base or ktsb_pbase */
2592 2414
2593 2415 dktsb: sllx %g2, 64-(TAGACC_SHIFT + TSB_START_SIZE + RUNTIME_PATCH), %g1
2594 2416 srlx %g1, 64-(TSB_START_SIZE + TSB_ENTRY_SHIFT + RUNTIME_PATCH), %g1
2595 2417
2596 2418 /*
2597 2419 * At this point %g1 is our index into the TSB.
2598 2420 * We just masked off enough bits of the VA depending
2599 2421 * on our TSB size code.
2600 2422 */
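
The sllx/srlx pair above is the shift-based form of the index computation sketched
below; the two are equivalent on this path because the context field of the tag
access register is known to be zero here. tsb_szc stands for the run-time patched
TSB size code:

    #include <stdint.h>

    static uint64_t
    ktsb_8k_offset(uint64_t tagacc, unsigned tagacc_shift /* TAGACC_SHIFT */,
        unsigned tsb_start_size /* TSB_START_SIZE */, unsigned tsb_szc,
        unsigned tsb_entry_shift /* TSB_ENTRY_SHIFT */)
    {
            uint64_t vpn  = tagacc >> tagacc_shift;         /* 8K virtual page */
            uint64_t mask = (1ULL << (tsb_start_size + tsb_szc)) - 1;

            return ((vpn & mask) << tsb_entry_shift);       /* byte offset into TSB */
    }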
2601 2423 ldda [%g7 + %g1]RUNTIME_PATCH, %g4 ! %g4 = tag, %g5 = data
2602 2424 srlx %g2, TAG_VALO_SHIFT, %g6 ! make tag to compare
2603 2425 cmp %g6, %g4 ! compare tag
2604 2426 bne,pn %xcc, dktsb4m_kpmcheck_small
2605 2427 add %g7, %g1, %g1 /* form tsb ptr */
2606 2428 TT_TRACE(trace_tsbhit)
2607 2429 DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2608 2430 /* trapstat expects tte in %g5 */
2609 2431 retry
2610 2432
2611 2433 /*
2612 2434 * If kpm is using large pages, the following instruction needs
2613 2435 * to be patched to a nop at boot time (by sfmmu_kpm_patch_tsbm)
2614 2436 * so that we will probe the 4M TSB regardless of the VA. In
2615 2437 * the case kpm is using small pages, we know no large kernel
2616 2438 * mappings are located above 0x80000000.00000000 so we skip the
2617 2439 * probe as an optimization.
2618 2440 */
2619 2441 dktsb4m_kpmcheck_small:
2620 2442 brlz,pn %g2, sfmmu_kpm_dtsb_miss_small
2621 2443 /* delay slot safe, below */
2622 2444
2623 2445 /*
2624 2446 * Get second TSB offset and look for 4M mapping
2625 2447 * using 4M virtual page as the TSB index.
2626 2448 *
2627 2449 * Here:
2628 2450 * %g1 = 8K TSB pointer. Don't squash it.
2629 2451 * %g2 = tag access register (we still need it)
2630 2452 */
2631 2453 srlx %g2, MMU_PAGESHIFT4M, %g3
2632 2454
2633 2455 /*
2634 2456 * We patch the next set of instructions at run time;
2635 2457 * any changes here require sfmmu_patch_ktsb changes too.
2636 2458 */
2637 2459 dktsb4mbase:
2638 2460 RUNTIME_PATCH_SETX(%g7, %g6)
2639 2461 /* %g7 = contents of ktsb4m_base or ktsb4m_pbase */
2640 2462 dktsb4m:
2641 2463 sllx %g3, 64-(TSB_START_SIZE + RUNTIME_PATCH), %g3
2642 2464 srlx %g3, 64-(TSB_START_SIZE + TSB_ENTRY_SHIFT + RUNTIME_PATCH), %g3
2643 2465
2644 2466 /*
2645 2467 * At this point %g3 is our index into the TSB.
2646 2468 * We just masked off enough bits of the VA depending
2647 2469 * on our TSB size code.
2648 2470 */
2649 2471 ldda [%g7 + %g3]RUNTIME_PATCH, %g4 ! %g4 = tag, %g5 = data
2650 2472 srlx %g2, TAG_VALO_SHIFT, %g6 ! make tag to compare
2651 2473 cmp %g6, %g4 ! compare tag
2652 2474
2653 2475 dktsb4m_tsbmiss:
2654 2476 bne,pn %xcc, dktsb4m_kpmcheck
2655 2477 add %g7, %g3, %g3 ! %g3 = kernel second TSB ptr
2656 2478 TT_TRACE(trace_tsbhit)
2657 2479 /* we don't check TTE size here since we assume 4M TSB is separate */
2658 2480 DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2659 2481 /* trapstat expects tte in %g5 */
2660 2482 retry
2661 2483
2662 2484 /*
2663 2485 * So, we failed to find a valid TTE to match the faulting
2664 2486 * address in either TSB. There are a few cases that could land
2665 2487 * us here:
2666 2488 *
2667 2489 * 1) This is a kernel VA below 0x80000000.00000000. We branch
2668 2490 * to sfmmu_tsb_miss_tt to handle the miss.
2669 2491 * 2) We missed on a kpm VA, and we didn't find the mapping in the
2670 2492 * 4M TSB. Let segkpm handle it.
2671 2493 *
2672 2494 * Note that we shouldn't land here in the case of a kpm VA when
2673 2495 * kpm_smallpages is active -- we handled that case earlier at
2674 2496 * dktsb4m_kpmcheck_small.
2675 2497 *
2676 2498 * At this point:
2677 2499 * g1 = 8K-indexed primary TSB pointer
2678 2500 * g2 = tag access register
2679 2501 * g3 = 4M-indexed secondary TSB pointer
2680 2502 */
2681 2503 dktsb4m_kpmcheck:
2682 2504 cmp %g2, %g0
2683 2505 bl,pn %xcc, sfmmu_kpm_dtsb_miss
2684 2506 nop
2685 2507 ba,a,pt %icc, sfmmu_tsb_miss_tt
2686 2508 nop
2687 2509
2688 2510 #ifdef sun4v
2689 2511 /*
2690 2512 * User instruction miss w/ single TSB.
2691 2513 * The first probe covers 8K, 64K, and 512K page sizes,
2692 2514 * because 64K and 512K mappings are replicated off 8K
2693 2515 * pointer.
2694 2516 *
2695 2517 * g1 = tsb8k pointer register
2696 2518 * g2 = tag access register
2697 2519 * g3 - g6 = scratch registers
2698 2520 * g7 = TSB tag to match
2699 2521 */
2700 2522 .align 64
2701 2523 ALTENTRY(sfmmu_uitlb_fastpath)
2702 2524
2703 2525 PROBE_1ST_ITSB(%g1, %g7, uitlb_fast_8k_probefail)
2704 2526 /* g4 - g5 = clobbered by PROBE_1ST_ITSB */
2705 2527 ba,pn %xcc, sfmmu_tsb_miss_tt
2706 2528 mov -1, %g3
2707 2529
2708 2530 /*
2709 2531 * User data miss w/ single TSB.
2710 2532 * The first probe covers 8K, 64K, and 512K page sizes,
2711 2533 * because 64K and 512K mappings are replicated off 8K
2712 2534 * pointer.
2713 2535 *
2714 2536 * g1 = tsb8k pointer register
2715 2537 * g2 = tag access register
2716 2538 * g3 - g6 = scratch registers
2717 2539 * g7 = TSB tag to match
2718 2540 */
2719 2541 .align 64
2720 2542 ALTENTRY(sfmmu_udtlb_fastpath)
2721 2543
2722 2544 PROBE_1ST_DTSB(%g1, %g7, udtlb_fast_8k_probefail)
2723 2545 /* g4 - g5 = clobbered by PROBE_1ST_DTSB */
2724 2546 ba,pn %xcc, sfmmu_tsb_miss_tt
2725 2547 mov -1, %g3
2726 2548
2727 2549 /*
2728 2550 * User instruction miss w/ multiple TSBs (sun4v).
2729 2551 * The first probe covers 8K, 64K, and 512K page sizes,
2730 2552 * because 64K and 512K mappings are replicated off 8K
2731 2553 * pointer. Second probe covers 4M page size only.
2732 2554 *
2733 2555 * Just like sfmmu_udtlb_slowpath, except:
2734 2556 * o Uses ASI_ITLB_IN
2735 2557 * o checks for execute permission
2736 2558 * o No ISM prediction.
2737 2559 *
2738 2560 * g1 = tsb8k pointer register
2739 2561 * g2 = tag access register
2740 2562 * g3 - g6 = scratch registers
2741 2563 * g7 = TSB tag to match
2742 2564 */
2743 2565 .align 64
2744 2566 ALTENTRY(sfmmu_uitlb_slowpath)
2745 2567
2746 2568 GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)
2747 2569 PROBE_1ST_ITSB(%g1, %g7, uitlb_8k_probefail)
2748 2570 /* g4 - g5 = clobbered here */
2749 2571
2750 2572 GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
2751 2573 /* g1 = first TSB pointer, g3 = second TSB pointer */
2752 2574 srlx %g2, TAG_VALO_SHIFT, %g7
2753 2575 PROBE_2ND_ITSB(%g3, %g7)
2754 2576 /* NOT REACHED */
2755 2577
2756 2578 #else /* sun4v */
2757 2579
2758 2580 /*
2759 2581 * User instruction miss w/ multiple TSBs (sun4u).
2760 2582 * The first probe covers 8K, 64K, and 512K page sizes,
2761 2583 * because 64K and 512K mappings are replicated off 8K
2762 2584 * pointer. Probe of 1st TSB has already been done prior to entry
2763 2585 * into this routine. For the UTSB_PHYS case we probe up to 3
2764 2586 * valid other TSBs in the following order:
2765 2587 * 1) shared TSB for 4M-256M pages
2766 2588 * 2) private TSB for 4M-256M pages
2767 2589 * 3) shared TSB for 8K-512K pages
2768 2590 *
2769 2591 * For the non UTSB_PHYS case we probe the 2nd TSB here that backs
2770 2592 * 4M-256M pages.
2771 2593 *
2772 2594 * Just like sfmmu_udtlb_slowpath, except:
2773 2595 * o Uses ASI_ITLB_IN
2774 2596 * o checks for execute permission
2775 2597 * o No ISM prediction.
2776 2598 *
2777 2599 * g1 = tsb8k pointer register
2778 2600 * g2 = tag access register
2779 2601 * g4 - g6 = scratch registers
2780 2602 * g7 = TSB tag to match
2781 2603 */
2782 2604 .align 64
2783 2605 ALTENTRY(sfmmu_uitlb_slowpath)
2784 2606
2785 2607 #ifdef UTSB_PHYS
2786 2608
2787 2609 GET_UTSBREG(SCRATCHPAD_UTSBREG4, %g6)
2788 2610 brlz,pt %g6, 1f
2789 2611 nop
2790 2612 GET_4TH_TSBE_PTR(%g2, %g6, %g4, %g5)
2791 2613 PROBE_4TH_ITSB(%g6, %g7, uitlb_4m_scd_probefail)
2792 2614 1:
2793 2615 GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)
2794 2616 brlz,pt %g3, 2f
2795 2617 nop
2796 2618 GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
2797 2619 PROBE_2ND_ITSB(%g3, %g7, uitlb_4m_probefail)
2798 2620 2:
2799 2621 GET_UTSBREG(SCRATCHPAD_UTSBREG3, %g6)
2800 2622 brlz,pt %g6, sfmmu_tsb_miss_tt
2801 2623 nop
2802 2624 GET_3RD_TSBE_PTR(%g2, %g6, %g4, %g5)
2803 2625 PROBE_3RD_ITSB(%g6, %g7, uitlb_8K_scd_probefail)
2804 2626 ba,pn %xcc, sfmmu_tsb_miss_tt
2805 2627 nop
2806 2628
2807 2629 #else /* UTSB_PHYS */
2808 2630 mov %g1, %g3 /* save tsb8k reg in %g3 */
2809 2631 GET_1ST_TSBE_PTR(%g3, %g1, %g5, sfmmu_uitlb)
2810 2632 PROBE_1ST_ITSB(%g1, %g7, uitlb_8k_probefail)
2811 2633 mov %g2, %g6 /* GET_2ND_TSBE_PTR clobbers tagacc */
2812 2634 mov %g3, %g7 /* copy tsb8k reg in %g7 */
2813 2635 GET_2ND_TSBE_PTR(%g6, %g7, %g3, %g4, %g5, sfmmu_uitlb)
2814 2636 /* g1 = first TSB pointer, g3 = second TSB pointer */
2815 2637 srlx %g2, TAG_VALO_SHIFT, %g7
2816 2638 PROBE_2ND_ITSB(%g3, %g7, isynth)
2817 2639 ba,pn %xcc, sfmmu_tsb_miss_tt
2818 2640 nop
2819 2641
2820 2642 #endif /* UTSB_PHYS */
2821 2643 #endif /* sun4v */
2822 2644
2823 2645 #if defined(sun4u) && defined(UTSB_PHYS)
2824 2646
2825 2647 /*
2826 2648 	 * We come here for the ISM predict DTLB_MISS case or if
2827 2649 	 * the probe in the first TSB failed.
2828 2650 */
2829 2651
2830 2652 .align 64
2831 2653 ALTENTRY(sfmmu_udtlb_slowpath_noismpred)
2832 2654
2833 2655 /*
2834 2656 * g1 = tsb8k pointer register
2835 2657 * g2 = tag access register
2836 2658 * g4 - %g6 = scratch registers
2837 2659 * g7 = TSB tag to match
2838 2660 */
2839 2661
2840 2662 /*
2841 2663 * ISM non-predict probe order
2842 2664 * probe 1ST_TSB (8K index)
2843 2665 * probe 2ND_TSB (4M index)
2844 2666 * probe 4TH_TSB (4M index)
2845 2667 * probe 3RD_TSB (8K index)
2846 2668 *
2847 2669 * We already probed first TSB in DTLB_MISS handler.
2848 2670 */
2849 2671
2850 2672 /*
2851 2673 * Private 2ND TSB 4M-256 pages
2852 2674 */
2853 2675 GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)
2854 2676 brlz,pt %g3, 1f
2855 2677 nop
2856 2678 GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
2857 2679 PROBE_2ND_DTSB(%g3, %g7, udtlb_4m_probefail)
2858 2680
2859 2681 /*
2860 2682 * Shared Context 4TH TSB 4M-256 pages
2861 2683 */
2862 2684 1:
2863 2685 GET_UTSBREG(SCRATCHPAD_UTSBREG4, %g6)
2864 2686 brlz,pt %g6, 2f
2865 2687 nop
2866 2688 GET_4TH_TSBE_PTR(%g2, %g6, %g4, %g5)
2867 2689 PROBE_4TH_DTSB(%g6, %g7, udtlb_4m_shctx_probefail)
2868 2690
2869 2691 /*
2870 2692 * Shared Context 3RD TSB 8K-512K pages
2871 2693 */
2872 2694 2:
2873 2695 GET_UTSBREG(SCRATCHPAD_UTSBREG3, %g6)
2874 2696 brlz,pt %g6, sfmmu_tsb_miss_tt
2875 2697 nop
2876 2698 GET_3RD_TSBE_PTR(%g2, %g6, %g4, %g5)
2877 2699 PROBE_3RD_DTSB(%g6, %g7, udtlb_8k_shctx_probefail)
2878 2700 ba,pn %xcc, sfmmu_tsb_miss_tt
2879 2701 nop
2880 2702
2881 2703 .align 64
2882 2704 ALTENTRY(sfmmu_udtlb_slowpath_ismpred)
2883 2705
2884 2706 /*
2885 2707 * g1 = tsb8k pointer register
2886 2708 * g2 = tag access register
2887 2709 * g4 - g6 = scratch registers
2888 2710 * g7 = TSB tag to match
2889 2711 */
2890 2712
2891 2713 /*
2892 2714 * ISM predict probe order
2893 2715 * probe 4TH_TSB (4M index)
2894 2716 * probe 2ND_TSB (4M index)
2895 2717 * probe 1ST_TSB (8K index)
2896 2718 * probe 3RD_TSB (8K index)
2897 2719 	 */
2898 2720 /*
2899 2721 * Shared Context 4TH TSB 4M-256 pages
2900 2722 */
2901 2723 GET_UTSBREG(SCRATCHPAD_UTSBREG4, %g6)
2902 2724 brlz,pt %g6, 4f
2903 2725 nop
2904 2726 GET_4TH_TSBE_PTR(%g2, %g6, %g4, %g5)
2905 2727 PROBE_4TH_DTSB(%g6, %g7, udtlb_4m_shctx_probefail2)
2906 2728
2907 2729 /*
2908 2730 * Private 2ND TSB 4M-256 pages
2909 2731 */
2910 2732 4:
2911 2733 GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)
2912 2734 brlz,pt %g3, 5f
2913 2735 nop
2914 2736 GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
2915 2737 PROBE_2ND_DTSB(%g3, %g7, udtlb_4m_probefail2)
2916 2738
2917 2739 5:
2918 2740 PROBE_1ST_DTSB(%g1, %g7, udtlb_8k_first_probefail2)
2919 2741
2920 2742 /*
2921 2743 * Shared Context 3RD TSB 8K-512K pages
2922 2744 */
2923 2745 GET_UTSBREG(SCRATCHPAD_UTSBREG3, %g6)
2924 2746 brlz,pt %g6, 6f
2925 2747 nop
2926 2748 GET_3RD_TSBE_PTR(%g2, %g6, %g4, %g5)
2927 2749 PROBE_3RD_DTSB(%g6, %g7, udtlb_8k_shctx_probefail2)
2928 2750 6:
2929 2751 ba,pn %xcc, sfmmu_tsb_miss_tt /* ISM Predict and ISM non-predict path */
2930 2752 nop
2931 2753
2932 2754 #else /* sun4u && UTSB_PHYS */
2933 2755
2934 2756 .align 64
2935 2757 ALTENTRY(sfmmu_udtlb_slowpath)
2936 2758
2937 2759 srax %g2, PREDISM_BASESHIFT, %g6 /* g6 > 0 : ISM predicted */
2938 2760 brgz,pn %g6, udtlb_miss_probesecond /* check for ISM */
2939 2761 mov %g1, %g3
2940 2762
2941 2763 udtlb_miss_probefirst:
2942 2764 /*
2943 2765 * g1 = 8K TSB pointer register
2944 2766 * g2 = tag access register
2945 2767 * g3 = (potentially) second TSB entry ptr
2946 2768 * g6 = ism pred.
2947 2769 * g7 = vpg_4m
2948 2770 */
2949 2771 #ifdef sun4v
2950 2772 GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)
2951 2773 PROBE_1ST_DTSB(%g1, %g7, udtlb_first_probefail)
2952 2774
2953 2775 /*
2954 2776 * Here:
2955 2777 * g1 = first TSB pointer
2956 2778 * g2 = tag access reg
2957 2779 * g3 = second TSB ptr IFF ISM pred. (else don't care)
2958 2780 */
2959 2781 brgz,pn %g6, sfmmu_tsb_miss_tt
2960 2782 nop
2961 2783 #else /* sun4v */
2962 2784 mov %g1, %g4
2963 2785 GET_1ST_TSBE_PTR(%g4, %g1, %g5, sfmmu_udtlb)
2964 2786 PROBE_1ST_DTSB(%g1, %g7, udtlb_first_probefail)
2965 2787
2966 2788 /*
2967 2789 * Here:
2968 2790 * g1 = first TSB pointer
2969 2791 * g2 = tag access reg
2970 2792 * g3 = second TSB ptr IFF ISM pred. (else don't care)
2971 2793 */
2972 2794 brgz,pn %g6, sfmmu_tsb_miss_tt
2973 2795 nop
2974 2796 ldxa [%g0]ASI_DMMU_TSB_8K, %g3
2975 2797 /* fall through in 8K->4M probe order */
2976 2798 #endif /* sun4v */
2977 2799
2978 2800 udtlb_miss_probesecond:
2979 2801 /*
2980 2802 * Look in the second TSB for the TTE
2981 2803 * g1 = First TSB entry ptr if !ISM pred, TSB8K ptr reg if ISM pred.
2982 2804 * g2 = tag access reg
2983 2805 * g3 = 8K TSB pointer register
2984 2806 * g6 = ism pred.
2985 2807 * g7 = vpg_4m
2986 2808 */
2987 2809 #ifdef sun4v
2988 2810 /* GET_2ND_TSBE_PTR(tagacc, tsbe_ptr, tmp1, tmp2) */
2989 2811 GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
2990 2812 /* %g2 is okay, no need to reload, %g3 = second tsbe ptr */
2991 2813 #else /* sun4v */
2992 2814 mov %g3, %g7
2993 2815 GET_2ND_TSBE_PTR(%g2, %g7, %g3, %g4, %g5, sfmmu_udtlb)
2994 2816 /* %g2 clobbered, %g3 =second tsbe ptr */
2995 2817 mov MMU_TAG_ACCESS, %g2
2996 2818 ldxa [%g2]ASI_DMMU, %g2
2997 2819 #endif /* sun4v */
2998 2820
2999 2821 srlx %g2, TAG_VALO_SHIFT, %g7
3000 2822 PROBE_2ND_DTSB(%g3, %g7, udtlb_4m_probefail)
3001 2823 /* g4 - g5 = clobbered here; %g7 still vpg_4m at this point */
3002 2824 brgz,pn %g6, udtlb_miss_probefirst
3003 2825 nop
3004 2826
3005 2827 /* fall through to sfmmu_tsb_miss_tt */
3006 2828 #endif /* sun4u && UTSB_PHYS */
3007 2829
3008 2830
3009 2831 ALTENTRY(sfmmu_tsb_miss_tt)
3010 2832 TT_TRACE(trace_tsbmiss)
3011 2833 /*
3012 2834 * We get here if there is a TSB miss OR a write protect trap.
3013 2835 *
3014 2836 * g1 = First TSB entry pointer
3015 2837 * g2 = tag access register
3016 2838 * g3 = 4M TSB entry pointer; -1 if no 2nd TSB
3017 2839 * g4 - g7 = scratch registers
3018 2840 */
3019 2841
3020 2842 ALTENTRY(sfmmu_tsb_miss)
3021 2843
3022 2844 /*
3023 2845 * If trapstat is running, we need to shift the %tpc and %tnpc to
3024 2846 * point to trapstat's TSB miss return code (note that trapstat
3025 2847 * itself will patch the correct offset to add).
3026 2848 */
3027 2849 rdpr %tl, %g7
3028 2850 cmp %g7, 1
3029 2851 ble,pt %xcc, 0f
3030 2852 sethi %hi(KERNELBASE), %g6
3031 2853 rdpr %tpc, %g7
3032 2854 or %g6, %lo(KERNELBASE), %g6
3033 2855 cmp %g7, %g6
3034 2856 bgeu,pt %xcc, 0f
3035 2857 /* delay slot safe */
3036 2858
3037 2859 ALTENTRY(tsbmiss_trapstat_patch_point)
3038 2860 add %g7, RUNTIME_PATCH, %g7 /* must match TSTAT_TSBMISS_INSTR */
3039 2861 wrpr %g7, %tpc
3040 2862 add %g7, 4, %g7
3041 2863 wrpr %g7, %tnpc
3042 2864 0:
3043 2865 CPU_TSBMISS_AREA(%g6, %g7)
3044 2866 stn %g1, [%g6 + TSBMISS_TSBPTR] /* save 1ST tsb pointer */
3045 2867 stn %g3, [%g6 + TSBMISS_TSBPTR4M] /* save 2ND tsb pointer */
3046 2868
3047 2869 sllx %g2, TAGACC_CTX_LSHIFT, %g3
3048 2870 brz,a,pn %g3, 1f /* skip ahead if kernel */
3049 2871 ldn [%g6 + TSBMISS_KHATID], %g7
3050 2872 srlx %g3, TAGACC_CTX_LSHIFT, %g3 /* g3 = ctxnum */
3051 2873 ldn [%g6 + TSBMISS_UHATID], %g7 /* g7 = hatid */
3052 2874
3053 2875 HAT_PERCPU_STAT32(%g6, TSBMISS_UTSBMISS, %g5)
3054 2876
3055 2877 cmp %g3, INVALID_CONTEXT
3056 2878 be,pn %icc, tsb_tl0_noctxt /* no ctx miss exception */
3057 2879 stn %g7, [%g6 + (TSBMISS_SCRATCH + TSBMISS_HATID)]
3058 2880
3059 2881 #if defined(sun4v) || defined(UTSB_PHYS)
3060 2882 ldub [%g6 + TSBMISS_URTTEFLAGS], %g7 /* clear ctx1 flag set from */
3061 2883 andn %g7, HAT_CHKCTX1_FLAG, %g7 /* the previous tsb miss */
3062 2884 stub %g7, [%g6 + TSBMISS_URTTEFLAGS]
3063 2885 #endif /* sun4v || UTSB_PHYS */
3064 2886
3065 2887 ISM_CHECK(%g2, %g6, %g3, %g4, %g5, %g7, %g1, tsb_l1, tsb_ism)
3066 2888 /*
3067 2889 * The miss wasn't in an ISM segment.
3068 2890 *
3069 2891 * %g1 %g3, %g4, %g5, %g7 all clobbered
3070 2892 * %g2 = (pseudo) tag access
3071 2893 */
3072 2894
3073 2895 ba,pt %icc, 2f
3074 2896 ldn [%g6 + (TSBMISS_SCRATCH + TSBMISS_HATID)], %g7
3075 2897
3076 2898 1:
3077 2899 HAT_PERCPU_STAT32(%g6, TSBMISS_KTSBMISS, %g5)
3078 2900 /*
3079 2901 * 8K and 64K hash.
3080 2902 */
3081 2903 2:
3082 2904
3083 2905 GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
3084 2906 MMU_PAGESHIFT64K, TTE64K, %g5, tsb_l8K, tsb_checktte,
3085 2907 sfmmu_suspend_tl, tsb_512K)
3086 2908 /* NOT REACHED */
3087 2909
3088 2910 tsb_512K:
3089 2911 sllx %g2, TAGACC_CTX_LSHIFT, %g5
3090 2912 brz,pn %g5, 3f
3091 2913 ldub [%g6 + TSBMISS_UTTEFLAGS], %g4
3092 2914 and %g4, HAT_512K_FLAG, %g5
3093 2915
3094 2916 /*
3095 2917 * Note that there is a small window here where we may have
3096 2918 * a 512k page in the hash list but have not set the HAT_512K_FLAG
3097 2919 * flag yet, so we will skip searching the 512k hash list.
3098 2920 * In this case we will end up in pagefault which will find
3099 2921 * the mapping and return. So, in this instance we will end up
3100 2922 * spending a bit more time resolving this TSB miss, but it can
3101 2923 * only happen once per process and even then, the chances of that
3102 2924 * are very small, so it's not worth the extra overhead it would
3103 2925 * take to close this window.
3104 2926 */
3105 2927 brz,pn %g5, tsb_4M
3106 2928 nop
3107 2929 3:
3108 2930 /*
3109 2931 * 512K hash
3110 2932 */
3111 2933
3112 2934 GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
3113 2935 MMU_PAGESHIFT512K, TTE512K, %g5, tsb_l512K, tsb_checktte,
3114 2936 sfmmu_suspend_tl, tsb_4M)
3115 2937 /* NOT REACHED */
3116 2938
3117 2939 tsb_4M:
3118 2940 sllx %g2, TAGACC_CTX_LSHIFT, %g5
3119 2941 brz,pn %g5, 4f
3120 2942 ldub [%g6 + TSBMISS_UTTEFLAGS], %g4
3121 2943 and %g4, HAT_4M_FLAG, %g5
3122 2944 brz,pn %g5, tsb_32M
3123 2945 nop
3124 2946 4:
3125 2947 /*
3126 2948 * 4M hash
3127 2949 */
3128 2950
3129 2951 GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
3130 2952 MMU_PAGESHIFT4M, TTE4M, %g5, tsb_l4M, tsb_checktte,
3131 2953 sfmmu_suspend_tl, tsb_32M)
3132 2954 /* NOT REACHED */
3133 2955
3134 2956 tsb_32M:
3135 2957 sllx %g2, TAGACC_CTX_LSHIFT, %g5
3136 2958 #ifdef sun4v
3137 2959 brz,pn %g5, 6f
3138 2960 #else
3139 2961 brz,pn %g5, tsb_pagefault
3140 2962 #endif
3141 2963 ldub [%g6 + TSBMISS_UTTEFLAGS], %g4
3142 2964 and %g4, HAT_32M_FLAG, %g5
3143 2965 brz,pn %g5, tsb_256M
3144 2966 nop
3145 2967 5:
3146 2968 /*
3147 2969 * 32M hash
3148 2970 */
3149 2971
3150 2972 GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
3151 2973 MMU_PAGESHIFT32M, TTE32M, %g5, tsb_l32M, tsb_checktte,
3152 2974 sfmmu_suspend_tl, tsb_256M)
3153 2975 /* NOT REACHED */
3154 2976
3155 2977 #if defined(sun4u) && !defined(UTSB_PHYS)
3156 2978 #define tsb_shme tsb_pagefault
3157 2979 #endif
3158 2980 tsb_256M:
3159 2981 ldub [%g6 + TSBMISS_UTTEFLAGS], %g4
3160 2982 and %g4, HAT_256M_FLAG, %g5
3161 2983 brz,pn %g5, tsb_shme
3162 2984 nop
3163 2985 6:
3164 2986 /*
3165 2987 * 256M hash
3166 2988 */
3167 2989
3168 2990 GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
3169 2991 MMU_PAGESHIFT256M, TTE256M, %g5, tsb_l256M, tsb_checktte,
3170 2992 sfmmu_suspend_tl, tsb_shme)
3171 2993 /* NOT REACHED */
3172 2994
3173 2995 tsb_checktte:
3174 2996 /*
3175 2997 * g1 = hblk_misc
3176 2998 * g2 = tagacc
3177 2999 * g3 = tte
3178 3000 * g4 = tte pa
3179 3001 * g6 = tsbmiss area
3180 3002 * g7 = hatid
3181 3003 */
3182 3004 brlz,a,pt %g3, tsb_validtte
3183 3005 rdpr %tt, %g7
3184 3006
3185 3007 #if defined(sun4u) && !defined(UTSB_PHYS)
3186 3008 #undef tsb_shme
3187 3009 ba tsb_pagefault
3188 3010 nop
3189 3011 #else /* sun4u && !UTSB_PHYS */
3190 3012
3191 3013 tsb_shme:
3192 3014 /*
3193 3015 * g2 = tagacc
3194 3016 * g6 = tsbmiss area
3195 3017 */
3196 3018 sllx %g2, TAGACC_CTX_LSHIFT, %g5
3197 3019 brz,pn %g5, tsb_pagefault
3198 3020 nop
3199 3021 ldx [%g6 + TSBMISS_SHARED_UHATID], %g7 /* g7 = srdp */
3200 3022 brz,pn %g7, tsb_pagefault
3201 3023 nop
3202 3024
3203 3025 GET_SHME_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
3204 3026 MMU_PAGESHIFT64K, TTE64K, %g5, tsb_shme_l8K, tsb_shme_checktte,
3205 3027 sfmmu_suspend_tl, tsb_shme_512K)
3206 3028 /* NOT REACHED */
3207 3029
3208 3030 tsb_shme_512K:
3209 3031 ldub [%g6 + TSBMISS_URTTEFLAGS], %g4
3210 3032 and %g4, HAT_512K_FLAG, %g5
3211 3033 brz,pn %g5, tsb_shme_4M
3212 3034 nop
3213 3035
3214 3036 /*
3215 3037 * 512K hash
3216 3038 */
3217 3039
3218 3040 GET_SHME_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
3219 3041 MMU_PAGESHIFT512K, TTE512K, %g5, tsb_shme_l512K, tsb_shme_checktte,
3220 3042 sfmmu_suspend_tl, tsb_shme_4M)
3221 3043 /* NOT REACHED */
3222 3044
3223 3045 tsb_shme_4M:
3224 3046 ldub [%g6 + TSBMISS_URTTEFLAGS], %g4
3225 3047 and %g4, HAT_4M_FLAG, %g5
3226 3048 brz,pn %g5, tsb_shme_32M
3227 3049 nop
3228 3050 4:
3229 3051 /*
3230 3052 * 4M hash
3231 3053 */
3232 3054 GET_SHME_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
3233 3055 MMU_PAGESHIFT4M, TTE4M, %g5, tsb_shme_l4M, tsb_shme_checktte,
3234 3056 sfmmu_suspend_tl, tsb_shme_32M)
3235 3057 /* NOT REACHED */
3236 3058
3237 3059 tsb_shme_32M:
3238 3060 ldub [%g6 + TSBMISS_URTTEFLAGS], %g4
3239 3061 and %g4, HAT_32M_FLAG, %g5
3240 3062 brz,pn %g5, tsb_shme_256M
3241 3063 nop
3242 3064
3243 3065 /*
3244 3066 * 32M hash
3245 3067 */
3246 3068
3247 3069 GET_SHME_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
3248 3070 MMU_PAGESHIFT32M, TTE32M, %g5, tsb_shme_l32M, tsb_shme_checktte,
3249 3071 sfmmu_suspend_tl, tsb_shme_256M)
3250 3072 /* NOT REACHED */
3251 3073
3252 3074 tsb_shme_256M:
3253 3075 ldub [%g6 + TSBMISS_URTTEFLAGS], %g4
3254 3076 and %g4, HAT_256M_FLAG, %g5
3255 3077 brz,pn %g5, tsb_pagefault
3256 3078 nop
3257 3079
3258 3080 /*
3259 3081 * 256M hash
3260 3082 */
3261 3083
3262 3084 GET_SHME_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
3263 3085 MMU_PAGESHIFT256M, TTE256M, %g5, tsb_shme_l256M, tsb_shme_checktte,
3264 3086 sfmmu_suspend_tl, tsb_pagefault)
3265 3087 /* NOT REACHED */
3266 3088
3267 3089 tsb_shme_checktte:
3268 3090
3269 3091 brgez,pn %g3, tsb_pagefault
3270 3092 rdpr %tt, %g7
3271 3093 /*
3272 3094 * g1 = ctx1 flag
3273 3095 * g3 = tte
3274 3096 * g4 = tte pa
3275 3097 * g6 = tsbmiss area
3276 3098 * g7 = tt
3277 3099 */
3278 3100
3279 3101 brz,pt %g1, tsb_validtte
3280 3102 nop
3281 3103 ldub [%g6 + TSBMISS_URTTEFLAGS], %g1
3282 3104 or %g1, HAT_CHKCTX1_FLAG, %g1
3283 3105 stub %g1, [%g6 + TSBMISS_URTTEFLAGS]
3284 3106
3285 3107 SAVE_CTX1(%g7, %g2, %g1, tsb_shmel)
3286 3108 #endif /* sun4u && !UTSB_PHYS */
3287 3109
3288 3110 tsb_validtte:
3289 3111 /*
3290 3112 * g3 = tte
3291 3113 * g4 = tte pa
3292 3114 * g6 = tsbmiss area
3293 3115 * g7 = tt
3294 3116 */
3295 3117
3296 3118 /*
3297 3119 * Set ref/mod bits if this is a prot trap. Usually, it isn't.
3298 3120 */
3299 3121 cmp %g7, FAST_PROT_TT
3300 3122 bne,pt %icc, 4f
3301 3123 nop
3302 3124
3303 3125 TTE_SET_REFMOD_ML(%g3, %g4, %g6, %g7, %g5, tsb_lset_refmod,
3304 3126 tsb_protfault)
3305 3127
3306 3128 GET_MMU_D_TTARGET(%g2, %g7) /* %g2 = ttarget */
3307 3129 #ifdef sun4v
3308 3130 MMU_FAULT_STATUS_AREA(%g7)
3309 3131 ldx [%g7 + MMFSA_D_ADDR], %g5 /* load fault addr for later */
3310 3132 #else /* sun4v */
3311 3133 mov MMU_TAG_ACCESS, %g5
3312 3134 ldxa [%g5]ASI_DMMU, %g5
3313 3135 #endif /* sun4v */
3314 3136 ba,pt %xcc, tsb_update_tl1
3315 3137 nop
3316 3138 4:
3317 3139 /*
3318 3140 * If ITLB miss check exec bit.
3319 3141 * If not set treat as invalid TTE.
3320 3142 */
3321 3143 cmp %g7, T_INSTR_MMU_MISS
3322 3144 be,pn %icc, 5f
3323 3145 andcc %g3, TTE_EXECPRM_INT, %g0 /* check execute bit is set */
3324 3146 cmp %g7, FAST_IMMU_MISS_TT
3325 3147 bne,pt %icc, 3f
3326 3148 andcc %g3, TTE_EXECPRM_INT, %g0 /* check execute bit is set */
3327 3149 5:
3328 3150 bz,pn %icc, tsb_protfault
3329 3151 nop
3330 3152
3331 3153 3:
3332 3154 /*
3333 3155 * Set reference bit if not already set
3334 3156 */
3335 3157 TTE_SET_REF_ML(%g3, %g4, %g6, %g7, %g5, tsb_lset_ref)
3336 3158
3337 3159 /*
3338 3160 * Now, load into TSB/TLB. At this point:
3339 3161 * g3 = tte
3340 3162 * g4 = patte
3341 3163 * g6 = tsbmiss area
3342 3164 */
3343 3165 rdpr %tt, %g7
3344 3166 #ifdef sun4v
3345 3167 MMU_FAULT_STATUS_AREA(%g2)
3346 3168 cmp %g7, T_INSTR_MMU_MISS
3347 3169 be,a,pt %icc, 9f
3348 3170 nop
3349 3171 cmp %g7, FAST_IMMU_MISS_TT
3350 3172 be,a,pt %icc, 9f
3351 3173 nop
3352 3174 add %g2, MMFSA_D_, %g2
3353 3175 9:
3354 3176 ldx [%g2 + MMFSA_CTX_], %g7
3355 3177 sllx %g7, TTARGET_CTX_SHIFT, %g7
3356 3178 ldx [%g2 + MMFSA_ADDR_], %g2
3357 3179 mov %g2, %g5 ! load the fault addr for later use
3358 3180 srlx %g2, TTARGET_VA_SHIFT, %g2
3359 3181 or %g2, %g7, %g2
3360 3182 #else /* sun4v */
3361 3183 mov MMU_TAG_ACCESS, %g5
3362 3184 cmp %g7, FAST_IMMU_MISS_TT
3363 3185 be,a,pt %icc, 9f
3364 3186 ldxa [%g0]ASI_IMMU, %g2
3365 3187 ldxa [%g0]ASI_DMMU, %g2
3366 3188 ba,pt %icc, tsb_update_tl1
3367 3189 ldxa [%g5]ASI_DMMU, %g5
3368 3190 9:
3369 3191 ldxa [%g5]ASI_IMMU, %g5
3370 3192 #endif /* sun4v */
3371 3193
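For reference, the tag target assembled in the sun4v path above (and read directly from the I/D-MMU tag-target registers on sun4u) packs the context number above the shifted virtual address; tsb_update_tl1 below recovers the context by shifting it back down. A minimal C sketch of that layout, assuming only the TTARGET_* shift names already used above:

        /* Sketch only: TTARGET_CTX_SHIFT and TTARGET_VA_SHIFT come from the hat headers. */
        static uint64_t
        make_ttarget(uint64_t ctx, uint64_t vaddr)
        {
                /* context number in the upper bits, VA page number in the lower bits */
                return ((ctx << TTARGET_CTX_SHIFT) | (vaddr >> TTARGET_VA_SHIFT));
        }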
3372 3194 tsb_update_tl1:
3373 3195 srlx %g2, TTARGET_CTX_SHIFT, %g7
3374 3196 brz,pn %g7, tsb_kernel
3375 3197 #ifdef sun4v
3376 3198 and %g3, TTE_SZ_BITS, %g7 ! assumes TTE_SZ_SHFT is 0
3377 3199 #else /* sun4v */
3378 3200 srlx %g3, TTE_SZ_SHFT, %g7
3379 3201 #endif /* sun4v */
3380 3202
3381 3203 tsb_user:
3382 3204 #ifdef sun4v
3383 3205 cmp %g7, TTE4M
3384 3206 bge,pn %icc, tsb_user4m
3385 3207 nop
3386 3208 #else /* sun4v */
3387 3209 cmp %g7, TTESZ_VALID | TTE4M
3388 3210 be,pn %icc, tsb_user4m
3389 3211 srlx %g3, TTE_SZ2_SHFT, %g7
3390 3212 andcc %g7, TTE_SZ2_BITS, %g7 ! check 32/256MB
3391 3213 #ifdef ITLB_32M_256M_SUPPORT
3392 3214 bnz,pn %icc, tsb_user4m
3393 3215 nop
3394 3216 #else /* ITLB_32M_256M_SUPPORT */
3395 3217 bnz,a,pn %icc, tsb_user_pn_synth
3396 3218 nop
3397 3219 #endif /* ITLB_32M_256M_SUPPORT */
3398 3220 #endif /* sun4v */
3399 3221
3400 3222 tsb_user8k:
3401 3223 #if defined(sun4v) || defined(UTSB_PHYS)
3402 3224 ldub [%g6 + TSBMISS_URTTEFLAGS], %g7
3403 3225 and %g7, HAT_CHKCTX1_FLAG, %g1
3404 3226 brz,a,pn %g1, 1f
3405 3227 ldn [%g6 + TSBMISS_TSBPTR], %g1 ! g1 = 1ST TSB ptr
3406 3228 GET_UTSBREG_SHCTX(%g6, TSBMISS_TSBSCDPTR, %g1)
3407 3229 brlz,a,pn %g1, ptl1_panic ! if no shared 3RD tsb
3408 3230 mov PTL1_NO_SCDTSB8K, %g1 ! panic
3409 3231 GET_3RD_TSBE_PTR(%g5, %g1, %g6, %g7)
3410 3232 1:
3411 3233 #else /* defined(sun4v) || defined(UTSB_PHYS) */
3412 3234 ldn [%g6 + TSBMISS_TSBPTR], %g1 ! g1 = 1ST TSB ptr
3413 3235 #endif /* defined(sun4v) || defined(UTSB_PHYS) */
3414 3236
3415 3237 #ifndef UTSB_PHYS
3416 3238 mov ASI_N, %g7 ! user TSBs accessed by VA
3417 3239 mov %g7, %asi
3418 3240 #endif /* !UTSB_PHYS */
3419 3241
3420 3242 TSB_UPDATE_TL(%g1, %g3, %g2, %g4, %g7, %g6, locked_tsb_l3)
3421 3243
3422 3244 rdpr %tt, %g5
3423 3245 #ifdef sun4v
3424 3246 cmp %g5, T_INSTR_MMU_MISS
3425 3247 be,a,pn %xcc, 9f
3426 3248 mov %g3, %g5
3427 3249 #endif /* sun4v */
3428 3250 cmp %g5, FAST_IMMU_MISS_TT
3429 3251 be,pn %xcc, 9f
3430 3252 mov %g3, %g5
3431 3253
3432 3254 DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3433 3255 ! trapstat wants TTE in %g5
3434 3256 retry
3435 3257 9:
3436 3258 ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3437 3259 ! trapstat wants TTE in %g5
3438 3260 retry
3439 3261
3440 3262 tsb_user4m:
3441 3263 #if defined(sun4v) || defined(UTSB_PHYS)
3442 3264 ldub [%g6 + TSBMISS_URTTEFLAGS], %g7
3443 3265 and %g7, HAT_CHKCTX1_FLAG, %g1
3444 3266 brz,a,pn %g1, 4f
3445 3267 ldn [%g6 + TSBMISS_TSBPTR4M], %g1 ! g1 = 2ND TSB ptr
3446 3268 GET_UTSBREG_SHCTX(%g6, TSBMISS_TSBSCDPTR4M, %g1)! g1 = 4TH TSB ptr
3447 3269 brlz,a,pn %g1, 5f ! if no shared 4TH TSB
3448 3270 nop
3449 3271 GET_4TH_TSBE_PTR(%g5, %g1, %g6, %g7)
3450 3272
3451 3273 #else /* defined(sun4v) || defined(UTSB_PHYS) */
3452 3274 ldn [%g6 + TSBMISS_TSBPTR4M], %g1 ! g1 = 2ND TSB ptr
3453 3275 #endif /* defined(sun4v) || defined(UTSB_PHYS) */
3454 3276 4:
3455 3277 brlz,pn %g1, 5f /* Check to see if we have 2nd TSB programmed */
3456 3278 nop
3457 3279
3458 3280 #ifndef UTSB_PHYS
3459 3281 mov ASI_N, %g7 ! user TSBs accessed by VA
3460 3282 mov %g7, %asi
3461 3283 #endif /* !UTSB_PHYS */
3462 3284
3463 3285 TSB_UPDATE_TL(%g1, %g3, %g2, %g4, %g7, %g6, locked_tsb_l4)
3464 3286
3465 3287 5:
3466 3288 rdpr %tt, %g5
3467 3289 #ifdef sun4v
3468 3290 cmp %g5, T_INSTR_MMU_MISS
3469 3291 be,a,pn %xcc, 9f
3470 3292 mov %g3, %g5
3471 3293 #endif /* sun4v */
3472 3294 cmp %g5, FAST_IMMU_MISS_TT
3473 3295 be,pn %xcc, 9f
3474 3296 mov %g3, %g5
3475 3297
3476 3298 DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3477 3299 ! trapstat wants TTE in %g5
3478 3300 retry
3479 3301 9:
3480 3302 ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3481 3303 ! trapstat wants TTE in %g5
3482 3304 retry
3483 3305
3484 3306 #if !defined(sun4v) && !defined(ITLB_32M_256M_SUPPORT)
3485 3307 /*
3486 3308 * Panther ITLB synthesis.
3487 3309 * The Panther 32M and 256M ITLB code simulates these two large page
3488 3310 * sizes with 4M pages, to provide support for programs, for example
3489 3311 * Java, that may copy instructions into a 32M or 256M data page and
3490 3312 * then execute them. The code below generates the 4M pfn bits and
3491 3313 * saves them in the modified 32M/256M ttes in the TSB. If the tte is
3492 3314 * stored in the DTLB to map a 32M/256M page, the 4M pfn offset bits
3493 3315 * are ignored by the hardware.
3494 3316 *
3495 3317 * Now, load into TSB/TLB. At this point:
3496 3318 * g2 = tagtarget
3497 3319 * g3 = tte
3498 3320 * g4 = patte
3499 3321 * g5 = tt
3500 3322 * g6 = tsbmiss area
3501 3323 */
3502 3324 tsb_user_pn_synth:
3503 3325 rdpr %tt, %g5
3504 3326 cmp %g5, FAST_IMMU_MISS_TT
3505 3327 be,pt %xcc, tsb_user_itlb_synth /* ITLB miss */
3506 3328 andcc %g3, TTE_EXECPRM_INT, %g0 /* is execprm bit set */
3507 3329 bz,pn %icc, 4b /* if not, been here before */
3508 3330 ldn [%g6 + TSBMISS_TSBPTR4M], %g1 /* g1 = tsbp */
3509 3331 brlz,a,pn %g1, 5f /* no 2nd tsb */
3510 3332 mov %g3, %g5
3511 3333
3512 3334 mov MMU_TAG_ACCESS, %g7
3513 3335 ldxa [%g7]ASI_DMMU, %g6 /* get tag access va */
3514 3336 GET_4M_PFN_OFF(%g3, %g6, %g5, %g7, 1) /* make 4M pfn offset */
3515 3337
3516 3338 mov ASI_N, %g7 /* user TSBs always accessed by VA */
3517 3339 mov %g7, %asi
3518 3340 TSB_UPDATE_TL_PN(%g1, %g5, %g2, %g4, %g7, %g3, locked_tsb_l5) /* update TSB */
3519 3341 5:
3520 3342 DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3521 3343 retry
3522 3344
3523 3345 tsb_user_itlb_synth:
3524 3346 ldn [%g6 + TSBMISS_TSBPTR4M], %g1 /* g1 = 2ND TSB */
3525 3347
3526 3348 mov MMU_TAG_ACCESS, %g7
3527 3349 ldxa [%g7]ASI_IMMU, %g6 /* get tag access va */
3528 3350 GET_4M_PFN_OFF(%g3, %g6, %g5, %g7, 2) /* make 4M pfn offset */
3529 3351 brlz,a,pn %g1, 7f /* Check to see if we have 2nd TSB programmed */
3530 3352 or %g5, %g3, %g5 /* add 4M bits to TTE */
3531 3353
3532 3354 mov ASI_N, %g7 /* user TSBs always accessed by VA */
3533 3355 mov %g7, %asi
3534 3356 TSB_UPDATE_TL_PN(%g1, %g5, %g2, %g4, %g7, %g3, locked_tsb_l6) /* update TSB */
3535 3357 7:
3536 3358 SET_TTE4M_PN(%g5, %g7) /* add TTE4M pagesize to TTE */
3537 3359 ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3538 3360 retry
3539 3361 #endif /* !sun4v && !ITLB_32M_256M_SUPPORT */
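Conceptually, the Panther synthesis above picks out the 4M piece of the 32M/256M page that contains the faulting VA and folds its physical offset into the TTE. A rough sketch of the address arithmetic only (MMU_PAGESIZE4M is the real constant; the function and parameter names are illustrative, and the actual work is done by GET_4M_PFN_OFF/SET_TTE4M_PN):

        /* Physical address of the 4M sub-page of a large page that contains vaddr. */
        static uint64_t
        synth_4m_pa(uint64_t large_page_pa, uintptr_t vaddr, size_t lpgsz /* 32M or 256M */)
        {
                uintptr_t off4m = (vaddr & (lpgsz - 1)) & ~(uintptr_t)(MMU_PAGESIZE4M - 1);

                return (large_page_pa + off4m);
        }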
3540 3362
3541 3363 tsb_kernel:
3542 3364 rdpr %tt, %g5
3543 3365 #ifdef sun4v
3544 3366 cmp %g7, TTE4M
3545 3367 bge,pn %icc, 5f
3546 3368 #else
3547 3369 cmp %g7, TTESZ_VALID | TTE4M ! no 32M or 256M support
3548 3370 be,pn %icc, 5f
3549 3371 #endif /* sun4v */
3550 3372 nop
3551 3373 ldn [%g6 + TSBMISS_TSBPTR], %g1 ! g1 = 8K TSB ptr
3552 3374 ba,pt %xcc, 6f
3553 3375 nop
3554 3376 5:
3555 3377 ldn [%g6 + TSBMISS_TSBPTR4M], %g1 ! g1 = 4M TSB ptr
3556 3378 brlz,pn %g1, 3f /* skip programming if 4M TSB ptr is -1 */
3557 3379 nop
3558 3380 6:
3559 3381 #ifndef sun4v
3560 3382 tsb_kernel_patch_asi:
3561 3383 or %g0, RUNTIME_PATCH, %g6
3562 3384 mov %g6, %asi ! XXX avoid writing to %asi !!
3563 3385 #endif
3564 3386 TSB_UPDATE_TL(%g1, %g3, %g2, %g4, %g7, %g6, locked_tsb_l7)
3565 3387 3:
3566 3388 #ifdef sun4v
3567 3389 cmp %g5, T_INSTR_MMU_MISS
3568 3390 be,a,pn %icc, 1f
3569 3391 mov %g3, %g5 ! trapstat wants TTE in %g5
3570 3392 #endif /* sun4v */
3571 3393 cmp %g5, FAST_IMMU_MISS_TT
3572 3394 be,pn %icc, 1f
3573 3395 mov %g3, %g5 ! trapstat wants TTE in %g5
3574 3396 DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3575 3397 ! trapstat wants TTE in %g5
3576 3398 retry
3577 3399 1:
3578 3400 ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3579 3401 ! trapstat wants TTE in %g5
3580 3402 retry
3581 3403
3582 3404 tsb_ism:
3583 3405 /*
3584 3406 * This is an ISM [i|d]tlb miss. We optimize for largest
3585 3407 * page size down to smallest.
3586 3408 *
3587 3409 * g2 = vaddr + ctx(or ctxtype (sun4v)) aka (pseudo-)tag access
3588 3410 * register
3589 3411 * g3 = ismmap->ism_seg
3590 3412 * g4 = physical address of ismmap->ism_sfmmu
3591 3413 * g6 = tsbmiss area
3592 3414 */
3593 3415 ldna [%g4]ASI_MEM, %g7 /* g7 = ism hatid */
3594 3416 brz,a,pn %g7, ptl1_panic /* if zero jmp ahead */
3595 3417 mov PTL1_BAD_ISM, %g1
3596 3418 /* g5 = pa of imap_vb_shift */
3597 3419 sub %g4, (IMAP_ISMHAT - IMAP_VB_SHIFT), %g5
3598 3420 lduba [%g5]ASI_MEM, %g4 /* g4 = imap_vb_shift */
3599 3421 srlx %g3, %g4, %g3 /* clr size field */
3600 3422 set TAGACC_CTX_MASK, %g1 /* mask off ctx number */
3601 3423 sllx %g3, %g4, %g3 /* g3 = ism vbase */
3602 3424 and %g2, %g1, %g4 /* g4 = ctx number */
3603 3425 andn %g2, %g1, %g1 /* g1 = tlb miss vaddr */
3604 3426 sub %g1, %g3, %g2 /* g2 = offset in ISM seg */
3605 3427 or %g2, %g4, %g2 /* g2 = (pseudo-)tagacc */
3606 3428 sub %g5, (IMAP_VB_SHIFT - IMAP_HATFLAGS), %g5
3607 3429 lduha [%g5]ASI_MEM, %g4 /* g5 = pa of imap_hatflags */
3608 3430 #if defined(sun4v) || defined(UTSB_PHYS)
3609 3431 and %g4, HAT_CTX1_FLAG, %g5 /* g5 = imap_hatflags */
3610 3432 brz,pt %g5, tsb_chk4M_ism
3611 3433 nop
3612 3434 ldub [%g6 + TSBMISS_URTTEFLAGS], %g5
3613 3435 or %g5, HAT_CHKCTX1_FLAG, %g5
3614 3436 stub %g5, [%g6 + TSBMISS_URTTEFLAGS]
3615 3437 rdpr %tt, %g5
3616 3438 SAVE_CTX1(%g5, %g3, %g1, tsb_shctxl)
3617 3439 #endif /* defined(sun4v) || defined(UTSB_PHYS) */
3618 3440
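The net effect of the address arithmetic above is to rewrite the (pseudo-)tag access value so that the hash probes below are keyed by the offset into the ISM segment rather than by the process VA. A small C sketch of that rewrite (illustrative only; the handler works on the physical imap fields with ASI_MEM loads):

        static uint64_t
        ism_tagacc(uint64_t tagacc, uint64_t ism_seg, uint_t imap_vb_shift)
        {
                uint64_t vbase = (ism_seg >> imap_vb_shift) << imap_vb_shift; /* clear size field */
                uint64_t ctx   = tagacc & TAGACC_CTX_MASK;
                uint64_t vaddr = tagacc & ~TAGACC_CTX_MASK;

                /* offset within the ISM segment, with the ctx bits folded back in */
                return ((vaddr - vbase) | ctx);
        }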
3619 3441 /*
3620 3442 * ISM pages are always locked down.
3621 3443 * If we can't find the tte then pagefault
3622 3444 * and let the spt segment driver resolve it.
3623 3445 *
3624 3446 * g2 = tagacc w/ISM vaddr (offset in ISM seg)
3625 3447 * g4 = imap_hatflags
3626 3448 * g6 = tsb miss area
3627 3449 * g7 = ISM hatid
3628 3450 */
3629 3451
3630 3452 tsb_chk4M_ism:
3631 3453 and %g4, HAT_4M_FLAG, %g5 /* g4 = imap_hatflags */
3632 3454 brnz,pt %g5, tsb_ism_4M /* branch if 4M pages */
3633 3455 nop
3634 3456
3635 3457 tsb_ism_32M:
3636 3458 and %g4, HAT_32M_FLAG, %g5 /* check default 32M next */
3637 3459 brz,pn %g5, tsb_ism_256M
3638 3460 nop
3639 3461
3640 3462 /*
3641 3463 * 32M hash.
3642 3464 */
3643 3465
3644 3466 GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1, MMU_PAGESHIFT32M,
3645 3467 TTE32M, %g5, tsb_ism_l32M, tsb_ism_32M_found, sfmmu_suspend_tl,
3646 3468 tsb_ism_4M)
3647 3469 /* NOT REACHED */
3648 3470
3649 3471 tsb_ism_32M_found:
3650 3472 brlz,a,pt %g3, tsb_validtte
3651 3473 rdpr %tt, %g7
3652 3474 ba,pt %xcc, tsb_ism_4M
3653 3475 nop
3654 3476
3655 3477 tsb_ism_256M:
3656 3478 and %g4, HAT_256M_FLAG, %g5 /* 256M is last resort */
3657 3479 brz,a,pn %g5, ptl1_panic
3658 3480 mov PTL1_BAD_ISM, %g1
3659 3481
3660 3482 /*
3661 3483 * 256M hash.
3662 3484 */
3663 3485 GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1, MMU_PAGESHIFT256M,
3664 3486 TTE256M, %g5, tsb_ism_l256M, tsb_ism_256M_found, sfmmu_suspend_tl,
3665 3487 tsb_ism_4M)
3666 3488
3667 3489 tsb_ism_256M_found:
3668 3490 brlz,a,pt %g3, tsb_validtte
3669 3491 rdpr %tt, %g7
3670 3492
3671 3493 tsb_ism_4M:
3672 3494 /*
3673 3495 * 4M hash.
3674 3496 */
3675 3497 GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1, MMU_PAGESHIFT4M,
3676 3498 TTE4M, %g5, tsb_ism_l4M, tsb_ism_4M_found, sfmmu_suspend_tl,
3677 3499 tsb_ism_8K)
3678 3500 /* NOT REACHED */
3679 3501
3680 3502 tsb_ism_4M_found:
3681 3503 brlz,a,pt %g3, tsb_validtte
3682 3504 rdpr %tt, %g7
3683 3505
3684 3506 tsb_ism_8K:
3685 3507 /*
3686 3508 * 8K and 64K hash.
3687 3509 */
3688 3510
3689 3511 GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1, MMU_PAGESHIFT64K,
3690 3512 TTE64K, %g5, tsb_ism_l8K, tsb_ism_8K_found, sfmmu_suspend_tl,
3691 3513 tsb_pagefault)
3692 3514 /* NOT REACHED */
3693 3515
3694 3516 tsb_ism_8K_found:
3695 3517 brlz,a,pt %g3, tsb_validtte
3696 3518 rdpr %tt, %g7
3697 3519
3698 3520 tsb_pagefault:
3699 3521 rdpr %tt, %g7
3700 3522 cmp %g7, FAST_PROT_TT
3701 3523 be,a,pn %icc, tsb_protfault
3702 3524 wrpr %g0, FAST_DMMU_MISS_TT, %tt
3703 3525
3704 3526 tsb_protfault:
3705 3527 /*
3706 3528 * we get here if we couldn't find a valid tte in the hash.
3707 3529 *
3708 3530 * If user and we are at tl>1 we go to window handling code.
3709 3531 *
3710 3532 * If kernel and the fault is on the same page as our stack
3711 3533 * pointer, then we know the stack is bad and the trap handler
3712 3534 * will fail, so we call ptl1_panic with PTL1_BAD_STACK.
3713 3535 *
3714 3536 * If this is a kernel trap and tl>1, panic.
3715 3537 *
3716 3538 * Otherwise we call pagefault.
3717 3539 */
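Restated as C-style pseudocode, the dispatch described above looks roughly like this (a sketch only; the names are those of the labels and routines used below, and the DTrace special case is shown separately further down):

        if (fault_ctx == 0) {                   /* kernel address (KCONTEXT) */
                if (btop(fault_addr) == btop(sp + STACK_BIAS))
                        ptl1_panic(PTL1_BAD_STACK);
                if (tl > 1)
                        ptl1_panic(tt == FAST_PROT_TT ?
                            PTL1_BAD_KPROT_FAULT : PTL1_BAD_KMISS);
                sfmmu_pagefault();              /* unless CPU_DTRACE_NOFAULT is set */
        } else {                                /* user address */
                if (tl > 1)
                        sfmmu_window_trap();
                else
                        sfmmu_mmu_trap();       /* unless CPU_DTRACE_NOFAULT is set */
        }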
3718 3540 cmp %g7, FAST_IMMU_MISS_TT
3719 3541 #ifdef sun4v
3720 3542 MMU_FAULT_STATUS_AREA(%g4)
3721 3543 ldx [%g4 + MMFSA_I_CTX], %g5
3722 3544 ldx [%g4 + MMFSA_D_CTX], %g4
3723 3545 move %icc, %g5, %g4
3724 3546 cmp %g7, T_INSTR_MMU_MISS
3725 3547 move %icc, %g5, %g4
3726 3548 #else
3727 3549 mov MMU_TAG_ACCESS, %g4
3728 3550 ldxa [%g4]ASI_DMMU, %g2
3729 3551 ldxa [%g4]ASI_IMMU, %g5
3730 3552 move %icc, %g5, %g2
3731 3553 cmp %g7, T_INSTR_MMU_MISS
3732 3554 move %icc, %g5, %g2
3733 3555 sllx %g2, TAGACC_CTX_LSHIFT, %g4
3734 3556 #endif /* sun4v */
3735 3557 brnz,pn %g4, 3f /* skip if not kernel */
3736 3558 rdpr %tl, %g5
3737 3559
3738 3560 add %sp, STACK_BIAS, %g3
3739 3561 srlx %g3, MMU_PAGESHIFT, %g3
3740 3562 srlx %g2, MMU_PAGESHIFT, %g4
3741 3563 cmp %g3, %g4
3742 3564 be,a,pn %icc, ptl1_panic /* panic if bad %sp */
3743 3565 mov PTL1_BAD_STACK, %g1
3744 3566
3745 3567 cmp %g5, 1
3746 3568 ble,pt %icc, 2f
3747 3569 nop
3748 3570 TSTAT_CHECK_TL1(2f, %g1, %g2)
3749 3571 rdpr %tt, %g2
3750 3572 cmp %g2, FAST_PROT_TT
3751 3573 mov PTL1_BAD_KPROT_FAULT, %g1
3752 3574 movne %icc, PTL1_BAD_KMISS, %g1
3753 3575 ba,pt %icc, ptl1_panic
3754 3576 nop
3755 3577
3756 3578 2:
3757 3579 /*
3758 3580 * We are taking a pagefault in the kernel on a kernel address. If
3759 3581 * CPU_DTRACE_NOFAULT is set in the cpuc_dtrace_flags, we don't actually
3760 3582 * want to call sfmmu_pagefault -- we will instead note that a fault
3761 3583 * has occurred by setting CPU_DTRACE_BADADDR and issue a "done"
3762 3584 * (instead of a "retry"). This will step over the faulting
3763 3585 * instruction.
3764 3586 */
3765 3587 CPU_INDEX(%g1, %g2)
3766 3588 set cpu_core, %g2
3767 3589 sllx %g1, CPU_CORE_SHIFT, %g1
3768 3590 add %g1, %g2, %g1
3769 3591 lduh [%g1 + CPUC_DTRACE_FLAGS], %g2
3770 3592 andcc %g2, CPU_DTRACE_NOFAULT, %g0
3771 3593 bz sfmmu_pagefault
3772 3594 or %g2, CPU_DTRACE_BADADDR, %g2
3773 3595 stuh %g2, [%g1 + CPUC_DTRACE_FLAGS]
3774 3596 GET_MMU_D_ADDR(%g3, %g4)
3775 3597 stx %g3, [%g1 + CPUC_DTRACE_ILLVAL]
3776 3598 done
3777 3599
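The same CPU_DTRACE_NOFAULT protocol is repeated below for the user-address and no-context cases; in C it amounts to roughly the following fragment (cpu_core and the cpuc_dtrace_* fields are the real per-CPU DTrace state; the surrounding shape is a sketch):

        cpu_core_t *cpup = &cpu_core[CPU->cpu_id];

        if (cpup->cpuc_dtrace_flags & CPU_DTRACE_NOFAULT) {
                cpup->cpuc_dtrace_flags |= CPU_DTRACE_BADADDR;
                cpup->cpuc_dtrace_illval = fault_addr;  /* D-side fault address */
                /* "done" steps over the faulting instruction instead of retrying it */
        } else {
                sfmmu_pagefault();                      /* normal miss handling */
        }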
3778 3600 3:
3779 3601 cmp %g5, 1
3780 3602 ble,pt %icc, 4f
3781 3603 nop
3782 3604 TSTAT_CHECK_TL1(4f, %g1, %g2)
3783 3605 ba,pt %icc, sfmmu_window_trap
3784 3606 nop
3785 3607
3786 3608 4:
3787 3609 /*
3788 3610 * We are taking a pagefault on a non-kernel address. If we are in
3789 3611 * the kernel (e.g., due to a copyin()), we will check cpuc_dtrace_flags
3790 3612 * and (if CPU_DTRACE_NOFAULT is set) will proceed as outlined above.
3791 3613 */
3792 3614 CPU_INDEX(%g1, %g2)
3793 3615 set cpu_core, %g2
3794 3616 sllx %g1, CPU_CORE_SHIFT, %g1
3795 3617 add %g1, %g2, %g1
3796 3618 lduh [%g1 + CPUC_DTRACE_FLAGS], %g2
3797 3619 andcc %g2, CPU_DTRACE_NOFAULT, %g0
3798 3620 bz sfmmu_mmu_trap
3799 3621 or %g2, CPU_DTRACE_BADADDR, %g2
3800 3622 stuh %g2, [%g1 + CPUC_DTRACE_FLAGS]
3801 3623 GET_MMU_D_ADDR(%g3, %g4)
3802 3624 stx %g3, [%g1 + CPUC_DTRACE_ILLVAL]
3803 3625
3804 3626 /*
3805 3627 * Be sure that we're actually taking this miss from the kernel --
3806 3628 * otherwise we have managed to return to user-level with
3807 3629 * CPU_DTRACE_NOFAULT set in cpuc_dtrace_flags.
3808 3630 */
3809 3631 rdpr %tstate, %g2
3810 3632 btst TSTATE_PRIV, %g2
3811 3633 bz,a ptl1_panic
3812 3634 mov PTL1_BAD_DTRACE_FLAGS, %g1
3813 3635 done
3814 3636
3815 3637 ALTENTRY(tsb_tl0_noctxt)
3816 3638 /*
3817 3639 * If we have no context, check to see if CPU_DTRACE_NOFAULT is set;
3818 3640 * if it is, indicate that we have faulted and issue a done.
3819 3641 */
3820 3642 CPU_INDEX(%g5, %g6)
3821 3643 set cpu_core, %g6
3822 3644 sllx %g5, CPU_CORE_SHIFT, %g5
3823 3645 add %g5, %g6, %g5
3824 3646 lduh [%g5 + CPUC_DTRACE_FLAGS], %g6
3825 3647 andcc %g6, CPU_DTRACE_NOFAULT, %g0
3826 3648 bz 1f
3827 3649 or %g6, CPU_DTRACE_BADADDR, %g6
3828 3650 stuh %g6, [%g5 + CPUC_DTRACE_FLAGS]
3829 3651 GET_MMU_D_ADDR(%g3, %g4)
3830 3652 stx %g3, [%g5 + CPUC_DTRACE_ILLVAL]
3831 3653
3832 3654 /*
3833 3655 * Be sure that we're actually taking this miss from the kernel --
3834 3656 * otherwise we have managed to return to user-level with
3835 3657 * CPU_DTRACE_NOFAULT set in cpuc_dtrace_flags.
3836 3658 */
3837 3659 rdpr %tstate, %g5
3838 3660 btst TSTATE_PRIV, %g5
3839 3661 bz,a ptl1_panic
3840 3662 mov PTL1_BAD_DTRACE_FLAGS, %g1
3841 3663 TSTAT_CHECK_TL1(2f, %g1, %g2);
3842 3664 2:
3843 3665 done
3844 3666
3845 3667 1:
3846 3668 rdpr %tt, %g5
3847 3669 cmp %g5, FAST_IMMU_MISS_TT
3848 3670 #ifdef sun4v
3849 3671 MMU_FAULT_STATUS_AREA(%g2)
3850 3672 be,a,pt %icc, 2f
3851 3673 ldx [%g2 + MMFSA_I_CTX], %g3
3852 3674 cmp %g5, T_INSTR_MMU_MISS
3853 3675 be,a,pt %icc, 2f
3854 3676 ldx [%g2 + MMFSA_I_CTX], %g3
3855 3677 ldx [%g2 + MMFSA_D_CTX], %g3
3856 3678 2:
3857 3679 #else
3858 3680 mov MMU_TAG_ACCESS, %g2
3859 3681 be,a,pt %icc, 2f
3860 3682 ldxa [%g2]ASI_IMMU, %g3
3861 3683 ldxa [%g2]ASI_DMMU, %g3
3862 3684 2: sllx %g3, TAGACC_CTX_LSHIFT, %g3
3863 3685 #endif /* sun4v */
3864 3686 brz,a,pn %g3, ptl1_panic ! panic if called for kernel
3865 3687 mov PTL1_BAD_CTX_STEAL, %g1 ! since kernel ctx was stolen
3866 3688 rdpr %tl, %g5
3867 3689 cmp %g5, 1
3868 3690 ble,pt %icc, sfmmu_mmu_trap
3869 3691 nop
3870 3692 TSTAT_CHECK_TL1(sfmmu_mmu_trap, %g1, %g2)
3871 3693 ba,pt %icc, sfmmu_window_trap
3872 3694 nop
3873 3695 SET_SIZE(sfmmu_tsb_miss)
3874 -#endif /* lint */
3875 3696
3876 -#if defined (lint)
3877 -/*
3878 - * This routine will look for a user or kernel vaddr in the hash
3879 - * structure. It returns a valid pfn or PFN_INVALID. It doesn't
3880 - * grab any locks. It should only be used by other sfmmu routines.
3881 - */
3882 -/* ARGSUSED */
3883 -pfn_t
3884 -sfmmu_vatopfn(caddr_t vaddr, sfmmu_t *sfmmup, tte_t *ttep)
3885 -{
3886 - return(0);
3887 -}
3888 -
3889 -/* ARGSUSED */
3890 -pfn_t
3891 -sfmmu_kvaszc2pfn(caddr_t vaddr, int hashno)
3892 -{
3893 - return(0);
3894 -}
3895 -
3896 -#else /* lint */
3897 -
3898 3697 ENTRY_NP(sfmmu_vatopfn)
3899 3698 /*
3900 3699 * disable interrupts
3901 3700 */
3902 3701 rdpr %pstate, %o3
3903 3702 #ifdef DEBUG
3904 3703 PANIC_IF_INTR_DISABLED_PSTR(%o3, sfmmu_di_l5, %g1)
3905 3704 #endif
3906 3705 /*
3907 3706 * disable interrupts to protect the TSBMISS area
3908 3707 */
3909 3708 andn %o3, PSTATE_IE, %o5
3910 3709 wrpr %o5, 0, %pstate
3911 3710
3912 3711 /*
3913 3712 * o0 = vaddr
3914 3713 * o1 = sfmmup
3915 3714 * o2 = ttep
3916 3715 */
3917 3716 CPU_TSBMISS_AREA(%g1, %o5)
3918 3717 ldn [%g1 + TSBMISS_KHATID], %o4
3919 3718 cmp %o4, %o1
3920 3719 bne,pn %ncc, vatopfn_nokernel
3921 3720 mov TTE64K, %g5 /* g5 = rehash # */
3922 3721 mov %g1,%o5 /* o5 = tsbmiss_area */
3923 3722 /*
3924 3723 * o0 = vaddr
3925 3724 * o1 & o4 = hatid
3926 3725 * o2 = ttep
3927 3726 * o5 = tsbmiss area
3928 3727 */
3929 3728 mov HBLK_RANGE_SHIFT, %g6
3930 3729 1:
3931 3730
3932 3731 /*
3933 3732 * o0 = vaddr
3934 3733 * o1 = sfmmup
3935 3734 * o2 = ttep
3936 3735 * o3 = old %pstate
3937 3736 * o4 = hatid
3938 3737 * o5 = tsbmiss
3939 3738 * g5 = rehash #
3940 3739 * g6 = hmeshift
3941 3740 *
3942 3741 * The first arg to GET_TTE is actually tagaccess register
3943 3742 * not just vaddr. Since this call is for kernel we need to clear
3944 3743 * any lower vaddr bits that would be interpreted as ctx bits.
3945 3744 */
3946 3745 set TAGACC_CTX_MASK, %g1
3947 3746 andn %o0, %g1, %o0
3948 3747 GET_TTE(%o0, %o4, %g1, %g2, %o5, %g4, %g6, %g5, %g3,
3949 3748 vatopfn_l1, kvtop_hblk_found, tsb_suspend, kvtop_nohblk)
3950 3749
3951 3750 kvtop_hblk_found:
3952 3751 /*
3953 3752 * o0 = vaddr
3954 3753 * o1 = sfmmup
3955 3754 * o2 = ttep
3956 3755 * g1 = tte
3957 3756 * g2 = tte pa
3958 3757 * g3 = scratch
3959 3758 * o2 = tsbmiss area
3960 3759 * o1 = hat id
3961 3760 */
3962 3761 brgez,a,pn %g1, 6f /* if tte invalid goto tl0 */
3963 3762 mov -1, %o0 /* output = -1 (PFN_INVALID) */
3964 3763 stx %g1,[%o2] /* put tte into *ttep */
3965 3764 TTETOPFN(%g1, %o0, vatopfn_l2, %g2, %g3, %g4)
3966 3765 /*
3967 3766 * o0 = vaddr
3968 3767 * o1 = sfmmup
3969 3768 * o2 = ttep
3970 3769 * g1 = pfn
3971 3770 */
3972 3771 ba,pt %xcc, 6f
3973 3772 mov %g1, %o0
3974 3773
3975 3774 kvtop_nohblk:
3976 3775 /*
3977 3776 * we get here if we couldn't find valid hblk in hash. We rehash
3978 3777 * we get here if we couldn't find a valid hblk in hash. We rehash
3979 3778 * if necessary.
3980 3779 ldn [%o5 + (TSBMISS_SCRATCH + TSB_TAGACC)], %o0
3981 3780 #ifdef sun4v
3982 3781 cmp %g5, MAX_HASHCNT
3983 3782 #else
3984 3783 cmp %g5, DEFAULT_MAX_HASHCNT /* no 32/256M kernel pages */
3985 3784 #endif /* sun4v */
3986 3785 be,a,pn %icc, 6f
3987 3786 mov -1, %o0 /* output = -1 (PFN_INVALID) */
3988 3787 mov %o1, %o4 /* restore hatid */
3989 3788 #ifdef sun4v
3990 3789 add %g5, 2, %g5
3991 3790 cmp %g5, 3
3992 3791 move %icc, MMU_PAGESHIFT4M, %g6
3993 3792 ba,pt %icc, 1b
3994 3793 movne %icc, MMU_PAGESHIFT256M, %g6
3995 3794 #else
3996 3795 inc %g5
3997 3796 cmp %g5, 2
3998 3797 move %icc, MMU_PAGESHIFT512K, %g6
3999 3798 ba,pt %icc, 1b
4000 3799 movne %icc, MMU_PAGESHIFT4M, %g6
4001 3800 #endif /* sun4v */
4002 3801 6:
4003 3802 retl
4004 3803 wrpr %g0, %o3, %pstate /* re-enable interrupts */
4005 3804
4006 3805 tsb_suspend:
4007 3806 /*
4008 3807 * o0 = vaddr
4009 3808 * o1 = sfmmup
4010 3809 * o2 = ttep
4011 3810 * g1 = tte
4012 3811 * g2 = tte pa
4013 3812 * g3 = tte va
4014 3813 * o2 = tsbmiss area use o5 instead of o2 for tsbmiss
4015 3814 */
4016 3815 stx %g1,[%o2] /* put tte into *ttep */
4017 3816 brgez,a,pn %g1, 8f /* if tte invalid goto 8: */
4018 3817 sub %g0, 1, %o0 /* output = PFN_INVALID */
4019 3818 sub %g0, 2, %o0 /* output = PFN_SUSPENDED */
4020 3819 8:
4021 3820 retl
4022 3821 wrpr %g0, %o3, %pstate /* enable interrupts */
4023 3822
4024 3823 vatopfn_nokernel:
4025 3824 /*
4026 3825 * This routine does NOT support user addresses;
4027 3826 * there is a routine in C that supports those.
4028 3827 * The only reason the C routine does not also support
4029 3828 * kernel addresses is that we do va_to_pa while
4030 3829 * holding the hashlock.
4031 3830 */
4032 3831 wrpr %g0, %o3, %pstate /* re-enable interrupts */
4033 3832 save %sp, -SA(MINFRAME), %sp
4034 3833 sethi %hi(sfmmu_panic3), %o0
4035 3834 call panic
4036 3835 or %o0, %lo(sfmmu_panic3), %o0
4037 3836
4038 3837 SET_SIZE(sfmmu_vatopfn)
4039 3838
4040 3839 /*
4041 3840 * %o0 = vaddr
4042 3841 * %o1 = hashno (aka szc)
4043 3842 *
4044 3843 *
4045 3844 * This routine is similar to sfmmu_vatopfn() but will only look for
4046 3845 * a kernel vaddr in the hash structure for the specified rehash value.
4047 3846 * It's just an optimization for the case when pagesize for a given
4048 3847 * va range is already known (e.g. large page heap) and we don't want
4049 3848 * to start the search with rehash value 1 as sfmmu_vatopfn() does.
4050 3849 *
4051 3850 * Returns valid pfn or PFN_INVALID if
4052 3851 * tte for specified rehash # is not found, invalid or suspended.
4053 3852 */
4054 3853 ENTRY_NP(sfmmu_kvaszc2pfn)
4055 3854 /*
4056 3855 * disable interrupts
4057 3856 */
4058 3857 rdpr %pstate, %o3
4059 3858 #ifdef DEBUG
4060 3859 PANIC_IF_INTR_DISABLED_PSTR(%o3, sfmmu_di_l6, %g1)
4061 3860 #endif
4062 3861 /*
4063 3862 * disable interrupts to protect the TSBMISS area
4064 3863 */
4065 3864 andn %o3, PSTATE_IE, %o5
4066 3865 wrpr %o5, 0, %pstate
4067 3866
4068 3867 CPU_TSBMISS_AREA(%g1, %o5)
4069 3868 ldn [%g1 + TSBMISS_KHATID], %o4
4070 3869 sll %o1, 1, %g6
4071 3870 add %g6, %o1, %g6
4072 3871 add %g6, MMU_PAGESHIFT, %g6
4073 3872 /*
4074 3873 * %o0 = vaddr
4075 3874 * %o1 = hashno
4076 3875 * %o3 = old %pstate
4077 3876 * %o4 = ksfmmup
4078 3877 * %g1 = tsbmiss area
4079 3878 * %g6 = hmeshift
4080 3879 */
4081 3880
4082 3881 /*
4083 3882 * The first arg to GET_TTE is actually tagaccess register
4084 3883 * not just vaddr. Since this call is for kernel we need to clear
4085 3884 * any lower vaddr bits that would be interpreted as ctx bits.
4086 3885 */
4087 3886 srlx %o0, MMU_PAGESHIFT, %o0
4088 3887 sllx %o0, MMU_PAGESHIFT, %o0
4089 3888 GET_TTE(%o0, %o4, %g3, %g4, %g1, %o5, %g6, %o1, %g5,
4090 3889 kvaszc2pfn_l1, kvaszc2pfn_hblk_found, kvaszc2pfn_nohblk,
4091 3890 kvaszc2pfn_nohblk)
4092 3891
4093 3892 kvaszc2pfn_hblk_found:
4094 3893 /*
4095 3894 * %g3 = tte
4096 3895 * %o0 = vaddr
4097 3896 */
4098 3897 brgez,a,pn %g3, 1f /* check if tte is invalid */
4099 3898 mov -1, %o0 /* output = -1 (PFN_INVALID) */
4100 3899 TTETOPFN(%g3, %o0, kvaszc2pfn_l2, %g2, %g4, %g5)
4101 3900 /*
4102 3901 * g3 = pfn
4103 3902 */
4104 3903 ba,pt %xcc, 1f
4105 3904 mov %g3, %o0
4106 3905
4107 3906 kvaszc2pfn_nohblk:
4108 3907 mov -1, %o0
4109 3908
4110 3909 1:
4111 3910 retl
4112 3911 wrpr %g0, %o3, %pstate /* re-enable interrupts */
4113 3912
4114 3913 SET_SIZE(sfmmu_kvaszc2pfn)
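The hmeshift computed from hashno at entry (the sll/add/add sequence above) is simply the page shift for the requested rehash level; a one-line sketch for reference:

        /* hashno 1 -> 16 (64K), 2 -> 19 (512K), 3 -> 22 (4M), 4 -> 25 (32M), 5 -> 28 (256M) */
        static int
        kvaszc2pfn_hmeshift(int hashno)
        {
                return (MMU_PAGESHIFT + 3 * hashno);
        }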
4115 3914
4116 -#endif /* lint */
4117 3915
4118 3916
4119 -
4120 -#if !defined(lint)
4121 -
4122 3917 /*
4123 3918 * kpm lock used between trap level tsbmiss handler and kpm C level.
4124 3919 */
4125 3920 #define KPMLOCK_ENTER(kpmlckp, tmp1, label1, asi) \
4126 3921 mov 0xff, tmp1 ;\
4127 3922 label1: ;\
4128 3923 casa [kpmlckp]asi, %g0, tmp1 ;\
4129 3924 brnz,pn tmp1, label1 ;\
4130 3925 mov 0xff, tmp1 ;\
4131 3926 membar #LoadLoad
4132 3927
4133 3928 #define KPMLOCK_EXIT(kpmlckp, asi) \
4134 3929 membar #LoadStore|#StoreStore ;\
4135 3930 sta %g0, [kpmlckp]asi
4136 3931
4137 3932 /*
4138 3933 * Lookup a memseg for a given pfn and if found, return the physical
4139 3934 * address of the corresponding struct memseg in mseg, otherwise
4140 3935 * return MSEG_NULLPTR_PA. The kpmtsbm pointer must be provided in
4141 3936 * tsbmp; %asi is assumed to be ASI_MEM.
4142 3937 * This lookup is done by strictly traversing only the physical memseg
4143 3938 * linkage. The more generic approach, to check the virtual linkage
4144 3939 * before using the physical (used e.g. with hmehash buckets), cannot
4145 3940 * be used here. Memory DR operations can run in parallel to this
4146 3941 * lookup w/o any locks and updates of the physical and virtual linkage
4147 3942 * lookup w/o any locks, and updates of the physical and virtual linkage
4148 3943 * cannot be done atomically with respect to each other. Because physical
4149 3944 * as "physical NULL" pointer.
4150 3945 */
4151 3946 #define PAGE_NUM2MEMSEG_NOLOCK_PA(pfn, mseg, tsbmp, tmp1, tmp2, tmp3, label) \
4152 3947 sethi %hi(mhash_per_slot), tmp3 /* no tsbmp use due to DR */ ;\
4153 3948 ldx [tmp3 + %lo(mhash_per_slot)], mseg ;\
4154 3949 udivx pfn, mseg, mseg ;\
4155 3950 ldx [tsbmp + KPMTSBM_MSEGPHASHPA], tmp1 ;\
4156 3951 and mseg, SFMMU_N_MEM_SLOTS - 1, mseg ;\
4157 3952 sllx mseg, SFMMU_MEM_HASH_ENTRY_SHIFT, mseg ;\
4158 3953 add tmp1, mseg, tmp1 ;\
4159 3954 ldxa [tmp1]%asi, mseg ;\
4160 3955 cmp mseg, MSEG_NULLPTR_PA ;\
4161 3956 be,pn %xcc, label/**/1 /* if not found */ ;\
4162 3957 nop ;\
4163 3958 ldxa [mseg + MEMSEG_PAGES_BASE]%asi, tmp1 ;\
4164 3959 cmp pfn, tmp1 /* pfn - pages_base */ ;\
4165 3960 blu,pn %xcc, label/**/1 ;\
4166 3961 ldxa [mseg + MEMSEG_PAGES_END]%asi, tmp2 ;\
4167 3962 cmp pfn, tmp2 /* pfn - pages_end */ ;\
4168 3963 bgeu,pn %xcc, label/**/1 ;\
4169 3964 sub pfn, tmp1, tmp1 /* pfn - pages_base */ ;\
4170 3965 mulx tmp1, PAGE_SIZE, tmp1 ;\
4171 3966 ldxa [mseg + MEMSEG_PAGESPA]%asi, tmp2 /* pages */ ;\
4172 3967 add tmp2, tmp1, tmp1 /* pp */ ;\
4173 3968 lduwa [tmp1 + PAGE_PAGENUM]%asi, tmp2 ;\
4174 3969 cmp tmp2, pfn ;\
4175 3970 be,pt %xcc, label/**/_ok /* found */ ;\
4176 3971 label/**/1: ;\
4177 3972 /* brute force lookup */ ;\
4178 3973 sethi %hi(memsegspa), tmp3 /* no tsbmp use due to DR */ ;\
4179 3974 ldx [tmp3 + %lo(memsegspa)], mseg ;\
4180 3975 label/**/2: ;\
4181 3976 cmp mseg, MSEG_NULLPTR_PA ;\
4182 3977 be,pn %xcc, label/**/_ok /* if not found */ ;\
4183 3978 nop ;\
4184 3979 ldxa [mseg + MEMSEG_PAGES_BASE]%asi, tmp1 ;\
4185 3980 cmp pfn, tmp1 /* pfn - pages_base */ ;\
4186 3981 blu,a,pt %xcc, label/**/2 ;\
4187 3982 ldxa [mseg + MEMSEG_NEXTPA]%asi, mseg ;\
4188 3983 ldxa [mseg + MEMSEG_PAGES_END]%asi, tmp2 ;\
4189 3984 cmp pfn, tmp2 /* pfn - pages_end */ ;\
4190 3985 bgeu,a,pt %xcc, label/**/2 ;\
4191 3986 ldxa [mseg + MEMSEG_NEXTPA]%asi, mseg ;\
4192 3987 label/**/_ok:
4193 3988
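The virtual-pointer analogue of this macro is easier to follow in C; a sketch of the same hash-then-brute-force lookup (struct memseg with pages_base/pages_end/next is real, while the hash table and hash-function names here are illustrative):

        struct memseg *
        page_num2memseg_nolock(pfn_t pfn)
        {
                struct memseg *mseg;

                /* hash probe first ... */
                mseg = memseg_hash[MEMSEG_PFN_HASH(pfn)];
                if (mseg != NULL && pfn >= mseg->pages_base && pfn < mseg->pages_end)
                        return (mseg);

                /* ... then brute force down the memseg list */
                for (mseg = memsegs; mseg != NULL; mseg = mseg->next) {
                        if (pfn >= mseg->pages_base && pfn < mseg->pages_end)
                                return (mseg);
                }
                return (NULL);          /* the macro returns MSEG_NULLPTR_PA instead */
        }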
4194 3989 /*
4195 3990 * kpm tsb miss handler large pages
4196 3991 * g1 = 8K kpm TSB entry pointer
4197 3992 * g2 = tag access register
4198 3993 * g3 = 4M kpm TSB entry pointer
4199 3994 */
4200 3995 ALTENTRY(sfmmu_kpm_dtsb_miss)
4201 3996 TT_TRACE(trace_tsbmiss)
4202 3997
4203 3998 CPU_INDEX(%g7, %g6)
4204 3999 sethi %hi(kpmtsbm_area), %g6
4205 4000 sllx %g7, KPMTSBM_SHIFT, %g7
4206 4001 or %g6, %lo(kpmtsbm_area), %g6
4207 4002 add %g6, %g7, %g6 /* g6 = kpmtsbm ptr */
4208 4003
4209 4004 /* check enable flag */
4210 4005 ldub [%g6 + KPMTSBM_FLAGS], %g4
4211 4006 and %g4, KPMTSBM_ENABLE_FLAG, %g5
4212 4007 brz,pn %g5, sfmmu_tsb_miss /* if kpm not enabled */
4213 4008 nop
4214 4009
4215 4010 /* VA range check */
4216 4011 ldx [%g6 + KPMTSBM_VBASE], %g7
4217 4012 cmp %g2, %g7
4218 4013 blu,pn %xcc, sfmmu_tsb_miss
4219 4014 ldx [%g6 + KPMTSBM_VEND], %g5
4220 4015 cmp %g2, %g5
4221 4016 bgeu,pn %xcc, sfmmu_tsb_miss
4222 4017 stx %g3, [%g6 + KPMTSBM_TSBPTR]
4223 4018
4224 4019 /*
4225 4020 * check TL tsbmiss handling flag
4226 4021 * bump tsbmiss counter
4227 4022 */
4228 4023 lduw [%g6 + KPMTSBM_TSBMISS], %g5
4229 4024 #ifdef DEBUG
4230 4025 and %g4, KPMTSBM_TLTSBM_FLAG, %g3
4231 4026 inc %g5
4232 4027 brz,pn %g3, sfmmu_kpm_exception
4233 4028 st %g5, [%g6 + KPMTSBM_TSBMISS]
4234 4029 #else
4235 4030 inc %g5
4236 4031 st %g5, [%g6 + KPMTSBM_TSBMISS]
4237 4032 #endif
4238 4033 /*
4239 4034 * At this point:
4240 4035 * g1 = 8K kpm TSB pointer (not used)
4241 4036 * g2 = tag access register
4242 4037 * g3 = clobbered
4243 4038 * g6 = per-CPU kpm tsbmiss area
4244 4039 * g7 = kpm_vbase
4245 4040 */
4246 4041
4247 4042 /* vaddr2pfn */
4248 4043 ldub [%g6 + KPMTSBM_SZSHIFT], %g3
4249 4044 sub %g2, %g7, %g4 /* paddr = vaddr-kpm_vbase */
4250 4045 srax %g4, %g3, %g2 /* which alias range (r) */
4251 4046 brnz,pn %g2, sfmmu_kpm_exception /* if (r != 0) goto C handler */
4252 4047 srlx %g4, MMU_PAGESHIFT, %g2 /* %g2 = pfn */
4253 4048
4254 4049 /*
4255 4050 * Setup %asi
4256 4051 * mseg_pa = page_numtomemseg_nolock(pfn)
4257 4052 * if (mseg_pa == NULL) sfmmu_kpm_exception
4258 4053 * g2=pfn
4259 4054 */
4260 4055 mov ASI_MEM, %asi
4261 4056 PAGE_NUM2MEMSEG_NOLOCK_PA(%g2, %g3, %g6, %g4, %g5, %g7, kpmtsbmp2m)
4262 4057 cmp %g3, MSEG_NULLPTR_PA
4263 4058 be,pn %xcc, sfmmu_kpm_exception /* if mseg not found */
4264 4059 nop
4265 4060
4266 4061 /*
4267 4062 * inx = ptokpmp((kpmptop((ptopkpmp(pfn))) - mseg_pa->kpm_pbase));
4268 4063 * g2=pfn g3=mseg_pa
4269 4064 */
4270 4065 ldub [%g6 + KPMTSBM_KPMP2PSHFT], %g5
4271 4066 ldxa [%g3 + MEMSEG_KPM_PBASE]%asi, %g7
4272 4067 srlx %g2, %g5, %g4
4273 4068 sllx %g4, %g5, %g4
4274 4069 sub %g4, %g7, %g4
4275 4070 srlx %g4, %g5, %g4
4276 4071
4277 4072 /*
4278 4073 * Validate inx value
4279 4074 * g2=pfn g3=mseg_pa g4=inx
4280 4075 */
4281 4076 #ifdef DEBUG
4282 4077 ldxa [%g3 + MEMSEG_KPM_NKPMPGS]%asi, %g5
4283 4078 cmp %g4, %g5 /* inx - nkpmpgs */
4284 4079 bgeu,pn %xcc, sfmmu_kpm_exception /* if out of range */
4285 4080 ld [%g6 + KPMTSBM_KPMPTABLESZ], %g7
4286 4081 #else
4287 4082 ld [%g6 + KPMTSBM_KPMPTABLESZ], %g7
4288 4083 #endif
4289 4084 /*
4290 4085 * kp = &mseg_pa->kpm_pages[inx]
4291 4086 */
4292 4087 sllx %g4, KPMPAGE_SHIFT, %g4 /* kpm_pages offset */
4293 4088 ldxa [%g3 + MEMSEG_KPM_PAGES]%asi, %g5 /* kpm_pages */
4294 4089 add %g5, %g4, %g5 /* kp */
4295 4090
4296 4091 /*
4297 4092 * KPMP_HASH(kp)
4298 4093 * g2=pfn g3=mseg_pa g4=offset g5=kp g7=kpmp_table_sz
4299 4094 */
4300 4095 ldub [%g6 + KPMTSBM_KPMPSHIFT], %g1 /* kpmp_shift */
4301 4096 sub %g7, 1, %g7 /* mask */
4302 4097 srlx %g5, %g1, %g1 /* x = ksp >> kpmp_shift */
4303 4098 add %g5, %g1, %g5 /* y = ksp + x */
4304 4099 and %g5, %g7, %g5 /* hashinx = y & mask */
4305 4100
4306 4101 /*
4307 4102 * Calculate physical kpm_page pointer
4308 4103 * g2=pfn g3=mseg_pa g4=offset g5=hashinx
4309 4104 */
4310 4105 ldxa [%g3 + MEMSEG_KPM_PAGESPA]%asi, %g1 /* kpm_pagespa */
4311 4106 add %g1, %g4, %g1 /* kp_pa */
4312 4107
4313 4108 /*
4314 4109 * Calculate physical hash lock address
4315 4110 * g1=kp_refcntc_pa g2=pfn g5=hashinx
4316 4111 */
4317 4112 ldx [%g6 + KPMTSBM_KPMPTABLEPA], %g4 /* kpmp_tablepa */
4318 4113 sllx %g5, KPMHLK_SHIFT, %g5
4319 4114 add %g4, %g5, %g3
4320 4115 add %g3, KPMHLK_LOCK, %g3 /* hlck_pa */
4321 4116
4322 4117 /*
4323 4118 * Assemble tte
4324 4119 * g1=kp_pa g2=pfn g3=hlck_pa
4325 4120 */
4326 4121 #ifdef sun4v
4327 4122 sethi %hi(TTE_VALID_INT), %g5 /* upper part */
4328 4123 sllx %g5, 32, %g5
4329 4124 mov (TTE_CP_INT|TTE_CV_INT|TTE_PRIV_INT|TTE_HWWR_INT), %g4
4330 4125 or %g4, TTE4M, %g4
4331 4126 or %g5, %g4, %g5
4332 4127 #else
4333 4128 sethi %hi(TTE_VALID_INT), %g4
4334 4129 mov TTE4M, %g5
4335 4130 sllx %g5, TTE_SZ_SHFT_INT, %g5
4336 4131 or %g5, %g4, %g5 /* upper part */
4337 4132 sllx %g5, 32, %g5
4338 4133 mov (TTE_CP_INT|TTE_CV_INT|TTE_PRIV_INT|TTE_HWWR_INT), %g4
4339 4134 or %g5, %g4, %g5
4340 4135 #endif
4341 4136 sllx %g2, MMU_PAGESHIFT, %g4
4342 4137 or %g5, %g4, %g5 /* tte */
4343 4138 ldx [%g6 + KPMTSBM_TSBPTR], %g4
4344 4139 GET_MMU_D_TTARGET(%g2, %g7) /* %g2 = ttarget */
4345 4140
4346 4141 /*
4347 4142 * tsb dropin
4348 4143 * g1=kp_pa g2=ttarget g3=hlck_pa g4=kpmtsbp4m g5=tte g6=kpmtsbm_area
4349 4144 */
4350 4145
4351 4146 /* KPMLOCK_ENTER(kpmlckp, tmp1, label1, asi) */
4352 4147 KPMLOCK_ENTER(%g3, %g7, kpmtsbmhdlr1, ASI_MEM)
4353 4148
4354 4149 /* use C-handler if there's no go for dropin */
4355 4150 ldsha [%g1 + KPMPAGE_REFCNTC]%asi, %g7 /* kp_refcntc */
4356 4151 cmp %g7, -1
4357 4152 bne,pn %xcc, 5f /* use C-handler if there's no go for dropin */
4358 4153 nop
4359 4154
4360 4155 #ifdef DEBUG
4361 4156 /* double check refcnt */
4362 4157 ldsha [%g1 + KPMPAGE_REFCNT]%asi, %g7
4363 4158 brz,pn %g7, 5f /* let C-handler deal with this */
4364 4159 nop
4365 4160 #endif
4366 4161
4367 4162 #ifndef sun4v
4368 4163 ldub [%g6 + KPMTSBM_FLAGS], %g7
4369 4164 mov ASI_N, %g1
4370 4165 andcc %g7, KPMTSBM_TSBPHYS_FLAG, %g0
4371 4166 movnz %icc, ASI_MEM, %g1
4372 4167 mov %g1, %asi
4373 4168 #endif
4374 4169
4375 4170 /*
4376 4171 * TSB_LOCK_ENTRY(tsbp, tmp1, tmp2, label) (needs %asi set)
4377 4172 * If we fail to lock the TSB entry then just load the tte into the
4378 4173 * TLB.
4379 4174 */
4380 4175 TSB_LOCK_ENTRY(%g4, %g1, %g7, locked_tsb_l1)
4381 4176
4382 4177 /* TSB_INSERT_UNLOCK_ENTRY(tsbp, tte, tagtarget, tmp) */
4383 4178 TSB_INSERT_UNLOCK_ENTRY(%g4, %g5, %g2, %g7)
4384 4179 locked_tsb_l1:
4385 4180 DTLB_STUFF(%g5, %g1, %g2, %g4, %g6)
4386 4181
4387 4182 /* KPMLOCK_EXIT(kpmlckp, asi) */
4388 4183 KPMLOCK_EXIT(%g3, ASI_MEM)
4389 4184
4390 4185 /*
4391 4186 * If trapstat is running, we need to shift the %tpc and %tnpc to
4392 4187 * point to trapstat's TSB miss return code (note that trapstat
4393 4188 * itself will patch the correct offset to add).
4394 4189 * Note: TTE is expected in %g5 (allows per pagesize reporting).
4395 4190 */
4396 4191 rdpr %tl, %g7
4397 4192 cmp %g7, 1
4398 4193 ble %icc, 0f
4399 4194 sethi %hi(KERNELBASE), %g6
4400 4195 rdpr %tpc, %g7
4401 4196 or %g6, %lo(KERNELBASE), %g6
4402 4197 cmp %g7, %g6
4403 4198 bgeu %xcc, 0f
4404 4199 ALTENTRY(tsbmiss_trapstat_patch_point_kpm)
4405 4200 add %g7, RUNTIME_PATCH, %g7 /* must match TSTAT_TSBMISS_INSTR */
4406 4201 wrpr %g7, %tpc
4407 4202 add %g7, 4, %g7
4408 4203 wrpr %g7, %tnpc
4409 4204 0:
4410 4205 retry
4411 4206 5:
4412 4207 /* g3=hlck_pa */
4413 4208 KPMLOCK_EXIT(%g3, ASI_MEM)
4414 4209 ba,pt %icc, sfmmu_kpm_exception
4415 4210 nop
4416 4211 SET_SIZE(sfmmu_kpm_dtsb_miss)
4417 4212
4418 4213 /*
4419 4214 * kpm tsbmiss handler for smallpages
4420 4215 * g1 = 8K kpm TSB pointer
4421 4216 * g2 = tag access register
4422 4217 * g3 = 4M kpm TSB pointer
4423 4218 */
4424 4219 ALTENTRY(sfmmu_kpm_dtsb_miss_small)
4425 4220 TT_TRACE(trace_tsbmiss)
4426 4221 CPU_INDEX(%g7, %g6)
4427 4222 sethi %hi(kpmtsbm_area), %g6
4428 4223 sllx %g7, KPMTSBM_SHIFT, %g7
4429 4224 or %g6, %lo(kpmtsbm_area), %g6
4430 4225 add %g6, %g7, %g6 /* g6 = kpmtsbm ptr */
4431 4226
4432 4227 /* check enable flag */
4433 4228 ldub [%g6 + KPMTSBM_FLAGS], %g4
4434 4229 and %g4, KPMTSBM_ENABLE_FLAG, %g5
4435 4230 brz,pn %g5, sfmmu_tsb_miss /* if kpm not enabled */
4436 4231 nop
4437 4232
4438 4233 /*
4439 4234 * VA range check
4440 4235 * On fail: goto sfmmu_tsb_miss
4441 4236 */
4442 4237 ldx [%g6 + KPMTSBM_VBASE], %g7
4443 4238 cmp %g2, %g7
4444 4239 blu,pn %xcc, sfmmu_tsb_miss
4445 4240 ldx [%g6 + KPMTSBM_VEND], %g5
4446 4241 cmp %g2, %g5
4447 4242 bgeu,pn %xcc, sfmmu_tsb_miss
4448 4243 stx %g1, [%g6 + KPMTSBM_TSBPTR] /* save 8K kpm TSB pointer */
4449 4244
4450 4245 /*
4451 4246 * check TL tsbmiss handling flag
4452 4247 * bump tsbmiss counter
4453 4248 */
4454 4249 lduw [%g6 + KPMTSBM_TSBMISS], %g5
4455 4250 #ifdef DEBUG
4456 4251 and %g4, KPMTSBM_TLTSBM_FLAG, %g1
4457 4252 inc %g5
4458 4253 brz,pn %g1, sfmmu_kpm_exception
4459 4254 st %g5, [%g6 + KPMTSBM_TSBMISS]
4460 4255 #else
4461 4256 inc %g5
4462 4257 st %g5, [%g6 + KPMTSBM_TSBMISS]
4463 4258 #endif
4464 4259 /*
4465 4260 * At this point:
4466 4261 * g1 = clobbered
4467 4262 * g2 = tag access register
4468 4263 * g3 = 4M kpm TSB pointer (not used)
4469 4264 * g6 = per-CPU kpm tsbmiss area
4470 4265 * g7 = kpm_vbase
4471 4266 */
4472 4267
4473 4268 /*
4474 4269 * Assembly implementation of SFMMU_KPM_VTOP(vaddr, paddr)
4475 4270 * which is defined in mach_kpm.h. Any changes in that macro
4476 4271 * should also be ported back to this assembly code.
4477 4272 */
4478 4273 ldub [%g6 + KPMTSBM_SZSHIFT], %g3 /* g3 = kpm_size_shift */
4479 4274 sub %g2, %g7, %g4 /* paddr = vaddr-kpm_vbase */
4480 4275 srax %g4, %g3, %g7 /* which alias range (r) */
4481 4276 brz,pt %g7, 2f
4482 4277 sethi %hi(vac_colors_mask), %g5
4483 4278 ld [%g5 + %lo(vac_colors_mask)], %g5
4484 4279
4485 4280 srlx %g2, MMU_PAGESHIFT, %g1 /* vaddr >> MMU_PAGESHIFT */
4486 4281 and %g1, %g5, %g1 /* g1 = v */
4487 4282 sllx %g7, %g3, %g5 /* g5 = r << kpm_size_shift */
4488 4283 cmp %g7, %g1 /* if (r > v) */
4489 4284 bleu,pn %xcc, 1f
4490 4285 sub %g4, %g5, %g4 /* paddr -= r << kpm_size_shift */
4491 4286 sub %g7, %g1, %g5 /* g5 = r - v */
4492 4287 sllx %g5, MMU_PAGESHIFT, %g7 /* (r-v) << MMU_PAGESHIFT */
4493 4288 add %g4, %g7, %g4 /* paddr += (r-v)<<MMU_PAGESHIFT */
4494 4289 ba 2f
4495 4290 nop
4496 4291 1:
4497 4292 sllx %g7, MMU_PAGESHIFT, %g5 /* else */
4498 4293 sub %g4, %g5, %g4 /* paddr -= r << MMU_PAGESHIFT */
4499 4294
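In C, the VAC-alias adjustment above reads as follows (a sketch of the SFMMU_KPM_VTOP() logic transcribed from the instructions above; mach_kpm.h remains the authoritative definition, as the comment notes):

        static pfn_t
        kpm_small_vtop(uintptr_t vaddr)
        {
                uintptr_t paddr, r, v;

                paddr = vaddr - kpm_vbase;
                r = paddr >> kpm_size_shift;            /* which alias range */
                if (r != 0) {
                        v = (vaddr >> MMU_PAGESHIFT) & vac_colors_mask;
                        paddr -= r << kpm_size_shift;
                        if (r > v)
                                paddr += (r - v) << MMU_PAGESHIFT;
                        else
                                paddr -= r << MMU_PAGESHIFT;
                }
                return (paddr >> MMU_PAGESHIFT);        /* pfn */
        }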
4500 4295 /*
4501 4296 * paddr2pfn
4502 4297 * g1 = vcolor (not used)
4503 4298 * g2 = tag access register
4504 4299 * g3 = clobbered
4505 4300 * g4 = paddr
4506 4301 * g5 = clobbered
4507 4302 * g6 = per-CPU kpm tsbmiss area
4508 4303 * g7 = clobbered
4509 4304 */
4510 4305 2:
4511 4306 srlx %g4, MMU_PAGESHIFT, %g2 /* g2 = pfn */
4512 4307
4513 4308 /*
4514 4309 * Setup %asi
4515 4310 * mseg_pa = page_numtomemseg_nolock_pa(pfn)
4516 4311 * if (mseg not found) sfmmu_kpm_exception
4517 4312 * g2=pfn g6=per-CPU kpm tsbmiss area
4518 4313 * g4 g5 g7 for scratch use.
4519 4314 */
4520 4315 mov ASI_MEM, %asi
4521 4316 PAGE_NUM2MEMSEG_NOLOCK_PA(%g2, %g3, %g6, %g4, %g5, %g7, kpmtsbmsp2m)
4522 4317 cmp %g3, MSEG_NULLPTR_PA
4523 4318 be,pn %xcc, sfmmu_kpm_exception /* if mseg not found */
4524 4319 nop
4525 4320
4526 4321 /*
4527 4322 * inx = pfn - mseg_pa->kpm_pbase
4528 4323 * g2=pfn g3=mseg_pa g6=per-CPU kpm tsbmiss area
4529 4324 */
4530 4325 ldxa [%g3 + MEMSEG_KPM_PBASE]%asi, %g7
4531 4326 sub %g2, %g7, %g4
4532 4327
4533 4328 #ifdef DEBUG
4534 4329 /*
4535 4330 * Validate inx value
4536 4331 * g2=pfn g3=mseg_pa g4=inx g6=per-CPU tsbmiss area
4537 4332 */
4538 4333 ldxa [%g3 + MEMSEG_KPM_NKPMPGS]%asi, %g5
4539 4334 cmp %g4, %g5 /* inx - nkpmpgs */
4540 4335 bgeu,pn %xcc, sfmmu_kpm_exception /* if out of range */
4541 4336 ld [%g6 + KPMTSBM_KPMPTABLESZ], %g7
4542 4337 #else
4543 4338 ld [%g6 + KPMTSBM_KPMPTABLESZ], %g7
4544 4339 #endif
4545 4340 /* ksp = &mseg_pa->kpm_spages[inx] */
4546 4341 ldxa [%g3 + MEMSEG_KPM_SPAGES]%asi, %g5
4547 4342 add %g5, %g4, %g5 /* ksp */
4548 4343
4549 4344 /*
4550 4345 * KPMP_SHASH(kp)
4551 4346 * g2=pfn g3=mseg_pa g4=inx g5=ksp
4552 4347 * g6=per-CPU kpm tsbmiss area g7=kpmp_stable_sz
4553 4348 */
4554 4349 ldub [%g6 + KPMTSBM_KPMPSHIFT], %g1 /* kpmp_shift */
4555 4350 sub %g7, 1, %g7 /* mask */
4556 4351 sllx %g5, %g1, %g1 /* x = ksp << kpmp_shift */
4557 4352 add %g5, %g1, %g5 /* y = ksp + x */
4558 4353 and %g5, %g7, %g5 /* hashinx = y & mask */
4559 4354
4560 4355 /*
4561 4356 * Calculate physical kpm_spage pointer
4562 4357 * g2=pfn g3=mseg_pa g4=offset g5=hashinx
4563 4358 * g6=per-CPU kpm tsbmiss area
4564 4359 */
4565 4360 ldxa [%g3 + MEMSEG_KPM_PAGESPA]%asi, %g1 /* kpm_spagespa */
4566 4361 add %g1, %g4, %g1 /* ksp_pa */
4567 4362
4568 4363 /*
4569 4364 * Calculate physical hash lock address.
4570 4365 * Note: Changes in kpm_shlk_t must be reflected here.
4571 4366 * g1=ksp_pa g2=pfn g5=hashinx
4572 4367 * g6=per-CPU kpm tsbmiss area
4573 4368 */
4574 4369 ldx [%g6 + KPMTSBM_KPMPTABLEPA], %g4 /* kpmp_stablepa */
4575 4370 sllx %g5, KPMSHLK_SHIFT, %g5
4576 4371 add %g4, %g5, %g3 /* hlck_pa */
4577 4372
4578 4373 /*
4579 4374 * Assemble non-cacheable tte initially
4580 4375 * g1=ksp_pa g2=pfn g3=hlck_pa
4581 4376 * g6=per-CPU kpm tsbmiss area
4582 4377 */
4583 4378 sethi %hi(TTE_VALID_INT), %g5 /* upper part */
4584 4379 sllx %g5, 32, %g5
4585 4380 mov (TTE_CP_INT|TTE_PRIV_INT|TTE_HWWR_INT), %g4
4586 4381 or %g5, %g4, %g5
4587 4382 sllx %g2, MMU_PAGESHIFT, %g4
4588 4383 or %g5, %g4, %g5 /* tte */
4589 4384 ldx [%g6 + KPMTSBM_TSBPTR], %g4
4590 4385 GET_MMU_D_TTARGET(%g2, %g7) /* %g2 = ttarget */
4591 4386
4592 4387 /*
4593 4388 * tsb dropin
4594 4389 * g1=ksp_pa g2=ttarget g3=hlck_pa g4=ktsbp g5=tte (non-cacheable)
4595 4390 * g6=per-CPU kpm tsbmiss area g7=scratch register
4596 4391 */
4597 4392
4598 4393 /* KPMLOCK_ENTER(kpmlckp, tmp1, label1, asi) */
4599 4394 KPMLOCK_ENTER(%g3, %g7, kpmtsbsmlock, ASI_MEM)
4600 4395
4601 4396 /* use C-handler if there's no go for dropin */
4602 4397 ldsba [%g1 + KPMSPAGE_MAPPED]%asi, %g7 /* kp_mapped */
4603 4398 andcc %g7, KPM_MAPPED_GO, %g0 /* go or no go ? */
4604 4399 bz,pt %icc, 5f /* no go */
4605 4400 nop
4606 4401 and %g7, KPM_MAPPED_MASK, %g7 /* go */
4607 4402 cmp %g7, KPM_MAPPEDS /* cacheable ? */
4608 4403 be,a,pn %xcc, 3f
4609 4404 or %g5, TTE_CV_INT, %g5 /* cacheable */
4610 4405 3:
4611 4406 #ifndef sun4v
4612 4407 ldub [%g6 + KPMTSBM_FLAGS], %g7
4613 4408 mov ASI_N, %g1
4614 4409 andcc %g7, KPMTSBM_TSBPHYS_FLAG, %g0
4615 4410 movnz %icc, ASI_MEM, %g1
4616 4411 mov %g1, %asi
4617 4412 #endif
4618 4413
4619 4414 /*
4620 4415 * TSB_LOCK_ENTRY(tsbp, tmp1, tmp2, label) (needs %asi set)
4621 4416 * If we fail to lock the TSB entry then just load the tte into the
4622 4417 * TLB.
4623 4418 */
4624 4419 TSB_LOCK_ENTRY(%g4, %g1, %g7, locked_tsb_l2)
4625 4420
4626 4421 /* TSB_INSERT_UNLOCK_ENTRY(tsbp, tte, tagtarget, tmp) */
4627 4422 TSB_INSERT_UNLOCK_ENTRY(%g4, %g5, %g2, %g7)
4628 4423 locked_tsb_l2:
4629 4424 DTLB_STUFF(%g5, %g2, %g4, %g5, %g6)
4630 4425
4631 4426 /* KPMLOCK_EXIT(kpmlckp, asi) */
4632 4427 KPMLOCK_EXIT(%g3, ASI_MEM)
4633 4428
4634 4429 /*
4635 4430 * If trapstat is running, we need to shift the %tpc and %tnpc to
4636 4431 * point to trapstat's TSB miss return code (note that trapstat
4637 4432 * itself will patch the correct offset to add).
4638 4433 * Note: TTE is expected in %g5 (allows per pagesize reporting).
4639 4434 */
4640 4435 rdpr %tl, %g7
4641 4436 cmp %g7, 1
4642 4437 ble %icc, 0f
4643 4438 sethi %hi(KERNELBASE), %g6
4644 4439 rdpr %tpc, %g7
4645 4440 or %g6, %lo(KERNELBASE), %g6
4646 4441 cmp %g7, %g6
4647 4442 bgeu %xcc, 0f
4648 4443 ALTENTRY(tsbmiss_trapstat_patch_point_kpm_small)
4649 4444 add %g7, RUNTIME_PATCH, %g7 /* must match TSTAT_TSBMISS_INSTR */
4650 4445 wrpr %g7, %tpc
4651 4446 add %g7, 4, %g7
4652 4447 wrpr %g7, %tnpc
4653 4448 0:
4654 4449 retry
4655 4450 5:
4656 4451 /* g3=hlck_pa */
4657 4452 KPMLOCK_EXIT(%g3, ASI_MEM)
4658 4453 ba,pt %icc, sfmmu_kpm_exception
4659 4454 nop
4660 4455 SET_SIZE(sfmmu_kpm_dtsb_miss_small)
4661 4456
4662 4457 #if (1<< KPMTSBM_SHIFT) != KPMTSBM_SIZE
4663 4458 #error - KPMTSBM_SHIFT does not correspond to size of kpmtsbm struct
4664 4459 #endif
4665 4460
4666 -#endif /* lint */
4667 -
4668 -#ifdef lint
4669 -/*
4670 - * Enable/disable tsbmiss handling at trap level for a kpm (large) page.
4671 - * Called from C-level, sets/clears "go" indication for trap level handler.
4672 - * khl_lock is a low level spin lock to protect the kp_tsbmtl field.
4673 - * Assumed that &kp->kp_refcntc is checked for zero or -1 at C-level.
4674 - * Assumes khl_mutex is held when called from C-level.
4675 - */
4676 -/* ARGSUSED */
4677 -void
4678 -sfmmu_kpm_tsbmtl(short *kp_refcntc, uint_t *khl_lock, int cmd)
4679 -{
4680 -}
4681 -
4682 -/*
4683 - * kpm_smallpages: stores val to byte at address mapped within
4684 - * low level lock brackets. The old value is returned.
4685 - * Called from C-level.
4686 - */
4687 -/* ARGSUSED */
4688 -int
4689 -sfmmu_kpm_stsbmtl(uchar_t *mapped, uint_t *kshl_lock, int val)
4690 -{
4691 - return (0);
4692 -}
4693 -
4694 -#else /* lint */
4695 -
4696 4461 .seg ".data"
4697 4462 sfmmu_kpm_tsbmtl_panic:
4698 4463 .ascii "sfmmu_kpm_tsbmtl: interrupts disabled"
4699 4464 .byte 0
4700 4465 sfmmu_kpm_stsbmtl_panic:
4701 4466 .ascii "sfmmu_kpm_stsbmtl: interrupts disabled"
4702 4467 .byte 0
4703 4468 .align 4
4704 4469 .seg ".text"
4705 4470
4706 4471 ENTRY_NP(sfmmu_kpm_tsbmtl)
4707 4472 rdpr %pstate, %o3
4708 4473 /*
4709 4474 * %o0 = &kp_refcntc
4710 4475 * %o1 = &khl_lock
4711 4476 * %o2 = 0/1 (off/on)
4712 4477 * %o3 = pstate save
4713 4478 */
4714 4479 #ifdef DEBUG
4715 4480 andcc %o3, PSTATE_IE, %g0 /* if interrupts already */
4716 4481 bnz,pt %icc, 1f /* disabled, panic */
4717 4482 nop
4718 4483 save %sp, -SA(MINFRAME), %sp
4719 4484 sethi %hi(sfmmu_kpm_tsbmtl_panic), %o0
4720 4485 call panic
4721 4486 or %o0, %lo(sfmmu_kpm_tsbmtl_panic), %o0
4722 4487 ret
4723 4488 restore
4724 4489 1:
4725 4490 #endif /* DEBUG */
4726 4491 wrpr %o3, PSTATE_IE, %pstate /* disable interrupts */
4727 4492
4728 4493 KPMLOCK_ENTER(%o1, %o4, kpmtsbmtl1, ASI_N)
4729 4494 mov -1, %o5
4730 4495 brz,a %o2, 2f
4731 4496 mov 0, %o5
4732 4497 2:
4733 4498 sth %o5, [%o0]
4734 4499 KPMLOCK_EXIT(%o1, ASI_N)
4735 4500
4736 4501 retl
4737 4502 wrpr %g0, %o3, %pstate /* enable interrupts */
4738 4503 SET_SIZE(sfmmu_kpm_tsbmtl)
4739 4504
4740 4505 ENTRY_NP(sfmmu_kpm_stsbmtl)
4741 4506 rdpr %pstate, %o3
4742 4507 /*
4743 4508 * %o0 = &mapped
4744 4509 * %o1 = &kshl_lock
4745 4510 * %o2 = val
4746 4511 * %o3 = pstate save
4747 4512 */
4748 4513 #ifdef DEBUG
4749 4514 andcc %o3, PSTATE_IE, %g0 /* if interrupts already */
4750 4515 bnz,pt %icc, 1f /* disabled, panic */
4751 4516 nop
4752 4517 save %sp, -SA(MINFRAME), %sp
4753 4518 sethi %hi(sfmmu_kpm_stsbmtl_panic), %o0
4754 4519 call panic
4755 4520 or %o0, %lo(sfmmu_kpm_stsbmtl_panic), %o0
4756 4521 ret
4757 4522 restore
4758 4523 1:
4759 4524 #endif /* DEBUG */
4760 4525 wrpr %o3, PSTATE_IE, %pstate /* disable interrupts */
4761 4526
4762 4527 KPMLOCK_ENTER(%o1, %o4, kpmstsbmtl1, ASI_N)
4763 4528 ldsb [%o0], %o5
4764 4529 stb %o2, [%o0]
4765 4530 KPMLOCK_EXIT(%o1, ASI_N)
4766 4531
4767 4532 and %o5, KPM_MAPPED_MASK, %o0 /* return old val */
4768 4533 retl
4769 4534 wrpr %g0, %o3, %pstate /* enable interrupts */
4770 4535 SET_SIZE(sfmmu_kpm_stsbmtl)
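For reference, sfmmu_kpm_stsbmtl() boils down to swapping in the new mapped state under the kpm small-page hash lock and handing back the old one; a C-level sketch (the real routine also disables interrupts around the critical section; kpmlock_enter/exit refer to the spinlock sketch given earlier):

        static int
        kpm_stsbmtl_sketch(volatile uchar_t *mapped, volatile uint32_t *kshl_lock, int val)
        {
                int old;

                kpmlock_enter(kshl_lock);
                old = *mapped;
                *mapped = (uchar_t)val;
                kpmlock_exit(kshl_lock);

                return (old & KPM_MAPPED_MASK);
        }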
4771 4536
4772 -#endif /* lint */
4773 -
4774 -#ifndef lint
4775 4537 #ifdef sun4v
4776 4538 /*
4777 4539 * User/kernel data miss w/ multiple TSBs
4778 4540 * The first probe covers 8K, 64K, and 512K page sizes,
4779 4541 * because 64K and 512K mappings are replicated off 8K
4780 4542 * pointer. Second probe covers 4M page size only.
4781 4543 *
4782 4544 * MMU fault area contains miss address and context.
4783 4545 */
4784 4546 ALTENTRY(sfmmu_slow_dmmu_miss)
4785 4547 GET_MMU_D_PTAGACC_CTXTYPE(%g2, %g3) ! %g2 = ptagacc, %g3 = ctx type
4786 4548
4787 4549 slow_miss_common:
4788 4550 /*
4789 4551 * %g2 = tagacc register (needed for sfmmu_tsb_miss_tt)
4790 4552 * %g3 = ctx (cannot be INVALID_CONTEXT)
4791 4553 */
4792 4554 brnz,pt %g3, 8f ! check for user context
4793 4555 nop
4794 4556
4795 4557 /*
4796 4558 * Kernel miss
4797 4559 * Get 8K and 4M TSB pointers in %g1 and %g3 and
4798 4560 * branch to sfmmu_tsb_miss_tt to handle it.
4799 4561 */
4800 4562 mov %g2, %g7 ! TSB pointer macro clobbers tagacc
4801 4563 sfmmu_dslow_patch_ktsb_base:
4802 4564 RUNTIME_PATCH_SETX(%g1, %g6) ! %g1 = contents of ktsb_pbase
4803 4565 sfmmu_dslow_patch_ktsb_szcode:
4804 4566 or %g0, RUNTIME_PATCH, %g3 ! ktsb_szcode (hot patched)
4805 4567
4806 4568 GET_TSBE_POINTER(MMU_PAGESHIFT, %g1, %g7, %g3, %g5)
4807 4569 ! %g1 = First TSB entry pointer, as TSB miss handler expects
4808 4570
4809 4571 mov %g2, %g7 ! TSB pointer macro clobbers tagacc
4810 4572 sfmmu_dslow_patch_ktsb4m_base:
4811 4573 RUNTIME_PATCH_SETX(%g3, %g6) ! %g3 = contents of ktsb4m_pbase
4812 4574 sfmmu_dslow_patch_ktsb4m_szcode:
4813 4575 or %g0, RUNTIME_PATCH, %g6 ! ktsb4m_szcode (hot patched)
4814 4576
4815 4577 GET_TSBE_POINTER(MMU_PAGESHIFT4M, %g3, %g7, %g6, %g5)
4816 4578 ! %g3 = 4M tsb entry pointer, as TSB miss handler expects
4817 4579 ba,a,pt %xcc, sfmmu_tsb_miss_tt
4818 4580 .empty
4819 4581
4820 4582 8:
4821 4583 /*
4822 4584 * User miss
4823 4585 * Get first TSB pointer in %g1
4824 4586 * Get second TSB pointer (or NULL if no second TSB) in %g3
4825 4587 * Branch to sfmmu_tsb_miss_tt to handle it
4826 4588 */
4827 4589 GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)
4828 4590 /* %g1 = first TSB entry ptr now, %g2 preserved */
4829 4591
4830 4592 GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3) /* get 2nd utsbreg */
4831 4593 brlz,pt %g3, sfmmu_tsb_miss_tt /* done if no 2nd TSB */
4832 4594 nop
4833 4595
4834 4596 GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
4835 4597 /* %g3 = second TSB entry ptr now, %g2 preserved */
4836 4598 9:
4837 4599 ba,a,pt %xcc, sfmmu_tsb_miss_tt
4838 4600 .empty
4839 4601 SET_SIZE(sfmmu_slow_dmmu_miss)
4840 4602
4841 4603
4842 4604 /*
4843 4605 * User/kernel instruction miss w/ multiple TSBs
4844 4606 * The first probe covers 8K, 64K, and 512K page sizes,
4845 4607 * because 64K and 512K mappings are replicated off 8K
4846 4608 * pointer. Second probe covers 4M page size only.
4847 4609 *
4848 4610 * MMU fault area contains miss address and context.
4849 4611 */
4850 4612 ALTENTRY(sfmmu_slow_immu_miss)
4851 4613 GET_MMU_I_PTAGACC_CTXTYPE(%g2, %g3)
4852 4614 ba,a,pt %xcc, slow_miss_common
4853 4615 SET_SIZE(sfmmu_slow_immu_miss)
4854 4616
4855 4617 #endif /* sun4v */
4856 -#endif /* lint */
4857 4618
4858 -#ifndef lint
4859 -
4860 4619 /*
4861 4620 * Per-CPU tsbmiss areas to avoid cache misses in TSB miss handlers.
4862 4621 */
4863 4622 .seg ".data"
4864 4623 .align 64
4865 4624 .global tsbmiss_area
4866 4625 tsbmiss_area:
4867 4626 .skip (TSBMISS_SIZE * NCPU)
4868 4627
4869 4628 .align 64
4870 4629 .global kpmtsbm_area
4871 4630 kpmtsbm_area:
4872 4631 .skip (KPMTSBM_SIZE * NCPU)
4873 -#endif /* lint */