restore sparc comments
de-linting of .s files
--- old/usr/src/uts/sun4u/vm/mach_sfmmu_asm.s
+++ new/usr/src/uts/sun4u/vm/mach_sfmmu_asm.s
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 */
25 25
26 -#pragma ident "%Z%%M% %I% %E% SMI"
27 -
28 26 /*
29 27 * SFMMU primitives. These primitives should only be used by sfmmu
30 28 * routines.
31 29 */
32 30
33 -#if defined(lint)
34 -#include <sys/types.h>
35 -#else /* lint */
36 31 #include "assym.h"
37 -#endif /* lint */
38 32
39 33 #include <sys/asm_linkage.h>
40 34 #include <sys/machtrap.h>
41 35 #include <sys/machasi.h>
42 36 #include <sys/sun4asi.h>
43 37 #include <sys/pte.h>
44 38 #include <sys/mmu.h>
45 39 #include <vm/hat_sfmmu.h>
46 40 #include <vm/seg_spt.h>
47 41 #include <sys/machparam.h>
48 42 #include <sys/privregs.h>
49 43 #include <sys/scb.h>
50 44 #include <sys/intreg.h>
51 45 #include <sys/machthread.h>
52 46 #include <sys/clock.h>
53 47 #include <sys/trapstat.h>
54 48
55 49 /*
56 50 * sfmmu related subroutines
57 51 */
58 52
59 -#if defined (lint)
60 -
61 53 /*
62 - * sfmmu related subroutines
63 - */
64 -/* ARGSUSED */
65 -void
66 -sfmmu_raise_tsb_exception(uint64_t sfmmup, uint64_t rctx)
67 -{}
68 -
69 -/* ARGSUSED */
70 -void
71 -sfmmu_itlb_ld_kva(caddr_t vaddr, tte_t *tte)
72 -{}
73 -
74 -/* ARGSUSED */
75 -void
76 -sfmmu_dtlb_ld_kva(caddr_t vaddr, tte_t *tte)
77 -{}
78 -
79 -int
80 -sfmmu_getctx_pri()
81 -{ return(0); }
82 -
83 -int
84 -sfmmu_getctx_sec()
85 -{ return(0); }
86 -
87 -/* ARGSUSED */
88 -void
89 -sfmmu_setctx_sec(uint_t ctx)
90 -{}
91 -
92 -/* ARGSUSED */
93 -void
94 -sfmmu_load_mmustate(sfmmu_t *sfmmup)
95 -{
96 -}
97 -
98 -#else /* lint */
99 -
100 -/*
101 54 * Invalidate either the context of a specific victim or any process
102 55 * currently running on this CPU.
103 56 *
104 57 * %g1 = sfmmup whose ctx is being invalidated
105 58 * when called from sfmmu_wrap_around, %g1 == INVALID_CONTEXT
106 59 * Note %g1 is the only input argument used by this xcall handler.
107 60 */
108 61 ENTRY(sfmmu_raise_tsb_exception)
109 62 !
110 63 ! if (victim == INVALID_CONTEXT ||
111 64 ! current CPU tsbmiss->usfmmup == victim sfmmup) {
112 65 ! if (shctx_on) {
113 66 ! shctx = INVALID;
114 67 ! }
115 68 ! if (sec-ctx > INVALID_CONTEXT) {
116 69 ! write INVALID_CONTEXT to sec-ctx
117 70 ! }
118 71 ! if (pri-ctx > INVALID_CONTEXT) {
119 72 ! write INVALID_CONTEXT to pri-ctx
120 73 ! }
121 74 ! }
122 75
123 76 sethi %hi(ksfmmup), %g3
124 77 ldx [%g3 + %lo(ksfmmup)], %g3
125 78 cmp %g1, %g3
126 79 be,a,pn %xcc, ptl1_panic /* can't invalidate kernel ctx */
127 80 mov PTL1_BAD_RAISE_TSBEXCP, %g1
128 81
129 82 set INVALID_CONTEXT, %g2
130 83 cmp %g1, INVALID_CONTEXT
131 84 be,pn %xcc, 0f /* called from wrap_around? */
132 85 mov MMU_SCONTEXT, %g3
133 86
134 87 CPU_TSBMISS_AREA(%g5, %g6) /* load cpu tsbmiss area */
135 88 ldx [%g5 + TSBMISS_UHATID], %g5 /* load usfmmup */
 136  89   	cmp	%g5, %g1			/* hat to be invalidated running? */
137 90 bne,pt %xcc, 3f
138 91 nop
139 92
140 93 0:
141 94 sethi %hi(shctx_on), %g5
142 95 ld [%g5 + %lo(shctx_on)], %g5
143 96 brz %g5, 1f
144 97 mov MMU_SHARED_CONTEXT, %g5
145 98 sethi %hi(FLUSH_ADDR), %g4
146 99 stxa %g0, [%g5]ASI_MMU_CTX
147 100 flush %g4
148 101
149 102 1:
150 103 ldxa [%g3]ASI_MMU_CTX, %g5 /* %g5 = pgsz | sec-ctx */
151 104 set CTXREG_CTX_MASK, %g4
152 105 and %g5, %g4, %g5 /* %g5 = sec-ctx */
 153 106   	cmp	%g5, INVALID_CONTEXT		/* kernel ctx or invalid ctx? */
154 107 ble,pn %xcc, 2f /* yes, no need to change */
155 108 mov MMU_PCONTEXT, %g7
156 109
157 110 stxa %g2, [%g3]ASI_MMU_CTX /* set invalid ctx */
158 111 membar #Sync
159 112
160 113 2:
 161 114   	ldxa	[%g7]ASI_MMU_CTX, %g3		/* get pgsz | pri-ctx */
162 115 and %g3, %g4, %g5 /* %g5 = pri-ctx */
 163 116   	cmp	%g5, INVALID_CONTEXT		/* kernel ctx or invalid ctx? */
164 117 ble,pn %xcc, 3f /* yes, no need to change */
165 118 srlx %g3, CTXREG_NEXT_SHIFT, %g3 /* %g3 = nucleus pgsz */
166 119 sllx %g3, CTXREG_NEXT_SHIFT, %g3 /* need to preserve nucleus pgsz */
167 120 or %g3, %g2, %g2 /* %g2 = nucleus pgsz | INVALID_CONTEXT */
168 121
169 122 stxa %g2, [%g7]ASI_MMU_CTX /* set pri-ctx to invalid */
170 123 3:
171 124 retry
172 125 SET_SIZE(sfmmu_raise_tsb_exception)
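
For readers following the xcall handler above, here is a minimal C-level sketch of the same control flow. The helpers cpu_tsbmiss_usfmmup(), read_mmu_ctx(), write_mmu_ctx() and ctxnum() are hypothetical stand-ins for the ASI loads/stores and register masking done in the assembly; only the logic is meant to match.

/* Hedged sketch of sfmmu_raise_tsb_exception(); all helpers are hypothetical. */
static void
raise_tsb_exception_sketch(uint64_t sfmmup)
{
	uint64_t pctx, pgsz;

	if (sfmmup == (uint64_t)(uintptr_t)ksfmmup)
		panic("raise_tsb_exception: kernel hat");  /* assembly: ptl1_panic(PTL1_BAD_RAISE_TSBEXCP) */

	/* Act only when called from wrap-around or when the victim runs on this CPU. */
	if (sfmmup != INVALID_CONTEXT && cpu_tsbmiss_usfmmup() != sfmmup)
		return;

	if (shctx_on)
		write_mmu_ctx(MMU_SHARED_CONTEXT, 0);	/* invalidate shared ctx */

	if (ctxnum(read_mmu_ctx(MMU_SCONTEXT)) > INVALID_CONTEXT)
		write_mmu_ctx(MMU_SCONTEXT, INVALID_CONTEXT);

	pctx = read_mmu_ctx(MMU_PCONTEXT);
	if (ctxnum(pctx) > INVALID_CONTEXT) {
		/* preserve the nucleus page-size bits, as the assembly does */
		pgsz = (pctx >> CTXREG_NEXT_SHIFT) << CTXREG_NEXT_SHIFT;
		write_mmu_ctx(MMU_PCONTEXT, pgsz | INVALID_CONTEXT);
	}
}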
173 126
174 127
175 128
176 129 /*
177 130 * %o0 = virtual address
178 131 * %o1 = address of TTE to be loaded
179 132 */
180 133 ENTRY_NP(sfmmu_itlb_ld_kva)
181 134 rdpr %pstate, %o3
182 135 #ifdef DEBUG
183 136 PANIC_IF_INTR_DISABLED_PSTR(%o3, msfmmu_di_l1, %g1)
184 137 #endif /* DEBUG */
185 138 wrpr %o3, PSTATE_IE, %pstate ! Disable interrupts
186 139 srln %o0, MMU_PAGESHIFT, %o0
187 140 slln %o0, MMU_PAGESHIFT, %o0 ! Clear page offset
188 141
189 142 ldx [%o1], %g1
190 143 set MMU_TAG_ACCESS, %o5
191 144 #ifdef CHEETAHPLUS_ERRATUM_34
192 145 !
193 146 ! If this is Cheetah or derivative and the specified TTE is locked
194 147 ! and hence to be loaded into the T16, fully-associative TLB, we
195 148 ! must avoid Cheetah+ erratum 34. In Cheetah+ erratum 34, under
196 149 ! certain conditions an ITLB locked index 0 TTE will erroneously be
197 150 ! displaced when a new TTE is loaded via ASI_ITLB_IN. To avoid
198 151 ! this erratum, we scan the T16 top down for an unlocked TTE and
199 152 ! explicitly load the specified TTE into that index.
200 153 !
201 154 GET_CPU_IMPL(%g2)
202 155 cmp %g2, CHEETAH_IMPL
203 156 bl,pn %icc, 0f
204 157 nop
205 158
206 159 andcc %g1, TTE_LCK_INT, %g0
207 160 bz %icc, 0f ! Lock bit is not set;
208 161 ! load normally.
209 162 or %g0, (15 << 3), %g3 ! Start searching from the
210 163 ! top down.
211 164
212 165 1:
213 166 ldxa [%g3]ASI_ITLB_ACCESS, %g4 ! Load TTE from t16
214 167
215 168 !
216 169 ! If this entry isn't valid, we'll choose to displace it (regardless
217 170 ! of the lock bit).
218 171 !
219 172 cmp %g4, %g0
220 173 bge %xcc, 2f ! TTE is > 0 iff not valid
221 174 andcc %g4, TTE_LCK_INT, %g0 ! Check for lock bit
222 175 bz %icc, 2f ! If unlocked, go displace
223 176 nop
224 177 sub %g3, (1 << 3), %g3
225 178 brgz %g3, 1b ! Still more TLB entries
226 179 nop ! to search
227 180
228 181 sethi %hi(sfmmu_panic5), %o0 ! We searched all entries and
229 182 call panic ! found no unlocked TTE so
230 183 or %o0, %lo(sfmmu_panic5), %o0 ! give up.
231 184
232 185
233 186 2:
234 187 !
235 188 ! We have found an unlocked or non-valid entry; we'll explicitly load
236 189 ! our locked entry here.
237 190 !
238 191 sethi %hi(FLUSH_ADDR), %o1 ! Flush addr doesn't matter
239 192 stxa %o0, [%o5]ASI_IMMU
240 193 stxa %g1, [%g3]ASI_ITLB_ACCESS
241 194 flush %o1 ! Flush required for I-MMU
242 195 ba 3f ! Delay slot of ba is empty
243 196 nop ! per Erratum 64
244 197
245 198 0:
246 199 #endif /* CHEETAHPLUS_ERRATUM_34 */
247 200 sethi %hi(FLUSH_ADDR), %o1 ! Flush addr doesn't matter
248 201 stxa %o0, [%o5]ASI_IMMU
249 202 stxa %g1, [%g0]ASI_ITLB_IN
250 203 flush %o1 ! Flush required for I-MMU
251 204 3:
252 205 retl
253 206 wrpr %g0, %o3, %pstate ! Enable interrupts
254 207 SET_SIZE(sfmmu_itlb_ld_kva)
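
The erratum-34 path above boils down to a top-down search of the 16-entry fully-associative ITLB for a slot that is invalid or unlocked, never using entry 0, and it only runs when a Cheetah-class CPU is loading a locked TTE. A hedged sketch of that search follows; itlb_t16_read() is a hypothetical stand-in for the ldxa from ASI_ITLB_ACCESS.

/* Sketch of the erratum-34 slot search; itlb_t16_read() is hypothetical. */
static int
find_displaceable_itlb_slot(void)
{
	int idx;

	for (idx = 15; idx > 0; idx--) {	/* top down; entry 0 is never used */
		uint64_t tte = itlb_t16_read(idx);

		if ((int64_t)tte >= 0)		/* valid bit is the sign bit */
			return (idx);		/* invalid entry: displace it */
		if ((tte & TTE_LCK_INT) == 0)
			return (idx);		/* unlocked entry: displace it */
	}
	panic(sfmmu_panic5);			/* every candidate entry is locked */
	/* NOTREACHED */
	return (-1);
}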
255 208
256 209 /*
257 210 * Load an entry into the DTLB.
258 211 *
259 212 * Special handling is required for locked entries since there
260 213 * are some TLB slots that are reserved for the kernel but not
261 214 * always held locked. We want to avoid loading locked TTEs
262 215 * into those slots since they could be displaced.
263 216 *
264 217 * %o0 = virtual address
265 218 * %o1 = address of TTE to be loaded
266 219 */
267 220 ENTRY_NP(sfmmu_dtlb_ld_kva)
268 221 rdpr %pstate, %o3
269 222 #ifdef DEBUG
270 223 PANIC_IF_INTR_DISABLED_PSTR(%o3, msfmmu_di_l2, %g1)
271 224 #endif /* DEBUG */
272 225 wrpr %o3, PSTATE_IE, %pstate ! disable interrupts
273 226 srln %o0, MMU_PAGESHIFT, %o0
274 227 slln %o0, MMU_PAGESHIFT, %o0 ! clear page offset
275 228
276 229 ldx [%o1], %g1
277 230
278 231 set MMU_TAG_ACCESS, %o5
279 232
280 233 set cpu_impl_dual_pgsz, %o2
281 234 ld [%o2], %o2
282 235 brz %o2, 1f
283 236 nop
284 237
285 238 sethi %hi(ksfmmup), %o2
286 239 ldx [%o2 + %lo(ksfmmup)], %o2
287 240 ldub [%o2 + SFMMU_CEXT], %o2
288 241 sll %o2, TAGACCEXT_SHIFT, %o2
289 242
290 243 set MMU_TAG_ACCESS_EXT, %o4 ! can go into T8 if unlocked
291 244 stxa %o2,[%o4]ASI_DMMU
292 245 membar #Sync
293 246 1:
294 247 andcc %g1, TTE_LCK_INT, %g0 ! Locked entries require
295 248 bnz,pn %icc, 2f ! special handling
296 249 sethi %hi(dtlb_resv_ttenum), %g3
297 250 stxa %o0,[%o5]ASI_DMMU ! Load unlocked TTE
298 251 stxa %g1,[%g0]ASI_DTLB_IN ! via DTLB_IN
299 252 membar #Sync
300 253 retl
301 254 wrpr %g0, %o3, %pstate ! enable interrupts
302 255 2:
303 256 #ifdef CHEETAHPLUS_ERRATUM_34
304 257 GET_CPU_IMPL(%g2)
305 258 #endif
306 259 ld [%g3 + %lo(dtlb_resv_ttenum)], %g3
307 260 sll %g3, 3, %g3 ! First reserved idx in TLB 0
308 261 sub %g3, (1 << 3), %g3 ! Decrement idx
309 262 ! Erratum 15 workaround due to ld [%g3 + %lo(dtlb_resv_ttenum)], %g3
310 263 ldxa [%g3]ASI_DTLB_ACCESS, %g4 ! Load TTE from TLB 0
311 264 3:
312 265 ldxa [%g3]ASI_DTLB_ACCESS, %g4 ! Load TTE from TLB 0
313 266 !
314 267 ! If this entry isn't valid, we'll choose to displace it (regardless
315 268 ! of the lock bit).
316 269 !
317 270 brgez,pn %g4, 4f ! TTE is > 0 iff not valid
318 271 nop
319 272 andcc %g4, TTE_LCK_INT, %g0 ! Check for lock bit
320 273 bz,pn %icc, 4f ! If unlocked, go displace
321 274 nop
322 275 sub %g3, (1 << 3), %g3 ! Decrement idx
323 276 #ifdef CHEETAHPLUS_ERRATUM_34
324 277 !
325 278 ! If this is a Cheetah or derivative, we must work around Erratum 34
326 279 ! for the DTLB. Erratum 34 states that under certain conditions,
327 280 ! a locked entry 0 TTE may be improperly displaced. To avoid this,
328 281 ! we do not place a locked TTE in entry 0.
329 282 !
330 283 brgz %g3, 3b
331 284 nop
332 285 cmp %g2, CHEETAH_IMPL
333 286 bge,pt %icc, 5f
334 287 nop
335 288 brz %g3, 3b
336 289 nop
337 290 #else /* CHEETAHPLUS_ERRATUM_34 */
338 291 brgez %g3, 3b
339 292 nop
340 293 #endif /* CHEETAHPLUS_ERRATUM_34 */
341 294 5:
342 295 sethi %hi(sfmmu_panic5), %o0 ! We searched all entries and
343 296 call panic ! found no unlocked TTE so
344 297 or %o0, %lo(sfmmu_panic5), %o0 ! give up.
345 298 4:
346 299 stxa %o0,[%o5]ASI_DMMU ! Setup tag access
347 300 #ifdef OLYMPUS_SHARED_FTLB
348 301 stxa %g1,[%g0]ASI_DTLB_IN
349 302 #else
350 303 stxa %g1,[%g3]ASI_DTLB_ACCESS ! Displace entry at idx
351 304 #endif
352 305 membar #Sync
353 306 retl
354 307 wrpr %g0, %o3, %pstate ! enable interrupts
 355 308   	SET_SIZE(sfmmu_dtlb_ld_kva)
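
The locked-entry path of sfmmu_dtlb_ld_kva follows the same pattern, but starts just below dtlb_resv_ttenum so that only reserved kernel slots are considered and, under CHEETAHPLUS_ERRATUM_34 on Cheetah-class CPUs, refuses to place a locked TTE in entry 0. A hedged sketch; dtlb0_read() and cpu_impl() are hypothetical stand-ins for the ASI_DTLB_ACCESS load and GET_CPU_IMPL.

/* Sketch of the locked-entry slot search in sfmmu_dtlb_ld_kva; helpers hypothetical. */
static int
find_locked_dtlb_slot(void)
{
	int idx;

	for (idx = dtlb_resv_ttenum - 1; idx >= 0; idx--) {
		uint64_t tte = dtlb0_read(idx);		/* TLB 0 (T16) entry */

		if ((int64_t)tte >= 0 || (tte & TTE_LCK_INT) == 0)
			return (idx);			/* invalid or unlocked: displace it */
#ifdef CHEETAHPLUS_ERRATUM_34
		if (idx == 1 && cpu_impl() >= CHEETAH_IMPL)
			break;				/* erratum 34: never lock entry 0 */
#endif
	}
	panic(sfmmu_panic5);				/* no displaceable entry found */
	/* NOTREACHED */
	return (-1);
}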
356 309
357 310 ENTRY_NP(sfmmu_getctx_pri)
358 311 set MMU_PCONTEXT, %o0
359 312 retl
360 313 ldxa [%o0]ASI_MMU_CTX, %o0
361 314 SET_SIZE(sfmmu_getctx_pri)
362 315
363 316 ENTRY_NP(sfmmu_getctx_sec)
364 317 set MMU_SCONTEXT, %o0
365 318 set CTXREG_CTX_MASK, %o1
366 319 ldxa [%o0]ASI_MMU_CTX, %o0
367 320 retl
368 321 and %o0, %o1, %o0
369 322 SET_SIZE(sfmmu_getctx_sec)
370 323
371 324 /*
372 325 * Set the secondary context register for this process.
373 326 * %o0 = page_size | context number for this process.
374 327 */
375 328 ENTRY_NP(sfmmu_setctx_sec)
376 329 /*
377 330 * From resume we call sfmmu_setctx_sec with interrupts disabled.
378 331 * But we can also get called from C with interrupts enabled. So,
379 332 * we need to check first.
380 333 */
381 334
382 335 /* If interrupts are not disabled, then disable them */
383 336 rdpr %pstate, %g1
384 337 btst PSTATE_IE, %g1
385 338 bnz,a,pt %icc, 1f
386 339 wrpr %g1, PSTATE_IE, %pstate /* disable interrupts */
387 340
388 341 1:
389 342 mov MMU_SCONTEXT, %o1
390 343
391 344 sethi %hi(FLUSH_ADDR), %o4
392 345 stxa %o0, [%o1]ASI_MMU_CTX /* set 2nd context reg. */
393 346 flush %o4
394 347 sethi %hi(shctx_on), %g3
395 348 ld [%g3 + %lo(shctx_on)], %g3
396 349 brz %g3, 2f
397 350 nop
398 351 set CTXREG_CTX_MASK, %o4
399 352 and %o0,%o4,%o1
400 353 cmp %o1, INVALID_CONTEXT
401 354 bne,pn %icc, 2f
402 355 mov MMU_SHARED_CONTEXT, %o1
403 356 sethi %hi(FLUSH_ADDR), %o4
404 357 stxa %g0, [%o1]ASI_MMU_CTX /* set 2nd context reg. */
405 358 flush %o4
406 359
407 360 /*
 408 361   	 * If the routine was entered with intr enabled, then enable intr now.
 409 362   	 * Otherwise, keep intr disabled and return without enabling intr.
410 363 * %g1 - old intr state
411 364 */
412 365 2: btst PSTATE_IE, %g1
413 366 bnz,a,pt %icc, 3f
414 367 wrpr %g0, %g1, %pstate /* enable interrupts */
415 368 3: retl
416 369 nop
417 370 SET_SIZE(sfmmu_setctx_sec)
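
The interrupt handling in sfmmu_setctx_sec reduces to "disable only if enabled, restore only in that same case". A hedged C sketch, with rdpr_pstate(), wrpr_pstate() and write_mmu_ctx() as hypothetical stand-ins for the privileged-register and ASI accesses.

/* Hedged sketch of sfmmu_setctx_sec(); helper names are hypothetical. */
static void
setctx_sec_sketch(uint_t ctx)
{
	uint64_t pstate = rdpr_pstate();

	if (pstate & PSTATE_IE)
		wrpr_pstate(pstate & ~PSTATE_IE);	/* disable interrupts */

	write_mmu_ctx(MMU_SCONTEXT, ctx);		/* ctx = page_size | context number */

	if (shctx_on && (ctx & CTXREG_CTX_MASK) == INVALID_CONTEXT)
		write_mmu_ctx(MMU_SHARED_CONTEXT, 0);	/* clear the shared context too */

	if (pstate & PSTATE_IE)
		wrpr_pstate(pstate);			/* re-enable only if it was enabled */
}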
418 371
419 372 /*
420 373 * set ktsb_phys to 1 if the processor supports ASI_QUAD_LDD_PHYS.
421 374 * returns the detection value in %o0.
422 375 *
423 376 * Currently ASI_QUAD_LDD_PHYS is supported in processors as follows
424 377 * - cheetah+ and later (greater or equal to CHEETAH_PLUS_IMPL)
425 378 * - FJ OPL Olympus-C and later (less than SPITFIRE_IMPL)
426 379 *
427 380 */
428 381 ENTRY_NP(sfmmu_setup_4lp)
429 382 GET_CPU_IMPL(%o0);
430 383 cmp %o0, CHEETAH_PLUS_IMPL
431 384 bge,pt %icc, 4f
432 385 mov 1, %o1
433 386 cmp %o0, SPITFIRE_IMPL
434 387 bge,a,pn %icc, 3f
435 388 clr %o1
436 389 4:
437 390 set ktsb_phys, %o2
438 391 st %o1, [%o2]
439 392 3: retl
440 393 mov %o1, %o0
441 394 SET_SIZE(sfmmu_setup_4lp)
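
The detection in sfmmu_setup_4lp is just a range check on the CPU implementation number: ASI_QUAD_LDD_PHYS is assumed for CHEETAH_PLUS_IMPL and later, and for implementation numbers below SPITFIRE_IMPL (the FJ OPL range). A hedged sketch, with cpu_impl() standing in for GET_CPU_IMPL:

/* Hedged sketch of sfmmu_setup_4lp(); returns the detection value. */
static int
setup_4lp_sketch(void)
{
	uint_t impl = cpu_impl();
	int supported = (impl >= CHEETAH_PLUS_IMPL || impl < SPITFIRE_IMPL);

	if (supported)
		ktsb_phys = 1;		/* only written when support is detected */
	return (supported);
}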
442 395
443 396
444 397 /*
445 398 * Called to load MMU registers and tsbmiss area
446 399 * for the active process. This function should
447 400 * only be called from TL=0.
448 401 *
449 402 * %o0 - hat pointer
450 403 *
451 404 */
452 405 ENTRY_NP(sfmmu_load_mmustate)
453 406
454 407 #ifdef DEBUG
455 408 PANIC_IF_INTR_ENABLED_PSTR(msfmmu_ei_l3, %g1)
456 409 #endif /* DEBUG */
457 410
458 411 sethi %hi(ksfmmup), %o3
459 412 ldx [%o3 + %lo(ksfmmup)], %o3
460 413 cmp %o3, %o0
461 414 be,pn %xcc, 8f ! if kernel as, do nothing
462 415 nop
463 416 /*
464 417 * We need to set up the TSB base register, tsbmiss
465 418 * area, and load locked TTE(s) for the TSB.
466 419 */
467 420 ldx [%o0 + SFMMU_TSB], %o1 ! %o1 = first tsbinfo
468 421 ldx [%o1 + TSBINFO_NEXTPTR], %g2 ! %g2 = second tsbinfo
469 422
470 423 #ifdef UTSB_PHYS
471 424 /*
472 425 * UTSB_PHYS accesses user TSBs via physical addresses. The first
473 426 * TSB is in the MMU I/D TSB Base registers. The 2nd, 3rd and
474 427 * 4th TSBs use designated ASI_SCRATCHPAD regs as pseudo TSB base regs.
475 428 */
476 429
477 430 /* create/set first UTSBREG actually loaded into MMU_TSB */
478 431 MAKE_UTSBREG(%o1, %o2, %o3) ! %o2 = first utsbreg
479 432 LOAD_TSBREG(%o2, %o3, %o4) ! write TSB base register
480 433
481 434 brz,a,pt %g2, 2f
482 435 mov -1, %o2 ! use -1 if no second TSB
483 436
484 437 MAKE_UTSBREG(%g2, %o2, %o3) ! %o2 = second utsbreg
485 438 2:
486 439 SET_UTSBREG(SCRATCHPAD_UTSBREG2, %o2, %o3)
487 440
488 441 /* make 3rd and 4th TSB */
489 442 CPU_TSBMISS_AREA(%o4, %o3) ! %o4 = tsbmiss area
490 443
491 444 ldx [%o0 + SFMMU_SCDP], %g2 ! %g2 = sfmmu_scd
492 445 brz,pt %g2, 3f
493 446 mov -1, %o2 ! use -1 if no third TSB
494 447
495 448 ldx [%g2 + SCD_SFMMUP], %g3 ! %g3 = scdp->scd_sfmmup
496 449 ldx [%g3 + SFMMU_TSB], %o1 ! %o1 = first scd tsbinfo
497 450 brz,pn %o1, 5f
498 451 nop ! panic if no third TSB
499 452
500 453 /* make 3rd UTSBREG */
501 454 MAKE_UTSBREG(%o1, %o2, %o3) ! %o2 = third utsbreg
502 455 3:
503 456 SET_UTSBREG(SCRATCHPAD_UTSBREG3, %o2, %o3)
504 457 stn %o2, [%o4 + TSBMISS_TSBSCDPTR]
505 458
506 459 brz,pt %g2, 4f
507 460 mov -1, %o2 ! use -1 if no 3rd or 4th TSB
508 461
509 462 ldx [%o1 + TSBINFO_NEXTPTR], %g2 ! %g2 = second scd tsbinfo
510 463 brz,pt %g2, 4f
511 464 mov -1, %o2 ! use -1 if no 4th TSB
512 465
513 466 /* make 4th UTSBREG */
514 467 MAKE_UTSBREG(%g2, %o2, %o3) ! %o2 = fourth utsbreg
515 468 4:
516 469 SET_UTSBREG(SCRATCHPAD_UTSBREG4, %o2, %o3)
517 470 stn %o2, [%o4 + TSBMISS_TSBSCDPTR4M]
518 471 ba,pt %icc, 6f
519 472 mov %o4, %o2 ! %o2 = tsbmiss area
520 473 5:
521 474 sethi %hi(panicstr), %g1 ! panic if no 3rd TSB
522 475 ldx [%g1 + %lo(panicstr)], %g1
523 476 tst %g1
524 477
525 478 bnz,pn %xcc, 8f
526 479 nop
527 480
528 481 sethi %hi(sfmmu_panic10), %o0
529 482 call panic
530 483 or %o0, %lo(sfmmu_panic10), %o0
531 484
 532 485   #else /* UTSB_PHYS */
533 486
534 487 brz,pt %g2, 4f
535 488 nop
536 489 /*
537 490 * We have a second TSB for this process, so we need to
538 491 * encode data for both the first and second TSB in our single
539 492 * TSB base register. See hat_sfmmu.h for details on what bits
540 493 * correspond to which TSB.
541 494 * We also need to load a locked TTE into the TLB for the second TSB
542 495 * in this case.
543 496 */
544 497 MAKE_TSBREG_SECTSB(%o2, %o1, %g2, %o3, %o4, %g3, sfmmu_tsb_2nd)
545 498 ! %o2 = tsbreg
546 499 sethi %hi(utsb4m_dtlb_ttenum), %o3
547 500 sethi %hi(utsb4m_vabase), %o4
548 501 ld [%o3 + %lo(utsb4m_dtlb_ttenum)], %o3
549 502 ldx [%o4 + %lo(utsb4m_vabase)], %o4 ! %o4 = TLB tag for sec TSB
550 503 sll %o3, DTACC_SHIFT, %o3 ! %o3 = sec TSB TLB index
551 504 RESV_OFFSET(%g2, %o4, %g3, sfmmu_tsb_2nd) ! or-in bits of TSB VA
552 505 LOAD_TSBTTE(%g2, %o3, %o4, %g3) ! load sec TSB locked TTE
553 506 sethi %hi(utsb_vabase), %g3
554 507 ldx [%g3 + %lo(utsb_vabase)], %g3 ! %g3 = TLB tag for first TSB
555 508 ba,pt %xcc, 5f
556 509 nop
557 510
558 511 4: sethi %hi(utsb_vabase), %g3
559 512 ldx [%g3 + %lo(utsb_vabase)], %g3 ! %g3 = TLB tag for first TSB
560 513 MAKE_TSBREG(%o2, %o1, %g3, %o3, %o4, sfmmu_tsb_1st) ! %o2 = tsbreg
561 514
562 515 5: LOAD_TSBREG(%o2, %o3, %o4) ! write TSB base register
563 516
564 517 /*
565 518 * Load the TTE for the first TSB at the appropriate location in
566 519 * the TLB
567 520 */
568 521 sethi %hi(utsb_dtlb_ttenum), %o2
569 522 ld [%o2 + %lo(utsb_dtlb_ttenum)], %o2
 570 523   	sll	%o2, DTACC_SHIFT, %o2		! %o2 = first TSB TLB index
571 524 RESV_OFFSET(%o1, %g3, %o3, sfmmu_tsb_1st) ! or-in bits of TSB VA
572 525 LOAD_TSBTTE(%o1, %o2, %g3, %o4) ! load first TSB locked TTE
573 526 CPU_TSBMISS_AREA(%o2, %o3)
574 527 #endif /* UTSB_PHYS */
575 528 6:
576 529 ldx [%o0 + SFMMU_ISMBLKPA], %o1 ! copy members of sfmmu
577 530 ! we need to access from
578 531 stx %o1, [%o2 + TSBMISS_ISMBLKPA] ! sfmmu_tsb_miss into the
579 532 ldub [%o0 + SFMMU_TTEFLAGS], %o3 ! per-CPU tsbmiss area.
580 533 stx %o0, [%o2 + TSBMISS_UHATID]
581 534 stub %o3, [%o2 + TSBMISS_UTTEFLAGS]
582 535 #ifdef UTSB_PHYS
583 536 ldx [%o0 + SFMMU_SRDP], %o1
584 537 ldub [%o0 + SFMMU_RTTEFLAGS], %o4
585 538 stub %o4, [%o2 + TSBMISS_URTTEFLAGS]
586 539 stx %o1, [%o2 + TSBMISS_SHARED_UHATID]
587 540 brz,pn %o1, 8f ! check for sfmmu_srdp
588 541 add %o0, SFMMU_HMERMAP, %o1
589 542 add %o2, TSBMISS_SHMERMAP, %o2
590 543 mov SFMMU_HMERGNMAP_WORDS, %o3
591 544 ! set tsbmiss shmermap
592 545 SET_REGION_MAP(%o1, %o2, %o3, %o4, load_shme_mmustate)
593 546
594 547 ldx [%o0 + SFMMU_SCDP], %o4 ! %o4 = sfmmu_scd
595 548 CPU_TSBMISS_AREA(%o2, %o3) ! %o2 = tsbmiss area
596 549 mov SFMMU_HMERGNMAP_WORDS, %o3
597 550 brnz,pt %o4, 7f ! check for sfmmu_scdp else
598 551 add %o2, TSBMISS_SCDSHMERMAP, %o2 ! zero tsbmiss scd_shmermap
599 552 ZERO_REGION_MAP(%o2, %o3, zero_scd_mmustate)
600 553 ba 8f
601 554 nop
602 555 7:
603 556 add %o4, SCD_HMERMAP, %o1
604 557 SET_REGION_MAP(%o1, %o2, %o3, %o4, load_scd_mmustate)
605 558 #endif /* UTSB_PHYS */
606 559
607 560 8:
608 561 retl
609 562 nop
610 563 SET_SIZE(sfmmu_load_mmustate)
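
For orientation, here is a hedged C sketch of the UTSB_PHYS register-setup portion of sfmmu_load_mmustate. The accessors and make_utsbreg()/set_utsbreg() are hypothetical stand-ins for the assym offsets (SFMMU_TSB, TSBINFO_NEXTPTR, SFMMU_SCDP, SCD_SFMMUP) and the MAKE_UTSBREG/SET_UTSBREG macros; the tsbmiss-area updates and the non-UTSB_PHYS path are omitted.

/* Hedged sketch of the UTSB_PHYS TSB-register setup; accessor names are hypothetical. */
static void
load_utsb_regs_sketch(sfmmu_t *sfmmup)
{
	struct tsb_info *t1 = first_tsbinfo(sfmmup);	/* first TSB, always present */
	struct tsb_info *t2 = next_tsbinfo(t1);		/* second TSB, may be NULL */
	void *scdp = scd_of(sfmmup);			/* shared-context domain, if any */
	struct tsb_info *s1, *s2;

	load_tsbreg(make_utsbreg(t1));			/* 1st TSB: MMU I/D TSB base regs */
	set_utsbreg(SCRATCHPAD_UTSBREG2,
	    (t2 != NULL) ? make_utsbreg(t2) : -1);	/* -1 means "no such TSB" */

	if (scdp == NULL) {				/* no SCD: no 3rd or 4th TSB */
		set_utsbreg(SCRATCHPAD_UTSBREG3, -1);
		set_utsbreg(SCRATCHPAD_UTSBREG4, -1);
		return;
	}
	s1 = first_tsbinfo(scd_hat(scdp));		/* 3rd TSB; the assembly panics if absent */
	s2 = next_tsbinfo(s1);				/* 4th TSB, may be NULL */
	set_utsbreg(SCRATCHPAD_UTSBREG3, make_utsbreg(s1));
	set_utsbreg(SCRATCHPAD_UTSBREG4, (s2 != NULL) ? make_utsbreg(s2) : -1);
}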
611 564
612 -#endif /* lint */
613 -
614 -#if defined (lint)
615 565 /*
616 - * Invalidate all of the entries within the tsb, by setting the inv bit
566 + * Invalidate all of the entries within the TSB, by setting the inv bit
617 567 * in the tte_tag field of each tsbe.
618 568 *
619 - * We take advantage of the fact TSBs are page aligned and a multiple of
620 - * PAGESIZE to use block stores.
569 + * We take advantage of the fact that the TSBs are page aligned and a
570 + * multiple of PAGESIZE to use ASI_BLK_INIT_xxx ASI.
621 571 *
622 572 * See TSB_LOCK_ENTRY and the miss handlers for how this works in practice
623 573 * (in short, we set all bits in the upper word of the tag, and we give the
624 574 * invalid bit precedence over other tag bits in both places).
625 575 */
626 -/* ARGSUSED */
627 -void
628 -sfmmu_inv_tsb_fast(caddr_t tsb_base, uint_t tsb_bytes)
629 -{}
630 576
631 -#else /* lint */
632 -
633 577 #define VIS_BLOCKSIZE 64
634 578
635 579 ENTRY(sfmmu_inv_tsb_fast)
636 580
637 581 ! Get space for aligned block of saved fp regs.
638 582 save %sp, -SA(MINFRAME + 2*VIS_BLOCKSIZE), %sp
639 583
640 584 ! kpreempt_disable();
641 585 ldsb [THREAD_REG + T_PREEMPT], %l3
642 586 inc %l3
643 587 stb %l3, [THREAD_REG + T_PREEMPT]
644 588
645 589 ! See if fpu was in use. If it was, we need to save off the
646 590 ! floating point registers to the stack.
647 591 rd %fprs, %l0 ! %l0 = cached copy of fprs
648 592 btst FPRS_FEF, %l0
649 593 bz,pt %icc, 4f
650 594 nop
651 595
652 596 ! save in-use fpregs on stack
653 597 membar #Sync ! make sure tranx to fp regs
654 598 ! have completed
655 599 add %fp, STACK_BIAS - 65, %l1 ! get stack frame for fp regs
656 600 and %l1, -VIS_BLOCKSIZE, %l1 ! block align frame
657 601 stda %d0, [%l1]ASI_BLK_P ! %l1 = addr of saved fp regs
658 602
659 603 ! enable fp
660 604 4: membar #StoreStore|#StoreLoad|#LoadStore
661 605 wr %g0, FPRS_FEF, %fprs
662 606 wr %g0, ASI_BLK_P, %asi
663 607
664 608 ! load up FP registers with invalid TSB tag.
665 609 fone %d0 ! ones in tag
666 610 fzero %d2 ! zeros in TTE
667 611 fone %d4 ! ones in tag
668 612 fzero %d6 ! zeros in TTE
669 613 fone %d8 ! ones in tag
670 614 fzero %d10 ! zeros in TTE
671 615 fone %d12 ! ones in tag
672 616 fzero %d14 ! zeros in TTE
673 617 ba,pt %xcc, .sfmmu_inv_doblock
674 618 mov (4*VIS_BLOCKSIZE), %i4 ! we do 4 stda's each loop below
675 619
676 620 .sfmmu_inv_blkstart:
677 621 ! stda %d0, [%i0+192]%asi ! in dly slot of branch that got us here
678 622 stda %d0, [%i0+128]%asi
679 623 stda %d0, [%i0+64]%asi
680 624 stda %d0, [%i0]%asi
681 625
682 626 add %i0, %i4, %i0
683 627 sub %i1, %i4, %i1
684 628
685 629 .sfmmu_inv_doblock:
686 630 cmp %i1, (4*VIS_BLOCKSIZE) ! check for completion
687 631 bgeu,a %icc, .sfmmu_inv_blkstart
688 632 stda %d0, [%i0+192]%asi
689 633
690 634 .sfmmu_inv_finish:
691 635 membar #Sync
692 636 btst FPRS_FEF, %l0 ! saved from above
693 637 bz,a .sfmmu_inv_finished
694 638 wr %l0, 0, %fprs ! restore fprs
695 639
696 640 ! restore fpregs from stack
697 641 ldda [%l1]ASI_BLK_P, %d0
698 642 membar #Sync
699 643 wr %l0, 0, %fprs ! restore fprs
700 644
701 645 .sfmmu_inv_finished:
702 646 ! kpreempt_enable();
703 647 ldsb [THREAD_REG + T_PREEMPT], %l3
704 648 dec %l3
705 649 stb %l3, [THREAD_REG + T_PREEMPT]
706 650 ret
707 651 restore
708 652 SET_SIZE(sfmmu_inv_tsb_fast)
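
Expressed per entry in C, the block stores above write the following pattern: all ones in the 8-byte tag word (so the invalid bit is set) and zeros in the 8-byte TTE. This is only a sketch of the effect; the real routine emits the same pattern 64 bytes at a time through the FP registers and a block-store ASI.

/* Per-entry C rendering of what sfmmu_inv_tsb_fast accomplishes (a sketch). */
static void
inv_tsb_slow_sketch(caddr_t tsb_base, uint_t tsb_bytes)
{
	uint64_t *p = (uint64_t *)tsb_base;
	uint64_t *end = (uint64_t *)(tsb_base + tsb_bytes);

	while (p < end) {
		*p++ = (uint64_t)-1;	/* tag word: all ones, invalid bit set */
		*p++ = 0;		/* TTE data: zero */
	}
}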
709 653
710 -#endif /* lint */
711 -
712 -#if defined(lint)
713 -
714 654 /*
715 655 * Prefetch "struct tsbe" while walking TSBs.
716 656 * prefetch 7 cache lines ahead of where we are at now.
717 657 * #n_reads is being used since #one_read only applies to
718 658 * floating point reads, and we are not doing floating point
719 659 * reads. However, this has the negative side effect of polluting
720 660 * the ecache.
721 661 * The 448 comes from (7 * 64) which is how far ahead of our current
722 662 * address, we want to prefetch.
723 663 */
724 -/*ARGSUSED*/
725 -void
726 -prefetch_tsbe_read(struct tsbe *tsbep)
727 -{}
728 -
729 -/* Prefetch the tsbe that we are about to write */
730 -/*ARGSUSED*/
731 -void
732 -prefetch_tsbe_write(struct tsbe *tsbep)
733 -{}
734 -
735 -#else /* lint */
736 -
737 664 ENTRY(prefetch_tsbe_read)
738 665 retl
739 666 prefetch [%o0+448], #n_reads
740 667 SET_SIZE(prefetch_tsbe_read)
741 668
669 +/* Prefetch the tsbe that we are about to write */
742 670 ENTRY(prefetch_tsbe_write)
743 671 retl
744 672 prefetch [%o0], #n_writes
745 673 SET_SIZE(prefetch_tsbe_write)
746 -#endif /* lint */
747 674
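
For comparison, roughly equivalent C for the two prefetch helpers using GCC's builtin; #n_reads/#n_writes have no portable counterpart, so the rw/locality hints below only approximate the variant selection, and 448 is 7 cache lines of 64 bytes.

/* Hedged C approximations of prefetch_tsbe_read/prefetch_tsbe_write. */
static inline void
prefetch_tsbe_read_sketch(struct tsbe *tsbep)
{
	__builtin_prefetch((char *)tsbep + 7 * 64, 0, 0);	/* read, low temporal locality */
}

static inline void
prefetch_tsbe_write_sketch(struct tsbe *tsbep)
{
	__builtin_prefetch(tsbep, 1, 0);			/* write */
}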