de-linting of .s files
--- old/usr/src/uts/sun4u/cpu/us3_cheetahplus_asm.s
+++ new/usr/src/uts/sun4u/cpu/us3_cheetahplus_asm.s
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 *
25 25 * Assembly code support for the Cheetah+ module
26 26 */
27 27
28 -#pragma ident "%Z%%M% %I% %E% SMI"
29 -
30 -#if !defined(lint)
31 28 #include "assym.h"
32 -#endif /* lint */
33 29
34 30 #include <sys/asm_linkage.h>
35 31 #include <sys/mmu.h>
36 32 #include <vm/hat_sfmmu.h>
37 33 #include <sys/machparam.h>
38 34 #include <sys/machcpuvar.h>
39 35 #include <sys/machthread.h>
40 36 #include <sys/machtrap.h>
41 37 #include <sys/privregs.h>
42 38 #include <sys/asm_linkage.h>
43 39 #include <sys/trap.h>
44 40 #include <sys/cheetahregs.h>
45 41 #include <sys/us3_module.h>
46 42 #include <sys/xc_impl.h>
47 43 #include <sys/intreg.h>
48 44 #include <sys/async.h>
49 45 #include <sys/clock.h>
50 46 #include <sys/cheetahasm.h>
51 47 #include <sys/cmpregs.h>
52 48
53 49 #ifdef TRAPTRACE
54 50 #include <sys/traptrace.h>
55 51 #endif /* TRAPTRACE */
56 52
57 53
58 -#if !defined(lint)
59 -
60 54 /* BEGIN CSTYLED */
61 55
62 56 /*
63 57 * Cheetah+ version to reflush an Ecache line by index.
64 58 *
65 59 * By default we assume the Ecache is 2-way so we flush both
66 60 * ways. Even if the cache is direct-mapped no harm will come
67 61 * from performing the flush twice, apart from perhaps a performance
68 62 * penalty.
69 63 *
70 64 * XXX - scr2 not used.
71 65 */
72 66 #define ECACHE_REFLUSH_LINE(ec_set_size, index, scr2) \
73 67 ldxa [index]ASI_EC_DIAG, %g0; \
74 68 ldxa [index + ec_set_size]ASI_EC_DIAG, %g0;
75 69
76 70 /*
77 71 * Cheetah+ version of ecache_flush_line. Uses Cheetah+ Ecache Displacement
78 72 * Flush feature.
79 73 */
80 74 #define ECACHE_FLUSH_LINE(physaddr, ec_set_size, scr1, scr2) \
81 75 sub ec_set_size, 1, scr1; \
82 76 and physaddr, scr1, scr1; \
83 77 set CHP_ECACHE_IDX_DISP_FLUSH, scr2; \
84 78 or scr2, scr1, scr1; \
85 79 ECACHE_REFLUSH_LINE(ec_set_size, scr1, scr2)
86 80
87 81 /* END CSTYLED */
88 82
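In C terms, the two macros above compute roughly the following (a sketch only: chp_ecache_diag_read() is a hypothetical stand-in for the ldxa from ASI_EC_DIAG, and the set size and displacement-flush bits are passed in rather than taken from the headers):

	#include <stdint.h>

	/* Hypothetical stand-in for ldxa [index]ASI_EC_DIAG, %g0. */
	extern void chp_ecache_diag_read(uint64_t index);

	void
	chp_ecache_flush_line_model(uint64_t physaddr, uint64_t ec_set_size,
	    uint64_t idx_disp_flush_bits)
	{
		/*
		 * ECACHE_FLUSH_LINE: offset of the line within one set,
		 * tagged with the displacement-flush index bits.
		 */
		uint64_t index = (physaddr & (ec_set_size - 1)) |
		    idx_disp_flush_bits;

		/*
		 * ECACHE_REFLUSH_LINE: touch both ways; way 1 sits one
		 * set size above way 0 in the diagnostic address space.
		 */
		chp_ecache_diag_read(index);
		chp_ecache_diag_read(index + ec_set_size);
	}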
89 83 /*
90 84 * Panther version to reflush a line from both the L2 cache and L3
91 85 * cache by the respective indexes. Flushes all ways of the line from
92 86 * each cache.
93 87 *
94 88 * l2_index Index into the L2$ of the line to be flushed. This
95 89 * register will not be modified by this routine.
96 90 * l3_index Index into the L3$ of the line to be flushed. This
97 91 * register will not be modified by this routine.
98 92 * scr2 scratch register.
99 93 * scr3 scratch register.
100 94 *
101 95 */
102 96 #define PN_ECACHE_REFLUSH_LINE(l2_index, l3_index, scr2, scr3) \
103 97 set PN_L2_MAX_SET, scr2; \
104 98 set PN_L2_SET_SIZE, scr3; \
105 99 1: \
106 100 ldxa [l2_index + scr2]ASI_L2_TAG, %g0; \
107 101 cmp scr2, %g0; \
108 102 bg,a 1b; \
109 103 sub scr2, scr3, scr2; \
110 104 mov 6, scr2; \
111 105 7: \
112 106 cmp scr2, %g0; \
113 107 bg,a 7b; \
114 108 sub scr2, 1, scr2; \
115 109 set PN_L3_MAX_SET, scr2; \
116 110 set PN_L3_SET_SIZE, scr3; \
117 111 2: \
118 112 ldxa [l3_index + scr2]ASI_EC_DIAG, %g0; \
119 113 cmp scr2, %g0; \
120 114 bg,a 2b; \
121 115 sub scr2, scr3, scr2;
122 116
123 117 /*
124 118 * Panther version of ecache_flush_line. Flushes the line corresponding
125 119 * to physaddr from both the L2 cache and the L3 cache.
126 120 *
127 121 * physaddr Input: Physical address to flush.
128 122 * Output: Physical address to flush (preserved).
129 123 * l2_idx_out Input: scratch register.
130 124 * Output: Index into the L2$ of the line to be flushed.
131 125 * l3_idx_out Input: scratch register.
132 126 * Output: Index into the L3$ of the line to be flushed.
133 127 * scr3 scratch register.
134 128 * scr4 scratch register.
135 129 *
136 130 */
137 131 #define PN_ECACHE_FLUSH_LINE(physaddr, l2_idx_out, l3_idx_out, scr3, scr4) \
138 132 set PN_L3_SET_SIZE, l2_idx_out; \
139 133 sub l2_idx_out, 1, l2_idx_out; \
140 134 and physaddr, l2_idx_out, l3_idx_out; \
141 135 set PN_L3_IDX_DISP_FLUSH, l2_idx_out; \
142 136 or l2_idx_out, l3_idx_out, l3_idx_out; \
143 137 set PN_L2_SET_SIZE, l2_idx_out; \
144 138 sub l2_idx_out, 1, l2_idx_out; \
145 139 and physaddr, l2_idx_out, l2_idx_out; \
146 140 set PN_L2_IDX_DISP_FLUSH, scr3; \
147 141 or l2_idx_out, scr3, l2_idx_out; \
148 142 PN_ECACHE_REFLUSH_LINE(l2_idx_out, l3_idx_out, scr3, scr4)
149 143
150 -#endif /* !lint */
151 -
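The Panther macros admit a similar C restatement (again a sketch: pn_l2_tag_read() and pn_l3_diag_read() are hypothetical stand-ins for the ldxa accesses to ASI_L2_TAG and ASI_EC_DIAG, and the PN_* constants are assumed to come from the cheetahregs headers included above):

	#include <stdint.h>

	extern void pn_l2_tag_read(uint64_t index);	/* ldxa ...ASI_L2_TAG */
	extern void pn_l3_diag_read(uint64_t index);	/* ldxa ...ASI_EC_DIAG */

	void
	pn_ecache_flush_line_model(uint64_t physaddr)
	{
		/* PN_ECACHE_FLUSH_LINE: one displacement-flush index per cache. */
		uint64_t l3_index = (physaddr & (PN_L3_SET_SIZE - 1)) |
		    PN_L3_IDX_DISP_FLUSH;
		uint64_t l2_index = (physaddr & (PN_L2_SET_SIZE - 1)) |
		    PN_L2_IDX_DISP_FLUSH;
		volatile int spin;
		int64_t off;

		/*
		 * PN_ECACHE_REFLUSH_LINE: touch every way of the L2 line,
		 * stepping the per-way offset from PN_L2_MAX_SET down to 0.
		 */
		for (off = PN_L2_MAX_SET; off >= 0; off -= PN_L2_SET_SIZE)
			pn_l2_tag_read(l2_index + off);

		for (spin = 6; spin > 0; spin--)	/* brief wait loop */
			;

		/* Then every way of the L3 line. */
		for (off = PN_L3_MAX_SET; off >= 0; off -= PN_L3_SET_SIZE)
			pn_l3_diag_read(l3_index + off);
	}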
152 144 /*
153 145 * Fast ECC error at TL>0 handler
154 146 * We get here via trap 70 at TL>0->Software trap 0 at TL>0. We enter
155 147 * this routine with %g1 and %g2 already saved in %tpc, %tnpc and %tstate.
156 148 * For a complete description of the Fast ECC at TL>0 handling see the
157 149 * comment block "Cheetah/Cheetah+ Fast ECC at TL>0 trap strategy" in
158 150 * us3_common_asm.s
159 151 */
160 -#if defined(lint)
161 152
162 -void
163 -fast_ecc_tl1_err(void)
164 -{}
165 -
166 -#else /* lint */
167 -
168 153 .section ".text"
169 154 .align 64
170 155 ENTRY_NP(fast_ecc_tl1_err)
171 156
172 157 /*
173 158 * This macro turns off the D$/I$ if they are on and saves their
174 159 * original state in ch_err_tl1_tmp, saves all the %g registers in the
175 160 * ch_err_tl1_data structure, updates the ch_err_tl1_flags and saves
176 161 * the %tpc in ch_err_tl1_tpc. At the end of this macro, %g1 will
177 162 * point to the ch_err_tl1_data structure and the original D$/I$ state
178 163 * will be saved in ch_err_tl1_tmp. All %g registers except for %g1
179 164 * will be available.
180 165 */
181 166 CH_ERR_TL1_FECC_ENTER;
182 167
183 168 /*
184 169 * Get the diagnostic logout data. %g4 must be initialized to
185 170 * current CEEN state, %g5 must point to logout structure in
186 171 * ch_err_tl1_data_t. %g3 will contain the nesting count upon
187 172 * return.
188 173 */
189 174 ldxa [%g0]ASI_ESTATE_ERR, %g4
190 175 and %g4, EN_REG_CEEN, %g4
191 176 add %g1, CH_ERR_TL1_LOGOUT, %g5
192 177 DO_TL1_CPU_LOGOUT(%g3, %g2, %g4, %g5, %g6, %g3, %g4)
193 178
194 179 /*
195 180 * If the logout nesting count is exceeded, we're probably
196 181 * not making any progress, try to panic instead.
197 182 */
198 183 cmp %g3, CLO_NESTING_MAX
199 184 bge fecc_tl1_err
200 185 nop
201 186
202 187 /*
203 188 * Save the current CEEN and NCEEN state in %g7 and turn them off
204 189 * before flushing the Ecache.
205 190 */
206 191 ldxa [%g0]ASI_ESTATE_ERR, %g7
207 192 andn %g7, EN_REG_CEEN | EN_REG_NCEEN, %g5
208 193 stxa %g5, [%g0]ASI_ESTATE_ERR
209 194 membar #Sync
210 195
211 196 /*
212 197 * Flush the Ecache, using the largest possible cache size with the
213 198 * smallest possible line size since we can't get the actual sizes
214 199 * from the cpu_node due to DTLB misses.
215 200 */
216 201 PN_L2_FLUSHALL(%g3, %g4, %g5)
217 202
218 203 set CH_ECACHE_MAX_SIZE, %g4
219 204 set CH_ECACHE_MIN_LSIZE, %g5
220 205
221 206 GET_CPU_IMPL(%g6)
222 207 cmp %g6, PANTHER_IMPL
223 208 bne %xcc, 2f
224 209 nop
225 210 set PN_L3_SIZE, %g4
226 211 2:
227 212 mov %g6, %g3
228 213 CHP_ECACHE_FLUSHALL(%g4, %g5, %g3)
229 214
230 215 /*
231 216 * Restore CEEN and NCEEN to the previous state.
232 217 */
233 218 stxa %g7, [%g0]ASI_ESTATE_ERR
234 219 membar #Sync
235 220
236 221 /*
237 222 * If we turned off the D$, then flush it and turn it back on.
238 223 */
239 224 ldxa [%g1 + CH_ERR_TL1_TMP]%asi, %g3
240 225 andcc %g3, CH_ERR_TSTATE_DC_ON, %g0
241 226 bz %xcc, 3f
242 227 nop
243 228
244 229 /*
245 230 * Flush the D$.
246 231 */
247 232 ASM_LD(%g4, dcache_size)
248 233 ASM_LD(%g5, dcache_linesize)
249 234 CH_DCACHE_FLUSHALL(%g4, %g5, %g6)
250 235
251 236 /*
252 237 * Turn the D$ back on.
253 238 */
254 239 ldxa [%g0]ASI_DCU, %g3
255 240 or %g3, DCU_DC, %g3
256 241 stxa %g3, [%g0]ASI_DCU
257 242 membar #Sync
258 243 3:
259 244 /*
260 245 * If we turned off the I$, then flush it and turn it back on.
261 246 */
262 247 ldxa [%g1 + CH_ERR_TL1_TMP]%asi, %g3
263 248 andcc %g3, CH_ERR_TSTATE_IC_ON, %g0
264 249 bz %xcc, 4f
265 250 nop
266 251
267 252 /*
268 253 * Flush the I$. Panther has different I$ parameters, and we
269 254 * can't access the logout I$ params without possibly generating
270 255 	 * an MMU miss.
271 256 */
272 257 GET_CPU_IMPL(%g6)
273 258 set PN_ICACHE_SIZE, %g3
274 259 set CH_ICACHE_SIZE, %g4
275 260 mov CH_ICACHE_LSIZE, %g5
276 261 cmp %g6, PANTHER_IMPL
277 262 movz %xcc, %g3, %g4
278 263 movz %xcc, PN_ICACHE_LSIZE, %g5
279 264 CH_ICACHE_FLUSHALL(%g4, %g5, %g6, %g3)
280 265
281 266 /*
282 267 * Turn the I$ back on. Changing DCU_IC requires flush.
283 268 */
284 269 ldxa [%g0]ASI_DCU, %g3
285 270 or %g3, DCU_IC, %g3
286 271 stxa %g3, [%g0]ASI_DCU
287 272 flush %g0
288 273 4:
289 274
290 275 #ifdef TRAPTRACE
291 276 /*
292 277 * Get current trap trace entry physical pointer.
293 278 */
294 279 CPU_INDEX(%g6, %g5)
295 280 sll %g6, TRAPTR_SIZE_SHIFT, %g6
296 281 set trap_trace_ctl, %g5
297 282 add %g6, %g5, %g6
298 283 ld [%g6 + TRAPTR_LIMIT], %g5
299 284 tst %g5
300 285 be %icc, skip_traptrace
301 286 nop
302 287 ldx [%g6 + TRAPTR_PBASE], %g5
303 288 ld [%g6 + TRAPTR_OFFSET], %g4
304 289 add %g5, %g4, %g5
305 290
306 291 /*
307 292 * Create trap trace entry.
308 293 */
309 294 rd %asi, %g7
310 295 wr %g0, TRAPTR_ASI, %asi
311 296 rd STICK, %g4
312 297 stxa %g4, [%g5 + TRAP_ENT_TICK]%asi
313 298 rdpr %tl, %g4
314 299 stha %g4, [%g5 + TRAP_ENT_TL]%asi
315 300 rdpr %tt, %g4
316 301 stha %g4, [%g5 + TRAP_ENT_TT]%asi
317 302 rdpr %tpc, %g4
318 303 stna %g4, [%g5 + TRAP_ENT_TPC]%asi
319 304 rdpr %tstate, %g4
320 305 stxa %g4, [%g5 + TRAP_ENT_TSTATE]%asi
321 306 stna %sp, [%g5 + TRAP_ENT_SP]%asi
322 307 stna %g0, [%g5 + TRAP_ENT_TR]%asi
323 308 wr %g0, %g7, %asi
324 309 ldxa [%g1 + CH_ERR_TL1_SDW_AFAR]%asi, %g3
325 310 ldxa [%g1 + CH_ERR_TL1_SDW_AFSR]%asi, %g4
326 311 wr %g0, TRAPTR_ASI, %asi
327 312 stna %g3, [%g5 + TRAP_ENT_F1]%asi
328 313 stna %g4, [%g5 + TRAP_ENT_F2]%asi
329 314 wr %g0, %g7, %asi
330 315 ldxa [%g1 + CH_ERR_TL1_AFAR]%asi, %g3
331 316 ldxa [%g1 + CH_ERR_TL1_AFSR]%asi, %g4
332 317 wr %g0, TRAPTR_ASI, %asi
333 318 stna %g3, [%g5 + TRAP_ENT_F3]%asi
334 319 stna %g4, [%g5 + TRAP_ENT_F4]%asi
335 320 wr %g0, %g7, %asi
336 321
337 322 /*
338 323 * Advance trap trace pointer.
339 324 */
340 325 ld [%g6 + TRAPTR_OFFSET], %g5
341 326 ld [%g6 + TRAPTR_LIMIT], %g4
342 327 st %g5, [%g6 + TRAPTR_LAST_OFFSET]
343 328 add %g5, TRAP_ENT_SIZE, %g5
344 329 sub %g4, TRAP_ENT_SIZE, %g4
345 330 cmp %g5, %g4
346 331 movge %icc, 0, %g5
347 332 st %g5, [%g6 + TRAPTR_OFFSET]
348 333 skip_traptrace:
349 334 #endif /* TRAPTRACE */
350 335
351 336 /*
352 337 * If nesting count is not zero, skip all the AFSR/AFAR
353 338 * handling and just do the necessary cache-flushing.
354 339 */
355 340 ldxa [%g1 + CH_ERR_TL1_NEST_CNT]%asi, %g2
356 341 brnz %g2, 6f
357 342 nop
358 343
359 344 /*
360 345 * If a UCU or L3_UCU followed by a WDU has occurred go ahead
361 346 * and panic since a UE will occur (on the retry) before the
362 347 * UCU and WDU messages are enqueued. On a Panther processor,
363 348 * we need to also see an L3_WDU before panicking. Note that
364 349 * we avoid accessing the _EXT ASIs if not on a Panther.
365 350 */
366 351 ldxa [%g1 + CH_ERR_TL1_SDW_AFSR]%asi, %g3
367 352 set 1, %g4
368 353 sllx %g4, C_AFSR_UCU_SHIFT, %g4
369 354 btst %g4, %g3 ! UCU in original shadow AFSR?
370 355 bnz %xcc, 5f
371 356 nop
372 357 GET_CPU_IMPL(%g6)
373 358 cmp %g6, PANTHER_IMPL
374 359 bne %xcc, 6f ! not Panther, no UCU, skip the rest
375 360 nop
376 361 ldxa [%g1 + CH_ERR_TL1_SDW_AFSR_EXT]%asi, %g3
377 362 btst C_AFSR_L3_UCU, %g3 ! L3_UCU in original shadow AFSR_EXT?
378 363 bz %xcc, 6f ! neither UCU nor L3_UCU was seen
379 364 nop
380 365 5:
381 366 ldxa [%g1 + CH_ERR_TL1_AFSR]%asi, %g4 ! original AFSR
382 367 ldxa [%g0]ASI_AFSR, %g3 ! current AFSR
383 368 or %g3, %g4, %g3 ! %g3 = original + current AFSR
384 369 set 1, %g4
385 370 sllx %g4, C_AFSR_WDU_SHIFT, %g4
386 371 btst %g4, %g3 ! WDU in original or current AFSR?
387 372 bz %xcc, 6f ! no WDU, skip remaining tests
388 373 nop
389 374 GET_CPU_IMPL(%g6)
390 375 cmp %g6, PANTHER_IMPL
391 376 bne %xcc, fecc_tl1_err ! if not Panther, panic (saw UCU, WDU)
392 377 nop
393 378 ldxa [%g1 + CH_ERR_TL1_SDW_AFSR_EXT]%asi, %g4 ! original AFSR_EXT
394 379 set ASI_AFSR_EXT_VA, %g6 ! ASI of current AFSR_EXT
395 380 ldxa [%g6]ASI_AFSR, %g3 ! value of current AFSR_EXT
396 381 or %g3, %g4, %g3 ! %g3 = original + current AFSR_EXT
397 382 btst C_AFSR_L3_WDU, %g3 ! L3_WDU in original or current AFSR?
398 383 bnz %xcc, fecc_tl1_err ! panic (saw L3_WDU and UCU or L3_UCU)
399 384 nop
400 385 6:
401 386 /*
402 387 * We fall into this macro if we've successfully logged the error in
403 388 * the ch_err_tl1_data structure and want the PIL15 softint to pick
404 389 * it up and log it. %g1 must point to the ch_err_tl1_data structure.
405 390 * Restores the %g registers and issues retry.
406 391 */
407 392 CH_ERR_TL1_EXIT;
408 393
409 394 /*
410 395 * Establish panic exit label.
411 396 */
412 397 CH_ERR_TL1_PANIC_EXIT(fecc_tl1_err);
413 398
414 399 SET_SIZE(fast_ecc_tl1_err)
415 400
416 -#endif /* lint */
417 401
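The UCU/WDU branch maze in the handler above reduces to a boolean predicate; a C restatement may help reviewers check it (a sketch: C_AFSR_UCU and C_AFSR_WDU here denote the bit masks built from the *_SHIFT constants in the assembly, and the caller is assumed to have already OR-ed the original and current AFSR/AFSR_EXT values):

	#include <stdint.h>

	/*
	 * Nonzero means panic: a UCU (or, on Panther, an L3_UCU) paired
	 * with a WDU implies a UE will occur on the retry before the
	 * queued error messages are processed; Panther must additionally
	 * have seen an L3_WDU.
	 */
	static int
	fecc_should_panic(int is_panther, uint64_t sdw_afsr,
	    uint64_t sdw_afsr_ext, uint64_t all_afsr, uint64_t all_afsr_ext)
	{
		int saw_ucu = (sdw_afsr & C_AFSR_UCU) != 0 ||
		    (is_panther && (sdw_afsr_ext & C_AFSR_L3_UCU) != 0);

		if (!saw_ucu)
			return (0);		/* branch to label 6: */
		if ((all_afsr & C_AFSR_WDU) == 0)
			return (0);		/* no WDU, skip the rest */
		if (!is_panther)
			return (1);		/* UCU + WDU */
		return ((all_afsr_ext & C_AFSR_L3_WDU) != 0);
	}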
418 -
419 -#if defined(lint)
420 -/*
421 - * scrubphys - Pass in the aligned physical memory address
422 - * that you want to scrub, along with the ecache set size.
423 - *
424 - * 1) Displacement flush the E$ line corresponding to %addr.
425 - * The first ldxa guarantees that the %addr is no longer in
426 - * M, O, or E (it goes to I, or to S if an instruction fetch also happens).
427 - * 2) "Write" the data using a CAS %addr,%g0,%g0.
428 - * The casxa guarantees a transition from I to M or S to M.
429 - * 3) Displacement flush the E$ line corresponding to %addr.
430 - * The second ldxa pushes the M line out of the ecache, into the
431 - * writeback buffers, on the way to memory.
432 - * 4) The "membar #Sync" pushes the cache line out of the writeback
433 - * buffers onto the bus, and finally to DRAM.
434 - *
435 - * This is a modified version of the algorithm suggested by Gary Lauterbach.
436 - * In theory the CAS %addr,%g0,%g0 is supposed to mark the addr's cache line
437 - * as modified, but then we found out that on Spitfire, if it misses in the
438 - * E$ it will probably be installed as M, but if it hits in the E$, then it
439 - * will stay E if the store doesn't happen. So the first displacement flush
440 - * should ensure that the CAS will miss in the E$. Arrgh.
441 - */
442 -/* ARGSUSED */
443 -void
444 -scrubphys(uint64_t paddr, int ecache_set_size)
445 -{}
446 -
447 -#else /* lint */
448 402 ENTRY(scrubphys)
449 403 rdpr %pstate, %o4
450 404 andn %o4, PSTATE_IE | PSTATE_AM, %o5
451 405 wrpr %o5, %g0, %pstate ! clear IE, AM bits
452 406
453 407 GET_CPU_IMPL(%o5) ! Panther Ecache is flushed differently
454 408 cmp %o5, PANTHER_IMPL
455 409 bne scrubphys_1
456 410 nop
457 411 PN_ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3, %o5)
458 412 casxa [%o0]ASI_MEM, %g0, %g0
459 413 PN_ECACHE_REFLUSH_LINE(%o1, %o2, %o3, %o0)
460 414 b scrubphys_2
461 415 nop
462 416 scrubphys_1:
463 417 ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3)
464 418 casxa [%o0]ASI_MEM, %g0, %g0
465 419 ECACHE_REFLUSH_LINE(%o1, %o2, %o3)
466 420 scrubphys_2:
467 421 wrpr %g0, %o4, %pstate ! restore earlier pstate register value
468 422
469 423 retl
470 424 membar #Sync ! move the data out of the load buffer
471 425 SET_SIZE(scrubphys)
472 426
473 -#endif /* lint */
474 427
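The algorithm description for scrubphys lived in the lint block deleted above; restated as hedged pseudocode (every helper name below is a hypothetical stand-in for the corresponding instruction or macro):

	#include <stdint.h>

	extern uint64_t disable_ie_am(void);		/* rdpr/wrpr %pstate */
	extern void restore_pstate(uint64_t);
	extern void ecache_displacement_flush(uint64_t, int);
	extern void cas64_phys(uint64_t, uint64_t, uint64_t); /* casxa ASI_MEM */
	extern void membar_sync(void);

	void
	scrubphys_model(uint64_t paddr, int ecache_set_size)
	{
		uint64_t pstate = disable_ie_am();

		/* 1) First flush guarantees the line leaves M, O, or E. */
		ecache_displacement_flush(paddr, ecache_set_size);
		/* 2) The CAS forces an I->M (or S->M) transition. */
		cas64_phys(paddr, 0, 0);
		/*
		 * 3) Second flush pushes the M line into the writeback
		 *    buffers, on its way to memory.
		 */
		ecache_displacement_flush(paddr, ecache_set_size);

		restore_pstate(pstate);
		/* 4) Drain the writeback buffers onto the bus. */
		membar_sync();
	}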
475 -
476 -#if defined(lint)
477 -/*
478 - * clearphys - Pass in the physical memory address of the checkblock
479 - * that you want to push out, cleared with a recognizable pattern,
480 - * from the ecache.
481 - *
482 - * To ensure that the ecc gets recalculated after the bad data is cleared,
483 - * we must write out enough data to fill the w$ line (64 bytes). So we read
484 - * in an entire ecache subblock's worth of data, and write it back out.
485 - * Then we overwrite the 16 bytes of bad data with the pattern.
486 - */
487 -/* ARGSUSED */
488 -void
489 -clearphys(uint64_t paddr, int ecache_set_size, int ecache_linesize)
490 -{
491 -}
492 -
493 -#else /* lint */
494 428 ENTRY(clearphys)
495 429 /* turn off IE, AM bits */
496 430 rdpr %pstate, %o4
497 431 andn %o4, PSTATE_IE | PSTATE_AM, %o5
498 432 wrpr %o5, %g0, %pstate
499 433
500 434 /* turn off NCEEN */
501 435 ldxa [%g0]ASI_ESTATE_ERR, %o5
502 436 andn %o5, EN_REG_NCEEN, %o3
503 437 stxa %o3, [%g0]ASI_ESTATE_ERR
504 438 membar #Sync
505 439
506 440 /* align address passed with 64 bytes subblock size */
507 441 mov CH_ECACHE_SUBBLK_SIZE, %o2
508 442 andn %o0, (CH_ECACHE_SUBBLK_SIZE - 1), %g1
509 443
510 444 /* move the good data into the W$ */
511 445 clearphys_1:
512 446 subcc %o2, 8, %o2
513 447 ldxa [%g1 + %o2]ASI_MEM, %g2
514 448 bge clearphys_1
515 449 stxa %g2, [%g1 + %o2]ASI_MEM
516 450
517 451 /* now overwrite the bad data */
518 452 setx 0xbadecc00badecc01, %g1, %g2
519 453 stxa %g2, [%o0]ASI_MEM
520 454 mov 8, %g1
521 455 stxa %g2, [%o0 + %g1]ASI_MEM
522 456
523 457 GET_CPU_IMPL(%o3) ! Panther Ecache is flushed differently
524 458 cmp %o3, PANTHER_IMPL
525 459 bne clearphys_2
526 460 nop
527 461 PN_ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3, %g1)
528 462 casxa [%o0]ASI_MEM, %g0, %g0
529 463 PN_ECACHE_REFLUSH_LINE(%o1, %o2, %o3, %o0)
530 464 b clearphys_3
531 465 nop
532 466 clearphys_2:
533 467 ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3)
534 468 casxa [%o0]ASI_MEM, %g0, %g0
535 469 ECACHE_REFLUSH_LINE(%o1, %o2, %o3)
536 470 clearphys_3:
537 471 /* clear the AFSR */
538 472 ldxa [%g0]ASI_AFSR, %o1
539 473 stxa %o1, [%g0]ASI_AFSR
540 474 membar #Sync
541 475
542 476 /* turn NCEEN back on */
543 477 stxa %o5, [%g0]ASI_ESTATE_ERR
544 478 membar #Sync
545 479
546 480 /* return and re-enable IE and AM */
547 481 retl
548 482 wrpr %g0, %o4, %pstate
549 483 SET_SIZE(clearphys)
550 484
551 -#endif /* lint */
552 485
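clearphys, likewise: rewrite the surrounding 64-byte subblock so the W$ line fills and the ECC is recalculated, overwrite the 16 bytes of bad data with a recognizable pattern, then displacement flush. As a sketch (phys_read64()/phys_write64() are hypothetical stand-ins for the ldxa/stxa through ASI_MEM, CH_ECACHE_SUBBLK_SIZE is the constant the assembly uses, and NCEEN is assumed to be disabled around the whole sequence as the assembly does):

	#include <stdint.h>

	extern uint64_t phys_read64(uint64_t);
	extern void phys_write64(uint64_t, uint64_t);
	extern void ecache_displacement_flush(uint64_t, int);

	void
	clearphys_model(uint64_t paddr, int ec_set_size, int ec_linesize)
	{
		uint64_t base = paddr & ~(uint64_t)(CH_ECACHE_SUBBLK_SIZE - 1);
		int off;

		(void) ec_linesize;	/* kept to match the C prototype */

		/* Rewrite the whole subblock to fill the W$ line. */
		for (off = CH_ECACHE_SUBBLK_SIZE - 8; off >= 0; off -= 8)
			phys_write64(base + off, phys_read64(base + off));

		/* Overwrite the 16 bytes of bad data with the pattern. */
		phys_write64(paddr, 0xbadecc00badecc01ULL);
		phys_write64(paddr + 8, 0xbadecc00badecc01ULL);

		/* Push the line out of the E$ (L2 and L3 on Panther). */
		ecache_displacement_flush(paddr, ec_set_size);
	}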
553 -
554 -#if defined(lint)
555 -/*
556 - * Cheetah+ Ecache displacement flush the specified line from the E$
557 - *
558 - * For Panther, this means flushing the specified line from both the
559 - * L2 cache and L3 cache.
560 - *
561 - * Register usage:
562 - * %o0 - 64 bit physical address for flushing
563 - * %o1 - Ecache set size
564 - */
565 -/*ARGSUSED*/
566 -void
567 -ecache_flush_line(uint64_t flushaddr, int ec_set_size)
568 -{
569 -}
570 -#else /* lint */
571 486 ENTRY(ecache_flush_line)
572 487
573 488 GET_CPU_IMPL(%o3) ! Panther Ecache is flushed differently
574 489 cmp %o3, PANTHER_IMPL
575 490 bne ecache_flush_line_1
576 491 nop
577 492
578 493 PN_ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3, %o4)
579 494 b ecache_flush_line_2
580 495 nop
581 496 ecache_flush_line_1:
582 497 ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3)
583 498 ecache_flush_line_2:
584 499 retl
585 500 nop
586 501 SET_SIZE(ecache_flush_line)
587 -#endif /* lint */
588 502
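ecache_flush_line itself is just the implementation dispatch; in terms of the models sketched earlier (cpu_impl() being a hypothetical stand-in for GET_CPU_IMPL):

	#include <stdint.h>

	extern int cpu_impl(void);	/* models GET_CPU_IMPL */
	extern void pn_ecache_flush_line_model(uint64_t);
	extern void chp_ecache_flush_line_model(uint64_t, uint64_t, uint64_t);

	void
	ecache_flush_line_model(uint64_t flushaddr, uint64_t ec_set_size)
	{
		if (cpu_impl() == PANTHER_IMPL)		/* flush L2 and L3 */
			pn_ecache_flush_line_model(flushaddr);
		else
			chp_ecache_flush_line_model(flushaddr, ec_set_size,
			    CHP_ECACHE_IDX_DISP_FLUSH);
	}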
589 -#if defined(lint)
590 -void
591 -set_afsr_ext(uint64_t afsr_ext)
592 -{
593 - afsr_ext = afsr_ext;
594 -}
595 -#else /* lint */
596 -
597 503 ENTRY(set_afsr_ext)
598 504 set ASI_AFSR_EXT_VA, %o1
599 505 stxa %o0, [%o1]ASI_AFSR ! afsr_ext reg
600 506 membar #Sync
601 507 retl
602 508 nop
603 509 SET_SIZE(set_afsr_ext)
604 510
605 -#endif /* lint */
606 511
607 -
608 -#if defined(lint)
609 -/*
610 - * The CPU jumps here from the MMU exception handler if an ITLB parity
611 - * error is detected and we are running on Panther.
612 - *
613 - * In this routine we collect diagnostic information and write it to our
614 - * logout structure (if possible) and clear all ITLB entries that may have
615 - * caused our parity trap.
616 - * Then we call cpu_tlb_parity_error via systrap in order to drop down to TL0
617 - * and log any error messages. As for parameters to cpu_tlb_parity_error, we
618 - * send two:
619 - *
620 - * %g2 - Contains the VA whose lookup in the ITLB caused the parity error
621 - * %g3 - Contains the tlo_info field of the pn_tlb_logout logout struct,
622 - * regardless of whether or not we actually used the logout struct.
623 - *
624 - * In the TL0 handler (cpu_tlb_parity_error) we will compare those two
625 - * parameters to the data contained in the logout structure in order to
626 - * determine whether the logout information is valid for this particular
627 - * error or not.
628 - */
629 -void
630 -itlb_parity_trap(void)
631 -{}
632 -
633 -#else /* lint */
634 -
635 512 ENTRY_NP(itlb_parity_trap)
636 513 /*
637 514 * Collect important information about the trap which will be
638 515 * used as a parameter to the TL0 handler.
639 516 */
640 517 wr %g0, ASI_IMMU, %asi
641 518 rdpr %tpc, %g2 ! VA that caused the IMMU trap
642 519 ldxa [MMU_TAG_ACCESS_EXT]%asi, %g3 ! read the trap VA page size
643 520 set PN_ITLB_PGSZ_MASK, %g4
644 521 and %g3, %g4, %g3
645 522 ldxa [MMU_TAG_ACCESS]%asi, %g4
646 523 set TAGREAD_CTX_MASK, %g5
647 524 and %g4, %g5, %g4
648 525 or %g4, %g3, %g3 ! 'or' in the trap context and
649 526 mov 1, %g4 ! add the IMMU flag to complete
650 527 sllx %g4, PN_TLO_INFO_IMMU_SHIFT, %g4
651 528 or %g4, %g3, %g3 ! the tlo_info field for logout
652 529 stxa %g0,[MMU_SFSR]%asi ! clear the SFSR
653 530 membar #Sync
654 531
655 532 /*
656 533 * at this point:
657 534 * %g2 - contains the VA whose lookup caused the trap
658 535 * %g3 - contains the tlo_info field
659 536 *
660 537 * Next, we calculate the TLB index value for the failing VA.
661 538 */
662 539 mov %g2, %g4 ! We need the ITLB index
663 540 set PN_ITLB_PGSZ_MASK, %g5
664 541 and %g3, %g5, %g5
665 542 srlx %g5, PN_ITLB_PGSZ_SHIFT, %g5
666 543 PN_GET_TLB_INDEX(%g4, %g5) ! %g4 has the index
667 544 sllx %g4, PN_TLB_ACC_IDX_SHIFT, %g4 ! shift the index into place
668 545 set PN_ITLB_T512, %g5
669 546 or %g4, %g5, %g4 ! and add in the TLB ID
670 547
671 548 /*
672 549 * at this point:
673 550 * %g2 - contains the VA whose lookup caused the trap
674 551 * %g3 - contains the tlo_info field
675 552 * %g4 - contains the TLB access index value for the
676 553 * VA/PgSz in question
677 554 *
678 555 * Check to see if the logout structure is available.
679 556 */
680 557 set CHPR_TLB_LOGOUT, %g6
681 558 GET_CPU_PRIVATE_PTR(%g6, %g1, %g5, itlb_parity_trap_1)
682 559 set LOGOUT_INVALID_U32, %g6
683 560 sllx %g6, 32, %g6 ! if our logout structure is
684 561 set LOGOUT_INVALID_L32, %g5 ! unavailable or if it is
685 562 or %g5, %g6, %g5 ! already being used, then we
686 563 ldx [%g1 + PN_TLO_ADDR], %g6 ! don't collect any diagnostic
687 564 cmp %g6, %g5 ! information before clearing
688 565 bne itlb_parity_trap_1 ! and logging the error.
689 566 nop
690 567
691 568 /*
692 569 * Record the logout information. %g4 contains our index + TLB ID
693 570 * for use in ASI_ITLB_ACCESS and ASI_ITLB_TAGREAD. %g1 contains
694 571 * the pointer to our logout struct.
695 572 */
696 573 stx %g3, [%g1 + PN_TLO_INFO]
697 574 stx %g2, [%g1 + PN_TLO_ADDR]
698 575 stx %g2, [%g1 + PN_TLO_PC] ! %tpc == fault addr for IMMU
699 576
700 577 add %g1, PN_TLO_ITLB_TTE, %g1 ! move up the pointer
701 578
702 579 ldxa [%g4]ASI_ITLB_ACCESS, %g5 ! read the data
703 580 stx %g5, [%g1 + CH_TLO_TTE_DATA] ! store it away
704 581 ldxa [%g4]ASI_ITLB_TAGREAD, %g5 ! read the tag
705 582 stx %g5, [%g1 + CH_TLO_TTE_TAG] ! store it away
706 583
707 584 set PN_TLB_ACC_WAY_BIT, %g6 ! same thing again for way 1
708 585 or %g4, %g6, %g4
709 586 add %g1, CH_TLO_TTE_SIZE, %g1 ! move up the pointer
710 587
711 588 ldxa [%g4]ASI_ITLB_ACCESS, %g5 ! read the data
712 589 stx %g5, [%g1 + CH_TLO_TTE_DATA] ! store it away
713 590 ldxa [%g4]ASI_ITLB_TAGREAD, %g5 ! read the tag
714 591 stx %g5, [%g1 + CH_TLO_TTE_TAG] ! store it away
715 592
716 593 andn %g4, %g6, %g4 ! back to way 0
717 594
718 595 itlb_parity_trap_1:
719 596 /*
720 597 * at this point:
721 598 * %g2 - contains the VA whose lookup caused the trap
722 599 * %g3 - contains the tlo_info field
723 600 * %g4 - contains the TLB access index value for the
724 601 * VA/PgSz in question
725 602 *
726 603 * Here we will clear the errors from the TLB.
727 604 */
728 605 set MMU_TAG_ACCESS, %g5 ! We write a TTE tag value of
729 606 stxa %g0, [%g5]ASI_IMMU ! 0 as it will be invalid.
730 607 stxa %g0, [%g4]ASI_ITLB_ACCESS ! Write the data and tag
731 608 membar #Sync
732 609
733 610 set PN_TLB_ACC_WAY_BIT, %g6 ! same thing again for way 1
734 611 or %g4, %g6, %g4
735 612
736 613 stxa %g0, [%g4]ASI_ITLB_ACCESS ! Write same data and tag
737 614 membar #Sync
738 615
739 616 sethi %hi(FLUSH_ADDR), %g6 ! PRM says we need to issue a
740 617 flush %g6 ! flush after writing MMU regs
741 618
742 619 /*
743 620 * at this point:
744 621 * %g2 - contains the VA whose lookup caused the trap
745 622 * %g3 - contains the tlo_info field
746 623 *
747 624 * Call cpu_tlb_parity_error via systrap at PIL 14 unless we're
748 625 * already at PIL 15. */
749 626 set cpu_tlb_parity_error, %g1
750 627 rdpr %pil, %g4
751 628 cmp %g4, PIL_14
752 629 movl %icc, PIL_14, %g4
753 630 ba sys_trap
754 631 nop
755 632 SET_SIZE(itlb_parity_trap)
756 633
757 -#endif /* lint */
758 -
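The tlo_info word that itlb_parity_trap hands to the TL0 handler is assembled from three fields; in C (a sketch, using the same masks and shift the assembly uses):

	#include <stdint.h>

	uint64_t
	itlb_tlo_info_model(uint64_t tag_access_ext, uint64_t tag_access)
	{
		uint64_t info;

		info = tag_access_ext & PN_ITLB_PGSZ_MASK;  /* trap VA page size */
		info |= tag_access & TAGREAD_CTX_MASK;      /* trap context */
		info |= (uint64_t)1 << PN_TLO_INFO_IMMU_SHIFT;  /* IMMU flag */
		return (info);
	}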
759 -#if defined(lint)
760 -/*
761 - * The CPU jumps here from the MMU exception handler if a DTLB parity
762 - * error is detected and we are running on Panther.
763 - *
764 - * In this routine we collect diagnostic information and write it to our
765 - * logout structure (if possible) and clear all DTLB entries that may have
766 - * caused our parity trap.
767 - * Then we call cpu_tlb_parity_error via systrap in order to drop down to TL0
768 - * and log any error messages. As for parameters to cpu_tlb_parity_error, we
769 - * send two:
770 - *
771 - * %g2 - Contains the VA whose lookup in the DTLB caused the parity error
772 - * %g3 - Contains the tlo_info field of the pn_tlb_logout logout struct,
773 - * regardless of whether or not we actually used the logout struct.
774 - *
775 - * In the TL0 handler (cpu_tlb_parity_error) we will compare those two
776 - * parameters to the data contained in the logout structure in order to
777 - * determine whether the logout information is valid for this particular
778 - * error or not.
779 - */
780 -void
781 -dtlb_parity_trap(void)
782 -{}
783 -
784 -#else /* lint */
785 -
786 634 ENTRY_NP(dtlb_parity_trap)
787 635 /*
788 636 * Collect important information about the trap which will be
789 637 * used as a parameter to the TL0 handler.
790 638 */
791 639 wr %g0, ASI_DMMU, %asi
792 640 	ldxa	[MMU_SFAR]%asi, %g2	! VA that caused the DMMU trap
793 641 ldxa [MMU_TAG_ACCESS_EXT]%asi, %g3 ! read the trap VA page sizes
794 642 set PN_DTLB_PGSZ_MASK, %g4
795 643 and %g3, %g4, %g3
796 644 ldxa [MMU_TAG_ACCESS]%asi, %g4
797 645 set TAGREAD_CTX_MASK, %g5 ! 'or' in the trap context
798 646 and %g4, %g5, %g4 ! to complete the tlo_info
799 647 or %g4, %g3, %g3 ! field for logout
800 648 stxa %g0,[MMU_SFSR]%asi ! clear the SFSR
801 649 membar #Sync
802 650
803 651 /*
804 652 * at this point:
805 653 * %g2 - contains the VA whose lookup caused the trap
806 654 * %g3 - contains the tlo_info field
807 655 *
808 656 * Calculate the TLB index values for the failing VA. Since the T512
809 657 * TLBs can be configured for different page sizes, we need to find
810 658 * the index into each one separately.
811 659 */
812 660 mov %g2, %g4 ! First we get the DTLB_0 index
813 661 set PN_DTLB_PGSZ0_MASK, %g5
814 662 and %g3, %g5, %g5
815 663 srlx %g5, PN_DTLB_PGSZ0_SHIFT, %g5
816 664 PN_GET_TLB_INDEX(%g4, %g5) ! %g4 has the DTLB_0 index
817 665 sllx %g4, PN_TLB_ACC_IDX_SHIFT, %g4 ! shift the index into place
818 666 set PN_DTLB_T512_0, %g5
819 667 or %g4, %g5, %g4 ! and add in the TLB ID
820 668
821 669 mov %g2, %g7 ! Next we get the DTLB_1 index
822 670 set PN_DTLB_PGSZ1_MASK, %g5
823 671 and %g3, %g5, %g5
824 672 srlx %g5, PN_DTLB_PGSZ1_SHIFT, %g5
825 673 PN_GET_TLB_INDEX(%g7, %g5) ! %g7 has the DTLB_1 index
826 674 sllx %g7, PN_TLB_ACC_IDX_SHIFT, %g7 ! shift the index into place
827 675 set PN_DTLB_T512_1, %g5
828 676 or %g7, %g5, %g7 ! and add in the TLB ID
829 677
830 678 /*
831 679 * at this point:
832 680 * %g2 - contains the VA whose lookup caused the trap
833 681 * %g3 - contains the tlo_info field
834 682 * %g4 - contains the T512_0 access index value for the
835 683 * VA/PgSz in question
836 684 * %g7 - contains the T512_1 access index value for the
837 685 * VA/PgSz in question
838 686 *
839 687 * If this trap happened at TL>0, then we don't want to mess
840 688 	 * with the normal logout struct since that could cause a TLB
841 689 * miss.
842 690 */
843 691 rdpr %tl, %g6 ! read current trap level
844 692 cmp %g6, 1 ! skip over the tl>1 code
845 693 ble dtlb_parity_trap_1 ! if TL <= 1.
846 694 nop
847 695
848 696 /*
849 697 * If we are here, then the trap happened at TL>1. Simply
850 698 * update our tlo_info field and then skip to the TLB flush
851 699 * code.
852 700 */
853 701 mov 1, %g6
854 702 sllx %g6, PN_TLO_INFO_TL1_SHIFT, %g6
855 703 or %g6, %g3, %g3
856 704 ba dtlb_parity_trap_2
857 705 nop
858 706
859 707 dtlb_parity_trap_1:
860 708 /*
861 709 * at this point:
862 710 * %g2 - contains the VA whose lookup caused the trap
863 711 * %g3 - contains the tlo_info field
864 712 * %g4 - contains the T512_0 access index value for the
865 713 * VA/PgSz in question
866 714 * %g7 - contains the T512_1 access index value for the
867 715 * VA/PgSz in question
868 716 *
869 717 * Check to see if the logout structure is available.
870 718 */
871 719 set CHPR_TLB_LOGOUT, %g6
872 720 GET_CPU_PRIVATE_PTR(%g6, %g1, %g5, dtlb_parity_trap_2)
873 721 set LOGOUT_INVALID_U32, %g6
874 722 sllx %g6, 32, %g6 ! if our logout structure is
875 723 set LOGOUT_INVALID_L32, %g5 ! unavailable or if it is
876 724 or %g5, %g6, %g5 ! already being used, then we
877 725 ldx [%g1 + PN_TLO_ADDR], %g6 ! don't collect any diagnostic
878 726 cmp %g6, %g5 ! information before clearing
879 727 bne dtlb_parity_trap_2 ! and logging the error.
880 728 nop
881 729
882 730 /*
883 731 * Record the logout information. %g4 contains our DTLB_0
884 732 * index + TLB ID and %g7 contains our DTLB_1 index + TLB ID
885 733 * both of which will be used for ASI_DTLB_ACCESS and
886 734 * ASI_DTLB_TAGREAD. %g1 contains the pointer to our logout
887 735 * struct.
888 736 */
889 737 stx %g3, [%g1 + PN_TLO_INFO]
890 738 stx %g2, [%g1 + PN_TLO_ADDR]
891 739 rdpr %tpc, %g5
892 740 stx %g5, [%g1 + PN_TLO_PC]
893 741
894 742 add %g1, PN_TLO_DTLB_TTE, %g1 ! move up the pointer
895 743
896 744 ldxa [%g4]ASI_DTLB_ACCESS, %g5 ! read the data from DTLB_0
897 745 stx %g5, [%g1 + CH_TLO_TTE_DATA] ! way 0 and store it away
898 746 ldxa [%g4]ASI_DTLB_TAGREAD, %g5 ! read the tag from DTLB_0
899 747 stx %g5, [%g1 + CH_TLO_TTE_TAG] ! way 0 and store it away
900 748
901 749 ldxa [%g7]ASI_DTLB_ACCESS, %g5 ! now repeat for DTLB_1 way 0
902 750 stx %g5, [%g1 + (CH_TLO_TTE_DATA + (CH_TLO_TTE_SIZE * 2))]
903 751 ldxa [%g7]ASI_DTLB_TAGREAD, %g5
904 752 stx %g5, [%g1 + (CH_TLO_TTE_TAG + (CH_TLO_TTE_SIZE * 2))]
905 753
906 754 set PN_TLB_ACC_WAY_BIT, %g6 ! same thing again for way 1
907 755 or %g4, %g6, %g4 ! of each TLB.
908 756 or %g7, %g6, %g7
909 757 add %g1, CH_TLO_TTE_SIZE, %g1 ! move up the pointer
910 758
911 759 ldxa [%g4]ASI_DTLB_ACCESS, %g5 ! read the data from DTLB_0
912 760 stx %g5, [%g1 + CH_TLO_TTE_DATA] ! way 1 and store it away
913 761 ldxa [%g4]ASI_DTLB_TAGREAD, %g5 ! read the tag from DTLB_0
914 762 stx %g5, [%g1 + CH_TLO_TTE_TAG] ! way 1 and store it away
915 763
916 764 ldxa [%g7]ASI_DTLB_ACCESS, %g5 ! now repeat for DTLB_1 way 1
917 765 stx %g5, [%g1 + (CH_TLO_TTE_DATA + (CH_TLO_TTE_SIZE * 2))]
918 766 ldxa [%g7]ASI_DTLB_TAGREAD, %g5
919 767 stx %g5, [%g1 + (CH_TLO_TTE_TAG + (CH_TLO_TTE_SIZE * 2))]
920 768
921 769 andn %g4, %g6, %g4 ! back to way 0
922 770 andn %g7, %g6, %g7 ! back to way 0
923 771
924 772 dtlb_parity_trap_2:
925 773 /*
926 774 * at this point:
927 775 * %g2 - contains the VA whose lookup caused the trap
928 776 * %g3 - contains the tlo_info field
929 777 * %g4 - contains the T512_0 access index value for the
930 778 * VA/PgSz in question
931 779 * %g7 - contains the T512_1 access index value for the
932 780 * VA/PgSz in question
933 781 *
934 782 * Here we will clear the errors from the DTLB.
935 783 */
936 784 set MMU_TAG_ACCESS, %g5 ! We write a TTE tag value of
937 785 stxa %g0, [%g5]ASI_DMMU ! 0 as it will be invalid.
938 786 stxa %g0, [%g4]ASI_DTLB_ACCESS ! Write the data and tag.
939 787 stxa %g0, [%g7]ASI_DTLB_ACCESS ! Now repeat for DTLB_1 way 0
940 788 membar #Sync
941 789
942 790 set PN_TLB_ACC_WAY_BIT, %g6 ! same thing again for way 1
943 791 or %g4, %g6, %g4
944 792 or %g7, %g6, %g7
945 793
946 794 stxa %g0, [%g4]ASI_DTLB_ACCESS ! Write same data and tag.
947 795 	stxa	%g0, [%g7]ASI_DTLB_ACCESS	! Now repeat for DTLB_1 way 1
948 796 membar #Sync
949 797
950 798 sethi %hi(FLUSH_ADDR), %g6 ! PRM says we need to issue a
951 799 flush %g6 ! flush after writing MMU regs
952 800
953 801 /*
954 802 * at this point:
955 803 * %g2 - contains the VA whose lookup caused the trap
956 804 * %g3 - contains the tlo_info field
957 805 *
958 806 * Call cpu_tlb_parity_error via systrap at PIL 14 unless we're
959 807 * already at PIL 15. We do this even for TL>1 traps since
960 808 * those will lead to a system panic.
961 809 */
962 810 set cpu_tlb_parity_error, %g1
963 811 rdpr %pil, %g4
964 812 cmp %g4, PIL_14
965 813 movl %icc, PIL_14, %g4
966 814 ba sys_trap
967 815 nop
968 816 SET_SIZE(dtlb_parity_trap)
969 817
970 -#endif /* lint */
971 818
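Because the two data-side T512 TLBs can be programmed with different page sizes, dtlb_parity_trap derives one access index per TLB and tags each with its TLB ID. In C (a sketch; pn_tlb_index() is a hypothetical stand-in for the PN_GET_TLB_INDEX macro):

	#include <stdint.h>

	extern uint64_t pn_tlb_index(uint64_t va, uint64_t pg_sz);

	void
	dtlb_access_indices_model(uint64_t va, uint64_t tlo_info,
	    uint64_t *t512_0, uint64_t *t512_1)
	{
		uint64_t pgsz0 = (tlo_info & PN_DTLB_PGSZ0_MASK) >>
		    PN_DTLB_PGSZ0_SHIFT;
		uint64_t pgsz1 = (tlo_info & PN_DTLB_PGSZ1_MASK) >>
		    PN_DTLB_PGSZ1_SHIFT;

		/* Index shifted into place, then tagged with the TLB ID. */
		*t512_0 = (pn_tlb_index(va, pgsz0) << PN_TLB_ACC_IDX_SHIFT) |
		    PN_DTLB_T512_0;
		*t512_1 = (pn_tlb_index(va, pgsz1) << PN_TLB_ACC_IDX_SHIFT) |
		    PN_DTLB_T512_1;
	}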
972 -
973 -#if defined(lint)
974 -/*
975 - * Calculates the Panther TLB index based on a virtual address and page size
976 - *
977 - * Register usage:
978 - * %o0 - virtual address whose index we want
979 - * %o1 - Page Size of the TLB in question as encoded in the
980 - * ASI_[D|I]MMU_TAG_ACCESS_EXT register.
981 - */
982 -uint64_t
983 -pn_get_tlb_index(uint64_t va, uint64_t pg_sz)
984 -{
985 - return ((va + pg_sz)-(va + pg_sz));
986 -}
987 -#else /* lint */
988 819 ENTRY(pn_get_tlb_index)
989 820
990 821 PN_GET_TLB_INDEX(%o0, %o1)
991 822
992 823 retl
993 824 nop
994 825 SET_SIZE(pn_get_tlb_index)
995 -#endif /* lint */
996 826
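The arithmetic itself is hidden inside the PN_GET_TLB_INDEX macro. Purely as an assumed model, not something taken from this file, a 512-entry Panther TLB index could be derived along these lines (the 13 + 3*pg_sz progression matches the sun4u 8K/64K/512K/4M page-size encodings; the 0x1ff mask is a guess at the index width):

	#include <stdint.h>

	/* Assumed model only; see PN_GET_TLB_INDEX for the real arithmetic. */
	uint64_t
	pn_get_tlb_index_model(uint64_t va, uint64_t pg_sz)
	{
		uint64_t page_shift = 13 + 3 * pg_sz;	/* 8K, 64K, 512K, 4M */

		return ((va >> page_shift) & 0x1ff);
	}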
997 827
998 -#if defined(lint)
999 -/*
1000 - * For Panther CPUs we need to flush the IPB after any I$ or D$
1001 - * parity errors are detected.
1002 - */
1003 -void
1004 -flush_ipb(void)
1005 -{ return; }
1006 -
1007 -#else /* lint */
1008 -
1009 828 ENTRY(flush_ipb)
1010 829 clr %o0
1011 830
1012 831 flush_ipb_1:
1013 832 stxa %g0, [%o0]ASI_IPB_TAG
1014 833 membar #Sync
1015 834 cmp %o0, PN_IPB_TAG_ADDR_MAX
1016 835 blt flush_ipb_1
1017 836 add %o0, PN_IPB_TAG_ADDR_LINESIZE, %o0
1018 837
1019 838 sethi %hi(FLUSH_ADDR), %o0
1020 839 flush %o0
1021 840 retl
1022 841 nop
1023 842 SET_SIZE(flush_ipb)
1024 843
1025 -#endif /* lint */
1026 844
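flush_ipb zeroes every tag in the instruction prefetch buffer and then issues a flush; in C terms (a sketch: ipb_tag_write() is a hypothetical stand-in for the stxa to ASI_IPB_TAG, and the trailing flush of FLUSH_ADDR is elided):

	#include <stdint.h>

	extern void ipb_tag_write(uint64_t addr, uint64_t val);

	void
	flush_ipb_model(void)
	{
		uint64_t addr;

		/* The assembly stores through PN_IPB_TAG_ADDR_MAX inclusive. */
		for (addr = 0; addr <= PN_IPB_TAG_ADDR_MAX;
		    addr += PN_IPB_TAG_ADDR_LINESIZE)
			ipb_tag_write(addr, 0);
	}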
1027 -