restore sparc comments
de-linting of .s files
--- old/usr/src/uts/sun4u/cpu/us3_cheetahplus_asm.s
+++ new/usr/src/uts/sun4u/cpu/us3_cheetahplus_asm.s
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 *
25 25 * Assembly code support for the Cheetah+ module
26 26 */
27 27
28 -#pragma ident "%Z%%M% %I% %E% SMI"
29 -
30 -#if !defined(lint)
31 28 #include "assym.h"
32 -#endif /* lint */
33 29
34 30 #include <sys/asm_linkage.h>
35 31 #include <sys/mmu.h>
36 32 #include <vm/hat_sfmmu.h>
37 33 #include <sys/machparam.h>
38 34 #include <sys/machcpuvar.h>
39 35 #include <sys/machthread.h>
40 36 #include <sys/machtrap.h>
41 37 #include <sys/privregs.h>
42 38 #include <sys/asm_linkage.h>
43 39 #include <sys/trap.h>
44 40 #include <sys/cheetahregs.h>
45 41 #include <sys/us3_module.h>
46 42 #include <sys/xc_impl.h>
47 43 #include <sys/intreg.h>
48 44 #include <sys/async.h>
49 45 #include <sys/clock.h>
50 46 #include <sys/cheetahasm.h>
51 47 #include <sys/cmpregs.h>
52 48
53 49 #ifdef TRAPTRACE
54 50 #include <sys/traptrace.h>
55 51 #endif /* TRAPTRACE */
56 52
57 53
58 -#if !defined(lint)
59 -
60 54 /* BEGIN CSTYLED */
61 55
62 56 /*
63 57 * Cheetah+ version to reflush an Ecache line by index.
64 58 *
65 59 * By default we assume the Ecache is 2-way so we flush both
66 60 * ways. Even if the cache is direct-mapped no harm will come
67 61 * from performing the flush twice, apart from perhaps a performance
68 62 * penalty.
69 63 *
70 64 * XXX - scr2 not used.
71 65 */
72 66 #define ECACHE_REFLUSH_LINE(ec_set_size, index, scr2) \
73 67 ldxa [index]ASI_EC_DIAG, %g0; \
74 68 ldxa [index + ec_set_size]ASI_EC_DIAG, %g0;
75 69
76 70 /*
77 71 * Cheetah+ version of ecache_flush_line. Uses Cheetah+ Ecache Displacement
78 72 * Flush feature.
79 73 */
80 74 #define ECACHE_FLUSH_LINE(physaddr, ec_set_size, scr1, scr2) \
81 75 sub ec_set_size, 1, scr1; \
82 76 and physaddr, scr1, scr1; \
83 77 set CHP_ECACHE_IDX_DISP_FLUSH, scr2; \
84 78 or scr2, scr1, scr1; \
85 79 ECACHE_REFLUSH_LINE(ec_set_size, scr1, scr2)
86 80
87 81 /* END CSTYLED */
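
Review note: for readers new to displacement flushing, a minimal C sketch of
the index arithmetic ECACHE_FLUSH_LINE performs. The helper name is
hypothetical; CHP_ECACHE_IDX_DISP_FLUSH is the real flush-bit constant used
above, and the actual eviction is done by the diagnostic loads in
ECACHE_REFLUSH_LINE.

    /* Sketch only: ASI_EC_DIAG displacement-flush index, 2-way E$ assumed. */
    uint64_t
    chp_flush_index(uint64_t physaddr, uint64_t ec_set_size)
    {
            uint64_t idx = physaddr & (ec_set_size - 1);    /* set offset */

            return (CHP_ECACHE_IDX_DISP_FLUSH | idx);       /* flush bits */
    }
    /* Way 0 is then read at idx, way 1 at idx + ec_set_size. */
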
88 82
89 83 /*
90 84 * Panther version to reflush a line from both the L2 cache and L3
91 85 * cache by the respective indexes. Flushes all ways of the line from
92 86 * each cache.
93 87 *
94 88 * l2_index Index into the L2$ of the line to be flushed. This
95 89 * register will not be modified by this routine.
96 90 * l3_index Index into the L3$ of the line to be flushed. This
97 91 * register will not be modified by this routine.
98 92 * scr2 scratch register.
99 93 * scr3 scratch register.
100 94 *
101 95 */
102 96 #define PN_ECACHE_REFLUSH_LINE(l2_index, l3_index, scr2, scr3) \
103 97 set PN_L2_MAX_SET, scr2; \
104 98 set PN_L2_SET_SIZE, scr3; \
105 99 1: \
106 100 ldxa [l2_index + scr2]ASI_L2_TAG, %g0; \
107 101 cmp scr2, %g0; \
108 102 bg,a 1b; \
109 103 sub scr2, scr3, scr2; \
110 104 mov 6, scr2; \
111 105 7: \
112 106 cmp scr2, %g0; \
113 107 bg,a 7b; \
114 108 sub scr2, 1, scr2; \
115 109 set PN_L3_MAX_SET, scr2; \
116 110 set PN_L3_SET_SIZE, scr3; \
117 111 2: \
118 112 ldxa [l3_index + scr2]ASI_EC_DIAG, %g0; \
119 113 cmp scr2, %g0; \
120 114 bg,a 2b; \
121 115 sub scr2, scr3, scr2;
122 116
123 117 /*
124 118 * Panther version of ecache_flush_line. Flushes the line corresponding
125 119 * to physaddr from both the L2 cache and the L3 cache.
126 120 *
127 121 * physaddr Input: Physical address to flush.
128 122 * Output: Physical address to flush (preserved).
129 123 * l2_idx_out Input: scratch register.
130 124 * Output: Index into the L2$ of the line to be flushed.
131 125 * l3_idx_out Input: scratch register.
132 126 * Output: Index into the L3$ of the line to be flushed.
133 127 * scr3 scratch register.
134 128 * scr4 scratch register.
135 129 *
136 130 */
137 131 #define PN_ECACHE_FLUSH_LINE(physaddr, l2_idx_out, l3_idx_out, scr3, scr4) \
138 132 set PN_L3_SET_SIZE, l2_idx_out; \
139 133 sub l2_idx_out, 1, l2_idx_out; \
140 134 and physaddr, l2_idx_out, l3_idx_out; \
141 135 set PN_L3_IDX_DISP_FLUSH, l2_idx_out; \
142 136 or l2_idx_out, l3_idx_out, l3_idx_out; \
143 137 set PN_L2_SET_SIZE, l2_idx_out; \
144 138 sub l2_idx_out, 1, l2_idx_out; \
145 139 and physaddr, l2_idx_out, l2_idx_out; \
146 140 set PN_L2_IDX_DISP_FLUSH, scr3; \
147 141 or l2_idx_out, scr3, l2_idx_out; \
148 142 PN_ECACHE_REFLUSH_LINE(l2_idx_out, l3_idx_out, scr3, scr4)
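
Review note: the two Panther macros above expand to a fair amount of assembly,
so here is a C sketch of what they compute. diag_load_l2() and diag_load_l3()
are hypothetical stand-ins for the ldxa accesses through ASI_L2_TAG and
ASI_EC_DIAG; the PN_* constants are the real ones from cheetahregs.h.

    /* Sketch only: flush one line from both Panther caches. */
    void
    pn_flush_line_sketch(uint64_t physaddr)
    {
            uint64_t l3_idx = (physaddr & (PN_L3_SET_SIZE - 1)) |
                PN_L3_IDX_DISP_FLUSH;
            uint64_t l2_idx = (physaddr & (PN_L2_SET_SIZE - 1)) |
                PN_L2_IDX_DISP_FLUSH;
            int64_t off;

            /* one diagnostic load per L2 way, highest way first */
            for (off = PN_L2_MAX_SET; off >= 0; off -= PN_L2_SET_SIZE)
                    diag_load_l2(l2_idx + off);
            /* (the assembly inserts a short delay loop here) */

            /* one diagnostic load per L3 way */
            for (off = PN_L3_MAX_SET; off >= 0; off -= PN_L3_SET_SIZE)
                    diag_load_l3(l3_idx + off);
    }
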
149 143
150 -#endif /* !lint */
151 -
152 144 /*
153 145 * Fast ECC error at TL>0 handler
154 146 * We get here via trap 70 at TL>0->Software trap 0 at TL>0. We enter
155 147 * this routine with %g1 and %g2 already saved in %tpc, %tnpc and %tstate.
156 148 * For a complete description of the Fast ECC at TL>0 handling see the
157 149 * comment block "Cheetah/Cheetah+ Fast ECC at TL>0 trap strategy" in
158 150 * us3_common_asm.s
159 151 */
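
Review note: the handler body is long; as orientation, a pseudocode sketch of
its flow. Every helper name here is hypothetical shorthand for the macros and
labels used below, not a kernel API.

    /* Sketch only: control flow of fast_ecc_tl1_err. */
    void
    fast_ecc_tl1_err_sketch(void)
    {
            ch_err_tl1_fecc_enter();        /* save %g regs, D$/I$ off */
            if (tl1_cpu_logout() >= CLO_NESTING_MAX)
                    panic();                /* not making progress */
            errors_off();                   /* CEEN/NCEEN */
            flush_ecache();                 /* L2 + L3 on Panther */
            errors_on();
            flush_and_reenable_caches();    /* D$/I$, if they were on */
            traptrace_record();             /* TRAPTRACE builds only */
            if (!nested() && fatal_ucu_wdu_pattern())
                    panic();
            ch_err_tl1_exit();              /* restore %g regs, retry */
    }
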
160 -#if defined(lint)
161 152
162 -void
163 -fast_ecc_tl1_err(void)
164 -{}
165 -
166 -#else /* lint */
167 -
168 153 .section ".text"
169 154 .align 64
170 155 ENTRY_NP(fast_ecc_tl1_err)
171 156
172 157 /*
173 158 * This macro turns off the D$/I$ if they are on and saves their
174 159 * original state in ch_err_tl1_tmp, saves all the %g registers in the
175 160 * ch_err_tl1_data structure, updates the ch_err_tl1_flags and saves
176 161 * the %tpc in ch_err_tl1_tpc. At the end of this macro, %g1 will
177 162 * point to the ch_err_tl1_data structure and the original D$/I$ state
178 163 * will be saved in ch_err_tl1_tmp. All %g registers except for %g1
179 164 * will be available.
180 165 */
181 166 CH_ERR_TL1_FECC_ENTER;
182 167
183 168 /*
184 169 * Get the diagnostic logout data. %g4 must be initialized to
185 170 * current CEEN state, %g5 must point to logout structure in
186 171 * ch_err_tl1_data_t. %g3 will contain the nesting count upon
187 172 * return.
188 173 */
189 174 ldxa [%g0]ASI_ESTATE_ERR, %g4
190 175 and %g4, EN_REG_CEEN, %g4
191 176 add %g1, CH_ERR_TL1_LOGOUT, %g5
192 177 DO_TL1_CPU_LOGOUT(%g3, %g2, %g4, %g5, %g6, %g3, %g4)
193 178
194 179 /*
195 180 * If the logout nesting count is exceeded, we're probably
196 181 * not making any progress, try to panic instead.
197 182 */
198 183 cmp %g3, CLO_NESTING_MAX
199 184 bge fecc_tl1_err
200 185 nop
201 186
202 187 /*
203 188 * Save the current CEEN and NCEEN state in %g7 and turn them off
204 189 * before flushing the Ecache.
205 190 */
206 191 ldxa [%g0]ASI_ESTATE_ERR, %g7
207 192 andn %g7, EN_REG_CEEN | EN_REG_NCEEN, %g5
208 193 stxa %g5, [%g0]ASI_ESTATE_ERR
209 194 membar #Sync
210 195
211 196 /*
212 197 * Flush the Ecache, using the largest possible cache size with the
213 198 * smallest possible line size since we can't get the actual sizes
214 199 * from the cpu_node due to DTLB misses.
215 200 */
216 201 PN_L2_FLUSHALL(%g3, %g4, %g5)
217 202
218 203 set CH_ECACHE_MAX_SIZE, %g4
219 204 set CH_ECACHE_MIN_LSIZE, %g5
220 205
221 206 GET_CPU_IMPL(%g6)
222 207 cmp %g6, PANTHER_IMPL
223 208 bne %xcc, 2f
224 209 nop
225 210 set PN_L3_SIZE, %g4
226 211 2:
227 212 mov %g6, %g3
228 213 CHP_ECACHE_FLUSHALL(%g4, %g5, %g3)
229 214
230 215 /*
231 216 * Restore CEEN and NCEEN to the previous state.
232 217 */
233 218 stxa %g7, [%g0]ASI_ESTATE_ERR
234 219 membar #Sync
235 220
236 221 /*
237 222 * If we turned off the D$, then flush it and turn it back on.
238 223 */
239 224 ldxa [%g1 + CH_ERR_TL1_TMP]%asi, %g3
240 225 andcc %g3, CH_ERR_TSTATE_DC_ON, %g0
241 226 bz %xcc, 3f
242 227 nop
243 228
244 229 /*
245 230 * Flush the D$.
246 231 */
247 232 ASM_LD(%g4, dcache_size)
248 233 ASM_LD(%g5, dcache_linesize)
249 234 CH_DCACHE_FLUSHALL(%g4, %g5, %g6)
250 235
251 236 /*
252 237 * Turn the D$ back on.
253 238 */
254 239 ldxa [%g0]ASI_DCU, %g3
255 240 or %g3, DCU_DC, %g3
256 241 stxa %g3, [%g0]ASI_DCU
257 242 membar #Sync
258 243 3:
259 244 /*
260 245 * If we turned off the I$, then flush it and turn it back on.
261 246 */
262 247 ldxa [%g1 + CH_ERR_TL1_TMP]%asi, %g3
263 248 andcc %g3, CH_ERR_TSTATE_IC_ON, %g0
264 249 bz %xcc, 4f
265 250 nop
266 251
267 252 /*
268 253 * Flush the I$. Panther has different I$ parameters, and we
269 254 * can't access the logout I$ params without possibly generating
270 255 * an MMU miss.
271 256 */
272 257 GET_CPU_IMPL(%g6)
273 258 set PN_ICACHE_SIZE, %g3
274 259 set CH_ICACHE_SIZE, %g4
275 260 mov CH_ICACHE_LSIZE, %g5
276 261 cmp %g6, PANTHER_IMPL
277 262 movz %xcc, %g3, %g4
278 263 movz %xcc, PN_ICACHE_LSIZE, %g5
279 264 CH_ICACHE_FLUSHALL(%g4, %g5, %g6, %g3)
280 265
281 266 /*
282 267 * Turn the I$ back on. Changing DCU_IC requires flush.
283 268 */
284 269 ldxa [%g0]ASI_DCU, %g3
285 270 or %g3, DCU_IC, %g3
286 271 stxa %g3, [%g0]ASI_DCU
287 272 flush %g0
288 273 4:
289 274
290 275 #ifdef TRAPTRACE
291 276 /*
292 277 * Get current trap trace entry physical pointer.
293 278 */
294 279 CPU_INDEX(%g6, %g5)
295 280 sll %g6, TRAPTR_SIZE_SHIFT, %g6
296 281 set trap_trace_ctl, %g5
297 282 add %g6, %g5, %g6
298 283 ld [%g6 + TRAPTR_LIMIT], %g5
299 284 tst %g5
300 285 be %icc, skip_traptrace
301 286 nop
302 287 ldx [%g6 + TRAPTR_PBASE], %g5
303 288 ld [%g6 + TRAPTR_OFFSET], %g4
304 289 add %g5, %g4, %g5
305 290
306 291 /*
307 292 * Create trap trace entry.
308 293 */
309 294 rd %asi, %g7
310 295 wr %g0, TRAPTR_ASI, %asi
311 296 rd STICK, %g4
312 297 stxa %g4, [%g5 + TRAP_ENT_TICK]%asi
313 298 rdpr %tl, %g4
314 299 stha %g4, [%g5 + TRAP_ENT_TL]%asi
315 300 rdpr %tt, %g4
316 301 stha %g4, [%g5 + TRAP_ENT_TT]%asi
317 302 rdpr %tpc, %g4
318 303 stna %g4, [%g5 + TRAP_ENT_TPC]%asi
319 304 rdpr %tstate, %g4
320 305 stxa %g4, [%g5 + TRAP_ENT_TSTATE]%asi
321 306 stna %sp, [%g5 + TRAP_ENT_SP]%asi
322 307 stna %g0, [%g5 + TRAP_ENT_TR]%asi
323 308 wr %g0, %g7, %asi
324 309 ldxa [%g1 + CH_ERR_TL1_SDW_AFAR]%asi, %g3
325 310 ldxa [%g1 + CH_ERR_TL1_SDW_AFSR]%asi, %g4
326 311 wr %g0, TRAPTR_ASI, %asi
327 312 stna %g3, [%g5 + TRAP_ENT_F1]%asi
328 313 stna %g4, [%g5 + TRAP_ENT_F2]%asi
329 314 wr %g0, %g7, %asi
330 315 ldxa [%g1 + CH_ERR_TL1_AFAR]%asi, %g3
331 316 ldxa [%g1 + CH_ERR_TL1_AFSR]%asi, %g4
332 317 wr %g0, TRAPTR_ASI, %asi
333 318 stna %g3, [%g5 + TRAP_ENT_F3]%asi
334 319 stna %g4, [%g5 + TRAP_ENT_F4]%asi
335 320 wr %g0, %g7, %asi
336 321
337 322 /*
338 323 * Advance trap trace pointer.
339 324 */
340 325 ld [%g6 + TRAPTR_OFFSET], %g5
341 326 ld [%g6 + TRAPTR_LIMIT], %g4
342 327 st %g5, [%g6 + TRAPTR_LAST_OFFSET]
343 328 add %g5, TRAP_ENT_SIZE, %g5
344 329 sub %g4, TRAP_ENT_SIZE, %g4
345 330 cmp %g5, %g4
346 331 movge %icc, 0, %g5
347 332 st %g5, [%g6 + TRAPTR_OFFSET]
348 333 skip_traptrace:
349 334 #endif /* TRAPTRACE */
350 335
351 336 /*
352 337 * If nesting count is not zero, skip all the AFSR/AFAR
353 338 * handling and just do the necessary cache-flushing.
354 339 */
355 340 ldxa [%g1 + CH_ERR_TL1_NEST_CNT]%asi, %g2
356 341 brnz %g2, 6f
357 342 nop
358 343
359 344 /*
360 345 * If a UCU or L3_UCU followed by a WDU has occurred, go ahead
361 346 * and panic since a UE will occur (on the retry) before the
362 347 * UCU and WDU messages are enqueued. On a Panther processor,
363 348 * we need to also see an L3_WDU before panicking. Note that
364 349 * we avoid accessing the _EXT ASIs if not on a Panther.
365 350 */
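
Review note: the branch ladder below implements one boolean decision; sketched
in C (argument names are illustrative, the C_AFSR_* bits they summarize are
real).

    /* Sketch only: panic decision for the UCU/WDU pattern. */
    int
    fecc_should_panic(int is_panther, int ucu, int l3_ucu,
        int wdu, int l3_wdu)
    {
            if (!(ucu || (is_panther && l3_ucu)))
                    return (0);     /* no UCU or L3_UCU seen */
            if (!wdu)
                    return (0);     /* no WDU in shadow or current AFSR */
            if (!is_panther)
                    return (1);     /* Cheetah+: UCU + WDU is fatal */
            return (l3_wdu);        /* Panther also requires L3_WDU */
    }
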
366 351 ldxa [%g1 + CH_ERR_TL1_SDW_AFSR]%asi, %g3
367 352 set 1, %g4
368 353 sllx %g4, C_AFSR_UCU_SHIFT, %g4
369 354 btst %g4, %g3 ! UCU in original shadow AFSR?
370 355 bnz %xcc, 5f
371 356 nop
372 357 GET_CPU_IMPL(%g6)
373 358 cmp %g6, PANTHER_IMPL
374 359 bne %xcc, 6f ! not Panther, no UCU, skip the rest
375 360 nop
376 361 ldxa [%g1 + CH_ERR_TL1_SDW_AFSR_EXT]%asi, %g3
377 362 btst C_AFSR_L3_UCU, %g3 ! L3_UCU in original shadow AFSR_EXT?
378 363 bz %xcc, 6f ! neither UCU nor L3_UCU was seen
379 364 nop
380 365 5:
381 366 ldxa [%g1 + CH_ERR_TL1_AFSR]%asi, %g4 ! original AFSR
382 367 ldxa [%g0]ASI_AFSR, %g3 ! current AFSR
383 368 or %g3, %g4, %g3 ! %g3 = original + current AFSR
384 369 set 1, %g4
385 370 sllx %g4, C_AFSR_WDU_SHIFT, %g4
386 371 btst %g4, %g3 ! WDU in original or current AFSR?
387 372 bz %xcc, 6f ! no WDU, skip remaining tests
388 373 nop
389 374 GET_CPU_IMPL(%g6)
390 375 cmp %g6, PANTHER_IMPL
391 376 bne %xcc, fecc_tl1_err ! if not Panther, panic (saw UCU, WDU)
392 377 nop
393 378 ldxa [%g1 + CH_ERR_TL1_SDW_AFSR_EXT]%asi, %g4 ! original AFSR_EXT
394 379 set ASI_AFSR_EXT_VA, %g6 ! ASI of current AFSR_EXT
395 380 ldxa [%g6]ASI_AFSR, %g3 ! value of current AFSR_EXT
396 381 or %g3, %g4, %g3 ! %g3 = original + current AFSR_EXT
397 382 btst C_AFSR_L3_WDU, %g3 ! L3_WDU in original or current AFSR_EXT?
398 383 bnz %xcc, fecc_tl1_err ! panic (saw L3_WDU and UCU or L3_UCU)
399 384 nop
400 385 6:
401 386 /*
402 387 * We fall into this macro if we've successfully logged the error in
403 388 * the ch_err_tl1_data structure and want the PIL15 softint to pick
404 389 * it up and log it. %g1 must point to the ch_err_tl1_data structure.
405 390 * Restores the %g registers and issues retry.
406 391 */
407 392 CH_ERR_TL1_EXIT;
408 393
409 394 /*
410 395 * Establish panic exit label.
411 396 */
412 397 CH_ERR_TL1_PANIC_EXIT(fecc_tl1_err);
413 398
414 399 SET_SIZE(fast_ecc_tl1_err)
415 400
416 -#endif /* lint */
417 401
418 -
419 -#if defined(lint)
420 402 /*
421 403 * scrubphys - Pass in the aligned physical memory address
422 404 * that you want to scrub, along with the ecache set size.
423 405 *
424 406 * 1) Displacement flush the E$ line corresponding to %addr.
425 407 * The first ldxa guarantees that the %addr is no longer in
426 408 * M, O, or E (goes to I, or S if an instruction fetch also happens).
427 409 * 2) "Write" the data using a CAS %addr,%g0,%g0.
428 410 * The casxa guarantees a transition from I to M or S to M.
429 411 * 3) Displacement flush the E$ line corresponding to %addr.
430 412 * The second ldxa pushes the M line out of the ecache, into the
431 413 * writeback buffers, on the way to memory.
432 414 * 4) The "membar #Sync" pushes the cache line out of the writeback
433 415 * buffers onto the bus, and finally out to DRAM.
434 416 *
435 417 * This is a modified version of the algorithm suggested by Gary Lauterbach.
436 418 * In theory the CAS %addr,%g0,%g0 is supposed to mark the addr's cache line
437 419 * as modified, but we found that on Spitfire, if it misses in the
438 420 * E$ it will probably install as M, whereas if it hits in the E$ it
439 421 * will stay E if the store doesn't happen. So the first displacement flush
440 422 * should ensure that the CAS will miss in the E$. Arrgh.
441 423 */
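
Review note: the four steps above, rendered in C. All helpers are hypothetical
stand-ins (displacement_flush() for the E$ flush macros, casxa_phys() for the
casxa through ASI_MEM); this is a sketch, not kernel API.

    /* Sketch only: the scrubphys sequence. */
    void
    scrubphys_sketch(uint64_t paddr, uint64_t ec_set_size)
    {
            uint64_t saved = interrupts_off();      /* clear IE/AM */

            displacement_flush(paddr, ec_set_size); /* 1) line -> I or S */
            casxa_phys(paddr);              /* 2) I->M or S->M, data kept */
            displacement_flush(paddr, ec_set_size); /* 3) push M line out */
            interrupts_restore(saved);
            membar_sync();                  /* 4) drain writeback buffers */
    }
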
442 -/* ARGSUSED */
443 -void
444 -scrubphys(uint64_t paddr, int ecache_set_size)
445 -{}
446 -
447 -#else /* lint */
448 424 ENTRY(scrubphys)
449 425 rdpr %pstate, %o4
450 426 andn %o4, PSTATE_IE | PSTATE_AM, %o5
451 427 wrpr %o5, %g0, %pstate ! clear IE, AM bits
452 428
453 429 GET_CPU_IMPL(%o5) ! Panther Ecache is flushed differently
454 430 cmp %o5, PANTHER_IMPL
455 431 bne scrubphys_1
456 432 nop
457 433 PN_ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3, %o5)
458 434 casxa [%o0]ASI_MEM, %g0, %g0
459 435 PN_ECACHE_REFLUSH_LINE(%o1, %o2, %o3, %o0)
460 436 b scrubphys_2
461 437 nop
462 438 scrubphys_1:
463 439 ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3)
464 440 casxa [%o0]ASI_MEM, %g0, %g0
465 441 ECACHE_REFLUSH_LINE(%o1, %o2, %o3)
466 442 scrubphys_2:
467 443 wrpr %g0, %o4, %pstate ! restore earlier pstate register value
468 444
469 445 retl
470 446 membar #Sync ! move the data out of the load buffer
471 447 SET_SIZE(scrubphys)
472 448
473 -#endif /* lint */
474 449
475 -
476 -#if defined(lint)
477 450 /*
478 451 * clearphys - Pass in the physical memory address of the checkblock
479 452 * that you want to push out, cleared with a recognizable pattern,
480 453 * from the ecache.
481 454 *
482 455 * To ensure that the ecc gets recalculated after the bad data is cleared,
483 456 * we must write out enough data to fill the w$ line (64 bytes). So we read
484 457 * in an entire ecache subblock's worth of data, and write it back out.
485 458 * Then we overwrite the 16 bytes of bad data with the pattern.
486 459 */
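
Review note: the W$ priming described above, sketched in C. phys_load() and
phys_store() are hypothetical stand-ins for ldxa/stxa through ASI_MEM;
CH_ECACHE_SUBBLK_SIZE (64) and the pattern are the real values used below.

    /* Sketch only: refill the subblock, then plant the pattern. */
    void
    clearphys_fill_sketch(uint64_t paddr)
    {
            uint64_t base = paddr & ~(uint64_t)(CH_ECACHE_SUBBLK_SIZE - 1);
            int off;

            /* read and rewrite all 64 bytes to fill the W$ line */
            for (off = CH_ECACHE_SUBBLK_SIZE - 8; off >= 0; off -= 8)
                    phys_store(base + off, phys_load(base + off));

            /* overwrite the 16 bytes of bad data with the pattern */
            phys_store(paddr, 0xbadecc00badecc01ULL);
            phys_store(paddr + 8, 0xbadecc00badecc01ULL);
    }
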
487 -/* ARGSUSED */
488 -void
489 -clearphys(uint64_t paddr, int ecache_set_size, int ecache_linesize)
490 -{
491 -}
492 -
493 -#else /* lint */
494 460 ENTRY(clearphys)
495 461 /* turn off IE, AM bits */
496 462 rdpr %pstate, %o4
497 463 andn %o4, PSTATE_IE | PSTATE_AM, %o5
498 464 wrpr %o5, %g0, %pstate
499 465
500 466 /* turn off NCEEN */
501 467 ldxa [%g0]ASI_ESTATE_ERR, %o5
502 468 andn %o5, EN_REG_NCEEN, %o3
503 469 stxa %o3, [%g0]ASI_ESTATE_ERR
504 470 membar #Sync
505 471
506 472 /* align address passed with 64 bytes subblock size */
507 473 mov CH_ECACHE_SUBBLK_SIZE, %o2
508 474 andn %o0, (CH_ECACHE_SUBBLK_SIZE - 1), %g1
509 475
510 476 /* move the good data into the W$ */
511 477 clearphys_1:
512 478 subcc %o2, 8, %o2
513 479 ldxa [%g1 + %o2]ASI_MEM, %g2
514 480 bge clearphys_1
515 481 stxa %g2, [%g1 + %o2]ASI_MEM
516 482
517 483 /* now overwrite the bad data */
518 484 setx 0xbadecc00badecc01, %g1, %g2
519 485 stxa %g2, [%o0]ASI_MEM
520 486 mov 8, %g1
521 487 stxa %g2, [%o0 + %g1]ASI_MEM
522 488
523 489 GET_CPU_IMPL(%o3) ! Panther Ecache is flushed differently
524 490 cmp %o3, PANTHER_IMPL
525 491 bne clearphys_2
526 492 nop
527 493 PN_ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3, %g1)
528 494 casxa [%o0]ASI_MEM, %g0, %g0
529 495 PN_ECACHE_REFLUSH_LINE(%o1, %o2, %o3, %o0)
530 496 b clearphys_3
531 497 nop
532 498 clearphys_2:
533 499 ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3)
534 500 casxa [%o0]ASI_MEM, %g0, %g0
535 501 ECACHE_REFLUSH_LINE(%o1, %o2, %o3)
536 502 clearphys_3:
537 503 /* clear the AFSR */
538 504 ldxa [%g0]ASI_AFSR, %o1
539 505 stxa %o1, [%g0]ASI_AFSR
540 506 membar #Sync
541 507
542 508 /* turn NCEEN back on */
543 509 stxa %o5, [%g0]ASI_ESTATE_ERR
544 510 membar #Sync
545 511
546 512 /* return and re-enable IE and AM */
547 513 retl
548 514 wrpr %g0, %o4, %pstate
549 515 SET_SIZE(clearphys)
550 516
551 -#endif /* lint */
552 517
553 -
554 -#if defined(lint)
555 518 /*
556 519 * Cheetah+ Ecache displacement flush the specified line from the E$
557 520 *
558 521 * For Panther, this means flushing the specified line from both the
559 522 * L2 cache and L3 cache.
560 523 *
561 524 * Register usage:
562 525 * %o0 - 64 bit physical address for flushing
563 526 * %o1 - Ecache set size
564 527 */
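
Review note: the routine below is a thin dispatcher over the two macro
families; in C terms. cpu_impl() is a hypothetical stand-in for GET_CPU_IMPL;
pn_flush_line_sketch() is the earlier Panther sketch, and
chp_flush_line_sketch() a hypothetical Cheetah+ equivalent built on
chp_flush_index().

    /* Sketch only: pick the flush flavor by CPU implementation. */
    void
    ecache_flush_line_sketch(uint64_t flushaddr, uint64_t ec_set_size)
    {
            if (cpu_impl() == PANTHER_IMPL)
                    pn_flush_line_sketch(flushaddr);        /* L2 + L3 */
            else
                    chp_flush_line_sketch(flushaddr, ec_set_size); /* E$ */
    }
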
565 -/*ARGSUSED*/
566 -void
567 -ecache_flush_line(uint64_t flushaddr, int ec_set_size)
568 -{
569 -}
570 -#else /* lint */
571 528 ENTRY(ecache_flush_line)
572 529
573 530 GET_CPU_IMPL(%o3) ! Panther Ecache is flushed differently
574 531 cmp %o3, PANTHER_IMPL
575 532 bne ecache_flush_line_1
576 533 nop
577 534
578 535 PN_ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3, %o4)
579 536 b ecache_flush_line_2
580 537 nop
581 538 ecache_flush_line_1:
582 539 ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3)
583 540 ecache_flush_line_2:
584 541 retl
585 542 nop
586 543 SET_SIZE(ecache_flush_line)
587 -#endif /* lint */
588 544
589 -#if defined(lint)
590 -void
591 -set_afsr_ext(uint64_t afsr_ext)
592 -{
593 - afsr_ext = afsr_ext;
594 -}
595 -#else /* lint */
596 -
597 545 ENTRY(set_afsr_ext)
598 546 set ASI_AFSR_EXT_VA, %o1
599 547 stxa %o0, [%o1]ASI_AFSR ! afsr_ext reg
600 548 membar #Sync
601 549 retl
602 550 nop
603 551 SET_SIZE(set_afsr_ext)
604 552
605 -#endif /* lint */
606 553
607 -
608 -#if defined(lint)
609 554 /*
610 555 * The CPU jumps here from the MMU exception handler if an ITLB parity
611 556 * error is detected and we are running on Panther.
612 557 *
613 558 * In this routine we collect diagnostic information and write it to our
614 559 * logout structure (if possible) and clear all ITLB entries that may have
615 560 * caused our parity trap.
616 561 * Then we call cpu_tlb_parity_error via systrap in order to drop down to TL0
617 562 * and log any error messages. As for parameters to cpu_tlb_parity_error, we
618 563 * send two:
619 564 *
620 565 * %g2 - Contains the VA whose lookup in the ITLB caused the parity error
621 566 * %g3 - Contains the tlo_info field of the pn_tlb_logout logout struct,
622 567 * regardless of whether or not we actually used the logout struct.
623 568 *
624 569 * In the TL0 handler (cpu_tlb_parity_error) we will compare those two
625 570 * parameters to the data contained in the logout structure in order to
626 571 * determine whether the logout information is valid for this particular
627 572 * error or not.
628 573 */
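
Review note: how the %g3 "tlo_info" parameter is assembled, as a C sketch
(the PN_* and TAGREAD_* masks are the real ones used below; the function
name is illustrative).

    /* Sketch only: pack page size, context and the IMMU flag. */
    uint64_t
    itlb_tlo_info(uint64_t tag_access_ext, uint64_t tag_access)
    {
            uint64_t info = tag_access_ext & PN_ITLB_PGSZ_MASK;

            info |= tag_access & TAGREAD_CTX_MASK;  /* trap context */
            info |= 1ULL << PN_TLO_INFO_IMMU_SHIFT; /* mark as IMMU */
            return (info);
    }
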
629 -void
630 -itlb_parity_trap(void)
631 -{}
632 -
633 -#else /* lint */
634 -
635 574 ENTRY_NP(itlb_parity_trap)
636 575 /*
637 576 * Collect important information about the trap which will be
638 577 * used as a parameter to the TL0 handler.
639 578 */
640 579 wr %g0, ASI_IMMU, %asi
641 580 rdpr %tpc, %g2 ! VA that caused the IMMU trap
642 581 ldxa [MMU_TAG_ACCESS_EXT]%asi, %g3 ! read the trap VA page size
643 582 set PN_ITLB_PGSZ_MASK, %g4
644 583 and %g3, %g4, %g3
645 584 ldxa [MMU_TAG_ACCESS]%asi, %g4
646 585 set TAGREAD_CTX_MASK, %g5
647 586 and %g4, %g5, %g4
648 587 or %g4, %g3, %g3 ! 'or' in the trap context and
649 588 mov 1, %g4 ! add the IMMU flag to complete
650 589 sllx %g4, PN_TLO_INFO_IMMU_SHIFT, %g4
651 590 or %g4, %g3, %g3 ! the tlo_info field for logout
652 591 stxa %g0,[MMU_SFSR]%asi ! clear the SFSR
653 592 membar #Sync
654 593
655 594 /*
656 595 * at this point:
657 596 * %g2 - contains the VA whose lookup caused the trap
658 597 * %g3 - contains the tlo_info field
659 598 *
660 599 * Next, we calculate the TLB index value for the failing VA.
661 600 */
662 601 mov %g2, %g4 ! We need the ITLB index
663 602 set PN_ITLB_PGSZ_MASK, %g5
664 603 and %g3, %g5, %g5
665 604 srlx %g5, PN_ITLB_PGSZ_SHIFT, %g5
666 605 PN_GET_TLB_INDEX(%g4, %g5) ! %g4 has the index
667 606 sllx %g4, PN_TLB_ACC_IDX_SHIFT, %g4 ! shift the index into place
668 607 set PN_ITLB_T512, %g5
669 608 or %g4, %g5, %g4 ! and add in the TLB ID
670 609
671 610 /*
672 611 * at this point:
673 612 * %g2 - contains the VA whose lookup caused the trap
674 613 * %g3 - contains the tlo_info field
675 614 * %g4 - contains the TLB access index value for the
676 615 * VA/PgSz in question
677 616 *
678 617 * Check to see if the logout structure is available.
679 618 */
680 619 set CHPR_TLB_LOGOUT, %g6
681 620 GET_CPU_PRIVATE_PTR(%g6, %g1, %g5, itlb_parity_trap_1)
682 621 set LOGOUT_INVALID_U32, %g6
683 622 sllx %g6, 32, %g6 ! if our logout structure is
684 623 set LOGOUT_INVALID_L32, %g5 ! unavailable or if it is
685 624 or %g5, %g6, %g5 ! already being used, then we
686 625 ldx [%g1 + PN_TLO_ADDR], %g6 ! don't collect any diagnostic
687 626 cmp %g6, %g5 ! information before clearing
688 627 bne itlb_parity_trap_1 ! and logging the error.
689 628 nop
690 629
691 630 /*
692 631 * Record the logout information. %g4 contains our index + TLB ID
693 632 * for use in ASI_ITLB_ACCESS and ASI_ITLB_TAGREAD. %g1 contains
694 633 * the pointer to our logout struct.
695 634 */
696 635 stx %g3, [%g1 + PN_TLO_INFO]
697 636 stx %g2, [%g1 + PN_TLO_ADDR]
698 637 stx %g2, [%g1 + PN_TLO_PC] ! %tpc == fault addr for IMMU
699 638
700 639 add %g1, PN_TLO_ITLB_TTE, %g1 ! move up the pointer
701 640
702 641 ldxa [%g4]ASI_ITLB_ACCESS, %g5 ! read the data
703 642 stx %g5, [%g1 + CH_TLO_TTE_DATA] ! store it away
704 643 ldxa [%g4]ASI_ITLB_TAGREAD, %g5 ! read the tag
705 644 stx %g5, [%g1 + CH_TLO_TTE_TAG] ! store it away
706 645
707 646 set PN_TLB_ACC_WAY_BIT, %g6 ! same thing again for way 1
708 647 or %g4, %g6, %g4
709 648 add %g1, CH_TLO_TTE_SIZE, %g1 ! move up the pointer
710 649
711 650 ldxa [%g4]ASI_ITLB_ACCESS, %g5 ! read the data
712 651 stx %g5, [%g1 + CH_TLO_TTE_DATA] ! store it away
713 652 ldxa [%g4]ASI_ITLB_TAGREAD, %g5 ! read the tag
714 653 stx %g5, [%g1 + CH_TLO_TTE_TAG] ! store it away
715 654
716 655 andn %g4, %g6, %g4 ! back to way 0
717 656
718 657 itlb_parity_trap_1:
719 658 /*
720 659 * at this point:
721 660 * %g2 - contains the VA whose lookup caused the trap
722 661 * %g3 - contains the tlo_info field
723 662 * %g4 - contains the TLB access index value for the
724 663 * VA/PgSz in question
725 664 *
726 665 * Here we will clear the errors from the TLB.
727 666 */
728 667 set MMU_TAG_ACCESS, %g5 ! We write a TTE tag value of
729 668 stxa %g0, [%g5]ASI_IMMU ! 0 as it will be invalid.
730 669 stxa %g0, [%g4]ASI_ITLB_ACCESS ! Write the data and tag
731 670 membar #Sync
732 671
733 672 set PN_TLB_ACC_WAY_BIT, %g6 ! same thing again for way 1
734 673 or %g4, %g6, %g4
735 674
736 675 stxa %g0, [%g4]ASI_ITLB_ACCESS ! Write same data and tag
737 676 membar #Sync
738 677
739 678 sethi %hi(FLUSH_ADDR), %g6 ! PRM says we need to issue a
740 679 flush %g6 ! flush after writing MMU regs
741 680
742 681 /*
743 682 * at this point:
744 683 * %g2 - contains the VA whose lookup caused the trap
745 684 * %g3 - contains the tlo_info field
746 685 *
747 686 * Call cpu_tlb_parity_error via systrap at PIL 14 unless we're
748 687 * already at PIL 15. */
749 688 set cpu_tlb_parity_error, %g1
750 689 rdpr %pil, %g4
751 690 cmp %g4, PIL_14
752 691 movl %icc, PIL_14, %g4
753 692 ba sys_trap
754 693 nop
755 694 SET_SIZE(itlb_parity_trap)
756 695
757 -#endif /* lint */
758 -
759 -#if defined(lint)
760 696 /*
761 697 * The CPU jumps here from the MMU exception handler if a DTLB parity
762 698 * error is detected and we are running on Panther.
763 699 *
764 700 * In this routine we collect diagnostic information and write it to our
765 701 * logout structure (if possible) and clear all DTLB entries that may have
766 702 * caused our parity trap.
767 703 * Then we call cpu_tlb_parity_error via systrap in order to drop down to TL0
768 704 * and log any error messages. As for parameters to cpu_tlb_parity_error, we
769 705 * send two:
770 706 *
771 707 * %g2 - Contains the VA whose lookup in the DTLB caused the parity error
772 708 * %g3 - Contains the tlo_info field of the pn_tlb_logout logout struct,
773 709 * regardless of whether or not we actually used the logout struct.
774 710 *
775 711 * In the TL0 handler (cpu_tlb_parity_error) we will compare those two
776 712 * parameters to the data contained in the logout structure in order to
777 713 * determine whether the logout information is valid for this particular
778 714 * error or not.
779 715 */
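
Review note: unlike the ITLB case, the two data T512s may be configured with
different page sizes, so the failing VA is indexed twice. Sketch below;
tlb_index() is a hypothetical stand-in for PN_GET_TLB_INDEX, the constants
are real.

    /* Sketch only: compute both access indexes for a failing VA. */
    void
    dtlb_indexes(uint64_t va, uint64_t tlo_info,
        uint64_t *idx0, uint64_t *idx1)
    {
            uint64_t sz0 = (tlo_info & PN_DTLB_PGSZ0_MASK) >>
                PN_DTLB_PGSZ0_SHIFT;
            uint64_t sz1 = (tlo_info & PN_DTLB_PGSZ1_MASK) >>
                PN_DTLB_PGSZ1_SHIFT;

            *idx0 = (tlb_index(va, sz0) << PN_TLB_ACC_IDX_SHIFT) |
                PN_DTLB_T512_0;
            *idx1 = (tlb_index(va, sz1) << PN_TLB_ACC_IDX_SHIFT) |
                PN_DTLB_T512_1;
    }
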
780 -void
781 -dtlb_parity_trap(void)
782 -{}
783 -
784 -#else /* lint */
785 -
786 716 ENTRY_NP(dtlb_parity_trap)
787 717 /*
788 718 * Collect important information about the trap which will be
789 719 * used as a parameter to the TL0 handler.
790 720 */
791 721 wr %g0, ASI_DMMU, %asi
792 722 ldxa [MMU_SFAR]%asi, %g2 ! VA that caused the DMMU trap
793 723 ldxa [MMU_TAG_ACCESS_EXT]%asi, %g3 ! read the trap VA page sizes
794 724 set PN_DTLB_PGSZ_MASK, %g4
795 725 and %g3, %g4, %g3
796 726 ldxa [MMU_TAG_ACCESS]%asi, %g4
797 727 set TAGREAD_CTX_MASK, %g5 ! 'or' in the trap context
798 728 and %g4, %g5, %g4 ! to complete the tlo_info
799 729 or %g4, %g3, %g3 ! field for logout
800 730 stxa %g0,[MMU_SFSR]%asi ! clear the SFSR
801 731 membar #Sync
802 732
803 733 /*
804 734 * at this point:
805 735 * %g2 - contains the VA whose lookup caused the trap
806 736 * %g3 - contains the tlo_info field
807 737 *
808 738 * Calculate the TLB index values for the failing VA. Since the T512
809 739 * TLBs can be configured for different page sizes, we need to find
810 740 * the index into each one separately.
811 741 */
812 742 mov %g2, %g4 ! First we get the DTLB_0 index
813 743 set PN_DTLB_PGSZ0_MASK, %g5
814 744 and %g3, %g5, %g5
815 745 srlx %g5, PN_DTLB_PGSZ0_SHIFT, %g5
816 746 PN_GET_TLB_INDEX(%g4, %g5) ! %g4 has the DTLB_0 index
817 747 sllx %g4, PN_TLB_ACC_IDX_SHIFT, %g4 ! shift the index into place
818 748 set PN_DTLB_T512_0, %g5
819 749 or %g4, %g5, %g4 ! and add in the TLB ID
820 750
821 751 mov %g2, %g7 ! Next we get the DTLB_1 index
822 752 set PN_DTLB_PGSZ1_MASK, %g5
823 753 and %g3, %g5, %g5
824 754 srlx %g5, PN_DTLB_PGSZ1_SHIFT, %g5
825 755 PN_GET_TLB_INDEX(%g7, %g5) ! %g7 has the DTLB_1 index
826 756 sllx %g7, PN_TLB_ACC_IDX_SHIFT, %g7 ! shift the index into place
827 757 set PN_DTLB_T512_1, %g5
828 758 or %g7, %g5, %g7 ! and add in the TLB ID
829 759
830 760 /*
831 761 * at this point:
832 762 * %g2 - contains the VA whose lookup caused the trap
833 763 * %g3 - contains the tlo_info field
834 764 * %g4 - contains the T512_0 access index value for the
835 765 * VA/PgSz in question
836 766 * %g7 - contains the T512_1 access index value for the
837 767 * VA/PgSz in question
838 768 *
839 769 * If this trap happened at TL>0, then we don't want to mess
840 770 * with the normal logout struct since that could caused a TLB
841 771 * with the normal logout struct since that could cause a TLB
842 772 */
843 773 rdpr %tl, %g6 ! read current trap level
844 774 cmp %g6, 1 ! skip over the tl>1 code
845 775 ble dtlb_parity_trap_1 ! if TL <= 1.
846 776 nop
847 777
848 778 /*
849 779 * If we are here, then the trap happened at TL>1. Simply
850 780 * update our tlo_info field and then skip to the TLB flush
851 781 * code.
852 782 */
853 783 mov 1, %g6
854 784 sllx %g6, PN_TLO_INFO_TL1_SHIFT, %g6
855 785 or %g6, %g3, %g3
856 786 ba dtlb_parity_trap_2
857 787 nop
858 788
859 789 dtlb_parity_trap_1:
860 790 /*
861 791 * at this point:
862 792 * %g2 - contains the VA whose lookup caused the trap
863 793 * %g3 - contains the tlo_info field
864 794 * %g4 - contains the T512_0 access index value for the
865 795 * VA/PgSz in question
866 796 * %g7 - contains the T512_1 access index value for the
867 797 * VA/PgSz in question
868 798 *
869 799 * Check to see if the logout structure is available.
870 800 */
871 801 set CHPR_TLB_LOGOUT, %g6
872 802 GET_CPU_PRIVATE_PTR(%g6, %g1, %g5, dtlb_parity_trap_2)
873 803 set LOGOUT_INVALID_U32, %g6
874 804 sllx %g6, 32, %g6 ! if our logout structure is
875 805 set LOGOUT_INVALID_L32, %g5 ! unavailable or if it is
876 806 or %g5, %g6, %g5 ! already being used, then we
877 807 ldx [%g1 + PN_TLO_ADDR], %g6 ! don't collect any diagnostic
878 808 cmp %g6, %g5 ! information before clearing
879 809 bne dtlb_parity_trap_2 ! and logging the error.
880 810 nop
881 811
882 812 /*
883 813 * Record the logout information. %g4 contains our DTLB_0
884 814 * index + TLB ID and %g7 contains our DTLB_1 index + TLB ID
885 815 * both of which will be used for ASI_DTLB_ACCESS and
886 816 * ASI_DTLB_TAGREAD. %g1 contains the pointer to our logout
887 817 * struct.
888 818 */
889 819 stx %g3, [%g1 + PN_TLO_INFO]
890 820 stx %g2, [%g1 + PN_TLO_ADDR]
891 821 rdpr %tpc, %g5
892 822 stx %g5, [%g1 + PN_TLO_PC]
893 823
894 824 add %g1, PN_TLO_DTLB_TTE, %g1 ! move up the pointer
895 825
896 826 ldxa [%g4]ASI_DTLB_ACCESS, %g5 ! read the data from DTLB_0
897 827 stx %g5, [%g1 + CH_TLO_TTE_DATA] ! way 0 and store it away
898 828 ldxa [%g4]ASI_DTLB_TAGREAD, %g5 ! read the tag from DTLB_0
899 829 stx %g5, [%g1 + CH_TLO_TTE_TAG] ! way 0 and store it away
900 830
901 831 ldxa [%g7]ASI_DTLB_ACCESS, %g5 ! now repeat for DTLB_1 way 0
902 832 stx %g5, [%g1 + (CH_TLO_TTE_DATA + (CH_TLO_TTE_SIZE * 2))]
903 833 ldxa [%g7]ASI_DTLB_TAGREAD, %g5
904 834 stx %g5, [%g1 + (CH_TLO_TTE_TAG + (CH_TLO_TTE_SIZE * 2))]
905 835
906 836 set PN_TLB_ACC_WAY_BIT, %g6 ! same thing again for way 1
907 837 or %g4, %g6, %g4 ! of each TLB.
908 838 or %g7, %g6, %g7
909 839 add %g1, CH_TLO_TTE_SIZE, %g1 ! move up the pointer
910 840
911 841 ldxa [%g4]ASI_DTLB_ACCESS, %g5 ! read the data from DTLB_0
912 842 stx %g5, [%g1 + CH_TLO_TTE_DATA] ! way 1 and store it away
913 843 ldxa [%g4]ASI_DTLB_TAGREAD, %g5 ! read the tag from DTLB_0
914 844 stx %g5, [%g1 + CH_TLO_TTE_TAG] ! way 1 and store it away
915 845
916 846 ldxa [%g7]ASI_DTLB_ACCESS, %g5 ! now repeat for DTLB_1 way 1
917 847 stx %g5, [%g1 + (CH_TLO_TTE_DATA + (CH_TLO_TTE_SIZE * 2))]
918 848 ldxa [%g7]ASI_DTLB_TAGREAD, %g5
919 849 stx %g5, [%g1 + (CH_TLO_TTE_TAG + (CH_TLO_TTE_SIZE * 2))]
920 850
921 851 andn %g4, %g6, %g4 ! back to way 0
922 852 andn %g7, %g6, %g7 ! back to way 0
923 853
924 854 dtlb_parity_trap_2:
925 855 /*
926 856 * at this point:
927 857 * %g2 - contains the VA whose lookup caused the trap
928 858 * %g3 - contains the tlo_info field
929 859 * %g4 - contains the T512_0 access index value for the
930 860 * VA/PgSz in question
931 861 * %g7 - contains the T512_1 access index value for the
932 862 * VA/PgSz in question
933 863 *
934 864 * Here we will clear the errors from the DTLB.
935 865 */
936 866 set MMU_TAG_ACCESS, %g5 ! We write a TTE tag value of
937 867 stxa %g0, [%g5]ASI_DMMU ! 0 as it will be invalid.
938 868 stxa %g0, [%g4]ASI_DTLB_ACCESS ! Write the data and tag.
939 869 stxa %g0, [%g7]ASI_DTLB_ACCESS ! Now repeat for DTLB_1 way 0
940 870 membar #Sync
941 871
942 872 set PN_TLB_ACC_WAY_BIT, %g6 ! same thing again for way 1
943 873 or %g4, %g6, %g4
944 874 or %g7, %g6, %g7
945 875
946 876 stxa %g0, [%g4]ASI_DTLB_ACCESS ! Write same data and tag.
947 877 stxa %g0, [%g7]ASI_DTLB_ACCESS ! Now repeat for DTLB_1 way 1
948 878 membar #Sync
949 879
950 880 sethi %hi(FLUSH_ADDR), %g6 ! PRM says we need to issue a
951 881 flush %g6 ! flush after writing MMU regs
952 882
953 883 /*
954 884 * at this point:
955 885 * %g2 - contains the VA whose lookup caused the trap
956 886 * %g3 - contains the tlo_info field
957 887 *
958 888 * Call cpu_tlb_parity_error via systrap at PIL 14 unless we're
959 889 * already at PIL 15. We do this even for TL>1 traps since
960 890 * those will lead to a system panic.
961 891 */
962 892 set cpu_tlb_parity_error, %g1
963 893 rdpr %pil, %g4
964 894 cmp %g4, PIL_14
965 895 movl %icc, PIL_14, %g4
966 896 ba sys_trap
967 897 nop
968 898 SET_SIZE(dtlb_parity_trap)
969 899
970 -#endif /* lint */
971 900
972 -
973 -#if defined(lint)
974 901 /*
975 902 * Calculates the Panther TLB index based on a virtual address and page size
976 903 *
977 904 * Register usage:
978 905 * %o0 - virtual address whose index we want
979 906 * %o1 - Page Size of the TLB in question as encoded in the
980 907 * ASI_[D|I]MMU_TAG_ACCESS_EXT register.
981 908 */
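
Review note: illustrative only. One plausible expansion of PN_GET_TLB_INDEX,
assuming the usual sun4u encoding where the page shift is 13 + 3 * pg_sz
(8K/64K/512K/4M) and a 512-entry, 2-way T512 (256 indexes); the macro in
cheetahasm.h is authoritative.

    /* Sketch only: assumed index math; defer to PN_GET_TLB_INDEX. */
    uint64_t
    pn_tlb_index_sketch(uint64_t va, uint64_t pg_sz)
    {
            return ((va >> (13 + 3 * pg_sz)) & 0xff);
    }
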
982 -uint64_t
983 -pn_get_tlb_index(uint64_t va, uint64_t pg_sz)
984 -{
985 - return ((va + pg_sz)-(va + pg_sz));
986 -}
987 -#else /* lint */
988 909 ENTRY(pn_get_tlb_index)
989 910
990 911 PN_GET_TLB_INDEX(%o0, %o1)
991 912
992 913 retl
993 914 nop
994 915 SET_SIZE(pn_get_tlb_index)
995 -#endif /* lint */
996 916
997 917
998 -#if defined(lint)
999 918 /*
1000 919 * For Panther CPUs we need to flush the IPB after any I$ or D$
1001 920 * parity errors are detected.
1002 921 */
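
Review note: the loop below in C. ipb_tag_store() is a hypothetical stand-in
for the stxa through ASI_IPB_TAG; the PN_IPB_* constants are real.

    /* Sketch only: zero every IPB tag to invalidate the buffer. */
    void
    flush_ipb_sketch(void)
    {
            uint64_t addr;

            for (addr = 0; addr <= PN_IPB_TAG_ADDR_MAX;
                addr += PN_IPB_TAG_ADDR_LINESIZE)
                    ipb_tag_store(addr, 0);
    }
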
1003 -void
1004 -flush_ipb(void)
1005 -{ return; }
1006 -
1007 -#else /* lint */
1008 -
1009 922 ENTRY(flush_ipb)
1010 923 clr %o0
1011 924
1012 925 flush_ipb_1:
1013 926 stxa %g0, [%o0]ASI_IPB_TAG
1014 927 membar #Sync
1015 928 cmp %o0, PN_IPB_TAG_ADDR_MAX
1016 929 blt flush_ipb_1
1017 930 add %o0, PN_IPB_TAG_ADDR_LINESIZE, %o0
1018 931
1019 932 sethi %hi(FLUSH_ADDR), %o0
1020 933 flush %o0
1021 934 retl
1022 935 nop
1023 936 SET_SIZE(flush_ipb)
1024 937
1025 -#endif /* lint */
1026 938
1027 -