restore sparc comments
de-linting of .s files
--- old/usr/src/uts/sun4u/cpu/spitfire_asm.s
+++ new/usr/src/uts/sun4u/cpu/spitfire_asm.s
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 */
25 25
26 -#pragma ident "%Z%%M% %I% %E% SMI"
27 -
28 -#if !defined(lint)
29 26 #include "assym.h"
30 -#endif /* lint */
31 27
32 28 #include <sys/asm_linkage.h>
33 29 #include <sys/mmu.h>
34 30 #include <vm/hat_sfmmu.h>
35 31 #include <sys/machparam.h>
36 32 #include <sys/machcpuvar.h>
37 33 #include <sys/machthread.h>
38 34 #include <sys/privregs.h>
39 35 #include <sys/asm_linkage.h>
40 36 #include <sys/machasi.h>
41 37 #include <sys/trap.h>
42 38 #include <sys/spitregs.h>
43 39 #include <sys/xc_impl.h>
44 40 #include <sys/intreg.h>
45 41 #include <sys/async.h>
46 42
47 43 #ifdef TRAPTRACE
48 44 #include <sys/traptrace.h>
49 45 #endif /* TRAPTRACE */
50 46
51 -#ifndef lint
52 -
53 47 /* BEGIN CSTYLED */
54 48 #define DCACHE_FLUSHPAGE(arg1, arg2, tmp1, tmp2, tmp3) \
55 49 ldxa [%g0]ASI_LSU, tmp1 ;\
56 50 btst LSU_DC, tmp1 /* is dcache enabled? */ ;\
57 51 bz,pn %icc, 1f ;\
58 52 sethi %hi(dcache_linesize), tmp1 ;\
59 53 ld [tmp1 + %lo(dcache_linesize)], tmp1 ;\
60 54 sethi %hi(dflush_type), tmp2 ;\
61 55 ld [tmp2 + %lo(dflush_type)], tmp2 ;\
62 56 cmp tmp2, FLUSHPAGE_TYPE ;\
63 57 be,pt %icc, 2f ;\
64 58 sllx arg1, SF_DC_VBIT_SHIFT, arg1 /* tag to compare */ ;\
65 59 sethi %hi(dcache_size), tmp3 ;\
66 60 ld [tmp3 + %lo(dcache_size)], tmp3 ;\
67 61 cmp tmp2, FLUSHMATCH_TYPE ;\
68 62 be,pt %icc, 3f ;\
69 63 nop ;\
70 64 /* \
71 65 * flushtype = FLUSHALL_TYPE, flush the whole thing \
72 66 * tmp3 = cache size \
73 67 * tmp1 = cache line size \
74 68 */ \
75 69 sub tmp3, tmp1, tmp2 ;\
76 70 4: \
77 71 stxa %g0, [tmp2]ASI_DC_TAG ;\
78 72 membar #Sync ;\
79 73 cmp %g0, tmp2 ;\
80 74 bne,pt %icc, 4b ;\
81 75 sub tmp2, tmp1, tmp2 ;\
82 76 ba,pt %icc, 1f ;\
83 77 nop ;\
84 78 /* \
85 79 * flushtype = FLUSHPAGE_TYPE \
86 80 * arg1 = tag to compare against \
87 81 * arg2 = virtual color \
88 82 * tmp1 = cache line size \
89 83 * tmp2 = tag from cache \
90 84 * tmp3 = counter \
91 85 */ \
92 86 2: \
93 87 set MMU_PAGESIZE, tmp3 ;\
94 88 sllx arg2, MMU_PAGESHIFT, arg2 /* color to dcache page */ ;\
95 89 sub tmp3, tmp1, tmp3 ;\
96 90 4: \
97 91 ldxa [arg2 + tmp3]ASI_DC_TAG, tmp2 /* read tag */ ;\
98 92 btst SF_DC_VBIT_MASK, tmp2 ;\
99 93 bz,pn %icc, 5f /* branch if no valid sub-blocks */ ;\
100 94 andn tmp2, SF_DC_VBIT_MASK, tmp2 /* clear out v bits */ ;\
101 95 cmp tmp2, arg1 ;\
102 96 bne,pn %icc, 5f /* br if tag miss */ ;\
103 97 nop ;\
104 98 stxa %g0, [arg2 + tmp3]ASI_DC_TAG ;\
105 99 membar #Sync ;\
106 100 5: \
107 101 cmp %g0, tmp3 ;\
108 102 bnz,pt %icc, 4b /* branch if not done */ ;\
109 103 sub tmp3, tmp1, tmp3 ;\
110 104 ba,pt %icc, 1f ;\
111 105 nop ;\
112 106 /* \
113 107 * flushtype = FLUSHMATCH_TYPE \
114 108 * arg1 = tag to compare against \
115 109 * tmp1 = cache line size \
116 110 * tmp3 = cache size \
117 111 * arg2 = counter \
118 112 * tmp2 = cache tag \
119 113 */ \
120 114 3: \
121 115 sub tmp3, tmp1, arg2 ;\
122 116 4: \
123 117 ldxa [arg2]ASI_DC_TAG, tmp2 /* read tag */ ;\
124 118 btst SF_DC_VBIT_MASK, tmp2 ;\
125 119 bz,pn %icc, 5f /* br if no valid sub-blocks */ ;\
126 120 andn tmp2, SF_DC_VBIT_MASK, tmp2 /* clear out v bits */ ;\
127 121 cmp tmp2, arg1 ;\
128 122 bne,pn %icc, 5f /* branch if tag miss */ ;\
129 123 nop ;\
130 124 stxa %g0, [arg2]ASI_DC_TAG ;\
131 125 membar #Sync ;\
132 126 5: \
133 127 cmp %g0, arg2 ;\
134 128 bne,pt %icc, 4b /* branch if not done */ ;\
135 129 sub arg2, tmp1, arg2 ;\
136 130 1:
137 131
138 132 /*
139 133 * macro that flushes the entire dcache color
140 134 */
141 135 #define DCACHE_FLUSHCOLOR(arg, tmp1, tmp2) \
142 136 ldxa [%g0]ASI_LSU, tmp1; \
143 137 btst LSU_DC, tmp1; /* is dcache enabled? */ \
144 138 bz,pn %icc, 1f; \
145 139 sethi %hi(dcache_linesize), tmp1; \
146 140 ld [tmp1 + %lo(dcache_linesize)], tmp1; \
147 141 set MMU_PAGESIZE, tmp2; \
148 142 /* \
149 143 * arg = virtual color \
150 144 * tmp2 = page size \
151 145 * tmp1 = cache line size \
152 146 */ \
153 147 sllx arg, MMU_PAGESHIFT, arg; /* color to dcache page */ \
154 148 sub tmp2, tmp1, tmp2; \
155 149 2: \
156 150 stxa %g0, [arg + tmp2]ASI_DC_TAG; \
157 151 membar #Sync; \
158 152 cmp %g0, tmp2; \
159 153 bne,pt %icc, 2b; \
160 154 sub tmp2, tmp1, tmp2; \
161 155 1:
162 156
163 157 /*
164 158 * macro that flushes the entire dcache
165 159 */
166 160 #define DCACHE_FLUSHALL(size, linesize, tmp) \
167 161 ldxa [%g0]ASI_LSU, tmp; \
168 162 btst LSU_DC, tmp; /* is dcache enabled? */ \
169 163 bz,pn %icc, 1f; \
170 164 \
171 165 sub size, linesize, tmp; \
172 166 2: \
173 167 stxa %g0, [tmp]ASI_DC_TAG; \
174 168 membar #Sync; \
175 169 cmp %g0, tmp; \
176 170 bne,pt %icc, 2b; \
177 171 sub tmp, linesize, tmp; \
178 172 1:
179 173
180 174 /*
181 175 * macro that flushes the entire icache
182 176 */
183 177 #define ICACHE_FLUSHALL(size, linesize, tmp) \
184 178 ldxa [%g0]ASI_LSU, tmp; \
185 179 btst LSU_IC, tmp; \
186 180 bz,pn %icc, 1f; \
187 181 \
188 182 sub size, linesize, tmp; \
189 183 2: \
190 184 stxa %g0, [tmp]ASI_IC_TAG; \
191 185 membar #Sync; \
192 186 cmp %g0, tmp; \
193 187 bne,pt %icc, 2b; \
194 188 sub tmp, linesize, tmp; \
195 189 1:
196 190
197 191 #ifdef SF_ERRATA_32
198 192 #define SF_WORKAROUND(tmp1, tmp2) \
199 193 sethi %hi(FLUSH_ADDR), tmp2 ;\
200 194 set MMU_PCONTEXT, tmp1 ;\
201 195 stxa %g0, [tmp1]ASI_DMMU ;\
202 196 flush tmp2 ;
203 197 #else
204 198 #define SF_WORKAROUND(tmp1, tmp2)
205 199 #endif /* SF_ERRATA_32 */
206 200
207 201 /*
208 202 * arg1 = vaddr
209 203 * arg2 = ctxnum
210 204 * - disable interrupts and clear address mask
211 205 * to access 64 bit physaddr
212 206 * - Blow out the TLB, flush user page.
213 207 * . use secondary context.
214 208 */
215 209 #define VTAG_FLUSHUPAGE(lbl, arg1, arg2, tmp1, tmp2, tmp3, tmp4) \
216 210 rdpr %pstate, tmp1 ;\
217 211 andn tmp1, PSTATE_IE, tmp2 ;\
218 212 wrpr tmp2, 0, %pstate ;\
219 213 sethi %hi(FLUSH_ADDR), tmp2 ;\
220 214 set MMU_SCONTEXT, tmp3 ;\
221 215 ldxa [tmp3]ASI_DMMU, tmp4 ;\
222 216 or DEMAP_SECOND | DEMAP_PAGE_TYPE, arg1, arg1 ;\
223 217 cmp tmp4, arg2 ;\
224 218 be,a,pt %icc, lbl/**/4 ;\
225 219 nop ;\
226 220 stxa arg2, [tmp3]ASI_DMMU ;\
227 221 lbl/**/4: ;\
228 222 stxa %g0, [arg1]ASI_DTLB_DEMAP ;\
229 223 stxa %g0, [arg1]ASI_ITLB_DEMAP ;\
230 224 flush tmp2 ;\
231 225 be,a,pt %icc, lbl/**/5 ;\
232 226 nop ;\
233 227 stxa tmp4, [tmp3]ASI_DMMU ;\
234 228 flush tmp2 ;\
235 229 lbl/**/5: ;\
236 230 wrpr %g0, tmp1, %pstate
237 231
238 232
239 233 /*
240 234 * macro that flushes all the user entries in dtlb
241 235 * arg1 = dtlb entries
242 236 * - Before first compare:
243 237 * tmp4 = tte
244 238 * tmp5 = vaddr
245 239 * tmp6 = cntxnum
246 240 */
247 241 #define DTLB_FLUSH_UNLOCKED_UCTXS(lbl, arg1, tmp1, tmp2, tmp3, \
248 242 tmp4, tmp5, tmp6) \
249 243 lbl/**/0: ;\
250 244 sllx arg1, 3, tmp3 ;\
251 245 SF_WORKAROUND(tmp1, tmp2) ;\
252 246 ldxa [tmp3]ASI_DTLB_ACCESS, tmp4 ;\
253 247 srlx tmp4, 6, tmp4 ;\
254 248 andcc tmp4, 1, %g0 ;\
255 249 bnz,pn %xcc, lbl/**/1 ;\
256 250 srlx tmp4, 57, tmp4 ;\
257 251 andcc tmp4, 1, %g0 ;\
258 252 beq,pn %xcc, lbl/**/1 ;\
259 253 nop ;\
260 254 set TAGREAD_CTX_MASK, tmp1 ;\
261 255 ldxa [tmp3]ASI_DTLB_TAGREAD, tmp2 ;\
262 256 and tmp2, tmp1, tmp6 ;\
263 257 andn tmp2, tmp1, tmp5 ;\
264 258 set KCONTEXT, tmp4 ;\
265 259 cmp tmp6, tmp4 ;\
266 260 be lbl/**/1 ;\
267 261 nop ;\
268 262 VTAG_FLUSHUPAGE(VD/**/lbl, tmp5, tmp6, tmp1, tmp2, tmp3, tmp4) ;\
269 263 lbl/**/1: ;\
270 264 brgz,pt arg1, lbl/**/0 ;\
271 265 sub arg1, 1, arg1
272 266
273 267
274 268 /*
275 269 * macro that flushes all the user entries in itlb
276 270 * arg1 = itlb entries
277 271 * - Before first compare:
278 272 * tmp4 = tte
279 273 * tmp5 = vaddr
280 274 * tmp6 = cntxnum
281 275 */
282 276 #define ITLB_FLUSH_UNLOCKED_UCTXS(lbl, arg1, tmp1, tmp2, tmp3, \
283 277 tmp4, tmp5, tmp6) \
284 278 lbl/**/0: ;\
285 279 sllx arg1, 3, tmp3 ;\
286 280 SF_WORKAROUND(tmp1, tmp2) ;\
287 281 ldxa [tmp3]ASI_ITLB_ACCESS, tmp4 ;\
288 282 srlx tmp4, 6, tmp4 ;\
289 283 andcc tmp4, 1, %g0 ;\
290 284 bnz,pn %xcc, lbl/**/1 ;\
291 285 srlx tmp4, 57, tmp4 ;\
292 286 andcc tmp4, 1, %g0 ;\
293 287 beq,pn %xcc, lbl/**/1 ;\
294 288 nop ;\
295 289 set TAGREAD_CTX_MASK, tmp1 ;\
296 290 ldxa [tmp3]ASI_ITLB_TAGREAD, tmp2 ;\
297 291 and tmp2, tmp1, tmp6 ;\
298 292 andn tmp2, tmp1, tmp5 ;\
299 293 set KCONTEXT, tmp4 ;\
300 294 cmp tmp6, tmp4 ;\
301 295 be lbl/**/1 ;\
302 296 nop ;\
303 297 VTAG_FLUSHUPAGE(VI/**/lbl, tmp5, tmp6, tmp1, tmp2, tmp3, tmp4) ;\
304 298 lbl/**/1: ;\
305 299 brgz,pt arg1, lbl/**/0 ;\
306 300 sub arg1, 1, arg1
307 301
308 302
309 303
310 304 /*
311 305 * Macro for getting to offset from 'cpu_private' ptr. The 'cpu_private'
312 306 * ptr is in the machcpu structure.
313 307 * r_or_s: Register or symbol offset from 'cpu_private' ptr.
314 308 * scr1: Scratch, ptr is returned in this register.
315 309 * scr2: Scratch
316 310 */
317 311 #define GET_CPU_PRIVATE_PTR(r_or_s, scr1, scr2, label) \
318 312 CPU_ADDR(scr1, scr2); \
319 313 ldn [scr1 + CPU_PRIVATE], scr1; \
320 314 cmp scr1, 0; \
321 315 be label; \
322 316 nop; \
323 317 add scr1, r_or_s, scr1; \
324 318
325 319 #ifdef HUMMINGBIRD
326 320 /*
327 321 * UltraSPARC-IIe processor supports both 4-way set associative and
328 322 * direct map E$. For performance reasons, we flush E$ by placing it
329 323 * in direct map mode for data load/store and restore the state after
330 324 * we are done flushing it. Keep interrupts off while flushing in this
331 325 * manner.
332 326 *
333 327 * We flush the entire ecache by starting at one end and loading each
334 328 * successive ecache line for the 2*ecache-size range. We have to repeat
335 329 * the flush operation to guarantee that the entire ecache has been
336 330 * flushed.
337 331 *
338 332 * For flushing a specific physical address, we start at the aliased
339 333 * address and load at set-size stride, wrapping around at 2*ecache-size
340 334 * boundary and skipping the physical address being flushed. It takes
341 335 * 10 loads to guarantee that the physical address has been flushed.
342 336 */
343 337
344 338 #define HB_ECACHE_FLUSH_CNT 2
345 339 #define HB_PHYS_FLUSH_CNT 10 /* #loads to flush specific paddr */
346 340 #endif /* HUMMINGBIRD */
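For reference, the two Hummingbird access patterns described above can be modeled in C. This is a hedged sketch, not kernel code: visit() is a hypothetical stand-in for the ldxa [..]ASI_MEM reads, and the loop bounds mirror HB_ECACHE_FLUSH_CNT and HB_PHYS_FLUSH_CNT.

#include <stdint.h>

extern void visit(uint64_t paddr);		/* ldxa [..]ASI_MEM in the asm */

void
hb_flush_all_model(uint64_t flushbase, uint64_t ecache_size, uint64_t linesize)
{
	/* read every line in the 2*ecache-size range, HB_ECACHE_FLUSH_CNT times */
	for (int pass = 0; pass < 2; pass++)
		for (uint64_t off = 0; off < 2 * ecache_size; off += linesize)
			visit(flushbase + off);
}

void
hb_flush_paddr_model(uint64_t paddr, uint64_t flushbase, uint64_t ecache_size,
    uint64_t set_size)
{
	uint64_t mask = 2 * ecache_size - 1;		/* wrap boundary */
	uint64_t off = (paddr ^ ecache_size) & mask;	/* aliased start */

	for (int loads = 0; loads < 10; loads++) {	/* HB_PHYS_FLUSH_CNT */
		visit(flushbase + off);
		do {
			off = (off + set_size) & mask;	/* step to next set */
		} while (off == paddr - flushbase);	/* skip the target */
	}
}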
347 341
348 342 /* END CSTYLED */
349 343
350 -#endif /* !lint */
351 -
352 344 /*
353 345 * Spitfire MMU and Cache operations.
354 346 */
355 347
356 -#if defined(lint)
357 -
358 -/*ARGSUSED*/
359 -void
360 -vtag_flushpage(caddr_t vaddr, uint64_t sfmmup)
361 -{}
362 -
363 -/*ARGSUSED*/
364 -void
365 -vtag_flushall(void)
366 -{}
367 -
368 -/*ARGSUSED*/
369 -void
370 -vtag_flushall_uctxs(void)
371 -{}
372 -
373 -/*ARGSUSED*/
374 -void
375 -vtag_flushpage_tl1(uint64_t vaddr, uint64_t sfmmup)
376 -{}
377 -
378 -/*ARGSUSED*/
379 -void
380 -vtag_flush_pgcnt_tl1(uint64_t vaddr, uint64_t sfmmup_pgcnt)
381 -{}
382 -
383 -/*ARGSUSED*/
384 -void
385 -vtag_flushall_tl1(uint64_t dummy1, uint64_t dummy2)
386 -{}
387 -
388 -/*ARGSUSED*/
389 -void
390 -vac_flushpage(pfn_t pfnum, int vcolor)
391 -{}
392 -
393 -/*ARGSUSED*/
394 -void
395 -vac_flushpage_tl1(uint64_t pfnum, uint64_t vcolor)
396 -{}
397 -
398 -/*ARGSUSED*/
399 -void
400 -init_mondo(xcfunc_t *func, uint64_t arg1, uint64_t arg2)
401 -{}
402 -
403 -/*ARGSUSED*/
404 -void
405 -init_mondo_nocheck(xcfunc_t *func, uint64_t arg1, uint64_t arg2)
406 -{}
407 -
408 -/*ARGSUSED*/
409 -void
410 -flush_instr_mem(caddr_t vaddr, size_t len)
411 -{}
412 -
413 -/*ARGSUSED*/
414 -void
415 -flush_ecache(uint64_t physaddr, size_t size, size_t linesize)
416 -{}
417 -
418 -/*ARGSUSED*/
419 -void
420 -get_ecache_dtag(uint32_t ecache_idx, uint64_t *ecache_data,
421 - uint64_t *ecache_tag, uint64_t *oafsr, uint64_t *acc_afsr)
422 -{}
423 -
424 -/* ARGSUSED */
425 -uint64_t
426 -get_ecache_tag(uint32_t id, uint64_t *nafsr, uint64_t *acc_afsr)
427 -{
428 - return ((uint64_t)0);
429 -}
430 -
431 -/* ARGSUSED */
432 -uint64_t
433 -check_ecache_line(uint32_t id, uint64_t *acc_afsr)
434 -{
435 - return ((uint64_t)0);
436 -}
437 -
438 -/*ARGSUSED*/
439 -void
440 -kdi_flush_idcache(int dcache_size, int dcache_lsize,
441 - int icache_size, int icache_lsize)
442 -{}
443 -
444 -#else /* lint */
445 -
446 348 ENTRY_NP(vtag_flushpage)
447 349 /*
448 350 * flush page from the tlb
449 351 *
450 352 * %o0 = vaddr
451 353 * %o1 = sfmmup
452 354 */
453 355 rdpr %pstate, %o5
454 356 #ifdef DEBUG
455 357 PANIC_IF_INTR_DISABLED_PSTR(%o5, sfdi_label1, %g1)
456 358 #endif /* DEBUG */
457 359 /*
458 360 * disable ints
459 361 */
460 362 andn %o5, PSTATE_IE, %o4
461 363 wrpr %o4, 0, %pstate
462 364
463 365 /*
464 366 * Then, blow out the tlb
465 367 * Interrupts are disabled to prevent the secondary ctx register
466 368 * from changing underneath us.
467 369 */
468 370 sethi %hi(ksfmmup), %o3
469 371 ldx [%o3 + %lo(ksfmmup)], %o3
470 372 cmp %o3, %o1
471 373 bne,pt %xcc, 1f ! if not kernel as, go to 1
472 374 sethi %hi(FLUSH_ADDR), %o3
473 375 /*
474 376 * For KCONTEXT demaps use primary. type = page implicitly
475 377 */
476 378 stxa %g0, [%o0]ASI_DTLB_DEMAP /* dmmu flush for KCONTEXT */
477 379 stxa %g0, [%o0]ASI_ITLB_DEMAP /* immu flush for KCONTEXT */
478 380 flush %o3
479 381 b 5f
480 382 nop
481 383 1:
482 384 /*
483 385 * User demap. We need to set the secondary context properly.
484 386 * %o0 = vaddr
485 387 * %o1 = sfmmup
486 388 * %o3 = FLUSH_ADDR
487 389 */
488 390 SFMMU_CPU_CNUM(%o1, %g1, %g2) /* %g1 = sfmmu cnum on this CPU */
489 391
490 392 set MMU_SCONTEXT, %o4
491 393 ldxa [%o4]ASI_DMMU, %o2 /* rd old ctxnum */
492 394 or DEMAP_SECOND | DEMAP_PAGE_TYPE, %o0, %o0
493 395 cmp %o2, %g1
494 396 be,pt %icc, 4f
495 397 nop
496 398 stxa %g1, [%o4]ASI_DMMU /* wr new ctxnum */
497 399 4:
498 400 stxa %g0, [%o0]ASI_DTLB_DEMAP
499 401 stxa %g0, [%o0]ASI_ITLB_DEMAP
500 402 flush %o3
501 403 be,pt %icc, 5f
502 404 nop
503 405 stxa %o2, [%o4]ASI_DMMU /* restore old ctxnum */
504 406 flush %o3
505 407 5:
506 408 retl
507 409 wrpr %g0, %o5, %pstate /* enable interrupts */
508 410 SET_SIZE(vtag_flushpage)
509 411
510 412 .seg ".text"
511 413 .flushallmsg:
512 414 .asciz "sfmmu_asm: unimplemented flush operation"
513 415
514 416 ENTRY_NP(vtag_flushall)
515 417 sethi %hi(.flushallmsg), %o0
516 418 call panic
517 419 or %o0, %lo(.flushallmsg), %o0
518 420 SET_SIZE(vtag_flushall)
519 421
520 422 ENTRY_NP(vtag_flushall_uctxs)
521 423 /*
522 424 * flush entire DTLB/ITLB.
523 425 */
524 426 CPU_INDEX(%g1, %g2)
525 427 mulx %g1, CPU_NODE_SIZE, %g1
526 428 set cpunodes, %g2
527 429 add %g1, %g2, %g1
528 430 lduh [%g1 + ITLB_SIZE], %g2 ! %g2 = # entries in ITLB
529 431 lduh [%g1 + DTLB_SIZE], %g1 ! %g1 = # entries in DTLB
530 432 sub %g2, 1, %g2 ! %g2 = # entries in ITLB - 1
531 433 sub %g1, 1, %g1 ! %g1 = # entries in DTLB - 1
532 434
533 435 !
534 436 ! Flush itlb's
535 437 !
536 438 ITLB_FLUSH_UNLOCKED_UCTXS(I, %g2, %g3, %g4, %o2, %o3, %o4, %o5)
537 439
538 440 !
539 441 ! Flush dtlb's
540 442 !
541 443 DTLB_FLUSH_UNLOCKED_UCTXS(D, %g1, %g3, %g4, %o2, %o3, %o4, %o5)
542 444
543 445 membar #Sync
544 446 retl
545 447 nop
546 448
547 449 SET_SIZE(vtag_flushall_uctxs)
548 450
549 451 ENTRY_NP(vtag_flushpage_tl1)
550 452 /*
551 453 * x-trap to flush page from tlb and tsb
552 454 *
553 455 * %g1 = vaddr, zero-extended on 32-bit kernel
554 456 * %g2 = sfmmup
555 457 *
556 458 * assumes TSBE_TAG = 0
557 459 */
558 460 srln %g1, MMU_PAGESHIFT, %g1
559 461 slln %g1, MMU_PAGESHIFT, %g1 /* g1 = vaddr */
560 462
561 463 SFMMU_CPU_CNUM(%g2, %g3, %g4) /* %g3 = sfmmu cnum on this CPU */
562 464
563 465 /* We need to set the secondary context properly. */
564 466 set MMU_SCONTEXT, %g4
565 467 ldxa [%g4]ASI_DMMU, %g5 /* rd old ctxnum */
566 468 or DEMAP_SECOND | DEMAP_PAGE_TYPE, %g1, %g1
567 469 stxa %g3, [%g4]ASI_DMMU /* wr new ctxnum */
568 470 stxa %g0, [%g1]ASI_DTLB_DEMAP
569 471 stxa %g0, [%g1]ASI_ITLB_DEMAP
570 472 stxa %g5, [%g4]ASI_DMMU /* restore old ctxnum */
571 473 membar #Sync
572 474 retry
573 475 SET_SIZE(vtag_flushpage_tl1)
574 476
575 477 ENTRY_NP(vtag_flush_pgcnt_tl1)
576 478 /*
577 479 * x-trap to flush pgcnt MMU_PAGESIZE pages from tlb
578 480 *
579 481 * %g1 = vaddr, zero-extended on 32-bit kernel
580 482 * %g2 = <sfmmup58 | pgcnt6>
581 483 *
582 484 * NOTE: this handler relies on the fact that no
583 485 * interrupts or traps can occur during the loop
584 486 * issuing the TLB_DEMAP operations. It is assumed
585 487 * that interrupts are disabled and this code is
586 488 * fetching from the kernel locked text address.
587 489 *
588 490 * assumes TSBE_TAG = 0
589 491 */
590 492 srln %g1, MMU_PAGESHIFT, %g1
591 493 slln %g1, MMU_PAGESHIFT, %g1 /* g1 = vaddr */
592 494 or DEMAP_SECOND | DEMAP_PAGE_TYPE, %g1, %g1
593 495
594 496 set SFMMU_PGCNT_MASK, %g4
595 497 and %g4, %g2, %g3 /* g3 = pgcnt - 1 */
596 498 add %g3, 1, %g3 /* g3 = pgcnt */
597 499
598 500 andn %g2, SFMMU_PGCNT_MASK, %g2 /* g2 = sfmmup */
599 501
600 502 SFMMU_CPU_CNUM(%g2, %g5, %g6) ! %g5 = sfmmu cnum on this CPU
601 503
602 504 /* We need to set the secondary context properly. */
603 505 set MMU_SCONTEXT, %g4
604 506 ldxa [%g4]ASI_DMMU, %g6 /* read old ctxnum */
605 507 stxa %g5, [%g4]ASI_DMMU /* write new ctxnum */
606 508
607 509 set MMU_PAGESIZE, %g2 /* g2 = pgsize */
608 510 sethi %hi(FLUSH_ADDR), %g5
609 511 1:
610 512 stxa %g0, [%g1]ASI_DTLB_DEMAP
611 513 stxa %g0, [%g1]ASI_ITLB_DEMAP
612 514 flush %g5
613 515 deccc %g3 /* decr pgcnt */
614 516 bnz,pt %icc,1b
615 517 add %g1, %g2, %g1 /* go to nextpage */
616 518
617 519 stxa %g6, [%g4]ASI_DMMU /* restore old ctxnum */
618 520 membar #Sync
619 521 retry
620 522 SET_SIZE(vtag_flush_pgcnt_tl1)
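The <sfmmup58 | pgcnt6> encoding this handler unpacks is easy to mis-read in asm; a C model for reference, assuming (as the and/andn pair above implies) that SFMMU_PGCNT_MASK covers the low six bits and the sfmmup pointer leaves them clear:

#include <stdint.h>

#define PGCNT_BITS	6				/* the pgcnt6 field */
#define PGCNT_MASK	((1ULL << PGCNT_BITS) - 1)	/* SFMMU_PGCNT_MASK */

/* pack: the low six bits carry pgcnt-1, the rest is the sfmmup pointer */
static inline uint64_t
pack_sfmmup_pgcnt(uint64_t sfmmup, uint64_t pgcnt)
{
	return ((sfmmup & ~PGCNT_MASK) | ((pgcnt - 1) & PGCNT_MASK));
}

/* unpack: mirrors the and/add and andn sequence in the handler */
static inline void
unpack_sfmmup_pgcnt(uint64_t arg, uint64_t *sfmmup, uint64_t *pgcnt)
{
	*pgcnt = (arg & PGCNT_MASK) + 1;	/* %g3 in the handler */
	*sfmmup = arg & ~PGCNT_MASK;		/* %g2 in the handler */
}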
621 523
622 524 ! Not implemented on US1/US2
623 525 ENTRY_NP(vtag_flushall_tl1)
624 526 retry
625 527 SET_SIZE(vtag_flushall_tl1)
626 528
627 529 /*
628 530 * vac_flushpage(pfnum, color)
629 531 * Flush 1 8k page of the D-$ with physical page = pfnum
630 532 * Algorithm:
631 533 * The spitfire dcache is a 16k direct mapped virtual indexed,
632 534 * physically tagged cache. Given the pfnum we read all cache
633 535 * lines for the corresponding page in the cache (determined by
634 536 * the color). Each cache line is compared with
635 537 * the tag created from the pfnum. If the tags match we flush
636 538 * the line.
637 539 */
638 540 .seg ".data"
639 541 .align 8
640 542 .global dflush_type
641 543 dflush_type:
642 544 .word FLUSHPAGE_TYPE
643 545 .seg ".text"
644 546
645 547 ENTRY(vac_flushpage)
646 548 /*
647 549 * flush page from the d$
648 550 *
649 551 * %o0 = pfnum, %o1 = color
650 552 */
651 553 DCACHE_FLUSHPAGE(%o0, %o1, %o2, %o3, %o4)
652 554 retl
653 555 nop
654 556 SET_SIZE(vac_flushpage)
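A C rendition of the FLUSHPAGE_TYPE walk DCACHE_FLUSHPAGE performs here may help when reading the macro; read_dtag()/clear_dline() are hypothetical stand-ins for the ASI_DC_TAG diagnostic accesses, and the vbit arguments stand in for SF_DC_VBIT_MASK/SF_DC_VBIT_SHIFT:

#include <stdint.h>

#define MMU_PAGESHIFT	13			/* 8K pages on sun4u */
#define MMU_PAGESIZE	(1 << MMU_PAGESHIFT)

extern uint64_t read_dtag(uint64_t off);	/* ldxa [off]ASI_DC_TAG */
extern void clear_dline(uint64_t off);		/* stxa %g0, [off]ASI_DC_TAG */

void
vac_flushpage_model(uint64_t pfnum, uint64_t vcolor, int linesize,
    uint64_t vbit_mask, int vbit_shift)
{
	uint64_t want = pfnum << vbit_shift;	 /* tag to compare against */
	uint64_t base = vcolor << MMU_PAGESHIFT; /* color to dcache page */

	for (int off = MMU_PAGESIZE - linesize; off >= 0; off -= linesize) {
		uint64_t tag = read_dtag(base + off);
		if ((tag & vbit_mask) == 0)
			continue;		/* no valid sub-blocks */
		if ((tag & ~vbit_mask) == want)
			clear_dline(base + off); /* tag hit: invalidate */
	}
}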
655 557
656 558 ENTRY_NP(vac_flushpage_tl1)
657 559 /*
658 560 * x-trap to flush page from the d$
659 561 *
660 562 * %g1 = pfnum, %g2 = color
661 563 */
662 564 DCACHE_FLUSHPAGE(%g1, %g2, %g3, %g4, %g5)
663 565 retry
664 566 SET_SIZE(vac_flushpage_tl1)
665 567
666 568 ENTRY(vac_flushcolor)
667 569 /*
668 570 * %o0 = vcolor
669 571 */
670 572 DCACHE_FLUSHCOLOR(%o0, %o1, %o2)
671 573 retl
672 574 nop
673 575 SET_SIZE(vac_flushcolor)
674 576
675 577 ENTRY(vac_flushcolor_tl1)
676 578 /*
677 579 * %g1 = vcolor
678 580 */
679 581 DCACHE_FLUSHCOLOR(%g1, %g2, %g3)
680 582 retry
681 583 SET_SIZE(vac_flushcolor_tl1)
682 584
683 585
684 586 .global _dispatch_status_busy
685 587 _dispatch_status_busy:
686 588 .asciz "ASI_INTR_DISPATCH_STATUS error: busy"
687 589 .align 4
688 590
689 591 /*
690 592 * Determine whether or not the IDSR is busy.
691 593 * Entry: no arguments
692 594 * Returns: 1 if busy, 0 otherwise
693 595 */
694 596 ENTRY(idsr_busy)
695 597 ldxa [%g0]ASI_INTR_DISPATCH_STATUS, %g1
696 598 clr %o0
697 599 btst IDSR_BUSY, %g1
698 600 bz,a,pt %xcc, 1f
699 601 mov 1, %o0
700 602 1:
701 603 retl
702 604 nop
703 605 SET_SIZE(idsr_busy)
704 606
705 607 /*
706 608 * Setup interrupt dispatch data registers
707 609 * Entry:
708 610 * %o0 - function or inumber to call
709 611 * %o1, %o2 - arguments (2 uint64_t's)
710 612 */
711 613 .seg "text"
712 614
713 615 ENTRY(init_mondo)
714 616 #ifdef DEBUG
715 617 !
716 618 ! IDSR should not be busy at the moment
717 619 !
718 620 ldxa [%g0]ASI_INTR_DISPATCH_STATUS, %g1
719 621 btst IDSR_BUSY, %g1
720 622 bz,pt %xcc, 1f
721 623 nop
722 624
723 625 sethi %hi(_dispatch_status_busy), %o0
724 626 call panic
725 627 or %o0, %lo(_dispatch_status_busy), %o0
726 628 #endif /* DEBUG */
727 629
728 630 ALTENTRY(init_mondo_nocheck)
729 631 !
730 632 ! interrupt vector dispatch data reg 0
731 633 !
732 634 1:
733 635 mov IDDR_0, %g1
734 636 mov IDDR_1, %g2
735 637 mov IDDR_2, %g3
736 638 stxa %o0, [%g1]ASI_INTR_DISPATCH
737 639
738 640 !
739 641 ! interrupt vector dispatch data reg 1
740 642 !
741 643 stxa %o1, [%g2]ASI_INTR_DISPATCH
742 644
743 645 !
744 646 ! interrupt vector dispatch data reg 2
745 647 !
746 648 stxa %o2, [%g3]ASI_INTR_DISPATCH
747 649
748 650 retl
749 651 membar #Sync ! allowed to be in the delay slot
750 652 SET_SIZE(init_mondo)
751 653
752 654 /*
753 655 * Ship mondo to upaid
754 656 */
755 657 ENTRY_NP(shipit)
756 658 sll %o0, IDCR_PID_SHIFT, %g1 ! IDCR<18:14> = upa id
757 659 or %g1, IDCR_OFFSET, %g1 ! IDCR<13:0> = 0x70
758 660 stxa %g0, [%g1]ASI_INTR_DISPATCH ! interrupt vector dispatch
759 661 #if defined(SF_ERRATA_54)
760 662 membar #Sync ! store must occur before load
761 663 mov 0x20, %g3 ! UDBH Control Register Read
762 664 ldxa [%g3]ASI_SDB_INTR_R, %g0
763 665 #endif
764 666 retl
765 667 membar #Sync
766 668 SET_SIZE(shipit)
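Taken together, init_mondo(), shipit() and idsr_busy() make up one cross-call dispatch. A hedged sketch of how a sender strings them together (the real callers in the sun4u xc code also handle NACKs and timeouts; the xcfunc_t typedef here is only a stand-in for the one in sys/xc_impl.h):

#include <stdint.h>

typedef uint64_t xcfunc_t(uint64_t, uint64_t);	/* illustrative typedef */

extern void init_mondo(xcfunc_t *func, uint64_t arg1, uint64_t arg2);
extern void shipit(int upaid);
extern int idsr_busy(void);

void
send_one_mondo_sketch(int upaid, xcfunc_t *func, uint64_t arg1, uint64_t arg2)
{
	init_mondo(func, arg1, arg2);	/* load dispatch data regs 0..2 */
	shipit(upaid);			/* write ASI_INTR_DISPATCH for target */
	while (idsr_busy())		/* spin until the dispatch drains */
		;
}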
767 669
768 670
769 671 /*
770 672 * flush_instr_mem:
771 673 * Flush a portion of the I-$ starting at vaddr
772 674 * %o0 vaddr
773 675 * %o1 bytes to be flushed
774 676 */
775 677
776 678 ENTRY(flush_instr_mem)
777 679 membar #StoreStore ! Ensure the stores
778 680 ! are globally visible
779 681 1:
780 682 flush %o0
781 683 subcc %o1, ICACHE_FLUSHSZ, %o1 ! bytes = bytes-0x20
782 684 bgu,pt %ncc, 1b
783 685 add %o0, ICACHE_FLUSHSZ, %o0 ! vaddr = vaddr+0x20
784 686
785 687 retl
786 688 nop
787 689 SET_SIZE(flush_instr_mem)
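The loop above in C, for reference: one flush per ICACHE_FLUSHSZ (0x20) bytes and, matching the subcc/bgu form, at least one flush even for a short range. icache_flush() is a hypothetical stand-in for the SPARC flush instruction.

#include <stddef.h>

#define ICACHE_FLUSHSZ	0x20

extern void icache_flush(char *vaddr);	/* "flush %o0" in the asm */

void
flush_instr_mem_model(char *vaddr, size_t len)
{
	for (;;) {
		icache_flush(vaddr);
		if (len <= ICACHE_FLUSHSZ)	/* bgu: stop after last chunk */
			break;
		len -= ICACHE_FLUSHSZ;
		vaddr += ICACHE_FLUSHSZ;
	}
}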
788 690
789 691 /*
790 692 * flush_ecache:
791 693 * Flush the entire e$ using displacement flush by reading through a
792 694 * physically contiguous area. We use mmu bypass asi (ASI_MEM) while
793 695 * reading this physical address range so that data doesn't go to d$.
794 696 * incoming arguments:
795 697 * %o0 - 64 bit physical address
796 698 * %o1 - size of address range to read
797 699 * %o2 - ecache linesize
798 700 */
799 701 ENTRY(flush_ecache)
800 702 #ifndef HUMMINGBIRD
801 703 b 2f
802 704 nop
803 705 1:
804 706 ldxa [%o0 + %o1]ASI_MEM, %g0 ! start reading from physaddr + size
805 707 2:
806 708 subcc %o1, %o2, %o1
807 709 bcc,a,pt %ncc, 1b
808 710 nop
809 711
810 712 #else /* HUMMINGBIRD */
811 713 /*
812 714 * UltraSPARC-IIe processor supports both 4-way set associative
813 715 * and direct map E$. For performance reasons, we flush E$ by
814 716 * placing it in direct map mode for data load/store and restore
815 717 * the state after we are done flushing it. It takes 2 iterations
816 718 * to guarantee that the entire ecache has been flushed.
817 719 *
818 720 * Keep the interrupts disabled while flushing E$ in this manner.
819 721 */
820 722 rdpr %pstate, %g4 ! current pstate (restored later)
821 723 andn %g4, PSTATE_IE, %g5
822 724 wrpr %g0, %g5, %pstate ! disable interrupts
823 725
824 726 ! Place E$ in direct map mode for data access
825 727 or %g0, 1, %g5
826 728 sllx %g5, HB_UPA_DMAP_DATA_BIT, %g5
827 729 ldxa [%g0]ASI_UPA_CONFIG, %g1 ! current UPA config (restored later)
828 730 or %g1, %g5, %g5
829 731 membar #Sync
830 732 stxa %g5, [%g0]ASI_UPA_CONFIG ! enable direct map for data access
831 733 membar #Sync
832 734
833 735 ! flush entire ecache HB_ECACHE_FLUSH_CNT times
834 736 mov HB_ECACHE_FLUSH_CNT-1, %g5
835 737 2:
836 738 sub %o1, %o2, %g3 ! start from last entry
837 739 1:
838 740 ldxa [%o0 + %g3]ASI_MEM, %g0 ! start reading from physaddr + size
839 741 subcc %g3, %o2, %g3
840 742 bgeu,a,pt %ncc, 1b
841 743 nop
842 744 brgz,a,pt %g5, 2b
843 745 dec %g5
844 746
845 747 membar #Sync
846 748 stxa %g1, [%g0]ASI_UPA_CONFIG ! restore UPA config reg
847 749 membar #Sync
848 750 wrpr %g0, %g4, %pstate ! restore earlier pstate
849 751 #endif /* HUMMINGBIRD */
850 752
851 753 retl
852 754 nop
853 755 SET_SIZE(flush_ecache)
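The non-Hummingbird path reduces to a bypass-read sweep over the range; in C (bypass_read() standing in for the ldxa [..]ASI_MEM access):

#include <stddef.h>
#include <stdint.h>

extern void bypass_read(uint64_t paddr);	/* ldxa [..]ASI_MEM */

void
flush_ecache_model(uint64_t physaddr, size_t size, size_t linesize)
{
	/* physaddr+size-linesize down to physaddr, one e$ line per step */
	for (int64_t off = (int64_t)(size - linesize); off >= 0;
	    off -= (int64_t)linesize)
		bypass_read(physaddr + (uint64_t)off);
}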
854 756
855 757 /*
856 758 * void kdi_flush_idcache(int dcache_size, int dcache_linesize,
857 759 * int icache_size, int icache_linesize)
858 760 */
859 761 ENTRY(kdi_flush_idcache)
860 762 DCACHE_FLUSHALL(%o0, %o1, %g1)
861 763 ICACHE_FLUSHALL(%o2, %o3, %g1)
862 764 membar #Sync
863 765 retl
864 766 nop
865 767 SET_SIZE(kdi_flush_idcache)
866 768
867 769
868 770 /*
869 771 * void get_ecache_dtag(uint32_t ecache_idx, uint64_t *data, uint64_t *tag,
870 772 * uint64_t *oafsr, uint64_t *acc_afsr)
871 773 *
872 774 * Get ecache data and tag. The ecache_idx argument is assumed to be aligned
873 775 * on a 64-byte boundary. The corresponding AFSR value is also read for each
874 776 * 8 byte ecache data obtained. The ecache data is assumed to be a pointer
875 777 * to an array of 16 uint64_t's (e$data & afsr value). The action to read the
876 778 * data and tag should be atomic to make sense. We will be executing at PIL15
877 779 * and will disable IE, so nothing can occur between the two reads. We also
878 780 * assume that the execution of this code does not interfere with what we are
879 781 * reading - not really possible, but we'll live with it for now.
880 782 * We also pass the old AFSR value before clearing it, and caller will take
881 783 * appropriate actions if the important bits are non-zero.
882 784 *
883 785 * If the caller wishes to track the AFSR in cases where the CP bit is
884 786 * set, an address should be passed in for acc_afsr. Otherwise, this
885 787 * argument may be null.
886 788 *
887 789 * Register Usage:
888 790 * i0: In: 32-bit e$ index
889 791 * i1: In: addr of e$ data
890 792 * i2: In: addr of e$ tag
891 793 * i3: In: addr of old afsr
892 794 * i4: In: addr of accumulated afsr - may be null
893 795 */
894 796 ENTRY(get_ecache_dtag)
895 797 save %sp, -SA(MINFRAME), %sp
896 798 or %g0, 1, %l4
897 799 sllx %l4, 39, %l4 ! set bit 39 for e$ data access
898 800 or %i0, %l4, %g6 ! %g6 = e$ addr for data read
899 801 sllx %l4, 1, %l4 ! set bit 40 for e$ tag access
900 802 or %i0, %l4, %l4 ! %l4 = e$ addr for tag read
901 803
902 804 rdpr %pstate, %i5
903 805 andn %i5, PSTATE_IE | PSTATE_AM, %i0
904 806 wrpr %i0, %g0, %pstate ! clear IE, AM bits
905 807
906 808 ldxa [%g0]ASI_ESTATE_ERR, %g1
907 809 stxa %g0, [%g0]ASI_ESTATE_ERR ! disable errors
908 810 membar #Sync
909 811
910 812 ldxa [%g0]ASI_AFSR, %i0 ! grab the old-afsr before tag read
911 813 stx %i0, [%i3] ! write back the old-afsr
912 814
913 815 ldxa [%l4]ASI_EC_R, %g0 ! read tag into E$ tag reg
914 816 ldxa [%g0]ASI_EC_DIAG, %i0 ! read tag from E$ tag reg
915 817 stx %i0, [%i2] ! write back tag result
916 818
917 819 clr %i2 ! loop count
918 820
919 821 brz %i4, 1f ! acc_afsr == NULL?
920 822 ldxa [%g0]ASI_AFSR, %i0 ! grab the old-afsr before clearing
921 823 srlx %i0, P_AFSR_CP_SHIFT, %l0
922 824 btst 1, %l0
923 825 bz 1f
924 826 nop
925 827 ldx [%i4], %g4
926 828 or %g4, %i0, %g4 ! aggregate AFSR in cpu private
927 829 stx %g4, [%i4]
928 830 1:
929 831 stxa %i0, [%g0]ASI_AFSR ! clear AFSR
930 832 membar #Sync
931 833 ldxa [%g6]ASI_EC_R, %i0 ! read the 8byte E$data
932 834 stx %i0, [%i1] ! save the E$data
933 835 add %g6, 8, %g6
934 836 add %i1, 8, %i1
935 837 ldxa [%g0]ASI_AFSR, %i0 ! read AFSR for this 16byte read
936 838 srlx %i0, P_AFSR_CP_SHIFT, %l0
937 839 btst 1, %l0
938 840 bz 2f
939 841 stx %i0, [%i1] ! save the AFSR
940 842
941 843 brz %i4, 2f ! acc_afsr == NULL?
942 844 nop
943 845 ldx [%i4], %g4
944 846 or %g4, %i0, %g4 ! aggregate AFSR in cpu private
945 847 stx %g4, [%i4]
946 848 2:
947 849 add %i2, 8, %i2
948 850 cmp %i2, 64
949 851 bl,a 1b
950 852 add %i1, 8, %i1
951 853 stxa %i0, [%g0]ASI_AFSR ! clear AFSR
952 854 membar #Sync
953 855 stxa %g1, [%g0]ASI_ESTATE_ERR ! restore error enable
954 856 membar #Sync
955 857 wrpr %g0, %i5, %pstate
956 858 ret
957 859 restore
958 860 SET_SIZE(get_ecache_dtag)
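The 16 uint64_t's mentioned in the block comment come back as eight (data, AFSR) pairs; a hypothetical C view of the buffer layout, with illustrative field names:

#include <stdint.h>

typedef struct ec_dtag_buf {
	struct {
		uint64_t ec_d;		/* 8 bytes of e$ data (ASI_EC_R) */
		uint64_t ec_afsr;	/* AFSR sampled right after that read */
	} ec_pair[8];			/* 8 pairs = one 64-byte e$ line */
} ec_dtag_buf_t;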
959 -#endif /* lint */
960 861
961 -#if defined(lint)
962 862 /*
963 863 * The ce_err function handles trap type 0x63 (corrected_ECC_error) at tl=0.
964 864 * Steps: 1. Get AFSR 2. Get AFAR <40:4> 3. Get datapath error status
965 865 * 4. Clear datapath error bit(s) 5. Clear AFSR error bit
966 866 * 6. package data in %g2 and %g3 7. call cpu_ce_error via sys_trap
967 867 * %g3: [ 52:43 UDB lower | 42:33 UDB upper | 32:0 afsr ] - arg #3/arg #1
968 868 * %g2: [ 40:4 afar ] - sys_trap->have_win: arg #4/arg #2
969 869 */
970 -void
971 -ce_err(void)
972 -{}
973 -
974 -void
975 -ce_err_tl1(void)
976 -{}
977 -
978 -
979 -/*
980 - * The async_err function handles trap types 0x0A (instruction_access_error)
981 - * and 0x32 (data_access_error) at TL = 0 and TL > 0. When we branch here,
982 - * %g5 will have the trap type (with 0x200 set if we're at TL > 0).
983 - *
984 - * Steps: 1. Get AFSR 2. Get AFAR <40:4> 3. If not UE error skip UDP registers.
985 - * 4. Else get and clear datapath error bit(s) 4. Clear AFSR error bits
986 - * 6. package data in %g2 and %g3 7. disable all cpu errors, because
987 - * trap is likely to be fatal 8. call cpu_async_error vis sys_trap
988 - *
989 - * %g3: [ 63:53 tt | 52:43 UDB_L | 42:33 UDB_U | 32:0 afsr ] - arg #3/arg #1
990 - * %g2: [ 40:4 afar ] - sys_trap->have_win: arg #4/arg #2
991 - */
992 -void
993 -async_err(void)
994 -{}
995 -
996 -/*
997 - * The clr_datapath function clears any error bits set in the UDB regs.
998 - */
999 -void
1000 -clr_datapath(void)
1001 -{}
1002 -
1003 -/*
1004 - * The get_udb_errors() function gets the current value of the
1005 - * Datapath Error Registers.
1006 - */
1007 -/*ARGSUSED*/
1008 -void
1009 -get_udb_errors(uint64_t *udbh, uint64_t *udbl)
1010 -{
1011 - *udbh = 0;
1012 - *udbl = 0;
1013 -}
1014 -
1015 -#else /* lint */
1016 -
1017 870 ENTRY_NP(ce_err)
1018 871 ldxa [%g0]ASI_AFSR, %g3 ! save afsr in g3
1019 872
1020 873 !
1021 874 ! Check for a UE... From Kevin.Normoyle:
1022 875 ! We try to switch to the trap for the UE, but since that's
1023 876 ! a hardware pipeline, we might get to the CE trap before we
1024 877 ! can switch. The UDB and AFSR registers will have both the
1025 878 ! UE and CE bits set but the UDB syndrome and the AFAR will be
1026 879 ! for the UE.
1027 880 !
1028 881 or %g0, 1, %g1 ! put 1 in g1
1029 882 sllx %g1, 21, %g1 ! shift left to <21> afsr UE
1030 883 andcc %g1, %g3, %g0 ! check for UE in afsr
1031 884 bnz async_err ! handle the UE, not the CE
1032 885 or %g0, 0x63, %g5 ! pass along the CE ttype
1033 886 !
1034 887 ! Disable further CE traps to avoid recursion (stack overflow)
1035 888 ! and staying above XCALL_PIL for extended periods.
1036 889 !
1037 890 ldxa [%g0]ASI_ESTATE_ERR, %g2
1038 891 andn %g2, 0x1, %g2 ! clear bit 0 - CEEN
1039 892 stxa %g2, [%g0]ASI_ESTATE_ERR
1040 893 membar #Sync ! required
1041 894 !
1042 895 ! handle the CE
1043 896 ldxa [%g0]ASI_AFAR, %g2 ! save afar in g2
1044 897
1045 898 set P_DER_H, %g4 ! put P_DER_H in g4
1046 899 ldxa [%g4]ASI_SDB_INTR_R, %g5 ! read sdb upper half into g5
1047 900 or %g0, 1, %g6 ! put 1 in g6
1048 901 sllx %g6, 8, %g6 ! shift g6 to <8> sdb CE
1049 902 andcc %g5, %g6, %g1 ! check for CE in upper half
1050 903 sllx %g5, 33, %g5 ! shift upper bits to <42:33>
1051 904 or %g3, %g5, %g3 ! or with afsr bits
1052 905 bz,a 1f ! no error, goto 1f
1053 906 nop
1054 907 stxa %g1, [%g4]ASI_SDB_INTR_W ! clear sdb reg error bit
1055 908 membar #Sync ! membar sync required
1056 909 1:
1057 910 set P_DER_L, %g4 ! put P_DER_L in g4
1058 911 ldxa [%g4]ASI_SDB_INTR_R, %g5 ! read sdb lower half into g5
1059 912 andcc %g5, %g6, %g1 ! check for CE in lower half
1060 913 sllx %g5, 43, %g5 ! shift upper bits to <52:43>
1061 914 or %g3, %g5, %g3 ! or with afsr bits
1062 915 bz,a 2f ! no error, goto 2f
1063 916 nop
1064 917 stxa %g1, [%g4]ASI_SDB_INTR_W ! clear sdb reg error bit
1065 918 membar #Sync ! membar sync required
1066 919 2:
1067 920 or %g0, 1, %g4 ! put 1 in g4
1068 921 sllx %g4, 20, %g4 ! shift left to <20> afsr CE
1069 922 stxa %g4, [%g0]ASI_AFSR ! use g4 to clear afsr CE error
1070 923 membar #Sync ! membar sync required
1071 924
1072 925 set cpu_ce_error, %g1 ! put *cpu_ce_error() in g1
1073 926 rdpr %pil, %g6 ! read pil into %g6
1074 927 subcc %g6, PIL_15, %g0
1075 928 movneg %icc, PIL_14, %g4 ! run at pil 14 unless already at 15
1076 929 sethi %hi(sys_trap), %g5
1077 930 jmp %g5 + %lo(sys_trap) ! goto sys_trap
1078 931 movge %icc, PIL_15, %g4 ! already at pil 15
1079 932 SET_SIZE(ce_err)
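Decoding the packed word ce_err hands to sys_trap, with field positions taken from the block comment above; these helpers are illustrative, not the handler's actual C interface:

#include <stdint.h>

static inline uint64_t
ce_afsr(uint64_t w)		/* bits 32:0 */
{
	return (w & ((1ULL << 33) - 1));
}

static inline uint64_t
ce_udb_upper(uint64_t w)	/* bits 42:33 */
{
	return ((w >> 33) & 0x3ff);
}

static inline uint64_t
ce_udb_lower(uint64_t w)	/* bits 52:43 */
{
	return ((w >> 43) & 0x3ff);
}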
1080 933
1081 934 ENTRY_NP(ce_err_tl1)
1082 935 #ifndef TRAPTRACE
1083 936 ldxa [%g0]ASI_AFSR, %g7
1084 937 stxa %g7, [%g0]ASI_AFSR
1085 938 membar #Sync
1086 939 retry
1087 940 #else
1088 941 set ce_trap_tl1, %g1
1089 942 sethi %hi(dis_err_panic1), %g4
1090 943 jmp %g4 + %lo(dis_err_panic1)
1091 944 nop
1092 945 #endif
1093 946 SET_SIZE(ce_err_tl1)
1094 947
1095 948 #ifdef TRAPTRACE
1096 949 .celevel1msg:
1097 950 .asciz "Softerror with trap tracing at tl1: AFAR 0x%08x.%08x AFSR 0x%08x.%08x";
1098 951
1099 952 ENTRY_NP(ce_trap_tl1)
1100 953 ! upper 32 bits of AFSR already in o3
1101 954 mov %o4, %o0 ! save AFAR upper 32 bits
1102 955 mov %o2, %o4 ! lower 32 bits of AFSR
1103 956 mov %o1, %o2 ! lower 32 bits of AFAR
1104 957 mov %o0, %o1 ! upper 32 bits of AFAR
1105 958 set .celevel1msg, %o0
1106 959 call panic
1107 960 nop
1108 961 SET_SIZE(ce_trap_tl1)
1109 962 #endif
1110 963
1111 - !
1112 - ! async_err is the assembly glue code to get us from the actual trap
1113 - ! into the CPU module's C error handler. Note that we also branch
1114 - ! here from ce_err() above.
1115 - !
964 +/*
965 + * The async_err function handles trap types 0x0A (instruction_access_error)
966 + * and 0x32 (data_access_error) at TL = 0 and TL > 0. When we branch here,
967 + * %g5 will have the trap type (with 0x200 set if we're at TL > 0).
968 + *
969 + * Steps: 1. Get AFSR 2. Get AFAR <40:4> 3. If not UE error skip UDB registers.
970 + * 4. Else get and clear datapath error bit(s) 5. Clear AFSR error bits
971 + * 6. package data in %g2 and %g3 7. disable all cpu errors, because
972 + * trap is likely to be fatal 8. call cpu_async_error via sys_trap
973 + *
974 + * %g3: [ 63:53 tt | 52:43 UDB_L | 42:33 UDB_U | 32:0 afsr ] - arg #3/arg #1
975 + * %g2: [ 40:4 afar ] - sys_trap->have_win: arg #4/arg #2
976 + *
977 + * async_err is the assembly glue code to get us from the actual trap
978 + * into the CPU module's C error handler. Note that we also branch
979 + * here from ce_err() above.
980 + */
1116 981 ENTRY_NP(async_err)
1117 982 stxa %g0, [%g0]ASI_ESTATE_ERR ! disable ecc and other cpu errors
1118 983 membar #Sync ! membar sync required
1119 984
1120 985 ldxa [%g0]ASI_AFSR, %g3 ! save afsr in g3
1121 986 ldxa [%g0]ASI_AFAR, %g2 ! save afar in g2
1122 987
1123 988 sllx %g5, 53, %g5 ! move ttype to <63:53>
1124 989 or %g3, %g5, %g3 ! or to afsr in g3
1125 990
1126 991 or %g0, 1, %g1 ! put 1 in g1
1127 992 sllx %g1, 21, %g1 ! shift left to <21> afsr UE
1128 993 andcc %g1, %g3, %g0 ! check for UE in afsr
1129 994 bz,a,pn %icc, 2f ! if !UE skip sdb read/clear
1130 995 nop
1131 996
1132 997 set P_DER_H, %g4 ! put P_DER_H in g4
1133 998 ldxa [%g4]ASI_SDB_INTR_R, %g5 ! read sdb upper half into g5
1134 999 or %g0, 1, %g6 ! put 1 in g6
1135 1000 sllx %g6, 9, %g6 ! shift g6 to <9> sdb UE
1136 1001 andcc %g5, %g6, %g1 ! check for UE in upper half
1137 1002 sllx %g5, 33, %g5 ! shift upper bits to <42:33>
1138 1003 or %g3, %g5, %g3 ! or with afsr bits
1139 1004 bz,a 1f ! no error, goto 1f
1140 1005 nop
1141 1006 stxa %g1, [%g4]ASI_SDB_INTR_W ! clear sdb reg UE error bit
1142 1007 membar #Sync ! membar sync required
1143 1008 1:
1144 1009 set P_DER_L, %g4 ! put P_DER_L in g4
1145 1010 ldxa [%g4]ASI_SDB_INTR_R, %g5 ! read sdb lower half into g5
1146 1011 andcc %g5, %g6, %g1 ! check for UE in lower half
1147 1012 sllx %g5, 43, %g5 ! shift upper bits to <52:43>
1148 1013 or %g3, %g5, %g3 ! or with afsr bits
1149 1014 bz,a 2f ! no error, goto 2f
1150 1015 nop
1151 1016 stxa %g1, [%g4]ASI_SDB_INTR_W ! clear sdb reg UE error bit
1152 1017 membar #Sync ! membar sync required
1153 1018 2:
1154 1019 stxa %g3, [%g0]ASI_AFSR ! clear all the sticky bits
1155 1020 membar #Sync ! membar sync required
1156 1021
1157 1022 RESET_USER_RTT_REGS(%g4, %g5, async_err_resetskip)
1158 1023 async_err_resetskip:
1159 1024
1160 1025 set cpu_async_error, %g1 ! put cpu_async_error in g1
1161 1026 sethi %hi(sys_trap), %g5
1162 1027 jmp %g5 + %lo(sys_trap) ! goto sys_trap
1163 1028 or %g0, PIL_15, %g4 ! run at pil 15
1164 1029 SET_SIZE(async_err)
1165 1030
1166 1031 ENTRY_NP(dis_err_panic1)
1167 1032 stxa %g0, [%g0]ASI_ESTATE_ERR ! disable all error traps
1168 1033 membar #Sync
1169 1034 ! save destination routine is in g1
1170 1035 ldxa [%g0]ASI_AFAR, %g2 ! read afar
1171 1036 ldxa [%g0]ASI_AFSR, %g3 ! read afsr
1172 1037 set P_DER_H, %g4 ! put P_DER_H in g4
1173 1038 ldxa [%g4]ASI_SDB_INTR_R, %g5 ! read sdb upper half into g5
1174 1039 sllx %g5, 33, %g5 ! shift upper bits to <42:33>
1175 1040 or %g3, %g5, %g3 ! or with afsr bits
1176 1041 set P_DER_L, %g4 ! put P_DER_L in g4
1177 1042 ldxa [%g4]ASI_SDB_INTR_R, %g5 ! read sdb lower half into g5
1178 1043 sllx %g5, 43, %g5 ! shift upper bits to <52:43>
1179 1044 or %g3, %g5, %g3 ! or with afsr bits
1180 1045
1181 1046 RESET_USER_RTT_REGS(%g4, %g5, dis_err_panic1_resetskip)
1182 1047 dis_err_panic1_resetskip:
1183 1048
1184 1049 sethi %hi(sys_trap), %g5
1185 1050 jmp %g5 + %lo(sys_trap) ! goto sys_trap
1186 1051 sub %g0, 1, %g4
1187 1052 SET_SIZE(dis_err_panic1)
1188 1053
1054 +/*
1055 + * The clr_datapath function clears any error bits set in the UDB regs.
1056 + */
1189 1057 ENTRY(clr_datapath)
1190 1058 set P_DER_H, %o4 ! put P_DER_H in o4
1191 1059 ldxa [%o4]ASI_SDB_INTR_R, %o5 ! read sdb upper half into o5
1192 1060 or %g0, 0x3, %o2 ! put 0x3 in o2
1193 1061 sllx %o2, 8, %o2 ! shift o2 to <9:8> sdb
1194 1062 andcc %o5, %o2, %o1 ! check for UE,CE in upper half
1195 1063 bz,a 1f ! no error, goto 1f
1196 1064 nop
1197 1065 stxa %o1, [%o4]ASI_SDB_INTR_W ! clear sdb reg UE,CE error bits
1198 1066 membar #Sync ! membar sync required
1199 1067 1:
1200 1068 set P_DER_L, %o4 ! put P_DER_L in o4
1201 1069 ldxa [%o4]ASI_SDB_INTR_R, %o5 ! read sdb lower half into o5
1202 1070 andcc %o5, %o2, %o1 ! check for UE,CE in lower half
1203 1071 bz,a 2f ! no error, goto 2f
1204 1072 nop
1205 1073 stxa %o1, [%o4]ASI_SDB_INTR_W ! clear sdb reg UE,CE error bits
1206 1074 membar #Sync
1207 1075 2:
1208 1076 retl
1209 1077 nop
1210 1078 SET_SIZE(clr_datapath)
1211 1079
1080 +/*
1081 + * The get_udb_errors() function gets the current value of the
1082 + * Datapath Error Registers.
1083 + */
1212 1084 ENTRY(get_udb_errors)
1213 1085 set P_DER_H, %o3
1214 1086 ldxa [%o3]ASI_SDB_INTR_R, %o2
1215 1087 stx %o2, [%o0]
1216 1088 set P_DER_L, %o3
1217 1089 ldxa [%o3]ASI_SDB_INTR_R, %o2
1218 1090 retl
1219 1091 stx %o2, [%o1]
1220 1092 SET_SIZE(get_udb_errors)
1221 1093
1222 -#endif /* lint */
1223 -
1224 -#if defined(lint)
1225 1094 /*
1226 1095 * The itlb_rd_entry and dtlb_rd_entry functions return the tag portion of the
1227 1096 * tte, the virtual address, and the ctxnum of the specified tlb entry. They
1228 1097 * should only be used in places where you have no choice but to look at the
1229 1098 * tlb itself.
1230 1099 *
1231 1100 * Note: These two routines are required by the Estar "cpr" loadable module.
1232 1101 */
1233 -/*ARGSUSED*/
1234 -void
1235 -itlb_rd_entry(uint_t entry, tte_t *tte, uint64_t *va_tag)
1236 -{}
1237 -
1238 -/*ARGSUSED*/
1239 -void
1240 -dtlb_rd_entry(uint_t entry, tte_t *tte, uint64_t *va_tag)
1241 -{}
1242 -#else /* lint */
1243 1102 /*
1244 1103 * NB - In Spitfire cpus, when reading a tte from the hardware, we
1245 1104 * need to clear [42-41] because the general definitions in pte.h
1246 1105 * define the PA to be [42-13] whereas Spitfire really uses [40-13].
1247 1106 * When cloning these routines for other cpus the "andn" below is not
1248 1107 * necessary.
1249 1108 */
1250 1109 ENTRY_NP(itlb_rd_entry)
1251 1110 sllx %o0, 3, %o0
1252 1111 #if defined(SF_ERRATA_32)
1253 1112 sethi %hi(FLUSH_ADDR), %g2
1254 1113 set MMU_PCONTEXT, %g1
1255 1114 stxa %g0, [%g1]ASI_DMMU ! KCONTEXT
1256 1115 flush %g2
1257 1116 #endif
1258 1117 ldxa [%o0]ASI_ITLB_ACCESS, %g1
1259 1118 set TTE_SPITFIRE_PFNHI_CLEAR, %g2 ! spitfire only
1260 1119 sllx %g2, TTE_SPITFIRE_PFNHI_SHIFT, %g2 ! see comment above
1261 1120 andn %g1, %g2, %g1 ! for details
1262 1121 stx %g1, [%o1]
1263 1122 ldxa [%o0]ASI_ITLB_TAGREAD, %g2
1264 1123 set TAGREAD_CTX_MASK, %o4
1265 1124 andn %g2, %o4, %o5
1266 1125 retl
1267 1126 stx %o5, [%o2]
1268 1127 SET_SIZE(itlb_rd_entry)
1269 1128
1270 1129 ENTRY_NP(dtlb_rd_entry)
1271 1130 sllx %o0, 3, %o0
1272 1131 #if defined(SF_ERRATA_32)
1273 1132 sethi %hi(FLUSH_ADDR), %g2
1274 1133 set MMU_PCONTEXT, %g1
1275 1134 stxa %g0, [%g1]ASI_DMMU ! KCONTEXT
1276 1135 flush %g2
1277 1136 #endif
1278 1137 ldxa [%o0]ASI_DTLB_ACCESS, %g1
1279 1138 set TTE_SPITFIRE_PFNHI_CLEAR, %g2 ! spitfire only
1280 1139 sllx %g2, TTE_SPITFIRE_PFNHI_SHIFT, %g2 ! see comment above
1281 1140 andn %g1, %g2, %g1 ! itlb_rd_entry
1282 1141 stx %g1, [%o1]
1283 1142 ldxa [%o0]ASI_DTLB_TAGREAD, %g2
1284 1143 set TAGREAD_CTX_MASK, %o4
1285 1144 andn %g2, %o4, %o5
1286 1145 retl
1287 1146 stx %o5, [%o2]
1288 1147 SET_SIZE(dtlb_rd_entry)
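The andn described in the NB comment, spelled out in C; the two constants are illustrative stand-ins for TTE_SPITFIRE_PFNHI_CLEAR and TTE_SPITFIRE_PFNHI_SHIFT:

#include <stdint.h>

#define PFNHI_CLEAR	0x3ULL	/* two PA bits to drop... */
#define PFNHI_SHIFT	41	/* ...at positions <42:41> */

static inline uint64_t
spitfire_tte_sanitize(uint64_t tte)
{
	/* pte.h defines PA as <42:13>; Spitfire only drives <40:13> */
	return (tte & ~(PFNHI_CLEAR << PFNHI_SHIFT));
}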
1289 -#endif /* lint */
1290 1148
1291 -#if defined(lint)
1292 -
1293 -/*
1294 - * routines to get and set the LSU register
1295 - */
1296 -uint64_t
1297 -get_lsu(void)
1298 -{
1299 - return ((uint64_t)0);
1300 -}
1301 -
1302 -/*ARGSUSED*/
1303 -void
1304 -set_lsu(uint64_t lsu)
1305 -{}
1306 -
1307 -#else /* lint */
1308 -
1309 1149 ENTRY(set_lsu)
1310 1150 stxa %o0, [%g0]ASI_LSU ! store to LSU
1311 1151 retl
1312 1152 membar #Sync
1313 1153 SET_SIZE(set_lsu)
1314 1154
1315 1155 ENTRY(get_lsu)
1316 1156 retl
1317 1157 ldxa [%g0]ASI_LSU, %o0 ! load LSU
1318 1158 SET_SIZE(get_lsu)
1319 1159
1320 -#endif /* lint */
1321 -
1322 -#ifndef lint
1323 1160 /*
1324 1161 * Clear the NPT (non-privileged trap) bit in the %tick
1325 1162 * registers. In an effort to make the change in the
1326 1163 * tick counter as consistent as possible, we disable
1327 1164 * all interrupts while we're changing the registers. We also
1328 1165 * ensure that the read and write instructions are in the same
1329 1166 * line in the instruction cache.
1330 1167 */
1331 1168 ENTRY_NP(cpu_clearticknpt)
1332 1169 rdpr %pstate, %g1 /* save processor state */
1333 1170 andn %g1, PSTATE_IE, %g3 /* turn off */
1334 1171 wrpr %g0, %g3, %pstate /* interrupts */
1335 1172 rdpr %tick, %g2 /* get tick register */
1336 1173 brgez,pn %g2, 1f /* if NPT bit off, we're done */
1337 1174 mov 1, %g3 /* create mask */
1338 1175 sllx %g3, 63, %g3 /* for NPT bit */
1339 1176 ba,a,pt %xcc, 2f
1340 1177 .align 64 /* Align to I$ boundary */
1341 1178 2:
1342 1179 rdpr %tick, %g2 /* get tick register */
1343 1180 wrpr %g3, %g2, %tick /* write tick register, */
1344 1181 /* clearing NPT bit */
1345 1182 #if defined(BB_ERRATA_1)
1346 1183 rdpr %tick, %g0 /* read (s)tick (BB_ERRATA_1) */
1347 1184 #endif
1348 1185 1:
1349 1186 jmp %g4 + 4
1350 1187 wrpr %g0, %g1, %pstate /* restore processor state */
1351 1188 SET_SIZE(cpu_clearticknpt)
1352 1189
1353 1190 /*
1354 1191 * get_ecache_tag()
1355 1192 * Register Usage:
1356 1193 * %o0: In: 32-bit E$ index
1357 1194 * Out: 64-bit E$ tag value
1358 1195 * %o1: In: 64-bit AFSR value after clearing sticky bits
1359 1196 * %o2: In: address of cpu private afsr storage
1360 1197 */
1361 1198 ENTRY(get_ecache_tag)
1362 1199 or %g0, 1, %o4
1363 1200 sllx %o4, 40, %o4 ! set bit 40 for e$ tag access
1364 1201 or %o0, %o4, %o4 ! %o4 = e$ addr for tag read
1365 1202 rdpr %pstate, %o5
1366 1203 andn %o5, PSTATE_IE | PSTATE_AM, %o0
1367 1204 wrpr %o0, %g0, %pstate ! clear IE, AM bits
1368 1205
1369 1206 ldxa [%g0]ASI_ESTATE_ERR, %g1
1370 1207 stxa %g0, [%g0]ASI_ESTATE_ERR ! Turn off Error enable
1371 1208 membar #Sync
1372 1209
1373 1210 ldxa [%g0]ASI_AFSR, %o0
1374 1211 srlx %o0, P_AFSR_CP_SHIFT, %o3
1375 1212 btst 1, %o3
1376 1213 bz 1f
1377 1214 nop
1378 1215 ldx [%o2], %g4
1379 1216 or %g4, %o0, %g4 ! aggregate AFSR in cpu private
1380 1217 stx %g4, [%o2]
1381 1218 1:
1382 1219 stxa %o0, [%g0]ASI_AFSR ! clear AFSR
1383 1220 membar #Sync
1384 1221
1385 1222 ldxa [%o4]ASI_EC_R, %g0
1386 1223 ldxa [%g0]ASI_EC_DIAG, %o0 ! read tag from e$ tag reg
1387 1224
1388 1225 ldxa [%g0]ASI_AFSR, %o3
1389 1226 srlx %o3, P_AFSR_CP_SHIFT, %o4
1390 1227 btst 1, %o4
1391 1228 bz 2f
1392 1229 stx %o3, [%o1] ! AFSR after sticky clear
1393 1230 ldx [%o2], %g4
1394 1231 or %g4, %o3, %g4 ! aggregate AFSR in cpu private
1395 1232 stx %g4, [%o2]
1396 1233 2:
1397 1234 membar #Sync
1398 1235
1399 1236 stxa %g1, [%g0]ASI_ESTATE_ERR ! Turn error enable back on
1400 1237 membar #Sync
1401 1238 retl
1402 1239 wrpr %g0, %o5, %pstate
1403 1240 SET_SIZE(get_ecache_tag)
1404 1241
1405 1242 /*
1406 1243 * check_ecache_line()
1407 1244 * Register Usage:
1408 1245 * %o0: In: 32-bit E$ index
1409 1246 * Out: 64-bit accumulated AFSR
1410 1247 * %o1: In: address of cpu private afsr storage
1411 1248 */
1412 1249 ENTRY(check_ecache_line)
1413 1250 or %g0, 1, %o4
1414 1251 sllx %o4, 39, %o4 ! set bit 39 for e$ data access
1415 1252 or %o0, %o4, %o4 ! %o4 = e$ addr for data read
1416 1253
1417 1254 rdpr %pstate, %o5
1418 1255 andn %o5, PSTATE_IE | PSTATE_AM, %o0
1419 1256 wrpr %o0, %g0, %pstate ! clear IE, AM bits
1420 1257
1421 1258 ldxa [%g0]ASI_ESTATE_ERR, %g1
1422 1259 stxa %g0, [%g0]ASI_ESTATE_ERR ! Turn off Error enable
1423 1260 membar #Sync
1424 1261
1425 1262 ldxa [%g0]ASI_AFSR, %o0
1426 1263 srlx %o0, P_AFSR_CP_SHIFT, %o2
1427 1264 btst 1, %o2
1428 1265 bz 1f
1429 1266 clr %o2 ! loop count
1430 1267 ldx [%o1], %o3
1431 1268 or %o3, %o0, %o3 ! aggregate AFSR in cpu private
1432 1269 stx %o3, [%o1]
1433 1270 1:
1434 1271 stxa %o0, [%g0]ASI_AFSR ! clear AFSR
1435 1272 membar #Sync
1436 1273
1437 1274 2:
1438 1275 ldxa [%o4]ASI_EC_R, %g0 ! Read the E$ data 8bytes each
1439 1276 add %o2, 1, %o2
1440 1277 cmp %o2, 8
1441 1278 bl,a 2b
1442 1279 add %o4, 8, %o4
1443 1280
1444 1281 membar #Sync
1445 1282 ldxa [%g0]ASI_AFSR, %o0 ! read accumulated AFSR
1446 1283 srlx %o0, P_AFSR_CP_SHIFT, %o2
1447 1284 btst 1, %o2
1448 1285 bz 3f
1449 1286 nop
1450 1287 ldx [%o1], %o3
1451 1288 or %o3, %o0, %o3 ! aggregate AFSR in cpu private
1452 1289 stx %o3, [%o1]
1453 1290 3:
1454 1291 stxa %o0, [%g0]ASI_AFSR ! clear AFSR
1455 1292 membar #Sync
1456 1293 stxa %g1, [%g0]ASI_ESTATE_ERR ! Turn error enable back on
1457 1294 membar #Sync
1458 1295 retl
1459 1296 wrpr %g0, %o5, %pstate
1460 1297 SET_SIZE(check_ecache_line)
1461 -#endif /* lint */
1462 1298
1463 -#if defined(lint)
1464 -uint64_t
1465 -read_and_clear_afsr()
1466 -{
1467 - return ((uint64_t)0);
1468 -}
1469 -#else /* lint */
1470 1299 ENTRY(read_and_clear_afsr)
1471 1300 ldxa [%g0]ASI_AFSR, %o0
1472 1301 retl
1473 1302 stxa %o0, [%g0]ASI_AFSR ! clear AFSR
1474 1303 SET_SIZE(read_and_clear_afsr)
1475 -#endif /* lint */
1476 1304
1477 -#if defined(lint)
1478 -/* ARGSUSED */
1479 -void
1480 -scrubphys(uint64_t paddr, int ecache_size)
1481 -{
1482 -}
1483 -
1484 -#else /* lint */
1485 -
1486 1305 /*
1487 1306 * scrubphys - Pass in the aligned physical memory address that you want
1488 1307 * to scrub, along with the ecache size.
1489 1308 *
1490 1309 * 1) Displacement flush the E$ line corresponding to %addr.
1491 1310 * The first ldxa guarantees that the %addr is no longer in
1492 1311 * M, O, or E (goes to I, or S if instruction fetch also happens).
1493 1312 * 2) "Write" the data using a CAS %addr,%g0,%g0.
1494 1313 * The casxa guarantees a transition from I to M or S to M.
1495 1314 * 3) Displacement flush the E$ line corresponding to %addr.
1496 1315 * The second ldxa pushes the M line out of the ecache, into the
1497 1316 * writeback buffers, on the way to memory.
1498 1317 * 4) The "membar #Sync" pushes the cache line out of the writeback
1499 1318 * buffers onto the bus, on the way to dram finally.
1500 1319 *
1501 1320 * This is a modified version of the algorithm suggested by Gary Lauterbach.
1502 1321 * In theory the CAS %addr,%g0,%g0 is supposed to mark the addr's cache line
1503 1322 * as modified, but then we found out that for spitfire, if it misses in the
1504 1323 * E$ it will probably install as an M, but if it hits in the E$, then it
1505 1324 * will stay E, if the store doesn't happen. So the first displacement flush
1506 1325 * should ensure that the CAS will miss in the E$. Arrgh.
1507 1326 */
1508 1327
1509 1328 ENTRY(scrubphys)
1510 1329 or %o1, %g0, %o2 ! put ecache size in %o2
1511 1330 #ifndef HUMMINGBIRD
1512 1331 xor %o0, %o2, %o1 ! calculate alias address
1513 1332 add %o2, %o2, %o3 ! 2 * ecachesize in case
1514 1333 ! addr == ecache_flushaddr
1515 1334 sub %o3, 1, %o3 ! -1 == mask
1516 1335 and %o1, %o3, %o1 ! and with xor'd address
1517 1336 set ecache_flushaddr, %o3
1518 1337 ldx [%o3], %o3
1519 1338
1520 1339 rdpr %pstate, %o4
1521 1340 andn %o4, PSTATE_IE | PSTATE_AM, %o5
1522 1341 wrpr %o5, %g0, %pstate ! clear IE, AM bits
1523 1342
1524 1343 ldxa [%o1 + %o3]ASI_MEM, %g0 ! load ecache_flushaddr + alias
1525 1344 casxa [%o0]ASI_MEM, %g0, %g0
1526 1345 ldxa [%o1 + %o3]ASI_MEM, %g0 ! load ecache_flushaddr + alias
1527 1346
1528 1347 #else /* HUMMINGBIRD */
1529 1348 /*
1530 1349 * UltraSPARC-IIe processor supports both 4-way set associative
1531 1350 * and direct map E$. We need to reconfigure E$ to direct map
1532 1351 * mode for data load/store before displacement flush. Also, we
1533 1352 * need to flush all 4 sets of the E$ to ensure that the physaddr
1534 1353 * has been flushed. Keep the interrupts disabled while flushing
1535 1354 * E$ in this manner.
1536 1355 *
1537 1356 * For flushing a specific physical address, we start at the
1538 1357 * aliased address and load at set-size stride, wrapping around
1539 1358 * at 2*ecache-size boundary and skipping fault physical address.
1540 1359 * It takes 10 loads to guarantee that the physical address has
1541 1360 * been flushed.
1542 1361 *
1543 1362 * Usage:
1544 1363 * %o0 physaddr
1545 1364 * %o5 physaddr - ecache_flushaddr
1546 1365 * %g1 UPA config (restored later)
1547 1366 * %g2 E$ set size
1548 1367 * %g3 E$ flush address range mask (i.e. 2 * E$ -1)
1549 1368 * %g4 #loads to flush phys address
1550 1369 * %g5 temp
1551 1370 */
1552 1371
1553 1372 sethi %hi(ecache_associativity), %g5
1554 1373 ld [%g5 + %lo(ecache_associativity)], %g5
1555 1374 udivx %o2, %g5, %g2 ! set size (i.e. ecache_size/#sets)
1556 1375 xor %o0, %o2, %o1 ! calculate alias address
1557 1376 add %o2, %o2, %g3 ! 2 * ecachesize in case
1558 1377 ! addr == ecache_flushaddr
1559 1378 sub %g3, 1, %g3 ! 2 * ecachesize -1 == mask
1560 1379 and %o1, %g3, %o1 ! and with xor'd address
1561 1380 sethi %hi(ecache_flushaddr), %o3
1562 1381 ldx [%o3 + %lo(ecache_flushaddr)], %o3
1563 1382
1564 1383 rdpr %pstate, %o4
1565 1384 andn %o4, PSTATE_IE | PSTATE_AM, %o5
1566 1385 wrpr %o5, %g0, %pstate ! clear IE, AM bits
1567 1386
1568 1387 ! Place E$ in direct map mode for data access
1569 1388 or %g0, 1, %g5
1570 1389 sllx %g5, HB_UPA_DMAP_DATA_BIT, %g5
1571 1390 ldxa [%g0]ASI_UPA_CONFIG, %g1 ! current UPA config (restored later)
1572 1391 or %g1, %g5, %g5
1573 1392 membar #Sync
1574 1393 stxa %g5, [%g0]ASI_UPA_CONFIG ! enable direct map for data access
1575 1394 membar #Sync
1576 1395
1577 1396 ! Displace cache line from each set of E$ starting at the
1578 1397 ! aliased address. at set-size stride, wrapping at 2*ecache_size
1579 1398 ! and skipping load from physaddr. We need 10 loads to flush the
1580 1399 ! physaddr from E$.
1581 1400 mov HB_PHYS_FLUSH_CNT-1, %g4 ! #loads to flush phys addr
1582 1401 sub %o0, %o3, %o5 ! physaddr - ecache_flushaddr
1583 1402 or %o1, %g0, %g5 ! starting aliased offset
1584 1403 2:
1585 1404 ldxa [%g5 + %o3]ASI_MEM, %g0 ! load ecache_flushaddr + alias
1586 1405 1:
1587 1406 add %g5, %g2, %g5 ! calculate offset in next set
1588 1407 and %g5, %g3, %g5 ! force offset within aliased range
1589 1408 cmp %g5, %o5 ! skip loads from physaddr
1590 1409 be,pn %ncc, 1b
1591 1410 nop
1592 1411 brgz,pt %g4, 2b
1593 1412 dec %g4
1594 1413
1595 1414 casxa [%o0]ASI_MEM, %g0, %g0
1596 1415
1597 1416 ! Flush %o0 from ecache again.
1598 1417 ! Need single displacement flush at offset %o1 this time as
1599 1418 ! the E$ is already in direct map mode.
1600 1419 ldxa [%o1 + %o3]ASI_MEM, %g0 ! load ecache_flushaddr + alias
1601 1420
1602 1421 membar #Sync
1603 1422 stxa %g1, [%g0]ASI_UPA_CONFIG ! restore UPA config (DM bits)
1604 1423 membar #Sync
1605 1424 #endif /* HUMMINGBIRD */
1606 1425 wrpr %g0, %o4, %pstate ! restore earlier pstate register value
1607 1426
1608 1427 retl
1609 1428 membar #Sync ! move the data out of the load buffer
1610 1429 SET_SIZE(scrubphys)
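The four-step sequence from the scrubphys block comment, as a C sketch; every helper is a hypothetical stand-in (displacement_flush() for the alias ldxa reads, bypass_cas64() for casxa [..]ASI_MEM, membar_sync() for membar #Sync):

#include <stdint.h>

extern void displacement_flush(uint64_t paddr);
extern void bypass_cas64(uint64_t paddr);
extern void membar_sync(void);

void
scrubphys_model(uint64_t aligned_paddr)
{
	displacement_flush(aligned_paddr);	/* 1) make the CAS miss in E$ */
	bypass_cas64(aligned_paddr);		/* 2) I->M (or S->M) rewrite */
	displacement_flush(aligned_paddr);	/* 3) push the M line out */
	membar_sync();				/* 4) drain writeback buffers */
}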
1611 1430
1612 -#endif /* lint */
1613 -
1614 -#if defined(lint)
1615 -
1616 1431 /*
1617 1432 * clearphys - Pass in the aligned physical memory address that you want
1618 1433 * to push out, as a 64 byte block of zeros, from the ecache zero-filled.
1619 1434 * Since this routine does not bypass the ecache, it is possible that
1620 1435 * it could generate a UE error while trying to clear a bad line.
1621 1436 * This routine clears and restores the error enable flag.
1622 1437 * TBD - Hummingbird may need similar protection
1623 1438 */
1624 -/* ARGSUSED */
1625 -void
1626 -clearphys(uint64_t paddr, int ecache_size, int ecache_linesize)
1627 -{
1628 -}
1629 -
1630 -#else /* lint */
1631 -
1632 1439 ENTRY(clearphys)
1633 1440 or %o2, %g0, %o3 ! ecache linesize
1634 1441 or %o1, %g0, %o2 ! ecache size
1635 1442 #ifndef HUMMINGBIRD
1636 1443 or %o3, %g0, %o4 ! save ecache linesize
1637 1444 xor %o0, %o2, %o1 ! calculate alias address
1638 1445 add %o2, %o2, %o3 ! 2 * ecachesize
1639 1446 sub %o3, 1, %o3 ! -1 == mask
1640 1447 and %o1, %o3, %o1 ! and with xor'd address
1641 1448 set ecache_flushaddr, %o3
1642 1449 ldx [%o3], %o3
1643 1450 or %o4, %g0, %o2 ! saved ecache linesize
1644 1451
1645 1452 rdpr %pstate, %o4
1646 1453 andn %o4, PSTATE_IE | PSTATE_AM, %o5
1647 1454 wrpr %o5, %g0, %pstate ! clear IE, AM bits
1648 1455
1649 1456 ldxa [%g0]ASI_ESTATE_ERR, %g1
1650 1457 stxa %g0, [%g0]ASI_ESTATE_ERR ! disable errors
1651 1458 membar #Sync
1652 1459
1653 1460 ! need to put zeros in the cache line before displacing it
1654 1461
1655 1462 sub %o2, 8, %o2 ! get offset of last double word in ecache line
1656 1463 1:
1657 1464 stxa %g0, [%o0 + %o2]ASI_MEM ! put zeros in the ecache line
1658 1465 sub %o2, 8, %o2
1659 1466 brgez,a,pt %o2, 1b
1660 1467 nop
1661 1468 ldxa [%o1 + %o3]ASI_MEM, %g0 ! load ecache_flushaddr + alias
1662 1469 casxa [%o0]ASI_MEM, %g0, %g0
1663 1470 ldxa [%o1 + %o3]ASI_MEM, %g0 ! load ecache_flushaddr + alias
1664 1471
1665 1472 stxa %g1, [%g0]ASI_ESTATE_ERR ! restore error enable
1666 1473 membar #Sync
1667 1474
1668 1475 #else /* HUMMINGBIRD... */
1669 1476 /*
1670 1477 * UltraSPARC-IIe processor supports both 4-way set associative
1671 1478 * and direct map E$. We need to reconfigure E$ to direct map
1672 1479 * mode for data load/store before displacement flush. Also, we
1673 1480 * need to flush all 4 sets of the E$ to ensure that the physaddr
1674 1481 * has been flushed. Keep the interrupts disabled while flushing
1675 1482 * E$ in this manner.
1676 1483 *
1677 1484 * For flushing a specific physical address, we start at the
1678 1485 * aliased address and load at set-size stride, wrapping around
1679 1486 * at 2*ecache-size boundary and skipping fault physical address.
1680 1487 * It takes 10 loads to guarantee that the physical address has
1681 1488 * been flushed.
1682 1489 *
1683 1490 * Usage:
1684 1491 * %o0 physaddr
1685 1492 * %o5 physaddr - ecache_flushaddr
1686 1493 * %g1 UPA config (restored later)
1687 1494 * %g2 E$ set size
1688 1495 * %g3 E$ flush address range mask (i.e. 2 * E$ -1)
1689 1496 * %g4 #loads to flush phys address
1690 1497 * %g5 temp
1691 1498 */
1692 1499
1693 1500 or %o3, %g0, %o4 ! save ecache linesize
1694 1501 sethi %hi(ecache_associativity), %g5
1695 1502 ld [%g5 + %lo(ecache_associativity)], %g5
1696 1503 udivx %o2, %g5, %g2 ! set size (i.e. ecache_size/#sets)
1697 1504
1698 1505 xor %o0, %o2, %o1 ! calculate alias address
1699 1506 add %o2, %o2, %g3 ! 2 * ecachesize
1700 1507 sub %g3, 1, %g3 ! 2 * ecachesize -1 == mask
1701 1508 and %o1, %g3, %o1 ! and with xor'd address
1702 1509 sethi %hi(ecache_flushaddr), %o3
1703 1510 	ldx	[%o3 + %lo(ecache_flushaddr)], %o3
1704 1511 or %o4, %g0, %o2 ! saved ecache linesize
1705 1512
1706 1513 rdpr %pstate, %o4
1707 1514 andn %o4, PSTATE_IE | PSTATE_AM, %o5
1708 1515 wrpr %o5, %g0, %pstate ! clear IE, AM bits
1709 1516
1710 1517 ! Place E$ in direct map mode for data access
1711 1518 or %g0, 1, %g5
1712 1519 sllx %g5, HB_UPA_DMAP_DATA_BIT, %g5
1713 1520 ldxa [%g0]ASI_UPA_CONFIG, %g1 ! current UPA config (restored later)
1714 1521 or %g1, %g5, %g5
1715 1522 membar #Sync
1716 1523 stxa %g5, [%g0]ASI_UPA_CONFIG ! enable direct map for data access
1717 1524 membar #Sync
1718 1525
1719 1526 ! need to put zeros in the cache line before displacing it
1720 1527
1721 1528 sub %o2, 8, %o2 ! get offset of last double word in ecache line
1722 1529 1:
1723 1530 stxa %g0, [%o0 + %o2]ASI_MEM ! put zeros in the ecache line
1724 1531 sub %o2, 8, %o2
1725 1532 brgez,a,pt %o2, 1b
1726 1533 nop
1727 1534
1728 1535 ! Displace cache line from each set of E$ starting at the
1729 1536 	! aliased address, at set-size stride, wrapping at 2*ecache_size
1730 1537 ! and skipping load from physaddr. We need 10 loads to flush the
1731 1538 ! physaddr from E$.
1732 1539 mov HB_PHYS_FLUSH_CNT-1, %g4 ! #loads to flush phys addr
1733 1540 sub %o0, %o3, %o5 ! physaddr - ecache_flushaddr
1734 1541 or %o1, %g0, %g5 ! starting offset
1735 1542 2:
1736 1543 ldxa [%g5 + %o3]ASI_MEM, %g0 ! load ecache_flushaddr + alias
1737 1544 3:
1738 1545 add %g5, %g2, %g5 ! calculate offset in next set
1739 1546 and %g5, %g3, %g5 ! force offset within aliased range
1740 1547 cmp %g5, %o5 ! skip loads from physaddr
1741 1548 be,pn %ncc, 3b
1742 1549 nop
1743 1550 brgz,pt %g4, 2b
1744 1551 dec %g4
1745 1552
1746 1553 casxa [%o0]ASI_MEM, %g0, %g0
1747 1554
1748 1555 	! Flush %o0 from ecache again.
1749 1556 ! Need single displacement flush at offset %o1 this time as
1750 1557 ! the E$ is already in direct map mode.
1751 1558 ldxa [%o1 + %o3]ASI_MEM, %g0 ! load ecache_flushaddr + alias
1752 1559
1753 1560 membar #Sync
1754 1561 stxa %g1, [%g0]ASI_UPA_CONFIG ! restore UPA config (DM bits)
1755 1562 membar #Sync
1756 1563 #endif /* HUMMINGBIRD... */
1757 1564
1758 1565 retl
1759 1566 wrpr %g0, %o4, %pstate ! restore earlier pstate register value
1760 1567 SET_SIZE(clearphys)
1761 1568
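On the non-Hummingbird path, clearphys reduces to zeroing each
doubleword of the target line and then displacing the zeroed line out
of the ecache through the alias address. A hedged C sketch of that
core (the error-enable save/restore and the casxa/second displacement
are elided; ecache_flushaddr is the kernel variable used above, other
names are illustrative):

	#include <sys/types.h>

	extern uint64_t ecache_flushaddr;	/* kernel flush region base */

	/*
	 * Sketch only: the real stores go through ASI_MEM with errors
	 * disabled, which has no C equivalent.
	 */
	void
	clearphys_model(uint64_t paddr, uint64_t ecache_size, int linesize)
	{
		uint64_t alias = (paddr ^ ecache_size) &
		    (2 * ecache_size - 1);
		int off;

		/* zero the line, last doubleword first, as above */
		for (off = linesize - 8; off >= 0; off -= 8)
			*(volatile uint64_t *)(paddr + off) = 0;

		/* displacement-flush the zeroed line via the alias */
		(void) *(volatile uint64_t *)(ecache_flushaddr + alias);
	}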
1762 -#endif /* lint */
1763 -
1764 -#if defined(lint)
1765 -/* ARGSUSED */
1766 -void
1767 -flushecacheline(uint64_t paddr, int ecache_size)
1768 -{
1769 -}
1770 -
1771 -#else /* lint */
1772 1569 /*
1773 1570 * flushecacheline - This is a simpler version of scrubphys
1774 1571 * which simply does a displacement flush of the line in
1775 1572 * question. This routine is mainly used in handling async
1776 1573 * errors where we want to get rid of a bad line in ecache.
1777 1574 * Note that if the line is modified and it has suffered
1778 1575  * data corruption, we are guaranteed that the hw will write
1779 1576 * a UE back to mark the page poisoned.
1780 1577 */
1781 1578 ENTRY(flushecacheline)
1782 1579 or %o1, %g0, %o2 ! put ecache size in %o2
1783 1580 #ifndef HUMMINGBIRD
1784 1581 xor %o0, %o2, %o1 ! calculate alias address
1785 1582 add %o2, %o2, %o3 ! 2 * ecachesize in case
1786 1583 ! addr == ecache_flushaddr
1787 1584 sub %o3, 1, %o3 ! -1 == mask
1788 1585 and %o1, %o3, %o1 ! and with xor'd address
1789 1586 set ecache_flushaddr, %o3
1790 1587 ldx [%o3], %o3
1791 1588
1792 1589 rdpr %pstate, %o4
1793 1590 andn %o4, PSTATE_IE | PSTATE_AM, %o5
1794 1591 wrpr %o5, %g0, %pstate ! clear IE, AM bits
1795 1592
1796 1593 ldxa [%g0]ASI_ESTATE_ERR, %g1
1797 1594 stxa %g0, [%g0]ASI_ESTATE_ERR ! disable errors
1798 1595 membar #Sync
1799 1596
1800 1597 ldxa [%o1 + %o3]ASI_MEM, %g0 ! load ecache_flushaddr + alias
1801 1598 membar #Sync
1802 1599 stxa %g1, [%g0]ASI_ESTATE_ERR ! restore error enable
1803 1600 membar #Sync
1804 1601 #else /* HUMMINGBIRD */
1805 1602 /*
1806 1603 * UltraSPARC-IIe processor supports both 4-way set associative
1807 1604 * and direct map E$. We need to reconfigure E$ to direct map
1808 1605 * mode for data load/store before displacement flush. Also, we
1809 1606 * need to flush all 4 sets of the E$ to ensure that the physaddr
1810 1607 * has been flushed. Keep the interrupts disabled while flushing
1811 1608 * E$ in this manner.
1812 1609 *
1813 1610 * For flushing a specific physical address, we start at the
1814 1611 * aliased address and load at set-size stride, wrapping around
1815 1612 * at 2*ecache-size boundary and skipping fault physical address.
1816 1613 * It takes 10 loads to guarantee that the physical address has
1817 1614 * been flushed.
1818 1615 *
1819 1616 * Usage:
1820 1617 * %o0 physaddr
1821 1618 * %o5 physaddr - ecache_flushaddr
1822 1619 * %g1 error enable register
1823 1620 * %g2 E$ set size
1824 1621 * %g3 E$ flush address range mask (i.e. 2 * E$ -1)
1825 1622 * %g4 UPA config (restored later)
1826 1623 * %g5 temp
1827 1624 */
1828 1625
1829 1626 sethi %hi(ecache_associativity), %g5
1830 1627 ld [%g5 + %lo(ecache_associativity)], %g5
1831 1628 udivx %o2, %g5, %g2 ! set size (i.e. ecache_size/#sets)
1832 1629 xor %o0, %o2, %o1 ! calculate alias address
1833 1630 add %o2, %o2, %g3 ! 2 * ecachesize in case
1834 1631 ! addr == ecache_flushaddr
1835 1632 sub %g3, 1, %g3 ! 2 * ecachesize -1 == mask
1836 1633 and %o1, %g3, %o1 ! and with xor'd address
1837 1634 sethi %hi(ecache_flushaddr), %o3
1838 1635 ldx [%o3 + %lo(ecache_flushaddr)], %o3
1839 1636
1840 1637 rdpr %pstate, %o4
1841 1638 andn %o4, PSTATE_IE | PSTATE_AM, %o5
1842 1639 wrpr %o5, %g0, %pstate ! clear IE, AM bits
1843 1640
1844 1641 ! Place E$ in direct map mode for data access
1845 1642 or %g0, 1, %g5
1846 1643 sllx %g5, HB_UPA_DMAP_DATA_BIT, %g5
1847 1644 ldxa [%g0]ASI_UPA_CONFIG, %g4 ! current UPA config (restored later)
1848 1645 or %g4, %g5, %g5
1849 1646 membar #Sync
1850 1647 stxa %g5, [%g0]ASI_UPA_CONFIG ! enable direct map for data access
1851 1648 membar #Sync
1852 1649
1853 1650 ldxa [%g0]ASI_ESTATE_ERR, %g1
1854 1651 stxa %g0, [%g0]ASI_ESTATE_ERR ! disable errors
1855 1652 membar #Sync
1856 1653
1857 1654 ! Displace cache line from each set of E$ starting at the
1858 1655 	! aliased address, at set-size stride, wrapping at 2*ecache_size
1859 1656 ! and skipping load from physaddr. We need 10 loads to flush the
1860 1657 ! physaddr from E$.
1861 1658 mov HB_PHYS_FLUSH_CNT-1, %g5 ! #loads to flush physaddr
1862 1659 sub %o0, %o3, %o5 ! physaddr - ecache_flushaddr
1863 1660 2:
1864 1661 ldxa [%o1 + %o3]ASI_MEM, %g0 ! load ecache_flushaddr + alias
1865 1662 3:
1866 1663 add %o1, %g2, %o1 ! calculate offset in next set
1867 1664 and %o1, %g3, %o1 ! force offset within aliased range
1868 1665 cmp %o1, %o5 ! skip loads from physaddr
1869 1666 be,pn %ncc, 3b
1870 1667 nop
1871 1668 brgz,pt %g5, 2b
1872 1669 dec %g5
1873 1670
1874 1671 membar #Sync
1875 1672 stxa %g1, [%g0]ASI_ESTATE_ERR ! restore error enable
1876 1673 membar #Sync
1877 1674
1878 1675 stxa %g4, [%g0]ASI_UPA_CONFIG ! restore UPA config (DM bits)
1879 1676 membar #Sync
1880 1677 #endif /* HUMMINGBIRD */
1881 1678 retl
1882 1679 wrpr %g0, %o4, %pstate
1883 1680 SET_SIZE(flushecacheline)
1884 1681
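scrubphys, clearphys and flushecacheline all derive the alias address
the same way: xor'ing the physaddr with the ecache size flips an index
bit, and masking with 2*ecache_size - 1 keeps the result inside the
doubly-sized flush region even when the address coincides with
ecache_flushaddr, as the comment above notes. As a hypothetical
helper, not part of the source:

	#include <sys/types.h>

	/* alias = (paddr ^ ecache_size) masked into the 2*E$ region */
	static uint64_t
	ecache_alias_addr(uint64_t paddr, uint64_t ecache_size)
	{
		return ((paddr ^ ecache_size) & (2 * ecache_size - 1));
	}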
1885 -#endif /* lint */
1886 -
1887 -#if defined(lint)
1888 -/* ARGSUSED */
1889 -void
1890 -ecache_scrubreq_tl1(uint64_t inum, uint64_t dummy)
1891 -{
1892 -}
1893 -
1894 -#else /* lint */
1895 1682 /*
1896 1683 * ecache_scrubreq_tl1 is the crosstrap handler called at ecache_calls_a_sec Hz
1897 1684 * from the clock CPU. It atomically increments the outstanding request
1898 1685 * counter and, if there was not already an outstanding request,
1899 1686 * branches to setsoftint_tl1 to enqueue an intr_vec for the given inum.
1900 1687 */
1901 1688
1902 1689 ! Register usage:
1903 1690 !
1904 1691 ! Arguments:
1905 1692 ! %g1 - inum
1906 1693 !
1907 1694 ! Internal:
1908 1695 ! %g2, %g3, %g5 - scratch
1909 1696 ! %g4 - ptr. to spitfire_scrub_misc ec_scrub_outstanding.
1910 1697 ! %g6 - setsoftint_tl1 address
1911 1698
1912 1699 ENTRY_NP(ecache_scrubreq_tl1)
1913 1700 set SFPR_SCRUB_MISC + EC_SCRUB_OUTSTANDING, %g2
1914 1701 GET_CPU_PRIVATE_PTR(%g2, %g4, %g5, 1f);
1915 1702 ld [%g4], %g2 ! cpu's ec_scrub_outstanding.
1916 1703 set setsoftint_tl1, %g6
1917 1704 !
1918 1705 ! no need to use atomic instructions for the following
1919 1706 ! increment - we're at tl1
1920 1707 !
↓ open down ↓ |
16 lines elided |
↑ open up ↑ |
1921 1708 add %g2, 0x1, %g3
1922 1709 brnz,pn %g2, 1f ! no need to enqueue more intr_vec
1923 1710 st %g3, [%g4] ! delay - store incremented counter
1924 1711 jmp %g6 ! setsoftint_tl1(%g1) - queue intr_vec
1925 1712 nop
1926 1713 ! not reached
1927 1714 1:
1928 1715 retry
1929 1716 SET_SIZE(ecache_scrubreq_tl1)
1930 1717
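In C terms the handler above is a test-and-increment: bump the per-CPU
outstanding-request counter and enqueue a softint only when the
previous count was zero. A sketch, assuming a hypothetical accessor
for the CPU-private counter (setsoftint_tl1 is the real target of the
tail jump; the no-private-area retry path is elided):

	#include <sys/types.h>

	extern void setsoftint_tl1(uint64_t inum, uint64_t dummy);
	extern uint32_t *cpu_scrub_outstanding_ptr(void);	/* hypothetical */

	void
	ecache_scrubreq_model(uint64_t inum)
	{
		uint32_t *outstanding = cpu_scrub_outstanding_ptr();

		/* no atomic needed: we run at TL=1 and can't be preempted */
		if ((*outstanding)++ == 0)
			setsoftint_tl1(inum, 0);
	}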
1931 -#endif /* lint */
1932 -
1933 -#if defined(lint)
1934 -/*ARGSUSED*/
1935 -void
1936 -write_ec_tag_parity(uint32_t id)
1937 -{}
1938 -#else /* lint */
1939 -
1940 1718 /*
1941 1719  * write_ec_tag_parity(), which zeroes the ecache tag,
1942 1720 * marks the state as invalid and writes good parity to the tag.
1943 1721  * Input %o0= 32 bit E$ index
1944 1722 */
1945 1723 ENTRY(write_ec_tag_parity)
1946 1724 or %g0, 1, %o4
1947 1725 sllx %o4, 39, %o4 ! set bit 40 for e$ tag access
1948 1726 or %o0, %o4, %o4 ! %o4 = ecache addr for tag write
1949 1727
1950 1728 rdpr %pstate, %o5
1951 1729 andn %o5, PSTATE_IE | PSTATE_AM, %o1
1952 1730 wrpr %o1, %g0, %pstate ! clear IE, AM bits
1953 1731
1954 1732 ldxa [%g0]ASI_ESTATE_ERR, %g1
1955 1733 stxa %g0, [%g0]ASI_ESTATE_ERR ! Turn off Error enable
1956 1734 membar #Sync
1957 1735
1958 1736 ba 1f
1959 1737 nop
1960 1738 /*
1961 1739 * Align on the ecache boundary in order to force
1962 1740  * critical code section onto the same ecache line.
1963 1741 */
1964 1742 .align 64
1965 1743
1966 1744 1:
1967 1745 set S_EC_PARITY, %o3 ! clear tag, state invalid
1968 1746 sllx %o3, S_ECPAR_SHIFT, %o3 ! and with good tag parity
1969 1747 stxa %o3, [%g0]ASI_EC_DIAG ! update with the above info
1970 1748 stxa %g0, [%o4]ASI_EC_W
1971 1749 membar #Sync
1972 1750
1973 1751 stxa %g1, [%g0]ASI_ESTATE_ERR ! Turn error enable back on
1974 1752 membar #Sync
1975 1753 retl
1976 1754 wrpr %g0, %o5, %pstate
1977 1755 SET_SIZE(write_ec_tag_parity)
1978 1756
1979 -#endif /* lint */
1980 -
1981 -#if defined(lint)
1982 -/*ARGSUSED*/
1983 -void
1984 -write_hb_ec_tag_parity(uint32_t id)
1985 -{}
1986 -#else /* lint */
1987 -
1988 1757 /*
1989 1758  * write_hb_ec_tag_parity(), which zeroes the ecache tag,
1990 1759 * marks the state as invalid and writes good parity to the tag.
1991 1760  * Input %o0= 32 bit E$ index
1992 1761 */
1993 1762 ENTRY(write_hb_ec_tag_parity)
1994 1763 or %g0, 1, %o4
1995 1764 sllx %o4, 39, %o4 ! set bit 40 for e$ tag access
1996 1765 or %o0, %o4, %o4 ! %o4 = ecache addr for tag write
1997 1766
1998 1767 rdpr %pstate, %o5
1999 1768 andn %o5, PSTATE_IE | PSTATE_AM, %o1
2000 1769 wrpr %o1, %g0, %pstate ! clear IE, AM bits
2001 1770
2002 1771 ldxa [%g0]ASI_ESTATE_ERR, %g1
2003 1772 stxa %g0, [%g0]ASI_ESTATE_ERR ! Turn off Error enable
2004 1773 membar #Sync
2005 1774
2006 1775 ba 1f
2007 1776 nop
2008 1777 /*
2009 1778 * Align on the ecache boundary in order to force
2010 1779  * critical code section onto the same ecache line.
2011 1780 */
2012 1781 .align 64
2013 1782 1:
2014 1783 #ifdef HUMMINGBIRD
2015 1784 set HB_EC_PARITY, %o3 ! clear tag, state invalid
2016 1785 sllx %o3, HB_ECPAR_SHIFT, %o3 ! and with good tag parity
2017 1786 #else /* !HUMMINGBIRD */
2018 1787 set SB_EC_PARITY, %o3 ! clear tag, state invalid
2019 1788 sllx %o3, SB_ECPAR_SHIFT, %o3 ! and with good tag parity
2020 1789 #endif /* !HUMMINGBIRD */
2021 1790
2022 1791 stxa %o3, [%g0]ASI_EC_DIAG ! update with the above info
2023 1792 stxa %g0, [%o4]ASI_EC_W
2024 1793 membar #Sync
2025 1794
2026 1795 stxa %g1, [%g0]ASI_ESTATE_ERR ! Turn error enable back on
2027 1796 membar #Sync
2028 1797 retl
2029 1798 wrpr %g0, %o5, %pstate
2030 1799 SET_SIZE(write_hb_ec_tag_parity)
2031 1800
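Both tag-parity routines form the diagnostic write address the same
way, differing only in which parity constant (S_EC_PARITY versus
SB_EC_PARITY or HB_EC_PARITY) they shift into place. The address
construction, as a hypothetical sketch of the sllx/or pair at the top
of each routine:

	#include <sys/types.h>

	/* address bit 39 selects E$ tag (rather than data) access */
	static uint64_t
	ec_tag_write_addr(uint32_t index)
	{
		return (((uint64_t)1 << 39) | index);
	}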
2032 -#endif /* lint */
2033 -
2034 1801 #define VIS_BLOCKSIZE 64
2035 1802
2036 -#if defined(lint)
2037 -
2038 -/*ARGSUSED*/
2039 -int
2040 -dtrace_blksuword32(uintptr_t addr, uint32_t *data, int tryagain)
2041 -{ return (0); }
2042 -
2043 -#else
2044 -
2045 1803 ENTRY(dtrace_blksuword32)
2046 1804 save %sp, -SA(MINFRAME + 4), %sp
2047 1805
2048 1806 rdpr %pstate, %l1
2049 1807 andn %l1, PSTATE_IE, %l2 ! disable interrupts to
2050 1808 wrpr %g0, %l2, %pstate ! protect our FPU diddling
2051 1809
2052 1810 rd %fprs, %l0
2053 1811 andcc %l0, FPRS_FEF, %g0
2054 1812 bz,a,pt %xcc, 1f ! if the fpu is disabled
2055 1813 wr %g0, FPRS_FEF, %fprs ! ... enable the fpu
2056 1814
2057 1815 st %f0, [%fp + STACK_BIAS - 4] ! save %f0 to the stack
2058 1816 1:
2059 1817 set 0f, %l5
2060 1818 /*
2061 1819 	 * We're about to write a block full of either total garbage
2062 1820 * (not kernel data, don't worry) or user floating-point data
2063 1821 * (so it only _looks_ like garbage).
2064 1822 */
2065 1823 ld [%i1], %f0 ! modify the block
2066 1824 membar #Sync
2067 1825 stn %l5, [THREAD_REG + T_LOFAULT] ! set up the lofault handler
2068 1826 stda %d0, [%i0]ASI_BLK_COMMIT_S ! store the modified block
2069 1827 membar #Sync
2070 1828 stn %g0, [THREAD_REG + T_LOFAULT] ! remove the lofault handler
2071 1829
2072 1830 bz,a,pt %xcc, 1f
2073 1831 wr %g0, %l0, %fprs ! restore %fprs
2074 1832
2075 1833 ld [%fp + STACK_BIAS - 4], %f0 ! restore %f0
2076 1834 1:
2077 1835
2078 1836 wrpr %g0, %l1, %pstate ! restore interrupts
2079 1837
2080 1838 ret
2081 1839 restore %g0, %g0, %o0
2082 1840
2083 1841 0:
2084 1842 membar #Sync
2085 1843 stn %g0, [THREAD_REG + T_LOFAULT] ! remove the lofault handler
2086 1844
2087 1845 bz,a,pt %xcc, 1f
2088 1846 wr %g0, %l0, %fprs ! restore %fprs
2089 1847
2090 1848 ld [%fp + STACK_BIAS - 4], %f0 ! restore %f0
2091 1849 1:
2092 1850
2093 1851 wrpr %g0, %l1, %pstate ! restore interrupts
2094 1852
2095 1853 /*
2096 1854 * If tryagain is set (%i2) we tail-call dtrace_blksuword32_err()
2097 1855 * which deals with watchpoints. Otherwise, just return -1.
2098 1856 */
2099 1857 brnz,pt %i2, 1f
2100 1858 nop
2101 1859 ret
2102 1860 restore %g0, -1, %o0
2103 1861 1:
2104 1862 call dtrace_blksuword32_err
2105 1863 restore
2106 1864
2107 1865 SET_SIZE(dtrace_blksuword32)
2108 1866
2109 -#endif /* lint */
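Stripped of the FPU and lofault mechanics, dtrace_blksuword32 above
has a simple contract: return 0 on success; on a faulting store,
tail-call dtrace_blksuword32_err() when tryagain is set, else return
-1. As a C sketch, with the ASI_BLK_COMMIT_S store represented by a
hypothetical helper:

	#include <sys/types.h>

	extern int dtrace_blksuword32_err(uintptr_t, uint32_t *);
	extern int blk_store_with_lofault(uintptr_t, uint32_t *);	/* hypothetical */

	int
	dtrace_blksuword32_model(uintptr_t addr, uint32_t *data, int tryagain)
	{
		/* stand-in for the lofault-protected block store above */
		if (blk_store_with_lofault(addr, data) == 0)
			return (0);
		return (tryagain ? dtrace_blksuword32_err(addr, data) : -1);
	}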