de-linting of .s files
--- old/usr/src/uts/sun4u/cpu/spitfire_asm.s
+++ new/usr/src/uts/sun4u/cpu/spitfire_asm.s
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 */
25 25
26 -#pragma ident "%Z%%M% %I% %E% SMI"
27 -
28 -#if !defined(lint)
29 26 #include "assym.h"
30 -#endif /* lint */
31 27
32 28 #include <sys/asm_linkage.h>
33 29 #include <sys/mmu.h>
34 30 #include <vm/hat_sfmmu.h>
35 31 #include <sys/machparam.h>
36 32 #include <sys/machcpuvar.h>
37 33 #include <sys/machthread.h>
38 34 #include <sys/privregs.h>
39 35 #include <sys/asm_linkage.h>
40 36 #include <sys/machasi.h>
41 37 #include <sys/trap.h>
42 38 #include <sys/spitregs.h>
43 39 #include <sys/xc_impl.h>
44 40 #include <sys/intreg.h>
45 41 #include <sys/async.h>
46 42
47 43 #ifdef TRAPTRACE
48 44 #include <sys/traptrace.h>
49 45 #endif /* TRAPTRACE */
50 46
51 -#ifndef lint
52 -
53 47 /* BEGIN CSTYLED */
54 48 #define DCACHE_FLUSHPAGE(arg1, arg2, tmp1, tmp2, tmp3) \
55 49 ldxa [%g0]ASI_LSU, tmp1 ;\
56 50 btst LSU_DC, tmp1 /* is dcache enabled? */ ;\
57 51 bz,pn %icc, 1f ;\
58 52 sethi %hi(dcache_linesize), tmp1 ;\
59 53 ld [tmp1 + %lo(dcache_linesize)], tmp1 ;\
60 54 sethi %hi(dflush_type), tmp2 ;\
61 55 ld [tmp2 + %lo(dflush_type)], tmp2 ;\
62 56 cmp tmp2, FLUSHPAGE_TYPE ;\
63 57 be,pt %icc, 2f ;\
64 58 sllx arg1, SF_DC_VBIT_SHIFT, arg1 /* tag to compare */ ;\
65 59 sethi %hi(dcache_size), tmp3 ;\
66 60 ld [tmp3 + %lo(dcache_size)], tmp3 ;\
67 61 cmp tmp2, FLUSHMATCH_TYPE ;\
68 62 be,pt %icc, 3f ;\
69 63 nop ;\
70 64 /* \
71 65 * flushtype = FLUSHALL_TYPE, flush the whole thing \
72 66 * tmp3 = cache size \
73 67 * tmp1 = cache line size \
74 68 */ \
75 69 sub tmp3, tmp1, tmp2 ;\
76 70 4: \
77 71 stxa %g0, [tmp2]ASI_DC_TAG ;\
78 72 membar #Sync ;\
79 73 cmp %g0, tmp2 ;\
80 74 bne,pt %icc, 4b ;\
81 75 sub tmp2, tmp1, tmp2 ;\
82 76 ba,pt %icc, 1f ;\
83 77 nop ;\
84 78 /* \
85 79 * flushtype = FLUSHPAGE_TYPE \
86 80 * arg1 = tag to compare against \
87 81 * arg2 = virtual color \
88 82 * tmp1 = cache line size \
89 83 * tmp2 = tag from cache \
90 84 * tmp3 = counter \
91 85 */ \
92 86 2: \
93 87 set MMU_PAGESIZE, tmp3 ;\
94 88 sllx arg2, MMU_PAGESHIFT, arg2 /* color to dcache page */ ;\
95 89 sub tmp3, tmp1, tmp3 ;\
96 90 4: \
97 91 ldxa [arg2 + tmp3]ASI_DC_TAG, tmp2 /* read tag */ ;\
98 92 btst SF_DC_VBIT_MASK, tmp2 ;\
99 93 bz,pn %icc, 5f /* branch if no valid sub-blocks */ ;\
100 94 andn tmp2, SF_DC_VBIT_MASK, tmp2 /* clear out v bits */ ;\
101 95 cmp tmp2, arg1 ;\
102 96 bne,pn %icc, 5f /* br if tag miss */ ;\
103 97 nop ;\
104 98 stxa %g0, [arg2 + tmp3]ASI_DC_TAG ;\
105 99 membar #Sync ;\
106 100 5: \
107 101 cmp %g0, tmp3 ;\
108 102 bnz,pt %icc, 4b /* branch if not done */ ;\
109 103 sub tmp3, tmp1, tmp3 ;\
110 104 ba,pt %icc, 1f ;\
111 105 nop ;\
112 106 /* \
113 107 * flushtype = FLUSHMATCH_TYPE \
114 108 * arg1 = tag to compare against \
115 109 * tmp1 = cache line size \
116 110 * tmp3 = cache size \
117 111 * arg2 = counter \
118 112 * tmp2 = cache tag \
119 113 */ \
120 114 3: \
121 115 sub tmp3, tmp1, arg2 ;\
122 116 4: \
123 117 ldxa [arg2]ASI_DC_TAG, tmp2 /* read tag */ ;\
124 118 btst SF_DC_VBIT_MASK, tmp2 ;\
125 119 bz,pn %icc, 5f /* br if no valid sub-blocks */ ;\
126 120 andn tmp2, SF_DC_VBIT_MASK, tmp2 /* clear out v bits */ ;\
127 121 cmp tmp2, arg1 ;\
128 122 bne,pn %icc, 5f /* branch if tag miss */ ;\
129 123 nop ;\
130 124 stxa %g0, [arg2]ASI_DC_TAG ;\
131 125 membar #Sync ;\
132 126 5: \
133 127 cmp %g0, arg2 ;\
134 128 bne,pt %icc, 4b /* branch if not done */ ;\
135 129 sub arg2, tmp1, arg2 ;\
136 130 1:
137 131
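Readers tracing the FLUSHPAGE_TYPE branch above may find a C rendering easier to follow. The sketch below mirrors that loop under stated assumptions: the constants stand in for MMU_PAGESIZE, dcache_linesize, dcache_size, and SF_DC_VBIT_MASK, and a plain array simulates the ldxa/stxa ASI_DC_TAG accesses; it is an illustration, not the kernel's code.

#include <stdint.h>

#define PAGE_SIZE	8192	/* MMU_PAGESIZE on sun4u */
#define LINE_SIZE	32	/* dcache_linesize, illustrative */
#define DC_SIZE		16384	/* 16k direct-mapped spitfire dcache */
#define VBIT_MASK	0x3ULL	/* SF_DC_VBIT_MASK, illustrative */

uint64_t dc_tag[DC_SIZE / LINE_SIZE];	/* simulated ASI_DC_TAG store */

/* Invalidate every line of one dcache color's page whose tag matches. */
void
dcache_flushpage_sketch(uint64_t tag, unsigned int vcolor)
{
	uint64_t base = (uint64_t)vcolor * PAGE_SIZE;	/* color -> dcache page */

	for (long off = PAGE_SIZE - LINE_SIZE; off >= 0; off -= LINE_SIZE) {
		uint64_t t = dc_tag[(base + off) / LINE_SIZE];
		if ((t & VBIT_MASK) == 0)
			continue;	/* no valid sub-blocks */
		if ((t & ~VBIT_MASK) != tag)
			continue;	/* tag miss */
		dc_tag[(base + off) / LINE_SIZE] = 0;	/* flush the line */
	}
}

The FLUSHALL_TYPE and FLUSHMATCH_TYPE branches differ only in walking the whole cache (unconditionally, or with the same tag compare) instead of one color's page.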
138 132 /*
139 133 * macro that flushes the entire dcache color
140 134 */
141 135 #define DCACHE_FLUSHCOLOR(arg, tmp1, tmp2) \
142 136 ldxa [%g0]ASI_LSU, tmp1; \
143 137 btst LSU_DC, tmp1; /* is dcache enabled? */ \
144 138 bz,pn %icc, 1f; \
145 139 sethi %hi(dcache_linesize), tmp1; \
146 140 ld [tmp1 + %lo(dcache_linesize)], tmp1; \
147 141 set MMU_PAGESIZE, tmp2; \
148 142 /* \
149 143 * arg = virtual color \
150 144 * tmp2 = page size \
151 145 * tmp1 = cache line size \
152 146 */ \
153 147 sllx arg, MMU_PAGESHIFT, arg; /* color to dcache page */ \
154 148 sub tmp2, tmp1, tmp2; \
155 149 2: \
156 150 stxa %g0, [arg + tmp2]ASI_DC_TAG; \
157 151 membar #Sync; \
158 152 cmp %g0, tmp2; \
159 153 bne,pt %icc, 2b; \
160 154 sub tmp2, tmp1, tmp2; \
161 155 1:
162 156
163 157 /*
164 158 * macro that flushes the entire dcache
165 159 */
166 160 #define DCACHE_FLUSHALL(size, linesize, tmp) \
167 161 ldxa [%g0]ASI_LSU, tmp; \
168 162 btst LSU_DC, tmp; /* is dcache enabled? */ \
169 163 bz,pn %icc, 1f; \
170 164 \
171 165 sub size, linesize, tmp; \
172 166 2: \
173 167 stxa %g0, [tmp]ASI_DC_TAG; \
174 168 membar #Sync; \
175 169 cmp %g0, tmp; \
176 170 bne,pt %icc, 2b; \
177 171 sub tmp, linesize, tmp; \
178 172 1:
179 173
180 174 /*
181 175 * macro that flushes the entire icache
182 176 */
183 177 #define ICACHE_FLUSHALL(size, linesize, tmp) \
184 178 ldxa [%g0]ASI_LSU, tmp; \
185 179 btst LSU_IC, tmp; \
186 180 bz,pn %icc, 1f; \
187 181 \
188 182 sub size, linesize, tmp; \
189 183 2: \
190 184 stxa %g0, [tmp]ASI_IC_TAG; \
191 185 membar #Sync; \
192 186 cmp %g0, tmp; \
193 187 bne,pt %icc, 2b; \
194 188 sub tmp, linesize, tmp; \
195 189 1:
196 190
197 191 #ifdef SF_ERRATA_32
198 192 #define SF_WORKAROUND(tmp1, tmp2) \
199 193 sethi %hi(FLUSH_ADDR), tmp2 ;\
200 194 set MMU_PCONTEXT, tmp1 ;\
201 195 stxa %g0, [tmp1]ASI_DMMU ;\
202 196 flush tmp2 ;
203 197 #else
204 198 #define SF_WORKAROUND(tmp1, tmp2)
205 199 #endif /* SF_ERRATA_32 */
206 200
207 201 /*
208 202 * arg1 = vaddr
209 203 * arg2 = ctxnum
210 204 * - disable interrupts and clear address mask
211 205 * to access 64 bit physaddr
212 206 * - Blow out the TLB, flush user page.
213 207 * . use secondary context.
214 208 */
215 209 #define VTAG_FLUSHUPAGE(lbl, arg1, arg2, tmp1, tmp2, tmp3, tmp4) \
216 210 rdpr %pstate, tmp1 ;\
217 211 andn tmp1, PSTATE_IE, tmp2 ;\
218 212 wrpr tmp2, 0, %pstate ;\
219 213 sethi %hi(FLUSH_ADDR), tmp2 ;\
220 214 set MMU_SCONTEXT, tmp3 ;\
221 215 ldxa [tmp3]ASI_DMMU, tmp4 ;\
222 216 or DEMAP_SECOND | DEMAP_PAGE_TYPE, arg1, arg1 ;\
223 217 cmp tmp4, arg2 ;\
224 218 be,a,pt %icc, lbl/**/4 ;\
225 219 nop ;\
226 220 stxa arg2, [tmp3]ASI_DMMU ;\
227 221 lbl/**/4: ;\
228 222 stxa %g0, [arg1]ASI_DTLB_DEMAP ;\
229 223 stxa %g0, [arg1]ASI_ITLB_DEMAP ;\
230 224 flush tmp2 ;\
231 225 be,a,pt %icc, lbl/**/5 ;\
232 226 nop ;\
233 227 stxa tmp4, [tmp3]ASI_DMMU ;\
234 228 flush tmp2 ;\
235 229 lbl/**/5: ;\
236 230 wrpr %g0, tmp1, %pstate
237 231
238 232
239 233 /*
240 234 * macro that flushes all the user entries in dtlb
241 235 * arg1 = dtlb entries
242 236 * - Before first compare:
243 237 * tmp4 = tte
244 238 * tmp5 = vaddr
245 239 * tmp6 = cntxnum
246 240 */
247 241 #define DTLB_FLUSH_UNLOCKED_UCTXS(lbl, arg1, tmp1, tmp2, tmp3, \
248 242 tmp4, tmp5, tmp6) \
249 243 lbl/**/0: ;\
250 244 sllx arg1, 3, tmp3 ;\
251 245 SF_WORKAROUND(tmp1, tmp2) ;\
252 246 ldxa [tmp3]ASI_DTLB_ACCESS, tmp4 ;\
253 247 srlx tmp4, 6, tmp4 ;\
254 248 andcc tmp4, 1, %g0 ;\
255 249 bnz,pn %xcc, lbl/**/1 ;\
256 250 srlx tmp4, 57, tmp4 ;\
257 251 andcc tmp4, 1, %g0 ;\
258 252 beq,pn %xcc, lbl/**/1 ;\
259 253 nop ;\
260 254 set TAGREAD_CTX_MASK, tmp1 ;\
261 255 ldxa [tmp3]ASI_DTLB_TAGREAD, tmp2 ;\
262 256 and tmp2, tmp1, tmp6 ;\
263 257 andn tmp2, tmp1, tmp5 ;\
264 258 set KCONTEXT, tmp4 ;\
265 259 cmp tmp6, tmp4 ;\
266 260 be lbl/**/1 ;\
267 261 nop ;\
268 262 VTAG_FLUSHUPAGE(VD/**/lbl, tmp5, tmp6, tmp1, tmp2, tmp3, tmp4) ;\
269 263 lbl/**/1: ;\
270 264 brgz,pt arg1, lbl/**/0 ;\
271 265 sub arg1, 1, arg1
272 266
273 267
274 268 /*
275 269 * macro that flushes all the user entries in itlb
276 270 * arg1 = itlb entries
277 271 * - Before first compare:
278 272 * tmp4 = tte
279 273 * tmp5 = vaddr
280 274 * tmp6 = cntxnum
281 275 */
282 276 #define ITLB_FLUSH_UNLOCKED_UCTXS(lbl, arg1, tmp1, tmp2, tmp3, \
283 277 tmp4, tmp5, tmp6) \
284 278 lbl/**/0: ;\
285 279 sllx arg1, 3, tmp3 ;\
286 280 SF_WORKAROUND(tmp1, tmp2) ;\
287 281 ldxa [tmp3]ASI_ITLB_ACCESS, tmp4 ;\
288 282 srlx tmp4, 6, tmp4 ;\
289 283 andcc tmp4, 1, %g0 ;\
290 284 bnz,pn %xcc, lbl/**/1 ;\
291 285 srlx tmp4, 57, tmp4 ;\
292 286 andcc tmp4, 1, %g0 ;\
293 287 beq,pn %xcc, lbl/**/1 ;\
294 288 nop ;\
295 289 set TAGREAD_CTX_MASK, tmp1 ;\
296 290 ldxa [tmp3]ASI_ITLB_TAGREAD, tmp2 ;\
297 291 and tmp2, tmp1, tmp6 ;\
298 292 andn tmp2, tmp1, tmp5 ;\
299 293 set KCONTEXT, tmp4 ;\
300 294 cmp tmp6, tmp4 ;\
301 295 be lbl/**/1 ;\
302 296 nop ;\
303 297 VTAG_FLUSHUPAGE(VI/**/lbl, tmp5, tmp6, tmp1, tmp2, tmp3, tmp4) ;\
304 298 lbl/**/1: ;\
305 299 brgz,pt arg1, lbl/**/0 ;\
306 300 sub arg1, 1, arg1
307 301
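The DTLB and ITLB macros above implement the same scan and differ only in the ASIs they use. A rough C rendering of the per-entry tests follows; the bit positions (lock at tte<6>, valid at tte<63>) are read off the srlx offsets in the macros, the CTX_MASK value is illustrative, and tlb_read_tte(), tlb_read_tag(), and demap_upage() are hypothetical stand-ins for the ASI_*TLB_ACCESS/TAGREAD reads and the VTAG_FLUSHUPAGE demap.

#include <stdint.h>

#define TTE_LOCK	(1ULL << 6)	/* lock bit, per the srlx 6 above */
#define TTE_VALID	(1ULL << 63)	/* valid bit, per the srlx 57 above */
#define CTX_MASK	0x1fffULL	/* TAGREAD_CTX_MASK, illustrative */
#define KCONTEXT	0		/* kernel context number */

extern uint64_t tlb_read_tte(int entry);	/* ASI_[DI]TLB_ACCESS */
extern uint64_t tlb_read_tag(int entry);	/* ASI_[DI]TLB_TAGREAD */
extern void demap_upage(uint64_t vaddr, uint64_t ctx); /* VTAG_FLUSHUPAGE */

void
tlb_flush_unlocked_uctxs_sketch(int nentries)
{
	for (int i = nentries - 1; i >= 0; i--) {
		uint64_t tte = tlb_read_tte(i);
		if (tte & TTE_LOCK)
			continue;		/* leave locked entries alone */
		if (!(tte & TTE_VALID))
			continue;		/* nothing mapped here */
		uint64_t tag = tlb_read_tag(i);
		uint64_t ctx = tag & CTX_MASK;
		if (ctx == KCONTEXT)
			continue;		/* kernel mapping, keep it */
		demap_upage(tag & ~CTX_MASK, ctx);
	}
}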
308 302
309 303
310 304 /*
311 305 * Macro for getting to offset from 'cpu_private' ptr. The 'cpu_private'
312 306 * ptr is in the machcpu structure.
 313  307 	 * r_or_s:	Register or symbol offset from 'cpu_private' ptr.
314 308 * scr1: Scratch, ptr is returned in this register.
315 309 * scr2: Scratch
316 310 */
317 311 #define GET_CPU_PRIVATE_PTR(r_or_s, scr1, scr2, label) \
318 312 CPU_ADDR(scr1, scr2); \
319 313 ldn [scr1 + CPU_PRIVATE], scr1; \
320 314 cmp scr1, 0; \
321 315 be label; \
322 316 nop; \
323 317 add scr1, r_or_s, scr1; \
324 318
325 319 #ifdef HUMMINGBIRD
326 320 /*
327 321 * UltraSPARC-IIe processor supports both 4-way set associative and
328 322 * direct map E$. For performance reasons, we flush E$ by placing it
329 323 * in direct map mode for data load/store and restore the state after
330 324 * we are done flushing it. Keep interrupts off while flushing in this
331 325 * manner.
332 326 *
333 327 * We flush the entire ecache by starting at one end and loading each
334 328 * successive ecache line for the 2*ecache-size range. We have to repeat
335 329 * the flush operation to guarantee that the entire ecache has been
336 330 * flushed.
337 331 *
338 332 * For flushing a specific physical address, we start at the aliased
339 333 * address and load at set-size stride, wrapping around at 2*ecache-size
340 334 * boundary and skipping the physical address being flushed. It takes
341 335 * 10 loads to guarantee that the physical address has been flushed.
342 336 */
343 337
344 338 #define HB_ECACHE_FLUSH_CNT 2
345 339 #define HB_PHYS_FLUSH_CNT 10 /* #loads to flush specific paddr */
346 340 #endif /* HUMMINGBIRD */
347 341
348 342 /* END CSTYLED */
349 343
350 -#endif /* !lint */
351 -
352 344 /*
353 345 * Spitfire MMU and Cache operations.
354 346 */
355 347
356 -#if defined(lint)
357 -
358 -/*ARGSUSED*/
359 -void
360 -vtag_flushpage(caddr_t vaddr, uint64_t sfmmup)
361 -{}
362 -
363 -/*ARGSUSED*/
364 -void
365 -vtag_flushall(void)
366 -{}
367 -
368 -/*ARGSUSED*/
369 -void
370 -vtag_flushall_uctxs(void)
371 -{}
372 -
373 -/*ARGSUSED*/
374 -void
375 -vtag_flushpage_tl1(uint64_t vaddr, uint64_t sfmmup)
376 -{}
377 -
378 -/*ARGSUSED*/
379 -void
380 -vtag_flush_pgcnt_tl1(uint64_t vaddr, uint64_t sfmmup_pgcnt)
381 -{}
382 -
383 -/*ARGSUSED*/
384 -void
385 -vtag_flushall_tl1(uint64_t dummy1, uint64_t dummy2)
386 -{}
387 -
388 -/*ARGSUSED*/
389 -void
390 -vac_flushpage(pfn_t pfnum, int vcolor)
391 -{}
392 -
393 -/*ARGSUSED*/
394 -void
395 -vac_flushpage_tl1(uint64_t pfnum, uint64_t vcolor)
396 -{}
397 -
398 -/*ARGSUSED*/
399 -void
400 -init_mondo(xcfunc_t *func, uint64_t arg1, uint64_t arg2)
401 -{}
402 -
403 -/*ARGSUSED*/
404 -void
405 -init_mondo_nocheck(xcfunc_t *func, uint64_t arg1, uint64_t arg2)
406 -{}
407 -
408 -/*ARGSUSED*/
409 -void
410 -flush_instr_mem(caddr_t vaddr, size_t len)
411 -{}
412 -
413 -/*ARGSUSED*/
414 -void
415 -flush_ecache(uint64_t physaddr, size_t size, size_t linesize)
416 -{}
417 -
418 -/*ARGSUSED*/
419 -void
420 -get_ecache_dtag(uint32_t ecache_idx, uint64_t *ecache_data,
421 - uint64_t *ecache_tag, uint64_t *oafsr, uint64_t *acc_afsr)
422 -{}
423 -
424 -/* ARGSUSED */
425 -uint64_t
426 -get_ecache_tag(uint32_t id, uint64_t *nafsr, uint64_t *acc_afsr)
427 -{
428 - return ((uint64_t)0);
429 -}
430 -
431 -/* ARGSUSED */
432 -uint64_t
433 -check_ecache_line(uint32_t id, uint64_t *acc_afsr)
434 -{
435 - return ((uint64_t)0);
436 -}
437 -
438 -/*ARGSUSED*/
439 -void
440 -kdi_flush_idcache(int dcache_size, int dcache_lsize,
441 - int icache_size, int icache_lsize)
442 -{}
443 -
444 -#else /* lint */
445 -
446 348 ENTRY_NP(vtag_flushpage)
447 349 /*
448 350 * flush page from the tlb
449 351 *
450 352 * %o0 = vaddr
451 353 * %o1 = sfmmup
452 354 */
453 355 rdpr %pstate, %o5
454 356 #ifdef DEBUG
455 357 PANIC_IF_INTR_DISABLED_PSTR(%o5, sfdi_label1, %g1)
456 358 #endif /* DEBUG */
457 359 /*
458 360 * disable ints
459 361 */
460 362 andn %o5, PSTATE_IE, %o4
461 363 wrpr %o4, 0, %pstate
462 364
463 365 /*
464 366 * Then, blow out the tlb
465 367 * Interrupts are disabled to prevent the secondary ctx register
466 368 * from changing underneath us.
467 369 */
468 370 sethi %hi(ksfmmup), %o3
469 371 ldx [%o3 + %lo(ksfmmup)], %o3
470 372 cmp %o3, %o1
471 373 bne,pt %xcc, 1f ! if not kernel as, go to 1
472 374 sethi %hi(FLUSH_ADDR), %o3
473 375 /*
474 376 * For KCONTEXT demaps use primary. type = page implicitly
475 377 */
476 378 stxa %g0, [%o0]ASI_DTLB_DEMAP /* dmmu flush for KCONTEXT */
477 379 stxa %g0, [%o0]ASI_ITLB_DEMAP /* immu flush for KCONTEXT */
478 380 flush %o3
479 381 b 5f
480 382 nop
481 383 1:
482 384 /*
483 385 * User demap. We need to set the secondary context properly.
484 386 * %o0 = vaddr
485 387 * %o1 = sfmmup
486 388 * %o3 = FLUSH_ADDR
487 389 */
488 390 SFMMU_CPU_CNUM(%o1, %g1, %g2) /* %g1 = sfmmu cnum on this CPU */
489 391
490 392 set MMU_SCONTEXT, %o4
491 393 ldxa [%o4]ASI_DMMU, %o2 /* rd old ctxnum */
492 394 or DEMAP_SECOND | DEMAP_PAGE_TYPE, %o0, %o0
493 395 cmp %o2, %g1
494 396 be,pt %icc, 4f
495 397 nop
 496  398 	stxa	%g1, [%o4]ASI_DMMU		/* wr new ctxnum */
497 399 4:
498 400 stxa %g0, [%o0]ASI_DTLB_DEMAP
499 401 stxa %g0, [%o0]ASI_ITLB_DEMAP
500 402 flush %o3
501 403 be,pt %icc, 5f
502 404 nop
503 405 stxa %o2, [%o4]ASI_DMMU /* restore old ctxnum */
504 406 flush %o3
505 407 5:
506 408 retl
507 409 wrpr %g0, %o5, %pstate /* enable interrupts */
508 410 SET_SIZE(vtag_flushpage)
509 411
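In C terms, the control flow of vtag_flushpage reduces to the sketch below: kernel demaps go through the primary context, user demaps borrow the secondary context register. The accessor functions are hypothetical models of the ASI_DMMU and ASI_*TLB_DEMAP traffic; ksfmmup and SFMMU_CPU_CNUM keep the meanings they have in the code above.

#include <stdint.h>

extern void *ksfmmup;				/* kernel hat, as above */
extern uint64_t sfmmu_cpu_cnum(void *sfmmup);	/* SFMMU_CPU_CNUM */
extern uint64_t get_mmu_sctx(void);		/* ldxa [MMU_SCONTEXT]ASI_DMMU */
extern void set_mmu_sctx(uint64_t cnum);	/* stxa to MMU_SCONTEXT */
extern void demap_page_primary(uint64_t va);	/* KCONTEXT demap */
extern void demap_page_secondary(uint64_t va);	/* DEMAP_SECOND demap */
extern uint64_t intr_disable(void);
extern void intr_restore(uint64_t pstate);

void
vtag_flushpage_sketch(uint64_t vaddr, void *sfmmup)
{
	uint64_t pstate = intr_disable();	/* sctx must not change */

	if (sfmmup == ksfmmup) {
		/* kernel demap goes through the primary context */
		demap_page_primary(vaddr);
	} else {
		uint64_t cnum = sfmmu_cpu_cnum(sfmmup);
		uint64_t old = get_mmu_sctx();

		if (old != cnum)
			set_mmu_sctx(cnum);	/* borrow secondary ctx */
		demap_page_secondary(vaddr);
		if (old != cnum)
			set_mmu_sctx(old);	/* put it back */
	}
	intr_restore(pstate);
}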
510 412 .seg ".text"
511 413 .flushallmsg:
512 414 .asciz "sfmmu_asm: unimplemented flush operation"
513 415
514 416 ENTRY_NP(vtag_flushall)
515 417 sethi %hi(.flushallmsg), %o0
516 418 call panic
517 419 or %o0, %lo(.flushallmsg), %o0
518 420 SET_SIZE(vtag_flushall)
519 421
520 422 ENTRY_NP(vtag_flushall_uctxs)
521 423 /*
522 424 * flush entire DTLB/ITLB.
523 425 */
524 426 CPU_INDEX(%g1, %g2)
525 427 mulx %g1, CPU_NODE_SIZE, %g1
526 428 set cpunodes, %g2
527 429 add %g1, %g2, %g1
528 430 lduh [%g1 + ITLB_SIZE], %g2 ! %g2 = # entries in ITLB
529 431 lduh [%g1 + DTLB_SIZE], %g1 ! %g1 = # entries in DTLB
530 432 sub %g2, 1, %g2 ! %g2 = # entries in ITLB - 1
531 433 sub %g1, 1, %g1 ! %g1 = # entries in DTLB - 1
532 434
533 435 !
534 436 ! Flush itlb's
535 437 !
536 438 ITLB_FLUSH_UNLOCKED_UCTXS(I, %g2, %g3, %g4, %o2, %o3, %o4, %o5)
537 439
538 440 !
539 441 ! Flush dtlb's
540 442 !
541 443 DTLB_FLUSH_UNLOCKED_UCTXS(D, %g1, %g3, %g4, %o2, %o3, %o4, %o5)
542 444
543 445 membar #Sync
544 446 retl
545 447 nop
546 448
547 449 SET_SIZE(vtag_flushall_uctxs)
548 450
549 451 ENTRY_NP(vtag_flushpage_tl1)
550 452 /*
551 453 * x-trap to flush page from tlb and tsb
552 454 *
553 455 * %g1 = vaddr, zero-extended on 32-bit kernel
554 456 * %g2 = sfmmup
555 457 *
556 458 * assumes TSBE_TAG = 0
557 459 */
558 460 srln %g1, MMU_PAGESHIFT, %g1
559 461 slln %g1, MMU_PAGESHIFT, %g1 /* g1 = vaddr */
560 462
561 463 SFMMU_CPU_CNUM(%g2, %g3, %g4) /* %g3 = sfmmu cnum on this CPU */
562 464
563 465 /* We need to set the secondary context properly. */
564 466 set MMU_SCONTEXT, %g4
565 467 ldxa [%g4]ASI_DMMU, %g5 /* rd old ctxnum */
566 468 or DEMAP_SECOND | DEMAP_PAGE_TYPE, %g1, %g1
 567  469 	stxa	%g3, [%g4]ASI_DMMU		/* wr new ctxnum */
568 470 stxa %g0, [%g1]ASI_DTLB_DEMAP
569 471 stxa %g0, [%g1]ASI_ITLB_DEMAP
570 472 stxa %g5, [%g4]ASI_DMMU /* restore old ctxnum */
571 473 membar #Sync
572 474 retry
573 475 SET_SIZE(vtag_flushpage_tl1)
574 476
575 477 ENTRY_NP(vtag_flush_pgcnt_tl1)
576 478 /*
577 479 * x-trap to flush pgcnt MMU_PAGESIZE pages from tlb
578 480 *
579 481 * %g1 = vaddr, zero-extended on 32-bit kernel
580 482 * %g2 = <sfmmup58 | pgcnt6>
581 483 *
582 484 * NOTE: this handler relies on the fact that no
583 485 * interrupts or traps can occur during the loop
584 486 * issuing the TLB_DEMAP operations. It is assumed
585 487 * that interrupts are disabled and this code is
586 488 * fetching from the kernel locked text address.
587 489 *
588 490 * assumes TSBE_TAG = 0
589 491 */
590 492 srln %g1, MMU_PAGESHIFT, %g1
591 493 slln %g1, MMU_PAGESHIFT, %g1 /* g1 = vaddr */
592 494 or DEMAP_SECOND | DEMAP_PAGE_TYPE, %g1, %g1
593 495
594 496 set SFMMU_PGCNT_MASK, %g4
595 497 and %g4, %g2, %g3 /* g3 = pgcnt - 1 */
596 498 add %g3, 1, %g3 /* g3 = pgcnt */
597 499
598 500 andn %g2, SFMMU_PGCNT_MASK, %g2 /* g2 = sfmmup */
599 501
600 502 SFMMU_CPU_CNUM(%g2, %g5, %g6) ! %g5 = sfmmu cnum on this CPU
601 503
602 504 /* We need to set the secondary context properly. */
603 505 set MMU_SCONTEXT, %g4
604 506 ldxa [%g4]ASI_DMMU, %g6 /* read old ctxnum */
 605  507 	stxa	%g5, [%g4]ASI_DMMU		/* write new ctxnum */
606 508
607 509 set MMU_PAGESIZE, %g2 /* g2 = pgsize */
608 510 sethi %hi(FLUSH_ADDR), %g5
609 511 1:
610 512 stxa %g0, [%g1]ASI_DTLB_DEMAP
611 513 stxa %g0, [%g1]ASI_ITLB_DEMAP
612 514 flush %g5
613 515 deccc %g3 /* decr pgcnt */
614 516 bnz,pt %icc,1b
615 517 add %g1, %g2, %g1 /* go to nextpage */
616 518
617 519 stxa %g6, [%g4]ASI_DMMU /* restore old ctxnum */
618 520 membar #Sync
619 521 retry
620 522 SET_SIZE(vtag_flush_pgcnt_tl1)
621 523
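The packed <sfmmup58 | pgcnt6> argument decodes as shown in this C sketch. The 0x3f mask is assumed from the six-bit pgcnt field described above, and demap_page_sketch() is a hypothetical stand-in for one iteration of the demap loop.

#include <stdint.h>

#define SFMMU_PGCNT_MASK	0x3fULL	/* low 6 bits carry (pgcnt - 1) */
#define MMU_PAGESIZE		8192

extern void demap_page_sketch(uint64_t vaddr, uint64_t sfmmup); /* hypothetical */

/* Unpack <sfmmup58 | pgcnt6> and demap each MMU_PAGESIZE page. */
void
flush_pgcnt_sketch(uint64_t vaddr, uint64_t sfmmup_pgcnt)
{
	uint64_t pgcnt = (sfmmup_pgcnt & SFMMU_PGCNT_MASK) + 1;
	uint64_t sfmmup = sfmmup_pgcnt & ~SFMMU_PGCNT_MASK;

	for (uint64_t i = 0; i < pgcnt; i++)
		demap_page_sketch(vaddr + i * MMU_PAGESIZE, sfmmup);
}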
622 524 ! Not implemented on US1/US2
623 525 ENTRY_NP(vtag_flushall_tl1)
624 526 retry
625 527 SET_SIZE(vtag_flushall_tl1)
626 528
627 529 /*
628 530 * vac_flushpage(pfnum, color)
629 531 * Flush 1 8k page of the D-$ with physical page = pfnum
630 532 * Algorithm:
631 533 * The spitfire dcache is a 16k direct mapped virtual indexed,
632 534 * physically tagged cache. Given the pfnum we read all cache
633 535 * lines for the corresponding page in the cache (determined by
634 536 * the color). Each cache line is compared with
635 537 * the tag created from the pfnum. If the tags match we flush
636 538 * the line.
637 539 */
638 540 .seg ".data"
639 541 .align 8
640 542 .global dflush_type
641 543 dflush_type:
642 544 .word FLUSHPAGE_TYPE
643 545 .seg ".text"
644 546
645 547 ENTRY(vac_flushpage)
646 548 /*
647 549 * flush page from the d$
648 550 *
649 551 * %o0 = pfnum, %o1 = color
650 552 */
651 553 DCACHE_FLUSHPAGE(%o0, %o1, %o2, %o3, %o4)
652 554 retl
653 555 nop
654 556 SET_SIZE(vac_flushpage)
655 557
656 558 ENTRY_NP(vac_flushpage_tl1)
657 559 /*
658 560 * x-trap to flush page from the d$
659 561 *
660 562 * %g1 = pfnum, %g2 = color
661 563 */
662 564 DCACHE_FLUSHPAGE(%g1, %g2, %g3, %g4, %g5)
663 565 retry
664 566 SET_SIZE(vac_flushpage_tl1)
665 567
666 568 ENTRY(vac_flushcolor)
667 569 /*
668 570 * %o0 = vcolor
669 571 */
670 572 DCACHE_FLUSHCOLOR(%o0, %o1, %o2)
671 573 retl
672 574 nop
673 575 SET_SIZE(vac_flushcolor)
674 576
675 577 ENTRY(vac_flushcolor_tl1)
676 578 /*
677 579 * %g1 = vcolor
678 580 */
679 581 DCACHE_FLUSHCOLOR(%g1, %g2, %g3)
680 582 retry
681 583 SET_SIZE(vac_flushcolor_tl1)
682 584
683 585
684 586 .global _dispatch_status_busy
685 587 _dispatch_status_busy:
686 588 .asciz "ASI_INTR_DISPATCH_STATUS error: busy"
687 589 .align 4
688 590
689 591 /*
690 592 * Determine whether or not the IDSR is busy.
691 593 * Entry: no arguments
692 594 * Returns: 1 if busy, 0 otherwise
693 595 */
694 596 ENTRY(idsr_busy)
695 597 ldxa [%g0]ASI_INTR_DISPATCH_STATUS, %g1
696 598 clr %o0
697 599 btst IDSR_BUSY, %g1
698 600 bz,a,pt %xcc, 1f
699 601 mov 1, %o0
700 602 1:
701 603 retl
702 604 nop
703 605 SET_SIZE(idsr_busy)
704 606
705 607 /*
706 608 * Setup interrupt dispatch data registers
707 609 * Entry:
708 610 * %o0 - function or inumber to call
709 611 * %o1, %o2 - arguments (2 uint64_t's)
710 612 */
711 613 .seg "text"
712 614
713 615 ENTRY(init_mondo)
714 616 #ifdef DEBUG
715 617 !
716 618 ! IDSR should not be busy at the moment
717 619 !
718 620 ldxa [%g0]ASI_INTR_DISPATCH_STATUS, %g1
719 621 btst IDSR_BUSY, %g1
720 622 bz,pt %xcc, 1f
721 623 nop
722 624
723 625 sethi %hi(_dispatch_status_busy), %o0
724 626 call panic
725 627 or %o0, %lo(_dispatch_status_busy), %o0
726 628 #endif /* DEBUG */
727 629
728 630 ALTENTRY(init_mondo_nocheck)
729 631 !
 730  632 	! interrupt vector dispatch data reg 0
731 633 !
732 634 1:
733 635 mov IDDR_0, %g1
734 636 mov IDDR_1, %g2
735 637 mov IDDR_2, %g3
736 638 stxa %o0, [%g1]ASI_INTR_DISPATCH
737 639
738 640 !
 739  641 	! interrupt vector dispatch data reg 1
740 642 !
741 643 stxa %o1, [%g2]ASI_INTR_DISPATCH
742 644
743 645 !
 744  646 	! interrupt vector dispatch data reg 2
745 647 !
746 648 stxa %o2, [%g3]ASI_INTR_DISPATCH
747 649
748 650 retl
749 651 membar #Sync ! allowed to be in the delay slot
750 652 SET_SIZE(init_mondo)
751 653
752 654 /*
753 655 * Ship mondo to upaid
754 656 */
755 657 ENTRY_NP(shipit)
756 658 sll %o0, IDCR_PID_SHIFT, %g1 ! IDCR<18:14> = upa id
757 659 or %g1, IDCR_OFFSET, %g1 ! IDCR<13:0> = 0x70
758 660 stxa %g0, [%g1]ASI_INTR_DISPATCH ! interrupt vector dispatch
759 661 #if defined(SF_ERRATA_54)
760 662 membar #Sync ! store must occur before load
761 663 mov 0x20, %g3 ! UDBH Control Register Read
762 664 ldxa [%g3]ASI_SDB_INTR_R, %g0
763 665 #endif
764 666 retl
765 667 membar #Sync
766 668 SET_SIZE(shipit)
767 669
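The IDCR address that shipit stores to is a pure function of the target's UPA id; the shift and offset below come straight from the comments above (IDCR<18:14> = upa id, IDCR<13:0> = 0x70). A minimal C equivalent:

#include <stdint.h>

#define IDCR_PID_SHIFT	14	/* target upa id lives in IDCR<18:14> */
#define IDCR_OFFSET	0x70	/* fixed low bits, IDCR<13:0> */

/* ASI_INTR_DISPATCH address shipit stores to for a given target CPU. */
uint64_t
idcr_addr(uint32_t upaid)
{
	return (((uint64_t)upaid << IDCR_PID_SHIFT) | IDCR_OFFSET);
}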
768 670
769 671 /*
770 672 * flush_instr_mem:
771 673 * Flush a portion of the I-$ starting at vaddr
772 674 * %o0 vaddr
773 675 * %o1 bytes to be flushed
774 676 */
775 677
776 678 ENTRY(flush_instr_mem)
777 679 membar #StoreStore ! Ensure the stores
778 680 ! are globally visible
779 681 1:
780 682 flush %o0
781 683 subcc %o1, ICACHE_FLUSHSZ, %o1 ! bytes = bytes-0x20
782 684 bgu,pt %ncc, 1b
783 685 add %o0, ICACHE_FLUSHSZ, %o0 ! vaddr = vaddr+0x20
784 686
785 687 retl
786 688 nop
787 689 SET_SIZE(flush_instr_mem)
788 690
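A C sketch of the flush_instr_mem loop, assuming the 0x20-byte stride named in the comments; membar_storestore() and flush_insn() are hypothetical models of the membar #StoreStore and SPARC flush instructions.

#include <stddef.h>

#define ICACHE_FLUSHSZ	0x20	/* bytes covered per flush, per the comments */

extern void membar_storestore(void);	/* models membar #StoreStore */
extern void flush_insn(void *vaddr);	/* models the SPARC flush insn */

void
flush_instr_mem_sketch(char *vaddr, size_t len)
{
	membar_storestore();	/* make the new instructions globally visible */
	for (long n = (long)len; n > 0; n -= ICACHE_FLUSHSZ) {
		flush_insn(vaddr);
		vaddr += ICACHE_FLUSHSZ;
	}
}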
789 691 /*
790 692 * flush_ecache:
791 693 * Flush the entire e$ using displacement flush by reading through a
792 694 * physically contiguous area. We use mmu bypass asi (ASI_MEM) while
793 695 * reading this physical address range so that data doesn't go to d$.
794 696 * incoming arguments:
795 697 * %o0 - 64 bit physical address
796 698 * %o1 - size of address range to read
797 699 * %o2 - ecache linesize
798 700 */
799 701 ENTRY(flush_ecache)
800 702 #ifndef HUMMINGBIRD
801 703 b 2f
802 704 nop
803 705 1:
804 706 ldxa [%o0 + %o1]ASI_MEM, %g0 ! start reading from physaddr + size
805 707 2:
806 708 subcc %o1, %o2, %o1
807 709 bcc,a,pt %ncc, 1b
808 710 nop
809 711
810 712 #else /* HUMMINGBIRD */
811 713 /*
812 714 * UltraSPARC-IIe processor supports both 4-way set associative
813 715 * and direct map E$. For performance reasons, we flush E$ by
814 716 * placing it in direct map mode for data load/store and restore
815 717 * the state after we are done flushing it. It takes 2 iterations
816 718 * to guarantee that the entire ecache has been flushed.
817 719 *
818 720 * Keep the interrupts disabled while flushing E$ in this manner.
819 721 */
820 722 rdpr %pstate, %g4 ! current pstate (restored later)
821 723 andn %g4, PSTATE_IE, %g5
822 724 wrpr %g0, %g5, %pstate ! disable interrupts
823 725
824 726 ! Place E$ in direct map mode for data access
825 727 or %g0, 1, %g5
826 728 sllx %g5, HB_UPA_DMAP_DATA_BIT, %g5
827 729 ldxa [%g0]ASI_UPA_CONFIG, %g1 ! current UPA config (restored later)
828 730 or %g1, %g5, %g5
829 731 membar #Sync
830 732 stxa %g5, [%g0]ASI_UPA_CONFIG ! enable direct map for data access
831 733 membar #Sync
832 734
833 735 ! flush entire ecache HB_ECACHE_FLUSH_CNT times
834 736 mov HB_ECACHE_FLUSH_CNT-1, %g5
835 737 2:
836 738 sub %o1, %o2, %g3 ! start from last entry
837 739 1:
838 740 ldxa [%o0 + %g3]ASI_MEM, %g0 ! start reading from physaddr + size
839 741 subcc %g3, %o2, %g3
840 742 bgeu,a,pt %ncc, 1b
841 743 nop
842 744 brgz,a,pt %g5, 2b
843 745 dec %g5
844 746
845 747 membar #Sync
846 748 stxa %g1, [%g0]ASI_UPA_CONFIG ! restore UPA config reg
847 749 membar #Sync
848 750 wrpr %g0, %g4, %pstate ! restore earlier pstate
849 751 #endif /* HUMMINGBIRD */
850 752
851 753 retl
852 754 nop
853 755 SET_SIZE(flush_ecache)
854 756
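For the simple (non-HUMMINGBIRD) path, the displacement flush is just a stride of bypass loads over the range; this sketch models that loop only, with phys_load() a hypothetical stand-in for ldxa through ASI_MEM.

#include <stdint.h>
#include <stddef.h>

extern void phys_load(uint64_t paddr);	/* models ldxa [...]ASI_MEM */

/*
 * Displacement flush: read each line of a physically contiguous range
 * through the MMU-bypass ASI so the loads displace whatever the e$ holds.
 */
void
flush_ecache_sketch(uint64_t physaddr, size_t size, size_t linesize)
{
	for (long off = (long)(size - linesize); off >= 0; off -= (long)linesize)
		phys_load(physaddr + (uint64_t)off);
}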
855 757 /*
856 758 * void kdi_flush_idcache(int dcache_size, int dcache_linesize,
857 759 * int icache_size, int icache_linesize)
858 760 */
859 761 ENTRY(kdi_flush_idcache)
860 762 DCACHE_FLUSHALL(%o0, %o1, %g1)
861 763 ICACHE_FLUSHALL(%o2, %o3, %g1)
862 764 membar #Sync
863 765 retl
864 766 nop
865 767 SET_SIZE(kdi_flush_idcache)
866 768
867 769
868 770 /*
869 771 * void get_ecache_dtag(uint32_t ecache_idx, uint64_t *data, uint64_t *tag,
870 772 * uint64_t *oafsr, uint64_t *acc_afsr)
871 773 *
872 774 * Get ecache data and tag. The ecache_idx argument is assumed to be aligned
873 775 * on a 64-byte boundary. The corresponding AFSR value is also read for each
874 776 * 8 byte ecache data obtained. The ecache data is assumed to be a pointer
875 777 * to an array of 16 uint64_t's (e$data & afsr value). The action to read the
876 778 * data and tag should be atomic to make sense. We will be executing at PIL15
877 779 * and will disable IE, so nothing can occur between the two reads. We also
878 780 * assume that the execution of this code does not interfere with what we are
879 781 * reading - not really possible, but we'll live with it for now.
880 782 * We also pass the old AFSR value before clearing it, and caller will take
881 783 * appropriate actions if the important bits are non-zero.
882 784 *
883 785 * If the caller wishes to track the AFSR in cases where the CP bit is
884 786 * set, an address should be passed in for acc_afsr. Otherwise, this
885 787 * argument may be null.
886 788 *
887 789 * Register Usage:
888 790 * i0: In: 32-bit e$ index
889 791 * i1: In: addr of e$ data
890 792 * i2: In: addr of e$ tag
891 793 * i3: In: addr of old afsr
892 794 * i4: In: addr of accumulated afsr - may be null
893 795 */
894 796 ENTRY(get_ecache_dtag)
895 797 save %sp, -SA(MINFRAME), %sp
896 798 or %g0, 1, %l4
897 799 sllx %l4, 39, %l4 ! set bit 39 for e$ data access
898 800 or %i0, %l4, %g6 ! %g6 = e$ addr for data read
899 801 sllx %l4, 1, %l4 ! set bit 40 for e$ tag access
900 802 or %i0, %l4, %l4 ! %l4 = e$ addr for tag read
901 803
902 804 rdpr %pstate, %i5
903 805 andn %i5, PSTATE_IE | PSTATE_AM, %i0
904 806 wrpr %i0, %g0, %pstate ! clear IE, AM bits
905 807
906 808 ldxa [%g0]ASI_ESTATE_ERR, %g1
907 809 stxa %g0, [%g0]ASI_ESTATE_ERR ! disable errors
908 810 membar #Sync
909 811
910 812 ldxa [%g0]ASI_AFSR, %i0 ! grab the old-afsr before tag read
911 813 stx %i0, [%i3] ! write back the old-afsr
912 814
913 815 ldxa [%l4]ASI_EC_R, %g0 ! read tag into E$ tag reg
914 816 ldxa [%g0]ASI_EC_DIAG, %i0 ! read tag from E$ tag reg
915 817 stx %i0, [%i2] ! write back tag result
916 818
917 819 clr %i2 ! loop count
918 820
919 821 brz %i4, 1f ! acc_afsr == NULL?
920 822 ldxa [%g0]ASI_AFSR, %i0 ! grab the old-afsr before clearing
921 823 srlx %i0, P_AFSR_CP_SHIFT, %l0
922 824 btst 1, %l0
923 825 bz 1f
924 826 nop
925 827 ldx [%i4], %g4
926 828 or %g4, %i0, %g4 ! aggregate AFSR in cpu private
927 829 stx %g4, [%i4]
928 830 1:
929 831 stxa %i0, [%g0]ASI_AFSR ! clear AFSR
930 832 membar #Sync
931 833 ldxa [%g6]ASI_EC_R, %i0 ! read the 8byte E$data
932 834 stx %i0, [%i1] ! save the E$data
933 835 add %g6, 8, %g6
934 836 add %i1, 8, %i1
935 837 ldxa [%g0]ASI_AFSR, %i0 ! read AFSR for this 16byte read
936 838 srlx %i0, P_AFSR_CP_SHIFT, %l0
937 839 btst 1, %l0
938 840 bz 2f
939 841 stx %i0, [%i1] ! save the AFSR
940 842
941 843 brz %i4, 2f ! acc_afsr == NULL?
942 844 nop
943 845 ldx [%i4], %g4
944 846 or %g4, %i0, %g4 ! aggregate AFSR in cpu private
945 847 stx %g4, [%i4]
946 848 2:
947 849 add %i2, 8, %i2
948 850 cmp %i2, 64
949 851 bl,a 1b
950 852 add %i1, 8, %i1
951 853 stxa %i0, [%g0]ASI_AFSR ! clear AFSR
952 854 membar #Sync
953 855 stxa %g1, [%g0]ASI_ESTATE_ERR ! restore error enable
954 856 membar #Sync
955 857 wrpr %g0, %i5, %pstate
956 858 ret
957 859 restore
958 860 SET_SIZE(get_ecache_dtag)
959 -#endif /* lint */
960 861
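The buffer get_ecache_dtag fills through %i1 interleaves data and AFSR values, per the store/advance pattern in the loop above. A C description of that layout (the type names are illustrative, not from the kernel headers):

#include <stdint.h>

/*
 * One 64-byte e$ line yields eight (data, AFSR) pairs, i.e. the
 * "array of 16 uint64_t's" the block comment above describes.
 */
typedef struct {
	uint64_t ec_data;	/* one doubleword of e$ data */
	uint64_t ec_afsr;	/* AFSR captured after reading it */
} ec_dtag_pair_t;

typedef ec_dtag_pair_t ec_line_dtag_t[8];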
961 -#if defined(lint)
962 -/*
963 - * The ce_err function handles trap type 0x63 (corrected_ECC_error) at tl=0.
964 - * Steps: 1. GET AFSR 2. Get AFAR <40:4> 3. Get datapath error status
965 - * 4. Clear datapath error bit(s) 5. Clear AFSR error bit
966 - * 6. package data in %g2 and %g3 7. call cpu_ce_error vis sys_trap
967 - * %g2: [ 52:43 UDB lower | 42:33 UDB upper | 32:0 afsr ] - arg #3/arg #1
968 - * %g3: [ 40:4 afar ] - sys_trap->have_win: arg #4/arg #2
969 - */
970 -void
971 -ce_err(void)
972 -{}
973 -
974 -void
975 -ce_err_tl1(void)
976 -{}
977 -
978 -
979 -/*
980 - * The async_err function handles trap types 0x0A (instruction_access_error)
981 - * and 0x32 (data_access_error) at TL = 0 and TL > 0. When we branch here,
982 - * %g5 will have the trap type (with 0x200 set if we're at TL > 0).
983 - *
984 - * Steps: 1. Get AFSR 2. Get AFAR <40:4> 3. If not UE error skip UDP registers.
985 - * 4. Else get and clear datapath error bit(s) 4. Clear AFSR error bits
986 - * 6. package data in %g2 and %g3 7. disable all cpu errors, because
987 - * trap is likely to be fatal 8. call cpu_async_error vis sys_trap
988 - *
989 - * %g3: [ 63:53 tt | 52:43 UDB_L | 42:33 UDB_U | 32:0 afsr ] - arg #3/arg #1
990 - * %g2: [ 40:4 afar ] - sys_trap->have_win: arg #4/arg #2
991 - */
992 -void
993 -async_err(void)
994 -{}
995 -
996 -/*
997 - * The clr_datapath function clears any error bits set in the UDB regs.
998 - */
999 -void
1000 -clr_datapath(void)
1001 -{}
1002 -
1003 -/*
1004 - * The get_udb_errors() function gets the current value of the
1005 - * Datapath Error Registers.
1006 - */
1007 -/*ARGSUSED*/
1008 -void
1009 -get_udb_errors(uint64_t *udbh, uint64_t *udbl)
1010 -{
1011 - *udbh = 0;
1012 - *udbl = 0;
1013 -}
1014 -
1015 -#else /* lint */
1016 -
1017 862 ENTRY_NP(ce_err)
1018 863 ldxa [%g0]ASI_AFSR, %g3 ! save afsr in g3
1019 864
1020 865 !
1021 866 ! Check for a UE... From Kevin.Normoyle:
1022 867 ! We try to switch to the trap for the UE, but since that's
1023 868 ! a hardware pipeline, we might get to the CE trap before we
1024 869 ! can switch. The UDB and AFSR registers will have both the
1025 870 ! UE and CE bits set but the UDB syndrome and the AFAR will be
1026 871 ! for the UE.
1027 872 !
1028 873 or %g0, 1, %g1 ! put 1 in g1
1029 874 sllx %g1, 21, %g1 ! shift left to <21> afsr UE
1030 875 andcc %g1, %g3, %g0 ! check for UE in afsr
1031 876 bnz async_err ! handle the UE, not the CE
1032 877 or %g0, 0x63, %g5 ! pass along the CE ttype
1033 878 !
1034 879 ! Disable further CE traps to avoid recursion (stack overflow)
1035 880 ! and staying above XCALL_PIL for extended periods.
1036 881 !
1037 882 ldxa [%g0]ASI_ESTATE_ERR, %g2
1038 883 andn %g2, 0x1, %g2 ! clear bit 0 - CEEN
1039 884 stxa %g2, [%g0]ASI_ESTATE_ERR
1040 885 membar #Sync ! required
1041 886 !
1042 887 ! handle the CE
1043 888 ldxa [%g0]ASI_AFAR, %g2 ! save afar in g2
1044 889
1045 890 set P_DER_H, %g4 ! put P_DER_H in g4
1046 891 ldxa [%g4]ASI_SDB_INTR_R, %g5 ! read sdb upper half into g5
1047 892 or %g0, 1, %g6 ! put 1 in g6
1048 893 sllx %g6, 8, %g6 ! shift g6 to <8> sdb CE
1049 894 andcc %g5, %g6, %g1 ! check for CE in upper half
1050 895 sllx %g5, 33, %g5 ! shift upper bits to <42:33>
1051 896 or %g3, %g5, %g3 ! or with afsr bits
1052 897 bz,a 1f ! no error, goto 1f
1053 898 nop
1054 899 stxa %g1, [%g4]ASI_SDB_INTR_W ! clear sdb reg error bit
1055 900 membar #Sync ! membar sync required
1056 901 1:
1057 902 set P_DER_L, %g4 ! put P_DER_L in g4
1058  903 	ldxa	[%g4]ASI_SDB_INTR_R, %g5	! read sdb lower half into g5
1059 904 andcc %g5, %g6, %g1 ! check for CE in lower half
1060 905 sllx %g5, 43, %g5 ! shift upper bits to <52:43>
1061 906 or %g3, %g5, %g3 ! or with afsr bits
1062 907 bz,a 2f ! no error, goto 2f
1063 908 nop
1064 909 stxa %g1, [%g4]ASI_SDB_INTR_W ! clear sdb reg error bit
1065 910 membar #Sync ! membar sync required
1066 911 2:
1067 912 or %g0, 1, %g4 ! put 1 in g4
1068 913 sllx %g4, 20, %g4 ! shift left to <20> afsr CE
1069 914 stxa %g4, [%g0]ASI_AFSR ! use g4 to clear afsr CE error
1070 915 membar #Sync ! membar sync required
1071 916
1072 917 set cpu_ce_error, %g1 ! put *cpu_ce_error() in g1
1073 918 rdpr %pil, %g6 ! read pil into %g6
1074 919 subcc %g6, PIL_15, %g0
1075 920 movneg %icc, PIL_14, %g4 ! run at pil 14 unless already at 15
1076 921 sethi %hi(sys_trap), %g5
1077 922 jmp %g5 + %lo(sys_trap) ! goto sys_trap
1078 923 movge %icc, PIL_15, %g4 ! already at pil 15
1079 924 SET_SIZE(ce_err)
1080 925
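The value ce_err hands to sys_trap packs the AFSR and both UDB halves into one register; the field positions below are taken from the sllx comments above (<52:43> UDB lower, <42:33> UDB upper, <32:0> AFSR). The masking is a sketch nicety, the assembly ors the shifted registers directly because the hardware returns only the low status bits.

#include <stdint.h>

#define AFSR_UE		(1ULL << 21)	/* afsr<21>, per the sllx 21 above */
#define AFSR_CE		(1ULL << 20)	/* afsr<20> */
#define UDB_FIELD	0x3ffULL	/* ten UDB status bits per half */

uint64_t
pack_ce_status(uint64_t afsr, uint64_t udb_hi, uint64_t udb_lo)
{
	return (afsr | ((udb_hi & UDB_FIELD) << 33) |
	    ((udb_lo & UDB_FIELD) << 43));
}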
1081 926 ENTRY_NP(ce_err_tl1)
1082 927 #ifndef TRAPTRACE
1083 928 ldxa [%g0]ASI_AFSR, %g7
1084 929 stxa %g7, [%g0]ASI_AFSR
1085 930 membar #Sync
1086 931 retry
1087 932 #else
1088 933 set ce_trap_tl1, %g1
1089 934 sethi %hi(dis_err_panic1), %g4
1090 935 jmp %g4 + %lo(dis_err_panic1)
1091 936 nop
1092 937 #endif
1093 938 SET_SIZE(ce_err_tl1)
1094 939
1095 940 #ifdef TRAPTRACE
1096 941 .celevel1msg:
1097 942 .asciz "Softerror with trap tracing at tl1: AFAR 0x%08x.%08x AFSR 0x%08x.%08x";
1098 943
1099 944 ENTRY_NP(ce_trap_tl1)
1100 945 ! upper 32 bits of AFSR already in o3
1101 946 mov %o4, %o0 ! save AFAR upper 32 bits
1102 947 mov %o2, %o4 ! lower 32 bits of AFSR
1103 948 mov %o1, %o2 ! lower 32 bits of AFAR
1104 949 mov %o0, %o1 ! upper 32 bits of AFAR
1105 950 set .celevel1msg, %o0
1106 951 call panic
1107 952 nop
1108 953 SET_SIZE(ce_trap_tl1)
1109 954 #endif
1110 955
1111 956 !
1112 957 ! async_err is the assembly glue code to get us from the actual trap
1113 958 ! into the CPU module's C error handler. Note that we also branch
1114 959 ! here from ce_err() above.
1115 960 !
1116 961 ENTRY_NP(async_err)
1117 962 stxa %g0, [%g0]ASI_ESTATE_ERR ! disable ecc and other cpu errors
1118 963 membar #Sync ! membar sync required
1119 964
1120 965 ldxa [%g0]ASI_AFSR, %g3 ! save afsr in g3
1121 966 ldxa [%g0]ASI_AFAR, %g2 ! save afar in g2
1122 967
1123 968 sllx %g5, 53, %g5 ! move ttype to <63:53>
1124 969 or %g3, %g5, %g3 ! or to afsr in g3
1125 970
1126 971 or %g0, 1, %g1 ! put 1 in g1
1127 972 sllx %g1, 21, %g1 ! shift left to <21> afsr UE
1128 973 andcc %g1, %g3, %g0 ! check for UE in afsr
1129 974 bz,a,pn %icc, 2f ! if !UE skip sdb read/clear
1130 975 nop
1131 976
1132 977 set P_DER_H, %g4 ! put P_DER_H in g4
1133  978 	ldxa	[%g4]ASI_SDB_INTR_R, %g5	! read sdb upper half into g5
1134 979 or %g0, 1, %g6 ! put 1 in g6
1135 980 sllx %g6, 9, %g6 ! shift g6 to <9> sdb UE
1136 981 andcc %g5, %g6, %g1 ! check for UE in upper half
1137 982 sllx %g5, 33, %g5 ! shift upper bits to <42:33>
1138 983 or %g3, %g5, %g3 ! or with afsr bits
1139 984 bz,a 1f ! no error, goto 1f
1140 985 nop
1141 986 stxa %g1, [%g4]ASI_SDB_INTR_W ! clear sdb reg UE error bit
1142 987 membar #Sync ! membar sync required
1143 988 1:
1144 989 set P_DER_L, %g4 ! put P_DER_L in g4
1145 990 ldxa [%g4]ASI_SDB_INTR_R, %g5 ! read sdb lower half into g5
1146 991 andcc %g5, %g6, %g1 ! check for UE in lower half
1147 992 sllx %g5, 43, %g5 ! shift upper bits to <52:43>
1148 993 or %g3, %g5, %g3 ! or with afsr bits
1149 994 bz,a 2f ! no error, goto 2f
1150 995 nop
1151 996 stxa %g1, [%g4]ASI_SDB_INTR_W ! clear sdb reg UE error bit
1152 997 membar #Sync ! membar sync required
1153 998 2:
1154 999 stxa %g3, [%g0]ASI_AFSR ! clear all the sticky bits
1155 1000 membar #Sync ! membar sync required
1156 1001
1157 1002 RESET_USER_RTT_REGS(%g4, %g5, async_err_resetskip)
1158 1003 async_err_resetskip:
1159 1004
1160 1005 set cpu_async_error, %g1 ! put cpu_async_error in g1
1161 1006 sethi %hi(sys_trap), %g5
1162 1007 jmp %g5 + %lo(sys_trap) ! goto sys_trap
1163 1008 or %g0, PIL_15, %g4 ! run at pil 15
1164 1009 SET_SIZE(async_err)
1165 1010
1166 1011 ENTRY_NP(dis_err_panic1)
1167 1012 stxa %g0, [%g0]ASI_ESTATE_ERR ! disable all error traps
1168 1013 membar #Sync
1169 1014 	! sys_trap destination routine is already in g1
1170 1015 ldxa [%g0]ASI_AFAR, %g2 ! read afar
1171 1016 ldxa [%g0]ASI_AFSR, %g3 ! read afsr
1172 1017 set P_DER_H, %g4 ! put P_DER_H in g4
1173 1018 ldxa [%g4]ASI_SDB_INTR_R, %g5 ! read sdb upper half into g5
1174 1019 sllx %g5, 33, %g5 ! shift upper bits to <42:33>
1175 1020 or %g3, %g5, %g3 ! or with afsr bits
1176 1021 set P_DER_L, %g4 ! put P_DER_L in g4
1177 1022 ldxa [%g4]ASI_SDB_INTR_R, %g5 ! read sdb lower half into g5
1178 1023 sllx %g5, 43, %g5 ! shift upper bits to <52:43>
1179 1024 or %g3, %g5, %g3 ! or with afsr bits
1180 1025
1181 1026 RESET_USER_RTT_REGS(%g4, %g5, dis_err_panic1_resetskip)
1182 1027 dis_err_panic1_resetskip:
1183 1028
1184 1029 sethi %hi(sys_trap), %g5
1185 1030 jmp %g5 + %lo(sys_trap) ! goto sys_trap
1186 1031 sub %g0, 1, %g4
1187 1032 SET_SIZE(dis_err_panic1)
1188 1033
1189 1034 ENTRY(clr_datapath)
1190 1035 set P_DER_H, %o4 ! put P_DER_H in o4
1191 1036 	set	P_DER_H, %o4			! put P_DER_H in o4
1192 1037 or %g0, 0x3, %o2 ! put 0x3 in o2
1193 1038 sllx %o2, 8, %o2 ! shift o2 to <9:8> sdb
1194 1039 andcc %o5, %o2, %o1 ! check for UE,CE in upper half
1195 1040 bz,a 1f ! no error, goto 1f
1196 1041 nop
1197 1042 stxa %o1, [%o4]ASI_SDB_INTR_W ! clear sdb reg UE,CE error bits
1198 1043 membar #Sync ! membar sync required
1199 1044 1:
1200 1045 set P_DER_L, %o4 ! put P_DER_L in o4
1201 1046 ldxa [%o4]ASI_SDB_INTR_R, %o5 ! read sdb lower half into o5
1202 1047 andcc %o5, %o2, %o1 ! check for UE,CE in lower half
1203 1048 bz,a 2f ! no error, goto 2f
1204 1049 nop
1205 1050 stxa %o1, [%o4]ASI_SDB_INTR_W ! clear sdb reg UE,CE error bits
1206 1051 membar #Sync
1207 1052 2:
1208 1053 retl
1209 1054 nop
1210 1055 SET_SIZE(clr_datapath)
1211 1056
1212 1057 ENTRY(get_udb_errors)
1213 1058 set P_DER_H, %o3
1214 1059 ldxa [%o3]ASI_SDB_INTR_R, %o2
1215 1060 stx %o2, [%o0]
1216 1061 set P_DER_L, %o3
1217 1062 ldxa [%o3]ASI_SDB_INTR_R, %o2
1218 1063 retl
1219 1064 stx %o2, [%o1]
1220 1065 SET_SIZE(get_udb_errors)
1221 1066
1222 -#endif /* lint */
1223 -
1224 -#if defined(lint)
1225 1067 /*
1226 - * The itlb_rd_entry and dtlb_rd_entry functions return the tag portion of the
1227 - * tte, the virtual address, and the ctxnum of the specified tlb entry. They
1228 - * should only be used in places where you have no choice but to look at the
1229 - * tlb itself.
1230 - *
1231 - * Note: These two routines are required by the Estar "cpr" loadable module.
1232 - */
1233 -/*ARGSUSED*/
1234 -void
1235 -itlb_rd_entry(uint_t entry, tte_t *tte, uint64_t *va_tag)
1236 -{}
1237 -
1238 -/*ARGSUSED*/
1239 -void
1240 -dtlb_rd_entry(uint_t entry, tte_t *tte, uint64_t *va_tag)
1241 -{}
1242 -#else /* lint */
1243 -/*
1244 1068 * NB - In Spitfire cpus, when reading a tte from the hardware, we
1245 1069 * need to clear [42-41] because the general definitions in pte.h
1246 1070 * define the PA to be [42-13] whereas Spitfire really uses [40-13].
1247 1071 * When cloning these routines for other cpus the "andn" below is not
1248 1072 * necessary.
1249 1073 */
1250 1074 ENTRY_NP(itlb_rd_entry)
1251 1075 sllx %o0, 3, %o0
1252 1076 #if defined(SF_ERRATA_32)
1253 1077 sethi %hi(FLUSH_ADDR), %g2
1254 1078 set MMU_PCONTEXT, %g1
1255 1079 stxa %g0, [%g1]ASI_DMMU ! KCONTEXT
1256 1080 flush %g2
1257 1081 #endif
1258 1082 ldxa [%o0]ASI_ITLB_ACCESS, %g1
1259 1083 set TTE_SPITFIRE_PFNHI_CLEAR, %g2 ! spitfire only
1260 1084 sllx %g2, TTE_SPITFIRE_PFNHI_SHIFT, %g2 ! see comment above
1261 1085 andn %g1, %g2, %g1 ! for details
1262 1086 stx %g1, [%o1]
1263 1087 ldxa [%o0]ASI_ITLB_TAGREAD, %g2
1264 1088 set TAGREAD_CTX_MASK, %o4
1265 1089 andn %g2, %o4, %o5
1266 1090 retl
1267 1091 stx %o5, [%o2]
1268 1092 SET_SIZE(itlb_rd_entry)
1269 1093
1270 1094 ENTRY_NP(dtlb_rd_entry)
1271 1095 sllx %o0, 3, %o0
1272 1096 #if defined(SF_ERRATA_32)
1273 1097 sethi %hi(FLUSH_ADDR), %g2
1274 1098 set MMU_PCONTEXT, %g1
1275 1099 stxa %g0, [%g1]ASI_DMMU ! KCONTEXT
1276 1100 flush %g2
1277 1101 #endif
1278 1102 ldxa [%o0]ASI_DTLB_ACCESS, %g1
1279 1103 set TTE_SPITFIRE_PFNHI_CLEAR, %g2 ! spitfire only
1280 1104 sllx %g2, TTE_SPITFIRE_PFNHI_SHIFT, %g2 ! see comment above
1281 1105 andn %g1, %g2, %g1 ! itlb_rd_entry
1282 1106 stx %g1, [%o1]
1283 1107 ldxa [%o0]ASI_DTLB_TAGREAD, %g2
1284 1108 set TAGREAD_CTX_MASK, %o4
1285 1109 andn %g2, %o4, %o5
1286 1110 retl
1287 1111 stx %o5, [%o2]
1288 1112 SET_SIZE(dtlb_rd_entry)
1289 -#endif /* lint */
1290 1113
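The "andn" both TLB-read routines apply corresponds to this mask computation, assuming from the comment above that TTE_SPITFIRE_PFNHI_CLEAR/SHIFT name bits <42:41>:

#include <stdint.h>

/*
 * Clear tte PA bits <42:41>: pte.h defines PA as <42:13> but Spitfire
 * implements only <40:13>, so the two top bits must be masked off.
 */
uint64_t
clean_spitfire_tte(uint64_t tte)
{
	return (tte & ~(0x3ULL << 41));
}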
1291 -#if defined(lint)
1292 -
1293 -/*
1294 - * routines to get and set the LSU register
1295 - */
1296 -uint64_t
1297 -get_lsu(void)
1298 -{
1299 - return ((uint64_t)0);
1300 -}
1301 -
1302 -/*ARGSUSED*/
1303 -void
1304 -set_lsu(uint64_t lsu)
1305 -{}
1306 -
1307 -#else /* lint */
1308 -
1309 1114 ENTRY(set_lsu)
1310 1115 stxa %o0, [%g0]ASI_LSU ! store to LSU
1311 1116 retl
1312 1117 membar #Sync
1313 1118 SET_SIZE(set_lsu)
1314 1119
1315 1120 ENTRY(get_lsu)
1316 1121 retl
1317 1122 ldxa [%g0]ASI_LSU, %o0 ! load LSU
1318 1123 SET_SIZE(get_lsu)
1319 1124
1320 -#endif /* lint */
1321 -
1322 -#ifndef lint
1323 1125 /*
1324 1126 * Clear the NPT (non-privileged trap) bit in the %tick
1325 1127 * registers. In an effort to make the change in the
1326 1128 * tick counter as consistent as possible, we disable
1327 1129 * all interrupts while we're changing the registers. We also
1328 1130 * ensure that the read and write instructions are in the same
1329 1131 * line in the instruction cache.
1330 1132 */
1331 1133 ENTRY_NP(cpu_clearticknpt)
1332 1134 rdpr %pstate, %g1 /* save processor state */
1333 1135 andn %g1, PSTATE_IE, %g3 /* turn off */
1334 1136 wrpr %g0, %g3, %pstate /* interrupts */
1335 1137 rdpr %tick, %g2 /* get tick register */
1336 1138 brgez,pn %g2, 1f /* if NPT bit off, we're done */
1337 1139 mov 1, %g3 /* create mask */
1338 1140 sllx %g3, 63, %g3 /* for NPT bit */
1339 1141 ba,a,pt %xcc, 2f
1340 1142 .align 64 /* Align to I$ boundary */
1341 1143 2:
1342 1144 rdpr %tick, %g2 /* get tick register */
1343 1145 wrpr %g3, %g2, %tick /* write tick register, */
1344 1146 /* clearing NPT bit */
1345 1147 #if defined(BB_ERRATA_1)
1346 1148 rdpr %tick, %g0 /* read (s)tick (BB_ERRATA_1) */
1347 1149 #endif
1348 1150 1:
1349 1151 jmp %g4 + 4
1350 1152 wrpr %g0, %g1, %pstate /* restore processor state */
1351 1153 SET_SIZE(cpu_clearticknpt)
1352 1154
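The effect of the wrpr sequence above, in C terms; the assembly actually xors the mask into %tick, which clears the bit only because the brgez test has already established that NPT is set on that path.

#include <stdint.h>

/* NPT is %tick<63>; clearing it permits non-privileged %tick reads. */
uint64_t
tick_clear_npt(uint64_t tick)
{
	return (tick & ~(1ULL << 63));
}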
1353 1155 /*
1354 1156 * get_ecache_tag()
1355 1157 * Register Usage:
1356 1158 * %o0: In: 32-bit E$ index
1357 1159 * Out: 64-bit E$ tag value
1358 1160 * %o1: In: 64-bit AFSR value after clearing sticky bits
1359 1161 * %o2: In: address of cpu private afsr storage
1360 1162 */
1361 1163 ENTRY(get_ecache_tag)
1362 1164 or %g0, 1, %o4
1363 1165 sllx %o4, 40, %o4 ! set bit 40 for e$ tag access
1364 1166 or %o0, %o4, %o4 ! %o4 = e$ addr for tag read
1365 1167 rdpr %pstate, %o5
1366 1168 andn %o5, PSTATE_IE | PSTATE_AM, %o0
1367 1169 wrpr %o0, %g0, %pstate ! clear IE, AM bits
1368 1170
1369 1171 ldxa [%g0]ASI_ESTATE_ERR, %g1
1370 1172 stxa %g0, [%g0]ASI_ESTATE_ERR ! Turn off Error enable
1371 1173 membar #Sync
1372 1174
1373 1175 ldxa [%g0]ASI_AFSR, %o0
1374 1176 srlx %o0, P_AFSR_CP_SHIFT, %o3
1375 1177 btst 1, %o3
1376 1178 bz 1f
1377 1179 nop
1378 1180 ldx [%o2], %g4
1379 1181 or %g4, %o0, %g4 ! aggregate AFSR in cpu private
1380 1182 stx %g4, [%o2]
1381 1183 1:
1382 1184 stxa %o0, [%g0]ASI_AFSR ! clear AFSR
1383 1185 membar #Sync
1384 1186
1385 1187 ldxa [%o4]ASI_EC_R, %g0
1386 1188 ldxa [%g0]ASI_EC_DIAG, %o0 ! read tag from e$ tag reg
1387 1189
1388 1190 ldxa [%g0]ASI_AFSR, %o3
1389 1191 srlx %o3, P_AFSR_CP_SHIFT, %o4
1390 1192 btst 1, %o4
1391 1193 bz 2f
1392 1194 stx %o3, [%o1] ! AFSR after sticky clear
1393 1195 ldx [%o2], %g4
1394 1196 or %g4, %o3, %g4 ! aggregate AFSR in cpu private
1395 1197 stx %g4, [%o2]
1396 1198 2:
1397 1199 membar #Sync
1398 1200
1399 1201 stxa %g1, [%g0]ASI_ESTATE_ERR ! Turn error enable back on
1400 1202 membar #Sync
1401 1203 retl
1402 1204 wrpr %g0, %o5, %pstate
1403 1205 SET_SIZE(get_ecache_tag)
1404 1206
1405 1207 /*
1406 1208 * check_ecache_line()
1407 1209 * Register Usage:
1408 1210 * %o0: In: 32-bit E$ index
1409 1211 * Out: 64-bit accumulated AFSR
1410 1212 * %o1: In: address of cpu private afsr storage
1411 1213 */
1412 1214 ENTRY(check_ecache_line)
1413 1215 or %g0, 1, %o4
1414 1216 sllx %o4, 39, %o4 ! set bit 39 for e$ data access
1415 1217 or %o0, %o4, %o4 ! %o4 = e$ addr for data read
1416 1218
1417 1219 rdpr %pstate, %o5
1418 1220 andn %o5, PSTATE_IE | PSTATE_AM, %o0
1419 1221 wrpr %o0, %g0, %pstate ! clear IE, AM bits
1420 1222
1421 1223 ldxa [%g0]ASI_ESTATE_ERR, %g1
1422 1224 stxa %g0, [%g0]ASI_ESTATE_ERR ! Turn off Error enable
1423 1225 membar #Sync
1424 1226
1425 1227 ldxa [%g0]ASI_AFSR, %o0
1426 1228 srlx %o0, P_AFSR_CP_SHIFT, %o2
1427 1229 btst 1, %o2
1428 1230 bz 1f
1429 1231 clr %o2 ! loop count
1430 1232 ldx [%o1], %o3
1431 1233 or %o3, %o0, %o3 ! aggregate AFSR in cpu private
1432 1234 stx %o3, [%o1]
1433 1235 1:
1434 1236 stxa %o0, [%g0]ASI_AFSR ! clear AFSR
1435 1237 membar #Sync
1436 1238
1437 1239 2:
1438 1240 ldxa [%o4]ASI_EC_R, %g0 ! Read the E$ data 8bytes each
1439 1241 add %o2, 1, %o2
1440 1242 cmp %o2, 8
1441 1243 bl,a 2b
1442 1244 add %o4, 8, %o4
1443 1245
1444 1246 membar #Sync
1445 1247 ldxa [%g0]ASI_AFSR, %o0 ! read accumulated AFSR
1446 1248 srlx %o0, P_AFSR_CP_SHIFT, %o2
1447 1249 btst 1, %o2
1448 1250 bz 3f
1449 1251 nop
1450 1252 ldx [%o1], %o3
1451 1253 or %o3, %o0, %o3 ! aggregate AFSR in cpu private
1452 1254 stx %o3, [%o1]
1453 1255 3:
1454 1256 stxa %o0, [%g0]ASI_AFSR ! clear AFSR
1455 1257 membar #Sync
1456 1258 stxa %g1, [%g0]ASI_ESTATE_ERR ! Turn error enable back on
1457 1259 membar #Sync
1458 1260 retl
1459 1261 wrpr %g0, %o5, %pstate
1460 1262 SET_SIZE(check_ecache_line)
1461 -#endif /* lint */
1462 1263
1463 -#if defined(lint)
1464 -uint64_t
1465 -read_and_clear_afsr()
1466 -{
1467 - return ((uint64_t)0);
1468 -}
1469 -#else /* lint */
1470 1264 ENTRY(read_and_clear_afsr)
1471 1265 ldxa [%g0]ASI_AFSR, %o0
1472 1266 retl
1473 1267 stxa %o0, [%g0]ASI_AFSR ! clear AFSR
1474 1268 SET_SIZE(read_and_clear_afsr)
1475 -#endif /* lint */
1476 1269
1477 -#if defined(lint)
1478 -/* ARGSUSED */
1479 -void
1480 -scrubphys(uint64_t paddr, int ecache_size)
1481 -{
1482 -}
1483 -
1484 -#else /* lint */
1485 -
1486 1270 /*
1487 1271 * scrubphys - Pass in the aligned physical memory address that you want
1488 1272 * to scrub, along with the ecache size.
1489 1273 *
1490 1274 * 1) Displacement flush the E$ line corresponding to %addr.
1491 1275 * The first ldxa guarantees that the %addr is no longer in
1492 1276 * M, O, or E (goes to I or S (if instruction fetch also happens).
1493 1277 * 2) "Write" the data using a CAS %addr,%g0,%g0.
1494 1278 * The casxa guarantees a transition from I to M or S to M.
1495 1279 * 3) Displacement flush the E$ line corresponding to %addr.
1496 1280 * The second ldxa pushes the M line out of the ecache, into the
1497 1281 * writeback buffers, on the way to memory.
1498 1282 * 4) The "membar #Sync" pushes the cache line out of the writeback
1499 1283 * buffers onto the bus, on the way to dram finally.
1500 1284 *
1501 1285 * This is a modified version of the algorithm suggested by Gary Lauterbach.
1502 1286 * In theory the CAS %addr,%g0,%g0 is supposed to mark the addr's cache line
1503 1287 * as modified, but then we found out that for spitfire, if it misses in the
1504 1288 * E$ it will probably install as an M, but if it hits in the E$, then it
1505 1289 * will stay E, if the store doesn't happen. So the first displacement flush
1506 1290 * should ensure that the CAS will miss in the E$. Arrgh.
1507 1291 */
1508 1292
1509 1293 ENTRY(scrubphys)
1510 1294 or %o1, %g0, %o2 ! put ecache size in %o2
1511 1295 #ifndef HUMMINGBIRD
1512 1296 xor %o0, %o2, %o1 ! calculate alias address
1513 1297 add %o2, %o2, %o3 ! 2 * ecachesize in case
1514 1298 ! addr == ecache_flushaddr
1515 1299 sub %o3, 1, %o3 ! -1 == mask
1516 1300 and %o1, %o3, %o1 ! and with xor'd address
1517 1301 set ecache_flushaddr, %o3
1518 1302 ldx [%o3], %o3
1519 1303
1520 1304 rdpr %pstate, %o4
1521 1305 andn %o4, PSTATE_IE | PSTATE_AM, %o5
1522 1306 wrpr %o5, %g0, %pstate ! clear IE, AM bits
1523 1307
1524 1308 ldxa [%o1 + %o3]ASI_MEM, %g0 ! load ecache_flushaddr + alias
1525 1309 casxa [%o0]ASI_MEM, %g0, %g0
1526 1310 ldxa [%o1 + %o3]ASI_MEM, %g0 ! load ecache_flushaddr + alias
1527 1311
1528 1312 #else /* HUMMINGBIRD */
1529 1313 /*
1530 1314 * UltraSPARC-IIe processor supports both 4-way set associative
1531 1315 * and direct map E$. We need to reconfigure E$ to direct map
1532 1316 * mode for data load/store before displacement flush. Also, we
1533 1317 * need to flush all 4 sets of the E$ to ensure that the physaddr
1534 1318 * has been flushed. Keep the interrupts disabled while flushing
1535 1319 * E$ in this manner.
1536 1320 *
1537 1321 * For flushing a specific physical address, we start at the
1538 1322 * aliased address and load at set-size stride, wrapping around
1539 1323 * at 2*ecache-size boundary and skipping fault physical address.
1540 1324 * It takes 10 loads to guarantee that the physical address has
1541 1325 * been flushed.
1542 1326 *
1543 1327 * Usage:
1544 1328 * %o0 physaddr
1545 1329 * %o5 physaddr - ecache_flushaddr
1546 1330 * %g1 UPA config (restored later)
1547 1331 * %g2 E$ set size
1548 1332 * %g3 E$ flush address range mask (i.e. 2 * E$ -1)
1549 1333 * %g4 #loads to flush phys address
1550 1334 * %g5 temp
1551 1335 */
1552 1336
1553 1337 sethi %hi(ecache_associativity), %g5
1554 1338 ld [%g5 + %lo(ecache_associativity)], %g5
1555 1339 udivx %o2, %g5, %g2 ! set size (i.e. ecache_size/#sets)
1556 1340 xor %o0, %o2, %o1 ! calculate alias address
1557 1341 add %o2, %o2, %g3 ! 2 * ecachesize in case
1558 1342 ! addr == ecache_flushaddr
1559 1343 sub %g3, 1, %g3 ! 2 * ecachesize -1 == mask
1560 1344 and %o1, %g3, %o1 ! and with xor'd address
1561 1345 sethi %hi(ecache_flushaddr), %o3
1562 1346 ldx [%o3 + %lo(ecache_flushaddr)], %o3
1563 1347
1564 1348 rdpr %pstate, %o4
1565 1349 andn %o4, PSTATE_IE | PSTATE_AM, %o5
1566 1350 wrpr %o5, %g0, %pstate ! clear IE, AM bits
1567 1351
1568 1352 ! Place E$ in direct map mode for data access
1569 1353 or %g0, 1, %g5
1570 1354 sllx %g5, HB_UPA_DMAP_DATA_BIT, %g5
1571 1355 ldxa [%g0]ASI_UPA_CONFIG, %g1 ! current UPA config (restored later)
1572 1356 or %g1, %g5, %g5
1573 1357 membar #Sync
1574 1358 stxa %g5, [%g0]ASI_UPA_CONFIG ! enable direct map for data access
1575 1359 membar #Sync
1576 1360
1577 1361 ! Displace cache line from each set of E$ starting at the
1578 1362 	! aliased address at set-size stride, wrapping at 2*ecache_size
1579 1363 ! and skipping load from physaddr. We need 10 loads to flush the
1580 1364 ! physaddr from E$.
1581 1365 mov HB_PHYS_FLUSH_CNT-1, %g4 ! #loads to flush phys addr
1582 1366 sub %o0, %o3, %o5 ! physaddr - ecache_flushaddr
1583 1367 or %o1, %g0, %g5 ! starting aliased offset
1584 1368 2:
1585 1369 ldxa [%g5 + %o3]ASI_MEM, %g0 ! load ecache_flushaddr + alias
1586 1370 1:
1587 1371 add %g5, %g2, %g5 ! calculate offset in next set
1588 1372 and %g5, %g3, %g5 ! force offset within aliased range
1589 1373 cmp %g5, %o5 ! skip loads from physaddr
1590 1374 be,pn %ncc, 1b
1591 1375 nop
1592 1376 brgz,pt %g4, 2b
1593 1377 dec %g4
1594 1378
1595 1379 casxa [%o0]ASI_MEM, %g0, %g0
1596 1380
1597 1381 	! Flush %o0 from ecache again.
1598 1382 ! Need single displacement flush at offset %o1 this time as
1599 1383 ! the E$ is already in direct map mode.
1600 1384 ldxa [%o1 + %o3]ASI_MEM, %g0 ! load ecache_flushaddr + alias
1601 1385
1602 1386 membar #Sync
1603 1387 stxa %g1, [%g0]ASI_UPA_CONFIG ! restore UPA config (DM bits)
1604 1388 membar #Sync
1605 1389 #endif /* HUMMINGBIRD */
1606 1390 wrpr %g0, %o4, %pstate ! restore earlier pstate register value
1607 1391
1608 1392 retl
1609 1393 membar #Sync ! move the data out of the load buffer
1610 1394 SET_SIZE(scrubphys)
1611 1395
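The alias computation shared by scrubphys, clearphys, and flushecacheline reduces to one expression, mirrored here in C; the result is used as an offset from ecache_flushaddr, and the wrap within 2 * ecache_size guarantees the alias differs from the address being flushed.

#include <stdint.h>

/* Displacement-flush alias: flip the size bit, wrap in the 2x window. */
uint64_t
ecache_alias(uint64_t paddr, uint64_t ecache_size)
{
	return ((paddr ^ ecache_size) & (2 * ecache_size - 1));
}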
1612 -#endif /* lint */
1613 -
1614 -#if defined(lint)
1615 -
1616 -/*
1617 - * clearphys - Pass in the aligned physical memory address that you want
1618 - * to push out, as a 64 byte block of zeros, from the ecache zero-filled.
1619 - * Since this routine does not bypass the ecache, it is possible that
1620 - * it could generate a UE error while trying to clear the a bad line.
1621 - * This routine clears and restores the error enable flag.
1622 - * TBD - Hummingbird may need similar protection
1623 - */
1624 -/* ARGSUSED */
1625 -void
1626 -clearphys(uint64_t paddr, int ecache_size, int ecache_linesize)
1627 -{
1628 -}
1629 -
1630 -#else /* lint */
1631 -
1632 1396 ENTRY(clearphys)
1633 1397 or %o2, %g0, %o3 ! ecache linesize
1634 1398 or %o1, %g0, %o2 ! ecache size
1635 1399 #ifndef HUMMINGBIRD
1636 1400 or %o3, %g0, %o4 ! save ecache linesize
1637 1401 xor %o0, %o2, %o1 ! calculate alias address
1638 1402 add %o2, %o2, %o3 ! 2 * ecachesize
1639 1403 sub %o3, 1, %o3 ! -1 == mask
1640 1404 and %o1, %o3, %o1 ! and with xor'd address
1641 1405 set ecache_flushaddr, %o3
1642 1406 ldx [%o3], %o3
1643 1407 or %o4, %g0, %o2 ! saved ecache linesize
1644 1408
1645 1409 rdpr %pstate, %o4
1646 1410 andn %o4, PSTATE_IE | PSTATE_AM, %o5
1647 1411 wrpr %o5, %g0, %pstate ! clear IE, AM bits
1648 1412
1649 1413 ldxa [%g0]ASI_ESTATE_ERR, %g1
1650 1414 stxa %g0, [%g0]ASI_ESTATE_ERR ! disable errors
1651 1415 membar #Sync
1652 1416
1653 1417 ! need to put zeros in the cache line before displacing it
1654 1418
1655 1419 sub %o2, 8, %o2 ! get offset of last double word in ecache line
1656 1420 1:
1657 1421 stxa %g0, [%o0 + %o2]ASI_MEM ! put zeros in the ecache line
1658 1422 sub %o2, 8, %o2
1659 1423 brgez,a,pt %o2, 1b
1660 1424 nop
1661 1425 ldxa [%o1 + %o3]ASI_MEM, %g0 ! load ecache_flushaddr + alias
1662 1426 casxa [%o0]ASI_MEM, %g0, %g0
1663 1427 ldxa [%o1 + %o3]ASI_MEM, %g0 ! load ecache_flushaddr + alias
1664 1428
1665 1429 stxa %g1, [%g0]ASI_ESTATE_ERR ! restore error enable
1666 1430 membar #Sync
1667 1431
1668 1432 #else /* HUMMINGBIRD... */
1669 1433 /*
1670 1434 * UltraSPARC-IIe processor supports both 4-way set associative
1671 1435 * and direct map E$. We need to reconfigure E$ to direct map
1672 1436 * mode for data load/store before displacement flush. Also, we
1673 1437 * need to flush all 4 sets of the E$ to ensure that the physaddr
1674 1438 * has been flushed. Keep the interrupts disabled while flushing
1675 1439 * E$ in this manner.
1676 1440 *
1677 1441 * For flushing a specific physical address, we start at the
1678 1442 * aliased address and load at set-size stride, wrapping around
1679 1443 * at 2*ecache-size boundary and skipping fault physical address.
1680 1444 * It takes 10 loads to guarantee that the physical address has
1681 1445 * been flushed.
1682 1446 *
1683 1447 * Usage:
1684 1448 * %o0 physaddr
1685 1449 * %o5 physaddr - ecache_flushaddr
1686 1450 * %g1 UPA config (restored later)
1687 1451 * %g2 E$ set size
1688 1452 * %g3 E$ flush address range mask (i.e. 2 * E$ -1)
1689 1453 * %g4 #loads to flush phys address
1690 1454 * %g5 temp
1691 1455 */
1692 1456
1693 1457 or %o3, %g0, %o4 ! save ecache linesize
1694 1458 sethi %hi(ecache_associativity), %g5
1695 1459 ld [%g5 + %lo(ecache_associativity)], %g5
1696 1460 udivx %o2, %g5, %g2 ! set size (i.e. ecache_size/#sets)
1697 1461
1698 1462 xor %o0, %o2, %o1 ! calculate alias address
1699 1463 add %o2, %o2, %g3 ! 2 * ecachesize
1700 1464 sub %g3, 1, %g3 ! 2 * ecachesize -1 == mask
1701 1465 and %o1, %g3, %o1 ! and with xor'd address
1702 1466 sethi %hi(ecache_flushaddr), %o3
1703 1467 ldx [%o3 +%lo(ecache_flushaddr)], %o3
1704 1468 or %o4, %g0, %o2 ! saved ecache linesize
1705 1469
1706 1470 rdpr %pstate, %o4
1707 1471 andn %o4, PSTATE_IE | PSTATE_AM, %o5
1708 1472 wrpr %o5, %g0, %pstate ! clear IE, AM bits
1709 1473
1710 1474 ! Place E$ in direct map mode for data access
1711 1475 or %g0, 1, %g5
1712 1476 sllx %g5, HB_UPA_DMAP_DATA_BIT, %g5
1713 1477 ldxa [%g0]ASI_UPA_CONFIG, %g1 ! current UPA config (restored later)
1714 1478 or %g1, %g5, %g5
1715 1479 membar #Sync
1716 1480 stxa %g5, [%g0]ASI_UPA_CONFIG ! enable direct map for data access
1717 1481 membar #Sync
1718 1482
1719 1483 ! need to put zeros in the cache line before displacing it
1720 1484
1721 1485 sub %o2, 8, %o2 ! get offset of last double word in ecache line
1722 1486 1:
1723 1487 stxa %g0, [%o0 + %o2]ASI_MEM ! put zeros in the ecache line
1724 1488 sub %o2, 8, %o2
1725 1489 brgez,a,pt %o2, 1b
1726 1490 nop
1727 1491
1728 1492 ! Displace cache line from each set of E$ starting at the
1729 1493 	! aliased address, at set-size stride, wrapping at 2*ecache_size
1730 1494 ! and skipping load from physaddr. We need 10 loads to flush the
1731 1495 ! physaddr from E$.
1732 1496 mov HB_PHYS_FLUSH_CNT-1, %g4 ! #loads to flush phys addr
1733 1497 sub %o0, %o3, %o5 ! physaddr - ecache_flushaddr
1734 1498 or %o1, %g0, %g5 ! starting offset
1735 1499 2:
1736 1500 ldxa [%g5 + %o3]ASI_MEM, %g0 ! load ecache_flushaddr + alias
1737 1501 3:
1738 1502 add %g5, %g2, %g5 ! calculate offset in next set
1739 1503 and %g5, %g3, %g5 ! force offset within aliased range
1740 1504 cmp %g5, %o5 ! skip loads from physaddr
1741 1505 be,pn %ncc, 3b
1742 1506 nop
1743 1507 brgz,pt %g4, 2b
1744 1508 dec %g4
1745 1509
1746 1510 casxa [%o0]ASI_MEM, %g0, %g0
1747 1511
1748 1512 	! Flush %o0 from ecache again.
1749 1513 ! Need single displacement flush at offset %o1 this time as
1750 1514 ! the E$ is already in direct map mode.
1751 1515 ldxa [%o1 + %o3]ASI_MEM, %g0 ! load ecache_flushaddr + alias
1752 1516
1753 1517 membar #Sync
1754 1518 stxa %g1, [%g0]ASI_UPA_CONFIG ! restore UPA config (DM bits)
1755 1519 membar #Sync
1756 1520 #endif /* HUMMINGBIRD... */
1757 1521
1758 1522 retl
1759 1523 wrpr %g0, %o4, %pstate ! restore earlier pstate register value
1760 1524 SET_SIZE(clearphys)
1761 1525
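For reference, the displacement-flush arithmetic in clearphys reads more easily in C. Below is a minimal sketch of the non-HUMMINGBIRD path, assuming hypothetical phys_store64()/phys_load64()/phys_cas64() helpers in place of the stxa/ldxa/casxa ASI_MEM accesses; the pstate and error-enable manipulation is deliberately omitted:

    /*
     * Minimal sketch of the non-HUMMINGBIRD clearphys flow.  The
     * phys_*() helpers are hypothetical stand-ins for ASI_MEM accesses.
     */
    #include <stdint.h>

    extern uint64_t ecache_flushaddr;
    extern void phys_store64(uint64_t pa, uint64_t val);    /* hypothetical */
    extern void phys_load64(uint64_t pa);                   /* hypothetical */
    extern void phys_cas64(uint64_t pa, uint64_t cmp, uint64_t swp);

    void
    clearphys_sketch(uint64_t paddr, int ecache_size, int ecache_linesize)
    {
            uint64_t mask = 2 * (uint64_t)ecache_size - 1;
            /* xor'ing with the cache size gives an address aliasing paddr */
            uint64_t alias = (paddr ^ (uint64_t)ecache_size) & mask;
            int off;

            /* zero the target line, last doubleword down to the first */
            for (off = ecache_linesize - 8; off >= 0; off -= 8)
                    phys_store64(paddr + off, 0);

            phys_load64(ecache_flushaddr + alias);  /* displace paddr's line */
            phys_cas64(paddr, 0, 0);                /* casxa forces the line out */
            phys_load64(ecache_flushaddr + alias);  /* displace it once more */
    }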
1762 -#endif /* lint */
1763 -
1764 -#if defined(lint)
1765 -/* ARGSUSED */
1766 -void
1767 -flushecacheline(uint64_t paddr, int ecache_size)
1768 -{
1769 -}
1770 -
1771 -#else /* lint */
1772 1526 /*
1773 1527 * flushecacheline - This is a simpler version of scrubphys
1774 1528 * which simply does a displacement flush of the line in
1775 1529 * question. This routine is mainly used in handling async
1776 1530 * errors where we want to get rid of a bad line in ecache.
1777 1531 	 * Note that if the line is modified and has suffered
1778 1532 	 * data corruption, we are guaranteed that the hw will write
1779 1533 * a UE back to mark the page poisoned.
1780 1534 */
1781 1535 ENTRY(flushecacheline)
1782 1536 or %o1, %g0, %o2 ! put ecache size in %o2
1783 1537 #ifndef HUMMINGBIRD
1784 1538 xor %o0, %o2, %o1 ! calculate alias address
1785 1539 add %o2, %o2, %o3 ! 2 * ecachesize in case
1786 1540 ! addr == ecache_flushaddr
1787 1541 sub %o3, 1, %o3 ! -1 == mask
1788 1542 and %o1, %o3, %o1 ! and with xor'd address
1789 1543 set ecache_flushaddr, %o3
1790 1544 ldx [%o3], %o3
1791 1545
1792 1546 rdpr %pstate, %o4
1793 1547 andn %o4, PSTATE_IE | PSTATE_AM, %o5
1794 1548 wrpr %o5, %g0, %pstate ! clear IE, AM bits
1795 1549
1796 1550 ldxa [%g0]ASI_ESTATE_ERR, %g1
1797 1551 stxa %g0, [%g0]ASI_ESTATE_ERR ! disable errors
1798 1552 membar #Sync
1799 1553
1800 1554 ldxa [%o1 + %o3]ASI_MEM, %g0 ! load ecache_flushaddr + alias
1801 1555 membar #Sync
1802 1556 stxa %g1, [%g0]ASI_ESTATE_ERR ! restore error enable
1803 1557 membar #Sync
1804 1558 #else /* HUMMINGBIRD */
1805 1559 /*
1806 1560 * UltraSPARC-IIe processor supports both 4-way set associative
1807 1561 * and direct map E$. We need to reconfigure E$ to direct map
1808 1562 * mode for data load/store before displacement flush. Also, we
1809 1563 * need to flush all 4 sets of the E$ to ensure that the physaddr
1810 1564 * has been flushed. Keep the interrupts disabled while flushing
1811 1565 * E$ in this manner.
1812 1566 *
1813 1567 * For flushing a specific physical address, we start at the
1814 1568 * aliased address and load at set-size stride, wrapping around
1815 1569 	 * at the 2*ecache-size boundary and skipping the faulting physical
1816 1570 * It takes 10 loads to guarantee that the physical address has
1817 1571 * been flushed.
1818 1572 *
1819 1573 * Usage:
1820 1574 * %o0 physaddr
1821 1575 * %o5 physaddr - ecache_flushaddr
1822 1576 * %g1 error enable register
1823 1577 * %g2 E$ set size
1824 1578 * %g3 E$ flush address range mask (i.e. 2 * E$ -1)
1825 1579 * %g4 UPA config (restored later)
1826 1580 * %g5 temp
1827 1581 */
1828 1582
1829 1583 sethi %hi(ecache_associativity), %g5
1830 1584 ld [%g5 + %lo(ecache_associativity)], %g5
1831 1585 udivx %o2, %g5, %g2 ! set size (i.e. ecache_size/#sets)
1832 1586 xor %o0, %o2, %o1 ! calculate alias address
1833 1587 add %o2, %o2, %g3 ! 2 * ecachesize in case
1834 1588 ! addr == ecache_flushaddr
1835 1589 sub %g3, 1, %g3 ! 2 * ecachesize -1 == mask
1836 1590 and %o1, %g3, %o1 ! and with xor'd address
1837 1591 sethi %hi(ecache_flushaddr), %o3
1838 1592 ldx [%o3 + %lo(ecache_flushaddr)], %o3
1839 1593
1840 1594 rdpr %pstate, %o4
1841 1595 andn %o4, PSTATE_IE | PSTATE_AM, %o5
1842 1596 wrpr %o5, %g0, %pstate ! clear IE, AM bits
1843 1597
1844 1598 ! Place E$ in direct map mode for data access
1845 1599 or %g0, 1, %g5
1846 1600 sllx %g5, HB_UPA_DMAP_DATA_BIT, %g5
1847 1601 ldxa [%g0]ASI_UPA_CONFIG, %g4 ! current UPA config (restored later)
1848 1602 or %g4, %g5, %g5
1849 1603 membar #Sync
1850 1604 stxa %g5, [%g0]ASI_UPA_CONFIG ! enable direct map for data access
1851 1605 membar #Sync
1852 1606
1853 1607 ldxa [%g0]ASI_ESTATE_ERR, %g1
1854 1608 stxa %g0, [%g0]ASI_ESTATE_ERR ! disable errors
1855 1609 membar #Sync
1856 1610
1857 1611 ! Displace cache line from each set of E$ starting at the
1858 1612 	! aliased address, at set-size stride, wrapping at 2*ecache_size
1859 1613 ! and skipping load from physaddr. We need 10 loads to flush the
1860 1614 ! physaddr from E$.
1861 1615 mov HB_PHYS_FLUSH_CNT-1, %g5 ! #loads to flush physaddr
1862 1616 sub %o0, %o3, %o5 ! physaddr - ecache_flushaddr
1863 1617 2:
1864 1618 ldxa [%o1 + %o3]ASI_MEM, %g0 ! load ecache_flushaddr + alias
1865 1619 3:
1866 1620 add %o1, %g2, %o1 ! calculate offset in next set
1867 1621 and %o1, %g3, %o1 ! force offset within aliased range
1868 1622 cmp %o1, %o5 ! skip loads from physaddr
1869 1623 be,pn %ncc, 3b
1870 1624 nop
1871 1625 brgz,pt %g5, 2b
1872 1626 dec %g5
1873 1627
1874 1628 membar #Sync
1875 1629 stxa %g1, [%g0]ASI_ESTATE_ERR ! restore error enable
1876 1630 membar #Sync
1877 1631
1878 1632 stxa %g4, [%g0]ASI_UPA_CONFIG ! restore UPA config (DM bits)
1879 1633 membar #Sync
1880 1634 #endif /* HUMMINGBIRD */
1881 1635 retl
1882 1636 wrpr %g0, %o4, %pstate
1883 1637 SET_SIZE(flushecacheline)
1884 1638
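The HUMMINGBIRD walk that both clearphys and flushecacheline perform is the least obvious part of these routines. Below is a minimal C sketch of the wrap-and-skip loop, reusing the hypothetical phys_load64() helper from the sketch above; the constant 10 mirrors HB_PHYS_FLUSH_CNT as described in the block comments:

    /*
     * Sketch of the HUMMINGBIRD set walk: starting at the aliased offset,
     * load at set-size stride, wrap at the 2 * ecache_size boundary, and
     * skip the one offset that maps back to physaddr itself.
     */
    #include <stdint.h>

    extern void phys_load64(uint64_t pa);           /* hypothetical */

    static void
    hb_flush_walk_sketch(uint64_t physaddr, uint64_t flushaddr,
        uint64_t alias, uint64_t setsize, uint64_t mask)
    {
            uint64_t skip = physaddr - flushaddr;   /* offset aliasing physaddr */
            uint64_t off = alias;
            int loads;

            for (loads = 10; loads > 0; loads--) {  /* HB_PHYS_FLUSH_CNT */
                    phys_load64(flushaddr + off);   /* displace one set */
                    do {
                            off = (off + setsize) & mask;   /* next set, wrapped */
                    } while (off == skip);          /* never reload physaddr */
            }
    }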
1885 -#endif /* lint */
1886 -
1887 -#if defined(lint)
1888 -/* ARGSUSED */
1889 -void
1890 -ecache_scrubreq_tl1(uint64_t inum, uint64_t dummy)
1891 -{
1892 -}
1893 -
1894 -#else /* lint */
1895 1639 /*
1896 1640 * ecache_scrubreq_tl1 is the crosstrap handler called at ecache_calls_a_sec Hz
1897 1641 * from the clock CPU. It atomically increments the outstanding request
1898 1642 * counter and, if there was not already an outstanding request,
1899 1643 * branches to setsoftint_tl1 to enqueue an intr_vec for the given inum.
1900 1644 */
1901 1645
1902 1646 ! Register usage:
1903 1647 !
1904 1648 ! Arguments:
1905 1649 ! %g1 - inum
1906 1650 !
1907 1651 ! Internal:
1908 1652 ! %g2, %g3, %g5 - scratch
1909 1653 ! %g4 - ptr. to spitfire_scrub_misc ec_scrub_outstanding.
1910 1654 ! %g6 - setsoftint_tl1 address
1911 1655
1912 1656 ENTRY_NP(ecache_scrubreq_tl1)
1913 1657 set SFPR_SCRUB_MISC + EC_SCRUB_OUTSTANDING, %g2
1914 1658 GET_CPU_PRIVATE_PTR(%g2, %g4, %g5, 1f);
1915 1659 ld [%g4], %g2 ! cpu's ec_scrub_outstanding.
1916 1660 set setsoftint_tl1, %g6
1917 1661 !
1918 1662 ! no need to use atomic instructions for the following
1919 1663 ! increment - we're at tl1
1920 1664 !
1921 1665 add %g2, 0x1, %g3
1922 1666 brnz,pn %g2, 1f ! no need to enqueue more intr_vec
1923 1667 st %g3, [%g4] ! delay - store incremented counter
1924 1668 jmp %g6 ! setsoftint_tl1(%g1) - queue intr_vec
1925 1669 nop
1926 1670 ! not reached
1927 1671 1:
1928 1672 retry
1929 1673 SET_SIZE(ecache_scrubreq_tl1)
1930 1674
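The handler's counter logic is compact enough to restate in C. A minimal sketch, with the per-CPU counter passed in directly and a hypothetical softint_enqueue() standing in for the tail-jump to setsoftint_tl1 (the real code locates the counter via GET_CPU_PRIVATE_PTR):

    #include <stdint.h>

    extern void softint_enqueue(uint64_t inum);  /* stand-in for setsoftint_tl1 */

    void
    ecache_scrubreq_sketch(uint64_t inum, uint32_t *ec_scrub_outstanding)
    {
            /* plain increment: at TL=1 this cannot be preempted */
            uint32_t old = (*ec_scrub_outstanding)++;

            if (old == 0)
                    softint_enqueue(inum);  /* only the first request enqueues */
    }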
1931 -#endif /* lint */
1932 -
1933 -#if defined(lint)
1934 -/*ARGSUSED*/
1935 -void
1936 -write_ec_tag_parity(uint32_t id)
1937 -{}
1938 -#else /* lint */
1939 -
1940 1675 /*
1941 1676 	 * write_ec_tag_parity() zeros the ecache tag,
1942 1677 	 * marks the state as invalid and writes good parity to the tag.
1943 1678 	 * Input: %o0 = 32 bit E$ index
1944 1679 */
1945 1680 ENTRY(write_ec_tag_parity)
1946 1681 or %g0, 1, %o4
1947 1682 sllx %o4, 39, %o4 ! set bit 40 for e$ tag access
1948 1683 or %o0, %o4, %o4 ! %o4 = ecache addr for tag write
1949 1684
1950 1685 rdpr %pstate, %o5
1951 1686 andn %o5, PSTATE_IE | PSTATE_AM, %o1
1952 1687 wrpr %o1, %g0, %pstate ! clear IE, AM bits
1953 1688
1954 1689 ldxa [%g0]ASI_ESTATE_ERR, %g1
1955 1690 stxa %g0, [%g0]ASI_ESTATE_ERR ! Turn off Error enable
1956 1691 membar #Sync
1957 1692
1958 1693 ba 1f
1959 1694 nop
1960 1695 /*
1961 1696 	 * Align on the ecache boundary in order to force the
1962 1697 	 * critical code section onto the same ecache line.
1963 1698 */
1964 1699 .align 64
1965 1700
1966 1701 1:
1967 1702 set S_EC_PARITY, %o3 ! clear tag, state invalid
1968 1703 sllx %o3, S_ECPAR_SHIFT, %o3 ! and with good tag parity
1969 1704 stxa %o3, [%g0]ASI_EC_DIAG ! update with the above info
1970 1705 stxa %g0, [%o4]ASI_EC_W
1971 1706 membar #Sync
1972 1707
1973 1708 stxa %g1, [%g0]ASI_ESTATE_ERR ! Turn error enable back on
1974 1709 membar #Sync
1975 1710 retl
1976 1711 wrpr %g0, %o5, %pstate
1977 1712 SET_SIZE(write_ec_tag_parity)
1978 1713
1979 -#endif /* lint */
1980 -
1981 -#if defined(lint)
1982 -/*ARGSUSED*/
1983 -void
1984 -write_hb_ec_tag_parity(uint32_t id)
1985 -{}
1986 -#else /* lint */
1987 -
1988 1714 /*
1989 1715 	 * write_hb_ec_tag_parity() zeros the ecache tag,
1990 1716 	 * marks the state as invalid and writes good parity to the tag.
1991 1717 	 * Input: %o0 = 32 bit E$ index
1992 1718 */
1993 1719 ENTRY(write_hb_ec_tag_parity)
1994 1720 or %g0, 1, %o4
1995 1721 sllx %o4, 39, %o4 ! set bit 40 for e$ tag access
1996 1722 or %o0, %o4, %o4 ! %o4 = ecache addr for tag write
1997 1723
1998 1724 rdpr %pstate, %o5
1999 1725 andn %o5, PSTATE_IE | PSTATE_AM, %o1
2000 1726 wrpr %o1, %g0, %pstate ! clear IE, AM bits
2001 1727
2002 1728 ldxa [%g0]ASI_ESTATE_ERR, %g1
2003 1729 stxa %g0, [%g0]ASI_ESTATE_ERR ! Turn off Error enable
2004 1730 membar #Sync
2005 1731
2006 1732 ba 1f
2007 1733 nop
2008 1734 /*
2009 1735 	 * Align on the ecache boundary in order to force the
2010 1736 	 * critical code section onto the same ecache line.
2011 1737 */
2012 1738 .align 64
2013 1739 1:
2014 1740 #ifdef HUMMINGBIRD
2015 1741 set HB_EC_PARITY, %o3 ! clear tag, state invalid
2016 1742 sllx %o3, HB_ECPAR_SHIFT, %o3 ! and with good tag parity
2017 1743 #else /* !HUMMINGBIRD */
2018 1744 set SB_EC_PARITY, %o3 ! clear tag, state invalid
2019 1745 sllx %o3, SB_ECPAR_SHIFT, %o3 ! and with good tag parity
2020 1746 #endif /* !HUMMINGBIRD */
2021 1747
2022 1748 stxa %o3, [%g0]ASI_EC_DIAG ! update with the above info
2023 1749 stxa %g0, [%o4]ASI_EC_W
2024 1750 membar #Sync
2025 1751
2026 1752 stxa %g1, [%g0]ASI_ESTATE_ERR ! Turn error enable back on
2027 1753 membar #Sync
2028 1754 retl
2029 1755 wrpr %g0, %o5, %pstate
2030 1756 SET_SIZE(write_hb_ec_tag_parity)
2031 1757
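Both tag-parity routines share the same two-store pattern; only the parity constant and shift differ between the Spitfire/Sabre and Hummingbird variants. A minimal parameterized sketch, with ec_diag_store() and ec_tag_store() as hypothetical stand-ins for the stxa ASI_EC_DIAG and ASI_EC_W accesses:

    #include <stdint.h>

    extern void ec_diag_store(uint64_t val);                /* hypothetical */
    extern void ec_tag_store(uint64_t addr, uint64_t val);  /* hypothetical */

    void
    write_ec_tag_parity_sketch(uint32_t index, uint64_t parity, int shift)
    {
            /* tag-access address: E$ index with the tag-select bit set */
            uint64_t tag_addr = (uint64_t)index | (1ULL << 39);

            /* stage "tag clear, state invalid, good parity" in the diag reg */
            ec_diag_store(parity << shift);
            ec_tag_store(tag_addr, 0);      /* commit the staged tag */
    }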
2032 -#endif /* lint */
2033 -
2034 1758 #define VIS_BLOCKSIZE 64
2035 1759
2036 -#if defined(lint)
2037 -
2038 -/*ARGSUSED*/
2039 -int
2040 -dtrace_blksuword32(uintptr_t addr, uint32_t *data, int tryagain)
2041 -{ return (0); }
2042 -
2043 -#else
2044 -
2045 1760 ENTRY(dtrace_blksuword32)
2046 1761 save %sp, -SA(MINFRAME + 4), %sp
2047 1762
2048 1763 rdpr %pstate, %l1
2049 1764 andn %l1, PSTATE_IE, %l2 ! disable interrupts to
2050 1765 wrpr %g0, %l2, %pstate ! protect our FPU diddling
2051 1766
2052 1767 rd %fprs, %l0
2053 1768 andcc %l0, FPRS_FEF, %g0
2054 1769 bz,a,pt %xcc, 1f ! if the fpu is disabled
2055 1770 wr %g0, FPRS_FEF, %fprs ! ... enable the fpu
2056 1771
2057 1772 st %f0, [%fp + STACK_BIAS - 4] ! save %f0 to the stack
2058 1773 1:
2059 1774 set 0f, %l5
2060 1775 /*
2061 1776 	 * We're about to write a block full of either total garbage
2062 1777 * (not kernel data, don't worry) or user floating-point data
2063 1778 * (so it only _looks_ like garbage).
2064 1779 */
2065 1780 ld [%i1], %f0 ! modify the block
2066 1781 membar #Sync
2067 1782 stn %l5, [THREAD_REG + T_LOFAULT] ! set up the lofault handler
2068 1783 stda %d0, [%i0]ASI_BLK_COMMIT_S ! store the modified block
2069 1784 membar #Sync
2070 1785 stn %g0, [THREAD_REG + T_LOFAULT] ! remove the lofault handler
2071 1786
2072 1787 bz,a,pt %xcc, 1f
2073 1788 wr %g0, %l0, %fprs ! restore %fprs
2074 1789
2075 1790 ld [%fp + STACK_BIAS - 4], %f0 ! restore %f0
2076 1791 1:
2077 1792
2078 1793 wrpr %g0, %l1, %pstate ! restore interrupts
2079 1794
2080 1795 ret
2081 1796 restore %g0, %g0, %o0
2082 1797
2083 1798 0:
2084 1799 membar #Sync
2085 1800 stn %g0, [THREAD_REG + T_LOFAULT] ! remove the lofault handler
2086 1801
2087 1802 bz,a,pt %xcc, 1f
2088 1803 wr %g0, %l0, %fprs ! restore %fprs
2089 1804
2090 1805 ld [%fp + STACK_BIAS - 4], %f0 ! restore %f0
2091 1806 1:
2092 1807
2093 1808 wrpr %g0, %l1, %pstate ! restore interrupts
2094 1809
2095 1810 /*
2096 1811 * If tryagain is set (%i2) we tail-call dtrace_blksuword32_err()
2097 1812 * which deals with watchpoints. Otherwise, just return -1.
2098 1813 */
2099 1814 brnz,pt %i2, 1f
2100 1815 nop
2101 1816 ret
2102 1817 restore %g0, -1, %o0
2103 1818 1:
2104 1819 call dtrace_blksuword32_err
2105 1820 restore
2106 1821
2107 1822 SET_SIZE(dtrace_blksuword32)
2108 1823
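Stripped of the pstate and FPU bookkeeping, dtrace_blksuword32 has a small control flow, restated below in C. blk_commit_store32() is a hypothetical stand-in for the lofault-protected "ld [%i1], %f0 / stda %d0, [%i0]ASI_BLK_COMMIT_S" sequence and returns nonzero if the user store faulted:

    #include <stdint.h>

    extern int blk_commit_store32(uintptr_t addr, uint32_t data); /* hypothetical */
    extern int dtrace_blksuword32_err(uintptr_t addr, uint32_t *data);

    int
    dtrace_blksuword32_sketch(uintptr_t addr, uint32_t *data, int tryagain)
    {
            if (blk_commit_store32(addr, *data) == 0)
                    return (0);     /* block store committed */

            if (tryagain)           /* let the error path handle watchpoints */
                    return (dtrace_blksuword32_err(addr, data));

            return (-1);
    }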
2109 -#endif /* lint */