restore sparc comments
de-linting of .s files
--- old/usr/src/uts/sun4u/cpu/opl_olympus_asm.s
+++ new/usr/src/uts/sun4u/cpu/opl_olympus_asm.s
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 *
25 25 * Assembly code support for the Olympus-C module
26 26 */
27 27
28 -#if !defined(lint)
29 28 #include "assym.h"
30 -#endif /* lint */
31 29
32 30 #include <sys/asm_linkage.h>
33 31 #include <sys/mmu.h>
34 32 #include <vm/hat_sfmmu.h>
35 33 #include <sys/machparam.h>
36 34 #include <sys/machcpuvar.h>
37 35 #include <sys/machthread.h>
38 36 #include <sys/machtrap.h>
39 37 #include <sys/privregs.h>
40 38 #include <sys/asm_linkage.h>
41 39 #include <sys/trap.h>
42 40 #include <sys/opl_olympus_regs.h>
43 41 #include <sys/opl_module.h>
44 42 #include <sys/xc_impl.h>
45 43 #include <sys/intreg.h>
46 44 #include <sys/async.h>
47 45 #include <sys/clock.h>
48 46 #include <sys/cmpregs.h>
49 47
50 48 #ifdef TRAPTRACE
51 49 #include <sys/traptrace.h>
52 50 #endif /* TRAPTRACE */
53 51
54 52 /*
55 53 * Macro that flushes the entire Ecache.
56 54 *
57 55 * arg1 = ecache size
58 56 * arg2 = ecache linesize
59 57 * arg3 = ecache flush address - Not used for olympus-C
60 58 */
61 59 #define ECACHE_FLUSHALL(arg1, arg2, arg3, tmp1) \
62 60 mov ASI_L2_CTRL_U2_FLUSH, arg1; \
63 61 mov ASI_L2_CTRL_RW_ADDR, arg2; \
64 62 stxa arg1, [arg2]ASI_L2_CTRL
65 63
66 64 /*
67 65 * SPARC64-VI MMU and Cache operations.
68 66 */
69 67
70 -#if defined(lint)
71 -
72 -/* ARGSUSED */
73 -void
74 -vtag_flushpage(caddr_t vaddr, uint64_t sfmmup)
75 -{}
76 -
77 -#else /* lint */
78 -
79 68 ENTRY_NP(vtag_flushpage)
80 69 /*
81 70 * flush page from the tlb
82 71 *
83 72 * %o0 = vaddr
84 73 * %o1 = sfmmup
85 74 */
86 75 rdpr %pstate, %o5
87 76 #ifdef DEBUG
88 77 PANIC_IF_INTR_DISABLED_PSTR(%o5, opl_di_l3, %g1)
89 78 #endif /* DEBUG */
90 79 /*
91 80 * disable ints
92 81 */
93 82 andn %o5, PSTATE_IE, %o4
94 83 wrpr %o4, 0, %pstate
95 84
96 85 /*
97 86 * Then, blow out the tlb
98 87 * Interrupts are disabled to prevent the primary ctx register
99 88 * from changing underneath us.
100 89 */
101 90 sethi %hi(ksfmmup), %o3
102 91 ldx [%o3 + %lo(ksfmmup)], %o3
103 92 cmp %o3, %o1
104 93 bne,pt %xcc, 1f ! if not kernel as, go to 1
105 94 sethi %hi(FLUSH_ADDR), %o3
106 95 /*
107 96 * For Kernel demaps use primary. type = page implicitly
108 97 */
109 98 stxa %g0, [%o0]ASI_DTLB_DEMAP /* dmmu flush for KCONTEXT */
110 99 stxa %g0, [%o0]ASI_ITLB_DEMAP /* immu flush for KCONTEXT */
111 100 flush %o3
112 101 retl
113 102 wrpr %g0, %o5, %pstate /* enable interrupts */
114 103 1:
115 104 /*
116 105 * User demap. We need to set the primary context properly.
117 106 * Secondary context cannot be used for SPARC64-VI IMMU.
118 107 * %o0 = vaddr
119 108 * %o1 = sfmmup
120 109 * %o3 = FLUSH_ADDR
121 110 */
122 111 SFMMU_CPU_CNUM(%o1, %g1, %g2) ! %g1 = sfmmu cnum on this CPU
123 112
124 113 ldub [%o1 + SFMMU_CEXT], %o4 ! %o4 = sfmmup->sfmmu_cext
125 114 sll %o4, CTXREG_EXT_SHIFT, %o4
126 115 or %g1, %o4, %g1 ! %g1 = primary pgsz | cnum
127 116
128 117 wrpr %g0, 1, %tl
129 118 set MMU_PCONTEXT, %o4
130 119 or DEMAP_PRIMARY | DEMAP_PAGE_TYPE, %o0, %o0
131 120 ldxa [%o4]ASI_DMMU, %o2 ! %o2 = save old ctxnum
132 121 srlx %o2, CTXREG_NEXT_SHIFT, %o1 ! need to preserve nucleus pgsz
133 122 sllx %o1, CTXREG_NEXT_SHIFT, %o1 ! %o1 = nucleus pgsz
134 123 or %g1, %o1, %g1 ! %g1 = nucleus pgsz | primary pgsz | cnum
 135  124 	stxa	%g1, [%o4]ASI_DMMU		! wr new ctxnum
136 125
137 126 stxa %g0, [%o0]ASI_DTLB_DEMAP
138 127 stxa %g0, [%o0]ASI_ITLB_DEMAP
139 128 stxa %o2, [%o4]ASI_DMMU /* restore old ctxnum */
140 129 flush %o3
141 130 wrpr %g0, 0, %tl
142 131
143 132 retl
144 133 wrpr %g0, %o5, %pstate /* enable interrupts */
145 134 SET_SIZE(vtag_flushpage)
146 135
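A minimal C sketch of how the handler above composes the new primary context
value (nucleus pgsz | primary pgsz | cnum). The shift names come from the
kernel headers included at the top of this file; compose_pcontext itself is a
hypothetical helper for illustration, not kernel source.

	/* Sketch only: mirrors the %g1 composition in vtag_flushpage. */
	uint64_t
	compose_pcontext(uint64_t old_ctx, uint64_t cnum, uint64_t cext)
	{
		/* keep only the nucleus pgsz bits of the old context */
		uint64_t nucleus = (old_ctx >> CTXREG_NEXT_SHIFT) <<
		    CTXREG_NEXT_SHIFT;

		/* or in the primary pgsz (cext) and the context number */
		return (nucleus | (cext << CTXREG_EXT_SHIFT) | cnum);
	}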
147 -#endif /* lint */
148 136
149 -
150 -#if defined(lint)
151 -
152 -void
153 -vtag_flushall(void)
154 -{}
155 -
156 -#else /* lint */
157 -
158 137 ENTRY_NP2(vtag_flushall, demap_all)
159 138 /*
160 139 * flush the tlb
161 140 */
162 141 sethi %hi(FLUSH_ADDR), %o3
163 142 set DEMAP_ALL_TYPE, %g1
164 143 stxa %g0, [%g1]ASI_DTLB_DEMAP
165 144 stxa %g0, [%g1]ASI_ITLB_DEMAP
166 145 flush %o3
167 146 retl
168 147 nop
169 148 SET_SIZE(demap_all)
170 149 SET_SIZE(vtag_flushall)
171 150
172 -#endif /* lint */
173 151
174 -
175 -#if defined(lint)
176 -
177 -/* ARGSUSED */
178 -void
179 -vtag_flushpage_tl1(uint64_t vaddr, uint64_t sfmmup)
180 -{}
181 -
182 -#else /* lint */
183 -
184 152 ENTRY_NP(vtag_flushpage_tl1)
185 153 /*
186 154 * x-trap to flush page from tlb and tsb
187 155 *
188 156 * %g1 = vaddr, zero-extended on 32-bit kernel
189 157 * %g2 = sfmmup
190 158 *
191 159 * assumes TSBE_TAG = 0
192 160 */
193 161 srln %g1, MMU_PAGESHIFT, %g1
194 162
195 163 sethi %hi(ksfmmup), %g3
196 164 ldx [%g3 + %lo(ksfmmup)], %g3
197 165 cmp %g3, %g2
198 166 bne,pt %xcc, 1f ! if not kernel as, go to 1
199 167 slln %g1, MMU_PAGESHIFT, %g1 /* g1 = vaddr */
200 168
201 169 /* We need to demap in the kernel context */
202 170 or DEMAP_NUCLEUS | DEMAP_PAGE_TYPE, %g1, %g1
203 171 stxa %g0, [%g1]ASI_DTLB_DEMAP
204 172 stxa %g0, [%g1]ASI_ITLB_DEMAP
205 173 retry
206 174 1:
207 175 /* We need to demap in a user context */
208 176 or DEMAP_PRIMARY | DEMAP_PAGE_TYPE, %g1, %g1
209 177
210 178 SFMMU_CPU_CNUM(%g2, %g6, %g3) ! %g6 = sfmmu cnum on this CPU
211 179
212 180 ldub [%g2 + SFMMU_CEXT], %g4 ! %g4 = sfmmup->cext
213 181 sll %g4, CTXREG_EXT_SHIFT, %g4
214 182 or %g6, %g4, %g6 ! %g6 = primary pgsz | cnum
215 183
216 184 set MMU_PCONTEXT, %g4
217 185 ldxa [%g4]ASI_DMMU, %g5 ! %g5 = save old ctxnum
218 186 srlx %g5, CTXREG_NEXT_SHIFT, %g2 ! %g2 = nucleus pgsz
219 187 sllx %g2, CTXREG_NEXT_SHIFT, %g2 ! preserve nucleus pgsz
220 188 or %g6, %g2, %g6 ! %g6 = nucleus pgsz | primary pgsz | cnum
 221  189 	stxa	%g6, [%g4]ASI_DMMU		! wr new ctxnum
222 190 stxa %g0, [%g1]ASI_DTLB_DEMAP
223 191 stxa %g0, [%g1]ASI_ITLB_DEMAP
224 192 stxa %g5, [%g4]ASI_DMMU ! restore old ctxnum
225 193 retry
226 194 SET_SIZE(vtag_flushpage_tl1)
227 195
228 -#endif /* lint */
229 196
230 -
231 -#if defined(lint)
232 -
233 -/* ARGSUSED */
234 -void
235 -vtag_flush_pgcnt_tl1(uint64_t vaddr, uint64_t sfmmup_pgcnt)
236 -{}
237 -
238 -#else /* lint */
239 -
240 197 ENTRY_NP(vtag_flush_pgcnt_tl1)
241 198 /*
242 199 * x-trap to flush pgcnt MMU_PAGESIZE pages from tlb
243 200 *
244 201 * %g1 = vaddr, zero-extended on 32-bit kernel
245 202 * %g2 = <sfmmup58|pgcnt6>
246 203 *
247 204 * NOTE: this handler relies on the fact that no
248 205 * interrupts or traps can occur during the loop
249 206 * issuing the TLB_DEMAP operations. It is assumed
250 207 * that interrupts are disabled and this code is
251 208 * fetching from the kernel locked text address.
252 209 *
253 210 * assumes TSBE_TAG = 0
254 211 */
255 212 set SFMMU_PGCNT_MASK, %g4
256 213 and %g4, %g2, %g3 /* g3 = pgcnt - 1 */
257 214 add %g3, 1, %g3 /* g3 = pgcnt */
258 215
259 216 andn %g2, SFMMU_PGCNT_MASK, %g2 /* g2 = sfmmup */
260 217 srln %g1, MMU_PAGESHIFT, %g1
261 218
262 219 sethi %hi(ksfmmup), %g4
263 220 ldx [%g4 + %lo(ksfmmup)], %g4
264 221 cmp %g4, %g2
265 222 bne,pn %xcc, 1f /* if not kernel as, go to 1 */
266 223 slln %g1, MMU_PAGESHIFT, %g1 /* g1 = vaddr */
267 224
268 225 /* We need to demap in the kernel context */
269 226 or DEMAP_NUCLEUS | DEMAP_PAGE_TYPE, %g1, %g1
270 227 set MMU_PAGESIZE, %g2 /* g2 = pgsize */
271 228 sethi %hi(FLUSH_ADDR), %g5
272 229 4:
273 230 stxa %g0, [%g1]ASI_DTLB_DEMAP
274 231 stxa %g0, [%g1]ASI_ITLB_DEMAP
275 232 flush %g5 ! flush required by immu
276 233
277 234 deccc %g3 /* decr pgcnt */
278 235 bnz,pt %icc,4b
279 236 add %g1, %g2, %g1 /* next page */
280 237 retry
281 238 1:
282 239 /*
283 240 * We need to demap in a user context
284 241 *
285 242 * g2 = sfmmup
286 243 * g3 = pgcnt
287 244 */
288 245 SFMMU_CPU_CNUM(%g2, %g5, %g6) ! %g5 = sfmmu cnum on this CPU
289 246
290 247 or DEMAP_PRIMARY | DEMAP_PAGE_TYPE, %g1, %g1
291 248
292 249 ldub [%g2 + SFMMU_CEXT], %g4 ! %g4 = sfmmup->cext
293 250 sll %g4, CTXREG_EXT_SHIFT, %g4
294 251 or %g5, %g4, %g5
295 252
296 253 set MMU_PCONTEXT, %g4
297 254 ldxa [%g4]ASI_DMMU, %g6 /* rd old ctxnum */
298 255 srlx %g6, CTXREG_NEXT_SHIFT, %g2 /* %g2 = nucleus pgsz */
299 256 sllx %g2, CTXREG_NEXT_SHIFT, %g2 /* preserve nucleus pgsz */
300 257 or %g5, %g2, %g5 /* %g5 = nucleus pgsz | primary pgsz | cnum */
 301  258 	stxa	%g5, [%g4]ASI_DMMU		/* wr new ctxnum */
302 259
303 260 set MMU_PAGESIZE, %g2 /* g2 = pgsize */
304 261 sethi %hi(FLUSH_ADDR), %g5
305 262 3:
306 263 stxa %g0, [%g1]ASI_DTLB_DEMAP
307 264 stxa %g0, [%g1]ASI_ITLB_DEMAP
308 265 flush %g5 ! flush required by immu
309 266
310 267 deccc %g3 /* decr pgcnt */
311 268 bnz,pt %icc,3b
312 269 add %g1, %g2, %g1 /* next page */
313 270
314 271 stxa %g6, [%g4]ASI_DMMU /* restore old ctxnum */
315 272 retry
316 273 SET_SIZE(vtag_flush_pgcnt_tl1)
317 274
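A minimal C sketch of the <sfmmup58|pgcnt6> unpacking done at the top of the
handler above. SFMMU_PGCNT_MASK comes from the kernel headers; the helper
name is hypothetical.

	/* Sketch only: mirrors the %g2 unpacking in vtag_flush_pgcnt_tl1. */
	void
	unpack_pgcnt_arg(uint64_t arg, uint64_t *sfmmup, uint64_t *pgcnt)
	{
		*pgcnt = (arg & SFMMU_PGCNT_MASK) + 1;	/* biased by one */
		*sfmmup = arg & ~(uint64_t)SFMMU_PGCNT_MASK;
	}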
318 -#endif /* lint */
319 275
320 -
321 -#if defined(lint)
322 -
323 -/*ARGSUSED*/
324 -void
325 -vtag_flushall_tl1(uint64_t dummy1, uint64_t dummy2)
326 -{}
327 -
328 -#else /* lint */
329 -
330 276 ENTRY_NP(vtag_flushall_tl1)
331 277 /*
332 278 * x-trap to flush tlb
333 279 */
334 280 set DEMAP_ALL_TYPE, %g4
335 281 stxa %g0, [%g4]ASI_DTLB_DEMAP
336 282 stxa %g0, [%g4]ASI_ITLB_DEMAP
337 283 retry
338 284 SET_SIZE(vtag_flushall_tl1)
339 285
340 -#endif /* lint */
341 286
342 -
343 287 /*
344 288 * VAC (virtual address conflict) does not apply to OPL.
345 289 * VAC resolution is managed by the Olympus processor hardware.
346 290 * As a result, all OPL VAC flushing routines are no-ops.
347 291 */
348 292
349 -#if defined(lint)
350 -
351 -/* ARGSUSED */
352 -void
353 -vac_flushpage(pfn_t pfnum, int vcolor)
354 -{}
355 -
356 -#else /* lint */
357 -
358 293 ENTRY(vac_flushpage)
359 294 retl
360 295 nop
361 296 SET_SIZE(vac_flushpage)
362 297
363 -#endif /* lint */
364 -
365 -#if defined(lint)
366 -
367 -/* ARGSUSED */
368 -void
369 -vac_flushpage_tl1(uint64_t pfnum, uint64_t vcolor)
370 -{}
371 -
372 -#else /* lint */
373 -
374 298 ENTRY_NP(vac_flushpage_tl1)
375 299 retry
376 300 SET_SIZE(vac_flushpage_tl1)
377 301
378 -#endif /* lint */
379 302
380 -
381 -#if defined(lint)
382 -
383 -/* ARGSUSED */
384 -void
385 -vac_flushcolor(int vcolor, pfn_t pfnum)
386 -{}
387 -
388 -#else /* lint */
389 -
390 303 ENTRY(vac_flushcolor)
391 304 retl
392 305 nop
393 306 SET_SIZE(vac_flushcolor)
394 307
395 -#endif /* lint */
396 308
397 309
398 -
399 -#if defined(lint)
400 -
401 -/* ARGSUSED */
402 -void
403 -vac_flushcolor_tl1(uint64_t vcolor, uint64_t pfnum)
404 -{}
405 -
406 -#else /* lint */
407 -
408 310 ENTRY(vac_flushcolor_tl1)
409 311 retry
410 312 SET_SIZE(vac_flushcolor_tl1)
411 313
412 -#endif /* lint */
413 -
414 -#if defined(lint)
415 -
416 -int
417 -idsr_busy(void)
418 -{
419 - return (0);
420 -}
421 -
422 -#else /* lint */
423 -
424 314 /*
425 315 * Determine whether or not the IDSR is busy.
426 316 * Entry: no arguments
427 317 * Returns: 1 if busy, 0 otherwise
428 318 */
429 319 ENTRY(idsr_busy)
430 320 ldxa [%g0]ASI_INTR_DISPATCH_STATUS, %g1
431 321 clr %o0
432 322 btst IDSR_BUSY, %g1
433 323 bz,a,pt %xcc, 1f
434 324 mov 1, %o0
435 325 1:
436 326 retl
437 327 nop
438 328 SET_SIZE(idsr_busy)
439 329
440 -#endif /* lint */
441 -
442 -#if defined(lint)
443 -
444 -/* ARGSUSED */
445 -void
446 -init_mondo(xcfunc_t *func, uint64_t arg1, uint64_t arg2)
447 -{}
448 -
449 -/* ARGSUSED */
450 -void
451 -init_mondo_nocheck(xcfunc_t *func, uint64_t arg1, uint64_t arg2)
452 -{}
453 -
454 -#else /* lint */
455 -
456 330 .global _dispatch_status_busy
457 331 _dispatch_status_busy:
458 332 .asciz "ASI_INTR_DISPATCH_STATUS error: busy"
459 333 .align 4
460 334
461 335 /*
462 336 * Setup interrupt dispatch data registers
463 337 * Entry:
464 338 * %o0 - function or inumber to call
465 339 * %o1, %o2 - arguments (2 uint64_t's)
466 340 */
467 341 .seg "text"
468 342
469 343 ENTRY(init_mondo)
470 344 #ifdef DEBUG
471 345 !
472 346 ! IDSR should not be busy at the moment
473 347 !
474 348 ldxa [%g0]ASI_INTR_DISPATCH_STATUS, %g1
475 349 btst IDSR_BUSY, %g1
476 350 bz,pt %xcc, 1f
477 351 nop
478 352 sethi %hi(_dispatch_status_busy), %o0
479 353 call panic
480 354 or %o0, %lo(_dispatch_status_busy), %o0
481 355 #endif /* DEBUG */
482 356
483 357 ALTENTRY(init_mondo_nocheck)
484 358 !
485 359 ! interrupt vector dispatch data reg 0
486 360 !
487 361 1:
488 362 mov IDDR_0, %g1
489 363 mov IDDR_1, %g2
490 364 mov IDDR_2, %g3
491 365 stxa %o0, [%g1]ASI_INTR_DISPATCH
492 366
493 367 !
494 368 ! interrupt vector dispatch data reg 1
495 369 !
496 370 stxa %o1, [%g2]ASI_INTR_DISPATCH
497 371
498 372 !
499 373 ! interrupt vector dispatch data reg 2
500 374 !
501 375 stxa %o2, [%g3]ASI_INTR_DISPATCH
502 376
503 377 membar #Sync
504 378 retl
505 379 nop
506 380 SET_SIZE(init_mondo_nocheck)
507 381 SET_SIZE(init_mondo)
508 382
509 -#endif /* lint */
510 383
511 -
512 -#if defined(lint)
513 -
514 -/* ARGSUSED */
515 -void
516 -shipit(int upaid, int bn)
517 -{ return; }
518 -
519 -#else /* lint */
520 -
521 384 /*
522 385 * Ship mondo to aid using busy/nack pair bn
523 386 */
524 387 ENTRY_NP(shipit)
525 388 sll %o0, IDCR_PID_SHIFT, %g1 ! IDCR<23:14> = agent id
526 389 sll %o1, IDCR_BN_SHIFT, %g2 ! IDCR<28:24> = b/n pair
527 390 or %g1, IDCR_OFFSET, %g1 ! IDCR<13:0> = 0x70
528 391 or %g1, %g2, %g1
529 392 stxa %g0, [%g1]ASI_INTR_DISPATCH ! interrupt vector dispatch
530 393 membar #Sync
531 394 retl
532 395 nop
533 396 SET_SIZE(shipit)
534 397
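A C sketch of the dispatch address that shipit composes above; the bit-field
positions follow the IDCR_* names used in the code, and shipit_idcr is a
hypothetical helper for illustration only.

	/* Sketch only: the IDCR address built by shipit. */
	uint64_t
	shipit_idcr(int upaid, int bn)
	{
		return (((uint64_t)upaid << IDCR_PID_SHIFT) |	/* <23:14> */
		    ((uint64_t)bn << IDCR_BN_SHIFT) |		/* <28:24> */
		    IDCR_OFFSET);				/* <13:0> */
	}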
535 -#endif /* lint */
536 398
537 -
538 -#if defined(lint)
539 -
540 -/* ARGSUSED */
541 -void
542 -flush_instr_mem(caddr_t vaddr, size_t len)
543 -{}
544 -
545 -#else /* lint */
546 -
547 399 /*
548 400 * flush_instr_mem:
549 401 * Flush 1 page of the I-$ starting at vaddr
550 402 * %o0 vaddr
551 403 * %o1 bytes to be flushed
552 404 *
553 405 * SPARC64-VI maintains consistency of the on-chip Instruction Cache with
554 406 * the stores from all processors so that a FLUSH instruction is only needed
555 407 * to ensure pipeline is consistent. This means a single flush is sufficient at
 556  408 	 * to ensure the pipeline is consistent. This means a single flush is sufficient at
557 409 * ensure correct operation.
558 410 */
559 411
560 412 ENTRY(flush_instr_mem)
561 413 flush %o0 ! address irrelevant
562 414 retl
563 415 nop
564 416 SET_SIZE(flush_instr_mem)
565 417
566 -#endif /* lint */
567 418
568 -
569 419 /*
570 420 * flush_ecache:
571 421 * %o0 - 64 bit physical address
572 422 * %o1 - ecache size
573 423 * %o2 - ecache linesize
574 424 */
575 -#if defined(lint)
576 425
577 -/*ARGSUSED*/
578 -void
579 -flush_ecache(uint64_t physaddr, size_t ecache_size, size_t ecache_linesize)
580 -{}
581 -
582 -#else /* !lint */
583 -
584 426 ENTRY(flush_ecache)
585 427
586 428 /*
587 429 * Flush the entire Ecache.
588 430 */
589 431 ECACHE_FLUSHALL(%o1, %o2, %o0, %o4)
590 432 retl
591 433 nop
592 434 SET_SIZE(flush_ecache)
593 435
594 -#endif /* lint */
595 -
596 -#if defined(lint)
597 -
598 -/*ARGSUSED*/
599 -void
600 -kdi_flush_idcache(int dcache_size, int dcache_lsize, int icache_size,
601 - int icache_lsize)
602 -{
603 -}
604 -
605 -#else /* lint */
606 -
607 436 /*
608 437 * I/D cache flushing is not needed for OPL processors
609 438 */
610 439 ENTRY(kdi_flush_idcache)
611 440 retl
612 441 nop
613 442 SET_SIZE(kdi_flush_idcache)
614 443
615 -#endif /* lint */
616 -
617 444 #ifdef TRAPTRACE
618 445 /*
619 446 * Simplified trap trace macro for OPL. Adapted from us3.
620 447 */
621 448 #define OPL_TRAPTRACE(ptr, scr1, scr2, label) \
622 449 CPU_INDEX(scr1, ptr); \
623 450 sll scr1, TRAPTR_SIZE_SHIFT, scr1; \
624 451 set trap_trace_ctl, ptr; \
625 452 add ptr, scr1, scr1; \
626 453 ld [scr1 + TRAPTR_LIMIT], ptr; \
627 454 tst ptr; \
628 455 be,pn %icc, label/**/1; \
629 456 ldx [scr1 + TRAPTR_PBASE], ptr; \
630 457 ld [scr1 + TRAPTR_OFFSET], scr1; \
631 458 add ptr, scr1, ptr; \
632 459 rd %asi, scr2; \
633 460 wr %g0, TRAPTR_ASI, %asi; \
634 461 rd STICK, scr1; \
635 462 stxa scr1, [ptr + TRAP_ENT_TICK]%asi; \
636 463 rdpr %tl, scr1; \
637 464 stha scr1, [ptr + TRAP_ENT_TL]%asi; \
638 465 rdpr %tt, scr1; \
639 466 stha scr1, [ptr + TRAP_ENT_TT]%asi; \
640 467 rdpr %tpc, scr1; \
641 468 stna scr1, [ptr + TRAP_ENT_TPC]%asi; \
642 469 rdpr %tstate, scr1; \
643 470 stxa scr1, [ptr + TRAP_ENT_TSTATE]%asi; \
644 471 stna %sp, [ptr + TRAP_ENT_SP]%asi; \
645 472 stna %g0, [ptr + TRAP_ENT_TR]%asi; \
646 473 stna %g0, [ptr + TRAP_ENT_F1]%asi; \
647 474 stna %g0, [ptr + TRAP_ENT_F2]%asi; \
648 475 stna %g0, [ptr + TRAP_ENT_F3]%asi; \
649 476 stna %g0, [ptr + TRAP_ENT_F4]%asi; \
650 477 wr %g0, scr2, %asi; \
651 478 CPU_INDEX(ptr, scr1); \
652 479 sll ptr, TRAPTR_SIZE_SHIFT, ptr; \
653 480 set trap_trace_ctl, scr1; \
654 481 add scr1, ptr, ptr; \
655 482 ld [ptr + TRAPTR_OFFSET], scr1; \
656 483 ld [ptr + TRAPTR_LIMIT], scr2; \
657 484 st scr1, [ptr + TRAPTR_LAST_OFFSET]; \
658 485 add scr1, TRAP_ENT_SIZE, scr1; \
659 486 sub scr2, TRAP_ENT_SIZE, scr2; \
660 487 cmp scr1, scr2; \
661 488 movge %icc, 0, scr1; \
662 489 st scr1, [ptr + TRAPTR_OFFSET]; \
663 490 label/**/1:
664 491 #endif /* TRAPTRACE */
665 492
666 493
667 494
668 495 /*
669 496 * Macros facilitating error handling.
670 497 */
671 498
672 499 /*
673 500 * Save alternative global registers reg1, reg2, reg3
674 501 * to scratchpad registers 1, 2, 3 respectively.
675 502 */
676 503 #define OPL_SAVE_GLOBAL(reg1, reg2, reg3) \
677 504 stxa reg1, [%g0]ASI_SCRATCHPAD ;\
678 505 mov OPL_SCRATCHPAD_SAVE_AG2, reg1 ;\
679 506 stxa reg2, [reg1]ASI_SCRATCHPAD ;\
680 507 mov OPL_SCRATCHPAD_SAVE_AG3, reg1 ;\
681 508 stxa reg3, [reg1]ASI_SCRATCHPAD
682 509
683 510 /*
684 511 * Restore alternative global registers reg1, reg2, reg3
685 512 * from scratchpad registers 1, 2, 3 respectively.
686 513 */
687 514 #define OPL_RESTORE_GLOBAL(reg1, reg2, reg3) \
688 515 mov OPL_SCRATCHPAD_SAVE_AG3, reg1 ;\
689 516 ldxa [reg1]ASI_SCRATCHPAD, reg3 ;\
690 517 mov OPL_SCRATCHPAD_SAVE_AG2, reg1 ;\
691 518 ldxa [reg1]ASI_SCRATCHPAD, reg2 ;\
692 519 ldxa [%g0]ASI_SCRATCHPAD, reg1
693 520
694 521 /*
695 522 * Logs value `val' into the member `offset' of a structure
696 523 * at physical address `pa'
697 524 */
698 525 #define LOG_REG(pa, offset, val) \
699 526 add pa, offset, pa ;\
700 527 stxa val, [pa]ASI_MEM
701 528
702 529 #define FLUSH_ALL_TLB(tmp1) \
703 530 set DEMAP_ALL_TYPE, tmp1 ;\
704 531 stxa %g0, [tmp1]ASI_ITLB_DEMAP ;\
705 532 stxa %g0, [tmp1]ASI_DTLB_DEMAP ;\
706 533 sethi %hi(FLUSH_ADDR), tmp1 ;\
707 534 flush tmp1
708 535
709 536 /*
710 537 * Extracts the Physaddr to Logging Buffer field of the OPL_SCRATCHPAD_ERRLOG
711 538 * scratch register by zeroing all other fields. Result is in pa.
712 539 */
713 540 #define LOG_ADDR(pa) \
714 541 mov OPL_SCRATCHPAD_ERRLOG, pa ;\
715 542 ldxa [pa]ASI_SCRATCHPAD, pa ;\
716 543 sllx pa, 64-ERRLOG_REG_EIDR_SHIFT, pa ;\
717 544 srlx pa, 64-ERRLOG_REG_EIDR_SHIFT+ERRLOG_REG_ERR_SHIFT, pa ;\
718 545 sllx pa, ERRLOG_REG_ERR_SHIFT, pa
719 546
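A C sketch of the shift-pair extraction that LOG_ADDR performs above: shift
left to drop the bits at and above EIDR_SHIFT, then shift right to drop the
bits below ERR_SHIFT. The ERRLOG_REG_* names come from the kernel headers;
log_addr is a hypothetical helper.

	/* Sketch only: isolate the physaddr field of the errlog register. */
	uint64_t
	log_addr(uint64_t errlog)
	{
		uint64_t pa;

		pa = errlog << (64 - ERRLOG_REG_EIDR_SHIFT);
		pa >>= 64 - ERRLOG_REG_EIDR_SHIFT + ERRLOG_REG_ERR_SHIFT;
		return (pa << ERRLOG_REG_ERR_SHIFT);
	}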
720 547 /*
 721  548 	 * Advance the per-cpu error log buffer pointer to the next
 722  549 	 * ERRLOG_SZ entry, making sure that it wraps around at the
 723  550 	 * ERRLOG_BUFSZ boundary. The args logpa, bufmask and tmp are
 724  551 	 * scratch registers; their input values are unused by this macro.
725 552 *
 726  553 	 * Algorithm:
 727  554 	 * 1. logpa = contents of errorlog scratchpad register
 728  555 	 * 2. bufmask = ERRLOG_BUFSZ - 1
 729  556 	 * 3. tmp = logpa & ~(bufmask) (tmp is now logbase)
 730  557 	 * 4. logpa += ERRLOG_SZ
 731  558 	 * 5. logpa = logpa & bufmask (get new offset to logbase)
 732  559 	 * 6. logpa = tmp | logpa
 733  560 	 * 7. write logpa back into errorlog scratchpad register
734 561 *
735 562 * new logpa = (logpa & ~bufmask) | ((logpa + ERRLOG_SZ) & bufmask)
736 563 *
737 564 */
738 565 #define UPDATE_LOGADD(logpa, bufmask, tmp) \
739 566 set OPL_SCRATCHPAD_ERRLOG, tmp ;\
740 567 ldxa [tmp]ASI_SCRATCHPAD, logpa ;\
741 568 set (ERRLOG_BUFSZ-1), bufmask ;\
742 569 andn logpa, bufmask, tmp ;\
743 570 add logpa, ERRLOG_SZ, logpa ;\
744 571 and logpa, bufmask, logpa ;\
745 572 or tmp, logpa, logpa ;\
746 573 set OPL_SCRATCHPAD_ERRLOG, tmp ;\
747 574 stxa logpa, [tmp]ASI_SCRATCHPAD
748 575
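A C sketch of UPDATE_LOGADD's wraparound arithmetic described above;
update_logadd is a hypothetical helper, the ERRLOG_* constants come from the
kernel headers.

	/* Sketch only: advance the log pointer modulo ERRLOG_BUFSZ. */
	uint64_t
	update_logadd(uint64_t logpa)
	{
		uint64_t bufmask = ERRLOG_BUFSZ - 1;
		uint64_t logbase = logpa & ~bufmask;	/* buffer base */

		return (logbase | ((logpa + ERRLOG_SZ) & bufmask));
	}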
749 576 /* Log error status registers into the log buffer */
750 577 #define LOG_SYNC_REG(sfsr, sfar, tmp) \
751 578 LOG_ADDR(tmp) ;\
752 579 LOG_REG(tmp, LOG_SFSR_OFF, sfsr) ;\
753 580 LOG_ADDR(tmp) ;\
754 581 mov tmp, sfsr ;\
755 582 LOG_REG(tmp, LOG_SFAR_OFF, sfar) ;\
756 583 rd STICK, sfar ;\
757 584 mov sfsr, tmp ;\
758 585 LOG_REG(tmp, LOG_STICK_OFF, sfar) ;\
759 586 rdpr %tl, tmp ;\
760 587 sllx tmp, 32, sfar ;\
761 588 rdpr %tt, tmp ;\
762 589 or sfar, tmp, sfar ;\
763 590 mov sfsr, tmp ;\
764 591 LOG_REG(tmp, LOG_TL_OFF, sfar) ;\
765 592 set OPL_SCRATCHPAD_ERRLOG, tmp ;\
766 593 ldxa [tmp]ASI_SCRATCHPAD, sfar ;\
767 594 mov sfsr, tmp ;\
768 595 LOG_REG(tmp, LOG_ASI3_OFF, sfar) ;\
769 596 rdpr %tpc, sfar ;\
770 597 mov sfsr, tmp ;\
771 598 LOG_REG(tmp, LOG_TPC_OFF, sfar) ;\
772 599 UPDATE_LOGADD(sfsr, sfar, tmp)
773 600
774 601 #define LOG_UGER_REG(uger, tmp, tmp2) \
775 602 LOG_ADDR(tmp) ;\
776 603 mov tmp, tmp2 ;\
777 604 LOG_REG(tmp2, LOG_UGER_OFF, uger) ;\
778 605 mov tmp, uger ;\
779 606 rd STICK, tmp2 ;\
780 607 LOG_REG(tmp, LOG_STICK_OFF, tmp2) ;\
781 608 rdpr %tl, tmp ;\
782 609 sllx tmp, 32, tmp2 ;\
783 610 rdpr %tt, tmp ;\
784 611 or tmp2, tmp, tmp2 ;\
785 612 mov uger, tmp ;\
786 613 LOG_REG(tmp, LOG_TL_OFF, tmp2) ;\
787 614 set OPL_SCRATCHPAD_ERRLOG, tmp2 ;\
788 615 ldxa [tmp2]ASI_SCRATCHPAD, tmp2 ;\
789 616 mov uger, tmp ;\
790 617 LOG_REG(tmp, LOG_ASI3_OFF, tmp2) ;\
791 618 rdpr %tstate, tmp2 ;\
792 619 mov uger, tmp ;\
793 620 LOG_REG(tmp, LOG_TSTATE_OFF, tmp2) ;\
794 621 rdpr %tpc, tmp2 ;\
795 622 mov uger, tmp ;\
796 623 LOG_REG(tmp, LOG_TPC_OFF, tmp2) ;\
797 624 UPDATE_LOGADD(uger, tmp, tmp2)
798 625
799 626 /*
800 627 * Scrub the STICK_COMPARE register to clear error by updating
801 628 * it to a reasonable value for interrupt generation.
802 629 * Ensure that we observe the CPU_ENABLE flag so that we
 803  630 	 * don't accidentally enable the TICK interrupt in STICK_COMPARE,
 804  631 	 * i.e. no clock interrupt will be generated if the CPU_ENABLE flag
805 632 * is off.
806 633 */
807 634 #define UPDATE_STICK_COMPARE(tmp1, tmp2) \
808 635 CPU_ADDR(tmp1, tmp2) ;\
809 636 lduh [tmp1 + CPU_FLAGS], tmp2 ;\
810 637 andcc tmp2, CPU_ENABLE, %g0 ;\
811 638 set OPL_UGER_STICK_DIFF, tmp2 ;\
812 639 rd STICK, tmp1 ;\
813 640 add tmp1, tmp2, tmp1 ;\
814 641 mov 1, tmp2 ;\
815 642 sllx tmp2, TICKINT_DIS_SHFT, tmp2 ;\
816 643 or tmp1, tmp2, tmp2 ;\
817 644 movnz %xcc, tmp1, tmp2 ;\
818 645 wr tmp2, %g0, STICK_COMPARE
819 646
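A C sketch of the conditional-move logic in UPDATE_STICK_COMPARE above: the
variant with the disable bit set is kept only when CPU_ENABLE is off.
new_stick_compare is a hypothetical helper; the constants come from the
kernel headers.

	/* Sketch only: choose the enabled/disabled compare value. */
	uint64_t
	new_stick_compare(uint64_t stick, uint16_t cpu_flags)
	{
		uint64_t val = stick + OPL_UGER_STICK_DIFF;

		if (!(cpu_flags & CPU_ENABLE))
			val |= (uint64_t)1 << TICKINT_DIS_SHFT;
		return (val);
	}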
820 647 /*
 821  648 	 * Reset registers that may be corrupted by an IAUG_CRE error.
 822  649 	 * To update the interrupt handling related registers, force the
 823  650 	 * clock interrupt.
824 651 */
825 652 #define IAG_CRE(tmp1, tmp2) \
826 653 set OPL_SCRATCHPAD_ERRLOG, tmp1 ;\
827 654 ldxa [tmp1]ASI_SCRATCHPAD, tmp1 ;\
828 655 srlx tmp1, ERRLOG_REG_EIDR_SHIFT, tmp1 ;\
829 656 set ERRLOG_REG_EIDR_MASK, tmp2 ;\
830 657 and tmp1, tmp2, tmp1 ;\
831 658 stxa tmp1, [%g0]ASI_EIDR ;\
832 659 wr %g0, 0, SOFTINT ;\
833 660 sethi %hi(hres_last_tick), tmp1 ;\
834 661 ldx [tmp1 + %lo(hres_last_tick)], tmp1 ;\
835 662 set OPL_UGER_STICK_DIFF, tmp2 ;\
836 663 add tmp1, tmp2, tmp1 ;\
837 664 wr tmp1, %g0, STICK ;\
838 665 UPDATE_STICK_COMPARE(tmp1, tmp2)
839 666
840 667
841 668 #define CLEAR_FPREGS(tmp) \
842 669 wr %g0, FPRS_FEF, %fprs ;\
843 670 wr %g0, %g0, %gsr ;\
844 671 sethi %hi(opl_clr_freg), tmp ;\
845 672 or tmp, %lo(opl_clr_freg), tmp ;\
846 673 ldx [tmp], %fsr ;\
847 674 fzero %d0 ;\
848 675 fzero %d2 ;\
849 676 fzero %d4 ;\
850 677 fzero %d6 ;\
851 678 fzero %d8 ;\
852 679 fzero %d10 ;\
853 680 fzero %d12 ;\
854 681 fzero %d14 ;\
855 682 fzero %d16 ;\
856 683 fzero %d18 ;\
857 684 fzero %d20 ;\
858 685 fzero %d22 ;\
859 686 fzero %d24 ;\
860 687 fzero %d26 ;\
861 688 fzero %d28 ;\
862 689 fzero %d30 ;\
863 690 fzero %d32 ;\
864 691 fzero %d34 ;\
865 692 fzero %d36 ;\
866 693 fzero %d38 ;\
867 694 fzero %d40 ;\
868 695 fzero %d42 ;\
869 696 fzero %d44 ;\
870 697 fzero %d46 ;\
871 698 fzero %d48 ;\
872 699 fzero %d50 ;\
873 700 fzero %d52 ;\
874 701 fzero %d54 ;\
875 702 fzero %d56 ;\
876 703 fzero %d58 ;\
877 704 fzero %d60 ;\
878 705 fzero %d62 ;\
879 706 wr %g0, %g0, %fprs
880 707
881 708 #define CLEAR_GLOBALS() \
882 709 mov %g0, %g1 ;\
883 710 mov %g0, %g2 ;\
884 711 mov %g0, %g3 ;\
885 712 mov %g0, %g4 ;\
886 713 mov %g0, %g5 ;\
887 714 mov %g0, %g6 ;\
888 715 mov %g0, %g7
889 716
890 717 /*
 891  718 	 * We do not clear the alternative globals here because they
 892  719 	 * are scratch registers, i.e. there is no code that reads from
 893  720 	 * them without first writing to them. In other words, every
 894  721 	 * read always follows a write, which makes an extra write to
 895  722 	 * the alternative globals unnecessary.
896 723 */
897 724 #define CLEAR_GEN_REGS(tmp1, label) \
898 725 set TSTATE_KERN, tmp1 ;\
899 726 wrpr %g0, tmp1, %tstate ;\
900 727 mov %g0, %y ;\
901 728 mov %g0, %asi ;\
902 729 mov %g0, %ccr ;\
903 730 mov %g0, %l0 ;\
904 731 mov %g0, %l1 ;\
905 732 mov %g0, %l2 ;\
906 733 mov %g0, %l3 ;\
907 734 mov %g0, %l4 ;\
908 735 mov %g0, %l5 ;\
909 736 mov %g0, %l6 ;\
910 737 mov %g0, %l7 ;\
911 738 mov %g0, %i0 ;\
912 739 mov %g0, %i1 ;\
913 740 mov %g0, %i2 ;\
914 741 mov %g0, %i3 ;\
915 742 mov %g0, %i4 ;\
916 743 mov %g0, %i5 ;\
917 744 mov %g0, %i6 ;\
918 745 mov %g0, %i7 ;\
919 746 mov %g0, %o1 ;\
920 747 mov %g0, %o2 ;\
921 748 mov %g0, %o3 ;\
922 749 mov %g0, %o4 ;\
923 750 mov %g0, %o5 ;\
924 751 mov %g0, %o6 ;\
925 752 mov %g0, %o7 ;\
926 753 mov %g0, %o0 ;\
927 754 mov %g0, %g4 ;\
928 755 mov %g0, %g5 ;\
929 756 mov %g0, %g6 ;\
930 757 mov %g0, %g7 ;\
931 758 rdpr %tl, tmp1 ;\
932 759 cmp tmp1, 1 ;\
933 760 be,pt %xcc, label/**/1 ;\
934 761 rdpr %pstate, tmp1 ;\
935 762 wrpr tmp1, PSTATE_AG|PSTATE_IG, %pstate ;\
936 763 CLEAR_GLOBALS() ;\
937 764 rdpr %pstate, tmp1 ;\
938 765 wrpr tmp1, PSTATE_IG|PSTATE_MG, %pstate ;\
939 766 CLEAR_GLOBALS() ;\
940 767 rdpr %pstate, tmp1 ;\
941 768 wrpr tmp1, PSTATE_MG|PSTATE_AG, %pstate ;\
942 769 ba,pt %xcc, label/**/2 ;\
943 770 nop ;\
944 771 label/**/1: ;\
945 772 wrpr tmp1, PSTATE_AG, %pstate ;\
946 773 CLEAR_GLOBALS() ;\
947 774 rdpr %pstate, tmp1 ;\
948 775 wrpr tmp1, PSTATE_AG, %pstate ;\
949 776 label/**/2:
950 777
951 778
952 779 /*
953 780 * Reset all window related registers
954 781 */
955 782 #define RESET_WINREG(tmp) \
956 783 sethi %hi(nwin_minus_one), tmp ;\
957 784 ld [tmp + %lo(nwin_minus_one)], tmp ;\
958 785 wrpr %g0, tmp, %cwp ;\
959 786 wrpr %g0, tmp, %cleanwin ;\
960 787 sub tmp, 1, tmp ;\
961 788 wrpr %g0, tmp, %cansave ;\
962 789 wrpr %g0, %g0, %canrestore ;\
963 790 wrpr %g0, %g0, %otherwin ;\
964 791 wrpr %g0, PIL_MAX, %pil ;\
965 792 wrpr %g0, WSTATE_KERN, %wstate
966 793
967 794
968 795 #define RESET_PREV_TSTATE(tmp1, tmp2, label) \
969 796 rdpr %tl, tmp1 ;\
970 797 subcc tmp1, 1, tmp1 ;\
971 798 bz,pt %xcc, label/**/1 ;\
972 799 nop ;\
973 800 wrpr tmp1, %g0, %tl ;\
974 801 set TSTATE_KERN, tmp2 ;\
975 802 wrpr tmp2, %g0, %tstate ;\
976 803 wrpr %g0, %g0, %tpc ;\
977 804 wrpr %g0, %g0, %tnpc ;\
978 805 add tmp1, 1, tmp1 ;\
979 806 wrpr tmp1, %g0, %tl ;\
980 807 label/**/1:
981 808
982 809
983 810 /*
984 811 * %pstate, %pc, %npc are propagated to %tstate, %tpc, %tnpc,
 985  812 	 * and we reset these registers here.
986 813 */
987 814 #define RESET_CUR_TSTATE(tmp) \
988 815 set TSTATE_KERN, tmp ;\
989 816 wrpr %g0, tmp, %tstate ;\
990 817 wrpr %g0, 0, %tpc ;\
991 818 wrpr %g0, 0, %tnpc ;\
992 819 RESET_WINREG(tmp)
993 820
994 821 /*
 995  822 	 * In case of urgent errors some MMU registers may be
 996  823 	 * corrupted, so we set some reasonable values for them here.
 997  824 	 * Note that resetting the MMU registers also resets the context
 998  825 	 * info, so we will need to reset the window registers to prevent
 999  826 	 * spill/fill traps, which depend on context info for correct behaviour.
1000 827 * Note that the TLBs must be flushed before programming the context
1001 828 * registers.
1002 829 */
1003 830
1004 -#if !defined(lint)
1005 831 #define RESET_MMU_REGS(tmp1, tmp2, tmp3) \
1006 832 FLUSH_ALL_TLB(tmp1) ;\
1007 833 set MMU_PCONTEXT, tmp1 ;\
1008 834 sethi %hi(kcontextreg), tmp2 ;\
1009 835 ldx [tmp2 + %lo(kcontextreg)], tmp2 ;\
1010 836 stxa tmp2, [tmp1]ASI_DMMU ;\
1011 837 set MMU_SCONTEXT, tmp1 ;\
1012 838 stxa tmp2, [tmp1]ASI_DMMU ;\
1013 839 sethi %hi(ktsb_base), tmp1 ;\
1014 840 ldx [tmp1 + %lo(ktsb_base)], tmp2 ;\
1015 841 mov MMU_TSB, tmp3 ;\
1016 842 stxa tmp2, [tmp3]ASI_IMMU ;\
1017 843 stxa tmp2, [tmp3]ASI_DMMU ;\
1018 844 membar #Sync ;\
1019 845 RESET_WINREG(tmp1)
1020 846
1021 847 #define RESET_TSB_TAGPTR(tmp) \
1022 848 set MMU_TAG_ACCESS, tmp ;\
1023 849 stxa %g0, [tmp]ASI_IMMU ;\
1024 850 stxa %g0, [tmp]ASI_DMMU ;\
1025 851 membar #Sync
1026 -#endif /* lint */
1027 852
1028 853 /*
1029 854 * In case of errors in the MMU_TSB_PREFETCH registers we have to
1030  855 	 * reset them. We can use "0" as the reset value; this way we set
1031  856 	 * the "V" bit of the registers to 0, which will disable the prefetch,
1032  857 	 * so the values of the other fields are irrelevant.
1033 858 */
1034 -#if !defined(lint)
1035 859 #define RESET_TSB_PREFETCH(tmp) \
1036 860 set VA_UTSBPREF_8K, tmp ;\
1037 861 stxa %g0, [tmp]ASI_ITSB_PREFETCH ;\
1038 862 set VA_UTSBPREF_4M, tmp ;\
1039 863 stxa %g0, [tmp]ASI_ITSB_PREFETCH ;\
1040 864 set VA_KTSBPREF_8K, tmp ;\
1041 865 stxa %g0, [tmp]ASI_ITSB_PREFETCH ;\
1042 866 set VA_KTSBPREF_4M, tmp ;\
1043 867 stxa %g0, [tmp]ASI_ITSB_PREFETCH ;\
1044 868 set VA_UTSBPREF_8K, tmp ;\
1045 869 stxa %g0, [tmp]ASI_DTSB_PREFETCH ;\
1046 870 set VA_UTSBPREF_4M, tmp ;\
1047 871 stxa %g0, [tmp]ASI_DTSB_PREFETCH ;\
1048 872 set VA_KTSBPREF_8K, tmp ;\
1049 873 stxa %g0, [tmp]ASI_DTSB_PREFETCH ;\
1050 874 set VA_KTSBPREF_4M, tmp ;\
1051 875 stxa %g0, [tmp]ASI_DTSB_PREFETCH
1052 -#endif /* lint */
1053 876
1054 877 /*
1055 878 * In case of errors in the MMU_SHARED_CONTEXT register we have to
1056  879 	 * reset its value. We can use "0" as the reset value; it will put
1057  880 	 * 0 in the IV field, disabling the shared context support and
1058  881 	 * making the values of all the other fields of the register irrelevant.
1059 882 */
1060 -#if !defined(lint)
1061 883 #define RESET_SHARED_CTXT(tmp) \
1062 884 set MMU_SHARED_CONTEXT, tmp ;\
1063 885 stxa %g0, [tmp]ASI_DMMU
1064 -#endif /* lint */
1065 886
1066 887 /*
1067 888 * RESET_TO_PRIV()
1068 889 *
1069  890 	 * In many cases, we need to force the thread into privileged mode,
1070  891 	 * because privileged mode is the only mode in which the system can
1071  892 	 * continue to work, given the indeterminable user mode information
1072  893 	 * that results from register corruption.
1073 894 *
1074 895 * - opl_uger_ctxt
1075 896 * If the error is secondary TSB related register parity, we have no idea
1076 897 * what value is supposed to be for it.
1077 898 *
1078  899 	 * In the three cases below, %tstate is not accessible until it is
1079  900 	 * overwritten with some value, so we have no clue whether the thread
1080  901 	 * was running in user mode or not:
1081 902 * - opl_uger_pstate
1082 903 * If the error is %pstate parity, it propagates to %tstate.
1083 904 * - opl_uger_tstate
1084 905 * No need to say the reason
1085 906 * - opl_uger_r
1086 907 * If the error is %ccr or %asi parity, it propagates to %tstate
1087 908 *
1088 909 * For the above four cases, user mode info may not be available for
1089 910 * sys_trap() and user_trap() to work consistently. So we have to force
1090 911 * the thread into privilege mode.
1091 912 *
1092  913 	 * Forcing the thread into privileged mode requires forcing
1093  914 	 * regular %g7 to be CPU_THREAD, because if it was running in user
1094  915 	 * mode, %g7 will be set in user_trap(). Also, since the %sp may be
1095  916 	 * in an inconsistent state, we need to do a stack reset and switch
1096  917 	 * to something we know, i.e. the current thread's kernel stack.
1097 918 * We also reset the window registers and MMU registers just to
1098 919 * make sure.
1099 920 *
1100 921 * To set regular %g7, we need to clear PSTATE_AG bit and need to
1101 922 * use one local register. Note that we are panicking and will never
1102 923 * unwind back so it is ok to clobber a local.
1103 924 *
1104 925 * If the thread was running in user mode, the %tpc value itself might be
1105 926 * within the range of OBP addresses. %tpc must be forced to be zero to prevent
1106  927 	 * sys_trap() from going to prom_trap().
1107 928 *
1108 929 */
1109 930 #define RESET_TO_PRIV(tmp, tmp1, tmp2, local) \
1110 931 RESET_MMU_REGS(tmp, tmp1, tmp2) ;\
1111 932 CPU_ADDR(tmp, tmp1) ;\
1112 933 ldx [tmp + CPU_THREAD], local ;\
1113 934 ldx [local + T_STACK], tmp ;\
1114 935 sub tmp, STACK_BIAS, %sp ;\
1115 936 rdpr %pstate, tmp ;\
1116 937 wrpr tmp, PSTATE_AG, %pstate ;\
1117 938 mov local, %g7 ;\
1118 939 rdpr %pstate, local ;\
1119 940 wrpr local, PSTATE_AG, %pstate ;\
1120 941 wrpr %g0, 1, %tl ;\
1121 942 set TSTATE_KERN, tmp ;\
1122 943 rdpr %cwp, tmp1 ;\
1123 944 or tmp, tmp1, tmp ;\
1124 945 wrpr tmp, %g0, %tstate ;\
1125 946 wrpr %g0, %tpc
1126 947
1127 948
1128 -#if defined(lint)
1129 -
1130 -void
1131 -ce_err(void)
1132 -{}
1133 -
1134 -#else /* lint */
1135 -
1136 949 /*
1137 950 * We normally don't expect CE traps since we disable the
1138 951 * 0x63 trap reporting at the start of day. There is a
1139  952 	 * small window before we disable them, so let's check for
1140 953 * it. Otherwise, panic.
1141 954 */
1142 955
1143 956 .align 128
1144 957 ENTRY_NP(ce_err)
1145 958 mov AFSR_ECR, %g1
1146 959 ldxa [%g1]ASI_ECR, %g1
1147 960 andcc %g1, ASI_ECR_RTE_UE | ASI_ECR_RTE_CEDG, %g0
1148 961 bz,pn %xcc, 1f
1149 962 nop
1150 963 retry
1151 964 1:
1152 965 /*
1153  966 	 * We did disable the 0x63 trap reporting.
1154 967 * This shouldn't happen - panic.
1155 968 */
1156 969 set trap, %g1
1157 970 rdpr %tt, %g3
1158 971 sethi %hi(sys_trap), %g5
1159 972 jmp %g5 + %lo(sys_trap)
1160 973 sub %g0, 1, %g4
1161 974 SET_SIZE(ce_err)
1162 975
1163 -#endif /* lint */
1164 976
1165 -
1166 -#if defined(lint)
1167 -
1168 -void
1169 -ce_err_tl1(void)
1170 -{}
1171 -
1172 -#else /* lint */
1173 -
1174 977 /*
1175 978 * We don't use trap for CE detection.
1176 979 */
1177 980 ENTRY_NP(ce_err_tl1)
1178 981 set trap, %g1
1179 982 rdpr %tt, %g3
1180 983 sethi %hi(sys_trap), %g5
1181 984 jmp %g5 + %lo(sys_trap)
1182 985 sub %g0, 1, %g4
1183 986 SET_SIZE(ce_err_tl1)
1184 987
1185 -#endif /* lint */
1186 988
1187 -
1188 -#if defined(lint)
1189 -
1190 -void
1191 -async_err(void)
1192 -{}
1193 -
1194 -#else /* lint */
1195 -
1196 989 /*
1197 990 * async_err is the default handler for IAE/DAE traps.
1198 991 * For OPL, we patch in the right handler at start of day.
1199  992 	 * But if an IAE/DAE trap gets generated before the handler
1200 993 * is patched, panic.
1201 994 */
1202 995 ENTRY_NP(async_err)
1203 996 set trap, %g1
1204 997 rdpr %tt, %g3
1205 998 sethi %hi(sys_trap), %g5
1206 999 jmp %g5 + %lo(sys_trap)
1207 1000 sub %g0, 1, %g4
1208 1001 SET_SIZE(async_err)
1209 1002
1210 -#endif /* lint */
1211 -
1212 -#if defined(lint)
1213 -void
1214 -opl_sync_trap(void)
1215 -{}
1216 -#else /* lint */
1217 -
1218 1003 .seg ".data"
1219 1004 .global opl_clr_freg
1220 1005 .global opl_cpu0_err_log
1221 1006
1222 1007 .align 16
1223 1008 opl_clr_freg:
1224 1009 .word 0
1225 1010 .align 16
1226 1011
1227 1012 .align MMU_PAGESIZE
1228 1013 opl_cpu0_err_log:
1229 1014 .skip MMU_PAGESIZE
1230 1015
1231 1016 /*
1232 1017 * Common synchronous error trap handler (tt=0xA, 0x32)
1233 1018 * All TL=0 and TL>0 0xA and 0x32 traps vector to this handler.
1234 1019 * The error handling can be best summarized as follows:
1235 1020 * 0. Do TRAPTRACE if enabled.
1236 1021 * 1. Save globals %g1, %g2 & %g3 onto the scratchpad regs.
1237 1022 * 2. The SFSR register is read and verified as valid by checking
1238 1023 	 *    the SFSR.FV bit being set. If SFSR.FV is not set, the
1239 1024 	 *    error cases cannot be decoded/determined and the SFPAR
1240 1025 	 *    register that contains the physical faultaddr is also
1241 1026 	 *    not valid. Also, the SFPAR is only valid for the UE/TO/BERR error
1242 1027 	 *    cases. Assuming the SFSR.FV is valid:
1243 1028 * - BERR(bus error)/TO(timeout)/UE case
1244 1029 * If any of these error cases are detected, read the SFPAR
1245 1030 * to get the faultaddress. Generate ereport.
1246 1031 * - TLB Parity case (only recoverable case)
1247 1032 * For DAE, read SFAR for the faultaddress. For IAE,
1248 1033 * use %tpc for faultaddress (SFAR is not valid in IAE)
1249 1034 * Flush all the tlbs.
1250 1035 * Subtract one from the recoverable error count stored in
1251 1036 * the error log scratch register. If the threshold limit
1252 1037 * is reached (zero) - generate ereport. Else
1253 1038 * restore globals and retry (no ereport is generated).
1254 1039 * - TLB Multiple hits
1255 1040 * For DAE, read SFAR for the faultaddress. For IAE,
1256 1041 * use %tpc for faultaddress (SFAR is not valid in IAE).
1257 1042 * Flush all tlbs and generate ereport.
1258 1043 * 3. TL=0 and TL>0 considerations
1259 1044 * - Since both TL=0 & TL>1 traps are made to vector into
1260 1045 * the same handler, the underlying assumption/design here is
1261 1046 	 *      that any nested error condition (if it happens) occurs only
1262 1047 	 *      in the handler and the system is assumed to eventually enter
1263 1048 	 *      Red-mode. With this philosophy in mind, the recoverable
1264 1049 	 *      TLB Parity error case never checks the TL level before it
1265 1050 	 *      retries. Note that this is ok for the TL>1 case (assuming we
1266 1051 * don't have a nested error) since we always save the globals
1267 1052 * %g1, %g2 & %g3 whenever we enter this trap handler.
1268 1053 * - Additional TL=0 vs TL>1 handling includes:
1269 1054 	 *      - For a UE error occurring under TL>1, special handling
1270 1055 * is added to prevent the unlikely chance of a cpu-lockup
1271 1056 * when a UE was originally detected in user stack and
1272 1057 * the spill trap handler taken from sys_trap() so happened
1273 1058 * to reference the same UE location. Under the above
1274 1059 * condition (TL>1 and UE error), paranoid code is added
1275 1060 * to reset window regs so that spill traps can't happen
1276 1061 * during the unwind back to TL=0 handling.
1277 1062 * Note that we can do that because we are not returning
1278 1063 * back.
1279 1064 * 4. Ereport generation.
1280 1065 * - Ereport generation is performed when we unwind to the TL=0
1281 1066 * handling code via sys_trap(). on_trap()/lofault protection
1282 1067 * will apply there.
1283 1068 *
1284 1069 */
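Before the handler itself, a C-style sketch of the SFSR decode flow
summarized above. Every name here other than the SFSR_* bits (the helpers,
the function itself) is hypothetical; this only mirrors the comment, it is
not the handler.

	#include <stdint.h>

	extern void flush_all_tlb(void);
	extern int errlog_count_dec(void);
	extern void generate_ereport(uint64_t, uint64_t);

	/* Sketch only: decode flow for tt=0xa (IAE) and tt=0x32 (DAE). */
	static void
	sync_trap_decode(uint64_t sfsr, uint64_t sfar, uint64_t sfpar,
	    uint64_t tpc, int is_iae)
	{
		/* IAE has no valid SFAR; fall back to %tpc */
		uint64_t fault = is_iae ? tpc : sfar;

		if ((sfsr & SFSR_FV) &&
		    (sfsr & (SFSR_UE | SFSR_BERR | SFSR_TO)))
			fault = sfpar;		/* physical faultaddr */

		if ((sfsr & SFSR_FV) && (sfsr & SFSR_TLB_PRT)) {
			flush_all_tlb();	/* only recoverable case */
			if (errlog_count_dec() > 0)
				return;		/* log and retry, no ereport */
		} else if ((sfsr & SFSR_FV) && (sfsr & SFSR_TLB_MUL)) {
			flush_all_tlb();	/* multi-hit: still ereport */
		}
		generate_ereport(sfsr, fault);
	}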
1285 1070 ENTRY_NP(opl_sync_trap)
1286 1071 #ifdef TRAPTRACE
1287 1072 OPL_TRAPTRACE(%g1, %g2, %g3, opl_sync_trap_lb)
1288 1073 rdpr %tt, %g1
1289 1074 #endif /* TRAPTRACE */
1290 1075 cmp %g1, T_INSTR_ERROR
1291 1076 bne,pt %xcc, 0f
1292 1077 mov MMU_SFSR, %g3
1293 1078 ldxa [%g3]ASI_IMMU, %g1 ! IAE trap case tt = 0xa
1294 1079 andcc %g1, SFSR_FV, %g0
1295 1080 bz,a,pn %xcc, 2f ! Branch if SFSR is invalid and
1296 1081 rdpr %tpc, %g2 ! use %tpc for faultaddr instead
1297 1082
1298 1083 sethi %hi(SFSR_UE|SFSR_BERR|SFSR_TO), %g3
1299 1084 andcc %g1, %g3, %g0 ! Check for UE/BERR/TO errors
1300 1085 bz,a,pt %xcc, 1f ! Branch if not UE/BERR/TO and
1301 1086 rdpr %tpc, %g2 ! use %tpc as faultaddr
1302 1087 set OPL_MMU_SFPAR, %g3 ! In the UE/BERR/TO cases, use
1303 1088 ba,pt %xcc, 2f ! SFPAR as faultaddr
1304 1089 ldxa [%g3]ASI_IMMU, %g2
1305 1090 0:
1306 1091 ldxa [%g3]ASI_DMMU, %g1 ! DAE trap case tt = 0x32
1307 1092 andcc %g1, SFSR_FV, %g0
1308 1093 bnz,pt %xcc, 7f ! branch if SFSR.FV is valid
1309 1094 mov MMU_SFAR, %g2 ! set %g2 to use SFAR
1310 1095 ba,pt %xcc, 2f ! SFSR.FV is not valid, read SFAR
1311 1096 ldxa [%g2]ASI_DMMU, %g2 ! for faultaddr
1312 1097 7:
1313 1098 sethi %hi(SFSR_UE|SFSR_BERR|SFSR_TO), %g3
1314 1099 andcc %g1, %g3, %g0 ! Check UE/BERR/TO for valid SFPAR
1315 1100 movnz %xcc, OPL_MMU_SFPAR, %g2 ! Use SFPAR instead of SFAR for
1316 1101 ldxa [%g2]ASI_DMMU, %g2 ! faultaddr
1317 1102 1:
1318 1103 sethi %hi(SFSR_TLB_PRT), %g3
1319 1104 andcc %g1, %g3, %g0
1320 1105 bz,pt %xcc, 8f ! branch for TLB multi-hit check
1321 1106 nop
1322 1107 /*
1323 1108 * This is the TLB parity error case and it is the
1324 1109 * only retryable error case.
1325 1110 * Only %g1, %g2 and %g3 are allowed
1326 1111 */
1327 1112 FLUSH_ALL_TLB(%g3)
1328 1113 set OPL_SCRATCHPAD_ERRLOG, %g3
1329 1114 ldxa [%g3]ASI_SCRATCHPAD, %g3 ! Read errlog scratchreg
1330 1115 and %g3, ERRLOG_REG_NUMERR_MASK, %g3! Extract the error count
1331 1116 subcc %g3, 1, %g0 ! Subtract one from the count
1332 1117 bz,pn %xcc, 2f ! too many TLB parity errs in a certain
1333 1118 nop ! period, branch to generate ereport
1334 1119 LOG_SYNC_REG(%g1, %g2, %g3) ! Record into the error log
1335 1120 set OPL_SCRATCHPAD_ERRLOG, %g3
1336 1121 ldxa [%g3]ASI_SCRATCHPAD, %g2
1337 1122 sub %g2, 1, %g2 ! decrement error counter by 1
1338 1123 stxa %g2, [%g3]ASI_SCRATCHPAD ! update the errlog scratchreg
1339 1124 OPL_RESTORE_GLOBAL(%g1, %g2, %g3)
1340 1125 retry
1341 1126 8:
1342 1127 sethi %hi(SFSR_TLB_MUL), %g3
1343 1128 andcc %g1, %g3, %g0
1344 1129 bz,pt %xcc, 2f ! check for the TLB multi-hit errors
1345 1130 nop
1346 1131 FLUSH_ALL_TLB(%g3)
1347 1132 2:
1348 1133 /*
1349 1134 * non-retryable error handling
1350 1135 * now we can use other registers since
1351 1136 * we will not be returning back
1352 1137 */
1353 1138 mov %g1, %g5 ! %g5 = SFSR
1354 1139 mov %g2, %g6 ! %g6 = SFPAR or SFAR/tpc
1355 1140 LOG_SYNC_REG(%g1, %g2, %g3) ! Record into the error log
1356 1141
1357 1142 /*
1358 1143 * Special case for UE on user stack.
1359 1144 	 * There is a possibility that the same error may come back here
1360 1145 	 * by touching the same UE in the spill trap handler taken from
1361 1146 	 * sys_trap(). That ends up in an infinite loop causing a cpu lockup.
1362 1147 	 * The conditions for handling this case are:
1363 1148 * - SFSR_FV is valid and SFSR_UE is set
1364 1149 * - we are at TL > 1
1365 1150 * If the above conditions are true, we force %cansave to be a
1366 1151 * big number to prevent spill trap in sys_trap(). Note that
1367 1152 * we will not be returning back.
1368 1153 */
1369 1154 rdpr %tt, %g4 ! %g4 == ttype
1370 1155 rdpr %tl, %g1 ! %g1 == tl
1371 1156 cmp %g1, 1 ! Check if TL == 1
1372 1157 be,pt %xcc, 3f ! branch if we came from TL=0
1373 1158 nop
1374 1159 andcc %g5, SFSR_FV, %g0 ! see if SFSR.FV is valid
1375 1160 bz,pn %xcc, 4f ! branch, checking UE is meaningless
1376 1161 sethi %hi(SFSR_UE), %g2
1377 1162 andcc %g5, %g2, %g0 ! check for UE
1378 1163 bz,pt %xcc, 4f ! branch if not UE
1379 1164 nop
1380 1165 RESET_WINREG(%g1) ! reset windows to prevent spills
1381 1166 4:
1382 1167 RESET_USER_RTT_REGS(%g2, %g3, opl_sync_trap_resetskip)
1383 1168 opl_sync_trap_resetskip:
1384 1169 mov %g5, %g3 ! pass SFSR to the 3rd arg
1385 1170 mov %g6, %g2 ! pass SFAR to the 2nd arg
1386 1171 set opl_cpu_isync_tl1_error, %g1
1387 1172 set opl_cpu_dsync_tl1_error, %g6
1388 1173 cmp %g4, T_INSTR_ERROR
1389 1174 movne %icc, %g6, %g1
1390 1175 ba,pt %icc, 6f
1391 1176 nop
1392 1177 3:
1393 1178 mov %g5, %g3 ! pass SFSR to the 3rd arg
1394 1179 mov %g6, %g2 ! pass SFAR to the 2nd arg
1395 1180 set opl_cpu_isync_tl0_error, %g1
1396 1181 set opl_cpu_dsync_tl0_error, %g6
1397 1182 cmp %g4, T_INSTR_ERROR
1398 1183 movne %icc, %g6, %g1
1399 1184 6:
1400 1185 sethi %hi(sys_trap), %g5
1401 1186 jmp %g5 + %lo(sys_trap)
1402 1187 mov PIL_15, %g4
1403 1188 SET_SIZE(opl_sync_trap)
1404 -#endif /* lint */
1405 1189
1406 -#if defined(lint)
1407 -void
1408 -opl_uger_trap(void)
1409 -{}
1410 -#else /* lint */
1411 1190 /*
1412 1191 * Common Urgent error trap handler (tt=0x40)
1413 1192 * All TL=0 and TL>0 0x40 traps vector to this handler.
1414 1193 * The error handling can be best summarized as follows:
1415 1194 * 1. Read the Urgent error status register (UGERSR)
1416 1195 * Faultaddress is N/A here and it is not collected.
1417 1196 	 * 2. Check to see if we have a multiple-error case.
1418 1197 * If so, we enable WEAK_ED (weak error detection) bit
1419 1198 * to prevent any potential error storms and branch directly
1420 1199 * to generate ereport. (we don't decode/handle individual
1421 1200 * error cases when we get a multiple error situation)
1422 1201 * 3. Now look for the recoverable error cases which include
1423 1202 * IUG_DTLB, IUG_ITLB or COREERR errors. If any of the
1424 1203 * recoverable errors are detected, do the following:
1425 1204 * - Flush all tlbs.
1426 1205 * - Verify that we came from TL=0, if not, generate
1427 1206 * ereport. Note that the reason we don't recover
1428 1207 * at TL>0 is because the AGs might be corrupted or
1429 1208 * inconsistent. We can't save/restore them into
1430 1209 * the scratchpad regs like we did for opl_sync_trap().
1431 1210 * - Check the INSTEND[5:4] bits in the UGERSR. If the
1432 1211 * value is 0x3 (11b), this error is not recoverable.
1433 1212 * Generate ereport.
1434 1213 * - Subtract one from the recoverable error count stored in
1435 1214 * the error log scratch register. If the threshold limit
1436 1215 * is reached (zero) - generate ereport.
1437 1216 * - If the count is within the limit, update the count
1438 1217 * in the error log register (subtract one). Log the error
1439 1218 * info in the log buffer. Capture traptrace if enabled.
1440 1219 * Retry (no ereport generated)
1441 1220 * 4. The rest of the error cases are unrecoverable and will
1442 1221 	 *    be handled accordingly (flushing regs, etc. as required).
1443 1222 * For details on these error cases (UGER_CRE, UGER_CTXT, etc..)
1444 1223 * consult the OPL cpu/mem philosophy doc.
1445 1224 * Ereport will be generated for these errors.
1446 1225 * 5. Ereport generation.
1447 1226 	 *    - Ereport generation for an urgent error trap always
1448 1227 	 *      results in a panic when we unwind to the TL=0 handling
1449 1228 	 *      code via sys_trap(). on_trap()/lofault protection does
1450 1229 	 *      not apply there.
1451 1230 */
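A C sketch of the recoverability gate described in steps 2 and 3 above.
uger_recoverable and errlog_count_dec are hypothetical helpers; the UGESR_*
bits come from the kernel headers.

	extern int errlog_count_dec(void);

	/* Sketch only: can this urgent error be retried? */
	static int
	uger_recoverable(uint64_t ugersr, int tl)
	{
		if (ugersr & UGESR_MULTI)
			return (0);	/* multi-error: enable WEAK_ED, panic */
		if (!(ugersr & UGESR_CAN_RECOVER))
			return (0);	/* not IUG_DTLB/IUG_ITLB/COREERR */
		if (tl != 1)
			return (0);	/* trap did not come from TL=0 */
		if (((ugersr >> 4) & 0x3) == 0x3)
			return (0);	/* INSTEND[5:4] == 11b */
		return (errlog_count_dec() > 0); /* threshold not reached */
	}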
1452 1231 ENTRY_NP(opl_uger_trap)
1453 1232 set ASI_UGERSR, %g2
1454 1233 ldxa [%g2]ASI_AFSR, %g1 ! Read the UGERSR reg
1455 1234
1456 1235 set UGESR_MULTI, %g2
1457 1236 andcc %g1, %g2, %g0 ! Check for Multi-errs
1458 1237 bz,pt %xcc, opl_uger_is_recover ! branch if not Multi-errs
1459 1238 nop
1460 1239 set AFSR_ECR, %g2
1461 1240 ldxa [%g2]ASI_AFSR, %g3 ! Enable Weak error
1462 1241 or %g3, ASI_ECR_WEAK_ED, %g3 ! detect mode to prevent
1463 1242 stxa %g3, [%g2]ASI_AFSR ! potential error storms
1464 1243 ba %xcc, opl_uger_panic1
1465 1244 nop
1466 1245
1467 1246 opl_uger_is_recover:
1468 1247 set UGESR_CAN_RECOVER, %g2 ! Check for recoverable
1469 1248 andcc %g1, %g2, %g0 ! errors i.e.IUG_DTLB,
1470 1249 bz,pt %xcc, opl_uger_cre ! IUG_ITLB or COREERR
1471 1250 nop
1472 1251
1473 1252 /*
1474 1253 * Fall thru to handle recoverable case
1475 1254 * Need to do the following additional checks to determine
1476 1255 * if this is indeed recoverable.
1477 1256 * 1. Error trap came from TL=0 and
1478 1257 * 2. INSTEND[5:4] bits in UGERSR is not 0x3
1479 1258 * 3. Recoverable error count limit not reached
1480 1259 *
1481 1260 */
1482 1261 FLUSH_ALL_TLB(%g3)
1483 1262 rdpr %tl, %g3 ! Read TL
1484 1263 cmp %g3, 1 ! Check if we came from TL=0
1485 1264 bne,pt %xcc, opl_uger_panic ! branch if came from TL>0
1486 1265 nop
1487 1266 srlx %g1, 4, %g2 ! shift INSTEND[5:4] -> [1:0]
1488 1267 and %g2, 3, %g2 ! extract the shifted [1:0] bits
1489 1268 cmp %g2, 3 ! check if INSTEND is recoverable
1490 1269 be,pt %xcc, opl_uger_panic ! panic if ([1:0] = 11b)
1491 1270 nop
1492 1271 set OPL_SCRATCHPAD_ERRLOG, %g3
1493 1272 ldxa [%g3]ASI_SCRATCHPAD, %g2 ! Read errlog scratch reg
1494 1273 and %g2, ERRLOG_REG_NUMERR_MASK, %g3! Extract error count and
1495 1274 subcc %g3, 1, %g3 ! subtract one from it
1496 1275 bz,pt %xcc, opl_uger_panic ! If count reached zero, too many
1497 1276 nop ! errors, branch to generate ereport
1498 1277 sub %g2, 1, %g2 ! Subtract one from the count
1499 1278 set OPL_SCRATCHPAD_ERRLOG, %g3 ! and write back the updated
1500 1279 stxa %g2, [%g3]ASI_SCRATCHPAD ! count into the errlog reg
1501 1280 LOG_UGER_REG(%g1, %g2, %g3) ! Log the error info
1502 1281 #ifdef TRAPTRACE
1503 1282 OPL_TRAPTRACE(%g1, %g2, %g3, opl_uger_trap_lb)
1504 1283 #endif /* TRAPTRACE */
1505 1284 retry ! retry - no ereport
1506 1285
1507 1286 /*
1508 1287 * Process the rest of the unrecoverable error cases
1509 1288 * All error cases below ultimately branch to either
1510 1289 * opl_uger_panic or opl_uger_panic1.
1511 1290 * opl_uger_panic1 is the same as opl_uger_panic except
1512 1291 * for the additional execution of the RESET_TO_PRIV()
1513 1292 * macro that does a heavy handed reset. Read the
1514 1293 * comments for RESET_TO_PRIV() macro for more info.
1515 1294 */
1516 1295 opl_uger_cre:
1517 1296 set UGESR_IAUG_CRE, %g2
1518 1297 andcc %g1, %g2, %g0
1519 1298 bz,pt %xcc, opl_uger_ctxt
1520 1299 nop
1521 1300 IAG_CRE(%g2, %g3)
1522 1301 set AFSR_ECR, %g2
1523 1302 ldxa [%g2]ASI_AFSR, %g3
1524 1303 or %g3, ASI_ECR_WEAK_ED, %g3
1525 1304 stxa %g3, [%g2]ASI_AFSR
1526 1305 ba %xcc, opl_uger_panic
1527 1306 nop
1528 1307
1529 1308 opl_uger_ctxt:
1530 1309 set UGESR_IAUG_TSBCTXT, %g2
1531 1310 andcc %g1, %g2, %g0
1532 1311 bz,pt %xcc, opl_uger_tsbp
1533 1312 nop
1534 1313 GET_CPU_IMPL(%g2)
1535 1314 cmp %g2, JUPITER_IMPL
1536 1315 bne %xcc, 1f
1537 1316 nop
1538 1317 RESET_SHARED_CTXT(%g2)
1539 1318 1:
1540 1319 RESET_MMU_REGS(%g2, %g3, %g4)
1541 1320 ba %xcc, opl_uger_panic
1542 1321 nop
1543 1322
1544 1323 opl_uger_tsbp:
1545 1324 set UGESR_IUG_TSBP, %g2
1546 1325 andcc %g1, %g2, %g0
1547 1326 bz,pt %xcc, opl_uger_pstate
1548 1327 nop
1549 1328 GET_CPU_IMPL(%g2)
1550 1329 cmp %g2, JUPITER_IMPL
1551 1330 bne %xcc, 1f
1552 1331 nop
1553 1332 RESET_TSB_PREFETCH(%g2)
1554 1333 1:
1555 1334 RESET_TSB_TAGPTR(%g2)
1556 1335
1557 1336 /*
1558 1337 * IUG_TSBP error may corrupt MMU registers
1559 1338 * Reset them here.
1560 1339 */
1561 1340 RESET_MMU_REGS(%g2, %g3, %g4)
1562 1341 ba %xcc, opl_uger_panic
1563 1342 nop
1564 1343
1565 1344 opl_uger_pstate:
1566 1345 set UGESR_IUG_PSTATE, %g2
1567 1346 andcc %g1, %g2, %g0
1568 1347 bz,pt %xcc, opl_uger_tstate
1569 1348 nop
1570 1349 RESET_CUR_TSTATE(%g2)
1571 1350 ba %xcc, opl_uger_panic1
1572 1351 nop
1573 1352
1574 1353 opl_uger_tstate:
1575 1354 set UGESR_IUG_TSTATE, %g2
1576 1355 andcc %g1, %g2, %g0
1577 1356 bz,pt %xcc, opl_uger_f
1578 1357 nop
1579 1358 RESET_PREV_TSTATE(%g2, %g3, opl_uger_tstate_1)
1580 1359 ba %xcc, opl_uger_panic1
1581 1360 nop
1582 1361
1583 1362 opl_uger_f:
1584 1363 set UGESR_IUG_F, %g2
1585 1364 andcc %g1, %g2, %g0
1586 1365 bz,pt %xcc, opl_uger_r
1587 1366 nop
1588 1367 CLEAR_FPREGS(%g2)
1589 1368 ba %xcc, opl_uger_panic
1590 1369 nop
1591 1370
1592 1371 opl_uger_r:
1593 1372 set UGESR_IUG_R, %g2
1594 1373 andcc %g1, %g2, %g0
1595 1374 bz,pt %xcc, opl_uger_panic1
1596 1375 nop
1597 1376 CLEAR_GEN_REGS(%g2, opl_uger_r_1)
1598 1377 ba %xcc, opl_uger_panic1
1599 1378 nop
1600 1379
1601 1380 opl_uger_panic:
1602 1381 mov %g1, %g2 ! %g2 = arg #1
1603 1382 LOG_UGER_REG(%g1, %g3, %g4)
1604 1383 ba %xcc, opl_uger_panic_cmn
1605 1384 nop
1606 1385
1607 1386 opl_uger_panic1:
1608 1387 mov %g1, %g2 ! %g2 = arg #1
1609 1388 LOG_UGER_REG(%g1, %g3, %g4)
1610 1389 RESET_TO_PRIV(%g1, %g3, %g4, %l0)
1611 1390
1612 1391 /*
1613 1392 * Set up the argument for sys_trap.
1614 1393 * %g2 = arg #1 already set above
1615 1394 */
1616 1395 opl_uger_panic_cmn:
1617 1396 RESET_USER_RTT_REGS(%g4, %g5, opl_uger_panic_resetskip)
1618 1397 opl_uger_panic_resetskip:
1619 1398 rdpr %tl, %g3 ! arg #2
1620 1399 set opl_cpu_urgent_error, %g1 ! pc
1621 1400 sethi %hi(sys_trap), %g5
1622 1401 jmp %g5 + %lo(sys_trap)
1623 1402 mov PIL_15, %g4
1624 1403 SET_SIZE(opl_uger_trap)
1625 -#endif /* lint */
1626 1404
1627 -#if defined(lint)
1628 -void
1629 -opl_ta3_trap(void)
1630 -{}
1631 -void
1632 -opl_cleanw_subr(void)
1633 -{}
1634 -#else /* lint */
1635 1405 /*
1636 1406 	 * OPL ta3 support (please note that the win_reg
1637 1407 	 * area size for each cpu is 2^7 bytes)
1638 1408 */
1639 1409
1640 1410 #define RESTORE_WREGS(tmp1, tmp2) \
1641 1411 CPU_INDEX(tmp1, tmp2) ;\
1642 1412 sethi %hi(opl_ta3_save), tmp2 ;\
1643 1413 ldx [tmp2 +%lo(opl_ta3_save)], tmp2 ;\
1644 1414 sllx tmp1, 7, tmp1 ;\
1645 1415 add tmp2, tmp1, tmp2 ;\
1646 1416 ldx [tmp2 + 0], %l0 ;\
1647 1417 ldx [tmp2 + 8], %l1 ;\
1648 1418 ldx [tmp2 + 16], %l2 ;\
1649 1419 ldx [tmp2 + 24], %l3 ;\
1650 1420 ldx [tmp2 + 32], %l4 ;\
1651 1421 ldx [tmp2 + 40], %l5 ;\
1652 1422 ldx [tmp2 + 48], %l6 ;\
1653 1423 ldx [tmp2 + 56], %l7 ;\
1654 1424 ldx [tmp2 + 64], %i0 ;\
1655 1425 ldx [tmp2 + 72], %i1 ;\
1656 1426 ldx [tmp2 + 80], %i2 ;\
1657 1427 ldx [tmp2 + 88], %i3 ;\
1658 1428 ldx [tmp2 + 96], %i4 ;\
1659 1429 ldx [tmp2 + 104], %i5 ;\
1660 1430 ldx [tmp2 + 112], %i6 ;\
1661 1431 ldx [tmp2 + 120], %i7
1662 1432
1663 1433 #define SAVE_WREGS(tmp1, tmp2) \
1664 1434 CPU_INDEX(tmp1, tmp2) ;\
1665 1435 sethi %hi(opl_ta3_save), tmp2 ;\
1666 1436 ldx [tmp2 +%lo(opl_ta3_save)], tmp2 ;\
1667 1437 sllx tmp1, 7, tmp1 ;\
1668 1438 add tmp2, tmp1, tmp2 ;\
1669 1439 stx %l0, [tmp2 + 0] ;\
1670 1440 stx %l1, [tmp2 + 8] ;\
1671 1441 stx %l2, [tmp2 + 16] ;\
1672 1442 stx %l3, [tmp2 + 24] ;\
1673 1443 stx %l4, [tmp2 + 32] ;\
1674 1444 stx %l5, [tmp2 + 40] ;\
1675 1445 stx %l6, [tmp2 + 48] ;\
1676 1446 stx %l7, [tmp2 + 56] ;\
1677 1447 stx %i0, [tmp2 + 64] ;\
1678 1448 stx %i1, [tmp2 + 72] ;\
1679 1449 stx %i2, [tmp2 + 80] ;\
1680 1450 stx %i3, [tmp2 + 88] ;\
1681 1451 stx %i4, [tmp2 + 96] ;\
1682 1452 stx %i5, [tmp2 + 104] ;\
1683 1453 stx %i6, [tmp2 + 112] ;\
1684 1454 stx %i7, [tmp2 + 120]
1685 1455
1686 1456
1687 1457 /*
1688 1458 * The purpose of this function is to make sure that the restore
1689 1459 * instruction after the flushw does not cause a fill trap. The sun4u
1690 1460 	 * fill trap handler cannot handle a tlb fault of an unmapped stack
1691 1461 * except at the restore instruction at user_rtt. On OPL systems the
1692 1462 * stack can get unmapped between the flushw and restore instructions
1693 1463 * since multiple strands share the tlb.
1694 1464 */
1695 1465 ENTRY_NP(opl_ta3_trap)
1696 1466 set trap, %g1
1697 1467 mov T_FLUSHW, %g3
1698 1468 sub %g0, 1, %g4
1699 1469 rdpr %cwp, %g5
1700 1470 SAVE_WREGS(%g2, %g6)
1701 1471 save
1702 1472 flushw
1703 1473 rdpr %cwp, %g6
1704 1474 wrpr %g5, %cwp
1705 1475 RESTORE_WREGS(%g2, %g5)
1706 1476 wrpr %g6, %cwp
1707 1477 restored
1708 1478 restore
1709 1479
1710 1480 ba,a fast_trap_done
1711 1481 SET_SIZE(opl_ta3_trap)
1712 1482
1713 1483 ENTRY_NP(opl_cleanw_subr)
1714 1484 set trap, %g1
1715 1485 mov T_FLUSHW, %g3
1716 1486 sub %g0, 1, %g4
1717 1487 rdpr %cwp, %g5
1718 1488 SAVE_WREGS(%g2, %g6)
1719 1489 save
1720 1490 flushw
1721 1491 rdpr %cwp, %g6
1722 1492 wrpr %g5, %cwp
1723 1493 RESTORE_WREGS(%g2, %g5)
1724 1494 wrpr %g6, %cwp
1725 1495 restored
1726 1496 restore
1727 1497 jmp %g7
1728 1498 nop
1729 1499 SET_SIZE(opl_cleanw_subr)
1730 -#endif /* lint */
1731 1500
1732 -#if defined(lint)
1733 -
1734 -void
1735 -opl_serr_instr(void)
1736 -{}
1737 -
1738 -#else /* lint */
1739 1501 /*
1740 1502 * The actual trap handler for tt=0x0a, and tt=0x32
1741 1503 */
1742 1504 ENTRY_NP(opl_serr_instr)
1743 1505 OPL_SAVE_GLOBAL(%g1,%g2,%g3)
1744 1506 sethi %hi(opl_sync_trap), %g3
1745 1507 jmp %g3 + %lo(opl_sync_trap)
1746 1508 rdpr %tt, %g1
1747 1509 .align 32
1748 1510 SET_SIZE(opl_serr_instr)
1749 1511
1750 -#endif /* lint */
1751 -
1752 -#if defined(lint)
1753 -
1754 -void
1755 -opl_ugerr_instr(void)
1756 -{}
1757 -
1758 -#else /* lint */
1759 1512 /*
1760 1513 * The actual trap handler for tt=0x40
1761 1514 */
1762 1515 ENTRY_NP(opl_ugerr_instr)
1763 1516 sethi %hi(opl_uger_trap), %g3
1764 1517 jmp %g3 + %lo(opl_uger_trap)
1765 1518 nop
1766 1519 .align 32
1767 1520 SET_SIZE(opl_ugerr_instr)
1768 1521
1769 -#endif /* lint */
1770 -
1771 -#if defined(lint)
1772 -
1773 -void
1774 -opl_ta3_instr(void)
1775 -{}
1776 -
1777 -#else /* lint */
1778 1522 /*
1779 1523 * The actual trap handler for tt=0x103 (flushw)
1780 1524 */
1781 1525 ENTRY_NP(opl_ta3_instr)
1782 1526 sethi %hi(opl_ta3_trap), %g3
1783 1527 jmp %g3 + %lo(opl_ta3_trap)
1784 1528 nop
1785 1529 .align 32
1786 1530 SET_SIZE(opl_ta3_instr)
1787 1531
1788 -#endif /* lint */
1789 -
1790 -#if defined(lint)
1791 -
1792 -void
1793 -opl_ta4_instr(void)
1794 -{}
1795 -
1796 -#else /* lint */
1797 1532 /*
1798 1533 * The patch for the .clean_windows code
1799 1534 */
1800 1535 ENTRY_NP(opl_ta4_instr)
1801 1536 sethi %hi(opl_cleanw_subr), %g3
1802 1537 add %g3, %lo(opl_cleanw_subr), %g3
1803 1538 jmpl %g3, %g7
1804 1539 add %g7, 8, %g7
1805 1540 nop
1806 1541 nop
1807 1542 nop
1808 1543 SET_SIZE(opl_ta4_instr)
1809 1544
1810 -#endif /* lint */
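The patch works because jmpl deposits its own address in %g7 and the delay-slot add bumps it by 8, so opl_cleanw_subr's closing "jmp %g7" resumes exactly one instruction past the patched pair. The link arithmetic, spelled out in C (the function name is invented for illustration):

	#include <stdint.h>

	/* where "jmpl %g3, %g7" + "add %g7, 8, %g7" send the return:
	 * the jmpl's own PC plus 8, i.e. just past its delay slot */
	static inline uint64_t
	cleanw_return_pc(uint64_t jmpl_pc)
	{
		return (jmpl_pc + 8);
	}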
1811 -
1812 -#if defined(lint)
1813 -/*
1814 - * Get timestamp (stick).
1815 - */
1816 -/* ARGSUSED */
1817 -void
1818 -stick_timestamp(int64_t *ts)
1819 -{
1820 -}
1821 -
1822 -#else /* lint */
1823 -
1824 1545 ENTRY_NP(stick_timestamp)
1825 1546 rd STICK, %g1 ! read stick reg
1826 1547 sllx %g1, 1, %g1
1827 1548 srlx %g1, 1, %g1 ! clear npt bit
1828 1549
1829 1550 retl
1830 1551 stx %g1, [%o0] ! store the timestamp
1831 1552 SET_SIZE(stick_timestamp)
1832 1553
1833 -#endif /* lint */
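stick_timestamp strips bit 63, the non-privileged-trap (NPT) bit, with a shift-left/shift-right pair, leaving the 63-bit counter value. The same computation in C, with the privileged register read left out:

	#include <stdint.h>

	/* what the sllx/srlx pair computes on the raw %stick value */
	static inline int64_t
	stick_counter_bits(uint64_t raw_stick)
	{
		return ((int64_t)((raw_stick << 1) >> 1));	/* drop bit 63 */
	}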
1834 1554
1835 -
1836 -#if defined(lint)
1837 -/*
1838 - * Set STICK adjusted by skew.
1839 - */
1840 -/* ARGSUSED */
1841 -void
1842 -stick_adj(int64_t skew)
1843 -{
1844 -}
1845 -
1846 -#else /* lint */
1847 -
1848 1555 ENTRY_NP(stick_adj)
1849 1556 rdpr %pstate, %g1 ! save processor state
1850 1557 andn %g1, PSTATE_IE, %g3
1851 1558 ba 1f ! cache align stick adj
1852 1559 wrpr %g0, %g3, %pstate ! turn off interrupts
1853 1560
1854 1561 .align 16
1855 1562 1: nop
1856 1563
1857 1564 rd STICK, %g4 ! read stick reg
1858 1565 add %g4, %o0, %o1 ! adjust stick with skew
1859 1566 wr %o1, %g0, STICK ! write stick reg
1860 1567
1861 1568 retl
1862 1569 wrpr %g1, %pstate ! restore processor state
1863 1570 SET_SIZE(stick_adj)
1864 1571
1865 -#endif /* lint */
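stick_adj runs the read/add/write with interrupts off and the three instructions cache-line aligned so the counter advances as little as possible between the rd and the wr, keeping the applied skew accurate. The data flow is just a signed add; a sketch with hypothetical accessors standing in for the privileged rd/wr:

	#include <stdint.h>

	extern uint64_t rd_stick_reg(void);	/* hypothetical: rd STICK */
	extern void wr_stick_reg(uint64_t);	/* hypothetical: wr STICK */

	static void
	stick_adj_sketch(int64_t skew)
	{
		/* the real code runs this with PSTATE.IE clear */
		wr_stick_reg(rd_stick_reg() + (uint64_t)skew);
	}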
1866 -
1867 -#if defined(lint)
1868 -/*
1869 - * Debugger-specific stick retrieval
1870 - */
1871 -/*ARGSUSED*/
1872 -int
1873 -kdi_get_stick(uint64_t *stickp)
1874 -{
1875 - return (0);
1876 -}
1877 -
1878 -#else /* lint */
1879 -
1880 1572 ENTRY_NP(kdi_get_stick)
1881 1573 rd STICK, %g1
1882 1574 stx %g1, [%o0]
1883 1575 retl
1884 1576 mov %g0, %o0
1885 1577 SET_SIZE(kdi_get_stick)
1886 1578
1887 -#endif /* lint */
1888 -
1889 -#if defined(lint)
1890 -
1891 -/*ARGSUSED*/
1892 -int
1893 -dtrace_blksuword32(uintptr_t addr, uint32_t *data, int tryagain)
1894 -{ return (0); }
1895 -
1896 -#else
1897 -
1898 1579 ENTRY(dtrace_blksuword32)
1899 1580 save %sp, -SA(MINFRAME + 4), %sp
1900 1581
1901 1582 rdpr %pstate, %l1
1902 1583 andn %l1, PSTATE_IE, %l2 ! disable interrupts to
1903 1584 wrpr %g0, %l2, %pstate ! protect our FPU diddling
1904 1585
1905 1586 rd %fprs, %l0
1906 1587 andcc %l0, FPRS_FEF, %g0
1907 1588 bz,a,pt %xcc, 1f ! if the fpu is disabled
1908 1589 wr %g0, FPRS_FEF, %fprs ! ... enable the fpu
1909 1590
1910 1591 st %f0, [%fp + STACK_BIAS - 4] ! save %f0 to the stack
1911 1592 1:
1912 1593 set 0f, %l5
1913 1594 /*
1914 1595 	 * We're about to write a block full of either total garbage
1915 1596 * (not kernel data, don't worry) or user floating-point data
1916 1597 * (so it only _looks_ like garbage).
1917 1598 */
1918 1599 ld [%i1], %f0 ! modify the block
1919 1600 membar #Sync
1920 1601 stn %l5, [THREAD_REG + T_LOFAULT] ! set up the lofault handler
1921 1602 stda %d0, [%i0]ASI_BLK_COMMIT_S ! store the modified block
1922 1603 membar #Sync
1923 1604 flush %i0 ! flush instruction pipeline
1924 1605 stn %g0, [THREAD_REG + T_LOFAULT] ! remove the lofault handler
1925 1606
1926 1607 bz,a,pt %xcc, 1f
1927 1608 wr %g0, %l0, %fprs ! restore %fprs
1928 1609
1929 1610 ld [%fp + STACK_BIAS - 4], %f0 ! restore %f0
1930 1611 1:
1931 1612
1932 1613 wrpr %g0, %l1, %pstate ! restore interrupts
1933 1614
1934 1615 ret
1935 1616 restore %g0, %g0, %o0
1936 1617
1937 1618 0:
1938 1619 membar #Sync
1939 1620 stn %g0, [THREAD_REG + T_LOFAULT] ! remove the lofault handler
1940 1621
1941 1622 bz,a,pt %xcc, 1f
1942 1623 wr %g0, %l0, %fprs ! restore %fprs
1943 1624
1944 1625 ld [%fp + STACK_BIAS - 4], %f0 ! restore %f0
1945 1626 1:
1946 1627
1947 1628 wrpr %g0, %l1, %pstate ! restore interrupts
1948 1629
1949 1630 /*
1950 1631 * If tryagain is set (%i2) we tail-call dtrace_blksuword32_err()
1951 1632 * which deals with watchpoints. Otherwise, just return -1.
1952 1633 */
1953 1634 brnz,pt %i2, 1f
1954 1635 nop
1955 1636 ret
1956 1637 restore %g0, -1, %o0
1957 1638 1:
1958 1639 call dtrace_blksuword32_err
1959 1640 restore
1960 1641
1961 1642 SET_SIZE(dtrace_blksuword32)
1962 -#endif /* lint */
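The pattern above is the kernel's standard lofault dance: point the thread's T_LOFAULT at the local recovery label (0:), attempt the FPU block-commit store to user space, then disarm the handler; a fault during the store resumes at 0: with the FPU state restored, after which the routine either tail-calls dtrace_blksuword32_err() or fails with -1. A C-shaped sketch of just that control flow (the protected store is a hypothetical callback; the error routine's signature is inferred from the tail call):

	#include <stdint.h>

	extern int dtrace_blksuword32_err(uintptr_t, uint32_t *);

	static int
	blksuword32_flow(uintptr_t addr, uint32_t *data, int tryagain,
	    int (*protected_store)(uintptr_t, uint32_t *))
	{
		if (protected_store(addr, data) == 0)
			return (0);		/* block store landed */
		if (tryagain)			/* watchpoint-aware slow path */
			return (dtrace_blksuword32_err(addr, data));
		return (-1);
	}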
1963 1643
1964 -#if defined(lint)
1965 -/*ARGSUSED*/
1966 -void
1967 -ras_cntr_reset(void *arg)
1968 -{
1969 -}
1970 -#else
1971 1644 ENTRY_NP(ras_cntr_reset)
1972 1645 set OPL_SCRATCHPAD_ERRLOG, %o1
1973 1646 ldxa [%o1]ASI_SCRATCHPAD, %o0
1974 1647 or %o0, ERRLOG_REG_NUMERR_MASK, %o0
1975 1648 retl
1976 1649 stxa %o0, [%o1]ASI_SCRATCHPAD
1977 1650 SET_SIZE(ras_cntr_reset)
1978 -#endif /* lint */
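ras_cntr_reset is a read-modify-write of the error-log scratchpad register that forces the NUMERR field back to all ones. In C, assuming the register and mask names from <sys/opl_olympus_regs.h> and hypothetical scratchpad accessors for the ldxa/stxa pair:

	#include <sys/types.h>
	#include <sys/opl_olympus_regs.h>	/* OPL_SCRATCHPAD_ERRLOG, masks */

	extern uint64_t scratchpad_read(uint64_t);		/* hypothetical ldxa */
	extern void scratchpad_write(uint64_t, uint64_t);	/* hypothetical stxa */

	static void
	ras_cntr_reset_sketch(void)
	{
		uint64_t v = scratchpad_read(OPL_SCRATCHPAD_ERRLOG);
		scratchpad_write(OPL_SCRATCHPAD_ERRLOG,
		    v | ERRLOG_REG_NUMERR_MASK);
	}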
1979 1651
1980 -#if defined(lint)
1981 -/* ARGSUSED */
1982 -void
1983 -opl_error_setup(uint64_t cpu_err_log_pa)
1984 -{
1985 -}
1986 -
1987 -#else /* lint */
1988 1652 ENTRY_NP(opl_error_setup)
1989 1653 /*
1990 1654 * Initialize the error log scratchpad register
1991 1655 */
1992 1656 ldxa [%g0]ASI_EIDR, %o2
1993 1657 sethi %hi(ERRLOG_REG_EIDR_MASK), %o1
1994 1658 or %o1, %lo(ERRLOG_REG_EIDR_MASK), %o1
1995 1659 and %o2, %o1, %o3
1996 1660 sllx %o3, ERRLOG_REG_EIDR_SHIFT, %o2
1997 1661 or %o2, %o0, %o3
1998 1662 or %o3, ERRLOG_REG_NUMERR_MASK, %o0
1999 1663 set OPL_SCRATCHPAD_ERRLOG, %o1
2000 1664 stxa %o0, [%o1]ASI_SCRATCHPAD
2001 1665 /*
2002 1666 * Disable all restrainable error traps
2003 1667 */
2004 1668 mov AFSR_ECR, %o1
2005 1669 ldxa [%o1]ASI_AFSR, %o0
2006 1670 andn %o0, ASI_ECR_RTE_UE|ASI_ECR_RTE_CEDG, %o0
2007 1671 retl
2008 1672 stxa %o0, [%o1]ASI_AFSR
2009 1673 SET_SIZE(opl_error_setup)
2010 -#endif /* lint */
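opl_error_setup packs three fields into the scratchpad: the CPU's EIDR masked and shifted into place, the physical address of this CPU's error-log buffer (the %o0 argument), and a saturated NUMERR field; it then clears the UE/CEDG restrainable-trap enables in AFSR_ECR. The value composition, as C (names from <sys/opl_olympus_regs.h>):

	#include <sys/types.h>
	#include <sys/opl_olympus_regs.h>

	/* the value assembled in %o0 before the stxa to OPL_SCRATCHPAD_ERRLOG */
	static inline uint64_t
	errlog_scratchpad_value(uint64_t eidr, uint64_t cpu_err_log_pa)
	{
		return (((eidr & ERRLOG_REG_EIDR_MASK) << ERRLOG_REG_EIDR_SHIFT) |
		    cpu_err_log_pa | ERRLOG_REG_NUMERR_MASK);
	}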
2011 1674
2012 -#if defined(lint)
2013 -/* ARGSUSED */
2014 -void
2015 -cpu_early_feature_init(void)
2016 -{
2017 -}
2018 -#else /* lint */
2019 1675 ENTRY_NP(cpu_early_feature_init)
2020 1676 /*
2021 1677 * Enable MMU translating multiple page sizes for
2022 1678 * sITLB and sDTLB.
2023 1679 */
2024 1680 mov LSU_MCNTL, %o0
2025 1681 ldxa [%o0] ASI_MCNTL, %o1
2026 1682 or %o1, MCNTL_MPG_SITLB | MCNTL_MPG_SDTLB, %o1
2027 1683 stxa %o1, [%o0] ASI_MCNTL
2028 1684 /*
2029 1685 * Demap all previous entries.
2030 1686 */
2031 1687 sethi %hi(FLUSH_ADDR), %o1
2032 1688 set DEMAP_ALL_TYPE, %o0
2033 1689 stxa %g0, [%o0]ASI_DTLB_DEMAP
2034 1690 stxa %g0, [%o0]ASI_ITLB_DEMAP
2035 1691 retl
2036 1692 flush %o1
2037 1693 SET_SIZE(cpu_early_feature_init)
2038 -#endif /* lint */
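cpu_early_feature_init does two ordered things: turn on the MPG bits in LSU_MCNTL so the shared sITLB/sDTLB can translate multiple page sizes, then demap everything so no translation installed under the old setting survives. Sketched in C with hypothetical ASI accessors (register and bit names from the OPL headers):

	#include <sys/types.h>

	extern uint64_t mcntl_read(uint64_t);		/* ldxa [reg]ASI_MCNTL */
	extern void mcntl_write(uint64_t, uint64_t);	/* stxa [reg]ASI_MCNTL */
	extern void tlb_demap_all(void);		/* the DEMAP_ALL_TYPE stores */

	static void
	early_feature_init_sketch(void)
	{
		mcntl_write(LSU_MCNTL, mcntl_read(LSU_MCNTL) |
		    MCNTL_MPG_SITLB | MCNTL_MPG_SDTLB);
		tlb_demap_all();	/* flush entries installed under the old mode */
	}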
2039 1694
2040 -#if defined(lint)
2041 1695 /*
2042 1696 * This function is called for each (enabled) CPU. We use it to
2043 1697 * initialize error handling related registers.
2044 1698 */
2045 -/*ARGSUSED*/
2046 -void
2047 -cpu_feature_init(void)
2048 -{}
2049 -#else /* lint */
2050 1699 ENTRY(cpu_feature_init)
2051 1700 !
2052 1701 	! get the device_id and store it in the
2053 1702 	! appropriate cpunodes structure, given
2054 1703 	! the cpu's index
2055 1704 !
2056 1705 CPU_INDEX(%o0, %o1)
2057 1706 mulx %o0, CPU_NODE_SIZE, %o0
2058 1707 set cpunodes + DEVICE_ID, %o1
2059 1708 ldxa [%g0] ASI_DEVICE_SERIAL_ID, %o2
2060 1709 stx %o2, [%o0 + %o1]
2061 1710 !
2062 1711 ! initialize CPU registers
2063 1712 !
2064 1713 ba opl_cpu_reg_init
2065 1714 nop
2066 1715 SET_SIZE(cpu_feature_init)
2067 -#endif /* lint */
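cpu_feature_init records the strand's device serial id in its cpunodes entry (CPU_NODE_SIZE and DEVICE_ID are assym.h offsets describing that array) and then branches into opl_cpu_reg_init rather than returning. In C terms, roughly (the struct is reduced to the one field used here, so layout is illustrative only):

	#include <sys/types.h>

	struct cpu_node_sketch { uint64_t device_id; /* ... */ };
	extern struct cpu_node_sketch cpunodes[];
	extern uint64_t rd_device_serial_id(void);	/* the ldxa above */
	extern void opl_cpu_reg_init(void);

	static void
	feature_init_sketch(unsigned cpu_index)
	{
		cpunodes[cpu_index].device_id = rd_device_serial_id();
		opl_cpu_reg_init();	/* the asm tail-branches here */
	}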
2068 1716
2069 -#if defined(lint)
2070 -
2071 -void
2072 -cpu_cleartickpnt(void)
2073 -{}
2074 -
2075 -#else /* lint */
2076 1717 /*
2077 1718 * Clear the NPT (non-privileged trap) bit in the %tick/%stick
2078 1719 * registers. In an effort to make the change in the
2079 1720 * tick/stick counter as consistent as possible, we disable
2080 1721 * all interrupts while we're changing the registers. We also
2081 1722 * ensure that the read and write instructions are in the same
2082 1723 * line in the instruction cache.
2083 1724 */
2084 1725 ENTRY_NP(cpu_clearticknpt)
2085 1726 rdpr %pstate, %g1 /* save processor state */
2086 1727 andn %g1, PSTATE_IE, %g3 /* turn off */
2087 1728 wrpr %g0, %g3, %pstate /* interrupts */
2088 1729 rdpr %tick, %g2 /* get tick register */
2089 1730 brgez,pn %g2, 1f /* if NPT bit off, we're done */
2090 1731 mov 1, %g3 /* create mask */
2091 1732 sllx %g3, 63, %g3 /* for NPT bit */
2092 1733 ba,a,pt %xcc, 2f
2093 1734 .align 8 /* Ensure rd/wr in same i$ line */
2094 1735 2:
2095 1736 rdpr %tick, %g2 /* get tick register */
2096 1737 wrpr %g3, %g2, %tick /* write tick register, */
2097 1738 /* clearing NPT bit */
2098 1739 1:
2099 1740 rd STICK, %g2 /* get stick register */
2100 1741 brgez,pn %g2, 3f /* if NPT bit off, we're done */
2101 1742 mov 1, %g3 /* create mask */
2102 1743 sllx %g3, 63, %g3 /* for NPT bit */
2103 1744 ba,a,pt %xcc, 4f
2104 1745 .align 8 /* Ensure rd/wr in same i$ line */
2105 1746 4:
2106 1747 rd STICK, %g2 /* get stick register */
2107 1748 wr %g3, %g2, STICK /* write stick register, */
2108 1749 /* clearing NPT bit */
2109 1750 3:
2110 1751 jmp %g4 + 4
2111 1752 wrpr %g0, %g1, %pstate /* restore processor state */
2112 1753
2113 1754 SET_SIZE(cpu_clearticknpt)
2114 1755
2115 -#endif /* lint */
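The unusual-looking "wrpr %g3, %g2, %tick" works because the two-register forms of wr/wrpr write rs1 XOR rs2: XORing the just-read counter with a bit-63 mask clears NPT while leaving the count untouched, and brgez skips the write whenever the bit is already clear (bit 63 set makes the value negative). As C:

	#include <sys/types.h>

	#define	NPT_BIT	(1ULL << 63)

	static inline uint64_t
	npt_cleared(uint64_t counter)
	{
		/* wrpr %g3, %g2, %tick writes %g3 ^ %g2 */
		return (((int64_t)counter < 0) ? (counter ^ NPT_BIT) : counter);
	}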
2116 -
2117 -#if defined(lint)
2118 -
2119 -void
2120 -cpu_halt_cpu(void)
2121 -{}
2122 -
2123 -void
2124 -cpu_smt_pause(void)
2125 -{}
2126 -
2127 -#else /* lint */
2128 -
2129 1756 /*
2130 1757 * Halt the current strand with the suspend instruction.
2131 1758 * The compiler/asm currently does not support this suspend
2132 1759 	 * instruction mnemonic, so we use byte code for now.
2133 1760 */
2134 1761 ENTRY_NP(cpu_halt_cpu)
2135 1762 .word 0x81b01040
2136 1763 retl
2137 1764 nop
2138 1765 SET_SIZE(cpu_halt_cpu)
2139 1766
2140 1767 /*
2141 1768 * Pause the current strand with the sleep instruction.
2142 1769 * The compiler/asm currently does not support this sleep
2143 1770 	 * instruction mnemonic, so we use byte code for now.
2144 1771 */
2145 1772 ENTRY_NP(cpu_smt_pause)
2146 1773 .word 0x81b01060
2147 1774 retl
2148 1775 nop
2149 1776 SET_SIZE(cpu_smt_pause)
2150 1777
2151 -#endif /* lint */
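The .word values 0x81b01040 and 0x81b01060 are the SPARC64-VI suspend and sleep opcodes placed directly into the instruction stream, since the assembler of the day did not know the mnemonics. If the same trick were ever needed from C, inline asm can emit the pattern the same way (a sketch, assuming a GCC-style compiler on this target):

	/* emit the suspend opcode directly, as the .word above does */
	static inline void
	strand_suspend(void)
	{
		__asm__ __volatile__(".word 0x81b01040" : : : "memory");
	}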