/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 *
 * Assembly code support for the Olympus-C module
 */

#include "assym.h"

#include <sys/asm_linkage.h>
#include <sys/mmu.h>
#include <vm/hat_sfmmu.h>
#include <sys/machparam.h>
#include <sys/machcpuvar.h>
#include <sys/machthread.h>
#include <sys/machtrap.h>
#include <sys/privregs.h>
#include <sys/trap.h>
#include <sys/opl_olympus_regs.h>
#include <sys/opl_module.h>
#include <sys/xc_impl.h>
#include <sys/intreg.h>
#include <sys/async.h>
#include <sys/clock.h>
#include <sys/cmpregs.h>

#ifdef TRAPTRACE
#include <sys/traptrace.h>
#endif /* TRAPTRACE */

/*
 * Macro that flushes the entire Ecache.
 *
 * arg1 = ecache size
 * arg2 = ecache linesize
 * arg3 = ecache flush address - not used for Olympus-C
 */
#define	ECACHE_FLUSHALL(arg1, arg2, arg3, tmp1)				\
	mov	ASI_L2_CTRL_U2_FLUSH, arg1;				\
	mov	ASI_L2_CTRL_RW_ADDR, arg2;				\
	stxa	arg1, [arg2]ASI_L2_CTRL

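/*
 * Note that the whole flush is triggered by the single stxa to the L2
 * control ASI; arg1 and arg2 serve only as scratch registers here, and
 * arg3/tmp1 are unused, presumably retained so this macro keeps the same
 * signature as the ECACHE_FLUSHALL implementations of other cpu modules.
 */
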
/*
 * SPARC64-VI MMU and Cache operations.
 */

	ENTRY_NP(vtag_flushpage)
	/*
	 * flush page from the tlb
	 *
	 * %o0 = vaddr
	 * %o1 = sfmmup
	 */
	rdpr	%pstate, %o5
#ifdef DEBUG
	PANIC_IF_INTR_DISABLED_PSTR(%o5, opl_di_l3, %g1)
#endif /* DEBUG */
	/*
	 * disable ints
	 */
	andn	%o5, PSTATE_IE, %o4
	wrpr	%o4, 0, %pstate

	/*
	 * Then, blow out the tlb
	 * Interrupts are disabled to prevent the primary ctx register
	 * from changing underneath us.
	 */
	sethi	%hi(ksfmmup), %o3
	ldx	[%o3 + %lo(ksfmmup)], %o3
	cmp	%o3, %o1
	bne,pt	%xcc, 1f			! if not kernel as, go to 1
	  sethi	%hi(FLUSH_ADDR), %o3
	/*
	 * For Kernel demaps use primary. type = page implicitly
	 */
	stxa	%g0, [%o0]ASI_DTLB_DEMAP	/* dmmu flush for KCONTEXT */
	stxa	%g0, [%o0]ASI_ITLB_DEMAP	/* immu flush for KCONTEXT */
	flush	%o3
	retl
	  wrpr	%g0, %o5, %pstate		/* enable interrupts */
1:
	/*
	 * User demap.  We need to set the primary context properly.
	 * Secondary context cannot be used for SPARC64-VI IMMU.
	 * %o0 = vaddr
	 * %o1 = sfmmup
	 * %o3 = FLUSH_ADDR
	 */
	SFMMU_CPU_CNUM(%o1, %g1, %g2)		! %g1 = sfmmu cnum on this CPU

	ldub	[%o1 + SFMMU_CEXT], %o4		! %o4 = sfmmup->sfmmu_cext
	sll	%o4, CTXREG_EXT_SHIFT, %o4
	or	%g1, %o4, %g1			! %g1 = primary pgsz | cnum

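	/*
	 * The value written to MMU_PCONTEXT below is assembled as
	 * (nucleus pgsz << CTXREG_NEXT_SHIFT) | (cext << CTXREG_EXT_SHIFT) |
	 * cnum; the nucleus page-size field is read back from the old
	 * context register so that it is preserved across the demap.
	 */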
	wrpr	%g0, 1, %tl
	set	MMU_PCONTEXT, %o4
	or	DEMAP_PRIMARY | DEMAP_PAGE_TYPE, %o0, %o0
	ldxa	[%o4]ASI_DMMU, %o2		! %o2 = save old ctxnum
	srlx	%o2, CTXREG_NEXT_SHIFT, %o1	! need to preserve nucleus pgsz
	sllx	%o1, CTXREG_NEXT_SHIFT, %o1	! %o1 = nucleus pgsz
	or	%g1, %o1, %g1			! %g1 = nucleus pgsz | primary pgsz | cnum
	stxa	%g1, [%o4]ASI_DMMU		! wr new ctxnum

	stxa	%g0, [%o0]ASI_DTLB_DEMAP
	stxa	%g0, [%o0]ASI_ITLB_DEMAP
	stxa	%o2, [%o4]ASI_DMMU		/* restore old ctxnum */
	flush	%o3
	wrpr	%g0, 0, %tl

	retl
	wrpr	%g0, %o5, %pstate		/* enable interrupts */
	SET_SIZE(vtag_flushpage)


	ENTRY_NP2(vtag_flushall, demap_all)
	/*
	 * flush the tlb
	 */
	sethi	%hi(FLUSH_ADDR), %o3
	set	DEMAP_ALL_TYPE, %g1
	stxa	%g0, [%g1]ASI_DTLB_DEMAP
	stxa	%g0, [%g1]ASI_ITLB_DEMAP
	flush	%o3
	retl
	nop
	SET_SIZE(demap_all)
	SET_SIZE(vtag_flushall)


	ENTRY_NP(vtag_flushpage_tl1)
	/*
	 * x-trap to flush page from tlb and tsb
	 *
	 * %g1 = vaddr, zero-extended on 32-bit kernel
	 * %g2 = sfmmup
	 *
	 * assumes TSBE_TAG = 0
	 */
	srln	%g1, MMU_PAGESHIFT, %g1

	sethi	%hi(ksfmmup), %g3
	ldx	[%g3 + %lo(ksfmmup)], %g3
	cmp	%g3, %g2
	bne,pt	%xcc, 1f			! if not kernel as, go to 1
	  slln	%g1, MMU_PAGESHIFT, %g1		/* g1 = vaddr */

	/* We need to demap in the kernel context */
	or	DEMAP_NUCLEUS | DEMAP_PAGE_TYPE, %g1, %g1
	stxa	%g0, [%g1]ASI_DTLB_DEMAP
	stxa	%g0, [%g1]ASI_ITLB_DEMAP
	retry
1:
	/* We need to demap in a user context */
	or	DEMAP_PRIMARY | DEMAP_PAGE_TYPE, %g1, %g1

	SFMMU_CPU_CNUM(%g2, %g6, %g3)	! %g6 = sfmmu cnum on this CPU

	ldub	[%g2 + SFMMU_CEXT], %g4		! %g4 = sfmmup->cext
	sll	%g4, CTXREG_EXT_SHIFT, %g4
	or	%g6, %g4, %g6			! %g6 = primary pgsz | cnum

	set	MMU_PCONTEXT, %g4
	ldxa	[%g4]ASI_DMMU, %g5		! %g5 = save old ctxnum
	srlx	%g5, CTXREG_NEXT_SHIFT, %g2	! %g2 = nucleus pgsz
	sllx	%g2, CTXREG_NEXT_SHIFT, %g2	! preserve nucleus pgsz
	or	%g6, %g2, %g6			! %g6 = nucleus pgsz | primary pgsz | cnum
	stxa	%g6, [%g4]ASI_DMMU		! wr new ctxnum
	stxa	%g0, [%g1]ASI_DTLB_DEMAP
	stxa	%g0, [%g1]ASI_ITLB_DEMAP
	stxa	%g5, [%g4]ASI_DMMU		! restore old ctxnum
	retry
	SET_SIZE(vtag_flushpage_tl1)


	ENTRY_NP(vtag_flush_pgcnt_tl1)
	/*
	 * x-trap to flush pgcnt MMU_PAGESIZE pages from tlb
	 *
	 * %g1 = vaddr, zero-extended on 32-bit kernel
	 * %g2 = <sfmmup58|pgcnt6>
	 *
	 * NOTE: this handler relies on the fact that no
	 *	interrupts or traps can occur during the loop
	 *	issuing the TLB_DEMAP operations. It is assumed
	 *	that interrupts are disabled and this code is
	 *	fetching from the kernel locked text address.
	 *
	 * assumes TSBE_TAG = 0
	 */
	set	SFMMU_PGCNT_MASK, %g4
	and	%g4, %g2, %g3			/* g3 = pgcnt - 1 */
	add	%g3, 1, %g3			/* g3 = pgcnt */

	andn	%g2, SFMMU_PGCNT_MASK, %g2	/* g2 = sfmmup */
	srln	%g1, MMU_PAGESHIFT, %g1

	sethi	%hi(ksfmmup), %g4
	ldx	[%g4 + %lo(ksfmmup)], %g4
	cmp	%g4, %g2
	bne,pn	%xcc, 1f			/* if not kernel as, go to 1 */
	  slln	%g1, MMU_PAGESHIFT, %g1		/* g1 = vaddr */

	/* We need to demap in the kernel context */
	or	DEMAP_NUCLEUS | DEMAP_PAGE_TYPE, %g1, %g1
	set	MMU_PAGESIZE, %g2		/* g2 = pgsize */
	sethi	%hi(FLUSH_ADDR), %g5
4:
	stxa	%g0, [%g1]ASI_DTLB_DEMAP
	stxa	%g0, [%g1]ASI_ITLB_DEMAP
	flush	%g5				! flush required by immu

	deccc	%g3				/* decr pgcnt */
	bnz,pt	%icc,4b
	  add	%g1, %g2, %g1			/* next page */
	retry
1:
	/*
	 * We need to demap in a user context
	 *
	 * g2 = sfmmup
	 * g3 = pgcnt
	 */
	SFMMU_CPU_CNUM(%g2, %g5, %g6)		! %g5 = sfmmu cnum on this CPU

	or	DEMAP_PRIMARY | DEMAP_PAGE_TYPE, %g1, %g1

	ldub	[%g2 + SFMMU_CEXT], %g4		! %g4 = sfmmup->cext
	sll	%g4, CTXREG_EXT_SHIFT, %g4
	or	%g5, %g4, %g5

	set	MMU_PCONTEXT, %g4
	ldxa	[%g4]ASI_DMMU, %g6		/* rd old ctxnum */
	srlx	%g6, CTXREG_NEXT_SHIFT, %g2	/* %g2 = nucleus pgsz */
	sllx	%g2, CTXREG_NEXT_SHIFT, %g2	/* preserve nucleus pgsz */
	or	%g5, %g2, %g5			/* %g5 = nucleus pgsz | primary pgsz | cnum */
	stxa	%g5, [%g4]ASI_DMMU		/* wr new ctxnum */

	set	MMU_PAGESIZE, %g2		/* g2 = pgsize */
	sethi	%hi(FLUSH_ADDR), %g5
3:
	stxa	%g0, [%g1]ASI_DTLB_DEMAP
	stxa	%g0, [%g1]ASI_ITLB_DEMAP
	flush	%g5				! flush required by immu

	deccc	%g3				/* decr pgcnt */
	bnz,pt	%icc,3b
	  add	%g1, %g2, %g1			/* next page */

	stxa	%g6, [%g4]ASI_DMMU		/* restore old ctxnum */
	retry
	SET_SIZE(vtag_flush_pgcnt_tl1)


	ENTRY_NP(vtag_flushall_tl1)
	/*
	 * x-trap to flush tlb
	 */
	set	DEMAP_ALL_TYPE, %g4
	stxa	%g0, [%g4]ASI_DTLB_DEMAP
	stxa	%g0, [%g4]ASI_ITLB_DEMAP
	retry
	SET_SIZE(vtag_flushall_tl1)


/*
 * VAC (virtual address conflict) does not apply to OPL.
 * VAC resolution is managed by the Olympus processor hardware.
 * As a result, all OPL VAC flushing routines are no-ops.
 */

	ENTRY(vac_flushpage)
	retl
	  nop
	SET_SIZE(vac_flushpage)

	ENTRY_NP(vac_flushpage_tl1)
	retry
	SET_SIZE(vac_flushpage_tl1)


	ENTRY(vac_flushcolor)
	retl
	 nop
	SET_SIZE(vac_flushcolor)



	ENTRY(vac_flushcolor_tl1)
	retry
	SET_SIZE(vac_flushcolor_tl1)

/*
 * Determine whether or not the IDSR is busy.
 * Entry: no arguments
 * Returns: 1 if busy, 0 otherwise
 */
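/*
 * Note that the annulled delay slot below is executed only when the
 * branch is taken, so %o0 is set to 1 only on the busy path.
 */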
	ENTRY(idsr_busy)
	ldxa	[%g0]ASI_INTR_DISPATCH_STATUS, %g1
	clr	%o0
	btst	IDSR_BUSY, %g1
	bnz,a,pt %xcc, 1f
	mov	1, %o0
1:
	retl
	nop
	SET_SIZE(idsr_busy)

	.global _dispatch_status_busy
_dispatch_status_busy:
	.asciz	"ASI_INTR_DISPATCH_STATUS error: busy"
	.align	4

/*
 * Setup interrupt dispatch data registers
 * Entry:
 *	%o0 - function or inumber to call
 *	%o1, %o2 - arguments (2 uint64_t's)
 */
	.seg "text"

	ENTRY(init_mondo)
#ifdef DEBUG
	!
	! IDSR should not be busy at the moment
	!
	ldxa	[%g0]ASI_INTR_DISPATCH_STATUS, %g1
	btst	IDSR_BUSY, %g1
	bz,pt	%xcc, 1f
	nop
	sethi	%hi(_dispatch_status_busy), %o0
	call	panic
	or	%o0, %lo(_dispatch_status_busy), %o0
#endif /* DEBUG */

	ALTENTRY(init_mondo_nocheck)
	!
	! interrupt vector dispatch data reg 0
	!
1:
	mov	IDDR_0, %g1
	mov	IDDR_1, %g2
	mov	IDDR_2, %g3
	stxa	%o0, [%g1]ASI_INTR_DISPATCH

	!
	! interrupt vector dispatch data reg 1
	!
	stxa	%o1, [%g2]ASI_INTR_DISPATCH

	!
	! interrupt vector dispatch data reg 2
	!
	stxa	%o2, [%g3]ASI_INTR_DISPATCH

	membar	#Sync
	retl
	nop
	SET_SIZE(init_mondo_nocheck)
	SET_SIZE(init_mondo)


/*
 * Ship mondo to aid using busy/nack pair bn
 */
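/*
 * For illustration only (the values are hypothetical): an agent id of 0x3
 * with busy/nack pair 0x1 composes an IDCR value of
 * (0x1 << IDCR_BN_SHIFT) | (0x3 << IDCR_PID_SHIFT) | 0x70.
 */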
	ENTRY_NP(shipit)
	sll	%o0, IDCR_PID_SHIFT, %g1	! IDCR<23:14> = agent id
	sll	%o1, IDCR_BN_SHIFT, %g2		! IDCR<28:24> = b/n pair
	or	%g1, IDCR_OFFSET, %g1		! IDCR<13:0> = 0x70
	or	%g1, %g2, %g1
	stxa	%g0, [%g1]ASI_INTR_DISPATCH	! interrupt vector dispatch
	membar	#Sync
	retl
	nop
	SET_SIZE(shipit)

/*
 * flush_instr_mem:
 *	Flush 1 page of the I-$ starting at vaddr
 *	%o0 vaddr
 *	%o1 bytes to be flushed
 *
 * SPARC64-VI maintains consistency of the on-chip Instruction Cache with
 * the stores from all processors, so a FLUSH instruction is only needed
 * to ensure the pipeline is consistent. This means a single flush is
 * sufficient at the end of a sequence of stores that updates the
 * instruction stream to ensure correct operation.
 */
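/*
 * As a consequence, the byte count in %o1 is not consulted below; a
 * single flush of any address suffices on this implementation.
 */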

	ENTRY(flush_instr_mem)
	flush	%o0			! address irrelevant
	retl
	nop
	SET_SIZE(flush_instr_mem)


/*
 * flush_ecache:
 *	%o0 - 64 bit physical address
 *	%o1 - ecache size
 *	%o2 - ecache linesize
 */

	ENTRY(flush_ecache)

	/*
	 * Flush the entire Ecache.
	 */
	ECACHE_FLUSHALL(%o1, %o2, %o0, %o4)
	retl
	nop
	SET_SIZE(flush_ecache)

	/*
	 * I/D cache flushing is not needed for OPL processors
	 */
	ENTRY(kdi_flush_idcache)
	retl
	nop
	SET_SIZE(kdi_flush_idcache)

#ifdef	TRAPTRACE
/*
 * Simplified trap trace macro for OPL. Adapted from us3.
 */
#define	OPL_TRAPTRACE(ptr, scr1, scr2, label)			\
	CPU_INDEX(scr1, ptr);					\
	sll	scr1, TRAPTR_SIZE_SHIFT, scr1;			\
	set	trap_trace_ctl, ptr;				\
	add	ptr, scr1, scr1;				\
	ld	[scr1 + TRAPTR_LIMIT], ptr;			\
	tst	ptr;						\
	be,pn	%icc, label/**/1;				\
	 ldx	[scr1 + TRAPTR_PBASE], ptr;			\
	ld	[scr1 + TRAPTR_OFFSET], scr1;			\
	add	ptr, scr1, ptr;					\
	rd	%asi, scr2;					\
	wr	%g0, TRAPTR_ASI, %asi;				\
	rd	STICK, scr1;					\
	stxa	scr1, [ptr + TRAP_ENT_TICK]%asi;		\
	rdpr	%tl, scr1;					\
	stha	scr1, [ptr + TRAP_ENT_TL]%asi;			\
	rdpr	%tt, scr1;					\
	stha	scr1, [ptr + TRAP_ENT_TT]%asi;			\
	rdpr	%tpc, scr1;					\
	stna	scr1, [ptr + TRAP_ENT_TPC]%asi;			\
	rdpr	%tstate, scr1;					\
	stxa	scr1, [ptr + TRAP_ENT_TSTATE]%asi;		\
	stna	%sp, [ptr + TRAP_ENT_SP]%asi;			\
	stna	%g0, [ptr + TRAP_ENT_TR]%asi;			\
	stna	%g0, [ptr + TRAP_ENT_F1]%asi;			\
	stna	%g0, [ptr + TRAP_ENT_F2]%asi;			\
	stna	%g0, [ptr + TRAP_ENT_F3]%asi;			\
	stna	%g0, [ptr + TRAP_ENT_F4]%asi;			\
	wr	%g0, scr2, %asi;				\
	CPU_INDEX(ptr, scr1);					\
	sll	ptr, TRAPTR_SIZE_SHIFT, ptr;			\
	set	trap_trace_ctl, scr1;				\
	add	scr1, ptr, ptr;					\
	ld	[ptr + TRAPTR_OFFSET], scr1;			\
	ld	[ptr + TRAPTR_LIMIT], scr2;			\
	st	scr1, [ptr + TRAPTR_LAST_OFFSET];		\
	add	scr1, TRAP_ENT_SIZE, scr1;			\
	sub	scr2, TRAP_ENT_SIZE, scr2;			\
	cmp	scr1, scr2;					\
	movge	%icc, 0, scr1;					\
	st	scr1, [ptr + TRAPTR_OFFSET];			\
label/**/1:
#endif	/* TRAPTRACE */


/*
 * Macros facilitating error handling.
 */

/*
 * Save alternative global registers reg1, reg2, reg3
 * to scratchpad registers 1, 2, 3 respectively.
 */
#define	OPL_SAVE_GLOBAL(reg1, reg2, reg3)	\
	stxa	reg1, [%g0]ASI_SCRATCHPAD	;\
	mov	OPL_SCRATCHPAD_SAVE_AG2, reg1	;\
	stxa	reg2, [reg1]ASI_SCRATCHPAD	;\
	mov	OPL_SCRATCHPAD_SAVE_AG3, reg1	;\
	stxa	reg3, [reg1]ASI_SCRATCHPAD

/*
 * Restore alternative global registers reg1, reg2, reg3
 * from scratchpad registers 1, 2, 3 respectively.
 */
#define	OPL_RESTORE_GLOBAL(reg1, reg2, reg3)		\
	mov	OPL_SCRATCHPAD_SAVE_AG3, reg1		;\
	ldxa	[reg1]ASI_SCRATCHPAD, reg3		;\
	mov	OPL_SCRATCHPAD_SAVE_AG2, reg1		;\
	ldxa	[reg1]ASI_SCRATCHPAD, reg2		;\
	ldxa	[%g0]ASI_SCRATCHPAD, reg1

/*
 * Logs value `val' into the member `offset' of a structure
 * at physical address `pa'
 */
#define	LOG_REG(pa, offset, val)			\
	add	pa, offset, pa				;\
	stxa	val, [pa]ASI_MEM

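/*
 * Note that LOG_REG advances `pa' by `offset' as a side effect, which is
 * why the LOG_SYNC_REG/LOG_UGER_REG macros below re-derive the log
 * address before each LOG_REG use.
 */
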
#define	FLUSH_ALL_TLB(tmp1)				\
	set	DEMAP_ALL_TYPE, tmp1			;\
	stxa	%g0, [tmp1]ASI_ITLB_DEMAP		;\
	stxa	%g0, [tmp1]ASI_DTLB_DEMAP		;\
	sethi	%hi(FLUSH_ADDR), tmp1			;\
	flush	tmp1

/*
 * Extracts the Physaddr to Logging Buffer field of the OPL_SCRATCHPAD_ERRLOG
 * scratch register by zeroing all other fields. Result is in pa.
 */
#define	LOG_ADDR(pa)						\
	mov	OPL_SCRATCHPAD_ERRLOG, pa			;\
	ldxa	[pa]ASI_SCRATCHPAD, pa				;\
	sllx	pa, 64-ERRLOG_REG_EIDR_SHIFT, pa		;\
	srlx	pa, 64-ERRLOG_REG_EIDR_SHIFT+ERRLOG_REG_ERR_SHIFT, pa	;\
	sllx	pa, ERRLOG_REG_ERR_SHIFT, pa
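
/*
 * The sllx/srlx/sllx sequence above is a mask-free way to isolate the
 * middle field: the first shift discards the bits above
 * ERRLOG_REG_EIDR_SHIFT, the second discards the bits below
 * ERRLOG_REG_ERR_SHIFT, and the final shift realigns the field to its
 * original position.
 */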

/*
 * Advance the per-cpu error log buffer pointer to the next
 * ERRLOG_SZ entry, making sure that it wraps around at the
 * ERRLOG_BUFSZ boundary. The args logpa, bufmask, tmp are
 * scratch registers for this macro; their incoming values are unused.
 *
 * Algorithm:
 * 1. logpa = contents of errorlog scratchpad register
 * 2. bufmask = ERRLOG_BUFSZ - 1
 * 3. tmp = logpa & ~(bufmask)     (tmp is now logbase)
 * 4. logpa += ERRLOG_SZ
 * 5. logpa = logpa & bufmask      (get new offset to logbase)
 * 6. logpa = tmp | logpa
 * 7. write logpa back into errorlog scratchpad register
 *
 * new logpa = (logpa & ~bufmask) | ((logpa + ERRLOG_SZ) & bufmask)
 *
 */
#define	UPDATE_LOGADD(logpa, bufmask, tmp)		\
	set	OPL_SCRATCHPAD_ERRLOG, tmp		;\
	ldxa	[tmp]ASI_SCRATCHPAD, logpa		;\
	set	(ERRLOG_BUFSZ-1), bufmask		;\
	andn	logpa, bufmask, tmp			;\
	add	logpa, ERRLOG_SZ, logpa			;\
	and	logpa, bufmask, logpa			;\
	or	tmp, logpa, logpa			;\
	set	OPL_SCRATCHPAD_ERRLOG, tmp		;\
	stxa	logpa, [tmp]ASI_SCRATCHPAD
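
/*
 * A worked example with hypothetical sizes (the real values come from
 * ERRLOG_SZ and ERRLOG_BUFSZ): with a 0x2000-byte buffer of 0x80-byte
 * entries, logpa 0x10001f80 yields logbase 0x10000000 and new offset
 * (0x1f80 + 0x80) & 0x1fff = 0, so the pointer wraps to 0x10000000.
 */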

/* Log error status registers into the log buffer */
#define	LOG_SYNC_REG(sfsr, sfar, tmp)			\
	LOG_ADDR(tmp)					;\
	LOG_REG(tmp, LOG_SFSR_OFF, sfsr)		;\
	LOG_ADDR(tmp)					;\
	mov	tmp, sfsr				;\
	LOG_REG(tmp, LOG_SFAR_OFF, sfar)		;\
	rd	STICK, sfar				;\
	mov	sfsr, tmp				;\
	LOG_REG(tmp, LOG_STICK_OFF, sfar)		;\
	rdpr	%tl, tmp				;\
	sllx	tmp, 32, sfar				;\
	rdpr	%tt, tmp				;\
	or	sfar, tmp, sfar				;\
	mov	sfsr, tmp				;\
	LOG_REG(tmp, LOG_TL_OFF, sfar)			;\
	set	OPL_SCRATCHPAD_ERRLOG, tmp		;\
	ldxa	[tmp]ASI_SCRATCHPAD, sfar		;\
	mov	sfsr, tmp				;\
	LOG_REG(tmp, LOG_ASI3_OFF, sfar)		;\
	rdpr	%tpc, sfar				;\
	mov	sfsr, tmp				;\
	LOG_REG(tmp, LOG_TPC_OFF, sfar)			;\
	UPDATE_LOGADD(sfsr, sfar, tmp)

#define	LOG_UGER_REG(uger, tmp, tmp2)			\
	LOG_ADDR(tmp)					;\
	mov	tmp, tmp2				;\
	LOG_REG(tmp2, LOG_UGER_OFF, uger)		;\
	mov	tmp, uger				;\
	rd	STICK, tmp2				;\
	LOG_REG(tmp, LOG_STICK_OFF, tmp2)		;\
	rdpr	%tl, tmp				;\
	sllx	tmp, 32, tmp2				;\
	rdpr	%tt, tmp				;\
	or	tmp2, tmp, tmp2				;\
	mov	uger, tmp				;\
	LOG_REG(tmp, LOG_TL_OFF, tmp2)			;\
	set	OPL_SCRATCHPAD_ERRLOG, tmp2		;\
	ldxa	[tmp2]ASI_SCRATCHPAD, tmp2		;\
	mov	uger, tmp				;\
	LOG_REG(tmp, LOG_ASI3_OFF, tmp2)		;\
	rdpr	%tstate, tmp2				;\
	mov	uger, tmp				;\
	LOG_REG(tmp, LOG_TSTATE_OFF, tmp2)		;\
	rdpr	%tpc, tmp2				;\
	mov	uger, tmp				;\
	LOG_REG(tmp, LOG_TPC_OFF, tmp2)			;\
	UPDATE_LOGADD(uger, tmp, tmp2)

/*
 * Scrub the STICK_COMPARE register to clear the error by updating
 * it to a reasonable value for interrupt generation.
 * Ensure that we observe the CPU_ENABLE flag so that we don't
 * accidentally enable the TICK interrupt in STICK_COMPARE,
 * i.e. no clock interrupt will be generated if the CPU_ENABLE flag
 * is off.
 */
#define	UPDATE_STICK_COMPARE(tmp1, tmp2)		\
	CPU_ADDR(tmp1, tmp2)				;\
	lduh	[tmp1 + CPU_FLAGS], tmp2		;\
	andcc	tmp2, CPU_ENABLE, %g0			;\
	set	OPL_UGER_STICK_DIFF, tmp2		;\
	rd	STICK, tmp1				;\
	add	tmp1, tmp2, tmp1			;\
	mov	1, tmp2					;\
	sllx	tmp2, TICKINT_DIS_SHFT, tmp2		;\
	or	tmp1, tmp2, tmp2			;\
	movnz	%xcc, tmp1, tmp2			;\
	wr	tmp2, %g0, STICK_COMPARE
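
/*
 * The movnz in the macro above keys off the condition codes set by the
 * earlier andcc: if CPU_ENABLE was set, the TICKINT_DIS bit is dropped so
 * the new compare value can raise an interrupt; otherwise the bit is kept
 * and the interrupt stays disabled.
 */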

/*
 * Reset registers that may be corrupted by an IAUG_CRE error.
 * To update the interrupt-handling related registers, force a
 * clock interrupt.
 */
#define	IAG_CRE(tmp1, tmp2)				\
	set	OPL_SCRATCHPAD_ERRLOG, tmp1		;\
	ldxa	[tmp1]ASI_SCRATCHPAD, tmp1		;\
	srlx	tmp1, ERRLOG_REG_EIDR_SHIFT, tmp1	;\
	set	ERRLOG_REG_EIDR_MASK, tmp2		;\
	and	tmp1, tmp2, tmp1			;\
	stxa	tmp1, [%g0]ASI_EIDR			;\
	wr	%g0, 0, SOFTINT				;\
	sethi	%hi(hres_last_tick), tmp1		;\
	ldx	[tmp1 + %lo(hres_last_tick)], tmp1	;\
	set	OPL_UGER_STICK_DIFF, tmp2		;\
	add	tmp1, tmp2, tmp1			;\
	wr	tmp1, %g0, STICK			;\
	UPDATE_STICK_COMPARE(tmp1, tmp2)


#define	CLEAR_FPREGS(tmp)				\
	wr	%g0, FPRS_FEF, %fprs			;\
	wr	%g0, %g0, %gsr				;\
	sethi	%hi(opl_clr_freg), tmp			;\
	or	tmp, %lo(opl_clr_freg), tmp		;\
	ldx	[tmp], %fsr				;\
	fzero	%d0					;\
	fzero	%d2					;\
	fzero	%d4					;\
	fzero	%d6					;\
	fzero	%d8					;\
	fzero	%d10					;\
	fzero	%d12					;\
	fzero	%d14					;\
	fzero	%d16					;\
	fzero	%d18					;\
	fzero	%d20					;\
	fzero	%d22					;\
	fzero	%d24					;\
	fzero	%d26					;\
	fzero	%d28					;\
	fzero	%d30					;\
	fzero	%d32					;\
	fzero	%d34					;\
	fzero	%d36					;\
	fzero	%d38					;\
	fzero	%d40					;\
	fzero	%d42					;\
	fzero	%d44					;\
	fzero	%d46					;\
	fzero	%d48					;\
	fzero	%d50					;\
	fzero	%d52					;\
	fzero	%d54					;\
	fzero	%d56					;\
	fzero	%d58					;\
	fzero	%d60					;\
	fzero	%d62					;\
	wr	%g0, %g0, %fprs

#define	CLEAR_GLOBALS()					\
	mov	%g0, %g1				;\
	mov	%g0, %g2				;\
	mov	%g0, %g3				;\
	mov	%g0, %g4				;\
	mov	%g0, %g5				;\
	mov	%g0, %g6				;\
	mov	%g0, %g7

/*
 * We do not clear the alternative globals here because they
 * are scratch registers, i.e. there is no code that reads from
 * them without writing to them first. In other words, every
 * read is always preceded by a write, which makes an extra write
 * to the alternative globals unnecessary.
 */
#define	CLEAR_GEN_REGS(tmp1, label)			\
	set	TSTATE_KERN, tmp1			;\
	wrpr	%g0, tmp1, %tstate			;\
	mov	%g0, %y					;\
	mov	%g0, %asi				;\
	mov	%g0, %ccr				;\
	mov	%g0, %l0				;\
	mov	%g0, %l1				;\
	mov	%g0, %l2				;\
	mov	%g0, %l3				;\
	mov	%g0, %l4				;\
	mov	%g0, %l5				;\
	mov	%g0, %l6				;\
	mov	%g0, %l7				;\
	mov	%g0, %i0				;\
	mov	%g0, %i1				;\
	mov	%g0, %i2				;\
	mov	%g0, %i3				;\
	mov	%g0, %i4				;\
	mov	%g0, %i5				;\
	mov	%g0, %i6				;\
	mov	%g0, %i7				;\
	mov	%g0, %o1				;\
	mov	%g0, %o2				;\
	mov	%g0, %o3				;\
	mov	%g0, %o4				;\
	mov	%g0, %o5				;\
	mov	%g0, %o6				;\
	mov	%g0, %o7				;\
	mov	%g0, %o0				;\
	mov	%g0, %g4				;\
	mov	%g0, %g5				;\
	mov	%g0, %g6				;\
	mov	%g0, %g7				;\
	rdpr	%tl, tmp1				;\
	cmp	tmp1, 1					;\
	be,pt	%xcc, label/**/1			;\
	 rdpr	%pstate, tmp1				;\
	wrpr	tmp1, PSTATE_AG|PSTATE_IG, %pstate	;\
	CLEAR_GLOBALS()					;\
	rdpr	%pstate, tmp1				;\
	wrpr	tmp1, PSTATE_IG|PSTATE_MG, %pstate	;\
	CLEAR_GLOBALS()					;\
	rdpr	%pstate, tmp1				;\
	wrpr	tmp1, PSTATE_MG|PSTATE_AG, %pstate	;\
	ba,pt	%xcc, label/**/2			;\
	 nop						;\
label/**/1:						;\
	wrpr	tmp1, PSTATE_AG, %pstate		;\
	CLEAR_GLOBALS()					;\
	rdpr	%pstate, tmp1				;\
	wrpr	tmp1, PSTATE_AG, %pstate		;\
label/**/2:
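
/*
 * The global-set walk above relies on wrpr writing rs1 xor imm: each wrpr
 * toggles a pair of the PSTATE_AG/IG/MG bits, stepping through the
 * alternate, interrupt and MMU global sets and ending back where it
 * started.
 */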


/*
 * Reset all window related registers
 */
#define	RESET_WINREG(tmp)				\
	sethi	%hi(nwin_minus_one), tmp		;\
	ld	[tmp + %lo(nwin_minus_one)], tmp	;\
	wrpr	%g0, tmp, %cwp				;\
	wrpr	%g0, tmp, %cleanwin			;\
	sub	tmp, 1, tmp				;\
	wrpr	%g0, tmp, %cansave			;\
	wrpr	%g0, %g0, %canrestore			;\
	wrpr	%g0, %g0, %otherwin			;\
	wrpr	%g0, PIL_MAX, %pil			;\
	wrpr	%g0, WSTATE_KERN, %wstate
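
/*
 * With NWINDOWS-1 in %cwp/%cleanwin and NWINDOWS-2 in %cansave, the
 * settings above satisfy the V9 window invariant
 * CANSAVE + CANRESTORE + OTHERWIN = NWINDOWS - 2.
 */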


#define	RESET_PREV_TSTATE(tmp1, tmp2, label)		\
	rdpr	%tl, tmp1				;\
	subcc	tmp1, 1, tmp1				;\
	bz,pt	%xcc, label/**/1			;\
	 nop						;\
	wrpr	tmp1, %g0, %tl				;\
	set	TSTATE_KERN, tmp2			;\
	wrpr	tmp2, %g0, %tstate			;\
	wrpr	%g0, %g0, %tpc				;\
	wrpr	%g0, %g0, %tnpc				;\
	add	tmp1, 1, tmp1				;\
	wrpr	tmp1, %g0, %tl				;\
label/**/1:


/*
 * %pstate, %pc, %npc are propagated to %tstate, %tpc, %tnpc,
 * and we reset these registers here.
 */
#define	RESET_CUR_TSTATE(tmp)				\
	set	TSTATE_KERN, tmp			;\
	wrpr	%g0, tmp, %tstate			;\
	wrpr	%g0, 0, %tpc				;\
	wrpr	%g0, 0, %tnpc				;\
	RESET_WINREG(tmp)

/*
 * In case of urgent errors some MMU registers may be
 * corrupted, so we set some reasonable values for them here.
 * Note that resetting the MMU registers also resets the context
 * info; we then need to reset the window registers to prevent
 * spill/fill, which depends on the context info, from misbehaving.
 * Note that the TLBs must be flushed before programming the context
 * registers.
 */

#define	RESET_MMU_REGS(tmp1, tmp2, tmp3)		\
	FLUSH_ALL_TLB(tmp1)				;\
	set	MMU_PCONTEXT, tmp1			;\
	sethi	%hi(kcontextreg), tmp2			;\
	ldx	[tmp2 + %lo(kcontextreg)], tmp2		;\
	stxa	tmp2, [tmp1]ASI_DMMU			;\
	set	MMU_SCONTEXT, tmp1			;\
	stxa	tmp2, [tmp1]ASI_DMMU			;\
	sethi	%hi(ktsb_base), tmp1			;\
	ldx	[tmp1 + %lo(ktsb_base)], tmp2		;\
	mov	MMU_TSB, tmp3				;\
	stxa	tmp2, [tmp3]ASI_IMMU			;\
	stxa	tmp2, [tmp3]ASI_DMMU			;\
	membar	#Sync					;\
	RESET_WINREG(tmp1)

#define	RESET_TSB_TAGPTR(tmp)				\
	set	MMU_TAG_ACCESS, tmp			;\
	stxa	%g0, [tmp]ASI_IMMU			;\
	stxa	%g0, [tmp]ASI_DMMU			;\
	membar	#Sync

/*
 * In case of errors in the MMU_TSB_PREFETCH registers we have to
 * reset them. We can use "0" as the reset value: this clears the
 * "V" bit of the registers, which disables the prefetch and makes
 * the values of the other fields irrelevant.
 */
#define	RESET_TSB_PREFETCH(tmp)			\
	set	VA_UTSBPREF_8K, tmp		;\
	stxa	%g0, [tmp]ASI_ITSB_PREFETCH	;\
	set	VA_UTSBPREF_4M, tmp		;\
	stxa	%g0, [tmp]ASI_ITSB_PREFETCH	;\
	set	VA_KTSBPREF_8K, tmp		;\
	stxa	%g0, [tmp]ASI_ITSB_PREFETCH	;\
	set	VA_KTSBPREF_4M, tmp		;\
	stxa	%g0, [tmp]ASI_ITSB_PREFETCH	;\
	set	VA_UTSBPREF_8K, tmp		;\
	stxa	%g0, [tmp]ASI_DTSB_PREFETCH	;\
	set	VA_UTSBPREF_4M, tmp		;\
	stxa	%g0, [tmp]ASI_DTSB_PREFETCH	;\
	set	VA_KTSBPREF_8K, tmp		;\
	stxa	%g0, [tmp]ASI_DTSB_PREFETCH	;\
	set	VA_KTSBPREF_4M, tmp		;\
	stxa	%g0, [tmp]ASI_DTSB_PREFETCH

/*
 * In case of errors in the MMU_SHARED_CONTEXT register we have to
 * reset its value. We can use "0" as the reset value: it puts 0
 * in the IV field, disabling shared context support and making the
 * values of all the other fields of the register irrelevant.
 */
#define	RESET_SHARED_CTXT(tmp)			\
	set	MMU_SHARED_CONTEXT, tmp		;\
	stxa	%g0, [tmp]ASI_DMMU

/*
 * RESET_TO_PRIV()
 *
 * In many cases, we need to force the thread into privileged mode because
 * privileged mode is the only mode in which the system can continue to
 * work, given that the user-mode information is undeterminable due to
 * register corruption.
 *
 *  - opl_uger_ctxt
 *    If the error is a secondary-TSB-related register parity error, we
 *    have no idea what value is supposed to be in it.
 *
 *  In the three cases below, %tstate is not accessible until it is
 *  overwritten with some value, so we have no clue whether the thread was
 *  running in user mode or not:
 *   - opl_uger_pstate
 *     If the error is %pstate parity, it propagates to %tstate.
 *   - opl_uger_tstate
 *     No need to say the reason
 *   - opl_uger_r
 *     If the error is %ccr or %asi parity, it propagates to %tstate
 *
 * For the above four cases, user mode info may not be available for
 * sys_trap() and user_trap() to work consistently. So we have to force
 * the thread into privileged mode.
 *
 * Forcing the thread into privileged mode requires forcing
 * regular %g7 to be CPU_THREAD, because if the thread was running in
 * user mode, %g7 would only be set in user_trap(). Also, since the %sp
 * may be in an inconsistent state, we need to do a stack reset and
 * switch to something we know, i.e. the current thread's kernel stack.
 * We also reset the window registers and MMU registers just to
 * make sure.
 *
 * To set regular %g7, we need to clear the PSTATE_AG bit and use
 * one local register. Note that we are panicking and will never
 * unwind back, so it is ok to clobber a local.
 *
 * If the thread was running in user mode, the %tpc value itself might be
 * within the range of OBP addresses. %tpc must be forced to zero to
 * prevent sys_trap() from going to prom_trap().
 *
 */
#define	RESET_TO_PRIV(tmp, tmp1, tmp2, local)		\
	RESET_MMU_REGS(tmp, tmp1, tmp2)			;\
	CPU_ADDR(tmp, tmp1)				;\
	ldx	[tmp + CPU_THREAD], local		;\
	ldx	[local + T_STACK], tmp			;\
	sub	tmp, STACK_BIAS, %sp			;\
	rdpr	%pstate, tmp				;\
	wrpr	tmp, PSTATE_AG, %pstate			;\
	mov	local, %g7				;\
	rdpr	%pstate, local				;\
	wrpr	local, PSTATE_AG, %pstate		;\
	wrpr	%g0, 1, %tl				;\
	set	TSTATE_KERN, tmp			;\
	rdpr	%cwp, tmp1				;\
	or	tmp, tmp1, tmp				;\
	wrpr	tmp, %g0, %tstate			;\
	wrpr	%g0, %tpc


/*
 * We normally don't expect CE traps since we disable the
 * 0x63 trap reporting at the start of day. There is a
 * small window before we disable them, so check for that
 * case here. Otherwise, panic.
 */

	.align	128
	ENTRY_NP(ce_err)
	mov	AFSR_ECR, %g1
	ldxa	[%g1]ASI_ECR, %g1
	andcc	%g1, ASI_ECR_RTE_UE | ASI_ECR_RTE_CEDG, %g0
	bz,pn	%xcc, 1f
	 nop
	retry
1:
	/*
	 * We did disable the 0x63 trap reporting, so
	 * this shouldn't happen - panic.
	 */
	set	trap, %g1
	rdpr	%tt, %g3
	sethi	%hi(sys_trap), %g5
	jmp	%g5 + %lo(sys_trap)
	sub	%g0, 1, %g4
	SET_SIZE(ce_err)


/*
 * We don't use trap for CE detection.
 */
	ENTRY_NP(ce_err_tl1)
	set	trap, %g1
	rdpr	%tt, %g3
	sethi	%hi(sys_trap), %g5
	jmp	%g5 + %lo(sys_trap)
	sub	%g0, 1, %g4
	SET_SIZE(ce_err_tl1)


/*
 * async_err is the default handler for IAE/DAE traps.
 * For OPL, we patch in the right handler at start of day.
 * But if an IAE/DAE trap gets generated before the handler
 * is patched, panic.
 */
	ENTRY_NP(async_err)
	set	trap, %g1
	rdpr	%tt, %g3
	sethi	%hi(sys_trap), %g5
	jmp	%g5 + %lo(sys_trap)
	sub	%g0, 1, %g4
	SET_SIZE(async_err)

	.seg	".data"
	.global	opl_clr_freg
	.global	opl_cpu0_err_log

	.align	16
opl_clr_freg:
	.word	0
	.align	16

	.align	MMU_PAGESIZE
opl_cpu0_err_log:
	.skip	MMU_PAGESIZE

/*
 * Common synchronous error trap handler (tt=0xA, 0x32)
 * All TL=0 and TL>0 0xA and 0x32 traps vector to this handler.
 * The error handling can be best summarized as follows:
 * 0. Do TRAPTRACE if enabled.
 * 1. Save globals %g1, %g2 & %g3 onto the scratchpad regs.
 * 2. The SFSR register is read and verified as valid by checking
 *    that the SFSR.FV bit is set. If SFSR.FV is not set, the
 *    error cases cannot be decoded/determined, and the SFPAR
 *    register that contains the physical faultaddr is also
 *    not valid. Also, the SFPAR is only valid for the UE/TO/BERR error
 *    cases. Assuming the SFSR.FV is valid:
 *    - BERR(bus error)/TO(timeout)/UE case
 *      If any of these error cases are detected, read the SFPAR
 *      to get the faultaddress. Generate ereport.
 *    - TLB Parity case (only recoverable case)
 *      For DAE, read SFAR for the faultaddress. For IAE,
 *      use %tpc for faultaddress (SFAR is not valid in IAE).
 *      Flush all the tlbs.
 *      Subtract one from the recoverable error count stored in
 *      the error log scratch register. If the threshold limit
 *      is reached (zero) - generate ereport. Else
 *      restore globals and retry (no ereport is generated).
 *    - TLB Multiple hits
 *      For DAE, read SFAR for the faultaddress. For IAE,
 *      use %tpc for faultaddress (SFAR is not valid in IAE).
 *      Flush all tlbs and generate ereport.
 * 3. TL=0 and TL>0 considerations
 *    - Since both TL=0 & TL>0 traps are made to vector into
 *      the same handler, the underlying assumption/design here is
 *      that any nested error condition (if it happens) occurs only
 *      in the handler, and the system is assumed to eventually go
 *      into red mode. With this philosophy in mind, the recoverable
 *      TLB Parity error case never checks the TL level before it
 *      retries. Note that this is ok for the TL>1 case (assuming we
 *      don't have a nested error) since we always save the globals
 *      %g1, %g2 & %g3 whenever we enter this trap handler.
 *    - Additional TL=0 vs TL>1 handling includes:
 *      - For a UE error occurring under TL>1, special handling
 *        is added to prevent the unlikely chance of a cpu-lockup
 *        when a UE was originally detected in the user stack and
 *        the spill trap handler taken from sys_trap() happened
 *        to reference the same UE location. Under the above
 *        condition (TL>1 and UE error), paranoid code is added
 *        to reset the window regs so that spill traps can't happen
 *        during the unwind back to TL=0 handling.
 *        Note that we can do that because we are not returning
 *        back.
 * 4. Ereport generation.
 *    - Ereport generation is performed when we unwind to the TL=0
 *      handling code via sys_trap(). on_trap()/lofault protection
 *      will apply there.
 *
 */
	ENTRY_NP(opl_sync_trap)
#ifdef	TRAPTRACE
	OPL_TRAPTRACE(%g1, %g2, %g3, opl_sync_trap_lb)
	rdpr	%tt, %g1
#endif	/* TRAPTRACE */
	cmp	%g1, T_INSTR_ERROR
	bne,pt	%xcc, 0f
	 mov	MMU_SFSR, %g3
	ldxa	[%g3]ASI_IMMU, %g1	! IAE trap case tt = 0xa
	andcc	%g1, SFSR_FV, %g0
	bz,a,pn %xcc, 2f		! Branch if SFSR is invalid and
	 rdpr	%tpc, %g2		! use %tpc for faultaddr instead

	sethi	%hi(SFSR_UE|SFSR_BERR|SFSR_TO), %g3
	andcc	%g1, %g3, %g0		! Check for UE/BERR/TO errors
	bz,a,pt %xcc, 1f		! Branch if not UE/BERR/TO and
	 rdpr	%tpc, %g2		! use %tpc as faultaddr
	set	OPL_MMU_SFPAR, %g3	! In the UE/BERR/TO cases, use
	ba,pt	%xcc, 2f		! SFPAR as faultaddr
	 ldxa	[%g3]ASI_IMMU, %g2
0:
	ldxa	[%g3]ASI_DMMU, %g1	! DAE trap case tt = 0x32
	andcc	%g1, SFSR_FV, %g0
	bnz,pt	%xcc, 7f		! branch if SFSR.FV is valid
	 mov	MMU_SFAR, %g2		! set %g2 to use SFAR
	ba,pt	%xcc, 2f		! SFSR.FV is not valid, read SFAR
	 ldxa	[%g2]ASI_DMMU, %g2	! for faultaddr
7:
	sethi	%hi(SFSR_UE|SFSR_BERR|SFSR_TO), %g3
	andcc	%g1, %g3, %g0		! Check UE/BERR/TO for valid SFPAR
	movnz	%xcc, OPL_MMU_SFPAR, %g2 ! Use SFPAR instead of SFAR for
	ldxa	[%g2]ASI_DMMU, %g2	! faultaddr
1:
	sethi	%hi(SFSR_TLB_PRT), %g3
	andcc	%g1, %g3, %g0
	bz,pt	%xcc, 8f		! branch for TLB multi-hit check
	 nop
	/*
	 * This is the TLB parity error case and it is the
	 * only retryable error case.
	 * Only %g1, %g2 and %g3 are allowed
	 */
	FLUSH_ALL_TLB(%g3)
	set	OPL_SCRATCHPAD_ERRLOG, %g3
	ldxa	[%g3]ASI_SCRATCHPAD, %g3	! Read errlog scratchreg
	and	%g3, ERRLOG_REG_NUMERR_MASK, %g3 ! Extract the error count
	subcc	%g3, 1, %g0			! Subtract one from the count
	bz,pn	%xcc, 2f		! too many TLB parity errs in a certain
	 nop				! period, branch to generate ereport
	LOG_SYNC_REG(%g1, %g2, %g3)	! Record into the error log
	set	OPL_SCRATCHPAD_ERRLOG, %g3
	ldxa	[%g3]ASI_SCRATCHPAD, %g2
	sub	%g2, 1, %g2		! decrement error counter by 1
	stxa	%g2, [%g3]ASI_SCRATCHPAD	! update the errlog scratchreg
	OPL_RESTORE_GLOBAL(%g1, %g2, %g3)
	retry
8:
	sethi	%hi(SFSR_TLB_MUL), %g3
	andcc	%g1, %g3, %g0
	bz,pt	%xcc, 2f		! check for the TLB multi-hit errors
	 nop
	FLUSH_ALL_TLB(%g3)
2:
	/*
	 * non-retryable error handling
	 * now we can use other registers since
	 * we will not be returning back
	 */
	mov	%g1, %g5		! %g5 = SFSR
	mov	%g2, %g6		! %g6 = SFPAR or SFAR/tpc
	LOG_SYNC_REG(%g1, %g2, %g3)	! Record into the error log

	/*
	 * Special case for UE on the user stack.
	 * There is a possibility that the same error may come back here
	 * by touching the same UE in the spill trap handler taken from
	 * sys_trap(). It ends up with an infinite loop causing a cpu lockup.
	 * The conditions for handling this case are:
	 * - SFSR_FV is valid and SFSR_UE is set
	 * - we are at TL > 1
	 * If the above conditions are true, we force %cansave to be a
	 * big number to prevent a spill trap in sys_trap(). Note that
	 * we will not be returning back.
	 */
	rdpr	%tt, %g4		! %g4 == ttype
	rdpr	%tl, %g1		! %g1 == tl
	cmp	%g1, 1			! Check if TL == 1
	be,pt	%xcc, 3f		! branch if we came from TL=0
	 nop
	andcc	%g5, SFSR_FV, %g0	! see if SFSR.FV is valid
	bz,pn	%xcc, 4f		! branch, checking UE is meaningless
	sethi	%hi(SFSR_UE), %g2
	andcc	%g5, %g2, %g0		! check for UE
	bz,pt	%xcc, 4f		! branch if not UE
	 nop
	RESET_WINREG(%g1)		! reset windows to prevent spills
4:
	RESET_USER_RTT_REGS(%g2, %g3, opl_sync_trap_resetskip)
opl_sync_trap_resetskip:
	mov	%g5, %g3		! pass SFSR to the 3rd arg
	mov	%g6, %g2		! pass SFAR to the 2nd arg
	set	opl_cpu_isync_tl1_error, %g1
	set	opl_cpu_dsync_tl1_error, %g6
	cmp	%g4, T_INSTR_ERROR
	movne	%icc, %g6, %g1
	ba,pt	%icc, 6f
	nop
3:
	mov	%g5, %g3		! pass SFSR to the 3rd arg
	mov	%g6, %g2		! pass SFAR to the 2nd arg
	set	opl_cpu_isync_tl0_error, %g1
	set	opl_cpu_dsync_tl0_error, %g6
	cmp	%g4, T_INSTR_ERROR
	movne	%icc, %g6, %g1
6:
	sethi	%hi(sys_trap), %g5
	jmp	%g5 + %lo(sys_trap)
	 mov	PIL_15, %g4
	SET_SIZE(opl_sync_trap)

1190 /*
1191  * Common Urgent error trap handler (tt=0x40)
1192  * All TL=0 and TL>0 0x40 traps vector to this handler.
1193  * The error handling can be best summarized as follows:
1194  * 1. Read the Urgent error status register (UGERSR)
1195  *    Faultaddress is N/A here and it is not collected.
1196  * 2. Check to see if we have a multiple errors case
1197  *    If so, we enable WEAK_ED (weak error detection) bit
1198  *    to prevent any potential error storms and branch directly
1199  *    to generate ereport. (we don't decode/handle individual
1200  *    error cases when we get a multiple error situation)
1201  * 3. Now look for the recoverable error cases which include
1202  *    IUG_DTLB, IUG_ITLB or COREERR errors. If any of the
1203  *    recoverable errors are detected, do the following:
1204  *    - Flush all tlbs.
1205  *    - Verify that we came from TL=0, if not, generate
1206  *      ereport. Note that the reason we don't recover
1207  *      at TL>0 is because the AGs might be corrupted or
1208  *      inconsistent. We can't save/restore them into
1209  *      the scratchpad regs like we did for opl_sync_trap().
1210  *    - Check the INSTEND[5:4] bits in the UGERSR. If the
1211  *      value is 0x3 (11b), this error is not recoverable.
1212  *      Generate ereport.
1213  *    - Subtract one from the recoverable error count stored in
1214  *      the error log scratch register. If the threshold limit
1215  *      is reached (zero) - generate ereport.
1216  *    - If the count is within the limit, update the count
1217  *      in the error log register (subtract one). Log the error
1218  *      info in the log buffer. Capture traptrace if enabled.
1219  *      Retry (no ereport generated)
1220  * 4. The rest of the error cases are unrecoverable and will
1221  *    be handled accordingly (flushing regs, etc. as required).
1222  *    For details on these error cases (UGER_CRE, UGER_CTXT, etc..)
1223  *    consult the OPL cpu/mem philosophy doc.
1224  *    Ereport will be generated for these errors.
1225  * 5. Ereport generation.
1226  *    - Ereport generation for an urgent error trap always
1227  *      results in a panic when we unwind to the TL=0 handling
1228  *      code via sys_trap(); on_trap()/lofault protection does
1229  *      not apply there.
1230  */
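     /*
      * A C-like sketch of the dispatch below (illustrative only, not
      * assembled; the helper names are hypothetical):
      *
      *	uint64_t ugersr = read_ugersr();
      *	if (ugersr & UGESR_MULTI) {
      *		enable_weak_ed();		// guard against error storms
      *		goto opl_uger_panic1;
      *	}
      *	if (ugersr & UGESR_CAN_RECOVER)
      *		recover_or_panic();		// TLB flush + extra checks
      *	else
      *		decode_unrecoverable();		// opl_uger_cre and friends
      */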
1231         ENTRY_NP(opl_uger_trap)
1232         set     ASI_UGERSR, %g2
1233         ldxa    [%g2]ASI_AFSR, %g1              ! Read the UGERSR reg
1234 
1235         set     UGESR_MULTI, %g2
1236         andcc   %g1, %g2, %g0                   ! Check for Multi-errs
1237         bz,pt   %xcc, opl_uger_is_recover       ! branch if not Multi-errs
1238          nop
1239         set     AFSR_ECR, %g2
1240         ldxa    [%g2]ASI_AFSR, %g3              ! Enable Weak error
1241         or      %g3, ASI_ECR_WEAK_ED, %g3       ! detect mode to prevent
1242         stxa    %g3, [%g2]ASI_AFSR              ! potential error storms
1243         ba      %xcc, opl_uger_panic1
1244          nop
1245 
1246 opl_uger_is_recover:
1247         set     UGESR_CAN_RECOVER, %g2          ! Check for recoverable
1248         andcc   %g1, %g2, %g0                   ! errors, i.e. IUG_DTLB,
1249         bz,pt   %xcc, opl_uger_cre              ! IUG_ITLB or COREERR
1250          nop
1251 
1252         /*
1253          * Fall through to handle the recoverable case.
1254          * We need to do the following additional checks to determine
1255          * whether this is indeed recoverable:
1256          * 1. the error trap came from TL=0, and
1257          * 2. the INSTEND[5:4] bits in UGERSR are not 0x3, and
1258          * 3. the recoverable error count limit has not been reached.
1259          *
1260          */
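             /*
              * Illustratively (C-like, not assembled), the INSTEND and
              * error count checks below amount to:
              *
              *	if (((ugersr >> 4) & 3) == 3)	// INSTEND[5:4] == 11b
              *		goto opl_uger_panic;
              *	count = errlog & ERRLOG_REG_NUMERR_MASK;
              *	if (count - 1 == 0)		// threshold reached
              *		goto opl_uger_panic;
              *	errlog--;			// safe: count >= 2 here
              */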
1261         FLUSH_ALL_TLB(%g3)
1262         rdpr    %tl, %g3                ! Read TL
1263         cmp     %g3, 1                  ! Check if we came from TL=0
1264         bne,pt  %xcc, opl_uger_panic    ! branch if came from TL>0
1265          nop
1266         srlx    %g1, 4, %g2             ! shift INSTEND[5:4] -> [1:0]
1267         and     %g2, 3, %g2             ! extract the shifted [1:0] bits
1268         cmp     %g2, 3                  ! check if INSTEND is recoverable
1269         be,pt   %xcc, opl_uger_panic    ! panic if ([1:0] = 11b)
1270          nop
1271         set     OPL_SCRATCHPAD_ERRLOG, %g3
1272         ldxa    [%g3]ASI_SCRATCHPAD, %g2        ! Read errlog scratch reg
1273         and     %g2, ERRLOG_REG_NUMERR_MASK, %g3 ! Extract error count and
1274         subcc   %g3, 1, %g3                     ! subtract one from it
1275         bz,pt   %xcc, opl_uger_panic    ! If count reached zero, too many
1276          nop                            ! errors, branch to generate ereport
1277         sub     %g2, 1, %g2                     ! Subtract one from the count
1278         set     OPL_SCRATCHPAD_ERRLOG, %g3      ! and write back the updated
1279         stxa    %g2, [%g3]ASI_SCRATCHPAD        ! count into the errlog reg
1280         LOG_UGER_REG(%g1, %g2, %g3)             ! Log the error info
1281 #ifdef  TRAPTRACE
1282         OPL_TRAPTRACE(%g1, %g2, %g3, opl_uger_trap_lb)
1283 #endif  /* TRAPTRACE */
1284         retry                                   ! retry - no ereport
1285 
1286         /*
1287          * Process the rest of the unrecoverable error cases
1288          * All error cases below ultimately branch to either
1289          * opl_uger_panic or opl_uger_panic1.
1290          * opl_uger_panic1 is the same as opl_uger_panic except
1291          * for the additional execution of the RESET_TO_PRIV()
1292          * macro that does a heavy handed reset. Read the
1293          * comments for RESET_TO_PRIV() macro for more info.
1294          */
1295 opl_uger_cre:
1296         set     UGESR_IAUG_CRE, %g2
1297         andcc   %g1, %g2, %g0
1298         bz,pt   %xcc, opl_uger_ctxt
1299          nop
1300         IAG_CRE(%g2, %g3)
1301         set     AFSR_ECR, %g2
1302         ldxa    [%g2]ASI_AFSR, %g3
1303         or      %g3, ASI_ECR_WEAK_ED, %g3
1304         stxa    %g3, [%g2]ASI_AFSR
1305         ba      %xcc, opl_uger_panic
1306          nop
1307 
1308 opl_uger_ctxt:
1309         set     UGESR_IAUG_TSBCTXT, %g2
1310         andcc   %g1, %g2, %g0
1311         bz,pt   %xcc, opl_uger_tsbp
1312          nop
1313         GET_CPU_IMPL(%g2)
1314         cmp     %g2, JUPITER_IMPL
1315         bne     %xcc, 1f
1316           nop
1317         RESET_SHARED_CTXT(%g2)
1318 1:
1319         RESET_MMU_REGS(%g2, %g3, %g4)
1320         ba      %xcc, opl_uger_panic
1321          nop
1322 
1323 opl_uger_tsbp:
1324         set     UGESR_IUG_TSBP, %g2
1325         andcc   %g1, %g2, %g0
1326         bz,pt   %xcc, opl_uger_pstate
1327          nop
1328         GET_CPU_IMPL(%g2)
1329         cmp     %g2, JUPITER_IMPL
1330         bne     %xcc, 1f
1331           nop
1332         RESET_TSB_PREFETCH(%g2)
1333 1:
1334         RESET_TSB_TAGPTR(%g2)
1335 
1336         /*
1337          * IUG_TSBP error may corrupt MMU registers
1338          * Reset them here.
1339          */
1340         RESET_MMU_REGS(%g2, %g3, %g4)
1341         ba      %xcc, opl_uger_panic
1342          nop
1343 
1344 opl_uger_pstate:
1345         set     UGESR_IUG_PSTATE, %g2
1346         andcc   %g1, %g2, %g0
1347         bz,pt   %xcc, opl_uger_tstate
1348          nop
1349         RESET_CUR_TSTATE(%g2)
1350         ba      %xcc, opl_uger_panic1
1351          nop
1352 
1353 opl_uger_tstate:
1354         set     UGESR_IUG_TSTATE, %g2
1355         andcc   %g1, %g2, %g0
1356         bz,pt   %xcc, opl_uger_f
1357          nop
1358         RESET_PREV_TSTATE(%g2, %g3, opl_uger_tstate_1)
1359         ba      %xcc, opl_uger_panic1
1360          nop
1361 
1362 opl_uger_f:
1363         set     UGESR_IUG_F, %g2
1364         andcc   %g1, %g2, %g0
1365         bz,pt   %xcc, opl_uger_r
1366          nop
1367         CLEAR_FPREGS(%g2)
1368         ba      %xcc, opl_uger_panic
1369          nop
1370 
1371 opl_uger_r:
1372         set     UGESR_IUG_R, %g2
1373         andcc   %g1, %g2, %g0
1374         bz,pt   %xcc, opl_uger_panic1
1375          nop
1376         CLEAR_GEN_REGS(%g2, opl_uger_r_1)
1377         ba      %xcc, opl_uger_panic1
1378          nop
1379 
1380 opl_uger_panic:
1381         mov     %g1, %g2                        ! %g2 = arg #1
1382         LOG_UGER_REG(%g1, %g3, %g4)
1383         ba      %xcc, opl_uger_panic_cmn
1384          nop
1385 
1386 opl_uger_panic1:
1387         mov     %g1, %g2                        ! %g2 = arg #1
1388         LOG_UGER_REG(%g1, %g3, %g4)
1389         RESET_TO_PRIV(%g1, %g3, %g4, %l0)
1390 
1391         /*
1392          * Set up the argument for sys_trap.
1393          * %g2 = arg #1 already set above
1394          */
1395 opl_uger_panic_cmn:
1396         RESET_USER_RTT_REGS(%g4, %g5, opl_uger_panic_resetskip)
1397 opl_uger_panic_resetskip:
1398         rdpr    %tl, %g3                        ! arg #2
1399         set     opl_cpu_urgent_error, %g1       ! pc
1400         sethi   %hi(sys_trap), %g5
1401         jmp     %g5 + %lo(sys_trap)
1402          mov    PIL_15, %g4
1403         SET_SIZE(opl_uger_trap)
1404 
1405 /*
1406  * OPL ta3 support (note that the win_reg save area
1407  * for each cpu is 2^7 = 128 bytes)
1408  */
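     /*
      * Illustrative view of the addressing done by SAVE_WREGS and
      * RESTORE_WREGS below (C-like sketch; opl_ta3_save holds the base
      * address of the per-cpu save areas):
      *
      *	uint64_t *slot = (uint64_t *)(opl_ta3_save + cpu_index * 128);
      *	slot[0] .. slot[7]  hold %l0-%l7	(offsets 0-56)
      *	slot[8] .. slot[15] hold %i0-%i7	(offsets 64-120)
      */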
1409 
1410 #define RESTORE_WREGS(tmp1, tmp2)               \
1411         CPU_INDEX(tmp1, tmp2)                   ;\
1412         sethi   %hi(opl_ta3_save), tmp2         ;\
1413         ldx     [tmp2 +%lo(opl_ta3_save)], tmp2 ;\
1414         sllx    tmp1, 7, tmp1                   ;\
1415         add     tmp2, tmp1, tmp2                ;\
1416         ldx     [tmp2 + 0], %l0                 ;\
1417         ldx     [tmp2 + 8], %l1                 ;\
1418         ldx     [tmp2 + 16], %l2                ;\
1419         ldx     [tmp2 + 24], %l3                ;\
1420         ldx     [tmp2 + 32], %l4                ;\
1421         ldx     [tmp2 + 40], %l5                ;\
1422         ldx     [tmp2 + 48], %l6                ;\
1423         ldx     [tmp2 + 56], %l7                ;\
1424         ldx     [tmp2 + 64], %i0                ;\
1425         ldx     [tmp2 + 72], %i1                ;\
1426         ldx     [tmp2 + 80], %i2                ;\
1427         ldx     [tmp2 + 88], %i3                ;\
1428         ldx     [tmp2 + 96], %i4                ;\
1429         ldx     [tmp2 + 104], %i5               ;\
1430         ldx     [tmp2 + 112], %i6               ;\
1431         ldx     [tmp2 + 120], %i7
1432 
1433 #define SAVE_WREGS(tmp1, tmp2)                  \
1434         CPU_INDEX(tmp1, tmp2)                   ;\
1435         sethi   %hi(opl_ta3_save), tmp2         ;\
1436         ldx     [tmp2 +%lo(opl_ta3_save)], tmp2 ;\
1437         sllx    tmp1, 7, tmp1                   ;\
1438         add     tmp2, tmp1, tmp2                ;\
1439         stx     %l0, [tmp2 + 0]                 ;\
1440         stx     %l1, [tmp2 + 8]                 ;\
1441         stx     %l2, [tmp2 + 16]                ;\
1442         stx     %l3, [tmp2 + 24]                ;\
1443         stx     %l4, [tmp2 + 32]                ;\
1444         stx     %l5, [tmp2 + 40]                ;\
1445         stx     %l6, [tmp2 + 48]                ;\
1446         stx     %l7, [tmp2 + 56]                ;\
1447         stx     %i0, [tmp2 + 64]                ;\
1448         stx     %i1, [tmp2 + 72]                ;\
1449         stx     %i2, [tmp2 + 80]                ;\
1450         stx     %i3, [tmp2 + 88]                ;\
1451         stx     %i4, [tmp2 + 96]                ;\
1452         stx     %i5, [tmp2 + 104]               ;\
1453         stx     %i6, [tmp2 + 112]               ;\
1454         stx     %i7, [tmp2 + 120] 
1455 
1456 
1457 /*
1458  * The purpose of this function is to make sure that the restore
1459  * instruction after the flushw does not cause a fill trap. The sun4u
1460  * fill trap handler cannot handle a tlb fault of an unmapped stack
1461  * except at the restore instruction at user_rtt. On OPL systems the
1462  * stack can get unmapped between the flushw and restore instructions
1463  * since multiple strands share the tlb.
1464  */
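     /*
      * In outline, the handler below saves the trap-time window's locals
      * and ins to the per-cpu area, performs the flushw from a fresh
      * window, switches back and reloads the saved registers, then
      * issues restored/restore so the final restore is satisfied without
      * touching the (possibly unmapped) user stack.
      */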
1465         ENTRY_NP(opl_ta3_trap)
1466         set     trap, %g1
1467         mov     T_FLUSHW, %g3
1468         sub     %g0, 1, %g4
1469         rdpr    %cwp, %g5
1470         SAVE_WREGS(%g2, %g6)
1471         save
1472         flushw
1473         rdpr    %cwp, %g6
1474         wrpr    %g5, %cwp
1475         RESTORE_WREGS(%g2, %g5)
1476         wrpr    %g6, %cwp
1477         restored
1478         restore
1479 
1480         ba,a    fast_trap_done
1481         SET_SIZE(opl_ta3_trap)
1482 
1483         ENTRY_NP(opl_cleanw_subr)
1484         set     trap, %g1
1485         mov     T_FLUSHW, %g3
1486         sub     %g0, 1, %g4
1487         rdpr    %cwp, %g5
1488         SAVE_WREGS(%g2, %g6)
1489         save
1490         flushw
1491         rdpr    %cwp, %g6
1492         wrpr    %g5, %cwp
1493         RESTORE_WREGS(%g2, %g5)
1494         wrpr    %g6, %cwp
1495         restored
1496         restore
1497         jmp     %g7
1498           nop
1499         SET_SIZE(opl_cleanw_subr)
1500 
1501 /*
1502  * The actual trap handler for tt=0x0a and tt=0x32
1503  */
1504         ENTRY_NP(opl_serr_instr)
1505         OPL_SAVE_GLOBAL(%g1,%g2,%g3)
1506         sethi   %hi(opl_sync_trap), %g3
1507         jmp     %g3 + %lo(opl_sync_trap)
1508          rdpr    %tt, %g1
1509         .align  32
1510         SET_SIZE(opl_serr_instr)
1511 
1512 /*
1513  * The actual trap handler for tt=0x40
1514  */
1515         ENTRY_NP(opl_ugerr_instr)
1516         sethi   %hi(opl_uger_trap), %g3
1517         jmp     %g3 + %lo(opl_uger_trap)
1518          nop
1519         .align  32
1520         SET_SIZE(opl_ugerr_instr)
1521 
1522 /*
1523  * The actual trap handler for tt=0x103 (flushw)
1524  */
1525         ENTRY_NP(opl_ta3_instr)
1526         sethi   %hi(opl_ta3_trap), %g3
1527         jmp     %g3 + %lo(opl_ta3_trap)
1528          nop
1529         .align  32
1530         SET_SIZE(opl_ta3_instr)
1531 
1532 /*
1533  * The patch for the .clean_windows code
1534  */
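     /*
      * Note that jmpl deposits its own address in %g7 and the delay-slot
      * add advances %g7 by 8, so the terminating "jmp %g7" in
      * opl_cleanw_subr returns to the instruction after the delay slot.
      */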
1535         ENTRY_NP(opl_ta4_instr)
1536         sethi   %hi(opl_cleanw_subr), %g3
1537         add     %g3, %lo(opl_cleanw_subr), %g3
1538         jmpl    %g3, %g7
1539           add   %g7, 8, %g7
1540         nop
1541         nop
1542         nop
1543         SET_SIZE(opl_ta4_instr)
1544 
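     /*
      * stick_timestamp: store the current %stick value, with the high
      * (NPT) bit cleared, through the pointer in %o0. The sllx/srlx
      * pair below is equivalent to (C-like, illustrative):
      *
      *	*tsp = stick & ~(1ULL << 63);
      */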
1545         ENTRY_NP(stick_timestamp)
1546         rd      STICK, %g1      ! read stick reg
1547         sllx    %g1, 1, %g1
1548         srlx    %g1, 1, %g1     ! clear npt bit
1549 
1550         retl
1551         stx     %g1, [%o0]      ! store the timestamp
1552         SET_SIZE(stick_timestamp)
1553 
1554 
1555         ENTRY_NP(stick_adj)
1556         rdpr    %pstate, %g1            ! save processor state
1557         andn    %g1, PSTATE_IE, %g3
1558         ba      1f                      ! cache align stick adj
1559         wrpr    %g0, %g3, %pstate       ! turn off interrupts
1560 
1561         .align  16
1562 1:      nop
1563 
1564         rd      STICK, %g4              ! read stick reg
1565         add     %g4, %o0, %o1           ! adjust stick with skew
1566         wr      %o1, %g0, STICK         ! write stick reg
1567 
1568         retl
1569         wrpr    %g1, %pstate            ! restore processor state
1570         SET_SIZE(stick_adj)
1571 
1572         ENTRY_NP(kdi_get_stick)
1573         rd      STICK, %g1
1574         stx     %g1, [%o0]
1575         retl
1576         mov     %g0, %o0
1577         SET_SIZE(kdi_get_stick)
1578 
1579         ENTRY(dtrace_blksuword32)
1580         save    %sp, -SA(MINFRAME + 4), %sp
1581 
1582         rdpr    %pstate, %l1
1583         andn    %l1, PSTATE_IE, %l2             ! disable interrupts to
1584         wrpr    %g0, %l2, %pstate               ! protect our FPU diddling
1585 
1586         rd      %fprs, %l0
1587         andcc   %l0, FPRS_FEF, %g0
1588         bz,a,pt %xcc, 1f                        ! if the fpu is disabled
1589         wr      %g0, FPRS_FEF, %fprs            ! ... enable the fpu
1590 
1591         st      %f0, [%fp + STACK_BIAS - 4]     ! save %f0 to the stack
1592 1:
1593         set     0f, %l5
1594         /*
1595          * We're about to write a block full of either total garbage
1596          * (not kernel data, don't worry) or user floating-point data
1597          * (so it only _looks_ like garbage).
1598          */
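             /*
              * The address of local label 0 (the recovery path below) is
              * installed in T_LOFAULT so that a fault during the block
              * store resumes at 0: instead of panicking.
              */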
1599         ld      [%i1], %f0                      ! modify the block
1600         membar  #Sync
1601         stn     %l5, [THREAD_REG + T_LOFAULT]   ! set up the lofault handler
1602         stda    %d0, [%i0]ASI_BLK_COMMIT_S      ! store the modified block
1603         membar  #Sync
1604         flush   %i0                             ! flush instruction pipeline
1605         stn     %g0, [THREAD_REG + T_LOFAULT]   ! remove the lofault handler
1606 
1607         bz,a,pt %xcc, 1f
1608         wr      %g0, %l0, %fprs                 ! restore %fprs
1609 
1610         ld      [%fp + STACK_BIAS - 4], %f0     ! restore %f0
1611 1:
1612 
1613         wrpr    %g0, %l1, %pstate               ! restore interrupts
1614 
1615         ret
1616         restore %g0, %g0, %o0
1617 
1618 0:
1619         membar  #Sync
1620         stn     %g0, [THREAD_REG + T_LOFAULT]   ! remove the lofault handler
1621 
1622         bz,a,pt %xcc, 1f
1623         wr      %g0, %l0, %fprs                 ! restore %fprs
1624 
1625         ld      [%fp + STACK_BIAS - 4], %f0     ! restore %f0
1626 1:
1627 
1628         wrpr    %g0, %l1, %pstate               ! restore interrupts
1629 
1630         /*
1631          * If tryagain is set (%i2) we tail-call dtrace_blksuword32_err()
1632          * which deals with watchpoints. Otherwise, just return -1.
1633          */
1634         brnz,pt %i2, 1f
1635         nop
1636         ret
1637         restore %g0, -1, %o0
1638 1:
1639         call    dtrace_blksuword32_err
1640         restore
1641 
1642         SET_SIZE(dtrace_blksuword32)
1643 
1644         ENTRY_NP(ras_cntr_reset)
1645         set     OPL_SCRATCHPAD_ERRLOG, %o1
1646         ldxa    [%o1]ASI_SCRATCHPAD, %o0
1647         or      %o0, ERRLOG_REG_NUMERR_MASK, %o0
1648         retl
1649          stxa   %o0, [%o1]ASI_SCRATCHPAD
1650         SET_SIZE(ras_cntr_reset)
1651 
1652         ENTRY_NP(opl_error_setup)
1653         /*
1654          * Initialize the error log scratchpad register
1655          */
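             /*
              * Illustratively (C-like, not assembled; %o0 is presumed to
              * carry the log buffer address argument):
              *
              *	errlog = ((eidr & ERRLOG_REG_EIDR_MASK)
              *	    << ERRLOG_REG_EIDR_SHIFT) | arg | ERRLOG_REG_NUMERR_MASK;
              *
              * i.e. the NUMERR count field starts at its maximum value.
              */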
1656         ldxa    [%g0]ASI_EIDR, %o2
1657         sethi   %hi(ERRLOG_REG_EIDR_MASK), %o1
1658         or      %o1, %lo(ERRLOG_REG_EIDR_MASK), %o1
1659         and     %o2, %o1, %o3
1660         sllx    %o3, ERRLOG_REG_EIDR_SHIFT, %o2
1661         or      %o2, %o0, %o3
1662         or      %o3, ERRLOG_REG_NUMERR_MASK, %o0
1663         set     OPL_SCRATCHPAD_ERRLOG, %o1
1664         stxa    %o0, [%o1]ASI_SCRATCHPAD
1665         /*
1666          * Disable all restrainable error traps
1667          */
1668         mov     AFSR_ECR, %o1
1669         ldxa    [%o1]ASI_AFSR, %o0
1670         andn    %o0, ASI_ECR_RTE_UE|ASI_ECR_RTE_CEDG, %o0
1671         retl
1672           stxa  %o0, [%o1]ASI_AFSR
1673         SET_SIZE(opl_error_setup)
1674 
1675         ENTRY_NP(cpu_early_feature_init)
1676         /*
1677          * Enable MMU translating multiple page sizes for
1678          * sITLB and sDTLB.
1679          */
1680         mov     LSU_MCNTL, %o0
1681         ldxa    [%o0] ASI_MCNTL, %o1
1682         or      %o1, MCNTL_MPG_SITLB | MCNTL_MPG_SDTLB, %o1
1683         stxa    %o1, [%o0] ASI_MCNTL
1684         /*
1685          * Demap all previous entries.
1686          */
1687         sethi   %hi(FLUSH_ADDR), %o1
1688         set     DEMAP_ALL_TYPE, %o0
1689         stxa    %g0, [%o0]ASI_DTLB_DEMAP
1690         stxa    %g0, [%o0]ASI_ITLB_DEMAP
1691         retl
1692           flush %o1
1693         SET_SIZE(cpu_early_feature_init)
1694 
1695         ENTRY(cpu_feature_init)
1696         !
1697         ! get the device_id and store it in the
1698         ! appropriate cpunodes structure, given
1699         ! the cpu's index
1700         !
1701         CPU_INDEX(%o0, %o1)
1702         mulx %o0, CPU_NODE_SIZE, %o0
1703         set  cpunodes + DEVICE_ID, %o1
1704         ldxa [%g0] ASI_DEVICE_SERIAL_ID, %o2
1705         stx  %o2, [%o0 + %o1]
1706         !
1707         ! initialize CPU registers
1708         !
1709         ba      opl_cpu_reg_init
1710         nop
1711         SET_SIZE(cpu_feature_init)
1712 
1713         /*
1714          * Clear the NPT (non-privileged trap) bit in the %tick/%stick
1715          * registers. In an effort to make the change in the
1716          * tick/stick counter as consistent as possible, we disable
1717          * all interrupts while we're changing the registers. We also
1718          * ensure that the read and write instructions are in the same
1719          * line in the instruction cache.
1720          */
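             /*
              * A two-register wrpr/wr writes (rs1 xor rs2). Since this
              * path is taken only when NPT (bit 63) is already set,
              * xoring with the bit-63 mask clears NPT while preserving
              * the counter value: tick = tick ^ (1ULL << 63).
              */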
1721         ENTRY_NP(cpu_clearticknpt)
1722         rdpr    %pstate, %g1            /* save processor state */
1723         andn    %g1, PSTATE_IE, %g3     /* turn off */
1724         wrpr    %g0, %g3, %pstate       /*   interrupts */
1725         rdpr    %tick, %g2              /* get tick register */
1726         brgez,pn %g2, 1f                /* if NPT bit off, we're done */
1727         mov     1, %g3                  /* create mask */
1728         sllx    %g3, 63, %g3            /*   for NPT bit */
1729         ba,a,pt %xcc, 2f
1730         .align  8                       /* Ensure rd/wr in same i$ line */
1731 2:
1732         rdpr    %tick, %g2              /* get tick register */
1733         wrpr    %g3, %g2, %tick         /* write tick register, */
1734                                         /*   clearing NPT bit   */
1735 1:
1736         rd      STICK, %g2              /* get stick register */
1737         brgez,pn %g2, 3f                /* if NPT bit off, we're done */
1738         mov     1, %g3                  /* create mask */
1739         sllx    %g3, 63, %g3            /*   for NPT bit */
1740         ba,a,pt %xcc, 4f
1741         .align  8                       /* Ensure rd/wr in same i$ line */
1742 4:
1743         rd      STICK, %g2              /* get stick register */
1744         wr      %g3, %g2, STICK         /* write stick register, */
1745                                         /*   clearing NPT bit   */
1746 3:
1747         jmp     %g4 + 4
1748         wrpr    %g0, %g1, %pstate       /* restore processor state */
1749 
1750         SET_SIZE(cpu_clearticknpt)
1751 
1752         /*
1753          * Halt the current strand with the suspend instruction.
1754          * The compiler/asm currently does not support the suspend
1755          * instruction mnemonic, so we use its byte code for now.
1756          */
1757         ENTRY_NP(cpu_halt_cpu)
1758         .word   0x81b01040
1759         retl
1760         nop
1761         SET_SIZE(cpu_halt_cpu)
1762 
1763         /*
1764          * Pause the current strand with the sleep instruction.
1765          * The compiler/asm currently does not support the sleep
1766          * instruction mnemonic, so we use its byte code for now.
1767          */
1768         ENTRY_NP(cpu_smt_pause)
1769         .word   0x81b01060
1770         retl
1771         nop
1772         SET_SIZE(cpu_smt_pause)
1773