/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 *
 * Assembly code support for the Cheetah+ module
 */

#include "assym.h"

#include <sys/asm_linkage.h>
#include <sys/mmu.h>
#include <vm/hat_sfmmu.h>
#include <sys/machparam.h>
#include <sys/machcpuvar.h>
#include <sys/machthread.h>
#include <sys/machtrap.h>
#include <sys/privregs.h>
#include <sys/trap.h>
#include <sys/cheetahregs.h>
#include <sys/us3_module.h>
#include <sys/xc_impl.h>
#include <sys/intreg.h>
#include <sys/async.h>
#include <sys/clock.h>
#include <sys/cheetahasm.h>
#include <sys/cmpregs.h>

#ifdef TRAPTRACE
#include <sys/traptrace.h>
#endif /* TRAPTRACE */


/* BEGIN CSTYLED */

/*
 * Cheetah+ version to reflush an Ecache line by index.
 *
 * By default we assume the Ecache is 2-way, so we flush both
 * ways.  Even if the cache is direct-mapped, no harm will come
 * from performing the flush twice, apart from perhaps a small
 * performance penalty.
 *
 * XXX - scr2 not used.
 */
#define ECACHE_REFLUSH_LINE(ec_set_size, index, scr2)                   \
        ldxa    [index]ASI_EC_DIAG, %g0;                                \
        ldxa    [index + ec_set_size]ASI_EC_DIAG, %g0;

/*
 * Cheetah+ version of ecache_flush_line.  Uses the Cheetah+ Ecache
 * Displacement Flush feature.
 */
#define ECACHE_FLUSH_LINE(physaddr, ec_set_size, scr1, scr2)            \
        sub     ec_set_size, 1, scr1;                                   \
        and     physaddr, scr1, scr1;                                   \
        set     CHP_ECACHE_IDX_DISP_FLUSH, scr2;                        \
        or      scr2, scr1, scr1;                                       \
        ECACHE_REFLUSH_LINE(ec_set_size, scr1, scr2)

/* END CSTYLED */

/*
 * Panther version to reflush a line from both the L2 cache and L3
 * cache by the respective indexes. Flushes all ways of the line from
 * each cache.
 *
 * l2_index     Index into the L2$ of the line to be flushed. This
 *              register will not be modified by this routine.
 * l3_index     Index into the L3$ of the line to be flushed. This
 *              register will not be modified by this routine.
 * scr2         scratch register.
 * scr3         scratch register.
 *
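 * Note: the short countdown loop between the L2 and L3 flush loops is
 * just a delay; it appears intended to give the L2 displacement
 * flushes time to complete before the L3 flush begins.
 *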
 */
#define PN_ECACHE_REFLUSH_LINE(l2_index, l3_index, scr2, scr3)          \
        set     PN_L2_MAX_SET, scr2;                                    \
        set     PN_L2_SET_SIZE, scr3;                                   \
1:                                                                      \
        ldxa    [l2_index + scr2]ASI_L2_TAG, %g0;                       \
        cmp     scr2, %g0;                                              \
        bg,a    1b;                                                     \
          sub   scr2, scr3, scr2;                                      \
        mov     6, scr2;                                                \
7:                                                                      \
        cmp     scr2, %g0;                                              \
        bg,a    7b;                                                     \
          sub   scr2, 1, scr2;                                          \
        set     PN_L3_MAX_SET, scr2;                                    \
        set     PN_L3_SET_SIZE, scr3;                                   \
2:                                                                      \
        ldxa    [l3_index + scr2]ASI_EC_DIAG, %g0;                      \
        cmp     scr2, %g0;                                              \
        bg,a    2b;                                                     \
          sub   scr2, scr3, scr2;

/*
 * Panther version of ecache_flush_line. Flushes the line corresponding
 * to physaddr from both the L2 cache and the L3 cache.
 *
 * physaddr     Input: Physical address to flush.
 *              Output: Physical address to flush (preserved).
 * l2_idx_out   Input: scratch register.
 *              Output: Index into the L2$ of the line to be flushed.
 * l3_idx_out   Input: scratch register.
 *              Output: Index into the L3$ of the line to be flushed.
 * scr3         scratch register.
 * scr4         scratch register.
 *
 */
#define PN_ECACHE_FLUSH_LINE(physaddr, l2_idx_out, l3_idx_out, scr3, scr4)      \
        set     PN_L3_SET_SIZE, l2_idx_out;                                     \
        sub     l2_idx_out, 1, l2_idx_out;                                      \
        and     physaddr, l2_idx_out, l3_idx_out;                               \
        set     PN_L3_IDX_DISP_FLUSH, l2_idx_out;                               \
        or      l2_idx_out, l3_idx_out, l3_idx_out;                             \
        set     PN_L2_SET_SIZE, l2_idx_out;                                     \
        sub     l2_idx_out, 1, l2_idx_out;                                      \
        and     physaddr, l2_idx_out, l2_idx_out;                               \
        set     PN_L2_IDX_DISP_FLUSH, scr3;                                     \
        or      l2_idx_out, scr3, l2_idx_out;                                   \
        PN_ECACHE_REFLUSH_LINE(l2_idx_out, l3_idx_out, scr3, scr4)

/*
 * Fast ECC error at TL>0 handler
 * We get here via trap 70 at TL>0 -> software trap 0 at TL>0.  We enter
 * this routine with %g1 and %g2 already saved in %tpc, %tnpc and %tstate.
 * For a complete description of the Fast ECC at TL>0 handling see the
 * comment block "Cheetah/Cheetah+ Fast ECC at TL>0 trap strategy" in
 * us3_common_asm.s.
 */

        .section ".text"
        .align  64
        ENTRY_NP(fast_ecc_tl1_err)

        /*
         * This macro turns off the D$/I$ if they are on and saves their
         * original state in ch_err_tl1_tmp, saves all the %g registers in the
         * ch_err_tl1_data structure, updates the ch_err_tl1_flags and saves
         * the %tpc in ch_err_tl1_tpc.  At the end of this macro, %g1 will
         * point to the ch_err_tl1_data structure and the original D$/I$ state
         * will be saved in ch_err_tl1_tmp.  All %g registers except for %g1
         * will be available.
         */
        CH_ERR_TL1_FECC_ENTER;

        /*
         * Get the diagnostic logout data.  %g4 must be initialized to
         * current CEEN state, %g5 must point to logout structure in
         * ch_err_tl1_data_t.  %g3 will contain the nesting count upon
         * return.
         */
        ldxa    [%g0]ASI_ESTATE_ERR, %g4
        and     %g4, EN_REG_CEEN, %g4
        add     %g1, CH_ERR_TL1_LOGOUT, %g5
        DO_TL1_CPU_LOGOUT(%g3, %g2, %g4, %g5, %g6, %g3, %g4)

        /*
         * If the logout nesting count is exceeded, we're probably
         * not making any progress, try to panic instead.
         */
        cmp     %g3, CLO_NESTING_MAX
        bge     fecc_tl1_err
          nop

        /*
         * Save the current CEEN and NCEEN state in %g7 and turn them off
         * before flushing the Ecache.
         */
        ldxa    [%g0]ASI_ESTATE_ERR, %g7
        andn    %g7, EN_REG_CEEN | EN_REG_NCEEN, %g5
        stxa    %g5, [%g0]ASI_ESTATE_ERR
        membar  #Sync

        /*
         * Flush the Ecache, using the largest possible cache size with the
         * smallest possible line size since we can't get the actual sizes
         * from the cpu_node due to DTLB misses.
         */
        PN_L2_FLUSHALL(%g3, %g4, %g5)

        set     CH_ECACHE_MAX_SIZE, %g4
        set     CH_ECACHE_MIN_LSIZE, %g5

        GET_CPU_IMPL(%g6)
        cmp     %g6, PANTHER_IMPL
        bne     %xcc, 2f
          nop
        set     PN_L3_SIZE, %g4
2:
        mov     %g6, %g3
        CHP_ECACHE_FLUSHALL(%g4, %g5, %g3)

        /*
         * Restore CEEN and NCEEN to the previous state.
         */
        stxa    %g7, [%g0]ASI_ESTATE_ERR
        membar  #Sync

        /*
         * If we turned off the D$, then flush it and turn it back on.
         */
        ldxa    [%g1 + CH_ERR_TL1_TMP]%asi, %g3
        andcc   %g3, CH_ERR_TSTATE_DC_ON, %g0
        bz      %xcc, 3f
          nop

        /*
         * Flush the D$.
         */
        ASM_LD(%g4, dcache_size)
        ASM_LD(%g5, dcache_linesize)
        CH_DCACHE_FLUSHALL(%g4, %g5, %g6)

        /*
         * Turn the D$ back on.
         */
        ldxa    [%g0]ASI_DCU, %g3
        or      %g3, DCU_DC, %g3
        stxa    %g3, [%g0]ASI_DCU
        membar  #Sync
3:
        /*
         * If we turned off the I$, then flush it and turn it back on.
         */
        ldxa    [%g1 + CH_ERR_TL1_TMP]%asi, %g3
        andcc   %g3, CH_ERR_TSTATE_IC_ON, %g0
        bz      %xcc, 4f
          nop

        /*
         * Flush the I$.  Panther has different I$ parameters, and we
         * can't access the logout I$ params without possibly generating
         * a MMU miss.
         */
        GET_CPU_IMPL(%g6)
        set     PN_ICACHE_SIZE, %g3
        set     CH_ICACHE_SIZE, %g4
        mov     CH_ICACHE_LSIZE, %g5
        cmp     %g6, PANTHER_IMPL
        movz    %xcc, %g3, %g4
        movz    %xcc, PN_ICACHE_LSIZE, %g5
        CH_ICACHE_FLUSHALL(%g4, %g5, %g6, %g3)

        /*
         * Turn the I$ back on.  Changing DCU_IC requires flush.
         */
        ldxa    [%g0]ASI_DCU, %g3
        or      %g3, DCU_IC, %g3
        stxa    %g3, [%g0]ASI_DCU
        flush   %g0
4:

#ifdef TRAPTRACE
        /*
         * Get current trap trace entry physical pointer.
         */
        CPU_INDEX(%g6, %g5)
        sll     %g6, TRAPTR_SIZE_SHIFT, %g6
        set     trap_trace_ctl, %g5
        add     %g6, %g5, %g6
        ld      [%g6 + TRAPTR_LIMIT], %g5
        tst     %g5
        be      %icc, skip_traptrace
          nop
        ldx     [%g6 + TRAPTR_PBASE], %g5
        ld      [%g6 + TRAPTR_OFFSET], %g4
        add     %g5, %g4, %g5

        /*
         * Create trap trace entry.
         */
        rd      %asi, %g7
        wr      %g0, TRAPTR_ASI, %asi
        rd      STICK, %g4
        stxa    %g4, [%g5 + TRAP_ENT_TICK]%asi
        rdpr    %tl, %g4
        stha    %g4, [%g5 + TRAP_ENT_TL]%asi
        rdpr    %tt, %g4
        stha    %g4, [%g5 + TRAP_ENT_TT]%asi
        rdpr    %tpc, %g4
        stna    %g4, [%g5 + TRAP_ENT_TPC]%asi
        rdpr    %tstate, %g4
        stxa    %g4, [%g5 + TRAP_ENT_TSTATE]%asi
        stna    %sp, [%g5 + TRAP_ENT_SP]%asi
        stna    %g0, [%g5 + TRAP_ENT_TR]%asi
        wr      %g0, %g7, %asi
        ldxa    [%g1 + CH_ERR_TL1_SDW_AFAR]%asi, %g3
        ldxa    [%g1 + CH_ERR_TL1_SDW_AFSR]%asi, %g4
        wr      %g0, TRAPTR_ASI, %asi
        stna    %g3, [%g5 + TRAP_ENT_F1]%asi
        stna    %g4, [%g5 + TRAP_ENT_F2]%asi
        wr      %g0, %g7, %asi
        ldxa    [%g1 + CH_ERR_TL1_AFAR]%asi, %g3
        ldxa    [%g1 + CH_ERR_TL1_AFSR]%asi, %g4
        wr      %g0, TRAPTR_ASI, %asi
        stna    %g3, [%g5 + TRAP_ENT_F3]%asi
        stna    %g4, [%g5 + TRAP_ENT_F4]%asi
        wr      %g0, %g7, %asi

        /*
         * Advance trap trace pointer.
         */
        ld      [%g6 + TRAPTR_OFFSET], %g5
        ld      [%g6 + TRAPTR_LIMIT], %g4
        st      %g5, [%g6 + TRAPTR_LAST_OFFSET]
        add     %g5, TRAP_ENT_SIZE, %g5
        sub     %g4, TRAP_ENT_SIZE, %g4
        cmp     %g5, %g4
        movge   %icc, 0, %g5
        st      %g5, [%g6 + TRAPTR_OFFSET]
skip_traptrace:
#endif  /* TRAPTRACE */

        /*
         * If nesting count is not zero, skip all the AFSR/AFAR
         * handling and just do the necessary cache-flushing.
         */
        ldxa    [%g1 + CH_ERR_TL1_NEST_CNT]%asi, %g2
        brnz    %g2, 6f
          nop

        /*
         * If a UCU or L3_UCU followed by a WDU has occurred, go ahead
         * and panic, since a UE will occur (on the retry) before the
         * UCU and WDU messages are enqueued.  On a Panther processor,
         * we also need to see an L3_WDU before panicking.  Note that
         * we avoid accessing the _EXT ASIs if not on a Panther.
         */
        ldxa    [%g1 + CH_ERR_TL1_SDW_AFSR]%asi, %g3
        set     1, %g4
        sllx    %g4, C_AFSR_UCU_SHIFT, %g4
        btst    %g4, %g3                ! UCU in original shadow AFSR?
        bnz     %xcc, 5f
          nop
        GET_CPU_IMPL(%g6)
        cmp     %g6, PANTHER_IMPL
        bne     %xcc, 6f                ! not Panther, no UCU, skip the rest
          nop
        ldxa    [%g1 + CH_ERR_TL1_SDW_AFSR_EXT]%asi, %g3
        btst    C_AFSR_L3_UCU, %g3      ! L3_UCU in original shadow AFSR_EXT?
        bz      %xcc, 6f                ! neither UCU nor L3_UCU was seen
          nop
5:
        ldxa    [%g1 + CH_ERR_TL1_AFSR]%asi, %g4        ! original AFSR
        ldxa    [%g0]ASI_AFSR, %g3      ! current AFSR
        or      %g3, %g4, %g3           ! %g3 = original + current AFSR
        set     1, %g4
        sllx    %g4, C_AFSR_WDU_SHIFT, %g4
        btst    %g4, %g3                ! WDU in original or current AFSR?
        bz      %xcc, 6f                ! no WDU, skip remaining tests
          nop
        GET_CPU_IMPL(%g6)
        cmp     %g6, PANTHER_IMPL
        bne     %xcc, fecc_tl1_err      ! if not Panther, panic (saw UCU, WDU)
          nop
        ldxa    [%g1 + CH_ERR_TL1_SDW_AFSR_EXT]%asi, %g4 ! original AFSR_EXT
        set     ASI_AFSR_EXT_VA, %g6    ! ASI of current AFSR_EXT
        ldxa    [%g6]ASI_AFSR, %g3      ! value of current AFSR_EXT
        or      %g3, %g4, %g3           ! %g3 = original + current AFSR_EXT
        btst    C_AFSR_L3_WDU, %g3      ! L3_WDU in original or current AFSR?
        bnz     %xcc, fecc_tl1_err      ! panic (saw L3_WDU and UCU or L3_UCU)
          nop
6:
        /*
         * We fall into this macro if we've successfully logged the error in
         * the ch_err_tl1_data structure and want the PIL15 softint to pick
         * it up and log it.  %g1 must point to the ch_err_tl1_data structure.
         * Restores the %g registers and issues retry.
         */
        CH_ERR_TL1_EXIT;

        /*
         * Establish panic exit label.
         */
        CH_ERR_TL1_PANIC_EXIT(fecc_tl1_err);

        SET_SIZE(fast_ecc_tl1_err)


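/*
 * scrubphys - scrub a physical memory location.
 *
 * %o0 is the aligned physical address to scrub and %o1 the E$ set
 * size.  With interrupts and address masking disabled, we
 * displacement-flush the E$ line for the address, then use a casxa
 * so the line is pulled back in and marked modified, and
 * displacement-flush it again so the corrected data is written back
 * to memory.  The membar #Sync in the retl delay slot pushes the data
 * out of the writeback buffers.
 */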
        ENTRY(scrubphys)
        rdpr    %pstate, %o4
        andn    %o4, PSTATE_IE | PSTATE_AM, %o5
        wrpr    %o5, %g0, %pstate       ! clear IE, AM bits

        GET_CPU_IMPL(%o5)               ! Panther Ecache is flushed differently
        cmp     %o5, PANTHER_IMPL
        bne     scrubphys_1
          nop
        PN_ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3, %o5)
        casxa   [%o0]ASI_MEM, %g0, %g0
        PN_ECACHE_REFLUSH_LINE(%o1, %o2, %o3, %o0)
        b       scrubphys_2
          nop
scrubphys_1:
        ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3)
        casxa   [%o0]ASI_MEM, %g0, %g0
        ECACHE_REFLUSH_LINE(%o1, %o2, %o3)
scrubphys_2:
        wrpr    %g0, %o4, %pstate       ! restore earlier pstate register value

        retl
          membar #Sync                  ! move the data out of the load buffer
        SET_SIZE(scrubphys)


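/*
 * clearphys - clear a bad datum (e.g. one with an uncorrectable
 * error) out of memory.
 *
 * %o0 is the physical address of the 16 bytes to overwrite.  We first
 * turn off NCEEN so that touching the bad data does not generate
 * further error reports, read and rewrite the surrounding 64-byte
 * subblock so the W$ line is filled with good data, then overwrite
 * the target 16 bytes with a recognizable pattern (0xbadecc00badecc01)
 * and displacement-flush the line out of the E$ the same way
 * scrubphys does, so the ECC is recomputed on writeback.  Finally the
 * AFSR is cleared and NCEEN is restored.
 */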
        ENTRY(clearphys)
        /* turn off IE, AM bits */
        rdpr    %pstate, %o4
        andn    %o4, PSTATE_IE | PSTATE_AM, %o5
        wrpr    %o5, %g0, %pstate

        /* turn off NCEEN */
        ldxa    [%g0]ASI_ESTATE_ERR, %o5
        andn    %o5, EN_REG_NCEEN, %o3
        stxa    %o3, [%g0]ASI_ESTATE_ERR
        membar  #Sync

        /* align the address to the 64-byte E$ subblock size */
        mov     CH_ECACHE_SUBBLK_SIZE, %o2
        andn    %o0, (CH_ECACHE_SUBBLK_SIZE - 1), %g1

        /* move the good data into the W$ */
clearphys_1:
        subcc   %o2, 8, %o2
        ldxa    [%g1 + %o2]ASI_MEM, %g2
        bge     clearphys_1
          stxa  %g2, [%g1 + %o2]ASI_MEM

        /* now overwrite the bad data */
        setx    0xbadecc00badecc01, %g1, %g2
        stxa    %g2, [%o0]ASI_MEM
        mov     8, %g1
        stxa    %g2, [%o0 + %g1]ASI_MEM

        GET_CPU_IMPL(%o3)               ! Panther Ecache is flushed differently
        cmp     %o3, PANTHER_IMPL
        bne     clearphys_2
          nop
        PN_ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3, %g1)
        casxa   [%o0]ASI_MEM, %g0, %g0
        PN_ECACHE_REFLUSH_LINE(%o1, %o2, %o3, %o0)
        b       clearphys_3
          nop
clearphys_2:
        ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3)
        casxa   [%o0]ASI_MEM, %g0, %g0
        ECACHE_REFLUSH_LINE(%o1, %o2, %o3)
clearphys_3:
        /* clear the AFSR */
        ldxa    [%g0]ASI_AFSR, %o1
        stxa    %o1, [%g0]ASI_AFSR
        membar  #Sync

        /* turn NCEEN back on */
        stxa    %o5, [%g0]ASI_ESTATE_ERR
        membar  #Sync

        /* return and re-enable IE and AM */
        retl
          wrpr  %g0, %o4, %pstate
        SET_SIZE(clearphys)


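/*
 * ecache_flush_line - flush the E$ line corresponding to the physical
 * address in %o0.  %o1 holds the E$ set size, which is used by the
 * Cheetah+ displacement flush; the Panther path computes its own
 * L2/L3 indexes and uses %o1 only as scratch.
 */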
        ENTRY(ecache_flush_line)

        GET_CPU_IMPL(%o3)               ! Panther Ecache is flushed differently
        cmp     %o3, PANTHER_IMPL
        bne     ecache_flush_line_1
          nop

        PN_ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3, %o4)
        b       ecache_flush_line_2
          nop
ecache_flush_line_1:
        ECACHE_FLUSH_LINE(%o0, %o1, %o2, %o3)
ecache_flush_line_2:
        retl
          nop
        SET_SIZE(ecache_flush_line)

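/*
 * set_afsr_ext - write the value in %o0 to the Panther AFSR_EXT
 * register.
 */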
        ENTRY(set_afsr_ext)
        set     ASI_AFSR_EXT_VA, %o1
        stxa    %o0, [%o1]ASI_AFSR              ! afsr_ext reg
        membar  #Sync
        retl
          nop
        SET_SIZE(set_afsr_ext)


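/*
 * itlb_parity_trap - ITLB parity error trap handler (Panther).
 * Collects diagnostic information into the TLB logout area when it is
 * available, clears both ways of the offending ITLB entry, and then
 * calls cpu_tlb_parity_error via sys_trap to log the error.
 */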
        ENTRY_NP(itlb_parity_trap)
        /*
         * Collect important information about the trap which will be
         * used as a parameter to the TL0 handler.
         */
        wr      %g0, ASI_IMMU, %asi
        rdpr    %tpc, %g2                       ! VA that caused the IMMU trap
        ldxa    [MMU_TAG_ACCESS_EXT]%asi, %g3   ! read the trap VA page size
        set     PN_ITLB_PGSZ_MASK, %g4
        and     %g3, %g4, %g3
        ldxa    [MMU_TAG_ACCESS]%asi, %g4
        set     TAGREAD_CTX_MASK, %g5
        and     %g4, %g5, %g4
        or      %g4, %g3, %g3                   ! 'or' in the trap context and
        mov     1, %g4                          ! add the IMMU flag to complete
        sllx    %g4, PN_TLO_INFO_IMMU_SHIFT, %g4
        or      %g4, %g3, %g3                   ! the tlo_info field for logout
        stxa    %g0, [MMU_SFSR]%asi             ! clear the SFSR
        membar  #Sync

        /*
         * at this point:
         *    %g2 - contains the VA whose lookup caused the trap
         *    %g3 - contains the tlo_info field
         *
         * Next, we calculate the TLB index value for the failing VA.
         */
        mov     %g2, %g4                        ! We need the ITLB index
        set     PN_ITLB_PGSZ_MASK, %g5
        and     %g3, %g5, %g5
        srlx    %g5, PN_ITLB_PGSZ_SHIFT, %g5
        PN_GET_TLB_INDEX(%g4, %g5)              ! %g4 has the index
        sllx    %g4, PN_TLB_ACC_IDX_SHIFT, %g4  ! shift the index into place
        set     PN_ITLB_T512, %g5
        or      %g4, %g5, %g4                   ! and add in the TLB ID

        /*
         * at this point:
         *    %g2 - contains the VA whose lookup caused the trap
         *    %g3 - contains the tlo_info field
         *    %g4 - contains the TLB access index value for the
         *          VA/PgSz in question
         *
         * Check to see if the logout structure is available.
         */
        set     CHPR_TLB_LOGOUT, %g6
        GET_CPU_PRIVATE_PTR(%g6, %g1, %g5, itlb_parity_trap_1)
        set     LOGOUT_INVALID_U32, %g6
        sllx    %g6, 32, %g6                    ! if our logout structure is
        set     LOGOUT_INVALID_L32, %g5         ! unavailable or if it is
        or      %g5, %g6, %g5                   ! already being used, then we
        ldx     [%g1 + PN_TLO_ADDR], %g6        ! don't collect any diagnostic
        cmp     %g6, %g5                        ! information before clearing
        bne     itlb_parity_trap_1              ! and logging the error.
          nop

        /*
         * Record the logout information. %g4 contains our index + TLB ID
         * for use in ASI_ITLB_ACCESS and ASI_ITLB_TAGREAD. %g1 contains
         * the pointer to our logout struct.
         */
        stx     %g3, [%g1 + PN_TLO_INFO]
        stx     %g2, [%g1 + PN_TLO_ADDR]
        stx     %g2, [%g1 + PN_TLO_PC]          ! %tpc == fault addr for IMMU

        add     %g1, PN_TLO_ITLB_TTE, %g1       ! move up the pointer

        ldxa    [%g4]ASI_ITLB_ACCESS, %g5       ! read the data
        stx     %g5, [%g1 + CH_TLO_TTE_DATA]    ! store it away
        ldxa    [%g4]ASI_ITLB_TAGREAD, %g5      ! read the tag
        stx     %g5, [%g1 + CH_TLO_TTE_TAG]     ! store it away

        set     PN_TLB_ACC_WAY_BIT, %g6         ! same thing again for way 1
        or      %g4, %g6, %g4
        add     %g1, CH_TLO_TTE_SIZE, %g1       ! move up the pointer

        ldxa    [%g4]ASI_ITLB_ACCESS, %g5       ! read the data
        stx     %g5, [%g1 + CH_TLO_TTE_DATA]    ! store it away
        ldxa    [%g4]ASI_ITLB_TAGREAD, %g5      ! read the tag
        stx     %g5, [%g1 + CH_TLO_TTE_TAG]     ! store it away

        andn    %g4, %g6, %g4                   ! back to way 0

itlb_parity_trap_1:
        /*
         * at this point:
         *    %g2 - contains the VA whose lookup caused the trap
         *    %g3 - contains the tlo_info field
         *    %g4 - contains the TLB access index value for the
         *          VA/PgSz in question
         *
         * Here we will clear the errors from the TLB.
         */
        set     MMU_TAG_ACCESS, %g5             ! We write a TTE tag value of
        stxa    %g0, [%g5]ASI_IMMU              ! 0 as it will be invalid.
        stxa    %g0, [%g4]ASI_ITLB_ACCESS       ! Write the data and tag
        membar  #Sync

        set     PN_TLB_ACC_WAY_BIT, %g6         ! same thing again for way 1
        or      %g4, %g6, %g4

        stxa    %g0, [%g4]ASI_ITLB_ACCESS       ! Write same data and tag
        membar  #Sync

        sethi   %hi(FLUSH_ADDR), %g6            ! PRM says we need to issue a
        flush   %g6                             ! flush after writing MMU regs

        /*
         * at this point:
         *    %g2 - contains the VA whose lookup caused the trap
         *    %g3 - contains the tlo_info field
         *
         * Call cpu_tlb_parity_error via systrap at PIL 14 unless we're
         * already at PIL 15.
         */
        set     cpu_tlb_parity_error, %g1
        rdpr    %pil, %g4
        cmp     %g4, PIL_14
        movl    %icc, PIL_14, %g4
        ba      sys_trap
          nop
        SET_SIZE(itlb_parity_trap)

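/*
 * dtlb_parity_trap - DTLB parity error trap handler (Panther).
 * Works like itlb_parity_trap, except that both the T512_0 and T512_1
 * DTLBs must be handled, and the trap may have occurred at TL>1, in
 * which case we skip the logout collection and only record the TL>1
 * flag before clearing the TLB entries.
 */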
        ENTRY_NP(dtlb_parity_trap)
        /*
         * Collect important information about the trap which will be
         * used as a parameter to the TL0 handler.
         */
        wr      %g0, ASI_DMMU, %asi
        ldxa    [MMU_SFAR]%asi, %g2             ! VA that caused the DMMU trap
        ldxa    [MMU_TAG_ACCESS_EXT]%asi, %g3   ! read the trap VA page sizes
        set     PN_DTLB_PGSZ_MASK, %g4
        and     %g3, %g4, %g3
        ldxa    [MMU_TAG_ACCESS]%asi, %g4
        set     TAGREAD_CTX_MASK, %g5           ! 'or' in the trap context
        and     %g4, %g5, %g4                   ! to complete the tlo_info
        or      %g4, %g3, %g3                   ! field for logout
        stxa    %g0, [MMU_SFSR]%asi             ! clear the SFSR
        membar  #Sync

        /*
         * at this point:
         *    %g2 - contains the VA whose lookup caused the trap
         *    %g3 - contains the tlo_info field
         *
         * Calculate the TLB index values for the failing VA. Since the T512
         * TLBs can be configured for different page sizes, we need to find
         * the index into each one separately.
         */
        mov     %g2, %g4                        ! First we get the DTLB_0 index
        set     PN_DTLB_PGSZ0_MASK, %g5
        and     %g3, %g5, %g5
        srlx    %g5, PN_DTLB_PGSZ0_SHIFT, %g5
        PN_GET_TLB_INDEX(%g4, %g5)              ! %g4 has the DTLB_0 index
        sllx    %g4, PN_TLB_ACC_IDX_SHIFT, %g4  ! shift the index into place
        set     PN_DTLB_T512_0, %g5
        or      %g4, %g5, %g4                   ! and add in the TLB ID

        mov     %g2, %g7                        ! Next we get the DTLB_1 index
        set     PN_DTLB_PGSZ1_MASK, %g5
        and     %g3, %g5, %g5
        srlx    %g5, PN_DTLB_PGSZ1_SHIFT, %g5
        PN_GET_TLB_INDEX(%g7, %g5)              ! %g7 has the DTLB_1 index
        sllx    %g7, PN_TLB_ACC_IDX_SHIFT, %g7  ! shift the index into place
        set     PN_DTLB_T512_1, %g5
        or      %g7, %g5, %g7                   ! and add in the TLB ID

        /*
         * at this point:
         *    %g2 - contains the VA whose lookup caused the trap
         *    %g3 - contains the tlo_info field
         *    %g4 - contains the T512_0 access index value for the
         *          VA/PgSz in question
         *    %g7 - contains the T512_1 access index value for the
         *          VA/PgSz in question
         *
         * If this trap happened at TL>0, then we don't want to mess
         * with the normal logout struct since that could cause a TLB
         * miss.
         */
        rdpr    %tl, %g6                        ! read current trap level
        cmp     %g6, 1                          ! skip over the tl>1 code
        ble     dtlb_parity_trap_1              ! if TL <= 1.
          nop

        /*
         * If we are here, then the trap happened at TL>1. Simply
         * update our tlo_info field and then skip to the TLB flush
         * code.
         */
        mov     1, %g6
        sllx    %g6, PN_TLO_INFO_TL1_SHIFT, %g6
        or      %g6, %g3, %g3
        ba      dtlb_parity_trap_2
          nop

dtlb_parity_trap_1:
        /*
         * at this point:
         *    %g2 - contains the VA whose lookup caused the trap
         *    %g3 - contains the tlo_info field
         *    %g4 - contains the T512_0 access index value for the
         *          VA/PgSz in question
         *    %g7 - contains the T512_1 access index value for the
         *          VA/PgSz in question
         *
         * Check to see if the logout structure is available.
         */
        set     CHPR_TLB_LOGOUT, %g6
        GET_CPU_PRIVATE_PTR(%g6, %g1, %g5, dtlb_parity_trap_2)
        set     LOGOUT_INVALID_U32, %g6
        sllx    %g6, 32, %g6                    ! if our logout structure is
        set     LOGOUT_INVALID_L32, %g5         ! unavailable or if it is
        or      %g5, %g6, %g5                   ! already being used, then we
        ldx     [%g1 + PN_TLO_ADDR], %g6        ! don't collect any diagnostic
        cmp     %g6, %g5                        ! information before clearing
        bne     dtlb_parity_trap_2              ! and logging the error.
          nop

        /*
         * Record the logout information. %g4 contains our DTLB_0
         * index + TLB ID and %g7 contains our DTLB_1 index + TLB ID,
         * both of which will be used for ASI_DTLB_ACCESS and
         * ASI_DTLB_TAGREAD. %g1 contains the pointer to our logout
         * struct.
         */
        stx     %g3, [%g1 + PN_TLO_INFO]
        stx     %g2, [%g1 + PN_TLO_ADDR]
        rdpr    %tpc, %g5
        stx     %g5, [%g1 + PN_TLO_PC]

        add     %g1, PN_TLO_DTLB_TTE, %g1       ! move up the pointer

        ldxa    [%g4]ASI_DTLB_ACCESS, %g5       ! read the data from DTLB_0
        stx     %g5, [%g1 + CH_TLO_TTE_DATA]    ! way 0 and store it away
        ldxa    [%g4]ASI_DTLB_TAGREAD, %g5      ! read the tag from DTLB_0
        stx     %g5, [%g1 + CH_TLO_TTE_TAG]     ! way 0 and store it away

        ldxa    [%g7]ASI_DTLB_ACCESS, %g5       ! now repeat for DTLB_1 way 0
        stx     %g5, [%g1 + (CH_TLO_TTE_DATA + (CH_TLO_TTE_SIZE * 2))]
        ldxa    [%g7]ASI_DTLB_TAGREAD, %g5
        stx     %g5, [%g1 + (CH_TLO_TTE_TAG + (CH_TLO_TTE_SIZE * 2))]

        set     PN_TLB_ACC_WAY_BIT, %g6         ! same thing again for way 1
        or      %g4, %g6, %g4                   ! of each TLB.
        or      %g7, %g6, %g7
        add     %g1, CH_TLO_TTE_SIZE, %g1       ! move up the pointer

        ldxa    [%g4]ASI_DTLB_ACCESS, %g5       ! read the data from DTLB_0
        stx     %g5, [%g1 + CH_TLO_TTE_DATA]    ! way 1 and store it away
        ldxa    [%g4]ASI_DTLB_TAGREAD, %g5      ! read the tag from DTLB_0
        stx     %g5, [%g1 + CH_TLO_TTE_TAG]     ! way 1 and store it away

        ldxa    [%g7]ASI_DTLB_ACCESS, %g5       ! now repeat for DTLB_1 way 1
        stx     %g5, [%g1 + (CH_TLO_TTE_DATA + (CH_TLO_TTE_SIZE * 2))]
        ldxa    [%g7]ASI_DTLB_TAGREAD, %g5
        stx     %g5, [%g1 + (CH_TLO_TTE_TAG + (CH_TLO_TTE_SIZE * 2))]

        andn    %g4, %g6, %g4                   ! back to way 0
        andn    %g7, %g6, %g7                   ! back to way 0

dtlb_parity_trap_2:
        /*
         * at this point:
         *    %g2 - contains the VA whose lookup caused the trap
         *    %g3 - contains the tlo_info field
         *    %g4 - contains the T512_0 access index value for the
         *          VA/PgSz in question
         *    %g7 - contains the T512_1 access index value for the
         *          VA/PgSz in question
         *
         * Here we will clear the errors from the DTLB.
         */
        set     MMU_TAG_ACCESS, %g5             ! We write a TTE tag value of
        stxa    %g0, [%g5]ASI_DMMU              ! 0 as it will be invalid.
        stxa    %g0, [%g4]ASI_DTLB_ACCESS       ! Write the data and tag.
        stxa    %g0, [%g7]ASI_DTLB_ACCESS       ! Now repeat for DTLB_1 way 0
        membar  #Sync

        set     PN_TLB_ACC_WAY_BIT, %g6         ! same thing again for way 1
        or      %g4, %g6, %g4
        or      %g7, %g6, %g7

        stxa    %g0, [%g4]ASI_DTLB_ACCESS       ! Write same data and tag.
        stxa    %g0, [%g7]ASI_DTLB_ACCESS       ! Now repeat for DTLB_1 way 1
        membar  #Sync

        sethi   %hi(FLUSH_ADDR), %g6            ! PRM says we need to issue a
        flush   %g6                             ! flush after writing MMU regs

        /*
         * at this point:
         *    %g2 - contains the VA whose lookup caused the trap
         *    %g3 - contains the tlo_info field
         *
         * Call cpu_tlb_parity_error via systrap at PIL 14 unless we're
         * already at PIL 15. We do this even for TL>1 traps since
         * those will lead to a system panic.
         */
        set     cpu_tlb_parity_error, %g1
        rdpr    %pil, %g4
        cmp     %g4, PIL_14
        movl    %icc, PIL_14, %g4
        ba      sys_trap
          nop
        SET_SIZE(dtlb_parity_trap)


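/*
 * pn_get_tlb_index - C-callable wrapper around the PN_GET_TLB_INDEX
 * macro.  %o0 holds the virtual address and %o1 the page size field;
 * the computed TLB index is returned in %o0.
 */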
        ENTRY(pn_get_tlb_index)

        PN_GET_TLB_INDEX(%o0, %o1)

        retl
          nop
        SET_SIZE(pn_get_tlb_index)


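/*
 * flush_ipb - flush the Panther Instruction Prefetch Buffer by
 * writing zeros to every IPB tag, then issuing a flush so the change
 * is visible to instruction fetch.
 */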
        ENTRY(flush_ipb)
        clr     %o0

flush_ipb_1:
        stxa    %g0, [%o0]ASI_IPB_TAG
        membar  #Sync
        cmp     %o0, PN_IPB_TAG_ADDR_MAX
        blt     flush_ipb_1
          add   %o0, PN_IPB_TAG_ADDR_LINESIZE, %o0

        sethi   %hi(FLUSH_ADDR), %o0
        flush   %o0
        retl
          nop
        SET_SIZE(flush_ipb)

 844