de-linting of .s files

/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include "assym.h"

#include <sys/asm_linkage.h>
#include <sys/privregs.h>
#include <sys/sun4asi.h>
#include <sys/spitregs.h>
#include <sys/cheetahregs.h>
#include <sys/machtrap.h>
#include <sys/machthread.h>
#include <sys/machbrand.h>
#include <sys/pcb.h>
#include <sys/pte.h>
#include <sys/mmu.h>
#include <sys/machpcb.h>
#include <sys/async.h>
#include <sys/intreg.h>
#include <sys/scb.h>
#include <sys/psr_compat.h>
#include <sys/syscall.h>
#include <sys/machparam.h>
#include <sys/traptrace.h>
#include <vm/hat_sfmmu.h>
#include <sys/archsystm.h>
#include <sys/utrap.h>
#include <sys/clock.h>
#include <sys/intr.h>
#include <sys/fpu/fpu_simulator.h>
#include <vm/seg_spt.h>

/*
 * WARNING: If you add a fast trap handler which can be invoked by a
 * non-privileged user, you may have to use the FAST_TRAP_DONE macro
 * instead of the "done" instruction to return to user mode. See the
 * comments for the "fast_trap_done" entry point for more information.
 *
 * An alternate FAST_TRAP_DONE_CHK_INTR macro should be used for the
 * cases where you always want to process any pending interrupts before
 * returning to user mode.
 */
#define FAST_TRAP_DONE          \
        ba,a    fast_trap_done

#define FAST_TRAP_DONE_CHK_INTR \
        ba,a    fast_trap_done_chk_intr

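/*
 * For illustration only (a hypothetical handler, not an entry in this
 * table): a fast trap reachable from non-privileged code would return
 * with one of the macros above rather than a bare "done", e.g.
 *
 *	my_fast_trap:
 *		! ... produce the result ...
 *		FAST_TRAP_DONE
 */
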
/*
 * SPARC V9 Trap Table
 *
 * Most of the trap handlers are made from common building
 * blocks, and some are instantiated multiple times within
 * the trap table. So, I build a bunch of macros, then
 * populate the table using only the macros.
 *
 * Many macros branch to sys_trap.  Its calling convention is:
 *      %g1             kernel trap handler
 *      %g2, %g3        args for above
 *      %g4             desired %pil
 */

#ifdef  TRAPTRACE

/*
 * Tracing macros. TT_TRACE adds two instructions, and TT_TRACE_L
 * three, when TRAPTRACE is defined.
 */
#define TT_TRACE(label)         \
        ba      label           ;\
        rd      %pc, %g7
#define TT_TRACE_INS    2

#define TT_TRACE_L(label)       \
        ba      label           ;\
        rd      %pc, %l4        ;\
        clr     %l4
#define TT_TRACE_L_INS  3

#else

#define TT_TRACE(label)
#define TT_TRACE_INS    0

#define TT_TRACE_L(label)
#define TT_TRACE_L_INS  0

#endif

/*
 * This first set is funneled to trap() with %tt as the type.
 * Trap will then either panic or send the user a signal.
 */
/*
 * NOT is used for traps that just shouldn't happen.
 * It comes in both single and quadruple flavors.
 */

        .global trap

#define NOT                     \
        TT_TRACE(trace_gen)     ;\
        set     trap, %g1       ;\
        rdpr    %tt, %g3        ;\
        ba,pt   %xcc, sys_trap  ;\
        sub     %g0, 1, %g4     ;\
        .align  32
#define NOT4    NOT; NOT; NOT; NOT
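/*
 * For reference, with TRAPTRACE disabled a single NOT entry expands to
 * roughly the following (padded to its table slot by the .align):
 *
 *	set	trap, %g1		! %g1 = kernel handler
 *	rdpr	%tt, %g3		! %g3 = trap type, passed to trap()
 *	ba,pt	%xcc, sys_trap
 *	sub	%g0, 1, %g4		! %g4 = -1, i.e. keep the current %pil
 *	.align	32
 */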
/*
 * RED is for traps that use the red mode handler.
 * We should never see these either.
 */
#define RED     NOT
/*
 * BAD is used for trap vectors we don't have a kernel
 * handler for.
 * It also comes in single and quadruple versions.
 */
#define BAD     NOT
#define BAD4    NOT4

#define DONE                    \
        done;                   \
        .align  32

/*
 * TRAP vectors to the trap() function.
 * Its main use is for user errors.
 */

        .global trap

#define TRAP(arg)               \
        TT_TRACE(trace_gen)     ;\
        set     trap, %g1       ;\
        mov     arg, %g3        ;\
        ba,pt   %xcc, sys_trap  ;\
        sub     %g0, 1, %g4     ;\
        .align  32

/*
 * SYSCALL is used for unsupported syscall interfaces (with 'which'
 * set to 'nosys') and legacy support of old SunOS 4.x syscalls (with
 * 'which' set to 'syscall_trap32').
 *
 * The SYSCALL_TRAP* macros are used for syscall entry points.
 * SYSCALL_TRAP is used to support LP64 syscalls and SYSCALL_TRAP32
 * is used to support ILP32.  Each macro can only be used once
 * since they each define a symbol.  The symbols are used as hot patch
 * points by the brand infrastructure to dynamically enable and disable
 * brand syscall interposition.  See the comments around BRAND_CALLBACK
 * and brand_plat_interposition_enable() for more information.
 */
#define SYSCALL_NOTT(which)             \
        set     (which), %g1            ;\
        ba,pt   %xcc, sys_trap          ;\
        sub     %g0, 1, %g4             ;\
        .align  32

#define SYSCALL(which)                  \
        TT_TRACE(trace_gen)             ;\
        SYSCALL_NOTT(which)

#define SYSCALL_TRAP32                          \
        TT_TRACE(trace_gen)                     ;\
        ALTENTRY(syscall_trap32_patch_point)    \
        SYSCALL_NOTT(syscall_trap32)

#define SYSCALL_TRAP                            \
        TT_TRACE(trace_gen)                     ;\
        ALTENTRY(syscall_trap_patch_point)      \
        SYSCALL_NOTT(syscall_trap)

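/*
 * For reference (a sketch, with TRAPTRACE disabled), SYSCALL_TRAP32
 * expands to roughly
 *
 *	syscall_trap32_patch_point:
 *		set	syscall_trap32, %g1
 *		ba,pt	%xcc, sys_trap
 *		sub	%g0, 1, %g4
 *		.align	32
 *
 * The brand infrastructure hot-patches the instruction at the patch
 * point to interpose on the syscall path; see BRAND_CALLBACK.
 */
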
#define FLUSHW(h_name)                  \
        .global h_name                  ;\
h_name:                                 ;\
        set     trap, %g1               ;\
        mov     T_FLUSHW, %g3           ;\
        sub     %g0, 1, %g4             ;\
        save                            ;\
        flushw                          ;\
        restore                         ;\
        FAST_TRAP_DONE                  ;\
        .align  32

/*
 * GOTO just jumps to a label.
 * It's used for things that can be fixed without going thru sys_trap.
 */
#define GOTO(label)             \
        .global label           ;\
        ba,a    label           ;\
        .empty                  ;\
        .align  32

/*
 * GOTO_TT just jumps to a label.
 * Correctable ECC error traps at levels 0 and 1 use this macro.
 * It's used for things that can be fixed without going thru sys_trap.
 */
#define GOTO_TT(label, ttlabel)         \
        .global label           ;\
        TT_TRACE(ttlabel)       ;\
        ba,a    label           ;\
        .empty                  ;\
        .align  32

/*
 * Privileged traps
 * Takes breakpoint if privileged, calls trap() if not.
 */
#define PRIV(label)                     \
        rdpr    %tstate, %g1            ;\
        btst    TSTATE_PRIV, %g1        ;\
        bnz     label                   ;\
        rdpr    %tt, %g3                ;\
        set     trap, %g1               ;\
        ba,pt   %xcc, sys_trap          ;\
        sub     %g0, 1, %g4             ;\
        .align  32

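/*
 * Note that the "rdpr %tt, %g3" above sits in the delay slot of the
 * bnz, so %g3 carries the trap type on both paths: to the privileged
 * handler at "label" and, via sys_trap, to trap().
 */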

/*
 * DTrace traps.
 */
#define DTRACE_PID                      \
        .global dtrace_pid_probe                                ;\
        set     dtrace_pid_probe, %g1                           ;\
        ba,pt   %xcc, user_trap                                 ;\
        sub     %g0, 1, %g4                                     ;\
        .align  32

#define DTRACE_RETURN                   \
        .global dtrace_return_probe                             ;\
        set     dtrace_return_probe, %g1                        ;\
        ba,pt   %xcc, user_trap                                 ;\
        sub     %g0, 1, %g4                                     ;\
        .align  32

/*
 * REGISTER WINDOW MANAGEMENT MACROS
 */

/*
 * various convenient units of padding
 */
#define SKIP(n) .skip 4*(n)

/*
 * CLEAN_WINDOW is the simple handler for cleaning a register window.
 */
#define CLEAN_WINDOW                                            \
        TT_TRACE_L(trace_win)                                   ;\
        rdpr %cleanwin, %l0; inc %l0; wrpr %l0, %cleanwin       ;\
        clr %l0; clr %l1; clr %l2; clr %l3                      ;\
        clr %l4; clr %l5; clr %l6; clr %l7                      ;\
        clr %o0; clr %o1; clr %o2; clr %o3                      ;\
        clr %o4; clr %o5; clr %o6; clr %o7                      ;\
        retry; .align 128

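/*
 * Layout note: the .align 128 above reflects that clean_window (like
 * the spill/fill traps below) is allotted four consecutive trap table
 * entries, i.e. one 32-instruction (128-byte) slot rather than the
 * usual 8-instruction entry.
 */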


/*
 * If we get an unresolved tlb miss while in a window handler, the fault
 * handler will resume execution at the last instruction of the window
 * handler, instead of delivering the fault to the kernel.  Spill handlers
 * use this to spill windows into the wbuf.
 *
 * The mixed handler works by checking %sp, and branching to the correct
 * handler.  This is done by branching back to label 1: for 32b frames,
 * or label 2: for 64b frames, which implies the handler order is: 32b,
 * 64b, mixed.  The 1: and 2: labels are offset into the routines to
 * allow the branches' delay slots to contain useful instructions.
 */

/*
 * SPILL_32bit spills a 32-bit-wide kernel register window.  It
 * assumes that the kernel context and the nucleus context are the
 * same.  The stack pointer is required to be eight-byte aligned even
 * though this code only needs it to be four-byte aligned.
 */
#define SPILL_32bit(tail)                                       \
        srl     %sp, 0, %sp                                     ;\
1:      st      %l0, [%sp + 0]                                  ;\
        st      %l1, [%sp + 4]                                  ;\
        st      %l2, [%sp + 8]                                  ;\
        st      %l3, [%sp + 12]                                 ;\
        st      %l4, [%sp + 16]                                 ;\
        st      %l5, [%sp + 20]                                 ;\
        st      %l6, [%sp + 24]                                 ;\
        st      %l7, [%sp + 28]                                 ;\
        st      %i0, [%sp + 32]                                 ;\
        st      %i1, [%sp + 36]                                 ;\
        st      %i2, [%sp + 40]                                 ;\
        st      %i3, [%sp + 44]                                 ;\
        st      %i4, [%sp + 48]                                 ;\
        st      %i5, [%sp + 52]                                 ;\
        st      %i6, [%sp + 56]                                 ;\
        st      %i7, [%sp + 60]                                 ;\
        TT_TRACE_L(trace_win)                                   ;\
        saved                                                   ;\
        retry                                                   ;\
        SKIP(31-19-TT_TRACE_L_INS)                              ;\
        ba,a,pt %xcc, fault_32bit_/**/tail                      ;\
        .empty

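/*
 * A worked example of the SKIP() padding: SPILL_32bit above is 19
 * instructions (the srl, 16 stores, saved, retry) plus TT_TRACE_L_INS
 * trace instructions, so SKIP(31-19-TT_TRACE_L_INS) pads the handler
 * out to 31 instructions and the final ba,a to the fault_32bit tail
 * handler occupies the 32nd and last slot of the 128-byte entry.
 */
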
/*
 * SPILL_32bit_asi spills a 32-bit-wide register window into a 32-bit
 * wide address space via the designated asi.  It is used to spill
 * non-kernel windows.  The stack pointer is required to be eight-byte
 * aligned even though this code only needs it to be four-byte
 * aligned.
 */
#define SPILL_32bit_asi(asi_num, tail)                          \
        srl     %sp, 0, %sp                                     ;\
1:      sta     %l0, [%sp + %g0]asi_num                         ;\
        mov     4, %g1                                          ;\
        sta     %l1, [%sp + %g1]asi_num                         ;\
        mov     8, %g2                                          ;\
        sta     %l2, [%sp + %g2]asi_num                         ;\
        mov     12, %g3                                         ;\
        sta     %l3, [%sp + %g3]asi_num                         ;\
        add     %sp, 16, %g4                                    ;\
        sta     %l4, [%g4 + %g0]asi_num                         ;\
        sta     %l5, [%g4 + %g1]asi_num                         ;\
        sta     %l6, [%g4 + %g2]asi_num                         ;\
        sta     %l7, [%g4 + %g3]asi_num                         ;\
        add     %g4, 16, %g4                                    ;\
        sta     %i0, [%g4 + %g0]asi_num                         ;\
        sta     %i1, [%g4 + %g1]asi_num                         ;\
        sta     %i2, [%g4 + %g2]asi_num                         ;\
        sta     %i3, [%g4 + %g3]asi_num                         ;\
        add     %g4, 16, %g4                                    ;\
        sta     %i4, [%g4 + %g0]asi_num                         ;\
        sta     %i5, [%g4 + %g1]asi_num                         ;\
        sta     %i6, [%g4 + %g2]asi_num                         ;\
        sta     %i7, [%g4 + %g3]asi_num                         ;\
        TT_TRACE_L(trace_win)                                   ;\
        saved                                                   ;\
        retry                                                   ;\
        SKIP(31-25-TT_TRACE_L_INS)                              ;\
        ba,a,pt %xcc, fault_32bit_/**/tail                      ;\
        .empty

/*
 * SPILL_32bit_tt1 spills a 32-bit-wide register window into a 32-bit
 * wide address space via the designated asi.  It is used to spill
 * windows at tl>1 where performance isn't the primary concern and
 * where we don't want to use unnecessary registers.  The stack
 * pointer is required to be eight-byte aligned even though this code
 * only needs it to be four-byte aligned.
 */
#define SPILL_32bit_tt1(asi_num, tail)                          \
        mov     asi_num, %asi                                   ;\
1:      srl     %sp, 0, %sp                                     ;\
        sta     %l0, [%sp + 0]%asi                              ;\
        sta     %l1, [%sp + 4]%asi                              ;\
        sta     %l2, [%sp + 8]%asi                              ;\
        sta     %l3, [%sp + 12]%asi                             ;\
        sta     %l4, [%sp + 16]%asi                             ;\
        sta     %l5, [%sp + 20]%asi                             ;\
        sta     %l6, [%sp + 24]%asi                             ;\
        sta     %l7, [%sp + 28]%asi                             ;\
        sta     %i0, [%sp + 32]%asi                             ;\
        sta     %i1, [%sp + 36]%asi                             ;\
        sta     %i2, [%sp + 40]%asi                             ;\
        sta     %i3, [%sp + 44]%asi                             ;\
        sta     %i4, [%sp + 48]%asi                             ;\
        sta     %i5, [%sp + 52]%asi                             ;\
        sta     %i6, [%sp + 56]%asi                             ;\
        sta     %i7, [%sp + 60]%asi                             ;\
        TT_TRACE_L(trace_win)                                   ;\
        saved                                                   ;\
        retry                                                   ;\
        SKIP(31-20-TT_TRACE_L_INS)                              ;\
        ba,a,pt %xcc, fault_32bit_/**/tail                      ;\
        .empty


/*
 * FILL_32bit fills a 32-bit-wide kernel register window.  It assumes
 * that the kernel context and the nucleus context are the same.  The
 * stack pointer is required to be eight-byte aligned even though this
 * code only needs it to be four-byte aligned.
 */
#define FILL_32bit(tail)                                        \
        srl     %sp, 0, %sp                                     ;\
1:      TT_TRACE_L(trace_win)                                   ;\
        ld      [%sp + 0], %l0                                  ;\
        ld      [%sp + 4], %l1                                  ;\
        ld      [%sp + 8], %l2                                  ;\
        ld      [%sp + 12], %l3                                 ;\
        ld      [%sp + 16], %l4                                 ;\
        ld      [%sp + 20], %l5                                 ;\
        ld      [%sp + 24], %l6                                 ;\
        ld      [%sp + 28], %l7                                 ;\
        ld      [%sp + 32], %i0                                 ;\
        ld      [%sp + 36], %i1                                 ;\
        ld      [%sp + 40], %i2                                 ;\
        ld      [%sp + 44], %i3                                 ;\
        ld      [%sp + 48], %i4                                 ;\
        ld      [%sp + 52], %i5                                 ;\
        ld      [%sp + 56], %i6                                 ;\
        ld      [%sp + 60], %i7                                 ;\
        restored                                                ;\
        retry                                                   ;\
        SKIP(31-19-TT_TRACE_L_INS)                              ;\
        ba,a,pt %xcc, fault_32bit_/**/tail                      ;\
        .empty

/*
 * FILL_32bit_asi fills a 32-bit-wide register window from a 32-bit
 * wide address space via the designated asi.  It is used to fill
 * non-kernel windows.  The stack pointer is required to be eight-byte
 * aligned even though this code only needs it to be four-byte
 * aligned.
 */
#define FILL_32bit_asi(asi_num, tail)                           \
        srl     %sp, 0, %sp                                     ;\
1:      TT_TRACE_L(trace_win)                                   ;\
        mov     4, %g1                                          ;\
        lda     [%sp + %g0]asi_num, %l0                         ;\
        mov     8, %g2                                          ;\
        lda     [%sp + %g1]asi_num, %l1                         ;\
        mov     12, %g3                                         ;\
        lda     [%sp + %g2]asi_num, %l2                         ;\
        lda     [%sp + %g3]asi_num, %l3                         ;\
        add     %sp, 16, %g4                                    ;\
        lda     [%g4 + %g0]asi_num, %l4                         ;\
        lda     [%g4 + %g1]asi_num, %l5                         ;\
        lda     [%g4 + %g2]asi_num, %l6                         ;\
        lda     [%g4 + %g3]asi_num, %l7                         ;\
        add     %g4, 16, %g4                                    ;\
        lda     [%g4 + %g0]asi_num, %i0                         ;\
        lda     [%g4 + %g1]asi_num, %i1                         ;\
        lda     [%g4 + %g2]asi_num, %i2                         ;\
        lda     [%g4 + %g3]asi_num, %i3                         ;\
        add     %g4, 16, %g4                                    ;\
        lda     [%g4 + %g0]asi_num, %i4                         ;\
        lda     [%g4 + %g1]asi_num, %i5                         ;\
        lda     [%g4 + %g2]asi_num, %i6                         ;\
        lda     [%g4 + %g3]asi_num, %i7                         ;\
        restored                                                ;\
        retry                                                   ;\
        SKIP(31-25-TT_TRACE_L_INS)                              ;\
        ba,a,pt %xcc, fault_32bit_/**/tail                      ;\
        .empty

/*
 * FILL_32bit_tt1 fills a 32-bit-wide register window from a 32-bit
 * wide address space via the designated asi.  It is used to fill
 * windows at tl>1 where performance isn't the primary concern and
 * where we don't want to use unnecessary registers.  The stack
 * pointer is required to be eight-byte aligned even though this code
 * only needs it to be four-byte aligned.
 */
#define FILL_32bit_tt1(asi_num, tail)                           \
        mov     asi_num, %asi                                   ;\
1:      srl     %sp, 0, %sp                                     ;\
        TT_TRACE_L(trace_win)                                   ;\
        lda     [%sp + 0]%asi, %l0                              ;\
        lda     [%sp + 4]%asi, %l1                              ;\
        lda     [%sp + 8]%asi, %l2                              ;\
        lda     [%sp + 12]%asi, %l3                             ;\
        lda     [%sp + 16]%asi, %l4                             ;\
        lda     [%sp + 20]%asi, %l5                             ;\
        lda     [%sp + 24]%asi, %l6                             ;\
        lda     [%sp + 28]%asi, %l7                             ;\
        lda     [%sp + 32]%asi, %i0                             ;\
        lda     [%sp + 36]%asi, %i1                             ;\
        lda     [%sp + 40]%asi, %i2                             ;\
        lda     [%sp + 44]%asi, %i3                             ;\
        lda     [%sp + 48]%asi, %i4                             ;\
        lda     [%sp + 52]%asi, %i5                             ;\
        lda     [%sp + 56]%asi, %i6                             ;\
        lda     [%sp + 60]%asi, %i7                             ;\
        restored                                                ;\
        retry                                                   ;\
        SKIP(31-20-TT_TRACE_L_INS)                              ;\
        ba,a,pt %xcc, fault_32bit_/**/tail                      ;\
        .empty


/*
 * SPILL_64bit spills a 64-bit-wide kernel register window.  It
 * assumes that the kernel context and the nucleus context are the
 * same.  The stack pointer is required to be eight-byte aligned.
 */
#define SPILL_64bit(tail)                                       \
2:      stx     %l0, [%sp + V9BIAS64 + 0]                       ;\
        stx     %l1, [%sp + V9BIAS64 + 8]                       ;\
        stx     %l2, [%sp + V9BIAS64 + 16]                      ;\
        stx     %l3, [%sp + V9BIAS64 + 24]                      ;\
        stx     %l4, [%sp + V9BIAS64 + 32]                      ;\
        stx     %l5, [%sp + V9BIAS64 + 40]                      ;\
        stx     %l6, [%sp + V9BIAS64 + 48]                      ;\
        stx     %l7, [%sp + V9BIAS64 + 56]                      ;\
        stx     %i0, [%sp + V9BIAS64 + 64]                      ;\
        stx     %i1, [%sp + V9BIAS64 + 72]                      ;\
        stx     %i2, [%sp + V9BIAS64 + 80]                      ;\
        stx     %i3, [%sp + V9BIAS64 + 88]                      ;\
        stx     %i4, [%sp + V9BIAS64 + 96]                      ;\
        stx     %i5, [%sp + V9BIAS64 + 104]                     ;\
        stx     %i6, [%sp + V9BIAS64 + 112]                     ;\
        stx     %i7, [%sp + V9BIAS64 + 120]                     ;\
        TT_TRACE_L(trace_win)                                   ;\
        saved                                                   ;\
        retry                                                   ;\
        SKIP(31-18-TT_TRACE_L_INS)                              ;\
        ba,a,pt %xcc, fault_64bit_/**/tail                      ;\
        .empty

/*
 * SPILL_64bit_asi spills a 64-bit-wide register window into a 64-bit
 * wide address space via the designated asi.  It is used to spill
 * non-kernel windows.  The stack pointer is required to be eight-byte
 * aligned.
 */
#define SPILL_64bit_asi(asi_num, tail)                          \
        mov     0 + V9BIAS64, %g1                               ;\
2:      stxa    %l0, [%sp + %g1]asi_num                         ;\
        mov     8 + V9BIAS64, %g2                               ;\
        stxa    %l1, [%sp + %g2]asi_num                         ;\
        mov     16 + V9BIAS64, %g3                              ;\
        stxa    %l2, [%sp + %g3]asi_num                         ;\
        mov     24 + V9BIAS64, %g4                              ;\
        stxa    %l3, [%sp + %g4]asi_num                         ;\
        add     %sp, 32, %g5                                    ;\
        stxa    %l4, [%g5 + %g1]asi_num                         ;\
        stxa    %l5, [%g5 + %g2]asi_num                         ;\
        stxa    %l6, [%g5 + %g3]asi_num                         ;\
        stxa    %l7, [%g5 + %g4]asi_num                         ;\
        add     %g5, 32, %g5                                    ;\
        stxa    %i0, [%g5 + %g1]asi_num                         ;\
        stxa    %i1, [%g5 + %g2]asi_num                         ;\
        stxa    %i2, [%g5 + %g3]asi_num                         ;\
        stxa    %i3, [%g5 + %g4]asi_num                         ;\
        add     %g5, 32, %g5                                    ;\
        stxa    %i4, [%g5 + %g1]asi_num                         ;\
        stxa    %i5, [%g5 + %g2]asi_num                         ;\
        stxa    %i6, [%g5 + %g3]asi_num                         ;\
        stxa    %i7, [%g5 + %g4]asi_num                         ;\
        TT_TRACE_L(trace_win)                                   ;\
        saved                                                   ;\
        retry                                                   ;\
        SKIP(31-25-TT_TRACE_L_INS)                              ;\
        ba,a,pt %xcc, fault_64bit_/**/tail                      ;\
        .empty

/*
 * SPILL_64bit_tt1 spills a 64-bit-wide register window into a 64-bit
 * wide address space via the designated asi.  It is used to spill
 * windows at tl>1 where performance isn't the primary concern and
 * where we don't want to use unnecessary registers.  The stack
 * pointer is required to be eight-byte aligned.
 */
#define SPILL_64bit_tt1(asi_num, tail)                          \
        mov     asi_num, %asi                                   ;\
2:      stxa    %l0, [%sp + V9BIAS64 + 0]%asi                   ;\
        stxa    %l1, [%sp + V9BIAS64 + 8]%asi                   ;\
        stxa    %l2, [%sp + V9BIAS64 + 16]%asi                  ;\
        stxa    %l3, [%sp + V9BIAS64 + 24]%asi                  ;\
        stxa    %l4, [%sp + V9BIAS64 + 32]%asi                  ;\
        stxa    %l5, [%sp + V9BIAS64 + 40]%asi                  ;\
        stxa    %l6, [%sp + V9BIAS64 + 48]%asi                  ;\
        stxa    %l7, [%sp + V9BIAS64 + 56]%asi                  ;\
        stxa    %i0, [%sp + V9BIAS64 + 64]%asi                  ;\
        stxa    %i1, [%sp + V9BIAS64 + 72]%asi                  ;\
        stxa    %i2, [%sp + V9BIAS64 + 80]%asi                  ;\
        stxa    %i3, [%sp + V9BIAS64 + 88]%asi                  ;\
        stxa    %i4, [%sp + V9BIAS64 + 96]%asi                  ;\
        stxa    %i5, [%sp + V9BIAS64 + 104]%asi                 ;\
        stxa    %i6, [%sp + V9BIAS64 + 112]%asi                 ;\
        stxa    %i7, [%sp + V9BIAS64 + 120]%asi                 ;\
        TT_TRACE_L(trace_win)                                   ;\
        saved                                                   ;\
        retry                                                   ;\
        SKIP(31-19-TT_TRACE_L_INS)                              ;\
        ba,a,pt %xcc, fault_64bit_/**/tail                      ;\
        .empty


/*
 * FILL_64bit fills a 64-bit-wide kernel register window.  It assumes
 * that the kernel context and the nucleus context are the same.  The
 * stack pointer is required to be eight-byte aligned.
 */
#define FILL_64bit(tail)                                        \
2:      TT_TRACE_L(trace_win)                                   ;\
        ldx     [%sp + V9BIAS64 + 0], %l0                       ;\
        ldx     [%sp + V9BIAS64 + 8], %l1                       ;\
        ldx     [%sp + V9BIAS64 + 16], %l2                      ;\
        ldx     [%sp + V9BIAS64 + 24], %l3                      ;\
        ldx     [%sp + V9BIAS64 + 32], %l4                      ;\
        ldx     [%sp + V9BIAS64 + 40], %l5                      ;\
        ldx     [%sp + V9BIAS64 + 48], %l6                      ;\
        ldx     [%sp + V9BIAS64 + 56], %l7                      ;\
        ldx     [%sp + V9BIAS64 + 64], %i0                      ;\
        ldx     [%sp + V9BIAS64 + 72], %i1                      ;\
        ldx     [%sp + V9BIAS64 + 80], %i2                      ;\
        ldx     [%sp + V9BIAS64 + 88], %i3                      ;\
        ldx     [%sp + V9BIAS64 + 96], %i4                      ;\
        ldx     [%sp + V9BIAS64 + 104], %i5                     ;\
        ldx     [%sp + V9BIAS64 + 112], %i6                     ;\
        ldx     [%sp + V9BIAS64 + 120], %i7                     ;\
        restored                                                ;\
        retry                                                   ;\
        SKIP(31-18-TT_TRACE_L_INS)                              ;\
        ba,a,pt %xcc, fault_64bit_/**/tail                      ;\
        .empty

/*
 * FILL_64bit_asi fills a 64-bit-wide register window from a 64-bit
 * wide address space via the designated asi.  It is used to fill
 * non-kernel windows.  The stack pointer is required to be eight-byte
 * aligned.
 */
#define FILL_64bit_asi(asi_num, tail)                           \
        mov     V9BIAS64 + 0, %g1                               ;\
2:      TT_TRACE_L(trace_win)                                   ;\
        ldxa    [%sp + %g1]asi_num, %l0                         ;\
        mov     V9BIAS64 + 8, %g2                               ;\
        ldxa    [%sp + %g2]asi_num, %l1                         ;\
        mov     V9BIAS64 + 16, %g3                              ;\
        ldxa    [%sp + %g3]asi_num, %l2                         ;\
        mov     V9BIAS64 + 24, %g4                              ;\
        ldxa    [%sp + %g4]asi_num, %l3                         ;\
        add     %sp, 32, %g5                                    ;\
        ldxa    [%g5 + %g1]asi_num, %l4                         ;\
        ldxa    [%g5 + %g2]asi_num, %l5                         ;\
        ldxa    [%g5 + %g3]asi_num, %l6                         ;\
        ldxa    [%g5 + %g4]asi_num, %l7                         ;\
        add     %g5, 32, %g5                                    ;\
        ldxa    [%g5 + %g1]asi_num, %i0                         ;\
        ldxa    [%g5 + %g2]asi_num, %i1                         ;\
        ldxa    [%g5 + %g3]asi_num, %i2                         ;\
        ldxa    [%g5 + %g4]asi_num, %i3                         ;\
        add     %g5, 32, %g5                                    ;\
        ldxa    [%g5 + %g1]asi_num, %i4                         ;\
        ldxa    [%g5 + %g2]asi_num, %i5                         ;\
        ldxa    [%g5 + %g3]asi_num, %i6                         ;\
        ldxa    [%g5 + %g4]asi_num, %i7                         ;\
        restored                                                ;\
        retry                                                   ;\
        SKIP(31-25-TT_TRACE_L_INS)                              ;\
        ba,a,pt %xcc, fault_64bit_/**/tail                      ;\
        .empty

/*
 * FILL_64bit_tt1 fills a 64-bit-wide register window from a 64-bit
 * wide address space via the designated asi.  It is used to fill
 * windows at tl>1 where performance isn't the primary concern and
 * where we don't want to use unnecessary registers.  The stack
 * pointer is required to be eight-byte aligned.
 */
#define FILL_64bit_tt1(asi_num, tail)                           \
        mov     asi_num, %asi                                   ;\
        TT_TRACE_L(trace_win)                                   ;\
        ldxa    [%sp + V9BIAS64 + 0]%asi, %l0                   ;\
        ldxa    [%sp + V9BIAS64 + 8]%asi, %l1                   ;\
        ldxa    [%sp + V9BIAS64 + 16]%asi, %l2                  ;\
        ldxa    [%sp + V9BIAS64 + 24]%asi, %l3                  ;\
        ldxa    [%sp + V9BIAS64 + 32]%asi, %l4                  ;\
        ldxa    [%sp + V9BIAS64 + 40]%asi, %l5                  ;\
        ldxa    [%sp + V9BIAS64 + 48]%asi, %l6                  ;\
        ldxa    [%sp + V9BIAS64 + 56]%asi, %l7                  ;\
        ldxa    [%sp + V9BIAS64 + 64]%asi, %i0                  ;\
        ldxa    [%sp + V9BIAS64 + 72]%asi, %i1                  ;\
        ldxa    [%sp + V9BIAS64 + 80]%asi, %i2                  ;\
        ldxa    [%sp + V9BIAS64 + 88]%asi, %i3                  ;\
        ldxa    [%sp + V9BIAS64 + 96]%asi, %i4                  ;\
        ldxa    [%sp + V9BIAS64 + 104]%asi, %i5                 ;\
        ldxa    [%sp + V9BIAS64 + 112]%asi, %i6                 ;\
        ldxa    [%sp + V9BIAS64 + 120]%asi, %i7                 ;\
        restored                                                ;\
        retry                                                   ;\
        SKIP(31-19-TT_TRACE_L_INS)                              ;\
        ba,a,pt %xcc, fault_64bit_/**/tail                      ;\
        .empty



/*
 * SPILL_mixed spills either size window, depending on
 * whether %sp is even or odd, to a 32-bit address space.
 * This may only be used in conjunction with SPILL_32bit/
 * SPILL_64bit. New versions of SPILL_mixed_{tt1,asi} would be
 * needed for use with SPILL_{32,64}bit_{tt1,asi}.  Particular
 * attention should be paid to the instructions that belong
 * in the delay slots of the branches depending on the type
 * of spill handler being branched to.
 * Clear upper 32 bits of %sp if it is odd.
 * We won't need to clear them in a 64-bit kernel.
 */
#define SPILL_mixed                                             \
        btst    1, %sp                                          ;\
        bz,a,pt %xcc, 1b                                        ;\
        srl     %sp, 0, %sp                                     ;\
        ba,pt   %xcc, 2b                                        ;\
        nop                                                     ;\
        .align  128

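/*
 * For illustration (hypothetical, not an actual table entry; "sn0" is
 * a made-up tail name): a mixed user spill slot stacks the handlers in
 * the documented 32b, 64b, mixed order so that SPILL_mixed can reach
 * their 1: and 2: labels:
 *
 *	SPILL_32bit(sn0)	! defines label 1:
 *	SPILL_64bit(sn0)	! defines label 2:
 *	SPILL_mixed		! branches to 1b or 2b on bit 0 of %sp
 */
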
/*
 * FILL_mixed(ASI) fills either size window, depending on
 * whether %sp is even or odd, from a 32-bit address space.
 * This may only be used in conjunction with FILL_32bit/
 * FILL_64bit. New versions of FILL_mixed_{tt1,asi} would be
 * needed for use with FILL_{32,64}bit_{tt1,asi}. Particular
 * attention should be paid to the instructions that belong
 * in the delay slots of the branches depending on the type
 * of fill handler being branched to.
 * Clear upper 32 bits of %sp if it is odd.
 * We won't need to clear them in a 64-bit kernel.
 */
#define FILL_mixed                                              \
        btst    1, %sp                                          ;\
        bz,a,pt %xcc, 1b                                        ;\
        srl     %sp, 0, %sp                                     ;\
        ba,pt   %xcc, 2b                                        ;\
        nop                                                     ;\
        .align  128


/*
 * SPILL_32clean/SPILL_64clean spill 32-bit and 64-bit register windows,
 * respectively, into the address space via the designated asi.  The
 * unbiased stack pointer is required to be eight-byte aligned (even for
 * the 32-bit case even though this code does not require such strict
 * alignment).
 *
 * With SPARC v9 the spill trap takes precedence over the cleanwin trap
 * so when cansave == 0, canrestore == 6, and cleanwin == 6 the next save
 * will cause cwp + 2 to be spilled but will not clean cwp + 1.  That
 * window may contain kernel data so in user_rtt we set wstate to call
 * these spill handlers on the first user spill trap.  These handlers then
 * spill the appropriate window but also back up a window and clean the
 * window that didn't get a cleanwin trap.
 */
#define SPILL_32clean(asi_num, tail)                            \
        srl     %sp, 0, %sp                                     ;\
        sta     %l0, [%sp + %g0]asi_num                         ;\
        mov     4, %g1                                          ;\
        sta     %l1, [%sp + %g1]asi_num                         ;\
        mov     8, %g2                                          ;\
        sta     %l2, [%sp + %g2]asi_num                         ;\
        mov     12, %g3                                         ;\
        sta     %l3, [%sp + %g3]asi_num                         ;\
        add     %sp, 16, %g4                                    ;\
        sta     %l4, [%g4 + %g0]asi_num                         ;\
        sta     %l5, [%g4 + %g1]asi_num                         ;\
        sta     %l6, [%g4 + %g2]asi_num                         ;\
        sta     %l7, [%g4 + %g3]asi_num                         ;\
        add     %g4, 16, %g4                                    ;\
        sta     %i0, [%g4 + %g0]asi_num                         ;\
        sta     %i1, [%g4 + %g1]asi_num                         ;\
        sta     %i2, [%g4 + %g2]asi_num                         ;\
        sta     %i3, [%g4 + %g3]asi_num                         ;\
        add     %g4, 16, %g4                                    ;\
        sta     %i4, [%g4 + %g0]asi_num                         ;\
        sta     %i5, [%g4 + %g1]asi_num                         ;\
        sta     %i6, [%g4 + %g2]asi_num                         ;\
        sta     %i7, [%g4 + %g3]asi_num                         ;\
        TT_TRACE_L(trace_win)                                   ;\
        b       .spill_clean                                    ;\
          mov   WSTATE_USER32, %g7                              ;\
        SKIP(31-25-TT_TRACE_L_INS)                              ;\
        ba,a,pt %xcc, fault_32bit_/**/tail                      ;\
        .empty

#define SPILL_64clean(asi_num, tail)                            \
        mov     0 + V9BIAS64, %g1                               ;\
        stxa    %l0, [%sp + %g1]asi_num                         ;\
        mov     8 + V9BIAS64, %g2                               ;\
        stxa    %l1, [%sp + %g2]asi_num                         ;\
        mov     16 + V9BIAS64, %g3                              ;\
        stxa    %l2, [%sp + %g3]asi_num                         ;\
        mov     24 + V9BIAS64, %g4                              ;\
        stxa    %l3, [%sp + %g4]asi_num                         ;\
        add     %sp, 32, %g5                                    ;\
        stxa    %l4, [%g5 + %g1]asi_num                         ;\
        stxa    %l5, [%g5 + %g2]asi_num                         ;\
        stxa    %l6, [%g5 + %g3]asi_num                         ;\
        stxa    %l7, [%g5 + %g4]asi_num                         ;\
        add     %g5, 32, %g5                                    ;\
        stxa    %i0, [%g5 + %g1]asi_num                         ;\
        stxa    %i1, [%g5 + %g2]asi_num                         ;\
        stxa    %i2, [%g5 + %g3]asi_num                         ;\
        stxa    %i3, [%g5 + %g4]asi_num                         ;\
        add     %g5, 32, %g5                                    ;\
        stxa    %i4, [%g5 + %g1]asi_num                         ;\
        stxa    %i5, [%g5 + %g2]asi_num                         ;\
        stxa    %i6, [%g5 + %g3]asi_num                         ;\
        stxa    %i7, [%g5 + %g4]asi_num                         ;\
        TT_TRACE_L(trace_win)                                   ;\
        b       .spill_clean                                    ;\
          mov   WSTATE_USER64, %g7                              ;\
        SKIP(31-25-TT_TRACE_L_INS)                              ;\
        ba,a,pt %xcc, fault_64bit_/**/tail                      ;\
        .empty


/*
 * Floating point disabled.
 */
#define FP_DISABLED_TRAP                \
        TT_TRACE(trace_gen)             ;\
        ba,pt   %xcc,.fp_disabled       ;\
        nop                             ;\
        .align  32

/*
 * Floating point exceptions.
 */
#define FP_IEEE_TRAP                    \
        TT_TRACE(trace_gen)             ;\
        ba,pt   %xcc,.fp_ieee_exception ;\
        nop                             ;\
        .align  32

#define FP_TRAP                         \
        TT_TRACE(trace_gen)             ;\
        ba,pt   %xcc,.fp_exception      ;\
        nop                             ;\
        .align  32


/*
 * asynchronous traps at level 0 and level 1
 *
 * The first instruction must be a membar for UltraSPARC-III
 * to stop RED state entry if the store queue has many
 * pending bad stores (PRM, Chapter 11).
 */
#define ASYNC_TRAP(ttype, ttlabel, table_name)\
        .global table_name      ;\
table_name:                     ;\
        membar  #Sync           ;\
        TT_TRACE(ttlabel)       ;\
        ba      async_err       ;\
        mov     ttype, %g5      ;\
        .align  32

/*
 * Defaults to BAD entry, but establishes label to be used for
 * architecture-specific overwrite of trap table entry.
 */
#define LABELED_BAD(table_name)         \
        .global table_name              ;\
table_name:                             ;\
        BAD



/*
 * illegal instruction trap
 */
#define ILLTRAP_INSTR                     \
        membar  #Sync                     ;\
        TT_TRACE(trace_gen)               ;\
        or      %g0, P_UTRAP4, %g2        ;\
        or      %g0, T_UNIMP_INSTR, %g3   ;\
        sethi   %hi(.check_v9utrap), %g4  ;\
        jmp     %g4 + %lo(.check_v9utrap) ;\
        nop                               ;\
        .align  32

/*
 * tag overflow trap
 */
#define TAG_OVERFLOW                      \
        TT_TRACE(trace_gen)               ;\
        or      %g0, P_UTRAP10, %g2       ;\
        or      %g0, T_TAG_OVERFLOW, %g3  ;\
        sethi   %hi(.check_v9utrap), %g4  ;\
        jmp     %g4 + %lo(.check_v9utrap) ;\
        nop                               ;\
        .align  32

/*
 * divide by zero trap
 */
#define DIV_BY_ZERO                       \
        TT_TRACE(trace_gen)               ;\
        or      %g0, P_UTRAP11, %g2       ;\
        or      %g0, T_IDIV0, %g3         ;\
        sethi   %hi(.check_v9utrap), %g4  ;\
        jmp     %g4 + %lo(.check_v9utrap) ;\
        nop                               ;\
        .align  32

/*
 * trap instruction for V9 user trap handlers
 */
#define TRAP_INSTR                        \
        TT_TRACE(trace_gen)               ;\
        or      %g0, T_SOFTWARE_TRAP, %g3 ;\
        sethi   %hi(.check_v9utrap), %g4  ;\
        jmp     %g4 + %lo(.check_v9utrap) ;\
        nop                               ;\
        .align  32
#define TRP4    TRAP_INSTR; TRAP_INSTR; TRAP_INSTR; TRAP_INSTR

/*
 * LEVEL_INTERRUPT is for level N interrupts.
 * VECTOR_INTERRUPT is for the vector trap.
 */
#define LEVEL_INTERRUPT(level)          \
        .global tt_pil/**/level         ;\
tt_pil/**/level:                        ;\
        ba,pt   %xcc, pil_interrupt     ;\
        mov     level, %g4              ;\
        .align  32

#define LEVEL14_INTERRUPT                       \
        ba      pil14_interrupt                 ;\
        mov     PIL_14, %g4                     ;\
        .align  32

#define LEVEL15_INTERRUPT                       \
        ba      pil15_interrupt                 ;\
        mov     PIL_15, %g4                     ;\
        .align  32

#define VECTOR_INTERRUPT                                \
        ldxa    [%g0]ASI_INTR_RECEIVE_STATUS, %g1       ;\
        btst    IRSR_BUSY, %g1                          ;\
        bnz,pt  %xcc, vec_interrupt                     ;\
        nop                                             ;\
        ba,a,pt %xcc, vec_intr_spurious                 ;\
        .empty                                          ;\
        .align  32

/*
 * MMU Trap Handlers.
 */
#define SWITCH_GLOBALS  /* mmu->alt, alt->mmu */                  \
        rdpr    %pstate, %g5                                            ;\
        wrpr    %g5, PSTATE_MG | PSTATE_AG, %pstate

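/*
 * SWITCH_GLOBALS works because wrpr writes rs1 XOR the immediate:
 * exactly one of PSTATE_MG/PSTATE_AG is set on entry, so toggling
 * both bits switches between the MMU globals and the alternate
 * globals in either direction.
 */
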
#define IMMU_EXCEPTION                                                  \
        membar  #Sync                                                   ;\
        SWITCH_GLOBALS                                                  ;\
        wr      %g0, ASI_IMMU, %asi                                     ;\
        rdpr    %tpc, %g2                                               ;\
        ldxa    [MMU_SFSR]%asi, %g3                                     ;\
        ba,pt   %xcc, .mmu_exception_end                                ;\
        mov     T_INSTR_EXCEPTION, %g1                                  ;\
        .align  32

#define DMMU_EXCEPTION                                                  \
        SWITCH_GLOBALS                                                  ;\
        wr      %g0, ASI_DMMU, %asi                                     ;\
        ldxa    [MMU_TAG_ACCESS]%asi, %g2                               ;\
        ldxa    [MMU_SFSR]%asi, %g3                                     ;\
        ba,pt   %xcc, .mmu_exception_end                                ;\
        mov     T_DATA_EXCEPTION, %g1                                   ;\
        .align  32

#define DMMU_EXC_AG_PRIV                                                \
        wr      %g0, ASI_DMMU, %asi                                     ;\
        ldxa    [MMU_SFAR]%asi, %g2                                     ;\
        ba,pt   %xcc, .mmu_priv_exception                               ;\
        ldxa    [MMU_SFSR]%asi, %g3                                     ;\
        .align  32

#define DMMU_EXC_AG_NOT_ALIGNED                                         \
        wr      %g0, ASI_DMMU, %asi                                     ;\
        ldxa    [MMU_SFAR]%asi, %g2                                     ;\
        ba,pt   %xcc, .mmu_exception_not_aligned                        ;\
        ldxa    [MMU_SFSR]%asi, %g3                                     ;\
        .align  32

/*
 * SPARC V9 IMPL. DEP. #109(1) and (2) and #110(1) and (2)
 */
#define DMMU_EXC_LDDF_NOT_ALIGNED                                       \
        btst    1, %sp                                                  ;\
        bnz,pt  %xcc, .lddf_exception_not_aligned                       ;\
        wr      %g0, ASI_DMMU, %asi                                     ;\
        ldxa    [MMU_SFAR]%asi, %g2                                     ;\
        ba,pt   %xcc, .mmu_exception_not_aligned                        ;\
        ldxa    [MMU_SFSR]%asi, %g3                                     ;\
        .align  32

#define DMMU_EXC_STDF_NOT_ALIGNED                                       \
        btst    1, %sp                                                  ;\
        bnz,pt  %xcc, .stdf_exception_not_aligned                       ;\
        wr      %g0, ASI_DMMU, %asi                                     ;\
        ldxa    [MMU_SFAR]%asi, %g2                                     ;\
        ba,pt   %xcc, .mmu_exception_not_aligned                        ;\
        ldxa    [MMU_SFSR]%asi, %g3                                     ;\
        .align  32

/*
 * Flush the TLB using either the primary, secondary, or nucleus flush
 * operation based on whether the ctx from the tag access register matches
 * the primary or secondary context (flush the nucleus if neither matches).
 *
 * Requires a membar #Sync before next ld/st.
 * exits with:
 * g2 = tag access register
 * g3 = ctx number
 */
#if TAGACC_CTX_MASK != CTXREG_CTX_MASK
#error "TAGACC_CTX_MASK != CTXREG_CTX_MASK"
#endif
#define DTLB_DEMAP_ENTRY                                                \
        mov     MMU_TAG_ACCESS, %g1                                     ;\
        mov     MMU_PCONTEXT, %g5                                       ;\
        ldxa    [%g1]ASI_DMMU, %g2                                      ;\
        sethi   %hi(TAGACC_CTX_MASK), %g4                               ;\
        or      %g4, %lo(TAGACC_CTX_MASK), %g4                          ;\
        and     %g2, %g4, %g3                   /* g3 = ctx */          ;\
        ldxa    [%g5]ASI_DMMU, %g6              /* g6 = primary ctx */  ;\
        and     %g6, %g4, %g6                   /* &= CTXREG_CTX_MASK */ ;\
        cmp     %g3, %g6                                                ;\
        be,pt   %xcc, 1f                                                ;\
        andn    %g2, %g4, %g1                   /* ctx = primary */     ;\
        mov     MMU_SCONTEXT, %g5                                       ;\
        ldxa    [%g5]ASI_DMMU, %g6              /* g6 = secondary ctx */ ;\
        and     %g6, %g4, %g6                   /* &= CTXREG_CTX_MASK */ ;\
        cmp     %g3, %g6                                                ;\
        be,a,pt %xcc, 1f                                                ;\
          or    %g1, DEMAP_SECOND, %g1                                  ;\
        or      %g1, DEMAP_NUCLEUS, %g1                                 ;\
1:      stxa    %g0, [%g1]ASI_DTLB_DEMAP        /* MMU_DEMAP_PAGE */    ;\
        membar  #Sync
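
/*
 * The demap selection above, as C-like pseudocode (a sketch only; the
 * primary demap is encoded by leaving the type bits in %g1 clear):
 *
 *	ctx = tag_access & TAGACC_CTX_MASK;
 *	if (ctx == (primary_ctx & CTXREG_CTX_MASK))
 *		demap_page(vaddr, PRIMARY);
 *	else if (ctx == (secondary_ctx & CTXREG_CTX_MASK))
 *		demap_page(vaddr, SECONDARY);
 *	else
 *		demap_page(vaddr, NUCLEUS);
 */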

#if defined(cscope)
/*
 * Define labels to direct cscope quickly to labels that
 * are generated by macro expansion of DTLB_MISS().
 */
        .global tt0_dtlbmiss
tt0_dtlbmiss:
        .global tt1_dtlbmiss
tt1_dtlbmiss:
        nop
#endif

1050 /*
1051  * Needs to be exactly 32 instructions
1052  *
1053  * UTLB NOTE: If we don't hit on the 8k pointer then we branch
1054  * to a special 4M tsb handler. It would be nice if that handler
1055  * could live in this file but currently it seems better to allow
1056  * it to fall thru to sfmmu_tsb_miss.
1057  */
1058 #ifdef UTSB_PHYS
1059 #define DTLB_MISS(table_name)                                           ;\
1060         .global table_name/**/_dtlbmiss                                 ;\
1061 table_name/**/_dtlbmiss:                                                ;\
1062         mov     MMU_TAG_ACCESS, %g6             /* select tag acc */    ;\
1063         ldxa    [%g0]ASI_DMMU_TSB_8K, %g1       /* g1 = tsbe ptr */     ;\
1064         ldxa    [%g6]ASI_DMMU, %g2              /* g2 = tag access */   ;\
1065         sllx    %g2, TAGACC_CTX_LSHIFT, %g3                             ;\
1066         srlx    %g3, TAGACC_CTX_LSHIFT, %g3     /* g3 = ctx */          ;\
1067         cmp     %g3, INVALID_CONTEXT                                    ;\
1068         ble,pn  %xcc, sfmmu_kdtlb_miss                                  ;\
1069           srax  %g2, PREDISM_BASESHIFT, %g6  /* g6 > 0 ISM predicted */ ;\
1070         brgz,pn %g6, sfmmu_udtlb_slowpath_ismpred                       ;\
1071           srlx  %g2, TAG_VALO_SHIFT, %g7        /* g7 = tsb tag */      ;\
1072         ldda    [%g1]ASI_QUAD_LDD_PHYS, %g4     /* g4 = tag, %g5 data */;\
1073         cmp     %g4, %g7                                                ;\
1074         bne,pn %xcc, sfmmu_udtlb_slowpath_noismpred                     ;\
1075           nop                                                           ;\
1076         TT_TRACE(trace_tsbhit)          /* 2 instr ifdef TRAPTRACE */   ;\
1077         stxa    %g5, [%g0]ASI_DTLB_IN   /* trapstat expects TTE */      ;\
1078         retry                           /* in %g5 */                    ;\
1079         unimp   0                                                       ;\
1080         unimp   0                                                       ;\
1081         unimp   0                                                       ;\
1082         unimp   0                                                       ;\
1083         unimp   0                                                       ;\
1084         unimp   0                                                       ;\
1085         unimp   0                                                       ;\
1086         unimp   0                                                       ;\
1087         unimp   0                                                       ;\
1088         unimp   0                                                       ;\
1089         unimp   0                                                       ;\
1090         unimp   0                                                       ;\
1091         unimp   0                                                       ;\
1092         unimp   0                                                       ;\
1093         .align 128
1094 
1095 #else /* UTSB_PHYS */
1096 #define DTLB_MISS(table_name)                                            \
1097         .global table_name/**/_dtlbmiss                                 ;\
1098 table_name/**/_dtlbmiss:                                                ;\
1099         mov     MMU_TAG_ACCESS, %g6             /* select tag acc */    ;\
1100         ldxa    [%g0]ASI_DMMU_TSB_8K, %g1       /* g1 = tsbe ptr */     ;\
1101         ldxa    [%g6]ASI_DMMU, %g2              /* g2 = tag access */   ;\
1102         sllx    %g2, TAGACC_CTX_LSHIFT, %g3                             ;\
1103         srlx    %g3, TAGACC_CTX_LSHIFT, %g3     /* g3 = ctx */          ;\
1104         cmp     %g3, INVALID_CONTEXT                                    ;\
1105         ble,pn  %xcc, sfmmu_kdtlb_miss                                  ;\
1106           srlx  %g2, TAG_VALO_SHIFT, %g7        /* g7 = tsb tag */      ;\
1107         brlz,pn %g1, sfmmu_udtlb_slowpath                               ;\
1108           nop                                                           ;\
1109         ldda    [%g1]ASI_NQUAD_LD, %g4  /* g4 = tag, %g5 data */        ;\
1110         cmp     %g4, %g7                                                ;\
1111         bne,pn  %xcc, sfmmu_tsb_miss_tt         /* no 4M TSB, miss */   ;\
1112           mov   -1, %g3         /* set 4M tsbe ptr to -1 */             ;\
1113         TT_TRACE(trace_tsbhit)          /* 2 instr ifdef TRAPTRACE */   ;\
1114         stxa    %g5, [%g0]ASI_DTLB_IN   /* trapstat expects TTE */      ;\
1115         retry                           /* in %g5 */                    ;\
1116         unimp   0                                                       ;\
1117         unimp   0                                                       ;\
1118         unimp   0                                                       ;\
1119         unimp   0                                                       ;\
1120         unimp   0                                                       ;\
1121         unimp   0                                                       ;\
1122         unimp   0                                                       ;\
1123         unimp   0                                                       ;\
1124         unimp   0                                                       ;\
1125         unimp   0                                                       ;\
1126         unimp   0                                                       ;\
1127         unimp   0                                                       ;\
1128         unimp   0                                                       ;\
1129         unimp   0                                                       ;\
1130         .align 128
1131 #endif /* UTSB_PHYS */
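/*
 * A rough C sketch of the non-UTSB_PHYS fast path above, for reference
 * only; tsbe_t and the read_*()/dtlb_load()/retry() helpers are
 * illustrative assumptions, not kernel interfaces.  The branch targets
 * are the real sfmmu entry points used by the macro:
 *
 *	typedef struct { uint64_t tag; uint64_t data; } tsbe_t;
 *
 *	void
 *	dtlb_miss(void)
 *	{
 *		uint64_t tagacc = read_mmu_tag_access();  // VA | ctx
 *		tsbe_t *tsbe = read_dmmu_tsb_8k_ptr();    // hw-computed
 *		uint64_t ctx = (tagacc << TAGACC_CTX_LSHIFT) >>
 *		    TAGACC_CTX_LSHIFT;
 *
 *		if ((int64_t)ctx <= INVALID_CONTEXT)
 *			sfmmu_kdtlb_miss();	// kernel or invalid ctx
 *		else if ((int64_t)(uintptr_t)tsbe < 0)
 *			sfmmu_udtlb_slowpath();	// more than one TSB
 *		else if (tsbe->tag != (tagacc >> TAG_VALO_SHIFT))
 *			sfmmu_tsb_miss_tt();	// 8k TSB missed too
 *		else {
 *			dtlb_load(tsbe->data);	// stuff TTE into DTLB
 *			retry();		// re-execute the access
 *		}
 *	}
 */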
1132 
1133 #if defined(cscope)
1134 /*
1135  * Define labels to direct cscope quickly to labels that
1136  * are generated by macro expansion of ITLB_MISS().
1137  */
1138         .global tt0_itlbmiss
1139 tt0_itlbmiss:
1140         .global tt1_itlbmiss
1141 tt1_itlbmiss:
1142         nop
1143 #endif
1144 
1145 /*
1146  * Instruction miss handler.
1147  * ldda instructions will have their ASI patched
1148  * by sfmmu_patch_ktsb at runtime.
1149  * MUST be EXACTLY 32 instructions or we'll break.
1150  */
1151 #ifdef UTSB_PHYS
1152 #define ITLB_MISS(table_name)                                            \
1153         .global table_name/**/_itlbmiss                                 ;\
1154 table_name/**/_itlbmiss:                                                ;\
1155         mov     MMU_TAG_ACCESS, %g6             /* select tag acc */    ;\
1156         ldxa    [%g0]ASI_IMMU_TSB_8K, %g1       /* g1 = tsbe ptr */     ;\
1157         ldxa    [%g6]ASI_IMMU, %g2              /* g2 = tag access */   ;\
1158         sllx    %g2, TAGACC_CTX_LSHIFT, %g3                             ;\
1159         srlx    %g3, TAGACC_CTX_LSHIFT, %g3     /* g3 = ctx */          ;\
1160         cmp     %g3, INVALID_CONTEXT                                    ;\
1161         ble,pn  %xcc, sfmmu_kitlb_miss                                  ;\
1162           srlx  %g2, TAG_VALO_SHIFT, %g7        /* g7 = tsb tag */      ;\
1163         ldda    [%g1]ASI_QUAD_LDD_PHYS, %g4 /* g4 = tag, g5 = data */   ;\
1164         cmp     %g4, %g7                                                ;\
1165         bne,pn  %xcc, sfmmu_uitlb_slowpath                              ;\
1166           andcc %g5, TTE_EXECPRM_INT, %g0 /* check execute bit */       ;\
1167         bz,pn   %icc, exec_fault                                        ;\
1168           nop                                                           ;\
1169         TT_TRACE(trace_tsbhit)          /* 2 instr ifdef TRAPTRACE */   ;\
1170         stxa    %g5, [%g0]ASI_ITLB_IN   /* trapstat expects %g5 */      ;\
1171         retry                                                           ;\
1172         unimp   0                                                       ;\
1173         unimp   0                                                       ;\
1174         unimp   0                                                       ;\
1175         unimp   0                                                       ;\
1176         unimp   0                                                       ;\
1177         unimp   0                                                       ;\
1178         unimp   0                                                       ;\
1179         unimp   0                                                       ;\
1180         unimp   0                                                       ;\
1181         unimp   0                                                       ;\
1182         unimp   0                                                       ;\
1183         unimp   0                                                       ;\
1184         unimp   0                                                       ;\
1185         unimp   0                                                       ;\
1186         .align 128
1187 
1188 #else /* UTSB_PHYS */
1189 #define ITLB_MISS(table_name)                                            \
1190         .global table_name/**/_itlbmiss                                 ;\
1191 table_name/**/_itlbmiss:                                                ;\
1192         mov     MMU_TAG_ACCESS, %g6             /* select tag acc */    ;\
1193         ldxa    [%g0]ASI_IMMU_TSB_8K, %g1       /* g1 = tsbe ptr */     ;\
1194         ldxa    [%g6]ASI_IMMU, %g2              /* g2 = tag access */   ;\
1195         sllx    %g2, TAGACC_CTX_LSHIFT, %g3                             ;\
1196         srlx    %g3, TAGACC_CTX_LSHIFT, %g3     /* g3 = ctx */          ;\
1197         cmp     %g3, INVALID_CONTEXT                                    ;\
1198         ble,pn  %xcc, sfmmu_kitlb_miss                                  ;\
1199           srlx  %g2, TAG_VALO_SHIFT, %g7        /* g7 = tsb tag */      ;\
1200         brlz,pn %g1, sfmmu_uitlb_slowpath /* if >1 TSB branch */        ;\
1201           nop                                                           ;\
1202         ldda    [%g1]ASI_NQUAD_LD, %g4  /* g4 = tag, g5 = data */       ;\
1203         cmp     %g4, %g7                                                ;\
1204         bne,pn  %xcc, sfmmu_tsb_miss_tt /* br if 8k ptr miss */         ;\
1205           mov   -1, %g3         /* set 4M TSB ptr to -1 */              ;\
1206         andcc   %g5, TTE_EXECPRM_INT, %g0 /* check execute bit */       ;\
1207         bz,pn   %icc, exec_fault                                        ;\
1208           nop                                                           ;\
1209         TT_TRACE(trace_tsbhit)          /* 2 instr ifdef TRAPTRACE */   ;\
1210         stxa    %g5, [%g0]ASI_ITLB_IN   /* trapstat expects %g5 */      ;\
1211         retry                                                           ;\
1212         unimp   0                                                       ;\
1213         unimp   0                                                       ;\
1214         unimp   0                                                       ;\
1215         unimp   0                                                       ;\
1216         unimp   0                                                       ;\
1217         unimp   0                                                       ;\
1218         unimp   0                                                       ;\
1219         unimp   0                                                       ;\
1220         unimp   0                                                       ;\
1221         unimp   0                                                       ;\
1222         unimp   0                                                       ;\
1223         .align 128
1224 #endif /* UTSB_PHYS */
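/*
 * Relative to DTLB_MISS above, the only structural addition is the
 * execute-permission test on the TTE before it is loaded.  As a hedged
 * C sketch, continuing the earlier one (itlb_load() is an illustrative
 * name):
 *
 *	if ((tsbe->data & TTE_EXECPRM_INT) == 0)
 *		exec_fault();		// mapping valid, but not executable
 *	else {
 *		itlb_load(tsbe->data);
 *		retry();
 *	}
 */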
1225 
1226 
1227 /*
1228  * This macro is the first level handler for fast protection faults.
1229  * It first demaps the tlb entry which generated the fault and then
1230  * attempts to set the modify bit on the hash.  It needs to be
1231  * exactly 32 instructions.
1232  */
1233 #define DTLB_PROT                                                        \
1234         DTLB_DEMAP_ENTRY                /* 20 instructions */           ;\
1235         /*                                                              ;\
1236          * At this point:                                               ;\
1237          *   g1 = ????                                                  ;\
1238          *   g2 = tag access register                                   ;\
1239          *   g3 = ctx number                                            ;\
1240          *   g4 = ????                                                  ;\
1241          */                                                             ;\
1242         TT_TRACE(trace_dataprot)        /* 2 instr ifdef TRAPTRACE */   ;\
1243                                         /* clobbers g1 and g6 */        ;\
1244         ldxa    [%g0]ASI_DMMU_TSB_8K, %g1       /* g1 = tsbe ptr */     ;\
1245         brnz,pt %g3, sfmmu_uprot_trap           /* user trap */         ;\
1246           nop                                                           ;\
1247         ba,a,pt %xcc, sfmmu_kprot_trap          /* kernel trap */       ;\
1248         unimp   0                                                       ;\
1249         unimp   0                                                       ;\
1250         unimp   0                                                       ;\
1251         unimp   0                                                       ;\
1252         unimp   0                                                       ;\
1253         unimp   0                                                       ;\
1254         .align 128
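/*
 * In outline, the fast protection path above does the following (a
 * hedged C sketch; dtlb_demap() is an illustrative name for what
 * DTLB_DEMAP_ENTRY performs):
 *
 *	dtlb_demap(tagacc);		// evict the stale read-only TTE
 *	if (ctx != 0)
 *		sfmmu_uprot_trap();	// user: set mod bit in the hash
 *	else
 *		sfmmu_kprot_trap();	// kernel
 */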
1255 
1256 #define DMMU_EXCEPTION_TL1                                               \
1257         SWITCH_GLOBALS                                                  ;\
1258         ba,a,pt %xcc, mmu_trap_tl1                                      ;\
1259           nop                                                           ;\
1260         .align 32
1261 
1262 #define MISALIGN_ADDR_TL1                                                \
1263         ba,a,pt %xcc, mmu_trap_tl1                                      ;\
1264           nop                                                           ;\
1265         .align 32
1266 
1267 /*
1268  * Trace a tsb hit
1269  * g1 = tsbe pointer (in/clobbered)
1270  * g2 = tag access register (in)
1271  * g3 - g4 = scratch (clobbered)
1272  * g5 = tsbe data (in)
1273  * g6 = scratch (clobbered)
1274  * g7 = pc we jumped here from (in)
1275  * ttextra = value to OR in to trap type (%tt) (in)
1276  */
1277 #ifdef TRAPTRACE
1278 #define TRACE_TSBHIT(ttextra)                                            \
1279         membar  #Sync                                                   ;\
1280         sethi   %hi(FLUSH_ADDR), %g6                                    ;\
1281         flush   %g6                                                     ;\
1282         TRACE_PTR(%g3, %g6)                                             ;\
1283         GET_TRACE_TICK(%g6, %g4)                                        ;\
1284         stxa    %g6, [%g3 + TRAP_ENT_TICK]%asi                          ;\
1285         stxa    %g2, [%g3 + TRAP_ENT_SP]%asi    /* tag access */        ;\
1286         stxa    %g5, [%g3 + TRAP_ENT_F1]%asi    /* tsb data */          ;\
1287         rdpr    %tnpc, %g6                                              ;\
1288         stxa    %g6, [%g3 + TRAP_ENT_F2]%asi                            ;\
1289         stxa    %g1, [%g3 + TRAP_ENT_F3]%asi    /* tsb pointer */       ;\
1290         stxa    %g0, [%g3 + TRAP_ENT_F4]%asi                            ;\
1291         rdpr    %tpc, %g6                                               ;\
1292         stxa    %g6, [%g3 + TRAP_ENT_TPC]%asi                           ;\
1293         rdpr    %tl, %g6                                                ;\
1294         stha    %g6, [%g3 + TRAP_ENT_TL]%asi                            ;\
1295         rdpr    %tt, %g6                                                ;\
1296         or      %g6, (ttextra), %g6                                     ;\
1297         stha    %g6, [%g3 + TRAP_ENT_TT]%asi                            ;\
1298         ldxa    [%g0]ASI_IMMU, %g1              /* tag target */        ;\
1299         ldxa    [%g0]ASI_DMMU, %g4                                      ;\
1300         cmp     %g6, FAST_IMMU_MISS_TT                                  ;\
1301         movne   %icc, %g4, %g1                                          ;\
1302         stxa    %g1, [%g3 + TRAP_ENT_TSTATE]%asi /* tsb tag */          ;\
1303         stxa    %g0, [%g3 + TRAP_ENT_TR]%asi                            ;\
1304         TRACE_NEXT(%g3, %g4, %g6)
1305 #else
1306 #define TRACE_TSBHIT(ttextra)
1307 #endif
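/*
 * Viewed from C, TRACE_TSBHIT fills in one trace record; a hedged
 * sketch follows.  The struct and the rd_*()/\*_tag_target() helper
 * names are illustrative; the authoritative layout is the TRAP_ENT_*
 * offsets from traptrace.h:
 *
 *	struct tsbhit_rec {
 *		uint64_t tick, sp, f1, f2, f3, f4, tpc, tstate, tr;
 *		uint16_t tl, tt;
 *	};
 *
 *	void
 *	trace_tsbhit(struct tsbhit_rec *r, uint64_t tagacc,
 *	    uint64_t tsbe_data, uint64_t tsbe_ptr, uint16_t ttextra)
 *	{
 *		r->tick = rd_tick();
 *		r->sp = tagacc;		// tag access, in the SP slot
 *		r->f1 = tsbe_data;
 *		r->f2 = rd_tnpc();
 *		r->f3 = tsbe_ptr;
 *		r->f4 = 0;
 *		r->tpc = rd_tpc();
 *		r->tl = rd_tl();
 *		r->tt = rd_tt() | ttextra;
 *		r->tstate = (r->tt == FAST_IMMU_MISS_TT) ?
 *		    immu_tag_target() : dmmu_tag_target();
 *		r->tr = 0;
 *	}
 */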
1308 
1309 /*
1310  * =======================================================================
1311  *              SPARC V9 TRAP TABLE
1312  *
1313  * The trap table is divided into two halves: the first half is used for
1314  * traps taken when TL=0; the second half for traps taken when TL>0. Note
1315  * that handlers in the second half of the table might not be able to make
1316  * the same assumptions as handlers in the first half of the table.
1317  *
1318  * Worst case trap nesting so far:
1319  *
1320  *      at TL=0 client issues software trap requesting service
1321  *      at TL=1 nucleus wants a register window
1322  *      at TL=2 register window clean/spill/fill takes a TLB miss
1323  *      at TL=3 processing TLB miss
1324  *      at TL=4 handle asynchronous error
1325  *
1326  * Note that a trap from TL=4 to TL=5 places Spitfire in "RED mode".
1327  *
1328  * =======================================================================
1329  */
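/*
 * For reference: SPARC V9 vectors each trap to a 32-byte (eight
 * instruction) entry in this table, and selects the table half by the
 * trap level at the time the trap is taken.  As a C sketch of the
 * hardware dispatch (TBA is the trap base address register):
 *
 *	uintptr_t
 *	trap_vector(uintptr_t tba, uint_t tl, uint_t tt)
 *	{
 *		// TL>0 traps use the second 16KB half of the table
 *		return (tba + (tl > 0 ? 0x4000 : 0) + tt * 32);
 *	}
 */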
1330         .section ".text"
1331         .align  4
1332         .global trap_table, scb, trap_table0, trap_table1, etrap_table
1333         .type   trap_table, #object
1334         .type   scb, #object
1335 trap_table:
1336 scb:
1337 trap_table0:
1338         /* hardware traps */
1339         NOT;                            /* 000  reserved */
1340         RED;                            /* 001  power on reset */
1341         RED;                            /* 002  watchdog reset */
1342         RED;                            /* 003  externally initiated reset */
1343         RED;                            /* 004  software initiated reset */
1344         RED;                            /* 005  red mode exception */
1345         NOT; NOT;                       /* 006 - 007 reserved */
1346         IMMU_EXCEPTION;                 /* 008  instruction access exception */
1347         NOT;                            /* 009  instruction access MMU miss */
1348         ASYNC_TRAP(T_INSTR_ERROR, trace_gen, tt0_iae);
1349                                         /* 00A  instruction access error */
1350         NOT; NOT4;                      /* 00B - 00F reserved */
1351         ILLTRAP_INSTR;                  /* 010  illegal instruction */
1352         TRAP(T_PRIV_INSTR);             /* 011  privileged opcode */
1353         NOT;                            /* 012  unimplemented LDD */
1354         NOT;                            /* 013  unimplemented STD */
1355         NOT4; NOT4; NOT4;               /* 014 - 01F reserved */
1356         FP_DISABLED_TRAP;               /* 020  fp disabled */
1357         FP_IEEE_TRAP;                   /* 021  fp exception ieee 754 */
1358         FP_TRAP;                        /* 022  fp exception other */
1359         TAG_OVERFLOW;                   /* 023  tag overflow */
1360         CLEAN_WINDOW;                   /* 024 - 027 clean window */
1361         DIV_BY_ZERO;                    /* 028  division by zero */
1362         NOT;                            /* 029  internal processor error */
1363         NOT; NOT; NOT4;                 /* 02A - 02F reserved */
1364         DMMU_EXCEPTION;                 /* 030  data access exception */
1365         NOT;                            /* 031  data access MMU miss */
1366         ASYNC_TRAP(T_DATA_ERROR, trace_gen, tt0_dae);
1367                                         /* 032  data access error */
1368         NOT;                            /* 033  data access protection */
1369         DMMU_EXC_AG_NOT_ALIGNED;        /* 034  mem address not aligned */
1370         DMMU_EXC_LDDF_NOT_ALIGNED;      /* 035  LDDF mem address not aligned */
1371         DMMU_EXC_STDF_NOT_ALIGNED;      /* 036  STDF mem address not aligned */
1372         DMMU_EXC_AG_PRIV;               /* 037  privileged action */
1373         NOT;                            /* 038  LDQF mem address not aligned */
1374         NOT;                            /* 039  STQF mem address not aligned */
1375         NOT; NOT; NOT4;                 /* 03A - 03F reserved */
1376         LABELED_BAD(tt0_asdat);         /* 040  async data error */
1377         LEVEL_INTERRUPT(1);             /* 041  interrupt level 1 */
1378         LEVEL_INTERRUPT(2);             /* 042  interrupt level 2 */
1379         LEVEL_INTERRUPT(3);             /* 043  interrupt level 3 */
1380         LEVEL_INTERRUPT(4);             /* 044  interrupt level 4 */
1381         LEVEL_INTERRUPT(5);             /* 045  interrupt level 5 */
1382         LEVEL_INTERRUPT(6);             /* 046  interrupt level 6 */
1383         LEVEL_INTERRUPT(7);             /* 047  interrupt level 7 */
1384         LEVEL_INTERRUPT(8);             /* 048  interrupt level 8 */
1385         LEVEL_INTERRUPT(9);             /* 049  interrupt level 9 */
1386         LEVEL_INTERRUPT(10);            /* 04A  interrupt level 10 */
1387         LEVEL_INTERRUPT(11);            /* 04B  interrupt level 11 */
1388         LEVEL_INTERRUPT(12);            /* 04C  interrupt level 12 */
1389         LEVEL_INTERRUPT(13);            /* 04D  interrupt level 13 */
1390         LEVEL14_INTERRUPT;              /* 04E  interrupt level 14 */
1391         LEVEL15_INTERRUPT;              /* 04F  interrupt level 15 */
1392         NOT4; NOT4; NOT4; NOT4;         /* 050 - 05F reserved */
1393         VECTOR_INTERRUPT;               /* 060  interrupt vector */
1394         GOTO(kmdb_trap);                /* 061  PA watchpoint */
1395         GOTO(kmdb_trap);                /* 062  VA watchpoint */
1396         GOTO_TT(ce_err, trace_gen);     /* 063  corrected ECC error */
1397         ITLB_MISS(tt0);                 /* 064  instruction access MMU miss */
1398         DTLB_MISS(tt0);                 /* 068  data access MMU miss */
1399         DTLB_PROT;                      /* 06C  data access protection */
1400         LABELED_BAD(tt0_fecc);          /* 070  fast ecache ECC error */
1401         LABELED_BAD(tt0_dperr);         /* 071  Cheetah+ dcache parity error */
1402         LABELED_BAD(tt0_iperr);         /* 072  Cheetah+ icache parity error */
1403         NOT;                            /* 073  reserved */
1404         NOT4; NOT4; NOT4;               /* 074 - 07F reserved */
1405         NOT4;                           /* 080  spill 0 normal */
1406         SPILL_32bit_asi(ASI_AIUP,sn0);  /* 084  spill 1 normal */
1407         SPILL_64bit_asi(ASI_AIUP,sn0);  /* 088  spill 2 normal */
1408         SPILL_32clean(ASI_AIUP,sn0);    /* 08C  spill 3 normal */
1409         SPILL_64clean(ASI_AIUP,sn0);    /* 090  spill 4 normal */
1410         SPILL_32bit(not);               /* 094  spill 5 normal */
1411         SPILL_64bit(not);               /* 098  spill 6 normal */
1412         SPILL_mixed;                    /* 09C  spill 7 normal */
1413         NOT4;                           /* 0A0  spill 0 other */
1414         SPILL_32bit_asi(ASI_AIUS,so0);  /* 0A4  spill 1 other */
1415         SPILL_64bit_asi(ASI_AIUS,so0);  /* 0A8  spill 2 other */
1416         SPILL_32bit_asi(ASI_AIUS,so0);  /* 0AC  spill 3 other */
1417         SPILL_64bit_asi(ASI_AIUS,so0);  /* 0B0  spill 4 other */
1418         NOT4;                           /* 0B4  spill 5 other */
1419         NOT4;                           /* 0B8  spill 6 other */
1420         NOT4;                           /* 0BC  spill 7 other */
1421         NOT4;                           /* 0C0  fill 0 normal */
1422         FILL_32bit_asi(ASI_AIUP,fn0);   /* 0C4  fill 1 normal */
1423         FILL_64bit_asi(ASI_AIUP,fn0);   /* 0C8  fill 2 normal */
1424         FILL_32bit_asi(ASI_AIUP,fn0);   /* 0CC  fill 3 normal */
1425         FILL_64bit_asi(ASI_AIUP,fn0);   /* 0D0  fill 4 normal */
1426         FILL_32bit(not);                /* 0D4  fill 5 normal */
1427         FILL_64bit(not);                /* 0D8  fill 6 normal */
1428         FILL_mixed;                     /* 0DC  fill 7 normal */
1429         NOT4;                           /* 0E0  fill 0 other */
1430         NOT4;                           /* 0E4  fill 1 other */
1431         NOT4;                           /* 0E8  fill 2 other */
1432         NOT4;                           /* 0EC  fill 3 other */
1433         NOT4;                           /* 0F0  fill 4 other */
1434         NOT4;                           /* 0F4  fill 5 other */
1435         NOT4;                           /* 0F8  fill 6 other */
1436         NOT4;                           /* 0FC  fill 7 other */
1437         /* user traps */
1438         GOTO(syscall_trap_4x);          /* 100  old system call */
1439         TRAP(T_BREAKPOINT);             /* 101  user breakpoint */
1440         TRAP(T_DIV0);                   /* 102  user divide by zero */
1441         FLUSHW(tt0_flushw);             /* 103  flush windows */
1442         GOTO(.clean_windows);           /* 104  clean windows */
1443         BAD;                            /* 105  range check ?? */
1444         GOTO(.fix_alignment);           /* 106  do unaligned references */
1445         BAD;                            /* 107  unused */
1446         SYSCALL_TRAP32;                 /* 108  ILP32 system call on LP64 */
1447         GOTO(set_trap0_addr);           /* 109  set trap0 address */
1448         BAD; BAD; BAD4;                 /* 10A - 10F unused */
1449         TRP4; TRP4; TRP4; TRP4;         /* 110 - 11F V9 user trap handlers */
1450         GOTO(.getcc);                   /* 120  get condition codes */
1451         GOTO(.setcc);                   /* 121  set condition codes */
1452         GOTO(.getpsr);                  /* 122  get psr */
1453         GOTO(.setpsr);                  /* 123  set psr (some fields) */
1454         GOTO(get_timestamp);            /* 124  get timestamp */
1455         GOTO(get_virtime);              /* 125  get lwp virtual time */
1456         PRIV(self_xcall);               /* 126  self xcall */
1457         GOTO(get_hrestime);             /* 127  get hrestime */
1458         BAD;                            /* 128  ST_SETV9STACK */
1459         GOTO(.getlgrp);                 /* 129  get lgrpid */
1460         BAD; BAD; BAD4;                 /* 12A - 12F unused */
1461         BAD4; BAD4;                     /* 130 - 137 unused */
1462         DTRACE_PID;                     /* 138  dtrace pid tracing provider */
1463         BAD;                            /* 139  unused */
1464         DTRACE_RETURN;                  /* 13A  dtrace pid return probe */
1465         BAD; BAD4;                      /* 13B - 13F unused */
1466         SYSCALL_TRAP;                   /* 140  LP64 system call */
1467         SYSCALL(nosys);                 /* 141  unused system call trap */
1468 #ifdef DEBUG_USER_TRAPTRACECTL
1469         GOTO(.traptrace_freeze);        /* 142  freeze traptrace */
1470         GOTO(.traptrace_unfreeze);      /* 143  unfreeze traptrace */
1471 #else
1472         SYSCALL(nosys);                 /* 142  unused system call trap */
1473         SYSCALL(nosys);                 /* 143  unused system call trap */
1474 #endif
1475         BAD4; BAD4; BAD4;               /* 144 - 14F unused */
1476         BAD4; BAD4; BAD4; BAD4;         /* 150 - 15F unused */
1477         BAD4; BAD4; BAD4; BAD4;         /* 160 - 16F unused */
1478         BAD;                            /* 170 - unused */
1479         BAD;                            /* 171 - unused */
1480         BAD; BAD;                       /* 172 - 173 unused */
1481         BAD4; BAD4;                     /* 174 - 17B unused */
1482 #ifdef  PTL1_PANIC_DEBUG
1483         mov PTL1_BAD_DEBUG, %g1; GOTO(ptl1_panic);
1484                                         /* 17C  test ptl1_panic */
1485 #else
1486         BAD;                            /* 17C  unused */
1487 #endif  /* PTL1_PANIC_DEBUG */
1488         PRIV(kmdb_trap);                /* 17D  kmdb enter (L1-A) */
1489         PRIV(kmdb_trap);                /* 17E  kmdb breakpoint */
1490         PRIV(kctx_obp_bpt);             /* 17F  obp breakpoint */
1491         /* reserved */
1492         NOT4; NOT4; NOT4; NOT4;         /* 180 - 18F reserved */
1493         NOT4; NOT4; NOT4; NOT4;         /* 190 - 19F reserved */
1494         NOT4; NOT4; NOT4; NOT4;         /* 1A0 - 1AF reserved */
1495         NOT4; NOT4; NOT4; NOT4;         /* 1B0 - 1BF reserved */
1496         NOT4; NOT4; NOT4; NOT4;         /* 1C0 - 1CF reserved */
1497         NOT4; NOT4; NOT4; NOT4;         /* 1D0 - 1DF reserved */
1498         NOT4; NOT4; NOT4; NOT4;         /* 1E0 - 1EF reserved */
1499         NOT4; NOT4; NOT4; NOT4;         /* 1F0 - 1FF reserved */
1500 trap_table1:
1501         NOT4; NOT4; NOT; NOT;           /* 000 - 009 unused */
1502         ASYNC_TRAP(T_INSTR_ERROR + T_TL1, trace_gen, tt1_iae);
1503                                         /* 00A  instruction access error */
1504         NOT; NOT4;                      /* 00B - 00F unused */
1505         NOT4; NOT4; NOT4; NOT4;         /* 010 - 01F unused */
1506         NOT4;                           /* 020 - 023 unused */
1507         CLEAN_WINDOW;                   /* 024 - 027 clean window */
1508         NOT4; NOT4;                     /* 028 - 02F unused */
1509         DMMU_EXCEPTION_TL1;             /* 030  data access exception */
1510         NOT;                            /* 031 unused */
1511         ASYNC_TRAP(T_DATA_ERROR + T_TL1, trace_gen, tt1_dae);
1512                                         /* 032  data access error */
1513         NOT;                            /* 033  unused */
1514         MISALIGN_ADDR_TL1;              /* 034  mem address not aligned */
1515         NOT; NOT; NOT; NOT4; NOT4;      /* 035 - 03F unused */
1516         LABELED_BAD(tt1_asdat);         /* 040  async data error */
1517         NOT; NOT; NOT;                  /* 041 - 043 unused */
1518         NOT4; NOT4; NOT4;               /* 044 - 04F unused */
1519         NOT4; NOT4; NOT4; NOT4;         /* 050 - 05F unused */
1520         NOT;                            /* 060  unused */
1521         GOTO(kmdb_trap_tl1);            /* 061  PA watchpoint */
1522         GOTO(kmdb_trap_tl1);            /* 062  VA watchpoint */
1523         GOTO_TT(ce_err_tl1, trace_gen); /* 063  corrected ECC error */
1524         ITLB_MISS(tt1);                 /* 064  instruction access MMU miss */
1525         DTLB_MISS(tt1);                 /* 068  data access MMU miss */
1526         DTLB_PROT;                      /* 06C  data access protection */
1527         LABELED_BAD(tt1_fecc);          /* 070  fast ecache ECC error */
1528         LABELED_BAD(tt1_dperr);         /* 071  Cheetah+ dcache parity error */
1529         LABELED_BAD(tt1_iperr);         /* 072  Cheetah+ icache parity error */
1530         NOT;                            /* 073  reserved */
1531         NOT4; NOT4; NOT4;               /* 074 - 07F reserved */
1532         NOT4;                           /* 080  spill 0 normal */
1533         SPILL_32bit_tt1(ASI_AIUP,sn1);  /* 084  spill 1 normal */
1534         SPILL_64bit_tt1(ASI_AIUP,sn1);  /* 088  spill 2 normal */
1535         SPILL_32bit_tt1(ASI_AIUP,sn1);  /* 08C  spill 3 normal */
1536         SPILL_64bit_tt1(ASI_AIUP,sn1);  /* 090  spill 4 normal */
1537         SPILL_32bit(not);               /* 094  spill 5 normal */
1538         SPILL_64bit(not);               /* 098  spill 6 normal */
1539         SPILL_mixed;                    /* 09C  spill 7 normal */
1540         NOT4;                           /* 0A0  spill 0 other */
1541         SPILL_32bit_tt1(ASI_AIUS,so1);  /* 0A4  spill 1 other */
1542         SPILL_64bit_tt1(ASI_AIUS,so1);  /* 0A8  spill 2 other */
1543         SPILL_32bit_tt1(ASI_AIUS,so1);  /* 0AC  spill 3 other */
1544         SPILL_64bit_tt1(ASI_AIUS,so1);  /* 0B0  spill 4 other */
1545         NOT4;                           /* 0B4  spill 5 other */
1546         NOT4;                           /* 0B8  spill 6 other */
1547         NOT4;                           /* 0BC  spill 7 other */
1548         NOT4;                           /* 0C0  fill 0 normal */
1549         FILL_32bit_tt1(ASI_AIUP,fn1);   /* 0C4  fill 1 normal */
1550         FILL_64bit_tt1(ASI_AIUP,fn1);   /* 0C8  fill 2 normal */
1551         FILL_32bit_tt1(ASI_AIUP,fn1);   /* 0CC  fill 3 normal */
1552         FILL_64bit_tt1(ASI_AIUP,fn1);   /* 0D0  fill 4 normal */
1553         FILL_32bit(not);                /* 0D4  fill 5 normal */
1554         FILL_64bit(not);                /* 0D8  fill 6 normal */
1555         FILL_mixed;                     /* 0DC  fill 7 normal */
1556         NOT4; NOT4; NOT4; NOT4;         /* 0E0 - 0EF unused */
1557         NOT4; NOT4; NOT4; NOT4;         /* 0F0 - 0FF unused */
1558         LABELED_BAD(tt1_swtrap0);       /* 100  fast ecache ECC error (cont) */
1559         LABELED_BAD(tt1_swtrap1);       /* 101  Ch+ D$ parity error (cont) */
1560         LABELED_BAD(tt1_swtrap2);       /* 102  Ch+ I$ parity error (cont) */
1561         NOT;                            /* 103  reserved */
1562 /*
1563  * We only reserve the above four special case soft traps for code running
1564  * at TL>0, so we can truncate the trap table here.
1565  */
1566 etrap_table:
1567         .size   trap_table, (.-trap_table)
1568         .size   scb, (.-scb)
1569 
1570 /*
1571  * We get to exec_fault when an instruction miss hits a tte that
1572  * has no execute bit set.  We go to tl0 to handle it.
1573  *
1574  * g1 = tsbe pointer (in/clobbered)
1575  * g2 = tag access register (in)
1576  * g3 - g4 = scratch (clobbered)
1577  * g5 = tsbe data (in)
1578  * g6 = scratch (clobbered)
1579  */
1580         ALTENTRY(exec_fault)
1581         TRACE_TSBHIT(0x200)
1582         SWITCH_GLOBALS
1583         mov     MMU_TAG_ACCESS, %g4
1584         ldxa    [%g4]ASI_IMMU, %g2                      ! arg1 = addr
1585         mov     T_INSTR_MMU_MISS, %g3                   ! arg2 = traptype
1586         set     trap, %g1
1587         ba,pt   %xcc, sys_trap
1588           mov   -1, %g4
1589 
1590 .mmu_exception_not_aligned:
1591         rdpr    %tstate, %g1
1592         btst    TSTATE_PRIV, %g1
1593         bnz,pn  %icc, 2f
1594         nop
1595         CPU_ADDR(%g1, %g4)                              ! load CPU struct addr
1596         ldn     [%g1 + CPU_THREAD], %g1                 ! load thread pointer
1597         ldn     [%g1 + T_PROCP], %g1                    ! load proc pointer
1598         ldn     [%g1 + P_UTRAPS], %g5                   ! are there utraps?
1599         brz,pt  %g5, 2f
1600         nop
1601         ldn     [%g5 + P_UTRAP15], %g5                  ! unaligned utrap?
1602         brz,pn  %g5, 2f
1603         nop
1604         btst    1, %sp
1605         bz,pt   %xcc, 1f                                ! 32 bit user program
1606         nop
1607         ba,pt   %xcc, .setup_v9utrap                    ! 64 bit user program
1608         nop
1609 1:
1610         ba,pt   %xcc, .setup_utrap
1611         or      %g2, %g0, %g7
1612 2:
1613         ba,pt   %xcc, .mmu_exception_end
1614         mov     T_ALIGNMENT, %g1
1615 
1616 .mmu_priv_exception:
1617         rdpr    %tstate, %g1
1618         btst    TSTATE_PRIV, %g1
1619         bnz,pn  %icc, 1f
1620         nop
1621         CPU_ADDR(%g1, %g4)                              ! load CPU struct addr
1622         ldn     [%g1 + CPU_THREAD], %g1                 ! load thread pointer
1623         ldn     [%g1 + T_PROCP], %g1                    ! load proc pointer
1624         ldn     [%g1 + P_UTRAPS], %g5                   ! are there utraps?
1625         brz,pt  %g5, 1f
1626         nop
1627         ldn     [%g5 + P_UTRAP16], %g5
1628         brnz,pt %g5, .setup_v9utrap
1629         nop
1630 1:
1631         mov     T_PRIV_INSTR, %g1
1632 
1633 .mmu_exception_end:
1634         CPU_INDEX(%g4, %g5)
1635         set     cpu_core, %g5
1636         sllx    %g4, CPU_CORE_SHIFT, %g4
1637         add     %g4, %g5, %g4
1638         lduh    [%g4 + CPUC_DTRACE_FLAGS], %g5
1639         andcc   %g5, CPU_DTRACE_NOFAULT, %g0
1640         bz      %xcc, .mmu_exception_tlb_chk
1641         or      %g5, CPU_DTRACE_BADADDR, %g5
1642         stuh    %g5, [%g4 + CPUC_DTRACE_FLAGS]
1643         done
1644 
1645 .mmu_exception_tlb_chk:
1646         GET_CPU_IMPL(%g5)                       ! check SFSR.FT to see if this
1647         cmp     %g5, PANTHER_IMPL               ! is a TLB parity error. But
1648         bne     2f                              ! we only do this check while
1649         mov     1, %g4                          ! running on Panther CPUs
1650         sllx    %g4, PN_SFSR_PARITY_SHIFT, %g4  ! since US-I/II use the same
1651         andcc   %g3, %g4, %g0                   ! bit for something else which
1652         bz      2f                              ! will be handled later.
1653         nop
1654 .mmu_exception_is_tlb_parity:
1655         .weak itlb_parity_trap
1656         .weak dtlb_parity_trap
1657         set     itlb_parity_trap, %g4
1658         cmp     %g1, T_INSTR_EXCEPTION          ! branch to the itlb or
1659         be      3f                              ! dtlb parity handler
1660         nop                                     ! if this trap is due
1661         set     dtlb_parity_trap, %g4
1662         cmp     %g1, T_DATA_EXCEPTION           ! to an IMMU exception
1663         be      3f                              ! or DMMU exception.
1664         nop
1665 2:
1666         sllx    %g3, 32, %g3
1667         or      %g3, %g1, %g3
1668         set     trap, %g1
1669         ba,pt   %xcc, sys_trap
1670         sub     %g0, 1, %g4
1671 3:
1672         jmp     %g4                             ! off to the appropriate
1673         nop                                     ! TLB parity handler
1674 
1675 .fp_disabled:
1676         CPU_ADDR(%g1, %g4)                              ! load CPU struct addr
1677         ldn     [%g1 + CPU_THREAD], %g1                 ! load thread pointer
1678 #ifdef SF_ERRATA_30 /* call causes fp-disabled */
1679         brz,a,pn %g1, 2f
1680           nop
1681 #endif
1682         rdpr    %tstate, %g4
1683         btst    TSTATE_PRIV, %g4
1684 #ifdef SF_ERRATA_30 /* call causes fp-disabled */
1685         bnz,pn %icc, 2f
1686           nop
1687 #else
1688         bnz,a,pn %icc, ptl1_panic
1689           mov   PTL1_BAD_FPTRAP, %g1
1690 #endif
1691         ldn     [%g1 + T_PROCP], %g1                    ! load proc pointer
1692         ldn     [%g1 + P_UTRAPS], %g5                   ! are there utraps?
1693         brz,a,pt %g5, 2f
1694           nop
1695         ldn     [%g5 + P_UTRAP7], %g5                   ! fp_disabled utrap?
1696         brz,a,pn %g5, 2f
1697           nop
1698         btst    1, %sp
1699         bz,a,pt %xcc, 1f                                ! 32 bit user program
1700           nop
1701         ba,a,pt %xcc, .setup_v9utrap                    ! 64 bit user program
1702           nop
1703 1:
1704         ba,pt   %xcc, .setup_utrap
1705           or    %g0, %g0, %g7
1706 2:
1707         set     fp_disabled, %g1
1708         ba,pt   %xcc, sys_trap
1709           sub   %g0, 1, %g4
1710 
1711 .fp_ieee_exception:
1712         rdpr    %tstate, %g1
1713         btst    TSTATE_PRIV, %g1
1714         bnz,a,pn %icc, ptl1_panic
1715           mov   PTL1_BAD_FPTRAP, %g1
1716         CPU_ADDR(%g1, %g4)                              ! load CPU struct addr
1717         stx     %fsr, [%g1 + CPU_TMP1]
1718         ldx     [%g1 + CPU_TMP1], %g2
1719         ldn     [%g1 + CPU_THREAD], %g1                 ! load thread pointer
1720         ldn     [%g1 + T_PROCP], %g1                    ! load proc pointer
1721         ldn     [%g1 + P_UTRAPS], %g5                   ! are there utraps?
1722         brz,a,pt %g5, 1f
1723           nop
1724         ldn     [%g5 + P_UTRAP8], %g5
1725         brnz,a,pt %g5, .setup_v9utrap
1726           nop
1727 1:
1728         set     _fp_ieee_exception, %g1
1729         ba,pt   %xcc, sys_trap
1730           sub   %g0, 1, %g4
1731 
1732 /*
1733  * Register Inputs:
1734  *      %g5             user trap handler
1735  *      %g7             misaligned addr - for alignment traps only
1736  */
1737 .setup_utrap:
1738         set     trap, %g1                       ! setup in case we go
1739         mov     T_FLUSH_PCB, %g3                ! through sys_trap on
1740         sub     %g0, 1, %g4                     ! the save instruction below
1741 
1742         /*
1743          * If the DTrace pid provider is single stepping a copied-out
1744          * instruction, t->t_dtrace_step will be set. In that case we need
1745          * to abort the single-stepping (since execution of the instruction
1746          * was interrupted) and use the value of t->t_dtrace_npc as the %npc.
1747          */
1748         save    %sp, -SA(MINFRAME32), %sp       ! window for trap handler
1749         CPU_ADDR(%g1, %g4)                      ! load CPU struct addr
1750         ldn     [%g1 + CPU_THREAD], %g1         ! load thread pointer
1751         ldub    [%g1 + T_DTRACE_STEP], %g2      ! load t->t_dtrace_step
1752         rdpr    %tnpc, %l2                      ! arg1 == tnpc
1753         brz,pt  %g2, 1f
1754         rdpr    %tpc, %l1                       ! arg0 == tpc
1755 
1756         ldub    [%g1 + T_DTRACE_AST], %g2       ! load t->t_dtrace_ast
1757         ldn     [%g1 + T_DTRACE_NPC], %l2       ! arg1 = t->t_dtrace_npc (step)
1758         brz,pt  %g2, 1f
1759         st      %g0, [%g1 + T_DTRACE_FT]        ! zero all pid provider flags
1760         stub    %g2, [%g1 + T_ASTFLAG]          ! aston(t) if t->t_dtrace_ast
1761 1:
1762         mov     %g7, %l3                        ! arg2 == misaligned address
1763 
1764         rdpr    %tstate, %g1                    ! cwp for trap handler
1765         rdpr    %cwp, %g4
1766         bclr    TSTATE_CWP_MASK, %g1
1767         wrpr    %g1, %g4, %tstate
1768         wrpr    %g0, %g5, %tnpc                 ! trap handler address
1769         FAST_TRAP_DONE
1770         /* NOTREACHED */
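/*
 * The DTrace bookkeeping above, restated as a hedged C sketch (the
 * field names follow the T_DTRACE_* offsets used by the assembly;
 * aston() is the usual notification):
 *
 *	pc = tpc;
 *	npc = tnpc;
 *	if (t->t_dtrace_step) {
 *		npc = t->t_dtrace_npc;	// abort the single-step
 *		t->t_dtrace_ft = 0;	// zero all pid provider flags
 *		if (t->t_dtrace_ast)
 *			aston(t);	// post the pending AST
 *	}
 */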
1771 
1772 .check_v9utrap:
1773         rdpr    %tstate, %g1
1774         btst    TSTATE_PRIV, %g1
1775         bnz,a,pn %icc, 3f
1776           nop
1777         CPU_ADDR(%g4, %g1)                              ! load CPU struct addr
1778         ldn     [%g4 + CPU_THREAD], %g5                 ! load thread pointer
1779         ldn     [%g5 + T_PROCP], %g5                    ! load proc pointer
1780         ldn     [%g5 + P_UTRAPS], %g5                   ! are there utraps?
1781 
1782         cmp     %g3, T_SOFTWARE_TRAP
1783         bne,a,pt %icc, 1f
1784           nop
1785 
1786         brz,pt %g5, 3f                  ! if p_utraps == NULL goto trap()
1787           rdpr  %tt, %g3                ! delay - get actual hw trap type
1788 
1789         sub     %g3, 254, %g1           ! UT_TRAP_INSTRUCTION_16 = p_utraps[18]
1790         ba,pt   %icc, 2f
1791           smul  %g1, CPTRSIZE, %g2
1792 1:
1793         brz,a,pt %g5, 3f                ! if p_utraps == NULL goto trap()
1794           nop
1795 
1796         cmp     %g3, T_UNIMP_INSTR
1797         bne,a,pt %icc, 2f
1798           nop
1799 
1800         mov     1, %g1
1801         st      %g1, [%g4 + CPU_TL1_HDLR] ! set CPU_TL1_HDLR
1802         rdpr    %tpc, %g1               ! ld trapping instruction using
1803         lduwa   [%g1]ASI_AIUP, %g1      ! "AS IF USER" ASI which could fault
1804         st      %g0, [%g4 + CPU_TL1_HDLR] ! clr CPU_TL1_HDLR
1805 
1806         sethi   %hi(0xc1c00000), %g4    ! setup mask for illtrap instruction
1807         andcc   %g1, %g4, %g4           ! and instruction with mask
1808         bnz,a,pt %icc, 3f               ! if %g4 == zero, %g1 is an ILLTRAP
1809           nop                           ! fall thru to setup
1810 2:
1811         ldn     [%g5 + %g2], %g5
1812         brnz,a,pt %g5, .setup_v9utrap
1813           nop
1814 3:
1815         set     trap, %g1
1816         ba,pt   %xcc, sys_trap
1817           sub   %g0, 1, %g4
1818         /* NOTREACHED */
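/*
 * The 0xc1c00000 mask used above selects the op (bits 31:30) and op2
 * (bits 24:22) fields of the fetched instruction; both fields are zero
 * only for an ILLTRAP.  Hedged C sketch of the test:
 *
 *	static int
 *	is_illtrap(uint32_t instr)
 *	{
 *		return ((instr & 0xc1c00000U) == 0);
 *	}
 */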
1819 
1820 /*
1821  * Register Inputs:
1822  *      %g5             user trap handler
1823  */
1824 .setup_v9utrap:
1825         set     trap, %g1                       ! setup in case we go
1826         mov     T_FLUSH_PCB, %g3                ! through sys_trap on
1827         sub     %g0, 1, %g4                     ! the save instruction below
1828 
1829         /*
1830          * If the DTrace pid provider is single stepping a copied-out
1831          * instruction, t->t_dtrace_step will be set. In that case we need
1832          * to abort the single-stepping (since execution of the instruction
1833          * was interrupted) and use the value of t->t_dtrace_npc as the %npc.
1834          */
1835         save    %sp, -SA(MINFRAME64), %sp       ! window for trap handler
1836         CPU_ADDR(%g1, %g4)                      ! load CPU struct addr
1837         ldn     [%g1 + CPU_THREAD], %g1         ! load thread pointer
1838         ldub    [%g1 + T_DTRACE_STEP], %g2      ! load t->t_dtrace_step
1839         rdpr    %tnpc, %l7                      ! arg1 == tnpc
1840         brz,pt  %g2, 1f
1841         rdpr    %tpc, %l6                       ! arg0 == tpc
1842 
1843         ldub    [%g1 + T_DTRACE_AST], %g2       ! load t->t_dtrace_ast
1844         ldn     [%g1 + T_DTRACE_NPC], %l7       ! arg1 == t->t_dtrace_npc (step)
1845         brz,pt  %g2, 1f
1846         st      %g0, [%g1 + T_DTRACE_FT]        ! zero all pid provider flags
1847         stub    %g2, [%g1 + T_ASTFLAG]          ! aston(t) if t->t_dtrace_ast
1848 1:
1849         rdpr    %tstate, %g2                    ! cwp for trap handler
1850         rdpr    %cwp, %g4
1851         bclr    TSTATE_CWP_MASK, %g2
1852         wrpr    %g2, %g4, %tstate
1853 
1854         ldn     [%g1 + T_PROCP], %g4            ! load proc pointer
1855         ldn     [%g4 + P_AS], %g4               ! load as pointer
1856         ldn     [%g4 + A_USERLIMIT], %g4        ! load as userlimit
1857         cmp     %l7, %g4                        ! check for single-step set
1858         bne,pt  %xcc, 4f
1859           nop
1860         ldn     [%g1 + T_LWP], %g1              ! load klwp pointer
1861         ld      [%g1 + PCB_STEP], %g4           ! load single-step flag
1862         cmp     %g4, STEP_ACTIVE                ! step flags set in pcb?
1863         bne,pt  %icc, 4f
1864           nop
1865         stn     %g5, [%g1 + PCB_TRACEPC]        ! save trap handler addr in pcb
1866         mov     %l7, %g4                        ! on entry to precise user trap
1867         add     %l6, 4, %l7                     ! handler, %l6 == pc, %l7 == npc
1868                                                 ! at time of trap
1869         wrpr    %g0, %g4, %tnpc                 ! generate FLTBOUNDS,
1870                                                 ! %g4 == userlimit
1871         FAST_TRAP_DONE
1872         /* NOTREACHED */
1873 4:
1874         wrpr    %g0, %g5, %tnpc                 ! trap handler address
1875         FAST_TRAP_DONE_CHK_INTR
1876         /* NOTREACHED */
1877 
1878 .fp_exception:
1879         CPU_ADDR(%g1, %g4)
1880         stx     %fsr, [%g1 + CPU_TMP1]
1881         ldx     [%g1 + CPU_TMP1], %g2
1882 
1883         /*
1884          * Cheetah takes an unfinished_FPop trap for a certain range of operands
1885          * to the "fitos" instruction. Instead of going through the slow
1886          * software emulation path, we try to simulate the "fitos" instruction
1887          * via "fitod" and "fdtos" provided the following conditions are met:
1888          *
1889          *      fpu_exists is set (if DEBUG)
1890          *      not in privileged mode
1891          *      ftt is unfinished_FPop
1892          *      NXM IEEE trap is not enabled
1893          *      instruction at %tpc is "fitos"
1894          *
1895          *  Usage:
1896          *      %g1     per cpu address
1897          *      %g2     %fsr
1898          *      %g6     user instruction
1899          *
1900          * Note that we can take a memory access related trap while trying
1901          * to fetch the user instruction. Therefore, we set CPU_TL1_HDLR
1902          * flag to catch those traps and let the SFMMU code deal with page
1903          * fault and data access exception.
1904          */
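/*
 * The gate below, restated as a hedged C sketch (the macros are the
 * real ones used by the assembly):
 *
 *	ftt = (fsr >> FSR_FTT_SHIFT) & (FSR_FTT >> FSR_FTT_SHIFT);
 *	if (priv || ftt != FTT_UNFIN || (fsr & FSR_TEM_NX) != 0 ||
 *	    (instr & FITOS_INSTR_MASK) != FITOS_INSTR)
 *		goto fp_exception_cont;	// take the slow emulation path
 */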
1905 #if defined(DEBUG) || defined(NEED_FPU_EXISTS)
1906         sethi   %hi(fpu_exists), %g7
1907         ld      [%g7 + %lo(fpu_exists)], %g7
1908         brz,pn %g7, .fp_exception_cont
1909           nop
1910 #endif
1911         rdpr    %tstate, %g7                    ! branch if in privileged mode
1912         btst    TSTATE_PRIV, %g7
1913         bnz,pn  %xcc, .fp_exception_cont
1914         srl     %g2, FSR_FTT_SHIFT, %g7         ! extract ftt from %fsr
1915         and     %g7, (FSR_FTT>>FSR_FTT_SHIFT), %g7
1916         cmp     %g7, FTT_UNFIN
1917         set     FSR_TEM_NX, %g5
1918         bne,pn  %xcc, .fp_exception_cont        ! branch if NOT unfinished_FPop
1919           andcc %g2, %g5, %g0
1920         bne,pn  %xcc, .fp_exception_cont        ! branch if FSR_TEM_NX enabled
1921           rdpr  %tpc, %g5                       ! get faulting PC
1922 
1923         or      %g0, 1, %g7
1924         st      %g7, [%g1 + CPU_TL1_HDLR]       ! set tl1_hdlr flag
1925         lda     [%g5]ASI_USER, %g6              ! get user's instruction
1926         st      %g0, [%g1 + CPU_TL1_HDLR]       ! clear tl1_hdlr flag
1927 
1928         set     FITOS_INSTR_MASK, %g7
1929         and     %g6, %g7, %g7
1930         set     FITOS_INSTR, %g5
1931         cmp     %g7, %g5
1932         bne,pn  %xcc, .fp_exception_cont        ! branch if not FITOS_INSTR
1933          nop
1934 
1935         /*
1936          * This is unfinished FPops trap for "fitos" instruction. We
1937          * need to simulate "fitos" via "fitod" and "fdtos" instruction
1938          * sequence.
1939          *
1940          * We need a temporary FP register to do the conversion. Since
1941          * both source and destination operands for the "fitos" instruction
1942          * have to be within %f0-%f31, we use an FP register from the upper
1943          * half to guarantee that it won't collide with the source or the
1944          * dest operand. However, we do have to save and restore its value.
1945          *
1946          * We use %d62 as a temporary FP register for the conversion and
1947          * branch to appropriate instruction within the conversion tables
1948          * based upon the rs2 and rd values.
1949          */
1950 
1951         std     %d62, [%g1 + CPU_TMP1]          ! save original value
1952 
1953         srl     %g6, FITOS_RS2_SHIFT, %g7
1954         and     %g7, FITOS_REG_MASK, %g7
1955         set     _fitos_fitod_table, %g4
1956         sllx    %g7, 2, %g7
1957         jmp     %g4 + %g7
1958           ba,pt %xcc, _fitos_fitod_done
1959         .empty
1960 
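/*
 * The jmp/ba pair above forms a DCTI couple: the ba sitting in the
 * jmp's delay slot retires after exactly one instruction at the jump
 * target has executed, so precisely one fitod from the table below
 * runs before control reaches _fitos_fitod_done.  In effect (hedged C
 * analogy, with fregs an illustrative view of %f0-%f31):
 *
 *	d62 = (double)(int32_t)fregs[rs2];
 */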
1961 _fitos_fitod_table:
1962           fitod %f0, %d62
1963           fitod %f1, %d62
1964           fitod %f2, %d62
1965           fitod %f3, %d62
1966           fitod %f4, %d62
1967           fitod %f5, %d62
1968           fitod %f6, %d62
1969           fitod %f7, %d62
1970           fitod %f8, %d62
1971           fitod %f9, %d62
1972           fitod %f10, %d62
1973           fitod %f11, %d62
1974           fitod %f12, %d62
1975           fitod %f13, %d62
1976           fitod %f14, %d62
1977           fitod %f15, %d62
1978           fitod %f16, %d62
1979           fitod %f17, %d62
1980           fitod %f18, %d62
1981           fitod %f19, %d62
1982           fitod %f20, %d62
1983           fitod %f21, %d62
1984           fitod %f22, %d62
1985           fitod %f23, %d62
1986           fitod %f24, %d62
1987           fitod %f25, %d62
1988           fitod %f26, %d62
1989           fitod %f27, %d62
1990           fitod %f28, %d62
1991           fitod %f29, %d62
1992           fitod %f30, %d62
1993           fitod %f31, %d62
1994 _fitos_fitod_done:
1995 
1996         /*
1997          * Now convert data back into single precision
1998          */
1999         srl     %g6, FITOS_RD_SHIFT, %g7
2000         and     %g7, FITOS_REG_MASK, %g7
2001         set     _fitos_fdtos_table, %g4
2002         sllx    %g7, 2, %g7
2003         jmp     %g4 + %g7
2004           ba,pt %xcc, _fitos_fdtos_done
2005         .empty
2006 
2007 _fitos_fdtos_table:
2008           fdtos %d62, %f0
2009           fdtos %d62, %f1
2010           fdtos %d62, %f2
2011           fdtos %d62, %f3
2012           fdtos %d62, %f4
2013           fdtos %d62, %f5
2014           fdtos %d62, %f6
2015           fdtos %d62, %f7
2016           fdtos %d62, %f8
2017           fdtos %d62, %f9
2018           fdtos %d62, %f10
2019           fdtos %d62, %f11
2020           fdtos %d62, %f12
2021           fdtos %d62, %f13
2022           fdtos %d62, %f14
2023           fdtos %d62, %f15
2024           fdtos %d62, %f16
2025           fdtos %d62, %f17
2026           fdtos %d62, %f18
2027           fdtos %d62, %f19
2028           fdtos %d62, %f20
2029           fdtos %d62, %f21
2030           fdtos %d62, %f22
2031           fdtos %d62, %f23
2032           fdtos %d62, %f24
2033           fdtos %d62, %f25
2034           fdtos %d62, %f26
2035           fdtos %d62, %f27
2036           fdtos %d62, %f28
2037           fdtos %d62, %f29
2038           fdtos %d62, %f30
2039           fdtos %d62, %f31
2040 _fitos_fdtos_done:
2041 
2042         ldd     [%g1 + CPU_TMP1], %d62          ! restore %d62
2043 
2044 #if defined(DEBUG)
2045         /*
2046          * Update FPop_unfinished trap kstat
2047          */
2048         set     fpustat+FPUSTAT_UNFIN_KSTAT, %g7
2049         ldx     [%g7], %g5
2050 1:
2051         add     %g5, 1, %g6
2052 
2053         casxa   [%g7] ASI_N, %g5, %g6
2054         cmp     %g5, %g6
2055         bne,a,pn %xcc, 1b
2056           or    %g0, %g6, %g5
2057 
2058         /*
2059          * Update fpu_sim_fitos kstat
2060          */
2061         set     fpuinfo+FPUINFO_FITOS_KSTAT, %g7
2062         ldx     [%g7], %g5
2063 1:
2064         add     %g5, 1, %g6
2065 
2066         casxa   [%g7] ASI_N, %g5, %g6
2067         cmp     %g5, %g6
2068         bne,a,pn %xcc, 1b
2069           or    %g0, %g6, %g5
2070 #endif /* DEBUG */
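/*
 * Each casxa loop above is a lock-free 64-bit increment; as a hedged C
 * sketch, with casx() standing in for the casxa instruction:
 *
 *	void
 *	kstat_inc(volatile uint64_t *p)
 *	{
 *		uint64_t old;
 *
 *		do {
 *			old = *p;
 *		} while (casx(p, old, old + 1) != old);
 *	}
 */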
2071 
2072         FAST_TRAP_DONE
2073 
2074 .fp_exception_cont:
2075         /*
2076          * Let _fp_exception deal with simulating FPop instruction.
2077          * Note that we need to pass %fsr in %g2 (already read above).
2078          */
2079 
2080         set     _fp_exception, %g1
2081         ba,pt   %xcc, sys_trap
2082         sub     %g0, 1, %g4
2083 
2084         .global opl_cleanw_patch
2085 opl_cleanw_patch:
2086 .clean_windows:
2087         set     trap, %g1
2088         mov     T_FLUSH_PCB, %g3
2089         sub     %g0, 1, %g4
2090         save
2091         flushw
2092         restore
2093         wrpr    %g0, %g0, %cleanwin     ! no clean windows
2094 
2095         CPU_ADDR(%g4, %g5)
2096         ldn     [%g4 + CPU_MPCB], %g4
2097         brz,a,pn %g4, 1f
2098           nop
2099         ld      [%g4 + MPCB_WSTATE], %g5
2100         add     %g5, WSTATE_CLEAN_OFFSET, %g5
2101         wrpr    %g0, %g5, %wstate
2102 1:      FAST_TRAP_DONE
2103 
2104 /*
2105  * .spill_clean: clean the previous window, restore the wstate, and
2106  * retry the trapped instruction.
2107  *
2108  * Entry: %g7 contains new wstate
2109  */
2110 .spill_clean:
2111         sethi   %hi(nwin_minus_one), %g5
2112         ld      [%g5 + %lo(nwin_minus_one)], %g5 ! %g5 = nwin - 1
2113         rdpr    %cwp, %g6                       ! %g6 = %cwp
2114         deccc   %g6                             ! %g6--
2115         movneg  %xcc, %g5, %g6                  ! if (%g6<0) %g6 = nwin-1
2116         wrpr    %g6, %cwp
2117         TT_TRACE_L(trace_win)
2118         clr     %l0
2119         clr     %l1
2120         clr     %l2
2121         clr     %l3
2122         clr     %l4
2123         clr     %l5
2124         clr     %l6
2125         clr     %l7
2126         wrpr    %g0, %g7, %wstate
2127         saved
2128         retry                   ! restores correct %cwp
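/*
 * The deccc/movneg pair above is a decrement of %cwp modulo the number
 * of windows; as C:
 *
 *	cwp = (cwp == 0) ? nwin - 1 : cwp - 1;
 */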
2129 
2130 .fix_alignment:
2131         CPU_ADDR(%g1, %g2)              ! load CPU struct addr to %g1 using %g2
2132         ldn     [%g1 + CPU_THREAD], %g1 ! load thread pointer
2133         ldn     [%g1 + T_PROCP], %g1
2134         mov     1, %g2
2135         stb     %g2, [%g1 + P_FIXALIGNMENT]
2136         FAST_TRAP_DONE
2137 
2138 #define STDF_REG(REG, ADDR, TMP)                \
2139         sll     REG, 3, REG                     ;\
2140 mark1:  set     start1, TMP                     ;\
2141         jmp     REG + TMP                       ;\
2142           nop                                   ;\
2143 start1: ba,pt   %xcc, done1                     ;\
2144           std   %f0, [ADDR + CPU_TMP1]          ;\
2145         ba,pt   %xcc, done1                     ;\
2146           std   %f32, [ADDR + CPU_TMP1]         ;\
2147         ba,pt   %xcc, done1                     ;\
2148           std   %f2, [ADDR + CPU_TMP1]          ;\
2149         ba,pt   %xcc, done1                     ;\
2150           std   %f34, [ADDR + CPU_TMP1]         ;\
2151         ba,pt   %xcc, done1                     ;\
2152           std   %f4, [ADDR + CPU_TMP1]          ;\
2153         ba,pt   %xcc, done1                     ;\
2154           std   %f36, [ADDR + CPU_TMP1]         ;\
2155         ba,pt   %xcc, done1                     ;\
2156           std   %f6, [ADDR + CPU_TMP1]          ;\
2157         ba,pt   %xcc, done1                     ;\
2158           std   %f38, [ADDR + CPU_TMP1]         ;\
2159         ba,pt   %xcc, done1                     ;\
2160           std   %f8, [ADDR + CPU_TMP1]          ;\
2161         ba,pt   %xcc, done1                     ;\
2162           std   %f40, [ADDR + CPU_TMP1]         ;\
2163         ba,pt   %xcc, done1                     ;\
2164           std   %f10, [ADDR + CPU_TMP1]         ;\
2165         ba,pt   %xcc, done1                     ;\
2166           std   %f42, [ADDR + CPU_TMP1]         ;\
2167         ba,pt   %xcc, done1                     ;\
2168           std   %f12, [ADDR + CPU_TMP1]         ;\
2169         ba,pt   %xcc, done1                     ;\
2170           std   %f44, [ADDR + CPU_TMP1]         ;\
2171         ba,pt   %xcc, done1                     ;\
2172           std   %f14, [ADDR + CPU_TMP1]         ;\
2173         ba,pt   %xcc, done1                     ;\
2174           std   %f46, [ADDR + CPU_TMP1]         ;\
2175         ba,pt   %xcc, done1                     ;\
2176           std   %f16, [ADDR + CPU_TMP1]         ;\
2177         ba,pt   %xcc, done1                     ;\
2178           std   %f48, [ADDR + CPU_TMP1]         ;\
2179         ba,pt   %xcc, done1                     ;\
2180           std   %f18, [ADDR + CPU_TMP1]         ;\
2181         ba,pt   %xcc, done1                     ;\
2182           std   %f50, [ADDR + CPU_TMP1]         ;\
2183         ba,pt   %xcc, done1                     ;\
2184           std   %f20, [ADDR + CPU_TMP1]         ;\
2185         ba,pt   %xcc, done1                     ;\
2186           std   %f52, [ADDR + CPU_TMP1]         ;\
2187         ba,pt   %xcc, done1                     ;\
2188           std   %f22, [ADDR + CPU_TMP1]         ;\
2189         ba,pt   %xcc, done1                     ;\
2190           std   %f54, [ADDR + CPU_TMP1]         ;\
2191         ba,pt   %xcc, done1                     ;\
2192           std   %f24, [ADDR + CPU_TMP1]         ;\
2193         ba,pt   %xcc, done1                     ;\
2194           std   %f56, [ADDR + CPU_TMP1]         ;\
2195         ba,pt   %xcc, done1                     ;\
2196           std   %f26, [ADDR + CPU_TMP1]         ;\
2197         ba,pt   %xcc, done1                     ;\
2198           std   %f58, [ADDR + CPU_TMP1]         ;\
2199         ba,pt   %xcc, done1                     ;\
2200           std   %f28, [ADDR + CPU_TMP1]         ;\
2201         ba,pt   %xcc, done1                     ;\
2202           std   %f60, [ADDR + CPU_TMP1]         ;\
2203         ba,pt   %xcc, done1                     ;\
2204           std   %f30, [ADDR + CPU_TMP1]         ;\
2205         ba,pt   %xcc, done1                     ;\
2206           std   %f62, [ADDR + CPU_TMP1]         ;\
2207 done1:
2208 
2209 #define LDDF_REG(REG, ADDR, TMP)                \
2210         sll     REG, 3, REG                     ;\
2211 mark2:  set     start2, TMP                     ;\
2212         jmp     REG + TMP                       ;\
2213           nop                                   ;\
2214 start2: ba,pt   %xcc, done2                     ;\
2215           ldd   [ADDR + CPU_TMP1], %f0          ;\
2216         ba,pt   %xcc, done2                     ;\
2217           ldd   [ADDR + CPU_TMP1], %f32         ;\
2218         ba,pt   %xcc, done2                     ;\
2219           ldd   [ADDR + CPU_TMP1], %f2          ;\
2220         ba,pt   %xcc, done2                     ;\
2221           ldd   [ADDR + CPU_TMP1], %f34         ;\
2222         ba,pt   %xcc, done2                     ;\
2223           ldd   [ADDR + CPU_TMP1], %f4          ;\
2224         ba,pt   %xcc, done2                     ;\
2225           ldd   [ADDR + CPU_TMP1], %f36         ;\
2226         ba,pt   %xcc, done2                     ;\
2227           ldd   [ADDR + CPU_TMP1], %f6          ;\
2228         ba,pt   %xcc, done2                     ;\
2229           ldd   [ADDR + CPU_TMP1], %f38         ;\
2230         ba,pt   %xcc, done2                     ;\
2231           ldd   [ADDR + CPU_TMP1], %f8          ;\
2232         ba,pt   %xcc, done2                     ;\
2233           ldd   [ADDR + CPU_TMP1], %f40         ;\
2234         ba,pt   %xcc, done2                     ;\
2235           ldd   [ADDR + CPU_TMP1], %f10         ;\
2236         ba,pt   %xcc, done2                     ;\
2237           ldd   [ADDR + CPU_TMP1], %f42         ;\
2238         ba,pt   %xcc, done2                     ;\
2239           ldd   [ADDR + CPU_TMP1], %f12         ;\
2240         ba,pt   %xcc, done2                     ;\
2241           ldd   [ADDR + CPU_TMP1], %f44         ;\
2242         ba,pt   %xcc, done2                     ;\
2243           ldd   [ADDR + CPU_TMP1], %f14         ;\
2244         ba,pt   %xcc, done2                     ;\
2245           ldd   [ADDR + CPU_TMP1], %f46         ;\
2246         ba,pt   %xcc, done2                     ;\
2247           ldd   [ADDR + CPU_TMP1], %f16         ;\
2248         ba,pt   %xcc, done2                     ;\
2249           ldd   [ADDR + CPU_TMP1], %f48         ;\
2250         ba,pt   %xcc, done2                     ;\
2251           ldd   [ADDR + CPU_TMP1], %f18         ;\
2252         ba,pt   %xcc, done2                     ;\
2253           ldd   [ADDR + CPU_TMP1], %f50         ;\
2254         ba,pt   %xcc, done2                     ;\
2255           ldd   [ADDR + CPU_TMP1], %f20         ;\
2256         ba,pt   %xcc, done2                     ;\
2257           ldd   [ADDR + CPU_TMP1], %f52         ;\
2258         ba,pt   %xcc, done2                     ;\
2259           ldd   [ADDR + CPU_TMP1], %f22         ;\
2260         ba,pt   %xcc, done2                     ;\
2261           ldd   [ADDR + CPU_TMP1], %f54         ;\
2262         ba,pt   %xcc, done2                     ;\
2263           ldd   [ADDR + CPU_TMP1], %f24         ;\
2264         ba,pt   %xcc, done2                     ;\
2265           ldd   [ADDR + CPU_TMP1], %f56         ;\
2266         ba,pt   %xcc, done2                     ;\
2267           ldd   [ADDR + CPU_TMP1], %f26         ;\
2268         ba,pt   %xcc, done2                     ;\
2269           ldd   [ADDR + CPU_TMP1], %f58         ;\
2270         ba,pt   %xcc, done2                     ;\
2271           ldd   [ADDR + CPU_TMP1], %f28         ;\
2272         ba,pt   %xcc, done2                     ;\
2273           ldd   [ADDR + CPU_TMP1], %f60         ;\
2274         ba,pt   %xcc, done2                     ;\
2275           ldd   [ADDR + CPU_TMP1], %f30         ;\
2276         ba,pt   %xcc, done2                     ;\
2277           ldd   [ADDR + CPU_TMP1], %f62         ;\
2278 done2:
2279 
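/*
 * Each entry in the STDF_REG/LDDF_REG tables above is two instructions
 * (8 bytes), hence the "sll REG, 3" scale before the jmp.  The entries
 * interleave %f0, %f32, %f2, %f34, ... because the V9 double-precision
 * encoding folds bit 5 of the register number into bit 0 of the 5-bit
 * rd field.  A minimal C sketch of that decoding (illustrative only):
 *
 *	static unsigned int
 *	dbl_fpreg(unsigned int rd)
 *	{
 *		return (((rd & 1) << 5) | (rd & 0x1e));
 *	}
 *
 * so rd == 0 selects %f0, rd == 1 selects %f32, rd == 2 selects %f2,
 * matching the table order above.
 */
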
2280 .lddf_exception_not_aligned:
2281         /*
2282          * Cheetah overwrites SFAR on a DTLB miss, hence read it now.
2283          */
2284         ldxa    [MMU_SFAR]%asi, %g5     ! misaligned vaddr in %g5
2285 
2286 #if defined(DEBUG) || defined(NEED_FPU_EXISTS)
2287         sethi   %hi(fpu_exists), %g2            ! check fpu_exists
2288         ld      [%g2 + %lo(fpu_exists)], %g2
2289         brz,a,pn %g2, 4f
2290           nop
2291 #endif
2292         CPU_ADDR(%g1, %g4)
2293         or      %g0, 1, %g4
2294         st      %g4, [%g1 + CPU_TL1_HDLR] ! set tl1_hdlr flag
2295 
2296         rdpr    %tpc, %g2
2297         lda     [%g2]ASI_AIUP, %g6      ! get the user's lddf instruction
2298         srl     %g6, 23, %g1            ! using ldda or not?
2299         and     %g1, 1, %g1
2300         brz,a,pt %g1, 2f                ! check for ldda instruction
2301           nop
2302         srl     %g6, 13, %g1            ! check immflag
2303         and     %g1, 1, %g1
2304         rdpr    %tstate, %g2            ! %tstate in %g2
2305         brnz,a,pn %g1, 1f
2306           srl   %g2, 31, %g1            ! get asi from %tstate
2307         srl     %g6, 5, %g1             ! get asi from instruction
2308         and     %g1, 0xFF, %g1          ! imm_asi field
2309 1:
2310         cmp     %g1, ASI_P              ! primary address space
2311         be,a,pt %icc, 2f
2312           nop
2313         cmp     %g1, ASI_PNF            ! primary no fault address space
2314         be,a,pt %icc, 2f
2315           nop
2316         cmp     %g1, ASI_S              ! secondary address space
2317         be,a,pt %icc, 2f
2318           nop
2319         cmp     %g1, ASI_SNF            ! secondary no fault address space
2320         bne,a,pn %icc, 3f
2321           nop
2322 2:
2323         lduwa   [%g5]ASI_USER, %g7      ! get first half of misaligned data
2324         add     %g5, 4, %g5             ! increment misaligned data address
2325         lduwa   [%g5]ASI_USER, %g5      ! get second half of misaligned data
2326 
2327         sllx    %g7, 32, %g7
2328         or      %g5, %g7, %g5           ! combine data
2329         CPU_ADDR(%g7, %g1)              ! save data on a per-cpu basis
2330         stx     %g5, [%g7 + CPU_TMP1]   ! save in cpu_tmp1
2331 
2332         srl     %g6, 25, %g3            ! %g6 has the instruction
2333         and     %g3, 0x1F, %g3          ! %g3 has rd
2334         LDDF_REG(%g3, %g7, %g4)
2335 
2336         CPU_ADDR(%g1, %g4)
2337         st      %g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag
2338         FAST_TRAP_DONE
2339 3:
2340         CPU_ADDR(%g1, %g4)
2341         st      %g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag
2342 4:
2343         set     T_USER, %g3             ! trap type in %g3
2344         or      %g3, T_LDDF_ALIGN, %g3
2345         mov     %g5, %g2                ! misaligned vaddr in %g2
2346         set     fpu_trap, %g1           ! go to C for the little-endian
2347         ba,pt   %xcc, sys_trap          ! and no-fault little asi's
2348           sub   %g0, 1, %g4
2349 
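/*
 * The lddf handler above emulates the misaligned access by decoding
 * the trapping instruction and issuing two aligned 32-bit user loads.
 * A minimal C sketch of the field extraction and merge, using the
 * same bit positions as the srl/and sequences above (illustrative
 * only):
 *
 *	static unsigned int rd(unsigned int i)  { return ((i >> 25) & 0x1f); }
 *	static unsigned int imm(unsigned int i) { return ((i >> 13) & 0x1); }
 *	static unsigned int asi(unsigned int i) { return ((i >> 5) & 0xff); }
 *
 *	static unsigned long long
 *	merge(unsigned int hi, unsigned int lo)
 *	{
 *		return (((unsigned long long)hi << 32) | lo);
 *	}
 */
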
2350 .stdf_exception_not_aligned:
2351         /*
2352          * Cheetah overwrites SFAR on a DTLB miss, hence read it now.
2353          */
2354         ldxa    [MMU_SFAR]%asi, %g5     ! misaligned vaddr in %g5
2355 
2356 #if defined(DEBUG) || defined(NEED_FPU_EXISTS)
2357         sethi   %hi(fpu_exists), %g7            ! check fpu_exists
2358         ld      [%g7 + %lo(fpu_exists)], %g3
2359         brz,a,pn %g3, 4f
2360           nop
2361 #endif
2362         CPU_ADDR(%g1, %g4)
2363         or      %g0, 1, %g4
2364         st      %g4, [%g1 + CPU_TL1_HDLR] ! set tl1_hdlr flag
2365 
2366         rdpr    %tpc, %g2
2367         lda     [%g2]ASI_AIUP, %g6      ! get the user's stdf instruction
2368 
2369         srl     %g6, 23, %g1            ! using stda or not?
2370         and     %g1, 1, %g1
2371         brz,a,pt %g1, 2f                ! check for stda instruction
2372           nop
2373         srl     %g6, 13, %g1            ! check immflag
2374         and     %g1, 1, %g1
2375         rdpr    %tstate, %g2            ! %tstate in %g2
2376         brnz,a,pn %g1, 1f
2377           srl   %g2, 31, %g1            ! get asi from %tstate
2378         srl     %g6, 5, %g1             ! get asi from instruction
2379         and     %g1, 0xFF, %g1          ! imm_asi field
2380 1:
2381         cmp     %g1, ASI_P              ! primary address space
2382         be,a,pt %icc, 2f
2383           nop
2384         cmp     %g1, ASI_S              ! secondary address space
2385         bne,a,pn %icc, 3f
2386           nop
2387 2:
2388         srl     %g6, 25, %g6
2389         and     %g6, 0x1F, %g6          ! %g6 has rd
2390         CPU_ADDR(%g7, %g1)
2391         STDF_REG(%g6, %g7, %g4)         ! STDF_REG(REG, ADDR, TMP)
2392 
2393         ldx     [%g7 + CPU_TMP1], %g6
2394         srlx    %g6, 32, %g7
2395         stuwa   %g7, [%g5]ASI_USER      ! first half
2396         add     %g5, 4, %g5             ! increment misaligned data address
2397         stuwa   %g6, [%g5]ASI_USER      ! second half
2398 
2399         CPU_ADDR(%g1, %g4)
2400         st      %g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag
2401         FAST_TRAP_DONE
2402 3:
2403         CPU_ADDR(%g1, %g4)
2404         st      %g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag
2405 4:
2406         set     T_USER, %g3             ! trap type in %g3
2407         or      %g3, T_STDF_ALIGN, %g3
2408         mov     %g5, %g2                ! misaligned vaddr in %g2
2409         set     fpu_trap, %g1           ! go to C for the little-endian
2410         ba,pt   %xcc, sys_trap          ! and no-fault little asi's
2411           sub   %g0, 1, %g4
2412 
2413 #ifdef DEBUG_USER_TRAPTRACECTL
2414 
2415 .traptrace_freeze:
2416         mov     %l0, %g1 ; mov  %l1, %g2 ; mov  %l2, %g3 ; mov  %l4, %g4
2417         TT_TRACE_L(trace_win)
2418         mov     %g4, %l4 ; mov  %g3, %l2 ; mov  %g2, %l1 ; mov  %g1, %l0
2419         set     trap_freeze, %g1
2420         mov     1, %g2
2421         st      %g2, [%g1]
2422         FAST_TRAP_DONE
2423 
2424 .traptrace_unfreeze:
2425         set     trap_freeze, %g1
2426         st      %g0, [%g1]
2427         mov     %l0, %g1 ; mov  %l1, %g2 ; mov  %l2, %g3 ; mov  %l4, %g4
2428         TT_TRACE_L(trace_win)
2429         mov     %g4, %l4 ; mov  %g3, %l2 ; mov  %g2, %l1 ; mov  %g1, %l0
2430         FAST_TRAP_DONE
2431 
2432 #endif /* DEBUG_USER_TRAPTRACECTL */
2433 
2434 .getcc:
2435         CPU_ADDR(%g1, %g2)
2436         stx     %o0, [%g1 + CPU_TMP1]           ! save %o0
2437         stx     %o1, [%g1 + CPU_TMP2]           ! save %o1
2438         rdpr    %tstate, %g3                    ! get tstate
2439         srlx    %g3, PSR_TSTATE_CC_SHIFT, %o0   ! shift ccr to V8 psr
2440         set     PSR_ICC, %g2
2441         and     %o0, %g2, %o0                   ! mask out the rest
2442         srl     %o0, PSR_ICC_SHIFT, %o0         ! right justify
2443         rdpr    %pstate, %o1
2444         wrpr    %o1, PSTATE_AG, %pstate         ! get into normal globals
2445         mov     %o0, %g1                        ! move ccr to normal %g1
2446         wrpr    %g0, %o1, %pstate               ! back into alternate globals
2447         ldx     [%g1 + CPU_TMP1], %o0           ! restore %o0
2448         ldx     [%g1 + CPU_TMP2], %o1           ! restore %o1
2449         FAST_TRAP_DONE
2450 
2451 .setcc:
2452         CPU_ADDR(%g1, %g2)
2453         stx     %o0, [%g1 + CPU_TMP1]           ! save %o0
2454         stx     %o1, [%g1 + CPU_TMP2]           ! save %o1
2455         rdpr    %pstate, %o0
2456         wrpr    %o0, PSTATE_AG, %pstate         ! get into normal globals
2457         mov     %g1, %o1
2458         wrpr    %g0, %o0, %pstate               ! back to alternates
2459         sll     %o1, PSR_ICC_SHIFT, %g2
2460         set     PSR_ICC, %g3
2461         and     %g2, %g3, %g2                   ! mask out rest
2462         sllx    %g2, PSR_TSTATE_CC_SHIFT, %g2
2463         rdpr    %tstate, %g3                    ! get tstate
2464         srl     %g3, 0, %g3                     ! clear upper word
2465         or      %g3, %g2, %g3                   ! or in new bits
2466         wrpr    %g3, %tstate
2467         ldx     [%g1 + CPU_TMP1], %o0           ! restore %o0
2468         ldx     [%g1 + CPU_TMP2], %o1           ! restore %o1
2469         FAST_TRAP_DONE
2470 
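/*
 * .getcc/.setcc above translate between the icc bits held in
 * tstate.ccr and their V8 %psr positions.  A minimal C sketch of the
 * two directions, using the shift/mask names from <sys/psr_compat.h>
 * just as the assembly does (illustrative only):
 *
 *	static unsigned int
 *	tstate_to_icc(unsigned long long tstate)
 *	{
 *		return ((unsigned int)((tstate >> PSR_TSTATE_CC_SHIFT) &
 *		    PSR_ICC) >> PSR_ICC_SHIFT);
 *	}
 *
 *	static unsigned long long
 *	icc_to_tstate(unsigned long long tstate, unsigned int icc)
 *	{
 *		unsigned long long cc =
 *		    (unsigned long long)((icc << PSR_ICC_SHIFT) & PSR_ICC);
 *		return ((tstate & 0xffffffffULL) |
 *		    (cc << PSR_TSTATE_CC_SHIFT));
 *	}
 */
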
2471 /*
2472  * getpsr(void)
2473  * Note that the xcc part of the ccr is not provided.
2474  * The V8 code shows why the V9 trap is not faster:
2475  * #define GETPSR_TRAP() \
2476  *      mov %psr, %i0; jmp %l2; rett %l2+4; nop;
2477  */
2478 
2479         .type   .getpsr, #function
2480 .getpsr:
2481         rdpr    %tstate, %g1                    ! get tstate
2482         srlx    %g1, PSR_TSTATE_CC_SHIFT, %o0   ! shift ccr to V8 psr
2483         set     PSR_ICC, %g2
2484         and     %o0, %g2, %o0                   ! mask out the rest
2485 
2486         rd      %fprs, %g1                      ! get fprs
2487         and     %g1, FPRS_FEF, %g2              ! mask out dirty upper/lower
2488         sllx    %g2, PSR_FPRS_FEF_SHIFT, %g2    ! shift fef to V8 psr.ef
2489         or      %o0, %g2, %o0                   ! or result into psr.ef
2490 
2491         set     V9_PSR_IMPLVER, %g2             ! SI assigned impl/ver: 0xef
2492         or      %o0, %g2, %o0                   ! or psr.impl/ver
2493         FAST_TRAP_DONE
2494         SET_SIZE(.getpsr)
2495 
2496 /*
2497  * setpsr(newpsr)
2498  * Note that there is no support for ccr.xcc in the V9 code.
2499  */
2500 
2501         .type   .setpsr, #function
2502 .setpsr:
2503         rdpr    %tstate, %g1                    ! get tstate
2504 !       setx    TSTATE_V8_UBITS, %g2
2505         or      %g0, CCR_ICC, %g3
2506         sllx    %g3, TSTATE_CCR_SHIFT, %g2
2507 
2508         andn    %g1, %g2, %g1                   ! zero current user bits
2509         set     PSR_ICC, %g2
2510         and     %g2, %o0, %g2                   ! clear all but psr.icc bits
2511         sllx    %g2, PSR_TSTATE_CC_SHIFT, %g3   ! shift to tstate.ccr.icc
2512         wrpr    %g1, %g3, %tstate               ! write tstate
2513 
2514         set     PSR_EF, %g2
2515         and     %g2, %o0, %g2                   ! clear all but fp enable bit
2516         srlx    %g2, PSR_FPRS_FEF_SHIFT, %g4    ! shift ef to V9 fprs.fef
2517         wr      %g0, %g4, %fprs                 ! write fprs
2518 
2519         CPU_ADDR(%g1, %g2)                      ! load CPU struct addr to %g1
2520         ldn     [%g1 + CPU_THREAD], %g2         ! load thread pointer
2521         ldn     [%g2 + T_LWP], %g3              ! load klwp pointer
2522         ldn     [%g3 + LWP_FPU], %g2            ! get lwp_fpu pointer
2523         stuw    %g4, [%g2 + FPU_FPRS]           ! write fef value to fpu_fprs
2524         srlx    %g4, 2, %g4                     ! shift fef value to bit 0
2525         stub    %g4, [%g2 + FPU_EN]             ! write fef value to fpu_en
2526         FAST_TRAP_DONE
2527         SET_SIZE(.setpsr)
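
/*
 * The psr.ef handling in .setpsr maps one bit across three homes: V8
 * psr.ef, V9 fprs.fef (bit 2), and the software fpu_en flag (bit 0).
 * A minimal C sketch of the derivation, mirroring the two shifts
 * above (illustrative only):
 *
 *	unsigned int fef = (psr & PSR_EF) >> PSR_FPRS_FEF_SHIFT;
 *	unsigned int fpu_en = fef >> 2;
 */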
2528 
2529 /*
2530  * getlgrp
2531  * get the home lgrpid of the cpu on which the calling thread is executing.
2532  */
2533         .type   .getlgrp, #function
2534 .getlgrp:
2535         CPU_ADDR(%g1, %g2)              ! load CPU struct addr to %g1 using %g2
2536         ld      [%g1 + CPU_ID], %o0     ! load cpu_id
2537         ldn     [%g1 + CPU_THREAD], %g2 ! load thread pointer
2538         ldn     [%g2 + T_LPL], %g2      ! load lpl pointer
2539         ld      [%g2 + LPL_LGRPID], %g1 ! load lpl_lgrpid
2540         sra     %g1, 0, %o1
2541         FAST_TRAP_DONE
2542         SET_SIZE(.getlgrp)
2543 
2544 /*
2545  * Entry for old 4.x trap (trap 0).
2546  */
2547         ENTRY_NP(syscall_trap_4x)
2548         CPU_ADDR(%g1, %g2)              ! load CPU struct addr to %g1 using %g2
2549         ldn     [%g1 + CPU_THREAD], %g2 ! load thread pointer
2550         ldn     [%g2 + T_LWP], %g2      ! load klwp pointer
2551         ld      [%g2 + PCB_TRAP0], %g2  ! lwp->lwp_pcb.pcb_trap0addr
2552         brz,pn  %g2, 1f                 ! has it been set?
2553         st      %l0, [%g1 + CPU_TMP1]   ! delay - save some locals
2554         st      %l1, [%g1 + CPU_TMP2]
2555         rdpr    %tnpc, %l1              ! save old tnpc
2556         wrpr    %g0, %g2, %tnpc         ! setup tnpc
2557 
2558         rdpr    %pstate, %l0
2559         wrpr    %l0, PSTATE_AG, %pstate ! switch to normal globals
2560         mov     %l1, %g6                ! pass tnpc to user code in %g6
2561         wrpr    %l0, %g0, %pstate       ! switch back to alternate globals
2562 
2563         ! Note that %g1 still contains CPU struct addr
2564         ld      [%g1 + CPU_TMP2], %l1   ! restore locals
2565         ld      [%g1 + CPU_TMP1], %l0
2566         FAST_TRAP_DONE_CHK_INTR
2567 1:
2568         mov     %g1, %l0
2569         st      %l1, [%g1 + CPU_TMP2]
2570         rdpr    %pstate, %l1
2571         wrpr    %l1, PSTATE_AG, %pstate
2572         !
2573         ! check for old syscall mmap which is the only different one which
2574         ! must be the same.  Others are handled in the compatibility library.
2575         !
2576         cmp     %g1, OSYS_mmap  ! compare to old 4.x mmap
2577         movz    %icc, SYS_mmap, %g1
2578         wrpr    %g0, %l1, %pstate
2579         ld      [%l0 + CPU_TMP2], %l1   ! restore locals
2580         ld      [%l0 + CPU_TMP1], %l0
2581         SYSCALL(syscall_trap32)
2582         SET_SIZE(syscall_trap_4x)
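
/*
 * The cmp/movz pair above is a branchless remap of the one 4.x
 * syscall number that differs; in C it is simply (illustrative only):
 *
 *	if (code == OSYS_mmap)
 *		code = SYS_mmap;
 */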
2583 
2584 /*
2585  * Handler for software trap 9.
2586  * Set trap0 emulation address for old 4.x system call trap.
2587  * XXX - this should be a system call.
2588  */
2589         ENTRY_NP(set_trap0_addr)
2590         CPU_ADDR(%g1, %g2)              ! load CPU struct addr to %g1 using %g2
2591         ldn     [%g1 + CPU_THREAD], %g2 ! load thread pointer
2592         ldn     [%g2 + T_LWP], %g2      ! load klwp pointer
2593         st      %l0, [%g1 + CPU_TMP1]   ! save some locals
2594         st      %l1, [%g1 + CPU_TMP2]
2595         rdpr    %pstate, %l0
2596         wrpr    %l0, PSTATE_AG, %pstate
2597         mov     %g1, %l1
2598         wrpr    %g0, %l0, %pstate
2599         andn    %l1, 3, %l1             ! force alignment
2600         st      %l1, [%g2 + PCB_TRAP0]  ! lwp->lwp_pcb.pcb_trap0addr
2601         ld      [%g1 + CPU_TMP1], %l0   ! restore locals
2602         ld      [%g1 + CPU_TMP2], %l1
2603         FAST_TRAP_DONE
2604         SET_SIZE(set_trap0_addr)
2605 
2606 /*
2607  * mmu_trap_tl1
2608  * trap handler for unexpected mmu traps.
2609  * If the trap was a user lddf/stdf alignment trap, we go to fpu_trap;
2610  * if it was a user trap from the window handler, we save the state on
2611  * the pcb.  Otherwise, we go to ptl1_panic.
2612  */
2613         .type   mmu_trap_tl1, #function
2614 mmu_trap_tl1:
2615 #ifdef  TRAPTRACE
2616         TRACE_PTR(%g5, %g6)
2617         GET_TRACE_TICK(%g6, %g7)
2618         stxa    %g6, [%g5 + TRAP_ENT_TICK]%asi
2619         rdpr    %tl, %g6
2620         stha    %g6, [%g5 + TRAP_ENT_TL]%asi
2621         rdpr    %tt, %g6
2622         stha    %g6, [%g5 + TRAP_ENT_TT]%asi
2623         rdpr    %tstate, %g6
2624         stxa    %g6, [%g5 + TRAP_ENT_TSTATE]%asi
2625         stna    %sp, [%g5 + TRAP_ENT_SP]%asi
2626         stna    %g0, [%g5 + TRAP_ENT_TR]%asi
2627         rdpr    %tpc, %g6
2628         stna    %g6, [%g5 + TRAP_ENT_TPC]%asi
2629         set     MMU_SFAR, %g6
2630         ldxa    [%g6]ASI_DMMU, %g6
2631         stxa    %g6, [%g5 + TRAP_ENT_F1]%asi
2632         CPU_PADDR(%g7, %g6);
2633         add     %g7, CPU_TL1_HDLR, %g7
2634         lda     [%g7]ASI_MEM, %g6
2635         stxa    %g6, [%g5 + TRAP_ENT_F2]%asi
2636         set     0xdeadbeef, %g6
2637         stna    %g6, [%g5 + TRAP_ENT_F3]%asi
2638         stna    %g6, [%g5 + TRAP_ENT_F4]%asi
2639         TRACE_NEXT(%g5, %g6, %g7)
2640 #endif /* TRAPTRACE */
2641 
2642         GET_CPU_IMPL(%g5)
2643         cmp     %g5, PANTHER_IMPL
2644         bne     mmu_trap_tl1_4
2645           nop
2646         rdpr    %tt, %g5
2647         cmp     %g5, T_DATA_EXCEPTION
2648         bne     mmu_trap_tl1_4
2649           nop
2650         wr      %g0, ASI_DMMU, %asi
2651         ldxa    [MMU_SFSR]%asi, %g5
2652         mov     1, %g6
2653         sllx    %g6, PN_SFSR_PARITY_SHIFT, %g6
2654         andcc   %g5, %g6, %g0
2655         bz      mmu_trap_tl1_4
2656 
2657         /*
2658          * We are running on a Panther and have hit a DTLB parity error.
2659          */
2660         ldxa    [MMU_TAG_ACCESS]%asi, %g2
2661         mov     %g5, %g3
2662         ba,pt   %xcc, .mmu_exception_is_tlb_parity
2663         mov     T_DATA_EXCEPTION, %g1
2664 
2665 mmu_trap_tl1_4:
2666         CPU_PADDR(%g7, %g6);
2667         add     %g7, CPU_TL1_HDLR, %g7          ! %g7 = &cpu_m.tl1_hdlr (PA)
2668         /*
2669          * AM is cleared on trap, so addresses are 64 bit
2670          */
2671         lda     [%g7]ASI_MEM, %g6
2672         brz,a,pt %g6, 1f
2673           nop
2674         /*
2675          * We are going to update cpu_m.tl1_hdlr using physical address.
2676          * Flush the D$ line, so that stale data won't be accessed later.
2677          */
2678         CPU_ADDR(%g6, %g5)
2679         add     %g6, CPU_TL1_HDLR, %g6          ! %g6 = &cpu_m.tl1_hdlr (VA)
2680         GET_CPU_IMPL(%g5)
2681         cmp     %g5, CHEETAH_IMPL
2682         bl,pt   %icc, 3f
2683          cmp    %g5, SPITFIRE_IMPL
2684         stxa    %g0, [%g7]ASI_DC_INVAL
2685         membar  #Sync
2686         ba,pt   %xcc, 2f
2687          nop
2688 3:
2689         bl,pt   %icc, 2f
2690          sethi  %hi(dcache_line_mask), %g5
2691         ld      [%g5 + %lo(dcache_line_mask)], %g5
2692         and     %g6, %g5, %g5
2693         stxa    %g0, [%g5]ASI_DC_TAG
2694         membar  #Sync
2695 2:
2696         sta     %g0, [%g7]ASI_MEM
2697         SWITCH_GLOBALS                          ! back to mmu globals
2698         ba,a,pt %xcc, sfmmu_mmu_trap            ! handle page faults
2699 1:
2700         rdpr    %tt, %g5
2701         rdpr    %tl, %g7
2702         sub     %g7, 1, %g6
2703         wrpr    %g6, %tl
2704         rdpr    %tt, %g6
2705         wrpr    %g7, %tl
2706         and     %g6, WTRAP_TTMASK, %g6
2707         cmp     %g6, WTRAP_TYPE
2708         bne,a,pn %xcc, ptl1_panic
2709         mov     PTL1_BAD_MMUTRAP, %g1
2710         rdpr    %tpc, %g7
2711         /* tpc should be in the trap table */
2712         set     trap_table, %g6
2713         cmp     %g7, %g6
2714         blt,a,pn %xcc, ptl1_panic
2715           mov   PTL1_BAD_MMUTRAP, %g1
2716         set     etrap_table, %g6
2717         cmp     %g7, %g6
2718         bge,a,pn %xcc, ptl1_panic
2719           mov   PTL1_BAD_MMUTRAP, %g1
2720         cmp     %g5, T_ALIGNMENT
2721         move    %icc, MMU_SFAR, %g6
2722         movne   %icc, MMU_TAG_ACCESS, %g6
2723         ldxa    [%g6]ASI_DMMU, %g6
2724         andn    %g7, WTRAP_ALIGN, %g7   /* 128 byte aligned */
2725         add     %g7, WTRAP_FAULTOFF, %g7
2726         wrpr    %g0, %g7, %tnpc
2727         done
2728         SET_SIZE(mmu_trap_tl1)
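
/*
 * The recovery path above accepts the trap only if the tt at TL-1 is
 * a window trap and the %tpc lies inside the trap table; it then
 * points %tnpc at the fault-handling entry of the 128-byte-aligned
 * window handler.  A minimal C sketch of those checks, using the
 * WTRAP_* names from the assembly (illustrative only):
 *
 *	static int
 *	window_trap_ok(unsigned long long tt, unsigned long long tpc)
 *	{
 *		if ((tt & WTRAP_TTMASK) != WTRAP_TYPE)
 *			return (0);
 *		return (tpc >= (unsigned long long)trap_table &&
 *		    tpc < (unsigned long long)etrap_table);
 *	}
 *
 * with the redirect being tnpc = (tpc & ~WTRAP_ALIGN) + WTRAP_FAULTOFF.
 */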
2729 
2730 /*
2731  * Several traps use kmdb_trap and kmdb_trap_tl1 as their handlers.  These
2732  * traps are valid only when kmdb is loaded.  When the debugger is active,
2733  * the code below is rewritten to transfer control to the appropriate
2734  * debugger entry points.
2735  */
2736         .global kmdb_trap
2737         .align  8
2738 kmdb_trap:
2739         ba,a    trap_table0
2740         jmp     %g1 + 0
2741         nop
2742 
2743         .global kmdb_trap_tl1
2744         .align  8
2745 kmdb_trap_tl1:
2746         ba,a    trap_table0
2747         jmp     %g1 + 0
2748         nop
2749 
2750 /*
2751  * This entry is copied from OBP's trap table during boot.
2752  */
2753         .global obp_bpt
2754         .align  8
2755 obp_bpt:
2756         NOT
2757 
2758 /*
2759  * if kernel, set PCONTEXT to 0 for debuggers
2760  * if user, clear nucleus page sizes
2761  */
2762         .global kctx_obp_bpt
2763 kctx_obp_bpt:
2764         set     obp_bpt, %g2
2765 1:
2766 #ifndef _OPL
2767         mov     MMU_PCONTEXT, %g1
2768         ldxa    [%g1]ASI_DMMU, %g1
2769         srlx    %g1, CTXREG_NEXT_SHIFT, %g3
2770         brz,pt  %g3, 3f                 ! nucleus pgsz is 0, no problem
2771           sllx  %g3, CTXREG_NEXT_SHIFT, %g3
2772         set     CTXREG_CTX_MASK, %g4    ! check Pcontext
2773         btst    %g4, %g1
2774         bz,a,pt %xcc, 2f
2775           clr   %g3                     ! kernel:  PCONTEXT=0
2776         xor     %g3, %g1, %g3           ! user: clr N_pgsz0/1 bits
2777 2:
2778         set     DEMAP_ALL_TYPE, %g1
2779         stxa    %g0, [%g1]ASI_DTLB_DEMAP
2780         stxa    %g0, [%g1]ASI_ITLB_DEMAP
2781         mov     MMU_PCONTEXT, %g1
2782         stxa    %g3, [%g1]ASI_DMMU
2783         membar  #Sync
2784         sethi   %hi(FLUSH_ADDR), %g1
2785         flush   %g1                     ! flush required by immu
2786 #endif /* _OPL */
2787 3:
2788         jmp     %g2
2789           nop
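
/*
 * A minimal C sketch of the PCONTEXT fixup above; read_pcontext,
 * write_pcontext and demap_all are hypothetical helpers standing in
 * for the ldxa/stxa/demap sequences (illustrative only):
 *
 *	unsigned long long ctx = read_pcontext();
 *	unsigned long long pgsz =
 *	    ctx >> CTXREG_NEXT_SHIFT << CTXREG_NEXT_SHIFT;
 *	if (pgsz != 0) {
 *		demap_all();
 *		if ((ctx & CTXREG_CTX_MASK) == 0)
 *			write_pcontext(0);
 *		else
 *			write_pcontext(ctx ^ pgsz);
 *	}
 */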
2790 
2791 
2792 #ifdef  TRAPTRACE
2793 /*
2794  * TRAPTRACE support.
2795  * labels here are branched to with "rd %pc, %g7" in the delay slot.
2796  * Return is done by "jmp %g7 + 4".
2797  */
2798 
2799 trace_gen:
2800         TRACE_PTR(%g3, %g6)
2801         GET_TRACE_TICK(%g6, %g4)
2802         stxa    %g6, [%g3 + TRAP_ENT_TICK]%asi
2803         rdpr    %tl, %g6
2804         stha    %g6, [%g3 + TRAP_ENT_TL]%asi
2805         rdpr    %tt, %g6
2806         stha    %g6, [%g3 + TRAP_ENT_TT]%asi
2807         rdpr    %tstate, %g6
2808         stxa    %g6, [%g3 + TRAP_ENT_TSTATE]%asi
2809         stna    %sp, [%g3 + TRAP_ENT_SP]%asi
2810         rdpr    %tpc, %g6
2811         stna    %g6, [%g3 + TRAP_ENT_TPC]%asi
2812         TRACE_NEXT(%g3, %g4, %g5)
2813         jmp     %g7 + 4
2814         nop
2815 
2816 trace_win:
2817         TRACE_WIN_INFO(0, %l0, %l1, %l2)
2818         ! Keep the locals as clean as possible, caller cleans %l4
2819         clr     %l2
2820         clr     %l1
2821         jmp     %l4 + 4
2822           clr   %l0
2823 
2824 /*
2825  * Trace a tsb hit
2826  * g1 = tsbe pointer (in/clobbered)
2827  * g2 = tag access register (in)
2828  * g3 - g4 = scratch (clobbered)
2829  * g5 = tsbe data (in)
2830  * g6 = scratch (clobbered)
2831  * g7 = pc we jumped here from (in)
2832  */
2833 
2834         ! Do not disturb %g5, it will be used after the trace
2835         ALTENTRY(trace_tsbhit)
2836         TRACE_TSBHIT(0)
2837         jmp     %g7 + 4
2838         nop
2839 
2840 /*
2841  * Trace a TSB miss
2842  *
2843  * g1 = tsb8k pointer (in)
2844  * g2 = tag access register (in)
2845  * g3 = tsb4m pointer (in)
2846  * g4 = tsbe tag (in/clobbered)
2847  * g5 - g6 = scratch (clobbered)
2848  * g7 = pc we jumped here from (in)
2849  */
2850         .global trace_tsbmiss
2851 trace_tsbmiss:
2852         membar  #Sync
2853         sethi   %hi(FLUSH_ADDR), %g6
2854         flush   %g6
2855         TRACE_PTR(%g5, %g6)
2856         stxa    %g2, [%g5 + TRAP_ENT_SP]%asi            ! tag access
2857         stxa    %g4, [%g5 + TRAP_ENT_F1]%asi            ! tsb tag
2858         GET_TRACE_TICK(%g6, %g4)
2859         stxa    %g6, [%g5 + TRAP_ENT_TICK]%asi
2860         rdpr    %tnpc, %g6
2861         stxa    %g6, [%g5 + TRAP_ENT_F2]%asi
2862         stna    %g1, [%g5 + TRAP_ENT_F3]%asi            ! tsb8k pointer
2863         srlx    %g1, 32, %g6
2864         stna    %g6, [%g5 + TRAP_ENT_F4]%asi            ! upper 32 bits of tsb8k ptr
2865         rdpr    %tpc, %g6
2866         stna    %g6, [%g5 + TRAP_ENT_TPC]%asi
2867         rdpr    %tl, %g6
2868         stha    %g6, [%g5 + TRAP_ENT_TL]%asi
2869         rdpr    %tt, %g6
2870         or      %g6, TT_MMU_MISS, %g4
2871         stha    %g4, [%g5 + TRAP_ENT_TT]%asi
2872         cmp     %g6, FAST_IMMU_MISS_TT
2873         be,a    %icc, 1f
2874           ldxa  [%g0]ASI_IMMU, %g6
2875         ldxa    [%g0]ASI_DMMU, %g6
2876 1:      stxa    %g6, [%g5 + TRAP_ENT_TSTATE]%asi        ! tag target
2877         stxa    %g3, [%g5 + TRAP_ENT_TR]%asi            ! tsb4m pointer
2878         TRACE_NEXT(%g5, %g4, %g6)
2879         jmp     %g7 + 4
2880         nop
2881 
2882 /*
2883  * g2 = tag access register (in)
2884  * g3 = ctx number (in)
2885  */
2886 trace_dataprot:
2887         membar  #Sync
2888         sethi   %hi(FLUSH_ADDR), %g6
2889         flush   %g6
2890         TRACE_PTR(%g1, %g6)
2891         GET_TRACE_TICK(%g6, %g5)
2892         stxa    %g6, [%g1 + TRAP_ENT_TICK]%asi
2893         rdpr    %tpc, %g6
2894         stna    %g6, [%g1 + TRAP_ENT_TPC]%asi
2895         rdpr    %tstate, %g6
2896         stxa    %g6, [%g1 + TRAP_ENT_TSTATE]%asi
2897         stxa    %g2, [%g1 + TRAP_ENT_SP]%asi            ! tag access reg
2898         stxa    %g0, [%g1 + TRAP_ENT_TR]%asi
2899         stxa    %g0, [%g1 + TRAP_ENT_F1]%asi
2900         stxa    %g0, [%g1 + TRAP_ENT_F2]%asi
2901         stxa    %g0, [%g1 + TRAP_ENT_F3]%asi
2902         stxa    %g0, [%g1 + TRAP_ENT_F4]%asi
2903         rdpr    %tl, %g6
2904         stha    %g6, [%g1 + TRAP_ENT_TL]%asi
2905         rdpr    %tt, %g6
2906         stha    %g6, [%g1 + TRAP_ENT_TT]%asi
2907         TRACE_NEXT(%g1, %g4, %g5)
2908         jmp     %g7 + 4
2909         nop
2910 
2911 #endif /* TRAPTRACE */
2912 
2913         .align  32
2914         .global pil15_epilogue
2915 pil15_epilogue:
2916         ba      pil_interrupt_common
2917         nop
2918         .align  32
2919 
2920 /*
2921  * fast_trap_done, fast_trap_done_chk_intr:
2922  *
2923  * Due to the design of the UltraSPARC pipeline, pending interrupts are not
2924  * taken immediately after a RETRY or DONE instruction which causes IE to
2925  * go from 0 to 1. Instead, the instruction at %tpc or %tnpc is allowed
2926  * to execute first before taking any interrupts. If that instruction
2927  * results in other traps, and if the corresponding trap handler runs
2928  * entirely at TL=1 with interrupts disabled, then pending interrupts
2929  * won't be taken until after yet another instruction following the %tpc
2930  * or %tnpc.
2931  *
2932  * A malicious user program can use this feature to block out interrupts
2933  * for extended durations, which can result in a send_mondo_timeout
2934  * kernel panic.
2935  *
2936  * This problem is addressed by servicing any pending interrupts via
2937  * sys_trap before returning back to the user mode from a fast trap
2938  * handler. The "done" instruction within a fast trap handler, which
2939  * runs entirely at TL=1 with interrupts disabled, is replaced with the
2940  * FAST_TRAP_DONE macro, which branches control to this fast_trap_done
2941  * entry point.
2942  *
2943  * We check for any pending interrupts here and force a sys_trap to
2944  * service those interrupts, if any. To minimize overhead, pending
2945  * interrupts are checked only if the %tpc happens to be at a 16K boundary,
2946  * which allows a malicious program to execute at most 4K consecutive
2947  * instructions before we service any pending interrupts. If a worst
2948  * case fast trap handler takes about 2 usec, then interrupts will be
2949  * blocked for at most 8 msec, less than a clock tick.
2950  *
2951  * For the cases where we don't know if the %tpc will cross a 16K
2952  * boundary, we can't use the above optimization and always process
2953  * any pending interrupts via the fast_trap_done_chk_intr entry point.
2954  *
2955  * Entry Conditions:
2956  *      %pstate         am:0 priv:1 ie:0
2957  *                      globals are AG (not normal globals)
2958  */
2959 
2960         .global fast_trap_done, fast_trap_done_chk_intr
2961 fast_trap_done:
2962         rdpr    %tpc, %g5
2963         sethi   %hi(0xffffc000), %g6    ! 1's complement of 0x3fff
2964         andncc  %g5, %g6, %g0           ! check lower 14 bits of %tpc
2965         bz,a,pn %icc, 1f                ! branch if zero (lower 32 bits only)
2966           ldxa  [%g0]ASI_INTR_RECEIVE_STATUS, %g5
2967         done
2968 
2969         ALTENTRY(fast_trap_done_check_interrupts)
2970 fast_trap_done_chk_intr:
2971         ldxa    [%g0]ASI_INTR_RECEIVE_STATUS, %g5
2972 
2973 1:      rd      SOFTINT, %g6
2974         and     %g5, IRSR_BUSY, %g5
2975         orcc    %g5, %g6, %g0
2976         bnz,pn  %xcc, 2f                ! branch if any pending intr
2977         nop
2978         done
2979 
2980 2:
2981         /*
2982          * We get here if there are any pending interrupts.
2983          * Adjust %tpc/%tnpc as we'll be resuming via the "retry"
2984          * instruction.
2985          */
2986         rdpr    %tnpc, %g5
2987         wrpr    %g0, %g5, %tpc
2988         add     %g5, 4, %g5
2989         wrpr    %g0, %g5, %tnpc
2990 
2991         /*
2992          * Force a dummy sys_trap call so that interrupts can be serviced.
2993          */
2994         set     fast_trap_dummy_call, %g1
2995         ba,pt   %xcc, sys_trap
2996           mov   -1, %g4
2997 
2998 fast_trap_dummy_call:
2999         retl
3000         nop
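
/*
 * A minimal C sketch of the 16K-boundary test performed in
 * fast_trap_done above; 0x3fff masks the low 14 bits, matching the
 * andncc against %hi(0xffffc000) (illustrative only):
 *
 *	static int
 *	tpc_on_16k_boundary(unsigned long long tpc)
 *	{
 *		return (((unsigned int)tpc & 0x3fff) == 0);
 *	}
 */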
3001 
3002 /*
3003  * Currently the brand syscall interposition code is not enabled by
3004  * default.  Instead, when a branded zone is first booted the brand
3005  * infrastructure will patch the trap table so that the syscall
3006  * entry points are redirected to syscall_wrapper32 and syscall_wrapper
3007  * for ILP32 and LP64 syscalls respectively.  This is done in
3008  * brand_plat_interposition_enable().  Note that the syscall wrappers
3009  * below do not collect any trap trace data since the syscall hot patch
3010  * points are reached after trap trace data has already been collected.
3011  */
3012 #define BRAND_CALLBACK(callback_id)                                         \
3013         CPU_ADDR(%g2, %g1)              /* load CPU struct addr to %g2  */ ;\
3014         ldn     [%g2 + CPU_THREAD], %g3 /* load thread pointer          */ ;\
3015         ldn     [%g3 + T_PROCP], %g3    /* get proc pointer             */ ;\
3016         ldn     [%g3 + P_BRAND], %g3    /* get brand pointer            */ ;\
3017         brz     %g3, 1f                 /* No brand?  No callback.      */ ;\
3018         nop                                                                ;\
3019         ldn     [%g3 + B_MACHOPS], %g3  /* get machops list             */ ;\
3020         ldn     [%g3 + (callback_id << 3)], %g3                      ;\
3021         brz     %g3, 1f                                                    ;\
3022         /*                                                                  \
3023          * This isn't pretty.  We want a low-latency way for the callback   \
3024          * routine to decline to do anything.  We just pass in an address   \
3025          * the routine can directly jmp back to, pretending that nothing    \
3026          * has happened.                                                    \
3027          *                                                                  \
3028          * %g1: return address (where the brand handler jumps back to)      \
3029          * %g2: address of CPU structure                                    \
3030          * %g3: address of brand handler (where we will jump to)            \
3031          */                                                                 \
3032         mov     %pc, %g1                                                   ;\
3033         add     %g1, 16, %g1                                               ;\
3034         jmp     %g3                                                        ;\
3035         nop                                                                ;\
3036 1:
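
/*
 * The "mov %pc, %g1 / add %g1, 16, %g1" pair above computes the
 * decline address handed to the callback: four 4-byte instructions
 * (mov, add, jmp, nop) separate the mov from the "1:" label, so a
 * callback that wants to do nothing jumps back through %g1 and
 * execution falls through to the normal syscall path.
 */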
3037 
3038         ENTRY_NP(syscall_wrapper32)
3039         BRAND_CALLBACK(BRAND_CB_SYSCALL32)
3040         SYSCALL_NOTT(syscall_trap32)
3041         SET_SIZE(syscall_wrapper32)
3042 
3043         ENTRY_NP(syscall_wrapper)
3044         BRAND_CALLBACK(BRAND_CB_SYSCALL)
3045         SYSCALL_NOTT(syscall_trap)
3046         SET_SIZE(syscall_wrapper)
3047 
