/*
 * (code-viewer banner; changeset description)
 * restore sparc comments
 * de-linting of .s files
 */


   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 /*
  22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  23  * Use is subject to license terms.
  24  *
  25  * Assembly code support for the Olympus-C module
  26  */
  27 
  28 #if !defined(lint)
  29 #include "assym.h"
  30 #endif  /* lint */
  31 
  32 #include <sys/asm_linkage.h>
  33 #include <sys/mmu.h>
  34 #include <vm/hat_sfmmu.h>
  35 #include <sys/machparam.h>
  36 #include <sys/machcpuvar.h>
  37 #include <sys/machthread.h>
  38 #include <sys/machtrap.h>
  39 #include <sys/privregs.h>
  40 #include <sys/asm_linkage.h>
  41 #include <sys/trap.h>
  42 #include <sys/opl_olympus_regs.h>
  43 #include <sys/opl_module.h>
  44 #include <sys/xc_impl.h>
  45 #include <sys/intreg.h>
  46 #include <sys/async.h>
  47 #include <sys/clock.h>
  48 #include <sys/cmpregs.h>
  49 
  50 #ifdef TRAPTRACE
  51 #include <sys/traptrace.h>
  52 #endif /* TRAPTRACE */
  53 
  54 /*
  55  * Macro that flushes the entire Ecache.
  56  *
  57  * arg1 = ecache size
  58  * arg2 = ecache linesize
  59  * arg3 = ecache flush address - Not used for olympus-C
  60  */
  61 #define ECACHE_FLUSHALL(arg1, arg2, arg3, tmp1)                         \
  62         mov     ASI_L2_CTRL_U2_FLUSH, arg1;                             \
  63         mov     ASI_L2_CTRL_RW_ADDR, arg2;                              \
  64         stxa    arg1, [arg2]ASI_L2_CTRL
  65 
  66 /*
  67  * SPARC64-VI MMU and Cache operations.
  68  */
  69 
  70 #if defined(lint)
  71 
  72 /* ARGSUSED */
  73 void
  74 vtag_flushpage(caddr_t vaddr, uint64_t sfmmup)
  75 {}
  76 
  77 #else   /* lint */
  78 
  79         ENTRY_NP(vtag_flushpage)
  80         /*
  81          * flush page from the tlb
  82          *
  83          * %o0 = vaddr
  84          * %o1 = sfmmup
  85          */
  86         rdpr    %pstate, %o5
  87 #ifdef DEBUG
  88         PANIC_IF_INTR_DISABLED_PSTR(%o5, opl_di_l3, %g1)
  89 #endif /* DEBUG */
  90         /*
  91          * disable ints
  92          */
  93         andn    %o5, PSTATE_IE, %o4
  94         wrpr    %o4, 0, %pstate
  95 
  96         /*
  97          * Then, blow out the tlb
  98          * Interrupts are disabled to prevent the primary ctx register


 127 
 128         wrpr    %g0, 1, %tl
 129         set     MMU_PCONTEXT, %o4
 130         or      DEMAP_PRIMARY | DEMAP_PAGE_TYPE, %o0, %o0
 131         ldxa    [%o4]ASI_DMMU, %o2              ! %o2 = save old ctxnum
 132         srlx    %o2, CTXREG_NEXT_SHIFT, %o1     ! need to preserve nucleus pgsz
 133         sllx    %o1, CTXREG_NEXT_SHIFT, %o1     ! %o1 = nucleus pgsz
 134         or      %g1, %o1, %g1                   ! %g1 = nucleus pgsz | primary pgsz | cnum
 135         stxa    %g1, [%o4]ASI_DMMU              ! wr new ctxum 
 136 
 137         stxa    %g0, [%o0]ASI_DTLB_DEMAP
 138         stxa    %g0, [%o0]ASI_ITLB_DEMAP
 139         stxa    %o2, [%o4]ASI_DMMU              /* restore old ctxnum */
 140         flush   %o3
 141         wrpr    %g0, 0, %tl
 142 
 143         retl
 144         wrpr    %g0, %o5, %pstate               /* enable interrupts */
 145         SET_SIZE(vtag_flushpage)
 146 
 147 #endif  /* lint */
 148 
 149 
 150 #if defined(lint)
 151 
 152 void
 153 vtag_flushall(void)
 154 {}
 155 
 156 #else   /* lint */
 157 
 158         ENTRY_NP2(vtag_flushall, demap_all)
 159         /*
 160          * flush the tlb
 161          */
 162         sethi   %hi(FLUSH_ADDR), %o3
 163         set     DEMAP_ALL_TYPE, %g1
 164         stxa    %g0, [%g1]ASI_DTLB_DEMAP
 165         stxa    %g0, [%g1]ASI_ITLB_DEMAP
 166         flush   %o3
 167         retl
 168         nop
 169         SET_SIZE(demap_all)
 170         SET_SIZE(vtag_flushall)
 171 
 172 #endif  /* lint */
 173 
 174 
 175 #if defined(lint)
 176 
 177 /* ARGSUSED */
 178 void
 179 vtag_flushpage_tl1(uint64_t vaddr, uint64_t sfmmup)
 180 {}
 181 
 182 #else   /* lint */
 183 
 184         ENTRY_NP(vtag_flushpage_tl1)
 185         /*
 186          * x-trap to flush page from tlb and tsb
 187          *
 188          * %g1 = vaddr, zero-extended on 32-bit kernel
 189          * %g2 = sfmmup
 190          *
 191          * assumes TSBE_TAG = 0
 192          */
 193         srln    %g1, MMU_PAGESHIFT, %g1
 194                 
 195         sethi   %hi(ksfmmup), %g3
 196         ldx     [%g3 + %lo(ksfmmup)], %g3
 197         cmp     %g3, %g2
 198         bne,pt  %xcc, 1f                        ! if not kernel as, go to 1
 199           slln  %g1, MMU_PAGESHIFT, %g1         /* g1 = vaddr */
 200 
 201         /* We need to demap in the kernel context */
 202         or      DEMAP_NUCLEUS | DEMAP_PAGE_TYPE, %g1, %g1
 203         stxa    %g0, [%g1]ASI_DTLB_DEMAP


 208         or      DEMAP_PRIMARY | DEMAP_PAGE_TYPE, %g1, %g1
 209 
 210         SFMMU_CPU_CNUM(%g2, %g6, %g3)   ! %g6 = sfmmu cnum on this CPU
 211         
 212         ldub    [%g2 + SFMMU_CEXT], %g4         ! %g4 = sfmmup->cext
 213         sll     %g4, CTXREG_EXT_SHIFT, %g4
 214         or      %g6, %g4, %g6                   ! %g6 = primary pgsz | cnum
 215 
 216         set     MMU_PCONTEXT, %g4
 217         ldxa    [%g4]ASI_DMMU, %g5              ! %g5 = save old ctxnum
 218         srlx    %g5, CTXREG_NEXT_SHIFT, %g2     ! %g2 = nucleus pgsz 
 219         sllx    %g2, CTXREG_NEXT_SHIFT, %g2     ! preserve nucleus pgsz 
 220         or      %g6, %g2, %g6                   ! %g6 = nucleus pgsz | primary pgsz | cnum      
 221         stxa    %g6, [%g4]ASI_DMMU              ! wr new ctxum
 222         stxa    %g0, [%g1]ASI_DTLB_DEMAP
 223         stxa    %g0, [%g1]ASI_ITLB_DEMAP
 224         stxa    %g5, [%g4]ASI_DMMU              ! restore old ctxnum
 225         retry
 226         SET_SIZE(vtag_flushpage_tl1)
 227 
 228 #endif  /* lint */
 229 
 230 
 231 #if defined(lint)
 232 
 233 /* ARGSUSED */
 234 void
 235 vtag_flush_pgcnt_tl1(uint64_t vaddr, uint64_t sfmmup_pgcnt)
 236 {}
 237 
 238 #else   /* lint */
 239 
 240         ENTRY_NP(vtag_flush_pgcnt_tl1)
 241         /*
 242          * x-trap to flush pgcnt MMU_PAGESIZE pages from tlb
 243          *
 244          * %g1 = vaddr, zero-extended on 32-bit kernel
 245          * %g2 = <sfmmup58|pgcnt6>
 246          *
 247          * NOTE: this handler relies on the fact that no
 248          *      interrupts or traps can occur during the loop
 249          *      issuing the TLB_DEMAP operations. It is assumed
 250          *      that interrupts are disabled and this code is
 251          *      fetching from the kernel locked text address.
 252          *
 253          * assumes TSBE_TAG = 0
 254          */
 255         set     SFMMU_PGCNT_MASK, %g4
 256         and     %g4, %g2, %g3                   /* g3 = pgcnt - 1 */
 257         add     %g3, 1, %g3                     /* g3 = pgcnt */
 258 
 259         andn    %g2, SFMMU_PGCNT_MASK, %g2      /* g2 = sfmmup */


 298         srlx    %g6, CTXREG_NEXT_SHIFT, %g2     /* %g2 = nucleus pgsz */
 299         sllx    %g2, CTXREG_NEXT_SHIFT, %g2     /* preserve nucleus pgsz */
 300         or      %g5, %g2, %g5                   /* %g5 = nucleus pgsz | primary pgsz | cnum */
 301         stxa    %g5, [%g4]ASI_DMMU              /* wr new ctxum */
 302 
 303         set     MMU_PAGESIZE, %g2               /* g2 = pgsize */
 304         sethi   %hi(FLUSH_ADDR), %g5
 305 3:
 306         stxa    %g0, [%g1]ASI_DTLB_DEMAP
 307         stxa    %g0, [%g1]ASI_ITLB_DEMAP
 308         flush   %g5                             ! flush required by immu
 309 
 310         deccc   %g3                             /* decr pgcnt */
 311         bnz,pt  %icc,3b
 312           add   %g1, %g2, %g1                   /* next page */
 313 
 314         stxa    %g6, [%g4]ASI_DMMU              /* restore old ctxnum */
 315         retry
 316         SET_SIZE(vtag_flush_pgcnt_tl1)
 317 
 318 #endif  /* lint */
 319 
 320 
 321 #if defined(lint)
 322 
 323 /*ARGSUSED*/
 324 void
 325 vtag_flushall_tl1(uint64_t dummy1, uint64_t dummy2)
 326 {}
 327 
 328 #else   /* lint */
 329 
 330         ENTRY_NP(vtag_flushall_tl1)
 331         /*
 332          * x-trap to flush tlb
 333          */
 334         set     DEMAP_ALL_TYPE, %g4
 335         stxa    %g0, [%g4]ASI_DTLB_DEMAP
 336         stxa    %g0, [%g4]ASI_ITLB_DEMAP
 337         retry
 338         SET_SIZE(vtag_flushall_tl1)
 339 
 340 #endif  /* lint */
 341 
 342 
 343 /*
 344  * VAC (virtual address conflict) does not apply to OPL.
 345  * VAC resolution is managed by the Olympus processor hardware.
 346  * As a result, all OPL VAC flushing routines are no-ops.
 347  */
 348 
 349 #if defined(lint)
 350 
 351 /* ARGSUSED */
 352 void
 353 vac_flushpage(pfn_t pfnum, int vcolor)
 354 {}
 355 
 356 #else   /* lint */
 357 
 358         ENTRY(vac_flushpage)
 359         retl
 360           nop
 361         SET_SIZE(vac_flushpage)
 362 
 363 #endif  /* lint */
 364 
 365 #if defined(lint)
 366 
 367 /* ARGSUSED */
 368 void
 369 vac_flushpage_tl1(uint64_t pfnum, uint64_t vcolor)
 370 {}
 371 
 372 #else   /* lint */
 373 
 374         ENTRY_NP(vac_flushpage_tl1)
 375         retry
 376         SET_SIZE(vac_flushpage_tl1)
 377 
 378 #endif  /* lint */
 379 
 380 
 381 #if defined(lint)
 382 
 383 /* ARGSUSED */
 384 void
 385 vac_flushcolor(int vcolor, pfn_t pfnum)
 386 {}
 387 
 388 #else   /* lint */
 389 
 390         ENTRY(vac_flushcolor)
 391         retl
 392          nop
 393         SET_SIZE(vac_flushcolor)
 394 
 395 #endif  /* lint */
 396 
 397 
 398 
 399 #if defined(lint)
 400 
 401 /* ARGSUSED */
 402 void
 403 vac_flushcolor_tl1(uint64_t vcolor, uint64_t pfnum)
 404 {}
 405 
 406 #else   /* lint */
 407 
 408         ENTRY(vac_flushcolor_tl1)
 409         retry
 410         SET_SIZE(vac_flushcolor_tl1)
 411 
 412 #endif  /* lint */
 413 
 414 #if defined(lint)
 415 
 416 int
 417 idsr_busy(void)
 418 {
 419         return (0);
 420 }
 421 
 422 #else   /* lint */
 423 
 424 /*
 425  * Determine whether or not the IDSR is busy.
 426  * Entry: no arguments
 427  * Returns: 1 if busy, 0 otherwise
 428  */
 429         ENTRY(idsr_busy)
 430         ldxa    [%g0]ASI_INTR_DISPATCH_STATUS, %g1
 431         clr     %o0
 432         btst    IDSR_BUSY, %g1
 433         bz,a,pt %xcc, 1f
 434         mov     1, %o0
 435 1:
 436         retl
 437         nop
 438         SET_SIZE(idsr_busy)
 439 
 440 #endif  /* lint */
 441 
 442 #if defined(lint)
 443 
 444 /* ARGSUSED */
 445 void
 446 init_mondo(xcfunc_t *func, uint64_t arg1, uint64_t arg2)
 447 {}
 448 
 449 /* ARGSUSED */
 450 void
 451 init_mondo_nocheck(xcfunc_t *func, uint64_t arg1, uint64_t arg2)
 452 {}
 453 
 454 #else   /* lint */
 455 
 456         .global _dispatch_status_busy
 457 _dispatch_status_busy:
 458         .asciz  "ASI_INTR_DISPATCH_STATUS error: busy"
 459         .align  4
 460 
 461 /*
 462  * Setup interrupt dispatch data registers
 463  * Entry:
 464  *      %o0 - function or inumber to call
 465  *      %o1, %o2 - arguments (2 uint64_t's)
 466  */
 467         .seg "text"
 468 
 469         ENTRY(init_mondo)
 470 #ifdef DEBUG
 471         !
 472         ! IDSR should not be busy at the moment
 473         !
 474         ldxa    [%g0]ASI_INTR_DISPATCH_STATUS, %g1
 475         btst    IDSR_BUSY, %g1


 489         mov     IDDR_1, %g2
 490         mov     IDDR_2, %g3
 491         stxa    %o0, [%g1]ASI_INTR_DISPATCH
 492 
 493         !
 494         ! interrupt vector dispatch data reg 1
 495         !
 496         stxa    %o1, [%g2]ASI_INTR_DISPATCH
 497 
 498         !
 499         ! interrupt vector dispatch data reg 2
 500         !
 501         stxa    %o2, [%g3]ASI_INTR_DISPATCH
 502 
 503         membar  #Sync
 504         retl
 505         nop
 506         SET_SIZE(init_mondo_nocheck)
 507         SET_SIZE(init_mondo)
 508 
 509 #endif  /* lint */
 510 
 511 
 512 #if defined(lint)
 513 
 514 /* ARGSUSED */
 515 void
 516 shipit(int upaid, int bn)
 517 { return; }
 518 
 519 #else   /* lint */
 520 
 521 /*
 522  * Ship mondo to aid using busy/nack pair bn
 523  */
 524         ENTRY_NP(shipit)
 525         sll     %o0, IDCR_PID_SHIFT, %g1        ! IDCR<23:14> = agent id
 526         sll     %o1, IDCR_BN_SHIFT, %g2         ! IDCR<28:24> = b/n pair
 527         or      %g1, IDCR_OFFSET, %g1           ! IDCR<13:0> = 0x70
 528         or      %g1, %g2, %g1
 529         stxa    %g0, [%g1]ASI_INTR_DISPATCH     ! interrupt vector dispatch
 530         membar  #Sync
 531         retl
 532         nop
 533         SET_SIZE(shipit)
 534 
 535 #endif  /* lint */
 536 
 537 
 538 #if defined(lint)
 539 
 540 /* ARGSUSED */
 541 void
 542 flush_instr_mem(caddr_t vaddr, size_t len)
 543 {}
 544 
 545 #else   /* lint */
 546 
 547 /*
 548  * flush_instr_mem:
 549  *      Flush 1 page of the I-$ starting at vaddr
 550  *      %o0 vaddr
 551  *      %o1 bytes to be flushed
 552  *
 553  * SPARC64-VI maintains consistency of the on-chip Instruction Cache with
 554  * the stores from all processors so that a FLUSH instruction is only needed
 555  * to ensure pipeline is consistent. This means a single flush is sufficient at
 556  * the end of a sequence of stores that updates the instruction stream to
 557  * ensure correct operation.
 558  */
 559 
 560         ENTRY(flush_instr_mem)
 561         flush   %o0                     ! address irrelevant
 562         retl
 563         nop
 564         SET_SIZE(flush_instr_mem)
 565 
 566 #endif  /* lint */
 567 
 568 
 569 /*
 570  * flush_ecache:
 571  *      %o0 - 64 bit physical address
 572  *      %o1 - ecache size
 573  *      %o2 - ecache linesize
 574  */
 575 #if defined(lint)
 576 
 577 /*ARGSUSED*/
 578 void
 579 flush_ecache(uint64_t physaddr, size_t ecache_size, size_t ecache_linesize)
 580 {}
 581 
 582 #else /* !lint */
 583 
 584         ENTRY(flush_ecache)
 585 
 586         /*
 587          * Flush the entire Ecache.
 588          */
 589         ECACHE_FLUSHALL(%o1, %o2, %o0, %o4)
 590         retl
 591         nop
 592         SET_SIZE(flush_ecache)
 593 
 594 #endif /* lint */
 595 
 596 #if defined(lint)
 597 
 598 /*ARGSUSED*/
 599 void
 600 kdi_flush_idcache(int dcache_size, int dcache_lsize, int icache_size,
 601     int icache_lsize)
 602 {
 603 }
 604 
 605 #else   /* lint */
 606 
 607         /*
 608          * I/D cache flushing is not needed for OPL processors
 609          */
 610         ENTRY(kdi_flush_idcache)
 611         retl
 612         nop
 613         SET_SIZE(kdi_flush_idcache)
 614 
 615 #endif  /* lint */
 616 
 617 #ifdef  TRAPTRACE
 618 /*
 619  * Simplified trap trace macro for OPL. Adapted from us3.
 620  */
 621 #define OPL_TRAPTRACE(ptr, scr1, scr2, label)                   \
 622         CPU_INDEX(scr1, ptr);                                   \
 623         sll     scr1, TRAPTR_SIZE_SHIFT, scr1;                  \
 624         set     trap_trace_ctl, ptr;                            \
 625         add     ptr, scr1, scr1;                                \
 626         ld      [scr1 + TRAPTR_LIMIT], ptr;                     \
 627         tst     ptr;                                            \
 628         be,pn   %icc, label/**/1;                               \
 629          ldx    [scr1 + TRAPTR_PBASE], ptr;                     \
 630         ld      [scr1 + TRAPTR_OFFSET], scr1;                   \
 631         add     ptr, scr1, ptr;                                 \
 632         rd      %asi, scr2;                                     \
 633         wr      %g0, TRAPTR_ASI, %asi;                          \
 634         rd      STICK, scr1;                                    \
 635         stxa    scr1, [ptr + TRAP_ENT_TICK]%asi;                \
 636         rdpr    %tl, scr1;                                      \


 984  * %pstate, %pc, %npc are propagated to %tstate, %tpc, %tnpc,
 985  * and we reset these regiseter here.
 986  */
 987 #define RESET_CUR_TSTATE(tmp)                                   \
 988         set     TSTATE_KERN, tmp                                ;\
 989         wrpr    %g0, tmp, %tstate                               ;\
 990         wrpr    %g0, 0, %tpc                                    ;\
 991         wrpr    %g0, 0, %tnpc                                   ;\
 992         RESET_WINREG(tmp)
 993 
 994 /*
 995  * In case of urgent errors some MMU registers may be
 996  * corrupted, so we set here some reasonable values for
 997  * them. Note that resetting MMU registers also reset the context
 998  * info, we will need to reset the window registers to prevent
 999  * spill/fill that depends on context info for correct behaviour.
1000  * Note that the TLBs must be flushed before programming the context
1001  * registers.
1002  */
1003 
1004 #if !defined(lint)
1005 #define RESET_MMU_REGS(tmp1, tmp2, tmp3)                        \
1006         FLUSH_ALL_TLB(tmp1)                                     ;\
1007         set     MMU_PCONTEXT, tmp1                              ;\
1008         sethi   %hi(kcontextreg), tmp2                          ;\
1009         ldx     [tmp2 + %lo(kcontextreg)], tmp2                 ;\
1010         stxa    tmp2, [tmp1]ASI_DMMU                            ;\
1011         set     MMU_SCONTEXT, tmp1                              ;\
1012         stxa    tmp2, [tmp1]ASI_DMMU                            ;\
1013         sethi   %hi(ktsb_base), tmp1                            ;\
1014         ldx     [tmp1 + %lo(ktsb_base)], tmp2                   ;\
1015         mov     MMU_TSB, tmp3                                   ;\
1016         stxa    tmp2, [tmp3]ASI_IMMU                            ;\
1017         stxa    tmp2, [tmp3]ASI_DMMU                            ;\
1018         membar  #Sync                                           ;\
1019         RESET_WINREG(tmp1)
1020 
1021 #define RESET_TSB_TAGPTR(tmp)                                   \
1022         set     MMU_TAG_ACCESS, tmp                             ;\
1023         stxa    %g0, [tmp]ASI_IMMU                              ;\
1024         stxa    %g0, [tmp]ASI_DMMU                              ;\
1025         membar  #Sync
1026 #endif /* lint */
1027 
1028 /*
1029  * In case of errors in the MMU_TSB_PREFETCH registers we have to
1030  * reset them. We can use "0" as the reset value, this way we set
1031  * the "V" bit of the registers to 0, which will disable the prefetch
1032  * so the values of the other fields are irrelevant.
1033  */
1034 #if !defined(lint)
1035 #define RESET_TSB_PREFETCH(tmp)                 \
1036         set     VA_UTSBPREF_8K, tmp             ;\
1037         stxa    %g0, [tmp]ASI_ITSB_PREFETCH     ;\
1038         set     VA_UTSBPREF_4M, tmp             ;\
1039         stxa    %g0, [tmp]ASI_ITSB_PREFETCH     ;\
1040         set     VA_KTSBPREF_8K, tmp             ;\
1041         stxa    %g0, [tmp]ASI_ITSB_PREFETCH     ;\
1042         set     VA_KTSBPREF_4M, tmp             ;\
1043         stxa    %g0, [tmp]ASI_ITSB_PREFETCH     ;\
1044         set     VA_UTSBPREF_8K, tmp             ;\
1045         stxa    %g0, [tmp]ASI_DTSB_PREFETCH     ;\
1046         set     VA_UTSBPREF_4M, tmp             ;\
1047         stxa    %g0, [tmp]ASI_DTSB_PREFETCH     ;\
1048         set     VA_KTSBPREF_8K, tmp             ;\
1049         stxa    %g0, [tmp]ASI_DTSB_PREFETCH     ;\
1050         set     VA_KTSBPREF_4M, tmp             ;\
1051         stxa    %g0, [tmp]ASI_DTSB_PREFETCH
1052 #endif /* lint */
1053 
1054 /*
1055  * In case of errors in the MMU_SHARED_CONTEXT register we have to
1056  * reset its value. We can use "0" as the reset value, it will put
1057  * 0 in the IV field disabling the shared context support, and
1058  * making values of all the other fields of the register irrelevant.
1059  */
1060 #if !defined(lint)
1061 #define RESET_SHARED_CTXT(tmp)                  \
1062         set     MMU_SHARED_CONTEXT, tmp         ;\
1063         stxa    %g0, [tmp]ASI_DMMU
1064 #endif /* lint */
1065 
1066 /*
1067  * RESET_TO_PRIV()
1068  *
1069  * In many cases, we need to force the thread into privilege mode because
1070  * privilege mode is only thing in which the system continue to work
1071  * due to undeterminable user mode information that come from register
1072  * corruption.
1073  *
1074  *  - opl_uger_ctxt
1075  *    If the error is secondary TSB related register parity, we have no idea
1076  *    what value is supposed to be for it.
1077  *
1078  *  The below three cases %tstate is not accessible until it is overwritten
1079  *  with some value, so we have no clue if the thread was running on user mode
1080  *  or not
1081  *   - opl_uger_pstate
1082  *     If the error is %pstate parity, it propagates to %tstate.
1083  *   - opl_uger_tstate
1084  *     No need to say the reason


1108  */
1109 #define RESET_TO_PRIV(tmp, tmp1, tmp2, local)                   \
1110         RESET_MMU_REGS(tmp, tmp1, tmp2)                         ;\
1111         CPU_ADDR(tmp, tmp1)                                     ;\
1112         ldx     [tmp + CPU_THREAD], local                       ;\
1113         ldx     [local + T_STACK], tmp                          ;\
1114         sub     tmp, STACK_BIAS, %sp                            ;\
1115         rdpr    %pstate, tmp                                    ;\
1116         wrpr    tmp, PSTATE_AG, %pstate                         ;\
1117         mov     local, %g7                                      ;\
1118         rdpr    %pstate, local                                  ;\
1119         wrpr    local, PSTATE_AG, %pstate                       ;\
1120         wrpr    %g0, 1, %tl                                     ;\
1121         set     TSTATE_KERN, tmp                                ;\
1122         rdpr    %cwp, tmp1                                      ;\
1123         or      tmp, tmp1, tmp                                  ;\
1124         wrpr    tmp, %g0, %tstate                               ;\
1125         wrpr    %g0, %tpc
1126 
1127 
1128 #if defined(lint)
1129 
1130 void
1131 ce_err(void)
1132 {}
1133 
1134 #else   /* lint */
1135 
1136 /*
1137  * We normally don't expect CE traps since we disable the
1138  * 0x63 trap reporting at the start of day. There is a
1139  * small window before we disable them, so let check for
1140  * it. Otherwise, panic.
1141  */
1142 
1143         .align  128
1144         ENTRY_NP(ce_err)
1145         mov     AFSR_ECR, %g1
1146         ldxa    [%g1]ASI_ECR, %g1
1147         andcc   %g1, ASI_ECR_RTE_UE | ASI_ECR_RTE_CEDG, %g0
1148         bz,pn   %xcc, 1f
1149          nop
1150         retry
1151 1:
1152         /*
1153          * We did disabled the 0x63 trap reporting.
1154          * This shouldn't happen - panic.
1155          */
1156         set     trap, %g1
1157         rdpr    %tt, %g3
1158         sethi   %hi(sys_trap), %g5
1159         jmp     %g5 + %lo(sys_trap)
1160         sub     %g0, 1, %g4
1161         SET_SIZE(ce_err)
1162 
1163 #endif  /* lint */
1164 
1165 
1166 #if defined(lint)
1167 
1168 void
1169 ce_err_tl1(void)
1170 {}
1171 
1172 #else   /* lint */
1173 
1174 /*
1175  * We don't use trap for CE detection.
1176  */
1177         ENTRY_NP(ce_err_tl1)
1178         set     trap, %g1
1179         rdpr    %tt, %g3
1180         sethi   %hi(sys_trap), %g5
1181         jmp     %g5 + %lo(sys_trap)
1182         sub     %g0, 1, %g4
1183         SET_SIZE(ce_err_tl1)
1184 
1185 #endif  /* lint */
1186 
1187 
1188 #if defined(lint)
1189 
1190 void
1191 async_err(void)
1192 {}
1193 
1194 #else   /* lint */
1195 
1196 /*
1197  * async_err is the default handler for IAE/DAE traps.
1198  * For OPL, we patch in the right handler at start of day.
1199  * But if a IAE/DAE trap get generated before the handler
1200  * is patched, panic.
1201  */
1202         ENTRY_NP(async_err)
1203         set     trap, %g1
1204         rdpr    %tt, %g3
1205         sethi   %hi(sys_trap), %g5
1206         jmp     %g5 + %lo(sys_trap)
1207         sub     %g0, 1, %g4
1208         SET_SIZE(async_err)
1209 
1210 #endif  /* lint */
1211 
1212 #if defined(lint)
1213 void
1214 opl_sync_trap(void)
1215 {}
1216 #else   /* lint */
1217 
1218         .seg    ".data"
1219         .global opl_clr_freg
1220         .global opl_cpu0_err_log
1221 
1222         .align  16
1223 opl_clr_freg:
1224         .word   0
1225         .align  16
1226 
1227         .align  MMU_PAGESIZE
1228 opl_cpu0_err_log:
1229         .skip   MMU_PAGESIZE
1230 
1231 /*
1232  * Common synchronous error trap handler (tt=0xA, 0x32)
1233  * All TL=0 and TL>0 0xA and 0x32 traps vector to this handler.
1234  * The error handling can be best summarized as follows:
1235  * 0. Do TRAPTRACE if enabled.
1236  * 1. Save globals %g1, %g2 & %g3 onto the scratchpad regs.
1237  * 2. The SFSR register is read and verified as valid by checking


1384         mov     %g5, %g3                ! pass SFSR to the 3rd arg
1385         mov     %g6, %g2                ! pass SFAR to the 2nd arg
1386         set     opl_cpu_isync_tl1_error, %g1
1387         set     opl_cpu_dsync_tl1_error, %g6
1388         cmp     %g4, T_INSTR_ERROR
1389         movne   %icc, %g6, %g1
1390         ba,pt   %icc, 6f
1391         nop
1392 3:
1393         mov     %g5, %g3                ! pass SFSR to the 3rd arg
1394         mov     %g6, %g2                ! pass SFAR to the 2nd arg
1395         set     opl_cpu_isync_tl0_error, %g1
1396         set     opl_cpu_dsync_tl0_error, %g6
1397         cmp     %g4, T_INSTR_ERROR
1398         movne   %icc, %g6, %g1
1399 6:
1400         sethi   %hi(sys_trap), %g5
1401         jmp     %g5 + %lo(sys_trap)
1402          mov    PIL_15, %g4
1403         SET_SIZE(opl_sync_trap)
1404 #endif  /* lint */
1405 
1406 #if defined(lint)
1407 void
1408 opl_uger_trap(void)
1409 {}
1410 #else   /* lint */
1411 /*
1412  * Common Urgent error trap handler (tt=0x40)
1413  * All TL=0 and TL>0 0x40 traps vector to this handler.
1414  * The error handling can be best summarized as follows:
1415  * 1. Read the Urgent error status register (UGERSR)
1416  *    Faultaddress is N/A here and it is not collected.
1417  * 2. Check to see if we have a multiple errors case
1418  *    If so, we enable WEAK_ED (weak error detection) bit
1419  *    to prevent any potential error storms and branch directly
1420  *    to generate ereport. (we don't decode/handle individual
1421  *    error cases when we get a multiple error situation)
1422  * 3. Now look for the recoverable error cases which include
1423  *    IUG_DTLB, IUG_ITLB or COREERR errors. If any of the
1424  *    recoverable errors are detected, do the following:
1425  *    - Flush all tlbs.
1426  *    - Verify that we came from TL=0, if not, generate
1427  *      ereport. Note that the reason we don't recover
1428  *      at TL>0 is because the AGs might be corrupted or
1429  *      inconsistent. We can't save/restore them into
1430  *      the scratchpad regs like we did for opl_sync_trap().


1605          nop
1606 
1607 opl_uger_panic1:
1608         mov     %g1, %g2                        ! %g2 = arg #1
1609         LOG_UGER_REG(%g1, %g3, %g4)
1610         RESET_TO_PRIV(%g1, %g3, %g4, %l0)
1611 
1612         /*
1613          * Set up the argument for sys_trap.
1614          * %g2 = arg #1 already set above
1615          */
1616 opl_uger_panic_cmn:
1617         RESET_USER_RTT_REGS(%g4, %g5, opl_uger_panic_resetskip)
1618 opl_uger_panic_resetskip:
1619         rdpr    %tl, %g3                        ! arg #2
1620         set     opl_cpu_urgent_error, %g1       ! pc
1621         sethi   %hi(sys_trap), %g5
1622         jmp     %g5 + %lo(sys_trap)
1623          mov    PIL_15, %g4
1624         SET_SIZE(opl_uger_trap)
1625 #endif  /* lint */
1626 
1627 #if defined(lint)
1628 void
1629 opl_ta3_trap(void)
1630 {}
1631 void
1632 opl_cleanw_subr(void)
1633 {}
1634 #else   /* lint */
1635 /*
1636  * OPL ta3 support (note please, that win_reg
1637  * area size for each cpu is 2^7 bytes)
1638  */
1639 
1640 #define RESTORE_WREGS(tmp1, tmp2)               \
1641         CPU_INDEX(tmp1, tmp2)                   ;\
1642         sethi   %hi(opl_ta3_save), tmp2         ;\
1643         ldx     [tmp2 +%lo(opl_ta3_save)], tmp2 ;\
1644         sllx    tmp1, 7, tmp1                   ;\
1645         add     tmp2, tmp1, tmp2                ;\
1646         ldx     [tmp2 + 0], %l0                 ;\
1647         ldx     [tmp2 + 8], %l1                 ;\
1648         ldx     [tmp2 + 16], %l2                ;\
1649         ldx     [tmp2 + 24], %l3                ;\
1650         ldx     [tmp2 + 32], %l4                ;\
1651         ldx     [tmp2 + 40], %l5                ;\
1652         ldx     [tmp2 + 48], %l6                ;\
1653         ldx     [tmp2 + 56], %l7                ;\
1654         ldx     [tmp2 + 64], %i0                ;\


1710         ba,a    fast_trap_done
1711         SET_SIZE(opl_ta3_trap)
1712 
1713         ENTRY_NP(opl_cleanw_subr)
1714         set     trap, %g1
1715         mov     T_FLUSHW, %g3
1716         sub     %g0, 1, %g4
1717         rdpr    %cwp, %g5
1718         SAVE_WREGS(%g2, %g6)
1719         save
1720         flushw
1721         rdpr    %cwp, %g6
1722         wrpr    %g5, %cwp
1723         RESTORE_WREGS(%g2, %g5)
1724         wrpr    %g6, %cwp
1725         restored
1726         restore
1727         jmp     %g7
1728           nop
1729         SET_SIZE(opl_cleanw_subr)
1730 #endif  /* lint */
1731 
1732 #if defined(lint)
1733 
1734 void
1735 opl_serr_instr(void)
1736 {}
1737 
1738 #else   /* lint */
1739 /*
1740  * The actual trap handler for tt=0x0a, and tt=0x32
1741  */
1742         ENTRY_NP(opl_serr_instr)
1743         OPL_SAVE_GLOBAL(%g1,%g2,%g3)
1744         sethi   %hi(opl_sync_trap), %g3
1745         jmp     %g3 + %lo(opl_sync_trap)
1746          rdpr    %tt, %g1
1747         .align  32
1748         SET_SIZE(opl_serr_instr)
1749 
1750 #endif  /* lint */
1751 
1752 #if defined(lint)
1753 
1754 void
1755 opl_ugerr_instr(void)
1756 {}
1757 
1758 #else   /* lint */
1759 /*
1760  * The actual trap handler for tt=0x40
1761  */
1762         ENTRY_NP(opl_ugerr_instr)
1763         sethi   %hi(opl_uger_trap), %g3
1764         jmp     %g3 + %lo(opl_uger_trap)
1765          nop
1766         .align  32
1767         SET_SIZE(opl_ugerr_instr)
1768 
1769 #endif  /* lint */
1770 
1771 #if defined(lint)
1772 
1773 void
1774 opl_ta3_instr(void)
1775 {}
1776 
1777 #else   /* lint */
1778 /*
1779  * The actual trap handler for tt=0x103 (flushw)
1780  */
1781         ENTRY_NP(opl_ta3_instr)
1782         sethi   %hi(opl_ta3_trap), %g3
1783         jmp     %g3 + %lo(opl_ta3_trap)
1784          nop
1785         .align  32
1786         SET_SIZE(opl_ta3_instr)
1787 
1788 #endif  /* lint */
1789 
1790 #if defined(lint)
1791 
1792 void
1793 opl_ta4_instr(void)
1794 {}
1795 
1796 #else   /* lint */
1797 /*
1798  * The patch for the .clean_windows code
1799  */
1800         ENTRY_NP(opl_ta4_instr)
1801         sethi   %hi(opl_cleanw_subr), %g3
1802         add     %g3, %lo(opl_cleanw_subr), %g3
1803         jmpl    %g3, %g7
1804           add   %g7, 8, %g7
1805         nop
1806         nop
1807         nop
1808         SET_SIZE(opl_ta4_instr)
1809 
1810 #endif  /* lint */
1811 
1812 #if defined(lint)
1813 /*
1814  *  Get timestamp (stick).
1815  */
1816 /* ARGSUSED */
1817 void
1818 stick_timestamp(int64_t *ts)
1819 {
1820 }
1821 
1822 #else   /* lint */
1823 
1824         ENTRY_NP(stick_timestamp)
1825         rd      STICK, %g1      ! read stick reg
1826         sllx    %g1, 1, %g1     ! shift bit 63 (NPT) out ...
1827         srlx    %g1, 1, %g1     ! clear npt bit
1828 
1829         retl
1830         stx     %g1, [%o0]      ! store the timestamp
1831         SET_SIZE(stick_timestamp)
1832 
1833 #endif  /* lint */
1834 
1835 
1836 #if defined(lint)
1837 /*
1838  * Set STICK adjusted by skew.
1839  */
1840 /* ARGSUSED */
1841 void
1842 stick_adj(int64_t skew)
1843 {
1844 }
1845 
1846 #else   /* lint */
1847 
1848         ENTRY_NP(stick_adj)
1849         rdpr    %pstate, %g1            ! save processor state
1850         andn    %g1, PSTATE_IE, %g3     ! clear IE bit
1851         ba      1f                      ! cache align stick adj
1852         wrpr    %g0, %g3, %pstate       ! turn off interrupts
1853 
1854         .align  16                      ! keep rd/wr of STICK in one i$ line
1855 1:      nop
1856 
1857         rd      STICK, %g4              ! read stick reg
1858         add     %g4, %o0, %o1           ! adjust stick with skew (%o0 = arg)
1859         wr      %o1, %g0, STICK         ! write stick reg
1860 
1861         retl
1862         wrpr    %g1, %pstate            ! restore processor state
1863         SET_SIZE(stick_adj)
1864 
1865 #endif  /* lint */
1866 
1867 #if defined(lint)
1868 /*
1869  * Debugger-specific stick retrieval
1870  */
1871 /*ARGSUSED*/
1872 int
1873 kdi_get_stick(uint64_t *stickp)
1874 {
1875         return (0);
1876 }
1877 
1878 #else   /* lint */
1879 
1880         ENTRY_NP(kdi_get_stick)
1881         rd      STICK, %g1              ! current stick value
1882         stx     %g1, [%o0]              ! *stickp = value
1883         retl
1884         mov     %g0, %o0                ! delay slot: return 0
1885         SET_SIZE(kdi_get_stick)
1886 
1887 #endif  /* lint */
1888 
1889 #if defined(lint)
1890 
1891 /*ARGSUSED*/
1892 int
1893 dtrace_blksuword32(uintptr_t addr, uint32_t *data, int tryagain)
1894 { return (0); }
1895 
1896 #else
1897 
1898         ENTRY(dtrace_blksuword32)
1899         save    %sp, -SA(MINFRAME + 4), %sp
1900 
1901         rdpr    %pstate, %l1
1902         andn    %l1, PSTATE_IE, %l2             ! disable interrupts to
1903         wrpr    %g0, %l2, %pstate               ! protect our FPU diddling
1904 
1905         rd      %fprs, %l0
1906         andcc   %l0, FPRS_FEF, %g0
1907         bz,a,pt %xcc, 1f                        ! if the fpu is disabled
1908         wr      %g0, FPRS_FEF, %fprs            ! ... enable the fpu
1909 
1910         st      %f0, [%fp + STACK_BIAS - 4]     ! save %f0 to the stack
1911 1:
1912         set     0f, %l5
1913         /*
1914          * We're about to write a block full of either total garbage
1915          * (not kernel data, don't worry) or user floating-point data
1916          * (so it only _looks_ like garbage).
1917          */


1942         wr      %g0, %l0, %fprs                 ! restore %fprs
1943 
1944         ld      [%fp + STACK_BIAS - 4], %f0     ! restore %f0
1945 1:
1946 
1947         wrpr    %g0, %l1, %pstate               ! restore interrupts
1948 
1949         /*
1950          * If tryagain is set (%i2) we tail-call dtrace_blksuword32_err()
1951          * which deals with watchpoints. Otherwise, just return -1.
1952          */
1953         brnz,pt %i2, 1f
1954         nop
1955         ret
1956         restore %g0, -1, %o0
1957 1:
1958         call    dtrace_blksuword32_err
1959         restore
1960 
1961         SET_SIZE(dtrace_blksuword32)
1962 #endif /* lint */
1963 
1964 #if defined(lint)
1965 /*ARGSUSED*/
1966 void
1967 ras_cntr_reset(void *arg)
1968 {
1969 }
1970 #else
1971         ENTRY_NP(ras_cntr_reset)
1972         set     OPL_SCRATCHPAD_ERRLOG, %o1
1973         ldxa    [%o1]ASI_SCRATCHPAD, %o0        ! current errlog value
1974         or      %o0, ERRLOG_REG_NUMERR_MASK, %o0 ! set NUMERR field to all 1s
1975         retl
1976          stxa   %o0, [%o1]ASI_SCRATCHPAD        ! delay slot: write back
1977         SET_SIZE(ras_cntr_reset)
1978 #endif /* lint */
1979 
1980 #if defined(lint)
1981 /* ARGSUSED */
1982 void
1983 opl_error_setup(uint64_t cpu_err_log_pa)
1984 {
1985 }
1986 
1987 #else   /* lint */
1988         ENTRY_NP(opl_error_setup)
1989         /*
1990          * Initialize the error log scratchpad register
1991          */
1992         ldxa    [%g0]ASI_EIDR, %o2              ! %o2 = CPU id (EIDR)
1993         sethi   %hi(ERRLOG_REG_EIDR_MASK), %o1
1994         or      %o1, %lo(ERRLOG_REG_EIDR_MASK), %o1
1995         and     %o2, %o1, %o3                   ! mask relevant EIDR bits
1996         sllx    %o3, ERRLOG_REG_EIDR_SHIFT, %o2 ! position EIDR field
1997         or      %o2, %o0, %o3                   ! merge log PA arg (%o0)
1998         or      %o3, ERRLOG_REG_NUMERR_MASK, %o0 ! NUMERR field = all 1s
1999         set     OPL_SCRATCHPAD_ERRLOG, %o1
2000         stxa    %o0, [%o1]ASI_SCRATCHPAD        ! write errlog scratchpad
2001         /*
2002          * Disable all restrainable error traps
2003          */
2004         mov     AFSR_ECR, %o1
2005         ldxa    [%o1]ASI_AFSR, %o0              ! read error control reg
2006         andn    %o0, ASI_ECR_RTE_UE|ASI_ECR_RTE_CEDG, %o0
2007         retl
2008           stxa  %o0, [%o1]ASI_AFSR              ! delay slot: write back
2009         SET_SIZE(opl_error_setup)
2010 #endif /* lint */
2011 
2012 #if defined(lint)
2013 /* ARGSUSED */
2014 void
2015 cpu_early_feature_init(void)
2016 {
2017 }
2018 #else   /* lint */
2019         ENTRY_NP(cpu_early_feature_init)
2020         /*
2021          * Enable MMU translating multiple page sizes for
2022          * sITLB and sDTLB.
2023          */
2024         mov     LSU_MCNTL, %o0
2025         ldxa    [%o0] ASI_MCNTL, %o1            ! read-modify-write MCNTL
2026         or      %o1, MCNTL_MPG_SITLB | MCNTL_MPG_SDTLB, %o1
2027           stxa  %o1, [%o0] ASI_MCNTL
2028         /*
2029          * Demap all previous entries.
2030          */
2031         sethi   %hi(FLUSH_ADDR), %o1            ! address for the final flush
2032         set     DEMAP_ALL_TYPE, %o0
2033         stxa    %g0, [%o0]ASI_DTLB_DEMAP        ! demap-all D-TLB
2034         stxa    %g0, [%o0]ASI_ITLB_DEMAP        ! demap-all I-TLB
2035         retl
2036           flush %o1                             ! delay slot: synchronize
2037         SET_SIZE(cpu_early_feature_init)
2038 #endif /* lint */
2039 
2040 #if     defined(lint)
2041 /*
2042  * This function is called for each (enabled) CPU. We use it to
2043  * initialize error handling related registers.
2044  */
2045 /*ARGSUSED*/
2046 void
2047 cpu_feature_init(void)
2048 {}
2049 #else   /* lint */
2050         ENTRY(cpu_feature_init)
2051         !
2052         ! get the device_id and store the device_id
2053         ! in the appropriate cpunodes structure
2054         ! given the cpus index
2055         !
2056         CPU_INDEX(%o0, %o1)                     ! %o0 = this CPU's index
2057         mulx %o0, CPU_NODE_SIZE, %o0            ! byte offset into cpunodes[]
2058         set  cpunodes + DEVICE_ID, %o1
2059         ldxa [%g0] ASI_DEVICE_SERIAL_ID, %o2    ! read device serial id
2060         stx  %o2, [%o0 + %o1]                   ! cpunodes[i].device_id = id
2061         !
2062         ! initialize CPU registers
2063         !
2064         ba      opl_cpu_reg_init                ! tail-branch (no call/save)
2065         nop                                     ! delay slot
2066         SET_SIZE(cpu_feature_init)
2067 #endif  /* lint */
2068 
2069 #if defined(lint)
2070 
/*
 * Lint prototype for the assembly routine defined below.
 *
 * Fix: the stub was previously named "cpu_cleartickpnt", which does not
 * match the actual entry point ENTRY_NP(cpu_clearticknpt); lint builds
 * therefore declared a symbol that does not exist in the kernel.
 */
void
cpu_clearticknpt(void)
{}
2074 
2075 #else   /* lint */
2076         /*
2077          * Clear the NPT (non-privileged trap) bit in the %tick/%stick
2078          * registers. In an effort to make the change in the
2079          * tick/stick counter as consistent as possible, we disable
2080          * all interrupts while we're changing the registers. We also
2081          * ensure that the read and write instructions are in the same
2082          * line in the instruction cache.
2083          */
2084         ENTRY_NP(cpu_clearticknpt)
2085         rdpr    %pstate, %g1            /* save processor state */
2086         andn    %g1, PSTATE_IE, %g3     /* turn off */
2087         wrpr    %g0, %g3, %pstate       /*   interrupts */
2088         rdpr    %tick, %g2              /* get tick register */
2089         brgez,pn %g2, 1f                /* if NPT bit off, we're done */
2090         mov     1, %g3                  /* create mask */
2091         sllx    %g3, 63, %g3            /*   for NPT bit */
2092         ba,a,pt %xcc, 2f
2093         .align  8                       /* Ensure rd/wr in same i$ line */
2094 2:
2095         rdpr    %tick, %g2              /* get tick register */
2096         wrpr    %g3, %g2, %tick         /* write tick register, */
2097                                         /*   clearing NPT bit   */
2098 1:
2099         rd      STICK, %g2              /* get stick register */
2100         brgez,pn %g2, 3f                /* if NPT bit off, we're done */
2101         mov     1, %g3                  /* create mask */
2102         sllx    %g3, 63, %g3            /*   for NPT bit */
2103         ba,a,pt %xcc, 4f
2104         .align  8                       /* Ensure rd/wr in same i$ line */
2105 4:
2106         rd      STICK, %g2              /* get stick register */
2107         wr      %g3, %g2, STICK         /* write stick register, */
2108                                         /*   clearing NPT bit   */
2109 3:
2110         jmp     %g4 + 4                 /* return via %g4 (presumably set */
                                            /*   by caller -- no call/retl)   */
2111         wrpr    %g0, %g1, %pstate       /* restore processor state */
2112 
2113         SET_SIZE(cpu_clearticknpt)
2114 
2115 #endif  /* lint */
2116 
2117 #if defined(lint)
2118 
2119 void
2120 cpu_halt_cpu(void)
2121 {}
2122 
2123 void
2124 cpu_smt_pause(void)
2125 {}
2126 
2127 #else   /* lint */
2128 
2129         /*
2130          * Halt the current strand with the suspend instruction.
2131          * The compiler/asm currently does not support this suspend
2132          * instruction mnemonic, use byte code for now.
2133          */
2134         ENTRY_NP(cpu_halt_cpu)
2135         .word   0x81b01040              ! "suspend" opcode (see comment above)
2136         retl
2137         nop                             ! delay slot
2138         SET_SIZE(cpu_halt_cpu)
2139 
2140         /*
2141          * Pause the current strand with the sleep instruction.
2142          * The compiler/asm currently does not support this sleep
2143          * instruction mnemonic, use byte code for now.
2144          */
2145         ENTRY_NP(cpu_smt_pause)
2146         .word   0x81b01060              ! "sleep" opcode (see comment above)
2147         retl
2148         nop                             ! delay slot
2149         SET_SIZE(cpu_smt_pause)
2150 
2151 #endif  /* lint */


   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 /*
  22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  23  * Use is subject to license terms.
  24  *
  25  * Assembly code support for the Olympus-C module
  26  */
  27 

  28 #include "assym.h"

  29 
  30 #include <sys/asm_linkage.h>
  31 #include <sys/mmu.h>
  32 #include <vm/hat_sfmmu.h>
  33 #include <sys/machparam.h>
  34 #include <sys/machcpuvar.h>
  35 #include <sys/machthread.h>
  36 #include <sys/machtrap.h>
  37 #include <sys/privregs.h>
  38 #include <sys/asm_linkage.h>
  39 #include <sys/trap.h>
  40 #include <sys/opl_olympus_regs.h>
  41 #include <sys/opl_module.h>
  42 #include <sys/xc_impl.h>
  43 #include <sys/intreg.h>
  44 #include <sys/async.h>
  45 #include <sys/clock.h>
  46 #include <sys/cmpregs.h>
  47 
  48 #ifdef TRAPTRACE
  49 #include <sys/traptrace.h>
  50 #endif /* TRAPTRACE */
  51 
  52 /*
  53  * Macro that flushes the entire Ecache.
  54  *
  55  * arg1 = ecache size
  56  * arg2 = ecache linesize
  57  * arg3 = ecache flush address - Not used for olympus-C
  58  */
  59 #define ECACHE_FLUSHALL(arg1, arg2, arg3, tmp1)                         \
  60         mov     ASI_L2_CTRL_U2_FLUSH, arg1;                             \
  61         mov     ASI_L2_CTRL_RW_ADDR, arg2;                              \
  62         stxa    arg1, [arg2]ASI_L2_CTRL
  63 
  64 /*
  65  * SPARC64-VI MMU and Cache operations.
  66  */
  67 









  68         ENTRY_NP(vtag_flushpage)
  69         /*
  70          * flush page from the tlb
  71          *
  72          * %o0 = vaddr
  73          * %o1 = sfmmup
  74          */
  75         rdpr    %pstate, %o5
  76 #ifdef DEBUG
  77         PANIC_IF_INTR_DISABLED_PSTR(%o5, opl_di_l3, %g1)
  78 #endif /* DEBUG */
  79         /*
  80          * disable ints
  81          */
  82         andn    %o5, PSTATE_IE, %o4
  83         wrpr    %o4, 0, %pstate
  84 
  85         /*
  86          * Then, blow out the tlb
  87          * Interrupts are disabled to prevent the primary ctx register


 116 
 117         wrpr    %g0, 1, %tl
 118         set     MMU_PCONTEXT, %o4
 119         or      DEMAP_PRIMARY | DEMAP_PAGE_TYPE, %o0, %o0
 120         ldxa    [%o4]ASI_DMMU, %o2              ! %o2 = save old ctxnum
 121         srlx    %o2, CTXREG_NEXT_SHIFT, %o1     ! need to preserve nucleus pgsz
 122         sllx    %o1, CTXREG_NEXT_SHIFT, %o1     ! %o1 = nucleus pgsz
 123         or      %g1, %o1, %g1                   ! %g1 = nucleus pgsz | primary pgsz | cnum
 124         stxa    %g1, [%o4]ASI_DMMU              ! wr new ctxnum
 125 
 126         stxa    %g0, [%o0]ASI_DTLB_DEMAP
 127         stxa    %g0, [%o0]ASI_ITLB_DEMAP
 128         stxa    %o2, [%o4]ASI_DMMU              /* restore old ctxnum */
 129         flush   %o3
 130         wrpr    %g0, 0, %tl
 131 
 132         retl
 133         wrpr    %g0, %o5, %pstate               /* enable interrupts */
 134         SET_SIZE(vtag_flushpage)
 135 

 136 









 137         ENTRY_NP2(vtag_flushall, demap_all)
 138         /*
 139          * flush the tlb
 140          */
 141         sethi   %hi(FLUSH_ADDR), %o3
 142         set     DEMAP_ALL_TYPE, %g1
 143         stxa    %g0, [%g1]ASI_DTLB_DEMAP        ! demap-all D-TLB
 144         stxa    %g0, [%g1]ASI_ITLB_DEMAP        ! demap-all I-TLB
 145         flush   %o3                             ! synchronize
 146         retl
 147         nop                                     ! delay slot
 148         SET_SIZE(demap_all)
 149         SET_SIZE(vtag_flushall)
 150 

 151 










 152         ENTRY_NP(vtag_flushpage_tl1)
 153         /*
 154          * x-trap to flush page from tlb and tsb
 155          *
 156          * %g1 = vaddr, zero-extended on 32-bit kernel
 157          * %g2 = sfmmup
 158          *
 159          * assumes TSBE_TAG = 0
 160          */
 161         srln    %g1, MMU_PAGESHIFT, %g1
 162                 
 163         sethi   %hi(ksfmmup), %g3
 164         ldx     [%g3 + %lo(ksfmmup)], %g3
 165         cmp     %g3, %g2
 166         bne,pt  %xcc, 1f                        ! if not kernel as, go to 1
 167           slln  %g1, MMU_PAGESHIFT, %g1         /* g1 = vaddr */
 168 
 169         /* We need to demap in the kernel context */
 170         or      DEMAP_NUCLEUS | DEMAP_PAGE_TYPE, %g1, %g1
 171         stxa    %g0, [%g1]ASI_DTLB_DEMAP


 176         or      DEMAP_PRIMARY | DEMAP_PAGE_TYPE, %g1, %g1
 177 
 178         SFMMU_CPU_CNUM(%g2, %g6, %g3)   ! %g6 = sfmmu cnum on this CPU
 179         
 180         ldub    [%g2 + SFMMU_CEXT], %g4         ! %g4 = sfmmup->cext
 181         sll     %g4, CTXREG_EXT_SHIFT, %g4
 182         or      %g6, %g4, %g6                   ! %g6 = primary pgsz | cnum
 183 
 184         set     MMU_PCONTEXT, %g4
 185         ldxa    [%g4]ASI_DMMU, %g5              ! %g5 = save old ctxnum
 186         srlx    %g5, CTXREG_NEXT_SHIFT, %g2     ! %g2 = nucleus pgsz 
 187         sllx    %g2, CTXREG_NEXT_SHIFT, %g2     ! preserve nucleus pgsz 
 188         or      %g6, %g2, %g6                   ! %g6 = nucleus pgsz | primary pgsz | cnum      
 189         stxa    %g6, [%g4]ASI_DMMU              ! wr new ctxnum
 190         stxa    %g0, [%g1]ASI_DTLB_DEMAP
 191         stxa    %g0, [%g1]ASI_ITLB_DEMAP
 192         stxa    %g5, [%g4]ASI_DMMU              ! restore old ctxnum
 193         retry
 194         SET_SIZE(vtag_flushpage_tl1)
 195 

 196 










 197         ENTRY_NP(vtag_flush_pgcnt_tl1)
 198         /*
 199          * x-trap to flush pgcnt MMU_PAGESIZE pages from tlb
 200          *
 201          * %g1 = vaddr, zero-extended on 32-bit kernel
 202          * %g2 = <sfmmup58|pgcnt6>
 203          *
 204          * NOTE: this handler relies on the fact that no
 205          *      interrupts or traps can occur during the loop
 206          *      issuing the TLB_DEMAP operations. It is assumed
 207          *      that interrupts are disabled and this code is
 208          *      fetching from the kernel locked text address.
 209          *
 210          * assumes TSBE_TAG = 0
 211          */
 212         set     SFMMU_PGCNT_MASK, %g4
 213         and     %g4, %g2, %g3                   /* g3 = pgcnt - 1 */
 214         add     %g3, 1, %g3                     /* g3 = pgcnt */
 215 
 216         andn    %g2, SFMMU_PGCNT_MASK, %g2      /* g2 = sfmmup */


 255         srlx    %g6, CTXREG_NEXT_SHIFT, %g2     /* %g2 = nucleus pgsz */
 256         sllx    %g2, CTXREG_NEXT_SHIFT, %g2     /* preserve nucleus pgsz */
 257         or      %g5, %g2, %g5                   /* %g5 = nucleus pgsz | primary pgsz | cnum */
 258         stxa    %g5, [%g4]ASI_DMMU              /* wr new ctxnum */
 259 
 260         set     MMU_PAGESIZE, %g2               /* g2 = pgsize */
 261         sethi   %hi(FLUSH_ADDR), %g5
 262 3:
 263         stxa    %g0, [%g1]ASI_DTLB_DEMAP
 264         stxa    %g0, [%g1]ASI_ITLB_DEMAP
 265         flush   %g5                             ! flush required by immu
 266 
 267         deccc   %g3                             /* decr pgcnt */
 268         bnz,pt  %icc,3b
 269           add   %g1, %g2, %g1                   /* next page */
 270 
 271         stxa    %g6, [%g4]ASI_DMMU              /* restore old ctxnum */
 272         retry
 273         SET_SIZE(vtag_flush_pgcnt_tl1)
 274 

 275 










 276         ENTRY_NP(vtag_flushall_tl1)
 277         /*
 278          * x-trap to flush tlb
 279          */
 280         set     DEMAP_ALL_TYPE, %g4
 281         stxa    %g0, [%g4]ASI_DTLB_DEMAP        ! demap-all D-TLB
 282         stxa    %g0, [%g4]ASI_ITLB_DEMAP        ! demap-all I-TLB
 283         retry                                   ! resume at the trapped instr
 284         SET_SIZE(vtag_flushall_tl1)
 285 

 286 

 287 /*
 288  * VAC (virtual address conflict) does not apply to OPL.
 289  * VAC resolution is managed by the Olympus processor hardware.
 290  * As a result, all OPL VAC flushing routines are no-ops.
 291  */
 292 









 293         ENTRY(vac_flushpage)
 294         retl                            ! no-op: no VAC on OPL (see above)
 295           nop
 296         SET_SIZE(vac_flushpage)
 297 











 298         ENTRY_NP(vac_flushpage_tl1)
 299         retry                           ! no-op: no VAC on OPL (see above)
 300         SET_SIZE(vac_flushpage_tl1)
 301 

 302 










 303         ENTRY(vac_flushcolor)
 304         retl                            ! no-op: no VAC on OPL (see above)
 305          nop
 306         SET_SIZE(vac_flushcolor)
 307 

 308 
 309 










 310         ENTRY(vac_flushcolor_tl1)
 311         retry                           ! no-op: no VAC on OPL (see above)
 312         SET_SIZE(vac_flushcolor_tl1)
 313 












 314 /*
 315  * Determine whether or not the IDSR is busy.
 316  * Entry: no arguments
 317  * Returns: 1 if busy, 0 otherwise
 318  */
 319         ENTRY(idsr_busy)
        ! NOTE(review): the annulled delay slot below is executed only when
        ! the branch IS taken, i.e. when the IDSR busy bit is CLEAR.  As
        ! written this returns 1 when the IDSR is idle, which appears to
        ! invert the "1 if busy" contract stated in the comment above.
        ! Confirm against callers before relying on either description.
 320         ldxa    [%g0]ASI_INTR_DISPATCH_STATUS, %g1
 321         clr     %o0                             ! default return = 0
 322         btst    IDSR_BUSY, %g1                  ! Z=1 iff busy bit clear
 323         bz,a,pt %xcc, 1f                        ! ,a: delay slot runs only
 324         mov     1, %o0                          !    if the branch is taken
 325 1:
 326         retl
 327         nop
 328         SET_SIZE(idsr_busy)
 329 
















 330         .global _dispatch_status_busy
 331 _dispatch_status_busy:
 332         .asciz  "ASI_INTR_DISPATCH_STATUS error: busy"
 333         .align  4
 334 
 335 /*
 336  * Setup interrupt dispatch data registers
 337  * Entry:
 338  *      %o0 - function or inumber to call
 339  *      %o1, %o2 - arguments (2 uint64_t's)
 340  */
 341         .seg "text"
 342 
 343         ENTRY(init_mondo)
 344 #ifdef DEBUG
 345         !
 346         ! IDSR should not be busy at the moment
 347         !
 348         ldxa    [%g0]ASI_INTR_DISPATCH_STATUS, %g1
 349         btst    IDSR_BUSY, %g1


 363         mov     IDDR_1, %g2
 364         mov     IDDR_2, %g3
 365         stxa    %o0, [%g1]ASI_INTR_DISPATCH
 366 
 367         !
 368         ! interrupt vector dispatch data reg 1
 369         !
 370         stxa    %o1, [%g2]ASI_INTR_DISPATCH
 371 
 372         !
 373         ! interrupt vector dispatch data reg 2
 374         !
 375         stxa    %o2, [%g3]ASI_INTR_DISPATCH
 376 
 377         membar  #Sync
 378         retl
 379         nop
 380         SET_SIZE(init_mondo_nocheck)
 381         SET_SIZE(init_mondo)
 382 

 383 










 384 /*
 385  * Ship mondo to aid using busy/nack pair bn
 386  */
 387         ENTRY_NP(shipit)
 388         sll     %o0, IDCR_PID_SHIFT, %g1        ! IDCR<23:14> = agent id
 389         sll     %o1, IDCR_BN_SHIFT, %g2         ! IDCR<28:24> = b/n pair
 390         or      %g1, IDCR_OFFSET, %g1           ! IDCR<13:0> = 0x70
 391         or      %g1, %g2, %g1                   ! %g1 = assembled IDCR address
 392         stxa    %g0, [%g1]ASI_INTR_DISPATCH     ! interrupt vector dispatch
 393         membar  #Sync                           ! order the dispatch store
 394         retl
 395         nop                                     ! delay slot
 396         SET_SIZE(shipit)
 397 

 398 










 399 /*
 400  * flush_instr_mem:
 401  *      Flush 1 page of the I-$ starting at vaddr
 402  *      %o0 vaddr
 403  *      %o1 bytes to be flushed
 404  *
 405  * SPARC64-VI maintains consistency of the on-chip Instruction Cache with
 406  * the stores from all processors so that a FLUSH instruction is only needed
 407  * to ensure pipeline is consistent. This means a single flush is sufficient at
 408  * the end of a sequence of stores that updates the instruction stream to
 409  * ensure correct operation.
 410  */
 411 
 412         ENTRY(flush_instr_mem)
        ! A single flush suffices on SPARC64-VI (see block comment above);
        ! the %o0/%o1 range arguments need not be walked.
 413         flush   %o0                     ! address irrelevant
 414         retl
 415         nop                             ! delay slot
 416         SET_SIZE(flush_instr_mem)
 417 

 418 

 419 /*
 420  * flush_ecache:
 421  *      %o0 - 64 bit physical address
 422  *      %o1 - ecache size
 423  *      %o2 - ecache linesize
 424  */

 425 







 426         ENTRY(flush_ecache)
 427 
 428         /*
 429          * Flush the entire Ecache.
 430          */
 431         ECACHE_FLUSHALL(%o1, %o2, %o0, %o4)     ! %o1=size %o2=linesize
                                                    ! %o0=flush addr (unused on OPL)
 432         retl
 433         nop                                     ! delay slot
 434         SET_SIZE(flush_ecache)
 435 













 436         /*
 437          * I/D cache flushing is not needed for OPL processors
 438          */
 439         ENTRY(kdi_flush_idcache)
 440         retl                            ! no-op (args ignored)
 441         nop
 442         SET_SIZE(kdi_flush_idcache)
 443 


 444 #ifdef  TRAPTRACE
 445 /*
 446  * Simplified trap trace macro for OPL. Adapted from us3.
 447  */
 448 #define OPL_TRAPTRACE(ptr, scr1, scr2, label)                   \
 449         CPU_INDEX(scr1, ptr);                                   \
 450         sll     scr1, TRAPTR_SIZE_SHIFT, scr1;                  \
 451         set     trap_trace_ctl, ptr;                            \
 452         add     ptr, scr1, scr1;                                \
 453         ld      [scr1 + TRAPTR_LIMIT], ptr;                     \
 454         tst     ptr;                                            \
 455         be,pn   %icc, label/**/1;                               \
 456          ldx    [scr1 + TRAPTR_PBASE], ptr;                     \
 457         ld      [scr1 + TRAPTR_OFFSET], scr1;                   \
 458         add     ptr, scr1, ptr;                                 \
 459         rd      %asi, scr2;                                     \
 460         wr      %g0, TRAPTR_ASI, %asi;                          \
 461         rd      STICK, scr1;                                    \
 462         stxa    scr1, [ptr + TRAP_ENT_TICK]%asi;                \
 463         rdpr    %tl, scr1;                                      \


 811  * %pstate, %pc, %npc are propagated to %tstate, %tpc, %tnpc,
 812  * and we reset these registers here.
 813  */
 814 #define RESET_CUR_TSTATE(tmp)                                   \
 815         set     TSTATE_KERN, tmp                                ;\
 816         wrpr    %g0, tmp, %tstate                               ;\
 817         wrpr    %g0, 0, %tpc                                    ;\
 818         wrpr    %g0, 0, %tnpc                                   ;\
 819         RESET_WINREG(tmp)
 820 
 821 /*
 822  * In case of urgent errors some MMU registers may be
 823  * corrupted, so we set here some reasonable values for
 824  * them. Note that resetting MMU registers also reset the context
 825  * info, we will need to reset the window registers to prevent
 826  * spill/fill that depends on context info for correct behaviour.
 827  * Note that the TLBs must be flushed before programming the context
 828  * registers.
 829  */
 830 

 831 #define RESET_MMU_REGS(tmp1, tmp2, tmp3)                        \
 832         FLUSH_ALL_TLB(tmp1)                                     ;\
 833         set     MMU_PCONTEXT, tmp1                              ;\
 834         sethi   %hi(kcontextreg), tmp2                          ;\
 835         ldx     [tmp2 + %lo(kcontextreg)], tmp2                 ;\
 836         stxa    tmp2, [tmp1]ASI_DMMU                            ;\
 837         set     MMU_SCONTEXT, tmp1                              ;\
 838         stxa    tmp2, [tmp1]ASI_DMMU                            ;\
 839         sethi   %hi(ktsb_base), tmp1                            ;\
 840         ldx     [tmp1 + %lo(ktsb_base)], tmp2                   ;\
 841         mov     MMU_TSB, tmp3                                   ;\
 842         stxa    tmp2, [tmp3]ASI_IMMU                            ;\
 843         stxa    tmp2, [tmp3]ASI_DMMU                            ;\
 844         membar  #Sync                                           ;\
 845         RESET_WINREG(tmp1)
 846 
/*
 * Zero the I- and D-MMU tag access registers and synchronize, discarding
 * any tag-access state (NOTE(review): presumably after a corruption/urgent
 * error event, matching the surrounding reset macros -- confirm intent).
 */
 847 #define RESET_TSB_TAGPTR(tmp)                                   \
 848         set     MMU_TAG_ACCESS, tmp                             ;\
 849         stxa    %g0, [tmp]ASI_IMMU                              ;\
 850         stxa    %g0, [tmp]ASI_DMMU                              ;\
 851         membar  #Sync

 852 
 853 /*
 854  * In case of errors in the MMU_TSB_PREFETCH registers we have to
 855  * reset them. We can use "0" as the reset value, this way we set
 856  * the "V" bit of the registers to 0, which will disable the prefetch
 857  * so the values of the other fields are irrelevant.
 858  */

 859 #define RESET_TSB_PREFETCH(tmp)                 \
 860         set     VA_UTSBPREF_8K, tmp             ;\
 861         stxa    %g0, [tmp]ASI_ITSB_PREFETCH     ;\
 862         set     VA_UTSBPREF_4M, tmp             ;\
 863         stxa    %g0, [tmp]ASI_ITSB_PREFETCH     ;\
 864         set     VA_KTSBPREF_8K, tmp             ;\
 865         stxa    %g0, [tmp]ASI_ITSB_PREFETCH     ;\
 866         set     VA_KTSBPREF_4M, tmp             ;\
 867         stxa    %g0, [tmp]ASI_ITSB_PREFETCH     ;\
 868         set     VA_UTSBPREF_8K, tmp             ;\
 869         stxa    %g0, [tmp]ASI_DTSB_PREFETCH     ;\
 870         set     VA_UTSBPREF_4M, tmp             ;\
 871         stxa    %g0, [tmp]ASI_DTSB_PREFETCH     ;\
 872         set     VA_KTSBPREF_8K, tmp             ;\
 873         stxa    %g0, [tmp]ASI_DTSB_PREFETCH     ;\
 874         set     VA_KTSBPREF_4M, tmp             ;\
 875         stxa    %g0, [tmp]ASI_DTSB_PREFETCH

 876 
 877 /*
 878  * In case of errors in the MMU_SHARED_CONTEXT register we have to
 879  * reset its value. We can use "0" as the reset value, it will put
 880  * 0 in the IV field disabling the shared context support, and
 881  * making values of all the other fields of the register irrelevant.
 882  */

 883 #define RESET_SHARED_CTXT(tmp)                  \
 884         set     MMU_SHARED_CONTEXT, tmp         ;\
 885         stxa    %g0, [tmp]ASI_DMMU

 886 
 887 /*
 888  * RESET_TO_PRIV()
 889  *
 890  * In many cases, we need to force the thread into privilege mode because
 891  * privileged mode is the only thing in which the system continues to work
 892  * due to undeterminable user mode information that come from register
 893  * corruption.
 894  *
 895  *  - opl_uger_ctxt
 896  *    If the error is secondary TSB related register parity, we have no idea
 897  *    what value is supposed to be for it.
 898  *
 899  *  The below three cases %tstate is not accessible until it is overwritten
 900  *  with some value, so we have no clue if the thread was running on user mode
 901  *  or not
 902  *   - opl_uger_pstate
 903  *     If the error is %pstate parity, it propagates to %tstate.
 904  *   - opl_uger_tstate
 905  *     No need to say the reason


 929  */
 930 #define RESET_TO_PRIV(tmp, tmp1, tmp2, local)                   \
 931         RESET_MMU_REGS(tmp, tmp1, tmp2)                         ;\
 932         CPU_ADDR(tmp, tmp1)                                     ;\
 933         ldx     [tmp + CPU_THREAD], local                       ;\
 934         ldx     [local + T_STACK], tmp                          ;\
 935         sub     tmp, STACK_BIAS, %sp                            ;\
 936         rdpr    %pstate, tmp                                    ;\
 937         wrpr    tmp, PSTATE_AG, %pstate                         ;\
 938         mov     local, %g7                                      ;\
 939         rdpr    %pstate, local                                  ;\
 940         wrpr    local, PSTATE_AG, %pstate                       ;\
 941         wrpr    %g0, 1, %tl                                     ;\
 942         set     TSTATE_KERN, tmp                                ;\
 943         rdpr    %cwp, tmp1                                      ;\
 944         or      tmp, tmp1, tmp                                  ;\
 945         wrpr    tmp, %g0, %tstate                               ;\
 946         wrpr    %g0, %tpc
 947 
 948 








 949 /*
 950  * We normally don't expect CE traps since we disable the
 951  * 0x63 trap reporting at the start of day. There is a
 952  * small window before we disable them, so let check for
 953  * it. Otherwise, panic.
 954  */
 955 
 956         .align  128
 957         ENTRY_NP(ce_err)
 958         mov     AFSR_ECR, %g1
 959         ldxa    [%g1]ASI_ECR, %g1               ! read error control reg
 960         andcc   %g1, ASI_ECR_RTE_UE | ASI_ECR_RTE_CEDG, %g0
 961         bz,pn   %xcc, 1f                        ! RTE traps disabled -> panic
 962          nop                                    ! delay slot
 963         retry                                   ! still in the enable window:
                                                    ! ignore this early CE trap
 964 1:
 965         /*
 966          * We did disable the 0x63 trap reporting.
 967          * This shouldn't happen - panic.
 968          */
 969         set     trap, %g1                       ! sys_trap handler = trap()
 970         rdpr    %tt, %g3                        ! %g3 = trap type
 971         sethi   %hi(sys_trap), %g5
 972         jmp     %g5 + %lo(sys_trap)             ! never returns
 973         sub     %g0, 1, %g4                     ! delay: %g4 = -1 (PIL arg to
                                                    ! sys_trap -- confirm meaning)
 974         SET_SIZE(ce_err)
 975 

 976 









 977 /*
 978  * We don't use trap for CE detection.
 979  */
 980         ENTRY_NP(ce_err_tl1)
 981         set     trap, %g1                       ! sys_trap handler = trap()
 982         rdpr    %tt, %g3                        ! %g3 = trap type
 983         sethi   %hi(sys_trap), %g5
 984         jmp     %g5 + %lo(sys_trap)             ! panic path; never returns
 985         sub     %g0, 1, %g4                     ! delay: %g4 = -1 (PIL arg to
                                                    ! sys_trap -- confirm meaning)
 986         SET_SIZE(ce_err_tl1)
 987 

 988 









 989 /*
 990  * async_err is the default handler for IAE/DAE traps.
 991  * For OPL, we patch in the right handler at start of day.
 992  * But if a IAE/DAE trap get generated before the handler
 993  * is patched, panic.
 994  */
 995         ENTRY_NP(async_err)
 996         set     trap, %g1                       ! sys_trap handler = trap()
 997         rdpr    %tt, %g3                        ! %g3 = trap type
 998         sethi   %hi(sys_trap), %g5
 999         jmp     %g5 + %lo(sys_trap)             ! panic path; never returns
1000         sub     %g0, 1, %g4                     ! delay: %g4 = -1 (PIL arg to
                                                    ! sys_trap -- confirm meaning)
1001         SET_SIZE(async_err)
1002 








1003         .seg    ".data"
1004         .global opl_clr_freg
1005         .global opl_cpu0_err_log
1006 
1007         .align  16
1008 opl_clr_freg:
1009         .word   0
1010         .align  16
1011 
1012         .align  MMU_PAGESIZE
1013 opl_cpu0_err_log:
1014         .skip   MMU_PAGESIZE
1015 
1016 /*
1017  * Common synchronous error trap handler (tt=0xA, 0x32)
1018  * All TL=0 and TL>0 0xA and 0x32 traps vector to this handler.
1019  * The error handling can be best summarized as follows:
1020  * 0. Do TRAPTRACE if enabled.
1021  * 1. Save globals %g1, %g2 & %g3 onto the scratchpad regs.
1022  * 2. The SFSR register is read and verified as valid by checking


1169         mov     %g5, %g3                ! pass SFSR to the 3rd arg
1170         mov     %g6, %g2                ! pass SFAR to the 2nd arg
1171         set     opl_cpu_isync_tl1_error, %g1
1172         set     opl_cpu_dsync_tl1_error, %g6
1173         cmp     %g4, T_INSTR_ERROR
1174         movne   %icc, %g6, %g1
1175         ba,pt   %icc, 6f
1176         nop
1177 3:
1178         mov     %g5, %g3                ! pass SFSR to the 3rd arg
1179         mov     %g6, %g2                ! pass SFAR to the 2nd arg
1180         set     opl_cpu_isync_tl0_error, %g1
1181         set     opl_cpu_dsync_tl0_error, %g6
1182         cmp     %g4, T_INSTR_ERROR
1183         movne   %icc, %g6, %g1
1184 6:
1185         sethi   %hi(sys_trap), %g5
1186         jmp     %g5 + %lo(sys_trap)
1187          mov    PIL_15, %g4
1188         SET_SIZE(opl_sync_trap)

1189 





1190 /*
1191  * Common Urgent error trap handler (tt=0x40)
1192  * All TL=0 and TL>0 0x40 traps vector to this handler.
1193  * The error handling can be best summarized as follows:
1194  * 1. Read the Urgent error status register (UGERSR)
1195  *    Faultaddress is N/A here and it is not collected.
1196  * 2. Check to see if we have a multiple errors case
1197  *    If so, we enable WEAK_ED (weak error detection) bit
1198  *    to prevent any potential error storms and branch directly
1199  *    to generate ereport. (we don't decode/handle individual
1200  *    error cases when we get a multiple error situation)
1201  * 3. Now look for the recoverable error cases which include
1202  *    IUG_DTLB, IUG_ITLB or COREERR errors. If any of the
1203  *    recoverable errors are detected, do the following:
1204  *    - Flush all tlbs.
1205  *    - Verify that we came from TL=0, if not, generate
1206  *      ereport. Note that the reason we don't recover
1207  *      at TL>0 is because the AGs might be corrupted or
1208  *      inconsistent. We can't save/restore them into
1209  *      the scratchpad regs like we did for opl_sync_trap().


1384          nop
1385 
1386 opl_uger_panic1:
1387         mov     %g1, %g2                        ! %g2 = arg #1
1388         LOG_UGER_REG(%g1, %g3, %g4)
1389         RESET_TO_PRIV(%g1, %g3, %g4, %l0)
1390 
1391         /*
1392          * Set up the argument for sys_trap.
1393          * %g2 = arg #1 already set above
1394          */
1395 opl_uger_panic_cmn:
1396         RESET_USER_RTT_REGS(%g4, %g5, opl_uger_panic_resetskip)
1397 opl_uger_panic_resetskip:
1398         rdpr    %tl, %g3                        ! arg #2
1399         set     opl_cpu_urgent_error, %g1       ! pc
1400         sethi   %hi(sys_trap), %g5
1401         jmp     %g5 + %lo(sys_trap)
1402          mov    PIL_15, %g4
1403         SET_SIZE(opl_uger_trap)

1404 








1405 /*
1406  * OPL ta3 support (note please, that win_reg
1407  * area size for each cpu is 2^7 bytes)
1408  */
1409 
1410 #define RESTORE_WREGS(tmp1, tmp2)               \
1411         CPU_INDEX(tmp1, tmp2)                   ;\
1412         sethi   %hi(opl_ta3_save), tmp2         ;\
1413         ldx     [tmp2 +%lo(opl_ta3_save)], tmp2 ;\
1414         sllx    tmp1, 7, tmp1                   ;\
1415         add     tmp2, tmp1, tmp2                ;\
1416         ldx     [tmp2 + 0], %l0                 ;\
1417         ldx     [tmp2 + 8], %l1                 ;\
1418         ldx     [tmp2 + 16], %l2                ;\
1419         ldx     [tmp2 + 24], %l3                ;\
1420         ldx     [tmp2 + 32], %l4                ;\
1421         ldx     [tmp2 + 40], %l5                ;\
1422         ldx     [tmp2 + 48], %l6                ;\
1423         ldx     [tmp2 + 56], %l7                ;\
1424         ldx     [tmp2 + 64], %i0                ;\


1480         ba,a    fast_trap_done
1481         SET_SIZE(opl_ta3_trap)
1482 
        /*
         * opl_cleanw_subr: out-of-line body for the patched
         * .clean_windows handler (see opl_ta4_instr).  Called leaf-style
         * with the return address in %g7 (set up by the jmpl at the patch
         * site); returns with "jmp %g7".
         *
         * Saves the current window's registers to the per-cpu ta3 save
         * area, flushes all register windows, restores the saved
         * registers into the original window, and returns.
         *
         * NOTE(review): %g1/%g3/%g4 are preloaded with trap()/T_FLUSHW/-1
         * (the sys_trap argument pattern) but are not consumed on the
         * visible path here -- presumably used by a spill/fill fault path
         * reached from flushw; confirm against sys_trap/fault handlers.
         */
        ENTRY_NP(opl_cleanw_subr)
        set     trap, %g1
        mov     T_FLUSHW, %g3
        sub     %g0, 1, %g4
        rdpr    %cwp, %g5               ! remember the current window
        SAVE_WREGS(%g2, %g6)            ! stash window regs to save area
        save
        flushw                          ! spill all active windows
        rdpr    %cwp, %g6
        wrpr    %g5, %cwp               ! back to the original window
        RESTORE_WREGS(%g2, %g5)         ! refill the saved registers
        wrpr    %g6, %cwp
        restored
        restore
        jmp     %g7                     ! return to caller (leaf linkage)
          nop
        SET_SIZE(opl_cleanw_subr)

1500 







/*
 * The actual trap handler for tt=0x0a, and tt=0x32
 *
 * Trap-table patch instruction sequence: saves %g1-%g3 into the
 * scratchpad registers, then branches to the common opl_sync_trap
 * handler with the trap type loaded into %g1 in the delay slot.
 * Padded with ".align 32" to fill the trap-table entry.
 */
        ENTRY_NP(opl_serr_instr)
        OPL_SAVE_GLOBAL(%g1,%g2,%g3)
        sethi   %hi(opl_sync_trap), %g3
        jmp     %g3 + %lo(opl_sync_trap)
         rdpr    %tt, %g1               ! (delay) %g1 = trap type
        .align  32                      ! pad out the trap-table slot
        SET_SIZE(opl_serr_instr)
1511 









/*
 * The actual trap handler for tt=0x40
 *
 * Trap-table patch instruction sequence: branches straight to the
 * common opl_uger_trap (urgent error) handler.  Padded with
 * ".align 32" to fill the trap-table entry.
 */
        ENTRY_NP(opl_ugerr_instr)
        sethi   %hi(opl_uger_trap), %g3
        jmp     %g3 + %lo(opl_uger_trap)
         nop
        .align  32                      ! pad out the trap-table slot
        SET_SIZE(opl_ugerr_instr)
1521 









/*
 * The actual trap handler for tt=0x103 (flushw)
 *
 * Trap-table patch instruction sequence: branches straight to the
 * OPL-specific opl_ta3_trap flushw handler.  Padded with ".align 32"
 * to fill the trap-table entry.
 */
        ENTRY_NP(opl_ta3_instr)
        sethi   %hi(opl_ta3_trap), %g3
        jmp     %g3 + %lo(opl_ta3_trap)
         nop
        .align  32                      ! pad out the trap-table slot
        SET_SIZE(opl_ta3_instr)
1531 









/*
 * The patch for the .clean_windows code
 *
 * Computes a return address into %g7 and calls opl_cleanw_subr,
 * which does the real work and comes back via "jmp %g7".  The jmpl
 * writes its own address into %g7; the delay-slot add steps %g7
 * past the jmpl/add pair so the subroutine returns to the first nop.
 */
        ENTRY_NP(opl_ta4_instr)
        sethi   %hi(opl_cleanw_subr), %g3
        add     %g3, %lo(opl_cleanw_subr), %g3
        jmpl    %g3, %g7                ! %g7 = address of this jmpl
          add   %g7, 8, %g7             ! (delay) %g7 += 8: skip jmpl+delay
        nop
        nop
        nop
        SET_SIZE(opl_ta4_instr)
1544 














        /*
         * void stick_timestamp(int64_t *ts);
         *
         * Read the %stick counter, clear the NPT (non-privileged trap)
         * bit in bit 63 via the shift-left/shift-right pair, and store
         * the result at *ts.
         */
        ENTRY_NP(stick_timestamp)
        rd      STICK, %g1      ! read stick reg
        sllx    %g1, 1, %g1
        srlx    %g1, 1, %g1     ! clear npt bit (bit 63)

        retl
        stx     %g1, [%o0]      ! store the timestamp
        SET_SIZE(stick_timestamp)
1553 

1554 













        /*
         * void stick_adj(int64_t skew);
         *
         * Add 'skew' (%o0) to the %stick counter.  Interrupts are
         * disabled while the counter is modified, and the read/add/write
         * sequence is aligned so it sits in one cache line, keeping the
         * window between reading and writing %stick as short as possible.
         */
        ENTRY_NP(stick_adj)
        rdpr    %pstate, %g1            ! save processor state
        andn    %g1, PSTATE_IE, %g3
        ba      1f                      ! cache align stick adj
        wrpr    %g0, %g3, %pstate       ! turn off interrupts

        .align  16
1:      nop

        rd      STICK, %g4              ! read stick reg
        add     %g4, %o0, %o1           ! adjust stick with skew
        wr      %o1, %g0, STICK         ! write stick reg

        retl
        wrpr    %g1, %pstate            ! restore processor state
        SET_SIZE(stick_adj)
1571 















        /*
         * int kdi_get_stick(uint64_t *stickp);
         *
         * Debugger (kmdb) helper: store the raw %stick value at
         * *stickp and return 0.
         */
        ENTRY_NP(kdi_get_stick)
        rd      STICK, %g1
        stx     %g1, [%o0]              ! *stickp = %stick
        retl
        mov     %g0, %o0                ! (delay) return 0
        SET_SIZE(kdi_get_stick)
1578 











1579         ENTRY(dtrace_blksuword32)
1580         save    %sp, -SA(MINFRAME + 4), %sp
1581 
1582         rdpr    %pstate, %l1
1583         andn    %l1, PSTATE_IE, %l2             ! disable interrupts to
1584         wrpr    %g0, %l2, %pstate               ! protect our FPU diddling
1585 
1586         rd      %fprs, %l0
1587         andcc   %l0, FPRS_FEF, %g0
1588         bz,a,pt %xcc, 1f                        ! if the fpu is disabled
1589         wr      %g0, FPRS_FEF, %fprs            ! ... enable the fpu
1590 
1591         st      %f0, [%fp + STACK_BIAS - 4]     ! save %f0 to the stack
1592 1:
1593         set     0f, %l5
1594         /*
1595          * We're about to write a block full or either total garbage
1596          * (not kernel data, don't worry) or user floating-point data
1597          * (so it only _looks_ like garbage).
1598          */


1623         wr      %g0, %l0, %fprs                 ! restore %fprs
1624 
1625         ld      [%fp + STACK_BIAS - 4], %f0     ! restore %f0
1626 1:
1627 
1628         wrpr    %g0, %l1, %pstate               ! restore interrupts
1629 
1630         /*
1631          * If tryagain is set (%i2) we tail-call dtrace_blksuword32_err()
1632          * which deals with watchpoints. Otherwise, just return -1.
1633          */
1634         brnz,pt %i2, 1f
1635         nop
1636         ret
1637         restore %g0, -1, %o0
1638 1:
1639         call    dtrace_blksuword32_err
1640         restore
1641 
1642         SET_SIZE(dtrace_blksuword32)

1643 







        /*
         * void ras_cntr_reset(void);
         *
         * Re-read this cpu's ERRLOG scratchpad register and set every
         * bit of the NUMERR field, then write it back.  This matches
         * what opl_error_setup() does at init time, so presumably an
         * all-ones NUMERR is how the RAS error counter is (re)armed on
         * this chip -- confirm against the SPARC64-VI error spec.
         */
        ENTRY_NP(ras_cntr_reset)
        set     OPL_SCRATCHPAD_ERRLOG, %o1
        ldxa    [%o1]ASI_SCRATCHPAD, %o0        ! current errlog value
        or      %o0, ERRLOG_REG_NUMERR_MASK, %o0 ! NUMERR = all ones
        retl
         stxa   %o0, [%o1]ASI_SCRATCHPAD        ! (delay) write it back
        SET_SIZE(ras_cntr_reset)

1651 








        /*
         * void opl_error_setup(uint64_t arg);
         *
         * Per-cpu error-handling register setup:
         * 1. Build the ERRLOG scratchpad value: this cpu's EIDR (masked
         *    and shifted into its field), OR the caller-supplied value
         *    in %o0 (presumably the per-cpu error-log address -- see
         *    opl_cpu0_err_log; confirm at the call site), OR an all-ones
         *    NUMERR field; store it via ASI_SCRATCHPAD.
         * 2. Clear the UE/CEDG restrainable-error-trap enable bits in
         *    the ECR.
         */
        ENTRY_NP(opl_error_setup)
        /*
         * Initialize the error log scratchpad register
         */
        ldxa    [%g0]ASI_EIDR, %o2              ! %o2 = cpu EIDR value
        sethi   %hi(ERRLOG_REG_EIDR_MASK), %o1
        or      %o1, %lo(ERRLOG_REG_EIDR_MASK), %o1
        and     %o2, %o1, %o3                   ! isolate the EIDR field
        sllx    %o3, ERRLOG_REG_EIDR_SHIFT, %o2 ! position it in the log reg
        or      %o2, %o0, %o3                   ! merge the caller's value
        or      %o3, ERRLOG_REG_NUMERR_MASK, %o0 ! NUMERR = all ones
        set     OPL_SCRATCHPAD_ERRLOG, %o1
        stxa    %o0, [%o1]ASI_SCRATCHPAD
        /*
         * Disable all restrainable error traps
         */
        mov     AFSR_ECR, %o1
        ldxa    [%o1]ASI_AFSR, %o0
        andn    %o0, ASI_ECR_RTE_UE|ASI_ECR_RTE_CEDG, %o0
        retl
          stxa  %o0, [%o1]ASI_AFSR              ! (delay) write back ECR
        SET_SIZE(opl_error_setup)

1674 







        /*
         * void cpu_early_feature_init(void);
         *
         * Early per-cpu MMU feature setup: turn on multi-pagesize
         * support in the shared I/D TLBs, then demap all existing
         * translations and flush so the new mode takes effect cleanly.
         */
        ENTRY_NP(cpu_early_feature_init)
        /*
         * Enable MMU translating multiple page sizes for
         * sITLB and sDTLB.
         */
        mov     LSU_MCNTL, %o0
        ldxa    [%o0] ASI_MCNTL, %o1            ! read memory control reg
        or      %o1, MCNTL_MPG_SITLB | MCNTL_MPG_SDTLB, %o1
          stxa  %o1, [%o0] ASI_MCNTL            ! write back with MPG bits set
        /*
         * Demap all previous entries.
         */
        sethi   %hi(FLUSH_ADDR), %o1
        set     DEMAP_ALL_TYPE, %o0
        stxa    %g0, [%o0]ASI_DTLB_DEMAP        ! demap-all: D-TLB
        stxa    %g0, [%o0]ASI_ITLB_DEMAP        ! demap-all: I-TLB
        retl
          flush %o1                             ! (delay) synchronize
        SET_SIZE(cpu_early_feature_init)

1694 

1695 /*
1696  * This function is called for each (enabled) CPU. We use it to
1697  * initialize error handling related registers.
1698  */





        /*
         * void cpu_feature_init(void);
         *
         * Record this cpu's device serial id in its cpunodes entry,
         * then branch to opl_cpu_reg_init for the remaining register
         * setup.  The "ba" (not call) leaves %o7 untouched, so
         * opl_cpu_reg_init returns directly to our caller.
         */
        ENTRY(cpu_feature_init)
        !
        ! get the device_id and store the device_id
        ! in the appropriate cpunodes structure
        ! given the cpus index
        !
        CPU_INDEX(%o0, %o1)                     ! %o0 = this cpu's index
        mulx %o0, CPU_NODE_SIZE, %o0            ! byte offset into cpunodes[]
        set  cpunodes + DEVICE_ID, %o1
        ldxa [%g0] ASI_DEVICE_SERIAL_ID, %o2    ! read hardware serial id
        stx  %o2, [%o0 + %o1]                   ! cpunodes[i].device_id = id
        !
        ! initialize CPU registers
        !
        ba      opl_cpu_reg_init                ! tail-branch; %o7 preserved
        nop
        SET_SIZE(cpu_feature_init)

1716 







        /*
         * Clear the NPT (non-privileged trap) bit in the %tick/%stick
         * registers. In an effort to make the change in the
         * tick/stick counter as consistent as possible, we disable
         * all interrupts while we're changing the registers. We also
         * ensure that the read and write instructions are in the same
         * line in the instruction cache.
         *
         * Non-standard linkage: entered with the return address in %g4
         * (returns via "jmp %g4 + 4") and uses only global registers,
         * so it is safe very early in cpu startup.  NPT is bit 63, so
         * "brgez" (branch if >= 0) skips the clear when it is already 0.
         */
        ENTRY_NP(cpu_clearticknpt)
        rdpr    %pstate, %g1            /* save processor state */
        andn    %g1, PSTATE_IE, %g3     /* turn off */
        wrpr    %g0, %g3, %pstate       /*   interrupts */
        rdpr    %tick, %g2              /* get tick register */
        brgez,pn %g2, 1f                /* if NPT bit off, we're done */
        mov     1, %g3                  /* create mask */
        sllx    %g3, 63, %g3            /*   for NPT bit */
        ba,a,pt %xcc, 2f
        .align  8                       /* Ensure rd/wr in same i$ line */
2:
        rdpr    %tick, %g2              /* get tick register */
        wrpr    %g3, %g2, %tick         /* write tick register, */
                                        /*   clearing NPT bit   */
1:
        rd      STICK, %g2              /* get stick register */
        brgez,pn %g2, 3f                /* if NPT bit off, we're done */
        mov     1, %g3                  /* create mask */
        sllx    %g3, 63, %g3            /*   for NPT bit */
        ba,a,pt %xcc, 4f
        .align  8                       /* Ensure rd/wr in same i$ line */
4:
        rd      STICK, %g2              /* get stick register */
        wr      %g3, %g2, STICK         /* write stick register, */
                                        /*   clearing NPT bit   */
3:
        jmp     %g4 + 4                 /* return: addr was passed in %g4 */
        wrpr    %g0, %g1, %pstate       /* restore processor state */

        SET_SIZE(cpu_clearticknpt)
1755 














        /*
         * Halt the current strand with the suspend instruction.
         * The compiler/asm currently does not support this suspend
         * instruction mnemonic, use byte code for now.
         */
        ENTRY_NP(cpu_halt_cpu)
        .word   0x81b01040              ! hand-assembled "suspend" opcode
        retl
        nop
        SET_SIZE(cpu_halt_cpu)
1766 
        /*
         * Pause the current strand with the sleep instruction.
         * The compiler/asm currently does not support this sleep
         * instruction mnemonic, use byte code for now.
         */
        ENTRY_NP(cpu_smt_pause)
        .word   0x81b01060              ! hand-assembled "sleep" opcode
        retl
        nop
        SET_SIZE(cpu_smt_pause)
1777