/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#if defined(lint)
#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/promif.h>
#include <sys/prom_isa.h>
#endif	/* lint */

#include <sys/asm_linkage.h>
#include <sys/intreg.h>
#include <sys/ivintr.h>
#include <sys/mmu.h>
#include <sys/machpcb.h>
#include <sys/machtrap.h>
#include <sys/machlock.h>
#include <sys/fdreg.h>
#include <sys/vis.h>
#include <sys/traptrace.h>
#include <sys/panic.h>
#include <sys/machasi.h>
#include <sys/clock.h>
#include <vm/hat_sfmmu.h>
#if defined(lint)

#include <sys/thread.h>
#include <sys/time.h>

#else	/* lint */

#include "assym.h"


!
! REGOFF must add up to allow double word access to r_tstate.
! PCB_WBUF must also be aligned.
!
#if (REGOFF & 7) != 0
#error "struct regs not aligned"
#endif

/*
 * Absolute external symbols.
 * On the sun4u we put the panic buffer in the third and fourth pages.
 * We set things up so that the first 2 pages of KERNELBASE are illegal,
 * to act as a redzone during copyin/copyout type operations. One of
 * the reasons the panic buffer is allocated in low memory is to
 * prevent it from being overwritten during booting operations (besides
 * the fact that it is small enough to share pages with others).
 */

	.seg	".data"
	.global	panicbuf

PROM	= 0xFFE00000			! address of prom virtual area
panicbuf = SYSBASE32 + PAGESIZE		! address of panic buffer

	.type	panicbuf, #object
	.size	panicbuf, PANICBUFSIZE

/*
 * Absolute external symbol - intr_vec_table.
 *
 * With new bus structures supporting a larger number of interrupt
 * numbers, the interrupt vector table, intr_vec_table[], has been
 * moved out of the kernel nucleus and is allocated after panicbuf.
 */
	.global	intr_vec_table

intr_vec_table = SYSBASE32 + PAGESIZE + PANICBUFSIZE	! address of interrupt table

	.type	intr_vec_table, #object
	.size	intr_vec_table, MAXIVNUM * CPTRSIZE + MAX_RSVD_IV * IV_SIZE + MAX_RSVD_IVX * (IV_SIZE + CPTRSIZE * (NCPU - 1))

/*
 * The thread 0 stack. This must be the first thing in the data
 * segment (other than an sccs string) so that we don't stomp
 * on anything important if the stack overflows. We get a
 * red zone below this stack for free when the kernel text is
 * write protected.
 */

	.global	t0stack
	.align	16
	.type	t0stack, #object
t0stack:
	.skip	T0STKSZ			! thread 0 stack
t0stacktop:
	.size	t0stack, T0STKSZ

/*
 * cpu0 and its ptl1_panic stack.  The cpu structure must be allocated
 * on a single page for ptl1_panic's physical address accesses.
 */
	.global	cpu0
	.align	MMU_PAGESIZE
cpu0:
	.type	cpu0, #object
	.skip	CPU_ALLOC_SIZE
	.size	cpu0, CPU_ALLOC_SIZE

	.global	t0
	.align	PTR24_ALIGN		! alignment for mutex.
	.type	t0, #object
t0:
	.skip	THREAD_SIZE		! thread 0
	.size	t0, THREAD_SIZE

#ifdef	TRAPTRACE
	.global	trap_trace_ctl
	.global	trap_tr0
	.global	trap_trace_bufsize
	.global	trap_freeze
	.global	trap_freeze_pc

	.align	4
trap_trace_bufsize:
	.word	TRAP_TSIZE		! default trap buffer size
trap_freeze:
	.word	0

	.align	64
trap_trace_ctl:
	.skip	NCPU * TRAPTR_SIZE	! NCPU control headers

	.align	16
trap_tr0:
	.skip	TRAP_TSIZE		! one buffer for the boot cpu

/*
 * When an assertion in TRACE_PTR fails, the %pc is saved in trap_freeze_pc
 * to show in which TRACE_PTR the assertion failure happened.
 */
	.align	8
trap_freeze_pc:
	.nword	0
#endif	/* TRAPTRACE */

	.align	4
	.seg	".text"

#ifdef	NOPROM
	.global	availmem
availmem:
	.word	0
#endif	/* NOPROM */

	.align	8
_local_p1275cis:
	.nword	0

#endif	/* lint */

#if defined(lint)

void
_start(void)
{}

#else	/* lint */

	.seg	".data"

	.global	nwindows, nwin_minus_one, winmask
nwindows:
	.word	8
nwin_minus_one:
	.word	7
winmask:
	.word	8

	.global	afsrbuf
afsrbuf:
	.word	0,0,0,0

/*
 * System initialization
 *
 * Our contract with the boot prom specifies that the MMU is on and the
 * first 16 meg of memory is mapped with a level-1 pte.  We are called
 * with the p1275cis ptr in %o0 and kdi_dvec in %o1; we start execution
 * directly from physical memory, so we need to get up into our proper
 * addresses quickly: all code before we do this must be position
 * independent.
 *
 * NB: The above is not true for the boot/stick kernel; the only thing
 * mapped is the text+data+bss.  The kernel is loaded directly into
 * KERNELBASE.
 *
 *	On entry, the romvec pointer (romp) is the first argument;
 *	  i.e., %o0.
 *	The bootops vector is in the third argument (%o1).
 *
 * Our tasks are:
 *	save parameters
 *	construct mappings for KERNELBASE (not needed for boot/stick kernel)
 *	hop up into high memory (not needed for boot/stick kernel)
 *	initialize stack pointer
 *	initialize trap base register
 *	initialize window invalid mask
 *	initialize psr (with traps enabled)
 *	figure out all the module type stuff
 *	tear down the 1-1 mappings
 *	dive into main()
 */
	ENTRY_NP(_start)
	!
	! Stash away our arguments in memory.
	!
	sethi	%hi(_local_p1275cis), %g1
	stn	%o4, [%g1 + %lo(_local_p1275cis)]

	!
	! Initialize CPU state registers
	!
	wrpr	%g0, PSTATE_KERN, %pstate
	wr	%g0, %g0, %fprs

	!
	! call krtld to link the world together
	!
	call	kobj_start
	mov	%o4, %o0

	CLEARTICKNPT			! allow user rdtick
	!
	! Get maxwin from %ver
	!
	rdpr	%ver, %g1
	and	%g1, VER_MAXWIN, %g1

	!
	! Stuff some memory cells related to numbers of windows.
	!
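	! %g1 still holds MAXWIN (NWINDOWS - 1) from the %ver read above.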
	sethi	%hi(nwin_minus_one), %g2
	st	%g1, [%g2 + %lo(nwin_minus_one)]
	inc	%g1
	sethi	%hi(nwindows), %g2
	st	%g1, [%g2 + %lo(nwindows)]
	dec	%g1
	mov	-2, %g2
	sll	%g2, %g1, %g2
	sethi	%hi(winmask), %g4
	st	%g2, [%g4 + %lo(winmask)]

	!
	! save a pointer to obp's tba for later use by kmdb
	!
	rdpr	%tba, %g1
	set	boot_tba, %g2
	stx	%g1, [%g2]

	!
	! copy obp's breakpoint trap entry to obp_bpt
	!
	rdpr	%tba, %g1
	set	T_SOFTWARE_TRAP | ST_MON_BREAKPOINT, %g2
	sll	%g2, 5, %g2
	or	%g1, %g2, %g1
	set	obp_bpt, %g2
	ldx	[%g1], %g3
	stx	%g3, [%g2]
	flush	%g2
	ldx	[%g1 + 8], %g3
	stx	%g3, [%g2 + 8]
	flush	%g2 + 8
	ldx	[%g1 + 16], %g3
	stx	%g3, [%g2 + 16]
	flush	%g2 + 16
	ldx	[%g1 + 24], %g3
	stx	%g3, [%g2 + 24]
	flush	%g2 + 24

	!
	! Initialize thread 0's stack.
	!
	set	t0stacktop, %g1		! setup kernel stack pointer
	sub	%g1, SA(KFPUSIZE+GSR_SIZE), %g2
	and	%g2, 0x3f, %g3
	sub	%g2, %g3, %o1
	sub	%o1, SA(MPCBSIZE) + STACK_BIAS, %sp

	!
	! Initialize global thread register.
	!
	set	t0, THREAD_REG

	!
	! Fill in enough of the cpu structure so that
	! the wbuf management code works. Make sure the
	! boot cpu is inserted in cpu[] based on cpuid.
	!
	CPU_INDEX(%g2, %g1)
	sll	%g2, CPTRSHIFT, %g2		! convert cpuid to cpu[] offset
	set	cpu0, %o0			! &cpu0
	set	cpu, %g1			! &cpu[]
	stn	%o0, [%g1 + %g2]		! cpu[cpuid] = &cpu0

	stn	%o0, [THREAD_REG + T_CPU]	! threadp()->t_cpu = cpu[cpuid]
	stn	THREAD_REG, [%o0 + CPU_THREAD]	! cpu[cpuid]->cpu_thread = threadp()


	!  We do NOT need to bzero our BSS...boot has already done it for us.
	!  Just need to reference edata so that we don't break /dev/ksyms
	set	edata, %g0

	!
	! Call mlsetup with address of prototype user registers.
	!
	call	mlsetup
	add	%sp, REGOFF + STACK_BIAS, %o0

#if (REGOFF != MPCB_REGS)
#error "hole in struct machpcb between frame and regs?"
#endif

	!
	! Now call main.  We will return as process 1 (init).
	!
	call	main
	nop

	!
	! Main should never return.
	!
	set	.mainretmsg, %o0
	call	panic
	nop
	SET_SIZE(_start)

.mainretmsg:
	.asciz	"main returned"
	.align	4

#endif	/* lint */


/*
 * Generic system trap handler.
 *
 * Some kernel trap handlers save themselves from buying a window by
 * borrowing some of sys_trap's unused locals. %l0 thru %l3 may be used
 * for this purpose, as user_rtt and priv_rtt do not depend on them.
 * %l4 thru %l7 should NOT be used this way.
 *
 * Entry Conditions:
 *	%pstate		am:0 priv:1 ie:0
 *			globals are either ag or ig (not mg!)
 *
 * Register Inputs:
 *	%g1		pc of trap handler
 *	%g2, %g3	args for handler
 *	%g4		desired %pil (-1 means current %pil)
 *	%g5, %g6	destroyed
 *	%g7		saved
 *
 * Register Usage:
 *	%l0, %l1	temps
 *	%l3		saved %g1
 *	%l6		curthread for user traps, %pil for priv traps
 *	%l7		regs
 *
 * Called function prototype variants:
 *
 *	func(struct regs *rp);
 *	func(struct regs *rp, uintptr_t arg1 [%g2], uintptr_t arg2 [%g3])
 *	func(struct regs *rp, uintptr_t arg1 [%g2],
 *	    uint32_t arg2 [%g3.l], uint32_t arg3 [%g3.h])
 *	func(struct regs *rp, uint32_t arg1 [%g2.l],
 *	    uint32_t arg2 [%g3.l], uint32_t arg3 [%g3.h], uint32_t [%g2.h])
 */

#if defined(lint)

void
sys_trap(void)
{}

#else	/* lint */

	ENTRY_NP(sys_trap)
	!
	! force tl=1, update %cwp, branch to correct handler
	!
	wrpr	%g0, 1, %tl
	rdpr	%tstate, %g5
	btst	TSTATE_PRIV, %g5
	and	%g5, TSTATE_CWP, %g6
	bnz,pn	%xcc, priv_trap
	wrpr	%g0, %g6, %cwp

	ALTENTRY(user_trap)
	!
	! user trap
	!
	! make all windows clean for kernel
	! buy a window using the current thread's stack
	!
	sethi	%hi(nwin_minus_one), %g5
	ld	[%g5 + %lo(nwin_minus_one)], %g5
	wrpr	%g0, %g5, %cleanwin
	CPU_ADDR(%g5, %g6)
	ldn	[%g5 + CPU_THREAD], %g5
	ldn	[%g5 + T_STACK], %g6
	sub	%g6, STACK_BIAS, %g6
	save	%g6, 0, %sp
	!
	! set window registers so that current windows are "other" windows
	!
	rdpr	%canrestore, %l0
	rdpr	%wstate, %l1
	wrpr	%g0, 0, %canrestore
	sllx	%l1, WSTATE_SHIFT, %l1
	wrpr	%l1, WSTATE_K64, %wstate
	wrpr	%g0, %l0, %otherwin
	!
	! set pcontext to run kernel
	!
	sethi	%hi(kcontextreg), %l0
	ldx	[%l0 + %lo(kcontextreg)], %l0
	mov	MMU_PCONTEXT, %l1	! if kcontextreg==PCONTEXT, do nothing
	ldxa	[%l1]ASI_MMU_CTX, %l2
	xor	%l0, %l2, %l2
	srlx	%l2, CTXREG_NEXT_SHIFT, %l2
	brz	%l2, 2f			! if N_pgsz0/1 changed, need demap
	sethi	%hi(FLUSH_ADDR), %l3
	mov	DEMAP_ALL_TYPE, %l2
	stxa	%g0, [%l2]ASI_DTLB_DEMAP
	stxa	%g0, [%l2]ASI_ITLB_DEMAP
2:
	stxa	%l0, [%l1]ASI_MMU_CTX
	flush	%l3			! flush required by immu
1:

	set	utl0, %g6		! bounce to utl0
have_win:
	SYSTRAP_TRACE(%o1, %o2, %o3)


	!
	! at this point we have a new window we can play in,
	! and %g6 is the label we want done to bounce to
	!
	! save needed current globals
	!
	mov	%g1, %l3	! pc
	mov	%g2, %o1	! arg #1
	mov	%g3, %o2	! arg #2
	srlx	%g3, 32, %o3	! pseudo arg #3
	srlx	%g2, 32, %o4	! pseudo arg #4
	mov	%g5, %l6	! curthread if user trap, %pil if priv trap
	!
	! save trap state on stack
	!
	add	%sp, REGOFF + STACK_BIAS, %l7
	rdpr	%tpc, %l0
	rdpr	%tnpc, %l1
	rdpr	%tstate, %l2
	stn	%l0, [%l7 + PC_OFF]
	stn	%l1, [%l7 + nPC_OFF]
	stx	%l2, [%l7 + TSTATE_OFF]
	!
	! setup pil
	!
	brlz,pt	%g4, 1f
	nop
#ifdef	DEBUG
	!
	! ASSERT(%g4 >= %pil).
	!
	rdpr	%pil, %l0
	cmp	%g4, %l0
	bge,pt	%xcc, 0f
	nop				! yes, nop; to avoid annul
	set	bad_g4_called, %l3
	mov	1, %o1
	st	%o1, [%l3]
	set	bad_g4, %l3		! pc
	set	sys_trap_wrong_pil, %o1	! arg #1
	mov	%g4, %o2		! arg #2
	ba	1f			! stay at the current %pil
	mov	%l0, %o3		! arg #3
0:
#endif	/* DEBUG */
	wrpr	%g0, %g4, %pil
1:
	!
	! set trap regs to execute in kernel at %g6
	! done resumes execution there
	!
	wrpr	%g0, %g6, %tnpc
	rdpr	%cwp, %l0
	set	TSTATE_KERN, %l1
	wrpr	%l1, %l0, %tstate
	done
	/* NOTREACHED */
	SET_SIZE(user_trap)
	SET_SIZE(sys_trap)


	ENTRY_NP(prom_trap)
	!
	! prom trap switches the stack to 32-bit
	! if we took a trap from a 64-bit window.
	! It then buys a window on the current stack.
	!
	save	%sp, -SA64(REGOFF + REGSIZE), %sp
					/* 32 bit frame, 64 bit sized */
	set	ptl0, %g6
	ba,a,pt	%xcc, have_win
	SET_SIZE(prom_trap)

	ENTRY_NP(priv_trap)
	!
	! kernel trap
	! buy a window on the current stack
	!
	! is the trap PC in the range allocated to Open Firmware?
	rdpr	%tpc, %g5
	set	OFW_END_ADDR, %g6
	cmp	%g5, %g6
	bgu,a,pn %xcc, 1f
	rdpr	%pil, %g5
	set	OFW_START_ADDR, %g6
	cmp	%g5, %g6
	bgeu,pn	%xcc, prom_trap
	rdpr	%pil, %g5
1:
	!
	! check whether the primary context is the kernel context.
	!
	mov	MMU_PCONTEXT, %g6
	ldxa	[%g6]ASI_MMU_CTX, %g5
	sllx	%g5, CTXREG_CTX_SHIFT, %g5	! keep just the ctx bits
	brnz,pn	%g5, 2f				! assumes KCONTEXT == 0
	rdpr	%pil, %g5
	!
	! primary context is the kernel context.
	!
	set	ktl0, %g6
	save	%sp, -SA(REGOFF + REGSIZE), %sp
	ba,a,pt	%xcc, have_win
2:
	!
	! primary context is a user context. The caller of sys_trap()
	! or priv_trap() did not set the kernel context. Raise the
	! trap level to MAXTL-1 so that ptl1_panic() prints
	! out all levels of trap data.
	!
	rdpr	%ver, %g5
	srlx	%g5, VER_MAXTL_SHIFT, %g5
	and	%g5, VER_MAXTL_MASK, %g5	! %g5 = MAXTL
	sub	%g5, 1, %g5
	wrpr	%g0, %g5, %tl
	mov	PTL1_BAD_CTX, %g1
	ba,a,pt	%xcc, ptl1_panic
	SET_SIZE(priv_trap)

	ENTRY_NP(utl0)
	SAVE_GLOBALS(%l7)
	SAVE_OUTS(%l7)
	mov	%l6, THREAD_REG
	wrpr	%g0, PSTATE_KERN, %pstate	! enable ints
	jmpl	%l3, %o7			! call trap handler
	mov	%l7, %o0
	!
	ALTENTRY(user_rtt)
	!
	! Register inputs
	!	%l7 - regs
	!
	! disable interrupts and check for ASTs and wbuf restores
	! keep cpu_base_spl in %l4 and THREAD_REG in %l6 (needed
	! in wbuf.s when globals have already been restored).
	!
	wrpr	%g0, PIL_MAX, %pil
	ldn	[THREAD_REG + T_CPU], %l0
	ld	[%l0 + CPU_BASE_SPL], %l4

	ldub	[THREAD_REG + T_ASTFLAG], %l2
	brz,pt	%l2, 1f
	ld	[%sp + STACK_BIAS + MPCB_WBCNT], %l3
	!
	! call trap to do ast processing
	!
	wrpr	%g0, %l4, %pil			! pil = cpu_base_spl
	mov	%l7, %o0
	call	trap
	mov	T_AST, %o2
	ba,a,pt	%xcc, user_rtt
1:
	brz,pt	%l3, 2f
	mov	THREAD_REG, %l6
	!
	! call restore_wbuf to push wbuf windows to stack
	!
	wrpr	%g0, %l4, %pil			! pil = cpu_base_spl
	mov	%l7, %o0
	call	trap
	mov	T_FLUSH_PCB, %o2
	ba,a,pt	%xcc, user_rtt
2:
#ifdef TRAPTRACE
	TRACE_RTT(TT_SYS_RTT_USER, %l0, %l1, %l2, %l3)
#endif	/* TRAPTRACE */
	ld	[%sp + STACK_BIAS + MPCB_WSTATE], %l3	! get wstate

	!
	! restore user globals and outs
	!
	rdpr	%pstate, %l1
	wrpr	%l1, PSTATE_IE, %pstate
	RESTORE_GLOBALS(%l7)
	! switch to alternate globals, saving THREAD_REG in %l6
	wrpr	%l1, PSTATE_IE | PSTATE_AG, %pstate
	mov	%sp, %g6	! remember the mpcb pointer in %g6
	RESTORE_OUTS(%l7)
	!
	! set %pil from cpu_base_spl
	!
	wrpr	%g0, %l4, %pil
	!
	! raise tl (now using nucleus context)
	!
	wrpr	%g0, 1, %tl

	! switch "other" windows back to "normal" windows.
	rdpr	%otherwin, %g1
	wrpr	%g0, 0, %otherwin
	add	%l3, WSTATE_CLEAN_OFFSET, %l3	! convert to "clean" wstate
	wrpr	%g0, %l3, %wstate
	wrpr	%g0, %g1, %canrestore

	! set pcontext to scontext for user execution
	mov	MMU_SCONTEXT, %g3
	ldxa	[%g3]ASI_MMU_CTX, %g2

	mov	MMU_PCONTEXT, %g3
	ldxa	[%g3]ASI_MMU_CTX, %g4		! need N_pgsz0/1 bits
	srlx	%g4, CTXREG_NEXT_SHIFT, %g4
	sllx	%g4, CTXREG_NEXT_SHIFT, %g4
	or	%g4, %g2, %g2			! Or in Nuc pgsz bits

	sethi	%hi(FLUSH_ADDR), %g4
	stxa	%g2, [%g3]ASI_MMU_CTX
	flush	%g4				! flush required by immu
	!
	! Within the code segment [rtt_ctx_start - rtt_ctx_end],
	! PCONTEXT is set to run user code. If a trap happens in this
	! window, and the trap needs to be handled at TL=0, the handler
	! must make sure to set PCONTEXT to run kernel. A convenience
	! macro, RESET_USER_RTT_REGS(scr1, scr2, label), is available to
	! TL>1 handlers for this purpose.
	!
	! %g1 = %canrestore
	! %l7 = regs
	! %g6 = mpcb
	!
	.global	rtt_ctx_start
rtt_ctx_start:
	!
	! setup trap regs
	!
	ldn	[%l7 + PC_OFF], %g3
	ldn	[%l7 + nPC_OFF], %g2
	ldx	[%l7 + TSTATE_OFF], %l0
	andn	%l0, TSTATE_CWP, %g7
	wrpr	%g3, %tpc
	wrpr	%g2, %tnpc

	!
	! Restore to window we originally trapped in.
	! First attempt to restore from the watchpoint saved register window.
	!
	tst	%g1
	bne,a	1f
	clrn	[%g6 + STACK_BIAS + MPCB_RSP0]
	tst	%fp
	be,a	1f
	clrn	[%g6 + STACK_BIAS + MPCB_RSP0]
	! test for user return window in pcb
	ldn	[%g6 + STACK_BIAS + MPCB_RSP0], %g1
	cmp	%fp, %g1
	bne	1f
	clrn	[%g6 + STACK_BIAS + MPCB_RSP0]
	restored
	restore
	! restore from user return window
	RESTORE_V9WINDOW(%g6 + STACK_BIAS + MPCB_RWIN0)
	!
	! Attempt to restore from the second watchpoint saved register window.
	tst	%fp
	be,a	2f
	clrn	[%g6 + STACK_BIAS + MPCB_RSP1]
	ldn	[%g6 + STACK_BIAS + MPCB_RSP1], %g1
	cmp	%fp, %g1
	bne	2f
	clrn	[%g6 + STACK_BIAS + MPCB_RSP1]
	restored
	restore
	RESTORE_V9WINDOW(%g6 + STACK_BIAS + MPCB_RWIN1)
	save
	b,a	2f
1:
	restore				! should not trap
2:
	!
	! set %cleanwin to %canrestore
	! set %tstate to the correct %cwp
	! retry resumes user execution
	!
	rdpr	%canrestore, %g1
	wrpr	%g0, %g1, %cleanwin
	rdpr	%cwp, %g1
	wrpr	%g1, %g7, %tstate
	retry
	.global	rtt_ctx_end
rtt_ctx_end:
	/* NOTREACHED */
	SET_SIZE(user_rtt)
	SET_SIZE(utl0)

	ENTRY_NP(ptl0)
	SAVE_GLOBALS(%l7)
	SAVE_OUTS(%l7)
	CPU_ADDR(%g5, %g6)
	ldn	[%g5 + CPU_THREAD], THREAD_REG
	wrpr	%g0, PSTATE_KERN, %pstate	! enable ints
	jmpl	%l3, %o7			! call trap handler
	mov	%l7, %o0
	!
	ALTENTRY(prom_rtt)
#ifdef TRAPTRACE
	TRACE_RTT(TT_SYS_RTT_PROM, %l0, %l1, %l2, %l3)
#endif	/* TRAPTRACE */
	ba,pt	%xcc, common_rtt
	mov	THREAD_REG, %l0
	SET_SIZE(prom_rtt)
	SET_SIZE(ptl0)

	ENTRY_NP(ktl0)
	SAVE_GLOBALS(%l7)
	SAVE_OUTS(%l7)				! for the call bug workaround
	wrpr	%g0, PSTATE_KERN, %pstate	! enable ints
	jmpl	%l3, %o7			! call trap handler
	mov	%l7, %o0
	!
	ALTENTRY(priv_rtt)
#ifdef TRAPTRACE
	TRACE_RTT(TT_SYS_RTT_PRIV, %l0, %l1, %l2, %l3)
#endif	/* TRAPTRACE */
	!
	! Register inputs
	!	%l7 - regs
	!	%l6 - trap %pil
	!
	! Check for a kernel preemption request
	!
	ldn	[THREAD_REG + T_CPU], %l0
	ldub	[%l0 + CPU_KPRUNRUN], %l0
	brz,pt	%l0, 1f
	nop

	!
	! Attempt to preempt
	!
	ldstub	[THREAD_REG + T_PREEMPT_LK], %l0	! load preempt lock
	brnz,pn	%l0, 1f			! can't call kpreempt if this thread is
	nop				!   already in it...

	call	kpreempt
	mov	%l6, %o0		! pass original interrupt level

	stub	%g0, [THREAD_REG + T_PREEMPT_LK]	! nuke the lock

	rdpr	%pil, %o0		! compare old pil level
	cmp	%l6, %o0		!   with current pil level
	movg	%xcc, %o0, %l6		! if current is lower, drop old pil
1:
	!
	! If we interrupted the mutex_owner_running() critical region we
	! must reset the PC and nPC back to the beginning to prevent missed
	! wakeups.  See the comments in mutex_owner_running() for details.
	!
	ldn	[%l7 + PC_OFF], %l0
	set	mutex_owner_running_critical_start, %l1
	sub	%l0, %l1, %l0
	cmp	%l0, mutex_owner_running_critical_size
	bgeu,pt	%xcc, 2f
	mov	THREAD_REG, %l0
	stn	%l1, [%l7 + PC_OFF]	! restart mutex_owner_running()
	add	%l1, 4, %l1
	ba,pt	%xcc, common_rtt
	stn	%l1, [%l7 + nPC_OFF]

2:
	!
	! If we interrupted the mutex_exit() critical region we must reset
	! the PC and nPC back to the beginning to prevent missed wakeups.
	! See the comments in mutex_exit() for details.
	!
	ldn	[%l7 + PC_OFF], %l0
	set	mutex_exit_critical_start, %l1
	sub	%l0, %l1, %l0
	cmp	%l0, mutex_exit_critical_size
	bgeu,pt	%xcc, common_rtt
	mov	THREAD_REG, %l0
	stn	%l1, [%l7 + PC_OFF]	! restart mutex_exit()
	add	%l1, 4, %l1
	stn	%l1, [%l7 + nPC_OFF]

common_rtt:
	!
	! restore globals and outs
	!
	rdpr	%pstate, %l1
	wrpr	%l1, PSTATE_IE, %pstate
	RESTORE_GLOBALS(%l7)
	! switch to alternate globals
	wrpr	%l1, PSTATE_IE | PSTATE_AG, %pstate
	RESTORE_OUTS(%l7)
	!
	! set %pil from max(old pil, cpu_base_spl)
	!
	ldn	[%l0 + T_CPU], %l0
	ld	[%l0 + CPU_BASE_SPL], %l0
	cmp	%l6, %l0
	movg	%xcc, %l6, %l0
	wrpr	%g0, %l0, %pil
	!
	! raise tl
	! setup trap regs
	! restore to window we originally trapped in
	!
	wrpr	%g0, 1, %tl
	ldn	[%l7 + PC_OFF], %g1
	ldn	[%l7 + nPC_OFF], %g2
	ldx	[%l7 + TSTATE_OFF], %l0
	andn	%l0, TSTATE_CWP, %g7
	wrpr	%g1, %tpc
	wrpr	%g2, %tnpc
	restore
	!
	! set %tstate to the correct %cwp
	! retry resumes prom execution
	!
	rdpr	%cwp, %g1
	wrpr	%g1, %g7, %tstate
	retry
	/* NOTREACHED */
	SET_SIZE(priv_rtt)
	SET_SIZE(ktl0)

#endif	/* lint */

#ifndef lint

#ifdef	DEBUG
	.seg	".data"
	.align	4

	.global	bad_g4_called
bad_g4_called:
	.word	0

sys_trap_wrong_pil:
	.asciz	"sys_trap: %g4(%d) is lower than %pil(%d)"
	.align	4
	.seg	".text"

	ENTRY_NP(bad_g4)
	mov	%o1, %o0
	mov	%o2, %o1
	call	panic
	mov	%o3, %o2
	SET_SIZE(bad_g4)
#endif	/* DEBUG */
#endif	/* lint */

/*
 * sys_tl1_panic can be called by traps at tl1 which
 * really want to panic, but need the rearrangement of
 * the args as provided by this wrapper routine.
 */
#if defined(lint)

void
sys_tl1_panic(void)
{}

#else	/* lint */
	ENTRY_NP(sys_tl1_panic)
	mov	%o1, %o0
	mov	%o2, %o1
	call	panic
	mov	%o3, %o2
	SET_SIZE(sys_tl1_panic)
#endif	/* lint */

/*
 * Turn on or off bits in the auxiliary i/o register.
 *
 * set_auxioreg(bit, flag)
 *	int bit;		bit mask in aux i/o reg
 *	int flag;		0 = off, otherwise on
 *
 * This is intrinsically ugly but is used by the floppy driver.  It is also
 * used to turn the LED on and off.
 */

#if defined(lint)

/* ARGSUSED */
void
set_auxioreg(int bit, int flag)
{}

#else	/* lint */

	.seg	".data"
	.align	4
auxio_panic:
	.asciz	"set_auxioreg: interrupts already disabled on entry"
	.align	4
	.seg	".text"

	ENTRY_NP(set_auxioreg)
	/*
	 * o0 = bit mask
	 * o1 = flag: 0 = off, otherwise on
	 *
	 * disable interrupts while updating auxioreg
	 */
	rdpr	%pstate, %o2
#ifdef	DEBUG
	andcc	%o2, PSTATE_IE, %g0	/* if interrupts already */
	bnz,a,pt %icc, 1f		/* disabled, panic */
	nop
	sethi	%hi(auxio_panic), %o0
	call	panic
	or	%o0, %lo(auxio_panic), %o0
1:
#endif	/* DEBUG */
	wrpr	%o2, PSTATE_IE, %pstate	/* disable interrupts */
	sethi	%hi(v_auxio_addr), %o3
	ldn	[%o3 + %lo(v_auxio_addr)], %o4
	ldub	[%o4], %g1		/* read aux i/o register */
	tst	%o1
	bnz,a	2f
	bset	%o0, %g1		/* on */
	bclr	%o0, %g1		/* off */
2:
	or	%g1, AUX_MBO, %g1	/* Must Be Ones */
	stb	%g1, [%o4]		/* write aux i/o register */
	retl
	wrpr	%g0, %o2, %pstate	/* enable interrupt */
	SET_SIZE(set_auxioreg)

#endif	/* lint */

/*
 * Flush all windows to memory, except for the one we entered in.
 * We do this by doing NWINDOW-2 saves then the same number of restores.
 * This leaves the WIM immediately before the window we entered in.
 * This is used for context switching.
 */

#if defined(lint)

void
flush_windows(void)
{}

#else	/* lint */

	ENTRY_NP(flush_windows)
	retl
	flushw
	SET_SIZE(flush_windows)

#endif	/* lint */

#if defined(lint)

void
debug_flush_windows(void)
{}

#else	/* lint */

	ENTRY_NP(debug_flush_windows)
	set	nwindows, %g1
	ld	[%g1], %g1
	mov	%g1, %g2

1:
	save	%sp, -WINDOWSIZE, %sp
	brnz	%g2, 1b
	dec	%g2

	mov	%g1, %g2
2:
	restore
	brnz	%g2, 2b
	dec	%g2

	retl
	nop

	SET_SIZE(debug_flush_windows)

#endif	/* lint */

/*
 * flush user windows to memory.
 */

#if defined(lint)

void
flush_user_windows(void)
{}

#else	/* lint */

	ENTRY_NP(flush_user_windows)
	rdpr	%otherwin, %g1
	brz	%g1, 3f
	clr	%g2
1:
	save	%sp, -WINDOWSIZE, %sp
	rdpr	%otherwin, %g1
	brnz	%g1, 1b
	add	%g2, 1, %g2
2:
	sub	%g2, 1, %g2		! restore back to orig window
	brnz	%g2, 2b
	restore
3:
	retl
	nop
	SET_SIZE(flush_user_windows)

#endif	/* lint */

/*
 * Throw out any user windows in the register file.
 * Used by setregs (exec) to clean out the old user's windows.
 * Used by sigcleanup to remove extraneous windows when returning from a
 * signal.
 */

#if defined(lint)

void
trash_user_windows(void)
{}

#else	/* lint */

	ENTRY_NP(trash_user_windows)
	rdpr	%otherwin, %g1
	brz	%g1, 3f			! no user windows?
	ldn	[THREAD_REG + T_STACK], %g5

	!
	! There are old user windows in the register file. We disable ints
	! and increment cansave so that we don't overflow on these windows.
	! Also, this sets up a nice underflow when first returning to the
	! new user.
	!
	rdpr	%pstate, %g2
	wrpr	%g2, PSTATE_IE, %pstate
	rdpr	%cansave, %g3
	rdpr	%otherwin, %g1		! re-read in case of interrupt
	add	%g3, %g1, %g3
	wrpr	%g0, 0, %otherwin
	wrpr	%g0, %g3, %cansave
	wrpr	%g0, %g2, %pstate
3:
	retl
	clr	[%g5 + MPCB_WBCNT]	! zero window buffer cnt
	SET_SIZE(trash_user_windows)


#endif	/* lint */

/*
 * Setup g7 via the CPU data structure.
 */
#if defined(lint)

struct scb *
set_tbr(struct scb *s)
{ return (s); }

#else	/* lint */

	ENTRY_NP(set_tbr)
	retl
	ta	72			! no tbr, stop simulation
	SET_SIZE(set_tbr)

#endif	/* lint */


#if defined(lint)
/*
 * These need to be defined somewhere for lint and there is no "hicore.s"...
 */
char etext[1], end[1];
#endif	/* lint */

#if defined (lint)

/* ARGSUSED */
void
ptl1_panic(u_int reason)
{}

#else /* lint */

#define	PTL1_SAVE_WINDOW(RP)						\
	stxa	%l0, [RP + RW64_LOCAL + (0 * RW64_LOCAL_INCR)] %asi;	\
	stxa	%l1, [RP + RW64_LOCAL + (1 * RW64_LOCAL_INCR)] %asi;	\
	stxa	%l2, [RP + RW64_LOCAL + (2 * RW64_LOCAL_INCR)] %asi;	\
	stxa	%l3, [RP + RW64_LOCAL + (3 * RW64_LOCAL_INCR)] %asi;	\
	stxa	%l4, [RP + RW64_LOCAL + (4 * RW64_LOCAL_INCR)] %asi;	\
	stxa	%l5, [RP + RW64_LOCAL + (5 * RW64_LOCAL_INCR)] %asi;	\
	stxa	%l6, [RP + RW64_LOCAL + (6 * RW64_LOCAL_INCR)] %asi;	\
	stxa	%l7, [RP + RW64_LOCAL + (7 * RW64_LOCAL_INCR)] %asi;	\
	stxa	%i0, [RP + RW64_IN + (0 * RW64_IN_INCR)] %asi;		\
	stxa	%i1, [RP + RW64_IN + (1 * RW64_IN_INCR)] %asi;		\
	stxa	%i2, [RP + RW64_IN + (2 * RW64_IN_INCR)] %asi;		\
	stxa	%i3, [RP + RW64_IN + (3 * RW64_IN_INCR)] %asi;		\
	stxa	%i4, [RP + RW64_IN + (4 * RW64_IN_INCR)] %asi;		\
	stxa	%i5, [RP + RW64_IN + (5 * RW64_IN_INCR)] %asi;		\
	stxa	%i6, [RP + RW64_IN + (6 * RW64_IN_INCR)] %asi;		\
	stxa	%i7, [RP + RW64_IN + (7 * RW64_IN_INCR)] %asi
#define	PTL1_NEXT_WINDOW(scr)	\
	add	scr, RWIN64SIZE, scr

#define	PTL1_RESET_RWINDOWS(scr)		\
	sethi	%hi(nwin_minus_one), scr;	\
	ld	[scr + %lo(nwin_minus_one)], scr;	\
	wrpr	scr, %cleanwin;			\
	dec	scr;				\
	wrpr	scr, %cansave;			\
	wrpr	%g0, %canrestore;		\
	wrpr	%g0, %otherwin

#define	PTL1_DCACHE_LINE_SIZE	4	/* small enough for all CPUs */

/*
 * ptl1_panic is called when the kernel detects that it is in an invalid state
 * and the trap level is greater than 0. ptl1_panic is responsible for saving
 * the current CPU state, restoring the CPU state to normal, and calling
 * panic. The CPU state must be saved reliably without causing traps.
 * ptl1_panic saves it in the ptl1_state structure, which is a member of the
 * machcpu structure. In order to access the ptl1_state structure without
 * causing traps, physical addresses are used so that MMU miss traps can be
 * avoided. The restriction on physical memory accesses is that the ptl1_state
 * structure must be on a single physical page. This is because (1) a single
 * physical address is needed for each ptl1_state structure and (2) it
 * simplifies the physical address calculation for each member of the
 * structure.
 * ptl1_panic is a likely spot for stack overflows to wind up; thus, the
 * current stack may not be usable. In order to call panic reliably in such a
 * state, each CPU needs a dedicated ptl1 panic stack.
 * CPU_ALLOC_SIZE, which is defined to be MMU_PAGESIZE, is used to allocate the
 * cpu structure and a ptl1 panic stack. They are put together on the same page
 * for memory space efficiency. The low address part is used for the cpu
 * structure, and the high address part is for a ptl1 panic stack.
 * The cpu_pa array holds the physical addresses of the allocated cpu
 * structures, just as the cpu array holds their virtual addresses.
 *
 * %g1 reason to be called
 * %g2 broken
 * %g3 broken
 */
	ENTRY_NP(ptl1_panic)
	!
	! Flush the D$ first, so that stale data will not be accessed later.
	! Data written via ASI_MEM bypasses the D$. If the D$ contains data at
	! the same address where data was written via ASI_MEM, a load from
	! that address using a virtual address and the default ASI still
	! returns the old data. Flushing the D$ erases the old data in the D$,
	! so that it will not be loaded. Since we can afford only 2 registers
	! (%g2 and %g3) for this job, we flush the entire D$.
	! For FJ OPL processors (IMPL values < SPITFIRE_IMPL), D$ flushing
	! is not needed.
	!
	GET_CPU_IMPL(%g2)
	cmp	%g2, SPITFIRE_IMPL
	blt,pn	%icc, 1f		! Skip flushing for OPL processors
	nop
	sethi	%hi(dcache_size), %g2
	ld	[%g2 + %lo(dcache_size)], %g2
	sethi	%hi(dcache_linesize), %g3
	ld	[%g3 + %lo(dcache_linesize)], %g3
	sub	%g2, %g3, %g2
0:	stxa	%g0, [%g2] ASI_DC_TAG
	membar	#Sync
	brnz,pt	%g2, 0b
	sub	%g2, %g3, %g2
1:
	!
	! increment the entry counter.
	! save CPU state if this is the first entry.
	!
	CPU_PADDR(%g2, %g3);
	add	%g2, CPU_PTL1, %g2		! pstate = &CPU->mcpu.ptl1_state
	wr	%g0, ASI_MEM, %asi		! physical address access
	!
	! pstate->ptl1_entry_count++
	!
	lduwa	[%g2 + PTL1_ENTRY_COUNT] %asi, %g3
	add	%g3, 1, %g3
	stuwa	%g3, [%g2 + PTL1_ENTRY_COUNT] %asi
	!
	! CPU state saving is skipped from the 2nd entry to ptl1_panic since we
	! do not want to clobber the state from the original failure. panic()
	! is responsible for handling multiple or recursive panics.
	!
	cmp	%g3, 2				! if (ptl1_entry_count >= 2)
	bge,pn	%icc, state_saved		!	goto state_saved
	add	%g2, PTL1_REGS, %g3		! %g3 = &pstate->ptl1_regs[0]
	!
	! save CPU state
	!
save_cpu_state:
	!
	! save the current global registers
	! so that all of them become available for use
	!
	stxa	%g1, [%g3 + PTL1_G1] %asi
	stxa	%g2, [%g3 + PTL1_G2] %asi
	stxa	%g3, [%g3 + PTL1_G3] %asi
	stxa	%g4, [%g3 + PTL1_G4] %asi
	stxa	%g5, [%g3 + PTL1_G5] %asi
	stxa	%g6, [%g3 + PTL1_G6] %asi
	stxa	%g7, [%g3 + PTL1_G7] %asi
	!
	! %tl, %tt, %tstate, %tpc, %tnpc for each TL
	!
	rdpr	%tl, %g1
	brz	%g1, 1f				! if (trap_level == 0) skip the loop
	add	%g3, PTL1_TRAP_REGS, %g4	! %g4 = &ptl1_trap_regs[0]
0:						! save one TL per iteration
	stwa	%g1, [%g4 + PTL1_TL] %asi
	rdpr	%tt, %g5
	stwa	%g5, [%g4 + PTL1_TT] %asi
	rdpr	%tstate, %g5
	stxa	%g5, [%g4 + PTL1_TSTATE] %asi
	rdpr	%tpc, %g5
	stxa	%g5, [%g4 + PTL1_TPC] %asi
	rdpr	%tnpc, %g5
	stxa	%g5, [%g4 + PTL1_TNPC] %asi
	add	%g4, PTL1_TRAP_REGS_INCR, %g4
	deccc	%g1
	bnz,a,pt %icc, 0b			! if (trap_level != 0) loop again
	wrpr	%g1, %tl
1:
	!
	! %pstate, %pil, SOFTINT, (S)TICK
	! Pending interrupts are also cleared in order to avoid a recursive
	! call to ptl1_panic in case the interrupt handler causes a panic.
	!
	rdpr	%pil, %g1
	stba	%g1, [%g3 + PTL1_PIL] %asi
	rdpr	%pstate, %g1
	stha	%g1, [%g3 + PTL1_PSTATE] %asi
	rd	SOFTINT, %g1
	sta	%g1, [%g3 + PTL1_SOFTINT] %asi
	wr	%g1, CLEAR_SOFTINT
	sethi	%hi(traptrace_use_stick), %g1
	ld	[%g1 + %lo(traptrace_use_stick)], %g1
	brz,a,pn %g1, 2f
	rdpr	%tick, %g1
	rd	STICK, %g1
2:	stxa	%g1, [%g3 + PTL1_TICK] %asi

	!
	! MMU registers, because ptl1_panic may be called from
	! the MMU trap handlers.
	!
	mov	MMU_SFAR, %g1
	ldxa	[%g1]ASI_DMMU, %g4
	stxa	%g4, [%g3 + PTL1_DMMU_SFAR]%asi
	mov	MMU_SFSR, %g1
	ldxa	[%g1]ASI_DMMU, %g4
	stxa	%g4, [%g3 + PTL1_DMMU_SFSR]%asi
	ldxa	[%g1]ASI_IMMU, %g4
	stxa	%g4, [%g3 + PTL1_IMMU_SFSR]%asi
	mov	MMU_TAG_ACCESS, %g1
	ldxa	[%g1]ASI_DMMU, %g4
	stxa	%g4, [%g3 + PTL1_DMMU_TAG_ACCESS]%asi
	ldxa	[%g1]ASI_IMMU, %g4
	stxa	%g4, [%g3 + PTL1_IMMU_TAG_ACCESS]%asi

	!
	! Save register window state and register windows.
	!
	rdpr	%cwp, %g1
	stba	%g1, [%g3 + PTL1_CWP] %asi
	rdpr	%wstate, %g1
	stba	%g1, [%g3 + PTL1_WSTATE] %asi
	rdpr	%otherwin, %g1
	stba	%g1, [%g3 + PTL1_OTHERWIN] %asi
	rdpr	%cleanwin, %g1
	stba	%g1, [%g3 + PTL1_CLEANWIN] %asi
	rdpr	%cansave, %g1
	stba	%g1, [%g3 + PTL1_CANSAVE] %asi
	rdpr	%canrestore, %g1
	stba	%g1, [%g3 + PTL1_CANRESTORE] %asi

	PTL1_RESET_RWINDOWS(%g1)
	clr	%g1
	wrpr	%g1, %cwp
	add	%g3, PTL1_RWINDOW, %g4		! %g4 = &ptl1_rwindow[0];

3:	PTL1_SAVE_WINDOW(%g4)			! save the current window
	inc	%g1
	cmp	%g1, MAXWIN
	bgeu,pn	%icc, 5f
	wrpr	%g1, %cwp
	rdpr	%cwp, %g2
	cmp	%g1, %g2			! saturation check
	be,pt	%icc, 3b
	PTL1_NEXT_WINDOW(%g4)			! advance to the next save area
5:
	!
	! The most crucial CPU state was saved.
	! Proceed to go back to TL = 0.
	!
state_saved:
	wrpr	%g0, 1, %tl
	wrpr	%g0, PIL_MAX, %pil
	!
	PTL1_RESET_RWINDOWS(%g1)
	wrpr	%g0, %cwp
	wrpr	%g0, %cleanwin
	wrpr	%g0, WSTATE_KERN, %wstate
	!
	! Set pcontext to run kernel.
	!
	! For OPL, load kcontextreg instead of clearing the primary
	! context register. This is to avoid changing the nucleus page
	! size bits after boot initialization.
	!
#ifdef _OPL
	sethi	%hi(kcontextreg), %g4
	ldx	[%g4 + %lo(kcontextreg)], %g4
#endif /* _OPL */

	set	DEMAP_ALL_TYPE, %g1
	sethi	%hi(FLUSH_ADDR), %g3
	set	MMU_PCONTEXT, %g2

	stxa	%g0, [%g1]ASI_DTLB_DEMAP
	stxa	%g0, [%g1]ASI_ITLB_DEMAP

#ifdef _OPL
	stxa	%g4, [%g2]ASI_MMU_CTX
#else /* _OPL */
	stxa	%g0, [%g2]ASI_MMU_CTX
#endif /* _OPL */

	flush	%g3

	rdpr	%cwp, %g1
	set	TSTATE_KERN, %g3
	wrpr	%g3, %g1, %tstate
	set	ptl1_panic_tl0, %g3
	wrpr	%g0, %g3, %tnpc
	done					! drop from TL:1 to TL:0
	!
ptl1_panic_tl0:					! continue here at TL:0
	CPU_ADDR(%l0, %l1)			! %l0 = cpu[cpuid]
	add	%l0, CPU_PTL1, %l1		! %l1 = &CPU->mcpu.ptl1_state
	!
	! prepare to call panic()
	!
	ldn	[%l0 + CPU_THREAD], THREAD_REG	! restore %g7
	ldn	[%l1 + PTL1_STKTOP], %l2	! %sp = ptl1_stktop
	sub	%l2, SA(MINFRAME) + STACK_BIAS, %sp
	clr	%fp				! no frame below this window
	clr	%i7
	!
	! enable limited interrupts
	!
	wrpr	%g0, CLOCK_LEVEL, %pil
	wrpr	%g0, PSTATE_KERN, %pstate
	!
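	! hand off to ptl1_panic_handler with &CPU->mcpu.ptl1_state (%l1)
	! as its argument (passed in the delay slot)
	!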
	ba,pt	%xcc, ptl1_panic_handler
	mov	%l1, %o0
	/*NOTREACHED*/
	SET_SIZE(ptl1_panic)
#endif /* lint */

#ifdef	PTL1_PANIC_DEBUG
#if defined (lint)
/*
 * ptl1_recurse() calls itself a number of times to either set up a known
 * stack or to cause a kernel stack overflow. It decrements the arguments
 * on each recursion.
 * It's called by #ifdef PTL1_PANIC_DEBUG code in startup.c to set the
 * registers to a known state to facilitate debugging.
 */

/* ARGSUSED */
void
ptl1_recurse(int count_threshold, int trap_threshold)
{}

#else /* lint */

	ENTRY_NP(ptl1_recurse)
	save	%sp, -SA(MINFRAME), %sp

	set	ptl1_recurse_call, %o7
	cmp	%o7, %i7			! if ptl1_recurse is called
	be,pt	%icc, 0f			! by itself, then skip
	nop					! register initialization

	/*
	 * Initialize Out Registers to Known Values
	 */
	set	0x01000, %l0			! %i0 is the ...
						! recursion_depth_count
	sub	%i0, 1, %o0;
	sub	%i1, 1, %o1;
	add	%l0, %o0, %o2;
	add	%l0, %o2, %o3;
	add	%l0, %o3, %o4;
	add	%l0, %o4, %o5;
	ba,a	1f
	nop

0:	/* Outs = Ins - 1 */
	sub	%i0, 1, %o0;
	sub	%i1, 1, %o1;
	sub	%i2, 1, %o2;
	sub	%i3, 1, %o3;
	sub	%i4, 1, %o4;
	sub	%i5, 1, %o5;

	/* Locals = Ins + 1 */
1:	add	%i0, 1, %l0;
	add	%i1, 1, %l1;
	add	%i2, 1, %l2;
	add	%i3, 1, %l3;
	add	%i4, 1, %l4;
	add	%i5, 1, %l5;

	set	0x0100000, %g5
	add	%g5, %g0, %g1
	add	%g5, %g1, %g2
	add	%g5, %g2, %g3
	add	%g5, %g3, %g4
	add	%g5, %g4, %g5

	brz,pn	%i1, ptl1_recurse_trap		! if (trap_threshold == 0) {
	nop					!	trap to ptl1_panic
						! }
	brz,pn	%i0, ptl1_recure_exit		! if (depth_count == 0) {
	nop					!	skip recursive call
						! }
ptl1_recurse_call:
	call	ptl1_recurse
	nop

ptl1_recure_exit:
	ret
	restore

ptl1_recurse_trap:
	ta	PTL1_DEBUG_TRAP;		! Trap Always to ptl1_panic()
	nop					! NOTREACHED
	SET_SIZE(ptl1_recurse)

#endif /* lint */

#if defined (lint)

/* ARGSUSED */
void
ptl1_panic_xt(int arg1, int arg2)
{}

#else /* lint */
/*
 * Asm function to handle a cross trap to call ptl1_panic()
 */
	ENTRY_NP(ptl1_panic_xt)
	ba	ptl1_panic
	mov	PTL1_BAD_DEBUG, %g1
	SET_SIZE(ptl1_panic_xt)

#endif /* lint */

#endif /* PTL1_PANIC_DEBUG */

#ifdef	TRAPTRACE
#if defined (lint)

void
trace_ptr_panic(void)
{
}

#else /* lint */

	ENTRY_NP(trace_ptr_panic)
	!
	! freeze the trap trace to disable the assertions. Otherwise,
	! ptl1_panic is likely to be repeatedly called from there.
	! %g2 and %g3 are used as scratch registers in ptl1_panic.
	!
	mov	1, %g3
	sethi	%hi(trap_freeze), %g2
	st	%g3, [%g2 + %lo(trap_freeze)]
	!
	! %g1 contains the %pc address where an assertion failed.
	! save it in trap_freeze_pc as a debugging hint if there is
	! no value saved in it yet.
	!
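	! (casn stores %g1 only if trap_freeze_pc is still zero, so the
	! first failing %pc is the one that is kept)
	!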
	set	trap_freeze_pc, %g2
	casn	[%g2], %g0, %g1

	ba	ptl1_panic
	mov	PTL1_BAD_TRACE_PTR, %g1
	SET_SIZE(trace_ptr_panic)

#endif /* lint */
#endif /* TRAPTRACE */

#if defined (lint)
/*
 * set_kcontextreg() sets PCONTEXT to kctx.
 * If PCONTEXT == kctx, it does nothing.
 * If N_pgsz0|N_pgsz1 differ, it does a demap all first.
 */

/* ARGSUSED */
void
set_kcontextreg()
{
}

#else /* lint */

	ENTRY_NP(set_kcontextreg)
	! SET_KCONTEXTREG(reg0, reg1, reg2, reg3, reg4, label1, label2, label3)
	SET_KCONTEXTREG(%o0, %o1, %o2, %o3, %o4, l1, l2, l3)
	retl
	nop
	SET_SIZE(set_kcontextreg)

#endif /* lint */


/*
 * The interface for a 32-bit client program that takes over the TBA
 * calling the 64-bit romvec OBP.
 */

#if defined(lint)

/* ARGSUSED */
int
client_handler(void *cif_handler, void *arg_array)
{ return 0; }

#else	/* lint */

	ENTRY(client_handler)
	save	%sp, -SA64(MINFRAME64), %sp	! 32 bit frame, 64 bit sized
	sethi	%hi(tba_taken_over), %l2
	ld	[%l2 + %lo(tba_taken_over)], %l3
	brz	%l3, 1f				! is tba_taken_over == 1?
	rdpr	%wstate, %l5			! save %wstate
	andn	%l5, WSTATE_MASK, %l6
	wrpr	%l6, WSTATE_KMIX, %wstate

	!
	! switch to PCONTEXT=0
	!
#ifndef _OPL
	mov	MMU_PCONTEXT, %o2
	ldxa	[%o2]ASI_DMMU, %o2
	srlx	%o2, CTXREG_NEXT_SHIFT, %o2
	brz,pt	%o2, 1f			! nucleus pgsz is 0, no problem
	nop
	rdpr	%pstate, %l4		! disable interrupts
	andn	%l4, PSTATE_IE, %o2
	wrpr	%g0, %o2, %pstate
	mov	DEMAP_ALL_TYPE, %o2	! set PCONTEXT=0
	stxa	%g0, [%o2]ASI_DTLB_DEMAP
	stxa	%g0, [%o2]ASI_ITLB_DEMAP
	mov	MMU_PCONTEXT, %o2
	stxa	%g0, [%o2]ASI_DMMU
	membar	#Sync
	sethi	%hi(FLUSH_ADDR), %o2
	flush	%o2			! flush required by immu
	wrpr	%g0, %l4, %pstate	! restore interrupt state
#endif /* _OPL */

1:	mov	%i1, %o0
	rdpr	%pstate, %l4		! Get the present pstate value
	andn	%l4, PSTATE_AM, %l6
	wrpr	%l6, 0, %pstate		! Set PSTATE_AM = 0
	jmpl	%i0, %o7		! Call cif handler
	nop
	wrpr	%l4, 0, %pstate		! restore pstate
	brz	%l3, 1f			! is tba_taken_over == 1?
	nop
	wrpr	%g0, %l5, %wstate	! restore wstate

	!
	! switch to PCONTEXT=kcontextreg
	!
#ifndef _OPL
	sethi	%hi(kcontextreg), %o3
	ldx	[%o3 + %lo(kcontextreg)], %o3
	brz	%o3, 1f
	nop
	rdpr	%pstate, %l4		! disable interrupts
	andn	%l4, PSTATE_IE, %o2
	wrpr	%g0, %o2, %pstate
	mov	DEMAP_ALL_TYPE, %o2
	stxa	%g0, [%o2]ASI_DTLB_DEMAP
	stxa	%g0, [%o2]ASI_ITLB_DEMAP
	mov	MMU_PCONTEXT, %o2
	stxa	%o3, [%o2]ASI_DMMU
	membar	#Sync
	sethi	%hi(FLUSH_ADDR), %o2
	flush	%o2			! flush required by immu
	wrpr	%g0, %l4, %pstate	! restore interrupt state
#endif /* _OPL */

1:	ret				! Return result ...
	restore	%o0, %g0, %o0		! delay; result in %o0
	SET_SIZE(client_handler)

#endif	/* lint */