/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#if !defined(lint)
#include "assym.h"
#endif /* !lint */
#include <sys/asm_linkage.h>
#include <sys/privregs.h>
#include <sys/sun4asi.h>
#include <sys/machasi.h>
#include <sys/hypervisor_api.h>
#include <sys/machtrap.h>
#include <sys/machthread.h>
#include <sys/machbrand.h>
#include <sys/pcb.h>
#include <sys/pte.h>
#include <sys/mmu.h>
#include <sys/machpcb.h>
#include <sys/async.h>
#include <sys/intreg.h>
#include <sys/scb.h>
#include <sys/psr_compat.h>
#include <sys/syscall.h>
#include <sys/machparam.h>
#include <sys/traptrace.h>
#include <vm/hat_sfmmu.h>
#include <sys/archsystm.h>
#include <sys/utrap.h>
#include <sys/clock.h>
#include <sys/intr.h>
#include <sys/fpu/fpu_simulator.h>
#include <vm/seg_spt.h>

/*
 * WARNING: If you add a fast trap handler which can be invoked by a
 * non-privileged user, you may have to use the FAST_TRAP_DONE macro
 * instead of the "done" instruction to return to user mode.  See the
 * comments for the "fast_trap_done" entry point for more information.
 *
 * An alternate FAST_TRAP_DONE_CHK_INTR macro should be used for the
 * cases where you always want to process any pending interrupts before
 * returning to user mode.
 */
#define	FAST_TRAP_DONE		\
	ba,a	fast_trap_done

#define	FAST_TRAP_DONE_CHK_INTR		\
	ba,a	fast_trap_done_chk_intr

/*
 * SPARC V9 Trap Table
 *
 * Most of the trap handlers are made from common building
 * blocks, and some are instantiated multiple times within
 * the trap table. So, I build a bunch of macros, then
 * populate the table using only the macros.
 *
 * Many macros branch to sys_trap.  Its calling convention is:
 *	%g1		kernel trap handler
 *	%g2, %g3	args for above
 *	%g4		desired %pil
 */

#ifdef	TRAPTRACE

/*
 * Tracing macro. Adds two instructions if TRAPTRACE is defined.
 */
#define	TT_TRACE(label)		\
	ba	label		;\
	rd	%pc, %g7
#define	TT_TRACE_INS	2

#define	TT_TRACE_L(label)	\
	ba	label		;\
	rd	%pc, %l4	;\
	clr	%l4
#define	TT_TRACE_L_INS	3

#else

#define	TT_TRACE(label)
#define	TT_TRACE_INS	0

#define	TT_TRACE_L(label)
#define	TT_TRACE_L_INS	0

#endif

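/*
 * A note on the trace hooks above (illustrative, not new behavior):
 * TT_TRACE branches to the given trace routine with the handler's %pc
 * left in %g7 by the delay-slot "rd" (TT_TRACE_L uses %l4), so the
 * routine can record the trap and presumably jump back past the macro.
 * The TT_TRACE_INS/TT_TRACE_L_INS counts feed the SKIP() padding
 * arithmetic in the window handlers below, keeping each handler's size
 * fixed whether or not TRAPTRACE is compiled in.
 */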
/*
 * This first set are funneled to trap() with %tt as the type.
 * Trap will then either panic or send the user a signal.
 */
/*
 * NOT is used for traps that just shouldn't happen.
 * It comes in both single and quadruple flavors.
 */
#if !defined(lint)
	.global	trap
#endif /* !lint */
#define	NOT			\
	TT_TRACE(trace_gen)	;\
	set	trap, %g1	;\
	rdpr	%tt, %g3	;\
	ba,pt	%xcc, sys_trap	;\
	sub	%g0, 1, %g4	;\
	.align	32
#define	NOT4	NOT; NOT; NOT; NOT

#define	NOTP				\
	TT_TRACE(trace_gen)		;\
	ba,pt	%xcc, ptl1_panic	;\
	mov	PTL1_BAD_TRAP, %g1	;\
	.align	32
#define	NOTP4	NOTP; NOTP; NOTP; NOTP


/*
 * BAD is used for trap vectors we don't have a kernel
 * handler for.
 * It also comes in single and quadruple versions.
 */
#define	BAD	NOT
#define	BAD4	NOT4

#define	DONE			\
	done;			\
	.align	32

/*
 * TRAP vectors to the trap() function.
 * Its main use is for user errors.
 */
#if !defined(lint)
	.global	trap
#endif /* !lint */
#define	TRAP(arg)		\
	TT_TRACE(trace_gen)	;\
	set	trap, %g1	;\
	mov	arg, %g3	;\
	ba,pt	%xcc, sys_trap	;\
	sub	%g0, 1, %g4	;\
	.align	32

/*
 * SYSCALL is used for unsupported syscall interfaces (with 'which'
 * set to 'nosys') and legacy support of old SunOS 4.x syscalls (with
 * 'which' set to 'syscall_trap32').
 *
 * The SYSCALL_TRAP* macros are used for syscall entry points.
 * SYSCALL_TRAP is used to support LP64 syscalls and SYSCALL_TRAP32
 * is used to support ILP32.  Each macro can only be used once
 * since they each define a symbol.  The symbols are used as hot patch
 * points by the brand infrastructure to dynamically enable and disable
 * brand syscall interposition.  See the comments around BRAND_CALLBACK
 * and brand_plat_interposition_enable() for more information.
 */
#define	SYSCALL_NOTT(which)	\
	set	(which), %g1	;\
	ba,pt	%xcc, user_trap	;\
	sub	%g0, 1, %g4	;\
	.align	32

#define	SYSCALL(which)		\
	TT_TRACE(trace_gen)	;\
	SYSCALL_NOTT(which)

#define	SYSCALL_TRAP32				\
	TT_TRACE(trace_gen)			;\
	ALTENTRY(syscall_trap32_patch_point)	\
	SYSCALL_NOTT(syscall_trap32)

#define	SYSCALL_TRAP				\
	TT_TRACE(trace_gen)			;\
	ALTENTRY(syscall_trap_patch_point)	\
	SYSCALL_NOTT(syscall_trap)

/*
 * GOTO just jumps to a label.
 * It's used for things that can be fixed without going thru sys_trap.
 */
#define	GOTO(label)		\
	.global	label		;\
	ba,a	label		;\
	.empty			;\
	.align	32

/*
 * GOTO_TT just jumps to a label.
 * correctable ECC error traps at level 0 and 1 will use this macro.
 * It's used for things that can be fixed without going thru sys_trap.
 */
#define	GOTO_TT(label, ttlabel)	\
	.global	label		;\
	TT_TRACE(ttlabel)	;\
	ba,a	label		;\
	.empty			;\
	.align	32

/*
 * Privileged traps
 * Takes breakpoint if privileged, calls trap() if not.
 */
#define	PRIV(label)			\
	rdpr	%tstate, %g1		;\
	btst	TSTATE_PRIV, %g1	;\
	bnz	label			;\
	rdpr	%tt, %g3		;\
	set	trap, %g1		;\
	ba,pt	%xcc, sys_trap		;\
	sub	%g0, 1, %g4		;\
	.align	32


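/*
 * A convention worth noting: every sys_trap-bound macro above passes
 * "sub %g0, 1, %g4", i.e. a desired PIL of -1.  Since -1 is not a real
 * priority level, this in effect asks sys_trap to choose the PIL itself
 * rather than forcing a specific one.
 */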
/*
 * DTrace traps.
 */
#define	DTRACE_PID			\
	.global dtrace_pid_probe	;\
	set	dtrace_pid_probe, %g1	;\
	ba,pt	%xcc, user_trap		;\
	sub	%g0, 1, %g4		;\
	.align	32

#define	DTRACE_RETURN				\
	.global dtrace_return_probe		;\
	set	dtrace_return_probe, %g1	;\
	ba,pt	%xcc, user_trap			;\
	sub	%g0, 1, %g4			;\
	.align	32

/*
 * REGISTER WINDOW MANAGEMENT MACROS
 */

/*
 * various convenient units of padding
 */
#define	SKIP(n)	.skip 4*(n)

/*
 * CLEAN_WINDOW is the simple handler for cleaning a register window.
 */
#define	CLEAN_WINDOW						\
	TT_TRACE_L(trace_win)					;\
	rdpr	%cleanwin, %l0; inc %l0; wrpr %l0, %cleanwin	;\
	clr	%l0; clr %l1; clr %l2; clr %l3			;\
	clr	%l4; clr %l5; clr %l6; clr %l7			;\
	clr	%o0; clr %o1; clr %o2; clr %o3			;\
	clr	%o4; clr %o5; clr %o6; clr %o7			;\
	retry; .align 128

#if !defined(lint)

/*
 * If we get an unresolved tlb miss while in a window handler, the fault
 * handler will resume execution at the last instruction of the window
 * handler, instead of delivering the fault to the kernel.  Spill handlers
 * use this to spill windows into the wbuf.
 *
 * The mixed handler works by checking %sp, and branching to the correct
 * handler.  This is done by branching back to label 1: for 32b frames,
 * or label 2: for 64b frames; which implies the handler order is: 32b,
 * 64b, mixed.  The 1: and 2: labels are offset into the routines to
 * allow the branches' delay slots to contain useful instructions.
 */

/*
 * SPILL_32bit spills a 32-bit-wide kernel register window.  It
 * assumes that the kernel context and the nucleus context are the
 * same.  The stack pointer is required to be eight-byte aligned even
 * though this code only needs it to be four-byte aligned.
 */
#define	SPILL_32bit(tail)			\
	srl	%sp, 0, %sp			;\
1:	st	%l0, [%sp + 0]			;\
	st	%l1, [%sp + 4]			;\
	st	%l2, [%sp + 8]			;\
	st	%l3, [%sp + 12]			;\
	st	%l4, [%sp + 16]			;\
	st	%l5, [%sp + 20]			;\
	st	%l6, [%sp + 24]			;\
	st	%l7, [%sp + 28]			;\
	st	%i0, [%sp + 32]			;\
	st	%i1, [%sp + 36]			;\
	st	%i2, [%sp + 40]			;\
	st	%i3, [%sp + 44]			;\
	st	%i4, [%sp + 48]			;\
	st	%i5, [%sp + 52]			;\
	st	%i6, [%sp + 56]			;\
	st	%i7, [%sp + 60]			;\
	TT_TRACE_L(trace_win)			;\
	saved					;\
	retry					;\
	SKIP(31-19-TT_TRACE_L_INS)		;\
	ba,a,pt	%xcc, fault_32bit_/**/tail	;\
	.empty

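/*
 * The _asi spill/fill variants below stage small offsets in global
 * registers ("mov 4, %g1" and friends) rather than using immediate
 * displacements.  This is because the alternate-space forms
 * (sta/lda/stxa/ldxa) with a literal ASI only accept register-plus-
 * register addresses; an immediate displacement would instead require
 * going through the %asi register.
 */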
/*
 * SPILL_32bit_asi spills a 32-bit-wide register window into a 32-bit
 * wide address space via the designated asi.  It is used to spill
 * non-kernel windows.  The stack pointer is required to be eight-byte
 * aligned even though this code only needs it to be four-byte
 * aligned.
 */
#define	SPILL_32bit_asi(asi_num, tail)		\
	srl	%sp, 0, %sp			;\
1:	sta	%l0, [%sp + %g0]asi_num		;\
	mov	4, %g1				;\
	sta	%l1, [%sp + %g1]asi_num		;\
	mov	8, %g2				;\
	sta	%l2, [%sp + %g2]asi_num		;\
	mov	12, %g3				;\
	sta	%l3, [%sp + %g3]asi_num		;\
	add	%sp, 16, %g4			;\
	sta	%l4, [%g4 + %g0]asi_num		;\
	sta	%l5, [%g4 + %g1]asi_num		;\
	sta	%l6, [%g4 + %g2]asi_num		;\
	sta	%l7, [%g4 + %g3]asi_num		;\
	add	%g4, 16, %g4			;\
	sta	%i0, [%g4 + %g0]asi_num		;\
	sta	%i1, [%g4 + %g1]asi_num		;\
	sta	%i2, [%g4 + %g2]asi_num		;\
	sta	%i3, [%g4 + %g3]asi_num		;\
	add	%g4, 16, %g4			;\
	sta	%i4, [%g4 + %g0]asi_num		;\
	sta	%i5, [%g4 + %g1]asi_num		;\
	sta	%i6, [%g4 + %g2]asi_num		;\
	sta	%i7, [%g4 + %g3]asi_num		;\
	TT_TRACE_L(trace_win)			;\
	saved					;\
	retry					;\
	SKIP(31-25-TT_TRACE_L_INS)		;\
	ba,a,pt	%xcc, fault_32bit_/**/tail	;\
	.empty

#define	SPILL_32bit_tt1(asi_num, tail)		\
	ba,a,pt	%xcc, fault_32bit_/**/tail	;\
	.empty					;\
	.align	128


/*
 * FILL_32bit fills a 32-bit-wide kernel register window.  It assumes
 * that the kernel context and the nucleus context are the same.  The
 * stack pointer is required to be eight-byte aligned even though this
 * code only needs it to be four-byte aligned.
 */
#define	FILL_32bit(tail)			\
	srl	%sp, 0, %sp			;\
1:	TT_TRACE_L(trace_win)			;\
	ld	[%sp + 0], %l0			;\
	ld	[%sp + 4], %l1			;\
	ld	[%sp + 8], %l2			;\
	ld	[%sp + 12], %l3			;\
	ld	[%sp + 16], %l4			;\
	ld	[%sp + 20], %l5			;\
	ld	[%sp + 24], %l6			;\
	ld	[%sp + 28], %l7			;\
	ld	[%sp + 32], %i0			;\
	ld	[%sp + 36], %i1			;\
	ld	[%sp + 40], %i2			;\
	ld	[%sp + 44], %i3			;\
	ld	[%sp + 48], %i4			;\
	ld	[%sp + 52], %i5			;\
	ld	[%sp + 56], %i6			;\
	ld	[%sp + 60], %i7			;\
	restored				;\
	retry					;\
	SKIP(31-19-TT_TRACE_L_INS)		;\
	ba,a,pt	%xcc, fault_32bit_/**/tail	;\
	.empty

/*
 * FILL_32bit_asi fills a 32-bit-wide register window from a 32-bit
 * wide address space via the designated asi.  It is used to fill
 * non-kernel windows.  The stack pointer is required to be eight-byte
 * aligned even though this code only needs it to be four-byte
 * aligned.
 */
#define	FILL_32bit_asi(asi_num, tail)		\
	srl	%sp, 0, %sp			;\
1:	TT_TRACE_L(trace_win)			;\
	mov	4, %g1				;\
	lda	[%sp + %g0]asi_num, %l0		;\
	mov	8, %g2				;\
	lda	[%sp + %g1]asi_num, %l1		;\
	mov	12, %g3				;\
	lda	[%sp + %g2]asi_num, %l2		;\
	lda	[%sp + %g3]asi_num, %l3		;\
	add	%sp, 16, %g4			;\
	lda	[%g4 + %g0]asi_num, %l4		;\
	lda	[%g4 + %g1]asi_num, %l5		;\
	lda	[%g4 + %g2]asi_num, %l6		;\
	lda	[%g4 + %g3]asi_num, %l7		;\
	add	%g4, 16, %g4			;\
	lda	[%g4 + %g0]asi_num, %i0		;\
	lda	[%g4 + %g1]asi_num, %i1		;\
	lda	[%g4 + %g2]asi_num, %i2		;\
	lda	[%g4 + %g3]asi_num, %i3		;\
	add	%g4, 16, %g4			;\
	lda	[%g4 + %g0]asi_num, %i4		;\
	lda	[%g4 + %g1]asi_num, %i5		;\
	lda	[%g4 + %g2]asi_num, %i6		;\
	lda	[%g4 + %g3]asi_num, %i7		;\
	restored				;\
	retry					;\
	SKIP(31-25-TT_TRACE_L_INS)		;\
	ba,a,pt	%xcc, fault_32bit_/**/tail	;\
	.empty

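/*
 * Padding arithmetic, worked through for the handlers above: each
 * spill/fill handler must fit a fixed 32-instruction (128-byte) slot.
 * SPILL_32bit, for example, is 19 instructions plus the optional trace
 * hook, so SKIP(31-19-TT_TRACE_L_INS) pads the body out to instruction
 * 31, leaving the trailing "ba,a" to the fault_32bit_<tail> handler as
 * the slot's 32nd and last instruction -- exactly where an unresolved
 * TLB miss resumes execution (see the comment ahead of SPILL_32bit).
 */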
/*
 * SPILL_64bit spills a 64-bit-wide kernel register window.  It
 * assumes that the kernel context and the nucleus context are the
 * same.  The stack pointer is required to be eight-byte aligned.
 */
#define	SPILL_64bit(tail)			\
2:	stx	%l0, [%sp + V9BIAS64 + 0]	;\
	stx	%l1, [%sp + V9BIAS64 + 8]	;\
	stx	%l2, [%sp + V9BIAS64 + 16]	;\
	stx	%l3, [%sp + V9BIAS64 + 24]	;\
	stx	%l4, [%sp + V9BIAS64 + 32]	;\
	stx	%l5, [%sp + V9BIAS64 + 40]	;\
	stx	%l6, [%sp + V9BIAS64 + 48]	;\
	stx	%l7, [%sp + V9BIAS64 + 56]	;\
	stx	%i0, [%sp + V9BIAS64 + 64]	;\
	stx	%i1, [%sp + V9BIAS64 + 72]	;\
	stx	%i2, [%sp + V9BIAS64 + 80]	;\
	stx	%i3, [%sp + V9BIAS64 + 88]	;\
	stx	%i4, [%sp + V9BIAS64 + 96]	;\
	stx	%i5, [%sp + V9BIAS64 + 104]	;\
	stx	%i6, [%sp + V9BIAS64 + 112]	;\
	stx	%i7, [%sp + V9BIAS64 + 120]	;\
	TT_TRACE_L(trace_win)			;\
	saved					;\
	retry					;\
	SKIP(31-18-TT_TRACE_L_INS)		;\
	ba,a,pt	%xcc, fault_64bit_/**/tail	;\
	.empty

#define	SPILL_64bit_ktt1(tail)			\
	ba,a,pt	%xcc, fault_64bit_/**/tail	;\
	.empty					;\
	.align	128

#define	SPILL_mixed_ktt1(tail)			\
	btst	1, %sp				;\
	bz,a,pt	%xcc, fault_32bit_/**/tail	;\
	srl	%sp, 0, %sp			;\
	ba,a,pt	%xcc, fault_64bit_/**/tail	;\
	.empty					;\
	.align	128

/*
 * SPILL_64bit_asi spills a 64-bit-wide register window into a 64-bit
 * wide address space via the designated asi.  It is used to spill
 * non-kernel windows.  The stack pointer is required to be eight-byte
 * aligned.
 */
#define	SPILL_64bit_asi(asi_num, tail)		\
	mov	0 + V9BIAS64, %g1		;\
2:	stxa	%l0, [%sp + %g1]asi_num		;\
	mov	8 + V9BIAS64, %g2		;\
	stxa	%l1, [%sp + %g2]asi_num		;\
	mov	16 + V9BIAS64, %g3		;\
	stxa	%l2, [%sp + %g3]asi_num		;\
	mov	24 + V9BIAS64, %g4		;\
	stxa	%l3, [%sp + %g4]asi_num		;\
	add	%sp, 32, %g5			;\
	stxa	%l4, [%g5 + %g1]asi_num		;\
	stxa	%l5, [%g5 + %g2]asi_num		;\
	stxa	%l6, [%g5 + %g3]asi_num		;\
	stxa	%l7, [%g5 + %g4]asi_num		;\
	add	%g5, 32, %g5			;\
	stxa	%i0, [%g5 + %g1]asi_num		;\
	stxa	%i1, [%g5 + %g2]asi_num		;\
	stxa	%i2, [%g5 + %g3]asi_num		;\
	stxa	%i3, [%g5 + %g4]asi_num		;\
	add	%g5, 32, %g5			;\
	stxa	%i4, [%g5 + %g1]asi_num		;\
	stxa	%i5, [%g5 + %g2]asi_num		;\
	stxa	%i6, [%g5 + %g3]asi_num		;\
	stxa	%i7, [%g5 + %g4]asi_num		;\
	TT_TRACE_L(trace_win)			;\
	saved					;\
	retry					;\
	SKIP(31-25-TT_TRACE_L_INS)		;\
	ba,a,pt	%xcc, fault_64bit_/**/tail	;\
	.empty

#define	SPILL_64bit_tt1(asi_num, tail)		\
	ba,a,pt	%xcc, fault_64bit_/**/tail	;\
	.empty					;\
	.align	128

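/*
 * The V9BIAS64 offsets used by the 64-bit handlers are the SPARC V9 ABI
 * stack bias: a 64-bit frame's save area lives at %sp + 2047, so every
 * stx/ldx above adds the bias back in.  32-bit frames carry no bias,
 * which is why the 32-bit handlers address [%sp + 0] directly.
 */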
/*
 * FILL_64bit fills a 64-bit-wide kernel register window.  It assumes
 * that the kernel context and the nucleus context are the same.  The
 * stack pointer is required to be eight-byte aligned.
 */
#define	FILL_64bit(tail)			\
2:	TT_TRACE_L(trace_win)			;\
	ldx	[%sp + V9BIAS64 + 0], %l0	;\
	ldx	[%sp + V9BIAS64 + 8], %l1	;\
	ldx	[%sp + V9BIAS64 + 16], %l2	;\
	ldx	[%sp + V9BIAS64 + 24], %l3	;\
	ldx	[%sp + V9BIAS64 + 32], %l4	;\
	ldx	[%sp + V9BIAS64 + 40], %l5	;\
	ldx	[%sp + V9BIAS64 + 48], %l6	;\
	ldx	[%sp + V9BIAS64 + 56], %l7	;\
	ldx	[%sp + V9BIAS64 + 64], %i0	;\
	ldx	[%sp + V9BIAS64 + 72], %i1	;\
	ldx	[%sp + V9BIAS64 + 80], %i2	;\
	ldx	[%sp + V9BIAS64 + 88], %i3	;\
	ldx	[%sp + V9BIAS64 + 96], %i4	;\
	ldx	[%sp + V9BIAS64 + 104], %i5	;\
	ldx	[%sp + V9BIAS64 + 112], %i6	;\
	ldx	[%sp + V9BIAS64 + 120], %i7	;\
	restored				;\
	retry					;\
	SKIP(31-18-TT_TRACE_L_INS)		;\
	ba,a,pt	%xcc, fault_64bit_/**/tail	;\
	.empty

/*
 * FILL_64bit_asi fills a 64-bit-wide register window from a 64-bit
 * wide address space via the designated asi.  It is used to fill
 * non-kernel windows.  The stack pointer is required to be eight-byte
 * aligned.
 */
#define	FILL_64bit_asi(asi_num, tail)		\
	mov	V9BIAS64 + 0, %g1		;\
2:	TT_TRACE_L(trace_win)			;\
	ldxa	[%sp + %g1]asi_num, %l0		;\
	mov	V9BIAS64 + 8, %g2		;\
	ldxa	[%sp + %g2]asi_num, %l1		;\
	mov	V9BIAS64 + 16, %g3		;\
	ldxa	[%sp + %g3]asi_num, %l2		;\
	mov	V9BIAS64 + 24, %g4		;\
	ldxa	[%sp + %g4]asi_num, %l3		;\
	add	%sp, 32, %g5			;\
	ldxa	[%g5 + %g1]asi_num, %l4		;\
	ldxa	[%g5 + %g2]asi_num, %l5		;\
	ldxa	[%g5 + %g3]asi_num, %l6		;\
	ldxa	[%g5 + %g4]asi_num, %l7		;\
	add	%g5, 32, %g5			;\
	ldxa	[%g5 + %g1]asi_num, %i0		;\
	ldxa	[%g5 + %g2]asi_num, %i1		;\
	ldxa	[%g5 + %g3]asi_num, %i2		;\
	ldxa	[%g5 + %g4]asi_num, %i3		;\
	add	%g5, 32, %g5			;\
	ldxa	[%g5 + %g1]asi_num, %i4		;\
	ldxa	[%g5 + %g2]asi_num, %i5		;\
	ldxa	[%g5 + %g3]asi_num, %i6		;\
	ldxa	[%g5 + %g4]asi_num, %i7		;\
	restored				;\
	retry					;\
	SKIP(31-25-TT_TRACE_L_INS)		;\
	ba,a,pt	%xcc, fault_64bit_/**/tail	;\
	.empty


#endif /* !lint */

/*
 * SPILL_mixed spills either size window, depending on
 * whether %sp is even or odd, to a 32-bit address space.
 * This may only be used in conjunction with SPILL_32bit/
 * SPILL_64bit.
 * Clear upper 32 bits of %sp if it is odd.
 * We won't need to clear them in 64 bit kernel.
 */
#define	SPILL_mixed		\
	btst	1, %sp		;\
	bz,a,pt	%xcc, 1b	;\
	srl	%sp, 0, %sp	;\
	ba,pt	%xcc, 2b	;\
	nop			;\
	.align	128

/*
 * FILL_mixed(ASI) fills either size window, depending on
 * whether %sp is even or odd, from a 32-bit address space.
 * This may only be used in conjunction with FILL_32bit/
 * FILL_64bit.  New versions of FILL_mixed_{tt1,asi} would be
 * needed for use with FILL_{32,64}bit_{tt1,asi}.  Particular
 * attention should be paid to the instructions that belong
 * in the delay slots of the branches depending on the type
 * of fill handler being branched to.
 * Clear upper 32 bits of %sp if it is odd.
 * We won't need to clear them in 64 bit kernel.
 */
#define	FILL_mixed		\
	btst	1, %sp		;\
	bz,a,pt	%xcc, 1b	;\
	srl	%sp, 0, %sp	;\
	ba,pt	%xcc, 2b	;\
	nop			;\
	.align	128


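/*
 * The "btst 1, %sp" test in the mixed handlers works because of the
 * stack bias noted above: a 64-bit process runs with %sp equal to the
 * real (16-byte aligned) stack address minus the odd bias 2047, so a
 * 64-bit %sp is always odd and a 32-bit %sp always even.
 */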
/*
 * SPILL_32clean/SPILL_64clean spill 32-bit and 64-bit register windows,
 * respectively, into the address space via the designated asi.  The
 * unbiased stack pointer is required to be eight-byte aligned (even for
 * the 32-bit case even though this code does not require such strict
 * alignment).
 *
 * With SPARC v9 the spill trap takes precedence over the cleanwin trap
 * so when cansave == 0, canrestore == 6, and cleanwin == 6 the next save
 * will cause cwp + 2 to be spilled but will not clean cwp + 1.  That
 * window may contain kernel data so in user_rtt we set wstate to call
 * these spill handlers on the first user spill trap.  These handlers then
 * spill the appropriate window but also back up a window and clean the
 * window that didn't get a cleanwin trap.
 */
#define	SPILL_32clean(asi_num, tail)		\
	srl	%sp, 0, %sp			;\
	sta	%l0, [%sp + %g0]asi_num		;\
	mov	4, %g1				;\
	sta	%l1, [%sp + %g1]asi_num		;\
	mov	8, %g2				;\
	sta	%l2, [%sp + %g2]asi_num		;\
	mov	12, %g3				;\
	sta	%l3, [%sp + %g3]asi_num		;\
	add	%sp, 16, %g4			;\
	sta	%l4, [%g4 + %g0]asi_num		;\
	sta	%l5, [%g4 + %g1]asi_num		;\
	sta	%l6, [%g4 + %g2]asi_num		;\
	sta	%l7, [%g4 + %g3]asi_num		;\
	add	%g4, 16, %g4			;\
	sta	%i0, [%g4 + %g0]asi_num		;\
	sta	%i1, [%g4 + %g1]asi_num		;\
	sta	%i2, [%g4 + %g2]asi_num		;\
	sta	%i3, [%g4 + %g3]asi_num		;\
	add	%g4, 16, %g4			;\
	sta	%i4, [%g4 + %g0]asi_num		;\
	sta	%i5, [%g4 + %g1]asi_num		;\
	sta	%i6, [%g4 + %g2]asi_num		;\
	sta	%i7, [%g4 + %g3]asi_num		;\
	TT_TRACE_L(trace_win)			;\
	b	.spill_clean			;\
	mov	WSTATE_USER32, %g7		;\
	SKIP(31-25-TT_TRACE_L_INS)		;\
	ba,a,pt	%xcc, fault_32bit_/**/tail	;\
	.empty

#define	SPILL_64clean(asi_num, tail)		\
	mov	0 + V9BIAS64, %g1		;\
	stxa	%l0, [%sp + %g1]asi_num		;\
	mov	8 + V9BIAS64, %g2		;\
	stxa	%l1, [%sp + %g2]asi_num		;\
	mov	16 + V9BIAS64, %g3		;\
	stxa	%l2, [%sp + %g3]asi_num		;\
	mov	24 + V9BIAS64, %g4		;\
	stxa	%l3, [%sp + %g4]asi_num		;\
	add	%sp, 32, %g5			;\
	stxa	%l4, [%g5 + %g1]asi_num		;\
	stxa	%l5, [%g5 + %g2]asi_num		;\
	stxa	%l6, [%g5 + %g3]asi_num		;\
	stxa	%l7, [%g5 + %g4]asi_num		;\
	add	%g5, 32, %g5			;\
	stxa	%i0, [%g5 + %g1]asi_num		;\
	stxa	%i1, [%g5 + %g2]asi_num		;\
	stxa	%i2, [%g5 + %g3]asi_num		;\
	stxa	%i3, [%g5 + %g4]asi_num		;\
	add	%g5, 32, %g5			;\
	stxa	%i4, [%g5 + %g1]asi_num		;\
	stxa	%i5, [%g5 + %g2]asi_num		;\
	stxa	%i6, [%g5 + %g3]asi_num		;\
	stxa	%i7, [%g5 + %g4]asi_num		;\
	TT_TRACE_L(trace_win)			;\
	b	.spill_clean			;\
	mov	WSTATE_USER64, %g7		;\
	SKIP(31-25-TT_TRACE_L_INS)		;\
	ba,a,pt	%xcc, fault_64bit_/**/tail	;\
	.empty


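/*
 * Both clean-spill handlers above exit through .spill_clean (defined
 * later in this file) with the replacement %wstate value staged in %g7;
 * .spill_clean scrubs the window that never took its cleanwin trap and
 * then restores the normal user wstate, as described in the big comment
 * ahead of SPILL_32clean.
 */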
/*
 * Floating point disabled.
 */
#define	FP_DISABLED_TRAP		\
	TT_TRACE(trace_gen)		;\
	ba,pt	%xcc, .fp_disabled	;\
	nop				;\
	.align	32

/*
 * Floating point exceptions.
 */
#define	FP_IEEE_TRAP				\
	TT_TRACE(trace_gen)			;\
	ba,pt	%xcc, .fp_ieee_exception	;\
	nop					;\
	.align	32

#define	FP_TRAP				\
	TT_TRACE(trace_gen)		;\
	ba,pt	%xcc, .fp_exception	;\
	nop				;\
	.align	32

#if !defined(lint)

/*
 * ECACHE_ECC error traps at level 0 and level 1
 */
#define	ECACHE_ECC(table_name)	\
	.global	table_name	;\
table_name:			;\
	membar	#Sync		;\
	set	trap, %g1	;\
	rdpr	%tt, %g3	;\
	ba,pt	%xcc, sys_trap	;\
	sub	%g0, 1, %g4	;\
	.align	32

#endif /* !lint */

/*
 * illegal instruction trap
 */
#define	ILLTRAP_INSTR			\
	membar	#Sync			;\
	TT_TRACE(trace_gen)		;\
	or	%g0, P_UTRAP4, %g2	;\
	or	%g0, T_UNIMP_INSTR, %g3	;\
	sethi	%hi(.check_v9utrap), %g4	;\
	jmp	%g4 + %lo(.check_v9utrap)	;\
	nop				;\
	.align	32

/*
 * tag overflow trap
 */
#define	TAG_OVERFLOW				\
	TT_TRACE(trace_gen)			;\
	or	%g0, P_UTRAP10, %g2		;\
	or	%g0, T_TAG_OVERFLOW, %g3	;\
	sethi	%hi(.check_v9utrap), %g4	;\
	jmp	%g4 + %lo(.check_v9utrap)	;\
	nop					;\
	.align	32

/*
 * divide by zero trap
 */
#define	DIV_BY_ZERO				\
	TT_TRACE(trace_gen)			;\
	or	%g0, P_UTRAP11, %g2		;\
	or	%g0, T_IDIV0, %g3		;\
	sethi	%hi(.check_v9utrap), %g4	;\
	jmp	%g4 + %lo(.check_v9utrap)	;\
	nop					;\
	.align	32

/*
 * trap instruction for V9 user trap handlers
 */
#define	TRAP_INSTR				\
	TT_TRACE(trace_gen)			;\
	or	%g0, T_SOFTWARE_TRAP, %g3	;\
	sethi	%hi(.check_v9utrap), %g4	;\
	jmp	%g4 + %lo(.check_v9utrap)	;\
	nop					;\
	.align	32
#define	TRP4	TRAP_INSTR; TRAP_INSTR; TRAP_INSTR; TRAP_INSTR

/*
 * LEVEL_INTERRUPT is for level N interrupts.
 * VECTOR_INTERRUPT is for the vector trap.
 */
#define	LEVEL_INTERRUPT(level)		\
	.global	tt_pil/**/level		;\
tt_pil/**/level:			;\
	ba,pt	%xcc, pil_interrupt	;\
	mov	level, %g4		;\
	.align	32

#define	LEVEL14_INTERRUPT	\
	ba	pil14_interrupt	;\
	mov	PIL_14, %g4	;\
	.align	32

#define	LEVEL15_INTERRUPT	\
	ba	pil15_interrupt	;\
	mov	PIL_15, %g4	;\
	.align	32

#define	CPU_MONDO			\
	ba,a,pt	%xcc, cpu_mondo		;\
	.align	32

#define	DEV_MONDO			\
	ba,a,pt	%xcc, dev_mondo		;\
	.align	32

/*
 * We take over the rtba after we set our trap table and
 * fault status area.  The watchdog reset trap is now handled by the OS.
 */
#define	WATCHDOG_RESET				\
	mov	PTL1_BAD_WATCHDOG, %g1		;\
	ba,a,pt	%xcc, .watchdog_trap		;\
	.align	32

/*
 * RED is for traps that use the red mode handler.
 * We should never see these either.
 */
#define	RED					\
	mov	PTL1_BAD_RED, %g1		;\
	ba,a,pt	%xcc, .watchdog_trap		;\
	.align	32


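/*
 * ILLTRAP_INSTR, TAG_OVERFLOW, DIV_BY_ZERO and TRAP_INSTR above share a
 * dispatch pattern: they stage a p_utraps[] index in %g2 (P_UTRAP4,
 * P_UTRAP10, ...) and a trap type in %g3, then jump to .check_v9utrap
 * (later in this file), which hands the trap to a registered V9 user
 * trap handler if one exists and otherwise falls back to trap().
 * TRAP_INSTR stages only the trap type; for software traps the index is
 * computed in .check_v9utrap from the live %tt value.
 */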
/*
 * MMU Trap Handlers.
 */

/*
 * synthesize for trap(): SFSR in %g3
 */
#define	IMMU_EXCEPTION				\
	MMU_FAULT_STATUS_AREA(%g3)		;\
	rdpr	%tpc, %g2			;\
	ldx	[%g3 + MMFSA_I_TYPE], %g1	;\
	ldx	[%g3 + MMFSA_I_CTX], %g3	;\
	sllx	%g3, SFSR_CTX_SHIFT, %g3	;\
	or	%g3, %g1, %g3			;\
	ba,pt	%xcc, .mmu_exception_end	;\
	mov	T_INSTR_EXCEPTION, %g1		;\
	.align	32

/*
 * synthesize for trap(): TAG_ACCESS in %g2, SFSR in %g3
 */
#define	DMMU_EXCEPTION				\
	ba,a,pt	%xcc, .dmmu_exception		;\
	.align	32

/*
 * synthesize for trap(): SFAR in %g2, SFSR in %g3
 */
#define	DMMU_EXC_AG_PRIV					\
	MMU_FAULT_STATUS_AREA(%g3)				;\
	ldx	[%g3 + MMFSA_D_ADDR], %g2			;\
	/* Fault type not available in MMU fault status area */	;\
	mov	MMFSA_F_PRVACT, %g1				;\
	ldx	[%g3 + MMFSA_D_CTX], %g3			;\
	sllx	%g3, SFSR_CTX_SHIFT, %g3			;\
	ba,pt	%xcc, .mmu_priv_exception			;\
	or	%g3, %g1, %g3					;\
	.align	32

/*
 * synthesize for trap(): SFAR in %g2, SFSR in %g3
 */
#define	DMMU_EXC_AG_NOT_ALIGNED					\
	MMU_FAULT_STATUS_AREA(%g3)				;\
	ldx	[%g3 + MMFSA_D_ADDR], %g2			;\
	/* Fault type not available in MMU fault status area */	;\
	mov	MMFSA_F_UNALIGN, %g1				;\
	ldx	[%g3 + MMFSA_D_CTX], %g3			;\
	sllx	%g3, SFSR_CTX_SHIFT, %g3			;\
	ba,pt	%xcc, .mmu_exception_not_aligned		;\
	or	%g3, %g1, %g3	/* SFSR */			;\
	.align	32
/*
 * SPARC V9 IMPL. DEP. #109(1) and (2) and #110(1) and (2)
 */

/*
 * synthesize for trap(): SFAR in %g2, SFSR in %g3
 */
#define	DMMU_EXC_LDDF_NOT_ALIGNED			\
	ba,a,pt	%xcc, .dmmu_exc_lddf_not_aligned	;\
	.align	32
/*
 * synthesize for trap(): SFAR in %g2, SFSR in %g3
 */
#define	DMMU_EXC_STDF_NOT_ALIGNED			\
	ba,a,pt	%xcc, .dmmu_exc_stdf_not_aligned	;\
	.align	32

#if defined(cscope)
/*
 * Define labels to direct cscope quickly to labels that
 * are generated by macro expansion of DTLB_MISS().
 */
	.global	tt0_dtlbmiss
tt0_dtlbmiss:
	.global	tt1_dtlbmiss
tt1_dtlbmiss:
	nop
#endif

/*
 * Data miss handler (must be exactly 32 instructions)
 *
 * This handler is invoked only if the hypervisor has been instructed
 * not to do any TSB walk.
 *
 * Kernel and invalid context cases are handled by the sfmmu_kdtlb_miss
 * handler.
 *
 * User TLB miss handling depends upon whether a user process has one or
 * two TSBs.  User TSB information (physical base and size code) is kept
 * in two dedicated scratchpad registers.  Absence of a user TSB (primarily
 * the second TSB) is indicated by a negative value (-1) in that register.
 */
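/*
 * Dispatch logic of DTLB_MISS below, as straight-line pseudocode
 * (illustrative only; the real code is the macro itself):
 *
 *	if (ctxtype <= INVALID_CONTEXT)		! kernel (0) or invalid (1)
 *		goto sfmmu_kdtlb_miss;
 *	if (utsbreg2 >= 0)			! 2nd TSB configured
 *		goto sfmmu_udtlb_slowpath;
 *	goto sfmmu_udtlb_fastpath;		! single TSB
 */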

/*
 * synthesize for miss handler: pseudo-tag access in %g2 (with context "type"
 * (0=kernel, 1=invalid, or 2=user) rather than context ID)
 */
#define	DTLB_MISS(table_name)						;\
	.global	table_name/**/_dtlbmiss					;\
table_name/**/_dtlbmiss:						;\
	GET_MMU_D_PTAGACC_CTXTYPE(%g2, %g3)	/* 8 instr */		;\
	cmp	%g3, INVALID_CONTEXT					;\
	ble,pn	%xcc, sfmmu_kdtlb_miss					;\
	srlx	%g2, TAG_VALO_SHIFT, %g7	/* g7 = tsb tag */	;\
	mov	SCRATCHPAD_UTSBREG2, %g1				;\
	ldxa	[%g1]ASI_SCRATCHPAD, %g1	/* get 2nd tsbreg */	;\
	brgez,pn %g1, sfmmu_udtlb_slowpath	/* branch if 2 TSBs */	;\
	nop								;\
	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)	/* 11 instr */		;\
	ba,pt	%xcc, sfmmu_udtlb_fastpath	/* no 4M TSB, miss */	;\
	srlx	%g2, TAG_VALO_SHIFT, %g7	/* g7 = tsb tag */	;\
	.align	128


#if defined(cscope)
/*
 * Define labels to direct cscope quickly to labels that
 * are generated by macro expansion of ITLB_MISS().
 */
	.global	tt0_itlbmiss
tt0_itlbmiss:
	.global	tt1_itlbmiss
tt1_itlbmiss:
	nop
#endif

/*
 * Instruction miss handler.
 *
 * This handler is invoked only if the hypervisor has been instructed
 * not to do any TSB walk.
 *
 * ldda instructions will have their ASI patched
 * by sfmmu_patch_ktsb at runtime.
 * MUST be EXACTLY 32 instructions or we'll break.
 */

/*
 * synthesize for miss handler: TAG_ACCESS in %g2 (with context "type"
 * (0=kernel, 1=invalid, or 2=user) rather than context ID)
 */
#define	ITLB_MISS(table_name)						\
	.global	table_name/**/_itlbmiss					;\
table_name/**/_itlbmiss:						;\
	GET_MMU_I_PTAGACC_CTXTYPE(%g2, %g3)	/* 8 instr */		;\
	cmp	%g3, INVALID_CONTEXT					;\
	ble,pn	%xcc, sfmmu_kitlb_miss					;\
	srlx	%g2, TAG_VALO_SHIFT, %g7	/* g7 = tsb tag */	;\
	mov	SCRATCHPAD_UTSBREG2, %g1				;\
	ldxa	[%g1]ASI_SCRATCHPAD, %g1	/* get 2nd tsbreg */	;\
	brgez,pn %g1, sfmmu_uitlb_slowpath	/* branch if 2 TSBs */	;\
	nop								;\
	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)	/* 11 instr */		;\
	ba,pt	%xcc, sfmmu_uitlb_fastpath	/* no 4M TSB, miss */	;\
	srlx	%g2, TAG_VALO_SHIFT, %g7	/* g7 = tsb tag */	;\
	.align	128

#define	DTSB_MISS	\
	GOTO_TT(sfmmu_slow_dmmu_miss, trace_dmmu)

#define	ITSB_MISS	\
	GOTO_TT(sfmmu_slow_immu_miss, trace_immu)

/*
 * This macro is the first level handler for fast protection faults.
 * It first demaps the tlb entry which generated the fault and then
 * attempts to set the modify bit on the hash.  It needs to be
 * exactly 32 instructions.
 */
/*
 * synthesize for miss handler: TAG_ACCESS in %g2 (with context "type"
 * (0=kernel, 1=invalid, or 2=user) rather than context ID)
 */
#define	DTLB_PROT							\
	GET_MMU_D_PTAGACC_CTXTYPE(%g2, %g3)	/* 8 instr */		;\
	/*								;\
	 * g2 = pseudo-tag access register (ctx type rather than ctx ID) ;\
	 * g3 = ctx type (0, 1, or 2)					;\
	 */								;\
	TT_TRACE(trace_dataprot)	/* 2 instr ifdef TRAPTRACE */	;\
	/* clobbers g1 and g6 XXXQ? */					;\
	brnz,pt	%g3, sfmmu_uprot_trap	/* user trap */			;\
	nop								;\
	ba,a,pt	%xcc, sfmmu_kprot_trap	/* kernel trap */		;\
	.align	128

#define	DMMU_EXCEPTION_TL1		;\
	ba,a,pt	%xcc, mmu_trap_tl1	;\
	.align	32

#define	MISALIGN_ADDR_TL1		;\
	ba,a,pt	%xcc, mmu_trap_tl1	;\
	.align	32

/*
 * Trace a tsb hit
 * g1 = tsbe pointer (in/clobbered)
 * g2 = tag access register (in)
 * g3 - g4 = scratch (clobbered)
 * g5 = tsbe data (in)
 * g6 = scratch (clobbered)
 * g7 = pc we jumped here from (in)
 * ttextra = value to OR in to trap type (%tt) (in)
 */
#ifdef TRAPTRACE
#define	TRACE_TSBHIT(ttextra)						\
	membar	#Sync							;\
	sethi	%hi(FLUSH_ADDR), %g6					;\
	flush	%g6							;\
	TRACE_PTR(%g3, %g6)						;\
	GET_TRACE_TICK(%g6, %g4)					;\
	stxa	%g6, [%g3 + TRAP_ENT_TICK]%asi				;\
	stna	%g2, [%g3 + TRAP_ENT_SP]%asi	/* tag access */	;\
	stna	%g5, [%g3 + TRAP_ENT_F1]%asi	/* tsb data */		;\
	rdpr	%tnpc, %g6						;\
	stna	%g6, [%g3 + TRAP_ENT_F2]%asi				;\
	stna	%g1, [%g3 + TRAP_ENT_F3]%asi	/* tsb pointer */	;\
	stna	%g0, [%g3 + TRAP_ENT_F4]%asi				;\
	rdpr	%tpc, %g6						;\
	stna	%g6, [%g3 + TRAP_ENT_TPC]%asi				;\
	TRACE_SAVE_TL_GL_REGS(%g3, %g6)					;\
	rdpr	%tt, %g6						;\
	or	%g6, (ttextra), %g1					;\
	stha	%g1, [%g3 + TRAP_ENT_TT]%asi				;\
	MMU_FAULT_STATUS_AREA(%g4)					;\
	mov	MMFSA_D_ADDR, %g1					;\
	cmp	%g6, FAST_IMMU_MISS_TT					;\
	move	%xcc, MMFSA_I_ADDR, %g1					;\
	cmp	%g6, T_INSTR_MMU_MISS					;\
	move	%xcc, MMFSA_I_ADDR, %g1					;\
	ldx	[%g4 + %g1], %g1					;\
	stxa	%g1, [%g3 + TRAP_ENT_TSTATE]%asi /* fault addr */	;\
	mov	MMFSA_D_CTX, %g1					;\
	cmp	%g6, FAST_IMMU_MISS_TT					;\
	move	%xcc, MMFSA_I_CTX, %g1					;\
	cmp	%g6, T_INSTR_MMU_MISS					;\
	move	%xcc, MMFSA_I_CTX, %g1					;\
	ldx	[%g4 + %g1], %g1					;\
	stna	%g1, [%g3 + TRAP_ENT_TR]%asi				;\
	TRACE_NEXT(%g3, %g4, %g6)
#else
#define	TRACE_TSBHIT(ttextra)
#endif


#if defined(lint)

struct scb	trap_table;
struct scb	scb;		/* trap_table/scb are the same object */

#else /* lint */

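/*
 * Sizing note for the table that follows: a V9 trap vector is eight
 * instructions (32 bytes), which is what the ".align 32" at the end of
 * most handler macros preserves.  Window spill/fill and the TLB miss
 * handlers own four consecutive vectors apiece, hence their ".align 128"
 * and the "must be exactly 32 instructions" constraints noted earlier.
 */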
/*
 * =======================================================================
 * SPARC V9 TRAP TABLE
 *
 * The trap table is divided into two halves: the first half is used when
 * taking traps when TL=0; the second half is used when taking traps from
 * TL>0. Note that handlers in the second half of the table might not be able
 * to make the same assumptions as handlers in the first half of the table.
 *
 * Worst case trap nesting so far:
 *
 *	at TL=0 client issues software trap requesting service
 *	at TL=1 nucleus wants a register window
 *	at TL=2 register window clean/spill/fill takes a TLB miss
 *	at TL=3 processing TLB miss
 *	at TL=4 handle asynchronous error
 *
 * Note that a trap from TL=4 to TL=5 places Spitfire in "RED mode".
 *
 * =======================================================================
 */
	.section ".text"
	.align	4
	.global	trap_table, scb, trap_table0, trap_table1, etrap_table
	.type	trap_table, #object
	.type	trap_table0, #object
	.type	trap_table1, #object
	.type	scb, #object
trap_table:
scb:
trap_table0:
	/* hardware traps */
	NOT;				/* 000	reserved */
	RED;				/* 001	power on reset */
	WATCHDOG_RESET;			/* 002	watchdog reset */
	RED;				/* 003	externally initiated reset */
	RED;				/* 004	software initiated reset */
	RED;				/* 005	red mode exception */
	NOT; NOT;			/* 006 - 007 reserved */
	IMMU_EXCEPTION;			/* 008	instruction access exception */
	ITSB_MISS;			/* 009	instruction access MMU miss */
	NOT;				/* 00A	reserved */
	NOT; NOT4;			/* 00B - 00F reserved */
	ILLTRAP_INSTR;			/* 010	illegal instruction */
	TRAP(T_PRIV_INSTR);		/* 011	privileged opcode */
	TRAP(T_UNIMP_LDD);		/* 012	unimplemented LDD */
	TRAP(T_UNIMP_STD);		/* 013	unimplemented STD */
	NOT4; NOT4; NOT4;		/* 014 - 01F reserved */
	FP_DISABLED_TRAP;		/* 020	fp disabled */
	FP_IEEE_TRAP;			/* 021	fp exception ieee 754 */
	FP_TRAP;			/* 022	fp exception other */
	TAG_OVERFLOW;			/* 023	tag overflow */
	CLEAN_WINDOW;			/* 024 - 027 clean window */
	DIV_BY_ZERO;			/* 028	division by zero */
	NOT;				/* 029	internal processor error */
	NOT; NOT; NOT4;			/* 02A - 02F reserved */
	DMMU_EXCEPTION;			/* 030	data access exception */
	DTSB_MISS;			/* 031	data access MMU miss */
	NOT;				/* 032	reserved */
	NOT;				/* 033	data access protection */
	DMMU_EXC_AG_NOT_ALIGNED;	/* 034	mem address not aligned */
	DMMU_EXC_LDDF_NOT_ALIGNED;	/* 035	LDDF mem address not aligned */
	DMMU_EXC_STDF_NOT_ALIGNED;	/* 036	STDF mem address not aligned */
	DMMU_EXC_AG_PRIV;		/* 037	privileged action */
	NOT;				/* 038	LDQF mem address not aligned */
	NOT;				/* 039	STQF mem address not aligned */
	NOT; NOT; NOT4;			/* 03A - 03F reserved */
	NOT;				/* 040	async data error */
	LEVEL_INTERRUPT(1);		/* 041	interrupt level 1 */
	LEVEL_INTERRUPT(2);		/* 042	interrupt level 2 */
	LEVEL_INTERRUPT(3);		/* 043	interrupt level 3 */
	LEVEL_INTERRUPT(4);		/* 044	interrupt level 4 */
	LEVEL_INTERRUPT(5);		/* 045	interrupt level 5 */
	LEVEL_INTERRUPT(6);		/* 046	interrupt level 6 */
	LEVEL_INTERRUPT(7);		/* 047	interrupt level 7 */
	LEVEL_INTERRUPT(8);		/* 048	interrupt level 8 */
	LEVEL_INTERRUPT(9);		/* 049	interrupt level 9 */
	LEVEL_INTERRUPT(10);		/* 04A	interrupt level 10 */
	LEVEL_INTERRUPT(11);		/* 04B	interrupt level 11 */
	LEVEL_INTERRUPT(12);		/* 04C	interrupt level 12 */
	LEVEL_INTERRUPT(13);		/* 04D	interrupt level 13 */
	LEVEL14_INTERRUPT;		/* 04E	interrupt level 14 */
	LEVEL15_INTERRUPT;		/* 04F	interrupt level 15 */
	NOT4; NOT4; NOT4; NOT4;		/* 050 - 05F reserved */
	NOT;				/* 060	interrupt vector */
	GOTO(kmdb_trap);		/* 061	PA watchpoint */
	GOTO(kmdb_trap);		/* 062	VA watchpoint */
	NOT;				/* 063	reserved */
	ITLB_MISS(tt0);			/* 064	instruction access MMU miss */
	DTLB_MISS(tt0);			/* 068	data access MMU miss */
	DTLB_PROT;			/* 06C	data access protection */
	NOT;				/* 070	reserved */
	NOT;				/* 071	reserved */
	NOT;				/* 072	reserved */
	NOT;				/* 073	reserved */
	NOT4; NOT4			/* 074 - 07B reserved */
	CPU_MONDO;			/* 07C	cpu_mondo */
	DEV_MONDO;			/* 07D	dev_mondo */
	GOTO_TT(resumable_error, trace_gen);	/* 07E	resumable error */
	GOTO_TT(nonresumable_error, trace_gen);	/* 07F	non-resumable error */
	NOT4;				/* 080	spill 0 normal */
	SPILL_32bit_asi(ASI_AIUP, sn0);	/* 084	spill 1 normal */
	SPILL_64bit_asi(ASI_AIUP, sn0);	/* 088	spill 2 normal */
	SPILL_32clean(ASI_AIUP, sn0);	/* 08C	spill 3 normal */
	SPILL_64clean(ASI_AIUP, sn0);	/* 090	spill 4 normal */
	SPILL_32bit(not);		/* 094	spill 5 normal */
	SPILL_64bit(not);		/* 098	spill 6 normal */
	SPILL_mixed;			/* 09C	spill 7 normal */
	NOT4;				/* 0A0	spill 0 other */
	SPILL_32bit_asi(ASI_AIUS, so0);	/* 0A4	spill 1 other */
	SPILL_64bit_asi(ASI_AIUS, so0);	/* 0A8	spill 2 other */
	SPILL_32bit_asi(ASI_AIUS, so0);	/* 0AC	spill 3 other */
	SPILL_64bit_asi(ASI_AIUS, so0);	/* 0B0	spill 4 other */
	NOT4;				/* 0B4	spill 5 other */
	NOT4;				/* 0B8	spill 6 other */
	NOT4;				/* 0BC	spill 7 other */
	NOT4;				/* 0C0	fill 0 normal */
	FILL_32bit_asi(ASI_AIUP, fn0);	/* 0C4	fill 1 normal */
	FILL_64bit_asi(ASI_AIUP, fn0);	/* 0C8	fill 2 normal */
	FILL_32bit_asi(ASI_AIUP, fn0);	/* 0CC	fill 3 normal */
	FILL_64bit_asi(ASI_AIUP, fn0);	/* 0D0	fill 4 normal */
	FILL_32bit(not);		/* 0D4	fill 5 normal */
	FILL_64bit(not);		/* 0D8	fill 6 normal */
	FILL_mixed;			/* 0DC	fill 7 normal */
	NOT4;				/* 0E0	fill 0 other */
	NOT4;				/* 0E4	fill 1 other */
	NOT4;				/* 0E8	fill 2 other */
	NOT4;				/* 0EC	fill 3 other */
	NOT4;				/* 0F0	fill 4 other */
	NOT4;				/* 0F4	fill 5 other */
	NOT4;				/* 0F8	fill 6 other */
	NOT4;				/* 0FC	fill 7 other */
	/* user traps */
	GOTO(syscall_trap_4x);		/* 100	old system call */
	TRAP(T_BREAKPOINT);		/* 101	user breakpoint */
	TRAP(T_DIV0);			/* 102	user divide by zero */
	GOTO(.flushw);			/* 103	flush windows */
	GOTO(.clean_windows);		/* 104	clean windows */
	BAD;				/* 105	range check ?? */
	GOTO(.fix_alignment);		/* 106	do unaligned references */
	BAD;				/* 107	unused */
	SYSCALL_TRAP32;			/* 108	ILP32 system call on LP64 */
	GOTO(set_trap0_addr);		/* 109	set trap0 address */
	BAD; BAD; BAD4;			/* 10A - 10F unused */
	TRP4; TRP4; TRP4; TRP4;		/* 110 - 11F V9 user trap handlers */
	GOTO(.getcc);			/* 120	get condition codes */
	GOTO(.setcc);			/* 121	set condition codes */
	GOTO(.getpsr);			/* 122	get psr */
	GOTO(.setpsr);			/* 123	set psr (some fields) */
	GOTO(get_timestamp);		/* 124	get timestamp */
	GOTO(get_virtime);		/* 125	get lwp virtual time */
	PRIV(self_xcall);		/* 126	self xcall */
	GOTO(get_hrestime);		/* 127	get hrestime */
	BAD;				/* 128	ST_SETV9STACK */
	GOTO(.getlgrp);			/* 129	get lgrpid */
	BAD; BAD; BAD4;			/* 12A - 12F unused */
	BAD4; BAD4;			/* 130 - 137 unused */
	DTRACE_PID;			/* 138	dtrace pid tracing provider */
	BAD;				/* 139	unused */
	DTRACE_RETURN;			/* 13A	dtrace pid return probe */
	BAD; BAD4;			/* 13B - 13F unused */
	SYSCALL_TRAP;			/* 140	LP64 system call */
	SYSCALL(nosys);			/* 141	unused system call trap */
#ifdef DEBUG_USER_TRAPTRACECTL
	GOTO(.traptrace_freeze);	/* 142	freeze traptrace */
	GOTO(.traptrace_unfreeze);	/* 143	unfreeze traptrace */
#else
	SYSCALL(nosys);			/* 142	unused system call trap */
	SYSCALL(nosys);			/* 143	unused system call trap */
#endif
	BAD4; BAD4; BAD4;		/* 144 - 14F unused */
	BAD4; BAD4; BAD4; BAD4;		/* 150 - 15F unused */
	BAD4; BAD4; BAD4; BAD4;		/* 160 - 16F unused */
	BAD;				/* 170 - unused */
	BAD;				/* 171 - unused */
	BAD; BAD;			/* 172 - 173 unused */
	BAD4; BAD4;			/* 174 - 17B unused */
#ifdef	PTL1_PANIC_DEBUG
	mov PTL1_BAD_DEBUG, %g1; GOTO(ptl1_panic);
					/* 17C	test ptl1_panic */
#else
	BAD;				/* 17C	unused */
#endif	/* PTL1_PANIC_DEBUG */
	PRIV(kmdb_trap);		/* 17D	kmdb enter (L1-A) */
	PRIV(kmdb_trap);		/* 17E	kmdb breakpoint */
	PRIV(obp_bpt);			/* 17F	obp breakpoint */
	/* reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 180 - 18F reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 190 - 19F reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1A0 - 1AF reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1B0 - 1BF reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1C0 - 1CF reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1D0 - 1DF reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1E0 - 1EF reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1F0 - 1FF reserved */
	.size	trap_table0, (.-trap_table0)
trap_table1:
	NOT4; NOT4;			/* 000 - 007 unused */
	NOT;				/* 008	instruction access exception */
	ITSB_MISS;			/* 009	instruction access MMU miss */
	NOT;				/* 00A	reserved */
	NOT; NOT4;			/* 00B - 00F unused */
	NOT4; NOT4; NOT4; NOT4;		/* 010 - 01F unused */
	NOT4;				/* 020 - 023 unused */
	CLEAN_WINDOW;			/* 024 - 027 clean window */
	NOT4; NOT4;			/* 028 - 02F unused */
	DMMU_EXCEPTION_TL1;		/* 030	data access exception */
	DTSB_MISS;			/* 031	data access MMU miss */
	NOT;				/* 032	reserved */
	NOT;				/* 033	unused */
	MISALIGN_ADDR_TL1;		/* 034	mem address not aligned */
	NOT; NOT; NOT; NOT4; NOT4	/* 035 - 03F unused */
	NOT4; NOT4; NOT4; NOT4;		/* 040 - 04F unused */
	NOT4; NOT4; NOT4; NOT4;		/* 050 - 05F unused */
	NOT;				/* 060	unused */
	GOTO(kmdb_trap_tl1);		/* 061	PA watchpoint */
	GOTO(kmdb_trap_tl1);		/* 062	VA watchpoint */
	NOT;				/* 063	reserved */
	ITLB_MISS(tt1);			/* 064	instruction access MMU miss */
	DTLB_MISS(tt1);			/* 068	data access MMU miss */
	DTLB_PROT;			/* 06C	data access protection */
	NOT;				/* 070	reserved */
	NOT;				/* 071	reserved */
	NOT;				/* 072	reserved */
	NOT;				/* 073	reserved */
	NOT4; NOT4;			/* 074 - 07B reserved */
	NOT;				/* 07C	reserved */
	NOT;				/* 07D	reserved */
	NOT;				/* 07E	resumable error */
	GOTO_TT(nonresumable_error, trace_gen);	/* 07F	nonresumable error */
	NOTP4;				/* 080	spill 0 normal */
	SPILL_32bit_tt1(ASI_AIUP, sn1);	/* 084	spill 1 normal */
	SPILL_64bit_tt1(ASI_AIUP, sn1);	/* 088	spill 2 normal */
	SPILL_32bit_tt1(ASI_AIUP, sn1);	/* 08C	spill 3 normal */
	SPILL_64bit_tt1(ASI_AIUP, sn1);	/* 090	spill 4 normal */
	NOTP4;				/* 094	spill 5 normal */
	SPILL_64bit_ktt1(sk);		/* 098	spill 6 normal */
	SPILL_mixed_ktt1(sk);		/* 09C	spill 7 normal */
	NOTP4;				/* 0A0	spill 0 other */
	SPILL_32bit_tt1(ASI_AIUS, so1);	/* 0A4	spill 1 other */
	SPILL_64bit_tt1(ASI_AIUS, so1);	/* 0A8	spill 2 other */
	SPILL_32bit_tt1(ASI_AIUS, so1);	/* 0AC	spill 3 other */
	SPILL_64bit_tt1(ASI_AIUS, so1);	/* 0B0	spill 4 other */
	NOTP4;				/* 0B4	spill 5 other */
	NOTP4;				/* 0B8	spill 6 other */
	NOTP4;				/* 0BC	spill 7 other */
	NOT4;				/* 0C0	fill 0 normal */
	NOT4;				/* 0C4	fill 1 normal */
	NOT4;				/* 0C8	fill 2 normal */
	NOT4;				/* 0CC	fill 3 normal */
	NOT4;				/* 0D0	fill 4 normal */
	NOT4;				/* 0D4	fill 5 normal */
	NOT4;				/* 0D8	fill 6 normal */
	NOT4;				/* 0DC	fill 7 normal */
	NOT4; NOT4; NOT4; NOT4;		/* 0E0 - 0EF unused */
	NOT4; NOT4; NOT4; NOT4;		/* 0F0 - 0FF unused */
/*
 * Code running at TL>0 does not use soft traps, so
 * we can truncate the table here.
 * However:
 * sun4v uses (hypervisor) ta instructions at TL > 0, so
 * provide a safety net for now.
 */
	/* soft traps */
	BAD4; BAD4; BAD4; BAD4;		/* 100 - 10F unused */
	BAD4; BAD4; BAD4; BAD4;		/* 110 - 11F unused */
	BAD4; BAD4; BAD4; BAD4;		/* 120 - 12F unused */
	BAD4; BAD4; BAD4; BAD4;		/* 130 - 13F unused */
	BAD4; BAD4; BAD4; BAD4;		/* 140 - 14F unused */
	BAD4; BAD4; BAD4; BAD4;		/* 150 - 15F unused */
	BAD4; BAD4; BAD4; BAD4;		/* 160 - 16F unused */
	BAD4; BAD4; BAD4; BAD4;		/* 170 - 17F unused */
	/* reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 180 - 18F reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 190 - 19F reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1A0 - 1AF reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1B0 - 1BF reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1C0 - 1CF reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1D0 - 1DF reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1E0 - 1EF reserved */
	NOT4; NOT4; NOT4; NOT4;		/* 1F0 - 1FF reserved */
etrap_table:
	.size	trap_table1, (.-trap_table1)
	.size	trap_table, (.-trap_table)
	.size	scb, (.-scb)

/*
 * We get to exec_fault in the case of an instruction miss and tte
 * has no execute bit set.  We go to tl0 to handle it.
 *
 * g1 = tsbe pointer (in/clobbered)
 * g2 = tag access register (in)
 * g3 - g4 = scratch (clobbered)
 * g5 = tsbe data (in)
 * g6 = scratch (clobbered)
 * g7 = pc we jumped here from (in)
 */
/*
 * synthesize for miss handler: TAG_ACCESS in %g2 (with context "type"
 * (0=kernel, 1=invalid, or 2=user) rather than context ID)
 */
	ALTENTRY(exec_fault)
	TRACE_TSBHIT(TT_MMU_EXEC)
	MMU_FAULT_STATUS_AREA(%g4)
	ldx	[%g4 + MMFSA_I_ADDR], %g2	/* g2 = address */
	ldx	[%g4 + MMFSA_I_CTX], %g3	/* g3 = ctx */
	srlx	%g2, MMU_PAGESHIFT, %g2		! align address to page boundary
	cmp	%g3, USER_CONTEXT_TYPE
	sllx	%g2, MMU_PAGESHIFT, %g2
	movgu	%icc, USER_CONTEXT_TYPE, %g3
	or	%g2, %g3, %g2			/* TAG_ACCESS */
	mov	T_INSTR_MMU_MISS, %g3		! arg2 = traptype
	set	trap, %g1
	ba,pt	%xcc, sys_trap
	mov	-1, %g4

.mmu_exception_not_aligned:
	/* %g2 = sfar, %g3 = sfsr */
	rdpr	%tstate, %g1
	btst	TSTATE_PRIV, %g1
	bnz,pn	%icc, 2f
	nop
	CPU_ADDR(%g1, %g4)		! load CPU struct addr
	ldn	[%g1 + CPU_THREAD], %g1	! load thread pointer
	ldn	[%g1 + T_PROCP], %g1	! load proc pointer
	ldn	[%g1 + P_UTRAPS], %g5	! are there utraps?
	brz,pt	%g5, 2f
	nop
	ldn	[%g5 + P_UTRAP15], %g5	! unaligned utrap?
	brz,pn	%g5, 2f
	nop
	btst	1, %sp
	bz,pt	%xcc, 1f		! 32 bit user program
	nop
	ba,pt	%xcc, .setup_v9utrap	! 64 bit user program
	nop
1:
	ba,pt	%xcc, .setup_utrap
	or	%g2, %g0, %g7
2:
	ba,pt	%xcc, .mmu_exception_end
	mov	T_ALIGNMENT, %g1

.mmu_priv_exception:
	rdpr	%tstate, %g1
	btst	TSTATE_PRIV, %g1
	bnz,pn	%icc, 1f
	nop
	CPU_ADDR(%g1, %g4)		! load CPU struct addr
	ldn	[%g1 + CPU_THREAD], %g1	! load thread pointer
	ldn	[%g1 + T_PROCP], %g1	! load proc pointer
	ldn	[%g1 + P_UTRAPS], %g5	! are there utraps?
	brz,pt	%g5, 1f
	nop
	ldn	[%g5 + P_UTRAP16], %g5
	brnz,pt	%g5, .setup_v9utrap
	nop
1:
	mov	T_PRIV_INSTR, %g1

.mmu_exception_end:
	CPU_INDEX(%g4, %g5)
	set	cpu_core, %g5
	sllx	%g4, CPU_CORE_SHIFT, %g4
	add	%g4, %g5, %g4
	lduh	[%g4 + CPUC_DTRACE_FLAGS], %g5
	andcc	%g5, CPU_DTRACE_NOFAULT, %g0
	bz	1f
	or	%g5, CPU_DTRACE_BADADDR, %g5
	stuh	%g5, [%g4 + CPUC_DTRACE_FLAGS]
	done

1:
	sllx	%g3, 32, %g3
	or	%g3, %g1, %g3
	set	trap, %g1
	ba,pt	%xcc, sys_trap
	sub	%g0, 1, %g4

.fp_disabled:
	CPU_ADDR(%g1, %g4)		! load CPU struct addr
	ldn	[%g1 + CPU_THREAD], %g1	! load thread pointer
	rdpr	%tstate, %g4
	btst	TSTATE_PRIV, %g4
	bnz,a,pn %icc, ptl1_panic
	mov	PTL1_BAD_FPTRAP, %g1

	ldn	[%g1 + T_PROCP], %g1	! load proc pointer
	ldn	[%g1 + P_UTRAPS], %g5	! are there utraps?
	brz,a,pt %g5, 2f
	nop
	ldn	[%g5 + P_UTRAP7], %g5	! fp_disabled utrap?
	brz,a,pn %g5, 2f
	nop
	btst	1, %sp
	bz,a,pt	%xcc, 1f		! 32 bit user program
	nop
	ba,a,pt	%xcc, .setup_v9utrap	! 64 bit user program
	nop
1:
	ba,pt	%xcc, .setup_utrap
	or	%g0, %g0, %g7
2:
	set	fp_disabled, %g1
	ba,pt	%xcc, sys_trap
	sub	%g0, 1, %g4

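/*
 * The utrap probes in .mmu_exception_not_aligned, .mmu_priv_exception
 * and .fp_disabled above all walk the same chain:
 *
 *	CPU -> cpu_thread -> t_procp -> p_utraps[UT_xxx]
 *
 * A NULL p_utraps table or a NULL entry means no user handler is
 * registered, and the trap funnels to trap() via sys_trap instead.
 */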
.fp_ieee_exception:
	rdpr	%tstate, %g1
	btst	TSTATE_PRIV, %g1
	bnz,a,pn %icc, ptl1_panic
	mov	PTL1_BAD_FPTRAP, %g1
	CPU_ADDR(%g1, %g4)		! load CPU struct addr
	stx	%fsr, [%g1 + CPU_TMP1]
	ldx	[%g1 + CPU_TMP1], %g2
	ldn	[%g1 + CPU_THREAD], %g1	! load thread pointer
	ldn	[%g1 + T_PROCP], %g1	! load proc pointer
	ldn	[%g1 + P_UTRAPS], %g5	! are there utraps?
	brz,a,pt %g5, 1f
	nop
	ldn	[%g5 + P_UTRAP8], %g5
	brnz,a,pt %g5, .setup_v9utrap
	nop
1:
	set	_fp_ieee_exception, %g1
	ba,pt	%xcc, sys_trap
	sub	%g0, 1, %g4

/*
 * Register Inputs:
 *	%g5		user trap handler
 *	%g7		misaligned addr - for alignment traps only
 */
.setup_utrap:
	set	trap, %g1		! setup in case we go
	mov	T_FLUSH_PCB, %g3	! through sys_trap on
	sub	%g0, 1, %g4		! the save instruction below

	/*
	 * If the DTrace pid provider is single stepping a copied-out
	 * instruction, t->t_dtrace_step will be set.  In that case we need
	 * to abort the single-stepping (since execution of the instruction
	 * was interrupted) and use the value of t->t_dtrace_npc as the %npc.
	 */
	save	%sp, -SA(MINFRAME32), %sp	! window for trap handler
	CPU_ADDR(%g1, %g4)			! load CPU struct addr
	ldn	[%g1 + CPU_THREAD], %g1		! load thread pointer
	ldub	[%g1 + T_DTRACE_STEP], %g2	! load t->t_dtrace_step
	rdpr	%tnpc, %l2			! arg1 == tnpc
	brz,pt	%g2, 1f
	rdpr	%tpc, %l1			! arg0 == tpc

	ldub	[%g1 + T_DTRACE_AST], %g2	! load t->t_dtrace_ast
	ldn	[%g1 + T_DTRACE_NPC], %l2	! arg1 = t->t_dtrace_npc (step)
	brz,pt	%g2, 1f
	st	%g0, [%g1 + T_DTRACE_FT]	! zero all pid provider flags
	stub	%g2, [%g1 + T_ASTFLAG]		! aston(t) if t->t_dtrace_ast
1:
	mov	%g7, %l3			! arg2 == misaligned address

	rdpr	%tstate, %g1			! cwp for trap handler
	rdpr	%cwp, %g4
	bclr	TSTATE_CWP_MASK, %g1
	wrpr	%g1, %g4, %tstate
	wrpr	%g0, %g5, %tnpc			! trap handler address
	FAST_TRAP_DONE
	/* NOTREACHED */

.check_v9utrap:
	rdpr	%tstate, %g1
	btst	TSTATE_PRIV, %g1
	bnz,a,pn %icc, 3f
	nop
	CPU_ADDR(%g4, %g1)		! load CPU struct addr
	ldn	[%g4 + CPU_THREAD], %g5	! load thread pointer
	ldn	[%g5 + T_PROCP], %g5	! load proc pointer
	ldn	[%g5 + P_UTRAPS], %g5	! are there utraps?

	cmp	%g3, T_SOFTWARE_TRAP
	bne,a,pt %icc, 1f
	nop

	brz,pt	%g5, 3f			! if p_utraps == NULL goto trap()
	rdpr	%tt, %g3		! delay - get actual hw trap type

	sub	%g3, 254, %g1		! UT_TRAP_INSTRUCTION_16 = p_utraps[18]
	ba,pt	%icc, 2f
	smul	%g1, CPTRSIZE, %g2
1:
	brz,a,pt %g5, 3f		! if p_utraps == NULL goto trap()
	nop

	cmp	%g3, T_UNIMP_INSTR
	bne,a,pt %icc, 2f
	nop

	mov	1, %g1
	st	%g1, [%g4 + CPU_TL1_HDLR] ! set CPU_TL1_HDLR
	rdpr	%tpc, %g1		! ld trapping instruction using
	lduwa	[%g1]ASI_AIUP, %g1	! "AS IF USER" ASI which could fault
	st	%g0, [%g4 + CPU_TL1_HDLR] ! clr CPU_TL1_HDLR

	sethi	%hi(0xc1c00000), %g4	! setup mask for illtrap instruction
	andcc	%g1, %g4, %g4		! and instruction with mask
	bnz,a,pt %icc, 3f		! if %g4 == zero, %g1 is an ILLTRAP
	nop				! fall thru to setup
2:
	ldn	[%g5 + %g2], %g5
	brnz,a,pt %g5, .setup_v9utrap
	nop
3:
	set	trap, %g1
	ba,pt	%xcc, sys_trap
	sub	%g0, 1, %g4
	/* NOTREACHED */

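/*
 * Worked example of the "sub %g3, 254" indexing in .check_v9utrap: a
 * user "ta 16" arrives with %tt == 0x110 (272 decimal), and 272 - 254
 * == 18, i.e. p_utraps[18], matching the UT_TRAP_INSTRUCTION_16 note in
 * the code above.  The smul by CPTRSIZE then scales the index to a
 * pointer offset into the p_utraps array.
 */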
/*
 * Register Inputs:
 *	%g5		user trap handler
 */
.setup_v9utrap:
	set	trap, %g1		! setup in case we go
	mov	T_FLUSH_PCB, %g3	! through sys_trap on
	sub	%g0, 1, %g4		! the save instruction below

	/*
	 * If the DTrace pid provider is single stepping a copied-out
	 * instruction, t->t_dtrace_step will be set.  In that case we need
	 * to abort the single-stepping (since execution of the instruction
	 * was interrupted) and use the value of t->t_dtrace_npc as the %npc.
	 */
	save	%sp, -SA(MINFRAME64), %sp	! window for trap handler
	CPU_ADDR(%g1, %g4)			! load CPU struct addr
	ldn	[%g1 + CPU_THREAD], %g1		! load thread pointer
	ldub	[%g1 + T_DTRACE_STEP], %g2	! load t->t_dtrace_step
	rdpr	%tnpc, %l7			! arg1 == tnpc
	brz,pt	%g2, 1f
	rdpr	%tpc, %l6			! arg0 == tpc

	ldub	[%g1 + T_DTRACE_AST], %g2	! load t->t_dtrace_ast
	ldn	[%g1 + T_DTRACE_NPC], %l7	! arg1 == t->t_dtrace_npc (step)
	brz,pt	%g2, 1f
	st	%g0, [%g1 + T_DTRACE_FT]	! zero all pid provider flags
	stub	%g2, [%g1 + T_ASTFLAG]		! aston(t) if t->t_dtrace_ast
1:
	rdpr	%tstate, %g2			! cwp for trap handler
	rdpr	%cwp, %g4
	bclr	TSTATE_CWP_MASK, %g2
	wrpr	%g2, %g4, %tstate

	ldn	[%g1 + T_PROCP], %g4		! load proc pointer
	ldn	[%g4 + P_AS], %g4		! load as pointer
	ldn	[%g4 + A_USERLIMIT], %g4	! load as userlimit
	cmp	%l7, %g4			! check for single-step set
	bne,pt	%xcc, 4f
	nop
	ldn	[%g1 + T_LWP], %g1		! load klwp pointer
	ld	[%g1 + PCB_STEP], %g4		! load single-step flag
	cmp	%g4, STEP_ACTIVE		! step flags set in pcb?
	bne,pt	%icc, 4f
	nop
	stn	%g5, [%g1 + PCB_TRACEPC]	! save trap handler addr in pcb
	mov	%l7, %g4			! on entry to precise user trap
	add	%l6, 4, %l7			! handler, %l6 == pc, %l7 == npc
						! at time of trap
	wrpr	%g0, %g4, %tnpc			! generate FLTBOUNDS,
						! %g4 == userlimit
	FAST_TRAP_DONE
	/* NOTREACHED */
4:
	wrpr	%g0, %g5, %tnpc			! trap handler address
	FAST_TRAP_DONE_CHK_INTR
	/* NOTREACHED */

.fp_exception:
	CPU_ADDR(%g1, %g4)
	stx	%fsr, [%g1 + CPU_TMP1]
	ldx	[%g1 + CPU_TMP1], %g2

	/*
	 * Cheetah takes unfinished_FPop trap for certain range of operands
	 * to the "fitos" instruction.  Instead of going through the slow
	 * software emulation path, we try to simulate the "fitos" instruction
	 * via "fitod" and "fdtos" provided the following conditions are met:
	 *
	 *	fpu_exists is set (if DEBUG)
	 *	not in privileged mode
	 *	ftt is unfinished_FPop
	 *	NXM IEEE trap is not enabled
	 *	instruction at %tpc is "fitos"
	 *
	 *	Usage:
	 *		%g1	per cpu address
	 *		%g2	%fsr
	 *		%g6	user instruction
	 *
	 * Note that we can take a memory access related trap while trying
	 * to fetch the user instruction.  Therefore, we set CPU_TL1_HDLR
	 * flag to catch those traps and let the SFMMU code deal with page
	 * fault and data access exception.
	 */
#if defined(DEBUG) || defined(NEED_FPU_EXISTS)
	sethi	%hi(fpu_exists), %g7
	ld	[%g7 + %lo(fpu_exists)], %g7
	brz,pn	%g7, .fp_exception_cont
	nop
#endif
	rdpr	%tstate, %g7			! branch if in privileged mode
	btst	TSTATE_PRIV, %g7
	bnz,pn	%xcc, .fp_exception_cont
	srl	%g2, FSR_FTT_SHIFT, %g7		! extract ftt from %fsr
	and	%g7, (FSR_FTT>>FSR_FTT_SHIFT), %g7
	cmp	%g7, FTT_UNFIN
	set	FSR_TEM_NX, %g5
	bne,pn	%xcc, .fp_exception_cont	! branch if NOT unfinished_FPop
	andcc	%g2, %g5, %g0
	bne,pn	%xcc, .fp_exception_cont	! branch if FSR_TEM_NX enabled
	rdpr	%tpc, %g5			! get faulting PC

	or	%g0, 1, %g7
	st	%g7, [%g1 + CPU_TL1_HDLR]	! set tl1_hdlr flag
	lda	[%g5]ASI_USER, %g6		! get user's instruction
	st	%g0, [%g1 + CPU_TL1_HDLR]	! clear tl1_hdlr flag

	set	FITOS_INSTR_MASK, %g7
	and	%g6, %g7, %g7
	set	FITOS_INSTR, %g5
	cmp	%g7, %g5
	bne,pn	%xcc, .fp_exception_cont	! branch if not FITOS_INSTR
	nop

	/*
	 * This is unfinished FPops trap for "fitos" instruction.  We
	 * need to simulate "fitos" via "fitod" and "fdtos" instruction
	 * sequence.
	 *
	 * We need a temporary FP register to do the conversion.  Since
	 * both source and destination operands for the "fitos" instruction
	 * have to be within %f0-%f31, we use an FP register from the upper
	 * half to guarantee that it won't collide with the source or the
	 * dest operand.  However, we do have to save and restore its value.
	 *
	 * We use %d62 as a temporary FP register for the conversion and
	 * branch to appropriate instruction within the conversion tables
	 * based upon the rs2 and rd values.
	 */

	std	%d62, [%g1 + CPU_TMP1]		! save original value

	srl	%g6, FITOS_RS2_SHIFT, %g7
	and	%g7, FITOS_REG_MASK, %g7
	set	_fitos_fitod_table, %g4
	sllx	%g7, 2, %g7
	jmp	%g4 + %g7
	ba,pt	%xcc, _fitos_fitod_done
	.empty

_fitos_fitod_table:
	fitod	%f0, %d62
	fitod	%f1, %d62
	fitod	%f2, %d62
	fitod	%f3, %d62
	fitod	%f4, %d62
	fitod	%f5, %d62
	fitod	%f6, %d62
	fitod	%f7, %d62
	fitod	%f8, %d62
	fitod	%f9, %d62
	fitod	%f10, %d62
	fitod	%f11, %d62
	fitod	%f12, %d62
	fitod	%f13, %d62
	fitod	%f14, %d62
	fitod	%f15, %d62
	fitod	%f16, %d62
	fitod	%f17, %d62
	fitod	%f18, %d62
	fitod	%f19, %d62
	fitod	%f20, %d62
	fitod	%f21, %d62
	fitod	%f22, %d62
	fitod	%f23, %d62
	fitod	%f24, %d62
	fitod	%f25, %d62
	fitod	%f26, %d62
	fitod	%f27, %d62
	fitod	%f28, %d62
	fitod	%f29, %d62
	fitod	%f30, %d62
	fitod	%f31, %d62
_fitos_fitod_done:

	/*
	 * Now convert data back into single precision
	 */
	srl	%g6, FITOS_RD_SHIFT, %g7
	and	%g7, FITOS_REG_MASK, %g7
	set	_fitos_fdtos_table, %g4
	sllx	%g7, 2, %g7
	jmp	%g4 + %g7
	ba,pt	%xcc, _fitos_fdtos_done
	.empty

_fitos_fdtos_table:
	fdtos	%d62, %f0
	fdtos	%d62, %f1
	fdtos	%d62, %f2
	fdtos	%d62, %f3
	fdtos	%d62, %f4
	fdtos	%d62, %f5
	fdtos	%d62, %f6
	fdtos	%d62, %f7
	fdtos	%d62, %f8
	fdtos	%d62, %f9
	fdtos	%d62, %f10
	fdtos	%d62, %f11
	fdtos	%d62, %f12
	fdtos	%d62, %f13
	fdtos	%d62, %f14
	fdtos	%d62, %f15
	fdtos	%d62, %f16
	fdtos	%d62, %f17
	fdtos	%d62, %f18
	fdtos	%d62, %f19
	fdtos	%d62, %f20
	fdtos	%d62, %f21
	fdtos	%d62, %f22
	fdtos	%d62, %f23
	fdtos	%d62, %f24
	fdtos	%d62, %f25
	fdtos	%d62, %f26
	fdtos	%d62, %f27
	fdtos	%d62, %f28
	fdtos	%d62, %f29
	fdtos	%d62, %f30
	fdtos	%d62, %f31
_fitos_fdtos_done:

	ldd	[%g1 + CPU_TMP1], %d62		! restore %d62

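/*
 * How the table dispatch above works: "jmp %g4 + %g7" lands on one
 * fitod/fdtos entry (each entry is a single 4-byte instruction, hence
 * the sllx by 2), while the "ba" to _fitos_*_done sitting in the jmp's
 * delay slot executes the selected table entry in its own delay slot
 * and then leaves the table.  The .empty directive just tells the
 * assembler this delay-slot usage is intentional.
 */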

#if DEBUG
	/*
	 * Update FPop_unfinished trap kstat
	 */
	set	fpustat+FPUSTAT_UNFIN_KSTAT, %g7
	ldx	[%g7], %g5
1:
	add	%g5, 1, %g6

	casxa	[%g7] ASI_N, %g5, %g6
	cmp	%g5, %g6
	bne,a,pn %xcc, 1b
	or	%g0, %g6, %g5

	/*
	 * Update fpu_sim_fitos kstat
	 */
	set	fpuinfo+FPUINFO_FITOS_KSTAT, %g7
	ldx	[%g7], %g5
1:
	add	%g5, 1, %g6

	casxa	[%g7] ASI_N, %g5, %g6
	cmp	%g5, %g6
	bne,a,pn %xcc, 1b
	or	%g0, %g6, %g5
#endif /* DEBUG */

	FAST_TRAP_DONE
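
	/*
	 * The DEBUG-only kstat updates above are lock-free increments:
	 * reload and retry until the casxa succeeds against the value
	 * the increment was based on. The same idiom in C11 atomics
	 * (the counter cell here is a stand-in, not the actual kstat):
	 *
	 *	#include <stdatomic.h>
	 *	#include <stdint.h>
	 *
	 *	static void
	 *	kstat_inc(_Atomic uint64_t *cell)
	 *	{
	 *		uint64_t old = atomic_load(cell);
	 *
	 *		// casxa loop: on failure 'old' is reloaded and
	 *		// the increment is retried
	 *		while (!atomic_compare_exchange_weak(cell, &old,
	 *		    old + 1))
	 *			;
	 *	}
	 */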

.fp_exception_cont:
	/*
	 * Let _fp_exception deal with simulating the FPop instruction.
	 * Note that we need to pass %fsr in %g2 (already read above).
	 */

	set	_fp_exception, %g1
	ba,pt	%xcc, sys_trap
	sub	%g0, 1, %g4


	/*
	 * Register windows
	 */
.flushw:
.clean_windows:
	rdpr	%tnpc, %g1
	wrpr	%g1, %tpc
	add	%g1, 4, %g1
	wrpr	%g1, %tnpc
	set	trap, %g1
	mov	T_FLUSH_PCB, %g3
	ba,pt	%xcc, sys_trap
	sub	%g0, 1, %g4

/*
 * .spill_clean: clean the previous window, restore the wstate, and
 * "done".
 *
 * Entry: %g7 contains new wstate
 */
.spill_clean:
	sethi	%hi(nwin_minus_one), %g5
	ld	[%g5 + %lo(nwin_minus_one)], %g5 ! %g5 = nwin - 1
	rdpr	%cwp, %g6			! %g6 = %cwp
	deccc	%g6				! %g6--
	movneg	%xcc, %g5, %g6			! if (%g6 < 0) %g6 = nwin - 1
	wrpr	%g6, %cwp
	TT_TRACE_L(trace_win)
	clr	%l0
	clr	%l1
	clr	%l2
	clr	%l3
	clr	%l4
	clr	%l5
	clr	%l6
	clr	%l7
	wrpr	%g0, %g7, %wstate
	saved
	retry					! restores correct %cwp

.fix_alignment:
	CPU_ADDR(%g1, %g2)		! load CPU struct addr to %g1 using %g2
	ldn	[%g1 + CPU_THREAD], %g1	! load thread pointer
	ldn	[%g1 + T_PROCP], %g1
	mov	1, %g2
	stb	%g2, [%g1 + P_FIXALIGNMENT]
	FAST_TRAP_DONE

#define	STDF_REG(REG, ADDR, TMP)		\
	sll	REG, 3, REG			;\
mark1:	set	start1, TMP			;\
	jmp	REG + TMP			;\
	nop					;\
start1:	ba,pt	%xcc, done1			;\
	std	%f0, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f32, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f2, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f34, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f4, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f36, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f6, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f38, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f8, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f40, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f10, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f42, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f12, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f44, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f14, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f46, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f16, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f48, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f18, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f50, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f20, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f52, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f22, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f54, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f24, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f56, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f26, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f58, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f28, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f60, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f30, [ADDR + CPU_TMP1]		;\
	ba,pt	%xcc, done1			;\
	std	%f62, [ADDR + CPU_TMP1]		;\
done1:
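
	/*
	 * STDF_REG (and LDDF_REG below) dispatch on the 5-bit rd field,
	 * and the table entries alternate %f0/%f32/%f2/%f34/... because
	 * SPARC V9 encodes double registers with bit 0 of the rd field
	 * acting as bit 5 of the register number. Each entry is two
	 * 4-byte instructions, hence the "sll REG, 3" (index * 8). A C
	 * restatement of the decode that the table layout mirrors:
	 *
	 *	#include <stdint.h>
	 *
	 *	// rd field of an FP-double op -> register number (%d0-%d62)
	 *	static inline unsigned
	 *	dreg_from_rd(uint32_t rd)
	 *	{
	 *		return ((rd & 0x1e) | ((rd & 1) << 5));
	 *	}
	 */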

#define	LDDF_REG(REG, ADDR, TMP)		\
	sll	REG, 3, REG			;\
mark2:	set	start2, TMP			;\
	jmp	REG + TMP			;\
	nop					;\
start2:	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f0		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f32		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f2		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f34		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f4		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f36		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f6		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f38		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f8		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f40		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f10		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f42		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f12		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f44		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f14		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f46		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f16		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f48		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f18		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f50		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f20		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f52		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f22		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f54		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f24		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f56		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f26		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f58		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f28		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f60		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f30		;\
	ba,pt	%xcc, done2			;\
	ldd	[ADDR + CPU_TMP1], %f62		;\
done2:

.lddf_exception_not_aligned:
	/* %g2 = sfar, %g3 = sfsr */
	mov	%g2, %g5			! stash sfar
#if defined(DEBUG) || defined(NEED_FPU_EXISTS)
	sethi	%hi(fpu_exists), %g2		! check fpu_exists
	ld	[%g2 + %lo(fpu_exists)], %g2
	brz,a,pn %g2, 4f
	nop
#endif
	CPU_ADDR(%g1, %g4)
	or	%g0, 1, %g4
	st	%g4, [%g1 + CPU_TL1_HDLR]	! set tl1_hdlr flag

	rdpr	%tpc, %g2
	lda	[%g2]ASI_AIUP, %g6		! get the user's lddf instruction
	srl	%g6, 23, %g1			! using ldda or not?
	and	%g1, 1, %g1
	brz,a,pt %g1, 2f			! check for ldda instruction
	nop
	srl	%g6, 13, %g1			! check immflag
	and	%g1, 1, %g1
	rdpr	%tstate, %g2			! %tstate in %g2
	brnz,a,pn %g1, 1f
	srl	%g2, 24, %g1			! get asi from %tstate (ASI
						! is tstate bits 31:24)
	srl	%g6, 5, %g1			! get asi from instruction
	and	%g1, 0xFF, %g1			! imm_asi field
1:
	cmp	%g1, ASI_P			! primary address space
	be,a,pt	%icc, 2f
	nop
	cmp	%g1, ASI_PNF			! primary no fault address space
	be,a,pt	%icc, 2f
	nop
	cmp	%g1, ASI_S			! secondary address space
	be,a,pt	%icc, 2f
	nop
	cmp	%g1, ASI_SNF			! secondary no fault address space
	bne,a,pn %icc, 3f
	nop
2:
	lduwa	[%g5]ASI_USER, %g7		! get first half of misaligned data
	add	%g5, 4, %g5			! increment misaligned data address
	lduwa	[%g5]ASI_USER, %g5		! get second half of misaligned data

	sllx	%g7, 32, %g7
	or	%g5, %g7, %g5			! combine data
	CPU_ADDR(%g7, %g1)			! save data on a per-cpu basis
	stx	%g5, [%g7 + CPU_TMP1]		! save in cpu_tmp1

	srl	%g6, 25, %g3			! %g6 has the instruction
	and	%g3, 0x1F, %g3			! %g3 has rd
	LDDF_REG(%g3, %g7, %g4)

	CPU_ADDR(%g1, %g4)
	st	%g0, [%g1 + CPU_TL1_HDLR]	! clear tl1_hdlr flag
	FAST_TRAP_DONE
3:
	CPU_ADDR(%g1, %g4)
	st	%g0, [%g1 + CPU_TL1_HDLR]	! clear tl1_hdlr flag
4:
	set	T_USER, %g3			! trap type in %g3
	or	%g3, T_LDDF_ALIGN, %g3
	mov	%g5, %g2			! misaligned vaddr in %g2
	set	fpu_trap, %g1			! let C handle the little-endian
	ba,pt	%xcc, sys_trap			! and no-fault ASIs
	sub	%g0, 1, %g4
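
	/*
	 * The fast path above emulates a misaligned 8-byte FP load with
	 * two 4-byte user loads glued together high word first. A C
	 * sketch of the combining step (fetch_user_word is a
	 * hypothetical stand-in for the lduwa [%g5]ASI_USER accesses):
	 *
	 *	#include <stdint.h>
	 *
	 *	extern uint32_t fetch_user_word(uintptr_t va);
	 *
	 *	static uint64_t
	 *	load_misaligned64(uintptr_t va)
	 *	{
	 *		uint64_t hi = fetch_user_word(va);	// first half
	 *		uint64_t lo = fetch_user_word(va + 4);	// second half
	 *		return ((hi << 32) | lo);		// combine
	 *	}
	 */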

.stdf_exception_not_aligned:
	/* %g2 = sfar, %g3 = sfsr */
	mov	%g2, %g5

#if defined(DEBUG) || defined(NEED_FPU_EXISTS)
	sethi	%hi(fpu_exists), %g7		! check fpu_exists
	ld	[%g7 + %lo(fpu_exists)], %g3
	brz,a,pn %g3, 4f
	nop
#endif
	CPU_ADDR(%g1, %g4)
	or	%g0, 1, %g4
	st	%g4, [%g1 + CPU_TL1_HDLR]	! set tl1_hdlr flag

	rdpr	%tpc, %g2
	lda	[%g2]ASI_AIUP, %g6		! get the user's stdf instruction

	srl	%g6, 23, %g1			! using stda or not?
	and	%g1, 1, %g1
	brz,a,pt %g1, 2f			! check for stda instruction
	nop
	srl	%g6, 13, %g1			! check immflag
	and	%g1, 1, %g1
	rdpr	%tstate, %g2			! %tstate in %g2
	brnz,a,pn %g1, 1f
	srl	%g2, 24, %g1			! get asi from %tstate (ASI
						! is tstate bits 31:24)
	srl	%g6, 5, %g1			! get asi from instruction
	and	%g1, 0xff, %g1			! imm_asi field
1:
	cmp	%g1, ASI_P			! primary address space
	be,a,pt	%icc, 2f
	nop
	cmp	%g1, ASI_S			! secondary address space
	bne,a,pn %icc, 3f
	nop
2:
	srl	%g6, 25, %g6
	and	%g6, 0x1F, %g6			! %g6 has rd
	CPU_ADDR(%g7, %g1)
	STDF_REG(%g6, %g7, %g4)			! STDF_REG(REG, ADDR, TMP)

	ldx	[%g7 + CPU_TMP1], %g6
	srlx	%g6, 32, %g7
	stuwa	%g7, [%g5]ASI_USER		! first half
	add	%g5, 4, %g5			! increment misaligned data address
	stuwa	%g6, [%g5]ASI_USER		! second half

	CPU_ADDR(%g1, %g4)
	st	%g0, [%g1 + CPU_TL1_HDLR]	! clear tl1_hdlr flag
	FAST_TRAP_DONE
3:
	CPU_ADDR(%g1, %g4)
	st	%g0, [%g1 + CPU_TL1_HDLR]	! clear tl1_hdlr flag
4:
	set	T_USER, %g3			! trap type in %g3
	or	%g3, T_STDF_ALIGN, %g3
	mov	%g5, %g2			! misaligned vaddr in %g2
	set	fpu_trap, %g1			! let C handle the little-endian
	ba,pt	%xcc, sys_trap			! and no-fault ASIs
	sub	%g0, 1, %g4

#ifdef DEBUG_USER_TRAPTRACECTL

.traptrace_freeze:
	mov	%l0, %g1 ; mov %l1, %g2 ; mov %l2, %g3 ; mov %l4, %g4
	TT_TRACE_L(trace_win)
	mov	%g4, %l4 ; mov %g3, %l2 ; mov %g2, %l1 ; mov %g1, %l0
	set	trap_freeze, %g1
	mov	1, %g2
	st	%g2, [%g1]
	FAST_TRAP_DONE

.traptrace_unfreeze:
	set	trap_freeze, %g1
	st	%g0, [%g1]
	mov	%l0, %g1 ; mov %l1, %g2 ; mov %l2, %g3 ; mov %l4, %g4
	TT_TRACE_L(trace_win)
	mov	%g4, %l4 ; mov %g3, %l2 ; mov %g2, %l1 ; mov %g1, %l0
	FAST_TRAP_DONE

#endif /* DEBUG_USER_TRAPTRACECTL */

.getcc:
	CPU_ADDR(%g1, %g2)
	stx	%o0, [%g1 + CPU_TMP1]		! save %o0
	rdpr	%tstate, %g3			! get tstate
	srlx	%g3, PSR_TSTATE_CC_SHIFT, %o0	! shift ccr to V8 psr
	set	PSR_ICC, %g2
	and	%o0, %g2, %o0			! mask out the rest
	srl	%o0, PSR_ICC_SHIFT, %o0		! right justify
	wrpr	%g0, 0, %gl
	mov	%o0, %g1			! move ccr to normal %g1
	wrpr	%g0, 1, %gl
	! cannot assume globals retained their values after increasing %gl
	CPU_ADDR(%g1, %g2)
	ldx	[%g1 + CPU_TMP1], %o0		! restore %o0
	FAST_TRAP_DONE

.setcc:
	CPU_ADDR(%g1, %g2)
	stx	%o0, [%g1 + CPU_TMP1]		! save %o0
	wrpr	%g0, 0, %gl
	mov	%g1, %o0
	wrpr	%g0, 1, %gl
	! cannot assume globals retained their values after increasing %gl
	CPU_ADDR(%g1, %g2)
	sll	%o0, PSR_ICC_SHIFT, %g2
	set	PSR_ICC, %g3
	and	%g2, %g3, %g2			! mask out rest
	sllx	%g2, PSR_TSTATE_CC_SHIFT, %g2
	rdpr	%tstate, %g3			! get tstate
	srl	%g3, 0, %g3			! clear upper word
	or	%g3, %g2, %g3			! or in new bits
	wrpr	%g3, %tstate
	ldx	[%g1 + CPU_TMP1], %o0		! restore %o0
	FAST_TRAP_DONE
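
	/*
	 * .getcc and .setcc shuttle the V8 integer condition codes
	 * between the V9 %tstate.ccr field and the V8 %psr layout. The
	 * getcc direction in C (shift and mask constants as defined by
	 * the headers included above); setcc is the mirror image,
	 * ORing the repositioned icc bits back into %tstate:
	 *
	 *	#include <stdint.h>
	 *
	 *	static uint32_t
	 *	tstate_to_icc(uint64_t tstate)
	 *	{
	 *		uint32_t psr_icc = (uint32_t)
	 *		    (tstate >> PSR_TSTATE_CC_SHIFT) & PSR_ICC;
	 *		return (psr_icc >> PSR_ICC_SHIFT); // right justify
	 *	}
	 */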

/*
 * getpsr(void)
 * Note that the xcc part of the ccr is not provided.
 * The V8 code shows why the V9 trap is not faster:
 *	#define	GETPSR_TRAP()	\
 *		mov %psr, %i0; jmp %l2; rett %l2+4; nop;
 */

	.type	.getpsr, #function
.getpsr:
	rdpr	%tstate, %g1			! get tstate
	srlx	%g1, PSR_TSTATE_CC_SHIFT, %o0	! shift ccr to V8 psr
	set	PSR_ICC, %g2
	and	%o0, %g2, %o0			! mask out the rest

	rd	%fprs, %g1			! get fprs
	and	%g1, FPRS_FEF, %g2		! mask out dirty upper/lower
	sllx	%g2, PSR_FPRS_FEF_SHIFT, %g2	! shift fef to V8 psr.ef
	or	%o0, %g2, %o0			! or result into psr.ef

	set	V9_PSR_IMPLVER, %g2		! SI assigned impl/ver: 0xef
	or	%o0, %g2, %o0			! or psr.impl/ver
	FAST_TRAP_DONE
	SET_SIZE(.getpsr)
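
	/*
	 * What .getpsr assembles, restated in C: icc from %tstate,
	 * psr.ef from %fprs.fef, plus the fixed impl/ver field
	 * (constants as defined by the headers included above):
	 *
	 *	#include <stdint.h>
	 *
	 *	static uint32_t
	 *	synth_v8_psr(uint64_t tstate, uint32_t fprs)
	 *	{
	 *		uint32_t psr;
	 *
	 *		psr = (uint32_t)(tstate >> PSR_TSTATE_CC_SHIFT) &
	 *		    PSR_ICC;
	 *		psr |= (fprs & FPRS_FEF) << PSR_FPRS_FEF_SHIFT;
	 *		return (psr | V9_PSR_IMPLVER);
	 *	}
	 */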

/*
 * setpsr(newpsr)
 * Note that there is no support for ccr.xcc in the V9 code.
 */

	.type	.setpsr, #function
.setpsr:
	rdpr	%tstate, %g1			! get tstate
!	setx	TSTATE_V8_UBITS, %g2
	or	%g0, CCR_ICC, %g3
	sllx	%g3, TSTATE_CCR_SHIFT, %g2

	andn	%g1, %g2, %g1			! zero current user bits
	set	PSR_ICC, %g2
	and	%g2, %o0, %g2			! clear all but psr.icc bits
	sllx	%g2, PSR_TSTATE_CC_SHIFT, %g3	! shift to tstate.ccr.icc
	wrpr	%g1, %g3, %tstate		! write tstate

	set	PSR_EF, %g2
	and	%g2, %o0, %g2			! clear all but fp enable bit
	srlx	%g2, PSR_FPRS_FEF_SHIFT, %g4	! shift ef to V9 fprs.fef
	wr	%g0, %g4, %fprs			! write fprs

	CPU_ADDR(%g1, %g2)			! load CPU struct addr to %g1
	ldn	[%g1 + CPU_THREAD], %g2		! load thread pointer
	ldn	[%g2 + T_LWP], %g3		! load klwp pointer
	ldn	[%g3 + LWP_FPU], %g2		! get lwp_fpu pointer
	stuw	%g4, [%g2 + FPU_FPRS]		! write fef value to fpu_fprs
	srlx	%g4, 2, %g4			! shift fef value to bit 0
	stub	%g4, [%g2 + FPU_EN]		! write fef value to fpu_en
	FAST_TRAP_DONE
	SET_SIZE(.setpsr)

/*
 * getlgrp
 * Get the home lgrpid on which the calling thread is currently executing.
 * Returns the CPU id in %o0 and the home lgroup id in %o1.
 */
	.type	.getlgrp, #function
.getlgrp:
	CPU_ADDR(%g1, %g2)		! load CPU struct addr to %g1 using %g2
	ld	[%g1 + CPU_ID], %o0	! load cpu_id
	ldn	[%g1 + CPU_THREAD], %g2	! load thread pointer
	ldn	[%g2 + T_LPL], %g2	! load lpl pointer
	ld	[%g2 + LPL_LGRPID], %g1	! load lpl_lgrpid
	sra	%g1, 0, %o1
	FAST_TRAP_DONE
	SET_SIZE(.getlgrp)

/*
 * Entry for old 4.x trap (trap 0).
 */
	ENTRY_NP(syscall_trap_4x)
	CPU_ADDR(%g1, %g2)		! load CPU struct addr to %g1 using %g2
	ldn	[%g1 + CPU_THREAD], %g2	! load thread pointer
	ldn	[%g2 + T_LWP], %g2	! load klwp pointer
	ld	[%g2 + PCB_TRAP0], %g2	! lwp->lwp_pcb.pcb_trap0addr
	brz,pn	%g2, 1f			! has it been set?
	st	%l0, [%g1 + CPU_TMP1]	! delay - save some locals
	st	%l1, [%g1 + CPU_TMP2]
	rdpr	%tnpc, %l1		! save old tnpc
	wrpr	%g0, %g2, %tnpc		! setup tnpc

	mov	%g1, %l0		! save CPU struct addr
	wrpr	%g0, 0, %gl
	mov	%l1, %g6		! pass tnpc to user code in %g6
	wrpr	%g0, 1, %gl
	ld	[%l0 + CPU_TMP2], %l1	! restore locals
	ld	[%l0 + CPU_TMP1], %l0
	FAST_TRAP_DONE_CHK_INTR
1:
	!
	! Check for the old syscall mmap, the only one that must keep the
	! same number across the trap; the others are handled in the
	! compatibility library.
	!
	mov	%g1, %l0		! save CPU struct addr
	wrpr	%g0, 0, %gl
	cmp	%g1, OSYS_mmap		! compare to old 4.x mmap
	movz	%icc, SYS_mmap, %g1
	wrpr	%g0, 1, %gl
	ld	[%l0 + CPU_TMP1], %l0
	SYSCALL(syscall_trap32)
	SET_SIZE(syscall_trap_4x)

/*
 * Handler for software trap 9.
 * Set the trap0 emulation address for the old 4.x system call trap.
 * XXX - this should be a system call.
 */
	ENTRY_NP(set_trap0_addr)
	CPU_ADDR(%g1, %g2)		! load CPU struct addr to %g1 using %g2
	st	%l0, [%g1 + CPU_TMP1]	! save some locals
	st	%l1, [%g1 + CPU_TMP2]
	mov	%g1, %l0		! preserve CPU addr
	wrpr	%g0, 0, %gl
	mov	%g1, %l1
	wrpr	%g0, 1, %gl
	! cannot assume globals retained their values after increasing %gl
	ldn	[%l0 + CPU_THREAD], %g2	! load thread pointer
	ldn	[%g2 + T_LWP], %g2	! load klwp pointer
	andn	%l1, 3, %l1		! force alignment
	st	%l1, [%g2 + PCB_TRAP0]	! lwp->lwp_pcb.pcb_trap0addr
	ld	[%l0 + CPU_TMP2], %l1	! restore locals
	ld	[%l0 + CPU_TMP1], %l0
	FAST_TRAP_DONE
	SET_SIZE(set_trap0_addr)

/*
 * mmu_trap_tl1
 * Trap handler for unexpected mmu traps.
 * Checks whether the trap was a user lddf/stdf alignment trap (in which
 * case we go to fpu_trap) or a user trap from the window handler (in
 * which case we save the state in the pcb). Otherwise, we go to
 * ptl1_panic.
 */
	.type	mmu_trap_tl1, #function
mmu_trap_tl1:
#ifdef	TRAPTRACE
	TRACE_PTR(%g5, %g6)
	GET_TRACE_TICK(%g6, %g7)
	stxa	%g6, [%g5 + TRAP_ENT_TICK]%asi
	TRACE_SAVE_TL_GL_REGS(%g5, %g6)
	rdpr	%tt, %g6
	stha	%g6, [%g5 + TRAP_ENT_TT]%asi
	rdpr	%tstate, %g6
	stxa	%g6, [%g5 + TRAP_ENT_TSTATE]%asi
	stna	%sp, [%g5 + TRAP_ENT_SP]%asi
	stna	%g0, [%g5 + TRAP_ENT_TR]%asi
	rdpr	%tpc, %g6
	stna	%g6, [%g5 + TRAP_ENT_TPC]%asi
	MMU_FAULT_STATUS_AREA(%g6)
	ldx	[%g6 + MMFSA_D_ADDR], %g6
	stna	%g6, [%g5 + TRAP_ENT_F1]%asi	! MMU fault address
	CPU_PADDR(%g7, %g6);
	add	%g7, CPU_TL1_HDLR, %g7
	lda	[%g7]ASI_MEM, %g6
	stna	%g6, [%g5 + TRAP_ENT_F2]%asi
	MMU_FAULT_STATUS_AREA(%g6)
	ldx	[%g6 + MMFSA_D_TYPE], %g7	! XXXQ should be a MMFSA_F_ constant?
	ldx	[%g6 + MMFSA_D_CTX], %g6
	sllx	%g6, SFSR_CTX_SHIFT, %g6
	or	%g6, %g7, %g6
	stna	%g6, [%g5 + TRAP_ENT_F3]%asi	! MMU context/type
	set	0xdeadbeef, %g6
	stna	%g6, [%g5 + TRAP_ENT_F4]%asi
	TRACE_NEXT(%g5, %g6, %g7)
#endif /* TRAPTRACE */
	CPU_PADDR(%g7, %g6);
	add	%g7, CPU_TL1_HDLR, %g7		! %g7 = &cpu_m.tl1_hdlr (PA)
	lda	[%g7]ASI_MEM, %g6
	brz,a,pt %g6, 1f
	nop
	sta	%g0, [%g7]ASI_MEM
	! XXXQ need to setup registers for sfmmu_mmu_trap?
	ba,a,pt	%xcc, sfmmu_mmu_trap		! handle page faults
1:
	rdpr	%tpc, %g7
	/* in user_rtt? */
	set	rtt_fill_start, %g6
	cmp	%g7, %g6
	blu,pn	%xcc, 6f
	.empty
	set	rtt_fill_end, %g6
	cmp	%g7, %g6
	bgeu,pn	%xcc, 6f
	nop
	set	fault_rtt_fn1, %g7
	ba,a	7f
6:
	! check to see if the trap pc is in a window spill/fill handler
	rdpr	%tpc, %g7
	/* tpc should be in the trap table */
	set	trap_table, %g6
	cmp	%g7, %g6
	blu,a,pn %xcc, ptl1_panic
	mov	PTL1_BAD_MMUTRAP, %g1
	set	etrap_table, %g6
	cmp	%g7, %g6
	bgeu,a,pn %xcc, ptl1_panic
	mov	PTL1_BAD_MMUTRAP, %g1
	! pc is inside the trap table, convert to trap type
	srl	%g7, 5, %g6			! XXXQ need #define
	and	%g6, 0x1ff, %g6			! XXXQ need #define
	! and check for a window trap type
	and	%g6, WTRAP_TTMASK, %g6
	cmp	%g6, WTRAP_TYPE
	bne,a,pn %xcc, ptl1_panic
	mov	PTL1_BAD_MMUTRAP, %g1
	andn	%g7, WTRAP_ALIGN, %g7		/* 128 byte aligned */
	add	%g7, WTRAP_FAULTOFF, %g7

7:
	! Arguments are passed in the global set active after the
	! 'done' instruction. Before switching sets, we must save
	! the calculated next pc.
	wrpr	%g0, %g7, %tnpc
	wrpr	%g0, 1, %gl
	rdpr	%tt, %g5
	MMU_FAULT_STATUS_AREA(%g7)
	cmp	%g5, T_ALIGNMENT
	be,pn	%xcc, 1f
	ldx	[%g7 + MMFSA_D_ADDR], %g6
	ldx	[%g7 + MMFSA_D_CTX], %g7
	srlx	%g6, MMU_PAGESHIFT, %g6		/* align address */
	cmp	%g7, USER_CONTEXT_TYPE
	sllx	%g6, MMU_PAGESHIFT, %g6
	movgu	%icc, USER_CONTEXT_TYPE, %g7
	or	%g6, %g7, %g6			/* TAG_ACCESS */
1:
	done
	SET_SIZE(mmu_trap_tl1)
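
	/*
	 * The in-table check in mmu_trap_tl1 leans on the trap table
	 * layout: TL=0 entries are 32 bytes each, so bits [13:5] of an
	 * in-table %tpc are the trap type, and window spill/fill types
	 * are then recognized by masking. The arithmetic in C:
	 *
	 *	#include <stdint.h>
	 *
	 *	static unsigned
	 *	tpc_to_tt(uintptr_t tpc)
	 *	{
	 *		return ((tpc >> 5) & 0x1ff);	// 32-byte entries
	 *	}
	 *
	 *	static int
	 *	is_window_trap(unsigned tt)
	 *	{
	 *		return ((tt & WTRAP_TTMASK) == WTRAP_TYPE);
	 *	}
	 */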

/*
 * Several traps use kmdb_trap and kmdb_trap_tl1 as their handlers. These
 * traps are valid only when kmdb is loaded. When the debugger is active,
 * the code below is rewritten to transfer control to the appropriate
 * debugger entry points.
 */
	.global	kmdb_trap
	.align	8
kmdb_trap:
	ba,a	trap_table0
	jmp	%g1 + 0
	nop

	.global	kmdb_trap_tl1
	.align	8
kmdb_trap_tl1:
	ba,a	trap_table0
	jmp	%g1 + 0
	nop

/*
 * This entry is copied from OBP's trap table during boot.
 */
	.global	obp_bpt
	.align	8
obp_bpt:
	NOT



#ifdef	TRAPTRACE
/*
 * TRAPTRACE support.
 * Labels here are branched to with "rd %pc, %g7" in the delay slot.
 * Return is done by "jmp %g7 + 4".
 */

trace_dmmu:
	TRACE_PTR(%g3, %g6)
	GET_TRACE_TICK(%g6, %g5)
	stxa	%g6, [%g3 + TRAP_ENT_TICK]%asi
	TRACE_SAVE_TL_GL_REGS(%g3, %g6)
	rdpr	%tt, %g6
	stha	%g6, [%g3 + TRAP_ENT_TT]%asi
	rdpr	%tstate, %g6
	stxa	%g6, [%g3 + TRAP_ENT_TSTATE]%asi
	stna	%sp, [%g3 + TRAP_ENT_SP]%asi
	rdpr	%tpc, %g6
	stna	%g6, [%g3 + TRAP_ENT_TPC]%asi
	MMU_FAULT_STATUS_AREA(%g6)
	ldx	[%g6 + MMFSA_D_ADDR], %g4
	stxa	%g4, [%g3 + TRAP_ENT_TR]%asi
	ldx	[%g6 + MMFSA_D_CTX], %g4
	stxa	%g4, [%g3 + TRAP_ENT_F1]%asi
	ldx	[%g6 + MMFSA_D_TYPE], %g4
	stxa	%g4, [%g3 + TRAP_ENT_F2]%asi
	stxa	%g6, [%g3 + TRAP_ENT_F3]%asi
	stna	%g0, [%g3 + TRAP_ENT_F4]%asi
	TRACE_NEXT(%g3, %g4, %g5)
	jmp	%g7 + 4
	nop

trace_immu:
	TRACE_PTR(%g3, %g6)
	GET_TRACE_TICK(%g6, %g5)
	stxa	%g6, [%g3 + TRAP_ENT_TICK]%asi
	TRACE_SAVE_TL_GL_REGS(%g3, %g6)
	rdpr	%tt, %g6
	stha	%g6, [%g3 + TRAP_ENT_TT]%asi
	rdpr	%tstate, %g6
	stxa	%g6, [%g3 + TRAP_ENT_TSTATE]%asi
	stna	%sp, [%g3 + TRAP_ENT_SP]%asi
	rdpr	%tpc, %g6
	stna	%g6, [%g3 + TRAP_ENT_TPC]%asi
	MMU_FAULT_STATUS_AREA(%g6)
	ldx	[%g6 + MMFSA_I_ADDR], %g4
	stxa	%g4, [%g3 + TRAP_ENT_TR]%asi
	ldx	[%g6 + MMFSA_I_CTX], %g4
	stxa	%g4, [%g3 + TRAP_ENT_F1]%asi
	ldx	[%g6 + MMFSA_I_TYPE], %g4
	stxa	%g4, [%g3 + TRAP_ENT_F2]%asi
	stxa	%g6, [%g3 + TRAP_ENT_F3]%asi
	stna	%g0, [%g3 + TRAP_ENT_F4]%asi
	TRACE_NEXT(%g3, %g4, %g5)
	jmp	%g7 + 4
	nop

trace_gen:
	TRACE_PTR(%g3, %g6)
	GET_TRACE_TICK(%g6, %g5)
	stxa	%g6, [%g3 + TRAP_ENT_TICK]%asi
	TRACE_SAVE_TL_GL_REGS(%g3, %g6)
	rdpr	%tt, %g6
	stha	%g6, [%g3 + TRAP_ENT_TT]%asi
	rdpr	%tstate, %g6
	stxa	%g6, [%g3 + TRAP_ENT_TSTATE]%asi
	stna	%sp, [%g3 + TRAP_ENT_SP]%asi
	rdpr	%tpc, %g6
	stna	%g6, [%g3 + TRAP_ENT_TPC]%asi
	stna	%g0, [%g3 + TRAP_ENT_TR]%asi
	stna	%g0, [%g3 + TRAP_ENT_F1]%asi
	stna	%g0, [%g3 + TRAP_ENT_F2]%asi
	stna	%g0, [%g3 + TRAP_ENT_F3]%asi
	stna	%g0, [%g3 + TRAP_ENT_F4]%asi
	TRACE_NEXT(%g3, %g4, %g5)
	jmp	%g7 + 4
	nop

trace_win:
	TRACE_WIN_INFO(0, %l0, %l1, %l2)
	! Keep the locals as clean as possible, caller cleans %l4
	clr	%l2
	clr	%l1
	jmp	%l4 + 4
	clr	%l0
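
	/*
	 * Each trace_* routine above fills one fixed-size entry of the
	 * per-CPU trap trace ring buffer through the TRAP_ENT_* offsets.
	 * A rough C picture of an entry as these routines use it (field
	 * names and widths are illustrative; the real layout comes from
	 * sys/traptrace.h):
	 *
	 *	#include <stdint.h>
	 *
	 *	struct trap_trace_ent {
	 *		uint64_t  tick;		// TRAP_ENT_TICK
	 *		uint16_t  tt;		// TRAP_ENT_TT
	 *		uint64_t  tstate;	// TRAP_ENT_TSTATE
	 *		uintptr_t sp;		// TRAP_ENT_SP
	 *		uintptr_t tpc;		// TRAP_ENT_TPC
	 *		uint64_t  tr;		// TRAP_ENT_TR
	 *		uint64_t  f1, f2, f3, f4; // handler-specific
	 *	};
	 */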

/*
 * Trace a tsb hit
 * g1 = tsbe pointer (in/clobbered)
 * g2 = tag access register (in)
 * g3 - g4 = scratch (clobbered)
 * g5 = tsbe data (in)
 * g6 = scratch (clobbered)
 * g7 = pc we jumped here from (in)
 */

	! Do not disturb %g5, it will be used after the trace
	ALTENTRY(trace_tsbhit)
	TRACE_TSBHIT(0)
	jmp	%g7 + 4
	nop

/*
 * Trace a TSB miss
 *
 * g1 = tsb8k pointer (in)
 * g2 = tag access register (in)
 * g3 = tsb4m pointer (in)
 * g4 = tsbe tag (in/clobbered)
 * g5 - g6 = scratch (clobbered)
 * g7 = pc we jumped here from (in)
 */
	.global	trace_tsbmiss
trace_tsbmiss:
	membar	#Sync
	sethi	%hi(FLUSH_ADDR), %g6
	flush	%g6
	TRACE_PTR(%g5, %g6)
	stna	%g2, [%g5 + TRAP_ENT_SP]%asi	! tag access
	stna	%g4, [%g5 + TRAP_ENT_F1]%asi	! XXX? tsb tag
	GET_TRACE_TICK(%g6, %g4)
	stxa	%g6, [%g5 + TRAP_ENT_TICK]%asi
	rdpr	%tnpc, %g6
	stna	%g6, [%g5 + TRAP_ENT_F2]%asi
	stna	%g1, [%g5 + TRAP_ENT_F3]%asi	! tsb8k pointer
	rdpr	%tpc, %g6
	stna	%g6, [%g5 + TRAP_ENT_TPC]%asi
	TRACE_SAVE_TL_GL_REGS(%g5, %g6)
	rdpr	%tt, %g6
	or	%g6, TT_MMU_MISS, %g4
	stha	%g4, [%g5 + TRAP_ENT_TT]%asi
	mov	MMFSA_D_ADDR, %g4
	cmp	%g6, FAST_IMMU_MISS_TT
	move	%xcc, MMFSA_I_ADDR, %g4
	cmp	%g6, T_INSTR_MMU_MISS
	move	%xcc, MMFSA_I_ADDR, %g4
	MMU_FAULT_STATUS_AREA(%g6)
	ldx	[%g6 + %g4], %g6
	stxa	%g6, [%g5 + TRAP_ENT_TSTATE]%asi ! tag target
	cmp	%g4, MMFSA_D_ADDR
	move	%xcc, MMFSA_D_CTX, %g4
	movne	%xcc, MMFSA_I_CTX, %g4
	MMU_FAULT_STATUS_AREA(%g6)
	ldx	[%g6 + %g4], %g6
	stxa	%g6, [%g5 + TRAP_ENT_F4]%asi	! context ID
	stna	%g3, [%g5 + TRAP_ENT_TR]%asi	! tsb4m pointer
	TRACE_NEXT(%g5, %g4, %g6)
	jmp	%g7 + 4
	nop

/*
 * g2 = tag access register (in)
 * g3 = ctx type (0, 1 or 2) (in) (not used)
 */
trace_dataprot:
	membar	#Sync
	sethi	%hi(FLUSH_ADDR), %g6
	flush	%g6
	TRACE_PTR(%g1, %g6)
	GET_TRACE_TICK(%g6, %g4)
	stxa	%g6, [%g1 + TRAP_ENT_TICK]%asi
	rdpr	%tpc, %g6
	stna	%g6, [%g1 + TRAP_ENT_TPC]%asi
	rdpr	%tstate, %g6
	stxa	%g6, [%g1 + TRAP_ENT_TSTATE]%asi
	stna	%g2, [%g1 + TRAP_ENT_SP]%asi	! tag access reg
	stna	%g0, [%g1 + TRAP_ENT_F1]%asi
	stna	%g0, [%g1 + TRAP_ENT_F2]%asi
	stna	%g0, [%g1 + TRAP_ENT_F3]%asi
	stna	%g0, [%g1 + TRAP_ENT_F4]%asi
	TRACE_SAVE_TL_GL_REGS(%g1, %g6)
	rdpr	%tt, %g6
	stha	%g6, [%g1 + TRAP_ENT_TT]%asi
	mov	MMFSA_D_CTX, %g4
	cmp	%g6, FAST_IMMU_MISS_TT
	move	%xcc, MMFSA_I_CTX, %g4
	cmp	%g6, T_INSTR_MMU_MISS
	move	%xcc, MMFSA_I_CTX, %g4
	MMU_FAULT_STATUS_AREA(%g6)
	ldx	[%g6 + %g4], %g6
	stxa	%g6, [%g1 + TRAP_ENT_TR]%asi	! context ID
	TRACE_NEXT(%g1, %g4, %g5)
	jmp	%g7 + 4
	nop
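
	/*
	 * trace_tsbmiss and trace_dataprot above select between the
	 * instruction- and data-side MMU fault status fields with
	 * conditional moves keyed off the trap type. The same selection
	 * in C:
	 *
	 *	static unsigned
	 *	mmfsa_addr_off(unsigned tt)
	 *	{
	 *		// instruction-side misses read the I_ fields,
	 *		// everything else the D_ fields
	 *		if (tt == FAST_IMMU_MISS_TT ||
	 *		    tt == T_INSTR_MMU_MISS)
	 *			return (MMFSA_I_ADDR);
	 *		return (MMFSA_D_ADDR);
	 *	}
	 */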

#endif /* TRAPTRACE */

/*
 * Handle watchdog reset trap. Enable the MMU using the MMU_ENABLE
 * HV service, which requires the return target to be specified as a VA
 * since we are enabling the MMU. We set the target to ptl1_panic.
 */

	.type	.watchdog_trap, #function
.watchdog_trap:
	mov	1, %o0
	setx	ptl1_panic, %g2, %o1
	mov	MMU_ENABLE, %o5
	ta	FAST_TRAP
	done
	SET_SIZE(.watchdog_trap)

/*
 * synthesize for trap(): SFAR in %g2, SFSR in %g3
 */
	.type	.dmmu_exc_lddf_not_aligned, #function
.dmmu_exc_lddf_not_aligned:
	MMU_FAULT_STATUS_AREA(%g3)
	ldx	[%g3 + MMFSA_D_ADDR], %g2
	/* Fault type not available in MMU fault status area */
	mov	MMFSA_F_UNALIGN, %g1
	ldx	[%g3 + MMFSA_D_CTX], %g3
	sllx	%g3, SFSR_CTX_SHIFT, %g3
	btst	1, %sp
	bnz,pt	%xcc, .lddf_exception_not_aligned
	or	%g3, %g1, %g3			/* SFSR */
	ba,a,pt	%xcc, .mmu_exception_not_aligned
	SET_SIZE(.dmmu_exc_lddf_not_aligned)

/*
 * synthesize for trap(): SFAR in %g2, SFSR in %g3
 */
	.type	.dmmu_exc_stdf_not_aligned, #function
.dmmu_exc_stdf_not_aligned:
	MMU_FAULT_STATUS_AREA(%g3)
	ldx	[%g3 + MMFSA_D_ADDR], %g2
	/* Fault type not available in MMU fault status area */
	mov	MMFSA_F_UNALIGN, %g1
	ldx	[%g3 + MMFSA_D_CTX], %g3
	sllx	%g3, SFSR_CTX_SHIFT, %g3
	btst	1, %sp
	bnz,pt	%xcc, .stdf_exception_not_aligned
	or	%g3, %g1, %g3			/* SFSR */
	ba,a,pt	%xcc, .mmu_exception_not_aligned
	SET_SIZE(.dmmu_exc_stdf_not_aligned)

	.type	.dmmu_exception, #function
.dmmu_exception:
	MMU_FAULT_STATUS_AREA(%g3)
	ldx	[%g3 + MMFSA_D_ADDR], %g2
	ldx	[%g3 + MMFSA_D_TYPE], %g1
	ldx	[%g3 + MMFSA_D_CTX], %g4
	srlx	%g2, MMU_PAGESHIFT, %g2		/* align address */
	sllx	%g2, MMU_PAGESHIFT, %g2
	sllx	%g4, SFSR_CTX_SHIFT, %g3
	or	%g3, %g1, %g3			/* SFSR */
	cmp	%g4, USER_CONTEXT_TYPE
	movgeu	%icc, USER_CONTEXT_TYPE, %g4
	or	%g2, %g4, %g2			/* TAG_ACCESS */
	ba,pt	%xcc, .mmu_exception_end
	mov	T_DATA_EXCEPTION, %g1
	SET_SIZE(.dmmu_exception)
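
	/*
	 * .dmmu_exception above builds the two values trap() expects
	 * out of the MMU fault status area: a synthetic SFSR (context
	 * shifted up, ORed with the fault type) and a TAG_ACCESS value.
	 * The TAG_ACCESS construction in C (constants per the headers
	 * included above):
	 *
	 *	#include <stdint.h>
	 *
	 *	static uint64_t
	 *	synth_tag_access(uint64_t addr, uint64_t ctx)
	 *	{
	 *		addr &= ~((1ULL << MMU_PAGESHIFT) - 1);	// align
	 *		if (ctx >= USER_CONTEXT_TYPE)		// clamp
	 *			ctx = USER_CONTEXT_TYPE;
	 *		return (addr | ctx);
	 *	}
	 */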
To minimize overhead, pending 2802 * interrupts are checked if the %tpc happens to be at 16K boundary, 2803 * which allows a malicious program to execute at most 4K consecutive 2804 * instructions before we service any pending interrupts. If a worst 2805 * case fast trap handler takes about 2 usec, then interrupts will be 2806 * blocked for at most 8 msec, less than a clock tick. 2807 * 2808 * For the cases where we don't know if the %tpc will cross a 16K 2809 * boundary, we can't use the above optimization and always process 2810 * any pending interrupts via fast_frap_done_chk_intr entry point. 2811 * 2812 * Entry Conditions: 2813 * %pstate am:0 priv:1 ie:0 2814 * globals are AG (not normal globals) 2815 */ 2816 2817 .global fast_trap_done, fast_trap_done_chk_intr 2818 fast_trap_done: 2819 rdpr %tpc, %g5 2820 sethi %hi(0xffffc000), %g6 ! 1's complement of 0x3fff 2821 andncc %g5, %g6, %g0 ! check lower 14 bits of %tpc 2822 bz,pn %icc, 1f ! branch if zero (lower 32 bits only) 2823 nop 2824 done 2825 2826 fast_trap_done_chk_intr: 2827 1: rd SOFTINT, %g6 2828 brnz,pn %g6, 2f ! branch if any pending intr 2829 nop 2830 done 2831 2832 2: 2833 /* 2834 * We get here if there are any pending interrupts. 2835 * Adjust %tpc/%tnpc as we'll be resuming via "retry" 2836 * instruction. 2837 */ 2838 rdpr %tnpc, %g5 2839 wrpr %g0, %g5, %tpc 2840 add %g5, 4, %g5 2841 wrpr %g0, %g5, %tnpc 2842 2843 /* 2844 * Force a dummy sys_trap call so that interrupts can be serviced. 2845 */ 2846 set fast_trap_dummy_call, %g1 2847 ba,pt %xcc, sys_trap 2848 mov -1, %g4 2849 2850 fast_trap_dummy_call: 2851 retl 2852 nop 2853 2854 /* 2855 * Currently the brand syscall interposition code is not enabled by 2856 * default. Instead, when a branded zone is first booted the brand 2857 * infrastructure will patch the trap table so that the syscall 2858 * entry points are redirected to syscall_wrapper32 and syscall_wrapper 2859 * for ILP32 and LP64 syscalls respectively. this is done in 2860 * brand_plat_interposition_enable(). Note that the syscall wrappers 2861 * below do not collect any trap trace data since the syscall hot patch 2862 * points are reached after trap trace data has already been collected. 2863 */ 2864 #define BRAND_CALLBACK(callback_id) \ 2865 CPU_ADDR(%g2, %g1) /* load CPU struct addr to %g2 */ ;\ 2866 ldn [%g2 + CPU_THREAD], %g3 /* load thread pointer */ ;\ 2867 ldn [%g3 + T_PROCP], %g3 /* get proc pointer */ ;\ 2868 ldn [%g3 + P_BRAND], %g3 /* get brand pointer */ ;\ 2869 brz %g3, 1f /* No brand? No callback. */ ;\ 2870 nop ;\ 2871 ldn [%g3 + B_MACHOPS], %g3 /* get machops list */ ;\ 2872 ldn [%g3 + (callback_id << 3)], %g3 ;\ 2873 brz %g3, 1f ;\ 2874 /* \ 2875 * This isn't pretty. We want a low-latency way for the callback \ 2876 * routine to decline to do anything. We just pass in an address \ 2877 * the routine can directly jmp back to, pretending that nothing \ 2878 * has happened. \ 2879 * \ 2880 * %g1: return address (where the brand handler jumps back to) \ 2881 * %g2: address of CPU structure \ 2882 * %g3: address of brand handler (where we will jump to) \ 2883 */ \ 2884 mov %pc, %g1 ;\ 2885 add %g1, 16, %g1 ;\ 2886 jmp %g3 ;\ 2887 nop ;\ 2888 1: 2889 2890 ENTRY_NP(syscall_wrapper32) 2891 BRAND_CALLBACK(BRAND_CB_SYSCALL32) 2892 SYSCALL_NOTT(syscall_trap32) 2893 SET_SIZE(syscall_wrapper32) 2894 2895 ENTRY_NP(syscall_wrapper) 2896 BRAND_CALLBACK(BRAND_CB_SYSCALL) 2897 SYSCALL_NOTT(syscall_trap) 2898 SET_SIZE(syscall_wrapper) 2899 2900 #endif /* lint */