Print this page
de-linting of .s files
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/sun4v/ml/trap_table.s
+++ new/usr/src/uts/sun4v/ml/trap_table.s
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
↓ open down ↓ |
16 lines elided |
↑ open up ↑ |
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 -#if !defined(lint)
28 27 #include "assym.h"
29 -#endif /* !lint */
30 28 #include <sys/asm_linkage.h>
31 29 #include <sys/privregs.h>
32 30 #include <sys/sun4asi.h>
33 31 #include <sys/machasi.h>
34 32 #include <sys/hypervisor_api.h>
35 33 #include <sys/machtrap.h>
36 34 #include <sys/machthread.h>
37 35 #include <sys/machbrand.h>
38 36 #include <sys/pcb.h>
39 37 #include <sys/pte.h>
40 38 #include <sys/mmu.h>
41 39 #include <sys/machpcb.h>
42 40 #include <sys/async.h>
43 41 #include <sys/intreg.h>
44 42 #include <sys/scb.h>
45 43 #include <sys/psr_compat.h>
46 44 #include <sys/syscall.h>
47 45 #include <sys/machparam.h>
48 46 #include <sys/traptrace.h>
49 47 #include <vm/hat_sfmmu.h>
50 48 #include <sys/archsystm.h>
51 49 #include <sys/utrap.h>
52 50 #include <sys/clock.h>
53 51 #include <sys/intr.h>
54 52 #include <sys/fpu/fpu_simulator.h>
55 53 #include <vm/seg_spt.h>
56 54
57 55 /*
58 56 * WARNING: If you add a fast trap handler which can be invoked by a
59 57 * non-privileged user, you may have to use the FAST_TRAP_DONE macro
60 58 * instead of "done" instruction to return back to the user mode. See
61 59 * comments for the "fast_trap_done" entry point for more information.
62 60 *
63 61 * An alternate FAST_TRAP_DONE_CHK_INTR macro should be used for the
64 62 * cases where you always want to process any pending interrupts before
65 63 * returning back to the user mode.
66 64 */
67 65 #define FAST_TRAP_DONE \
68 66 ba,a fast_trap_done
69 67
70 68 #define FAST_TRAP_DONE_CHK_INTR \
71 69 ba,a fast_trap_done_chk_intr
72 70
73 71 /*
74 72 * SPARC V9 Trap Table
75 73 *
76 74 * Most of the trap handlers are made from common building
77 75 * blocks, and some are instantiated multiple times within
78 76 * the trap table. So, I build a bunch of macros, then
79 77 * populate the table using only the macros.
80 78 *
81 79 * Many macros branch to sys_trap. Its calling convention is:
82 80 * %g1 kernel trap handler
83 81 * %g2, %g3 args for above
 84 82 * %g4 desired %pil
85 83 */
86 84
87 85 #ifdef TRAPTRACE
88 86
89 87 /*
90 88 * Tracing macro. Adds two instructions if TRAPTRACE is defined.
91 89 */
92 90 #define TT_TRACE(label) \
93 91 ba label ;\
94 92 rd %pc, %g7
95 93 #define TT_TRACE_INS 2
96 94
97 95 #define TT_TRACE_L(label) \
98 96 ba label ;\
99 97 rd %pc, %l4 ;\
100 98 clr %l4
101 99 #define TT_TRACE_L_INS 3
102 100
103 101 #else
104 102
105 103 #define TT_TRACE(label)
106 104 #define TT_TRACE_INS 0
107 105
108 106 #define TT_TRACE_L(label)
109 107 #define TT_TRACE_L_INS 0
110 108
↓ open down ↓ |
71 lines elided |
↑ open up ↑ |
111 109 #endif
112 110
113 111 /*
114 112 * This first set are funneled to trap() with %tt as the type.
115 113 * Trap will then either panic or send the user a signal.
116 114 */
117 115 /*
118 116 * NOT is used for traps that just shouldn't happen.
119 117 * It comes in both single and quadruple flavors.
120 118 */
121 -#if !defined(lint)
122 119 .global trap
123 -#endif /* !lint */
124 120 #define NOT \
125 121 TT_TRACE(trace_gen) ;\
126 122 set trap, %g1 ;\
127 123 rdpr %tt, %g3 ;\
128 124 ba,pt %xcc, sys_trap ;\
129 125 sub %g0, 1, %g4 ;\
130 126 .align 32
131 127 #define NOT4 NOT; NOT; NOT; NOT
132 128
133 129 #define NOTP \
134 130 TT_TRACE(trace_gen) ;\
135 131 ba,pt %xcc, ptl1_panic ;\
136 132 mov PTL1_BAD_TRAP, %g1 ;\
137 133 .align 32
138 134 #define NOTP4 NOTP; NOTP; NOTP; NOTP
139 135
140 136
141 137 /*
142 138 * BAD is used for trap vectors we don't have a kernel
143 139 * handler for.
144 140 * It also comes in single and quadruple versions.
145 141 */
146 142 #define BAD NOT
↓ open down ↓ |
13 lines elided |
↑ open up ↑ |
147 143 #define BAD4 NOT4
148 144
149 145 #define DONE \
150 146 done; \
151 147 .align 32
152 148
153 149 /*
154 150 * TRAP vectors to the trap() function.
 155 151 * Its main use is for user errors.
156 152 */
157 -#if !defined(lint)
158 153 .global trap
159 -#endif /* !lint */
160 154 #define TRAP(arg) \
161 155 TT_TRACE(trace_gen) ;\
162 156 set trap, %g1 ;\
163 157 mov arg, %g3 ;\
164 158 ba,pt %xcc, sys_trap ;\
165 159 sub %g0, 1, %g4 ;\
166 160 .align 32
167 161
168 162 /*
169 163 * SYSCALL is used for unsupported syscall interfaces (with 'which'
170 164 * set to 'nosys') and legacy support of old SunOS 4.x syscalls (with
171 165 * 'which' set to 'syscall_trap32').
172 166 *
173 167 * The SYSCALL_TRAP* macros are used for syscall entry points.
174 168 * SYSCALL_TRAP is used to support LP64 syscalls and SYSCALL_TRAP32
175 169 * is used to support ILP32. Each macro can only be used once
176 170 * since they each define a symbol. The symbols are used as hot patch
177 171 * points by the brand infrastructure to dynamically enable and disable
178 172 * brand syscall interposition. See the comments around BRAND_CALLBACK
179 173 * and brand_plat_interposition_enable() for more information.
180 174 */
181 175 #define SYSCALL_NOTT(which) \
182 176 set (which), %g1 ;\
183 177 ba,pt %xcc, user_trap ;\
184 178 sub %g0, 1, %g4 ;\
185 179 .align 32
186 180
187 181 #define SYSCALL(which) \
188 182 TT_TRACE(trace_gen) ;\
189 183 SYSCALL_NOTT(which)
190 184
191 185 #define SYSCALL_TRAP32 \
192 186 TT_TRACE(trace_gen) ;\
193 187 ALTENTRY(syscall_trap32_patch_point) \
194 188 SYSCALL_NOTT(syscall_trap32)
195 189
196 190 #define SYSCALL_TRAP \
197 191 TT_TRACE(trace_gen) ;\
198 192 ALTENTRY(syscall_trap_patch_point) \
199 193 SYSCALL_NOTT(syscall_trap)
200 194
201 195 /*
202 196 * GOTO just jumps to a label.
203 197 * It's used for things that can be fixed without going thru sys_trap.
204 198 */
205 199 #define GOTO(label) \
206 200 .global label ;\
207 201 ba,a label ;\
208 202 .empty ;\
209 203 .align 32
210 204
211 205 /*
212 206 * GOTO_TT just jumps to a label.
213 207 * correctable ECC error traps at level 0 and 1 will use this macro.
214 208 * It's used for things that can be fixed without going thru sys_trap.
215 209 */
216 210 #define GOTO_TT(label, ttlabel) \
217 211 .global label ;\
218 212 TT_TRACE(ttlabel) ;\
219 213 ba,a label ;\
220 214 .empty ;\
221 215 .align 32
222 216
223 217 /*
224 218 * Privileged traps
225 219 * Takes breakpoint if privileged, calls trap() if not.
226 220 */
227 221 #define PRIV(label) \
228 222 rdpr %tstate, %g1 ;\
229 223 btst TSTATE_PRIV, %g1 ;\
230 224 bnz label ;\
231 225 rdpr %tt, %g3 ;\
232 226 set trap, %g1 ;\
233 227 ba,pt %xcc, sys_trap ;\
234 228 sub %g0, 1, %g4 ;\
235 229 .align 32
236 230
237 231
238 232 /*
239 233 * DTrace traps.
240 234 */
241 235 #define DTRACE_PID \
242 236 .global dtrace_pid_probe ;\
243 237 set dtrace_pid_probe, %g1 ;\
244 238 ba,pt %xcc, user_trap ;\
245 239 sub %g0, 1, %g4 ;\
246 240 .align 32
247 241
248 242 #define DTRACE_RETURN \
249 243 .global dtrace_return_probe ;\
250 244 set dtrace_return_probe, %g1 ;\
251 245 ba,pt %xcc, user_trap ;\
252 246 sub %g0, 1, %g4 ;\
253 247 .align 32
254 248
255 249 /*
256 250 * REGISTER WINDOW MANAGEMENT MACROS
257 251 */
258 252
259 253 /*
260 254 * various convenient units of padding
261 255 */
262 256 #define SKIP(n) .skip 4*(n)
263 257
264 258 /*
265 259 * CLEAN_WINDOW is the simple handler for cleaning a register window.
↓ open down ↓ |
96 lines elided |
↑ open up ↑ |
266 260 */
267 261 #define CLEAN_WINDOW \
268 262 TT_TRACE_L(trace_win) ;\
269 263 rdpr %cleanwin, %l0; inc %l0; wrpr %l0, %cleanwin ;\
270 264 clr %l0; clr %l1; clr %l2; clr %l3 ;\
271 265 clr %l4; clr %l5; clr %l6; clr %l7 ;\
272 266 clr %o0; clr %o1; clr %o2; clr %o3 ;\
273 267 clr %o4; clr %o5; clr %o6; clr %o7 ;\
274 268 retry; .align 128
275 269
276 -#if !defined(lint)
277 -
278 270 /*
279 271 * If we get an unresolved tlb miss while in a window handler, the fault
280 272 * handler will resume execution at the last instruction of the window
 281 273 * handler, instead of delivering the fault to the kernel. Spill handlers
282 274 * use this to spill windows into the wbuf.
283 275 *
284 276 * The mixed handler works by checking %sp, and branching to the correct
285 277 * handler. This is done by branching back to label 1: for 32b frames,
286 278 * or label 2: for 64b frames; which implies the handler order is: 32b,
287 279 * 64b, mixed. The 1: and 2: labels are offset into the routines to
 288 280 * allow the branches' delay slots to contain useful instructions.
289 281 */
290 282
291 283 /*
292 284 * SPILL_32bit spills a 32-bit-wide kernel register window. It
293 285 * assumes that the kernel context and the nucleus context are the
294 286 * same. The stack pointer is required to be eight-byte aligned even
295 287 * though this code only needs it to be four-byte aligned.
296 288 */
297 289 #define SPILL_32bit(tail) \
298 290 srl %sp, 0, %sp ;\
299 291 1: st %l0, [%sp + 0] ;\
300 292 st %l1, [%sp + 4] ;\
301 293 st %l2, [%sp + 8] ;\
302 294 st %l3, [%sp + 12] ;\
303 295 st %l4, [%sp + 16] ;\
304 296 st %l5, [%sp + 20] ;\
305 297 st %l6, [%sp + 24] ;\
306 298 st %l7, [%sp + 28] ;\
307 299 st %i0, [%sp + 32] ;\
308 300 st %i1, [%sp + 36] ;\
309 301 st %i2, [%sp + 40] ;\
310 302 st %i3, [%sp + 44] ;\
311 303 st %i4, [%sp + 48] ;\
312 304 st %i5, [%sp + 52] ;\
313 305 st %i6, [%sp + 56] ;\
314 306 st %i7, [%sp + 60] ;\
315 307 TT_TRACE_L(trace_win) ;\
316 308 saved ;\
317 309 retry ;\
318 310 SKIP(31-19-TT_TRACE_L_INS) ;\
319 311 ba,a,pt %xcc, fault_32bit_/**/tail ;\
320 312 .empty
321 313
322 314 /*
323 315 * SPILL_32bit_asi spills a 32-bit-wide register window into a 32-bit
324 316 * wide address space via the designated asi. It is used to spill
325 317 * non-kernel windows. The stack pointer is required to be eight-byte
326 318 * aligned even though this code only needs it to be four-byte
327 319 * aligned.
328 320 */
329 321 #define SPILL_32bit_asi(asi_num, tail) \
330 322 srl %sp, 0, %sp ;\
331 323 1: sta %l0, [%sp + %g0]asi_num ;\
332 324 mov 4, %g1 ;\
333 325 sta %l1, [%sp + %g1]asi_num ;\
334 326 mov 8, %g2 ;\
335 327 sta %l2, [%sp + %g2]asi_num ;\
336 328 mov 12, %g3 ;\
337 329 sta %l3, [%sp + %g3]asi_num ;\
338 330 add %sp, 16, %g4 ;\
339 331 sta %l4, [%g4 + %g0]asi_num ;\
340 332 sta %l5, [%g4 + %g1]asi_num ;\
341 333 sta %l6, [%g4 + %g2]asi_num ;\
342 334 sta %l7, [%g4 + %g3]asi_num ;\
343 335 add %g4, 16, %g4 ;\
344 336 sta %i0, [%g4 + %g0]asi_num ;\
345 337 sta %i1, [%g4 + %g1]asi_num ;\
346 338 sta %i2, [%g4 + %g2]asi_num ;\
347 339 sta %i3, [%g4 + %g3]asi_num ;\
348 340 add %g4, 16, %g4 ;\
349 341 sta %i4, [%g4 + %g0]asi_num ;\
350 342 sta %i5, [%g4 + %g1]asi_num ;\
351 343 sta %i6, [%g4 + %g2]asi_num ;\
352 344 sta %i7, [%g4 + %g3]asi_num ;\
353 345 TT_TRACE_L(trace_win) ;\
354 346 saved ;\
355 347 retry ;\
356 348 SKIP(31-25-TT_TRACE_L_INS) ;\
357 349 ba,a,pt %xcc, fault_32bit_/**/tail ;\
358 350 .empty
359 351
360 352 #define SPILL_32bit_tt1(asi_num, tail) \
361 353 ba,a,pt %xcc, fault_32bit_/**/tail ;\
362 354 .empty ;\
363 355 .align 128
364 356
365 357
366 358 /*
367 359 * FILL_32bit fills a 32-bit-wide kernel register window. It assumes
368 360 * that the kernel context and the nucleus context are the same. The
369 361 * stack pointer is required to be eight-byte aligned even though this
370 362 * code only needs it to be four-byte aligned.
371 363 */
372 364 #define FILL_32bit(tail) \
373 365 srl %sp, 0, %sp ;\
374 366 1: TT_TRACE_L(trace_win) ;\
375 367 ld [%sp + 0], %l0 ;\
376 368 ld [%sp + 4], %l1 ;\
377 369 ld [%sp + 8], %l2 ;\
378 370 ld [%sp + 12], %l3 ;\
379 371 ld [%sp + 16], %l4 ;\
380 372 ld [%sp + 20], %l5 ;\
381 373 ld [%sp + 24], %l6 ;\
382 374 ld [%sp + 28], %l7 ;\
383 375 ld [%sp + 32], %i0 ;\
384 376 ld [%sp + 36], %i1 ;\
385 377 ld [%sp + 40], %i2 ;\
386 378 ld [%sp + 44], %i3 ;\
387 379 ld [%sp + 48], %i4 ;\
388 380 ld [%sp + 52], %i5 ;\
389 381 ld [%sp + 56], %i6 ;\
390 382 ld [%sp + 60], %i7 ;\
391 383 restored ;\
392 384 retry ;\
393 385 SKIP(31-19-TT_TRACE_L_INS) ;\
394 386 ba,a,pt %xcc, fault_32bit_/**/tail ;\
395 387 .empty
396 388
397 389 /*
398 390 * FILL_32bit_asi fills a 32-bit-wide register window from a 32-bit
399 391 * wide address space via the designated asi. It is used to fill
400 392 * non-kernel windows. The stack pointer is required to be eight-byte
401 393 * aligned even though this code only needs it to be four-byte
402 394 * aligned.
403 395 */
404 396 #define FILL_32bit_asi(asi_num, tail) \
405 397 srl %sp, 0, %sp ;\
406 398 1: TT_TRACE_L(trace_win) ;\
407 399 mov 4, %g1 ;\
408 400 lda [%sp + %g0]asi_num, %l0 ;\
409 401 mov 8, %g2 ;\
410 402 lda [%sp + %g1]asi_num, %l1 ;\
411 403 mov 12, %g3 ;\
412 404 lda [%sp + %g2]asi_num, %l2 ;\
413 405 lda [%sp + %g3]asi_num, %l3 ;\
414 406 add %sp, 16, %g4 ;\
415 407 lda [%g4 + %g0]asi_num, %l4 ;\
416 408 lda [%g4 + %g1]asi_num, %l5 ;\
417 409 lda [%g4 + %g2]asi_num, %l6 ;\
418 410 lda [%g4 + %g3]asi_num, %l7 ;\
419 411 add %g4, 16, %g4 ;\
420 412 lda [%g4 + %g0]asi_num, %i0 ;\
421 413 lda [%g4 + %g1]asi_num, %i1 ;\
422 414 lda [%g4 + %g2]asi_num, %i2 ;\
423 415 lda [%g4 + %g3]asi_num, %i3 ;\
424 416 add %g4, 16, %g4 ;\
425 417 lda [%g4 + %g0]asi_num, %i4 ;\
426 418 lda [%g4 + %g1]asi_num, %i5 ;\
427 419 lda [%g4 + %g2]asi_num, %i6 ;\
428 420 lda [%g4 + %g3]asi_num, %i7 ;\
429 421 restored ;\
430 422 retry ;\
431 423 SKIP(31-25-TT_TRACE_L_INS) ;\
432 424 ba,a,pt %xcc, fault_32bit_/**/tail ;\
433 425 .empty
434 426
435 427
436 428 /*
437 429 * SPILL_64bit spills a 64-bit-wide kernel register window. It
438 430 * assumes that the kernel context and the nucleus context are the
439 431 * same. The stack pointer is required to be eight-byte aligned.
440 432 */
441 433 #define SPILL_64bit(tail) \
442 434 2: stx %l0, [%sp + V9BIAS64 + 0] ;\
443 435 stx %l1, [%sp + V9BIAS64 + 8] ;\
444 436 stx %l2, [%sp + V9BIAS64 + 16] ;\
445 437 stx %l3, [%sp + V9BIAS64 + 24] ;\
446 438 stx %l4, [%sp + V9BIAS64 + 32] ;\
447 439 stx %l5, [%sp + V9BIAS64 + 40] ;\
448 440 stx %l6, [%sp + V9BIAS64 + 48] ;\
449 441 stx %l7, [%sp + V9BIAS64 + 56] ;\
450 442 stx %i0, [%sp + V9BIAS64 + 64] ;\
451 443 stx %i1, [%sp + V9BIAS64 + 72] ;\
452 444 stx %i2, [%sp + V9BIAS64 + 80] ;\
453 445 stx %i3, [%sp + V9BIAS64 + 88] ;\
454 446 stx %i4, [%sp + V9BIAS64 + 96] ;\
455 447 stx %i5, [%sp + V9BIAS64 + 104] ;\
456 448 stx %i6, [%sp + V9BIAS64 + 112] ;\
457 449 stx %i7, [%sp + V9BIAS64 + 120] ;\
458 450 TT_TRACE_L(trace_win) ;\
459 451 saved ;\
460 452 retry ;\
461 453 SKIP(31-18-TT_TRACE_L_INS) ;\
462 454 ba,a,pt %xcc, fault_64bit_/**/tail ;\
463 455 .empty
464 456
465 457 #define SPILL_64bit_ktt1(tail) \
466 458 ba,a,pt %xcc, fault_64bit_/**/tail ;\
467 459 .empty ;\
468 460 .align 128
469 461
470 462 #define SPILL_mixed_ktt1(tail) \
471 463 btst 1, %sp ;\
472 464 bz,a,pt %xcc, fault_32bit_/**/tail ;\
473 465 srl %sp, 0, %sp ;\
474 466 ba,a,pt %xcc, fault_64bit_/**/tail ;\
475 467 .empty ;\
476 468 .align 128
477 469
478 470 /*
479 471 * SPILL_64bit_asi spills a 64-bit-wide register window into a 64-bit
480 472 * wide address space via the designated asi. It is used to spill
481 473 * non-kernel windows. The stack pointer is required to be eight-byte
482 474 * aligned.
483 475 */
484 476 #define SPILL_64bit_asi(asi_num, tail) \
485 477 mov 0 + V9BIAS64, %g1 ;\
486 478 2: stxa %l0, [%sp + %g1]asi_num ;\
487 479 mov 8 + V9BIAS64, %g2 ;\
488 480 stxa %l1, [%sp + %g2]asi_num ;\
489 481 mov 16 + V9BIAS64, %g3 ;\
490 482 stxa %l2, [%sp + %g3]asi_num ;\
491 483 mov 24 + V9BIAS64, %g4 ;\
492 484 stxa %l3, [%sp + %g4]asi_num ;\
493 485 add %sp, 32, %g5 ;\
494 486 stxa %l4, [%g5 + %g1]asi_num ;\
495 487 stxa %l5, [%g5 + %g2]asi_num ;\
496 488 stxa %l6, [%g5 + %g3]asi_num ;\
497 489 stxa %l7, [%g5 + %g4]asi_num ;\
498 490 add %g5, 32, %g5 ;\
499 491 stxa %i0, [%g5 + %g1]asi_num ;\
500 492 stxa %i1, [%g5 + %g2]asi_num ;\
501 493 stxa %i2, [%g5 + %g3]asi_num ;\
502 494 stxa %i3, [%g5 + %g4]asi_num ;\
503 495 add %g5, 32, %g5 ;\
504 496 stxa %i4, [%g5 + %g1]asi_num ;\
505 497 stxa %i5, [%g5 + %g2]asi_num ;\
506 498 stxa %i6, [%g5 + %g3]asi_num ;\
507 499 stxa %i7, [%g5 + %g4]asi_num ;\
508 500 TT_TRACE_L(trace_win) ;\
509 501 saved ;\
510 502 retry ;\
511 503 SKIP(31-25-TT_TRACE_L_INS) ;\
512 504 ba,a,pt %xcc, fault_64bit_/**/tail ;\
513 505 .empty
514 506
515 507 #define SPILL_64bit_tt1(asi_num, tail) \
516 508 ba,a,pt %xcc, fault_64bit_/**/tail ;\
517 509 .empty ;\
518 510 .align 128
519 511
520 512 /*
521 513 * FILL_64bit fills a 64-bit-wide kernel register window. It assumes
522 514 * that the kernel context and the nucleus context are the same. The
523 515 * stack pointer is required to be eight-byte aligned.
524 516 */
525 517 #define FILL_64bit(tail) \
526 518 2: TT_TRACE_L(trace_win) ;\
527 519 ldx [%sp + V9BIAS64 + 0], %l0 ;\
528 520 ldx [%sp + V9BIAS64 + 8], %l1 ;\
529 521 ldx [%sp + V9BIAS64 + 16], %l2 ;\
530 522 ldx [%sp + V9BIAS64 + 24], %l3 ;\
531 523 ldx [%sp + V9BIAS64 + 32], %l4 ;\
532 524 ldx [%sp + V9BIAS64 + 40], %l5 ;\
533 525 ldx [%sp + V9BIAS64 + 48], %l6 ;\
534 526 ldx [%sp + V9BIAS64 + 56], %l7 ;\
535 527 ldx [%sp + V9BIAS64 + 64], %i0 ;\
536 528 ldx [%sp + V9BIAS64 + 72], %i1 ;\
537 529 ldx [%sp + V9BIAS64 + 80], %i2 ;\
538 530 ldx [%sp + V9BIAS64 + 88], %i3 ;\
539 531 ldx [%sp + V9BIAS64 + 96], %i4 ;\
540 532 ldx [%sp + V9BIAS64 + 104], %i5 ;\
541 533 ldx [%sp + V9BIAS64 + 112], %i6 ;\
542 534 ldx [%sp + V9BIAS64 + 120], %i7 ;\
543 535 restored ;\
544 536 retry ;\
545 537 SKIP(31-18-TT_TRACE_L_INS) ;\
546 538 ba,a,pt %xcc, fault_64bit_/**/tail ;\
547 539 .empty
548 540
549 541 /*
550 542 * FILL_64bit_asi fills a 64-bit-wide register window from a 64-bit
551 543 * wide address space via the designated asi. It is used to fill
552 544 * non-kernel windows. The stack pointer is required to be eight-byte
553 545 * aligned.
554 546 */
555 547 #define FILL_64bit_asi(asi_num, tail) \
556 548 mov V9BIAS64 + 0, %g1 ;\
557 549 2: TT_TRACE_L(trace_win) ;\
558 550 ldxa [%sp + %g1]asi_num, %l0 ;\
559 551 mov V9BIAS64 + 8, %g2 ;\
560 552 ldxa [%sp + %g2]asi_num, %l1 ;\
561 553 mov V9BIAS64 + 16, %g3 ;\
562 554 ldxa [%sp + %g3]asi_num, %l2 ;\
563 555 mov V9BIAS64 + 24, %g4 ;\
564 556 ldxa [%sp + %g4]asi_num, %l3 ;\
565 557 add %sp, 32, %g5 ;\
566 558 ldxa [%g5 + %g1]asi_num, %l4 ;\
567 559 ldxa [%g5 + %g2]asi_num, %l5 ;\
568 560 ldxa [%g5 + %g3]asi_num, %l6 ;\
569 561 ldxa [%g5 + %g4]asi_num, %l7 ;\
570 562 add %g5, 32, %g5 ;\
571 563 ldxa [%g5 + %g1]asi_num, %i0 ;\
572 564 ldxa [%g5 + %g2]asi_num, %i1 ;\
573 565 ldxa [%g5 + %g3]asi_num, %i2 ;\
574 566 ldxa [%g5 + %g4]asi_num, %i3 ;\
575 567 add %g5, 32, %g5 ;\
576 568 ldxa [%g5 + %g1]asi_num, %i4 ;\
↓ open down ↓ |
289 lines elided |
↑ open up ↑ |
577 569 ldxa [%g5 + %g2]asi_num, %i5 ;\
578 570 ldxa [%g5 + %g3]asi_num, %i6 ;\
579 571 ldxa [%g5 + %g4]asi_num, %i7 ;\
580 572 restored ;\
581 573 retry ;\
582 574 SKIP(31-25-TT_TRACE_L_INS) ;\
583 575 ba,a,pt %xcc, fault_64bit_/**/tail ;\
584 576 .empty
585 577
586 578
587 -#endif /* !lint */
588 -
589 579 /*
590 580 * SPILL_mixed spills either size window, depending on
591 581 * whether %sp is even or odd, to a 32-bit address space.
592 582 * This may only be used in conjunction with SPILL_32bit/
593 583 * FILL_64bit.
594 584 * Clear upper 32 bits of %sp if it is odd.
595 585 * We won't need to clear them in 64 bit kernel.
596 586 */
597 587 #define SPILL_mixed \
598 588 btst 1, %sp ;\
599 589 bz,a,pt %xcc, 1b ;\
600 590 srl %sp, 0, %sp ;\
601 591 ba,pt %xcc, 2b ;\
602 592 nop ;\
603 593 .align 128
604 594
605 595 /*
606 596 * FILL_mixed(ASI) fills either size window, depending on
607 597 * whether %sp is even or odd, from a 32-bit address space.
608 598 * This may only be used in conjunction with FILL_32bit/
609 599 * FILL_64bit. New versions of FILL_mixed_{tt1,asi} would be
610 600 * needed for use with FILL_{32,64}bit_{tt1,asi}. Particular
611 601 * attention should be paid to the instructions that belong
612 602 * in the delay slots of the branches depending on the type
613 603 * of fill handler being branched to.
614 604 * Clear upper 32 bits of %sp if it is odd.
615 605 * We won't need to clear them in 64 bit kernel.
616 606 */
617 607 #define FILL_mixed \
618 608 btst 1, %sp ;\
619 609 bz,a,pt %xcc, 1b ;\
620 610 srl %sp, 0, %sp ;\
621 611 ba,pt %xcc, 2b ;\
622 612 nop ;\
623 613 .align 128
624 614
625 615
626 616 /*
627 617 * SPILL_32clean/SPILL_64clean spill 32-bit and 64-bit register windows,
628 618 * respectively, into the address space via the designated asi. The
629 619 * unbiased stack pointer is required to be eight-byte aligned (even for
630 620 * the 32-bit case even though this code does not require such strict
631 621 * alignment).
632 622 *
633 623 * With SPARC v9 the spill trap takes precedence over the cleanwin trap
634 624 * so when cansave == 0, canrestore == 6, and cleanwin == 6 the next save
635 625 * will cause cwp + 2 to be spilled but will not clean cwp + 1. That
636 626 * window may contain kernel data so in user_rtt we set wstate to call
637 627 * these spill handlers on the first user spill trap. These handler then
638 628 * spill the appropriate window but also back up a window and clean the
639 629 * window that didn't get a cleanwin trap.
640 630 */
641 631 #define SPILL_32clean(asi_num, tail) \
642 632 srl %sp, 0, %sp ;\
643 633 sta %l0, [%sp + %g0]asi_num ;\
644 634 mov 4, %g1 ;\
645 635 sta %l1, [%sp + %g1]asi_num ;\
646 636 mov 8, %g2 ;\
647 637 sta %l2, [%sp + %g2]asi_num ;\
648 638 mov 12, %g3 ;\
649 639 sta %l3, [%sp + %g3]asi_num ;\
650 640 add %sp, 16, %g4 ;\
651 641 sta %l4, [%g4 + %g0]asi_num ;\
652 642 sta %l5, [%g4 + %g1]asi_num ;\
653 643 sta %l6, [%g4 + %g2]asi_num ;\
654 644 sta %l7, [%g4 + %g3]asi_num ;\
655 645 add %g4, 16, %g4 ;\
656 646 sta %i0, [%g4 + %g0]asi_num ;\
657 647 sta %i1, [%g4 + %g1]asi_num ;\
658 648 sta %i2, [%g4 + %g2]asi_num ;\
659 649 sta %i3, [%g4 + %g3]asi_num ;\
660 650 add %g4, 16, %g4 ;\
661 651 sta %i4, [%g4 + %g0]asi_num ;\
662 652 sta %i5, [%g4 + %g1]asi_num ;\
663 653 sta %i6, [%g4 + %g2]asi_num ;\
664 654 sta %i7, [%g4 + %g3]asi_num ;\
665 655 TT_TRACE_L(trace_win) ;\
666 656 b .spill_clean ;\
667 657 mov WSTATE_USER32, %g7 ;\
668 658 SKIP(31-25-TT_TRACE_L_INS) ;\
669 659 ba,a,pt %xcc, fault_32bit_/**/tail ;\
670 660 .empty
671 661
672 662 #define SPILL_64clean(asi_num, tail) \
673 663 mov 0 + V9BIAS64, %g1 ;\
674 664 stxa %l0, [%sp + %g1]asi_num ;\
675 665 mov 8 + V9BIAS64, %g2 ;\
676 666 stxa %l1, [%sp + %g2]asi_num ;\
677 667 mov 16 + V9BIAS64, %g3 ;\
678 668 stxa %l2, [%sp + %g3]asi_num ;\
679 669 mov 24 + V9BIAS64, %g4 ;\
680 670 stxa %l3, [%sp + %g4]asi_num ;\
681 671 add %sp, 32, %g5 ;\
682 672 stxa %l4, [%g5 + %g1]asi_num ;\
683 673 stxa %l5, [%g5 + %g2]asi_num ;\
684 674 stxa %l6, [%g5 + %g3]asi_num ;\
685 675 stxa %l7, [%g5 + %g4]asi_num ;\
686 676 add %g5, 32, %g5 ;\
687 677 stxa %i0, [%g5 + %g1]asi_num ;\
688 678 stxa %i1, [%g5 + %g2]asi_num ;\
689 679 stxa %i2, [%g5 + %g3]asi_num ;\
690 680 stxa %i3, [%g5 + %g4]asi_num ;\
691 681 add %g5, 32, %g5 ;\
692 682 stxa %i4, [%g5 + %g1]asi_num ;\
693 683 stxa %i5, [%g5 + %g2]asi_num ;\
694 684 stxa %i6, [%g5 + %g3]asi_num ;\
695 685 stxa %i7, [%g5 + %g4]asi_num ;\
696 686 TT_TRACE_L(trace_win) ;\
697 687 b .spill_clean ;\
698 688 mov WSTATE_USER64, %g7 ;\
699 689 SKIP(31-25-TT_TRACE_L_INS) ;\
700 690 ba,a,pt %xcc, fault_64bit_/**/tail ;\
701 691 .empty
702 692
703 693
704 694 /*
705 695 * Floating point disabled.
706 696 */
707 697 #define FP_DISABLED_TRAP \
708 698 TT_TRACE(trace_gen) ;\
709 699 ba,pt %xcc,.fp_disabled ;\
710 700 nop ;\
711 701 .align 32
712 702
713 703 /*
714 704 * Floating point exceptions.
715 705 */
716 706 #define FP_IEEE_TRAP \
717 707 TT_TRACE(trace_gen) ;\
↓ open down ↓ |
119 lines elided |
↑ open up ↑ |
718 708 ba,pt %xcc,.fp_ieee_exception ;\
719 709 nop ;\
720 710 .align 32
721 711
722 712 #define FP_TRAP \
723 713 TT_TRACE(trace_gen) ;\
724 714 ba,pt %xcc,.fp_exception ;\
725 715 nop ;\
726 716 .align 32
727 717
728 -#if !defined(lint)
729 -
730 718 /*
731 719 * ECACHE_ECC error traps at level 0 and level 1
732 720 */
733 721 #define ECACHE_ECC(table_name) \
734 722 .global table_name ;\
735 723 table_name: ;\
736 724 membar #Sync ;\
737 725 set trap, %g1 ;\
738 726 rdpr %tt, %g3 ;\
739 727 ba,pt %xcc, sys_trap ;\
740 728 sub %g0, 1, %g4 ;\
741 729 .align 32
742 730
743 -#endif /* !lint */
744 -
745 731 /*
746 732 * illegal instruction trap
747 733 */
748 734 #define ILLTRAP_INSTR \
749 735 membar #Sync ;\
750 736 TT_TRACE(trace_gen) ;\
751 737 or %g0, P_UTRAP4, %g2 ;\
752 738 or %g0, T_UNIMP_INSTR, %g3 ;\
753 739 sethi %hi(.check_v9utrap), %g4 ;\
754 740 jmp %g4 + %lo(.check_v9utrap) ;\
755 741 nop ;\
756 742 .align 32
757 743
758 744 /*
759 745 * tag overflow trap
760 746 */
761 747 #define TAG_OVERFLOW \
762 748 TT_TRACE(trace_gen) ;\
763 749 or %g0, P_UTRAP10, %g2 ;\
764 750 or %g0, T_TAG_OVERFLOW, %g3 ;\
765 751 sethi %hi(.check_v9utrap), %g4 ;\
766 752 jmp %g4 + %lo(.check_v9utrap) ;\
767 753 nop ;\
768 754 .align 32
769 755
770 756 /*
771 757 * divide by zero trap
772 758 */
773 759 #define DIV_BY_ZERO \
774 760 TT_TRACE(trace_gen) ;\
775 761 or %g0, P_UTRAP11, %g2 ;\
776 762 or %g0, T_IDIV0, %g3 ;\
777 763 sethi %hi(.check_v9utrap), %g4 ;\
778 764 jmp %g4 + %lo(.check_v9utrap) ;\
779 765 nop ;\
780 766 .align 32
781 767
782 768 /*
783 769 * trap instruction for V9 user trap handlers
784 770 */
785 771 #define TRAP_INSTR \
786 772 TT_TRACE(trace_gen) ;\
787 773 or %g0, T_SOFTWARE_TRAP, %g3 ;\
788 774 sethi %hi(.check_v9utrap), %g4 ;\
789 775 jmp %g4 + %lo(.check_v9utrap) ;\
790 776 nop ;\
791 777 .align 32
792 778 #define TRP4 TRAP_INSTR; TRAP_INSTR; TRAP_INSTR; TRAP_INSTR
793 779
794 780 /*
795 781 * LEVEL_INTERRUPT is for level N interrupts.
796 782 * VECTOR_INTERRUPT is for the vector trap.
797 783 */
798 784 #define LEVEL_INTERRUPT(level) \
799 785 .global tt_pil/**/level ;\
800 786 tt_pil/**/level: ;\
801 787 ba,pt %xcc, pil_interrupt ;\
802 788 mov level, %g4 ;\
803 789 .align 32
804 790
805 791 #define LEVEL14_INTERRUPT \
806 792 ba pil14_interrupt ;\
807 793 mov PIL_14, %g4 ;\
808 794 .align 32
809 795
810 796 #define LEVEL15_INTERRUPT \
811 797 ba pil15_interrupt ;\
812 798 mov PIL_15, %g4 ;\
813 799 .align 32
814 800
815 801 #define CPU_MONDO \
816 802 ba,a,pt %xcc, cpu_mondo ;\
817 803 .align 32
818 804
819 805 #define DEV_MONDO \
820 806 ba,a,pt %xcc, dev_mondo ;\
821 807 .align 32
822 808
823 809 /*
824 810 * We take over the rtba after we set our trap table and
825 811 * fault status area. The watchdog reset trap is now handled by the OS.
826 812 */
827 813 #define WATCHDOG_RESET \
828 814 mov PTL1_BAD_WATCHDOG, %g1 ;\
829 815 ba,a,pt %xcc, .watchdog_trap ;\
830 816 .align 32
831 817
832 818 /*
833 819 * RED is for traps that use the red mode handler.
834 820 * We should never see these either.
835 821 */
836 822 #define RED \
837 823 mov PTL1_BAD_RED, %g1 ;\
838 824 ba,a,pt %xcc, .watchdog_trap ;\
839 825 .align 32
840 826
841 827
842 828 /*
843 829 * MMU Trap Handlers.
844 830 */
845 831
846 832 /*
847 833 * synthesize for trap(): SFSR in %g3
848 834 */
849 835 #define IMMU_EXCEPTION \
850 836 MMU_FAULT_STATUS_AREA(%g3) ;\
851 837 rdpr %tpc, %g2 ;\
852 838 ldx [%g3 + MMFSA_I_TYPE], %g1 ;\
853 839 ldx [%g3 + MMFSA_I_CTX], %g3 ;\
854 840 sllx %g3, SFSR_CTX_SHIFT, %g3 ;\
855 841 or %g3, %g1, %g3 ;\
856 842 ba,pt %xcc, .mmu_exception_end ;\
857 843 mov T_INSTR_EXCEPTION, %g1 ;\
858 844 .align 32
859 845
860 846 /*
861 847 * synthesize for trap(): TAG_ACCESS in %g2, SFSR in %g3
862 848 */
863 849 #define DMMU_EXCEPTION \
864 850 ba,a,pt %xcc, .dmmu_exception ;\
865 851 .align 32
866 852
867 853 /*
868 854 * synthesize for trap(): SFAR in %g2, SFSR in %g3
869 855 */
870 856 #define DMMU_EXC_AG_PRIV \
871 857 MMU_FAULT_STATUS_AREA(%g3) ;\
872 858 ldx [%g3 + MMFSA_D_ADDR], %g2 ;\
873 859 /* Fault type not available in MMU fault status area */ ;\
874 860 mov MMFSA_F_PRVACT, %g1 ;\
875 861 ldx [%g3 + MMFSA_D_CTX], %g3 ;\
876 862 sllx %g3, SFSR_CTX_SHIFT, %g3 ;\
877 863 ba,pt %xcc, .mmu_priv_exception ;\
878 864 or %g3, %g1, %g3 ;\
879 865 .align 32
880 866
881 867 /*
882 868 * synthesize for trap(): SFAR in %g2, SFSR in %g3
883 869 */
884 870 #define DMMU_EXC_AG_NOT_ALIGNED \
885 871 MMU_FAULT_STATUS_AREA(%g3) ;\
886 872 ldx [%g3 + MMFSA_D_ADDR], %g2 ;\
887 873 /* Fault type not available in MMU fault status area */ ;\
888 874 mov MMFSA_F_UNALIGN, %g1 ;\
889 875 ldx [%g3 + MMFSA_D_CTX], %g3 ;\
890 876 sllx %g3, SFSR_CTX_SHIFT, %g3 ;\
891 877 ba,pt %xcc, .mmu_exception_not_aligned ;\
892 878 or %g3, %g1, %g3 /* SFSR */ ;\
893 879 .align 32
894 880 /*
895 881 * SPARC V9 IMPL. DEP. #109(1) and (2) and #110(1) and (2)
896 882 */
897 883
898 884 /*
899 885 * synthesize for trap(): SFAR in %g2, SFSR in %g3
900 886 */
901 887 #define DMMU_EXC_LDDF_NOT_ALIGNED \
902 888 ba,a,pt %xcc, .dmmu_exc_lddf_not_aligned ;\
903 889 .align 32
904 890 /*
905 891 * synthesize for trap(): SFAR in %g2, SFSR in %g3
906 892 */
907 893 #define DMMU_EXC_STDF_NOT_ALIGNED \
908 894 ba,a,pt %xcc, .dmmu_exc_stdf_not_aligned ;\
909 895 .align 32
910 896
911 897 #if defined(cscope)
912 898 /*
913 899 * Define labels to direct cscope quickly to labels that
914 900 * are generated by macro expansion of DTLB_MISS().
915 901 */
916 902 .global tt0_dtlbmiss
917 903 tt0_dtlbmiss:
918 904 .global tt1_dtlbmiss
919 905 tt1_dtlbmiss:
920 906 nop
921 907 #endif
922 908
923 909 /*
924 910 * Data miss handler (must be exactly 32 instructions)
925 911 *
926 912 * This handler is invoked only if the hypervisor has been instructed
927 913 * not to do any TSB walk.
928 914 *
929 915 * Kernel and invalid context cases are handled by the sfmmu_kdtlb_miss
930 916 * handler.
931 917 *
932 918 * User TLB miss handling depends upon whether a user process has one or
933 919 * two TSBs. User TSB information (physical base and size code) is kept
934 920 * in two dedicated scratchpad registers. Absence of a user TSB (primarily
935 921 * second TSB) is indicated by a negative value (-1) in that register.
936 922 */
937 923
938 924 /*
939 925 * synthesize for miss handler: pseudo-tag access in %g2 (with context "type"
940 926 * (0=kernel, 1=invalid, or 2=user) rather than context ID)
941 927 */
942 928 #define DTLB_MISS(table_name) ;\
943 929 .global table_name/**/_dtlbmiss ;\
944 930 table_name/**/_dtlbmiss: ;\
945 931 GET_MMU_D_PTAGACC_CTXTYPE(%g2, %g3) /* 8 instr */ ;\
946 932 cmp %g3, INVALID_CONTEXT ;\
947 933 ble,pn %xcc, sfmmu_kdtlb_miss ;\
948 934 srlx %g2, TAG_VALO_SHIFT, %g7 /* g7 = tsb tag */ ;\
949 935 mov SCRATCHPAD_UTSBREG2, %g1 ;\
950 936 ldxa [%g1]ASI_SCRATCHPAD, %g1 /* get 2nd tsbreg */ ;\
951 937 brgez,pn %g1, sfmmu_udtlb_slowpath /* branch if 2 TSBs */ ;\
952 938 nop ;\
953 939 GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5) /* 11 instr */ ;\
954 940 ba,pt %xcc, sfmmu_udtlb_fastpath /* no 4M TSB, miss */ ;\
955 941 srlx %g2, TAG_VALO_SHIFT, %g7 /* g7 = tsb tag */ ;\
956 942 .align 128
957 943
958 944
959 945 #if defined(cscope)
960 946 /*
961 947 * Define labels to direct cscope quickly to labels that
962 948 * are generated by macro expansion of ITLB_MISS().
963 948a * (Compiled only when 'cscope' is defined for cross-reference
964 948b * builds; not part of the normal kernel.)
965 949 */
964 950 .global tt0_itlbmiss
965 951 tt0_itlbmiss:
966 952 .global tt1_itlbmiss
967 953 tt1_itlbmiss:
968 954 nop
969 955 #endif
970 956
971 957 /*
972 958 * Instruction miss handler.
973 959 *
974 960 * This handler is invoked only if the hypervisor has been instructed
975 961 * not to do any TSB walk.
976 962 *
977 963 * ldda instructions will have their ASI patched
978 964 * by sfmmu_patch_ktsb at runtime.
979 965 * MUST be EXACTLY 32 instructions or we'll break.
980 966 */
981 967
982 968 /*
983 969 * synthesize for miss handler: TAG_ACCESS in %g2 (with context "type"
984 970 * (0=kernel, 1=invalid, or 2=user) rather than context ID)
985 971 */
/*
 * ITLB_MISS: instruction-TLB-miss vector (one 128-byte trap-table
 * slot); structure parallels DTLB_MISS.  Kernel/invalid contexts go to
 * sfmmu_kitlb_miss; two-TSB user processes to sfmmu_uitlb_slowpath;
 * otherwise the single-TSB fast path with the TSB tag in %g7.
 */
986 972 #define ITLB_MISS(table_name) \
987 973 .global table_name/**/_itlbmiss ;\
988 974 table_name/**/_itlbmiss: ;\
989 975 GET_MMU_I_PTAGACC_CTXTYPE(%g2, %g3) /* 8 instr */ ;\
990 976 cmp %g3, INVALID_CONTEXT ;\
991 977 ble,pn %xcc, sfmmu_kitlb_miss ;\
992 978 srlx %g2, TAG_VALO_SHIFT, %g7 /* g7 = tsb tag */ ;\
993 979 mov SCRATCHPAD_UTSBREG2, %g1 ;\
994 980 ldxa [%g1]ASI_SCRATCHPAD, %g1 /* get 2nd tsbreg */ ;\
995 981 brgez,pn %g1, sfmmu_uitlb_slowpath /* branch if 2 TSBs */ ;\
996 982 nop ;\
997 983 GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5) /* 11 instr */ ;\
998 984 ba,pt %xcc, sfmmu_uitlb_fastpath /* no 4M TSB, miss */ ;\
999 985 srlx %g2, TAG_VALO_SHIFT, %g7 /* g7 = tsb tag */ ;\
1000 986 .align 128
1001 987
/*
 * TSB-miss vectors: dispatch to the slow-path miss handlers,
 * tagging the trap-trace entry with the matching trace type.
 */
1002 988 #define DTSB_MISS \
1003 989 GOTO_TT(sfmmu_slow_dmmu_miss,trace_dmmu)
1004 990
1005 991 #define ITSB_MISS \
1006 992 GOTO_TT(sfmmu_slow_immu_miss,trace_immu)
1007 993
1008 994 /*
1009 995 * This macro is the first level handler for fast protection faults.
1010 996 * It first demaps the tlb entry which generated the fault and then
1011 997 * attempts to set the modify bit on the hash. It needs to be
1012 998 * exactly 32 instructions.
1013 999 */
1014 1000 /*
1015 1001 * synthesize for miss handler: TAG_ACCESS in %g2 (with context "type"
1016 1002 * (0=kernel, 1=invalid, or 2=user) rather than context ID)
1017 1003 */
/*
 * DTLB_PROT: fast data-protection-fault vector (one 128-byte slot).
 * Non-kernel contexts (ctx type != 0) go to sfmmu_uprot_trap; kernel
 * context goes to sfmmu_kprot_trap.
 */
1018 1004 #define DTLB_PROT \
1019 1005 GET_MMU_D_PTAGACC_CTXTYPE(%g2, %g3) /* 8 instr */ ;\
1020 1006 /* ;\
1021 1007 * g2 = pseudo-tag access register (ctx type rather than ctx ID) ;\
1022 1008 * g3 = ctx type (0, 1, or 2) ;\
1023 1009 */ ;\
1024 1010 TT_TRACE(trace_dataprot) /* 2 instr ifdef TRAPTRACE */ ;\
1025 1011 /* clobbers g1 and g6 XXXQ? */ ;\
1026 1012 brnz,pt %g3, sfmmu_uprot_trap /* user trap */ ;\
1027 1013 nop ;\
1028 1014 ba,a,pt %xcc, sfmmu_kprot_trap /* kernel trap */ ;\
1029 1015 .align 128
1030 1016
/*
 * TL>0 data-access-exception and misaligned-address vectors: both
 * branch to the common TL1 MMU trap handler (32-byte slots each).
 */
1031 1017 #define DMMU_EXCEPTION_TL1 ;\
1032 1018 ba,a,pt %xcc, mmu_trap_tl1 ;\
1033 1019 .align 32
1034 1020
1035 1021 #define MISALIGN_ADDR_TL1 ;\
1036 1022 ba,a,pt %xcc, mmu_trap_tl1 ;\
1037 1023 .align 32
1038 1024
1039 1025 /*
1040 1026 * Trace a tsb hit
1041 1027 * g1 = tsbe pointer (in/clobbered)
1042 1028 * g2 = tag access register (in)
1043 1029 * g3 - g4 = scratch (clobbered)
1044 1030 * g5 = tsbe data (in)
1045 1031 * g6 = scratch (clobbered)
1046 1032 * g7 = pc we jumped here from (in)
1047 1033 * ttextra = value to OR in to trap type (%tt) (in)
1048 1034 */
1049 1035 #ifdef TRAPTRACE
/*
 * Note: the doubled cmp/move pairs below map both instruction-miss
 * trap types (FAST_IMMU_MISS_TT and T_INSTR_MMU_MISS) onto the I-side
 * MMFSA address/context fields; every other %tt records the D-side.
 */
1050 1036 #define TRACE_TSBHIT(ttextra) \
1051 1037 membar #Sync ;\
1052 1038 sethi %hi(FLUSH_ADDR), %g6 ;\
1053 1039 flush %g6 ;\
1054 1040 TRACE_PTR(%g3, %g6) ;\
1055 1041 GET_TRACE_TICK(%g6, %g4) ;\
1056 1042 stxa %g6, [%g3 + TRAP_ENT_TICK]%asi ;\
1057 1043 stna %g2, [%g3 + TRAP_ENT_SP]%asi /* tag access */ ;\
1058 1044 stna %g5, [%g3 + TRAP_ENT_F1]%asi /* tsb data */ ;\
1059 1045 rdpr %tnpc, %g6 ;\
1060 1046 stna %g6, [%g3 + TRAP_ENT_F2]%asi ;\
1061 1047 stna %g1, [%g3 + TRAP_ENT_F3]%asi /* tsb pointer */ ;\
1062 1048 stna %g0, [%g3 + TRAP_ENT_F4]%asi ;\
1063 1049 rdpr %tpc, %g6 ;\
1064 1050 stna %g6, [%g3 + TRAP_ENT_TPC]%asi ;\
1065 1051 TRACE_SAVE_TL_GL_REGS(%g3, %g6) ;\
1066 1052 rdpr %tt, %g6 ;\
1067 1053 or %g6, (ttextra), %g1 ;\
1068 1054 stha %g1, [%g3 + TRAP_ENT_TT]%asi ;\
1069 1055 MMU_FAULT_STATUS_AREA(%g4) ;\
1070 1056 mov MMFSA_D_ADDR, %g1 ;\
1071 1057 cmp %g6, FAST_IMMU_MISS_TT ;\
1072 1058 move %xcc, MMFSA_I_ADDR, %g1 ;\
1073 1059 cmp %g6, T_INSTR_MMU_MISS ;\
1074 1060 move %xcc, MMFSA_I_ADDR, %g1 ;\
1075 1061 ldx [%g4 + %g1], %g1 ;\
1076 1062 stxa %g1, [%g3 + TRAP_ENT_TSTATE]%asi /* fault addr */ ;\
1077 1063 mov MMFSA_D_CTX, %g1 ;\
1078 1064 cmp %g6, FAST_IMMU_MISS_TT ;\
1079 1065 move %xcc, MMFSA_I_CTX, %g1 ;\
1080 1066 cmp %g6, T_INSTR_MMU_MISS ;\
1081 1067 move %xcc, MMFSA_I_CTX, %g1 ;\
1082 1068 ldx [%g4 + %g1], %g1 ;\
1083 1069 stna %g1, [%g3 + TRAP_ENT_TR]%asi ;\
1084 1070 TRACE_NEXT(%g3, %g4, %g6)
1085 1071 #else
1086 1072 #define TRACE_TSBHIT(ttextra)
1087 1073 #endif
1088 1074
1089 1075
1090 -#if defined(lint)
1091 -
1092 -struct scb trap_table;
1093 -struct scb scb; /* trap_table/scb are the same object */
1094 -
1095 -#else /* lint */
1096 -
1097 1076 /*
1098 1077 * =======================================================================
1099 1078 * SPARC V9 TRAP TABLE
1100 1079 *
1101 1080 * The trap table is divided into two halves: the first half is used when
1102 1081 * taking traps when TL=0; the second half is used when taking traps from
1103 1082 * TL>0. Note that handlers in the second half of the table might not be able
1104 1083 * to make the same assumptions as handlers in the first half of the table.
1105 1084 *
1106 1085 * Worst case trap nesting so far:
1107 1086 *
1108 1087 * at TL=0 client issues software trap requesting service
1109 1088 * at TL=1 nucleus wants a register window
1110 1089 * at TL=2 register window clean/spill/fill takes a TLB miss
1111 1090 * at TL=3 processing TLB miss
1112 1091 * at TL=4 handle asynchronous error
1113 1092 *
1114 1093 * Note that a trap from TL=4 to TL=5 places Spitfire in "RED mode".
1115 1094 *
1116 1095 * =======================================================================
1117 1096 */
1118 1097 .section ".text"
1119 1098 .align 4
1120 1099 .global trap_table, scb, trap_table0, trap_table1, etrap_table
1121 1100 .type trap_table, #object
1122 1101 .type trap_table0, #object
1123 1102 .type trap_table1, #object
1124 1103 .type scb, #object
1125 1104 trap_table:
1126 1105 scb:
1127 1106 trap_table0:
1128 1107 /* hardware traps */
1129 1108 NOT; /* 000 reserved */
1130 1109 RED; /* 001 power on reset */
1131 1110 WATCHDOG_RESET; /* 002 watchdog reset */
1132 1111 RED; /* 003 externally initiated reset */
1133 1112 RED; /* 004 software initiated reset */
1134 1113 RED; /* 005 red mode exception */
1135 1114 NOT; NOT; /* 006 - 007 reserved */
1136 1115 IMMU_EXCEPTION; /* 008 instruction access exception */
1137 1116 ITSB_MISS; /* 009 instruction access MMU miss */
1138 1117 NOT; /* 00A reserved */
1139 1118 NOT; NOT4; /* 00B - 00F reserved */
1140 1119 ILLTRAP_INSTR; /* 010 illegal instruction */
1141 1120 TRAP(T_PRIV_INSTR); /* 011 privileged opcode */
1142 1121 TRAP(T_UNIMP_LDD); /* 012 unimplemented LDD */
1143 1122 TRAP(T_UNIMP_STD); /* 013 unimplemented STD */
1144 1123 NOT4; NOT4; NOT4; /* 014 - 01F reserved */
1145 1124 FP_DISABLED_TRAP; /* 020 fp disabled */
1146 1125 FP_IEEE_TRAP; /* 021 fp exception ieee 754 */
1147 1126 FP_TRAP; /* 022 fp exception other */
1148 1127 TAG_OVERFLOW; /* 023 tag overflow */
1149 1128 CLEAN_WINDOW; /* 024 - 027 clean window */
1150 1129 DIV_BY_ZERO; /* 028 division by zero */
1151 1130 NOT; /* 029 internal processor error */
1152 1131 NOT; NOT; NOT4; /* 02A - 02F reserved */
1153 1132 DMMU_EXCEPTION; /* 030 data access exception */
1154 1133 DTSB_MISS; /* 031 data access MMU miss */
1155 1134 NOT; /* 032 reserved */
1156 1135 NOT; /* 033 data access protection */
1157 1136 DMMU_EXC_AG_NOT_ALIGNED; /* 034 mem address not aligned */
1158 1137 DMMU_EXC_LDDF_NOT_ALIGNED; /* 035 LDDF mem address not aligned */
1159 1138 DMMU_EXC_STDF_NOT_ALIGNED; /* 036 STDF mem address not aligned */
1160 1139 DMMU_EXC_AG_PRIV; /* 037 privileged action */
1161 1140 NOT; /* 038 LDQF mem address not aligned */
1162 1141 NOT; /* 039 STQF mem address not aligned */
1163 1142 NOT; NOT; NOT4; /* 03A - 03F reserved */
1164 1143 NOT; /* 040 async data error */
1165 1144 LEVEL_INTERRUPT(1); /* 041 interrupt level 1 */
1166 1145 LEVEL_INTERRUPT(2); /* 042 interrupt level 2 */
1167 1146 LEVEL_INTERRUPT(3); /* 043 interrupt level 3 */
1168 1147 LEVEL_INTERRUPT(4); /* 044 interrupt level 4 */
1169 1148 LEVEL_INTERRUPT(5); /* 045 interrupt level 5 */
1170 1149 LEVEL_INTERRUPT(6); /* 046 interrupt level 6 */
1171 1150 LEVEL_INTERRUPT(7); /* 047 interrupt level 7 */
1172 1151 LEVEL_INTERRUPT(8); /* 048 interrupt level 8 */
1173 1152 LEVEL_INTERRUPT(9); /* 049 interrupt level 9 */
1174 1153 LEVEL_INTERRUPT(10); /* 04A interrupt level 10 */
1175 1154 LEVEL_INTERRUPT(11); /* 04B interrupt level 11 */
1176 1155 LEVEL_INTERRUPT(12); /* 04C interrupt level 12 */
1177 1156 LEVEL_INTERRUPT(13); /* 04D interrupt level 13 */
1178 1157 LEVEL14_INTERRUPT; /* 04E interrupt level 14 */
1179 1158 LEVEL15_INTERRUPT; /* 04F interrupt level 15 */
1180 1159 NOT4; NOT4; NOT4; NOT4; /* 050 - 05F reserved */
1181 1160 NOT; /* 060 interrupt vector */
1182 1161 GOTO(kmdb_trap); /* 061 PA watchpoint */
1183 1162 GOTO(kmdb_trap); /* 062 VA watchpoint */
1184 1163 NOT; /* 063 reserved */
1185 1164 ITLB_MISS(tt0); /* 064 instruction access MMU miss */
1186 1165 DTLB_MISS(tt0); /* 068 data access MMU miss */
1187 1166 DTLB_PROT; /* 06C data access protection */
1188 1167 NOT; /* 070 reserved */
1189 1168 NOT; /* 071 reserved */
1190 1169 NOT; /* 072 reserved */
1191 1170 NOT; /* 073 reserved */
1192 1171 NOT4; NOT4 /* 074 - 07B reserved */
1193 1172 CPU_MONDO; /* 07C cpu_mondo */
1194 1173 DEV_MONDO; /* 07D dev_mondo */
1195 1174 GOTO_TT(resumable_error, trace_gen); /* 07E resumable error */
1196 1175 GOTO_TT(nonresumable_error, trace_gen); /* 07F non-resumable error */
1197 1176 NOT4; /* 080 spill 0 normal */
1198 1177 SPILL_32bit_asi(ASI_AIUP,sn0); /* 084 spill 1 normal */
1199 1178 SPILL_64bit_asi(ASI_AIUP,sn0); /* 088 spill 2 normal */
1200 1179 SPILL_32clean(ASI_AIUP,sn0); /* 08C spill 3 normal */
1201 1180 SPILL_64clean(ASI_AIUP,sn0); /* 090 spill 4 normal */
1202 1181 SPILL_32bit(not); /* 094 spill 5 normal */
1203 1182 SPILL_64bit(not); /* 098 spill 6 normal */
1204 1183 SPILL_mixed; /* 09C spill 7 normal */
1205 1184 NOT4; /* 0A0 spill 0 other */
1206 1185 SPILL_32bit_asi(ASI_AIUS,so0); /* 0A4 spill 1 other */
1207 1186 SPILL_64bit_asi(ASI_AIUS,so0); /* 0A8 spill 2 other */
1208 1187 SPILL_32bit_asi(ASI_AIUS,so0); /* 0AC spill 3 other */
1209 1188 SPILL_64bit_asi(ASI_AIUS,so0); /* 0B0 spill 4 other */
1210 1189 NOT4; /* 0B4 spill 5 other */
1211 1190 NOT4; /* 0B8 spill 6 other */
1212 1191 NOT4; /* 0BC spill 7 other */
1213 1192 NOT4; /* 0C0 fill 0 normal */
1214 1193 FILL_32bit_asi(ASI_AIUP,fn0); /* 0C4 fill 1 normal */
1215 1194 FILL_64bit_asi(ASI_AIUP,fn0); /* 0C8 fill 2 normal */
1216 1195 FILL_32bit_asi(ASI_AIUP,fn0); /* 0CC fill 3 normal */
1217 1196 FILL_64bit_asi(ASI_AIUP,fn0); /* 0D0 fill 4 normal */
1218 1197 FILL_32bit(not); /* 0D4 fill 5 normal */
1219 1198 FILL_64bit(not); /* 0D8 fill 6 normal */
1220 1199 FILL_mixed; /* 0DC fill 7 normal */
1221 1200 NOT4; /* 0E0 fill 0 other */
1222 1201 NOT4; /* 0E4 fill 1 other */
1223 1202 NOT4; /* 0E8 fill 2 other */
1224 1203 NOT4; /* 0EC fill 3 other */
1225 1204 NOT4; /* 0F0 fill 4 other */
1226 1205 NOT4; /* 0F4 fill 5 other */
1227 1206 NOT4; /* 0F8 fill 6 other */
1228 1207 NOT4; /* 0FC fill 7 other */
1229 1208 /* user traps */
1230 1209 GOTO(syscall_trap_4x); /* 100 old system call */
1231 1210 TRAP(T_BREAKPOINT); /* 101 user breakpoint */
1232 1211 TRAP(T_DIV0); /* 102 user divide by zero */
1233 1212 GOTO(.flushw); /* 103 flush windows */
1234 1213 GOTO(.clean_windows); /* 104 clean windows */
1235 1214 BAD; /* 105 range check ?? */
1236 1215 GOTO(.fix_alignment); /* 106 do unaligned references */
1237 1216 BAD; /* 107 unused */
1238 1217 SYSCALL_TRAP32; /* 108 ILP32 system call on LP64 */
1239 1218 GOTO(set_trap0_addr); /* 109 set trap0 address */
1240 1219 BAD; BAD; BAD4; /* 10A - 10F unused */
1241 1220 TRP4; TRP4; TRP4; TRP4; /* 110 - 11F V9 user trap handlers */
1242 1221 GOTO(.getcc); /* 120 get condition codes */
1243 1222 GOTO(.setcc); /* 121 set condition codes */
1244 1223 GOTO(.getpsr); /* 122 get psr */
1245 1224 GOTO(.setpsr); /* 123 set psr (some fields) */
1246 1225 GOTO(get_timestamp); /* 124 get timestamp */
1247 1226 GOTO(get_virtime); /* 125 get lwp virtual time */
1248 1227 PRIV(self_xcall); /* 126 self xcall */
1249 1228 GOTO(get_hrestime); /* 127 get hrestime */
1250 1229 BAD; /* 128 ST_SETV9STACK */
1251 1230 GOTO(.getlgrp); /* 129 get lgrpid */
1252 1231 BAD; BAD; BAD4; /* 12A - 12F unused */
1253 1232 BAD4; BAD4; /* 130 - 137 unused */
1254 1233 DTRACE_PID; /* 138 dtrace pid tracing provider */
1255 1234 BAD; /* 139 unused */
1256 1235 DTRACE_RETURN; /* 13A dtrace pid return probe */
1257 1236 BAD; BAD4; /* 13B - 13F unused */
1258 1237 SYSCALL_TRAP; /* 140 LP64 system call */
1259 1238 SYSCALL(nosys); /* 141 unused system call trap */
1260 1239 #ifdef DEBUG_USER_TRAPTRACECTL
1261 1240 GOTO(.traptrace_freeze); /* 142 freeze traptrace */
1262 1241 GOTO(.traptrace_unfreeze); /* 143 unfreeze traptrace */
1263 1242 #else
1264 1243 SYSCALL(nosys); /* 142 unused system call trap */
1265 1244 SYSCALL(nosys); /* 143 unused system call trap */
1266 1245 #endif
1267 1246 BAD4; BAD4; BAD4; /* 144 - 14F unused */
1268 1247 BAD4; BAD4; BAD4; BAD4; /* 150 - 15F unused */
1269 1248 BAD4; BAD4; BAD4; BAD4; /* 160 - 16F unused */
1270 1249 BAD; /* 170 - unused */
1271 1250 BAD; /* 171 - unused */
1272 1251 BAD; BAD; /* 172 - 173 unused */
1273 1252 BAD4; BAD4; /* 174 - 17B unused */
1274 1253 #ifdef PTL1_PANIC_DEBUG
1275 1254 mov PTL1_BAD_DEBUG, %g1; GOTO(ptl1_panic);
1276 1255 /* 17C test ptl1_panic */
1277 1256 #else
1278 1257 BAD; /* 17C unused */
1279 1258 #endif /* PTL1_PANIC_DEBUG */
1280 1259 PRIV(kmdb_trap); /* 17D kmdb enter (L1-A) */
1281 1260 PRIV(kmdb_trap); /* 17E kmdb breakpoint */
1282 1261 PRIV(obp_bpt); /* 17F obp breakpoint */
1283 1262 /* reserved */
1284 1263 NOT4; NOT4; NOT4; NOT4; /* 180 - 18F reserved */
1285 1264 NOT4; NOT4; NOT4; NOT4; /* 190 - 19F reserved */
1286 1265 NOT4; NOT4; NOT4; NOT4; /* 1A0 - 1AF reserved */
1287 1266 NOT4; NOT4; NOT4; NOT4; /* 1B0 - 1BF reserved */
1288 1267 NOT4; NOT4; NOT4; NOT4; /* 1C0 - 1CF reserved */
1289 1268 NOT4; NOT4; NOT4; NOT4; /* 1D0 - 1DF reserved */
1290 1269 NOT4; NOT4; NOT4; NOT4; /* 1E0 - 1EF reserved */
1291 1270 NOT4; NOT4; NOT4; NOT4; /* 1F0 - 1FF reserved */
1292 1271 .size trap_table0, (.-trap_table0)
1293 1272 trap_table1:
	/* hardware traps (taken at TL > 0) */
1294 1273 NOT4; NOT4; /* 000 - 007 unused */
1295 1274 NOT; /* 008 instruction access exception */
1296 1275 ITSB_MISS; /* 009 instruction access MMU miss */
1297 1276 NOT; /* 00A reserved */
1298 1277 NOT; NOT4; /* 00B - 00F unused */
1299 1278 NOT4; NOT4; NOT4; NOT4; /* 010 - 01F unused */
1300 1279 NOT4; /* 020 - 023 unused */
1301 1280 CLEAN_WINDOW; /* 024 - 027 clean window */
1302 1281 NOT4; NOT4; /* 028 - 02F unused */
1303 1282 DMMU_EXCEPTION_TL1; /* 030 data access exception */
1304 1283 DTSB_MISS; /* 031 data access MMU miss */
1305 1284 NOT; /* 032 reserved */
1306 1285 NOT; /* 033 unused */
1307 1286 MISALIGN_ADDR_TL1; /* 034 mem address not aligned */
1308 1287 NOT; NOT; NOT; NOT4; NOT4 /* 035 - 03F unused */
1309 1288 NOT4; NOT4; NOT4; NOT4; /* 040 - 04F unused */
1310 1289 NOT4; NOT4; NOT4; NOT4; /* 050 - 05F unused */
1311 1290 NOT; /* 060 unused */
1312 1291 GOTO(kmdb_trap_tl1); /* 061 PA watchpoint */
1313 1292 GOTO(kmdb_trap_tl1); /* 062 VA watchpoint */
1314 1293 NOT; /* 063 reserved */
1315 1294 ITLB_MISS(tt1); /* 064 instruction access MMU miss */
1316 1295 DTLB_MISS(tt1); /* 068 data access MMU miss */
1317 1296 DTLB_PROT; /* 06C data access protection */
1318 1297 NOT; /* 070 reserved */
1319 1298 NOT; /* 071 reserved */
1320 1299 NOT; /* 072 reserved */
1321 1300 NOT; /* 073 reserved */
1322 1301 NOT4; NOT4; /* 074 - 07B reserved */
1323 1302 NOT; /* 07C reserved */
1324 1303 NOT; /* 07D reserved */
1325 1304 NOT; /* 07E resumable error */
1326 1305 GOTO_TT(nonresumable_error, trace_gen); /* 07F nonresumable error */
1327 1306 NOTP4; /* 080 spill 0 normal */
1328 1307 SPILL_32bit_tt1(ASI_AIUP,sn1); /* 084 spill 1 normal */
1329 1308 SPILL_64bit_tt1(ASI_AIUP,sn1); /* 088 spill 2 normal */
1330 1309 SPILL_32bit_tt1(ASI_AIUP,sn1); /* 08C spill 3 normal */
1331 1310 SPILL_64bit_tt1(ASI_AIUP,sn1); /* 090 spill 4 normal */
1332 1311 NOTP4; /* 094 spill 5 normal */
1333 1312 SPILL_64bit_ktt1(sk); /* 098 spill 6 normal */
1334 1313 SPILL_mixed_ktt1(sk); /* 09C spill 7 normal */
1335 1314 NOTP4; /* 0A0 spill 0 other */
1336 1315 SPILL_32bit_tt1(ASI_AIUS,so1); /* 0A4 spill 1 other */
1337 1316 SPILL_64bit_tt1(ASI_AIUS,so1); /* 0A8 spill 2 other */
1338 1317 SPILL_32bit_tt1(ASI_AIUS,so1); /* 0AC spill 3 other */
1339 1318 SPILL_64bit_tt1(ASI_AIUS,so1); /* 0B0 spill 4 other */
1340 1319 NOTP4; /* 0B4 spill 5 other */
1341 1320 NOTP4; /* 0B8 spill 6 other */
1342 1321 NOTP4; /* 0BC spill 7 other */
1343 1322 NOT4; /* 0C0 fill 0 normal */
1344 1323 NOT4; /* 0C4 fill 1 normal */
1345 1324 NOT4; /* 0C8 fill 2 normal */
1346 1325 NOT4; /* 0CC fill 3 normal */
1347 1326 NOT4; /* 0D0 fill 4 normal */
1348 1327 NOT4; /* 0D4 fill 5 normal */
1349 1328 NOT4; /* 0D8 fill 6 normal */
1350 1329 NOT4; /* 0DC fill 7 normal */
1351 1330 NOT4; NOT4; NOT4; NOT4; /* 0E0 - 0EF unused */
1352 1331 NOT4; NOT4; NOT4; NOT4; /* 0F0 - 0FF unused */
1353 1332 /*
1354 1333 * Code running at TL>0 does not use soft traps, so
1355 1334 * we can truncate the table here.
1356 1335 * However:
1357 1336 * sun4v uses (hypervisor) ta instructions at TL > 0, so
1358 1337 * provide a safety net for now.
1359 1338 */
1360 1339 /* soft traps */
1361 1340 BAD4; BAD4; BAD4; BAD4; /* 100 - 10F unused */
1362 1341 BAD4; BAD4; BAD4; BAD4; /* 110 - 11F unused */
1363 1342 BAD4; BAD4; BAD4; BAD4; /* 120 - 12F unused */
1364 1343 BAD4; BAD4; BAD4; BAD4; /* 130 - 13F unused */
1365 1344 BAD4; BAD4; BAD4; BAD4; /* 140 - 14F unused */
1366 1345 BAD4; BAD4; BAD4; BAD4; /* 150 - 15F unused */
1367 1346 BAD4; BAD4; BAD4; BAD4; /* 160 - 16F unused */
1368 1347 BAD4; BAD4; BAD4; BAD4; /* 170 - 17F unused */
1369 1348 /* reserved */
1370 1349 NOT4; NOT4; NOT4; NOT4; /* 180 - 18F reserved */
1371 1350 NOT4; NOT4; NOT4; NOT4; /* 190 - 19F reserved */
1372 1351 NOT4; NOT4; NOT4; NOT4; /* 1A0 - 1AF reserved */
1373 1352 NOT4; NOT4; NOT4; NOT4; /* 1B0 - 1BF reserved */
1374 1353 NOT4; NOT4; NOT4; NOT4; /* 1C0 - 1CF reserved */
1375 1354 NOT4; NOT4; NOT4; NOT4; /* 1D0 - 1DF reserved */
1376 1355 NOT4; NOT4; NOT4; NOT4; /* 1E0 - 1EF reserved */
1377 1356 NOT4; NOT4; NOT4; NOT4; /* 1F0 - 1FF reserved */
1378 1357 etrap_table:
1379 1358 .size trap_table1, (.-trap_table1)
1380 1359 .size trap_table, (.-trap_table)
1381 1360 .size scb, (.-scb)
1382 1361
1383 1362 /*
1384 1363 * We get to exec_fault in the case of an instruction miss and tte
1385 1364 * has no execute bit set. We go to tl0 to handle it.
1386 1365 *
1387 1366 * g1 = tsbe pointer (in/clobbered)
1388 1367 * g2 = tag access register (in)
1389 1368 * g3 - g4 = scratch (clobbered)
1390 1369 * g5 = tsbe data (in)
1391 1370 * g6 = scratch (clobbered)
1392 1371 * g7 = pc we jumped here from (in)
1393 1372 */
1394 1373 /*
1395 1374 * synthesize for miss handler: TAG_ACCESS in %g2 (with context "type"
1396 1375 * (0=kernel, 1=invalid, or 2=user) rather than context ID)
1397 1376 */
1398 1377 ALTENTRY(exec_fault)
1399 1378 TRACE_TSBHIT(TT_MMU_EXEC)
1400 1379 MMU_FAULT_STATUS_AREA(%g4)
1401 1380 ldx [%g4 + MMFSA_I_ADDR], %g2 /* g2 = address */
1402 1381 ldx [%g4 + MMFSA_I_CTX], %g3 /* g3 = ctx */
1403 1382 srlx %g2, MMU_PAGESHIFT, %g2 ! align address to page boundary
1404 1383 cmp %g3, USER_CONTEXT_TYPE
1405 1384 sllx %g2, MMU_PAGESHIFT, %g2
1406 1385 movgu %icc, USER_CONTEXT_TYPE, %g3 ! clamp ctx to the "user" type
1407 1386 or %g2, %g3, %g2 /* TAG_ACCESS */
1408 1387 mov T_INSTR_MMU_MISS, %g3 ! arg2 = traptype
1409 1388 set trap, %g1
1410 1389 ba,pt %xcc, sys_trap
1411 1390 mov -1, %g4
1412 1391
/*
 * Misaligned-access exception taken at TL=0.  If the trap came from
 * user mode and the process registered an unaligned-access utrap
 * handler (P_UTRAP15), dispatch to it (32-bit vs 64-bit setup chosen
 * by the stack-pointer bias bit); otherwise fall into
 * .mmu_exception_end with %g1 = T_ALIGNMENT (%g2 = sfar, %g3 = sfsr).
 */
1413 1392 .mmu_exception_not_aligned:
1414 1393 /* %g2 = sfar, %g3 = sfsr */
1415 1394 rdpr %tstate, %g1
1416 1395 btst TSTATE_PRIV, %g1
1417 1396 bnz,pn %icc, 2f
1418 1397 nop
1419 1398 CPU_ADDR(%g1, %g4) ! load CPU struct addr
1420 1399 ldn [%g1 + CPU_THREAD], %g1 ! load thread pointer
1421 1400 ldn [%g1 + T_PROCP], %g1 ! load proc pointer
1422 1401 ldn [%g1 + P_UTRAPS], %g5 ! are there utraps?
1423 1402 brz,pt %g5, 2f
1424 1403 nop
1425 1404 ldn [%g5 + P_UTRAP15], %g5 ! unaligned utrap?
1426 1405 brz,pn %g5, 2f
1427 1406 nop
1428 1407 btst 1, %sp
1429 1408 bz,pt %xcc, 1f ! 32 bit user program
1430 1409 nop
1431 1410 ba,pt %xcc, .setup_v9utrap ! 64 bit user program
1432 1411 nop
1433 1412 1:
1434 1413 ba,pt %xcc, .setup_utrap
1435 1414 or %g2, %g0, %g7 ! delay: pass sfar in %g7
1436 1415 2:
1437 1416 ba,pt %xcc, .mmu_exception_end
1438 1417 mov T_ALIGNMENT, %g1
1439 1418
/*
 * Privileged-action exception at TL=0: dispatch to a registered
 * P_UTRAP16 handler when the trap came from user mode, else fall
 * through into .mmu_exception_end with %g1 = T_PRIV_INSTR.
 */
1440 1419 .mmu_priv_exception:
1441 1420 rdpr %tstate, %g1
1442 1421 btst TSTATE_PRIV, %g1
1443 1422 bnz,pn %icc, 1f
1444 1423 nop
1445 1424 CPU_ADDR(%g1, %g4) ! load CPU struct addr
1446 1425 ldn [%g1 + CPU_THREAD], %g1 ! load thread pointer
1447 1426 ldn [%g1 + T_PROCP], %g1 ! load proc pointer
1448 1427 ldn [%g1 + P_UTRAPS], %g5 ! are there utraps?
1449 1428 brz,pt %g5, 1f
1450 1429 nop
1451 1430 ldn [%g5 + P_UTRAP16], %g5 ! privileged-action utrap?
1452 1431 brnz,pt %g5, .setup_v9utrap
1453 1432 nop
1454 1433 1:
1455 1434 mov T_PRIV_INSTR, %g1
1456 1435
/*
 * Common exit: %g1 = trap type, %g2 = sfar, %g3 = sfsr.  If dtrace
 * has CPU_DTRACE_NOFAULT set for this CPU, record CPU_DTRACE_BADADDR
 * and retire the faulting instruction with "done"; otherwise pack
 * sfsr (upper 32 bits) with the trap type and enter trap() via
 * sys_trap.
 */
1457 1436 .mmu_exception_end:
1458 1437 CPU_INDEX(%g4, %g5)
1459 1438 set cpu_core, %g5
1460 1439 sllx %g4, CPU_CORE_SHIFT, %g4
1461 1440 add %g4, %g5, %g4
1462 1441 lduh [%g4 + CPUC_DTRACE_FLAGS], %g5
1463 1442 andcc %g5, CPU_DTRACE_NOFAULT, %g0
1464 1443 bz 1f
1465 1444 or %g5, CPU_DTRACE_BADADDR, %g5
1466 1445 stuh %g5, [%g4 + CPUC_DTRACE_FLAGS]
1467 1446 done
1468 1447
1469 1448 1:
1470 1449 sllx %g3, 32, %g3
1471 1450 or %g3, %g1, %g3
1472 1451 set trap, %g1
1473 1452 ba,pt %xcc, sys_trap
1474 1453 sub %g0, 1, %g4
1475 1454
/*
 * fp-disabled trap: a privileged-mode fp trap is fatal (ptl1_panic
 * with PTL1_BAD_FPTRAP).  For user mode, dispatch to a registered
 * fp-disabled utrap (P_UTRAP7) if present, selecting the 32-bit or
 * 64-bit setup path by the stack-pointer bias bit; otherwise enter
 * fp_disabled() via sys_trap.
 */
1476 1455 .fp_disabled:
1477 1456 CPU_ADDR(%g1, %g4) ! load CPU struct addr
1478 1457 ldn [%g1 + CPU_THREAD], %g1 ! load thread pointer
1479 1458 rdpr %tstate, %g4
1480 1459 btst TSTATE_PRIV, %g4
1481 1460 bnz,a,pn %icc, ptl1_panic
1482 1461 mov PTL1_BAD_FPTRAP, %g1
1483 1462
1484 1463 ldn [%g1 + T_PROCP], %g1 ! load proc pointer
1485 1464 ldn [%g1 + P_UTRAPS], %g5 ! are there utraps?
1486 1465 brz,a,pt %g5, 2f
1487 1466 nop
1488 1467 ldn [%g5 + P_UTRAP7], %g5 ! fp_disabled utrap?
1489 1468 brz,a,pn %g5, 2f
1490 1469 nop
1491 1470 btst 1, %sp
1492 1471 bz,a,pt %xcc, 1f ! 32 bit user program
1493 1472 nop
1494 1473 ba,a,pt %xcc, .setup_v9utrap ! 64 bit user program
1495 1474 nop
1496 1475 1:
1497 1476 ba,pt %xcc, .setup_utrap
1498 1477 or %g0, %g0, %g7 ! delay: no misaligned address
1499 1478 2:
1500 1479 set fp_disabled, %g1
1501 1480 ba,pt %xcc, sys_trap
1502 1481 sub %g0, 1, %g4
1503 1482
/*
 * IEEE-754 fp exception: fatal from privileged mode (ptl1_panic).
 * For user mode, snapshot %fsr into %g2 (via CPU_TMP1), then dispatch
 * to a registered P_UTRAP8 handler if present; otherwise enter
 * _fp_ieee_exception() via sys_trap.
 */
1504 1483 .fp_ieee_exception:
1505 1484 rdpr %tstate, %g1
1506 1485 btst TSTATE_PRIV, %g1
1507 1486 bnz,a,pn %icc, ptl1_panic
1508 1487 mov PTL1_BAD_FPTRAP, %g1
1509 1488 CPU_ADDR(%g1, %g4) ! load CPU struct addr
1510 1489 stx %fsr, [%g1 + CPU_TMP1]
1511 1490 ldx [%g1 + CPU_TMP1], %g2 ! %g2 = saved %fsr
1512 1491 ldn [%g1 + CPU_THREAD], %g1 ! load thread pointer
1513 1492 ldn [%g1 + T_PROCP], %g1 ! load proc pointer
1514 1493 ldn [%g1 + P_UTRAPS], %g5 ! are there utraps?
1515 1494 brz,a,pt %g5, 1f
1516 1495 nop
1517 1496 ldn [%g5 + P_UTRAP8], %g5 ! fp ieee utrap?
1518 1497 brnz,a,pt %g5, .setup_v9utrap
1519 1498 nop
1520 1499 1:
1521 1500 set _fp_ieee_exception, %g1
1522 1501 ba,pt %xcc, sys_trap
1523 1502 sub %g0, 1, %g4
1524 1503
1525 1504 /*
1526 1505 * Register Inputs:
1527 1506 * %g5 user trap handler
1528 1507 * %g7 misaligned addr - for alignment traps only
1529 1507a *
1530 1507b * Builds a 32-bit (MINFRAME32) window for the user handler, places
1531 1507c * %tpc/%tnpc (or the DTrace pid-provider resume npc) in %l1/%l2 and
1532 1507d * the misaligned address in %l3, then redirects %tnpc to the handler.
1533 1508 */
1530 1509 .setup_utrap:
1531 1510 set trap, %g1 ! setup in case we go
1532 1511 mov T_FLUSH_PCB, %g3 ! through sys_trap on
1533 1512 sub %g0, 1, %g4 ! the save instruction below
1534 1513
1535 1514 /*
1536 1515 * If the DTrace pid provider is single stepping a copied-out
1537 1516 * instruction, t->t_dtrace_step will be set. In that case we need
1538 1517 * to abort the single-stepping (since execution of the instruction
1539 1518 * was interrupted) and use the value of t->t_dtrace_npc as the %npc.
1540 1519 */
1541 1520 save %sp, -SA(MINFRAME32), %sp ! window for trap handler
1542 1521 CPU_ADDR(%g1, %g4) ! load CPU struct addr
1543 1522 ldn [%g1 + CPU_THREAD], %g1 ! load thread pointer
1544 1523 ldub [%g1 + T_DTRACE_STEP], %g2 ! load t->t_dtrace_step
1545 1524 rdpr %tnpc, %l2 ! arg1 == tnpc
1546 1525 brz,pt %g2, 1f
1547 1526 rdpr %tpc, %l1 ! arg0 == tpc
1548 1527
1549 1528 ldub [%g1 + T_DTRACE_AST], %g2 ! load t->t_dtrace_ast
1550 1529 ldn [%g1 + T_DTRACE_NPC], %l2 ! arg1 = t->t_dtrace_npc (step)
1551 1530 brz,pt %g2, 1f
1552 1531 st %g0, [%g1 + T_DTRACE_FT] ! zero all pid provider flags
1553 1532 stub %g2, [%g1 + T_ASTFLAG] ! aston(t) if t->t_dtrace_ast
1554 1533 1:
1555 1534 mov %g7, %l3 ! arg2 == misaligned address
1556 1535
1557 1536 rdpr %tstate, %g1 ! cwp for trap handler
1558 1537 rdpr %cwp, %g4
1559 1538 bclr TSTATE_CWP_MASK, %g1
1560 1539 wrpr %g1, %g4, %tstate
1561 1540 wrpr %g0, %g5, %tnpc ! trap handler address
1562 1541 FAST_TRAP_DONE
1563 1542 /* NOTREACHED */
1564 1543
/*
 * Decide whether the current user trap should be redirected to a V9
 * utrap handler: index p_utraps[] by trap type (with special casing
 * for T_SOFTWARE_TRAP numbers and for ILLTRAP instructions, whose
 * opcode is fetched "as if user" under CPU_TL1_HDLR protection) and
 * branch to .setup_v9utrap when a handler is registered; otherwise
 * enter trap() via sys_trap.
 */
1565 1544 .check_v9utrap:
1566 1545 rdpr %tstate, %g1
1567 1546 btst TSTATE_PRIV, %g1
1568 1547 bnz,a,pn %icc, 3f
1569 1548 nop
1570 1549 CPU_ADDR(%g4, %g1) ! load CPU struct addr
1571 1550 ldn [%g4 + CPU_THREAD], %g5 ! load thread pointer
1572 1551 ldn [%g5 + T_PROCP], %g5 ! load proc pointer
1573 1552 ldn [%g5 + P_UTRAPS], %g5 ! are there utraps?
1574 1553
1575 1554 cmp %g3, T_SOFTWARE_TRAP
1576 1555 bne,a,pt %icc, 1f
1577 1556 nop
1578 1557
1579 1558 brz,pt %g5, 3f ! if p_utraps == NULL goto trap()
1580 1559 rdpr %tt, %g3 ! delay - get actual hw trap type
1581 1560
1582 1561 sub %g3, 254, %g1 ! UT_TRAP_INSTRUCTION_16 = p_utraps[18]
1583 1562 ba,pt %icc, 2f
1584 1563 smul %g1, CPTRSIZE, %g2 ! delay: byte offset into p_utraps[]
1585 1564 1:
1586 1565 brz,a,pt %g5, 3f ! if p_utraps == NULL goto trap()
1587 1566 nop
1588 1567
1589 1568 cmp %g3, T_UNIMP_INSTR
1590 1569 bne,a,pt %icc, 2f
1591 1570 nop
1592 1571
1593 1572 mov 1, %g1
1594 1573 st %g1, [%g4 + CPU_TL1_HDLR] ! set CPU_TL1_HDLR
1595 1574 rdpr %tpc, %g1 ! ld trapping instruction using
1596 1575 lduwa [%g1]ASI_AIUP, %g1 ! "AS IF USER" ASI which could fault
1597 1576 st %g0, [%g4 + CPU_TL1_HDLR] ! clr CPU_TL1_HDLR
1598 1577
1599 1578 sethi %hi(0xc1c00000), %g4 ! setup mask for illtrap instruction
1600 1579 andcc %g1, %g4, %g4 ! and instruction with mask
1601 1580 bnz,a,pt %icc, 3f ! if %g4 == zero, %g1 is an ILLTRAP
1602 1581 nop ! fall thru to setup
1603 1582 2:
1604 1583 ldn [%g5 + %g2], %g5 ! fetch registered utrap handler
1605 1584 brnz,a,pt %g5, .setup_v9utrap
1606 1585 nop
1607 1586 3:
1608 1587 set trap, %g1
1609 1588 ba,pt %xcc, sys_trap
1610 1589 sub %g0, 1, %g4
1611 1590 /* NOTREACHED */
1612 1591
1613 1592 /*
1614 1593 * Register Inputs:
1615 1594 * %g5 user trap handler
1616 1594a *
1617 1594b * Builds a 64-bit (MINFRAME64) window for the user handler with
1618 1594c * %tpc/%tnpc (or the DTrace resume npc) in %l6/%l7.  If pcb-based
1619 1594d * single-stepping is active, stashes the handler in PCB_TRACEPC and
1620 1594e * points %tnpc at userlimit to force a FLTBOUNDS on the next step;
1621 1594f * otherwise redirects %tnpc straight to the handler.
1622 1595 */
1617 1596 .setup_v9utrap:
1618 1597 set trap, %g1 ! setup in case we go
1619 1598 mov T_FLUSH_PCB, %g3 ! through sys_trap on
1620 1599 sub %g0, 1, %g4 ! the save instruction below
1621 1600
1622 1601 /*
1623 1602 * If the DTrace pid provider is single stepping a copied-out
1624 1603 * instruction, t->t_dtrace_step will be set. In that case we need
1625 1604 * to abort the single-stepping (since execution of the instruction
1626 1605 * was interrupted) and use the value of t->t_dtrace_npc as the %npc.
1627 1606 */
1628 1607 save %sp, -SA(MINFRAME64), %sp ! window for trap handler
1629 1608 CPU_ADDR(%g1, %g4) ! load CPU struct addr
1630 1609 ldn [%g1 + CPU_THREAD], %g1 ! load thread pointer
1631 1610 ldub [%g1 + T_DTRACE_STEP], %g2 ! load t->t_dtrace_step
1632 1611 rdpr %tnpc, %l7 ! arg1 == tnpc
1633 1612 brz,pt %g2, 1f
1634 1613 rdpr %tpc, %l6 ! arg0 == tpc
1635 1614
1636 1615 ldub [%g1 + T_DTRACE_AST], %g2 ! load t->t_dtrace_ast
1637 1616 ldn [%g1 + T_DTRACE_NPC], %l7 ! arg1 == t->t_dtrace_npc (step)
1638 1617 brz,pt %g2, 1f
1639 1618 st %g0, [%g1 + T_DTRACE_FT] ! zero all pid provider flags
1640 1619 stub %g2, [%g1 + T_ASTFLAG] ! aston(t) if t->t_dtrace_ast
1641 1620 1:
1642 1621 rdpr %tstate, %g2 ! cwp for trap handler
1643 1622 rdpr %cwp, %g4
1644 1623 bclr TSTATE_CWP_MASK, %g2
1645 1624 wrpr %g2, %g4, %tstate
1646 1625
1647 1626 ldn [%g1 + T_PROCP], %g4 ! load proc pointer
1648 1627 ldn [%g4 + P_AS], %g4 ! load as pointer
1649 1628 ldn [%g4 + A_USERLIMIT], %g4 ! load as userlimit
1650 1629 cmp %l7, %g4 ! check for single-step set
1651 1630 bne,pt %xcc, 4f
1652 1631 nop
1653 1632 ldn [%g1 + T_LWP], %g1 ! load klwp pointer
1654 1633 ld [%g1 + PCB_STEP], %g4 ! load single-step flag
1655 1634 cmp %g4, STEP_ACTIVE ! step flags set in pcb?
1656 1635 bne,pt %icc, 4f
1657 1636 nop
1658 1637 stn %g5, [%g1 + PCB_TRACEPC] ! save trap handler addr in pcb
1659 1638 mov %l7, %g4 ! on entry to precise user trap
1660 1639 add %l6, 4, %l7 ! handler, %l6 == pc, %l7 == npc
1661 1640 ! at time of trap
1662 1641 wrpr %g0, %g4, %tnpc ! generate FLTBOUNDS,
1663 1642 ! %g4 == userlimit
1664 1643 FAST_TRAP_DONE
1665 1644 /* NOTREACHED */
1666 1645 4:
1667 1646 wrpr %g0, %g5, %tnpc ! trap handler address
1668 1647 FAST_TRAP_DONE_CHK_INTR
1669 1648 /* NOTREACHED */
1670 1649
1671 1650 .fp_exception:
1672 1651 CPU_ADDR(%g1, %g4)
1673 1652 stx %fsr, [%g1 + CPU_TMP1]
1674 1653 ldx [%g1 + CPU_TMP1], %g2
1675 1654
1676 1655 /*
1677 1656 * Cheetah takes unfinished_FPop trap for certain range of operands
1678 1657 * to the "fitos" instruction. Instead of going through the slow
1679 1658 * software emulation path, we try to simulate the "fitos" instruction
1680 1659 * via "fitod" and "fdtos" provided the following conditions are met:
1681 1660 *
1682 1661 * fpu_exists is set (if DEBUG)
1683 1662 * not in privileged mode
1684 1663 * ftt is unfinished_FPop
1685 1664 * NXM IEEE trap is not enabled
1686 1665 * instruction at %tpc is "fitos"
1687 1666 *
1688 1667 * Usage:
1689 1668 * %g1 per cpu address
1690 1669 * %g2 %fsr
1691 1670 * %g6 user instruction
1692 1671 *
1693 1672 * Note that we can take a memory access related trap while trying
1694 1673 * to fetch the user instruction. Therefore, we set CPU_TL1_HDLR
1695 1674 * flag to catch those traps and let the SFMMU code deal with page
1696 1675 * fault and data access exception.
1697 1676 */
1698 1677 #if defined(DEBUG) || defined(NEED_FPU_EXISTS)
1699 1678 sethi %hi(fpu_exists), %g7
1700 1679 ld [%g7 + %lo(fpu_exists)], %g7
1701 1680 brz,pn %g7, .fp_exception_cont
1702 1681 nop
1703 1682 #endif
1704 1683 rdpr %tstate, %g7 ! branch if in privileged mode
1705 1684 btst TSTATE_PRIV, %g7
1706 1685 bnz,pn %xcc, .fp_exception_cont
1707 1686 srl %g2, FSR_FTT_SHIFT, %g7 ! extract ftt from %fsr
1708 1687 and %g7, (FSR_FTT>>FSR_FTT_SHIFT), %g7
1709 1688 cmp %g7, FTT_UNFIN
1710 1689 set FSR_TEM_NX, %g5
1711 1690 bne,pn %xcc, .fp_exception_cont ! branch if NOT unfinished_FPop
1712 1691 andcc %g2, %g5, %g0
1713 1692 bne,pn %xcc, .fp_exception_cont ! branch if FSR_TEM_NX enabled
1714 1693 rdpr %tpc, %g5 ! get faulting PC
1715 1694
1716 1695 or %g0, 1, %g7
1717 1696 st %g7, [%g1 + CPU_TL1_HDLR] ! set tl1_hdlr flag
1718 1697 lda [%g5]ASI_USER, %g6 ! get user's instruction
1719 1698 st %g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag
1720 1699
1721 1700 set FITOS_INSTR_MASK, %g7
1722 1701 and %g6, %g7, %g7
1723 1702 set FITOS_INSTR, %g5
1724 1703 cmp %g7, %g5
1725 1704 bne,pn %xcc, .fp_exception_cont ! branch if not FITOS_INSTR
1726 1705 nop
1727 1706
1728 1707 /*
1729 1708 * This is unfinished FPops trap for "fitos" instruction. We
1730 1709 * need to simulate "fitos" via "fitod" and "fdtos" instruction
1731 1710 * sequence.
1732 1711 *
1733 1712 * We need a temporary FP register to do the conversion. Since
1734 1713 * both source and destination operands for the "fitos" instruction
1735 1714 * have to be within %f0-%f31, we use an FP register from the upper
1736 1715 * half to guarantee that it won't collide with the source or the
1737 1716 * dest operand. However, we do have to save and restore its value.
1738 1717 *
1739 1718 * We use %d62 as a temporary FP register for the conversion and
1740 1719 * branch to appropriate instruction within the conversion tables
1741 1720 * based upon the rs2 and rd values.
1742 1721 */
1743 1722
1744 1723 std %d62, [%g1 + CPU_TMP1] ! save original value
1745 1724
1746 1725 srl %g6, FITOS_RS2_SHIFT, %g7
1747 1726 and %g7, FITOS_REG_MASK, %g7
1748 1727 set _fitos_fitod_table, %g4
1749 1728 sllx %g7, 2, %g7
1750 1729 jmp %g4 + %g7
1751 1730 ba,pt %xcc, _fitos_fitod_done
1752 1731 .empty
1753 1732
1754 1733 _fitos_fitod_table:
1755 1734 fitod %f0, %d62
1756 1735 fitod %f1, %d62
1757 1736 fitod %f2, %d62
1758 1737 fitod %f3, %d62
1759 1738 fitod %f4, %d62
1760 1739 fitod %f5, %d62
1761 1740 fitod %f6, %d62
1762 1741 fitod %f7, %d62
1763 1742 fitod %f8, %d62
1764 1743 fitod %f9, %d62
1765 1744 fitod %f10, %d62
1766 1745 fitod %f11, %d62
1767 1746 fitod %f12, %d62
1768 1747 fitod %f13, %d62
1769 1748 fitod %f14, %d62
1770 1749 fitod %f15, %d62
1771 1750 fitod %f16, %d62
1772 1751 fitod %f17, %d62
1773 1752 fitod %f18, %d62
1774 1753 fitod %f19, %d62
1775 1754 fitod %f20, %d62
1776 1755 fitod %f21, %d62
1777 1756 fitod %f22, %d62
1778 1757 fitod %f23, %d62
1779 1758 fitod %f24, %d62
1780 1759 fitod %f25, %d62
1781 1760 fitod %f26, %d62
1782 1761 fitod %f27, %d62
1783 1762 fitod %f28, %d62
1784 1763 fitod %f29, %d62
1785 1764 fitod %f30, %d62
1786 1765 fitod %f31, %d62
1787 1766 _fitos_fitod_done:
1788 1767
1789 1768 /*
1790 1769 * Now convert data back into single precision
1791 1770 */
1792 1771 srl %g6, FITOS_RD_SHIFT, %g7
1793 1772 and %g7, FITOS_REG_MASK, %g7
1794 1773 set _fitos_fdtos_table, %g4
1795 1774 sllx %g7, 2, %g7
1796 1775 jmp %g4 + %g7
1797 1776 ba,pt %xcc, _fitos_fdtos_done
1798 1777 .empty
1799 1778
1800 1779 _fitos_fdtos_table:
1801 1780 fdtos %d62, %f0
1802 1781 fdtos %d62, %f1
1803 1782 fdtos %d62, %f2
1804 1783 fdtos %d62, %f3
1805 1784 fdtos %d62, %f4
1806 1785 fdtos %d62, %f5
1807 1786 fdtos %d62, %f6
1808 1787 fdtos %d62, %f7
1809 1788 fdtos %d62, %f8
1810 1789 fdtos %d62, %f9
1811 1790 fdtos %d62, %f10
1812 1791 fdtos %d62, %f11
1813 1792 fdtos %d62, %f12
1814 1793 fdtos %d62, %f13
1815 1794 fdtos %d62, %f14
1816 1795 fdtos %d62, %f15
1817 1796 fdtos %d62, %f16
1818 1797 fdtos %d62, %f17
1819 1798 fdtos %d62, %f18
1820 1799 fdtos %d62, %f19
1821 1800 fdtos %d62, %f20
1822 1801 fdtos %d62, %f21
1823 1802 fdtos %d62, %f22
1824 1803 fdtos %d62, %f23
1825 1804 fdtos %d62, %f24
1826 1805 fdtos %d62, %f25
1827 1806 fdtos %d62, %f26
1828 1807 fdtos %d62, %f27
1829 1808 fdtos %d62, %f28
1830 1809 fdtos %d62, %f29
1831 1810 fdtos %d62, %f30
1832 1811 fdtos %d62, %f31
1833 1812 _fitos_fdtos_done:
1834 1813
1835 1814 ldd [%g1 + CPU_TMP1], %d62 ! restore %d62
1836 1815
1837 1816 #if DEBUG
1838 1817 /*
1839 1818 * Update FPop_unfinished trap kstat
1840 1819 */
1841 1820 set fpustat+FPUSTAT_UNFIN_KSTAT, %g7
1842 1821 ldx [%g7], %g5
1843 1822 1:
1844 1823 add %g5, 1, %g6
1845 1824
1846 1825 casxa [%g7] ASI_N, %g5, %g6
1847 1826 cmp %g5, %g6
1848 1827 bne,a,pn %xcc, 1b
1849 1828 or %g0, %g6, %g5
1850 1829
1851 1830 /*
1852 1831 * Update fpu_sim_fitos kstat
1853 1832 */
1854 1833 set fpuinfo+FPUINFO_FITOS_KSTAT, %g7
1855 1834 ldx [%g7], %g5
1856 1835 1:
1857 1836 add %g5, 1, %g6
1858 1837
1859 1838 casxa [%g7] ASI_N, %g5, %g6
1860 1839 cmp %g5, %g6
1861 1840 bne,a,pn %xcc, 1b
1862 1841 or %g0, %g6, %g5
1863 1842 #endif /* DEBUG */
1864 1843
1865 1844 FAST_TRAP_DONE
1866 1845
1867 1846 .fp_exception_cont:
1868 1847 	/*
1869 1848 	 * Let _fp_exception deal with simulating FPop instruction.
1870 1849 	 * Note that we need to pass %fsr in %g2 (already read above).
1871 1850 	 */
1872 1851 
1873 1852 	set	_fp_exception, %g1	! %g1 = C-level handler for sys_trap
1874 1853 	ba,pt	%xcc, sys_trap
1875 1854 	sub	%g0, 1, %g4		! delay: %g4 = -1 (PIL argument, per sys_trap convention)
1876 1855
1877 1856
1878 1857 	/*
1879 1858 	 * Register windows
1880 1859 	 *
1881 1860 	 * Both entry points step the trap PC past the trapping instruction
1882 1861 	 * (%tpc <- old %tnpc, %tnpc <- old %tnpc + 4) and then vector to
1883 1862 	 * trap() with type T_FLUSH_PCB so the window work is done in C.
1884 1863 	 */
1885 1864 .flushw:
1886 1865 .clean_windows:
1887 1866 	rdpr	%tnpc, %g1		! advance past the trap instruction
1888 1867 	wrpr	%g1, %tpc
1889 1868 	add	%g1, 4, %g1
1890 1869 	wrpr	%g1, %tnpc
1891 1870 	set	trap, %g1		! generic C trap() handler
1892 1871 	mov	T_FLUSH_PCB, %g3	! trap type argument
1893 1872 	ba,pt	%xcc, sys_trap
1894 1873 	sub	%g0, 1, %g4		! delay: %g4 = -1 (PIL argument, per sys_trap convention)
1891 1870
1892 1871 	/*
1893 1872 	 * .spill_clean: clean the previous window, restore the wstate, and
1894 1873 	 * "done".
1895 1874 	 *
1896 1875 	 * Entry: %g7 contains new wstate
1897 1876 	 */
1898 1877 .spill_clean:
1899 1878 	sethi	%hi(nwin_minus_one), %g5
1900 1879 	ld	[%g5 + %lo(nwin_minus_one)], %g5 ! %g5 = nwin - 1
1901 1880 	rdpr	%cwp, %g6			! %g6 = %cwp
1902 1881 	deccc	%g6				! %g6--
1903 1882 	movneg	%xcc, %g5, %g6			! if (%g6<0) %g6 = nwin-1
1904 1883 	wrpr	%g6, %cwp			! step back to previous window (with wraparound)
1905 1884 	TT_TRACE_L(trace_win)
1906 1885 	clr	%l0				! scrub the previous window's locals
1907 1886 	clr	%l1
1908 1887 	clr	%l2
1909 1888 	clr	%l3
1910 1889 	clr	%l4
1911 1890 	clr	%l5
1912 1891 	clr	%l6
1913 1892 	clr	%l7
1914 1893 	wrpr	%g0, %g7, %wstate		! install caller-supplied wstate
1915 1894 	saved
1916 1895 	retry		! restores correct %cwp
1917 1896
	/*
	 * Set the per-process "fix alignment" flag: store 1 into
	 * curthread->t_procp->p_fixalignment, then return from the trap.
	 */
1918 1897 .fix_alignment:
1919 1898 	CPU_ADDR(%g1, %g2)		! load CPU struct addr to %g1 using %g2
1920 1899 	ldn	[%g1 + CPU_THREAD], %g1	! load thread pointer
1921 1900 	ldn	[%g1 + T_PROCP], %g1	! %g1 = proc pointer
1922 1901 	mov	1, %g2
1923 1902 	stb	%g2, [%g1 + P_FIXALIGNMENT]
1924 1903 	FAST_TRAP_DONE
1925 1904
/*
 * STDF_REG(REG, ADDR, TMP): store the double FP register selected by rd
 * value REG (0-31) into [ADDR + CPU_TMP1].  REG is shifted left by 3
 * because each table entry is two instructions (8 bytes); the computed
 * jump lands on the matching "ba,pt; std" pair.  The table interleaves
 * %f0..%f30 with %f32..%f62 because, in the table order below, odd rd
 * values select the upper register bank (%f32 and above).
 * Clobbers REG and TMP; falls through to done1 when complete.
 */
1926 1905 #define STDF_REG(REG, ADDR, TMP)		\
1927 1906 	sll	REG, 3, REG			;\
1928 1907 mark1:	set	start1, TMP			;\
1929 1908 	jmp	REG + TMP			;\
1930 1909 	nop					;\
1931 1910 start1:	ba,pt	%xcc, done1			;\
1932 1911 	std	%f0, [ADDR + CPU_TMP1]		;\
1933 1912 	ba,pt	%xcc, done1			;\
1934 1913 	std	%f32, [ADDR + CPU_TMP1]		;\
1935 1914 	ba,pt	%xcc, done1			;\
1936 1915 	std	%f2, [ADDR + CPU_TMP1]		;\
1937 1916 	ba,pt	%xcc, done1			;\
1938 1917 	std	%f34, [ADDR + CPU_TMP1]		;\
1939 1918 	ba,pt	%xcc, done1			;\
1940 1919 	std	%f4, [ADDR + CPU_TMP1]		;\
1941 1920 	ba,pt	%xcc, done1			;\
1942 1921 	std	%f36, [ADDR + CPU_TMP1]		;\
1943 1922 	ba,pt	%xcc, done1			;\
1944 1923 	std	%f6, [ADDR + CPU_TMP1]		;\
1945 1924 	ba,pt	%xcc, done1			;\
1946 1925 	std	%f38, [ADDR + CPU_TMP1]		;\
1947 1926 	ba,pt	%xcc, done1			;\
1948 1927 	std	%f8, [ADDR + CPU_TMP1]		;\
1949 1928 	ba,pt	%xcc, done1			;\
1950 1929 	std	%f40, [ADDR + CPU_TMP1]		;\
1951 1930 	ba,pt	%xcc, done1			;\
1952 1931 	std	%f10, [ADDR + CPU_TMP1]		;\
1953 1932 	ba,pt	%xcc, done1			;\
1954 1933 	std	%f42, [ADDR + CPU_TMP1]		;\
1955 1934 	ba,pt	%xcc, done1			;\
1956 1935 	std	%f12, [ADDR + CPU_TMP1]		;\
1957 1936 	ba,pt	%xcc, done1			;\
1958 1937 	std	%f44, [ADDR + CPU_TMP1]		;\
1959 1938 	ba,pt	%xcc, done1			;\
1960 1939 	std	%f14, [ADDR + CPU_TMP1]		;\
1961 1940 	ba,pt	%xcc, done1			;\
1962 1941 	std	%f46, [ADDR + CPU_TMP1]		;\
1963 1942 	ba,pt	%xcc, done1			;\
1964 1943 	std	%f16, [ADDR + CPU_TMP1]		;\
1965 1944 	ba,pt	%xcc, done1			;\
1966 1945 	std	%f48, [ADDR + CPU_TMP1]		;\
1967 1946 	ba,pt	%xcc, done1			;\
1968 1947 	std	%f18, [ADDR + CPU_TMP1]		;\
1969 1948 	ba,pt	%xcc, done1			;\
1970 1949 	std	%f50, [ADDR + CPU_TMP1]		;\
1971 1950 	ba,pt	%xcc, done1			;\
1972 1951 	std	%f20, [ADDR + CPU_TMP1]		;\
1973 1952 	ba,pt	%xcc, done1			;\
1974 1953 	std	%f52, [ADDR + CPU_TMP1]		;\
1975 1954 	ba,pt	%xcc, done1			;\
1976 1955 	std	%f22, [ADDR + CPU_TMP1]		;\
1977 1956 	ba,pt	%xcc, done1			;\
1978 1957 	std	%f54, [ADDR + CPU_TMP1]		;\
1979 1958 	ba,pt	%xcc, done1			;\
1980 1959 	std	%f24, [ADDR + CPU_TMP1]		;\
1981 1960 	ba,pt	%xcc, done1			;\
1982 1961 	std	%f56, [ADDR + CPU_TMP1]		;\
1983 1962 	ba,pt	%xcc, done1			;\
1984 1963 	std	%f26, [ADDR + CPU_TMP1]		;\
1985 1964 	ba,pt	%xcc, done1			;\
1986 1965 	std	%f58, [ADDR + CPU_TMP1]		;\
1987 1966 	ba,pt	%xcc, done1			;\
1988 1967 	std	%f28, [ADDR + CPU_TMP1]		;\
1989 1968 	ba,pt	%xcc, done1			;\
1990 1969 	std	%f60, [ADDR + CPU_TMP1]		;\
1991 1970 	ba,pt	%xcc, done1			;\
1992 1971 	std	%f30, [ADDR + CPU_TMP1]		;\
1993 1972 	ba,pt	%xcc, done1			;\
1994 1973 	std	%f62, [ADDR + CPU_TMP1]		;\
1995 1974 done1:
1996 1975
/*
 * LDDF_REG(REG, ADDR, TMP): load the double FP register selected by rd
 * value REG (0-31) from [ADDR + CPU_TMP1].  Mirror image of STDF_REG:
 * REG is scaled by 8 (two instructions per table entry) for a computed
 * jump into the "ba,pt; ldd" table, with odd rd values selecting the
 * upper bank (%f32..%f62) per the table order below.
 * Clobbers REG and TMP; falls through to done2 when complete.
 */
1997 1976 #define LDDF_REG(REG, ADDR, TMP)		\
1998 1977 	sll	REG, 3, REG			;\
1999 1978 mark2:	set	start2, TMP			;\
2000 1979 	jmp	REG + TMP			;\
2001 1980 	nop					;\
2002 1981 start2:	ba,pt	%xcc, done2			;\
2003 1982 	ldd	[ADDR + CPU_TMP1], %f0		;\
2004 1983 	ba,pt	%xcc, done2			;\
2005 1984 	ldd	[ADDR + CPU_TMP1], %f32		;\
2006 1985 	ba,pt	%xcc, done2			;\
2007 1986 	ldd	[ADDR + CPU_TMP1], %f2		;\
2008 1987 	ba,pt	%xcc, done2			;\
2009 1988 	ldd	[ADDR + CPU_TMP1], %f34		;\
2010 1989 	ba,pt	%xcc, done2			;\
2011 1990 	ldd	[ADDR + CPU_TMP1], %f4		;\
2012 1991 	ba,pt	%xcc, done2			;\
2013 1992 	ldd	[ADDR + CPU_TMP1], %f36		;\
2014 1993 	ba,pt	%xcc, done2			;\
2015 1994 	ldd	[ADDR + CPU_TMP1], %f6		;\
2016 1995 	ba,pt	%xcc, done2			;\
2017 1996 	ldd	[ADDR + CPU_TMP1], %f38		;\
2018 1997 	ba,pt	%xcc, done2			;\
2019 1998 	ldd	[ADDR + CPU_TMP1], %f8		;\
2020 1999 	ba,pt	%xcc, done2			;\
2021 2000 	ldd	[ADDR + CPU_TMP1], %f40		;\
2022 2001 	ba,pt	%xcc, done2			;\
2023 2002 	ldd	[ADDR + CPU_TMP1], %f10		;\
2024 2003 	ba,pt	%xcc, done2			;\
2025 2004 	ldd	[ADDR + CPU_TMP1], %f42		;\
2026 2005 	ba,pt	%xcc, done2			;\
2027 2006 	ldd	[ADDR + CPU_TMP1], %f12		;\
2028 2007 	ba,pt	%xcc, done2			;\
2029 2008 	ldd	[ADDR + CPU_TMP1], %f44		;\
2030 2009 	ba,pt	%xcc, done2			;\
2031 2010 	ldd	[ADDR + CPU_TMP1], %f14		;\
2032 2011 	ba,pt	%xcc, done2			;\
2033 2012 	ldd	[ADDR + CPU_TMP1], %f46		;\
2034 2013 	ba,pt	%xcc, done2			;\
2035 2014 	ldd	[ADDR + CPU_TMP1], %f16		;\
2036 2015 	ba,pt	%xcc, done2			;\
2037 2016 	ldd	[ADDR + CPU_TMP1], %f48		;\
2038 2017 	ba,pt	%xcc, done2			;\
2039 2018 	ldd	[ADDR + CPU_TMP1], %f18		;\
2040 2019 	ba,pt	%xcc, done2			;\
2041 2020 	ldd	[ADDR + CPU_TMP1], %f50		;\
2042 2021 	ba,pt	%xcc, done2			;\
2043 2022 	ldd	[ADDR + CPU_TMP1], %f20		;\
2044 2023 	ba,pt	%xcc, done2			;\
2045 2024 	ldd	[ADDR + CPU_TMP1], %f52		;\
2046 2025 	ba,pt	%xcc, done2			;\
2047 2026 	ldd	[ADDR + CPU_TMP1], %f22		;\
2048 2027 	ba,pt	%xcc, done2			;\
2049 2028 	ldd	[ADDR + CPU_TMP1], %f54		;\
2050 2029 	ba,pt	%xcc, done2			;\
2051 2030 	ldd	[ADDR + CPU_TMP1], %f24		;\
2052 2031 	ba,pt	%xcc, done2			;\
2053 2032 	ldd	[ADDR + CPU_TMP1], %f56		;\
2054 2033 	ba,pt	%xcc, done2			;\
2055 2034 	ldd	[ADDR + CPU_TMP1], %f26		;\
2056 2035 	ba,pt	%xcc, done2			;\
2057 2036 	ldd	[ADDR + CPU_TMP1], %f58		;\
2058 2037 	ba,pt	%xcc, done2			;\
2059 2038 	ldd	[ADDR + CPU_TMP1], %f28		;\
2060 2039 	ba,pt	%xcc, done2			;\
2061 2040 	ldd	[ADDR + CPU_TMP1], %f60		;\
2062 2041 	ba,pt	%xcc, done2			;\
2063 2042 	ldd	[ADDR + CPU_TMP1], %f30		;\
2064 2043 	ba,pt	%xcc, done2			;\
2065 2044 	ldd	[ADDR + CPU_TMP1], %f62		;\
2066 2045 done2:
2067 2046
	/*
	 * Emulate a user lddf/ldda to a misaligned address: fetch the user's
	 * instruction (guarded by CPU_TL1_HDLR so a nested MMU trap is
	 * survivable), check that the ASI is one we can emulate, perform two
	 * 32-bit user-space loads, combine them into CPU_TMP1, and load the
	 * destination double register via LDDF_REG.  Anything else goes to
	 * fpu_trap in C with type T_USER | T_LDDF_ALIGN.
	 */
2068 2047 .lddf_exception_not_aligned:
2069 2048 	/* %g2 = sfar, %g3 = sfsr */
2070 2049 	mov	%g2, %g5		! stash sfar
2071 2050 #if defined(DEBUG) || defined(NEED_FPU_EXISTS)
2072 2051 	sethi	%hi(fpu_exists), %g2	! check fpu_exists
2073 2052 	ld	[%g2 + %lo(fpu_exists)], %g2
2074 2053 	brz,a,pn %g2, 4f
2075 2054 	nop
2076 2055 #endif
2077 2056 	CPU_ADDR(%g1, %g4)
2078 2057 	or	%g0, 1, %g4
2079 2058 	st	%g4, [%g1 + CPU_TL1_HDLR] ! set tl1_hdlr flag
2080 2059 
2081 2060 	rdpr	%tpc, %g2
2082 2061 	lda	[%g2]ASI_AIUP, %g6	! get the user's lddf instruction
2083 2062 	srl	%g6, 23, %g1		! using ldda or not?
2084 2063 	and	%g1, 1, %g1
2085 2064 	brz,a,pt %g1, 2f		! check for ldda instruction
2086 2065 	nop
2087 2066 	srl	%g6, 13, %g1		! check immflag
2088 2067 	and	%g1, 1, %g1
2089 2068 	rdpr	%tstate, %g2		! %tstate in %g2
2090 2069 	brnz,a,pn %g1, 1f
2091 2070 	srl	%g2, 31, %g1		! get asi from %tstate
2092 2071 	srl	%g6, 5, %g1		! get asi from instruction
2093 2072 	and	%g1, 0xFF, %g1		! imm_asi field
2094 2073 1:
2095 2074 	cmp	%g1, ASI_P		! primary address space
2096 2075 	be,a,pt %icc, 2f
2097 2076 	nop
2098 2077 	cmp	%g1, ASI_PNF		! primary no fault address space
2099 2078 	be,a,pt %icc, 2f
2100 2079 	nop
2101 2080 	cmp	%g1, ASI_S		! secondary address space
2102 2081 	be,a,pt %icc, 2f
2103 2082 	nop
2104 2083 	cmp	%g1, ASI_SNF		! secondary no fault address space
2105 2084 	bne,a,pn %icc, 3f
2106 2085 	nop
2107 2086 2:
2108 2087 	lduwa	[%g5]ASI_USER, %g7	! get first half of misaligned data
2109 2088 	add	%g5, 4, %g5		! increment misaligned data address
2110 2089 	lduwa	[%g5]ASI_USER, %g5	! get second half of misaligned data
2111 2090 
2112 2091 	sllx	%g7, 32, %g7
2113 2092 	or	%g5, %g7, %g5		! combine data
2114 2093 	CPU_ADDR(%g7, %g1)		! save data on a per-cpu basis
2115 2094 	stx	%g5, [%g7 + CPU_TMP1]	! save in cpu_tmp1
2116 2095 
2117 2096 	srl	%g6, 25, %g3		! %g6 has the instruction
2118 2097 	and	%g3, 0x1F, %g3		! %g3 has rd
2119 2098 	LDDF_REG(%g3, %g7, %g4)
2120 2099 
2121 2100 	CPU_ADDR(%g1, %g4)
2122 2101 	st	%g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag
2123 2102 	FAST_TRAP_DONE
2124 2103 3:
2125 2104 	CPU_ADDR(%g1, %g4)
2126 2105 	st	%g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag
2127 2106 4:
2128 2107 	set	T_USER, %g3		! trap type in %g3
2129 2108 	or	%g3, T_LDDF_ALIGN, %g3
2130 2109 	mov	%g5, %g2		! misaligned vaddr in %g2
2131 2110 	set	fpu_trap, %g1		! goto C for the little and
2132 2111 	ba,pt	%xcc, sys_trap		! no fault little asi's
2133 2112 	sub	%g0, 1, %g4
2134 2113
	/*
	 * Emulate a user stdf/stda to a misaligned address: the converse of
	 * .lddf_exception_not_aligned.  Spill the source double register to
	 * CPU_TMP1 via STDF_REG, then store it to user space as two 32-bit
	 * halves.  Unsupported ASIs go to fpu_trap in C with type
	 * T_USER | T_STDF_ALIGN.
	 */
2135 2114 .stdf_exception_not_aligned:
2136 2115 	/* %g2 = sfar, %g3 = sfsr */
2137 2116 	mov	%g2, %g5
2138 2117 
2139 2118 #if defined(DEBUG) || defined(NEED_FPU_EXISTS)
2140 2119 	sethi	%hi(fpu_exists), %g7	! check fpu_exists
2141 2120 	ld	[%g7 + %lo(fpu_exists)], %g3
2142 2121 	brz,a,pn %g3, 4f
2143 2122 	nop
2144 2123 #endif
2145 2124 	CPU_ADDR(%g1, %g4)
2146 2125 	or	%g0, 1, %g4
2147 2126 	st	%g4, [%g1 + CPU_TL1_HDLR] ! set tl1_hdlr flag
2148 2127 
2149 2128 	rdpr	%tpc, %g2
2150 2129 	lda	[%g2]ASI_AIUP, %g6	! get the user's stdf instruction
2151 2130 
2152 2131 	srl	%g6, 23, %g1		! using stda or not?
2153 2132 	and	%g1, 1, %g1
2154 2133 	brz,a,pt %g1, 2f		! check for stda instruction
2155 2134 	nop
2156 2135 	srl	%g6, 13, %g1		! check immflag
2157 2136 	and	%g1, 1, %g1
2158 2137 	rdpr	%tstate, %g2		! %tstate in %g2
2159 2138 	brnz,a,pn %g1, 1f
2160 2139 	srl	%g2, 31, %g1		! get asi from %tstate
2161 2140 	srl	%g6, 5, %g1		! get asi from instruction
2162 2141 	and	%g1, 0xff, %g1		! imm_asi field
2163 2142 1:
2164 2143 	cmp	%g1, ASI_P		! primary address space
2165 2144 	be,a,pt %icc, 2f
2166 2145 	nop
2167 2146 	cmp	%g1, ASI_S		! secondary address space
2168 2147 	bne,a,pn %icc, 3f
2169 2148 	nop
2170 2149 2:
2171 2150 	srl	%g6, 25, %g6
2172 2151 	and	%g6, 0x1F, %g6		! %g6 has rd
2173 2152 	CPU_ADDR(%g7, %g1)
2174 2153 	STDF_REG(%g6, %g7, %g4)		! STDF_REG(REG, ADDR, TMP)
2175 2154 
2176 2155 	ldx	[%g7 + CPU_TMP1], %g6
2177 2156 	srlx	%g6, 32, %g7
2178 2157 	stuwa	%g7, [%g5]ASI_USER	! first half
2179 2158 	add	%g5, 4, %g5		! increment misaligned data address
2180 2159 	stuwa	%g6, [%g5]ASI_USER	! second half
2181 2160 
2182 2161 	CPU_ADDR(%g1, %g4)
2183 2162 	st	%g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag
2184 2163 	FAST_TRAP_DONE
2185 2164 3:
2186 2165 	CPU_ADDR(%g1, %g4)
2187 2166 	st	%g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag
2188 2167 4:
2189 2168 	set	T_USER, %g3		! trap type in %g3
2190 2169 	or	%g3, T_STDF_ALIGN, %g3
2191 2170 	mov	%g5, %g2		! misaligned vaddr in %g2
2192 2171 	set	fpu_trap, %g1		! goto C for the little and
2193 2172 	ba,pt	%xcc, sys_trap		! nofault little asi's
2194 2173 	sub	%g0, 1, %g4
2195 2174
#ifdef DEBUG_USER_TRAPTRACECTL

	/*
	 * Debug-only fast traps to freeze/unfreeze the trap trace buffer by
	 * writing 1/0 to trap_freeze.  TT_TRACE_L clobbers %l0-%l2 and %l4,
	 * so those locals are staged through %g1-%g4 around the trace call.
	 */
2196 2175 .traptrace_freeze:
2197 2176 	mov	%l0, %g1 ; mov	%l1, %g2 ; mov	%l2, %g3 ; mov	%l4, %g4
2198 2177 	TT_TRACE_L(trace_win)
2199 2178 	mov	%g4, %l4 ; mov	%g3, %l2 ; mov	%g2, %l1 ; mov	%g1, %l0
2200 2179 	set	trap_freeze, %g1
2201 2180 	mov	1, %g2
2202 2181 	st	%g2, [%g1]		! trap_freeze = 1
2203 2182 	FAST_TRAP_DONE
2204 2183 
2205 2184 .traptrace_unfreeze:
2206 2185 	set	trap_freeze, %g1
2207 2186 	st	%g0, [%g1]		! trap_freeze = 0
2208 2187 	mov	%l0, %g1 ; mov	%l1, %g2 ; mov	%l2, %g3 ; mov	%l4, %g4
2209 2188 	TT_TRACE_L(trace_win)
2210 2189 	mov	%g4, %l4 ; mov	%g3, %l2 ; mov	%g2, %l1 ; mov	%g1, %l0
2211 2190 	FAST_TRAP_DONE
2212 2191 
#endif /* DEBUG_USER_TRAPTRACECTL */
2216 2195
	/*
	 * .getcc: return the V8-style integer condition codes (psr.icc) to
	 * the user in the normal-globals (%gl == 0) %g1.  %o0 is preserved
	 * through CPU_TMP1; globals cannot be assumed to survive the %gl
	 * switches, so the CPU pointer is re-derived afterwards.
	 */
2217 2196 .getcc:
2218 2197 	CPU_ADDR(%g1, %g2)
2219 2198 	stx	%o0, [%g1 + CPU_TMP1]		! save %o0
2220 2199 	rdpr	%tstate, %g3			! get tstate
2221 2200 	srlx	%g3, PSR_TSTATE_CC_SHIFT, %o0	! shift ccr to V8 psr
2222 2201 	set	PSR_ICC, %g2
2223 2202 	and	%o0, %g2, %o0			! mask out the rest
2224 2203 	srl	%o0, PSR_ICC_SHIFT, %o0		! right justify
2225 2204 	wrpr	%g0, 0, %gl
2226 2205 	mov	%o0, %g1			! move ccr to normal %g1
2227 2206 	wrpr	%g0, 1, %gl
2228 2207 	! cannot assume globals retained their values after increasing %gl
2229 2208 	CPU_ADDR(%g1, %g2)
2230 2209 	ldx	[%g1 + CPU_TMP1], %o0		! restore %o0
2231 2210 	FAST_TRAP_DONE
2232 2211
	/*
	 * .setcc: take a V8-style psr.icc value from the user's normal-globals
	 * (%gl == 0) %g1 and install it into tstate.ccr.icc, leaving the rest
	 * of %tstate intact.  %o0 is preserved through CPU_TMP1.
	 */
2233 2212 .setcc:
2234 2213 	CPU_ADDR(%g1, %g2)
2235 2214 	stx	%o0, [%g1 + CPU_TMP1]		! save %o0
2236 2215 	wrpr	%g0, 0, %gl
2237 2216 	mov	%g1, %o0			! fetch user's icc value
2238 2217 	wrpr	%g0, 1, %gl
2239 2218 	! cannot assume globals retained their values after increasing %gl
2240 2219 	CPU_ADDR(%g1, %g2)
2241 2220 	sll	%o0, PSR_ICC_SHIFT, %g2
2242 2221 	set	PSR_ICC, %g3
2243 2222 	and	%g2, %g3, %g2			! mask out rest
2244 2223 	sllx	%g2, PSR_TSTATE_CC_SHIFT, %g2
2245 2224 	rdpr	%tstate, %g3			! get tstate
2246 2225 	srl	%g3, 0, %g3			! clear upper word
2247 2226 	or	%g3, %g2, %g3			! or in new bits
2248 2227 	wrpr	%g3, %tstate
2249 2228 	ldx	[%g1 + CPU_TMP1], %o0		! restore %o0
2250 2229 	FAST_TRAP_DONE
2251 2230
2252 2231 	/*
2253 2232 	 * getpsr(void)
2254 2233 	 * Synthesize a V8 %psr in %o0 from tstate.ccr.icc, fprs.fef and the
2255 2234 	 * assigned impl/ver constant.  Note that the xcc part of the ccr is
2256 2235 	 * not provided.
2257 2236 	 * The V8 code shows why the V9 trap is not faster:
2258 2237 	 *	#define GETPSR_TRAP() \
2259 2238 	 *	mov %psr, %i0; jmp %l2; rett %l2+4; nop;
2260 2239 	 */
2261 2240 
2262 2241 	.type	.getpsr, #function
2263 2242 .getpsr:
2264 2243 	rdpr	%tstate, %g1			! get tstate
2265 2244 	srlx	%g1, PSR_TSTATE_CC_SHIFT, %o0	! shift ccr to V8 psr
2266 2245 	set	PSR_ICC, %g2
2267 2246 	and	%o0, %g2, %o0			! mask out the rest
2268 2247 
2269 2248 	rd	%fprs, %g1			! get fprs
2270 2249 	and	%g1, FPRS_FEF, %g2		! mask out dirty upper/lower
2271 2250 	sllx	%g2, PSR_FPRS_FEF_SHIFT, %g2	! shift fef to V8 psr.ef
2272 2251 	or	%o0, %g2, %o0			! or result into psr.ef
2273 2252 
2274 2253 	set	V9_PSR_IMPLVER, %g2		! SI assigned impl/ver: 0xef
2275 2254 	or	%o0, %g2, %o0			! or psr.impl/ver
2276 2255 	FAST_TRAP_DONE
2277 2256 	SET_SIZE(.getpsr)
2276 2255
2277 2256 	/*
2278 2257 	 * setpsr(newpsr)
2279 2258 	 * Install the icc and ef fields of a V8 %psr (in %o0) into
2280 2259 	 * tstate.ccr.icc and %fprs.fef, and mirror the fp-enable state into
2281 2260 	 * the lwp's fpu_fprs/fpu_en.
2282 2261 	 * Note that there is no support for ccr.xcc in the V9 code.
2283 2262 	 */
2284 2263 
2285 2264 	.type	.setpsr, #function
2286 2265 .setpsr:
2287 2266 	rdpr	%tstate, %g1			! get tstate
2288 2267 !	setx	TSTATE_V8_UBITS, %g2
2289 2268 	or 	%g0, CCR_ICC, %g3
2290 2269 	sllx	%g3, TSTATE_CCR_SHIFT, %g2	! %g2 = tstate icc mask
2291 2270 
2292 2271 	andn	%g1, %g2, %g1			! zero current user bits
2293 2272 	set	PSR_ICC, %g2
2294 2273 	and	%g2, %o0, %g2			! clear all but psr.icc bits
2295 2274 	sllx	%g2, PSR_TSTATE_CC_SHIFT, %g3	! shift to tstate.ccr.icc
2296 2275 	wrpr	%g1, %g3, %tstate		! write tstate
2297 2276 
2298 2277 	set	PSR_EF, %g2
2299 2278 	and	%g2, %o0, %g2			! clear all but fp enable bit
2300 2279 	srlx	%g2, PSR_FPRS_FEF_SHIFT, %g4	! shift ef to V9 fprs.fef
2301 2280 	wr	%g0, %g4, %fprs			! write fprs
2302 2281 
2303 2282 	CPU_ADDR(%g1, %g2)			! load CPU struct addr to %g1
2304 2283 	ldn	[%g1 + CPU_THREAD], %g2		! load thread pointer
2305 2284 	ldn	[%g2 + T_LWP], %g3		! load klwp pointer
2306 2285 	ldn	[%g3 + LWP_FPU], %g2		! get lwp_fpu pointer
2307 2286 	stuw	%g4, [%g2 + FPU_FPRS]		! write fef value to fpu_fprs
2308 2287 	srlx	%g4, 2, %g4			! shift fef value to bit 0
2309 2288 	stub	%g4, [%g2 + FPU_EN]		! write fef value to fpu_en
2310 2289 	FAST_TRAP_DONE
2311 2290 	SET_SIZE(.setpsr)
2309 2288
2310 2289 	/*
2311 2290 	 * getlgrp
2312 2291 	 * get home lgrpid on which the calling thread is currently executing.
2313 2292 	 * Returns: %o0 = cpu id of the current CPU,
2314 2293 	 *	    %o1 = home lgroup id from curthread->t_lpl->lpl_lgrpid.
2315 2294 	 */
2316 2295 	.type	.getlgrp, #function
2317 2296 .getlgrp:
2318 2297 	CPU_ADDR(%g1, %g2)		! load CPU struct addr to %g1 using %g2
2319 2298 	ld	[%g1 + CPU_ID], %o0	! load cpu_id
2320 2299 	ldn	[%g1 + CPU_THREAD], %g2	! load thread pointer
2321 2300 	ldn	[%g2 + T_LPL], %g2	! load lpl pointer
2322 2301 	ld	[%g2 + LPL_LGRPID], %g1	! load lpl_lgrpid
2323 2302 	sra	%g1, 0, %o1		! sign-extend lgrpid into %o1
2324 2303 	FAST_TRAP_DONE
2325 2304 	SET_SIZE(.getlgrp)
2325 2304
2326 2305 	/*
2327 2306 	 * Entry for old 4.x trap (trap 0).
2328 2307 	 *
2329 2308 	 * If the lwp has a trap0 emulation address set (pcb_trap0addr),
2330 2309 	 * redirect to it, handing the original return address to user code
2331 2310 	 * in the normal-globals %g6.  Otherwise fall through to the normal
2332 2311 	 * 32-bit syscall path, remapping the old 4.x mmap number.
2333 2312 	 */
2334 2313 	ENTRY_NP(syscall_trap_4x)
2335 2314 	CPU_ADDR(%g1, %g2)		! load CPU struct addr to %g1 using %g2
2336 2315 	ldn	[%g1 + CPU_THREAD], %g2	! load thread pointer
2337 2316 	ldn	[%g2 + T_LWP], %g2	! load klwp pointer
2338 2317 	ld	[%g2 + PCB_TRAP0], %g2	! lwp->lwp_pcb.pcb_trap0addr
2339 2318 	brz,pn	%g2, 1f			! has it been set?
2340 2319 	st	%l0, [%g1 + CPU_TMP1]	! delay - save some locals
2341 2320 	st	%l1, [%g1 + CPU_TMP2]
2342 2321 	rdpr	%tnpc, %l1		! save old tnpc
2343 2322 	wrpr	%g0, %g2, %tnpc		! setup tnpc
2344 2323 
2345 2324 	mov	%g1, %l0		! save CPU struct addr
2346 2325 	wrpr	%g0, 0, %gl
2347 2326 	mov	%l1, %g6		! pass tnpc to user code in %g6
2348 2327 	wrpr	%g0, 1, %gl
2349 2328 	ld	[%l0 + CPU_TMP2], %l1	! restore locals
2350 2329 	ld	[%l0 + CPU_TMP1], %l0
2351 2330 	FAST_TRAP_DONE_CHK_INTR
2352 2331 1:
2353 2332 	!
2354 2333 	! check for old syscall mmap which is the only different one which
2355 2334 	! must be the same. Others are handled in the compatibility library.
2356 2335 	!
2357 2336 	mov	%g1, %l0		! save CPU struct addr
2358 2337 	wrpr	%g0, 0, %gl
2359 2338 	cmp	%g1, OSYS_mmap		! compare to old 4.x mmap
2360 2339 	movz	%icc, SYS_mmap, %g1
2361 2340 	wrpr	%g0, 1, %gl
2362 2341 	ld	[%l0 + CPU_TMP1], %l0
2363 2342 	SYSCALL(syscall_trap32)
2364 2343 	SET_SIZE(syscall_trap_4x)
2360 2339
2361 2340 	/*
2362 2341 	 * Handler for software trap 9.
2363 2342 	 * Set trap0 emulation address for old 4.x system call trap.
2364 2343 	 * The new address arrives in the user's normal-globals (%gl == 0)
2365 2344 	 * %g1; it is forced to 4-byte alignment before being stored into
2366 2345 	 * lwp->lwp_pcb.pcb_trap0addr.
2367 2346 	 * XXX - this should be a system call.
2368 2347 	 */
2369 2348 	ENTRY_NP(set_trap0_addr)
2370 2349 	CPU_ADDR(%g1, %g2)		! load CPU struct addr to %g1 using %g2
2371 2350 	st	%l0, [%g1 + CPU_TMP1]	! save some locals
2372 2351 	st	%l1, [%g1 + CPU_TMP2]
2373 2352 	mov	%g1, %l0		! preserve CPU addr
2374 2353 	wrpr	%g0, 0, %gl
2375 2354 	mov	%g1, %l1		! fetch user-supplied address
2376 2355 	wrpr	%g0, 1, %gl
2377 2356 	! cannot assume globals retained their values after increasing %gl
2378 2357 	ldn	[%l0 + CPU_THREAD], %g2	! load thread pointer
2379 2358 	ldn	[%g2 + T_LWP], %g2	! load klwp pointer
2380 2359 	andn	%l1, 3, %l1		! force alignment
2381 2360 	st	%l1, [%g2 + PCB_TRAP0]	! lwp->lwp_pcb.pcb_trap0addr
2382 2361 	ld	[%l0 + CPU_TMP2], %l1	! restore locals
2383 2362 	ld	[%l0 + CPU_TMP1], %l0
2384 2363 	FAST_TRAP_DONE
2385 2364 	SET_SIZE(set_trap0_addr)
2383 2362
2384 2363 	/*
2385 2364 	 * mmu_trap_tl1
2386 2365 	 * trap handler for unexpected mmu traps.
2387 2366 	 * simply checks if the trap was a user lddf/stdf alignment trap, in which
2388 2367 	 * case we go to fpu_trap or a user trap from the window handler, in which
2389 2368 	 * case we go save the state on the pcb. Otherwise, we go to ptl1_panic.
2390 2369 	 */
2391 2370 	.type	mmu_trap_tl1, #function
2392 2371 mmu_trap_tl1:
2393 2372 #ifdef	TRAPTRACE
2394 2373 	TRACE_PTR(%g5, %g6)
2395 2374 	GET_TRACE_TICK(%g6, %g7)
2396 2375 	stxa	%g6, [%g5 + TRAP_ENT_TICK]%asi
2397 2376 	TRACE_SAVE_TL_GL_REGS(%g5, %g6)
2398 2377 	rdpr	%tt, %g6
2399 2378 	stha	%g6, [%g5 + TRAP_ENT_TT]%asi
2400 2379 	rdpr	%tstate, %g6
2401 2380 	stxa	%g6, [%g5 + TRAP_ENT_TSTATE]%asi
2402 2381 	stna	%sp, [%g5 + TRAP_ENT_SP]%asi
2403 2382 	stna	%g0, [%g5 + TRAP_ENT_TR]%asi
2404 2383 	rdpr	%tpc, %g6
2405 2384 	stna	%g6, [%g5 + TRAP_ENT_TPC]%asi
2406 2385 	MMU_FAULT_STATUS_AREA(%g6)
2407 2386 	ldx	[%g6 + MMFSA_D_ADDR], %g6
2408 2387 	stna	%g6, [%g5 + TRAP_ENT_F1]%asi	! MMU fault address
2409 2388 	CPU_PADDR(%g7, %g6);
2410 2389 	add	%g7, CPU_TL1_HDLR, %g7
2411 2390 	lda	[%g7]ASI_MEM, %g6
2412 2391 	stna	%g6, [%g5 + TRAP_ENT_F2]%asi	! tl1_hdlr flag
2413 2392 	MMU_FAULT_STATUS_AREA(%g6)
2414 2393 	ldx	[%g6 + MMFSA_D_TYPE], %g7	! XXXQ should be a MMFSA_F_ constant?
2415 2394 	ldx	[%g6 + MMFSA_D_CTX], %g6
2416 2395 	sllx	%g6, SFSR_CTX_SHIFT, %g6
2417 2396 	or	%g6, %g7, %g6
2418 2397 	stna	%g6, [%g5 + TRAP_ENT_F3]%asi	! MMU context/type
2419 2398 	set	0xdeadbeef, %g6
2420 2399 	stna	%g6, [%g5 + TRAP_ENT_F4]%asi
2421 2400 	TRACE_NEXT(%g5, %g6, %g7)
2422 2401 #endif /* TRAPTRACE */
2423 2402 	CPU_PADDR(%g7, %g6);
2424 2403 	add	%g7, CPU_TL1_HDLR, %g7		! %g7 = &cpu_m.tl1_hdlr (PA)
2425 2404 	lda	[%g7]ASI_MEM, %g6
2426 2405 	brz,a,pt %g6, 1f			! flag clear: not in guarded window
2427 2406 	nop
2428 2407 	sta	%g0, [%g7]ASI_MEM		! clear the flag
2429 2408 	! XXXQ need to setup registers for sfmmu_mmu_trap?
2430 2409 	ba,a,pt	%xcc, sfmmu_mmu_trap		! handle page faults
2431 2410 1:
2432 2411 	rdpr	%tpc, %g7
2433 2412 	/* in user_rtt? */
2434 2413 	set	rtt_fill_start, %g6
2435 2414 	cmp	%g7, %g6
2436 2415 	blu,pn	%xcc, 6f
2437 2416 	 .empty
2438 2417 	set	rtt_fill_end, %g6
2439 2418 	cmp	%g7, %g6
2440 2419 	bgeu,pn	%xcc, 6f
2441 2420 	 nop
2442 2421 	set	fault_rtt_fn1, %g7		! faulted inside rtt fill: resume there
2443 2422 	ba,a	7f
2444 2423 6:
2445 2424 	! check to see if the trap pc is in a window spill/fill handling
2446 2425 	rdpr	%tpc, %g7
2447 2426 	/* tpc should be in the trap table */
2448 2427 	set	trap_table, %g6
2449 2428 	cmp	%g7, %g6
2450 2429 	blu,a,pn %xcc, ptl1_panic
2451 2430 	  mov	PTL1_BAD_MMUTRAP, %g1
2452 2431 	set	etrap_table, %g6
2453 2432 	cmp	%g7, %g6
2454 2433 	bgeu,a,pn %xcc, ptl1_panic
2455 2434 	  mov	PTL1_BAD_MMUTRAP, %g1
2456 2435 	! pc is inside the trap table, convert to trap type
2457 2436 	srl	%g7, 5, %g6			! XXXQ need #define
2458 2437 	and	%g6, 0x1ff, %g6			! XXXQ need #define
2459 2438 	! and check for a window trap type
2460 2439 	and	%g6, WTRAP_TTMASK, %g6
2461 2440 	cmp	%g6, WTRAP_TYPE
2462 2441 	bne,a,pn %xcc, ptl1_panic
2463 2442 	  mov	PTL1_BAD_MMUTRAP, %g1
2464 2443 	andn	%g7, WTRAP_ALIGN, %g7		/* 128 byte aligned */
2465 2444 	add	%g7, WTRAP_FAULTOFF, %g7	! resume at the handler's fault entry
2466 2445 
2467 2446 7:
2468 2447 	! Arguments are passed in the global set active after the
2469 2448 	! 'done' instruction. Before switching sets, must save
2470 2449 	! the calculated next pc
2471 2450 	wrpr	%g0, %g7, %tnpc
2472 2451 	wrpr	%g0, 1, %gl
2473 2452 	rdpr	%tt, %g5
2474 2453 	MMU_FAULT_STATUS_AREA(%g7)
2475 2454 	cmp	%g5, T_ALIGNMENT
2476 2455 	be,pn	%xcc, 1f			! alignment traps pass raw fault addr
2477 2456 	ldx	[%g7 + MMFSA_D_ADDR], %g6
2478 2457 	ldx	[%g7 + MMFSA_D_CTX], %g7
2479 2458 	srlx	%g6, MMU_PAGESHIFT, %g6		/* align address */
2480 2459 	cmp	%g7, USER_CONTEXT_TYPE
2481 2460 	sllx	%g6, MMU_PAGESHIFT, %g6
2482 2461 	movgu	%icc, USER_CONTEXT_TYPE, %g7
2483 2462 	or	%g6, %g7, %g6			/* TAG_ACCESS */
2484 2463 1:
2485 2464 	done
2486 2465 	SET_SIZE(mmu_trap_tl1)
2487 2466
2488 2467 	/*
2489 2468 	 * Several traps use kmdb_trap and kmdb_trap_tl1 as their handlers.  These
2490 2469 	 * traps are valid only when kmdb is loaded.  When the debugger is active,
2491 2470 	 * the code below is rewritten to transfer control to the appropriate
2492 2471 	 * debugger entry points.
2493 2472 	 *
2494 2473 	 * Each stub is 8-byte aligned so the debugger can atomically patch
2495 2474 	 * the first instruction; when inactive, the "ba,a trap_table0" makes
2496 2475 	 * the trap behave as unexpected.
2497 2476 	 */
2498 2477 	.global	kmdb_trap
2499 2478 	.align	8
2500 2479 kmdb_trap:
2501 2480 	ba,a	trap_table0
2502 2481 	jmp	%g1 + 0
2503 2482 	nop
2504 2483 
2505 2484 	.global	kmdb_trap_tl1
2506 2485 	.align	8
2507 2486 kmdb_trap_tl1:
2508 2487 	ba,a	trap_table0
2509 2488 	jmp	%g1 + 0
2510 2489 	nop
2511 2490 
2512 2491 	/*
2513 2492 	 * This entry is copied from OBP's trap table during boot.
2514 2493 	 */
2515 2494 	.global	obp_bpt
2516 2495 	.align	8
2517 2496 obp_bpt:
2518 2497 	NOT
2515 2494
2516 2495
2517 2496
#ifdef TRAPTRACE
2519 2498 /*
2520 2499  * TRAPTRACE support.
2521 2500  * labels here are branched to with "rd %pc, %g7" in the delay slot.
2522 2501  * Return is done by "jmp %g7 + 4".
2523 2502  */
2524 2503 
	/* Record a data-MMU trap: fault address/ctx/type from the D-side MMFSA. */
2525 2504 trace_dmmu:
2526 2505 	TRACE_PTR(%g3, %g6)
2527 2506 	GET_TRACE_TICK(%g6, %g5)
2528 2507 	stxa	%g6, [%g3 + TRAP_ENT_TICK]%asi
2529 2508 	TRACE_SAVE_TL_GL_REGS(%g3, %g6)
2530 2509 	rdpr	%tt, %g6
2531 2510 	stha	%g6, [%g3 + TRAP_ENT_TT]%asi
2532 2511 	rdpr	%tstate, %g6
2533 2512 	stxa	%g6, [%g3 + TRAP_ENT_TSTATE]%asi
2534 2513 	stna	%sp, [%g3 + TRAP_ENT_SP]%asi
2535 2514 	rdpr	%tpc, %g6
2536 2515 	stna	%g6, [%g3 + TRAP_ENT_TPC]%asi
2537 2516 	MMU_FAULT_STATUS_AREA(%g6)
2538 2517 	ldx	[%g6 + MMFSA_D_ADDR], %g4
2539 2518 	stxa	%g4, [%g3 + TRAP_ENT_TR]%asi	! fault address
2540 2519 	ldx	[%g6 + MMFSA_D_CTX], %g4
2541 2520 	stxa	%g4, [%g3 + TRAP_ENT_F1]%asi	! fault context
2542 2521 	ldx	[%g6 + MMFSA_D_TYPE], %g4
2543 2522 	stxa	%g4, [%g3 + TRAP_ENT_F2]%asi	! fault type
2544 2523 	stxa	%g6, [%g3 + TRAP_ENT_F3]%asi	! MMFSA pointer
2545 2524 	stna	%g0, [%g3 + TRAP_ENT_F4]%asi
2546 2525 	TRACE_NEXT(%g3, %g4, %g5)
2547 2526 	jmp	%g7 + 4
2548 2527 	nop
2549 2528 
	/* Record an instruction-MMU trap: same layout, I-side MMFSA fields. */
2550 2529 trace_immu:
2551 2530 	TRACE_PTR(%g3, %g6)
2552 2531 	GET_TRACE_TICK(%g6, %g5)
2553 2532 	stxa	%g6, [%g3 + TRAP_ENT_TICK]%asi
2554 2533 	TRACE_SAVE_TL_GL_REGS(%g3, %g6)
2555 2534 	rdpr	%tt, %g6
2556 2535 	stha	%g6, [%g3 + TRAP_ENT_TT]%asi
2557 2536 	rdpr	%tstate, %g6
2558 2537 	stxa	%g6, [%g3 + TRAP_ENT_TSTATE]%asi
2559 2538 	stna	%sp, [%g3 + TRAP_ENT_SP]%asi
2560 2539 	rdpr	%tpc, %g6
2561 2540 	stna	%g6, [%g3 + TRAP_ENT_TPC]%asi
2562 2541 	MMU_FAULT_STATUS_AREA(%g6)
2563 2542 	ldx	[%g6 + MMFSA_I_ADDR], %g4
2564 2543 	stxa	%g4, [%g3 + TRAP_ENT_TR]%asi	! fault address
2565 2544 	ldx	[%g6 + MMFSA_I_CTX], %g4
2566 2545 	stxa	%g4, [%g3 + TRAP_ENT_F1]%asi	! fault context
2567 2546 	ldx	[%g6 + MMFSA_I_TYPE], %g4
2568 2547 	stxa	%g4, [%g3 + TRAP_ENT_F2]%asi	! fault type
2569 2548 	stxa	%g6, [%g3 + TRAP_ENT_F3]%asi	! MMFSA pointer
2570 2549 	stna	%g0, [%g3 + TRAP_ENT_F4]%asi
2571 2550 	TRACE_NEXT(%g3, %g4, %g5)
2572 2551 	jmp	%g7 + 4
2573 2552 	nop
2574 2553 
	/* Record a generic trap: tt/tstate/sp/tpc only, F fields zeroed. */
2575 2554 trace_gen:
2576 2555 	TRACE_PTR(%g3, %g6)
2577 2556 	GET_TRACE_TICK(%g6, %g5)
2578 2557 	stxa	%g6, [%g3 + TRAP_ENT_TICK]%asi
2579 2558 	TRACE_SAVE_TL_GL_REGS(%g3, %g6)
2580 2559 	rdpr	%tt, %g6
2581 2560 	stha	%g6, [%g3 + TRAP_ENT_TT]%asi
2582 2561 	rdpr	%tstate, %g6
2583 2562 	stxa	%g6, [%g3 + TRAP_ENT_TSTATE]%asi
2584 2563 	stna	%sp, [%g3 + TRAP_ENT_SP]%asi
2585 2564 	rdpr	%tpc, %g6
2586 2565 	stna	%g6, [%g3 + TRAP_ENT_TPC]%asi
2587 2566 	stna	%g0, [%g3 + TRAP_ENT_TR]%asi
2588 2567 	stna	%g0, [%g3 + TRAP_ENT_F1]%asi
2589 2568 	stna	%g0, [%g3 + TRAP_ENT_F2]%asi
2590 2569 	stna	%g0, [%g3 + TRAP_ENT_F3]%asi
2591 2570 	stna	%g0, [%g3 + TRAP_ENT_F4]%asi
2592 2571 	TRACE_NEXT(%g3, %g4, %g5)
2593 2572 	jmp	%g7 + 4
2594 2573 	nop
2595 2574 
	/* Record a window trap; returns via %l4 + 4 instead of %g7 + 4. */
2596 2575 trace_win:
2597 2576 	TRACE_WIN_INFO(0, %l0, %l1, %l2)
2598 2577 	! Keep the locals as clean as possible, caller cleans %l4
2599 2578 	clr	%l2
2600 2579 	clr	%l1
2601 2580 	jmp	%l4 + 4
2602 2581 	clr	%l0
2603 2582 
2604 2583 /*
2605 2584  * Trace a tsb hit
2606 2585  * g1 = tsbe pointer (in/clobbered)
2607 2586  * g2 = tag access register (in)
2608 2587  * g3 - g4 = scratch (clobbered)
2609 2588  * g5 = tsbe data (in)
2610 2589  * g6 = scratch (clobbered)
2611 2590  * g7 = pc we jumped here from (in)
2612 2591  */
2613 2592 
2614 2593 	! Do not disturb %g5, it will be used after the trace
2615 2594 	ALTENTRY(trace_tsbhit)
2616 2595 	TRACE_TSBHIT(0)
2617 2596 	jmp	%g7 + 4
2618 2597 	nop
2619 2598 
2620 2599 /*
2621 2600  * Trace a TSB miss
2622 2601  *
2623 2602  * g1 = tsb8k pointer (in)
2624 2603  * g2 = tag access register (in)
2625 2604  * g3 = tsb4m pointer (in)
2626 2605  * g4 = tsbe tag (in/clobbered)
2627 2606  * g5 - g6 = scratch (clobbered)
2628 2607  * g7 = pc we jumped here from (in)
2629 2608  */
2630 2609 	.global	trace_tsbmiss
2631 2610 trace_tsbmiss:
2632 2611 	membar	#Sync
2633 2612 	sethi	%hi(FLUSH_ADDR), %g6
2634 2613 	flush	%g6
2635 2614 	TRACE_PTR(%g5, %g6)
2636 2615 	stna	%g2, [%g5 + TRAP_ENT_SP]%asi	! tag access
2637 2616 	stna	%g4, [%g5 + TRAP_ENT_F1]%asi	! XXX? tsb tag
2638 2617 	GET_TRACE_TICK(%g6, %g4)
2639 2618 	stxa	%g6, [%g5 + TRAP_ENT_TICK]%asi
2640 2619 	rdpr	%tnpc, %g6
2641 2620 	stna	%g6, [%g5 + TRAP_ENT_F2]%asi
2642 2621 	stna	%g1, [%g5 + TRAP_ENT_F3]%asi	! tsb8k pointer
2643 2622 	rdpr	%tpc, %g6
2644 2623 	stna	%g6, [%g5 + TRAP_ENT_TPC]%asi
2645 2624 	TRACE_SAVE_TL_GL_REGS(%g5, %g6)
2646 2625 	rdpr	%tt, %g6
2647 2626 	or	%g6, TT_MMU_MISS, %g4
2648 2627 	stha	%g4, [%g5 + TRAP_ENT_TT]%asi
2649 2628 	mov	MMFSA_D_ADDR, %g4		! default to D-side; switch to
2650 2629 	cmp	%g6, FAST_IMMU_MISS_TT		! I-side for instruction misses
2651 2630 	move	%xcc, MMFSA_I_ADDR, %g4
2652 2631 	cmp	%g6, T_INSTR_MMU_MISS
2653 2632 	move	%xcc, MMFSA_I_ADDR, %g4
2654 2633 	MMU_FAULT_STATUS_AREA(%g6)
2655 2634 	ldx	[%g6 + %g4], %g6
2656 2635 	stxa	%g6, [%g5 + TRAP_ENT_TSTATE]%asi ! tag target
2657 2636 	cmp	%g4, MMFSA_D_ADDR
2658 2637 	move	%xcc, MMFSA_D_CTX, %g4		! pick matching ctx offset
2659 2638 	movne	%xcc, MMFSA_I_CTX, %g4
2660 2639 	MMU_FAULT_STATUS_AREA(%g6)
2661 2640 	ldx	[%g6 + %g4], %g6
2662 2641 	stxa	%g6, [%g5 + TRAP_ENT_F4]%asi	! context ID
2663 2642 	stna	%g3, [%g5 + TRAP_ENT_TR]%asi	! tsb4m pointer
2664 2643 	TRACE_NEXT(%g5, %g4, %g6)
2665 2644 	jmp	%g7 + 4
2666 2645 	nop
2667 2646 
2668 2647 /*
2669 2648  * g2 = tag access register (in)
2670 2649  * g3 = ctx type (0, 1 or 2) (in) (not used)
2671 2650  */
2672 2651 trace_dataprot:
2673 2652 	membar	#Sync
2674 2653 	sethi	%hi(FLUSH_ADDR), %g6
2675 2654 	flush	%g6
2676 2655 	TRACE_PTR(%g1, %g6)
2677 2656 	GET_TRACE_TICK(%g6, %g4)
2678 2657 	stxa	%g6, [%g1 + TRAP_ENT_TICK]%asi
2679 2658 	rdpr	%tpc, %g6
2680 2659 	stna	%g6, [%g1 + TRAP_ENT_TPC]%asi
2681 2660 	rdpr	%tstate, %g6
2682 2661 	stxa	%g6, [%g1 + TRAP_ENT_TSTATE]%asi
2683 2662 	stna	%g2, [%g1 + TRAP_ENT_SP]%asi	! tag access reg
2684 2663 	stna	%g0, [%g1 + TRAP_ENT_F1]%asi
2685 2664 	stna	%g0, [%g1 + TRAP_ENT_F2]%asi
2686 2665 	stna	%g0, [%g1 + TRAP_ENT_F3]%asi
2687 2666 	stna	%g0, [%g1 + TRAP_ENT_F4]%asi
2688 2667 	TRACE_SAVE_TL_GL_REGS(%g1, %g6)
2689 2668 	rdpr	%tt, %g6
2690 2669 	stha	%g6, [%g1 + TRAP_ENT_TT]%asi
2691 2670 	mov	MMFSA_D_CTX, %g4		! default to D-side; switch to
2692 2671 	cmp	%g6, FAST_IMMU_MISS_TT		! I-side for instruction misses
2693 2672 	move	%xcc, MMFSA_I_CTX, %g4
2694 2673 	cmp	%g6, T_INSTR_MMU_MISS
2695 2674 	move	%xcc, MMFSA_I_CTX, %g4
2696 2675 	MMU_FAULT_STATUS_AREA(%g6)
2697 2676 	ldx	[%g6 + %g4], %g6
2698 2677 	stxa	%g6, [%g1 + TRAP_ENT_TR]%asi	! context ID
2699 2678 	TRACE_NEXT(%g1, %g4, %g5)
2700 2679 	jmp	%g7 + 4
2701 2680 	nop
2702 2681 
#endif /* TRAPTRACE */
2704 2683
2705 2684 /*
2706 2685 * Handle watchdog reset trap. Enable the MMU using the MMU_ENABLE
2707 2686 * HV service, which requires the return target to be specified as a VA
2708 2687 * since we are enabling the MMU. We set the target to ptl1_panic.
2709 2688 */
2710 2689
2711 2690 	.type .watchdog_trap, #function
2712 2691 .watchdog_trap:
2713 2692 	mov 1, %o0				! arg0: enable the MMU
2714 2693 	setx ptl1_panic, %g2, %o1		! arg1: return VA = ptl1_panic
2715 2694 	mov MMU_ENABLE, %o5			! hypervisor function number
2716 2695 	ta FAST_TRAP				! HV call; resumes at VA in %o1
2717 2696 	done
2718 2697 	SET_SIZE(.watchdog_trap)
2719 2698 /*
2720 2699 * synthesize for trap(): SFAR in %g2, SFSR in %g3
2721 2700 */
2722 2701 	.type .dmmu_exc_lddf_not_aligned, #function
2723 2702 .dmmu_exc_lddf_not_aligned:
2724 2703 	MMU_FAULT_STATUS_AREA(%g3)
2725 2704 	ldx [%g3 + MMFSA_D_ADDR], %g2		! %g2 = fault addr (synthesized SFAR)
2726 2705 	/* Fault type not available in MMU fault status area */
2727 2706 	mov MMFSA_F_UNALIGN, %g1		! so synthesize "unaligned" fault type
2728 2707 	ldx [%g3 + MMFSA_D_CTX], %g3
2729 2708 	sllx %g3, SFSR_CTX_SHIFT, %g3		! position ctx in synthesized SFSR
2730 2709 	btst 1, %sp				! odd %sp => biased (64-bit) stack; TODO confirm
2731 2710 	bnz,pt %xcc, .lddf_exception_not_aligned
2732 2711 	or %g3, %g1, %g3 /* SFSR */		! (branch delay slot)
2733 2712 	ba,a,pt %xcc, .mmu_exception_not_aligned
2734 2713 	SET_SIZE(.dmmu_exc_lddf_not_aligned)
2735 2714
2736 2715 /*
2737 2716 * synthesize for trap(): SFAR in %g2, SFSR in %g3
2738 2717 */
2739 2718 	.type .dmmu_exc_stdf_not_aligned, #function
2740 2719 .dmmu_exc_stdf_not_aligned:
2741 2720 	MMU_FAULT_STATUS_AREA(%g3)
2742 2721 	ldx [%g3 + MMFSA_D_ADDR], %g2		! %g2 = fault addr (synthesized SFAR)
2743 2722 	/* Fault type not available in MMU fault status area */
2744 2723 	mov MMFSA_F_UNALIGN, %g1		! so synthesize "unaligned" fault type
2745 2724 	ldx [%g3 + MMFSA_D_CTX], %g3
2746 2725 	sllx %g3, SFSR_CTX_SHIFT, %g3		! position ctx in synthesized SFSR
2747 2726 	btst 1, %sp				! odd %sp => biased (64-bit) stack; TODO confirm
2748 2727 	bnz,pt %xcc, .stdf_exception_not_aligned
2749 2728 	or %g3, %g1, %g3 /* SFSR */		! (branch delay slot)
2750 2729 	ba,a,pt %xcc, .mmu_exception_not_aligned
2751 2730 	SET_SIZE(.dmmu_exc_stdf_not_aligned)
2752 2731
2753 2732 	.type .dmmu_exception, #function
2754 2733 .dmmu_exception:
2755 2734 	MMU_FAULT_STATUS_AREA(%g3)
2756 2735 	ldx [%g3 + MMFSA_D_ADDR], %g2		! %g2 = fault address
2757 2736 	ldx [%g3 + MMFSA_D_TYPE], %g1		! %g1 = fault type
2758 2737 	ldx [%g3 + MMFSA_D_CTX], %g4		! %g4 = context
2759 2738 	srlx %g2, MMU_PAGESHIFT, %g2 /* align address */
2760 2739 	sllx %g2, MMU_PAGESHIFT, %g2		! (clear page-offset bits)
2761 2740 	sllx %g4, SFSR_CTX_SHIFT, %g3
2762 2741 	or %g3, %g1, %g3 /* SFSR */
2763 2742 	cmp %g4, USER_CONTEXT_TYPE
2764 2743 	movgeu %icc, USER_CONTEXT_TYPE, %g4	! clamp ctx at USER_CONTEXT_TYPE
2765 2744 	or %g2, %g4, %g2 /* TAG_ACCESS */
2766 2745 	ba,pt %xcc, .mmu_exception_end
2767 2746 	mov T_DATA_EXCEPTION, %g1		! (delay slot) trap type for trap()
2768 2747 	SET_SIZE(.dmmu_exception)
2769 2748
2770 2749 	.align 32
2771 2750 	.global pil15_epilogue
2772 2751 pil15_epilogue:
2773 2752 	ba pil_interrupt_common			! tail-branch into common PIL path
2774 2753 	nop					! branch delay slot
2775 2754 	.align 32
2776 2755
2777 2756 /*
2778 2757 * fast_trap_done, fast_trap_done_chk_intr:
2779 2758 *
2780 2759 * Due to the design of UltraSPARC pipeline, pending interrupts are not
2781 2760 * taken immediately after a RETRY or DONE instruction which causes IE to
2782 2761 * go from 0 to 1. Instead, the instruction at %tpc or %tnpc is allowed
2783 2762 * to execute first before taking any interrupts. If that instruction
2784 2763 * results in other traps, and if the corresponding trap handler runs
2785 2764 * entirely at TL=1 with interrupts disabled, then pending interrupts
2786 2765 * won't be taken until after yet another instruction following the %tpc
2787 2766 * or %tnpc.
2788 2767 *
2789 2768 * A malicious user program can use this feature to block out interrupts
2790 2769 * for extended durations, which can result in send_mondo_timeout kernel
2791 2770 * panic.
2792 2771 *
2793 2772 * This problem is addressed by servicing any pending interrupts via
2794 2773 * sys_trap before returning back to the user mode from a fast trap
2795 2774 * handler. The "done" instruction within a fast trap handler, which
2796 2775 * runs entirely at TL=1 with interrupts disabled, is replaced with the
2797 2776 * FAST_TRAP_DONE macro, which branches control to this fast_trap_done
2798 2777 * entry point.
2799 2778 *
2800 2779 * We check for any pending interrupts here and force a sys_trap to
2801 2780 * service those interrupts, if any. To minimize overhead, pending
2802 2781 * interrupts are checked if the %tpc happens to be at 16K boundary,
2803 2782 * which allows a malicious program to execute at most 4K consecutive
2804 2783 * instructions before we service any pending interrupts. If a worst
2805 2784 * case fast trap handler takes about 2 usec, then interrupts will be
2806 2785 * blocked for at most 8 msec, less than a clock tick.
2807 2786 *
2808 2787 * For the cases where we don't know if the %tpc will cross a 16K
2809 2788 * boundary, we can't use the above optimization and always process
2810 2789  * any pending interrupts via fast_trap_done_chk_intr entry point.
2811 2790 *
2812 2791 * Entry Conditions:
2813 2792 * %pstate am:0 priv:1 ie:0
2814 2793 * globals are AG (not normal globals)
2815 2794 */
2816 2795
2817 2796 	.global fast_trap_done, fast_trap_done_chk_intr
2818 2797 fast_trap_done:
2819 2798 	rdpr %tpc, %g5
2820 2799 	sethi %hi(0xffffc000), %g6 ! 1's complement of 0x3fff
2821 2800 	andncc %g5, %g6, %g0 ! check lower 14 bits of %tpc
2822 2801 	bz,pn %icc, 1f ! branch if zero (lower 32 bits only)
2823 2802 	nop
2824 2803 	done		! %tpc not on a 16K boundary: skip the intr check
2825 2804 
2826 2805 fast_trap_done_chk_intr:
2827 2806 1:	rd SOFTINT, %g6
2828 2807 	brnz,pn %g6, 2f ! branch if any pending intr
2829 2808 	nop
2830 2809 	done		! no pending interrupts: normal fast-trap return
2831 2810 
2832 2811 2:
2833 2812 	/*
2834 2813 	 * We get here if there are any pending interrupts.
2835 2814 	 * Adjust %tpc/%tnpc as we'll be resuming via "retry"
2836 2815 	 * instruction.
2837 2816 	 */
2838 2817 	rdpr %tnpc, %g5
2839 2818 	wrpr %g0, %g5, %tpc			! %tpc  = old %tnpc
2840 2819 	add %g5, 4, %g5
2841 2820 	wrpr %g0, %g5, %tnpc			! %tnpc = old %tnpc + 4
2842 2821 
2843 2822 	/*
2844 2823 	 * Force a dummy sys_trap call so that interrupts can be serviced.
2845 2824 	 */
2846 2825 	set fast_trap_dummy_call, %g1		! handler: a no-op (retl below)
2847 2826 	ba,pt %xcc, sys_trap
2848 2827 	mov -1, %g4	! (delay slot) sys_trap arg; NOTE(review): confirm -1 PIL semantics
2849 2828 
2850 2829 fast_trap_dummy_call:
2851 2830 	retl			! do nothing; sys_trap's round trip is what
2852 2831 	nop			! lets pending interrupts get serviced
2853 2832
2854 2833 /*
2855 2834 * Currently the brand syscall interposition code is not enabled by
2856 2835 * default. Instead, when a branded zone is first booted the brand
2857 2836 * infrastructure will patch the trap table so that the syscall
2858 2837 * entry points are redirected to syscall_wrapper32 and syscall_wrapper
2859 2838  * for ILP32 and LP64 syscalls respectively. This is done in
2860 2839 * brand_plat_interposition_enable(). Note that the syscall wrappers
2861 2840 * below do not collect any trap trace data since the syscall hot patch
2862 2841 * points are reached after trap trace data has already been collected.
2863 2842 */
2864 2843 #define BRAND_CALLBACK(callback_id) \
2865 2844 	CPU_ADDR(%g2, %g1) /* load CPU struct addr to %g2 */ ;\
2866 2845 	ldn [%g2 + CPU_THREAD], %g3 /* load thread pointer */ ;\
2867 2846 	ldn [%g3 + T_PROCP], %g3 /* get proc pointer */ ;\
2868 2847 	ldn [%g3 + P_BRAND], %g3 /* get brand pointer */ ;\
2869 2848 	brz %g3, 1f /* No brand? No callback. */ ;\
2870 2849 	nop /* branch delay slot */ ;\
2871 2850 	ldn [%g3 + B_MACHOPS], %g3 /* get machops list */ ;\
2872 2851 	ldn [%g3 + (callback_id << 3)], %g3 /* callback fn, 8-byte slots */ ;\
2873 2852 	brz %g3, 1f /* no callback registered for this id */ ;\
2874 2853 	/* \
2875 2854 	 * This isn't pretty. We want a low-latency way for the callback \
2876 2855 	 * routine to decline to do anything. We just pass in an address \
2877 2856 	 * the routine can directly jmp back to, pretending that nothing \
2878 2857 	 * has happened. \
2879 2858 	 * \
2880 2859 	 * %g1: return address (where the brand handler jumps back to) \
2881 2860 	 * %g2: address of CPU structure \
2882 2861 	 * %g3: address of brand handler (where we will jump to) \
2883 2862 	 */ \
2884 2863 	mov %pc, %g1 /* %g1 = address of this instruction */ ;\
2885 2864 	add %g1, 16, %g1 /* return addr = 4 insns ahead = label 1: */ ;\
2886 2865 	jmp %g3 /* enter the brand handler */ ;\
2887 2866 	nop /* branch delay slot */ ;\
2888 2867 1:
2889 2868
↓ open down ↓ |
1783 lines elided |
↑ open up ↑ |
2890 2869 	ENTRY_NP(syscall_wrapper32)		! hot-patched ILP32 syscall entry
2891 2870 	BRAND_CALLBACK(BRAND_CB_SYSCALL32)	! give the brand first crack
2892 2871 	SYSCALL_NOTT(syscall_trap32)		! then the normal ILP32 path
2893 2872 	SET_SIZE(syscall_wrapper32)
2894 2873
2895 2874 	ENTRY_NP(syscall_wrapper)		! hot-patched LP64 syscall entry
2896 2875 	BRAND_CALLBACK(BRAND_CB_SYSCALL)	! give the brand first crack
2897 2876 	SYSCALL_NOTT(syscall_trap)		! then the normal LP64 path
2898 2877 	SET_SIZE(syscall_wrapper)
2899 2878
2900 -#endif /* lint */
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX