Print this page
restore sparc comments
de-linting of .s files
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/sun4v/ml/mach_locore.s
+++ new/usr/src/uts/sun4v/ml/mach_locore.s
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
↓ open down ↓ |
15 lines elided |
↑ open up ↑ |
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 */
25 25
26 -#if defined(lint)
27 -#include <sys/types.h>
28 -#include <sys/t_lock.h>
29 -#include <sys/promif.h>
30 -#include <sys/prom_isa.h>
31 -#endif /* lint */
32 -
33 26 #include <sys/asm_linkage.h>
34 27 #include <sys/intreg.h>
35 28 #include <sys/ivintr.h>
36 29 #include <sys/mmu.h>
37 30 #include <sys/machpcb.h>
38 31 #include <sys/machtrap.h>
39 32 #include <sys/machlock.h>
40 33 #include <sys/fdreg.h>
41 34 #include <sys/vis.h>
42 35 #include <sys/traptrace.h>
43 36 #include <sys/panic.h>
44 37 #include <sys/machasi.h>
45 38 #include <sys/privregs.h>
46 39 #include <sys/hypervisor_api.h>
47 40 #include <sys/clock.h>
48 41
49 -#if defined(lint)
50 -
51 -#include <sys/thread.h>
52 -#include <sys/time.h>
53 -
54 -#else /* lint */
55 -
56 42 #include "assym.h"
57 43
58 44
59 45 !
60 46 ! REGOFF must add up to allow double word access to r_tstate.
61 47 ! PCB_WBUF must also be aligned.
62 48 !
63 49 #if (REGOFF & 7) != 0
64 50 #error "struct regs not aligned"
65 51 #endif
66 52
67 53 /*
68 54 * Absolute external symbols.
69 55 * On the sun4u we put the panic buffer in the third and fourth pages.
70 56 * We set things up so that the first 2 pages of KERNELBASE is illegal
71 57 * to act as a redzone during copyin/copyout type operations. One of
 72 58 * the reasons the panic buffer is allocated in low memory is to
 73 59 * prevent it being overwritten during booting operations (besides
74 60 * the fact that it is small enough to share pages with others).
75 61 */
76 62
77 63 .seg ".data"
78 64 .global panicbuf
79 65
80 66 PROM = 0xFFE00000 ! address of prom virtual area
81 67 panicbuf = SYSBASE32 + PAGESIZE ! address of panic buffer
82 68
83 69 .type panicbuf, #object
84 70 .size panicbuf, PANICBUFSIZE
85 71
86 72 /*
87 73 * Absolute external symbol - intr_vec_table.
88 74 *
89 75 * With new bus structures supporting a larger number of interrupt
90 76 * numbers, the interrupt vector table, intr_vec_table[] has been
91 77 * moved out of kernel nucleus and allocated after panicbuf.
92 78 */
93 79 .global intr_vec_table
94 80
95 81 intr_vec_table = SYSBASE32 + PAGESIZE + PANICBUFSIZE ! address of interrupt table
96 82
97 83 .type intr_vec_table, #object
98 84 .size intr_vec_table, MAXIVNUM * CPTRSIZE + MAX_RSVD_IV * IV_SIZE + MAX_RSVD_IVX * (IV_SIZE + CPTRSIZE * (NCPU - 1))
99 85
100 86 /*
101 87 * The thread 0 stack. This must be the first thing in the data
102 88 * segment (other than an sccs string) so that we don't stomp
103 89 * on anything important if the stack overflows. We get a
104 90 * red zone below this stack for free when the kernel text is
105 91 * write protected.
106 92 */
107 93
108 94 .global t0stack
109 95 .align 16
110 96 .type t0stack, #object
111 97 t0stack:
112 98 .skip T0STKSZ ! thread 0 stack
113 99 t0stacktop:
114 100 .size t0stack, T0STKSZ
115 101
116 102 /*
117 103 * cpu0 and its ptl1_panic stack. The cpu structure must be allocated
118 104 * on a single page for ptl1_panic's physical address accesses.
119 105 */
120 106 .global cpu0
121 107 .align MMU_PAGESIZE
122 108 cpu0:
123 109 .type cpu0, #object
124 110 .skip CPU_ALLOC_SIZE
125 111 .size cpu0, CPU_ALLOC_SIZE
126 112
127 113 .global t0
128 114 .align PTR24_ALIGN ! alignment for mutex.
129 115 .type t0, #object
130 116 t0:
131 117 .skip THREAD_SIZE ! thread 0
132 118 .size t0, THREAD_SIZE
133 119
134 120 .global trap_trace_ctl
135 121 .global htrap_tr0
136 122 .global htrap_trace_bufsize
137 123
138 124 .align 64
139 125 trap_trace_ctl:
140 126 .skip NCPU * TRAPTR_SIZE ! NCPU control headers
141 127 htrap_tr0:
142 128 .skip HTRAP_TSIZE ! one buffer for the boot cpu
143 129 .align 4
144 130 htrap_trace_bufsize:
145 131 .word HTRAP_TSIZE ! default hv trap buffer size
146 132
147 133 #ifdef TRAPTRACE
148 134 .global trap_tr0
149 135 .global trap_trace_bufsize
150 136 .global trap_freeze
151 137 .global trap_freeze_pc
152 138
153 139 .align 4
154 140 trap_trace_bufsize:
155 141 .word TRAP_TSIZE ! default trap buffer size
156 142 trap_freeze:
157 143 .word 0
158 144
159 145 .align 16
160 146 trap_tr0:
161 147 .skip TRAP_TSIZE ! one buffer for the boot cpu
162 148
163 149 /*
164 150 * When an assertion in TRACE_PTR was failed, %pc is saved in trap_freeze_pc to
165 151 * show in which TRACE_PTR the assertion failure happened.
166 152 */
167 153 .align 8
168 154 trap_freeze_pc:
169 155 .nword 0
170 156 #endif /* TRAPTRACE */
171 157
172 158 .align 4
173 159 .seg ".text"
174 160
↓ open down ↓ |
109 lines elided |
↑ open up ↑ |
175 161 #ifdef NOPROM
176 162 .global availmem
177 163 availmem:
178 164 .word 0
179 165 #endif /* NOPROM */
180 166
181 167 .align 8
182 168 _local_p1275cis:
183 169 .nword 0
184 170
185 -#endif /* lint */
186 -
187 -#if defined(lint)
188 -
189 -void
190 -_start(void)
191 -{}
192 -
193 -#else /* lint */
194 -
195 171 .seg ".data"
196 172
197 173 .global nwindows, nwin_minus_one, winmask
198 174 nwindows:
199 175 .word 8
200 176 nwin_minus_one:
201 177 .word 7
202 178 winmask:
203 179 .word 8
204 180
205 181 .global afsrbuf
206 182 afsrbuf:
207 183 .word 0,0,0,0
208 184
209 185 /*
210 186 * System initialization
211 187 *
212 188 * Our contract with the boot prom specifies that the MMU is on and the
213 189 * first 16 meg of memory is mapped with a level-1 pte. We are called
214 190 * with p1275cis ptr in %o0 and kdi_dvec in %o1; we start execution
215 191 * directly from physical memory, so we need to get up into our proper
216 192 * addresses quickly: all code before we do this must be position
217 193 * independent.
218 194 *
219 195 * NB: Above is not true for boot/stick kernel, the only thing mapped is
220 196 * the text+data+bss. The kernel is loaded directly into KERNELBASE.
221 197 *
 222 198 * On entry, the romvec pointer (romp) is the first argument;
223 199 * i.e., %o0.
224 200 * the bootops vector is in the third argument (%o1)
225 201 *
226 202 * Our tasks are:
227 203 * save parameters
228 204 * construct mappings for KERNELBASE (not needed for boot/stick kernel)
229 205 * hop up into high memory (not needed for boot/stick kernel)
230 206 * initialize stack pointer
231 207 * initialize trap base register
232 208 * initialize window invalid mask
233 209 * initialize psr (with traps enabled)
234 210 * figure out all the module type stuff
235 211 * tear down the 1-1 mappings
236 212 * dive into main()
237 213 */
238 214 ENTRY_NP(_start)
239 215 !
240 216 ! Stash away our arguments in memory.
241 217 !
242 218 sethi %hi(_local_p1275cis), %g1
243 219 stn %o4, [%g1 + %lo(_local_p1275cis)]
244 220
245 221 !
246 222 ! Initialize CPU state registers
247 223 !
248 224 wrpr %g0, PSTATE_KERN, %pstate
249 225 wr %g0, %g0, %fprs
250 226
251 227 !
252 228 ! call krtld to link the world together
253 229 !
254 230 call kobj_start
255 231 mov %o4, %o0
256 232
257 233 ! Write 0x1f (MAX_REG_WINDOWS) to %cwp and read back to get
258 234 ! the actual implemented nwin - 1 value
259 235 rdpr %cwp, %g2 ! save current %cwp
260 236 wrpr %g0, 0x1f, %cwp
261 237 rdpr %cwp, %g1 ! %g1 = nwin - 1
262 238 wrpr %g0, %g2, %cwp ! restore current %cwp
263 239
264 240 !
265 241 ! Stuff some memory cells related to numbers of windows.
266 242 !
267 243 sethi %hi(nwin_minus_one), %g2
268 244 st %g1, [%g2 + %lo(nwin_minus_one)]
269 245 inc %g1
270 246 sethi %hi(nwindows), %g2
271 247 st %g1, [%g2 + %lo(nwindows)]
272 248 dec %g1
273 249 mov -2, %g2
274 250 sll %g2, %g1, %g2
275 251 sethi %hi(winmask), %g4
276 252 st %g2, [%g4 + %lo(winmask)]
277 253
278 254 !
279 255 ! save a pointer to obp's tba for later use by kmdb
280 256 !
281 257 rdpr %tba, %g1
282 258 set boot_tba, %g2
283 259 stx %g1, [%g2]
284 260
285 261 !
286 262 ! copy obp's breakpoint trap entry to obp_bpt
287 263 !
288 264 rdpr %tba, %g1
289 265 set T_SOFTWARE_TRAP | ST_MON_BREAKPOINT, %g2
290 266 sll %g2, 5, %g2
291 267 or %g1, %g2, %g1
292 268 set obp_bpt, %g2
293 269 ldx [%g1], %g3
294 270 stx %g3, [%g2]
295 271 flush %g2
296 272 ldx [%g1 + 8], %g3
297 273 stx %g3, [%g2 + 8]
298 274 flush %g2 + 8
299 275 ldx [%g1 + 16], %g3
300 276 stx %g3, [%g2 + 16]
301 277 flush %g2 + 16
302 278 ldx [%g1 + 24], %g3
303 279 stx %g3, [%g2 + 24]
304 280 flush %g2 + 24
305 281
306 282 !
307 283 ! Initialize thread 0's stack.
308 284 !
309 285 set t0stacktop, %g1 ! setup kernel stack pointer
310 286 sub %g1, SA(KFPUSIZE+GSR_SIZE), %g2
311 287 and %g2, 0x3f, %g3
312 288 sub %g2, %g3, %o1
313 289 sub %o1, SA(MPCBSIZE) + STACK_BIAS, %sp
314 290
315 291 !
316 292 ! Initialize global thread register.
317 293 !
318 294 set t0, THREAD_REG
319 295
320 296 !
321 297 ! Fill in enough of the cpu structure so that
322 298 ! the wbuf management code works. Make sure the
323 299 ! boot cpu is inserted in cpu[] based on cpuid.
324 300 !
325 301 CPU_INDEX(%g2, %g1)
326 302 sll %g2, CPTRSHIFT, %g2 ! convert cpuid to cpu[] offset
327 303 set cpu0, %o0 ! &cpu0
328 304 set cpu, %g1 ! &cpu[]
329 305 stn %o0, [%g1 + %g2] ! cpu[cpuid] = &cpu0
330 306
331 307 stn %o0, [THREAD_REG + T_CPU] ! threadp()->t_cpu = cpu[cpuid]
332 308 stn THREAD_REG, [%o0 + CPU_THREAD] ! cpu[cpuid]->cpu_thread = threadp()
333 309
334 310
335 311 ! We do NOT need to bzero our BSS...boot has already done it for us.
336 312 ! Just need to reference edata so that we don't break /dev/ksyms
337 313 set edata, %g0
338 314
339 315 !
340 316 ! Call mlsetup with address of prototype user registers.
341 317 !
342 318 call mlsetup
343 319 add %sp, REGOFF + STACK_BIAS, %o0
344 320
345 321 #if (REGOFF != MPCB_REGS)
346 322 #error "hole in struct machpcb between frame and regs?"
347 323 #endif
348 324
349 325 !
350 326 ! Now call main. We will return as process 1 (init).
351 327 !
352 328 call main
353 329 nop
354 330
355 331 !
356 332 ! Main should never return.
↓ open down ↓ |
152 lines elided |
↑ open up ↑ |
357 333 !
358 334 set .mainretmsg, %o0
359 335 call panic
360 336 nop
361 337 SET_SIZE(_start)
362 338
363 339 .mainretmsg:
364 340 .asciz "main returned"
365 341 .align 4
366 342
367 -#endif /* lint */
368 343
369 -
370 344 /*
371 345 * Generic system trap handler.
372 346 *
373 347 * Some kernel trap handlers save themselves from buying a window by
374 348 * borrowing some of sys_trap's unused locals. %l0 thru %l3 may be used
375 349 * for this purpose, as user_rtt and priv_rtt do not depend on them.
376 350 * %l4 thru %l7 should NOT be used this way.
377 351 *
378 352 * Entry Conditions:
379 353 * %pstate am:0 priv:1 ie:0
380 354 * %gl global level 1
381 355 *
382 356 * Register Inputs:
383 357 * %g1 pc of trap handler
384 358 * %g2, %g3 args for handler
385 359 * %g4 desired %pil (-1 means current %pil)
386 360 * %g5, %g6 destroyed
387 361 * %g7 saved
388 362 *
389 363 * Register Usage:
390 364 * %l0, %l1 temps
391 365 * %l3 saved %g1
392 366 * %l6 curthread for user traps, %pil for priv traps
393 367 * %l7 regs
394 368 *
↓ open down ↓ |
15 lines elided |
↑ open up ↑ |
395 369 * Called function prototype variants:
396 370 *
397 371 * func(struct regs *rp);
398 372 * func(struct regs *rp, uintptr_t arg1 [%g2], uintptr_t arg2 [%g3])
399 373 * func(struct regs *rp, uintptr_t arg1 [%g2],
400 374 * uint32_t arg2 [%g3.l], uint32_t arg3 [%g3.h])
401 375 * func(struct regs *rp, uint32_t arg1 [%g2.l],
402 376 * uint32_t arg2 [%g3.l], uint32_t arg3 [%g3.h], uint32_t [%g2.h])
403 377 */
404 378
405 -#if defined(lint)
406 -
407 -void
408 -sys_trap(void)
409 -{}
410 -
411 -#else /* lint */
412 -
413 379 ENTRY_NP(sys_trap)
414 380 #ifdef DEBUG
415 381 ! Assert gl == 1
416 382 rdpr %gl, %g5
417 383 cmp %g5, 1
418 384 bne,a,pn %xcc, ptl1_panic
419 385 mov PTL1_BAD_GL, %g1
420 386 #endif
421 387
422 388 !
423 389 ! force tl=1, update %cwp, branch to correct handler
424 390 !
425 391
426 392 wrpr %g0, 1, %tl
427 393 rdpr %tstate, %g5
428 394 btst TSTATE_PRIV, %g5
429 395 and %g5, TSTATE_CWP, %g6
430 396 bnz,pn %xcc, priv_trap
431 397 wrpr %g0, %g6, %cwp
432 398
433 399 ALTENTRY(user_trap)
434 400 !
435 401 ! user trap
436 402 !
437 403 ! make all windows clean for kernel
438 404 ! buy a window using the current thread's stack
439 405 !
440 406 #ifdef DEBUG
441 407 ! Assert gl == 1
442 408 rdpr %gl, %g5
443 409 cmp %g5, 1
444 410 bne,a,pn %xcc, ptl1_panic
445 411 mov PTL1_BAD_GL, %g1
446 412 #endif
447 413 sethi %hi(nwin_minus_one), %g5
448 414 ld [%g5 + %lo(nwin_minus_one)], %g5
449 415 wrpr %g0, %g5, %cleanwin
450 416 CPU_ADDR(%g5, %g6)
451 417 ldn [%g5 + CPU_THREAD], %g5
452 418 ldn [%g5 + T_STACK], %g6
453 419 sub %g6, STACK_BIAS, %g6
454 420 save %g6, 0, %sp
455 421 !
456 422 ! set window registers so that current windows are "other" windows
457 423 !
458 424 rdpr %canrestore, %l0
459 425 rdpr %wstate, %l1
460 426 wrpr %g0, 0, %canrestore
461 427 sllx %l1, WSTATE_SHIFT, %l1
462 428 wrpr %l1, WSTATE_K64, %wstate
463 429 wrpr %g0, %l0, %otherwin
464 430 !
465 431 ! set pcontext to run kernel
466 432 !
467 433 mov KCONTEXT, %l0
468 434 mov MMU_PCONTEXT, %l1
469 435 stxa %l0, [%l1]ASI_MMU_CTX
470 436 ! Ensure new ctx takes effect by the time the "done" (below) completes
471 437 membar #Sync
472 438
473 439 set utl0, %g6 ! bounce to utl0
474 440 have_win:
475 441 #ifdef DEBUG
476 442 CPU_ADDR(%o1, %o2)
477 443 add %o1, CPU_MCPU, %o1
478 444 ld [%o1 + MCPU_KWBUF_FULL], %o2
479 445 tst %o2
480 446 bnz,a,pn %icc, ptl1_panic
481 447 mov PTL1_BAD_WTRAP, %g1
482 448 #endif /* DEBUG */
483 449 SYSTRAP_TRACE(%o1, %o2, %o3)
484 450
485 451
486 452 !
487 453 ! at this point we have a new window we can play in,
488 454 ! and %g6 is the label we want done to bounce to
489 455 !
490 456 ! save needed current globals
491 457 !
492 458 mov %g1, %l3 ! pc
493 459 mov %g2, %o1 ! arg #1
494 460 mov %g3, %o2 ! arg #2
495 461 srlx %g3, 32, %o3 ! pseudo arg #3
496 462 srlx %g2, 32, %o4 ! pseudo arg #4
497 463 mov %g5, %l6 ! curthread if user trap, %pil if priv trap
498 464 !
499 465 ! save trap state on stack
500 466 !
501 467 add %sp, REGOFF + STACK_BIAS, %l7
502 468 rdpr %tpc, %l0
503 469 rdpr %tnpc, %l1
504 470 rdpr %tstate, %l2
505 471 stn %l0, [%l7 + PC_OFF]
506 472 stn %l1, [%l7 + nPC_OFF]
507 473 stx %l2, [%l7 + TSTATE_OFF]
508 474 !
509 475 ! setup pil
510 476 !
511 477 brlz,pt %g4, 1f
512 478 nop
513 479 #ifdef DEBUG
514 480 !
515 481 ! ASSERT(%g4 >= %pil).
516 482 !
517 483 rdpr %pil, %l0
518 484 cmp %g4, %l0
519 485 bge,pt %xcc, 0f
520 486 nop ! yes, nop; to avoid anull
521 487 set bad_g4_called, %l3
522 488 mov 1, %o1
523 489 st %o1, [%l3]
524 490 set bad_g4, %l3 ! pc
525 491 set sys_trap_wrong_pil, %o1 ! arg #1
526 492 mov %g4, %o2 ! arg #2
527 493 ba 1f ! stay at the current %pil
528 494 mov %l0, %o3 ! arg #3
529 495 0:
530 496 #endif /* DEBUG */
531 497 wrpr %g0, %g4, %pil
532 498 1:
533 499 !
534 500 ! set trap regs to execute in kernel at %g6
535 501 ! done resumes execution there
536 502 !
537 503 wrpr %g0, %g6, %tnpc
538 504 rdpr %cwp, %l0
539 505 set TSTATE_KERN, %l1
540 506 wrpr %l1, %l0, %tstate
541 507 done
542 508 /* NOTREACHED */
543 509 SET_SIZE(user_trap)
544 510 SET_SIZE(sys_trap)
545 511
546 512 #define KWBUF64_TO_STACK(SBP,SPP,TMP) \
547 513 ldx [SBP + (0*8)], TMP; \
548 514 stx TMP, [SPP + V9BIAS64 + 0]; \
549 515 ldx [SBP + (1*8)], TMP; \
550 516 stx TMP, [SPP + V9BIAS64 + 8]; \
551 517 ldx [SBP + (2*8)], TMP; \
552 518 stx TMP, [SPP + V9BIAS64 + 16]; \
553 519 ldx [SBP + (3*8)], TMP; \
554 520 stx TMP, [SPP + V9BIAS64 + 24]; \
555 521 ldx [SBP + (4*8)], TMP; \
556 522 stx TMP, [SPP + V9BIAS64 + 32]; \
557 523 ldx [SBP + (5*8)], TMP; \
558 524 stx TMP, [SPP + V9BIAS64 + 40]; \
559 525 ldx [SBP + (6*8)], TMP; \
560 526 stx TMP, [SPP + V9BIAS64 + 48]; \
561 527 ldx [SBP + (7*8)], TMP; \
562 528 stx TMP, [SPP + V9BIAS64 + 56]; \
563 529 ldx [SBP + (8*8)], TMP; \
564 530 stx TMP, [SPP + V9BIAS64 + 64]; \
565 531 ldx [SBP + (9*8)], TMP; \
566 532 stx TMP, [SPP + V9BIAS64 + 72]; \
567 533 ldx [SBP + (10*8)], TMP; \
568 534 stx TMP, [SPP + V9BIAS64 + 80]; \
569 535 ldx [SBP + (11*8)], TMP; \
570 536 stx TMP, [SPP + V9BIAS64 + 88]; \
571 537 ldx [SBP + (12*8)], TMP; \
572 538 stx TMP, [SPP + V9BIAS64 + 96]; \
573 539 ldx [SBP + (13*8)], TMP; \
574 540 stx TMP, [SPP + V9BIAS64 + 104]; \
575 541 ldx [SBP + (14*8)], TMP; \
576 542 stx TMP, [SPP + V9BIAS64 + 112]; \
577 543 ldx [SBP + (15*8)], TMP; \
578 544 stx TMP, [SPP + V9BIAS64 + 120];
579 545
580 546 #define KWBUF32_TO_STACK(SBP,SPP,TMP) \
581 547 lduw [SBP + (0 * 4)], TMP; \
582 548 stw TMP, [SPP + 0]; \
583 549 lduw [SBP + (1 * 4)], TMP; \
584 550 stw TMP, [SPP + (1 * 4)]; \
585 551 lduw [SBP + (2 * 4)], TMP; \
586 552 stw TMP, [SPP + (2 * 4)]; \
587 553 lduw [SBP + (3 * 4)], TMP; \
588 554 stw TMP, [SPP + (3 * 4)]; \
589 555 lduw [SBP + (4 * 4)], TMP; \
590 556 stw TMP, [SPP + (4 * 4)]; \
591 557 lduw [SBP + (5 * 4)], TMP; \
592 558 stw TMP, [SPP + (5 * 4)]; \
593 559 lduw [SBP + (6 * 4)], TMP; \
594 560 stw TMP, [SPP + (6 * 4)]; \
595 561 lduw [SBP + (7 * 4)], TMP; \
596 562 stw TMP, [SPP + (7 * 4)]; \
597 563 lduw [SBP + (8 * 4)], TMP; \
598 564 stw TMP, [SPP + (8 * 4)]; \
599 565 lduw [SBP + (9 * 4)], TMP; \
600 566 stw TMP, [SPP + (9 * 4)]; \
601 567 lduw [SBP + (10 * 4)], TMP; \
602 568 stw TMP, [SPP + (10 * 4)]; \
603 569 lduw [SBP + (11 * 4)], TMP; \
604 570 stw TMP, [SPP + (11 * 4)]; \
605 571 lduw [SBP + (12 * 4)], TMP; \
606 572 stw TMP, [SPP + (12 * 4)]; \
607 573 lduw [SBP + (13 * 4)], TMP; \
608 574 stw TMP, [SPP + (13 * 4)]; \
609 575 lduw [SBP + (14 * 4)], TMP; \
610 576 stw TMP, [SPP + (14 * 4)]; \
611 577 lduw [SBP + (15 * 4)], TMP; \
612 578 stw TMP, [SPP + (15 * 4)];
613 579
614 580 #define COPY_KWBUF_TO_STACK(TMP1,TMP2,TMP3) \
615 581 CPU_ADDR(TMP2, TMP3) ;\
616 582 add TMP2, CPU_MCPU, TMP2 ;\
617 583 ld [TMP2 + MCPU_KWBUF_FULL], TMP3 ;\
618 584 brz,pt TMP3, 2f ;\
619 585 nop ;\
620 586 st %g0, [TMP2 + MCPU_KWBUF_FULL] ;\
621 587 set MCPU_KWBUF_SP, TMP3 ;\
622 588 ldn [TMP2 + TMP3], TMP3 ;\
623 589 set MCPU_KWBUF, TMP1 ;\
624 590 btst 1, TMP3 ;\
625 591 bz,pn %xcc, 3f ;\
626 592 add TMP2, TMP1, TMP2 ;\
627 593 KWBUF64_TO_STACK(TMP2, TMP3, TMP1) ;\
628 594 ba,a 2f ;\
629 595 3: ;\
630 596 KWBUF32_TO_STACK(TMP2, TMP3, TMP1) ;\
631 597 2:
632 598
633 599 ENTRY_NP(prom_trap)
634 600 !
635 601 ! prom trap switches the stack to 32-bit
636 602 ! if we took a trap from a 64-bit window
637 603 ! Then buys a window on the current stack.
638 604 !
639 605 save %sp, -SA64(REGOFF + REGSIZE), %sp
640 606 /* 32 bit frame, 64 bit sized */
641 607 COPY_KWBUF_TO_STACK(%o1, %o2, %o3)
642 608 set ptl0, %g6
643 609 ba,a,pt %xcc, have_win
644 610 SET_SIZE(prom_trap)
645 611
646 612 ENTRY_NP(priv_trap)
647 613 !
648 614 ! kernel trap
649 615 ! buy a window on the current stack
650 616 !
651 617 ! is the trap PC in the range allocated to Open Firmware?
652 618 rdpr %tpc, %g5
653 619 set OFW_END_ADDR, %g6
654 620 cmp %g5, %g6
655 621 bgu,a,pn %xcc, 1f
656 622 rdpr %pil, %g5
657 623 set OFW_START_ADDR, %g6
658 624 cmp %g5, %g6
659 625 bgeu,pn %xcc, prom_trap
660 626 rdpr %pil, %g5
661 627 1:
662 628 set ktl0, %g6
663 629 save %sp, -SA(REGOFF + REGSIZE), %sp
664 630 COPY_KWBUF_TO_STACK(%o1, %o2, %o3)
665 631 ba,a,pt %xcc, have_win
666 632 SET_SIZE(priv_trap)
667 633
668 634 /*
669 635 * FILL_32bit_rtt/FILL_64bit_rtt fills a 32/64-bit-wide register window
 670 636 * from a 32/64-bit wide address space via the designated asi.
671 637 * It is used to fill windows in user_rtt to avoid going above TL 2.
672 638 */
673 639 /* TODO: Use the faster FILL based on FILL_32bit_asi/FILL_64bit_asi */
674 640 #define FILL_32bit_rtt(asi_num) \
675 641 mov asi_num, %asi ;\
676 642 rdpr %cwp, %g1 ;\
677 643 dec %g1 ;\
678 644 wrpr %g1, %cwp ;\
679 645 srl %sp, 0, %sp ;\
680 646 lda [%sp + 0]%asi, %l0 ;\
681 647 lda [%sp + 4]%asi, %l1 ;\
682 648 lda [%sp + 8]%asi, %l2 ;\
683 649 lda [%sp + 12]%asi, %l3 ;\
684 650 lda [%sp + 16]%asi, %l4 ;\
685 651 lda [%sp + 20]%asi, %l5 ;\
686 652 lda [%sp + 24]%asi, %l6 ;\
687 653 lda [%sp + 28]%asi, %l7 ;\
688 654 lda [%sp + 32]%asi, %i0 ;\
689 655 lda [%sp + 36]%asi, %i1 ;\
690 656 lda [%sp + 40]%asi, %i2 ;\
691 657 lda [%sp + 44]%asi, %i3 ;\
692 658 lda [%sp + 48]%asi, %i4 ;\
693 659 lda [%sp + 52]%asi, %i5 ;\
694 660 lda [%sp + 56]%asi, %i6 ;\
695 661 lda [%sp + 60]%asi, %i7 ;\
696 662 restored ;\
697 663 add %g1, 1, %g1 ;\
698 664 wrpr %g1, %cwp
699 665
700 666 #define FILL_64bit_rtt(asi_num) \
701 667 mov asi_num, %asi ;\
702 668 rdpr %cwp, %g1 ;\
703 669 sub %g1, 1, %g1 ;\
704 670 wrpr %g1, %cwp ;\
705 671 ldxa [%sp + V9BIAS64 + 0]%asi, %l0 ;\
706 672 ldxa [%sp + V9BIAS64 + 8]%asi, %l1 ;\
707 673 ldxa [%sp + V9BIAS64 + 16]%asi, %l2 ;\
708 674 ldxa [%sp + V9BIAS64 + 24]%asi, %l3 ;\
709 675 ldxa [%sp + V9BIAS64 + 32]%asi, %l4 ;\
710 676 ldxa [%sp + V9BIAS64 + 40]%asi, %l5 ;\
711 677 ldxa [%sp + V9BIAS64 + 48]%asi, %l6 ;\
712 678 ldxa [%sp + V9BIAS64 + 56]%asi, %l7 ;\
713 679 ldxa [%sp + V9BIAS64 + 64]%asi, %i0 ;\
714 680 ldxa [%sp + V9BIAS64 + 72]%asi, %i1 ;\
715 681 ldxa [%sp + V9BIAS64 + 80]%asi, %i2 ;\
716 682 ldxa [%sp + V9BIAS64 + 88]%asi, %i3 ;\
717 683 ldxa [%sp + V9BIAS64 + 96]%asi, %i4 ;\
718 684 ldxa [%sp + V9BIAS64 + 104]%asi, %i5 ;\
719 685 ldxa [%sp + V9BIAS64 + 112]%asi, %i6 ;\
720 686 ldxa [%sp + V9BIAS64 + 120]%asi, %i7 ;\
721 687 restored ;\
722 688 add %g1, 1, %g1 ;\
723 689 wrpr %g1, %cwp
724 690
725 691 ENTRY_NP(utl0)
726 692 SAVE_GLOBALS(%l7)
727 693 SAVE_OUTS(%l7)
728 694 mov %l6, THREAD_REG
729 695 wrpr %g0, PSTATE_KERN, %pstate ! enable ints
730 696 jmpl %l3, %o7 ! call trap handler
731 697 mov %l7, %o0
732 698 !
733 699 ALTENTRY(user_rtt)
734 700 !
735 701 ! Register inputs
736 702 ! %l7 - regs
737 703 !
738 704 ! disable interrupts and check for ASTs and wbuf restores
739 705 ! keep cpu_base_spl in %l4
740 706 !
741 707 wrpr %g0, PIL_MAX, %pil
742 708 ldn [THREAD_REG + T_CPU], %l0
743 709 ld [%l0 + CPU_BASE_SPL], %l4
744 710
745 711 ldub [THREAD_REG + T_ASTFLAG], %l2
746 712 brz,pt %l2, 1f
747 713 ld [%sp + STACK_BIAS + MPCB_WBCNT], %l3
748 714 !
749 715 ! call trap to do ast processing
750 716 !
751 717 wrpr %g0, %l4, %pil ! pil = cpu_base_spl
752 718 mov %l7, %o0
753 719 call trap
754 720 mov T_AST, %o2
755 721 ba,a,pt %xcc, user_rtt
756 722 1:
757 723 brz,pt %l3, 2f
758 724 mov THREAD_REG, %l6
759 725 !
760 726 ! call restore_wbuf to push wbuf windows to stack
761 727 !
762 728 wrpr %g0, %l4, %pil ! pil = cpu_base_spl
763 729 mov %l7, %o0
764 730 call trap
765 731 mov T_FLUSH_PCB, %o2
766 732 ba,a,pt %xcc, user_rtt
767 733 2:
768 734 #ifdef TRAPTRACE
769 735 TRACE_RTT(TT_SYS_RTT_USER, %l0, %l1, %l2, %l3)
770 736 #endif /* TRAPTRACE */
771 737 ld [%sp + STACK_BIAS + MPCB_WSTATE], %l3 ! get wstate
772 738
773 739 !
774 740 ! restore user globals and outs
775 741 !
776 742 rdpr %pstate, %l1
777 743 wrpr %l1, PSTATE_IE, %pstate
778 744 RESTORE_GLOBALS(%l7)
779 745 ! switch to global set 1, saving THREAD_REG in %l6
780 746 wrpr %g0, 1, %gl
781 747 mov %sp, %g6 ! remember the mpcb pointer in %g6
782 748 RESTORE_OUTS(%l7)
783 749 !
784 750 ! set %pil from cpu_base_spl
785 751 !
786 752 wrpr %g0, %l4, %pil
787 753 !
788 754 ! raise tl (now using nucleus context)
789 755 ! set pcontext to scontext for user execution
790 756 !
791 757 wrpr %g0, 1, %tl
792 758
793 759 mov MMU_SCONTEXT, %g1
794 760 ldxa [%g1]ASI_MMU_CTX, %g2
795 761 mov MMU_PCONTEXT, %g1
796 762 stxa %g2, [%g1]ASI_MMU_CTX
797 763 !
798 764 ! If shared context support is not enabled, then the next six
799 765 ! instructions will be patched with nop instructions.
800 766 !
801 767 .global sfmmu_shctx_user_rtt_patch
802 768 sfmmu_shctx_user_rtt_patch:
803 769 !
804 770 ! On processors which support multiple contexts, writing to
805 771 ! pcontext0 automatically updates pcontext1 for backwards
806 772 ! compatibility. So, if scontext0 & scontext1 are the same
807 773 ! a write to pcontext0 is sufficient.
808 774 !
809 775 mov MMU_SCONTEXT1, %g1
810 776 ldxa [%g1]ASI_MMU_CTX, %g3
811 777 cmp %g2, %g3
812 778 beq,pt %xcc, no_pctx1_update
813 779 mov MMU_PCONTEXT1, %g1
814 780 stxa %g3, [%g1]ASI_MMU_CTX
815 781
816 782 no_pctx1_update:
817 783 ! Ensure new ctxs take effect by the time the "retry" (below) completes
818 784 membar #Sync
819 785
820 786 !
821 787 ! setup trap regs
822 788 !
823 789 ldn [%l7 + PC_OFF], %g1
824 790 ldn [%l7 + nPC_OFF], %g2
825 791 ldx [%l7 + TSTATE_OFF], %l0
826 792 andn %l0, TSTATE_CWP, %g7
827 793 wrpr %g1, %tpc
828 794 wrpr %g2, %tnpc
829 795 !
830 796 ! switch "other" windows back to "normal" windows and
831 797 ! restore to window we originally trapped in
832 798 !
833 799 rdpr %otherwin, %g1
834 800 wrpr %g0, 0, %otherwin
835 801 add %l3, WSTATE_CLEAN_OFFSET, %l3 ! convert to "clean" wstate
836 802 wrpr %g0, %l3, %wstate
837 803 wrpr %g0, %g1, %canrestore
838 804 !
839 805 ! First attempt to restore from the watchpoint saved register window
840 806 tst %g1
841 807 bne,a 1f
842 808 clrn [%g6 + STACK_BIAS + MPCB_RSP0]
843 809 tst %fp
844 810 be,a 1f
845 811 clrn [%g6 + STACK_BIAS + MPCB_RSP0]
846 812 ! test for user return window in pcb
847 813 ldn [%g6 + STACK_BIAS + MPCB_RSP0], %g1
848 814 cmp %fp, %g1
849 815 bne 1f
850 816 clrn [%g6 + STACK_BIAS + MPCB_RSP0]
851 817 restored
852 818 restore
853 819 ! restore from user return window
854 820 RESTORE_V9WINDOW(%g6 + STACK_BIAS + MPCB_RWIN0)
855 821 !
 856 822 ! Attempt to restore from the second watchpoint saved register window
857 823 tst %fp
858 824 be,a 2f
859 825 clrn [%g6 + STACK_BIAS + MPCB_RSP1]
860 826 ldn [%g6 + STACK_BIAS + MPCB_RSP1], %g1
861 827 cmp %fp, %g1
862 828 bne 2f
863 829 clrn [%g6 + STACK_BIAS + MPCB_RSP1]
864 830 restored
865 831 restore
866 832 RESTORE_V9WINDOW(%g6 + STACK_BIAS + MPCB_RWIN1)
867 833 save
868 834 b,a 2f
869 835 1:
870 836 rdpr %canrestore, %g1
871 837 brnz %g1, 3f
872 838 nop ! no trap, use restore directly
873 839 rdpr %cwp, %g1
874 840 wrpr %g1, %g7, %tstate ! needed by wbuf recovery code
875 841 ! hand craft the restore to avoid getting to TL > 2
876 842 rdpr %wstate, %g1
877 843 btst 1, %g1
878 844 beq 4f
879 845 nop
880 846 .global rtt_fill_start
881 847 rtt_fill_start:
882 848 FILL_32bit_rtt(ASI_AIUP)
883 849 ba,a 3f
884 850 4:
885 851 FILL_64bit_rtt(ASI_AIUP)
886 852 .global rtt_fill_end
887 853 rtt_fill_end:
888 854 3:
889 855 restore ! should not trap
890 856 2:
891 857 !
892 858 ! set %cleanwin to %canrestore
893 859 ! set %tstate to the correct %cwp
894 860 ! retry resumes user execution
895 861 !
896 862 rdpr %canrestore, %g1
897 863 wrpr %g0, %g1, %cleanwin
898 864 rdpr %cwp, %g1
899 865 wrpr %g1, %g7, %tstate
900 866 retry
901 867 /* NOTREACHED */
902 868 SET_SIZE(user_rtt)
903 869 SET_SIZE(utl0)
904 870
905 871 ENTRY_NP(ptl0)
906 872 SAVE_GLOBALS(%l7)
907 873 SAVE_OUTS(%l7)
908 874 CPU_ADDR(%g5, %g6)
909 875 ldn [%g5 + CPU_THREAD], THREAD_REG
910 876 wrpr %g0, PSTATE_KERN, %pstate ! enable ints
911 877 jmpl %l3, %o7 ! call trap handler
912 878 mov %l7, %o0
913 879 !
914 880 ALTENTRY(prom_rtt)
915 881 #ifdef TRAPTRACE
916 882 TRACE_RTT(TT_SYS_RTT_PROM, %l0, %l1, %l2, %l3)
917 883 #endif /* TRAPTRACE */
918 884 ba,pt %xcc, common_rtt
919 885 mov THREAD_REG, %l0
920 886 SET_SIZE(prom_rtt)
921 887 SET_SIZE(ptl0)
922 888
923 889 ENTRY_NP(ktl0)
924 890 /*
925 891 * THREAD_REG cannot be restored in fault_32bit_fn1 since
926 892 * sun4v cannot safely lower %gl then raise it again.
927 893 */
928 894 CPU_ADDR(%l0, %l1)
929 895 ldn [%l0 + CPU_THREAD], THREAD_REG
930 896 SAVE_GLOBALS(%l7)
931 897 SAVE_OUTS(%l7) ! for the call bug workaround
932 898 wrpr %g0, PSTATE_KERN, %pstate ! enable ints
933 899 jmpl %l3, %o7 ! call trap handler
934 900 mov %l7, %o0
935 901 !
936 902 ALTENTRY(priv_rtt)
937 903 #ifdef TRAPTRACE
938 904 TRACE_RTT(TT_SYS_RTT_PRIV, %l0, %l1, %l2, %l3)
939 905 #endif /* TRAPTRACE */
940 906 !
941 907 ! Register inputs
942 908 ! %l7 - regs
943 909 ! %l6 - trap %pil
944 910 !
945 911 ! Check for a kernel preemption request
946 912 !
947 913 ldn [THREAD_REG + T_CPU], %l0
948 914 ldub [%l0 + CPU_KPRUNRUN], %l0
949 915 brz,pt %l0, 1f
950 916 nop
951 917
952 918 !
953 919 ! Attempt to preempt
954 920 !
955 921 ldstub [THREAD_REG + T_PREEMPT_LK], %l0 ! load preempt lock
956 922 brnz,pn %l0, 1f ! can't call kpreempt if this thread is
957 923 nop ! already in it...
958 924
959 925 call kpreempt
960 926 mov %l6, %o0 ! pass original interrupt level
961 927
962 928 stub %g0, [THREAD_REG + T_PREEMPT_LK] ! nuke the lock
963 929
964 930 rdpr %pil, %o0 ! compare old pil level
965 931 cmp %l6, %o0 ! with current pil level
966 932 movg %xcc, %o0, %l6 ! if current is lower, drop old pil
967 933 1:
968 934 !
969 935 ! If we interrupted the mutex_owner_running() critical region we
 970 936 ! must reset the PC and nPC back to the beginning to prevent missed
 971 937 ! wakeups. See the comments in mutex_exit() for details.
972 938 !
973 939 ldn [%l7 + PC_OFF], %l0
974 940 set mutex_owner_running_critical_start, %l1
975 941 sub %l0, %l1, %l0
976 942 cmp %l0, mutex_owner_running_critical_size
977 943 bgeu,pt %xcc, 2f
978 944 mov THREAD_REG, %l0
979 945 stn %l1, [%l7 + PC_OFF] ! restart mutex_owner_running()
980 946 add %l1, 4, %l1
981 947 ba,pt %xcc, common_rtt
982 948 stn %l1, [%l7 + nPC_OFF]
983 949
984 950 2:
985 951 !
986 952 ! If we interrupted the mutex_exit() critical region we must reset
987 953 ! the PC and nPC back to the beginning to prevent missed wakeups.
988 954 ! See the comments in mutex_exit() for details.
989 955 !
990 956 ldn [%l7 + PC_OFF], %l0
991 957 set mutex_exit_critical_start, %l1
992 958 sub %l0, %l1, %l0
993 959 cmp %l0, mutex_exit_critical_size
994 960 bgeu,pt %xcc, common_rtt
995 961 mov THREAD_REG, %l0
996 962 stn %l1, [%l7 + PC_OFF] ! restart mutex_exit()
997 963 add %l1, 4, %l1
998 964 stn %l1, [%l7 + nPC_OFF]
999 965
1000 966 common_rtt:
1001 967 !
1002 968 ! restore globals and outs
1003 969 !
1004 970 rdpr %pstate, %l1
1005 971 wrpr %l1, PSTATE_IE, %pstate
1006 972 RESTORE_GLOBALS(%l7)
1007 973 ! switch to global set 1
1008 974 wrpr %g0, 1, %gl
1009 975 RESTORE_OUTS(%l7)
1010 976 !
1011 977 ! set %pil from max(old pil, cpu_base_spl)
1012 978 !
1013 979 ldn [%l0 + T_CPU], %l0
1014 980 ld [%l0 + CPU_BASE_SPL], %l0
1015 981 cmp %l6, %l0
1016 982 movg %xcc, %l6, %l0
1017 983 wrpr %g0, %l0, %pil
1018 984 !
1019 985 ! raise tl
1020 986 ! setup trap regs
1021 987 ! restore to window we originally trapped in
1022 988 !
1023 989 wrpr %g0, 1, %tl
1024 990 ldn [%l7 + PC_OFF], %g1
1025 991 ldn [%l7 + nPC_OFF], %g2
1026 992 ldx [%l7 + TSTATE_OFF], %l0
1027 993 andn %l0, TSTATE_CWP, %g7
1028 994 wrpr %g1, %tpc
1029 995 wrpr %g2, %tnpc
1030 996 rdpr %canrestore, %g1
1031 997 brnz %g1, 3f
1032 998 nop ! no trap, use restore directly
1033 999 rdpr %cwp, %g1
1034 1000 wrpr %g1, %g7, %tstate ! needed by wbuf recovery code
1035 1001 ! hand craft the restore to avoid getting to TL > 2
1036 1002 FILL_64bit_rtt(ASI_N)
1037 1003 3:
1038 1004 restore
1039 1005 !
↓ open down ↓ |
617 lines elided |
↑ open up ↑ |
1040 1006 ! set %tstate to the correct %cwp
1041 1007 ! retry resumes prom execution
1042 1008 !
1043 1009 rdpr %cwp, %g1
1044 1010 wrpr %g1, %g7, %tstate
1045 1011 retry
1046 1012 /* NOTREACHED */
1047 1013 SET_SIZE(priv_rtt)
1048 1014 SET_SIZE(ktl0)
1049 1015
1050 -#endif /* lint */
1051 -
1052 -#ifndef lint
1053 -
1054 1016 #ifdef DEBUG
1055 1017 	.seg	".data"
1056 1018 	.align	4
1057 1019
1058 1020 	.global	bad_g4_called
1059 1021 bad_g4_called:
1060 1022 	.word	0
1061 1023
1062 1024 sys_trap_wrong_pil:
1063 1025 	.asciz	"sys_trap: %g4(%d) is lower than %pil(%d)"
1064 1026 	.align	4
1065 1027 	.seg	".text"
1066 1028
/*
 * bad_g4 - DEBUG-only panic path taken when sys_trap is entered with a
 * requested PIL (%g4) below the current %pil.
 * In: %o1 = panic format string, %o2/%o3 = format arguments.
 * Shifts the arguments down one slot and calls panic(fmt, ...); never
 * returns.
 */
1067 1029 ENTRY_NP(bad_g4)
1068 1030 	mov %o1, %o0			! fmt string becomes arg 0
1069 1031 	mov %o2, %o1			! shift args down one slot
1070 1032 	call panic			! panic() does not return
1071 1033 	mov %o3, %o2			! (delay slot) last argument
1072 1034 SET_SIZE(bad_g4)
1073 1035 #endif /* DEBUG */
1074 -#endif /* lint */
1075 1036
1076 1037 /*
1077 1038 * sys_tl1_panic can be called by traps at tl1 which
1078 1039 * really want to panic, but need the rearrangement of
1079 1040 * the args as provided by this wrapper routine.
1080 1041 */
1081 -#if defined(lint)
1082 -
1083 -void
1084 -sys_tl1_panic(void)
1085 -{}
1086 -
1087 -#else /* lint */
/*
 * sys_tl1_panic - wrapper for TL1 traps that want to panic; rearranges
 * the args (%o1..%o3 -> %o0..%o2) into the form panic() expects.
 * Never returns.
 */
1088 1042 ENTRY_NP(sys_tl1_panic)
1089 1043 	mov %o1, %o0			! fmt string becomes arg 0
1090 1044 	mov %o2, %o1			! shift args down one slot
1091 1045 	call panic			! panic() does not return
1092 1046 	mov %o3, %o2			! (delay slot) last argument
1093 1047 SET_SIZE(sys_tl1_panic)
1094 -#endif /* lint */
1095 1048
1096 1049
1097 1050 /*
1098 1051 * Flush all windows to memory, except for the one we entered in.
1099 1052  * On SPARC V9 this is done with a single flushw instruction, which
1100 1053  * spills every register window other than the current one to the stack.
1101 1054 * This is used for context switching.
1102 1055 */
1103 1056
1104 -#if defined(lint)
1105 -
1106 -void
1107 -flush_windows(void)
1108 -{}
1109 -
1110 -#else /* lint */
1111 -
/*
 * flush_windows - spill all register windows below the current one to
 * their stack save areas via the V9 flushw instruction.  Leaf routine;
 * used at context switch time.
 */
1112 1057 ENTRY_NP(flush_windows)
1113 1058 	retl				! leaf return
1114 1059 	flushw				! (delay slot) spill windows
1115 1060 SET_SIZE(flush_windows)
1116 1061
1117 -#endif /* lint */
1118 -
1119 -#if defined(lint)
1120 -
1121 -void
1122 -debug_flush_windows(void)
1123 -{}
1124 -
1125 -#else /* lint */
1126 -
/*
 * debug_flush_windows - flush windows the slow way: perform nwindows
 * saves followed by the same number of restores, forcing spill/fill
 * traps to walk every window.  Debug aid only.
 */
1127 1062 ENTRY_NP(debug_flush_windows)
1128 1063 	set nwindows, %g1		! %g1 = number of register windows
1129 1064 	ld [%g1], %g1
1130 1065 	mov %g1, %g2			! %g2 = loop counter
1131 1066
1132 1067 1:
1133 1068 	save %sp, -WINDOWSIZE, %sp	! push a new window each pass
1134 1069 	brnz %g2, 1b
1135 1070 	dec %g2				! (delay slot)
1136 1071
1137 1072 	mov %g1, %g2			! reload counter for the unwind
↓ open down ↓ |
1 lines elided |
↑ open up ↑ |
1138 1073 2:
1139 1074 	restore				! pop back to original window
1140 1075 	brnz %g2, 2b
1141 1076 	dec %g2				! (delay slot)
1142 1077
1143 1078 	retl
1144 1079 	nop
1145 1080
1146 1081 SET_SIZE(debug_flush_windows)
1147 1082
1148 -#endif /* lint */
1149 -
1150 1083 /*
1151 1084 * flush user windows to memory.
1152 1085 */
1153 1086
1154 -#if defined(lint)
1155 -
1156 -void
1157 -flush_user_windows(void)
1158 -{}
1159 -
1160 -#else /* lint */
1161 -
/*
 * flush_user_windows - push all user register windows (%otherwin) out
 * to memory by save-ing past them, then restore back to the window we
 * entered in.  Counts the saves in %g2 so the unwind is exact.
 */
1162 1087 ENTRY_NP(flush_user_windows)
1163 1088 	rdpr %otherwin, %g1		! any user windows present?
1164 1089 	brz %g1, 3f			! no - done
1165 1090 	clr %g2				! (delay slot) save counter = 0
1166 1091 1:
1167 1092 	save %sp, -WINDOWSIZE, %sp	! spill one user window
1168 1093 	rdpr %otherwin, %g1
1169 1094 	brnz %g1, 1b			! loop until none remain
1170 1095 	add %g2, 1, %g2			! (delay slot) count the save
1171 1096 2:
1172 1097 	sub %g2, 1, %g2		! restore back to orig window
1173 1098 	brnz %g2, 2b
1174 1099 	restore				! (delay slot)
1175 1100 3:
1176 1101 	retl
1177 1102 	nop
1178 1103 SET_SIZE(flush_user_windows)
1179 1104
1180 -#endif /* lint */
1181 -
1182 1105 /*
1183 1106 * Throw out any user windows in the register file.
1184 1107 * Used by setregs (exec) to clean out old user.
1185 1108 * Used by sigcleanup to remove extraneous windows when returning from a
1186 1109 * signal.
1187 1110 */
1188 1111
1189 -#if defined(lint)
1190 -
1191 -void
1192 -trash_user_windows(void)
1193 -{}
1194 -
1195 -#else /* lint */
1196 -
/*
 * trash_user_windows - discard any user windows in the register file by
 * folding %otherwin into %cansave (instead of spilling them), and zero
 * the pcb window-buffer count.  Used by exec and sigcleanup.
 */
1197 1112 ENTRY_NP(trash_user_windows)
1198 1113 	rdpr %otherwin, %g1
1199 1114 	brz %g1, 3f ! no user windows?
1200 1115 	ldn [THREAD_REG + T_STACK], %g5	! (delay slot) %g5 = t_stack (mpcb)
1201 1116
1202 1117 	!
1203 1118 	! There are old user windows in the register file. We disable ints
1204 1119 	! and increment cansave so that we don't overflow on these windows.
1205 1120 	! Also, this sets up a nice underflow when first returning to the
1206 1121 	! new user.
1207 1122 	!
1208 1123 	rdpr %pstate, %g2
1209 1124 	wrpr %g2, PSTATE_IE, %pstate	! toggle IE off
1210 1125 	rdpr %cansave, %g3
1211 1126 	rdpr %otherwin, %g1 ! re-read in case of interrupt
↓ open down ↓ |
5 lines elided |
↑ open up ↑ |
1212 1127 	add %g3, %g1, %g3		! cansave += otherwin
1213 1128 	wrpr %g0, 0, %otherwin
1214 1129 	wrpr %g0, %g3, %cansave
1215 1130 	wrpr %g0, %g2, %pstate		! restore interrupt state
1216 1131 3:
1217 1132 	retl
1218 1133 	clr [%g5 + MPCB_WBCNT] ! zero window buffer cnt
1219 1134 SET_SIZE(trash_user_windows)
1220 1135
1221 1136
1222 -#endif /* lint */
1223 -
1224 1137 /*
1225 1138 * Setup g7 via the CPU data structure.
1226 1139 */
1227 -#if defined(lint)
1228 1140
1229 -struct scb *
1230 -set_tbr(struct scb *s)
1231 -{ return (s); }
1232 -
1233 -#else /* lint */
1234 -
/*
 * set_tbr - stub: there is no %tbr on this architecture, so trap to
 * stop a simulator run.  Argument is returned unchanged to the caller.
 */
1235 1141 ENTRY_NP(set_tbr)
1236 1142 	retl
1237 1143 	ta 72 ! no tbr, stop simulation
1238 1144 SET_SIZE(set_tbr)
1239 1145
1240 -#endif /* lint */
1241 1146
1242 -
1243 -#if defined(lint)
1244 -/*
1245 - * These need to be defined somewhere to lint and there is no "hicore.s"...
1246 - */
1247 -char etext[1], end[1];
1248 -#endif /* lint*/
1249 -
1250 -#if defined (lint)
1251 -
1252 -/* ARGSUSED */
1253 -void
1254 -ptl1_panic(u_int reason)
1255 -{}
1256 -
1257 -#else /* lint */
1258 -
/*
 * PTL1_SAVE_WINDOW - store the current window's locals and ins into the
 * 64-bit rwindow save area at RP, using the implicit %asi (ASI_MEM, so
 * the stores go to physical memory without risking MMU miss traps).
 */
1259 1147 #define PTL1_SAVE_WINDOW(RP) \
1260 1148 	stxa	%l0, [RP + RW64_LOCAL + (0 * RW64_LOCAL_INCR)] %asi; \
1261 1149 	stxa	%l1, [RP + RW64_LOCAL + (1 * RW64_LOCAL_INCR)] %asi; \
1262 1150 	stxa	%l2, [RP + RW64_LOCAL + (2 * RW64_LOCAL_INCR)] %asi; \
1263 1151 	stxa	%l3, [RP + RW64_LOCAL + (3 * RW64_LOCAL_INCR)] %asi; \
1264 1152 	stxa	%l4, [RP + RW64_LOCAL + (4 * RW64_LOCAL_INCR)] %asi; \
1265 1153 	stxa	%l5, [RP + RW64_LOCAL + (5 * RW64_LOCAL_INCR)] %asi; \
1266 1154 	stxa	%l6, [RP + RW64_LOCAL + (6 * RW64_LOCAL_INCR)] %asi; \
1267 1155 	stxa	%l7, [RP + RW64_LOCAL + (7 * RW64_LOCAL_INCR)] %asi; \
1268 1156 	stxa	%i0, [RP + RW64_IN + (0 * RW64_IN_INCR)] %asi; \
1269 1157 	stxa	%i1, [RP + RW64_IN + (1 * RW64_IN_INCR)] %asi; \
1270 1158 	stxa	%i2, [RP + RW64_IN + (2 * RW64_IN_INCR)] %asi; \
1271 1159 	stxa	%i3, [RP + RW64_IN + (3 * RW64_IN_INCR)] %asi; \
1272 1160 	stxa	%i4, [RP + RW64_IN + (4 * RW64_IN_INCR)] %asi; \
1273 1161 	stxa	%i5, [RP + RW64_IN + (5 * RW64_IN_INCR)] %asi; \
1274 1162 	stxa	%i6, [RP + RW64_IN + (6 * RW64_IN_INCR)] %asi; \
1275 1163 	stxa	%i7, [RP + RW64_IN + (7 * RW64_IN_INCR)] %asi
/* PTL1_NEXT_WINDOW - advance scr to the next rwindow save slot */
1276 1164 #define	PTL1_NEXT_WINDOW(scr)	\
1277 1165 	add	scr, RWIN64SIZE, scr
1278 1166
/*
 * PTL1_RESET_RWINDOWS - put the window state registers back to the
 * normal kernel configuration: all windows clean/savable, none on
 * loan (%canrestore = %otherwin = 0).
 */
1279 1167 #define	PTL1_RESET_RWINDOWS(scr)	\
1280 1168 	sethi	%hi(nwin_minus_one), scr; \
1281 1169 	ld	[scr + %lo(nwin_minus_one)], scr; \
1282 1170 	wrpr	scr, %cleanwin; \
1283 1171 	dec	scr; \
1284 1172 	wrpr	scr, %cansave; \
1285 1173 	wrpr	%g0, %canrestore; \
1286 1174 	wrpr	%g0, %otherwin
1287 1175
1288 1176 #define	PTL1_DCACHE_LINE_SIZE	4	/* small enough for all CPUs */
1289 1177
1290 1178 /*
1291 1179 * ptl1_panic is called when the kernel detects that it is in an invalid state
1292 1180 * and the trap level is greater than 0. ptl1_panic is responsible to save the
1293 1181 * current CPU state, to restore the CPU state to normal, and to call panic.
1294 1182 * The CPU state must be saved reliably without causing traps. ptl1_panic saves
1295 1183 * it in the ptl1_state structure, which is a member of the machcpu structure.
1296 1184 * In order to access the ptl1_state structure without causing traps, physical
1297 1185 * addresses are used so that we can avoid MMU miss traps. The restriction of
1298 1186 * physical memory accesses is that the ptl1_state structure must be on a single
1299 1187 * physical page. This is because (1) a single physical address for each
1300 1188 * ptl1_state structure is needed and (2) it simplifies physical address
1301 1189 * calculation for each member of the structure.
1302 1190 * ptl1_panic is a likely spot for stack overflows to wind up; thus, the current
1303 1191 * stack may not be usable. In order to call panic reliably in such a state,
1304 1192 * each CPU needs a dedicated ptl1 panic stack.
1305 1193 * CPU_ALLOC_SIZE, which is defined to be MMU_PAGESIZE, is used to allocate the
1306 1194 * cpu structure and a ptl1 panic stack. They are put together on the same page
1307 1195 * for memory space efficiency. The low address part is used for the cpu
1308 1196 * structure, and the high address part is for a ptl1 panic stack.
1309 1197 * The cpu_pa array holds the physical addresses of the allocated cpu structures,
1310 1198 * as the cpu array holds their virtual addresses.
1311 1199 *
1312 1200 * %g1 reason to be called
1313 1201 * %g2 broken
1314 1202 * %g3 broken
1315 1203 */
/*
 * In: %g1 = reason code (saved into the ptl1_state for panic to report).
 * Clobbers %g2/%g3; all other state is captured first via ASI_MEM
 * (physical) stores so no MMU miss traps can occur while saving.
 */
1316 1204 ENTRY_NP(ptl1_panic)
1317 1205 	!
1318 1206 	! increment the entry counter.
1319 1207 	! save CPU state if this is the first entry.
1320 1208 	!
1321 1209 	CPU_PADDR(%g2, %g3);
1322 1210 	add %g2, CPU_PTL1, %g2 ! pstate = &CPU->mcpu.ptl1_state
1323 1211 	wr %g0, ASI_MEM, %asi ! physical address access
1324 1212 	!
1325 1213 	! pstate->ptl1_entry_count++
1326 1214 	!
1327 1215 	lduwa [%g2 + PTL1_ENTRY_COUNT] %asi, %g3
1328 1216 	add %g3, 1, %g3
1329 1217 	stuwa %g3, [%g2 + PTL1_ENTRY_COUNT] %asi
1330 1218 	!
1331 1219 	! CPU state saving is skipped from the 2nd entry to ptl1_panic since we
1332 1220 	! do not want to clobber the state from the original failure. panic()
1333 1221 	! is responsible for handling multiple or recursive panics.
1334 1222 	!
1335 1223 	cmp %g3, 2 ! if (ptl1_entry_count >= 2)
1336 1224 	bge,pn %icc, state_saved ! goto state_saved
1337 1225 	add %g2, PTL1_REGS, %g3 ! %g3 = &pstate->ptl1_regs[0]
1338 1226 	!
1339 1227 	! save CPU state
1340 1228 	!
1341 1229 save_cpu_state:
1342 1230 	! save current global registers
1343 1231 	! so that all them become available for use
1344 1232 	!
1345 1233 	stxa %o1, [%g3 + PTL1_RWINDOW] %asi ! save %o1
1346 1234 	stxa %o2, [%g3 + PTL1_RWINDOW + 8] %asi ! save %o2
1347 1235 	stxa %o3, [%g3 + PTL1_RWINDOW + 16] %asi ! save %o3
1348 1236 	rdpr %gl, %o1			! walk every globals level down to 0
1349 1237 	add %g3, PTL1_GREGS, %o2 ! %o4 = &ptl1_gregs[0]
1350 1238 	mov %g3, %o3
1351 1239 6:
1352 1240 	stxa %o1, [%o2 + PTL1_GL] %asi
1353 1241 	stxa %g1, [%o2 + PTL1_G1] %asi
1354 1242 	stxa %g2, [%o2 + PTL1_G2] %asi
1355 1243 	stxa %g3, [%o2 + PTL1_G3] %asi
1356 1244 	stxa %g4, [%o2 + PTL1_G4] %asi
1357 1245 	stxa %g5, [%o2 + PTL1_G5] %asi
1358 1246 	stxa %g6, [%o2 + PTL1_G6] %asi
1359 1247 	stxa %g7, [%o2 + PTL1_G7] %asi
1360 1248 	add %o2, PTL1_GREGS_INCR, %o2
1361 1249 	deccc %o1
1362 1250 	brgez,a,pt %o1, 6b
1363 1251 	wrpr %o1, %gl			! (annulled delay slot) next level
1364 1252 	!
1365 1253 	! restore %g3, %o1, %o2 and %o3
1366 1254 	!
1367 1255 	mov %o3, %g3
1368 1256 	ldxa [%g3 + PTL1_RWINDOW] %asi, %o1
1369 1257 	ldxa [%g3 + PTL1_RWINDOW + 8] %asi, %o2
1370 1258 	ldxa [%g3 + PTL1_RWINDOW + 16] %asi, %o3
1371 1259 	!
1372 1260 	! %tl, %tt, %tstate, %tpc, %tnpc for each TL
1373 1261 	!
1374 1262 	rdpr %tl, %g1
1375 1263 	brz %g1, 1f ! if(trap_level == 0) -------+
1376 1264 	add %g3, PTL1_TRAP_REGS, %g4 ! %g4 = &ptl1_trap_regs[0]; !
1377 1265 0: ! -----------<----------+ !
1378 1266 	stwa %g1, [%g4 + PTL1_TL] %asi ! !
1379 1267 	rdpr %tt, %g5 ! !
1380 1268 	stwa %g5, [%g4 + PTL1_TT] %asi ! !
1381 1269 	rdpr %tstate, %g5 ! !
1382 1270 	stxa %g5, [%g4 + PTL1_TSTATE] %asi ! !
1383 1271 	rdpr %tpc, %g5 ! !
1384 1272 	stxa %g5, [%g4 + PTL1_TPC] %asi ! !
1385 1273 	rdpr %tnpc, %g5 ! !
1386 1274 	stxa %g5, [%g4 + PTL1_TNPC] %asi ! !
1387 1275 	add %g4, PTL1_TRAP_REGS_INCR, %g4 ! !
1388 1276 	deccc %g1 ! !
1389 1277 	bnz,a,pt %icc, 0b ! if(trap_level != 0) --+ !
1390 1278 	wrpr %g1, %tl !
1391 1279 1: ! ----------<----------------+
1392 1280 	!
1393 1281 	! %pstate, %pil, SOFTINT, (S)TICK
1394 1282 	! Pending interrupts is also cleared in order to avoid a recursive call
1395 1283 	! to ptl1_panic in case the interrupt handler causes a panic.
1396 1284 	!
1397 1285 	rdpr %pil, %g1
1398 1286 	stba %g1, [%g3 + PTL1_PIL] %asi
1399 1287 	rdpr %pstate, %g1
1400 1288 	stha %g1, [%g3 + PTL1_PSTATE] %asi
1401 1289 	rd SOFTINT, %g1
1402 1290 	sta %g1, [%g3 + PTL1_SOFTINT] %asi
1403 1291 	wr %g1, CLEAR_SOFTINT		! clear pending soft interrupts
1404 1292 	RD_TICKSTICK_FLAG(%g1, %g4, traptrace_use_stick)
1405 1293 	stxa %g1, [%g3 + PTL1_TICK] %asi
1406 1294
	! snapshot the MMU fault status area (D-side and I-side)
1407 1295 	MMU_FAULT_STATUS_AREA(%g1)
1408 1296 	ldx [%g1 + MMFSA_D_TYPE], %g4
1409 1297 	stxa %g4, [%g3 + PTL1_DMMU_TYPE] %asi
1410 1298 	ldx [%g1 + MMFSA_D_ADDR], %g4
1411 1299 	stxa %g4, [%g3 + PTL1_DMMU_ADDR] %asi
1412 1300 	ldx [%g1 + MMFSA_D_CTX], %g4
1413 1301 	stxa %g4, [%g3 + PTL1_DMMU_CTX] %asi
1414 1302 	ldx [%g1 + MMFSA_I_TYPE], %g4
1415 1303 	stxa %g4, [%g3 + PTL1_IMMU_TYPE] %asi
1416 1304 	ldx [%g1 + MMFSA_I_ADDR], %g4
1417 1305 	stxa %g4, [%g3 + PTL1_IMMU_ADDR] %asi
1418 1306 	ldx [%g1 + MMFSA_I_CTX], %g4
1419 1307 	stxa %g4, [%g3 + PTL1_IMMU_CTX] %asi
1420 1308
1421 1309 	!
1422 1310 	! Save register window state and register windows.
1423 1311 	!
1424 1312 	rdpr %cwp, %g1
1425 1313 	stba %g1, [%g3 + PTL1_CWP] %asi
1426 1314 	rdpr %wstate, %g1
1427 1315 	stba %g1, [%g3 + PTL1_WSTATE] %asi
1428 1316 	rdpr %otherwin, %g1
1429 1317 	stba %g1, [%g3 + PTL1_OTHERWIN] %asi
1430 1318 	rdpr %cleanwin, %g1
1431 1319 	stba %g1, [%g3 + PTL1_CLEANWIN] %asi
1432 1320 	rdpr %cansave, %g1
1433 1321 	stba %g1, [%g3 + PTL1_CANSAVE] %asi
1434 1322 	rdpr %canrestore, %g1
1435 1323 	stba %g1, [%g3 + PTL1_CANRESTORE] %asi
1436 1324
1437 1325 	PTL1_RESET_RWINDOWS(%g1)
1438 1326 	clr %g1
1439 1327 	wrpr %g1, %cwp			! start at window 0
1440 1328 	add %g3, PTL1_RWINDOW, %g4 ! %g4 = &ptl1_rwindow[0];
1441 1329
1442 1330 3: PTL1_SAVE_WINDOW(%g4) ! <-------------+
1443 1331 	inc %g1 !
1444 1332 	cmp %g1, MAXWIN !
1445 1333 	bgeu,pn %icc, 5f !
1446 1334 	wrpr %g1, %cwp !
1447 1335 	rdpr %cwp, %g2 !
1448 1336 	cmp %g1, %g2 ! saturation check
1449 1337 	be,pt %icc, 3b !
1450 1338 	PTL1_NEXT_WINDOW(%g4) ! ------+
1451 1339 5:
1452 1340 	!
1453 1341 	! most crucial CPU state was saved.
1454 1342 	! Proceed to go back to TL = 0.
1455 1343 	!
1456 1344 state_saved:
1457 1345 	wrpr %g0, 1, %tl		! TL=1 so "done" below lands at TL=0
1458 1346 	wrpr %g0, 1, %gl
1459 1347 	wrpr %g0, PIL_MAX, %pil		! block all maskable interrupts
1460 1348 	!
1461 1349 	PTL1_RESET_RWINDOWS(%g1)
1462 1350 	wrpr %g0, %cwp
1463 1351 	wrpr %g0, %cleanwin
1464 1352 	wrpr %g0, WSTATE_KERN, %wstate
1465 1353 	!
1466 1354 	! Set pcontext to run kernel.
1467 1355 	!
1468 1356 	set MMU_PCONTEXT, %g1
1469 1357 	stxa %g0, [%g1]ASI_MMU_CTX
1470 1358 	membar #Sync
1471 1359
1472 1360 	rdpr %cwp, %g1
1473 1361 	set TSTATE_KERN, %g3
1474 1362 	wrpr %g3, %g1, %tstate
1475 1363 	set ptl1_panic_tl0, %g3
1476 1364 	wrpr %g0, %g3, %tnpc		! "done" resumes at ptl1_panic_tl0
1477 1365 	done ! go to -->-+ TL:1
1478 1366 	!
1479 1367 ptl1_panic_tl0: ! ----<-----+ TL:0
1480 1368 	CPU_ADDR(%l0, %l1) ! %l0 = cpu[cpuid]
1481 1369 	add %l0, CPU_PTL1, %l1 ! %l1 = &CPU->mcpu.ptl1_state
1482 1370 	!
1483 1371 	! prepare to call panic()
1484 1372 	!
1485 1373 	ldn [%l0 + CPU_THREAD], THREAD_REG ! restore %g7
1486 1374 	ldn [%l1 + PTL1_STKTOP], %l2 ! %sp = ptl1_stktop
1487 1375 	sub %l2, SA(MINFRAME) + STACK_BIAS, %sp	! dedicated ptl1 panic stack
1488 1376 	clr %fp ! no frame below this window
1489 1377 	clr %i7				! no return address either
↓ open down ↓ |
221 lines elided |
↑ open up ↑ |
1490 1378 	!
1491 1379 	! enable limited interrupts
1492 1380 	!
1493 1381 	wrpr %g0, CLOCK_LEVEL, %pil
1494 1382 	wrpr %g0, PSTATE_KERN, %pstate
1495 1383 	!
1496 1384 	ba,pt %xcc, ptl1_panic_handler
1497 1385 	mov %l1, %o0			! (delay slot) arg = &ptl1_state
1498 1386 	/*NOTREACHED*/
1499 1387 SET_SIZE(ptl1_panic)
1500 -#endif /* lint */
1501 1388
1502 1389 #ifdef PTL1_PANIC_DEBUG
1503 -#if defined (lint)
1390 +
1504 1391 /*
1505 1392 * ptl1_recurse() calls itself a number of times to either set up a known
1506 - * stack or to cause a kernel stack overflow. It decrements the arguments
1393 + * stack or to cause a kernel stack overflow. It decrements the arguments
1507 1394 * on each recursion.
1508 1395 * It's called by #ifdef PTL1_PANIC_DEBUG code in startup.c to set the
1509 1396 * registers to a known state to facilitate debugging.
1510 1397 */
1511 -
1512 -/* ARGSUSED */
1513 -void
1514 -ptl1_recurse(int count_threshold, int trap_threshold)
1515 -{}
1516 -
1517 -#else /* lint */
1518 -
/*
 * ptl1_recurse(count_threshold, trap_threshold) - debug aid (see header
 * comment above): recurses %i0 times, loading recognizable values into
 * the registers, and traps to ptl1_panic when %i1 reaches zero.
 */
1519 1398 ENTRY_NP(ptl1_recurse)
1520 1399 	save %sp, -SA(MINFRAME), %sp
1521 1400
1522 1401 	set ptl1_recurse_call, %o7
1523 1402 	cmp %o7, %i7 ! if ptl1_recurse is called
1524 1403 	be,pt %icc, 0f ! by itself, then skip
1525 1404 	nop ! register initialization
1526 1405
1527 1406 /*
1528 1407 * Initialize Out Registers to Known Values
1529 1408 */
1530 1409 	set 0x01000, %l0 ! %i0 is the ...
1531 1410 	! recursion_depth_count
1532 1411 	sub %i0, 1, %o0;
1533 1412 	sub %i1, 1, %o1;
1534 1413 	add %l0, %o0, %o2;
1535 1414 	add %l0, %o2, %o3;
1536 1415 	add %l0, %o3, %o4;
1537 1416 	add %l0, %o4, %o5;
1538 1417 	ba,a 1f
1539 1418 	nop
1540 1419
1541 1420 0: /* Outs = Ins - 1 */
1542 1421 	sub %i0, 1, %o0;
1543 1422 	sub %i1, 1, %o1;
1544 1423 	sub %i2, 1, %o2;
1545 1424 	sub %i3, 1, %o3;
1546 1425 	sub %i4, 1, %o4;
1547 1426 	sub %i5, 1, %o5;
1548 1427
1549 1428 /* Locals = Ins + 1 */
1550 1429 1: add %i0, 1, %l0;
1551 1430 	add %i1, 1, %l1;
1552 1431 	add %i2, 1, %l2;
1553 1432 	add %i3, 1, %l3;
1554 1433 	add %i4, 1, %l4;
1555 1434 	add %i5, 1, %l5;
1556 1435
	! fill the globals with a distinctive ramp of values
1557 1436 	set 0x0100000, %g5
1558 1437 	add %g5, %g0, %g1
1559 1438 	add %g5, %g1, %g2
1560 1439 	add %g5, %g2, %g3
1561 1440 	add %g5, %g3, %g4
1562 1441 	add %g5, %g4, %g5
1563 1442
1564 1443 	brz,pn %i1, ptl1_recurse_trap ! if trpp_count == 0) {
1565 1444 	nop ! trap to ptl1_panic
1566 1445 	!
1567 1446 	brz,pn %i0, ptl1_recure_exit ! if(depth_count == 0) {
1568 1447 	nop ! skip recursive call
1569 1448 	! }
1570 1449 ptl1_recurse_call:
1571 1450 	call ptl1_recurse
1572 1451 	nop
↓ open down ↓ |
44 lines elided |
↑ open up ↑ |
1573 1452
1574 1453 ptl1_recure_exit:
1575 1454 	ret
1576 1455 	restore
1577 1456
1578 1457 ptl1_recurse_trap:
1579 1458 	ta PTL1_DEBUG_TRAP; ! Trap Always to ptl1_panic()
1580 1459 	nop ! NOTREACHED
1581 1460 SET_SIZE(ptl1_recurse)
1582 1461
1583 -#endif /* lint */
1584 -
1585 -#if defined (lint)
1586 -
1587 -/* ARGSUSED */
1588 -void
1589 -ptl1_panic_xt(int arg1, int arg2)
1590 -{}
1591 -
1592 -#else /* lint */
1593 1462 /*
1594 1463 * Asm function to handle a cross trap to call ptl1_panic()
1595 1464 */
/*
 * ptl1_panic_xt - cross-trap target that panics via ptl1_panic with
 * reason PTL1_BAD_DEBUG; incoming args are ignored.
 */
1596 1465 ENTRY_NP(ptl1_panic_xt)
1597 1466 	ba ptl1_panic
1598 1467 	mov PTL1_BAD_DEBUG, %g1		! (delay slot) reason code
1599 1468 SET_SIZE(ptl1_panic_xt)
1600 1469
1601 -#endif /* lint */
1602 -
1603 1470 #endif /* PTL1_PANIC_DEBUG */
1604 1471
1605 1472 #ifdef TRAPTRACE
1606 -#if defined (lint)
1607 1473
1608 -void
1609 -trace_ptr_panic(void)
1610 -{
1611 -}
1612 -
1613 -#else /* lint */
1614 -
/*
 * trace_ptr_panic - called when a trap-trace pointer assertion fails
 * (%g1 = failing %pc).  Freezes tracing, records the pc, and panics
 * with reason PTL1_BAD_TRACE_PTR.  TRAPTRACE kernels only.
 */
1615 1474 ENTRY_NP(trace_ptr_panic)
1616 1475 	!
1617 1476 	! freeze the trap trace to disable the assertions. Otherwise,
1618 1477 	! ptl1_panic is likely to be repeatedly called from there.
1619 1478 	! %g2 and %g3 are used as scratch registers in ptl1_panic.
1620 1479 	!
1621 1480 	mov 1, %g3
1622 1481 	sethi %hi(trap_freeze), %g2
1623 1482 	st %g3, [%g2 + %lo(trap_freeze)]	! trap_freeze = 1
1624 1483 	!
1625 1484 	! %g1 contains the %pc address where an assertion was failed.
↓ open down ↓ |
1 lines elided |
↑ open up ↑ |
1626 1485 	! save it in trap_freeze_pc for a debugging hint if there is
1627 1486 	! no value saved in it.
1628 1487 	!
1629 1488 	set trap_freeze_pc, %g2
1630 1489 	casn [%g2], %g0, %g1		! store only if still zero
1631 1490
1632 1491 	ba ptl1_panic
1633 1492 	mov PTL1_BAD_TRACE_PTR, %g1	! (delay slot) reason code
1634 1493 SET_SIZE(trace_ptr_panic)
1635 1494
1636 -#endif /* lint */
1637 1495 #endif /* TRAPTRACE */
1638 1496
1639 1497 /*
1640 1498 * The interface for a 32-bit client program that takes over the TBA
1641 1499 * calling the 64-bit romvec OBP.
1642 1500 */
1643 1501
1644 -#if defined(lint)
1645 -
1646 -/* ARGSUSED */
1647 -int
1648 -client_handler(void *cif_handler, void *arg_array)
1649 -{ return 0; }
1650 -
1651 -#else /* lint */
1652 -
/*
 * client_handler(cif_handler, arg_array) - trampoline from a 32-bit
 * client program into the 64-bit OBP romvec: if the kernel has taken
 * over the TBA, switch %wstate to WSTATE_KMIX; always clear PSTATE_AM
 * around the call so 64-bit addressing is in effect.  Returns the cif
 * handler's result in %o0.
 */
1653 1502 ENTRY(client_handler)
1654 1503 	save %sp, -SA64(MINFRAME64), %sp ! 32 bit frame, 64 bit sized
1655 1504 	sethi %hi(tba_taken_over), %l2
1656 1505 	ld [%l2+%lo(tba_taken_over)], %l3
1657 1506 	brz %l3, 1f ! is the tba_taken_over = 1 ?
1658 1507 	rdpr %wstate, %l5 ! save %wstate
1659 1508 	andn %l5, WSTATE_MASK, %l6
1660 1509 	wrpr %l6, WSTATE_KMIX, %wstate
1661 1510 1: mov %i1, %o0			! arg_array -> first arg
1662 1511 1: rdpr %pstate, %l4 ! Get the present pstate value
1663 1512 	andn %l4, PSTATE_AM, %l6
1664 1513 	wrpr %l6, 0, %pstate ! Set PSTATE_AM = 0
↓ open down ↓ |
2 lines elided |
↑ open up ↑ |
1665 1514 	jmpl %i0, %o7 ! Call cif handler
1666 1515 	nop
1667 1516 	wrpr %l4, 0, %pstate ! restore pstate
1668 1517 	brz %l3, 1f ! is the tba_taken_over = 1
1669 1518 	nop
1670 1519 	wrpr %g0, %l5, %wstate ! restore wstate
1671 1520 1: ret ! Return result ...
1672 1521 	restore %o0, %g0, %o0 ! delay; result in %o0
1673 1522 SET_SIZE(client_handler)
1674 1523
1675 -#endif /* lint */
1676 -
1677 -#if defined(lint)
1678 -
1679 -/*ARGSUSED*/
1680 -void
1681 -panic_bad_hcall(uint64_t err, uint64_t hcall)
1682 -{}
1683 -
1684 -#else /* lint */
1685 -
1686 1524 	.seg	".text"
1687 1525 bad_hcall_error:
1688 1526 	.asciz	"hypervisor call 0x%x returned an unexpected error %d"
1689 1527
1690 1528 /*
1691 1529 * panic_bad_hcall is called when a hcall returns
1692 1530 * unexpected error
1693 1531 * %o0 error number
1694 1532 * %o1 hcall number
1695 1533 */
1696 1534
1697 1535 ENTRY(panic_bad_hcall)
1698 1536 	mov %o0, %o2			! error number -> 2nd fmt arg
1699 1537 	sethi %hi(bad_hcall_error), %o0
1700 1538 	or %o0, %lo(bad_hcall_error), %o0	! %o0 = fmt string
1701 1539 	mov %o3, %o3			! NOTE(review): see delay slot below
1702 1540 	call panic			! panic(fmt, hcall, err)
1703 1541 	mov %o3, %o7			! (delay slot) restore caller's %o7
					!  so panic's stack trace shows our
					!  caller, not this wrapper
1704 1542 SET_SIZE(panic_bad_hcall)
1705 1543
1706 -#endif /* lint */
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX