Print this page
de-linting of .s files
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/sparc/v9/ml/sparcv9_subr.s
+++ new/usr/src/uts/sparc/v9/ml/sparcv9_subr.s
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 */
25 25
26 26 /*
27 27 * General assembly language routines.
↓ open down ↓ |
27 lines elided |
↑ open up ↑ |
28 28 * It is the intent of this file to contain routines that are
29 29 * independent of the specific kernel architecture, and those that are
30 30 * common across kernel architectures.
31 31 * As architectures diverge, and implementations of specific
32 32 * architecture-dependent routines change, the routines should be moved
33 33 * from this file into the respective ../`arch -k`/subr.s file.
34 34 * Or, if you want to be really nice, move them to a file whose
35 35 * name has something to do with the routine you are moving.
36 36 */
37 37
38 -#if defined(lint)
39 -#include <sys/types.h>
40 -#include <sys/scb.h>
41 -#include <sys/systm.h>
42 -#include <sys/regset.h>
43 -#include <sys/sunddi.h>
44 -#include <sys/lockstat.h>
45 -#include <sys/dtrace.h>
46 -#include <sys/ftrace.h>
47 -#endif /* lint */
48 -
49 38 #include <sys/asm_linkage.h>
50 39 #include <sys/privregs.h>
51 40 #include <sys/machparam.h> /* To get SYSBASE and PAGESIZE */
52 41 #include <sys/machthread.h>
53 42 #include <sys/clock.h>
54 43 #include <sys/psr_compat.h>
55 44 #include <sys/isa_defs.h>
56 45 #include <sys/dditypes.h>
57 46 #include <sys/panic.h>
58 47 #include <sys/machlock.h>
59 48 #include <sys/ontrap.h>
60 49
61 -#if !defined(lint)
62 50 #include "assym.h"
63 51
64 52 .seg ".text"
65 53 .align 4
66 54
67 55 /*
68 56 * Macro to raise processor priority level.
69 57 * Avoid dropping processor priority if already at high level.
70 58 * Also avoid going below CPU->cpu_base_spl, which could've just been set by
71 59 * a higher-level interrupt thread that just blocked.
72 60 *
73 61 * level can be %o0 (not other regs used here) or a constant.
74 62 */
75 63 #define RAISE(level) \
76 64 rdpr %pil, %o1; /* get current PIL */ \
77 65 cmp %o1, level; /* is PIL high enough? */ \
78 66 bge 1f; /* yes, return */ \
79 67 nop; \
80 68 wrpr %g0, PIL_MAX, %pil; /* freeze CPU_BASE_SPL */ \
81 69 ldn [THREAD_REG + T_CPU], %o2; \
82 70 ld [%o2 + CPU_BASE_SPL], %o2; \
83 71 cmp %o2, level; /* compare new to base */ \
84 72 movl %xcc, level, %o2; /* use new if base lower */ \
85 73 wrpr %g0, %o2, %pil; \
86 74 1: \
87 75 retl; \
88 76 mov %o1, %o0 /* return old PIL */
89 77
90 78 /*
91 79 * Macro to raise processor priority level to level >= DISP_LEVEL.
92 80 * Doesn't require comparison to CPU->cpu_base_spl.
93 81 *
94 82 * newpil can be %o0 (not other regs used here) or a constant.
95 83 */
96 84 #define RAISE_HIGH(level) \
97 85 rdpr %pil, %o1; /* get current PIL */ \
98 86 cmp %o1, level; /* is PIL high enough? */ \
99 87 bge 1f; /* yes, return */ \
100 88 nop; \
101 89 wrpr %g0, level, %pil; /* use chosen value */ \
102 90 1: \
103 91 retl; \
104 92 mov %o1, %o0 /* return old PIL */
105 93
106 94 /*
107 95 * Macro to set the priority to a specified level.
108 96 * Avoid dropping the priority below CPU->cpu_base_spl.
109 97 *
110 98 * newpil can be %o0 (not other regs used here) or a constant with
111 99 * the new PIL in the PSR_PIL field of the level arg.
112 100 */
113 101 #define SETPRI(level) \
114 102 rdpr %pil, %o1; /* get current PIL */ \
115 103 wrpr %g0, PIL_MAX, %pil; /* freeze CPU_BASE_SPL */ \
116 104 ldn [THREAD_REG + T_CPU], %o2; \
117 105 ld [%o2 + CPU_BASE_SPL], %o2; \
118 106 cmp %o2, level; /* compare new to base */ \
119 107 movl %xcc, level, %o2; /* use new if base lower */ \
120 108 wrpr %g0, %o2, %pil; \
121 109 retl; \
122 110 mov %o1, %o0 /* return old PIL */
123 111
124 112 /*
125 113 * Macro to set the priority to a specified level at or above LOCK_LEVEL.
126 114 * Doesn't require comparison to CPU->cpu_base_spl.
↓ open down ↓ |
55 lines elided |
↑ open up ↑ |
127 115 *
128 116 * newpil can be %o0 (not other regs used here) or a constant with
129 117 * the new PIL in the PSR_PIL field of the level arg.
130 118 */
131 119 #define SETPRI_HIGH(level) \
132 120 rdpr %pil, %o1; /* get current PIL */ \
133 121 wrpr %g0, level, %pil; \
134 122 retl; \
135 123 mov %o1, %o0 /* return old PIL */
136 124
137 -#endif /* lint */
138 -
139 125 /*
140 126 * Berkeley 4.3 introduced symbolically named interrupt levels
141 127 * as a way to deal with priority in a machine independent fashion.
142 128 * Numbered priorities are machine specific, and should be
143 129 * discouraged where possible.
144 130 *
145 131 * Note, for the machine specific priorities there are
146 132 * examples listed for devices that use a particular priority.
147 133 * It should not be construed that all devices of that
148 134 * type should be at that priority. It is currently where
149 135 * the current devices fit into the priority scheme based
150 136 * upon time criticalness.
151 137 *
152 138 * The underlying assumption of these assignments is that
153 139 * SPARC9 IPL 10 is the highest level from which a device
154 140 * routine can call wakeup. Devices that interrupt from higher
155 141 * levels are restricted in what they can do. If they need
156 142 * kernels services they should schedule a routine at a lower
157 143 * level (via software interrupt) to do the required
158 144 * processing.
159 145 *
160 146 * Examples of this higher usage:
161 147 * Level Usage
162 148 * 15 Asynchronous memory exceptions
163 149 * 14 Profiling clock (and PROM uart polling clock)
164 150 * 13 Audio device
165 151 * 12 Serial ports
166 152 * 11 Floppy controller
167 153 *
168 154 * The serial ports request lower level processing on level 6.
169 155 * Audio and floppy request lower level processing on level 4.
170 156 *
↓ open down ↓ |
22 lines elided |
↑ open up ↑ |
171 157 * Also, almost all splN routines (where N is a number or a
172 158 * mnemonic) will do a RAISE(), on the assumption that they are
173 159 * never used to lower our priority.
174 160 * The exceptions are:
175 161 * spl8() Because you can't be above 15 to begin with!
176 162 * splzs() Because this is used at boot time to lower our
177 163 * priority, to allow the PROM to poll the uart.
178 164 * spl0() Used to lower priority to 0.
179 165 */
180 166
181 -#if defined(lint)
182 -
183 -int spl0(void) { return (0); }
184 -int spl6(void) { return (0); }
185 -int spl7(void) { return (0); }
186 -int spl8(void) { return (0); }
187 -int splhi(void) { return (0); }
188 -int splhigh(void) { return (0); }
189 -int splzs(void) { return (0); }
190 -
191 -#else /* lint */
192 -
193 167 /* locks out all interrupts, including memory errors */
194 168 ENTRY(spl8)
195 169 SETPRI_HIGH(15)
196 170 SET_SIZE(spl8)
197 171
198 172 /* just below the level that profiling runs */
199 173 ENTRY(spl7)
200 174 RAISE_HIGH(13)
201 175 SET_SIZE(spl7)
202 176
203 177 /* sun specific - highest priority onboard serial i/o zs ports */
204 178 ENTRY(splzs)
205 179 SETPRI_HIGH(12) /* Can't be a RAISE, as it's used to lower us */
206 180 SET_SIZE(splzs)
207 181
208 182 /*
209 183 * should lock out clocks and all interrupts,
210 184 * as you can see, there are exceptions
211 185 */
212 186 ENTRY(splhi)
213 187 ALTENTRY(splhigh)
214 188 ALTENTRY(spl6)
215 189 ALTENTRY(i_ddi_splhigh)
216 190 RAISE_HIGH(DISP_LEVEL)
↓ open down ↓ |
14 lines elided |
↑ open up ↑ |
217 191 SET_SIZE(i_ddi_splhigh)
218 192 SET_SIZE(spl6)
219 193 SET_SIZE(splhigh)
220 194 SET_SIZE(splhi)
221 195
222 196 /* allow all interrupts */
223 197 ENTRY(spl0)
224 198 SETPRI(0)
225 199 SET_SIZE(spl0)
226 200
227 -#endif /* lint */
228 -
229 201 /*
230 202 * splx - set PIL back to that indicated by the old %pil passed as an argument,
231 203 * or to the CPU's base priority, whichever is higher.
232 204 */
233 205
234 -#if defined(lint)
235 -
236 -/* ARGSUSED */
237 -void
238 -splx(int level)
239 -{}
240 -
241 -#else /* lint */
242 -
243 206 ENTRY(splx)
244 207 ALTENTRY(i_ddi_splx)
245 208 SETPRI(%o0) /* set PIL */
246 209 SET_SIZE(i_ddi_splx)
247 210 SET_SIZE(splx)
248 211
249 -#endif /* level */
250 -
251 212 /*
252 213 * splr()
253 214 *
254 215 * splr is like splx but will only raise the priority and never drop it
255 216 * Be careful not to set priority lower than CPU->cpu_base_pri,
256 217 * even though it seems we're raising the priority, it could be set higher
257 218 * at any time by an interrupt routine, so we must block interrupts and
258 219 * look at CPU->cpu_base_pri.
259 220 */
260 221
261 -#if defined(lint)
262 -
263 -/* ARGSUSED */
264 -int
265 -splr(int level)
266 -{ return (0); }
267 -
268 -#else /* lint */
269 222 ENTRY(splr)
270 223 RAISE(%o0)
271 224 SET_SIZE(splr)
272 225
273 -#endif /* lint */
274 -
275 226 /*
276 227 * on_fault()
277 228 * Catch lofault faults. Like setjmp except it returns one
278 229 * if code following causes uncorrectable fault. Turned off
279 230 * by calling no_fault().
280 231 */
281 232
282 -#if defined(lint)
283 -
284 -/* ARGSUSED */
285 -int
286 -on_fault(label_t *ljb)
287 -{ return (0); }
288 -
289 -#else /* lint */
290 -
291 233 ENTRY(on_fault)
292 234 membar #Sync ! sync error barrier (see copy.s)
293 235 stn %o0, [THREAD_REG + T_ONFAULT]
294 236 set catch_fault, %o1
295 237 b setjmp ! let setjmp do the rest
296 238 stn %o1, [THREAD_REG + T_LOFAULT] ! put catch_fault in t_lofault
297 239
298 240 catch_fault:
299 241 save %sp, -SA(WINDOWSIZE), %sp ! goto next window so that we can rtn
300 242 ldn [THREAD_REG + T_ONFAULT], %o0
301 243 membar #Sync ! sync error barrier
302 244 stn %g0, [THREAD_REG + T_ONFAULT] ! turn off onfault
303 245 b longjmp ! let longjmp do the rest
304 246 stn %g0, [THREAD_REG + T_LOFAULT] ! turn off lofault
305 247 SET_SIZE(on_fault)
306 248
307 -#endif /* lint */
308 -
309 249 /*
310 250 * no_fault()
311 251 * turn off fault catching.
312 252 */
313 253
314 -#if defined(lint)
315 -
316 -void
317 -no_fault(void)
318 -{}
319 -
320 -#else /* lint */
321 -
322 254 ENTRY(no_fault)
323 255 membar #Sync ! sync error barrier
324 256 stn %g0, [THREAD_REG + T_ONFAULT]
325 257 retl
326 258 stn %g0, [THREAD_REG + T_LOFAULT] ! turn off lofault
327 259 SET_SIZE(no_fault)
328 260
329 -#endif /* lint */
330 -
331 261 /*
332 262 * Default trampoline code for on_trap() (see <sys/ontrap.h>). On sparcv9,
333 263 * the trap code will complete trap processing but reset the return %pc to
334 264 * ot_trampoline, which will by default be set to the address of this code.
335 265 * We longjmp(&curthread->t_ontrap->ot_jmpbuf) to return back to on_trap().
336 266 */
337 -#if defined(lint)
338 267
339 -void
340 -on_trap_trampoline(void)
341 -{}
342 -
343 -#else /* lint */
344 -
345 268 ENTRY(on_trap_trampoline)
346 269 ldn [THREAD_REG + T_ONTRAP], %o0
347 270 b longjmp
348 271 add %o0, OT_JMPBUF, %o0
349 272 SET_SIZE(on_trap_trampoline)
350 273
351 -#endif /* lint */
352 -
353 274 /*
354 275 * Push a new element on to the t_ontrap stack. Refer to <sys/ontrap.h> for
355 276 * more information about the on_trap() mechanism. If the on_trap_data is the
356 277 * same as the topmost stack element, we just modify that element.
357 278 * On UltraSPARC, we need to issue a membar #Sync before modifying t_ontrap.
358 279 * The issue barrier is defined to force all deferred errors to complete before
359 280 * we go any further. We want these errors to be processed before we modify
360 281 * our current error protection.
361 282 */
362 -#if defined(lint)
363 283
364 -/*ARGSUSED*/
365 -int
366 -on_trap(on_trap_data_t *otp, uint_t prot)
367 -{ return (0); }
368 -
369 -#else /* lint */
370 -
371 284 ENTRY(on_trap)
372 285 membar #Sync ! force error barrier
373 286 sth %o1, [%o0 + OT_PROT] ! ot_prot = prot
374 287 sth %g0, [%o0 + OT_TRAP] ! ot_trap = 0
375 288 set on_trap_trampoline, %o2 ! %o2 = &on_trap_trampoline
376 289 stn %o2, [%o0 + OT_TRAMPOLINE] ! ot_trampoline = %o2
377 290 stn %g0, [%o0 + OT_HANDLE] ! ot_handle = NULL
378 291 ldn [THREAD_REG + T_ONTRAP], %o2 ! %o2 = curthread->t_ontrap
379 292 cmp %o0, %o2 ! if (otp == %o2)
380 293 be 0f ! don't modify t_ontrap
381 294 stn %g0, [%o0 + OT_PAD1] ! delay - ot_pad1 = NULL
382 295
383 296 stn %o2, [%o0 + OT_PREV] ! ot_prev = t_ontrap
384 297 membar #Sync ! force error barrier
385 298 stn %o0, [THREAD_REG + T_ONTRAP] ! t_ontrap = otp
386 299
387 300 0: b setjmp ! let setjmp do the rest
388 301 add %o0, OT_JMPBUF, %o0 ! %o0 = &ot_jmpbuf
389 302 SET_SIZE(on_trap)
390 303
391 -#endif /* lint */
392 -
393 304 /*
394 305 * Setjmp and longjmp implement non-local gotos using state vectors
395 306 * type label_t.
396 307 */
397 308
398 -#if defined(lint)
399 -
400 -/* ARGSUSED */
401 -int
402 -setjmp(label_t *lp)
403 -{ return (0); }
404 -
405 -#else /* lint */
406 -
407 309 ENTRY(setjmp)
408 310 stn %o7, [%o0 + L_PC] ! save return address
409 311 stn %sp, [%o0 + L_SP] ! save stack ptr
410 312 retl
411 313 clr %o0 ! return 0
412 314 SET_SIZE(setjmp)
413 315
414 -#endif /* lint */
415 316
416 -
417 -#if defined(lint)
418 -
419 -/* ARGSUSED */
420 -void
421 -longjmp(label_t *lp)
422 -{}
423 -
424 -#else /* lint */
425 -
426 317 ENTRY(longjmp)
427 318 !
428 319 ! The following save is required so that an extra register
429 320 ! window is flushed. Flushw flushes nwindows-2
430 321 ! register windows. If setjmp and longjmp are called from
431 322 ! within the same window, that window will not get pushed
432 323 ! out onto the stack without the extra save below. Tail call
433 324 ! optimization can lead to callers of longjmp executing
434 325 ! from a window that could be the same as the setjmp,
435 326 ! thus the need for the following save.
436 327 !
437 328 save %sp, -SA(MINFRAME), %sp
438 329 flushw ! flush all but this window
439 330 ldn [%i0 + L_PC], %i7 ! restore return addr
440 331 ldn [%i0 + L_SP], %fp ! restore sp for dest on foreign stack
441 332 ret ! return 1
442 333 restore %g0, 1, %o0 ! takes underflow, switches stacks
443 334 SET_SIZE(longjmp)
444 335
445 -#endif /* lint */
446 -
447 336 /*
448 337 * movtuc(length, from, to, table)
449 338 *
450 339 * VAX movtuc instruction (sort of).
451 340 */
452 341
453 -#if defined(lint)
454 -
455 -/*ARGSUSED*/
456 -int
457 -movtuc(size_t length, u_char *from, u_char *to, u_char table[])
458 -{ return (0); }
459 -
460 -#else /* lint */
461 -
462 342 ENTRY(movtuc)
463 343 tst %o0
464 344 ble,pn %ncc, 2f ! check length
465 345 clr %o4
466 346
467 347 ldub [%o1 + %o4], %g1 ! get next byte in string
468 348 0:
469 349 ldub [%o3 + %g1], %g1 ! get corresponding table entry
470 350 tst %g1 ! escape char?
471 351 bnz 1f
472 352 stb %g1, [%o2 + %o4] ! delay slot, store it
473 353
474 354 retl ! return (bytes moved)
475 355 mov %o4, %o0
↓ open down ↓ |
4 lines elided |
↑ open up ↑ |
476 356 1:
477 357 inc %o4 ! increment index
478 358 cmp %o4, %o0 ! index < length ?
479 359 bl,a,pt %ncc, 0b
480 360 ldub [%o1 + %o4], %g1 ! delay slot, get next byte in string
481 361 2:
482 362 retl ! return (bytes moved)
483 363 mov %o4, %o0
484 364 SET_SIZE(movtuc)
485 365
486 -#endif /* lint */
487 -
488 366 /*
489 367 * scanc(length, string, table, mask)
490 368 *
491 369 * VAX scanc instruction.
492 370 */
493 371
494 -#if defined(lint)
495 -
496 -/*ARGSUSED*/
497 -int
498 -scanc(size_t length, u_char *string, u_char table[], u_char mask)
499 -{ return (0); }
500 -
501 -#else /* lint */
502 -
503 372 ENTRY(scanc)
504 373 tst %o0
505 374 ble,pn %ncc, 1f ! check length
506 375 clr %o4
507 376 0:
508 377 ldub [%o1 + %o4], %g1 ! get next byte in string
509 378 cmp %o4, %o0 ! interlock slot, index < length ?
510 379 ldub [%o2 + %g1], %g1 ! get corresponding table entry
511 380 bge,pn %ncc, 1f ! interlock slot
512 381 btst %o3, %g1 ! apply the mask
513 382 bz,a 0b
514 383 inc %o4 ! delay slot, increment index
515 384 1:
516 385 retl ! return(length - index)
517 386 sub %o0, %o4, %o0
518 387 SET_SIZE(scanc)
519 388
520 -#endif /* lint */
521 -
522 389 /*
523 390 * if a() calls b() calls caller(),
524 391 * caller() returns return address in a().
525 392 */
526 393
527 -#if defined(lint)
528 -
529 -caddr_t
530 -caller(void)
531 -{ return (0); }
532 -
533 -#else /* lint */
534 -
535 394 ENTRY(caller)
536 395 retl
537 396 mov %i7, %o0
538 397 SET_SIZE(caller)
539 398
540 -#endif /* lint */
541 -
542 399 /*
543 400 * if a() calls callee(), callee() returns the
544 401 * return address in a();
545 402 */
546 403
547 -#if defined(lint)
548 -
549 -caddr_t
550 -callee(void)
551 -{ return (0); }
552 -
553 -#else /* lint */
554 -
555 404 ENTRY(callee)
556 405 retl
557 406 mov %o7, %o0
558 407 SET_SIZE(callee)
559 408
560 -#endif /* lint */
561 -
562 409 /*
563 410 * return the current frame pointer
564 411 */
565 412
566 -#if defined(lint)
567 -
568 -greg_t
569 -getfp(void)
570 -{ return (0); }
571 -
572 -#else /* lint */
573 -
574 413 ENTRY(getfp)
575 414 retl
576 415 mov %fp, %o0
577 416 SET_SIZE(getfp)
578 417
579 -#endif /* lint */
580 -
581 418 /*
582 419 * Get vector base register
583 420 */
584 421
585 -#if defined(lint)
586 -
587 -greg_t
588 -gettbr(void)
589 -{ return (0); }
590 -
591 -#else /* lint */
592 -
593 422 ENTRY(gettbr)
594 423 retl
595 424 mov %tbr, %o0
596 425 SET_SIZE(gettbr)
597 426
598 -#endif /* lint */
599 -
600 427 /*
601 428 * Get processor state register, V9 faked to look like V8.
602 429 * Note: does not provide ccr.xcc and provides FPRS.FEF instead of
603 430 * PSTATE.PEF, because PSTATE.PEF is always on in order to allow the
604 431 * libc_psr memcpy routines to run without hitting the fp_disabled trap.
605 432 */
606 433
607 -#if defined(lint)
608 -
609 -greg_t
610 -getpsr(void)
611 -{ return (0); }
612 -
613 -#else /* lint */
614 -
615 434 ENTRY(getpsr)
616 435 rd %ccr, %o1 ! get ccr
617 436 sll %o1, PSR_ICC_SHIFT, %o0 ! move icc to V8 psr.icc
618 437 rd %fprs, %o1 ! get fprs
619 438 and %o1, FPRS_FEF, %o1 ! mask out dirty upper/lower
620 439 sllx %o1, PSR_FPRS_FEF_SHIFT, %o1 ! shift fef to V8 psr.ef
621 440 or %o0, %o1, %o0 ! or into psr.ef
622 441 set V9_PSR_IMPLVER, %o1 ! SI assigned impl/ver: 0xef
623 442 retl
624 443 or %o0, %o1, %o0 ! or into psr.impl/ver
625 444 SET_SIZE(getpsr)
626 445
627 -#endif /* lint */
628 -
629 446 /*
630 447 * Get current processor interrupt level
631 448 */
632 449
633 -#if defined(lint)
634 -
635 -u_int
636 -getpil(void)
637 -{ return (0); }
638 -
639 -#else /* lint */
640 -
641 450 ENTRY(getpil)
642 451 retl
643 452 rdpr %pil, %o0
644 453 SET_SIZE(getpil)
645 454
646 -#endif /* lint */
647 -
648 -#if defined(lint)
649 -
650 -/*ARGSUSED*/
651 -void
652 -setpil(u_int pil)
653 -{}
654 -
655 -#else /* lint */
656 -
657 455 ENTRY(setpil)
658 456 retl
659 457 wrpr %g0, %o0, %pil
660 458 SET_SIZE(setpil)
661 459
662 -#endif /* lint */
663 460
664 -
665 461 /*
666 462 * _insque(entryp, predp)
667 463 *
668 464 * Insert entryp after predp in a doubly linked list.
669 465 */
670 466
671 -#if defined(lint)
672 -
673 -/*ARGSUSED*/
674 -void
675 -_insque(caddr_t entryp, caddr_t predp)
676 -{}
677 -
678 -#else /* lint */
679 -
680 467 ENTRY(_insque)
681 468 ldn [%o1], %g1 ! predp->forw
682 469 stn %o1, [%o0 + CPTRSIZE] ! entryp->back = predp
683 470 stn %g1, [%o0] ! entryp->forw = predp->forw
684 471 stn %o0, [%o1] ! predp->forw = entryp
685 472 retl
686 473 stn %o0, [%g1 + CPTRSIZE] ! predp->forw->back = entryp
687 474 SET_SIZE(_insque)
688 475
689 -#endif /* lint */
690 -
691 476 /*
692 477 * _remque(entryp)
693 478 *
694 479 * Remove entryp from a doubly linked list
695 480 */
696 481
697 -#if defined(lint)
698 -
699 -/*ARGSUSED*/
700 -void
701 -_remque(caddr_t entryp)
702 -{}
703 -
704 -#else /* lint */
705 -
706 482 ENTRY(_remque)
707 483 ldn [%o0], %g1 ! entryp->forw
708 484 ldn [%o0 + CPTRSIZE], %g2 ! entryp->back
709 485 stn %g1, [%g2] ! entryp->back->forw = entryp->forw
710 486 retl
711 487 stn %g2, [%g1 + CPTRSIZE] ! entryp->forw->back = entryp->back
712 488 SET_SIZE(_remque)
713 489
714 -#endif /* lint */
715 490
716 -
717 491 /*
718 492 * strlen(str)
719 493 *
720 494 * Returns the number of non-NULL bytes in string argument.
721 495 *
722 496 * XXX - why is this here, rather than the traditional file?
723 497 * why does it have local labels which don't start with a `.'?
724 498 */
725 499
726 -#if defined(lint)
727 -
728 -/*ARGSUSED*/
729 -size_t
730 -strlen(const char *str)
731 -{ return (0); }
732 -
733 -#else /* lint */
734 -
735 500 ENTRY(strlen)
736 501 mov %o0, %o1
737 502 andcc %o1, 3, %o3 ! is src word aligned
738 503 bz $nowalgnd
739 504 clr %o0 ! length of non-zero bytes
740 505 cmp %o3, 2 ! is src half-word aligned
741 506 be $s2algn
742 507 cmp %o3, 3 ! src is byte aligned
743 508 ldub [%o1], %o3 ! move 1 or 3 bytes to align it
744 509 inc 1, %o1 ! in either case, safe to do a byte
745 510 be $s3algn
746 511 tst %o3
747 512 $s1algn:
748 513 bnz,a $s2algn ! now go align dest
749 514 inc 1, %o0
750 515 b,a $done
751 516
752 517 $s2algn:
753 518 lduh [%o1], %o3 ! know src is half-byte aligned
754 519 inc 2, %o1
755 520 srl %o3, 8, %o4
756 521 tst %o4 ! is the first byte zero
757 522 bnz,a 1f
758 523 inc %o0
759 524 b,a $done
760 525 1: andcc %o3, 0xff, %o3 ! is the second byte zero
761 526 bnz,a $nowalgnd
762 527 inc %o0
763 528 b,a $done
764 529 $s3algn:
765 530 bnz,a $nowalgnd
766 531 inc 1, %o0
767 532 b,a $done
768 533
769 534 $nowalgnd:
770 535 ! use trick to check if any read bytes of a word are zero
771 536 ! the following two constants will generate "byte carries"
772 537 ! and check if any bit in a byte is set, if all characters
773 538 * are 7bits (unsigned) this always works, otherwise
774 539 * there is a special case that rarely happens, see below
775 540
776 541 set 0x7efefeff, %o3
777 542 set 0x81010100, %o4
778 543
779 544 3: ld [%o1], %o2 ! main loop
780 545 inc 4, %o1
781 546 add %o2, %o3, %o5 ! generate byte-carries
782 547 xor %o5, %o2, %o5 ! see if original bits set
783 548 and %o5, %o4, %o5
784 549 cmp %o5, %o4 ! if ==, no zero bytes
785 550 be,a 3b
786 551 inc 4, %o0
787 552
788 553 ! check for the zero byte and increment the count appropriately
789 554 ! some information (the carry bit) is lost if bit 31
790 555 ! was set (very rare), if this is the rare condition,
791 556 ! return to the main loop again
792 557
793 558 sethi %hi(0xff000000), %o5 ! mask used to test for terminator
794 559 andcc %o2, %o5, %g0 ! check if first byte was zero
795 560 bnz 1f
796 561 srl %o5, 8, %o5
797 562 $done:
798 563 retl
799 564 nop
800 565 1: andcc %o2, %o5, %g0 ! check if second byte was zero
801 566 bnz 1f
802 567 srl %o5, 8, %o5
803 568 $done1:
804 569 retl
805 570 inc %o0
806 571 1: andcc %o2, %o5, %g0 ! check if third byte was zero
807 572 bnz 1f
808 573 andcc %o2, 0xff, %g0 ! check if last byte is zero
↓ open down ↓ |
64 lines elided |
↑ open up ↑ |
809 574 $done2:
810 575 retl
811 576 inc 2, %o0
812 577 1: bnz,a 3b
813 578 inc 4, %o0 ! count of bytes
814 579 $done3:
815 580 retl
816 581 inc 3, %o0
817 582 SET_SIZE(strlen)
818 583
819 -#endif /* lint */
820 -
821 584 /*
822 585 * Provide a C callable interface to the membar instruction.
823 586 */
824 587
825 -#if defined(lint)
826 -
827 -void
828 -membar_ldld(void)
829 -{}
830 -
831 -void
832 -membar_stld(void)
833 -{}
834 -
835 -void
836 -membar_ldst(void)
837 -{}
838 -
839 -void
840 -membar_stst(void)
841 -{}
842 -
843 -void
844 -membar_ldld_ldst(void)
845 -{}
846 -
847 -void
848 -membar_ldld_stld(void)
849 -{}
850 -
851 -void
852 -membar_ldld_stst(void)
853 -{}
854 -
855 -void
856 -membar_stld_ldld(void)
857 -{}
858 -
859 -void
860 -membar_stld_ldst(void)
861 -{}
862 -
863 -void
864 -membar_stld_stst(void)
865 -{}
866 -
867 -void
868 -membar_ldst_ldld(void)
869 -{}
870 -
871 -void
872 -membar_ldst_stld(void)
873 -{}
874 -
875 -void
876 -membar_ldst_stst(void)
877 -{}
878 -
879 -void
880 -membar_stst_ldld(void)
881 -{}
882 -
883 -void
884 -membar_stst_stld(void)
885 -{}
886 -
887 -void
888 -membar_stst_ldst(void)
889 -{}
890 -
891 -void
892 -membar_lookaside(void)
893 -{}
894 -
895 -void
896 -membar_memissue(void)
897 -{}
898 -
899 -void
900 -membar_sync(void)
901 -{}
902 -
903 -#else
904 588 ENTRY(membar_ldld)
905 589 retl
906 590 membar #LoadLoad
907 591 SET_SIZE(membar_ldld)
908 592
909 593 ENTRY(membar_stld)
910 594 retl
911 595 membar #StoreLoad
912 596 SET_SIZE(membar_stld)
913 597
914 598 ENTRY(membar_ldst)
915 599 retl
916 600 membar #LoadStore
917 601 SET_SIZE(membar_ldst)
918 602
919 603 ENTRY(membar_stst)
920 604 retl
921 605 membar #StoreStore
922 606 SET_SIZE(membar_stst)
923 607
924 608 ENTRY(membar_ldld_stld)
925 609 ALTENTRY(membar_stld_ldld)
926 610 retl
927 611 membar #LoadLoad|#StoreLoad
928 612 SET_SIZE(membar_stld_ldld)
929 613 SET_SIZE(membar_ldld_stld)
930 614
931 615 ENTRY(membar_ldld_ldst)
932 616 ALTENTRY(membar_ldst_ldld)
933 617 retl
934 618 membar #LoadLoad|#LoadStore
935 619 SET_SIZE(membar_ldst_ldld)
936 620 SET_SIZE(membar_ldld_ldst)
937 621
938 622 ENTRY(membar_ldld_stst)
939 623 ALTENTRY(membar_stst_ldld)
940 624 retl
941 625 membar #LoadLoad|#StoreStore
942 626 SET_SIZE(membar_stst_ldld)
943 627 SET_SIZE(membar_ldld_stst)
944 628
945 629 ENTRY(membar_stld_ldst)
946 630 ALTENTRY(membar_ldst_stld)
947 631 retl
948 632 membar #StoreLoad|#LoadStore
949 633 SET_SIZE(membar_ldst_stld)
950 634 SET_SIZE(membar_stld_ldst)
951 635
952 636 ENTRY(membar_stld_stst)
953 637 ALTENTRY(membar_stst_stld)
954 638 retl
955 639 membar #StoreLoad|#StoreStore
956 640 SET_SIZE(membar_stst_stld)
957 641 SET_SIZE(membar_stld_stst)
958 642
959 643 ENTRY(membar_ldst_stst)
960 644 ALTENTRY(membar_stst_ldst)
961 645 retl
962 646 membar #LoadStore|#StoreStore
963 647 SET_SIZE(membar_stst_ldst)
964 648 SET_SIZE(membar_ldst_stst)
965 649
966 650 ENTRY(membar_lookaside)
967 651 retl
968 652 membar #Lookaside
969 653 SET_SIZE(membar_lookaside)
970 654
↓ open down ↓ |
57 lines elided |
↑ open up ↑ |
971 655 ENTRY(membar_memissue)
972 656 retl
973 657 membar #MemIssue
974 658 SET_SIZE(membar_memissue)
975 659
976 660 ENTRY(membar_sync)
977 661 retl
978 662 membar #Sync
979 663 SET_SIZE(membar_sync)
980 664
981 -#endif /* lint */
982 665
983 -
984 -#if defined(lint)
985 -
986 -/*ARGSUSED*/
987 -int
988 -fuword64(const void *addr, uint64_t *dst)
989 -{ return (0); }
990 -
991 -/*ARGSUSED*/
992 -int
993 -fuword32(const void *addr, uint32_t *dst)
994 -{ return (0); }
995 -
996 -/*ARGSUSED*/
997 -int
998 -fuword16(const void *addr, uint16_t *dst)
999 -{ return (0); }
1000 -
1001 -/*ARGSUSED*/
1002 -int
1003 -fuword8(const void *addr, uint8_t *dst)
1004 -{ return (0); }
1005 -
1006 -/*ARGSUSED*/
1007 -int
1008 -dtrace_ft_fuword64(const void *addr, uint64_t *dst)
1009 -{ return (0); }
1010 -
1011 -/*ARGSUSED*/
1012 -int
1013 -dtrace_ft_fuword32(const void *addr, uint32_t *dst)
1014 -{ return (0); }
1015 -
1016 -#else /* lint */
1017 -
1018 666 /*
1019 667 * Since all of the fuword() variants are so similar, we have a macro to spit
1020 668 * them out.
1021 669 */
1022 670
1023 671 #define FUWORD(NAME, LOAD, STORE, COPYOP) \
1024 672 ENTRY(NAME); \
1025 673 sethi %hi(1f), %o5; \
1026 674 ldn [THREAD_REG + T_LOFAULT], %o3; \
1027 675 or %o5, %lo(1f), %o5; \
1028 676 membar #Sync; \
1029 677 stn %o5, [THREAD_REG + T_LOFAULT]; \
1030 678 LOAD [%o0]ASI_USER, %o2; \
1031 679 membar #Sync; \
1032 680 stn %o3, [THREAD_REG + T_LOFAULT]; \
1033 681 mov 0, %o0; \
1034 682 retl; \
1035 683 STORE %o2, [%o1]; \
1036 684 1: \
1037 685 membar #Sync; \
1038 686 stn %o3, [THREAD_REG + T_LOFAULT]; \
1039 687 ldn [THREAD_REG + T_COPYOPS], %o2; \
1040 688 brz %o2, 2f; \
1041 689 nop; \
1042 690 ldn [%o2 + COPYOP], %g1; \
1043 691 jmp %g1; \
1044 692 nop; \
↓ open down ↓ |
17 lines elided |
↑ open up ↑ |
1045 693 2: \
1046 694 retl; \
1047 695 mov -1, %o0; \
1048 696 SET_SIZE(NAME)
1049 697
1050 698 FUWORD(fuword64, ldxa, stx, CP_FUWORD64)
1051 699 FUWORD(fuword32, lda, st, CP_FUWORD32)
1052 700 FUWORD(fuword16, lduha, sth, CP_FUWORD16)
1053 701 FUWORD(fuword8, lduba, stb, CP_FUWORD8)
1054 702
1055 -#endif /* lint */
1056 703
1057 -
1058 -#if defined(lint)
1059 -
1060 -/*ARGSUSED*/
1061 -int
1062 -suword64(void *addr, uint64_t value)
1063 -{ return (0); }
1064 -
1065 -/*ARGSUSED*/
1066 -int
1067 -suword32(void *addr, uint32_t value)
1068 -{ return (0); }
1069 -
1070 -/*ARGSUSED*/
1071 -int
1072 -suword16(void *addr, uint16_t value)
1073 -{ return (0); }
1074 -
1075 -/*ARGSUSED*/
1076 -int
1077 -suword8(void *addr, uint8_t value)
1078 -{ return (0); }
1079 -
1080 -#else /* lint */
1081 -
1082 704 /*
1083 705 * Since all of the suword() variants are so similar, we have a macro to spit
1084 706 * them out.
1085 707 */
1086 708
1087 709 #define SUWORD(NAME, STORE, COPYOP) \
1088 710 ENTRY(NAME) \
1089 711 sethi %hi(1f), %o5; \
1090 712 ldn [THREAD_REG + T_LOFAULT], %o3; \
1091 713 or %o5, %lo(1f), %o5; \
1092 714 membar #Sync; \
1093 715 stn %o5, [THREAD_REG + T_LOFAULT]; \
1094 716 STORE %o1, [%o0]ASI_USER; \
1095 717 membar #Sync; \
1096 718 stn %o3, [THREAD_REG + T_LOFAULT]; \
1097 719 retl; \
1098 720 clr %o0; \
1099 721 1: \
1100 722 membar #Sync; \
1101 723 stn %o3, [THREAD_REG + T_LOFAULT]; \
1102 724 ldn [THREAD_REG + T_COPYOPS], %o2; \
1103 725 brz %o2, 2f; \
1104 726 nop; \
1105 727 ldn [%o2 + COPYOP], %g1; \
1106 728 jmp %g1; \
1107 729 nop; \
↓ open down ↓ |
16 lines elided |
↑ open up ↑ |
1108 730 2: \
1109 731 retl; \
1110 732 mov -1, %o0; \
1111 733 SET_SIZE(NAME)
1112 734
1113 735 SUWORD(suword64, stxa, CP_SUWORD64)
1114 736 SUWORD(suword32, sta, CP_SUWORD32)
1115 737 SUWORD(suword16, stha, CP_SUWORD16)
1116 738 SUWORD(suword8, stba, CP_SUWORD8)
1117 739
1118 -#endif /* lint */
1119 -
1120 -#if defined(lint)
1121 -
1122 -/*ARGSUSED*/
1123 -void
1124 -fuword8_noerr(const void *addr, uint8_t *dst)
1125 -{}
1126 -
1127 -/*ARGSUSED*/
1128 -void
1129 -fuword16_noerr(const void *addr, uint16_t *dst)
1130 -{}
1131 -
1132 -/*ARGSUSED*/
1133 -void
1134 -fuword32_noerr(const void *addr, uint32_t *dst)
1135 -{}
1136 -
1137 -/*ARGSUSED*/
1138 -void
1139 -fuword64_noerr(const void *addr, uint64_t *dst)
1140 -{}
1141 -
1142 -#else /* lint */
1143 -
1144 740 ENTRY(fuword8_noerr)
1145 741 lduba [%o0]ASI_USER, %o0
1146 742 retl
1147 743 stb %o0, [%o1]
1148 744 SET_SIZE(fuword8_noerr)
1149 745
1150 746 ENTRY(fuword16_noerr)
1151 747 lduha [%o0]ASI_USER, %o0
1152 748 retl
1153 749 sth %o0, [%o1]
1154 750 SET_SIZE(fuword16_noerr)
1155 751
1156 752 ENTRY(fuword32_noerr)
1157 753 lda [%o0]ASI_USER, %o0
↓ open down ↓ |
4 lines elided |
↑ open up ↑ |
1158 754 retl
1159 755 st %o0, [%o1]
1160 756 SET_SIZE(fuword32_noerr)
1161 757
1162 758 ENTRY(fuword64_noerr)
1163 759 ldxa [%o0]ASI_USER, %o0
1164 760 retl
1165 761 stx %o0, [%o1]
1166 762 SET_SIZE(fuword64_noerr)
1167 763
1168 -#endif /* lint */
1169 -
1170 -#if defined(lint)
1171 -
1172 -/*ARGSUSED*/
1173 -void
1174 -suword8_noerr(void *addr, uint8_t value)
1175 -{}
1176 -
1177 -/*ARGSUSED*/
1178 -void
1179 -suword16_noerr(void *addr, uint16_t value)
1180 -{}
1181 -
1182 -/*ARGSUSED*/
1183 -void
1184 -suword32_noerr(void *addr, uint32_t value)
1185 -{}
1186 -
1187 -/*ARGSUSED*/
1188 -void
1189 -suword64_noerr(void *addr, uint64_t value)
1190 -{}
1191 -
1192 -#else /* lint */
1193 -
/*
 * void suword8_noerr(void *addr, uint8_t value)
 * Store a byte to user space (ASI_USER); store sits in the retl
 * delay slot.  No error branch in this routine.
 */
1194 764 ENTRY(suword8_noerr)
1195 765 retl
1196 766 stba %o1, [%o0]ASI_USER		! *addr = value (delay slot)
1197 767 SET_SIZE(suword8_noerr)
1198 768
/*
 * void suword16_noerr(void *addr, uint16_t value)
 * Store a halfword to user space (ASI_USER) in the retl delay slot.
 */
1199 769 ENTRY(suword16_noerr)
1200 770 retl
1201 771 stha %o1, [%o0]ASI_USER		! *addr = value (delay slot)
1202 772 SET_SIZE(suword16_noerr)
1203 773
/*
 * void suword32_noerr(void *addr, uint32_t value)
 * Store a word to user space (ASI_USER) in the retl delay slot.
 */
1204 774 ENTRY(suword32_noerr)
1205 775 retl
1206 776 sta %o1, [%o0]ASI_USER		! *addr = value (delay slot)
1207 777 SET_SIZE(suword32_noerr)
1208 778
/*
 * void suword64_noerr(void *addr, uint64_t value)
 * Store an extended word to user space (ASI_USER) in the retl delay slot.
 */
1209 779 ENTRY(suword64_noerr)
1210 780 retl
1211 781 stxa %o1, [%o0]ASI_USER		! *addr = value (delay slot)
1212 782 SET_SIZE(suword64_noerr)
1213 783
1214 -#endif /* lint */
1215 -
1216 -#if defined(__lint)
1217 -
1218 -/*ARGSUSED*/
1219 -int
1220 -subyte(void *addr, uchar_t value)
1221 -{ return (0); }
1222 -
1223 -/*ARGSUSED*/
1224 -void
1225 -subyte_noerr(void *addr, uchar_t value)
1226 -{}
1227 -
1228 -/*ARGSUSED*/
1229 -int
1230 -fulword(const void *addr, ulong_t *valuep)
1231 -{ return (0); }
1232 -
1233 -/*ARGSUSED*/
1234 -void
1235 -fulword_noerr(const void *addr, ulong_t *valuep)
1236 -{}
1237 -
1238 -/*ARGSUSED*/
1239 -int
1240 -sulword(void *addr, ulong_t valuep)
1241 -{ return (0); }
1242 -
1243 -/*ARGSUSED*/
1244 -void
1245 -sulword_noerr(void *addr, ulong_t valuep)
1246 -{}
1247 -
1248 -#else
1249 -
/*
 * Weak aliases: subyte is the byte store, and fulword/sulword are the
 * native-long fetch/store, resolved at link time to the fixed-width
 * fuword/suword routines matching the build's long size (_LP64).
 */
1250 784 .weak subyte
1251 785 subyte=suword8
1252 786 .weak subyte_noerr
1253 787 subyte_noerr=suword8_noerr
1254 788 #ifdef _LP64
1255 789 .weak fulword
1256 790 fulword=fuword64
1257 791 .weak fulword_noerr
1258 792 fulword_noerr=fuword64_noerr
1259 793 .weak sulword
1260 794 sulword=suword64
1261 795 .weak sulword_noerr
1262 796 sulword_noerr=suword64_noerr
1263 797 #else
↓ open down ↓ |
4 lines elided |
↑ open up ↑ |
1264 798 .weak fulword
1265 799 fulword=fuword32
1266 800 .weak fulword_noerr
1267 801 fulword_noerr=fuword32_noerr
1268 802 .weak sulword
1269 803 sulword=suword32
1270 804 .weak sulword_noerr
1271 805 sulword_noerr=suword32_noerr
1272 806 #endif /* LP64 */
1273 807
1274 -#endif /* lint */
1275 -
1276 808 /*
1277 809 * We define rdtick here, but not for sun4v. On sun4v systems, the %tick
1278 810 * and %stick should not be read directly without considering the tick
1279 811 * and stick offset kernel variables introduced to support sun4v OS
1280 812 * suspension.
1281 813 */
1282 814 #if !defined (sun4v)
1283 815
1284 -#if defined (lint)
1285 -
1286 -hrtime_t
1287 -rdtick()
1288 -{ return (0); }
1289 -
1290 -#else /* lint */
1291 -
/*
 * hrtime_t rdtick(void)
 * Raw read of the %tick register; not built for sun4v (see the
 * comment above re: tick/stick offsets and OS suspension).
 */
1292 816 ENTRY(rdtick)
1293 817 retl
1294 818 rd %tick, %o0			! return %tick (delay slot)
1295 819 SET_SIZE(rdtick)
1296 820
1297 -#endif /* lint */
1298 -
1299 821 #endif /* !sun4v */
1300 822
1301 823 /*
1302 824 * Set tba to given address, no side effects.
1303 825 */
1304 -#if defined (lint)
1305 826
1306 -/*ARGSUSED*/
1307 -void *
1308 -set_tba(void *new_tba)
1309 -{ return (0); }
1310 -
1311 -#else /* lint */
1312 -
/*
 * void *set_tba(void *new_tba)
 * Install a new trap base address; returns the previous %tba in %o0.
 */
1313 827 ENTRY(set_tba)
1314 828 mov %o0, %o1			! stash new value
1315 829 rdpr %tba, %o0			! return old %tba
1316 830 wrpr %o1, %tba			! install new %tba
1317 831 retl
1318 832 nop
1319 833 SET_SIZE(set_tba)
1320 834
1321 -#endif /* lint */
1322 -
1323 -#if defined (lint)
1324 -
1325 -/*ARGSUSED*/
1326 -void *
1327 -get_tba()
1328 -{ return (0); }
1329 -
1330 -#else /* lint */
1331 -
/*
 * void *get_tba(void)
 * Return the current trap base address register.
 */
1332 835 ENTRY(get_tba)
1333 836 retl
1334 837 rdpr %tba, %o0			! return %tba (delay slot)
1335 838 SET_SIZE(get_tba)
1336 839
1337 -#endif /* lint */
1338 -
1339 -#if defined(lint) || defined(__lint)
1340 -
1341 -/* ARGSUSED */
1342 -void
1343 -setpstate(u_int pstate)
1344 -{}
1345 -
1346 -#else /* lint */
1347 -
/*
 * void setpstate(u_int pstate)
 * Write the privileged %pstate register with the given value.
 */
1348 840 ENTRY_NP(setpstate)
1349 841 retl
1350 842 wrpr %g0, %o0, %pstate		! %pstate = pstate (delay slot)
1351 843 SET_SIZE(setpstate)
1352 844
1353 -#endif /* lint */
1354 -
1355 -#if defined(lint) || defined(__lint)
1356 -
1357 -u_int
1358 -getpstate(void)
1359 -{ return(0); }
1360 -
1361 -#else /* lint */
1362 -
/*
 * u_int getpstate(void)
 * Return the current value of the privileged %pstate register.
 */
1363 845 ENTRY_NP(getpstate)
1364 846 retl
1365 847 rdpr %pstate, %o0		! return %pstate (delay slot)
1366 848 SET_SIZE(getpstate)
1367 849
1368 -#endif /* lint */
1369 -
1370 -#if defined(lint) || defined(__lint)
1371 -
1372 -dtrace_icookie_t
1373 -dtrace_interrupt_disable(void)
1374 -{ return (0); }
1375 -
1376 -#else /* lint */
1377 -
/*
 * dtrace_icookie_t dtrace_interrupt_disable(void)
 * Clear PSTATE_IE to mask interrupts; returns the prior %pstate as
 * the cookie for dtrace_interrupt_enable().
 */
1378 850 ENTRY_NP(dtrace_interrupt_disable)
1379 851 rdpr %pstate, %o0		! cookie = old %pstate
1380 852 andn %o0, PSTATE_IE, %o1	! clear interrupt-enable bit
1381 853 retl
1382 854 wrpr %g0, %o1, %pstate		! install masked %pstate (delay slot)
1383 855 SET_SIZE(dtrace_interrupt_disable)
1384 856
1385 -#endif /* lint */
1386 -
1387 -#if defined(lint) || defined(__lint)
1388 -
1389 -/*ARGSUSED*/
1390 -void
1391 -dtrace_interrupt_enable(dtrace_icookie_t cookie)
1392 -{}
1393 -
1394 -#else
1395 -
/*
 * void dtrace_interrupt_enable(dtrace_icookie_t cookie)
 * Restore %pstate from the cookie returned by
 * dtrace_interrupt_disable(), re-enabling interrupts if they were on.
 */
1396 857 ENTRY_NP(dtrace_interrupt_enable)
1397 858 retl
1398 859 wrpr %g0, %o0, %pstate		! %pstate = cookie (delay slot)
1399 860 SET_SIZE(dtrace_interrupt_enable)
1400 861
1401 -#endif /* lint*/
1402 -
1403 -#if defined(lint)
1404 -
1405 -void
1406 -dtrace_membar_producer(void)
1407 -{}
1408 -
1409 -void
1410 -dtrace_membar_consumer(void)
1411 -{}
1412 -
1413 -#else /* lint */
1414 -
/*
 * dtrace_membar_producer() / dtrace_membar_consumer()
 * Issue a StoreStore / LoadLoad membar in the delay slot of the
 * return.  With SF_ERRATA_51 the return is routed through a branch to
 * dtrace_membar_return (the membar executes in the branch delay slot).
 */
1415 862 #ifdef SF_ERRATA_51
1416 863 .align 32
1417 864 ENTRY(dtrace_membar_return)
1418 865 retl
1419 866 nop
1420 867 SET_SIZE(dtrace_membar_return)
1421 868 #define DTRACE_MEMBAR_RETURN ba,pt %icc, dtrace_membar_return
1422 869 #else
1423 870 #define DTRACE_MEMBAR_RETURN retl
1424 871 #endif
1425 872 
↓ open down ↓ |
1 lines elided |
↑ open up ↑ |
1426 873 ENTRY(dtrace_membar_producer)
1427 874 DTRACE_MEMBAR_RETURN
1428 875 membar #StoreStore		! order prior stores before later stores
1429 876 SET_SIZE(dtrace_membar_producer)
1430 877 
1431 878 ENTRY(dtrace_membar_consumer)
1432 879 DTRACE_MEMBAR_RETURN
1433 880 membar #LoadLoad		! order prior loads before later loads
1434 881 SET_SIZE(dtrace_membar_consumer)
1435 882
1436 -#endif /* lint */
1437 -
1438 -#if defined(lint) || defined(__lint)
1439 -
1440 -void
1441 -dtrace_flush_windows(void)
1442 -{}
1443 -
1444 -#else
1445 -
/*
 * void dtrace_flush_windows(void)
 * Flush all active register windows to the stack (flushw executes in
 * the retl delay slot).
 */
1446 883 ENTRY_NP(dtrace_flush_windows)
1447 884 retl
1448 885 flushw				! spill all register windows (delay slot)
1449 886 SET_SIZE(dtrace_flush_windows)
1450 887
1451 -#endif /* lint */
1452 -
1453 -#if defined(lint)
1454 -
1455 -/*ARGSUSED*/
1456 -int
1457 -getpcstack_top(pc_t *pcstack, int limit, uintptr_t *lastfp, pc_t *lastpc)
1458 -{
1459 - return (0);
1460 -}
1461 -
1462 -#else /* lint */
1463 -
/*
 * int getpcstack_top(pc_t *pcstack, int limit, uintptr_t *lastfp, pc_t *lastpc)
 * Harvest return PCs from the in-register window frames (fast path),
 * falling back to a flushw-based slow path when no windows are
 * restorable.  Runs with interrupts disabled throughout.
 */
1464 888 /*
1465 889 * %g1 pcstack
1466 890 * %g2 iteration count
1467 891 * %g3 final %fp
1468 892 * %g4 final %i7
1469 893 * %g5 saved %cwp (so we can get back to the original window)
1470 894 *
1471 895 * %o0 pcstack / return value (iteration count)
1472 896 * %o1 limit / saved %cansave
1473 897 * %o2 lastfp
1474 898 * %o3 lastpc
1475 899 * %o4 saved %canrestore
1476 900 * %o5 saved %pstate (to restore interrupts)
1477 901 *
1478 902 * Note: The frame pointer returned via lastfp is safe to use as
1479 903 * long as getpcstack_top() returns either (0) or a value less
1480 904 * than (limit).
1481 905 */
1482 906 ENTRY_NP(getpcstack_top)
1483 907 
1484 908 rdpr %pstate, %o5
1485 909 andn %o5, PSTATE_IE, %g1
1486 910 wrpr %g0, %g1, %pstate ! disable interrupts
1487 911 
1488 912 mov %o0, %g1 ! we need the pcstack pointer while
1489 913 ! we're visiting other windows
1490 914 
1491 915 rdpr %canrestore, %g2 ! number of available windows
1492 916 sub %g2, 1, %g2 ! account for skipped frame
1493 917 cmp %g2, %o1 ! compare with limit
1494 918 movg %icc, %o1, %g2 ! %g2 = min(%canrestore-1, limit)
1495 919 
1496 920 brlez,a,pn %g2, 3f ! Use slow path if count <= 0 --
1497 921 clr %o0 ! return zero.
1498 922 
1499 923 mov %g2, %o0 ! set up return value
1500 924 
1501 925 rdpr %cwp, %g5 ! remember the register window state
1502 926 rdpr %cansave, %o1 ! 'restore' changes, so we can undo
1503 927 rdpr %canrestore, %o4 ! its effects when we finish.
1504 928 
1505 929 restore ! skip caller's frame
1506 930 1:
1507 931 st %i7, [%g1] ! stash return address in pcstack
1508 932 restore ! go to the next frame
1509 933 subcc %g2, 1, %g2 ! decrement the count
1510 934 bnz,pt %icc, 1b ! loop until count reaches 0
1511 935 add %g1, 4, %g1 ! increment pcstack
1512 936 
1513 937 mov %i6, %g3 ! copy the final %fp and return PC
1514 938 mov %i7, %g4 ! aside so we can return them to our
1515 939 ! caller
1516 940 
1517 941 wrpr %g0, %g5, %cwp ! jump back to the original window
1518 942 wrpr %g0, %o1, %cansave ! and restore the original register
1519 943 wrpr %g0, %o4, %canrestore ! window state.
1520 944 2:
1521 945 stn %g3, [%o2] ! store the frame pointer and pc
1522 946 st %g4, [%o3] ! so our caller can continue the trace
1523 947 
↓ open down ↓ |
50 lines elided |
↑ open up ↑ |
1524 948 retl ! return to caller
1525 949 wrpr %g0, %o5, %pstate ! restore interrupts
1526 950 
1527 951 3:
1528 952 flushw ! flush register windows, then
1529 953 ldn [%fp + STACK_BIAS + 14*CLONGSIZE], %g3 ! load initial fp
1530 954 ba 2b
1531 955 ldn [%fp + STACK_BIAS + 15*CLONGSIZE], %g4 ! and pc
1532 956 SET_SIZE(getpcstack_top)
1533 957
1534 -#endif /* lint */
1535 -
1536 -#if defined(lint) || defined(__lint)
1537 -
1538 -/* ARGSUSED */
1539 -void
1540 -setwstate(u_int wstate)
1541 -{}
1542 -
1543 -#else /* lint */
1544 -
/*
 * void setwstate(u_int wstate)
 * Write the window-state register %wstate.
 */
1545 958 ENTRY_NP(setwstate)
1546 959 retl
1547 960 wrpr %g0, %o0, %wstate		! %wstate = wstate (delay slot)
1548 961 SET_SIZE(setwstate)
1549 962
1550 -#endif /* lint */
1551 963
1552 -
1553 -#if defined(lint) || defined(__lint)
1554 -
1555 -u_int
1556 -getwstate(void)
1557 -{ return(0); }
1558 -
1559 -#else /* lint */
1560 -
/*
 * u_int getwstate(void)
 * Return the current window-state register %wstate.
 */
1561 964 ENTRY_NP(getwstate)
1562 965 retl
1563 966 rdpr %wstate, %o0		! return %wstate (delay slot)
1564 967 SET_SIZE(getwstate)
1565 968
1566 -#endif /* lint */
1567 969
1568 -
1569 970 /*
1570 971 * int panic_trigger(int *tp)
1571 972 *
1572 973 * A panic trigger is a word which is updated atomically and can only be set
1573 974 * once. We atomically store 0xFF into the high byte and load the old value.
1574 975 * If the byte was 0xFF, the trigger has already been activated and we fail.
1575 976 * If the previous value was 0 or not 0xFF, we succeed. This allows a
1576 977 * partially corrupt trigger to still trigger correctly. DTrace has its own
1577 978 * version of this function to allow it to panic correctly from probe context.
1578 979 */
1579 -#if defined(lint)
1580 980
1581 -/*ARGSUSED*/
1582 -int panic_trigger(int *tp) { return (0); }
1583 -
1584 -/*ARGSUSED*/
1585 -int dtrace_panic_trigger(int *tp) { return (0); }
1586 -
1587 -#else /* lint */
1588 -
/*
 * int panic_trigger(int *tp)
 * Atomically arm the trigger word (see block comment above): ldstub
 * stores 0xFF and returns the old byte; succeed (return 1) only if
 * the trigger was not already 0xFF.
 */
1589 981 ENTRY_NP(panic_trigger)
1590 982 ldstub [%o0], %o0 ! store 0xFF, load byte into %o0
1591 983 cmp %o0, 0xFF ! compare %o0 to 0xFF
1592 984 set 1, %o1 ! %o1 = 1
1593 985 be,a 0f ! if (%o0 == 0xFF) goto 0f (else annul)
1594 986 set 0, %o1 ! delay - %o1 = 0
1595 987 0: retl
1596 988 mov %o1, %o0 ! return (%o1);
1597 989 SET_SIZE(panic_trigger)
1598 990
/*
 * int dtrace_panic_trigger(int *tp)
 * Identical logic to panic_trigger(); a separate copy so DTrace can
 * panic from probe context (see block comment above).
 */
1599 991 ENTRY_NP(dtrace_panic_trigger)
1600 992 ldstub [%o0], %o0 ! store 0xFF, load byte into %o0
1601 993 cmp %o0, 0xFF ! compare %o0 to 0xFF
1602 994 set 1, %o1 ! %o1 = 1
1603 995 be,a 0f ! if (%o0 == 0xFF) goto 0f (else annul)
1604 996 set 0, %o1 ! delay - %o1 = 0
1605 997 0: retl
1606 998 mov %o1, %o0 ! return (%o1);
1607 999 SET_SIZE(dtrace_panic_trigger)
1608 1000
1609 -#endif /* lint */
1610 -
1611 1001 /*
1612 1002 * void vpanic(const char *format, va_list alist)
1613 1003 *
1614 1004 * The panic() and cmn_err() functions invoke vpanic() as a common entry point
1615 1005 * into the panic code implemented in panicsys(). vpanic() is responsible
1616 1006 * for passing through the format string and arguments, and constructing a
1617 1007 * regs structure on the stack into which it saves the current register
1618 1008 * values. If we are not dying due to a fatal trap, these registers will
1619 1009 * then be preserved in panicbuf as the current processor state. Before
1620 1010 * invoking panicsys(), vpanic() activates the first panic trigger (see
1621 1011 * common/os/panic.c) and switches to the panic_stack if successful. Note that
1622 1012 * DTrace takes a slightly different panic path if it must panic from probe
1623 1013 * context. Instead of calling panic, it calls into dtrace_vpanic(), which
1624 1014 * sets up the initial stack as vpanic does, calls dtrace_panic_trigger(), and
1625 1015 * branches back into vpanic().
1626 1016 */
1627 -#if defined(lint)
1628 1017
1629 -/*ARGSUSED*/
1630 -void vpanic(const char *format, va_list alist) {}
1631 -
1632 -/*ARGSUSED*/
1633 -void dtrace_vpanic(const char *format, va_list alist) {}
1634 -
1635 -#else /* lint */
1636 -
/*
 * void vpanic(const char *format, va_list alist)
 * See the block comment above: builds a regs snapshot on the stack,
 * arms the panic trigger, switches to panic_stack on first panic,
 * then calls panicsys().  vpanic_common is the shared tail, also
 * entered from dtrace_vpanic below.
 */
1637 1018 ENTRY_NP(vpanic)
1638 1019 
1639 1020 save %sp, -SA(MINFRAME + REGSIZE), %sp ! save and allocate regs
1640 1021 
1641 1022 !
1642 1023 ! The v9 struct regs has a 64-bit r_tstate field, which we use here
1643 1024 ! to store the %ccr, %asi, %pstate, and %cwp as they would appear
1644 1025 ! in %tstate if a trap occurred. We leave it up to the debugger to
1645 1026 ! realize what happened and extract the register values.
1646 1027 !
1647 1028 rd %ccr, %l0 ! %l0 = %ccr
1648 1029 sllx %l0, TSTATE_CCR_SHIFT, %l0 ! %l0 <<= CCR_SHIFT
1649 1030 rd %asi, %l1 ! %l1 = %asi
1650 1031 sllx %l1, TSTATE_ASI_SHIFT, %l1 ! %l1 <<= ASI_SHIFT
1651 1032 or %l0, %l1, %l0 ! %l0 |= %l1
1652 1033 rdpr %pstate, %l1 ! %l1 = %pstate
1653 1034 sllx %l1, TSTATE_PSTATE_SHIFT, %l1 ! %l1 <<= PSTATE_SHIFT
1654 1035 or %l0, %l1, %l0 ! %l0 |= %l1
1655 1036 rdpr %cwp, %l1 ! %l1 = %cwp
1656 1037 sllx %l1, TSTATE_CWP_SHIFT, %l1 ! %l1 <<= CWP_SHIFT
1657 1038 or %l0, %l1, %l0 ! %l0 |= %l1
1658 1039 
1659 1040 set vpanic, %l1 ! %l1 = %pc (vpanic)
1660 1041 add %l1, 4, %l2 ! %l2 = %npc (vpanic+4)
1661 1042 rd %y, %l3 ! %l3 = %y
1662 1043 !
1663 1044 ! Flush register windows before panic_trigger() in order to avoid a
1664 1045 ! problem that a dump hangs if flush_windows() causes another panic.
1665 1046 !
1666 1047 call flush_windows
1667 1048 nop
1668 1049 
1669 1050 sethi %hi(panic_quiesce), %o0
1670 1051 call panic_trigger
1671 1052 or %o0, %lo(panic_quiesce), %o0 ! if (!panic_trigger(
1672 1053 
1673 1054 vpanic_common:					! shared tail (entered from dtrace_vpanic too)
1674 1055 tst %o0 ! &panic_quiesce))
1675 1056 be 0f ! goto 0f;
1676 1057 mov %o0, %l4 ! delay - %l4 = %o0
1677 1058 
1678 1059 !
1679 1060 ! If panic_trigger() was successful, we are the first to initiate a
1680 1061 ! panic: switch to the panic_stack.
1681 1062 !
1682 1063 set panic_stack, %o0 ! %o0 = panic_stack
1683 1064 set PANICSTKSIZE, %o1 ! %o1 = size of stack
1684 1065 add %o0, %o1, %o0 ! %o0 = top of stack
1685 1066 
1686 1067 sub %o0, SA(MINFRAME + REGSIZE) + STACK_BIAS, %sp
1687 1068 
1688 1069 !
1689 1070 ! Now that we've got everything set up, store each register to its
1690 1071 ! designated location in the regs structure allocated on the stack.
1691 1072 ! The register set we store is the equivalent of the registers at
1692 1073 ! the time the %pc was pointing to vpanic, thus the %i's now contain
1693 1074 ! what the %o's contained prior to the save instruction.
1694 1075 !
1695 1076 0: stx %l0, [%sp + STACK_BIAS + SA(MINFRAME) + TSTATE_OFF]
1696 1077 stx %g1, [%sp + STACK_BIAS + SA(MINFRAME) + G1_OFF]
1697 1078 stx %g2, [%sp + STACK_BIAS + SA(MINFRAME) + G2_OFF]
1698 1079 stx %g3, [%sp + STACK_BIAS + SA(MINFRAME) + G3_OFF]
1699 1080 stx %g4, [%sp + STACK_BIAS + SA(MINFRAME) + G4_OFF]
1700 1081 stx %g5, [%sp + STACK_BIAS + SA(MINFRAME) + G5_OFF]
1701 1082 stx %g6, [%sp + STACK_BIAS + SA(MINFRAME) + G6_OFF]
1702 1083 stx %g7, [%sp + STACK_BIAS + SA(MINFRAME) + G7_OFF]
1703 1084 stx %i0, [%sp + STACK_BIAS + SA(MINFRAME) + O0_OFF]
1704 1085 stx %i1, [%sp + STACK_BIAS + SA(MINFRAME) + O1_OFF]
1705 1086 stx %i2, [%sp + STACK_BIAS + SA(MINFRAME) + O2_OFF]
1706 1087 stx %i3, [%sp + STACK_BIAS + SA(MINFRAME) + O3_OFF]
1707 1088 stx %i4, [%sp + STACK_BIAS + SA(MINFRAME) + O4_OFF]
1708 1089 stx %i5, [%sp + STACK_BIAS + SA(MINFRAME) + O5_OFF]
1709 1090 stx %i6, [%sp + STACK_BIAS + SA(MINFRAME) + O6_OFF]
1710 1091 stx %i7, [%sp + STACK_BIAS + SA(MINFRAME) + O7_OFF]
1711 1092 stn %l1, [%sp + STACK_BIAS + SA(MINFRAME) + PC_OFF]
1712 1093 stn %l2, [%sp + STACK_BIAS + SA(MINFRAME) + NPC_OFF]
1713 1094 st %l3, [%sp + STACK_BIAS + SA(MINFRAME) + Y_OFF]
1714 1095 
1715 1096 mov %l4, %o3 ! %o3 = on_panic_stack
1716 1097 add %sp, STACK_BIAS + SA(MINFRAME), %o2 ! %o2 = &regs
1717 1098 mov %i1, %o1 ! %o1 = alist
1718 1099 call panicsys ! panicsys();
1719 1100 mov %i0, %o0 ! %o0 = format
1720 1101 ret
1721 1102 restore
1722 1103 
1723 1104 SET_SIZE(vpanic)
1724 1105
/*
 * void dtrace_vpanic(const char *format, va_list alist)
 * DTrace probe-context variant of vpanic(): same register snapshot,
 * but uses dtrace_flush_windows()/dtrace_panic_trigger(), then
 * branches into the shared vpanic_common tail.
 */
1725 1106 ENTRY_NP(dtrace_vpanic)
1726 1107 
1727 1108 save %sp, -SA(MINFRAME + REGSIZE), %sp ! save and allocate regs
1728 1109 
1729 1110 !
1730 1111 ! The v9 struct regs has a 64-bit r_tstate field, which we use here
1731 1112 ! to store the %ccr, %asi, %pstate, and %cwp as they would appear
1732 1113 ! in %tstate if a trap occurred. We leave it up to the debugger to
1733 1114 ! realize what happened and extract the register values.
1734 1115 !
1735 1116 rd %ccr, %l0 ! %l0 = %ccr
1736 1117 sllx %l0, TSTATE_CCR_SHIFT, %l0 ! %l0 <<= CCR_SHIFT
1737 1118 rd %asi, %l1 ! %l1 = %asi
1738 1119 sllx %l1, TSTATE_ASI_SHIFT, %l1 ! %l1 <<= ASI_SHIFT
1739 1120 or %l0, %l1, %l0 ! %l0 |= %l1
1740 1121 rdpr %pstate, %l1 ! %l1 = %pstate
1741 1122 sllx %l1, TSTATE_PSTATE_SHIFT, %l1 ! %l1 <<= PSTATE_SHIFT
1742 1123 or %l0, %l1, %l0 ! %l0 |= %l1
1743 1124 rdpr %cwp, %l1 ! %l1 = %cwp
1744 1125 sllx %l1, TSTATE_CWP_SHIFT, %l1 ! %l1 <<= CWP_SHIFT
1745 1126 or %l0, %l1, %l0 ! %l0 |= %l1
1746 1127 
1747 1128 set dtrace_vpanic, %l1 ! %l1 = %pc (vpanic)
1748 1129 add %l1, 4, %l2 ! %l2 = %npc (vpanic+4)
1749 1130 rd %y, %l3 ! %l3 = %y
1750 1131 !
1751 1132 ! Flush register windows before panic_trigger() in order to avoid a
1752 1133 ! problem that a dump hangs if flush_windows() causes another panic.
1753 1134 !
↓ open down ↓ |
107 lines elided |
↑ open up ↑ |
1754 1135 call dtrace_flush_windows
1755 1136 nop
1756 1137 
1757 1138 sethi %hi(panic_quiesce), %o0
1758 1139 call dtrace_panic_trigger
1759 1140 or %o0, %lo(panic_quiesce), %o0 ! if (!panic_trigger(
1760 1141 
1761 1142 ba,a vpanic_common		! join the common vpanic tail
1762 1143 SET_SIZE(dtrace_vpanic)
1763 1144
1764 -#endif /* lint */
1765 -
1766 -#if defined(lint)
1767 -
1768 -/*ARGSUSED*/
1769 -
1770 -uint_t
1771 -get_subcc_ccr( uint64_t addrl, uint64_t addrr)
1772 -{ return (0); }
1773 -
1774 -#else /* lint */
1775 -
/*
 * uint_t get_subcc_ccr(uint64_t addrl, uint64_t addrr)
 * Return the %ccr condition codes produced by (addrl - addrr); the
 * subtraction result itself is discarded (%g0 destination).
 */
1776 1145 ENTRY(get_subcc_ccr)
1777 1146 wr %g0, %ccr ! clear condition codes
1778 1147 subcc %o0, %o1, %g0		! set codes from addrl - addrr
1779 1148 retl
1780 1149 rd %ccr, %o0 ! return condition codes
1781 1150 SET_SIZE(get_subcc_ccr)
1782 1151
1783 -#endif /* lint */
1784 -
1785 -#if defined(lint) || defined(__lint)
1786 -
1787 -ftrace_icookie_t
1788 -ftrace_interrupt_disable(void)
1789 -{ return (0); }
1790 -
1791 -#else /* lint */
1792 -
/*
 * ftrace_icookie_t ftrace_interrupt_disable(void)
 * Same pattern as dtrace_interrupt_disable(): clear PSTATE_IE and
 * return the prior %pstate as the cookie.
 */
1793 1152 ENTRY_NP(ftrace_interrupt_disable)
1794 1153 rdpr %pstate, %o0		! cookie = old %pstate
1795 1154 andn %o0, PSTATE_IE, %o1	! clear interrupt-enable bit
1796 1155 retl
1797 1156 wrpr %g0, %o1, %pstate		! install masked %pstate (delay slot)
1798 1157 SET_SIZE(ftrace_interrupt_disable)
1799 1158
1800 -#endif /* lint */
1801 -
1802 -#if defined(lint) || defined(__lint)
1803 -
1804 -/*ARGSUSED*/
1805 -void
1806 -ftrace_interrupt_enable(ftrace_icookie_t cookie)
1807 -{}
1808 -
1809 -#else
1810 -
/*
 * void ftrace_interrupt_enable(ftrace_icookie_t cookie)
 * Restore %pstate from the cookie returned by
 * ftrace_interrupt_disable().
 */
1811 1159 ENTRY_NP(ftrace_interrupt_enable)
1812 1160 retl
1813 1161 wrpr %g0, %o0, %pstate		! %pstate = cookie (delay slot)
1814 1162 SET_SIZE(ftrace_interrupt_enable)
1815 1163
1816 -#endif /* lint*/
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX