1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 */
24
25 #if !defined(lint)
26 #include "assym.h"
27 #endif
28
29 /*
30 * General assembly language routines.
31 * It is the intent of this file to contain routines that are
32 * specific to cpu architecture.
33 */
34
/*
 * WARNING: If you add a fast trap handler which can be invoked by a
 * non-privileged user, you may have to use the FAST_TRAP_DONE macro
 * instead of "done" instruction to return back to the user mode. See
 * comments for the "fast_trap_done" entry point for more information.
 *
 * Note: "ba,a" is an annulled always-taken branch, so no delay-slot
 * instruction is executed on the way to fast_trap_done.
 */
#define	FAST_TRAP_DONE	\
	ba,a	fast_trap_done
43
44 #include <sys/machclock.h>
45 #include <sys/clock.h>
46
47 #if defined(lint)
48 #include <sys/types.h>
49 #include <sys/scb.h>
50 #include <sys/systm.h>
51 #include <sys/regset.h>
52 #include <sys/sunddi.h>
53 #include <sys/lockstat.h>
54 #endif /* lint */
55
56
57 #include <sys/asm_linkage.h>
58 #include <sys/privregs.h>
59 #include <vm/hat_sfmmu.h>
60 #include <sys/machparam.h> /* To get SYSBASE and PAGESIZE */
61 #include <sys/machthread.h>
62 #include <sys/clock.h>
63 #include <sys/intreg.h>
64 #include <sys/psr_compat.h>
65 #include <sys/isa_defs.h>
66 #include <sys/dditypes.h>
67 #include <sys/intr.h>
68 #include <sys/hypervisor_api.h>
69
70 #if !defined(lint)
71 #include "assym.h"
72 #endif
73
#define	ICACHE_FLUSHSZ	0x20	/* bytes covered per "flush"; see flush_instr_mem */
75
#if defined(lint)
/*
 * Softint generated when counter field of tick reg matches value field
 * of tick_cmpr reg
 */
/*ARGSUSED*/
void
tickcmpr_set(uint64_t clock_cycles)
{}

#else	/* lint */

	/*
	 * tickcmpr_set(clock_cycles):
	 * Program TICK_CMPR with an absolute deadline.  If the deadline is
	 * already in the past by the time we re-read the clock, retry with
	 * an exponentially growing step until the programmed value is
	 * safely in the future.
	 */
	ENTRY_NP(tickcmpr_set)
	! get 64-bit clock_cycles interval
	mov	%o0, %o2
	mov	8, %o3			! A reasonable initial step size
1:
	WR_TICKCMPR(%o2,%o4,%o5,__LINE__)	! Write to TICK_CMPR

	GET_NATIVE_TIME(%o0,%o4,%o5,__LINE__)	! Read %tick to confirm the
						! value we wrote was in the
						! future.

	cmp	%o2, %o0		! If the value we wrote was in the
	bg,pt	%xcc, 2f		! future, then blow out of here.
	sllx	%o3, 1, %o3		! If not, then double our step size,
	ba,pt	%xcc, 1b		! and take another lap.
	add	%o0, %o3, %o2		! delay: new deadline = now + step
2:
	retl
	nop
	SET_SIZE(tickcmpr_set)

#endif	/* lint */
110
#if defined(lint)

void
tickcmpr_disable(void)
{}

#else

	/*
	 * tickcmpr_disable():
	 * Disable TICK_CMPR interrupts by writing TICK_CMPR with only the
	 * interrupt-disable bit (bit TICKINT_DIS_SHFT) set.
	 */
	ENTRY_NP(tickcmpr_disable)
	mov	1, %g1
	sllx	%g1, TICKINT_DIS_SHFT, %o0	! %o0 = disable bit only
	WR_TICKCMPR(%o0,%o4,%o5,__LINE__)	! Write to TICK_CMPR
	retl
	nop
	SET_SIZE(tickcmpr_disable)

#endif
128
#if defined(lint)

/*
 * tick_write_delta() is intended to increment %stick by the specified delta,
 * but %stick is only writeable in hyperprivileged mode and at present there
 * is no provision for this. tick_write_delta is called by the cyclic subsystem
 * if a negative %stick delta is observed after cyclic processing is resumed
 * after an event such as an OS suspend/resume. On sun4v, the suspend/resume
 * routines should adjust the %stick offset preventing the cyclic subsystem
 * from detecting a negative delta. If a negative delta is detected, panic the
 * system. The negative delta could be caused by improper %stick
 * synchronization after a suspend/resume.
 */

/*ARGSUSED*/
void
tick_write_delta(uint64_t delta)
{}

#else	/* lint */

	.seg	".text"
tick_write_delta_panic:
	.asciz	"tick_write_delta: not supported, delta: 0x%lx"

	/*
	 * tick_write_delta(delta): unconditionally panic (see block comment
	 * above).  The sethi is done *before* the save so that its result,
	 * visible as %i1 after the register-window shift, can be combined
	 * with %lo() to form the panic format-string address in %o0.
	 */
	ENTRY_NP(tick_write_delta)
	sethi	%hi(tick_write_delta_panic), %o1
	save	%sp, -SA(MINFRAME), %sp ! get a new window to preserve caller
	mov	%i0, %o1		! panic arg 2: the offending delta
	call	panic
	or	%i1, %lo(tick_write_delta_panic), %o0	! delay: arg 1 = fmt
	/*NOTREACHED*/
	retl
	nop
#endif
164
#if defined(lint)
/*
 * return 1 if disabled
 */

int
tickcmpr_disabled(void)
{ return (0); }

#else	/* lint */

	/*
	 * tickcmpr_disabled(): return nonzero iff the TICK_CMPR
	 * interrupt-disable bit is set.
	 */
	ENTRY_NP(tickcmpr_disabled)
	RD_TICKCMPR(%g1,%o0,%o1,__LINE__)
	retl
	srlx	%g1, TICKINT_DIS_SHFT, %o0	! delay: isolate disable bit
	SET_SIZE(tickcmpr_disabled)

#endif	/* lint */
183
184 /*
185 * Get current tick
186 */
#if defined(lint)

u_longlong_t
gettick(void)
{ return (0); }

u_longlong_t
randtick(void)
{ return (0); }

#else	/* lint */

	/*
	 * gettick()/randtick(): return the current native time in %o0
	 * (via GET_NATIVE_TIME).  randtick is an alternate entry point
	 * for the same code.
	 */
	ENTRY(gettick)
	ALTENTRY(randtick)
	GET_NATIVE_TIME(%o0,%o2,%o3,__LINE__)
	retl
	nop
	SET_SIZE(randtick)
	SET_SIZE(gettick)

#endif	/* lint */
208
209 /*
210 * Get current tick. For trapstat use only.
211 */
#if defined (lint)

hrtime_t
rdtick()
{ return (0); }

#else
	/*
	 * rdtick(): return the raw physical tick register value.
	 * For trapstat use only (see comment above).
	 */
	ENTRY(rdtick)
	retl
	RD_TICK_PHYSICAL(%o0)		! delay: %o0 = physical %tick
	SET_SIZE(rdtick)
#endif /* lint */
224
225
226 /*
227 * Return the counter portion of the tick register.
228 */
229
#if defined(lint)

uint64_t
gettick_counter(void)
{ return(0); }

uint64_t
gettick_npt(void)
{ return(0); }

uint64_t
getstick_npt(void)
{ return(0); }

#else	/* lint */

	/*
	 * gettick_counter(): return the counter portion of the tick
	 * register (read via RD_TICK).
	 */
	ENTRY_NP(gettick_counter)
	RD_TICK(%o0,%o1,%o2,__LINE__)
	retl
	nop
	SET_SIZE(gettick_counter)

	/*
	 * gettick_npt(): return bit 63 (the NPT bit) of the physical
	 * tick register.
	 */
	ENTRY_NP(gettick_npt)
	RD_TICK_PHYSICAL(%o0)
	retl
	srlx	%o0, 63, %o0		! delay: extract bit 63
	SET_SIZE(gettick_npt)

	/*
	 * getstick_npt(): return bit 63 (the NPT bit) of the physical
	 * stick register.
	 */
	ENTRY_NP(getstick_npt)
	RD_STICK_PHYSICAL(%o0)
	retl
	srlx	%o0, 63, %o0		! delay: extract bit 63
	SET_SIZE(getstick_npt)
#endif	/* lint */
264
265 /*
266 * Provide a C callable interface to the trap that reads the hi-res timer.
267 * Returns 64-bit nanosecond timestamp in %o0 and %o1.
268 */
269
#if defined(lint)

/*
 * lint-only C stubs for the time interfaces implemented in assembly
 * below; these exist solely to give lint the correct prototypes.
 */
hrtime_t
gethrtime(void)
{
	return ((hrtime_t)0);
}

hrtime_t
gethrtime_unscaled(void)
{
	return ((hrtime_t)0);
}

hrtime_t
gethrtime_max(void)
{
	return ((hrtime_t)0);
}

void
scalehrtime(hrtime_t *hrt)
{
	*hrt = 0;
}

void
gethrestime(timespec_t *tp)
{
	tp->tv_sec = 0;
	tp->tv_nsec = 0;
}

time_t
gethrestime_sec(void)
{
	return (0);
}

void
gethrestime_lasttick(timespec_t *tp)
{
	tp->tv_sec = 0;
	tp->tv_nsec = 0;
}

/*ARGSUSED*/
void
hres_tick(void)
{
}

void
panic_hres_tick(void)
{
}

#else	/* lint */
328
	/*
	 * gethrtime(): return a 64-bit nanosecond timestamp in %o0,
	 * computed by the platform GET_HRTIME macro.
	 */
	ENTRY_NP(gethrtime)
	GET_HRTIME(%g1,%o0,%o1,%o2,%o3,%o4,%o5,%g2,__LINE__)
							! %g1 = hrtime
	retl
	mov	%g1, %o0		! delay: return value
	SET_SIZE(gethrtime)
335
	/*
	 * gethrtime_unscaled(): return the raw native time (not converted
	 * to nanoseconds) in %o0.
	 */
	ENTRY_NP(gethrtime_unscaled)
	GET_NATIVE_TIME(%g1,%o2,%o3,__LINE__)	! %g1 = native time
	retl
	mov	%g1, %o0		! delay: return value
	SET_SIZE(gethrtime_unscaled)
341
	/*
	 * gethrtime_waitfree()/dtrace_gethrtime(): read native time and
	 * scale it to nanoseconds.  Unlike gethrtime(), this path takes
	 * no locks (it bypasses the GET_HRTIME protocol).
	 */
	ENTRY_NP(gethrtime_waitfree)
	ALTENTRY(dtrace_gethrtime)
	GET_NATIVE_TIME(%g1,%o2,%o3,__LINE__)	! %g1 = native time
	NATIVE_TIME_TO_NSEC(%g1, %o2, %o3)	! %g1 = nanoseconds
	retl
	mov	%g1, %o0		! delay: return value
	SET_SIZE(dtrace_gethrtime)
	SET_SIZE(gethrtime_waitfree)
350
	/*
	 * gethrtime_max(): return the maximum native time scaled to
	 * nanoseconds; if scaling overflows into the sign bit, clamp the
	 * result to INT64_MAX since hrtime_t is signed.
	 */
	ENTRY(gethrtime_max)
	NATIVE_TIME_MAX(%g1)
	NATIVE_TIME_TO_NSEC(%g1, %o0, %o1)

	! hrtime_t's are signed, max hrtime_t must be positive
	mov	-1, %o2
	brlz,a	%g1, 1f			! overflowed negative?
	srlx	%o2, 1, %g1		! annulled delay: %g1 = INT64_MAX
1:
	retl
	mov	%g1, %o0		! delay: return value
	SET_SIZE(gethrtime_max)
363
	/*
	 * scalehrtime(hrt): convert *hrt from native time units to
	 * nanoseconds, in place.
	 */
	ENTRY(scalehrtime)
	ldx	[%o0], %o1		! %o1 = *hrt (native units)
	NATIVE_TIME_TO_NSEC(%o1, %o2, %o3)
	retl
	stx	%o1, [%o0]		! delay: *hrt = nanoseconds
	SET_SIZE(scalehrtime)
370
/*
 * Fast trap to return a timestamp, uses trap window, leaves traps
 * disabled.  Returns a 64-bit nanosecond timestamp in %o0 and %o1.
 *
 * This is the handler for the ST_GETHRTIME trap.
 */

	ENTRY_NP(get_timestamp)
	GET_HRTIME(%g1,%g2,%g3,%g4,%g5,%o0,%o1,%o2,__LINE__)
							! %g1 = hrtime
	srlx	%g1, 32, %o0			! %o0 = hi32(%g1)
	srl	%g1, 0, %o1			! %o1 = lo32(%g1)
	FAST_TRAP_DONE
	SET_SIZE(get_timestamp)
385
/*
 * Macro to convert GET_HRESTIME() bits into a timestamp.
 *
 * We use two separate macros so that the platform-dependent GET_HRESTIME()
 * can be as small as possible; CONV_HRESTIME() implements the generic part.
 *
 * Inputs:  hrestsec/hrestnsec = snapshot of hrestime sec/nsec,
 *          adj  = hrestime_adj snapshot,
 *          nslt = native time since the last tick,
 *          nano = NANOSEC.
 * Output:  hrestsec/hrestnsec hold the current, normalized time
 *          (tv_nsec < NANOSEC); adj and nslt are clobbered.
 */
#define	CONV_HRESTIME(hrestsec, hrestnsec, adj, nslt, nano) \
	brz,pt	adj, 3f;		/* no adjustments, it's easy */	\
	add	hrestnsec, nslt, hrestnsec; /* hrest.tv_nsec += nslt */	\
	brlz,pn	adj, 2f;		/* if hrestime_adj negative */	\
	srlx	nslt, ADJ_SHIFT, nslt;	/* delay: nslt >>= 4 */		\
	subcc	adj, nslt, %g0;		/* hrestime_adj - nslt/16 */	\
	movg	%xcc, nslt, adj;	/* adj by min(adj, nslt/16) */	\
	ba	3f;			/* go convert to sec/nsec */	\
	add	hrestnsec, adj, hrestnsec; /* delay: apply adjustment */ \
2:	addcc	adj, nslt, %g0;		/* hrestime_adj + nslt/16 */	\
	bge,a,pt %xcc, 3f;		/* is adj less negative? */	\
	add	hrestnsec, adj, hrestnsec; /* yes: hrest.nsec += adj */	\
	sub	hrestnsec, nslt, hrestnsec; /* no: hrest.nsec -= nslt/16 */ \
3:	cmp	hrestnsec, nano;	/* more than a billion? */	\
	bl,pt	%xcc, 4f;		/* if not, we're done */	\
	nop;				/* delay: do nothing :( */	\
	add	hrestsec, 1, hrestsec;	/* hrest.tv_sec++; */		\
	sub	hrestnsec, nano, hrestnsec; /* hrest.tv_nsec -= NANOSEC; */ \
	ba,a	3b;			/* check >= billion again */	\
4:
412
	/*
	 * gethrestime(tp): store the current wall-clock time into the
	 * timestruc_t at *%o0 (seconds, then nanoseconds at CLONGSIZE).
	 */
	ENTRY_NP(gethrestime)
	GET_HRESTIME(%o1,%o2,%o3,%o4,%o5,%g1,%g2,%g3,%g4,__LINE__)
	CONV_HRESTIME(%o1, %o2, %o3, %o4, %o5)
	stn	%o1, [%o0]		! tp->tv_sec
	retl
	stn	%o2, [%o0 + CLONGSIZE]	! delay: tp->tv_nsec
	SET_SIZE(gethrestime)
420
/*
 * Similar to gethrestime(), but gethrestime_sec() returns current hrestime
 * seconds.
 */
	ENTRY_NP(gethrestime_sec)
	GET_HRESTIME(%o0,%o2,%o3,%o4,%o5,%g1,%g2,%g3,%g4,__LINE__)
	CONV_HRESTIME(%o0, %o2, %o3, %o4, %o5)
	retl				! %o0 current hrestime seconds
	nop
	SET_SIZE(gethrestime_sec)
431
/*
 * Returns the hrestime on the last tick.  This is simpler than gethrestime()
 * and gethrestime_sec(): no conversion is required.  gethrestime_lasttick()
 * follows the same locking algorithm as GET_HRESTIME and GET_HRTIME,
 * outlined in detail in clock.h.  (Unlike GET_HRESTIME/GET_HRTIME, we don't
 * rely on load dependencies to effect the membar #LoadLoad, instead declaring
 * it explicitly.)
 */
	ENTRY_NP(gethrestime_lasttick)
	sethi	%hi(hres_lock), %o1
0:
	lduw	[%o1 + %lo(hres_lock)], %o2	! Load lock value
	membar	#LoadLoad			! Load of lock must complete
	andn	%o2, 1, %o2			! Mask off lowest bit; a held
						! lock (odd value) can never
						! match the reload below,
						! forcing a retry
	ldn	[%o1 + %lo(hrestime)], %g1	! Seconds.
	add	%o1, %lo(hrestime), %o4
	ldn	[%o4 + CLONGSIZE], %g2		! Nanoseconds.
	membar	#LoadLoad			! All loads must complete
	lduw	[%o1 + %lo(hres_lock)], %o3	! Reload lock value
	cmp	%o3, %o2			! If lock is locked or has
	bne	0b				! changed, retry.
	stn	%g1, [%o0]			! Delay: store seconds
	retl
	stn	%g2, [%o0 + CLONGSIZE]		! Delay: store nanoseconds
	SET_SIZE(gethrestime_lasttick)
457
/*
 * Fast trap for gettimeofday().  Returns a timestruc_t in %o0 and %o1
 * (seconds in %o0, nanoseconds in %o1).
 *
 * This is the handler for the ST_GETHRESTIME trap.
 */

	ENTRY_NP(get_hrestime)
	GET_HRESTIME(%o0,%o1,%g1,%g2,%g3,%g4,%g5,%o2,%o3,__LINE__)
	CONV_HRESTIME(%o0, %o1, %g1, %g2, %g3)
	FAST_TRAP_DONE
	SET_SIZE(get_hrestime)
469
/*
 * Fast trap to return lwp virtual time, uses trap window, leaves traps
 * disabled.  Returns a 64-bit number in %o0:%o1, which is the number
 * of nanoseconds consumed.
 *
 * This is the handler for the ST_GETHRVTIME trap.
 *
 * Register usage:
 *	%o0, %o1 = return lwp virtual time
 * 	%o2 = CPU/thread
 * 	%o3 = lwp
 * 	%g1 = scratch
 * 	%g5 = scratch
 */
	ENTRY_NP(get_virtime)
	GET_NATIVE_TIME(%g5,%g1,%g2,__LINE__)	! %g5 = native time in ticks
	CPU_ADDR(%g2, %g3)			! CPU struct ptr to %g2
	ldn	[%g2 + CPU_THREAD], %g2		! thread pointer to %g2
	ldn	[%g2 + T_LWP], %g3		! lwp pointer to %g3

	/*
	 * Subtract start time of current microstate from time
	 * of day to get increment for lwp virtual time.
	 */
	ldx	[%g3 + LWP_STATE_START], %g1	! ms_state_start
	sub	%g5, %g1, %g5			! %g5 = time in current state

	/*
	 * Add current value of ms_acct[LMS_USER]
	 */
	ldx	[%g3 + LWP_ACCT_USER], %g1	! ms_acct[LMS_USER]
	add	%g5, %g1, %g5			! %g5 = total user ticks
	NATIVE_TIME_TO_NSEC(%g5, %g1, %o0)	! %g5 = nanoseconds

	srl	%g5, 0, %o1			! %o1 = lo32(%g5)
	srlx	%g5, 32, %o0			! %o0 = hi32(%g5)

	FAST_TRAP_DONE
	SET_SIZE(get_virtime)
509


	.seg	".text"
hrtime_base_panic:
	.asciz	"hrtime_base stepping back"


	/*
	 * hres_tick(): advance the hi-res clock state under hres_lock:
	 * update hres_last_tick and hrtime_base from the current native
	 * time, apply any pending hrestime_adj/timedelta adjustment, and
	 * roll hrestime forward (setting one_sec when tv_sec advances).
	 * Panics if hrtime_base would step backwards.
	 */
	ENTRY_NP(hres_tick)
	save	%sp, -SA(MINFRAME), %sp	! get a new window

	sethi	%hi(hrestime), %l4	! %l4 = common hi22 bits for all
					! of the clock variables (they
					! share a cache line; see below)
	ldstub	[%l4 + %lo(hres_lock + HRES_LOCK_OFFSET)], %l5	! try locking
7:	tst	%l5
	bz,pt	%xcc, 8f		! if we got it, drive on
	ld	[%l4 + %lo(nsec_scale)], %l5	! delay: %l5 = scaling factor
	ldub	[%l4 + %lo(hres_lock + HRES_LOCK_OFFSET)], %l5
9:	tst	%l5			! spin reading (not ldstub-ing)
	bz,a,pn	%xcc, 7b		! until the lock looks free, then
	ldstub	[%l4 + %lo(hres_lock + HRES_LOCK_OFFSET)], %l5	! try again
	ba,pt	%xcc, 9b
	ldub	[%l4 + %lo(hres_lock + HRES_LOCK_OFFSET)], %l5
8:
	membar	#StoreLoad|#StoreStore	! lock acquired; fence before use

	!
	! update hres_last_tick.  %l5 has the scaling factor (nsec_scale).
	!
	ldx	[%l4 + %lo(hrtime_base)], %g1	! load current hrtime_base
	GET_NATIVE_TIME(%l0,%l3,%l6,__LINE__)	! current native time
	stx	%l0, [%l4 + %lo(hres_last_tick)]! prev = current
	! convert native time to nsecs
	NATIVE_TIME_TO_NSEC_SCALE(%l0, %l5, %l2, NSEC_SHIFT)

	sub	%l0, %g1, %i1		! get accurate nsec delta

	ldx	[%l4 + %lo(hrtime_base)], %l1
	cmp	%l1, %l0		! would hrtime_base go backwards?
	bg,pn	%xcc, 9f		! yes: release lock and panic
	nop

	stx	%l0, [%l4 + %lo(hrtime_base)]	! update hrtime_base

	!
	! apply adjustment, if any
	!
	ldx	[%l4 + %lo(hrestime_adj)], %l0	! %l0 = hrestime_adj
	brz	%l0, 2f
						! hrestime_adj == 0 ?
						! yes, skip adjustments
	clr	%l5				! delay: set adj to zero
	tst	%l0				! is hrestime_adj >= 0 ?
	bge,pt	%xcc, 1f			! yes, go handle positive case
	srl	%i1, ADJ_SHIFT, %l5		! delay: %l5 = adj

	addcc	%l0, %l5, %g0			! hrestime_adj < -adj ?
	bl,pt	%xcc, 2f			! yes, use current adj
	neg	%l5				! delay: %l5 = -adj
	ba,pt	%xcc, 2f
	mov	%l0, %l5			! no, so set adj = hrestime_adj
1:
	subcc	%l0, %l5, %g0			! hrestime_adj < adj ?
	bl,a,pt	%xcc, 2f			! yes, set adj = hrestime_adj
	mov	%l0, %l5			! delay: adj = hrestime_adj
2:
	ldx	[%l4 + %lo(timedelta)], %l0	! %l0 = timedelta
	sub	%l0, %l5, %l0			! timedelta -= adj

	stx	%l0, [%l4 + %lo(timedelta)]	! store new timedelta
	stx	%l0, [%l4 + %lo(hrestime_adj)]	! hrestime_adj = timedelta

	or	%l4, %lo(hrestime), %l2
	ldn	[%l2], %i2			! %i2:%i3 = hrestime sec:nsec
	ldn	[%l2 + CLONGSIZE], %i3
	add	%i3, %l5, %i3			! hrestime.nsec += adj
	add	%i3, %i1, %i3			! hrestime.nsec += nslt

	set	NANOSEC, %l5			! %l5 = NANOSEC
	cmp	%i3, %l5
	bl,pt	%xcc, 5f			! if hrestime.tv_nsec < NANOSEC
	sethi	%hi(one_sec), %i1		! delay
	add	%i2, 0x1, %i2			! hrestime.tv_sec++
	sub	%i3, %l5, %i3			! hrestime.tv_nsec - NANOSEC
	mov	0x1, %l5
	st	%l5, [%i1 + %lo(one_sec)]	! one_sec = 1
5:
	stn	%i2, [%l2]
	stn	%i3, [%l2 + CLONGSIZE]		! store the new hrestime

	membar	#StoreStore			! updates visible before unlock

	ld	[%l4 + %lo(hres_lock)], %i1
	inc	%i1				! release lock
	st	%i1, [%l4 + %lo(hres_lock)]	! clear hres_lock

	ret
	restore

9:
	!
	! release hres_lock
	!
	ld	[%l4 + %lo(hres_lock)], %i1
	inc	%i1
	st	%i1, [%l4 + %lo(hres_lock)]

	sethi	%hi(hrtime_base_panic), %o0
	call	panic
	or	%o0, %lo(hrtime_base_panic), %o0	! delay: arg 1 = fmt

	SET_SIZE(hres_tick)
620
621 #endif /* lint */
622
#if !defined(lint) && !defined(__lint)

	.seg	".text"
kstat_q_panic_msg:
	.asciz	"kstat_q_exit: qlen == 0"

	/*
	 * kstat_q_panic(): panic target for the DEBUG kstat queue macros
	 * below, taken when a queue-exit is attempted with qlen == 0.
	 */
	ENTRY(kstat_q_panic)
	save	%sp, -SA(MINFRAME), %sp
	sethi	%hi(kstat_q_panic_msg), %o0
	call	panic
	or	%o0, %lo(kstat_q_panic_msg), %o0	! delay: arg 1 = fmt
	/*NOTREACHED*/
	SET_SIZE(kstat_q_panic)
636
#define	BRZPN	brz,pn
#define	BRZPT	brz,pt

/*
 * KSTAT_Q_UPDATE(QOP, QBR, QZERO, QRETURN, QTYPE):
 * Common body for the kstat wait/run queue update routines below.
 * On entry %g1 holds the current native time.  QOP is add/sub
 * (enter/exit), QBR branches to QZERO when the *old* queue length is
 * zero, QRETURN supplies the return (or fall-through) sequence, and
 * QTYPE selects the KSTAT_IO_W or KSTAT_IO_R field offsets.
 */
#define	KSTAT_Q_UPDATE(QOP, QBR, QZERO, QRETURN, QTYPE) \
	ld	[%o0 + QTYPE/**/CNT], %o1;	/* %o1 = old qlen */	\
	QOP	%o1, 1, %o2;			/* %o2 = new qlen */	\
	QBR	%o1, QZERO;			/* done if qlen == 0 */	\
	st	%o2, [%o0 + QTYPE/**/CNT];	/* delay: save qlen */	\
	ldx	[%o0 + QTYPE/**/LASTUPDATE], %o3;			\
	ldx	[%o0 + QTYPE/**/TIME], %o4;	/* %o4 = old time */	\
	ldx	[%o0 + QTYPE/**/LENTIME], %o5;	/* %o5 = old lentime */	\
	sub	%g1, %o3, %o2;			/* %o2 = time delta */	\
	mulx	%o1, %o2, %o3;			/* %o3 = cur lentime */	\
	add	%o4, %o2, %o4;			/* %o4 = new time */	\
	add	%o5, %o3, %o5;			/* %o5 = new lentime */	\
	stx	%o4, [%o0 + QTYPE/**/TIME];	/* save time */		\
	stx	%o5, [%o0 + QTYPE/**/LENTIME];	/* save lentime */	\
	QRETURN;							\
	stx	%g1, [%o0 + QTYPE/**/LASTUPDATE]; /* lastupdate = now */
656
#if !defined(DEBUG)
/*
 * KSTAT_Q_UPDATE_ND(QOP, QRETURN, QTYPE):
 * same as KSTAT_Q_UPDATE but without the qlen == 0 check:
 *	QBR	%o1, QZERO;
 * to be used only with non-debug build.  mimics ASSERT() behaviour.
 */
#define	KSTAT_Q_UPDATE_ND(QOP, QRETURN, QTYPE) \
	ld	[%o0 + QTYPE/**/CNT], %o1;	/* %o1 = old qlen */	\
	QOP	%o1, 1, %o2;			/* %o2 = new qlen */	\
	st	%o2, [%o0 + QTYPE/**/CNT];	/* delay: save qlen */	\
	ldx	[%o0 + QTYPE/**/LASTUPDATE], %o3;			\
	ldx	[%o0 + QTYPE/**/TIME], %o4;	/* %o4 = old time */	\
	ldx	[%o0 + QTYPE/**/LENTIME], %o5;	/* %o5 = old lentime */	\
	sub	%g1, %o3, %o2;			/* %o2 = time delta */	\
	mulx	%o1, %o2, %o3;			/* %o3 = cur lentime */	\
	add	%o4, %o2, %o4;			/* %o4 = new time */	\
	add	%o5, %o3, %o5;			/* %o5 = new lentime */	\
	stx	%o4, [%o0 + QTYPE/**/TIME];	/* save time */		\
	stx	%o5, [%o0 + QTYPE/**/LENTIME];	/* save lentime */	\
	QRETURN;							\
	stx	%g1, [%o0 + QTYPE/**/LASTUPDATE]; /* lastupdate = now */
#endif
679
	/* kstat_waitq_enter(kiop): account an entry onto the wait queue */
	.align 16
	ENTRY(kstat_waitq_enter)
	GET_NATIVE_TIME(%g1,%g2,%g3,__LINE__)
	KSTAT_Q_UPDATE(add, BRZPT, 1f, 1:retl, KSTAT_IO_W)
	SET_SIZE(kstat_waitq_enter)
685
	/* kstat_waitq_exit(kiop): account an exit from the wait queue */
	.align 16
	ENTRY(kstat_waitq_exit)
	GET_NATIVE_TIME(%g1,%g2,%g3,__LINE__)
#if defined(DEBUG)
	KSTAT_Q_UPDATE(sub, BRZPN, kstat_q_panic, retl, KSTAT_IO_W)
#else
	KSTAT_Q_UPDATE_ND(sub, retl, KSTAT_IO_W)
#endif
	SET_SIZE(kstat_waitq_exit)
695
	/* kstat_runq_enter(kiop): account an entry onto the run queue */
	.align 16
	ENTRY(kstat_runq_enter)
	GET_NATIVE_TIME(%g1,%g2,%g3,__LINE__)
	KSTAT_Q_UPDATE(add, BRZPT, 1f, 1:retl, KSTAT_IO_R)
	SET_SIZE(kstat_runq_enter)
701
	/* kstat_runq_exit(kiop): account an exit from the run queue */
	.align 16
	ENTRY(kstat_runq_exit)
	GET_NATIVE_TIME(%g1,%g2,%g3,__LINE__)
#if defined(DEBUG)
	KSTAT_Q_UPDATE(sub, BRZPN, kstat_q_panic, retl, KSTAT_IO_R)
#else
	KSTAT_Q_UPDATE_ND(sub, retl, KSTAT_IO_R)
#endif
	SET_SIZE(kstat_runq_exit)
711
	/* kstat_waitq_to_runq(kiop): move one entry from wait to run queue */
	.align 16
	ENTRY(kstat_waitq_to_runq)
	GET_NATIVE_TIME(%g1,%g2,%g3,__LINE__)
#if defined(DEBUG)
	KSTAT_Q_UPDATE(sub, BRZPN, kstat_q_panic, 1:, KSTAT_IO_W)
#else
	KSTAT_Q_UPDATE_ND(sub, 1:, KSTAT_IO_W)
#endif
	KSTAT_Q_UPDATE(add, BRZPT, 1f, 1:retl, KSTAT_IO_R)
	SET_SIZE(kstat_waitq_to_runq)
722
	/* kstat_runq_back_to_waitq(kiop): move one entry from run to wait queue */
	.align 16
	ENTRY(kstat_runq_back_to_waitq)
	GET_NATIVE_TIME(%g1,%g2,%g3,__LINE__)
#if defined(DEBUG)
	KSTAT_Q_UPDATE(sub, BRZPN, kstat_q_panic, 1:, KSTAT_IO_R)
#else
	KSTAT_Q_UPDATE_ND(sub, 1:, KSTAT_IO_R)
#endif
	KSTAT_Q_UPDATE(add, BRZPT, 1f, 1:retl, KSTAT_IO_W)
	SET_SIZE(kstat_runq_back_to_waitq)
733
734 #endif /* lint */
735
#ifdef lint
/*
 * lint-only C declarations matching the assembly data definitions below.
 */
int64_t timedelta;
hrtime_t hres_last_tick;
volatile timestruc_t hrestime;
int64_t hrestime_adj;
volatile int hres_lock;
uint_t nsec_scale;
hrtime_t hrtime_base;
int traptrace_use_stick;

#else
/*
 *  -- WARNING --
 *
 * The following variables MUST be together on a 128-byte boundary.
 * In addition to the primary performance motivation (having them all
 * on the same cache line(s)), code here and in the GET*TIME() macros
 * assumes that they all have the same high 22 address bits (so
 * there's only one sethi).
 */
	.seg	".data"
	.global	timedelta, hres_last_tick, hrestime, hrestime_adj
	.global	hres_lock, nsec_scale, hrtime_base, traptrace_use_stick
	.global	nsec_shift, adj_shift, native_tick_offset, native_stick_offset

	/* XXX - above comment claims 128-bytes is necessary */
	.align	64
timedelta:
	.word	0, 0		/* int64_t */
hres_last_tick:
	.word	0, 0		/* hrtime_t */
hrestime:
	.nword	0, 0		/* 2 longs */
hrestime_adj:
	.word	0, 0		/* int64_t */
hres_lock:
	.word	0
nsec_scale:
	.word	0
hrtime_base:
	.word	0, 0
traptrace_use_stick:
	.word	0
nsec_shift:
	.word	NSEC_SHIFT
adj_shift:
	.word	ADJ_SHIFT
	.align	8		/* 8-byte alignment for 64-bit loads */
native_tick_offset:
	.word	0, 0
	.align	8
native_stick_offset:
	.word	0, 0

#endif
792
793
794 /*
795 * drv_usecwait(clock_t n) [DDI/DKI - section 9F]
796 * usec_delay(int n) [compatibility - should go one day]
797 * Delay by spinning.
798 *
799 * delay for n microseconds. numbers <= 0 delay 1 usec
800 *
801 * With UltraSPARC-III the combination of supporting mixed-speed CPUs
802 * and variable clock rate for power management requires that we
803 * use %stick to implement this routine.
804 */
805
#if defined(lint)

/*ARGSUSED*/
void
drv_usecwait(clock_t n)
{}

/*ARGSUSED*/
void
usec_delay(int n)
{}

#else	/* lint */

	/*
	 * drv_usecwait(n)/usec_delay(n):
	 * Busy-wait for at least n microseconds (n <= 0 waits 1 usec) by
	 * scaling n with sticks_per_usec and spinning on the native clock.
	 */
	ENTRY(drv_usecwait)
	ALTENTRY(usec_delay)
	brlez,a,pn %o0, 0f		! n <= 0?
	mov	1, %o0			! annulled delay: clamp n to 1 usec
0:
	sethi	%hi(sticks_per_usec), %o1
	lduw	[%o1 + %lo(sticks_per_usec)], %o1
	mulx	%o1, %o0, %o1		! Scale usec to ticks
	inc	%o1			! We don't start on a tick edge
	GET_NATIVE_TIME(%o2,%o3,%o4,__LINE__)
	add	%o1, %o2, %o1		! %o1 = absolute deadline

1:	cmp	%o1, %o2		! spin until deadline passes
	GET_NATIVE_TIME(%o2,%o3,%o4,__LINE__)
	bgeu,pt	%xcc, 1b
	nop
	retl
	nop
	SET_SIZE(usec_delay)
	SET_SIZE(drv_usecwait)
#endif	/* lint */
841
#if defined(lint)

/* ARGSUSED */
void
pil14_interrupt(int level)
{}

#else

/*
 * Level-14 interrupt prologue.
 * Records the interrupted PIL and the interrupted PC in the CPU's
 * profiling fields: kernel PC (and zeroed user PC) when the trap came
 * from supervisor mode, user PC (and zeroed kernel PC) otherwise.
 */
	ENTRY_NP(pil14_interrupt)
	CPU_ADDR(%g1, %g2)
	rdpr	%pil, %g6			! %g6 = interrupted PIL
	stn	%g6, [%g1 + CPU_PROFILE_PIL]	! record interrupted PIL
	rdpr	%tstate, %g6
	rdpr	%tpc, %g5
	btst	TSTATE_PRIV, %g6		! trap from supervisor mode?
	bnz,a,pt %xcc, 1f
	stn	%g5, [%g1 + CPU_PROFILE_PC]	! if so, record kernel PC
	stn	%g5, [%g1 + CPU_PROFILE_UPC]	! if not, record user PC
	ba	pil_interrupt_common		! must be large-disp branch
	stn	%g0, [%g1 + CPU_PROFILE_PC]	! zero kernel PC
1:	ba	pil_interrupt_common		! must be large-disp branch
	stn	%g0, [%g1 + CPU_PROFILE_UPC]	! zero user PC
	SET_SIZE(pil14_interrupt)
869
	ENTRY_NP(tick_rtt)
	!
	! Load TICK_COMPARE into %o5; if bit 63 is set, then TICK_COMPARE is
	! disabled.  If TICK_COMPARE is enabled, we know that we need to
	! reenqueue the interrupt request structure.  We'll then check TICKINT
	! in SOFTINT; if it's set, then we know that we were in a TICK_COMPARE
	! interrupt.  In this case, TICK_COMPARE may have been rewritten
	! recently; we'll compare %o5 to the current time to verify that it's
	! in the future.
	!
	! Note that %o5 is live until after 1f.
	! XXX - there is a subroutine call while %o5 is live!
	!
	RD_TICKCMPR(%o5,%g1,%g2,__LINE__)
	srlx	%o5, TICKINT_DIS_SHFT, %g1	! %g1 = disable bit
	brnz,pt	%g1, 2f				! disabled: nothing to do
	nop

	rdpr	%pstate, %g5
	andn	%g5, PSTATE_IE, %g1
	wrpr	%g0, %g1, %pstate		! Disable vec interrupts

	sethi	%hi(cbe_level14_inum), %o1
	ldx	[%o1 + %lo(cbe_level14_inum)], %o1
	call	intr_enqueue_req		! preserves %o5 and %g5
	mov	PIL_14, %o0			! delay: arg 1 = PIL

	! Check SOFTINT for TICKINT/STICKINT
	rd	SOFTINT, %o4
	set	(TICK_INT_MASK | STICK_INT_MASK), %o0
	andcc	%o4, %o0, %g0
	bz,a,pn	%icc, 2f			! neither pending: done
	wrpr	%g0, %g5, %pstate		! annulled delay: enable vec intr

	! clear TICKINT/STICKINT
	wr	%o0, CLEAR_SOFTINT

	!
	! Now that we've cleared TICKINT, we can reread %tick and confirm
	! that the value we programmed is still in the future.  If it isn't,
	! we need to reprogram TICK_COMPARE to fire as soon as possible.
	!
	GET_NATIVE_TIME(%o0,%g1,%g2,__LINE__)	! %o0 = tick
	cmp	%o5, %o0			! In the future?
	bg,a,pt	%xcc, 2f			! Yes, drive on.
	wrpr	%g0, %g5, %pstate		! delay: enable vec intr

	!
	! If we're here, then we have programmed TICK_COMPARE with a %tick
	! which is in the past; we'll now load an initial step size, and loop
	! until we've managed to program TICK_COMPARE to fire in the future.
	!
	mov	8, %o4				! 8 = arbitrary initial step
1:	add	%o0, %o4, %o5			! Add the step
	WR_TICKCMPR(%o5,%g1,%g2,__LINE__)	! Write to TICK_CMPR
	GET_NATIVE_TIME(%o0,%g1,%g2,__LINE__)	! %o0 = tick
	cmp	%o5, %o0			! In the future?
	bg,a,pt	%xcc, 2f			! Yes, drive on.
	wrpr	%g0, %g5, %pstate		! delay: enable vec intr
	ba	1b				! No, try again.
	sllx	%o4, 1, %o4			! delay: double step size

2:	ba	current_thread_complete
	nop
	SET_SIZE(tick_rtt)
935
936 #endif /* lint */
937
#if defined(lint)

/* ARGSUSED */
void
pil15_interrupt(int level)
{}

#else	/* lint */

/*
 * Level-15 interrupt prologue.
 * Records the interrupted PC in the CPU's cpc-profiling fields: kernel
 * PC (and zeroed user PC) when the trap came from supervisor mode,
 * user PC (and zeroed kernel PC) otherwise.
 */
	ENTRY_NP(pil15_interrupt)
	CPU_ADDR(%g1, %g2)
	rdpr	%tstate, %g6
	rdpr	%tpc, %g5
	btst	TSTATE_PRIV, %g6		! trap from supervisor mode?
	bnz,a,pt %xcc, 1f
	stn	%g5, [%g1 + CPU_CPCPROFILE_PC]	! if so, record kernel PC
	stn	%g5, [%g1 + CPU_CPCPROFILE_UPC]	! if not, record user PC
	ba	pil15_epilogue			! must be large-disp branch
	stn	%g0, [%g1 + CPU_CPCPROFILE_PC]	! zero kernel PC
1:	ba	pil15_epilogue			! must be large-disp branch
	stn	%g0, [%g1 + CPU_CPCPROFILE_UPC]	! zero user PC
	SET_SIZE(pil15_interrupt)

#endif	/* lint */
963
964 #endif /* lint */
965
#if defined(lint)
/*
 * Prefetch a page_t for write or read, this assumes a linear
 * scan of sequential page_t's.
 */
/*ARGSUSED*/
void
prefetch_page_w(void *pp)
{}

/*ARGSUSED*/
void
prefetch_page_r(void *pp)
{}
#else	/* lint */

/* XXXQ These should be inline templates, not functions */
	/* No-op on this platform: both entries simply return. */
	ENTRY(prefetch_page_w)
	retl
	nop
	SET_SIZE(prefetch_page_w)

	ENTRY(prefetch_page_r)
	retl
	nop
	SET_SIZE(prefetch_page_r)

#endif	/* lint */
994
#if defined(lint)
/*
 * Prefetch struct smap for write.
 */
/*ARGSUSED*/
void
prefetch_smap_w(void *smp)
{}
#else	/* lint */

/* XXXQ These should be inline templates, not functions */
	/* No-op on this platform: simply returns. */
	ENTRY(prefetch_smap_w)
	retl
	nop
	SET_SIZE(prefetch_smap_w)

#endif	/* lint */
1012
1013 /*
1014 * Generic sun4v MMU and Cache operations.
1015 */
1016
#if defined(lint)

/*
 * lint-only C stubs for the MMU/cache routines implemented in assembly
 * below; these exist solely to give lint the correct prototypes.
 */
/*ARGSUSED*/
void
vtag_flushpage(caddr_t vaddr, uint64_t sfmmup)
{}

/*ARGSUSED*/
void
vtag_flushall(void)
{}

/*ARGSUSED*/
void
vtag_unmap_perm_tl1(uint64_t vaddr, uint64_t ctxnum)
{}

/*ARGSUSED*/
void
vtag_flushpage_tl1(uint64_t vaddr, uint64_t sfmmup)
{}

/*ARGSUSED*/
void
vtag_flush_pgcnt_tl1(uint64_t vaddr, uint64_t sfmmup_pgcnt)
{}

/*ARGSUSED*/
void
vtag_flushall_tl1(uint64_t dummy1, uint64_t dummy2)
{}

/*ARGSUSED*/
void
vac_flushpage(pfn_t pfnum, int vcolor)
{}

/*ARGSUSED*/
void
vac_flushpage_tl1(uint64_t pfnum, uint64_t vcolor)
{}

/*ARGSUSED*/
void
flush_instr_mem(caddr_t vaddr, size_t len)
{}

#else	/* lint */
1065
	ENTRY_NP(vtag_flushpage)
	/*
	 * flush page from the tlb
	 *
	 *	%o0 = vaddr
	 *	%o1 = sfmmup
	 *
	 * Unmaps the page via the MMU_UNMAP_ADDR hypervisor trap; any
	 * nonzero hcall status panics via panic_bad_hcall.
	 */
	SFMMU_CPU_CNUM(%o1, %g1, %g2)	/* %g1 = sfmmu cnum on this CPU */

	mov	%g1, %o1		! arg 2: context number
	mov	MAP_ITLB | MAP_DTLB, %o2	! arg 3: flush both TLBs
	ta	MMU_UNMAP_ADDR
	brz,pt	%o0, 1f			! status 0 == success
	nop
	ba	panic_bad_hcall
	mov	MMU_UNMAP_ADDR, %o1	! delay: which hcall failed
1:
 	retl
	nop
	SET_SIZE(vtag_flushpage)
1086
	/*
	 * vtag_flushall(): demap the entire TLB (I and D) via the
	 * MMU_DEMAP_ALL fast trap; panics via panic_bad_hcall on failure.
	 */
	ENTRY_NP(vtag_flushall)
	mov	%g0, %o0	! XXX no cpu list yet
	mov	%g0, %o1	! XXX no cpu list yet
	mov	MAP_ITLB | MAP_DTLB, %o2
	mov	MMU_DEMAP_ALL, %o5
	ta	FAST_TRAP
	brz,pt	%o0, 1f		! status 0 == success
	nop
	ba	panic_bad_hcall
	mov	MMU_DEMAP_ALL, %o1	! delay: which hcall failed
1:
	retl
	nop
	SET_SIZE(vtag_flushall)
1101
	ENTRY_NP(vtag_unmap_perm_tl1)
	/*
	 * x-trap to unmap perm map entry
	 *	%g1 = vaddr
	 *	%g2 = ctxnum (KCONTEXT only)
	 *
	 * Runs at trap level; %o0-%o2/%o5 are preserved in %g3-%g6 around
	 * the UNMAP_PERM_ADDR fast trap.  On failure, ptl1_panic is taken
	 * with a reason code refined by the hcall error (ENOMAP/EINVAL).
	 */
	mov	%o0, %g3		! save %o0-%o2, %o5
	mov	%o1, %g4
	mov	%o2, %g5
	mov	%o5, %g6
	mov	%g1, %o0		! arg 1: vaddr
	mov	%g2, %o1		! arg 2: context number
	mov	MAP_ITLB | MAP_DTLB, %o2
	mov	UNMAP_PERM_ADDR, %o5
	ta	FAST_TRAP
	brz,pt	%o0, 1f			! status 0 == success
	nop

	mov	PTL1_BAD_HCALL, %g1	! default panic reason

	cmp	%o0, H_ENOMAP
	move	%xcc, PTL1_BAD_HCALL_UNMAP_PERM_ENOMAP, %g1

	cmp	%o0, H_EINVAL
	move	%xcc, PTL1_BAD_HCALL_UNMAP_PERM_EINVAL, %g1

	ba,a	ptl1_panic
1:
	mov	%g6, %o5		! restore %o5, %o2-%o0
	mov	%g5, %o2
	mov	%g4, %o1
	mov	%g3, %o0
	retry
	SET_SIZE(vtag_unmap_perm_tl1)
1136
	ENTRY_NP(vtag_flushpage_tl1)
	/*
	 * x-trap to flush page from tlb and tsb
	 *
	 *	%g1 = vaddr, zero-extended on 32-bit kernel
	 *	%g2 = sfmmup
	 *
	 * assumes TSBE_TAG = 0
	 *
	 * Runs at trap level; %o0-%o2 are preserved in %g3-%g5 around the
	 * MMU_UNMAP_ADDR trap.  Failure takes ptl1_panic(PTL1_BAD_HCALL).
	 */
	srln	%g1, MMU_PAGESHIFT, %g1
	slln	%g1, MMU_PAGESHIFT, %g1		/* g1 = vaddr, page-aligned */
	mov	%o0, %g3			! save %o0-%o2
	mov	%o1, %g4
	mov	%o2, %g5
	mov	%g1, %o0			/* vaddr */

	SFMMU_CPU_CNUM(%g2, %o1, %g6)	/* %o1 = sfmmu cnum on this CPU */

	mov	MAP_ITLB | MAP_DTLB, %o2
	ta	MMU_UNMAP_ADDR
	brz,pt	%o0, 1f				! status 0 == success
	nop
	ba	ptl1_panic
	mov	PTL1_BAD_HCALL, %g1		! delay: panic reason
1:
	mov	%g5, %o2			! restore %o2-%o0
	mov	%g4, %o1
	mov	%g3, %o0
	membar	#Sync
	retry
	SET_SIZE(vtag_flushpage_tl1)
1168
	ENTRY_NP(vtag_flush_pgcnt_tl1)
	/*
	 * x-trap to flush pgcnt MMU_PAGESIZE pages from tlb
	 *
	 * %g1 = vaddr, zero-extended on 32-bit kernel
	 * %g2 = <sfmmup58|pgcnt6>, (pgcnt - 1) is pass'ed in via pgcnt6 bits.
	 *
	 * NOTE: this handler relies on the fact that no
	 *	interrupts or traps can occur during the loop
	 *	issuing the TLB_DEMAP operations. It is assumed
	 *	that interrupts are disabled and this code is
	 *	fetching from the kernel locked text address.
	 *
	 * assumes TSBE_TAG = 0
	 */
	srln	%g1, MMU_PAGESHIFT, %g1
	slln	%g1, MMU_PAGESHIFT, %g1		/* g1 = vaddr, page-aligned */
	mov	%o0, %g3			! save %o0-%o2
	mov	%o1, %g4
	mov	%o2, %g5

	and	%g2, SFMMU_PGCNT_MASK, %g7	/* g7 = pgcnt - 1 */
	add	%g7, 1, %g7			/* g7 = pgcnt */

	andn	%g2, SFMMU_PGCNT_MASK, %o0	/* %o0 = sfmmup */

	SFMMU_CPU_CNUM(%o0, %g2, %g6)    /* %g2 = sfmmu cnum on this CPU */

	set	MMU_PAGESIZE, %g6		/* g6 = pgsize */

	! demap one page per iteration; see NOTE above re: atomicity
1:
	mov	%g1, %o0			/* vaddr */
	mov	%g2, %o1			/* cnum */
	mov	MAP_ITLB | MAP_DTLB, %o2
	ta	MMU_UNMAP_ADDR
	brz,pt	%o0, 2f				! status 0 == success
	nop
	ba	ptl1_panic
	mov	PTL1_BAD_HCALL, %g1		! delay: panic reason
2:
	deccc	%g7				/* decr pgcnt */
	bnz,pt	%icc,1b
	add	%g1, %g6, %g1			/* delay: go to nextpage */

	mov	%g5, %o2			! restore %o2-%o0
	mov	%g4, %o1
	mov	%g3, %o0
	membar	#Sync
	retry
	SET_SIZE(vtag_flush_pgcnt_tl1)
1219
	! Not implemented on US1/US2
	/*
	 * vtag_flushall_tl1(): trap-level variant of vtag_flushall;
	 * preserves %o0-%o3/%o5 in globals around the MMU_DEMAP_ALL
	 * fast trap, taking ptl1_panic(PTL1_BAD_HCALL) on failure.
	 */
	ENTRY_NP(vtag_flushall_tl1)
	mov	%o0, %g3		! save %o0-%o3, %o5
	mov	%o1, %g4
	mov	%o2, %g5
	mov	%o3, %g6	! XXXQ not used?
	mov	%o5, %g7
	mov	%g0, %o0	! XXX no cpu list yet
	mov	%g0, %o1	! XXX no cpu list yet
	mov	MAP_ITLB | MAP_DTLB, %o2
	mov	MMU_DEMAP_ALL, %o5
	ta	FAST_TRAP
	brz,pt	%o0, 1f		! status 0 == success
	nop
	ba	ptl1_panic
	mov	PTL1_BAD_HCALL, %g1	! delay: panic reason
1:
	mov	%g7, %o5	! restore %o5, %o3-%o0
	mov	%g6, %o3	! XXXQ not used?
	mov	%g5, %o2
	mov	%g4, %o1
	mov	%g3, %o0
	retry
	SET_SIZE(vtag_flushall_tl1)
1244
/*
 * flush_instr_mem:
 *	Flush a portion of the I-$ starting at vaddr
 * 	%o0 vaddr
 *	%o1 bytes to be flushed
 *
 * Issues one "flush" per ICACHE_FLUSHSZ (0x20) bytes; len is rounded
 * up implicitly since the loop runs while bytes remaining > 0.
 */

	ENTRY(flush_instr_mem)
	membar	#StoreStore				! Ensure the stores
							! are globally visible
1:
	flush	%o0
	subcc	%o1, ICACHE_FLUSHSZ, %o1		! bytes = bytes-0x20
	bgu,pt	%ncc, 1b
	add	%o0, ICACHE_FLUSHSZ, %o0		! delay: vaddr = vaddr+0x20

	retl
	nop
	SET_SIZE(flush_instr_mem)
1264
1265 #endif /* !lint */
1266
#if !defined(CUSTOM_FPZERO)

/*
 * fp_zero() - clear all fp data registers and the fsr
 */

#if defined(lint) || defined(__lint)

void
fp_zero(void)
{}

#else	/* lint */

	.global	fp_zero_zero
	.align	8
fp_zero_zero:
	.xword	0		/* 64-bit zero loaded into %fsr and %f0 */

	/*
	 * fp_zero(): load zero from fp_zero_zero into %fsr and %f0,
	 * then propagate %f0 through every double fp register pair.
	 */
	ENTRY_NP(fp_zero)
	sethi	%hi(fp_zero_zero), %o0
	ldx	[%o0 + %lo(fp_zero_zero)], %fsr
	ldd	[%o0 + %lo(fp_zero_zero)], %f0
	fmovd	%f0, %f2
	fmovd	%f0, %f4
	fmovd	%f0, %f6
	fmovd	%f0, %f8
	fmovd	%f0, %f10
	fmovd	%f0, %f12
	fmovd	%f0, %f14
	fmovd	%f0, %f16
	fmovd	%f0, %f18
	fmovd	%f0, %f20
	fmovd	%f0, %f22
	fmovd	%f0, %f24
	fmovd	%f0, %f26
	fmovd	%f0, %f28
	fmovd	%f0, %f30
	fmovd	%f0, %f32
	fmovd	%f0, %f34
	fmovd	%f0, %f36
	fmovd	%f0, %f38
	fmovd	%f0, %f40
	fmovd	%f0, %f42
	fmovd	%f0, %f44
	fmovd	%f0, %f46
	fmovd	%f0, %f48
	fmovd	%f0, %f50
	fmovd	%f0, %f52
	fmovd	%f0, %f54
	fmovd	%f0, %f56
	fmovd	%f0, %f58
	fmovd	%f0, %f60
	retl
	fmovd	%f0, %f62		! delay: last register pair
	SET_SIZE(fp_zero)

#endif	/* lint */
#endif	/* CUSTOM_FPZERO */