1 !
2 ! CDDL HEADER START
3 !
4 ! The contents of this file are subject to the terms of the
5 ! Common Development and Distribution License (the "License").
6 ! You may not use this file except in compliance with the License.
7 !
8 ! You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 ! or http://www.opensolaris.org/os/licensing.
10 ! See the License for the specific language governing permissions
11 ! and limitations under the License.
12 !
13 ! When distributing Covered Code, include this CDDL HEADER in each
14 ! file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 ! If applicable, add the following below this CDDL HEADER, with the
16 ! fields enclosed by brackets "[]" replaced with your own identifying
17 ! information: Portions Copyright [yyyy] [name of copyright owner]
18 !
19 ! CDDL HEADER END
20 !
21 ! Copyright 2011 Nexenta Systems, Inc. All rights reserved.
22 !
23 ! Copyright 2006 Sun Microsystems, Inc. All rights reserved.
24 ! Use is subject to license terms.
25 !
26
27 ! Portions of this file are duplicated as GCC inline assembly in
28 ! libm_inlines.h. Keep them in sync.
29
30 .inline __r_hypot_,2
31 ld [%o0],%o4
32 sethi 0x1fffff,%o5
33 or %o5,1023,%o5
34 and %o4,%o5,%o4
35 sethi 0x1fe000,%o3
36 cmp %o4,%o3
37 ld [%o0],%f0 ! load result with first argument
38 bne 2f
39 nop
40 fabss %f0,%f0
41 ld [%o1],%f1
42 .volatile
43 fcmps %f0,%f1 ! generate invalid for Snan
44 .nonvolatile
45 nop
46 fba 5f
47 nop
48 2:
49 ld [%o1],%o4
50 sethi 0x1fffff,%o5
51 or %o5,1023,%o5
52 and %o4,%o5,%o4
53 sethi 0x1fe000,%o3
54 cmp %o4,%o3
55 bne 4f
56 nop
57 ld [%o1],%f0 ! second argument inf
58 fabss %f0,%f0
59 ld [%o0],%f1
60 .volatile
61 fcmps %f0,%f1 ! generate invalid for Snan
62 .nonvolatile
63 nop
64 fba 5f
65 nop
66 4:
67 ld [%o1],%f3
68 fsmuld %f0,%f0,%f0
69 fsmuld %f3,%f3,%f2
70 faddd %f2,%f0,%f0
71 fsqrtd %f0,%f0
72 fdtos %f0,%f0
73 5:
74 .end
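
! Reference sketch (not part of the original source): a C rendering of what
! the template above computes, assuming IEEE-754 single/double formats.  If
! either argument is infinite the result is +inf (the fcmps above still
! raises the invalid exception for a signaling NaN in the other operand);
! otherwise the sum of squares and the square root are evaluated in double
! precision and rounded back to single.  __c_abs below follows the same
! pattern for a single complex argument.
!
!	#include <math.h>
!
!	float r_hypot_sketch(const float *x, const float *y)	/* hypothetical name */
!	{
!		if (isinf(*x) || isinf(*y))
!			return ((float)INFINITY);
!		return ((float)sqrt((double)*x * *x + (double)*y * *y));
!	}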
75
76 .inline __c_abs,1
77 ld [%o0],%o4
78 sethi 0x1fffff,%o5
79 or %o5,1023,%o5
80 and %o4,%o5,%o4
81 sethi 0x1fe000,%o3
82 cmp %o4,%o3
83 ld [%o0],%f0
84 bne 2f
85 nop
86 fabss %f0,%f0
87 ld [%o0+4],%f1
88 .volatile
89 fcmps %f0,%f1 ! generate invalid for Snan
90 .nonvolatile
91 nop
92 fba 5f
93 nop
94 2:
95 ld [%o0+4],%o4
96 sethi 0x1fffff,%o5
97 or %o5,1023,%o5
98 and %o4,%o5,%o4
99 sethi 0x1fe000,%o3
100 cmp %o4,%o3
101 bne 4f
102 nop
103 ld [%o0+4],%f0
104 fabss %f0,%f0
105 ld [%o0],%f1
106 .volatile
107 fcmps %f0,%f1 ! generate invalid for Snan
108 .nonvolatile
109 nop
110 fba 5f
111 nop
112 ! store to 8-aligned address
113 4:
114 ld [%o0+4],%f3
115 fsmuld %f0,%f0,%f0
116 fsmuld %f3,%f3,%f2
117 faddd %f2,%f0,%f0
118 fsqrtd %f0,%f0
119 fdtos %f0,%f0
120 5:
121 .end
122 !- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
123 ! void
124 ! __Fc_mult(c, a, b)
125 ! complex *c, *a, *b;
126 ! {
127
128 .inline __Fc_mult,3
129 ! 21 c->real = (a->real * b->real) - (a->imag * b->imag)
130 ld [%o1+4],%f0 ! f0 = a->imag
131 ld [%o2+4],%f1 ! f1 = b->imag
132 ld [%o1],%f2 ! f2 = a->real
133 fsmuld %f0,%f1,%f4 ! f4 = (a->imag * b->imag)
134 ld [%o2],%f3 ! f3 = b->real
135 fsmuld %f2,%f1,%f6 ! f6 = a->real * b->imag
136 fsmuld %f2,%f3,%f8 ! f8 = a->real * b->real
137 fsmuld %f0,%f3,%f10 ! f10 = a->imag * b->real
138 fsubd %f8,%f4,%f0 ! f0 = ar*br - ai*bi
139 faddd %f6,%f10,%f2 ! f2 = ai*br + ar*bi
140 fdtos %f0,%f4
141 fdtos %f2,%f6
142 st %f4,[%o0]
143 st %f6,[%o0+4]
144 .end
145 ! }
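
! A compact C rendering of the template above (reference sketch, not part of
! the original source; the struct layout is assumed).  The partial products
! are formed in double precision, matching the fsmuld/faddd sequence, before
! the final rounding back to single:
!
!	typedef struct { float real, imag; } complex_t;	/* assumed layout */
!
!	void Fc_mult_sketch(complex_t *c, const complex_t *a, const complex_t *b)
!	{
!		double ar = a->real, ai = a->imag;
!		double br = b->real, bi = b->imag;
!		c->real = (float)(ar * br - ai * bi);
!		c->imag = (float)(ai * br + ar * bi);
!	}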
146 !- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
147 ! void
148 ! __Fc_div(c, a, b)
149 ! complex *c, *a, *b;
150 ! {
151 .inline __Fc_div,3
152 ld [%o2+4],%o3
153 sethi %hi(0x7fffffff),%o4
154 or %o4,%lo(0x7fffffff),%o4
155 andcc %o3,%o4,%g0
156 ld [%o2],%f6 ! f6 gets reb
157 bne 1f
158 nop
159 ld [%o1],%f0
160 ld [%o2],%f1
161 fdivs %f0,%f1,%f0
162 st %f0,[%o0]
163 ld [%o1+4],%f3
164 fdivs %f3,%f1,%f3
165 st %f3,[%o0+4]
166 ba 2f
167 nop
168 1:
169 sethi %hi(0x3ff00000),%o4
170 or %g0,0,%o5
171 std %o4,[%sp+0x48]
172 ldd [%sp+0x48],%f8
173 ld [%o2+4],%f10 ! f10 gets imb
174 fsmuld %f6,%f6,%f16 ! f16/17 gets reb**2
175 ld [%o1+4],%f4 ! f4 gets ima
176 fsmuld %f10,%f10,%f12 ! f12/13 gets imb**2
177 ld [%o1],%f19 ! f19 gets rea
178 fsmuld %f4,%f10,%f0 ! f0/f1 gets ima*imb
179 fsmuld %f19,%f6,%f2 ! f2/3 gets rea*reb
180 faddd %f12,%f16,%f12 ! f12/13 gets reb**2+imb**2
181 fdivd %f8,%f12,%f12 ! f12/13 gets 1/(reb**2+imb**2)
182 faddd %f2,%f0,%f2 ! f2/3 gets rea*reb+ima*imb
183 fsmuld %f4,%f6,%f24 ! f24/5 gets ima*reb
184 fmuld %f2,%f12,%f2 ! f2/3 gets rec
185 fsmuld %f19,%f10,%f10 ! f10/11 gets rea*imb
186 fsubd %f24,%f10,%f10 ! f10/11 gets ima*reb-rea*imb
187 fmuld %f10,%f12,%f12 ! f12 gets imc
188 fdtos %f2,%f7 ! f7 gets rec
189 fdtos %f12,%f15 ! f15 gets imc
190 st %f7,[%o0]
191 st %f15,[%o0+4]
192 2:
193 .end
194 ! }
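
! Reference sketch in C (not part of the original source; struct layout
! assumed).  When b->imag is zero the division is done componentwise by
! b->real; otherwise the usual formula is used with a double-precision
! reciprocal of |b|^2, matching the fdivd above:
!
!	typedef struct { float real, imag; } complex_t;	/* assumed layout */
!
!	void Fc_div_sketch(complex_t *c, const complex_t *a, const complex_t *b)
!	{
!		if (b->imag == 0.0f) {			/* pure-real divisor */
!			c->real = a->real / b->real;
!			c->imag = a->imag / b->real;
!		} else {
!			double br = b->real, bi = b->imag;
!			double ar = a->real, ai = a->imag;
!			double d = 1.0 / (br * br + bi * bi);
!			c->real = (float)((ar * br + ai * bi) * d);
!			c->imag = (float)((ai * br - ar * bi) * d);
!		}
!	}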
195
196 .inline .mul,2
197 .volatile
198 smul %o0,%o1,%o0
199 rd %y,%o1
200 sra %o0,31,%o2
201 cmp %o1,%o2
202 .nonvolatile
203 .end
204
205 .inline .umul,2
206 .volatile
207 umul %o0,%o1,%o0
208 rd %y,%o1
209 tst %o1
210 .nonvolatile
211 .end
212
213 .inline .div,2
214 sra %o0,31,%o4 ! extend sign
215 .volatile
216 wr %o4,%g0,%y
217 cmp %o1,0xffffffff ! is divisor -1?
218 be,a 1f ! if yes
219 .volatile
220 subcc %g0,%o0,%o0 ! simply negate dividend
221 nop ! RT620 FABs A.0/A.1
222 sdiv %o0,%o1,%o0 ! o0 contains quotient a/b
223 .nonvolatile
224 1:
225 .end
226
227 .inline .udiv,2
228 .volatile
229 wr %g0,%g0,%y
230 nop
231 nop
232 nop
233 udiv %o0,%o1,%o0 ! o0 contains quotient a/b
234 .nonvolatile
235 .end
236
237 .inline .rem,2
238 sra %o0,31,%o4 ! extend sign
239 .volatile
240 wr %o4,%g0,%y
241 cmp %o1,0xffffffff ! is divisor -1?
242 be,a 1f ! if yes
243 .volatile
244 or %g0,%g0,%o0 ! simply return 0
245 nop ! RT620 FABs A.0/A.1
246 sdiv %o0,%o1,%o2 ! o2 contains quotient a/b
247 .nonvolatile
248 smul %o2,%o1,%o4 ! o4 contains q*b
249 sub %o0,%o4,%o0 ! o0 gets a-q*b
250 1:
251 .end
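
! The .mul/.div/.rem support templates above use the V8 %y register for the
! 64-bit product and dividend.  .div and .rem special-case a divisor of -1
! before issuing sdiv, so the INT_MIN corner case does not depend on the
! hardware's overflow behaviour, and the remainder is recovered as a-(a/b)*b.
! A C sketch of the .rem semantics (reference only, hypothetical name):
!
!	int rem_sketch(int a, int b)
!	{
!		if (b == -1)
!			return (0);		/* skip the divide for INT_MIN % -1 */
!		return (a - (a / b) * b);
!	}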
252
253 .inline .urem,2
254 .volatile
255 wr %g0,%g0,%y
256 nop
257 nop
258 nop
259 udiv %o0,%o1,%o2 ! o2 contains quotient a/b
260 .nonvolatile
261 umul %o2,%o1,%o4 ! o4 contains q*b
262 sub %o0,%o4,%o0 ! o0 gets a-q*b
263 .end
264
265 .inline .div_o3,2
266 sra %o0,31,%o4 ! extend sign
267 .volatile
268 wr %o4,%g0,%y
269 cmp %o1,0xffffffff ! is divisor -1?
270 be,a 1f ! if yes
271 .volatile
272 subcc %g0,%o0,%o0 ! simply negate dividend
273 mov %o0,%o3 ! o3 gets remainder
274 sdiv %o0,%o1,%o0 ! o0 contains quotient a/b
275 .nonvolatile
276 smul %o0,%o1,%o4 ! o4 contains q*b
277 ba 2f
278 sub %o3,%o4,%o3 ! o3 gets a-q*b
279 1:
280 mov %g0,%o3 ! remainder is 0
281 2:
282 .end
283
284 .inline .udiv_o3,2
285 .volatile
286 wr %g0,%g0,%y
287 mov %o0,%o3 ! o3 gets remainder
288 nop
289 nop
290 udiv %o0,%o1,%o0 ! o0 contains quotient a/b
291 .nonvolatile
292 umul %o0,%o1,%o4 ! o4 contains q*b
293 sub %o3,%o4,%o3 ! o3 gets a-q*b
294 .end
295
296 .inline __ieee754_sqrt,2
297 std %o0,[%sp+0x48] ! store to 8-aligned address
298 ldd [%sp+0x48],%f0
299 fsqrtd %f0,%f0
300 .end
301
302 .inline __inline_sqrtf,1
303 st %o0,[%sp+0x44]
304 ld [%sp+0x44],%f0
305 fsqrts %f0,%f0
306 .end
307
308 .inline __inline_sqrt,2
309 std %o0,[%sp+0x48] ! store to 8-aligned address
310 ldd [%sp+0x48],%f0
311 fsqrtd %f0,%f0
312 .end
313
314 .inline __sqrtf,1
315 st %o0,[%sp+0x44]
316 ld [%sp+0x44],%f0
317 fsqrts %f0,%f0
318 .end
319
320 .inline __sqrt,2
321 std %o0,[%sp+0x48] ! store to 8-aligned address
322 ldd [%sp+0x48],%f0
323 fsqrtd %f0,%f0
324 .end
325
326 .inline __r_sqrt_,1
327 ld [%o0],%f0
328 fsqrts %f0,%f0
329 .end
330
331 .inline __d_sqrt_,1
332 ld [%o0],%f0
333 ld [%o0+4],%f1
334 fsqrtd %f0,%f0
335 .end
336
337 .inline __ceil,2
338 std %o0,[%sp+0x48]
339 sethi %hi(0x80000000),%o5
340 andn %o0,%o5,%o2
341 sethi %hi(0x43300000),%o3
342 st %g0,[%sp+0x54]
343 subcc %o2,%o3,%g0
344 bl 1f
345 nop
346 sethi %hi(0x3ff00000),%o2
347 st %o2,[%sp+0x50]
348 ldd [%sp+0x48],%f0
349 ldd [%sp+0x50],%f2
350 fmuld %f0,%f2,%f0
351 ba 4f
352 nop
353 1:
354 tst %o0
355 st %o3,[%sp+0x50]
356 ldd [%sp+0x50],%f2
357 bge 2f
358 nop
359 fnegs %f2,%f2
360 2:
361 ldd [%sp+0x48],%f4
362 faddd %f4,%f2,%f0
363 fsubd %f0,%f2,%f0
364 fcmpd %f0,%f4
365 sethi %hi(0x3ff00000),%o2
366 st %o2,[%sp+0x50]
367 and %o0,%o5,%o4
368 fbge 3f
369 nop
370 ldd [%sp+0x50],%f4
371 faddd %f0,%f4,%f0
372 3:
373 st %f0,[%sp+0x48]
374 ld [%sp+0x48],%o3
375 andn %o3,%o5,%o3
376 or %o4,%o3,%o3
377 st %o3,[%sp+0x48]
378 ld [%sp+0x48],%f0
379 4:
380 .end
381
382 .inline __floor,2
383 std %o0,[%sp+0x48]
384 sethi %hi(0x80000000),%o5
385 andn %o0,%o5,%o2
386 sethi %hi(0x43300000),%o3
387 st %g0,[%sp+0x54]
388 subcc %o2,%o3,%g0
389 bl 1f
390 nop
391 sethi %hi(0x3ff00000),%o2
392 st %o2,[%sp+0x50]
393 ldd [%sp+0x48],%f0
394 ldd [%sp+0x50],%f2
395 fmuld %f0,%f2,%f0
396 ba 4f
397 nop
398 1:
399 tst %o0
400 st %o3,[%sp+0x50]
401 ldd [%sp+0x50],%f2
402 bge 2f
403 nop
404 fnegs %f2,%f2
405 2:
406 ldd [%sp+0x48],%f4
407 faddd %f4,%f2,%f0
408 fsubd %f0,%f2,%f0
409 fcmpd %f0,%f4
410 sethi %hi(0x3ff00000),%o2
411 st %o2,[%sp+0x50]
412 ldd [%sp+0x50],%f4
413 and %o0,%o5,%o4
414 fble 3f
415 nop
416 fsubd %f0,%f4,%f0
417 3:
418 st %f0,[%sp+0x48]
419 ld [%sp+0x48],%o3
420 andn %o3,%o5,%o3
421 or %o4,%o3,%o3
422 st %o3,[%sp+0x48]
423 ld [%sp+0x48],%f0
424 4:
425 .end
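
! __ceil and __floor use the add-and-subtract-of-2^52 trick: for finite
! |x| < 2^52 (biased exponent below 0x433), adding and then subtracting a
! sign-matched 2^52 leaves x rounded to a nearby integer, which is then
! nudged by 1.0 when it landed on the wrong side of x; the sign bit of x is
! reapplied at the end so that -0.0 survives.  Larger values, infinities and
! NaNs are already integral and are passed through a multiply by 1.0.  A
! behavioural C sketch of the ceiling case (reference only; __floor instead
! subtracts 1.0 when the rounded value exceeds x):
!
!	#include <math.h>
!
!	double ceil_sketch(double x)
!	{
!		double two52 = 4503599627370496.0;	/* 2^52 */
!		if (!(fabs(x) < two52))
!			return (x * 1.0);	/* already integral, infinite, or NaN */
!		double big = (x < 0.0) ? -two52 : two52;
!		double r = (x + big) - big;	/* x rounded to a nearby integer */
!		if (r < x)
!			r += 1.0;		/* push up to the ceiling */
!		return (copysign(r, x));	/* keep the sign of +-0 */
!	}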
426
427 .inline __ilogb,2
428 sethi %hi(0x7ff00000),%o4
429 andcc %o4,%o0,%o2
430 bne 1f
431 nop
432 sethi %hi(0x43500000),%o3
433 std %o0,[%sp+0x48]
434 st %o3,[%sp+0x50]
435 st %g0,[%sp+0x54]
436 ldd [%sp+0x48],%f0
437 ldd [%sp+0x50],%f2
438 fmuld %f0,%f2,%f0
439 sethi %hi(0x80000001),%o0
440 or %o0,%lo(0x80000001),%o0
441 st %f0,[%sp+0x48]
442 ld [%sp+0x48],%o2
443 andcc %o2,%o4,%o2
444 srl %o2,20,%o2
445 be 2f
446 nop
447 sub %o2,0x435,%o0
448 ba 2f
449 nop
450 1:
451 subcc %o4,%o2,%g0
452 srl %o2,20,%o3
453 bne 0f
454 nop
455 sethi %hi(0x7fffffff),%o0
456 or %o0,%lo(0x7fffffff),%o0
457 ba 2f
458 nop
459 0:
460 sub %o3,0x3ff,%o0
461 2:
462 .end
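
! __ilogb returns the unbiased exponent of x: normals give exponent-1023,
! subnormals are first scaled by 2^54 (biased 0x435) and then adjusted,
! zero returns 0x80000001, and infinities/NaNs return 0x7fffffff.  A C sketch
! over the raw bits (reference only; assumes a 64-bit IEEE-754 double):
!
!	#include <limits.h>
!	#include <string.h>
!
!	int ilogb_sketch(double x)
!	{
!		unsigned long long bits;
!		int e;
!		memcpy(&bits, &x, sizeof (bits));
!		e = (int)((bits >> 52) & 0x7ff);
!		if (e == 0x7ff)
!			return (INT_MAX);		/* infinity or NaN */
!		if (e != 0)
!			return (e - 1023);		/* normal */
!		x *= 18014398509481984.0;		/* 2^54: renormalize */
!		memcpy(&bits, &x, sizeof (bits));
!		e = (int)((bits >> 52) & 0x7ff);
!		return (e == 0 ? INT_MIN + 1 : e - 1077);	/* zero -> 0x80000001 */
!	}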
463
464 .inline __rint,2
465 std %o0,[%sp+0x48]
466 sethi %hi(0x80000000),%o2
467 andn %o0,%o2,%o2
468 ldd [%sp+0x48],%f0
469 sethi %hi(0x43300000),%o3
470 st %g0,[%sp+0x50]
471 st %g0,[%sp+0x54]
472 subcc %o2,%o3,%g0
473 bl 1f
474 nop
475 sethi %hi(0x3ff00000),%o2
476 st %o2,[%sp+0x50]
477 ldd [%sp+0x50],%f2
478 fmuld %f0,%f2,%f0
479 ba 3f
480 nop
481 1:
482 tst %o0
483 st %o3,[%sp+0x48]
484 st %g0,[%sp+0x4c]
485 ldd [%sp+0x48],%f2
486 bge 2f
487 nop
488 fnegs %f2,%f2
489 2:
490 faddd %f0,%f2,%f0
491 fcmpd %f0,%f2
492 fbne 0f
493 nop
494 ldd [%sp+0x50],%f0
495 bge 3f
496 nop
497 fnegs %f0,%f0
498 ba 3f
499 nop
500 0:
501 fsubd %f0,%f2,%f0
502 3:
503 .end
504
505 .inline __rintf,1
506 st %o0,[%sp+0x48]
507 sethi %hi(0x80000000),%o2
508 andn %o0,%o2,%o2
509 ld [%sp+0x48],%f0
510 sethi %hi(0x4b000000),%o3
511 st %g0,[%sp+0x50]
512 subcc %o2,%o3,%g0
513 bl 1f
514 nop
515 sethi %hi(0x3f800000),%o2
516 st %o2,[%sp+0x50]
517 ld [%sp+0x50],%f2
518 fmuls %f0,%f2,%f0
519 ba 3f
520 nop
521 1:
522 tst %o0
523 st %o3,[%sp+0x48]
524 ld [%sp+0x48],%f2
525 bge 2f
526 nop
527 fnegs %f2,%f2
528 2:
529 fadds %f0,%f2,%f0
530 fcmps %f0,%f2
531 fbne 0f
532 nop
533 ld [%sp+0x50],%f0
534 bge 3f
535 nop
536 fnegs %f0,%f0
537 ba 3f
538 nop
539 0:
540 fsubs %f0,%f2,%f0
541 3:
542 .end
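
! __rint and __rintf round to an integral value in the current rounding mode
! using the same add-and-subtract trick as __ceil/__floor (2^52 for double,
! 2^23 for single), with an explicit signed-zero result when x rounds to
! zero.  A C sketch of the double case (reference only; it assumes strict
! IEEE evaluation, i.e. the compiler must not re-associate the additions):
!
!	#include <math.h>
!
!	double rint_sketch(double x)
!	{
!		double two52 = 4503599627370496.0;	/* 2^23 for the float case */
!		if (!(fabs(x) < two52))
!			return (x * 1.0);	/* already integral, infinite, or NaN */
!		double big = copysign(two52, x);
!		double t = x + big;
!		if (t == big)
!			return (copysign(0.0, x));	/* rounded to zero: keep the sign */
!		return (t - big);
!	}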
543
544 .inline __min_subnormal,0
545 set 0x0,%o0
546 st %o0,[%sp+0x44]
547 ld [%sp+0x44],%f0
548 set 0x1,%o0
549 st %o0,[%sp+0x44]
550 ld [%sp+0x44],%f1
551 .end
552
553 .inline __d_min_subnormal_,0
554 set 0x0,%o0
555 st %o0,[%sp+0x44]
556 ld [%sp+0x44],%f0
557 set 0x1,%o0
558 st %o0,[%sp+0x44]
559 ld [%sp+0x44],%f1
560 .end
561
562 .inline __min_subnormalf,0
563 set 0x1,%o0
564 st %o0,[%sp+0x44]
565 ld [%sp+0x44],%f0
566 .end
567
568 .inline __r_min_subnormal_,0
569 set 0x1,%o0
570 st %o0,[%sp+0x44]
571 ld [%sp+0x44],%f0
572 .end
573
574 .inline __max_subnormal,0
575 set 0x000fffff,%o0
576 st %o0,[%sp+0x44]
577 ld [%sp+0x44],%f0
578 set 0xffffffff,%o0
579 st %o0,[%sp+0x44]
580 ld [%sp+0x44],%f1
581 .end
582
583 .inline __d_max_subnormal_,0
584 set 0x000fffff,%o0
585 st %o0,[%sp+0x44]
586 ld [%sp+0x44],%f0
587 set 0xffffffff,%o0
588 st %o0,[%sp+0x44]
589 ld [%sp+0x44],%f1
590 .end
591
592 .inline __max_subnormalf,0
593 set 0x007fffff,%o0
594 st %o0,[%sp+0x44]
595 ld [%sp+0x44],%f0
596 .end
597
598 .inline __r_max_subnormal_,0
599 set 0x007fffff,%o0
600 st %o0,[%sp+0x44]
601 ld [%sp+0x44],%f0
602 .end
603
604 .inline __min_normal,0
605 set 0x00100000,%o0
606 set 0x0,%o1
607 std %o0,[%sp+0x48]
608 ldd [%sp+0x48],%f0
609 .end
610
611 .inline __d_min_normal_,0
612 set 0x00100000,%o0
613 st %o0,[%sp+0x44]
614 ld [%sp+0x44],%f0
615 set 0x0,%o0
616 st %o0,[%sp+0x44]
617 ld [%sp+0x44],%f1
618 .end
619
620 .inline __min_normalf,0
621 set 0x00800000,%o0
622 st %o0,[%sp+0x44]
623 ld [%sp+0x44],%f0
624 .end
625
626 .inline __r_min_normal_,0
627 set 0x00800000,%o0
628 st %o0,[%sp+0x44]
629 ld [%sp+0x44],%f0
630 .end
631
632 .inline __max_normal,0
633 set 0x7fefffff,%o0
634 set 0xffffffff,%o1
635 std %o0,[%sp+0x48]
636 ldd [%sp+0x48],%f0
637 .end
638
639 .inline __d_max_normal_,0
640 set 0x7fefffff,%o0
641 st %o0,[%sp+0x44]
642 ld [%sp+0x44],%f0
643 set 0xffffffff,%o0
644 st %o0,[%sp+0x44]
645 ld [%sp+0x44],%f1
646 .end
647
648 .inline __max_normalf,0
649 set 0x7f7fffff,%o0
650 st %o0,[%sp+0x44]
651 ld [%sp+0x44],%f0
652 .end
653
654 .inline __r_max_normal_,0
655 set 0x7f7fffff,%o0
656 st %o0,[%sp+0x44]
657 ld [%sp+0x44],%f0
658 .end
659
660 .inline __infinity,0
661 set 0x7ff00000,%o0
662 set 0x0,%o1
663 std %o0,[%sp+0x48]
664 ldd [%sp+0x48],%f0
665 .end
673
674 .inline __d_infinity_,0
675 set 0x7ff00000,%o0
676 st %o0,[%sp+0x44]
677 ld [%sp+0x44],%f0
678 set 0x0,%o0
679 st %o0,[%sp+0x44]
680 ld [%sp+0x44],%f1
681 .end
682
683 .inline __infinityf,0
684 set 0x7f800000,%o0
685 st %o0,[%sp+0x44]
686 ld [%sp+0x44],%f0
687 .end
688
689 .inline __r_infinity_,0
690 set 0x7f800000,%o0
691 st %o0,[%sp+0x44]
692 ld [%sp+0x44],%f0
693 .end
694
695 .inline __signaling_nan,0
696 set 0x7ff00000,%o0
697 set 0x1,%o1
698 std %o0,[%sp+0x48]
699 ldd [%sp+0x48],%f0
700 .end
701
702 .inline __d_signaling_nan_,0
703 set 0x7ff00000,%o0
704 st %o0,[%sp+0x44]
705 ld [%sp+0x44],%f0
706 set 0x1,%o0
707 st %o0,[%sp+0x44]
708 ld [%sp+0x44],%f1
709 .end
710
711 .inline __signaling_nanf,0
712 set 0x7f800001,%o0
713 st %o0,[%sp+0x44]
714 ld [%sp+0x44],%f0
715 .end
716
717 .inline __r_signaling_nan_,0
718 set 0x7f800001,%o0
719 st %o0,[%sp+0x44]
720 ld [%sp+0x44],%f0
721 .end
722
723 .inline __quiet_nan,0
724 set 0x7fffffff,%o0
725 st %o0,[%sp+0x44]
726 ld [%sp+0x44],%f0
727 set 0xffffffff,%o0
728 st %o0,[%sp+0x44]
729 ld [%sp+0x44],%f1
730 .end
731
732 .inline __d_quiet_nan_,0
733 set 0x7fffffff,%o0
734 st %o0,[%sp+0x44]
735 ld [%sp+0x44],%f0
736 set 0xffffffff,%o0
737 st %o0,[%sp+0x44]
738 ld [%sp+0x44],%f1
739 .end
740
741 .inline __quiet_nanf,0
742 set 0x7fffffff,%o0
743 st %o0,[%sp+0x44]
744 ld [%sp+0x44],%f0
745 .end
746
747 .inline __r_quiet_nan_,0
748 set 0x7fffffff,%o0
749 st %o0,[%sp+0x44]
750 ld [%sp+0x44],%f0
751 .end
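
! The constant templates above simply materialize fixed IEEE-754 bit patterns
! in the floating-point return register:
!
!	min subnormal	double 0x0000000000000001	float 0x00000001
!	max subnormal	double 0x000fffffffffffff	float 0x007fffff
!	min normal	double 0x0010000000000000	float 0x00800000
!	max normal	double 0x7fefffffffffffff	float 0x7f7fffff
!	infinity	double 0x7ff0000000000000	float 0x7f800000
!	signaling NaN	double 0x7ff0000000000001	float 0x7f800001
!	quiet NaN	double 0x7fffffffffffffff	float 0x7fffffff
!
! A C sketch of the same idea for one of them (reference only; hypothetical
! name, assumes a 64-bit IEEE-754 double):
!
!	#include <string.h>
!
!	double min_normal_sketch(void)
!	{
!		unsigned long long bits = 0x0010000000000000ULL;
!		double d;
!		memcpy(&d, &bits, sizeof (d));
!		return (d);
!	}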
752
753 .inline __swapEX,1
754 and %o0,0x1f,%o1
755 sll %o1,5,%o1 ! shift input to aexc bit location
756 .volatile
757 st %fsr,[%sp+0x44]
758 ld [%sp+0x44],%o0 ! o0 = fsr
759 andn %o0,0x3e0,%o2
760 or %o1,%o2,%o1 ! o1 = new fsr
761 st %o1,[%sp+0x44]
762 ld [%sp+0x44],%fsr
763 srl %o0,5,%o0
764 and %o0,0x1f,%o0
765 .nonvolatile
766 .end
767
768 .inline _QgetRD,0
769 st %fsr,[%sp+0x44]
770 ld [%sp+0x44],%o0 ! o0 = fsr
771 srl %o0,30,%o0 ! return round control value
772 .end
773
774 .inline _QgetRP,0
775 or %g0,%g0,%o0
776 .end
777
778 .inline __swapRD,1
779 and %o0,0x3,%o0
780 sll %o0,30,%o1 ! shift input to RD bit location
781 .volatile
782 st %fsr,[%sp+0x44]
783 ld [%sp+0x44],%o0 ! o0 = fsr
784 set 0xc0000000,%o4 ! mask of rounding direction bits
785 andn %o0,%o4,%o2
786 or %o1,%o2,%o1 ! o1 = new fsr
787 st %o1,[%sp+0x44]
788 ld [%sp+0x44],%fsr
789 srl %o0,30,%o0
790 and %o0,0x3,%o0
791 .nonvolatile
792 .end
793 !
794 ! On the SPARC, __swapRP is a no-op; always return 0 for backward compatibility
795 !
796
797 .inline __swapRP,1
798 or %g0,%g0,%o0
799 .end
800
801 .inline __swapTE,1
802 and %o0,0x1f,%o0
803 sll %o0,23,%o1 ! shift input to TEM bit location
804 .volatile
805 st %fsr,[%sp+0x44]
806 ld [%sp+0x44],%o0 ! o0 = fsr
807 set 0x0f800000,%o4 ! mask of TEM (Trap Enable Mode bits)
808 andn %o0,%o4,%o2
809 or %o1,%o2,%o1 ! o1 = new fsr
810 st %o1,[%sp+0x48]
811 ld [%sp+0x48],%fsr
812 srl %o0,23,%o0
813 and %o0,0x1f,%o0
814 .nonvolatile
815 .end
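
! __swapEX, __swapRD, and __swapTE all perform the same read-modify-write on
! the %fsr and return the previous field value: aexc lives in bits 9:5
! (mask 0x3e0), the trap-enable mask TEM in bits 27:23 (mask 0x0f800000),
! and the rounding direction RD in bits 31:30 (mask 0xc0000000).  A C sketch
! of the shared pattern, with hypothetical read_fsr()/write_fsr() helpers
! standing in for the st/ld %fsr sequences:
!
!	extern unsigned int read_fsr(void);	/* hypothetical helper */
!	extern void write_fsr(unsigned int);	/* hypothetical helper */
!
!	unsigned int swap_fsr_field(unsigned int val, int shift, unsigned int mask)
!	{
!		unsigned int old = read_fsr();
!		write_fsr((old & ~mask) | ((val << shift) & mask));
!		return ((old & mask) >> shift);
!	}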
816
817 .inline __fp_class,2
818 sethi %hi(0x80000000),%o2 ! o2 gets 80000000
819 andn %o0,%o2,%o0 ! o0-o1 gets abs(x)
820 orcc %o0,%o1,%g0 ! set cc as x is zero/nonzero
821 bne 1f ! branch if x is nonzero
822 nop
823 mov 0,%o0
824 ba 2f ! x is 0
825 nop
826 1:
827 sethi %hi(0x7ff00000),%o2 ! o2 gets 7ff00000
828 andcc %o0,%o2,%g0 ! cc set by exp field of x
829 bne 1f ! branch if normal or max exp
830 nop
831 mov 1,%o0
832 ba 2f ! x is subnormal
833 nop
834 1:
835 cmp %o0,%o2
836 bge 1f ! branch if x is max exp
837 nop
838 mov 2,%o0
839 ba 2f ! x is normal
840 nop
841 1:
842 andn %o0,%o2,%o0 ! o0 gets msw significand field
843 orcc %o0,%o1,%g0 ! set cc by OR of significand
844 bne 1f ! Branch if NaN
845 nop
846 mov 3,%o0
847 ba 2f ! x is infinity
848 nop
849 1:
850 sethi %hi(0x00080000),%o2
851 andcc %o0,%o2,%g0 ! set cc by quiet/sig bit
852 be 1f ! Branch if signaling
853 nop
854 mov 4,%o0 ! x is quiet NaN
855 ba 2f
856 nop
857 1:
858 mov 5,%o0 ! x is signaling NaN
859 2:
860 .end
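
! __fp_class returns a small classification code: 0 zero, 1 subnormal,
! 2 normal, 3 infinity, 4 quiet NaN, 5 signaling NaN; the quiet bit tested is
! bit 19 of the high word (bit 51 of the double).  __fp_classf and
! __ir_fp_class_ below are the single-precision analogues.  A C sketch over
! the raw words of the double (reference only):
!
!	int fp_class_sketch(unsigned int hi, unsigned int lo)
!	{
!		hi &= 0x7fffffff;				/* drop the sign */
!		if ((hi | lo) == 0)
!			return (0);				/* zero */
!		if ((hi & 0x7ff00000) == 0)
!			return (1);				/* subnormal */
!		if (hi < 0x7ff00000)
!			return (2);				/* normal */
!		if (((hi & 0x000fffff) | lo) == 0)
!			return (3);				/* infinity */
!		return ((hi & 0x00080000) ? 4 : 5);		/* quiet : signaling NaN */
!	}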
861
862 .inline __fp_classf,1
863 sethi %hi(0x80000000),%o2
864 andncc %o0,%o2,%o0
865 bne 1f
866 nop
867 mov 0,%o0
868 ba 2f ! x is 0
869 nop
870 1:
871 sethi %hi(0x7f800000),%o2
872 andcc %o0,%o2,%g0
873 bne 1f
874 nop
875 mov 1,%o0
876 ba 2f ! x is subnormal
877 nop
878 1:
879 cmp %o0,%o2
880 bge 1f
881 nop
882 mov 2,%o0
883 ba 2f ! x is normal
884 nop
885 1:
886 bg 1f
887 nop
888 mov 3,%o0
889 ba 2f ! x is infinity
890 nop
891 1:
892 sethi %hi(0x00400000),%o2
893 andcc %o0,%o2,%g0
894 mov 4,%o0 ! x is quiet NaN
895 bne 2f
896 nop
897 mov 5,%o0 ! x is signaling NaN
898 2:
899 .end
900
901 .inline __ir_fp_class_,1
902 ld [%o0],%o0
903 sethi %hi(0x80000000),%o2
904 andncc %o0,%o2,%o0
905 bne 1f
906 nop
907 mov 0,%o0
908 ba 2f ! x is 0
909 nop
910 1:
911 sethi %hi(0x7f800000),%o2
912 andcc %o0,%o2,%g0
913 bne 1f
914 nop
915 mov 1,%o0
916 ba 2f ! x is subnormal
917 nop
918 1:
919 cmp %o0,%o2
920 bge 1f
921 nop
922 mov 2,%o0
923 ba 2f ! x is normal
924 nop
925 1:
926 bg 1f
927 nop
928 mov 3,%o0
929 ba 2f ! x is infinity
930 nop
931 1:
932 sethi %hi(0x00400000),%o2
933 andcc %o0,%o2,%g0
934 mov 4,%o0 ! x is quiet NaN
935 bne 2f
936 nop
937 mov 5,%o0 ! x is signaling NaN
938 2:
939 .end
940
941 .inline __copysign,4
942 set 0x80000000,%o3
943 and %o2,%o3,%o2
944 andn %o0,%o3,%o0
945 or %o0,%o2,%o0
946 std %o0,[%sp+0x48]
947 ldd [%sp+0x48],%f0
948 .end
949
950 .inline __copysignf,2
951 set 0x80000000,%o2
952 andn %o0,%o2,%o0
953 and %o1,%o2,%o1
954 or %o0,%o1,%o0
955 st %o0,[%sp+0x44]
956 ld [%sp+0x44],%f0
957 .end
958
959 .inline __r_copysign_,2
960 ld [%o0],%o0
961 ld [%o1],%o1
962 set 0x80000000,%o2
963 andn %o0,%o2,%o0
964 and %o1,%o2,%o1
965 or %o0,%o1,%o0
966 st %o0,[%sp+0x44]
967 ld [%sp+0x44],%f0
968 .end
969
970 .inline _finite,2
971 set 0x7ff00000,%o1
972 and %o0,%o1,%o0
973 cmp %o0,%o1
974 mov 1,%o0
975 bne 1f
976 nop
977 mov 0,%o0
978 1:
979 .end
980
981 .inline __finitef,2
982 set 0x7f800000,%o1
983 and %o0,%o1,%o0
984 cmp %o0,%o1
985 mov 1,%o0
986 bne 1f
987 nop
988 mov 0,%o0
989 1:
990 .end
991
992 .inline __ir_finite_,1
993 ld [%o0],%o0
994 set 0x7f800000,%o1
995 and %o0,%o1,%o0
996 cmp %o0,%o1
997 mov 1,%o0
998 bne 1f
999 nop
1000 mov 0,%o0
1001 1:
1002 .end
1003
1004 .inline __signbit,1
1005 srl %o0,31,%o0
1006 .end
1007
1008 .inline __signbitf,1
1009 srl %o0,31,%o0
1010 .end
1011
1012 .inline __ir_signbit_,1
1013 ld [%o0],%o0
1014 srl %o0,31,%o0
1015 .end
1016
1017 .inline __isinf,2
1018 tst %o1
1019 sethi %hi(0x80000000),%o2
1020 bne 1f
1021 nop
1022 andn %o0,%o2,%o0
1023 sethi %hi(0x7ff00000),%o2
1024 cmp %o0,%o2
1025 mov 1,%o0
1026 be 2f
1027 nop
1028 1:
1029 mov 0,%o0
1030 2:
1031 .end
1032
1033 .inline __isinff,1
1034 sethi %hi(0x80000000),%o2
1035 andn %o0,%o2,%o0 ! o0 gets abs(x)
1036 sethi %hi(0x7f800000),%o2
1037 cmp %o0,%o2
1038 mov 0,%o0
1039 bne 1f ! Branch if not inf.
1040 nop
1041 mov 1,%o0
1042 1:
1043 .end
1044
1045 .inline __ir_isinf_,1
1046 ld [%o0],%o0
1047 sethi %hi(0x80000000),%o2
1048 andn %o0,%o2,%o0 ! o0 gets abs(x)
1049 sethi %hi(0x7f800000),%o2
1050 cmp %o0,%o2
1051 mov 0,%o0
1052 bne 1f ! Branch if not inf.
1053 nop
1054 mov 1,%o0
1055 1:
1056 .end
1057
1058 .inline __isnan,2
1059 sethi %hi(0x80000000),%o2
1060 andn %o0,%o2,%o0
1061 sub %g0,%o1,%o3
1062 or %o1,%o3,%o1
1063 srl %o1,31,%o1
1064 or %o0,%o1,%o0
1065 sethi %hi(0x7ff00000),%o4
1066 sub %o4,%o0,%o0
1067 srl %o0,31,%o0
1068 .end
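
! __isnan is branch-free: the sign bit is cleared, a nonzero low word is
! folded into bit 0 of the high word, and the result is 1 exactly when the
! combined value is greater than 0x7ff00000 (the high word of +infinity).
! __isnanf and __ir_isnan_ below do the same against 0x7f800000.  A C sketch
! over the raw words (reference only):
!
!	int isnan_sketch(unsigned int hi, unsigned int lo)
!	{
!		hi &= 0x7fffffff;
!		hi |= (lo | (0U - lo)) >> 31;		/* 1 if the low word is nonzero */
!		return ((0x7ff00000U - hi) >> 31);	/* 1 iff hi > 0x7ff00000 */
!	}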
1069
1070 .inline __isnanf,1
1071 sethi %hi(0x80000000),%o2
1072 andn %o0,%o2,%o0
1073 sethi %hi(0x7f800000),%o1
1074 sub %o1,%o0,%o0
1075 srl %o0,31,%o0
1076 .end
1077
1078 .inline __ir_isnan_,1
1079 ld [%o0],%o0
1080 sethi %hi(0x80000000),%o2
1081 andn %o0,%o2,%o0
1082 sethi %hi(0x7f800000),%o1
1083 sub %o1,%o0,%o0
1084 srl %o0,31,%o0
1085 .end
1086
1087 .inline __isnormal,2
1088 sethi %hi(0x80000000),%o2
1089 andn %o0,%o2,%o0
1090 sethi %hi(0x7ff00000),%o2
1091 cmp %o0,%o2
1092 sethi %hi(0x00100000),%o2
1093 bge 1f
1094 nop
1095 cmp %o0,%o2
1096 mov 1,%o0
1097 bge 2f
1098 nop
1099 1:
1100 mov 0,%o0
1101 2:
1102 .end
1103
1104 .inline __isnormalf,1
1105 sethi %hi(0x80000000),%o2
1106 andn %o0,%o2,%o0
1107 sethi %hi(0x7f800000),%o2
1108 cmp %o0,%o2
1109 sethi %hi(0x00800000),%o2
1110 bge 1f
1111 nop
1112 cmp %o0,%o2
1113 mov 1,%o0
1114 bge 2f
1115 nop
1116 1:
1117 mov 0,%o0
1118 2:
1119 .end
1120
1121 .inline __ir_isnormal_,1
1122 ld [%o0],%o0
1123 sethi %hi(0x80000000),%o2
1124 andn %o0,%o2,%o0
1125 sethi %hi(0x7f800000),%o2
1126 cmp %o0,%o2
1127 sethi %hi(0x00800000),%o2
1128 bge 1f
1129 nop
1130 cmp %o0,%o2
1131 mov 1,%o0
1132 bge 2f
1133 nop
1134 1:
1135 mov 0,%o0
1136 2:
1137 .end
1138
1139 .inline __issubnormal,2
1140 sethi %hi(0x80000000),%o2 ! o2 gets 80000000
1141 andn %o0,%o2,%o0 ! o0/o1 gets abs(x)
1142 sethi %hi(0x00100000),%o2 ! o2 gets 00100000
1143 cmp %o0,%o2
1144 bge 1f ! branch if x norm or max exp
1145 nop
1146 orcc %o0,%o1,%g0
1147 be 1f ! Branch if x zero
1148 nop
1149 mov 1,%o0 ! x is subnormal
1150 ba 2f
1151 nop
1152 1:
1153 mov 0,%o0
1154 2:
1155 .end
1156
1157 .inline __issubnormalf,1
1158 sethi %hi(0x80000000),%o2 ! o2 gets 80000000
1159 andn %o0,%o2,%o0 ! o0 gets abs(x)
1160 sethi %hi(0x00800000),%o2 ! o2 gets 00800000
1161 cmp %o0,%o2
1162 bge 1f ! branch if x norm or max exp
1163 nop
1164 orcc %o0,%g0,%g0
1165 be 1f ! Branch if x zero
1166 nop
1167 mov 1,%o0 ! x is subnormal
1168 ba 2f
1169 nop
1170 1:
1171 mov 0,%o0
1172 2:
1173 .end
1174
1175 .inline __ir_issubnormal_,1
1176 ld [%o0],%o0
1177 sethi %hi(0x80000000),%o2 ! o2 gets 80000000
1178 andn %o0,%o2,%o0 ! o0 gets abs(x)
1179 sethi %hi(0x00800000),%o2 ! o2 gets 00800000
1180 cmp %o0,%o2
1181 bge 1f ! branch if x norm or max exp
1182 nop
1183 orcc %o0,%g0,%g0
1184 be 1f ! Branch if x zero
1185 nop
1186 mov 1,%o0 ! x is subnormal
1187 ba 2f
1188 nop
1189 1:
1190 mov 0,%o0
1191 2:
1192 .end
1193
1194 .inline __iszero,2
1195 sethi %hi(0x80000000),%o2
1196 andn %o0,%o2,%o0
1197 orcc %o0,%o1,%g0
1198 mov 1,%o0
1199 be 1f
1200 nop
1201 mov 0,%o0
1202 1:
1203 .end
1204
1205 .inline __iszerof,1
1206 sethi %hi(0x80000000),%o2
1207 andncc %o0,%o2,%o0
1208 mov 1,%o0
1209 be 1f
1210 nop
1211 mov 0,%o0
1212 1:
1213 .end
1214
1215 .inline __ir_iszero_,1
1216 ld [%o0],%o0
1217 sethi %hi(0x80000000),%o2
1218 andncc %o0,%o2,%o0
1219 mov 1,%o0
1220 be 1f
1221 nop
1222 mov 0,%o0
1223 1:
1224 .end
1225
1226 .inline abs,1
1227 sra %o0,31,%o1
1228 xor %o0,%o1,%o0
1229 sub %o0,%o1,%o0
1230 .end
1231
1232 .inline __fabs,2
1233 st %o0,[%sp+0x48]
1234 st %o1,[%sp+0x4c]
1235 ldd [%sp+0x48],%f0
1236 fabsd %f0,%f0
1237 .end
1238
1239 .inline __fabsf,1
1240 st %o0,[%sp+0x44]
1241 ld [%sp+0x44],%f0
1242 fabss %f0,%f0
1243 .end
1244
1245 .inline __r_fabs_,1
1246 ld [%o0],%f0
1247 fabss %f0,%f0
1248 .end
1249 !
1250 ! __nintf - f77 NINT(REAL*4)
1251 !
1252
1253 .inline __nintf,1
1254 srl %o0,30-7,%g1
1255 sethi %hi(0x7fffff),%o2
1256 st %o0,[%sp+0x44]
1257 and %g1,0xff,%g1
1258 or %o2,%lo(0x7fffff),%o2
1259 sethi %hi(1<<22),%o4
1260 subcc %g1,127+31,%g0
1261 and %o0,%o2,%o3
1262 bl 0f
1263 nop
1264 sethi %hi(0xcf000000),%o2
1265 sethi %hi(0x80000000),%g1
1266 subcc %o0,%o2,%g0
1267 or %g1,%g0,%o0
1268 be 9f
1269 nop
1270 ld [%sp+0x44],%f0
1271 fstoi %f0,%f0
1272 st %f0,[%sp+0x44]
1273 ld [%sp+0x44],%o0
1274 ba 9f
1275 nop
1276 0:
1277 add %o4,%o4,%o5
1278 or %o3,%o5,%o3
1279 sra %o0,31-0,%o2
1280 subcc %g1,127,%g1
1281 srl %o4,%g1,%o4
1282 bge 1f
1283 nop
1284 subcc %g1,-1,%g0
1285 or %g0,0,%o0
1286 bne 2f
1287 nop
1288 or %g0,1,%o0
1289 ba 2f
1290 nop
1291 1:
1292 add %o3,%o4,%o3
1293 or %g0,23,%o0
1294 subcc %o0,%g1,%o0
1295 bl 1f
1296 nop
1297 srl %o3,%o0,%o0
1298 ba 2f
1299 nop
1300 1:
1301 sub %g0,%o0,%o0
1302 sll %o3,%o0,%o0
1303 2:
1304 xor %o0,%o2,%o0
1305 and %o2,1,%o2
1306 add %o0,%o2,%o0
1307 9:
1308 .end
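
! __nintf implements the f77 NINT intrinsic: round to the nearest integer
! with halves rounded away from zero.  The rounding is done on the
! significand in integer registers (adding a 0.5 scaled into the right bit
! position, then shifting), so no double rounding can occur; values outside
! int range fall back to fstoi, with -2**31 special-cased.  __il_nint,
! __i_dnnt, and __il_dnnt below are the INTEGER*8 and REAL*8 variants.  The
! intent in C (reference only; this naive form can double-round just below a
! half-way case, which the template avoids):
!
!	int nintf_sketch(float x)
!	{
!		return ((int)(x >= 0.0f ? x + 0.5f : x - 0.5f));
!	}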
1309
1310 .inline __il_nint,1
1311 ld [%o0],%o0
1312 sra %o0,0,%o0
1313 srlx %o0,31-8,%g1
1314 or %g0,1,%o2
1315 sllx %o2,23-1,%o4
1316 and %g1,0xff,%g1
1317 sllx %o2,63-0,%o2
1318 subcc %g1,127+63,%g0
1319 bl 0f
1320 nop
1321 st %o0,[%sp+0x48]
1322 ld [%sp+0x48],%f0
1323 fstox %f0,%f0
1324 std %f0,[%sp+0x48]
1325 ldx [%sp+0x48],%o1
1326 ba 9f
1327 nop
1328 0:
1329 add %o4,%o4,%o5
1330 srax %o2,63-23,%o2
1331 sub %g1,127+23,%o1
1332 xnor %o2,%g0,%o2
1333 and %o0,%o2,%o3
1334 or %o3,%o5,%o3
1335 srax %o0,63-0,%o2
1336 subcc %g1,127,%g1
1337 bge 1f
1338 nop
1339 subcc %g1,-1,%g0
1340 or %g0,0,%o0
1341 bne 2f
1342 nop
1343 or %g0,1,%o0
1344 ba 2f
1345 nop
1346 1:
1347 brlz,pt %o1,3f
1348 nop
1349 sub %g1,23,%o0
1350 sllx %o3,%o0,%o0
1351 ba 2f
1352 nop
1353 3:
1354 srlx %o4,%g1,%o4
1355 add %o3,%o4,%o3
1356 or %g0,23,%o0
1357 sub %o0,%g1,%o0
1358 srlx %o3,%o0,%o0
1359 2:
1360 xor %o0,%o2,%o0
1361 sub %o0,%o2,%o1
1362 9:
1363 srlx %o1,32,%o0
1364 .end
1365 !
1366 ! __i_dnnt - f77 NINT(REAL*8)
1367 !
1368
1369 .inline __i_dnnt,1
1370 ld [%o0],%o1
1371 sllx %o1,32,%o1
1372 ld [%o0+4],%o0
1373 or %o0,%o1,%o0
1374 srlx %o0,63-11,%g1
1375 or %g0,1,%o2
1376 stx %o0,[%sp+0x48]
1377 sllx %o2,52-1,%o4
1378 and %g1,0x7ff,%g1
1379 sllx %o2,63-0,%o2
1380 subcc %g1,1023+32,%g0
1381 bl 0f
1382 nop
1383 ldd [%sp+0x48],%f0
1384 ba 8f
1385 nop
1386 0:
1387 add %o4,%o4,%o5
1388 srax %o2,63-52,%o2
1389 sub %g1,1023+30,%o1
1390 xnor %o2,%g0,%o2
1391 and %o0,%o2,%o3
1392 or %o3,%o5,%o3
1393 srax %o0,63-0,%o2
1394 subcc %g1,1023,%g1
1395 bge 1f
1396 nop
1397 subcc %g1,-1,%g0
1398 or %g0,0,%o0
1399 bne 2f
1400 nop
1401 or %g0,1,%o0
1402 ba 2f
1403 nop
1404 1:
1405 srlx %o4,%g1,%o4
1406 add %o3,%o4,%o3
1407 or %g0,52,%o0
1408 sub %o0,%g1,%o0
1409 srlx %o3,%o0,%o0
1410 2:
1411 xor %o0,%o2,%o0
1412 sub %o0,%o2,%o0
1413 brlz,pt %o1,9f
1414 nop
1415 stx %o0,[%sp+0x48]
1416 ldd [%sp+0x48],%f0
1417 fxtod %f0,%f0
1418 8:
1419 fdtoi %f0,%f0
1420 st %f0,[%sp+0x44]
1421 ld [%sp+0x44],%o0
1422 9:
1423 .end
1424
1425 .inline __il_dnnt,1
1426 ld [%o0],%o1
1427 sllx %o1,32,%o1
1428 ld [%o0+4],%o0
1429 or %o0,%o1,%o0
1430 srlx %o0,63-11,%g1
1431 or %g0,1,%o2
1432 sllx %o2,52-1,%o4
1433 and %g1,0x7ff,%g1
1434 sllx %o2,63-0,%o2
1435 subcc %g1,1023+63,%g0
1436 bl 0f
1437 nop
1438 stx %o0,[%sp+0x48]
1439 ldd [%sp+0x48],%f0
1440 fdtox %f0,%f0
1441 std %f0,[%sp+0x48]
1442 ldx [%sp+0x48],%o1
1443 ba 9f
1444 nop
1445 0:
1446 add %o4,%o4,%o5
1447 srax %o2,63-52,%o2
1448 sub %g1,1023+52,%o1
1449 xnor %o2,%g0,%o2
1450 and %o0,%o2,%o3
1451 or %o3,%o5,%o3
1452 srax %o0,63-0,%o2
1453 subcc %g1,1023,%g1
1454 bge 1f
1455 nop
1456 subcc %g1,-1,%g0
1457 or %g0,0,%o0
1458 bne 2f
1459 nop
1460 or %g0,1,%o0
1461 ba 2f
1462 nop
1463 1:
1464 brlz,pt %o1,3f
1465 nop
1466 sub %g1,52,%o0
1467 sllx %o3,%o0,%o0
1468 ba 2f
1469 nop
1470 3:
1471 srlx %o4,%g1,%o4
1472 add %o3,%o4,%o3
1473 or %g0,52,%o0
1474 sub %o0,%g1,%o0
1475 srlx %o3,%o0,%o0
1476 2:
1477 xor %o0,%o2,%o0
1478 sub %o0,%o2,%o1
1479 9:
1480 srlx %o1,32,%o0
1481 .end
1482
1483 .inline __anintf,1
1484 or %g0,1,%o1
1485 srl %o0,23,%g1
1486 and %g1,0xff,%g1
1487 sub %g0,%g1,%g1
1488 add %g1,0x95,%g1
1489 subcc %g1,23,%g0
1490 sll %o1,%g1,%o1
1491 sub %o1,1,%o2
1492 bcs 1f
1493 nop
1494 be 2f
1495 nop
1496 bl 3f
1497 nop
1498 sethi %hi(0x80000000),%o1
1499 and %o0,%o1,%o0
1500 ba 3f
1501 nop
1502 1:
1503 and %o0,%o1,%o1
1504 2:
1505 add %o0,%o1,%o0
1506 andn %o0,%o2,%o0
1507 3:
1508 st %o0,[%sp+0x48]
1509 ld [%sp+0x48],%f0
1510 .end
1511
1512 .inline __anint,2
1513 sllx %o0,32,%o0
1514 or %o0,%o1,%o0
1515 or %g0,1,%o1
1516 srlx %o0,52,%g1
1517 and %g1,0x7ff,%g1
1518 sub %g0,%g1,%g1
1519 add %g1,0x432,%g1
1520 subcc %g1,52,%g0
1521 sllx %o1,%g1,%o1
1522 sub %o1,1,%o2
1523 bcs,pt %icc,1f
1524 nop
1525 be,pt %icc,2f
1526 nop
1527 bl,pt %icc,3f
1528 nop
1529 srlx %o0,63,%o0
1530 sllx %o0,63,%o0
1531 ba 3f
1532 nop
1533 1:
1534 and %o0,%o1,%o1
1535 2:
1536 add %o0,%o1,%o0
1537 andn %o0,%o2,%o0
1538 3:
1539 stx %o0,[%sp+0x48]
1540 ldd [%sp+0x48],%f0
1541 .end
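
! __anintf and __anint implement the f77 ANINT intrinsic: round to the
! nearest whole number, halves away from zero, returned as a real.  The
! templates inspect the half bit of the significand and clear the fraction
! bits in integer registers, so the result is exact.  The intent in C
! (reference only; this naive form can double-round near half-way cases):
!
!	#include <math.h>
!
!	double anint_sketch(double x)
!	{
!		return (x >= 0.0 ? floor(x + 0.5) : ceil(x - 0.5));
!	}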
1542
1543 .inline __Fz_minus,3
1544 ld [%o1],%f0
1545 ld [%o1+0x4],%f1
1546 ld [%o2],%f4
1547 ld [%o2+0x4],%f5
1548 fsubd %f0,%f4,%f0
1549 ld [%o1+8],%f2
1550 ld [%o1+0xc],%f3
1551 ld [%o2+8],%f6
1552 ld [%o2+0xc],%f7
1553 fsubd %f2,%f6,%f2
1554 st %f0,[%o0+0x0]
1555 st %f1,[%o0+0x4]
1556 st %f2,[%o0+0x8]
1557 st %f3,[%o0+0xc]
1558 .end
1559
1560 .inline __Fz_add,3
1561 ld [%o1],%f0
1562 ld [%o1+0x4],%f1
1563 ld [%o2],%f4
1564 ld [%o2+0x4],%f5
1565 faddd %f0,%f4,%f0
1566 ld [%o1+8],%f2
1567 ld [%o1+0xc],%f3
1568 ld [%o2+8],%f6
1569 ld [%o2+0xc],%f7
1570 faddd %f2,%f6,%f2
1571 st %f0,[%o0+0x0]
1572 st %f1,[%o0+0x4]
1573 st %f2,[%o0+0x8]
1574 st %f3,[%o0+0xc]
1575 .end
1576
1577 .inline __Fz_neg,2
1578 ld [%o1],%f0
1579 fnegs %f0,%f0
1580 ld [%o1+0x4],%f1
1581 st %f1,[%o0+0x4]
1582 ld [%o1+8],%f2
1583 fnegs %f2,%f2
1584 ld [%o1+0xc],%f3
1585 st %f3,[%o0+0xc]
1586 st %f0,[%o0]
1587 st %f2,[%o0+0x8]
1588 .end
1589
1590 .inline __Ff_conv_z,2
1591 st %o1,[%sp+0x44]
1592 ld [%sp+0x44],%f0
1593 fstod %f0,%f0
1594 st %g0,[%o0+0x8]
1595 st %g0,[%o0+0xc]
1596 st %f1,[%o0+0x4]
1597 st %f0,[%o0]
1598 .end
1599
1600 .inline __Fz_conv_f,1
1601 ld [%o0],%f0
1602 ld [%o0+4],%f1
1603 fdtos %f0,%f0
1604 .end
1605
1606 .inline __Fz_conv_i,1
1607 ld [%o0],%f0
1608 ld [%o0+4],%f1
1609 fdtoi %f0,%f0
1610 st %f0,[%sp+0x44]
1611 ld [%sp+0x44],%o0
1612 .end
1613
1614 .inline __Fi_conv_z,2
1615 st %o1,[%sp+0x44]
1616 ld [%sp+0x44],%f0
1617 fitod %f0,%f0
1618 st %g0,[%o0+0x8]
1619 st %g0,[%o0+0xc]
1620 st %f1,[%o0+0x4]
1621 st %f0,[%o0]
1622 .end
1623
1624 .inline __Fz_conv_d,1
1625 ld [%o0],%f0
1626 ld [%o0+4],%f1
1627 .end
1628
1629 .inline __Fd_conv_z,3
1630 st %o1,[%o0]
1631 st %o2,[%o0+0x4]
1632 st %g0,[%o0+0x8]
1633 st %g0,[%o0+0xc]
1634 .end
1635
1636 .inline __Fz_conv_c,2
1637 ldd [%o1],%f0
1638 fdtos %f0,%f0
1639 st %f0,[%o0]
1640 ldd [%o1+0x8],%f2
1641 fdtos %f2,%f1
1642 st %f1,[%o0+0x4]
1643 .end
1644
1645 .inline __Fz_eq,2
1646 ld [%o0],%f0
1647 ld [%o0+4],%f1
1648 ld [%o1],%f2
1649 ld [%o1+4],%f3
1650 fcmpd %f0,%f2
1651 mov %o0,%o2
1652 mov 0,%o0
1653 fbne 1f
1654 nop
1655 ld [%o2+8],%f0
1656 ld [%o2+12],%f1
1657 ld [%o1+8],%f2
1658 ld [%o1+12],%f3
1659 fcmpd %f0,%f2
1660 nop
1661 fbne 1f
1662 nop
1663 mov 1,%o0
1664 1:
1665 .end
1666
1667 .inline __Fz_ne,2
1668 ld [%o0],%f0
1669 ld [%o0+4],%f1
1670 ld [%o1],%f2
1671 ld [%o1+4],%f3
1672 fcmpd %f0,%f2
1673 mov %o0,%o2
1674 mov 1,%o0
1675 fbne 1f
1676 nop
1677 ld [%o2+8],%f0
1678 ld [%o2+12],%f1
1679 ld [%o1+8],%f2
1680 ld [%o1+12],%f3
1681 fcmpd %f0,%f2
1682 nop
1683 fbne 1f
1684 nop
1685 mov 0,%o0
1686 1:
1687 .end
1688
1689 .inline __c_cmplx,3
1690 ld [%o1],%o1
1691 st %o1,[%o0]
1692 ld [%o2],%o2
1693 st %o2,[%o0+4]
1694 .end
1695
1696 .inline __d_cmplx,3
1697 ld [%o1],%f0
1698 st %f0,[%o0]
1699 ld [%o1+4],%f1
1700 st %f1,[%o0+4]
1701 ld [%o2],%f0
1702 st %f0,[%o0+0x8]
1703 ld [%o2+4],%f1
1704 st %f1,[%o0+0xc]
1705 .end
1706
1707 .inline __r_cnjg,2
1708 ld [%o1+0x4],%f1
1709 fnegs %f1,%f1
1710 ld [%o1],%f0
1711 st %f0,[%o0]
1712 st %f1,[%o0+4]
1713 .end
1714
1715 .inline __d_cnjg,2
1716 ld [%o1+0x8],%f0
1717 fnegs %f0,%f0
1718 ld [%o1+0xc],%f1
1719 st %f1,[%o0+0xc]
1720 ld [%o1+0x0],%f1
1721 st %f1,[%o0+0x0]
1722 ld [%o1+0x4],%f1
1723 st %f1,[%o0+0x4]
1724 st %f0,[%o0+0x8]
1725 .end
1726
1727 .inline __r_dim,2
1728 st %g0,[%sp+0x48]
1729 ld [%sp+0x48],%f4
1730 ld [%o0],%f0
1731 ld [%o1],%f2
1732 fcmps %fcc0,%f0,%f2
1733 fmovsule %fcc0,%f4,%f2
1734 fsubs %f0,%f2,%f0
1735 fmovsule %fcc0,%f4,%f0
1736 .end
1737
1738 .inline __d_dim,2
1739 stx %g0,[%sp+0x48]
1740 ldd [%sp+0x48],%f4
1741 ld [%o0],%f0
1742 ld [%o0+4],%f1
1743 ld [%o1],%f2
1744 ld [%o1+4],%f3
1745 fcmpd %fcc0,%f0,%f2
1746 fmovdule %fcc0,%f4,%f2
1747 fsubd %f0,%f2,%f0
1748 fmovdule %fcc0,%f4,%f0
1749 .end
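
! __r_dim and __d_dim compute the Fortran positive difference DIM(x,y)
! without branches, using V9 conditional moves: the result is x - y when
! x > y and +0 otherwise (unordered operands also yield +0).  Reference
! sketch in C (hypothetical name):
!
!	float r_dim_sketch(float x, float y)
!	{
!		return (x > y ? x - y : 0.0f);	/* NaN operands also give +0 here */
!	}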
1750
1751 .inline __r_imag,1
1752 ld [%o0+4],%f0
1753 .end
1754
1755 .inline __d_imag,1
1756 ld [%o0+8],%f0
1757 ld [%o0+0xc],%f1
1758 .end
1759
1760 .inline __f95_signf,2
1761 ld [%o0],%f0
1762 ld [%o1],%o1
1763 fabss %f0,%f0
1764 fnegs %f0,%f1
1765 sra %o1,0,%o1
1766 fmovrslz %o1,%f1,%f0
1767 .end
1768
1769 .inline __f95_sign,2
1770 ld [%o0],%f0
1771 ld [%o0+4],%f1
1772 ld [%o1],%o1
1773 fabsd %f0,%f0
1774 fnegd %f0,%f2
1775 sra %o1,0,%o1
1776 fmovrdlz %o1,%f2,%f0
1777 .end
1778
1779 .inline __r_sign,2
1780 ld [%o0],%f0
1781 ld [%o1],%o1
1782 fabss %f0,%f0
1783 fnegs %f0,%f1
1784 sub %o1,1,%o0
1785 and %o1,%o0,%o1
1786 sra %o1,0,%o1
1787 fmovrslz %o1,%f1,%f0
1788 .end
1789
1790 .inline __d_sign,2
1791 ld [%o0],%f0
1792 ld [%o0+4],%f1
1793 ld [%o1],%o0
1794 sllx %o0,32,%o0
1795 ld [%o1+4],%o1
1796 or %o1,%o0,%o1
1797 fabsd %f0,%f0
1798 fnegd %f0,%f2
1799 sub %o1,1,%o0
1800 and %o1,%o0,%o1
1801 fmovrdlz %o1,%f2,%f0
1802 .end
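
! The SIGN templates return |a| with the sign of b.  __f95_signf/__f95_sign
! test the sign bit of b directly, so SIGN(a,-0.0) is negative; __r_sign and
! __d_sign first clear the lowest set bit of b (the b-1/and trick above), so
! a b of exactly -0.0 is treated as positive, matching f77 semantics.  A C
! sketch of the f95 form (reference only):
!
!	#include <math.h>
!
!	float f95_sign_sketch(float a, float b)
!	{
!		return (signbit(b) ? -fabsf(a) : fabsf(a));
!	}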
1803
1804 .inline __Fz_mult,3
1805 ld [%o1],%f0
1806 ld [%o1+0x4],%f1
1807 ld [%o2],%f4
1808 ld [%o2+0x4],%f5
1809 fmuld %f0,%f4,%f8 ! f8 = r1*r2
1810 ld [%o1+0x8],%f2
1811 ld [%o1+0xc],%f3
1812 ld [%o2+0x8],%f6
1813 ld [%o2+0xc],%f7
1814 fmuld %f2,%f6,%f10 ! f10= i1*i2
1815 fsubd %f8,%f10,%f12 ! f12= r1*r2-i1*i2
1816 st %f12,[%o0]
1817 st %f13,[%o0+4]
1818 fmuld %f0,%f6,%f14 ! f14= r1*i2
1819 fmuld %f2,%f4,%f16 ! f16= r2*i1
1820 faddd %f14,%f16,%f2 ! f2 = r1*i2+r2*i1
1821 st %f2,[%o0+8]
1822 st %f3,[%o0+12]
1823 .end
1824 !- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
1825 ! void
1826 ! __Fc_minus(c, a, b)
1827 ! complex *c, *a, *b;
1828 ! {
1829
1830 .inline __Fc_minus,3
1831 ! 30 c->real = a->real - b->real
1832 ld [%o1],%f0
1833 ld [%o2],%f1
1834 fsubs %f0,%f1,%f2
1835 ! 31 c->imag = a->imag - b->imag
1836 ld [%o1+4],%f3
1837 ld [%o2+4],%f4
1838 fsubs %f3,%f4,%f5
1839 st %f2,[%o0]
1840 st %f5,[%o0+4]
1841 .end
1842 ! }
1843 !- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
1844 ! void
1845 ! __Fc_add(c, a, b)
1846 ! complex *c, *a, *b;
1847 ! {
1848
1849 .inline __Fc_add,3
1850 ! 39 c->real = a->real + b->real
1851 ld [%o1],%f0
1852 ld [%o2],%f1
1853 fadds %f0,%f1,%f2
1854 ! 40 c->imag = a->imag + b->imag
1855 ld [%o1+4],%f3
1856 ld [%o2+4],%f4
1857 fadds %f3,%f4,%f5
1858 st %f2,[%o0]
1859 st %f5,[%o0+4]
1860 .end
1861 ! }
1862 !- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
1863 ! void
1864 ! __Fc_neg(c, a)
1865 ! complex *c, *a;
1866 ! {
1867
1868 .inline __Fc_neg,2
1869 ! 48 c->real = - a->real
1870 ld [%o1],%f0
1871 fnegs %f0,%f1
1872 ! 49 c->imag = - a->imag
1873 ld [%o1+4],%f2
1874 fnegs %f2,%f3
1875 st %f1,[%o0]
1876 st %f3,[%o0+4]
1877 .end
1878 ! }
1879 !- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
1880 ! void
1881 ! __Ff_conv_c(c, x)
1882 ! complex *c;
1883 ! FLOATPARAMETER x;
1884 ! {
1885
1886 .inline __Ff_conv_c,2
1887 ! 59 c->real = x
1888 st %o1,[%o0]
1889 ! 60 c->imag = 0.0
1890 st %g0,[%o0+4]
1891 .end
1892 ! }
1893 !- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
1894 ! FLOATFUNCTIONTYPE
1895 ! __Fc_conv_f(c)
1896 ! complex *c;
1897 ! {
1898
1899 .inline __Fc_conv_f,1
1900 ! 69 RETURNFLOAT(c->real)
1901 ld [%o0],%f0
1902 .end
1903 ! }
1904 !- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
1905 ! int
1906 ! __Fc_conv_i(c)
1907 ! complex *c;
1908 ! {
1909
1910 .inline __Fc_conv_i,1
1911 ! 78 return (int)c->real
1912 ld [%o0],%f0
1913 fstoi %f0,%f1
1914 st %f1,[%sp+68]
1915 ld [%sp+68],%o0
1916 .end
1917 ! }
1918 !- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
1919 ! void
1920 ! __Fi_conv_c(c, i)
1921 ! complex *c;
1922 ! int i;
1923 ! {
1924
1925 .inline __Fi_conv_c,2
1926 ! 88 c->real = (float)i
1927 st %o1,[%sp+68]
1928 ld [%sp+68],%f0
1929 fitos %f0,%f1
1930 st %f1,[%o0]
1931 ! 89 c->imag = 0.0
1932 st %g0,[%o0+4]
1933 .end
1934 ! }
1935 !- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
1936 ! double
1937 ! __Fc_conv_d(c)
1938 ! complex *c;
1939 ! {
1940
1941 .inline __Fc_conv_d,1
1942 ! 98 return (double)c->real
1943 ld [%o0],%f2
1944 fstod %f2,%f0
1945 .end
1946 ! }
1947 !- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
1948 ! void
1949 ! __Fd_conv_c(c, x)
1950 ! complex *c;
1951 ! double x;
1952 ! {
1953
1954 .inline __Fd_conv_c,2
1955 st %o1,[%sp+72]
1956 st %o2,[%sp+76]
1957 ! 109 c->real = (float)(x)
1958 ldd [%sp+72],%f0
1959 fdtos %f0,%f1
1960 st %f1,[%o0]
1961 ! 110 c->imag = 0.0
1962 st %g0,[%o0+4]
1963 .end
1964 ! }
1965 !- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
1966 ! void
1967 ! __Fc_conv_z(result, c)
1968 ! dcomplex *result;
1969 ! complex *c;
1970 ! {
1971
1972 .inline __Fc_conv_z,2
1973 ! 120 result->dreal = (double)c->real
1974 ld [%o1],%f0
1975 fstod %f0,%f2
1976 st %f2,[%o0]
1977 st %f3,[%o0+4]
1978 ! 121 result->dimag = (double)c->imag
1979 ld [%o1+4],%f3
1980 fstod %f3,%f4
1981 st %f4,[%o0+8]
1982 st %f5,[%o0+12]
1983 .end
1984 ! }
1985 !- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
1986 ! int
1987 ! __Fc_eq(x, y)
1988 ! complex *x, *y;
1989 ! {
1990
1991 .inline __Fc_eq,2
1992 ! return (x->real == y->real) && (x->imag == y->imag);
1993 ld [%o0],%f0
1994 ld [%o1],%f2
1995 mov %o0,%o2
1996 fcmps %f0,%f2
1997 mov 0,%o0
1998 fbne 1f
1999 nop
2000 ld [%o2+4],%f0
2001 ld [%o1+4],%f2
2002 fcmps %f0,%f2
2003 nop
2004 fbne 1f
2005 nop
2006 mov 1,%o0
2007 1:
2008 .end
2009 ! }
2010 !- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2011 ! int
2012 ! __Fc_ne(x, y)
2013 ! complex *x, *y;
2014 ! {
2015
2016 .inline __Fc_ne,2
2017 ! return (x->real != y->real) || (x->imag != y->imag);
2018 ld [%o0],%f0
2019 ld [%o1],%f2
2020 mov %o0,%o2
2021 fcmps %f0,%f2
2022 mov 1,%o0
2023 fbne 1f
2024 nop
2025 ld [%o2+4],%f0
2026 ld [%o1+4],%f2
2027 fcmps %f0,%f2
2028 nop
2029 fbne 1f
2030 nop
2031 mov 0,%o0
2032 1:
2033 .end
2034 ! }