Print this page
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/lib/libm/sparc/src/locallibm.il
+++ new/usr/src/lib/libm/sparc/src/locallibm.il
1 1 !
2 2 ! CDDL HEADER START
3 3 !
4 4 ! The contents of this file are subject to the terms of the
5 5 ! Common Development and Distribution License (the "License").
6 6 ! You may not use this file except in compliance with the License.
7 7 !
8 8 ! You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 ! or http://www.opensolaris.org/os/licensing.
10 10 ! See the License for the specific language governing permissions
11 11 ! and limitations under the License.
12 12 !
13 13 ! When distributing Covered Code, this CDDL HEADER in each
14 14 ! file and the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 ! If applicable, add the following below this CDDL HEADER, with the
16 16 ! fields enclosed by brackets "[]" replaced with your own identifying
17 17 ! information: Portions Copyright [yyyy] [name of copyright owner]
18 18 !
19 19 ! CDDL HEADER END
20 20 !
21 21 ! Copyright 2011 Nexenta Systems, Inc. All rights reserved.
22 22 !
23 23 ! Copyright 2006 Sun Microsystems, Inc. All rights reserved.
24 24 ! Use is subject to license terms.
25 25 !
26 26
27 27 ! Portions of this file are duplicated as GCC inline assembly in
28 28 ! libm_inlines.h. Keep them in sync.
29 29
30 30 .inline __r_hypot_,2
31 31 ld [%o0],%o4
32 32 sethi 0x1fffff,%o5
33 33 or %o5,1023,%o5
34 34 and %o4,%o5,%o4
35 35 sethi 0x1fe000,%o3
36 36 cmp %o4,%o3
37 37 ld [%o0],%f0 ! load result with first argument
38 38 bne 2f
39 39 nop
40 40 fabss %f0,%f0
41 41 ld [%o1],%f1
42 42 .volatile
43 43 fcmps %f0,%f1 ! generate invalid for Snan
44 44 .nonvolatile
45 45 nop
46 46 fba 5f
47 47 nop
48 48 2:
49 49 ld [%o1],%o4
50 50 sethi 0x1fffff,%o5
51 51 or %o5,1023,%o5
52 52 and %o4,%o5,%o4
53 53 sethi 0x1fe000,%o3
54 54 cmp %o4,%o3
55 55 bne 4f
56 56 nop
57 57 ld [%o1],%f0 ! second argument inf
58 58 fabss %f0,%f0
59 59 ld [%o0],%f1
60 60 .volatile
61 61 fcmps %f0,%f1 ! generate invalid for Snan
62 62 .nonvolatile
63 63 nop
64 64 fba 5f
65 65 nop
66 66 4:
67 67 ld [%o1],%f3
68 68 fsmuld %f0,%f0,%f0
69 69 fsmuld %f3,%f3,%f2
70 70 faddd %f2,%f0,%f0
71 71 fsqrtd %f0,%f0
72 72 fdtos %f0,%f0
73 73 5:
74 74 .end
75 75
76 76 .inline __c_abs,1
77 77 ld [%o0],%o4
78 78 sethi 0x1fffff,%o5
79 79 or %o5,1023,%o5
80 80 and %o4,%o5,%o4
81 81 sethi 0x1fe000,%o3
82 82 cmp %o4,%o3
83 83 ld [%o0],%f0
84 84 bne 2f
85 85 nop
86 86 fabss %f0,%f0
87 87 ld [%o0+4],%f1
88 88 .volatile
89 89 fcmps %f0,%f1 ! generate invalid for Snan
90 90 .nonvolatile
91 91 nop
92 92 fba 5f
93 93 nop
94 94 2:
95 95 ld [%o0+4],%o4
96 96 sethi 0x1fffff,%o5
97 97 or %o5,1023,%o5
98 98 and %o4,%o5,%o4
99 99 sethi 0x1fe000,%o3
100 100 cmp %o4,%o3
101 101 bne 4f
102 102 nop
103 103 ld [%o0+4],%f0
104 104 fabss %f0,%f0
105 105 ld [%o0],%f1
106 106 .volatile
107 107 fcmps %f0,%f1 ! generate invalid for Snan
108 108 .nonvolatile
109 109 nop
110 110 fba 5f
111 111 nop
112 112 ! store to 8-aligned address
113 113 4:
114 114 ld [%o0+4],%f3
115 115 fsmuld %f0,%f0,%f0
116 116 fsmuld %f3,%f3,%f2
117 117 faddd %f2,%f0,%f0
118 118 fsqrtd %f0,%f0
119 119 fdtos %f0,%f0
120 120 5:
121 121 .end
122 122 !- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
123 123 ! void
124 124 ! __Fc_mult(c, a, b)
125 125 ! complex *c, *a, *b;
126 126 ! {
127 127
128 128 .inline __Fc_mult,3
129 129 ! 21 c->real = (a->real * b->real) - (a->imag * b->imag)
130 130 ld [%o1+4],%f0 ! f0 = a->imag
131 131 ld [%o2+4],%f1 ! f1 = b->imag
132 132 ld [%o1],%f2 ! f2 = a->real
133 133 fsmuld %f0,%f1,%f4 ! f4 = (a->imag * b->imag)
134 134 ld [%o2],%f3 ! f3 = b->real
135 135 fsmuld %f2,%f1,%f6 ! f6 = a->real * b->imag
136 136 fsmuld %f2,%f3,%f8 ! f8 = a->real * b->real
137 137 fsmuld %f0,%f3,%f10 ! f10 = a->imag * b->real
138 138 fsubd %f8,%f4,%f0 ! f0 = ar*br - ai*bi
139 139 faddd %f6,%f10,%f2 ! f2 = ai*br + ar*bi
140 140 fdtos %f0,%f4
141 141 fdtos %f2,%f6
142 142 st %f4,[%o0]
143 143 st %f6,[%o0+4]
144 144 .end
145 145 ! }
146 146 !- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
147 147 ! void
148 148 ! __Fc_div(c, a, b)
149 149 ! complex *c, *a, *b;
150 150 ! {
151 151 .inline __Fc_div,3
152 152 ld [%o2+4],%o3
153 153 sethi %hi(0x7fffffff),%o4
154 154 or %o4,%lo(0x7fffffff),%o4 ! [internal]
155 155 andcc %o3,%o4,%g0
156 156 ld [%o2],%f6 ! f6 gets reb
157 157 bne 1f
158 158 nop
159 159 ld [%o1],%f0
160 160 ld [%o2],%f1
161 161 fdivs %f0,%f1,%f0
162 162 st %f0,[%o0]
163 163 ld [%o1+4],%f3
164 164 fdivs %f3,%f1,%f3
165 165 st %f3,[%o0+4]
166 166 ba 2f
167 167 nop
168 168 1: ! [internal]
169 169 sethi %hi(0x3ff00000),%o4
170 170 or %g0,0,%o5
171 171 std %o4,[%sp+0x48]
172 172 ldd [%sp+0x48],%f8
173 173 ld [%o2+4],%f10 ! f10 gets imb
174 174 fsmuld %f6,%f6,%f16 ! f16/17 gets reb**2
175 175 ld [%o1+4],%f4 ! f4 gets ima
176 176 fsmuld %f10,%f10,%f12 ! f12/13 gets imb**2
177 177 ld [%o1],%f19 ! f19 gets rea
178 178 fsmuld %f4,%f10,%f0 ! f0/f1 gets ima*imb
179 179 fsmuld %f19,%f6,%f2 ! f2/3 gets rea*reb
180 180 faddd %f12,%f16,%f12 ! f12/13 gets reb**2+imb**2
181 181 fdivd %f8,%f12,%f12 ! f12/13 gets 1/(reb**2+imb**2)
182 182 faddd %f2,%f0,%f2 ! f2/3 gets rea*reb+ima*imb
183 183 fsmuld %f4,%f6,%f24 ! f24/5 gets ima*reb
184 184 fmuld %f2,%f12,%f2 ! f2/3 gets rec
185 185 fsmuld %f19,%f10,%f10 ! f10/11 gets rea*imb
186 186 fsubd %f24,%f10,%f10 ! f10/11 gets ima*reb-rea*imb
187 187 fmuld %f10,%f12,%f12 ! f12 gets imc
188 188 fdtos %f2,%f7 ! f7 gets rec
189 189 fdtos %f12,%f15 ! f15 gets imc
190 190 st %f7,[%o0]
191 191 st %f15,[%o0+4]
192 192 2:
193 193 .end
194 194 ! }
195 195
196 196 .inline .mul,2
197 197 .volatile
198 198 smul %o0,%o1,%o0
199 199 rd %y,%o1
200 200 sra %o0,31,%o2
201 201 cmp %o1,%o2
202 202 .nonvolatile
203 203 .end
204 204
205 205 .inline .umul,2
206 206 .volatile
207 207 umul %o0,%o1,%o0
208 208 rd %y,%o1
209 209 tst %o1
210 210 .nonvolatile
211 211 .end
212 212
213 213 .inline .div,2
214 214 sra %o0,31,%o4 ! extend sign
215 215 .volatile
216 216 wr %o4,%g0,%y
217 217 cmp %o1,0xffffffff ! is divisor -1?
218 218 be,a 1f ! if yes
219 219 .volatile
220 220 subcc %g0,%o0,%o0 ! simply negate dividend
221 221 nop ! RT620 FABs A.0/A.1
222 222 sdiv %o0,%o1,%o0 ! o0 contains quotient a/b
223 223 .nonvolatile
224 224 1:
225 225 .end
226 226
227 227 .inline .udiv,2
228 228 .volatile
229 229 wr %g0,%g0,%y
230 230 nop
231 231 nop
232 232 nop
233 233 udiv %o0,%o1,%o0 ! o0 contains quotient a/b
234 234 .nonvolatile
235 235 .end
236 236
237 237 .inline .rem,2
238 238 sra %o0,31,%o4 ! extend sign
239 239 .volatile
240 240 wr %o4,%g0,%y
241 241 cmp %o1,0xffffffff ! is divisor -1?
242 242 be,a 1f ! if yes
243 243 .volatile
244 244 or %g0,%g0,%o0 ! simply return 0
245 245 nop ! RT620 FABs A.0/A.1
246 246 sdiv %o0,%o1,%o2 ! o2 contains quotient a/b
247 247 .nonvolatile
248 248 smul %o2,%o1,%o4 ! o4 contains q*b
249 249 sub %o0,%o4,%o0 ! o0 gets a-q*b
250 250 1:
251 251 .end
252 252
253 253 .inline .urem,2
254 254 .volatile
255 255 wr %g0,%g0,%y
256 256 nop
257 257 nop
258 258 nop
259 259 udiv %o0,%o1,%o2 ! o2 contains quotient a/b
260 260 .nonvolatile
261 261 umul %o2,%o1,%o4 ! o4 contains q*b
262 262 sub %o0,%o4,%o0 ! o0 gets a-q*b
263 263 .end
264 264
265 265 .inline .div_o3,2
266 266 sra %o0,31,%o4 ! extend sign
267 267 .volatile
268 268 wr %o4,%g0,%y
269 269 cmp %o1,0xffffffff ! is divisor -1?
270 270 be,a 1f ! if yes
271 271 .volatile
272 272 subcc %g0,%o0,%o0 ! simply negate dividend
273 273 mov %o0,%o3 ! o3 gets __remainder
274 274 sdiv %o0,%o1,%o0 ! o0 contains quotient a/b
275 275 .nonvolatile
276 276 smul %o0,%o1,%o4 ! o4 contains q*b
277 277 ba 2f
278 278 sub %o3,%o4,%o3 ! o3 gets a-q*b
279 279 1:
280 280 mov %g0,%o3 ! __remainder is 0
281 281 2:
282 282 .end
283 283
284 284 .inline .udiv_o3,2
285 285 .volatile
286 286 wr %g0,%g0,%y
287 287 mov %o0,%o3 ! o3 gets __remainder
288 288 nop
289 289 nop
290 290 udiv %o0,%o1,%o0 ! o0 contains quotient a/b
291 291 .nonvolatile
292 292 umul %o0,%o1,%o4 ! o4 contains q*b
293 293 sub %o3,%o4,%o3 ! o3 gets a-q*b
294 294 .end
295 295
296 296 .inline __ieee754_sqrt,2
297 297 std %o0,[%sp+0x48] ! store to 8-aligned address
298 298 ldd [%sp+0x48],%f0
299 299 fsqrtd %f0,%f0
300 300 .end
301 301
302 302 .inline __inline_sqrtf,1
303 303 st %o0,[%sp+0x44]
304 304 ld [%sp+0x44],%f0
305 305 fsqrts %f0,%f0
306 306 .end
307 307
308 308 .inline __inline_sqrt,2
309 309 std %o0,[%sp+0x48] ! store to 8-aligned address
310 310 ldd [%sp+0x48],%f0
311 311 fsqrtd %f0,%f0
312 312 .end
313 313
314 314 .inline __sqrtf,1
315 315 st %o0,[%sp+0x44]
316 316 ld [%sp+0x44],%f0
317 317 fsqrts %f0,%f0
318 318 .end
319 319
320 320 .inline __sqrt,2
321 321 std %o0,[%sp+0x48] ! store to 8-aligned address
322 322 ldd [%sp+0x48],%f0
323 323 fsqrtd %f0,%f0
324 324 .end
325 325
326 326 .inline __r_sqrt_,1
327 327 ld [%o0],%f0
328 328 fsqrts %f0,%f0
329 329 .end
330 330
331 331 .inline __d_sqrt_,1
332 332 ld [%o0],%f0
333 333 ld [%o0+4],%f1
334 334 fsqrtd %f0,%f0
335 335 .end
336 336
337 337 .inline __ceil,2
338 338 std %o0,[%sp+0x48]
339 339 sethi %hi(0x80000000),%o5
340 340 andn %o0,%o5,%o2
341 341 sethi %hi(0x43300000),%o3
342 342 st %g0,[%sp+0x54]
343 343 subcc %o2,%o3,%g0
344 344 bl 1f
345 345 nop
346 346 sethi %hi(0x3ff00000),%o2
347 347 st %o2,[%sp+0x50]
348 348 ldd [%sp+0x48],%f0
349 349 ldd [%sp+0x50],%f2
350 350 fmuld %f0,%f2,%f0
351 351 ba 4f
352 352 nop
353 353 1:
354 354 tst %o0
355 355 st %o3,[%sp+0x50]
356 356 ldd [%sp+0x50],%f2
357 357 bge 2f
358 358 nop
359 359 fnegs %f2,%f2
360 360 2:
361 361 ldd [%sp+0x48],%f4
362 362 faddd %f4,%f2,%f0
363 363 fsubd %f0,%f2,%f0
364 364 fcmpd %f0,%f4
365 365 sethi %hi(0x3ff00000),%o2
366 366 st %o2,[%sp+0x50]
367 367 and %o0,%o5,%o4
368 368 fbge 3f
369 369 nop
370 370 ldd [%sp+0x50],%f4
371 371 faddd %f0,%f4,%f0
372 372 3:
373 373 st %f0,[%sp+0x48]
374 374 ld [%sp+0x48],%o3
375 375 andn %o3,%o5,%o3
376 376 or %o4,%o3,%o3
377 377 st %o3,[%sp+0x48]
378 378 ld [%sp+0x48],%f0
379 379 4:
380 380 .end
381 381
382 382 .inline __floor,2
383 383 std %o0,[%sp+0x48]
384 384 sethi %hi(0x80000000),%o5
385 385 andn %o0,%o5,%o2
386 386 sethi %hi(0x43300000),%o3
387 387 st %g0,[%sp+0x54]
388 388 subcc %o2,%o3,%g0
389 389 bl 1f
390 390 nop
391 391 sethi %hi(0x3ff00000),%o2
392 392 st %o2,[%sp+0x50]
393 393 ldd [%sp+0x48],%f0
394 394 ldd [%sp+0x50],%f2
395 395 fmuld %f0,%f2,%f0
396 396 ba 4f
397 397 nop
398 398 1:
399 399 tst %o0
400 400 st %o3,[%sp+0x50]
401 401 ldd [%sp+0x50],%f2
402 402 bge 2f
403 403 nop
404 404 fnegs %f2,%f2
405 405 2:
406 406 ldd [%sp+0x48],%f4
407 407 faddd %f4,%f2,%f0
408 408 fsubd %f0,%f2,%f0
409 409 fcmpd %f0,%f4
410 410 sethi %hi(0x3ff00000),%o2
411 411 st %o2,[%sp+0x50]
412 412 ldd [%sp+0x50],%f4
413 413 and %o0,%o5,%o4
414 414 fble 3f
415 415 nop
416 416 fsubd %f0,%f4,%f0
417 417 3:
418 418 st %f0,[%sp+0x48]
419 419 ld [%sp+0x48],%o3
420 420 andn %o3,%o5,%o3
421 421 or %o4,%o3,%o3
422 422 st %o3,[%sp+0x48]
423 423 ld [%sp+0x48],%f0
424 424 4:
425 425 .end
426 426
427 427 .inline __ilogb,2
428 428 sethi %hi(0x7ff00000),%o4
429 429 andcc %o4,%o0,%o2
430 430 bne 1f
431 431 nop
432 432 sethi %hi(0x43500000),%o3
433 433 std %o0,[%sp+0x48]
434 434 st %o3,[%sp+0x50]
435 435 st %g0,[%sp+0x54]
436 436 ldd [%sp+0x48],%f0
437 437 ldd [%sp+0x50],%f2
438 438 fmuld %f0,%f2,%f0
439 439 sethi %hi(0x80000001),%o0
440 440 or %o0,%lo(0x80000001),%o0
441 441 st %f0,[%sp+0x48]
442 442 ld [%sp+0x48],%o2
443 443 andcc %o2,%o4,%o2
444 444 srl %o2,20,%o2
445 445 be 2f
446 446 nop
447 447 sub %o2,0x435,%o0
448 448 ba 2f
449 449 nop
450 450 1:
451 451 subcc %o4,%o2,%g0
452 452 srl %o2,20,%o3
453 453 bne 0f
454 454 nop
455 455 sethi %hi(0x7fffffff),%o0
456 456 or %o0,%lo(0x7fffffff),%o0
457 457 ba 2f
458 458 nop
459 459 0:
460 460 sub %o3,0x3ff,%o0
461 461 2:
462 462 .end
463 463
464 464 .inline __rint,2
465 465 std %o0,[%sp+0x48]
466 466 sethi %hi(0x80000000),%o2
467 467 andn %o0,%o2,%o2
468 468 ldd [%sp+0x48],%f0
469 469 sethi %hi(0x43300000),%o3
470 470 st %g0,[%sp+0x50]
471 471 st %g0,[%sp+0x54]
472 472 subcc %o2,%o3,%g0
473 473 bl 1f
474 474 nop
475 475 sethi %hi(0x3ff00000),%o2
476 476 st %o2,[%sp+0x50]
477 477 ldd [%sp+0x50],%f2
478 478 fmuld %f0,%f2,%f0
479 479 ba 3f
480 480 nop
481 481 1:
482 482 tst %o0
483 483 st %o3,[%sp+0x48]
484 484 st %g0,[%sp+0x4c]
485 485 ldd [%sp+0x48],%f2
486 486 bge 2f
487 487 nop
488 488 fnegs %f2,%f2
489 489 2:
490 490 faddd %f0,%f2,%f0
491 491 fcmpd %f0,%f2
492 492 fbne 0f
493 493 nop
494 494 ldd [%sp+0x50],%f0
495 495 bge 3f
496 496 nop
497 497 fnegs %f0,%f0
498 498 ba 3f
499 499 nop
500 500 0:
501 501 fsubd %f0,%f2,%f0
502 502 3:
503 503 .end
504 504
505 505 .inline __rintf,1
506 506 st %o0,[%sp+0x48]
507 507 sethi %hi(0x80000000),%o2
508 508 andn %o0,%o2,%o2
509 509 ld [%sp+0x48],%f0
510 510 sethi %hi(0x4b000000),%o3
511 511 st %g0,[%sp+0x50]
512 512 subcc %o2,%o3,%g0
513 513 bl 1f
514 514 nop
515 515 sethi %hi(0x3f800000),%o2
516 516 st %o2,[%sp+0x50]
517 517 ld [%sp+0x50],%f2
518 518 fmuls %f0,%f2,%f0
519 519 ba 3f
520 520 nop
521 521 1:
522 522 tst %o0
523 523 st %o3,[%sp+0x48]
524 524 ld [%sp+0x48],%f2
525 525 bge 2f
526 526 nop
527 527 fnegs %f2,%f2
528 528 2:
529 529 fadds %f0,%f2,%f0
530 530 fcmps %f0,%f2
531 531 fbne 0f
532 532 nop
533 533 ld [%sp+0x50],%f0
534 534 bge 3f
535 535 nop
536 536 fnegs %f0,%f0
537 537 ba 3f
538 538 nop
539 539 0:
540 540 fsubs %f0,%f2,%f0
541 541 3:
542 542 .end
543 543
544 544 .inline __min_subnormal,0
545 545 set 0x0,%o0
546 546 st %o0,[%sp+0x44]
547 547 ld [%sp+0x44],%f0
548 548 set 0x1,%o0
549 549 st %o0,[%sp+0x44]
550 550 ld [%sp+0x44],%f1
551 551 .end
552 552
553 553 .inline __d_min_subnormal_,0
554 554 set 0x0,%o0
555 555 st %o0,[%sp+0x44]
556 556 ld [%sp+0x44],%f0
557 557 set 0x1,%o0
558 558 st %o0,[%sp+0x44]
559 559 ld [%sp+0x44],%f1
560 560 .end
561 561
562 562 .inline __min_subnormalf,0
563 563 set 0x1,%o0
564 564 st %o0,[%sp+0x44]
565 565 ld [%sp+0x44],%f0
566 566 .end
567 567
568 568 .inline __r_min_subnormal_,0
569 569 set 0x1,%o0
570 570 st %o0,[%sp+0x44]
571 571 ld [%sp+0x44],%f0
572 572 .end
573 573
574 574 .inline __max_subnormal,0
575 575 set 0x000fffff,%o0
576 576 st %o0,[%sp+0x44]
577 577 ld [%sp+0x44],%f0
578 578 set 0xffffffff,%o0
579 579 st %o0,[%sp+0x44]
580 580 ld [%sp+0x44],%f1
581 581 .end
582 582
583 583 .inline __d_max_subnormal_,0
584 584 set 0x000fffff,%o0
585 585 st %o0,[%sp+0x44]
586 586 ld [%sp+0x44],%f0
587 587 set 0xffffffff,%o0
588 588 st %o0,[%sp+0x44]
589 589 ld [%sp+0x44],%f1
590 590 .end
591 591
592 592 .inline __max_subnormalf,0
593 593 set 0x007fffff,%o0
594 594 st %o0,[%sp+0x44]
595 595 ld [%sp+0x44],%f0
596 596 .end
597 597
598 598 .inline __r_max_subnormal_,0
599 599 set 0x007fffff,%o0
600 600 st %o0,[%sp+0x44]
601 601 ld [%sp+0x44],%f0
602 602 .end
603 603
604 604 .inline __min_normal,0
605 605 set 0x00100000,%o0
606 606 set 0x0,%o1
607 607 std %o0,[%sp+0x48]
608 608 ldd [%sp+0x48],%f0
609 609 .end
610 610
611 611 .inline __d_min_normal_,0
612 612 set 0x00100000,%o0
613 613 st %o0,[%sp+0x44]
614 614 ld [%sp+0x44],%f0
615 615 set 0x0,%o0
616 616 st %o0,[%sp+0x44]
617 617 ld [%sp+0x44],%f1
618 618 .end
619 619
620 620 .inline __min_normalf,0
621 621 set 0x00800000,%o0
622 622 st %o0,[%sp+0x44]
623 623 ld [%sp+0x44],%f0
624 624 .end
625 625
626 626 .inline __r_min_normal_,0
627 627 set 0x00800000,%o0
628 628 st %o0,[%sp+0x44]
629 629 ld [%sp+0x44],%f0
630 630 .end
631 631
632 632 .inline __max_normal,0
633 633 set 0x7fefffff,%o0
634 634 set 0xffffffff,%o1
635 635 std %o0,[%sp+0x48]
636 636 ldd [%sp+0x48],%f0
637 637 .end
638 638
639 639 .inline __d_max_normal_,0
640 640 set 0x7fefffff,%o0
641 641 st %o0,[%sp+0x44]
642 642 ld [%sp+0x44],%f0
643 643 set 0xffffffff,%o0
644 644 st %o0,[%sp+0x44]
645 645 ld [%sp+0x44],%f1
646 646 .end
647 647
648 648 .inline __max_normalf,0
649 649 set 0x7f7fffff,%o0
650 650 st %o0,[%sp+0x44]
651 651 ld [%sp+0x44],%f0
652 652 .end
653 653
654 654 .inline __r_max_normal_,0
655 655 set 0x7f7fffff,%o0
656 656 st %o0,[%sp+0x44]
657 657 ld [%sp+0x44],%f0
658 658 .end
659 659
660 660 .inline __infinity,0
661 661 set 0x7ff00000,%o0
662 662 set 0x0,%o1
663 663 std %o0,[%sp+0x48]
664 664 ldd [%sp+0x48],%f0
665 665 .end
666 666
667 667 .inline __infinity,0
668 668 set 0x7ff00000,%o0
669 669 set 0x0,%o1
670 670 std %o0,[%sp+0x48]
671 671 ldd [%sp+0x48],%f0
672 672 .end
673 673
674 674 .inline __d_infinity_,0
675 675 set 0x7ff00000,%o0
676 676 st %o0,[%sp+0x44]
677 677 ld [%sp+0x44],%f0
678 678 set 0x0,%o0
679 679 st %o0,[%sp+0x44]
680 680 ld [%sp+0x44],%f1
681 681 .end
682 682
683 683 .inline __infinityf,0
684 684 set 0x7f800000,%o0
685 685 st %o0,[%sp+0x44]
686 686 ld [%sp+0x44],%f0
687 687 .end
688 688
689 689 .inline __r_infinity_,0
690 690 set 0x7f800000,%o0
691 691 st %o0,[%sp+0x44]
692 692 ld [%sp+0x44],%f0
693 693 .end
694 694
695 695 .inline __signaling_nan,0
696 696 set 0x7ff00000,%o0
697 697 set 0x1,%o1
698 698 std %o0,[%sp+0x48]
699 699 ldd [%sp+0x48],%f0
700 700 .end
701 701
702 702 .inline __d_signaling_nan_,0
703 703 set 0x7ff00000,%o0
704 704 st %o0,[%sp+0x44]
705 705 ld [%sp+0x44],%f0
706 706 set 0x1,%o0
707 707 st %o0,[%sp+0x44]
708 708 ld [%sp+0x44],%f1
709 709 .end
710 710
711 711 .inline __signaling_nanf,0
712 712 set 0x7f800001,%o0
713 713 st %o0,[%sp+0x44]
714 714 ld [%sp+0x44],%f0
715 715 .end
716 716
717 717 .inline __r_signaling_nan_,0
718 718 set 0x7f800001,%o0
719 719 st %o0,[%sp+0x44]
720 720 ld [%sp+0x44],%f0
721 721 .end
722 722
723 723 .inline __quiet_nan,0
724 724 set 0x7fffffff,%o0
725 725 st %o0,[%sp+0x44]
726 726 ld [%sp+0x44],%f0
727 727 set 0xffffffff,%o0
728 728 st %o0,[%sp+0x44]
729 729 ld [%sp+0x44],%f1
730 730 .end
731 731
732 732 .inline __d_quiet_nan_,0
733 733 set 0x7fffffff,%o0
734 734 st %o0,[%sp+0x44]
735 735 ld [%sp+0x44],%f0
736 736 set 0xffffffff,%o0
737 737 st %o0,[%sp+0x44]
738 738 ld [%sp+0x44],%f1
739 739 .end
740 740
741 741 .inline __quiet_nanf,0
742 742 set 0x7fffffff,%o0
743 743 st %o0,[%sp+0x44]
744 744 ld [%sp+0x44],%f0
↓ open down ↓ |
744 lines elided |
↑ open up ↑ |
745 745 .end
746 746
747 747 .inline __r_quiet_nan_,0
748 748 set 0x7fffffff,%o0
749 749 st %o0,[%sp+0x44]
750 750 ld [%sp+0x44],%f0
751 751 .end
752 752
753 753 .inline __swapEX,1
754 754 and %o0,0x1f,%o1
755 - sll %o1,5,%o1 ! input to aexc bit location
755 + sll %o1,5,%o1 ! shift input to aexc bit location
756 756 .volatile
757 757 st %fsr,[%sp+0x44]
758 758 ld [%sp+0x44],%o0 ! o0 = fsr
759 759 andn %o0,0x3e0,%o2
760 760 or %o1,%o2,%o1 ! o1 = new fsr
761 761 st %o1,[%sp+0x44]
762 762 ld [%sp+0x44],%fsr
763 763 srl %o0,5,%o0
764 764 and %o0,0x1f,%o0
765 765 .nonvolatile
766 766 .end
767 767
768 768 .inline _QgetRD,0
769 769 st %fsr,[%sp+0x44]
↓ open down ↓ |
4 lines elided |
↑ open up ↑ |
770 770 ld [%sp+0x44],%o0 ! o0 = fsr
771 771 srl %o0,30,%o0 ! return __round control value
772 772 .end
773 773
774 774 .inline _QgetRP,0
775 775 or %g0,%g0,%o0
776 776 .end
777 777
778 778 .inline __swapRD,1
779 779 and %o0,0x3,%o0
780 - sll %o0,30,%o1 ! input to RD bit location
780 + sll %o0,30,%o1 ! shift input to RD bit location
781 781 .volatile
782 782 st %fsr,[%sp+0x44]
783 783 ld [%sp+0x44],%o0 ! o0 = fsr
784 784 set 0xc0000000,%o4 ! mask of rounding direction bits
785 785 andn %o0,%o4,%o2
786 786 or %o1,%o2,%o1 ! o1 = new fsr
787 787 st %o1,[%sp+0x44]
788 788 ld [%sp+0x44],%fsr
789 789 srl %o0,30,%o0
790 790 and %o0,0x3,%o0
791 791 .nonvolatile
792 792 .end
↓ open down ↓ |
2 lines elided |
↑ open up ↑ |
793 793 !
794 794 ! On the SPARC, __swapRP is a no-op; always return 0 for backward compatibility
795 795 !
796 796
797 797 .inline __swapRP,1
798 798 or %g0,%g0,%o0
799 799 .end
800 800
801 801 .inline __swapTE,1
802 802 and %o0,0x1f,%o0
803 - sll %o0,23,%o1 ! input to TEM bit location
803 + sll %o0,23,%o1 ! shift input to TEM bit location
804 804 .volatile
805 805 st %fsr,[%sp+0x44]
806 806 ld [%sp+0x44],%o0 ! o0 = fsr
807 807 set 0x0f800000,%o4 ! mask of TEM (Trap Enable Mode bits)
808 808 andn %o0,%o4,%o2
809 809 or %o1,%o2,%o1 ! o1 = new fsr
810 810 st %o1,[%sp+0x48]
811 811 ld [%sp+0x48],%fsr
812 812 srl %o0,23,%o0
813 813 and %o0,0x1f,%o0
814 814 .nonvolatile
815 815 .end
816 816
817 817 .inline __fp_class,2
818 818 sethi %hi(0x80000000),%o2 ! o2 gets 80000000
819 819 andn %o0,%o2,%o0 ! o0-o1 gets abs(x)
820 820 orcc %o0,%o1,%g0 ! set cc as x is zero/nonzero
821 821 bne 1f ! branch if x is nonzero
822 822 nop
823 823 mov 0,%o0
824 824 ba 2f ! x is 0
825 825 nop
826 826 1:
827 827 sethi %hi(0x7ff00000),%o2 ! o2 gets 7ff00000
828 828 andcc %o0,%o2,%g0 ! cc set by __exp field of x
829 829 bne 1f ! branch if normal or max __exp
830 830 nop
831 831 mov 1,%o0
832 832 ba 2f ! x is subnormal
833 833 nop
834 834 1:
835 835 cmp %o0,%o2
836 836 bge 1f ! branch if x is max __exp
837 837 nop
838 838 mov 2,%o0
839 839 ba 2f ! x is normal
840 840 nop
841 841 1:
842 842 andn %o0,%o2,%o0 ! o0 gets msw __significand field
843 843 orcc %o0,%o1,%g0 ! set cc by OR __significand
844 844 bne 1f ! Branch if __nan
845 845 nop
846 846 mov 3,%o0
847 847 ba 2f ! x is __infinity
848 848 nop
849 849 1:
850 850 sethi %hi(0x00080000),%o2
851 851 andcc %o0,%o2,%g0 ! set cc by quiet/sig bit
852 852 be 1f ! Branch if signaling
853 853 nop
854 854 mov 4,%o0 ! x is quiet NaN
855 855 ba 2f
856 856 nop
857 857 1:
858 858 mov 5,%o0 ! x is signaling NaN
859 859 2:
860 860 .end
861 861
862 862 .inline __fp_classf,1
863 863 sethi %hi(0x80000000),%o2
864 864 andncc %o0,%o2,%o0
865 865 bne 1f
866 866 nop
867 867 mov 0,%o0
868 868 ba 2f ! x is 0
869 869 nop
870 870 1:
871 871 sethi %hi(0x7f800000),%o2
872 872 andcc %o0,%o2,%g0
873 873 bne 1f
874 874 nop
875 875 mov 1,%o0
876 876 ba 2f ! x is subnormal
877 877 nop
878 878 1:
879 879 cmp %o0,%o2
880 880 bge 1f
881 881 nop
882 882 mov 2,%o0
883 883 ba 2f ! x is normal
884 884 nop
885 885 1:
886 886 bg 1f
887 887 nop
888 888 mov 3,%o0
889 889 ba 2f ! x is __infinity
890 890 nop
891 891 1:
892 892 sethi %hi(0x00400000),%o2
893 893 andcc %o0,%o2,%g0
894 894 mov 4,%o0 ! x is quiet NaN
895 895 bne 2f
896 896 nop
897 897 mov 5,%o0 ! x is signaling NaN
898 898 2:
899 899 .end
900 900
901 901 .inline __ir_fp_class_,1
902 902 ld [%o0],%o0
903 903 sethi %hi(0x80000000),%o2
904 904 andncc %o0,%o2,%o0
905 905 bne 1f
906 906 nop
907 907 mov 0,%o0
908 908 ba 2f ! x is 0
909 909 nop
910 910 1:
911 911 sethi %hi(0x7f800000),%o2
912 912 andcc %o0,%o2,%g0
913 913 bne 1f
914 914 nop
915 915 mov 1,%o0
916 916 ba 2f ! x is subnormal
917 917 nop
918 918 1:
919 919 cmp %o0,%o2
920 920 bge 1f
921 921 nop
922 922 mov 2,%o0
923 923 ba 2f ! x is normal
924 924 nop
925 925 1:
926 926 bg 1f
927 927 nop
928 928 mov 3,%o0
929 929 ba 2f ! x is __infinity
930 930 nop
931 931 1:
932 932 sethi %hi(0x00400000),%o2
933 933 andcc %o0,%o2,%g0
934 934 mov 4,%o0 ! x is quiet NaN
935 935 bne 2f
936 936 nop
937 937 mov 5,%o0 ! x is signaling NaN
938 938 2:
939 939 .end
940 940
941 941 .inline __copysign,4
942 942 set 0x80000000,%o3
943 943 and %o2,%o3,%o2
944 944 andn %o0,%o3,%o0
945 945 or %o0,%o2,%o0
946 946 std %o0,[%sp+0x48]
947 947 ldd [%sp+0x48],%f0
948 948 .end
949 949
950 950 .inline __copysignf,2
951 951 set 0x80000000,%o2
952 952 andn %o0,%o2,%o0
953 953 and %o1,%o2,%o1
954 954 or %o0,%o1,%o0
955 955 st %o0,[%sp+0x44]
956 956 ld [%sp+0x44],%f0
957 957 .end
958 958
959 959 .inline __r_copysign_,2
960 960 ld [%o0],%o0
961 961 ld [%o1],%o1
962 962 set 0x80000000,%o2
963 963 andn %o0,%o2,%o0
964 964 and %o1,%o2,%o1
965 965 or %o0,%o1,%o0
966 966 st %o0,[%sp+0x44]
967 967 ld [%sp+0x44],%f0
968 968 .end
969 969
970 970 .inline _finite,2
971 971 set 0x7ff00000,%o1
972 972 and %o0,%o1,%o0
973 973 cmp %o0,%o1
974 974 mov 1,%o0
975 975 bne 1f
976 976 nop
977 977 mov 0,%o0
978 978 1:
979 979 .end
980 980
981 981 .inline __finitef,2
982 982 set 0x7f800000,%o1
983 983 and %o0,%o1,%o0
984 984 cmp %o0,%o1
985 985 mov 1,%o0
986 986 bne 1f
987 987 nop
988 988 mov 0,%o0
989 989 1:
990 990 .end
991 991
992 992 .inline __ir_finite_,1
993 993 ld [%o0],%o0
994 994 set 0x7f800000,%o1
995 995 and %o0,%o1,%o0
996 996 cmp %o0,%o1
997 997 mov 1,%o0
998 998 bne 1f
999 999 nop
1000 1000 mov 0,%o0
1001 1001 1:
1002 1002 .end
1003 1003
1004 1004 .inline __signbit,1
1005 1005 srl %o0,31,%o0
1006 1006 .end
1007 1007
1008 1008 .inline __signbitf,1
1009 1009 srl %o0,31,%o0
1010 1010 .end
1011 1011
1012 1012 .inline __ir_signbit_,1
1013 1013 ld [%o0],%o0
1014 1014 srl %o0,31,%o0
1015 1015 .end
1016 1016
1017 1017 .inline __isinf,2
1018 1018 tst %o1
1019 1019 sethi %hi(0x80000000),%o2
1020 1020 bne 1f
1021 1021 nop
1022 1022 andn %o0,%o2,%o0
1023 1023 sethi %hi(0x7ff00000),%o2
1024 1024 cmp %o0,%o2
1025 1025 mov 1,%o0
1026 1026 be 2f
1027 1027 nop
1028 1028 1:
1029 1029 mov 0,%o0
1030 1030 2:
1031 1031 .end
1032 1032
1033 1033 .inline __isinff,1
1034 1034 sethi %hi(0x80000000),%o2
1035 1035 andn %o0,%o2,%o0 ! o0 gets abs(x)
1036 1036 sethi %hi(0x7f800000),%o2
1037 1037 cmp %o0,%o2
1038 1038 mov 0,%o0
1039 1039 bne 1f ! Branch if not inf.
1040 1040 nop
1041 1041 mov 1,%o0
1042 1042 1:
1043 1043 .end
1044 1044
1045 1045 .inline __ir_isinf_,1
1046 1046 ld [%o0],%o0
1047 1047 sethi %hi(0x80000000),%o2
1048 1048 andn %o0,%o2,%o0 ! o0 gets abs(x)
1049 1049 sethi %hi(0x7f800000),%o2
1050 1050 cmp %o0,%o2
1051 1051 mov 0,%o0
1052 1052 bne 1f ! Branch if not inf.
1053 1053 nop
1054 1054 mov 1,%o0
1055 1055 1:
1056 1056 .end
1057 1057
1058 1058 .inline __isnan,2
1059 1059 sethi %hi(0x80000000),%o2
1060 1060 andn %o0,%o2,%o0
1061 1061 sub %g0,%o1,%o3
1062 1062 or %o1,%o3,%o1
1063 1063 srl %o1,31,%o1
1064 1064 or %o0,%o1,%o0
1065 1065 sethi %hi(0x7ff00000),%o4
1066 1066 sub %o4,%o0,%o0
1067 1067 srl %o0,31,%o0
1068 1068 .end
1069 1069
1070 1070 .inline __isnanf,1
1071 1071 sethi %hi(0x80000000),%o2
1072 1072 andn %o0,%o2,%o0
1073 1073 sethi %hi(0x7f800000),%o1
1074 1074 sub %o1,%o0,%o0
1075 1075 srl %o0,31,%o0
1076 1076 .end
1077 1077
1078 1078 .inline __ir_isnan_,1
1079 1079 ld [%o0],%o0
1080 1080 sethi %hi(0x80000000),%o2
1081 1081 andn %o0,%o2,%o0
1082 1082 sethi %hi(0x7f800000),%o1
1083 1083 sub %o1,%o0,%o0
1084 1084 srl %o0,31,%o0
1085 1085 .end
1086 1086
1087 1087 .inline __isnormal,2
1088 1088 sethi %hi(0x80000000),%o2
1089 1089 andn %o0,%o2,%o0
1090 1090 sethi %hi(0x7ff00000),%o2
1091 1091 cmp %o0,%o2
1092 1092 sethi %hi(0x00100000),%o2
1093 1093 bge 1f
1094 1094 nop
1095 1095 cmp %o0,%o2
1096 1096 mov 1,%o0
1097 1097 bge 2f
1098 1098 nop
1099 1099 1:
1100 1100 mov 0,%o0
1101 1101 2:
1102 1102 .end
1103 1103
1104 1104 .inline __isnormalf,1
1105 1105 sethi %hi(0x80000000),%o2
1106 1106 andn %o0,%o2,%o0
1107 1107 sethi %hi(0x7f800000),%o2
1108 1108 cmp %o0,%o2
1109 1109 sethi %hi(0x00800000),%o2
1110 1110 bge 1f
1111 1111 nop
1112 1112 cmp %o0,%o2
1113 1113 mov 1,%o0
1114 1114 bge 2f
1115 1115 nop
1116 1116 1:
1117 1117 mov 0,%o0
1118 1118 2:
1119 1119 .end
1120 1120
1121 1121 .inline __ir_isnormal_,1
1122 1122 ld [%o0],%o0
1123 1123 sethi %hi(0x80000000),%o2
1124 1124 andn %o0,%o2,%o0
1125 1125 sethi %hi(0x7f800000),%o2
1126 1126 cmp %o0,%o2
1127 1127 sethi %hi(0x00800000),%o2
1128 1128 bge 1f
1129 1129 nop
1130 1130 cmp %o0,%o2
1131 1131 mov 1,%o0
1132 1132 bge 2f
1133 1133 nop
1134 1134 1:
1135 1135 mov 0,%o0
1136 1136 2:
1137 1137 .end
1138 1138
1139 1139 .inline __issubnormal,2
1140 1140 sethi %hi(0x80000000),%o2 ! o2 gets 80000000
1141 1141 andn %o0,%o2,%o0 ! o0/o1 gets abs(x)
1142 1142 sethi %hi(0x00100000),%o2 ! o2 gets 00100000
1143 1143 cmp %o0,%o2
1144 1144 bge 1f ! branch if x norm or max __exp
1145 1145 nop
1146 1146 orcc %o0,%o1,%g0
1147 1147 be 1f ! Branch if x zero
1148 1148 nop
1149 1149 mov 1,%o0 ! x is subnormal
1150 1150 ba 2f
1151 1151 nop
1152 1152 1:
1153 1153 mov 0,%o0
1154 1154 2:
1155 1155 .end
1156 1156
1157 1157 .inline __issubnormalf,1
1158 1158 sethi %hi(0x80000000),%o2 ! o2 gets 80000000
1159 1159 andn %o0,%o2,%o0 ! o0 gets abs(x)
1160 1160 sethi %hi(0x00800000),%o2 ! o2 gets 00800000
1161 1161 cmp %o0,%o2
1162 1162 bge 1f ! branch if x norm or max __exp
1163 1163 nop
1164 1164 orcc %o0,%g0,%g0
1165 1165 be 1f ! Branch if x zero
1166 1166 nop
1167 1167 mov 1,%o0 ! x is subnormal
1168 1168 ba 2f
1169 1169 nop
1170 1170 1:
1171 1171 mov 0,%o0
1172 1172 2:
1173 1173 .end
1174 1174
1175 1175 .inline __ir_issubnormal_,1
1176 1176 ld [%o0],%o0
1177 1177 sethi %hi(0x80000000),%o2 ! o2 gets 80000000
1178 1178 andn %o0,%o2,%o0 ! o0 gets abs(x)
1179 1179 sethi %hi(0x00800000),%o2 ! o2 gets 00800000
1180 1180 cmp %o0,%o2
1181 1181 bge 1f ! branch if x norm or max __exp
1182 1182 nop
1183 1183 orcc %o0,%g0,%g0
1184 1184 be 1f ! Branch if x zero
1185 1185 nop
1186 1186 mov 1,%o0 ! x is subnormal
1187 1187 ba 2f
1188 1188 nop
1189 1189 1:
1190 1190 mov 0,%o0
1191 1191 2:
1192 1192 .end
1193 1193
1194 1194 .inline __iszero,2
1195 1195 sethi %hi(0x80000000),%o2
1196 1196 andn %o0,%o2,%o0
1197 1197 orcc %o0,%o1,%g0
1198 1198 mov 1,%o0
1199 1199 be 1f
1200 1200 nop
1201 1201 mov 0,%o0
1202 1202 1:
1203 1203 .end
1204 1204
1205 1205 .inline __iszerof,1
1206 1206 sethi %hi(0x80000000),%o2
1207 1207 andncc %o0,%o2,%o0
1208 1208 mov 1,%o0
1209 1209 be 1f
1210 1210 nop
1211 1211 mov 0,%o0
1212 1212 1:
1213 1213 .end
1214 1214
1215 1215 .inline __ir_iszero_,1
1216 1216 ld [%o0],%o0
1217 1217 sethi %hi(0x80000000),%o2
1218 1218 andncc %o0,%o2,%o0
1219 1219 mov 1,%o0
1220 1220 be 1f
1221 1221 nop
1222 1222 mov 0,%o0
1223 1223 1:
1224 1224 .end
1225 1225
1226 1226 .inline abs,1
1227 1227 sra %o0,31,%o1
1228 1228 xor %o0,%o1,%o0
1229 1229 sub %o0,%o1,%o0
1230 1230 .end
1231 1231
1232 1232 .inline __fabs,2
1233 1233 st %o0,[%sp+0x48]
1234 1234 st %o1,[%sp+0x4c]
1235 1235 ldd [%sp+0x48],%f0
1236 1236 fabsd %f0,%f0
1237 1237 .end
1238 1238
1239 1239 .inline __fabsf,1
1240 1240 st %o0,[%sp+0x44]
1241 1241 ld [%sp+0x44],%f0
1242 1242 fabss %f0,%f0
1243 1243 .end
1244 1244
1245 1245 .inline __r_fabs_,1
1246 1246 ld [%o0],%f0
1247 1247 fabss %f0,%f0
1248 1248 .end
1249 1249 !
1250 1250 ! __nintf - f77 NINT(REAL*4)
1251 1251 !
1252 1252
1253 1253 .inline __nintf,1
1254 1254 srl %o0,30-7,%g1
1255 1255 sethi %hi(0x7fffff),%o2
1256 1256 st %o0,[%sp+0x44]
1257 1257 and %g1,0xff,%g1
1258 1258 or %o2,%lo(0x7fffff),%o2
1259 1259 sethi %hi(1<<22),%o4
1260 1260 subcc %g1,127+31,%g0
1261 1261 and %o0,%o2,%o3
1262 1262 bl 0f
1263 1263 nop
1264 1264 sethi %hi(0xcf000000),%o2
1265 1265 sethi %hi(0x80000000),%g1
1266 1266 subcc %o0,%o2,%g0
1267 1267 or %g1,%g0,%o0
1268 1268 be 9f
1269 1269 nop
1270 1270 ld [%sp+0x44],%f0
1271 1271 fstoi %f0,%f0
1272 1272 st %f0,[%sp+0x44]
1273 1273 ld [%sp+0x44],%o0
1274 1274 ba 9f
1275 1275 nop
1276 1276 0:
1277 1277 add %o4,%o4,%o5
1278 1278 or %o3,%o5,%o3
1279 1279 sra %o0,31-0,%o2
1280 1280 subcc %g1,127,%g1
1281 1281 srl %o4,%g1,%o4
1282 1282 bge 1f
1283 1283 nop
1284 1284 subcc %g1,-1,%g0
1285 1285 or %g0,0,%o0
1286 1286 bne 2f
1287 1287 nop
1288 1288 or %g0,1,%o0
1289 1289 ba 2f
1290 1290 nop
1291 1291 1:
1292 1292 add %o3,%o4,%o3
1293 1293 or %g0,23,%o0
1294 1294 subcc %o0,%g1,%o0
1295 1295 bl 1f
1296 1296 nop
1297 1297 srl %o3,%o0,%o0
1298 1298 ba 2f
1299 1299 nop
1300 1300 1:
1301 1301 sub %g0,%o0,%o0
1302 1302 sll %o3,%o0,%o0
1303 1303 2:
1304 1304 xor %o0,%o2,%o0
1305 1305 and %o2,1,%o2
1306 1306 add %o0,%o2,%o0
1307 1307 9:
1308 1308 .end
1309 1309
1310 1310	.inline	__il_nint,1	! long long nint(float): round to nearest 64-bit int
1311 1311	ld	[%o0],%o0	! %o0 = raw IEEE single bits (arg passed by reference)
1312 1312	sra	%o0,0,%o0	! sign-extend the 32-bit image to 64 bits
1313 1313	srlx	%o0,31-8,%g1	! move exponent field down (sign still attached)
1314 1314	or	%g0,1,%o2
1315 1315	sllx	%o2,23-1,%o4	! %o4 = 1<<22, half-ulp rounding bias
1316 1316	and	%g1,0xff,%g1	! %g1 = biased exponent
1317 1317	sllx	%o2,63-0,%o2	! %o2 = sign-bit mask (1<<63)
1318 1318	subcc	%g1,127+63,%g0	! |x| >= 2^63, Inf, or NaN?
1319 1319	bl	0f		! no: integer-arithmetic path below
1320 1320	nop
1321 1321	st	%o0,[%sp+0x48]	! yes: let fstox convert so the FPU
1322 1322	ld	[%sp+0x48],%f0	! signals invalid/inexact as appropriate
1323 1323	fstox	%f0,%f0
1324 1324	std	%f0,[%sp+0x48]
1325 1325	ldx	[%sp+0x48],%o1
1326 1326	ba	9f
1327 1327	nop
1328 1328	0:
1329 1329	add	%o4,%o4,%o5	! %o5 = implicit bit (1<<23)
1330 1330	srax	%o2,63-23,%o2	! %o2 = sign+exponent mask...
1331 1331	sub	%g1,127+23,%o1	! %o1 < 0 iff exp < 23 (x may have a fraction)
1332 1332	xnor	%o2,%g0,%o2	! ...complemented: %o2 = fraction mask (0x7fffff)
1333 1333	and	%o0,%o2,%o3	! %o3 = fraction bits
1334 1334	or	%o3,%o5,%o3	! %o3 = significand with implicit bit
1335 1335	srax	%o0,63-0,%o2	! %o2 = 0 if x >= 0, -1 if x < 0
1336 1336	subcc	%g1,127,%g1	! %g1 = unbiased exponent
1337 1337	bge	1f		! |x| >= 1: shift significand into place
1338 1338	nop
1339 1339	subcc	%g1,-1,%g0	! |x| < 1: result is 0 ...
1340 1340	or	%g0,0,%o0
1341 1341	bne	2f
1342 1342	nop
1343 1343	or	%g0,1,%o0	! ... except 0.5 <= |x| < 1 rounds to 1
1344 1344	ba	2f
1345 1345	nop
1346 1346	1:
1347 1347	brlz,pt	%o1,3f		! exp < 23: rounding needed
1348 1348	nop
1349 1349	sub	%g1,23,%o0	! exp >= 23: exact, just shift left
1350 1350	sllx	%o3,%o0,%o0
1351 1351	ba	2f
1352 1352	nop
1353 1353	3:
1354 1354	srlx	%o4,%g1,%o4	! %o4 = half-ulp at the units bit
1355 1355	add	%o3,%o4,%o3	! round half away from zero
1356 1356	or	%g0,23,%o0
1357 1357	sub	%o0,%g1,%o0
1358 1358	srlx	%o3,%o0,%o0	! shift rounded significand into place
1359 1359	2:
1360 1360	xor	%o0,%o2,%o0	! two's-complement negate when x < 0
1361 1361	sub	%o0,%o2,%o1
1362 1362	9:
1363 1363	srlx	%o1,32,%o0	! 64-bit result returned in %o0:%o1 pair
1364 1364	.end
1365 1365 !
1366 1366 ! __i_dnnt - f77 NINT(REAL*8)
1367 1367 !
1368 1368
1369 1369	.inline	__i_dnnt,1	! f77 NINT(REAL*8): round double to nearest int
1370 1370	ld	[%o0],%o1	! assemble 64-bit IEEE image in %o0
1371 1371	sllx	%o1,32,%o1	! (argument passed by reference as two words)
1372 1372	ld	[%o0+4],%o0
1373 1373	or	%o0,%o1,%o0
1374 1374	srlx	%o0,63-11,%g1	! move exponent field down
1375 1375	or	%g0,1,%o2
1376 1376	stx	%o0,[%sp+0x48]	! keep the image in memory for the FP path
1377 1377	sllx	%o2,52-1,%o4	! %o4 = 1<<51, half-ulp rounding bias
1378 1378	and	%g1,0x7ff,%g1	! %g1 = biased exponent
1379 1379	sllx	%o2,63-0,%o2	! %o2 = sign-bit mask
1380 1380	subcc	%g1,1023+32,%g0	! |x| >= 2^32, Inf, or NaN?
1381 1381	bl	0f
1382 1382	nop
1383 1383	ldd	[%sp+0x48],%f0	! yes: let fdtoi convert (signals invalid)
1384 1384	ba	8f
1385 1385	nop
1386 1386	0:
1387 1387	add	%o4,%o4,%o5	! %o5 = implicit bit (1<<52)
1388 1388	srax	%o2,63-52,%o2	! %o2 = sign+exponent mask...
1389 1389	sub	%g1,1023+30,%o1	! %o1 < 0 iff exponent small enough that the
1390 1390	xnor	%o2,%g0,%o2	!  result surely fits an int; else re-check via FP
1391 1391	and	%o0,%o2,%o3	! %o3 = fraction bits
1392 1392	or	%o3,%o5,%o3	! %o3 = significand with implicit bit
1393 1393	srax	%o0,63-0,%o2	! %o2 = 0 if x >= 0, -1 if x < 0
1394 1394	subcc	%g1,1023,%g1	! %g1 = unbiased exponent
1395 1395	bge	1f
1396 1396	nop
1397 1397	subcc	%g1,-1,%g0	! |x| < 1: result is 0 ...
1398 1398	or	%g0,0,%o0
1399 1399	bne	2f
1400 1400	nop
1401 1401	or	%g0,1,%o0	! ... except 0.5 <= |x| < 1 rounds to 1
1402 1402	ba	2f
1403 1403	nop
1404 1404	1:
1405 1405	srlx	%o4,%g1,%o4	! %o4 = half-ulp at the units bit
1406 1406	add	%o3,%o4,%o3	! round half away from zero
1407 1407	or	%g0,52,%o0
1408 1408	sub	%o0,%g1,%o0
1409 1409	srlx	%o3,%o0,%o0	! shift rounded significand into place
1410 1410	2:
1411 1411	xor	%o0,%o2,%o0	! two's-complement negate when x < 0
1412 1412	sub	%o0,%o2,%o0
1413 1413	brlz,pt	%o1,9f		! done when the result is known to fit
1414 1414	nop
1415 1415	stx	%o0,[%sp+0x48]	! borderline magnitude: convert the rounded
1416 1416	ldd	[%sp+0x48],%f0	! integer back through fxtod/fdtoi so any
1417 1417	fxtod	%f0,%f0		! int overflow is signaled by the FPU
1418 1418	8:
1419 1419	fdtoi	%f0,%f0
1420 1420	st	%f0,[%sp+0x44]
1421 1421	ld	[%sp+0x44],%o0	! result returned in %o0
1422 1422	9:
1423 1423	.end
1424 1424
1425 1425	.inline	__il_dnnt,1	! long long NINT(REAL*8): round double to nearest int64
1426 1426	ld	[%o0],%o1	! assemble 64-bit IEEE image in %o0
1427 1427	sllx	%o1,32,%o1	! (argument passed by reference as two words)
1428 1428	ld	[%o0+4],%o0
1429 1429	or	%o0,%o1,%o0
1430 1430	srlx	%o0,63-11,%g1	! move exponent field down
1431 1431	or	%g0,1,%o2
1432 1432	sllx	%o2,52-1,%o4	! %o4 = 1<<51, half-ulp rounding bias
1433 1433	and	%g1,0x7ff,%g1	! %g1 = biased exponent
1434 1434	sllx	%o2,63-0,%o2	! %o2 = sign-bit mask
1435 1435	subcc	%g1,1023+63,%g0	! |x| >= 2^63, Inf, or NaN?
1436 1436	bl	0f
1437 1437	nop
1438 1438	stx	%o0,[%sp+0x48]	! yes: let fdtox convert (signals invalid)
1439 1439	ldd	[%sp+0x48],%f0
1440 1440	fdtox	%f0,%f0
1441 1441	std	%f0,[%sp+0x48]
1442 1442	ldx	[%sp+0x48],%o1
1443 1443	ba	9f
1444 1444	nop
1445 1445	0:
1446 1446	add	%o4,%o4,%o5	! %o5 = implicit bit (1<<52)
1447 1447	srax	%o2,63-52,%o2	! %o2 = sign+exponent mask...
1448 1448	sub	%g1,1023+52,%o1	! %o1 < 0 iff exp < 52 (x may have a fraction)
1449 1449	xnor	%o2,%g0,%o2	! ...complemented: %o2 = fraction mask
1450 1450	and	%o0,%o2,%o3	! %o3 = fraction bits
1451 1451	or	%o3,%o5,%o3	! %o3 = significand with implicit bit
1452 1452	srax	%o0,63-0,%o2	! %o2 = 0 if x >= 0, -1 if x < 0
1453 1453	subcc	%g1,1023,%g1	! %g1 = unbiased exponent
1454 1454	bge	1f
1455 1455	nop
1456 1456	subcc	%g1,-1,%g0	! |x| < 1: result is 0 ...
1457 1457	or	%g0,0,%o0
1458 1458	bne	2f
1459 1459	nop
1460 1460	or	%g0,1,%o0	! ... except 0.5 <= |x| < 1 rounds to 1
1461 1461	ba	2f
1462 1462	nop
1463 1463	1:
1464 1464	brlz,pt	%o1,3f		! exp < 52: rounding needed
1465 1465	nop
1466 1466	sub	%g1,52,%o0	! exp >= 52: exact, just shift left
1467 1467	sllx	%o3,%o0,%o0
1468 1468	ba	2f
1469 1469	nop
1470 1470	3:
1471 1471	srlx	%o4,%g1,%o4	! %o4 = half-ulp at the units bit
1472 1472	add	%o3,%o4,%o3	! round half away from zero
1473 1473	or	%g0,52,%o0
1474 1474	sub	%o0,%g1,%o0
1475 1475	srlx	%o3,%o0,%o0	! shift rounded significand into place
1476 1476	2:
1477 1477	xor	%o0,%o2,%o0	! two's-complement negate when x < 0
1478 1478	sub	%o0,%o2,%o1
1479 1479	9:
1480 1480	srlx	%o1,32,%o0	! 64-bit result returned in %o0:%o1 pair
1481 1481	.end
1482 1482
1483 1483	.inline	__anintf,1	! anintf: round float to nearest integral float
1484 1484	or	%g0,1,%o1	! argument bits arrive by value in %o0
1485 1485	srl	%o0,23,%g1
1486 1486	and	%g1,0xff,%g1	! %g1 = biased exponent
1487 1487	sub	%g0,%g1,%g1
1488 1488	add	%g1,0x95,%g1	! %g1 = 0x95 - exp = rounding-bit position
1489 1489	subcc	%g1,23,%g0	! classify by magnitude
1490 1490	sll	%o1,%g1,%o1	! %o1 = half-ulp mask (1 << %g1)
1491 1491	sub	%o1,1,%o2	! %o2 = fraction-clearing mask
1492 1492	bcs	1f		! unsigned %g1 < 23: |x| >= 1, round in place
1493 1493	nop
1494 1494	be	2f		! %g1 == 23: 0.5 <= |x| < 1, rounds to +-1
1495 1495	nop
1496 1496	bl	3f		! signed %g1 < 0: |x| >= 2^23, already integral
1497 1497	nop
1498 1498	sethi	%hi(0x80000000),%o1	! |x| < 0.5: result is +-0
1499 1499	and	%o0,%o1,%o0	! keep only the sign bit
1500 1500	ba	3f
1501 1501	nop
1502 1502	1:
1503 1503	and	%o0,%o1,%o1	! %o1 = half bit if set, else 0
1504 1504	2:
1505 1505	add	%o0,%o1,%o0	! carry into units bit iff fraction >= half
1506 1506	andn	%o0,%o2,%o0	! truncate the remaining fraction bits
1507 1507	3:
1508 1508	st	%o0,[%sp+0x48]	! return the result in %f0
1509 1509	ld	[%sp+0x48],%f0
1510 1510	.end
1511 1511
1512 1512	.inline	__anint,2	! anint: round double to nearest integral double
1513 1513	sllx	%o0,32,%o0	! assemble 64-bit IEEE image from the two
1514 1514	or	%o0,%o1,%o0	! argument words (hi in %o0, lo in %o1)
1515 1515	or	%g0,1,%o1
1516 1516	srlx	%o0,52,%g1
1517 1517	and	%g1,0x7ff,%g1	! %g1 = biased exponent
1518 1518	sub	%g0,%g1,%g1
1519 1519	add	%g1,0x432,%g1	! %g1 = 0x432 - exp = rounding-bit position
1520 1520	subcc	%g1,52,%g0	! classify by magnitude
1521 1521	sllx	%o1,%g1,%o1	! %o1 = half-ulp mask (1 << %g1)
1522 1522	sub	%o1,1,%o2	! %o2 = fraction-clearing mask
1523 1523	bcs,pt	%icc,1f		! unsigned %g1 < 52: |x| >= 1, round in place
1524 1524	nop
1525 1525	be,pt	%icc,2f		! %g1 == 52: 0.5 <= |x| < 1, rounds to +-1
1526 1526	nop
1527 1527	bl,pt	%icc,3f		! signed %g1 < 0: |x| >= 2^52, already integral
1528 1528	nop
1529 1529	srlx	%o0,63,%o0	! |x| < 0.5: result is +-0;
1530 1530	sllx	%o0,63,%o0	! keep only the sign bit
1531 1531	ba	3f
1532 1532	nop
1533 1533	1:
1534 1534	and	%o0,%o1,%o1	! %o1 = half bit if set, else 0
1535 1535	2:
1536 1536	add	%o0,%o1,%o0	! carry into units bit iff fraction >= half
1537 1537	andn	%o0,%o2,%o0	! truncate the remaining fraction bits
1538 1538	3:
1539 1539	stx	%o0,[%sp+0x48]	! return the result in %f0:%f1
1540 1540	ldd	[%sp+0x48],%f0
1541 1541	.end
1542 1542
1543 1543	.inline	__Fz_minus,3	! dcomplex subtract: *%o0 = *%o1 - *%o2
1544 1544	ld	[%o1],%f0	! %f0:%f1 = a->real
1545 1545	ld	[%o1+0x4],%f1
1546 1546	ld	[%o2],%f4	! %f4:%f5 = b->real
1547 1547	ld	[%o2+0x4],%f5
1548 1548	fsubd	%f0,%f4,%f0	! real part
1549 1549	ld	[%o1+8],%f2	! %f2:%f3 = a->imag
1550 1550	ld	[%o1+0xc],%f3
1551 1551	ld	[%o2+8],%f6	! %f6:%f7 = b->imag
1552 1552	ld	[%o2+0xc],%f7
1553 1553	fsubd	%f2,%f6,%f2	! imaginary part
1554 1554	st	%f0,[%o0+0x0]	! store result word by word
1555 1555	st	%f1,[%o0+0x4]
1556 1556	st	%f2,[%o0+0x8]
1557 1557	st	%f3,[%o0+0xc]
1558 1558	.end
1559 1559
1560 1560	.inline	__Fz_add,3	! dcomplex add: *%o0 = *%o1 + *%o2
1561 1561	ld	[%o1],%f0	! %f0:%f1 = a->real
1562 1562	ld	[%o1+0x4],%f1
1563 1563	ld	[%o2],%f4	! %f4:%f5 = b->real
1564 1564	ld	[%o2+0x4],%f5
1565 1565	faddd	%f0,%f4,%f0	! real part
1566 1566	ld	[%o1+8],%f2	! %f2:%f3 = a->imag
1567 1567	ld	[%o1+0xc],%f3
1568 1568	ld	[%o2+8],%f6	! %f6:%f7 = b->imag
1569 1569	ld	[%o2+0xc],%f7
1570 1570	faddd	%f2,%f6,%f2	! imaginary part
1571 1571	st	%f0,[%o0+0x0]	! store result word by word
1572 1572	st	%f1,[%o0+0x4]
1573 1573	st	%f2,[%o0+0x8]
1574 1574	st	%f3,[%o0+0xc]
1575 1575	.end
1576 1576
1577 1577	.inline	__Fz_neg,2	! dcomplex negate: *%o0 = -(*%o1)
1578 1578	ld	[%o1],%f0	! flip sign of the real part's high word;
1579 1579	fnegs	%f0,%f0		! the low word is copied unchanged
1580 1580	ld	[%o1+0x4],%f1
1581 1581	st	%f1,[%o0+0x4]
1582 1582	ld	[%o1+8],%f2	! same for the imaginary part
1583 1583	fnegs	%f2,%f2
1584 1584	ld	[%o1+0xc],%f3
1585 1585	st	%f3,[%o0+0xc]
1586 1586	st	%f0,[%o0]
1587 1587	st	%f2,[%o0+0x8]
1588 1588	.end
1589 1589
1590 1590	.inline	__Ff_conv_z,2	! float -> dcomplex: real = (double)x, imag = 0
1591 1591	st	%o1,[%sp+0x44]	! move the float bits into the FPU
1592 1592	ld	[%sp+0x44],%f0
1593 1593	fstod	%f0,%f0		! widen to double
1594 1594	st	%g0,[%o0+0x8]	! imag = 0.0
1595 1595	st	%g0,[%o0+0xc]
1596 1596	st	%f1,[%o0+0x4]	! real = converted value
1597 1597	st	%f0,[%o0]
1598 1598	.end
1599 1599
1600 1600	.inline	__Fz_conv_f,1	! dcomplex -> float: return (float)c->real in %f0
1601 1601	ld	[%o0],%f0
1602 1602	ld	[%o0+4],%f1
1603 1603	fdtos	%f0,%f0
1604 1604	.end
1605 1605
1606 1606	.inline	__Fz_conv_i,1	! dcomplex -> int: return (int)c->real in %o0
1607 1607	ld	[%o0],%f0
1608 1608	ld	[%o0+4],%f1
1609 1609	fdtoi	%f0,%f0		! truncating convert to int
1610 1610	st	%f0,[%sp+0x44]	! move the result to the integer side
1611 1611	ld	[%sp+0x44],%o0
1612 1612	.end
1613 1613
1614 1614	.inline	__Fi_conv_z,2	! int -> dcomplex: real = (double)i, imag = 0
1615 1615	st	%o1,[%sp+0x44]	! move the int into the FPU
1616 1616	ld	[%sp+0x44],%f0
1617 1617	fitod	%f0,%f0		! convert to double
1618 1618	st	%g0,[%o0+0x8]	! imag = 0.0
1619 1619	st	%g0,[%o0+0xc]
1620 1620	st	%f1,[%o0+0x4]	! real = converted value
1621 1621	st	%f0,[%o0]
1622 1622	.end
1623 1623
1624 1624	.inline	__Fz_conv_d,1	! dcomplex -> double: return c->real in %f0:%f1
1625 1625	ld	[%o0],%f0
1626 1626	ld	[%o0+4],%f1
1627 1627	.end
1628 1628
1629 1629	.inline	__Fd_conv_z,3	! double -> dcomplex: bit-copy x, imag = 0
1630 1630	st	%o1,[%o0]	! real = x (hi word in %o1, lo word in %o2)
1631 1631	st	%o2,[%o0+0x4]
1632 1632	st	%g0,[%o0+0x8]	! imag = 0.0
1633 1633	st	%g0,[%o0+0xc]
1634 1634	.end
1635 1635
1636 1636	.inline	__Fz_conv_c,2	! dcomplex -> complex: narrow both parts to float
1637 1637	ldd	[%o1],%f0	! real
1638 1638	fdtos	%f0,%f0
1639 1639	st	%f0,[%o0]
1640 1640	ldd	[%o1+0x8],%f2	! imag
1641 1641	fdtos	%f2,%f1
1642 1642	st	%f1,[%o0+0x4]
1643 1643	.end
1644 1644
1645 1645	.inline	__Fz_eq,2	! dcomplex equality: %o0 = (x->real == y->real
1646 1646	ld	[%o0],%f0	!                      && x->imag == y->imag)
1647 1647	ld	[%o0+4],%f1
1648 1648	ld	[%o1],%f2
1649 1649	ld	[%o1+4],%f3
1650 1650	fcmpd	%f0,%f2		! compare real parts
1651 1651	mov	%o0,%o2		! free %o0 for the return value
1652 1652	mov	0,%o0		! assume not equal
1653 1653	fbne	1f		! (the movs fill the fcmpd->fbcc gap)
1654 1654	nop
1655 1655	ld	[%o2+8],%f0	! compare imaginary parts
1656 1656	ld	[%o2+12],%f1
1657 1657	ld	[%o1+8],%f2
1658 1658	ld	[%o1+12],%f3
1659 1659	fcmpd	%f0,%f2
1660 1660	nop			! required spacing before the float branch
1661 1661	fbne	1f
1662 1662	nop
1663 1663	mov	1,%o0		! both parts equal
1664 1664	1:
1665 1665	.end
1666 1666
1667 1667	.inline	__Fz_ne,2	! dcomplex inequality: %o0 = (x->real != y->real
1668 1668	ld	[%o0],%f0	!                        || x->imag != y->imag)
1669 1669	ld	[%o0+4],%f1
1670 1670	ld	[%o1],%f2
1671 1671	ld	[%o1+4],%f3
1672 1672	fcmpd	%f0,%f2		! compare real parts
1673 1673	mov	%o0,%o2		! free %o0 for the return value
1674 1674	mov	1,%o0		! assume not equal
1675 1675	fbne	1f		! (the movs fill the fcmpd->fbcc gap)
1676 1676	nop
1677 1677	ld	[%o2+8],%f0	! compare imaginary parts
1678 1678	ld	[%o2+12],%f1
1679 1679	ld	[%o1+8],%f2
1680 1680	ld	[%o1+12],%f3
1681 1681	fcmpd	%f0,%f2
1682 1682	nop			! required spacing before the float branch
1683 1683	fbne	1f
1684 1684	nop
1685 1685	mov	0,%o0		! both parts equal
1686 1686	1:
1687 1687	.end
1688 1688
1689 1689	.inline	__c_cmplx,3	! f77 CMPLX: *%o0 = (*%o1, *%o2), bit copies
1690 1690	ld	[%o1],%o1	! real part
1691 1691	st	%o1,[%o0]
1692 1692	ld	[%o2],%o2	! imaginary part
1693 1693	st	%o2,[%o0+4]
1694 1694	.end
1695 1695
1696 1696	.inline	__d_cmplx,3	! f77 DCMPLX: *%o0 = (*%o1, *%o2), word copies
1697 1697	ld	[%o1],%f0	! real part
1698 1698	st	%f0,[%o0]
1699 1699	ld	[%o1+4],%f1
1700 1700	st	%f1,[%o0+4]
1701 1701	ld	[%o2],%f0	! imaginary part
1702 1702	st	%f0,[%o0+0x8]
1703 1703	ld	[%o2+4],%f1
1704 1704	st	%f1,[%o0+0xc]
1705 1705	.end
1706 1706
1707 1707	.inline	__r_cnjg,2	! complex conjugate: *%o0 = conjg(*%o1)
1708 1708	ld	[%o1+0x4],%f1	! imag = -imag
1709 1709	fnegs	%f1,%f1
1710 1710	ld	[%o1],%f0	! real copied unchanged
1711 1711	st	%f0,[%o0]
1712 1712	st	%f1,[%o0+4]
1713 1713	.end
1714 1714
1715 1715	.inline	__d_cnjg,2	! dcomplex conjugate: *%o0 = conjg(*%o1)
1716 1716	ld	[%o1+0x8],%f0	! flip sign of imag's high word
1717 1717	fnegs	%f0,%f0
1718 1718	ld	[%o1+0xc],%f1	! imag low word copied unchanged
1719 1719	st	%f1,[%o0+0xc]
1720 1720	ld	[%o1+0x0],%f1	! real copied unchanged, word by word
1721 1721	st	%f1,[%o0+0x0]
1722 1722	ld	[%o1+0x4],%f1
1723 1723	st	%f1,[%o0+0x4]
1724 1724	st	%f0,[%o0+0x8]
1725 1725	.end
1726 1726
1727 1727 .inline __r_dim,2
1728 1728 st %g0,[%sp+0x48]
1729 1729 ld [%sp+0x48],%f4
1730 1730 ld [%o0],%f0
1731 1731 ld [%o1],%f2
1732 1732 fcmps %fcc0,%f0,%f2
1733 1733 fmovsule %fcc0,%f4,%f2
1734 1734 fsubs %f0,%f2,%f0
1735 1735 fmovsule %fcc0,%f4,%f0
1736 1736 .end
1737 1737
1738 1738 .inline __d_dim,2
1739 1739 stx %g0,[%sp+0x48]
1740 1740 ldd [%sp+0x48],%f4
1741 1741 ld [%o0],%f0
1742 1742 ld [%o0+4],%f1
1743 1743 ld [%o1],%f2
1744 1744 ld [%o1+4],%f3
1745 1745 fcmpd %fcc0,%f0,%f2
1746 1746 fmovdule %fcc0,%f4,%f2
1747 1747 fsubd %f0,%f2,%f0
1748 1748 fmovdule %fcc0,%f4,%f0
1749 1749 .end
1750 1750
1751 1751	.inline	__r_imag,1	! return imag part of a complex in %f0
1752 1752	ld	[%o0+4],%f0
1753 1753	.end
1754 1754
1755 1755	.inline	__d_imag,1	! return imag part of a dcomplex in %f0:%f1
1756 1756	ld	[%o0+8],%f0
1757 1757	ld	[%o0+0xc],%f1
1758 1758	.end
1759 1759
1760 1760 .inline __f95_signf,2
1761 1761 ld [%o0],%f0
1762 1762 ld [%o1],%o1
1763 1763 fabss %f0,%f0
1764 1764 fnegs %f0,%f1
1765 1765 sra %o1,0,%o1
1766 1766 fmovrslz %o1,%f1,%f0
1767 1767 .end
1768 1768
1769 1769 .inline __f95_sign,2
1770 1770 ld [%o0],%f0
1771 1771 ld [%o0+4],%f1
1772 1772 ld [%o1],%o1
1773 1773 fabsd %f0,%f0
1774 1774 fnegd %f0,%f2
1775 1775 sra %o1,0,%o1
1776 1776 fmovrdlz %o1,%f2,%f0
1777 1777 .end
1778 1778
1779 1779 .inline __r_sign,2
1780 1780 ld [%o0],%f0
1781 1781 ld [%o1],%o1
1782 1782 fabss %f0,%f0
1783 1783 fnegs %f0,%f1
1784 1784 sub %o1,1,%o0
1785 1785 and %o1,%o0,%o1
1786 1786 sra %o1,0,%o1
1787 1787 fmovrslz %o1,%f1,%f0
1788 1788 .end
1789 1789
1790 1790 .inline __d_sign,2
1791 1791 ld [%o0],%f0
1792 1792 ld [%o0+4],%f1
1793 1793 ld [%o1],%o0
1794 1794 sllx %o0,32,%o0
1795 1795 ld [%o1+4],%o1
1796 1796 or %o1,%o0,%o1
1797 1797 fabsd %f0,%f0
1798 1798 fnegd %f0,%f2
1799 1799 sub %o1,1,%o0
1800 1800 and %o1,%o0,%o1
1801 1801 fmovrdlz %o1,%f2,%f0
1802 1802 .end
1803 1803
1804 1804	.inline	__Fz_mult,3	! dcomplex multiply, textbook formula:
1805 1805	ld	[%o1],%f0	! *%o0 = (r1*r2 - i1*i2, r1*i2 + r2*i1)
1806 1806	ld	[%o1+0x4],%f1	! %f0:%f1 = r1
1807 1807	ld	[%o2],%f4	! %f4:%f5 = r2
1808 1808	ld	[%o2+0x4],%f5
1809 1809	fmuld	%f0,%f4,%f8	! f8 = r1*r2
1810 1810	ld	[%o1+0x8],%f2	! %f2:%f3 = i1
1811 1811	ld	[%o1+0xc],%f3
1812 1812	ld	[%o2+0x8],%f6	! %f6:%f7 = i2
1813 1813	ld	[%o2+0xc],%f7
1814 1814	fmuld	%f2,%f6,%f10	! f10= i1*i2
1815 1815	fsubd	%f8,%f10,%f12	! f12= r1*r2-i1*i2
1816 1816	st	%f12,[%o0]	! store the real part
1817 1817	st	%f13,[%o0+4]
1818 1818	fmuld	%f0,%f6,%f14	! f14= r1*i2
1819 1819	fmuld	%f2,%f4,%f16	! f16= r2*i1
1820 1820	faddd	%f14,%f16,%f2	! f2 = r1*i2+r2*i1
1821 1821	st	%f2,[%o0+8]	! store the imaginary part
1822 1822	st	%f3,[%o0+12]
1823 1823	.end
1824 1824 !- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
1825 1825 ! void
1826 1826 ! __Fc_minus(c, a, b)
1827 1827 ! complex *c, *a, *b;
1828 1828 ! {
1829 1829
1830 1830	.inline	__Fc_minus,3	! single-precision complex subtract
1831 1831	! 30	      c->real = a->real - b->real
1832 1832	ld	[%o1],%f0
1833 1833	ld	[%o2],%f1
1834 1834	fsubs	%f0,%f1,%f2
1835 1835	! 31	      c->imag = a->imag - b->imag
1836 1836	ld	[%o1+4],%f3
1837 1837	ld	[%o2+4],%f4
1838 1838	fsubs	%f3,%f4,%f5
1839 1839	st	%f2,[%o0]	! store (real, imag) into *c
1840 1840	st	%f5,[%o0+4]
1841 1841	.end
1842 1842	! }
1843 1843 !- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
1844 1844 ! void
1845 1845 ! __Fc_add(c, a, b)
1846 1846 ! complex *c, *a, *b;
1847 1847 ! {
1848 1848
1849 1849	.inline	__Fc_add,3	! single-precision complex add
1850 1850	! 39	      c->real = a->real + b->real
1851 1851	ld	[%o1],%f0
1852 1852	ld	[%o2],%f1
1853 1853	fadds	%f0,%f1,%f2
1854 1854	! 40	      c->imag = a->imag + b->imag
1855 1855	ld	[%o1+4],%f3
1856 1856	ld	[%o2+4],%f4
1857 1857	fadds	%f3,%f4,%f5
1858 1858	st	%f2,[%o0]	! store (real, imag) into *c
1859 1859	st	%f5,[%o0+4]
1860 1860	.end
1861 1861 ! }
1862 1862 !- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
1863 1863 ! void
1864 1864 ! __Fc_neg(c, a)
1865 1865 ! complex *c, *a;
1866 1866 ! {
1867 1867
1868 1868	.inline	__Fc_neg,2	! single-precision complex negate
1869 1869	! 48	      c->real = - a->real
1870 1870	ld	[%o1],%f0
1871 1871	fnegs	%f0,%f1
1872 1872	! 49	      c->imag = - a->imag
1873 1873	ld	[%o1+4],%f2
1874 1874	fnegs	%f2,%f3
1875 1875	st	%f1,[%o0]	! store (real, imag) into *c
1876 1876	st	%f3,[%o0+4]
1877 1877	.end
1878 1878 ! }
1879 1879 !- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
1880 1880 ! void
1881 1881 ! __Ff_conv_c(c, x)
1882 1882 ! complex *c;
1883 1883 ! FLOATPARAMETER x;
1884 1884 ! {
1885 1885
1886 1886	.inline	__Ff_conv_c,2	! float -> complex (bit copy; x already single)
1887 1887	! 59	      c->real = x
1888 1888	st	%o1,[%o0]
1889 1889	! 60	      c->imag = 0.0
1890 1890	st	%g0,[%o0+4]
1891 1891	.end
1892 1892 ! }
1893 1893 !- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
1894 1894 ! FLOATFUNCTIONTYPE
1895 1895 ! __Fc_conv_f(c)
1896 1896 ! complex *c;
1897 1897 ! {
1898 1898
1899 1899	.inline	__Fc_conv_f,1	! complex -> float: return c->real in %f0
1900 1900	! 69	      RETURNFLOAT(c->real)
1901 1901	ld	[%o0],%f0
1902 1902	.end
1903 1903 ! }
1904 1904 !- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
1905 1905 ! int
1906 1906 ! __Fc_conv_i(c)
1907 1907 ! complex *c;
1908 1908 ! {
1909 1909
1910 1910	.inline	__Fc_conv_i,1	! complex -> int: truncating convert of c->real
1911 1911	! 78	      return (int)c->real
1912 1912	ld	[%o0],%f0
1913 1913	fstoi	%f0,%f1
1914 1914	st	%f1,[%sp+68]	! move the result to the integer side
1915 1915	ld	[%sp+68],%o0
1916 1916	.end
1917 1917 ! }
1918 1918 !- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
1919 1919 ! void
1920 1920 ! __Fi_conv_c(c, i)
1921 1921 ! complex *c;
1922 1922 ! int i;
1923 1923 ! {
1924 1924
1925 1925	.inline	__Fi_conv_c,2	! int -> complex: real = (float)i, imag = 0
1926 1926	! 88	      c->real = (float)i
1927 1927	st	%o1,[%sp+68]	! move the int into the FPU
1928 1928	ld	[%sp+68],%f0
1929 1929	fitos	%f0,%f1
1930 1930	st	%f1,[%o0]
1931 1931	! 89	      c->imag = 0.0
1932 1932	st	%g0,[%o0+4]
1933 1933	.end
1934 1934 ! }
1935 1935 !- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
1936 1936 ! double
1937 1937 ! __Fc_conv_d(c)
1938 1938 ! complex *c;
1939 1939 ! {
1940 1940
1941 1941	.inline	__Fc_conv_d,1	! complex -> double: widen c->real, return in %f0:%f1
1942 1942	! 98	      return (double)c->real
1943 1943	ld	[%o0],%f2
1944 1944	fstod	%f2,%f0
1945 1945	.end
1946 1946 ! }
1947 1947 !- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
1948 1948 ! void
1949 1949 ! __Fd_conv_c(c, x)
1950 1950 ! complex *c;
1951 1951 ! double x;
1952 1952 ! {
1953 1953
1954 1954	.inline	__Fd_conv_c,2	! double -> complex: narrow x, imag = 0
1955 1955	st	%o1,[%sp+72]	! reassemble x (two words) on the stack
1956 1956	st	%o2,[%sp+76]
1957 1957	! 109	      c->real = (float)(x)
1958 1958	ldd	[%sp+72],%f0
1959 1959	fdtos	%f0,%f1
1960 1960	st	%f1,[%o0]
1961 1961	! 110	      c->imag = 0.0
1962 1962	st	%g0,[%o0+4]
1963 1963	.end
1964 1964 ! }
1965 1965 !- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
1966 1966 ! void
1967 1967 ! __Fc_conv_z(result, c)
1968 1968 ! dcomplex *result;
1969 1969 ! complex *c;
1970 1970 ! {
1971 1971
1972 1972	.inline	__Fc_conv_z,2	! complex -> dcomplex: widen both parts
1973 1973	! 120	      result->dreal = (double)c->real
1974 1974	ld	[%o1],%f0
1975 1975	fstod	%f0,%f2
1976 1976	st	%f2,[%o0]	! store dreal before %f3 is reused below
1977 1977	st	%f3,[%o0+4]
1978 1978	! 121	      result->dimag = (double)c->imag
1979 1979	ld	[%o1+4],%f3
1980 1980	fstod	%f3,%f4
1981 1981	st	%f4,[%o0+8]
1982 1982	st	%f5,[%o0+12]
1983 1983	.end
1984 1984 ! }
1985 1985 !- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
1986 1986 ! int
1987 1987 ! __Fc_eq(x, y)
1988 1988 ! complex *x, *y;
1989 1989 ! {
1990 1990
1991 1991	.inline	__Fc_eq,2	! complex equality
1992 1992	! return (x->real == y->real) && (x->imag == y->imag);
1993 1993	ld	[%o0],%f0
1994 1994	ld	[%o1],%f2
1995 1995	mov	%o0,%o2		! free %o0 for the return value
1996 1996	fcmps	%f0,%f2		! compare real parts
1997 1997	mov	0,%o0		! assume not equal (fills fcmp->fbcc gap)
1998 1998	fbne	1f
1999 1999	nop
2000 2000	ld	[%o2+4],%f0	! compare imaginary parts
2001 2001	ld	[%o1+4],%f2
2002 2002	fcmps	%f0,%f2
2003 2003	nop			! required spacing before the float branch
2004 2004	fbne	1f
2005 2005	nop
2006 2006	mov	1,%o0		! both parts equal
2007 2007	1:
2008 2008	.end
2009 2009 ! }
2010 2010 !- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2011 2011 ! int
2012 2012 ! __Fc_ne(x, y)
2013 2013 ! complex *x, *y;
2014 2014 ! {
2015 2015
2016 2016	.inline	__Fc_ne,2	! complex inequality
2017 2017	! return (x->real != y->real) || (x->imag != y->imag);
2018 2018	ld	[%o0],%f0
2019 2019	ld	[%o1],%f2
2020 2020	mov	%o0,%o2		! free %o0 for the return value
2021 2021	fcmps	%f0,%f2		! compare real parts
2022 2022	mov	1,%o0		! assume not equal (fills fcmp->fbcc gap)
2023 2023	fbne	1f
2024 2024	nop
2025 2025	ld	[%o2+4],%f0	! compare imaginary parts
2026 2026	ld	[%o1+4],%f2
2027 2027	fcmps	%f0,%f2
2028 2028	nop			! required spacing before the float branch
2029 2029	fbne	1f
2030 2030	nop
2031 2031	mov	0,%o0		! both parts equal
2032 2032	1:
2033 2033	.end
2034 2034 ! }
↓ open down ↓ |
1221 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX