Print this page
6648 illumos build should be explicit about C standards

Split Close
Expand all
Collapse all
          --- old/usr/src/lib/libm/common/m9x/fenv_inlines.h
          +++ new/usr/src/lib/libm/common/m9x/fenv_inlines.h
↓ open down ↓ 31 lines elided ↑ open up ↑
  32   32   * (much of this 'amd64' code can be, in fact.)
  33   33   */
  34   34  union fp_cwsw {
  35   35          uint32_t cwsw;
  36   36          struct {
  37   37                  uint16_t cw;
  38   38                  uint16_t sw;
  39   39          } words;
  40   40  };
  41   41  
  42      -extern __inline__ void
       42 +extern __GNU_INLINE void
  43   43  __fenv_getcwsw(unsigned int *value)
  44   44  {
  45   45          union fp_cwsw *u = (union fp_cwsw *)value;
  46   46  
  47   47          __asm__ __volatile__(
  48   48              "fstsw %0\n\t"
  49   49              "fstcw %1\n\t"
  50   50              : "=m" (u->words.cw), "=m" (u->words.sw));
  51   51  }
  52   52  
  53      -extern __inline__ void
       53 +extern __GNU_INLINE void
  54   54  __fenv_setcwsw(const unsigned int *value)
  55   55  {
  56   56          union fp_cwsw cwsw;
  57   57          short fenv[16];
  58   58  
  59   59          cwsw.cwsw = *value;
  60   60  
  61   61          __asm__ __volatile__(
  62   62              "fstenv %0\n\t"
  63   63              "movw   %4,%1\n\t"
  64   64              "movw   %3,%2\n\t"
  65   65              "fldenv %0\n\t"
  66   66              "fwait\n\t"
  67   67              : "=m" (fenv), "=m" (fenv[0]), "=m" (fenv[2])
  68   68              : "r" (cwsw.words.cw), "r" (cwsw.words.sw)
  69   69              /* For practical purposes, we clobber the whole FPU */
  70   70              : "cc", "st", "st(1)", "st(2)", "st(3)", "st(4)", "st(5)",
  71   71                "st(6)", "st(7)");
  72   72  }
  73   73  
  74      -extern __inline__ void
       74 +extern __GNU_INLINE void
  75   75  __fenv_getmxcsr(unsigned int *value)
  76   76  {
  77   77          __asm__ __volatile__("stmxcsr %0" : "=m" (*value));
  78   78  }
  79   79  
  80      -extern __inline__ void
       80 +extern __GNU_INLINE void
  81   81  __fenv_setmxcsr(const unsigned int *value)
  82   82  {
  83   83          __asm__ __volatile__("ldmxcsr %0" : : "m" (*value));
  84   84  }
  85   85  
  86      -extern __inline__ long double
       86 +extern __GNU_INLINE long double
  87   87  f2xm1(long double x)
  88   88  {
  89   89          long double ret;
  90   90  
  91   91          __asm__ __volatile__("f2xm1" : "=t" (ret) : "0" (x) : "cc");
  92   92          return (ret);
  93   93  }
  94   94  
  95      -extern __inline__ long double
       95 +extern __GNU_INLINE long double
  96   96  fyl2x(long double y, long double x)
  97   97  {
  98   98          long double ret;
  99   99  
 100  100          __asm__ __volatile__("fyl2x"
 101  101              : "=t" (ret)
 102  102              : "0" (x), "u" (y)
 103  103              : "st(1)", "cc");
 104  104          return (ret);
 105  105  }
 106  106  
 107      -extern __inline__ long double
      107 +extern __GNU_INLINE long double
 108  108  fptan(long double x)
 109  109  {
 110  110          /*
 111  111           * fptan pushes 1.0 then the result on completion, so we want to pop
 112  112           * the FP stack twice, so we need a dummy value into which to pop it.
 113  113           */
 114  114          long double ret;
 115  115          long double dummy;
 116  116  
 117  117          __asm__ __volatile__("fptan"
 118  118              : "=t" (dummy), "=u" (ret)
 119  119              : "0" (x)
 120  120              : "cc");
 121  121          return (ret);
 122  122  }
 123  123  
 124      -extern __inline__ long double
      124 +extern __GNU_INLINE long double
 125  125  fpatan(long double x, long double y)
 126  126  {
 127  127          long double ret;
 128  128  
 129  129          __asm__ __volatile__("fpatan"
 130  130              : "=t" (ret)
 131  131              : "0" (y), "u" (x)
 132  132              : "st(1)", "cc");
 133  133          return (ret);
 134  134  }
 135  135  
 136      -extern __inline__ long double
      136 +extern __GNU_INLINE long double
 137  137  fxtract(long double x)
 138  138  {
 139  139          __asm__ __volatile__("fxtract" : "+t" (x) : : "cc");
 140  140          return (x);
 141  141  }
 142  142  
 143      -extern __inline__ long double
      143 +extern __GNU_INLINE long double
 144  144  fprem1(long double idend, long double div)
 145  145  {
 146  146          __asm__ __volatile__("fprem1" : "+t" (div) : "u" (idend) : "cc");
 147  147          return (div);
 148  148  }
 149  149  
 150      -extern __inline__ long double
      150 +extern __GNU_INLINE long double
 151  151  fprem(long double idend, long double div)
 152  152  {
 153  153          __asm__ __volatile__("fprem" : "+t" (div) : "u" (idend) : "cc");
 154  154          return (div);
 155  155  }
 156  156  
 157      -extern __inline__ long double
      157 +extern __GNU_INLINE long double
 158  158  fyl2xp1(long double y, long double x)
 159  159  {
 160  160          long double ret;
 161  161  
 162  162          __asm__ __volatile__("fyl2xp1"
 163  163              : "=t" (ret)
 164  164              : "0" (x), "u" (y)
 165  165              : "st(1)", "cc");
 166  166          return (ret);
 167  167  }
 168  168  
 169      -extern __inline__ long double
      169 +extern __GNU_INLINE long double
 170  170  fsqrt(long double x)
 171  171  {
 172  172          __asm__ __volatile__("fsqrt" : "+t" (x) : : "cc");
 173  173          return (x);
 174  174  }
 175  175  
 176      -extern __inline__ long double
      176 +extern __GNU_INLINE long double
 177  177  fsincos(long double x)
 178  178  {
 179  179          long double dummy;
 180  180  
 181  181          __asm__ __volatile__("fsincos" : "+t" (x), "=u" (dummy) : : "cc");
 182  182          return (x);
 183  183  }
 184  184  
 185      -extern __inline__ long double
      185 +extern __GNU_INLINE long double
 186  186  frndint(long double x)
 187  187  {
 188  188          __asm__ __volatile__("frndint" : "+t" (x) : : "cc");
 189  189          return (x);
 190  190  }
 191  191  
 192      -extern __inline__ long double
      192 +extern __GNU_INLINE long double
 193  193  fscale(long double x, long double y)
 194  194  {
 195  195          long double ret;
 196  196  
 197  197          __asm__ __volatile__("fscale" : "=t" (ret) : "0" (y), "u" (x) : "cc");
 198  198          return (ret);
 199  199  }
 200  200  
 201      -extern __inline__ long double
      201 +extern __GNU_INLINE long double
 202  202  fsin(long double x)
 203  203  {
 204  204          __asm__ __volatile__("fsin" : "+t" (x) : : "cc");
 205  205          return (x);
 206  206  }
 207  207  
 208      -extern __inline__ long double
      208 +extern __GNU_INLINE long double
 209  209  fcos(long double x)
 210  210  {
 211  211          __asm__ __volatile__("fcos" : "+t" (x) : : "cc");
 212  212          return (x);
 213  213  }
 214  214  
 215      -extern __inline__ void
      215 +extern __GNU_INLINE void
 216  216  sse_cmpeqss(float *f1, float *f2, int *i1)
 217  217  {
 218  218          __asm__ __volatile__(
 219  219              "cmpeqss %2, %1\n\t"
 220  220              "movss   %1, %0"
 221  221              : "=m" (*i1), "+x" (*f1)
 222  222              : "x" (*f2)
 223  223              : "cc");
 224  224  }
 225  225  
 226      -extern __inline__ void
      226 +extern __GNU_INLINE void
 227  227  sse_cmpltss(float *f1, float *f2, int *i1)
 228  228  {
 229  229          __asm__ __volatile__(
 230  230              "cmpltss %2, %1\n\t"
 231  231              "movss   %1, %0"
 232  232              : "=m" (*i1), "+x" (*f1)
 233  233              : "x" (*f2)
 234  234              : "cc");
 235  235  }
 236  236  
 237      -extern __inline__ void
      237 +extern __GNU_INLINE void
 238  238  sse_cmpless(float *f1, float *f2, int *i1)
 239  239  {
 240  240          __asm__ __volatile__(
 241  241              "cmpless %2, %1\n\t"
 242  242              "movss   %1, %0"
 243  243              : "=m" (*i1), "+x" (*f1)
 244  244              : "x" (*f2)
 245  245              : "cc");
 246  246  }
 247  247  
 248      -extern __inline__ void
      248 +extern __GNU_INLINE void
 249  249  sse_cmpunordss(float *f1, float *f2, int *i1)
 250  250  {
 251  251          __asm__ __volatile__(
 252  252              "cmpunordss %2, %1\n\t"
 253  253              "movss      %1, %0"
 254  254              : "=m" (*i1), "+x" (*f1)
 255  255              : "x" (*f2)
 256  256              : "cc");
 257  257  }
 258  258  
 259      -extern __inline__ void
      259 +extern __GNU_INLINE void
 260  260  sse_minss(float *f1, float *f2, float *f3)
 261  261  {
 262  262          __asm__ __volatile__(
 263  263              "minss %2, %1\n\t"
 264  264              "movss %1, %0"
 265  265              : "=m" (*f3), "+x" (*f1)
 266  266              : "x" (*f2));
 267  267  }
 268  268  
 269      -extern __inline__ void
      269 +extern __GNU_INLINE void
 270  270  sse_maxss(float *f1, float *f2, float *f3)
 271  271  {
 272  272          __asm__ __volatile__(
 273  273              "maxss %2, %1\n\t"
 274  274              "movss %1, %0"
 275  275              : "=m" (*f3), "+x" (*f1)
 276  276              : "x" (*f2));
 277  277  }
 278  278  
 279      -extern __inline__ void
      279 +extern __GNU_INLINE void
 280  280  sse_addss(float *f1, float *f2, float *f3)
 281  281  {
 282  282          __asm__ __volatile__(
 283  283              "addss %2, %1\n\t"
 284  284              "movss %1, %0"
 285  285              : "=m" (*f3), "+x" (*f1)
 286  286              : "x" (*f2));
 287  287  }
 288  288  
 289      -extern __inline__ void
      289 +extern __GNU_INLINE void
 290  290  sse_subss(float *f1, float *f2, float *f3)
 291  291  {
 292  292          __asm__ __volatile__(
 293  293              "subss %2, %1\n\t"
 294  294              "movss %1, %0"
 295  295              : "=m" (*f3), "+x" (*f1)
 296  296              : "x" (*f2));
 297  297  }
 298  298  
 299      -extern __inline__ void
      299 +extern __GNU_INLINE void
 300  300  sse_mulss(float *f1, float *f2, float *f3)
 301  301  {
 302  302          __asm__ __volatile__(
 303  303              "mulss %2, %1\n\t"
 304  304              "movss %1, %0"
 305  305              : "=m" (*f3), "+x" (*f1)
 306  306              : "x" (*f2));
 307  307  }
 308  308  
 309      -extern __inline__ void
      309 +extern __GNU_INLINE void
 310  310  sse_divss(float *f1, float *f2, float *f3)
 311  311  {
 312  312          __asm__ __volatile__(
 313  313              "divss %2, %1\n\t"
 314  314              "movss %1, %0"
 315  315              : "=m" (*f3), "+x" (*f1)
 316  316              : "x" (*f2));
 317  317  }
 318  318  
 319      -extern __inline__ void
      319 +extern __GNU_INLINE void
 320  320  sse_sqrtss(float *f1, float *f2)
 321  321  {
 322  322          double tmp;
 323  323  
 324  324          __asm__ __volatile__(
 325  325              "sqrtss %2, %1\n\t"
 326  326              "movss  %1, %0"
 327  327              : "=m" (*f2), "=x" (tmp)
 328  328              : "m" (*f1));
 329  329  }
 330  330  
 331      -extern __inline__ void
      331 +extern __GNU_INLINE void
 332  332  sse_ucomiss(float *f1, float *f2)
 333  333  {
 334  334          __asm__ __volatile__("ucomiss %1, %0" : : "x" (*f1), "x" (*f2));
 335  335  
 336  336  }
 337  337  
 338      -extern __inline__ void
      338 +extern __GNU_INLINE void
 339  339  sse_comiss(float *f1, float *f2)
 340  340  {
 341  341          __asm__ __volatile__("comiss %1, %0" : : "x" (*f1), "x" (*f2));
 342  342  }
 343  343  
 344      -extern __inline__ void
      344 +extern __GNU_INLINE void
 345  345  sse_cvtss2sd(float *f1, double *d1)
 346  346  {
 347  347          double tmp;
 348  348  
 349  349          __asm__ __volatile__(
 350  350              "cvtss2sd %2, %1\n\t"
 351  351              "movsd    %1, %0"
 352  352              : "=m" (*d1), "=x" (tmp)
 353  353              : "m" (*f1));
 354  354  }
 355  355  
 356      -extern __inline__ void
      356 +extern __GNU_INLINE void
 357  357  sse_cvtsi2ss(int *i1, float *f1)
 358  358  {
 359  359          double tmp;
 360  360  
 361  361          __asm__ __volatile__(
 362  362              "cvtsi2ss %2, %1\n\t"
 363  363              "movss    %1, %0"
 364  364              : "=m" (*f1), "=x" (tmp)
 365  365              : "m" (*i1));
 366  366  }
 367  367  
 368      -extern __inline__ void
      368 +extern __GNU_INLINE void
 369  369  sse_cvttss2si(float *f1, int *i1)
 370  370  {
 371  371          int tmp;
 372  372  
 373  373          __asm__ __volatile__(
 374  374              "cvttss2si %2, %1\n\t"
 375  375              "movl      %1, %0"
 376  376              : "=m" (*i1), "=r" (tmp)
 377  377              : "m" (*f1));
 378  378  }
 379  379  
 380      -extern __inline__ void
      380 +extern __GNU_INLINE void
 381  381  sse_cvtss2si(float *f1, int *i1)
 382  382  {
 383  383          int tmp;
 384  384  
 385  385          __asm__ __volatile__(
 386  386              "cvtss2si %2, %1\n\t"
 387  387              "movl     %1, %0"
 388  388              : "=m" (*i1), "=r" (tmp)
 389  389              : "m" (*f1));
 390  390  }
 391  391  
 392  392  #if defined(__amd64)
 393      -extern __inline__ void
      393 +extern __GNU_INLINE void
 394  394  sse_cvtsi2ssq(long long *ll1, float *f1)
 395  395  {
 396  396          double tmp;
 397  397  
 398  398          __asm__ __volatile__(
 399  399              "cvtsi2ssq %2, %1\n\t"
 400  400              "movss     %1, %0"
 401  401              : "=m" (*f1), "=x" (tmp)
 402  402              : "m" (*ll1));
 403  403  }
 404  404  
 405      -extern __inline__ void
      405 +extern __GNU_INLINE void
 406  406  sse_cvttss2siq(float *f1, long long *ll1)
 407  407  {
 408  408          uint64_t tmp;
 409  409  
 410  410          __asm__ __volatile__(
 411  411              "cvttss2siq %2, %1\n\t"
 412  412              "movq       %1, %0"
 413  413              : "=m" (*ll1), "=r" (tmp)
 414  414              : "m" (*f1));
 415  415  }
 416  416  
 417      -extern __inline__ void
      417 +extern __GNU_INLINE void
 418  418  sse_cvtss2siq(float *f1, long long *ll1)
 419  419  {
 420  420          uint64_t tmp;
 421  421  
 422  422          __asm__ __volatile__(
 423  423              "cvtss2siq %2, %1\n\t"
 424  424              "movq      %1, %0"
 425  425              : "=m" (*ll1), "=r" (tmp)
 426  426              : "m" (*f1));
 427  427  }
 428  428  
 429  429  #endif
 430  430  
 431      -extern __inline__ void
      431 +extern __GNU_INLINE void
 432  432  sse_cmpeqsd(double *d1, double *d2, long long *ll1)
 433  433  {
 434  434          __asm__ __volatile__(
 435  435              "cmpeqsd %2,%1\n\t"
 436  436              "movsd   %1,%0"
 437  437              : "=m" (*ll1), "+x" (*d1)
 438  438              : "x" (*d2));
 439  439  }
 440  440  
 441      -extern __inline__ void
      441 +extern __GNU_INLINE void
 442  442  sse_cmpltsd(double *d1, double *d2, long long *ll1)
 443  443  {
 444  444          __asm__ __volatile__(
 445  445              "cmpltsd %2,%1\n\t"
 446  446              "movsd   %1,%0"
 447  447              : "=m" (*ll1), "+x" (*d1)
 448  448              : "x" (*d2));
 449  449  }
 450  450  
 451      -extern __inline__ void
      451 +extern __GNU_INLINE void
 452  452  sse_cmplesd(double *d1, double *d2, long long *ll1)
 453  453  {
 454  454          __asm__ __volatile__(
 455  455              "cmplesd %2,%1\n\t"
 456  456              "movsd   %1,%0"
 457  457              : "=m" (*ll1), "+x" (*d1)
 458  458              : "x" (*d2));
 459  459  }
 460  460  
 461      -extern __inline__ void
      461 +extern __GNU_INLINE void
 462  462  sse_cmpunordsd(double *d1, double *d2, long long *ll1)
 463  463  {
 464  464          __asm__ __volatile__(
 465  465              "cmpunordsd %2,%1\n\t"
 466  466              "movsd      %1,%0"
 467  467              : "=m" (*ll1), "+x" (*d1)
 468  468              : "x" (*d2));
 469  469  }
 470  470  
 471  471  
 472      -extern __inline__ void
      472 +extern __GNU_INLINE void
 473  473  sse_minsd(double *d1, double *d2, double *d3)
 474  474  {
 475  475          __asm__ __volatile__(
 476  476              "minsd %2,%1\n\t"
 477  477              "movsd %1,%0"
 478  478              : "=m" (*d3), "+x" (*d1)
 479  479              : "x" (*d2));
 480  480  }
 481  481  
 482      -extern __inline__ void
      482 +extern __GNU_INLINE void
 483  483  sse_maxsd(double *d1, double *d2, double *d3)
 484  484  {
 485  485          __asm__ __volatile__(
 486  486              "maxsd %2,%1\n\t"
 487  487              "movsd %1,%0"
 488  488              : "=m" (*d3), "+x" (*d1)
 489  489              : "x" (*d2));
 490  490  }
 491  491  
 492      -extern __inline__ void
      492 +extern __GNU_INLINE void
 493  493  sse_addsd(double *d1, double *d2, double *d3)
 494  494  {
 495  495          __asm__ __volatile__(
 496  496              "addsd %2,%1\n\t"
 497  497              "movsd %1,%0"
 498  498              : "=m" (*d3), "+x" (*d1)
 499  499              : "x" (*d2));
 500  500  }
 501  501  
 502      -extern __inline__ void
      502 +extern __GNU_INLINE void
 503  503  sse_subsd(double *d1, double *d2, double *d3)
 504  504  {
 505  505          __asm__ __volatile__(
 506  506              "subsd %2,%1\n\t"
 507  507              "movsd %1,%0"
 508  508              : "=m" (*d3), "+x" (*d1)
 509  509              : "x" (*d2));
 510  510  }
 511  511  
 512      -extern __inline__ void
      512 +extern __GNU_INLINE void
 513  513  sse_mulsd(double *d1, double *d2, double *d3)
 514  514  {
 515  515          __asm__ __volatile__(
 516  516              "mulsd %2,%1\n\t"
 517  517              "movsd %1,%0"
 518  518              : "=m" (*d3), "+x" (*d1)
 519  519              : "x" (*d2));
 520  520  }
 521  521  
 522      -extern __inline__ void
      522 +extern __GNU_INLINE void
 523  523  sse_divsd(double *d1, double *d2, double *d3)
 524  524  {
 525  525          __asm__ __volatile__(
 526  526              "divsd %2,%1\n\t"
 527  527              "movsd %1,%0"
 528  528              : "=m" (*d3), "+x" (*d1)
 529  529              : "x" (*d2));
 530  530  }
 531  531  
 532      -extern __inline__ void
      532 +extern __GNU_INLINE void
 533  533  sse_sqrtsd(double *d1, double *d2)
 534  534  {
 535  535          double tmp;
 536  536  
 537  537          __asm__ __volatile__(
 538  538              "sqrtsd %2, %1\n\t"
 539  539              "movsd %1, %0"
 540  540              : "=m" (*d2), "=x" (tmp)
 541  541              : "m" (*d1));
 542  542  }
 543  543  
 544      -extern __inline__ void
      544 +extern __GNU_INLINE void
 545  545  sse_ucomisd(double *d1, double *d2)
 546  546  {
 547  547          __asm__ __volatile__("ucomisd %1, %0" : : "x" (*d1), "x" (*d2));
 548  548  }
 549  549  
 550      -extern __inline__ void
      550 +extern __GNU_INLINE void
 551  551  sse_comisd(double *d1, double *d2)
 552  552  {
 553  553          __asm__ __volatile__("comisd %1, %0" : : "x" (*d1), "x" (*d2));
 554  554  }
 555  555  
 556      -extern __inline__ void
      556 +extern __GNU_INLINE void
 557  557  sse_cvtsd2ss(double *d1, float *f1)
 558  558  {
 559  559          double tmp;
 560  560  
 561  561          __asm__ __volatile__(
 562  562              "cvtsd2ss %2,%1\n\t"
 563  563              "movss    %1,%0"
 564  564              : "=m" (*f1), "=x" (tmp)
 565  565              : "m" (*d1));
 566  566  }
 567  567  
 568      -extern __inline__ void
      568 +extern __GNU_INLINE void
 569  569  sse_cvtsi2sd(int *i1, double *d1)
 570  570  {
 571  571          double tmp;
 572  572          __asm__ __volatile__(
 573  573              "cvtsi2sd %2,%1\n\t"
 574  574              "movsd    %1,%0"
 575  575              : "=m" (*d1), "=x" (tmp)
 576  576              : "m" (*i1));
 577  577  }
 578  578  
 579      -extern __inline__ void
      579 +extern __GNU_INLINE void
 580  580  sse_cvttsd2si(double *d1, int *i1)
 581  581  {
 582  582          int tmp;
 583  583  
 584  584          __asm__ __volatile__(
 585  585              "cvttsd2si %2,%1\n\t"
 586  586              "movl      %1,%0"
 587  587              : "=m" (*i1), "=r" (tmp)
 588  588              : "m" (*d1));
 589  589  }
 590  590  
 591      -extern __inline__ void
      591 +extern __GNU_INLINE void
 592  592  sse_cvtsd2si(double *d1, int *i1)
 593  593  {
 594  594          int tmp;
 595  595  
 596  596          __asm__ __volatile__(
 597  597              "cvtsd2si %2,%1\n\t"
 598  598              "movl     %1,%0"
 599  599              : "=m" (*i1), "=r" (tmp)
 600  600              : "m" (*d1));
 601  601  }
 602  602  
 603  603  #if defined(__amd64)
 604      -extern __inline__ void
      604 +extern __GNU_INLINE void
 605  605  sse_cvtsi2sdq(long long *ll1, double *d1)
 606  606  {
 607  607          double tmp;
 608  608  
 609  609          __asm__ __volatile__(
 610  610              "cvtsi2sdq %2,%1\n\t"
 611  611              "movsd     %1,%0"
 612  612              : "=m" (*d1), "=x" (tmp)
 613  613              : "m" (*ll1));
 614  614  }
 615  615  
 616      -extern __inline__ void
      616 +extern __GNU_INLINE void
 617  617  sse_cvttsd2siq(double *d1, long long *ll1)
 618  618  {
 619  619          uint64_t tmp;
 620  620  
 621  621          __asm__ __volatile__(
 622  622              "cvttsd2siq %2,%1\n\t"
 623  623              "movq       %1,%0"
 624  624              : "=m" (*ll1), "=r" (tmp)
 625  625              : "m" (*d1));
 626  626  }
 627  627  
 628      -extern __inline__ void
      628 +extern __GNU_INLINE void
 629  629  sse_cvtsd2siq(double *d1, long long *ll1)
 630  630  {
 631  631          uint64_t tmp;
 632  632  
 633  633          __asm__ __volatile__(
 634  634              "cvtsd2siq %2,%1\n\t"
 635  635              "movq      %1,%0"
 636  636              : "=m" (*ll1), "=r" (tmp)
 637  637              : "m" (*d1));
 638  638  }
 639  639  #endif
 640  640  
 641  641  #elif defined(__sparc)
 642      -extern __inline__ void
      642 +extern __GNU_INLINE void
 643  643  __fenv_getfsr(unsigned long *l)
 644  644  {
 645  645          __asm__ __volatile__(
 646  646  #if defined(__sparcv9)
 647      -                "stx %%fsr,%0\n\t"
      647 +            "stx %%fsr,%0\n\t"
 648  648  #else
 649      -                "st  %%fsr,%0\n\t"
      649 +            "st  %%fsr,%0\n\t"
 650  650  #endif
 651      -                : "=m" (*l));
      651 +            : "=m" (*l));
 652  652  }
 653  653  
 654      -extern __inline__ void
      654 +extern __GNU_INLINE void
 655  655  __fenv_setfsr(const unsigned long *l)
 656  656  {
 657  657          __asm__ __volatile__(
 658  658  #if defined(__sparcv9)
 659      -                "ldx %0,%%fsr\n\t"
      659 +            "ldx %0,%%fsr\n\t"
 660  660  #else
 661      -                "ld %0,%%fsr\n\t"
      661 +            "ld %0,%%fsr\n\t"
 662  662  #endif
 663      -                : : "m" (*l) : "cc");
      663 +            : : "m" (*l) : "cc");
 664  664  }
 665  665  
 666      -extern __inline__ void
      666 +extern __GNU_INLINE void
 667  667  __fenv_getfsr32(unsigned int *l)
 668  668  {
 669  669          __asm__ __volatile__("st %%fsr,%0\n\t" : "=m" (*l));
 670  670  }
 671  671  
 672      -extern __inline__ void
      672 +extern __GNU_INLINE void
 673  673  __fenv_setfsr32(const unsigned int *l)
 674  674  {
 675  675          __asm__ __volatile__("ld %0,%%fsr\n\t" : : "m" (*l));
 676  676  }
 677  677  #else
 678  678  #error "GCC FENV inlines not implemented for this platform"
 679  679  #endif
 680  680  
 681  681  #ifdef __cplusplus
 682  682  }
 683  683  #endif
 684  684  
 685  685  #endif  /* __GNUC__ */
 686  686  
 687  687  #endif /* _FENV_INLINES_H */
    