6648 illumos build should be explicit about C standards
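The change below replaces every "extern __inline__" in the amd64 libm inline header with "extern __GNU_INLINE". Once the build drives gcc with an explicit C standard (e.g. -std=gnu99), plain "extern inline" takes on C99 semantics: every translation unit that includes this header would emit its own external definition of these functions, colliding with the out-of-line copies built into libm. The __GNU_INLINE macro pins the traditional GNU89 behaviour (the body is used only for inlining and is never compiled standalone) via the gnu_inline attribute. A minimal sketch of how such a macro is typically provided (presumably picked up via <sys/ccompile.h> or a similar shared header; the exact version guard here is an assumption, not the actual definition):

    /*
     * Sketch only: use the gnu_inline attribute where available
     * (added around GCC 4.2), otherwise fall back to plain __inline__.
     */
    #if defined(__GNUC__) && \
        (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 2))
    #define __GNU_INLINE    __inline__ __attribute__((__gnu_inline__))
    #else
    #define __GNU_INLINE    __inline__
    #endif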

          --- old/usr/src/lib/libm/amd64/src/libm_inlines.h
          +++ new/usr/src/lib/libm/amd64/src/libm_inlines.h
[... 37 lines elided ...]
  38   38  
  39   39  #ifdef __GNUC__
  40   40  
  41   41  #ifdef __cplusplus
  42   42  extern "C" {
  43   43  #endif
  44   44  
  45   45  #include <sys/types.h>
  46   46  #include <sys/ieeefp.h>
  47   47  
  48      -extern __inline__ float
       48 +extern __GNU_INLINE float
  49   49  __inline_sqrtf(float a)
  50   50  {
  51   51          float ret;
  52   52  
  53   53          __asm__ __volatile__("sqrtss %1, %0\n\t" : "=x" (ret) : "x" (a));
  54   54          return (ret);
  55   55  }
  56   56  
  57      -extern __inline__ double
       57 +extern __GNU_INLINE double
  58   58  __inline_sqrt(double a)
  59   59  {
  60   60          double ret;
  61   61  
  62   62          __asm__ __volatile__("sqrtsd %1, %0\n\t" : "=x" (ret) : "x" (a));
  63   63          return (ret);
  64   64  }
  65   65  
  66      -extern __inline__ double
       66 +extern __GNU_INLINE double
  67   67  __ieee754_sqrt(double a)
  68   68  {
  69   69          return (__inline_sqrt(a));
  70   70  }
  71   71  
  72   72  /*
  73   73   * 00 - 24 bits
  74   74   * 01 - reserved
  75   75   * 10 - 53 bits
  76   76   * 11 - 64 bits
  77   77   */
  78      -extern __inline__ int
       78 +extern __GNU_INLINE int
  79   79  __swapRP(int i)
  80   80  {
  81   81          int ret;
  82   82          uint16_t cw;
  83   83  
  84   84          __asm__ __volatile__("fstcw %0\n\t" : "=m" (cw));
  85   85  
  86   86          ret = (cw >> 8) & 0x3;
  87   87          cw = (cw & 0xfcff) | ((i & 0x3) << 8);
  88   88  
[... 1 line elided ...]
  90   90  
  91   91          return (ret);
  92   92  }
  93   93  
  94   94  /*
  95   95   * 00 - Round to nearest, with even preferred
  96   96   * 01 - Round down
  97   97   * 10 - Round up
  98   98   * 11 - Chop
  99   99   */
 100      -extern __inline__ enum fp_direction_type
      100 +extern __GNU_INLINE enum fp_direction_type
 101  101  __swap87RD(enum fp_direction_type i)
 102  102  {
 103  103          int ret;
 104  104          uint16_t cw;
 105  105  
 106  106          __asm__ __volatile__("fstcw %0\n\t" : "=m" (cw));
 107  107  
 108  108          ret = (cw >> 10) & 0x3;
 109  109          cw = (cw & 0xf3ff) | ((i & 0x3) << 10);
 110  110  
 111  111          __asm__ __volatile__("fldcw %0\n\t" : : "m" (cw));
 112  112  
 113  113          return (ret);
 114  114  }
 115  115  
 116      -extern __inline__ int
      116 +extern __GNU_INLINE int
 117  117  abs(int i)
 118  118  {
 119  119          int ret;
 120  120          __asm__ __volatile__(
 121  121              "movl    %1, %0\n\t"
 122  122              "negl    %1\n\t"
 123  123              "cmovnsl %1, %0\n\t"
 124  124              : "=r" (ret), "+r" (i)
 125  125              :
 126  126              : "cc");
 127  127          return (ret);
 128  128  }
 129  129  
 130      -extern __inline__ double
      130 +extern __GNU_INLINE double
 131  131  copysign(double d1, double d2)
 132  132  {
 133  133          double tmpd;
 134  134  
 135  135          __asm__ __volatile__(
 136  136              "movd %3, %1\n\t"
 137  137              "andpd %1, %0\n\t"
 138  138              "andnpd %2, %1\n\t"
 139  139              "orpd %1, %0\n\t"
 140  140              : "+&x" (d1), "=&x" (tmpd)
 141  141              : "x" (d2), "r" (0x7fffffffffffffff));
 142  142  
 143  143          return (d1);
 144  144  }
 145  145  
 146      -extern __inline__ double
      146 +extern __GNU_INLINE double
 147  147  fabs(double d)
 148  148  {
 149  149          double tmp;
 150  150  
 151  151          __asm__ __volatile__(
 152  152              "movd  %2, %1\n\t"
 153  153              "andpd %1, %0"
 154  154              : "+x" (d), "=&x" (tmp)
 155  155              : "r" (0x7fffffffffffffff));
 156  156  
 157  157          return (d);
 158  158  }
 159  159  
 160      -extern __inline__ float
      160 +extern __GNU_INLINE float
 161  161  fabsf(float d)
 162  162  {
 163  163          __asm__ __volatile__(
 164  164              "andpd %1, %0"
 165  165              : "+x" (d)
 166  166              : "x" (0x7fffffff));
 167  167  
 168  168          return (d);
 169  169  }
 170  170  
 171      -extern __inline__ int
      171 +extern __GNU_INLINE int
 172  172  finite(double d)
 173  173  {
 174  174          long ret = 0x7fffffffffffffff;
 175  175          uint64_t tmp;
 176  176  
 177  177          __asm__ __volatile__(
 178  178              "movq %2, %1\n\t"
 179  179              "andq %1, %0\n\t"
 180  180              "movq $0x7ff0000000000000, %1\n\t"
 181  181              "subq %1, %0\n\t"
 182  182              "shrq $63, %0\n\t"
 183  183              : "+r" (ret), "=r" (tmp)
 184  184              : "x" (d)
 185  185              : "cc");
 186  186  
 187  187          return (ret);
 188  188  }
 189  189  
 190      -extern __inline__ int
      190 +extern __GNU_INLINE int
 191  191  signbit(double d)
 192  192  {
 193  193          long ret;
 194  194          __asm__ __volatile__(
 195  195              "movmskpd %1, %0\n\t"
 196  196              "andq     $1, %0\n\t"
 197  197              : "=r" (ret)
 198  198              : "x" (d)
 199  199              : "cc");
 200  200          return (ret);
 201  201  }
 202  202  
 203      -extern __inline__ double
      203 +extern __GNU_INLINE double
 204  204  sqrt(double d)
 205  205  {
 206  206          return (__inline_sqrt(d));
 207  207  }
 208  208  
 209      -extern __inline__ float
      209 +extern __GNU_INLINE float
 210  210  sqrtf(float f)
 211  211  {
 212  212          return (__inline_sqrtf(f));
 213  213  }
 214  214  
 215  215  #ifdef __cplusplus
 216  216  }
 217  217  #endif
 218  218  
 219  219  #endif  /* __GNUC__ */
 220  220  
 221  221  #endif /* _LIBM_INLINES_H */
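
For context, a minimal sketch (hypothetical file and function names, not part of this change) of the difference the attribute makes once the compiler runs with an explicit -std=gnu99:

    /* fast_abs.h -- hypothetical header following the same pattern */
    #ifndef _FAST_ABS_H
    #define _FAST_ABS_H

    /*
     * With gnu_inline, this body is used only for inlining and no symbol
     * is emitted here; calls that are not inlined resolve to the single
     * out-of-line copy the library builds separately.  Without the
     * attribute, -std=gnu99 treats "extern inline" as an external
     * definition, so every .c file including this header would emit its
     * own fast_abs and the link would fail with multiple definitions.
     */
    extern __inline__ __attribute__((__gnu_inline__)) int
    fast_abs(int i)
    {
            return (i < 0 ? -i : i);
    }

    #endif  /* _FAST_ABS_H */

    /* caller.c -- any consumer; emits no definition of fast_abs */
    #include "fast_abs.h"

    int
    use_abs(int x)
    {
            return (fast_abs(x));
    }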
    