4853 illumos-gate is not lint-clean when built with openssl 1.0
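The hunks below touch the GNU C inline-assembler templates in OpenSSL's md32_common.h. The bare `asm' keyword is a GNU extension that disappears when the compiler (or lint) runs in strict ISO C mode, while the reserved-namespace spelling `__asm__' is accepted by gcc in every mode, so renaming the former to the latter keeps the fast paths intact while letting illumos-gate build lint-clean. A minimal sketch of the difference (not part of the webrev; demo.c and the function names are made up for illustration, assuming gcc on x86/x86_64):

    /*
     * gcc -std=c99 -c demo.c: the first function is a syntax error,
     * because `asm' is not an ISO C keyword; the second compiles in
     * any mode.  (Illustrative sketch only.)
     */
    unsigned int
    swap_bad(unsigned int r)
    {
            asm ("bswapl %0" : "=r"(r) : "0"(r));           /* rejected */
            return (r);
    }

    unsigned int
    swap_ok(unsigned int r)
    {
            __asm__ ("bswapl %0" : "=r"(r) : "0"(r));       /* accepted */
            return (r);
    }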

          --- old/usr/src/lib/openssl/include/md32_common.h
          +++ new/usr/src/lib/openssl/include/md32_common.h
[154 lines elided]
 155  155  #  endif
 156  156  # elif defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
 157  157    /*
 158  158     * Some GNU C inline assembler templates. Note that these are
 159  159     * rotates by *constant* number of bits! But that's exactly
 160  160     * what we need here...
 161  161     *                                    <appro@fy.chalmers.se>
 162  162     */
 163  163  #  if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
 164  164  #   define ROTATE(a,n)  ({ register unsigned int ret;   \
 165      -                                asm (                   \
      165 +                                __asm__ (                       \
 166  166                                  "roll %1,%0"            \
 167  167                                  : "=r"(ret)             \
 168  168                                  : "I"(n), "0"((unsigned int)(a))        \
 169  169                                  : "cc");                \
 170  170                             ret;                         \
 171  171                          })
 172  172  #  elif defined(_ARCH_PPC) || defined(_ARCH_PPC64) || \
 173  173          defined(__powerpc) || defined(__ppc__) || defined(__powerpc64__)
 174  174  #   define ROTATE(a,n)  ({ register unsigned int ret;   \
 175      -                                asm (                   \
      175 +                                __asm__ (                       \
 176  176                                  "rlwinm %0,%1,%2,0,31"  \
 177  177                                  : "=r"(ret)             \
 178  178                                  : "r"(a), "I"(n));      \
 179  179                             ret;                         \
 180  180                          })
 181  181  #  elif defined(__s390x__)
 182  182  #   define ROTATE(a,n) ({ register unsigned int ret;    \
 183      -                                asm ("rll %0,%1,%2"     \
      183 +                                __asm__ ("rll %0,%1,%2" \
 184  184                                  : "=r"(ret)             \
 185  185                                  : "r"(a), "I"(n));      \
 186  186                            ret;                          \
 187  187                          })
 188  188  #  endif
 189  189  # endif
 190  190  #endif /* PEDANTIC */
 191  191  
 192  192  #ifndef ROTATE
 193  193  #define ROTATE(a,n)     (((a)<<(n))|(((a)&0xffffffff)>>(32-(n))))
[6 lines elided]
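When no inline-assembler template matches, ROTATE falls back to the portable shift-and-or form defined just above. A standalone sketch of the same 32-bit left-rotate (rotate32 is a hypothetical name; like the macro, it is only meant for shift counts 1 through 31):

    /* Illustrative sketch of the generic ROTATE fallback above. */
    static unsigned int
    rotate32(unsigned int a, int n)
    {
            /* the 0xffffffff mask keeps the right shift 32-bit clean */
            return (((a << n) | ((a & 0xffffffffU) >> (32 - n))));
    }

    /* e.g. rotate32(0x80000001U, 1) == 0x00000003 */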
 200  200  #  if ((defined(__i386) || defined(__i386__)) && !defined(I386_ONLY)) || \
 201  201        (defined(__x86_64) || defined(__x86_64__))
 202  202  #   if !defined(B_ENDIAN)
 203  203      /*
 204  204       * This gives ~30-40% performance improvement in SHA-256 compiled
 205  205       * with gcc [on P4]. Well, first macro to be frank. We can pull
 206  206       * this trick on x86* platforms only, because these CPUs can fetch
 207  207       * unaligned data without raising an exception.
 208  208       */
 209  209  #   define HOST_c2l(c,l)        ({ unsigned int r=*((const unsigned int *)(c)); \
 210      -                                   asm ("bswapl %0":"=r"(r):"0"(r));    \
      210 +                                   __asm__ ("bswapl %0":"=r"(r):"0"(r));        \
 211  211                                     (c)+=4; (l)=r;                       })
 212  212  #   define HOST_l2c(l,c)        ({ unsigned int r=(l);                  \
 213      -                                   asm ("bswapl %0":"=r"(r):"0"(r));    \
      213 +                                   __asm__ ("bswapl %0":"=r"(r):"0"(r));        \
 214  214                                     *((unsigned int *)(c))=r; (c)+=4; r; })
 215  215  #   endif
 216  216  #  endif
 217  217  # endif
 218  218  #endif
 219  219  #if defined(__s390__) || defined(__s390x__)
 220  220  # define HOST_c2l(c,l) ((l)=*((const unsigned int *)(c)), (c)+=4, (l))
 221  221  # define HOST_l2c(l,c) (*((unsigned int *)(c))=(l), (c)+=4, (l))
 222  222  #endif
 223  223  
[10 lines elided]
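The x86 hunk above exists because the MD32 digests consume big-endian 32-bit words while x86 stores data little-endian; one bswapl both byte-swaps and, since x86 fetches unaligned data without faulting, replaces four separate byte loads. The same idea written as a function instead of a statement-expression macro (host_c2l is a hypothetical name; assumes gcc on x86/x86_64):

    /* Sketch of what HOST_c2l does in the big-endian-data case. */
    static const unsigned char *
    host_c2l(const unsigned char *c, unsigned int *l)
    {
            unsigned int r = *((const unsigned int *)c);    /* may be unaligned */
            __asm__ ("bswapl %0" : "=r"(r) : "0"(r));       /* to host order */
            *l = r;
            return (c + 4);
    }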
 234  234                           *((c)++)=(unsigned char)(((l)>> 8)&0xff),      \
 235  235                           *((c)++)=(unsigned char)(((l)    )&0xff),      \
 236  236                           l)
 237  237  #endif
 238  238  
 239  239  #elif defined(DATA_ORDER_IS_LITTLE_ENDIAN)
 240  240  
 241  241  #ifndef PEDANTIC
 242  242  # if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
 243  243  #  if defined(__s390x__)
 244      -#   define HOST_c2l(c,l)        ({ asm ("lrv    %0,%1"                  \
      244 +#   define HOST_c2l(c,l)        ({ __asm__ ("lrv        %0,%1"                  \
 245  245                                     :"=d"(l) :"m"(*(const unsigned int *)(c)));\
 246  246                                     (c)+=4; (l);                         })
 247      -#   define HOST_l2c(l,c)        ({ asm ("strv   %1,%0"                  \
      247 +#   define HOST_l2c(l,c)        ({ __asm__ ("strv       %1,%0"                  \
 248  248                                     :"=m"(*(unsigned int *)(c)) :"d"(l));\
 249  249                                     (c)+=4; (l);                         })
 250  250  #  endif
 251  251  # endif
 252  252  #endif
 253  253  #if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
 254  254  # ifndef B_ENDIAN
 255  255     /* See comment in DATA_ORDER_IS_BIG_ENDIAN section. */
 256  256  #  define HOST_c2l(c,l) ((l)=*((const unsigned int *)(c)), (c)+=4, l)
 257  257  #  define HOST_l2c(l,c) (*((unsigned int *)(c))=(l), (c)+=4, l)
[158 lines elided]
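In the DATA_ORDER_IS_LITTLE_ENDIAN branch the roles reverse: big-endian s390x needs the byte-reversing lrv/strv instructions (now spelled with __asm__ as well), while little-endian x86 needs no swap at all, so HOST_c2l and HOST_l2c reduce to direct 32-bit accesses. A sketch of that x86 fast path (hypothetical function names, illustration only):

    /* Sketch of the little-endian-data fast path on x86. */
    static const unsigned char *
    host_c2l_le(const unsigned char *c, unsigned int *l)
    {
            *l = *((const unsigned int *)c);        /* data order == host order */
            return (c + 4);
    }

    static unsigned char *
    host_l2c_le(unsigned int l, unsigned char *c)
    {
            *((unsigned int *)c) = l;
            return (c + 4);
    }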