4853 illumos-gate is not lint-clean when built with openssl 1.0

The hunks below rename the GNU keyword "asm" to "__asm__" throughout the inline-assembly macros: "asm" is a GCC extension that lint and the strict ISO C modes reject, while "__asm__" sits in the implementation-reserved namespace and is accepted unconditionally by GCC.
@@ -160,29 +160,29 @@
* what we need here...
* <appro@fy.chalmers.se>
*/
# if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
# define ROTATE(a,n) ({ register unsigned int ret; \
- asm ( \
+ __asm__ ( \
"roll %1,%0" \
: "=r"(ret) \
: "I"(n), "0"((unsigned int)(a)) \
: "cc"); \
ret; \
})
# elif defined(_ARCH_PPC) || defined(_ARCH_PPC64) || \
defined(__powerpc) || defined(__ppc__) || defined(__powerpc64__)
# define ROTATE(a,n) ({ register unsigned int ret; \
- asm ( \
+ __asm__ ( \
"rlwinm %0,%1,%2,0,31" \
: "=r"(ret) \
: "r"(a), "I"(n)); \
ret; \
})
# elif defined(__s390x__)
# define ROTATE(a,n) ({ register unsigned int ret; \
- asm ("rll %0,%1,%2" \
+ __asm__ ("rll %0,%1,%2" \
: "=r"(ret) \
: "r"(a), "I"(n)); \
ret; \
})
# endif
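
For context, a minimal standalone sketch of why the rename matters (my own test file, not part of the patch; the file name and build line are assumptions). It targets GCC on x86, where the statement-expression form of ROTATE above is available:

/*
 * rotate_demo.c (hypothetical): gcc -std=c99 rotate_demo.c && ./a.out
 * With the plain spelling "asm", the strict -std modes stop treating
 * the word as a keyword and the macro typically fails to compile; the
 * reserved spelling "__asm__" is always recognized by GCC, which is
 * what makes the renamed macros lint-clean.
 */
#include <stdio.h>

/* x86 ROTATE from the first hunk, with the __asm__ spelling */
#define ROTATE(a,n) ({ register unsigned int ret; \
        __asm__ ( \
        "roll %1,%0" \
        : "=r"(ret) \
        : "I"(n), "0"((unsigned int)(a)) \
        : "cc"); \
        ret; \
        })

int
main(void)
{
        /* 0x80000001 rotated left by one bit is 0x00000003 */
        printf("%08x\n", ROTATE(0x80000001u, 1));
        return (0);
}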
@@ -205,14 +205,14 @@
* with gcc [on P4]. Well, first macro to be frank. We can pull
* this trick on x86* platforms only, because these CPUs can fetch
* unaligned data without raising an exception.
*/
# define HOST_c2l(c,l) ({ unsigned int r=*((const unsigned int *)(c)); \
- asm ("bswapl %0":"=r"(r):"0"(r)); \
+ __asm__ ("bswapl %0":"=r"(r):"0"(r)); \
(c)+=4; (l)=r; })
# define HOST_l2c(l,c) ({ unsigned int r=(l); \
- asm ("bswapl %0":"=r"(r):"0"(r)); \
+ __asm__ ("bswapl %0":"=r"(r):"0"(r)); \
*((unsigned int *)(c))=r; (c)+=4; r; })
# endif
# endif
# endif
#endif
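
The second set of hunks touches the x86 byte-order trick described in the comment above. A similar sketch (again mine, with an assumed file name) of what HOST_c2l does: one possibly unaligned 32-bit load followed by bswapl, so the four bytes come out in big-endian order:

/* c2l_demo.c (hypothetical): gcc c2l_demo.c && ./a.out on x86 */
#include <stdio.h>

/* HOST_c2l from the hunk above: unaligned fetch plus byte swap */
#define HOST_c2l(c,l) ({ unsigned int r=*((const unsigned int *)(c)); \
        __asm__ ("bswapl %0":"=r"(r):"0"(r)); \
        (c)+=4; (l)=r; })

int
main(void)
{
        static const unsigned char buf[] = { 0x01, 0x02, 0x03, 0x04 };
        const unsigned char *p = buf;
        unsigned int l;

        HOST_c2l(p, l);
        /* prints 01020304: bytes were consumed most-significant first */
        printf("%08x\n", l);
        return (0);
}

The cast through unsigned int * is the macro's deliberate unaligned-load trick; as the comment in the hunk notes, it is only safe on x86-class CPUs, which is why this branch is guarded.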
@@ -239,14 +239,14 @@
#elif defined(DATA_ORDER_IS_LITTLE_ENDIAN)
#ifndef PEDANTIC
# if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
# if defined(__s390x__)
-# define HOST_c2l(c,l) ({ asm ("lrv %0,%1" \
+# define HOST_c2l(c,l) ({ __asm__ ("lrv %0,%1" \
:"=d"(l) :"m"(*(const unsigned int *)(c)));\
(c)+=4; (l); })
-# define HOST_l2c(l,c) ({ asm ("strv %1,%0" \
+# define HOST_l2c(l,c) ({ __asm__ ("strv %1,%0" \
:"=m"(*(unsigned int *)(c)) :"d"(l));\
(c)+=4; (l); })
# endif
# endif
#endif
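
Finally, for the s390x pair in the last hunk, a portable C rendering (my own sketch with hypothetical names, not code from the patch) of what lrv and strv accomplish: on that big-endian machine they load and store a 32-bit word byte-reversed, giving the little-endian data order this branch of the header wants.

/* lrv_demo.c (hypothetical): portable equivalents of the macros */
#include <stdio.h>

/* Read a 32-bit word in little-endian byte order and advance c */
static unsigned int
host_c2l(const unsigned char **c)
{
        const unsigned char *p = *c;
        unsigned int l = (unsigned int)p[0] |
            ((unsigned int)p[1] << 8) |
            ((unsigned int)p[2] << 16) |
            ((unsigned int)p[3] << 24);

        *c += 4;
        return (l);
}

/* Write l back in little-endian byte order and advance c */
static void
host_l2c(unsigned int l, unsigned char **c)
{
        unsigned char *p = *c;

        p[0] = (unsigned char)(l);
        p[1] = (unsigned char)(l >> 8);
        p[2] = (unsigned char)(l >> 16);
        p[3] = (unsigned char)(l >> 24);
        *c += 4;
}

int
main(void)
{
        static const unsigned char in[] = { 0x78, 0x56, 0x34, 0x12 };
        unsigned char out[4];
        const unsigned char *ip = in;
        unsigned char *op = out;
        unsigned int l = host_c2l(&ip);

        host_l2c(l, &op);
        /* prints "12345678 78563412": the word round-trips exactly */
        printf("%08x %02x%02x%02x%02x\n", l,
            out[0], out[1], out[2], out[3]);
        return (0);
}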