8 * You may not use this file except in compliance with the License.
9 *
10 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
11 * or http://opensource.org/licenses/CDDL-1.0.
12 * See the License for the specific language governing permissions
13 * and limitations under the License.
14 *
15 * When distributing Covered Code, include this CDDL HEADER in each
16 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
17 * If applicable, add the following below this CDDL HEADER, with the
18 * fields enclosed by brackets "[]" replaced with your own identifying
19 * information: Portions Copyright [yyyy] [name of copyright owner]
20 *
21 * CDDL HEADER END
22 *
23 * Copyright (C) 2009, 2010, Jorn Amundsen <jorn.amundsen@ntnu.no>
24 *
25 * C header file to determine compile machine byte order. Take care when cross
26 * compiling.
27 *
28 * $Id: byteorder.h 517 2013-02-17 20:34:39Z joern $
29 */
30 /*
31 * Portions copyright (c) 2013, Saso Kiselkov, All rights reserved
32 */
33
34 #ifndef _CRYPTO_EDONR_BYTEORDER_H
35 #define _CRYPTO_EDONR_BYTEORDER_H
36
37 #if defined(__linux)
38 #include <endian.h>
39 #else
40 #include <sys/param.h>
41 #endif
42
43 #if defined(__BYTE_ORDER)
44 #if (__BYTE_ORDER == __BIG_ENDIAN)
45 #define MACHINE_IS_BIG_ENDIAN
46 #elif (__BYTE_ORDER == __LITTLE_ENDIAN)
47 #define MACHINE_IS_LITTLE_ENDIAN
48 #endif
49 #elif defined(BYTE_ORDER)
50 #if (BYTE_ORDER == BIG_ENDIAN)
51 #define MACHINE_IS_BIG_ENDIAN
117 uint32_t *s4, h, l; \
118 l = (s64) & 0xfffffffful, h = (s64) >> 32; \
119 __asm__ volatile("addi %0,%3,4;stwbrx %1,0,%3;stwbrx %2,0,%0" \
120 : "+r"(s4) : "r"(l), "r"(h), "b"(d64)); \
121 }
122 #endif /* __64BIT__ */
/*
 * 32-bit byte-swapping load for AIX/PowerPC: lwbrx (load word
 * byte-reverse indexed) reads the 32-bit word at address s32 with
 * its bytes reversed into d32.
 */
#define aix_ld_swap32(s32, d32)\
__asm__("lwbrx %0,0,%1" : "=r"(d32) : "r"(s32))
/*
 * 32-bit byte-swapping store: stwbrx writes d32 byte-reversed to the
 * address in s32.  Declared volatile because the store is a side
 * effect the compiler must not elide or reorder away.
 */
#define aix_st_swap32(s32, d32)\
__asm__ volatile("stwbrx %1,0,%0" : : "r"(d32), "r"(s32))
/* Map the generic swap entry points onto the AIX/PowerPC versions. */
#define ld_swap32(s, d) aix_ld_swap32(s, d)
#define st_swap32(s, d) aix_st_swap32(s, d)
#define ld_swap64(s, d) aix_ld_swap64(s, d)
#define st_swap64(s, d) aix_st_swap64(s, d)
#endif /* __PPC__ || _ARCH_PPC */
132
#if defined(__sparc)
/* Treat sparcv9 without an explicit 32-bit sparcv8 choice as 64-bit. */
#if !defined(__arch64__) && !defined(__sparcv8) && defined(__sparcv9)
#define __arch64__
#endif
#if defined(__GNUC__) || (defined(__SUNPRO_C) && __SUNPRO_C > 0x590)
/* need Sun Studio C 5.10 and above for GNU inline assembly */
#if defined(__arch64__)
/*
 * 64-bit byte-swapping load/store.  ASI 0x88 (ASI_PRIMARY_LITTLE)
 * makes ldxa/stxa access memory in the opposite (little-endian)
 * byte order, so the swap comes for free on this big-endian CPU.
 */
#define sparc_ld_swap64(s64, d64) \
__asm__("ldxa [%1]0x88,%0" : "=r"(d64) : "r"(s64))
#define sparc_st_swap64(s64, d64) \
__asm__ volatile("stxa %0,[%1]0x88" : : "r"(s64), "r"(d64))
#define st_swap64(s, d) sparc_st_swap64(s, d)
#else
/*
 * 32-bit build: assemble the 64-bit value from two byte-swapped
 * 32-bit lda loads (ASI 0x88), low word at s64, high word at s64+4.
 * NOTE(review): s4 is given to the asm via a "+r" (read-write)
 * constraint but is never initialized before the asm overwrites it
 * with "add %3,4,%0"; an "=&r" (earlyclobber output) constraint
 * would avoid reading an indeterminate value -- confirm before
 * touching long-standing platform asm.
 */
#define sparc_ld_swap64(s64, d64) \
{ \
uint32_t *s4, h, l; \
__asm__("add %3,4,%0\n\tlda [%3]0x88,%1\n\tlda [%0]0x88,%2" \
: "+r"(s4), "=r"(l), "=r"(h) : "r"(s64)); \
d64 = ((uint64_t)h<<32) | l; \
}
153 #define sparc_st_swap64(s64, d64) \
154 { \
155 uint32_t *s4, h, l; \
156 l = (s64) & 0xfffffffful, h = (s64) >> 32; \
157 __asm__ volatile("add %3,4,%0\n\tsta %1,[%3]0x88\n\tsta %2,[%0]0x88"\
158 : "+r"(s4) : "r"(l), "r"(h), "r"(d64)); \
|
8 * You may not use this file except in compliance with the License.
9 *
10 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
11 * or http://opensource.org/licenses/CDDL-1.0.
12 * See the License for the specific language governing permissions
13 * and limitations under the License.
14 *
15 * When distributing Covered Code, include this CDDL HEADER in each
16 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
17 * If applicable, add the following below this CDDL HEADER, with the
18 * fields enclosed by brackets "[]" replaced with your own identifying
19 * information: Portions Copyright [yyyy] [name of copyright owner]
20 *
21 * CDDL HEADER END
22 *
23 * Copyright (C) 2009, 2010, Jorn Amundsen <jorn.amundsen@ntnu.no>
24 *
25 * C header file to determine compile machine byte order. Take care when cross
26 * compiling.
27 *
28 */
29 /*
30 * Portions copyright (c) 2013, Saso Kiselkov, All rights reserved
31 * Copyright 2016 Gary Mills
32 */
33
34 #ifndef _CRYPTO_EDONR_BYTEORDER_H
35 #define _CRYPTO_EDONR_BYTEORDER_H
36
37 #if defined(__linux)
38 #include <endian.h>
39 #else
40 #include <sys/param.h>
41 #endif
42
43 #if defined(__BYTE_ORDER)
44 #if (__BYTE_ORDER == __BIG_ENDIAN)
45 #define MACHINE_IS_BIG_ENDIAN
46 #elif (__BYTE_ORDER == __LITTLE_ENDIAN)
47 #define MACHINE_IS_LITTLE_ENDIAN
48 #endif
49 #elif defined(BYTE_ORDER)
50 #if (BYTE_ORDER == BIG_ENDIAN)
51 #define MACHINE_IS_BIG_ENDIAN
117 uint32_t *s4, h, l; \
118 l = (s64) & 0xfffffffful, h = (s64) >> 32; \
119 __asm__ volatile("addi %0,%3,4;stwbrx %1,0,%3;stwbrx %2,0,%0" \
120 : "+r"(s4) : "r"(l), "r"(h), "b"(d64)); \
121 }
122 #endif /* __64BIT__ */
/*
 * 32-bit byte-swapping load for AIX/PowerPC: lwbrx (load word
 * byte-reverse indexed) reads the 32-bit word at address s32 with
 * its bytes reversed into d32.
 */
#define aix_ld_swap32(s32, d32)\
__asm__("lwbrx %0,0,%1" : "=r"(d32) : "r"(s32))
/*
 * 32-bit byte-swapping store: stwbrx writes d32 byte-reversed to the
 * address in s32.  Declared volatile because the store is a side
 * effect the compiler must not elide or reorder away.
 */
#define aix_st_swap32(s32, d32)\
__asm__ volatile("stwbrx %1,0,%0" : : "r"(d32), "r"(s32))
/* Map the generic swap entry points onto the AIX/PowerPC versions. */
#define ld_swap32(s, d) aix_ld_swap32(s, d)
#define st_swap32(s, d) aix_st_swap32(s, d)
#define ld_swap64(s, d) aix_ld_swap64(s, d)
#define st_swap64(s, d) aix_st_swap64(s, d)
#endif /* __PPC__ || _ARCH_PPC */
132
#if defined(__sparc)
/* Treat sparcv9 without an explicit 32-bit sparcv8 choice as 64-bit. */
#if !defined(__arch64__) && !defined(__sparcv8) && defined(__sparcv9)
#define __arch64__
#endif
#if defined(__GNUC__) || (defined(__SUNPRO_C) && __SUNPRO_C > 0x590 && \
!defined(__lint))
/* need Sun Studio C 5.10 and above for GNU inline assembly, but not lint */
#if defined(__arch64__)
/*
 * 64-bit byte-swapping load/store.  ASI 0x88 (ASI_PRIMARY_LITTLE)
 * makes ldxa/stxa access memory in the opposite (little-endian)
 * byte order, so the swap comes for free on this big-endian CPU.
 */
#define sparc_ld_swap64(s64, d64) \
__asm__("ldxa [%1]0x88,%0" : "=r"(d64) : "r"(s64))
#define sparc_st_swap64(s64, d64) \
__asm__ volatile("stxa %0,[%1]0x88" : : "r"(s64), "r"(d64))
#define st_swap64(s, d) sparc_st_swap64(s, d)
#else
/*
 * 32-bit build: assemble the 64-bit value from two byte-swapped
 * 32-bit lda loads (ASI 0x88), low word at s64, high word at s64+4.
 * NOTE(review): s4 is given to the asm via a "+r" (read-write)
 * constraint but is never initialized before the asm overwrites it
 * with "add %3,4,%0"; an "=&r" (earlyclobber output) constraint
 * would avoid reading an indeterminate value -- confirm before
 * touching long-standing platform asm.
 */
#define sparc_ld_swap64(s64, d64) \
{ \
uint32_t *s4, h, l; \
__asm__("add %3,4,%0\n\tlda [%3]0x88,%1\n\tlda [%0]0x88,%2" \
: "+r"(s4), "=r"(l), "=r"(h) : "r"(s64)); \
d64 = ((uint64_t)h<<32) | l; \
}
154 #define sparc_st_swap64(s64, d64) \
155 { \
156 uint32_t *s4, h, l; \
157 l = (s64) & 0xfffffffful, h = (s64) >> 32; \
158 __asm__ volatile("add %3,4,%0\n\tsta %1,[%3]0x88\n\tsta %2,[%0]0x88"\
159 : "+r"(s4) : "r"(l), "r"(h), "r"(d64)); \
|