Print this page
6648 illumos build should be explicit about C standards
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/grub/grub-0.97/netboot/io.h
+++ new/usr/src/grub/grub-0.97/netboot/io.h
1 +#include <sys/ccompile.h>
2 +
1 3 #ifndef IO_H
2 4 #define IO_H
3 5
4 -
5 6 /* Amount of relocation etherboot is experiencing */
6 7 extern unsigned long virt_offset;
7 8
8 9 /* Don't require identity mapped physical memory,
9 10 * osloader.c is the only valid user at the moment.
10 11 */
11 12 unsigned long virt_to_phys(volatile const void *virt_addr);
12 13 void *phys_to_virt(unsigned long phys_addr);
13 14
14 15 /* virt_to_bus converts an address inside of etherboot [_start, _end]
15 16 * into a memory access cards can use.
16 17 */
17 18 #define virt_to_bus virt_to_phys
18 19
19 20
20 21 /* bus_to_virt reverses virt_to_bus, the address must be output
21 22 * from virt_to_bus to be valid. This function does not work on
22 23 * all bus addresses.
23 24 */
24 25 #define bus_to_virt phys_to_virt
25 26
26 27 /* ioremap converts a random 32bit bus address into something
27 28 * etherboot can access.
28 29 */
29 30 static inline void *ioremap(unsigned long bus_addr, unsigned long length __unused)
30 31 {
31 32 return bus_to_virt(bus_addr);
32 33 }
33 34
34 35 /* iounmap cleans up anything ioremap had to setup */
35 36 static inline void iounmap(void *virt_addr __unused)
36 37 {
37 38 return;
38 39 }
39 40
40 41 /*
41 42 * This file contains the definitions for the x86 IO instructions
42 43 * inb/inw/inl/outb/outw/outl and the "string versions" of the same
43 44 * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
44 45 * versions of the single-IO instructions (inb_p/inw_p/..).
45 46 *
46 47 * This file is not meant to be obfuscating: it's just complicated
47 48 * to (a) handle it all in a way that makes gcc able to optimize it
48 49 * as well as possible and (b) trying to avoid writing the same thing
49 50 * over and over again with slight variations and possibly making a
50 51 * mistake somewhere.
51 52 */
52 53
53 54 /*
54 55 * Thanks to James van Artsdalen for a better timing-fix than
55 56 * the two short jumps: using outb's to a nonexistent port seems
56 57 * to guarantee better timings even on fast machines.
57 58 *
58 59 * On the other hand, I'd like to be sure of a non-existent port:
59 60 * I feel a bit unsafe about using 0x80 (should be safe, though)
60 61 *
61 62 * Linus
62 63 */
63 64
64 65 #ifdef SLOW_IO_BY_JUMPING
65 66 #define __SLOW_DOWN_IO __asm__ __volatile__("jmp 1f\n1:\tjmp 1f\n1:")
66 67 #else
67 68 #define __SLOW_DOWN_IO __asm__ __volatile__("outb %al,$0x80")
68 69 #endif
69 70
70 71 #ifdef REALLY_SLOW_IO
71 72 #define SLOW_DOWN_IO { __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; }
72 73 #else
73 74 #define SLOW_DOWN_IO __SLOW_DOWN_IO
74 75 #endif
75 76
76 77 /*
77 78 * readX/writeX() are used to access memory mapped devices. On some
78 79 * architectures the memory mapped IO stuff needs to be accessed
79 80 * differently. On the x86 architecture, we just read/write the
80 81 * memory location directly.
81 82 */
82 83 #define readb(addr) (*(volatile unsigned char *) (addr))
83 84 #define readw(addr) (*(volatile unsigned short *) (addr))
84 85 #define readl(addr) (*(volatile unsigned int *) (addr))
85 86
86 87 #define writeb(b,addr) ((*(volatile unsigned char *) (addr)) = (b))
87 88 #define writew(b,addr) ((*(volatile unsigned short *) (addr)) = (b))
88 89 #define writel(b,addr) ((*(volatile unsigned int *) (addr)) = (b))
89 90
90 91 #define memcpy_fromio(a,b,c) memcpy((a),(void *)(b),(c))
91 92 #define memcpy_toio(a,b,c) memcpy((void *)(a),(b),(c))
92 93
93 94 /*
94 95 * Force strict CPU ordering.
95 96 * And yes, this is required on UP too when we're talking
96 97 * to devices.
97 98 *
98 99 * For now, "wmb()" doesn't actually do anything, as all
99 100 * Intel CPU's follow what Intel calls a *Processor Order*,
↓ open down ↓ |
85 lines elided |
↑ open up ↑ |
100 101 * in which all writes are seen in the program order even
101 102 * outside the CPU.
102 103 *
103 104 * I expect future Intel CPU's to have a weaker ordering,
104 105 * but I'd also expect them to finally get their act together
105 106 * and add some real memory barriers if so.
106 107 *
107 108  * Some non-Intel clones support out-of-order stores. wmb() ceases to be a
108 109 * nop for these.
109 110 */
110 -
111 +
111 112 #define mb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
112 113 #define rmb() mb()
113 114 #define wmb() mb();
114 115
115 116
116 117 /*
117 118 * Talk about misusing macros..
118 119 */
119 120
120 121 #define __OUT1(s,x) \
121 122 extern void __out##s(unsigned x value, unsigned short port); \
122 -extern inline void __out##s(unsigned x value, unsigned short port) {
123 +extern __GNU_INLINE \
124 +void __out##s(unsigned x value, unsigned short port) {
123 125
124 126 #define __OUT2(s,s1,s2) \
125 127 __asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1"
126 128
127 129 #define __OUT(s,s1,x) \
128 130 __OUT1(s,x) __OUT2(s,s1,"w") : : "a" (value), "d" (port)); } \
129 131 __OUT1(s##c,x) __OUT2(s,s1,"") : : "a" (value), "id" (port)); } \
130 132 __OUT1(s##_p,x) __OUT2(s,s1,"w") : : "a" (value), "d" (port)); SLOW_DOWN_IO; } \
131 133 __OUT1(s##c_p,x) __OUT2(s,s1,"") : : "a" (value), "id" (port)); SLOW_DOWN_IO; }
132 134
133 135 #define __IN1(s,x) \
134 136 extern unsigned x __in##s(unsigned short port); \
135 -extern inline unsigned x __in##s(unsigned short port) { unsigned x _v;
137 +extern __GNU_INLINE \
138 +unsigned x __in##s(unsigned short port) { unsigned x _v;
136 139
137 140 #define __IN2(s,s1,s2) \
138 141 __asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0"
139 142
140 143 #define __IN(s,s1,x,i...) \
141 144 __IN1(s,x) __IN2(s,s1,"w") : "=a" (_v) : "d" (port) ,##i ); return _v; } \
142 145 __IN1(s##c,x) __IN2(s,s1,"") : "=a" (_v) : "id" (port) ,##i ); return _v; } \
143 146 __IN1(s##_p,x) __IN2(s,s1,"w") : "=a" (_v) : "d" (port) ,##i ); SLOW_DOWN_IO; return _v; } \
144 147 __IN1(s##c_p,x) __IN2(s,s1,"") : "=a" (_v) : "id" (port) ,##i ); SLOW_DOWN_IO; return _v; }
145 148
146 149 #define __INS(s) \
147 150 extern void ins##s(unsigned short port, void * addr, unsigned long count); \
148 -extern inline void ins##s(unsigned short port, void * addr, unsigned long count) \
151 +extern __GNU_INLINE \
152 +void ins##s(unsigned short port, void * addr, unsigned long count) \
149 153 { __asm__ __volatile__ ("cld ; rep ; ins" #s \
150 154 : "=D" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
151 155
152 156 #define __OUTS(s) \
153 157 extern void outs##s(unsigned short port, const void * addr, unsigned long count); \
154 -extern inline void outs##s(unsigned short port, const void * addr, unsigned long count) \
158 +extern __GNU_INLINE \
159 +void outs##s(unsigned short port, const void * addr, unsigned long count) \
155 160 { __asm__ __volatile__ ("cld ; rep ; outs" #s \
156 161 : "=S" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
157 162
158 163 __IN(b,"", char)
159 164 __IN(w,"",short)
160 165 __IN(l,"", long)
161 166
162 167 __OUT(b,"b",char)
163 168 __OUT(w,"w",short)
164 169 __OUT(l,,int)
165 170
166 171 __INS(b)
167 172 __INS(w)
168 173 __INS(l)
169 174
170 175 __OUTS(b)
171 176 __OUTS(w)
172 177 __OUTS(l)
173 178
174 179 /*
175 180 * Note that due to the way __builtin_constant_p() works, you
176 181 * - can't use it inside a inline function (it will never be true)
177 182 * - you don't have to worry about side effects within the __builtin..
178 183 */
179 184 #define outb(val,port) \
180 185 ((__builtin_constant_p((port)) && (port) < 256) ? \
181 186 __outbc((val),(port)) : \
182 187 __outb((val),(port)))
183 188
184 189 #define inb(port) \
185 190 ((__builtin_constant_p((port)) && (port) < 256) ? \
186 191 __inbc(port) : \
187 192 __inb(port))
188 193
189 194 #define outb_p(val,port) \
190 195 ((__builtin_constant_p((port)) && (port) < 256) ? \
191 196 __outbc_p((val),(port)) : \
192 197 __outb_p((val),(port)))
193 198
194 199 #define inb_p(port) \
195 200 ((__builtin_constant_p((port)) && (port) < 256) ? \
196 201 __inbc_p(port) : \
197 202 __inb_p(port))
198 203
199 204 #define outw(val,port) \
200 205 ((__builtin_constant_p((port)) && (port) < 256) ? \
201 206 __outwc((val),(port)) : \
202 207 __outw((val),(port)))
203 208
204 209 #define inw(port) \
205 210 ((__builtin_constant_p((port)) && (port) < 256) ? \
206 211 __inwc(port) : \
207 212 __inw(port))
208 213
209 214 #define outw_p(val,port) \
210 215 ((__builtin_constant_p((port)) && (port) < 256) ? \
211 216 __outwc_p((val),(port)) : \
212 217 __outw_p((val),(port)))
213 218
214 219 #define inw_p(port) \
215 220 ((__builtin_constant_p((port)) && (port) < 256) ? \
216 221 __inwc_p(port) : \
217 222 __inw_p(port))
218 223
219 224 #define outl(val,port) \
220 225 ((__builtin_constant_p((port)) && (port) < 256) ? \
221 226 __outlc((val),(port)) : \
222 227 __outl((val),(port)))
223 228
224 229 #define inl(port) \
225 230 ((__builtin_constant_p((port)) && (port) < 256) ? \
226 231 __inlc(port) : \
227 232 __inl(port))
228 233
229 234 #define outl_p(val,port) \
230 235 ((__builtin_constant_p((port)) && (port) < 256) ? \
231 236 __outlc_p((val),(port)) : \
232 237 __outl_p((val),(port)))
233 238
234 239 #define inl_p(port) \
235 240 ((__builtin_constant_p((port)) && (port) < 256) ? \
236 241 __inlc_p(port) : \
237 242 __inl_p(port))
238 243
239 244 #endif /* IO_H */
↓ open down ↓ |
75 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX