#include <sys/ccompile.h>

#ifndef IO_H
#define IO_H

/* Amount of relocation etherboot is experiencing */
extern unsigned long virt_offset;

/* Don't require identity-mapped physical memory;
 * osloader.c is the only valid user at the moment.
 */
unsigned long virt_to_phys(volatile const void *virt_addr);
void *phys_to_virt(unsigned long phys_addr);

/* virt_to_bus converts an address inside of etherboot [_start, _end]
 * into an address that cards can use for memory access.
 */
#define virt_to_bus virt_to_phys


/* bus_to_virt reverses virt_to_bus; the address must have been produced
 * by virt_to_bus to be valid. This function does not work on
 * all bus addresses.
 */
#define bus_to_virt phys_to_virt

/* ioremap converts an arbitrary 32-bit bus address into something
 * etherboot can access.
 */
static inline void *ioremap(unsigned long bus_addr, unsigned long length __unused)
{
	return bus_to_virt(bus_addr);
}

/* iounmap cleans up anything ioremap had to set up */
static inline void iounmap(void *virt_addr __unused)
{
	return;
}

/*
 * This file contains the definitions for the x86 IO instructions
 * inb/inw/inl/outb/outw/outl and the "string versions" of the same
 * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
 * versions of the single-IO instructions (inb_p/inw_p/..).
 *
 * This file is not meant to be obfuscating: it's just complicated
 * to (a) handle it all in a way that makes gcc able to optimize it
 * as well as possible and (b) avoid writing the same thing over and
 * over again with slight variations and possibly making a mistake
 * somewhere.
 */

/*
 * Thanks to James van Artsdalen for a better timing-fix than
 * the two short jumps: using outb's to a nonexistent port seems
 * to guarantee better timings even on fast machines.
 *
 * On the other hand, I'd like to be sure of a non-existent port:
 * I feel a bit unsafe about using 0x80 (should be safe, though)
 *
 *		Linus
 */

#ifdef SLOW_IO_BY_JUMPING
#define __SLOW_DOWN_IO __asm__ __volatile__("jmp 1f\n1:\tjmp 1f\n1:")
#else
#define __SLOW_DOWN_IO __asm__ __volatile__("outb %al,$0x80")
#endif

#ifdef REALLY_SLOW_IO
#define SLOW_DOWN_IO { __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; }
#else
#define SLOW_DOWN_IO __SLOW_DOWN_IO
#endif

/*
 * readX/writeX() are used to access memory mapped devices. On some
 * architectures the memory mapped IO stuff needs to be accessed
 * differently. On the x86 architecture, we just read/write the
 * memory location directly.
 */
#define readb(addr) (*(volatile unsigned char *) (addr))
#define readw(addr) (*(volatile unsigned short *) (addr))
#define readl(addr) (*(volatile unsigned int *) (addr))

#define writeb(b,addr) ((*(volatile unsigned char *) (addr)) = (b))
#define writew(b,addr) ((*(volatile unsigned short *) (addr)) = (b))
#define writel(b,addr) ((*(volatile unsigned int *) (addr)) = (b))

#define memcpy_fromio(a,b,c)	memcpy((a),(void *)(b),(c))
#define memcpy_toio(a,b,c)	memcpy((void *)(a),(b),(c))

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * For now, "wmb()" doesn't actually do anything, as all
 * Intel CPUs follow what Intel calls a *Processor Order*,
 * in which all writes are seen in program order even
 * outside the CPU.
 *
 * I expect future Intel CPUs to have a weaker ordering,
 * but I'd also expect them to finally get their act together
 * and add some real memory barriers if so.
 *
 * Some non-Intel clones support out-of-order store. wmb() ceases to be a
 * nop for these.
 */

#define mb()	__asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
#define rmb()	mb()
#define wmb()	mb()
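
/*
 * Illustrative sketch (not part of the original header): how a driver
 * might combine ioremap()/writel()/wmb().  The 4kB window size, the
 * register offsets and the "doorbell" register below are hypothetical,
 * chosen only to show the intended ordering of the two MMIO writes.
 */
static inline void example_post_descriptor(unsigned long bus_addr,
	unsigned int desc)
{
	unsigned char *regs = ioremap(bus_addr, 4096);

	writel(desc, regs + 0x10);	/* hypothetical descriptor register */
	wmb();				/* descriptor write must not pass the kick */
	writel(1, regs + 0x14);		/* hypothetical doorbell register */
	iounmap(regs);
}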

/*
 * Talk about misusing macros..
 */

#define __OUT1(s,x) \
extern void __out##s(unsigned x value, unsigned short port); \
extern __GNU_INLINE \
void __out##s(unsigned x value, unsigned short port) {

#define __OUT2(s,s1,s2) \
__asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1"

#define __OUT(s,s1,x) \
__OUT1(s,x) __OUT2(s,s1,"w") : : "a" (value), "d" (port)); } \
__OUT1(s##c,x) __OUT2(s,s1,"") : : "a" (value), "id" (port)); } \
__OUT1(s##_p,x) __OUT2(s,s1,"w") : : "a" (value), "d" (port)); SLOW_DOWN_IO; } \
__OUT1(s##c_p,x) __OUT2(s,s1,"") : : "a" (value), "id" (port)); SLOW_DOWN_IO; }

#define __IN1(s,x) \
extern unsigned x __in##s(unsigned short port); \
extern __GNU_INLINE \
unsigned x __in##s(unsigned short port) { unsigned x _v;

#define __IN2(s,s1,s2) \
__asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0"

#define __IN(s,s1,x,i...) \
__IN1(s,x) __IN2(s,s1,"w") : "=a" (_v) : "d" (port) ,##i ); return _v; } \
__IN1(s##c,x) __IN2(s,s1,"") : "=a" (_v) : "id" (port) ,##i ); return _v; } \
__IN1(s##_p,x) __IN2(s,s1,"w") : "=a" (_v) : "d" (port) ,##i ); SLOW_DOWN_IO; return _v; } \
__IN1(s##c_p,x) __IN2(s,s1,"") : "=a" (_v) : "id" (port) ,##i ); SLOW_DOWN_IO; return _v; }

#define __INS(s) \
extern void ins##s(unsigned short port, void * addr, unsigned long count); \
extern __GNU_INLINE \
void ins##s(unsigned short port, void * addr, unsigned long count) \
{ __asm__ __volatile__ ("cld ; rep ; ins" #s \
: "=D" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }

#define __OUTS(s) \
extern void outs##s(unsigned short port, const void * addr, unsigned long count); \
extern __GNU_INLINE \
void outs##s(unsigned short port, const void * addr, unsigned long count) \
{ __asm__ __volatile__ ("cld ; rep ; outs" #s \
: "=S" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }

__IN(b,"", char)
__IN(w,"",short)
__IN(l,"", long)

__OUT(b,"b",char)
__OUT(w,"w",short)
__OUT(l,,int)

__INS(b)
__INS(w)
__INS(l)

__OUTS(b)
__OUTS(w)
__OUTS(l)
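
/*
 * Illustrative sketch (not part of the original header): how a NIC driver
 * might drain a 16-bit data port into a packet buffer with insw().  The
 * port and buffer are hypothetical parameters, not a real device's.
 */
static inline void example_read_fifo(unsigned short data_port,
	unsigned char *buf, unsigned long bytes)
{
	/* insw() transfers 16-bit words, so halve the byte count */
	insw(data_port, buf, bytes >> 1);
}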

/*
 * Note that due to the way __builtin_constant_p() works, you
 *  - can't use it inside an inline function (it will never be true)
 *  - don't have to worry about side effects within the __builtin..
 */
#define outb(val,port) \
((__builtin_constant_p((port)) && (port) < 256) ? \
	__outbc((val),(port)) : \
	__outb((val),(port)))

#define inb(port) \
((__builtin_constant_p((port)) && (port) < 256) ? \
	__inbc(port) : \
	__inb(port))

#define outb_p(val,port) \
((__builtin_constant_p((port)) && (port) < 256) ? \
	__outbc_p((val),(port)) : \
	__outb_p((val),(port)))

#define inb_p(port) \
((__builtin_constant_p((port)) && (port) < 256) ? \
	__inbc_p(port) : \
	__inb_p(port))

#define outw(val,port) \
((__builtin_constant_p((port)) && (port) < 256) ? \
	__outwc((val),(port)) : \
	__outw((val),(port)))

#define inw(port) \
((__builtin_constant_p((port)) && (port) < 256) ? \
	__inwc(port) : \
	__inw(port))

#define outw_p(val,port) \
((__builtin_constant_p((port)) && (port) < 256) ? \
	__outwc_p((val),(port)) : \
	__outw_p((val),(port)))

#define inw_p(port) \
((__builtin_constant_p((port)) && (port) < 256) ? \
	__inwc_p(port) : \
	__inw_p(port))

#define outl(val,port) \
((__builtin_constant_p((port)) && (port) < 256) ? \
	__outlc((val),(port)) : \
	__outl((val),(port)))

#define inl(port) \
((__builtin_constant_p((port)) && (port) < 256) ? \
	__inlc(port) : \
	__inl(port))

#define outl_p(val,port) \
((__builtin_constant_p((port)) && (port) < 256) ? \
	__outlc_p((val),(port)) : \
	__outl_p((val),(port)))

#define inl_p(port) \
((__builtin_constant_p((port)) && (port) < 256) ? \
	__inlc_p(port) : \
	__inl_p(port))

#endif /* IO_H */
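
/*
 * Illustrative note (not part of the original header): when the port is a
 * compile-time constant below 256, the macros above select the "c"
 * variants, which encode the port as an immediate operand, e.g.
 *
 *	outb(0x0a, 0x20);	expands to __outbc(0x0a, 0x20)	-> "outb %al,$0x20"
 *	outb(val, ioaddr);	expands to __outb(val, ioaddr)	-> "outb %al,%dx"
 */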