/****************************************************************************/

/*
 * mcfsmc.h -- SMC ethernet support for ColdFire environments.
 *
 * (C) Copyright 1999-2002, Greg Ungerer (gerg@snapgear.com)
 * (C) Copyright 2000, Lineo Inc. (www.lineo.com)
 */

/****************************************************************************/
#ifndef mcfsmc_h
#define mcfsmc_h
/****************************************************************************/

/*
 * None of the current ColdFire targets that use the SMC91x111
 * allow 8-bit accesses, so this code is 16-bit access only.
 */

#include <linux/config.h>

#undef	outb
#undef	inb
#undef	outw
#undef	outwd
#undef	inw
#undef	outl
#undef	inl

#undef	outsb
#undef	outsw
#undef	outsl
#undef	insb
#undef	insw
#undef	insl

/*
 * Re-defines for ColdFire environment... The SMC part is
 * mapped into memory space, so remap the PC-style in/out
 * routines to handle that.
 */
#define	outb	smc_outb
#define	inb	smc_inb
#define	outw	smc_outw
#define	outwd	smc_outwd
#define	inw	smc_inw
#define	outl	smc_outl
#define	inl	smc_inl

#define	outsb	smc_outsb
#define	outsw	smc_outsw
#define	outsl	smc_outsl
#define	insb	smc_insb
#define	insw	smc_insw
#define	insl	smc_insl
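
/*
 * For example, with the re-defines above a driver access such as
 *
 *	outw(0x0001, ioaddr + BANK_SELECT);
 *
 * expands to smc_outw(0x0001, ioaddr + BANK_SELECT), i.e. a byte-swapped
 * 16-bit store to the memory-mapped SMC register rather than a PC-style
 * I/O port write.
 */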

static inline int smc_inb(unsigned int addr)
{
	register unsigned short w;

	/*
	 * Do a 16-bit read and pick out the addressed byte; on the
	 * big-endian ColdFire bus the even address is the upper byte
	 * of the word.
	 */
	w = *((volatile unsigned short *) (addr & ~0x1));
	return (((addr & 0x1) ? w : (w >> 8)) & 0xff);
}

static inline void smc_outw(unsigned int val, unsigned int addr)
{
	*((volatile unsigned short *) addr) = (val << 8) | (val >> 8);
}

static inline int smc_inw(unsigned int addr)
{
	register unsigned short w;

	w = *((volatile unsigned short *) addr);
	return (((w << 8) | (w >> 8)) & 0xffff);
}

static inline void smc_outl(unsigned long val, unsigned int addr)
{
	*((volatile unsigned long *) addr) =
		((val << 8) & 0xff000000) | ((val >> 8) & 0x00ff0000) |
		((val << 8) & 0x0000ff00) | ((val >> 8) & 0x000000ff);
}
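
/*
 * smc_outwd() does no byte swapping at all (presumably "outw direct"),
 * for callers whose data is already in the chip's on-bus byte order.
 */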
static inline void smc_outwd(unsigned int val, unsigned int addr)
{
	*((volatile unsigned short *) addr) = val;
}

/*
 * The rep* functions are used to feed the data port with
 * raw data. So we do not byte swap them when copying.
 */

static inline void smc_insb(unsigned int addr, void *vbuf, unsigned long len)
{
	volatile unsigned short *rp;
	unsigned short *buf, *ebuf;

	buf = (unsigned short *) vbuf;
	rp = (volatile unsigned short *) addr;

	/* Copy as words for as long as possible */
	for (ebuf = buf + (len >> 1); (buf < ebuf); )
		*buf++ = *rp;

	/* Lastly, handle any left over byte */
	if (len & 0x1)
		*((unsigned char *) buf) = (*rp >> 8) & 0xff;
}

static inline void smc_insw(unsigned int addr, void *vbuf, unsigned long len)
{
	volatile unsigned short *rp;
	unsigned short *buf, *ebuf;

	buf = (unsigned short *) vbuf;
	rp = (volatile unsigned short *) addr;
	for (ebuf = buf + len; (buf < ebuf); )
		*buf++ = *rp;
}

static inline void smc_insl(unsigned int addr, void *vbuf, unsigned long len)
{
	volatile unsigned long *rp;
	unsigned long *buf, *ebuf;

	buf = (unsigned long *) vbuf;
	rp = (volatile unsigned long *) addr;
	for (ebuf = buf + len; (buf < ebuf); )
		*buf++ = *rp;
}

static inline void smc_outsw(unsigned int addr, const void *vbuf, unsigned long len)
{
	volatile unsigned short *rp;
	const unsigned short *buf, *ebuf;

	buf = (const unsigned short *) vbuf;
	rp = (volatile unsigned short *) addr;
	for (ebuf = buf + len; (buf < ebuf); )
		*rp = *buf++;
}

static inline void smc_outsl(unsigned int addr, const void *vbuf, unsigned long len)
{
	volatile unsigned long *rp;
	const unsigned long *buf, *ebuf;

	buf = (const unsigned long *) vbuf;
	rp = (volatile unsigned long *) addr;
	for (ebuf = buf + len; (buf < ebuf); )
		*rp = *buf++;
}
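
/*
 * For example, a driver would typically drain a received frame from the
 * chip's data register 16 bits at a time with something like
 *
 *	insw(ioaddr + DATA_REG, skb->data, (len + 1) >> 1);
 *
 * (the register and variable names here are only illustrative); the copy
 * keeps the bytes exactly as they come off the chip.
 */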

#ifdef CONFIG_NETtel
/*
 * Re-map the address space of at least one of the SMC ethernet
 * parts. Both parts power up decoding the same address, so we
 * need to move one of them first, before doing anything else.
 *
 * We also increase the number of wait states for this part by one.
 */
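
/*
 * Rough sequence (the register values are board specific): drive a
 * general purpose output via PADDR/PADAT, presumably so that only one
 * of the two parts responds, select bank 1 of that part and write its
 * BASE register to move its decode window, release the output, and
 * finally raise the wait-state setting in chip-select register CSCR3.
 */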

void smc_remap(unsigned int ioaddr)
{
	static int once = 0;
	extern unsigned short ppdata;

	if (once++ == 0) {
		*((volatile unsigned short *)(MCF_MBAR+MCFSIM_PADDR)) = 0x00ec;
		ppdata |= 0x0080;
		*((volatile unsigned short *)(MCF_MBAR+MCFSIM_PADAT)) = ppdata;
		outw(0x0001, ioaddr + BANK_SELECT);
		outw(0x0001, ioaddr + BANK_SELECT);
		outw(0x0067, ioaddr + BASE);

		ppdata &= ~0x0080;
		*((volatile unsigned short *)(MCF_MBAR+MCFSIM_PADAT)) = ppdata;
	}

	*((volatile unsigned short *)(MCF_MBAR+MCFSIM_CSCR3)) = 0x1180;
}

#endif	/* CONFIG_NETtel */

/****************************************************************************/
#endif	/* mcfsmc_h */