MOXA linux-2.6.x / linux-2.6.9-uc0 from sdlinux-moxaart.tgz
[linux-2.6.9-moxart.git] / include / asm-ppc64 / eeh.h
blob85bc6850f08155c480f823c8d03e4d2ef8574ec1
/*
 * eeh.h
 * Copyright (C) 2001 Dave Engebretsen & Todd Inglett IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
20 #ifndef _PPC64_EEH_H
21 #define _PPC64_EEH_H
23 #include <linux/string.h>
24 #include <linux/init.h>
26 struct pci_dev;
27 struct device_node;
/* Values for eeh_mode bits in device_node */
#define EEH_MODE_SUPPORTED	(1 << 0)	/* device participates in EEH */
#define EEH_MODE_NOCHECK	(1 << 1)	/* skip EEH checks for device */
#ifdef CONFIG_PPC_PSERIES
/* Core EEH entry points, implemented in the pSeries platform code. */
extern void __init eeh_init(void);
unsigned long eeh_check_failure(const volatile void __iomem *token, unsigned long val);
int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev);
void __iomem *eeh_ioremap(unsigned long addr, void __iomem *vaddr);
void __init pci_addr_cache_build(void);
#else
/* Without pSeries support there is nothing to check; pass val through. */
#define eeh_check_failure(token, val) (val)
#endif
/**
 * eeh_add_device_early
 * eeh_add_device_late
 *
 * Perform eeh initialization for devices added after boot.
 * Call eeh_add_device_early before doing any i/o to the
 * device (including config space i/o).  Call eeh_add_device_late
 * to finish the eeh setup for this device.
 */
struct device_node;
void eeh_add_device_early(struct device_node *);
void eeh_add_device_late(struct pci_dev *);
/**
 * eeh_remove_device - undo EEH setup for the indicated pci device
 * @dev: pci device to be removed
 *
 * This routine should be called when a device is removed from a
 * running system (e.g. by hotplug or dlpar).
 */
void eeh_remove_device(struct pci_dev *);
/* Option codes accepted by eeh_set_option(). */
#define EEH_DISABLE		0
#define EEH_ENABLE		1
#define EEH_RELEASE_LOADSTORE	2
#define EEH_RELEASE_DMA		3
int eeh_set_option(struct pci_dev *dev, int options);
/*
 * EEH_POSSIBLE_ERROR() -- test for possible MMIO failure.
 *
 * If this macro yields TRUE, the caller relays to eeh_check_failure()
 * which does further tests out of line.
 */
#define EEH_POSSIBLE_ERROR(val, type)	((val) == (type)~0)
/*
 * Reads from a device which has been isolated by EEH will return
 * all 1s.  This macro gives an all-1s value of the given size (in
 * bytes: 1, 2, or 4) for comparing with the result of a read.
 */
#define EEH_IO_ERROR_VALUE(size)	(~0U >> ((4 - (size)) * 8))
/*
 * MMIO read/write operations with EEH support.
 */
89 static inline u8 eeh_readb(const volatile void __iomem *addr) {
90 volatile u8 *vaddr = (volatile u8 __force *) addr;
91 u8 val = in_8(vaddr);
92 if (EEH_POSSIBLE_ERROR(val, u8))
93 return eeh_check_failure(addr, val);
94 return val;
96 static inline void eeh_writeb(u8 val, volatile void __iomem *addr) {
97 volatile u8 *vaddr = (volatile u8 __force *) addr;
98 out_8(vaddr, val);
101 static inline u16 eeh_readw(const volatile void __iomem *addr) {
102 volatile u16 *vaddr = (volatile u16 __force *) addr;
103 u16 val = in_le16(vaddr);
104 if (EEH_POSSIBLE_ERROR(val, u16))
105 return eeh_check_failure(addr, val);
106 return val;
108 static inline void eeh_writew(u16 val, volatile void __iomem *addr) {
109 volatile u16 *vaddr = (volatile u16 __force *) addr;
110 out_le16(vaddr, val);
112 static inline u16 eeh_raw_readw(const volatile void __iomem *addr) {
113 volatile u16 *vaddr = (volatile u16 __force *) addr;
114 u16 val = in_be16(vaddr);
115 if (EEH_POSSIBLE_ERROR(val, u16))
116 return eeh_check_failure(addr, val);
117 return val;
119 static inline void eeh_raw_writew(u16 val, volatile void __iomem *addr) {
120 volatile u16 *vaddr = (volatile u16 __force *) addr;
121 out_be16(vaddr, val);
124 static inline u32 eeh_readl(const volatile void __iomem *addr) {
125 volatile u32 *vaddr = (volatile u32 __force *) addr;
126 u32 val = in_le32(vaddr);
127 if (EEH_POSSIBLE_ERROR(val, u32))
128 return eeh_check_failure(addr, val);
129 return val;
131 static inline void eeh_writel(u32 val, volatile void __iomem *addr) {
132 volatile u32 *vaddr = (volatile u32 __force *) addr;
133 out_le32(vaddr, val);
135 static inline u32 eeh_raw_readl(const volatile void __iomem *addr) {
136 volatile u32 *vaddr = (volatile u32 __force *) addr;
137 u32 val = in_be32(vaddr);
138 if (EEH_POSSIBLE_ERROR(val, u32))
139 return eeh_check_failure(addr, val);
140 return val;
142 static inline void eeh_raw_writel(u32 val, volatile void __iomem *addr) {
143 volatile u32 *vaddr = (volatile u32 __force *) addr;
144 out_be32(vaddr, val);
147 static inline u64 eeh_readq(const volatile void __iomem *addr) {
148 volatile u64 *vaddr = (volatile u64 __force *) addr;
149 u64 val = in_le64(vaddr);
150 if (EEH_POSSIBLE_ERROR(val, u64))
151 return eeh_check_failure(addr, val);
152 return val;
154 static inline void eeh_writeq(u64 val, volatile void __iomem *addr) {
155 volatile u64 *vaddr = (volatile u64 __force *) addr;
156 out_le64(vaddr, val);
158 static inline u64 eeh_raw_readq(const volatile void __iomem *addr) {
159 volatile u64 *vaddr = (volatile u64 __force *) addr;
160 u64 val = in_be64(vaddr);
161 if (EEH_POSSIBLE_ERROR(val, u64))
162 return eeh_check_failure(addr, val);
163 return val;
165 static inline void eeh_raw_writeq(u64 val, volatile void __iomem *addr) {
166 volatile u64 *vaddr = (volatile u64 __force *) addr;
167 out_be64(vaddr, val);
/* True when v is aligned to a bytes (a must be a power of two). */
#define EEH_CHECK_ALIGN(v, a) \
	((((unsigned long)(v)) & ((a) - 1)) == 0)
173 static inline void eeh_memset_io(volatile void __iomem *addr, int c, unsigned long n) {
174 void *vaddr = (void __force *) addr;
175 u32 lc = c;
176 lc |= lc << 8;
177 lc |= lc << 16;
179 while(n && !EEH_CHECK_ALIGN(vaddr, 4)) {
180 *((volatile u8 *)vaddr) = c;
181 vaddr = (void *)((unsigned long)vaddr + 1);
182 n--;
184 while(n >= 4) {
185 *((volatile u32 *)vaddr) = lc;
186 vaddr = (void *)((unsigned long)vaddr + 4);
187 n -= 4;
189 while(n) {
190 *((volatile u8 *)vaddr) = c;
191 vaddr = (void *)((unsigned long)vaddr + 1);
192 n--;
194 __asm__ __volatile__ ("sync" : : : "memory");
196 static inline void eeh_memcpy_fromio(void *dest, const volatile void __iomem *src, unsigned long n) {
197 void *vsrc = (void __force *) src;
198 void *destsave = dest;
199 unsigned long nsave = n;
201 while(n && (!EEH_CHECK_ALIGN(vsrc, 4) || !EEH_CHECK_ALIGN(dest, 4))) {
202 *((u8 *)dest) = *((volatile u8 *)vsrc);
203 __asm__ __volatile__ ("eieio" : : : "memory");
204 vsrc = (void *)((unsigned long)vsrc + 1);
205 dest = (void *)((unsigned long)dest + 1);
206 n--;
208 while(n > 4) {
209 *((u32 *)dest) = *((volatile u32 *)vsrc);
210 __asm__ __volatile__ ("eieio" : : : "memory");
211 vsrc = (void *)((unsigned long)vsrc + 4);
212 dest = (void *)((unsigned long)dest + 4);
213 n -= 4;
215 while(n) {
216 *((u8 *)dest) = *((volatile u8 *)vsrc);
217 __asm__ __volatile__ ("eieio" : : : "memory");
218 vsrc = (void *)((unsigned long)vsrc + 1);
219 dest = (void *)((unsigned long)dest + 1);
220 n--;
222 __asm__ __volatile__ ("sync" : : : "memory");
224 /* Look for ffff's here at dest[n]. Assume that at least 4 bytes
225 * were copied. Check all four bytes.
227 if ((nsave >= 4) &&
228 (EEH_POSSIBLE_ERROR((*((u32 *) destsave+nsave-4)), u32))) {
229 eeh_check_failure(src, (*((u32 *) destsave+nsave-4)));
233 static inline void eeh_memcpy_toio(volatile void __iomem *dest, const void *src, unsigned long n) {
234 void *vdest = (void __force *) dest;
236 while(n && (!EEH_CHECK_ALIGN(vdest, 4) || !EEH_CHECK_ALIGN(src, 4))) {
237 *((volatile u8 *)vdest) = *((u8 *)src);
238 src = (void *)((unsigned long)src + 1);
239 vdest = (void *)((unsigned long)vdest + 1);
240 n--;
242 while(n > 4) {
243 *((volatile u32 *)vdest) = *((volatile u32 *)src);
244 src = (void *)((unsigned long)src + 4);
245 vdest = (void *)((unsigned long)vdest + 4);
246 n-=4;
248 while(n) {
249 *((volatile u8 *)vdest) = *((u8 *)src);
250 src = (void *)((unsigned long)src + 1);
251 vdest = (void *)((unsigned long)vdest + 1);
252 n--;
254 __asm__ __volatile__ ("sync" : : : "memory");
257 #undef EEH_CHECK_ALIGN
#define MAX_ISA_PORT 0x10000
extern unsigned long io_page_mask;

/*
 * A port is valid when it is below the ISA limit, or when the bit for
 * its IO page is set in io_page_mask.  The argument is fully
 * parenthesized (bug fix: the old macro expanded "port>>PAGE_SHIFT"
 * unparenthesized, so an argument like base + off shifted only "off"
 * due to operator precedence).
 */
#define _IO_IS_VALID(port) ((port) >= MAX_ISA_PORT || \
			    (1 << ((port) >> PAGE_SHIFT)) & io_page_mask)
263 static inline u8 eeh_inb(unsigned long port) {
264 u8 val;
265 if (!_IO_IS_VALID(port))
266 return ~0;
267 val = in_8((u8 *)(port+pci_io_base));
268 if (EEH_POSSIBLE_ERROR(val, u8))
269 return eeh_check_failure((void __iomem *)(port), val);
270 return val;
273 static inline void eeh_outb(u8 val, unsigned long port) {
274 if (_IO_IS_VALID(port))
275 out_8((u8 *)(port+pci_io_base), val);
278 static inline u16 eeh_inw(unsigned long port) {
279 u16 val;
280 if (!_IO_IS_VALID(port))
281 return ~0;
282 val = in_le16((u16 *)(port+pci_io_base));
283 if (EEH_POSSIBLE_ERROR(val, u16))
284 return eeh_check_failure((void __iomem *)(port), val);
285 return val;
288 static inline void eeh_outw(u16 val, unsigned long port) {
289 if (_IO_IS_VALID(port))
290 out_le16((u16 *)(port+pci_io_base), val);
293 static inline u32 eeh_inl(unsigned long port) {
294 u32 val;
295 if (!_IO_IS_VALID(port))
296 return ~0;
297 val = in_le32((u32 *)(port+pci_io_base));
298 if (EEH_POSSIBLE_ERROR(val, u32))
299 return eeh_check_failure((void __iomem *)(port), val);
300 return val;
303 static inline void eeh_outl(u32 val, unsigned long port) {
304 if (_IO_IS_VALID(port))
305 out_le32((u32 *)(port+pci_io_base), val);
308 /* in-string eeh macros */
309 static inline void eeh_insb(unsigned long port, void * buf, int ns) {
310 _insb((u8 *)(port+pci_io_base), buf, ns);
311 if (EEH_POSSIBLE_ERROR((*(((u8*)buf)+ns-1)), u8))
312 eeh_check_failure((void __iomem *)(port), *(u8*)buf);
315 static inline void eeh_insw_ns(unsigned long port, void * buf, int ns) {
316 _insw_ns((u16 *)(port+pci_io_base), buf, ns);
317 if (EEH_POSSIBLE_ERROR((*(((u16*)buf)+ns-1)), u16))
318 eeh_check_failure((void __iomem *)(port), *(u16*)buf);
321 static inline void eeh_insl_ns(unsigned long port, void * buf, int nl) {
322 _insl_ns((u32 *)(port+pci_io_base), buf, nl);
323 if (EEH_POSSIBLE_ERROR((*(((u32*)buf)+nl-1)), u32))
324 eeh_check_failure((void __iomem *)(port), *(u32*)buf);
327 #endif /* _PPC64_EEH_H */