include/asm-ppc64/eeh.h (Linux v2.6.13-rc3)
/*
 * eeh.h
 * Copyright (C) 2001  Dave Engebretsen & Todd Inglett IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#ifndef _PPC64_EEH_H
#define _PPC64_EEH_H

#include <linux/config.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/string.h>
struct pci_dev;
struct device_node;
struct notifier_block;
#ifdef CONFIG_EEH

/* Values for eeh_mode bits in device_node */
#define EEH_MODE_SUPPORTED	(1<<0)
#define EEH_MODE_NOCHECK	(1<<1)
#define EEH_MODE_ISOLATED	(1<<2)
void __init eeh_init(void);
unsigned long eeh_check_failure(const volatile void __iomem *token,
				unsigned long val);
int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev);
void __init pci_addr_cache_build(void);
/**
 * eeh_add_device_early
 * eeh_add_device_late
 *
 * Perform eeh initialization for devices added after boot.
 * Call eeh_add_device_early before doing any i/o to the
 * device (including config space i/o).  Call eeh_add_device_late
 * to finish the eeh setup for this device.
 */
void eeh_add_device_early(struct device_node *);
void eeh_add_device_late(struct pci_dev *);
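
/*
 * Illustrative sketch (not part of the EEH interface): the intended
 * calling order when a device is added after boot.  The scan step is
 * elided, and example_hotplug_add() is a hypothetical caller, not a
 * real kernel function.
 */
#if 0
static void example_hotplug_add(struct device_node *dn)
{
	struct pci_dev *dev;

	/* 1. Tell EEH about the slot before any i/o, including
	 *    config-space i/o. */
	eeh_add_device_early(dn);

	/* 2. Scan and configure the new device here (elided); this is
	 *    where the first config-space accesses happen. */
	dev = NULL;	/* placeholder for the scanned pci_dev */

	/* 3. Once a pci_dev exists, finish the EEH setup. */
	if (dev)
		eeh_add_device_late(dev);
}
#endif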
/**
 * eeh_remove_device - undo EEH setup for the indicated pci device
 * @dev: pci device to be removed
 *
 * This routine should be called when a device is removed from a
 * running system (e.g. by hotplug or dlpar).
 */
void eeh_remove_device(struct pci_dev *);
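
/*
 * Illustrative sketch (hypothetical caller, not part of this header):
 * undo the EEH setup before the pci_dev itself is torn down.
 */
#if 0
static void example_hotplug_remove(struct pci_dev *dev)
{
	eeh_remove_device(dev);
	/* ... then detach and free the device as usual (elided) ... */
}
#endif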
#define EEH_DISABLE		0
#define EEH_ENABLE		1
#define EEH_RELEASE_LOADSTORE	2
#define EEH_RELEASE_DMA		3
/**
 * Notifier event flags.
 */
#define EEH_NOTIFY_FREEZE	1
/** EEH event -- structure holding pci slot data that describes
 * a change in the isolation status of a PCI slot.  A pointer
 * to this struct is passed as the data pointer in a notify callback.
 */
struct eeh_event {
	struct list_head	list;
	struct pci_dev		*dev;
	struct device_node	*dn;
	int			reset_state;
};
/** Register to find out about EEH events. */
int eeh_register_notifier(struct notifier_block *nb);
int eeh_unregister_notifier(struct notifier_block *nb);
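
/*
 * Illustrative sketch of a notifier consumer (not part of this header).
 * It assumes <linux/notifier.h> for struct notifier_block and NOTIFY_OK;
 * example_eeh_notify() and example_eeh_nb are hypothetical names.
 */
#if 0
static int example_eeh_notify(struct notifier_block *self,
			      unsigned long event, void *data)
{
	struct eeh_event *e = data;

	if (event == EEH_NOTIFY_FREEZE && e->dev)
		printk(KERN_ERR "EEH: %s isolated, reset_state %d\n",
		       pci_name(e->dev), e->reset_state);
	return NOTIFY_OK;
}

static struct notifier_block example_eeh_nb = {
	.notifier_call = example_eeh_notify,
};

/* during driver/platform init:  eeh_register_notifier(&example_eeh_nb); */
#endif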
/**
 * EEH_POSSIBLE_ERROR() -- test for possible MMIO failure.
 *
 * If this macro yields TRUE, the caller relays to eeh_check_failure()
 * which does further tests out of line.
 */
#define EEH_POSSIBLE_ERROR(val, type)	((val) == (type)~0)
/*
 * Reads from a device which has been isolated by EEH will return
 * all 1s.  This macro gives an all-1s value of the given size (in
 * bytes: 1, 2, or 4) for comparing with the result of a read.
 */
#define EEH_IO_ERROR_VALUE(size)	(~0U >> ((4 - (size)) * 8))
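
/*
 * Illustrative sketch (hypothetical driver fragment, not part of this
 * header).  On ppc64 the regular readl() is normally routed through
 * eeh_readl() below, which already performs this check; the open-coded
 * version only shows what EEH_IO_ERROR_VALUE() is for.
 */
#if 0
static u32 example_read_status(void __iomem *regs)
{
	u32 status = readl(regs);

	/* All 1s may be a legitimate register value or an isolated
	 * slot; eeh_check_failure() makes the authoritative test. */
	if (status == EEH_IO_ERROR_VALUE(4))
		status = eeh_check_failure(regs, status);

	return status;
}
#endif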
#else /* !CONFIG_EEH */
static inline void eeh_init(void) { }

static inline unsigned long eeh_check_failure(const volatile void __iomem *token, unsigned long val)
{
	return val;
}

static inline int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
{
	return 0;
}

static inline void pci_addr_cache_build(void) { }

static inline void eeh_add_device_early(struct device_node *dn) { }

static inline void eeh_add_device_late(struct pci_dev *dev) { }

static inline void eeh_remove_device(struct pci_dev *dev) { }

#define EEH_POSSIBLE_ERROR(val, type) (0)
#define EEH_IO_ERROR_VALUE(size) (-1UL)
#endif /* CONFIG_EEH */
/*
 * MMIO read/write operations with EEH support.
 *
 * The plain eeh_read/eeh_write helpers use the little-endian accessors
 * (in_le16/out_le16 and friends); the eeh_raw_ variants use the
 * big-endian, no-swap accessors (in_be16/out_be16 and friends).
 */
static inline u8 eeh_readb(const volatile void __iomem *addr)
{
	u8 val = in_8(addr);
	if (EEH_POSSIBLE_ERROR(val, u8))
		return eeh_check_failure(addr, val);
	return val;
}
static inline void eeh_writeb(u8 val, volatile void __iomem *addr)
{
	out_8(addr, val);
}

static inline u16 eeh_readw(const volatile void __iomem *addr)
{
	u16 val = in_le16(addr);
	if (EEH_POSSIBLE_ERROR(val, u16))
		return eeh_check_failure(addr, val);
	return val;
}
static inline void eeh_writew(u16 val, volatile void __iomem *addr)
{
	out_le16(addr, val);
}
static inline u16 eeh_raw_readw(const volatile void __iomem *addr)
{
	u16 val = in_be16(addr);
	if (EEH_POSSIBLE_ERROR(val, u16))
		return eeh_check_failure(addr, val);
	return val;
}
static inline void eeh_raw_writew(u16 val, volatile void __iomem *addr)
{
	volatile u16 __iomem *vaddr = (volatile u16 __iomem *) addr;
	out_be16(vaddr, val);
}

static inline u32 eeh_readl(const volatile void __iomem *addr)
{
	u32 val = in_le32(addr);
	if (EEH_POSSIBLE_ERROR(val, u32))
		return eeh_check_failure(addr, val);
	return val;
}
static inline void eeh_writel(u32 val, volatile void __iomem *addr)
{
	out_le32(addr, val);
}
static inline u32 eeh_raw_readl(const volatile void __iomem *addr)
{
	u32 val = in_be32(addr);
	if (EEH_POSSIBLE_ERROR(val, u32))
		return eeh_check_failure(addr, val);
	return val;
}
static inline void eeh_raw_writel(u32 val, volatile void __iomem *addr)
{
	out_be32(addr, val);
}

static inline u64 eeh_readq(const volatile void __iomem *addr)
{
	u64 val = in_le64(addr);
	if (EEH_POSSIBLE_ERROR(val, u64))
		return eeh_check_failure(addr, val);
	return val;
}
static inline void eeh_writeq(u64 val, volatile void __iomem *addr)
{
	out_le64(addr, val);
}
static inline u64 eeh_raw_readq(const volatile void __iomem *addr)
{
	u64 val = in_be64(addr);
	if (EEH_POSSIBLE_ERROR(val, u64))
		return eeh_check_failure(addr, val);
	return val;
}
static inline void eeh_raw_writeq(u64 val, volatile void __iomem *addr)
{
	out_be64(addr, val);
}
#define EEH_CHECK_ALIGN(v,a) \
	((((unsigned long)(v)) & ((a) - 1)) == 0)

/* Fill n bytes of MMIO space at addr with byte c: byte stores until
 * addr is word aligned, then word stores, then a byte tail. */
static inline void eeh_memset_io(volatile void __iomem *addr, int c,
				 unsigned long n)
{
	u32 lc = c;
	lc |= lc << 8;
	lc |= lc << 16;

	while(n && !EEH_CHECK_ALIGN(addr, 4)) {
		*((volatile u8 *)addr) = c;
		addr = (void *)((unsigned long)addr + 1);
		n--;
	}
	while(n >= 4) {
		*((volatile u32 *)addr) = lc;
		addr = (void *)((unsigned long)addr + 4);
		n -= 4;
	}
	while(n) {
		*((volatile u8 *)addr) = c;
		addr = (void *)((unsigned long)addr + 1);
		n--;
	}
	__asm__ __volatile__ ("sync" : : : "memory");
}
/* Copy n bytes from MMIO space into normal memory, then check the
 * copied data for an all-1s word that may indicate an isolated slot. */
static inline void eeh_memcpy_fromio(void *dest, const volatile void __iomem *src,
				     unsigned long n)
{
	void *vsrc = (void __force *) src;
	void *destsave = dest;
	unsigned long nsave = n;

	while(n && (!EEH_CHECK_ALIGN(vsrc, 4) || !EEH_CHECK_ALIGN(dest, 4))) {
		*((u8 *)dest) = *((volatile u8 *)vsrc);
		__asm__ __volatile__ ("eieio" : : : "memory");
		vsrc = (void *)((unsigned long)vsrc + 1);
		dest = (void *)((unsigned long)dest + 1);
		n--;
	}
	while(n > 4) {
		*((u32 *)dest) = *((volatile u32 *)vsrc);
		__asm__ __volatile__ ("eieio" : : : "memory");
		vsrc = (void *)((unsigned long)vsrc + 4);
		dest = (void *)((unsigned long)dest + 4);
		n -= 4;
	}
	while(n) {
		*((u8 *)dest) = *((volatile u8 *)vsrc);
		__asm__ __volatile__ ("eieio" : : : "memory");
		vsrc = (void *)((unsigned long)vsrc + 1);
		dest = (void *)((unsigned long)dest + 1);
		n--;
	}
	__asm__ __volatile__ ("sync" : : : "memory");

	/* Look for ffff's here at dest[n].  Assume that at least 4 bytes
	 * were copied. Check all four bytes.
	 */
	if ((nsave >= 4) &&
	    (EEH_POSSIBLE_ERROR((*((u32 *) destsave+nsave-4)), u32))) {
		eeh_check_failure(src, (*((u32 *) destsave+nsave-4)));
	}
}
/* Copy n bytes from normal memory out to MMIO space, using the same
 * align-head / word-body / byte-tail pattern as above. */
static inline void eeh_memcpy_toio(volatile void __iomem *dest, const void *src,
				   unsigned long n)
{
	void *vdest = (void __force *) dest;

	while(n && (!EEH_CHECK_ALIGN(vdest, 4) || !EEH_CHECK_ALIGN(src, 4))) {
		*((volatile u8 *)vdest) = *((u8 *)src);
		src = (void *)((unsigned long)src + 1);
		vdest = (void *)((unsigned long)vdest + 1);
		n--;
	}
	while(n > 4) {
		*((volatile u32 *)vdest) = *((volatile u32 *)src);
		src = (void *)((unsigned long)src + 4);
		vdest = (void *)((unsigned long)vdest + 4);
		n -= 4;
	}
	while(n) {
		*((volatile u8 *)vdest) = *((u8 *)src);
		src = (void *)((unsigned long)src + 1);
		vdest = (void *)((unsigned long)vdest + 1);
		n--;
	}
	__asm__ __volatile__ ("sync" : : : "memory");
}

#undef EEH_CHECK_ALIGN
static inline u8 eeh_inb(unsigned long port)
{
	u8 val;
	if (!_IO_IS_VALID(port))
		return ~0;
	val = in_8((u8 __iomem *)(port+pci_io_base));
	if (EEH_POSSIBLE_ERROR(val, u8))
		return eeh_check_failure((void __iomem *)(port), val);
	return val;
}

static inline void eeh_outb(u8 val, unsigned long port)
{
	if (_IO_IS_VALID(port))
		out_8((u8 __iomem *)(port+pci_io_base), val);
}

static inline u16 eeh_inw(unsigned long port)
{
	u16 val;
	if (!_IO_IS_VALID(port))
		return ~0;
	val = in_le16((u16 __iomem *)(port+pci_io_base));
	if (EEH_POSSIBLE_ERROR(val, u16))
		return eeh_check_failure((void __iomem *)(port), val);
	return val;
}

static inline void eeh_outw(u16 val, unsigned long port)
{
	if (_IO_IS_VALID(port))
		out_le16((u16 __iomem *)(port+pci_io_base), val);
}

static inline u32 eeh_inl(unsigned long port)
{
	u32 val;
	if (!_IO_IS_VALID(port))
		return ~0;
	val = in_le32((u32 __iomem *)(port+pci_io_base));
	if (EEH_POSSIBLE_ERROR(val, u32))
		return eeh_check_failure((void __iomem *)(port), val);
	return val;
}

static inline void eeh_outl(u32 val, unsigned long port)
{
	if (_IO_IS_VALID(port))
		out_le32((u32 __iomem *)(port+pci_io_base), val);
}
/* in-string eeh macros */
static inline void eeh_insb(unsigned long port, void * buf, int ns)
{
	_insb((u8 __iomem *)(port+pci_io_base), buf, ns);
	if (EEH_POSSIBLE_ERROR((*(((u8*)buf)+ns-1)), u8))
		eeh_check_failure((void __iomem *)(port), *(u8*)buf);
}

static inline void eeh_insw_ns(unsigned long port, void * buf, int ns)
{
	_insw_ns((u16 __iomem *)(port+pci_io_base), buf, ns);
	if (EEH_POSSIBLE_ERROR((*(((u16*)buf)+ns-1)), u16))
		eeh_check_failure((void __iomem *)(port), *(u16*)buf);
}

static inline void eeh_insl_ns(unsigned long port, void * buf, int nl)
{
	_insl_ns((u32 __iomem *)(port+pci_io_base), buf, nl);
	if (EEH_POSSIBLE_ERROR((*(((u32*)buf)+nl-1)), u32))
		eeh_check_failure((void __iomem *)(port), *(u32*)buf);
}

#endif /* _PPC64_EEH_H */