[linux/fpc-iii.git] / include/asm-powerpc/system.h
/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef _ASM_POWERPC_SYSTEM_H
#define _ASM_POWERPC_SYSTEM_H

#include <linux/kernel.h>

#include <asm/hw_irq.h>
#include <asm/atomic.h>

/*
 * Memory barrier.
 * The sync instruction guarantees that all memory accesses initiated
 * by this processor have been performed (with respect to all other
 * mechanisms that access memory).  The eieio instruction is a barrier
 * providing an ordering (separately) for (a) cacheable stores and (b)
 * loads and stores to non-cacheable memory (e.g. I/O devices).
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 * read_barrier_depends() prevents data-dependent loads being reordered
 *      across this point (nop on PPC).
 *
 * We have to use the sync instructions for mb(), since lwsync doesn't
 * order loads with respect to previous stores.  Lwsync is fine for
 * rmb(), though.  Note that lwsync is interpreted as sync by
 * 32-bit and older 64-bit CPUs.
 *
 * For wmb(), we use sync since wmb is used in drivers to order
 * stores to system memory with respect to writes to the device.
 * However, smp_wmb() can be a lighter-weight eieio barrier on
 * SMP since it is only used to order updates to system memory.
 */
#define mb()    __asm__ __volatile__ ("sync" : : : "memory")
#define rmb()   __asm__ __volatile__ ("lwsync" : : : "memory")
#define wmb()   __asm__ __volatile__ ("sync" : : : "memory")
#define read_barrier_depends()  do { } while(0)

#define set_mb(var, value)      do { var = value; mb(); } while (0)

#ifdef __KERNEL__
#ifdef CONFIG_SMP
#define smp_mb()        mb()
#define smp_rmb()       rmb()
#define smp_wmb()       __asm__ __volatile__ ("eieio" : : : "memory")
#define smp_read_barrier_depends()      read_barrier_depends()
#else
#define smp_mb()        barrier()
#define smp_rmb()       barrier()
#define smp_wmb()       barrier()
#define smp_read_barrier_depends()      do { } while(0)
#endif /* CONFIG_SMP */
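
/*
 * To show how the SMP variants are meant to pair up, here is a hypothetical
 * producer/consumer sketch; the variables and functions below are invented
 * purely for illustration:
 *
 *      static int example_data, example_flag;
 *
 *      static void example_producer(void)
 *      {
 *              example_data = 42;
 *              smp_wmb();              // publish the data before the flag
 *              example_flag = 1;
 *      }
 *
 *      static void example_consumer(void)
 *      {
 *              if (example_flag) {
 *                      smp_rmb();      // order the flag load before the data load
 *                      BUG_ON(example_data != 42);
 *              }
 *      }
 *
 * On SMP the two calls expand to eieio and lwsync; on UP they collapse to
 * compiler barriers, which is all that is required.
 */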

/*
 * This is a barrier which prevents following instructions from being
 * started until the value of the argument x is known.  For example, if
 * x is a variable loaded from memory, this prevents following
 * instructions from being executed until the load has been performed.
 */
#define data_barrier(x) \
        asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory");
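
/*
 * For example (all names below are illustrative, not real kernel symbols),
 * a driver polling a status word can use data_barrier() to keep dependent
 * accesses from starting before the status load has completed:
 *
 *      status = ring->status;          // load from a shared descriptor
 *      data_barrier(status);           // stall until 'status' is known
 *      if (status & EXAMPLE_DESC_DONE)
 *              example_process(ring);
 *
 * The twi 0,%0,0 never traps (trap-on-nothing); it only creates a register
 * dependency on x, and the isync keeps later instructions from starting
 * until that dependency has resolved.
 */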

struct task_struct;
struct pt_regs;

#ifdef CONFIG_DEBUGGER

extern int (*__debugger)(struct pt_regs *regs);
extern int (*__debugger_ipi)(struct pt_regs *regs);
extern int (*__debugger_bpt)(struct pt_regs *regs);
extern int (*__debugger_sstep)(struct pt_regs *regs);
extern int (*__debugger_iabr_match)(struct pt_regs *regs);
extern int (*__debugger_dabr_match)(struct pt_regs *regs);
extern int (*__debugger_fault_handler)(struct pt_regs *regs);

#define DEBUGGER_BOILERPLATE(__NAME) \
static inline int __NAME(struct pt_regs *regs) \
{ \
        if (unlikely(__ ## __NAME)) \
                return __ ## __NAME(regs); \
        return 0; \
}

DEBUGGER_BOILERPLATE(debugger)
DEBUGGER_BOILERPLATE(debugger_ipi)
DEBUGGER_BOILERPLATE(debugger_bpt)
DEBUGGER_BOILERPLATE(debugger_sstep)
DEBUGGER_BOILERPLATE(debugger_iabr_match)
DEBUGGER_BOILERPLATE(debugger_dabr_match)
DEBUGGER_BOILERPLATE(debugger_fault_handler)

#ifdef CONFIG_XMON
extern void xmon_init(int enable);
#endif

#else
static inline int debugger(struct pt_regs *regs) { return 0; }
static inline int debugger_ipi(struct pt_regs *regs) { return 0; }
static inline int debugger_bpt(struct pt_regs *regs) { return 0; }
static inline int debugger_sstep(struct pt_regs *regs) { return 0; }
static inline int debugger_iabr_match(struct pt_regs *regs) { return 0; }
static inline int debugger_dabr_match(struct pt_regs *regs) { return 0; }
static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; }
#endif

extern int set_dabr(unsigned long dabr);
extern void print_backtrace(unsigned long *);
extern void show_regs(struct pt_regs * regs);
extern void flush_instruction_cache(void);
extern void hard_reset_now(void);
extern void poweroff_now(void);

#ifdef CONFIG_6xx
extern long _get_L2CR(void);
extern long _get_L3CR(void);
extern void _set_L2CR(unsigned long);
extern void _set_L3CR(unsigned long);
#else
#define _get_L2CR()     0L
#define _get_L3CR()     0L
#define _set_L2CR(val)  do { } while(0)
#define _set_L3CR(val)  do { } while(0)
#endif

extern void via_cuda_init(void);
extern void read_rtc_time(void);
extern void pmac_find_display(void);
extern void giveup_fpu(struct task_struct *);
extern void disable_kernel_fp(void);
extern void enable_kernel_fp(void);
extern void flush_fp_to_thread(struct task_struct *);
extern void enable_kernel_altivec(void);
extern void giveup_altivec(struct task_struct *);
extern void load_up_altivec(struct task_struct *);
extern int emulate_altivec(struct pt_regs *);
extern void giveup_spe(struct task_struct *);
extern void load_up_spe(struct task_struct *);
extern int fix_alignment(struct pt_regs *);
extern void cvt_fd(float *from, double *to, struct thread_struct *thread);
extern void cvt_df(double *from, float *to, struct thread_struct *thread);

#ifndef CONFIG_SMP
extern void discard_lazy_cpu_state(void);
#else
static inline void discard_lazy_cpu_state(void)
{
}
#endif

#ifdef CONFIG_ALTIVEC
extern void flush_altivec_to_thread(struct task_struct *);
#else
static inline void flush_altivec_to_thread(struct task_struct *t)
{
}
#endif

#ifdef CONFIG_SPE
extern void flush_spe_to_thread(struct task_struct *);
#else
static inline void flush_spe_to_thread(struct task_struct *t)
{
}
#endif

extern int call_rtas(const char *, int, int, unsigned long *, ...);
extern void cacheable_memzero(void *p, unsigned int nb);
extern void *cacheable_memcpy(void *, const void *, unsigned int);
extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long);
extern void bad_page_fault(struct pt_regs *, unsigned long, int);
extern int die(const char *, struct pt_regs *, long);
extern void _exception(int, struct pt_regs *, int, unsigned long);
#ifdef CONFIG_BOOKE_WDT
extern u32 booke_wdt_enabled;
extern u32 booke_wdt_period;
#endif /* CONFIG_BOOKE_WDT */

/* EBCDIC -> ASCII conversion for [0-9A-Z] on iSeries */
extern unsigned char e2a(unsigned char);
extern unsigned char* strne2a(unsigned char *dest,
                const unsigned char *src, size_t n);

struct device_node;
extern void note_scsi_host(struct device_node *, void *);

extern struct task_struct *__switch_to(struct task_struct *,
        struct task_struct *);
#define switch_to(prev, next, last)     ((last) = __switch_to((prev), (next)))

struct thread_struct;
extern struct task_struct *_switch(struct thread_struct *prev,
                                   struct thread_struct *next);

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}

extern unsigned int rtas_data;
extern int mem_init_done;       /* set on boot once kmalloc can be called */
extern unsigned long memory_limit;
extern unsigned long klimit;

extern int powersave_nap;       /* set if nap mode can be used in idle loop */

/*
 * Atomic exchange
 *
 * Changes the memory location '*ptr' to be val and returns
 * the previous value stored there.
 */
static __inline__ unsigned long
__xchg_u32(volatile void *p, unsigned long val)
{
        unsigned long prev;

        __asm__ __volatile__(
        LWSYNC_ON_SMP
"1:     lwarx   %0,0,%2 \n"
        PPC405_ERR77(0,%2)
"       stwcx.  %3,0,%2 \n\
        bne-    1b"
        ISYNC_ON_SMP
        : "=&r" (prev), "+m" (*(volatile unsigned int *)p)
        : "r" (p), "r" (val)
        : "cc", "memory");

        return prev;
}

#ifdef CONFIG_PPC64
static __inline__ unsigned long
__xchg_u64(volatile void *p, unsigned long val)
{
        unsigned long prev;

        __asm__ __volatile__(
        LWSYNC_ON_SMP
"1:     ldarx   %0,0,%2 \n"
        PPC405_ERR77(0,%2)
"       stdcx.  %3,0,%2 \n\
        bne-    1b"
        ISYNC_ON_SMP
        : "=&r" (prev), "+m" (*(volatile unsigned long *)p)
        : "r" (p), "r" (val)
        : "cc", "memory");

        return prev;
}
#endif

/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid xchg().
 */
extern void __xchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__xchg(volatile void *ptr, unsigned long x, unsigned int size)
{
        switch (size) {
        case 4:
                return __xchg_u32(ptr, x);
#ifdef CONFIG_PPC64
        case 8:
                return __xchg_u64(ptr, x);
#endif
        }
        __xchg_called_with_bad_pointer();
        return x;
}

#define xchg(ptr,x) \
  ({ \
     __typeof__(*(ptr)) _x_ = (x); \
     (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
  })

#define tas(ptr) (xchg((ptr),1))
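
/*
 * A typical use of xchg()/tas() is a simple test-and-set protocol; the lock
 * variable and functions below are made up for the illustration:
 *
 *      static unsigned int example_lock;       // 0 = free, 1 = held
 *
 *      static void example_acquire(void)
 *      {
 *              while (tas(&example_lock))      // old value 1 => someone else holds it
 *                      cpu_relax();
 *      }
 *
 *      static void example_release(void)
 *      {
 *              smp_mb();                       // order the critical section before the store
 *              example_lock = 0;
 *      }
 *
 * The LWSYNC_ON_SMP/ISYNC_ON_SMP around the reservation loop are what give
 * xchg() its ordering guarantees on SMP.
 */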

/*
 * Compare and exchange - if *p == old, set it to new,
 * and return the old value of *p.
 */
#define __HAVE_ARCH_CMPXCHG     1

static __inline__ unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
{
        unsigned int prev;

        __asm__ __volatile__ (
        LWSYNC_ON_SMP
"1:     lwarx   %0,0,%2         # __cmpxchg_u32\n\
        cmpw    0,%0,%3\n\
        bne-    2f\n"
        PPC405_ERR77(0,%2)
"       stwcx.  %4,0,%2\n\
        bne-    1b"
        ISYNC_ON_SMP
        "\n\
2:"
        : "=&r" (prev), "+m" (*p)
        : "r" (p), "r" (old), "r" (new)
        : "cc", "memory");

        return prev;
}

#ifdef CONFIG_PPC64
static __inline__ unsigned long
__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
{
        unsigned long prev;

        __asm__ __volatile__ (
        LWSYNC_ON_SMP
"1:     ldarx   %0,0,%2         # __cmpxchg_u64\n\
        cmpd    0,%0,%3\n\
        bne-    2f\n\
        stdcx.  %4,0,%2\n\
        bne-    1b"
        ISYNC_ON_SMP
        "\n\
2:"
        : "=&r" (prev), "+m" (*p)
        : "r" (p), "r" (old), "r" (new)
        : "cc", "memory");

        return prev;
}
#endif

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
extern void __cmpxchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
          unsigned int size)
{
        switch (size) {
        case 4:
                return __cmpxchg_u32(ptr, old, new);
#ifdef CONFIG_PPC64
        case 8:
                return __cmpxchg_u64(ptr, old, new);
#endif
        }
        __cmpxchg_called_with_bad_pointer();
        return old;
}

#define cmpxchg(ptr,o,n) \
  ({ \
     __typeof__(*(ptr)) _o_ = (o); \
     __typeof__(*(ptr)) _n_ = (n); \
     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
                                    (unsigned long)_n_, sizeof(*(ptr))); \
  })
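
/*
 * The usual cmpxchg() pattern is a read/modify/retry loop.  A hypothetical
 * saturating counter, with every name invented for the example:
 *
 *      static unsigned int example_count;
 *
 *      static void example_inc_saturating(unsigned int limit)
 *      {
 *              unsigned int old, new;
 *
 *              do {
 *                      old = example_count;
 *                      new = (old < limit) ? old + 1 : old;
 *              } while (cmpxchg(&example_count, old, new) != old);
 *      }
 *
 * The loop only retries when another CPU changed example_count between the
 * read and the stwcx.; on success the value returned equals 'old'.
 */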

#ifdef CONFIG_PPC64
/*
 * We handle most unaligned accesses in hardware.  On the other hand
 * unaligned DMA can be very expensive on some ppc64 IO chips (it does
 * powers of 2 writes until it reaches sufficient alignment).
 *
 * Based on this we disable the IP header alignment in network drivers.
 * We also modify NET_SKB_PAD to be a cacheline in size, thus maintaining
 * cacheline alignment of buffers.
 */
#define NET_IP_ALIGN    0
#define NET_SKB_PAD     L1_CACHE_BYTES
#endif

#define arch_align_stack(x) (x)

/* Used in very early kernel initialization. */
extern unsigned long reloc_offset(void);
extern unsigned long add_reloc_offset(unsigned long);
extern void reloc_got2(unsigned long);

#define PTRRELOC(x)     ((typeof(x)) add_reloc_offset((unsigned long)(x)))

static inline void create_instruction(unsigned long addr, unsigned int instr)
{
        unsigned int *p;
        p  = (unsigned int *)addr;
        *p = instr;
        asm ("dcbst 0, %0; sync; icbi 0,%0; sync; isync" : : "r" (p));
}

/* Flags for create_branch:
 * "b"   == create_branch(addr, target, 0);
 * "ba"  == create_branch(addr, target, BRANCH_ABSOLUTE);
 * "bl"  == create_branch(addr, target, BRANCH_SET_LINK);
 * "bla" == create_branch(addr, target, BRANCH_ABSOLUTE | BRANCH_SET_LINK);
 */
#define BRANCH_SET_LINK 0x1
#define BRANCH_ABSOLUTE 0x2

static inline void create_branch(unsigned long addr,
                unsigned long target, int flags)
{
        unsigned int instruction;

        if (! (flags & BRANCH_ABSOLUTE))
                target = target - addr;

        /* Mask out the flags and target, so they don't step on each other. */
        instruction = 0x48000000 | (flags & 0x3) | (target & 0x03FFFFFC);

        create_instruction(addr, instruction);
}

static inline void create_function_call(unsigned long addr, void * func)
{
        unsigned long func_addr;

#ifdef CONFIG_PPC64
        /*
         * On PPC64 the function pointer actually points to the function's
         * descriptor.  The first entry in the descriptor is the address
         * of the function text.
         */
        func_addr = *(unsigned long *)func;
#else
        func_addr = (unsigned long)func;
#endif
        create_branch(addr, func_addr, BRANCH_SET_LINK);
}
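
/*
 * Together these helpers implement simple runtime code patching: encode a
 * branch, write it over an existing instruction, then flush it to the
 * instruction cache.  A hypothetical use, with the address and handler
 * invented for the sketch:
 *
 *      extern void example_handler(void);
 *
 *      // turn the instruction at patch_addr into "bl example_handler"
 *      create_function_call(patch_addr, example_handler);
 *
 * create_branch() emits a relative branch unless BRANCH_ABSOLUTE is set, so
 * the target must lie within +/- 32MB of the patched address for the 26-bit
 * displacement to fit.
 */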

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
extern void account_system_vtime(struct task_struct *);
#endif

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_SYSTEM_H */