include/asm-powerpc/system.h
/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef _ASM_POWERPC_SYSTEM_H
#define _ASM_POWERPC_SYSTEM_H

#include <linux/kernel.h>

#include <asm/hw_irq.h>
/*
 * Memory barrier.
 * The sync instruction guarantees that all memory accesses initiated
 * by this processor have been performed (with respect to all other
 * mechanisms that access memory).  The eieio instruction is a barrier
 * providing an ordering (separately) for (a) cacheable stores and (b)
 * loads and stores to non-cacheable memory (e.g. I/O devices).
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 * read_barrier_depends() prevents data-dependent loads being reordered
 *	across this point (nop on PPC).
 *
 * We have to use the sync instructions for mb(), since lwsync doesn't
 * order loads with respect to previous stores.  Lwsync is fine for
 * rmb(), though.  Note that rmb() actually uses a sync on 32-bit
 * architectures.
 *
 * For wmb(), we use sync since wmb is used in drivers to order
 * stores to system memory with respect to writes to the device.
 * However, smp_wmb() can be a lighter-weight eieio barrier on
 * SMP since it is only used to order updates to system memory.
 */
#define mb()	__asm__ __volatile__ ("sync" : : : "memory")
#define rmb()	__asm__ __volatile__ (__stringify(LWSYNC) : : : "memory")
#define wmb()	__asm__ __volatile__ ("sync" : : : "memory")
#define read_barrier_depends()	do { } while(0)

#define set_mb(var, value)	do { var = value; mb(); } while (0)
#ifdef __KERNEL__
#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	eieio()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#endif /* CONFIG_SMP */
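
/*
 * Illustrative sketch (not part of the original header): the classic
 * producer/consumer pairing of smp_wmb()/smp_rmb() described in the
 * comment above.  The structure and function names are hypothetical,
 * purely for illustration.
 */
struct __example_msg {
	int data;
	int ready;
};

static inline void __example_publish(struct __example_msg *m, int v)
{
	m->data = v;
	smp_wmb();		/* order the data store before the flag store */
	m->ready = 1;
}

static inline int __example_consume(struct __example_msg *m, int *v)
{
	if (!m->ready)
		return 0;
	smp_rmb();		/* order the flag load before the data load */
	*v = m->data;
	return 1;
}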
/*
 * This is a barrier which prevents following instructions from being
 * started until the value of the argument x is known.  For example, if
 * x is a variable loaded from memory, this prevents following
 * instructions from being executed until the load has been performed.
 */
#define data_barrier(x)	\
	asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory");
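
/*
 * Illustrative sketch (not in the original header): data_barrier() is
 * typically placed after a load whose value gates later accesses, so
 * that those accesses cannot be started speculatively.  The function
 * and parameter names are hypothetical.
 */
static inline unsigned int __example_read_then_index(volatile unsigned int *status,
						     unsigned int *table)
{
	unsigned int idx = *status;	/* load the index from memory */

	data_barrier(idx)		/* hold back the table access until idx is known */
	return table[idx];
}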
struct task_struct;
struct pt_regs;

#ifdef CONFIG_DEBUGGER

extern int (*__debugger)(struct pt_regs *regs);
extern int (*__debugger_ipi)(struct pt_regs *regs);
extern int (*__debugger_bpt)(struct pt_regs *regs);
extern int (*__debugger_sstep)(struct pt_regs *regs);
extern int (*__debugger_iabr_match)(struct pt_regs *regs);
extern int (*__debugger_dabr_match)(struct pt_regs *regs);
extern int (*__debugger_fault_handler)(struct pt_regs *regs);
#define DEBUGGER_BOILERPLATE(__NAME) \
static inline int __NAME(struct pt_regs *regs) \
{ \
	if (unlikely(__ ## __NAME)) \
		return __ ## __NAME(regs); \
	return 0; \
}
DEBUGGER_BOILERPLATE(debugger)
DEBUGGER_BOILERPLATE(debugger_ipi)
DEBUGGER_BOILERPLATE(debugger_bpt)
DEBUGGER_BOILERPLATE(debugger_sstep)
DEBUGGER_BOILERPLATE(debugger_iabr_match)
DEBUGGER_BOILERPLATE(debugger_dabr_match)
DEBUGGER_BOILERPLATE(debugger_fault_handler)

#else
static inline int debugger(struct pt_regs *regs) { return 0; }
static inline int debugger_ipi(struct pt_regs *regs) { return 0; }
static inline int debugger_bpt(struct pt_regs *regs) { return 0; }
static inline int debugger_sstep(struct pt_regs *regs) { return 0; }
static inline int debugger_iabr_match(struct pt_regs *regs) { return 0; }
static inline int debugger_dabr_match(struct pt_regs *regs) { return 0; }
static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; }
#endif
extern int set_dabr(unsigned long dabr);
extern void print_backtrace(unsigned long *);
extern void show_regs(struct pt_regs * regs);
extern void flush_instruction_cache(void);
extern void hard_reset_now(void);
extern void poweroff_now(void);

#ifdef CONFIG_6xx
extern long _get_L2CR(void);
extern long _get_L3CR(void);
extern void _set_L2CR(unsigned long);
extern void _set_L3CR(unsigned long);
#else
#define _get_L2CR()	0L
#define _get_L3CR()	0L
#define _set_L2CR(val)	do { } while(0)
#define _set_L3CR(val)	do { } while(0)
#endif

extern void via_cuda_init(void);
extern void read_rtc_time(void);
extern void pmac_find_display(void);
extern void giveup_fpu(struct task_struct *);
extern void disable_kernel_fp(void);
extern void enable_kernel_fp(void);
extern void flush_fp_to_thread(struct task_struct *);
extern void enable_kernel_altivec(void);
extern void giveup_altivec(struct task_struct *);
extern void load_up_altivec(struct task_struct *);
extern int emulate_altivec(struct pt_regs *);
extern void enable_kernel_spe(void);
extern void giveup_spe(struct task_struct *);
extern void load_up_spe(struct task_struct *);
extern int fix_alignment(struct pt_regs *);
extern void cvt_fd(float *from, double *to, struct thread_struct *thread);
extern void cvt_df(double *from, float *to, struct thread_struct *thread);
#ifndef CONFIG_SMP
extern void discard_lazy_cpu_state(void);
#else
static inline void discard_lazy_cpu_state(void)
{
}
#endif

#ifdef CONFIG_ALTIVEC
extern void flush_altivec_to_thread(struct task_struct *);
#else
static inline void flush_altivec_to_thread(struct task_struct *t)
{
}
#endif

#ifdef CONFIG_SPE
extern void flush_spe_to_thread(struct task_struct *);
#else
static inline void flush_spe_to_thread(struct task_struct *t)
{
}
#endif
extern int call_rtas(const char *, int, int, unsigned long *, ...);
extern void cacheable_memzero(void *p, unsigned int nb);
extern void *cacheable_memcpy(void *, const void *, unsigned int);
extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long);
extern void bad_page_fault(struct pt_regs *, unsigned long, int);
extern int die(const char *, struct pt_regs *, long);
extern void _exception(int, struct pt_regs *, int, unsigned long);
#ifdef CONFIG_BOOKE_WDT
extern u32 booke_wdt_enabled;
extern u32 booke_wdt_period;
#endif /* CONFIG_BOOKE_WDT */

struct device_node;
extern void note_scsi_host(struct device_node *, void *);

extern struct task_struct *__switch_to(struct task_struct *,
	struct task_struct *);
#define switch_to(prev, next, last)	((last) = __switch_to((prev), (next)))

struct thread_struct;
extern struct task_struct *_switch(struct thread_struct *prev,
				   struct thread_struct *next);

extern unsigned int rtas_data;
extern int mem_init_done;	/* set on boot once kmalloc can be called */
extern unsigned long memory_limit;
extern unsigned long klimit;

extern int powersave_nap;	/* set if nap mode can be used in idle loop */
/*
 * Atomic exchange
 *
 * Changes the memory location '*ptr' to be val and returns
 * the previous value stored there.
 */
static __inline__ unsigned long
__xchg_u32(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	lwarx	%0,0,%2 \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%3,0,%2 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (prev), "+m" (*(volatile unsigned int *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}
/*
 * Atomic exchange
 *
 * Changes the memory location '*ptr' to be val and returns
 * the previous value stored there.
 */
static __inline__ unsigned long
__xchg_u32_local(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2 \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%3,0,%2 \n\
	bne-	1b"
	: "=&r" (prev), "+m" (*(volatile unsigned int *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}
#ifdef CONFIG_PPC64
static __inline__ unsigned long
__xchg_u64(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	ldarx	%0,0,%2 \n"
	PPC405_ERR77(0,%2)
"	stdcx.	%3,0,%2 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (prev), "+m" (*(volatile unsigned long *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}

static __inline__ unsigned long
__xchg_u64_local(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2 \n"
	PPC405_ERR77(0,%2)
"	stdcx.	%3,0,%2 \n\
	bne-	1b"
	: "=&r" (prev), "+m" (*(volatile unsigned long *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}
#endif
/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid xchg().
 */
extern void __xchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__xchg(volatile void *ptr, unsigned long x, unsigned int size)
{
	switch (size) {
	case 4:
		return __xchg_u32(ptr, x);
#ifdef CONFIG_PPC64
	case 8:
		return __xchg_u64(ptr, x);
#endif
	}
	__xchg_called_with_bad_pointer();
	return x;
}

static __inline__ unsigned long
__xchg_local(volatile void *ptr, unsigned long x, unsigned int size)
{
	switch (size) {
	case 4:
		return __xchg_u32_local(ptr, x);
#ifdef CONFIG_PPC64
	case 8:
		return __xchg_u64_local(ptr, x);
#endif
	}
	__xchg_called_with_bad_pointer();
	return x;
}
#define xchg(ptr,x)							\
  ({									\
     __typeof__(*(ptr)) _x_ = (x);					\
     (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
  })

#define xchg_local(ptr,x)						\
  ({									\
     __typeof__(*(ptr)) _x_ = (x);					\
     (__typeof__(*(ptr))) __xchg_local((ptr),				\
		(unsigned long)_x_, sizeof(*(ptr)));			\
  })
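
/*
 * Illustrative sketch (not in the original header): xchg() is commonly
 * used to atomically take over a word, e.g. to claim a set of pending
 * bits in one step.  The function and parameter names are hypothetical.
 */
static inline unsigned long __example_claim_pending(unsigned long *pending)
{
	/* Atomically read the pending word and clear it, returning the old value. */
	return xchg(pending, 0UL);
}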
/*
 * Compare and exchange - if *p == old, set it to new,
 * and return the old value of *p.
 */
#define __HAVE_ARCH_CMPXCHG	1
static __inline__ unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
{
	unsigned int prev;

	__asm__ __volatile__ (
	LWSYNC_ON_SMP
"1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
	cmpw	0,%0,%3\n\
	bne-	2f\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%4,0,%2\n\
	bne-	1b"
	ISYNC_ON_SMP
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

static __inline__ unsigned long
__cmpxchg_u32_local(volatile unsigned int *p, unsigned long old,
			unsigned long new)
{
	unsigned int prev;

	__asm__ __volatile__ (
"1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
	cmpw	0,%0,%3\n\
	bne-	2f\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%4,0,%2\n\
	bne-	1b"
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}
#ifdef CONFIG_PPC64
static __inline__ unsigned long
__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
	LWSYNC_ON_SMP
"1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
	cmpd	0,%0,%3\n\
	bne-	2f\n\
	stdcx.	%4,0,%2\n\
	bne-	1b"
	ISYNC_ON_SMP
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

static __inline__ unsigned long
__cmpxchg_u64_local(volatile unsigned long *p, unsigned long old,
			unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
	cmpd	0,%0,%3\n\
	bne-	2f\n\
	stdcx.	%4,0,%2\n\
	bne-	1b"
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}
#endif
/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
extern void __cmpxchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
	  unsigned int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
#ifdef CONFIG_PPC64
	case 8:
		return __cmpxchg_u64(ptr, old, new);
#endif
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

static __inline__ unsigned long
__cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
	  unsigned int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32_local(ptr, old, new);
#ifdef CONFIG_PPC64
	case 8:
		return __cmpxchg_u64_local(ptr, old, new);
#endif
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}
#define cmpxchg(ptr,o,n)						\
  ({									\
     __typeof__(*(ptr)) _o_ = (o);					\
     __typeof__(*(ptr)) _n_ = (n);					\
     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		\
				    (unsigned long)_n_, sizeof(*(ptr))); \
  })

#define cmpxchg_local(ptr,o,n)						\
  ({									\
     __typeof__(*(ptr)) _o_ = (o);					\
     __typeof__(*(ptr)) _n_ = (n);					\
     (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_,	\
				    (unsigned long)_n_, sizeof(*(ptr))); \
  })
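
/*
 * Illustrative sketch (not in the original header): the usual cmpxchg()
 * retry loop, here implementing a saturating increment of an unsigned
 * int.  Function and variable names are hypothetical.
 */
static inline unsigned int __example_inc_saturating(unsigned int *counter,
						    unsigned int max)
{
	unsigned int old, new;

	do {
		old = *counter;
		if (old == max)
			return old;	/* already saturated, nothing to do */
		new = old + 1;
		/* retry if someone else changed *counter under us */
	} while (cmpxchg(counter, old, new) != old);

	return new;
}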
#ifdef CONFIG_PPC64
/*
 * We handle most unaligned accesses in hardware.  On the other hand
 * unaligned DMA can be very expensive on some ppc64 IO chips (it does
 * powers of 2 writes until it reaches sufficient alignment).
 *
 * Based on this we disable the IP header alignment in network drivers.
 * We also modify NET_SKB_PAD to be a cacheline in size, thus maintaining
 * cacheline alignment of buffers.
 */
#define NET_IP_ALIGN	0
#define NET_SKB_PAD	L1_CACHE_BYTES
#endif
#define arch_align_stack(x) (x)

/* Used in very early kernel initialization. */
extern unsigned long reloc_offset(void);
extern unsigned long add_reloc_offset(unsigned long);
extern void reloc_got2(unsigned long);

#define PTRRELOC(x)	((typeof(x)) add_reloc_offset((unsigned long)(x)))

static inline void create_instruction(unsigned long addr, unsigned int instr)
{
	unsigned int *p;
	p  = (unsigned int *)addr;
	*p = instr;
	asm ("dcbst 0, %0; sync; icbi 0,%0; sync; isync" : : "r" (p));
}
/* Flags for create_branch:
 * "b"   == create_branch(addr, target, 0);
 * "ba"  == create_branch(addr, target, BRANCH_ABSOLUTE);
 * "bl"  == create_branch(addr, target, BRANCH_SET_LINK);
 * "bla" == create_branch(addr, target, BRANCH_ABSOLUTE | BRANCH_SET_LINK);
 */
#define BRANCH_SET_LINK	0x1
#define BRANCH_ABSOLUTE	0x2

static inline void create_branch(unsigned long addr,
		unsigned long target, int flags)
{
	unsigned int instruction;

	if (! (flags & BRANCH_ABSOLUTE))
		target = target - addr;

	/* Mask out the flags and target, so they don't step on each other. */
	instruction = 0x48000000 | (flags & 0x3) | (target & 0x03FFFFFC);

	create_instruction(addr, instruction);
}
static inline void create_function_call(unsigned long addr, void * func)
{
	unsigned long func_addr;

#ifdef CONFIG_PPC64
	/*
	 * On PPC64 the function pointer actually points to the function's
	 * descriptor. The first entry in the descriptor is the address
	 * of the function text.
	 */
	func_addr = *(unsigned long *)func;
#else
	func_addr = (unsigned long)func;
#endif
	create_branch(addr, func_addr, BRANCH_SET_LINK);
}
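
/*
 * Illustrative sketch (not in the original header): patching a call
 * site at a known code address so that it becomes "bl handler".  The
 * call-site address and handler are hypothetical.
 */
static inline void __example_patch_call(unsigned long call_site,
					void (*handler)(void))
{
	/* Rewrite the instruction at call_site into a linked branch to handler. */
	create_function_call(call_site, (void *)handler);
}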
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
extern void account_system_vtime(struct task_struct *);
#endif

extern struct dentry *powerpc_debugfs_root;

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_SYSTEM_H */