/*
 * include/asm-xtensa/system.h
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_SYSTEM_H
#define _XTENSA_SYSTEM_H

#include <linux/config.h>
#include <linux/stringify.h>

#include <asm/processor.h>

/* interrupt control */

#define local_save_flags(x)						\
	__asm__ __volatile__ ("rsr %0,"__stringify(PS) : "=a" (x))

#define local_irq_restore(x)	do {					\
	__asm__ __volatile__ ("wsr %0, "__stringify(PS)" ; rsync"	\
			      :: "a" (x) : "memory"); } while(0)

#define local_irq_save(x)	do {					\
	__asm__ __volatile__ ("rsil %0, "__stringify(LOCKLEVEL)	\
			      : "=a" (x) :: "memory"); } while(0)

static inline void local_irq_disable(void)
{
	unsigned long flags;
	__asm__ __volatile__ ("rsil %0, "__stringify(LOCKLEVEL)
			      : "=a" (flags) :: "memory");
}

static inline void local_irq_enable(void)
{
	unsigned long flags;
	__asm__ __volatile__ ("rsil %0, 0" : "=a" (flags) :: "memory");
}

static inline int irqs_disabled(void)
{
	unsigned long flags;
	local_save_flags(flags);
	return flags & 0xf;
}

#define RSR_CPENABLE(x)	do {						\
	__asm__ __volatile__("rsr %0," __stringify(CPENABLE) : "=a" (x)); \
	} while(0)

#define WSR_CPENABLE(x)	do {						\
	__asm__ __volatile__("wsr %0," __stringify(CPENABLE) "; rsync"	\
			     :: "a" (x)); } while(0)

#define clear_cpenable() __clear_cpenable()

static inline void __clear_cpenable(void)
{
#if XCHAL_HAVE_CP
	unsigned long i = 0;
	WSR_CPENABLE(i);
#endif
}

static inline void enable_coprocessor(int i)
{
#if XCHAL_HAVE_CP
	int cp;
	RSR_CPENABLE(cp);
	cp |= 1 << i;
	WSR_CPENABLE(cp);
#endif
}

static inline void disable_coprocessor(int i)
{
#if XCHAL_HAVE_CP
	int cp;
	RSR_CPENABLE(cp);
	cp &= ~(1 << i);
	WSR_CPENABLE(cp);
#endif
}
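
/*
 * Usage sketch (illustrative only): each bit of CPENABLE gates one
 * coprocessor.  Enabling coprocessor n sets bit n; while the bit is
 * clear, any access to that coprocessor raises an exception.
 */
#if 0
	enable_coprocessor(0);		/* set CPENABLE bit 0 */
	/* ... issue coprocessor-0 instructions ... */
	disable_coprocessor(0);		/* clear CPENABLE bit 0 again */
#endif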

#define smp_read_barrier_depends() do { } while(0)
#define read_barrier_depends() do { } while(0)

#define mb()  barrier()
#define rmb() mb()
#define wmb() mb()

#ifdef CONFIG_SMP
#error smp_* not defined
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#endif

#define set_mb(var, value)	do { var = value; mb(); } while (0)
#define set_wmb(var, value)	do { var = value; wmb(); } while (0)
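
/*
 * Usage sketch (illustrative only): set_mb() stores a value and then
 * issues a full barrier, so a later load cannot be reordered before
 * the store.  On this UP build mb() is only a compiler barrier; the
 * example_* names are hypothetical.
 */
#if 0
static volatile int example_sleeping;
static volatile int example_work_pending;

static inline int example_prepare_to_wait(void)
{
	set_mb(example_sleeping, 1);	/* publish "sleeping" first ... */
	return example_work_pending;	/* ... then re-check for work */
}
#endif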

#if !defined (__ASSEMBLY__)

/*
 * switch_to(n) should switch tasks to task nr n, first
 * checking that n isn't the current task, in which case it does nothing.
 */
extern void *_switch_to(void *last, void *next);

#endif	/* __ASSEMBLY__ */

#define prepare_to_switch()	do { } while(0)

#define switch_to(prev,next,last)		\
do {						\
	clear_cpenable();			\
	(last) = _switch_to(prev, next);	\
} while(0)

/*
 * cmpxchg
 */

static inline unsigned long
__cmpxchg_u32(volatile int *p, int old, int new)
{
	__asm__ __volatile__("rsil    a15, "__stringify(LOCKLEVEL)"\n\t"
			     "l32i    %0, %1, 0              \n\t"
			     "bne     %0, %2, 1f             \n\t"
			     "s32i    %3, %1, 0              \n\t"
			     "1:                             \n\t"
			     "wsr     a15, "__stringify(PS)" \n\t"
			     "rsync                          \n\t"
			     : "=&a" (old)
			     : "a" (p), "a" (old), "r" (new)
			     : "a15", "memory");
	return old;
}

/* This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid cmpxchg(). */

extern void __cmpxchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	switch (size) {
	case 4:  return __cmpxchg_u32(ptr, old, new);
	default: __cmpxchg_called_with_bad_pointer();
		 return old;
	}
}

#define cmpxchg(ptr,o,n)						\
	({ __typeof__(*(ptr)) _o_ = (o);				\
	   __typeof__(*(ptr)) _n_ = (n);				\
	   (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	\
					  (unsigned long)_n_, sizeof (*(ptr))); \
	})
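
/*
 * Usage sketch (illustrative, not part of the original header): a
 * lock-free increment built on cmpxchg().  The update is retried
 * until no other context modified *v between the load and the swap;
 * lockfree_inc() is a hypothetical helper, not a kernel API.
 */
#if 0
static inline void lockfree_inc(volatile int *v)
{
	int old;

	do {
		old = *v;	/* snapshot the current value */
	} while (cmpxchg(v, old, old + 1) != old);	/* retry on race */
}
#endif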

/*
 * xchg_u32
 *
 * Note that a15 is used here because the register allocation
 * done by the compiler is not guaranteed and a window overflow
 * may not occur between the rsil and wsr instructions. By using
 * a15 in the rsil, the machine is guaranteed to be in a state
 * where no register reference will cause an overflow.
 */

static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
{
	unsigned long tmp;
	__asm__ __volatile__("rsil    a15, "__stringify(LOCKLEVEL)"\n\t"
			     "l32i    %0, %1, 0              \n\t"
			     "s32i    %2, %1, 0              \n\t"
			     "wsr     a15, "__stringify(PS)" \n\t"
			     "rsync                          \n\t"
			     : "=&a" (tmp)
			     : "a" (m), "a" (val)
			     : "a15", "memory");
	return tmp;
}

#define tas(ptr) (xchg((ptr),1))

#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
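
/*
 * Usage sketch (illustrative only): tas() is an atomic test-and-set,
 * the classic building block of a busy-wait lock.  example_lock and
 * the two helpers below are hypothetical, not part of this header.
 */
#if 0
static volatile int example_lock;

static inline void example_spin_lock(void)
{
	while (tas(&example_lock))
		;	/* old value != 0: someone holds it, keep spinning */
}

static inline void example_spin_unlock(void)
{
	example_lock = 0;	/* release: a spinner's tas() now returns 0 */
}
#endif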

/*
 * This only works if the compiler isn't horribly bad at optimizing.
 * gcc-2.5.8 reportedly can't handle this, but I define that one to
 * be dead anyway.
 */

extern void __xchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
	case 4:
		return xchg_u32(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}

extern void set_except_vector(int n, void *addr);

/*
 * Spill all live register-window frames of the current call chain to
 * their stack save areas via the assembly helper _spill_registers.
 * a0, SAR, and PS are saved around the call; PS is temporarily set
 * to EXCM with interrupts masked at level 1.
 */
static inline void spill_registers(void)
{
	unsigned int a0, ps;

	__asm__ __volatile__ (
		"movi	a14," __stringify (PS_EXCM_MASK) " | 1\n\t"
		"mov	a12, a0\n\t"
		"rsr	a13," __stringify(SAR) "\n\t"
		"xsr	a14," __stringify(PS) "\n\t"
		"movi	a0, _spill_registers\n\t"
		"rsync\n\t"
		"callx0 a0\n\t"
		"mov	a0, a12\n\t"
		"wsr	a13," __stringify(SAR) "\n\t"
		"wsr	a14," __stringify(PS) "\n\t"
		:: "a" (&a0), "a" (&ps)
		: "a2", "a3", "a12", "a13", "a14", "a15", "memory");
}

#define arch_align_stack(x) (x)

#endif	/* _XTENSA_SYSTEM_H */