/* spinlock.h: 64-bit Sparc spinlock support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef __SPARC64_SPINLOCK_H
#define __SPARC64_SPINLOCK_H

#include <linux/config.h>
#include <linux/threads.h>	/* For NR_CPUS */

#ifndef __ASSEMBLY__
/* To get debugging spinlocks which detect and catch
 * deadlock situations, set CONFIG_DEBUG_SPINLOCK
 * and rebuild your kernel.
 */

/* All of these locking primitives are expected to work properly
 * even in an RMO memory model, which currently is what the kernel
 * runs in.
 *
 * There is another issue.  Because we play games to save cycles
 * in the non-contention case, we need to be extra careful about
 * branch targets into the "spinning" code.  They live in their
 * own section, but the newer V9 branches have a shorter range
 * than the traditional 32-bit sparc branch variants.  The rule
 * is that the branches that go into and out of the spinner sections
 * must be pre-V9 branches.
 */
#ifndef CONFIG_DEBUG_SPINLOCK

typedef struct {
	volatile unsigned char lock;
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} spinlock_t;
#define SPIN_LOCK_UNLOCKED	(spinlock_t) {0,}

#define spin_lock_init(lp)	do { *(lp)= SPIN_LOCK_UNLOCKED; } while(0)
#define spin_is_locked(lp)	((lp)->lock != 0)
#define spin_unlock_wait(lp)	\
do {	membar("#LoadLoad");	\
} while((lp)->lock)
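
/* Note that spin_unlock_wait() never takes the lock: it just keeps
 * re-reading the lock byte, after a #LoadLoad membar, until another
 * CPU's unlock lets it observe zero.
 */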
static inline void _raw_spin_lock(spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldstub		[%1], %0\n"
"	brnz,pn		%0, 2f\n"
"	 membar		#StoreLoad | #StoreStore\n"
"	.subsection	2\n"
"2:	ldub		[%1], %0\n"
"	brnz,pt		%0, 2b\n"
"	 membar		#LoadLoad\n"
"	ba,a,pt		%%xcc, 1b\n"
"	.previous"
	: "=&r" (tmp)
	: "r" (lock)
	: "memory");
}
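
/* Purely illustrative sketch (not part of the original header): in C
 * terms, the ldstub-based acquire loop above behaves roughly like the
 * function below.  __ldstub() is a hypothetical helper standing in for
 * the atomic "load byte, then store 0xff" instruction; the inner
 * load-only loop mirrors the ldub spin in .subsection 2, which avoids
 * hammering the cache line with stores while the lock is held.
 */
#if 0	/* illustration only, never compiled */
static inline void spin_lock_sketch(spinlock_t *lock)
{
	while (__ldstub(&lock->lock) != 0) {	/* try to grab the byte */
		while (lock->lock != 0)		/* spin read-only */
			/* wait */;
	}
	/* The membar #StoreLoad | #StoreStore above keeps the critical
	 * section's accesses from floating up past the acquire. */
}
#endif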
static inline int _raw_spin_trylock(spinlock_t *lock)
{
	unsigned long result;

	__asm__ __volatile__(
"	ldstub		[%1], %0\n"
"	membar		#StoreLoad | #StoreStore"
	: "=r" (result)
	: "r" (lock)
	: "memory");

	return (result == 0UL);
}
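
/* ldstub returns the previous value of the byte, so a zero result
 * means we took the lock.  Typical use:
 *
 *	if (_raw_spin_trylock(lp)) {
 *		... critical section ...
 *		_raw_spin_unlock(lp);
 *	}
 */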
static inline void _raw_spin_unlock(spinlock_t *lock)
{
	__asm__ __volatile__(
"	membar		#StoreStore | #LoadStore\n"
"	stb		%%g0, [%0]"
	: /* No outputs */
	: "r" (lock)
	: "memory");
}
static inline void _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"1:	ldstub		[%2], %0\n"
"	brnz,pn		%0, 2f\n"
"	 membar		#StoreLoad | #StoreStore\n"
"	.subsection	2\n"
"2:	rdpr		%%pil, %1\n"
"	wrpr		%3, %%pil\n"
"3:	ldub		[%2], %0\n"
"	brnz,pt		%0, 3b\n"
"	 membar		#LoadLoad\n"
"	ba,pt		%%xcc, 1b\n"
"	 wrpr		%1, %%pil\n"
"	.previous"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r"(lock), "r"(flags)
	: "memory");
}
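
/* While spinning, the slow path above temporarily rewrites %pil to the
 * interrupt level saved in 'flags', so pending interrupts can still be
 * serviced, then restores the caller's %pil (saved in %1) in the branch
 * delay slot just before retrying the ldstub.
 */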
#else /* !(CONFIG_DEBUG_SPINLOCK) */

typedef struct {
	volatile unsigned char lock;
	unsigned int owner_pc, owner_cpu;
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} spinlock_t;
#define SPIN_LOCK_UNLOCKED	(spinlock_t) { 0, 0, 0xff }
#define spin_lock_init(lp)	do { *(lp)= SPIN_LOCK_UNLOCKED; } while(0)
#define spin_is_locked(__lock)	((__lock)->lock != 0)
#define spin_unlock_wait(__lock)	\
do { \
	membar("#LoadLoad"); \
} while((__lock)->lock)
extern void _do_spin_lock (spinlock_t *lock, char *str);
extern void _do_spin_unlock (spinlock_t *lock);
extern int _do_spin_trylock (spinlock_t *lock);

#define _raw_spin_trylock(lp)	_do_spin_trylock(lp)
#define _raw_spin_lock(lock)	_do_spin_lock(lock, "spin_lock")
#define _raw_spin_unlock(lock)	_do_spin_unlock(lock)
#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)

#endif /* CONFIG_DEBUG_SPINLOCK */
/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */

#ifndef CONFIG_DEBUG_SPINLOCK

typedef struct {
	volatile unsigned int lock;
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} rwlock_t;
#define RW_LOCK_UNLOCKED	(rwlock_t) {0,}
#define rwlock_init(lp)		do { *(lp) = RW_LOCK_UNLOCKED; } while(0)
static inline void __read_lock(rwlock_t *lock)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__ (
"1:	ldsw		[%2], %0\n"
"	brlz,pn		%0, 2f\n"
"4:	 add		%0, 1, %1\n"
"	cas		[%2], %0, %1\n"
"	cmp		%0, %1\n"
"	bne,pn		%%icc, 1b\n"
"	 membar		#StoreLoad | #StoreStore\n"
"	.subsection	2\n"
"2:	ldsw		[%2], %0\n"
"	brlz,pt		%0, 2b\n"
"	 membar		#LoadLoad\n"
"	ba,a,pt		%%xcc, 4b\n"
"	.previous"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock)
	: "memory");
}
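
/* Illustrative sketch (not from the original source): the reader fast
 * path above is a compare-and-swap increment that refuses to proceed
 * while the sign bit (the writer bit) is set.  __cas32() is a
 * hypothetical stand-in for the 'cas' instruction, returning the old
 * value of the word.
 */
#if 0	/* illustration only, never compiled */
static inline void read_lock_sketch(rwlock_t *lock)
{
	int old;

	for (;;) {
		old = (int) lock->lock;
		if (old < 0)		/* writer bit set, lock busy */
			continue;	/* (the real code spins read-only) */
		if (__cas32(&lock->lock, old, old + 1) == old)
			break;		/* reader count bumped */
	}
}
#endif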
static inline void __read_unlock(rwlock_t *lock)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"	membar	#StoreLoad | #LoadLoad\n"
"1:	lduw	[%2], %0\n"
"	sub	%0, 1, %1\n"
"	cas	[%2], %0, %1\n"
"	cmp	%0, %1\n"
"	bne,pn	%%xcc, 1b\n"
"	 nop"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock)
	: "memory");
}
static inline void __write_lock(rwlock_t *lock)
{
	unsigned long mask, tmp1, tmp2;

	mask = 0x80000000UL;

	__asm__ __volatile__(
"1:	lduw		[%2], %0\n"
"	brnz,pn		%0, 2f\n"
"4:	 or		%0, %3, %1\n"
"	cas		[%2], %0, %1\n"
"	cmp		%0, %1\n"
"	bne,pn		%%icc, 1b\n"
"	 membar		#StoreLoad | #StoreStore\n"
"	.subsection	2\n"
"2:	lduw		[%2], %0\n"
"	brnz,pt		%0, 2b\n"
"	 membar		#LoadLoad\n"
"	ba,a,pt		%%xcc, 4b\n"
"	.previous"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock), "r" (mask)
	: "memory");
}
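
/* A writer can only get in when the whole word is zero (no readers and
 * no writer); it then CASes in the 0x80000000 mask so that both readers
 * and competing writers see the lock as busy.
 */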
static inline void __write_unlock(rwlock_t *lock)
{
	__asm__ __volatile__(
"	membar		#LoadStore | #StoreStore\n"
"	stw		%%g0, [%0]"
	: /* no outputs */
	: "r" (lock)
	: "memory");
}
static inline int __write_trylock(rwlock_t *lock)
{
	unsigned long mask, tmp1, tmp2, result;

	mask = 0x80000000UL;

	__asm__ __volatile__(
"	mov	0, %2\n"
"1:	lduw	[%3], %0\n"
"	brnz,pn	%0, 2f\n"
"	 or	%0, %4, %1\n"
"	cas	[%3], %0, %1\n"
"	cmp	%0, %1\n"
"	bne,pn	%%icc, 1b\n"
"	 membar	#StoreLoad | #StoreStore\n"
"	mov	1, %2\n"
"2:"
	: "=&r" (tmp1), "=&r" (tmp2), "=&r" (result)
	: "r" (lock), "r" (mask)
	: "memory");

	return result;
}
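
/* Unlike __write_lock(), the trylock makes no blocking pass: if the
 * word is nonzero it branches straight to label 2 and returns 0; the
 * cas loop only retries when the word changed between the load and the
 * cas.  A return of 1 means the write lock was taken.
 */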
#define _raw_read_lock(p)	__read_lock(p)
#define _raw_read_unlock(p)	__read_unlock(p)
#define _raw_write_lock(p)	__write_lock(p)
#define _raw_write_unlock(p)	__write_unlock(p)
#define _raw_write_trylock(p)	__write_trylock(p)
#else /* !(CONFIG_DEBUG_SPINLOCK) */

typedef struct {
	volatile unsigned long lock;
	unsigned int writer_pc, writer_cpu;
	unsigned int reader_pc[NR_CPUS];
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} rwlock_t;
#define RW_LOCK_UNLOCKED	(rwlock_t) { 0, 0, 0xff, { } }
#define rwlock_init(lp)		do { *(lp) = RW_LOCK_UNLOCKED; } while(0)
extern void _do_read_lock(rwlock_t *rw, char *str);
extern void _do_read_unlock(rwlock_t *rw, char *str);
extern void _do_write_lock(rwlock_t *rw, char *str);
extern void _do_write_unlock(rwlock_t *rw);
extern int _do_write_trylock(rwlock_t *rw, char *str);
#define _raw_read_lock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	_do_read_lock(lock, "read_lock"); \
	local_irq_restore(flags); \
} while(0)

#define _raw_read_unlock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	_do_read_unlock(lock, "read_unlock"); \
	local_irq_restore(flags); \
} while(0)

#define _raw_write_lock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	_do_write_lock(lock, "write_lock"); \
	local_irq_restore(flags); \
} while(0)

#define _raw_write_unlock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	_do_write_unlock(lock); \
	local_irq_restore(flags); \
} while(0)

#define _raw_write_trylock(lock) \
({	unsigned long flags; \
	int val; \
	local_irq_save(flags); \
	val = _do_write_trylock(lock, "write_trylock"); \
	local_irq_restore(flags); \
	val; \
})

#endif /* CONFIG_DEBUG_SPINLOCK */
#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)

#define read_can_lock(rw)	(!((rw)->lock & 0x80000000UL))
#define write_can_lock(rw)	(!(rw)->lock)
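
/* The rwlock word thus encodes a writer in bit 31 and the reader count
 * in the low bits: read_can_lock() only checks the writer bit (readers
 * can share the lock), while write_can_lock() needs the whole word to
 * be clear.
 */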
#endif /* !(__ASSEMBLY__) */

#endif /* !(__SPARC64_SPINLOCK_H) */