#ifndef _ASM_M32R_SPINLOCK_H
#define _ASM_M32R_SPINLOCK_H

/*
 *  linux/include/asm-m32r/spinlock.h
 *
 *  M32R version:
 *    Copyright (C) 2001, 2002  Hitoshi Yamamoto
 *    Copyright (C) 2004  Hirokazu Takata <takata at linux-m32r.org>
 */

#include <linux/compiler.h>
#include <linux/atomic.h>
#include <asm/dcache_clear.h>
#include <asm/page.h>
/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * (the type definitions are in asm/spinlock_types.h)
 *
 * Simple spin lock operations.  There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * We make no fairness assumptions.  They have a cost.
 */
#define arch_spin_is_locked(x)		(*(volatile int *)(&(x)->slock) <= 0)
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
#define arch_spin_unlock_wait(x) \
		do { cpu_relax(); } while (arch_spin_is_locked(x))
/**
 * arch_spin_trylock - Try spin lock and return a result
 * @lock: Pointer to the lock variable
 *
 * arch_spin_trylock() tries to get the lock and returns a result.
 * On the m32r, the result value is 1 (= Success) or 0 (= Failure).
 */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	int oldval;
	unsigned long tmp1, tmp2;

	/*
	 * lock->slock :  =1 : unlock
	 *             : <=0 : lock
	 *
	 * oldval = lock->slock; <--+ need atomic operation
	 * lock->slock = 0;      <--+
	 *
	 * clrpsw #0x40 disables interrupts and mvtc restores the saved
	 * PSW, so the LOCK/UNLOCK pair below cannot be interrupted.
	 */
	__asm__ __volatile__ (
		"# arch_spin_trylock		\n\t"
		"ldi	%1, #0;			\n\t"
		"mvfc	%2, psw;		\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r6", "%3")
		"lock	%0, @%3;		\n\t"
		"unlock	%1, @%3;		\n\t"
		"mvtc	%2, psw;		\n\t"
		: "=&r" (oldval), "=&r" (tmp1), "=&r" (tmp2)
		: "r" (&lock->slock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);

	return (oldval > 0);
}
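/*
 * Usage sketch (hypothetical names, for illustration only; real callers
 * use the generic spin_trylock() wrapper rather than the arch_ entry
 * point directly):
 *
 *	if (arch_spin_trylock(&my_lock)) {
 *		... critical section ...
 *		arch_spin_unlock(&my_lock);
 *	} else {
 *		... lock was busy, fall back or retry ...
 *	}
 */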
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp0, tmp1;

	/*
	 * lock->slock :  =1 : unlock
	 *             : <=0 : lock
	 *
	 * for ( ; ; ) {
	 *   lock->slock -= 1;  <-- need atomic operation
	 *   if (lock->slock == 0) break;
	 *   for ( ; lock->slock <= 0 ; );
	 * }
	 */
	__asm__ __volatile__ (
		"# arch_spin_lock		\n\t"
		".fillinsn			\n"
		"1:				\n\t"
		"mvfc	%1, psw;		\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r6", "%2")
		"lock	%0, @%2;		\n\t"
		"addi	%0, #-1;		\n\t"
		"unlock	%0, @%2;		\n\t"
		"mvtc	%1, psw;		\n\t"
		"bltz	%0, 2f;			\n\t"
		LOCK_SECTION_START(".balign 4 \n\t")
		".fillinsn			\n"
		"2:				\n\t"
		"ld	%0, @%2;		\n\t"
		"blez	%0, 2b;			\n\t"
		"bra	1b;			\n\t"
		LOCK_SECTION_END
		: "=&r" (tmp0), "=&r" (tmp1)
		: "r" (&lock->slock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
}
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	mb();
	lock->slock = 1;
}
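/*
 * A minimal sketch of the lock's state transitions (hypothetical; real
 * code goes through the generic spin_lock()/spin_unlock() wrappers),
 * assuming the __ARCH_SPIN_LOCK_UNLOCKED initializer from
 * asm/spinlock_types.h:
 *
 *	arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;  // slock == 1
 *
 *	arch_spin_lock(&lock);     // slock becomes 0 (<0 under contention)
 *	... critical section ...
 *	arch_spin_unlock(&lock);   // mb(), then slock = 1: free again
 */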
/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 *
 * The inline assembly is non-obvious. Think about it.
 *
 * Changed to use the same technique as rw semaphores.  See
 * semaphore.h for details.  -ben
 */
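/*
 * A worked example of the bias scheme, assuming RW_LOCK_BIAS is
 * 0x01000000 as defined in asm/spinlock_types.h:
 *
 *	lock == RW_LOCK_BIAS      : free
 *	lock == RW_LOCK_BIAS - N  : held by N readers (N > 0)
 *	lock == 0                 : held by one writer, which
 *	                            subtracted the whole bias
 *	lock <  0                 : a reader decremented while a writer
 *	                            holds the lock; the reader backs its
 *	                            decrement out and spins
 */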
/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_read_can_lock(x) ((int)(x)->lock > 0)
/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp0, tmp1;

	/*
	 * rw->lock :  >0 : unlock
	 *          : <=0 : lock
	 *
	 * for ( ; ; ) {
	 *   rw->lock -= 1;  <-- need atomic operation
	 *   if (rw->lock >= 0) break;
	 *   rw->lock += 1;  <-- need atomic operation
	 *   for ( ; rw->lock <= 0 ; );
	 * }
	 */
	__asm__ __volatile__ (
		"# read_lock			\n\t"
		".fillinsn			\n"
		"1:				\n\t"
		"mvfc	%1, psw;		\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r6", "%2")
		"lock	%0, @%2;		\n\t"
		"addi	%0, #-1;		\n\t"
		"unlock	%0, @%2;		\n\t"
		"mvtc	%1, psw;		\n\t"
		"bltz	%0, 2f;			\n\t"
		LOCK_SECTION_START(".balign 4 \n\t")
		".fillinsn			\n"
		"2:				\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r6", "%2")
		"lock	%0, @%2;		\n\t"
		"addi	%0, #1;			\n\t"
		"unlock	%0, @%2;		\n\t"
		"mvtc	%1, psw;		\n\t"
		".fillinsn			\n"
		"3:				\n\t"
		"ld	%0, @%2;		\n\t"
		"bgtz	%0, 1b;			\n\t"
		"bra	3b;			\n\t"
		LOCK_SECTION_END
		: "=&r" (tmp0), "=&r" (tmp1)
		: "r" (&rw->lock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
}
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp0, tmp1, tmp2;

	/*
	 * rw->lock :  =RW_LOCK_BIAS_STR : unlock
	 *          : !=RW_LOCK_BIAS_STR : lock
	 *
	 * for ( ; ; ) {
	 *   rw->lock -= RW_LOCK_BIAS_STR;  <-- need atomic operation
	 *   if (rw->lock == 0) break;
	 *   rw->lock += RW_LOCK_BIAS_STR;  <-- need atomic operation
	 *   for ( ; rw->lock != RW_LOCK_BIAS_STR ; ) ;
	 * }
	 */
	__asm__ __volatile__ (
		"# write_lock					\n\t"
		"seth	%1, #high(" RW_LOCK_BIAS_STR ");	\n\t"
		"or3	%1, %1, #low(" RW_LOCK_BIAS_STR ");	\n\t"
		".fillinsn					\n"
		"1:						\n\t"
		"mvfc	%2, psw;				\n\t"
		"clrpsw	#0x40 -> nop;				\n\t"
		DCACHE_CLEAR("%0", "r7", "%3")
		"lock	%0, @%3;				\n\t"
		"sub	%0, %1;					\n\t"
		"unlock	%0, @%3;				\n\t"
		"mvtc	%2, psw;				\n\t"
		"bnez	%0, 2f;					\n\t"
		LOCK_SECTION_START(".balign 4 \n\t")
		".fillinsn					\n"
		"2:						\n\t"
		"clrpsw	#0x40 -> nop;				\n\t"
		DCACHE_CLEAR("%0", "r7", "%3")
		"lock	%0, @%3;				\n\t"
		"add	%0, %1;					\n\t"
		"unlock	%0, @%3;				\n\t"
		"mvtc	%2, psw;				\n\t"
		".fillinsn					\n"
		"3:						\n\t"
		"ld	%0, @%3;				\n\t"
		"beq	%0, %1, 1b;				\n\t"
		"bra	3b;					\n\t"
		LOCK_SECTION_END
		: "=&r" (tmp0), "=&r" (tmp1), "=&r" (tmp2)
		: "r" (&rw->lock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r7"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
}
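/*
 * For reference, the loop above is roughly the following C, written
 * against the generic atomic API (a sketch of the algorithm, not what
 * the kernel actually compiles here):
 *
 *	while (!atomic_sub_and_test(RW_LOCK_BIAS, (atomic_t *)&rw->lock)) {
 *		atomic_add(RW_LOCK_BIAS, (atomic_t *)&rw->lock);
 *		while (atomic_read((atomic_t *)&rw->lock) != RW_LOCK_BIAS)
 *			cpu_relax();
 *	}
 */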
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp0, tmp1;

	__asm__ __volatile__ (
		"# read_unlock			\n\t"
		"mvfc	%1, psw;		\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r6", "%2")
		"lock	%0, @%2;		\n\t"
		"addi	%0, #1;			\n\t"
		"unlock	%0, @%2;		\n\t"
		"mvtc	%1, psw;		\n\t"
		: "=&r" (tmp0), "=&r" (tmp1)
		: "r" (&rw->lock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
}
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp0, tmp1, tmp2;

	__asm__ __volatile__ (
		"# write_unlock					\n\t"
		"seth	%1, #high(" RW_LOCK_BIAS_STR ");	\n\t"
		"or3	%1, %1, #low(" RW_LOCK_BIAS_STR ");	\n\t"
		"mvfc	%2, psw;				\n\t"
		"clrpsw	#0x40 -> nop;				\n\t"
		DCACHE_CLEAR("%0", "r7", "%3")
		"lock	%0, @%3;				\n\t"
		"add	%0, %1;					\n\t"
		"unlock	%0, @%3;				\n\t"
		"mvtc	%2, psw;				\n\t"
		: "=&r" (tmp0), "=&r" (tmp1), "=&r" (tmp2)
		: "r" (&rw->lock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r7"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
}
static inline int arch_read_trylock(arch_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;
	if (atomic_dec_return(count) >= 0)
		return 1;
	atomic_inc(count);
	return 0;
}
static inline int arch_write_trylock(arch_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;
	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;
	atomic_add(RW_LOCK_BIAS, count);
	return 0;
}
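/*
 * Usage sketch (hypothetical names; normal code uses the generic
 * read_trylock()/write_trylock() wrappers):
 *
 *	if (arch_read_trylock(&my_rwlock)) {
 *		... read-side critical section ...
 *		arch_read_unlock(&my_rwlock);
 *	}
 *
 *	if (arch_write_trylock(&my_rwlock)) {
 *		... write-side critical section ...
 *		arch_write_unlock(&my_rwlock);
 *	}
 */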
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif	/* _ASM_M32R_SPINLOCK_H */