#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#if __LINUX_ARM_ARCH__ < 6
#error SMP not supported on pre-ARMv6 CPUs
#endif
/*
 * ARMv6 Spin-locking.
 *
 * We exclusively read the old value.  If it is zero, we may have
 * won the lock, so we try exclusively storing it.  A memory barrier
 * is required after we get a lock, and before we release it, because
 * V6 CPUs are assumed to have weakly ordered memory.
 */
typedef struct {
	volatile unsigned int lock;
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} spinlock_t;
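/*
 * Note: break_lock itself is only touched by the generic lock-breaking code
 * (kernel/spinlock.c) when CONFIG_PREEMPT is enabled: a waiting CPU sets it
 * so the current holder can see contention and drop the lock early.
 */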
#define SPIN_LOCK_UNLOCKED	(spinlock_t) { 0 }

#define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while (0)
#define spin_is_locked(x)	((x)->lock != 0)
#define spin_unlock_wait(x)	do { barrier(); } while (spin_is_locked(x))
#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
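/*
 * ARM does not implement a "re-enable interrupts while spinning" variant,
 * so the _flags version simply ignores the saved flags and takes the lock
 * the ordinary way.
 */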
static inline void _raw_spin_lock(spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"	/* exclusively load the current lock value */
"	teq	%0, #0\n"	/* zero means the lock is free */
"	strexeq	%0, %2, [%1]\n"	/* if free, try to exclusively store 1 */
"	teqeq	%0, #0\n"	/* %0 is zero only if the store succeeded */
"	bne	1b"		/* lock held or exclusive lost - retry */
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc", "memory");

	smp_mb();
}
static inline int _raw_spin_trylock(spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc", "memory");

	if (tmp == 0) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}
static inline void _raw_spin_unlock(spinlock_t *lock)
{
	smp_mb();

	__asm__ __volatile__(
"	str	%1, [%0]"
	:
	: "r" (&lock->lock), "r" (0)
	: "cc", "memory");
}
typedef struct {
	volatile unsigned int lock;
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} rwlock_t;
#define RW_LOCK_UNLOCKED	(rwlock_t) { 0 }
#define rwlock_init(x)		do { *(x) = RW_LOCK_UNLOCKED; } while (0)
#define rwlock_is_locked(x)	(*((volatile unsigned int *)(x)) != 0)
/*
 * Write locks are easy - we just set bit 31.  When unlocking, we can
 * just write zero since the lock is exclusively held.
 */
static inline void _raw_write_lock(rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"	/* exclusively load the current lock value */
"	teq	%0, #0\n"	/* we need it completely unowned */
"	strexeq	%0, %2, [%1]\n"	/* if so, try to store the write bit (bit 31) */
"	teq	%0, #0\n"	/* retry if held or the exclusive store failed */
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc", "memory");

	smp_mb();
}
static inline int _raw_write_trylock(rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc", "memory");

	if (tmp == 0) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}
static inline void _raw_write_unlock(rwlock_t *rw)
{
	smp_mb();

	__asm__ __volatile__(
	"str	%1, [%0]"
	:
	: "r" (&rw->lock), "r" (0)
	: "cc", "memory");
}
/*
 * Read locks are a bit more hairy:
 *  - Exclusively load the lock value.
 *  - Increment it.
 *  - Store new lock value if positive, and we still own this location.
 *    If the value is negative, we've already failed.
 *  - If we failed to store the value, we want a negative result.
 *  - If we failed, try again.
 *
 * Unlocking is similarly hairy.  We may have multiple read locks
 * currently active.  However, we know we won't have any write
 * locks.
 */
static inline void _raw_read_lock(rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"	/* exclusively load the reader count */
"	adds	%0, %0, #1\n"	/* one more reader; negative means write-locked */
"	strexpl	%1, %0, [%2]\n"	/* store only if the count stayed non-negative */
"	rsbpls	%0, %1, #0\n"	/* turn a failed strex into a negative result */
"	bmi	1b"		/* negative: write-locked or lost the exclusive */
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc", "memory");

	smp_mb();
}
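/*
 * Dropping a read lock decrements the reader count; the ldrex/strex retry
 * loop is needed because other readers may be updating the count at the
 * same time.
 */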
static inline void _raw_read_unlock(rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	smp_mb();

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"	/* exclusively load the reader count */
"	sub	%0, %0, #1\n"	/* one reader fewer */
"	strex	%1, %0, [%2]\n"	/* try to store it back */
"	teq	%1, #0\n"	/* retry if another CPU beat us to it */
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc", "memory");
}
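/* There is no ARM-specific read_trylock; the generic implementation is used. */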
#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)

#endif /* __ASM_SPINLOCK_H */