#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/atomic.h>
#include <asm/rwlock.h>
#include <asm/page.h>
#include <linux/compiler.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations.  There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * We make no fairness assumptions. They have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

#define __raw_spin_is_locked(x) \
		(*(volatile signed char *)(&(x)->slock) <= 0)

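/*
 * Usage sketch (an illustration, not code from this file): callers
 * normally reach these primitives through spin_lock()/spin_unlock()
 * from <linux/spinlock.h>, which pair them up roughly like this:
 *
 *	raw_spinlock_t lock = __RAW_SPIN_LOCK_UNLOCKED;
 *
 *	__raw_spin_lock(&lock);
 *	... critical section, entered by one CPU at a time ...
 *	__raw_spin_unlock(&lock);
 */
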
#define __raw_spin_lock_string \
	"\n1:\t" \
	"lock ; decb %0\n\t" \
	"jns 3f\n" \
	"2:\t" \
	"rep;nop\n\t" \
	"cmpb $0,%0\n\t" \
	"jle 2b\n\t" \
	"jmp 1b\n" \
	"3:\n\t"

/*
 * NOTE: there's an irqs-on section here, which normally would have to be
 * irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use
 * __raw_spin_lock_string_flags().
 */
#define __raw_spin_lock_string_flags \
	"\n1:\t" \
	"lock ; decb %0\n\t" \
	"jns 5f\n" \
	"2:\t" \
	"testl $0x200, %1\n\t" \
	"jz 4f\n\t" \
	"sti\n" \
	"3:\t" \
	"rep;nop\n\t" \
	"cmpb $0, %0\n\t" \
	"jle 3b\n\t" \
	"cli\n\t" \
	"jmp 1b\n" \
	"4:\t" \
	"rep;nop\n\t" \
	"cmpb $0, %0\n\t" \
	"jg 1b\n\t" \
	"jmp 4b\n" \
	"5:\n\t"

#define __raw_spin_lock_string_up \
	"\n\tdecb %0"

static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	alternative_smp(
		__raw_spin_lock_string,
		__raw_spin_lock_string_up,
		"+m" (lock->slock) : : "memory");
}

/*
 * It is easier for the lock validator if interrupts are not re-enabled
 * in the middle of a lock-acquire. This is a performance feature anyway
 * so we turn it off:
 */
#ifndef CONFIG_PROVE_LOCKING
static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
{
	alternative_smp(
		__raw_spin_lock_string_flags,
		__raw_spin_lock_string_up,
		"+m" (lock->slock) : "r" (flags) : "memory");
}
#endif

static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	char oldval;
	__asm__ __volatile__(
		"xchgb %b0,%1"
		:"=q" (oldval), "+m" (lock->slock)
		:"0" (0) : "memory");
	return oldval > 0;
}

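/*
 * Example (a sketch, not from this file): the trylock never spins and
 * returns non-zero on success, so a caller that must not block can do:
 *
 *	if (__raw_spin_trylock(&lock)) {
 *		... critical section ...
 *		__raw_spin_unlock(&lock);
 *	} else {
 *		... lock is contended, take a slow path ...
 *	}
 */
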
/*
 * __raw_spin_unlock based on writing $1 to the low byte.
 * This method works. Despite all the confusion.
 * (except on PPro SMP or if we are using OOSTORE, so we use xchgb there)
 * (PPro errata 66, 92)
 */

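/*
 * Background: x86 does not reorder stores against older stores, so the
 * plain "movb $1" release below cannot become visible before the
 * critical section's own writes have. The PPro errata and
 * CONFIG_X86_OOSTORE weaken exactly that guarantee, which is why those
 * configurations fall back to the implicitly locked "xchgb" instead.
 */
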
#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)

#define __raw_spin_unlock_string \
	"movb $1,%0" \
		:"+m" (lock->slock) : : "memory"

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__asm__ __volatile__(
		__raw_spin_unlock_string
	);
}

#else

#define __raw_spin_unlock_string \
	"xchgb %b0, %1" \
		:"=q" (oldval), "+m" (lock->slock) \
		:"0" (oldval) : "memory"

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	char oldval = 1;

	__asm__ __volatile__(
		__raw_spin_unlock_string
	);
}

#endif

#define __raw_spin_unlock_wait(lock) \
	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get a
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 *
 * The inline assembly is non-obvious. Think about it.
 *
 * Changed to use the same technique as rw semaphores.  See
 * semaphore.h for details.  -ben
 *
 * the helpers are in arch/i386/kernel/semaphore.c
 */

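/*
 * Counter states at a glance (a sketch; RW_LOCK_BIAS is 0x01000000,
 * from asm/rwlock.h):
 *
 *	lock == RW_LOCK_BIAS		unlocked
 *	lock == RW_LOCK_BIAS - n	held by n readers (still positive)
 *	lock == 0			held by one writer (bias subtracted)
 *	lock < 0 (sign bit set)		contended: someone hit a held lock
 *					and must go through the slow-path
 *					helpers named below
 */
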
/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define __raw_read_can_lock(x)		((int)(x)->lock > 0)

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define __raw_write_can_lock(x)		((x)->lock == RW_LOCK_BIAS)

static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	__build_read_lock(rw, "__read_lock_failed");
}

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	__build_write_lock(rw, "__write_lock_failed");
}

static inline int __raw_read_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;
	atomic_dec(count);
	if (atomic_read(count) >= 0)
		return 1;
	atomic_inc(count);
	return 0;
}

static inline int __raw_write_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;
	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;
	atomic_add(RW_LOCK_BIAS, count);
	return 0;
}

static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX "addl $" RW_LOCK_BIAS_STR ", %0"
				 : "+m" (rw->lock) : : "memory");
}

#endif /* __ASM_SPINLOCK_H */