#ifndef _X86_SPINLOCK_H_
#define _X86_SPINLOCK_H_

#include <asm/atomic.h>
#include <asm/rwlock.h>
#include <asm/processor.h>
#include <linux/compiler.h>
/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere.
 *
 * Simple spin lock operations. There are two variants: one clears IRQs
 * on the local processor, the other does not.
 *
 * These are fair FIFO ticket locks, currently limited to 256 CPUs.
 *
 * (The type definitions are in asm/spinlock_types.h.)
 */
#ifdef CONFIG_X86_32
# define LOCK_PTR_REG "a"
#else
# define LOCK_PTR_REG "D"
#endif
#if defined(CONFIG_X86_32) && \
	(defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE))
/*
 * On PPro SMP or if we are using OOSTORE, we use a locked operation to unlock
 * (PPro errata 66, 92).
 */
# define UNLOCK_LOCK_PREFIX LOCK_PREFIX
#else
# define UNLOCK_LOCK_PREFIX
#endif
/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail. The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourselves to the queue and noting our position), then waiting until the
 * head becomes equal to the initial value of the tail.
 *
 * We use an xadd covering *both* parts of the lock, to increment the tail and
 * also load the position of the head, which takes care of memory ordering
 * issues and should be optimal for the uncontended case. Note the tail must be
 * in the high part, because a wide xadd increment of the low part would carry
 * up and contaminate the high part.
 *
 * With fewer than 2^8 possible CPUs, we can use x86's partial registers to
 * save some instructions and make the code more elegant. There really isn't
 * much between them in performance though, especially as locks are out of
 * line.
 */
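/*
 * Illustrative sketch, not part of the original header: the same ticket
 * algorithm in plain C, with GCC's __sync_fetch_and_add standing in for the
 * locked xadd. The struct layout and function names are assumptions made
 * only for illustration (no cpu_relax(), no compiler barriers):
 *
 *	struct ticket_lock { volatile unsigned char head, tail; };
 *
 *	static void ticket_lock(struct ticket_lock *l)
 *	{
 *		// atomically take the next ticket from the tail
 *		unsigned char me = __sync_fetch_and_add(&l->tail, 1);
 *		while (l->head != me)
 *			;	// spin until our ticket is being served
 *	}
 *
 *	static void ticket_unlock(struct ticket_lock *l)
 *	{
 *		l->head++;	// serve the next waiter
 *	}
 */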
#if (NR_CPUS < 256)
static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
{
	int tmp = ACCESS_ONCE(lock->slock);

	return (((tmp >> 8) & 0xff) != (tmp & 0xff));
}
static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
{
	int tmp = ACCESS_ONCE(lock->slock);

	return (((tmp >> 8) & 0xff) - (tmp & 0xff)) > 1;
}
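/*
 * Worked example (illustration only): with head (low byte) = 0x03 and
 * tail (high byte) = 0x05, slock reads 0x0503; tail - head = 2, so one CPU
 * holds the lock and at least one more is queued behind it, hence
 * "contended". A difference of exactly 1 means held but uncontended.
 */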
static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	short inc = 0x0100;

	asm volatile (
		/* xadd: take a ticket; the old (tail:head) pair is left in inc */
		LOCK_PREFIX "xaddw %w0, %1\n"
		"1:\t"
		"cmpb %h0, %b0\n\t"	/* has the head reached our ticket? */
		"je 2f\n\t"
		"rep ; nop\n\t"		/* pause while spinning */
		"movb %1, %b0\n\t"	/* reload the current head byte */
		/* don't need lfence here, because loads are in-order */
		"jmp 1b\n"
		"2:"
		: "+Q" (inc), "+m" (lock->slock)
		:
		: "memory", "cc");
}
#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	int tmp;
	short new;

	asm volatile("movw %2,%w0\n\t"
		     "cmpb %h0,%b0\n\t"	/* free iff head == tail */
		     "jne 1f\n\t"
		     "movw %w0,%w1\n\t"
		     "incb %h1\n\t"	/* new value with tail + 1 */
		     "lock ; cmpxchgw %w1,%2\n\t"
		     "1:"
		     "sete %b1\n\t"
		     "movzbl %b1,%0\n\t"
		     : "=&a" (tmp), "=Q" (new), "+m" (lock->slock)
		     :
		     : "memory", "cc");

	return tmp;
}
static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
		     : "+m" (lock->slock)
		     :
		     : "memory", "cc");
}
#else
static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
{
	int tmp = ACCESS_ONCE(lock->slock);

	return (((tmp >> 16) & 0xffff) != (tmp & 0xffff));
}
static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
{
	int tmp = ACCESS_ONCE(lock->slock);

	return (((tmp >> 16) & 0xffff) - (tmp & 0xffff)) > 1;
}
static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	int inc = 0x00010000;
	int tmp;

	asm volatile("lock ; xaddl %0, %1\n"
		     "movzwl %w0, %2\n\t"	/* tmp = old head */
		     "shrl $16, %0\n\t"		/* inc = our ticket (old tail) */
		     "1:\t"
		     "cmpl %0, %2\n\t"
		     "je 2f\n\t"
		     "rep ; nop\n\t"
		     "movzwl %1, %2\n\t"	/* reload the current head */
		     /* don't need lfence here, because loads are in-order */
		     "jmp 1b\n"
		     "2:"
		     : "+Q" (inc), "+m" (lock->slock), "=r" (tmp)
		     :
		     : "memory", "cc");
}
#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	int tmp;
	int new;

	asm volatile("movl %2,%0\n\t"
		     "movl %0,%1\n\t"
		     "roll $16, %0\n\t"	/* swap halves; unchanged iff head == tail */
		     "cmpl %0,%1\n\t"
		     "jne 1f\n\t"
		     "addl $0x00010000, %1\n\t"	/* new value with tail + 1 */
		     "lock ; cmpxchgl %1,%2\n\t"
		     "1:"
		     "sete %b1\n\t"
		     "movzbl %b1,%0\n\t"
		     : "=&a" (tmp), "=r" (new), "+m" (lock->slock)
		     :
		     : "memory", "cc");

	return tmp;
}
static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
		     : "+m" (lock->slock)
		     :
		     : "memory", "cc");
}
#endif
static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
{
	while (__raw_spin_is_locked(lock))
		cpu_relax();
}
/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 */
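/*
 * Illustrative sketch, not part of the original header: the counter starts
 * at RW_LOCK_BIAS (0x01000000, from asm/rwlock.h). In rough C terms the
 * fast paths implemented in assembly below behave like:
 *
 *	// reader: take one unit; a writer in progress makes this negative
 *	if (--rw->lock < 0)
 *		__read_lock_failed(rw);
 *
 *	// writer: take the whole bias; any reader or writer leaves a remainder
 *	if ((rw->lock -= RW_LOCK_BIAS) != 0)
 *		__write_lock_failed(rw);
 *
 * The real code performs these updates with locked instructions.
 */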
/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int __raw_read_can_lock(raw_rwlock_t *lock)
{
	return (int)(lock)->lock > 0;
}
/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int __raw_write_can_lock(raw_rwlock_t *lock)
{
	return (lock)->lock == RW_LOCK_BIAS;
}
static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
		     "jns 1f\n"
		     "call __read_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (rw) : "memory");
}
static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
		     "jz 1f\n"
		     "call __write_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory");
}
static inline int __raw_read_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;

	atomic_dec(count);
	if (atomic_read(count) >= 0)
		return 1;
	atomic_inc(count);	/* back out: a writer holds the lock */
	return 0;
}
static inline int __raw_write_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;

	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;
	atomic_add(RW_LOCK_BIAS, count);	/* back out: lock was busy */
	return 0;
}
static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
}
static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX "addl %1, %0"
		     : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
}
#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif /* _X86_SPINLOCK_H_ */