#ifndef _ASM_X86_SPINLOCK_H
#define _ASM_X86_SPINLOCK_H

#include <linux/atomic.h>
#include <asm/processor.h>
#include <linux/compiler.h>
#include <asm/paravirt.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * These are fair FIFO ticket locks, which are currently limited to 256
 * CPUs.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

#ifdef CONFIG_X86_32
# define LOCK_PTR_REG "a"
# define REG_PTR_MODE "k"
#else
# define LOCK_PTR_REG "D"
# define REG_PTR_MODE "q"
#endif

#if defined(CONFIG_X86_32) && \
	(defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE))
/*
 * On PPro SMP or if we are using OOSTORE, we use a locked operation to unlock
 * (PPro errata 66, 92)
 */
# define UNLOCK_LOCK_PREFIX LOCK_PREFIX
#else
# define UNLOCK_LOCK_PREFIX
#endif

/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail.  The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourself to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 *
 * We use an xadd covering *both* parts of the lock, to increment the tail and
 * also load the position of the head, which takes care of memory ordering
 * issues and should be optimal for the uncontended case.  Note the tail must
 * be in the high part, because a wide xadd increment of the low part would
 * carry up and contaminate the high part.
 *
 * With fewer than 2^8 possible CPUs, we can use x86's partial registers to
 * save some instructions and make the code more elegant.  There really isn't
 * much between them in performance though, especially as locks are out of line.
 */

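/*
 * A rough C-level sketch of the scheme described above, with a hypothetical
 * ticketpair type and fetch_and_inc() helper standing in for the real
 * arch_spinlock_t and the locked xadd used below:
 *
 *	struct ticketpair { unsigned char head, tail; };
 *
 *	void sketch_lock(struct ticketpair *t)
 *	{
 *		unsigned char me = fetch_and_inc(&t->tail);	// take a ticket
 *		while (ACCESS_ONCE(t->head) != me)		// wait to be served
 *			cpu_relax();
 *	}
 *
 *	void sketch_unlock(struct ticketpair *t)
 *	{
 *		t->head++;					// serve next ticket
 *	}
 *
 * fetch_and_inc() stands in for the single locked xadd: the real code grabs
 * the ticket and reads the current head in one atomic instruction.
 */
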
#if (NR_CPUS < 256)
#define TICKET_SHIFT 8

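/*
 * In __ticket_spin_lock() below, the locked xaddw adds 0x0100 to the lock
 * word, bumping the tail byte and returning the old head/tail pair in one
 * atomic step; the spin loop then keeps re-reading the head byte until it
 * equals the ticket we took.
 */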
static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
{
	short inc = 0x0100;

	asm volatile (
		LOCK_PREFIX "xaddw %w0, %1\n"
		"1:\t"
		"cmpb %h0, %b0\n\t"
		"je 2f\n\t"
		"rep ; nop\n\t"
		"movb %1, %b0\n\t"
		/* don't need lfence here, because loads are in-order */
		"jmp 1b\n"
		"2:"
		: "+Q" (inc), "+m" (lock->slock)
		:
		: "memory", "cc");
}

static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
	int tmp, new;

	asm volatile("movzwl %2, %0\n\t"
		     "cmpb %h0,%b0\n\t"
		     "leal 0x100(%" REG_PTR_MODE "0), %1\n\t"
		     "jne 1f\n\t"
		     LOCK_PREFIX "cmpxchgw %w1,%2\n\t"
		     "1:"
		     "sete %b1\n\t"
		     "movzbl %b1,%0\n\t"
		     : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
		     :
		     : "memory", "cc");

	return tmp;
}

static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
{
	asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
		     : "+m" (lock->slock)
		     :
		     : "memory", "cc");
}

#else
#define TICKET_SHIFT 16

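/*
 * Same algorithm as the sub-256 CPU case, but with 16-bit head and tail
 * fields: the ticket no longer fits in an 8-bit partial register pair, so
 * a scratch register holds the re-read head value for the comparison.
 */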
static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
{
	int inc = 0x00010000;
	int tmp;

	asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
		     "movzwl %w0, %2\n\t"
		     "shrl $16, %0\n\t"
		     "1:\t"
		     "cmpl %0, %2\n\t"
		     "je 2f\n\t"
		     "rep ; nop\n\t"
		     "movzwl %1, %2\n\t"
		     /* don't need lfence here, because loads are in-order */
		     "jmp 1b\n"
		     "2:"
		     : "+r" (inc), "+m" (lock->slock), "=&r" (tmp)
		     :
		     : "memory", "cc");
}

static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
	int tmp;
	int new;

	asm volatile("movl %2,%0\n\t"
		     "movl %0,%1\n\t"
		     "roll $16, %0\n\t"
		     "cmpl %0,%1\n\t"
		     "leal 0x00010000(%" REG_PTR_MODE "0), %1\n\t"
		     "jne 1f\n\t"
		     LOCK_PREFIX "cmpxchgl %1,%2\n\t"
		     "1:"
		     "sete %b1\n\t"
		     "movzbl %b1,%0\n\t"
		     : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
		     :
		     : "memory", "cc");

	return tmp;
}

static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
{
	asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
		     : "+m" (lock->slock)
		     :
		     : "memory", "cc");
}
#endif

static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
{
	int tmp = ACCESS_ONCE(lock->slock);

	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1 << TICKET_SHIFT) - 1));
}

static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
{
	int tmp = ACCESS_ONCE(lock->slock);

	return (((tmp >> TICKET_SHIFT) - tmp) & ((1 << TICKET_SHIFT) - 1)) > 1;
}

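/*
 * Worked example with TICKET_SHIFT == 8 (illustrative values): if slock is
 * 0x0503, the tail is 0x05 and the head is 0x03.  (0x05 ^ 0x03) & 0xff is
 * non-zero, so the lock is held; (0x05 - 0x03) & 0xff == 2 is greater than
 * 1, so at least one CPU beyond the current owner is queued and the lock
 * counts as contended.
 */
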
#ifndef CONFIG_PARAVIRT_SPINLOCKS

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	return __ticket_spin_is_locked(lock);
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	return __ticket_spin_is_contended(lock);
}
#define arch_spin_is_contended	arch_spin_is_contended

static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
	__ticket_spin_lock(lock);
}

static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	return __ticket_spin_trylock(lock);
}

static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__ticket_spin_unlock(lock);
}

static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
						 unsigned long flags)
{
	arch_spin_lock(lock);
}

#endif	/* CONFIG_PARAVIRT_SPINLOCKS */

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	while (arch_spin_is_locked(lock))
		cpu_relax();
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers.  For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 */

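/*
 * Sketch of the counter scheme on 32-bit, where readers and the writer
 * share a single counter (illustrative; RW_LOCK_BIAS comes from
 * asm/rwlock.h and is conventionally 0x01000000):
 *
 *	lock == RW_LOCK_BIAS		unlocked
 *	lock == RW_LOCK_BIAS - n	held by n readers
 *	lock == 0			held by a writer
 *
 * A reader decrements by one and takes the slow path if the result went
 * negative; a writer subtracts the whole bias and takes the slow path
 * unless the result is exactly zero, so it excludes readers and other
 * writers alike.  On 64-bit the same idea is split across the ->lock and
 * ->write fields via the READ_LOCK and WRITE_LOCK helper macros.
 */
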
/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int arch_read_can_lock(arch_rwlock_t *lock)
{
	return lock->lock > 0;
}

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int arch_write_can_lock(arch_rwlock_t *lock)
{
	return lock->write == WRITE_LOCK_CMP;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
		     "jns 1f\n"
		     "call __read_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (rw) : "memory");
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
		     "jz 1f\n"
		     "call __write_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (&rw->write), "i" (RW_LOCK_BIAS)
		     : "memory");
}

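/*
 * The contended cases are handled out of line by __read_lock_failed and
 * __write_lock_failed, which take the lock pointer in the register named
 * by LOCK_PTR_REG rather than through the normal C calling convention.
 */
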
static inline int arch_read_trylock(arch_rwlock_t *lock)
{
	READ_LOCK_ATOMIC(t) *count = (READ_LOCK_ATOMIC(t) *)lock;

	if (READ_LOCK_ATOMIC(dec_return)(count) >= 0)
		return 1;
	READ_LOCK_ATOMIC(inc)(count);
	return 0;
}

static inline int arch_write_trylock(arch_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)&lock->write;

	if (atomic_sub_and_test(WRITE_LOCK_CMP, count))
		return 1;
	atomic_add(WRITE_LOCK_CMP, count);
	return 0;
}

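/*
 * Both trylocks follow the same optimistic pattern: apply the lock
 * operation speculatively, and if the counter reveals a conflict (the
 * reader count went negative, or the writer's atomic_sub_and_test() did
 * not hit zero), undo the change and report failure instead of spinning.
 */
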
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
		     :"+m" (rw->lock) : : "memory");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
		     : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#undef READ_LOCK_SIZE
#undef READ_LOCK_ATOMIC
#undef WRITE_LOCK_ADD
#undef WRITE_LOCK_SUB
#undef WRITE_LOCK_CMP

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

/* The {read|write|spin}_lock() on x86 are full memory barriers. */
static inline void smp_mb__after_lock(void) { }
#define ARCH_HAS_SMP_MB_AFTER_LOCK

#endif /* _ASM_X86_SPINLOCK_H */