#ifndef _ASM_X86_SPINLOCK_H
#define _ASM_X86_SPINLOCK_H

#include <linux/atomic.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/compiler.h>
#include <asm/paravirt.h>
/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations.  There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * These are fair FIFO ticket locks, which support up to 2^16 CPUs.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

#ifdef CONFIG_X86_32
# define LOCK_PTR_REG "a"
#else
# define LOCK_PTR_REG "D"
#endif

#if defined(CONFIG_X86_32) && \
	(defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE))
/*
 * On PPro SMP or if we are using OOSTORE, we use a locked operation to unlock
 * (PPro errata 66, 92)
 */
# define UNLOCK_LOCK_PREFIX LOCK_PREFIX
#else
# define UNLOCK_LOCK_PREFIX
#endif

/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail. The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourself to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 *
 * We use an xadd covering *both* parts of the lock, to increment the tail and
 * also load the position of the head, which takes care of memory ordering
 * issues and should be optimal for the uncontended case. Note the tail must be
 * in the high part, because a wide xadd increment of the low part would carry
 * up and contaminate the high part.
 */
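/*
 * A brief worked example of the ticket handoff described above (values are
 * illustrative; the actual field width depends on __ticket_t):
 *
 *   initial lock state:  head = 0, tail = 0
 *   CPU A locks:   xadd returns {head = 0, tail = 0}, lock becomes {0, 1};
 *                  head == tail, so CPU A owns the lock immediately.
 *   CPU B locks:   xadd returns {head = 0, tail = 1}, lock becomes {0, 2};
 *                  head != tail, so CPU B spins until head reaches 1.
 *   CPU A unlocks: head is bumped to 1, which releases CPU B in FIFO order.
 */
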
static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
{
	register struct __raw_tickets inc = { .tail = 1 };

	inc = xadd(&lock->tickets, inc);

	for (;;) {
		if (inc.head == inc.tail)
			break;
		cpu_relax();
		inc.head = ACCESS_ONCE(lock->tickets.head);
	}
	barrier();	/* make sure nothing creeps before the lock is taken */
}

static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
	arch_spinlock_t old, new;

	old.tickets = ACCESS_ONCE(lock->tickets);
	if (old.tickets.head != old.tickets.tail)
		return 0;

	new.head_tail = old.head_tail + (1 << TICKET_SHIFT);

	/* cmpxchg is a full barrier, so nothing can move before it */
	return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
}

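/*
 * Note on the trylock above: TICKET_SHIFT (from asm/spinlock_types.h) is the
 * bit width of one ticket field, so adding (1 << TICKET_SHIFT) to head_tail
 * bumps only the tail (the high part); the cmpxchg therefore claims the next
 * ticket only if the lock was still unowned when it was sampled.
 */
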
static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
{
	__add(&lock->tickets.head, 1, UNLOCK_LOCK_PREFIX);
}

static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
{
	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);

	return tmp.tail != tmp.head;
}

static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
{
	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);

	return (__ticket_t)(tmp.tail - tmp.head) > 1;
}

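/*
 * For the check above: tail - head == 0 means the lock is free, == 1 means it
 * is held with no waiters, and > 1 means at least one other CPU is queued
 * behind the owner, which is what "contended" reports.
 */
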
#ifndef CONFIG_PARAVIRT_SPINLOCKS

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	return __ticket_spin_is_locked(lock);
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	return __ticket_spin_is_contended(lock);
}
#define arch_spin_is_contended	arch_spin_is_contended

static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
	__ticket_spin_lock(lock);
}

static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	return __ticket_spin_trylock(lock);
}

static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__ticket_spin_unlock(lock);
}

static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
						 unsigned long flags)
{
	arch_spin_lock(lock);
}

#endif	/* CONFIG_PARAVIRT_SPINLOCKS */

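/*
 * The arch_spin_* hooks above are not meant to be called directly; the
 * generic spinlock API in <linux/spinlock.h> wraps them. A minimal usage
 * sketch (my_lock is a hypothetical lock, shown only for illustration):
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *
 *	spin_lock(&my_lock);
 *	... critical section ...
 *	spin_unlock(&my_lock);
 */
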
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	while (arch_spin_is_locked(lock))
		cpu_relax();
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 */

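/*
 * A rough sketch of the counter arithmetic (RW_LOCK_BIAS is the unlocked
 * value, 0x00100000 in the common configuration): each reader decrements the
 * counter by one and succeeds while the result stays non-negative; a writer
 * subtracts the full bias and succeeds only if the counter hits zero, i.e.
 * there were no readers and no other writer. The slow paths in
 * __read_lock_failed/__write_lock_failed undo the change and spin.
 */
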
/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int arch_read_can_lock(arch_rwlock_t *lock)
{
	return lock->lock > 0;
}

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int arch_write_can_lock(arch_rwlock_t *lock)
{
	return lock->write == WRITE_LOCK_CMP;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
		     "jns 1f\n"
		     "call __read_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (rw) : "memory");
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
		     "jz 1f\n"
		     "call __write_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (&rw->write), "i" (RW_LOCK_BIAS)
		     : "memory");
}

static inline int arch_read_trylock(arch_rwlock_t *lock)
{
	READ_LOCK_ATOMIC(t) *count = (READ_LOCK_ATOMIC(t) *)lock;

	if (READ_LOCK_ATOMIC(dec_return)(count) >= 0)
		return 1;
	READ_LOCK_ATOMIC(inc)(count);
	return 0;
}

static inline int arch_write_trylock(arch_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)&lock->write;

	if (atomic_sub_and_test(WRITE_LOCK_CMP, count))
		return 1;
	atomic_add(WRITE_LOCK_CMP, count);
	return 0;
}

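/*
 * If the atomic_sub_and_test() above does not reach zero, a reader or another
 * writer holds the lock, so the bias is added back before failing, leaving
 * the counter as it was found.
 */
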
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
		     :"+m" (rw->lock) : : "memory");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
		     : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#undef READ_LOCK_SIZE
#undef READ_LOCK_ATOMIC
#undef WRITE_LOCK_ADD
#undef WRITE_LOCK_SUB
#undef WRITE_LOCK_CMP

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

/* The {read|write|spin}_lock() on x86 are full memory barriers. */
static inline void smp_mb__after_lock(void) { }
#define ARCH_HAS_SMP_MB_AFTER_LOCK

#endif /* _ASM_X86_SPINLOCK_H */