#ifndef _ASM_X86_SPINLOCK_H
#define _ASM_X86_SPINLOCK_H

#include <linux/jump_label.h>
#include <linux/atomic.h>
#include <asm/processor.h>
#include <linux/compiler.h>
#include <asm/paravirt.h>
#include <asm/bitops.h>
/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations.  There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * These are fair FIFO ticket locks, which support up to 2^16 CPUs.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
#ifdef CONFIG_X86_32
# define LOCK_PTR_REG "a"
#else
# define LOCK_PTR_REG "D"
#endif
#if defined(CONFIG_X86_32) && \
	(defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE))
/*
 * On PPro SMP or if we are using OOSTORE, we use a locked operation to unlock
 * (PPro errata 66, 92)
 */
# define UNLOCK_LOCK_PREFIX LOCK_PREFIX
#else
# define UNLOCK_LOCK_PREFIX
#endif
/* How long a lock should spin before we consider blocking */
#define SPIN_THRESHOLD	(1 << 15)
extern struct static_key paravirt_ticketlocks_enabled;
static __always_inline bool static_key_false(struct static_key *key);
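
/*
 * Summary of the paravirt slowpath mechanism implemented below: a waiter
 * that has spun for SPIN_THRESHOLD iterations calls __ticket_lock_spinning(),
 * whose backend sets TICKET_SLOWPATH_FLAG in the tail and blocks in the
 * hypervisor; arch_spin_unlock() sees the flag and kicks the next waiter.
 */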
#ifdef CONFIG_PARAVIRT_SPINLOCKS

static inline void __ticket_enter_slowpath(arch_spinlock_t *lock)
{
	set_bit(0, (volatile unsigned long *)&lock->tickets.tail);
}

#else	/* !CONFIG_PARAVIRT_SPINLOCKS */
static __always_inline void __ticket_lock_spinning(arch_spinlock_t *lock,
							__ticket_t ticket)
{
}
static inline void __ticket_unlock_kick(arch_spinlock_t *lock,
							__ticket_t ticket)
{
}

#endif /* CONFIG_PARAVIRT_SPINLOCKS */
static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.tickets.head == lock.tickets.tail;
}
/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail. The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourselves to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 *
 * We use an xadd covering *both* parts of the lock, to increment the tail and
 * also load the position of the head, which takes care of memory ordering
 * issues and should be optimal for the uncontended case. Note the tail must be
 * in the high part, because a wide xadd increment of the low part would carry
 * up and contaminate the high part.
 */
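
/*
 * Illustrative example only (assuming TICKET_LOCK_INC == 1, i.e. no
 * paravirt slowpath flag): a free lock has head == tail == 0.
 * arch_spin_lock() xadds {.tail = 1}, gets back the old value
 * {head = 0, tail = 0}, and since head == tail it owns the lock; the
 * lock word is now {head = 0, tail = 1}.  A second CPU locking at that
 * point gets back {head = 0, tail = 1} and spins until
 * arch_spin_unlock() advances head to 1.
 */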
static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
	register struct __raw_tickets inc = { .tail = TICKET_LOCK_INC };

	inc = xadd(&lock->tickets, inc);
	if (likely(inc.head == inc.tail))
		goto out;

	inc.tail &= ~TICKET_SLOWPATH_FLAG;
	for (;;) {
		unsigned count = SPIN_THRESHOLD;

		do {
			if (ACCESS_ONCE(lock->tickets.head) == inc.tail)
				goto out;
			cpu_relax();
		} while (--count);
		__ticket_lock_spinning(lock, inc.tail);
	}
out:	barrier();	/* make sure nothing creeps before the lock is taken */
}
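
/*
 * Single-shot attempt: succeeds only if the lock currently looks free
 * (the slowpath flag in the tail is masked off for that check), taking
 * the ticket with one cmpxchg on the combined head/tail word.
 */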
static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	arch_spinlock_t old, new;

	old.tickets = ACCESS_ONCE(lock->tickets);
	if (old.tickets.head != (old.tickets.tail & ~TICKET_SLOWPATH_FLAG))
		return 0;

	new.head_tail = old.head_tail + (TICKET_LOCK_INC << TICKET_SHIFT);

	/* cmpxchg is a full barrier, so nothing can move before it */
	return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
}
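
/*
 * Paravirt unlock slow path: reached (with paravirt_ticketlocks_enabled)
 * when the tail still carries TICKET_SLOWPATH_FLAG after the head has
 * been advanced; it either clears a stale flag or kicks the next waiter.
 */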
static inline void __ticket_unlock_slowpath(arch_spinlock_t *lock,
					    arch_spinlock_t old)
{
	arch_spinlock_t new;

	BUILD_BUG_ON(((__ticket_t)NR_CPUS) != NR_CPUS);

	/* Perform the unlock on the "before" copy */
	old.tickets.head += TICKET_LOCK_INC;

	/* Clear the slowpath flag */
	new.head_tail = old.head_tail & ~(TICKET_SLOWPATH_FLAG << TICKET_SHIFT);

	/*
	 * If the lock is uncontended, clear the flag - use cmpxchg in
	 * case it changes behind our back though.
	 */
	if (new.tickets.head != new.tickets.tail ||
	    cmpxchg(&lock->head_tail, old.head_tail,
					new.head_tail) != old.head_tail) {
		/*
		 * Lock still has someone queued for it, so wake up an
		 * appropriate waiter.
		 */
		__ticket_unlock_kick(lock, old.tickets.head);
	}
}
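
/*
 * Unlock adds TICKET_LOCK_INC to the head.  When paravirt ticketlocks
 * are enabled we snapshot the lock first and use add_smp() (a full
 * barrier) so the slowpath-flag check is ordered after the release;
 * otherwise a plain __add() with UNLOCK_LOCK_PREFIX is sufficient.
 */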
static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	if (TICKET_SLOWPATH_FLAG &&
	    static_key_false(&paravirt_ticketlocks_enabled)) {
		arch_spinlock_t prev;

		prev = *lock;
		add_smp(&lock->tickets.head, TICKET_LOCK_INC);

		/* add_smp() is a full mb() */

		if (unlikely(lock->tickets.tail & TICKET_SLOWPATH_FLAG))
			__ticket_unlock_slowpath(lock, prev);
	} else
		__add(&lock->tickets.head, TICKET_LOCK_INC, UNLOCK_LOCK_PREFIX);
}
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);

	return tmp.tail != tmp.head;
}
static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);

	return (__ticket_t)(tmp.tail - tmp.head) > TICKET_LOCK_INC;
}
#define arch_spin_is_contended	arch_spin_is_contended
static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
						  unsigned long flags)
{
	arch_spin_lock(lock);
}
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	while (arch_spin_is_locked(lock))
		cpu_relax();
}
/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 */
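
/*
 * Illustrative example only (assuming the usual x86 RW_LOCK_BIAS of
 * 0x00100000): the counter starts at the bias.  Each reader decrements
 * it by one, so a positive value means readers may still enter.  A
 * writer subtracts the whole bias and must see the result hit zero,
 * i.e. no readers and no other writer.  A negative result is the
 * "contended" (sign-bit) case handled by the out-of-line slow paths.
 */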
/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int arch_read_can_lock(arch_rwlock_t *lock)
{
	return lock->lock > 0;
}
/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int arch_write_can_lock(arch_rwlock_t *lock)
{
	return lock->write == WRITE_LOCK_CMP;
}
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
		     "jns 1f\n"
		     "call __read_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (rw) : "memory");
}
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
		     "jz 1f\n"
		     "call __write_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (&rw->write), "i" (RW_LOCK_BIAS)
		     : "memory");
}
static inline int arch_read_trylock(arch_rwlock_t *lock)
{
	READ_LOCK_ATOMIC(t) *count = (READ_LOCK_ATOMIC(t) *)lock;

	if (READ_LOCK_ATOMIC(dec_return)(count) >= 0)
		return 1;
	READ_LOCK_ATOMIC(inc)(count);
	return 0;
}
static inline int arch_write_trylock(arch_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)&lock->write;

	if (atomic_sub_and_test(WRITE_LOCK_CMP, count))
		return 1;
	atomic_add(WRITE_LOCK_CMP, count);
	return 0;
}
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
		     :"+m" (rw->lock) : : "memory");
}
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
		     : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
}
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
#undef READ_LOCK_SIZE
#undef READ_LOCK_ATOMIC
#undef WRITE_LOCK_ADD
#undef WRITE_LOCK_SUB
#undef WRITE_LOCK_CMP
#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()
#endif /* _ASM_X86_SPINLOCK_H */