/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _GEN_PV_LOCK_SLOWPATH
#error "do not include this file"
#endif

#include <linux/hash.h>
#include <linux/memblock.h>
#include <linux/debug_locks.h>

/*
 * Implement paravirt qspinlocks; the general idea is to halt the vcpus instead
 * of spinning them.
 *
 * This relies on the architecture to provide two paravirt hypercalls:
 *
 *   pv_wait(u8 *ptr, u8 val) -- suspends the vcpu if *ptr == val
 *   pv_kick(cpu)             -- wakes a suspended vcpu
 *
 * Using these we implement __pv_queued_spin_lock_slowpath() and
 * __pv_queued_spin_unlock() to replace native_queued_spin_lock_slowpath() and
 * native_queued_spin_unlock().
 */
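
/*
 * Illustrative sketch only (not part of this file's interface): a
 * hypothetical architecture backend could wire the two hooks up roughly as
 * follows, where hv_halt_unless_changed() and hv_wake_vcpu() stand in for
 * whatever hypercalls the hypervisor actually provides:
 *
 *	static void example_pv_wait(u8 *ptr, u8 val)
 *	{
 *		if (READ_ONCE(*ptr) == val)
 *			hv_halt_unless_changed(ptr, val);
 *	}
 *
 *	static void example_pv_kick(int cpu)
 *	{
 *		hv_wake_vcpu(cpu);
 *	}
 *
 * Real backends (KVM, Xen, ...) install their own implementations of these
 * hooks during boot.
 */
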
#define _Q_SLOW_VAL	(3U << _Q_LOCKED_OFFSET)

/*
 * Queue Node Adaptive Spinning
 *
 * A queue node vCPU will stop spinning if the vCPU in the previous node is
 * not running. The one lock stealing attempt allowed at slowpath entry
 * mitigates the slight slowdown for non-overcommitted guests with this
 * aggressive wait-early mechanism.
 *
 * The status of the previous node will be checked at a fixed interval
 * controlled by PV_PREV_CHECK_MASK. This is to ensure that we won't
 * pound on the cacheline of the previous node too heavily.
 */
#define PV_PREV_CHECK_MASK	0xff
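
/*
 * With a mask of 0xff, the previous node's state is sampled only on every
 * 256th iteration of the spin loop in pv_wait_node(), i.e. whenever the low
 * byte of the loop counter is zero.
 */
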
/*
 * Queue node uses: VCPU_RUNNING & VCPU_HALTED.
 * Queue head uses: VCPU_RUNNING & VCPU_HASHED.
 */
enum vcpu_state {
	VCPU_RUNNING = 0,
	VCPU_HALTED,		/* Used only in pv_wait_node */
	VCPU_HASHED,		/* = pv_hash'ed + VCPU_HALTED */
};

struct pv_node {
	struct mcs_spinlock	mcs;
	int			cpu;
	u8			state;
};
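
/*
 * Informational summary of the vCPU state transitions used below:
 *
 *	VCPU_RUNNING -> VCPU_HALTED	queue node is about to pv_wait() in
 *					pv_wait_node()
 *	VCPU_HALTED  -> VCPU_HASHED	lock holder hashed the lock on the
 *					waiter's behalf in pv_kick_node()
 *	VCPU_HALTED  -> VCPU_RUNNING	waiter woke up without being hashed
 *	VCPU_RUNNING -> VCPU_HASHED	queue head hashed the lock itself in
 *					pv_wait_head_or_lock() before sleeping
 */
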
/*
 * Hybrid PV queued/unfair lock
 *
 * By replacing the regular queued_spin_trylock() with the function below,
 * it will be called once when a lock waiter enters the PV slowpath before
 * being queued.
 *
 * The pending bit is set by the queue head vCPU of the MCS wait queue in
 * pv_wait_head_or_lock() to signal that it is ready to spin on the lock.
 * When that bit becomes visible to the incoming waiters, no lock stealing
 * is allowed. The function will return immediately to make the waiters
 * enter the MCS wait queue. So lock starvation shouldn't happen as long
 * as the queued mode vCPUs are actively running to set the pending bit
 * and hence disabling lock stealing.
 *
 * When the pending bit isn't set, the lock waiters will stay in the unfair
 * mode spinning on the lock unless the MCS wait queue is empty. In this
 * case, the lock waiters will enter the queued mode slowpath trying to
 * become the queue head and set the pending bit.
 *
 * This hybrid PV queued/unfair lock combines the best attributes of a
 * queued lock (no lock starvation) and an unfair lock (good performance
 * on not heavily contended locks).
 */
#define queued_spin_trylock(l)	pv_hybrid_queued_unfair_trylock(l)
static inline bool pv_hybrid_queued_unfair_trylock(struct qspinlock *lock)
{
	/*
	 * Stay in unfair lock mode as long as queued mode waiters are
	 * present in the MCS wait queue but the pending bit isn't set.
	 */
	for (;;) {
		int val = atomic_read(&lock->val);
		u8 old = 0;

		if (!(val & _Q_LOCKED_PENDING_MASK) &&
		    try_cmpxchg_acquire(&lock->locked, &old, _Q_LOCKED_VAL)) {
			lockevent_inc(pv_lock_stealing);
			return true;
		}
		if (!(val & _Q_TAIL_MASK) || (val & _Q_PENDING_MASK))
			break;

		cpu_relax();
	}

	return false;
}

/*
 * The pending bit is used by the queue head vCPU to indicate that it
 * is actively spinning on the lock and no lock stealing is allowed.
 */
#if _Q_PENDING_BITS == 8
static __always_inline void set_pending(struct qspinlock *lock)
{
	WRITE_ONCE(lock->pending, 1);
}

/*
 * The pending bit check in pv_queued_spin_steal_lock() isn't a memory
 * barrier. Therefore, an atomic cmpxchg_acquire() is used to acquire the
 * lock just to be sure that it will get it.
 */
static __always_inline bool trylock_clear_pending(struct qspinlock *lock)
{
	u16 old = _Q_PENDING_VAL;

	return !READ_ONCE(lock->locked) &&
	       try_cmpxchg_acquire(&lock->locked_pending, &old, _Q_LOCKED_VAL);
}
#else /* _Q_PENDING_BITS == 8 */
static __always_inline void set_pending(struct qspinlock *lock)
{
	atomic_or(_Q_PENDING_VAL, &lock->val);
}

static __always_inline bool trylock_clear_pending(struct qspinlock *lock)
{
	int old, new;

	old = atomic_read(&lock->val);

	do {
		if (old & _Q_LOCKED_MASK)
			return false;
		/*
		 * Try to clear pending bit & set locked bit
		 */
		new = (old & ~_Q_PENDING_MASK) | _Q_LOCKED_VAL;
	} while (!atomic_try_cmpxchg_acquire(&lock->val, &old, new));

	return true;
}
#endif /* _Q_PENDING_BITS == 8 */
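
/*
 * Note on the two variants above: when _Q_PENDING_BITS == 8, the pending
 * bit occupies its own byte, so set_pending() can use a plain byte store
 * and trylock_clear_pending() can claim the locked+pending halfword with a
 * single 16-bit cmpxchg. Otherwise the pending bit shares the 32-bit word
 * with the tail and has to be manipulated with full-word atomics.
 */
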
/*
 * Lock and MCS node addresses hash table for fast lookup
 *
 * Hashing is done on a per-cacheline basis to minimize the need to access
 * more than one cacheline.
 *
 * Dynamically allocate a hash table big enough to hold at least 4X the
 * number of possible cpus in the system. Allocation is done on page
 * granularity. So the minimum number of hash buckets should be at least
 * 256 (64-bit) or 512 (32-bit) to fully utilize a 4k page.
 *
 * Since we should not be holding locks from NMI context (very rare indeed) the
 * max load factor is 0.75, which is around the point where open addressing
 * breaks down.
 */
struct pv_hash_entry {
	struct qspinlock *lock;
	struct pv_node   *node;
};

#define PV_HE_PER_LINE	(SMP_CACHE_BYTES / sizeof(struct pv_hash_entry))
#define PV_HE_MIN	(PAGE_SIZE / sizeof(struct pv_hash_entry))

static struct pv_hash_entry *pv_lock_hash;
static unsigned int pv_lock_hash_bits __read_mostly;

/*
 * Allocate memory for the PV qspinlock hash buckets
 *
 * This function should be called from the paravirt spinlock initialization
 * routine.
 */
void __init __pv_init_lock_hash(void)
{
	int pv_hash_size = ALIGN(4 * num_possible_cpus(), PV_HE_PER_LINE);

	if (pv_hash_size < PV_HE_MIN)
		pv_hash_size = PV_HE_MIN;

	/*
	 * Allocate space from bootmem which should be page-size aligned
	 * and hence cacheline aligned.
	 */
	pv_lock_hash = alloc_large_system_hash("PV qspinlock",
					       sizeof(struct pv_hash_entry),
					       pv_hash_size, 0,
					       HASH_EARLY | HASH_ZERO,
					       &pv_lock_hash_bits, NULL,
					       pv_hash_size, pv_hash_size);
}
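
/*
 * Sizing example (illustrative, assuming 64-bit pointers, 64-byte cache
 * lines and 4 KiB pages): each pv_hash_entry is 16 bytes, so PV_HE_PER_LINE
 * is 4 and PV_HE_MIN is 256. A 64-CPU system therefore gets
 * max(4 * 64, 256) = 256 entries, i.e. exactly one page of hash buckets.
 */

/*
 * Iterate over the hash buckets, starting at the beginning of the cacheline
 * that contains the home bucket of @hash and then probing linearly (open
 * addressing) through the whole table, wrapping around at the end.
 */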
#define for_each_hash_entry(he, offset, hash)						\
	for (hash &= ~(PV_HE_PER_LINE - 1), he = &pv_lock_hash[hash], offset = 0;	\
	     offset < (1 << pv_lock_hash_bits);						\
	     offset++, he = &pv_lock_hash[(hash + offset) & ((1 << pv_lock_hash_bits) - 1)])

static struct qspinlock **pv_hash(struct qspinlock *lock, struct pv_node *node)
{
	unsigned long offset, hash = hash_ptr(lock, pv_lock_hash_bits);
	struct pv_hash_entry *he;
	int hopcnt = 0;

	for_each_hash_entry(he, offset, hash) {
		struct qspinlock *old = NULL;
		hopcnt++;
		if (try_cmpxchg(&he->lock, &old, lock)) {
			WRITE_ONCE(he->node, node);
			lockevent_pv_hop(hopcnt);
			return &he->lock;
		}
	}
	/*
	 * Hard assume there is a free entry for us.
	 *
	 * This is guaranteed by ensuring every blocked lock only ever consumes
	 * a single entry, and since we only have 4 nesting levels per CPU
	 * and allocated 4*nr_possible_cpus(), this must be so.
	 *
	 * The single entry is guaranteed by having the lock owner unhash
	 * before it releases.
	 */
	BUG();
}

static struct pv_node *pv_unhash(struct qspinlock *lock)
{
	unsigned long offset, hash = hash_ptr(lock, pv_lock_hash_bits);
	struct pv_hash_entry *he;
	struct pv_node *node;

	for_each_hash_entry(he, offset, hash) {
		if (READ_ONCE(he->lock) == lock) {
			node = READ_ONCE(he->node);
			WRITE_ONCE(he->lock, NULL);
			return node;
		}
	}
	/*
	 * Hard assume we'll find an entry.
	 *
	 * This guarantees a limited lookup time and is itself guaranteed by
	 * having the lock owner do the unhash -- IFF the unlock sees the
	 * SLOW flag, there MUST be a hash entry.
	 */
	BUG();
}

/*
 * Return true when it is time to check the previous node and that node is
 * not in a running state.
 */
static inline bool
pv_wait_early(struct pv_node *prev, int loop)
{
	if ((loop & PV_PREV_CHECK_MASK) != 0)
		return false;

	return READ_ONCE(prev->state) != VCPU_RUNNING;
}

/*
 * Initialize the PV part of the mcs_spinlock node.
 */
static void pv_init_node(struct mcs_spinlock *node)
{
	struct pv_node *pn = (struct pv_node *)node;

	BUILD_BUG_ON(sizeof(struct pv_node) > sizeof(struct qnode));

	pn->cpu = smp_processor_id();
	pn->state = VCPU_RUNNING;
}

/*
 * Wait for node->locked to become true, halt the vcpu after a short spin.
 * pv_kick_node() is used to set _Q_SLOW_VAL and fill in hash table on its
 * behalf.
 */
static void pv_wait_node(struct mcs_spinlock *node, struct mcs_spinlock *prev)
{
	struct pv_node *pn = (struct pv_node *)node;
	struct pv_node *pp = (struct pv_node *)prev;
	bool wait_early;
	int loop;

	for (;;) {
		for (wait_early = false, loop = SPIN_THRESHOLD; loop; loop--) {
			if (READ_ONCE(node->locked))
				return;
			if (pv_wait_early(pp, loop)) {
				wait_early = true;
				break;
			}
			cpu_relax();
		}

		/*
		 * Order pn->state vs pn->locked thusly:
		 *
		 * [S] pn->state = VCPU_HALTED	  [S] next->locked = 1
		 *     MB			      MB
		 * [L] pn->locked		[RmW] pn->state = VCPU_HASHED
		 *
		 * Matches the cmpxchg() from pv_kick_node().
		 */
		smp_store_mb(pn->state, VCPU_HALTED);

		if (!READ_ONCE(node->locked)) {
			lockevent_inc(pv_wait_node);
			lockevent_cond_inc(pv_wait_early, wait_early);
			pv_wait(&pn->state, VCPU_HALTED);
		}

		/*
		 * If pv_kick_node() changed us to VCPU_HASHED, retain that
		 * value so that pv_wait_head_or_lock() knows to not also try
		 * to hash this lock.
		 */
		cmpxchg(&pn->state, VCPU_HALTED, VCPU_RUNNING);

		/*
		 * If the locked flag is still not set after wakeup, it is a
		 * spurious wakeup and the vCPU should wait again. However,
		 * there is a pretty high overhead for CPU halting and kicking.
		 * So it is better to spin for a while in the hope that the
		 * MCS lock will be released soon.
		 */
		lockevent_cond_inc(pv_spurious_wakeup,
				  !READ_ONCE(node->locked));
	}

	/*
	 * By now our node->locked should be 1 and our caller will not actually
	 * spin-wait for it. We do however rely on our caller to do a
	 * load-acquire for us.
	 */
}

/*
 * Called after setting next->locked = 1 when we're the lock owner.
 *
 * Instead of waking the waiters stuck in pv_wait_node() advance their state
 * such that they're waiting in pv_wait_head_or_lock(), this avoids a
 * wake/sleep cycle.
 */
static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node)
{
	struct pv_node *pn = (struct pv_node *)node;
	u8 old = VCPU_HALTED;
	/*
	 * If the vCPU is indeed halted, advance its state to match that of
	 * pv_wait_node(). If OTOH this fails, the vCPU was running and will
	 * observe its next->locked value and advance itself.
	 *
	 * Matches with smp_store_mb() and cmpxchg() in pv_wait_node()
	 *
	 * The write to next->locked in arch_mcs_spin_unlock_contended()
	 * must be ordered before the read of pn->state in the cmpxchg()
	 * below for the code to work correctly. To guarantee full ordering
	 * irrespective of the success or failure of the cmpxchg(),
	 * a relaxed version with explicit barrier is used. The control
	 * dependency will order the reading of pn->state before any
	 * subsequent writes.
	 */
	smp_mb__before_atomic();
	if (!try_cmpxchg_relaxed(&pn->state, &old, VCPU_HASHED))
		return;

	/*
	 * Put the lock into the hash table and set the _Q_SLOW_VAL.
	 *
	 * As this is the same vCPU that will check the _Q_SLOW_VAL value and
	 * the hash table later on at unlock time, no atomic instruction is
	 * needed.
	 */
	WRITE_ONCE(lock->locked, _Q_SLOW_VAL);
	(void)pv_hash(lock, pn);
}

/*
 * Wait for l->locked to become clear and acquire the lock;
 * halt the vcpu after a short spin.
 * __pv_queued_spin_unlock() will wake us.
 *
 * The current value of the lock will be returned for additional processing.
 */
static u32
pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
{
	struct pv_node *pn = (struct pv_node *)node;
	struct qspinlock **lp = NULL;
	int waitcnt = 0;
	int loop;

	/*
	 * If pv_kick_node() already advanced our state, we don't need to
	 * insert ourselves into the hash table anymore.
	 */
	if (READ_ONCE(pn->state) == VCPU_HASHED)
		lp = (struct qspinlock **)1;

	/*
	 * Tracking # of slowpath locking operations
	 */
	lockevent_inc(lock_slowpath);

	for (;; waitcnt++) {
		/*
		 * Set correct vCPU state to be used by queue node wait-early
		 * mechanism.
		 */
		WRITE_ONCE(pn->state, VCPU_RUNNING);

		/*
		 * Set the pending bit in the active lock spinning loop to
		 * disable lock stealing before attempting to acquire the lock.
		 */
		set_pending(lock);
		for (loop = SPIN_THRESHOLD; loop; loop--) {
			if (trylock_clear_pending(lock))
				goto gotlock;
			cpu_relax();
		}
		clear_pending(lock);

		if (!lp) { /* ONCE */
			lp = pv_hash(lock, pn);

			/*
			 * We must hash before setting _Q_SLOW_VAL, such that
			 * when we observe _Q_SLOW_VAL in __pv_queued_spin_unlock()
			 * we'll be sure to be able to observe our hash entry.
			 *
			 *   [S] <hash>                 [Rmw] l->locked == _Q_SLOW_VAL
			 *       MB                           RMB
			 * [RmW] l->locked = _Q_SLOW_VAL  [L] <unhash>
			 *
			 * Matches the smp_rmb() in __pv_queued_spin_unlock().
			 */
			if (xchg(&lock->locked, _Q_SLOW_VAL) == 0) {
				/*
				 * The lock was free and now we own the lock.
				 * Change the lock value back to _Q_LOCKED_VAL
				 * and unhash the table.
				 */
				WRITE_ONCE(lock->locked, _Q_LOCKED_VAL);
				WRITE_ONCE(*lp, NULL);
				goto gotlock;
			}
		}
		WRITE_ONCE(pn->state, VCPU_HASHED);
		lockevent_inc(pv_wait_head);
		lockevent_cond_inc(pv_wait_again, waitcnt);
		pv_wait(&lock->locked, _Q_SLOW_VAL);

		/*
		 * Because of lock stealing, the queue head vCPU may not be
		 * able to acquire the lock before it has to wait again.
		 */
	}

	/*
	 * The cmpxchg() or xchg() call before coming here provides the
	 * acquire semantics for locking. The dummy ORing of _Q_LOCKED_VAL
	 * here is to indicate to the compiler that the value will always
	 * be nonzero to enable better code optimization.
	 */
gotlock:
	return (u32)(atomic_read(&lock->val) | _Q_LOCKED_VAL);
}
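
/*
 * Recap of the slow-path handshake (informational): the queue head stores
 * (lock, node) in the hash table and writes _Q_SLOW_VAL into lock->locked
 * before calling pv_wait(). The unlocking vCPU's try_cmpxchg_release() in
 * __pv_queued_spin_unlock() then fails because it sees _Q_SLOW_VAL instead
 * of _Q_LOCKED_VAL, so it falls into __pv_queued_spin_unlock_slowpath(),
 * which unhashes the node, releases the lock and pv_kick()s the waiter.
 */
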
/*
 * Include the architecture specific callee-save thunk of the
 * __pv_queued_spin_unlock(). This thunk is put together with
 * __pv_queued_spin_unlock() to make the callee-save thunk and the real unlock
 * function close to each other sharing consecutive instruction cachelines.
 * Alternatively, an architecture specific version of __pv_queued_spin_unlock()
 * can be defined.
 */
#include <asm/qspinlock_paravirt.h>

/*
 * PV versions of the unlock fastpath and slowpath functions to be used
 * instead of queued_spin_unlock().
 */
__visible __lockfunc void
__pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)
{
	struct pv_node *node;

	if (unlikely(locked != _Q_SLOW_VAL)) {
		WARN(!debug_locks_silent,
		     "pvqspinlock: lock 0x%lx has corrupted value 0x%x!\n",
		     (unsigned long)lock, atomic_read(&lock->val));
		return;
	}

	/*
	 * A failed cmpxchg doesn't provide any memory-ordering guarantees,
	 * so we need a barrier to order the read of the node data in
	 * pv_unhash *after* we've read the lock being _Q_SLOW_VAL.
	 *
	 * Matches the cmpxchg() in pv_wait_head_or_lock() setting _Q_SLOW_VAL.
	 */
	smp_rmb();

	/*
	 * Since the above failed to release, this must be the SLOW path.
	 * Therefore start by looking up the blocked node and unhashing it.
	 */
	node = pv_unhash(lock);

	/*
	 * Now that we have a reference to the (likely) blocked pv_node,
	 * release the lock.
	 */
	smp_store_release(&lock->locked, 0);

	/*
	 * At this point the memory pointed at by lock can be freed/reused,
	 * however we can still use the pv_node to kick the CPU.
	 * The other vCPU may not really be halted, but kicking an active
	 * vCPU is harmless other than the additional latency in completing
	 * the unlock.
	 */
	lockevent_inc(pv_kick_unlock);
	pv_kick(node->cpu);
}

#ifndef __pv_queued_spin_unlock
__visible __lockfunc void __pv_queued_spin_unlock(struct qspinlock *lock)
{
	u8 locked = _Q_LOCKED_VAL;

	/*
	 * We must not unlock if SLOW, because in that case we must first
	 * unhash. Otherwise it would be possible to have multiple @lock
	 * entries, which would be BAD.
	 */
	if (try_cmpxchg_release(&lock->locked, &locked, 0))
		return;

	__pv_queued_spin_unlock_slowpath(lock, locked);
}
#endif /* __pv_queued_spin_unlock */