kernel/locking/qspinlock_paravirt.h

#ifndef _GEN_PV_LOCK_SLOWPATH
#error "do not include this file"
#endif

#include <linux/hash.h>
#include <linux/bootmem.h>
#include <linux/debug_locks.h>

/*
 * Implement paravirt qspinlocks; the general idea is to halt the vcpus instead
 * of spinning them.
 *
 * This relies on the architecture to provide two paravirt hypercalls:
 *
 *   pv_wait(u8 *ptr, u8 val) -- suspends the vcpu if *ptr == val
 *   pv_kick(cpu)             -- wakes a suspended vcpu
 *
 * Using these we implement __pv_queued_spin_lock_slowpath() and
 * __pv_queued_spin_unlock() to replace native_queued_spin_lock_slowpath() and
 * native_queued_spin_unlock().
 */

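/*
 * Illustrative sketch, not part of this file: a minimal pv_wait()/pv_kick()
 * backend satisfying the contract above. The example_* names are
 * hypothetical; real implementations are hypervisor specific (e.g. blocking
 * the vCPU on a halt/poll hypercall and kicking it via an IPI-like event).
 * pv_wait() may return spuriously, so all callers re-check their condition.
 */
#if 0
static void example_pv_wait(u8 *ptr, u8 val)
{
        /*
         * Degenerate fallback with no hypervisor support: simply spin until
         * the byte changes. A real backend would block the vCPU instead.
         */
        while (READ_ONCE(*ptr) == val)
                cpu_relax();
}

static void example_pv_kick(int cpu)
{
        /* Nothing to do: the waiter above never actually sleeps. */
}
#endif
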
#define _Q_SLOW_VAL     (3U << _Q_LOCKED_OFFSET)

/*
 * Queue Node Adaptive Spinning
 *
 * A queue node vCPU will stop spinning if the vCPU in the previous node is
 * not running. The one lock stealing attempt allowed at slowpath entry
 * mitigates the slight slowdown for non-overcommitted guests with this
 * aggressive wait-early mechanism.
 *
 * The status of the previous node will be checked at a fixed interval
 * controlled by PV_PREV_CHECK_MASK. This is to ensure that we won't
 * pound on the cacheline of the previous node too heavily.
 */
#define PV_PREV_CHECK_MASK      0xff

/*
 * Queue node uses: vcpu_running & vcpu_halted.
 * Queue head uses: vcpu_running & vcpu_hashed.
 */
enum vcpu_state {
        vcpu_running = 0,
        vcpu_halted,            /* Used only in pv_wait_node */
        vcpu_hashed,            /* = pv_hash'ed + vcpu_halted */
};

struct pv_node {
        struct mcs_spinlock     mcs;
        struct mcs_spinlock     __res[3];

        int                     cpu;
        u8                      state;
};

/*
 * Include queued spinlock statistics code
 */
#include "qspinlock_stat.h"

/*
 * By replacing the regular queued_spin_trylock() with the function below,
 * it will be called once when a lock waiter enters the PV slowpath before
 * being queued. By allowing one lock stealing attempt here when the pending
 * bit is off, it helps to reduce the performance impact of lock waiter
 * preemption without the drawback of lock starvation.
 */
#define queued_spin_trylock(l)  pv_queued_spin_steal_lock(l)
static inline bool pv_queued_spin_steal_lock(struct qspinlock *lock)
{
        struct __qspinlock *l = (void *)lock;
        int ret = !(atomic_read(&lock->val) & _Q_LOCKED_PENDING_MASK) &&
                   (cmpxchg(&l->locked, 0, _Q_LOCKED_VAL) == 0);

        qstat_inc(qstat_pv_lock_stealing, ret);
        return ret;
}

/*
 * The pending bit is used by the queue head vCPU to indicate that it
 * is actively spinning on the lock and no lock stealing is allowed.
 */
#if _Q_PENDING_BITS == 8
static __always_inline void set_pending(struct qspinlock *lock)
{
        struct __qspinlock *l = (void *)lock;

        WRITE_ONCE(l->pending, 1);
}

static __always_inline void clear_pending(struct qspinlock *lock)
{
        struct __qspinlock *l = (void *)lock;

        WRITE_ONCE(l->pending, 0);
}

/*
 * The pending bit check in pv_queued_spin_steal_lock() isn't a memory
 * barrier. Therefore, an atomic cmpxchg() is used to acquire the lock
 * just to be sure that it will get it.
 */
static __always_inline int trylock_clear_pending(struct qspinlock *lock)
{
        struct __qspinlock *l = (void *)lock;

        return !READ_ONCE(l->locked) &&
               (cmpxchg(&l->locked_pending, _Q_PENDING_VAL, _Q_LOCKED_VAL)
                        == _Q_PENDING_VAL);
}
#else /* _Q_PENDING_BITS == 8 */
static __always_inline void set_pending(struct qspinlock *lock)
{
        atomic_set_mask(_Q_PENDING_VAL, &lock->val);
}

static __always_inline void clear_pending(struct qspinlock *lock)
{
        atomic_clear_mask(_Q_PENDING_VAL, &lock->val);
}

static __always_inline int trylock_clear_pending(struct qspinlock *lock)
{
        int val = atomic_read(&lock->val);

        for (;;) {
                int old, new;

                if (val & _Q_LOCKED_MASK)
                        break;

                /*
                 * Try to clear pending bit & set locked bit
                 */
                old = val;
                new = (val & ~_Q_PENDING_MASK) | _Q_LOCKED_VAL;
                val = atomic_cmpxchg(&lock->val, old, new);

                if (val == old)
                        return 1;
        }
        return 0;
}
#endif /* _Q_PENDING_BITS == 8 */

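/*
 * Worked example (added for illustration, not in the original source): with
 * _Q_PENDING_BITS == 8, trylock_clear_pending() succeeds only when the
 * locked_pending halfword is exactly _Q_PENDING_VAL, i.e. pending == 1 and
 * locked == 0; the single cmpxchg() then flips it to _Q_LOCKED_VAL
 * (pending == 0, locked == 1) in one step. Any other starting value, such
 * as an owner still holding the lock byte, leaves the word untouched and
 * the function returns 0.
 */
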
/*
 * Lock and MCS node addresses hash table for fast lookup
 *
 * Hashing is done on a per-cacheline basis to minimize the need to access
 * more than one cacheline.
 *
 * Dynamically allocate a hash table big enough to hold at least 4X the
 * number of possible cpus in the system. Allocation is done on page
 * granularity. So the minimum number of hash buckets should be at least
 * 256 (64-bit) or 512 (32-bit) to fully utilize a 4k page.
 *
 * Since we should not be holding locks from NMI context (very rare indeed) the
 * max load factor is 0.75, which is around the point where open addressing
 * breaks down.
 */
struct pv_hash_entry {
        struct qspinlock *lock;
        struct pv_node   *node;
};

#define PV_HE_PER_LINE  (SMP_CACHE_BYTES / sizeof(struct pv_hash_entry))
#define PV_HE_MIN       (PAGE_SIZE / sizeof(struct pv_hash_entry))

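/*
 * Worked sizing example (added for illustration, assuming a 64-bit kernel
 * with 64-byte cache lines and 4k pages): sizeof(struct pv_hash_entry) is
 * 16 bytes, so PV_HE_PER_LINE == 4 and PV_HE_MIN == 256. With 64 possible
 * CPUs, __pv_init_lock_hash() below requests ALIGN(4 * 64, 4) == 256
 * entries, i.e. exactly one page; smaller systems are rounded up to
 * PV_HE_MIN so a full page is always used.
 */
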
static struct pv_hash_entry *pv_lock_hash;
static unsigned int pv_lock_hash_bits __read_mostly;

/*
 * Allocate memory for the PV qspinlock hash buckets
 *
 * This function should be called from the paravirt spinlock initialization
 * routine.
 */
void __init __pv_init_lock_hash(void)
{
        int pv_hash_size = ALIGN(4 * num_possible_cpus(), PV_HE_PER_LINE);

        if (pv_hash_size < PV_HE_MIN)
                pv_hash_size = PV_HE_MIN;

        /*
         * Allocate space from bootmem which should be page-size aligned
         * and hence cacheline aligned.
         */
        pv_lock_hash = alloc_large_system_hash("PV qspinlock",
                                               sizeof(struct pv_hash_entry),
                                               pv_hash_size, 0, HASH_EARLY,
                                               &pv_lock_hash_bits, NULL,
                                               pv_hash_size, pv_hash_size);
}

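/*
 * Illustrative sketch, not part of this file: roughly how a guest's paravirt
 * spinlock init routine is expected to call __pv_init_lock_hash() and switch
 * the lock operations over to the PV variants. The example_* names and the
 * capability check are hypothetical; the pv_lock_ops wiring mirrors the x86
 * paravirt-ops style of this kernel era and is simplified.
 */
#if 0
void __init example_hv_spinlock_init(void)
{
        if (!example_hv_can_kick_vcpus())       /* hypothetical capability check */
                return;

        __pv_init_lock_hash();
        pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
        pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
        pv_lock_ops.wait = example_pv_wait;
        pv_lock_ops.kick = example_pv_kick;
}
#endif
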
#define for_each_hash_entry(he, offset, hash)                                           \
        for (hash &= ~(PV_HE_PER_LINE - 1), he = &pv_lock_hash[hash], offset = 0;       \
             offset < (1 << pv_lock_hash_bits);                                         \
             offset++, he = &pv_lock_hash[(hash + offset) & ((1 << pv_lock_hash_bits) - 1)])

static struct qspinlock **pv_hash(struct qspinlock *lock, struct pv_node *node)
{
        unsigned long offset, hash = hash_ptr(lock, pv_lock_hash_bits);
        struct pv_hash_entry *he;
        int hopcnt = 0;

        for_each_hash_entry(he, offset, hash) {
                hopcnt++;
                if (!cmpxchg(&he->lock, NULL, lock)) {
                        WRITE_ONCE(he->node, node);
                        qstat_hop(hopcnt);
                        return &he->lock;
                }
        }
        /*
         * Hard assume there is a free entry for us.
         *
         * This is guaranteed by ensuring every blocked lock only ever consumes
         * a single entry, and since we only have 4 nesting levels per CPU
         * and allocated 4*nr_possible_cpus(), this must be so.
         *
         * The single entry is guaranteed by having the lock owner unhash
         * before it releases.
         */
        BUG();
}

static struct pv_node *pv_unhash(struct qspinlock *lock)
{
        unsigned long offset, hash = hash_ptr(lock, pv_lock_hash_bits);
        struct pv_hash_entry *he;
        struct pv_node *node;

        for_each_hash_entry(he, offset, hash) {
                if (READ_ONCE(he->lock) == lock) {
                        node = READ_ONCE(he->node);
                        WRITE_ONCE(he->lock, NULL);
                        return node;
                }
        }
        /*
         * Hard assume we'll find an entry.
         *
         * This guarantees a limited lookup time and is itself guaranteed by
         * having the lock owner do the unhash -- IFF the unlock sees the
         * SLOW flag, there MUST be a hash entry.
         */
        BUG();
}

/*
 * Return true if it is time to check the previous node and that node is not
 * in a running state.
 */
static inline bool
pv_wait_early(struct pv_node *prev, int loop)
{
        if ((loop & PV_PREV_CHECK_MASK) != 0)
                return false;

        return READ_ONCE(prev->state) != vcpu_running;
}

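/*
 * Worked example (added for illustration, not in the original source): with
 * PV_PREV_CHECK_MASK == 0xff, the previous node's state is sampled only when
 * the low 8 bits of the loop counter are zero, i.e. once every 256 spin
 * iterations. This keeps the waiter from hammering the predecessor's
 * cacheline while still noticing a preempted predecessor reasonably quickly.
 */
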
/*
 * Initialize the PV part of the mcs_spinlock node.
 */
static void pv_init_node(struct mcs_spinlock *node)
{
        struct pv_node *pn = (struct pv_node *)node;

        BUILD_BUG_ON(sizeof(struct pv_node) > 5*sizeof(struct mcs_spinlock));

        pn->cpu = smp_processor_id();
        pn->state = vcpu_running;
}

/*
 * Wait for node->locked to become true, halt the vcpu after a short spin.
 * pv_kick_node() is used to set _Q_SLOW_VAL and fill in hash table on its
 * behalf.
 */
static void pv_wait_node(struct mcs_spinlock *node, struct mcs_spinlock *prev)
{
        struct pv_node *pn = (struct pv_node *)node;
        struct pv_node *pp = (struct pv_node *)prev;
        int waitcnt = 0;
        int loop;
        bool wait_early;

        /* waitcnt processing will be compiled out if !QUEUED_LOCK_STAT */
        for (;; waitcnt++) {
                for (wait_early = false, loop = SPIN_THRESHOLD; loop; loop--) {
                        if (READ_ONCE(node->locked))
                                return;
                        if (pv_wait_early(pp, loop)) {
                                wait_early = true;
                                break;
                        }
                        cpu_relax();
                }

                /*
                 * Order pn->state vs pn->locked thusly:
                 *
                 * [S] pn->state = vcpu_halted    [S] next->locked = 1
                 *     MB                             MB
                 * [L] pn->locked               [RmW] pn->state = vcpu_hashed
                 *
                 * Matches the cmpxchg() from pv_kick_node().
                 */
                smp_store_mb(pn->state, vcpu_halted);

                if (!READ_ONCE(node->locked)) {
                        qstat_inc(qstat_pv_wait_node, true);
                        qstat_inc(qstat_pv_wait_again, waitcnt);
                        qstat_inc(qstat_pv_wait_early, wait_early);
                        pv_wait(&pn->state, vcpu_halted);
                }

                /*
                 * If pv_kick_node() changed us to vcpu_hashed, retain that
                 * value so that pv_wait_head_or_lock() knows to not also try
                 * to hash this lock.
                 */
                cmpxchg(&pn->state, vcpu_halted, vcpu_running);

                /*
                 * If the locked flag is still not set after wakeup, it is a
                 * spurious wakeup and the vCPU should wait again. However,
                 * there is a pretty high overhead for CPU halting and kicking.
                 * So it is better to spin for a while in the hope that the
                 * MCS lock will be released soon.
                 */
                qstat_inc(qstat_pv_spurious_wakeup, !READ_ONCE(node->locked));
        }

        /*
         * By now our node->locked should be 1 and our caller will not actually
         * spin-wait for it. We do however rely on our caller to do a
         * load-acquire for us.
         */
}

/*
 * Called after setting next->locked = 1 when we're the lock owner.
 *
 * Instead of waking the waiters stuck in pv_wait_node() advance their state
 * such that they're waiting in pv_wait_head_or_lock(), this avoids a
 * wake/sleep cycle.
 */
static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node)
{
        struct pv_node *pn = (struct pv_node *)node;
        struct __qspinlock *l = (void *)lock;

        /*
         * If the vCPU is indeed halted, advance its state to match that of
         * pv_wait_node(). If OTOH this fails, the vCPU was running and will
         * observe its next->locked value and advance itself.
         *
         * Matches with smp_store_mb() and cmpxchg() in pv_wait_node()
         */
        if (cmpxchg(&pn->state, vcpu_halted, vcpu_hashed) != vcpu_halted)
                return;

        /*
         * Put the lock into the hash table and set the _Q_SLOW_VAL.
         *
         * As this is the same vCPU that will check the _Q_SLOW_VAL value and
         * the hash table later on at unlock time, no atomic instruction is
         * needed.
         */
        WRITE_ONCE(l->locked, _Q_SLOW_VAL);
        (void)pv_hash(lock, pn);
}

/*
 * Wait for l->locked to become clear and acquire the lock;
 * halt the vcpu after a short spin.
 * __pv_queued_spin_unlock() will wake us.
 *
 * The current value of the lock will be returned for additional processing.
 */
static u32
pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
{
        struct pv_node *pn = (struct pv_node *)node;
        struct __qspinlock *l = (void *)lock;
        struct qspinlock **lp = NULL;
        int waitcnt = 0;
        int loop;

        /*
         * If pv_kick_node() already advanced our state, we don't need to
         * insert ourselves into the hash table anymore.
         */
        if (READ_ONCE(pn->state) == vcpu_hashed)
                lp = (struct qspinlock **)1;

        /*
         * Tracking # of slowpath locking operations
         */
        qstat_inc(qstat_pv_lock_slowpath, true);

        for (;; waitcnt++) {
                /*
                 * Set correct vCPU state to be used by queue node wait-early
                 * mechanism.
                 */
                WRITE_ONCE(pn->state, vcpu_running);

                /*
                 * Set the pending bit in the active lock spinning loop to
                 * disable lock stealing before attempting to acquire the lock.
                 */
                set_pending(lock);
                for (loop = SPIN_THRESHOLD; loop; loop--) {
                        if (trylock_clear_pending(lock))
                                goto gotlock;
                        cpu_relax();
                }
                clear_pending(lock);

                if (!lp) { /* ONCE */
                        lp = pv_hash(lock, pn);

                        /*
                         * We must hash before setting _Q_SLOW_VAL, such that
                         * when we observe _Q_SLOW_VAL in __pv_queued_spin_unlock()
                         * we'll be sure to be able to observe our hash entry.
                         *
                         *   [S] <hash>                  [Rmw] l->locked == _Q_SLOW_VAL
                         *       MB                            RMB
                         * [RmW] l->locked = _Q_SLOW_VAL   [L] <unhash>
                         *
                         * Matches the smp_rmb() in __pv_queued_spin_unlock().
                         */
                        if (xchg(&l->locked, _Q_SLOW_VAL) == 0) {
                                /*
                                 * The lock was free and now we own the lock.
                                 * Change the lock value back to _Q_LOCKED_VAL
                                 * and unhash the table.
                                 */
                                WRITE_ONCE(l->locked, _Q_LOCKED_VAL);
                                WRITE_ONCE(*lp, NULL);
                                goto gotlock;
                        }
                }
                WRITE_ONCE(pn->state, vcpu_halted);
                qstat_inc(qstat_pv_wait_head, true);
                qstat_inc(qstat_pv_wait_again, waitcnt);
                pv_wait(&l->locked, _Q_SLOW_VAL);

                /*
                 * The unlocker should have freed the lock before kicking the
                 * CPU. So if the lock is still not free, it is a spurious
                 * wakeup or another vCPU has stolen the lock. The current
                 * vCPU should spin again.
                 */
                qstat_inc(qstat_pv_spurious_wakeup, READ_ONCE(l->locked));
        }

        /*
         * The cmpxchg() or xchg() call before coming here provides the
         * acquire semantics for locking. The dummy ORing of _Q_LOCKED_VAL
         * here is to indicate to the compiler that the value will always
         * be nonzero to enable better code optimization.
         */
gotlock:
        return (u32)(atomic_read(&lock->val) | _Q_LOCKED_VAL);
}

/*
 * PV versions of the unlock fastpath and slowpath functions to be used
 * instead of queued_spin_unlock().
 */
__visible void
__pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)
{
        struct __qspinlock *l = (void *)lock;
        struct pv_node *node;

        if (unlikely(locked != _Q_SLOW_VAL)) {
                WARN(!debug_locks_silent,
                     "pvqspinlock: lock 0x%lx has corrupted value 0x%x!\n",
                     (unsigned long)lock, atomic_read(&lock->val));
                return;
        }

        /*
         * A failed cmpxchg doesn't provide any memory-ordering guarantees,
         * so we need a barrier to order the read of the node data in
         * pv_unhash *after* we've read the lock being _Q_SLOW_VAL.
         *
         * Matches the cmpxchg() in pv_wait_head_or_lock() setting _Q_SLOW_VAL.
         */
        smp_rmb();

        /*
         * Since the above failed to release, this must be the SLOW path.
         * Therefore start by looking up the blocked node and unhashing it.
         */
        node = pv_unhash(lock);

        /*
         * Now that we have a reference to the (likely) blocked pv_node,
         * release the lock.
         */
        smp_store_release(&l->locked, 0);

        /*
         * At this point the memory pointed at by lock can be freed/reused,
         * however we can still use the pv_node to kick the CPU.
         * The other vCPU may not really be halted, but kicking an active
         * vCPU is harmless other than the additional latency in completing
         * the unlock.
         */
        qstat_inc(qstat_pv_kick_unlock, true);
        pv_kick(node->cpu);
}

/*
 * Include the architecture specific callee-save thunk of the
 * __pv_queued_spin_unlock(). This thunk is put together with
 * __pv_queued_spin_unlock() to make the callee-save thunk and the real unlock
 * function close to each other sharing consecutive instruction cachelines.
 * Alternatively, an architecture specific version of __pv_queued_spin_unlock()
 * can be defined.
 */
#include <asm/qspinlock_paravirt.h>

#ifndef __pv_queued_spin_unlock
__visible void __pv_queued_spin_unlock(struct qspinlock *lock)
{
        struct __qspinlock *l = (void *)lock;
        u8 locked;

        /*
         * We must not unlock if SLOW, because in that case we must first
         * unhash. Otherwise it would be possible to have multiple @lock
         * entries, which would be BAD.
         */
        locked = cmpxchg(&l->locked, _Q_LOCKED_VAL, 0);
        if (likely(locked == _Q_LOCKED_VAL))
                return;

        __pv_queued_spin_unlock_slowpath(lock, locked);
}
#endif /* __pv_queued_spin_unlock */

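/*
 * Summary added for clarity (not in the original source): the lock byte of a
 * PV qspinlock moves through three values. 0 means unlocked; _Q_LOCKED_VAL
 * means locked, with the owner expected to release via the fast-path
 * cmpxchg() above; _Q_SLOW_VAL means locked with a halted queue-head waiter
 * hashed in pv_lock_hash, so the unlocker must take
 * __pv_queued_spin_unlock_slowpath() to unhash the waiter, release the lock
 * and pv_kick() the waiter's CPU.
 */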