/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_QSPINLOCK_H
#define _ASM_X86_QSPINLOCK_H

#include <linux/jump_label.h>
#include <asm/cpufeature.h>
#include <asm-generic/qspinlock_types.h>
#include <asm/paravirt.h>
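
/*
 * Override the generic default of 1: the queued-spinlock slowpath may spin
 * up to 512 times waiting for the lock word to leave the pending-but-not-
 * yet-locked state before queueing (see the _Q_PENDING_LOOPS use in
 * kernel/locking/qspinlock.c).
 */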
#define _Q_PENDING_LOOPS	(1 << 9)

#define queued_fetch_set_pending_acquire queued_fetch_set_pending_acquire
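
/*
 * Atomically set the pending bit with a lock-prefixed "btsl" and report its
 * previous value via the carry flag; GEN_BINARY_RMWcc() emits the asm and
 * the condition-code extraction. Conceptually (the real thing is a single
 * atomic instruction):
 *
 *	old = lock->val.counter;
 *	lock->val.counter |= (1 << _Q_PENDING_OFFSET);
 *	return (old >> _Q_PENDING_OFFSET) & 1;
 */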
static __always_inline bool __queued_RMW_btsl(struct qspinlock *lock)
{
	GEN_BINARY_RMWcc(LOCK_PREFIX "btsl", lock->val.counter,
			 "I", _Q_PENDING_OFFSET, "%0", c);
}
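
/*
 * Set the pending bit atomically and return the old lock word: the pending
 * bit comes from the RMW above, while the remaining bits are sampled with a
 * plain atomic_read(). The lock-prefixed RMW is fully ordered on x86, which
 * provides the acquire semantics implied by the name.
 */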
static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock)
{
	u32 val = 0;

	if (__queued_RMW_btsl(lock))
		val |= _Q_PENDING_VAL;

	val |= atomic_read(&lock->val) & ~_Q_PENDING_MASK;

	return val;
}
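
/*
 * With CONFIG_PARAVIRT_SPINLOCKS the lock/unlock entry points below are
 * routed through paravirt ops, which the kernel patches at boot: to the
 * native_* implementations on bare metal, or to hypervisor-assisted
 * versions (which can halt and kick vCPUs instead of spinning) in a guest.
 */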
#ifdef CONFIG_PARAVIRT_SPINLOCKS
extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __pv_init_lock_hash(void);
extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __raw_callee_save___pv_queued_spin_unlock(struct qspinlock *lock);
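
/*
 * __raw_callee_save___pv_queued_spin_unlock is the register-preserving
 * thunk around __pv_queued_spin_unlock (see asm/qspinlock_paravirt.h), so
 * the patched call in the unlock fast path does not clobber the usual
 * caller-saved registers.
 */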

#define queued_spin_unlock queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock : Pointer to queued spinlock structure
 *
 * A smp_store_release() on the least-significant byte.
 */
static inline void native_queued_spin_unlock(struct qspinlock *lock)
{
	smp_store_release(&lock->locked, 0);
}

static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
	pv_queued_spin_lock_slowpath(lock, val);
}

static inline void queued_spin_unlock(struct qspinlock *lock)
{
	pv_queued_spin_unlock(lock);
}
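
/*
 * vcpu_is_preempted() reports whether the vCPU backing @cpu is currently
 * scheduled out by the hypervisor; spin-wait loops and the scheduler use it
 * to avoid busy-waiting on behalf of a preempted lock holder.
 */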
#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(long cpu)
{
	return pv_vcpu_is_preempted(cpu);
}
#endif /* CONFIG_PARAVIRT_SPINLOCKS */

#ifdef CONFIG_PARAVIRT
DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key);

void native_pv_lock_init(void) __init;
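
/*
 * virt_spin_lock_key defaults to true; native_pv_lock_init() is expected
 * to disable it when running on bare metal (see arch/x86/kernel/paravirt.c),
 * leaving the test-and-set fallback below active only in guests.
 */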
#define virt_spin_lock virt_spin_lock
static inline bool virt_spin_lock(struct qspinlock *lock)
{
	if (!static_branch_likely(&virt_spin_lock_key))
		return false;

	/*
	 * On hypervisors without PARAVIRT_SPINLOCKS support we fall
	 * back to a Test-and-Set spinlock, because fair locks have
	 * horrible lock 'holder' preemption issues.
	 */

	do {
		while (atomic_read(&lock->val) != 0)
			cpu_relax();

	} while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);

	return true;
}
#else
static inline void native_pv_lock_init(void)
{
}
#endif /* CONFIG_PARAVIRT */
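
/*
 * asm-generic/qspinlock.h supplies the rest of the queued_spin_*() API,
 * honouring the overrides defined above (queued_fetch_set_pending_acquire,
 * queued_spin_unlock, vcpu_is_preempted and virt_spin_lock).
 */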
#include <asm-generic/qspinlock.h>

#endif /* _ASM_X86_QSPINLOCK_H */