include/asm-generic/qspinlock.h

/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Queued spinlock
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
 *
 * Authors: Waiman Long <waiman.long@hpe.com>
 */
#ifndef __ASM_GENERIC_QSPINLOCK_H
#define __ASM_GENERIC_QSPINLOCK_H

#include <asm-generic/qspinlock_types.h>
#include <linux/atomic.h>

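/*
 * The lock word is a single 32-bit atomic_t. Roughly speaking (see
 * asm-generic/qspinlock_types.h for the authoritative layout), bits 0-7
 * hold the locked byte, bit 8 is the pending bit, and the upper bits
 * encode the tail of the waiter queue.
 */
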
#ifndef queued_spin_is_locked
/**
 * queued_spin_is_locked - is the spinlock locked?
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if it is locked, 0 otherwise
 */
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
        /*
         * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
         * isn't immediately observable.
         */
        return atomic_read(&lock->val);
}
#endif

/**
 * queued_spin_value_unlocked - is the spinlock structure unlocked?
 * @lock: queued spinlock structure
 * Return: 1 if it is unlocked, 0 otherwise
 *
 * N.B. Whenever there are tasks waiting for the lock, it is considered
 *      locked wrt the lockref code to avoid lock stealing by the lockref
 *      code and change things underneath the lock. This also allows some
 *      optimizations to be applied without conflict with lockref.
 */
static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
{
        return !atomic_read(&lock.val);
}

/**
 * queued_spin_is_contended - check if the lock is contended
 * @lock : Pointer to queued spinlock structure
 * Return: 1 if lock contended, 0 otherwise
 */
static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
{
        return atomic_read(&lock->val) & ~_Q_LOCKED_MASK;
}

/**
 * queued_spin_trylock - try to acquire the queued spinlock
 * @lock : Pointer to queued spinlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static __always_inline int queued_spin_trylock(struct qspinlock *lock)
{
        int val = atomic_read(&lock->val);

        if (unlikely(val))
                return 0;

        return likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL));
}

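/*
 * Slow path, implemented in kernel/locking/qspinlock.c. The fast path in
 * queued_spin_lock() below is a single 0 -> _Q_LOCKED_VAL cmpxchg; under
 * contention the slowpath queues waiters on per-CPU MCS nodes so that
 * each waiting CPU spins on its own cacheline.
 */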
extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);

#ifndef queued_spin_lock
/**
 * queued_spin_lock - acquire a queued spinlock
 * @lock: Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
        int val = 0;

        if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
                return;

        queued_spin_lock_slowpath(lock, val);
}
#endif

#ifndef queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock : Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_unlock(struct qspinlock *lock)
{
        /*
         * unlock() needs release semantics:
         */
        smp_store_release(&lock->locked, 0);
}
#endif

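/*
 * virt_spin_lock() lets a virtualized guest bypass the fair queueing code
 * (typically by falling back to a simple test-and-set lock), since strict
 * FIFO ordering interacts badly with vCPU preemption. The default below
 * returns false, i.e. always use the native queued behaviour.
 */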
#ifndef virt_spin_lock
static __always_inline bool virt_spin_lock(struct qspinlock *lock)
{
        return false;
}
#endif

/*
 * Remapping spinlock architecture specific functions to the corresponding
 * queued spinlock functions.
 */
#define arch_spin_is_locked(l)          queued_spin_is_locked(l)
#define arch_spin_is_contended(l)       queued_spin_is_contended(l)
#define arch_spin_value_unlocked(l)     queued_spin_value_unlocked(l)
#define arch_spin_lock(l)               queued_spin_lock(l)
#define arch_spin_trylock(l)            queued_spin_trylock(l)
#define arch_spin_unlock(l)             queued_spin_unlock(l)

#endif /* __ASM_GENERIC_QSPINLOCK_H */
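
For orientation (not part of this header): an architecture that selects
ARCH_USE_QUEUED_SPINLOCKS exposes this implementation through its own
<asm/qspinlock.h>, either generated by the generic-y Kbuild mechanism or
hand-written when extra hooks are needed. A minimal hand-written wrapper
would look roughly like the sketch below; the guard name and path are
illustrative, not taken from any particular architecture.

/* SPDX-License-Identifier: GPL-2.0 */
/* Hypothetical arch/<arch>/include/asm/qspinlock.h */
#ifndef _ASM_EXAMPLE_QSPINLOCK_H
#define _ASM_EXAMPLE_QSPINLOCK_H

/* Pull in the generic queued spinlock implementation unchanged. */
#include <asm-generic/qspinlock.h>

#endif /* _ASM_EXAMPLE_QSPINLOCK_H */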