include/asm-generic/qspinlock.h
/*
 * Queued spinlock
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
 *
 * Authors: Waiman Long <waiman.long@hpe.com>
 */
#ifndef __ASM_GENERIC_QSPINLOCK_H
#define __ASM_GENERIC_QSPINLOCK_H

#include <asm-generic/qspinlock_types.h>
/**
 * queued_spin_is_locked - is the spinlock locked?
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if it is locked, 0 otherwise
 */
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
        return atomic_read(&lock->val);
}
/**
 * queued_spin_value_unlocked - is the spinlock structure unlocked?
 * @lock: queued spinlock structure
 * Return: 1 if it is unlocked, 0 otherwise
 *
 * N.B. Whenever there are tasks waiting for the lock, it is considered
 *      locked wrt the lockref code, to keep the lockref code from stealing
 *      the lock and changing things underneath it. This also allows some
 *      optimizations to be applied without conflict with lockref.
 */
static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
{
        return !atomic_read(&lock.val);
}
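/*
 * Illustrative sketch (assumption, modelled loosely on lib/lockref.c and not
 * part of this header): a lockless reference-count update is only attempted
 * while a snapshot of the lock value reads as unlocked; otherwise the caller
 * falls back to taking the spinlock for real.
 *
 *      old = READ_ONCE(*lockref_word);
 *      if (arch_spin_value_unlocked(old.lock))
 *              try a cmpxchg-based count update on the snapshot;
 *      else
 *              take the lock and update the count under it;
 */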
/**
 * queued_spin_is_contended - check if the lock is contended
 * @lock : Pointer to queued spinlock structure
 * Return: 1 if lock contended, 0 otherwise
 */
static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
{
        return atomic_read(&lock->val) & ~_Q_LOCKED_MASK;
}
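/*
 * Illustrative sketch (assumption, not part of this header): a lock holder
 * can use the contended check to implement a voluntary lock-break pattern.
 * 'my_lock' is a hypothetical arch_spinlock_t.
 *
 *      if (queued_spin_is_contended(&my_lock)) {
 *              queued_spin_unlock(&my_lock);
 *              cpu_relax();
 *              queued_spin_lock(&my_lock);
 *      }
 */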
/**
 * queued_spin_trylock - try to acquire the queued spinlock
 * @lock : Pointer to queued spinlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static __always_inline int queued_spin_trylock(struct qspinlock *lock)
{
        if (!atomic_read(&lock->val) &&
           (atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL) == 0))
                return 1;
        return 0;
}
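/*
 * Illustrative sketch (assumption, not part of this header): the usual
 * trylock pattern, where the caller does something else instead of spinning
 * when the lock is unavailable. 'my_lock' and do_work() are hypothetical.
 *
 *      if (queued_spin_trylock(&my_lock)) {
 *              do_work();
 *              queued_spin_unlock(&my_lock);
 *      }
 */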
extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);

/**
 * queued_spin_lock - acquire a queued spinlock
 * @lock: Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
        u32 val;

        val = atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL);
        if (likely(val == 0))
                return;
        queued_spin_lock_slowpath(lock, val);
}
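/*
 * Illustrative sketch (assumption, not part of this header): the fast path
 * above succeeds with a single cmpxchg when the lock word is 0 (unlocked, no
 * waiters); any other value sends the caller to the queueing slow path. A
 * plain critical section, with 'my_lock' hypothetical, looks like:
 *
 *      queued_spin_lock(&my_lock);
 *      ... critical section ...
 *      queued_spin_unlock(&my_lock);
 */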
#ifndef queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock : Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_unlock(struct qspinlock *lock)
{
        /*
         * Use smp_mb__before_atomic() so that the non-value-returning
         * atomic_sub() below provides release semantics.
         */
        smp_mb__before_atomic();
        atomic_sub(_Q_LOCKED_VAL, &lock->val);
}
#endif
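/*
 * Illustrative sketch (assumption, not part of this header): an architecture
 * whose stores already have release semantics can supply a cheaper unlock by
 * defining queued_spin_unlock before including this file, along the lines of
 * what x86 does with a single byte store:
 *
 *      #define queued_spin_unlock queued_spin_unlock
 *      static __always_inline void queued_spin_unlock(struct qspinlock *lock)
 *      {
 *              smp_store_release((u8 *)lock, 0);
 *      }
 */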
/**
 * queued_spin_unlock_wait - wait until current lock holder releases the lock
 * @lock : Pointer to queued spinlock structure
 *
 * There is a very slight possibility of live-lock if the lockers keep coming
 * and the waiter is just unfortunate enough to not see any unlock state.
 */
static inline void queued_spin_unlock_wait(struct qspinlock *lock)
{
        while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
                cpu_relax();
}
#ifndef virt_spin_lock
static __always_inline bool virt_spin_lock(struct qspinlock *lock)
{
        return false;
}
#endif
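/*
 * Illustrative sketch (assumption, not part of this header): virt_spin_lock()
 * is a hook for hypervisor guests. Returning false keeps the native queued
 * code; an architecture can override it to fall back to an unfair
 * test-and-set lock when running virtualized, roughly as x86 does:
 *
 *      #define virt_spin_lock virt_spin_lock
 *      static inline bool virt_spin_lock(struct qspinlock *lock)
 *      {
 *              if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
 *                      return false;
 *
 *              do {
 *                      while (atomic_read(&lock->val) != 0)
 *                              cpu_relax();
 *              } while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);
 *
 *              return true;
 *      }
 */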
/*
 * Initializer
 */
#define __ARCH_SPIN_LOCK_UNLOCKED       { ATOMIC_INIT(0) }
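/*
 * Illustrative sketch (assumption, not part of this header): statically
 * initializing a lock at the arch_spinlock_t level; 'my_lock' is
 * hypothetical, and most kernel code would use the spinlock_t wrappers
 * instead of touching this layer directly.
 *
 *      static arch_spinlock_t my_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 */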
/*
 * Remap the architecture-specific spinlock functions to the corresponding
 * queued spinlock functions.
 */
#define arch_spin_is_locked(l)          queued_spin_is_locked(l)
#define arch_spin_is_contended(l)       queued_spin_is_contended(l)
#define arch_spin_value_unlocked(l)     queued_spin_value_unlocked(l)
#define arch_spin_lock(l)               queued_spin_lock(l)
#define arch_spin_trylock(l)            queued_spin_trylock(l)
#define arch_spin_unlock(l)             queued_spin_unlock(l)
#define arch_spin_lock_flags(l, f)      queued_spin_lock(l)
#define arch_spin_unlock_wait(l)        queued_spin_unlock_wait(l)
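/*
 * Illustrative sketch (assumption, not part of this header): an architecture
 * opts in by providing its own asm/qspinlock.h that defines any overrides and
 * then pulls in this generic header, so the arch_spin_*() API above maps onto
 * the queued implementations. The file and guard names below follow the x86
 * pattern but are hypothetical here:
 *
 *      -- arch/xxx/include/asm/qspinlock.h --
 *      #ifndef _ASM_XXX_QSPINLOCK_H
 *      #define _ASM_XXX_QSPINLOCK_H
 *
 *      ... optional overrides such as queued_spin_unlock() ...
 *
 *      #include <asm-generic/qspinlock.h>
 *
 *      #endif
 */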
#endif /* __ASM_GENERIC_QSPINLOCK_H */