arch/parisc/include/asm/spinlock.h (Linux 5.7.6)

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/barrier.h>
#include <asm/ldcw.h>
#include <asm/processor.h>
#include <asm/spinlock_types.h>
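
/*
 * On PA-RISC the only atomic read-modify-write primitive is ldcw
 * (load and clear word): it atomically loads a word and stores zero
 * into it.  A lock word of 1 therefore means "free" and 0 means
 * "taken".  __ldcw_align() exists because, on pre-PA2.0 CPUs, ldcw
 * requires a 16-byte aligned address, so the actual lock word is
 * picked out of a padded array inside arch_spinlock_t.
 */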

static inline int arch_spin_is_locked(arch_spinlock_t *x)
{
        volatile unsigned int *a = __ldcw_align(x);
        smp_mb();
        return *a == 0;
}
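
/*
 * Acquire: try the atomic ldcw; if it returns 0 the lock was already
 * held, so spin on plain loads until the word is seen nonzero before
 * retrying.  Spinning on an ordinary read (rather than hammering ldcw)
 * keeps the lock's cache line shared among the waiters.
 */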
static inline void arch_spin_lock(arch_spinlock_t *x)
{
        volatile unsigned int *a;

        a = __ldcw_align(x);
        while (__ldcw(a) == 0)
                while (*a == 0)
                        cpu_relax();
}
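
/*
 * As arch_spin_lock(), but @flags carries the caller's interrupt state
 * from before it disabled interrupts.  While the lock is contended,
 * that earlier state is temporarily restored so pending interrupts can
 * be serviced, then interrupts are disabled again before retrying.
 */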
static inline void arch_spin_lock_flags(arch_spinlock_t *x,
                                        unsigned long flags)
{
        volatile unsigned int *a;
        unsigned long flags_dis;

        a = __ldcw_align(x);
        while (__ldcw(a) == 0) {
                local_save_flags(flags_dis);
                local_irq_restore(flags);
                while (*a == 0)
                        cpu_relax();
                local_irq_restore(flags_dis);
        }
}
#define arch_spin_lock_flags arch_spin_lock_flags
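
/*
 * Release: store 1 back into the lock word.  On SMP a dummy ldcw is
 * issued first, purely for its barrier effect: ldcw is strongly
 * ordered on PA-RISC, so it keeps the critical section's accesses
 * from drifting past the releasing store.  The clearing side effect
 * is harmless here, since the word is already 0 while the lock is
 * held.
 */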
static inline void arch_spin_unlock(arch_spinlock_t *x)
{
        volatile unsigned int *a;

        a = __ldcw_align(x);
#ifdef CONFIG_SMP
        (void) __ldcw(a);
#else
        mb();
#endif
        *a = 1;
}
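
/*
 * Trylock is a single ldcw attempt: a nonzero old value means the
 * word was 1 (free) and we now own the lock; 0 means someone else
 * holds it.
 */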
static inline int arch_spin_trylock(arch_spinlock_t *x)
{
        volatile unsigned int *a;
        int ret;

        a = __ldcw_align(x);
        ret = __ldcw(a) != 0;

        return ret;
}
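
/*
 * Usage sketch (illustrative only; real users reach these through the
 * generic spin_lock()/spin_unlock() wrappers):
 *
 *	arch_spinlock_t l = __ARCH_SPIN_LOCK_UNLOCKED;
 *
 *	if (!arch_spin_trylock(&l))	// contended: fall back to spinning
 *		arch_spin_lock(&l);
 *	... critical section ...
 *	arch_spin_unlock(&l);
 */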

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking: writers can be starved indefinitely by readers.
 *
 * The spinlock itself is contained in @counter and access to it is
 * serialized with @lock_mutex.
 */
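
/*
 * For reference, arch_rwlock_t (from asm/spinlock_types.h in this
 * tree) is roughly:
 *
 *	typedef struct {
 *		arch_spinlock_t		lock_mutex;
 *		volatile unsigned int	counter;
 *	} arch_rwlock_t;
 *
 * with @counter initialized to __ARCH_RW_LOCK_UNLOCKED__.
 */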

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
        int ret = 0;
        unsigned long flags;

        local_irq_save(flags);
        arch_spin_lock(&(rw->lock_mutex));

        /*
         * zero means writer holds the lock exclusively, deny Reader.
         * Otherwise grant lock to first/subsequent reader
         */
        if (rw->counter > 0) {
                rw->counter--;
                ret = 1;
        }

        arch_spin_unlock(&(rw->lock_mutex));
        local_irq_restore(flags);

        return ret;
}
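
/*
 * Worked example: with __ARCH_RW_LOCK_UNLOCKED__ defined as the large
 * positive constant 0x01000000 in this tree, one active reader leaves
 * counter at 0x00ffffff, two readers at 0x00fffffe, and so on; a
 * writer (below) drops it to 0, which makes the counter > 0 test
 * above fail for readers.
 */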

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
        int ret = 0;
        unsigned long flags;

        local_irq_save(flags);
        arch_spin_lock(&(rw->lock_mutex));

        /*
         * If reader(s) hold the lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
         * deny the writer.  Otherwise, if unlocked, grant it to the writer.
         * Hence the claim that Linux rwlocks are unfair to writers
         * (they can be starved for an indefinite time by readers).
         */
        if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
                rw->counter = 0;
                ret = 1;
        }

        arch_spin_unlock(&(rw->lock_mutex));
        local_irq_restore(flags);

        return ret;
}
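
/*
 * Note that a successful writer leaves counter at 0, so subsequent
 * readers fail the counter > 0 check and subsequent writers fail the
 * counter == __ARCH_RW_LOCK_UNLOCKED__ check until the writer unlocks.
 */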

static inline void arch_read_lock(arch_rwlock_t *rw)
{
        while (!arch_read_trylock(rw))
                cpu_relax();
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        while (!arch_write_trylock(rw))
                cpu_relax();
}
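
/*
 * The blocking lock variants above simply poll the trylock under
 * cpu_relax(); since the trylocks disable interrupts only while they
 * hold lock_mutex, interrupts remain serviceable while waiting.  The
 * unlock paths below retake lock_mutex and undo the counter change:
 * readers increment it, the writer restores __ARCH_RW_LOCK_UNLOCKED__.
 */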

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        unsigned long flags;

        local_irq_save(flags);
        arch_spin_lock(&(rw->lock_mutex));
        rw->counter++;
        arch_spin_unlock(&(rw->lock_mutex));
        local_irq_restore(flags);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        unsigned long flags;

        local_irq_save(flags);
        arch_spin_lock(&(rw->lock_mutex));
        rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
        arch_spin_unlock(&(rw->lock_mutex));
        local_irq_restore(flags);
}

#endif /* __ASM_SPINLOCK_H */