arch/s390/lib/spinlock.c (linux/fpc-iii.git)
// SPDX-License-Identifier: GPL-2.0
/*
 *    Out of line spinlock code.
 *
 *    Copyright IBM Corp. 2004, 2006
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/types.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/io.h>
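
/*
 * Number of acquisition attempts a spinning CPU makes before it
 * considers yielding to the hypervisor. Left at -1 here; it defaults
 * to 1000 in spin_retry_init() below unless overridden with the
 * spin_retry= kernel parameter.
 */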
int spin_retry = -1;

static int __init spin_retry_init(void)
{
        if (spin_retry < 0)
                spin_retry = 1000;
        return 0;
}
early_initcall(spin_retry_init);

/**
 * spin_retry= parameter
 */
static int __init spin_retry_setup(char *str)
{
        spin_retry = simple_strtoul(str, &str, 0);
        return 1;
}
__setup("spin_retry=", spin_retry_setup);
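
/*
 * Load the lock value. On zEC12 and newer the load is preceded by a
 * NIAI ("next instruction access intent", opcode 0xb2fa) hint telling
 * the CPU how the following instruction will access the lock cache
 * line; it is emitted as a raw .long so the file still assembles with
 * toolchains that do not know the mnemonic.
 */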
static inline int arch_load_niai4(int *lock)
{
        int owner;

        asm volatile(
#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
                " .long 0xb2fa0040\n"   /* NIAI 4 */
#endif
                " l %0,%1\n"
                : "=d" (owner) : "Q" (*lock) : "memory");
        return owner;
}
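
/*
 * Compare-and-swap the lock value. The preceding NIAI 8 hint uses a
 * different access-intent code than the plain load above, presumably
 * to announce the upcoming store to the lock cache line. Returns
 * nonzero if the swap from @old to @new succeeded.
 */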
static inline int arch_cmpxchg_niai8(int *lock, int old, int new)
{
        int expected = old;

        asm volatile(
#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
                " .long 0xb2fa0080\n"   /* NIAI 8 */
#endif
                " cs %0,%3,%1\n"
                : "=d" (old), "=Q" (*lock)
                : "0" (old), "d" (new), "Q" (*lock)
                : "cc", "memory");
        return expected == old;
}
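
/*
 * Slow path of arch_spin_lock(). The lock word holds the bitwise
 * complement of the owning CPU number (SPINLOCK_LOCKVAL), which is why
 * ~owner is passed to the vcpu helpers: it lets the hypervisor yield
 * directly to the lock holder when that virtual CPU is not running.
 */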
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
        int cpu = SPINLOCK_LOCKVAL;
        int owner, count;

        /* Pass the virtual CPU to the lock holder if it is not running */
        owner = arch_load_niai4(&lp->lock);
        if (owner && arch_vcpu_is_preempted(~owner))
                smp_yield_cpu(~owner);

        count = spin_retry;
        while (1) {
                owner = arch_load_niai4(&lp->lock);
                /* Try to get the lock if it is free. */
                if (!owner) {
                        if (arch_cmpxchg_niai8(&lp->lock, 0, cpu))
                                return;
                        continue;
                }
                if (count-- >= 0)
                        continue;
                count = spin_retry;
                /*
                 * For multiple layers of hypervisors, e.g. z/VM + LPAR
                 * yield the CPU unconditionally. For LPAR rely on the
                 * sense running status.
                 */
                if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner))
                        smp_yield_cpu(~owner);
        }
}
EXPORT_SYMBOL(arch_spin_lock_wait);
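
/*
 * Same as arch_spin_lock_wait(), but entered with interrupts disabled
 * and the saved flags in @flags. Interrupts are re-enabled while the
 * CPU spins and disabled again around each acquisition attempt, so the
 * lock is always taken with interrupts off.
 */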
void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
        int cpu = SPINLOCK_LOCKVAL;
        int owner, count;

        local_irq_restore(flags);

        /* Pass the virtual CPU to the lock holder if it is not running */
        owner = arch_load_niai4(&lp->lock);
        if (owner && arch_vcpu_is_preempted(~owner))
                smp_yield_cpu(~owner);

        count = spin_retry;
        while (1) {
                owner = arch_load_niai4(&lp->lock);
                /* Try to get the lock if it is free. */
                if (!owner) {
                        local_irq_disable();
                        if (arch_cmpxchg_niai8(&lp->lock, 0, cpu))
                                return;
                        local_irq_restore(flags);
                        continue;
                }
                if (count-- >= 0)
                        continue;
                count = spin_retry;
                /*
                 * For multiple layers of hypervisors, e.g. z/VM + LPAR
                 * yield the CPU unconditionally. For LPAR rely on the
                 * sense running status.
                 */
                if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner))
                        smp_yield_cpu(~owner);
        }
}
EXPORT_SYMBOL(arch_spin_lock_wait_flags);
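
/*
 * Bounded trylock: retry the compare-and-swap up to spin_retry times
 * without ever yielding the CPU; give up and return 0 if the lock
 * stays contended.
 */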
int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
        int cpu = SPINLOCK_LOCKVAL;
        int owner, count;

        for (count = spin_retry; count > 0; count--) {
                owner = READ_ONCE(lp->lock);
                /* Try to get the lock if it is free. */
                if (!owner) {
                        if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu))
                                return 1;
                }
        }
        return 0;
}
EXPORT_SYMBOL(arch_spin_trylock_retry);
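
/*
 * rwlock slow paths. The lock word counts readers in the low 31 bits
 * and uses the 0x80000000 bit for a writer, so a negative value means
 * a writer holds or is acquiring the lock. rw->owner is expected to
 * hold the lock value of the writing CPU so that waiters can yield to
 * it directly.
 *
 * Reader slow path: with the z196 facilities the inline fast path has
 * presumably already bumped the reader count, so back that out first,
 * then spin until no writer is active and the count can be incremented
 * atomically.
 */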
void _raw_read_lock_wait(arch_rwlock_t *rw)
{
        int count = spin_retry;
        int owner, old;

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
        __RAW_LOCK(&rw->lock, -1, __RAW_OP_ADD);
#endif
        owner = 0;
        while (1) {
                if (count-- <= 0) {
                        if (owner && arch_vcpu_is_preempted(~owner))
                                smp_yield_cpu(~owner);
                        count = spin_retry;
                }
                old = ACCESS_ONCE(rw->lock);
                owner = ACCESS_ONCE(rw->owner);
                if (old < 0)
                        continue;
                if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
                        return;
        }
}
EXPORT_SYMBOL(_raw_read_lock_wait);
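
/*
 * Bounded read-trylock: retry up to spin_retry times, skipping
 * attempts while a writer holds the lock (negative lock word), and
 * give up once the retries are exhausted.
 */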
int _raw_read_trylock_retry(arch_rwlock_t *rw)
{
        int count = spin_retry;
        int old;

        while (count-- > 0) {
                old = ACCESS_ONCE(rw->lock);
                if (old < 0)
                        continue;
                if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
                        return 1;
        }
        return 0;
}
EXPORT_SYMBOL(_raw_read_trylock_retry);

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
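
/*
 * Writer slow path for machines with the interlocked-access facility:
 * the caller is expected to have set the 0x80000000 write bit already
 * and passes the previous lock value in @prev. Spin until all readers
 * have drained and no other writer was ahead of us.
 */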
void _raw_write_lock_wait(arch_rwlock_t *rw, int prev)
{
        int count = spin_retry;
        int owner, old;

        owner = 0;
        while (1) {
                if (count-- <= 0) {
                        if (owner && arch_vcpu_is_preempted(~owner))
                                smp_yield_cpu(~owner);
                        count = spin_retry;
                }
                old = ACCESS_ONCE(rw->lock);
                owner = ACCESS_ONCE(rw->owner);
                smp_mb();
                if (old >= 0) {
                        prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
                        old = prev;
                }
                if ((old & 0x7fffffff) == 0 && prev >= 0)
                        break;
        }
}
EXPORT_SYMBOL(_raw_write_lock_wait);

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
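
/*
 * Writer slow path without the interlocked-access facility: set the
 * write bit with a compare-and-swap whenever the lock word is
 * non-negative, then keep spinning until the reader count has drained.
 */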
void _raw_write_lock_wait(arch_rwlock_t *rw)
{
        int count = spin_retry;
        int owner, old, prev;

        prev = 0x80000000;
        owner = 0;
        while (1) {
                if (count-- <= 0) {
                        if (owner && arch_vcpu_is_preempted(~owner))
                                smp_yield_cpu(~owner);
                        count = spin_retry;
                }
                old = ACCESS_ONCE(rw->lock);
                owner = ACCESS_ONCE(rw->owner);
                if (old >= 0 &&
                    __atomic_cmpxchg_bool(&rw->lock, old, old | 0x80000000))
                        prev = old;
                else
                        smp_mb();
                if ((old & 0x7fffffff) == 0 && prev >= 0)
                        break;
        }
}
EXPORT_SYMBOL(_raw_write_lock_wait);

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
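
/*
 * Bounded write-trylock: retry up to spin_retry times; the lock can
 * only be taken for writing when the whole lock word is zero, i.e.
 * there are no readers and no writer.
 */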
int _raw_write_trylock_retry(arch_rwlock_t *rw)
{
        int count = spin_retry;
        int old;

        while (count-- > 0) {
                old = ACCESS_ONCE(rw->lock);
                if (old)
                        continue;
                if (__atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000))
                        return 1;
        }
        return 0;
}
EXPORT_SYMBOL(_raw_write_trylock_retry);
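
/*
 * Directed-yield helper for the lock wait loops: @cpu is the raw lock
 * value, i.e. the complemented CPU number of the owner, or 0 if the
 * lock is not held. Yield to the owner unless we run on LPAR and the
 * owner is known to be running.
 */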
void arch_lock_relax(int cpu)
{
        if (!cpu)
                return;
        if (MACHINE_IS_LPAR && !arch_vcpu_is_preempted(~cpu))
                return;
        smp_yield_cpu(~cpu);
}
EXPORT_SYMBOL(arch_lock_relax);