Linux 4.13.16
arch/s390/lib/spinlock.c

/*
 * Out of line spinlock code.
 *
 * Copyright IBM Corp. 2004, 2006
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/types.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/io.h>

int spin_retry = -1;

static int __init spin_retry_init(void)
{
	if (spin_retry < 0)
		spin_retry = 1000;
	return 0;
}
early_initcall(spin_retry_init);

/**
 * spin_retry= parameter
 */
static int __init spin_retry_setup(char *str)
{
	spin_retry = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("spin_retry=", spin_retry_setup);
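
/*
 * Out-of-line slow path for the spinlock fast path in asm/spinlock.h.
 * The lock word holds the owner's lockval (SPINLOCK_LOCKVAL, assumed
 * here to be the bitwise complement of the CPU number, which is why
 * ~owner is used whenever a CPU number is needed).  A waiter that finds
 * the lock taken checks via arch_vcpu_is_preempted() whether the owner's
 * virtual CPU is actually running and, if not, donates its time slice
 * with smp_yield_cpu() before spinning on the lock value again.
 */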
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
	int cpu = SPINLOCK_LOCKVAL;
	int owner, count, first_diag;

	first_diag = 1;
	while (1) {
		owner = ACCESS_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu))
				return;
			continue;
		}
		/* First iteration: check if the lock owner is running. */
		if (first_diag && arch_vcpu_is_preempted(~owner)) {
			smp_yield_cpu(~owner);
			first_diag = 0;
			continue;
		}
		/* Loop for a while on the lock value. */
		count = spin_retry;
		do {
			owner = ACCESS_ONCE(lp->lock);
		} while (owner && count-- > 0);
		if (!owner)
			continue;
		/*
		 * For multiple layers of hypervisors, e.g. z/VM + LPAR
		 * yield the CPU unconditionally. For LPAR rely on the
		 * sense running status.
		 */
		if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner)) {
			smp_yield_cpu(~owner);
			first_diag = 0;
		}
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait);
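
/*
 * Variant of arch_spin_lock_wait() for the _flags locking path: the
 * caller has interrupts disabled and passes the saved flags.  Interrupts
 * are re-enabled with local_irq_restore() while busy waiting and disabled
 * again only around each attempt to grab the lock, so pending interrupts
 * are not held off for the whole wait.
 */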
void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
	int cpu = SPINLOCK_LOCKVAL;
	int owner, count, first_diag;

	local_irq_restore(flags);
	first_diag = 1;
	while (1) {
		owner = ACCESS_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			local_irq_disable();
			if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu))
				return;
			local_irq_restore(flags);
			continue;
		}
		/* Check if the lock owner is running. */
		if (first_diag && arch_vcpu_is_preempted(~owner)) {
			smp_yield_cpu(~owner);
			first_diag = 0;
			continue;
		}
		/* Loop for a while on the lock value. */
		count = spin_retry;
		do {
			owner = ACCESS_ONCE(lp->lock);
		} while (owner && count-- > 0);
		if (!owner)
			continue;
		/*
		 * For multiple layers of hypervisors, e.g. z/VM + LPAR
		 * yield the CPU unconditionally. For LPAR rely on the
		 * sense running status.
		 */
		if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner)) {
			smp_yield_cpu(~owner);
			first_diag = 0;
		}
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait_flags);
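
/*
 * Bounded retry loop behind the trylock fast path: make up to spin_retry
 * attempts to take a free lock without yielding to the current owner.
 * Returns 1 on success, 0 if the lock stayed contended.
 */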
int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
	int cpu = SPINLOCK_LOCKVAL;
	int owner, count;

	for (count = spin_retry; count > 0; count--) {
		owner = READ_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu))
				return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(arch_spin_trylock_retry);
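
/*
 * Reader slow path of the arch rwlock.  The lock word is a signed int:
 * a non-negative value is the reader count, a negative value (bit 31,
 * 0x80000000) means a writer owns or is acquiring the lock, and rw->owner
 * is expected to hold the writer's lockval so waiters can yield to it.
 * On machines with the z196 interlocked-access facility the read_lock
 * fast path is assumed to have added its reader reference up front; the
 * __RAW_LOCK(..., -1, __RAW_OP_ADD) below backs that out before retrying
 * with compare-and-swap.
 */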
void _raw_read_lock_wait(arch_rwlock_t *rw)
{
	int count = spin_retry;
	int owner, old;

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	__RAW_LOCK(&rw->lock, -1, __RAW_OP_ADD);
#endif
	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && arch_vcpu_is_preempted(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		if (old < 0)
			continue;
		if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
			return;
	}
}
EXPORT_SYMBOL(_raw_read_lock_wait);

int _raw_read_trylock_retry(arch_rwlock_t *rw)
{
	int count = spin_retry;
	int old;

	while (count-- > 0) {
		old = ACCESS_ONCE(rw->lock);
		if (old < 0)
			continue;
		if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_read_trylock_retry);
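
/*
 * Writer slow path of the arch rwlock.  A writer holds the lock once
 * bit 31 (0x80000000) is set and the reader count in the lower 31 bits
 * has drained to zero.  Two builds exist: with z196 interlocked-access
 * instructions the write-pending bit is set by an atomic OR through
 * __RAW_LOCK() and "prev" records whether this CPU is the one that set
 * it; without them the bit is set with compare-and-swap.  Either way the
 * loop only terminates once (old & 0x7fffffff) == 0 and prev >= 0.
 */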
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

void _raw_write_lock_wait(arch_rwlock_t *rw, int prev)
{
	int count = spin_retry;
	int owner, old;

	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && arch_vcpu_is_preempted(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		smp_mb();
		if (old >= 0) {
			prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
			old = prev;
		}
		if ((old & 0x7fffffff) == 0 && prev >= 0)
			break;
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait);

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

void _raw_write_lock_wait(arch_rwlock_t *rw)
{
	int count = spin_retry;
	int owner, old, prev;

	prev = 0x80000000;
	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && arch_vcpu_is_preempted(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		if (old >= 0 &&
		    __atomic_cmpxchg_bool(&rw->lock, old, old | 0x80000000))
			prev = old;
		else
			smp_mb();
		if ((old & 0x7fffffff) == 0 && prev >= 0)
			break;
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait);

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

int _raw_write_trylock_retry(arch_rwlock_t *rw)
{
	int count = spin_retry;
	int old;

	while (count-- > 0) {
		old = ACCESS_ONCE(rw->lock);
		if (old)
			continue;
		if (__atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_write_trylock_retry);
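
/*
 * Relax hook used while spinning on a contended spin- or rw-lock.  The
 * "cpu" argument is the raw lock value of the current owner (0 means
 * unowned), so ~cpu gives the owner's CPU number.  On LPAR the yield is
 * skipped while the sense running status reports the owner as running;
 * under additional hypervisor layers (e.g. z/VM) the CPU yields
 * unconditionally, matching the policy in arch_spin_lock_wait().
 */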
void arch_lock_relax(int cpu)
{
	if (!cpu)
		return;
	if (MACHINE_IS_LPAR && !arch_vcpu_is_preempted(~cpu))
		return;
	smp_yield_cpu(~cpu);
}
EXPORT_SYMBOL(arch_lock_relax);