/*
 * arch/s390/lib/spinlock.c - out of line spinlock code.
 *
 * Copyright IBM Corp. 2004, 2006
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */
#include <linux/types.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/io.h>
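/*
 * Number of busy-wait iterations before yielding. -1 means "not set on
 * the kernel command line"; spin_retry_init() below then picks a default,
 * using far fewer iterations when compare-and-delay is available since
 * each iteration already stalls the CPU.
 */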
int spin_retry = -1;
static int __init spin_retry_init(void)
{
	if (spin_retry < 0)
		spin_retry = MACHINE_HAS_CAD ? 10 : 1000;
	return 0;
}
early_initcall(spin_retry_init);
/**
 * spin_retry= parameter
 */
static int __init spin_retry_setup(char *str)
{
	spin_retry = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("spin_retry=", spin_retry_setup);
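/*
 * Emit the compare-and-delay instruction through .insn so that older
 * assemblers need no knowledge of it. On machines with MACHINE_HAS_CAD
 * the CPU briefly delays while *lock still contains the expected value
 * 'old', which makes the busy-wait loops below cheaper.
 */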
static inline void _raw_compare_and_delay(unsigned int *lock, unsigned int old)
{
	asm(".insn rsy,0xeb0000000022,%0,0,%1" : : "d" (old), "Q" (*lock));
}
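/*
 * Slow path of arch_spin_lock(). The lock word stores the owner's CPU
 * address in one's complement (SPINLOCK_LOCKVAL), so ~owner recovers the
 * CPU number to probe with smp_vcpu_scheduled() and to pass to
 * smp_yield_cpu() when the owning virtual CPU is not running.
 */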
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
	unsigned int cpu = SPINLOCK_LOCKVAL;
	unsigned int owner;
	int count;

	while (1) {
		owner = ACCESS_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
				return;
			continue;
		}
		/* Check if the lock owner is running. */
		if (!smp_vcpu_scheduled(~owner)) {
			smp_yield_cpu(~owner);
			continue;
		}
		/* Loop for a while on the lock value. */
		count = spin_retry;
		do {
			if (MACHINE_HAS_CAD)
				_raw_compare_and_delay(&lp->lock, owner);
			owner = ACCESS_ONCE(lp->lock);
		} while (owner && count-- > 0);
		if (!owner)
			continue;
		/*
		 * For multiple layers of hypervisors, e.g. z/VM + LPAR
		 * yield the CPU if the lock is still unavailable.
		 */
		if (!MACHINE_IS_LPAR)
			smp_yield_cpu(~owner);
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait);
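/*
 * Same as arch_spin_lock_wait(), but for the irqsave case: interrupts
 * stay enabled (flags restored) while waiting and are disabled again
 * only around the actual compare-and-swap acquisition attempt.
 */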
void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
	unsigned int cpu = SPINLOCK_LOCKVAL;
	unsigned int owner;
	int count;

	local_irq_restore(flags);
	while (1) {
		owner = ACCESS_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			local_irq_disable();
			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
				return;
			local_irq_restore(flags);
		}
		/* Check if the lock owner is running. */
		if (!smp_vcpu_scheduled(~owner)) {
			smp_yield_cpu(~owner);
			continue;
		}
		/* Loop for a while on the lock value. */
		count = spin_retry;
		do {
			if (MACHINE_HAS_CAD)
				_raw_compare_and_delay(&lp->lock, owner);
			owner = ACCESS_ONCE(lp->lock);
		} while (owner && count-- > 0);
		if (!owner)
			continue;
		/*
		 * For multiple layers of hypervisors, e.g. z/VM + LPAR
		 * yield the CPU if the lock is still unavailable.
		 */
		if (!MACHINE_IS_LPAR)
			smp_yield_cpu(~owner);
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait_flags);
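/*
 * Bounded trylock: attempt the compare-and-swap up to spin_retry times,
 * returning 1 on success and 0 once the retry budget is exhausted.
 */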
int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
	unsigned int cpu = SPINLOCK_LOCKVAL;
	unsigned int owner;
	int count;

	for (count = spin_retry; count > 0; count--) {
		owner = ACCESS_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
				return 1;
		} else if (MACHINE_HAS_CAD)
			_raw_compare_and_delay(&lp->lock, owner);
	}
	return 0;
}
EXPORT_SYMBOL(arch_spin_trylock_retry);
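/*
 * rwlock slow paths. Bit 31 of rw->lock is the writer bit and the lower
 * 31 bits count readers, so a negative lock value means write-locked.
 * With the z196 interlocked-access facility the read_lock fast path has
 * already added 1 speculatively; the __RAW_LOCK(..., -1, __RAW_OP_ADD)
 * below backs that increment out before waiting.
 */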
void _raw_read_lock_wait(arch_rwlock_t *rw)
{
	unsigned int owner, old;
	int count = spin_retry;

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	__RAW_LOCK(&rw->lock, -1, __RAW_OP_ADD);
#endif
	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && !smp_vcpu_scheduled(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		if ((int) old < 0) {
			if (MACHINE_HAS_CAD)
				_raw_compare_and_delay(&rw->lock, old);
			continue;
		}
		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
			return;
	}
}
EXPORT_SYMBOL(_raw_read_lock_wait);
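/*
 * Bounded read-trylock: the same reader-count increment as above,
 * limited to spin_retry attempts.
 */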
int _raw_read_trylock_retry(arch_rwlock_t *rw)
{
	unsigned int old;
	int count = spin_retry;

	while (count-- > 0) {
		old = ACCESS_ONCE(rw->lock);
		if ((int) old < 0) {
			if (MACHINE_HAS_CAD)
				_raw_compare_and_delay(&rw->lock, old);
			continue;
		}
		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_read_trylock_retry);
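/*
 * Write-lock slow path, in two flavours. On z196 and newer the writer
 * bit is set with an interlocked load-and-or (__RAW_LOCK/__RAW_OP_OR)
 * and 'prev' carries the lock value the fast path saw; the loop spins
 * until the reader count drains ((old & 0x7fffffff) == 0) and no earlier
 * writer bit was observed ((int) prev >= 0).
 */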
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

void _raw_write_lock_wait(arch_rwlock_t *rw, unsigned int prev)
{
	unsigned int owner, old;
	int count = spin_retry;

	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && !smp_vcpu_scheduled(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		smp_rmb();
		if ((int) old >= 0) {
			prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
			old = prev;
		}
		if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
			break;
		if (MACHINE_HAS_CAD)
			_raw_compare_and_delay(&rw->lock, old);
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait);
#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
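/*
 * Pre-z196 fallback: the writer bit is set with compare-and-swap instead
 * of an interlocked load-and-or.
 */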
void _raw_write_lock_wait(arch_rwlock_t *rw)
{
	unsigned int owner, old, prev;
	int count = spin_retry;

	prev = 0x80000000;
	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && !smp_vcpu_scheduled(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		if ((int) old >= 0 &&
		    _raw_compare_and_swap(&rw->lock, old, old | 0x80000000))
			prev = old;
		else
			smp_rmb();
		if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
			break;
		if (MACHINE_HAS_CAD)
			_raw_compare_and_delay(&rw->lock, old);
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait);
#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
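/*
 * Bounded write-trylock: a writer can only take the lock when it is
 * completely free (no readers, no writer), hence the compare against 0.
 */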
int _raw_write_trylock_retry(arch_rwlock_t *rw)
{
	unsigned int old;
	int count = spin_retry;

	while (count-- > 0) {
		old = ACCESS_ONCE(rw->lock);
		if (old) {
			if (MACHINE_HAS_CAD)
				_raw_compare_and_delay(&rw->lock, old);
			continue;
		}
		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_write_trylock_retry);
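/*
 * Out-of-line relax for lock wait loops. A lock value of 0 means the
 * lock is free, and on LPAR a running owner needs no help; otherwise
 * ask the hypervisor to yield to the owning CPU.
 */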
void arch_lock_relax(unsigned int cpu)
{
	if (!cpu)
		return;
	if (MACHINE_IS_LPAR && smp_vcpu_scheduled(~cpu))
		return;
	smp_yield_cpu(~cpu);
}
EXPORT_SYMBOL(arch_lock_relax);