/* arch/s390/include/asm/rwsem.h */
#ifndef _S390_RWSEM_H
#define _S390_RWSEM_H

/*
 *  S390 version
 *    Copyright IBM Corp. 2002
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Based on asm-alpha/semaphore.h and asm-i386/rwsem.h
 */

/*
 * The MSW of the count is the negated number of active writers and waiting
 * lockers, and the LSW is the total number of active locks
 *
 * The lock count is initialized to 0 (no active and no waiting lockers).
 *
 * When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case of an
 * uncontended lock. This can be determined because XADD returns the old value.
 * Readers increment by 1 and see a positive value when uncontended, negative
 * if there are writers (and maybe) readers waiting (in which case it goes to
 * sleep).
 *
 * The value of WAITING_BIAS supports up to 32766 waiting processes. This can
 * be extended to 65534 by manually checking the whole MSW rather than relying
 * on the S flag.
 *
 * The value of ACTIVE_BIAS supports up to 65535 active processes.
 *
 * This should be totally fair - if anything is waiting, a process that wants a
 * lock will go to the back of the queue. When the currently active lock is
 * released, if there's a writer at the front of the queue, then that and only
 * that will be woken up; if there's a bunch of consecutive readers at the
 * front, then they'll all be woken up, but no other readers will be.
 */

#ifndef _LINUX_RWSEM_H
#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
#endif

/*
 * Bias values for sem->count.  The 31-bit variants split the count into a
 * 16-bit active part (LSW) and a 16-bit waiter part (MSW); the 64-bit
 * variants use 32 bits for each half.
 */
#ifndef CONFIG_64BIT
#define RWSEM_UNLOCKED_VALUE	0x00000000	/* no active or waiting lockers */
#define RWSEM_ACTIVE_BIAS	0x00000001	/* one active lock holder */
#define RWSEM_ACTIVE_MASK	0x0000ffff	/* LSW: active-lock count */
#define RWSEM_WAITING_BIAS	(-0x00010000)	/* one waiting locker (MSW) */
#else /* CONFIG_64BIT */
#define RWSEM_UNLOCKED_VALUE	0x0000000000000000L
#define RWSEM_ACTIVE_BIAS	0x0000000000000001L
#define RWSEM_ACTIVE_MASK	0x00000000ffffffffL
#define RWSEM_WAITING_BIAS	(-0x0000000100000000L)
#endif /* CONFIG_64BIT */
#define RWSEM_ACTIVE_READ_BIAS	RWSEM_ACTIVE_BIAS
/* a write lock is both "active" and blocks others via the waiting part */
#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
/*
 * lock for reading
 *
 * Atomically add RWSEM_ACTIVE_READ_BIAS to sem->count with a
 * compare-and-swap (cs/csg) retry loop.  If the pre-add value was
 * negative, a writer is active or waiting, so fall into the slow path.
 */
static inline void __down_read(struct rw_semaphore *sem)
{
	signed long old, new;

	asm volatile(
#ifndef CONFIG_64BIT
		"	l	%0,%2\n"	/* old = sem->count */
		"0:	lr	%1,%0\n"	/* new = old */
		"	ahi	%1,%4\n"	/* new += READ_BIAS */
		"	cs	%0,%1,%2\n"	/* try to swap in new */
		"	jl	0b"		/* cs failed: retry with updated old */
#else /* CONFIG_64BIT */
		"	lg	%0,%2\n"
		"0:	lgr	%1,%0\n"
		"	aghi	%1,%4\n"
		"	csg	%0,%1,%2\n"
		"	jl	0b"
#endif /* CONFIG_64BIT */
		: "=&d" (old), "=&d" (new), "=Q" (sem->count)
		: "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
		: "cc", "memory");
	if (old < 0)
		/* writer active or waiting: block in the generic slow path */
		rwsem_down_read_failed(sem);
}
/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 *
 * Same cs/csg loop as __down_read(), but instead of sleeping on a
 * negative count it bails out immediately (jm = jump if minus).
 */
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	signed long old, new;

	asm volatile(
#ifndef CONFIG_64BIT
		"	l	%0,%2\n"	/* old = sem->count */
		"0:	ltr	%1,%0\n"	/* new = old, set condition code */
		"	jm	1f\n"		/* count < 0: contended, give up */
		"	ahi	%1,%4\n"	/* new += READ_BIAS */
		"	cs	%0,%1,%2\n"	/* try to swap in new */
		"	jl	0b\n"		/* cs failed: re-test updated old */
		"1:"
#else /* CONFIG_64BIT */
		"	lg	%0,%2\n"
		"0:	ltgr	%1,%0\n"
		"	jm	1f\n"
		"	aghi	%1,%4\n"
		"	csg	%0,%1,%2\n"
		"	jl	0b\n"
		"1:"
#endif /* CONFIG_64BIT */
		: "=&d" (old), "=&d" (new), "=Q" (sem->count)
		: "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
		: "cc", "memory");
	return old >= 0 ? 1 : 0;
}
/*
 * lock for writing
 *
 * Atomically add RWSEM_ACTIVE_WRITE_BIAS.  The bias is too wide for an
 * ahi/aghi immediate, so it is staged in a local and added from memory
 * (a/ag).  A non-zero pre-add count means the lock was held or contended,
 * so take the slow path.  @subclass is unused here (lockdep nesting level).
 */
static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
{
	signed long old, new, tmp;

	tmp = RWSEM_ACTIVE_WRITE_BIAS;
	asm volatile(
#ifndef CONFIG_64BIT
		"	l	%0,%2\n"	/* old = sem->count */
		"0:	lr	%1,%0\n"	/* new = old */
		"	a	%1,%4\n"	/* new += WRITE_BIAS (from tmp) */
		"	cs	%0,%1,%2\n"	/* try to swap in new */
		"	jl	0b"		/* cs failed: retry */
#else /* CONFIG_64BIT */
		"	lg	%0,%2\n"
		"0:	lgr	%1,%0\n"
		"	ag	%1,%4\n"
		"	csg	%0,%1,%2\n"
		"	jl	0b"
#endif /* CONFIG_64BIT */
		: "=&d" (old), "=&d" (new), "=Q" (sem->count)
		: "Q" (sem->count), "m" (tmp)
		: "cc", "memory");
	if (old != 0)
		/* lock was not free: block in the generic slow path */
		rwsem_down_write_failed(sem);
}
/*
 * lock for writing, no lockdep subclass (nesting level 0)
 */
static inline void __down_write(struct rw_semaphore *sem)
{
	__down_write_nested(sem, 0);
}
/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 *
 * Only succeeds on a completely unlocked semaphore: cs/csg swaps in
 * RWSEM_ACTIVE_WRITE_BIAS only while the count is still 0.
 */
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	signed long old;

	asm volatile(
#ifndef CONFIG_64BIT
		"	l	%0,%1\n"	/* old = sem->count */
		"0:	ltr	%0,%0\n"	/* test old against zero */
		"	jnz	1f\n"		/* held or contended: give up */
		"	cs	%0,%3,%1\n"	/* 0 -> WRITE_BIAS if unchanged */
		"	jl	0b\n"		/* cs failed: re-test updated old */
#else /* CONFIG_64BIT */
		"	lg	%0,%1\n"
		"0:	ltgr	%0,%0\n"
		"	jnz	1f\n"
		"	csg	%0,%3,%1\n"
		"	jl	0b\n"
#endif /* CONFIG_64BIT */
		"1:"
		: "=&d" (old), "=Q" (sem->count)
		: "Q" (sem->count), "d" (RWSEM_ACTIVE_WRITE_BIAS)
		: "cc", "memory");
	return (old == RWSEM_UNLOCKED_VALUE) ? 1 : 0;
}
/*
 * unlock after reading
 *
 * Atomically subtract RWSEM_ACTIVE_READ_BIAS.  If the result is negative
 * (waiters queued) and no active lockers remain in the LSW, wake the
 * front of the queue.
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	signed long old, new;

	asm volatile(
#ifndef CONFIG_64BIT
		"	l	%0,%2\n"	/* old = sem->count */
		"0:	lr	%1,%0\n"	/* new = old */
		"	ahi	%1,%4\n"	/* new -= READ_BIAS (negated immediate) */
		"	cs	%0,%1,%2\n"	/* try to swap in new */
		"	jl	0b"		/* cs failed: retry */
#else /* CONFIG_64BIT */
		"	lg	%0,%2\n"
		"0:	lgr	%1,%0\n"
		"	aghi	%1,%4\n"
		"	csg	%0,%1,%2\n"
		"	jl	0b"
#endif /* CONFIG_64BIT */
		: "=&d" (old), "=&d" (new), "=Q" (sem->count)
		: "Q" (sem->count), "i" (-RWSEM_ACTIVE_READ_BIAS)
		: "cc", "memory");
	if (new < 0)
		if ((new & RWSEM_ACTIVE_MASK) == 0)
			/* we were the last active locker and waiters exist */
			rwsem_wake(sem);
}
/*
 * unlock after writing
 *
 * Atomically subtract RWSEM_ACTIVE_WRITE_BIAS (staged in tmp; the value
 * does not fit an add-immediate).  Same wakeup condition as __up_read():
 * negative result with no remaining active lockers means waiters must
 * be woken.
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	signed long old, new, tmp;

	tmp = -RWSEM_ACTIVE_WRITE_BIAS;
	asm volatile(
#ifndef CONFIG_64BIT
		"	l	%0,%2\n"	/* old = sem->count */
		"0:	lr	%1,%0\n"	/* new = old */
		"	a	%1,%4\n"	/* new -= WRITE_BIAS (from tmp) */
		"	cs	%0,%1,%2\n"	/* try to swap in new */
		"	jl	0b"		/* cs failed: retry */
#else /* CONFIG_64BIT */
		"	lg	%0,%2\n"
		"0:	lgr	%1,%0\n"
		"	ag	%1,%4\n"
		"	csg	%0,%1,%2\n"
		"	jl	0b"
#endif /* CONFIG_64BIT */
		: "=&d" (old), "=&d" (new), "=Q" (sem->count)
		: "Q" (sem->count), "m" (tmp)
		: "cc", "memory");
	if (new < 0)
		if ((new & RWSEM_ACTIVE_MASK) == 0)
			/* waiters queued and lock now free: wake them */
			rwsem_wake(sem);
}
/*
 * downgrade write lock to read lock
 *
 * Atomically add -RWSEM_WAITING_BIAS, which turns the write bias
 * (WAITING_BIAS + ACTIVE_BIAS) into a plain read bias (ACTIVE_BIAS)
 * while keeping the lock held.  If the result exceeds 1, there is more
 * than our single active read lock accounted, so wake waiting readers
 * via the slow path.  NOTE(review): exact waiter accounting in 'new'
 * is defined by the generic rwsem slow path -- confirm there.
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	signed long old, new, tmp;

	tmp = -RWSEM_WAITING_BIAS;
	asm volatile(
#ifndef CONFIG_64BIT
		"	l	%0,%2\n"	/* old = sem->count */
		"0:	lr	%1,%0\n"	/* new = old */
		"	a	%1,%4\n"	/* new -= WAITING_BIAS (from tmp) */
		"	cs	%0,%1,%2\n"	/* try to swap in new */
		"	jl	0b"		/* cs failed: retry */
#else /* CONFIG_64BIT */
		"	lg	%0,%2\n"
		"0:	lgr	%1,%0\n"
		"	ag	%1,%4\n"
		"	csg	%0,%1,%2\n"
		"	jl	0b"
#endif /* CONFIG_64BIT */
		: "=&d" (old), "=&d" (new), "=Q" (sem->count)
		: "Q" (sem->count), "m" (tmp)
		: "cc", "memory");
	if (new > 1)
		rwsem_downgrade_wake(sem);
}
/*
 * implement atomic add functionality
 *
 * Atomically add @delta (a runtime value, hence the "d" register
 * operand and ar/agr instead of an immediate add) to sem->count.
 * The result is discarded.
 */
static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
{
	signed long old, new;

	asm volatile(
#ifndef CONFIG_64BIT
		"	l	%0,%2\n"	/* old = sem->count */
		"0:	lr	%1,%0\n"	/* new = old */
		"	ar	%1,%4\n"	/* new += delta */
		"	cs	%0,%1,%2\n"	/* try to swap in new */
		"	jl	0b"		/* cs failed: retry */
#else /* CONFIG_64BIT */
		"	lg	%0,%2\n"
		"0:	lgr	%1,%0\n"
		"	agr	%1,%4\n"
		"	csg	%0,%1,%2\n"
		"	jl	0b"
#endif /* CONFIG_64BIT */
		: "=&d" (old), "=&d" (new), "=Q" (sem->count)
		: "Q" (sem->count), "d" (delta)
		: "cc", "memory");
}
/*
 * implement exchange and add functionality
 *
 * Same compare-and-swap loop as rwsem_atomic_add(), but returns the
 * updated count so callers can inspect the post-add state.
 */
static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
{
	signed long old, new;

	asm volatile(
#ifndef CONFIG_64BIT
		"	l	%0,%2\n"	/* old = sem->count */
		"0:	lr	%1,%0\n"	/* new = old */
		"	ar	%1,%4\n"	/* new += delta */
		"	cs	%0,%1,%2\n"	/* try to swap in new */
		"	jl	0b"		/* cs failed: retry */
#else /* CONFIG_64BIT */
		"	lg	%0,%2\n"
		"0:	lgr	%1,%0\n"
		"	agr	%1,%4\n"
		"	csg	%0,%1,%2\n"
		"	jl	0b"
#endif /* CONFIG_64BIT */
		: "=&d" (old), "=&d" (new), "=Q" (sem->count)
		: "Q" (sem->count), "d" (delta)
		: "cc", "memory");
	return new;
}

#endif /* _S390_RWSEM_H */