#ifndef _S390_RWSEM_H
#define _S390_RWSEM_H

/*
 * include/asm-s390/rwsem.h
 *
 * S390 version
 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 * Based on asm-alpha/semaphore.h and asm-i386/rwsem.h
 */

/*
 * The MSW of the count is the negated number of active writers and waiting
 * lockers, and the LSW is the total number of active locks.
 *
 * The lock count is initialized to 0 (no active and no waiting lockers).
 *
 * When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case of
 * an uncontended lock. This can be determined because XADD returns the old
 * value. Readers increment by 1 and see a positive value when uncontended,
 * negative if there are writers (and maybe readers) waiting (in which case
 * it goes to sleep).
 *
 * The value of WAITING_BIAS supports up to 32766 waiting processes. This can
 * be extended to 65534 by manually checking the whole MSW rather than relying
 * on the S flag.
 *
 * The value of ACTIVE_BIAS supports up to 65535 active processes.
 *
 * This should be totally fair - if anything is waiting, a process that wants
 * a lock will go to the back of the queue. When the currently active lock is
 * released, if there's a writer at the front of the queue, then that and only
 * that will be woken up; if there's a bunch of consecutive readers at the
 * front, then they'll all be woken up, but no other readers will be.
 */
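
/*
 * Illustrative note, not part of the original header: with the 32-bit count
 * layout described above, a few sample values are
 *
 *	0x00000000	lock is free
 *	0x00000001	one active reader
 *	0x00000002	two active readers
 *	0xffff0001	one active writer, nobody waiting
 *
 * The reader fast path below only needs the sign of the old count, and the
 * writer fast path only needs to know whether the old count was zero; the
 * slow paths (rwsem_down_read_failed(), rwsem_down_write_failed(),
 * rwsem_wake()) handle everything else.
 */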

#ifndef _LINUX_RWSEM_H
#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
#endif

#ifdef __KERNEL__

#ifndef __s390x__
#define RWSEM_UNLOCKED_VALUE	0x00000000
#define RWSEM_ACTIVE_BIAS	0x00000001
#define RWSEM_ACTIVE_MASK	0x0000ffff
#define RWSEM_WAITING_BIAS	(-0x00010000)
#else /* __s390x__ */
#define RWSEM_UNLOCKED_VALUE	0x0000000000000000L
#define RWSEM_ACTIVE_BIAS	0x0000000000000001L
#define RWSEM_ACTIVE_MASK	0x00000000ffffffffL
#define RWSEM_WAITING_BIAS	(-0x0000000100000000L)
#endif /* __s390x__ */
#define RWSEM_ACTIVE_READ_BIAS	RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
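
/*
 * Illustrative note, not part of the original header: with the definitions
 * above, RWSEM_ACTIVE_WRITE_BIAS evaluates to -0x0000ffff (0xffff0001 when
 * the count is viewed as an unsigned 32-bit value) in the 31-bit case, and
 * to -0x00000000ffffffffL (0xffffffff00000001) in the __s390x__ case.
 */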

/*
 * lock for reading
 */
static inline void __down_read(struct rw_semaphore *sem)
{
	signed long old, new;

	asm volatile(
#ifndef __s390x__
	"	l	%0,%2\n"
	"0:	lr	%1,%0\n"
	"	ahi	%1,%4\n"
	"	cs	%0,%1,%2\n"
	"	jl	0b"
#else /* __s390x__ */
	"	lg	%0,%2\n"
	"0:	lgr	%1,%0\n"
	"	aghi	%1,%4\n"
	"	csg	%0,%1,%2\n"
	"	jl	0b"
#endif /* __s390x__ */
	: "=&d" (old), "=&d" (new), "=Q" (sem->count)
	: "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
	: "cc", "memory");
	if (old < 0)
		rwsem_down_read_failed(sem);
}

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	signed long old, new;

	asm volatile(
#ifndef __s390x__
	"	l	%0,%2\n"
	"0:	ltr	%1,%0\n"
	"	jm	1f\n"
	"	ahi	%1,%4\n"
	"	cs	%0,%1,%2\n"
	"	jl	0b\n"
	"1:"
#else /* __s390x__ */
	"	lg	%0,%2\n"
	"0:	ltgr	%1,%0\n"
	"	jm	1f\n"
	"	aghi	%1,%4\n"
	"	csg	%0,%1,%2\n"
	"	jl	0b\n"
	"1:"
#endif /* __s390x__ */
	: "=&d" (old), "=&d" (new), "=Q" (sem->count)
	: "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
	: "cc", "memory");
	return old >= 0 ? 1 : 0;
}

/*
 * lock for writing
 */
static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
{
	signed long old, new, tmp;

	tmp = RWSEM_ACTIVE_WRITE_BIAS;
	asm volatile(
#ifndef __s390x__
	"	l	%0,%2\n"
	"0:	lr	%1,%0\n"
	"	a	%1,%4\n"
	"	cs	%0,%1,%2\n"
	"	jl	0b"
#else /* __s390x__ */
	"	lg	%0,%2\n"
	"0:	lgr	%1,%0\n"
	"	ag	%1,%4\n"
	"	csg	%0,%1,%2\n"
	"	jl	0b"
#endif /* __s390x__ */
	: "=&d" (old), "=&d" (new), "=Q" (sem->count)
	: "Q" (sem->count), "m" (tmp)
	: "cc", "memory");
	if (old != 0)
		rwsem_down_write_failed(sem);
}

static inline void __down_write(struct rw_semaphore *sem)
{
	__down_write_nested(sem, 0);
}

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	signed long old;

	asm volatile(
#ifndef __s390x__
	"	l	%0,%1\n"
	"0:	ltr	%0,%0\n"
	"	jnz	1f\n"
	"	cs	%0,%3,%1\n"
	"	jl	0b\n"
#else /* __s390x__ */
	"	lg	%0,%1\n"
	"0:	ltgr	%0,%0\n"
	"	jnz	1f\n"
	"	csg	%0,%3,%1\n"
	"	jl	0b\n"
#endif /* __s390x__ */
	"1:"
	: "=&d" (old), "=Q" (sem->count)
	: "Q" (sem->count), "d" (RWSEM_ACTIVE_WRITE_BIAS)
	: "cc", "memory");
	return (old == RWSEM_UNLOCKED_VALUE) ? 1 : 0;
}

/*
 * unlock after reading
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	signed long old, new;

	asm volatile(
#ifndef __s390x__
	"	l	%0,%2\n"
	"0:	lr	%1,%0\n"
	"	ahi	%1,%4\n"
	"	cs	%0,%1,%2\n"
	"	jl	0b"
#else /* __s390x__ */
	"	lg	%0,%2\n"
	"0:	lgr	%1,%0\n"
	"	aghi	%1,%4\n"
	"	csg	%0,%1,%2\n"
	"	jl	0b"
#endif /* __s390x__ */
	: "=&d" (old), "=&d" (new), "=Q" (sem->count)
	: "Q" (sem->count), "i" (-RWSEM_ACTIVE_READ_BIAS)
	: "cc", "memory");
	if (new < 0)
		if ((new & RWSEM_ACTIVE_MASK) == 0)
			rwsem_wake(sem);
}

/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	signed long old, new, tmp;

	tmp = -RWSEM_ACTIVE_WRITE_BIAS;
	asm volatile(
#ifndef __s390x__
	"	l	%0,%2\n"
	"0:	lr	%1,%0\n"
	"	a	%1,%4\n"
	"	cs	%0,%1,%2\n"
	"	jl	0b"
#else /* __s390x__ */
	"	lg	%0,%2\n"
	"0:	lgr	%1,%0\n"
	"	ag	%1,%4\n"
	"	csg	%0,%1,%2\n"
	"	jl	0b"
#endif /* __s390x__ */
	: "=&d" (old), "=&d" (new), "=Q" (sem->count)
	: "Q" (sem->count), "m" (tmp)
	: "cc", "memory");
	if (new < 0)
		if ((new & RWSEM_ACTIVE_MASK) == 0)
			rwsem_wake(sem);
}

/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	signed long old, new, tmp;

	tmp = -RWSEM_WAITING_BIAS;
	asm volatile(
#ifndef __s390x__
	"	l	%0,%2\n"
	"0:	lr	%1,%0\n"
	"	a	%1,%4\n"
	"	cs	%0,%1,%2\n"
	"	jl	0b"
#else /* __s390x__ */
	"	lg	%0,%2\n"
	"0:	lgr	%1,%0\n"
	"	ag	%1,%4\n"
	"	csg	%0,%1,%2\n"
	"	jl	0b"
#endif /* __s390x__ */
	: "=&d" (old), "=&d" (new), "=Q" (sem->count)
	: "Q" (sem->count), "m" (tmp)
	: "cc", "memory");
	if (new > 1)
		rwsem_downgrade_wake(sem);
}

/*
 * implement atomic add functionality
 */
static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
{
	signed long old, new;

	asm volatile(
#ifndef __s390x__
	"	l	%0,%2\n"
	"0:	lr	%1,%0\n"
	"	ar	%1,%4\n"
	"	cs	%0,%1,%2\n"
	"	jl	0b"
#else /* __s390x__ */
	"	lg	%0,%2\n"
	"0:	lgr	%1,%0\n"
	"	agr	%1,%4\n"
	"	csg	%0,%1,%2\n"
	"	jl	0b"
#endif /* __s390x__ */
	: "=&d" (old), "=&d" (new), "=Q" (sem->count)
	: "Q" (sem->count), "d" (delta)
	: "cc", "memory");
}

/*
 * implement exchange and add functionality
 */
static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
{
	signed long old, new;

	asm volatile(
#ifndef __s390x__
	"	l	%0,%2\n"
	"0:	lr	%1,%0\n"
	"	ar	%1,%4\n"
	"	cs	%0,%1,%2\n"
	"	jl	0b"
#else /* __s390x__ */
	"	lg	%0,%2\n"
	"0:	lgr	%1,%0\n"
	"	agr	%1,%4\n"
	"	csg	%0,%1,%2\n"
	"	jl	0b"
#endif /* __s390x__ */
	: "=&d" (old), "=&d" (new), "=Q" (sem->count)
	: "Q" (sem->count), "d" (delta)
	: "cc", "memory");
	return new;
}
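
/*
 * Illustrative sketch, not part of the original header: these primitives are
 * only reached through the generic wrappers declared in linux/rwsem.h.  The
 * (hypothetical) example below shows typical caller-side usage; example_sem,
 * example_value and the two helpers are made up for illustration.
 */
#if 0
static DECLARE_RWSEM(example_sem);	/* hypothetical semaphore */
static int example_value;		/* hypothetical shared data */

static int example_read(void)
{
	int v;

	down_read(&example_sem);	/* reader fast path: __down_read() */
	v = example_value;
	up_read(&example_sem);		/* __up_read(), may call rwsem_wake() */
	return v;
}

static void example_write(int v)
{
	down_write(&example_sem);	/* writer fast path: __down_write() */
	example_value = v;
	up_write(&example_sem);		/* __up_write(), may call rwsem_wake() */
}
#endif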

#endif /* __KERNEL__ */
#endif /* _S390_RWSEM_H */