drivers/tty/tty_ldsem.c
/*
 * Ldisc rw semaphore
 *
 * The ldisc semaphore is semantically a rw_semaphore but which enforces
 * an alternate policy, namely:
 * 1) Supports lock wait timeouts
 * 2) Write waiter has priority
 * 3) Downgrading is not supported
 *
 * Implementation notes:
 * 1) Upper half of semaphore count is a wait count (differs from rwsem
 *    in that rwsem normalizes the upper half to the wait bias)
 * 2) Lacks overflow checking
 *
 * The generic counting was copied and modified from include/asm-generic/rwsem.h
 * by Paul Mackerras <paulus@samba.org>.
 *
 * The scheduling policy was copied and modified from lib/rwsem.c
 * Written by David Howells (dhowells@redhat.com).
 *
 * This implementation incorporates the write lock stealing work of
 * Michel Lespinasse <walken@google.com>.
 *
 * Copyright (C) 2013 Peter Hurley <peter@hurleysoftware.com>
 *
 * This file may be redistributed under the terms of the GNU General Public
 * License v2.
 */
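/*
 * Usage sketch (illustrative only; ld_semaphore and the init_ldsem()
 * wrapper are assumed from include/linux/tty_ldisc.h -- the real
 * callers live in the tty core):
 *
 *	struct ld_semaphore sem;
 *
 *	init_ldsem(&sem);
 *	if (ldsem_down_read(&sem, 5 * HZ)) {
 *		... read-side critical section ...
 *		ldsem_up_read(&sem);
 *	} else {
 *		... timed out; the lock was not acquired ...
 *	}
 */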
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/tty.h>
#include <linux/sched.h>
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __acq(l, s, t, r, c, n, i)		\
				lock_acquire(&(l)->dep_map, s, t, r, c, n, i)
# define __rel(l, n, i)				\
				lock_release(&(l)->dep_map, n, i)
# ifdef CONFIG_PROVE_LOCKING
#  define lockdep_acquire(l, s, t, i)		__acq(l, s, t, 0, 2, NULL, i)
#  define lockdep_acquire_nest(l, s, t, n, i)	__acq(l, s, t, 0, 2, n, i)
#  define lockdep_acquire_read(l, s, t, i)	__acq(l, s, t, 1, 2, NULL, i)
#  define lockdep_release(l, n, i)		__rel(l, n, i)
# else
#  define lockdep_acquire(l, s, t, i)		__acq(l, s, t, 0, 1, NULL, i)
#  define lockdep_acquire_nest(l, s, t, n, i)	__acq(l, s, t, 0, 1, n, i)
#  define lockdep_acquire_read(l, s, t, i)	__acq(l, s, t, 1, 1, NULL, i)
#  define lockdep_release(l, n, i)		__rel(l, n, i)
# endif
#else
# define lockdep_acquire(l, s, t, i)		do { } while (0)
# define lockdep_acquire_nest(l, s, t, n, i)	do { } while (0)
# define lockdep_acquire_read(l, s, t, i)	do { } while (0)
# define lockdep_release(l, n, i)		do { } while (0)
#endif

#ifdef CONFIG_LOCK_STAT
# define lock_stat(_lock, stat)		lock_##stat(&(_lock)->dep_map, _RET_IP_)
#else
# define lock_stat(_lock, stat)		do { } while (0)
#endif
#if BITS_PER_LONG == 64
# define LDSEM_ACTIVE_MASK	0xffffffffL
#else
# define LDSEM_ACTIVE_MASK	0x0000ffffL
#endif

#define LDSEM_UNLOCKED		0L
#define LDSEM_ACTIVE_BIAS	1L
#define LDSEM_WAIT_BIAS		(-LDSEM_ACTIVE_MASK-1)
#define LDSEM_READ_BIAS		LDSEM_ACTIVE_BIAS
#define LDSEM_WRITE_BIAS	(LDSEM_WAIT_BIAS + LDSEM_ACTIVE_BIAS)
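/*
 * Worked example of the count encoding (32-bit values, illustrative):
 * the low 16 bits count active holders; each waiter (and a write owner)
 * additionally subtracts 0x10000 (LDSEM_WAIT_BIAS):
 *
 *	unlocked				0x00000000
 *	one active reader			0x00000001
 *	two active readers			0x00000002
 *	one reader + one blocked writer		0xffff0001
 *	write lock owned			0xffff0001  (LDSEM_WRITE_BIAS)
 *
 * Note the last two states encode identically; correctness relies on
 * observing transitions (e.g. writer_trylock() below only succeeds on a
 * 0 -> 1 transition of the active part), not on decoding a static count.
 */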
struct ldsem_waiter {
	struct list_head list;
	struct task_struct *task;
};
static inline long ldsem_atomic_update(long delta, struct ld_semaphore *sem)
{
	return atomic_long_add_return(delta, (atomic_long_t *)&sem->count);
}
/*
 * ldsem_cmpxchg() updates @*old with the last-known sem->count value.
 * Returns 1 if count was successfully changed; @*old will have @new value.
 * Returns 0 if count was not changed; @*old will have most recent sem->count
 * value.
 */
static inline int ldsem_cmpxchg(long *old, long new, struct ld_semaphore *sem)
{
	long tmp = atomic_long_cmpxchg(&sem->count, *old, new);
	if (tmp == *old) {
		*old = new;
		return 1;
	} else {
		*old = tmp;
		return 0;
	}
}
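/*
 * ldsem_atomic_update() and ldsem_cmpxchg() combine into the retry-loop
 * idiom used throughout this file (sketch; DELTA and the exit test are
 * placeholders):
 *
 *	long count = ldsem_atomic_update(DELTA, sem);
 *	do {
 *		if (fast-path condition now holds)
 *			break;
 *		if (ldsem_cmpxchg(&count, count - DELTA, sem))
 *			return;		(speculative update backed out)
 *	} while (1);
 *
 * A failed cmpxchg refreshes 'count', so the loop always re-tests
 * against the latest value.
 */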
/*
 * Initialize an ldsem:
 */
void __init_ldsem(struct ld_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	sem->count = LDSEM_UNLOCKED;
	sem->wait_readers = 0;
	raw_spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->read_wait);
	INIT_LIST_HEAD(&sem->write_wait);
}
static void __ldsem_wake_readers(struct ld_semaphore *sem)
{
	struct ldsem_waiter *waiter, *next;
	struct task_struct *tsk;
	long adjust, count;

	/* Try to grant read locks to all readers on the read wait list.
	 * Note the 'active part' of the count is incremented by
	 * the number of readers before waking any processes up.
	 */
	adjust = sem->wait_readers * (LDSEM_ACTIVE_BIAS - LDSEM_WAIT_BIAS);
	count = ldsem_atomic_update(adjust, sem);
	do {
		if (count > 0)
			break;
		if (ldsem_cmpxchg(&count, count - adjust, sem))
			return;
	} while (1);

	list_for_each_entry_safe(waiter, next, &sem->read_wait, list) {
		tsk = waiter->task;
		smp_mb();
		waiter->task = NULL;
		wake_up_process(tsk);
		put_task_struct(tsk);
	}
	INIT_LIST_HEAD(&sem->read_wait);
	sem->wait_readers = 0;
}
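/*
 * Worked example for the adjustment above (32-bit, illustrative): with
 * sem->wait_readers == 3, adjust = 3 * (LDSEM_ACTIVE_BIAS - LDSEM_WAIT_BIAS)
 * = 3 * (1 + 0x10000) = 0x00030003 -- a single atomic add converts all
 * three wait biases into active biases before any reader is woken.
 */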
static inline int writer_trylock(struct ld_semaphore *sem)
{
	/* only wake this writer if the active part of the count can be
	 * transitioned from 0 -> 1
	 */
	long count = ldsem_atomic_update(LDSEM_ACTIVE_BIAS, sem);
	do {
		if ((count & LDSEM_ACTIVE_MASK) == LDSEM_ACTIVE_BIAS)
			return 1;
		if (ldsem_cmpxchg(&count, count - LDSEM_ACTIVE_BIAS, sem))
			return 0;
	} while (1);
}
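/*
 * Illustration (hypothetical interleaving): with one reader still
 * active, the speculative add above makes the active part 2, the
 * 0 -> 1 test fails, and the bias is backed out (returns 0).  Should
 * the reader release between the add and the cmpxchg, the cmpxchg
 * fails, 'count' is refreshed, the active part is now 1, and the
 * trylock succeeds.
 */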
static void __ldsem_wake_writer(struct ld_semaphore *sem)
{
	struct ldsem_waiter *waiter;

	waiter = list_entry(sem->write_wait.next, struct ldsem_waiter, list);
	wake_up_process(waiter->task);
}
/*
 * handle the lock release when processes blocked on it can now run
 * - if we come here from up_xxxx(), then:
 *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
 *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having task zeroed
 */
static void __ldsem_wake(struct ld_semaphore *sem)
{
	if (!list_empty(&sem->write_wait))
		__ldsem_wake_writer(sem);
	else if (!list_empty(&sem->read_wait))
		__ldsem_wake_readers(sem);
}

static void ldsem_wake(struct ld_semaphore *sem)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);
	__ldsem_wake(sem);
	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}
/*
 * wait for the read lock to be granted
 */
static struct ld_semaphore __sched *
down_read_failed(struct ld_semaphore *sem, long count, long timeout)
{
	struct ldsem_waiter waiter;
	struct task_struct *tsk = current;
	long adjust = -LDSEM_ACTIVE_BIAS + LDSEM_WAIT_BIAS;

	/* set up my own style of waitqueue */
	raw_spin_lock_irq(&sem->wait_lock);

	/* Try to reverse the lock attempt but if the count has changed
	 * so that reversing fails, check if there are no waiters,
	 * and early-out if not */
	do {
		if (ldsem_cmpxchg(&count, count + adjust, sem))
			break;
		if (count > 0) {
			raw_spin_unlock_irq(&sem->wait_lock);
			return sem;
		}
	} while (1);

	list_add_tail(&waiter.list, &sem->read_wait);
	sem->wait_readers++;

	waiter.task = tsk;
	get_task_struct(tsk);

	/* if there are no active locks, wake the new lock owner(s) */
	if ((count & LDSEM_ACTIVE_MASK) == 0)
		__ldsem_wake(sem);

	raw_spin_unlock_irq(&sem->wait_lock);

	/* wait to be given the lock */
	for (;;) {
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);

		if (!waiter.task)
			break;
		if (!timeout)
			break;
		timeout = schedule_timeout(timeout);
	}

	__set_task_state(tsk, TASK_RUNNING);

	if (!timeout) {
		/* lock timed out but check if this task was just
		 * granted lock ownership - if so, pretend there
		 * was no timeout; otherwise, clean up the lock wait */
		raw_spin_lock_irq(&sem->wait_lock);
		if (waiter.task) {
			ldsem_atomic_update(-LDSEM_WAIT_BIAS, sem);
			list_del(&waiter.list);
			raw_spin_unlock_irq(&sem->wait_lock);
			put_task_struct(waiter.task);
			return NULL;
		}
		raw_spin_unlock_irq(&sem->wait_lock);
	}

	return sem;
}
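/*
 * Note on write lock stealing (see header comment): a woken writer must
 * still win writer_trylock() below, and ldsem_down_write_trylock() can
 * take the lock without ever queueing, so the writer that actually
 * acquires the lock need not be the one that was woken.
 */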
/*
 * wait for the write lock to be granted
 */
static struct ld_semaphore __sched *
down_write_failed(struct ld_semaphore *sem, long count, long timeout)
{
	struct ldsem_waiter waiter;
	struct task_struct *tsk = current;
	long adjust = -LDSEM_ACTIVE_BIAS;
	int locked = 0;

	/* set up my own style of waitqueue */
	raw_spin_lock_irq(&sem->wait_lock);

	/* Try to reverse the lock attempt but if the count has changed
	 * so that reversing fails, check if the lock is now owned,
	 * and early-out if so */
	do {
		if (ldsem_cmpxchg(&count, count + adjust, sem))
			break;
		if ((count & LDSEM_ACTIVE_MASK) == LDSEM_ACTIVE_BIAS) {
			raw_spin_unlock_irq(&sem->wait_lock);
			return sem;
		}
	} while (1);

	list_add_tail(&waiter.list, &sem->write_wait);

	waiter.task = tsk;

	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	for (;;) {
		if (!timeout)
			break;
		raw_spin_unlock_irq(&sem->wait_lock);
		timeout = schedule_timeout(timeout);
		raw_spin_lock_irq(&sem->wait_lock);
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
		if ((locked = writer_trylock(sem)))
			break;
	}

	if (!locked)
		ldsem_atomic_update(-LDSEM_WAIT_BIAS, sem);
	list_del(&waiter.list);
	raw_spin_unlock_irq(&sem->wait_lock);

	__set_task_state(tsk, TASK_RUNNING);

	/* lock wait may have timed out */
	if (!locked)
		return NULL;
	return sem;
}
static inline int __ldsem_down_read_nested(struct ld_semaphore *sem,
					   int subclass, long timeout)
{
	long count;

	lockdep_acquire_read(sem, subclass, 0, _RET_IP_);

	count = ldsem_atomic_update(LDSEM_READ_BIAS, sem);
	if (count <= 0) {
		lock_stat(sem, contended);
		if (!down_read_failed(sem, count, timeout)) {
			lockdep_release(sem, 1, _RET_IP_);
			return 0;
		}
	}
	lock_stat(sem, acquired);
	return 1;
}
static inline int __ldsem_down_write_nested(struct ld_semaphore *sem,
					    int subclass, long timeout)
{
	long count;

	lockdep_acquire(sem, subclass, 0, _RET_IP_);

	count = ldsem_atomic_update(LDSEM_WRITE_BIAS, sem);
	if ((count & LDSEM_ACTIVE_MASK) != LDSEM_ACTIVE_BIAS) {
		lock_stat(sem, contended);
		if (!down_write_failed(sem, count, timeout)) {
			lockdep_release(sem, 1, _RET_IP_);
			return 0;
		}
	}
	lock_stat(sem, acquired);
	return 1;
}
/*
 * lock for reading -- returns 1 if successful, 0 if timed out
 */
int __sched ldsem_down_read(struct ld_semaphore *sem, long timeout)
{
	might_sleep();
	return __ldsem_down_read_nested(sem, 0, timeout);
}
/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
int ldsem_down_read_trylock(struct ld_semaphore *sem)
{
	long count = sem->count;

	while (count >= 0) {
		if (ldsem_cmpxchg(&count, count + LDSEM_READ_BIAS, sem)) {
			lockdep_acquire_read(sem, 0, 1, _RET_IP_);
			lock_stat(sem, acquired);
			return 1;
		}
	}
	return 0;
}
/*
 * lock for writing -- returns 1 if successful, 0 if timed out
 */
int __sched ldsem_down_write(struct ld_semaphore *sem, long timeout)
{
	might_sleep();
	return __ldsem_down_write_nested(sem, 0, timeout);
}
/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
int ldsem_down_write_trylock(struct ld_semaphore *sem)
{
	long count = sem->count;

	while ((count & LDSEM_ACTIVE_MASK) == 0) {
		if (ldsem_cmpxchg(&count, count + LDSEM_WRITE_BIAS, sem)) {
			lockdep_acquire(sem, 0, 1, _RET_IP_);
			lock_stat(sem, acquired);
			return 1;
		}
	}
	return 0;
}
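/*
 * Trylock usage sketch (illustrative caller, not from this file): try a
 * non-blocking write lock first, then fall back to a bounded wait:
 *
 *	if (!ldsem_down_write_trylock(&sem)) {
 *		if (!ldsem_down_write(&sem, 5 * HZ))
 *			return -EBUSY;		(timed out)
 *	}
 *	... write-side critical section ...
 *	ldsem_up_write(&sem);
 */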
/*
 * release a read lock
 */
void ldsem_up_read(struct ld_semaphore *sem)
{
	long count;

	lockdep_release(sem, 1, _RET_IP_);

	count = ldsem_atomic_update(-LDSEM_READ_BIAS, sem);
	if (count < 0 && (count & LDSEM_ACTIVE_MASK) == 0)
		ldsem_wake(sem);
}
/*
 * release a write lock
 */
void ldsem_up_write(struct ld_semaphore *sem)
{
	long count;

	lockdep_release(sem, 1, _RET_IP_);

	count = ldsem_atomic_update(-LDSEM_WRITE_BIAS, sem);
	if (count < 0)
		ldsem_wake(sem);
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC

int ldsem_down_read_nested(struct ld_semaphore *sem, int subclass, long timeout)
{
	might_sleep();
	return __ldsem_down_read_nested(sem, subclass, timeout);
}

int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
			    long timeout)
{
	might_sleep();
	return __ldsem_down_write_nested(sem, subclass, timeout);
}

#endif