/*
 * Ldisc rw semaphore
 *
 * The ldisc semaphore is semantically a rw_semaphore, but one which
 * enforces an alternate policy, namely:
 * 1) Supports lock wait timeouts
 * 2) Write waiter has priority
 * 3) Downgrading is not supported
 *
 * Implementation notes:
 * 1) Upper half of semaphore count is a wait count (differs from rwsem
 *    in that rwsem normalizes the upper half to the wait bias)
 * 2) Lacks overflow checking
 *
 * The generic counting was copied and modified from include/asm-generic/rwsem.h
 * by Paul Mackerras <paulus@samba.org>.
 *
 * The scheduling policy was copied and modified from lib/rwsem.c
 * Written by David Howells (dhowells@redhat.com).
 *
 * This implementation incorporates the write lock stealing work of
 * Michel Lespinasse <walken@google.com>.
 *
 * Copyright (C) 2013 Peter Hurley <peter@hurleysoftware.com>
 *
 * This file may be redistributed under the terms of the GNU General Public
 * License v2.
 */
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/tty.h>
#include <linux/sched.h>
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __acq(l, s, t, r, c, n, i)		\
				lock_acquire(&(l)->dep_map, s, t, r, c, n, i)
# define __rel(l, n, i)				\
				lock_release(&(l)->dep_map, n, i)
#define lockdep_acquire(l, s, t, i)		__acq(l, s, t, 0, 1, NULL, i)
#define lockdep_acquire_nest(l, s, t, n, i)	__acq(l, s, t, 0, 1, n, i)
#define lockdep_acquire_read(l, s, t, i)	__acq(l, s, t, 1, 1, NULL, i)
#define lockdep_release(l, n, i)		__rel(l, n, i)
#else
# define lockdep_acquire(l, s, t, i)		do { } while (0)
# define lockdep_acquire_nest(l, s, t, n, i)	do { } while (0)
# define lockdep_acquire_read(l, s, t, i)	do { } while (0)
# define lockdep_release(l, n, i)		do { } while (0)
#endif
#ifdef CONFIG_LOCK_STAT
# define lock_stat(_lock, stat)		lock_##stat(&(_lock)->dep_map, _RET_IP_)
#else
# define lock_stat(_lock, stat)		do { } while (0)
#endif
#if BITS_PER_LONG == 64
# define LDSEM_ACTIVE_MASK	0xffffffffL
#else
# define LDSEM_ACTIVE_MASK	0x0000ffffL
#endif

#define LDSEM_UNLOCKED		0L
#define LDSEM_ACTIVE_BIAS	1L
#define LDSEM_WAIT_BIAS		(-LDSEM_ACTIVE_MASK-1)
#define LDSEM_READ_BIAS		LDSEM_ACTIVE_BIAS
#define LDSEM_WRITE_BIAS	(LDSEM_WAIT_BIAS + LDSEM_ACTIVE_BIAS)
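
/*
 * Worked example (illustrative; not part of the original file). On a
 * 64-bit build LDSEM_WAIT_BIAS is -0x100000000, so each waiter
 * subtracts one from the upper half of the count:
 *
 *	count = 0			unlocked
 *	+ LDSEM_READ_BIAS		one active reader
 *	count = 0x0000000000000001
 *	+ LDSEM_WRITE_BIAS		a writer attempts the lock; the
 *	count = 0xffffffff00000002	active part is now 2, not 1, so the
 *					writer takes the slow path, where it
 *	+ (-LDSEM_ACTIVE_BIAS)		backs out its active bias and waits
 *	count = 0xffffffff00000001	one active reader, one waiting writer
 *
 * When the reader then drops LDSEM_READ_BIAS, the count goes negative
 * with a zero active part, which is exactly the wake condition tested
 * in ldsem_up_read() below.
 */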
struct ldsem_waiter {
	struct list_head list;
	struct task_struct *task;
};
static inline long ldsem_atomic_update(long delta, struct ld_semaphore *sem)
{
	return atomic_long_add_return(delta, (atomic_long_t *)&sem->count);
}
/*
 * ldsem_cmpxchg() updates @*old with the last-known sem->count value.
 * Returns 1 if count was successfully changed; @*old will have @new value.
 * Returns 0 if count was not changed; @*old will have the most recent
 * sem->count value.
 */
static inline int ldsem_cmpxchg(long *old, long new, struct ld_semaphore *sem)
{
	long tmp = atomic_long_cmpxchg(&sem->count, *old, new);
	if (tmp == *old) {
		*old = new;
		return 1;
	} else {
		*old = tmp;
		return 0;
	}
}
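
/*
 * Note (added for clarity; not in the original): because a failed
 * ldsem_cmpxchg() refreshes @*old, the slow paths below all share the
 * same lock-free retry idiom:
 *
 *	do {
 *		if (ldsem_cmpxchg(&count, count + adjust, sem))
 *			break;		// count now holds count + adjust
 *		// re-examine the refreshed count for an early out
 *	} while (1);
 */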
/*
 * Initialize an ldsem:
 */
void __init_ldsem(struct ld_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	sem->count = LDSEM_UNLOCKED;
	sem->wait_readers = 0;
	raw_spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->read_wait);
	INIT_LIST_HEAD(&sem->write_wait);
}
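
/*
 * Usage sketch (illustrative; not part of the original file). Callers
 * normally go through the init_ldsem() wrapper in include/linux/tty.h,
 * which supplies the lockdep name and a static lock class key:
 *
 *	struct ld_semaphore sem;
 *
 *	init_ldsem(&sem);
 */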
static void __ldsem_wake_readers(struct ld_semaphore *sem)
{
	struct ldsem_waiter *waiter, *next;
	struct task_struct *tsk;
	long adjust, count;

	/* Try to grant read locks to all readers on the read wait list.
	 * Note the 'active part' of the count is incremented by
	 * the number of readers before waking any processes up.
	 */
	adjust = sem->wait_readers * (LDSEM_ACTIVE_BIAS - LDSEM_WAIT_BIAS);
	count = ldsem_atomic_update(adjust, sem);
	do {
		if (count > 0)
			break;
		if (ldsem_cmpxchg(&count, count - adjust, sem))
			return;
	} while (1);

	list_for_each_entry_safe(waiter, next, &sem->read_wait, list) {
		tsk = waiter->task;
		smp_store_release(&waiter->task, NULL);
		wake_up_process(tsk);
		put_task_struct(tsk);
	}
	INIT_LIST_HEAD(&sem->read_wait);
	sem->wait_readers = 0;
}
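
/*
 * Worked example (illustrative; not in the original): with two waiting
 * readers on a 64-bit build, adjust = 2 * (1 + 0x100000000), i.e. the
 * update above converts both wait biases into active (read) biases in
 * a single atomic operation before any waiter is actually woken.
 */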
static inline int writer_trylock(struct ld_semaphore *sem)
{
	/* only wake this writer if the active part of the count can be
	 * transitioned from 0 -> 1
	 */
	long count = ldsem_atomic_update(LDSEM_ACTIVE_BIAS, sem);
	do {
		if ((count & LDSEM_ACTIVE_MASK) == LDSEM_ACTIVE_BIAS)
			return 1;
		if (ldsem_cmpxchg(&count, count - LDSEM_ACTIVE_BIAS, sem))
			return 0;
	} while (1);
}
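
/*
 * Note (added for clarity; not in the original): the add above is
 * speculative. If the resulting active part is exactly 1, this writer
 * now owns the lock; otherwise some reader or writer is still active,
 * and the cmpxchg loop backs the speculative LDSEM_ACTIVE_BIAS out.
 */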
static void __ldsem_wake_writer(struct ld_semaphore *sem)
{
	struct ldsem_waiter *waiter;

	waiter = list_entry(sem->write_wait.next, struct ldsem_waiter, list);
	wake_up_process(waiter->task);
}
/*
 * handle the lock release when processes blocked on it can now run
 * - if we come here from up_xxxx(), then:
 *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
 *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having task zeroed
 */
static void __ldsem_wake(struct ld_semaphore *sem)
{
	if (!list_empty(&sem->write_wait))
		__ldsem_wake_writer(sem);
	else if (!list_empty(&sem->read_wait))
		__ldsem_wake_readers(sem);
}
static void ldsem_wake(struct ld_semaphore *sem)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);
	__ldsem_wake(sem);
	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}
/*
 * wait for the read lock to be granted
 */
static struct ld_semaphore __sched *
down_read_failed(struct ld_semaphore *sem, long count, long timeout)
{
	struct ldsem_waiter waiter;
	struct task_struct *tsk = current;
	long adjust = -LDSEM_ACTIVE_BIAS + LDSEM_WAIT_BIAS;

	/* set up my own style of waitqueue */
	raw_spin_lock_irq(&sem->wait_lock);

	/* Try to reverse the lock attempt, but if the count has changed
	 * so that reversing fails, check if there are no waiters;
	 * if none, the read lock was effectively granted, so early-out */
	do {
		if (ldsem_cmpxchg(&count, count + adjust, sem))
			break;
		if (count > 0) {
			raw_spin_unlock_irq(&sem->wait_lock);
			return sem;
		}
	} while (1);

	list_add_tail(&waiter.list, &sem->read_wait);
	sem->wait_readers++;

	waiter.task = tsk;
	get_task_struct(tsk);

	/* if there are no active locks, wake the new lock owner(s) */
	if ((count & LDSEM_ACTIVE_MASK) == 0)
		__ldsem_wake(sem);

	raw_spin_unlock_irq(&sem->wait_lock);

	/* wait to be given the lock */
	for (;;) {
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);

		if (!smp_load_acquire(&waiter.task))
			break;
		if (!timeout)
			break;
		timeout = schedule_timeout(timeout);
	}

	__set_task_state(tsk, TASK_RUNNING);

	if (!timeout) {
		/* lock timed out but check if this task was just
		 * granted lock ownership - if so, pretend there
		 * was no timeout; otherwise, cleanup lock wait */
		raw_spin_lock_irq(&sem->wait_lock);
		if (waiter.task) {
			ldsem_atomic_update(-LDSEM_WAIT_BIAS, sem);
			list_del(&waiter.list);
			raw_spin_unlock_irq(&sem->wait_lock);
			put_task_struct(waiter.task);
			return NULL;
		}
		raw_spin_unlock_irq(&sem->wait_lock);
	}

	return sem;
}
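
/*
 * Note (added for clarity; not in the original): the smp_load_acquire()
 * of waiter.task above pairs with the smp_store_release() in
 * __ldsem_wake_readers(), so a woken reader cannot observe its wakeup
 * before the waker has finished granting the lock.
 */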
/*
 * wait for the write lock to be granted
 */
static struct ld_semaphore __sched *
down_write_failed(struct ld_semaphore *sem, long count, long timeout)
{
	struct ldsem_waiter waiter;
	struct task_struct *tsk = current;
	long adjust = -LDSEM_ACTIVE_BIAS;
	int locked = 0;

	/* set up my own style of waitqueue */
	raw_spin_lock_irq(&sem->wait_lock);

	/* Try to reverse the lock attempt but if the count has changed
	 * so that reversing fails, check if the lock is now owned,
	 * and early-out if so */
	do {
		if (ldsem_cmpxchg(&count, count + adjust, sem))
			break;
		if ((count & LDSEM_ACTIVE_MASK) == LDSEM_ACTIVE_BIAS) {
			raw_spin_unlock_irq(&sem->wait_lock);
			return sem;
		}
	} while (1);

	list_add_tail(&waiter.list, &sem->write_wait);

	waiter.task = tsk;

	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	for (;;) {
		if (!timeout)
			break;
		raw_spin_unlock_irq(&sem->wait_lock);
		timeout = schedule_timeout(timeout);
		raw_spin_lock_irq(&sem->wait_lock);
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
		locked = writer_trylock(sem);
		if (locked)
			break;
	}

	if (!locked)
		ldsem_atomic_update(-LDSEM_WAIT_BIAS, sem);
	list_del(&waiter.list);

	/*
	 * In case of timeout, wake up every reader that gave way to this
	 * writer. This prevents the readers from being split into two
	 * groups: one holding the semaphore and another still sleeping
	 * (in the case of no remaining contention with a writer).
	 */
	if (!locked && list_empty(&sem->write_wait))
		__ldsem_wake_readers(sem);

	raw_spin_unlock_irq(&sem->wait_lock);

	__set_task_state(tsk, TASK_RUNNING);

	/* lock wait may have timed out */
	if (!locked)
		return NULL;
	return sem;
}
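
/*
 * Note (added for clarity; not in the original): unlike the read side,
 * a waiting writer is never handed the lock by the waker. __ldsem_wake()
 * only wakes it, and the writer must claim the lock itself via
 * writer_trylock() under sem->wait_lock.
 */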
static int __ldsem_down_read_nested(struct ld_semaphore *sem,
					   int subclass, long timeout)
{
	long count;

	lockdep_acquire_read(sem, subclass, 0, _RET_IP_);

	count = ldsem_atomic_update(LDSEM_READ_BIAS, sem);
	if (count <= 0) {
		lock_stat(sem, contended);
		if (!down_read_failed(sem, count, timeout)) {
			lockdep_release(sem, 1, _RET_IP_);
			return 0;
		}
	}
	lock_stat(sem, acquired);
	return 1;
}
static int __ldsem_down_write_nested(struct ld_semaphore *sem,
					    int subclass, long timeout)
{
	long count;

	lockdep_acquire(sem, subclass, 0, _RET_IP_);

	count = ldsem_atomic_update(LDSEM_WRITE_BIAS, sem);
	if ((count & LDSEM_ACTIVE_MASK) != LDSEM_ACTIVE_BIAS) {
		lock_stat(sem, contended);
		if (!down_write_failed(sem, count, timeout)) {
			lockdep_release(sem, 1, _RET_IP_);
			return 0;
		}
	}
	lock_stat(sem, acquired);
	return 1;
}
/*
 * lock for reading -- returns 1 if successful, 0 if timed out
 */
int __sched ldsem_down_read(struct ld_semaphore *sem, long timeout)
{
	might_sleep();
	return __ldsem_down_read_nested(sem, 0, timeout);
}
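
/*
 * Usage sketch (illustrative; not part of the original file). The tty
 * core takes the read side of tty->ldisc_sem this way; the timeout is
 * in jiffies, and MAX_SCHEDULE_TIMEOUT means wait indefinitely.
 */
#if 0	/* example only */
static int example_ldisc_reader(struct ld_semaphore *sem)
{
	/* wait up to 5 seconds for the read lock */
	if (!ldsem_down_read(sem, 5 * HZ))
		return -EBUSY;		/* timed out */
	/* ... read-side critical section ... */
	ldsem_up_read(sem);
	return 0;
}
#endif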
/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
int ldsem_down_read_trylock(struct ld_semaphore *sem)
{
	long count = sem->count;

	while (count >= 0) {
		if (ldsem_cmpxchg(&count, count + LDSEM_READ_BIAS, sem)) {
			lockdep_acquire_read(sem, 0, 1, _RET_IP_);
			lock_stat(sem, acquired);
			return 1;
		}
	}
	return 0;
}
/*
 * lock for writing -- returns 1 if successful, 0 if timed out
 */
int __sched ldsem_down_write(struct ld_semaphore *sem, long timeout)
{
	might_sleep();
	return __ldsem_down_write_nested(sem, 0, timeout);
}
/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
int ldsem_down_write_trylock(struct ld_semaphore *sem)
{
	long count = sem->count;

	while ((count & LDSEM_ACTIVE_MASK) == 0) {
		if (ldsem_cmpxchg(&count, count + LDSEM_WRITE_BIAS, sem)) {
			lockdep_acquire(sem, 0, 1, _RET_IP_);
			lock_stat(sem, acquired);
			return 1;
		}
	}
	return 0;
}
/*
 * release a read lock
 */
void ldsem_up_read(struct ld_semaphore *sem)
{
	long count;

	lockdep_release(sem, 1, _RET_IP_);

	count = ldsem_atomic_update(-LDSEM_READ_BIAS, sem);
	if (count < 0 && (count & LDSEM_ACTIVE_MASK) == 0)
		ldsem_wake(sem);
}
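
/*
 * Note (added for clarity; not in the original): count < 0 means the
 * upper (wait) half is non-zero, so someone is queued; a zero active
 * part means this reader was the last active holder. Only when both
 * hold is a wakeup needed.
 */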
/*
 * release a write lock
 */
void ldsem_up_write(struct ld_semaphore *sem)
{
	long count;

	lockdep_release(sem, 1, _RET_IP_);

	count = ldsem_atomic_update(-LDSEM_WRITE_BIAS, sem);
	if (count < 0)
		ldsem_wake(sem);
}
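
/*
 * Usage sketch (illustrative; not part of the original file). The write
 * side is what the tty core uses to exclude readers while changing the
 * line discipline; a bounded timeout keeps a hung reader from blocking
 * the change forever.
 */
#if 0	/* example only */
static int example_ldisc_writer(struct ld_semaphore *sem)
{
	/* the writer has priority over new readers while it waits */
	if (!ldsem_down_write(sem, 5 * HZ))
		return -EBUSY;		/* timed out */
	/* ... exclusive critical section, e.g. swap the ldisc ... */
	ldsem_up_write(sem);
	return 0;
}
#endif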
#ifdef CONFIG_DEBUG_LOCK_ALLOC

int ldsem_down_read_nested(struct ld_semaphore *sem, int subclass, long timeout)
{
	might_sleep();
	return __ldsem_down_read_nested(sem, subclass, timeout);
}

int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
			    long timeout)
{
	might_sleep();
	return __ldsem_down_write_nested(sem, subclass, timeout);
}

#endif