// SPDX-License-Identifier: GPL-2.0
/*
 * Ldisc rw semaphore
 *
 * The ldisc semaphore is semantically a rw_semaphore, but it enforces
 * an alternate policy, namely:
 * 1) Supports lock wait timeouts
 * 2) Write waiter has priority
 * 3) Downgrading is not supported
 *
 * Implementation notes:
 * 1) Upper half of semaphore count is a wait count (differs from rwsem
 *    in that rwsem normalizes the upper half to the wait bias)
 * 2) Lacks overflow checking
 *
 * The generic counting was copied and modified from include/asm-generic/rwsem.h
 * by Paul Mackerras <paulus@samba.org>.
 *
 * The scheduling policy was copied and modified from lib/rwsem.c
 * Written by David Howells (dhowells@redhat.com).
 *
 * This implementation incorporates the write lock stealing work of
 * Michel Lespinasse <walken@google.com>.
 *
 * Copyright (C) 2013 Peter Hurley <peter@hurleysoftware.com>
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/tty.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>

#if BITS_PER_LONG == 64
# define LDSEM_ACTIVE_MASK	0xffffffffL
#else
# define LDSEM_ACTIVE_MASK	0x0000ffffL
#endif

#define LDSEM_UNLOCKED		0L
#define LDSEM_ACTIVE_BIAS	1L
#define LDSEM_WAIT_BIAS		(-LDSEM_ACTIVE_MASK-1)
#define LDSEM_READ_BIAS		LDSEM_ACTIVE_BIAS
#define LDSEM_WRITE_BIAS	(LDSEM_WAIT_BIAS + LDSEM_ACTIVE_BIAS)
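
/*
 * Worked example of the count encoding (illustrative; assumes
 * BITS_PER_LONG == 64, so the active part is the low 32 bits): with one
 * reader holding the lock and one writer blocked, the reader has added
 * LDSEM_READ_BIAS and the sleeping writer's net contribution is
 * LDSEM_WAIT_BIAS, so
 *
 *	count = LDSEM_READ_BIAS + LDSEM_WAIT_BIAS
 *	      = 1L + (-0x100000000L)
 *	      = 0xffffffff00000001 (as an unsigned value)
 *
 * i.e. the active part (count & LDSEM_ACTIVE_MASK) is 1 and the upper
 * (wait) half is -1; count is negative whenever there are waiters.
 */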

struct ldsem_waiter {
	struct list_head list;
	struct task_struct *task;
};

/*
 * Initialize an ldsem:
 */
void __init_ldsem(struct ld_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	atomic_long_set(&sem->count, LDSEM_UNLOCKED);
	sem->wait_readers = 0;
	raw_spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->read_wait);
	INIT_LIST_HEAD(&sem->write_wait);
}

static void __ldsem_wake_readers(struct ld_semaphore *sem)
{
	struct ldsem_waiter *waiter, *next;
	struct task_struct *tsk;
	long adjust, count;

	/*
	 * Try to grant read locks to all readers on the read wait list.
	 * Note the 'active part' of the count is incremented by
	 * the number of readers before waking any processes up.
	 */
	adjust = sem->wait_readers * (LDSEM_ACTIVE_BIAS - LDSEM_WAIT_BIAS);
	count = atomic_long_add_return(adjust, &sem->count);
	do {
		/* Grants have been made; wake the waiting readers below. */
		if (count > 0)
			break;
		/* A writer slipped in; try to undo the grants instead. */
		if (atomic_long_try_cmpxchg(&sem->count, &count, count - adjust))
			return;
	} while (1);

	list_for_each_entry_safe(waiter, next, &sem->read_wait, list) {
		tsk = waiter->task;
		smp_store_release(&waiter->task, NULL);
		wake_up_process(tsk);
		put_task_struct(tsk);
	}
	INIT_LIST_HEAD(&sem->read_wait);
	sem->wait_readers = 0;
}
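
/*
 * Worked example of the wake-up adjustment above (illustrative; assumes
 * BITS_PER_LONG == 64): with sem->wait_readers == 2, each waiter's
 * LDSEM_WAIT_BIAS is converted into an active reference, so
 *
 *	adjust = 2 * (LDSEM_ACTIVE_BIAS - LDSEM_WAIT_BIAS)
 *	       = 2 * (1L + 0x100000000L)
 *	       = 0x200000002
 *
 * which removes both wait counts from the upper half and adds two
 * active counts to the lower half in a single atomic add.
 */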

static inline int writer_trylock(struct ld_semaphore *sem)
{
	/*
	 * Only wake this writer if the active part of the count can be
	 * transitioned from 0 -> 1
	 */
	long count = atomic_long_add_return(LDSEM_ACTIVE_BIAS, &sem->count);
	do {
		if ((count & LDSEM_ACTIVE_MASK) == LDSEM_ACTIVE_BIAS)
			return 1;
		/* Other activity is present; back out the active bias. */
		if (atomic_long_try_cmpxchg(&sem->count, &count, count - LDSEM_ACTIVE_BIAS))
			return 0;
	} while (1);
}

static void __ldsem_wake_writer(struct ld_semaphore *sem)
{
	struct ldsem_waiter *waiter;

	waiter = list_entry(sem->write_wait.next, struct ldsem_waiter, list);
	wake_up_process(waiter->task);
}

/*
 * handle the lock release when processes blocked on it can now run
 * - if we come here from up_xxxx(), then:
 *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
 *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having task zeroed
 */
static void __ldsem_wake(struct ld_semaphore *sem)
{
	if (!list_empty(&sem->write_wait))
		__ldsem_wake_writer(sem);
	else if (!list_empty(&sem->read_wait))
		__ldsem_wake_readers(sem);
}

static void ldsem_wake(struct ld_semaphore *sem)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);
	__ldsem_wake(sem);
	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}

/*
 * wait for the read lock to be granted
 */
static struct ld_semaphore __sched *
down_read_failed(struct ld_semaphore *sem, long count, long timeout)
{
	struct ldsem_waiter waiter;
	long adjust = -LDSEM_ACTIVE_BIAS + LDSEM_WAIT_BIAS;

	/* set up my own style of waitqueue */
	raw_spin_lock_irq(&sem->wait_lock);

	/*
	 * Try to reverse the lock attempt but if the count has changed
	 * so that reversing fails, check if there are no waiters,
	 * and early-out if not
	 */
	do {
		if (atomic_long_try_cmpxchg(&sem->count, &count, count + adjust)) {
			count += adjust;
			break;
		}
		if (count > 0) {
			raw_spin_unlock_irq(&sem->wait_lock);
			return sem;
		}
	} while (1);

	list_add_tail(&waiter.list, &sem->read_wait);
	sem->wait_readers++;

	waiter.task = current;
	get_task_struct(current);

	/* if there are no active locks, wake the new lock owner(s) */
	if ((count & LDSEM_ACTIVE_MASK) == 0)
		__ldsem_wake(sem);

	raw_spin_unlock_irq(&sem->wait_lock);

	/* wait to be given the lock */
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		if (!smp_load_acquire(&waiter.task))
			break;
		if (!timeout)
			break;
		timeout = schedule_timeout(timeout);
	}

	__set_current_state(TASK_RUNNING);

	if (!timeout) {
		/*
		 * Lock timed out but check if this task was just
		 * granted lock ownership - if so, pretend there
		 * was no timeout; otherwise, cleanup lock wait.
		 */
		raw_spin_lock_irq(&sem->wait_lock);
		if (waiter.task) {
			atomic_long_add_return(-LDSEM_WAIT_BIAS, &sem->count);
			sem->wait_readers--;
			list_del(&waiter.list);
			raw_spin_unlock_irq(&sem->wait_lock);
			put_task_struct(waiter.task);
			return NULL;
		}
		raw_spin_unlock_irq(&sem->wait_lock);
	}

	return sem;
}

/*
 * wait for the write lock to be granted
 */
static struct ld_semaphore __sched *
down_write_failed(struct ld_semaphore *sem, long count, long timeout)
{
	struct ldsem_waiter waiter;
	long adjust = -LDSEM_ACTIVE_BIAS;
	int locked = 0;

	/* set up my own style of waitqueue */
	raw_spin_lock_irq(&sem->wait_lock);

	/*
	 * Try to reverse the lock attempt but if the count has changed
	 * so that reversing fails, check if the lock is now owned,
	 * and early-out if so.
	 */
	do {
		if (atomic_long_try_cmpxchg(&sem->count, &count, count + adjust))
			break;
		if ((count & LDSEM_ACTIVE_MASK) == LDSEM_ACTIVE_BIAS) {
			raw_spin_unlock_irq(&sem->wait_lock);
			return sem;
		}
	} while (1);

	list_add_tail(&waiter.list, &sem->write_wait);

	waiter.task = current;

	set_current_state(TASK_UNINTERRUPTIBLE);
	for (;;) {
		if (!timeout)
			break;
		raw_spin_unlock_irq(&sem->wait_lock);
		timeout = schedule_timeout(timeout);
		raw_spin_lock_irq(&sem->wait_lock);
		set_current_state(TASK_UNINTERRUPTIBLE);
		locked = writer_trylock(sem);
		if (locked)
			break;
	}

	if (!locked)
		atomic_long_add_return(-LDSEM_WAIT_BIAS, &sem->count);
	list_del(&waiter.list);

	/*
	 * In case of timeout, wake up every reader that yielded the right
	 * of way to this writer. This prevents splitting the readers into
	 * two groups: one holding the semaphore and another sleeping
	 * (in the case of no contention with another writer).
	 */
	if (!locked && list_empty(&sem->write_wait))
		__ldsem_wake_readers(sem);

	raw_spin_unlock_irq(&sem->wait_lock);

	__set_current_state(TASK_RUNNING);

	/* lock wait may have timed out */
	if (!locked)
		return NULL;
	return sem;
}

static int __ldsem_down_read_nested(struct ld_semaphore *sem,
				    int subclass, long timeout)
{
	long count;

	rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);

	count = atomic_long_add_return(LDSEM_READ_BIAS, &sem->count);
	if (count <= 0) {
		lock_contended(&sem->dep_map, _RET_IP_);
		if (!down_read_failed(sem, count, timeout)) {
			rwsem_release(&sem->dep_map, _RET_IP_);
			return 0;
		}
	}
	lock_acquired(&sem->dep_map, _RET_IP_);
	return 1;
}

static int __ldsem_down_write_nested(struct ld_semaphore *sem,
				     int subclass, long timeout)
{
	long count;

	rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);

	count = atomic_long_add_return(LDSEM_WRITE_BIAS, &sem->count);
	if ((count & LDSEM_ACTIVE_MASK) != LDSEM_ACTIVE_BIAS) {
		lock_contended(&sem->dep_map, _RET_IP_);
		if (!down_write_failed(sem, count, timeout)) {
			rwsem_release(&sem->dep_map, _RET_IP_);
			return 0;
		}
	}
	lock_acquired(&sem->dep_map, _RET_IP_);
	return 1;
}

/*
 * lock for reading -- returns 1 if successful, 0 if timed out
 */
int __sched ldsem_down_read(struct ld_semaphore *sem, long timeout)
{
	might_sleep();
	return __ldsem_down_read_nested(sem, 0, timeout);
}
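
/*
 * Usage sketch (hypothetical caller, not part of this file): take the
 * read side with a bounded wait and treat a 0 return as a timeout, in
 * the style of the tty core's use of tty->ldisc_sem:
 *
 *	if (!ldsem_down_read(&tty->ldisc_sem, 5 * HZ))
 *		return -EBUSY;	(lock wait timed out)
 *	...read-side critical section...
 *	ldsem_up_read(&tty->ldisc_sem);
 */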

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
int ldsem_down_read_trylock(struct ld_semaphore *sem)
{
	long count = atomic_long_read(&sem->count);

	while (count >= 0) {
		if (atomic_long_try_cmpxchg(&sem->count, &count, count + LDSEM_READ_BIAS)) {
			rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);
			lock_acquired(&sem->dep_map, _RET_IP_);
			return 1;
		}
	}
	return 0;
}

/*
 * lock for writing -- returns 1 if successful, 0 if timed out
 */
int __sched ldsem_down_write(struct ld_semaphore *sem, long timeout)
{
	might_sleep();
	return __ldsem_down_write_nested(sem, 0, timeout);
}

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
int ldsem_down_write_trylock(struct ld_semaphore *sem)
{
	long count = atomic_long_read(&sem->count);

	while ((count & LDSEM_ACTIVE_MASK) == 0) {
		if (atomic_long_try_cmpxchg(&sem->count, &count, count + LDSEM_WRITE_BIAS)) {
			rwsem_acquire(&sem->dep_map, 0, 1, _RET_IP_);
			lock_acquired(&sem->dep_map, _RET_IP_);
			return 1;
		}
	}
	return 0;
}
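
/*
 * Usage sketch (hypothetical, not part of this file): opportunistically
 * take the write lock without sleeping, falling back to a timed wait:
 *
 *	if (!ldsem_down_write_trylock(sem)) {
 *		if (!ldsem_down_write(sem, timeout))
 *			return -EBUSY;	(lock wait timed out)
 *	}
 *	...exclusive critical section...
 *	ldsem_up_write(sem);
 */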

/*
 * release a read lock
 */
void ldsem_up_read(struct ld_semaphore *sem)
{
	long count;

	rwsem_release(&sem->dep_map, _RET_IP_);

	count = atomic_long_add_return(-LDSEM_READ_BIAS, &sem->count);
	/* Last active reader with waiters pending: hand off the lock. */
	if (count < 0 && (count & LDSEM_ACTIVE_MASK) == 0)
		ldsem_wake(sem);
}

/*
 * release a write lock
 */
void ldsem_up_write(struct ld_semaphore *sem)
{
	long count;

	rwsem_release(&sem->dep_map, _RET_IP_);

	count = atomic_long_add_return(-LDSEM_WRITE_BIAS, &sem->count);
	/* Waiters are pending: hand off the lock. */
	if (count < 0)
		ldsem_wake(sem);
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int ldsem_down_read_nested(struct ld_semaphore *sem, int subclass, long timeout)
{
	might_sleep();
	return __ldsem_down_read_nested(sem, subclass, timeout);
}

int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
			    long timeout)
{
	might_sleep();
	return __ldsem_down_write_nested(sem, subclass, timeout);
}

#endif