// SPDX-License-Identifier: GPL-2.0
/* rwsem.c: R/W semaphores: contention handling functions
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from arch/i386/kernel/semaphore.c
 *
 * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
 * and Michel Lespinasse <walken@google.com>
 *
 * Optimistic spinning by Tim Chen <tim.c.chen@intel.com>
 * and Davidlohr Bueso <davidlohr@hp.com>. Based on mutexes.
 */
#include <linux/rwsem.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/debug.h>
#include <linux/osq_lock.h>

#include "rwsem.h"

/*
 * Guide to the rw_semaphore's count field for common values.
 * (32-bit case illustrated, similar for 64-bit)
 *
 * 0x0000000X  (1) X readers active or attempting lock, no writer waiting
 *                 X = #active_readers + #readers attempting to lock
 *                 (X*ACTIVE_BIAS)
 *
 * 0x00000000  rwsem is unlocked, and no one is waiting for the lock or
 *             attempting to read lock or write lock.
 *
 * 0xffff000X  (1) X readers active or attempting lock, with waiters for lock
 *                 X = #active readers + #readers attempting lock
 *                 (X*ACTIVE_BIAS + WAITING_BIAS)
 *             (2) 1 writer attempting lock, no waiters for lock
 *                 X-1 = #active readers + #readers attempting lock
 *                 ((X-1)*ACTIVE_BIAS + ACTIVE_WRITE_BIAS)
 *             (3) 1 writer active, no waiters for lock
 *                 X-1 = #active readers + #readers attempting lock
 *                 ((X-1)*ACTIVE_BIAS + ACTIVE_WRITE_BIAS)
 *
 * 0xffff0001  (1) 1 reader active or attempting lock, waiters for lock
 *                 (WAITING_BIAS + ACTIVE_BIAS)
 *             (2) 1 writer active or attempting lock, no waiters for lock
 *                 (ACTIVE_WRITE_BIAS)
 *
 * 0xffff0000  (1) There are writers or readers queued but none active
 *                 or in the process of attempting lock.
 *                 (WAITING_BIAS)
 *             Note: a writer can attempt to steal the lock for this count by
 *             adding ACTIVE_WRITE_BIAS in a cmpxchg and checking the old count.
 *
 * 0xfffe0001  (1) 1 writer active, or attempting lock. Waiters on queue.
 *                 (ACTIVE_WRITE_BIAS + WAITING_BIAS)
 *
 * Note: Readers attempt to lock by adding ACTIVE_BIAS in down_read and checking
 *       that the count becomes greater than 0 for successful lock acquisition,
 *       i.e. the case where there are only readers or nobody has the lock
 *       (1st and 2nd case above).
 *
 *       Writers attempt to lock by adding ACTIVE_WRITE_BIAS in down_write and
 *       checking that the count becomes ACTIVE_WRITE_BIAS for successful lock
 *       acquisition (i.e. nobody else has the lock or attempts the lock). If
 *       unsuccessful, in rwsem_down_write_failed, we'll check to see if there
 *       are only waiters but none active (5th case above), and attempt to
 *       steal the lock.
 */
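
/*
 * For reference, the bias constants manipulated throughout this file are
 * defined in the rwsem headers, not here. A sketch of the generic 64-bit
 * definitions (the per-arch headers may spell them differently):
 *
 *      #define RWSEM_UNLOCKED_VALUE            0x00000000L
 *      #define RWSEM_ACTIVE_BIAS               0x00000001L
 *      #define RWSEM_ACTIVE_MASK               0xffffffffL
 *      #define RWSEM_WAITING_BIAS              (-RWSEM_ACTIVE_MASK-1)
 *      #define RWSEM_ACTIVE_READ_BIAS          RWSEM_ACTIVE_BIAS
 *      #define RWSEM_ACTIVE_WRITE_BIAS         (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 */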

/*
 * Initialize an rwsem:
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
                  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        /*
         * Make sure we are not reinitializing a held semaphore:
         */
        debug_check_no_locks_freed((void *)sem, sizeof(*sem));
        lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
        atomic_long_set(&sem->count, RWSEM_UNLOCKED_VALUE);
        raw_spin_lock_init(&sem->wait_lock);
        INIT_LIST_HEAD(&sem->wait_list);
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
        sem->owner = NULL;
        osq_lock_init(&sem->osq);
#endif
}

EXPORT_SYMBOL(__init_rwsem);
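
/*
 * Typical caller-side usage, for context (a minimal sketch, not part of
 * this file). DECLARE_RWSEM() initializes statically; init_rwsem() does
 * the same at runtime via __init_rwsem() above:
 *
 *      static DECLARE_RWSEM(my_sem);
 *
 *      down_read(&my_sem);
 *      ... read-side critical section ...
 *      up_read(&my_sem);
 *
 *      down_write(&my_sem);
 *      ... write-side critical section ...
 *      up_write(&my_sem);
 */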

enum rwsem_waiter_type {
        RWSEM_WAITING_FOR_WRITE,
        RWSEM_WAITING_FOR_READ
};

struct rwsem_waiter {
        struct list_head list;
        struct task_struct *task;
        enum rwsem_waiter_type type;
};

enum rwsem_wake_type {
        RWSEM_WAKE_ANY,         /* Wake whatever's at head of wait list */
        RWSEM_WAKE_READERS,     /* Wake readers only */
        RWSEM_WAKE_READ_OWNED   /* Waker thread holds the read lock */
};

/*
 * handle the lock release when there are processes blocked on it that can
 * now run
 * - if we come here from up_xxxx(), then:
 *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
 *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
 * - there must be someone on the queue
 * - the wait_lock must be held by the caller
 * - tasks are marked for wakeup, the caller must later invoke wake_up_q()
 *   to actually wakeup the blocked task(s) and drop the reference count,
 *   preferably when the wait_lock is released
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only marked woken if downgrading is false
 */
static void __rwsem_mark_wake(struct rw_semaphore *sem,
                              enum rwsem_wake_type wake_type,
                              struct wake_q_head *wake_q)
{
        struct rwsem_waiter *waiter, *tmp;
        long oldcount, woken = 0, adjustment = 0;

        /*
         * Take a peek at the queue head waiter such that we can determine
         * the wakeup(s) to perform.
         */
        waiter = list_first_entry(&sem->wait_list, struct rwsem_waiter, list);

        if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
                if (wake_type == RWSEM_WAKE_ANY) {
                        /*
                         * Mark writer at the front of the queue for wakeup.
                         * Until the task is actually awoken later by the
                         * caller, other writers are able to steal it.
                         * Readers, on the other hand, will block as they
                         * will notice the queued writer.
                         */
                        wake_q_add(wake_q, waiter->task);
                }

                return;
        }

        /*
         * Writers might steal the lock before we grant it to the next reader.
         * We prefer to do the first reader grant before counting readers
         * so we can bail out early if a writer stole the lock.
         */
        if (wake_type != RWSEM_WAKE_READ_OWNED) {
                adjustment = RWSEM_ACTIVE_READ_BIAS;
 try_reader_grant:
                oldcount = atomic_long_fetch_add(adjustment, &sem->count);
                if (unlikely(oldcount < RWSEM_WAITING_BIAS)) {
                        /*
                         * If the count is still less than RWSEM_WAITING_BIAS
                         * after removing the adjustment, it is assumed that
                         * a writer has stolen the lock. We have to undo our
                         * reader grant.
                         */
                        if (atomic_long_add_return(-adjustment, &sem->count) <
                            RWSEM_WAITING_BIAS)
                                return;

                        /* Last active locker left. Retry waking readers. */
                        goto try_reader_grant;
                }
                /*
                 * It is not really necessary to set it to reader-owned here,
                 * but it gives the spinners an early indication that the
                 * readers now have the lock.
                 */
                rwsem_set_reader_owned(sem);
        }

        /*
         * Grant read locks to all of the readers at the front of the queue.
         * We know that woken will be at least 1 as we accounted for it
         * above. Note we increment the 'active part' of the count by the
         * number of readers before waking any processes up.
         */
        list_for_each_entry_safe(waiter, tmp, &sem->wait_list, list) {
                struct task_struct *tsk;

                if (waiter->type == RWSEM_WAITING_FOR_WRITE)
                        break;

                woken++;
                tsk = waiter->task;

                wake_q_add(wake_q, tsk);
                list_del(&waiter->list);
                /*
                 * Ensure that the last operation is setting the reader
                 * waiter to nil such that rwsem_down_read_failed() cannot
                 * race with do_exit() by always holding a reference count
                 * to the task to wakeup.
                 */
                smp_store_release(&waiter->task, NULL);
        }

        adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
        if (list_empty(&sem->wait_list)) {
                /* hit end of list above */
                adjustment -= RWSEM_WAITING_BIAS;
        }

        if (adjustment)
                atomic_long_add(adjustment, &sem->count);
}
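
/*
 * Worked example of the adjustment arithmetic above (a sketch using the
 * 32-bit biases from the table at the top of this file): suppose up_write()
 * finds three readers queued and nothing else. The first reader grant adds
 * ACTIVE_READ_BIAS (+1), the loop then counts woken = 3, so the final
 * adjustment is 3*1 - 1 = +2 for the two remaining grants, minus
 * WAITING_BIAS because the list emptied. Net: count moves from 0xffff0000
 * (WAITING_BIAS) to 0x00000003 (three active readers, no waiters).
 */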

/*
 * Wait for the read lock to be granted
 */
static inline struct rw_semaphore __sched *
__rwsem_down_read_failed_common(struct rw_semaphore *sem, int state)
{
        long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
        struct rwsem_waiter waiter;
        DEFINE_WAKE_Q(wake_q);

        waiter.task = current;
        waiter.type = RWSEM_WAITING_FOR_READ;

        raw_spin_lock_irq(&sem->wait_lock);
        if (list_empty(&sem->wait_list))
                adjustment += RWSEM_WAITING_BIAS;
        list_add_tail(&waiter.list, &sem->wait_list);

        /* we're now waiting on the lock, but no longer actively locking */
        count = atomic_long_add_return(adjustment, &sem->count);

        /*
         * If there are no active locks, wake the front queued process(es).
         *
         * If there are no writers and we are first in the queue,
         * wake our own waiter to join the existing active readers !
         */
        if (count == RWSEM_WAITING_BIAS ||
            (count > RWSEM_WAITING_BIAS &&
             adjustment != -RWSEM_ACTIVE_READ_BIAS))
                __rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);

        raw_spin_unlock_irq(&sem->wait_lock);
        wake_up_q(&wake_q);

        /* wait to be given the lock */
        while (true) {
                set_current_state(state);
                if (!waiter.task)
                        break;
                if (signal_pending_state(state, current)) {
                        raw_spin_lock_irq(&sem->wait_lock);
                        if (waiter.task)
                                goto out_nolock;
                        raw_spin_unlock_irq(&sem->wait_lock);
                        break;
                }
                schedule();
        }

        __set_current_state(TASK_RUNNING);
        return sem;

out_nolock:
        list_del(&waiter.list);
        if (list_empty(&sem->wait_list))
                atomic_long_add(-RWSEM_WAITING_BIAS, &sem->count);
        raw_spin_unlock_irq(&sem->wait_lock);
        __set_current_state(TASK_RUNNING);
        return ERR_PTR(-EINTR);
}

__visible struct rw_semaphore * __sched
rwsem_down_read_failed(struct rw_semaphore *sem)
{
        return __rwsem_down_read_failed_common(sem, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(rwsem_down_read_failed);

__visible struct rw_semaphore * __sched
rwsem_down_read_failed_killable(struct rw_semaphore *sem)
{
        return __rwsem_down_read_failed_common(sem, TASK_KILLABLE);
}
EXPORT_SYMBOL(rwsem_down_read_failed_killable);
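
/*
 * For context, the down_read() fast path that falls back to
 * rwsem_down_read_failed() looks roughly like this (a sketch of the
 * asm-generic flavour; per-arch versions differ in spelling, not effect):
 *
 *      static inline void __down_read(struct rw_semaphore *sem)
 *      {
 *              if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0))
 *                      rwsem_down_read_failed(sem);
 *      }
 */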

/*
 * This function must be called with the sem->wait_lock held to prevent
 * race conditions between checking the rwsem wait list and setting the
 * sem->count accordingly.
 */
static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
{
        /*
         * Avoid trying to acquire write lock if count isn't RWSEM_WAITING_BIAS.
         */
        if (count != RWSEM_WAITING_BIAS)
                return false;

        /*
         * Acquire the lock by trying to set it to ACTIVE_WRITE_BIAS. If there
         * are other tasks on the wait list, we need to add on WAITING_BIAS.
         */
        count = list_is_singular(&sem->wait_list) ?
                        RWSEM_ACTIVE_WRITE_BIAS :
                        RWSEM_ACTIVE_WRITE_BIAS + RWSEM_WAITING_BIAS;

        if (atomic_long_cmpxchg_acquire(&sem->count, RWSEM_WAITING_BIAS, count)
                                                        == RWSEM_WAITING_BIAS) {
                rwsem_set_owner(sem);
                return true;
        }

        return false;
}
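
/*
 * Worked example (32-bit values from the table at the top of this file):
 * with only this writer queued, the cmpxchg above moves count from
 * 0xffff0000 (WAITING_BIAS) to 0xffff0001 (ACTIVE_WRITE_BIAS). With other
 * waiters still queued, it moves count to 0xfffe0001 (ACTIVE_WRITE_BIAS +
 * WAITING_BIAS) so that the waiting part stays recorded.
 */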

#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
/*
 * Try to acquire write lock before the writer has been put on wait queue.
 */
static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
{
        long old, count = atomic_long_read(&sem->count);

        while (true) {
                if (!(count == 0 || count == RWSEM_WAITING_BIAS))
                        return false;

                old = atomic_long_cmpxchg_acquire(&sem->count, count,
                                      count + RWSEM_ACTIVE_WRITE_BIAS);
                if (old == count) {
                        rwsem_set_owner(sem);
                        return true;
                }

                count = old;
        }
}

static inline bool owner_on_cpu(struct task_struct *owner)
{
        /*
         * To guard against lock holder preemption, skip spinning if the
         * owner task is not on a CPU or if its CPU is preempted.
         */
        return owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
}

static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
{
        struct task_struct *owner;
        bool ret = true;

        BUILD_BUG_ON(!rwsem_has_anonymous_owner(RWSEM_OWNER_UNKNOWN));

        if (need_resched())
                return false;

        rcu_read_lock();
        owner = READ_ONCE(sem->owner);
        if (owner) {
                ret = is_rwsem_owner_spinnable(owner) &&
                      owner_on_cpu(owner);
        }
        rcu_read_unlock();
        return ret;
}

/*
 * Return true only if we can still spin on the owner field of the rwsem.
 */
static noinline bool rwsem_spin_on_owner(struct rw_semaphore *sem)
{
        struct task_struct *owner = READ_ONCE(sem->owner);

        if (!is_rwsem_owner_spinnable(owner))
                return false;

        rcu_read_lock();
        while (owner && (READ_ONCE(sem->owner) == owner)) {
                /*
                 * Ensure we emit the owner->on_cpu dereference _after_
                 * checking that sem->owner still matches owner. If the
                 * check fails, owner might point to freed memory; if it
                 * still matches, the rcu_read_lock() ensures the memory
                 * stays valid.
                 */
                barrier();

                /*
                 * Abort spinning when need_resched() is set, the owner is
                 * not running, or the owner's CPU is preempted.
                 */
                if (need_resched() || !owner_on_cpu(owner)) {
                        rcu_read_unlock();
                        return false;
                }

                cpu_relax();
        }
        rcu_read_unlock();

        /*
         * If there is a new owner or the owner is not set, we continue
         * spinning.
         */
        return is_rwsem_owner_spinnable(READ_ONCE(sem->owner));
}

static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
        bool taken = false;

        preempt_disable();

        /* sem->wait_lock should not be held when doing optimistic spinning */
        if (!rwsem_can_spin_on_owner(sem))
                goto done;

        if (!osq_lock(&sem->osq))
                goto done;

        /*
         * Optimistically spin on the owner field and attempt to acquire the
         * lock whenever the owner changes. Spinning will be stopped when:
         *  1) the owning writer isn't running; or
         *  2) readers own the lock as we can't determine if they are
         *     actively running or not.
         */
        while (rwsem_spin_on_owner(sem)) {
                /*
                 * Try to acquire the lock
                 */
                if (rwsem_try_write_lock_unqueued(sem)) {
                        taken = true;
                        break;
                }

                /*
                 * When there's no owner, we might have preempted between the
                 * owner acquiring the lock and setting the owner field. If
                 * we're an RT task, that will live-lock because we won't let
                 * the owner complete.
                 */
                if (!sem->owner && (need_resched() || rt_task(current)))
                        break;

                /*
                 * The cpu_relax() call is a compiler barrier which forces
                 * everything in this loop to be re-loaded. We don't need
                 * memory barriers as we'll eventually observe the right
                 * values at the cost of a few extra spins.
                 */
                cpu_relax();
        }
        osq_unlock(&sem->osq);
done:
        preempt_enable();
        return taken;
}

/*
 * Return true if the rwsem has an active spinner
 */
static inline bool rwsem_has_spinner(struct rw_semaphore *sem)
{
        return osq_is_locked(&sem->osq);
}

#else
static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
        return false;
}

static inline bool rwsem_has_spinner(struct rw_semaphore *sem)
{
        return false;
}
#endif

/*
 * Wait until we successfully acquire the write lock
 */
static inline struct rw_semaphore *
__rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
{
        long count;
        bool waiting = true; /* any queued threads before us */
        struct rwsem_waiter waiter;
        struct rw_semaphore *ret = sem;
        DEFINE_WAKE_Q(wake_q);

        /* undo write bias from down_write operation, stop active locking */
        count = atomic_long_sub_return(RWSEM_ACTIVE_WRITE_BIAS, &sem->count);

        /* do optimistic spinning and steal lock if possible */
        if (rwsem_optimistic_spin(sem))
                return sem;

        /*
         * Optimistic spinning failed, proceed to the slowpath
         * and block until we can acquire the sem.
         */
        waiter.task = current;
        waiter.type = RWSEM_WAITING_FOR_WRITE;

        raw_spin_lock_irq(&sem->wait_lock);

        /* account for this before adding a new element to the list */
        if (list_empty(&sem->wait_list))
                waiting = false;

        list_add_tail(&waiter.list, &sem->wait_list);

        /* we're now waiting on the lock, but no longer actively locking */
        if (waiting) {
                count = atomic_long_read(&sem->count);

                /*
                 * If there were already threads queued before us and there are
                 * no active writers, the lock must be read owned; so we try to
                 * wake any read locks that were queued ahead of us.
                 */
                if (count > RWSEM_WAITING_BIAS) {
                        __rwsem_mark_wake(sem, RWSEM_WAKE_READERS, &wake_q);
                        /*
                         * The wakeup is normally called _after_ the wait_lock
                         * is released, but given that we are proactively waking
                         * readers we can deal with the wake_q overhead as it is
                         * similar to releasing and taking the wait_lock again
                         * for attempting rwsem_try_write_lock().
                         */
                        wake_up_q(&wake_q);

                        /*
                         * Reinitialize wake_q after use.
                         */
                        wake_q_init(&wake_q);
                }

        } else
                count = atomic_long_add_return(RWSEM_WAITING_BIAS, &sem->count);

        /* wait until we successfully acquire the lock */
        set_current_state(state);
        while (true) {
                if (rwsem_try_write_lock(count, sem))
                        break;
                raw_spin_unlock_irq(&sem->wait_lock);

                /* Block until there are no active lockers. */
                do {
                        if (signal_pending_state(state, current))
                                goto out_nolock;

                        schedule();
                        set_current_state(state);
                } while ((count = atomic_long_read(&sem->count)) & RWSEM_ACTIVE_MASK);

                raw_spin_lock_irq(&sem->wait_lock);
        }
        __set_current_state(TASK_RUNNING);
        list_del(&waiter.list);
        raw_spin_unlock_irq(&sem->wait_lock);

        return ret;

out_nolock:
        __set_current_state(TASK_RUNNING);
        raw_spin_lock_irq(&sem->wait_lock);
        list_del(&waiter.list);
        if (list_empty(&sem->wait_list))
                atomic_long_add(-RWSEM_WAITING_BIAS, &sem->count);
        else
                __rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
        raw_spin_unlock_irq(&sem->wait_lock);
        wake_up_q(&wake_q);

        return ERR_PTR(-EINTR);
}

__visible struct rw_semaphore * __sched
rwsem_down_write_failed(struct rw_semaphore *sem)
{
        return __rwsem_down_write_failed_common(sem, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(rwsem_down_write_failed);

__visible struct rw_semaphore * __sched
rwsem_down_write_failed_killable(struct rw_semaphore *sem)
{
        return __rwsem_down_write_failed_common(sem, TASK_KILLABLE);
}
EXPORT_SYMBOL(rwsem_down_write_failed_killable);
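
/*
 * For context, the down_write() fast path that falls back to
 * rwsem_down_write_failed() looks roughly like this (a sketch of the
 * asm-generic flavour):
 *
 *      static inline void __down_write(struct rw_semaphore *sem)
 *      {
 *              long tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
 *                                                        &sem->count);
 *              if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
 *                      rwsem_down_write_failed(sem);
 *      }
 */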

/*
 * handle waking up a waiter on the semaphore
 * - up_read/up_write has decremented the active part of count if we come here
 */
__visible
struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
{
        unsigned long flags;
        DEFINE_WAKE_Q(wake_q);

        /*
         * __rwsem_down_write_failed_common(sem)
         *   rwsem_optimistic_spin(sem)
         *     osq_unlock(sem->osq)
         *   ...
         *   atomic_long_add_return(&sem->count)
         *
         *      - VS -
         *
         *              __up_write()
         *                if (atomic_long_sub_return_release(&sem->count) < 0)
         *                  rwsem_wake(sem)
         *                    osq_is_locked(&sem->osq)
         *
         * And __up_write() must observe !osq_is_locked() when it observes the
         * atomic_long_add_return() in order to not miss a wakeup.
         *
         * This boils down to:
         *
         * [S.rel] X = 1                [RmW] r0 = (Y += 0)
         *         MB                         RMB
         * [RmW]   Y += 1               [L]   r1 = X
         *
         * exists (r0=1 /\ r1=0)
         */
        smp_rmb();

        /*
         * If a spinner is present, it is not necessary to do the wakeup.
         * Try to do wakeup only if the trylock succeeds to minimize
         * spinlock contention which may introduce too much delay in the
         * unlock operation.
         *
         *    spinning writer           up_write/up_read caller
         *    ---------------           -----------------------
         * [S]   osq_unlock()           [L]   osq
         *       MB                           RMB
         * [RmW] rwsem_try_write_lock() [RmW] spin_trylock(wait_lock)
         *
         * Here, it is important to make sure that there won't be a missed
         * wakeup while the rwsem is free and the only spinning writer goes
         * to sleep without taking the rwsem. Even when the spinning writer
         * is just going to break out of the waiting loop, it will still do
         * a trylock in rwsem_down_write_failed() before sleeping. IOW, if
         * rwsem_has_spinner() is true, it will guarantee at least one
         * trylock attempt on the rwsem later on.
         */
        if (rwsem_has_spinner(sem)) {
                /*
                 * The smp_rmb() here is to make sure that the spinner
                 * state is consulted before reading the wait_lock.
                 */
                smp_rmb();
                if (!raw_spin_trylock_irqsave(&sem->wait_lock, flags))
                        return sem;
                goto locked;
        }
        raw_spin_lock_irqsave(&sem->wait_lock, flags);
locked:

        if (!list_empty(&sem->wait_list))
                __rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
        wake_up_q(&wake_q);

        return sem;
}
EXPORT_SYMBOL(rwsem_wake);
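
/*
 * For context, rwsem_wake() is reached from the unlock fast paths, roughly
 * (a sketch of the asm-generic flavour):
 *
 *      static inline void __up_read(struct rw_semaphore *sem)
 *      {
 *              long tmp = atomic_long_dec_return_release(&sem->count);
 *              if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
 *                      rwsem_wake(sem);
 *      }
 *
 *      static inline void __up_write(struct rw_semaphore *sem)
 *      {
 *              if (unlikely(atomic_long_sub_return_release(RWSEM_ACTIVE_WRITE_BIAS,
 *                                                          &sem->count) < 0))
 *                      rwsem_wake(sem);
 *      }
 */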

/*
 * downgrade a write lock into a read lock
 * - caller incremented waiting part of count and discovered it still negative
 * - just wake up any readers at the front of the queue
 */
__visible
struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
        unsigned long flags;
        DEFINE_WAKE_Q(wake_q);

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        if (!list_empty(&sem->wait_list))
                __rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED, &wake_q);

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
        wake_up_q(&wake_q);

        return sem;
}
EXPORT_SYMBOL(rwsem_downgrade_wake);
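
/*
 * For context, the downgrade_write() fast path, roughly (a sketch of the
 * asm-generic flavour): adding -RWSEM_WAITING_BIAS converts
 * ACTIVE_WRITE_BIAS into ACTIVE_BIAS, and a still-negative result means
 * waiters are queued, so any readers among them must be woken:
 *
 *      static inline void __downgrade_write(struct rw_semaphore *sem)
 *      {
 *              long tmp = atomic_long_add_return_release(-RWSEM_WAITING_BIAS,
 *                                                        &sem->count);
 *              if (tmp < 0)
 *                      rwsem_downgrade_wake(sem);
 *      }
 */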