kernel/locking/qrwlock.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Queued read/write locks
 *
 * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P.
 *
 * Authors: Waiman Long <waiman.long@hp.com>
 */
#include <linux/smp.h>
#include <linux/bug.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <asm/qrwlock.h>
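
/*
 * Note (sketch, not authoritative): the layout of lock->cnts used below is
 * defined in include/asm-generic/qrwlock.h and the exact values vary across
 * kernel versions.  Roughly, the low bits carry the writer state (_QW_LOCKED
 * when a writer owns the lock, _QW_WAITING when a writer is queued) and the
 * reader count sits above them, so each reader adds or removes _QR_BIAS.
 */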
/**
 * queued_read_lock_slowpath - acquire read lock of a queue rwlock
 * @lock: Pointer to queue rwlock structure
 */
void queued_read_lock_slowpath(struct qrwlock *lock)
{
	/*
	 * Readers come here when they cannot get the lock without waiting
	 */
	if (unlikely(in_interrupt())) {
		/*
		 * Readers in interrupt context will get the lock immediately
		 * if the writer is just waiting (not holding the lock yet),
		 * so spin with ACQUIRE semantics until the lock is available
		 * without waiting in the queue.
		 */
		atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));
		return;
	}
	atomic_sub(_QR_BIAS, &lock->cnts);

	/*
	 * Put the reader into the wait queue
	 */
	arch_spin_lock(&lock->wait_lock);
	atomic_add(_QR_BIAS, &lock->cnts);

	/*
	 * The ACQUIRE semantics of the following spinning code ensure
	 * that accesses can't leak upwards out of our subsequent critical
	 * section in the case that the lock is currently held for write.
	 */
	atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));

	/*
	 * Signal the next one in queue to become queue head
	 */
	arch_spin_unlock(&lock->wait_lock);
}
EXPORT_SYMBOL(queued_read_lock_slowpath);
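
/*
 * For context, a paraphrased sketch of the read-lock fast path that falls
 * back to the slowpath above.  It is based on queued_read_lock() in
 * include/asm-generic/qrwlock.h; the exact code differs between kernel
 * versions, and the function name used here is illustrative only.
 */
#if 0	/* illustrative sketch, not part of this file */
static inline void example_queued_read_lock(struct qrwlock *lock)
{
	int cnts;

	/* Optimistically bump the reader count. */
	cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
	if (likely(!(cnts & _QW_WMASK)))
		return;		/* no writer active or waiting: lock acquired */

	/* A writer is involved; the slowpath drops the bias and queues. */
	queued_read_lock_slowpath(lock);
}
#endif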
/**
 * queued_write_lock_slowpath - acquire write lock of a queue rwlock
 * @lock: Pointer to queue rwlock structure
 */
void queued_write_lock_slowpath(struct qrwlock *lock)
{
	/* Put the writer into the wait queue */
	arch_spin_lock(&lock->wait_lock);

	/* Try to acquire the lock directly if no reader is present */
	if (!atomic_read(&lock->cnts) &&
	    (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0))
		goto unlock;

	/* Set the waiting flag to notify readers that a writer is pending */
	atomic_add(_QW_WAITING, &lock->cnts);

	/* When no more readers or writers, set the locked flag */
	do {
		atomic_cond_read_acquire(&lock->cnts, VAL == _QW_WAITING);
	} while (atomic_cmpxchg_relaxed(&lock->cnts, _QW_WAITING,
					_QW_LOCKED) != _QW_WAITING);
unlock:
	arch_spin_unlock(&lock->wait_lock);
}
EXPORT_SYMBOL(queued_write_lock_slowpath);
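
/*
 * Likewise, a paraphrased sketch of the write-lock fast path, based on
 * queued_write_lock() in include/asm-generic/qrwlock.h (details vary by
 * kernel version; the function name here is illustrative only).  Note the
 * design split: the fast path only attempts an uncontended 0 -> _QW_LOCKED
 * transition, while all fairness (FIFO ordering of contenders through
 * wait_lock) is handled in the slowpath above.
 */
#if 0	/* illustrative sketch, not part of this file */
static inline void example_queued_write_lock(struct qrwlock *lock)
{
	/* Uncontended case: no readers and no writer. */
	if (likely(atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0))
		return;

	queued_write_lock_slowpath(lock);
}
#endif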