fs/btrfs/locking.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include "misc.h"
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"
/*
 * Extent buffer locking
 * =====================
 *
 * We use a rw_semaphore for tree locking, and the semantics are exactly the
 * same:
 *
 * - reader/writer exclusion
 * - writer/writer exclusion
 * - reader/reader sharing
 * - try-lock semantics for readers and writers
 *
 * The rwsem implementation does opportunistic spinning, which reduces the
 * number of times the locking task needs to sleep.
 */
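/*
 * Illustrative sketch, not part of the original file: the semantics above
 * expressed through the eb lock API. The helper name and standalone context
 * are hypothetical, hence the #if 0 guard.
 */
#if 0
static void example_eb_lock_semantics(struct extent_buffer *eb)
{
	btrfs_tree_read_lock(eb);	/* shared: other readers may also enter */
	btrfs_tree_read_unlock(eb);

	btrfs_tree_lock(eb);		/* exclusive: blocks readers and writers */
	btrfs_tree_unlock(eb);
}
#endif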
/*
 * __btrfs_tree_read_lock - lock extent buffer for read
 * @eb:   the eb to be locked
 * @nest: the nesting level to be used for lockdep
 *
 * This takes the read lock on the extent buffer, using the specified nesting
 * level for lockdep purposes.
 */
void __btrfs_tree_read_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest)
{
	u64 start_ns = 0;

	if (trace_btrfs_tree_read_lock_enabled())
		start_ns = ktime_get_ns();

	down_read_nested(&eb->lock, nest);
	eb->lock_owner = current->pid;
	trace_btrfs_tree_read_lock(eb, start_ns);
}
void btrfs_tree_read_lock(struct extent_buffer *eb)
{
	__btrfs_tree_read_lock(eb, BTRFS_NESTING_NORMAL);
}
/*
 * Try-lock for read.
 *
 * Return 1 if the rwlock has been taken, 0 otherwise
 */
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
	if (down_read_trylock(&eb->lock)) {
		eb->lock_owner = current->pid;
		trace_btrfs_try_tree_read_lock(eb);
		return 1;
	}
	return 0;
}
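/*
 * Illustrative sketch, not part of the original file: a caller that must not
 * block uses the try-lock and reports failure on contention. The helper name
 * is hypothetical.
 */
#if 0
static bool example_try_read(struct extent_buffer *eb)
{
	if (!btrfs_try_tree_read_lock(eb))
		return false;	/* contended: caller can fall back to blocking */
	/* ... read-only access to eb ... */
	btrfs_tree_read_unlock(eb);
	return true;
}
#endif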
/*
 * Try-lock for write.
 *
 * Return 1 if the rwlock has been taken, 0 otherwise
 */
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
	if (down_write_trylock(&eb->lock)) {
		eb->lock_owner = current->pid;
		trace_btrfs_try_tree_write_lock(eb);
		return 1;
	}
	return 0;
}
/*
 * Release read lock.
 */
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
	trace_btrfs_tree_read_unlock(eb);
	eb->lock_owner = 0;
	up_read(&eb->lock);
}
/*
 * __btrfs_tree_lock - lock eb for write
 * @eb:   the eb to lock
 * @nest: the nesting to use for the lock
 *
 * Returns with the eb->lock write locked.
 */
void __btrfs_tree_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest)
	__acquires(&eb->lock)
{
	u64 start_ns = 0;

	if (trace_btrfs_tree_lock_enabled())
		start_ns = ktime_get_ns();

	down_write_nested(&eb->lock, nest);
	eb->lock_owner = current->pid;
	trace_btrfs_tree_lock(eb, start_ns);
}

void btrfs_tree_lock(struct extent_buffer *eb)
{
	__btrfs_tree_lock(eb, BTRFS_NESTING_NORMAL);
}
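/*
 * Illustrative sketch, not part of the original file: when a child block is
 * locked while its parent is already write locked, a distinct lockdep
 * nesting level keeps lockdep from flagging the second acquisition as a
 * self-deadlock. BTRFS_NESTING_COW is assumed to be one of the nesting
 * levels declared in locking.h.
 */
#if 0
	__btrfs_tree_lock(parent, BTRFS_NESTING_NORMAL);
	__btrfs_tree_lock(child, BTRFS_NESTING_COW);
#endif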
/*
 * Release the write lock.
 */
void btrfs_tree_unlock(struct extent_buffer *eb)
{
	trace_btrfs_tree_unlock(eb);
	eb->lock_owner = 0;
	up_write(&eb->lock);
}
/*
 * This releases any locks held in the path starting at level and going all the
 * way up to the root.
 *
 * btrfs_search_slot will keep the lock held on higher nodes in a few corner
 * cases, such as COW of the block at slot zero in the node. This ignores
 * those rules, and it should only be called when there are no more updates to
 * be done higher up in the tree.
 */
void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
{
	int i;

	if (path->keep_locks)
		return;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		if (!path->nodes[i])
			continue;
		if (!path->locks[i])
			continue;
		btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
		path->locks[i] = 0;
	}
}
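/*
 * Illustrative sketch, not part of the original file: after a search, a
 * caller that only needs to modify the leaf can drop every lock above it.
 */
#if 0
	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		goto out;
	btrfs_unlock_up_safe(path, 1);	/* keep only level 0 (the leaf) locked */
#endif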
/*
 * Loop around taking references on and locking the root node of the tree until
 * we end up with a lock on the root node.
 *
 * Return: root extent buffer with write lock held
 */
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_lock(eb);
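		/*
		 * The root may have changed (e.g. been COWed) between
		 * btrfs_root_node() and acquiring the lock, so check that the
		 * locked eb is still the current root and retry otherwise.
		 */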
		if (eb == root->node)
			break;
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}
/*
 * Loop around taking references on and locking the root node of the tree until
 * we end up with a lock on the root node.
 *
 * Return: root extent buffer with read lock held
 */
struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_read_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}
/*
 * DREW locks
 * ==========
 *
 * DREW stands for double-reader-writer-exclusion lock. It's used in
 * situations where you want to provide A-B exclusion but not AA or BB.
 *
 * The current implementation gives priority to readers. If a reader and a
 * writer both race to acquire their respective sides of the lock, the writer
 * yields its lock as soon as it detects a concurrent reader. Additionally,
 * if there are pending readers, no new writers are allowed to come in and
 * acquire the lock.
 */
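/*
 * Illustrative sketch, not part of the original file: any number of A
 * operations may run together, as may any number of B operations, but an A
 * and a B never overlap. The helper names are hypothetical; here A takes the
 * read side and B the write side.
 */
#if 0
static void example_operation_a(struct btrfs_drew_lock *lock)
{
	btrfs_drew_read_lock(lock);
	/* ... may run concurrently with other A operations ... */
	btrfs_drew_read_unlock(lock);
}

static void example_operation_b(struct btrfs_drew_lock *lock)
{
	btrfs_drew_write_lock(lock);
	/* ... may run concurrently with other B operations ... */
	btrfs_drew_write_unlock(lock);
}
#endif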
int btrfs_drew_lock_init(struct btrfs_drew_lock *lock)
{
	int ret;

	ret = percpu_counter_init(&lock->writers, 0, GFP_KERNEL);
	if (ret)
		return ret;

	atomic_set(&lock->readers, 0);
	init_waitqueue_head(&lock->pending_readers);
	init_waitqueue_head(&lock->pending_writers);

	return 0;
}
void btrfs_drew_lock_destroy(struct btrfs_drew_lock *lock)
{
	percpu_counter_destroy(&lock->writers);
}
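/*
 * Illustrative sketch, not part of the original file: because the writer side
 * is a percpu counter, init can fail (e.g. -ENOMEM) and the lock must be
 * destroyed when its owner goes away. The helper name is hypothetical.
 */
#if 0
static int example_drew_lifecycle(struct btrfs_drew_lock *lock)
{
	int ret;

	ret = btrfs_drew_lock_init(lock);
	if (ret)
		return ret;
	/* ... lock is usable here ... */
	btrfs_drew_lock_destroy(lock);
	return 0;
}
#endif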
/* Return true if acquisition is successful, false otherwise */
bool btrfs_drew_try_write_lock(struct btrfs_drew_lock *lock)
{
	if (atomic_read(&lock->readers))
		return false;

	percpu_counter_inc(&lock->writers);

	/* Ensure writers count is updated before we check for pending readers */
	smp_mb();
	if (atomic_read(&lock->readers)) {
		btrfs_drew_write_unlock(lock);
		return false;
	}

	return true;
}
void btrfs_drew_write_lock(struct btrfs_drew_lock *lock)
{
	while (true) {
		if (btrfs_drew_try_write_lock(lock))
			return;
		wait_event(lock->pending_writers, !atomic_read(&lock->readers));
	}
}
void btrfs_drew_write_unlock(struct btrfs_drew_lock *lock)
{
	percpu_counter_dec(&lock->writers);
	cond_wake_up(&lock->pending_readers);
}
void btrfs_drew_read_lock(struct btrfs_drew_lock *lock)
{
	atomic_inc(&lock->readers);

	/*
	 * Ensure the pending reader count is perceived BEFORE this reader
	 * goes to sleep in case of active writers. This guarantees that no
	 * new writers will be allowed and that the current reader will be
	 * woken up when the last active writer finishes its job.
	 */
	smp_mb__after_atomic();

	wait_event(lock->pending_readers,
		   percpu_counter_sum(&lock->writers) == 0);
}
void btrfs_drew_read_unlock(struct btrfs_drew_lock *lock)
{
	/*
	 * atomic_dec_and_test() implies a full barrier, so woken up writers
	 * are guaranteed to see the decrement.
	 */
	if (atomic_dec_and_test(&lock->readers))
		wake_up(&lock->pending_writers);
}