// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include <trace/events/btrfs.h>
#include "misc.h"
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"
/*
 * Lockdep class keys for extent_buffer->lock's in this root.  For a given
 * eb, the lockdep key is determined by the btrfs_root it belongs to and
 * the level the eb occupies in the tree.
 *
 * Different roots are used for different purposes and may nest inside each
 * other and they require separate keysets.  As lockdep keys should be
 * static, assign keysets according to the purpose of the root as indicated
 * by btrfs_root->root_key.objectid.  This ensures that all special purpose
 * roots have separate keysets.
 *
 * Lock-nesting across peer nodes is always done with the immediate parent
 * node locked, thus preventing deadlock.  As lockdep doesn't know this, use
 * subclass to avoid triggering lockdep warnings in such cases.
 *
 * The key is set by the readpage_end_io_hook after the buffer has passed
 * csum validation but before the pages are unlocked.  It is also set by
 * btrfs_init_new_buffer on freshly allocated blocks.
 *
 * We also add a check to make sure the highest level of the tree is the
 * same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this code
 * needs to be updated as well.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#if BTRFS_MAX_LEVEL != 8
#error
#endif

#define DEFINE_LEVEL(stem, level)				\
        .names[level] = "btrfs-" stem "-0" #level,

#define DEFINE_NAME(stem)					\
        DEFINE_LEVEL(stem, 0)					\
        DEFINE_LEVEL(stem, 1)					\
        DEFINE_LEVEL(stem, 2)					\
        DEFINE_LEVEL(stem, 3)					\
        DEFINE_LEVEL(stem, 4)					\
        DEFINE_LEVEL(stem, 5)					\
        DEFINE_LEVEL(stem, 6)					\
        DEFINE_LEVEL(stem, 7)
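
/*
 * For illustration (not part of the original file): each DEFINE_NAME(stem)
 * expands to eight designated initializers, one per tree level.  For
 * example, DEFINE_NAME("root") becomes roughly:
 *
 *	.names[0] = "btrfs-root-00",
 *	.names[1] = "btrfs-root-01",
 *	...
 *	.names[7] = "btrfs-root-07",
 *
 * i.e. the stem and the level are pasted into the lock class name that
 * lockdep reports.
 */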
static struct btrfs_lockdep_keyset {
        u64                     id;             /* root objectid */
        /* Longest entry: btrfs-block-group-00 */
        char                    names[BTRFS_MAX_LEVEL][24];
        struct lock_class_key   keys[BTRFS_MAX_LEVEL];
} btrfs_lockdep_keysets[] = {
        { .id = BTRFS_ROOT_TREE_OBJECTID,          DEFINE_NAME("root")        },
        { .id = BTRFS_EXTENT_TREE_OBJECTID,        DEFINE_NAME("extent")      },
        { .id = BTRFS_CHUNK_TREE_OBJECTID,         DEFINE_NAME("chunk")       },
        { .id = BTRFS_DEV_TREE_OBJECTID,           DEFINE_NAME("dev")         },
        { .id = BTRFS_CSUM_TREE_OBJECTID,          DEFINE_NAME("csum")        },
        { .id = BTRFS_QUOTA_TREE_OBJECTID,         DEFINE_NAME("quota")       },
        { .id = BTRFS_TREE_LOG_OBJECTID,           DEFINE_NAME("log")         },
        { .id = BTRFS_TREE_RELOC_OBJECTID,         DEFINE_NAME("treloc")      },
        { .id = BTRFS_DATA_RELOC_TREE_OBJECTID,    DEFINE_NAME("dreloc")      },
        { .id = BTRFS_UUID_TREE_OBJECTID,          DEFINE_NAME("uuid")        },
        { .id = BTRFS_FREE_SPACE_TREE_OBJECTID,    DEFINE_NAME("free-space")  },
        { .id = BTRFS_BLOCK_GROUP_TREE_OBJECTID,   DEFINE_NAME("block-group") },
        { .id = BTRFS_RAID_STRIPE_TREE_OBJECTID,   DEFINE_NAME("raid-stripe") },
        { .id = 0,                                 DEFINE_NAME("tree")        },
};

#undef DEFINE_LEVEL
#undef DEFINE_NAME
void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb, int level)
{
        struct btrfs_lockdep_keyset *ks;

        ASSERT(level < ARRAY_SIZE(ks->keys));

        /* Find the matching keyset, id 0 is the default entry */
        for (ks = btrfs_lockdep_keysets; ks->id; ks++)
                if (ks->id == objectid)
                        break;

        lockdep_set_class_and_name(&eb->lock, &ks->keys[level], ks->names[level]);
}

void btrfs_maybe_reset_lockdep_class(struct btrfs_root *root, struct extent_buffer *eb)
{
        if (test_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &root->state))
                btrfs_set_buffer_lockdep_class(btrfs_root_id(root),
                                               eb, btrfs_header_level(eb));
}

#endif
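
/*
 * Illustrative example (not part of the original file): with the table
 * above, a call such as
 *
 *	btrfs_set_buffer_lockdep_class(BTRFS_CSUM_TREE_OBJECTID, eb, 1);
 *
 * matches the "csum" keyset and assigns the lock class named
 * "btrfs-csum-01" to eb->lock.  Any objectid without its own entry falls
 * through to the terminating { .id = 0, ... } "tree" entry.
 */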
#ifdef CONFIG_BTRFS_DEBUG
static void btrfs_set_eb_lock_owner(struct extent_buffer *eb, pid_t owner)
{
        eb->lock_owner = owner;
}
#else
static void btrfs_set_eb_lock_owner(struct extent_buffer *eb, pid_t owner) { }
#endif
/*
 * Extent buffer locking
 * =====================
 *
 * We use a rw_semaphore for tree locking, and the semantics are exactly the
 * same:
 *
 * - reader/writer exclusion
 * - writer/writer exclusion
 * - reader/reader sharing
 * - try-lock semantics for readers and writers
 *
 * The rwsem implementation does opportunistic spinning, which reduces the
 * number of times the locking task needs to sleep.
 */
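
/*
 * Illustrative sketch (not part of the original file), assuming the
 * btrfs_tree_lock()/btrfs_tree_read_lock() wrappers declared in locking.h
 * that use the default nesting level: callers pair these helpers just like
 * a plain rwsem:
 *
 *	btrfs_tree_read_lock(eb);
 *	... read items out of the extent buffer ...
 *	btrfs_tree_read_unlock(eb);
 *
 *	btrfs_tree_lock(eb);
 *	... modify the extent buffer ...
 *	btrfs_tree_unlock(eb);
 */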
/*
 * btrfs_tree_read_lock_nested - lock extent buffer for read
 * @eb:		the eb to be locked
 * @nest:	the nesting level to be used for lockdep
 *
 * This takes the read lock on the extent buffer, using the specified nesting
 * level for lockdep purposes.
 */
void btrfs_tree_read_lock_nested(struct extent_buffer *eb, enum btrfs_lock_nesting nest)
{
        u64 start_ns = 0;

        if (trace_btrfs_tree_read_lock_enabled())
                start_ns = ktime_get_ns();

        down_read_nested(&eb->lock, nest);
        trace_btrfs_tree_read_lock(eb, start_ns);
}
/*
 * Try-lock for read.
 *
 * Return 1 if the read lock has been taken, 0 otherwise.
 */
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
        if (down_read_trylock(&eb->lock)) {
                trace_btrfs_try_tree_read_lock(eb);
                return 1;
        }
        return 0;
}
/*
 * Release read lock.
 */
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
        trace_btrfs_tree_read_unlock(eb);
        up_read(&eb->lock);
}
/*
 * Lock eb for write.
 *
 * @eb:		the eb to lock
 * @nest:	the nesting to use for the lock
 *
 * Returns with the eb->lock write locked.
 */
void btrfs_tree_lock_nested(struct extent_buffer *eb, enum btrfs_lock_nesting nest)
        __acquires(&eb->lock)
{
        u64 start_ns = 0;

        if (trace_btrfs_tree_lock_enabled())
                start_ns = ktime_get_ns();

        down_write_nested(&eb->lock, nest);
        btrfs_set_eb_lock_owner(eb, current->pid);
        trace_btrfs_tree_lock(eb, start_ns);
}
/*
 * Release the write lock.
 */
void btrfs_tree_unlock(struct extent_buffer *eb)
{
        trace_btrfs_tree_unlock(eb);
        btrfs_set_eb_lock_owner(eb, 0);
        up_write(&eb->lock);
}
/*
 * This releases any locks held in the path starting at level and going all the
 * way up to the root.
 *
 * btrfs_search_slot will keep the lock held on higher nodes in a few corner
 * cases, such as COW of the block at slot zero in the node.  This ignores
 * those rules, and it should only be called when there are no more updates to
 * be done higher up in the tree.
 */
void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
{
        int i;

        if (path->keep_locks)
                return;

        for (i = level; i < BTRFS_MAX_LEVEL; i++) {
                if (!path->nodes[i])
                        continue;
                if (!path->locks[i])
                        continue;
                btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
                path->locks[i] = 0;
        }
}
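
/*
 * Note (added for clarity, not part of the original file): path->locks[i]
 * records how node i was locked (a BTRFS_READ_LOCK or BTRFS_WRITE_LOCK
 * value, per locking.h), which is why btrfs_tree_unlock_rw() above takes
 * that value to pick the matching unlock helper.
 */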
/*
 * Loop around taking references on and locking the root node of the tree
 * until we end up with a lock on the root node.  The root can be swapped
 * out from under us (e.g. by a COW) while we wait for the lock, hence the
 * re-check of root->node after the lock is acquired.
 *
 * Return: root extent buffer with write lock held
 */
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
{
        struct extent_buffer *eb;

        while (1) {
                eb = btrfs_root_node(root);

                btrfs_maybe_reset_lockdep_class(root, eb);
                btrfs_tree_lock(eb);
                if (eb == root->node)
                        break;
                btrfs_tree_unlock(eb);
                free_extent_buffer(eb);
        }
        return eb;
}
/*
 * Loop around taking references on and locking the root node of the tree until
 * we end up with a lock on the root node.
 *
 * Return: root extent buffer with read lock held
 */
struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
{
        struct extent_buffer *eb;

        while (1) {
                eb = btrfs_root_node(root);

                btrfs_maybe_reset_lockdep_class(root, eb);
                btrfs_tree_read_lock(eb);
                if (eb == root->node)
                        break;
                btrfs_tree_read_unlock(eb);
                free_extent_buffer(eb);
        }
        return eb;
}
/*
 * Loop around taking references on and locking the root node of the tree in
 * nowait mode until we end up with a lock on the root node, or bail out to
 * avoid blocking.
 *
 * Return: root extent buffer with read lock held, or ERR_PTR(-EAGAIN) if the
 * lock could not be taken without blocking.
 */
struct extent_buffer *btrfs_try_read_lock_root_node(struct btrfs_root *root)
{
        struct extent_buffer *eb;

        while (1) {
                eb = btrfs_root_node(root);

                if (!btrfs_try_tree_read_lock(eb)) {
                        free_extent_buffer(eb);
                        return ERR_PTR(-EAGAIN);
                }
                if (eb == root->node)
                        break;
                btrfs_tree_read_unlock(eb);
                free_extent_buffer(eb);
        }
        return eb;
}
/*
 * DREW locks
 * ==========
 *
 * DREW stands for double-reader-writer-exclusion lock.  It's used in
 * situations where you want to provide A-B exclusion but not AA or BB.
 *
 * The current implementation gives more priority to readers.  If a reader
 * and a writer both race to acquire their respective sides of the lock, the
 * writer yields its lock as soon as it detects a concurrent reader.
 * Additionally, if there are pending readers, no new writers are allowed to
 * come in and acquire the lock.
 */
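
/*
 * Illustrative sketch (not part of the original file): one code path takes
 * the "A" (writer) side and another takes the "B" (reader) side.  Either
 * side may be entered concurrently with itself, but never with the other:
 *
 *	A side:
 *		btrfs_drew_write_lock(&lock);
 *		... work that must exclude B ...
 *		btrfs_drew_write_unlock(&lock);
 *
 *	B side:
 *		btrfs_drew_read_lock(&lock);
 *		... work that must exclude A ...
 *		btrfs_drew_read_unlock(&lock);
 */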
void btrfs_drew_lock_init(struct btrfs_drew_lock *lock)
{
        atomic_set(&lock->readers, 0);
        atomic_set(&lock->writers, 0);
        init_waitqueue_head(&lock->pending_readers);
        init_waitqueue_head(&lock->pending_writers);
}
/* Return true if acquisition is successful, false otherwise */
bool btrfs_drew_try_write_lock(struct btrfs_drew_lock *lock)
{
        if (atomic_read(&lock->readers))
                return false;

        atomic_inc(&lock->writers);

        /* Ensure writers count is updated before we check for pending readers */
        smp_mb__after_atomic();
        if (atomic_read(&lock->readers)) {
                btrfs_drew_write_unlock(lock);
                return false;
        }

        return true;
}
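
/*
 * Note (added for clarity, not part of the original file): the barrier above
 * pairs with the smp_mb__after_atomic() in btrfs_drew_read_lock().  Each side
 * publishes its own counter increment before checking the other side's
 * counter, so at least one of a racing reader/writer pair is guaranteed to
 * observe the other and back off or wait.
 */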
void btrfs_drew_write_lock(struct btrfs_drew_lock *lock)
{
        while (true) {
                if (btrfs_drew_try_write_lock(lock))
                        return;
                wait_event(lock->pending_writers, !atomic_read(&lock->readers));
        }
}
void btrfs_drew_write_unlock(struct btrfs_drew_lock *lock)
{
        /*
         * atomic_dec_and_test() implies a full barrier, so woken up readers
         * are guaranteed to see the decrement.
         */
        if (atomic_dec_and_test(&lock->writers))
                wake_up(&lock->pending_readers);
}
void btrfs_drew_read_lock(struct btrfs_drew_lock *lock)
{
        atomic_inc(&lock->readers);

        /*
         * Ensure the pending reader count is perceived BEFORE this reader
         * goes to sleep in case of active writers.  This guarantees new
         * writers won't be allowed and that the current reader will be
         * woken up when the last active writer finishes its job.
         */
        smp_mb__after_atomic();

        wait_event(lock->pending_readers, atomic_read(&lock->writers) == 0);
}
void btrfs_drew_read_unlock(struct btrfs_drew_lock *lock)
{
        /*
         * atomic_dec_and_test() implies a full barrier, so woken up writers
         * are guaranteed to see the decrement.
         */
        if (atomic_dec_and_test(&lock->readers))
                wake_up(&lock->pending_writers);
}