// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include "misc.h"
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"

/*
 * Extent buffer locking
 * =====================
 *
 * The locks use a custom scheme that allows more operations than are
 * available from current locking primitives. The building blocks are still
 * rwlock and wait queues.
 *
 * Required semantics:
 *
 * - reader/writer exclusion
 * - writer/writer exclusion
 * - reader/reader sharing
 * - spinning lock semantics
 * - blocking lock semantics
 * - try-lock semantics for readers and writers
 * - one level nesting, allowing a read lock to be taken by the same thread
 *   that already holds the write lock
 *
 * The extent buffer locks (also called tree locks) manage access to eb data
 * related to the storage in the b-tree (keys, items, but not the individual
 * members of eb).
 * We want concurrency of many readers and safe updates. The underlying locking
 * is done by a read-write spinlock and the blocking part is implemented using
 * counters and wait queues.
 *
 * spinning semantics - the low-level rwlock is held so all other threads that
 *                      want to take it are spinning on it.
 *
 * blocking semantics - the low-level rwlock is not held but the counter
 *                      denotes how many times the blocking lock was held;
 *                      sleeping is possible
 *
 * Write lock always allows only one thread to access the data.
 *
 *
 * Debugging
 * ---------
 *
 * There are additional state counters that are asserted in various contexts,
 * removed from non-debug build to reduce extent_buffer size and for
 * performance reasons.
 *
 *
 * Lock recursion
 * --------------
 *
 * A write operation on a tree might indirectly start a lookup on the same
 * tree. This can happen when btrfs_cow_block locks the tree and needs to
 * lookup free extents.
 *
 * btrfs_cow_block
 *   ..
 *   alloc_tree_block_no_bg_flush
 *     btrfs_alloc_tree_block
 *       btrfs_reserve_extent
 *         ..
 *         load_free_space_cache
 *           ..
 *           btrfs_lookup_file_extent
 *             btrfs_search_slot
 *
 *
 * Locking pattern - spinning
 * --------------------------
 *
 * The simple locking scenario, the +--+ denotes the spinning section.
 *
 * +- btrfs_tree_lock
 * | - extent_buffer::rwlock is held
 * | - no heavy operations should happen, eg. IO, memory allocations, large
 * |   structure traversals
 * +- btrfs_tree_unlock
 *
 *
 * Locking pattern - blocking
 * --------------------------
 *
 * The blocking write uses the following scheme. The +--+ denotes the spinning
 * section.
 *
 * +- btrfs_tree_lock
 * |
 * +- btrfs_set_lock_blocking_write
 *
 *   - allowed: IO, memory allocations, etc.
 *
 * -- btrfs_tree_unlock - note, no explicit unblocking necessary
 *
 *
 * Blocking read is similar.
 *
 * +- btrfs_tree_read_lock
 * |
 * +- btrfs_set_lock_blocking_read
 *
 *  - heavy operations allowed
 *
 * +- btrfs_tree_read_unlock_blocking
 * |
 * +- btrfs_tree_read_unlock
 */

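/*
 * Illustrative sketch (not part of the original file): the blocking write
 * pattern above written out as a call sequence. The eb pointer and the heavy
 * work in the middle are hypothetical placeholders.
 *
 *	btrfs_tree_lock(eb);			- spinning section, rwlock held
 *	btrfs_set_lock_blocking_write(eb);	- rwlock dropped, waiters may sleep
 *	... heavy work: IO, memory allocations ...
 *	btrfs_tree_unlock(eb);			- no explicit unblocking needed
 */
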
#ifdef CONFIG_BTRFS_DEBUG
static inline void btrfs_assert_spinning_writers_get(struct extent_buffer *eb)
{
	WARN_ON(eb->spinning_writers);
	eb->spinning_writers++;
}

static inline void btrfs_assert_spinning_writers_put(struct extent_buffer *eb)
{
	WARN_ON(eb->spinning_writers != 1);
	eb->spinning_writers--;
}

static inline void btrfs_assert_no_spinning_writers(struct extent_buffer *eb)
{
	WARN_ON(eb->spinning_writers);
}

static inline void btrfs_assert_spinning_readers_get(struct extent_buffer *eb)
{
	atomic_inc(&eb->spinning_readers);
}

static inline void btrfs_assert_spinning_readers_put(struct extent_buffer *eb)
{
	WARN_ON(atomic_read(&eb->spinning_readers) == 0);
	atomic_dec(&eb->spinning_readers);
}

static inline void btrfs_assert_tree_read_locks_get(struct extent_buffer *eb)
{
	atomic_inc(&eb->read_locks);
}

static inline void btrfs_assert_tree_read_locks_put(struct extent_buffer *eb)
{
	atomic_dec(&eb->read_locks);
}

static inline void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->read_locks));
}

static inline void btrfs_assert_tree_write_locks_get(struct extent_buffer *eb)
{
	atomic_inc(&eb->write_locks);
}

static inline void btrfs_assert_tree_write_locks_put(struct extent_buffer *eb)
{
	atomic_dec(&eb->write_locks);
}

#else
static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb) { }
static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_readers_put(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_readers_get(struct extent_buffer *eb) { }
static void btrfs_assert_tree_read_locked(struct extent_buffer *eb) { }
static void btrfs_assert_tree_read_locks_get(struct extent_buffer *eb) { }
static void btrfs_assert_tree_read_locks_put(struct extent_buffer *eb) { }
static void btrfs_assert_tree_write_locks_get(struct extent_buffer *eb) { }
static void btrfs_assert_tree_write_locks_put(struct extent_buffer *eb) { }
#endif

/*
 * Mark already held read lock as blocking. Can be nested in write lock by the
 * same thread.
 *
 * Use when there are potentially long operations ahead so other threads
 * waiting on the lock will not actively spin but sleep instead.
 *
 * The rwlock is released and the blocking reader counter is increased.
 */
void btrfs_set_lock_blocking_read(struct extent_buffer *eb)
{
	trace_btrfs_set_lock_blocking_read(eb);
	/*
	 * No lock is required. The lock owner may change if we have a read
	 * lock, but it won't change to or away from us. If we have the write
	 * lock, we are the owner and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	btrfs_assert_tree_read_locked(eb);
	atomic_inc(&eb->blocking_readers);
	btrfs_assert_spinning_readers_put(eb);
	read_unlock(&eb->lock);
}

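/*
 * Illustrative sketch (not part of the original file): pairing for the
 * blocking read. The eb pointer and the work in between are hypothetical.
 *
 *	btrfs_tree_read_lock(eb);
 *	btrfs_set_lock_blocking_read(eb);	- rwlock released here
 *	... heavy operations allowed ...
 *	btrfs_tree_read_unlock_blocking(eb);	- pairs with the blocking call
 */
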
/*
 * Mark already held write lock as blocking.
 *
 * Use when there are potentially long operations ahead so other threads
 * waiting on the lock will not actively spin but sleep instead.
 *
 * The rwlock is released and blocking writers is set.
 */
void btrfs_set_lock_blocking_write(struct extent_buffer *eb)
{
	trace_btrfs_set_lock_blocking_write(eb);
	/*
	 * No lock is required. The lock owner may change if we have a read
	 * lock, but it won't change to or away from us. If we have the write
	 * lock, we are the owner and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	if (eb->blocking_writers == 0) {
		btrfs_assert_spinning_writers_put(eb);
		btrfs_assert_tree_locked(eb);
		WRITE_ONCE(eb->blocking_writers, 1);
		write_unlock(&eb->lock);
	}
}

/*
 * Lock the extent buffer for read. Wait for any writers (spinning or blocking).
 * Can be nested in write lock by the same thread.
 *
 * Use when the locked section does only lightweight actions and busy waiting
 * would be cheaper than making other threads do the wait/wake loop.
 *
 * The rwlock is held upon exit.
 */
void btrfs_tree_read_lock(struct extent_buffer *eb)
{
	u64 start_ns = 0;

	if (trace_btrfs_tree_read_lock_enabled())
		start_ns = ktime_get_ns();
again:
	read_lock(&eb->lock);
	BUG_ON(eb->blocking_writers == 0 &&
	       current->pid == eb->lock_owner);
	if (eb->blocking_writers) {
		if (current->pid == eb->lock_owner) {
			/*
			 * This extent is already write-locked by our thread.
			 * We allow an additional read lock to be added because
			 * it's for the same thread. btrfs_find_all_roots()
			 * depends on this as it may be called on a partly
			 * (write-)locked tree.
			 */
			BUG_ON(eb->lock_nested);
			eb->lock_nested = true;
			read_unlock(&eb->lock);
			trace_btrfs_tree_read_lock(eb, start_ns);
			return;
		}
		read_unlock(&eb->lock);
		wait_event(eb->write_lock_wq,
			   READ_ONCE(eb->blocking_writers) == 0);
		goto again;
	}
	btrfs_assert_tree_read_locks_get(eb);
	btrfs_assert_spinning_readers_get(eb);
	trace_btrfs_tree_read_lock(eb, start_ns);
}

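/*
 * Illustrative sketch (not part of the original file): the one-level nesting
 * permitted by the branch above. The eb pointer is a hypothetical placeholder.
 *
 *	btrfs_tree_lock(eb);			- thread T takes the write lock
 *	btrfs_set_lock_blocking_write(eb);	- rwlock released, blocking_writers = 1
 *	btrfs_tree_read_lock(eb);		- same T: only sets eb->lock_nested
 *	...
 *	btrfs_tree_read_unlock(eb);		- clears lock_nested, rwlock untouched
 *	btrfs_tree_unlock(eb);			- wakes any waiters
 */
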
/*
 * Lock extent buffer for read, optimistically expecting that there are no
 * contending blocking writers. If there are, don't wait.
 *
 * Return 1 if the rwlock has been taken, 0 otherwise
 */
int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
{
	if (READ_ONCE(eb->blocking_writers))
		return 0;

	read_lock(&eb->lock);
	/* Refetch value after lock */
	if (READ_ONCE(eb->blocking_writers)) {
		read_unlock(&eb->lock);
		return 0;
	}
	btrfs_assert_tree_read_locks_get(eb);
	btrfs_assert_spinning_readers_get(eb);
	trace_btrfs_tree_read_lock_atomic(eb);
	return 1;
}

/*
 * Try-lock for read. Don't block or wait for contending writers.
 *
 * Return 1 if the rwlock has been taken, 0 otherwise
 */
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
	if (READ_ONCE(eb->blocking_writers))
		return 0;

	if (!read_trylock(&eb->lock))
		return 0;

	/* Refetch value after lock */
	if (READ_ONCE(eb->blocking_writers)) {
		read_unlock(&eb->lock);
		return 0;
	}
	btrfs_assert_tree_read_locks_get(eb);
	btrfs_assert_spinning_readers_get(eb);
	trace_btrfs_try_tree_read_lock(eb);
	return 1;
}

/*
 * Try-lock for write. May block until the lock is uncontended, but does not
 * wait until it is free.
 *
 * Return 1 if the rwlock has been taken, 0 otherwise
 */
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
	if (READ_ONCE(eb->blocking_writers) || atomic_read(&eb->blocking_readers))
		return 0;

	write_lock(&eb->lock);
	/* Refetch value after lock */
	if (READ_ONCE(eb->blocking_writers) || atomic_read(&eb->blocking_readers)) {
		write_unlock(&eb->lock);
		return 0;
	}
	btrfs_assert_tree_write_locks_get(eb);
	btrfs_assert_spinning_writers_get(eb);
	eb->lock_owner = current->pid;
	trace_btrfs_try_tree_write_lock(eb);
	return 1;
}

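/*
 * Illustrative sketch (not part of the original file): a common try-lock
 * pattern, falling back to the full blocking lock on contention. The eb
 * pointer is a hypothetical placeholder.
 *
 *	if (!btrfs_try_tree_write_lock(eb))
 *		btrfs_tree_lock(eb);	- waits for blocking readers/writers
 */
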
/*
 * Release read lock. Must be used only if the lock is in spinning mode. If
 * the read lock is nested, must pair with read lock before the write unlock.
 *
 * The rwlock is not held upon exit.
 */
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
	trace_btrfs_tree_read_unlock(eb);
	/*
	 * if we're nested, we have the write lock. No new locking
	 * is needed as long as we are the lock owner.
	 * The write unlock will do a barrier for us, and the lock_nested
	 * field only matters to the lock owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = false;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	btrfs_assert_spinning_readers_put(eb);
	btrfs_assert_tree_read_locks_put(eb);
	read_unlock(&eb->lock);
}

/*
 * Release read lock, previously set to blocking by a pairing call to
 * btrfs_set_lock_blocking_read(). Can be nested in write lock by the same
 * thread.
 *
 * State of rwlock is unchanged, last reader wakes waiting threads.
 */
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
{
	trace_btrfs_tree_read_unlock_blocking(eb);
	/*
	 * if we're nested, we have the write lock. No new locking
	 * is needed as long as we are the lock owner.
	 * The write unlock will do a barrier for us, and the lock_nested
	 * field only matters to the lock owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = false;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->blocking_readers) == 0);
	/* atomic_dec_and_test implies a barrier */
	if (atomic_dec_and_test(&eb->blocking_readers))
		cond_wake_up_nomb(&eb->read_lock_wq);
	btrfs_assert_tree_read_locks_put(eb);
}

/*
 * Lock for write. Wait for all blocking and spinning readers and writers. This
 * starts context where reader lock could be nested by the same thread.
 *
 * The rwlock is held for write upon exit.
 */
void btrfs_tree_lock(struct extent_buffer *eb)
	__acquires(&eb->lock)
{
	u64 start_ns = 0;

	if (trace_btrfs_tree_lock_enabled())
		start_ns = ktime_get_ns();

	WARN_ON(eb->lock_owner == current->pid);
again:
	wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
	wait_event(eb->write_lock_wq, READ_ONCE(eb->blocking_writers) == 0);
	write_lock(&eb->lock);
	/* Refetch value after lock */
	if (atomic_read(&eb->blocking_readers) ||
	    READ_ONCE(eb->blocking_writers)) {
		write_unlock(&eb->lock);
		goto again;
	}
	btrfs_assert_spinning_writers_get(eb);
	btrfs_assert_tree_write_locks_get(eb);
	eb->lock_owner = current->pid;
	trace_btrfs_tree_lock(eb, start_ns);
}

/*
 * Release the write lock, either blocking or spinning (ie. there's no need
 * for an explicit blocking unlock, like btrfs_tree_read_unlock_blocking).
 * This also ends the context for nesting, the read lock must have been
 * released already.
 *
 * Tasks blocked and waiting are woken, rwlock is not held upon exit.
 */
void btrfs_tree_unlock(struct extent_buffer *eb)
{
	/*
	 * This is read both locked and unlocked but always by the same thread
	 * that already owns the lock so we don't need to use READ_ONCE
	 */
	int blockers = eb->blocking_writers;

	BUG_ON(blockers > 1);

	btrfs_assert_tree_locked(eb);
	trace_btrfs_tree_unlock(eb);
	eb->lock_owner = 0;
	btrfs_assert_tree_write_locks_put(eb);

	if (blockers) {
		btrfs_assert_no_spinning_writers(eb);
		/* Unlocked write */
		WRITE_ONCE(eb->blocking_writers, 0);
		/*
		 * We need to order modifying blocking_writers above with
		 * actually waking up the sleepers to ensure they see the
		 * updated value of blocking_writers
		 */
		cond_wake_up(&eb->write_lock_wq);
	} else {
		btrfs_assert_spinning_writers_put(eb);
		write_unlock(&eb->lock);
	}
}

/*
 * Set all locked nodes in the path to blocking locks. This should be done
 * before scheduling.
 */
void btrfs_set_path_blocking(struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		if (!p->nodes[i] || !p->locks[i])
			continue;
		/*
		 * If we currently have a spinning reader or writer lock this
		 * will bump the count of blocking holders and drop the
		 * spinlock.
		 */
		if (p->locks[i] == BTRFS_READ_LOCK) {
			btrfs_set_lock_blocking_read(p->nodes[i]);
			p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
		} else if (p->locks[i] == BTRFS_WRITE_LOCK) {
			btrfs_set_lock_blocking_write(p->nodes[i]);
			p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
		}
	}
}

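/*
 * Illustrative sketch (not part of the original file): convert all locks held
 * along a path to blocking mode before potentially sleeping. The path pointer
 * and the operation are hypothetical placeholders.
 *
 *	btrfs_set_path_blocking(path);	- spinning locks become blocking
 *	... now safe to sleep, eg. for IO or allocations ...
 */
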
/*
 * This releases any locks held in the path starting at level and going all the
 * way up to the root.
 *
 * btrfs_search_slot will keep the lock held on higher nodes in a few corner
 * cases, such as COW of the block at slot zero in the node. This ignores
 * those rules, and it should only be called when there are no more updates to
 * be done higher up in the tree.
 */
void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
{
	int i;

	if (path->keep_locks)
		return;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		if (!path->nodes[i])
			continue;
		if (!path->locks[i])
			continue;
		btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
		path->locks[i] = 0;
	}
}

/*
 * Loop around taking references on and locking the root node of the tree until
 * we end up with a lock on the root node.
 *
 * Return: root extent buffer with write lock held
 */
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}

/*
 * Loop around taking references on and locking the root node of the tree until
 * we end up with a lock on the root node.
 *
 * Return: root extent buffer with read lock held
 */
struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_read_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}

/*
 * DREW locks
 * ==========
 *
 * DREW stands for double-reader-writer-exclusion lock. It's used in situations
 * where you want to provide A-B exclusion but not AA or BB.
 *
 * The current implementation gives more priority to readers. If a reader and a
 * writer both race to acquire their respective sides of the lock, the writer
 * yields its lock as soon as it detects a concurrent reader. Additionally, if
 * there are pending readers, no new writers are allowed to come in and
 * acquire the lock.
 */

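/*
 * Illustrative sketch (not part of the original file): A-B exclusion with a
 * DREW lock. Several A holders (or several B holders) may overlap, an A and
 * a B holder may not. The lock variable and both operations are hypothetical
 * placeholders.
 *
 *	btrfs_drew_read_lock(&lock);	- side A, shared among A holders
 *	do_operation_a();
 *	btrfs_drew_read_unlock(&lock);
 *
 *	btrfs_drew_write_lock(&lock);	- side B, excluded from A but not B
 *	do_operation_b();
 *	btrfs_drew_write_unlock(&lock);
 */
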
int btrfs_drew_lock_init(struct btrfs_drew_lock *lock)
{
	int ret;

	ret = percpu_counter_init(&lock->writers, 0, GFP_KERNEL);
	if (ret)
		return ret;

	atomic_set(&lock->readers, 0);
	init_waitqueue_head(&lock->pending_readers);
	init_waitqueue_head(&lock->pending_writers);

	return 0;
}

void btrfs_drew_lock_destroy(struct btrfs_drew_lock *lock)
{
	percpu_counter_destroy(&lock->writers);
}

/* Return true if acquisition is successful, false otherwise */
bool btrfs_drew_try_write_lock(struct btrfs_drew_lock *lock)
{
	if (atomic_read(&lock->readers))
		return false;

	percpu_counter_inc(&lock->writers);

	/* Ensure writers count is updated before we check for pending readers */
	smp_mb();
	if (atomic_read(&lock->readers)) {
		btrfs_drew_write_unlock(lock);
		return false;
	}

	return true;
}

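/*
 * Added commentary (an assumption based on the barriers as written): the
 * smp_mb() above pairs with the smp_mb__after_atomic() in
 * btrfs_drew_read_lock(). Either the writer sees the incremented readers
 * count and backs off, or the reader sees the incremented writers sum and
 * sleeps until the writers drain; both sides proceeding at once is
 * impossible.
 */
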
void btrfs_drew_write_lock(struct btrfs_drew_lock *lock)
{
	while (true) {
		if (btrfs_drew_try_write_lock(lock))
			return;
		wait_event(lock->pending_writers, !atomic_read(&lock->readers));
	}
}

void btrfs_drew_write_unlock(struct btrfs_drew_lock *lock)
{
	percpu_counter_dec(&lock->writers);
	cond_wake_up(&lock->pending_readers);
}

void btrfs_drew_read_lock(struct btrfs_drew_lock *lock)
{
	atomic_inc(&lock->readers);

	/*
	 * Ensure the pending reader count is perceived BEFORE this reader
	 * goes to sleep in case of active writers. This guarantees new writers
	 * won't be allowed and that the current reader will be woken up when
	 * the last active writer finishes its jobs.
	 */
	smp_mb__after_atomic();

	wait_event(lock->pending_readers,
		   percpu_counter_sum(&lock->writers) == 0);
}

void btrfs_drew_read_unlock(struct btrfs_drew_lock *lock)
{
	/*
	 * atomic_dec_and_test implies a full barrier, so woken up writers
	 * are guaranteed to see the decrement
	 */
	if (atomic_dec_and_test(&lock->readers))
		wake_up(&lock->pending_writers);
}