// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"

static void btrfs_assert_tree_read_locked(struct extent_buffer *eb);

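/*
 * Extent buffer locking overview (a summary of the scheme implemented
 * below):
 *
 * Each tree lock can be held in one of two states.  A "spinning" lock
 * is the plain rwlock (eb->lock) and must not be held across anything
 * that sleeps.  A holder that needs to sleep first converts its lock
 * to "blocking": the matching blocking counter is bumped and the
 * rwlock itself is dropped, so other spinners are not held up while
 * the owner sleeps.  Waiters sleep on the two wait queues and are
 * woken when the blocking counters drop back to zero.  The read_locks,
 * write_locks, spinning_* and blocking_* atomics back the assertions
 * and the state transitions below.
 */
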
/*
 * if we currently have a spinning reader or writer lock (indicated by
 * the rw flag) this will bump the count of blocking holders and drop
 * the spinlock.
 */
void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
{
	/*
	 * no lock is required.  The lock owner may change if we have a
	 * read lock, but it won't change to or away from us.  If we have
	 * the write lock, we are the owner and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	if (rw == BTRFS_WRITE_LOCK) {
		if (atomic_read(&eb->blocking_writers) == 0) {
			WARN_ON(atomic_read(&eb->spinning_writers) != 1);
			atomic_dec(&eb->spinning_writers);
			btrfs_assert_tree_locked(eb);
			atomic_inc(&eb->blocking_writers);
			write_unlock(&eb->lock);
		}
	} else if (rw == BTRFS_READ_LOCK) {
		btrfs_assert_tree_read_locked(eb);
		atomic_inc(&eb->blocking_readers);
		WARN_ON(atomic_read(&eb->spinning_readers) == 0);
		atomic_dec(&eb->spinning_readers);
		read_unlock(&eb->lock);
	}
}

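/*
 * Illustrative lifecycle of the blocking conversion (a sketch, not a
 * verbatim caller):
 *
 *	btrfs_tree_lock(eb);		// spinning write lock
 *	btrfs_set_lock_blocking_rw(eb, BTRFS_WRITE_LOCK);
 *	// ... work that may sleep ...
 *	btrfs_clear_lock_blocking_rw(eb, BTRFS_WRITE_LOCK_BLOCKING);
 *	btrfs_tree_unlock(eb);
 */
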
/*
 * if we currently have a blocking lock, take the spinlock and drop our
 * blocking count
 */
void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
{
	/*
	 * no lock is required.  The lock owner may change if we have a
	 * read lock, but it won't change to or away from us.  If we have
	 * the write lock, we are the owner and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;

	if (rw == BTRFS_WRITE_LOCK_BLOCKING) {
		BUG_ON(atomic_read(&eb->blocking_writers) != 1);
		write_lock(&eb->lock);
		WARN_ON(atomic_read(&eb->spinning_writers));
		atomic_inc(&eb->spinning_writers);
		/* atomic_dec_and_test implies a barrier */
		if (atomic_dec_and_test(&eb->blocking_writers))
			cond_wake_up_nomb(&eb->write_lock_wq);
	} else if (rw == BTRFS_READ_LOCK_BLOCKING) {
		BUG_ON(atomic_read(&eb->blocking_readers) == 0);
		read_lock(&eb->lock);
		atomic_inc(&eb->spinning_readers);
		/* atomic_dec_and_test implies a barrier */
		if (atomic_dec_and_test(&eb->blocking_readers))
			cond_wake_up_nomb(&eb->read_lock_wq);
	}
}

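/*
 * Note on the wakeups above: atomic_dec_and_test() implies a full
 * barrier, so the no-memory-barrier variant cond_wake_up_nomb() is
 * sufficient and a second, redundant smp_mb() is avoided.
 */
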
/*
 * take a spinning read lock.  This will wait for any blocking writers.
 */
void btrfs_tree_read_lock(struct extent_buffer *eb)
{
again:
	/* a spinning write lock held by us would deadlock on read_lock */
	BUG_ON(!atomic_read(&eb->blocking_writers) &&
	       current->pid == eb->lock_owner);

	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers) &&
	    current->pid == eb->lock_owner) {
		/*
		 * This extent is already write-locked by our thread.  We
		 * allow an additional read lock to be added because it's
		 * for the same thread.  btrfs_find_all_roots() depends on
		 * this as it may be called on a partly (write-)locked tree.
		 */
		BUG_ON(eb->lock_nested);
		eb->lock_nested = 1;
		read_unlock(&eb->lock);
		return;
	}
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		wait_event(eb->write_lock_wq,
			   atomic_read(&eb->blocking_writers) == 0);
		goto again;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
}

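/*
 * Note the pattern above: read_lock() can succeed even while a writer
 * logically holds the lock in blocking mode, because the rwlock was
 * dropped on conversion.  blocking_writers is therefore rechecked
 * under the rwlock, and on a lost race we drop it, wait for the
 * blocking writer to go away and retry from again:.
 */
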
/*
 * take a spinning read lock.
 * returns 1 if we get the read lock and 0 if we don't
 * this won't wait for blocking writers
 */
int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers))
		return 0;

	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
	return 1;
}

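/*
 * Unlike btrfs_try_tree_read_lock() below, this variant still takes
 * read_lock() unconditionally (and may spin on it); it only refuses
 * to wait for blocking writers.
 */
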
/*
 * returns 1 if we get the read lock and 0 if we don't
 * this won't wait for blocking writers
 */
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers))
		return 0;

	if (!read_trylock(&eb->lock))
		return 0;

	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
	return 1;
}

/*
 * returns 1 if we get the write lock and 0 if we don't
 * this won't wait for blocking writers or readers
 */
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers))
		return 0;

	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers)) {
		write_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->write_locks);
	atomic_inc(&eb->spinning_writers);
	eb->lock_owner = current->pid;
	return 1;
}

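/*
 * A plausible trylock pattern (a sketch, not a verbatim caller):
 * attempt the non-waiting path first, fall back to the full lock:
 *
 *	if (!btrfs_try_tree_write_lock(eb))
 *		btrfs_tree_lock(eb);
 */
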
/*
 * drop a spinning read lock
 */
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
	/*
	 * if we're nested, we have the write lock.  No new locking is
	 * needed as long as we are the lock owner.  The write unlock will
	 * do a barrier for us, and the lock_nested field only matters to
	 * the lock owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = 0;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->spinning_readers) == 0);
	atomic_dec(&eb->spinning_readers);
	atomic_dec(&eb->read_locks);
	read_unlock(&eb->lock);
}

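/*
 * The nested case above only needs to clear lock_nested: the nested
 * read lock taken in btrfs_tree_read_lock() left no rwlock or counter
 * state behind, so there is nothing else to undo.
 */
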
/*
 * drop a blocking read lock
 */
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
{
	/*
	 * if we're nested, we have the write lock.  No new locking is
	 * needed as long as we are the lock owner.  The write unlock will
	 * do a barrier for us, and the lock_nested field only matters to
	 * the lock owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = 0;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->blocking_readers) == 0);
	/* atomic_dec_and_test implies a barrier */
	if (atomic_dec_and_test(&eb->blocking_readers))
		cond_wake_up_nomb(&eb->read_lock_wq);
	atomic_dec(&eb->read_locks);
}

/*
 * take a spinning write lock.  This will wait for both blocking
 * readers and writers.
 */
void btrfs_tree_lock(struct extent_buffer *eb)
{
	WARN_ON(eb->lock_owner == current->pid);
again:
	wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
	wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_readers)) {
		write_unlock(&eb->lock);
		wait_event(eb->read_lock_wq,
			   atomic_read(&eb->blocking_readers) == 0);
		goto again;
	}
	if (atomic_read(&eb->blocking_writers)) {
		write_unlock(&eb->lock);
		wait_event(eb->write_lock_wq,
			   atomic_read(&eb->blocking_writers) == 0);
		goto again;
	}
	WARN_ON(atomic_read(&eb->spinning_writers));
	atomic_inc(&eb->spinning_writers);
	atomic_inc(&eb->write_locks);
	eb->lock_owner = current->pid;
}

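/*
 * The wait_event() calls above are an optimistic fast path that lets
 * blocking holders drain before we contend on the rwlock; the counts
 * are then rechecked under write_lock(), and on a lost race the
 * rwlock is dropped and we start over at again:.
 */
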
/*
 * drop a spinning or a blocking write lock.
 */
void btrfs_tree_unlock(struct extent_buffer *eb)
{
	int blockers = atomic_read(&eb->blocking_writers);

	BUG_ON(blockers > 1);

	btrfs_assert_tree_locked(eb);
	eb->lock_owner = 0;
	atomic_dec(&eb->write_locks);

	if (blockers) {
		WARN_ON(atomic_read(&eb->spinning_writers));
		atomic_dec(&eb->blocking_writers);
		/* Use the lighter barrier after atomic */
		smp_mb__after_atomic();
		cond_wake_up_nomb(&eb->write_lock_wq);
	} else {
		WARN_ON(atomic_read(&eb->spinning_writers) != 1);
		atomic_dec(&eb->spinning_writers);
		write_unlock(&eb->lock);
	}
}

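/*
 * In the blockers case above the rwlock was already dropped when the
 * lock was converted to blocking, so only the blocking count is
 * dropped and waiters are woken; a spinning write lock still holds
 * the rwlock and needs the write_unlock().
 */
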
void btrfs_assert_tree_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->write_locks));
}

static void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->read_locks));
}
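
/*
 * Illustrative sketch of the nested read lock special case handled in
 * btrfs_tree_read_lock() (a sketch, not a verbatim caller):
 *
 *	btrfs_tree_lock(eb);		// we become eb->lock_owner
 *	btrfs_set_lock_blocking_rw(eb, BTRFS_WRITE_LOCK);
 *	btrfs_tree_read_lock(eb);	// sets eb->lock_nested, no counters
 *	btrfs_tree_read_unlock(eb);	// clears eb->lock_nested only
 *	btrfs_clear_lock_blocking_rw(eb, BTRFS_WRITE_LOCK_BLOCKING);
 *	btrfs_tree_unlock(eb);
 */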