// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"

static void btrfs_assert_tree_read_locked(struct extent_buffer *eb);

/*
 * if we currently have a spinning reader or writer lock
 * (indicated by the rw flag) this will bump the count
 * of blocking holders and drop the spinlock.
 */
void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
{
	/*
	 * no lock is required. The lock owner may change if
	 * we have a read lock, but it won't change to or away
	 * from us. If we have the write lock, we are the owner
	 * and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	if (rw == BTRFS_WRITE_LOCK) {
		if (atomic_read(&eb->blocking_writers) == 0) {
			WARN_ON(atomic_read(&eb->spinning_writers) != 1);
			atomic_dec(&eb->spinning_writers);
			btrfs_assert_tree_locked(eb);
			atomic_inc(&eb->blocking_writers);
			write_unlock(&eb->lock);
		}
	} else if (rw == BTRFS_READ_LOCK) {
		btrfs_assert_tree_read_locked(eb);
		atomic_inc(&eb->blocking_readers);
		WARN_ON(atomic_read(&eb->spinning_readers) == 0);
		atomic_dec(&eb->spinning_readers);
		read_unlock(&eb->lock);
	}
}
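
/*
 * Illustrative pairing only (not a call path copied from btrfs): a
 * task holding the spinning write lock converts it before it sleeps,
 * so other tasks stop spinning on eb->lock while it is away:
 *
 *	btrfs_tree_lock(eb);
 *	btrfs_set_lock_blocking_rw(eb, BTRFS_WRITE_LOCK);
 *	...work that may sleep, e.g. reading a block from disk...
 *	btrfs_clear_lock_blocking_rw(eb, BTRFS_WRITE_LOCK_BLOCKING);
 *	btrfs_tree_unlock(eb);
 */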

/*
 * if we currently have a blocking lock, take the spinlock
 * and drop our blocking count
 */
void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
{
	/*
	 * no lock is required. The lock owner may change if
	 * we have a read lock, but it won't change to or away
	 * from us. If we have the write lock, we are the owner
	 * and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;

	if (rw == BTRFS_WRITE_LOCK_BLOCKING) {
		BUG_ON(atomic_read(&eb->blocking_writers) != 1);
		write_lock(&eb->lock);
		WARN_ON(atomic_read(&eb->spinning_writers));
		atomic_inc(&eb->spinning_writers);
		/*
		 * atomic_dec_and_test implies a barrier for waitqueue_active
		 */
		if (atomic_dec_and_test(&eb->blocking_writers) &&
		    waitqueue_active(&eb->write_lock_wq))
			wake_up(&eb->write_lock_wq);
	} else if (rw == BTRFS_READ_LOCK_BLOCKING) {
		BUG_ON(atomic_read(&eb->blocking_readers) == 0);
		read_lock(&eb->lock);
		atomic_inc(&eb->spinning_readers);
		/*
		 * atomic_dec_and_test implies a barrier for waitqueue_active
		 */
		if (atomic_dec_and_test(&eb->blocking_readers) &&
		    waitqueue_active(&eb->read_lock_wq))
			wake_up(&eb->read_lock_wq);
	}
}
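
/*
 * Summary of the rw pairing for the two helpers above: a lock made
 * blocking with BTRFS_WRITE_LOCK is cleared with
 * BTRFS_WRITE_LOCK_BLOCKING, and one made blocking with
 * BTRFS_READ_LOCK is cleared with BTRFS_READ_LOCK_BLOCKING.
 */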

/*
 * take a spinning read lock. This will wait for any blocking
 * writers
 */
void btrfs_tree_read_lock(struct extent_buffer *eb)
{
again:
	BUG_ON(!atomic_read(&eb->blocking_writers) &&
	       current->pid == eb->lock_owner);

	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers) &&
	    current->pid == eb->lock_owner) {
		/*
		 * This extent is already write-locked by our thread. We allow
		 * an additional read lock to be added because it's for the same
		 * thread. btrfs_find_all_roots() depends on this as it may be
		 * called on a partly (write-)locked tree.
		 */
		BUG_ON(eb->lock_nested);
		eb->lock_nested = 1;
		read_unlock(&eb->lock);
		return;
	}
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		wait_event(eb->write_lock_wq,
			   atomic_read(&eb->blocking_writers) == 0);
		goto again;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
}
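
/*
 * Sketch of the nested case above (hypothetical caller, shown only to
 * illustrate why the owner check exists): a thread that already holds
 * a blocking write lock may take an extra read lock on the same eb,
 * as btrfs_find_all_roots() relies on:
 *
 *	btrfs_tree_lock(eb);			lock_owner = our pid
 *	btrfs_set_lock_blocking_rw(eb, BTRFS_WRITE_LOCK);
 *	btrfs_tree_read_lock(eb);	sets lock_nested, does not deadlock
 *	btrfs_tree_read_unlock(eb);	clears lock_nested
 */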

/*
 * take a spinning read lock.
 * returns 1 if we get the read lock and 0 if we don't
 * this won't wait for blocking writers
 */
int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers))
		return 0;

	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
	return 1;
}
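
/*
 * The second blocking_writers check above is not redundant: a spinning
 * writer can convert to blocking between the unlocked peek and our
 * read_lock().  Rechecking under eb->lock guarantees that a return
 * value of 1 means a spinning read lock with no blocking writer.
 */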

/*
 * returns 1 if we get the read lock and 0 if we don't
 * this won't wait for blocking writers
 */
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers))
		return 0;

	if (!read_trylock(&eb->lock))
		return 0;

	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
	return 1;
}
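
/*
 * Typical trylock pattern (hypothetical caller, for illustration):
 *
 *	if (btrfs_try_tree_read_lock(eb)) {
 *		...read the extent buffer...
 *		btrfs_tree_read_unlock(eb);
 *	} else {
 *		...back off, or fall back to btrfs_tree_read_lock()...
 *	}
 */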

/*
 * returns 1 if we get the write lock and 0 if we don't
 * this won't wait for blocking writers or readers
 */
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers))
		return 0;

	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers)) {
		write_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->write_locks);
	atomic_inc(&eb->spinning_writers);
	eb->lock_owner = current->pid;
	return 1;
}
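
/*
 * On success the caller holds the spinning write lock in exactly the
 * state btrfs_tree_lock() would leave it (write_locks, spinning_writers
 * and lock_owner all set), so it is released with the same
 * btrfs_tree_unlock().
 */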

/*
 * drop a spinning read lock
 */
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
	/*
	 * if we're nested, we have the write lock. No new locking
	 * is needed as long as we are the lock owner.
	 * The write unlock will do a barrier for us, and the lock_nested
	 * field only matters to the lock owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = 0;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->spinning_readers) == 0);
	atomic_dec(&eb->spinning_readers);
	atomic_dec(&eb->read_locks);
	read_unlock(&eb->lock);
}

/*
 * drop a blocking read lock
 */
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
{
	/*
	 * if we're nested, we have the write lock. No new locking
	 * is needed as long as we are the lock owner.
	 * The write unlock will do a barrier for us, and the lock_nested
	 * field only matters to the lock owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = 0;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->blocking_readers) == 0);
	/*
	 * atomic_dec_and_test implies a barrier for waitqueue_active
	 */
	if (atomic_dec_and_test(&eb->blocking_readers) &&
	    waitqueue_active(&eb->read_lock_wq))
		wake_up(&eb->read_lock_wq);
	atomic_dec(&eb->read_locks);
}

/*
 * take a spinning write lock. This will wait for both
 * blocking readers and writers
 */
void btrfs_tree_lock(struct extent_buffer *eb)
{
	WARN_ON(eb->lock_owner == current->pid);
again:
	wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
	wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_readers)) {
		write_unlock(&eb->lock);
		wait_event(eb->read_lock_wq,
			   atomic_read(&eb->blocking_readers) == 0);
		goto again;
	}
	if (atomic_read(&eb->blocking_writers)) {
		write_unlock(&eb->lock);
		wait_event(eb->write_lock_wq,
			   atomic_read(&eb->blocking_writers) == 0);
		goto again;
	}
	WARN_ON(atomic_read(&eb->spinning_writers));
	atomic_inc(&eb->spinning_writers);
	atomic_inc(&eb->write_locks);
	eb->lock_owner = current->pid;
}
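
/*
 * The rechecks under eb->lock above matter because the wait_event()
 * calls run unlocked: a reader or writer may go blocking between the
 * wakeup and our write_lock().  In that case we drop eb->lock and loop
 * back to 'again' instead of spinning while the blocker sleeps.
 */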

/*
 * drop a spinning or a blocking write lock.
 */
void btrfs_tree_unlock(struct extent_buffer *eb)
{
	int blockers = atomic_read(&eb->blocking_writers);

	BUG_ON(blockers > 1);

	btrfs_assert_tree_locked(eb);
	eb->lock_owner = 0;
	atomic_dec(&eb->write_locks);

	if (blockers) {
		WARN_ON(atomic_read(&eb->spinning_writers));
		atomic_dec(&eb->blocking_writers);
		/*
		 * Make sure counter is updated before we wake up waiters.
		 */
		smp_mb__after_atomic();
		if (waitqueue_active(&eb->write_lock_wq))
			wake_up(&eb->write_lock_wq);
	} else {
		WARN_ON(atomic_read(&eb->spinning_writers) != 1);
		atomic_dec(&eb->spinning_writers);
		write_unlock(&eb->lock);
	}
}
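
/*
 * Only one branch above touches the rwlock: a blocking write lock
 * already dropped eb->lock in btrfs_set_lock_blocking_rw(), so it only
 * decrements blocking_writers and wakes waiters; a spinning write lock
 * still holds eb->lock and must write_unlock() it here.
 */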

void btrfs_assert_tree_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->write_locks));
}

static void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->read_locks));
}