fs/btrfs/locking.c

/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"

static void btrfs_assert_tree_read_locked(struct extent_buffer *eb);
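
/*
 * Extent buffer locks have two modes:
 *
 *  - spinning: the underlying rwlock (eb->lock) is held and the holder
 *    must not sleep; counted by spinning_readers/spinning_writers.
 *  - blocking: the rwlock is dropped but the holder still logically
 *    owns the lock; counted by blocking_readers/blocking_writers,
 *    with would-be lockers sleeping on read_lock_wq/write_lock_wq
 *    until the counts drop to zero.
 *
 * read_locks/write_locks count holders in either mode and back the
 * assertion helpers at the bottom of this file.
 */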

/*
 * if we currently have a spinning reader or writer lock
 * (indicated by the rw flag) this will bump the count
 * of blocking holders and drop the spinlock.
 */
void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
{
	/*
	 * no lock is required.  The lock owner may change if
	 * we have a read lock, but it won't change to or away
	 * from us.  If we have the write lock, we are the owner
	 * and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	if (rw == BTRFS_WRITE_LOCK) {
		if (atomic_read(&eb->blocking_writers) == 0) {
			WARN_ON(atomic_read(&eb->spinning_writers) != 1);
			atomic_dec(&eb->spinning_writers);
			btrfs_assert_tree_locked(eb);
			atomic_inc(&eb->blocking_writers);
			write_unlock(&eb->lock);
		}
	} else if (rw == BTRFS_READ_LOCK) {
		btrfs_assert_tree_read_locked(eb);
		atomic_inc(&eb->blocking_readers);
		WARN_ON(atomic_read(&eb->spinning_readers) == 0);
		atomic_dec(&eb->spinning_readers);
		read_unlock(&eb->lock);
	}
	return;
}
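
/*
 * A typical caller pattern (a sketch for illustration, not code from
 * this file): take the spinning lock, switch it to blocking before
 * doing anything that might sleep, then drop it:
 *
 *	btrfs_tree_lock(eb);
 *	btrfs_set_lock_blocking_rw(eb, BTRFS_WRITE_LOCK);
 *	... do work that may sleep ...
 *	btrfs_tree_unlock(eb);
 */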

/*
 * if we currently have a blocking lock, take the spinlock
 * and drop our blocking count
 */
void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
{
	/*
	 * no lock is required.  The lock owner may change if
	 * we have a read lock, but it won't change to or away
	 * from us.  If we have the write lock, we are the owner
	 * and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;

	if (rw == BTRFS_WRITE_LOCK_BLOCKING) {
		BUG_ON(atomic_read(&eb->blocking_writers) != 1);
		write_lock(&eb->lock);
		WARN_ON(atomic_read(&eb->spinning_writers));
		atomic_inc(&eb->spinning_writers);
		if (atomic_dec_and_test(&eb->blocking_writers) &&
		    waitqueue_active(&eb->write_lock_wq))
			wake_up(&eb->write_lock_wq);
	} else if (rw == BTRFS_READ_LOCK_BLOCKING) {
		BUG_ON(atomic_read(&eb->blocking_readers) == 0);
		read_lock(&eb->lock);
		atomic_inc(&eb->spinning_readers);
		if (atomic_dec_and_test(&eb->blocking_readers) &&
		    waitqueue_active(&eb->read_lock_wq))
			wake_up(&eb->read_lock_wq);
	}
	return;
}
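
/*
 * Note the pairing: a lock made blocking as BTRFS_WRITE_LOCK must be
 * cleared as BTRFS_WRITE_LOCK_BLOCKING, and BTRFS_READ_LOCK as
 * BTRFS_READ_LOCK_BLOCKING; the BUG_ON()s above catch a clear without
 * a matching blocking holder.
 */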

/*
 * take a spinning read lock.  This will wait for any blocking
 * writers
 */
void btrfs_tree_read_lock(struct extent_buffer *eb)
{
again:
	BUG_ON(!atomic_read(&eb->blocking_writers) &&
	       current->pid == eb->lock_owner);

	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers) &&
	    current->pid == eb->lock_owner) {
		/*
		 * This extent is already write-locked by our thread. We allow
		 * an additional read lock to be added because it's for the same
		 * thread. btrfs_find_all_roots() depends on this as it may be
		 * called on a partly (write-)locked tree.
		 */
		BUG_ON(eb->lock_nested);
		eb->lock_nested = 1;
		read_unlock(&eb->lock);
		return;
	}
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		wait_event(eb->write_lock_wq,
			   atomic_read(&eb->blocking_writers) == 0);
		goto again;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
}

/*
 * returns 1 if we get the read lock and 0 if we don't
 * this won't wait for blocking writers
 */
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers))
		return 0;

	if (!read_trylock(&eb->lock))
		return 0;

	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
	return 1;
}

/*
 * returns 1 if we get the write lock and 0 if we don't
 * this won't wait for blocking writers or readers
 */
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers))
		return 0;

	if (!write_trylock(&eb->lock))
		return 0;

	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers)) {
		write_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->write_locks);
	atomic_inc(&eb->spinning_writers);
	eb->lock_owner = current->pid;
	return 1;
}
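
/*
 * The trylock variants let a caller avoid sleeping, e.g. (a sketch of
 * a hypothetical caller, not code from this file):
 *
 *	if (!btrfs_try_tree_write_lock(eb)) {
 *		... back off, release other locks ...
 *		btrfs_tree_lock(eb);
 *	}
 */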

/*
 * drop a spinning read lock
 */
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
	/*
	 * if we're nested, we have the write lock.  No new locking
	 * is needed as long as we are the lock owner.
	 * The write unlock will do a barrier for us, and the lock_nested
	 * field only matters to the lock owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = 0;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->spinning_readers) == 0);
	atomic_dec(&eb->spinning_readers);
	atomic_dec(&eb->read_locks);
	read_unlock(&eb->lock);
}

/*
 * drop a blocking read lock
 */
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
{
	/*
	 * if we're nested, we have the write lock.  No new locking
	 * is needed as long as we are the lock owner.
	 * The write unlock will do a barrier for us, and the lock_nested
	 * field only matters to the lock owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = 0;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->blocking_readers) == 0);
	if (atomic_dec_and_test(&eb->blocking_readers) &&
	    waitqueue_active(&eb->read_lock_wq))
		wake_up(&eb->read_lock_wq);
	atomic_dec(&eb->read_locks);
}

/*
 * take a spinning write lock.  This will wait for both
 * blocking readers and writers
 */
void btrfs_tree_lock(struct extent_buffer *eb)
{
again:
	wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
	wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_readers)) {
		write_unlock(&eb->lock);
		wait_event(eb->read_lock_wq,
			   atomic_read(&eb->blocking_readers) == 0);
		goto again;
	}
	if (atomic_read(&eb->blocking_writers)) {
		write_unlock(&eb->lock);
		wait_event(eb->write_lock_wq,
			   atomic_read(&eb->blocking_writers) == 0);
		goto again;
	}
	WARN_ON(atomic_read(&eb->spinning_writers));
	atomic_inc(&eb->spinning_writers);
	atomic_inc(&eb->write_locks);
	eb->lock_owner = current->pid;
}
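
/*
 * The wait_event() calls ahead of write_lock() above are only an
 * optimistic fast path: a holder can go blocking again between the
 * wakeup and the write_lock(), so both counts are re-checked under
 * the lock and we retry via the again: label if either is nonzero.
 */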

/*
 * drop a spinning or a blocking write lock.
 */
void btrfs_tree_unlock(struct extent_buffer *eb)
{
	int blockers = atomic_read(&eb->blocking_writers);

	BUG_ON(blockers > 1);

	btrfs_assert_tree_locked(eb);
	eb->lock_owner = 0;
	atomic_dec(&eb->write_locks);

	if (blockers) {
		WARN_ON(atomic_read(&eb->spinning_writers));
		atomic_dec(&eb->blocking_writers);
		smp_mb();
		if (waitqueue_active(&eb->write_lock_wq))
			wake_up(&eb->write_lock_wq);
	} else {
		WARN_ON(atomic_read(&eb->spinning_writers) != 1);
		atomic_dec(&eb->spinning_writers);
		write_unlock(&eb->lock);
	}
}
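
/*
 * The explicit smp_mb() above orders the blocking_writers decrement
 * before the waitqueue_active() check; the paths that use
 * atomic_dec_and_test() get equivalent ordering from that call itself.
 */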

void btrfs_assert_tree_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->write_locks));
}

static void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->read_locks));
}