fs/btrfs/locking.c

/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"

void btrfs_assert_tree_read_locked(struct extent_buffer *eb);

/*
 * if we currently have a spinning reader or writer lock
 * (indicated by the rw flag) this will bump the count
 * of blocking holders and drop the spinlock.
 */
void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
{
	if (eb->lock_nested) {
		read_lock(&eb->lock);
		if (eb->lock_nested && current->pid == eb->lock_owner) {
			read_unlock(&eb->lock);
			return;
		}
		read_unlock(&eb->lock);
	}
	if (rw == BTRFS_WRITE_LOCK) {
		if (atomic_read(&eb->blocking_writers) == 0) {
			WARN_ON(atomic_read(&eb->spinning_writers) != 1);
			atomic_dec(&eb->spinning_writers);
			btrfs_assert_tree_locked(eb);
			atomic_inc(&eb->blocking_writers);
			write_unlock(&eb->lock);
		}
	} else if (rw == BTRFS_READ_LOCK) {
		btrfs_assert_tree_read_locked(eb);
		atomic_inc(&eb->blocking_readers);
		WARN_ON(atomic_read(&eb->spinning_readers) == 0);
		atomic_dec(&eb->spinning_readers);
		read_unlock(&eb->lock);
	}
	return;
}

/*
 * if we currently have a blocking lock, take the spinlock
 * and drop our blocking count
 */
void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
{
	if (eb->lock_nested) {
		read_lock(&eb->lock);
		if (eb->lock_nested && current->pid == eb->lock_owner) {
			read_unlock(&eb->lock);
			return;
		}
		read_unlock(&eb->lock);
	}
	if (rw == BTRFS_WRITE_LOCK_BLOCKING) {
		BUG_ON(atomic_read(&eb->blocking_writers) != 1);
		write_lock(&eb->lock);
		WARN_ON(atomic_read(&eb->spinning_writers));
		atomic_inc(&eb->spinning_writers);
		if (atomic_dec_and_test(&eb->blocking_writers))
			wake_up(&eb->write_lock_wq);
	} else if (rw == BTRFS_READ_LOCK_BLOCKING) {
		BUG_ON(atomic_read(&eb->blocking_readers) == 0);
		read_lock(&eb->lock);
		atomic_inc(&eb->spinning_readers);
		if (atomic_dec_and_test(&eb->blocking_readers))
			wake_up(&eb->read_lock_wq);
	}
	return;
}
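
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * holder of a spinning lock converts it to blocking before work that
 * may sleep, then converts it back once spinning is cheap again:
 *
 *	btrfs_tree_lock(eb);
 *	btrfs_set_lock_blocking_rw(eb, BTRFS_WRITE_LOCK);
 *	... work that may sleep, e.g. disk IO ...
 *	btrfs_clear_lock_blocking_rw(eb, BTRFS_WRITE_LOCK_BLOCKING);
 *	btrfs_tree_unlock(eb);
 */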

/*
 * take a spinning read lock. This will wait for any blocking
 * writers
 */
void btrfs_tree_read_lock(struct extent_buffer *eb)
{
again:
	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers) &&
	    current->pid == eb->lock_owner) {
		/*
		 * This extent is already write-locked by our thread. We allow
		 * an additional read lock to be added because it's for the same
		 * thread. btrfs_find_all_roots() depends on this as it may be
		 * called on a partly (write-)locked tree.
		 */
		BUG_ON(eb->lock_nested);
		eb->lock_nested = 1;
		read_unlock(&eb->lock);
		return;
	}
	read_unlock(&eb->lock);
	wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		goto again;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
}
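
/*
 * Illustrative sketch (not from the original file) of the nested case
 * handled above: a thread that already holds a blocking write lock on
 * eb may take a read lock on the same eb without deadlocking, which
 * btrfs_find_all_roots() relies on when walking a partly write-locked
 * tree:
 *
 *	btrfs_tree_lock(eb);
 *	btrfs_set_lock_blocking_rw(eb, BTRFS_WRITE_LOCK);
 *	btrfs_tree_read_lock(eb);	(sets eb->lock_nested)
 *	btrfs_tree_read_unlock(eb);	(clears eb->lock_nested)
 */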

/*
 * returns 1 if we get the read lock and 0 if we don't
 * this won't wait for blocking writers
 */
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers))
		return 0;

	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
	return 1;
}

/*
 * returns 1 if we get the write lock and 0 if we don't
 * this won't wait for blocking writers or readers
 */
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers))
		return 0;
	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers)) {
		write_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->write_locks);
	atomic_inc(&eb->spinning_writers);
	eb->lock_owner = current->pid;
	return 1;
}
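
/*
 * Illustrative caller pattern (a sketch, not from this file): the try
 * variants let a caller opportunistically take the spinning lock and
 * fall back to the sleeping path when blocking holders are present:
 *
 *	if (!btrfs_try_tree_write_lock(eb))
 *		btrfs_tree_lock(eb);
 */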

/*
 * drop a spinning read lock
 */
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
	if (eb->lock_nested) {
		read_lock(&eb->lock);
		if (eb->lock_nested && current->pid == eb->lock_owner) {
			eb->lock_nested = 0;
			read_unlock(&eb->lock);
			return;
		}
		read_unlock(&eb->lock);
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->spinning_readers) == 0);
	atomic_dec(&eb->spinning_readers);
	atomic_dec(&eb->read_locks);
	read_unlock(&eb->lock);
}

/*
 * drop a blocking read lock
 */
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
{
	if (eb->lock_nested) {
		read_lock(&eb->lock);
		if (eb->lock_nested && current->pid == eb->lock_owner) {
			eb->lock_nested = 0;
			read_unlock(&eb->lock);
			return;
		}
		read_unlock(&eb->lock);
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->blocking_readers) == 0);
	if (atomic_dec_and_test(&eb->blocking_readers))
		wake_up(&eb->read_lock_wq);
	atomic_dec(&eb->read_locks);
}
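
/*
 * Sketch of the blocking-read lifecycle (illustrative only): a read
 * lock that was converted to blocking is dropped directly with the
 * _blocking variant above, rather than being converted back to a
 * spinning lock first:
 *
 *	btrfs_tree_read_lock(eb);
 *	btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
 *	... work that may sleep ...
 *	btrfs_tree_read_unlock_blocking(eb);
 */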

/*
 * take a spinning write lock. This will wait for both
 * blocking readers and writers
 */
int btrfs_tree_lock(struct extent_buffer *eb)
{
again:
	wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
	wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_readers)) {
		write_unlock(&eb->lock);
		wait_event(eb->read_lock_wq,
			   atomic_read(&eb->blocking_readers) == 0);
		goto again;
	}
	if (atomic_read(&eb->blocking_writers)) {
		write_unlock(&eb->lock);
		wait_event(eb->write_lock_wq,
			   atomic_read(&eb->blocking_writers) == 0);
		goto again;
	}
	WARN_ON(atomic_read(&eb->spinning_writers));
	atomic_inc(&eb->spinning_writers);
	atomic_inc(&eb->write_locks);
	eb->lock_owner = current->pid;
	return 0;
}

/*
 * drop a spinning or a blocking write lock.
 */
int btrfs_tree_unlock(struct extent_buffer *eb)
{
	int blockers = atomic_read(&eb->blocking_writers);

	BUG_ON(blockers > 1);

	btrfs_assert_tree_locked(eb);
	atomic_dec(&eb->write_locks);

	if (blockers) {
		WARN_ON(atomic_read(&eb->spinning_writers));
		atomic_dec(&eb->blocking_writers);
		smp_wmb();
		wake_up(&eb->write_lock_wq);
	} else {
		WARN_ON(atomic_read(&eb->spinning_writers) != 1);
		atomic_dec(&eb->spinning_writers);
		write_unlock(&eb->lock);
	}
	return 0;
}
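
/*
 * Sketch of the write-lock lifecycle (illustrative only): because
 * btrfs_tree_unlock() above handles both the spinning and the blocking
 * state, a writer that converted its lock to blocking may unlock
 * directly, without calling btrfs_clear_lock_blocking_rw() first:
 *
 *	btrfs_tree_lock(eb);
 *	btrfs_set_lock_blocking_rw(eb, BTRFS_WRITE_LOCK);
 *	... work that may sleep ...
 *	btrfs_tree_unlock(eb);
 */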

void btrfs_assert_tree_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->write_locks));
}

void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->read_locks));
}