/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"

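/*
 * Take the per-extent_buffer spinlock.  This wrapper is the single
 * entry point for every spinlock acquisition below; the "nested" name
 * presumably leaves room for a spin_lock_nested() lockdep annotation
 * without touching every caller.
 */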
static inline void spin_nested(struct extent_buffer *eb)
{
	spin_lock(&eb->lock);
}

/*
 * Setting a lock to blocking will drop the spinlock and set the
 * flag that forces other procs who want the lock to wait.  After
 * this you can safely schedule with the lock held.
 */
void btrfs_set_lock_blocking(struct extent_buffer *eb)
{
	if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags)) {
		set_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags);
		spin_unlock(&eb->lock);
	}
	/* exit with the spin lock released and the bit set */
}

/*
 * clearing the blocking flag will take the spinlock again.
 * After this you can't safely schedule
 */
void btrfs_clear_lock_blocking(struct extent_buffer *eb)
{
	if (test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags)) {
		spin_nested(eb);
		clear_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags);
		smp_mb__after_clear_bit();
	}
	/* exit with the spin lock held */
}

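/*
 * Illustrative call sequence (hypothetical caller, not code from this
 * file): a proc that needs to do something that may sleep while it
 * owns the tree lock, such as reading a block from disk, would do:
 *
 *	btrfs_tree_lock(eb);
 *	btrfs_set_lock_blocking(eb);	-- spinlock dropped, bit set
 *	do_something_that_may_sleep();	-- placeholder; may schedule()
 *	btrfs_clear_lock_blocking(eb);	-- spinlock re-taken
 *	btrfs_tree_unlock(eb);
 */
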
/*
 * unfortunately, many of the places that currently set a lock to blocking
 * don't end up blocking for very long, and often they don't block
 * at all.  For a dbench 50 run, if we don't spin on the blocking bit
 * at all, the context switch rate can jump up to 400,000/sec or more.
 *
 * So, we're still stuck with this crummy spin on the blocking bit,
 * at least until the most common causes of the short blocks
 * can be dealt with.
 */
static int btrfs_spin_on_block(struct extent_buffer *eb)
{
	int i;

	for (i = 0; i < 512; i++) {
		if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
			return 1;
		if (need_resched())
			break;
		cpu_relax();
	}
	return 0;
}

/*
 * This is somewhat different from trylock.  It will take the
 * spinlock but if it finds the lock is set to blocking, it will
 * return without the lock held.
 *
 * returns 1 if it was able to take the lock and zero otherwise
 *
 * After this call, scheduling is not safe without first calling
 * btrfs_set_lock_blocking()
 */
int btrfs_try_spin_lock(struct extent_buffer *eb)
{
	int i;

	if (btrfs_spin_on_block(eb)) {
		spin_nested(eb);
		if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
			return 1;
		spin_unlock(&eb->lock);
	}
	/* spin for a bit on the BLOCKING flag */
	for (i = 0; i < 2; i++) {
		cpu_relax();
		if (!btrfs_spin_on_block(eb))
			break;

		spin_nested(eb);
		if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
			return 1;
		spin_unlock(&eb->lock);
	}
	return 0;
}

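/*
 * Sketch of the expected fast-path/slow-path pairing (hypothetical
 * caller, for illustration only): try the cheap spinning lock first
 * and fall back to the full lock, which may sleep:
 *
 *	if (!btrfs_try_spin_lock(eb))
 *		btrfs_tree_lock(eb);
 *	-- either way we now hold the spinlock
 */
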
/*
 * the autoremove wake function will return 0 if it tried to wake up
 * a process that was already awake, which means that process won't
 * count as an exclusive wakeup.  The waitq code will continue waking
 * procs until it finds one that was actually sleeping.
 *
 * For btrfs, this isn't quite what we want.  We want a single proc
 * to be notified that the lock is ready for taking.  If that proc
 * already happens to be awake, great, it will loop around and try for
 * the lock.
 *
 * So, btrfs_wake_function always returns 1, even when the proc that we
 * tried to wake up was already awake.
 */
static int btrfs_wake_function(wait_queue_t *wait, unsigned mode,
			       int sync, void *key)
{
	autoremove_wake_function(wait, mode, sync, key);
	return 1;
}

/*
 * returns with the extent buffer spinlocked.
 *
 * This will spin and/or wait as required to take the lock, and then
 * return with the spinlock held.
 *
 * After this call, scheduling is not safe without first calling
 * btrfs_set_lock_blocking()
 */
int btrfs_tree_lock(struct extent_buffer *eb)
{
	DEFINE_WAIT(wait);
	wait.func = btrfs_wake_function;

	if (!btrfs_spin_on_block(eb))
		goto sleep;

	while (1) {
		spin_nested(eb);

		/* nobody is blocking, exit with the spinlock held */
		if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
			return 0;

		/*
		 * we have the spinlock, but the real owner is blocking.
		 * wait for them
		 */
		spin_unlock(&eb->lock);

		/*
		 * spin for a bit, and if the blocking flag goes away,
		 * loop around
		 */
		cpu_relax();
		if (btrfs_spin_on_block(eb))
			continue;
sleep:
		prepare_to_wait_exclusive(&eb->lock_wq, &wait,
					  TASK_UNINTERRUPTIBLE);

		if (test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
			schedule();

		finish_wait(&eb->lock_wq, &wait);
	}
	return 0;
}

/*
 * Very quick trylock; this does not spin or schedule.  It returns
 * 1 with the spinlock held if it was able to take the lock, or it
 * returns zero if it was unable to take the lock.
 *
 * After this call, scheduling is not safe without first calling
 * btrfs_set_lock_blocking()
 */
int btrfs_try_tree_lock(struct extent_buffer *eb)
{
	if (spin_trylock(&eb->lock)) {
		if (test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags)) {
			/*
			 * we've got the spinlock, but the real owner is
			 * blocking.  Drop the spinlock and return failure.
			 */
			spin_unlock(&eb->lock);
			return 0;
		}
		return 1;
	}
	/* someone else has the spinlock, give up */
	return 0;
}

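/*
 * Illustrative use (hypothetical caller, not from this file): probe a
 * buffer opportunistically and skip it rather than wait on contention:
 *
 *	if (btrfs_try_tree_lock(eb)) {
 *		do_quick_nonblocking_work(eb);	-- placeholder
 *		btrfs_tree_unlock(eb);
 *	}
 */
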
int btrfs_tree_unlock(struct extent_buffer *eb)
{
	/*
	 * if we were a blocking owner, we don't have the spinlock held
	 * just clear the bit and look for waiters
	 */
	if (test_and_clear_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
		smp_mb__after_clear_bit();
	else
		spin_unlock(&eb->lock);

	if (waitqueue_active(&eb->lock_wq))
		wake_up(&eb->lock_wq);
	return 0;
}

void btrfs_assert_tree_locked(struct extent_buffer *eb)
{
	if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
		assert_spin_locked(&eb->lock);
}