fs/btrfs/locking.c
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"
/* take the extent buffer's spinlock; all lock acquisitions funnel through here */
static inline void spin_nested(struct extent_buffer *eb)
{
        spin_lock(&eb->lock);
}
/*
 * Setting a lock to blocking will drop the spinlock and set the
 * flag that forces other procs who want the lock to wait.  After
 * this you can safely schedule with the lock held.
 */
void btrfs_set_lock_blocking(struct extent_buffer *eb)
{
        if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags)) {
                set_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags);
                spin_unlock(&eb->lock);
        }
        /* exit with the spin lock released and the bit set */
}
/*
 * clearing the blocking flag will take the spinlock again.
 * After this you can't safely schedule.
 */
void btrfs_clear_lock_blocking(struct extent_buffer *eb)
{
        if (test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags)) {
                spin_nested(eb);
                clear_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags);
                smp_mb__after_clear_bit();
        }
        /* exit with the spin lock held */
}
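/*
 * Illustrative usage sketch (not part of the original file): the caller
 * pattern the two helpers above are built around.  do_blocking_work()
 * is a hypothetical stand-in for anything that may schedule, such as
 * reading a block from disk.
 *
 *      btrfs_tree_lock(eb);            (spinlock held)
 *      btrfs_set_lock_blocking(eb);    (spinlock dropped, bit set)
 *      do_blocking_work();             (safe to schedule here)
 *      btrfs_clear_lock_blocking(eb);  (spinlock re-taken)
 *      btrfs_tree_unlock(eb);
 */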
/*
 * unfortunately, many of the places that currently set a lock to blocking
 * don't end up blocking for very long, and often they don't block
 * at all.  For a dbench 50 run, if we don't spin on the blocking bit
 * at all, the context switch rate can jump up to 400,000/sec or more.
 *
 * So, we're still stuck with this crummy spin on the blocking bit,
 * at least until the most common causes of the short blocks
 * can be dealt with.
 */
static int btrfs_spin_on_block(struct extent_buffer *eb)
{
        int i;

        for (i = 0; i < 512; i++) {
                if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
                        return 1;
                if (need_resched())
                        break;
                cpu_relax();
        }
        return 0;
}
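/*
 * Illustrative caller shape (not part of the original file): spin first,
 * and sleep only when the blocking owner does not finish within the
 * budget.  The 512 iterations of cpu_relax() bound the busy-wait to
 * roughly a microsecond on typical hardware (a rough estimate, not a
 * measurement).
 *
 *      if (btrfs_spin_on_block(eb))
 *              take the spinlock;      (fast path: flag cleared quickly)
 *      else
 *              sleep on eb->lock_wq;   (slow path: see btrfs_tree_lock)
 */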
/*
 * This is somewhat different from trylock.  It will take the
 * spinlock but if it finds the lock is set to blocking, it will
 * return without the lock held.
 *
 * returns 1 if it was able to take the lock and zero otherwise
 *
 * After this call, scheduling is not safe without first calling
 * btrfs_set_lock_blocking()
 */
int btrfs_try_spin_lock(struct extent_buffer *eb)
{
        int i;

        if (btrfs_spin_on_block(eb)) {
                spin_nested(eb);
                if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
                        return 1;
                spin_unlock(&eb->lock);
        }
        /* spin for a bit on the BLOCKING flag */
        for (i = 0; i < 2; i++) {
                cpu_relax();
                if (!btrfs_spin_on_block(eb))
                        break;

                spin_nested(eb);
                if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
                        return 1;
                spin_unlock(&eb->lock);
        }
        return 0;
}
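/*
 * Illustrative fallback pattern (not part of the original file): a caller
 * that would rather not sleep tries the spinning lock first and only
 * falls back to the full, possibly sleeping lock when that fails.
 *
 *      if (!btrfs_try_spin_lock(eb))
 *              btrfs_tree_lock(eb);
 *      ... work on the buffer ...
 *      btrfs_tree_unlock(eb);
 */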
/*
 * the autoremove wake function will return 0 if it tried to wake up
 * a process that was already awake, which means that process won't
 * count as an exclusive wakeup.  The waitq code will continue waking
 * procs until it finds one that was actually sleeping.
 *
 * For btrfs, this isn't quite what we want.  We want a single proc
 * to be notified that the lock is ready for taking.  If that proc
 * already happens to be awake, great, it will loop around and try for
 * the lock.
 *
 * So, btrfs_wake_function always returns 1, even when the proc that we
 * tried to wake up was already awake.
 */
static int btrfs_wake_function(wait_queue_t *wait, unsigned mode,
                               int sync, void *key)
{
        autoremove_wake_function(wait, mode, sync, key);
        return 1;
}
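/*
 * The hook above is installed by overriding the .func member of an
 * on-stack wait queue entry, exactly as btrfs_tree_lock() does below:
 *
 *      DEFINE_WAIT(wait);
 *      wait.func = btrfs_wake_function;
 */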
/*
 * returns with the extent buffer spinlocked.
 *
 * This will spin and/or wait as required to take the lock, and then
 * return with the spinlock held.
 *
 * After this call, scheduling is not safe without first calling
 * btrfs_set_lock_blocking()
 */
int btrfs_tree_lock(struct extent_buffer *eb)
{
        DEFINE_WAIT(wait);
        wait.func = btrfs_wake_function;

        if (!btrfs_spin_on_block(eb))
                goto sleep;

        while (1) {
                spin_nested(eb);

                /* nobody is blocking, exit with the spinlock held */
                if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
                        return 0;

                /*
                 * we have the spinlock, but the real owner is blocking.
                 * wait for them
                 */
                spin_unlock(&eb->lock);

                /*
                 * spin for a bit, and if the blocking flag goes away,
                 * loop around
                 */
                cpu_relax();
                if (btrfs_spin_on_block(eb))
                        continue;
sleep:
                prepare_to_wait_exclusive(&eb->lock_wq, &wait,
                                          TASK_UNINTERRUPTIBLE);

                if (test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
                        schedule();

                finish_wait(&eb->lock_wq, &wait);
        }
        return 0;
}
int btrfs_tree_unlock(struct extent_buffer *eb)
{
        /*
         * if we were a blocking owner, we don't have the spinlock held
         * just clear the bit and look for waiters
         */
        if (test_and_clear_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
                smp_mb__after_clear_bit();
        else
                spin_unlock(&eb->lock);

        if (waitqueue_active(&eb->lock_wq))
                wake_up(&eb->lock_wq);
        return 0;
}
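/*
 * Descriptive note (not part of the original file): the
 * smp_mb__after_clear_bit() above orders clearing EXTENT_BUFFER_BLOCKING
 * against the waitqueue_active() read, so a waiter that re-checks the
 * bit just before sleeping in btrfs_tree_lock() cannot be missed by
 * this unlock path.
 */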
void btrfs_assert_tree_locked(struct extent_buffer *eb)
{
        if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
                assert_spin_locked(&eb->lock);
}
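/*
 * Descriptive note (not part of the original file): in blocking mode no
 * owner is recorded, so the assertion can only verify the spinlock in
 * the spinning case.  A hypothetical helper that requires the caller to
 * hold the tree lock would use it like:
 *
 *      static void modify_eb(struct extent_buffer *eb)
 *      {
 *              btrfs_assert_tree_locked(eb);
 *              ...
 *      }
 */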