/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2006
 *
 * Author: Paul McKenney <paulmck@us.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU/ *.txt
 */

#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/srcu.h>

static int init_srcu_struct_fields(struct srcu_struct *sp)
{
	sp->completed = 0;
	mutex_init(&sp->mutex);
	sp->per_cpu_ref = alloc_percpu(struct srcu_struct_array);
	return sp->per_cpu_ref ? 0 : -ENOMEM;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *sp, const char *name,
		       struct lock_class_key *key)
{
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)sp, sizeof(*sp));
	lockdep_init_map(&sp->dep_map, name, key, 0);
	return init_srcu_struct_fields(sp);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * init_srcu_struct - initialize a sleep-RCU structure
 * @sp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *sp)
{
	return init_srcu_struct_fields(sp);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

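/*
 * Usage sketch (illustration only): per the comment above, an srcu_struct
 * must be initialized before first use, typically once at module-init
 * time.  The names "example_srcu" and "example_init" are hypothetical.
 *
 *	static struct srcu_struct example_srcu;
 *
 *	static int __init example_init(void)
 *	{
 *		return init_srcu_struct(&example_srcu);	(0 or -ENOMEM)
 *	}
 */
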
/*
 * srcu_readers_active_idx -- returns approximate number of readers
 *	active on the specified rank of per-CPU counters.
 */

static int srcu_readers_active_idx(struct srcu_struct *sp, int idx)
{
	int cpu;
	int sum;

	sum = 0;
	for_each_possible_cpu(cpu)
		sum += per_cpu_ptr(sp->per_cpu_ref, cpu)->c[idx];
	return sum;
}

/**
 * srcu_readers_active - returns approximate number of readers.
 * @sp: which srcu_struct to count active readers (holding srcu_read_lock).
 *
 * Note that this is not an atomic primitive, and can therefore suffer
 * severe errors when invoked on an active srcu_struct.  That said, it
 * can be useful as an error check at cleanup time.
 */
static int srcu_readers_active(struct srcu_struct *sp)
{
	return srcu_readers_active_idx(sp, 0) + srcu_readers_active_idx(sp, 1);
}

/**
 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 * @sp: structure to clean up.
 *
 * Must invoke this after you are finished using a given srcu_struct that
 * was initialized via init_srcu_struct(), else you leak memory.
 */
void cleanup_srcu_struct(struct srcu_struct *sp)
{
	int sum;

	sum = srcu_readers_active(sp);
	WARN_ON(sum);  /* Leakage unless caller handles error. */
	if (sum != 0)
		return;
	free_percpu(sp->per_cpu_ref);
	sp->per_cpu_ref = NULL;
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);

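/*
 * Teardown sketch (hypothetical names "example_srcu" and "example_exit"):
 * all readers must have finished and all updates been synchronized before
 * cleanup, otherwise the WARN_ON() above fires and the per-CPU memory
 * is leaked.
 *
 *	static void __exit example_exit(void)
 *	{
 *		synchronize_srcu(&example_srcu);	(flush pending readers)
 *		cleanup_srcu_struct(&example_srcu);
 *	}
 */
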
/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct.  Must be called from process context.
 * Returns an index that must be passed to the matching srcu_read_unlock().
 */
int __srcu_read_lock(struct srcu_struct *sp)
{
	int idx;

	preempt_disable();
	idx = sp->completed & 0x1;
	barrier();  /* ensure compiler looks -once- at sp->completed. */
	per_cpu_ptr(sp->per_cpu_ref, smp_processor_id())->c[idx]++;
	srcu_barrier();  /* ensure compiler won't misorder critical section. */
	preempt_enable();
	return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock);

/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct.  Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 * Must be called from process context.
 */
void __srcu_read_unlock(struct srcu_struct *sp, int idx)
{
	preempt_disable();
	srcu_barrier();  /* ensure compiler won't misorder critical section. */
	per_cpu_ptr(sp->per_cpu_ref, smp_processor_id())->c[idx]--;
	preempt_enable();
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);

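/*
 * Read-side sketch: callers use the srcu_read_lock()/srcu_read_unlock()
 * wrappers from <linux/srcu.h>, which invoke the two primitives above.
 * Unlike classic RCU, the critical section may block.  The names
 * "example_srcu", "example_gp" and "example_use" are hypothetical.
 *
 *	int idx;
 *	struct example *p;
 *
 *	idx = srcu_read_lock(&example_srcu);
 *	p = srcu_dereference(example_gp, &example_srcu);
 *	if (p)
 *		example_use(p);		(this may sleep)
 *	srcu_read_unlock(&example_srcu, idx);
 */
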
/*
 * We use an adaptive strategy for synchronize_srcu() and especially for
 * synchronize_srcu_expedited().  We spin for a fixed time period
 * (defined below) to allow SRCU readers to exit their read-side critical
 * sections.  If there are still some readers after 10 microseconds,
 * we repeatedly block for 1-millisecond time periods.  This approach
 * has done well in testing, so there is no need for a config parameter.
 */
#define SYNCHRONIZE_SRCU_READER_DELAY 10

/*
 * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
 */
static void __synchronize_srcu(struct srcu_struct *sp, void (*sync_func)(void))
{
	int idx;

	rcu_lockdep_assert(!lock_is_held(&sp->dep_map) &&
			   !lock_is_held(&rcu_bh_lock_map) &&
			   !lock_is_held(&rcu_lock_map) &&
			   !lock_is_held(&rcu_sched_lock_map),
			   "Illegal synchronize_srcu() in same-type SRCU (or RCU) read-side critical section");

	idx = sp->completed;
	mutex_lock(&sp->mutex);

	/*
	 * Check to see if someone else did the work for us while we were
	 * waiting to acquire the lock.  We need -two- advances of
	 * the counter, not just one.  If there was but one, we might have
	 * shown up -after- our helper's first synchronize_sched(), thus
	 * having failed to prevent CPU-reordering races with concurrent
	 * srcu_read_unlock()s on other CPUs (see comment below).  So we
	 * either (1) wait for two or (2) supply the second ourselves.
	 */
	if ((sp->completed - idx) >= 2) {
		mutex_unlock(&sp->mutex);
		return;
	}

	sync_func();  /* Force memory barrier on all CPUs. */

	/*
	 * The preceding synchronize_sched() ensures that any CPU that
	 * sees the new value of sp->completed will also see any preceding
	 * changes to data structures made by this CPU.  This prevents
	 * some other CPU from reordering the accesses in its SRCU
	 * read-side critical section to precede the corresponding
	 * srcu_read_lock() -- ensuring that such references will in
	 * fact be protected.
	 *
	 * So it is now safe to do the flip.
	 */
	idx = sp->completed & 0x1;
	sp->completed++;

	sync_func();  /* Force memory barrier on all CPUs. */

	/*
	 * At this point, because of the preceding synchronize_sched(),
	 * all srcu_read_lock() calls using the old counters have completed.
	 * Their corresponding critical sections might well be still
	 * executing, but the srcu_read_lock() primitives themselves
	 * will have finished executing.  We initially give readers
	 * an arbitrarily chosen 10 microseconds to get out of their
	 * SRCU read-side critical sections, then loop waiting 1/HZ
	 * seconds per iteration.  The 10-microsecond value has done
	 * very well in testing.
	 */
	if (srcu_readers_active_idx(sp, idx))
		udelay(SYNCHRONIZE_SRCU_READER_DELAY);
	while (srcu_readers_active_idx(sp, idx))
		schedule_timeout_interruptible(1);

	sync_func();  /* Force memory barrier on all CPUs. */

	/*
	 * The preceding synchronize_sched() forces all srcu_read_unlock()
	 * primitives that were executing concurrently with the preceding
	 * for_each_possible_cpu() loop to have completed by this point.
	 * More importantly, it also forces the corresponding SRCU read-side
	 * critical sections to have also completed, and the corresponding
	 * references to SRCU-protected data items to be dropped.
	 *
	 * Note:
	 *
	 *	Despite what you might think at first glance, the
	 *	preceding synchronize_sched() -must- be within the
	 *	critical section ended by the following mutex_unlock().
	 *	Otherwise, a task taking the early exit can race
	 *	with a srcu_read_unlock(), which might have executed
	 *	just before the preceding srcu_readers_active() check,
	 *	and whose CPU might have reordered the srcu_read_unlock()
	 *	with the preceding critical section.  In this case, there
	 *	is nothing preventing the synchronize_sched() task that is
	 *	taking the early exit from freeing a data structure that
	 *	is still being referenced (out of order) by the task
	 *	doing the srcu_read_unlock().
	 *
	 *	Alternatively, the comparison with "2" on the early exit
	 *	could be changed to "3", but this increases synchronize_srcu()
	 *	latency for bulk loads.  So the current code is preferred.
	 */
	mutex_unlock(&sp->mutex);
}

/**
 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
 * @sp: srcu_struct with which to synchronize.
 *
 * Flip the completed counter, and wait for the old count to drain to zero.
 * As with classic RCU, the updater must use some separate means of
 * synchronizing concurrent updates.  Can block; must be called from
 * process context.
 *
 * Note that it is illegal to call synchronize_srcu() from the corresponding
 * SRCU read-side critical section; doing so will result in deadlock.
 * However, it is perfectly legal to call synchronize_srcu() on one
 * srcu_struct from some other srcu_struct's read-side critical section.
 */
void synchronize_srcu(struct srcu_struct *sp)
{
	__synchronize_srcu(sp, synchronize_sched);
}
EXPORT_SYMBOL_GPL(synchronize_srcu);

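/*
 * Updater sketch (hypothetical names "example_lock", "example_gp" and
 * "example_srcu"): publish the new version, wait for all pre-existing
 * readers in this SRCU domain, then free the old version.
 *
 *	spin_lock(&example_lock);
 *	old = example_gp;
 *	rcu_assign_pointer(example_gp, new);
 *	spin_unlock(&example_lock);
 *	synchronize_srcu(&example_srcu);	(waits for pre-existing readers)
 *	kfree(old);				(no reader can now hold "old")
 */
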
/**
 * synchronize_srcu_expedited - Brute-force SRCU grace period
 * @sp: srcu_struct with which to synchronize.
 *
 * Wait for an SRCU grace period to elapse, but use a "big hammer"
 * approach to force the grace period to end quickly.  This consumes
 * significant time on all CPUs and is unfriendly to real-time workloads,
 * so is thus not recommended for any sort of common-case code.  In fact,
 * if you are using synchronize_srcu_expedited() in a loop, please
 * restructure your code to batch your updates, and then use a single
 * synchronize_srcu() instead.
 *
 * Note that it is illegal to call this function while holding any lock
 * that is acquired by a CPU-hotplug notifier.  And yes, it is also illegal
 * to call this function from a CPU-hotplug notifier.  Failing to observe
 * these restrictions will result in deadlock.  It is also illegal to call
 * synchronize_srcu_expedited() from the corresponding SRCU read-side
 * critical section; doing so will result in deadlock.  However, it is
 * perfectly legal to call synchronize_srcu_expedited() on one srcu_struct
 * from some other srcu_struct's read-side critical section, as long as
 * the resulting graph of srcu_structs is acyclic.
 */
void synchronize_srcu_expedited(struct srcu_struct *sp)
{
	__synchronize_srcu(sp, synchronize_sched_expedited);
}
EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);

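/*
 * Batching sketch, following the advice above (hypothetical names):
 * publish all updates first, then wait for one grace period covering
 * all of them, rather than one synchronize_srcu_expedited() per update.
 *
 *	spin_lock(&example_lock);
 *	old1 = example_gp1;
 *	rcu_assign_pointer(example_gp1, new1);
 *	old2 = example_gp2;
 *	rcu_assign_pointer(example_gp2, new2);
 *	spin_unlock(&example_lock);
 *	synchronize_srcu(&example_srcu);	(one grace period for both)
 *	kfree(old1);
 *	kfree(old2);
 */
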
/**
 * srcu_batches_completed - return batches completed.
 * @sp: srcu_struct on which to report batch completion.
 *
 * Report the number of batches, correlated with, but not necessarily
 * precisely the same as, the number of grace periods that have elapsed.
 */
long srcu_batches_completed(struct srcu_struct *sp)
{
	return sp->completed;
}
EXPORT_SYMBOL_GPL(srcu_batches_completed);
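
/*
 * Polling sketch (hypothetical "example_srcu"): by the same two-advance
 * argument used in __synchronize_srcu()'s early exit, a caller that sees
 * the batch count advance by at least two knows that a full SRCU grace
 * period has elapsed since its snapshot.
 *
 *	long snap = srcu_batches_completed(&example_srcu);
 *	...
 *	if (srcu_batches_completed(&example_srcu) - snap >= 2)
 *		(a full grace period has elapsed since the snapshot)
 */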