/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU
 */
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rcupdate_wait.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/cpu.h>
#include <linux/prefetch.h>

#include "rcu.h"
/* Global control variables for rcupdate callback mechanism. */
struct rcu_ctrlblk {
	struct rcu_head *rcucblist;	/* List of pending callbacks (CBs). */
	struct rcu_head **donetail;	/* ->next pointer of last "done" CB. */
	struct rcu_head **curtail;	/* ->next pointer of last CB. */
};
/* Definition for rcupdate control block. */
static struct rcu_ctrlblk rcu_sched_ctrlblk = {
	.donetail	= &rcu_sched_ctrlblk.rcucblist,
	.curtail	= &rcu_sched_ctrlblk.rcucblist,
};
static struct rcu_ctrlblk rcu_bh_ctrlblk = {
	.donetail	= &rcu_bh_ctrlblk.rcucblist,
	.curtail	= &rcu_bh_ctrlblk.rcucblist,
};
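/*
 * The two tail pointers carve the single callback list into a "done"
 * segment and a "waiting" segment.  With no callbacks queued,
 * everything points at the list head:
 *
 *	rcucblist == NULL
 *	donetail == curtail == &rcucblist
 *
 * __call_rcu() links new callbacks in through *curtail, a quiescent
 * state advances donetail to curtail (marking everything queued so far
 * as ready), and __rcu_process_callbacks() splices off the callbacks
 * up to *donetail for invocation.
 */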
void rcu_barrier_bh(void)
{
	wait_rcu_gp(call_rcu_bh);
}
EXPORT_SYMBOL(rcu_barrier_bh);
void rcu_barrier_sched(void)
{
	wait_rcu_gp(call_rcu_sched);
}
EXPORT_SYMBOL(rcu_barrier_sched);
/*
 * Helper function for rcu_sched_qs() and rcu_bh_qs().  Irqs are
 * disabled by the callers to avoid confusion due to interrupt handlers
 * invoking call_rcu().
 */
static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
{
	if (rcp->donetail != rcp->curtail) {
		rcp->donetail = rcp->curtail;
		return 1;
	}

	return 0;
}
/*
 * Record an rcu quiescent state.  And an rcu_bh quiescent state while we
 * are at it, given that any rcu quiescent state is also an rcu_bh
 * quiescent state.  Use "+" instead of "||" to defeat short circuiting.
 */
void rcu_sched_qs(void)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcu_qsctr_help(&rcu_sched_ctrlblk) +
	    rcu_qsctr_help(&rcu_bh_ctrlblk))
		raise_softirq(RCU_SOFTIRQ);
	local_irq_restore(flags);
}
/*
 * Record an rcu_bh quiescent state.
 */
void rcu_bh_qs(void)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcu_qsctr_help(&rcu_bh_ctrlblk))
		raise_softirq(RCU_SOFTIRQ);
	local_irq_restore(flags);
}
/*
 * Check to see if the scheduling-clock interrupt came from an extended
 * quiescent state, and, if so, tell RCU about it.  This function must
 * be called from hardirq context.  It is normally called from the
 * scheduling-clock interrupt.
 */
void rcu_check_callbacks(int user)
{
	if (user)
		rcu_sched_qs();
	if (user || !in_softirq())
		rcu_bh_qs();
}
/*
 * Invoke the RCU callbacks on the specified rcu_ctrlblk structure
 * whose grace period has elapsed.
 */
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
{
	struct rcu_head *next, *list;
	unsigned long flags;

	/* Move the ready-to-invoke callbacks to a local list. */
	local_irq_save(flags);
	if (rcp->donetail == &rcp->rcucblist) {
		/* No callbacks ready, so just leave. */
		local_irq_restore(flags);
		return;
	}
	list = rcp->rcucblist;
	rcp->rcucblist = *rcp->donetail;
	*rcp->donetail = NULL;
	if (rcp->curtail == rcp->donetail)
		rcp->curtail = &rcp->rcucblist;
	rcp->donetail = &rcp->rcucblist;
	local_irq_restore(flags);

	/* Invoke the callbacks on the local list. */
	while (list) {
		next = list->next;
		prefetch(next);
		debug_rcu_head_unqueue(list);
		local_bh_disable();
		__rcu_reclaim("", list);
		local_bh_enable();
		list = next;
	}
}
static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
{
	__rcu_process_callbacks(&rcu_sched_ctrlblk);
	__rcu_process_callbacks(&rcu_bh_ctrlblk);
}
/*
 * Wait for a grace period to elapse.  But it is illegal to invoke
 * synchronize_sched() from within an RCU read-side critical section.
 * Therefore, any legal call to synchronize_sched() is a quiescent
 * state, and so on a UP system, synchronize_sched() need do nothing.
 * Ditto for synchronize_rcu_bh().  (But Lai Jiangshan points out the
 * benefits of doing might_sleep() to reduce latency.)
 *
 * Cool, huh?  (Due to Josh Triplett.)
 */
void synchronize_sched(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_sched() in RCU read-side critical section");
}
EXPORT_SYMBOL_GPL(synchronize_sched);
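/*
 * A minimal usage sketch (gp, gp_lock, and do_something_with() are
 * hypothetical): readers run under rcu_read_lock_sched(), and an
 * updater unlinks an object, waits for a grace period, then frees it:
 *
 *	rcu_read_lock_sched();
 *	p = rcu_dereference_sched(gp);
 *	if (p)
 *		do_something_with(p);
 *	rcu_read_unlock_sched();
 *
 *	spin_lock(&gp_lock);
 *	old = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
 *	rcu_assign_pointer(gp, NULL);
 *	spin_unlock(&gp_lock);
 *	synchronize_sched();	/* Wait for pre-existing readers to finish. */
 *	kfree(old);
 */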
/*
 * Helper function for call_rcu() and call_rcu_bh().
 */
static void __call_rcu(struct rcu_head *head,
		       rcu_callback_t func,
		       struct rcu_ctrlblk *rcp)
{
	unsigned long flags;

	debug_rcu_head_queue(head);
	head->func = func;
	head->next = NULL;

	local_irq_save(flags);
	*rcp->curtail = head;
	rcp->curtail = &head->next;
	local_irq_restore(flags);

	if (unlikely(is_idle_task(current))) {
		/* force scheduling for rcu_sched_qs() */
		resched_cpu(0);
	}
}
/*
 * Post an RCU callback to be invoked after the end of an RCU-sched grace
 * period.  But since we have but one CPU, that would be after any
 * quiescent state.
 */
void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
{
	__call_rcu(head, func, &rcu_sched_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_sched);
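/*
 * A minimal usage sketch (struct foo, foo_reclaim(), gp, and gp_lock are
 * hypothetical): the caller embeds a struct rcu_head in its own structure
 * and the callback recovers the enclosing object with container_of():
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rh;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *head)
 *	{
 *		struct foo *fp = container_of(head, struct foo, rh);
 *
 *		kfree(fp);
 *	}
 *
 *	old = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
 *	rcu_assign_pointer(gp, new);
 *	call_rcu_sched(&old->rh, foo_reclaim);	/* Frees "old" after a GP. */
 */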
/*
 * Post an RCU bottom-half callback to be invoked after any subsequent
 * quiescent state.
 */
void call_rcu_bh(struct rcu_head *head, rcu_callback_t func)
{
	__call_rcu(head, func, &rcu_bh_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);
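/*
 * A minimal usage sketch (gp and handle() are hypothetical): the matching
 * read side for call_rcu_bh() is rcu_read_lock_bh(), for example from
 * softirq context:
 *
 *	rcu_read_lock_bh();
 *	p = rcu_dereference_bh(gp);
 *	if (p)
 *		handle(p);
 *	rcu_read_unlock_bh();
 */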
void __init rcu_init(void)
{
	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
	rcu_early_boot_tests();
}