/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright Red Hat, 2009
 * Copyright IBM Corporation, 2009
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/delay.h>
#include <linux/stop_machine.h>
/*
 * Check the RCU kernel configuration parameters and print informative
 * messages about anything out of the ordinary.  If you like #ifdef, you
 * will love this function.
 */
static void __init rcu_bootup_announce_oddness(void)
{
#ifdef CONFIG_RCU_TRACE
	printk(KERN_INFO "\tRCU debugfs-based tracing is enabled.\n");
#endif
#if (defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 64) || (!defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32)
	printk(KERN_INFO "\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
	       CONFIG_RCU_FANOUT);
#endif
#ifdef CONFIG_RCU_FANOUT_EXACT
	printk(KERN_INFO "\tHierarchical RCU autobalancing is disabled.\n");
#endif
#ifdef CONFIG_RCU_FAST_NO_HZ
	printk(KERN_INFO
	       "\tRCU dyntick-idle grace-period acceleration is enabled.\n");
#endif
#ifdef CONFIG_PROVE_RCU
	printk(KERN_INFO "\tRCU lockdep checking is enabled.\n");
#endif
#ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE
	printk(KERN_INFO "\tRCU torture testing starts during boot.\n");
#endif
#if defined(CONFIG_TREE_PREEMPT_RCU) && !defined(CONFIG_RCU_CPU_STALL_VERBOSE)
	printk(KERN_INFO "\tVerbose stalled-CPUs detection is disabled.\n");
#endif
#if NUM_RCU_LVL_4 != 0
	printk(KERN_INFO "\tExperimental four-level hierarchy is enabled.\n");
#endif
}
#ifdef CONFIG_TREE_PREEMPT_RCU

struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt);
DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
static struct rcu_state *rcu_state = &rcu_preempt_state;

static void rcu_read_unlock_special(struct task_struct *t);
static int rcu_preempted_readers_exp(struct rcu_node *rnp);
/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	printk(KERN_INFO "Preemptible hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/*
 * Return the number of RCU-preempt batches processed thus far
 * for debug and statistics.
 */
long rcu_batches_completed_preempt(void)
{
	return rcu_preempt_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt);

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
	return rcu_batches_completed_preempt();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Force a quiescent state for preemptible RCU.
 */
void rcu_force_quiescent_state(void)
{
	force_quiescent_state(&rcu_preempt_state, 0);
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
/*
 * Record a preemptible-RCU quiescent state for the specified CPU.  Note
 * that this just means that the task currently running on the CPU is
 * not in a quiescent state.  There might be any number of tasks blocked
 * while in an RCU read-side critical section.
 *
 * Unlike the other rcu_*_qs() functions, callers to this function
 * must disable irqs in order to protect the assignment to
 * ->rcu_read_unlock_special.
 */
static void rcu_preempt_qs(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);

	rdp->passed_quiesce_gpnum = rdp->gpnum;
	if (rdp->passed_quiesce == 0)
		trace_rcu_grace_period("rcu_preempt", rdp->gpnum, "cpuqs");
	rdp->passed_quiesce = 1;
	current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
}
/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from.  If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the blkd_tasks list.
 * The task will dequeue itself when it exits the outermost enclosing
 * RCU read-side critical section.  Therefore, the current grace period
 * cannot be permitted to complete until the blkd_tasks list entries
 * predating the current grace period drain, in other words, until
 * rnp->gp_tasks becomes NULL.
 *
 * Caller must disable preemption.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
	struct task_struct *t = current;
	unsigned long flags;
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	if (t->rcu_read_lock_nesting > 0 &&
	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {

		/* Possibly blocking in an RCU read-side critical section. */
		rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
		rnp = rdp->mynode;
		raw_spin_lock_irqsave(&rnp->lock, flags);
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
		t->rcu_blocked_node = rnp;

		/*
		 * If this CPU has already checked in, then this task
		 * will hold up the next grace period rather than the
		 * current grace period.  Queue the task accordingly.
		 * If the task is queued for the current grace period
		 * (i.e., this CPU has not yet passed through a quiescent
		 * state for the current grace period), then as long
		 * as that task remains queued, the current grace period
		 * cannot end.  Note that there is some uncertainty as
		 * to exactly when the current grace period started.
		 * We take a conservative approach, which can result
		 * in unnecessarily waiting on tasks that started very
		 * slightly after the current grace period began.  C'est
		 * la vie!!!
		 *
		 * But first, note that the current CPU must still be
		 * on line!
		 */
		WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0);
		WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
		if ((rnp->qsmask & rdp->grpmask) && rnp->gp_tasks != NULL) {
			list_add(&t->rcu_node_entry, rnp->gp_tasks->prev);
			rnp->gp_tasks = &t->rcu_node_entry;
#ifdef CONFIG_RCU_BOOST
			if (rnp->boost_tasks != NULL)
				rnp->boost_tasks = rnp->gp_tasks;
#endif /* #ifdef CONFIG_RCU_BOOST */
		} else {
			list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
			if (rnp->qsmask & rdp->grpmask)
				rnp->gp_tasks = &t->rcu_node_entry;
		}
		trace_rcu_preempt_task(rdp->rsp->name,
				       t->pid,
				       (rnp->qsmask & rdp->grpmask)
				       ? rnp->gpnum
				       : rnp->gpnum + 1);
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	} else if (t->rcu_read_lock_nesting < 0 &&
		   t->rcu_read_unlock_special) {

		/*
		 * Complete exit from RCU read-side critical section on
		 * behalf of preempted instance of __rcu_read_unlock().
		 */
		rcu_read_unlock_special(t);
	}

	/*
	 * Either we were not in an RCU read-side critical section to
	 * begin with, or we have now recorded that critical section
	 * globally.  Either way, we can now note a quiescent state
	 * for this CPU.  Again, if we were in an RCU read-side critical
	 * section, and if that critical section was blocking the current
	 * grace period, then the fact that the task has been enqueued
	 * means that we continue to block the current grace period.
	 */
	local_irq_save(flags);
	rcu_preempt_qs(cpu);
	local_irq_restore(flags);
}
/*
 * Tree-preemptible RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting, shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
	current->rcu_read_lock_nesting++;
	barrier();  /* needed if we ever invoke rcu_read_lock in rcutree.c */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

/*
 * Check for preempted RCU readers blocking the current grace period
 * for the specified rcu_node structure.  If the caller needs a reliable
 * answer, it must hold the rcu_node's ->lock.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
	return rnp->gp_tasks != NULL;
}
/*
 * Record a quiescent state for all tasks that were previously queued
 * on the specified rcu_node structure and that were blocking the current
 * RCU grace period.  The caller must hold the specified rnp->lock with
 * irqs disabled, and this lock is released upon return, but irqs remain
 * disabled.
 */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	unsigned long mask;
	struct rcu_node *rnp_p;

	if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;  /* Still need more quiescent states! */
	}

	rnp_p = rnp->parent;
	if (rnp_p == NULL) {
		/*
		 * Either there is only one rcu_node in the tree,
		 * or tasks were kicked up to root rcu_node due to
		 * CPUs going offline.
		 */
		rcu_report_qs_rsp(&rcu_preempt_state, flags);
		return;
	}

	/* Report up the rest of the hierarchy. */
	mask = rnp->grpmask;
	raw_spin_unlock(&rnp->lock);	/* irqs remain disabled. */
	raw_spin_lock(&rnp_p->lock);	/* irqs already disabled. */
	rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags);
}
/*
 * Advance a ->blkd_tasks-list pointer to the next entry, returning NULL
 * instead if at the end of the list.
 */
static struct list_head *rcu_next_node_entry(struct task_struct *t,
					     struct rcu_node *rnp)
{
	struct list_head *np;

	np = t->rcu_node_entry.next;
	if (np == &rnp->blkd_tasks)
		np = NULL;
	return np;
}
/*
 * Handle special cases during rcu_read_unlock(), such as needing to
 * notify RCU core processing or task having blocked during the RCU
 * read-side critical section.
 */
static noinline void rcu_read_unlock_special(struct task_struct *t)
{
	int empty;
	int empty_exp;
	unsigned long flags;
	struct list_head *np;
#ifdef CONFIG_RCU_BOOST
	struct rt_mutex *rbmp = NULL;
#endif /* #ifdef CONFIG_RCU_BOOST */
	struct rcu_node *rnp;
	int special;

	/* NMI handlers cannot block and cannot safely manipulate state. */
	if (in_nmi())
		return;

	local_irq_save(flags);

	/*
	 * If RCU core is waiting for this CPU to exit critical section,
	 * let it know that we have done so.
	 */
	special = t->rcu_read_unlock_special;
	if (special & RCU_READ_UNLOCK_NEED_QS) {
		rcu_preempt_qs(smp_processor_id());
	}

	/* Hardware IRQ handlers cannot block. */
	if (in_irq() || in_serving_softirq()) {
		local_irq_restore(flags);
		return;
	}

	/* Clean up if blocked during RCU read-side critical section. */
	if (special & RCU_READ_UNLOCK_BLOCKED) {
		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;

		/*
		 * Remove this task from the list it blocked on.  The
		 * task can migrate while we acquire the lock, but at
		 * most one time.  So at most two passes through loop.
		 */
		for (;;) {
			rnp = t->rcu_blocked_node;
			raw_spin_lock(&rnp->lock);  /* irqs already disabled. */
			if (rnp == t->rcu_blocked_node)
				break;
			raw_spin_unlock(&rnp->lock);  /* irqs remain disabled. */
		}
		empty = !rcu_preempt_blocked_readers_cgp(rnp);
		empty_exp = !rcu_preempted_readers_exp(rnp);
		smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
		np = rcu_next_node_entry(t, rnp);
		list_del_init(&t->rcu_node_entry);
		t->rcu_blocked_node = NULL;
		trace_rcu_unlock_preempted_task("rcu_preempt",
						rnp->gpnum, t->pid);
		if (&t->rcu_node_entry == rnp->gp_tasks)
			rnp->gp_tasks = np;
		if (&t->rcu_node_entry == rnp->exp_tasks)
			rnp->exp_tasks = np;
#ifdef CONFIG_RCU_BOOST
		if (&t->rcu_node_entry == rnp->boost_tasks)
			rnp->boost_tasks = np;
		/* Snapshot/clear ->rcu_boost_mutex with rcu_node lock held. */
		if (t->rcu_boost_mutex) {
			rbmp = t->rcu_boost_mutex;
			t->rcu_boost_mutex = NULL;
		}
#endif /* #ifdef CONFIG_RCU_BOOST */

		/*
		 * If this was the last task on the current list, and if
		 * we aren't waiting on any CPUs, report the quiescent state.
		 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock.
		 */
		if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) {
			trace_rcu_quiescent_state_report("preempt_rcu",
							 rnp->gpnum,
							 0, rnp->qsmask,
							 rnp->level,
							 rnp->grplo,
							 rnp->grphi,
							 !!rnp->gp_tasks);
			rcu_report_unblock_qs_rnp(rnp, flags);
		} else
			raw_spin_unlock_irqrestore(&rnp->lock, flags);

#ifdef CONFIG_RCU_BOOST
		/* Unboost if we were boosted. */
		if (rbmp)
			rt_mutex_unlock(rbmp);
#endif /* #ifdef CONFIG_RCU_BOOST */

		/*
		 * If this was the last task on the expedited lists,
		 * then we need to report up the rcu_node hierarchy.
		 */
		if (!empty_exp && !rcu_preempted_readers_exp(rnp))
			rcu_report_exp_rnp(&rcu_preempt_state, rnp);
	} else {
		local_irq_restore(flags);
	}
}
/*
 * Tree-preemptible RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting != 1)
		--t->rcu_read_lock_nesting;
	else {
		barrier();  /* critical section before exit code. */
		t->rcu_read_lock_nesting = INT_MIN;
		barrier();  /* assign before ->rcu_read_unlock_special load */
		if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
			rcu_read_unlock_special(t);
		barrier();  /* ->rcu_read_unlock_special load before assign */
		t->rcu_read_lock_nesting = 0;
	}
#ifdef CONFIG_PROVE_LOCKING
	{
		int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);

		WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
	}
#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);
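
/*
 * Illustrative sketch, disabled at compile time: the reader-side pairing
 * that __rcu_read_lock()/__rcu_read_unlock() above implement.  The "foo"
 * structure, the gbl_foo pointer, and foo_get_a() are hypothetical names
 * used only for this example; they appear nowhere else in this file.
 */
#if 0
struct foo {
	int a;
};
static struct foo __rcu *gbl_foo;

static int foo_get_a(void)
{
	int ret;

	rcu_read_lock();			/* bumps ->rcu_read_lock_nesting */
	ret = rcu_dereference(gbl_foo)->a;	/* safe inside the critical section */
	rcu_read_unlock();			/* may end in rcu_read_unlock_special() */
	return ret;
}
#endif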
#ifdef CONFIG_RCU_CPU_STALL_VERBOSE

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period on the specified rcu_node structure.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;

	if (!rcu_preempt_blocked_readers_cgp(rnp))
		return;
	raw_spin_lock_irqsave(&rnp->lock, flags);
	t = list_entry(rnp->gp_tasks,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
		sched_show_task(t);
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
	struct rcu_node *rnp = rcu_get_root(rsp);

	rcu_print_detail_task_stall_rnp(rnp);
	rcu_for_each_leaf_node(rsp, rnp)
		rcu_print_detail_task_stall_rnp(rnp);
}

#else /* #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */

static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
	struct task_struct *t;
	int ndetected = 0;

	if (!rcu_preempt_blocked_readers_cgp(rnp))
		return 0;
	t = list_entry(rnp->gp_tasks,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		printk(" P%d", t->pid);
		ndetected++;
	}
	return ndetected;
}

/*
 * Suppress preemptible RCU's CPU stall warnings by pushing the
 * time of the next stall-warning message comfortably far into the
 * future.
 */
static void rcu_preempt_stall_reset(void)
{
	rcu_preempt_state.jiffies_stall = jiffies + ULONG_MAX / 2;
}
/*
 * Check that the list of blocked tasks for the newly completed grace
 * period is in fact empty.  It is a serious bug to complete a grace
 * period that still has RCU readers blocked!  This function must be
 * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
 * must be held by the caller.
 *
 * Also, if there are blocked tasks on the list, they automatically
 * block the newly created grace period, so set up ->gp_tasks accordingly.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
	if (!list_empty(&rnp->blkd_tasks))
		rnp->gp_tasks = rnp->blkd_tasks.next;
	WARN_ON_ONCE(rnp->qsmask);
}
#ifdef CONFIG_HOTPLUG_CPU

/*
 * Handle tasklist migration for case in which all CPUs covered by the
 * specified rcu_node have gone offline.  Move them up to the root
 * rcu_node.  The reason for not just moving them to the immediate
 * parent is to remove the need for rcu_read_unlock_special() to
 * make more than two attempts to acquire the target rcu_node's lock.
 * Returns true if there were tasks blocking the current RCU grace
 * period.
 *
 * Returns 1 if there was previously a task blocking the current grace
 * period on the specified rcu_node structure.
 *
 * The caller must hold rnp->lock with irqs disabled.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
				     struct rcu_node *rnp,
				     struct rcu_data *rdp)
{
	struct list_head *lp;
	struct list_head *lp_root;
	int retval = 0;
	struct rcu_node *rnp_root = rcu_get_root(rsp);
	struct task_struct *t;

	if (rnp == rnp_root) {
		WARN_ONCE(1, "Last CPU thought to be offlined?");
		return 0;  /* Shouldn't happen: at least one CPU online. */
	}

	/* If we are on an internal node, complain bitterly. */
	WARN_ON_ONCE(rnp != rdp->mynode);

	/*
	 * Move tasks up to root rcu_node.  Don't try to get fancy for
	 * this corner-case operation -- just put this node's tasks
	 * at the head of the root node's list, and update the root node's
	 * ->gp_tasks and ->exp_tasks pointers to those of this node's,
	 * if non-NULL.  This might result in waiting for more tasks than
	 * absolutely necessary, but this is a good performance/complexity
	 * tradeoff.
	 */
	if (rcu_preempt_blocked_readers_cgp(rnp))
		retval |= RCU_OFL_TASKS_NORM_GP;
	if (rcu_preempted_readers_exp(rnp))
		retval |= RCU_OFL_TASKS_EXP_GP;
	lp = &rnp->blkd_tasks;
	lp_root = &rnp_root->blkd_tasks;
	while (!list_empty(lp)) {
		t = list_entry(lp->next, typeof(*t), rcu_node_entry);
		raw_spin_lock(&rnp_root->lock);  /* irqs already disabled */
		list_del(&t->rcu_node_entry);
		t->rcu_blocked_node = rnp_root;
		list_add(&t->rcu_node_entry, lp_root);
		if (&t->rcu_node_entry == rnp->gp_tasks)
			rnp_root->gp_tasks = rnp->gp_tasks;
		if (&t->rcu_node_entry == rnp->exp_tasks)
			rnp_root->exp_tasks = rnp->exp_tasks;
#ifdef CONFIG_RCU_BOOST
		if (&t->rcu_node_entry == rnp->boost_tasks)
			rnp_root->boost_tasks = rnp->boost_tasks;
#endif /* #ifdef CONFIG_RCU_BOOST */
		raw_spin_unlock(&rnp_root->lock);  /* irqs still disabled */
	}

#ifdef CONFIG_RCU_BOOST
	/* In case root is being boosted and leaf is not. */
	raw_spin_lock(&rnp_root->lock);  /* irqs already disabled */
	if (rnp_root->boost_tasks != NULL &&
	    rnp_root->boost_tasks != rnp_root->gp_tasks)
		rnp_root->boost_tasks = rnp_root->gp_tasks;
	raw_spin_unlock(&rnp_root->lock);  /* irqs still disabled */
#endif /* #ifdef CONFIG_RCU_BOOST */

	rnp->gp_tasks = NULL;
	rnp->exp_tasks = NULL;
	return retval;
}

/*
 * Do CPU-offline processing for preemptible RCU.
 */
static void rcu_preempt_offline_cpu(int cpu)
{
	__rcu_offline_cpu(cpu, &rcu_preempt_state);
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */
/*
 * Check for a quiescent state from the current CPU.  When a task blocks,
 * the task is recorded in the corresponding CPU's rcu_node structure,
 * which is checked elsewhere.
 *
 * Caller must disable hard irqs.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting == 0) {
		rcu_preempt_qs(cpu);
		return;
	}
	if (t->rcu_read_lock_nesting > 0 &&
	    per_cpu(rcu_preempt_data, cpu).qs_pending)
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
}

/*
 * Process callbacks for preemptible RCU.
 */
static void rcu_preempt_process_callbacks(void)
{
	__rcu_process_callbacks(&rcu_preempt_state,
				&__get_cpu_var(rcu_preempt_data));
}

#ifdef CONFIG_RCU_BOOST

static void rcu_preempt_do_callbacks(void)
{
	rcu_do_batch(&rcu_preempt_state, &__get_cpu_var(rcu_preempt_data));
}

#endif /* #ifdef CONFIG_RCU_BOOST */
/*
 * Queue a preemptible-RCU callback for invocation after a grace period.
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_preempt_state);
}
EXPORT_SYMBOL_GPL(call_rcu);
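
/*
 * Illustrative sketch, disabled at compile time: the usual way a caller
 * hands work to call_rcu() above.  "struct bar", bar_reclaim(), and
 * bar_remove() are hypothetical names; the important point is that the
 * rcu_head is embedded in the structure whose freeing is being deferred.
 */
#if 0
struct bar {
	struct list_head list;
	int a;
	struct rcu_head rcu;
};

static void bar_reclaim(struct rcu_head *rcu)
{
	struct bar *bp = container_of(rcu, struct bar, rcu);

	kfree(bp);	/* runs only after a full grace period has elapsed */
}

/* Caller holds whatever lock protects the list. */
static void bar_remove(struct bar *bp)
{
	list_del_rcu(&bp->list);		/* new readers cannot find it */
	call_rcu(&bp->rcu, bar_reclaim);	/* defer the actual free */
}
#endif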
/**
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  Note, however, that
 * upon return from synchronize_rcu(), the caller might well be executing
 * concurrently with new RCU read-side critical sections that began while
 * synchronize_rcu() was waiting.  RCU read-side critical sections are
 * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
 */
void synchronize_rcu(void)
{
	if (!rcu_scheduler_active)
		return;
	wait_rcu_gp(call_rcu);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
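
/*
 * Illustrative sketch, disabled at compile time: the classic updater-side
 * pattern that relies on the guarantee documented above.  It reuses the
 * hypothetical gbl_foo pointer from the reader sketch near
 * __rcu_read_unlock(); foo_update_a() and foo_lock are likewise made up.
 */
#if 0
static DEFINE_SPINLOCK(foo_lock);

static void foo_update_a(int new_a)
{
	struct foo *new_fp;
	struct foo *old_fp;

	new_fp = kmalloc(sizeof(*new_fp), GFP_KERNEL);
	if (new_fp == NULL)
		return;
	spin_lock(&foo_lock);
	old_fp = rcu_dereference_protected(gbl_foo, lockdep_is_held(&foo_lock));
	*new_fp = *old_fp;
	new_fp->a = new_a;
	rcu_assign_pointer(gbl_foo, new_fp);	/* publish the new version */
	spin_unlock(&foo_lock);
	synchronize_rcu();	/* wait for pre-existing readers to finish */
	kfree(old_fp);		/* no reader can still hold a reference */
}
#endif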
static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
static long sync_rcu_preempt_exp_count;
static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);

/*
 * Return non-zero if there are any tasks in RCU read-side critical
 * sections blocking the current preemptible-RCU expedited grace period.
 * If there is no preemptible-RCU expedited grace period currently in
 * progress, returns zero unconditionally.
 */
static int rcu_preempted_readers_exp(struct rcu_node *rnp)
{
	return rnp->exp_tasks != NULL;
}

/*
 * Return non-zero if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period.  Works only for preemptible
 * RCU -- other RCU implementations use other means.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
{
	return !rcu_preempted_readers_exp(rnp) &&
	       ACCESS_ONCE(rnp->expmask) == 0;
}
/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.  This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree.  (Calm down, calm down, we do the recursion
 * iteratively!)
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
{
	unsigned long flags;
	unsigned long mask;

	raw_spin_lock_irqsave(&rnp->lock, flags);
	for (;;) {
		if (!sync_rcu_preempt_exp_done(rnp)) {
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
			break;
		}
		if (rnp->parent == NULL) {
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
			wake_up(&sync_rcu_preempt_exp_wq);
			break;
		}
		mask = rnp->grpmask;
		raw_spin_unlock(&rnp->lock);  /* irqs remain disabled */
		rnp = rnp->parent;
		raw_spin_lock(&rnp->lock);  /* irqs already disabled */
		rnp->expmask &= ~mask;
	}
}

/*
 * Snapshot the tasks blocking the newly started preemptible-RCU expedited
 * grace period for the specified rcu_node structure.  If there are no such
 * tasks, report it up the rcu_node hierarchy.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex and rsp->onofflock.
 */
static void
sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
{
	unsigned long flags;
	int must_wait = 0;

	raw_spin_lock_irqsave(&rnp->lock, flags);
	if (list_empty(&rnp->blkd_tasks))
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	else {
		rnp->exp_tasks = rnp->blkd_tasks.next;
		rcu_initiate_boost(rnp, flags);  /* releases rnp->lock */
		must_wait = 1;
	}
	if (!must_wait)
		rcu_report_exp_rnp(rsp, rnp);
}
/*
 * Wait for an rcu-preempt grace period, but expedite it.  The basic idea
 * is to invoke synchronize_sched_expedited() to push all the tasks to
 * the ->blkd_tasks lists and wait for this list to drain.
 */
void synchronize_rcu_expedited(void)
{
	unsigned long flags;
	struct rcu_node *rnp;
	struct rcu_state *rsp = &rcu_preempt_state;
	long snap;
	int trycount = 0;

	smp_mb(); /* Caller's modifications seen first by other CPUs. */
	snap = ACCESS_ONCE(sync_rcu_preempt_exp_count) + 1;
	smp_mb(); /* Above access cannot bleed into critical section. */

	/*
	 * Acquire lock, falling back to synchronize_rcu() if too many
	 * lock-acquisition failures.  Of course, if someone does the
	 * expedited grace period for us, just leave.
	 */
	while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
		if (trycount++ < 10)
			udelay(trycount * num_online_cpus());
		else {
			synchronize_rcu();
			return;
		}
		if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
			goto mb_ret;  /* Others did our work for us. */
	}
	if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
		goto unlock_mb_ret;  /* Others did our work for us. */

	/* force all RCU readers onto ->blkd_tasks lists. */
	synchronize_sched_expedited();

	raw_spin_lock_irqsave(&rsp->onofflock, flags);

	/* Initialize ->expmask for all non-leaf rcu_node structures. */
	rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) {
		raw_spin_lock(&rnp->lock);  /* irqs already disabled. */
		rnp->expmask = rnp->qsmaskinit;
		raw_spin_unlock(&rnp->lock);  /* irqs remain disabled. */
	}

	/* Snapshot current state of ->blkd_tasks lists. */
	rcu_for_each_leaf_node(rsp, rnp)
		sync_rcu_preempt_exp_init(rsp, rnp);
	if (NUM_RCU_NODES > 1)
		sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp));

	raw_spin_unlock_irqrestore(&rsp->onofflock, flags);

	/* Wait for snapshotted ->blkd_tasks lists to drain. */
	rnp = rcu_get_root(rsp);
	wait_event(sync_rcu_preempt_exp_wq,
		   sync_rcu_preempt_exp_done(rnp));

	/* Clean up and exit. */
	smp_mb(); /* ensure expedited GP seen before counter increment. */
	ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
unlock_mb_ret:
	mutex_unlock(&sync_rcu_preempt_exp_mutex);
mb_ret:
	smp_mb(); /* ensure subsequent action seen after grace period. */
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
/*
 * Check to see if there is any immediate preemptible-RCU-related work
 * to be done.
 */
static int rcu_preempt_pending(int cpu)
{
	return __rcu_pending(&rcu_preempt_state,
			     &per_cpu(rcu_preempt_data, cpu));
}

/*
 * Does preemptible RCU need the CPU to stay out of dynticks mode?
 */
static int rcu_preempt_needs_cpu(int cpu)
{
	return !!per_cpu(rcu_preempt_data, cpu).nxtlist;
}

/**
 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
 */
void rcu_barrier(void)
{
	_rcu_barrier(&rcu_preempt_state, call_rcu);
}
EXPORT_SYMBOL_GPL(rcu_barrier);
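
/*
 * Illustrative sketch, disabled at compile time: why a module that uses
 * call_rcu() must invoke rcu_barrier() on its way out.  The exit handler
 * below is hypothetical; without the rcu_barrier() call, callbacks queued
 * by the module could still be pending when its text is unloaded.
 */
#if 0
static void __exit example_exit(void)
{
	/* Module-specific teardown stops queueing new callbacks here. */
	rcu_barrier();	/* wait for all previously queued callbacks to run */
	/* Only now is it safe to destroy caches and unload the module. */
}
#endif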
/*
 * Initialize preemptible RCU's per-CPU data.
 */
static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
{
	rcu_init_percpu_data(cpu, &rcu_preempt_state, 1);
}

/*
 * Move preemptible RCU's callbacks from dying CPU to other online CPU.
 */
static void rcu_preempt_send_cbs_to_online(void)
{
	rcu_send_cbs_to_online(&rcu_preempt_state);
}

/*
 * Initialize preemptible RCU's state structures.
 */
static void __init __rcu_init_preempt(void)
{
	rcu_init_one(&rcu_preempt_state, &rcu_preempt_data);
}

/*
 * Check for a task exiting while in a preemptible-RCU read-side
 * critical section, clean up if so.  No need to issue warnings,
 * as debug_check_no_locks_held() already does this if lockdep
 * is enabled.
 */
void exit_rcu(void)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting == 0)
		return;
	t->rcu_read_lock_nesting = 1;
	__rcu_read_unlock();
}
#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */

static struct rcu_state *rcu_state = &rcu_sched_state;
/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	printk(KERN_INFO "Hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
	return rcu_batches_completed_sched();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Force a quiescent state for RCU, which, because there is no preemptible
 * RCU, becomes the same as rcu-sched.
 */
void rcu_force_quiescent_state(void)
{
	rcu_sched_force_quiescent_state();
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

/*
 * Because preemptible RCU does not exist, we never have to check for
 * CPUs being in quiescent states.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
}

/*
 * Because preemptible RCU does not exist, there are never any preempted
 * RCU readers.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
	return 0;
}
#ifdef CONFIG_HOTPLUG_CPU

/* Because preemptible RCU does not exist, no quieting of tasks. */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
	return 0;
}

/*
 * Because preemptible RCU does not exist, there is no need to suppress
 * its CPU stall warnings.
 */
static void rcu_preempt_stall_reset(void)
{
}

/*
 * Because there is no preemptible RCU, there can be no readers blocked,
 * so there is no need to check for blocked tasks.  So check only for
 * bogus qsmask values.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rnp->qsmask);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptible RCU does not exist, it never needs to migrate
 * tasks that were blocked within RCU read-side critical sections, and
 * such non-existent tasks cannot possibly have been blocking the current
 * grace period.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
				     struct rcu_node *rnp,
				     struct rcu_data *rdp)
{
	return 0;
}

/*
 * Because preemptible RCU does not exist, it never needs CPU-offline
 * processing.
 */
static void rcu_preempt_offline_cpu(int cpu)
{
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to check.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
}

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to process.
 */
static void rcu_preempt_process_callbacks(void)
{
}

/*
 * Wait for an rcu-preempt grace period, but make it happen quickly.
 * But because preemptible RCU does not exist, map to rcu-sched.
 */
void synchronize_rcu_expedited(void)
{
	synchronize_sched_expedited();
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptible RCU does not exist, there is never any need to
 * report on tasks preempted in RCU read-side critical sections during
 * expedited RCU grace periods.
 */
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
{
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, it never has any work to do.
 */
static int rcu_preempt_pending(int cpu)
{
	return 0;
}

/*
 * Because preemptible RCU does not exist, it never needs any CPU.
 */
static int rcu_preempt_needs_cpu(int cpu)
{
	return 0;
}

/*
 * Because preemptible RCU does not exist, rcu_barrier() is just
 * another name for rcu_barrier_sched().
 */
void rcu_barrier(void)
{
	rcu_barrier_sched();
}
EXPORT_SYMBOL_GPL(rcu_barrier);

/*
 * Because preemptible RCU does not exist, there is no per-CPU
 * data to initialize.
 */
static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
{
}

/*
 * Because there is no preemptible RCU, there are no callbacks to move.
 */
static void rcu_preempt_send_cbs_to_online(void)
{
}

/*
 * Because preemptible RCU does not exist, it need not be initialized.
 */
static void __init __rcu_init_preempt(void)
{
}

#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
#ifdef CONFIG_RCU_BOOST

#include "rtmutex_common.h"

#ifdef CONFIG_RCU_TRACE

static void rcu_initiate_boost_trace(struct rcu_node *rnp)
{
	if (list_empty(&rnp->blkd_tasks))
		rnp->n_balk_blkd_tasks++;
	else if (rnp->exp_tasks == NULL && rnp->gp_tasks == NULL)
		rnp->n_balk_exp_gp_tasks++;
	else if (rnp->gp_tasks != NULL && rnp->boost_tasks != NULL)
		rnp->n_balk_boost_tasks++;
	else if (rnp->gp_tasks != NULL && rnp->qsmask != 0)
		rnp->n_balk_notblocked++;
	else if (rnp->gp_tasks != NULL &&
		 ULONG_CMP_LT(jiffies, rnp->boost_time))
		rnp->n_balk_notyet++;
	else
		rnp->n_balk_nos++;
}

#else /* #ifdef CONFIG_RCU_TRACE */

static void rcu_initiate_boost_trace(struct rcu_node *rnp)
{
}

#endif /* #else #ifdef CONFIG_RCU_TRACE */

static struct lock_class_key rcu_boost_class;
/*
 * Carry out RCU priority boosting on the task indicated by ->exp_tasks
 * or ->boost_tasks, advancing the pointer to the next task in the
 * ->blkd_tasks list.
 *
 * Note that irqs must be enabled: boosting the task can block.
 * Returns 1 if there are more tasks needing to be boosted.
 */
static int rcu_boost(struct rcu_node *rnp)
{
	unsigned long flags;
	struct rt_mutex mtx;
	struct task_struct *t;
	struct list_head *tb;

	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL)
		return 0;  /* Nothing left to boost. */

	raw_spin_lock_irqsave(&rnp->lock, flags);

	/*
	 * Recheck under the lock: all tasks in need of boosting
	 * might exit their RCU read-side critical sections on their own.
	 */
	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return 0;
	}

	/*
	 * Preferentially boost tasks blocking expedited grace periods.
	 * This cannot starve the normal grace periods because a second
	 * expedited grace period must boost all blocked tasks, including
	 * those blocking the pre-existing normal grace period.
	 */
	if (rnp->exp_tasks != NULL) {
		tb = rnp->exp_tasks;
		rnp->n_exp_boosts++;
	} else {
		tb = rnp->boost_tasks;
		rnp->n_normal_boosts++;
	}
	rnp->n_tasks_boosted++;

	/*
	 * We boost task t by manufacturing an rt_mutex that appears to
	 * be held by task t.  We leave a pointer to that rt_mutex where
	 * task t can find it, and task t will release the mutex when it
	 * exits its outermost RCU read-side critical section.  Then
	 * simply acquiring this artificial rt_mutex will boost task
	 * t's priority.  (Thanks to tglx for suggesting this approach!)
	 *
	 * Note that task t must acquire rnp->lock to remove itself from
	 * the ->blkd_tasks list, which it will do from exit() if from
	 * nowhere else.  We therefore are guaranteed that task t will
	 * stay around at least until we drop rnp->lock.  Note that
	 * rnp->lock also resolves races between our priority boosting
	 * and task t's exiting its outermost RCU read-side critical
	 * section.
	 */
	t = container_of(tb, struct task_struct, rcu_node_entry);
	rt_mutex_init_proxy_locked(&mtx, t);
	/* Avoid lockdep false positives.  This rt_mutex is its own thing. */
	lockdep_set_class_and_name(&mtx.wait_lock, &rcu_boost_class,
				   "rcu_boost_mutex");
	t->rcu_boost_mutex = &mtx;
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
	rt_mutex_lock(&mtx);  /* Side effect: boosts task t's priority. */
	rt_mutex_unlock(&mtx);  /* Keep lockdep happy. */
	local_irq_restore(flags);

	return rnp->exp_tasks != NULL || rnp->boost_tasks != NULL;
}
/*
 * Timer handler to initiate waking up of boost kthreads that
 * have yielded the CPU due to excessive numbers of tasks to
 * boost.  We wake up the per-rcu_node kthread, which in turn
 * will wake up the booster kthread.
 */
static void rcu_boost_kthread_timer(unsigned long arg)
{
	invoke_rcu_node_kthread((struct rcu_node *)arg);
}

/*
 * Priority-boosting kthread.  One per leaf rcu_node and one for the
 * root rcu_node.
 */
static int rcu_boost_kthread(void *arg)
{
	struct rcu_node *rnp = (struct rcu_node *)arg;
	int spincnt = 0;
	int more2boost;

	trace_rcu_utilization("Start boost kthread@init");
	for (;;) {
		rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
		trace_rcu_utilization("End boost kthread@rcu_wait");
		rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
		trace_rcu_utilization("Start boost kthread@rcu_wait");
		rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
		more2boost = rcu_boost(rnp);
		if (more2boost)
			spincnt++;
		else
			spincnt = 0;
		if (spincnt > 10) {
			trace_rcu_utilization("End boost kthread@rcu_yield");
			rcu_yield(rcu_boost_kthread_timer, (unsigned long)rnp);
			trace_rcu_utilization("Start boost kthread@rcu_yield");
			spincnt = 0;
		}
	}
	/* NOTREACHED */
	trace_rcu_utilization("End boost kthread@notreached");
	return 0;
}
/*
 * Check to see if it is time to start boosting RCU readers that are
 * blocking the current grace period, and, if so, tell the per-rcu_node
 * kthread to start boosting them.  If there is an expedited grace
 * period in progress, it is always time to boost.
 *
 * The caller must hold rnp->lock, which this function releases,
 * but irqs remain disabled.  The ->boost_kthread_task is immortal,
 * so we don't need to worry about it going away.
 */
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
{
	struct task_struct *t;

	if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
		rnp->n_balk_exp_gp_tasks++;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;
	}
	if (rnp->exp_tasks != NULL ||
	    (rnp->gp_tasks != NULL &&
	     rnp->boost_tasks == NULL &&
	     rnp->qsmask == 0 &&
	     ULONG_CMP_GE(jiffies, rnp->boost_time))) {
		if (rnp->exp_tasks == NULL)
			rnp->boost_tasks = rnp->gp_tasks;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		t = rnp->boost_kthread_task;
		if (t != NULL)
			wake_up_process(t);
	} else {
		rcu_initiate_boost_trace(rnp);
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	}
}
/*
 * Wake up the per-CPU kthread to invoke RCU callbacks.
 */
static void invoke_rcu_callbacks_kthread(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__this_cpu_write(rcu_cpu_has_work, 1);
	if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
	    current != __this_cpu_read(rcu_cpu_kthread_task))
		wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
	local_irq_restore(flags);
}
/*
 * Set the affinity of the boost kthread.  The CPU-hotplug locks are
 * held, so no one should be messing with the existence of the boost
 * kthread.
 */
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
					  cpumask_var_t cm)
{
	struct task_struct *t;

	t = rnp->boost_kthread_task;
	if (t != NULL)
		set_cpus_allowed_ptr(rnp->boost_kthread_task, cm);
}

#define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)

/*
 * Do priority-boost accounting for the start of a new grace period.
 */
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
	rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
}
/*
 * Create an RCU-boost kthread for the specified node if one does not
 * already exist.  We only create this kthread for preemptible RCU.
 * Returns zero if all is well, a negated errno otherwise.
 */
static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
						 struct rcu_node *rnp,
						 int rnp_index)
{
	unsigned long flags;
	struct sched_param sp;
	struct task_struct *t;

	if (&rcu_preempt_state != rsp)
		return 0;
	if (rnp->boost_kthread_task != NULL)
		return 0;
	t = kthread_create(rcu_boost_kthread, (void *)rnp,
			   "rcub%d", rnp_index);
	if (IS_ERR(t))
		return PTR_ERR(t);
	raw_spin_lock_irqsave(&rnp->lock, flags);
	rnp->boost_kthread_task = t;
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
	sp.sched_priority = RCU_KTHREAD_PRIO;
	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
	wake_up_process(t);  /* get to TASK_INTERRUPTIBLE quickly. */
	return 0;
}
#ifdef CONFIG_HOTPLUG_CPU

/*
 * Stop the RCU's per-CPU kthread when its CPU goes offline.
 */
static void rcu_stop_cpu_kthread(int cpu)
{
	struct task_struct *t;

	/* Stop the CPU's kthread. */
	t = per_cpu(rcu_cpu_kthread_task, cpu);
	if (t != NULL) {
		per_cpu(rcu_cpu_kthread_task, cpu) = NULL;
		kthread_stop(t);
	}
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */
static void rcu_kthread_do_work(void)
{
	rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data));
	rcu_do_batch(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
	rcu_preempt_do_callbacks();
}

/*
 * Wake up the specified per-rcu_node-structure kthread.
 * Because the per-rcu_node kthreads are immortal, we don't need
 * to do anything to keep them alive.
 */
static void invoke_rcu_node_kthread(struct rcu_node *rnp)
{
	struct task_struct *t;

	t = rnp->node_kthread_task;
	if (t != NULL)
		wake_up_process(t);
}
/*
 * Set the specified CPU's kthread to run RT or not, as specified by
 * the to_rt argument.  The CPU-hotplug locks are held, so the task
 * is not going away.
 */
static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
{
	int policy;
	struct sched_param sp;
	struct task_struct *t;

	t = per_cpu(rcu_cpu_kthread_task, cpu);
	if (t == NULL)
		return;
	if (to_rt) {
		policy = SCHED_FIFO;
		sp.sched_priority = RCU_KTHREAD_PRIO;
	} else {
		policy = SCHED_NORMAL;
		sp.sched_priority = 0;
	}
	sched_setscheduler_nocheck(t, policy, &sp);
}
/*
 * Timer handler to initiate the waking up of per-CPU kthreads that
 * have yielded the CPU due to excess numbers of RCU callbacks.
 * We wake up the per-rcu_node kthread, which in turn will wake up
 * the booster kthread.
 */
static void rcu_cpu_kthread_timer(unsigned long arg)
{
	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg);
	struct rcu_node *rnp = rdp->mynode;

	atomic_or(rdp->grpmask, &rnp->wakemask);
	invoke_rcu_node_kthread(rnp);
}
/*
 * Drop to non-real-time priority and yield, but only after posting a
 * timer that will cause us to regain our real-time priority if we
 * remain preempted.  Either way, we restore our real-time priority
 * before returning.
 */
static void rcu_yield(void (*f)(unsigned long), unsigned long arg)
{
	struct sched_param sp;
	struct timer_list yield_timer;

	setup_timer_on_stack(&yield_timer, f, arg);
	mod_timer(&yield_timer, jiffies + 2);
	sp.sched_priority = 0;
	sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp);
	set_user_nice(current, 19);
	schedule();
	sp.sched_priority = RCU_KTHREAD_PRIO;
	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
	del_timer(&yield_timer);
}
/*
 * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU.
 * This can happen while the corresponding CPU is either coming online
 * or going offline.  We cannot wait until the CPU is fully online
 * before starting the kthread, because the various notifier functions
 * can wait for RCU grace periods.  So we park rcu_cpu_kthread() until
 * the corresponding CPU is online.
 *
 * Return 1 if the kthread needs to stop, 0 otherwise.
 *
 * Caller must disable bh.  This function can momentarily enable it.
 */
static int rcu_cpu_kthread_should_stop(int cpu)
{
	while (cpu_is_offline(cpu) ||
	       !cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)) ||
	       smp_processor_id() != cpu) {
		if (kthread_should_stop())
			return 1;
		per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
		per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id();
		local_bh_enable();
		schedule_timeout_uninterruptible(1);
		if (!cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)))
			set_cpus_allowed_ptr(current, cpumask_of(cpu));
		local_bh_disable();
	}
	per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
	return 0;
}
/*
 * Per-CPU kernel thread that invokes RCU callbacks.  This replaces the
 * RCU softirq used in flavors and configurations of RCU that do not
 * support RCU priority boosting.
 */
static int rcu_cpu_kthread(void *arg)
{
	int cpu = (int)(long)arg;
	unsigned long flags;
	int spincnt = 0;
	unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu);
	char work;
	char *workp = &per_cpu(rcu_cpu_has_work, cpu);

	trace_rcu_utilization("Start CPU kthread@init");
	for (;;) {
		*statusp = RCU_KTHREAD_WAITING;
		trace_rcu_utilization("End CPU kthread@rcu_wait");
		rcu_wait(*workp != 0 || kthread_should_stop());
		trace_rcu_utilization("Start CPU kthread@rcu_wait");
		local_bh_disable();
		if (rcu_cpu_kthread_should_stop(cpu)) {
			local_bh_enable();
			break;
		}
		*statusp = RCU_KTHREAD_RUNNING;
		per_cpu(rcu_cpu_kthread_loops, cpu)++;
		local_irq_save(flags);
		work = *workp;
		*workp = 0;
		local_irq_restore(flags);
		if (work)
			rcu_kthread_do_work();
		local_bh_enable();
		if (*workp != 0)
			spincnt++;
		else
			spincnt = 0;
		if (spincnt > 10) {
			*statusp = RCU_KTHREAD_YIELDING;
			trace_rcu_utilization("End CPU kthread@rcu_yield");
			rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu);
			trace_rcu_utilization("Start CPU kthread@rcu_yield");
			spincnt = 0;
		}
	}
	*statusp = RCU_KTHREAD_STOPPED;
	trace_rcu_utilization("End CPU kthread@term");
	return 0;
}
/*
 * Spawn a per-CPU kthread, setting up affinity and priority.
 * Because the CPU hotplug lock is held, no other CPU will be attempting
 * to manipulate rcu_cpu_kthread_task.  There might be another CPU
 * attempting to access it during boot, but the locking in kthread_bind()
 * will enforce sufficient ordering.
 *
 * Please note that we cannot simply refuse to wake up the per-CPU
 * kthread because kthreads are created in TASK_UNINTERRUPTIBLE state,
 * which can result in softlockup complaints if the task ends up being
 * idle for more than a couple of minutes.
 *
 * However, please note also that we cannot bind the per-CPU kthread to its
 * CPU until that CPU is fully online.  We also cannot wait until the
 * CPU is fully online before we create its per-CPU kthread, as this would
 * deadlock the system when CPU notifiers tried waiting for grace
 * periods.  So we bind the per-CPU kthread to its CPU only if the CPU
 * is online.  If its CPU is not yet fully online, then the code in
 * rcu_cpu_kthread() will wait until it is fully online, and then do
 * the binding.
 */
static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
{
	struct sched_param sp;
	struct task_struct *t;

	if (!rcu_scheduler_fully_active ||
	    per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
		return 0;
	t = kthread_create_on_node(rcu_cpu_kthread,
				   (void *)(long)cpu,
				   cpu_to_node(cpu),
				   "rcuc%d", cpu);
	if (IS_ERR(t))
		return PTR_ERR(t);
	if (cpu_online(cpu))
		kthread_bind(t, cpu);
	per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
	WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
	sp.sched_priority = RCU_KTHREAD_PRIO;
	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
	per_cpu(rcu_cpu_kthread_task, cpu) = t;
	wake_up_process(t);  /* Get to TASK_INTERRUPTIBLE quickly. */
	return 0;
}
/*
 * Per-rcu_node kthread, which is in charge of waking up the per-CPU
 * kthreads when needed.  We ignore requests to wake up kthreads
 * for offline CPUs, which is OK because force_quiescent_state()
 * takes care of this case.
 */
static int rcu_node_kthread(void *arg)
{
	int cpu;
	unsigned long flags;
	unsigned long mask;
	struct rcu_node *rnp = (struct rcu_node *)arg;
	struct sched_param sp;
	struct task_struct *t;

	for (;;) {
		rnp->node_kthread_status = RCU_KTHREAD_WAITING;
		rcu_wait(atomic_read(&rnp->wakemask) != 0);
		rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
		raw_spin_lock_irqsave(&rnp->lock, flags);
		smp_mb();  /* Work around some architectures' weak impls. */
		mask = atomic_xchg(&rnp->wakemask, 0);
		smp_mb();  /* Work around some architectures' weak impls. */
		rcu_initiate_boost(rnp, flags);  /* releases rnp->lock. */
		for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) {
			if ((mask & 0x1) == 0)
				continue;
			preempt_disable();
			t = per_cpu(rcu_cpu_kthread_task, cpu);
			if (!cpu_online(cpu) || t == NULL) {
				preempt_enable();
				continue;
			}
			per_cpu(rcu_cpu_has_work, cpu) = 1;
			sp.sched_priority = RCU_KTHREAD_PRIO;
			sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
			preempt_enable();
		}
	}
	/* NOTREACHED */
	rnp->node_kthread_status = RCU_KTHREAD_STOPPED;
	return 0;
}
/*
 * Set the per-rcu_node kthread's affinity to cover all CPUs that are
 * served by the rcu_node in question.  The CPU hotplug lock is still
 * held, so the value of rnp->qsmaskinit will be stable.
 *
 * We don't include outgoingcpu in the affinity set, use -1 if there is
 * no outgoing CPU.  If there are no CPUs left in the affinity set,
 * this function allows the kthread to execute on any CPU.
 */
static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
	cpumask_var_t cm;
	int cpu;
	unsigned long mask = rnp->qsmaskinit;

	if (rnp->node_kthread_task == NULL)
		return;
	if (!alloc_cpumask_var(&cm, GFP_KERNEL))
		return;
	cpumask_clear(cm);
	for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
		if ((mask & 0x1) && cpu != outgoingcpu)
			cpumask_set_cpu(cpu, cm);
	if (cpumask_weight(cm) == 0) {
		cpumask_setall(cm);
		for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
			cpumask_clear_cpu(cpu, cm);
		WARN_ON_ONCE(cpumask_weight(cm) == 0);
	}
	set_cpus_allowed_ptr(rnp->node_kthread_task, cm);
	rcu_boost_kthread_setaffinity(rnp, cm);
	free_cpumask_var(cm);
}
/*
 * Spawn a per-rcu_node kthread, setting priority and affinity.
 * Called during boot before online/offline can happen, or, if
 * during runtime, with the main CPU-hotplug locks held.  So only
 * one of these can be executing at a time.
 */
static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
						struct rcu_node *rnp)
{
	unsigned long flags;
	int rnp_index = rnp - &rsp->node[0];
	struct sched_param sp;
	struct task_struct *t;

	if (!rcu_scheduler_fully_active ||
	    rnp->qsmaskinit == 0)
		return 0;
	if (rnp->node_kthread_task == NULL) {
		t = kthread_create(rcu_node_kthread, (void *)rnp,
				   "rcun%d", rnp_index);
		if (IS_ERR(t))
			return PTR_ERR(t);
		raw_spin_lock_irqsave(&rnp->lock, flags);
		rnp->node_kthread_task = t;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		sp.sched_priority = 99;
		sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
		wake_up_process(t);  /* get to TASK_INTERRUPTIBLE quickly. */
	}
	return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index);
}
/*
 * Spawn all kthreads -- called as soon as the scheduler is running.
 */
static int __init rcu_spawn_kthreads(void)
{
	int cpu;
	struct rcu_node *rnp;

	rcu_scheduler_fully_active = 1;
	for_each_possible_cpu(cpu) {
		per_cpu(rcu_cpu_has_work, cpu) = 0;
		if (cpu_online(cpu))
			(void)rcu_spawn_one_cpu_kthread(cpu);
	}
	rnp = rcu_get_root(rcu_state);
	(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
	if (NUM_RCU_NODES > 1) {
		rcu_for_each_leaf_node(rcu_state, rnp)
			(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
	}
	return 0;
}
early_initcall(rcu_spawn_kthreads);
static void __cpuinit rcu_prepare_kthreads(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
	struct rcu_node *rnp = rdp->mynode;

	/* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
	if (rcu_scheduler_fully_active) {
		(void)rcu_spawn_one_cpu_kthread(cpu);
		if (rnp->node_kthread_task == NULL)
			(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
	}
}
#else /* #ifdef CONFIG_RCU_BOOST */

static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

static void invoke_rcu_callbacks_kthread(void)
{
	WARN_ON_ONCE(1);
}

static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
}

#ifdef CONFIG_HOTPLUG_CPU

static void rcu_stop_cpu_kthread(int cpu)
{
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
}

static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
{
}

static int __init rcu_scheduler_really_started(void)
{
	rcu_scheduler_fully_active = 1;
	return 0;
}
early_initcall(rcu_scheduler_really_started);

static void __cpuinit rcu_prepare_kthreads(int cpu)
{
}

#endif /* #else #ifdef CONFIG_RCU_BOOST */
#ifndef CONFIG_SMP

void synchronize_sched_expedited(void)
{
}
EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
#else /* #ifndef CONFIG_SMP */

static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
static int synchronize_sched_expedited_cpu_stop(void *data)
{
	/*
	 * There must be a full memory barrier on each affected CPU
	 * between the time that try_stop_cpus() is called and the
	 * time that it returns.
	 *
	 * In the current initial implementation of cpu_stop, the
	 * above condition is already met when the control reaches
	 * this point and the following smp_mb() is not strictly
	 * necessary.  Do smp_mb() anyway for documentation and
	 * robustness against future implementation changes.
	 */
	smp_mb(); /* See above comment block. */
	return 0;
}
/*
 * Wait for an rcu-sched grace period to elapse, but use "big hammer"
 * approach to force grace period to end quickly.  This consumes
 * significant time on all CPUs, and is thus not recommended for
 * any sort of common-case code.
 *
 * Note that it is illegal to call this function while holding any
 * lock that is acquired by a CPU-hotplug notifier.  Failing to
 * observe this restriction will result in deadlock.
 *
 * This implementation can be thought of as an application of ticket
 * locking to RCU, with sync_sched_expedited_started and
 * sync_sched_expedited_done taking on the roles of the halves
 * of the ticket-lock word.  Each task atomically increments
 * sync_sched_expedited_started upon entry, snapshotting the old value,
 * then attempts to stop all the CPUs.  If this succeeds, then each
 * CPU will have executed a context switch, resulting in an RCU-sched
 * grace period.  We are then done, so we use atomic_cmpxchg() to
 * update sync_sched_expedited_done to match our snapshot -- but
 * only if someone else has not already advanced past our snapshot.
 *
 * On the other hand, if try_stop_cpus() fails, we check the value
 * of sync_sched_expedited_done.  If it has advanced past our
 * initial snapshot, then someone else must have forced a grace period
 * some time after we took our snapshot.  In this case, our work is
 * done for us, and we can simply return.  Otherwise, we try again,
 * but keep our initial snapshot for purposes of checking for someone
 * doing our work for us.
 *
 * If we fail too many times in a row, we fall back to synchronize_sched().
 */
void synchronize_sched_expedited(void)
{
	int firstsnap, s, snap, trycount = 0;

	/* Note that atomic_inc_return() implies full memory barrier. */
	smp_mb();  /* Work around some architectures' weak impls. */
	firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
	smp_mb();  /* Work around some architectures' weak impls. */
	get_online_cpus();

	/*
	 * Each pass through the following loop attempts to force a
	 * context switch on each CPU.
	 */
	while (try_stop_cpus(cpu_online_mask,
			     synchronize_sched_expedited_cpu_stop,
			     NULL) == -EAGAIN) {
		put_online_cpus();

		/* No joy, try again later.  Or just synchronize_sched(). */
		if (trycount++ < 10)
			udelay(trycount * num_online_cpus());
		else {
			synchronize_sched();
			return;
		}

		/* Check to see if someone else did our work for us. */
		s = atomic_read(&sync_sched_expedited_done);
		if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
			smp_mb(); /* ensure test happens before caller kfree */
			return;
		}

		/*
		 * Refetching sync_sched_expedited_started allows later
		 * callers to piggyback on our grace period.  We subtract
		 * 1 to get the same token that the last incrementer got.
		 * We retry after they started, so our grace period works
		 * for them, and they started after our first try, so their
		 * grace period works for us.
		 */
		get_online_cpus();
		snap = atomic_read(&sync_sched_expedited_started) - 1;
		smp_mb(); /* ensure read is before try_stop_cpus(). */
	}

	/*
	 * Everyone up to our most recent fetch is covered by our grace
	 * period.  Update the counter, but only if our work is still
	 * relevant -- which it won't be if someone who started later
	 * than we did beat us to the punch.
	 */
	do {
		s = atomic_read(&sync_sched_expedited_done);
		if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
			smp_mb(); /* ensure test happens before caller kfree */
			break;
		}
	} while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);

	put_online_cpus();
}
EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
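
/*
 * Illustrative sketch, disabled at compile time: the started/done counter
 * handshake described in the comment above, boiled down to a hypothetical
 * do_work_once() helper.  work_started/work_done play the roles of
 * sync_sched_expedited_started/_done, and expensive_operation() stands in
 * for the try_stop_cpus() pass; all of these names are made up, and the
 * retry/fallback logic of the real function is deliberately omitted.
 */
#if 0
static atomic_t work_started = ATOMIC_INIT(0);
static atomic_t work_done = ATOMIC_INIT(0);

static void expensive_operation(void)
{
	/* Hypothetical stand-in for stopping all CPUs. */
}

static void do_work_once(void)
{
	int s, snap;

	/* Take a ticket; atomic_inc_return() implies a full barrier. */
	snap = atomic_inc_return(&work_started);

	/* If the done counter already passed our ticket, we are covered. */
	s = atomic_read(&work_done);
	if (UINT_CMP_GE((unsigned)s, (unsigned)snap))
		return;

	expensive_operation();

	/* Advance the done counter, but never move it backwards. */
	do {
		s = atomic_read(&work_done);
		if (UINT_CMP_GE((unsigned)s, (unsigned)snap))
			break;	/* a later caller already covered us */
	} while (atomic_cmpxchg(&work_done, s, snap) != s);
}
#endif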
#endif /* #else #ifndef CONFIG_SMP */

#if !defined(CONFIG_RCU_FAST_NO_HZ)

/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
 * 1 if so.  This function is part of the RCU implementation; it is -not-
 * an exported member of the RCU API.
 *
 * Because we have preemptible RCU, just check whether this CPU needs
 * any flavor of RCU.  Do not chew up lots of CPU cycles with preemption
 * disabled in a most-likely vain attempt to cause RCU not to need this CPU.
 */
int rcu_needs_cpu(int cpu)
{
	return rcu_needs_cpu_quick_check(cpu);
}

/*
 * Check to see if we need to continue a callback-flush operation to
 * allow the last CPU to enter dyntick-idle mode.  But fast dyntick-idle
 * entry is not configured, so we never do need to.
 */
static void rcu_needs_cpu_flush(void)
{
}

#else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */

#define RCU_NEEDS_CPU_FLUSHES 5
static DEFINE_PER_CPU(int, rcu_dyntick_drain);
static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);
/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
 * 1 if so.  This function is part of the RCU implementation; it is -not-
 * an exported member of the RCU API.
 *
 * Because we are not supporting preemptible RCU, attempt to accelerate
 * any current grace periods so that RCU no longer needs this CPU, but
 * only if all other CPUs are already in dynticks-idle mode.  This will
 * allow the CPU cores to be powered down immediately, as opposed to after
 * waiting many milliseconds for grace periods to elapse.
 *
 * Because it is not legal to invoke rcu_process_callbacks() with irqs
 * disabled, we do one pass of force_quiescent_state(), then do an
 * invoke_rcu_core() to cause rcu_process_callbacks() to be invoked
 * later.  The per-cpu rcu_dyntick_drain variable controls the sequencing.
 */
int rcu_needs_cpu(int cpu)
{
	int c = 0;
	int snap;
	int thatcpu;

	/* Check for being in the holdoff period. */
	if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies)
		return rcu_needs_cpu_quick_check(cpu);

	/* Don't bother unless we are the last non-dyntick-idle CPU. */
	for_each_online_cpu(thatcpu) {
		if (thatcpu == cpu)
			continue;
		smp_mb();  /* Work around some architectures' weak impls. */
		snap = atomic_add_return(0, &per_cpu(rcu_dynticks,
						     thatcpu).dynticks);
		smp_mb();  /* Work around some architectures' weak impls. */
		if ((snap & 0x1) != 0) {
			per_cpu(rcu_dyntick_drain, cpu) = 0;
			per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
			return rcu_needs_cpu_quick_check(cpu);
		}
	}

	/* Check and update the rcu_dyntick_drain sequencing. */
	if (per_cpu(rcu_dyntick_drain, cpu) <= 0) {
		/* First time through, initialize the counter. */
		per_cpu(rcu_dyntick_drain, cpu) = RCU_NEEDS_CPU_FLUSHES;
	} else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) {
		/* We have hit the limit, so time to give up. */
		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
		return rcu_needs_cpu_quick_check(cpu);
	}

	/* Do one step pushing remaining RCU callbacks through. */
	if (per_cpu(rcu_sched_data, cpu).nxtlist) {
		rcu_sched_qs(cpu);
		force_quiescent_state(&rcu_sched_state, 0);
		c = c || per_cpu(rcu_sched_data, cpu).nxtlist;
	}
	if (per_cpu(rcu_bh_data, cpu).nxtlist) {
		rcu_bh_qs(cpu);
		force_quiescent_state(&rcu_bh_state, 0);
		c = c || per_cpu(rcu_bh_data, cpu).nxtlist;
	}

	/* If RCU callbacks are still pending, RCU still needs this CPU. */
	if (c)
		invoke_rcu_core();
	return c;
}
/*
 * Check to see if we need to continue a callback-flush operation to
 * allow the last CPU to enter dyntick-idle mode.
 */
static void rcu_needs_cpu_flush(void)
{
	int cpu = smp_processor_id();
	unsigned long flags;

	if (per_cpu(rcu_dyntick_drain, cpu) <= 0)
		return;
	local_irq_save(flags);
	(void)rcu_needs_cpu(cpu);
	local_irq_restore(flags);
}

#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */