/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright Red Hat, 2009
 * Copyright IBM Corporation, 2009
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/delay.h>

#define RCU_KTHREAD_PRIO 1

#ifdef CONFIG_RCU_BOOST
#define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO
#else
#define RCU_BOOST_PRIO RCU_KTHREAD_PRIO
#endif
/*
 * Check the RCU kernel configuration parameters and print informative
 * messages about anything out of the ordinary.  If you like #ifdef, you
 * will love this function.
 */
static void __init rcu_bootup_announce_oddness(void)
{
#ifdef CONFIG_RCU_TRACE
	printk(KERN_INFO "\tRCU debugfs-based tracing is enabled.\n");
#endif
#if (defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 64) || (!defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32)
	printk(KERN_INFO
	       "\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
	       CONFIG_RCU_FANOUT);
#endif
#ifdef CONFIG_RCU_FANOUT_EXACT
	printk(KERN_INFO "\tHierarchical RCU autobalancing is disabled.\n");
#endif
#ifdef CONFIG_RCU_FAST_NO_HZ
	printk(KERN_INFO
	       "\tRCU dyntick-idle grace-period acceleration is enabled.\n");
#endif
#ifdef CONFIG_PROVE_RCU
	printk(KERN_INFO "\tRCU lockdep checking is enabled.\n");
#endif
#ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE
	printk(KERN_INFO "\tRCU torture testing starts during boot.\n");
#endif
#if defined(CONFIG_TREE_PREEMPT_RCU) && !defined(CONFIG_RCU_CPU_STALL_VERBOSE)
	printk(KERN_INFO "\tDump stacks of tasks blocking RCU-preempt GP.\n");
#endif
#if defined(CONFIG_RCU_CPU_STALL_INFO)
	printk(KERN_INFO "\tAdditional per-CPU info printed with stalls.\n");
#endif
#if NUM_RCU_LVL_4 != 0
	printk(KERN_INFO "\tExperimental four-level hierarchy is enabled.\n");
#endif
}
#ifdef CONFIG_TREE_PREEMPT_RCU

struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt);
DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
static struct rcu_state *rcu_state = &rcu_preempt_state;

static void rcu_read_unlock_special(struct task_struct *t);
static int rcu_preempted_readers_exp(struct rcu_node *rnp);
/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	printk(KERN_INFO "Preemptible hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/*
 * Return the number of RCU-preempt batches processed thus far
 * for debug and statistics.
 */
long rcu_batches_completed_preempt(void)
{
	return rcu_preempt_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt);

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
	return rcu_batches_completed_preempt();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Force a quiescent state for preemptible RCU.
 */
void rcu_force_quiescent_state(void)
{
	force_quiescent_state(&rcu_preempt_state, 0);
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
/*
 * Record a preemptible-RCU quiescent state for the specified CPU.  Note
 * that this just means that the task currently running on the CPU is
 * not in a quiescent state.  There might be any number of tasks blocked
 * while in an RCU read-side critical section.
 *
 * Unlike the other rcu_*_qs() functions, callers to this function
 * must disable irqs in order to protect the assignment to
 * ->rcu_read_unlock_special.
 */
static void rcu_preempt_qs(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);

	rdp->passed_quiesce_gpnum = rdp->gpnum;
	barrier();
	if (rdp->passed_quiesce == 0)
		trace_rcu_grace_period("rcu_preempt", rdp->gpnum, "cpuqs");
	rdp->passed_quiesce = 1;
	current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
}
/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from.  If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the blkd_tasks list.
 * The task will dequeue itself when it exits the outermost enclosing
 * RCU read-side critical section.  Therefore, the current grace period
 * cannot be permitted to complete until the blkd_tasks list entries
 * predating the current grace period drain, in other words, until
 * rnp->gp_tasks becomes NULL.
 *
 * Caller must disable preemption.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
	struct task_struct *t = current;
	unsigned long flags;
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	if (t->rcu_read_lock_nesting > 0 &&
	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {

		/* Possibly blocking in an RCU read-side critical section. */
		rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
		rnp = rdp->mynode;
		raw_spin_lock_irqsave(&rnp->lock, flags);
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
		t->rcu_blocked_node = rnp;

		/*
		 * If this CPU has already checked in, then this task
		 * will hold up the next grace period rather than the
		 * current grace period.  Queue the task accordingly.
		 * If the task is queued for the current grace period
		 * (i.e., this CPU has not yet passed through a quiescent
		 * state for the current grace period), then as long
		 * as that task remains queued, the current grace period
		 * cannot end.  Note that there is some uncertainty as
		 * to exactly when the current grace period started.
		 * We take a conservative approach, which can result
		 * in unnecessarily waiting on tasks that started very
		 * slightly after the current grace period began.  C'est
		 * la vie!!!
		 *
		 * But first, note that the current CPU must still be
		 * on line!
		 */
		WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0);
		WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
		if ((rnp->qsmask & rdp->grpmask) && rnp->gp_tasks != NULL) {
			list_add(&t->rcu_node_entry, rnp->gp_tasks->prev);
			rnp->gp_tasks = &t->rcu_node_entry;
#ifdef CONFIG_RCU_BOOST
			if (rnp->boost_tasks != NULL)
				rnp->boost_tasks = rnp->gp_tasks;
#endif /* #ifdef CONFIG_RCU_BOOST */
		} else {
			list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
			if (rnp->qsmask & rdp->grpmask)
				rnp->gp_tasks = &t->rcu_node_entry;
		}
		trace_rcu_preempt_task(rdp->rsp->name,
				       t->pid,
				       (rnp->qsmask & rdp->grpmask)
				       ? rnp->gpnum
				       : rnp->gpnum + 1);
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	} else if (t->rcu_read_lock_nesting < 0 &&
		   t->rcu_read_unlock_special) {

		/*
		 * Complete exit from RCU read-side critical section on
		 * behalf of preempted instance of __rcu_read_unlock().
		 */
		rcu_read_unlock_special(t);
	}

	/*
	 * Either we were not in an RCU read-side critical section to
	 * begin with, or we have now recorded that critical section
	 * globally.  Either way, we can now note a quiescent state
	 * for this CPU.  Again, if we were in an RCU read-side critical
	 * section, and if that critical section was blocking the current
	 * grace period, then the fact that the task has been enqueued
	 * means that we continue to block the current grace period.
	 */
	local_irq_save(flags);
	rcu_preempt_qs(cpu);
	local_irq_restore(flags);
}
/*
 * Tree-preemptible RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting, shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
	current->rcu_read_lock_nesting++;
	barrier();  /* needed if we ever invoke rcu_read_lock in rcutree.c */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);
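
/*
 * Illustrative sketch (not part of this file): a typical reader-side
 * critical section pairing rcu_read_lock() with rcu_read_unlock(),
 * which bottom out in __rcu_read_lock()/__rcu_read_unlock() above.
 * The "gbl_foo" pointer and "struct foo" are hypothetical:
 *
 *	struct foo {
 *		int val;
 *	};
 *	struct foo __rcu *gbl_foo;
 *
 *	int read_foo_val(void)
 *	{
 *		struct foo *p;
 *		int val;
 *
 *		rcu_read_lock();
 *		p = rcu_dereference(gbl_foo);
 *		val = p ? p->val : -1;
 *		rcu_read_unlock();
 *		return val;
 *	}
 */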
/*
 * Check for preempted RCU readers blocking the current grace period
 * for the specified rcu_node structure.  If the caller needs a reliable
 * answer, it must hold the rcu_node's ->lock.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
	return rnp->gp_tasks != NULL;
}
/*
 * Record a quiescent state for all tasks that were previously queued
 * on the specified rcu_node structure and that were blocking the current
 * RCU grace period.  The caller must hold the specified rnp->lock with
 * irqs disabled, and this lock is released upon return, but irqs remain
 * disabled.
 */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	unsigned long mask;
	struct rcu_node *rnp_p;

	if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;  /* Still need more quiescent states! */
	}

	rnp_p = rnp->parent;
	if (rnp_p == NULL) {
		/*
		 * Either there is only one rcu_node in the tree,
		 * or tasks were kicked up to root rcu_node due to
		 * CPUs going offline.
		 */
		rcu_report_qs_rsp(&rcu_preempt_state, flags);
		return;
	}

	/* Report up the rest of the hierarchy. */
	mask = rnp->grpmask;
	raw_spin_unlock(&rnp->lock);	/* irqs remain disabled. */
	raw_spin_lock(&rnp_p->lock);	/* irqs already disabled. */
	rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags);
}
/*
 * Advance a ->blkd_tasks-list pointer to the next entry, instead
 * returning NULL if at the end of the list.
 */
static struct list_head *rcu_next_node_entry(struct task_struct *t,
					     struct rcu_node *rnp)
{
	struct list_head *np;

	np = t->rcu_node_entry.next;
	if (np == &rnp->blkd_tasks)
		np = NULL;
	return np;
}
/*
 * Handle special cases during rcu_read_unlock(), such as needing to
 * notify RCU core processing or task having blocked during the RCU
 * read-side critical section.
 */
static noinline void rcu_read_unlock_special(struct task_struct *t)
{
	int empty;
	int empty_exp;
	int empty_exp_now;
	unsigned long flags;
	struct list_head *np;
#ifdef CONFIG_RCU_BOOST
	struct rt_mutex *rbmp = NULL;
#endif /* #ifdef CONFIG_RCU_BOOST */
	struct rcu_node *rnp;
	int special;

	/* NMI handlers cannot block and cannot safely manipulate state. */
	if (in_nmi())
		return;

	local_irq_save(flags);

	/*
	 * If RCU core is waiting for this CPU to exit critical section,
	 * let it know that we have done so.
	 */
	special = t->rcu_read_unlock_special;
	if (special & RCU_READ_UNLOCK_NEED_QS) {
		rcu_preempt_qs(smp_processor_id());
	}

	/* Hardware IRQ handlers cannot block. */
	if (in_irq() || in_serving_softirq()) {
		local_irq_restore(flags);
		return;
	}

	/* Clean up if blocked during RCU read-side critical section. */
	if (special & RCU_READ_UNLOCK_BLOCKED) {
		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;

		/*
		 * Remove this task from the list it blocked on.  The
		 * task can migrate while we acquire the lock, but at
		 * most one time.  So at most two passes through loop.
		 */
		for (;;) {
			rnp = t->rcu_blocked_node;
			raw_spin_lock(&rnp->lock);  /* irqs already disabled. */
			if (rnp == t->rcu_blocked_node)
				break;
			raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
		}
		empty = !rcu_preempt_blocked_readers_cgp(rnp);
		empty_exp = !rcu_preempted_readers_exp(rnp);
		smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
		np = rcu_next_node_entry(t, rnp);
		list_del_init(&t->rcu_node_entry);
		t->rcu_blocked_node = NULL;
		trace_rcu_unlock_preempted_task("rcu_preempt",
						rnp->gpnum, t->pid);
		if (&t->rcu_node_entry == rnp->gp_tasks)
			rnp->gp_tasks = np;
		if (&t->rcu_node_entry == rnp->exp_tasks)
			rnp->exp_tasks = np;
#ifdef CONFIG_RCU_BOOST
		if (&t->rcu_node_entry == rnp->boost_tasks)
			rnp->boost_tasks = np;
		/* Snapshot/clear ->rcu_boost_mutex with rcu_node lock held. */
		if (t->rcu_boost_mutex) {
			rbmp = t->rcu_boost_mutex;
			t->rcu_boost_mutex = NULL;
		}
#endif /* #ifdef CONFIG_RCU_BOOST */

		/*
		 * If this was the last task on the current list, and if
		 * we aren't waiting on any CPUs, report the quiescent state.
		 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
		 * so we must take a snapshot of the expedited state.
		 */
		empty_exp_now = !rcu_preempted_readers_exp(rnp);
		if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) {
			trace_rcu_quiescent_state_report("preempt_rcu",
							 rnp->gpnum,
							 0, rnp->qsmask,
							 rnp->level,
							 rnp->grplo,
							 rnp->grphi,
							 !!rnp->gp_tasks);
			rcu_report_unblock_qs_rnp(rnp, flags);
		} else
			raw_spin_unlock_irqrestore(&rnp->lock, flags);

#ifdef CONFIG_RCU_BOOST
		/* Unboost if we were boosted. */
		if (rbmp)
			rt_mutex_unlock(rbmp);
#endif /* #ifdef CONFIG_RCU_BOOST */

		/*
		 * If this was the last task on the expedited lists,
		 * then we need to report up the rcu_node hierarchy.
		 */
		if (!empty_exp && empty_exp_now)
			rcu_report_exp_rnp(&rcu_preempt_state, rnp, true);
	} else {
		local_irq_restore(flags);
	}
}
/*
 * Tree-preemptible RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting != 1)
		--t->rcu_read_lock_nesting;
	else {
		barrier();  /* critical section before exit code. */
		t->rcu_read_lock_nesting = INT_MIN;
		barrier();  /* assign before ->rcu_read_unlock_special load */
		if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
			rcu_read_unlock_special(t);
		barrier();  /* ->rcu_read_unlock_special load before assign */
		t->rcu_read_lock_nesting = 0;
	}
#ifdef CONFIG_PROVE_LOCKING
	{
		int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);

		WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
	}
#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);
#ifdef CONFIG_RCU_CPU_STALL_VERBOSE

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period on the specified rcu_node structure.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;

	if (!rcu_preempt_blocked_readers_cgp(rnp))
		return;
	raw_spin_lock_irqsave(&rnp->lock, flags);
	t = list_entry(rnp->gp_tasks,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
		sched_show_task(t);
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
	struct rcu_node *rnp = rcu_get_root(rsp);

	rcu_print_detail_task_stall_rnp(rnp);
	rcu_for_each_leaf_node(rsp, rnp)
		rcu_print_detail_task_stall_rnp(rnp);
}

#else /* #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */

static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */
#ifdef CONFIG_RCU_CPU_STALL_INFO

static void rcu_print_task_stall_begin(struct rcu_node *rnp)
{
	printk(KERN_ERR "\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
	       rnp->level, rnp->grplo, rnp->grphi);
}

static void rcu_print_task_stall_end(void)
{
	printk(KERN_CONT "\n");
}

#else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */

static void rcu_print_task_stall_begin(struct rcu_node *rnp)
{
}

static void rcu_print_task_stall_end(void)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */
/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
	struct task_struct *t;
	int ndetected = 0;

	if (!rcu_preempt_blocked_readers_cgp(rnp))
		return 0;
	rcu_print_task_stall_begin(rnp);
	t = list_entry(rnp->gp_tasks,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		printk(KERN_CONT " P%d", t->pid);
		ndetected++;
	}
	rcu_print_task_stall_end();
	return ndetected;
}
/*
 * Suppress preemptible RCU's CPU stall warnings by pushing the
 * time of the next stall-warning message comfortably far into the
 * future.
 */
static void rcu_preempt_stall_reset(void)
{
	rcu_preempt_state.jiffies_stall = jiffies + ULONG_MAX / 2;
}
/*
 * Check that the list of blocked tasks for the newly completed grace
 * period is in fact empty.  It is a serious bug to complete a grace
 * period that still has RCU readers blocked!  This function must be
 * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
 * must be held by the caller.
 *
 * Also, if there are blocked tasks on the list, they automatically
 * block the newly created grace period, so set up ->gp_tasks accordingly.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
	if (!list_empty(&rnp->blkd_tasks))
		rnp->gp_tasks = rnp->blkd_tasks.next;
	WARN_ON_ONCE(rnp->qsmask);
}
#ifdef CONFIG_HOTPLUG_CPU

/*
 * Handle tasklist migration for case in which all CPUs covered by the
 * specified rcu_node have gone offline.  Move them up to the root
 * rcu_node.  The reason for not just moving them to the immediate
 * parent is to remove the need for rcu_read_unlock_special() to
 * make more than two attempts to acquire the target rcu_node's lock.
 *
 * Returns 1 if there was previously a task blocking the current grace
 * period on the specified rcu_node structure.
 *
 * The caller must hold rnp->lock with irqs disabled.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
				     struct rcu_node *rnp,
				     struct rcu_data *rdp)
{
	struct list_head *lp;
	struct list_head *lp_root;
	int retval = 0;
	struct rcu_node *rnp_root = rcu_get_root(rsp);
	struct task_struct *t;

	if (rnp == rnp_root) {
		WARN_ONCE(1, "Last CPU thought to be offlined?");
		return 0;  /* Shouldn't happen: at least one CPU online. */
	}

	/* If we are on an internal node, complain bitterly. */
	WARN_ON_ONCE(rnp != rdp->mynode);

	/*
	 * Move tasks up to root rcu_node.  Don't try to get fancy for
	 * this corner-case operation -- just put this node's tasks
	 * at the head of the root node's list, and update the root node's
	 * ->gp_tasks and ->exp_tasks pointers to those of this node's,
	 * if non-NULL.  This might result in waiting for more tasks than
	 * absolutely necessary, but this is a good performance/complexity
	 * tradeoff.
	 */
	if (rcu_preempt_blocked_readers_cgp(rnp) && rnp->qsmask == 0)
		retval |= RCU_OFL_TASKS_NORM_GP;
	if (rcu_preempted_readers_exp(rnp))
		retval |= RCU_OFL_TASKS_EXP_GP;
	lp = &rnp->blkd_tasks;
	lp_root = &rnp_root->blkd_tasks;
	while (!list_empty(lp)) {
		t = list_entry(lp->next, typeof(*t), rcu_node_entry);
		raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
		list_del(&t->rcu_node_entry);
		t->rcu_blocked_node = rnp_root;
		list_add(&t->rcu_node_entry, lp_root);
		if (&t->rcu_node_entry == rnp->gp_tasks)
			rnp_root->gp_tasks = rnp->gp_tasks;
		if (&t->rcu_node_entry == rnp->exp_tasks)
			rnp_root->exp_tasks = rnp->exp_tasks;
#ifdef CONFIG_RCU_BOOST
		if (&t->rcu_node_entry == rnp->boost_tasks)
			rnp_root->boost_tasks = rnp->boost_tasks;
#endif /* #ifdef CONFIG_RCU_BOOST */
		raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
	}

#ifdef CONFIG_RCU_BOOST
	/* In case root is being boosted and leaf is not. */
	raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
	if (rnp_root->boost_tasks != NULL &&
	    rnp_root->boost_tasks != rnp_root->gp_tasks)
		rnp_root->boost_tasks = rnp_root->gp_tasks;
	raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
#endif /* #ifdef CONFIG_RCU_BOOST */

	rnp->gp_tasks = NULL;
	rnp->exp_tasks = NULL;
	return retval;
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */
/*
 * Do CPU-offline processing for preemptible RCU.
 */
static void rcu_preempt_cleanup_dead_cpu(int cpu)
{
	rcu_cleanup_dead_cpu(cpu, &rcu_preempt_state);
}

/*
 * Check for a quiescent state from the current CPU.  When a task blocks,
 * the task is recorded in the corresponding CPU's rcu_node structure,
 * which is checked elsewhere.
 *
 * Caller must disable hard irqs.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting == 0) {
		rcu_preempt_qs(cpu);
		return;
	}
	if (t->rcu_read_lock_nesting > 0 &&
	    per_cpu(rcu_preempt_data, cpu).qs_pending)
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
}

/*
 * Process callbacks for preemptible RCU.
 */
static void rcu_preempt_process_callbacks(void)
{
	__rcu_process_callbacks(&rcu_preempt_state,
				&__get_cpu_var(rcu_preempt_data));
}

#ifdef CONFIG_RCU_BOOST

static void rcu_preempt_do_callbacks(void)
{
	rcu_do_batch(&rcu_preempt_state, &__get_cpu_var(rcu_preempt_data));
}

#endif /* #ifdef CONFIG_RCU_BOOST */
/*
 * Queue a preemptible-RCU callback for invocation after a grace period.
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_preempt_state, 0);
}
EXPORT_SYMBOL_GPL(call_rcu);
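
/*
 * Illustrative sketch (not part of this file): the usual call_rcu()
 * pattern embeds an rcu_head in the protected structure and recovers
 * the enclosing structure with container_of() in the callback.  The
 * "struct foo" type and foo_reclaim() function are hypothetical:
 *
 *	struct foo {
 *		struct list_head list;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rcu)
 *	{
 *		struct foo *fp = container_of(rcu, struct foo, rcu);
 *
 *		kfree(fp);
 *	}
 *
 * Then, once fp has been unlinked from all reader-visible structures:
 *
 *	call_rcu(&fp->rcu, foo_reclaim);
 */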
/*
 * Queue an RCU callback for lazy invocation after a grace period.
 * This will likely be later named something like "call_rcu_lazy()",
 * but this change will require some way of tagging the lazy RCU
 * callbacks in the list of pending callbacks.  Until then, this
 * function may only be called from __kfree_rcu().
 */
void kfree_call_rcu(struct rcu_head *head,
		    void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_preempt_state, 1);
}
EXPORT_SYMBOL_GPL(kfree_call_rcu);
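
/*
 * Illustrative sketch (not part of this file): callers do not invoke
 * kfree_call_rcu() directly; they use the kfree_rcu() wrapper, which
 * expands through __kfree_rcu() to this function.  Assuming the
 * hypothetical "struct foo" from the earlier sketch:
 *
 *	list_del_rcu(&fp->list);
 *	kfree_rcu(fp, rcu);
 *
 * The second argument names the rcu_head field within struct foo, and
 * fp is freed only after a grace period has elapsed.
 */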
/**
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  Note, however, that
 * upon return from synchronize_rcu(), the caller might well be executing
 * concurrently with new RCU read-side critical sections that began while
 * synchronize_rcu() was waiting.  RCU read-side critical sections are
 * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
 */
void synchronize_rcu(void)
{
	rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
			   !lock_is_held(&rcu_lock_map) &&
			   !lock_is_held(&rcu_sched_lock_map),
			   "Illegal synchronize_rcu() in RCU read-side critical section");
	if (!rcu_scheduler_active)
		return;
	wait_rcu_gp(call_rcu);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
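
/*
 * Illustrative sketch (not part of this file): the classic update-side
 * pattern that synchronize_rcu() enables.  The "gbl_foo" pointer,
 * "upd_lock" mutex, and struct foo are hypothetical, as in the earlier
 * sketches:
 *
 *	void update_foo(struct foo *newp)
 *	{
 *		struct foo *oldp;
 *
 *		mutex_lock(&upd_lock);
 *		oldp = rcu_dereference_protected(gbl_foo,
 *						 lockdep_is_held(&upd_lock));
 *		rcu_assign_pointer(gbl_foo, newp);
 *		mutex_unlock(&upd_lock);
 *		synchronize_rcu();
 *		kfree(oldp);
 *	}
 *
 * By the time synchronize_rcu() returns, every reader that might have
 * seen oldp has exited its critical section, so the kfree() is safe.
 */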
static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
static long sync_rcu_preempt_exp_count;
static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);

/*
 * Return non-zero if there are any tasks in RCU read-side critical
 * sections blocking the current preemptible-RCU expedited grace period.
 * If there is no preemptible-RCU expedited grace period currently in
 * progress, returns zero unconditionally.
 */
static int rcu_preempted_readers_exp(struct rcu_node *rnp)
{
	return rnp->exp_tasks != NULL;
}

/*
 * return non-zero if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period.  Works only for preemptible
 * RCU -- other RCU implementations use other means.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
{
	return !rcu_preempted_readers_exp(rnp) &&
	       ACCESS_ONCE(rnp->expmask) == 0;
}
/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.  This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree.  (Calm down, calm down, we do the recursion
 * iteratively!)
 *
 * Most callers will set the "wake" flag, but the task initiating the
 * expedited grace period need not wake itself.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
			       bool wake)
{
	unsigned long flags;
	unsigned long mask;

	raw_spin_lock_irqsave(&rnp->lock, flags);
	for (;;) {
		if (!sync_rcu_preempt_exp_done(rnp)) {
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
			break;
		}
		if (rnp->parent == NULL) {
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
			if (wake)
				wake_up(&sync_rcu_preempt_exp_wq);
			break;
		}
		mask = rnp->grpmask;
		raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
		rnp = rnp->parent;
		raw_spin_lock(&rnp->lock); /* irqs already disabled */
		rnp->expmask &= ~mask;
	}
}
/*
 * Snapshot the tasks blocking the newly started preemptible-RCU expedited
 * grace period for the specified rcu_node structure.  If there are no such
 * tasks, report it up the rcu_node hierarchy.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex and rsp->onofflock.
 */
static void
sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
{
	unsigned long flags;
	int must_wait = 0;

	raw_spin_lock_irqsave(&rnp->lock, flags);
	if (list_empty(&rnp->blkd_tasks))
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	else {
		rnp->exp_tasks = rnp->blkd_tasks.next;
		rcu_initiate_boost(rnp, flags);  /* releases rnp->lock */
		must_wait = 1;
	}
	if (!must_wait)
		rcu_report_exp_rnp(rsp, rnp, false); /* Don't wake self. */
}
/**
 * synchronize_rcu_expedited - Brute-force RCU grace period
 *
 * Wait for an RCU-preempt grace period, but expedite it.  The basic
 * idea is to invoke synchronize_sched_expedited() to push all the tasks to
 * the ->blkd_tasks lists and wait for this list to drain.  This consumes
 * significant time on all CPUs and is unfriendly to real-time workloads,
 * so is thus not recommended for any sort of common-case code.
 * In fact, if you are using synchronize_rcu_expedited() in a loop,
 * please restructure your code to batch your updates, and then use a
 * single synchronize_rcu() instead.
 *
 * Note that it is illegal to call this function while holding any lock
 * that is acquired by a CPU-hotplug notifier.  And yes, it is also illegal
 * to call this function from a CPU-hotplug notifier.  Failing to observe
 * these restrictions will result in deadlock.
 */
void synchronize_rcu_expedited(void)
{
	unsigned long flags;
	struct rcu_node *rnp;
	struct rcu_state *rsp = &rcu_preempt_state;
	long snap;
	int trycount = 0;

	smp_mb(); /* Caller's modifications seen first by other CPUs. */
	snap = ACCESS_ONCE(sync_rcu_preempt_exp_count) + 1;
	smp_mb(); /* Above access cannot bleed into critical section. */

	/*
	 * Acquire lock, falling back to synchronize_rcu() if too many
	 * lock-acquisition failures.  Of course, if someone does the
	 * expedited grace period for us, just leave.
	 */
	while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
		if (trycount++ < 10)
			udelay(trycount * num_online_cpus());
		else {
			synchronize_rcu();
			return;
		}
		if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
			goto mb_ret; /* Others did our work for us. */
	}
	if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
		goto unlock_mb_ret; /* Others did our work for us. */

	/* force all RCU readers onto ->blkd_tasks lists. */
	synchronize_sched_expedited();

	raw_spin_lock_irqsave(&rsp->onofflock, flags);

	/* Initialize ->expmask for all non-leaf rcu_node structures. */
	rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) {
		raw_spin_lock(&rnp->lock); /* irqs already disabled. */
		rnp->expmask = rnp->qsmaskinit;
		raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
	}

	/* Snapshot current state of ->blkd_tasks lists. */
	rcu_for_each_leaf_node(rsp, rnp)
		sync_rcu_preempt_exp_init(rsp, rnp);
	if (NUM_RCU_NODES > 1)
		sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp));

	raw_spin_unlock_irqrestore(&rsp->onofflock, flags);

	/* Wait for snapshotted ->blkd_tasks lists to drain. */
	rnp = rcu_get_root(rsp);
	wait_event(sync_rcu_preempt_exp_wq,
		   sync_rcu_preempt_exp_done(rnp));

	/* Clean up and exit. */
	smp_mb(); /* ensure expedited GP seen before counter increment. */
	ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
unlock_mb_ret:
	mutex_unlock(&sync_rcu_preempt_exp_mutex);
mb_ret:
	smp_mb(); /* ensure subsequent action seen after grace period. */
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
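
/*
 * Illustrative sketch (not part of this file) of the batching advice
 * in the header comment above.  The update_one() helper and "nupdates"
 * count are hypothetical.  Instead of:
 *
 *	for (i = 0; i < nupdates; i++) {
 *		update_one(i);
 *		synchronize_rcu_expedited();
 *	}
 *
 * batch the updates and wait for a single grace period:
 *
 *	for (i = 0; i < nupdates; i++)
 *		update_one(i);
 *	synchronize_rcu();
 */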
/*
 * Check to see if there is any immediate preemptible-RCU-related work
 * to be done.
 */
static int rcu_preempt_pending(int cpu)
{
	return __rcu_pending(&rcu_preempt_state,
			     &per_cpu(rcu_preempt_data, cpu));
}

/*
 * Does preemptible RCU have callbacks on this CPU?
 */
static int rcu_preempt_cpu_has_callbacks(int cpu)
{
	return !!per_cpu(rcu_preempt_data, cpu).nxtlist;
}
/**
 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
 */
void rcu_barrier(void)
{
	_rcu_barrier(&rcu_preempt_state, call_rcu);
}
EXPORT_SYMBOL_GPL(rcu_barrier);
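
/*
 * Illustrative sketch (not part of this file): a module that posts
 * call_rcu() callbacks must wait for them to be invoked before its
 * callback functions can be unloaded, which is what rcu_barrier()
 * provides.  The foo_stop_new_callbacks() helper is hypothetical:
 *
 *	static void __exit foo_exit(void)
 *	{
 *		foo_stop_new_callbacks();
 *		rcu_barrier();
 *	}
 *
 * Here foo_stop_new_callbacks() prevents further call_rcu() posting,
 * and rcu_barrier() waits for all previously posted callbacks to be
 * invoked, after which the module text may safely disappear.
 */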
/*
 * Initialize preemptible RCU's per-CPU data.
 */
static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
{
	rcu_init_percpu_data(cpu, &rcu_preempt_state, 1);
}

/*
 * Move preemptible RCU's callbacks from dying CPU to other online CPU
 * and record a quiescent state.
 */
static void rcu_preempt_cleanup_dying_cpu(void)
{
	rcu_cleanup_dying_cpu(&rcu_preempt_state);
}

/*
 * Initialize preemptible RCU's state structures.
 */
static void __init __rcu_init_preempt(void)
{
	rcu_init_one(&rcu_preempt_state, &rcu_preempt_data);
}
/*
 * Check for a task exiting while in a preemptible-RCU read-side
 * critical section, clean up if so.  No need to issue warnings,
 * as debug_check_no_locks_held() already does this if lockdep
 * is enabled.
 */
void exit_rcu(void)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting == 0)
		return;
	t->rcu_read_lock_nesting = 1;
	__rcu_read_unlock();
}
#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */

static struct rcu_state *rcu_state = &rcu_sched_state;

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	printk(KERN_INFO "Hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}
/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
	return rcu_batches_completed_sched();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Force a quiescent state for RCU, which, because there is no preemptible
 * RCU, becomes the same as rcu-sched.
 */
void rcu_force_quiescent_state(void)
{
	rcu_sched_force_quiescent_state();
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

/*
 * Because preemptible RCU does not exist, we never have to check for
 * CPUs being in quiescent states.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
}

/*
 * Because preemptible RCU does not exist, there are never any preempted
 * RCU readers.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
	return 0;
}
#ifdef CONFIG_HOTPLUG_CPU

/* Because preemptible RCU does not exist, no quieting of tasks. */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
	return 0;
}

/*
 * Because preemptible RCU does not exist, there is no need to suppress
 * its CPU stall warnings.
 */
static void rcu_preempt_stall_reset(void)
{
}

/*
 * Because there is no preemptible RCU, there can be no readers blocked,
 * so there is no need to check for blocked tasks.  So check only for
 * bogus qsmask values.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rnp->qsmask);
}
#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptible RCU does not exist, it never needs to migrate
 * tasks that were blocked within RCU read-side critical sections, and
 * such non-existent tasks cannot possibly have been blocking the current
 * grace period.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
				     struct rcu_node *rnp,
				     struct rcu_data *rdp)
{
	return 0;
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, it never needs CPU-offline
 * processing.
 */
static void rcu_preempt_cleanup_dead_cpu(int cpu)
{
}

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to check.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
}

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to process.
 */
static void rcu_preempt_process_callbacks(void)
{
}

/*
 * Queue an RCU callback for lazy invocation after a grace period.
 * This will likely be later named something like "call_rcu_lazy()",
 * but this change will require some way of tagging the lazy RCU
 * callbacks in the list of pending callbacks.  Until then, this
 * function may only be called from __kfree_rcu().
 *
 * Because there is no preemptible RCU, we use RCU-sched instead.
 */
void kfree_call_rcu(struct rcu_head *head,
		    void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_sched_state, 1);
}
EXPORT_SYMBOL_GPL(kfree_call_rcu);
/*
 * Wait for an rcu-preempt grace period, but make it happen quickly.
 * But because preemptible RCU does not exist, map to rcu-sched.
 */
void synchronize_rcu_expedited(void)
{
	synchronize_sched_expedited();
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptible RCU does not exist, there is never any need to
 * report on tasks preempted in RCU read-side critical sections during
 * expedited RCU grace periods.
 */
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
			       bool wake)
{
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, it never has any work to do.
 */
static int rcu_preempt_pending(int cpu)
{
	return 0;
}

/*
 * Because preemptible RCU does not exist, it never has callbacks
 * for any CPU.
 */
static int rcu_preempt_cpu_has_callbacks(int cpu)
{
	return 0;
}

/*
 * Because preemptible RCU does not exist, rcu_barrier() is just
 * another name for rcu_barrier_sched().
 */
void rcu_barrier(void)
{
	rcu_barrier_sched();
}
EXPORT_SYMBOL_GPL(rcu_barrier);

/*
 * Because preemptible RCU does not exist, there is no per-CPU
 * data to initialize.
 */
static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
{
}

/*
 * Because there is no preemptible RCU, there is no cleanup to do.
 */
static void rcu_preempt_cleanup_dying_cpu(void)
{
}

/*
 * Because preemptible RCU does not exist, it need not be initialized.
 */
static void __init __rcu_init_preempt(void)
{
}

#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
#ifdef CONFIG_RCU_BOOST

#include "rtmutex_common.h"

#ifdef CONFIG_RCU_TRACE

static void rcu_initiate_boost_trace(struct rcu_node *rnp)
{
	if (list_empty(&rnp->blkd_tasks))
		rnp->n_balk_blkd_tasks++;
	else if (rnp->exp_tasks == NULL && rnp->gp_tasks == NULL)
		rnp->n_balk_exp_gp_tasks++;
	else if (rnp->gp_tasks != NULL && rnp->boost_tasks != NULL)
		rnp->n_balk_boost_tasks++;
	else if (rnp->gp_tasks != NULL && rnp->qsmask != 0)
		rnp->n_balk_notblocked++;
	else if (rnp->gp_tasks != NULL &&
		 ULONG_CMP_LT(jiffies, rnp->boost_time))
		rnp->n_balk_notyet++;
	else
		rnp->n_balk_nos++;
}

#else /* #ifdef CONFIG_RCU_TRACE */

static void rcu_initiate_boost_trace(struct rcu_node *rnp)
{
}

#endif /* #else #ifdef CONFIG_RCU_TRACE */
/*
 * Carry out RCU priority boosting on the task indicated by ->exp_tasks
 * or ->boost_tasks, advancing the pointer to the next task in the
 * ->blkd_tasks list.
 *
 * Note that irqs must be enabled: boosting the task can block.
 * Returns 1 if there are more tasks needing to be boosted.
 */
static int rcu_boost(struct rcu_node *rnp)
{
	unsigned long flags;
	struct rt_mutex mtx;
	struct task_struct *t;
	struct list_head *tb;

	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL)
		return 0;  /* Nothing left to boost. */

	raw_spin_lock_irqsave(&rnp->lock, flags);

	/*
	 * Recheck under the lock: all tasks in need of boosting
	 * might exit their RCU read-side critical sections on their own.
	 */
	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return 0;
	}

	/*
	 * Preferentially boost tasks blocking expedited grace periods.
	 * This cannot starve the normal grace periods because a second
	 * expedited grace period must boost all blocked tasks, including
	 * those blocking the pre-existing normal grace period.
	 */
	if (rnp->exp_tasks != NULL) {
		tb = rnp->exp_tasks;
		rnp->n_exp_boosts++;
	} else {
		tb = rnp->boost_tasks;
		rnp->n_normal_boosts++;
	}
	rnp->n_tasks_boosted++;

	/*
	 * We boost task t by manufacturing an rt_mutex that appears to
	 * be held by task t.  We leave a pointer to that rt_mutex where
	 * task t can find it, and task t will release the mutex when it
	 * exits its outermost RCU read-side critical section.  Then
	 * simply acquiring this artificial rt_mutex will boost task
	 * t's priority.  (Thanks to tglx for suggesting this approach!)
	 *
	 * Note that task t must acquire rnp->lock to remove itself from
	 * the ->blkd_tasks list, which it will do from exit() if from
	 * nowhere else.  We therefore are guaranteed that task t will
	 * stay around at least until we drop rnp->lock.  Note that
	 * rnp->lock also resolves races between our priority boosting
	 * and task t's exiting its outermost RCU read-side critical
	 * section.
	 */
	t = container_of(tb, struct task_struct, rcu_node_entry);
	rt_mutex_init_proxy_locked(&mtx, t);
	t->rcu_boost_mutex = &mtx;
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
	rt_mutex_lock(&mtx);  /* Side effect: boosts task t's priority. */
	rt_mutex_unlock(&mtx);  /* Keep lockdep happy. */

	return ACCESS_ONCE(rnp->exp_tasks) != NULL ||
	       ACCESS_ONCE(rnp->boost_tasks) != NULL;
}
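
/*
 * Illustrative sketch (not part of this file) of the proxy-lock trick
 * used above, reduced to its essentials, with "reader" standing in for
 * a hypothetical preempted low-priority reader task:
 *
 *	struct rt_mutex mtx;
 *
 *	rt_mutex_init_proxy_locked(&mtx, reader);
 *	reader->rcu_boost_mutex = &mtx;
 *	rt_mutex_lock(&mtx);
 *	rt_mutex_unlock(&mtx);
 *
 * Because mtx appears to be held by the reader, the rt_mutex_lock()
 * call priority-inherits the booster's priority to the reader and
 * blocks until the reader releases mtx, which it does (unboosting
 * itself) upon exiting its outermost RCU read-side critical section.
 */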
/*
 * Timer handler to initiate waking up of boost kthreads that
 * have yielded the CPU due to excessive numbers of tasks to
 * boost.  We wake up the per-rcu_node kthread, which in turn
 * will wake up the booster kthread.
 */
static void rcu_boost_kthread_timer(unsigned long arg)
{
	invoke_rcu_node_kthread((struct rcu_node *)arg);
}
/*
 * Priority-boosting kthread.  One per leaf rcu_node and one for the
 * root rcu_node.
 */
static int rcu_boost_kthread(void *arg)
{
	struct rcu_node *rnp = (struct rcu_node *)arg;
	int spincnt = 0;
	int more2boost;

	trace_rcu_utilization("Start boost kthread@init");
	for (;;) {
		rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
		trace_rcu_utilization("End boost kthread@rcu_wait");
		rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
		trace_rcu_utilization("Start boost kthread@rcu_wait");
		rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
		more2boost = rcu_boost(rnp);
		if (more2boost)
			spincnt++;
		else
			spincnt = 0;
		if (spincnt > 10) {
			trace_rcu_utilization("End boost kthread@rcu_yield");
			rcu_yield(rcu_boost_kthread_timer, (unsigned long)rnp);
			trace_rcu_utilization("Start boost kthread@rcu_yield");
			spincnt = 0;
		}
	}
	/* NOTREACHED */
	trace_rcu_utilization("End boost kthread@notreached");
	return 0;
}
/*
 * Check to see if it is time to start boosting RCU readers that are
 * blocking the current grace period, and, if so, tell the per-rcu_node
 * kthread to start boosting them.  If there is an expedited grace
 * period in progress, it is always time to boost.
 *
 * The caller must hold rnp->lock, which this function releases,
 * but irqs remain disabled.  The ->boost_kthread_task is immortal,
 * so we don't need to worry about it going away.
 */
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
{
	struct task_struct *t;

	if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
		rnp->n_balk_exp_gp_tasks++;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;
	}
	if (rnp->exp_tasks != NULL ||
	    (rnp->gp_tasks != NULL &&
	     rnp->boost_tasks == NULL &&
	     rnp->qsmask == 0 &&
	     ULONG_CMP_GE(jiffies, rnp->boost_time))) {
		if (rnp->exp_tasks == NULL)
			rnp->boost_tasks = rnp->gp_tasks;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		t = rnp->boost_kthread_task;
		if (t != NULL)
			wake_up_process(t);
	} else {
		rcu_initiate_boost_trace(rnp);
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	}
}
/*
 * Wake up the per-CPU kthread to invoke RCU callbacks.
 */
static void invoke_rcu_callbacks_kthread(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__this_cpu_write(rcu_cpu_has_work, 1);
	if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
	    current != __this_cpu_read(rcu_cpu_kthread_task))
		wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
	local_irq_restore(flags);
}

/*
 * Is the current CPU running the RCU-callbacks kthread?
 * Caller must have preemption disabled.
 */
static bool rcu_is_callbacks_kthread(void)
{
	return __get_cpu_var(rcu_cpu_kthread_task) == current;
}

/*
 * Set the affinity of the boost kthread.  The CPU-hotplug locks are
 * held, so no one should be messing with the existence of the boost
 * kthread.
 */
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
					  cpumask_var_t cm)
{
	struct task_struct *t;

	t = rnp->boost_kthread_task;
	if (t != NULL)
		set_cpus_allowed_ptr(rnp->boost_kthread_task, cm);
}
#define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)

/*
 * Do priority-boost accounting for the start of a new grace period.
 */
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
	rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
}
/*
 * Create an RCU-boost kthread for the specified node if one does not
 * already exist.  We only create this kthread for preemptible RCU.
 * Returns zero if all is well, a negated errno otherwise.
 */
static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
						 struct rcu_node *rnp,
						 int rnp_index)
{
	unsigned long flags;
	struct sched_param sp;
	struct task_struct *t;

	if (&rcu_preempt_state != rsp)
		return 0;
	rsp->boost = 1;
	if (rnp->boost_kthread_task != NULL)
		return 0;
	t = kthread_create(rcu_boost_kthread, (void *)rnp,
			   "rcub/%d", rnp_index);
	if (IS_ERR(t))
		return PTR_ERR(t);
	raw_spin_lock_irqsave(&rnp->lock, flags);
	rnp->boost_kthread_task = t;
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
	sp.sched_priority = RCU_BOOST_PRIO;
	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
	wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
	return 0;
}
#ifdef CONFIG_HOTPLUG_CPU

/*
 * Stop the RCU's per-CPU kthread when its CPU goes offline.
 */
static void rcu_stop_cpu_kthread(int cpu)
{
	struct task_struct *t;

	/* Stop the CPU's kthread. */
	t = per_cpu(rcu_cpu_kthread_task, cpu);
	if (t != NULL) {
		per_cpu(rcu_cpu_kthread_task, cpu) = NULL;
		kthread_stop(t);
	}
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */
static void rcu_kthread_do_work(void)
{
	rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data));
	rcu_do_batch(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
	rcu_preempt_do_callbacks();
}

/*
 * Wake up the specified per-rcu_node-structure kthread.
 * Because the per-rcu_node kthreads are immortal, we don't need
 * to do anything to keep them alive.
 */
static void invoke_rcu_node_kthread(struct rcu_node *rnp)
{
	struct task_struct *t;

	t = rnp->node_kthread_task;
	if (t != NULL)
		wake_up_process(t);
}
/*
 * Set the specified CPU's kthread to run RT or not, as specified by
 * the to_rt argument.  The CPU-hotplug locks are held, so the task
 * is not going away.
 */
static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
{
	int policy;
	struct sched_param sp;
	struct task_struct *t;

	t = per_cpu(rcu_cpu_kthread_task, cpu);
	if (t == NULL)
		return;
	if (to_rt) {
		policy = SCHED_FIFO;
		sp.sched_priority = RCU_KTHREAD_PRIO;
	} else {
		policy = SCHED_NORMAL;
		sp.sched_priority = 0;
	}
	sched_setscheduler_nocheck(t, policy, &sp);
}
/*
 * Timer handler to initiate the waking up of per-CPU kthreads that
 * have yielded the CPU due to excess numbers of RCU callbacks.
 * We wake up the per-rcu_node kthread, which in turn will wake up
 * the booster kthread.
 */
static void rcu_cpu_kthread_timer(unsigned long arg)
{
	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg);
	struct rcu_node *rnp = rdp->mynode;

	atomic_or(rdp->grpmask, &rnp->wakemask);
	invoke_rcu_node_kthread(rnp);
}
/*
 * Drop to non-real-time priority and yield, but only after posting a
 * timer that will cause us to regain our real-time priority if we
 * remain preempted.  Either way, we restore our real-time priority
 * before returning.
 */
static void rcu_yield(void (*f)(unsigned long), unsigned long arg)
{
	struct sched_param sp;
	struct timer_list yield_timer;
	int prio = current->rt_priority;

	setup_timer_on_stack(&yield_timer, f, arg);
	mod_timer(&yield_timer, jiffies + 2);
	sp.sched_priority = 0;
	sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp);
	set_user_nice(current, 19);
	schedule();
	set_user_nice(current, 0);
	sp.sched_priority = prio;
	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
	del_timer(&yield_timer);
}
/*
 * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU.
 * This can happen while the corresponding CPU is either coming online
 * or going offline.  We cannot wait until the CPU is fully online
 * before starting the kthread, because the various notifier functions
 * can wait for RCU grace periods.  So we park rcu_cpu_kthread() until
 * the corresponding CPU is online.
 *
 * Return 1 if the kthread needs to stop, 0 otherwise.
 *
 * Caller must disable bh.  This function can momentarily enable it.
 */
static int rcu_cpu_kthread_should_stop(int cpu)
{
	while (cpu_is_offline(cpu) ||
	       !cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)) ||
	       smp_processor_id() != cpu) {
		if (kthread_should_stop())
			return 1;
		per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
		per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id();
		local_bh_enable();
		schedule_timeout_uninterruptible(1);
		if (!cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)))
			set_cpus_allowed_ptr(current, cpumask_of(cpu));
		local_bh_disable();
	}
	per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
	return 0;
}
/*
 * Per-CPU kernel thread that invokes RCU callbacks.  This replaces the
 * RCU softirq used in flavors and configurations of RCU that do not
 * support RCU priority boosting.
 */
static int rcu_cpu_kthread(void *arg)
{
	int cpu = (int)(long)arg;
	unsigned long flags;
	int spincnt = 0;
	unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu);
	char work;
	char *workp = &per_cpu(rcu_cpu_has_work, cpu);

	trace_rcu_utilization("Start CPU kthread@init");
	for (;;) {
		*statusp = RCU_KTHREAD_WAITING;
		trace_rcu_utilization("End CPU kthread@rcu_wait");
		rcu_wait(*workp != 0 || kthread_should_stop());
		trace_rcu_utilization("Start CPU kthread@rcu_wait");
		local_bh_disable();
		if (rcu_cpu_kthread_should_stop(cpu)) {
			local_bh_enable();
			break;
		}
		*statusp = RCU_KTHREAD_RUNNING;
		per_cpu(rcu_cpu_kthread_loops, cpu)++;
		local_irq_save(flags);
		work = *workp;
		*workp = 0;
		local_irq_restore(flags);
		if (work)
			rcu_kthread_do_work();
		local_bh_enable();
		if (*workp != 0)
			spincnt++;
		else
			spincnt = 0;
		if (spincnt > 10) {
			*statusp = RCU_KTHREAD_YIELDING;
			trace_rcu_utilization("End CPU kthread@rcu_yield");
			rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu);
			trace_rcu_utilization("Start CPU kthread@rcu_yield");
			spincnt = 0;
		}
	}
	*statusp = RCU_KTHREAD_STOPPED;
	trace_rcu_utilization("End CPU kthread@term");
	return 0;
}
/*
 * Spawn a per-CPU kthread, setting up affinity and priority.
 * Because the CPU hotplug lock is held, no other CPU will be attempting
 * to manipulate rcu_cpu_kthread_task.  There might be another CPU
 * attempting to access it during boot, but the locking in kthread_bind()
 * will enforce sufficient ordering.
 *
 * Please note that we cannot simply refuse to wake up the per-CPU
 * kthread because kthreads are created in TASK_UNINTERRUPTIBLE state,
 * which can result in softlockup complaints if the task ends up being
 * idle for more than a couple of minutes.
 *
 * However, please note also that we cannot bind the per-CPU kthread to its
 * CPU until that CPU is fully online.  We also cannot wait until the
 * CPU is fully online before we create its per-CPU kthread, as this would
 * deadlock the system when CPU notifiers tried waiting for grace
 * periods.  So we bind the per-CPU kthread to its CPU only if the CPU
 * is online.  If its CPU is not yet fully online, then the code in
 * rcu_cpu_kthread() will wait until it is fully online, and then do
 * the binding.
 */
static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
{
	struct sched_param sp;
	struct task_struct *t;

	if (!rcu_scheduler_fully_active ||
	    per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
		return 0;
	t = kthread_create_on_node(rcu_cpu_kthread,
				   (void *)(long)cpu,
				   cpu_to_node(cpu),
				   "rcuc/%d", cpu);
	if (IS_ERR(t))
		return PTR_ERR(t);
	if (cpu_online(cpu))
		kthread_bind(t, cpu);
	per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
	WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
	sp.sched_priority = RCU_KTHREAD_PRIO;
	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
	per_cpu(rcu_cpu_kthread_task, cpu) = t;
	wake_up_process(t); /* Get to TASK_INTERRUPTIBLE quickly. */
	return 0;
}
/*
 * Per-rcu_node kthread, which is in charge of waking up the per-CPU
 * kthreads when needed.  We ignore requests to wake up kthreads
 * for offline CPUs, which is OK because force_quiescent_state()
 * takes care of this case.
 */
static int rcu_node_kthread(void *arg)
{
	int cpu;
	unsigned long flags;
	unsigned long mask;
	struct rcu_node *rnp = (struct rcu_node *)arg;
	struct sched_param sp;
	struct task_struct *t;

	for (;;) {
		rnp->node_kthread_status = RCU_KTHREAD_WAITING;
		rcu_wait(atomic_read(&rnp->wakemask) != 0);
		rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
		raw_spin_lock_irqsave(&rnp->lock, flags);
		mask = atomic_xchg(&rnp->wakemask, 0);
		rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
		for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) {
			if ((mask & 0x1) == 0)
				continue;
			preempt_disable();
			t = per_cpu(rcu_cpu_kthread_task, cpu);
			if (!cpu_online(cpu) || t == NULL) {
				preempt_enable();
				continue;
			}
			per_cpu(rcu_cpu_has_work, cpu) = 1;
			sp.sched_priority = RCU_KTHREAD_PRIO;
			sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
			preempt_enable();
		}
	}
	/* NOTREACHED */
	rnp->node_kthread_status = RCU_KTHREAD_STOPPED;
	return 0;
}
/*
 * Set the per-rcu_node kthread's affinity to cover all CPUs that are
 * served by the rcu_node in question.  The CPU hotplug lock is still
 * held, so the value of rnp->qsmaskinit will be stable.
 *
 * We don't include outgoingcpu in the affinity set, use -1 if there is
 * no outgoing CPU.  If there are no CPUs left in the affinity set,
 * this function allows the kthread to execute on any CPU.
 */
static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
	cpumask_var_t cm;
	int cpu;
	unsigned long mask = rnp->qsmaskinit;

	if (rnp->node_kthread_task == NULL)
		return;
	if (!alloc_cpumask_var(&cm, GFP_KERNEL))
		return;
	cpumask_clear(cm);
	for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
		if ((mask & 0x1) && cpu != outgoingcpu)
			cpumask_set_cpu(cpu, cm);
	if (cpumask_weight(cm) == 0) {
		cpumask_setall(cm);
		for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
			cpumask_clear_cpu(cpu, cm);
		WARN_ON_ONCE(cpumask_weight(cm) == 0);
	}
	set_cpus_allowed_ptr(rnp->node_kthread_task, cm);
	rcu_boost_kthread_setaffinity(rnp, cm);
	free_cpumask_var(cm);
}
/*
 * Spawn a per-rcu_node kthread, setting priority and affinity.
 * Called during boot before online/offline can happen, or, if
 * during runtime, with the main CPU-hotplug locks held.  So only
 * one of these can be executing at a time.
 */
static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
						struct rcu_node *rnp)
{
	unsigned long flags;
	int rnp_index = rnp - &rsp->node[0];
	struct sched_param sp;
	struct task_struct *t;

	if (!rcu_scheduler_fully_active ||
	    rnp->qsmaskinit == 0)
		return 0;
	if (rnp->node_kthread_task == NULL) {
		t = kthread_create(rcu_node_kthread, (void *)rnp,
				   "rcun/%d", rnp_index);
		if (IS_ERR(t))
			return PTR_ERR(t);
		raw_spin_lock_irqsave(&rnp->lock, flags);
		rnp->node_kthread_task = t;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		sp.sched_priority = 99;
		sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
		wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
	}
	return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index);
}
/*
 * Spawn all kthreads -- called as soon as the scheduler is running.
 */
static int __init rcu_spawn_kthreads(void)
{
	int cpu;
	struct rcu_node *rnp;

	rcu_scheduler_fully_active = 1;
	for_each_possible_cpu(cpu) {
		per_cpu(rcu_cpu_has_work, cpu) = 0;
		if (cpu_online(cpu))
			(void)rcu_spawn_one_cpu_kthread(cpu);
	}
	rnp = rcu_get_root(rcu_state);
	(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
	if (NUM_RCU_NODES > 1) {
		rcu_for_each_leaf_node(rcu_state, rnp)
			(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
	}
	return 0;
}
early_initcall(rcu_spawn_kthreads);
static void __cpuinit rcu_prepare_kthreads(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
	struct rcu_node *rnp = rdp->mynode;

	/* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
	if (rcu_scheduler_fully_active) {
		(void)rcu_spawn_one_cpu_kthread(cpu);
		if (rnp->node_kthread_task == NULL)
			(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
	}
}
#else /* #ifdef CONFIG_RCU_BOOST */

static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

static void invoke_rcu_callbacks_kthread(void)
{
	WARN_ON_ONCE(1);
}

static bool rcu_is_callbacks_kthread(void)
{
	return false;
}

static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
}

#ifdef CONFIG_HOTPLUG_CPU

static void rcu_stop_cpu_kthread(int cpu)
{
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
}

static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
{
}

static int __init rcu_scheduler_really_started(void)
{
	rcu_scheduler_fully_active = 1;
	return 0;
}
early_initcall(rcu_scheduler_really_started);

static void __cpuinit rcu_prepare_kthreads(int cpu)
{
}

#endif /* #else #ifdef CONFIG_RCU_BOOST */
#if !defined(CONFIG_RCU_FAST_NO_HZ)

/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
 * 1 if so.  This function is part of the RCU implementation; it is -not-
 * an exported member of the RCU API.
 *
 * Because we do not have RCU_FAST_NO_HZ, just check whether this CPU needs
 * any flavor of RCU.
 */
int rcu_needs_cpu(int cpu)
{
	return rcu_cpu_has_callbacks(cpu);
}

/*
 * Because we do not have RCU_FAST_NO_HZ, don't bother initializing for it.
 */
static void rcu_prepare_for_idle_init(int cpu)
{
}

/*
 * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
 * after it.
 */
static void rcu_cleanup_after_idle(int cpu)
{
}

/*
 * Do the idle-entry grace-period work, which, because CONFIG_RCU_FAST_NO_HZ=n,
 * is nothing.
 */
static void rcu_prepare_for_idle(int cpu)
{
}
1941 #else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */
/*
 * This code is invoked when a CPU goes idle, at which point we want
 * to have the CPU do everything required for RCU so that it can enter
 * the energy-efficient dyntick-idle mode.  This is handled by a
 * state machine implemented by rcu_prepare_for_idle() below.
 *
 * The following four preprocessor symbols control this state machine:
 *
 * RCU_IDLE_FLUSHES gives the maximum number of times that we will attempt
 *	to satisfy RCU.  Beyond this point, it is better to incur a periodic
 *	scheduling-clock interrupt than to loop through the state machine
 *	at full power.
 * RCU_IDLE_OPT_FLUSHES gives the number of RCU_IDLE_FLUSHES that are
 *	optional if RCU does not need anything immediately from this
 *	CPU, even if this CPU still has RCU callbacks queued.  The first
 *	times through the state machine are mandatory: we need to give
 *	the state machine a chance to communicate a quiescent state
 *	to the RCU core.
 * RCU_IDLE_GP_DELAY gives the number of jiffies that a CPU is permitted
 *	to sleep in dyntick-idle mode with RCU callbacks pending.  This
 *	is sized to be roughly one RCU grace period.  Those energy-efficiency
 *	benchmarkers who might otherwise be tempted to set this to a large
 *	number, be warned: Setting RCU_IDLE_GP_DELAY too high can hang your
 *	system.  And if you are -that- concerned about energy efficiency,
 *	just power the system down and be done with it!
 * RCU_IDLE_LAZY_GP_DELAY gives the number of jiffies that a CPU is
 *	permitted to sleep in dyntick-idle mode with only lazy RCU
 *	callbacks pending.  Setting this too high can OOM your system.
 *
 * The values below work well in practice.  If future workloads require
 * adjustment, they can be converted into kernel config parameters, though
 * making the state machine smarter might be a better option.
 */
#define RCU_IDLE_FLUSHES 5		/* Number of dyntick-idle tries. */
#define RCU_IDLE_OPT_FLUSHES 3		/* Optional dyntick-idle tries. */
#define RCU_IDLE_GP_DELAY 6		/* Roughly one grace period. */
#define RCU_IDLE_LAZY_GP_DELAY (6 * HZ)	/* Roughly six seconds. */
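/*
 * Worked example of these knobs (HZ=1000 assumed for illustration only):
 * a CPU that goes idle with non-lazy callbacks queued may sleep for
 * RCU_IDLE_GP_DELAY = 6 jiffies, roughly 6 ms, before its timer fires;
 * with only lazy callbacks queued it may sleep for RCU_IDLE_LAZY_GP_DELAY =
 * 6 * HZ = 6000 jiffies, roughly 6 s.  At HZ=100 the non-lazy delay
 * stretches to roughly 60 ms, while the lazy delay remains roughly 6 s.
 */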
static DEFINE_PER_CPU(int, rcu_dyntick_drain);
static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);
static DEFINE_PER_CPU(struct hrtimer, rcu_idle_gp_timer);
static ktime_t rcu_idle_gp_wait;	/* If some non-lazy callbacks. */
static ktime_t rcu_idle_lazy_gp_wait;	/* If only lazy callbacks. */
/*
 * Allow the CPU to enter dyntick-idle mode if either: (1) There are no
 * callbacks on this CPU, (2) this CPU has not yet attempted to enter
 * dyntick-idle mode, or (3) this CPU is in the process of attempting to
 * enter dyntick-idle mode.  Otherwise, if we have recently tried and failed
 * to enter dyntick-idle mode, we refuse to try to enter it.  After all,
 * it is better to incur scheduling-clock interrupts than to spin
 * continuously for the same time duration!
 */
int rcu_needs_cpu(int cpu)
{
	/* If no callbacks, RCU doesn't need the CPU. */
	if (!rcu_cpu_has_callbacks(cpu))
		return 0;
	/* Otherwise, RCU needs the CPU only if it recently tried and failed. */
	return per_cpu(rcu_dyntick_holdoff, cpu) == jiffies;
}
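/*
 * Example of the holdoff test above: when a previous attempt to enter
 * dyntick-idle mode gave up, rcu_dyntick_holdoff was set to the then-current
 * jiffies value.  For the remainder of that jiffy this function returns 1,
 * keeping the scheduling-clock tick alive; once jiffies advances, the
 * comparison fails and the CPU is again allowed to try dyntick-idle entry.
 */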
/*
 * Does the specified flavor of RCU have non-lazy callbacks pending on
 * the specified CPU?  Both RCU flavor and CPU are specified by the
 * rcu_data structure.
 */
static bool __rcu_cpu_has_nonlazy_callbacks(struct rcu_data *rdp)
{
	return rdp->qlen != rdp->qlen_lazy;
}
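/*
 * For example, rdp->qlen == 4 with rdp->qlen_lazy == 4 means that every
 * queued callback is lazy (e.g., posted via kfree_rcu()), so this returns
 * false and the CPU may use the longer RCU_IDLE_LAZY_GP_DELAY sleep;
 * qlen == 4 with qlen_lazy == 1 leaves three non-lazy callbacks, so this
 * returns true and the shorter RCU_IDLE_GP_DELAY applies.
 */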
#ifdef CONFIG_TREE_PREEMPT_RCU

/*
 * Are there non-lazy RCU-preempt callbacks?  (There cannot be if there
 * is no RCU-preempt in the kernel.)
 */
static bool rcu_preempt_cpu_has_nonlazy_callbacks(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);

	return __rcu_cpu_has_nonlazy_callbacks(rdp);
}

#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */

static bool rcu_preempt_cpu_has_nonlazy_callbacks(int cpu)
{
	return 0;
}

#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
/*
 * Does any flavor of RCU have non-lazy callbacks on the specified CPU?
 */
static bool rcu_cpu_has_nonlazy_callbacks(int cpu)
{
	return __rcu_cpu_has_nonlazy_callbacks(&per_cpu(rcu_sched_data, cpu)) ||
	       __rcu_cpu_has_nonlazy_callbacks(&per_cpu(rcu_bh_data, cpu)) ||
	       rcu_preempt_cpu_has_nonlazy_callbacks(cpu);
}
/*
 * Timer handler used to force a CPU to start pushing its remaining RCU
 * callbacks in the case where it entered dyntick-idle mode with callbacks
 * pending.  The handler doesn't really need to do anything because the
 * real work is done upon re-entry to idle, or by the next scheduling-clock
 * interrupt should idle not be re-entered.
 */
static enum hrtimer_restart rcu_idle_gp_timer_func(struct hrtimer *hrtp)
{
	trace_rcu_prep_idle("Timer");
	return HRTIMER_NORESTART;
}
/*
 * Initialize the timer used to pull CPUs out of dyntick-idle mode.
 */
static void rcu_prepare_for_idle_init(int cpu)
{
	static int firsttime = 1;
	struct hrtimer *hrtp = &per_cpu(rcu_idle_gp_timer, cpu);

	hrtimer_init(hrtp, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtp->function = rcu_idle_gp_timer_func;
	if (firsttime) {
		unsigned int upj = jiffies_to_usecs(RCU_IDLE_GP_DELAY);

		/* Convert microseconds to the nanoseconds that ktime expects. */
		rcu_idle_gp_wait = ns_to_ktime(upj * (u64)1000);
		upj = jiffies_to_usecs(RCU_IDLE_LAZY_GP_DELAY);
		rcu_idle_lazy_gp_wait = ns_to_ktime(upj * (u64)1000);
		firsttime = 0;
	}
}
/*
 * Clean up for exit from idle.  Because we are exiting from idle, there
 * is no longer any point to rcu_idle_gp_timer, so cancel it.  This will
 * do nothing if this timer is not active, so just cancel it unconditionally.
 */
static void rcu_cleanup_after_idle(int cpu)
{
	hrtimer_cancel(&per_cpu(rcu_idle_gp_timer, cpu));
}
/*
 * Check to see if any RCU-related work can be done by the current CPU,
 * and if so, schedule a softirq to get it done.  This function is part
 * of the RCU implementation; it is -not- an exported member of the RCU API.
 *
 * The idea is for the current CPU to clear out all work required by the
 * RCU core for the current grace period, so that this CPU can be permitted
 * to enter dyntick-idle mode.  In some cases, it will need to be awakened
 * at the end of the grace period by whatever CPU ends the grace period.
 * This allows CPUs to go dyntick-idle more quickly, and to reduce the
 * number of wakeups by a modest integer factor.
 *
 * Because it is not legal to invoke rcu_process_callbacks() with irqs
 * disabled, we do one pass of force_quiescent_state(), then do an
 * invoke_rcu_core() to cause rcu_process_callbacks() to be invoked
 * later.  The per-CPU rcu_dyntick_drain variable controls the sequencing.
 *
 * The caller must have disabled interrupts.
 */
static void rcu_prepare_for_idle(int cpu)
{
	/*
	 * If there are no callbacks on this CPU, enter dyntick-idle mode.
	 * Also reset state to avoid prejudicing later attempts.
	 */
	if (!rcu_cpu_has_callbacks(cpu)) {
		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
		per_cpu(rcu_dyntick_drain, cpu) = 0;
		trace_rcu_prep_idle("No callbacks");
		return;
	}

	/*
	 * If in holdoff mode, just return.  We will presumably have
	 * refrained from disabling the scheduling-clock tick.
	 */
	if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies) {
		trace_rcu_prep_idle("In holdoff");
		return;
	}

	/* Check and update the rcu_dyntick_drain sequencing. */
	if (per_cpu(rcu_dyntick_drain, cpu) <= 0) {
		/* First time through, initialize the counter. */
		per_cpu(rcu_dyntick_drain, cpu) = RCU_IDLE_FLUSHES;
	} else if (per_cpu(rcu_dyntick_drain, cpu) <= RCU_IDLE_OPT_FLUSHES &&
		   !rcu_pending(cpu) &&
		   !local_softirq_pending()) {
		/* Can we go dyntick-idle despite still having callbacks? */
		trace_rcu_prep_idle("Dyntick with callbacks");
		per_cpu(rcu_dyntick_drain, cpu) = 0;
		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
		if (rcu_cpu_has_nonlazy_callbacks(cpu))
			hrtimer_start(&per_cpu(rcu_idle_gp_timer, cpu),
				      rcu_idle_gp_wait, HRTIMER_MODE_REL);
		else
			hrtimer_start(&per_cpu(rcu_idle_gp_timer, cpu),
				      rcu_idle_lazy_gp_wait, HRTIMER_MODE_REL);
		return; /* Nothing more to do immediately. */
	} else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) {
		/* We have hit the limit, so time to give up. */
		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
		trace_rcu_prep_idle("Begin holdoff");
		invoke_rcu_core(); /* Force the CPU out of dyntick-idle. */
		return;
	}

	/*
	 * Do one step of pushing the remaining RCU callbacks through
	 * the RCU core state machine.
	 */
#ifdef CONFIG_TREE_PREEMPT_RCU
	if (per_cpu(rcu_preempt_data, cpu).nxtlist) {
		rcu_preempt_qs(cpu);
		force_quiescent_state(&rcu_preempt_state, 0);
	}
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
	if (per_cpu(rcu_sched_data, cpu).nxtlist) {
		rcu_sched_qs(cpu);
		force_quiescent_state(&rcu_sched_state, 0);
	}
	if (per_cpu(rcu_bh_data, cpu).nxtlist) {
		rcu_bh_qs(cpu);
		force_quiescent_state(&rcu_bh_state, 0);
	}

	/*
	 * If RCU callbacks are still pending, RCU still needs this CPU.
	 * So try forcing the callbacks through the grace period.
	 */
	if (rcu_cpu_has_callbacks(cpu)) {
		trace_rcu_prep_idle("More callbacks");
		invoke_rcu_core();
	} else {
		trace_rcu_prep_idle("Callbacks drained");
	}
}
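/*
 * Hedged sketch of a possible rcu_prep_idle trace sequence for a CPU
 * repeatedly re-entering idle with callbacks queued (the event strings
 * are the ones used above; the interleaving is illustrative only):
 *
 *	"More callbacks"		mandatory flush passes, softirq raised
 *	"More callbacks"		...
 *	"Dyntick with callbacks"	optional passes done, timer armed
 * or, if the drain counter runs out first:
 *	"Begin holdoff"			give up until the next jiffy
 */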
#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */

#ifdef CONFIG_RCU_CPU_STALL_INFO

#ifdef CONFIG_RCU_FAST_NO_HZ
static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
	struct hrtimer *hrtp = &per_cpu(rcu_idle_gp_timer, cpu);

	sprintf(cp, "drain=%d %c timer=%lld",
		per_cpu(rcu_dyntick_drain, cpu),
		per_cpu(rcu_dyntick_holdoff, cpu) == jiffies ? 'H' : '.',
		hrtimer_active(hrtp)
			? ktime_to_us(hrtimer_get_remaining(hrtp))
			: -1);
}
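/*
 * Example of the resulting string (values illustrative): "drain=3 H timer=5012"
 * means three flush attempts remain, the CPU is in holdoff during the
 * current jiffy ('H' rather than '.'), and the idle grace-period timer
 * has roughly 5012 microseconds remaining; an inactive timer prints
 * "timer=-1".
 */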
#else /* #ifdef CONFIG_RCU_FAST_NO_HZ */

static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
	*cp = '\0';
}

#endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */
/* Initiate the stall-info list. */
static void print_cpu_stall_info_begin(void)
{
	printk(KERN_CONT "\n");
}
/*
 * Print out diagnostic information for the specified stalled CPU.
 *
 * If the specified CPU is aware of the current RCU grace period
 * (flavor specified by rsp), then print the number of scheduling
 * clock interrupts the CPU has taken during the time that it has
 * been aware.  Otherwise, print the number of RCU grace periods
 * that this CPU is ignorant of, for example, "1" if the CPU was
 * aware of the previous grace period.
 *
 * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info.
 */
static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
{
	char fast_no_hz[72];
	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
	struct rcu_dynticks *rdtp = rdp->dynticks;
	char *ticks_title;
	unsigned long ticks_value;

	if (rsp->gpnum == rdp->gpnum) {
		ticks_title = "ticks this GP";
		ticks_value = rdp->ticks_this_gp;
	} else {
		ticks_title = "GPs behind";
		ticks_value = rsp->gpnum - rdp->gpnum;
	}
	print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
	printk(KERN_ERR "\t%d: (%lu %s) idle=%03x/%llx/%d %s\n",
	       cpu, ticks_value, ticks_title,
	       atomic_read(&rdtp->dynticks) & 0xfff,
	       rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
	       fast_no_hz);
}
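/*
 * Example of one resulting stall-info line (values illustrative):
 *
 *	1: (4 ticks this GP) idle=e54/1/0 drain=0 . timer=-1
 *
 * CPU 1 has taken 4 scheduling-clock interrupts during the current grace
 * period; the low 12 bits of its ->dynticks counter are 0xe54, with
 * dynticks_nesting 1 and dynticks_nmi_nesting 0; the trailing fields come
 * from print_cpu_stall_fast_no_hz() above.
 */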
/* Terminate the stall-info list. */
static void print_cpu_stall_info_end(void)
{
	printk(KERN_ERR "\t");
}
/* Zero ->ticks_this_gp for all flavors of RCU. */
static void zero_cpu_stall_ticks(struct rcu_data *rdp)
{
	rdp->ticks_this_gp = 0;
}
/* Increment ->ticks_this_gp for all flavors of RCU. */
static void increment_cpu_stall_ticks(void)
{
	__get_cpu_var(rcu_sched_data).ticks_this_gp++;
	__get_cpu_var(rcu_bh_data).ticks_this_gp++;
#ifdef CONFIG_TREE_PREEMPT_RCU
	__get_cpu_var(rcu_preempt_data).ticks_this_gp++;
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
}
#else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */

static void print_cpu_stall_info_begin(void)
{
	printk(KERN_CONT " {");
}

static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
{
	printk(KERN_CONT " %d", cpu);
}

static void print_cpu_stall_info_end(void)
{
	printk(KERN_CONT "} ");
}

static void zero_cpu_stall_ticks(struct rcu_data *rdp)
{
}

static void increment_cpu_stall_ticks(void)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */