/*
 * RCU expedited grace periods
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2016
 *
 * Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */
#include <linux/lockdep.h>
/*
 * Record the start of an expedited grace period.
 */
static void rcu_exp_gp_seq_start(void)
{
	rcu_seq_start(&rcu_state.expedited_sequence);
}
/*
 * Return the value that the expedited-grace-period counter will have
 * at the end of the current grace period.
 */
static __maybe_unused unsigned long rcu_exp_gp_seq_endval(void)
{
	return rcu_seq_endval(&rcu_state.expedited_sequence);
}
/*
 * Record the end of an expedited grace period.
 */
static void rcu_exp_gp_seq_end(void)
{
	rcu_seq_end(&rcu_state.expedited_sequence);
	smp_mb(); /* Ensure that consecutive grace periods serialize. */
}
/*
 * Take a snapshot of the expedited-grace-period counter.
 */
static unsigned long rcu_exp_gp_seq_snap(void)
{
	unsigned long s;

	smp_mb(); /* Caller's modifications seen first by other CPUs. */
	s = rcu_seq_snap(&rcu_state.expedited_sequence);
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("snap"));
	return s;
}
/*
 * Given a counter snapshot from rcu_exp_gp_seq_snap(), return true
 * if a full expedited grace period has elapsed since that snapshot,
 * and false otherwise.
 */
static bool rcu_exp_gp_seq_done(unsigned long s)
{
	return rcu_seq_done(&rcu_state.expedited_sequence, s);
}
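
/*
 * A rough sketch (illustrative only, not an additional code path in this
 * file) of how the sequence-counter helpers above cooperate:
 *
 *	s = rcu_exp_gp_seq_snap();	// counter value to wait for
 *	// ... some task drives a grace period:
 *	//	rcu_exp_gp_seq_start(); ... rcu_exp_gp_seq_end();
 *	// ... and once enough grace periods have completed:
 *	if (rcu_exp_gp_seq_done(s))
 *		// updates that preceded the snapshot are now safe
 *
 * The code below adds funnel locking and per-node wait queues so that
 * concurrent requests share a single expedited grace period rather than
 * each driving its own.
 */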
/*
 * Reset the ->expmaskinit values in the rcu_node tree to reflect any
 * recent CPU-online activity.  Note that these masks are not cleared
 * when CPUs go offline, so they reflect the union of all CPUs that have
 * ever been online.  This means that this function normally takes its
 * no-work-to-do fastpath.
 */
static void sync_exp_reset_tree_hotplug(void)
{
	bool done;
	unsigned long flags;
	unsigned long mask;
	unsigned long oldmask;
	int ncpus = smp_load_acquire(&rcu_state.ncpus); /* Order vs. locking. */
	struct rcu_node *rnp;
	struct rcu_node *rnp_up;

	/* If no new CPUs onlined since last time, nothing to do. */
	if (likely(ncpus == rcu_state.ncpus_snap))
		return;
	rcu_state.ncpus_snap = ncpus;

	/*
	 * Each pass through the following loop propagates newly onlined
	 * CPUs for the current rcu_node structure up the rcu_node tree.
	 */
	rcu_for_each_leaf_node(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->expmaskinit == rnp->expmaskinitnext) {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			continue;  /* No new CPUs, nothing to do. */
		}

		/* Update this node's mask, track old value for propagation. */
		oldmask = rnp->expmaskinit;
		rnp->expmaskinit = rnp->expmaskinitnext;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

		/* If was already nonzero, nothing to propagate. */
		if (oldmask)
			continue;

		/* Propagate the new CPU up the tree. */
		mask = rnp->grpmask;
		rnp_up = rnp->parent;
		done = false;
		while (rnp_up) {
			raw_spin_lock_irqsave_rcu_node(rnp_up, flags);
			if (rnp_up->expmaskinit)
				done = true;
			rnp_up->expmaskinit |= mask;
			raw_spin_unlock_irqrestore_rcu_node(rnp_up, flags);
			if (done)
				break;
			mask = rnp_up->grpmask;
			rnp_up = rnp_up->parent;
		}
	}
}
/*
 * Reset the ->expmask values in the rcu_node tree in preparation for
 * a new expedited grace period.
 */
static void __maybe_unused sync_exp_reset_tree(void)
{
	unsigned long flags;
	struct rcu_node *rnp;

	sync_exp_reset_tree_hotplug();
	rcu_for_each_node_breadth_first(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		WARN_ON_ONCE(rnp->expmask);
		rnp->expmask = rnp->expmaskinit;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
}
/*
 * Return non-zero if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period.  Works only for preemptible
 * RCU -- other RCU implementations use other means.
 *
 * Caller must hold the specified rcu_node structure's ->lock.
 */
static bool sync_rcu_preempt_exp_done(struct rcu_node *rnp)
{
	raw_lockdep_assert_held_rcu_node(rnp);

	return rnp->exp_tasks == NULL &&
	       READ_ONCE(rnp->expmask) == 0;
}
/*
 * Like sync_rcu_preempt_exp_done(), but this function assumes the caller
 * doesn't hold the rcu_node's ->lock, and will acquire and release the lock
 * itself.
 */
static bool sync_rcu_preempt_exp_done_unlocked(struct rcu_node *rnp)
{
	unsigned long flags;
	bool ret;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	ret = sync_rcu_preempt_exp_done(rnp);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	return ret;
}
/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.  This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree.  (Calm down, calm down, we do the recursion
 * iteratively!)
 *
 * Caller must hold the specified rcu_node structure's ->lock.
 */
static void __rcu_report_exp_rnp(struct rcu_node *rnp,
				 bool wake, unsigned long flags)
	__releases(rnp->lock)
{
	unsigned long mask;

	for (;;) {
		if (!sync_rcu_preempt_exp_done(rnp)) {
			if (!rnp->expmask)
				rcu_initiate_boost(rnp, flags);
			else
				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			break;
		}
		if (rnp->parent == NULL) {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			if (wake) {
				smp_mb(); /* EGP done before wake_up(). */
				swake_up_one(&rcu_state.expedited_wq);
			}
			break;
		}
		mask = rnp->grpmask;
		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */
		rnp = rnp->parent;
		raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
		WARN_ON_ONCE(!(rnp->expmask & mask));
		rnp->expmask &= ~mask;
	}
}
/*
 * Report expedited quiescent state for specified node.  This is a
 * lock-acquisition wrapper function for __rcu_report_exp_rnp().
 */
static void __maybe_unused rcu_report_exp_rnp(struct rcu_node *rnp, bool wake)
{
	unsigned long flags;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	__rcu_report_exp_rnp(rnp, wake, flags);
}
/*
 * Report expedited quiescent state for multiple CPUs, all covered by the
 * specified leaf rcu_node structure.
 */
static void rcu_report_exp_cpu_mult(struct rcu_node *rnp,
				    unsigned long mask, bool wake)
{
	unsigned long flags;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!(rnp->expmask & mask)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	rnp->expmask &= ~mask;
	__rcu_report_exp_rnp(rnp, wake, flags); /* Releases rnp->lock. */
}
/*
 * Report expedited quiescent state for specified rcu_data (CPU).
 */
static void rcu_report_exp_rdp(struct rcu_data *rdp)
{
	WRITE_ONCE(rdp->deferred_qs, false);
	rcu_report_exp_cpu_mult(rdp->mynode, rdp->grpmask, true);
}
/* Common code for work-done checking. */
static bool sync_exp_work_done(unsigned long s)
{
	if (rcu_exp_gp_seq_done(s)) {
		trace_rcu_exp_grace_period(rcu_state.name, s, TPS("done"));
		/* Ensure test happens before caller kfree(). */
		smp_mb__before_atomic(); /* ^^^ */
		return true;
	}
	return false;
}
/*
 * Funnel-lock acquisition for expedited grace periods.  Returns true
 * if some other task completed an expedited grace period that this task
 * can piggy-back on, and with no mutex held.  Otherwise, returns false
 * with the mutex held, indicating that the caller must actually do the
 * expedited grace period.
 */
static bool exp_funnel_lock(unsigned long s)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
	struct rcu_node *rnp = rdp->mynode;
	struct rcu_node *rnp_root = rcu_get_root();

	/* Low-contention fastpath. */
	if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) &&
	    (rnp == rnp_root ||
	     ULONG_CMP_LT(READ_ONCE(rnp_root->exp_seq_rq), s)) &&
	    mutex_trylock(&rcu_state.exp_mutex))
		goto fastpath;

	/*
	 * Each pass through the following loop works its way up
	 * the rcu_node tree, returning if others have done the work or
	 * otherwise falls through to acquire ->exp_mutex.  The mapping
	 * from CPU to rcu_node structure can be inexact, as it is just
	 * promoting locality and is not strictly needed for correctness.
	 */
	for (; rnp != NULL; rnp = rnp->parent) {
		if (sync_exp_work_done(s))
			return true;

		/* Work not done, either wait here or go up. */
		spin_lock(&rnp->exp_lock);
		if (ULONG_CMP_GE(rnp->exp_seq_rq, s)) {

			/* Someone else doing GP, so wait for them. */
			spin_unlock(&rnp->exp_lock);
			trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
						  rnp->grplo, rnp->grphi,
						  TPS("wait"));
			wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
				   sync_exp_work_done(s));
			return true;
		}
		rnp->exp_seq_rq = s; /* Followers can wait on us. */
		spin_unlock(&rnp->exp_lock);
		trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
					  rnp->grplo, rnp->grphi, TPS("nxtlvl"));
	}
	mutex_lock(&rcu_state.exp_mutex);
fastpath:
	if (sync_exp_work_done(s)) {
		mutex_unlock(&rcu_state.exp_mutex);
		return true;
	}
	rcu_exp_gp_seq_start();
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("start"));
	return false;
}
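
/*
 * Sketch of the resulting caller-side contract (illustrative only; the
 * real caller is _synchronize_rcu_expedited() below):
 *
 *	s = rcu_exp_gp_seq_snap();
 *	if (exp_funnel_lock(s))
 *		return;	// someone else's expedited GP already covers us
 *	// Otherwise this task holds rcu_state.exp_mutex and must drive
 *	// the grace period, then drop the mutex once it has completed.
 */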
/*
 * Select the CPUs within the specified rcu_node that the upcoming
 * expedited grace period needs to wait for.
 */
static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
{
	int cpu;
	unsigned long flags;
	smp_call_func_t func;
	unsigned long mask_ofl_test;
	unsigned long mask_ofl_ipi;
	int ret;
	struct rcu_exp_work *rewp =
		container_of(wp, struct rcu_exp_work, rew_work);
	struct rcu_node *rnp = container_of(rewp, struct rcu_node, rew);

	func = rewp->rew_func;
	raw_spin_lock_irqsave_rcu_node(rnp, flags);

	/* Each pass checks a CPU for identity, offline, and idle. */
	mask_ofl_test = 0;
	for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
		unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
		struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
		int snap;

		if (raw_smp_processor_id() == cpu ||
		    !(rnp->qsmaskinitnext & mask)) {
			mask_ofl_test |= mask;
		} else {
			snap = rcu_dynticks_snap(rdp);
			if (rcu_dynticks_in_eqs(snap))
				mask_ofl_test |= mask;
			else
				rdp->exp_dynticks_snap = snap;
		}
	}
	mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;

	/*
	 * Need to wait for any blocked tasks as well.  Note that
	 * additional blocking tasks will also block the expedited GP
	 * until such time as the ->expmask bits are cleared.
	 */
	if (rcu_preempt_has_tasks(rnp))
		rnp->exp_tasks = rnp->blkd_tasks.next;
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	/* IPI the remaining CPUs for expedited quiescent state. */
	for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
		unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
		struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);

		if (!(mask_ofl_ipi & mask))
			continue;
retry_ipi:
		if (rcu_dynticks_in_eqs_since(rdp, rdp->exp_dynticks_snap)) {
			mask_ofl_test |= mask;
			continue;
		}
		ret = smp_call_function_single(cpu, func, NULL, 0);
		if (!ret) {
			mask_ofl_ipi &= ~mask;
			continue;
		}
		/* Failed, raced with CPU hotplug operation. */
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if ((rnp->qsmaskinitnext & mask) &&
		    (rnp->expmask & mask)) {
			/* Online, so delay for a bit and try again. */
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("selectofl"));
			schedule_timeout_uninterruptible(1);
			goto retry_ipi;
		}
		/* CPU really is offline, so we can ignore it. */
		if (!(rnp->expmask & mask))
			mask_ofl_ipi &= ~mask;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
	/* Report quiescent states for those that went offline. */
	mask_ofl_test |= mask_ofl_ipi;
	if (mask_ofl_test)
		rcu_report_exp_cpu_mult(rnp, mask_ofl_test, false);
}
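
/*
 * To summarize the function above (no new code path here): each CPU
 * still set in ->expmask falls into one of three buckets -- not IPIed
 * because it is this CPU or is offline per ->qsmaskinitnext, not IPIed
 * because it was observed in an extended quiescent state, or IPIed so
 * that it will report its own quiescent state.  CPUs in the first two
 * buckets (plus any whose IPI raced with going offline) are reported
 * immediately via the final rcu_report_exp_cpu_mult() call.
 */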
/*
 * Select the nodes that the upcoming expedited grace period needs
 * to wait for.
 */
static void sync_rcu_exp_select_cpus(smp_call_func_t func)
{
	int cpu;
	struct rcu_node *rnp;

	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("reset"));
	sync_exp_reset_tree();
	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("select"));

	/* Schedule work for each leaf rcu_node structure. */
	rcu_for_each_leaf_node(rnp) {
		rnp->exp_need_flush = false;
		if (!READ_ONCE(rnp->expmask))
			continue; /* Avoid early boot non-existent wq. */
		rnp->rew.rew_func = func;
		if (!READ_ONCE(rcu_par_gp_wq) ||
		    rcu_scheduler_active != RCU_SCHEDULER_RUNNING ||
		    rcu_is_last_leaf_node(rnp)) {
			/* No workqueues yet or last leaf, do direct call. */
			sync_rcu_exp_select_node_cpus(&rnp->rew.rew_work);
			continue;
		}
		INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
		preempt_disable();
		cpu = cpumask_next(rnp->grplo - 1, cpu_online_mask);
		/* If all offline, queue the work on an unbound CPU. */
		if (unlikely(cpu > rnp->grphi))
			cpu = WORK_CPU_UNBOUND;
		queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work);
		preempt_enable();
		rnp->exp_need_flush = true;
	}

	/* Wait for workqueue jobs (if any) to complete. */
	rcu_for_each_leaf_node(rnp)
		if (rnp->exp_need_flush)
			flush_work(&rnp->rew.rew_work);
}
static void synchronize_sched_expedited_wait(void)
{
	int cpu;
	unsigned long jiffies_stall;
	unsigned long jiffies_start;
	unsigned long mask;
	int ndetected;
	struct rcu_node *rnp;
	struct rcu_node *rnp_root = rcu_get_root();
	int ret;

	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("startwait"));
	jiffies_stall = rcu_jiffies_till_stall_check();
	jiffies_start = jiffies;

	for (;;) {
		ret = swait_event_timeout_exclusive(
				rcu_state.expedited_wq,
				sync_rcu_preempt_exp_done_unlocked(rnp_root),
				jiffies_stall);
		if (ret > 0 || sync_rcu_preempt_exp_done_unlocked(rnp_root))
			return;
		WARN_ON(ret < 0);  /* workqueues should not be signaled. */
		if (rcu_cpu_stall_suppress)
			continue;
		panic_on_rcu_stall();
		pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {",
		       rcu_state.name);
		ndetected = 0;
		rcu_for_each_leaf_node(rnp) {
			ndetected += rcu_print_task_exp_stall(rnp);
			for_each_leaf_node_possible_cpu(rnp, cpu) {
				struct rcu_data *rdp;

				mask = leaf_node_cpu_bit(rnp, cpu);
				if (!(rnp->expmask & mask))
					continue;
				ndetected++;
				rdp = per_cpu_ptr(&rcu_data, cpu);
				pr_cont(" %d-%c%c%c", cpu,
					"O."[!!cpu_online(cpu)],
					"o."[!!(rdp->grpmask & rnp->expmaskinit)],
					"N."[!!(rdp->grpmask & rnp->expmaskinitnext)]);
			}
		}
		pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
			jiffies - jiffies_start, rcu_state.expedited_sequence,
			rnp_root->expmask, ".T"[!!rnp_root->exp_tasks]);
		if (ndetected) {
			pr_err("blocking rcu_node structures:");
			rcu_for_each_node_breadth_first(rnp) {
				if (rnp == rnp_root)
					continue; /* printed unconditionally */
				if (sync_rcu_preempt_exp_done_unlocked(rnp))
					continue;
				pr_cont(" l=%u:%d-%d:%#lx/%c",
					rnp->level, rnp->grplo, rnp->grphi,
					rnp->expmask,
					".T"[!!rnp->exp_tasks]);
			}
			pr_cont("\n");
		}
		rcu_for_each_leaf_node(rnp) {
			for_each_leaf_node_possible_cpu(rnp, cpu) {
				mask = leaf_node_cpu_bit(rnp, cpu);
				if (!(rnp->expmask & mask))
					continue;
				dump_cpu_task(cpu);
			}
		}
		jiffies_stall = 3 * rcu_jiffies_till_stall_check() + 3;
	}
}
/*
 * Wait for the current expedited grace period to complete, and then
 * wake up everyone who piggybacked on the just-completed expedited
 * grace period.  Also update all the ->exp_seq_rq counters as needed
 * in order to avoid counter-wrap problems.
 */
static void rcu_exp_wait_wake(unsigned long s)
{
	struct rcu_node *rnp;

	synchronize_sched_expedited_wait();
	rcu_exp_gp_seq_end();
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("end"));

	/*
	 * Switch over to wakeup mode, allowing the next GP, but -only- the
	 * next GP, to proceed.
	 */
	mutex_lock(&rcu_state.exp_wake_mutex);

	rcu_for_each_node_breadth_first(rnp) {
		if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
			spin_lock(&rnp->exp_lock);
			/* Recheck, avoid hang in case someone just arrived. */
			if (ULONG_CMP_LT(rnp->exp_seq_rq, s))
				rnp->exp_seq_rq = s;
			spin_unlock(&rnp->exp_lock);
		}
		smp_mb(); /* All above changes before wakeup. */
		wake_up_all(&rnp->exp_wq[rcu_seq_ctr(rcu_state.expedited_sequence) & 0x3]);
	}
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("endwake"));
	mutex_unlock(&rcu_state.exp_wake_mutex);
}
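
/*
 * Note that the wakeup above targets rnp->exp_wq[rcu_seq_ctr(...) & 0x3],
 * the same four-entry wait-queue array that exp_funnel_lock() and
 * _synchronize_rcu_expedited() sleep on.  Because those sleepers use
 * wait_event() with a sync_exp_work_done() condition, any spurious
 * wakeup of a later grace period's waiters is harmless.
 */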
/*
 * Common code to drive an expedited grace period forward, used by
 * workqueues and mid-boot-time tasks.
 */
static void rcu_exp_sel_wait_wake(smp_call_func_t func, unsigned long s)
{
	/* Initialize the rcu_node tree in preparation for the wait. */
	sync_rcu_exp_select_cpus(func);

	/* Wait and clean up, including waking everyone. */
	rcu_exp_wait_wake(s);
}
/*
 * Work-queue handler to drive an expedited grace period forward.
 */
static void wait_rcu_exp_gp(struct work_struct *wp)
{
	struct rcu_exp_work *rewp;

	rewp = container_of(wp, struct rcu_exp_work, rew_work);
	rcu_exp_sel_wait_wake(rewp->rew_func, rewp->rew_s);
}
/*
 * Given a smp_call_function() handler, kick off the specified
 * implementation of expedited grace period.
 */
static void _synchronize_rcu_expedited(smp_call_func_t func)
{
	struct rcu_data *rdp;
	struct rcu_exp_work rew;
	struct rcu_node *rnp;
	unsigned long s;

	/* If expedited grace periods are prohibited, fall back to normal. */
	if (rcu_gp_is_normal()) {
		wait_rcu_gp(call_rcu);
		return;
	}

	/* Take a snapshot of the sequence number. */
	s = rcu_exp_gp_seq_snap();
	if (exp_funnel_lock(s))
		return;  /* Someone else did our work for us. */

	/* Ensure that load happens before action based on it. */
	if (unlikely(rcu_scheduler_active == RCU_SCHEDULER_INIT)) {
		/* Direct call during scheduler init and early_initcalls(). */
		rcu_exp_sel_wait_wake(func, s);
	} else {
		/* Marshall arguments & schedule the expedited grace period. */
		rew.rew_func = func;
		rew.rew_s = s;
		INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
		queue_work(rcu_gp_wq, &rew.rew_work);
	}

	/* Wait for expedited grace period to complete. */
	rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
	rnp = rcu_get_root();
	wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
		   sync_exp_work_done(s));
	smp_mb(); /* Workqueue actions happen before return. */

	/* Let the next expedited grace period start. */
	mutex_unlock(&rcu_state.exp_mutex);
}
#ifdef CONFIG_PREEMPT_RCU
/*
 * Remote handler for smp_call_function_single().  If there is an
 * RCU read-side critical section in effect, request that the
 * next rcu_read_unlock() record the quiescent state up the
 * ->expmask fields in the rcu_node tree.  Otherwise, immediately
 * report the quiescent state.
 */
static void sync_rcu_exp_handler(void *unused)
{
	unsigned long flags;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp = rdp->mynode;
	struct task_struct *t = current;

	/*
	 * First, the common case of not being in an RCU read-side
	 * critical section.  If also enabled or idle, immediately
	 * report the quiescent state, otherwise defer.
	 */
	if (!t->rcu_read_lock_nesting) {
		if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
		    rcu_dynticks_curr_cpu_in_eqs()) {
			rcu_report_exp_rdp(rdp);
		} else {
			rdp->deferred_qs = true;
			set_tsk_need_resched(t);
			set_preempt_need_resched();
		}
		return;
	}

	/*
	 * Second, the less-common case of being in an RCU read-side
	 * critical section.  In this case we can count on a future
	 * rcu_read_unlock().  However, this rcu_read_unlock() might
	 * execute on some other CPU, but in that case there will be
	 * a future context switch.  Either way, if the expedited
	 * grace period is still waiting on this CPU, set ->deferred_qs
	 * so that the eventual quiescent state will be reported.
	 * Note that there is a large group of race conditions that
	 * can have caused this quiescent state to already have been
	 * reported, so we really do need to check ->expmask.
	 */
	if (t->rcu_read_lock_nesting > 0) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->expmask & rdp->grpmask)
			rdp->deferred_qs = true;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}

	/*
	 * The final and least likely case is where the interrupted
	 * code was just about to or just finished exiting the RCU-preempt
	 * read-side critical section, and no, we can't tell which.
	 * So either way, set ->deferred_qs to flag later code that
	 * a quiescent state is required.
	 *
	 * If the CPU is fully enabled (or if some buggy RCU-preempt
	 * read-side critical section is being used from idle), just
	 * invoke rcu_preempt_deferred_qs() to immediately report the
	 * quiescent state.  We cannot use rcu_read_unlock_special()
	 * because we are in an interrupt handler, which will cause that
	 * function to take an early exit without doing anything.
	 *
	 * Otherwise, force a context switch after the CPU enables everything.
	 */
	rdp->deferred_qs = true;
	if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
	    WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs())) {
		rcu_preempt_deferred_qs(t);
	} else {
		set_tsk_need_resched(t);
		set_preempt_need_resched();
	}
}
/* PREEMPT=y, so no PREEMPT=n expedited grace period to clean up after. */
static void sync_sched_exp_online_cleanup(int cpu)
{
}
/**
 * synchronize_rcu_expedited - Brute-force RCU grace period
 *
 * Wait for an RCU-preempt grace period, but expedite it.  The basic
 * idea is to IPI all non-idle non-nohz online CPUs.  The IPI handler
 * checks whether the CPU is in an RCU-preempt critical section, and
 * if so, it sets a flag that causes the outermost rcu_read_unlock()
 * to report the quiescent state.  On the other hand, if the CPU is
 * not in an RCU read-side critical section, the IPI handler reports
 * the quiescent state immediately.
 *
 * Although this is a great improvement over previous expedited
 * implementations, it is still unfriendly to real-time workloads, so is
 * thus not recommended for any sort of common-case code.  In fact, if
 * you are using synchronize_rcu_expedited() in a loop, please restructure
 * your code to batch your updates, and then use a single synchronize_rcu()
 * instead.  (A sketch of such batching follows this function.)
 *
 * This has the same semantics as (but is more brutal than) synchronize_rcu().
 */
void synchronize_rcu_expedited(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_rcu_expedited() in RCU read-side critical section");

	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
		return;
	_synchronize_rcu_expedited(sync_rcu_exp_handler);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
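
/*
 * Batching sketch referred to above (illustrative only; update_one() is
 * a stand-in for the caller's own update primitive, not something
 * defined in this file):
 *
 *	// Slow: one expedited grace period per update.
 *	for (i = 0; i < n; i++) {
 *		update_one(i);
 *		synchronize_rcu_expedited();
 *	}
 *
 *	// Better: batch the updates, then wait once, non-expedited.
 *	for (i = 0; i < n; i++)
 *		update_one(i);
 *	synchronize_rcu();
 */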
#else /* #ifdef CONFIG_PREEMPT_RCU */
/* Invoked on each online non-idle CPU for expedited quiescent state. */
static void sync_sched_exp_handler(void *unused)
{
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	rdp = this_cpu_ptr(&rcu_data);
	rnp = rdp->mynode;
	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
	    __this_cpu_read(rcu_data.cpu_no_qs.b.exp))
		return;
	if (rcu_is_cpu_rrupt_from_idle()) {
		rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
		return;
	}
	__this_cpu_write(rcu_data.cpu_no_qs.b.exp, true);
	/* Store .exp before .rcu_urgent_qs. */
	smp_store_release(this_cpu_ptr(&rcu_data.rcu_urgent_qs), true);
	set_tsk_need_resched(current);
	set_preempt_need_resched();
}
/* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */
static void sync_sched_exp_online_cleanup(int cpu)
{
	struct rcu_data *rdp;
	int ret;
	struct rcu_node *rnp;

	rdp = per_cpu_ptr(&rcu_data, cpu);
	rnp = rdp->mynode;
	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask))
		return;
	ret = smp_call_function_single(cpu, sync_sched_exp_handler, NULL, 0);
	WARN_ON_ONCE(ret);
}
/*
 * Because a context switch is a grace period for !PREEMPT, any
 * blocking grace-period wait automatically implies a grace period if
 * there is only one CPU online at any point in time during execution of
 * either synchronize_rcu() or synchronize_rcu_expedited().  It is OK to
 * occasionally incorrectly indicate that there are multiple CPUs online
 * when there was in fact only one the whole time, as this just adds some
 * overhead: RCU still operates correctly.
 */
static int rcu_blocking_is_gp(void)
{
	int ret;

	might_sleep();  /* Check for RCU read-side critical section. */
	preempt_disable();
	ret = num_online_cpus() <= 1;
	preempt_enable();
	return ret;
}
/* PREEMPT=n implementation of synchronize_rcu_expedited(). */
void synchronize_rcu_expedited(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_rcu_expedited() in RCU read-side critical section");

	/* If only one CPU, this is automatically a grace period. */
	if (rcu_blocking_is_gp())
		return;

	_synchronize_rcu_expedited(sync_sched_exp_handler);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */