/*
 * RCU expedited grace periods
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2016
 *
 * Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */
/*
 * Record the start of an expedited grace period.
 */
static void rcu_exp_gp_seq_start(struct rcu_state *rsp)
{
	rcu_seq_start(&rsp->expedited_sequence);
}
/*
 * Record the end of an expedited grace period.
 */
static void rcu_exp_gp_seq_end(struct rcu_state *rsp)
{
	rcu_seq_end(&rsp->expedited_sequence);
	smp_mb(); /* Ensure that consecutive grace periods serialize. */
}
/*
 * Take a snapshot of the expedited-grace-period counter.
 */
static unsigned long rcu_exp_gp_seq_snap(struct rcu_state *rsp)
{
	unsigned long s;

	smp_mb(); /* Caller's modifications seen first by other CPUs. */
	s = rcu_seq_snap(&rsp->expedited_sequence);
	trace_rcu_exp_grace_period(rsp->name, s, TPS("snap"));
	return s;
}
/*
 * Given a counter snapshot from rcu_exp_gp_seq_snap(), return true
 * if a full expedited grace period has elapsed since that snapshot.
 */
static bool rcu_exp_gp_seq_done(struct rcu_state *rsp, unsigned long s)
{
	return rcu_seq_done(&rsp->expedited_sequence, s);
}
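
/*
 * Together, the helpers above implement the sequence-counter protocol that
 * the rest of this file builds on: ->expedited_sequence advances when an
 * expedited grace period starts and again when it ends, a snapshot from
 * rcu_exp_gp_seq_snap() names the earliest counter value at which a full
 * grace period will have elapsed, and rcu_exp_gp_seq_done() says whether
 * that point has been reached.  A rough sketch of the piggy-backing
 * pattern used by the callers below:
 *
 *	s = rcu_exp_gp_seq_snap(rsp);
 *	if (rcu_exp_gp_seq_done(rsp, s))
 *		return;		(someone else's grace period covered us)
 *	...otherwise start a new expedited grace period or wait for one...
 */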
/*
 * Reset the ->expmaskinit values in the rcu_node tree to reflect any
 * recent CPU-online activity.  Note that these masks are not cleared
 * when CPUs go offline, so they reflect the union of all CPUs that have
 * ever been online.  This means that this function normally takes its
 * no-work-to-do fastpath.
 */
static void sync_exp_reset_tree_hotplug(struct rcu_state *rsp)
{
	bool done;
	unsigned long flags;
	unsigned long mask;
	unsigned long oldmask;
	int ncpus = smp_load_acquire(&rsp->ncpus); /* Order against locking. */
	struct rcu_node *rnp;
	struct rcu_node *rnp_up;

	/* If no new CPUs onlined since last time, nothing to do. */
	if (likely(ncpus == rsp->ncpus_snap))
		return;
	rsp->ncpus_snap = ncpus;

	/*
	 * Each pass through the following loop propagates newly onlined
	 * CPUs for the current rcu_node structure up the rcu_node tree.
	 */
	rcu_for_each_leaf_node(rsp, rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->expmaskinit == rnp->expmaskinitnext) {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			continue;  /* No new CPUs, nothing to do. */
		}

		/* Update this node's mask, track old value for propagation. */
		oldmask = rnp->expmaskinit;
		rnp->expmaskinit = rnp->expmaskinitnext;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

		/* If was already nonzero, nothing to propagate. */
		if (oldmask)
			continue;

		/* Propagate the new CPU up the tree. */
		mask = rnp->grpmask;
		rnp_up = rnp->parent;
		done = false;
		while (rnp_up) {
			raw_spin_lock_irqsave_rcu_node(rnp_up, flags);
			if (rnp_up->expmaskinit)
				done = true;
			rnp_up->expmaskinit |= mask;
			raw_spin_unlock_irqrestore_rcu_node(rnp_up, flags);
			if (done)
				break;
			mask = rnp_up->grpmask;
			rnp_up = rnp_up->parent;
		}
	}
}
/*
 * Reset the ->expmask values in the rcu_node tree in preparation for
 * a new expedited grace period.
 */
static void __maybe_unused sync_exp_reset_tree(struct rcu_state *rsp)
{
	unsigned long flags;
	struct rcu_node *rnp;

	sync_exp_reset_tree_hotplug(rsp);
	rcu_for_each_node_breadth_first(rsp, rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		WARN_ON_ONCE(rnp->expmask);
		rnp->expmask = rnp->expmaskinit;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
}
/*
 * Return non-zero if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period.  Works only for preemptible
 * RCU -- other RCU implementations use other means.
 *
 * Caller must hold the rcu_state's exp_mutex.
 */
static bool sync_rcu_preempt_exp_done(struct rcu_node *rnp)
{
	return rnp->exp_tasks == NULL &&
	       READ_ONCE(rnp->expmask) == 0;
}
/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.  This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree.  (Calm down, calm down, we do the recursion
 * iteratively!)
 *
 * Caller must hold the rcu_state's exp_mutex and the specified rcu_node
 * structure's ->lock.
 */
static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
				 bool wake, unsigned long flags)
	__releases(rnp->lock)
{
	unsigned long mask;

	for (;;) {
		if (!sync_rcu_preempt_exp_done(rnp)) {
			if (!rnp->expmask)
				rcu_initiate_boost(rnp, flags);
			else
				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			break;
		}
		if (rnp->parent == NULL) {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			if (wake) {
				smp_mb(); /* EGP done before wake_up(). */
				swake_up(&rsp->expedited_wq);
			}
			break;
		}
		mask = rnp->grpmask;
		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */
		rnp = rnp->parent;
		raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
		WARN_ON_ONCE(!(rnp->expmask & mask));
		rnp->expmask &= ~mask;
	}
}
/*
 * Report expedited quiescent state for specified node.  This is a
 * lock-acquisition wrapper function for __rcu_report_exp_rnp().
 *
 * Caller must hold the rcu_state's exp_mutex.
 */
static void __maybe_unused rcu_report_exp_rnp(struct rcu_state *rsp,
					      struct rcu_node *rnp, bool wake)
{
	unsigned long flags;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	__rcu_report_exp_rnp(rsp, rnp, wake, flags);
}
/*
 * Report expedited quiescent state for multiple CPUs, all covered by the
 * specified leaf rcu_node structure.  Caller must hold the rcu_state's
 * exp_mutex.
 */
static void rcu_report_exp_cpu_mult(struct rcu_state *rsp, struct rcu_node *rnp,
				    unsigned long mask, bool wake)
{
	unsigned long flags;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!(rnp->expmask & mask)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	rnp->expmask &= ~mask;
	__rcu_report_exp_rnp(rsp, rnp, wake, flags); /* Releases rnp->lock. */
}
/*
 * Report expedited quiescent state for specified rcu_data (CPU).
 */
static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp,
			       bool wake)
{
	rcu_report_exp_cpu_mult(rsp, rdp->mynode, rdp->grpmask, wake);
}
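
/*
 * Taken together, the reporting functions above clear the relevant CPUs'
 * bits in their leaf rcu_node structure's ->expmask.  Once a given
 * rcu_node structure has no remaining ->expmask bits and no blocked
 * tasks (->exp_tasks == NULL), __rcu_report_exp_rnp() clears that node's
 * bit in its parent, and so on up the tree; when the root clears, the
 * task driving the expedited grace period is awakened via ->expedited_wq.
 * For example, with a two-level tree the last holdout CPU's quiescent
 * state propagates leaf, then root, then wakeup within a single call.
 */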
/* Common code for synchronize_{rcu,sched}_expedited() work-done checking. */
static bool sync_exp_work_done(struct rcu_state *rsp, atomic_long_t *stat,
			       unsigned long s)
{
	if (rcu_exp_gp_seq_done(rsp, s)) {
		trace_rcu_exp_grace_period(rsp->name, s, TPS("done"));
		/* Ensure test happens before caller kfree(). */
		smp_mb__before_atomic(); /* ^^^ */
		atomic_long_inc(stat);
		return true;
	}
	return false;
}
/*
 * Funnel-lock acquisition for expedited grace periods.  Returns true
 * if some other task completed an expedited grace period that this task
 * can piggy-back on, and with no mutex held.  Otherwise, returns false
 * with the mutex held, indicating that the caller must actually do the
 * expedited grace period.
 */
static bool exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
{
	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id());
	struct rcu_node *rnp = rdp->mynode;
	struct rcu_node *rnp_root = rcu_get_root(rsp);

	/* Low-contention fastpath. */
	if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) &&
	    (rnp == rnp_root ||
	     ULONG_CMP_LT(READ_ONCE(rnp_root->exp_seq_rq), s)) &&
	    mutex_trylock(&rsp->exp_mutex))
		goto fastpath;

	/*
	 * Each pass through the following loop works its way up
	 * the rcu_node tree, returning if others have done the work or
	 * otherwise falls through to acquire rsp->exp_mutex.  The mapping
	 * from CPU to rcu_node structure can be inexact, as it is just
	 * promoting locality and is not strictly needed for correctness.
	 */
	for (; rnp != NULL; rnp = rnp->parent) {
		if (sync_exp_work_done(rsp, &rdp->exp_workdone1, s))
			return true;

		/* Work not done, either wait here or go up. */
		spin_lock(&rnp->exp_lock);
		if (ULONG_CMP_GE(rnp->exp_seq_rq, s)) {

			/* Someone else doing GP, so wait for them. */
			spin_unlock(&rnp->exp_lock);
			trace_rcu_exp_funnel_lock(rsp->name, rnp->level,
						  rnp->grplo, rnp->grphi,
						  TPS("wait"));
			wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
				   sync_exp_work_done(rsp,
						      &rdp->exp_workdone2, s));
			return true;
		}
		rnp->exp_seq_rq = s; /* Followers can wait on us. */
		spin_unlock(&rnp->exp_lock);
		trace_rcu_exp_funnel_lock(rsp->name, rnp->level, rnp->grplo,
					  rnp->grphi, TPS("nxtlvl"));
	}
	mutex_lock(&rsp->exp_mutex);
fastpath:
	if (sync_exp_work_done(rsp, &rdp->exp_workdone3, s)) {
		mutex_unlock(&rsp->exp_mutex);
		return true;
	}
	rcu_exp_gp_seq_start(rsp);
	trace_rcu_exp_grace_period(rsp->name, s, TPS("start"));
	return false;
}
/* Invoked on each online non-idle CPU for expedited quiescent state. */
static void sync_sched_exp_handler(void *data)
{
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	struct rcu_state *rsp = data;

	rdp = this_cpu_ptr(rsp->rda);
	rnp = rdp->mynode;
	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
	    __this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))
		return;
	if (rcu_is_cpu_rrupt_from_idle()) {
		rcu_report_exp_rdp(&rcu_sched_state,
				   this_cpu_ptr(&rcu_sched_data), true);
		return;
	}
	__this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, true);
	/* Store .exp before .rcu_urgent_qs. */
	smp_store_release(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs), true);
	resched_cpu(smp_processor_id());
}
/* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */
static void sync_sched_exp_online_cleanup(int cpu)
{
	struct rcu_data *rdp;
	int ret;
	struct rcu_node *rnp;
	struct rcu_state *rsp = &rcu_sched_state;

	rdp = per_cpu_ptr(rsp->rda, cpu);
	rnp = rdp->mynode;
	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask))
		return;
	ret = smp_call_function_single(cpu, sync_sched_exp_handler, rsp, 0);
	WARN_ON_ONCE(ret);
}
/*
 * Select the nodes that the upcoming expedited grace period needs
 * to wait for.
 */
static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
				     smp_call_func_t func)
{
	int cpu;
	unsigned long flags;
	unsigned long mask_ofl_test;
	unsigned long mask_ofl_ipi;
	int ret;
	struct rcu_node *rnp;

	sync_exp_reset_tree(rsp);
	rcu_for_each_leaf_node(rsp, rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);

		/* Each pass checks a CPU for identity, offline, and idle. */
		mask_ofl_test = 0;
		for_each_leaf_node_possible_cpu(rnp, cpu) {
			struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);

			rdp->exp_dynticks_snap =
				rcu_dynticks_snap(rdp->dynticks);
			if (raw_smp_processor_id() == cpu ||
			    rcu_dynticks_in_eqs(rdp->exp_dynticks_snap) ||
			    !(rnp->qsmaskinitnext & rdp->grpmask))
				mask_ofl_test |= rdp->grpmask;
		}
		mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;

		/*
		 * Need to wait for any blocked tasks as well.  Note that
		 * additional blocking tasks will also block the expedited
		 * GP until such time as the ->expmask bits are cleared.
		 */
		if (rcu_preempt_has_tasks(rnp))
			rnp->exp_tasks = rnp->blkd_tasks.next;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

		/* IPI the remaining CPUs for expedited quiescent state. */
		for_each_leaf_node_possible_cpu(rnp, cpu) {
			unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
			struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);

			if (!(mask_ofl_ipi & mask))
				continue;
retry_ipi:
			if (rcu_dynticks_in_eqs_since(rdp->dynticks,
						      rdp->exp_dynticks_snap)) {
				mask_ofl_test |= mask;
				continue;
			}
			ret = smp_call_function_single(cpu, func, rsp, 0);
			if (!ret) {
				mask_ofl_ipi &= ~mask;
				continue;
			}
			/* Failed, raced with CPU hotplug operation. */
			raw_spin_lock_irqsave_rcu_node(rnp, flags);
			if ((rnp->qsmaskinitnext & mask) &&
			    (rnp->expmask & mask)) {
				/* Online, so delay for a bit and try again. */
				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
				schedule_timeout_uninterruptible(1);
				goto retry_ipi;
			}
			/* CPU really is offline, so we can ignore it. */
			if (!(rnp->expmask & mask))
				mask_ofl_ipi &= ~mask;
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		}
		/* Report quiescent states for those that went offline. */
		mask_ofl_test |= mask_ofl_ipi;
		if (mask_ofl_test)
			rcu_report_exp_cpu_mult(rsp, rnp, mask_ofl_test, false);
	}
}
static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
{
	int cpu;
	unsigned long jiffies_stall;
	unsigned long jiffies_start;
	unsigned long mask;
	int ndetected;
	struct rcu_node *rnp;
	struct rcu_node *rnp_root = rcu_get_root(rsp);
	int ret;

	jiffies_stall = rcu_jiffies_till_stall_check();
	jiffies_start = jiffies;

	for (;;) {
		ret = swait_event_timeout(
				rsp->expedited_wq,
				sync_rcu_preempt_exp_done(rnp_root),
				jiffies_stall);
		if (ret > 0 || sync_rcu_preempt_exp_done(rnp_root))
			return;
		WARN_ON(ret < 0);  /* workqueues should not be signaled. */
		if (rcu_cpu_stall_suppress)
			continue;
		panic_on_rcu_stall();
		pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {",
		       rsp->name);
		ndetected = 0;
		rcu_for_each_leaf_node(rsp, rnp) {
			ndetected += rcu_print_task_exp_stall(rnp);
			for_each_leaf_node_possible_cpu(rnp, cpu) {
				struct rcu_data *rdp;

				mask = leaf_node_cpu_bit(rnp, cpu);
				if (!(rnp->expmask & mask))
					continue;
				ndetected++;
				rdp = per_cpu_ptr(rsp->rda, cpu);
				pr_cont(" %d-%c%c%c", cpu,
					"O."[!!cpu_online(cpu)],
					"o."[!!(rdp->grpmask & rnp->expmaskinit)],
					"N."[!!(rdp->grpmask & rnp->expmaskinitnext)]);
			}
		}
		pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
			jiffies - jiffies_start, rsp->expedited_sequence,
			rnp_root->expmask, ".T"[!!rnp_root->exp_tasks]);
		if (ndetected) {
			pr_err("blocking rcu_node structures:");
			rcu_for_each_node_breadth_first(rsp, rnp) {
				if (rnp == rnp_root)
					continue; /* printed unconditionally */
				if (sync_rcu_preempt_exp_done(rnp))
					continue;
				pr_cont(" l=%u:%d-%d:%#lx/%c",
					rnp->level, rnp->grplo, rnp->grphi,
					rnp->expmask,
					".T"[!!rnp->exp_tasks]);
			}
			pr_cont("\n");
		}
		rcu_for_each_leaf_node(rsp, rnp) {
			for_each_leaf_node_possible_cpu(rnp, cpu) {
				mask = leaf_node_cpu_bit(rnp, cpu);
				if (!(rnp->expmask & mask))
					continue;
				dump_cpu_task(cpu);
			}
		}
		jiffies_stall = 3 * rcu_jiffies_till_stall_check() + 3;
	}
}
/*
 * Wait for the current expedited grace period to complete, and then
 * wake up everyone who piggybacked on the just-completed expedited
 * grace period.  Also update all the ->exp_seq_rq counters as needed
 * in order to avoid counter-wrap problems.
 */
static void rcu_exp_wait_wake(struct rcu_state *rsp, unsigned long s)
{
	struct rcu_node *rnp;

	synchronize_sched_expedited_wait(rsp);
	rcu_exp_gp_seq_end(rsp);
	trace_rcu_exp_grace_period(rsp->name, s, TPS("end"));

	/*
	 * Switch over to wakeup mode, allowing the next GP, but -only- the
	 * next GP, to proceed.
	 */
	mutex_lock(&rsp->exp_wake_mutex);

	rcu_for_each_node_breadth_first(rsp, rnp) {
		if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
			spin_lock(&rnp->exp_lock);
			/* Recheck, avoid hang in case someone just arrived. */
			if (ULONG_CMP_LT(rnp->exp_seq_rq, s))
				rnp->exp_seq_rq = s;
			spin_unlock(&rnp->exp_lock);
		}
		smp_mb(); /* All above changes before wakeup. */
		wake_up_all(&rnp->exp_wq[rcu_seq_ctr(rsp->expedited_sequence) & 0x3]);
	}
	trace_rcu_exp_grace_period(rsp->name, s, TPS("endwake"));
	mutex_unlock(&rsp->exp_wake_mutex);
}
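
/*
 * Note on the "& 0x3" indexing above and in exp_funnel_lock(): each
 * rcu_node structure carries a small array of wait queues indexed by the
 * low-order bits of the expedited-grace-period counter, so that waiters
 * on distinct recent grace periods do not share a queue and thus are not
 * needlessly awakened for a grace period that does not yet cover them.
 */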
/* Let the workqueue handler know what it is supposed to do. */
struct rcu_exp_work {
	smp_call_func_t rew_func;
	struct rcu_state *rew_rsp;
	unsigned long rew_s;
	struct work_struct rew_work;
};
/*
 * Common code to drive an expedited grace period forward, used by
 * workqueues and mid-boot-time tasks.
 */
static void rcu_exp_sel_wait_wake(struct rcu_state *rsp,
				  smp_call_func_t func, unsigned long s)
{
	/* Initialize the rcu_node tree in preparation for the wait. */
	sync_rcu_exp_select_cpus(rsp, func);

	/* Wait and clean up, including waking everyone. */
	rcu_exp_wait_wake(rsp, s);
}
/*
 * Work-queue handler to drive an expedited grace period forward.
 */
static void wait_rcu_exp_gp(struct work_struct *wp)
{
	struct rcu_exp_work *rewp;

	rewp = container_of(wp, struct rcu_exp_work, rew_work);
	rcu_exp_sel_wait_wake(rewp->rew_rsp, rewp->rew_func, rewp->rew_s);
}
/*
 * Given an rcu_state pointer and a smp_call_function() handler, kick
 * off the specified flavor of expedited grace period.
 */
static void _synchronize_rcu_expedited(struct rcu_state *rsp,
				       smp_call_func_t func)
{
	struct rcu_data *rdp;
	struct rcu_exp_work rew;
	struct rcu_node *rnp;
	unsigned long s;

	/* If expedited grace periods are prohibited, fall back to normal. */
	if (rcu_gp_is_normal()) {
		wait_rcu_gp(rsp->call);
		return;
	}

	/* Take a snapshot of the sequence number.  */
	s = rcu_exp_gp_seq_snap(rsp);
	if (exp_funnel_lock(rsp, s))
		return;  /* Someone else did our work for us. */

	/* Ensure that load happens before action based on it. */
	if (unlikely(rcu_scheduler_active == RCU_SCHEDULER_INIT)) {
		/* Direct call during scheduler init and early_initcalls(). */
		rcu_exp_sel_wait_wake(rsp, func, s);
	} else {
		/* Marshall arguments & schedule the expedited grace period. */
		rew.rew_func = func;
		rew.rew_rsp = rsp;
		rew.rew_s = s;
		INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
		schedule_work(&rew.rew_work);
	}

	/* Wait for expedited grace period to complete. */
	rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id());
	rnp = rcu_get_root(rsp);
	wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
		   sync_exp_work_done(rsp, &rdp->exp_workdone0, s));
	smp_mb(); /* Workqueue actions happen before return. */

	/* Let the next expedited grace period start. */
	mutex_unlock(&rsp->exp_mutex);
}
/**
 * synchronize_sched_expedited - Brute-force RCU-sched grace period
 *
 * Wait for an RCU-sched grace period to elapse, but use a "big hammer"
 * approach to force the grace period to end quickly.  This consumes
 * significant time on all CPUs and is unfriendly to real-time workloads,
 * so is thus not recommended for any sort of common-case code.  In fact,
 * if you are using synchronize_sched_expedited() in a loop, please
 * restructure your code to batch your updates, and then use a single
 * synchronize_sched() instead.
 *
 * This implementation can be thought of as an application of sequence
 * locking to expedited grace periods, but using the sequence counter to
 * determine when someone else has already done the work instead of for
 * retrying readers.
 */
void synchronize_sched_expedited(void)
{
	struct rcu_state *rsp = &rcu_sched_state;

	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_sched_expedited() in RCU read-side critical section");

	/* If only one CPU, this is automatically a grace period. */
	if (rcu_blocking_is_gp())
		return;

	_synchronize_rcu_expedited(rsp, sync_sched_exp_handler);
}
EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
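
/*
 * An example of the batching advice in the header comment above, with a
 * hypothetical update_one_item() standing in for the caller's update code.
 * Rather than
 *
 *	for (i = 0; i < n; i++) {
 *		update_one_item(i);
 *		synchronize_sched_expedited();
 *	}
 *
 * perform all the updates and then wait just once, preferably without
 * the expedited big hammer:
 *
 *	for (i = 0; i < n; i++)
 *		update_one_item(i);
 *	synchronize_sched();
 */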
#ifdef CONFIG_PREEMPT_RCU

/*
 * Remote handler for smp_call_function_single().  If there is an
 * RCU read-side critical section in effect, request that the
 * next rcu_read_unlock() record the quiescent state up the
 * ->expmask fields in the rcu_node tree.  Otherwise, immediately
 * report the quiescent state.
 */
static void sync_rcu_exp_handler(void *info)
{
	struct rcu_data *rdp;
	struct rcu_state *rsp = info;
	struct task_struct *t = current;

	/*
	 * Within an RCU read-side critical section, request that the next
	 * rcu_read_unlock() report.  Unless this RCU read-side critical
	 * section has already blocked, in which case it is already set
	 * up for the expedited grace period to wait on it.
	 */
	if (t->rcu_read_lock_nesting > 0 &&
	    !t->rcu_read_unlock_special.b.blocked) {
		t->rcu_read_unlock_special.b.exp_need_qs = true;
		return;
	}

	/*
	 * We are either exiting an RCU read-side critical section (negative
	 * values of t->rcu_read_lock_nesting) or are not in one at all
	 * (zero value of t->rcu_read_lock_nesting).  Or we are in an RCU
	 * read-side critical section that blocked before this expedited
	 * grace period started.  Either way, we can immediately report
	 * the quiescent state.
	 */
	rdp = this_cpu_ptr(rsp->rda);
	rcu_report_exp_rdp(rsp, rdp, true);
}
/**
 * synchronize_rcu_expedited - Brute-force RCU grace period
 *
 * Wait for an RCU-preempt grace period, but expedite it.  The basic
 * idea is to IPI all non-idle non-nohz online CPUs.  The IPI handler
 * checks whether the CPU is in an RCU-preempt critical section, and
 * if so, it sets a flag that causes the outermost rcu_read_unlock()
 * to report the quiescent state.  On the other hand, if the CPU is
 * not in an RCU read-side critical section, the IPI handler reports
 * the quiescent state immediately.
 *
 * Although this is a great improvement over previous expedited
 * implementations, it is still unfriendly to real-time workloads, so is
 * thus not recommended for any sort of common-case code.  In fact, if
 * you are using synchronize_rcu_expedited() in a loop, please restructure
 * your code to batch your updates, and then use a single synchronize_rcu()
 * instead.
 */
void synchronize_rcu_expedited(void)
{
	struct rcu_state *rsp = rcu_state_p;

	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_rcu_expedited() in RCU read-side critical section");

	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
		return;
	_synchronize_rcu_expedited(rsp, sync_rcu_exp_handler);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
#else /* #ifdef CONFIG_PREEMPT_RCU */

/*
 * Wait for an rcu-preempt grace period, but make it happen quickly.
 * But because preemptible RCU does not exist, map to rcu-sched.
 */
void synchronize_rcu_expedited(void)
{
	synchronize_sched_expedited();
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */