/*
 * RCU expedited grace periods
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2016
 *
 * Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */
/* Wrapper functions for expedited grace periods.  */
static void rcu_exp_gp_seq_start(struct rcu_state *rsp)
{
        rcu_seq_start(&rsp->expedited_sequence);
}
static void rcu_exp_gp_seq_end(struct rcu_state *rsp)
{
        rcu_seq_end(&rsp->expedited_sequence);
        smp_mb(); /* Ensure that consecutive grace periods serialize. */
}
static unsigned long rcu_exp_gp_seq_snap(struct rcu_state *rsp)
{
        unsigned long s;

        smp_mb(); /* Caller's modifications seen first by other CPUs. */
        s = rcu_seq_snap(&rsp->expedited_sequence);
        trace_rcu_exp_grace_period(rsp->name, s, TPS("snap"));
        return s;
}
static bool rcu_exp_gp_seq_done(struct rcu_state *rsp, unsigned long s)
{
        return rcu_seq_done(&rsp->expedited_sequence, s);
}
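/*
 * Illustration of the snap/done protocol (hypothetical helper, never
 * called; the real users are sync_exp_work_done() and
 * _synchronize_rcu_expedited() below): snapshot the sequence value
 * that the needed grace period will produce, then test for it.
 */
static bool __maybe_unused rcu_exp_seq_sketch(struct rcu_state *rsp)
{
        unsigned long s;

        s = rcu_exp_gp_seq_snap(rsp); /* Value ->expedited_sequence must reach. */
        /* ...meanwhile, some other task may drive a full expedited GP... */
        return rcu_exp_gp_seq_done(rsp, s); /* True iff that GP has elapsed. */
}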
/*
 * Reset the ->expmaskinit values in the rcu_node tree to reflect any
 * recent CPU-online activity.  Note that these masks are not cleared
 * when CPUs go offline, so they reflect the union of all CPUs that have
 * ever been online.  This means that this function normally takes its
 * no-work-to-do fastpath.
 */
static void sync_exp_reset_tree_hotplug(struct rcu_state *rsp)
{
        bool done;
        unsigned long flags;
        unsigned long mask;
        unsigned long oldmask;
        int ncpus = READ_ONCE(rsp->ncpus);
        struct rcu_node *rnp;
        struct rcu_node *rnp_up;

        /* If no new CPUs onlined since last time, nothing to do. */
        if (likely(ncpus == rsp->ncpus_snap))
                return;
        rsp->ncpus_snap = ncpus;

        /*
         * Each pass through the following loop propagates newly onlined
         * CPUs for the current rcu_node structure up the rcu_node tree.
         */
        rcu_for_each_leaf_node(rsp, rnp) {
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
                if (rnp->expmaskinit == rnp->expmaskinitnext) {
                        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                        continue;  /* No new CPUs, nothing to do. */
                }

                /* Update this node's mask, track old value for propagation. */
                oldmask = rnp->expmaskinit;
                rnp->expmaskinit = rnp->expmaskinitnext;
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

                /* If was already nonzero, nothing to propagate. */
                if (oldmask)
                        continue;

                /* Propagate the new CPU up the tree. */
                mask = rnp->grpmask;
                rnp_up = rnp->parent;
                done = false;
                while (rnp_up) {
                        raw_spin_lock_irqsave_rcu_node(rnp_up, flags);
                        if (rnp_up->expmaskinit)
                                done = true;
                        rnp_up->expmaskinit |= mask;
                        raw_spin_unlock_irqrestore_rcu_node(rnp_up, flags);
                        if (done)
                                break;
                        mask = rnp_up->grpmask;
                        rnp_up = rnp_up->parent;
                }
        }
}
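/*
 * Worked example (hypothetical topology): the first time any CPU on a
 * given leaf comes online, that leaf's ->expmaskinit goes from zero to
 * nonzero, so the loop above walks toward the root setting this leaf's
 * ->grpmask bit in each ancestor, stopping after updating the first
 * ancestor that already had a nonzero ->expmaskinit.  Later onlinings
 * of CPUs on the same leaf change only the leaf itself, taking the
 * no-propagation path.
 */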
/*
 * Reset the ->expmask values in the rcu_node tree in preparation for
 * a new expedited grace period.
 */
static void __maybe_unused sync_exp_reset_tree(struct rcu_state *rsp)
{
        unsigned long flags;
        struct rcu_node *rnp;

        sync_exp_reset_tree_hotplug(rsp);
        rcu_for_each_node_breadth_first(rsp, rnp) {
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
                WARN_ON_ONCE(rnp->expmask);
                rnp->expmask = rnp->expmaskinit;
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        }
}
/*
 * Return non-zero if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period.  Works only for preemptible
 * RCU -- other RCU implementations use other means.
 *
 * Caller must hold the rcu_state's exp_mutex.
 */
static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
{
        return rnp->exp_tasks == NULL &&
               READ_ONCE(rnp->expmask) == 0;
}
/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.  This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree.  (Calm down, calm down, we do the recursion
 * iteratively!)
 *
 * Caller must hold the rcu_state's exp_mutex and the specified rcu_node
 * structure's ->lock.
 */
static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
                                 bool wake, unsigned long flags)
        __releases(rnp->lock)
{
        unsigned long mask;

        for (;;) {
                if (!sync_rcu_preempt_exp_done(rnp)) {
                        if (!rnp->expmask)
                                rcu_initiate_boost(rnp, flags);
                        else
                                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                        break;
                }
                if (rnp->parent == NULL) {
                        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                        if (wake) {
                                smp_mb(); /* EGP done before wake_up(). */
                                swake_up(&rsp->expedited_wq);
                        }
                        break;
                }
                mask = rnp->grpmask;
                raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */
                rnp = rnp->parent;
                raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
                WARN_ON_ONCE(!(rnp->expmask & mask));
                rnp->expmask &= ~mask;
        }
}
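/*
 * Worked example for the loop above, assuming a two-level tree: once
 * the last blocked task and CPU covered by a leaf have reported,
 * sync_rcu_preempt_exp_done() holds for that leaf, so the loop clears
 * the leaf's ->grpmask bit from its parent's ->expmask and moves up.
 * If the root then also satisfies the predicate, the waiter sleeping
 * on ->expedited_wq is awakened (when wake is set); otherwise the walk
 * stops at the first node still waiting on other CPUs or tasks.
 */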
/*
 * Report expedited quiescent state for specified node.  This is a
 * lock-acquisition wrapper function for __rcu_report_exp_rnp().
 *
 * Caller must hold the rcu_state's exp_mutex.
 */
static void __maybe_unused rcu_report_exp_rnp(struct rcu_state *rsp,
                                              struct rcu_node *rnp, bool wake)
{
        unsigned long flags;

        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        __rcu_report_exp_rnp(rsp, rnp, wake, flags);
}
/*
 * Report expedited quiescent state for multiple CPUs, all covered by the
 * specified leaf rcu_node structure.  Caller must hold the rcu_state's
 * exp_mutex.
 */
static void rcu_report_exp_cpu_mult(struct rcu_state *rsp, struct rcu_node *rnp,
                                    unsigned long mask, bool wake)
{
        unsigned long flags;

        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        if (!(rnp->expmask & mask)) {
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                return;
        }
        rnp->expmask &= ~mask;
        __rcu_report_exp_rnp(rsp, rnp, wake, flags); /* Releases rnp->lock. */
}
/*
 * Report expedited quiescent state for specified rcu_data (CPU).
 */
static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp,
                               bool wake)
{
        rcu_report_exp_cpu_mult(rsp, rdp->mynode, rdp->grpmask, wake);
}
/* Common code for synchronize_{rcu,sched}_expedited() work-done checking. */
static bool sync_exp_work_done(struct rcu_state *rsp, atomic_long_t *stat,
                               unsigned long s)
{
        if (rcu_exp_gp_seq_done(rsp, s)) {
                trace_rcu_exp_grace_period(rsp->name, s, TPS("done"));
                /* Ensure test happens before caller kfree(). */
                smp_mb__before_atomic(); /* ^^^ */
                atomic_long_inc(stat);
                return true;
        }
        return false;
}
/*
 * Funnel-lock acquisition for expedited grace periods.  Returns true
 * if some other task completed an expedited grace period that this task
 * can piggy-back on, and with no mutex held.  Otherwise, returns false
 * with the mutex held, indicating that the caller must actually do the
 * expedited grace period.
 */
static bool exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
{
        struct rcu_data *rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id());
        struct rcu_node *rnp = rdp->mynode;
        struct rcu_node *rnp_root = rcu_get_root(rsp);

        /* Low-contention fastpath. */
        if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) &&
            (rnp == rnp_root ||
             ULONG_CMP_LT(READ_ONCE(rnp_root->exp_seq_rq), s)) &&
            mutex_trylock(&rsp->exp_mutex))
                goto fastpath;

        /*
         * Each pass through the following loop works its way up
         * the rcu_node tree, returning if others have done the work or
         * otherwise falls through to acquire rsp->exp_mutex.  The mapping
         * from CPU to rcu_node structure can be inexact, as it is just
         * promoting locality and is not strictly needed for correctness.
         */
        for (; rnp != NULL; rnp = rnp->parent) {
                if (sync_exp_work_done(rsp, &rdp->exp_workdone1, s))
                        return true;

                /* Work not done, either wait here or go up. */
                spin_lock(&rnp->exp_lock);
                if (ULONG_CMP_GE(rnp->exp_seq_rq, s)) {

                        /* Someone else doing GP, so wait for them. */
                        spin_unlock(&rnp->exp_lock);
                        trace_rcu_exp_funnel_lock(rsp->name, rnp->level,
                                                  rnp->grplo, rnp->grphi,
                                                  TPS("wait"));
                        wait_event(rnp->exp_wq[(s >> 1) & 0x3],
                                   sync_exp_work_done(rsp,
                                                      &rdp->exp_workdone2, s));
                        return true;
                }
                rnp->exp_seq_rq = s; /* Followers can wait on us. */
                spin_unlock(&rnp->exp_lock);
                trace_rcu_exp_funnel_lock(rsp->name, rnp->level, rnp->grplo,
                                          rnp->grphi, TPS("nxtlvl"));
        }
        mutex_lock(&rsp->exp_mutex);
fastpath:
        if (sync_exp_work_done(rsp, &rdp->exp_workdone3, s)) {
                mutex_unlock(&rsp->exp_mutex);
                return true;
        }
        rcu_exp_gp_seq_start(rsp);
        trace_rcu_exp_grace_period(rsp->name, s, TPS("start"));
        return false;
}
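/*
 * A worked example of the ->exp_wq[(s >> 1) & 0x3] indexing above:
 * the low bit of the sequence counter flags a grace period in
 * progress, so s >> 1 numbers the expedited grace period that must
 * complete, and the & 0x3 folds that number onto one of four wait
 * queues per rcu_node structure.  For instance, if
 * ->expedited_sequence is 6 (idle), rcu_exp_gp_seq_snap() returns 8,
 * and waiters sleep on queue (8 >> 1) & 0x3 = 0; a concurrent
 * in-progress value of 7 snaps to 10 and uses queue 1.  This keeps
 * waiters for distinct recent grace periods on distinct queues.
 */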
/* Invoked on each online non-idle CPU for expedited quiescent state. */
static void sync_sched_exp_handler(void *data)
{
        struct rcu_data *rdp;
        struct rcu_node *rnp;
        struct rcu_state *rsp = data;

        rdp = this_cpu_ptr(rsp->rda);
        rnp = rdp->mynode;
        if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
            __this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))
                return;
        if (rcu_is_cpu_rrupt_from_idle()) {
                rcu_report_exp_rdp(&rcu_sched_state,
                                   this_cpu_ptr(&rcu_sched_data), true);
                return;
        }
        __this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, true);
        resched_cpu(smp_processor_id());
}
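/*
 * Note on the handler above: for RCU-sched, a context switch is a
 * quiescent state.  When the interrupted CPU is neither idle nor
 * already flagged, the handler therefore records the need for an
 * expedited quiescent state in ->cpu_no_qs.b.exp and forces a
 * reschedule, so that the quiescent state gets reported from the
 * scheduler path shortly thereafter.
 */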
/* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */
static void sync_sched_exp_online_cleanup(int cpu)
{
        struct rcu_data *rdp;
        int ret;
        struct rcu_node *rnp;
        struct rcu_state *rsp = &rcu_sched_state;

        rdp = per_cpu_ptr(rsp->rda, cpu);
        rnp = rdp->mynode;
        if (!(READ_ONCE(rnp->expmask) & rdp->grpmask))
                return;
        ret = smp_call_function_single(cpu, sync_sched_exp_handler, rsp, 0);
        WARN_ON_ONCE(ret);
}
/*
 * Select the nodes that the upcoming expedited grace period needs
 * to wait for.
 */
static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
                                     smp_call_func_t func)
{
        int cpu;
        unsigned long flags;
        unsigned long mask_ofl_test;
        unsigned long mask_ofl_ipi;
        int ret;
        struct rcu_node *rnp;

        sync_exp_reset_tree(rsp);
        rcu_for_each_leaf_node(rsp, rnp) {
                raw_spin_lock_irqsave_rcu_node(rnp, flags);

                /* Each pass checks a CPU for identity, offline, and idle. */
                mask_ofl_test = 0;
                for_each_leaf_node_possible_cpu(rnp, cpu) {
                        struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
                        struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);

                        rdp->exp_dynticks_snap =
                                atomic_add_return(0, &rdtp->dynticks);
                        if (raw_smp_processor_id() == cpu ||
                            !(rdp->exp_dynticks_snap & 0x1) ||
                            !(rnp->qsmaskinitnext & rdp->grpmask))
                                mask_ofl_test |= rdp->grpmask;
                }
                mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;

                /*
                 * Need to wait for any blocked tasks as well.  Note that
                 * additional blocking tasks will also block the expedited
                 * GP until such time as the ->expmask bits are cleared.
                 */
                if (rcu_preempt_has_tasks(rnp))
                        rnp->exp_tasks = rnp->blkd_tasks.next;
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

                /* IPI the remaining CPUs for expedited quiescent state. */
                for_each_leaf_node_possible_cpu(rnp, cpu) {
                        unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
                        struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
                        struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);

                        if (!(mask_ofl_ipi & mask))
                                continue;
retry_ipi:
                        if (atomic_add_return(0, &rdtp->dynticks) !=
                            rdp->exp_dynticks_snap) {
                                mask_ofl_test |= mask;
                                continue;
                        }
                        ret = smp_call_function_single(cpu, func, rsp, 0);
                        if (!ret) {
                                mask_ofl_ipi &= ~mask;
                                continue;
                        }
                        /* Failed, raced with CPU hotplug operation. */
                        raw_spin_lock_irqsave_rcu_node(rnp, flags);
                        if ((rnp->qsmaskinitnext & mask) &&
                            (rnp->expmask & mask)) {
                                /* Online, so delay for a bit and try again. */
                                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                                schedule_timeout_uninterruptible(1);
                                goto retry_ipi;
                        }
                        /* CPU really is offline, so we can ignore it. */
                        if (!(rnp->expmask & mask))
                                mask_ofl_ipi &= ~mask;
                        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                }
                /* Report quiescent states for those that went offline. */
                mask_ofl_test |= mask_ofl_ipi;
                if (mask_ofl_test)
                        rcu_report_exp_cpu_mult(rsp, rnp, mask_ofl_test, false);
        }
}
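/*
 * A note on the idle checks above: the per-CPU ->dynticks counter is
 * even when the CPU is in dynticks-idle mode and odd otherwise, so a
 * snapshot with the low bit clear means the CPU is idle and thus
 * already in an extended quiescent state, and no IPI is needed.  The
 * atomic_add_return(0, ...) re-read before each IPI catches CPUs that
 * passed through idle after the snapshot: any change in the counter
 * implies a quiescent state, letting the IPI be skipped there as well.
 */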
static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
{
        int cpu;
        unsigned long jiffies_stall;
        unsigned long jiffies_start;
        unsigned long mask;
        int ndetected;
        struct rcu_node *rnp;
        struct rcu_node *rnp_root = rcu_get_root(rsp);
        int ret;

        jiffies_stall = rcu_jiffies_till_stall_check();
        jiffies_start = jiffies;

        for (;;) {
                ret = swait_event_timeout(
                                rsp->expedited_wq,
                                sync_rcu_preempt_exp_done(rnp_root),
                                jiffies_stall);
                if (ret > 0 || sync_rcu_preempt_exp_done(rnp_root))
                        return;
                WARN_ON(ret < 0);  /* workqueues should not be signaled. */
                if (rcu_cpu_stall_suppress)
                        continue;
                panic_on_rcu_stall();
                pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {",
                       rsp->name);
                ndetected = 0;
                rcu_for_each_leaf_node(rsp, rnp) {
                        ndetected += rcu_print_task_exp_stall(rnp);
                        for_each_leaf_node_possible_cpu(rnp, cpu) {
                                struct rcu_data *rdp;

                                mask = leaf_node_cpu_bit(rnp, cpu);
                                if (!(rnp->expmask & mask))
                                        continue;
                                ndetected++;
                                rdp = per_cpu_ptr(rsp->rda, cpu);
                                pr_cont(" %d-%c%c%c", cpu,
                                        "O."[!!cpu_online(cpu)],
                                        "o."[!!(rdp->grpmask & rnp->expmaskinit)],
                                        "N."[!!(rdp->grpmask & rnp->expmaskinitnext)]);
                        }
                }
                pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
                        jiffies - jiffies_start, rsp->expedited_sequence,
                        rnp_root->expmask, ".T"[!!rnp_root->exp_tasks]);
                if (ndetected) {
                        pr_err("blocking rcu_node structures:");
                        rcu_for_each_node_breadth_first(rsp, rnp) {
                                if (rnp == rnp_root)
                                        continue; /* printed unconditionally */
                                if (sync_rcu_preempt_exp_done(rnp))
                                        continue;
                                pr_cont(" l=%u:%d-%d:%#lx/%c",
                                        rnp->level, rnp->grplo, rnp->grphi,
                                        rnp->expmask,
                                        ".T"[!!rnp->exp_tasks]);
                        }
                        pr_cont("\n");
                }
                rcu_for_each_leaf_node(rsp, rnp) {
                        for_each_leaf_node_possible_cpu(rnp, cpu) {
                                mask = leaf_node_cpu_bit(rnp, cpu);
                                if (!(rnp->expmask & mask))
                                        continue;
                                dump_cpu_task(cpu);
                        }
                }
                jiffies_stall = 3 * rcu_jiffies_till_stall_check() + 3;
        }
}
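/*
 * Decoding the stall output above (the CPU number here is a made-up
 * example): an entry such as " 3-OoN" means that CPU 3 is still
 * blocking the expedited grace period, is currently online ('O'
 * rather than '.'), had its bit set in ->expmaskinit at the start of
 * the grace period ('o'), and has its bit set in ->expmaskinitnext
 * ('N').  A '.' in any of the three positions indicates that the
 * corresponding condition does not hold.
 */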
/*
 * Wait for the current expedited grace period to complete, and then
 * wake up everyone who piggybacked on the just-completed expedited
 * grace period.  Also update all the ->exp_seq_rq counters as needed
 * in order to avoid counter-wrap problems.
 */
static void rcu_exp_wait_wake(struct rcu_state *rsp, unsigned long s)
{
        struct rcu_node *rnp;

        synchronize_sched_expedited_wait(rsp);
        rcu_exp_gp_seq_end(rsp);
        trace_rcu_exp_grace_period(rsp->name, s, TPS("end"));

        /*
         * Switch over to wakeup mode, allowing the next GP, but -only- the
         * next GP, to proceed.
         */
        mutex_lock(&rsp->exp_wake_mutex);

        rcu_for_each_node_breadth_first(rsp, rnp) {
                if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
                        spin_lock(&rnp->exp_lock);
                        /* Recheck, avoid hang in case someone just arrived. */
                        if (ULONG_CMP_LT(rnp->exp_seq_rq, s))
                                rnp->exp_seq_rq = s;
                        spin_unlock(&rnp->exp_lock);
                }
                wake_up_all(&rnp->exp_wq[(rsp->expedited_sequence >> 1) & 0x3]);
        }
        trace_rcu_exp_grace_period(rsp->name, s, TPS("endwake"));
        mutex_unlock(&rsp->exp_wake_mutex);
}
/* Let the workqueue handler know what it is supposed to do. */
struct rcu_exp_work {
        smp_call_func_t rew_func;
        struct rcu_state *rew_rsp;
        unsigned long rew_s;
        struct work_struct rew_work;
};
/*
 * Common code to drive an expedited grace period forward, used by
 * workqueues and mid-boot-time tasks.
 */
static void rcu_exp_sel_wait_wake(struct rcu_state *rsp,
                                  smp_call_func_t func, unsigned long s)
{
        /* Initialize the rcu_node tree in preparation for the wait. */
        sync_rcu_exp_select_cpus(rsp, func);

        /* Wait and clean up, including waking everyone. */
        rcu_exp_wait_wake(rsp, s);
}
/*
 * Work-queue handler to drive an expedited grace period forward.
 */
static void wait_rcu_exp_gp(struct work_struct *wp)
{
        struct rcu_exp_work *rewp;

        rewp = container_of(wp, struct rcu_exp_work, rew_work);
        rcu_exp_sel_wait_wake(rewp->rew_rsp, rewp->rew_func, rewp->rew_s);
}
/*
 * Given an rcu_state pointer and a smp_call_function() handler, kick
 * off the specified flavor of expedited grace period.
 */
static void _synchronize_rcu_expedited(struct rcu_state *rsp,
                                       smp_call_func_t func)
{
        struct rcu_data *rdp;
        struct rcu_exp_work rew;
        struct rcu_node *rnp;
        unsigned long s;

        /* If expedited grace periods are prohibited, fall back to normal. */
        if (rcu_gp_is_normal()) {
                wait_rcu_gp(rsp->call);
                return;
        }

        /* Take a snapshot of the sequence number.  */
        s = rcu_exp_gp_seq_snap(rsp);
        if (exp_funnel_lock(rsp, s))
                return;  /* Someone else did our work for us. */

        /* Ensure that load happens before action based on it. */
        if (unlikely(rcu_scheduler_active == RCU_SCHEDULER_INIT)) {
                /* Direct call during scheduler init and early_initcalls(). */
                rcu_exp_sel_wait_wake(rsp, func, s);
        } else {
                /* Marshall arguments & schedule the expedited grace period. */
                rew.rew_func = func;
                rew.rew_rsp = rsp;
                rew.rew_s = s;
                INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
                schedule_work(&rew.rew_work);
        }

        /* Wait for expedited grace period to complete. */
        rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id());
        rnp = rcu_get_root(rsp);
        wait_event(rnp->exp_wq[(s >> 1) & 0x3],
                   sync_exp_work_done(rsp,
                                      &rdp->exp_workdone0, s));

        /* Let the next expedited grace period start. */
        mutex_unlock(&rsp->exp_mutex);
}
/**
 * synchronize_sched_expedited - Brute-force RCU-sched grace period
 *
 * Wait for an RCU-sched grace period to elapse, but use a "big hammer"
 * approach to force the grace period to end quickly.  This consumes
 * significant time on all CPUs and is unfriendly to real-time workloads,
 * so is thus not recommended for any sort of common-case code.  In fact,
 * if you are using synchronize_sched_expedited() in a loop, please
 * restructure your code to batch your updates, and then use a single
 * synchronize_sched() instead.
 *
 * This implementation can be thought of as an application of sequence
 * locking to expedited grace periods, but using the sequence counter to
 * determine when someone else has already done the work instead of for
 * retrying readers.
 */
void synchronize_sched_expedited(void)
{
        struct rcu_state *rsp = &rcu_sched_state;

        /* If only one CPU, this is automatically a grace period. */
        if (rcu_blocking_is_gp())
                return;

        _synchronize_rcu_expedited(rsp, sync_sched_exp_handler);
}
EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
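/*
 * Batching sketch for the advice above (my_update() and my_objs are
 * hypothetical): rather than
 *
 *      for (i = 0; i < n; i++) {
 *              my_update(&my_objs[i]);
 *              synchronize_sched_expedited();
 *      }
 *
 * apply all updates first and then wait for a single grace period:
 *
 *      for (i = 0; i < n; i++)
 *              my_update(&my_objs[i]);
 *      synchronize_sched();
 */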
#ifdef CONFIG_PREEMPT_RCU

/*
 * Remote handler for smp_call_function_single().  If there is an
 * RCU read-side critical section in effect, request that the
 * next rcu_read_unlock() record the quiescent state up the
 * ->expmask fields in the rcu_node tree.  Otherwise, immediately
 * report the quiescent state.
 */
static void sync_rcu_exp_handler(void *info)
{
        struct rcu_data *rdp;
        struct rcu_state *rsp = info;
        struct task_struct *t = current;

        /*
         * Within an RCU read-side critical section, request that the next
         * rcu_read_unlock() report.  Unless this RCU read-side critical
         * section has already blocked, in which case it is already set
         * up for the expedited grace period to wait on it.
         */
        if (t->rcu_read_lock_nesting > 0 &&
            !t->rcu_read_unlock_special.b.blocked) {
                t->rcu_read_unlock_special.b.exp_need_qs = true;
                return;
        }

        /*
         * We are either exiting an RCU read-side critical section (negative
         * values of t->rcu_read_lock_nesting) or are not in one at all
         * (zero value of t->rcu_read_lock_nesting).  Or we are in an RCU
         * read-side critical section that blocked before this expedited
         * grace period started.  Either way, we can immediately report
         * the quiescent state.
         */
        rdp = this_cpu_ptr(rsp->rda);
        rcu_report_exp_rdp(rsp, rdp, true);
}
/**
 * synchronize_rcu_expedited - Brute-force RCU grace period
 *
 * Wait for an RCU-preempt grace period, but expedite it.  The basic
 * idea is to IPI all non-idle non-nohz online CPUs.  The IPI handler
 * checks whether the CPU is in an RCU-preempt critical section, and
 * if so, it sets a flag that causes the outermost rcu_read_unlock()
 * to report the quiescent state.  On the other hand, if the CPU is
 * not in an RCU read-side critical section, the IPI handler reports
 * the quiescent state immediately.
 *
 * Although this is a great improvement over previous expedited
 * implementations, it is still unfriendly to real-time workloads, so is
 * thus not recommended for any sort of common-case code.  In fact, if
 * you are using synchronize_rcu_expedited() in a loop, please restructure
 * your code to batch your updates, and then use a single synchronize_rcu()
 * instead.
 */
void synchronize_rcu_expedited(void)
{
        struct rcu_state *rsp = rcu_state_p;

        if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
                return;
        _synchronize_rcu_expedited(rsp, sync_rcu_exp_handler);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
#else /* #ifdef CONFIG_PREEMPT_RCU */

/*
 * Wait for an rcu-preempt grace period, but make it happen quickly.
 * But because preemptible RCU does not exist, map to rcu-sched.
 */
void synchronize_rcu_expedited(void)
{
        synchronize_sched_expedited();
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
/*
 * Switch to run-time mode once Tree RCU has fully initialized.
 */
static int __init rcu_exp_runtime_mode(void)
{
        rcu_test_sync_prims();
        rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
        rcu_test_sync_prims();
        return 0;
}
core_initcall(rcu_exp_runtime_mode);