/*
 * RCU expedited grace periods
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2016
 *
 * Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/lockdep.h>

/*
 * Record the start of an expedited grace period.
 */
static void rcu_exp_gp_seq_start(struct rcu_state *rsp)
{
        rcu_seq_start(&rsp->expedited_sequence);
}

/*
 * Return the value that the expedited-grace-period counter will have
 * at the end of the current grace period.
 */
static __maybe_unused unsigned long rcu_exp_gp_seq_endval(struct rcu_state *rsp)
{
        return rcu_seq_endval(&rsp->expedited_sequence);
}

/*
 * Record the end of an expedited grace period.
 */
static void rcu_exp_gp_seq_end(struct rcu_state *rsp)
{
        rcu_seq_end(&rsp->expedited_sequence);
        smp_mb(); /* Ensure that consecutive grace periods serialize. */
}

/*
 * Take a snapshot of the expedited-grace-period counter.
 */
static unsigned long rcu_exp_gp_seq_snap(struct rcu_state *rsp)
{
        unsigned long s;

        smp_mb(); /* Caller's modifications seen first by other CPUs. */
        s = rcu_seq_snap(&rsp->expedited_sequence);
        trace_rcu_exp_grace_period(rsp->name, s, TPS("snap"));
        return s;
}

/*
 * Given a counter snapshot from rcu_exp_gp_seq_snap(), return true
 * if a full expedited grace period has elapsed since that snapshot
 * was taken.
 */
static bool rcu_exp_gp_seq_done(struct rcu_state *rsp, unsigned long s)
{
        return rcu_seq_done(&rsp->expedited_sequence, s);
}

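/*
 * The two functions above are used together as a piggy-back check: a
 * caller snapshots the counter, and a later call to rcu_exp_gp_seq_done()
 * with that snapshot returns true only once a full expedited grace period
 * has elapsed in the meantime.  A minimal usage sketch, using only
 * identifiers defined in this file:
 *
 *	s = rcu_exp_gp_seq_snap(rsp);
 *	...
 *	if (rcu_exp_gp_seq_done(rsp, s))
 *		return;		(a full expedited GP has elapsed)
 *
 * The low-order bits of the sequence counter encode whether a grace period
 * is currently in progress, so a snapshot taken mid-GP does not report
 * completion until the next full grace period has ended.
 */
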
/*
 * Reset the ->expmaskinit values in the rcu_node tree to reflect any
 * recent CPU-online activity.  Note that these masks are not cleared
 * when CPUs go offline, so they reflect the union of all CPUs that have
 * ever been online.  This means that this function normally takes its
 * no-work-to-do fastpath.
 */
static void sync_exp_reset_tree_hotplug(struct rcu_state *rsp)
{
        bool done;
        unsigned long flags;
        unsigned long mask;
        unsigned long oldmask;
        int ncpus = smp_load_acquire(&rsp->ncpus); /* Order against locking. */
        struct rcu_node *rnp;
        struct rcu_node *rnp_up;

        /* If no new CPUs onlined since last time, nothing to do. */
        if (likely(ncpus == rsp->ncpus_snap))
                return;
        rsp->ncpus_snap = ncpus;

        /*
         * Each pass through the following loop propagates newly onlined
         * CPUs for the current rcu_node structure up the rcu_node tree.
         */
        rcu_for_each_leaf_node(rsp, rnp) {
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
                if (rnp->expmaskinit == rnp->expmaskinitnext) {
                        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                        continue;  /* No new CPUs, nothing to do. */
                }

                /* Update this node's mask, track old value for propagation. */
                oldmask = rnp->expmaskinit;
                rnp->expmaskinit = rnp->expmaskinitnext;
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

                /* If was already nonzero, nothing to propagate. */
                if (oldmask)
                        continue;

                /* Propagate the new CPU up the tree. */
                mask = rnp->grpmask;
                rnp_up = rnp->parent;
                done = false;
                while (rnp_up) {
                        raw_spin_lock_irqsave_rcu_node(rnp_up, flags);
                        if (rnp_up->expmaskinit)
                                done = true;
                        rnp_up->expmaskinit |= mask;
                        raw_spin_unlock_irqrestore_rcu_node(rnp_up, flags);
                        if (done)
                                break;
                        mask = rnp_up->grpmask;
                        rnp_up = rnp_up->parent;
                }
        }
}

/*
 * Reset the ->expmask values in the rcu_node tree in preparation for
 * a new expedited grace period.
 */
static void __maybe_unused sync_exp_reset_tree(struct rcu_state *rsp)
{
        unsigned long flags;
        struct rcu_node *rnp;

        sync_exp_reset_tree_hotplug(rsp);
        rcu_for_each_node_breadth_first(rsp, rnp) {
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
                WARN_ON_ONCE(rnp->expmask);
                rnp->expmask = rnp->expmaskinit;
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        }
}

/*
 * Return non-zero if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period.  Works only for preemptible
 * RCU -- other RCU implementations use other means.
 *
 * Caller must hold the specified rcu_node structure's ->lock.
 */
static bool sync_rcu_preempt_exp_done(struct rcu_node *rnp)
{
        raw_lockdep_assert_held_rcu_node(rnp);

        return rnp->exp_tasks == NULL &&
               READ_ONCE(rnp->expmask) == 0;
}

/*
 * Like sync_rcu_preempt_exp_done(), but this function assumes the caller
 * doesn't hold the rcu_node's ->lock, and will acquire and release the lock
 * itself.
 */
static bool sync_rcu_preempt_exp_done_unlocked(struct rcu_node *rnp)
{
        unsigned long flags;
        bool ret;

        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        ret = sync_rcu_preempt_exp_done(rnp);
        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

        return ret;
}

/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.  This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree.  (Calm down, calm down, we do the recursion
 * iteratively!)
 *
 * Caller must hold the specified rcu_node structure's ->lock.
 */
static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
                                 bool wake, unsigned long flags)
        __releases(rnp->lock)
{
        unsigned long mask;

        for (;;) {
                if (!sync_rcu_preempt_exp_done(rnp)) {
                        if (!rnp->expmask)
                                rcu_initiate_boost(rnp, flags);
                        else
                                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                        break;
                }
                if (rnp->parent == NULL) {
                        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                        if (wake) {
                                smp_mb(); /* EGP done before wake_up(). */
                                swake_up_one(&rsp->expedited_wq);
                        }
                        break;
                }
                mask = rnp->grpmask;
                raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */
                rnp = rnp->parent;
                raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
                WARN_ON_ONCE(!(rnp->expmask & mask));
                rnp->expmask &= ~mask;
        }
}

/*
 * Report expedited quiescent state for specified node.  This is a
 * lock-acquisition wrapper function for __rcu_report_exp_rnp().
 */
static void __maybe_unused rcu_report_exp_rnp(struct rcu_state *rsp,
                                              struct rcu_node *rnp, bool wake)
{
        unsigned long flags;

        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        __rcu_report_exp_rnp(rsp, rnp, wake, flags);
}

/*
 * Report expedited quiescent state for multiple CPUs, all covered by the
 * specified leaf rcu_node structure.
 */
static void rcu_report_exp_cpu_mult(struct rcu_state *rsp, struct rcu_node *rnp,
                                    unsigned long mask, bool wake)
{
        unsigned long flags;

        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        if (!(rnp->expmask & mask)) {
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                return;
        }
        rnp->expmask &= ~mask;
        __rcu_report_exp_rnp(rsp, rnp, wake, flags); /* Releases rnp->lock. */
}

/*
 * Report expedited quiescent state for specified rcu_data (CPU).
 */
static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp,
                               bool wake)
{
        rcu_report_exp_cpu_mult(rsp, rdp->mynode, rdp->grpmask, wake);
}

/* Common code for synchronize_{rcu,sched}_expedited() work-done checking. */
static bool sync_exp_work_done(struct rcu_state *rsp, unsigned long s)
{
        if (rcu_exp_gp_seq_done(rsp, s)) {
                trace_rcu_exp_grace_period(rsp->name, s, TPS("done"));
                /* Ensure test happens before caller kfree(). */
                smp_mb__before_atomic(); /* ^^^ */
                return true;
        }
        return false;
}

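/*
 * Note that sync_exp_work_done() is lock-free: it is used both as the
 * early-exit check in exp_funnel_lock() and as the wait_event() condition
 * in exp_funnel_lock() and _synchronize_rcu_expedited() below, so it must
 * be safe to call repeatedly with no locks held.
 */
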
/*
 * Funnel-lock acquisition for expedited grace periods.  Returns true
 * if some other task completed an expedited grace period that this task
 * can piggy-back on, and with no mutex held.  Otherwise, returns false
 * with the mutex held, indicating that the caller must actually do the
 * expedited grace period.
 */
static bool exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
{
        struct rcu_data *rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id());
        struct rcu_node *rnp = rdp->mynode;
        struct rcu_node *rnp_root = rcu_get_root(rsp);

        /* Low-contention fastpath. */
        if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) &&
            (rnp == rnp_root ||
             ULONG_CMP_LT(READ_ONCE(rnp_root->exp_seq_rq), s)) &&
            mutex_trylock(&rsp->exp_mutex))
                goto fastpath;

        /*
         * Each pass through the following loop works its way up
         * the rcu_node tree, returning if others have done the work or
         * otherwise falls through to acquire rsp->exp_mutex.  The mapping
         * from CPU to rcu_node structure can be inexact, as it is just
         * promoting locality and is not strictly needed for correctness.
         */
        for (; rnp != NULL; rnp = rnp->parent) {
                if (sync_exp_work_done(rsp, s))
                        return true;

                /* Work not done, either wait here or go up. */
                spin_lock(&rnp->exp_lock);
                if (ULONG_CMP_GE(rnp->exp_seq_rq, s)) {

                        /* Someone else doing GP, so wait for them. */
                        spin_unlock(&rnp->exp_lock);
                        trace_rcu_exp_funnel_lock(rsp->name, rnp->level,
                                                  rnp->grplo, rnp->grphi,
                                                  TPS("wait"));
                        wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
                                   sync_exp_work_done(rsp, s));
                        return true;
                }
                rnp->exp_seq_rq = s; /* Followers can wait on us. */
                spin_unlock(&rnp->exp_lock);
                trace_rcu_exp_funnel_lock(rsp->name, rnp->level, rnp->grplo,
                                          rnp->grphi, TPS("nxtlvl"));
        }
        mutex_lock(&rsp->exp_mutex);
fastpath:
        if (sync_exp_work_done(rsp, s)) {
                mutex_unlock(&rsp->exp_mutex);
                return true;
        }
        rcu_exp_gp_seq_start(rsp);
        trace_rcu_exp_grace_period(rsp->name, s, TPS("start"));
        return false;
}

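/*
 * How the pieces above fit together (a sketch of the flow implemented by
 * _synchronize_rcu_expedited() later in this file):
 *
 *	s = rcu_exp_gp_seq_snap(rsp);		(which GP must complete?)
 *	if (exp_funnel_lock(rsp, s))
 *		return;				(someone else's GP covered us)
 *	...					(drive the expedited GP while
 *						 holding rsp->exp_mutex)
 *	mutex_unlock(&rsp->exp_mutex);
 *
 * The funnel lock bounds contention on rsp->exp_mutex: for a given grace
 * period, roughly one task per rcu_node structure proceeds toward the
 * root, and everyone else waits on that node's ->exp_wq.
 */
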
/* Invoked on each online non-idle CPU for expedited quiescent state. */
static void sync_sched_exp_handler(void *data)
{
        struct rcu_data *rdp;
        struct rcu_node *rnp;
        struct rcu_state *rsp = data;

        rdp = this_cpu_ptr(rsp->rda);
        rnp = rdp->mynode;
        if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
            __this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))
                return;
        if (rcu_is_cpu_rrupt_from_idle()) {
                rcu_report_exp_rdp(&rcu_sched_state,
                                   this_cpu_ptr(&rcu_sched_data), true);
                return;
        }
        __this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, true);
        /* Store .exp before .rcu_urgent_qs. */
        smp_store_release(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs), true);
        resched_cpu(smp_processor_id());
}

/* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */
static void sync_sched_exp_online_cleanup(int cpu)
{
        struct rcu_data *rdp;
        int ret;
        struct rcu_node *rnp;
        struct rcu_state *rsp = &rcu_sched_state;

        rdp = per_cpu_ptr(rsp->rda, cpu);
        rnp = rdp->mynode;
        if (!(READ_ONCE(rnp->expmask) & rdp->grpmask))
                return;
        ret = smp_call_function_single(cpu, sync_sched_exp_handler, rsp, 0);
        WARN_ON_ONCE(ret);
}

/*
 * Select the CPUs within the specified rcu_node that the upcoming
 * expedited grace period needs to wait for.
 */
static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
{
        int cpu;
        unsigned long flags;
        smp_call_func_t func;
        unsigned long mask_ofl_test;
        unsigned long mask_ofl_ipi;
        int ret;
        struct rcu_exp_work *rewp =
                container_of(wp, struct rcu_exp_work, rew_work);
        struct rcu_node *rnp = container_of(rewp, struct rcu_node, rew);
        struct rcu_state *rsp = rewp->rew_rsp;

        func = rewp->rew_func;
        raw_spin_lock_irqsave_rcu_node(rnp, flags);

        /* Each pass checks a CPU for identity, offline, and idle. */
        mask_ofl_test = 0;
        for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
                unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
                struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
                struct rcu_dynticks *rdtp = per_cpu_ptr(&rcu_dynticks, cpu);
                int snap;

                if (raw_smp_processor_id() == cpu ||
                    !(rnp->qsmaskinitnext & mask)) {
                        mask_ofl_test |= mask;
                } else {
                        snap = rcu_dynticks_snap(rdtp);
                        if (rcu_dynticks_in_eqs(snap))
                                mask_ofl_test |= mask;
                        else
                                rdp->exp_dynticks_snap = snap;
                }
        }
        mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;

        /*
         * Need to wait for any blocked tasks as well.  Note that
         * additional blocking tasks will also block the expedited GP
         * until such time as the ->expmask bits are cleared.
         */
        if (rcu_preempt_has_tasks(rnp))
                rnp->exp_tasks = rnp->blkd_tasks.next;
        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

        /* IPI the remaining CPUs for expedited quiescent state. */
        for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
                unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
                struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);

                if (!(mask_ofl_ipi & mask))
                        continue;
retry_ipi:
                if (rcu_dynticks_in_eqs_since(rdp->dynticks,
                                              rdp->exp_dynticks_snap)) {
                        mask_ofl_test |= mask;
                        continue;
                }
                ret = smp_call_function_single(cpu, func, rsp, 0);
                if (!ret) {
                        mask_ofl_ipi &= ~mask;
                        continue;
                }
                /* Failed, raced with CPU hotplug operation. */
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
                if ((rnp->qsmaskinitnext & mask) &&
                    (rnp->expmask & mask)) {
                        /* Online, so delay for a bit and try again. */
                        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                        trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("selectofl"));
                        schedule_timeout_uninterruptible(1);
                        goto retry_ipi;
                }
                /* CPU really is offline, so we can ignore it. */
                if (!(rnp->expmask & mask))
                        mask_ofl_ipi &= ~mask;
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        }
        /* Report quiescent states for those that went offline. */
        mask_ofl_test |= mask_ofl_ipi;
        if (mask_ofl_test)
                rcu_report_exp_cpu_mult(rsp, rnp, mask_ofl_test, false);
}

/*
 * Select the nodes that the upcoming expedited grace period needs
 * to wait for.
 */
static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
                                     smp_call_func_t func)
{
        int cpu;
        struct rcu_node *rnp;

        trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("reset"));
        sync_exp_reset_tree(rsp);
        trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("select"));

        /* Schedule work for each leaf rcu_node structure. */
        rcu_for_each_leaf_node(rsp, rnp) {
                rnp->exp_need_flush = false;
                if (!READ_ONCE(rnp->expmask))
                        continue; /* Avoid early boot non-existent wq. */
                rnp->rew.rew_func = func;
                rnp->rew.rew_rsp = rsp;
                if (!READ_ONCE(rcu_par_gp_wq) ||
                    rcu_scheduler_active != RCU_SCHEDULER_RUNNING ||
                    rcu_is_last_leaf_node(rsp, rnp)) {
                        /* No workqueues yet or last leaf, do direct call. */
                        sync_rcu_exp_select_node_cpus(&rnp->rew.rew_work);
                        continue;
                }
                INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
                preempt_disable();
                cpu = cpumask_next(rnp->grplo - 1, cpu_online_mask);
                /* If all offline, queue the work on an unbound CPU. */
                if (unlikely(cpu > rnp->grphi))
                        cpu = WORK_CPU_UNBOUND;
                queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work);
                preempt_enable();
                rnp->exp_need_flush = true;
        }

        /* Wait for workqueue jobs (if any) to complete. */
        rcu_for_each_leaf_node(rsp, rnp)
                if (rnp->exp_need_flush)
                        flush_work(&rnp->rew.rew_work);
}

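/*
 * Note that the first loop above fans the per-leaf CPU selection out onto
 * the rcu_par_gp_wq workqueue so that large systems can scan their leaf
 * rcu_node structures in parallel, falling back to a direct call early in
 * boot (no workqueues yet) and for the last leaf.  The final loop flushes
 * whatever work items were actually queued.
 */
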
static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
{
        int cpu;
        unsigned long jiffies_stall;
        unsigned long jiffies_start;
        unsigned long mask;
        int ndetected;
        struct rcu_node *rnp;
        struct rcu_node *rnp_root = rcu_get_root(rsp);
        int ret;

        trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("startwait"));
        jiffies_stall = rcu_jiffies_till_stall_check();
        jiffies_start = jiffies;

        for (;;) {
                ret = swait_event_timeout_exclusive(
                                rsp->expedited_wq,
                                sync_rcu_preempt_exp_done_unlocked(rnp_root),
                                jiffies_stall);
                if (ret > 0 || sync_rcu_preempt_exp_done_unlocked(rnp_root))
                        return;
                WARN_ON(ret < 0);  /* workqueues should not be signaled. */
                if (rcu_cpu_stall_suppress)
                        continue;
                panic_on_rcu_stall();
                pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {",
                       rsp->name);
                ndetected = 0;
                rcu_for_each_leaf_node(rsp, rnp) {
                        ndetected += rcu_print_task_exp_stall(rnp);
                        for_each_leaf_node_possible_cpu(rnp, cpu) {
                                struct rcu_data *rdp;

                                mask = leaf_node_cpu_bit(rnp, cpu);
                                if (!(rnp->expmask & mask))
                                        continue;
                                ndetected++;
                                rdp = per_cpu_ptr(rsp->rda, cpu);
                                pr_cont(" %d-%c%c%c", cpu,
                                        "O."[!!cpu_online(cpu)],
                                        "o."[!!(rdp->grpmask & rnp->expmaskinit)],
                                        "N."[!!(rdp->grpmask & rnp->expmaskinitnext)]);
                        }
                }
                pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
                        jiffies - jiffies_start, rsp->expedited_sequence,
                        rnp_root->expmask, ".T"[!!rnp_root->exp_tasks]);
                if (ndetected) {
                        pr_err("blocking rcu_node structures:");
                        rcu_for_each_node_breadth_first(rsp, rnp) {
                                if (rnp == rnp_root)
                                        continue; /* printed unconditionally */
                                if (sync_rcu_preempt_exp_done_unlocked(rnp))
                                        continue;
                                pr_cont(" l=%u:%d-%d:%#lx/%c",
                                        rnp->level, rnp->grplo, rnp->grphi,
                                        rnp->expmask,
                                        ".T"[!!rnp->exp_tasks]);
                        }
                        pr_cont("\n");
                }
                rcu_for_each_leaf_node(rsp, rnp) {
                        for_each_leaf_node_possible_cpu(rnp, cpu) {
                                mask = leaf_node_cpu_bit(rnp, cpu);
                                if (!(rnp->expmask & mask))
                                        continue;
                                dump_cpu_task(cpu);
                        }
                }
                jiffies_stall = 3 * rcu_jiffies_till_stall_check() + 3;
        }
}

/*
 * Wait for the current expedited grace period to complete, and then
 * wake up everyone who piggybacked on the just-completed expedited
 * grace period.  Also update all the ->exp_seq_rq counters as needed
 * in order to avoid counter-wrap problems.
 */
static void rcu_exp_wait_wake(struct rcu_state *rsp, unsigned long s)
{
        struct rcu_node *rnp;

        synchronize_sched_expedited_wait(rsp);
        rcu_exp_gp_seq_end(rsp);
        trace_rcu_exp_grace_period(rsp->name, s, TPS("end"));

        /*
         * Switch over to wakeup mode, allowing the next GP, but -only- the
         * next GP, to proceed.
         */
        mutex_lock(&rsp->exp_wake_mutex);

        rcu_for_each_node_breadth_first(rsp, rnp) {
                if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
                        spin_lock(&rnp->exp_lock);
                        /* Recheck, avoid hang in case someone just arrived. */
                        if (ULONG_CMP_LT(rnp->exp_seq_rq, s))
                                rnp->exp_seq_rq = s;
                        spin_unlock(&rnp->exp_lock);
                }
                smp_mb(); /* All above changes before wakeup. */
                wake_up_all(&rnp->exp_wq[rcu_seq_ctr(rsp->expedited_sequence) & 0x3]);
        }
        trace_rcu_exp_grace_period(rsp->name, s, TPS("endwake"));
        mutex_unlock(&rsp->exp_wake_mutex);
}

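/*
 * Note on the ->exp_wq[rcu_seq_ctr(...) & 0x3] indexing used above and in
 * exp_funnel_lock(): each rcu_node structure carries a small array of wait
 * queues indexed by the low-order bits of the grace-period count, which
 * keeps waiters for distinct recent expedited grace periods on separate
 * queues.  The wait_event() conditions still re-check sync_exp_work_done(),
 * so this separation is presumably an optimization that limits spurious
 * wakeups rather than a correctness requirement.
 */
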
/*
 * Common code to drive an expedited grace period forward, used by
 * workqueues and mid-boot-time tasks.
 */
static void rcu_exp_sel_wait_wake(struct rcu_state *rsp,
                                  smp_call_func_t func, unsigned long s)
{
        /* Initialize the rcu_node tree in preparation for the wait. */
        sync_rcu_exp_select_cpus(rsp, func);

        /* Wait and clean up, including waking everyone. */
        rcu_exp_wait_wake(rsp, s);
}

/*
 * Work-queue handler to drive an expedited grace period forward.
 */
static void wait_rcu_exp_gp(struct work_struct *wp)
{
        struct rcu_exp_work *rewp;

        rewp = container_of(wp, struct rcu_exp_work, rew_work);
        rcu_exp_sel_wait_wake(rewp->rew_rsp, rewp->rew_func, rewp->rew_s);
}

/*
 * Given an rcu_state pointer and a smp_call_function() handler, kick
 * off the specified flavor of expedited grace period.
 */
static void _synchronize_rcu_expedited(struct rcu_state *rsp,
                                       smp_call_func_t func)
{
        struct rcu_data *rdp;
        struct rcu_exp_work rew;
        struct rcu_node *rnp;
        unsigned long s;

        /* If expedited grace periods are prohibited, fall back to normal. */
        if (rcu_gp_is_normal()) {
                wait_rcu_gp(rsp->call);
                return;
        }

        /* Take a snapshot of the sequence number. */
        s = rcu_exp_gp_seq_snap(rsp);
        if (exp_funnel_lock(rsp, s))
                return;  /* Someone else did our work for us. */

        /* Ensure that load happens before action based on it. */
        if (unlikely(rcu_scheduler_active == RCU_SCHEDULER_INIT)) {
                /* Direct call during scheduler init and early_initcalls(). */
                rcu_exp_sel_wait_wake(rsp, func, s);
        } else {
                /* Marshall arguments & schedule the expedited grace period. */
                rew.rew_func = func;
                rew.rew_rsp = rsp;
                rew.rew_s = s;
                INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
                queue_work(rcu_gp_wq, &rew.rew_work);
        }

        /* Wait for expedited grace period to complete. */
        rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id());
        rnp = rcu_get_root(rsp);
        wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
                   sync_exp_work_done(rsp, s));
        smp_mb(); /* Workqueue actions happen before return. */

        /* Let the next expedited grace period start. */
        mutex_unlock(&rsp->exp_mutex);
}

/**
 * synchronize_sched_expedited - Brute-force RCU-sched grace period
 *
 * Wait for an RCU-sched grace period to elapse, but use a "big hammer"
 * approach to force the grace period to end quickly.  This consumes
 * significant time on all CPUs and is unfriendly to real-time workloads,
 * so is thus not recommended for any sort of common-case code.  In fact,
 * if you are using synchronize_sched_expedited() in a loop, please
 * restructure your code to batch your updates, and then use a single
 * synchronize_sched() instead.
 *
 * This implementation can be thought of as an application of sequence
 * locking to expedited grace periods, but using the sequence counter to
 * determine when someone else has already done the work instead of for
 * retrying readers.
 */
void synchronize_sched_expedited(void)
{
        struct rcu_state *rsp = &rcu_sched_state;

        RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
                         lock_is_held(&rcu_lock_map) ||
                         lock_is_held(&rcu_sched_lock_map),
                         "Illegal synchronize_sched_expedited() in RCU read-side critical section");

        /* If only one CPU, this is automatically a grace period. */
        if (rcu_blocking_is_gp())
                return;

        _synchronize_rcu_expedited(rsp, sync_sched_exp_handler);
}
EXPORT_SYMBOL_GPL(synchronize_sched_expedited);

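/*
 * Typical use, as a hedged sketch (the "foo" structure, foo_lock, and p
 * are hypothetical and not part of this file): an updater unlinks an
 * element, waits for an expedited RCU-sched grace period, and only then
 * frees it, so no preempt-disabled reader can still be referencing it:
 *
 *	spin_lock(&foo_lock);
 *	list_del_rcu(&p->list);
 *	spin_unlock(&foo_lock);
 *	synchronize_sched_expedited();	(all pre-existing readers are done)
 *	kfree(p);
 *
 * As the kernel-doc above says, batching updates behind a single
 * synchronize_sched() is preferable outside of latency-critical paths.
 */
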
#ifdef CONFIG_PREEMPT_RCU

/*
 * Remote handler for smp_call_function_single().  If there is an
 * RCU read-side critical section in effect, request that the
 * next rcu_read_unlock() record the quiescent state up the
 * ->expmask fields in the rcu_node tree.  Otherwise, immediately
 * report the quiescent state.
 */
static void sync_rcu_exp_handler(void *info)
{
        struct rcu_data *rdp;
        struct rcu_state *rsp = info;
        struct task_struct *t = current;

        /*
         * Within an RCU read-side critical section, request that the next
         * rcu_read_unlock() report.  Unless this RCU read-side critical
         * section has already blocked, in which case it is already set
         * up for the expedited grace period to wait on it.
         */
        if (t->rcu_read_lock_nesting > 0 &&
            !t->rcu_read_unlock_special.b.blocked) {
                t->rcu_read_unlock_special.b.exp_need_qs = true;
                return;
        }

        /*
         * We are either exiting an RCU read-side critical section (negative
         * values of t->rcu_read_lock_nesting) or are not in one at all
         * (zero value of t->rcu_read_lock_nesting).  Or we are in an RCU
         * read-side critical section that blocked before this expedited
         * grace period started.  Either way, we can immediately report
         * the quiescent state.
         */
        rdp = this_cpu_ptr(rsp->rda);
        rcu_report_exp_rdp(rsp, rdp, true);
}

/**
 * synchronize_rcu_expedited - Brute-force RCU grace period
 *
 * Wait for an RCU-preempt grace period, but expedite it.  The basic
 * idea is to IPI all non-idle non-nohz online CPUs.  The IPI handler
 * checks whether the CPU is in an RCU-preempt critical section, and
 * if so, it sets a flag that causes the outermost rcu_read_unlock()
 * to report the quiescent state.  On the other hand, if the CPU is
 * not in an RCU read-side critical section, the IPI handler reports
 * the quiescent state immediately.
 *
 * Although this is a great improvement over previous expedited
 * implementations, it is still unfriendly to real-time workloads, so is
 * thus not recommended for any sort of common-case code.  In fact, if
 * you are using synchronize_rcu_expedited() in a loop, please restructure
 * your code to batch your updates, and then use a single synchronize_rcu()
 * instead.
 */
void synchronize_rcu_expedited(void)
{
        struct rcu_state *rsp = rcu_state_p;

        RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
                         lock_is_held(&rcu_lock_map) ||
                         lock_is_held(&rcu_sched_lock_map),
                         "Illegal synchronize_rcu_expedited() in RCU read-side critical section");

        if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
                return;
        _synchronize_rcu_expedited(rsp, sync_rcu_exp_handler);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

#else /* #ifdef CONFIG_PREEMPT_RCU */

/*
 * Wait for an rcu-preempt grace period, but make it happen quickly.
 * But because preemptible RCU does not exist, map to rcu-sched.
 */
void synchronize_rcu_expedited(void)
{
        synchronize_sched_expedited();
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */