/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 *
 * Copyright Red Hat, 2009
 * Copyright IBM Corporation, 2009
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.ibm.com>
 *	   Frederic Weisbecker <frederic@kernel.org>
 */

#ifdef CONFIG_RCU_NOCB_CPU
static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
static bool __read_mostly rcu_nocb_poll;	/* Offload kthreads are to poll. */
static inline bool rcu_current_is_nocb_kthread(struct rcu_data *rdp)
{
	/* Race on early boot between thread creation and assignment */
	if (!rdp->nocb_cb_kthread || !rdp->nocb_gp_kthread)
		return false;

	if (current == rdp->nocb_cb_kthread || current == rdp->nocb_gp_kthread)
		return true;

	return false;
}
/*
 * Offload callback processing from the boot-time-specified set of CPUs
 * specified by rcu_nocb_mask.  For the CPUs in the set, there are kthreads
 * created that pull the callbacks from the corresponding CPU, wait for
 * a grace period to elapse, and invoke the callbacks.  These kthreads
 * are organized into GP kthreads, which manage incoming callbacks, wait for
 * grace periods, and awaken CB kthreads, and the CB kthreads, which only
 * invoke callbacks.  Each GP kthread invokes its own CBs.  The no-CBs CPUs
 * do a wake_up() on their GP kthread when they insert a callback into any
 * empty list, unless the rcu_nocb_poll boot parameter has been specified,
 * in which case each kthread actively polls its CPU.  (Which isn't so great
 * for energy efficiency, but which does reduce RCU's overhead on that CPU.)
 *
 * This is intended to be used in conjunction with Frederic Weisbecker's
 * adaptive-idle work, which would seriously reduce OS jitter on CPUs
 * running CPU-bound user-mode computations.
 *
 * Offloading of callbacks can also be used as an energy-efficiency
 * measure because CPUs with no RCU callbacks queued are more aggressive
 * about entering dyntick-idle mode.
 */
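
/*
 * Illustrative sketch (editorial note, not part of this file's logic):
 * with callback offloading, ordinary call_rcu() users need no changes.
 * Booting with, for example, "rcu_nocbs=1-7" (and optionally
 * "rcu_nocb_poll") makes CPUs 1-7 no-CBs CPUs, so a callback such as the
 * one below is invoked from an rcuo kthread rather than from softirq
 * context on the enqueueing CPU.  The structure and function names here
 * are hypothetical.
 *
 *	struct foo {
 *		struct rcu_head rh;
 *		int data;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rhp)
 *	{
 *		struct foo *fp = container_of(rhp, struct foo, rh);
 *
 *		kfree(fp);	// Runs in rcuo kthread context on no-CBs CPUs.
 *	}
 *
 *	static void foo_release(struct foo *fp)
 *	{
 *		call_rcu(&fp->rh, foo_reclaim);	// Queued to ->cblist or ->nocb_bypass.
 *	}
 */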
/*
 * Parse the boot-time rcu_nocb_mask CPU list from the kernel parameters.
 * If the list is invalid, a warning is emitted and all CPUs are offloaded.
 */
static int __init rcu_nocb_setup(char *str)
{
	alloc_bootmem_cpumask_var(&rcu_nocb_mask);
	if (*str == '=') {
		if (cpulist_parse(++str, rcu_nocb_mask)) {
			pr_warn("rcu_nocbs= bad CPU range, all CPUs set\n");
			cpumask_setall(rcu_nocb_mask);
		}
	}
	rcu_state.nocb_is_setup = true;
	return 1;
}
__setup("rcu_nocbs", rcu_nocb_setup);
static int __init parse_rcu_nocb_poll(char *arg)
{
	rcu_nocb_poll = true;
	return 1;
}
__setup("rcu_nocb_poll", parse_rcu_nocb_poll);
/*
 * Don't bother bypassing ->cblist if the call_rcu() rate is low.
 * After all, the main point of bypassing is to avoid lock contention
 * on ->nocb_lock, which only can happen at high call_rcu() rates.
 */
static int nocb_nobypass_lim_per_jiffy = 16 * 1000 / HZ;
module_param(nocb_nobypass_lim_per_jiffy, int, 0);
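
/*
 * Worked example (editorial note, assuming the default value above):
 * with HZ=1000 the limit is 16 * 1000 / 1000 = 16 direct ->cblist
 * enqueues per jiffy, and with HZ=250 it is 16 * 1000 / 250 = 64 per
 * jiffy.  Either way that is roughly 16000 call_rcu() invocations per
 * second per CPU before the bypass list starts being used.
 */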
/*
 * Acquire the specified rcu_data structure's ->nocb_bypass_lock.  If the
 * lock isn't immediately available, perform minimal sanity check.
 */
static void rcu_nocb_bypass_lock(struct rcu_data *rdp)
	__acquires(&rdp->nocb_bypass_lock)
{
	lockdep_assert_irqs_disabled();
	if (raw_spin_trylock(&rdp->nocb_bypass_lock))
		return;
	/*
	 * Contention is expected only when a local enqueue collides with
	 * a remote flush from the kthreads.
	 */
	WARN_ON_ONCE(smp_processor_id() != rdp->cpu);
	raw_spin_lock(&rdp->nocb_bypass_lock);
}
/*
 * Conditionally acquire the specified rcu_data structure's
 * ->nocb_bypass_lock.
 */
static bool rcu_nocb_bypass_trylock(struct rcu_data *rdp)
{
	lockdep_assert_irqs_disabled();
	return raw_spin_trylock(&rdp->nocb_bypass_lock);
}

/*
 * Release the specified rcu_data structure's ->nocb_bypass_lock.
 */
static void rcu_nocb_bypass_unlock(struct rcu_data *rdp)
	__releases(&rdp->nocb_bypass_lock)
{
	lockdep_assert_irqs_disabled();
	raw_spin_unlock(&rdp->nocb_bypass_lock);
}
/*
 * Acquire the specified rcu_data structure's ->nocb_lock, but only
 * if it corresponds to a no-CBs CPU.
 */
static void rcu_nocb_lock(struct rcu_data *rdp)
{
	lockdep_assert_irqs_disabled();
	if (!rcu_rdp_is_offloaded(rdp))
		return;
	raw_spin_lock(&rdp->nocb_lock);
}

/*
 * Release the specified rcu_data structure's ->nocb_lock, but only
 * if it corresponds to a no-CBs CPU.
 */
static void rcu_nocb_unlock(struct rcu_data *rdp)
{
	if (rcu_rdp_is_offloaded(rdp)) {
		lockdep_assert_irqs_disabled();
		raw_spin_unlock(&rdp->nocb_lock);
	}
}

/*
 * Release the specified rcu_data structure's ->nocb_lock and restore
 * interrupts, but only if it corresponds to a no-CBs CPU.
 */
static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
				       unsigned long flags)
{
	if (rcu_rdp_is_offloaded(rdp)) {
		lockdep_assert_irqs_disabled();
		raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
	} else {
		local_irq_restore(flags);
	}
}
/* Lockdep check that ->cblist may be safely accessed. */
static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp)
{
	lockdep_assert_irqs_disabled();
	if (rcu_rdp_is_offloaded(rdp))
		lockdep_assert_held(&rdp->nocb_lock);
}
/*
 * Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended
 * grace period.
 */
static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
{
	swake_up_all(sq);
}

static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
{
	return &rnp->nocb_gp_wq[rcu_seq_ctr(rnp->gp_seq) & 0x1];
}

static void rcu_init_one_nocb(struct rcu_node *rnp)
{
	init_swait_queue_head(&rnp->nocb_gp_wq[0]);
	init_swait_queue_head(&rnp->nocb_gp_wq[1]);
}
static bool __wake_nocb_gp(struct rcu_data *rdp_gp,
			   struct rcu_data *rdp,
			   bool force, unsigned long flags)
	__releases(rdp_gp->nocb_gp_lock)
{
	bool needwake = false;

	if (!READ_ONCE(rdp_gp->nocb_gp_kthread)) {
		raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
				    TPS("AlreadyAwake"));
		return false;
	}

	if (rdp_gp->nocb_defer_wakeup > RCU_NOCB_WAKE_NOT) {
		WRITE_ONCE(rdp_gp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
		del_timer(&rdp_gp->nocb_timer);
	}

	if (force || READ_ONCE(rdp_gp->nocb_gp_sleep)) {
		WRITE_ONCE(rdp_gp->nocb_gp_sleep, false);
		needwake = true;
	}
	raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
	if (needwake) {
		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DoWake"));
		swake_up_one_online(&rdp_gp->nocb_gp_wq);
	}

	return needwake;
}
/*
 * Kick the GP kthread for this NOCB group.
 */
static bool wake_nocb_gp(struct rcu_data *rdp, bool force)
{
	unsigned long flags;
	struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;

	raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
	return __wake_nocb_gp(rdp_gp, rdp, force, flags);
}
#ifdef CONFIG_RCU_LAZY
/*
 * LAZY_FLUSH_JIFFIES decides the maximum amount of time that
 * can elapse before lazy callbacks are flushed.  Lazy callbacks
 * could be flushed much earlier for a number of other reasons,
 * but LAZY_FLUSH_JIFFIES ensures that no lazy callbacks are left
 * unsubmitted to RCU after that many jiffies.
 */
#define LAZY_FLUSH_JIFFIES (10 * HZ)
static unsigned long jiffies_lazy_flush = LAZY_FLUSH_JIFFIES;

// To be called only from test code.
void rcu_set_jiffies_lazy_flush(unsigned long jif)
{
	jiffies_lazy_flush = jif;
}
EXPORT_SYMBOL(rcu_set_jiffies_lazy_flush);

unsigned long rcu_get_jiffies_lazy_flush(void)
{
	return jiffies_lazy_flush;
}
EXPORT_SYMBOL(rcu_get_jiffies_lazy_flush);
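
/*
 * Illustrative sketch (editorial note): rcutorture-style test code can
 * shorten the lazy-flush timeout through the accessors above.  The
 * specific values used here are hypothetical.
 *
 *	unsigned long orig = rcu_get_jiffies_lazy_flush();
 *
 *	rcu_set_jiffies_lazy_flush(HZ / 10);	// Flush lazy CBs within ~100ms.
 *	// ... run the test ...
 *	rcu_set_jiffies_lazy_flush(orig);	// Restore the 10-second default.
 */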
/*
 * Arrange to wake the GP kthread for this NOCB group at some future
 * time when it is safe to do so.
 */
static void wake_nocb_gp_defer(struct rcu_data *rdp, int waketype,
			       const char *reason)
{
	unsigned long flags;
	struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;

	raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);

	/*
	 * Bypass wakeup overrides previous deferments. In case of
	 * callback storms, no need to wake up too early.
	 */
	if (waketype == RCU_NOCB_WAKE_LAZY &&
	    rdp->nocb_defer_wakeup == RCU_NOCB_WAKE_NOT) {
		mod_timer(&rdp_gp->nocb_timer, jiffies + rcu_get_jiffies_lazy_flush());
		WRITE_ONCE(rdp_gp->nocb_defer_wakeup, waketype);
	} else if (waketype == RCU_NOCB_WAKE_BYPASS) {
		mod_timer(&rdp_gp->nocb_timer, jiffies + 2);
		WRITE_ONCE(rdp_gp->nocb_defer_wakeup, waketype);
	} else {
		if (rdp_gp->nocb_defer_wakeup < RCU_NOCB_WAKE)
			mod_timer(&rdp_gp->nocb_timer, jiffies + 1);
		if (rdp_gp->nocb_defer_wakeup < waketype)
			WRITE_ONCE(rdp_gp->nocb_defer_wakeup, waketype);
	}

	raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);

	trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, reason);
/*
 * Flush the ->nocb_bypass queue into ->cblist, enqueuing rhp if non-NULL.
 * However, if there is a callback to be enqueued and if ->nocb_bypass
 * proves to be initially empty, just return false because the no-CB GP
 * kthread may need to be awakened in this case.
 *
 * Return true if there was something to be flushed and it succeeded, otherwise
 * false.
 *
 * Note that this function always returns true if rhp is NULL.
 */
static bool rcu_nocb_do_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp_in,
				     unsigned long j, bool lazy)
{
	struct rcu_cblist rcl;
	struct rcu_head *rhp = rhp_in;

	WARN_ON_ONCE(!rcu_rdp_is_offloaded(rdp));
	rcu_lockdep_assert_cblist_protected(rdp);
	lockdep_assert_held(&rdp->nocb_bypass_lock);
	if (rhp && !rcu_cblist_n_cbs(&rdp->nocb_bypass)) {
		raw_spin_unlock(&rdp->nocb_bypass_lock);
		return false;
	}
	/* Note: ->cblist.len already accounts for ->nocb_bypass contents. */
	if (rhp)
		rcu_segcblist_inc_len(&rdp->cblist); /* Must precede enqueue. */

	/*
	 * If the new CB requested was a lazy one, queue it onto the main
	 * ->cblist so that we can take advantage of the grace-period that will
	 * happen regardless.  But queue it onto the bypass list first so that
	 * the lazy CB is ordered with the existing CBs in the bypass list.
	 */
	if (lazy && rhp) {
		rcu_cblist_enqueue(&rdp->nocb_bypass, rhp);
		rhp = NULL;
	}
	rcu_cblist_flush_enqueue(&rcl, &rdp->nocb_bypass, rhp);
	WRITE_ONCE(rdp->lazy_len, 0);

	rcu_segcblist_insert_pend_cbs(&rdp->cblist, &rcl);
	WRITE_ONCE(rdp->nocb_bypass_first, j);
	rcu_nocb_bypass_unlock(rdp);
	return true;
}
/*
 * Flush the ->nocb_bypass queue into ->cblist, enqueuing rhp if non-NULL.
 * However, if there is a callback to be enqueued and if ->nocb_bypass
 * proves to be initially empty, just return false because the no-CB GP
 * kthread may need to be awakened in this case.
 *
 * Note that this function always returns true if rhp is NULL.
 */
static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
				  unsigned long j, bool lazy)
{
	if (!rcu_rdp_is_offloaded(rdp))
		return true;
	rcu_lockdep_assert_cblist_protected(rdp);
	rcu_nocb_bypass_lock(rdp);
	return rcu_nocb_do_flush_bypass(rdp, rhp, j, lazy);
}
/*
 * If the ->nocb_bypass_lock is immediately available, flush the
 * ->nocb_bypass queue into ->cblist.
 */
static void rcu_nocb_try_flush_bypass(struct rcu_data *rdp, unsigned long j)
{
	rcu_lockdep_assert_cblist_protected(rdp);
	if (!rcu_rdp_is_offloaded(rdp) ||
	    !rcu_nocb_bypass_trylock(rdp))
		return;
	WARN_ON_ONCE(!rcu_nocb_do_flush_bypass(rdp, NULL, j, false));
}
/*
 * See whether it is appropriate to use the ->nocb_bypass list in order
 * to control contention on ->nocb_lock.  A limited number of direct
 * enqueues are permitted into ->cblist per jiffy.  If ->nocb_bypass
 * is non-empty, further callbacks must be placed into ->nocb_bypass,
 * otherwise rcu_barrier() breaks.  Use rcu_nocb_flush_bypass() to switch
 * back to direct use of ->cblist.  However, ->nocb_bypass should not be
 * used if ->cblist is empty, because otherwise callbacks can be stranded
 * on ->nocb_bypass because we cannot count on the current CPU ever again
 * invoking call_rcu().  The general rule is that if ->nocb_bypass is
 * non-empty, the corresponding no-CBs grace-period kthread must not be
 * in an indefinite sleep state.
 *
 * Finally, it is not permitted to use the bypass during early boot,
 * as doing so would confuse the auto-initialization code.  Besides
 * which, there is no point in worrying about lock contention while
 * there is only one CPU in operation.
 */
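
/*
 * Simplified sketch of the per-jiffy rate-limiting idea described above
 * (editorial note, not the kernel's actual logic): a counter reset on
 * each new jiffy decides whether the caller may enqueue directly onto
 * ->cblist or must use ->nocb_bypass.  The helper and parameter names
 * are hypothetical.
 *
 *	static bool want_bypass(unsigned long *last_jiffy, int *count,
 *				unsigned long now, int limit)
 *	{
 *		if (now != *last_jiffy) {	// New jiffy: reset the budget.
 *			*last_jiffy = now;
 *			*count = 0;
 *		}
 *		return ++*count > limit;	// Over budget: use the bypass.
 *	}
 */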
393 static bool rcu_nocb_try_bypass(struct rcu_data
*rdp
, struct rcu_head
*rhp
,
394 bool *was_alldone
, unsigned long flags
,
398 unsigned long cur_gp_seq
;
399 unsigned long j
= jiffies
;
400 long ncbs
= rcu_cblist_n_cbs(&rdp
->nocb_bypass
);
401 bool bypass_is_lazy
= (ncbs
== READ_ONCE(rdp
->lazy_len
));
403 lockdep_assert_irqs_disabled();
405 // Pure softirq/rcuc based processing: no bypassing, no
407 if (!rcu_rdp_is_offloaded(rdp
)) {
408 *was_alldone
= !rcu_segcblist_pend_cbs(&rdp
->cblist
);
412 // Don't use ->nocb_bypass during early boot.
413 if (rcu_scheduler_active
!= RCU_SCHEDULER_RUNNING
) {
415 WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp
->nocb_bypass
));
416 *was_alldone
= !rcu_segcblist_pend_cbs(&rdp
->cblist
);
420 // If we have advanced to a new jiffy, reset counts to allow
421 // moving back from ->nocb_bypass to ->cblist.
422 if (j
== rdp
->nocb_nobypass_last
) {
423 c
= rdp
->nocb_nobypass_count
+ 1;
425 WRITE_ONCE(rdp
->nocb_nobypass_last
, j
);
426 c
= rdp
->nocb_nobypass_count
- nocb_nobypass_lim_per_jiffy
;
427 if (ULONG_CMP_LT(rdp
->nocb_nobypass_count
,
428 nocb_nobypass_lim_per_jiffy
))
430 else if (c
> nocb_nobypass_lim_per_jiffy
)
431 c
= nocb_nobypass_lim_per_jiffy
;
433 WRITE_ONCE(rdp
->nocb_nobypass_count
, c
);
435 // If there hasn't yet been all that many ->cblist enqueues
436 // this jiffy, tell the caller to enqueue onto ->cblist. But flush
437 // ->nocb_bypass first.
438 // Lazy CBs throttle this back and do immediate bypass queuing.
439 if (rdp
->nocb_nobypass_count
< nocb_nobypass_lim_per_jiffy
&& !lazy
) {
441 *was_alldone
= !rcu_segcblist_pend_cbs(&rdp
->cblist
);
443 trace_rcu_nocb_wake(rcu_state
.name
, rdp
->cpu
,
446 WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp
, NULL
, j
, false));
447 WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp
->nocb_bypass
));
448 return false; // Caller must enqueue the callback.
451 // If ->nocb_bypass has been used too long or is too full,
452 // flush ->nocb_bypass to ->cblist.
453 if ((ncbs
&& !bypass_is_lazy
&& j
!= READ_ONCE(rdp
->nocb_bypass_first
)) ||
454 (ncbs
&& bypass_is_lazy
&&
455 (time_after(j
, READ_ONCE(rdp
->nocb_bypass_first
) + rcu_get_jiffies_lazy_flush()))) ||
458 *was_alldone
= !rcu_segcblist_pend_cbs(&rdp
->cblist
);
460 if (!rcu_nocb_flush_bypass(rdp
, rhp
, j
, lazy
)) {
462 trace_rcu_nocb_wake(rcu_state
.name
, rdp
->cpu
,
464 WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp
->nocb_bypass
));
465 return false; // Caller must enqueue the callback.
467 if (j
!= rdp
->nocb_gp_adv_time
&&
468 rcu_segcblist_nextgp(&rdp
->cblist
, &cur_gp_seq
) &&
469 rcu_seq_done(&rdp
->mynode
->gp_seq
, cur_gp_seq
)) {
470 rcu_advance_cbs_nowake(rdp
->mynode
, rdp
);
471 rdp
->nocb_gp_adv_time
= j
;
474 // The flush succeeded and we moved CBs into the regular list.
475 // Don't wait for the wake up timer as it may be too far ahead.
476 // Wake up the GP thread now instead, if the cblist was empty.
477 __call_rcu_nocb_wake(rdp
, *was_alldone
, flags
);
479 return true; // Callback already enqueued.
482 // We need to use the bypass.
483 rcu_nocb_bypass_lock(rdp
);
484 ncbs
= rcu_cblist_n_cbs(&rdp
->nocb_bypass
);
485 rcu_segcblist_inc_len(&rdp
->cblist
); /* Must precede enqueue. */
486 rcu_cblist_enqueue(&rdp
->nocb_bypass
, rhp
);
489 WRITE_ONCE(rdp
->lazy_len
, rdp
->lazy_len
+ 1);
492 WRITE_ONCE(rdp
->nocb_bypass_first
, j
);
493 trace_rcu_nocb_wake(rcu_state
.name
, rdp
->cpu
, TPS("FirstBQ"));
495 rcu_nocb_bypass_unlock(rdp
);
497 // A wake up of the grace period kthread or timer adjustment
498 // needs to be done only if:
499 // 1. Bypass list was fully empty before (this is the first
500 // bypass list entry), or:
501 // 2. Both of these conditions are met:
502 // a. The bypass list previously had only lazy CBs, and:
503 // b. The new CB is non-lazy.
504 if (!ncbs
|| (bypass_is_lazy
&& !lazy
)) {
505 // No-CBs GP kthread might be indefinitely asleep, if so, wake.
506 rcu_nocb_lock(rdp
); // Rare during call_rcu() flood.
507 if (!rcu_segcblist_pend_cbs(&rdp
->cblist
)) {
508 trace_rcu_nocb_wake(rcu_state
.name
, rdp
->cpu
,
510 __call_rcu_nocb_wake(rdp
, true, flags
);
512 trace_rcu_nocb_wake(rcu_state
.name
, rdp
->cpu
,
513 TPS("FirstBQnoWake"));
514 rcu_nocb_unlock(rdp
);
517 return true; // Callback already enqueued.
/*
 * Awaken the no-CBs grace-period kthread if needed, either due to it
 * legitimately being asleep or due to overload conditions.
 *
 * If warranted, also wake up the kthread servicing this CPU's queues.
 */
526 static void __call_rcu_nocb_wake(struct rcu_data
*rdp
, bool was_alldone
,
528 __releases(rdp
->nocb_lock
)
531 unsigned long cur_gp_seq
;
535 struct task_struct
*t
;
536 struct rcu_data
*rdp_gp
= rdp
->nocb_gp_rdp
;
538 // If we are being polled or there is no kthread, just leave.
539 t
= READ_ONCE(rdp
->nocb_gp_kthread
);
540 if (rcu_nocb_poll
|| !t
) {
541 rcu_nocb_unlock(rdp
);
542 trace_rcu_nocb_wake(rcu_state
.name
, rdp
->cpu
,
	// Need to actually do a wakeup.
547 len
= rcu_segcblist_n_cbs(&rdp
->cblist
);
548 bypass_len
= rcu_cblist_n_cbs(&rdp
->nocb_bypass
);
549 lazy_len
= READ_ONCE(rdp
->lazy_len
);
551 rdp
->qlen_last_fqs_check
= len
;
552 // Only lazy CBs in bypass list
553 if (lazy_len
&& bypass_len
== lazy_len
) {
554 rcu_nocb_unlock(rdp
);
555 wake_nocb_gp_defer(rdp
, RCU_NOCB_WAKE_LAZY
,
557 } else if (!irqs_disabled_flags(flags
) && cpu_online(rdp
->cpu
)) {
558 /* ... if queue was empty ... */
559 rcu_nocb_unlock(rdp
);
560 wake_nocb_gp(rdp
, false);
561 trace_rcu_nocb_wake(rcu_state
.name
, rdp
->cpu
,
565 * Don't do the wake-up upfront on fragile paths.
566 * Also offline CPUs can't call swake_up_one_online() from
567 * (soft-)IRQs. Rely on the final deferred wake-up from
568 * rcutree_report_cpu_dead()
570 rcu_nocb_unlock(rdp
);
571 wake_nocb_gp_defer(rdp
, RCU_NOCB_WAKE
,
572 TPS("WakeEmptyIsDeferred"));
574 } else if (len
> rdp
->qlen_last_fqs_check
+ qhimark
) {
575 /* ... or if many callbacks queued. */
576 rdp
->qlen_last_fqs_check
= len
;
578 if (j
!= rdp
->nocb_gp_adv_time
&&
579 rcu_segcblist_nextgp(&rdp
->cblist
, &cur_gp_seq
) &&
580 rcu_seq_done(&rdp
->mynode
->gp_seq
, cur_gp_seq
)) {
581 rcu_advance_cbs_nowake(rdp
->mynode
, rdp
);
582 rdp
->nocb_gp_adv_time
= j
;
584 smp_mb(); /* Enqueue before timer_pending(). */
585 if ((rdp
->nocb_cb_sleep
||
586 !rcu_segcblist_ready_cbs(&rdp
->cblist
)) &&
587 !timer_pending(&rdp_gp
->nocb_timer
)) {
588 rcu_nocb_unlock(rdp
);
589 wake_nocb_gp_defer(rdp
, RCU_NOCB_WAKE_FORCE
,
590 TPS("WakeOvfIsDeferred"));
592 rcu_nocb_unlock(rdp
);
593 trace_rcu_nocb_wake(rcu_state
.name
, rdp
->cpu
, TPS("WakeNot"));
596 rcu_nocb_unlock(rdp
);
597 trace_rcu_nocb_wake(rcu_state
.name
, rdp
->cpu
, TPS("WakeNot"));
601 static void call_rcu_nocb(struct rcu_data
*rdp
, struct rcu_head
*head
,
602 rcu_callback_t func
, unsigned long flags
, bool lazy
)
606 if (!rcu_nocb_try_bypass(rdp
, head
, &was_alldone
, flags
, lazy
)) {
607 /* Not enqueued on bypass but locked, do regular enqueue */
608 rcutree_enqueue(rdp
, head
, func
);
609 __call_rcu_nocb_wake(rdp
, was_alldone
, flags
); /* unlocks */
613 static void nocb_gp_toggle_rdp(struct rcu_data
*rdp_gp
, struct rcu_data
*rdp
)
615 struct rcu_segcblist
*cblist
= &rdp
->cblist
;
619 * Locking orders future de-offloaded callbacks enqueue against previous
620 * handling of this rdp. Ie: Make sure rcuog is done with this rdp before
621 * deoffloaded callbacks can be enqueued.
623 raw_spin_lock_irqsave(&rdp
->nocb_lock
, flags
);
624 if (!rcu_segcblist_test_flags(cblist
, SEGCBLIST_OFFLOADED
)) {
626 * Offloading. Set our flag and notify the offload worker.
627 * We will handle this rdp until it ever gets de-offloaded.
629 list_add_tail(&rdp
->nocb_entry_rdp
, &rdp_gp
->nocb_head_rdp
);
630 rcu_segcblist_set_flags(cblist
, SEGCBLIST_OFFLOADED
);
633 * De-offloading. Clear our flag and notify the de-offload worker.
634 * We will ignore this rdp until it ever gets re-offloaded.
636 list_del(&rdp
->nocb_entry_rdp
);
637 rcu_segcblist_clear_flags(cblist
, SEGCBLIST_OFFLOADED
);
639 raw_spin_unlock_irqrestore(&rdp
->nocb_lock
, flags
);
642 static void nocb_gp_sleep(struct rcu_data
*my_rdp
, int cpu
)
644 trace_rcu_nocb_wake(rcu_state
.name
, cpu
, TPS("Sleep"));
645 swait_event_interruptible_exclusive(my_rdp
->nocb_gp_wq
,
646 !READ_ONCE(my_rdp
->nocb_gp_sleep
));
647 trace_rcu_nocb_wake(rcu_state
.name
, cpu
, TPS("EndSleep"));
651 * No-CBs GP kthreads come here to wait for additional callbacks to show up
652 * or for grace periods to end.
654 static void nocb_gp_wait(struct rcu_data
*my_rdp
)
657 int __maybe_unused cpu
= my_rdp
->cpu
;
658 unsigned long cur_gp_seq
;
661 unsigned long j
= jiffies
;
663 bool needwait_gp
= false; // This prevents actual uninitialized use.
666 struct rcu_data
*rdp
, *rdp_toggling
= NULL
;
667 struct rcu_node
*rnp
;
668 unsigned long wait_gp_seq
= 0; // Suppress "use uninitialized" warning.
669 bool wasempty
= false;
672 * Each pass through the following loop checks for CBs and for the
673 * nearest grace period (if any) to wait for next. The CB kthreads
674 * and the global grace-period kthread are awakened if needed.
676 WARN_ON_ONCE(my_rdp
->nocb_gp_rdp
!= my_rdp
);
678 * An rcu_data structure is removed from the list after its
679 * CPU is de-offloaded and added to the list before that CPU is
680 * (re-)offloaded. If the following loop happens to be referencing
681 * that rcu_data structure during the time that the corresponding
682 * CPU is de-offloaded and then immediately re-offloaded, this
683 * loop's rdp pointer will be carried to the end of the list by
684 * the resulting pair of list operations. This can cause the loop
685 * to skip over some of the rcu_data structures that were supposed
686 * to have been scanned. Fortunately a new iteration through the
687 * entire loop is forced after a given CPU's rcu_data structure
688 * is added to the list, so the skipped-over rcu_data structures
689 * won't be ignored for long.
691 list_for_each_entry(rdp
, &my_rdp
->nocb_head_rdp
, nocb_entry_rdp
) {
693 bool flush_bypass
= false;
696 trace_rcu_nocb_wake(rcu_state
.name
, rdp
->cpu
, TPS("Check"));
697 rcu_nocb_lock_irqsave(rdp
, flags
);
698 lockdep_assert_held(&rdp
->nocb_lock
);
699 bypass_ncbs
= rcu_cblist_n_cbs(&rdp
->nocb_bypass
);
700 lazy_ncbs
= READ_ONCE(rdp
->lazy_len
);
702 if (bypass_ncbs
&& (lazy_ncbs
== bypass_ncbs
) &&
703 (time_after(j
, READ_ONCE(rdp
->nocb_bypass_first
) + rcu_get_jiffies_lazy_flush()) ||
704 bypass_ncbs
> 2 * qhimark
)) {
706 } else if (bypass_ncbs
&& (lazy_ncbs
!= bypass_ncbs
) &&
707 (time_after(j
, READ_ONCE(rdp
->nocb_bypass_first
) + 1) ||
708 bypass_ncbs
> 2 * qhimark
)) {
710 } else if (!bypass_ncbs
&& rcu_segcblist_empty(&rdp
->cblist
)) {
711 rcu_nocb_unlock_irqrestore(rdp
, flags
);
712 continue; /* No callbacks here, try next. */
716 // Bypass full or old, so flush it.
717 (void)rcu_nocb_try_flush_bypass(rdp
, j
);
718 bypass_ncbs
= rcu_cblist_n_cbs(&rdp
->nocb_bypass
);
719 lazy_ncbs
= READ_ONCE(rdp
->lazy_len
);
723 trace_rcu_nocb_wake(rcu_state
.name
, rdp
->cpu
,
724 bypass_ncbs
== lazy_ncbs
? TPS("Lazy") : TPS("Bypass"));
725 if (bypass_ncbs
== lazy_ncbs
)
732 // Advance callbacks if helpful and low contention.
734 if (!rcu_segcblist_restempty(&rdp
->cblist
,
735 RCU_NEXT_READY_TAIL
) ||
736 (rcu_segcblist_nextgp(&rdp
->cblist
, &cur_gp_seq
) &&
737 rcu_seq_done(&rnp
->gp_seq
, cur_gp_seq
))) {
738 raw_spin_lock_rcu_node(rnp
); /* irqs disabled. */
739 needwake_gp
= rcu_advance_cbs(rnp
, rdp
);
740 wasempty
= rcu_segcblist_restempty(&rdp
->cblist
,
741 RCU_NEXT_READY_TAIL
);
742 raw_spin_unlock_rcu_node(rnp
); /* irqs disabled. */
744 // Need to wait on some grace period?
745 WARN_ON_ONCE(wasempty
&&
746 !rcu_segcblist_restempty(&rdp
->cblist
,
747 RCU_NEXT_READY_TAIL
));
748 if (rcu_segcblist_nextgp(&rdp
->cblist
, &cur_gp_seq
)) {
750 ULONG_CMP_LT(cur_gp_seq
, wait_gp_seq
))
751 wait_gp_seq
= cur_gp_seq
;
753 trace_rcu_nocb_wake(rcu_state
.name
, rdp
->cpu
,
756 if (rcu_segcblist_ready_cbs(&rdp
->cblist
)) {
757 needwake
= rdp
->nocb_cb_sleep
;
758 WRITE_ONCE(rdp
->nocb_cb_sleep
, false);
762 rcu_nocb_unlock_irqrestore(rdp
, flags
);
764 swake_up_one(&rdp
->nocb_cb_wq
);
768 rcu_gp_kthread_wake();
771 my_rdp
->nocb_gp_bypass
= bypass
;
772 my_rdp
->nocb_gp_gp
= needwait_gp
;
773 my_rdp
->nocb_gp_seq
= needwait_gp
? wait_gp_seq
: 0;
775 // At least one child with non-empty ->nocb_bypass, so set
776 // timer in order to avoid stranding its callbacks.
777 if (!rcu_nocb_poll
) {
778 // If bypass list only has lazy CBs. Add a deferred lazy wake up.
779 if (lazy
&& !bypass
) {
780 wake_nocb_gp_defer(my_rdp
, RCU_NOCB_WAKE_LAZY
,
781 TPS("WakeLazyIsDeferred"));
782 // Otherwise add a deferred bypass wake up.
784 wake_nocb_gp_defer(my_rdp
, RCU_NOCB_WAKE_BYPASS
,
785 TPS("WakeBypassIsDeferred"));
790 /* Polling, so trace if first poll in the series. */
792 trace_rcu_nocb_wake(rcu_state
.name
, cpu
, TPS("Poll"));
793 if (list_empty(&my_rdp
->nocb_head_rdp
)) {
794 raw_spin_lock_irqsave(&my_rdp
->nocb_gp_lock
, flags
);
795 if (!my_rdp
->nocb_toggling_rdp
)
796 WRITE_ONCE(my_rdp
->nocb_gp_sleep
, true);
797 raw_spin_unlock_irqrestore(&my_rdp
->nocb_gp_lock
, flags
);
798 /* Wait for any offloading rdp */
799 nocb_gp_sleep(my_rdp
, cpu
);
801 schedule_timeout_idle(1);
803 } else if (!needwait_gp
) {
804 /* Wait for callbacks to appear. */
805 nocb_gp_sleep(my_rdp
, cpu
);
807 rnp
= my_rdp
->mynode
;
808 trace_rcu_this_gp(rnp
, my_rdp
, wait_gp_seq
, TPS("StartWait"));
809 swait_event_interruptible_exclusive(
810 rnp
->nocb_gp_wq
[rcu_seq_ctr(wait_gp_seq
) & 0x1],
811 rcu_seq_done(&rnp
->gp_seq
, wait_gp_seq
) ||
812 !READ_ONCE(my_rdp
->nocb_gp_sleep
));
813 trace_rcu_this_gp(rnp
, my_rdp
, wait_gp_seq
, TPS("EndWait"));
816 if (!rcu_nocb_poll
) {
817 raw_spin_lock_irqsave(&my_rdp
->nocb_gp_lock
, flags
);
818 // (De-)queue an rdp to/from the group if its nocb state is changing
819 rdp_toggling
= my_rdp
->nocb_toggling_rdp
;
821 my_rdp
->nocb_toggling_rdp
= NULL
;
823 if (my_rdp
->nocb_defer_wakeup
> RCU_NOCB_WAKE_NOT
) {
824 WRITE_ONCE(my_rdp
->nocb_defer_wakeup
, RCU_NOCB_WAKE_NOT
);
825 del_timer(&my_rdp
->nocb_timer
);
827 WRITE_ONCE(my_rdp
->nocb_gp_sleep
, true);
828 raw_spin_unlock_irqrestore(&my_rdp
->nocb_gp_lock
, flags
);
830 rdp_toggling
= READ_ONCE(my_rdp
->nocb_toggling_rdp
);
		/*
		 * Paranoid locking to make sure nocb_toggling_rdp is well
		 * reset *before* we (re)set SEGCBLIST_KTHREAD_GP, or we could
		 * race with another round of nocb toggling for this rdp.
		 * Nocb locking should prevent that already, but we stick
		 * to paranoia, especially on this rare path.
		 */
839 raw_spin_lock_irqsave(&my_rdp
->nocb_gp_lock
, flags
);
840 my_rdp
->nocb_toggling_rdp
= NULL
;
841 raw_spin_unlock_irqrestore(&my_rdp
->nocb_gp_lock
, flags
);
846 nocb_gp_toggle_rdp(my_rdp
, rdp_toggling
);
847 swake_up_one(&rdp_toggling
->nocb_state_wq
);
850 my_rdp
->nocb_gp_seq
= -1;
851 WARN_ON(signal_pending(current
));
855 * No-CBs grace-period-wait kthread. There is one of these per group
856 * of CPUs, but only once at least one CPU in that group has come online
857 * at least once since boot. This kthread checks for newly posted
858 * callbacks from any of the CPUs it is responsible for, waits for a
859 * grace period, then awakens all of the rcu_nocb_cb_kthread() instances
860 * that then have callback-invocation work to do.
862 static int rcu_nocb_gp_kthread(void *arg
)
864 struct rcu_data
*rdp
= arg
;
867 WRITE_ONCE(rdp
->nocb_gp_loops
, rdp
->nocb_gp_loops
+ 1);
869 cond_resched_tasks_rcu_qs();
874 static inline bool nocb_cb_wait_cond(struct rcu_data
*rdp
)
876 return !READ_ONCE(rdp
->nocb_cb_sleep
) || kthread_should_park();
880 * Invoke any ready callbacks from the corresponding no-CBs CPU,
881 * then, if there are no more, wait for more to appear.
883 static void nocb_cb_wait(struct rcu_data
*rdp
)
885 struct rcu_segcblist
*cblist
= &rdp
->cblist
;
886 unsigned long cur_gp_seq
;
888 bool needwake_gp
= false;
889 struct rcu_node
*rnp
= rdp
->mynode
;
891 swait_event_interruptible_exclusive(rdp
->nocb_cb_wq
,
892 nocb_cb_wait_cond(rdp
));
893 if (kthread_should_park()) {
895 * kthread_park() must be preceded by an rcu_barrier().
896 * But yet another rcu_barrier() might have sneaked in between
897 * the barrier callback execution and the callbacks counter
900 if (rdp
->nocb_cb_sleep
) {
901 rcu_nocb_lock_irqsave(rdp
, flags
);
902 WARN_ON_ONCE(rcu_segcblist_n_cbs(&rdp
->cblist
));
903 rcu_nocb_unlock_irqrestore(rdp
, flags
);
906 } else if (READ_ONCE(rdp
->nocb_cb_sleep
)) {
907 WARN_ON(signal_pending(current
));
908 trace_rcu_nocb_wake(rcu_state
.name
, rdp
->cpu
, TPS("WokeEmpty"));
911 WARN_ON_ONCE(!rcu_rdp_is_offloaded(rdp
));
913 local_irq_save(flags
);
915 local_irq_restore(flags
);
	/*
	 * Disable BH to provide the expected environment.  Also, when
	 * transitioning to/from NOCB mode, a self-requeuing callback might
	 * be invoked from softirq.  A short grace period could cause both
	 * instances of this callback to execute concurrently.
	 */
925 lockdep_assert_irqs_enabled();
926 rcu_nocb_lock_irqsave(rdp
, flags
);
927 if (rcu_segcblist_nextgp(cblist
, &cur_gp_seq
) &&
928 rcu_seq_done(&rnp
->gp_seq
, cur_gp_seq
) &&
929 raw_spin_trylock_rcu_node(rnp
)) { /* irqs already disabled. */
930 needwake_gp
= rcu_advance_cbs(rdp
->mynode
, rdp
);
931 raw_spin_unlock_rcu_node(rnp
); /* irqs remain disabled. */
934 if (!rcu_segcblist_ready_cbs(cblist
)) {
935 WRITE_ONCE(rdp
->nocb_cb_sleep
, true);
936 trace_rcu_nocb_wake(rcu_state
.name
, rdp
->cpu
, TPS("CBSleep"));
938 WRITE_ONCE(rdp
->nocb_cb_sleep
, false);
941 rcu_nocb_unlock_irqrestore(rdp
, flags
);
943 rcu_gp_kthread_wake();
947 * Per-rcu_data kthread, but only for no-CBs CPUs. Repeatedly invoke
948 * nocb_cb_wait() to do the dirty work.
950 static int rcu_nocb_cb_kthread(void *arg
)
952 struct rcu_data
*rdp
= arg
;
954 // Each pass through this loop does one callback batch, and,
955 // if there are no more ready callbacks, waits for them.
958 cond_resched_tasks_rcu_qs();
963 /* Is a deferred wakeup of rcu_nocb_kthread() required? */
964 static int rcu_nocb_need_deferred_wakeup(struct rcu_data
*rdp
, int level
)
966 return READ_ONCE(rdp
->nocb_defer_wakeup
) >= level
;
969 /* Do a deferred wakeup of rcu_nocb_kthread(). */
970 static bool do_nocb_deferred_wakeup_common(struct rcu_data
*rdp_gp
,
971 struct rcu_data
*rdp
, int level
,
973 __releases(rdp_gp
->nocb_gp_lock
)
978 if (!rcu_nocb_need_deferred_wakeup(rdp_gp
, level
)) {
979 raw_spin_unlock_irqrestore(&rdp_gp
->nocb_gp_lock
, flags
);
983 ndw
= rdp_gp
->nocb_defer_wakeup
;
984 ret
= __wake_nocb_gp(rdp_gp
, rdp
, ndw
== RCU_NOCB_WAKE_FORCE
, flags
);
985 trace_rcu_nocb_wake(rcu_state
.name
, rdp
->cpu
, TPS("DeferredWake"));
990 /* Do a deferred wakeup of rcu_nocb_kthread() from a timer handler. */
991 static void do_nocb_deferred_wakeup_timer(struct timer_list
*t
)
994 struct rcu_data
*rdp
= from_timer(rdp
, t
, nocb_timer
);
996 WARN_ON_ONCE(rdp
->nocb_gp_rdp
!= rdp
);
997 trace_rcu_nocb_wake(rcu_state
.name
, rdp
->cpu
, TPS("Timer"));
999 raw_spin_lock_irqsave(&rdp
->nocb_gp_lock
, flags
);
1000 smp_mb__after_spinlock(); /* Timer expire before wakeup. */
1001 do_nocb_deferred_wakeup_common(rdp
, rdp
, RCU_NOCB_WAKE_BYPASS
, flags
);
/*
 * Do a deferred wakeup of rcu_nocb_kthread() from fastpath.
 * This means we do an inexact common-case check.  Note that if
 * we miss, ->nocb_timer will eventually clean things up.
 */
static bool do_nocb_deferred_wakeup(struct rcu_data *rdp)
{
	unsigned long flags;
	struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;

	if (!rdp_gp || !rcu_nocb_need_deferred_wakeup(rdp_gp, RCU_NOCB_WAKE))
		return false;

	raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
	return do_nocb_deferred_wakeup_common(rdp_gp, rdp, RCU_NOCB_WAKE, flags);
}

void rcu_nocb_flush_deferred_wakeup(void)
{
	do_nocb_deferred_wakeup(this_cpu_ptr(&rcu_data));
}
EXPORT_SYMBOL_GPL(rcu_nocb_flush_deferred_wakeup);
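
/*
 * Illustrative sketch (editorial note): the scheduler's idle loop is
 * expected to flush any deferred wakeup before a CPU can enter an
 * extended quiescent state, conceptually along these lines.  The
 * surrounding function is hypothetical.
 *
 *	static void before_idle_halt(void)
 *	{
 *		rcu_nocb_flush_deferred_wakeup();	// No-op if nothing is deferred.
 *		// ... architecture idle / cpuidle entry ...
 *	}
 */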
1027 static int rcu_nocb_queue_toggle_rdp(struct rcu_data
*rdp
)
1029 struct rcu_data
*rdp_gp
= rdp
->nocb_gp_rdp
;
1030 bool wake_gp
= false;
1031 unsigned long flags
;
1033 raw_spin_lock_irqsave(&rdp_gp
->nocb_gp_lock
, flags
);
1034 // Queue this rdp for add/del to/from the list to iterate on rcuog
1035 WRITE_ONCE(rdp_gp
->nocb_toggling_rdp
, rdp
);
1036 if (rdp_gp
->nocb_gp_sleep
) {
1037 rdp_gp
->nocb_gp_sleep
= false;
1040 raw_spin_unlock_irqrestore(&rdp_gp
->nocb_gp_lock
, flags
);
1045 static bool rcu_nocb_rdp_deoffload_wait_cond(struct rcu_data
*rdp
)
1047 unsigned long flags
;
1051 * Locking makes sure rcuog is done handling this rdp before deoffloaded
1052 * enqueue can happen. Also it keeps the SEGCBLIST_OFFLOADED flag stable
1053 * while the ->nocb_lock is held.
1055 raw_spin_lock_irqsave(&rdp
->nocb_lock
, flags
);
1056 ret
= !rcu_segcblist_test_flags(&rdp
->cblist
, SEGCBLIST_OFFLOADED
);
1057 raw_spin_unlock_irqrestore(&rdp
->nocb_lock
, flags
);
1062 static int rcu_nocb_rdp_deoffload(struct rcu_data
*rdp
)
1064 unsigned long flags
;
1066 struct rcu_data
*rdp_gp
= rdp
->nocb_gp_rdp
;
1068 /* CPU must be offline, unless it's early boot */
1069 WARN_ON_ONCE(cpu_online(rdp
->cpu
) && rdp
->cpu
!= raw_smp_processor_id());
1071 pr_info("De-offloading %d\n", rdp
->cpu
);
1073 /* Flush all callbacks from segcblist and bypass */
1077 * Make sure the rcuoc kthread isn't in the middle of a nocb locked
1078 * sequence while offloading is deactivated, along with nocb locking.
1080 if (rdp
->nocb_cb_kthread
)
1081 kthread_park(rdp
->nocb_cb_kthread
);
1083 rcu_nocb_lock_irqsave(rdp
, flags
);
1084 WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp
->nocb_bypass
));
1085 WARN_ON_ONCE(rcu_segcblist_n_cbs(&rdp
->cblist
));
1086 rcu_nocb_unlock_irqrestore(rdp
, flags
);
1088 wake_gp
= rcu_nocb_queue_toggle_rdp(rdp
);
1090 mutex_lock(&rdp_gp
->nocb_gp_kthread_mutex
);
1092 if (rdp_gp
->nocb_gp_kthread
) {
1094 wake_up_process(rdp_gp
->nocb_gp_kthread
);
1096 swait_event_exclusive(rdp
->nocb_state_wq
,
1097 rcu_nocb_rdp_deoffload_wait_cond(rdp
));
		/*
		 * No kthread to clear the flags for us or to remove the rdp from the
		 * nocb list to iterate.  Do it here instead.  Locking doesn't look
		 * strictly necessary, but we stick to paranoia on this rare path.
		 */
1104 raw_spin_lock_irqsave(&rdp
->nocb_lock
, flags
);
1105 rcu_segcblist_clear_flags(&rdp
->cblist
, SEGCBLIST_OFFLOADED
);
1106 raw_spin_unlock_irqrestore(&rdp
->nocb_lock
, flags
);
1108 list_del(&rdp
->nocb_entry_rdp
);
1111 mutex_unlock(&rdp_gp
->nocb_gp_kthread_mutex
);
1116 int rcu_nocb_cpu_deoffload(int cpu
)
1118 struct rcu_data
*rdp
= per_cpu_ptr(&rcu_data
, cpu
);
1122 mutex_lock(&rcu_state
.nocb_mutex
);
1123 if (rcu_rdp_is_offloaded(rdp
)) {
1124 if (!cpu_online(cpu
)) {
1125 ret
= rcu_nocb_rdp_deoffload(rdp
);
1127 cpumask_clear_cpu(cpu
, rcu_nocb_mask
);
1129 pr_info("NOCB: Cannot CB-deoffload online CPU %d\n", rdp
->cpu
);
1133 mutex_unlock(&rcu_state
.nocb_mutex
);
1138 EXPORT_SYMBOL_GPL(rcu_nocb_cpu_deoffload
);
1140 static bool rcu_nocb_rdp_offload_wait_cond(struct rcu_data
*rdp
)
1142 unsigned long flags
;
1145 raw_spin_lock_irqsave(&rdp
->nocb_lock
, flags
);
1146 ret
= rcu_segcblist_test_flags(&rdp
->cblist
, SEGCBLIST_OFFLOADED
);
1147 raw_spin_unlock_irqrestore(&rdp
->nocb_lock
, flags
);
1152 static int rcu_nocb_rdp_offload(struct rcu_data
*rdp
)
1155 struct rcu_data
*rdp_gp
= rdp
->nocb_gp_rdp
;
1157 WARN_ON_ONCE(cpu_online(rdp
->cpu
));
1159 * For now we only support re-offload, ie: the rdp must have been
1160 * offloaded on boot first.
1162 if (!rdp
->nocb_gp_rdp
)
1165 if (WARN_ON_ONCE(!rdp_gp
->nocb_gp_kthread
))
1168 pr_info("Offloading %d\n", rdp
->cpu
);
1170 WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp
->nocb_bypass
));
1171 WARN_ON_ONCE(rcu_segcblist_n_cbs(&rdp
->cblist
));
1173 wake_gp
= rcu_nocb_queue_toggle_rdp(rdp
);
1175 wake_up_process(rdp_gp
->nocb_gp_kthread
);
1177 swait_event_exclusive(rdp
->nocb_state_wq
,
1178 rcu_nocb_rdp_offload_wait_cond(rdp
));
1180 kthread_unpark(rdp
->nocb_cb_kthread
);
int rcu_nocb_cpu_offload(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	int ret = 0;

	cpus_read_lock();
	mutex_lock(&rcu_state.nocb_mutex);
	if (!rcu_rdp_is_offloaded(rdp)) {
		if (!cpu_online(cpu)) {
			ret = rcu_nocb_rdp_offload(rdp);
			if (!ret)
				cpumask_set_cpu(cpu, rcu_nocb_mask);
		} else {
			pr_info("NOCB: Cannot CB-offload online CPU %d\n", rdp->cpu);
			ret = -EINVAL;
		}
	}
	mutex_unlock(&rcu_state.nocb_mutex);
	cpus_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(rcu_nocb_cpu_offload);
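
/*
 * Illustrative sketch (editorial note): a caller wanting to toggle
 * offloading at runtime must first take the CPU offline, as enforced
 * above.  Error handling is abbreviated and the wrapper name is
 * hypothetical.
 *
 *	static int make_cpu_nocb(int cpu)
 *	{
 *		int ret = remove_cpu(cpu);		// CPU must be offline.
 *
 *		if (ret)
 *			return ret;
 *		ret = rcu_nocb_cpu_offload(cpu);	// Mark it in rcu_nocb_mask.
 *		if (ret)
 *			return ret;
 *		return add_cpu(cpu);			// Bring it back, now offloaded.
 *	}
 */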
1209 #ifdef CONFIG_RCU_LAZY
1210 static unsigned long
1211 lazy_rcu_shrink_count(struct shrinker
*shrink
, struct shrink_control
*sc
)
1214 unsigned long count
= 0;
1216 if (WARN_ON_ONCE(!cpumask_available(rcu_nocb_mask
)))
1219 /* Protect rcu_nocb_mask against concurrent (de-)offloading. */
1220 if (!mutex_trylock(&rcu_state
.nocb_mutex
))
1223 /* Snapshot count of all CPUs */
1224 for_each_cpu(cpu
, rcu_nocb_mask
) {
1225 struct rcu_data
*rdp
= per_cpu_ptr(&rcu_data
, cpu
);
1227 count
+= READ_ONCE(rdp
->lazy_len
);
1230 mutex_unlock(&rcu_state
.nocb_mutex
);
1232 return count
? count
: SHRINK_EMPTY
;
1235 static unsigned long
1236 lazy_rcu_shrink_scan(struct shrinker
*shrink
, struct shrink_control
*sc
)
1239 unsigned long flags
;
1240 unsigned long count
= 0;
1242 if (WARN_ON_ONCE(!cpumask_available(rcu_nocb_mask
)))
1245 * Protect against concurrent (de-)offloading. Otherwise nocb locking
1246 * may be ignored or imbalanced.
1248 if (!mutex_trylock(&rcu_state
.nocb_mutex
)) {
1250 * But really don't insist if nocb_mutex is contended since we
1251 * can't guarantee that it will never engage in a dependency
1252 * chain involving memory allocation. The lock is seldom contended
1258 /* Snapshot count of all CPUs */
1259 for_each_cpu(cpu
, rcu_nocb_mask
) {
1260 struct rcu_data
*rdp
= per_cpu_ptr(&rcu_data
, cpu
);
1263 if (WARN_ON_ONCE(!rcu_rdp_is_offloaded(rdp
)))
1266 if (!READ_ONCE(rdp
->lazy_len
))
1269 rcu_nocb_lock_irqsave(rdp
, flags
);
1271 * Recheck under the nocb lock. Since we are not holding the bypass
1272 * lock we may still race with increments from the enqueuer but still
1273 * we know for sure if there is at least one lazy callback.
1275 _count
= READ_ONCE(rdp
->lazy_len
);
1277 rcu_nocb_unlock_irqrestore(rdp
, flags
);
1280 rcu_nocb_try_flush_bypass(rdp
, jiffies
);
1281 rcu_nocb_unlock_irqrestore(rdp
, flags
);
1282 wake_nocb_gp(rdp
, false);
1283 sc
->nr_to_scan
-= _count
;
1285 if (sc
->nr_to_scan
<= 0)
1289 mutex_unlock(&rcu_state
.nocb_mutex
);
1291 return count
? count
: SHRINK_STOP
;
1293 #endif // #ifdef CONFIG_RCU_LAZY
1295 void __init
rcu_init_nohz(void)
1298 struct rcu_data
*rdp
;
1299 const struct cpumask
*cpumask
= NULL
;
1300 struct shrinker
* __maybe_unused lazy_rcu_shrinker
;
1302 #if defined(CONFIG_NO_HZ_FULL)
1303 if (tick_nohz_full_running
&& !cpumask_empty(tick_nohz_full_mask
))
1304 cpumask
= tick_nohz_full_mask
;
1307 if (IS_ENABLED(CONFIG_RCU_NOCB_CPU_DEFAULT_ALL
) &&
1308 !rcu_state
.nocb_is_setup
&& !cpumask
)
1309 cpumask
= cpu_possible_mask
;
1312 if (!cpumask_available(rcu_nocb_mask
)) {
1313 if (!zalloc_cpumask_var(&rcu_nocb_mask
, GFP_KERNEL
)) {
1314 pr_info("rcu_nocb_mask allocation failed, callback offloading disabled.\n");
1319 cpumask_or(rcu_nocb_mask
, rcu_nocb_mask
, cpumask
);
1320 rcu_state
.nocb_is_setup
= true;
1323 if (!rcu_state
.nocb_is_setup
)
1326 #ifdef CONFIG_RCU_LAZY
1327 lazy_rcu_shrinker
= shrinker_alloc(0, "rcu-lazy");
1328 if (!lazy_rcu_shrinker
) {
1329 pr_err("Failed to allocate lazy_rcu shrinker!\n");
1331 lazy_rcu_shrinker
->count_objects
= lazy_rcu_shrink_count
;
1332 lazy_rcu_shrinker
->scan_objects
= lazy_rcu_shrink_scan
;
1334 shrinker_register(lazy_rcu_shrinker
);
1336 #endif // #ifdef CONFIG_RCU_LAZY
1338 if (!cpumask_subset(rcu_nocb_mask
, cpu_possible_mask
)) {
1339 pr_info("\tNote: kernel parameter 'rcu_nocbs=', 'nohz_full', or 'isolcpus=' contains nonexistent CPUs.\n");
1340 cpumask_and(rcu_nocb_mask
, cpu_possible_mask
,
1343 if (cpumask_empty(rcu_nocb_mask
))
1344 pr_info("\tOffload RCU callbacks from CPUs: (none).\n");
1346 pr_info("\tOffload RCU callbacks from CPUs: %*pbl.\n",
1347 cpumask_pr_args(rcu_nocb_mask
));
1349 pr_info("\tPoll for callbacks from no-CBs CPUs.\n");
1351 for_each_cpu(cpu
, rcu_nocb_mask
) {
1352 rdp
= per_cpu_ptr(&rcu_data
, cpu
);
1353 if (rcu_segcblist_empty(&rdp
->cblist
))
1354 rcu_segcblist_init(&rdp
->cblist
);
1355 rcu_segcblist_set_flags(&rdp
->cblist
, SEGCBLIST_OFFLOADED
);
1357 rcu_organize_nocb_kthreads();
1360 /* Initialize per-rcu_data variables for no-CBs CPUs. */
1361 static void __init
rcu_boot_init_nocb_percpu_data(struct rcu_data
*rdp
)
1363 init_swait_queue_head(&rdp
->nocb_cb_wq
);
1364 init_swait_queue_head(&rdp
->nocb_gp_wq
);
1365 init_swait_queue_head(&rdp
->nocb_state_wq
);
1366 raw_spin_lock_init(&rdp
->nocb_lock
);
1367 raw_spin_lock_init(&rdp
->nocb_bypass_lock
);
1368 raw_spin_lock_init(&rdp
->nocb_gp_lock
);
1369 timer_setup(&rdp
->nocb_timer
, do_nocb_deferred_wakeup_timer
, 0);
1370 rcu_cblist_init(&rdp
->nocb_bypass
);
1371 WRITE_ONCE(rdp
->lazy_len
, 0);
1372 mutex_init(&rdp
->nocb_gp_kthread_mutex
);
1376 * If the specified CPU is a no-CBs CPU that does not already have its
1377 * rcuo CB kthread, spawn it. Additionally, if the rcuo GP kthread
1378 * for this CPU's group has not yet been created, spawn it as well.
1380 static void rcu_spawn_cpu_nocb_kthread(int cpu
)
1382 struct rcu_data
*rdp
= per_cpu_ptr(&rcu_data
, cpu
);
1383 struct rcu_data
*rdp_gp
;
1384 struct task_struct
*t
;
1385 struct sched_param sp
;
1387 if (!rcu_scheduler_fully_active
|| !rcu_state
.nocb_is_setup
)
1390 /* If there already is an rcuo kthread, then nothing to do. */
1391 if (rdp
->nocb_cb_kthread
)
1394 /* If we didn't spawn the GP kthread first, reorganize! */
1395 sp
.sched_priority
= kthread_prio
;
1396 rdp_gp
= rdp
->nocb_gp_rdp
;
1397 mutex_lock(&rdp_gp
->nocb_gp_kthread_mutex
);
1398 if (!rdp_gp
->nocb_gp_kthread
) {
1399 t
= kthread_run(rcu_nocb_gp_kthread
, rdp_gp
,
1400 "rcuog/%d", rdp_gp
->cpu
);
1401 if (WARN_ONCE(IS_ERR(t
), "%s: Could not start rcuo GP kthread, OOM is now expected behavior\n", __func__
)) {
1402 mutex_unlock(&rdp_gp
->nocb_gp_kthread_mutex
);
1405 WRITE_ONCE(rdp_gp
->nocb_gp_kthread
, t
);
1407 sched_setscheduler_nocheck(t
, SCHED_FIFO
, &sp
);
1409 mutex_unlock(&rdp_gp
->nocb_gp_kthread_mutex
);
1411 /* Spawn the kthread for this CPU. */
1412 t
= kthread_create(rcu_nocb_cb_kthread
, rdp
,
1413 "rcuo%c/%d", rcu_state
.abbr
, cpu
);
1414 if (WARN_ONCE(IS_ERR(t
), "%s: Could not start rcuo CB kthread, OOM is now expected behavior\n", __func__
))
1417 if (rcu_rdp_is_offloaded(rdp
))
1422 if (IS_ENABLED(CONFIG_RCU_NOCB_CPU_CB_BOOST
) && kthread_prio
)
1423 sched_setscheduler_nocheck(t
, SCHED_FIFO
, &sp
);
1425 WRITE_ONCE(rdp
->nocb_cb_kthread
, t
);
1426 WRITE_ONCE(rdp
->nocb_gp_kthread
, rdp_gp
->nocb_gp_kthread
);
1431 * No need to protect against concurrent rcu_barrier()
1432 * because the number of callbacks should be 0 for a non-boot CPU,
1433 * therefore rcu_barrier() shouldn't even try to grab the nocb_lock.
1434 * But hold nocb_mutex to avoid nocb_lock imbalance from shrinker.
1436 WARN_ON_ONCE(system_state
> SYSTEM_BOOTING
&& rcu_segcblist_n_cbs(&rdp
->cblist
));
1437 mutex_lock(&rcu_state
.nocb_mutex
);
1438 if (rcu_rdp_is_offloaded(rdp
)) {
1439 rcu_nocb_rdp_deoffload(rdp
);
1440 cpumask_clear_cpu(cpu
, rcu_nocb_mask
);
1442 mutex_unlock(&rcu_state
.nocb_mutex
);
1445 /* How many CB CPU IDs per GP kthread? Default of -1 for sqrt(nr_cpu_ids). */
1446 static int rcu_nocb_gp_stride
= -1;
1447 module_param(rcu_nocb_gp_stride
, int, 0444);
1450 * Initialize GP-CB relationships for all no-CBs CPU.
1452 static void __init
rcu_organize_nocb_kthreads(void)
1455 bool firsttime
= true;
1456 bool gotnocbs
= false;
1457 bool gotnocbscbs
= true;
1458 int ls
= rcu_nocb_gp_stride
;
1459 int nl
= 0; /* Next GP kthread. */
1460 struct rcu_data
*rdp
;
1461 struct rcu_data
*rdp_gp
= NULL
; /* Suppress misguided gcc warn. */
1463 if (!cpumask_available(rcu_nocb_mask
))
1466 ls
= nr_cpu_ids
/ int_sqrt(nr_cpu_ids
);
1467 rcu_nocb_gp_stride
= ls
;
1471 * Each pass through this loop sets up one rcu_data structure.
1472 * Should the corresponding CPU come online in the future, then
1473 * we will spawn the needed set of rcu_nocb_kthread() kthreads.
1475 for_each_possible_cpu(cpu
) {
1476 rdp
= per_cpu_ptr(&rcu_data
, cpu
);
1477 if (rdp
->cpu
>= nl
) {
1478 /* New GP kthread, set up for CBs & next GP. */
1480 nl
= DIV_ROUND_UP(rdp
->cpu
+ 1, ls
) * ls
;
1482 INIT_LIST_HEAD(&rdp
->nocb_head_rdp
);
1485 pr_cont("%s\n", gotnocbscbs
1486 ? "" : " (self only)");
1487 gotnocbscbs
= false;
1489 pr_alert("%s: No-CB GP kthread CPU %d:",
1493 /* Another CB kthread, link to previous GP kthread. */
1496 pr_cont(" %d", cpu
);
1498 rdp
->nocb_gp_rdp
= rdp_gp
;
1499 if (cpumask_test_cpu(cpu
, rcu_nocb_mask
))
1500 list_add_tail(&rdp
->nocb_entry_rdp
, &rdp_gp
->nocb_head_rdp
);
1502 if (gotnocbs
&& dump_tree
)
1503 pr_cont("%s\n", gotnocbscbs
? "" : " (self only)");
1507 * Bind the current task to the offloaded CPUs. If there are no offloaded
1508 * CPUs, leave the task unbound. Splat if the bind attempt fails.
1510 void rcu_bind_current_to_nocb(void)
1512 if (cpumask_available(rcu_nocb_mask
) && !cpumask_empty(rcu_nocb_mask
))
1513 WARN_ON(sched_setaffinity(current
->pid
, rcu_nocb_mask
));
1515 EXPORT_SYMBOL_GPL(rcu_bind_current_to_nocb
);
1517 // The ->on_cpu field is available only in CONFIG_SMP=y, so...
1519 static char *show_rcu_should_be_on_cpu(struct task_struct
*tsp
)
1521 return tsp
&& task_is_running(tsp
) && !tsp
->on_cpu
? "!" : "";
1523 #else // #ifdef CONFIG_SMP
1524 static char *show_rcu_should_be_on_cpu(struct task_struct
*tsp
)
1528 #endif // #else #ifdef CONFIG_SMP
1531 * Dump out nocb grace-period kthread state for the specified rcu_data
1534 static void show_rcu_nocb_gp_state(struct rcu_data
*rdp
)
1536 struct rcu_node
*rnp
= rdp
->mynode
;
1538 pr_info("nocb GP %d %c%c%c%c%c %c[%c%c] %c%c:%ld rnp %d:%d %lu %c CPU %d%s\n",
1540 "kK"[!!rdp
->nocb_gp_kthread
],
1541 "lL"[raw_spin_is_locked(&rdp
->nocb_gp_lock
)],
1542 "dD"[!!rdp
->nocb_defer_wakeup
],
1543 "tT"[timer_pending(&rdp
->nocb_timer
)],
1544 "sS"[!!rdp
->nocb_gp_sleep
],
1545 ".W"[swait_active(&rdp
->nocb_gp_wq
)],
1546 ".W"[swait_active(&rnp
->nocb_gp_wq
[0])],
1547 ".W"[swait_active(&rnp
->nocb_gp_wq
[1])],
1548 ".B"[!!rdp
->nocb_gp_bypass
],
1549 ".G"[!!rdp
->nocb_gp_gp
],
1550 (long)rdp
->nocb_gp_seq
,
1551 rnp
->grplo
, rnp
->grphi
, READ_ONCE(rdp
->nocb_gp_loops
),
1552 rdp
->nocb_gp_kthread
? task_state_to_char(rdp
->nocb_gp_kthread
) : '.',
1553 rdp
->nocb_gp_kthread
? (int)task_cpu(rdp
->nocb_gp_kthread
) : -1,
1554 show_rcu_should_be_on_cpu(rdp
->nocb_gp_kthread
));
1557 /* Dump out nocb kthread state for the specified rcu_data structure. */
1558 static void show_rcu_nocb_state(struct rcu_data
*rdp
)
1562 struct rcu_data
*nocb_next_rdp
;
1563 struct rcu_segcblist
*rsclp
= &rdp
->cblist
;
1567 if (rdp
->nocb_gp_rdp
== rdp
)
1568 show_rcu_nocb_gp_state(rdp
);
1570 nocb_next_rdp
= list_next_or_null_rcu(&rdp
->nocb_gp_rdp
->nocb_head_rdp
,
1571 &rdp
->nocb_entry_rdp
,
1575 sprintf(bufw
, "%ld", rsclp
->gp_seq
[RCU_WAIT_TAIL
]);
1576 sprintf(bufr
, "%ld", rsclp
->gp_seq
[RCU_NEXT_READY_TAIL
]);
1577 pr_info(" CB %d^%d->%d %c%c%c%c%c F%ld L%ld C%d %c%c%s%c%s%c%c q%ld %c CPU %d%s\n",
1578 rdp
->cpu
, rdp
->nocb_gp_rdp
->cpu
,
1579 nocb_next_rdp
? nocb_next_rdp
->cpu
: -1,
1580 "kK"[!!rdp
->nocb_cb_kthread
],
1581 "bB"[raw_spin_is_locked(&rdp
->nocb_bypass_lock
)],
1582 "lL"[raw_spin_is_locked(&rdp
->nocb_lock
)],
1583 "sS"[!!rdp
->nocb_cb_sleep
],
1584 ".W"[swait_active(&rdp
->nocb_cb_wq
)],
1585 jiffies
- rdp
->nocb_bypass_first
,
1586 jiffies
- rdp
->nocb_nobypass_last
,
1587 rdp
->nocb_nobypass_count
,
1588 ".D"[rcu_segcblist_ready_cbs(rsclp
)],
1589 ".W"[!rcu_segcblist_segempty(rsclp
, RCU_WAIT_TAIL
)],
1590 rcu_segcblist_segempty(rsclp
, RCU_WAIT_TAIL
) ? "" : bufw
,
1591 ".R"[!rcu_segcblist_segempty(rsclp
, RCU_NEXT_READY_TAIL
)],
1592 rcu_segcblist_segempty(rsclp
, RCU_NEXT_READY_TAIL
) ? "" : bufr
,
1593 ".N"[!rcu_segcblist_segempty(rsclp
, RCU_NEXT_TAIL
)],
1594 ".B"[!!rcu_cblist_n_cbs(&rdp
->nocb_bypass
)],
1595 rcu_segcblist_n_cbs(&rdp
->cblist
),
1596 rdp
->nocb_cb_kthread
? task_state_to_char(rdp
->nocb_cb_kthread
) : '.',
1597 rdp
->nocb_cb_kthread
? (int)task_cpu(rdp
->nocb_cb_kthread
) : -1,
1598 show_rcu_should_be_on_cpu(rdp
->nocb_cb_kthread
));
1600 /* It is OK for GP kthreads to have GP state. */
1601 if (rdp
->nocb_gp_rdp
== rdp
)
1604 waslocked
= raw_spin_is_locked(&rdp
->nocb_gp_lock
);
1605 wassleep
= swait_active(&rdp
->nocb_gp_wq
);
1606 if (!rdp
->nocb_gp_sleep
&& !waslocked
&& !wassleep
)
1607 return; /* Nothing untoward. */
1609 pr_info(" nocb GP activity on CB-only CPU!!! %c%c%c %c\n",
1611 "dD"[!!rdp
->nocb_defer_wakeup
],
1612 "sS"[!!rdp
->nocb_gp_sleep
],
1616 #else /* #ifdef CONFIG_RCU_NOCB_CPU */
1618 /* No ->nocb_lock to acquire. */
1619 static void rcu_nocb_lock(struct rcu_data
*rdp
)
1623 /* No ->nocb_lock to release. */
1624 static void rcu_nocb_unlock(struct rcu_data
*rdp
)
1628 /* No ->nocb_lock to release. */
1629 static void rcu_nocb_unlock_irqrestore(struct rcu_data
*rdp
,
1630 unsigned long flags
)
1632 local_irq_restore(flags
);
1635 /* Lockdep check that ->cblist may be safely accessed. */
1636 static void rcu_lockdep_assert_cblist_protected(struct rcu_data
*rdp
)
1638 lockdep_assert_irqs_disabled();
1641 static void rcu_nocb_gp_cleanup(struct swait_queue_head
*sq
)
1645 static struct swait_queue_head
*rcu_nocb_gp_get(struct rcu_node
*rnp
)
1650 static void rcu_init_one_nocb(struct rcu_node
*rnp
)
1654 static bool wake_nocb_gp(struct rcu_data
*rdp
, bool force
)
1659 static bool rcu_nocb_flush_bypass(struct rcu_data
*rdp
, struct rcu_head
*rhp
,
1660 unsigned long j
, bool lazy
)
1665 static void call_rcu_nocb(struct rcu_data
*rdp
, struct rcu_head
*head
,
1666 rcu_callback_t func
, unsigned long flags
, bool lazy
)
1668 WARN_ON_ONCE(1); /* Should be dead code! */
1671 static void __call_rcu_nocb_wake(struct rcu_data
*rdp
, bool was_empty
,
1672 unsigned long flags
)
1674 WARN_ON_ONCE(1); /* Should be dead code! */
1677 static void __init
rcu_boot_init_nocb_percpu_data(struct rcu_data
*rdp
)
1681 static int rcu_nocb_need_deferred_wakeup(struct rcu_data
*rdp
, int level
)
1686 static bool do_nocb_deferred_wakeup(struct rcu_data
*rdp
)
1691 static void rcu_spawn_cpu_nocb_kthread(int cpu
)
1695 static void show_rcu_nocb_state(struct rcu_data
*rdp
)
1699 #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */