/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Task-based RCU implementations.
 *
 * Copyright (C) 2020 Paul E. McKenney
 */

#ifdef CONFIG_TASKS_RCU_GENERIC
#include "rcu_segcblist.h"

////////////////////////////////////////////////////////////////////////
//
// Generic data structures.

typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *rtp);
typedef void (*pregp_func_t)(struct list_head *hop);
typedef void (*pertask_func_t)(struct task_struct *t, struct list_head *hop);
typedef void (*postscan_func_t)(struct list_head *hop);
typedef void (*holdouts_func_t)(struct list_head *hop, bool ndrpt, bool *frptp);
typedef void (*postgp_func_t)(struct rcu_tasks *rtp);

/**
 * struct rcu_tasks_percpu - Per-CPU component of definition for a Tasks-RCU-like mechanism.
 * @cblist: Callback list.
 * @lock: Lock protecting per-CPU callback list.
 * @rtp_jiffies: Jiffies counter value for statistics.
 * @lazy_timer: Timer to unlazify callbacks.
 * @urgent_gp: Number of additional non-lazy grace periods.
 * @rtp_n_lock_retries: Rough lock-contention statistic.
 * @rtp_work: Work queue for invoking callbacks.
 * @rtp_irq_work: IRQ work queue for deferred wakeups.
 * @barrier_q_head: RCU callback for barrier operation.
 * @rtp_blkd_tasks: List of tasks blocked as readers.
 * @rtp_exit_list: List of tasks in the latter portion of do_exit().
 * @cpu: CPU number corresponding to this entry.
 * @index: Index of this CPU in rtpcp_array of the rcu_tasks structure.
 * @rtpp: Pointer to the rcu_tasks structure.
 */
struct rcu_tasks_percpu {
	struct rcu_segcblist cblist;
	raw_spinlock_t __private lock;
	unsigned long rtp_jiffies;
	unsigned long rtp_n_lock_retries;
	struct timer_list lazy_timer;
	unsigned int urgent_gp;
	struct work_struct rtp_work;
	struct irq_work rtp_irq_work;
	struct rcu_head barrier_q_head;
	struct list_head rtp_blkd_tasks;
	struct list_head rtp_exit_list;
	int cpu;
	int index;
	struct rcu_tasks *rtpp;
};

/**
 * struct rcu_tasks - Definition for a Tasks-RCU-like mechanism.
 * @cbs_wait: RCU wait allowing a new callback to get kthread's attention.
 * @cbs_gbl_lock: Lock protecting callback list.
 * @tasks_gp_mutex: Mutex protecting grace period, needed during mid-boot dead zone.
 * @gp_func: This flavor's grace-period-wait function.
 * @gp_state: Grace period's most recent state transition (debugging).
 * @gp_sleep: Per-grace-period sleep to prevent CPU-bound looping.
 * @init_fract: Initial backoff sleep interval.
 * @gp_jiffies: Time of last @gp_state transition.
 * @gp_start: Most recent grace-period start in jiffies.
 * @tasks_gp_seq: Number of grace periods completed since boot in upper bits.
 * @n_ipis: Number of IPIs sent to encourage grace periods to end.
 * @n_ipis_fails: Number of IPI-send failures.
 * @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
 * @lazy_jiffies: Number of jiffies to allow callbacks to be lazy.
 * @pregp_func: This flavor's pre-grace-period function (optional).
 * @pertask_func: This flavor's per-task scan function (optional).
 * @postscan_func: This flavor's post-task scan function (optional).
 * @holdouts_func: This flavor's holdout-list scan function (optional).
 * @postgp_func: This flavor's post-grace-period function (optional).
 * @call_func: This flavor's call_rcu()-equivalent function.
 * @wait_state: Task state for synchronous grace-period waits (default TASK_UNINTERRUPTIBLE).
 * @rtpcpu: This flavor's rcu_tasks_percpu structure.
 * @rtpcp_array: Array of pointers to rcu_tasks_percpu structure of CPUs in cpu_possible_mask.
 * @percpu_enqueue_shift: Shift down CPU ID this much when enqueuing callbacks.
 * @percpu_enqueue_lim: Number of per-CPU callback queues in use for enqueuing.
 * @percpu_dequeue_lim: Number of per-CPU callback queues in use for dequeuing.
 * @percpu_dequeue_gpseq: RCU grace-period number to propagate enqueue limit to dequeuers.
 * @barrier_q_mutex: Serialize barrier operations.
 * @barrier_q_count: Number of queues being waited on.
 * @barrier_q_completion: Barrier wait/wakeup mechanism.
 * @barrier_q_seq: Sequence number for barrier operations.
 * @barrier_q_start: Most recent barrier start in jiffies.
 * @name: This flavor's textual name.
 * @kname: This flavor's kthread name.
 */
struct rcu_tasks {
	struct rcuwait cbs_wait;
	raw_spinlock_t cbs_gbl_lock;
	struct mutex tasks_gp_mutex;
	int gp_state;
	int gp_sleep;
	int init_fract;
	unsigned long gp_jiffies;
	unsigned long gp_start;
	unsigned long tasks_gp_seq;
	unsigned long n_ipis;
	unsigned long n_ipis_fails;
	struct task_struct *kthread_ptr;
	unsigned long lazy_jiffies;
	rcu_tasks_gp_func_t gp_func;
	pregp_func_t pregp_func;
	pertask_func_t pertask_func;
	postscan_func_t postscan_func;
	holdouts_func_t holdouts_func;
	postgp_func_t postgp_func;
	call_rcu_func_t call_func;
	unsigned int wait_state;
	struct rcu_tasks_percpu __percpu *rtpcpu;
	struct rcu_tasks_percpu **rtpcp_array;
	int percpu_enqueue_shift;
	int percpu_enqueue_lim;
	int percpu_dequeue_lim;
	unsigned long percpu_dequeue_gpseq;
	struct mutex barrier_q_mutex;
	atomic_t barrier_q_count;
	struct completion barrier_q_completion;
	unsigned long barrier_q_seq;
	unsigned long barrier_q_start;
	char *name;
	char *kname;
};

static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp);

#define DEFINE_RCU_TASKS(rt_name, gp, call, n)						\
static DEFINE_PER_CPU(struct rcu_tasks_percpu, rt_name ## __percpu) = {		\
	.lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name ## __percpu.cbs_pcpu_lock),		\
	.rtp_irq_work = IRQ_WORK_INIT_HARD(call_rcu_tasks_iw_wakeup),			\
};											\
static struct rcu_tasks rt_name =							\
{											\
	.cbs_wait = __RCUWAIT_INITIALIZER(rt_name.wait),				\
	.cbs_gbl_lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name.cbs_gbl_lock),		\
	.tasks_gp_mutex = __MUTEX_INITIALIZER(rt_name.tasks_gp_mutex),			\
	.gp_func = gp,									\
	.call_func = call,								\
	.wait_state = TASK_UNINTERRUPTIBLE,						\
	.rtpcpu = &rt_name ## __percpu,							\
	.lazy_jiffies = DIV_ROUND_UP(HZ, 4),						\
	.name = n,									\
	.percpu_enqueue_shift = order_base_2(CONFIG_NR_CPUS),				\
	.percpu_enqueue_lim = 1,							\
	.percpu_dequeue_lim = 1,							\
	.barrier_q_mutex = __MUTEX_INITIALIZER(rt_name.barrier_q_mutex),		\
	.barrier_q_seq = (0UL - 50UL) << RCU_SEQ_CTR_SHIFT,				\
	.kname = #rt_name,								\
}
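
/*
 * For example, the classic Tasks RCU flavor later in this file is
 * instantiated as:
 *
 *	void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func);
 *	DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks");
 *
 * This expands to one struct rcu_tasks_percpu per CPU plus a single
 * struct rcu_tasks named rcu_tasks whose gp_func, call_func, and name
 * fields come from the macro arguments.
 */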

#ifdef CONFIG_TASKS_RCU

/* Report delay of scan exiting tasklist in rcu_tasks_postscan(). */
static void tasks_rcu_exit_srcu_stall(struct timer_list *unused);
static DEFINE_TIMER(tasks_rcu_exit_srcu_stall_timer, tasks_rcu_exit_srcu_stall);
#endif

/* Avoid IPIing CPUs early in the grace period. */
#define RCU_TASK_IPI_DELAY (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) ? HZ / 2 : 0)
static int rcu_task_ipi_delay __read_mostly = RCU_TASK_IPI_DELAY;
module_param(rcu_task_ipi_delay, int, 0644);

/* Control stall timeouts.  Disable with <= 0, otherwise jiffies till stall. */
#define RCU_TASK_BOOT_STALL_TIMEOUT (HZ * 30)
#define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
module_param(rcu_task_stall_timeout, int, 0644);
#define RCU_TASK_STALL_INFO (HZ * 10)
static int rcu_task_stall_info __read_mostly = RCU_TASK_STALL_INFO;
module_param(rcu_task_stall_info, int, 0644);
static int rcu_task_stall_info_mult __read_mostly = 3;
module_param(rcu_task_stall_info_mult, int, 0444);

static int rcu_task_enqueue_lim __read_mostly = -1;
module_param(rcu_task_enqueue_lim, int, 0444);

static bool rcu_task_cb_adjust;
static int rcu_task_contend_lim __read_mostly = 100;
module_param(rcu_task_contend_lim, int, 0444);
static int rcu_task_collapse_lim __read_mostly = 10;
module_param(rcu_task_collapse_lim, int, 0444);
static int rcu_task_lazy_lim __read_mostly = 32;
module_param(rcu_task_lazy_lim, int, 0444);

static int rcu_task_cpu_ids;

/* RCU tasks grace-period state for debugging. */
#define RTGS_INIT 0
#define RTGS_WAIT_WAIT_CBS 1
#define RTGS_WAIT_GP 2
#define RTGS_PRE_WAIT_GP 3
#define RTGS_SCAN_TASKLIST 4
#define RTGS_POST_SCAN_TASKLIST 5
#define RTGS_WAIT_SCAN_HOLDOUTS 6
#define RTGS_SCAN_HOLDOUTS 7
#define RTGS_POST_GP 8
#define RTGS_WAIT_READERS 9
#define RTGS_INVOKE_CBS 10
#define RTGS_WAIT_CBS 11
#ifndef CONFIG_TINY_RCU
static const char * const rcu_tasks_gp_state_names[] = {
	"RTGS_INIT",
	"RTGS_WAIT_WAIT_CBS",
	"RTGS_WAIT_GP",
	"RTGS_PRE_WAIT_GP",
	"RTGS_SCAN_TASKLIST",
	"RTGS_POST_SCAN_TASKLIST",
	"RTGS_WAIT_SCAN_HOLDOUTS",
	"RTGS_SCAN_HOLDOUTS",
	"RTGS_POST_GP",
	"RTGS_WAIT_READERS",
	"RTGS_INVOKE_CBS",
	"RTGS_WAIT_CBS",
};
#endif /* #ifndef CONFIG_TINY_RCU */

////////////////////////////////////////////////////////////////////////
//
// Generic code.

static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp);

/* Record grace-period phase and time. */
static void set_tasks_gp_state(struct rcu_tasks *rtp, int newstate)
{
	rtp->gp_state = newstate;
	rtp->gp_jiffies = jiffies;
}

#ifndef CONFIG_TINY_RCU
/* Return state name. */
static const char *tasks_gp_state_getname(struct rcu_tasks *rtp)
{
	int i = data_race(rtp->gp_state); // Let KCSAN detect update races
	int j = READ_ONCE(i); // Prevent the compiler from reading twice

	if (j >= ARRAY_SIZE(rcu_tasks_gp_state_names))
		return "???";
	return rcu_tasks_gp_state_names[j];
}
#endif /* #ifndef CONFIG_TINY_RCU */

// Initialize per-CPU callback lists for the specified flavor of
// Tasks RCU.  Do not enqueue callbacks before this function is invoked.
static void cblist_init_generic(struct rcu_tasks *rtp)
{
	int cpu;
	int lim;
	int shift;
	int maxcpu = -1;
	int index = 0;

	if (rcu_task_enqueue_lim < 0) {
		rcu_task_enqueue_lim = 1;
		rcu_task_cb_adjust = true;
	} else if (rcu_task_enqueue_lim == 0) {
		rcu_task_enqueue_lim = 1;
	}
	lim = rcu_task_enqueue_lim;

	rtp->rtpcp_array = kcalloc(num_possible_cpus(), sizeof(struct rcu_tasks_percpu *), GFP_KERNEL);
	BUG_ON(!rtp->rtpcp_array);

	for_each_possible_cpu(cpu) {
		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);

		WARN_ON_ONCE(!rtpcp);
		if (cpu)
			raw_spin_lock_init(&ACCESS_PRIVATE(rtpcp, lock));
		if (rcu_segcblist_empty(&rtpcp->cblist))
			rcu_segcblist_init(&rtpcp->cblist);
		INIT_WORK(&rtpcp->rtp_work, rcu_tasks_invoke_cbs_wq);
		rtpcp->cpu = cpu;
		rtpcp->rtpp = rtp;
		rtpcp->index = index;
		rtp->rtpcp_array[index] = rtpcp;
		index++;
		if (!rtpcp->rtp_blkd_tasks.next)
			INIT_LIST_HEAD(&rtpcp->rtp_blkd_tasks);
		if (!rtpcp->rtp_exit_list.next)
			INIT_LIST_HEAD(&rtpcp->rtp_exit_list);
		rtpcp->barrier_q_head.next = &rtpcp->barrier_q_head;
		maxcpu = cpu;
	}

	rcu_task_cpu_ids = maxcpu + 1;
	if (lim > rcu_task_cpu_ids)
		lim = rcu_task_cpu_ids;
	shift = ilog2(rcu_task_cpu_ids / lim);
	if (((rcu_task_cpu_ids - 1) >> shift) >= lim)
		shift++;
	WRITE_ONCE(rtp->percpu_enqueue_shift, shift);
	WRITE_ONCE(rtp->percpu_dequeue_lim, lim);
	smp_store_release(&rtp->percpu_enqueue_lim, lim);

	pr_info("%s: Setting shift to %d and lim to %d rcu_task_cb_adjust=%d rcu_task_cpu_ids=%d.\n",
		rtp->name, data_race(rtp->percpu_enqueue_shift), data_race(rtp->percpu_enqueue_lim),
		rcu_task_cb_adjust, rcu_task_cpu_ids);
}
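
/*
 * Worked example of the shift computation above: with rcu_task_cpu_ids = 6
 * and lim = 2, ilog2(6 / 2) = 1, but (6 - 1) >> 1 = 2, which is not less
 * than lim, so the shift is bumped to 2.  Callbacks from CPUs 0-3 then map
 * to queue 0 and callbacks from CPUs 4-5 map to queue 1, so no more than
 * lim queues are ever chosen by call_rcu_tasks_generic().
 */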

// Compute wakeup time for lazy callback timer.
static unsigned long rcu_tasks_lazy_time(struct rcu_tasks *rtp)
{
	return jiffies + rtp->lazy_jiffies;
}

// Timer handler that unlazifies lazy callbacks.
static void call_rcu_tasks_generic_timer(struct timer_list *tlp)
{
	unsigned long flags;
	bool needwake = false;
	struct rcu_tasks *rtp;
	struct rcu_tasks_percpu *rtpcp = from_timer(rtpcp, tlp, lazy_timer);

	rtp = rtpcp->rtpp;
	raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
	if (!rcu_segcblist_empty(&rtpcp->cblist) && rtp->lazy_jiffies) {
		if (!rtpcp->urgent_gp)
			rtpcp->urgent_gp = 1;
		needwake = true;
		mod_timer(&rtpcp->lazy_timer, rcu_tasks_lazy_time(rtp));
	}
	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
	if (needwake)
		rcuwait_wake_up(&rtp->cbs_wait);
}

// IRQ-work handler that does deferred wakeup for call_rcu_tasks_generic().
static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp)
{
	struct rcu_tasks *rtp;
	struct rcu_tasks_percpu *rtpcp = container_of(iwp, struct rcu_tasks_percpu, rtp_irq_work);

	rtp = rtpcp->rtpp;
	rcuwait_wake_up(&rtp->cbs_wait);
}

// Enqueue a callback for the specified flavor of Tasks RCU.
static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
				   struct rcu_tasks *rtp)
{
	int chosen_cpu;
	unsigned long flags;
	bool havekthread = smp_load_acquire(&rtp->kthread_ptr);
	int ideal_cpu;
	unsigned long j;
	bool needadjust = false;
	bool needwake;
	struct rcu_tasks_percpu *rtpcp;

	rhp->next = NULL;
	rhp->func = func;
	local_irq_save(flags);
	rcu_read_lock();
	ideal_cpu = smp_processor_id() >> READ_ONCE(rtp->percpu_enqueue_shift);
	chosen_cpu = cpumask_next(ideal_cpu - 1, cpu_possible_mask);
	WARN_ON_ONCE(chosen_cpu >= rcu_task_cpu_ids);
	rtpcp = per_cpu_ptr(rtp->rtpcpu, chosen_cpu);
	if (!raw_spin_trylock_rcu_node(rtpcp)) { // irqs already disabled.
		raw_spin_lock_rcu_node(rtpcp); // irqs already disabled.
		j = jiffies;
		if (rtpcp->rtp_jiffies != j) {
			rtpcp->rtp_jiffies = j;
			rtpcp->rtp_n_lock_retries = 0;
		}
		if (rcu_task_cb_adjust && ++rtpcp->rtp_n_lock_retries > rcu_task_contend_lim &&
		    READ_ONCE(rtp->percpu_enqueue_lim) != rcu_task_cpu_ids)
			needadjust = true;  // Defer adjustment to avoid deadlock.
	}
	// Queuing callbacks before initialization not yet supported.
	if (WARN_ON_ONCE(!rcu_segcblist_is_enabled(&rtpcp->cblist)))
		rcu_segcblist_init(&rtpcp->cblist);
	needwake = (func == wakeme_after_rcu) ||
		   (rcu_segcblist_n_cbs(&rtpcp->cblist) == rcu_task_lazy_lim);
	if (havekthread && !needwake && !timer_pending(&rtpcp->lazy_timer)) {
		if (rtp->lazy_jiffies)
			mod_timer(&rtpcp->lazy_timer, rcu_tasks_lazy_time(rtp));
		else
			needwake = rcu_segcblist_empty(&rtpcp->cblist);
	}
	if (needwake)
		rtpcp->urgent_gp = 3;
	rcu_segcblist_enqueue(&rtpcp->cblist, rhp);
	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
	if (unlikely(needadjust)) {
		raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
		if (rtp->percpu_enqueue_lim != rcu_task_cpu_ids) {
			WRITE_ONCE(rtp->percpu_enqueue_shift, 0);
			WRITE_ONCE(rtp->percpu_dequeue_lim, rcu_task_cpu_ids);
			smp_store_release(&rtp->percpu_enqueue_lim, rcu_task_cpu_ids);
			pr_info("Switching %s to per-CPU callback queuing.\n", rtp->name);
		}
		raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
	}
	rcu_read_unlock();
	/* We can't create the thread unless interrupts are enabled. */
	if (needwake && READ_ONCE(rtp->kthread_ptr))
		irq_work_queue(&rtpcp->rtp_irq_work);
}

// RCU callback function for rcu_barrier_tasks_generic().
static void rcu_barrier_tasks_generic_cb(struct rcu_head *rhp)
{
	struct rcu_tasks *rtp;
	struct rcu_tasks_percpu *rtpcp;

	rhp->next = rhp; // Mark the callback as having been invoked.
	rtpcp = container_of(rhp, struct rcu_tasks_percpu, barrier_q_head);
	rtp = rtpcp->rtpp;
	if (atomic_dec_and_test(&rtp->barrier_q_count))
		complete(&rtp->barrier_q_completion);
}

// Wait for all in-flight callbacks for the specified RCU Tasks flavor.
// Operates in a manner similar to rcu_barrier().
static void __maybe_unused rcu_barrier_tasks_generic(struct rcu_tasks *rtp)
{
	int cpu;
	unsigned long flags;
	struct rcu_tasks_percpu *rtpcp;
	unsigned long s = rcu_seq_snap(&rtp->barrier_q_seq);

	mutex_lock(&rtp->barrier_q_mutex);
	if (rcu_seq_done(&rtp->barrier_q_seq, s)) {
		mutex_unlock(&rtp->barrier_q_mutex);
		return;
	}
	rtp->barrier_q_start = jiffies;
	rcu_seq_start(&rtp->barrier_q_seq);
	init_completion(&rtp->barrier_q_completion);
	atomic_set(&rtp->barrier_q_count, 2);
	for_each_possible_cpu(cpu) {
		if (cpu >= smp_load_acquire(&rtp->percpu_dequeue_lim))
			break;
		rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
		rtpcp->barrier_q_head.func = rcu_barrier_tasks_generic_cb;
		raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
		if (rcu_segcblist_entrain(&rtpcp->cblist, &rtpcp->barrier_q_head))
			atomic_inc(&rtp->barrier_q_count);
		raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
	}
	if (atomic_sub_and_test(2, &rtp->barrier_q_count))
		complete(&rtp->barrier_q_completion);
	wait_for_completion(&rtp->barrier_q_completion);
	rcu_seq_end(&rtp->barrier_q_seq);
	mutex_unlock(&rtp->barrier_q_mutex);
}

// Advance callbacks and indicate whether either a grace period or
// callback invocation is needed.
static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp)
{
	int cpu;
	int dequeue_limit;
	unsigned long flags;
	bool gpdone = poll_state_synchronize_rcu(rtp->percpu_dequeue_gpseq);
	long n;
	long ncbs = 0;
	long ncbsnz = 0;
	int needgpcb = 0;

	dequeue_limit = smp_load_acquire(&rtp->percpu_dequeue_lim);
	for (cpu = 0; cpu < dequeue_limit; cpu++) {
		if (!cpu_possible(cpu))
			continue;
		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);

		/* Advance and accelerate any new callbacks. */
		if (!rcu_segcblist_n_cbs(&rtpcp->cblist))
			continue;
		raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
		// Should we shrink down to a single callback queue?
		n = rcu_segcblist_n_cbs(&rtpcp->cblist);
		if (n) {
			ncbs += n;
			if (cpu > 0)
				ncbsnz += n;
		}
		rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
		(void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq));
		if (rtpcp->urgent_gp > 0 && rcu_segcblist_pend_cbs(&rtpcp->cblist)) {
			if (rtp->lazy_jiffies)
				rtpcp->urgent_gp--;
			needgpcb |= 0x3;
		} else if (rcu_segcblist_empty(&rtpcp->cblist)) {
			rtpcp->urgent_gp = 0;
		}
		if (rcu_segcblist_ready_cbs(&rtpcp->cblist))
			needgpcb |= 0x1;
		raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
	}

	// Shrink down to a single callback queue if appropriate.
	// This is done in two stages: (1) If there are no more than
	// rcu_task_collapse_lim callbacks on CPU 0 and none on any other
	// CPU, limit enqueueing to CPU 0.  (2) After an RCU grace period,
	// if there has not been an increase in callbacks, limit dequeuing
	// to CPU 0.  Note the matching RCU read-side critical section in
	// call_rcu_tasks_generic().
	if (rcu_task_cb_adjust && ncbs <= rcu_task_collapse_lim) {
		raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
		if (rtp->percpu_enqueue_lim > 1) {
			WRITE_ONCE(rtp->percpu_enqueue_shift, order_base_2(rcu_task_cpu_ids));
			smp_store_release(&rtp->percpu_enqueue_lim, 1);
			rtp->percpu_dequeue_gpseq = get_state_synchronize_rcu();
			pr_info("Starting switch %s to CPU-0 callback queuing.\n", rtp->name);
		}
		raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
	}
	if (rcu_task_cb_adjust && !ncbsnz && gpdone) {
		raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
		if (rtp->percpu_enqueue_lim < rtp->percpu_dequeue_lim) {
			WRITE_ONCE(rtp->percpu_dequeue_lim, 1);
			pr_info("Completing switch %s to CPU-0 callback queuing.\n", rtp->name);
		}
		if (rtp->percpu_dequeue_lim == 1) {
			for (cpu = rtp->percpu_dequeue_lim; cpu < rcu_task_cpu_ids; cpu++) {
				if (!cpu_possible(cpu))
					continue;
				struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);

				WARN_ON_ONCE(rcu_segcblist_n_cbs(&rtpcp->cblist));
			}
		}
		raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
	}

	return needgpcb;
}
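
/*
 * Reading the return value of rcu_tasks_need_gpcb(): bit 0x1 means that
 * some per-CPU list has callbacks ready to invoke, and bit 0x2 means that
 * pending callbacks still require a grace period.  For example, a pending
 * callback on a queue whose urgent_gp count is nonzero sets both bits
 * (0x3), so rcu_tasks_one_gp() below both waits for a grace period and
 * then invokes whatever became ready.
 */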

// Advance callbacks and invoke any that are ready.
static void rcu_tasks_invoke_cbs(struct rcu_tasks *rtp, struct rcu_tasks_percpu *rtpcp)
{
	int cpuwq;
	unsigned long flags;
	int len;
	int index;
	struct rcu_head *rhp;
	struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
	struct rcu_tasks_percpu *rtpcp_next;

	index = rtpcp->index * 2 + 1;
	if (index < num_possible_cpus()) {
		rtpcp_next = rtp->rtpcp_array[index];
		if (rtpcp_next->cpu < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
			cpuwq = rcu_cpu_beenfullyonline(rtpcp_next->cpu) ? rtpcp_next->cpu : WORK_CPU_UNBOUND;
			queue_work_on(cpuwq, system_wq, &rtpcp_next->rtp_work);
			index++;
			if (index < num_possible_cpus()) {
				rtpcp_next = rtp->rtpcp_array[index];
				if (rtpcp_next->cpu < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
					cpuwq = rcu_cpu_beenfullyonline(rtpcp_next->cpu) ? rtpcp_next->cpu : WORK_CPU_UNBOUND;
					queue_work_on(cpuwq, system_wq, &rtpcp_next->rtp_work);
				}
			}
		}
	}

	if (rcu_segcblist_empty(&rtpcp->cblist))
		return;
	raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
	rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
	rcu_segcblist_extract_done_cbs(&rtpcp->cblist, &rcl);
	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
	len = rcl.len;
	for (rhp = rcu_cblist_dequeue(&rcl); rhp; rhp = rcu_cblist_dequeue(&rcl)) {
		debug_rcu_head_callback(rhp);
		local_bh_disable();
		rhp->func(rhp);
		local_bh_enable();
		cond_resched();
	}
	raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
	rcu_segcblist_add_len(&rtpcp->cblist, -len);
	(void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq));
	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
}

// Workqueue flood to advance callbacks and invoke any that are ready.
static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp)
{
	struct rcu_tasks *rtp;
	struct rcu_tasks_percpu *rtpcp = container_of(wp, struct rcu_tasks_percpu, rtp_work);

	rtp = rtpcp->rtpp;
	rcu_tasks_invoke_cbs(rtp, rtpcp);
}

// Wait for one grace period.
static void rcu_tasks_one_gp(struct rcu_tasks *rtp, bool midboot)
{
	int needgpcb;

	mutex_lock(&rtp->tasks_gp_mutex);

	// If there were none, wait a bit and start over.
	if (unlikely(midboot)) {
		needgpcb = 0x2;
	} else {
		mutex_unlock(&rtp->tasks_gp_mutex);
		set_tasks_gp_state(rtp, RTGS_WAIT_CBS);
		rcuwait_wait_event(&rtp->cbs_wait,
				   (needgpcb = rcu_tasks_need_gpcb(rtp)),
				   TASK_IDLE);
		mutex_lock(&rtp->tasks_gp_mutex);
	}

	if (needgpcb & 0x2) {
		// Wait for one grace period.
		set_tasks_gp_state(rtp, RTGS_WAIT_GP);
		rtp->gp_start = jiffies;
		rcu_seq_start(&rtp->tasks_gp_seq);
		rtp->gp_func(rtp);
		rcu_seq_end(&rtp->tasks_gp_seq);
	}

	// Invoke callbacks.
	set_tasks_gp_state(rtp, RTGS_INVOKE_CBS);
	rcu_tasks_invoke_cbs(rtp, per_cpu_ptr(rtp->rtpcpu, 0));
	mutex_unlock(&rtp->tasks_gp_mutex);
}

// RCU-tasks kthread that detects grace periods and invokes callbacks.
static int __noreturn rcu_tasks_kthread(void *arg)
{
	int cpu;
	struct rcu_tasks *rtp = arg;

	for_each_possible_cpu(cpu) {
		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);

		timer_setup(&rtpcp->lazy_timer, call_rcu_tasks_generic_timer, 0);
		rtpcp->urgent_gp = 1;
	}

	/* Run on housekeeping CPUs by default.  Sysadm can move if desired. */
	housekeeping_affine(current, HK_TYPE_RCU);
	smp_store_release(&rtp->kthread_ptr, current); // Let GPs start!

	/*
	 * Each pass through the following loop makes one check for
	 * newly arrived callbacks, and, if there are some, waits for
	 * one RCU-tasks grace period and then invokes the callbacks.
	 * This loop is terminated by the system going down.  ;-)
	 */
	for (;;) {
		// Wait for one grace period and invoke any callbacks
		// that are ready.
		rcu_tasks_one_gp(rtp, false);

		// Paranoid sleep to keep this from entering a tight loop.
		schedule_timeout_idle(rtp->gp_sleep);
	}
}

// Wait for a grace period for the specified flavor of Tasks RCU.
static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp)
{
	/* Complain if the scheduler has not started. */
	if (WARN_ONCE(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
		      "synchronize_%s() called too soon", rtp->name))
		return;

	// If the grace-period kthread is running, use it.
	if (READ_ONCE(rtp->kthread_ptr)) {
		wait_rcu_gp_state(rtp->wait_state, rtp->call_func);
		return;
	}
	rcu_tasks_one_gp(rtp, true);
}

/* Spawn RCU-tasks grace-period kthread. */
static void __init rcu_spawn_tasks_kthread_generic(struct rcu_tasks *rtp)
{
	struct task_struct *t;

	t = kthread_run(rcu_tasks_kthread, rtp, "%s_kthread", rtp->kname);
	if (WARN_ONCE(IS_ERR(t), "%s: Could not start %s grace-period kthread, OOM is now expected behavior\n", __func__, rtp->name))
		return;
	smp_mb(); /* Ensure others see full kthread. */
}

#ifndef CONFIG_TINY_RCU

/*
 * Print any non-default Tasks RCU settings.
 */
static void __init rcu_tasks_bootup_oddness(void)
{
#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
	int rtsimc;

	if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT)
		pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout);
	rtsimc = clamp(rcu_task_stall_info_mult, 1, 10);
	if (rtsimc != rcu_task_stall_info_mult) {
		pr_info("\tTasks-RCU CPU stall info multiplier clamped to %d (rcu_task_stall_info_mult).\n", rtsimc);
		rcu_task_stall_info_mult = rtsimc;
	}
#endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */
#ifdef CONFIG_TASKS_RCU
	pr_info("\tTrampoline variant of Tasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_RCU */
#ifdef CONFIG_TASKS_RUDE_RCU
	pr_info("\tRude variant of Tasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
#ifdef CONFIG_TASKS_TRACE_RCU
	pr_info("\tTracing variant of Tasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
}

/* Dump out rcutorture-relevant state common to all RCU-tasks flavors. */
static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s)
{
	int cpu;
	bool havecbs = false;
	bool haveurgent = false;
	bool haveurgentcbs = false;

	for_each_possible_cpu(cpu) {
		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);

		if (!data_race(rcu_segcblist_empty(&rtpcp->cblist)))
			havecbs = true;
		if (data_race(rtpcp->urgent_gp))
			haveurgent = true;
		if (!data_race(rcu_segcblist_empty(&rtpcp->cblist)) && data_race(rtpcp->urgent_gp))
			haveurgentcbs = true;
		if (havecbs && haveurgent && haveurgentcbs)
			break;
	}
	pr_info("%s: %s(%d) since %lu g:%lu i:%lu/%lu %c%c%c%c l:%lu %s\n",
		rtp->kname,
		tasks_gp_state_getname(rtp), data_race(rtp->gp_state),
		jiffies - data_race(rtp->gp_jiffies),
		data_race(rcu_seq_current(&rtp->tasks_gp_seq)),
		data_race(rtp->n_ipis_fails), data_race(rtp->n_ipis),
		".k"[!!data_race(rtp->kthread_ptr)],
		".C"[havecbs],
		".u"[haveurgent],
		".U"[haveurgentcbs],
		rtp->lazy_jiffies,
		s);
}

/* Dump out more rcutorture-relevant state common to all RCU-tasks flavors. */
static void rcu_tasks_torture_stats_print_generic(struct rcu_tasks *rtp, char *tt,
						   char *tf, char *tst)
{
	cpumask_var_t cm;
	int cpu;
	bool gotcb = false;
	unsigned long j = jiffies;

	pr_alert("%s%s Tasks%s RCU g%ld gp_start %lu gp_jiffies %lu gp_state %d (%s).\n",
		 tt, tf, tst, data_race(rtp->tasks_gp_seq),
		 j - data_race(rtp->gp_start), j - data_race(rtp->gp_jiffies),
		 data_race(rtp->gp_state), tasks_gp_state_getname(rtp));
	pr_alert("\tEnqueue shift %d limit %d Dequeue limit %d gpseq %lu.\n",
		 data_race(rtp->percpu_enqueue_shift),
		 data_race(rtp->percpu_enqueue_lim),
		 data_race(rtp->percpu_dequeue_lim),
		 data_race(rtp->percpu_dequeue_gpseq));
	(void)zalloc_cpumask_var(&cm, GFP_KERNEL);
	pr_alert("\tCallback counts:");
	for_each_possible_cpu(cpu) {
		long n;
		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);

		if (cpumask_available(cm) && !rcu_barrier_cb_is_done(&rtpcp->barrier_q_head))
			cpumask_set_cpu(cpu, cm);
		n = rcu_segcblist_n_cbs(&rtpcp->cblist);
		if (!n)
			continue;
		pr_cont(" %d:%ld", cpu, n);
		gotcb = true;
	}
	if (gotcb)
		pr_cont(".\n");
	else
		pr_cont(" (none).\n");
	pr_alert("\tBarrier seq %lu start %lu count %d holdout CPUs ",
		 data_race(rtp->barrier_q_seq), j - data_race(rtp->barrier_q_start),
		 atomic_read(&rtp->barrier_q_count));
	if (cpumask_available(cm) && !cpumask_empty(cm))
		pr_cont(" %*pbl.\n", cpumask_pr_args(cm));
	else
		pr_cont("(none).\n");
	free_cpumask_var(cm);
}

#endif // #ifndef CONFIG_TINY_RCU

static void exit_tasks_rcu_finish_trace(struct task_struct *t);

#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)

////////////////////////////////////////////////////////////////////////
//
// Shared code between task-list-scanning variants of Tasks RCU.

/* Wait for one RCU-tasks grace period. */
static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
{
	struct task_struct *g;
	int fract;
	LIST_HEAD(holdouts);
	unsigned long j;
	unsigned long lastinfo;
	unsigned long lastreport;
	bool reported = false;
	int rtsi;
	struct task_struct *t;

	set_tasks_gp_state(rtp, RTGS_PRE_WAIT_GP);
	rtp->pregp_func(&holdouts);

	/*
	 * There were callbacks, so we need to wait for an RCU-tasks
	 * grace period.  Start off by scanning the task list for tasks
	 * that are not already voluntarily blocked.  Mark these tasks
	 * and make a list of them in holdouts.
	 */
	set_tasks_gp_state(rtp, RTGS_SCAN_TASKLIST);
	if (rtp->pertask_func) {
		rcu_read_lock();
		for_each_process_thread(g, t)
			rtp->pertask_func(t, &holdouts);
		rcu_read_unlock();
	}

	set_tasks_gp_state(rtp, RTGS_POST_SCAN_TASKLIST);
	rtp->postscan_func(&holdouts);

	/*
	 * Each pass through the following loop scans the list of holdout
	 * tasks, removing any that are no longer holdouts.  When the list
	 * is empty, we are done.
	 */
	lastreport = jiffies;
	lastinfo = lastreport;
	rtsi = READ_ONCE(rcu_task_stall_info);

	// Start off with initial wait and slowly back off to 1 HZ wait.
	fract = rtp->init_fract;

	while (!list_empty(&holdouts)) {
		ktime_t exp;
		bool firstreport;
		bool needreport;
		int rtst;

		// Slowly back off waiting for holdouts
		set_tasks_gp_state(rtp, RTGS_WAIT_SCAN_HOLDOUTS);
		if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
			schedule_timeout_idle(fract);
		} else {
			exp = jiffies_to_nsecs(fract);
			__set_current_state(TASK_IDLE);
			schedule_hrtimeout_range(&exp, jiffies_to_nsecs(HZ / 2), HRTIMER_MODE_REL_HARD);
		}

		if (fract < HZ)
			fract++;

		rtst = READ_ONCE(rcu_task_stall_timeout);
		needreport = rtst > 0 && time_after(jiffies, lastreport + rtst);
		if (needreport) {
			lastreport = jiffies;
			reported = true;
		}
		firstreport = true;
		WARN_ON(signal_pending(current));
		set_tasks_gp_state(rtp, RTGS_SCAN_HOLDOUTS);
		rtp->holdouts_func(&holdouts, needreport, &firstreport);

		// Print pre-stall informational messages if needed.
		j = jiffies;
		if (rtsi > 0 && !reported && time_after(j, lastinfo + rtsi)) {
			lastinfo = j;
			rtsi = rtsi * rcu_task_stall_info_mult;
			pr_info("%s: %s grace period number %lu (since boot) is %lu jiffies old.\n",
				__func__, rtp->kname, rtp->tasks_gp_seq, j - rtp->gp_start);
		}
	}

	set_tasks_gp_state(rtp, RTGS_POST_GP);
	rtp->postgp_func(rtp);
}

#endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */

#ifdef CONFIG_TASKS_RCU

////////////////////////////////////////////////////////////////////////
//
// Simple variant of RCU whose quiescent states are voluntary context
// switch, cond_resched_tasks_rcu_qs(), user-space execution, and idle.
// As such, grace periods can take one good long time.  There are no
// read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
// because this implementation is intended to get the system into a safe
// state for some of the manipulations involved in tracing and the like.
// Finally, this implementation does not support high call_rcu_tasks()
// rates from multiple CPUs.  If this is required, per-CPU callback lists
// should be used.
//
// The implementation uses rcu_tasks_wait_gp(), which relies on function
// pointers in the rcu_tasks structure.  The rcu_spawn_tasks_kthread()
// function sets these function pointers up so that rcu_tasks_wait_gp()
// invokes these functions in this order:
//
// rcu_tasks_pregp_step():
//	Invokes synchronize_rcu() in order to wait for all in-flight
//	t->on_rq and t->nvcsw transitions to complete.  This works because
//	all such transitions are carried out with interrupts disabled.
// rcu_tasks_pertask(), invoked on every non-idle task:
//	For every runnable non-idle task other than the current one, use
//	get_task_struct() to pin down that task, snapshot that task's
//	number of voluntary context switches, and add that task to the
//	holdout list.
// rcu_tasks_postscan():
//	Gather per-CPU lists of tasks in do_exit() to ensure that all
//	tasks that were in the process of exiting (and which thus might
//	not know to synchronize with this RCU Tasks grace period) have
//	completed exiting.  The synchronize_rcu() in rcu_tasks_postgp()
//	will take care of any tasks stuck in the non-preemptible region
//	of do_exit() following its call to exit_tasks_rcu_finish().
// check_all_holdout_tasks(), repeatedly until holdout list is empty:
//	Scans the holdout list, attempting to identify a quiescent state
//	for each task on the list.  If there is a quiescent state, the
//	corresponding task is removed from the holdout list.
// rcu_tasks_postgp():
//	Invokes synchronize_rcu() in order to ensure that all prior
//	t->on_rq and t->nvcsw transitions are seen by all CPUs and tasks
//	to have happened before the end of this RCU Tasks grace period.
//	Again, this works because all such transitions are carried out
//	with interrupts disabled.
//
// For each exiting task, the exit_tasks_rcu_start() and
// exit_tasks_rcu_finish() functions add and remove, respectively, the
// current task to a per-CPU list of tasks that rcu_tasks_postscan() must
// wait on.  This is necessary because rcu_tasks_postscan() must wait on
// tasks that have already been removed from the global list of tasks.
//
// Pre-grace-period update-side code is ordered before the grace period
// via the raw_spin_lock.*rcu_node().  Pre-grace-period read-side code
// is ordered before the grace period via the synchronize_rcu() call in
// rcu_tasks_pregp_step() and by the scheduler's locks and interrupt
// handlers.

/* Pre-grace-period preparation. */
static void rcu_tasks_pregp_step(struct list_head *hop)
{
	/*
	 * Wait for all pre-existing t->on_rq and t->nvcsw transitions
	 * to complete.  Invoking synchronize_rcu() suffices because all
	 * these transitions occur with interrupts disabled.  Without this
	 * synchronize_rcu(), a read-side critical section that started
	 * before the grace period might be incorrectly seen as having
	 * started after the grace period.
	 *
	 * This synchronize_rcu() also dispenses with the need for a
	 * memory barrier on the first store to t->rcu_tasks_holdout,
	 * as it forces the store to happen after the beginning of the
	 * grace period.
	 */
	synchronize_rcu();
}

/* Check for quiescent states since the pregp's synchronize_rcu() */
static bool rcu_tasks_is_holdout(struct task_struct *t)
{
	int cpu;

	/* Has the task been seen voluntarily sleeping? */
	if (!READ_ONCE(t->on_rq))
		return false;

	/*
	 * t->on_rq && !t->se.sched_delayed *could* be considered sleeping but
	 * since it is a spurious state (it will transition into the
	 * traditional blocked state or get woken up without outside
	 * dependencies), not considering it such should only affect timing.
	 *
	 * Be conservative for now and not include it.
	 */

	/*
	 * Idle tasks (or idle injection) within the idle loop are RCU-tasks
	 * quiescent states.  But CPU boot code performed by the idle task
	 * isn't a quiescent state.
	 */
	if (is_idle_task(t))
		return false;

	cpu = task_cpu(t);

	/* Idle tasks on offline CPUs are RCU-tasks quiescent states. */
	if (t == idle_task(cpu) && !rcu_cpu_online(cpu))
		return false;

	return true;
}

/* Per-task initial processing. */
static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop)
{
	if (t != current && rcu_tasks_is_holdout(t)) {
		get_task_struct(t);
		t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
		WRITE_ONCE(t->rcu_tasks_holdout, true);
		list_add(&t->rcu_tasks_holdout_list, hop);
	}
}

void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func);
DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks");

/* Processing between scanning tasklist and draining the holdout list. */
static void rcu_tasks_postscan(struct list_head *hop)
{
	int cpu;
	int rtsi = READ_ONCE(rcu_task_stall_info);

	if (!IS_ENABLED(CONFIG_TINY_RCU)) {
		tasks_rcu_exit_srcu_stall_timer.expires = jiffies + rtsi;
		add_timer(&tasks_rcu_exit_srcu_stall_timer);
	}

	/*
	 * Exiting tasks may escape the tasklist scan.  Those are vulnerable
	 * until their final schedule() with TASK_DEAD state.  To cope with
	 * this, divide the fragile exit path part in two intersecting
	 * read side critical sections:
	 *
	 * 1) A task_struct list addition before calling exit_notify(),
	 *    which may remove the task from the tasklist, with the
	 *    removal after the final preempt_disable() call in do_exit().
	 *
	 * 2) An _RCU_ read side starting with the final preempt_disable()
	 *    call in do_exit() and ending with the final call to schedule()
	 *    with TASK_DEAD state.
	 *
	 * This handles part 1).  And postgp will handle part 2) with a
	 * call to synchronize_rcu().
	 */
	for_each_possible_cpu(cpu) {
		unsigned long j = jiffies + 1;
		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rcu_tasks.rtpcpu, cpu);
		struct task_struct *t;
		struct task_struct *t1;
		struct list_head tmp;

		raw_spin_lock_irq_rcu_node(rtpcp);
		list_for_each_entry_safe(t, t1, &rtpcp->rtp_exit_list, rcu_tasks_exit_list) {
			if (list_empty(&t->rcu_tasks_holdout_list))
				rcu_tasks_pertask(t, hop);

			// RT kernels need frequent pauses, otherwise
			// pause at least once per pair of jiffies.
			if (!IS_ENABLED(CONFIG_PREEMPT_RT) && time_before(jiffies, j))
				continue;

			// Keep our place in the list while pausing.
			// Nothing else traverses this list, so adding a
			// bare list_head is OK.
			list_add(&tmp, &t->rcu_tasks_exit_list);
			raw_spin_unlock_irq_rcu_node(rtpcp);
			cond_resched(); // For CONFIG_PREEMPT=n kernels
			raw_spin_lock_irq_rcu_node(rtpcp);
			t1 = list_entry(tmp.next, struct task_struct, rcu_tasks_exit_list);
			list_del(&tmp);
			j = jiffies + 1;
		}
		raw_spin_unlock_irq_rcu_node(rtpcp);
	}

	if (!IS_ENABLED(CONFIG_TINY_RCU))
		del_timer_sync(&tasks_rcu_exit_srcu_stall_timer);
}

/* See if tasks are still holding out, complain if so. */
static void check_holdout_task(struct task_struct *t,
			       bool needreport, bool *firstreport)
{
	int cpu;

	if (!READ_ONCE(t->rcu_tasks_holdout) ||
	    t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
	    !rcu_tasks_is_holdout(t) ||
	    (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
	     !is_idle_task(t) && READ_ONCE(t->rcu_tasks_idle_cpu) >= 0)) {
		WRITE_ONCE(t->rcu_tasks_holdout, false);
		list_del_init(&t->rcu_tasks_holdout_list);
		put_task_struct(t);
		return;
	}
	rcu_request_urgent_qs_task(t);
	if (!needreport)
		return;
	if (*firstreport) {
		pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
		*firstreport = false;
	}
	cpu = task_cpu(t);
	pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
		 t, ".I"[is_idle_task(t)],
		 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
		 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
		 data_race(t->rcu_tasks_idle_cpu), cpu);
}

/* Scan the holdout lists for tasks no longer holding out. */
static void check_all_holdout_tasks(struct list_head *hop,
				    bool needreport, bool *firstreport)
{
	struct task_struct *t, *t1;

	list_for_each_entry_safe(t, t1, hop, rcu_tasks_holdout_list) {
		check_holdout_task(t, needreport, firstreport);
	}
}

/* Finish off the Tasks-RCU grace period. */
static void rcu_tasks_postgp(struct rcu_tasks *rtp)
{
	/*
	 * Because ->on_rq and ->nvcsw are not guaranteed to have full
	 * memory barriers prior to them in the schedule() path, memory
	 * reordering on other CPUs could cause their RCU-tasks read-side
	 * critical sections to extend past the end of the grace period.
	 * However, because these ->nvcsw updates are carried out with
	 * interrupts disabled, we can use synchronize_rcu() to force the
	 * needed ordering on all such CPUs.
	 *
	 * This synchronize_rcu() also confines all ->rcu_tasks_holdout
	 * accesses to be within the grace period, avoiding the need for
	 * memory barriers for ->rcu_tasks_holdout accesses.
	 *
	 * In addition, this synchronize_rcu() waits for exiting tasks
	 * to complete their final preempt_disable() region of execution,
	 * enforcing the whole region before tasklist removal until
	 * the final schedule() with TASK_DEAD state to be an RCU TASKS
	 * read side critical section.
	 */
	synchronize_rcu();
}

static void tasks_rcu_exit_srcu_stall(struct timer_list *unused)
{
#ifndef CONFIG_TINY_RCU
	int rtsi;

	rtsi = READ_ONCE(rcu_task_stall_info);
	pr_info("%s: %s grace period number %lu (since boot) gp_state: %s is %lu jiffies old.\n",
		__func__, rcu_tasks.kname, rcu_tasks.tasks_gp_seq,
		tasks_gp_state_getname(&rcu_tasks), jiffies - rcu_tasks.gp_jiffies);
	pr_info("Please check any exiting tasks stuck between calls to exit_tasks_rcu_start() and exit_tasks_rcu_finish()\n");
	tasks_rcu_exit_srcu_stall_timer.expires = jiffies + rtsi;
	add_timer(&tasks_rcu_exit_srcu_stall_timer);
#endif // #ifndef CONFIG_TINY_RCU
}

/**
 * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  call_rcu_tasks() assumes
 * that the read-side critical sections end at a voluntary context
 * switch (not a preemption!), cond_resched_tasks_rcu_qs(), entry into idle,
 * or transition to usermode execution.  As such, there are no read-side
 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
 * this primitive is intended to determine that all tasks have passed
 * through a safe state, not so much for data-structure synchronization.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
{
	call_rcu_tasks_generic(rhp, func, &rcu_tasks);
}
EXPORT_SYMBOL_GPL(call_rcu_tasks);
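
/*
 * Example usage sketch (hypothetical; struct old_tramp and its fields are
 * made up for illustration): free a dynamically allocated trampoline only
 * after every task has passed through a voluntary context switch, so no
 * task can still be executing the old code.
 *
 *	struct old_tramp {
 *		struct rcu_head rh;
 *		void *mem;
 *	};
 *
 *	static void old_tramp_free_cb(struct rcu_head *rhp)
 *	{
 *		struct old_tramp *otp = container_of(rhp, struct old_tramp, rh);
 *
 *		vfree(otp->mem);
 *		kfree(otp);
 *	}
 *
 *	// After unhooking the trampoline from all call sites:
 *	call_rcu_tasks(&otp->rh, old_tramp_free_cb);
 */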

/**
 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu-tasks
 * grace period has elapsed, in other words after all currently
 * executing rcu-tasks read-side critical sections have elapsed.  These
 * read-side critical sections are delimited by calls to schedule(),
 * cond_resched_tasks_rcu_qs(), idle execution, userspace execution, calls
 * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function
 * preambles and profiling hooks.  The synchronize_rcu_tasks() function
 * is not (yet) intended for heavy use from multiple CPUs.
 *
 * See the description of synchronize_rcu() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu_tasks(void)
{
	synchronize_rcu_tasks_generic(&rcu_tasks);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);
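
/*
 * Example usage sketch (hypothetical helper names): the synchronous form
 * of the pattern above, as used by code that can sleep, for example when
 * tearing down a tracing trampoline:
 *
 *	unregister_my_tramp_from_call_sites();	// hypothetical
 *	synchronize_rcu_tasks();		// wait for all tasks to leave it
 *	free_my_tramp();			// hypothetical
 */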

/**
 * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks(void)
{
	rcu_barrier_tasks_generic(&rcu_tasks);
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks);
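
/*
 * Example usage sketch: code that queued callbacks with call_rcu_tasks()
 * must wait for all of them to be invoked before the callback functions
 * themselves can go away, typically from a module's __exit handler
 * (my_module_exit() is hypothetical):
 *
 *	static void __exit my_module_exit(void)
 *	{
 *		// ...stop queuing new callbacks first...
 *		rcu_barrier_tasks();
 *	}
 */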

static int rcu_tasks_lazy_ms = -1;
module_param(rcu_tasks_lazy_ms, int, 0444);

static int __init rcu_spawn_tasks_kthread(void)
{
	rcu_tasks.gp_sleep = HZ / 10;
	rcu_tasks.init_fract = HZ / 10;
	if (rcu_tasks_lazy_ms >= 0)
		rcu_tasks.lazy_jiffies = msecs_to_jiffies(rcu_tasks_lazy_ms);
	rcu_tasks.pregp_func = rcu_tasks_pregp_step;
	rcu_tasks.pertask_func = rcu_tasks_pertask;
	rcu_tasks.postscan_func = rcu_tasks_postscan;
	rcu_tasks.holdouts_func = check_all_holdout_tasks;
	rcu_tasks.postgp_func = rcu_tasks_postgp;
	rcu_tasks.wait_state = TASK_IDLE;
	rcu_spawn_tasks_kthread_generic(&rcu_tasks);
	return 0;
}

#if !defined(CONFIG_TINY_RCU)
void show_rcu_tasks_classic_gp_kthread(void)
{
	show_rcu_tasks_generic_gp_kthread(&rcu_tasks, "");
}
EXPORT_SYMBOL_GPL(show_rcu_tasks_classic_gp_kthread);

void rcu_tasks_torture_stats_print(char *tt, char *tf)
{
	rcu_tasks_torture_stats_print_generic(&rcu_tasks, tt, tf, "");
}
EXPORT_SYMBOL_GPL(rcu_tasks_torture_stats_print);
#endif // !defined(CONFIG_TINY_RCU)

struct task_struct *get_rcu_tasks_gp_kthread(void)
{
	return rcu_tasks.kthread_ptr;
}
EXPORT_SYMBOL_GPL(get_rcu_tasks_gp_kthread);

void rcu_tasks_get_gp_data(int *flags, unsigned long *gp_seq)
{
	*flags = 0;
	*gp_seq = rcu_seq_current(&rcu_tasks.tasks_gp_seq);
}
EXPORT_SYMBOL_GPL(rcu_tasks_get_gp_data);

/*
 * Protect against tasklist scan blind spot while the task is exiting and
 * may be removed from the tasklist.  Do this by adding the task to yet
 * another list.
 *
 * Note that the task will remove itself from this list, so there is no
 * need for get_task_struct(), except in the case where rcu_tasks_pertask()
 * adds it to the holdout list, in which case rcu_tasks_pertask() supplies
 * the needed get_task_struct().
 */
void exit_tasks_rcu_start(void)
{
	unsigned long flags;
	struct rcu_tasks_percpu *rtpcp;
	struct task_struct *t = current;

	WARN_ON_ONCE(!list_empty(&t->rcu_tasks_exit_list));
	preempt_disable();
	rtpcp = this_cpu_ptr(rcu_tasks.rtpcpu);
	t->rcu_tasks_exit_cpu = smp_processor_id();
	raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
	WARN_ON_ONCE(!rtpcp->rtp_exit_list.next);
	list_add(&t->rcu_tasks_exit_list, &rtpcp->rtp_exit_list);
	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
	preempt_enable();
}

/*
 * Remove the task from the "yet another list" because do_exit() is now
 * non-preemptible, allowing synchronize_rcu() to wait beyond this point.
 */
void exit_tasks_rcu_finish(void)
{
	unsigned long flags;
	struct rcu_tasks_percpu *rtpcp;
	struct task_struct *t = current;

	WARN_ON_ONCE(list_empty(&t->rcu_tasks_exit_list));
	rtpcp = per_cpu_ptr(rcu_tasks.rtpcpu, t->rcu_tasks_exit_cpu);
	raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
	list_del_init(&t->rcu_tasks_exit_list);
	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);

	exit_tasks_rcu_finish_trace(t);
}

#else /* #ifdef CONFIG_TASKS_RCU */
void exit_tasks_rcu_start(void) { }
void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); }
#endif /* #else #ifdef CONFIG_TASKS_RCU */

#ifdef CONFIG_TASKS_RUDE_RCU

////////////////////////////////////////////////////////////////////////
//
// "Rude" variant of Tasks RCU, inspired by Steve Rostedt's
// trick of passing an empty function to schedule_on_each_cpu().
// This approach provides batching of concurrent calls to the synchronous
// synchronize_rcu_tasks_rude() API.  This invokes schedule_on_each_cpu()
// in order to send IPIs far and wide and induces otherwise unnecessary
// context switches on all online CPUs, whether idle or not.
//
// Callback handling is provided by the rcu_tasks_kthread() function.
//
// Ordering is provided by the scheduler's context-switch code.

// Empty function to allow workqueues to force a context switch.
static void rcu_tasks_be_rude(struct work_struct *work)
{
}

// Wait for one rude RCU-tasks grace period.
static void rcu_tasks_rude_wait_gp(struct rcu_tasks *rtp)
{
	rtp->n_ipis += cpumask_weight(cpu_online_mask);
	schedule_on_each_cpu(rcu_tasks_be_rude);
}

static void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func);
DEFINE_RCU_TASKS(rcu_tasks_rude, rcu_tasks_rude_wait_gp, call_rcu_tasks_rude,
		 "RCU Tasks Rude");

/**
 * call_rcu_tasks_rude() - Queue a callback for invocation after a rude task-based grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  call_rcu_tasks_rude()
 * assumes that the read-side critical sections end at context switch,
 * cond_resched_tasks_rcu_qs(), or transition to usermode execution (as
 * usermode execution is schedulable).  As such, there are no read-side
 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
 * this primitive is intended to determine that all tasks have passed
 * through a safe state, not so much for data-structure synchronization.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 *
 * This is no longer exported, and is instead reserved for use by
 * synchronize_rcu_tasks_rude().
 */
static void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func)
{
	call_rcu_tasks_generic(rhp, func, &rcu_tasks_rude);
}

/**
 * synchronize_rcu_tasks_rude - wait for a rude rcu-tasks grace period
 *
 * Control will return to the caller some time after a rude rcu-tasks
 * grace period has elapsed, in other words after all currently
 * executing rcu-tasks read-side critical sections have elapsed.  These
 * read-side critical sections are delimited by calls to schedule(),
 * cond_resched_tasks_rcu_qs(), userspace execution (which is a schedulable
 * context), and (in theory, anyway) cond_resched().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function preambles
 * and profiling hooks.  The synchronize_rcu_tasks_rude() function is not
 * (yet) intended for heavy use from multiple CPUs.
 *
 * See the description of synchronize_rcu() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu_tasks_rude(void)
{
	if (!IS_ENABLED(CONFIG_ARCH_WANTS_NO_INSTR) || IS_ENABLED(CONFIG_FORCE_TASKS_RUDE_RCU))
		synchronize_rcu_tasks_generic(&rcu_tasks_rude);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_rude);

static int __init rcu_spawn_tasks_rude_kthread(void)
{
	rcu_tasks_rude.gp_sleep = HZ / 10;
	rcu_spawn_tasks_kthread_generic(&rcu_tasks_rude);
	return 0;
}

#if !defined(CONFIG_TINY_RCU)
void show_rcu_tasks_rude_gp_kthread(void)
{
	show_rcu_tasks_generic_gp_kthread(&rcu_tasks_rude, "");
}
EXPORT_SYMBOL_GPL(show_rcu_tasks_rude_gp_kthread);

void rcu_tasks_rude_torture_stats_print(char *tt, char *tf)
{
	rcu_tasks_torture_stats_print_generic(&rcu_tasks_rude, tt, tf, "");
}
EXPORT_SYMBOL_GPL(rcu_tasks_rude_torture_stats_print);
#endif // !defined(CONFIG_TINY_RCU)

struct task_struct *get_rcu_tasks_rude_gp_kthread(void)
{
	return rcu_tasks_rude.kthread_ptr;
}
EXPORT_SYMBOL_GPL(get_rcu_tasks_rude_gp_kthread);

void rcu_tasks_rude_get_gp_data(int *flags, unsigned long *gp_seq)
{
	*flags = 0;
	*gp_seq = rcu_seq_current(&rcu_tasks_rude.tasks_gp_seq);
}
EXPORT_SYMBOL_GPL(rcu_tasks_rude_get_gp_data);

#endif /* #ifdef CONFIG_TASKS_RUDE_RCU */

////////////////////////////////////////////////////////////////////////
//
// Tracing variant of Tasks RCU.  This variant is designed to be used
// to protect tracing hooks, including those of BPF.  This variant
// therefore:
//
// 1.	Has explicit read-side markers to allow finite grace periods
//	in the face of in-kernel loops for PREEMPT=n builds.
//
// 2.	Protects code in the idle loop, exception entry/exit, and
//	CPU-hotplug code paths, similar to the capabilities of SRCU.
//
// 3.	Avoids expensive read-side instructions, having overhead similar
//	to that of Preemptible RCU.
//
// There are of course downsides.  For example, the grace-period code
// can send IPIs to CPUs, even when those CPUs are in the idle loop or
// in nohz_full userspace.  If needed, these downsides can be at least
// partially remedied.
//
// Perhaps most important, this variant of RCU does not affect the vanilla
// flavors, rcu_preempt and rcu_sched.  The fact that RCU Tasks Trace
// readers can operate from idle, offline, and exception entry/exit in no
// way allows rcu_preempt and rcu_sched readers to also do so.
//
// The implementation uses rcu_tasks_wait_gp(), which relies on function
// pointers in the rcu_tasks structure.  The rcu_spawn_tasks_trace_kthread()
// function sets these function pointers up so that rcu_tasks_wait_gp()
// invokes these functions in this order:
//
// rcu_tasks_trace_pregp_step():
//	Disables CPU hotplug, adds all currently executing tasks to the
//	holdout list, then checks the state of all tasks that blocked
//	or were preempted within their current RCU Tasks Trace read-side
//	critical section, adding them to the holdout list if appropriate.
//	Finally, this function re-enables CPU hotplug.
// The ->pertask_func() pointer is NULL, so there is no per-task processing.
// rcu_tasks_trace_postscan():
//	Invokes synchronize_rcu() to wait for late-stage exiting tasks
//	to finish exiting.
// check_all_holdout_tasks_trace(), repeatedly until holdout list is empty:
//	Scans the holdout list, attempting to identify a quiescent state
//	for each task on the list.  If there is a quiescent state, the
//	corresponding task is removed from the holdout list.  Once this
//	list is empty, the grace period has completed.
// rcu_tasks_trace_postgp():
//	Provides the needed full memory barrier and does debug checks.
//
// The exit_tasks_rcu_finish_trace() synchronizes with exiting tasks.
//
// Pre-grace-period update-side code is ordered before the grace period
// via the ->cbs_lock and barriers in rcu_tasks_kthread().  Pre-grace-period
// read-side code is ordered before the grace period by atomic operations
// on the .b.need_qs flag of each task involved in this process, or by
// scheduler context-switch ordering (for locked-down non-running readers).
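
// Illustrative read-side sketch: the explicit markers mentioned in item 1
// above are rcu_read_lock_trace() and rcu_read_unlock_trace(), used for
// example around an invocation of a trampoline or BPF program (hook() and
// its argument are hypothetical):
//
//	rcu_read_lock_trace();
//	hook(arg);		// protected tracing/BPF hook
//	rcu_read_unlock_trace();
//
// Updaters then use call_rcu_tasks_trace() or synchronize_rcu_tasks_trace()
// to wait for all such readers before freeing anything hook() might use.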

// The lockdep state must be outside of #ifdef to be useful.
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_trace_key;
struct lockdep_map rcu_trace_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_trace", &rcu_lock_trace_key);
EXPORT_SYMBOL_GPL(rcu_trace_lock_map);
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#ifdef CONFIG_TASKS_TRACE_RCU

// Record outstanding IPIs to each CPU.  No point in sending two...
static DEFINE_PER_CPU(bool, trc_ipi_to_cpu);

// The number of detections of task quiescent state relying on
// heavyweight readers executing explicit memory barriers.
static unsigned long n_heavy_reader_attempts;
static unsigned long n_heavy_reader_updates;
static unsigned long n_heavy_reader_ofl_updates;
static unsigned long n_trc_holdouts;

void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
DEFINE_RCU_TASKS(rcu_tasks_trace, rcu_tasks_wait_gp, call_rcu_tasks_trace,
		 "RCU Tasks Trace");

/* Load from ->trc_reader_special.b.need_qs with proper ordering. */
static u8 rcu_ld_need_qs(struct task_struct *t)
{
	smp_mb(); // Enforce full grace-period ordering.
	return smp_load_acquire(&t->trc_reader_special.b.need_qs);
}

/* Store to ->trc_reader_special.b.need_qs with proper ordering. */
static void rcu_st_need_qs(struct task_struct *t, u8 v)
{
	smp_store_release(&t->trc_reader_special.b.need_qs, v);
	smp_mb(); // Enforce full grace-period ordering.
}

/*
 * Do a cmpxchg() on ->trc_reader_special.b.need_qs, allowing for
 * the four-byte operand-size restriction of some platforms.
 *
 * Returns the old value, which is often ignored.
 */
u8 rcu_trc_cmpxchg_need_qs(struct task_struct *t, u8 old, u8 new)
{
	return cmpxchg(&t->trc_reader_special.b.need_qs, old, new);
}
EXPORT_SYMBOL_GPL(rcu_trc_cmpxchg_need_qs);

/*
 * If we are the last reader, signal the grace-period kthread.
 * Also remove from the per-CPU list of blocked tasks.
 */
void rcu_read_unlock_trace_special(struct task_struct *t)
{
	unsigned long flags;
	struct rcu_tasks_percpu *rtpcp;
	union rcu_special trs;

	// Open-coded full-word version of rcu_ld_need_qs().
	smp_mb(); // Enforce full grace-period ordering.
	trs = smp_load_acquire(&t->trc_reader_special);

	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && t->trc_reader_special.b.need_mb)
		smp_mb(); // Pairs with update-side barriers.
	// Update .need_qs before ->trc_reader_nesting for irq/NMI handlers.
	if (trs.b.need_qs == (TRC_NEED_QS_CHECKED | TRC_NEED_QS)) {
		u8 result = rcu_trc_cmpxchg_need_qs(t, TRC_NEED_QS_CHECKED | TRC_NEED_QS,
						    TRC_NEED_QS_CHECKED);

		WARN_ONCE(result != trs.b.need_qs, "%s: result = %d", __func__, result);
	}
	if (trs.b.blocked) {
		rtpcp = per_cpu_ptr(rcu_tasks_trace.rtpcpu, t->trc_blkd_cpu);
		raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
		list_del_init(&t->trc_blkd_node);
		WRITE_ONCE(t->trc_reader_special.b.blocked, false);
		raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
	}
	WRITE_ONCE(t->trc_reader_nesting, 0);
}
EXPORT_SYMBOL_GPL(rcu_read_unlock_trace_special);

/* Add a newly blocked reader task to its CPU's list. */
void rcu_tasks_trace_qs_blkd(struct task_struct *t)
{
	unsigned long flags;
	struct rcu_tasks_percpu *rtpcp;

	local_irq_save(flags);
	rtpcp = this_cpu_ptr(rcu_tasks_trace.rtpcpu);
	raw_spin_lock_rcu_node(rtpcp); // irqs already disabled
	t->trc_blkd_cpu = smp_processor_id();
	if (!rtpcp->rtp_blkd_tasks.next)
		INIT_LIST_HEAD(&rtpcp->rtp_blkd_tasks);
	list_add(&t->trc_blkd_node, &rtpcp->rtp_blkd_tasks);
	WRITE_ONCE(t->trc_reader_special.b.blocked, true);
	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
}
EXPORT_SYMBOL_GPL(rcu_tasks_trace_qs_blkd);
/* Add a task to the holdout list, if it is not already on the list. */
static void trc_add_holdout(struct task_struct *t, struct list_head *bhp)
{
	if (list_empty(&t->trc_holdout_list)) {
		get_task_struct(t);
		list_add(&t->trc_holdout_list, bhp);
		n_trc_holdouts++;
	}
}

/* Remove a task from the holdout list, if it is in fact present. */
static void trc_del_holdout(struct task_struct *t)
{
	if (!list_empty(&t->trc_holdout_list)) {
		list_del_init(&t->trc_holdout_list);
		put_task_struct(t);
		n_trc_holdouts--;
	}
}
/* IPI handler to check task state. */
static void trc_read_check_handler(void *t_in)
{
	int nesting;
	struct task_struct *t = current;
	struct task_struct *texp = t_in;

	// If the task is no longer running on this CPU, leave.
	if (unlikely(texp != t))
		goto reset_ipi; // Already on holdout list, so will check later.

	// If the task is not in a read-side critical section, and
	// if this is the last reader, awaken the grace-period kthread.
	nesting = READ_ONCE(t->trc_reader_nesting);
	if (likely(!nesting)) {
		rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
		goto reset_ipi;
	}
	// If we are racing with an rcu_read_unlock_trace(), try again later.
	if (unlikely(nesting < 0))
		goto reset_ipi;

	// Get here if the task is in a read-side critical section.
	// Set its state so that it will update state for the grace-period
	// kthread upon exit from that critical section.
	rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS | TRC_NEED_QS_CHECKED);

reset_ipi:
	// Allow future IPIs to be sent on CPU and for task.
	// Also order this IPI handler against any later manipulations of
	// the intended task.
	smp_store_release(per_cpu_ptr(&trc_ipi_to_cpu, smp_processor_id()), false); // ^^^
	smp_store_release(&texp->trc_ipi_to_cpu, -1); // ^^^
}
/* Callback function for the scheduler to check a locked-down task. */
static int trc_inspect_reader(struct task_struct *t, void *bhp_in)
{
	struct list_head *bhp = bhp_in;
	int cpu = task_cpu(t);
	int nesting;
	bool ofl = cpu_is_offline(cpu);

	if (task_curr(t) && !ofl) {
		// If no chance of heavyweight readers, do it the hard way.
		if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
			return -EINVAL;

		// If heavyweight readers are enabled on the remote task,
		// we can inspect its state despite it currently running.
		// However, we cannot safely change its state.
		n_heavy_reader_attempts++;
		// Check for "running" idle tasks on offline CPUs.
		if (!rcu_watching_zero_in_eqs(cpu, &t->trc_reader_nesting))
			return -EINVAL; // No quiescent state, do it the hard way.
		n_heavy_reader_updates++;
		nesting = 0;
	} else {
		// The task is not running, so C-language access is safe.
		nesting = t->trc_reader_nesting;
		WARN_ON_ONCE(ofl && task_curr(t) && (t != idle_task(task_cpu(t))));
		if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && ofl)
			n_heavy_reader_ofl_updates++;
	}

	// If not exiting a read-side critical section, mark as checked
	// so that the grace-period kthread will remove it from the
	// holdout list.
	if (!nesting) {
		rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
		return 0;  // In QS, so done.
	}
	if (nesting < 0)
		return -EINVAL; // Reader transitioning, try again later.

	// The task is in a read-side critical section, so set up its
	// state so that it will update state upon exit from that critical
	// section.
	if (!rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS | TRC_NEED_QS_CHECKED))
		trc_add_holdout(t, bhp);
	return 0;
}
/* Attempt to extract the state for the specified task. */
static void trc_wait_for_one_reader(struct task_struct *t,
				    struct list_head *bhp)
{
	int cpu;

	// If a previous IPI is still in flight, let it complete.
	if (smp_load_acquire(&t->trc_ipi_to_cpu) != -1) // Order IPI
		return;

	// The current task had better be in a quiescent state.
	if (t == current) {
		rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
		WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
		return;
	}

	// Attempt to nail down the task for inspection.
	get_task_struct(t);
	if (!task_call_func(t, trc_inspect_reader, bhp)) {
		put_task_struct(t);
		return;
	}
	put_task_struct(t);

	// If this task is not yet on the holdout list, then we are in
	// an RCU read-side critical section.  Otherwise, the invocation of
	// trc_add_holdout() that added it to the list did the necessary
	// get_task_struct().  Either way, the task cannot be freed out
	// from under this code.

	// If currently running, send an IPI, either way, add to list.
	trc_add_holdout(t, bhp);
	if (task_curr(t) &&
	    time_after(jiffies + 1, rcu_tasks_trace.gp_start + rcu_task_ipi_delay)) {
		// The task is currently running, so try IPIing it.
		cpu = task_cpu(t);

		// If there is already an IPI outstanding, let it happen.
		if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0)
			return;

		per_cpu(trc_ipi_to_cpu, cpu) = true;
		t->trc_ipi_to_cpu = cpu;
		rcu_tasks_trace.n_ipis++;
		if (smp_call_function_single(cpu, trc_read_check_handler, t, 0)) {
			// Just in case there is some other reason for
			// failure than the target CPU being offline.
			WARN_ONCE(1, "%s(): smp_call_function_single() failed for CPU: %d\n",
				  __func__, cpu);
			rcu_tasks_trace.n_ipis_fails++;
			per_cpu(trc_ipi_to_cpu, cpu) = false;
			t->trc_ipi_to_cpu = -1;
		}
	}
}
/*
 * Initialize for first-round processing for the specified task.
 * Return false if task is NULL or already taken care of, true otherwise.
 */
static bool rcu_tasks_trace_pertask_prep(struct task_struct *t, bool notself)
{
	// During early boot when there is only the one boot CPU, there
	// is no idle task for the other CPUs.  Also, the grace-period
	// kthread is always in a quiescent state.  In addition, just return
	// if this task is already on the list.
	if (unlikely(t == NULL) || (t == current && notself) || !list_empty(&t->trc_holdout_list))
		return false;

	rcu_st_need_qs(t, 0);
	t->trc_ipi_to_cpu = -1;
	return true;
}
/* Do first-round processing for the specified task. */
static void rcu_tasks_trace_pertask(struct task_struct *t, struct list_head *hop)
{
	if (rcu_tasks_trace_pertask_prep(t, true))
		trc_wait_for_one_reader(t, hop);
}
/* Initialize for a new RCU-tasks-trace grace period. */
static void rcu_tasks_trace_pregp_step(struct list_head *hop)
{
	LIST_HEAD(blkd_tasks);
	int cpu;
	unsigned long flags;
	struct rcu_tasks_percpu *rtpcp;
	struct task_struct *t;

	// There shouldn't be any old IPIs, but...
	for_each_possible_cpu(cpu)
		WARN_ON_ONCE(per_cpu(trc_ipi_to_cpu, cpu));

	// Disable CPU hotplug across the CPU scan for the benefit of
	// any IPIs that might be needed.  This also waits for all readers
	// in CPU-hotplug code paths.
	cpus_read_lock();

	// These rcu_tasks_trace_pertask_prep() calls are serialized to
	// allow safe access to the hop list.
	for_each_online_cpu(cpu) {
		rcu_read_lock();
		// Note that cpu_curr_snapshot() picks up the target
		// CPU's current task while its runqueue is locked with
		// an smp_mb__after_spinlock().  This ensures that either
		// the grace-period kthread will see that task's read-side
		// critical section or the task will see the updater's pre-GP
		// accesses.  The trailing smp_mb() in cpu_curr_snapshot()
		// does not currently play a role other than simplify
		// that function's ordering semantics.  If these simplified
		// ordering semantics continue to be redundant, that smp_mb()
		// might be removed.
		t = cpu_curr_snapshot(cpu);
		if (rcu_tasks_trace_pertask_prep(t, true))
			trc_add_holdout(t, hop);
		rcu_read_unlock();
		cond_resched_tasks_rcu_qs();
	}

	// Only after all running tasks have been accounted for is it
	// safe to take care of the tasks that have blocked within their
	// current RCU tasks trace read-side critical section.
	for_each_possible_cpu(cpu) {
		rtpcp = per_cpu_ptr(rcu_tasks_trace.rtpcpu, cpu);
		raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
		list_splice_init(&rtpcp->rtp_blkd_tasks, &blkd_tasks);
		while (!list_empty(&blkd_tasks)) {
			rcu_read_lock();
			t = list_first_entry(&blkd_tasks, struct task_struct, trc_blkd_node);
			list_del_init(&t->trc_blkd_node);
			list_add(&t->trc_blkd_node, &rtpcp->rtp_blkd_tasks);
			raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
			rcu_tasks_trace_pertask(t, hop);
			rcu_read_unlock();
			raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
		}
		raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
		cond_resched_tasks_rcu_qs();
	}

	// Re-enable CPU hotplug now that the holdout list is populated.
	cpus_read_unlock();
}
/*
 * Do intermediate processing between task and holdout scans.
 */
static void rcu_tasks_trace_postscan(struct list_head *hop)
{
	// Wait for late-stage exiting tasks to finish exiting.
	// These might have passed the call to exit_tasks_rcu_finish().

	// If you remove the following line, update rcu_trace_implies_rcu_gp()!!!
	synchronize_rcu();
	// Any tasks that exit after this point will set
	// TRC_NEED_QS_CHECKED in ->trc_reader_special.b.need_qs.
}
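
/*
 * Illustrative sketch (not part of this file): because the post-scan step
 * above includes a full synchronize_rcu(), rcu_trace_implies_rcu_gp()
 * returns true, and callers whose Tasks Trace callbacks also need a
 * vanilla RCU grace period can avoid chaining an extra call_rcu().  The
 * foo_free_rcu() and foo_free_tasks_trace_rcu() names below are
 * hypothetical:
 *
 *	static void foo_free_tasks_trace_rcu(struct rcu_head *rhp)
 *	{
 *		if (rcu_trace_implies_rcu_gp())
 *			foo_free_rcu(rhp);	// Trace GP implied a vanilla RCU GP.
 *		else
 *			call_rcu(rhp, foo_free_rcu);
 *	}
 */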
/* Communicate task state back to the RCU tasks trace stall warning request. */
struct trc_stall_chk_rdr {
	int nesting;
	int ipi_to_cpu;
	u8 needqs;
};

static int trc_check_slow_task(struct task_struct *t, void *arg)
{
	struct trc_stall_chk_rdr *trc_rdrp = arg;

	if (task_curr(t) && cpu_online(task_cpu(t)))
		return false; // It is running, so decline to inspect it.
	trc_rdrp->nesting = READ_ONCE(t->trc_reader_nesting);
	trc_rdrp->ipi_to_cpu = READ_ONCE(t->trc_ipi_to_cpu);
	trc_rdrp->needqs = rcu_ld_need_qs(t);
	return true;
}
/* Show the state of a task stalling the current RCU tasks trace GP. */
static void show_stalled_task_trace(struct task_struct *t, bool *firstreport)
{
	int cpu;
	struct trc_stall_chk_rdr trc_rdr;
	bool is_idle_tsk = is_idle_task(t);

	if (*firstreport) {
		pr_err("INFO: rcu_tasks_trace detected stalls on tasks:\n");
		*firstreport = false;
	}
	cpu = task_cpu(t);
	if (!task_call_func(t, trc_check_slow_task, &trc_rdr))
		pr_alert("P%d: %c%c\n",
			 t->pid,
			 ".I"[t->trc_ipi_to_cpu >= 0],
			 ".i"[is_idle_tsk]);
	else
		pr_alert("P%d: %c%c%c%c nesting: %d%c%c cpu: %d%s\n",
			 t->pid,
			 ".I"[trc_rdr.ipi_to_cpu >= 0],
			 ".i"[is_idle_tsk],
			 ".N"[cpu >= 0 && tick_nohz_full_cpu(cpu)],
			 ".B"[!!data_race(t->trc_reader_special.b.blocked)],
			 trc_rdr.nesting,
			 " !CN"[trc_rdr.needqs & 0x3],
			 " ?"[trc_rdr.needqs > 0x3],
			 cpu, cpu_online(cpu) ? "" : "(offline)");
	sched_show_task(t);
}
/* List stalled IPIs for RCU tasks trace. */
static void show_stalled_ipi_trace(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		if (per_cpu(trc_ipi_to_cpu, cpu))
			pr_alert("\tIPI outstanding to CPU %d\n", cpu);
}
/* Do one scan of the holdout list. */
static void check_all_holdout_tasks_trace(struct list_head *hop,
					  bool needreport, bool *firstreport)
{
	struct task_struct *g, *t;

	// Disable CPU hotplug across the holdout list scan for IPIs.
	cpus_read_lock();

	list_for_each_entry_safe(t, g, hop, trc_holdout_list) {
		// If safe and needed, try to check the current task.
		if (READ_ONCE(t->trc_ipi_to_cpu) == -1 &&
		    !(rcu_ld_need_qs(t) & TRC_NEED_QS_CHECKED))
			trc_wait_for_one_reader(t, hop);

		// If check succeeded, remove this task from the list.
		if (smp_load_acquire(&t->trc_ipi_to_cpu) == -1 &&
		    rcu_ld_need_qs(t) == TRC_NEED_QS_CHECKED)
			trc_del_holdout(t);
		else if (needreport)
			show_stalled_task_trace(t, firstreport);
		cond_resched_tasks_rcu_qs();
	}

	// Re-enable CPU hotplug now that the holdout list scan has completed.
	cpus_read_unlock();

	if (needreport) {
		if (*firstreport)
			pr_err("INFO: rcu_tasks_trace detected stalls? (Late IPI?)\n");
		show_stalled_ipi_trace();
	}
}
static void rcu_tasks_trace_empty_fn(void *unused)
{
}

/* Wait for grace period to complete and provide ordering. */
static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp)
{
	int cpu;

	// Wait for any lingering IPI handlers to complete.  Note that
	// if a CPU has gone offline or transitioned to userspace in the
	// meantime, all IPI handlers should have been drained beforehand.
	// Yes, this assumes that CPUs process IPIs in order.  If that ever
	// changes, there will need to be a recheck and/or timed wait.
	for_each_online_cpu(cpu)
		if (WARN_ON_ONCE(smp_load_acquire(per_cpu_ptr(&trc_ipi_to_cpu, cpu))))
			smp_call_function_single(cpu, rcu_tasks_trace_empty_fn, NULL, 1);

	smp_mb(); // Caller's code must be ordered after wakeup.
		  // Pairs with pretty much every ordering primitive.
}
/* Report any needed quiescent state for this exiting task. */
static void exit_tasks_rcu_finish_trace(struct task_struct *t)
{
	union rcu_special trs = READ_ONCE(t->trc_reader_special);

	rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
	WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
	if (WARN_ON_ONCE(rcu_ld_need_qs(t) & TRC_NEED_QS || trs.b.blocked))
		rcu_read_unlock_trace_special(t);
	else
		WRITE_ONCE(t->trc_reader_nesting, 0);
}
/**
 * call_rcu_tasks_trace() - Queue a callback for a trace rcu-tasks grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a trace rcu-tasks
 * grace period elapses, in other words after all currently executing
 * trace rcu-tasks read-side critical sections have completed.  These
 * read-side critical sections are delimited by calls to rcu_read_lock_trace()
 * and rcu_read_unlock_trace().
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func)
{
	call_rcu_tasks_generic(rhp, func, &rcu_tasks_trace);
}
EXPORT_SYMBOL_GPL(call_rcu_tasks_trace);
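
/*
 * Usage sketch (illustrative only, not part of this file): a caller embeds
 * a struct rcu_head in its own data structure and queues a callback that
 * runs once all pre-existing rcu_read_lock_trace() readers have finished.
 * The struct foo type and foo_reclaim() function below are hypothetical.
 *
 *	struct foo {
 *		struct rcu_head rh;
 *		int data;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rhp)
 *	{
 *		struct foo *fp = container_of(rhp, struct foo, rh);
 *
 *		kfree(fp);
 *	}
 *
 *	// After making fp unreachable to new readers:
 *	call_rcu_tasks_trace(&fp->rh, foo_reclaim);
 */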
/**
 * synchronize_rcu_tasks_trace - wait for a trace rcu-tasks grace period
 *
 * Control will return to the caller some time after a trace rcu-tasks
 * grace period has elapsed, in other words after all currently executing
 * trace rcu-tasks read-side critical sections have completed.  These
 * read-side critical sections are delimited by calls to rcu_read_lock_trace()
 * and rcu_read_unlock_trace().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function preambles
 * and profiling hooks.  The synchronize_rcu_tasks_trace() function is not
 * (yet) intended for heavy use from multiple CPUs.
 *
 * See the description of synchronize_rcu() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu_tasks_trace(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_trace_lock_map), "Illegal synchronize_rcu_tasks_trace() in RCU Tasks Trace read-side critical section");
	synchronize_rcu_tasks_generic(&rcu_tasks_trace);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_trace);
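
/*
 * Usage sketch (illustrative only): a typical updater unpublishes a pointer,
 * waits for a grace period, and only then frees the old version.  The
 * global_fp and new_fp pointers and struct foo below are hypothetical.
 *
 *	struct foo *old_fp = rcu_replace_pointer(global_fp, new_fp, true);
 *
 *	synchronize_rcu_tasks_trace();	// All pre-existing readers are done.
 *	kfree(old_fp);
 *
 * Readers bracket their accesses with rcu_read_lock_trace() and
 * rcu_read_unlock_trace(), and unlike vanilla RCU readers they may block
 * or be preempted while doing so.
 */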
/**
 * rcu_barrier_tasks_trace - Wait for in-flight call_rcu_tasks_trace() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks_trace(void)
{
	rcu_barrier_tasks_generic(&rcu_tasks_trace);
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks_trace);
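
/*
 * Usage sketch (illustrative only): code that has queued callbacks via
 * call_rcu_tasks_trace() typically invokes rcu_barrier_tasks_trace() on its
 * teardown path so that all in-flight callbacks have finished executing
 * before the memory they reference is freed.  The foo_exit() function below
 * is hypothetical.
 *
 *	static void __exit foo_exit(void)
 *	{
 *		// ... stop queueing new callbacks ...
 *		rcu_barrier_tasks_trace();	// Wait for in-flight callbacks.
 *		// ... now safe to free callback-containing structures ...
 *	}
 */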
int rcu_tasks_trace_lazy_ms = -1;
module_param(rcu_tasks_trace_lazy_ms, int, 0444);
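
/*
 * Illustrative note: a negative rcu_tasks_trace_lazy_ms keeps the default
 * laziness; a non-negative value is converted to jiffies and applied in
 * rcu_spawn_tasks_trace_kthread() below.  For built-in kernels this is
 * normally set on the boot command line, assuming the usual rcupdate.
 * module-parameter prefix of this file's parent translation unit, e.g.:
 *
 *	rcupdate.rcu_tasks_trace_lazy_ms=100
 */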
static int __init rcu_spawn_tasks_trace_kthread(void)
{
	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) {
		rcu_tasks_trace.gp_sleep = HZ / 10;
		rcu_tasks_trace.init_fract = HZ / 10;
	} else {
		rcu_tasks_trace.gp_sleep = HZ / 200;
		if (rcu_tasks_trace.gp_sleep <= 0)
			rcu_tasks_trace.gp_sleep = 1;
		rcu_tasks_trace.init_fract = HZ / 200;
		if (rcu_tasks_trace.init_fract <= 0)
			rcu_tasks_trace.init_fract = 1;
	}
	if (rcu_tasks_trace_lazy_ms >= 0)
		rcu_tasks_trace.lazy_jiffies = msecs_to_jiffies(rcu_tasks_trace_lazy_ms);
	rcu_tasks_trace.pregp_func = rcu_tasks_trace_pregp_step;
	rcu_tasks_trace.postscan_func = rcu_tasks_trace_postscan;
	rcu_tasks_trace.holdouts_func = check_all_holdout_tasks_trace;
	rcu_tasks_trace.postgp_func = rcu_tasks_trace_postgp;
	rcu_spawn_tasks_kthread_generic(&rcu_tasks_trace);
	return 0;
}
#if !defined(CONFIG_TINY_RCU)
void show_rcu_tasks_trace_gp_kthread(void)
{
	char buf[64];

	snprintf(buf, sizeof(buf), "N%lu h:%lu/%lu/%lu",
		 data_race(n_trc_holdouts),
		 data_race(n_heavy_reader_ofl_updates),
		 data_race(n_heavy_reader_updates),
		 data_race(n_heavy_reader_attempts));
	show_rcu_tasks_generic_gp_kthread(&rcu_tasks_trace, buf);
}
EXPORT_SYMBOL_GPL(show_rcu_tasks_trace_gp_kthread);

void rcu_tasks_trace_torture_stats_print(char *tt, char *tf)
{
	rcu_tasks_torture_stats_print_generic(&rcu_tasks_trace, tt, tf, "");
}
EXPORT_SYMBOL_GPL(rcu_tasks_trace_torture_stats_print);
#endif // !defined(CONFIG_TINY_RCU)
struct task_struct *get_rcu_tasks_trace_gp_kthread(void)
{
	return rcu_tasks_trace.kthread_ptr;
}
EXPORT_SYMBOL_GPL(get_rcu_tasks_trace_gp_kthread);

void rcu_tasks_trace_get_gp_data(int *flags, unsigned long *gp_seq)
{
	*flags = 0;
	*gp_seq = rcu_seq_current(&rcu_tasks_trace.tasks_gp_seq);
}
EXPORT_SYMBOL_GPL(rcu_tasks_trace_get_gp_data);

#else /* #ifdef CONFIG_TASKS_TRACE_RCU */
static void exit_tasks_rcu_finish_trace(struct task_struct *t) { }
#endif /* #else #ifdef CONFIG_TASKS_TRACE_RCU */
#ifndef CONFIG_TINY_RCU
void show_rcu_tasks_gp_kthreads(void)
{
	show_rcu_tasks_classic_gp_kthread();
	show_rcu_tasks_rude_gp_kthread();
	show_rcu_tasks_trace_gp_kthread();
}
#endif /* #ifndef CONFIG_TINY_RCU */
#ifdef CONFIG_PROVE_RCU
struct rcu_tasks_test_desc {
	struct rcu_head rh;
	const char *name;
	bool notrun;
	unsigned long runstart;
};

static struct rcu_tasks_test_desc tests[] = {
	{
		.name = "call_rcu_tasks()",
		/* If not defined, the test is skipped. */
		.notrun = IS_ENABLED(CONFIG_TASKS_RCU),
	},
	{
		.name = "call_rcu_tasks_trace()",
		/* If not defined, the test is skipped. */
		.notrun = IS_ENABLED(CONFIG_TASKS_TRACE_RCU)
	}
};

#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
static void test_rcu_tasks_callback(struct rcu_head *rhp)
{
	struct rcu_tasks_test_desc *rttd =
		container_of(rhp, struct rcu_tasks_test_desc, rh);

	pr_info("Callback from %s invoked.\n", rttd->name);

	rttd->notrun = false;
}
#endif // #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
static void rcu_tasks_initiate_self_tests(void)
{
#ifdef CONFIG_TASKS_RCU
	pr_info("Running RCU Tasks wait API self tests\n");
	tests[0].runstart = jiffies;
	synchronize_rcu_tasks();
	call_rcu_tasks(&tests[0].rh, test_rcu_tasks_callback);
#endif

#ifdef CONFIG_TASKS_RUDE_RCU
	pr_info("Running RCU Tasks Rude wait API self tests\n");
	synchronize_rcu_tasks_rude();
#endif

#ifdef CONFIG_TASKS_TRACE_RCU
	pr_info("Running RCU Tasks Trace wait API self tests\n");
	tests[1].runstart = jiffies;
	synchronize_rcu_tasks_trace();
	call_rcu_tasks_trace(&tests[1].rh, test_rcu_tasks_callback);
#endif
}
/*
 * Return:  0 - test passed
 *	    1 - test failed, but have not timed out yet
 *	   -1 - test failed and timed out
 */
static int rcu_tasks_verify_self_tests(void)
{
	int ret = 0;
	int i;
	unsigned long bst = rcu_task_stall_timeout;

	if (bst <= 0 || bst > RCU_TASK_BOOT_STALL_TIMEOUT)
		bst = RCU_TASK_BOOT_STALL_TIMEOUT;
	for (i = 0; i < ARRAY_SIZE(tests); i++) {
		while (tests[i].notrun) {		// still hanging.
			if (time_after(jiffies, tests[i].runstart + bst)) {
				pr_err("%s has failed boot-time tests.\n", tests[i].name);
				ret = -1;
				break;
			}
			ret = 1;
			break;
		}
	}
	WARN_ON(ret < 0);

	return ret;
}
/*
 * Repeat the rcu_tasks_verify_self_tests() call once every second until the
 * test passes or has timed out.
 */
static struct delayed_work rcu_tasks_verify_work;
static void rcu_tasks_verify_work_fn(struct work_struct *work __maybe_unused)
{
	int ret = rcu_tasks_verify_self_tests();

	if (ret <= 0)
		return;

	/* Test fails but not timed out yet, reschedule another check */
	schedule_delayed_work(&rcu_tasks_verify_work, HZ);
}

static int rcu_tasks_verify_schedule_work(void)
{
	INIT_DELAYED_WORK(&rcu_tasks_verify_work, rcu_tasks_verify_work_fn);
	rcu_tasks_verify_work_fn(NULL);
	return 0;
}
late_initcall(rcu_tasks_verify_schedule_work);
#else /* #ifdef CONFIG_PROVE_RCU */
static void rcu_tasks_initiate_self_tests(void) { }
#endif /* #else #ifdef CONFIG_PROVE_RCU */
void __init tasks_cblist_init_generic(void)
{
	lockdep_assert_irqs_disabled();
	WARN_ON(num_online_cpus() > 1);

#ifdef CONFIG_TASKS_RCU
	cblist_init_generic(&rcu_tasks);
#endif

#ifdef CONFIG_TASKS_RUDE_RCU
	cblist_init_generic(&rcu_tasks_rude);
#endif

#ifdef CONFIG_TASKS_TRACE_RCU
	cblist_init_generic(&rcu_tasks_trace);
#endif
}

void __init rcu_init_tasks_generic(void)
{
#ifdef CONFIG_TASKS_RCU
	rcu_spawn_tasks_kthread();
#endif

#ifdef CONFIG_TASKS_RUDE_RCU
	rcu_spawn_tasks_rude_kthread();
#endif

#ifdef CONFIG_TASKS_TRACE_RCU
	rcu_spawn_tasks_trace_kthread();
#endif

	// Run the self-tests.
	rcu_tasks_initiate_self_tests();
}

#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
static inline void rcu_tasks_bootup_oddness(void) {}
#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */