// SPDX-License-Identifier: GPL-2.0+
//
// Torture test for smp_call_function() and friends.
//
// Copyright (C) Facebook, 2020.
//
// Author: Paul E. McKenney <paulmck@kernel.org>

#define pr_fmt(fmt) fmt

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/rcupdate_trace.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/torture.h>
#include <linux/types.h>

#define SCFTORT_STRING "scftorture"
#define SCFTORT_FLAG SCFTORT_STRING ": "

#define VERBOSE_SCFTORTOUT(s, x...) \
	do { if (verbose) pr_alert(SCFTORT_FLAG s "\n", ## x); } while (0)

#define SCFTORTOUT_ERRSTRING(s, x...) pr_alert(SCFTORT_FLAG "!!! " s "\n", ## x)

MODULE_DESCRIPTION("Torture tests on the smp_call_function() family of primitives");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@kernel.org>");

// Wait until there are multiple CPUs before starting test.
torture_param(int, holdoff, IS_BUILTIN(CONFIG_SCF_TORTURE_TEST) ? 10 : 0,
	      "Holdoff time before test start (s)");
torture_param(int, longwait, 0, "Include ridiculously long waits? (seconds)");
torture_param(int, nthreads, -1, "# threads, defaults to -1 for all CPUs.");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0, "Time between CPU hotplugs (s), 0=disable");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stat_interval, 60, "Number of seconds between stats printk()s.");
torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
torture_param(bool, use_cpus_read_lock, 0, "Use cpus_read_lock() to exclude CPU hotplug.");
torture_param(int, verbose, 0, "Enable verbose debugging printk()s");
torture_param(int, weight_resched, -1, "Testing weight for resched_cpu() operations.");
torture_param(int, weight_single, -1, "Testing weight for single-CPU no-wait operations.");
torture_param(int, weight_single_rpc, -1, "Testing weight for single-CPU RPC operations.");
torture_param(int, weight_single_wait, -1, "Testing weight for single-CPU operations.");
torture_param(int, weight_many, -1, "Testing weight for multi-CPU no-wait operations.");
torture_param(int, weight_many_wait, -1, "Testing weight for multi-CPU operations.");
torture_param(int, weight_all, -1, "Testing weight for all-CPU no-wait operations.");
torture_param(int, weight_all_wait, -1, "Testing weight for all-CPU operations.");

static char *torture_type = "";

#ifdef MODULE
# define SCFTORT_SHUTDOWN 0
#else
# define SCFTORT_SHUTDOWN 1
#endif

torture_param(bool, shutdown, SCFTORT_SHUTDOWN, "Shutdown at end of torture test.");

struct scf_statistics {
	struct task_struct *task;
	int cpu;
	long long n_resched;
	long long n_single;
	long long n_single_ofl;
	long long n_single_rpc;
	long long n_single_rpc_ofl;
	long long n_single_wait;
	long long n_single_wait_ofl;
	long long n_many;
	long long n_many_wait;
	long long n_all;
	long long n_all_wait;
};

static struct scf_statistics *scf_stats_p;
static struct task_struct *scf_torture_stats_task;
static DEFINE_PER_CPU(long long, scf_invoked_count);
static DEFINE_PER_CPU(struct llist_head, scf_free_pool);

// Data for random primitive selection
#define SCF_PRIM_RESCHED	0
#define SCF_PRIM_SINGLE		1
#define SCF_PRIM_SINGLE_RPC	2
#define SCF_PRIM_MANY		3
#define SCF_PRIM_ALL		4
#define SCF_NPRIMS		8 // Need wait and no-wait versions of each,
				  //  except for SCF_PRIM_RESCHED and
				  //  SCF_PRIM_SINGLE_RPC.

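// SCF_NPRIMS is eight because SCF_PRIM_SINGLE, SCF_PRIM_MANY, and SCF_PRIM_ALL
// each get a wait and a no-wait entry in scf_sel_array[], while
// SCF_PRIM_RESCHED and SCF_PRIM_SINGLE_RPC get one entry apiece: 3 * 2 + 2 = 8.
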
static char *scf_prim_name[] = {
	"resched_cpu",
	"smp_call_function_single",
	"smp_call_function_single_rpc",
	"smp_call_function_many",
	"smp_call_function",
};

struct scf_selector {
	unsigned long scfs_weight;
	int scfs_prim;
	bool scfs_wait;
};
static struct scf_selector scf_sel_array[SCF_NPRIMS];
static int scf_sel_array_len;
static unsigned long scf_sel_totweight;

// Communicate between caller and handler.
struct scf_check {
	bool scfc_in;
	bool scfc_out;
	int scfc_cpu; // -1 for not _single().
	bool scfc_wait;
	bool scfc_rpc;
	struct completion scfc_completion;
	struct llist_node scf_node;
};

// Use to wait for all threads to start.
static atomic_t n_started;
static atomic_t n_errs;
static atomic_t n_mb_in_errs;
static atomic_t n_mb_out_errs;
static atomic_t n_alloc_errs;
static bool scfdone;
static char *bangstr = "";

static DEFINE_TORTURE_RANDOM_PERCPU(scf_torture_rand);

extern void resched_cpu(int cpu); // An alternative IPI vector.

static void scf_add_to_free_list(struct scf_check *scfcp)
{
	struct llist_head *pool;
	unsigned int cpu;

	if (!scfcp)
		return;
	cpu = raw_smp_processor_id() % nthreads;
	pool = &per_cpu(scf_free_pool, cpu);
	llist_add(&scfcp->scf_node, pool);
}

static void scf_cleanup_free_list(unsigned int cpu)
{
	struct llist_head *pool;
	struct llist_node *node;
	struct scf_check *scfcp;

	pool = &per_cpu(scf_free_pool, cpu);
	node = llist_del_all(pool);
	while (node) {
		scfcp = llist_entry(node, struct scf_check, scf_node);
		node = node->next;
		kfree(scfcp);
	}
}

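// The smp_call_function() handlers defined below run in interrupt context,
// where calling kfree() directly is problematic on some configurations
// (PREEMPT_RT, for example).  Finished scf_check structures are therefore
// pushed onto a lock-less per-CPU llist by scf_add_to_free_list() and later
// kfree()d from task context by scf_cleanup_free_list().
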
// Print torture statistics.  Caller must ensure serialization.
static void scf_torture_stats_print(void)
{
	int cpu;
	int i;
	long long invoked_count = 0;
	bool isdone = READ_ONCE(scfdone);
	struct scf_statistics scfs = {};

	for_each_possible_cpu(cpu)
		invoked_count += data_race(per_cpu(scf_invoked_count, cpu));
	for (i = 0; i < nthreads; i++) {
		scfs.n_resched += scf_stats_p[i].n_resched;
		scfs.n_single += scf_stats_p[i].n_single;
		scfs.n_single_ofl += scf_stats_p[i].n_single_ofl;
		scfs.n_single_rpc += scf_stats_p[i].n_single_rpc;
		scfs.n_single_wait += scf_stats_p[i].n_single_wait;
		scfs.n_single_wait_ofl += scf_stats_p[i].n_single_wait_ofl;
		scfs.n_many += scf_stats_p[i].n_many;
		scfs.n_many_wait += scf_stats_p[i].n_many_wait;
		scfs.n_all += scf_stats_p[i].n_all;
		scfs.n_all_wait += scf_stats_p[i].n_all_wait;
	}
	if (atomic_read(&n_errs) || atomic_read(&n_mb_in_errs) ||
	    atomic_read(&n_mb_out_errs) ||
	    (!IS_ENABLED(CONFIG_KASAN) && atomic_read(&n_alloc_errs)))
		bangstr = "!!! ";
	pr_alert("%s %sscf_invoked_count %s: %lld resched: %lld single: %lld/%lld single_ofl: %lld/%lld single_rpc: %lld single_rpc_ofl: %lld many: %lld/%lld all: %lld/%lld ",
		 SCFTORT_FLAG, bangstr, isdone ? "VER" : "ver", invoked_count, scfs.n_resched,
		 scfs.n_single, scfs.n_single_wait, scfs.n_single_ofl, scfs.n_single_wait_ofl,
		 scfs.n_single_rpc, scfs.n_single_rpc_ofl,
		 scfs.n_many, scfs.n_many_wait, scfs.n_all, scfs.n_all_wait);
	torture_onoff_stats();
	pr_cont("ste: %d stnmie: %d stnmoe: %d staf: %d\n", atomic_read(&n_errs),
		atomic_read(&n_mb_in_errs), atomic_read(&n_mb_out_errs),
		atomic_read(&n_alloc_errs));
}

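// In the lines printed above, "ste" counts handler-detected errors such as
// wrong-CPU invocations (n_errs), "stnmie" and "stnmoe" count memory-ordering
// failures detected on entry to and exit from the handlers (n_mb_in_errs and
// n_mb_out_errs), and "staf" counts atomic-allocation failures (n_alloc_errs).
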
// Periodically prints torture statistics, if periodic statistics printing
// was specified via the stat_interval module parameter.
static int
scf_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("scf_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		scf_torture_stats_print();
		torture_shutdown_absorb("scf_torture_stats");
	} while (!torture_must_stop());
	torture_kthread_stopping("scf_torture_stats");
	return 0;
}

// Add a primitive to the scf_sel_array[].
static void scf_sel_add(unsigned long weight, int prim, bool wait)
{
	struct scf_selector *scfsp = &scf_sel_array[scf_sel_array_len];

	// If no weight, if array would overflow, if computing three-place
	// percentages would overflow, or if the scf_prim_name[] array would
	// overflow, don't bother.  In the last three cases, complain.
	if (!weight ||
	    WARN_ON_ONCE(scf_sel_array_len >= ARRAY_SIZE(scf_sel_array)) ||
	    WARN_ON_ONCE(0 - 100000 * weight <= 100000 * scf_sel_totweight) ||
	    WARN_ON_ONCE(prim >= ARRAY_SIZE(scf_prim_name)))
		return;
	scf_sel_totweight += weight;
	scfsp->scfs_weight = scf_sel_totweight;
	scfsp->scfs_prim = prim;
	scfsp->scfs_wait = wait;
	scf_sel_array_len++;
}

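// The "0 - 100000 * weight" comparison above relies on unsigned wraparound:
// it fires when 100000 * (scf_sel_totweight + weight) would exceed ULONG_MAX,
// which is exactly when the three-place percentages computed by
// scf_sel_dump() below would overflow.
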
// Dump out weighting percentages for scf_prim_name[] array.
static void scf_sel_dump(void)
{
	int i;
	unsigned long oldw = 0;
	struct scf_selector *scfsp;
	unsigned long w;

	for (i = 0; i < scf_sel_array_len; i++) {
		scfsp = &scf_sel_array[i];
		w = (scfsp->scfs_weight - oldw) * 100000 / scf_sel_totweight;
		pr_info("%s: %3lu.%03lu %s(%s)\n", __func__, w / 1000, w % 1000,
			scf_prim_name[scfsp->scfs_prim],
			scfsp->scfs_wait ? "wait" : "nowait");
		oldw = scfsp->scfs_weight;
	}
}

// Randomly pick a primitive and wait/nowait, based on weightings.
static struct scf_selector *scf_sel_rand(struct torture_random_state *trsp)
{
	int i;
	unsigned long w = torture_random(trsp) % (scf_sel_totweight + 1);

	for (i = 0; i < scf_sel_array_len; i++)
		if (scf_sel_array[i].scfs_weight >= w)
			return &scf_sel_array[i];
	WARN_ON_ONCE(1);
	return &scf_sel_array[0];
}

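// For example, successive scf_sel_add() calls with weights 2, 1, and 1 leave
// cumulative scfs_weight values of 2, 3, and 4 and scf_sel_totweight == 4, so
// scf_sel_rand() returns the first entry whenever w <= 2, that is, roughly
// half of the time (the "% (scf_sel_totweight + 1)" makes the split slightly
// inexact for such tiny weights).
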
// Update statistics and occasionally burn up mass quantities of CPU time,
// if told to do so via scftorture.longwait.  Otherwise, occasionally burn
// a little bit.
static void scf_handler(void *scfc_in)
{
	int i;
	int j;
	unsigned long r = torture_random(this_cpu_ptr(&scf_torture_rand));
	struct scf_check *scfcp = scfc_in;

	if (likely(scfcp)) {
		WRITE_ONCE(scfcp->scfc_out, false); // For multiple receivers.
		if (WARN_ON_ONCE(unlikely(!READ_ONCE(scfcp->scfc_in))))
			atomic_inc(&n_mb_in_errs);
	}
	this_cpu_inc(scf_invoked_count);
	if (longwait <= 0) {
		if (!(r & 0xffc0)) {
			udelay(r & 0x3f);
			goto out;
		}
	}
	if (r & 0xfff)
		goto out;
	r = (r >> 12);
	if (longwait <= 0) {
		udelay((r & 0xff) + 1);
		goto out;
	}
	r = r % longwait + 1;
	for (i = 0; i < r; i++) {
		for (j = 0; j < 1000; j++) {
			udelay(1000);
			cpu_relax();
		}
	}
out:
	if (unlikely(!scfcp))
		return;
	if (scfcp->scfc_wait) {
		WRITE_ONCE(scfcp->scfc_out, true);
		if (scfcp->scfc_rpc)
			complete(&scfcp->scfc_completion);
	} else {
		scf_add_to_free_list(scfcp);
	}
}

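// The scfc_in/scfc_out flags form a simple handshake: the caller sets scfc_in
// just before invoking the IPI and the handler sets scfc_out before finishing,
// so a waited call that returns with scfc_out still false indicates a
// memory-ordering (or IPI-delivery) failure, which scftorture_invoke_one()
// counts in n_mb_out_errs.
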
// As above, but check for correct CPU.
static void scf_handler_1(void *scfc_in)
{
	struct scf_check *scfcp = scfc_in;

	if (likely(scfcp) && WARN_ONCE(smp_processor_id() != scfcp->scfc_cpu, "%s: Wanted CPU %d got CPU %d\n", __func__, scfcp->scfc_cpu, smp_processor_id())) {
		atomic_inc(&n_errs);
	}
	scf_handler(scfcp);
}

// Randomly do an smp_call_function*() invocation.
static void scftorture_invoke_one(struct scf_statistics *scfp, struct torture_random_state *trsp)
{
	bool allocfail = false;
	int cpu;
	int ret = 0;
	struct scf_check *scfcp = NULL;
	struct scf_selector *scfsp = scf_sel_rand(trsp);

	if (scfsp->scfs_prim == SCF_PRIM_SINGLE || scfsp->scfs_wait) {
		scfcp = kmalloc(sizeof(*scfcp), GFP_ATOMIC);
		if (!scfcp) {
			WARN_ON_ONCE(!IS_ENABLED(CONFIG_KASAN));
			atomic_inc(&n_alloc_errs);
			allocfail = true;
		} else {
			scfcp->scfc_cpu = -1;
			scfcp->scfc_wait = scfsp->scfs_wait;
			scfcp->scfc_out = false;
			scfcp->scfc_rpc = false;
		}
	}
	if (use_cpus_read_lock)
		cpus_read_lock();
	else
		preempt_disable();
	switch (scfsp->scfs_prim) {
	case SCF_PRIM_RESCHED:
		if (IS_BUILTIN(CONFIG_SCF_TORTURE_TEST)) {
			cpu = torture_random(trsp) % nr_cpu_ids;
			scfp->n_resched++;
			resched_cpu(cpu);
			this_cpu_inc(scf_invoked_count);
		}
		break;
	case SCF_PRIM_SINGLE:
		cpu = torture_random(trsp) % nr_cpu_ids;
		if (scfsp->scfs_wait)
			scfp->n_single_wait++;
		else
			scfp->n_single++;
		if (scfcp) {
			scfcp->scfc_cpu = cpu;
			barrier(); // Prevent race-reduction compiler optimizations.
			scfcp->scfc_in = true;
		}
		ret = smp_call_function_single(cpu, scf_handler_1, (void *)scfcp, scfsp->scfs_wait);
		if (ret) {
			if (scfsp->scfs_wait)
				scfp->n_single_wait_ofl++;
			else
				scfp->n_single_ofl++;
			scf_add_to_free_list(scfcp);
			scfcp = NULL;
		}
		break;
	case SCF_PRIM_SINGLE_RPC:
		if (!scfcp)
			break;
		cpu = torture_random(trsp) % nr_cpu_ids;
		scfp->n_single_rpc++;
		scfcp->scfc_cpu = cpu;
		scfcp->scfc_wait = true;
		init_completion(&scfcp->scfc_completion);
		scfcp->scfc_rpc = true;
		barrier(); // Prevent race-reduction compiler optimizations.
		scfcp->scfc_in = true;
		ret = smp_call_function_single(cpu, scf_handler_1, (void *)scfcp, 0);
		if (!ret) {
			if (use_cpus_read_lock)
				cpus_read_unlock();
			else
				preempt_enable();
			wait_for_completion(&scfcp->scfc_completion);
			if (use_cpus_read_lock)
				cpus_read_lock();
			else
				preempt_disable();
		} else {
			scfp->n_single_rpc_ofl++;
			scf_add_to_free_list(scfcp);
			scfcp = NULL;
		}
		break;
	case SCF_PRIM_MANY:
		if (scfsp->scfs_wait)
			scfp->n_many_wait++;
		else
			scfp->n_many++;
		if (scfcp) {
			barrier(); // Prevent race-reduction compiler optimizations.
			scfcp->scfc_in = true;
		}
		smp_call_function_many(cpu_online_mask, scf_handler, scfcp, scfsp->scfs_wait);
		break;
	case SCF_PRIM_ALL:
		if (scfsp->scfs_wait)
			scfp->n_all_wait++;
		else
			scfp->n_all++;
		if (scfcp) {
			barrier(); // Prevent race-reduction compiler optimizations.
			scfcp->scfc_in = true;
		}
		smp_call_function(scf_handler, scfcp, scfsp->scfs_wait);
		break;
	default:
		WARN_ON_ONCE(1);
		if (scfcp)
			scfcp->scfc_out = true;
	}
	if (scfcp && scfsp->scfs_wait) {
		if (WARN_ON_ONCE((num_online_cpus() > 1 || scfsp->scfs_prim == SCF_PRIM_SINGLE) &&
				 !scfcp->scfc_out)) {
			pr_warn("%s: Memory-ordering failure, scfs_prim: %d.\n", __func__, scfsp->scfs_prim);
			atomic_inc(&n_mb_out_errs); // Leak rather than trash!
		} else {
			scf_add_to_free_list(scfcp);
		}
	}
	barrier(); // Prevent race-reduction compiler optimizations.
	if (use_cpus_read_lock)
		cpus_read_unlock();
	else
		preempt_enable();
	if (allocfail)
		schedule_timeout_idle((1 + longwait) * HZ); // Let no-wait handlers complete.
	else if (!(torture_random(trsp) & 0xfff))
		schedule_timeout_uninterruptible(1);
}

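// Note that the SCF_PRIM_SINGLE_RPC case above re-enables preemption (or drops
// cpus_read_lock()) before wait_for_completion(), because wait_for_completion()
// can sleep and sleeping with preemption disabled is not allowed; the handler
// then signals scfc_completion from the target CPU, emulating a remote
// procedure call.
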
// SCF test kthread.  Repeatedly does calls to members of the
// smp_call_function() family of functions.
static int scftorture_invoker(void *arg)
{
	int cpu;
	int curcpu;
	DEFINE_TORTURE_RANDOM(rand);
	struct scf_statistics *scfp = (struct scf_statistics *)arg;
	bool was_offline = false;

	VERBOSE_SCFTORTOUT("scftorture_invoker %d: task started", scfp->cpu);
	cpu = scfp->cpu % nr_cpu_ids;
	WARN_ON_ONCE(set_cpus_allowed_ptr(current, cpumask_of(cpu)));
	set_user_nice(current, MAX_NICE);
	if (holdoff)
		schedule_timeout_interruptible(holdoff * HZ);

	VERBOSE_SCFTORTOUT("scftorture_invoker %d: Waiting for all SCF torturers from cpu %d", scfp->cpu, raw_smp_processor_id());

	// Make sure that the CPU is affinitized appropriately during testing.
	curcpu = raw_smp_processor_id();
	WARN_ONCE(curcpu != cpu,
		  "%s: Wanted CPU %d, running on %d, nr_cpu_ids = %d\n",
		  __func__, scfp->cpu, curcpu, nr_cpu_ids);

	if (!atomic_dec_return(&n_started))
		while (atomic_read_acquire(&n_started)) {
			if (torture_must_stop()) {
				VERBOSE_SCFTORTOUT("scftorture_invoker %d ended before starting", scfp->cpu);
				goto end;
			}
			schedule_timeout_uninterruptible(1);
		}

	VERBOSE_SCFTORTOUT("scftorture_invoker %d started", scfp->cpu);

	do {
		scf_cleanup_free_list(cpu);

		scftorture_invoke_one(scfp, &rand);
		while (cpu_is_offline(cpu) && !torture_must_stop()) {
			schedule_timeout_interruptible(HZ / 5);
			was_offline = true;
		}
		if (was_offline) {
			set_cpus_allowed_ptr(current, cpumask_of(cpu));
			was_offline = false;
		}
		cond_resched();
		stutter_wait("scftorture_invoker");
	} while (!torture_must_stop());

	VERBOSE_SCFTORTOUT("scftorture_invoker %d ended", scfp->cpu);
end:
	torture_kthread_stopping("scftorture_invoker");
	return 0;
}

static void
scftorture_print_module_parms(const char *tag)
{
	pr_alert(SCFTORT_FLAG
		 "--- %s: verbose=%d holdoff=%d longwait=%d nthreads=%d onoff_holdoff=%d onoff_interval=%d shutdown_secs=%d stat_interval=%d stutter=%d use_cpus_read_lock=%d, weight_resched=%d, weight_single=%d, weight_single_rpc=%d, weight_single_wait=%d, weight_many=%d, weight_many_wait=%d, weight_all=%d, weight_all_wait=%d\n", tag,
		 verbose, holdoff, longwait, nthreads, onoff_holdoff, onoff_interval, shutdown, stat_interval, stutter, use_cpus_read_lock, weight_resched, weight_single, weight_single_rpc, weight_single_wait, weight_many, weight_many_wait, weight_all, weight_all_wait);
}

static void scf_cleanup_handler(void *unused)
{
}

static void scf_torture_cleanup(void)
{
	int i;

	if (torture_cleanup_begin())
		return;

	WRITE_ONCE(scfdone, true);
	if (nthreads && scf_stats_p)
		for (i = 0; i < nthreads; i++)
			torture_stop_kthread("scftorture_invoker", scf_stats_p[i].task);
	else
		goto end;
	smp_call_function(scf_cleanup_handler, NULL, 1);
	torture_stop_kthread(scf_torture_stats, scf_torture_stats_task);
	scf_torture_stats_print();	// -After- the stats thread is stopped!
	kfree(scf_stats_p);	// -After- the last stats print has completed!
	scf_stats_p = NULL;

	for (i = 0; i < nr_cpu_ids; i++)
		scf_cleanup_free_list(i);

	if (atomic_read(&n_errs) || atomic_read(&n_mb_in_errs) || atomic_read(&n_mb_out_errs))
		scftorture_print_module_parms("End of test: FAILURE");
	else if (torture_onoff_failures())
		scftorture_print_module_parms("End of test: LOCK_HOTPLUG");
	else
		scftorture_print_module_parms("End of test: SUCCESS");

end:
	torture_cleanup_end();
}

static int __init scf_torture_init(void)
{
	long i;
	int firsterr = 0;
	unsigned long weight_resched1 = weight_resched;
	unsigned long weight_single1 = weight_single;
	unsigned long weight_single_rpc1 = weight_single_rpc;
	unsigned long weight_single_wait1 = weight_single_wait;
	unsigned long weight_many1 = weight_many;
	unsigned long weight_many_wait1 = weight_many_wait;
	unsigned long weight_all1 = weight_all;
	unsigned long weight_all_wait1 = weight_all_wait;

	if (!torture_init_begin(SCFTORT_STRING, verbose))
		return -EBUSY;

	scftorture_print_module_parms("Start of test");

	if (weight_resched <= 0 &&
	    weight_single <= 0 && weight_single_rpc <= 0 && weight_single_wait <= 0 &&
	    weight_many <= 0 && weight_many_wait <= 0 &&
	    weight_all <= 0 && weight_all_wait <= 0) {
		weight_resched1 = weight_resched == 0 ? 0 : 2 * nr_cpu_ids;
		weight_single1 = weight_single == 0 ? 0 : 2 * nr_cpu_ids;
		weight_single_rpc1 = weight_single_rpc == 0 ? 0 : 2 * nr_cpu_ids;
		weight_single_wait1 = weight_single_wait == 0 ? 0 : 2 * nr_cpu_ids;
		weight_many1 = weight_many == 0 ? 0 : 2;
		weight_many_wait1 = weight_many_wait == 0 ? 0 : 2;
		weight_all1 = weight_all == 0 ? 0 : 1;
		weight_all_wait1 = weight_all_wait == 0 ? 0 : 1;
	} else {
		if (weight_resched == -1)
			weight_resched1 = 0;
		if (weight_single == -1)
			weight_single1 = 0;
		if (weight_single_rpc == -1)
			weight_single_rpc1 = 0;
		if (weight_single_wait == -1)
			weight_single_wait1 = 0;
		if (weight_many == -1)
			weight_many1 = 0;
		if (weight_many_wait == -1)
			weight_many_wait1 = 0;
		if (weight_all == -1)
			weight_all1 = 0;
		if (weight_all_wait == -1)
			weight_all_wait1 = 0;
	}
	if (weight_resched1 == 0 && weight_single1 == 0 && weight_single_rpc1 == 0 &&
	    weight_single_wait1 == 0 && weight_many1 == 0 && weight_many_wait1 == 0 &&
	    weight_all1 == 0 && weight_all_wait1 == 0) {
		SCFTORTOUT_ERRSTRING("all zero weights makes no sense");
		firsterr = -EINVAL;
		goto unwind;
	}
	if (IS_BUILTIN(CONFIG_SCF_TORTURE_TEST))
		scf_sel_add(weight_resched1, SCF_PRIM_RESCHED, false);
	else if (weight_resched1)
		SCFTORTOUT_ERRSTRING("built as module, weight_resched ignored");
	scf_sel_add(weight_single1, SCF_PRIM_SINGLE, false);
	scf_sel_add(weight_single_rpc1, SCF_PRIM_SINGLE_RPC, true);
	scf_sel_add(weight_single_wait1, SCF_PRIM_SINGLE, true);
	scf_sel_add(weight_many1, SCF_PRIM_MANY, false);
	scf_sel_add(weight_many_wait1, SCF_PRIM_MANY, true);
	scf_sel_add(weight_all1, SCF_PRIM_ALL, false);
	scf_sel_add(weight_all_wait1, SCF_PRIM_ALL, true);
	scf_sel_dump();

	if (onoff_interval > 0) {
		firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval, NULL);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (shutdown_secs > 0) {
		firsterr = torture_shutdown_init(shutdown_secs, scf_torture_cleanup);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (stutter > 0) {
		firsterr = torture_stutter_init(stutter, stutter);
		if (torture_init_error(firsterr))
			goto unwind;
	}

	// Worker tasks invoking smp_call_function().
	if (nthreads < 0)
		nthreads = num_online_cpus();
	scf_stats_p = kcalloc(nthreads, sizeof(scf_stats_p[0]), GFP_KERNEL);
	if (!scf_stats_p) {
		SCFTORTOUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}

	VERBOSE_SCFTORTOUT("Starting %d smp_call_function() threads", nthreads);

	atomic_set(&n_started, nthreads);
	for (i = 0; i < nthreads; i++) {
		scf_stats_p[i].cpu = i;
		firsterr = torture_create_kthread(scftorture_invoker, (void *)&scf_stats_p[i],
						  scf_stats_p[i].task);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (stat_interval > 0) {
		firsterr = torture_create_kthread(scf_torture_stats, NULL, scf_torture_stats_task);
		if (torture_init_error(firsterr))
			goto unwind;
	}

	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	scf_torture_cleanup();
	if (shutdown_secs) {
		WARN_ON(!IS_MODULE(CONFIG_SCF_TORTURE_TEST));
		kernel_power_off();
	}
	return firsterr;
}

module_init(scf_torture_init);
module_exit(scf_torture_cleanup);