// SPDX-License-Identifier: GPL-2.0+
//
// Torture test for smp_call_function() and friends.
//
// Copyright (C) Facebook, 2020.
//
// Author: Paul E. McKenney <paulmck@kernel.org>

#define pr_fmt(fmt) fmt

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/rcupdate_trace.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/torture.h>
#include <linux/types.h>

#define SCFTORT_STRING "scftorture"
#define SCFTORT_FLAG SCFTORT_STRING ": "

#define VERBOSE_SCFTORTOUT(s, x...) \
	do { if (verbose) pr_alert(SCFTORT_FLAG s "\n", ## x); } while (0)

#define SCFTORTOUT_ERRSTRING(s, x...) pr_alert(SCFTORT_FLAG "!!! " s "\n", ## x)

MODULE_DESCRIPTION("Torture tests on the smp_call_function() family of primitives");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@kernel.org>");

// Wait until there are multiple CPUs before starting test.
torture_param(int, holdoff, IS_BUILTIN(CONFIG_SCF_TORTURE_TEST) ? 10 : 0,
	      "Holdoff time before test start (s)");
torture_param(int, longwait, 0, "Include ridiculously long waits? (seconds)");
torture_param(int, nthreads, -1, "# threads, defaults to -1 for all CPUs.");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0, "Time between CPU hotplugs (s), 0=disable");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stat_interval, 60, "Number of seconds between stats printk()s.");
torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
torture_param(bool, use_cpus_read_lock, 0, "Use cpus_read_lock() to exclude CPU hotplug.");
torture_param(int, verbose, 0, "Enable verbose debugging printk()s");
torture_param(int, weight_resched, -1, "Testing weight for resched_cpu() operations.");
torture_param(int, weight_single, -1, "Testing weight for single-CPU no-wait operations.");
torture_param(int, weight_single_rpc, -1, "Testing weight for single-CPU RPC operations.");
torture_param(int, weight_single_wait, -1, "Testing weight for single-CPU operations.");
torture_param(int, weight_many, -1, "Testing weight for multi-CPU no-wait operations.");
torture_param(int, weight_many_wait, -1, "Testing weight for multi-CPU operations.");
torture_param(int, weight_all, -1, "Testing weight for all-CPU no-wait operations.");
torture_param(int, weight_all_wait, -1, "Testing weight for all-CPU operations.");
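
// Example usage (hypothetical values): booting a built-in test with
//	scftorture.nthreads=4 scftorture.weight_single=3 scftorture.weight_many_wait=1
// (or "modprobe scftorture nthreads=4 ..." for the modular case) runs four
// invoker kthreads, with single-CPU no-wait calls selected three times as
// often as waited multi-CPU calls.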

static char *torture_type = "";

#ifdef MODULE
# define SCFTORT_SHUTDOWN 0
#else
# define SCFTORT_SHUTDOWN 1
#endif

torture_param(bool, shutdown, SCFTORT_SHUTDOWN, "Shutdown at end of torture test.");

struct scf_statistics {
	struct task_struct *task;
	int cpu;
	long long n_resched;
	long long n_single;
	long long n_single_ofl;
	long long n_single_rpc;
	long long n_single_rpc_ofl;
	long long n_single_wait;
	long long n_single_wait_ofl;
	long long n_many;
	long long n_many_wait;
	long long n_all;
	long long n_all_wait;
};

static struct scf_statistics *scf_stats_p;
static struct task_struct *scf_torture_stats_task;
static DEFINE_PER_CPU(long long, scf_invoked_count);
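// scf_invoked_count is incremented by each handler invocation and summed
// across all possible CPUs by scf_torture_stats_print().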

// Data for random primitive selection
#define SCF_PRIM_RESCHED	0
#define SCF_PRIM_SINGLE		1
#define SCF_PRIM_SINGLE_RPC	2
#define SCF_PRIM_MANY		3
#define SCF_PRIM_ALL		4
#define SCF_NPRIMS		8 // Need wait and no-wait versions of each,
				  //  except for SCF_PRIM_RESCHED and
				  //  SCF_PRIM_SINGLE_RPC.

static char *scf_prim_name[] = {
	"resched_cpu",
	"smp_call_function_single",
	"smp_call_function_single_rpc",
	"smp_call_function_many",
	"smp_call_function",
};

struct scf_selector {
	unsigned long scfs_weight;
	int scfs_prim;
	bool scfs_wait;
};
static struct scf_selector scf_sel_array[SCF_NPRIMS];
static int scf_sel_array_len;
static unsigned long scf_sel_totweight;

// Communicate between caller and handler.
struct scf_check {
	bool scfc_in;
	bool scfc_out;
	int scfc_cpu; // -1 for not _single().
	bool scfc_wait;
	bool scfc_rpc;
	struct completion scfc_completion;
};
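
// The scfc_in/scfc_out flags implement a simple memory-ordering check:
// the caller sets scfc_in just before invoking the primitive, the handler
// complains (n_mb_in_errs) if it does not observe that store, and then sets
// scfc_out, so that the caller can complain (n_mb_out_errs) if a waited
// call returns without scfc_out set.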

// Use to wait for all threads to start.
static atomic_t n_started;
static atomic_t n_errs;
static atomic_t n_mb_in_errs;
static atomic_t n_mb_out_errs;
static atomic_t n_alloc_errs;
static bool scfdone;
static char *bangstr = "";

static DEFINE_TORTURE_RANDOM_PERCPU(scf_torture_rand);

extern void resched_cpu(int cpu); // An alternative IPI vector.
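// Note that resched_cpu() is exercised only when this test is built into
// the kernel; see the IS_BUILTIN(CONFIG_SCF_TORTURE_TEST) checks below.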

// Print torture statistics.  Caller must ensure serialization.
static void scf_torture_stats_print(void)
{
	int cpu;
	int i;
	long long invoked_count = 0;
	bool isdone = READ_ONCE(scfdone);
	struct scf_statistics scfs = {};

	for_each_possible_cpu(cpu)
		invoked_count += data_race(per_cpu(scf_invoked_count, cpu));
	for (i = 0; i < nthreads; i++) {
		scfs.n_resched += scf_stats_p[i].n_resched;
		scfs.n_single += scf_stats_p[i].n_single;
		scfs.n_single_ofl += scf_stats_p[i].n_single_ofl;
		scfs.n_single_rpc += scf_stats_p[i].n_single_rpc;
		scfs.n_single_wait += scf_stats_p[i].n_single_wait;
		scfs.n_single_wait_ofl += scf_stats_p[i].n_single_wait_ofl;
		scfs.n_many += scf_stats_p[i].n_many;
		scfs.n_many_wait += scf_stats_p[i].n_many_wait;
		scfs.n_all += scf_stats_p[i].n_all;
		scfs.n_all_wait += scf_stats_p[i].n_all_wait;
	}
	if (atomic_read(&n_errs) || atomic_read(&n_mb_in_errs) ||
	    atomic_read(&n_mb_out_errs) ||
	    (!IS_ENABLED(CONFIG_KASAN) && atomic_read(&n_alloc_errs)))
		bangstr = "!!! ";
	pr_alert("%s %sscf_invoked_count %s: %lld resched: %lld single: %lld/%lld single_ofl: %lld/%lld single_rpc: %lld single_rpc_ofl: %lld many: %lld/%lld all: %lld/%lld ",
		 SCFTORT_FLAG, bangstr, isdone ? "VER" : "ver", invoked_count, scfs.n_resched,
		 scfs.n_single, scfs.n_single_wait, scfs.n_single_ofl, scfs.n_single_wait_ofl,
		 scfs.n_single_rpc, scfs.n_single_rpc_ofl,
		 scfs.n_many, scfs.n_many_wait, scfs.n_all, scfs.n_all_wait);
	torture_onoff_stats();
	pr_cont("ste: %d stnmie: %d stnmoe: %d staf: %d\n", atomic_read(&n_errs),
		atomic_read(&n_mb_in_errs), atomic_read(&n_mb_out_errs),
		atomic_read(&n_alloc_errs));
}
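
// Key to the pr_cont() abbreviations above: "ste" is n_errs (handler ran on
// the wrong CPU), "stnmie" is n_mb_in_errs, "stnmoe" is n_mb_out_errs, and
// "staf" is n_alloc_errs (GFP_ATOMIC allocation failures).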

// Periodically prints torture statistics, if periodic statistics printing
// was specified via the stat_interval module parameter.
static int
scf_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("scf_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		scf_torture_stats_print();
		torture_shutdown_absorb("scf_torture_stats");
	} while (!torture_must_stop());
	torture_kthread_stopping("scf_torture_stats");
	return 0;
}

// Add a primitive to the scf_sel_array[].
static void scf_sel_add(unsigned long weight, int prim, bool wait)
{
	struct scf_selector *scfsp = &scf_sel_array[scf_sel_array_len];

	// If no weight, if array would overflow, if computing three-place
	// percentages would overflow, or if the scf_prim_name[] array would
	// overflow, don't bother.  In the last three cases, complain.
	if (!weight ||
	    WARN_ON_ONCE(scf_sel_array_len >= ARRAY_SIZE(scf_sel_array)) ||
	    WARN_ON_ONCE(0 - 100000 * weight <= 100000 * scf_sel_totweight) ||
	    WARN_ON_ONCE(prim >= ARRAY_SIZE(scf_prim_name)))
		return;
	scf_sel_totweight += weight;
	scfsp->scfs_weight = scf_sel_totweight;
	scfsp->scfs_prim = prim;
	scfsp->scfs_wait = wait;
	scf_sel_array_len++;
}

// Dump out weighting percentages for scf_prim_name[] array.
static void scf_sel_dump(void)
{
	int i;
	unsigned long oldw = 0;
	struct scf_selector *scfsp;
	unsigned long w;

	for (i = 0; i < scf_sel_array_len; i++) {
		scfsp = &scf_sel_array[i];
		w = (scfsp->scfs_weight - oldw) * 100000 / scf_sel_totweight;
		pr_info("%s: %3lu.%03lu %s(%s)\n", __func__, w / 1000, w % 1000,
			scf_prim_name[scfsp->scfs_prim],
			scfsp->scfs_wait ? "wait" : "nowait");
		oldw = scfsp->scfs_weight;
	}
}

// Randomly pick a primitive and wait/nowait, based on weightings.
static struct scf_selector *scf_sel_rand(struct torture_random_state *trsp)
{
	int i;
	unsigned long w = torture_random(trsp) % (scf_sel_totweight + 1);

	for (i = 0; i < scf_sel_array_len; i++)
		if (scf_sel_array[i].scfs_weight >= w)
			return &scf_sel_array[i];
	WARN_ON_ONCE(1);
	return &scf_sel_array[0];
}
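
// Because scfs_weight values are cumulative, the search above selects each
// primitive with probability proportional to its scf_sel_add() weight.  For
// example, weights 2, 1, and 1 produce scfs_weight values of 2, 3, and 4,
// so w values 0-2 select the first primitive, 3 the second, and 4 the third.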

// Update statistics and occasionally burn up mass quantities of CPU time,
// if told to do so via scftorture.longwait.  Otherwise, occasionally burn
// a little bit.
static void scf_handler(void *scfc_in)
{
	int i;
	int j;
	unsigned long r = torture_random(this_cpu_ptr(&scf_torture_rand));
	struct scf_check *scfcp = scfc_in;

	if (likely(scfcp)) {
		WRITE_ONCE(scfcp->scfc_out, false); // For multiple receivers.
		if (WARN_ON_ONCE(unlikely(!READ_ONCE(scfcp->scfc_in))))
			atomic_inc(&n_mb_in_errs);
	}
	this_cpu_inc(scf_invoked_count);
	if (longwait <= 0) {
		if (!(r & 0xffc0)) {
			udelay(r & 0x3f);
			goto out;
		}
	}
	if (r & 0xfff)
		goto out;
	r = (r >> 12);
	if (longwait <= 0) {
		udelay((r & 0xff) + 1);
		goto out;
	}
	r = r % longwait + 1;
	for (i = 0; i < r; i++) {
		for (j = 0; j < 1000; j++) {
			udelay(1000);
			cpu_relax();
		}
	}
out:
	if (unlikely(!scfcp))
		return;
	if (scfcp->scfc_wait) {
		WRITE_ONCE(scfcp->scfc_out, true);
		if (scfcp->scfc_rpc)
			complete(&scfcp->scfc_completion);
	} else {
		kfree(scfcp);
	}
}
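
// Ownership rule enforced above: no-wait handlers free scfcp themselves,
// while for waited and RPC invocations scftorture_invoke_one() retains
// ownership and frees scfcp only after checking scfc_out.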

// As above, but check for correct CPU.
static void scf_handler_1(void *scfc_in)
{
	struct scf_check *scfcp = scfc_in;

	if (likely(scfcp) && WARN_ONCE(smp_processor_id() != scfcp->scfc_cpu,
				       "%s: Wanted CPU %d got CPU %d\n",
				       __func__, scfcp->scfc_cpu, smp_processor_id())) {
		atomic_inc(&n_errs);
	}
	scf_handler(scfcp);
}

// Randomly do an smp_call_function*() invocation.
static void scftorture_invoke_one(struct scf_statistics *scfp, struct torture_random_state *trsp)
{
	bool allocfail = false;
	uintptr_t cpu;
	int ret = 0;
	struct scf_check *scfcp = NULL;
	struct scf_selector *scfsp = scf_sel_rand(trsp);

	if (use_cpus_read_lock)
		cpus_read_lock();
	else
		preempt_disable();
	if (scfsp->scfs_prim == SCF_PRIM_SINGLE || scfsp->scfs_wait) {
		scfcp = kmalloc(sizeof(*scfcp), GFP_ATOMIC);
		if (!scfcp) {
			WARN_ON_ONCE(!IS_ENABLED(CONFIG_KASAN));
			atomic_inc(&n_alloc_errs);
			allocfail = true;
		} else {
			scfcp->scfc_cpu = -1;
			scfcp->scfc_wait = scfsp->scfs_wait;
			scfcp->scfc_out = false;
			scfcp->scfc_rpc = false;
		}
	}
	switch (scfsp->scfs_prim) {
	case SCF_PRIM_RESCHED:
		if (IS_BUILTIN(CONFIG_SCF_TORTURE_TEST)) {
			cpu = torture_random(trsp) % nr_cpu_ids;
			scfp->n_resched++;
			resched_cpu(cpu);
			this_cpu_inc(scf_invoked_count);
		}
		break;
	case SCF_PRIM_SINGLE:
		cpu = torture_random(trsp) % nr_cpu_ids;
		if (scfsp->scfs_wait)
			scfp->n_single_wait++;
		else
			scfp->n_single++;
		if (scfcp) {
			scfcp->scfc_cpu = cpu;
			barrier(); // Prevent race-reduction compiler optimizations.
			scfcp->scfc_in = true;
		}
		ret = smp_call_function_single(cpu, scf_handler_1, (void *)scfcp, scfsp->scfs_wait);
		if (ret) {
			if (scfsp->scfs_wait)
				scfp->n_single_wait_ofl++;
			else
				scfp->n_single_ofl++;
			kfree(scfcp);
			scfcp = NULL;
		}
		break;
	case SCF_PRIM_SINGLE_RPC:
		if (!scfcp)
			break;
		cpu = torture_random(trsp) % nr_cpu_ids;
		scfp->n_single_rpc++;
		scfcp->scfc_cpu = cpu;
		scfcp->scfc_wait = true;
		init_completion(&scfcp->scfc_completion);
		scfcp->scfc_rpc = true;
		barrier(); // Prevent race-reduction compiler optimizations.
		scfcp->scfc_in = true;
		ret = smp_call_function_single(cpu, scf_handler_1, (void *)scfcp, 0);
		if (!ret) {
			if (use_cpus_read_lock)
				cpus_read_unlock();
			else
				preempt_enable();
			wait_for_completion(&scfcp->scfc_completion);
			if (use_cpus_read_lock)
				cpus_read_lock();
			else
				preempt_disable();
		} else {
			scfp->n_single_rpc_ofl++;
			kfree(scfcp);
			scfcp = NULL;
		}
		break;
	case SCF_PRIM_MANY:
		if (scfsp->scfs_wait)
			scfp->n_many_wait++;
		else
			scfp->n_many++;
		if (scfcp) {
			barrier(); // Prevent race-reduction compiler optimizations.
			scfcp->scfc_in = true;
		}
		smp_call_function_many(cpu_online_mask, scf_handler, scfcp, scfsp->scfs_wait);
		break;
	case SCF_PRIM_ALL:
		if (scfsp->scfs_wait)
			scfp->n_all_wait++;
		else
			scfp->n_all++;
		if (scfcp) {
			barrier(); // Prevent race-reduction compiler optimizations.
			scfcp->scfc_in = true;
		}
		smp_call_function(scf_handler, scfcp, scfsp->scfs_wait);
		break;
	default:
		WARN_ON_ONCE(1);
		if (scfcp)
			scfcp->scfc_out = true;
	}
	if (scfcp && scfsp->scfs_wait) {
		if (WARN_ON_ONCE((num_online_cpus() > 1 || scfsp->scfs_prim == SCF_PRIM_SINGLE) &&
				 !scfcp->scfc_out)) {
			pr_warn("%s: Memory-ordering failure, scfs_prim: %d.\n", __func__, scfsp->scfs_prim);
			atomic_inc(&n_mb_out_errs); // Leak rather than trash!
		} else {
			kfree(scfcp);
		}
		barrier(); // Prevent race-reduction compiler optimizations.
	}
	if (use_cpus_read_lock)
		cpus_read_unlock();
	else
		preempt_enable();
	if (allocfail)
		schedule_timeout_idle((1 + longwait) * HZ); // Let no-wait handlers complete.
	else if (!(torture_random(trsp) & 0xfff))
		schedule_timeout_uninterruptible(1);
}
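
// A failed GFP_ATOMIC allocation above likely means that no-wait handlers
// are not keeping up with their kfree() calls, so the allocfail path idles
// long enough for outstanding handlers to complete.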

// SCF test kthread.  Repeatedly invokes members of the
// smp_call_function() family of functions.
static int scftorture_invoker(void *arg)
{
	int cpu;
	int curcpu;
	DEFINE_TORTURE_RANDOM(rand);
	struct scf_statistics *scfp = (struct scf_statistics *)arg;
	bool was_offline = false;

	VERBOSE_SCFTORTOUT("scftorture_invoker %d: task started", scfp->cpu);
	cpu = scfp->cpu % nr_cpu_ids;
	WARN_ON_ONCE(set_cpus_allowed_ptr(current, cpumask_of(cpu)));
	set_user_nice(current, MAX_NICE);
	if (holdoff)
		schedule_timeout_interruptible(holdoff * HZ);

	VERBOSE_SCFTORTOUT("scftorture_invoker %d: Waiting for all SCF torturers from cpu %d", scfp->cpu, raw_smp_processor_id());

	// Make sure that the CPU is affinitized appropriately during testing.
	curcpu = raw_smp_processor_id();
	WARN_ONCE(curcpu != scfp->cpu % nr_cpu_ids,
		  "%s: Wanted CPU %d, running on %d, nr_cpu_ids = %d\n",
		  __func__, scfp->cpu, curcpu, nr_cpu_ids);

	if (!atomic_dec_return(&n_started))
		while (atomic_read_acquire(&n_started)) {
			if (torture_must_stop()) {
				VERBOSE_SCFTORTOUT("scftorture_invoker %d ended before starting", scfp->cpu);
				goto end;
			}
			schedule_timeout_uninterruptible(1);
		}

	VERBOSE_SCFTORTOUT("scftorture_invoker %d started", scfp->cpu);

	do {
		scftorture_invoke_one(scfp, &rand);
		while (cpu_is_offline(cpu) && !torture_must_stop()) {
			schedule_timeout_interruptible(HZ / 5);
			was_offline = true;
		}
		if (was_offline) {
			set_cpus_allowed_ptr(current, cpumask_of(cpu));
			was_offline = false;
		}
		cond_resched();
		stutter_wait("scftorture_invoker");
	} while (!torture_must_stop());

	VERBOSE_SCFTORTOUT("scftorture_invoker %d ended", scfp->cpu);
end:
	torture_kthread_stopping("scftorture_invoker");
	return 0;
}

static void
scftorture_print_module_parms(const char *tag)
{
	pr_alert(SCFTORT_FLAG
		 "--- %s:  verbose=%d holdoff=%d longwait=%d nthreads=%d onoff_holdoff=%d onoff_interval=%d shutdown_secs=%d stat_interval=%d stutter=%d use_cpus_read_lock=%d, weight_resched=%d, weight_single=%d, weight_single_rpc=%d, weight_single_wait=%d, weight_many=%d, weight_many_wait=%d, weight_all=%d, weight_all_wait=%d\n", tag,
		 verbose, holdoff, longwait, nthreads, onoff_holdoff, onoff_interval, shutdown_secs, stat_interval, stutter, use_cpus_read_lock, weight_resched, weight_single, weight_single_rpc, weight_single_wait, weight_many, weight_many_wait, weight_all, weight_all_wait);
}

static void scf_cleanup_handler(void *unused)
{
}

static void scf_torture_cleanup(void)
{
	int i;

	if (torture_cleanup_begin())
		return;

	WRITE_ONCE(scfdone, true);
	if (nthreads && scf_stats_p)
		for (i = 0; i < nthreads; i++)
			torture_stop_kthread("scftorture_invoker", scf_stats_p[i].task);
	else
		goto end;
	smp_call_function(scf_cleanup_handler, NULL, 0);
	torture_stop_kthread(scf_torture_stats, scf_torture_stats_task);
	scf_torture_stats_print(); // -After- the stats thread is stopped!
	kfree(scf_stats_p); // -After- the last stats print has completed!
	scf_stats_p = NULL;

	if (atomic_read(&n_errs) || atomic_read(&n_mb_in_errs) || atomic_read(&n_mb_out_errs))
		scftorture_print_module_parms("End of test: FAILURE");
	else if (torture_onoff_failures())
		scftorture_print_module_parms("End of test: LOCK_HOTPLUG");
	else
		scftorture_print_module_parms("End of test: SUCCESS");

end:
	torture_cleanup_end();
}

static int __init scf_torture_init(void)
{
	long i;
	int firsterr = 0;
	unsigned long weight_resched1 = weight_resched;
	unsigned long weight_single1 = weight_single;
	unsigned long weight_single_rpc1 = weight_single_rpc;
	unsigned long weight_single_wait1 = weight_single_wait;
	unsigned long weight_many1 = weight_many;
	unsigned long weight_many_wait1 = weight_many_wait;
	unsigned long weight_all1 = weight_all;
	unsigned long weight_all_wait1 = weight_all_wait;

	if (!torture_init_begin(SCFTORT_STRING, verbose))
		return -EBUSY;

	scftorture_print_module_parms("Start of test");

	// A weight of -1 requests the default, but defaults apply only when
	// no weight is positive; otherwise, -1 weights are treated as zero.
	if (weight_resched <= 0 &&
	    weight_single <= 0 && weight_single_rpc <= 0 && weight_single_wait <= 0 &&
	    weight_many <= 0 && weight_many_wait <= 0 &&
	    weight_all <= 0 && weight_all_wait <= 0) {
		weight_resched1 = weight_resched == 0 ? 0 : 2 * nr_cpu_ids;
		weight_single1 = weight_single == 0 ? 0 : 2 * nr_cpu_ids;
		weight_single_rpc1 = weight_single_rpc == 0 ? 0 : 2 * nr_cpu_ids;
		weight_single_wait1 = weight_single_wait == 0 ? 0 : 2 * nr_cpu_ids;
		weight_many1 = weight_many == 0 ? 0 : 2;
		weight_many_wait1 = weight_many_wait == 0 ? 0 : 2;
		weight_all1 = weight_all == 0 ? 0 : 1;
		weight_all_wait1 = weight_all_wait == 0 ? 0 : 1;
	} else {
		if (weight_resched == -1)
			weight_resched1 = 0;
		if (weight_single == -1)
			weight_single1 = 0;
		if (weight_single_rpc == -1)
			weight_single_rpc1 = 0;
		if (weight_single_wait == -1)
			weight_single_wait1 = 0;
		if (weight_many == -1)
			weight_many1 = 0;
		if (weight_many_wait == -1)
			weight_many_wait1 = 0;
		if (weight_all == -1)
			weight_all1 = 0;
		if (weight_all_wait == -1)
			weight_all_wait1 = 0;
	}
	if (weight_resched1 == 0 && weight_single1 == 0 && weight_single_rpc1 == 0 &&
	    weight_single_wait1 == 0 && weight_many1 == 0 && weight_many_wait1 == 0 &&
	    weight_all1 == 0 && weight_all_wait1 == 0) {
		SCFTORTOUT_ERRSTRING("all zero weights makes no sense");
		firsterr = -EINVAL;
		goto unwind;
	}
	if (IS_BUILTIN(CONFIG_SCF_TORTURE_TEST))
		scf_sel_add(weight_resched1, SCF_PRIM_RESCHED, false);
	else if (weight_resched1)
		SCFTORTOUT_ERRSTRING("built as module, weight_resched ignored");
	scf_sel_add(weight_single1, SCF_PRIM_SINGLE, false);
	scf_sel_add(weight_single_rpc1, SCF_PRIM_SINGLE_RPC, true);
	scf_sel_add(weight_single_wait1, SCF_PRIM_SINGLE, true);
	scf_sel_add(weight_many1, SCF_PRIM_MANY, false);
	scf_sel_add(weight_many_wait1, SCF_PRIM_MANY, true);
	scf_sel_add(weight_all1, SCF_PRIM_ALL, false);
	scf_sel_add(weight_all_wait1, SCF_PRIM_ALL, true);
	scf_sel_dump();

	if (onoff_interval > 0) {
		firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval, NULL);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (shutdown_secs > 0) {
		firsterr = torture_shutdown_init(shutdown_secs, scf_torture_cleanup);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (stutter > 0) {
		firsterr = torture_stutter_init(stutter, stutter);
		if (torture_init_error(firsterr))
			goto unwind;
	}

	// Worker tasks invoking smp_call_function().
	if (nthreads < 0)
		nthreads = num_online_cpus();
	scf_stats_p = kcalloc(nthreads, sizeof(scf_stats_p[0]), GFP_KERNEL);
	if (!scf_stats_p) {
		SCFTORTOUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}

	VERBOSE_SCFTORTOUT("Starting %d smp_call_function() threads", nthreads);

	atomic_set(&n_started, nthreads);
	for (i = 0; i < nthreads; i++) {
		scf_stats_p[i].cpu = i;
		firsterr = torture_create_kthread(scftorture_invoker, (void *)&scf_stats_p[i],
						  scf_stats_p[i].task);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (stat_interval > 0) {
		firsterr = torture_create_kthread(scf_torture_stats, NULL, scf_torture_stats_task);
		if (torture_init_error(firsterr))
			goto unwind;
	}

	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	scf_torture_cleanup();
	if (shutdown_secs) {
		WARN_ON(!IS_MODULE(CONFIG_SCF_TORTURE_TEST));
		kernel_power_off();
	}
	return firsterr;
}

module_init(scf_torture_init);
module_exit(scf_torture_cleanup);