1 // SPDX-License-Identifier: GPL-2.0
5 * Builtin regression testing command: ever-growing number of sanity tests
14 #include <sys/types.h>
25 #include <subcmd/parse-options.h>
26 #include <subcmd/run-command.h>
29 #include "util/rlimit.h"
30 #include "util/strbuf.h"
31 #include <linux/kernel.h>
32 #include <linux/string.h>
33 #include <subcmd/exec-cmd.h>
34 #include <linux/zalloc.h>
36 #include "tests-scripts.h"
39 * Command line option to run each test directly in this process rather than
40 * forking a child per test, making the tests easier to debug.
42 static bool dont_fork
;
43 /* Run the tests one after another, waiting for each to complete, rather than forking them in parallel. */
44 static bool sequential
;
/* Optional DSO path for tests to exercise (set via the --dso option). */
45 const char *dso_to_test
;
/* objdump binary used for disassembly; overridable via --objdump or the annotate.objdump config key. */
46 const char *test_objdump_path
= "objdump";
49 * List of architecture specific tests. Not a weak symbol as the array length is
50 * dependent on the initialization, as such GCC with LTO complains of
51 * conflicting definitions with a weak symbol.
53 #if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) || defined(__powerpc64__)
54 extern struct test_suite
*arch_tests
[];
56 static struct test_suite
*arch_tests
[] = {
61 static struct test_suite
*generic_tests
[] = {
62 &suite__vmlinux_matches_kallsyms
,
63 #ifdef HAVE_LIBTRACEEVENT
64 &suite__openat_syscall_event
,
65 &suite__openat_syscall_event_on_all_cpus
,
77 &suite__perf_evsel__roundtrip_name_test
,
78 #ifdef HAVE_LIBTRACEEVENT
79 &suite__perf_evsel__tp_sched_test
,
80 &suite__syscall_openat_tp_fields
,
85 &suite__bp_signal_overflow
,
86 &suite__bp_accounting
,
89 &suite__sw_clock_freq
,
91 &suite__sample_parsing
,
92 &suite__keep_tracking
,
93 &suite__parse_no_sample_id_all
,
95 &suite__mmap_thread_lookup
,
96 &suite__thread_maps_share
,
98 &suite__hists_cumulate
,
99 #ifdef HAVE_LIBTRACEEVENT
100 &suite__switch_tracking
,
102 &suite__fdarray__filter
,
103 &suite__fdarray__add
,
104 &suite__kmod_path__parse
,
106 &suite__session_topology
,
107 &suite__thread_map_synthesize
,
108 &suite__thread_map_remove
,
110 &suite__synthesize_stat_config
,
111 &suite__synthesize_stat
,
112 &suite__synthesize_stat_round
,
113 &suite__event_update
,
115 &suite__backward_ring_buffer
,
117 &suite__is_printable_array
,
118 &suite__bitmap_print
,
120 &suite__unit_number__scnprint
,
123 &suite__jit_write_elf
,
126 &suite__maps__merge_in
,
127 &suite__demangle_java
,
128 &suite__demangle_ocaml
,
129 &suite__parse_metric
,
130 &suite__pe_file_parsing
,
131 &suite__expand_cgroup_events
,
132 &suite__perf_time_to_tsc
,
135 &suite__event_groups
,
141 static struct test_workload
*workloads
[] = {
/*
 * Iterate over the workloads[] table, assigning each entry in turn to
 * 'workload'. Implemented with a GNU statement expression in the loop
 * condition so the assignment happens before each iteration's body.
 */
151 #define workloads__for_each(workload) \
152 for (unsigned i = 0; i < ARRAY_SIZE(workloads) && ({ workload = workloads[i]; 1; }); i++)
154 static int num_subtests(const struct test_suite
*t
)
162 while (t
->test_cases
[num
].name
)
168 static bool has_subtests(const struct test_suite
*t
)
170 return num_subtests(t
) > 1;
173 static const char *skip_reason(const struct test_suite
*t
, int subtest
)
178 return t
->test_cases
[subtest
>= 0 ? subtest
: 0].skip_reason
;
181 static const char *test_description(const struct test_suite
*t
, int subtest
)
183 if (t
->test_cases
&& subtest
>= 0)
184 return t
->test_cases
[subtest
].desc
;
189 static test_fnptr
test_function(const struct test_suite
*t
, int subtest
)
192 return t
->test_cases
[0].run_case
;
194 return t
->test_cases
[subtest
].run_case
;
197 static bool test_exclusive(const struct test_suite
*t
, int subtest
)
200 return t
->test_cases
[0].exclusive
;
202 return t
->test_cases
[subtest
].exclusive
;
205 static bool perf_test__matches(const char *desc
, int curr
, int argc
, const char *argv
[])
212 for (i
= 0; i
< argc
; ++i
) {
214 long nr
= strtoul(argv
[i
], &end
, 10);
222 if (strcasestr(desc
, argv
[i
]))
230 struct child_process process
;
231 struct test_suite
*test
;
236 static jmp_buf run_test_jmp_buf
;
238 static void child_test_sig_handler(int sig
)
240 siglongjmp(run_test_jmp_buf
, sig
);
243 static int run_test_child(struct child_process
*process
)
245 const int signals
[] = {
246 SIGABRT
, SIGBUS
, SIGFPE
, SIGILL
, SIGINT
, SIGPIPE
, SIGQUIT
, SIGSEGV
, SIGTERM
,
248 struct child_test
*child
= container_of(process
, struct child_test
, process
);
251 err
= sigsetjmp(run_test_jmp_buf
, 1);
253 fprintf(stderr
, "\n---- unexpected signal (%d) ----\n", err
);
254 err
= err
> 0 ? -err
: -1;
258 for (size_t i
= 0; i
< ARRAY_SIZE(signals
); i
++)
259 signal(signals
[i
], child_test_sig_handler
);
261 pr_debug("--- start ---\n");
262 pr_debug("test child forked, pid %d\n", getpid());
263 err
= test_function(child
->test
, child
->subtest
)(child
->test
, child
->subtest
);
264 pr_debug("---- end(%d) ----\n", err
);
268 for (size_t i
= 0; i
< ARRAY_SIZE(signals
); i
++)
269 signal(signals
[i
], SIG_DFL
);
/* Pseudo result value passed to print_test_result() while a forked test is still in flight. */
273 #define TEST_RUNNING -3
275 static int print_test_result(struct test_suite
*t
, int i
, int subtest
, int result
, int width
,
278 if (has_subtests(t
)) {
279 int subw
= width
> 2 ? width
- 2 : width
;
281 pr_info("%3d.%1d: %-*s:", i
+ 1, subtest
+ 1, subw
, test_description(t
, subtest
));
283 pr_info("%3d: %-*s:", i
+ 1, width
, test_description(t
, subtest
));
287 color_fprintf(stderr
, PERF_COLOR_YELLOW
, " Running (%d active)\n", running
);
293 const char *reason
= skip_reason(t
, subtest
);
296 color_fprintf(stderr
, PERF_COLOR_YELLOW
, " Skip (%s)\n", reason
);
298 color_fprintf(stderr
, PERF_COLOR_YELLOW
, " Skip\n");
303 color_fprintf(stderr
, PERF_COLOR_RED
, " FAILED!\n");
310 static void finish_test(struct child_test
**child_tests
, int running_test
, int child_test_num
,
313 struct child_test
*child_test
= child_tests
[running_test
];
314 struct test_suite
*t
;
316 bool err_done
= false;
317 struct strbuf err_output
= STRBUF_INIT
;
318 int last_running
= -1;
321 if (child_test
== NULL
) {
322 /* Test wasn't started. */
325 t
= child_test
->test
;
326 i
= child_test
->test_num
;
327 subi
= child_test
->subtest
;
328 err
= child_test
->process
.err
;
330 * For test suites with subtests, display the suite name ahead of the
333 if (has_subtests(t
) && subi
== 0)
334 pr_info("%3d: %-*s:\n", i
+ 1, width
, test_description(t
, -1));
337 * Busy loop reading from the child's stdout/stderr that are set to be
338 * non-blocking until EOF.
341 fcntl(err
, F_SETFL
, O_NONBLOCK
);
344 pr_info("%3d.%1d: %s:\n", i
+ 1, subi
+ 1, test_description(t
, subi
));
346 pr_info("%3d: %s:\n", i
+ 1, test_description(t
, -1));
349 struct pollfd pfds
[1] = {
351 .events
= POLLIN
| POLLERR
| POLLHUP
| POLLNVAL
,
354 if (perf_use_color_default
) {
357 for (int y
= running_test
; y
< child_test_num
; y
++) {
358 if (child_tests
[y
] == NULL
)
360 if (check_if_command_finished(&child_tests
[y
]->process
) == 0)
363 if (running
!= last_running
) {
364 if (last_running
!= -1) {
366 * Erase "Running (.. active)" line
367 * printed before poll/sleep.
369 fprintf(debug_file(), PERF_COLOR_DELETE_LINE
);
371 print_test_result(t
, i
, subi
, TEST_RUNNING
, width
, running
);
372 last_running
= running
;
378 /* No child stderr to poll, sleep for 10ms for child to complete. */
381 /* Poll to avoid excessive spinning, timeout set for 100ms. */
382 poll(pfds
, ARRAY_SIZE(pfds
), /*timeout=*/100);
383 if (pfds
[0].revents
) {
387 len
= read(err
, buf
, sizeof(buf
) - 1);
392 strbuf_addstr(&err_output
, buf
);
397 err_done
= check_if_command_finished(&child_test
->process
);
399 if (perf_use_color_default
&& last_running
!= -1) {
400 /* Erase "Running (.. active)" line printed before poll/sleep. */
401 fprintf(debug_file(), PERF_COLOR_DELETE_LINE
);
403 /* Clean up child process. */
404 ret
= finish_command(&child_test
->process
);
405 if (verbose
> 1 || (verbose
== 1 && ret
== TEST_FAIL
))
406 fprintf(stderr
, "%s", err_output
.buf
);
408 strbuf_release(&err_output
);
409 print_test_result(t
, i
, subi
, ret
, width
, /*running=*/0);
412 zfree(&child_tests
[running_test
]);
415 static int start_test(struct test_suite
*test
, int i
, int subi
, struct child_test
**child
,
423 pr_debug("--- start ---\n");
424 err
= test_function(test
, subi
)(test
, subi
);
425 pr_debug("---- end ----\n");
426 print_test_result(test
, i
, subi
, err
, width
, /*running=*/0);
430 if (pass
== 1 && !sequential
&& test_exclusive(test
, subi
)) {
431 /* When parallel, skip exclusive tests on the first pass. */
434 if (pass
!= 1 && (sequential
|| !test_exclusive(test
, subi
))) {
435 /* Sequential and non-exclusive tests were run on the first pass. */
438 *child
= zalloc(sizeof(**child
));
442 (*child
)->test
= test
;
443 (*child
)->test_num
= i
;
444 (*child
)->subtest
= subi
;
445 (*child
)->process
.pid
= -1;
446 (*child
)->process
.no_stdin
= 1;
448 (*child
)->process
.no_stdout
= 1;
449 (*child
)->process
.no_stderr
= 1;
451 (*child
)->process
.stdout_to_stderr
= 1;
452 (*child
)->process
.out
= -1;
453 (*child
)->process
.err
= -1;
455 (*child
)->process
.no_exec_cmd
= run_test_child
;
456 if (sequential
|| pass
== 2) {
457 err
= start_command(&(*child
)->process
);
460 finish_test(child
, /*running_test=*/0, /*child_test_num=*/1, width
);
463 return start_command(&(*child
)->process
);
466 /* State outside of __cmd_test for the sake of the signal handler. */
468 static size_t num_tests
;
469 static struct child_test
**child_tests
;
470 static jmp_buf cmd_test_jmp_buf
;
472 static void cmd_test_sig_handler(int sig
)
474 siglongjmp(cmd_test_jmp_buf
, sig
);
477 static int __cmd_test(struct test_suite
**suites
, int argc
, const char *argv
[],
478 struct intlist
*skiplist
)
480 static int width
= 0;
483 for (struct test_suite
**t
= suites
; *t
; t
++) {
484 int len
= strlen(test_description(*t
, -1));
489 if (has_subtests(*t
)) {
490 for (int subi
= 0, subn
= num_subtests(*t
); subi
< subn
; subi
++) {
491 len
= strlen(test_description(*t
, subi
));
500 child_tests
= calloc(num_tests
, sizeof(*child_tests
));
504 err
= sigsetjmp(cmd_test_jmp_buf
, 1);
506 pr_err("\nSignal (%d) while running tests.\nTerminating tests with the same signal\n",
508 for (size_t x
= 0; x
< num_tests
; x
++) {
509 struct child_test
*child_test
= child_tests
[x
];
514 pr_debug3("Killing %d pid %d\n",
515 child_test
->test_num
+ 1,
516 child_test
->process
.pid
);
517 kill(child_test
->process
.pid
, err
);
521 signal(SIGINT
, cmd_test_sig_handler
);
522 signal(SIGTERM
, cmd_test_sig_handler
);
525 * In parallel mode pass 1 runs non-exclusive tests in parallel, pass 2
526 * runs the exclusive tests sequentially. In other modes all tests are
529 for (int pass
= 1; pass
<= 2; pass
++) {
530 int child_test_num
= 0;
533 for (struct test_suite
**t
= suites
; *t
; t
++) {
536 if (!perf_test__matches(test_description(*t
, -1), curr
, argc
, argv
)) {
538 * Test suite shouldn't be run based on
539 * description. See if subtest should.
543 for (int subi
= 0, subn
= num_subtests(*t
); subi
< subn
; subi
++) {
544 if (perf_test__matches(test_description(*t
, subi
),
553 if (intlist__find(skiplist
, i
)) {
554 pr_info("%3d: %-*s:", curr
+ 1, width
, test_description(*t
, -1));
555 color_fprintf(stderr
, PERF_COLOR_YELLOW
, " Skip (user override)\n");
559 if (!has_subtests(*t
)) {
560 err
= start_test(*t
, curr
, -1, &child_tests
[child_test_num
++],
566 for (int subi
= 0, subn
= num_subtests(*t
); subi
< subn
; subi
++) {
567 if (!perf_test__matches(test_description(*t
, subi
),
571 err
= start_test(*t
, curr
, subi
, &child_tests
[child_test_num
++],
578 /* Parallel mode starts tests but doesn't finish them. Do that now. */
579 for (size_t x
= 0; x
< num_tests
; x
++)
580 finish_test(child_tests
, x
, num_tests
, width
);
584 signal(SIGINT
, SIG_DFL
);
585 signal(SIGTERM
, SIG_DFL
);
587 pr_err("Internal test harness failure. Completing any started tests:\n:");
588 for (size_t x
= 0; x
< num_tests
; x
++)
589 finish_test(child_tests
, x
, num_tests
, width
);
595 static int perf_test__list(struct test_suite
**suites
, int argc
, const char **argv
)
599 for (struct test_suite
**t
= suites
; *t
; t
++) {
602 if (!perf_test__matches(test_description(*t
, -1), curr
, argc
, argv
))
605 pr_info("%3d: %s\n", i
, test_description(*t
, -1));
607 if (has_subtests(*t
)) {
608 int subn
= num_subtests(*t
);
611 for (subi
= 0; subi
< subn
; subi
++)
612 pr_info("%3d:%1d: %s\n", i
, subi
+ 1,
613 test_description(*t
, subi
));
619 static int workloads__fprintf_list(FILE *fp
)
621 struct test_workload
*twl
;
624 workloads__for_each(twl
)
625 printed
+= fprintf(fp
, "%s\n", twl
->name
);
630 static int run_workload(const char *work
, int argc
, const char **argv
)
632 struct test_workload
*twl
;
634 workloads__for_each(twl
) {
635 if (!strcmp(twl
->name
, work
))
636 return twl
->func(argc
, argv
);
639 pr_info("No workload found: %s\n", work
);
643 static int perf_test__config(const char *var
, const char *value
,
644 void *data __maybe_unused
)
646 if (!strcmp(var
, "annotate.objdump"))
647 test_objdump_path
= value
;
652 static struct test_suite
**build_suites(void)
655 * TODO: suites is static to avoid needing to clean up the scripts tests
656 * for leak sanitizer.
658 static struct test_suite
**suites
[] = {
663 struct test_suite
**result
;
664 struct test_suite
*t
;
665 size_t n
= 0, num_suites
= 0;
667 if (suites
[2] == NULL
)
668 suites
[2] = create_script_test_suites();
/* Walk every suite in the 2-D suites[] table; the inner walk stops at each sub-array's NULL terminator. */
670 #define for_each_test(t) \
671 for (size_t i = 0, j = 0; i < ARRAY_SIZE(suites); i++, j = 0) \
672 while ((t = suites[i][j++]) != NULL)
677 result
= calloc(num_suites
+ 1, sizeof(struct test_suite
*));
679 for (int pass
= 1; pass
<= 2; pass
++) {
681 bool exclusive
= false;
683 if (!has_subtests(t
)) {
684 exclusive
= test_exclusive(t
, -1);
686 for (int subi
= 0, subn
= num_subtests(t
); subi
< subn
; subi
++) {
687 if (test_exclusive(t
, subi
)) {
693 if ((!exclusive
&& pass
== 1) || (exclusive
&& pass
== 2))
701 int cmd_test(int argc
, const char **argv
)
703 const char *test_usage
[] = {
704 "perf test [<options>] [{list <test-name-fragment>|[<test-name-fragments>|<test-numbers>]}]",
707 const char *skip
= NULL
;
708 const char *workload
= NULL
;
709 bool list_workloads
= false;
710 const struct option test_options
[] = {
711 OPT_STRING('s', "skip", &skip
, "tests", "tests to skip"),
712 OPT_INCR('v', "verbose", &verbose
,
713 "be more verbose (show symbol address, etc)"),
714 OPT_BOOLEAN('F', "dont-fork", &dont_fork
,
715 "Do not fork for testcase"),
716 OPT_BOOLEAN('S', "sequential", &sequential
,
717 "Run the tests one after another rather than in parallel"),
718 OPT_STRING('w', "workload", &workload
, "work", "workload to run for testing, use '--list-workloads' to list the available ones."),
719 OPT_BOOLEAN(0, "list-workloads", &list_workloads
, "List the available builtin workloads to use with -w/--workload"),
720 OPT_STRING(0, "dso", &dso_to_test
, "dso", "dso to test"),
721 OPT_STRING(0, "objdump", &test_objdump_path
, "path",
722 "objdump binary to use for disassembly and annotations"),
725 const char * const test_subcommands
[] = { "list", NULL
};
726 struct intlist
*skiplist
= NULL
;
727 int ret
= hists__init();
728 struct test_suite
**suites
;
733 perf_config(perf_test__config
, NULL
);
735 /* Unbuffered output */
736 setvbuf(stdout
, NULL
, _IONBF
, 0);
738 argc
= parse_options_subcommand(argc
, argv
, test_options
, test_subcommands
, test_usage
, 0);
739 if (argc
>= 1 && !strcmp(argv
[0], "list")) {
740 suites
= build_suites();
741 ret
= perf_test__list(suites
, argc
- 1, argv
+ 1);
747 return run_workload(workload
, argc
, argv
);
749 if (list_workloads
) {
750 workloads__fprintf_list(stdout
);
757 symbol_conf
.priv_size
= sizeof(int);
758 symbol_conf
.try_vmlinux_path
= true;
761 if (symbol__init(NULL
) < 0)
765 skiplist
= intlist__new(skip
);
767 * Tests that create BPF maps, for instance, need more than the 64K
770 rlimit__bump_memlock();
772 suites
= build_suites();
773 ret
= __cmd_test(suites
, argc
, argv
, skiplist
);