8 #include <util/bpf-loader.h>
9 #include <util/evlist.h>
10 #include <linux/bpf.h>
11 #include <linux/filter.h>
12 #include <linux/kernel.h>
13 #include <api/fs/fs.h>
19 #define PERF_TEST_BPF_PATH "/sys/fs/bpf/perf_test"
21 #ifdef HAVE_LIBBPF_SUPPORT
23 static int epoll_pwait_loop(void)
27 /* Should fail NR_ITERS times */
28 for (i
= 0; i
< NR_ITERS
; i
++)
29 epoll_pwait(-(i
+ 1), NULL
, 0, 0, NULL
);
33 #ifdef HAVE_BPF_PROLOGUE
35 static int llseek_loop(void)
39 fds
[0] = open("/dev/null", O_RDONLY
);
40 fds
[1] = open("/dev/null", O_RDWR
);
42 if (fds
[0] < 0 || fds
[1] < 0)
45 for (i
= 0; i
< NR_ITERS
; i
++) {
46 lseek(fds
[i
% 2], i
, (i
/ 2) % 2 ? SEEK_CUR
: SEEK_SET
);
47 lseek(fds
[(i
+ 1) % 2], i
, (i
/ 2) % 2 ? SEEK_CUR
: SEEK_SET
);
57 enum test_llvm__testcase prog_id
;
60 const char *msg_compile_fail
;
61 const char *msg_load_fail
;
62 int (*target_func
)(void);
65 } bpf_testcase_table
[] = {
67 .prog_id
= LLVM_TESTCASE_BASE
,
68 .desc
= "Basic BPF filtering",
69 .name
= "[basic_bpf_test]",
70 .msg_compile_fail
= "fix 'perf test LLVM' first",
71 .msg_load_fail
= "load bpf object failed",
72 .target_func
= &epoll_pwait_loop
,
73 .expect_result
= (NR_ITERS
+ 1) / 2,
76 .prog_id
= LLVM_TESTCASE_BASE
,
77 .desc
= "BPF pinning",
78 .name
= "[bpf_pinning]",
79 .msg_compile_fail
= "fix kbuild first",
80 .msg_load_fail
= "check your vmlinux setting?",
81 .target_func
= &epoll_pwait_loop
,
82 .expect_result
= (NR_ITERS
+ 1) / 2,
85 #ifdef HAVE_BPF_PROLOGUE
87 .prog_id
= LLVM_TESTCASE_BPF_PROLOGUE
,
88 .desc
= "BPF prologue generation",
89 .name
= "[bpf_prologue_test]",
90 .msg_compile_fail
= "fix kbuild first",
91 .msg_load_fail
= "check your vmlinux setting?",
92 .target_func
= &llseek_loop
,
93 .expect_result
= (NR_ITERS
+ 1) / 4,
97 .prog_id
= LLVM_TESTCASE_BPF_RELOCATION
,
98 .desc
= "BPF relocation checker",
99 .name
= "[bpf_relocation_test]",
100 .msg_compile_fail
= "fix 'perf test LLVM' first",
101 .msg_load_fail
= "libbpf error when dealing with relocation",
105 static int do_test(struct bpf_object
*obj
, int (*func
)(void),
108 struct record_opts opts
= {
115 .default_interval
= 1,
119 char sbuf
[STRERR_BUFSIZE
];
120 struct perf_evlist
*evlist
;
121 int i
, ret
= TEST_FAIL
, err
= 0, count
= 0;
123 struct parse_events_state parse_state
;
124 struct parse_events_error parse_error
;
126 bzero(&parse_error
, sizeof(parse_error
));
127 bzero(&parse_state
, sizeof(parse_state
));
128 parse_state
.error
= &parse_error
;
129 INIT_LIST_HEAD(&parse_state
.list
);
131 err
= parse_events_load_bpf_obj(&parse_state
, &parse_state
.list
, obj
, NULL
);
132 if (err
|| list_empty(&parse_state
.list
)) {
133 pr_debug("Failed to add events selected by BPF\n");
137 snprintf(pid
, sizeof(pid
), "%d", getpid());
138 pid
[sizeof(pid
) - 1] = '\0';
139 opts
.target
.tid
= opts
.target
.pid
= pid
;
141 /* Instead of perf_evlist__new_default, don't add default events */
142 evlist
= perf_evlist__new();
144 pr_debug("Not enough memory to create evlist\n");
148 err
= perf_evlist__create_maps(evlist
, &opts
.target
);
150 pr_debug("Not enough memory to create thread/cpu maps\n");
151 goto out_delete_evlist
;
154 perf_evlist__splice_list_tail(evlist
, &parse_state
.list
);
155 evlist
->nr_groups
= parse_state
.nr_groups
;
157 perf_evlist__config(evlist
, &opts
, NULL
);
159 err
= perf_evlist__open(evlist
);
161 pr_debug("perf_evlist__open: %s\n",
162 str_error_r(errno
, sbuf
, sizeof(sbuf
)));
163 goto out_delete_evlist
;
166 err
= perf_evlist__mmap(evlist
, opts
.mmap_pages
);
168 pr_debug("perf_evlist__mmap: %s\n",
169 str_error_r(errno
, sbuf
, sizeof(sbuf
)));
170 goto out_delete_evlist
;
173 perf_evlist__enable(evlist
);
175 perf_evlist__disable(evlist
);
177 for (i
= 0; i
< evlist
->nr_mmaps
; i
++) {
178 union perf_event
*event
;
180 while ((event
= perf_evlist__mmap_read(evlist
, i
)) != NULL
) {
181 const u32 type
= event
->header
.type
;
183 if (type
== PERF_RECORD_SAMPLE
)
188 if (count
!= expect
) {
189 pr_debug("BPF filter result incorrect, expected %d, got %d samples\n", expect
, count
);
190 goto out_delete_evlist
;
196 perf_evlist__delete(evlist
);
/*
 * Build a bpf_object from an in-memory ELF buffer via the perf BPF
 * loader. Returns the object on success, NULL on failure (after logging).
 *
 * NOTE(review): the error-check branch and returns were missing from the
 * corrupted extraction; bpf__prepare_load_buffer() returns an ERR_PTR on
 * failure upstream, hence the IS_ERR() check — confirm against upstream
 * tools/perf/tests/bpf.c.
 */
static struct bpf_object *
prepare_bpf(void *obj_buf, size_t obj_buf_sz, const char *name)
{
	struct bpf_object *obj;

	obj = bpf__prepare_load_buffer(obj_buf, obj_buf_sz, name);
	if (IS_ERR(obj)) {
		pr_debug("Compile BPF program failed.\n");
		return NULL;
	}

	return obj;
}
213 static int __test__bpf(int idx
)
218 struct bpf_object
*obj
;
220 ret
= test_llvm__fetch_bpf_obj(&obj_buf
, &obj_buf_sz
,
221 bpf_testcase_table
[idx
].prog_id
,
223 if (ret
!= TEST_OK
|| !obj_buf
|| !obj_buf_sz
) {
224 pr_debug("Unable to get BPF object, %s\n",
225 bpf_testcase_table
[idx
].msg_compile_fail
);
232 obj
= prepare_bpf(obj_buf
, obj_buf_sz
,
233 bpf_testcase_table
[idx
].name
);
234 if ((!!bpf_testcase_table
[idx
].target_func
) != (!!obj
)) {
236 pr_debug("Fail to load BPF object: %s\n",
237 bpf_testcase_table
[idx
].msg_load_fail
);
239 pr_debug("Success unexpectedly: %s\n",
240 bpf_testcase_table
[idx
].msg_load_fail
);
247 bpf_testcase_table
[idx
].target_func
,
248 bpf_testcase_table
[idx
].expect_result
);
251 if (bpf_testcase_table
[idx
].pin
) {
254 if (!bpf_fs__mount()) {
255 pr_debug("BPF filesystem not mounted\n");
259 err
= mkdir(PERF_TEST_BPF_PATH
, 0777);
260 if (err
&& errno
!= EEXIST
) {
261 pr_debug("Failed to make perf_test dir: %s\n",
266 if (bpf_object__pin(obj
, PERF_TEST_BPF_PATH
))
268 if (rm_rf(PERF_TEST_BPF_PATH
))
278 int test__bpf_subtest_get_nr(void)
280 return (int)ARRAY_SIZE(bpf_testcase_table
);
283 const char *test__bpf_subtest_get_desc(int i
)
285 if (i
< 0 || i
>= (int)ARRAY_SIZE(bpf_testcase_table
))
287 return bpf_testcase_table
[i
].desc
;
290 static int check_env(void)
293 unsigned int kver_int
;
294 char license
[] = "GPL";
296 struct bpf_insn insns
[] = {
297 BPF_MOV64_IMM(BPF_REG_0
, 1),
301 err
= fetch_kernel_version(&kver_int
, NULL
, 0);
303 pr_debug("Unable to get kernel version\n");
307 err
= bpf_load_program(BPF_PROG_TYPE_KPROBE
, insns
,
308 sizeof(insns
) / sizeof(insns
[0]),
309 license
, kver_int
, NULL
, 0);
311 pr_err("Missing basic BPF support, skip this test: %s\n",
320 int test__bpf(struct test
*test __maybe_unused
, int i
)
324 if (i
< 0 || i
>= (int)ARRAY_SIZE(bpf_testcase_table
))
327 if (geteuid() != 0) {
328 pr_debug("Only root can run BPF test\n");
335 err
= __test__bpf(i
);
340 int test__bpf_subtest_get_nr(void)
345 const char *test__bpf_subtest_get_desc(int i __maybe_unused
)
350 int test__bpf(struct test
*test __maybe_unused
, int i __maybe_unused
)
352 pr_debug("Skip BPF test because BPF support is not compiled\n");