8 #include <util/bpf-loader.h>
9 #include <util/evlist.h>
10 #include <linux/bpf.h>
11 #include <linux/filter.h>
12 #include <linux/kernel.h>
13 #include <api/fs/fs.h>
19 #define PERF_TEST_BPF_PATH "/sys/fs/bpf/perf_test"
21 #ifdef HAVE_LIBBPF_SUPPORT
23 static int epoll_pwait_loop(void)
27 /* Should fail NR_ITERS times */
28 for (i
= 0; i
< NR_ITERS
; i
++)
29 epoll_pwait(-(i
+ 1), NULL
, 0, 0, NULL
);
33 #ifdef HAVE_BPF_PROLOGUE
35 static int llseek_loop(void)
39 fds
[0] = open("/dev/null", O_RDONLY
);
40 fds
[1] = open("/dev/null", O_RDWR
);
42 if (fds
[0] < 0 || fds
[1] < 0)
45 for (i
= 0; i
< NR_ITERS
; i
++) {
46 lseek(fds
[i
% 2], i
, (i
/ 2) % 2 ? SEEK_CUR
: SEEK_SET
);
47 lseek(fds
[(i
+ 1) % 2], i
, (i
/ 2) % 2 ? SEEK_CUR
: SEEK_SET
);
/*
 * Table of BPF subtests. Each entry names an LLVM testcase (the BPF
 * object to compile), a workload to run while the object is attached,
 * and the number of samples the filter is expected to let through.
 * NOTE(review): field layout reconstructed from fragments — the struct
 * also carries expect_result and a pin flag used by __test__bpf().
 */
static struct {
	enum test_llvm__testcase prog_id;	/* which LLVM test object to build */
	const char *desc;			/* human-readable subtest description */
	const char *name;			/* evsel name used when loading the object */
	const char *msg_compile_fail;		/* hint printed when compilation fails */
	const char *msg_load_fail;		/* hint printed when object load fails */
	int (*target_func)(void);		/* workload to run; NULL = load must fail */
	int expect_result;			/* expected PERF_RECORD_SAMPLE count */
	bool pin;				/* also exercise bpffs pinning if true */
} bpf_testcase_table[] = {
	{
		.prog_id	  = LLVM_TESTCASE_BASE,
		.desc		  = "Basic BPF filtering",
		.name		  = "[basic_bpf_test]",
		.msg_compile_fail = "fix 'perf test LLVM' first",
		.msg_load_fail	  = "load bpf object failed",
		.target_func	  = &epoll_pwait_loop,
		/* Filter passes every other event: half of NR_ITERS, rounded up. */
		.expect_result	  = (NR_ITERS + 1) / 2,
	},
	{
		.prog_id	  = LLVM_TESTCASE_BASE,
		.desc		  = "BPF pinning",
		.name		  = "[bpf_pinning]",
		.msg_compile_fail = "fix kbuild first",
		.msg_load_fail	  = "check your vmlinux setting?",
		.target_func	  = &epoll_pwait_loop,
		.expect_result	  = (NR_ITERS + 1) / 2,
		.pin		  = true,
	},
#ifdef HAVE_BPF_PROLOGUE
	{
		.prog_id	  = LLVM_TESTCASE_BPF_PROLOGUE,
		.desc		  = "BPF prologue generation",
		.name		  = "[bpf_prologue_test]",
		.msg_compile_fail = "fix kbuild first",
		.msg_load_fail	  = "check your vmlinux setting?",
		.target_func	  = &llseek_loop,
		/* llseek_loop does two lseeks per iter; filter passes 1 in 4. */
		.expect_result	  = (NR_ITERS + 1) / 4,
	},
#endif
	{
		/* Negative test: object with bad relocation must FAIL to load. */
		.prog_id	  = LLVM_TESTCASE_BPF_RELOCATION,
		.desc		  = "BPF relocation checker",
		.name		  = "[bpf_relocation_test]",
		.msg_compile_fail = "fix 'perf test LLVM' first",
		.msg_load_fail	  = "libbpf error when dealing with relocation",
		/* no target_func: success loading would itself be a failure */
	},
};
/*
 * Attach the events described by @obj to the current process, run the
 * workload @func, then count PERF_RECORD_SAMPLE events in the mmap ring
 * buffers and compare against @expect.
 *
 * Returns TEST_OK on success, TEST_FAIL otherwise.
 * NOTE(review): body reconstructed around extraction gaps; the opts
 * initializer and some if-conditions were not visible — verify against
 * the upstream file.
 */
static int do_test(struct bpf_object *obj, int (*func)(void),
		   int expect)
{
	struct record_opts opts = {
		.target = {
			.uid = UINT_MAX,
			.uses_mmap = true,
		},
		.freq	      = 0,
		.mmap_pages   = 256,
		/* sample every event rather than at a frequency */
		.default_interval = 1,
	};

	char pid[16];
	char sbuf[STRERR_BUFSIZE];
	struct perf_evlist *evlist;
	int i, ret = TEST_FAIL, err = 0, count = 0;

	struct parse_events_state parse_state;
	struct parse_events_error parse_error;

	bzero(&parse_error, sizeof(parse_error));
	bzero(&parse_state, sizeof(parse_state));
	parse_state.error = &parse_error;
	INIT_LIST_HEAD(&parse_state.list);

	/* Turn the BPF object's probes into evsels on parse_state.list. */
	err = parse_events_load_bpf_obj(&parse_state, &parse_state.list, obj, NULL);
	if (err || list_empty(&parse_state.list)) {
		pr_debug("Failed to add events selected by BPF\n");
		return TEST_FAIL;
	}

	/* Target this very process so the workload below generates events. */
	snprintf(pid, sizeof(pid), "%d", getpid());
	pid[sizeof(pid) - 1] = '\0';
	opts.target.tid = opts.target.pid = pid;

	/* Instead of perf_evlist__new_default, don't add default events */
	evlist = perf_evlist__new();
	if (!evlist) {
		pr_debug("Not enough memory to create evlist\n");
		return TEST_FAIL;
	}

	err = perf_evlist__create_maps(evlist, &opts.target);
	if (err < 0) {
		pr_debug("Not enough memory to create thread/cpu maps\n");
		goto out_delete_evlist;
	}

	/* Move the parsed evsels onto the evlist (transfers ownership). */
	perf_evlist__splice_list_tail(evlist, &parse_state.list);
	evlist->nr_groups = parse_state.nr_groups;

	perf_evlist__config(evlist, &opts, NULL);

	err = perf_evlist__open(evlist);
	if (err < 0) {
		pr_debug("perf_evlist__open: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	err = perf_evlist__mmap(evlist, opts.mmap_pages);
	if (err < 0) {
		pr_debug("perf_evlist__mmap: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	/* Events are only recorded while enabled; run the workload inside. */
	perf_evlist__enable(evlist);
	(*func)();
	perf_evlist__disable(evlist);

	/* Drain every mmap ring and count the samples that passed the filter. */
	for (i = 0; i < evlist->nr_mmaps; i++) {
		union perf_event *event;
		struct perf_mmap *md;

		md = &evlist->mmap[i];
		if (perf_mmap__read_init(md) < 0)
			continue;	/* ring empty or unreadable; skip it */

		while ((event = perf_mmap__read_event(md)) != NULL) {
			const u32 type = event->header.type;

			if (type == PERF_RECORD_SAMPLE)
				count ++;
		}
		perf_mmap__read_done(md);
	}

	if (count != expect) {
		pr_debug("BPF filter result incorrect, expected %d, got %d samples\n", expect, count);
		goto out_delete_evlist;
	}

	ret = TEST_OK;

out_delete_evlist:
	/* Deletes the evlist and the evsels spliced into it above. */
	perf_evlist__delete(evlist);
	return ret;
}
/*
 * Build a struct bpf_object from an in-memory ELF buffer.
 *
 * @obj_buf/@obj_buf_sz: compiled BPF object image (from test_llvm).
 * @name: identifier used for the loaded object.
 *
 * Returns the object on success, NULL on failure.
 * NOTE(review): the error-check condition was lost in extraction;
 * presumably bpf__prepare_load_buffer() returns an ERR_PTR — confirm.
 */
static struct bpf_object *
prepare_bpf(void *obj_buf, size_t obj_buf_sz, const char *name)
{
	struct bpf_object *obj;

	obj = bpf__prepare_load_buffer(obj_buf, obj_buf_sz, name);
	if (IS_ERR(obj)) {
		pr_debug("Compile BPF program failed.\n");
		return NULL;
	}
	return obj;
}
/*
 * Run subtest @idx from bpf_testcase_table: fetch the compiled BPF
 * object, load it, run the workload/verify sample counts via do_test(),
 * and optionally exercise bpffs pinning.
 *
 * Returns TEST_OK / TEST_SKIP / TEST_FAIL.
 * NOTE(review): several branches reconstructed around extraction gaps.
 */
static int __test__bpf(int idx)
{
	int ret;
	void *obj_buf;
	size_t obj_buf_sz;
	struct bpf_object *obj;

	ret = test_llvm__fetch_bpf_obj(&obj_buf, &obj_buf_sz,
				       bpf_testcase_table[idx].prog_id,
				       true, NULL);
	if (ret != TEST_OK || !obj_buf || !obj_buf_sz) {
		pr_debug("Unable to get BPF object, %s\n",
			 bpf_testcase_table[idx].msg_compile_fail);
		/* Only the first subtest's compile failure is a mere skip. */
		if (idx == 0)
			return TEST_SKIP;
		else
			return TEST_FAIL;
	}

	obj = prepare_bpf(obj_buf, obj_buf_sz,
			  bpf_testcase_table[idx].name);
	/*
	 * target_func set <=> object expected to load; any mismatch
	 * (failed when it should load, or loaded when it should fail,
	 * as in the relocation negative test) is a failure.
	 */
	if ((!!bpf_testcase_table[idx].target_func) != (!!obj)) {
		if (!obj)
			pr_debug("Fail to load BPF object: %s\n",
				 bpf_testcase_table[idx].msg_load_fail);
		else
			pr_debug("Success unexpectedly: %s\n",
				 bpf_testcase_table[idx].msg_load_fail);
		ret = TEST_FAIL;
		goto out;
	}

	if (obj) {
		ret = do_test(obj,
			      bpf_testcase_table[idx].target_func,
			      bpf_testcase_table[idx].expect_result);
		if (ret != TEST_OK)
			goto out;
		if (bpf_testcase_table[idx].pin) {
			int err;

			if (!bpf_fs__mount()) {
				pr_debug("BPF filesystem not mounted\n");
				ret = TEST_FAIL;
				goto out;
			}
			/* EEXIST is fine: a previous run left the dir behind. */
			err = mkdir(PERF_TEST_BPF_PATH, 0777);
			if (err && errno != EEXIST) {
				pr_debug("Failed to make perf_test dir: %s\n",
					 strerror(errno));
				ret = TEST_FAIL;
				goto out;
			}
			if (bpf_object__pin(obj, PERF_TEST_BPF_PATH))
				ret = TEST_FAIL;
			if (rm_rf(PERF_TEST_BPF_PATH))
				ret = TEST_FAIL;
		}
	}

out:
	free(obj_buf);
	bpf__clear();
	return ret;
}
284 int test__bpf_subtest_get_nr(void)
286 return (int)ARRAY_SIZE(bpf_testcase_table
);
289 const char *test__bpf_subtest_get_desc(int i
)
291 if (i
< 0 || i
>= (int)ARRAY_SIZE(bpf_testcase_table
))
293 return bpf_testcase_table
[i
].desc
;
/*
 * Probe whether the running kernel accepts a trivial BPF program, so
 * the real subtests are skipped instead of failing on kernels without
 * BPF support.
 *
 * Returns 0 if BPF works, non-zero (the failing err) otherwise.
 * NOTE(review): error-path details reconstructed — confirm upstream.
 */
static int check_env(void)
{
	int err;
	unsigned int kver_int;
	char license[] = "GPL";

	/* Minimal valid program: "return 1". */
	struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 1),
		BPF_EXIT_INSN(),
	};

	/* Kprobe programs must declare the kernel version they target. */
	err = fetch_kernel_version(&kver_int, NULL, 0);
	if (err) {
		pr_debug("Unable to get kernel version\n");
		return err;
	}

	err = bpf_load_program(BPF_PROG_TYPE_KPROBE, insns,
			       sizeof(insns) / sizeof(insns[0]),
			       license, kver_int, NULL, 0);
	if (err < 0) {
		pr_err("Missing basic BPF support, skip this test: %s\n",
		       strerror(errno));
		return err;
	}
	/* On success err is the program fd; release it. */
	close(err);

	return 0;
}
/*
 * perf-test entry point for the BPF subtests. Validates the subtest
 * index, requires root (perf_event_open + BPF load need privileges),
 * checks basic kernel BPF support, then dispatches to __test__bpf().
 * NOTE(review): interior branches reconstructed around extraction gaps.
 */
int test__bpf(struct test *test __maybe_unused, int i)
{
	int err;

	if (i < 0 || i >= (int)ARRAY_SIZE(bpf_testcase_table))
		return TEST_FAIL;

	if (geteuid() != 0) {
		pr_debug("Only root can run BPF test\n");
		return TEST_SKIP;
	}

	err = check_env();
	if (err)
		return err;

	err = __test__bpf(i);

	return err;
}
346 int test__bpf_subtest_get_nr(void)
/* Stub for builds without libbpf: there is no subtest to describe. */
const char *test__bpf_subtest_get_desc(int i __maybe_unused)
{
	return NULL;
}
/* Stub for builds without libbpf: always skip the BPF test. */
int test__bpf(struct test *test __maybe_unused, int i __maybe_unused)
{
	pr_debug("Skip BPF test because BPF support is not compiled\n");
	return TEST_SKIP;
}