// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <sys/socket.h>
#include <test_progs.h>

#define MAX_CNT_RAWTP	10ull
#define MAX_STACK_RAWTP	100
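
/* MAX_CNT_RAWTP drives both the syscall trigger loop below and the number
 * of perf samples the test expects back; MAX_STACK_RAWTP bounds how many
 * stack entries a single sample may carry.
 */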

static int duration = 0;

struct get_stack_trace_t {
	int pid;
	int kern_stack_size;
	int user_stack_size;
	int user_stack_buildid_size;
	__u64 kern_stack[MAX_STACK_RAWTP];
	__u64 user_stack[MAX_STACK_RAWTP];
	struct bpf_stack_build_id user_stack_buildid[MAX_STACK_RAWTP];
};
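
/* Per-sample callback for the perf buffer. A sample smaller than
 * struct get_stack_trace_t is treated as a raw array of kernel
 * instruction pointers; a full-sized sample carries the struct with
 * separate kernel, user, and build-id stacks.
 */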
static void get_stack_print_output(void *ctx, int cpu, void *data, __u32 size)
{
	bool good_kern_stack = false, good_user_stack = false;
	const char *nonjit_func = "___bpf_prog_run";
	struct get_stack_trace_t *e = data;
	struct ksym *ks;
	int i, num_stack;

	if (size < sizeof(struct get_stack_trace_t)) {
		__u64 *raw_data = data;
		bool found = false;

		num_stack = size / sizeof(__u64);
		/* If jit is enabled, we do not have a good way to
		 * verify the sanity of the kernel stack. So we
		 * just assume it is good if the stack is not empty.
		 * This could be improved in the future.
		 */
		if (env.jit_enabled) {
			found = num_stack > 0;
		} else {
			for (i = 0; i < num_stack; i++) {
				ks = ksym_search(raw_data[i]);
				if (ks && (strcmp(ks->name, nonjit_func) == 0)) {
					found = true;
					break;
				}
			}
		}
		if (found) {
			good_kern_stack = true;
			good_user_stack = true;
		}
	} else {
		num_stack = e->kern_stack_size / sizeof(__u64);
		if (env.jit_enabled) {
			good_kern_stack = num_stack > 0;
		} else {
			for (i = 0; i < num_stack; i++) {
				ks = ksym_search(e->kern_stack[i]);
				if (ks && (strcmp(ks->name, nonjit_func) == 0)) {
					good_kern_stack = true;
					break;
				}
			}
		}
		if (e->user_stack_size > 0 && e->user_stack_buildid_size > 0)
			good_user_stack = true;
	}

	CHECK(!good_kern_stack, "kern_stack", "corrupted kernel stack\n");
	CHECK(!good_user_stack, "user_stack", "corrupted user stack\n");
}
void test_get_stack_raw_tp(void)
{
	const char *file = "./test_get_stack_rawtp.o";
	const char *prog_name = "raw_tracepoint/sys_enter";
	int i, err, prog_fd, exp_cnt = MAX_CNT_RAWTP;
	struct perf_buffer_opts pb_opts = {};
	struct perf_buffer *pb = NULL;
	struct bpf_link *link = NULL;
	struct timespec tv = {0, 10};
	struct bpf_program *prog;
	struct bpf_object *obj;
	struct bpf_map *map;
	cpu_set_t cpu_set;

	err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
	if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
		return;

	prog = bpf_object__find_program_by_title(obj, prog_name);
	if (CHECK(!prog, "find_probe", "prog '%s' not found\n", prog_name))
		goto close_prog;

	map = bpf_object__find_map_by_name(obj, "perfmap");
	if (CHECK(!map, "bpf_find_map", "not found\n"))
		goto close_prog;

	err = load_kallsyms();
	if (CHECK(err < 0, "load_kallsyms", "err %d errno %d\n", err, errno))
		goto close_prog;
	CPU_ZERO(&cpu_set);
	CPU_SET(0, &cpu_set);
	err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set), &cpu_set);
	if (CHECK(err, "set_affinity", "err %d, errno %d\n", err, errno))
		goto close_prog;

	link = bpf_program__attach_raw_tracepoint(prog, "sys_enter");
	if (CHECK(IS_ERR(link), "attach_raw_tp", "err %ld\n", PTR_ERR(link)))
		goto close_prog;
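
	/* 8 is the per-CPU ring buffer size in pages; get_stack_print_output
	 * is invoked for every sample delivered during perf_buffer__poll().
	 */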
	pb_opts.sample_cb = get_stack_print_output;
	pb = perf_buffer__new(bpf_map__fd(map), 8, &pb_opts);
	if (CHECK(IS_ERR(pb), "perf_buf__new", "err %ld\n", PTR_ERR(pb)))
		goto close_prog;

	/* trigger some syscall action */
	for (i = 0; i < MAX_CNT_RAWTP; i++)
		nanosleep(&tv, NULL);
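
	/* Keep polling and decrement exp_cnt by poll's non-negative return
	 * value until all expected samples have been accounted for.
	 */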
	while (exp_cnt > 0) {
		err = perf_buffer__poll(pb, 100);
		if (err < 0 && CHECK(err < 0, "pb__poll", "err %d\n", err))
			goto close_prog;
		exp_cnt -= err;
	}

close_prog:
	if (!IS_ERR_OR_NULL(link))
		bpf_link__destroy(link);
	if (!IS_ERR_OR_NULL(pb))
		perf_buffer__free(pb);
	bpf_object__close(obj);
}