// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>

#define MAX_CNT_RAWTP	10ull
#define MAX_STACK_RAWTP	100
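
/* Layout of each perf sample: kernel and user stack traces plus a
 * build-id-based user stack; expected to match the record emitted by the
 * BPF program in test_get_stack_rawtp.o.
 */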
struct get_stack_trace_t {
	int pid;
	int kern_stack_size;
	int user_stack_size;
	int user_stack_buildid_size;
	__u64 kern_stack[MAX_STACK_RAWTP];
	__u64 user_stack[MAX_STACK_RAWTP];
	struct bpf_stack_build_id user_stack_buildid[MAX_STACK_RAWTP];
};
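
/* Perf event callback: sanity-check one sample. Returns
 * LIBBPF_PERF_EVENT_CONT to keep polling, LIBBPF_PERF_EVENT_DONE once
 * MAX_CNT_RAWTP good samples have been seen, or LIBBPF_PERF_EVENT_ERROR
 * if either the kernel or the user stack in a sample is unusable.
 */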
static int get_stack_print_output(void *data, int size)
{
	bool good_kern_stack = false, good_user_stack = false;
	const char *nonjit_func = "___bpf_prog_run";
	struct get_stack_trace_t *e = data;
	int i, num_stack;
	static __u64 cnt;
	struct ksym *ks;

	cnt++;

	if (size < sizeof(struct get_stack_trace_t)) {
		__u64 *raw_data = data;
		bool found = false;

		num_stack = size / sizeof(__u64);
		/* If jit is enabled, we do not have a good way to
		 * verify the sanity of the kernel stack. So we
		 * just assume it is good if the stack is not empty.
		 * This could be improved in the future.
		 */
		if (jit_enabled) {
			found = num_stack > 0;
		} else {
			for (i = 0; i < num_stack; i++) {
				ks = ksym_search(raw_data[i]);
				if (ks && (strcmp(ks->name, nonjit_func) == 0)) {
					found = true;
					break;
				}
			}
		}
		if (found) {
			good_kern_stack = true;
			good_user_stack = true;
		}
	} else {
		num_stack = e->kern_stack_size / sizeof(__u64);
		if (jit_enabled) {
			good_kern_stack = num_stack > 0;
		} else {
			for (i = 0; i < num_stack; i++) {
				ks = ksym_search(e->kern_stack[i]);
				if (ks && (strcmp(ks->name, nonjit_func) == 0)) {
					good_kern_stack = true;
					break;
				}
			}
		}
		if (e->user_stack_size > 0 && e->user_stack_buildid_size > 0)
			good_user_stack = true;
	}
	if (!good_kern_stack || !good_user_stack)
		return LIBBPF_PERF_EVENT_ERROR;

	if (cnt == MAX_CNT_RAWTP)
		return LIBBPF_PERF_EVENT_DONE;

	return LIBBPF_PERF_EVENT_CONT;
}
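
/* Load test_get_stack_rawtp.o, attach it to the sys_enter raw tracepoint,
 * wire its perf event array to a userspace reader, trigger a few syscalls,
 * and validate the stack traces reported in each sample.
 */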
void test_get_stack_raw_tp(void)
{
	const char *file = "./test_get_stack_rawtp.o";
	int i, efd, err, prog_fd, pmu_fd, perfmap_fd;
	struct perf_event_attr attr = {};
	struct timespec tv = {0, 10};
	__u32 key = 0, duration = 0;
	struct bpf_object *obj;

	err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
	if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
		return;
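
	/* Attach to the sys_enter raw tracepoint so every syscall entry
	 * (including the nanosleep() calls below) runs the program.
	 */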
	efd = bpf_raw_tracepoint_open("sys_enter", prog_fd);
	if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno))
		goto close_prog;

	perfmap_fd = bpf_find_map(__func__, obj, "perfmap");
	if (CHECK(perfmap_fd < 0, "bpf_find_map", "err %d errno %d\n",
		  perfmap_fd, errno))
		goto close_prog;

	err = load_kallsyms();
	if (CHECK(err < 0, "load_kallsyms", "err %d errno %d\n", err, errno))
		goto close_prog;
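
	/* Open a software perf event configured for BPF output and store its
	 * fd in the "perfmap" perf event array; the BPF program streams its
	 * samples through it to this process.
	 */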
	attr.sample_type = PERF_SAMPLE_RAW;
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_BPF_OUTPUT;
	pmu_fd = syscall(__NR_perf_event_open, &attr, getpid()/*pid*/, -1/*cpu*/,
			 -1/*group_fd*/, 0);
	if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", pmu_fd,
		  errno))
		goto close_prog;

	err = bpf_map_update_elem(perfmap_fd, &key, &pmu_fd, BPF_ANY);
	if (CHECK(err < 0, "bpf_map_update_elem", "err %d errno %d\n", err,
		  errno))
		goto close_prog;

	err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
	if (CHECK(err < 0, "ioctl PERF_EVENT_IOC_ENABLE", "err %d errno %d\n",
		  err, errno))
		goto close_prog;

	err = perf_event_mmap(pmu_fd);
	if (CHECK(err < 0, "perf_event_mmap", "err %d errno %d\n", err, errno))
		goto close_prog;

	/* trigger some syscall action */
	for (i = 0; i < MAX_CNT_RAWTP; i++)
		nanosleep(&tv, NULL);

	err = perf_event_poller(pmu_fd, get_stack_print_output);
	if (CHECK(err < 0, "perf_event_poller", "err %d errno %d\n", err, errno))
		goto close_prog;

	goto close_prog_noerr;
close_prog:
	error_cnt++;
close_prog_noerr:
	bpf_object__close(obj);
}