// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016 Facebook
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <signal.h>
#include <assert.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <linux/perf_event.h>
#include <linux/bpf.h>
#include <sys/resource.h>
#include <bpf/libbpf.h>
#include "bpf_load.h"
#include "perf-sys.h"
#include "trace_helpers.h"

#define SAMPLE_FREQ 50
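
/* Every perf_event_attr below is opened in frequency mode (.freq = 1), so
 * the kernel auto-tunes the sampling period to deliver roughly SAMPLE_FREQ
 * samples per second; each sample invokes the attached BPF program once.
 */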

static bool sys_read_seen, sys_write_seen;

static void print_ksym(__u64 addr)
{
	struct ksym *sym;

	if (!addr)
		return;
	sym = ksym_search(addr);
	if (!sym) {
		printf("ksym not found. Is kallsyms loaded?\n");
		return;
	}

	printf("%s;", sym->name);
	/* strstr() returns non-NULL on a match, so no negation here */
	if (strstr(sym->name, "sys_read"))
		sys_read_seen = true;
	else if (strstr(sym->name, "sys_write"))
		sys_write_seen = true;
}
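
/* ksym_search() comes from trace_helpers and resolves a kernel address
 * against the /proc/kallsyms snapshot taken by load_kallsyms() in main().
 */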

static void print_addr(__u64 addr)
{
	if (!addr)
		return;
	printf("%llx;", addr);
}

#define TASK_COMM_LEN 16

struct key_t {
	char comm[TASK_COMM_LEN];
	__u32 kernstack;
	__u32 userstack;
};
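
/* key_t must mirror the hash-map key struct in the _kern.o counterpart
 * (not shown here); kernstack and userstack are presumably ids returned by
 * bpf_get_stackid(), which index the BPF_MAP_TYPE_STACK_TRACE map read
 * below, or a negative errno on failure.
 */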

static void print_stack(struct key_t *key, __u64 count)
{
	__u64 ip[PERF_MAX_STACK_DEPTH] = {};
	static bool warned;
	int i;

	printf("%3lld %s;", count, key->comm);
	if (bpf_map_lookup_elem(map_fd[1], &key->kernstack, ip) != 0) {
		printf("---;");
	} else {
		for (i = PERF_MAX_STACK_DEPTH - 1; i >= 0; i--)
			print_ksym(ip[i]);
	}
	printf("-;");
	if (bpf_map_lookup_elem(map_fd[1], &key->userstack, ip) != 0) {
		printf("---;");
	} else {
		for (i = PERF_MAX_STACK_DEPTH - 1; i >= 0; i--)
			print_addr(ip[i]);
	}
	/* low-count stacks end with '\r' so the next line overwrites them;
	 * only hot stacks stay visible on the terminal
	 */
	if (count < 6)
		printf("\r");
	else
		printf("\n");

	/* bpf_get_stackid() returns -EEXIST when a different stack already
	 * occupies the hash bucket it maps to
	 */
	if (key->kernstack == -EEXIST && !warned) {
		printf("stackmap collisions seen. Consider increasing size\n");
		warned = true;
	} else if ((int)key->kernstack < 0 && (int)key->userstack < 0) {
		printf("err stackid %d %d\n", key->kernstack, key->userstack);
	}
}

static void int_exit(int sig)
{
	kill(0, SIGKILL);
	exit(0);
}
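
/* Drain the counts map: walk every key, print it, and delete it so the
 * next test starts from an empty map.
 */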
static void print_stacks(void)
{
	struct key_t key = {}, next_key;
	__u64 value;
	__u32 stackid = 0, next_id;
	int fd = map_fd[0], stack_map = map_fd[1];

	sys_read_seen = sys_write_seen = false;
	while (bpf_map_get_next_key(fd, &key, &next_key) == 0) {
		bpf_map_lookup_elem(fd, &next_key, &value);
		print_stack(&next_key, value);
		bpf_map_delete_elem(fd, &next_key);
		key = next_key;
	}
	printf("\n");
	if (!sys_read_seen || !sys_write_seen) {
		printf("BUG kernel stack doesn't contain sys_read() and sys_write()\n");
		int_exit(0);
	}

	/* clear stack map */
	while (bpf_map_get_next_key(stack_map, &stackid, &next_id) == 0) {
		bpf_map_delete_elem(stack_map, &next_id);
		stackid = next_id;
	}
}
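
/* The stack map is emptied between tests: leftover entries would raise the
 * collision (-EEXIST) rate for bpf_get_stackid() in the next run.
 */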

static inline int generate_load(void)
{
	if (system("dd if=/dev/zero of=/dev/null count=5000k status=none") < 0) {
		printf("failed to generate some load with dd: %s\n", strerror(errno));
		return -1;
	}

	return 0;
}
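
/* dd copies 5000k blocks from /dev/zero to /dev/null, i.e. a tight
 * read()/write() loop, which guarantees sys_read/sys_write frames appear
 * in the sampled kernel stacks that print_stacks() checks for.
 */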

static void test_perf_event_all_cpu(struct perf_event_attr *attr)
{
	int nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
	int *pmu_fd = malloc(nr_cpus * sizeof(int));
	int i, error = 0;

	/* system wide perf event, no need to inherit */
	attr->inherit = 0;

	/* open perf_event on all cpus */
	for (i = 0; i < nr_cpus; i++) {
		pmu_fd[i] = sys_perf_event_open(attr, -1, i, -1, 0);
		if (pmu_fd[i] < 0) {
			printf("sys_perf_event_open failed\n");
			error = 1;
			goto all_cpu_err;
		}
		assert(ioctl(pmu_fd[i], PERF_EVENT_IOC_SET_BPF, prog_fd[0]) == 0);
		assert(ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE) == 0);
	}

	if (generate_load() < 0) {
		error = 1;
		goto all_cpu_err;
	}
	print_stacks();
all_cpu_err:
	for (i--; i >= 0; i--) {
		ioctl(pmu_fd[i], PERF_EVENT_IOC_DISABLE);
		close(pmu_fd[i]);
	}
	free(pmu_fd);
	if (error)
		int_exit(0);
}
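
/* pid = -1 with cpu = i opens one system-wide event per CPU; the per-task
 * variant below instead passes pid = 0, cpu = -1, so the event follows the
 * calling task (and, with inherit set, its children) across all CPUs.
 */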

static void test_perf_event_task(struct perf_event_attr *attr)
{
	int pmu_fd, error = 0;

	/* per task perf event, enable inherit so the "dd ..." command can be
	 * traced properly. Enabling inherit will cause bpf_perf_prog_read_time
	 * helper failure.
	 */
	attr->inherit = 1;

	/* open task bound event */
	pmu_fd = sys_perf_event_open(attr, 0, -1, -1, 0);
	if (pmu_fd < 0) {
		printf("sys_perf_event_open failed\n");
		int_exit(0);
	}
	assert(ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd[0]) == 0);
	assert(ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE) == 0);

	if (generate_load() < 0) {
		error = 1;
		goto err;
	}
	print_stacks();
err:
	ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
	close(pmu_fd);
	if (error)
		int_exit(0);
}

static void test_bpf_perf_event(void)
{
	struct perf_event_attr attr_type_hw = {
		.sample_freq = SAMPLE_FREQ,
		.freq = 1,
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_event_attr attr_type_sw = {
		.sample_freq = SAMPLE_FREQ,
		.freq = 1,
		.type = PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_CPU_CLOCK,
	};
	struct perf_event_attr attr_hw_cache_l1d = {
		.sample_freq = SAMPLE_FREQ,
		.freq = 1,
		.type = PERF_TYPE_HW_CACHE,
		.config =
			PERF_COUNT_HW_CACHE_L1D |
			(PERF_COUNT_HW_CACHE_OP_READ << 8) |
			(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16),
	};
	struct perf_event_attr attr_hw_cache_branch_miss = {
		.sample_freq = SAMPLE_FREQ,
		.freq = 1,
		.type = PERF_TYPE_HW_CACHE,
		.config =
			PERF_COUNT_HW_CACHE_BPU |
			(PERF_COUNT_HW_CACHE_OP_READ << 8) |
			(PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
	};
	struct perf_event_attr attr_type_raw = {
		.sample_freq = SAMPLE_FREQ,
		.freq = 1,
		.type = PERF_TYPE_RAW,
		/* Intel Instruction Retired */
		.config = 0xc0,
	};
	struct perf_event_attr attr_type_raw_lock_load = {
		.sample_freq = SAMPLE_FREQ,
		.freq = 1,
		.type = PERF_TYPE_RAW,
		/* Intel MEM_UOPS_RETIRED.LOCK_LOADS */
		.config = 0x21d0,
		/* Request to record lock address from PEBS */
		.sample_type = PERF_SAMPLE_ADDR,
		/* Record address value requires precise event */
		.precise_ip = 2,
	};

	printf("Test HW_CPU_CYCLES\n");
	test_perf_event_all_cpu(&attr_type_hw);
	test_perf_event_task(&attr_type_hw);

	printf("Test SW_CPU_CLOCK\n");
	test_perf_event_all_cpu(&attr_type_sw);
	test_perf_event_task(&attr_type_sw);

	printf("Test HW_CACHE_L1D\n");
	test_perf_event_all_cpu(&attr_hw_cache_l1d);
	test_perf_event_task(&attr_hw_cache_l1d);

	printf("Test HW_CACHE_BPU\n");
	test_perf_event_all_cpu(&attr_hw_cache_branch_miss);
	test_perf_event_task(&attr_hw_cache_branch_miss);

	printf("Test Instruction Retired\n");
	test_perf_event_all_cpu(&attr_type_raw);
	test_perf_event_task(&attr_type_raw);

	printf("Test Lock Load\n");
	test_perf_event_all_cpu(&attr_type_raw_lock_load);
	test_perf_event_task(&attr_type_raw_lock_load);

	printf("*** PASS ***\n");
}

int main(int argc, char **argv)
{
	struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
	char filename[256];

	snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
	setrlimit(RLIMIT_MEMLOCK, &r);

	signal(SIGINT, int_exit);
	signal(SIGTERM, int_exit);

	if (load_kallsyms()) {
		printf("failed to process /proc/kallsyms\n");
		return 1;
	}

	if (load_bpf_file(filename)) {
		printf("%s", bpf_log_buf);
		return 2;
	}

	if (fork() == 0) {
		read_trace_pipe();
		return 0;
	}
	test_bpf_perf_event();
	int_exit(0);
	return 0;
}