/* Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <signal.h>
#include <assert.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/resource.h>
#include <linux/perf_event.h>
#include <linux/bpf.h>
25 #define SAMPLE_FREQ 50
27 static bool sys_read_seen
, sys_write_seen
;
29 static void print_ksym(__u64 addr
)
35 sym
= ksym_search(addr
);
36 printf("%s;", sym
->name
);
37 if (!strcmp(sym
->name
, "sys_read"))
39 else if (!strcmp(sym
->name
, "sys_write"))
40 sys_write_seen
= true;
/* Print one user stack-frame address as "hex;". Zero entries mark unused
 * slots in the fixed-depth ip[] buffer and are skipped.
 */
static void print_addr(__u64 addr)
{
	if (!addr)
		return;
	printf("%llx;", addr);
}
#define TASK_COMM_LEN 16

/* Key of the counts map populated by the BPF program: the sampled task's
 * command name plus the stackmap ids of its kernel and user stacks.
 * NOTE(review): layout presumably mirrors the key_t in the matching
 * _kern.o program — confirm against the BPF side.
 */
struct key_t {
	char comm[TASK_COMM_LEN];
	__u32 kernstack;
	__u32 userstack;
};
58 static void print_stack(struct key_t
*key
, __u64 count
)
60 __u64 ip
[PERF_MAX_STACK_DEPTH
] = {};
64 printf("%3lld %s;", count
, key
->comm
);
65 if (bpf_map_lookup_elem(map_fd
[1], &key
->kernstack
, ip
) != 0) {
68 for (i
= PERF_MAX_STACK_DEPTH
- 1; i
>= 0; i
--)
72 if (bpf_map_lookup_elem(map_fd
[1], &key
->userstack
, ip
) != 0) {
75 for (i
= PERF_MAX_STACK_DEPTH
- 1; i
>= 0; i
--)
83 if (key
->kernstack
== -EEXIST
&& !warned
) {
84 printf("stackmap collisions seen. Consider increasing size\n");
86 } else if ((int)key
->kernstack
< 0 && (int)key
->userstack
< 0) {
87 printf("err stackid %d %d\n", key
->kernstack
, key
->userstack
);
/* SIGINT/SIGTERM handler: dump whatever was collected, then exit. */
static void int_exit(int sig)
{
	print_stacks();
	exit(0);
}
97 static void print_stacks(void)
99 struct key_t key
= {}, next_key
;
101 __u32 stackid
= 0, next_id
;
102 int fd
= map_fd
[0], stack_map
= map_fd
[1];
104 sys_read_seen
= sys_write_seen
= false;
105 while (bpf_map_get_next_key(fd
, &key
, &next_key
) == 0) {
106 bpf_map_lookup_elem(fd
, &next_key
, &value
);
107 print_stack(&next_key
, value
);
108 bpf_map_delete_elem(fd
, &next_key
);
112 if (!sys_read_seen
|| !sys_write_seen
) {
113 printf("BUG kernel stack doesn't contain sys_read() and sys_write()\n");
117 /* clear stack map */
118 while (bpf_map_get_next_key(stack_map
, &stackid
, &next_id
) == 0) {
119 bpf_map_delete_elem(stack_map
, &next_id
);
124 static void test_perf_event_all_cpu(struct perf_event_attr
*attr
)
126 int nr_cpus
= sysconf(_SC_NPROCESSORS_CONF
);
127 int *pmu_fd
= malloc(nr_cpus
* sizeof(int));
130 /* system wide perf event, no need to inherit */
133 /* open perf_event on all cpus */
134 for (i
= 0; i
< nr_cpus
; i
++) {
135 pmu_fd
[i
] = sys_perf_event_open(attr
, -1, i
, -1, 0);
137 printf("sys_perf_event_open failed\n");
141 assert(ioctl(pmu_fd
[i
], PERF_EVENT_IOC_SET_BPF
, prog_fd
[0]) == 0);
142 assert(ioctl(pmu_fd
[i
], PERF_EVENT_IOC_ENABLE
) == 0);
144 system("dd if=/dev/zero of=/dev/null count=5000k status=none");
147 for (i
--; i
>= 0; i
--) {
148 ioctl(pmu_fd
[i
], PERF_EVENT_IOC_DISABLE
);
156 static void test_perf_event_task(struct perf_event_attr
*attr
)
160 /* per task perf event, enable inherit so the "dd ..." command can be traced properly.
161 * Enabling inherit will cause bpf_perf_prog_read_time helper failure.
165 /* open task bound event */
166 pmu_fd
= sys_perf_event_open(attr
, 0, -1, -1, 0);
168 printf("sys_perf_event_open failed\n");
171 assert(ioctl(pmu_fd
, PERF_EVENT_IOC_SET_BPF
, prog_fd
[0]) == 0);
172 assert(ioctl(pmu_fd
, PERF_EVENT_IOC_ENABLE
) == 0);
173 system("dd if=/dev/zero of=/dev/null count=5000k status=none");
175 ioctl(pmu_fd
, PERF_EVENT_IOC_DISABLE
);
179 static void test_bpf_perf_event(void)
181 struct perf_event_attr attr_type_hw
= {
182 .sample_freq
= SAMPLE_FREQ
,
184 .type
= PERF_TYPE_HARDWARE
,
185 .config
= PERF_COUNT_HW_CPU_CYCLES
,
187 struct perf_event_attr attr_type_sw
= {
188 .sample_freq
= SAMPLE_FREQ
,
190 .type
= PERF_TYPE_SOFTWARE
,
191 .config
= PERF_COUNT_SW_CPU_CLOCK
,
193 struct perf_event_attr attr_hw_cache_l1d
= {
194 .sample_freq
= SAMPLE_FREQ
,
196 .type
= PERF_TYPE_HW_CACHE
,
198 PERF_COUNT_HW_CACHE_L1D
|
199 (PERF_COUNT_HW_CACHE_OP_READ
<< 8) |
200 (PERF_COUNT_HW_CACHE_RESULT_ACCESS
<< 16),
202 struct perf_event_attr attr_hw_cache_branch_miss
= {
203 .sample_freq
= SAMPLE_FREQ
,
205 .type
= PERF_TYPE_HW_CACHE
,
207 PERF_COUNT_HW_CACHE_BPU
|
208 (PERF_COUNT_HW_CACHE_OP_READ
<< 8) |
209 (PERF_COUNT_HW_CACHE_RESULT_MISS
<< 16),
211 struct perf_event_attr attr_type_raw
= {
212 .sample_freq
= SAMPLE_FREQ
,
214 .type
= PERF_TYPE_RAW
,
215 /* Intel Instruction Retired */
219 printf("Test HW_CPU_CYCLES\n");
220 test_perf_event_all_cpu(&attr_type_hw
);
221 test_perf_event_task(&attr_type_hw
);
223 printf("Test SW_CPU_CLOCK\n");
224 test_perf_event_all_cpu(&attr_type_sw
);
225 test_perf_event_task(&attr_type_sw
);
227 printf("Test HW_CACHE_L1D\n");
228 test_perf_event_all_cpu(&attr_hw_cache_l1d
);
229 test_perf_event_task(&attr_hw_cache_l1d
);
231 printf("Test HW_CACHE_BPU\n");
232 test_perf_event_all_cpu(&attr_hw_cache_branch_miss
);
233 test_perf_event_task(&attr_hw_cache_branch_miss
);
235 printf("Test Instruction Retired\n");
236 test_perf_event_all_cpu(&attr_type_raw
);
237 test_perf_event_task(&attr_type_raw
);
239 printf("*** PASS ***\n");
243 int main(int argc
, char **argv
)
245 struct rlimit r
= {RLIM_INFINITY
, RLIM_INFINITY
};
248 snprintf(filename
, sizeof(filename
), "%s_kern.o", argv
[0]);
249 setrlimit(RLIMIT_MEMLOCK
, &r
);
251 signal(SIGINT
, int_exit
);
252 signal(SIGTERM
, int_exit
);
254 if (load_kallsyms()) {
255 printf("failed to process /proc/kallsyms\n");
259 if (load_bpf_file(filename
)) {
260 printf("%s", bpf_log_buf
);
268 test_bpf_perf_event();