samples/bpf/trace_event_user.c

// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016 Facebook
 */
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <linux/perf_event.h>
#include <linux/bpf.h>
#include <signal.h>
#include <errno.h>
#include <sys/resource.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "perf-sys.h"
#include "trace_helpers.h"
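
/* Stub out __must_check so the kernel's <linux/err.h> compiles in
 * userspace; it supplies the IS_ERR() helper used on libbpf return
 * values below.
 */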
#define __must_check
#include <linux/err.h>
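
/* Sampling frequency used by every perf event below (freq mode, in Hz). */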
#define SAMPLE_FREQ 50

static int pid;
/* counts, stackmap */
static int map_fd[2];
struct bpf_program *prog;
static bool sys_read_seen, sys_write_seen;
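
/* Resolve a kernel address to a symbol name via the kallsyms table loaded
 * by load_kallsyms(), and note whether the read/write syscall paths show
 * up in the sampled stacks.
 */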
static void print_ksym(__u64 addr)
{
        struct ksym *sym;

        if (!addr)
                return;
        sym = ksym_search(addr);
        if (!sym) {
                printf("ksym not found. Is kallsyms loaded?\n");
                return;
        }

        printf("%s;", sym->name);
        if (strstr(sym->name, "sys_read"))
                sys_read_seen = true;
        else if (strstr(sym->name, "sys_write"))
                sys_write_seen = true;
}

static void print_addr(__u64 addr)
{
        if (!addr)
                return;
        printf("%llx;", addr);
}

#define TASK_COMM_LEN 16
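
/* Key of the "counts" map; the layout must match the key struct used by
 * the BPF program in the companion *_kern.o object, since entries are
 * looked up and deleted by this key below.
 */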
struct key_t {
        char comm[TASK_COMM_LEN];
        __u32 kernstack;
        __u32 userstack;
};

static void print_stack(struct key_t *key, __u64 count)
{
        __u64 ip[PERF_MAX_STACK_DEPTH] = {};
        static bool warned;
        int i;

        printf("%3lld %s;", count, key->comm);
        if (bpf_map_lookup_elem(map_fd[1], &key->kernstack, ip) != 0) {
                printf("---;");
        } else {
                for (i = PERF_MAX_STACK_DEPTH - 1; i >= 0; i--)
                        print_ksym(ip[i]);
        }
        printf("-;");
        if (bpf_map_lookup_elem(map_fd[1], &key->userstack, ip) != 0) {
                printf("---;");
        } else {
                for (i = PERF_MAX_STACK_DEPTH - 1; i >= 0; i--)
                        print_addr(ip[i]);
        }
        if (count < 6)
                printf("\r");
        else
                printf("\n");

        if (key->kernstack == -EEXIST && !warned) {
                printf("stackmap collisions seen. Consider increasing size\n");
                warned = true;
        } else if ((int)key->kernstack < 0 && (int)key->userstack < 0) {
                printf("err stackid %d %d\n", key->kernstack, key->userstack);
        }
}
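
/* Kill the forked trace-pipe reader (if any) before exiting. Also installed
 * directly as the SIGINT/SIGTERM handler, so the signal number becomes the
 * exit code.
 */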
static void err_exit(int err)
{
        kill(pid, SIGKILL);
        exit(err);
}
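
/* Drain and print every accumulated stack count, verify that both the read
 * and write syscall paths were sampled, then clear the stack map for the
 * next test.
 */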
static void print_stacks(void)
{
        struct key_t key = {}, next_key;
        __u64 value;
        __u32 stackid = 0, next_id;
        int error = 1, fd = map_fd[0], stack_map = map_fd[1];

        sys_read_seen = sys_write_seen = false;
        while (bpf_map_get_next_key(fd, &key, &next_key) == 0) {
                bpf_map_lookup_elem(fd, &next_key, &value);
                print_stack(&next_key, value);
                bpf_map_delete_elem(fd, &next_key);
                key = next_key;
        }
        printf("\n");
        if (!sys_read_seen || !sys_write_seen) {
                printf("BUG kernel stack doesn't contain sys_read() and sys_write()\n");
                err_exit(error);
        }

        /* clear stack map */
        while (bpf_map_get_next_key(stack_map, &stackid, &next_id) == 0) {
                bpf_map_delete_elem(stack_map, &next_id);
                stackid = next_id;
        }
}
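
/* Generate CPU load that issues plenty of read() and write() syscalls, so
 * both paths are expected to appear in the sampled stacks.
 */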
static inline int generate_load(void)
{
        if (system("dd if=/dev/zero of=/dev/null count=5000k status=none") < 0) {
                printf("failed to generate some load with dd: %s\n", strerror(errno));
                return -1;
        }

        return 0;
}
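
/* Attach the BPF program to one system-wide perf event per online CPU
 * (pid == -1, cpu == i in sys_perf_event_open), run the load, and dump
 * the collected stacks.
 */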
static void test_perf_event_all_cpu(struct perf_event_attr *attr)
{
        int nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
        struct bpf_link **links = calloc(nr_cpus, sizeof(struct bpf_link *));
        int i, pmu_fd, error = 1;

        if (!links) {
                printf("malloc of links failed\n");
                goto err;
        }

        /* system wide perf event, no need to inherit */
        attr->inherit = 0;

        /* open perf_event on all cpus */
        for (i = 0; i < nr_cpus; i++) {
                pmu_fd = sys_perf_event_open(attr, -1, i, -1, 0);
                if (pmu_fd < 0) {
                        printf("sys_perf_event_open failed\n");
                        goto all_cpu_err;
                }
                links[i] = bpf_program__attach_perf_event(prog, pmu_fd);
                if (IS_ERR(links[i])) {
                        printf("bpf_program__attach_perf_event failed\n");
                        links[i] = NULL;
                        close(pmu_fd);
                        goto all_cpu_err;
                }
        }

        if (generate_load() < 0)
                goto all_cpu_err;

        print_stacks();
        error = 0;
all_cpu_err:
        for (i--; i >= 0; i--)
                bpf_link__destroy(links[i]);
err:
        free(links);
        if (error)
                err_exit(error);
}
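
/* Attach the BPF program to a single task-bound perf event (pid == 0,
 * cpu == -1 in sys_perf_event_open traces the current task on any CPU).
 */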
static void test_perf_event_task(struct perf_event_attr *attr)
{
        struct bpf_link *link = NULL;
        int pmu_fd, error = 1;

        /* per task perf event, enable inherit so the "dd ..." command can
         * be traced properly. Enabling inherit will cause
         * bpf_perf_prog_read_time helper failure.
         */
        attr->inherit = 1;

        /* open task bound event */
        pmu_fd = sys_perf_event_open(attr, 0, -1, -1, 0);
        if (pmu_fd < 0) {
                printf("sys_perf_event_open failed\n");
                goto err;
        }
        link = bpf_program__attach_perf_event(prog, pmu_fd);
        if (IS_ERR(link)) {
                printf("bpf_program__attach_perf_event failed\n");
                link = NULL;
                close(pmu_fd);
                goto err;
        }

        if (generate_load() < 0)
                goto err;

        print_stacks();
        error = 0;
err:
        bpf_link__destroy(link);
        if (error)
                err_exit(error);
}
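
/* Exercise sampling across the major perf event types: hardware cycles,
 * software CPU clock, two HW cache events, and two Intel-specific raw
 * events. Each config is tested both system-wide and task-bound.
 */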
static void test_bpf_perf_event(void)
{
        struct perf_event_attr attr_type_hw = {
                .sample_freq = SAMPLE_FREQ,
                .freq = 1,
                .type = PERF_TYPE_HARDWARE,
                .config = PERF_COUNT_HW_CPU_CYCLES,
        };
        struct perf_event_attr attr_type_sw = {
                .sample_freq = SAMPLE_FREQ,
                .freq = 1,
                .type = PERF_TYPE_SOFTWARE,
                .config = PERF_COUNT_SW_CPU_CLOCK,
        };
        struct perf_event_attr attr_hw_cache_l1d = {
                .sample_freq = SAMPLE_FREQ,
                .freq = 1,
                .type = PERF_TYPE_HW_CACHE,
                .config =
                        PERF_COUNT_HW_CACHE_L1D |
                        (PERF_COUNT_HW_CACHE_OP_READ << 8) |
                        (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16),
        };
        struct perf_event_attr attr_hw_cache_branch_miss = {
                .sample_freq = SAMPLE_FREQ,
                .freq = 1,
                .type = PERF_TYPE_HW_CACHE,
                .config =
                        PERF_COUNT_HW_CACHE_BPU |
                        (PERF_COUNT_HW_CACHE_OP_READ << 8) |
                        (PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
        };
        struct perf_event_attr attr_type_raw = {
                .sample_freq = SAMPLE_FREQ,
                .freq = 1,
                .type = PERF_TYPE_RAW,
                /* Intel Instruction Retired */
                .config = 0xc0,
        };
        struct perf_event_attr attr_type_raw_lock_load = {
                .sample_freq = SAMPLE_FREQ,
                .freq = 1,
                .type = PERF_TYPE_RAW,
                /* Intel MEM_UOPS_RETIRED.LOCK_LOADS */
                .config = 0x21d0,
                /* Request to record lock address from PEBS */
                .sample_type = PERF_SAMPLE_ADDR,
                /* Record address value requires precise event */
                .precise_ip = 2,
        };

        printf("Test HW_CPU_CYCLES\n");
        test_perf_event_all_cpu(&attr_type_hw);
        test_perf_event_task(&attr_type_hw);

        printf("Test SW_CPU_CLOCK\n");
        test_perf_event_all_cpu(&attr_type_sw);
        test_perf_event_task(&attr_type_sw);

        printf("Test HW_CACHE_L1D\n");
        test_perf_event_all_cpu(&attr_hw_cache_l1d);
        test_perf_event_task(&attr_hw_cache_l1d);

        printf("Test HW_CACHE_BPU\n");
        test_perf_event_all_cpu(&attr_hw_cache_branch_miss);
        test_perf_event_task(&attr_hw_cache_branch_miss);

        printf("Test Instruction Retired\n");
        test_perf_event_all_cpu(&attr_type_raw);
        test_perf_event_task(&attr_type_raw);

        printf("Test Lock Load\n");
        test_perf_event_all_cpu(&attr_type_raw_lock_load);
        test_perf_event_task(&attr_type_raw_lock_load);

        printf("*** PASS ***\n");
}
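
/* Setup: raise RLIMIT_MEMLOCK for map creation, load kallsyms for symbol
 * resolution, open and load the companion BPF object ("<argv[0]>_kern.o",
 * e.g. trace_event_kern.o when built via the samples/bpf Makefile), then
 * fork a child to echo the kernel trace pipe while the tests run.
 */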
int main(int argc, char **argv)
{
        struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
        struct bpf_object *obj = NULL;
        char filename[256];
        int error = 1;

        snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
        setrlimit(RLIMIT_MEMLOCK, &r);

        signal(SIGINT, err_exit);
        signal(SIGTERM, err_exit);

        if (load_kallsyms()) {
                printf("failed to process /proc/kallsyms\n");
                goto cleanup;
        }

        obj = bpf_object__open_file(filename, NULL);
        if (IS_ERR(obj)) {
                printf("opening BPF object file failed\n");
                obj = NULL;
                goto cleanup;
        }

        prog = bpf_object__find_program_by_name(obj, "bpf_prog1");
        if (!prog) {
                printf("finding a prog in obj file failed\n");
                goto cleanup;
        }

        /* load BPF program */
        if (bpf_object__load(obj)) {
                printf("loading BPF object file failed\n");
                goto cleanup;
        }

        map_fd[0] = bpf_object__find_map_fd_by_name(obj, "counts");
        map_fd[1] = bpf_object__find_map_fd_by_name(obj, "stackmap");
        if (map_fd[0] < 0 || map_fd[1] < 0) {
                printf("finding a counts/stackmap map in obj file failed\n");
                goto cleanup;
        }

        pid = fork();
        if (pid == 0) {
                read_trace_pipe();
                return 0;
        } else if (pid == -1) {
                printf("couldn't spawn process\n");
                goto cleanup;
        }

        test_bpf_perf_event();
        error = 0;

cleanup:
        bpf_object__close(obj);
        err_exit(error);
}