/* tools/perf/util/bpf_ftrace.c */
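/*
 * BPF backend for 'perf ftrace latency': a kprobe/kretprobe pair is
 * attached to a single traced function and the entry-to-return time is
 * aggregated into a bucketed histogram in BPF maps, which the read
 * function below folds into the caller's buckets.
 */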
#include <stdio.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdlib.h>

#include <linux/err.h>

#include "util/ftrace.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/debug.h"
#include "util/evlist.h"
#include "util/bpf_counter.h"

#include "util/bpf_skel/func_latency.skel.h"
static struct func_latency_bpf *skel;
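
/*
 * The skeleton follows the usual libbpf lifecycle: open, adjust rodata
 * and map sizes, load, attach.  func_latency.skel.h is generated at
 * build time from the BPF program (util/bpf_skel/func_latency.bpf.c).
 */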
int perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace)
{
	int fd, err;
	int i, ncpus = 1, ntasks = 1;
	struct filter_entry *func;

	if (!list_is_singular(&ftrace->filters)) {
		pr_err("ERROR: %s target function(s).\n",
		       list_empty(&ftrace->filters) ? "No" : "Too many");
		return -1;
	}

	func = list_first_entry(&ftrace->filters, struct filter_entry, list);
	skel = func_latency_bpf__open();
	if (!skel) {
		pr_err("Failed to open func latency skeleton\n");
		return -1;
	}
	/* don't need to set cpu filter for system-wide mode */
	if (ftrace->target.cpu_list) {
		ncpus = perf_cpu_map__nr(ftrace->evlist->core.user_requested_cpus);
		bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus);
		skel->rodata->has_cpu = 1;
	}
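
	/*
	 * Size the task filter from the resolved thread map.  target__none()
	 * is included so that the forked workload case (no explicit
	 * --pid/--tid) gets a task filter as well.
	 */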
	if (target__has_task(&ftrace->target) || target__none(&ftrace->target)) {
		ntasks = perf_thread_map__nr(ftrace->evlist->core.threads);
		bpf_map__set_max_entries(skel->maps.task_filter, ntasks);
		skel->rodata->has_task = 1;
	}

	skel->rodata->use_nsec = ftrace->use_nsec;
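
	/*
	 * Raise RLIMIT_MEMLOCK before loading: older kernels charge BPF map
	 * and program memory against the locked-memory rlimit rather than
	 * the memory cgroup.
	 */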
	set_max_rlimit();

	err = func_latency_bpf__load(skel);
	if (err) {
		pr_err("Failed to load func latency skeleton\n");
		goto out;
	}
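
	/*
	 * The maps exist only after a successful load, so fill the filters
	 * now.  The value is a dummy flag; the BPF side should only care
	 * whether the key is present.
	 */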
	if (ftrace->target.cpu_list) {
		u32 cpu;
		u8 val = 1;

		fd = bpf_map__fd(skel->maps.cpu_filter);

		for (i = 0; i < ncpus; i++) {
			cpu = perf_cpu_map__cpu(ftrace->evlist->core.user_requested_cpus, i).cpu;
			bpf_map_update_elem(fd, &cpu, &val, BPF_ANY);
		}
	}
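
	/* Same pattern for the task filter, keyed by pid. */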
	if (target__has_task(&ftrace->target) || target__none(&ftrace->target)) {
		u32 pid;
		u8 val = 1;

		fd = bpf_map__fd(skel->maps.task_filter);

		for (i = 0; i < ntasks; i++) {
			pid = perf_thread_map__pid(ftrace->evlist->core.threads, i);
			bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
		}
	}
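
	/*
	 * Attach the entry/return pair; the second argument of
	 * bpf_program__attach_kprobe() selects kprobe (false) vs.
	 * kretprobe (true).  Note the error messages say fentry/fexit,
	 * but these are kprobes on func->name.
	 */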
	skel->links.func_begin = bpf_program__attach_kprobe(skel->progs.func_begin,
							    false, func->name);
	if (IS_ERR(skel->links.func_begin)) {
		pr_err("Failed to attach fentry program\n");
		err = PTR_ERR(skel->links.func_begin);
		goto out;
	}

	skel->links.func_end = bpf_program__attach_kprobe(skel->progs.func_end,
							  true, func->name);
	if (IS_ERR(skel->links.func_end)) {
		pr_err("Failed to attach fexit program\n");
		err = PTR_ERR(skel->links.func_end);
		goto out;
	}

	/* XXX: we don't actually use this fd - just for poll() */
	return open("/dev/null", O_RDONLY);

out:
	return err;
}
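
/*
 * Start/stop only flip a flag in the BPF object's .bss; the probes are
 * expected to check 'enabled' on each hit, so collection is gated
 * without detaching the programs.
 */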
int perf_ftrace__latency_start_bpf(struct perf_ftrace *ftrace __maybe_unused)
{
	skel->bss->enabled = 1;
	return 0;
}

int perf_ftrace__latency_stop_bpf(struct perf_ftrace *ftrace __maybe_unused)
{
	skel->bss->enabled = 0;
	return 0;
}
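
/*
 * The latency histogram is kept in a per-CPU map: one lookup per bucket
 * index fills 'hist' with one counter per possible CPU, and the counters
 * are summed into the caller's buckets[] (sized NUM_BUCKET, from
 * util/ftrace.h).
 */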
int perf_ftrace__latency_read_bpf(struct perf_ftrace *ftrace __maybe_unused,
				  int buckets[])
{
	int i, fd, err;
	u32 idx;
	u64 *hist;
	int ncpus = cpu__max_cpu().cpu;

	fd = bpf_map__fd(skel->maps.latency);

	hist = calloc(ncpus, sizeof(*hist));
	if (hist == NULL)
		return -ENOMEM;
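
	/* A failed lookup leaves that bucket at zero rather than erroring out. */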
	for (idx = 0; idx < NUM_BUCKET; idx++) {
		err = bpf_map_lookup_elem(fd, &idx, hist);
		if (err) {
			buckets[idx] = 0;
			continue;
		}

		for (i = 0; i < ncpus; i++)
			buckets[idx] += hist[i];
	}

	free(hist);
	return 0;
}
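
/*
 * Detach and free everything; the generated destroy helper is NULL-safe,
 * so this is harmless even if prepare failed before the skeleton opened.
 */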
int perf_ftrace__latency_cleanup_bpf(struct perf_ftrace *ftrace __maybe_unused)
{
	func_latency_bpf__destroy(skel);
	return 0;
}