#include <fcntl.h>
#include <stdlib.h>
#include <errno.h>

#include <linux/err.h>

#include "util/ftrace.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/debug.h"
#include "util/evlist.h"
#include "util/bpf_counter.h"

#include "util/bpf_skel/func_latency.skel.h"

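/*
 * func_latency.skel.h is the skeleton generated from the func_latency.bpf.c
 * BPF program; a single instance of it is shared by all of the latency
 * callbacks below.
 */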
static struct func_latency_bpf *skel;

int perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace)
{
	int fd, err;
	int i, ncpus = 1, ntasks = 1;
	struct filter_entry *func;

	/* exactly one target function is supported */
	if (!list_is_singular(&ftrace->filters)) {
		pr_err("ERROR: %s target function(s).\n",
		       list_empty(&ftrace->filters) ? "No" : "Too many");
		return -1;
	}

	func = list_first_entry(&ftrace->filters, struct filter_entry, list);

	skel = func_latency_bpf__open();
	if (!skel) {
		pr_err("Failed to open func latency skeleton\n");
		return -1;
	}

	/* don't need to set cpu filter for system-wide mode */
	if (ftrace->target.cpu_list) {
		ncpus = perf_cpu_map__nr(ftrace->evlist->core.user_requested_cpus);
		bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus);
		skel->rodata->has_cpu = 1;
	}
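
	/* likewise, size the task filter for targeted tasks (or the forked workload) */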
	if (target__has_task(&ftrace->target) || target__none(&ftrace->target)) {
		ntasks = perf_thread_map__nr(ftrace->evlist->core.threads);
		bpf_map__set_max_entries(skel->maps.task_filter, ntasks);
		skel->rodata->has_task = 1;
	}
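
	/*
	 * rodata must be written before load; the BPF programs see these
	 * values as read-only constants afterwards.
	 */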
	skel->rodata->use_nsec = ftrace->use_nsec;

	err = func_latency_bpf__load(skel);
	if (err) {
		pr_err("Failed to load func latency skeleton\n");
		goto out;
	}
	if (ftrace->target.cpu_list) {
		u32 cpu;
		u8 val = 1;

		fd = bpf_map__fd(skel->maps.cpu_filter);

		for (i = 0; i < ncpus; i++) {
			cpu = perf_cpu_map__cpu(ftrace->evlist->core.user_requested_cpus, i).cpu;
			bpf_map_update_elem(fd, &cpu, &val, BPF_ANY);
		}
	}

	if (target__has_task(&ftrace->target) || target__none(&ftrace->target)) {
		u32 pid;
		u8 val = 1;

		fd = bpf_map__fd(skel->maps.task_filter);

		for (i = 0; i < ntasks; i++) {
			pid = perf_thread_map__pid(ftrace->evlist->core.threads, i);
			bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
		}
	}
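
	/*
	 * Attach a kprobe at the target function's entry and a kretprobe at
	 * its return, so the BPF side can bucket the entry-to-return delta
	 * into the latency histogram.
	 */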
	skel->links.func_begin = bpf_program__attach_kprobe(skel->progs.func_begin,
							    false, func->name);
	if (IS_ERR(skel->links.func_begin)) {
		pr_err("Failed to attach fentry program\n");
		err = PTR_ERR(skel->links.func_begin);
		goto out;
	}

	skel->links.func_end = bpf_program__attach_kprobe(skel->progs.func_end,
							  true, func->name);
	if (IS_ERR(skel->links.func_end)) {
		pr_err("Failed to attach fexit program\n");
		err = PTR_ERR(skel->links.func_end);
		goto out;
	}

	/* XXX: we don't actually use this fd - just for poll() */
	return open("/dev/null", O_RDONLY);

out:
	return err;
}
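
/*
 * start/stop only flip the 'enabled' flag in the BPF object's BSS; the
 * attached programs are expected to bail out while it is zero, so nothing
 * is recorded outside the measurement window.
 */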
int perf_ftrace__latency_start_bpf(struct perf_ftrace *ftrace __maybe_unused)
{
	skel->bss->enabled = 1;
	return 0;
}

int perf_ftrace__latency_stop_bpf(struct perf_ftrace *ftrace __maybe_unused)
{
	skel->bss->enabled = 0;
	return 0;
}

int perf_ftrace__latency_read_bpf(struct perf_ftrace *ftrace __maybe_unused,
				  int buckets[])
{
	int i, fd, err;
	u32 idx;
	u64 *hist;
	int ncpus = cpu__max_cpu().cpu;

	fd = bpf_map__fd(skel->maps.latency);
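
	/*
	 * The latency map is per-CPU, so a single lookup returns one value
	 * per possible CPU; allocate room for all of them.
	 */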
	hist = calloc(ncpus, sizeof(*hist));
	if (hist == NULL)
		return -ENOMEM;

	for (idx = 0; idx < NUM_BUCKET; idx++) {
		err = bpf_map_lookup_elem(fd, &idx, hist);
		if (err) {
			buckets[idx] = 0;
			continue;
		}

		/* sum the per-CPU counts for this bucket */
		for (i = 0; i < ncpus; i++)
			buckets[idx] += hist[i];
	}

	free(hist);
	return 0;
}

int perf_ftrace__latency_cleanup_bpf(struct perf_ftrace *ftrace __maybe_unused)
{
	func_latency_bpf__destroy(skel);
	return 0;
}