// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2019 Facebook */

#include <assert.h>
#include <limits.h>
#include <unistd.h>
#include <sys/file.h>
#include <sys/time.h>
#include <linux/err.h>
#include <linux/zalloc.h>
#include <api/fs/fs.h>
#include <perf/bpf_perf.h>

#include "bpf_counter.h"
#include "bpf-utils.h"
#include "counts.h"
#include "debug.h"
#include "evsel.h"
#include "evlist.h"
#include "target.h"
#include "cgroup.h"
#include "cpumap.h"
#include "thread_map.h"

#include "bpf_skel/bpf_prog_profiler.skel.h"
#include "bpf_skel/bperf_u.h"
#include "bpf_skel/bperf_leader.skel.h"
#include "bpf_skel/bperf_follower.skel.h"

#define ATTR_MAP_SIZE 16

static inline void *u64_to_ptr(__u64 ptr)
{
	return (void *)(unsigned long)ptr;
}

static struct bpf_counter *bpf_counter_alloc(void)
{
	struct bpf_counter *counter;

	counter = zalloc(sizeof(*counter));
	if (counter)
		INIT_LIST_HEAD(&counter->list);
	return counter;
}

static int bpf_program_profiler__destroy(struct evsel *evsel)
{
	struct bpf_counter *counter, *tmp;

	list_for_each_entry_safe(counter, tmp,
				 &evsel->bpf_counter_list, list) {
		list_del_init(&counter->list);
		bpf_prog_profiler_bpf__destroy(counter->skel);
		free(counter);
	}
	assert(list_empty(&evsel->bpf_counter_list));

	return 0;
}

static char *bpf_target_prog_name(int tgt_fd)
{
	struct bpf_func_info *func_info;
	struct perf_bpil *info_linear;
	const struct btf_type *t;
	struct btf *btf = NULL;
	char *name = NULL;

	info_linear = get_bpf_prog_info_linear(tgt_fd, 1UL << PERF_BPIL_FUNC_INFO);
	if (IS_ERR_OR_NULL(info_linear)) {
		pr_debug("failed to get info_linear for prog FD %d\n", tgt_fd);
		return NULL;
	}

	if (info_linear->info.btf_id == 0) {
		pr_debug("prog FD %d doesn't have valid btf\n", tgt_fd);
		goto out;
	}

	btf = btf__load_from_kernel_by_id(info_linear->info.btf_id);
	if (libbpf_get_error(btf)) {
		pr_debug("failed to load btf for prog FD %d\n", tgt_fd);
		goto out;
	}

	func_info = u64_to_ptr(info_linear->info.func_info);
	t = btf__type_by_id(btf, func_info[0].type_id);
	if (!t) {
		pr_debug("btf %d doesn't have type %d\n",
			 info_linear->info.btf_id, func_info[0].type_id);
		goto out;
	}
	name = strdup(btf__name_by_offset(btf, t->name_off));
out:
	btf__free(btf);
	free(info_linear);
	return name;
}

static int bpf_program_profiler_load_one(struct evsel *evsel, u32 prog_id)
{
	struct bpf_prog_profiler_bpf *skel;
	struct bpf_counter *counter;
	struct bpf_program *prog;
	char *prog_name = NULL;
	int prog_fd;
	int err;

	prog_fd = bpf_prog_get_fd_by_id(prog_id);
	if (prog_fd < 0) {
		pr_err("Failed to open fd for bpf prog %u\n", prog_id);
		return -1;
	}
	counter = bpf_counter_alloc();
	if (!counter) {
		close(prog_fd);
		return -1;
	}

	skel = bpf_prog_profiler_bpf__open();
	if (!skel) {
		pr_err("Failed to open bpf skeleton\n");
		goto err_out;
	}

	skel->rodata->num_cpu = evsel__nr_cpus(evsel);

	bpf_map__set_max_entries(skel->maps.events, evsel__nr_cpus(evsel));
	bpf_map__set_max_entries(skel->maps.fentry_readings, 1);
	bpf_map__set_max_entries(skel->maps.accum_readings, 1);

	prog_name = bpf_target_prog_name(prog_fd);
	if (!prog_name) {
		pr_err("Failed to get program name for bpf prog %u. Does it have BTF?\n", prog_id);
		goto err_out;
	}

	bpf_object__for_each_program(prog, skel->obj) {
		err = bpf_program__set_attach_target(prog, prog_fd, prog_name);
		if (err) {
			pr_err("bpf_program__set_attach_target failed.\n"
			       "Does bpf prog %u have BTF?\n", prog_id);
			goto err_out;
		}
	}
	set_max_rlimit();
	err = bpf_prog_profiler_bpf__load(skel);
	if (err) {
		pr_err("bpf_prog_profiler_bpf__load failed\n");
		goto err_out;
	}

	assert(skel != NULL);
	counter->skel = skel;
	list_add(&counter->list, &evsel->bpf_counter_list);
	free(prog_name);
	close(prog_fd);
	return 0;
err_out:
	bpf_prog_profiler_bpf__destroy(skel);
	free(prog_name);
	free(counter);
	close(prog_fd);
	return -1;
}

static int bpf_program_profiler__load(struct evsel *evsel, struct target *target)
{
	char *bpf_str, *bpf_str_, *tok, *saveptr = NULL, *p;
	u32 prog_id;
	int ret;

	bpf_str_ = bpf_str = strdup(target->bpf_str);
	if (!bpf_str)
		return -1;

	while ((tok = strtok_r(bpf_str, ",", &saveptr)) != NULL) {
		prog_id = strtoul(tok, &p, 10);
		if (prog_id == 0 || prog_id == UINT_MAX ||
		    (*p != '\0' && *p != ',')) {
			pr_err("Failed to parse bpf prog ids %s\n",
			       target->bpf_str);
			free(bpf_str_);
			return -1;
		}

		ret = bpf_program_profiler_load_one(evsel, prog_id);
		if (ret) {
			bpf_program_profiler__destroy(evsel);
			free(bpf_str_);
			return -1;
		}
		bpf_str = NULL;
	}
	free(bpf_str_);
	return 0;
}
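
/*
 * For context: target->bpf_str above is fed by perf stat's -b/--bpf-prog
 * option, a comma-separated list of BPF program IDs to profile. A typical
 * invocation looks like the following (the IDs are illustrative):
 *
 *	perf stat -b 254,256 -e cycles,instructions -I 1000
 */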

static int bpf_program_profiler__enable(struct evsel *evsel)
{
	struct bpf_counter *counter;
	int ret;

	list_for_each_entry(counter, &evsel->bpf_counter_list, list) {
		assert(counter->skel != NULL);
		ret = bpf_prog_profiler_bpf__attach(counter->skel);
		if (ret) {
			bpf_program_profiler__destroy(evsel);
			return ret;
		}
	}
	return 0;
}

static int bpf_program_profiler__disable(struct evsel *evsel)
{
	struct bpf_counter *counter;

	list_for_each_entry(counter, &evsel->bpf_counter_list, list) {
		assert(counter->skel != NULL);
		bpf_prog_profiler_bpf__detach(counter->skel);
	}
	return 0;
}

static int bpf_program_profiler__read(struct evsel *evsel)
{
	// BPF_MAP_TYPE_PERCPU_ARRAY uses /sys/devices/system/cpu/possible
	// Sometimes possible > online, like on a Ryzen 3900X that has 24
	// threads but its possible showed 0-31 -acme
	int num_cpu_bpf = libbpf_num_possible_cpus();
	struct bpf_perf_event_value values[num_cpu_bpf];
	struct bpf_counter *counter;
	struct perf_counts_values *counts;
	int reading_map_fd;
	__u32 key = 0;
	int err, idx, bpf_cpu;

	if (list_empty(&evsel->bpf_counter_list))
		return -EAGAIN;

	perf_cpu_map__for_each_idx(idx, evsel__cpus(evsel)) {
		counts = perf_counts(evsel->counts, idx, 0);
		counts->val = 0;
		counts->ena = 0;
		counts->run = 0;
	}
	list_for_each_entry(counter, &evsel->bpf_counter_list, list) {
		struct bpf_prog_profiler_bpf *skel = counter->skel;

		assert(skel != NULL);
		reading_map_fd = bpf_map__fd(skel->maps.accum_readings);

		err = bpf_map_lookup_elem(reading_map_fd, &key, values);
		if (err) {
			pr_err("failed to read value\n");
			return err;
		}

		for (bpf_cpu = 0; bpf_cpu < num_cpu_bpf; bpf_cpu++) {
			idx = perf_cpu_map__idx(evsel__cpus(evsel),
						(struct perf_cpu){.cpu = bpf_cpu});
			if (idx == -1)
				continue;
			counts = perf_counts(evsel->counts, idx, 0);
			counts->val += values[bpf_cpu].counter;
			counts->ena += values[bpf_cpu].enabled;
			counts->run += values[bpf_cpu].running;
		}
	}
	return 0;
}

static int bpf_program_profiler__install_pe(struct evsel *evsel, int cpu_map_idx,
					    int fd)
{
	struct bpf_prog_profiler_bpf *skel;
	struct bpf_counter *counter;
	int ret;

	list_for_each_entry(counter, &evsel->bpf_counter_list, list) {
		skel = counter->skel;
		assert(skel != NULL);

		ret = bpf_map_update_elem(bpf_map__fd(skel->maps.events),
					  &cpu_map_idx, &fd, BPF_ANY);
		if (ret)
			return ret;
	}
	return 0;
}

struct bpf_counter_ops bpf_program_profiler_ops = {
	.load = bpf_program_profiler__load,
	.enable = bpf_program_profiler__enable,
	.disable = bpf_program_profiler__disable,
	.read = bpf_program_profiler__read,
	.destroy = bpf_program_profiler__destroy,
	.install_pe = bpf_program_profiler__install_pe,
};

static bool bperf_attr_map_compatible(int attr_map_fd)
{
	struct bpf_map_info map_info = {0};
	__u32 map_info_len = sizeof(map_info);
	int err;

	err = bpf_obj_get_info_by_fd(attr_map_fd, &map_info, &map_info_len);

	if (err)
		return false;
	return (map_info.key_size == sizeof(struct perf_event_attr)) &&
		(map_info.value_size == sizeof(struct perf_event_attr_map_entry));
}
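
/*
 * For reference, the attr map's value type is declared in libperf's
 * <perf/bpf_perf.h> (included above). Its shape, two __u32 IDs, matches
 * the {0xffffffff, 0xffffffff} initializer in bperf__load() below:
 *
 *	struct perf_event_attr_map_entry {
 *		__u32 link_id;
 *		__u32 diff_map_id;
 *	};
 */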

static int bperf_lock_attr_map(struct target *target)
{
	char path[PATH_MAX];
	int map_fd, err;

	if (target->attr_map) {
		scnprintf(path, PATH_MAX, "%s", target->attr_map);
	} else {
		scnprintf(path, PATH_MAX, "%s/fs/bpf/%s", sysfs__mountpoint(),
			  BPF_PERF_DEFAULT_ATTR_MAP_PATH);
	}

	if (access(path, F_OK)) {
		map_fd = bpf_map_create(BPF_MAP_TYPE_HASH, NULL,
					sizeof(struct perf_event_attr),
					sizeof(struct perf_event_attr_map_entry),
					ATTR_MAP_SIZE, NULL);
		if (map_fd < 0)
			return -1;

		err = bpf_obj_pin(map_fd, path);
		if (err) {
			/* someone pinned the map in parallel? */
			close(map_fd);
			map_fd = bpf_obj_get(path);
			if (map_fd < 0)
				return -1;
		}
	} else {
		map_fd = bpf_obj_get(path);
		if (map_fd < 0)
			return -1;
	}

	if (!bperf_attr_map_compatible(map_fd)) {
		close(map_fd);
		return -1;
	}
	err = flock(map_fd, LOCK_EX);
	if (err) {
		close(map_fd);
		return -1;
	}
	return map_fd;
}

static int bperf_check_target(struct evsel *evsel,
			      struct target *target,
			      enum bperf_filter_type *filter_type,
			      __u32 *filter_entry_cnt)
{
	if (evsel->core.leader->nr_members > 1) {
		pr_err("bpf managed perf events do not yet support groups.\n");
		return -1;
	}

	/* determine filter type based on target */
	if (target->system_wide) {
		*filter_type = BPERF_FILTER_GLOBAL;
		*filter_entry_cnt = 1;
	} else if (target->cpu_list) {
		*filter_type = BPERF_FILTER_CPU;
		*filter_entry_cnt = perf_cpu_map__nr(evsel__cpus(evsel));
	} else if (target->tid) {
		*filter_type = BPERF_FILTER_PID;
		*filter_entry_cnt = perf_thread_map__nr(evsel->core.threads);
	} else if (target->pid || evsel->evlist->workload.pid != -1) {
		*filter_type = BPERF_FILTER_TGID;
		*filter_entry_cnt = perf_thread_map__nr(evsel->core.threads);
	} else {
		pr_err("bpf managed perf events do not yet support these targets.\n");
		return -1;
	}

	return 0;
}
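
/*
 * Roughly, the filter types above map to perf stat invocations like these
 * (illustrative):
 *
 *	perf stat --bpf-counters -a ...            => BPERF_FILTER_GLOBAL
 *	perf stat --bpf-counters -C 1,2 ...        => BPERF_FILTER_CPU
 *	perf stat --bpf-counters -t <tid> ...      => BPERF_FILTER_PID
 *	perf stat --bpf-counters -p <pid> ...      => BPERF_FILTER_TGID
 *	perf stat --bpf-counters -- <workload>     => BPERF_FILTER_TGID
 */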

static struct perf_cpu_map *all_cpu_map;
static __u32 filter_entry_cnt;

static int bperf_reload_leader_program(struct evsel *evsel, int attr_map_fd,
				       struct perf_event_attr_map_entry *entry)
{
	struct bperf_leader_bpf *skel = bperf_leader_bpf__open();
	int link_fd, diff_map_fd, err;
	struct bpf_link *link = NULL;

	if (!skel) {
		pr_err("Failed to open leader skeleton\n");
		return -1;
	}

	bpf_map__set_max_entries(skel->maps.events, libbpf_num_possible_cpus());
	err = bperf_leader_bpf__load(skel);
	if (err) {
		pr_err("Failed to load leader skeleton\n");
		goto out;
	}

	link = bpf_program__attach(skel->progs.on_switch);
	if (IS_ERR(link)) {
		pr_err("Failed to attach leader program\n");
		err = PTR_ERR(link);
		goto out;
	}

	link_fd = bpf_link__fd(link);
	diff_map_fd = bpf_map__fd(skel->maps.diff_readings);
	entry->link_id = bpf_link_get_id(link_fd);
	entry->diff_map_id = bpf_map_get_id(diff_map_fd);
	err = bpf_map_update_elem(attr_map_fd, &evsel->core.attr, entry, BPF_ANY);
	assert(err == 0);

	evsel->bperf_leader_link_fd = bpf_link_get_fd_by_id(entry->link_id);
	assert(evsel->bperf_leader_link_fd >= 0);

	/*
	 * save leader_skel for install_pe, which is called within
	 * following evsel__open_per_cpu call
	 */
	evsel->leader_skel = skel;
	evsel__open_per_cpu(evsel, all_cpu_map, -1);

out:
	bperf_leader_bpf__destroy(skel);
	bpf_link__destroy(link);
	return err;
}

static int bperf_attach_follower_program(struct bperf_follower_bpf *skel,
					 enum bperf_filter_type filter_type,
					 bool inherit)
{
	struct bpf_link *link;
	int err = 0;

	if ((filter_type == BPERF_FILTER_PID ||
	     filter_type == BPERF_FILTER_TGID) && inherit)
		/* attach all follower bpf progs to enable event inheritance */
		err = bperf_follower_bpf__attach(skel);
	else {
		link = bpf_program__attach(skel->progs.fexit_XXX);
		if (IS_ERR(link))
			err = PTR_ERR(link);
	}

	return err;
}

static int bperf__load(struct evsel *evsel, struct target *target)
{
	struct perf_event_attr_map_entry entry = {0xffffffff, 0xffffffff};
	int attr_map_fd, diff_map_fd = -1, err;
	enum bperf_filter_type filter_type;
	__u32 i;

	if (bperf_check_target(evsel, target, &filter_type, &filter_entry_cnt))
		return -1;

	if (!all_cpu_map) {
		all_cpu_map = perf_cpu_map__new_online_cpus();
		if (!all_cpu_map)
			return -1;
	}

	evsel->bperf_leader_prog_fd = -1;
	evsel->bperf_leader_link_fd = -1;

	/*
	 * Step 1: hold a fd on the leader program and the bpf_link; if
	 * the program is not already gone, reload the program.
	 * Use flock() to ensure exclusive access to the perf_event_attr
	 * map.
	 */
	attr_map_fd = bperf_lock_attr_map(target);
	if (attr_map_fd < 0) {
		pr_err("Failed to lock perf_event_attr map\n");
		return -1;
	}

	err = bpf_map_lookup_elem(attr_map_fd, &evsel->core.attr, &entry);
	if (err) {
		err = bpf_map_update_elem(attr_map_fd, &evsel->core.attr, &entry, BPF_ANY);
		if (err)
			goto out;
	}

	evsel->bperf_leader_link_fd = bpf_link_get_fd_by_id(entry.link_id);
	if (evsel->bperf_leader_link_fd < 0 &&
	    bperf_reload_leader_program(evsel, attr_map_fd, &entry)) {
		err = -1;
		goto out;
	}
	/*
	 * The bpf_link holds reference to the leader program, and the
	 * leader program holds reference to the maps. Therefore, if
	 * link_id is valid, diff_map_id should also be valid.
	 */
	evsel->bperf_leader_prog_fd = bpf_prog_get_fd_by_id(
		bpf_link_get_prog_id(evsel->bperf_leader_link_fd));
	assert(evsel->bperf_leader_prog_fd >= 0);

	diff_map_fd = bpf_map_get_fd_by_id(entry.diff_map_id);
	assert(diff_map_fd >= 0);

	/*
	 * bperf uses BPF_PROG_TEST_RUN to get an accurate reading. Check
	 * whether the kernel supports it.
	 */
	err = bperf_trigger_reading(evsel->bperf_leader_prog_fd, 0);
	if (err) {
		pr_err("The kernel does not support test_run for raw_tp BPF programs.\n"
		       "Therefore, --use-bpf might show inaccurate readings\n");
		goto out;
	}

	/* Step 2: load the follower skeleton */
	evsel->follower_skel = bperf_follower_bpf__open();
	if (!evsel->follower_skel) {
		err = -1;
		pr_err("Failed to open follower skeleton\n");
		goto out;
	}

	/* attach fexit program to the leader program */
	bpf_program__set_attach_target(evsel->follower_skel->progs.fexit_XXX,
				       evsel->bperf_leader_prog_fd, "on_switch");

	/* connect to leader diff_reading map */
	bpf_map__reuse_fd(evsel->follower_skel->maps.diff_readings, diff_map_fd);

	/* set up reading map */
	bpf_map__set_max_entries(evsel->follower_skel->maps.accum_readings,
				 filter_entry_cnt);
	err = bperf_follower_bpf__load(evsel->follower_skel);
	if (err) {
		pr_err("Failed to load follower skeleton\n");
		bperf_follower_bpf__destroy(evsel->follower_skel);
		evsel->follower_skel = NULL;
		goto out;
	}

	for (i = 0; i < filter_entry_cnt; i++) {
		int filter_map_fd;
		__u32 key;
		struct bperf_filter_value fval = { i, 0 };

		if (filter_type == BPERF_FILTER_PID ||
		    filter_type == BPERF_FILTER_TGID)
			key = perf_thread_map__pid(evsel->core.threads, i);
		else if (filter_type == BPERF_FILTER_CPU)
			key = perf_cpu_map__cpu(evsel->core.cpus, i).cpu;
		else
			break;

		filter_map_fd = bpf_map__fd(evsel->follower_skel->maps.filter);
		bpf_map_update_elem(filter_map_fd, &key, &fval, BPF_ANY);
	}

	evsel->follower_skel->bss->type = filter_type;
	evsel->follower_skel->bss->inherit = target->inherit;

	err = bperf_attach_follower_program(evsel->follower_skel, filter_type,
					    target->inherit);

out:
	if (err && evsel->bperf_leader_link_fd >= 0)
		close(evsel->bperf_leader_link_fd);
	if (err && evsel->bperf_leader_prog_fd >= 0)
		close(evsel->bperf_leader_prog_fd);
	if (diff_map_fd >= 0)
		close(diff_map_fd);

	flock(attr_map_fd, LOCK_UN);
	close(attr_map_fd);

	return err;
}

static int bperf__install_pe(struct evsel *evsel, int cpu_map_idx, int fd)
{
	struct bperf_leader_bpf *skel = evsel->leader_skel;

	return bpf_map_update_elem(bpf_map__fd(skel->maps.events),
				   &cpu_map_idx, &fd, BPF_ANY);
}

/*
 * trigger the leader prog on each cpu, so the accum_reading map can get
 * the latest readings.
 */
static int bperf_sync_counters(struct evsel *evsel)
{
	int num_cpu, i, cpu;

	num_cpu = perf_cpu_map__nr(all_cpu_map);
	for (i = 0; i < num_cpu; i++) {
		cpu = perf_cpu_map__cpu(all_cpu_map, i).cpu;
		bperf_trigger_reading(evsel->bperf_leader_prog_fd, cpu);
	}
	return 0;
}
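
/*
 * bperf_trigger_reading() is provided by libperf (<perf/bpf_perf.h>). A
 * minimal sketch of the idea, assuming the libbpf test_run API (kept in a
 * comment on purpose; the exact option fields are an assumption):
 *
 *	static int trigger_reading_sketch(int prog_fd, int cpu)
 *	{
 *		DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
 *				    .ctx_in = NULL,
 *				    .ctx_size_in = 0,
 *				    .flags = BPF_F_TEST_RUN_ON_CPU,
 *				    .cpu = cpu,
 *				    .retval = 0,
 *		);
 *
 *		// run the leader prog once on the chosen CPU so it folds
 *		// the latest counter delta into the diff_readings map
 *		return bpf_prog_test_run_opts(prog_fd, &opts);
 *	}
 */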

static int bperf__enable(struct evsel *evsel)
{
	evsel->follower_skel->bss->enabled = 1;
	return 0;
}

static int bperf__disable(struct evsel *evsel)
{
	evsel->follower_skel->bss->enabled = 0;
	return 0;
}

static int bperf__read(struct evsel *evsel)
{
	struct bperf_follower_bpf *skel = evsel->follower_skel;
	__u32 num_cpu_bpf = cpu__max_cpu().cpu;
	struct bpf_perf_event_value values[num_cpu_bpf];
	struct perf_counts_values *counts;
	int reading_map_fd, err = 0;
	__u32 i;
	int j;

	bperf_sync_counters(evsel);
	reading_map_fd = bpf_map__fd(skel->maps.accum_readings);

	for (i = 0; i < filter_entry_cnt; i++) {
		struct perf_cpu entry;
		__u32 cpu;

		err = bpf_map_lookup_elem(reading_map_fd, &i, values);
		if (err)
			goto out;
		switch (evsel->follower_skel->bss->type) {
		case BPERF_FILTER_GLOBAL:
			assert(i == 0);

			perf_cpu_map__for_each_cpu(entry, j, evsel__cpus(evsel)) {
				counts = perf_counts(evsel->counts, j, 0);
				counts->val = values[entry.cpu].counter;
				counts->ena = values[entry.cpu].enabled;
				counts->run = values[entry.cpu].running;
			}
			break;
		case BPERF_FILTER_CPU:
			cpu = perf_cpu_map__cpu(evsel__cpus(evsel), i).cpu;
			assert(cpu >= 0);
			counts = perf_counts(evsel->counts, i, 0);
			counts->val = values[cpu].counter;
			counts->ena = values[cpu].enabled;
			counts->run = values[cpu].running;
			break;
		case BPERF_FILTER_PID:
		case BPERF_FILTER_TGID:
			counts = perf_counts(evsel->counts, 0, i);
			counts->val = 0;
			counts->ena = 0;
			counts->run = 0;

			for (cpu = 0; cpu < num_cpu_bpf; cpu++) {
				counts->val += values[cpu].counter;
				counts->ena += values[cpu].enabled;
				counts->run += values[cpu].running;
			}
			break;
		default:
			break;
		}
	}
out:
	return err;
}

static int bperf__destroy(struct evsel *evsel)
{
	bperf_follower_bpf__destroy(evsel->follower_skel);
	close(evsel->bperf_leader_prog_fd);
	close(evsel->bperf_leader_link_fd);
	return 0;
}

/*
 * bperf: share hardware PMCs with BPF
 *
 * perf uses performance monitoring counters (PMC) to monitor system
 * performance. The PMCs are limited hardware resources. For example,
 * Intel CPUs have 3x fixed PMCs and 4x programmable PMCs per cpu.
 *
 * Modern data center systems use these PMCs in many different ways:
 * system level monitoring, (maybe nested) container level monitoring, per
 * process monitoring, profiling (in sample mode), etc. In some cases,
 * there are more active perf_events than available hardware PMCs. To allow
 * all perf_events to have a chance to run, it is necessary to do expensive
 * time multiplexing of events.
 *
 * On the other hand, many monitoring tools count the common metrics
 * (cycles, instructions). It is a waste to have multiple tools create
 * multiple perf_events of "cycles" and occupy multiple PMCs.
 *
 * bperf tries to reduce such waste by allowing multiple perf_events of
 * "cycles" or "instructions" (at different scopes) to share PMUs. Instead
 * of having each perf-stat session read its own perf_events, bperf uses
 * BPF programs to read the perf_events and aggregate readings to BPF maps.
 * Then, the perf-stat session(s) reads the values from these BPF maps.
 *
 *                                ||
 *       shared progs and maps <- || -> per session progs and maps
 *                                ||
 *   ---------------              ||
 *   | perf_events |              ||
 *   ---------------       fexit  ||      -----------------
 *          |             --------||----> | follower prog |
 *   ---------------     /       ||  ---  -----------------
 * cs -> | leader prog |/        || /        |         |
 *   --> ---------------  /||  --------------  ------------------
 *  /       |         |  / ||  | filter map |  | accum_readings |
 * /   ------------  ------------ ||  --------------  ------------------
 * |   | prev map |  | diff map | ||                       |
 * |   ------------  ------------ ||                       |
 *  \                             ||                       |
 * = \ ==================================================== | ============
 *    \                                                    /   user space
 *     \                                                  /
 *      \                                                /
 *       BPF_PROG_TEST_RUN                  BPF_MAP_LOOKUP_ELEM
 *        \                                            /
 *         \                                          /
 *          \------  perf-stat ----------------------/
 *
 * The figure above shows the architecture of bperf. Note that the figure
 * is divided into 3 regions: shared progs and maps (top left), per session
 * progs and maps (top right), and user space (bottom).
 *
 * The leader prog is triggered on each context switch (cs). The leader
 * prog reads perf_events and stores the difference (current_reading -
 * previous_reading) in the diff map. For the same metric, e.g. "cycles",
 * multiple perf-stat sessions share the same leader prog.
 *
 * Each perf-stat session creates a follower prog as fexit program to the
 * leader prog. It is possible to attach up to BPF_MAX_TRAMP_PROGS (38)
 * follower progs to the same leader prog. The follower prog checks current
 * task and processor ID to decide whether to add the value from the diff
 * map to its accumulated reading map (accum_readings).
 *
 * Finally, perf-stat user space reads the value from accum_reading map.
 *
 * Besides context switch, it is also necessary to trigger the leader prog
 * before perf-stat reads the value. Otherwise, the accum_reading map may
 * not have the latest reading from the perf_events. This is achieved by
 * triggering the event via sys_bpf(BPF_PROG_TEST_RUN) on each CPU.
 *
 * The comment before the definition of struct perf_event_attr_map_entry
 * describes how different sessions of perf-stat share information about
 * the leader prog.
 */
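
/*
 * To make the comment above concrete, here is a heavily condensed sketch of
 * one follower prog. The shipped program lives in
 * bpf_skel/bperf_follower.bpf.c; the map names and key computation below
 * are illustrative assumptions, not the exact source:
 *
 *	SEC("fexit/XXX")
 *	int BPF_PROG(fexit_XXX)
 *	{
 *		__u32 zero = 0, key = zero;
 *		struct bperf_filter_value *fval;
 *		struct bpf_perf_event_value *diff, *accum;
 *
 *		if (!enabled)
 *			return 0;
 *
 *		// pick the filter key based on this session's scope
 *		switch (type) {
 *		case BPERF_FILTER_CPU:
 *			key = bpf_get_smp_processor_id();
 *			break;
 *		case BPERF_FILTER_PID:
 *			key = bpf_get_current_pid_tgid() & 0xffffffff;
 *			break;
 *		case BPERF_FILTER_TGID:
 *			key = bpf_get_current_pid_tgid() >> 32;
 *			break;
 *		default:
 *			break;
 *		}
 *
 *		fval = bpf_map_lookup_elem(&filter, &key);
 *		if (!fval)
 *			return 0;	// this task/cpu is not being counted
 *
 *		diff = bpf_map_lookup_elem(&diff_readings, &zero);
 *		accum = bpf_map_lookup_elem(&accum_readings, &fval->accum_key);
 *		if (diff && accum) {
 *			accum->counter += diff->counter;
 *			accum->enabled += diff->enabled;
 *			accum->running += diff->running;
 *		}
 *		return 0;
 *	}
 */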

struct bpf_counter_ops bperf_ops = {
	.load = bperf__load,
	.enable = bperf__enable,
	.disable = bperf__disable,
	.read = bperf__read,
	.install_pe = bperf__install_pe,
	.destroy = bperf__destroy,
};

extern struct bpf_counter_ops bperf_cgrp_ops;

static inline bool bpf_counter_skip(struct evsel *evsel)
{
	return evsel->bpf_counter_ops == NULL;
}

int bpf_counter__install_pe(struct evsel *evsel, int cpu_map_idx, int fd)
{
	if (bpf_counter_skip(evsel))
		return 0;
	return evsel->bpf_counter_ops->install_pe(evsel, cpu_map_idx, fd);
}

int bpf_counter__load(struct evsel *evsel, struct target *target)
{
	if (target->bpf_str)
		evsel->bpf_counter_ops = &bpf_program_profiler_ops;
	else if (cgrp_event_expanded && target->use_bpf)
		evsel->bpf_counter_ops = &bperf_cgrp_ops;
	else if (target->use_bpf || evsel->bpf_counter ||
		 evsel__match_bpf_counter_events(evsel->name))
		evsel->bpf_counter_ops = &bperf_ops;

	if (evsel->bpf_counter_ops)
		return evsel->bpf_counter_ops->load(evsel, target);
	return 0;
}

int bpf_counter__enable(struct evsel *evsel)
{
	if (bpf_counter_skip(evsel))
		return 0;
	return evsel->bpf_counter_ops->enable(evsel);
}

int bpf_counter__disable(struct evsel *evsel)
{
	if (bpf_counter_skip(evsel))
		return 0;
	return evsel->bpf_counter_ops->disable(evsel);
}

int bpf_counter__read(struct evsel *evsel)
{
	if (bpf_counter_skip(evsel))
		return -EAGAIN;
	return evsel->bpf_counter_ops->read(evsel);
}

void bpf_counter__destroy(struct evsel *evsel)
{
	if (bpf_counter_skip(evsel))
		return;
	evsel->bpf_counter_ops->destroy(evsel);
	evsel->bpf_counter_ops = NULL;
	evsel->bpf_skel = NULL;
}
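
/*
 * Typical call sequence from perf stat (a sketch; error handling and the
 * evlist iteration are omitted):
 *
 *	bpf_counter__load(evsel, &target);   // picks ops, loads skeletons
 *	evsel__open(evsel, cpus, threads);   // install_pe() wires in perf fds
 *	bpf_counter__enable(evsel);
 *	// ... workload runs ...
 *	bpf_counter__read(evsel);            // fills evsel->counts
 *	bpf_counter__disable(evsel);
 *	bpf_counter__destroy(evsel);
 */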