// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2019 Facebook */
#include <assert.h>
#include <limits.h>
#include <unistd.h>
#include <sys/file.h>
#include <linux/err.h>
#include <linux/zalloc.h>
#include <api/fs/fs.h>
#include <perf/bpf_perf.h>

#include "bpf_counter.h"
#include "bpf-utils.h"
#include "counts.h"
#include "debug.h"
#include "evsel.h"
#include "evlist.h"
#include "target.h"
#include "cgroup.h"
#include "cpumap.h"
#include "thread_map.h"

#include "bpf_skel/bpf_prog_profiler.skel.h"
#include "bpf_skel/bperf_u.h"
#include "bpf_skel/bperf_leader.skel.h"
#include "bpf_skel/bperf_follower.skel.h"

#define ATTR_MAP_SIZE 16

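/* Convert a u64 field from bpf_prog_info (e.g. func_info) back into a pointer. */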
static inline void *u64_to_ptr(__u64 ptr)
{
	return (void *)(unsigned long)ptr;
}

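/* Allocate a zero-initialized bpf_counter and initialize its list node. */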
static struct bpf_counter *bpf_counter_alloc(void)
{
	struct bpf_counter *counter;

	counter = zalloc(sizeof(*counter));
	if (counter)
		INIT_LIST_HEAD(&counter->list);
	return counter;
}

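/*
 * Tear down every per-prog profiler skeleton attached to this evsel and
 * free the corresponding bpf_counter entries.
 */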
static int bpf_program_profiler__destroy(struct evsel *evsel)
{
	struct bpf_counter *counter, *tmp;

	list_for_each_entry_safe(counter, tmp,
				 &evsel->bpf_counter_list, list) {
		list_del_init(&counter->list);
		bpf_prog_profiler_bpf__destroy(counter->skel);
		free(counter);
	}
	assert(list_empty(&evsel->bpf_counter_list));

	return 0;
}

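/*
 * Look up the name of the target BPF program's entry function via its BTF.
 * Returns a strdup()ed string the caller must free, or NULL on failure.
 */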
static char *bpf_target_prog_name(int tgt_fd)
{
	struct bpf_func_info *func_info;
	struct perf_bpil *info_linear;
	const struct btf_type *t;
	struct btf *btf = NULL;
	char *name = NULL;

	info_linear = get_bpf_prog_info_linear(tgt_fd, 1UL << PERF_BPIL_FUNC_INFO);
	if (IS_ERR_OR_NULL(info_linear)) {
		pr_debug("failed to get info_linear for prog FD %d\n", tgt_fd);
		return NULL;
	}

	if (info_linear->info.btf_id == 0) {
		pr_debug("prog FD %d doesn't have valid btf\n", tgt_fd);
		goto out;
	}

	btf = btf__load_from_kernel_by_id(info_linear->info.btf_id);
	if (libbpf_get_error(btf)) {
		pr_debug("failed to load btf for prog FD %d\n", tgt_fd);
		goto out;
	}

	func_info = u64_to_ptr(info_linear->info.func_info);
	t = btf__type_by_id(btf, func_info[0].type_id);
	if (!t) {
		pr_debug("btf %d doesn't have type %d\n",
			 info_linear->info.btf_id, func_info[0].type_id);
		goto out;
	}
	name = strdup(btf__name_by_offset(btf, t->name_off));
out:
	free(info_linear);
	btf__free(btf);
	return name;
}

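/*
 * Open and load one bpf_prog_profiler skeleton for the BPF program
 * identified by prog_id; the fentry/fexit probes are attached later, at
 * enable time.
 */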
static int bpf_program_profiler_load_one(struct evsel *evsel, u32 prog_id)
{
	struct bpf_prog_profiler_bpf *skel;
	struct bpf_counter *counter;
	struct bpf_program *prog;
	char *prog_name = NULL;
	int prog_fd;
	int err;

	prog_fd = bpf_prog_get_fd_by_id(prog_id);
	if (prog_fd < 0) {
		pr_err("Failed to open fd for bpf prog %u\n", prog_id);
		return -1;
	}
	counter = bpf_counter_alloc();
	if (!counter) {
		close(prog_fd);
		return -1;
	}

	skel = bpf_prog_profiler_bpf__open();
	if (!skel) {
		pr_err("Failed to open bpf skeleton\n");
		goto err_out;
	}

	skel->rodata->num_cpu = evsel__nr_cpus(evsel);

	bpf_map__set_max_entries(skel->maps.events, evsel__nr_cpus(evsel));
	bpf_map__set_max_entries(skel->maps.fentry_readings, 1);
	bpf_map__set_max_entries(skel->maps.accum_readings, 1);

	prog_name = bpf_target_prog_name(prog_fd);
	if (!prog_name) {
		pr_err("Failed to get program name for bpf prog %u. Does it have BTF?\n", prog_id);
		goto err_out;
	}

	bpf_object__for_each_program(prog, skel->obj) {
		err = bpf_program__set_attach_target(prog, prog_fd, prog_name);
		if (err) {
			pr_err("bpf_program__set_attach_target failed.\n"
			       "Does bpf prog %u have BTF?\n", prog_id);
			goto err_out;
		}
	}

	err = bpf_prog_profiler_bpf__load(skel);
	if (err) {
		pr_err("bpf_prog_profiler_bpf__load failed\n");
		goto err_out;
	}

	assert(skel != NULL);
	counter->skel = skel;
	list_add(&counter->list, &evsel->bpf_counter_list);
	free(prog_name);
	close(prog_fd);
	return 0;
err_out:
	bpf_prog_profiler_bpf__destroy(skel);
	free(prog_name);
	free(counter);
	close(prog_fd);
	return -1;
}

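/*
 * Parse the comma-separated list of BPF program ids in target->bpf_str and
 * load one profiler skeleton per id.
 */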
static int bpf_program_profiler__load(struct evsel *evsel, struct target *target)
{
	char *bpf_str, *bpf_str_, *tok, *saveptr = NULL, *p;
	u32 prog_id;
	int ret;

	bpf_str_ = bpf_str = strdup(target->bpf_str);
	if (!bpf_str)
		return -1;

	while ((tok = strtok_r(bpf_str, ",", &saveptr)) != NULL) {
		prog_id = strtoul(tok, &p, 10);
		if (prog_id == 0 || prog_id == UINT_MAX ||
		    (*p != '\0' && *p != ',')) {
			pr_err("Failed to parse bpf prog ids %s\n",
			       target->bpf_str);
			free(bpf_str_);
			return -1;
		}

		ret = bpf_program_profiler_load_one(evsel, prog_id);
		if (ret) {
			bpf_program_profiler__destroy(evsel);
			free(bpf_str_);
			return -1;
		}
		bpf_str = NULL;
	}
	free(bpf_str_);
	return 0;
}

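/* Attach the probes of every loaded profiler skeleton on this evsel. */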
static int bpf_program_profiler__enable(struct evsel *evsel)
{
	struct bpf_counter *counter;
	int ret;

	list_for_each_entry(counter, &evsel->bpf_counter_list, list) {
		assert(counter->skel != NULL);
		ret = bpf_prog_profiler_bpf__attach(counter->skel);
		if (ret) {
			bpf_program_profiler__destroy(evsel);
			return ret;
		}
	}
	return 0;
}

static int bpf_program_profiler__disable(struct evsel *evsel)
{
	struct bpf_counter *counter;

	list_for_each_entry(counter, &evsel->bpf_counter_list, list) {
		assert(counter->skel != NULL);
		bpf_prog_profiler_bpf__detach(counter->skel);
	}
	return 0;
}

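/*
 * Sum the per-cpu readings accumulated by each profiler skeleton into the
 * evsel's counts.
 */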
static int bpf_program_profiler__read(struct evsel *evsel)
{
	// BPF_MAP_TYPE_PERCPU_ARRAY uses /sys/devices/system/cpu/possible
	// Sometimes possible > online, like on a Ryzen 3900X that has 24
	// threads but its possible showed 0-31 -acme
	int num_cpu_bpf = libbpf_num_possible_cpus();
	struct bpf_perf_event_value values[num_cpu_bpf];
	struct bpf_counter *counter;
	struct perf_counts_values *counts;
	int reading_map_fd;
	__u32 key = 0;
	int err, idx, bpf_cpu;

	if (list_empty(&evsel->bpf_counter_list))
		return -EAGAIN;

	perf_cpu_map__for_each_idx(idx, evsel__cpus(evsel)) {
		counts = perf_counts(evsel->counts, idx, 0);
		counts->val = 0;
		counts->ena = 0;
		counts->run = 0;
	}
	list_for_each_entry(counter, &evsel->bpf_counter_list, list) {
		struct bpf_prog_profiler_bpf *skel = counter->skel;

		assert(skel != NULL);
		reading_map_fd = bpf_map__fd(skel->maps.accum_readings);

		err = bpf_map_lookup_elem(reading_map_fd, &key, values);
		if (err) {
			pr_err("failed to read value\n");
			return err;
		}

		for (bpf_cpu = 0; bpf_cpu < num_cpu_bpf; bpf_cpu++) {
			idx = perf_cpu_map__idx(evsel__cpus(evsel),
						(struct perf_cpu){.cpu = bpf_cpu});
			if (idx == -1)
				continue;
			counts = perf_counts(evsel->counts, idx, 0);
			counts->val += values[bpf_cpu].counter;
			counts->ena += values[bpf_cpu].enabled;
			counts->run += values[bpf_cpu].running;
		}
	}
	return 0;
}

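/* Store the perf_event fd for this cpu in each profiler skeleton's events map. */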
static int bpf_program_profiler__install_pe(struct evsel *evsel, int cpu_map_idx,
					    int fd)
{
	struct bpf_prog_profiler_bpf *skel;
	struct bpf_counter *counter;
	int ret;

	list_for_each_entry(counter, &evsel->bpf_counter_list, list) {
		skel = counter->skel;
		assert(skel != NULL);

		ret = bpf_map_update_elem(bpf_map__fd(skel->maps.events),
					  &cpu_map_idx, &fd, BPF_ANY);
		if (ret)
			return ret;
	}
	return 0;
}

struct bpf_counter_ops bpf_program_profiler_ops = {
	.load       = bpf_program_profiler__load,
	.enable     = bpf_program_profiler__enable,
	.disable    = bpf_program_profiler__disable,
	.read       = bpf_program_profiler__read,
	.destroy    = bpf_program_profiler__destroy,
	.install_pe = bpf_program_profiler__install_pe,
};

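/* Check that a pinned attr map has the key/value sizes bperf expects. */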
static bool bperf_attr_map_compatible(int attr_map_fd)
{
	struct bpf_map_info map_info = {0};
	__u32 map_info_len = sizeof(map_info);
	int err;

	err = bpf_obj_get_info_by_fd(attr_map_fd, &map_info, &map_info_len);
	if (err)
		return false;
	return (map_info.key_size == sizeof(struct perf_event_attr)) &&
		(map_info.value_size == sizeof(struct perf_event_attr_map_entry));
}

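/*
 * Open (creating and pinning it if necessary) the shared perf_event_attr
 * map in bpffs and take an exclusive flock() on it. Returns the map fd, or
 * a negative value on error.
 */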
static int bperf_lock_attr_map(struct target *target)
{
	char path[PATH_MAX];
	int map_fd, err;

	if (target->attr_map) {
		scnprintf(path, PATH_MAX, "%s", target->attr_map);
	} else {
		scnprintf(path, PATH_MAX, "%s/fs/bpf/%s", sysfs__mountpoint(),
			  BPF_PERF_DEFAULT_ATTR_MAP_PATH);
	}

	if (access(path, F_OK)) {
		map_fd = bpf_map_create(BPF_MAP_TYPE_HASH, NULL,
					sizeof(struct perf_event_attr),
					sizeof(struct perf_event_attr_map_entry),
					ATTR_MAP_SIZE, NULL);
		if (map_fd < 0)
			return -1;

		err = bpf_obj_pin(map_fd, path);
		if (err) {
			/* someone pinned the map in parallel? */
			close(map_fd);
			map_fd = bpf_obj_get(path);
			if (map_fd < 0)
				return -1;
		}
	} else {
		map_fd = bpf_obj_get(path);
		if (map_fd < 0)
			return -1;
	}

	if (!bperf_attr_map_compatible(map_fd)) {
		close(map_fd);
		return -1;
	}
	err = flock(map_fd, LOCK_EX);
	if (err) {
		close(map_fd);
		return -1;
	}
	return map_fd;
}

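/*
 * Reject unsupported targets and derive the bperf filter type (global, cpu,
 * pid or tgid) plus the number of filter entries from the target.
 */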
static int bperf_check_target(struct evsel *evsel,
			      struct target *target,
			      enum bperf_filter_type *filter_type,
			      __u32 *filter_entry_cnt)
{
	if (evsel->core.leader->nr_members > 1) {
		pr_err("bpf managed perf events do not yet support groups.\n");
		return -1;
	}

	/* determine filter type based on target */
	if (target->system_wide) {
		*filter_type = BPERF_FILTER_GLOBAL;
		*filter_entry_cnt = 1;
	} else if (target->cpu_list) {
		*filter_type = BPERF_FILTER_CPU;
		*filter_entry_cnt = perf_cpu_map__nr(evsel__cpus(evsel));
	} else if (target->tid) {
		*filter_type = BPERF_FILTER_PID;
		*filter_entry_cnt = perf_thread_map__nr(evsel->core.threads);
	} else if (target->pid || evsel->evlist->workload.pid != -1) {
		*filter_type = BPERF_FILTER_TGID;
		*filter_entry_cnt = perf_thread_map__nr(evsel->core.threads);
	} else {
		pr_err("bpf managed perf events do not yet support these targets.\n");
		return -1;
	}

	return 0;
}

static struct perf_cpu_map *all_cpu_map;
static __u32 filter_entry_cnt;

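/*
 * (Re)load the shared leader program, attach its on_switch program, and
 * publish the resulting link and diff map ids in the attr map so that other
 * perf-stat sessions can reuse them.
 */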
static int bperf_reload_leader_program(struct evsel *evsel, int attr_map_fd,
				       struct perf_event_attr_map_entry *entry)
{
	struct bperf_leader_bpf *skel = bperf_leader_bpf__open();
	int link_fd, diff_map_fd, err;
	struct bpf_link *link = NULL;

	if (!skel) {
		pr_err("Failed to open leader skeleton\n");
		return -1;
	}

	bpf_map__set_max_entries(skel->maps.events, libbpf_num_possible_cpus());
	err = bperf_leader_bpf__load(skel);
	if (err) {
		pr_err("Failed to load leader skeleton\n");
		goto out;
	}

	link = bpf_program__attach(skel->progs.on_switch);
	if (IS_ERR(link)) {
		pr_err("Failed to attach leader program\n");
		err = PTR_ERR(link);
		link = NULL;
		goto out;
	}

	link_fd = bpf_link__fd(link);
	diff_map_fd = bpf_map__fd(skel->maps.diff_readings);
	entry->link_id = bpf_link_get_id(link_fd);
	entry->diff_map_id = bpf_map_get_id(diff_map_fd);
	err = bpf_map_update_elem(attr_map_fd, &evsel->core.attr, entry, BPF_ANY);
	assert(err == 0);

	evsel->bperf_leader_link_fd = bpf_link_get_fd_by_id(entry->link_id);
	assert(evsel->bperf_leader_link_fd >= 0);

	/*
	 * save leader_skel for install_pe, which is called within
	 * following evsel__open_per_cpu call
	 */
	evsel->leader_skel = skel;
	evsel__open_per_cpu(evsel, all_cpu_map, -1);

out:
	bperf_leader_bpf__destroy(skel);
	bpf_link__destroy(link);
	return err;
}

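/*
 * Attach the follower program(s). With a task filter and inherit enabled,
 * all follower programs are attached so that child tasks are also counted;
 * otherwise only the fexit program on the leader is attached.
 */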
static int bperf_attach_follower_program(struct bperf_follower_bpf *skel,
					 enum bperf_filter_type filter_type,
					 bool inherit)
{
	struct bpf_link *link;
	int err = 0;

	if ((filter_type == BPERF_FILTER_PID ||
	     filter_type == BPERF_FILTER_TGID) && inherit)
		/* attach all follower bpf progs to enable event inheritance */
		err = bperf_follower_bpf__attach(skel);
	else {
		link = bpf_program__attach(skel->progs.fexit_XXX);
		if (IS_ERR(link))
			err = PTR_ERR(link);
	}

	return err;
}

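/*
 * Set up bperf counting for one evsel: find or (re)load the shared leader
 * program via the pinned attr map, then load and attach a per-session
 * follower program that filters by cpu/pid/tgid and accumulates readings.
 */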
static int bperf__load(struct evsel *evsel, struct target *target)
{
	struct perf_event_attr_map_entry entry = {0xffffffff, 0xffffffff};
	int attr_map_fd, diff_map_fd = -1, err;
	enum bperf_filter_type filter_type;
	__u32 i;

	if (bperf_check_target(evsel, target, &filter_type, &filter_entry_cnt))
		return -1;

	if (!all_cpu_map) {
		all_cpu_map = perf_cpu_map__new_online_cpus();
		if (!all_cpu_map)
			return -1;
	}

	evsel->bperf_leader_prog_fd = -1;
	evsel->bperf_leader_link_fd = -1;

	/*
	 * Step 1: hold a fd on the leader program and the bpf_link, if
	 * the program is not already gone, reload the program.
	 * Use flock() to ensure exclusive access to the perf_event_attr
	 * map.
	 */
	attr_map_fd = bperf_lock_attr_map(target);
	if (attr_map_fd < 0) {
		pr_err("Failed to lock perf_event_attr map\n");
		return -1;
	}

	err = bpf_map_lookup_elem(attr_map_fd, &evsel->core.attr, &entry);
	if (err) {
		err = bpf_map_update_elem(attr_map_fd, &evsel->core.attr, &entry, BPF_ANY);
		if (err)
			goto out;
	}

	evsel->bperf_leader_link_fd = bpf_link_get_fd_by_id(entry.link_id);
	if (evsel->bperf_leader_link_fd < 0 &&
	    bperf_reload_leader_program(evsel, attr_map_fd, &entry)) {
		err = -1;
		goto out;
	}
	/*
	 * The bpf_link holds reference to the leader program, and the
	 * leader program holds reference to the maps. Therefore, if
	 * link_id is valid, diff_map_id should also be valid.
	 */
	evsel->bperf_leader_prog_fd = bpf_prog_get_fd_by_id(
		bpf_link_get_prog_id(evsel->bperf_leader_link_fd));
	assert(evsel->bperf_leader_prog_fd >= 0);

	diff_map_fd = bpf_map_get_fd_by_id(entry.diff_map_id);
	assert(diff_map_fd >= 0);

	/*
	 * bperf uses BPF_PROG_TEST_RUN to get accurate reading. Check
	 * whether the kernel support it
	 */
	err = bperf_trigger_reading(evsel->bperf_leader_prog_fd, 0);
	if (err) {
		pr_err("The kernel does not support test_run for raw_tp BPF programs.\n"
		       "Therefore, --use-bpf might show inaccurate readings\n");
		goto out;
	}

	/* Step 2: load the follower skeleton */
	evsel->follower_skel = bperf_follower_bpf__open();
	if (!evsel->follower_skel) {
		err = -1;
		pr_err("Failed to open follower skeleton\n");
		goto out;
	}

	/* attach fexit program to the leader program */
	bpf_program__set_attach_target(evsel->follower_skel->progs.fexit_XXX,
				       evsel->bperf_leader_prog_fd, "on_switch");

	/* connect to leader diff_reading map */
	bpf_map__reuse_fd(evsel->follower_skel->maps.diff_readings, diff_map_fd);

	/* set up reading map */
	bpf_map__set_max_entries(evsel->follower_skel->maps.accum_readings,
				 filter_entry_cnt);
	err = bperf_follower_bpf__load(evsel->follower_skel);
	if (err) {
		pr_err("Failed to load follower skeleton\n");
		bperf_follower_bpf__destroy(evsel->follower_skel);
		evsel->follower_skel = NULL;
		goto out;
	}

	for (i = 0; i < filter_entry_cnt; i++) {
		int filter_map_fd;
		__u32 key;
		struct bperf_filter_value fval = { i, 0 };

		if (filter_type == BPERF_FILTER_PID ||
		    filter_type == BPERF_FILTER_TGID)
			key = perf_thread_map__pid(evsel->core.threads, i);
		else if (filter_type == BPERF_FILTER_CPU)
			key = perf_cpu_map__cpu(evsel->core.cpus, i).cpu;
		else
			break;

		filter_map_fd = bpf_map__fd(evsel->follower_skel->maps.filter);
		bpf_map_update_elem(filter_map_fd, &key, &fval, BPF_ANY);
	}

	evsel->follower_skel->bss->type = filter_type;
	evsel->follower_skel->bss->inherit = target->inherit;

	err = bperf_attach_follower_program(evsel->follower_skel, filter_type,
					    target->inherit);

out:
	if (err && evsel->bperf_leader_link_fd >= 0)
		close(evsel->bperf_leader_link_fd);
	if (err && evsel->bperf_leader_prog_fd >= 0)
		close(evsel->bperf_leader_prog_fd);
	if (diff_map_fd >= 0)
		close(diff_map_fd);

	flock(attr_map_fd, LOCK_UN);
	close(attr_map_fd);

	return err;
}

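/* Hand the perf_event fd for this cpu to the shared leader program's events map. */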
static int bperf__install_pe(struct evsel *evsel, int cpu_map_idx, int fd)
{
	struct bperf_leader_bpf *skel = evsel->leader_skel;

	return bpf_map_update_elem(bpf_map__fd(skel->maps.events),
				   &cpu_map_idx, &fd, BPF_ANY);
}

/*
 * trigger the leader prog on each cpu, so the accum_reading map could get
 * the latest readings.
 */
static int bperf_sync_counters(struct evsel *evsel)
{
	int num_cpu, i, cpu;

	num_cpu = perf_cpu_map__nr(all_cpu_map);
	for (i = 0; i < num_cpu; i++) {
		cpu = perf_cpu_map__cpu(all_cpu_map, i).cpu;
		bperf_trigger_reading(evsel->bperf_leader_prog_fd, cpu);
	}
	return 0;
}

static int bperf__enable(struct evsel *evsel)
{
	evsel->follower_skel->bss->enabled = 1;
	return 0;
}

static int bperf__disable(struct evsel *evsel)
{
	evsel->follower_skel->bss->enabled = 0;
	return 0;
}

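/*
 * Trigger a fresh reading on every cpu, then fold the accumulated per-cpu
 * values into the evsel's counts according to the active filter type.
 */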
static int bperf__read(struct evsel *evsel)
{
	struct bperf_follower_bpf *skel = evsel->follower_skel;
	__u32 num_cpu_bpf = cpu__max_cpu().cpu;
	struct bpf_perf_event_value values[num_cpu_bpf];
	struct perf_counts_values *counts;
	int reading_map_fd, err = 0;
	__u32 i;
	int j;

	bperf_sync_counters(evsel);
	reading_map_fd = bpf_map__fd(skel->maps.accum_readings);

	for (i = 0; i < filter_entry_cnt; i++) {
		struct perf_cpu entry;
		__u32 cpu;

		err = bpf_map_lookup_elem(reading_map_fd, &i, values);
		if (err)
			goto out;
		switch (evsel->follower_skel->bss->type) {
		case BPERF_FILTER_GLOBAL:
			assert(i == 0);

			perf_cpu_map__for_each_cpu(entry, j, evsel__cpus(evsel)) {
				counts = perf_counts(evsel->counts, j, 0);
				counts->val = values[entry.cpu].counter;
				counts->ena = values[entry.cpu].enabled;
				counts->run = values[entry.cpu].running;
			}
			break;
		case BPERF_FILTER_CPU:
			cpu = perf_cpu_map__cpu(evsel__cpus(evsel), i).cpu;
			counts = perf_counts(evsel->counts, i, 0);
			counts->val = values[cpu].counter;
			counts->ena = values[cpu].enabled;
			counts->run = values[cpu].running;
			break;
		case BPERF_FILTER_PID:
		case BPERF_FILTER_TGID:
			counts = perf_counts(evsel->counts, 0, i);
			counts->val = 0;
			counts->ena = 0;
			counts->run = 0;

			for (cpu = 0; cpu < num_cpu_bpf; cpu++) {
				counts->val += values[cpu].counter;
				counts->ena += values[cpu].enabled;
				counts->run += values[cpu].running;
			}
			break;
		default:
			break;
		}
	}
out:
	return err;
}

static int bperf__destroy(struct evsel *evsel)
{
	bperf_follower_bpf__destroy(evsel->follower_skel);
	close(evsel->bperf_leader_prog_fd);
	close(evsel->bperf_leader_link_fd);
	return 0;
}

/*
 * bperf: share hardware PMCs with BPF
 *
 * perf uses performance monitoring counters (PMC) to monitor system
 * performance. The PMCs are limited hardware resources. For example,
 * Intel CPUs have 3x fixed PMCs and 4x programmable PMCs per cpu.
 *
 * Modern data center systems use these PMCs in many different ways:
 * system level monitoring, (maybe nested) container level monitoring, per
 * process monitoring, profiling (in sample mode), etc. In some cases,
 * there are more active perf_events than available hardware PMCs. To allow
 * all perf_events to have a chance to run, it is necessary to do expensive
 * time multiplexing of events.
 *
 * On the other hand, many monitoring tools count the common metrics
 * (cycles, instructions). It is a waste to have multiple tools create
 * multiple perf_events of "cycles" and occupy multiple PMCs.
 *
 * bperf tries to reduce such wastes by allowing multiple perf_events of
 * "cycles" or "instructions" (at different scopes) to share PMUs. Instead
 * of having each perf-stat session to read its own perf_events, bperf uses
 * BPF programs to read the perf_events and aggregate readings to BPF maps.
 * Then, the perf-stat session(s) reads the values from these BPF maps.
 *
 *                                 ||
 *       shared progs and maps <-  ||  -> per session progs and maps
 *                                 ||
 *   ---------------               ||
 *   | perf_events |               ||
 *   ---------------        fexit  ||      -----------------
 *          |          --------||------->  | follower prog |
 *       --------------- /        ||       -----------------
 * cs -> | leader prog |/         ||          |           |
 *   --> --------------- /        ||  --------------  ------------------
 *  /       |         | /         ||  | filter map |  | accum_readings |
 * /   ------------  ------------ ||  --------------  ------------------
 * |   | prev map |  | diff map | ||                       |
 * |   ------------  ------------ ||                       |
 *  \                             ||                       |
 * = \ ==================================================== | ============
 *    \                                                    /   user space
 *     \                                                  /
 *      \                                                /
 *  BPF_PROG_TEST_RUN                    BPF_MAP_LOOKUP_ELEM
 *        \                                            /
 *         \                                          /
 *          \------  perf-stat ----------------------/
 *
 * The figure above shows the architecture of bperf. Note that the figure
 * is divided into 3 regions: shared progs and maps (top left), per session
 * progs and maps (top right), and user space (bottom).
 *
 * The leader prog is triggered on each context switch (cs). The leader
 * prog reads perf_events and stores the difference (current_reading -
 * previous_reading) to the diff map. For the same metric, e.g. "cycles",
 * multiple perf-stat sessions share the same leader prog.
 *
 * Each perf-stat session creates a follower prog as fexit program to the
 * leader prog. It is possible to attach up to BPF_MAX_TRAMP_PROGS (38)
 * follower progs to the same leader prog. The follower prog checks current
 * task and processor ID to decide whether to add the value from the diff
 * map to its accumulated reading map (accum_readings).
 *
 * Finally, perf-stat user space reads the value from accum_reading map.
 *
 * Besides context switch, it is also necessary to trigger the leader prog
 * before perf-stat reads the value. Otherwise, the accum_reading map may
 * not have the latest reading from the perf_events. This is achieved by
 * triggering the event via sys_bpf(BPF_PROG_TEST_RUN) to each CPU.
 *
 * Comment before the definition of struct perf_event_attr_map_entry
 * describes how different sessions of perf-stat share information about
 * the leader prog.
 */

struct bpf_counter_ops bperf_ops = {
	.load       = bperf__load,
	.enable     = bperf__enable,
	.disable    = bperf__disable,
	.read       = bperf__read,
	.install_pe = bperf__install_pe,
	.destroy    = bperf__destroy,
};

extern struct bpf_counter_ops bperf_cgrp_ops;

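/*
 * The bpf_counter__* entry points below dispatch to the ops selected in
 * bpf_counter__load(); they are no-ops for evsels without a BPF counter.
 */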
static inline bool bpf_counter_skip(struct evsel *evsel)
{
	return evsel->bpf_counter_ops == NULL;
}

int bpf_counter__install_pe(struct evsel *evsel, int cpu_map_idx, int fd)
{
	if (bpf_counter_skip(evsel))
		return 0;
	return evsel->bpf_counter_ops->install_pe(evsel, cpu_map_idx, fd);
}

int bpf_counter__load(struct evsel *evsel, struct target *target)
{
	if (target->bpf_str)
		evsel->bpf_counter_ops = &bpf_program_profiler_ops;
	else if (cgrp_event_expanded && target->use_bpf)
		evsel->bpf_counter_ops = &bperf_cgrp_ops;
	else if (target->use_bpf || evsel->bpf_counter ||
		 evsel__match_bpf_counter_events(evsel->name))
		evsel->bpf_counter_ops = &bperf_ops;

	if (evsel->bpf_counter_ops)
		return evsel->bpf_counter_ops->load(evsel, target);
	return 0;
}

int bpf_counter__enable(struct evsel *evsel)
{
	if (bpf_counter_skip(evsel))
		return 0;
	return evsel->bpf_counter_ops->enable(evsel);
}

int bpf_counter__disable(struct evsel *evsel)
{
	if (bpf_counter_skip(evsel))
		return 0;
	return evsel->bpf_counter_ops->disable(evsel);
}

int bpf_counter__read(struct evsel *evsel)
{
	if (bpf_counter_skip(evsel))
		return -EAGAIN;
	return evsel->bpf_counter_ops->read(evsel);
}

void bpf_counter__destroy(struct evsel *evsel)
{
	if (bpf_counter_skip(evsel))
		return;
	evsel->bpf_counter_ops->destroy(evsel);
	evsel->bpf_counter_ops = NULL;
	evsel->bpf_skel = NULL;
}