[linux/fpc-iii.git] tools/bpf/bpftool/skeleton/profiler.bpf.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
#include "profiler.h"
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
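
/*
 * Used by "bpftool prog profile": the fentry_XXX program snapshots the
 * perf counters when the profiled BPF program is entered, and fexit_XXX
 * reads them again on exit, folding the per-metric deltas into
 * accum_readings and bumping the per-CPU sample count in counts. The
 * events map holds one perf event fd per (metric, cpu) pair, indexed as
 * cpu + metric * num_cpu.
 */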

/* map of perf event fds, num_cpu * num_metric entries */
struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(u32));
	__uint(value_size, sizeof(int));
} events SEC(".maps");

/* readings at fentry */
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(key_size, sizeof(u32));
	__uint(value_size, sizeof(struct bpf_perf_event_value));
} fentry_readings SEC(".maps");

/* accumulated readings */
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(key_size, sizeof(u32));
	__uint(value_size, sizeof(struct bpf_perf_event_value));
} accum_readings SEC(".maps");

/* sample counts, one per cpu */
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(key_size, sizeof(u32));
	__uint(value_size, sizeof(u64));
} counts SEC(".maps");

const volatile __u32 num_cpu = 1;
const volatile __u32 num_metric = 1;
#define MAX_NUM_MATRICS 4
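
/*
 * num_cpu and num_metric live in .rodata; bpftool fills them in through
 * the generated skeleton before the object is loaded, and the attach
 * targets of the fentry_XXX/fexit_XXX programs are set at the same time,
 * so "XXX" is only a placeholder. MAX_NUM_MATRICS gives the loops below
 * a constant bound the verifier can reason about.
 */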
40 SEC("fentry/XXX")
41 int BPF_PROG(fentry_XXX)
43 struct bpf_perf_event_value *ptrs[MAX_NUM_MATRICS];
44 u32 key = bpf_get_smp_processor_id();
45 u32 i;
47 /* look up before reading, to reduce error */
48 for (i = 0; i < num_metric && i < MAX_NUM_MATRICS; i++) {
49 u32 flag = i;
51 ptrs[i] = bpf_map_lookup_elem(&fentry_readings, &flag);
52 if (!ptrs[i])
53 return 0;
56 for (i = 0; i < num_metric && i < MAX_NUM_MATRICS; i++) {
57 struct bpf_perf_event_value reading;
58 int err;
60 err = bpf_perf_event_read_value(&events, key, &reading,
61 sizeof(reading));
62 if (err)
63 return 0;
64 *(ptrs[i]) = reading;
65 key += num_cpu;
68 return 0;
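
/*
 * Fold one metric's fexit reading into accum_readings: compute the delta
 * against the value stored at fentry and add it to the per-CPU
 * accumulator. counter/enabled/running follow the usual perf semantics,
 * so user space can scale counter by enabled/running to account for
 * event multiplexing.
 */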
static inline void
fexit_update_maps(u32 id, struct bpf_perf_event_value *after)
{
	struct bpf_perf_event_value *before, diff;

	before = bpf_map_lookup_elem(&fentry_readings, &id);
	/* only account samples with a valid fentry_reading */
	if (before && before->counter) {
		struct bpf_perf_event_value *accum;

		diff.counter = after->counter - before->counter;
		diff.enabled = after->enabled - before->enabled;
		diff.running = after->running - before->running;

		accum = bpf_map_lookup_elem(&accum_readings, &id);
		if (accum) {
			accum->counter += diff.counter;
			accum->enabled += diff.enabled;
			accum->running += diff.running;
		}
	}
}
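
/*
 * Mirror image of fentry_XXX: read every metric first so the measurement
 * window stays as tight as possible, then bump the per-CPU sample count
 * and update the accumulators.
 */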
94 SEC("fexit/XXX")
95 int BPF_PROG(fexit_XXX)
97 struct bpf_perf_event_value readings[MAX_NUM_MATRICS];
98 u32 cpu = bpf_get_smp_processor_id();
99 u32 i, one = 1, zero = 0;
100 int err;
101 u64 *count;
103 /* read all events before updating the maps, to reduce error */
104 for (i = 0; i < num_metric && i < MAX_NUM_MATRICS; i++) {
105 err = bpf_perf_event_read_value(&events, cpu + i * num_cpu,
106 readings + i, sizeof(*readings));
107 if (err)
108 return 0;
110 count = bpf_map_lookup_elem(&counts, &zero);
111 if (count) {
112 *count += 1;
113 for (i = 0; i < num_metric && i < MAX_NUM_MATRICS; i++)
114 fexit_update_maps(i, &readings[i]);
116 return 0;

char LICENSE[] SEC("license") = "GPL";