// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2022 Huawei Inc, Yang Jihong <yangjihong1@huawei.com>
 */

#include <time.h>

#include <linux/time64.h>

#include "util/debug.h"
#include "util/evsel.h"
#include "util/kwork.h"

#include <bpf/bpf.h>
#include <perf/cpumap.h>

#include "util/bpf_skel/kwork_trace.skel.h"

/*
 * This should be in sync with "util/kwork_trace.bpf.c"
 */
#define MAX_KWORKNAME 128

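/*
 * Map key and per-work accounting value shared with the BPF programs.
 * The layouts below are assumed to mirror the definitions in
 * "util/kwork_trace.bpf.c" (see the sync note above) and the field
 * accesses in add_work() below.
 */
struct work_key {
        u32 type;
        u32 cpu;
        u64 id;
};

struct report_data {
        u64 nr;
        u64 total_time;
        u64 max_time;
        u64 max_time_start;
        u64 max_time_end;
};
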
struct kwork_class_bpf {
        struct kwork_class *class;

        void (*load_prepare)(struct perf_kwork *kwork);
        int  (*get_work_name)(struct work_key *key, char **ret_name);
};

static struct kwork_trace_bpf *skel;

static struct timespec ts_start;
static struct timespec ts_end;

void perf_kwork__trace_start(void)
{
        clock_gettime(CLOCK_MONOTONIC, &ts_start);
        skel->bss->enabled = 1;
}

void perf_kwork__trace_finish(void)
{
        clock_gettime(CLOCK_MONOTONIC, &ts_end);
        skel->bss->enabled = 0;
}

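/*
 * Look up the human-readable work name stored by the BPF side in the
 * perf_kwork_names map for this key; the returned string is strdup()ed
 * and owned by the caller.
 */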
static int get_work_name_from_map(struct work_key *key, char **ret_name)
{
        char name[MAX_KWORKNAME] = { 0 };
        int fd = bpf_map__fd(skel->maps.perf_kwork_names);

        *ret_name = NULL;

        if (fd < 0) {
                pr_debug("Invalid names map fd\n");
                return 0;
        }

        if ((bpf_map_lookup_elem(fd, key, name) == 0) && (strlen(name) != 0)) {
                *ret_name = strdup(name);
                if (*ret_name == NULL) {
                        pr_err("Failed to copy work name\n");
                        return -1;
                }
        }

        return 0;
}

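/*
 * Each kwork class enables only the BPF programs the requested report
 * needs: the report_* programs for runtime reports and the latency_*
 * programs for latency reports.
 */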
static void irq_load_prepare(struct perf_kwork *kwork)
{
        if (kwork->report == KWORK_REPORT_RUNTIME) {
                bpf_program__set_autoload(skel->progs.report_irq_handler_entry, true);
                bpf_program__set_autoload(skel->progs.report_irq_handler_exit, true);
        }
}

static struct kwork_class_bpf kwork_irq_bpf = {
        .load_prepare  = irq_load_prepare,
        .get_work_name = get_work_name_from_map,
};

static void softirq_load_prepare(struct perf_kwork *kwork)
{
        if (kwork->report == KWORK_REPORT_RUNTIME) {
                bpf_program__set_autoload(skel->progs.report_softirq_entry, true);
                bpf_program__set_autoload(skel->progs.report_softirq_exit, true);
        } else if (kwork->report == KWORK_REPORT_LATENCY) {
                bpf_program__set_autoload(skel->progs.latency_softirq_raise, true);
                bpf_program__set_autoload(skel->progs.latency_softirq_entry, true);
        }
}

static struct kwork_class_bpf kwork_softirq_bpf = {
        .load_prepare  = softirq_load_prepare,
        .get_work_name = get_work_name_from_map,
};

static void workqueue_load_prepare(struct perf_kwork *kwork)
{
        if (kwork->report == KWORK_REPORT_RUNTIME) {
                bpf_program__set_autoload(skel->progs.report_workqueue_execute_start, true);
                bpf_program__set_autoload(skel->progs.report_workqueue_execute_end, true);
        } else if (kwork->report == KWORK_REPORT_LATENCY) {
                bpf_program__set_autoload(skel->progs.latency_workqueue_activate_work, true);
                bpf_program__set_autoload(skel->progs.latency_workqueue_execute_start, true);
        }
}

static struct kwork_class_bpf kwork_workqueue_bpf = {
        .load_prepare  = workqueue_load_prepare,
        .get_work_name = get_work_name_from_map,
};

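/*
 * BPF support table indexed by kwork class type; a NULL slot means the
 * class cannot be traced via BPF.
 */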
static struct kwork_class_bpf *
kwork_class_bpf_supported_list[KWORK_CLASS_MAX] = {
        [KWORK_CLASS_IRQ]       = &kwork_irq_bpf,
        [KWORK_CLASS_SOFTIRQ]   = &kwork_softirq_bpf,
        [KWORK_CLASS_WORKQUEUE] = &kwork_workqueue_bpf,
};

static bool valid_kwork_class_type(enum kwork_class_type type)
{
        return type >= 0 && type < KWORK_CLASS_MAX;
}

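/*
 * Populate the CPU and name filter maps from the command-line options
 * so the BPF programs only account for matching work.
 */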
static int setup_filters(struct perf_kwork *kwork)
{
        if (kwork->cpu_list != NULL) {
                int idx, nr_cpus;
                struct perf_cpu_map *map;
                struct perf_cpu cpu;
                int fd = bpf_map__fd(skel->maps.perf_kwork_cpu_filter);

                if (fd < 0) {
                        pr_debug("Invalid cpu filter fd\n");
                        return -1;
                }

                map = perf_cpu_map__new(kwork->cpu_list);
                if (map == NULL) {
                        pr_debug("Invalid cpu_list\n");
                        return -1;
                }

                nr_cpus = libbpf_num_possible_cpus();
                perf_cpu_map__for_each_cpu(cpu, idx, map) {
                        u8 val = 1;

                        if (cpu.cpu >= nr_cpus) {
                                perf_cpu_map__put(map);
                                pr_err("Requested cpu %d too large\n", cpu.cpu);
                                return -1;
                        }
                        bpf_map_update_elem(fd, &cpu.cpu, &val, BPF_ANY);
                }
                perf_cpu_map__put(map);
        }

        if (kwork->profile_name != NULL) {
                int key, fd;

                if (strlen(kwork->profile_name) >= MAX_KWORKNAME) {
                        pr_err("Requested name filter %s too large, limit to %d\n",
                               kwork->profile_name, MAX_KWORKNAME - 1);
                        return -1;
                }

                fd = bpf_map__fd(skel->maps.perf_kwork_name_filter);
                if (fd < 0) {
                        pr_debug("Invalid name filter fd\n");
                        return -1;
                }

                key = 0;
                bpf_map_update_elem(fd, &key, kwork->profile_name, BPF_ANY);
        }

        return 0;
}

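/*
 * Open the skeleton, enable only the programs required by the selected
 * classes and report type, apply the filters and attach everything.
 */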
int perf_kwork__trace_prepare_bpf(struct perf_kwork *kwork)
{
        struct bpf_program *prog;
        struct kwork_class *class;
        struct kwork_class_bpf *class_bpf;
        enum kwork_class_type type;

        skel = kwork_trace_bpf__open();
        if (!skel) {
                pr_debug("Failed to open kwork trace skeleton\n");
                return -1;
        }

        /*
         * set all progs to non-autoload,
         * then set corresponding progs according to config
         */
        bpf_object__for_each_program(prog, skel->obj)
                bpf_program__set_autoload(prog, false);

        list_for_each_entry(class, &kwork->class_list, list) {
                type = class->type;
                if (!valid_kwork_class_type(type) ||
                    (kwork_class_bpf_supported_list[type] == NULL)) {
                        pr_err("Unsupported bpf trace class %s\n", class->name);
                        goto out;
                }

                class_bpf = kwork_class_bpf_supported_list[type];
                class_bpf->class = class;

                if (class_bpf->load_prepare != NULL)
                        class_bpf->load_prepare(kwork);
        }

        if (kwork->cpu_list != NULL)
                skel->rodata->has_cpu_filter = 1;
        if (kwork->profile_name != NULL)
                skel->rodata->has_name_filter = 1;

        if (kwork_trace_bpf__load(skel)) {
                pr_debug("Failed to load kwork trace skeleton\n");
                goto out;
        }

        if (setup_filters(kwork))
                goto out;

        if (kwork_trace_bpf__attach(skel)) {
                pr_debug("Failed to attach kwork trace skeleton\n");
                goto out;
        }

        return 0;

out:
        kwork_trace_bpf__destroy(skel);
        return -1;
}

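/*
 * Convert one BPF report entry into a kwork_work item and fold it into
 * the session's work list, filling either runtime or latency statistics
 * depending on the report type.
 */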
static int add_work(struct perf_kwork *kwork,
                    struct work_key *key,
                    struct report_data *data)
{
        struct kwork_work *work;
        struct kwork_class_bpf *bpf_trace;
        struct kwork_work tmp = {
                .id = key->id,
                .name = NULL,
                .cpu = key->cpu,
        };
        enum kwork_class_type type = key->type;

        if (!valid_kwork_class_type(type)) {
                pr_debug("Invalid class type %d to add work\n", type);
                return -1;
        }

        bpf_trace = kwork_class_bpf_supported_list[type];
        tmp.class = bpf_trace->class;

        if ((bpf_trace->get_work_name != NULL) &&
            (bpf_trace->get_work_name(key, &tmp.name)))
                return -1;

        work = perf_kwork_add_work(kwork, tmp.class, &tmp);
        if (work == NULL)
                return -1;

        if (kwork->report == KWORK_REPORT_RUNTIME) {
                work->nr_atoms = data->nr;
                work->total_runtime = data->total_time;
                work->max_runtime = data->max_time;
                work->max_runtime_start = data->max_time_start;
                work->max_runtime_end = data->max_time_end;
        } else if (kwork->report == KWORK_REPORT_LATENCY) {
                work->nr_atoms = data->nr;
                work->total_latency = data->total_time;
                work->max_latency = data->max_time;
                work->max_latency_start = data->max_time_start;
                work->max_latency_end = data->max_time_end;
        } else {
                pr_debug("Invalid bpf report type %d\n", kwork->report);
                return -1;
        }

        kwork->timestart = (u64)ts_start.tv_sec * NSEC_PER_SEC + ts_start.tv_nsec;
        kwork->timeend = (u64)ts_end.tv_sec * NSEC_PER_SEC + ts_end.tv_nsec;

        return 0;
}

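/*
 * Walk the perf_kwork_report map and add each non-empty entry to the
 * report.
 */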
int perf_kwork__report_read_bpf(struct perf_kwork *kwork)
{
        struct report_data data;
        struct work_key key = {
                .type = 0,
                .cpu  = 0,
                .id   = 0,
        };
        struct work_key prev = {
                .type = 0,
                .cpu  = 0,
                .id   = 0,
        };
        int fd = bpf_map__fd(skel->maps.perf_kwork_report);

        if (fd < 0) {
                pr_debug("Invalid report fd\n");
                return -1;
        }

        while (!bpf_map_get_next_key(fd, &prev, &key)) {
                if ((bpf_map_lookup_elem(fd, &key, &data)) != 0) {
                        pr_debug("Failed to lookup report elem\n");
                        return -1;
                }

                if ((data.nr != 0) && (add_work(kwork, &key, &data) != 0))
                        return -1;

                prev = key;
        }

        return 0;
}

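/* Tear down all BPF programs and maps created by the skeleton. */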
void perf_kwork__report_cleanup_bpf(void)
{
        kwork_trace_bpf__destroy(skel);
}