// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2022 Huawei Inc, Yang Jihong <yangjihong1@huawei.com>
 */
#include <stdlib.h>
#include <string.h>
#include <time.h>

#include <linux/time64.h>

#include "util/debug.h"
#include "util/evsel.h"
#include "util/kwork.h"

#include <bpf/bpf.h>
#include <perf/cpumap.h>

#include "util/bpf_skel/kwork_top.skel.h"
/*
 * This should be in sync with "util/kwork_top.bpf.c"
 */
#define MAX_COMMAND_LEN 16

struct work_data {
	u64 runtime;
};

struct task_data {
	u32 tgid;
	u32 is_kthread;
	char comm[MAX_COMMAND_LEN];
};

struct work_key {
	u32 type;
	u32 pid;
	u64 task_p;
};

struct task_key {
	u32 pid;
	u32 cpu;
};
struct kwork_class_bpf {
	struct kwork_class *class;
	void (*load_prepare)(void);
};

static struct kwork_top_bpf *skel;
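/*
 * Start collection: record the monotonic start timestamp in the skeleton's
 * .bss and set the 'enabled' flag read by the BPF programs.
 */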
void perf_kwork__top_start(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	skel->bss->from_timestamp = (u64)ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
	skel->bss->enabled = 1;
	pr_debug("perf kwork top start at: %lld\n", skel->bss->from_timestamp);
}
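/*
 * Stop collection: clear the 'enabled' flag first so no further samples are
 * accounted, then record the monotonic end timestamp.
 */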
void perf_kwork__top_finish(void)
{
	struct timespec ts;

	skel->bss->enabled = 0;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	skel->bss->to_timestamp = (u64)ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
	pr_debug("perf kwork top finish at: %lld\n", skel->bss->to_timestamp);
}
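/*
 * Each kwork class provides a load_prepare() callback that re-enables
 * autoload only for the BPF programs it needs; all other programs stay
 * disabled (see perf_kwork__top_prepare_bpf()).
 */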
static void irq_load_prepare(void)
{
	bpf_program__set_autoload(skel->progs.on_irq_handler_entry, true);
	bpf_program__set_autoload(skel->progs.on_irq_handler_exit, true);
}
static struct kwork_class_bpf kwork_irq_bpf = {
	.load_prepare = irq_load_prepare,
};
static void softirq_load_prepare(void)
{
	bpf_program__set_autoload(skel->progs.on_softirq_entry, true);
	bpf_program__set_autoload(skel->progs.on_softirq_exit, true);
}
static struct kwork_class_bpf kwork_softirq_bpf = {
	.load_prepare = softirq_load_prepare,
};
static void sched_load_prepare(void)
{
	bpf_program__set_autoload(skel->progs.on_switch, true);
}
static struct kwork_class_bpf kwork_sched_bpf = {
	.load_prepare = sched_load_prepare,
};
static struct kwork_class_bpf *
kwork_class_bpf_supported_list[KWORK_CLASS_MAX] = {
	[KWORK_CLASS_IRQ]	= &kwork_irq_bpf,
	[KWORK_CLASS_SOFTIRQ]	= &kwork_softirq_bpf,
	[KWORK_CLASS_SCHED]	= &kwork_sched_bpf,
};
static bool valid_kwork_class_type(enum kwork_class_type type)
{
	return type >= 0 && type < KWORK_CLASS_MAX;
}
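/*
 * Populate the kwork_top_cpu_filter map from kwork->cpu_list so the BPF
 * programs only account work on the requested CPUs.
 */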
static int setup_filters(struct perf_kwork *kwork)
{
	if (kwork->cpu_list) {
		int idx, nr_cpus, fd;
		struct perf_cpu_map *map;
		struct perf_cpu cpu;

		fd = bpf_map__fd(skel->maps.kwork_top_cpu_filter);
		if (fd < 0) {
			pr_debug("Invalid cpu filter fd\n");
			return -1;
		}

		map = perf_cpu_map__new(kwork->cpu_list);
		if (!map) {
			pr_debug("Invalid cpu_list\n");
			return -1;
		}

		nr_cpus = libbpf_num_possible_cpus();
		perf_cpu_map__for_each_cpu(cpu, idx, map) {
			u8 val = 1;

			if (cpu.cpu >= nr_cpus) {
				perf_cpu_map__put(map);
				pr_err("Requested cpu %d too large\n", cpu.cpu);
				return -1;
			}
			bpf_map_update_elem(fd, &cpu.cpu, &val, BPF_ANY);
		}
		perf_cpu_map__put(map);
	}

	return 0;
}
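/*
 * Open the kwork_top skeleton, disable autoload for every program, let the
 * configured classes re-enable theirs, then load, set up filters and attach.
 */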
int perf_kwork__top_prepare_bpf(struct perf_kwork *kwork)
{
	struct bpf_program *prog;
	struct kwork_class *class;
	struct kwork_class_bpf *class_bpf;
	enum kwork_class_type type;

	skel = kwork_top_bpf__open();
	if (!skel) {
		pr_debug("Failed to open kwork top skeleton\n");
		return -1;
	}

	/*
	 * set all progs to non-autoload,
	 * then set corresponding progs according to config
	 */
	bpf_object__for_each_program(prog, skel->obj)
		bpf_program__set_autoload(prog, false);

	list_for_each_entry(class, &kwork->class_list, list) {
		type = class->type;
		if (!valid_kwork_class_type(type) ||
		    !kwork_class_bpf_supported_list[type]) {
			pr_err("Unsupported bpf trace class %s\n", class->name);
			goto out;
		}

		class_bpf = kwork_class_bpf_supported_list[type];
		class_bpf->class = class;

		if (class_bpf->load_prepare)
			class_bpf->load_prepare();
	}

	if (kwork->cpu_list)
		skel->rodata->has_cpu_filter = 1;

	if (kwork_top_bpf__load(skel)) {
		pr_debug("Failed to load kwork top skeleton\n");
		goto out;
	}

	if (setup_filters(kwork))
		goto out;

	if (kwork_top_bpf__attach(skel)) {
		pr_debug("Failed to attach kwork top skeleton\n");
		goto out;
	}

	return 0;

out:
	kwork_top_bpf__destroy(skel);
	return -1;
}
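/*
 * Fill in tgid, kthread flag and command name for a work entry by looking
 * up its task key in the kwork_top_tasks BPF map.
 */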
static void read_task_info(struct kwork_work *work)
{
	int fd;
	struct task_data data;
	struct task_key key = {
		.pid = work->id,
		.cpu = work->cpu,
	};

	fd = bpf_map__fd(skel->maps.kwork_top_tasks);
	if (fd < 0) {
		pr_debug("Invalid top tasks map fd\n");
		return;
	}

	if (!bpf_map_lookup_elem(fd, &key, &data)) {
		work->tgid = data.tgid;
		work->is_kthread = data.is_kthread;
		work->name = strdup(data.comm);
	}
}
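/*
 * Convert one BPF work_key/work_data pair into a struct kwork_work entry in
 * the session, recording its runtime and task information.
 */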
static int add_work(struct perf_kwork *kwork, struct work_key *key,
		    struct work_data *data, int cpu)
{
	struct kwork_class_bpf *bpf_trace;
	struct kwork_work *work;
	struct kwork_work tmp = {
		.id = key->pid,
		.cpu = cpu,
		.name = NULL,
	};
	enum kwork_class_type type = key->type;

	if (!valid_kwork_class_type(type)) {
		pr_debug("Invalid class type %d to add work\n", type);
		return -1;
	}

	bpf_trace = kwork_class_bpf_supported_list[type];
	tmp.class = bpf_trace->class;

	work = perf_kwork_add_work(kwork, tmp.class, &tmp);
	if (!work)
		return -1;

	work->total_runtime = data->runtime;
	read_task_info(work);

	return 0;
}
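/*
 * Walk the kwork_top_works map: for every key, read the per-CPU runtime
 * array and add one work entry per CPU with non-zero runtime.
 */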
int perf_kwork__top_read_bpf(struct perf_kwork *kwork)
{
	int i, fd, nr_cpus;
	struct work_data *data;
	struct work_key key, prev;

	fd = bpf_map__fd(skel->maps.kwork_top_works);
	if (fd < 0) {
		pr_debug("Invalid top runtime fd\n");
		return -1;
	}

	nr_cpus = libbpf_num_possible_cpus();
	data = calloc(nr_cpus, sizeof(struct work_data));
	if (!data)
		return -1;

	memset(&prev, 0, sizeof(prev));
	while (!bpf_map_get_next_key(fd, &prev, &key)) {
		if ((bpf_map_lookup_elem(fd, &key, data)) != 0) {
			pr_debug("Failed to lookup top elem\n");
			free(data);
			return -1;
		}

		for (i = 0; i < nr_cpus; i++) {
			if (data[i].runtime == 0)
				continue;

			if (add_work(kwork, &key, &data[i], i)) {
				free(data);
				return -1;
			}
		}
		prev = key;
	}
	free(data);

	return 0;
}
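/* Tear down the BPF skeleton, detaching programs and releasing maps. */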
void perf_kwork__top_cleanup_bpf(void)
{
	kwork_top_bpf__destroy(skel);
}