Merge tag 'trace-printf-v6.13' of git://git.kernel.org/pub/scm/linux/kernel/git/trace...
[drm/drm-misc.git] / tools / perf / util / kwork.h
blob76fe2a821bcfd70c7afc2ff93249179fb60e7038
1 #ifndef PERF_UTIL_KWORK_H
2 #define PERF_UTIL_KWORK_H
4 #include "util/tool.h"
5 #include "util/time-utils.h"
7 #include <linux/bitmap.h>
8 #include <linux/list.h>
9 #include <linux/rbtree.h>
10 #include <linux/types.h>
12 struct perf_sample;
13 struct perf_session;
/*
 * Kinds of kernel work that perf kwork can trace.  KWORK_CLASS_MAX is
 * not a real class: it is the class count, used for array sizing.
 */
enum kwork_class_type {
	KWORK_CLASS_IRQ,
	KWORK_CLASS_SOFTIRQ,
	KWORK_CLASS_WORKQUEUE,
	KWORK_CLASS_SCHED,
	KWORK_CLASS_MAX,
};
/*
 * Which report the user asked for (perf kwork report/latency/timehist/top).
 */
enum kwork_report_type {
	KWORK_REPORT_RUNTIME,
	KWORK_REPORT_LATENCY,
	KWORK_REPORT_TIMEHIST,
	KWORK_REPORT_TOP,
};
/*
 * Phases of a work item's life that are recorded as atoms; also used to
 * index per-phase lists, so KWORK_TRACE_MAX doubles as the array size.
 */
enum kwork_trace_type {
	KWORK_TRACE_RAISE,
	KWORK_TRACE_ENTRY,
	KWORK_TRACE_EXIT,
	KWORK_TRACE_MAX,
};
/*
 * data structure:
 *
 * +==================+ +============+ +======================+
 * |      class       | |    work    | |         atom         |
 * +==================+ +============+ +======================+
 * +------------+ |  +-----+  |  |  +------+ |  | +-------+   +-----+   |
 * | perf_kwork | +-> | irq | --------|+-> | eth0 | --+-> | raise | - | ... | --+   +-----------+
 * +-----+------+ ||  +-----+  |||  +------+ |||  +-------+   +-----+   |   |           |
 *       |        ||           |||           |||                        | +-> | atom_page |
 *       |        ||           |||           |||  +-------+   +-----+   |   |           |
 *       |   class_list        |||           |+-> | entry | - | ... | ----> |           |
 *       |        ||           |||           |||  +-------+   +-----+   |   |           |
 *       |        ||           |||           |||                        | +-> |           |
 *       |        ||           |||           |||  +-------+   +-----+   |   |           |
 *       |        ||           |||           |+-> | exit  | - | ... | --+   +-----+-----+
 *       |        ||           |||           | |  +-------+   +-----+       |         |
 *       |        ||           |||           | |                            |         |
 *       |        ||           |||  +-----+  | |                            |         |
 *       |        ||           |+-> | ... |  | |                            |         |
 *       |        ||           | |  +-----+  | |                            |         |
 *       |        ||           | |           | |                            |         |
 *       |        ||  +---------+ |  +-----+ | |  +-------+   +-----+   |   |         |
 *       |        +-> | softirq | -------> | RCU | ---+-> | raise | - | ... | --+   +-----+-----+
 *       |        ||  +---------+ |  +-----+ |||  +-------+   +-----+   |   |           |
 *       |        ||              |          |||                        | +-> | atom_page |
 *       |        ||              |          |||  +-------+   +-----+   |   |           |
 *       |        ||              |          |+-> | entry | - | ... | ----> |           |
 *       |        ||              |          |||  +-------+   +-----+   |   |           |
 *       |        ||              |          |||                        | +-> |           |
 *       |        ||              |          |||  +-------+   +-----+   |   |           |
 *       |        ||              |          |+-> | exit  | - | ... | --+   +-----+-----+
 *       |        ||              |          | |  +-------+   +-----+       |         |
 *       |        ||              |          | |                            |         |
 *       |        ||  +-----------+ |  +-----+ |                            |         |
 *       |        +-> | workqueue | -----> | ... |                          |         |
 *       |        |   +-----------+ |  +-----+ |                            |         |
 *       |        +==================+ +============+ +======================+        |
 *       |                                                                            |
 *       +----> atom_page_list ---------------------------------------------------------+
 *
 */
80 struct kwork_atom {
81 struct list_head list;
82 u64 time;
83 struct kwork_atom *prev;
85 void *page_addr;
86 unsigned long bit_inpage;
89 #define NR_ATOM_PER_PAGE 128
90 struct kwork_atom_page {
91 struct list_head list;
92 struct kwork_atom atoms[NR_ATOM_PER_PAGE];
93 DECLARE_BITMAP(bitmap, NR_ATOM_PER_PAGE);
96 struct perf_kwork;
97 struct kwork_class;
98 struct kwork_work {
100 * class field
102 struct rb_node node;
103 struct kwork_class *class;
106 * work field
108 u64 id;
109 int cpu;
110 char *name;
113 * atom field
115 u64 nr_atoms;
116 struct list_head atom_list[KWORK_TRACE_MAX];
119 * runtime report
121 u64 max_runtime;
122 u64 max_runtime_start;
123 u64 max_runtime_end;
124 u64 total_runtime;
127 * latency report
129 u64 max_latency;
130 u64 max_latency_start;
131 u64 max_latency_end;
132 u64 total_latency;
135 * top report
137 u32 cpu_usage;
138 u32 tgid;
139 bool is_kthread;
142 struct kwork_class {
143 struct list_head list;
144 const char *name;
145 enum kwork_class_type type;
147 unsigned int nr_tracepoints;
148 const struct evsel_str_handler *tp_handlers;
150 struct rb_root_cached work_root;
152 int (*class_init)(struct kwork_class *class,
153 struct perf_session *session);
155 void (*work_init)(struct perf_kwork *kwork,
156 struct kwork_class *class,
157 struct kwork_work *work,
158 enum kwork_trace_type src_type,
159 struct evsel *evsel,
160 struct perf_sample *sample,
161 struct machine *machine);
163 void (*work_name)(struct kwork_work *work,
164 char *buf, int len);
/*
 * Per-subcommand event callbacks; each returns 0 on success or a
 * negative value on error.  Unused callbacks may be NULL.
 */
struct trace_kwork_handler {
	int (*raise_event)(struct perf_kwork *kwork,
			   struct kwork_class *class, struct evsel *evsel,
			   struct perf_sample *sample, struct machine *machine);

	int (*entry_event)(struct perf_kwork *kwork,
			   struct kwork_class *class, struct evsel *evsel,
			   struct perf_sample *sample, struct machine *machine);

	int (*exit_event)(struct perf_kwork *kwork,
			  struct kwork_class *class, struct evsel *evsel,
			  struct perf_sample *sample, struct machine *machine);

	int (*sched_switch_event)(struct perf_kwork *kwork,
				  struct kwork_class *class, struct evsel *evsel,
				  struct perf_sample *sample, struct machine *machine);
};
185 struct __top_cpus_runtime {
186 u64 load;
187 u64 idle;
188 u64 irq;
189 u64 softirq;
190 u64 total;
193 struct kwork_top_stat {
194 DECLARE_BITMAP(all_cpus_bitmap, MAX_NR_CPUS);
195 struct __top_cpus_runtime *cpus_runtime;
198 struct perf_kwork {
200 * metadata
202 struct perf_tool tool;
203 struct list_head class_list;
204 struct list_head atom_page_list;
205 struct list_head sort_list, cmp_id;
206 struct rb_root_cached sorted_work_root;
207 const struct trace_kwork_handler *tp_handler;
210 * profile filters
212 const char *profile_name;
214 const char *cpu_list;
215 DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
217 const char *time_str;
218 struct perf_time_interval ptime;
221 * options for command
223 bool force;
224 const char *event_list_str;
225 enum kwork_report_type report;
228 * options for subcommand
230 bool summary;
231 const char *sort_order;
232 bool show_callchain;
233 unsigned int max_stack;
234 bool use_bpf;
237 * statistics
239 u64 timestart;
240 u64 timeend;
242 unsigned long nr_events;
243 unsigned long nr_lost_chunks;
244 unsigned long nr_lost_events;
246 u64 all_runtime;
247 u64 all_count;
248 u64 nr_skipped_events[KWORK_TRACE_MAX + 1];
251 * perf kwork top data
253 struct kwork_top_stat top_stat;
/*
 * Look up the work matching @key in @class's tree, creating and
 * inserting it if absent; returns the tracked work instance.
 */
struct kwork_work *perf_kwork_add_work(struct perf_kwork *kwork,
				       struct kwork_class *class,
				       struct kwork_work *key);
260 #ifdef HAVE_BPF_SKEL
262 int perf_kwork__trace_prepare_bpf(struct perf_kwork *kwork);
263 int perf_kwork__report_read_bpf(struct perf_kwork *kwork);
264 void perf_kwork__report_cleanup_bpf(void);
266 void perf_kwork__trace_start(void);
267 void perf_kwork__trace_finish(void);
269 int perf_kwork__top_prepare_bpf(struct perf_kwork *kwork);
270 int perf_kwork__top_read_bpf(struct perf_kwork *kwork);
271 void perf_kwork__top_cleanup_bpf(void);
273 void perf_kwork__top_start(void);
274 void perf_kwork__top_finish(void);
276 #else /* !HAVE_BPF_SKEL */
278 static inline int
279 perf_kwork__trace_prepare_bpf(struct perf_kwork *kwork __maybe_unused)
281 return -1;
284 static inline int
285 perf_kwork__report_read_bpf(struct perf_kwork *kwork __maybe_unused)
287 return -1;
290 static inline void perf_kwork__report_cleanup_bpf(void) {}
292 static inline void perf_kwork__trace_start(void) {}
293 static inline void perf_kwork__trace_finish(void) {}
295 static inline int
296 perf_kwork__top_prepare_bpf(struct perf_kwork *kwork __maybe_unused)
298 return -1;
301 static inline int
302 perf_kwork__top_read_bpf(struct perf_kwork *kwork __maybe_unused)
304 return -1;
307 static inline void perf_kwork__top_cleanup_bpf(void) {}
309 static inline void perf_kwork__top_start(void) {}
310 static inline void perf_kwork__top_finish(void) {}
312 #endif /* HAVE_BPF_SKEL */
314 #endif /* PERF_UTIL_KWORK_H */