/* tools/perf/ui/hist.c */
// SPDX-License-Identifier: GPL-2.0
#include <inttypes.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <linux/compiler.h>

#include "../util/callchain.h"
#include "../util/debug.h"
#include "../util/hist.h"
#include "../util/sort.h"
#include "../util/evsel.h"
#include "../util/evlist.h"
#include "../util/thread.h"
#include "../util/util.h"

/* hist period print (hpp) functions */

#define hpp__call_print_fn(hpp, fn, fmt, ...)			\
({								\
	int __ret = fn(hpp, fmt, ##__VA_ARGS__);		\
	advance_hpp(hpp, __ret);				\
	__ret;							\
})
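
/*
 * Illustrative note (added for exposition, not in upstream hist.c): each
 * hpp__call_print_fn() invocation prints one column value into hpp->buf and
 * then lets advance_hpp() move hpp->buf forward and shrink hpp->size by the
 * number of bytes written, so a group event can append one value per member
 * into the same output buffer.
 */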

static int __hpp__fmt_print(struct perf_hpp *hpp, struct hists *hists, u64 val,
			    int nr_samples, const char *fmt, int len,
			    hpp_snprint_fn print_fn, enum perf_hpp_fmt_type fmtype)
{
	if (fmtype == PERF_HPP_FMT_TYPE__PERCENT) {
		double percent = 0.0;
		u64 total = hists__total_period(hists);

		if (total)
			percent = 100.0 * val / total;

		return hpp__call_print_fn(hpp, print_fn, fmt, len, percent);
	}

	if (fmtype == PERF_HPP_FMT_TYPE__AVERAGE) {
		double avg = nr_samples ? (1.0 * val / nr_samples) : 0;

		return hpp__call_print_fn(hpp, print_fn, fmt, len, avg);
	}

	return hpp__call_print_fn(hpp, print_fn, fmt, len, val);
}
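
/*
 * Worked example (illustrative numbers only): for a PERCENT column with
 * val == 500 and hists__total_period() == 2000, percent becomes 25.0 and a
 * format such as " %*.2f%%" prints "25.00" right-aligned in a len-wide
 * field, followed by the '%' sign.
 */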

struct hpp_fmt_value {
	struct hists *hists;
	u64 val;
	int samples;
};

static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
		      hpp_field_fn get_field, const char *fmt, int len,
		      hpp_snprint_fn print_fn, enum perf_hpp_fmt_type fmtype)
{
	int ret = 0;
	struct hists *hists = he->hists;
	struct evsel *evsel = hists_to_evsel(hists);
	struct evsel *pos;
	char *buf = hpp->buf;
	size_t size = hpp->size;
	int i = 0, nr_members = 1;
	struct hpp_fmt_value *values;

	if (evsel__is_group_event(evsel))
		nr_members = evsel->core.nr_members;

	values = calloc(nr_members, sizeof(*values));
	if (values == NULL)
		return 0;

	values[0].hists = evsel__hists(evsel);
	values[0].val = get_field(he);
	values[0].samples = he->stat.nr_events;

	if (evsel__is_group_event(evsel)) {
		struct hist_entry *pair;

		for_each_group_member(pos, evsel)
			values[++i].hists = evsel__hists(pos);

		list_for_each_entry(pair, &he->pairs.head, pairs.node) {
			for (i = 0; i < nr_members; i++) {
				if (values[i].hists != pair->hists)
					continue;

				values[i].val = get_field(pair);
				values[i].samples = pair->stat.nr_events;
				break;
			}
		}
	}

	for (i = 0; i < nr_members; i++) {
		if (symbol_conf.skip_empty &&
		    values[i].hists->stats.nr_samples == 0)
			continue;

		ret += __hpp__fmt_print(hpp, values[i].hists, values[i].val,
					values[i].samples, fmt, len,
					print_fn, fmtype);
	}

	free(values);

	/*
	 * Restore original buf and size as it's where caller expects
	 * the result will be saved.
	 */
	hpp->buf = buf;
	hpp->size = size;

	return ret;
}
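
/*
 * Note (exposition only): values[0] always carries the leader's value taken
 * from @he itself; the remaining slots are filled in group order from
 * for_each_group_member() and then matched against he->pairs, so each group
 * member's hist entry contributes its own column value.
 */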

int hpp__fmt(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
	     struct hist_entry *he, hpp_field_fn get_field,
	     const char *fmtstr, hpp_snprint_fn print_fn,
	     enum perf_hpp_fmt_type fmtype)
{
	int len = fmt->user_len ?: fmt->len;

	if (symbol_conf.field_sep) {
		return __hpp__fmt(hpp, he, get_field, fmtstr, 1,
				  print_fn, fmtype);
	}

	if (fmtype == PERF_HPP_FMT_TYPE__PERCENT)
		len -= 2; /* 2 for a space and a % sign */
	else
		len -= 1;

	return __hpp__fmt(hpp, he, get_field, fmtstr, len, print_fn, fmtype);
}
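
/*
 * Example (illustrative): with the default column length of 8, a percent
 * column passes len == 6 down to __hpp__fmt() so the leading space and the
 * trailing '%' of " %*.2f%%" still fit within the 8-character column.  When
 * a field separator is configured (symbol_conf.field_sep), alignment does
 * not matter and the width is forced to 1.
 */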

int hpp__fmt_acc(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
		 struct hist_entry *he, hpp_field_fn get_field,
		 const char *fmtstr, hpp_snprint_fn print_fn,
		 enum perf_hpp_fmt_type fmtype)
{
	if (!symbol_conf.cumulate_callchain) {
		int len = fmt->user_len ?: fmt->len;
		return snprintf(hpp->buf, hpp->size, " %*s", len - 1, "N/A");
	}

	return hpp__fmt(fmt, hpp, he, get_field, fmtstr, print_fn, fmtype);
}

static int field_cmp(u64 field_a, u64 field_b)
{
	if (field_a > field_b)
		return 1;
	if (field_a < field_b)
		return -1;
	return 0;
}

static int hist_entry__new_pair(struct hist_entry *a, struct hist_entry *b,
				hpp_field_fn get_field, int nr_members,
				u64 **fields_a, u64 **fields_b)
{
	u64 *fa = calloc(nr_members, sizeof(*fa)),
	    *fb = calloc(nr_members, sizeof(*fb));
	struct hist_entry *pair;

	if (!fa || !fb)
		goto out_free;

	list_for_each_entry(pair, &a->pairs.head, pairs.node) {
		struct evsel *evsel = hists_to_evsel(pair->hists);
		fa[evsel__group_idx(evsel)] = get_field(pair);
	}

	list_for_each_entry(pair, &b->pairs.head, pairs.node) {
		struct evsel *evsel = hists_to_evsel(pair->hists);
		fb[evsel__group_idx(evsel)] = get_field(pair);
	}

	*fields_a = fa;
	*fields_b = fb;
	return 0;
out_free:
	free(fa);
	free(fb);
	*fields_a = *fields_b = NULL;
	return -1;
}
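
/*
 * Note (exposition only): on success the caller owns both arrays and must
 * free() them; on failure both output pointers are set to NULL, which keeps
 * the callers' unconditional free() in their "out" paths safe.
 */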

static int __hpp__group_sort_idx(struct hist_entry *a, struct hist_entry *b,
				 hpp_field_fn get_field, int idx)
{
	struct evsel *evsel = hists_to_evsel(a->hists);
	u64 *fields_a, *fields_b;
	int cmp, nr_members, ret, i;

	cmp = field_cmp(get_field(a), get_field(b));
	if (!evsel__is_group_event(evsel))
		return cmp;

	nr_members = evsel->core.nr_members;
	if (idx < 1 || idx >= nr_members)
		return cmp;

	ret = hist_entry__new_pair(a, b, get_field, nr_members, &fields_a, &fields_b);
	if (ret) {
		ret = cmp;
		goto out;
	}

	ret = field_cmp(fields_a[idx], fields_b[idx]);
	if (ret)
		goto out;

	for (i = 1; i < nr_members; i++) {
		if (i != idx) {
			ret = field_cmp(fields_a[i], fields_b[i]);
			if (ret)
				goto out;
		}
	}

out:
	free(fields_a);
	free(fields_b);

	return ret;
}
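
/*
 * Note (exposition only): when symbol_conf.group_sort_idx selects a group
 * member, that member's value decides the order; ties fall back to the
 * other members' values in index order, while the leader's own value is
 * only used when the index is out of range or the pair lookup fails.
 */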

static int __hpp__sort(struct hist_entry *a, struct hist_entry *b,
		       hpp_field_fn get_field)
{
	s64 ret;
	int i, nr_members;
	struct evsel *evsel;
	u64 *fields_a, *fields_b;

	if (symbol_conf.group_sort_idx && symbol_conf.event_group) {
		return __hpp__group_sort_idx(a, b, get_field,
					     symbol_conf.group_sort_idx);
	}

	ret = field_cmp(get_field(a), get_field(b));
	if (ret || !symbol_conf.event_group)
		return ret;

	evsel = hists_to_evsel(a->hists);
	if (!evsel__is_group_event(evsel))
		return ret;

	nr_members = evsel->core.nr_members;
	i = hist_entry__new_pair(a, b, get_field, nr_members, &fields_a, &fields_b);
	if (i)
		goto out;

	for (i = 1; i < nr_members; i++) {
		ret = field_cmp(fields_a[i], fields_b[i]);
		if (ret)
			break;
	}

out:
	free(fields_a);
	free(fields_b);

	return ret;
}

static int __hpp__sort_acc(struct hist_entry *a, struct hist_entry *b,
			   hpp_field_fn get_field)
{
	s64 ret = 0;

	if (symbol_conf.cumulate_callchain) {
		/*
		 * Put caller above callee when they have equal period.
		 */
		ret = field_cmp(get_field(a), get_field(b));
		if (ret)
			return ret;

		if ((a->thread == NULL ? NULL : RC_CHK_ACCESS(a->thread)) !=
		    (b->thread == NULL ? NULL : RC_CHK_ACCESS(b->thread)) ||
		    !hist_entry__has_callchains(a) || !symbol_conf.use_callchain)
			return 0;

		ret = b->callchain->max_depth - a->callchain->max_depth;
		if (callchain_param.order == ORDER_CALLER)
			ret = -ret;
	}
	return ret;
}
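
/*
 * Note (exposition only): when callchain cumulation is enabled and two
 * entries from the same thread tie on the accumulated period, callchain
 * max_depth is used as a tie-breaker so callers sort above callees;
 * ORDER_CALLER reverses that direction.
 */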

static int hpp__width_fn(struct perf_hpp_fmt *fmt,
			 struct perf_hpp *hpp __maybe_unused,
			 struct hists *hists)
{
	int len = fmt->user_len ?: fmt->len;
	struct evsel *evsel = hists_to_evsel(hists);

	if (symbol_conf.event_group) {
		int nr = 0;
		struct evsel *pos;

		for_each_group_evsel(pos, evsel) {
			if (!symbol_conf.skip_empty ||
			    evsel__hists(pos)->stats.nr_samples)
				nr++;
		}

		len = max(len, nr * fmt->len);
	}

	if (len < (int)strlen(fmt->name))
		len = strlen(fmt->name);

	return len;
}

static int hpp__header_fn(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			  struct hists *hists, int line __maybe_unused,
			  int *span __maybe_unused)
{
	int len = hpp__width_fn(fmt, hpp, hists);
	return scnprintf(hpp->buf, hpp->size, "%*s", len, fmt->name);
}

int hpp_color_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
	va_list args;
	ssize_t ssize = hpp->size;
	double percent;
	int ret, len;

	va_start(args, fmt);
	len = va_arg(args, int);
	percent = va_arg(args, double);
	ret = percent_color_len_snprintf(hpp->buf, hpp->size, fmt, len, percent);
	va_end(args);

	return (ret >= ssize) ? (ssize - 1) : ret;
}

static int hpp_entry_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
	va_list args;
	ssize_t ssize = hpp->size;
	int ret;

	va_start(args, fmt);
	ret = vsnprintf(hpp->buf, hpp->size, fmt, args);
	va_end(args);

	return (ret >= ssize) ? (ssize - 1) : ret;
}
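
/*
 * Note (exposition only): both printers share the hpp_snprint_fn signature.
 * hpp_color_scnprintf() expects exactly two variadic arguments, the field
 * width and a percent value, matching the " %*.2f%%" formats it is called
 * with; hpp_entry_scnprintf() simply forwards the whole varargs list to
 * vsnprintf().  Both clamp the return value so the caller never advances
 * past the end of the buffer.
 */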

#define __HPP_COLOR_PERCENT_FN(_type, _field)					\
static u64 he_get_##_field(struct hist_entry *he)				\
{										\
	return he->stat._field;							\
}										\
										\
static int hpp__color_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%",		\
			hpp_color_scnprintf, PERF_HPP_FMT_TYPE__PERCENT);	\
}

#define __HPP_ENTRY_PERCENT_FN(_type, _field)					\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%",		\
			hpp_entry_scnprintf, PERF_HPP_FMT_TYPE__PERCENT);	\
}

#define __HPP_SORT_FN(_type, _field)						\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort(a, b, he_get_##_field);				\
}

#define __HPP_COLOR_ACC_PERCENT_FN(_type, _field)				\
static u64 he_get_acc_##_field(struct hist_entry *he)				\
{										\
	return he->stat_acc->_field;						\
}										\
										\
static int hpp__color_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%",	\
			    hpp_color_scnprintf, PERF_HPP_FMT_TYPE__PERCENT);	\
}

#define __HPP_ENTRY_ACC_PERCENT_FN(_type, _field)				\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%",	\
			    hpp_entry_scnprintf, PERF_HPP_FMT_TYPE__PERCENT);	\
}

#define __HPP_SORT_ACC_FN(_type, _field)					\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort_acc(a, b, he_get_acc_##_field);			\
}

#define __HPP_ENTRY_RAW_FN(_type, _field)					\
static u64 he_get_raw_##_field(struct hist_entry *he)				\
{										\
	return he->stat._field;							\
}										\
										\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_raw_##_field, " %*"PRIu64,	\
			hpp_entry_scnprintf, PERF_HPP_FMT_TYPE__RAW);		\
}

#define __HPP_SORT_RAW_FN(_type, _field)					\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort(a, b, he_get_raw_##_field);				\
}

#define __HPP_ENTRY_AVERAGE_FN(_type, _field)					\
static u64 he_get_##_field(struct hist_entry *he)				\
{										\
	return he->stat._field;							\
}										\
										\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.1f",		\
			hpp_entry_scnprintf, PERF_HPP_FMT_TYPE__AVERAGE);	\
}

#define __HPP_SORT_AVERAGE_FN(_type, _field)					\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort(a, b, he_get_##_field);				\
}

#define HPP_PERCENT_FNS(_type, _field)		\
__HPP_COLOR_PERCENT_FN(_type, _field)		\
__HPP_ENTRY_PERCENT_FN(_type, _field)		\
__HPP_SORT_FN(_type, _field)

#define HPP_PERCENT_ACC_FNS(_type, _field)	\
__HPP_COLOR_ACC_PERCENT_FN(_type, _field)	\
__HPP_ENTRY_ACC_PERCENT_FN(_type, _field)	\
__HPP_SORT_ACC_FN(_type, _field)

#define HPP_RAW_FNS(_type, _field)		\
__HPP_ENTRY_RAW_FN(_type, _field)		\
__HPP_SORT_RAW_FN(_type, _field)

#define HPP_AVERAGE_FNS(_type, _field)		\
__HPP_ENTRY_AVERAGE_FN(_type, _field)		\
__HPP_SORT_AVERAGE_FN(_type, _field)

HPP_PERCENT_FNS(overhead, period)
HPP_PERCENT_FNS(overhead_sys, period_sys)
HPP_PERCENT_FNS(overhead_us, period_us)
HPP_PERCENT_FNS(overhead_guest_sys, period_guest_sys)
HPP_PERCENT_FNS(overhead_guest_us, period_guest_us)
HPP_PERCENT_ACC_FNS(overhead_acc, period)

HPP_RAW_FNS(samples, nr_events)
HPP_RAW_FNS(period, period)

HPP_AVERAGE_FNS(weight1, weight1)
HPP_AVERAGE_FNS(weight2, weight2)
HPP_AVERAGE_FNS(weight3, weight3)
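
/*
 * For reference (expansion sketch, not literal preprocessor output):
 * HPP_RAW_FNS(samples, nr_events) above generates roughly
 *
 *	static u64 he_get_raw_nr_events(struct hist_entry *he)
 *	{
 *		return he->stat.nr_events;
 *	}
 *
 *	static int hpp__entry_samples(struct perf_hpp_fmt *fmt,
 *				      struct perf_hpp *hpp, struct hist_entry *he)
 *	{
 *		return hpp__fmt(fmt, hpp, he, he_get_raw_nr_events, " %*"PRIu64,
 *				hpp_entry_scnprintf, PERF_HPP_FMT_TYPE__RAW);
 *	}
 *
 *	static int64_t hpp__sort_samples(struct perf_hpp_fmt *fmt __maybe_unused,
 *					 struct hist_entry *a, struct hist_entry *b)
 *	{
 *		return __hpp__sort(a, b, he_get_raw_nr_events);
 *	}
 *
 * which HPP__PRINT_FNS("Samples", samples, SAMPLES) wires into
 * perf_hpp__format[] below.
 */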

static int64_t hpp__nop_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
			    struct hist_entry *a __maybe_unused,
			    struct hist_entry *b __maybe_unused)
{
	return 0;
}

static bool perf_hpp__is_hpp_entry(struct perf_hpp_fmt *a)
{
	return a->header == hpp__header_fn;
}

static bool hpp__equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	if (!perf_hpp__is_hpp_entry(a) || !perf_hpp__is_hpp_entry(b))
		return false;

	return a->idx == b->idx;
}

#define HPP__COLOR_PRINT_FNS(_name, _fn, _idx)	\
	{					\
		.name   = _name,		\
		.header	= hpp__header_fn,	\
		.width	= hpp__width_fn,	\
		.color	= hpp__color_ ## _fn,	\
		.entry	= hpp__entry_ ## _fn,	\
		.cmp	= hpp__nop_cmp,		\
		.collapse = hpp__nop_cmp,	\
		.sort	= hpp__sort_ ## _fn,	\
		.idx	= PERF_HPP__ ## _idx,	\
		.equal	= hpp__equal,		\
	}

#define HPP__COLOR_ACC_PRINT_FNS(_name, _fn, _idx)	\
	{						\
		.name   = _name,			\
		.header	= hpp__header_fn,		\
		.width	= hpp__width_fn,		\
		.color	= hpp__color_ ## _fn,		\
		.entry	= hpp__entry_ ## _fn,		\
		.cmp	= hpp__nop_cmp,			\
		.collapse = hpp__nop_cmp,		\
		.sort	= hpp__sort_ ## _fn,		\
		.idx	= PERF_HPP__ ## _idx,		\
		.equal	= hpp__equal,			\
	}

#define HPP__PRINT_FNS(_name, _fn, _idx)	\
	{					\
		.name   = _name,		\
		.header	= hpp__header_fn,	\
		.width	= hpp__width_fn,	\
		.entry	= hpp__entry_ ## _fn,	\
		.cmp	= hpp__nop_cmp,		\
		.collapse = hpp__nop_cmp,	\
		.sort	= hpp__sort_ ## _fn,	\
		.idx	= PERF_HPP__ ## _idx,	\
		.equal	= hpp__equal,		\
	}

struct perf_hpp_fmt perf_hpp__format[] = {
	HPP__COLOR_PRINT_FNS("Overhead", overhead, OVERHEAD),
	HPP__COLOR_PRINT_FNS("sys", overhead_sys, OVERHEAD_SYS),
	HPP__COLOR_PRINT_FNS("usr", overhead_us, OVERHEAD_US),
	HPP__COLOR_PRINT_FNS("guest sys", overhead_guest_sys, OVERHEAD_GUEST_SYS),
	HPP__COLOR_PRINT_FNS("guest usr", overhead_guest_us, OVERHEAD_GUEST_US),
	HPP__COLOR_ACC_PRINT_FNS("Children", overhead_acc, OVERHEAD_ACC),
	HPP__PRINT_FNS("Samples", samples, SAMPLES),
	HPP__PRINT_FNS("Period", period, PERIOD),
	HPP__PRINT_FNS("Weight1", weight1, WEIGHT1),
	HPP__PRINT_FNS("Weight2", weight2, WEIGHT2),
	HPP__PRINT_FNS("Weight3", weight3, WEIGHT3),
};

struct perf_hpp_list perf_hpp_list = {
	.fields	= LIST_HEAD_INIT(perf_hpp_list.fields),
	.sorts	= LIST_HEAD_INIT(perf_hpp_list.sorts),
	.nr_header_lines = 1,
};

#undef HPP__COLOR_PRINT_FNS
#undef HPP__COLOR_ACC_PRINT_FNS
#undef HPP__PRINT_FNS

#undef HPP_PERCENT_FNS
#undef HPP_PERCENT_ACC_FNS
#undef HPP_RAW_FNS
#undef HPP_AVERAGE_FNS

#undef __HPP_HEADER_FN
#undef __HPP_WIDTH_FN
#undef __HPP_COLOR_PERCENT_FN
#undef __HPP_ENTRY_PERCENT_FN
#undef __HPP_COLOR_ACC_PERCENT_FN
#undef __HPP_ENTRY_ACC_PERCENT_FN
#undef __HPP_ENTRY_RAW_FN
#undef __HPP_ENTRY_AVERAGE_FN
#undef __HPP_SORT_FN
#undef __HPP_SORT_ACC_FN
#undef __HPP_SORT_RAW_FN
#undef __HPP_SORT_AVERAGE_FN

static void fmt_free(struct perf_hpp_fmt *fmt)
{
	/*
	 * At this point fmt should be completely
	 * unhooked, if not it's a bug.
	 */
	BUG_ON(!list_empty(&fmt->list));
	BUG_ON(!list_empty(&fmt->sort_list));

	if (fmt->free)
		fmt->free(fmt);
}

void perf_hpp__init(void)
{
	int i;

	for (i = 0; i < PERF_HPP__MAX_INDEX; i++) {
		struct perf_hpp_fmt *fmt = &perf_hpp__format[i];

		INIT_LIST_HEAD(&fmt->list);

		/* sort_list may be linked by setup_sorting() */
		if (fmt->sort_list.next == NULL)
			INIT_LIST_HEAD(&fmt->sort_list);
	}

	/*
	 * If user specified field order, no need to setup default fields.
	 */
	if (is_strict_order(field_order))
		return;

	if (symbol_conf.cumulate_callchain) {
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_ACC);
		perf_hpp__format[PERF_HPP__OVERHEAD].name = "Self";
	}

	hpp_dimension__add_output(PERF_HPP__OVERHEAD);

	if (symbol_conf.show_cpu_utilization) {
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_SYS);
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_US);

		if (perf_guest) {
			hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_SYS);
			hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_US);
		}
	}

	if (symbol_conf.show_nr_samples)
		hpp_dimension__add_output(PERF_HPP__SAMPLES);

	if (symbol_conf.show_total_period)
		hpp_dimension__add_output(PERF_HPP__PERIOD);
}
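
/*
 * Note (exposition only): unless the user gave an explicit field order,
 * perf_hpp__init() registers the accumulated "Children" column first when
 * callchain cumulation is enabled (renaming the plain overhead column to
 * "Self"), then the overhead column itself, the optional CPU-utilization
 * splits, and finally "Samples" / "Period" when requested.
 */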

void perf_hpp_list__column_register(struct perf_hpp_list *list,
				    struct perf_hpp_fmt *format)
{
	list_add_tail(&format->list, &list->fields);
}

void perf_hpp_list__register_sort_field(struct perf_hpp_list *list,
					struct perf_hpp_fmt *format)
{
	list_add_tail(&format->sort_list, &list->sorts);
}

void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list,
				       struct perf_hpp_fmt *format)
{
	list_add(&format->sort_list, &list->sorts);
}

static void perf_hpp__column_unregister(struct perf_hpp_fmt *format)
{
	list_del_init(&format->list);
	fmt_free(format);
}

void perf_hpp__cancel_cumulate(void)
{
	struct perf_hpp_fmt *fmt, *acc, *ovh, *tmp;

	if (is_strict_order(field_order))
		return;

	ovh = &perf_hpp__format[PERF_HPP__OVERHEAD];
	acc = &perf_hpp__format[PERF_HPP__OVERHEAD_ACC];

	perf_hpp_list__for_each_format_safe(&perf_hpp_list, fmt, tmp) {
		if (acc->equal(acc, fmt)) {
			perf_hpp__column_unregister(fmt);
			continue;
		}

		if (ovh->equal(ovh, fmt))
			fmt->name = "Overhead";
	}
}

static bool fmt_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	return a->equal && a->equal(a, b);
}

void perf_hpp__setup_output_field(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt;

	/* append sort keys to output field */
	perf_hpp_list__for_each_sort_list(list, fmt) {
		struct perf_hpp_fmt *pos;

		/* skip sort-only fields ("sort_compute" in perf diff) */
		if (!fmt->entry && !fmt->color)
			continue;

		perf_hpp_list__for_each_format(list, pos) {
			if (fmt_equal(fmt, pos))
				goto next;
		}

		perf_hpp__column_register(fmt);
next:
		continue;
	}
}

void perf_hpp__append_sort_keys(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt;

	/* append output fields to sort keys */
	perf_hpp_list__for_each_format(list, fmt) {
		struct perf_hpp_fmt *pos;

		perf_hpp_list__for_each_sort_list(list, pos) {
			if (fmt_equal(fmt, pos))
				goto next;
		}

		perf_hpp__register_sort_field(fmt);
next:
		continue;
	}
}

void perf_hpp__reset_output_field(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt, *tmp;

	/* reset output fields */
	perf_hpp_list__for_each_format_safe(list, fmt, tmp) {
		list_del_init(&fmt->list);
		list_del_init(&fmt->sort_list);
		fmt_free(fmt);
	}

	/* reset sort keys */
	perf_hpp_list__for_each_sort_list_safe(list, fmt, tmp) {
		list_del_init(&fmt->list);
		list_del_init(&fmt->sort_list);
		fmt_free(fmt);
	}
}

/*
 * See hists__fprintf to match the column widths
 */
unsigned int hists__sort_list_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	int ret = 0;
	bool first = true;
	struct perf_hpp dummy_hpp;

	hists__for_each_format(hists, fmt) {
		if (perf_hpp__should_skip(fmt, hists))
			continue;

		if (first)
			first = false;
		else
			ret += 2;

		ret += fmt->width(fmt, &dummy_hpp, hists);
	}

	if (verbose > 0 && hists__has(hists, sym)) /* Addr + origin */
		ret += 3 + BITS_PER_LONG / 4;

	return ret;
}

unsigned int hists__overhead_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	int ret = 0;
	bool first = true;
	struct perf_hpp dummy_hpp;

	hists__for_each_format(hists, fmt) {
		if (perf_hpp__is_sort_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
			break;

		if (first)
			first = false;
		else
			ret += 2;

		ret += fmt->width(fmt, &dummy_hpp, hists);
	}

	return ret;
}

void perf_hpp__reset_width(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	if (perf_hpp__is_sort_entry(fmt))
		return perf_hpp__reset_sort_width(fmt, hists);

	if (perf_hpp__is_dynamic_entry(fmt))
		return;

	BUG_ON(fmt->idx >= PERF_HPP__MAX_INDEX);

	switch (fmt->idx) {
	case PERF_HPP__OVERHEAD:
	case PERF_HPP__OVERHEAD_SYS:
	case PERF_HPP__OVERHEAD_US:
	case PERF_HPP__OVERHEAD_ACC:
		fmt->len = 8;
		break;

	case PERF_HPP__OVERHEAD_GUEST_SYS:
	case PERF_HPP__OVERHEAD_GUEST_US:
		fmt->len = 9;
		break;

	case PERF_HPP__SAMPLES:
	case PERF_HPP__PERIOD:
		fmt->len = 12;
		break;

	case PERF_HPP__WEIGHT1:
	case PERF_HPP__WEIGHT2:
	case PERF_HPP__WEIGHT3:
		fmt->len = 8;
		break;

	default:
		break;
	}
}

void hists__reset_column_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	struct perf_hpp_list_node *node;

	hists__for_each_format(hists, fmt)
		perf_hpp__reset_width(fmt, hists);

	/* hierarchy entries have their own hpp list */
	list_for_each_entry(node, &hists->hpp_formats, list) {
		perf_hpp_list__for_each_format(&node->hpp, fmt)
			perf_hpp__reset_width(fmt, hists);
	}
}

void perf_hpp__set_user_width(const char *width_list_str)
{
	struct perf_hpp_fmt *fmt;
	const char *ptr = width_list_str;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		char *p;

		int len = strtol(ptr, &p, 10);
		fmt->user_len = len;

		if (*p == ',')
			ptr = p + 1;
		else
			break;
	}
}
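
/*
 * Example (illustrative): a width list such as "12,8,30" assigns user_len 12
 * to the first configured column, 8 to the second and 30 to the third, then
 * stops; any remaining columns keep their default widths.  This is the
 * parser behind the comma-separated column-width option string.
 */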

static int add_hierarchy_fmt(struct hists *hists, struct perf_hpp_fmt *fmt)
{
	struct perf_hpp_list_node *node = NULL;
	struct perf_hpp_fmt *fmt_copy;
	bool found = false;
	bool skip = perf_hpp__should_skip(fmt, hists);

	list_for_each_entry(node, &hists->hpp_formats, list) {
		if (node->level == fmt->level) {
			found = true;
			break;
		}
	}

	if (!found) {
		node = malloc(sizeof(*node));
		if (node == NULL)
			return -1;

		node->skip = skip;
		node->level = fmt->level;
		perf_hpp_list__init(&node->hpp);

		hists->nr_hpp_node++;
		list_add_tail(&node->list, &hists->hpp_formats);
	}

	fmt_copy = perf_hpp_fmt__dup(fmt);
	if (fmt_copy == NULL)
		return -1;

	if (!skip)
		node->skip = false;

	list_add_tail(&fmt_copy->list, &node->hpp.fields);
	list_add_tail(&fmt_copy->sort_list, &node->hpp.sorts);

	return 0;
}

int perf_hpp__setup_hists_formats(struct perf_hpp_list *list,
				  struct evlist *evlist)
{
	struct evsel *evsel;
	struct perf_hpp_fmt *fmt;
	struct hists *hists;
	int ret;

	if (!symbol_conf.report_hierarchy)
		return 0;

	evlist__for_each_entry(evlist, evsel) {
		hists = evsel__hists(evsel);

		perf_hpp_list__for_each_sort_list(list, fmt) {
			if (perf_hpp__is_dynamic_entry(fmt) &&
			    !perf_hpp__defined_dynamic_entry(fmt, hists))
				continue;

			ret = add_hierarchy_fmt(hists, fmt);
			if (ret < 0)
				return ret;
		}
	}

	return 0;
}