tools/perf/ui/hist.c
// SPDX-License-Identifier: GPL-2.0
#include <inttypes.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <linux/compiler.h>

#include "../util/callchain.h"
#include "../util/debug.h"
#include "../util/hist.h"
#include "../util/sort.h"
#include "../util/evsel.h"
#include "../util/evlist.h"
#include "../perf.h"

/* hist period print (hpp) functions */

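/*
 * Invoke one of the hpp print callbacks and advance hpp->buf/hpp->size past
 * what was written.  This is a GNU statement expression, so the macro as a
 * whole evaluates to the callback's return value (characters printed).
 */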
#define hpp__call_print_fn(hpp, fn, fmt, ...)                   \
({                                                              \
        int __ret = fn(hpp, fmt, ##__VA_ARGS__);                \
        advance_hpp(hpp, __ret);                                \
        __ret;                                                  \
})

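/*
 * Format a single field of a hist entry, either as a percentage of the
 * total period or as a raw value.  For event groups, the members' values
 * are appended in group-index order, zero-filling members that have no
 * sample, so every row carries one column per group member.
 */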
static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
                      hpp_field_fn get_field, const char *fmt, int len,
                      hpp_snprint_fn print_fn, bool fmt_percent)
{
        int ret;
        struct hists *hists = he->hists;
        struct evsel *evsel = hists_to_evsel(hists);
        char *buf = hpp->buf;
        size_t size = hpp->size;

        if (fmt_percent) {
                double percent = 0.0;
                u64 total = hists__total_period(hists);

                if (total)
                        percent = 100.0 * get_field(he) / total;

                ret = hpp__call_print_fn(hpp, print_fn, fmt, len, percent);
        } else
                ret = hpp__call_print_fn(hpp, print_fn, fmt, len, get_field(he));

        if (evsel__is_group_event(evsel)) {
                int prev_idx, idx_delta;
                struct hist_entry *pair;
                int nr_members = evsel->core.nr_members;

                prev_idx = evsel__group_idx(evsel);

                list_for_each_entry(pair, &he->pairs.head, pairs.node) {
                        u64 period = get_field(pair);
                        u64 total = hists__total_period(pair->hists);

                        if (!total)
                                continue;

                        evsel = hists_to_evsel(pair->hists);
                        idx_delta = evsel__group_idx(evsel) - prev_idx - 1;

                        while (idx_delta--) {
                                /*
                                 * zero-fill group members in the middle which
                                 * have no sample
                                 */
                                if (fmt_percent) {
                                        ret += hpp__call_print_fn(hpp, print_fn,
                                                                  fmt, len, 0.0);
                                } else {
                                        ret += hpp__call_print_fn(hpp, print_fn,
                                                                  fmt, len, 0ULL);
                                }
                        }

                        if (fmt_percent) {
                                ret += hpp__call_print_fn(hpp, print_fn, fmt, len,
                                                          100.0 * period / total);
                        } else {
                                ret += hpp__call_print_fn(hpp, print_fn, fmt,
                                                          len, period);
                        }

                        prev_idx = evsel__group_idx(evsel);
                }

                idx_delta = nr_members - prev_idx - 1;

                while (idx_delta--) {
                        /*
                         * zero-fill group members at last which have no sample
                         */
                        if (fmt_percent) {
                                ret += hpp__call_print_fn(hpp, print_fn,
                                                          fmt, len, 0.0);
                        } else {
                                ret += hpp__call_print_fn(hpp, print_fn,
                                                          fmt, len, 0ULL);
                        }
                }
        }

        /*
         * Restore original buf and size as it's where caller expects
         * the result will be saved.
         */
        hpp->buf = buf;
        hpp->size = size;

        return ret;
}

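/*
 * Entry points used by the column callbacks below.  A user-requested width
 * (user_len) takes precedence over the default, and two characters are
 * reserved for the space and '%' of percent output.  When a field separator
 * is configured (symbol_conf.field_sep) columns are not aligned, so a
 * minimal width of 1 is used.
 */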
int hpp__fmt(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
             struct hist_entry *he, hpp_field_fn get_field,
             const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent)
{
        int len = fmt->user_len ?: fmt->len;

        if (symbol_conf.field_sep) {
                return __hpp__fmt(hpp, he, get_field, fmtstr, 1,
                                  print_fn, fmt_percent);
        }

        if (fmt_percent)
                len -= 2; /* 2 for a space and a % sign */
        else
                len -= 1;

        return __hpp__fmt(hpp, he, get_field, fmtstr, len, print_fn, fmt_percent);
}

int hpp__fmt_acc(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
                 struct hist_entry *he, hpp_field_fn get_field,
                 const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent)
{
        if (!symbol_conf.cumulate_callchain) {
                int len = fmt->user_len ?: fmt->len;
                return snprintf(hpp->buf, hpp->size, " %*s", len - 1, "N/A");
        }

        return hpp__fmt(fmt, hpp, he, get_field, fmtstr, print_fn, fmt_percent);
}

static int field_cmp(u64 field_a, u64 field_b)
{
        if (field_a > field_b)
                return 1;
        if (field_a < field_b)
                return -1;
        return 0;
}

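/*
 * Collect each group member's field value for two entries into a pair of
 * parallel arrays indexed by group index, so the sort routines below can
 * compare the entries member by member.
 */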
static int hist_entry__new_pair(struct hist_entry *a, struct hist_entry *b,
                                hpp_field_fn get_field, int nr_members,
                                u64 **fields_a, u64 **fields_b)
{
        u64 *fa = calloc(nr_members, sizeof(*fa)),
            *fb = calloc(nr_members, sizeof(*fb));
        struct hist_entry *pair;

        if (!fa || !fb)
                goto out_free;

        list_for_each_entry(pair, &a->pairs.head, pairs.node) {
                struct evsel *evsel = hists_to_evsel(pair->hists);
                fa[evsel__group_idx(evsel)] = get_field(pair);
        }

        list_for_each_entry(pair, &b->pairs.head, pairs.node) {
                struct evsel *evsel = hists_to_evsel(pair->hists);
                fb[evsel__group_idx(evsel)] = get_field(pair);
        }

        *fields_a = fa;
        *fields_b = fb;
        return 0;
out_free:
        free(fa);
        free(fb);
        *fields_a = *fields_b = NULL;
        return -1;
}

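/*
 * Sort by one specific group member (idx 1..nr_members-1; presumably set
 * via perf report's --group-sort-idx, which fills symbol_conf.group_sort_idx):
 * compare that member first, then break ties with the remaining members.
 */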
static int __hpp__group_sort_idx(struct hist_entry *a, struct hist_entry *b,
                                 hpp_field_fn get_field, int idx)
{
        struct evsel *evsel = hists_to_evsel(a->hists);
        u64 *fields_a, *fields_b;
        int cmp, nr_members, ret, i;

        cmp = field_cmp(get_field(a), get_field(b));
        if (!evsel__is_group_event(evsel))
                return cmp;

        nr_members = evsel->core.nr_members;
        if (idx < 1 || idx >= nr_members)
                return cmp;

        ret = hist_entry__new_pair(a, b, get_field, nr_members, &fields_a, &fields_b);
        if (ret) {
                ret = cmp;
                goto out;
        }

        ret = field_cmp(fields_a[idx], fields_b[idx]);
        if (ret)
                goto out;

        for (i = 1; i < nr_members; i++) {
                if (i != idx) {
                        ret = field_cmp(fields_a[i], fields_b[i]);
                        if (ret)
                                goto out;
                }
        }

out:
        free(fields_a);
        free(fields_b);

        return ret;
}

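/*
 * Default sort: compare the leader's value first; for event groups, break
 * ties by comparing the remaining members in group-index order.
 */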
static int __hpp__sort(struct hist_entry *a, struct hist_entry *b,
                       hpp_field_fn get_field)
{
        s64 ret;
        int i, nr_members;
        struct evsel *evsel;
        u64 *fields_a, *fields_b;

        if (symbol_conf.group_sort_idx && symbol_conf.event_group) {
                return __hpp__group_sort_idx(a, b, get_field,
                                             symbol_conf.group_sort_idx);
        }

        ret = field_cmp(get_field(a), get_field(b));
        if (ret || !symbol_conf.event_group)
                return ret;

        evsel = hists_to_evsel(a->hists);
        if (!evsel__is_group_event(evsel))
                return ret;

        nr_members = evsel->core.nr_members;
        i = hist_entry__new_pair(a, b, get_field, nr_members, &fields_a, &fields_b);
        if (i)
                goto out;

        for (i = 1; i < nr_members; i++) {
                ret = field_cmp(fields_a[i], fields_b[i]);
                if (ret)
                        break;
        }

out:
        free(fields_a);
        free(fields_b);

        return ret;
}

static int __hpp__sort_acc(struct hist_entry *a, struct hist_entry *b,
                           hpp_field_fn get_field)
{
        s64 ret = 0;

        if (symbol_conf.cumulate_callchain) {
                /*
                 * Put caller above callee when they have equal period.
                 */
                ret = field_cmp(get_field(a), get_field(b));
                if (ret)
                        return ret;

                if (a->thread != b->thread || !hist_entry__has_callchains(a) || !symbol_conf.use_callchain)
                        return 0;

                ret = b->callchain->max_depth - a->callchain->max_depth;
                if (callchain_param.order == ORDER_CALLER)
                        ret = -ret;
        }
        return ret;
}

static int hpp__width_fn(struct perf_hpp_fmt *fmt,
                         struct perf_hpp *hpp __maybe_unused,
                         struct hists *hists)
{
        int len = fmt->user_len ?: fmt->len;
        struct evsel *evsel = hists_to_evsel(hists);

        if (symbol_conf.event_group)
                len = max(len, evsel->core.nr_members * fmt->len);

        if (len < (int)strlen(fmt->name))
                len = strlen(fmt->name);

        return len;
}

static int hpp__header_fn(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
                          struct hists *hists, int line __maybe_unused,
                          int *span __maybe_unused)
{
        int len = hpp__width_fn(fmt, hpp, hists);
        return scnprintf(hpp->buf, hpp->size, "%*s", len, fmt->name);
}

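/*
 * The print callbacks share a small varargs contract with __hpp__fmt(): a
 * field width (int) followed by the value (double for percentages, u64 for
 * raw counts), matching the " %*.2f%%" and " %*"PRIu64 formats used below.
 */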
int hpp_color_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
        va_list args;
        ssize_t ssize = hpp->size;
        double percent;
        int ret, len;

        va_start(args, fmt);
        len = va_arg(args, int);
        percent = va_arg(args, double);
        ret = percent_color_len_snprintf(hpp->buf, hpp->size, fmt, len, percent);
        va_end(args);

        return (ret >= ssize) ? (ssize - 1) : ret;
}

static int hpp_entry_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
        va_list args;
        ssize_t ssize = hpp->size;
        int ret;

        va_start(args, fmt);
        ret = vsnprintf(hpp->buf, hpp->size, fmt, args);
        va_end(args);

        return (ret >= ssize) ? (ssize - 1) : ret;
}

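/*
 * The macros below stamp out the accessor, color, entry and sort functions
 * for each column: _field names the struct he_stat member to read and _type
 * names the generated hpp__{color,entry,sort}_<type> functions.
 */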
#define __HPP_COLOR_PERCENT_FN(_type, _field)                                  \
static u64 he_get_##_field(struct hist_entry *he)                              \
{                                                                              \
        return he->stat._field;                                                \
}                                                                              \
                                                                               \
static int hpp__color_##_type(struct perf_hpp_fmt *fmt,                        \
                              struct perf_hpp *hpp, struct hist_entry *he)     \
{                                                                              \
        return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%",             \
                        hpp_color_scnprintf, true);                            \
}

#define __HPP_ENTRY_PERCENT_FN(_type, _field)                                  \
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,                        \
                              struct perf_hpp *hpp, struct hist_entry *he)     \
{                                                                              \
        return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%",             \
                        hpp_entry_scnprintf, true);                            \
}

#define __HPP_SORT_FN(_type, _field)                                           \
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,      \
                                 struct hist_entry *a, struct hist_entry *b)   \
{                                                                              \
        return __hpp__sort(a, b, he_get_##_field);                             \
}

#define __HPP_COLOR_ACC_PERCENT_FN(_type, _field)                              \
static u64 he_get_acc_##_field(struct hist_entry *he)                          \
{                                                                              \
        return he->stat_acc->_field;                                           \
}                                                                              \
                                                                               \
static int hpp__color_##_type(struct perf_hpp_fmt *fmt,                        \
                              struct perf_hpp *hpp, struct hist_entry *he)     \
{                                                                              \
        return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%",     \
                            hpp_color_scnprintf, true);                        \
}

#define __HPP_ENTRY_ACC_PERCENT_FN(_type, _field)                              \
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,                        \
                              struct perf_hpp *hpp, struct hist_entry *he)     \
{                                                                              \
        return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%",     \
                            hpp_entry_scnprintf, true);                        \
}

#define __HPP_SORT_ACC_FN(_type, _field)                                       \
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,      \
                                 struct hist_entry *a, struct hist_entry *b)   \
{                                                                              \
        return __hpp__sort_acc(a, b, he_get_acc_##_field);                     \
}

#define __HPP_ENTRY_RAW_FN(_type, _field)                                      \
static u64 he_get_raw_##_field(struct hist_entry *he)                          \
{                                                                              \
        return he->stat._field;                                                \
}                                                                              \
                                                                               \
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,                        \
                              struct perf_hpp *hpp, struct hist_entry *he)     \
{                                                                              \
        return hpp__fmt(fmt, hpp, he, he_get_raw_##_field, " %*"PRIu64,        \
                        hpp_entry_scnprintf, false);                           \
}

#define __HPP_SORT_RAW_FN(_type, _field)                                       \
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,      \
                                 struct hist_entry *a, struct hist_entry *b)   \
{                                                                              \
        return __hpp__sort(a, b, he_get_raw_##_field);                         \
}

#define HPP_PERCENT_FNS(_type, _field)                  \
__HPP_COLOR_PERCENT_FN(_type, _field)                   \
__HPP_ENTRY_PERCENT_FN(_type, _field)                   \
__HPP_SORT_FN(_type, _field)

#define HPP_PERCENT_ACC_FNS(_type, _field)              \
__HPP_COLOR_ACC_PERCENT_FN(_type, _field)               \
__HPP_ENTRY_ACC_PERCENT_FN(_type, _field)               \
__HPP_SORT_ACC_FN(_type, _field)

#define HPP_RAW_FNS(_type, _field)                      \
__HPP_ENTRY_RAW_FN(_type, _field)                       \
__HPP_SORT_RAW_FN(_type, _field)

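/* Instantiate the callbacks for the default columns. */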
HPP_PERCENT_FNS(overhead, period)
HPP_PERCENT_FNS(overhead_sys, period_sys)
HPP_PERCENT_FNS(overhead_us, period_us)
HPP_PERCENT_FNS(overhead_guest_sys, period_guest_sys)
HPP_PERCENT_FNS(overhead_guest_us, period_guest_us)
HPP_PERCENT_ACC_FNS(overhead_acc, period)

HPP_RAW_FNS(samples, nr_events)
HPP_RAW_FNS(period, period)

static int64_t hpp__nop_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
                            struct hist_entry *a __maybe_unused,
                            struct hist_entry *b __maybe_unused)
{
        return 0;
}

static bool perf_hpp__is_hpp_entry(struct perf_hpp_fmt *a)
{
        return a->header == hpp__header_fn;
}

static bool hpp__equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
        if (!perf_hpp__is_hpp_entry(a) || !perf_hpp__is_hpp_entry(b))
                return false;

        return a->idx == b->idx;
}

#define HPP__COLOR_PRINT_FNS(_name, _fn, _idx)          \
        {                                               \
                .name   = _name,                        \
                .header = hpp__header_fn,               \
                .width  = hpp__width_fn,                \
                .color  = hpp__color_ ## _fn,           \
                .entry  = hpp__entry_ ## _fn,           \
                .cmp    = hpp__nop_cmp,                 \
                .collapse = hpp__nop_cmp,               \
                .sort   = hpp__sort_ ## _fn,            \
                .idx    = PERF_HPP__ ## _idx,           \
                .equal  = hpp__equal,                   \
        }

#define HPP__COLOR_ACC_PRINT_FNS(_name, _fn, _idx)      \
        {                                               \
                .name   = _name,                        \
                .header = hpp__header_fn,               \
                .width  = hpp__width_fn,                \
                .color  = hpp__color_ ## _fn,           \
                .entry  = hpp__entry_ ## _fn,           \
                .cmp    = hpp__nop_cmp,                 \
                .collapse = hpp__nop_cmp,               \
                .sort   = hpp__sort_ ## _fn,            \
                .idx    = PERF_HPP__ ## _idx,           \
                .equal  = hpp__equal,                   \
        }

#define HPP__PRINT_FNS(_name, _fn, _idx)                \
        {                                               \
                .name   = _name,                        \
                .header = hpp__header_fn,               \
                .width  = hpp__width_fn,                \
                .entry  = hpp__entry_ ## _fn,           \
                .cmp    = hpp__nop_cmp,                 \
                .collapse = hpp__nop_cmp,               \
                .sort   = hpp__sort_ ## _fn,            \
                .idx    = PERF_HPP__ ## _idx,           \
                .equal  = hpp__equal,                   \
        }

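/*
 * This array is indexed by the PERF_HPP__* constants (see perf_hpp__init()
 * and perf_hpp__reset_width()), so its order must match that enum.
 */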
struct perf_hpp_fmt perf_hpp__format[] = {
        HPP__COLOR_PRINT_FNS("Overhead", overhead, OVERHEAD),
        HPP__COLOR_PRINT_FNS("sys", overhead_sys, OVERHEAD_SYS),
        HPP__COLOR_PRINT_FNS("usr", overhead_us, OVERHEAD_US),
        HPP__COLOR_PRINT_FNS("guest sys", overhead_guest_sys, OVERHEAD_GUEST_SYS),
        HPP__COLOR_PRINT_FNS("guest usr", overhead_guest_us, OVERHEAD_GUEST_US),
        HPP__COLOR_ACC_PRINT_FNS("Children", overhead_acc, OVERHEAD_ACC),
        HPP__PRINT_FNS("Samples", samples, SAMPLES),
        HPP__PRINT_FNS("Period", period, PERIOD)
};

struct perf_hpp_list perf_hpp_list = {
        .fields = LIST_HEAD_INIT(perf_hpp_list.fields),
        .sorts  = LIST_HEAD_INIT(perf_hpp_list.sorts),
        .nr_header_lines = 1,
};

#undef HPP__COLOR_PRINT_FNS
#undef HPP__COLOR_ACC_PRINT_FNS
#undef HPP__PRINT_FNS

#undef HPP_PERCENT_FNS
#undef HPP_PERCENT_ACC_FNS
#undef HPP_RAW_FNS

#undef __HPP_HEADER_FN
#undef __HPP_WIDTH_FN
#undef __HPP_COLOR_PERCENT_FN
#undef __HPP_ENTRY_PERCENT_FN
#undef __HPP_COLOR_ACC_PERCENT_FN
#undef __HPP_ENTRY_ACC_PERCENT_FN
#undef __HPP_ENTRY_RAW_FN
#undef __HPP_SORT_FN
#undef __HPP_SORT_ACC_FN
#undef __HPP_SORT_RAW_FN

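/*
 * Initialize the static format descriptors and register the default output
 * fields, honoring the symbol_conf toggles (cumulate callchains, CPU
 * utilization, guest profiling, sample and period counts).  A strict
 * user-specified field order bypasses the defaults.
 */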
void perf_hpp__init(void)
{
        int i;

        for (i = 0; i < PERF_HPP__MAX_INDEX; i++) {
                struct perf_hpp_fmt *fmt = &perf_hpp__format[i];

                INIT_LIST_HEAD(&fmt->list);

                /* sort_list may be linked by setup_sorting() */
                if (fmt->sort_list.next == NULL)
                        INIT_LIST_HEAD(&fmt->sort_list);
        }

        /*
         * If user specified field order, no need to setup default fields.
         */
        if (is_strict_order(field_order))
                return;

        if (symbol_conf.cumulate_callchain) {
                hpp_dimension__add_output(PERF_HPP__OVERHEAD_ACC);
                perf_hpp__format[PERF_HPP__OVERHEAD].name = "Self";
        }

        hpp_dimension__add_output(PERF_HPP__OVERHEAD);

        if (symbol_conf.show_cpu_utilization) {
                hpp_dimension__add_output(PERF_HPP__OVERHEAD_SYS);
                hpp_dimension__add_output(PERF_HPP__OVERHEAD_US);

                if (perf_guest) {
                        hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_SYS);
                        hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_US);
                }
        }

        if (symbol_conf.show_nr_samples)
                hpp_dimension__add_output(PERF_HPP__SAMPLES);

        if (symbol_conf.show_total_period)
                hpp_dimension__add_output(PERF_HPP__PERIOD);
}

void perf_hpp_list__column_register(struct perf_hpp_list *list,
                                    struct perf_hpp_fmt *format)
{
        list_add_tail(&format->list, &list->fields);
}

void perf_hpp_list__register_sort_field(struct perf_hpp_list *list,
                                        struct perf_hpp_fmt *format)
{
        list_add_tail(&format->sort_list, &list->sorts);
}

void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list,
                                       struct perf_hpp_fmt *format)
{
        list_add(&format->sort_list, &list->sorts);
}

void perf_hpp__column_unregister(struct perf_hpp_fmt *format)
{
        list_del_init(&format->list);
}

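/*
 * Undo the cumulate setup done in perf_hpp__init(): unregister the
 * accumulated "Children" column and rename the overhead column from "Self"
 * back to "Overhead".
 */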
void perf_hpp__cancel_cumulate(void)
{
        struct perf_hpp_fmt *fmt, *acc, *ovh, *tmp;

        if (is_strict_order(field_order))
                return;

        ovh = &perf_hpp__format[PERF_HPP__OVERHEAD];
        acc = &perf_hpp__format[PERF_HPP__OVERHEAD_ACC];

        perf_hpp_list__for_each_format_safe(&perf_hpp_list, fmt, tmp) {
                if (acc->equal(acc, fmt)) {
                        perf_hpp__column_unregister(fmt);
                        continue;
                }

                if (ovh->equal(ovh, fmt))
                        fmt->name = "Overhead";
        }
}

static bool fmt_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
        return a->equal && a->equal(a, b);
}

void perf_hpp__setup_output_field(struct perf_hpp_list *list)
{
        struct perf_hpp_fmt *fmt;

        /* append sort keys to output field */
        perf_hpp_list__for_each_sort_list(list, fmt) {
                struct perf_hpp_fmt *pos;

                /* skip sort-only fields ("sort_compute" in perf diff) */
                if (!fmt->entry && !fmt->color)
                        continue;

                perf_hpp_list__for_each_format(list, pos) {
                        if (fmt_equal(fmt, pos))
                                goto next;
                }

                perf_hpp__column_register(fmt);
next:
                continue;
        }
}

void perf_hpp__append_sort_keys(struct perf_hpp_list *list)
{
        struct perf_hpp_fmt *fmt;

        /* append output fields to sort keys */
        perf_hpp_list__for_each_format(list, fmt) {
                struct perf_hpp_fmt *pos;

                perf_hpp_list__for_each_sort_list(list, pos) {
                        if (fmt_equal(fmt, pos))
                                goto next;
                }

                perf_hpp__register_sort_field(fmt);
next:
                continue;
        }
}

static void fmt_free(struct perf_hpp_fmt *fmt)
{
        /*
         * At this point fmt should be completely
         * unhooked, if not it's a bug.
         */
        BUG_ON(!list_empty(&fmt->list));
        BUG_ON(!list_empty(&fmt->sort_list));

        if (fmt->free)
                fmt->free(fmt);
}

void perf_hpp__reset_output_field(struct perf_hpp_list *list)
{
        struct perf_hpp_fmt *fmt, *tmp;

        /* reset output fields */
        perf_hpp_list__for_each_format_safe(list, fmt, tmp) {
                list_del_init(&fmt->list);
                list_del_init(&fmt->sort_list);
                fmt_free(fmt);
        }

        /* reset sort keys */
        perf_hpp_list__for_each_sort_list_safe(list, fmt, tmp) {
                list_del_init(&fmt->list);
                list_del_init(&fmt->sort_list);
                fmt_free(fmt);
        }
}

/*
 * See hists__fprintf to match the column widths
 */
unsigned int hists__sort_list_width(struct hists *hists)
{
        struct perf_hpp_fmt *fmt;
        int ret = 0;
        bool first = true;
        struct perf_hpp dummy_hpp;

        hists__for_each_format(hists, fmt) {
                if (perf_hpp__should_skip(fmt, hists))
                        continue;

                if (first)
                        first = false;
                else
                        ret += 2;

                ret += fmt->width(fmt, &dummy_hpp, hists);
        }

        if (verbose > 0 && hists__has(hists, sym)) /* Addr + origin */
                ret += 3 + BITS_PER_LONG / 4;

        return ret;
}

unsigned int hists__overhead_width(struct hists *hists)
{
        struct perf_hpp_fmt *fmt;
        int ret = 0;
        bool first = true;
        struct perf_hpp dummy_hpp;

        hists__for_each_format(hists, fmt) {
                if (perf_hpp__is_sort_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
                        break;

                if (first)
                        first = false;
                else
                        ret += 2;

                ret += fmt->width(fmt, &dummy_hpp, hists);
        }

        return ret;
}

void perf_hpp__reset_width(struct perf_hpp_fmt *fmt, struct hists *hists)
{
        if (perf_hpp__is_sort_entry(fmt))
                return perf_hpp__reset_sort_width(fmt, hists);

        if (perf_hpp__is_dynamic_entry(fmt))
                return;

        BUG_ON(fmt->idx >= PERF_HPP__MAX_INDEX);

        switch (fmt->idx) {
        case PERF_HPP__OVERHEAD:
        case PERF_HPP__OVERHEAD_SYS:
        case PERF_HPP__OVERHEAD_US:
        case PERF_HPP__OVERHEAD_ACC:
                fmt->len = 8;
                break;

        case PERF_HPP__OVERHEAD_GUEST_SYS:
        case PERF_HPP__OVERHEAD_GUEST_US:
                fmt->len = 9;
                break;

        case PERF_HPP__SAMPLES:
        case PERF_HPP__PERIOD:
                fmt->len = 12;
                break;

        default:
                break;
        }
}

void hists__reset_column_width(struct hists *hists)
{
        struct perf_hpp_fmt *fmt;
        struct perf_hpp_list_node *node;

        hists__for_each_format(hists, fmt)
                perf_hpp__reset_width(fmt, hists);

        /* hierarchy entries have their own hpp list */
        list_for_each_entry(node, &hists->hpp_formats, list) {
                perf_hpp_list__for_each_format(&node->hpp, fmt)
                        perf_hpp__reset_width(fmt, hists);
        }
}

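/*
 * Apply a comma-separated width list (e.g. "8,20,12", presumably the value
 * of perf report's -w/--column-widths option) to the output formats in
 * order, stopping after the first entry not followed by a comma.
 */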
void perf_hpp__set_user_width(const char *width_list_str)
{
        struct perf_hpp_fmt *fmt;
        const char *ptr = width_list_str;

        perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
                char *p;

                int len = strtol(ptr, &p, 10);
                fmt->user_len = len;

                if (*p == ',')
                        ptr = p + 1;
                else
                        break;
        }
}

static int add_hierarchy_fmt(struct hists *hists, struct perf_hpp_fmt *fmt)
{
        struct perf_hpp_list_node *node = NULL;
        struct perf_hpp_fmt *fmt_copy;
        bool found = false;
        bool skip = perf_hpp__should_skip(fmt, hists);

        list_for_each_entry(node, &hists->hpp_formats, list) {
                if (node->level == fmt->level) {
                        found = true;
                        break;
                }
        }

        if (!found) {
                node = malloc(sizeof(*node));
                if (node == NULL)
                        return -1;

                node->skip = skip;
                node->level = fmt->level;
                perf_hpp_list__init(&node->hpp);

                hists->nr_hpp_node++;
                list_add_tail(&node->list, &hists->hpp_formats);
        }

        fmt_copy = perf_hpp_fmt__dup(fmt);
        if (fmt_copy == NULL)
                return -1;

        if (!skip)
                node->skip = false;

        list_add_tail(&fmt_copy->list, &node->hpp.fields);
        list_add_tail(&fmt_copy->sort_list, &node->hpp.sorts);

        return 0;
}

int perf_hpp__setup_hists_formats(struct perf_hpp_list *list,
                                  struct evlist *evlist)
{
        struct evsel *evsel;
        struct perf_hpp_fmt *fmt;
        struct hists *hists;
        int ret;

        if (!symbol_conf.report_hierarchy)
                return 0;

        evlist__for_each_entry(evlist, evsel) {
                hists = evsel__hists(evsel);

                perf_hpp_list__for_each_sort_list(list, fmt) {
                        if (perf_hpp__is_dynamic_entry(fmt) &&
                            !perf_hpp__defined_dynamic_entry(fmt, hists))
                                continue;

                        ret = add_hierarchy_fmt(hists, fmt);
                        if (ret < 0)
                                return ret;
                }
        }

        return 0;
}