tools/perf/ui/hist.c
// SPDX-License-Identifier: GPL-2.0
#include <inttypes.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <linux/compiler.h>

#include "../util/callchain.h"
#include "../util/debug.h"
#include "../util/hist.h"
#include "../util/sort.h"
#include "../util/evsel.h"
#include "../util/evlist.h"
#include "../perf.h"

/* hist period print (hpp) functions */

#define hpp__call_print_fn(hpp, fn, fmt, ...)			\
({								\
	int __ret = fn(hpp, fmt, ##__VA_ARGS__);		\
	advance_hpp(hpp, __ret);				\
	__ret;							\
})

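/*
 * Format one field of a hist entry, either as a percentage of the
 * total period or as a raw value.  For grouped events, the other
 * group members' values are printed as well, zero-filling members
 * that have no samples.
 */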
static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
		      hpp_field_fn get_field, const char *fmt, int len,
		      hpp_snprint_fn print_fn, bool fmt_percent)
{
	int ret;
	struct hists *hists = he->hists;
	struct evsel *evsel = hists_to_evsel(hists);
	char *buf = hpp->buf;
	size_t size = hpp->size;

	if (fmt_percent) {
		double percent = 0.0;
		u64 total = hists__total_period(hists);

		if (total)
			percent = 100.0 * get_field(he) / total;

		ret = hpp__call_print_fn(hpp, print_fn, fmt, len, percent);
	} else
		ret = hpp__call_print_fn(hpp, print_fn, fmt, len, get_field(he));

	if (perf_evsel__is_group_event(evsel)) {
		int prev_idx, idx_delta;
		struct hist_entry *pair;
		int nr_members = evsel->core.nr_members;

		prev_idx = perf_evsel__group_idx(evsel);

		list_for_each_entry(pair, &he->pairs.head, pairs.node) {
			u64 period = get_field(pair);
			u64 total = hists__total_period(pair->hists);

			if (!total)
				continue;

			evsel = hists_to_evsel(pair->hists);
			idx_delta = perf_evsel__group_idx(evsel) - prev_idx - 1;

			while (idx_delta--) {
				/*
				 * zero-fill group members in the middle which
				 * have no sample
				 */
				if (fmt_percent) {
					ret += hpp__call_print_fn(hpp, print_fn,
								  fmt, len, 0.0);
				} else {
					ret += hpp__call_print_fn(hpp, print_fn,
								  fmt, len, 0ULL);
				}
			}

			if (fmt_percent) {
				ret += hpp__call_print_fn(hpp, print_fn, fmt, len,
							  100.0 * period / total);
			} else {
				ret += hpp__call_print_fn(hpp, print_fn, fmt,
							  len, period);
			}

			prev_idx = perf_evsel__group_idx(evsel);
		}

		idx_delta = nr_members - prev_idx - 1;

		while (idx_delta--) {
			/*
			 * zero-fill group members at last which have no sample
			 */
			if (fmt_percent) {
				ret += hpp__call_print_fn(hpp, print_fn,
							  fmt, len, 0.0);
			} else {
				ret += hpp__call_print_fn(hpp, print_fn,
							  fmt, len, 0ULL);
			}
		}
	}

	/*
	 * Restore original buf and size as it's where caller expects
	 * the result will be saved.
	 */
	hpp->buf = buf;
	hpp->size = size;

	return ret;
}

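/*
 * Public wrapper: pick the column width (the user override or the
 * format's default, minus room for the leading space and '%' sign in
 * percent output) and hand off to __hpp__fmt().  When a field
 * separator is configured, width padding is effectively disabled
 * (a width of 1 is used).
 */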
int hpp__fmt(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
	     struct hist_entry *he, hpp_field_fn get_field,
	     const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent)
{
	int len = fmt->user_len ?: fmt->len;

	if (symbol_conf.field_sep) {
		return __hpp__fmt(hpp, he, get_field, fmtstr, 1,
				  print_fn, fmt_percent);
	}

	if (fmt_percent)
		len -= 2; /* 2 for a space and a % sign */
	else
		len -= 1;

	return __hpp__fmt(hpp, he, get_field, fmtstr, len, print_fn, fmt_percent);
}

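/*
 * Same as hpp__fmt() but for the accumulated (children) column;
 * prints "N/A" when cumulative callchains are disabled.
 */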
int hpp__fmt_acc(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
		 struct hist_entry *he, hpp_field_fn get_field,
		 const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent)
{
	if (!symbol_conf.cumulate_callchain) {
		int len = fmt->user_len ?: fmt->len;

		return snprintf(hpp->buf, hpp->size, " %*s", len - 1, "N/A");
	}

	return hpp__fmt(fmt, hpp, he, get_field, fmtstr, print_fn, fmt_percent);
}

static int field_cmp(u64 field_a, u64 field_b)
{
	if (field_a > field_b)
		return 1;
	if (field_a < field_b)
		return -1;
	return 0;
}

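/*
 * Compare two entries by a field.  For event groups, when the
 * leaders' values are equal, fall back to comparing the remaining
 * group members' values, collected per group index.
 */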
static int __hpp__sort(struct hist_entry *a, struct hist_entry *b,
		       hpp_field_fn get_field)
{
	s64 ret;
	int i, nr_members;
	struct evsel *evsel;
	struct hist_entry *pair;
	u64 *fields_a, *fields_b;

	ret = field_cmp(get_field(a), get_field(b));
	if (ret || !symbol_conf.event_group)
		return ret;

	evsel = hists_to_evsel(a->hists);
	if (!perf_evsel__is_group_event(evsel))
		return ret;

	nr_members = evsel->core.nr_members;
	fields_a = calloc(nr_members, sizeof(*fields_a));
	fields_b = calloc(nr_members, sizeof(*fields_b));

	if (!fields_a || !fields_b)
		goto out;

	list_for_each_entry(pair, &a->pairs.head, pairs.node) {
		evsel = hists_to_evsel(pair->hists);
		fields_a[perf_evsel__group_idx(evsel)] = get_field(pair);
	}

	list_for_each_entry(pair, &b->pairs.head, pairs.node) {
		evsel = hists_to_evsel(pair->hists);
		fields_b[perf_evsel__group_idx(evsel)] = get_field(pair);
	}

	for (i = 1; i < nr_members; i++) {
		ret = field_cmp(fields_a[i], fields_b[i]);
		if (ret)
			break;
	}

out:
	free(fields_a);
	free(fields_b);

	return ret;
}

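/*
 * Compare by the accumulated field; on a tie, entries of the same
 * thread are ordered by callchain depth, inverted when
 * callchain_param.order is ORDER_CALLER.
 */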
static int __hpp__sort_acc(struct hist_entry *a, struct hist_entry *b,
			   hpp_field_fn get_field)
{
	s64 ret = 0;

	if (symbol_conf.cumulate_callchain) {
		/*
		 * Put caller above callee when they have equal period.
		 */
		ret = field_cmp(get_field(a), get_field(b));
		if (ret)
			return ret;

		if (a->thread != b->thread || !hist_entry__has_callchains(a) || !symbol_conf.use_callchain)
			return 0;

		ret = b->callchain->max_depth - a->callchain->max_depth;
		if (callchain_param.order == ORDER_CALLER)
			ret = -ret;
	}
	return ret;
}

static int hpp__width_fn(struct perf_hpp_fmt *fmt,
			 struct perf_hpp *hpp __maybe_unused,
			 struct hists *hists)
{
	int len = fmt->user_len ?: fmt->len;
	struct evsel *evsel = hists_to_evsel(hists);

	if (symbol_conf.event_group)
		len = max(len, evsel->core.nr_members * fmt->len);

	if (len < (int)strlen(fmt->name))
		len = strlen(fmt->name);

	return len;
}

static int hpp__header_fn(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			  struct hists *hists, int line __maybe_unused,
			  int *span __maybe_unused)
{
	int len = hpp__width_fn(fmt, hpp, hists);

	return scnprintf(hpp->buf, hpp->size, "%*s", len, fmt->name);
}

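/*
 * Variadic print helpers matching hpp_snprint_fn: the color variant
 * pulls the field width and percent value off the argument list and
 * color-codes the output, while the entry variant is a plain
 * vsnprintf.  Both clamp the return value so callers never advance
 * past the end of the buffer.
 */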
int hpp_color_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
	va_list args;
	ssize_t ssize = hpp->size;
	double percent;
	int ret, len;

	va_start(args, fmt);
	len = va_arg(args, int);
	percent = va_arg(args, double);
	ret = percent_color_len_snprintf(hpp->buf, hpp->size, fmt, len, percent);
	va_end(args);

	return (ret >= ssize) ? (ssize - 1) : ret;
}

static int hpp_entry_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
	va_list args;
	ssize_t ssize = hpp->size;
	int ret;

	va_start(args, fmt);
	ret = vsnprintf(hpp->buf, hpp->size, fmt, args);
	va_end(args);

	return (ret >= ssize) ? (ssize - 1) : ret;
}

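/*
 * Generators for the per-column callbacks: each __HPP_*_FN macro
 * expands to a field getter plus the color/entry/sort functions that
 * get wired into perf_hpp__format[] below.
 */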
#define __HPP_COLOR_PERCENT_FN(_type, _field)				\
static u64 he_get_##_field(struct hist_entry *he)			\
{									\
	return he->stat._field;						\
}									\
									\
static int hpp__color_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he) \
{									\
	return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%",	\
			hpp_color_scnprintf, true);			\
}

#define __HPP_ENTRY_PERCENT_FN(_type, _field)				\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he) \
{									\
	return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%",	\
			hpp_entry_scnprintf, true);			\
}

#define __HPP_SORT_FN(_type, _field)					\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
				 struct hist_entry *a, struct hist_entry *b) \
{									\
	return __hpp__sort(a, b, he_get_##_field);			\
}

#define __HPP_COLOR_ACC_PERCENT_FN(_type, _field)			\
static u64 he_get_acc_##_field(struct hist_entry *he)			\
{									\
	return he->stat_acc->_field;					\
}									\
									\
static int hpp__color_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he) \
{									\
	return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%", \
			    hpp_color_scnprintf, true);			\
}

#define __HPP_ENTRY_ACC_PERCENT_FN(_type, _field)			\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he) \
{									\
	return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%", \
			    hpp_entry_scnprintf, true);			\
}

#define __HPP_SORT_ACC_FN(_type, _field)				\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
				 struct hist_entry *a, struct hist_entry *b) \
{									\
	return __hpp__sort_acc(a, b, he_get_acc_##_field);		\
}

#define __HPP_ENTRY_RAW_FN(_type, _field)				\
static u64 he_get_raw_##_field(struct hist_entry *he)			\
{									\
	return he->stat._field;						\
}									\
									\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he) \
{									\
	return hpp__fmt(fmt, hpp, he, he_get_raw_##_field, " %*"PRIu64, \
			hpp_entry_scnprintf, false);			\
}

#define __HPP_SORT_RAW_FN(_type, _field)				\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
				 struct hist_entry *a, struct hist_entry *b) \
{									\
	return __hpp__sort(a, b, he_get_raw_##_field);			\
}

#define HPP_PERCENT_FNS(_type, _field)		\
__HPP_COLOR_PERCENT_FN(_type, _field)		\
__HPP_ENTRY_PERCENT_FN(_type, _field)		\
__HPP_SORT_FN(_type, _field)

#define HPP_PERCENT_ACC_FNS(_type, _field)	\
__HPP_COLOR_ACC_PERCENT_FN(_type, _field)	\
__HPP_ENTRY_ACC_PERCENT_FN(_type, _field)	\
__HPP_SORT_ACC_FN(_type, _field)

#define HPP_RAW_FNS(_type, _field)		\
__HPP_ENTRY_RAW_FN(_type, _field)		\
__HPP_SORT_RAW_FN(_type, _field)

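/* instantiate the column callbacks for each overhead/raw field */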
HPP_PERCENT_FNS(overhead, period)
HPP_PERCENT_FNS(overhead_sys, period_sys)
HPP_PERCENT_FNS(overhead_us, period_us)
HPP_PERCENT_FNS(overhead_guest_sys, period_guest_sys)
HPP_PERCENT_FNS(overhead_guest_us, period_guest_us)
HPP_PERCENT_ACC_FNS(overhead_acc, period)

HPP_RAW_FNS(samples, nr_events)
HPP_RAW_FNS(period, period)

static int64_t hpp__nop_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
			    struct hist_entry *a __maybe_unused,
			    struct hist_entry *b __maybe_unused)
{
	return 0;
}

static bool perf_hpp__is_hpp_entry(struct perf_hpp_fmt *a)
{
	return a->header == hpp__header_fn;
}

static bool hpp__equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	if (!perf_hpp__is_hpp_entry(a) || !perf_hpp__is_hpp_entry(b))
		return false;

	return a->idx == b->idx;
}

#define HPP__COLOR_PRINT_FNS(_name, _fn, _idx)		\
	{						\
		.name	= _name,			\
		.header	= hpp__header_fn,		\
		.width	= hpp__width_fn,		\
		.color	= hpp__color_ ## _fn,		\
		.entry	= hpp__entry_ ## _fn,		\
		.cmp	= hpp__nop_cmp,			\
		.collapse = hpp__nop_cmp,		\
		.sort	= hpp__sort_ ## _fn,		\
		.idx	= PERF_HPP__ ## _idx,		\
		.equal	= hpp__equal,			\
	}

#define HPP__COLOR_ACC_PRINT_FNS(_name, _fn, _idx)	\
	{						\
		.name	= _name,			\
		.header	= hpp__header_fn,		\
		.width	= hpp__width_fn,		\
		.color	= hpp__color_ ## _fn,		\
		.entry	= hpp__entry_ ## _fn,		\
		.cmp	= hpp__nop_cmp,			\
		.collapse = hpp__nop_cmp,		\
		.sort	= hpp__sort_ ## _fn,		\
		.idx	= PERF_HPP__ ## _idx,		\
		.equal	= hpp__equal,			\
	}

#define HPP__PRINT_FNS(_name, _fn, _idx)		\
	{						\
		.name	= _name,			\
		.header	= hpp__header_fn,		\
		.width	= hpp__width_fn,		\
		.entry	= hpp__entry_ ## _fn,		\
		.cmp	= hpp__nop_cmp,			\
		.collapse = hpp__nop_cmp,		\
		.sort	= hpp__sort_ ## _fn,		\
		.idx	= PERF_HPP__ ## _idx,		\
		.equal	= hpp__equal,			\
	}

struct perf_hpp_fmt perf_hpp__format[] = {
	HPP__COLOR_PRINT_FNS("Overhead", overhead, OVERHEAD),
	HPP__COLOR_PRINT_FNS("sys", overhead_sys, OVERHEAD_SYS),
	HPP__COLOR_PRINT_FNS("usr", overhead_us, OVERHEAD_US),
	HPP__COLOR_PRINT_FNS("guest sys", overhead_guest_sys, OVERHEAD_GUEST_SYS),
	HPP__COLOR_PRINT_FNS("guest usr", overhead_guest_us, OVERHEAD_GUEST_US),
	HPP__COLOR_ACC_PRINT_FNS("Children", overhead_acc, OVERHEAD_ACC),
	HPP__PRINT_FNS("Samples", samples, SAMPLES),
	HPP__PRINT_FNS("Period", period, PERIOD)
};

struct perf_hpp_list perf_hpp_list = {
	.fields	= LIST_HEAD_INIT(perf_hpp_list.fields),
	.sorts	= LIST_HEAD_INIT(perf_hpp_list.sorts),
	.nr_header_lines = 1,
};

#undef HPP__COLOR_PRINT_FNS
#undef HPP__COLOR_ACC_PRINT_FNS
#undef HPP__PRINT_FNS

#undef HPP_PERCENT_FNS
#undef HPP_PERCENT_ACC_FNS
#undef HPP_RAW_FNS

#undef __HPP_HEADER_FN
#undef __HPP_WIDTH_FN
#undef __HPP_COLOR_PERCENT_FN
#undef __HPP_ENTRY_PERCENT_FN
#undef __HPP_COLOR_ACC_PERCENT_FN
#undef __HPP_ENTRY_ACC_PERCENT_FN
#undef __HPP_ENTRY_RAW_FN
#undef __HPP_SORT_FN
#undef __HPP_SORT_ACC_FN
#undef __HPP_SORT_RAW_FN

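/*
 * Set up the default output fields: overhead, plus children, CPU
 * utilization, guest and samples/period columns depending on the
 * configuration, unless the user gave an explicit field order.
 */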
void perf_hpp__init(void)
{
	int i;

	for (i = 0; i < PERF_HPP__MAX_INDEX; i++) {
		struct perf_hpp_fmt *fmt = &perf_hpp__format[i];

		INIT_LIST_HEAD(&fmt->list);

		/* sort_list may be linked by setup_sorting() */
		if (fmt->sort_list.next == NULL)
			INIT_LIST_HEAD(&fmt->sort_list);
	}

	/*
	 * If user specified field order, no need to setup default fields.
	 */
	if (is_strict_order(field_order))
		return;

	if (symbol_conf.cumulate_callchain) {
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_ACC);
		perf_hpp__format[PERF_HPP__OVERHEAD].name = "Self";
	}

	hpp_dimension__add_output(PERF_HPP__OVERHEAD);

	if (symbol_conf.show_cpu_utilization) {
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_SYS);
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_US);

		if (perf_guest) {
			hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_SYS);
			hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_US);
		}
	}

	if (symbol_conf.show_nr_samples)
		hpp_dimension__add_output(PERF_HPP__SAMPLES);

	if (symbol_conf.show_total_period)
		hpp_dimension__add_output(PERF_HPP__PERIOD);
}

void perf_hpp_list__column_register(struct perf_hpp_list *list,
				    struct perf_hpp_fmt *format)
{
	list_add_tail(&format->list, &list->fields);
}

void perf_hpp_list__register_sort_field(struct perf_hpp_list *list,
					struct perf_hpp_fmt *format)
{
	list_add_tail(&format->sort_list, &list->sorts);
}

void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list,
				       struct perf_hpp_fmt *format)
{
	list_add(&format->sort_list, &list->sorts);
}

void perf_hpp__column_unregister(struct perf_hpp_fmt *format)
{
	list_del_init(&format->list);
}

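/*
 * Drop the "Children" column and rename "Self" back to "Overhead"
 * when cumulation is cancelled after initialization.
 */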
void perf_hpp__cancel_cumulate(void)
{
	struct perf_hpp_fmt *fmt, *acc, *ovh, *tmp;

	if (is_strict_order(field_order))
		return;

	ovh = &perf_hpp__format[PERF_HPP__OVERHEAD];
	acc = &perf_hpp__format[PERF_HPP__OVERHEAD_ACC];

	perf_hpp_list__for_each_format_safe(&perf_hpp_list, fmt, tmp) {
		if (acc->equal(acc, fmt)) {
			perf_hpp__column_unregister(fmt);
			continue;
		}

		if (ovh->equal(ovh, fmt))
			fmt->name = "Overhead";
	}
}

static bool fmt_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	return a->equal && a->equal(a, b);
}

void perf_hpp__setup_output_field(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt;

	/* append sort keys to output field */
	perf_hpp_list__for_each_sort_list(list, fmt) {
		struct perf_hpp_fmt *pos;

		/* skip sort-only fields ("sort_compute" in perf diff) */
		if (!fmt->entry && !fmt->color)
			continue;

		perf_hpp_list__for_each_format(list, pos) {
			if (fmt_equal(fmt, pos))
				goto next;
		}

		perf_hpp__column_register(fmt);
next:
		continue;
	}
}

void perf_hpp__append_sort_keys(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt;

	/* append output fields to sort keys */
	perf_hpp_list__for_each_format(list, fmt) {
		struct perf_hpp_fmt *pos;

		perf_hpp_list__for_each_sort_list(list, pos) {
			if (fmt_equal(fmt, pos))
				goto next;
		}

		perf_hpp__register_sort_field(fmt);
next:
		continue;
	}
}

static void fmt_free(struct perf_hpp_fmt *fmt)
{
	/*
	 * At this point fmt should be completely
	 * unhooked, if not it's a bug.
	 */
	BUG_ON(!list_empty(&fmt->list));
	BUG_ON(!list_empty(&fmt->sort_list));

	if (fmt->free)
		fmt->free(fmt);
}

void perf_hpp__reset_output_field(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt, *tmp;

	/* reset output fields */
	perf_hpp_list__for_each_format_safe(list, fmt, tmp) {
		list_del_init(&fmt->list);
		list_del_init(&fmt->sort_list);
		fmt_free(fmt);
	}

	/* reset sort keys */
	perf_hpp_list__for_each_sort_list_safe(list, fmt, tmp) {
		list_del_init(&fmt->list);
		list_del_init(&fmt->sort_list);
		fmt_free(fmt);
	}
}

/*
 * See hists__fprintf to match the column widths
 */
unsigned int hists__sort_list_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	int ret = 0;
	bool first = true;
	struct perf_hpp dummy_hpp;

	hists__for_each_format(hists, fmt) {
		if (perf_hpp__should_skip(fmt, hists))
			continue;

		if (first)
			first = false;
		else
			ret += 2;

		ret += fmt->width(fmt, &dummy_hpp, hists);
	}

	if (verbose > 0 && hists__has(hists, sym)) /* Addr + origin */
		ret += 3 + BITS_PER_LONG / 4;

	return ret;
}

unsigned int hists__overhead_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	int ret = 0;
	bool first = true;
	struct perf_hpp dummy_hpp;

	hists__for_each_format(hists, fmt) {
		if (perf_hpp__is_sort_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
			break;

		if (first)
			first = false;
		else
			ret += 2;

		ret += fmt->width(fmt, &dummy_hpp, hists);
	}

	return ret;
}

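/*
 * Reset a column to its default width: sort entries have their own
 * reset helper, dynamic entries keep their width, and the built-in
 * columns get fixed defaults per index.
 */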
void perf_hpp__reset_width(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	if (perf_hpp__is_sort_entry(fmt))
		return perf_hpp__reset_sort_width(fmt, hists);

	if (perf_hpp__is_dynamic_entry(fmt))
		return;

	BUG_ON(fmt->idx >= PERF_HPP__MAX_INDEX);

	switch (fmt->idx) {
	case PERF_HPP__OVERHEAD:
	case PERF_HPP__OVERHEAD_SYS:
	case PERF_HPP__OVERHEAD_US:
	case PERF_HPP__OVERHEAD_ACC:
		fmt->len = 8;
		break;

	case PERF_HPP__OVERHEAD_GUEST_SYS:
	case PERF_HPP__OVERHEAD_GUEST_US:
		fmt->len = 9;
		break;

	case PERF_HPP__SAMPLES:
	case PERF_HPP__PERIOD:
		fmt->len = 12;
		break;

	default:
		break;
	}
}

void hists__reset_column_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	struct perf_hpp_list_node *node;

	hists__for_each_format(hists, fmt)
		perf_hpp__reset_width(fmt, hists);

	/* hierarchy entries have their own hpp list */
	list_for_each_entry(node, &hists->hpp_formats, list) {
		perf_hpp_list__for_each_format(&node->hpp, fmt)
			perf_hpp__reset_width(fmt, hists);
	}
}

void perf_hpp__set_user_width(const char *width_list_str)
{
	struct perf_hpp_fmt *fmt;
	const char *ptr = width_list_str;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		char *p;

		int len = strtol(ptr, &p, 10);
		fmt->user_len = len;

		if (*p == ',')
			ptr = p + 1;
		else
			break;
	}
}

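/*
 * Attach a copy of the format to the hpp list node for its hierarchy
 * level, creating the node on first use.  A level is only marked to
 * be skipped in output if every format at that level should be
 * skipped.
 */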
static int add_hierarchy_fmt(struct hists *hists, struct perf_hpp_fmt *fmt)
{
	struct perf_hpp_list_node *node = NULL;
	struct perf_hpp_fmt *fmt_copy;
	bool found = false;
	bool skip = perf_hpp__should_skip(fmt, hists);

	list_for_each_entry(node, &hists->hpp_formats, list) {
		if (node->level == fmt->level) {
			found = true;
			break;
		}
	}

	if (!found) {
		node = malloc(sizeof(*node));
		if (node == NULL)
			return -1;

		node->skip = skip;
		node->level = fmt->level;
		perf_hpp_list__init(&node->hpp);

		hists->nr_hpp_node++;
		list_add_tail(&node->list, &hists->hpp_formats);
	}

	fmt_copy = perf_hpp_fmt__dup(fmt);
	if (fmt_copy == NULL)
		return -1;

	if (!skip)
		node->skip = false;

	list_add_tail(&fmt_copy->list, &node->hpp.fields);
	list_add_tail(&fmt_copy->sort_list, &node->hpp.sorts);

	return 0;
}

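/*
 * In hierarchy mode, build per-hists copies of the sort formats so
 * each event's hists carries its own per-level column lists.
 */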
int perf_hpp__setup_hists_formats(struct perf_hpp_list *list,
				  struct evlist *evlist)
{
	struct evsel *evsel;
	struct perf_hpp_fmt *fmt;
	struct hists *hists;
	int ret;

	if (!symbol_conf.report_hierarchy)
		return 0;

	evlist__for_each_entry(evlist, evsel) {
		hists = evsel__hists(evsel);

		perf_hpp_list__for_each_sort_list(list, fmt) {
			if (perf_hpp__is_dynamic_entry(fmt) &&
			    !perf_hpp__defined_dynamic_entry(fmt, hists))
				continue;

			ret = add_hierarchy_fmt(hists, fmt);
			if (ret < 0)
				return ret;
		}
	}

	return 0;
}