1 // SPDX-License-Identifier: GPL-2.0
6 #include <linux/compiler.h>
8 #include "../util/callchain.h"
9 #include "../util/debug.h"
10 #include "../util/hist.h"
11 #include "../util/sort.h"
12 #include "../util/evsel.h"
13 #include "../util/evlist.h"
16 /* hist period print (hpp) functions */
/*
 * Call a print callback and advance the hpp buffer by the number of
 * bytes written, yielding that count as the macro's value.
 * NOTE(review): the GNU statement-expression wrapper was lost in
 * extraction and has been restored -- verify against the original file.
 */
#define hpp__call_print_fn(hpp, fn, fmt, ...)			\
({								\
	int __ret = fn(hpp, fmt, ##__VA_ARGS__);		\
	advance_hpp(hpp, __ret);				\
	__ret;							\
})
25 static int __hpp__fmt(struct perf_hpp
*hpp
, struct hist_entry
*he
,
26 hpp_field_fn get_field
, const char *fmt
, int len
,
27 hpp_snprint_fn print_fn
, bool fmt_percent
)
30 struct hists
*hists
= he
->hists
;
31 struct evsel
*evsel
= hists_to_evsel(hists
);
33 size_t size
= hpp
->size
;
37 u64 total
= hists__total_period(hists
);
40 percent
= 100.0 * get_field(he
) / total
;
42 ret
= hpp__call_print_fn(hpp
, print_fn
, fmt
, len
, percent
);
44 ret
= hpp__call_print_fn(hpp
, print_fn
, fmt
, len
, get_field(he
));
46 if (evsel__is_group_event(evsel
)) {
47 int prev_idx
, idx_delta
;
48 struct hist_entry
*pair
;
49 int nr_members
= evsel
->core
.nr_members
;
51 prev_idx
= evsel__group_idx(evsel
);
53 list_for_each_entry(pair
, &he
->pairs
.head
, pairs
.node
) {
54 u64 period
= get_field(pair
);
55 u64 total
= hists__total_period(pair
->hists
);
60 evsel
= hists_to_evsel(pair
->hists
);
61 idx_delta
= evsel__group_idx(evsel
) - prev_idx
- 1;
65 * zero-fill group members in the middle which
69 ret
+= hpp__call_print_fn(hpp
, print_fn
,
72 ret
+= hpp__call_print_fn(hpp
, print_fn
,
78 ret
+= hpp__call_print_fn(hpp
, print_fn
, fmt
, len
,
79 100.0 * period
/ total
);
81 ret
+= hpp__call_print_fn(hpp
, print_fn
, fmt
,
85 prev_idx
= evsel__group_idx(evsel
);
88 idx_delta
= nr_members
- prev_idx
- 1;
92 * zero-fill group members at last which have no sample
95 ret
+= hpp__call_print_fn(hpp
, print_fn
,
98 ret
+= hpp__call_print_fn(hpp
, print_fn
,
105 * Restore original buf and size as it's where caller expects
106 * the result will be saved.
114 int hpp__fmt(struct perf_hpp_fmt
*fmt
, struct perf_hpp
*hpp
,
115 struct hist_entry
*he
, hpp_field_fn get_field
,
116 const char *fmtstr
, hpp_snprint_fn print_fn
, bool fmt_percent
)
118 int len
= fmt
->user_len
?: fmt
->len
;
120 if (symbol_conf
.field_sep
) {
121 return __hpp__fmt(hpp
, he
, get_field
, fmtstr
, 1,
122 print_fn
, fmt_percent
);
126 len
-= 2; /* 2 for a space and a % sign */
130 return __hpp__fmt(hpp
, he
, get_field
, fmtstr
, len
, print_fn
, fmt_percent
);
133 int hpp__fmt_acc(struct perf_hpp_fmt
*fmt
, struct perf_hpp
*hpp
,
134 struct hist_entry
*he
, hpp_field_fn get_field
,
135 const char *fmtstr
, hpp_snprint_fn print_fn
, bool fmt_percent
)
137 if (!symbol_conf
.cumulate_callchain
) {
138 int len
= fmt
->user_len
?: fmt
->len
;
139 return snprintf(hpp
->buf
, hpp
->size
, " %*s", len
- 1, "N/A");
142 return hpp__fmt(fmt
, hpp
, he
, get_field
, fmtstr
, print_fn
, fmt_percent
);
145 static int field_cmp(u64 field_a
, u64 field_b
)
147 if (field_a
> field_b
)
149 if (field_a
< field_b
)
154 static int hist_entry__new_pair(struct hist_entry
*a
, struct hist_entry
*b
,
155 hpp_field_fn get_field
, int nr_members
,
156 u64
**fields_a
, u64
**fields_b
)
158 u64
*fa
= calloc(nr_members
, sizeof(*fa
)),
159 *fb
= calloc(nr_members
, sizeof(*fb
));
160 struct hist_entry
*pair
;
165 list_for_each_entry(pair
, &a
->pairs
.head
, pairs
.node
) {
166 struct evsel
*evsel
= hists_to_evsel(pair
->hists
);
167 fa
[evsel__group_idx(evsel
)] = get_field(pair
);
170 list_for_each_entry(pair
, &b
->pairs
.head
, pairs
.node
) {
171 struct evsel
*evsel
= hists_to_evsel(pair
->hists
);
172 fb
[evsel__group_idx(evsel
)] = get_field(pair
);
181 *fields_a
= *fields_b
= NULL
;
185 static int __hpp__group_sort_idx(struct hist_entry
*a
, struct hist_entry
*b
,
186 hpp_field_fn get_field
, int idx
)
188 struct evsel
*evsel
= hists_to_evsel(a
->hists
);
189 u64
*fields_a
, *fields_b
;
190 int cmp
, nr_members
, ret
, i
;
192 cmp
= field_cmp(get_field(a
), get_field(b
));
193 if (!evsel__is_group_event(evsel
))
196 nr_members
= evsel
->core
.nr_members
;
197 if (idx
< 1 || idx
>= nr_members
)
200 ret
= hist_entry__new_pair(a
, b
, get_field
, nr_members
, &fields_a
, &fields_b
);
206 ret
= field_cmp(fields_a
[idx
], fields_b
[idx
]);
210 for (i
= 1; i
< nr_members
; i
++) {
212 ret
= field_cmp(fields_a
[i
], fields_b
[i
]);
225 static int __hpp__sort(struct hist_entry
*a
, struct hist_entry
*b
,
226 hpp_field_fn get_field
)
231 u64
*fields_a
, *fields_b
;
233 if (symbol_conf
.group_sort_idx
&& symbol_conf
.event_group
) {
234 return __hpp__group_sort_idx(a
, b
, get_field
,
235 symbol_conf
.group_sort_idx
);
238 ret
= field_cmp(get_field(a
), get_field(b
));
239 if (ret
|| !symbol_conf
.event_group
)
242 evsel
= hists_to_evsel(a
->hists
);
243 if (!evsel__is_group_event(evsel
))
246 nr_members
= evsel
->core
.nr_members
;
247 i
= hist_entry__new_pair(a
, b
, get_field
, nr_members
, &fields_a
, &fields_b
);
251 for (i
= 1; i
< nr_members
; i
++) {
252 ret
= field_cmp(fields_a
[i
], fields_b
[i
]);
264 static int __hpp__sort_acc(struct hist_entry
*a
, struct hist_entry
*b
,
265 hpp_field_fn get_field
)
269 if (symbol_conf
.cumulate_callchain
) {
271 * Put caller above callee when they have equal period.
273 ret
= field_cmp(get_field(a
), get_field(b
));
277 if (a
->thread
!= b
->thread
|| !hist_entry__has_callchains(a
) || !symbol_conf
.use_callchain
)
280 ret
= b
->callchain
->max_depth
- a
->callchain
->max_depth
;
281 if (callchain_param
.order
== ORDER_CALLER
)
287 static int hpp__width_fn(struct perf_hpp_fmt
*fmt
,
288 struct perf_hpp
*hpp __maybe_unused
,
291 int len
= fmt
->user_len
?: fmt
->len
;
292 struct evsel
*evsel
= hists_to_evsel(hists
);
294 if (symbol_conf
.event_group
)
295 len
= max(len
, evsel
->core
.nr_members
* fmt
->len
);
297 if (len
< (int)strlen(fmt
->name
))
298 len
= strlen(fmt
->name
);
303 static int hpp__header_fn(struct perf_hpp_fmt
*fmt
, struct perf_hpp
*hpp
,
304 struct hists
*hists
, int line __maybe_unused
,
305 int *span __maybe_unused
)
307 int len
= hpp__width_fn(fmt
, hpp
, hists
);
308 return scnprintf(hpp
->buf
, hpp
->size
, "%*s", len
, fmt
->name
);
311 int hpp_color_scnprintf(struct perf_hpp
*hpp
, const char *fmt
, ...)
314 ssize_t ssize
= hpp
->size
;
319 len
= va_arg(args
, int);
320 percent
= va_arg(args
, double);
321 ret
= percent_color_len_snprintf(hpp
->buf
, hpp
->size
, fmt
, len
, percent
);
324 return (ret
>= ssize
) ? (ssize
- 1) : ret
;
327 static int hpp_entry_scnprintf(struct perf_hpp
*hpp
, const char *fmt
, ...)
330 ssize_t ssize
= hpp
->size
;
334 ret
= vsnprintf(hpp
->buf
, hpp
->size
, fmt
, args
);
337 return (ret
>= ssize
) ? (ssize
- 1) : ret
;
/*
 * Generators for the per-column accessor, color, entry and sort
 * callbacks.  Each HPP_*_FNS(_type, _field) invocation below expands to
 * a family of static functions named after the column type.
 * NOTE(review): braces lost in extraction were restored from the
 * surviving macro bodies -- verify against the original file.
 */
#define __HPP_COLOR_PERCENT_FN(_type, _field)					\
static u64 he_get_##_field(struct hist_entry *he)				\
{										\
	return he->stat._field;							\
}										\
										\
static int hpp__color_##_type(struct perf_hpp_fmt *fmt,				\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%",		\
			hpp_color_scnprintf, true);				\
}

#define __HPP_ENTRY_PERCENT_FN(_type, _field)					\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,				\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%",		\
			hpp_entry_scnprintf, true);				\
}

#define __HPP_SORT_FN(_type, _field)						\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort(a, b, he_get_##_field);				\
}

#define __HPP_COLOR_ACC_PERCENT_FN(_type, _field)				\
static u64 he_get_acc_##_field(struct hist_entry *he)				\
{										\
	return he->stat_acc->_field;						\
}										\
										\
static int hpp__color_##_type(struct perf_hpp_fmt *fmt,				\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%",	\
			    hpp_color_scnprintf, true);				\
}

#define __HPP_ENTRY_ACC_PERCENT_FN(_type, _field)				\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,				\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%",	\
			    hpp_entry_scnprintf, true);				\
}

#define __HPP_SORT_ACC_FN(_type, _field)					\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort_acc(a, b, he_get_acc_##_field);			\
}

#define __HPP_ENTRY_RAW_FN(_type, _field)					\
static u64 he_get_raw_##_field(struct hist_entry *he)				\
{										\
	return he->stat._field;							\
}										\
										\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,				\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_raw_##_field, " %*"PRIu64,		\
			hpp_entry_scnprintf, false);				\
}

#define __HPP_SORT_RAW_FN(_type, _field)					\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort(a, b, he_get_raw_##_field);				\
}

#define HPP_PERCENT_FNS(_type, _field)		\
__HPP_COLOR_PERCENT_FN(_type, _field)		\
__HPP_ENTRY_PERCENT_FN(_type, _field)		\
__HPP_SORT_FN(_type, _field)

#define HPP_PERCENT_ACC_FNS(_type, _field)	\
__HPP_COLOR_ACC_PERCENT_FN(_type, _field)	\
__HPP_ENTRY_ACC_PERCENT_FN(_type, _field)	\
__HPP_SORT_ACC_FN(_type, _field)

#define HPP_RAW_FNS(_type, _field)		\
__HPP_ENTRY_RAW_FN(_type, _field)		\
__HPP_SORT_RAW_FN(_type, _field)
431 HPP_PERCENT_FNS(overhead
, period
)
432 HPP_PERCENT_FNS(overhead_sys
, period_sys
)
433 HPP_PERCENT_FNS(overhead_us
, period_us
)
434 HPP_PERCENT_FNS(overhead_guest_sys
, period_guest_sys
)
435 HPP_PERCENT_FNS(overhead_guest_us
, period_guest_us
)
436 HPP_PERCENT_ACC_FNS(overhead_acc
, period
)
438 HPP_RAW_FNS(samples
, nr_events
)
439 HPP_RAW_FNS(period
, period
)
441 static int64_t hpp__nop_cmp(struct perf_hpp_fmt
*fmt __maybe_unused
,
442 struct hist_entry
*a __maybe_unused
,
443 struct hist_entry
*b __maybe_unused
)
448 static bool perf_hpp__is_hpp_entry(struct perf_hpp_fmt
*a
)
450 return a
->header
== hpp__header_fn
;
453 static bool hpp__equal(struct perf_hpp_fmt
*a
, struct perf_hpp_fmt
*b
)
455 if (!perf_hpp__is_hpp_entry(a
) || !perf_hpp__is_hpp_entry(b
))
458 return a
->idx
== b
->idx
;
/*
 * Designated initializers for one perf_hpp__format[] slot.  Three
 * variants: colored+accumulated, colored, and plain (no .color).
 * NOTE(review): the opening brace and ".name = _name" line were lost in
 * extraction and have been restored -- verify against the original file.
 */
#define HPP__COLOR_PRINT_FNS(_name, _fn, _idx)	\
	{					\
		.name   = _name,		\
		.header	= hpp__header_fn,	\
		.width	= hpp__width_fn,	\
		.color	= hpp__color_ ## _fn,	\
		.entry	= hpp__entry_ ## _fn,	\
		.cmp	= hpp__nop_cmp,		\
		.collapse = hpp__nop_cmp,	\
		.sort	= hpp__sort_ ## _fn,	\
		.idx	= PERF_HPP__ ## _idx,	\
		.equal	= hpp__equal,		\
	}

#define HPP__COLOR_ACC_PRINT_FNS(_name, _fn, _idx)	\
	{						\
		.name   = _name,			\
		.header	= hpp__header_fn,		\
		.width	= hpp__width_fn,		\
		.color	= hpp__color_ ## _fn,		\
		.entry	= hpp__entry_ ## _fn,		\
		.cmp	= hpp__nop_cmp,			\
		.collapse = hpp__nop_cmp,		\
		.sort	= hpp__sort_ ## _fn,		\
		.idx	= PERF_HPP__ ## _idx,		\
		.equal	= hpp__equal,			\
	}

#define HPP__PRINT_FNS(_name, _fn, _idx)	\
	{					\
		.name   = _name,		\
		.header	= hpp__header_fn,	\
		.width	= hpp__width_fn,	\
		.entry	= hpp__entry_ ## _fn,	\
		.cmp	= hpp__nop_cmp,		\
		.collapse = hpp__nop_cmp,	\
		.sort	= hpp__sort_ ## _fn,	\
		.idx	= PERF_HPP__ ## _idx,	\
		.equal	= hpp__equal,		\
	}
502 struct perf_hpp_fmt perf_hpp__format
[] = {
503 HPP__COLOR_PRINT_FNS("Overhead", overhead
, OVERHEAD
),
504 HPP__COLOR_PRINT_FNS("sys", overhead_sys
, OVERHEAD_SYS
),
505 HPP__COLOR_PRINT_FNS("usr", overhead_us
, OVERHEAD_US
),
506 HPP__COLOR_PRINT_FNS("guest sys", overhead_guest_sys
, OVERHEAD_GUEST_SYS
),
507 HPP__COLOR_PRINT_FNS("guest usr", overhead_guest_us
, OVERHEAD_GUEST_US
),
508 HPP__COLOR_ACC_PRINT_FNS("Children", overhead_acc
, OVERHEAD_ACC
),
509 HPP__PRINT_FNS("Samples", samples
, SAMPLES
),
510 HPP__PRINT_FNS("Period", period
, PERIOD
)
513 struct perf_hpp_list perf_hpp_list
= {
514 .fields
= LIST_HEAD_INIT(perf_hpp_list
.fields
),
515 .sorts
= LIST_HEAD_INIT(perf_hpp_list
.sorts
),
516 .nr_header_lines
= 1,
519 #undef HPP__COLOR_PRINT_FNS
520 #undef HPP__COLOR_ACC_PRINT_FNS
521 #undef HPP__PRINT_FNS
523 #undef HPP_PERCENT_FNS
524 #undef HPP_PERCENT_ACC_FNS
527 #undef __HPP_HEADER_FN
528 #undef __HPP_WIDTH_FN
529 #undef __HPP_COLOR_PERCENT_FN
530 #undef __HPP_ENTRY_PERCENT_FN
531 #undef __HPP_COLOR_ACC_PERCENT_FN
532 #undef __HPP_ENTRY_ACC_PERCENT_FN
533 #undef __HPP_ENTRY_RAW_FN
535 #undef __HPP_SORT_ACC_FN
536 #undef __HPP_SORT_RAW_FN
539 void perf_hpp__init(void)
543 for (i
= 0; i
< PERF_HPP__MAX_INDEX
; i
++) {
544 struct perf_hpp_fmt
*fmt
= &perf_hpp__format
[i
];
546 INIT_LIST_HEAD(&fmt
->list
);
548 /* sort_list may be linked by setup_sorting() */
549 if (fmt
->sort_list
.next
== NULL
)
550 INIT_LIST_HEAD(&fmt
->sort_list
);
554 * If user specified field order, no need to setup default fields.
556 if (is_strict_order(field_order
))
559 if (symbol_conf
.cumulate_callchain
) {
560 hpp_dimension__add_output(PERF_HPP__OVERHEAD_ACC
);
561 perf_hpp__format
[PERF_HPP__OVERHEAD
].name
= "Self";
564 hpp_dimension__add_output(PERF_HPP__OVERHEAD
);
566 if (symbol_conf
.show_cpu_utilization
) {
567 hpp_dimension__add_output(PERF_HPP__OVERHEAD_SYS
);
568 hpp_dimension__add_output(PERF_HPP__OVERHEAD_US
);
571 hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_SYS
);
572 hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_US
);
576 if (symbol_conf
.show_nr_samples
)
577 hpp_dimension__add_output(PERF_HPP__SAMPLES
);
579 if (symbol_conf
.show_total_period
)
580 hpp_dimension__add_output(PERF_HPP__PERIOD
);
583 void perf_hpp_list__column_register(struct perf_hpp_list
*list
,
584 struct perf_hpp_fmt
*format
)
586 list_add_tail(&format
->list
, &list
->fields
);
589 void perf_hpp_list__register_sort_field(struct perf_hpp_list
*list
,
590 struct perf_hpp_fmt
*format
)
592 list_add_tail(&format
->sort_list
, &list
->sorts
);
595 void perf_hpp_list__prepend_sort_field(struct perf_hpp_list
*list
,
596 struct perf_hpp_fmt
*format
)
598 list_add(&format
->sort_list
, &list
->sorts
);
601 void perf_hpp__column_unregister(struct perf_hpp_fmt
*format
)
603 list_del_init(&format
->list
);
606 void perf_hpp__cancel_cumulate(void)
608 struct perf_hpp_fmt
*fmt
, *acc
, *ovh
, *tmp
;
610 if (is_strict_order(field_order
))
613 ovh
= &perf_hpp__format
[PERF_HPP__OVERHEAD
];
614 acc
= &perf_hpp__format
[PERF_HPP__OVERHEAD_ACC
];
616 perf_hpp_list__for_each_format_safe(&perf_hpp_list
, fmt
, tmp
) {
617 if (acc
->equal(acc
, fmt
)) {
618 perf_hpp__column_unregister(fmt
);
622 if (ovh
->equal(ovh
, fmt
))
623 fmt
->name
= "Overhead";
627 static bool fmt_equal(struct perf_hpp_fmt
*a
, struct perf_hpp_fmt
*b
)
629 return a
->equal
&& a
->equal(a
, b
);
632 void perf_hpp__setup_output_field(struct perf_hpp_list
*list
)
634 struct perf_hpp_fmt
*fmt
;
636 /* append sort keys to output field */
637 perf_hpp_list__for_each_sort_list(list
, fmt
) {
638 struct perf_hpp_fmt
*pos
;
640 /* skip sort-only fields ("sort_compute" in perf diff) */
641 if (!fmt
->entry
&& !fmt
->color
)
644 perf_hpp_list__for_each_format(list
, pos
) {
645 if (fmt_equal(fmt
, pos
))
649 perf_hpp__column_register(fmt
);
655 void perf_hpp__append_sort_keys(struct perf_hpp_list
*list
)
657 struct perf_hpp_fmt
*fmt
;
659 /* append output fields to sort keys */
660 perf_hpp_list__for_each_format(list
, fmt
) {
661 struct perf_hpp_fmt
*pos
;
663 perf_hpp_list__for_each_sort_list(list
, pos
) {
664 if (fmt_equal(fmt
, pos
))
668 perf_hpp__register_sort_field(fmt
);
675 static void fmt_free(struct perf_hpp_fmt
*fmt
)
678 * At this point fmt should be completely
679 * unhooked, if not it's a bug.
681 BUG_ON(!list_empty(&fmt
->list
));
682 BUG_ON(!list_empty(&fmt
->sort_list
));
688 void perf_hpp__reset_output_field(struct perf_hpp_list
*list
)
690 struct perf_hpp_fmt
*fmt
, *tmp
;
692 /* reset output fields */
693 perf_hpp_list__for_each_format_safe(list
, fmt
, tmp
) {
694 list_del_init(&fmt
->list
);
695 list_del_init(&fmt
->sort_list
);
699 /* reset sort keys */
700 perf_hpp_list__for_each_sort_list_safe(list
, fmt
, tmp
) {
701 list_del_init(&fmt
->list
);
702 list_del_init(&fmt
->sort_list
);
708 * See hists__fprintf to match the column widths
710 unsigned int hists__sort_list_width(struct hists
*hists
)
712 struct perf_hpp_fmt
*fmt
;
715 struct perf_hpp dummy_hpp
;
717 hists__for_each_format(hists
, fmt
) {
718 if (perf_hpp__should_skip(fmt
, hists
))
726 ret
+= fmt
->width(fmt
, &dummy_hpp
, hists
);
729 if (verbose
> 0 && hists__has(hists
, sym
)) /* Addr + origin */
730 ret
+= 3 + BITS_PER_LONG
/ 4;
735 unsigned int hists__overhead_width(struct hists
*hists
)
737 struct perf_hpp_fmt
*fmt
;
740 struct perf_hpp dummy_hpp
;
742 hists__for_each_format(hists
, fmt
) {
743 if (perf_hpp__is_sort_entry(fmt
) || perf_hpp__is_dynamic_entry(fmt
))
751 ret
+= fmt
->width(fmt
, &dummy_hpp
, hists
);
757 void perf_hpp__reset_width(struct perf_hpp_fmt
*fmt
, struct hists
*hists
)
759 if (perf_hpp__is_sort_entry(fmt
))
760 return perf_hpp__reset_sort_width(fmt
, hists
);
762 if (perf_hpp__is_dynamic_entry(fmt
))
765 BUG_ON(fmt
->idx
>= PERF_HPP__MAX_INDEX
);
768 case PERF_HPP__OVERHEAD
:
769 case PERF_HPP__OVERHEAD_SYS
:
770 case PERF_HPP__OVERHEAD_US
:
771 case PERF_HPP__OVERHEAD_ACC
:
775 case PERF_HPP__OVERHEAD_GUEST_SYS
:
776 case PERF_HPP__OVERHEAD_GUEST_US
:
780 case PERF_HPP__SAMPLES
:
781 case PERF_HPP__PERIOD
:
790 void hists__reset_column_width(struct hists
*hists
)
792 struct perf_hpp_fmt
*fmt
;
793 struct perf_hpp_list_node
*node
;
795 hists__for_each_format(hists
, fmt
)
796 perf_hpp__reset_width(fmt
, hists
);
798 /* hierarchy entries have their own hpp list */
799 list_for_each_entry(node
, &hists
->hpp_formats
, list
) {
800 perf_hpp_list__for_each_format(&node
->hpp
, fmt
)
801 perf_hpp__reset_width(fmt
, hists
);
805 void perf_hpp__set_user_width(const char *width_list_str
)
807 struct perf_hpp_fmt
*fmt
;
808 const char *ptr
= width_list_str
;
810 perf_hpp_list__for_each_format(&perf_hpp_list
, fmt
) {
813 int len
= strtol(ptr
, &p
, 10);
823 static int add_hierarchy_fmt(struct hists
*hists
, struct perf_hpp_fmt
*fmt
)
825 struct perf_hpp_list_node
*node
= NULL
;
826 struct perf_hpp_fmt
*fmt_copy
;
828 bool skip
= perf_hpp__should_skip(fmt
, hists
);
830 list_for_each_entry(node
, &hists
->hpp_formats
, list
) {
831 if (node
->level
== fmt
->level
) {
838 node
= malloc(sizeof(*node
));
843 node
->level
= fmt
->level
;
844 perf_hpp_list__init(&node
->hpp
);
846 hists
->nr_hpp_node
++;
847 list_add_tail(&node
->list
, &hists
->hpp_formats
);
850 fmt_copy
= perf_hpp_fmt__dup(fmt
);
851 if (fmt_copy
== NULL
)
857 list_add_tail(&fmt_copy
->list
, &node
->hpp
.fields
);
858 list_add_tail(&fmt_copy
->sort_list
, &node
->hpp
.sorts
);
863 int perf_hpp__setup_hists_formats(struct perf_hpp_list
*list
,
864 struct evlist
*evlist
)
867 struct perf_hpp_fmt
*fmt
;
871 if (!symbol_conf
.report_hierarchy
)
874 evlist__for_each_entry(evlist
, evsel
) {
875 hists
= evsel__hists(evsel
);
877 perf_hpp_list__for_each_sort_list(list
, fmt
) {
878 if (perf_hpp__is_dynamic_entry(fmt
) &&
879 !perf_hpp__defined_dynamic_entry(fmt
, hists
))
882 ret
= add_hierarchy_fmt(hists
, fmt
);