// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include <linux/string.h>

#include "../../util/callchain.h"
#include "../../util/debug.h"
#include "../../util/event.h"
#include "../../util/hist.h"
#include "../../util/map.h"
#include "../../util/maps.h"
#include "../../util/symbol.h"
#include "../../util/sort.h"
#include "../../util/evsel.h"
#include "../../util/srcline.h"
#include "../../util/string2.h"
#include "../../util/thread.h"
#include "../../util/block-info.h"
#include <linux/ctype.h>
#include <linux/zalloc.h>

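/*
 * Helpers for printing callchains to a plain FILE: each returns the number
 * of characters written so callers can accumulate the output size.
 */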
static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
{
	int i;
	int ret = fprintf(fp, " ");

	for (i = 0; i < left_margin; i++)
		ret += fprintf(fp, " ");

	return ret;
}

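/*
 * Print one separator line of the callchain graph: a '|' for every depth
 * level still open in @depth_mask, padding otherwise.
 */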
static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
					  int left_margin)
{
	int i;
	size_t ret = callchain__fprintf_left_margin(fp, left_margin);

	for (i = 0; i < depth; i++)
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "| ");
		else
			ret += fprintf(fp, " ");

	ret += fprintf(fp, "\n");

	return ret;
}

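/*
 * Print a single callchain entry at @depth: the leading '|' columns, an
 * optional "--value--" marker for the first entry of a branch, and the
 * resolved symbol name (plus branch flag counts when requested).
 */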
static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_node *node,
				     struct callchain_list *chain,
				     int depth, int depth_mask, int period,
				     u64 total_samples, int left_margin)
{
	int i;
	size_t ret = 0;
	char bf[1024], *alloc_str = NULL;
	char buf[64];
	const char *str;

	ret += callchain__fprintf_left_margin(fp, left_margin);
	for (i = 0; i < depth; i++) {
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "|");
		else
			ret += fprintf(fp, " ");
		if (!period && i == depth - 1) {
			ret += fprintf(fp, "--");
			ret += callchain_node__fprintf_value(node, fp, total_samples);
			ret += fprintf(fp, "--");
		} else
			ret += fprintf(fp, "%s", " ");
	}

	str = callchain_list__sym_name(chain, bf, sizeof(bf), false);

	if (symbol_conf.show_branchflag_count) {
		callchain_list_counts__printf_value(chain, NULL,
						    buf, sizeof(buf));

		if (asprintf(&alloc_str, "%s%s", str, buf) < 0)
			str = "Not enough memory!";
		else
			str = alloc_str;
	}

	fputs(str, fp);
	fputc('\n', fp);
	free(alloc_str);

	return ret;
}

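/*
 * Placeholder symbol and callchain entry used to print the "[...]" line
 * that stands for the remaining, filtered-out hits.
 */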
static struct symbol *rem_sq_bracket;
static struct callchain_list rem_hits;

static void init_rem_hits(void)
{
	rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
	if (!rem_sq_bracket) {
		fprintf(stderr, "Not enough memory to display remaining hits\n");
		return;
	}

	strcpy(rem_sq_bracket->name, "[...]");
	rem_hits.ms.sym = rem_sq_bracket;
}

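/*
 * Recursively print a callchain rb-tree in graph mode.  Each level prints
 * its children, updates the depth mask that controls the '|' columns, and
 * appends a rem_hits ("[...]") entry when relative mode leaves unprinted
 * remaining samples.
 */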
static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
					 u64 total_samples, int depth,
					 int depth_mask, int left_margin)
{
	struct rb_node *node, *next;
	struct callchain_node *child = NULL;
	struct callchain_list *chain;
	int new_depth_mask = depth_mask;
	u64 remaining;
	size_t ret = 0;
	int i;
	uint entries_printed = 0;
	int cumul_count = 0;

	remaining = total_samples;

	node = rb_first(root);
	while (node) {
		u64 new_total;
		u64 cumul;

		child = rb_entry(node, struct callchain_node, rb_node);
		cumul = callchain_cumul_hits(child);
		remaining -= cumul;
		cumul_count += callchain_cumul_counts(child);

		/*
		 * The depth mask manages the output of pipes that show
		 * the depth. We don't want to keep the pipes of the current
		 * level for the last child of this depth.
		 * Except if we have remaining filtered hits. They will
		 * supersede the last child
		 */
		next = rb_next(node);
		if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
			new_depth_mask &= ~(1 << (depth - 1));

		/*
		 * But we keep the older depth mask for the line separator
		 * to keep the level link until we reach the last child
		 */
		ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
						   left_margin);
		i = 0;
		list_for_each_entry(chain, &child->val, list) {
			ret += ipchain__fprintf_graph(fp, child, chain, depth,
						      new_depth_mask, i++,
						      total_samples,
						      left_margin);
		}

		if (callchain_param.mode == CHAIN_GRAPH_REL)
			new_total = child->children_hit;
		else
			new_total = total_samples;

		ret += __callchain__fprintf_graph(fp, &child->rb_root, new_total,
						  depth + 1,
						  new_depth_mask | (1 << depth),
						  left_margin);
		node = next;
		if (++entries_printed == callchain_param.print_limit)
			break;
	}

	if (callchain_param.mode == CHAIN_GRAPH_REL &&
	    remaining && remaining != total_samples) {
		struct callchain_node rem_node = {
			.hit = remaining,
		};

		if (!rem_sq_bracket)
			return ret;

		if (callchain_param.value == CCVAL_COUNT && child && child->parent) {
			rem_node.count = child->parent->children_count - cumul_count;
			if (rem_node.count <= 0)
				return ret;
		}

		new_depth_mask &= ~(1 << (depth - 1));
		ret += ipchain__fprintf_graph(fp, &rem_node, &rem_hits, depth,
					      new_depth_mask, 0, total_samples,
					      left_margin);
	}

	return ret;
}

/*
 * If there is a single callchain root, don't bother printing
 * its percentage (100% in fractal mode, and the same percentage
 * as the hist in graph mode). This also avoids one level of column.
 *
 * However, when a percent limit is applied, it's possible that the single
 * callchain node has a different (non-100% in fractal mode) percentage.
 */
static bool need_percent_display(struct rb_node *node, u64 parent_samples)
{
	struct callchain_node *cnode;

	if (rb_next(node))
		return true;

	cnode = rb_entry(node, struct callchain_node, rb_node);
	return callchain_cumul_hits(cnode) != parent_samples;
}

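/*
 * Top level graph printer: when the single root needs no percentage of its
 * own, its entries are printed inline after a "---" connector and the
 * recursion starts from its children instead.
 */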
static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
				       u64 total_samples, u64 parent_samples,
				       int left_margin)
{
	struct callchain_node *cnode;
	struct callchain_list *chain;
	u32 entries_printed = 0;
	bool printed = false;
	struct rb_node *node;
	int i = 0;
	int ret = 0;
	char bf[1024];

	node = rb_first(root);
	if (node && !need_percent_display(node, parent_samples)) {
		cnode = rb_entry(node, struct callchain_node, rb_node);
		list_for_each_entry(chain, &cnode->val, list) {
			/*
			 * If we sort by symbol, the first entry is the same as
			 * the symbol. No need to print it, otherwise it appears
			 * displayed twice.
			 */
			if (!i++ && field_order == NULL &&
			    sort_order && strstarts(sort_order, "sym"))
				continue;

			if (!printed) {
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "|\n");
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "---");
				left_margin += 3;
				printed = true;
			} else
				ret += callchain__fprintf_left_margin(fp, left_margin);

			ret += fprintf(fp, "%s",
				       callchain_list__sym_name(chain, bf,
								sizeof(bf),
								false));

			if (symbol_conf.show_branchflag_count)
				ret += callchain_list_counts__printf_value(
						chain, fp, NULL, 0);
			ret += fprintf(fp, "\n");

			if (++entries_printed == callchain_param.print_limit)
				break;
		}
		root = &cnode->rb_root;
	}

	if (callchain_param.mode == CHAIN_GRAPH_REL)
		total_samples = parent_samples;

	ret += __callchain__fprintf_graph(fp, root, total_samples,
					  1, 1, left_margin);
	if (ret) {
		/* do not add a blank line if it printed nothing */
		ret += fprintf(fp, "\n");
	}

	return ret;
}

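/*
 * Flat mode: walk from the root of each chain down to @node (via the
 * parent recursion) and print one symbol per line.
 */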
static size_t __callchain__fprintf_flat(FILE *fp, struct callchain_node *node,
					u64 total_samples)
{
	struct callchain_list *chain;
	size_t ret = 0;
	char bf[1024];

	if (!node)
		return 0;

	ret += __callchain__fprintf_flat(fp, node->parent, total_samples);

	list_for_each_entry(chain, &node->val, list) {
		if (chain->ip >= PERF_CONTEXT_MAX)
			continue;
		ret += fprintf(fp, " %s\n", callchain_list__sym_name(chain,
					bf, sizeof(bf), false));
	}

	return ret;
}

static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *tree,
				      u64 total_samples)
{
	size_t ret = 0;
	u32 entries_printed = 0;
	struct callchain_node *chain;
	struct rb_node *rb_node = rb_first(tree);

	while (rb_node) {
		chain = rb_entry(rb_node, struct callchain_node, rb_node);

		ret += fprintf(fp, " ");
		ret += callchain_node__fprintf_value(chain, fp, total_samples);
		ret += fprintf(fp, "\n");
		ret += __callchain__fprintf_flat(fp, chain, total_samples);
		ret += fprintf(fp, "\n");
		if (++entries_printed == callchain_param.print_limit)
			break;

		rb_node = rb_next(rb_node);
	}

	return ret;
}

static size_t __callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
{
	const char *sep = symbol_conf.field_sep ?: ";";
	struct callchain_list *chain;
	size_t ret = 0;
	char bf[1024];
	bool first;

	if (!node)
		return 0;

	ret += __callchain__fprintf_folded(fp, node->parent);

	first = (ret == 0);
	list_for_each_entry(chain, &node->val, list) {
		if (chain->ip >= PERF_CONTEXT_MAX)
			continue;
		ret += fprintf(fp, "%s%s", first ? "" : sep,
			       callchain_list__sym_name(chain,
						bf, sizeof(bf), false));
		first = false;
	}

	return ret;
}

static size_t callchain__fprintf_folded(FILE *fp, struct rb_root *tree,
					u64 total_samples)
{
	size_t ret = 0;
	u32 entries_printed = 0;
	struct callchain_node *chain;
	struct rb_node *rb_node = rb_first(tree);

	while (rb_node) {
		chain = rb_entry(rb_node, struct callchain_node, rb_node);

		ret += callchain_node__fprintf_value(chain, fp, total_samples);
		ret += fprintf(fp, " ");
		ret += __callchain__fprintf_folded(fp, chain);
		ret += fprintf(fp, "\n");
		if (++entries_printed == callchain_param.print_limit)
			break;

		rb_node = rb_next(rb_node);
	}

	return ret;
}

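/*
 * Dispatch to the graph/flat/folded printers according to
 * callchain_param.mode, using the entry's own period (or accumulated
 * period) as the parent sample count.
 */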
static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
					    u64 total_samples, int left_margin,
					    FILE *fp)
{
	u64 parent_samples = he->stat.period;

	if (symbol_conf.cumulate_callchain)
		parent_samples = he->stat_acc->period;

	switch (callchain_param.mode) {
	case CHAIN_GRAPH_REL:
		return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
						parent_samples, left_margin);
	case CHAIN_GRAPH_ABS:
		return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
						parent_samples, left_margin);
	case CHAIN_FLAT:
		return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples);
	case CHAIN_FOLDED:
		return callchain__fprintf_folded(fp, &he->sorted_chain, total_samples);
	case CHAIN_NONE:
		break;
	default:
		pr_err("Bad callchain mode\n");
	}

	return 0;
}

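/*
 * Format one hist entry into hpp->buf by running every column format of
 * @hpp_list, separated by field_sep (or aligned with spaces when no
 * separator is set).  Returns the number of bytes written.
 */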
int __hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp,
			   struct perf_hpp_list *hpp_list)
{
	const char *sep = symbol_conf.field_sep;
	struct perf_hpp_fmt *fmt;
	char *start = hpp->buf;
	int ret;
	bool first = true;

	if (symbol_conf.exclude_other && !he->parent)
		return 0;

	perf_hpp_list__for_each_format(hpp_list, fmt) {
		if (perf_hpp__should_skip(fmt, he->hists))
			continue;

		/*
		 * If there's no field_sep, we still need
		 * to display initial ' '.
		 */
		if (!sep || !first) {
			ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " ");
			advance_hpp(hpp, ret);
		} else
			first = false;

		if (perf_hpp__use_color() && fmt->color)
			ret = fmt->color(fmt, hpp, he);
		else
			ret = fmt->entry(fmt, hpp, he);

		ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
		advance_hpp(hpp, ret);
	}

	return hpp->buf - start;
}

static int hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp)
{
	return __hist_entry__snprintf(he, hpp, he->hists->hpp_list);
}

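/*
 * Print one entry in --hierarchy mode: indent by the entry depth, print
 * the overhead columns, then the entry's own sort column left-aligned,
 * and finally the callchain for leaf entries when callchains are in use.
 */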
static int hist_entry__hierarchy_fprintf(struct hist_entry *he,
					 struct perf_hpp *hpp,
					 struct hists *hists,
					 FILE *fp)
{
	const char *sep = symbol_conf.field_sep;
	struct perf_hpp_fmt *fmt;
	struct perf_hpp_list_node *fmt_node;
	char *buf = hpp->buf;
	size_t size = hpp->size;
	int ret, printed = 0;
	bool first = true;

	if (symbol_conf.exclude_other && !he->parent)
		return 0;

	ret = scnprintf(hpp->buf, hpp->size, "%*s", he->depth * HIERARCHY_INDENT, "");
	advance_hpp(hpp, ret);

	/* the first hpp_list_node is for overhead columns */
	fmt_node = list_first_entry(&hists->hpp_formats,
				    struct perf_hpp_list_node, list);
	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
		/*
		 * If there's no field_sep, we still need
		 * to display initial ' '.
		 */
		if (!sep || !first) {
			ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " ");
			advance_hpp(hpp, ret);
		} else
			first = false;

		if (perf_hpp__use_color() && fmt->color)
			ret = fmt->color(fmt, hpp, he);
		else
			ret = fmt->entry(fmt, hpp, he);

		ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
		advance_hpp(hpp, ret);
	}

	if (!sep)
		ret = scnprintf(hpp->buf, hpp->size, "%*s",
				(hists->nr_hpp_node - 2) * HIERARCHY_INDENT, "");
	advance_hpp(hpp, ret);

	printed += fprintf(fp, "%s", buf);

	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
		hpp->buf  = buf;
		hpp->size = size;

		/*
		 * No need to call hist_entry__snprintf_alignment() since this
		 * fmt is always the last column in the hierarchy mode.
		 */
		if (perf_hpp__use_color() && fmt->color)
			fmt->color(fmt, hpp, he);
		else
			fmt->entry(fmt, hpp, he);

		/*
		 * dynamic entries are right-aligned but we want left-aligned
		 * in the hierarchy mode
		 */
		printed += fprintf(fp, "%s%s", sep ?: " ", skip_spaces(buf));
	}
	printed += putc('\n', fp);

	if (he->leaf && hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
		u64 total = hists__total_period(hists);

		printed += hist_entry_callchain__fprintf(he, total, 0, fp);
	}

	return printed;
}

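/*
 * Block report helpers: format the same entry once per basic block
 * (report_block) or just once (report_individual_block), printing the
 * lines that were not marked as skipped.
 */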
static int hist_entry__block_fprintf(struct hist_entry *he,
				     char *bf, size_t size,
				     FILE *fp)
{
	struct block_hist *bh = container_of(he, struct block_hist, he);
	int ret = 0;

	for (unsigned int i = 0; i < bh->block_hists.nr_entries; i++) {
		struct perf_hpp hpp = {
			.buf		= bf,
			.size		= size,
			.skip		= false,
		};

		bh->block_idx = i;
		hist_entry__snprintf(he, &hpp);

		if (!hpp.skip)
			ret += fprintf(fp, "%s\n", bf);
	}

	return ret;
}

static int hist_entry__individual_block_fprintf(struct hist_entry *he,
						char *bf, size_t size,
						FILE *fp)
{
	int ret = 0;

	struct perf_hpp hpp = {
		.buf		= bf,
		.size		= size,
		.skip		= false,
	};

	hist_entry__snprintf(he, &hpp);
	if (!hpp.skip)
		ret += fprintf(fp, "%s\n", bf);

	return ret;
}

static int hist_entry__fprintf(struct hist_entry *he, size_t size,
			       char *bf, size_t bfsz, FILE *fp,
			       bool ignore_callchains)
{
	int ret;
	int callchain_ret = 0;
	struct perf_hpp hpp = {
		.buf		= bf,
		.size		= size,
	};
	struct hists *hists = he->hists;
	u64 total_period = hists->stats.total_period;

	if (size == 0 || size > bfsz)
		size = hpp.size = bfsz;

	if (symbol_conf.report_hierarchy)
		return hist_entry__hierarchy_fprintf(he, &hpp, hists, fp);

	if (symbol_conf.report_block)
		return hist_entry__block_fprintf(he, bf, size, fp);

	if (symbol_conf.report_individual_block)
		return hist_entry__individual_block_fprintf(he, bf, size, fp);

	hist_entry__snprintf(he, &hpp);

	ret = fprintf(fp, "%s\n", bf);

	if (hist_entry__has_callchains(he) && !ignore_callchains)
		callchain_ret = hist_entry_callchain__fprintf(he, total_period,
							      0, fp);

	ret += callchain_ret;

	return ret;
}

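/*
 * Indentation helper for --hierarchy output; prints nothing when a field
 * separator is in use or the indent is too small.
 */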
static int print_hierarchy_indent(const char *sep, int indent,
				  const char *line, FILE *fp)
{
	int width;

	if (sep != NULL || indent < 2)
		return 0;

	width = (indent - 2) * HIERARCHY_INDENT;

	return fprintf(fp, "%-*.*s", width, width, line);
}

static int hists__fprintf_hierarchy_headers(struct hists *hists,
					    struct perf_hpp *hpp, FILE *fp)
{
	bool first_node, first_col;
	int indent;
	int depth;
	unsigned width = 0;
	unsigned header_width = 0;
	struct perf_hpp_fmt *fmt;
	struct perf_hpp_list_node *fmt_node;
	const char *sep = symbol_conf.field_sep;

	indent = hists->nr_hpp_node;

	/* preserve max indent depth for column headers */
	print_hierarchy_indent(sep, indent, " ", fp);

	/* the first hpp_list_node is for overhead columns */
	fmt_node = list_first_entry(&hists->hpp_formats,
				    struct perf_hpp_list_node, list);

	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
		fmt->header(fmt, hpp, hists, 0, NULL);
		fprintf(fp, "%s%s", hpp->buf, sep ?: " ");
	}

	/* combine sort headers with ' / ' */
	first_node = true;
	list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
		if (!first_node)
			header_width += fprintf(fp, " / ");
		first_node = false;

		first_col = true;
		perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
			if (perf_hpp__should_skip(fmt, hists))
				continue;

			if (!first_col)
				header_width += fprintf(fp, "+");
			first_col = false;

			fmt->header(fmt, hpp, hists, 0, NULL);

			header_width += fprintf(fp, "%s", strim(hpp->buf));
		}
	}

	fprintf(fp, "\n# ");

	/* preserve max indent depth for initial dots */
	print_hierarchy_indent(sep, indent, dots, fp);

	/* the first hpp_list_node is for overhead columns */
	fmt_node = list_first_entry(&hists->hpp_formats,
				    struct perf_hpp_list_node, list);

	first_col = true;
	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
		if (!first_col)
			fprintf(fp, "%s", sep ?: "..");
		first_col = false;

		width = fmt->width(fmt, hpp, hists);
		fprintf(fp, "%.*s", width, dots);
	}

	depth = 0;
	list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
		first_col = true;
		width = depth * HIERARCHY_INDENT;

		perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
			if (perf_hpp__should_skip(fmt, hists))
				continue;

			if (!first_col)
				width++;	/* for '+' sign between column header */
			first_col = false;

			width += fmt->width(fmt, hpp, hists);
		}

		if (width > header_width)
			header_width = width;

		depth++;
	}

	fprintf(fp, "%s%-.*s", sep ?: " ", header_width, dots);

	fprintf(fp, "\n#\n");

	return 2;
}

static void fprintf_line(struct hists *hists, struct perf_hpp *hpp,
			 int line, FILE *fp)
{
	struct perf_hpp_fmt *fmt;
	const char *sep = symbol_conf.field_sep;
	bool first = true;
	int span = 0;

	hists__for_each_format(hists, fmt) {
		if (perf_hpp__should_skip(fmt, hists))
			continue;

		if (!first && !span)
			fprintf(fp, "%s", sep ?: " ");
		else
			first = false;

		fmt->header(fmt, hpp, hists, line, &span);

		if (!span)
			fprintf(fp, "%s", hpp->buf);
	}
}

static int
hists__fprintf_standard_headers(struct hists *hists,
				struct perf_hpp *hpp,
				FILE *fp)
{
	struct perf_hpp_list *hpp_list = hists->hpp_list;
	struct perf_hpp_fmt *fmt;
	unsigned int width;
	const char *sep = symbol_conf.field_sep;
	bool first = true;
	int line;

	for (line = 0; line < hpp_list->nr_header_lines; line++) {
		/* first # is displayed one level up */
		if (line)
			fprintf(fp, "# ");
		fprintf_line(hists, hpp, line, fp);
		fprintf(fp, "\n");
	}

	if (sep)
		return hpp_list->nr_header_lines;

	fprintf(fp, "# ");

	hists__for_each_format(hists, fmt) {
		unsigned int i;

		if (perf_hpp__should_skip(fmt, hists))
			continue;

		if (!first)
			fprintf(fp, "%s", sep ?: " ");
		else
			first = false;

		width = fmt->width(fmt, hpp, hists);
		for (i = 0; i < width; i++)
			fprintf(fp, ".");
	}

	fprintf(fp, "\n");
	fprintf(fp, "#\n");
	return hpp_list->nr_header_lines + 2;
}

int hists__fprintf_headers(struct hists *hists, FILE *fp)
{
	char bf[1024];
	struct perf_hpp dummy_hpp = {
		.buf	= bf,
		.size	= sizeof(bf),
	};

	fprintf(fp, "# ");

	if (symbol_conf.report_hierarchy)
		return hists__fprintf_hierarchy_headers(hists, &dummy_hpp, fp);

	return hists__fprintf_standard_headers(hists, &dummy_hpp, fp);
}

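/*
 * Main stdio entry point: print the (optional) headers and then every
 * hist entry above min_pcnt, honouring the max_rows/max_cols limits.
 */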
size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
		      int max_cols, float min_pcnt, FILE *fp,
		      bool ignore_callchains)
{
	struct rb_node *nd;
	size_t ret = 0;
	const char *sep = symbol_conf.field_sep;
	int nr_rows = 0;
	size_t linesz;
	char *line = NULL;
	unsigned indent;

	init_rem_hits();

	hists__reset_column_width(hists);

	if (symbol_conf.col_width_list_str)
		perf_hpp__set_user_width(symbol_conf.col_width_list_str);

	if (show_header)
		nr_rows += hists__fprintf_headers(hists, fp);

	if (max_rows && nr_rows >= max_rows)
		goto out;

	linesz = hists__sort_list_width(hists) + 3 + 1;
	linesz += perf_hpp__color_overhead();
	line = malloc(linesz);
	if (line == NULL) {
		ret = -1;
		goto out;
	}

	indent = hists__overhead_width(hists) + 4;

	for (nd = rb_first_cached(&hists->entries); nd;
	     nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
		float percent;

		if (h->filtered)
			continue;

		if (symbol_conf.report_individual_block)
			percent = block_info__total_cycles_percent(h);
		else
			percent = hist_entry__get_percent_limit(h);

		if (percent < min_pcnt)
			continue;

		ret += hist_entry__fprintf(h, max_cols, line, linesz, fp, ignore_callchains);

		if (max_rows && ++nr_rows >= max_rows)
			break;

		/*
		 * If all children are filtered out or percent-limited,
		 * display "no entry >= x.xx%" message.
		 */
		if (!h->leaf && !hist_entry__has_hierarchy_children(h, min_pcnt)) {
			int depth = hists->nr_hpp_node + h->depth + 1;

			print_hierarchy_indent(sep, depth, " ", fp);
			fprintf(fp, "%*sno entry >= %.2f%%\n", indent, "", min_pcnt);

			if (max_rows && ++nr_rows >= max_rows)
				break;
		}

		if (h->ms.map == NULL && verbose > 1) {
			maps__fprintf(h->thread->maps, fp);
			fprintf(fp, "%.10s end\n", graph_dotted_line);
		}
	}

	free(line);
out:
	zfree(&rem_sq_bracket);

	return ret;
}

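/*
 * Print a per-event-type count of the PERF_RECORD_* events seen, skipping
 * types the tool does not know by name.
 */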
size_t events_stats__fprintf(struct events_stats *stats, FILE *fp)
{
	int i;
	size_t ret = 0;

	for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
		const char *name;

		name = perf_event__name(i);
		if (!strcmp(name, "UNKNOWN"))
			continue;

		ret += fprintf(fp, "%16s events: %10d\n", name, stats->nr_events[i]);
	}

	return ret;
}