// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include <linux/string.h>

#include "../../util/util.h"
#include "../../util/hist.h"
#include "../../util/sort.h"
#include "../../util/evsel.h"
#include "../../util/srcline.h"
#include "../../util/string2.h"
#include "../../util/thread.h"
#include "../../util/sane_ctype.h"

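/*
 * stdio output for 'perf report'/'perf top --stdio': the helpers below
 * format hist entries and their callchains as plain text.
 * callchain__fprintf_left_margin() pads a line so that callchain output
 * starts under the entry's columns rather than at column zero.
 */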
static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
{
	int i;
	int ret = fprintf(fp, " ");

	for (i = 0; i < left_margin; i++)
		ret += fprintf(fp, " ");

	return ret;
}

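/*
 * Print one "spacer" line of the callchain graph: a '|' is emitted for
 * every depth level whose bit is still set in depth_mask, keeping the
 * vertical links between a parent and its remaining siblings alive.
 */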
static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
					  int left_margin)
{
	int i;
	size_t ret = callchain__fprintf_left_margin(fp, left_margin);

	for (i = 0; i < depth; i++)
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "| ");
		else
			ret += fprintf(fp, " ");

	ret += fprintf(fp, "\n");

	return ret;
}

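/*
 * Print a single callchain entry.  When printing the first entry of a
 * node (period == 0 at the deepest level), the node's value (percentage,
 * period or count, depending on callchain_param.value) is spliced into
 * the branch line as "--<value>--"; the symbol name follows, optionally
 * with branch flag counts appended.
 */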
static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_node *node,
				     struct callchain_list *chain,
				     int depth, int depth_mask, int period,
				     u64 total_samples, int left_margin)
{
	int i;
	size_t ret = 0;
	char bf[1024], *alloc_str = NULL;
	char buf[64];
	const char *str;

	ret += callchain__fprintf_left_margin(fp, left_margin);
	for (i = 0; i < depth; i++) {
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "|");
		else
			ret += fprintf(fp, " ");
		if (!period && i == depth - 1) {
			ret += fprintf(fp, "--");
			ret += callchain_node__fprintf_value(node, fp, total_samples);
			ret += fprintf(fp, "--");
		} else
			ret += fprintf(fp, "%s", " ");
	}

	str = callchain_list__sym_name(chain, bf, sizeof(bf), false);

	if (symbol_conf.show_branchflag_count) {
		callchain_list_counts__printf_value(chain, NULL,
						    buf, sizeof(buf));

		if (asprintf(&alloc_str, "%s%s", str, buf) < 0)
			str = "Not enough memory!";
		else
			str = alloc_str;
	}

	fputs(str, fp);
	fputc('\n', fp);
	free(alloc_str);

	return ret;
}

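/*
 * "[...]" pseudo symbol used to represent hits that were filtered out of
 * the graph (e.g. by a percent limit) but still account for samples.
 */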
static struct symbol *rem_sq_bracket;
static struct callchain_list rem_hits;

static void init_rem_hits(void)
{
	rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
	if (!rem_sq_bracket) {
		fprintf(stderr, "Not enough memory to display remaining hits\n");
		return;
	}

	strcpy(rem_sq_bracket->name, "[...]");
	rem_hits.ms.sym = rem_sq_bracket;
}

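/*
 * Recursively print one level of the callchain graph.  depth_mask tracks
 * which ancestor levels still have siblings to print, so their '|'
 * connectors are kept alive on every line.  Roughly, the output looks like
 * this (a sketch; exact spacing depends on the configured value type):
 *
 *            |
 *            ---foo
 *               |
 *               |--60.00%--bar
 *               |
 *                --40.00%--baz
 */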
static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
					 u64 total_samples, int depth,
					 int depth_mask, int left_margin)
{
	struct rb_node *node, *next;
	struct callchain_node *child = NULL;
	struct callchain_list *chain;
	int new_depth_mask = depth_mask;
	u64 remaining;
	size_t ret = 0;
	int i;
	uint entries_printed = 0;
	int cumul_count = 0;

	remaining = total_samples;

	node = rb_first(root);
	while (node) {
		u64 new_total;
		u64 cumul;

		child = rb_entry(node, struct callchain_node, rb_node);
		cumul = callchain_cumul_hits(child);
		remaining -= cumul;
		cumul_count += callchain_cumul_counts(child);

		/*
		 * The depth mask manages the output of pipes that show
		 * the depth. We don't want to keep the pipes of the current
		 * level for the last child of this depth.
		 * Except if we have remaining filtered hits. They will
		 * supersede the last child.
		 */
		next = rb_next(node);
		if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
			new_depth_mask &= ~(1 << (depth - 1));

		/*
		 * But we keep the older depth mask for the line separator
		 * to keep the level link until we reach the last child.
		 */
		ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
						   left_margin);
		i = 0;
		list_for_each_entry(chain, &child->val, list) {
			ret += ipchain__fprintf_graph(fp, child, chain, depth,
						      new_depth_mask, i++,
						      total_samples,
						      left_margin);
		}

		if (callchain_param.mode == CHAIN_GRAPH_REL)
			new_total = child->children_hit;
		else
			new_total = total_samples;

		ret += __callchain__fprintf_graph(fp, &child->rb_root, new_total,
						  depth + 1,
						  new_depth_mask | (1 << depth),
						  left_margin);
		node = next;
		if (++entries_printed == callchain_param.print_limit)
			break;
	}

	if (callchain_param.mode == CHAIN_GRAPH_REL &&
	    remaining && remaining != total_samples) {
		struct callchain_node rem_node = {
			.hit = remaining,
		};

		if (callchain_param.value == CCVAL_COUNT && child && child->parent) {
			rem_node.count = child->parent->children_count - cumul_count;
			if (rem_node.count <= 0)
				return ret;
		}

		new_depth_mask &= ~(1 << (depth - 1));
		ret += ipchain__fprintf_graph(fp, &rem_node, &rem_hits, depth,
					      new_depth_mask, 0, total_samples,
					      left_margin);
	}

	return ret;
}

/*
 * If we have a single callchain root, don't bother printing
 * its percentage (100% in fractal mode, and the same percentage
 * as the hist entry in graph mode). This also avoids one level of column.
 *
 * However, when a percent limit is applied, it is possible that the single
 * callchain node has a different (non-100% in fractal mode) percentage.
 */
static bool need_percent_display(struct rb_node *node, u64 parent_samples)
{
	struct callchain_node *cnode;

	if (rb_next(node))
		return true;

	cnode = rb_entry(node, struct callchain_node, rb_node);
	return callchain_cumul_hits(cnode) != parent_samples;
}

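/*
 * Entry point for graph-mode callchain output.  When the single root does
 * not need its own percentage line (see need_percent_display()), its chain
 * is printed inline after a "---" connector and the recursion starts at
 * the root's children.
 */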
static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
				       u64 total_samples, u64 parent_samples,
				       int left_margin)
{
	struct callchain_node *cnode;
	struct callchain_list *chain;
	u32 entries_printed = 0;
	bool printed = false;
	struct rb_node *node;
	int i = 0;
	int ret = 0;
	char bf[1024];

	node = rb_first(root);
	if (node && !need_percent_display(node, parent_samples)) {
		cnode = rb_entry(node, struct callchain_node, rb_node);
		list_for_each_entry(chain, &cnode->val, list) {
			/*
			 * If we sort by symbol, the first entry is the same as
			 * the symbol itself. No need to print it, otherwise it
			 * would appear twice.
			 */
			if (!i++ && field_order == NULL &&
			    sort_order && strstarts(sort_order, "sym"))
				continue;

			if (!printed) {
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "|\n");
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "---");
				left_margin += 3;
				printed = true;
			} else
				ret += callchain__fprintf_left_margin(fp, left_margin);

			ret += fprintf(fp, "%s",
				       callchain_list__sym_name(chain, bf,
								sizeof(bf), false));

			if (symbol_conf.show_branchflag_count)
				ret += callchain_list_counts__printf_value(
						chain, fp, NULL, 0);
			ret += fprintf(fp, "\n");

			if (++entries_printed == callchain_param.print_limit)
				break;
		}
		root = &cnode->rb_root;
	}

	if (callchain_param.mode == CHAIN_GRAPH_REL)
		total_samples = parent_samples;

	ret += __callchain__fprintf_graph(fp, root, total_samples,
					  1, 1, left_margin);
	if (ret) {
		/* do not add a blank line if it printed nothing */
		ret += fprintf(fp, "\n");
	}

	return ret;
}

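/*
 * Flat mode: walk from the root of the chain down to this node (via the
 * recursive call on node->parent) and print one symbol per line.
 */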
static size_t __callchain__fprintf_flat(FILE *fp, struct callchain_node *node,
					u64 total_samples)
{
	struct callchain_list *chain;
	size_t ret = 0;
	char bf[1024];

	if (!node)
		return 0;

	ret += __callchain__fprintf_flat(fp, node->parent, total_samples);

	list_for_each_entry(chain, &node->val, list) {
		if (chain->ip >= PERF_CONTEXT_MAX)
			continue;
		ret += fprintf(fp, " %s\n", callchain_list__sym_name(chain,
					bf, sizeof(bf), false));
	}

	return ret;
}

static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *tree,
				      u64 total_samples)
{
	size_t ret = 0;
	u32 entries_printed = 0;
	struct callchain_node *chain;
	struct rb_node *rb_node = rb_first(tree);

	while (rb_node) {
		chain = rb_entry(rb_node, struct callchain_node, rb_node);

		ret += fprintf(fp, " ");
		ret += callchain_node__fprintf_value(chain, fp, total_samples);
		ret += fprintf(fp, "\n");
		ret += __callchain__fprintf_flat(fp, chain, total_samples);
		ret += fprintf(fp, "\n");
		if (++entries_printed == callchain_param.print_limit)
			break;

		rb_node = rb_next(rb_node);
	}

	return ret;
}

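/*
 * Folded mode: print the whole chain on a single line, entries separated
 * by the field separator (';' by default), a format convenient for flame
 * graph scripts.
 */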
static size_t __callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
{
	const char *sep = symbol_conf.field_sep ?: ";";
	struct callchain_list *chain;
	size_t ret = 0;
	char bf[1024];
	bool first;

	if (!node)
		return 0;

	ret += __callchain__fprintf_folded(fp, node->parent);

	first = (ret == 0);
	list_for_each_entry(chain, &node->val, list) {
		if (chain->ip >= PERF_CONTEXT_MAX)
			continue;
		ret += fprintf(fp, "%s%s", first ? "" : sep,
			       callchain_list__sym_name(chain,
							bf, sizeof(bf), false));
		first = false;
	}

	return ret;
}

static size_t callchain__fprintf_folded(FILE *fp, struct rb_root *tree,
					u64 total_samples)
{
	size_t ret = 0;
	u32 entries_printed = 0;
	struct callchain_node *chain;
	struct rb_node *rb_node = rb_first(tree);

	while (rb_node) {
		chain = rb_entry(rb_node, struct callchain_node, rb_node);

		ret += callchain_node__fprintf_value(chain, fp, total_samples);
		ret += fprintf(fp, " ");
		ret += __callchain__fprintf_folded(fp, chain);
		ret += fprintf(fp, "\n");
		if (++entries_printed == callchain_param.print_limit)
			break;

		rb_node = rb_next(rb_node);
	}

	return ret;
}

static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
					    u64 total_samples, int left_margin,
					    FILE *fp)
{
	u64 parent_samples = he->stat.period;

	if (symbol_conf.cumulate_callchain)
		parent_samples = he->stat_acc->period;

	switch (callchain_param.mode) {
	case CHAIN_GRAPH_REL:
		return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
						parent_samples, left_margin);
	case CHAIN_GRAPH_ABS:
		return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
						parent_samples, left_margin);
	case CHAIN_FLAT:
		return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples);
	case CHAIN_FOLDED:
		return callchain__fprintf_folded(fp, &he->sorted_chain, total_samples);
	default:
		pr_err("Bad callchain mode\n");
	}

	return 0;
}

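/*
 * Format a single hist entry into hpp->buf by running every configured
 * output format (overhead, comm, dso, symbol, ...) in order, separated by
 * field_sep (or padding when no separator is set).
 */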
int __hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp,
			   struct perf_hpp_list *hpp_list)
{
	const char *sep = symbol_conf.field_sep;
	struct perf_hpp_fmt *fmt;
	char *start = hpp->buf;
	int ret;
	bool first = true;

	if (symbol_conf.exclude_other && !he->parent)
		return 0;

	perf_hpp_list__for_each_format(hpp_list, fmt) {
		if (perf_hpp__should_skip(fmt, he->hists))
			continue;

		/*
		 * If there's no field_sep, we still need
		 * to display the initial ' '.
		 */
		if (!sep || !first) {
			ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " ");
			advance_hpp(hpp, ret);
		} else
			first = false;

		if (perf_hpp__use_color() && fmt->color)
			ret = fmt->color(fmt, hpp, he);
		else
			ret = fmt->entry(fmt, hpp, he);

		ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
		advance_hpp(hpp, ret);
	}

	return hpp->buf - start;
}

static int hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp)
{
	return __hist_entry__snprintf(he, hpp, he->hists->hpp_list);
}

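/*
 * Hierarchy mode (--hierarchy): each entry is indented by its depth, the
 * overhead columns are printed first and the entry's own sort key is
 * printed last, left-aligned.  Leaf entries may be followed by their
 * callchain.
 */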
static int hist_entry__hierarchy_fprintf(struct hist_entry *he,
					 struct perf_hpp *hpp,
					 struct hists *hists,
					 FILE *fp)
{
	const char *sep = symbol_conf.field_sep;
	struct perf_hpp_fmt *fmt;
	struct perf_hpp_list_node *fmt_node;
	char *buf = hpp->buf;
	size_t size = hpp->size;
	int ret, printed = 0;
	bool first = true;

	if (symbol_conf.exclude_other && !he->parent)
		return 0;

	ret = scnprintf(hpp->buf, hpp->size, "%*s", he->depth * HIERARCHY_INDENT, "");
	advance_hpp(hpp, ret);

	/* the first hpp_list_node is for overhead columns */
	fmt_node = list_first_entry(&hists->hpp_formats,
				    struct perf_hpp_list_node, list);
	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
		/*
		 * If there's no field_sep, we still need
		 * to display the initial ' '.
		 */
		if (!sep || !first) {
			ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " ");
			advance_hpp(hpp, ret);
		} else
			first = false;

		if (perf_hpp__use_color() && fmt->color)
			ret = fmt->color(fmt, hpp, he);
		else
			ret = fmt->entry(fmt, hpp, he);

		ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
		advance_hpp(hpp, ret);
	}

	ret = scnprintf(hpp->buf, hpp->size, "%*s",
			(hists->nr_hpp_node - 2) * HIERARCHY_INDENT, "");
	advance_hpp(hpp, ret);

	printed += fprintf(fp, "%s", buf);

	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
		hpp->buf = buf;
		hpp->size = size;

		/*
		 * No need to call hist_entry__snprintf_alignment() since this
		 * fmt is always the last column in the hierarchy mode.
		 */
		if (perf_hpp__use_color() && fmt->color)
			fmt->color(fmt, hpp, he);
		else
			fmt->entry(fmt, hpp, he);

		/*
		 * dynamic entries are right-aligned, but we want them
		 * left-aligned in the hierarchy mode
		 */
		printed += fprintf(fp, "%s%s", sep ?: " ", ltrim(buf));
	}
	printed += putc('\n', fp);

	if (symbol_conf.use_callchain && he->leaf) {
		u64 total = hists__total_period(hists);

		printed += hist_entry_callchain__fprintf(he, total, 0, fp);
	}

	return printed;
}

static int hist_entry__fprintf(struct hist_entry *he, size_t size,
			       char *bf, size_t bfsz, FILE *fp,
			       bool use_callchain)
{
	int ret;
	int callchain_ret = 0;
	struct perf_hpp hpp = {
		.buf	= bf,
		.size	= size,
	};
	struct hists *hists = he->hists;
	u64 total_period = hists->stats.total_period;

	if (size == 0 || size > bfsz)
		size = hpp.size = bfsz;

	if (symbol_conf.report_hierarchy)
		return hist_entry__hierarchy_fprintf(he, &hpp, hists, fp);

	hist_entry__snprintf(he, &hpp);

	ret = fprintf(fp, "%s\n", bf);

	if (use_callchain)
		callchain_ret = hist_entry_callchain__fprintf(he, total_period,
							      0, fp);

	ret += callchain_ret;

	return ret;
}

static int print_hierarchy_indent(const char *sep, int indent,
				  const char *line, FILE *fp)
{
	if (sep != NULL || indent < 2)
		return 0;

	return fprintf(fp, "%-.*s", (indent - 2) * HIERARCHY_INDENT, line);
}

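/*
 * Header lines for hierarchy mode: the overhead column headers first, then
 * the sort keys of each level combined with ' / ', followed by a line of
 * dots matching the maximum header width.
 */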
static int hists__fprintf_hierarchy_headers(struct hists *hists,
					    struct perf_hpp *hpp, FILE *fp)
{
	bool first_node, first_col;
	int indent;
	int depth;
	unsigned int width = 0;
	unsigned header_width = 0;
	struct perf_hpp_fmt *fmt;
	struct perf_hpp_list_node *fmt_node;
	const char *sep = symbol_conf.field_sep;

	indent = hists->nr_hpp_node;

	/* preserve max indent depth for column headers */
	print_hierarchy_indent(sep, indent, spaces, fp);

	/* the first hpp_list_node is for overhead columns */
	fmt_node = list_first_entry(&hists->hpp_formats,
				    struct perf_hpp_list_node, list);

	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
		fmt->header(fmt, hpp, hists, 0, NULL);
		fprintf(fp, "%s%s", hpp->buf, sep ?: " ");
	}

	/* combine sort headers with ' / ' */
	first_node = true;
	list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
		if (!first_node)
			header_width += fprintf(fp, " / ");
		first_node = false;

		first_col = true;
		perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
			if (perf_hpp__should_skip(fmt, hists))
				continue;

			if (!first_col)
				header_width += fprintf(fp, "+");
			first_col = false;

			fmt->header(fmt, hpp, hists, 0, NULL);

			header_width += fprintf(fp, "%s", trim(hpp->buf));
		}
	}

	fprintf(fp, "\n# ");

	/* preserve max indent depth for initial dots */
	print_hierarchy_indent(sep, indent, dots, fp);

	/* the first hpp_list_node is for overhead columns */
	fmt_node = list_first_entry(&hists->hpp_formats,
				    struct perf_hpp_list_node, list);

	first_col = true;
	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
		if (!first_col)
			fprintf(fp, "%s", sep ?: "..");
		first_col = false;

		width = fmt->width(fmt, hpp, hists);
		fprintf(fp, "%.*s", width, dots);
	}

	depth = 0;
	list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
		first_col = true;
		width = depth * HIERARCHY_INDENT;

		perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
			if (perf_hpp__should_skip(fmt, hists))
				continue;

			if (!first_col)
				width++;	/* for the '+' sign between column headers */
			first_col = false;

			width += fmt->width(fmt, hpp, hists);
		}

		if (width > header_width)
			header_width = width;

		depth++;
	}

	fprintf(fp, "%s%-.*s", sep ?: " ", header_width, dots);

	fprintf(fp, "\n#\n");

	return 2;
}

static void fprintf_line(struct hists *hists, struct perf_hpp *hpp,
			 int line, FILE *fp)
{
	struct perf_hpp_fmt *fmt;
	const char *sep = symbol_conf.field_sep;
	bool first = true;
	int span = 0;

	hists__for_each_format(hists, fmt) {
		if (perf_hpp__should_skip(fmt, hists))
			continue;

		if (!first && !span)
			fprintf(fp, "%s", sep ?: " ");
		else
			first = false;

		fmt->header(fmt, hpp, hists, line, &span);

		if (!span)
			fprintf(fp, "%s", hpp->buf);
	}
}

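/*
 * Standard (non-hierarchy) headers: one line per configured header row,
 * then, when no field separator is in use, a row of dots underlining each
 * column.
 */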
static int
hists__fprintf_standard_headers(struct hists *hists,
				struct perf_hpp *hpp,
				FILE *fp)
{
	struct perf_hpp_list *hpp_list = hists->hpp_list;
	struct perf_hpp_fmt *fmt;
	unsigned int width;
	unsigned int i;
	const char *sep = symbol_conf.field_sep;
	int line;

	for (line = 0; line < hpp_list->nr_header_lines; line++) {
		/* first # is displayed one level up */
		if (line)
			fprintf(fp, "# ");
		fprintf_line(hists, hpp, line, fp);
		fprintf(fp, "\n");
	}

	if (sep)
		return hpp_list->nr_header_lines;

	hists__for_each_format(hists, fmt) {
		if (perf_hpp__should_skip(fmt, hists))
			continue;

		fprintf(fp, "%s", sep ?: " ");

		width = fmt->width(fmt, hpp, hists);
		for (i = 0; i < width; i++)
			fprintf(fp, ".");
	}

	fprintf(fp, "\n");
	fprintf(fp, "#\n");

	return hpp_list->nr_header_lines + 2;
}

int hists__fprintf_headers(struct hists *hists, FILE *fp)
{
	char bf[1024];
	struct perf_hpp dummy_hpp = {
		.buf	= bf,
		.size	= sizeof(bf),
	};

	fprintf(fp, "# ");

	if (symbol_conf.report_hierarchy)
		return hists__fprintf_hierarchy_headers(hists, &dummy_hpp, fp);
	else
		return hists__fprintf_standard_headers(hists, &dummy_hpp, fp);
}

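/*
 * Top level stdio printer: optionally print the headers, then print every
 * hist entry that survives the minimum percent filter, stopping after
 * max_rows lines when a limit is given.
 */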
size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
		      int max_cols, float min_pcnt, FILE *fp,
		      bool use_callchain)
{
	struct rb_node *nd;
	size_t ret = 0;
	const char *sep = symbol_conf.field_sep;
	int nr_rows = 0;
	size_t linesz;
	char *line = NULL;
	unsigned indent;

	init_rem_hits();

	hists__reset_column_width(hists);

	if (symbol_conf.col_width_list_str)
		perf_hpp__set_user_width(symbol_conf.col_width_list_str);

	if (show_header)
		nr_rows += hists__fprintf_headers(hists, fp);

	if (max_rows && nr_rows >= max_rows)
		goto out;

	linesz = hists__sort_list_width(hists) + 3 + 1;
	linesz += perf_hpp__color_overhead();
	line = malloc(linesz);
	if (line == NULL)
		goto out;

	indent = hists__overhead_width(hists) + 4;

	for (nd = rb_first(&hists->entries); nd; nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
		float percent;

		percent = hist_entry__get_percent_limit(h);
		if (percent < min_pcnt)
			continue;

		ret += hist_entry__fprintf(h, max_cols, line, linesz, fp, use_callchain);

		if (max_rows && ++nr_rows >= max_rows)
			break;

		/*
		 * If all children are filtered out or percent-limited,
		 * display the "no entry >= x.xx%" message.
		 */
		if (!h->leaf && !hist_entry__has_hierarchy_children(h, min_pcnt)) {
			int depth = hists->nr_hpp_node + h->depth + 1;

			print_hierarchy_indent(sep, depth, spaces, fp);
			fprintf(fp, "%*sno entry >= %.2f%%\n", indent, "", min_pcnt);

			if (max_rows && ++nr_rows >= max_rows)
				break;
		}

		if (h->ms.map == NULL && verbose > 1) {
			__map_groups__fprintf_maps(h->thread->mg,
						   MAP__FUNCTION, fp);
			fprintf(fp, "%.10s end\n", graph_dotted_line);
		}
	}

	free(line);
out:
	zfree(&rem_sq_bracket);

	return ret;
}

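/*
 * Print a summary of the event records seen (MMAP, COMM, SAMPLE, ...),
 * skipping record types with no hits and unknown record types.
 */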
size_t events_stats__fprintf(struct events_stats *stats, FILE *fp)
{
	int i;
	size_t ret = 0;

	for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
		const char *name;

		if (stats->nr_events[i] == 0)
			continue;

		name = perf_event__name(i);
		if (!strcmp(name, "UNKNOWN"))
			continue;

		ret += fprintf(fp, "%16s events: %10d\n", name,
			       stats->nr_events[i]);
	}

	return ret;
}