3 #include "../../util/util.h"
4 #include "../../util/hist.h"
5 #include "../../util/sort.h"
6 #include "../../util/evsel.h"
/*
 * Print the fixed left padding used by callchain output, plus
 * @left_margin extra spaces, to @fp.  Returns the number of
 * characters written.
 *
 * NOTE(review): the width of the base padding string was collapsed in
 * the mangled source; restored to the upstream 12-space pad — verify
 * against the project's expected column layout.
 */
static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
{
	int i;
	int ret = fprintf(fp, "            ");

	for (i = 0; i < left_margin; i++)
		ret += fprintf(fp, " ");

	return ret;
}
/*
 * Print one vertical-link separator line of the callchain graph.
 * For each of the @depth levels, a '|' pipe is emitted when the
 * corresponding bit in @depth_mask is set (the level still has
 * siblings below), otherwise blank padding.  Returns chars written.
 *
 * NOTE(review): per-level column widths were collapsed by the source
 * mangling; restored to upstream spacing — confirm column alignment.
 */
static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
					  int left_margin)
{
	int i;
	size_t ret = callchain__fprintf_left_margin(fp, left_margin);

	for (i = 0; i < depth; i++)
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "|          ");
		else
			ret += fprintf(fp, "           ");

	ret += fprintf(fp, "\n");

	return ret;
}
37 static size_t ipchain__fprintf_graph(FILE *fp
, struct callchain_node
*node
,
38 struct callchain_list
*chain
,
39 int depth
, int depth_mask
, int period
,
40 u64 total_samples
, int left_margin
)
46 ret
+= callchain__fprintf_left_margin(fp
, left_margin
);
47 for (i
= 0; i
< depth
; i
++) {
48 if (depth_mask
& (1 << i
))
49 ret
+= fprintf(fp
, "|");
51 ret
+= fprintf(fp
, " ");
52 if (!period
&& i
== depth
- 1) {
53 ret
+= fprintf(fp
, "--");
54 ret
+= callchain_node__fprintf_value(node
, fp
, total_samples
);
55 ret
+= fprintf(fp
, "--");
57 ret
+= fprintf(fp
, "%s", " ");
59 fputs(callchain_list__sym_name(chain
, bf
, sizeof(bf
), false), fp
);
64 static struct symbol
*rem_sq_bracket
;
65 static struct callchain_list rem_hits
;
67 static void init_rem_hits(void)
69 rem_sq_bracket
= malloc(sizeof(*rem_sq_bracket
) + 6);
70 if (!rem_sq_bracket
) {
71 fprintf(stderr
, "Not enough memory to display remaining hits\n");
75 strcpy(rem_sq_bracket
->name
, "[...]");
76 rem_hits
.ms
.sym
= rem_sq_bracket
;
79 static size_t __callchain__fprintf_graph(FILE *fp
, struct rb_root
*root
,
80 u64 total_samples
, int depth
,
81 int depth_mask
, int left_margin
)
83 struct rb_node
*node
, *next
;
84 struct callchain_node
*child
= NULL
;
85 struct callchain_list
*chain
;
86 int new_depth_mask
= depth_mask
;
90 uint entries_printed
= 0;
93 remaining
= total_samples
;
95 node
= rb_first(root
);
100 child
= rb_entry(node
, struct callchain_node
, rb_node
);
101 cumul
= callchain_cumul_hits(child
);
103 cumul_count
+= callchain_cumul_counts(child
);
106 * The depth mask manages the output of pipes that show
107 * the depth. We don't want to keep the pipes of the current
108 * level for the last child of this depth.
109 * Except if we have remaining filtered hits. They will
110 * supersede the last child
112 next
= rb_next(node
);
113 if (!next
&& (callchain_param
.mode
!= CHAIN_GRAPH_REL
|| !remaining
))
114 new_depth_mask
&= ~(1 << (depth
- 1));
117 * But we keep the older depth mask for the line separator
118 * to keep the level link until we reach the last child
120 ret
+= ipchain__fprintf_graph_line(fp
, depth
, depth_mask
,
123 list_for_each_entry(chain
, &child
->val
, list
) {
124 ret
+= ipchain__fprintf_graph(fp
, child
, chain
, depth
,
130 if (callchain_param
.mode
== CHAIN_GRAPH_REL
)
131 new_total
= child
->children_hit
;
133 new_total
= total_samples
;
135 ret
+= __callchain__fprintf_graph(fp
, &child
->rb_root
, new_total
,
137 new_depth_mask
| (1 << depth
),
140 if (++entries_printed
== callchain_param
.print_limit
)
144 if (callchain_param
.mode
== CHAIN_GRAPH_REL
&&
145 remaining
&& remaining
!= total_samples
) {
146 struct callchain_node rem_node
= {
153 if (callchain_param
.value
== CCVAL_COUNT
&& child
&& child
->parent
) {
154 rem_node
.count
= child
->parent
->children_count
- cumul_count
;
155 if (rem_node
.count
<= 0)
159 new_depth_mask
&= ~(1 << (depth
- 1));
160 ret
+= ipchain__fprintf_graph(fp
, &rem_node
, &rem_hits
, depth
,
161 new_depth_mask
, 0, total_samples
,
169 * If have one single callchain root, don't bother printing
170 * its percentage (100 % in fractal mode and the same percentage
171 * than the hist in graph mode). This also avoid one level of column.
173 * However when percent-limit applied, it's possible that single callchain
174 * node have different (non-100% in fractal mode) percentage.
176 static bool need_percent_display(struct rb_node
*node
, u64 parent_samples
)
178 struct callchain_node
*cnode
;
183 cnode
= rb_entry(node
, struct callchain_node
, rb_node
);
184 return callchain_cumul_hits(cnode
) != parent_samples
;
187 static size_t callchain__fprintf_graph(FILE *fp
, struct rb_root
*root
,
188 u64 total_samples
, u64 parent_samples
,
191 struct callchain_node
*cnode
;
192 struct callchain_list
*chain
;
193 u32 entries_printed
= 0;
194 bool printed
= false;
195 struct rb_node
*node
;
200 node
= rb_first(root
);
201 if (node
&& !need_percent_display(node
, parent_samples
)) {
202 cnode
= rb_entry(node
, struct callchain_node
, rb_node
);
203 list_for_each_entry(chain
, &cnode
->val
, list
) {
205 * If we sort by symbol, the first entry is the same than
206 * the symbol. No need to print it otherwise it appears as
209 if (!i
++ && field_order
== NULL
&&
210 sort_order
&& !prefixcmp(sort_order
, "sym"))
213 ret
+= callchain__fprintf_left_margin(fp
, left_margin
);
214 ret
+= fprintf(fp
, "|\n");
215 ret
+= callchain__fprintf_left_margin(fp
, left_margin
);
216 ret
+= fprintf(fp
, "---");
220 ret
+= callchain__fprintf_left_margin(fp
, left_margin
);
222 ret
+= fprintf(fp
, "%s\n", callchain_list__sym_name(chain
, bf
, sizeof(bf
),
225 if (++entries_printed
== callchain_param
.print_limit
)
228 root
= &cnode
->rb_root
;
231 if (callchain_param
.mode
== CHAIN_GRAPH_REL
)
232 total_samples
= parent_samples
;
234 ret
+= __callchain__fprintf_graph(fp
, root
, total_samples
,
237 /* do not add a blank line if it printed nothing */
238 ret
+= fprintf(fp
, "\n");
244 static size_t __callchain__fprintf_flat(FILE *fp
, struct callchain_node
*node
,
247 struct callchain_list
*chain
;
254 ret
+= __callchain__fprintf_flat(fp
, node
->parent
, total_samples
);
257 list_for_each_entry(chain
, &node
->val
, list
) {
258 if (chain
->ip
>= PERF_CONTEXT_MAX
)
260 ret
+= fprintf(fp
, " %s\n", callchain_list__sym_name(chain
,
261 bf
, sizeof(bf
), false));
267 static size_t callchain__fprintf_flat(FILE *fp
, struct rb_root
*tree
,
271 u32 entries_printed
= 0;
272 struct callchain_node
*chain
;
273 struct rb_node
*rb_node
= rb_first(tree
);
276 chain
= rb_entry(rb_node
, struct callchain_node
, rb_node
);
278 ret
+= fprintf(fp
, " ");
279 ret
+= callchain_node__fprintf_value(chain
, fp
, total_samples
);
280 ret
+= fprintf(fp
, "\n");
281 ret
+= __callchain__fprintf_flat(fp
, chain
, total_samples
);
282 ret
+= fprintf(fp
, "\n");
283 if (++entries_printed
== callchain_param
.print_limit
)
286 rb_node
= rb_next(rb_node
);
292 static size_t __callchain__fprintf_folded(FILE *fp
, struct callchain_node
*node
)
294 const char *sep
= symbol_conf
.field_sep
?: ";";
295 struct callchain_list
*chain
;
303 ret
+= __callchain__fprintf_folded(fp
, node
->parent
);
306 list_for_each_entry(chain
, &node
->val
, list
) {
307 if (chain
->ip
>= PERF_CONTEXT_MAX
)
309 ret
+= fprintf(fp
, "%s%s", first
? "" : sep
,
310 callchain_list__sym_name(chain
,
311 bf
, sizeof(bf
), false));
318 static size_t callchain__fprintf_folded(FILE *fp
, struct rb_root
*tree
,
322 u32 entries_printed
= 0;
323 struct callchain_node
*chain
;
324 struct rb_node
*rb_node
= rb_first(tree
);
328 chain
= rb_entry(rb_node
, struct callchain_node
, rb_node
);
330 ret
+= callchain_node__fprintf_value(chain
, fp
, total_samples
);
331 ret
+= fprintf(fp
, " ");
332 ret
+= __callchain__fprintf_folded(fp
, chain
);
333 ret
+= fprintf(fp
, "\n");
334 if (++entries_printed
== callchain_param
.print_limit
)
337 rb_node
= rb_next(rb_node
);
343 static size_t hist_entry_callchain__fprintf(struct hist_entry
*he
,
344 u64 total_samples
, int left_margin
,
347 u64 parent_samples
= he
->stat
.period
;
349 if (symbol_conf
.cumulate_callchain
)
350 parent_samples
= he
->stat_acc
->period
;
352 switch (callchain_param
.mode
) {
353 case CHAIN_GRAPH_REL
:
354 return callchain__fprintf_graph(fp
, &he
->sorted_chain
, total_samples
,
355 parent_samples
, left_margin
);
357 case CHAIN_GRAPH_ABS
:
358 return callchain__fprintf_graph(fp
, &he
->sorted_chain
, total_samples
,
359 parent_samples
, left_margin
);
362 return callchain__fprintf_flat(fp
, &he
->sorted_chain
, total_samples
);
365 return callchain__fprintf_folded(fp
, &he
->sorted_chain
, total_samples
);
370 pr_err("Bad callchain mode\n");
376 static int hist_entry__snprintf(struct hist_entry
*he
, struct perf_hpp
*hpp
)
378 const char *sep
= symbol_conf
.field_sep
;
379 struct perf_hpp_fmt
*fmt
;
380 char *start
= hpp
->buf
;
384 if (symbol_conf
.exclude_other
&& !he
->parent
)
387 hists__for_each_format(he
->hists
, fmt
) {
388 if (perf_hpp__should_skip(fmt
, he
->hists
))
392 * If there's no field_sep, we still need
393 * to display initial ' '.
395 if (!sep
|| !first
) {
396 ret
= scnprintf(hpp
->buf
, hpp
->size
, "%s", sep
?: " ");
397 advance_hpp(hpp
, ret
);
401 if (perf_hpp__use_color() && fmt
->color
)
402 ret
= fmt
->color(fmt
, hpp
, he
);
404 ret
= fmt
->entry(fmt
, hpp
, he
);
406 ret
= hist_entry__snprintf_alignment(he
, hpp
, fmt
, ret
);
407 advance_hpp(hpp
, ret
);
410 return hpp
->buf
- start
;
413 static int hist_entry__hierarchy_fprintf(struct hist_entry
*he
,
414 struct perf_hpp
*hpp
,
418 const char *sep
= symbol_conf
.field_sep
;
419 struct perf_hpp_fmt
*fmt
;
420 struct perf_hpp_list_node
*fmt_node
;
421 char *buf
= hpp
->buf
;
422 size_t size
= hpp
->size
;
423 int ret
, printed
= 0;
426 if (symbol_conf
.exclude_other
&& !he
->parent
)
429 ret
= scnprintf(hpp
->buf
, hpp
->size
, "%*s", he
->depth
* HIERARCHY_INDENT
, "");
430 advance_hpp(hpp
, ret
);
432 /* the first hpp_list_node is for overhead columns */
433 fmt_node
= list_first_entry(&hists
->hpp_formats
,
434 struct perf_hpp_list_node
, list
);
435 perf_hpp_list__for_each_format(&fmt_node
->hpp
, fmt
) {
437 * If there's no field_sep, we still need
438 * to display initial ' '.
440 if (!sep
|| !first
) {
441 ret
= scnprintf(hpp
->buf
, hpp
->size
, "%s", sep
?: " ");
442 advance_hpp(hpp
, ret
);
446 if (perf_hpp__use_color() && fmt
->color
)
447 ret
= fmt
->color(fmt
, hpp
, he
);
449 ret
= fmt
->entry(fmt
, hpp
, he
);
451 ret
= hist_entry__snprintf_alignment(he
, hpp
, fmt
, ret
);
452 advance_hpp(hpp
, ret
);
456 ret
= scnprintf(hpp
->buf
, hpp
->size
, "%*s",
457 (hists
->nr_hpp_node
- 2) * HIERARCHY_INDENT
, "");
458 advance_hpp(hpp
, ret
);
460 printed
+= fprintf(fp
, "%s", buf
);
462 perf_hpp_list__for_each_format(he
->hpp_list
, fmt
) {
467 * No need to call hist_entry__snprintf_alignment() since this
468 * fmt is always the last column in the hierarchy mode.
470 if (perf_hpp__use_color() && fmt
->color
)
471 fmt
->color(fmt
, hpp
, he
);
473 fmt
->entry(fmt
, hpp
, he
);
476 * dynamic entries are right-aligned but we want left-aligned
477 * in the hierarchy mode
479 printed
+= fprintf(fp
, "%s%s", sep
?: " ", ltrim(buf
));
481 printed
+= putc('\n', fp
);
483 if (symbol_conf
.use_callchain
&& he
->leaf
) {
484 u64 total
= hists__total_period(hists
);
486 printed
+= hist_entry_callchain__fprintf(he
, total
, 0, fp
);
494 static int hist_entry__fprintf(struct hist_entry
*he
, size_t size
,
496 char *bf
, size_t bfsz
, FILE *fp
)
499 struct perf_hpp hpp
= {
503 u64 total_period
= hists
->stats
.total_period
;
505 if (size
== 0 || size
> bfsz
)
506 size
= hpp
.size
= bfsz
;
508 if (symbol_conf
.report_hierarchy
)
509 return hist_entry__hierarchy_fprintf(he
, &hpp
, hists
, fp
);
511 hist_entry__snprintf(he
, &hpp
);
513 ret
= fprintf(fp
, "%s\n", bf
);
515 if (symbol_conf
.use_callchain
)
516 ret
+= hist_entry_callchain__fprintf(he
, total_period
, 0, fp
);
521 static int print_hierarchy_indent(const char *sep
, int indent
,
522 const char *line
, FILE *fp
)
524 if (sep
!= NULL
|| indent
< 2)
527 return fprintf(fp
, "%-.*s", (indent
- 2) * HIERARCHY_INDENT
, line
);
530 static int print_hierarchy_header(struct hists
*hists
, struct perf_hpp
*hpp
,
531 const char *sep
, FILE *fp
)
533 bool first_node
, first_col
;
537 unsigned header_width
= 0;
538 struct perf_hpp_fmt
*fmt
;
539 struct perf_hpp_list_node
*fmt_node
;
541 indent
= hists
->nr_hpp_node
;
543 /* preserve max indent depth for column headers */
544 print_hierarchy_indent(sep
, indent
, spaces
, fp
);
546 /* the first hpp_list_node is for overhead columns */
547 fmt_node
= list_first_entry(&hists
->hpp_formats
,
548 struct perf_hpp_list_node
, list
);
550 perf_hpp_list__for_each_format(&fmt_node
->hpp
, fmt
) {
551 fmt
->header(fmt
, hpp
, hists_to_evsel(hists
));
552 fprintf(fp
, "%s%s", hpp
->buf
, sep
?: " ");
555 /* combine sort headers with ' / ' */
557 list_for_each_entry_continue(fmt_node
, &hists
->hpp_formats
, list
) {
559 header_width
+= fprintf(fp
, " / ");
563 perf_hpp_list__for_each_format(&fmt_node
->hpp
, fmt
) {
564 if (perf_hpp__should_skip(fmt
, hists
))
568 header_width
+= fprintf(fp
, "+");
571 fmt
->header(fmt
, hpp
, hists_to_evsel(hists
));
574 header_width
+= fprintf(fp
, "%s", ltrim(hpp
->buf
));
580 /* preserve max indent depth for initial dots */
581 print_hierarchy_indent(sep
, indent
, dots
, fp
);
583 /* the first hpp_list_node is for overhead columns */
584 fmt_node
= list_first_entry(&hists
->hpp_formats
,
585 struct perf_hpp_list_node
, list
);
588 perf_hpp_list__for_each_format(&fmt_node
->hpp
, fmt
) {
590 fprintf(fp
, "%s", sep
?: "..");
593 width
= fmt
->width(fmt
, hpp
, hists_to_evsel(hists
));
594 fprintf(fp
, "%.*s", width
, dots
);
598 list_for_each_entry_continue(fmt_node
, &hists
->hpp_formats
, list
) {
600 width
= depth
* HIERARCHY_INDENT
;
602 perf_hpp_list__for_each_format(&fmt_node
->hpp
, fmt
) {
603 if (perf_hpp__should_skip(fmt
, hists
))
607 width
++; /* for '+' sign between column header */
610 width
+= fmt
->width(fmt
, hpp
, hists_to_evsel(hists
));
613 if (width
> header_width
)
614 header_width
= width
;
619 fprintf(fp
, "%s%-.*s", sep
?: " ", header_width
, dots
);
621 fprintf(fp
, "\n#\n");
626 size_t hists__fprintf(struct hists
*hists
, bool show_header
, int max_rows
,
627 int max_cols
, float min_pcnt
, FILE *fp
)
629 struct perf_hpp_fmt
*fmt
;
630 struct perf_hpp_list_node
*fmt_node
;
634 const char *sep
= symbol_conf
.field_sep
;
637 struct perf_hpp dummy_hpp
= {
648 hists__for_each_format(hists
, fmt
)
649 perf_hpp__reset_width(fmt
, hists
);
651 if (symbol_conf
.col_width_list_str
)
652 perf_hpp__set_user_width(symbol_conf
.col_width_list_str
);
659 if (symbol_conf
.report_hierarchy
) {
660 list_for_each_entry(fmt_node
, &hists
->hpp_formats
, list
) {
661 perf_hpp_list__for_each_format(&fmt_node
->hpp
, fmt
)
662 perf_hpp__reset_width(fmt
, hists
);
664 nr_rows
+= print_hierarchy_header(hists
, &dummy_hpp
, sep
, fp
);
668 hists__for_each_format(hists
, fmt
) {
669 if (perf_hpp__should_skip(fmt
, hists
))
673 fprintf(fp
, "%s", sep
?: " ");
677 fmt
->header(fmt
, &dummy_hpp
, hists_to_evsel(hists
));
678 fprintf(fp
, "%s", bf
);
682 if (max_rows
&& ++nr_rows
>= max_rows
)
692 hists__for_each_format(hists
, fmt
) {
695 if (perf_hpp__should_skip(fmt
, hists
))
699 fprintf(fp
, "%s", sep
?: " ");
703 width
= fmt
->width(fmt
, &dummy_hpp
, hists_to_evsel(hists
));
704 for (i
= 0; i
< width
; i
++)
709 if (max_rows
&& ++nr_rows
>= max_rows
)
713 if (max_rows
&& ++nr_rows
>= max_rows
)
717 linesz
= hists__sort_list_width(hists
) + 3 + 1;
718 linesz
+= perf_hpp__color_overhead();
719 line
= malloc(linesz
);
725 indent
= hists__overhead_width(hists
) + 4;
727 for (nd
= rb_first(&hists
->entries
); nd
; nd
= __rb_hierarchy_next(nd
, HMD_FORCE_CHILD
)) {
728 struct hist_entry
*h
= rb_entry(nd
, struct hist_entry
, rb_node
);
734 percent
= hist_entry__get_percent_limit(h
);
735 if (percent
< min_pcnt
)
738 ret
+= hist_entry__fprintf(h
, max_cols
, hists
, line
, linesz
, fp
);
740 if (max_rows
&& ++nr_rows
>= max_rows
)
744 * If all children are filtered out or percent-limited,
745 * display "no entry >= x.xx%" message.
747 if (!h
->leaf
&& !hist_entry__has_hierarchy_children(h
, min_pcnt
)) {
748 int depth
= hists
->nr_hpp_node
+ h
->depth
+ 1;
750 print_hierarchy_indent(sep
, depth
, spaces
, fp
);
751 fprintf(fp
, "%*sno entry >= %.2f%%\n", indent
, "", min_pcnt
);
753 if (max_rows
&& ++nr_rows
>= max_rows
)
757 if (h
->ms
.map
== NULL
&& verbose
> 1) {
758 __map_groups__fprintf_maps(h
->thread
->mg
,
760 fprintf(fp
, "%.10s end\n", graph_dotted_line
);
766 zfree(&rem_sq_bracket
);
771 size_t events_stats__fprintf(struct events_stats
*stats
, FILE *fp
)
776 for (i
= 0; i
< PERF_RECORD_HEADER_MAX
; ++i
) {
779 if (stats
->nr_events
[i
] == 0)
782 name
= perf_event__name(i
);
783 if (!strcmp(name
, "UNKNOWN"))
786 ret
+= fprintf(fp
, "%16s events: %10d\n", name
,
787 stats
->nr_events
[i
]);