static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);

struct callchain_param	callchain_param = {
	.mode		= CHAIN_GRAPH_REL,
	.min_percent	= 0.5,
	.order		= ORDER_CALLEE
};

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}

	return false;
}

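/*
 * Column widths are "grow only": hists__new_col_len() widens a column to
 * fit the longest string seen so far and reports whether it grew. A
 * minimal sketch of the behaviour:
 *
 *	hists__new_col_len(hists, HISTC_COMM, 7);	// returns true
 *	hists__new_col_len(hists, HISTC_COMM, 5);	// returns false
 *	hists__col_len(hists, HISTC_COMM);		// still 7
 */
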
static void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}

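/*
 * BITS_PER_LONG / 4 is the number of hex digits in a raw address: 16 on
 * a 64-bit build, 8 on 32-bit. When a sample cannot be resolved to a
 * DSO, the column must be wide enough to print that raw address, unless
 * the user pinned the widths or is filtering by DSO anyway.
 */
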
static void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	u16 len;

	if (h->ms.sym)
		hists__new_col_len(hists, HISTC_SYMBOL, h->ms.sym->namelen + 4);
	else
		hists__set_unres_dso_col_len(hists, HISTC_DSO);

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->branch_info) {
		int symlen;
		/*
		 * +4 accounts for '[x] ' priv level info
		 * +2 accounts for the 0x prefix on raw addresses
		 */
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}
	}
}

static void hist_entry__add_cpumode_period(struct hist_entry *he,
					   unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he->period_guest_us += period;
		break;
	default:
		break;
	}
}

static void hist_entry__decay(struct hist_entry *he)
{
	he->period = (he->period * 7) / 8;
	he->nr_events = (he->nr_events * 7) / 8;
}

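/*
 * Each decay pass scales an entry by 7/8 in integer arithmetic, so a
 * period of 1000 shrinks to 875, then 765, then 669, ... and eventually
 * reaches 0, at which point hists__decay_entry() below reports the
 * entry as dead so it can be reaped.
 */
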
static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->period;

	if (prev_period == 0)
		return true;

	hist_entry__decay(he);

	if (!he->filtered)
		hists->stats.total_period -= prev_period - he->period;

	return he->period == 0;
}

static void __hists__decay_entries(struct hists *hists, bool zap_user,
				   bool zap_kernel, bool threaded)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		/*
		 * We may be annotating this, for instance, so keep it here in
		 * case it gets new samples; we'll eventually free it when the
		 * user stops browsing and it again gets fully decayed.
		 */
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n)) &&
		    !n->used) {
			rb_erase(&n->rb_node, &hists->entries);

			if (sort__need_collapse || threaded)
				rb_erase(&n->rb_node_in, &hists->entries_collapsed);

			hist_entry__free(n);
			--hists->nr_entries;
		}
	}
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	return __hists__decay_entries(hists, zap_user, zap_kernel, false);
}

void hists__decay_entries_threaded(struct hists *hists,
				   bool zap_user, bool zap_kernel)
{
	return __hists__decay_entries(hists, zap_user, zap_kernel, true);
}

/*
 * histogram, sorted on item, collects periods
 */

static struct hist_entry *hist_entry__new(struct hist_entry *template)
{
	size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0;
	struct hist_entry *he = malloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;

		if (he->ms.map)
			he->ms.map->referenced = true;
		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);
	}

	return he;
}

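/*
 * Note the allocation trick above: when callchains are in use, a single
 * malloc() covers the hist_entry plus one trailing callchain_root, and
 * he->callchain refers to that tail (the single allocation implies the
 * callchain member sits at the very end of struct hist_entry). One
 * free() in hist_entry__free() then releases both.
 */
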
static void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered) {
		hists__calc_col_len(hists, h);
		++hists->nr_entries;
		hists->stats.total_period += h->period;
	}
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

static struct hist_entry *add_hist_entry(struct hists *hists,
					 struct hist_entry *entry,
					 struct addr_location *al,
					 u64 period)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;

	pthread_mutex_lock(&hists->lock);

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__cmp(entry, he);

		if (!cmp) {
			he->period += period;
			++he->nr_events;

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				he->ms.map = entry->ms.map;
				if (he->ms.map)
					he->ms.map->referenced = true;
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry);
	if (!he)
		goto out_unlock;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	hist_entry__add_cpumode_period(he, al->cpumode, period);
out_unlock:
	pthread_mutex_unlock(&hists->lock);
	return he;
}

struct hist_entry *__hists__add_branch_entry(struct hists *self,
					     struct addr_location *al,
					     struct symbol *sym_parent,
					     struct branch_info *bi,
					     u64 period)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.ms = {
			.map	= bi->to.map,
			.sym	= bi->to.sym,
		},
		.cpu	= al->cpu,
		.ip	= bi->to.addr,
		.level	= al->level,
		.period	= period,
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
		.branch_info = bi,
	};

	return add_hist_entry(self, &entry, al, period);
}

struct hist_entry *__hists__add_entry(struct hists *self,
				      struct addr_location *al,
				      struct symbol *sym_parent, u64 period)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.cpu	= al->cpu,
		.ip	= al->addr,
		.level	= al->level,
		.period	= period,
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
	};

	return add_hist_entry(self, &entry, al, period);
}

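/*
 * Typical caller flow (a sketch; 'evsel', 'al' and 'sample' are the
 * caller's names, not defined in this file): after resolving a sample
 * to an addr_location, a tool does something like
 *
 *	struct hist_entry *he;
 *
 *	he = __hists__add_entry(&evsel->hists, al, parent, sample->period);
 *	if (he == NULL)
 *		return -ENOMEM;
 *
 * and the entry is either created or merged under hists->lock.
 */
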
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		cmp = se->se_cmp(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		int64_t (*f)(struct hist_entry *, struct hist_entry *);

		f = se->se_collapse ?: se->se_cmp;

		cmp = f(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

void hist_entry__free(struct hist_entry *he)
{
	free(he);
}

/*
 * collapse the histogram
 */

static bool hists__collapse_insert_entry(struct hists *hists __used,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;

		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			iter->period += he->period;
			iter->nr_events += he->nr_events;
			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				callchain_merge(&callchain_cursor,
						iter->callchain,
						he->callchain);
			}
			hist_entry__free(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return true;
}

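/*
 * Returns true when 'he' was linked into 'root' as a new node. Returns
 * false when it collapsed into an existing entry: the periods and event
 * counts are accumulated, the callchains merged if enabled, and 'he'
 * itself is freed.
 */
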
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
}

static void __hists__collapse_resort(struct hists *hists, bool threaded)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse && !threaded)
		return;

	root = hists__get_rotate_entries_in(hists);
	next = rb_first(root);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
	}
}

void hists__collapse_resort(struct hists *hists)
{
	return __hists__collapse_resort(hists, false);
}

void hists__collapse_resort_threaded(struct hists *hists)
{
	return __hists__collapse_resort(hists, true);
}

/*
 * reverse the map, sort on period.
 */

static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (symbol_conf.use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (he->period > iter->period)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}

static void __hists__output_resort(struct hists *hists, bool threaded)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;

	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse || threaded)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists->nr_entries = 0;
	hists->stats.total_period = 0;
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
		hists__inc_nr_entries(hists, n);
	}
}

void hists__output_resort(struct hists *hists)
{
	return __hists__output_resort(hists, false);
}

void hists__output_resort_threaded(struct hists *hists)
{
	return __hists__output_resort(hists, true);
}

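/*
 * Putting the phases together (a sketch of the usual calling sequence,
 * not code from this file): once all samples have been added,
 *
 *	hists__collapse_resort(hists);
 *	hists__output_resort(hists);
 *	hists__fprintf(hists, NULL, false, true, 0, 0, stdout);
 *
 * i.e. merge entries that compare equal, re-sort by period, then print;
 * max_rows/max_cols of 0 mean "unlimited".
 */
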
static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
{
	int i;
	int ret = fprintf(fp, "            ");

	for (i = 0; i < left_margin; i++)
		ret += fprintf(fp, " ");

	return ret;
}

static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
					  int left_margin)
{
	int i;
	size_t ret = callchain__fprintf_left_margin(fp, left_margin);

	for (i = 0; i < depth; i++)
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "|          ");
		else
			ret += fprintf(fp, "           ");

	ret += fprintf(fp, "\n");

	return ret;
}

static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain,
				     int depth, int depth_mask, int period,
				     u64 total_samples, u64 hits,
				     int left_margin)
{
	int i;
	size_t ret = 0;

	ret += callchain__fprintf_left_margin(fp, left_margin);
	for (i = 0; i < depth; i++) {
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "|");
		else
			ret += fprintf(fp, " ");
		if (!period && i == depth - 1) {
			double percent;

			percent = hits * 100.0 / total_samples;
			ret += percent_color_fprintf(fp, "--%2.2f%%-- ", percent);
		} else
			ret += fprintf(fp, "%s", "          ");
	}
	if (chain->ms.sym)
		ret += fprintf(fp, "%s\n", chain->ms.sym->name);
	else
		ret += fprintf(fp, "0x%0" PRIx64 "\n", chain->ip);

	return ret;
}

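/*
 * Together with ipchain__fprintf_graph_line() this renders one branch
 * of the callchain tree, roughly like (illustrative values only):
 *
 *	|
 *	|--50.00%-- foo
 *	|          bar
 *	 --25.00%-- baz
 *
 * Bit i of depth_mask says whether the '|' link of column i is still
 * open, i.e. whether more siblings follow at that depth.
 */
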
static struct symbol *rem_sq_bracket;
static struct callchain_list rem_hits;

static void init_rem_hits(void)
{
	/* + 6 leaves room for the "[...]" string and its NUL terminator */
	rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
	if (!rem_sq_bracket) {
		fprintf(stderr, "Not enough memory to display remaining hits\n");
		return;
	}

	strcpy(rem_sq_bracket->name, "[...]");
	rem_hits.ms.sym = rem_sq_bracket;
}

static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
					 u64 total_samples, int depth,
					 int depth_mask, int left_margin)
{
	struct rb_node *node, *next;
	struct callchain_node *child;
	struct callchain_list *chain;
	int new_depth_mask = depth_mask;
	u64 remaining;
	size_t ret = 0;
	int i;
	uint entries_printed = 0;

	remaining = total_samples;

	node = rb_first(root);
	while (node) {
		u64 new_total;
		u64 cumul;

		child = rb_entry(node, struct callchain_node, rb_node);
		cumul = callchain_cumul_hits(child);
		remaining -= cumul;

		/*
		 * The depth mask manages the output of pipes that show
		 * the depth. We don't want to keep the pipes of the current
		 * level for the last child of this depth.
		 * Except if we have remaining filtered hits. They will
		 * supersede the last child.
		 */
		next = rb_next(node);
		if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
			new_depth_mask &= ~(1 << (depth - 1));

		/*
		 * But we keep the older depth mask for the line separator
		 * to keep the level link until we reach the last child.
		 */
		ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
						   left_margin);
		i = 0;
		list_for_each_entry(chain, &child->val, list) {
			ret += ipchain__fprintf_graph(fp, chain, depth,
						      new_depth_mask, i++,
						      total_samples,
						      cumul,
						      left_margin);
		}

		if (callchain_param.mode == CHAIN_GRAPH_REL)
			new_total = child->children_hit;
		else
			new_total = total_samples;

		ret += __callchain__fprintf_graph(fp, &child->rb_root, new_total,
						  depth + 1,
						  new_depth_mask | (1 << depth),
						  left_margin);
		node = next;
		if (++entries_printed == callchain_param.print_limit)
			break;
	}

	if (callchain_param.mode == CHAIN_GRAPH_REL &&
	    remaining && remaining != total_samples) {

		if (!rem_sq_bracket)
			return ret;

		new_depth_mask &= ~(1 << (depth - 1));
		ret += ipchain__fprintf_graph(fp, &rem_hits, depth,
					      new_depth_mask, 0, total_samples,
					      remaining, left_margin);
	}

	return ret;
}

static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
				       u64 total_samples, int left_margin)
{
	struct callchain_node *cnode;
	struct callchain_list *chain;
	u32 entries_printed = 0;
	bool printed = false;
	struct rb_node *node;
	int i = 0;
	int ret = 0;

	/*
	 * If we have a single callchain root, don't bother printing
	 * its percentage (100 % in fractal mode and the same percentage
	 * as the hist in graph mode). This also avoids one level of column.
	 */
	node = rb_first(root);
	if (node && !rb_next(node)) {
		cnode = rb_entry(node, struct callchain_node, rb_node);
		list_for_each_entry(chain, &cnode->val, list) {
			/*
			 * If we sort by symbol, the first entry is the same
			 * as the symbol. No need to print it, otherwise it
			 * appears twice.
			 */
			if (!i++ && sort__first_dimension == SORT_SYM)
				continue;
			if (!printed) {
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "|\n");
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "---");
				left_margin += 3;
				printed = true;
			} else
				ret += callchain__fprintf_left_margin(fp, left_margin);

			if (chain->ms.sym)
				ret += fprintf(fp, " %s\n", chain->ms.sym->name);
			else
				ret += fprintf(fp, " %p\n", (void *)(long)chain->ip);

			if (++entries_printed == callchain_param.print_limit)
				break;
		}
		root = &cnode->rb_root;
	}

	return __callchain__fprintf_graph(fp, root, total_samples,
					  1, 1, left_margin);
}

static size_t __callchain__fprintf_flat(FILE *fp,
					struct callchain_node *self,
					u64 total_samples)
{
	struct callchain_list *chain;
	size_t ret = 0;

	if (!self)
		return 0;

	ret += __callchain__fprintf_flat(fp, self->parent, total_samples);

	list_for_each_entry(chain, &self->val, list) {
		if (chain->ip >= PERF_CONTEXT_MAX)
			continue;
		if (chain->ms.sym)
			ret += fprintf(fp, "                %s\n", chain->ms.sym->name);
		else
			ret += fprintf(fp, "                %p\n",
				       (void *)(long)chain->ip);
	}

	return ret;
}

static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *self,
				      u64 total_samples)
{
	size_t ret = 0;
	u32 entries_printed = 0;
	struct rb_node *rb_node;
	struct callchain_node *chain;

	rb_node = rb_first(self);
	while (rb_node) {
		double percent;

		chain = rb_entry(rb_node, struct callchain_node, rb_node);
		percent = chain->hit * 100.0 / total_samples;

		ret = percent_color_fprintf(fp, "           %6.2f%%\n", percent);
		ret += __callchain__fprintf_flat(fp, chain, total_samples);
		ret += fprintf(fp, "\n");
		if (++entries_printed == callchain_param.print_limit)
			break;

		rb_node = rb_next(rb_node);
	}

	return ret;
}

static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
					    u64 total_samples, int left_margin,
					    FILE *fp)
{
	switch (callchain_param.mode) {
	case CHAIN_GRAPH_REL:
		return callchain__fprintf_graph(fp, &he->sorted_chain, he->period,
						left_margin);
	case CHAIN_GRAPH_ABS:
		return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
						left_margin);
	case CHAIN_FLAT:
		return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples);
	case CHAIN_NONE:
		break;
	default:
		pr_err("Bad callchain mode\n");
	}

	return 0;
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}

static int hist_entry__pcnt_snprintf(struct hist_entry *he, char *s,
				     size_t size, struct hists *pair_hists,
				     bool show_displacement, long displacement,
				     bool color, u64 total_period)
{
	u64 period, total, period_sys, period_us, period_guest_sys, period_guest_us;
	u64 nr_events;
	const char *sep = symbol_conf.field_sep;
	int ret;

	if (symbol_conf.exclude_other && !he->parent)
		return 0;

	if (pair_hists) {
		period = he->pair ? he->pair->period : 0;
		nr_events = he->pair ? he->pair->nr_events : 0;
		total = pair_hists->stats.total_period;
		period_sys = he->pair ? he->pair->period_sys : 0;
		period_us = he->pair ? he->pair->period_us : 0;
		period_guest_sys = he->pair ? he->pair->period_guest_sys : 0;
		period_guest_us = he->pair ? he->pair->period_guest_us : 0;
	} else {
		period = he->period;
		nr_events = he->nr_events;
		total = total_period;
		period_sys = he->period_sys;
		period_us = he->period_us;
		period_guest_sys = he->period_guest_sys;
		period_guest_us = he->period_guest_us;
	}

	if (total) {
		if (color)
			ret = percent_color_snprintf(s, size,
						     sep ? "%.2f" : "   %6.2f%%",
						     (period * 100.0) / total);
		else
			ret = scnprintf(s, size, sep ? "%.2f" : "   %6.2f%%",
					(period * 100.0) / total);
		if (symbol_conf.show_cpu_utilization) {
			ret += percent_color_snprintf(s + ret, size - ret,
					sep ? "%.2f" : "   %6.2f%%",
					(period_sys * 100.0) / total);
			ret += percent_color_snprintf(s + ret, size - ret,
					sep ? "%.2f" : "   %6.2f%%",
					(period_us * 100.0) / total);
			if (perf_guest) {
				ret += percent_color_snprintf(s + ret,
						size - ret,
						sep ? "%.2f" : "   %6.2f%%",
						(period_guest_sys * 100.0) /
								total);
				ret += percent_color_snprintf(s + ret,
						size - ret,
						sep ? "%.2f" : "   %6.2f%%",
						(period_guest_us * 100.0) /
								total);
			}
		}
	} else
		ret = scnprintf(s, size, sep ? "%" PRIu64 : "%12" PRIu64 " ", period);

	if (symbol_conf.show_nr_samples) {
		if (sep)
			ret += scnprintf(s + ret, size - ret, "%c%" PRIu64, *sep, nr_events);
		else
			ret += scnprintf(s + ret, size - ret, "%11" PRIu64, nr_events);
	}

	if (symbol_conf.show_total_period) {
		if (sep)
			ret += scnprintf(s + ret, size - ret, "%c%" PRIu64, *sep, period);
		else
			ret += scnprintf(s + ret, size - ret, " %12" PRIu64, period);
	}

	if (pair_hists) {
		char bf[32];
		double old_percent = 0, new_percent = 0, diff;

		if (total > 0)
			old_percent = (period * 100.0) / total;
		if (total_period > 0)
			new_percent = (he->period * 100.0) / total_period;

		diff = new_percent - old_percent;

		if (fabs(diff) >= 0.01)
			scnprintf(bf, sizeof(bf), "%+4.2F%%", diff);
		else
			scnprintf(bf, sizeof(bf), " ");

		if (sep)
			ret += scnprintf(s + ret, size - ret, "%c%s", *sep, bf);
		else
			ret += scnprintf(s + ret, size - ret, "%11.11s", bf);

		if (show_displacement) {
			if (displacement)
				scnprintf(bf, sizeof(bf), "%+4ld", displacement);
			else
				scnprintf(bf, sizeof(bf), " ");

			if (sep)
				ret += scnprintf(s + ret, size - ret, "%c%s", *sep, bf);
			else
				ret += scnprintf(s + ret, size - ret, "%6.6s", bf);
		}
	}

	return ret;
}

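/*
 * The Delta column compares percentages, not raw periods: an entry at
 * 10.00% of the baseline (pair) data that now accounts for 10.50% is
 * printed as "+0.50%"; differences smaller than 0.01% are left blank.
 */
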
int hist_entry__snprintf(struct hist_entry *he, char *s, size_t size,
			 struct hists *hists)
{
	const char *sep = symbol_conf.field_sep;
	struct sort_entry *se;
	int ret = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		if (se->elide)
			continue;

		ret += scnprintf(s + ret, size - ret, "%s", sep ?: "  ");
		ret += se->se_snprintf(he, s + ret, size - ret,
				       hists__col_len(hists, se->se_width_idx));
	}

	return ret;
}

static int hist_entry__fprintf(struct hist_entry *he, size_t size,
			       struct hists *hists, struct hists *pair_hists,
			       bool show_displacement, long displacement,
			       u64 total_period, FILE *fp)
{
	char bf[512];
	int ret;

	if (size == 0 || size > sizeof(bf))
		size = sizeof(bf);

	ret = hist_entry__pcnt_snprintf(he, bf, size, pair_hists,
					show_displacement, displacement,
					true, total_period);
	hist_entry__snprintf(he, bf + ret, size - ret, hists);
	return fprintf(fp, "%s\n", bf);
}

static size_t hist_entry__fprintf_callchain(struct hist_entry *he,
					    struct hists *hists,
					    u64 total_period, FILE *fp)
{
	int left_margin = 0;

	if (sort__first_dimension == SORT_COMM) {
		struct sort_entry *se = list_first_entry(&hist_entry__sort_list,
							 typeof(*se), list);
		left_margin = hists__col_len(hists, se->se_width_idx);
		left_margin -= thread__comm_len(he->thread);
	}

	return hist_entry_callchain__fprintf(he, total_period, left_margin, fp);
}

size_t hists__fprintf(struct hists *hists, struct hists *pair,
		      bool show_displacement, bool show_header, int max_rows,
		      int max_cols, FILE *fp)
{
	struct sort_entry *se;
	struct rb_node *nd;
	size_t ret = 0;
	u64 total_period;
	unsigned long position = 1;
	long displacement = 0;
	unsigned int width;
	const char *sep = symbol_conf.field_sep;
	const char *col_width = symbol_conf.col_width_list_str;
	int nr_rows = 0;

	init_rem_hits();

	if (!show_header)
		goto print_entries;

	fprintf(fp, "# %s", pair ? "Baseline" : "Overhead");

	if (symbol_conf.show_cpu_utilization) {
		if (sep) {
			ret += fprintf(fp, "%csys", *sep);
			ret += fprintf(fp, "%cus", *sep);
			if (perf_guest) {
				ret += fprintf(fp, "%cguest sys", *sep);
				ret += fprintf(fp, "%cguest us", *sep);
			}
		} else {
			ret += fprintf(fp, "     sys ");
			ret += fprintf(fp, "      us ");
			if (perf_guest) {
				ret += fprintf(fp, "  guest sys ");
				ret += fprintf(fp, "  guest us ");
			}
		}
	}

	if (symbol_conf.show_nr_samples) {
		if (sep)
			fprintf(fp, "%cSamples", *sep);
		else
			fputs("  Samples  ", fp);
	}

	if (symbol_conf.show_total_period) {
		if (sep)
			ret += fprintf(fp, "%cPeriod", *sep);
		else
			ret += fprintf(fp, "   Period    ");
	}

	if (pair) {
		if (sep)
			ret += fprintf(fp, "%cDelta", *sep);
		else
			ret += fprintf(fp, "  Delta    ");

		if (show_displacement) {
			if (sep)
				ret += fprintf(fp, "%cDisplacement", *sep);
			else
				ret += fprintf(fp, " Displ");
		}
	}

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		if (se->elide)
			continue;
		if (sep) {
			fprintf(fp, "%c%s", *sep, se->se_header);
			continue;
		}
		width = strlen(se->se_header);
		if (symbol_conf.col_width_list_str) {
			if (col_width) {
				hists__set_col_len(hists, se->se_width_idx,
						   atoi(col_width));
				col_width = strchr(col_width, ',');
				if (col_width)
					++col_width;
			}
		}
		if (!hists__new_col_len(hists, se->se_width_idx, width))
			width = hists__col_len(hists, se->se_width_idx);
		fprintf(fp, "  %*s", width, se->se_header);
	}

	fprintf(fp, "\n");
	if (max_rows && ++nr_rows >= max_rows)
		goto out;

	if (sep)
		goto print_entries;

	fprintf(fp, "# ........");
	if (symbol_conf.show_cpu_utilization)
		fprintf(fp, "   .......   .......");
	if (symbol_conf.show_nr_samples)
		fprintf(fp, " ..........");
	if (symbol_conf.show_total_period)
		fprintf(fp, " ............");
	if (pair) {
		fprintf(fp, " ..........");
		if (show_displacement)
			fprintf(fp, " .....");
	}

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		unsigned int i;

		if (se->elide)
			continue;

		fprintf(fp, "  ");
		width = hists__col_len(hists, se->se_width_idx);
		if (width == 0)
			width = strlen(se->se_header);
		for (i = 0; i < width; i++)
			fprintf(fp, ".");
	}

	fprintf(fp, "\n");
	if (max_rows && ++nr_rows >= max_rows)
		goto out;

	fprintf(fp, "#\n");
	if (max_rows && ++nr_rows >= max_rows)
		goto out;

print_entries:
	total_period = hists->stats.total_period;

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (h->filtered)
			continue;

		if (show_displacement) {
			if (h->pair != NULL)
				displacement = ((long)h->pair->position -
						(long)position);
			else
				displacement = 0;
			++position;
		}
		ret += hist_entry__fprintf(h, max_cols, hists, pair, show_displacement,
					   displacement, total_period, fp);

		if (symbol_conf.use_callchain)
			ret += hist_entry__fprintf_callchain(h, hists, total_period, fp);
		if (max_rows && ++nr_rows >= max_rows)
			goto out;

		if (h->ms.map == NULL && verbose > 1) {
			__map_groups__fprintf_maps(&h->thread->mg,
						   MAP__FUNCTION, verbose, fp);
			fprintf(fp, "%.10s end\n", graph_dotted_line);
		}
	}
out:
	free(rem_sq_bracket);

	return ret;
}

/*
 * See hists__fprintf to match the column widths
 */
unsigned int hists__sort_list_width(struct hists *hists)
{
	struct sort_entry *se;
	int ret = 9; /* total % */

	if (symbol_conf.show_cpu_utilization) {
		ret += 7; /* count_sys % */
		ret += 6; /* count_us % */
		if (perf_guest) {
			ret += 13; /* count_guest_sys % */
			ret += 12; /* count_guest_us % */
		}
	}

	if (symbol_conf.show_nr_samples)
		ret += 11;

	if (symbol_conf.show_total_period)
		ret += 13;

	list_for_each_entry(se, &hist_entry__sort_list, list)
		if (!se->elide)
			ret += 2 + hists__col_len(hists, se->se_width_idx);

	if (verbose) /* Addr + origin */
		ret += 3 + BITS_PER_LONG / 4;

	return ret;
}

static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	++hists->nr_entries;
	if (h->ms.unfolded)
		hists->nr_entries += h->nr_rows;
	h->row_offset = 0;
	hists->stats.total_period += h->period;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] += h->nr_events;

	hists__calc_col_len(hists, h);
}

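/*
 * Filtering is non-destructive: each active filter sets its own bit in
 * he->filtered, and hiding/unhiding is just bit twiddling. For example,
 * an entry failing both the DSO and thread filters carries
 *
 *	(1 << HIST_FILTER__DSO) | (1 << HIST_FILTER__THREAD)
 *
 * and only counts towards nr_entries/total_period again once every bit
 * has been cleared.
 */
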
static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

void hists__filter_by_dso(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (hists__filter_entry_by_dso(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
	}
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

void hists__filter_by_thread(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_thread(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
	}
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

void hists__filter_by_symbol(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_symbol(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
	}
}

int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
{
	return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
}

int hist_entry__annotate(struct hist_entry *he, size_t privsize)
{
	return symbol__annotate(he->ms.sym, he->ms.map, privsize);
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	++hists->stats.nr_events[0];
	++hists->stats.nr_events[type];
}

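/*
 * Slot 0 doubles as the grand total: the real PERF_RECORD_* type values
 * start at 1, so nr_events[0] can count every event regardless of type.
 */
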
size_t hists__fprintf_nr_events(struct hists *hists, FILE *fp)
{
	int i;
	size_t ret = 0;

	for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
		const char *name;

		if (hists->stats.nr_events[i] == 0)
			continue;

		name = perf_event__name(i);
		if (!strcmp(name, "UNKNOWN"))
			continue;

		ret += fprintf(fp, "%16s events: %10d\n", name,
			       hists->stats.nr_events[i]);
	}

	return ret;
}