static bool hists__filter_entry_by_dso(struct hists *hists,
                                       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
                                          struct hist_entry *he);

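/*
 * Global callchain formatting parameters. CHAIN_GRAPH_REL prints each
 * branch as a percentage of its parent's hits (see
 * __callchain__fprintf_graph() below).
 */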
struct callchain_param  callchain_param = {
        .mode           = CHAIN_GRAPH_REL,
        .min_percent    = 0.5,
        .order          = ORDER_CALLEE
};

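/*
 * Column-width bookkeeping: hists__new_col_len() only ever grows a column,
 * returning true when it did, so callers can react to a widened column.
 */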
u16 hists__col_len(struct hists *hists, enum hist_column col)
{
        return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
        hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
        if (len > hists__col_len(hists, col)) {
                hists__set_col_len(hists, col, len);
                return true;
        }
        return false;
}

static void hists__reset_col_len(struct hists *hists)
{
        enum hist_column col;

        for (col = 0; col < HISTC_NR_COLS; ++col)
                hists__set_col_len(hists, col, 0);
}

static void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
        u16 len;

        if (h->ms.sym)
                hists__new_col_len(hists, HISTC_SYMBOL, h->ms.sym->namelen);
        else {
                const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

                if (hists__col_len(hists, HISTC_DSO) < unresolved_col_width &&
                    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
                    !symbol_conf.dso_list)
                        hists__set_col_len(hists, HISTC_DSO,
                                           unresolved_col_width);
        }

        len = thread__comm_len(h->thread);
        if (hists__new_col_len(hists, HISTC_COMM, len))
                hists__set_col_len(hists, HISTC_THREAD, len + 6);

        if (h->ms.map) {
                len = dso__name_len(h->ms.map->dso);
                hists__new_col_len(hists, HISTC_DSO, len);
        }
}

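/* Credit the sample period to the right bucket: host/guest x kernel/user. */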
static void hist_entry__add_cpumode_period(struct hist_entry *self,
                                           unsigned int cpumode, u64 period)
{
        switch (cpumode) {
        case PERF_RECORD_MISC_KERNEL:
                self->period_sys += period;
                break;
        case PERF_RECORD_MISC_USER:
                self->period_us += period;
                break;
        case PERF_RECORD_MISC_GUEST_KERNEL:
                self->period_guest_sys += period;
                break;
        case PERF_RECORD_MISC_GUEST_USER:
                self->period_guest_us += period;
                break;
        default:
                break;
        }
}

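/*
 * Exponential decay for live-updating tools such as perf top: each pass
 * scales an entry's period and event count by 7/8, so entries that stop
 * getting samples fade out over successive refreshes.
 */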
static void hist_entry__decay(struct hist_entry *he)
{
        he->period = (he->period * 7) / 8;
        he->nr_events = (he->nr_events * 7) / 8;
}

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
        u64 prev_period = he->period;

        if (prev_period == 0)
                return true;

        hist_entry__decay(he);

        if (!he->filtered)
                hists->stats.total_period -= prev_period - he->period;

        return he->period == 0;
}

static void __hists__decay_entries(struct hists *hists, bool zap_user,
                                   bool zap_kernel, bool threaded)
{
        struct rb_node *next = rb_first(&hists->entries);
        struct hist_entry *n;

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node);
                next = rb_next(&n->rb_node);
                /*
                 * We may be annotating this, for instance, so keep it here
                 * in case it gets new samples; we'll eventually free it when
                 * the user stops browsing and it again gets fully decayed.
                 */
                if (((zap_user && n->level == '.') ||
                     (zap_kernel && n->level != '.') ||
                     hists__decay_entry(hists, n)) &&
                    !n->used) {
                        rb_erase(&n->rb_node, &hists->entries);

                        if (sort__need_collapse || threaded)
                                rb_erase(&n->rb_node_in, &hists->entries_collapsed);

                        hist_entry__free(n);
                        --hists->nr_entries;
                }
        }
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
        __hists__decay_entries(hists, zap_user, zap_kernel, false);
}

void hists__decay_entries_threaded(struct hists *hists,
                                   bool zap_user, bool zap_kernel)
{
        __hists__decay_entries(hists, zap_user, zap_kernel, true);
}

/*
 * histogram, sorted on item, collects periods
 */

static struct hist_entry *hist_entry__new(struct hist_entry *template)
{
        size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0;
        struct hist_entry *self = malloc(sizeof(*self) + callchain_size);

        if (self != NULL) {
                *self = *template;

                if (self->ms.map)
                        self->ms.map->referenced = true;
                if (symbol_conf.use_callchain)
                        callchain_init(self->callchain);
        }

        return self;
}

static void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h)
{
        if (!h->filtered) {
                hists__calc_col_len(hists, h);
                ++hists->nr_entries;
                hists->stats.total_period += h->period;
        }
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
        if (symbol_conf.exclude_other && parent == NULL)
                return 1 << HIST_FILTER__PARENT;
        return 0;
}

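/*
 * Samples are inserted into whichever of the two input trees
 * hists->entries_in currently points at (see entries_in_array), under
 * hists->lock and keyed by rb_node_in, so a threaded consumer can collapse
 * one tree while producers keep filling the other.
 */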
struct hist_entry *__hists__add_entry(struct hists *hists,
                                      struct addr_location *al,
                                      struct symbol *sym_parent, u64 period)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct hist_entry *he;
        struct hist_entry entry = {
                .thread = al->thread,
                .ms = {
                        .map    = al->map,
                        .sym    = al->sym,
                },
                .cpu    = al->cpu,
                .ip     = al->addr,
                .level  = al->level,
                .period = period,
                .nr_events = 1,
                .parent = sym_parent,
                .filtered = symbol__parent_filter(sym_parent),
        };
        int64_t cmp;

        pthread_mutex_lock(&hists->lock);

        p = &hists->entries_in->rb_node;

        while (*p != NULL) {
                parent = *p;
                he = rb_entry(parent, struct hist_entry, rb_node_in);

                cmp = hist_entry__cmp(&entry, he);

                if (!cmp) {
                        he->period += period;
                        ++he->nr_events;
                        goto out;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        he = hist_entry__new(&entry);
        if (!he)
                goto out_unlock;

        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
        hist_entry__add_cpumode_period(he, al->cpumode, period);
out_unlock:
        pthread_mutex_unlock(&hists->lock);
        return he;
}

int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
        struct sort_entry *se;
        int64_t cmp = 0;

        list_for_each_entry(se, &hist_entry__sort_list, list) {
                cmp = se->se_cmp(left, right);
                if (cmp)
                        break;
        }

        return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
        struct sort_entry *se;
        int64_t cmp = 0;

        list_for_each_entry(se, &hist_entry__sort_list, list) {
                int64_t (*f)(struct hist_entry *, struct hist_entry *);

                f = se->se_collapse ?: se->se_cmp;

                cmp = f(left, right);
                if (cmp)
                        break;
        }

        return cmp;
}

void hist_entry__free(struct hist_entry *he)
{
        free(he);
}

/*
 * collapse the histogram
 */

static bool hists__collapse_insert_entry(struct hists *hists,
                                         struct rb_root *root,
                                         struct hist_entry *he)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;
        int64_t cmp;

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node_in);

                cmp = hist_entry__collapse(iter, he);

                if (!cmp) {
                        iter->period += he->period;
                        iter->nr_events += he->nr_events;
                        if (symbol_conf.use_callchain) {
                                callchain_cursor_reset(&hists->callchain_cursor);
                                callchain_merge(&hists->callchain_cursor, iter->callchain,
                                                he->callchain);
                        }
                        hist_entry__free(he);
                        return false;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color(&he->rb_node_in, root);
        return true;
}

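/*
 * Rotate the double-buffered input trees: hand the filled tree to the
 * caller and point new insertions at the other one.
 */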
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
        struct rb_root *root;

        pthread_mutex_lock(&hists->lock);

        root = hists->entries_in;
        if (++hists->entries_in > &hists->entries_in_array[1])
                hists->entries_in = &hists->entries_in_array[0];

        pthread_mutex_unlock(&hists->lock);

        return root;
}

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
        hists__filter_entry_by_dso(hists, he);
        hists__filter_entry_by_thread(hists, he);
}

static void __hists__collapse_resort(struct hists *hists, bool threaded)
{
        struct rb_root *root;
        struct rb_node *next;
        struct hist_entry *n;

        if (!sort__need_collapse && !threaded)
                return;

        root = hists__get_rotate_entries_in(hists);
        next = rb_first(root);

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node_in);
                next = rb_next(&n->rb_node_in);

                rb_erase(&n->rb_node_in, root);
                if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
                        /*
                         * If it wasn't combined with one of the entries already
                         * collapsed, we need to apply the filters that may have
                         * been set by, say, the hist_browser.
                         */
                        hists__apply_filters(hists, n);
                }
        }
}

void hists__collapse_resort(struct hists *hists)
{
        __hists__collapse_resort(hists, false);
}

void hists__collapse_resort_threaded(struct hists *hists)
{
        __hists__collapse_resort(hists, true);
}

/*
 * reverse the map, sort on period.
 */

static void __hists__insert_output_entry(struct rb_root *entries,
                                         struct hist_entry *he,
                                         u64 min_callchain_hits)
{
        struct rb_node **p = &entries->rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;

        if (symbol_conf.use_callchain)
                callchain_param.sort(&he->sorted_chain, he->callchain,
                                     min_callchain_hits, &callchain_param);

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node);

                if (he->period > iter->period)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&he->rb_node, parent, p);
        rb_insert_color(&he->rb_node, entries);
}

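/*
 * Rebuild hists->entries sorted by period, recomputing nr_entries,
 * total_period and the column widths as entries are re-inserted.
 */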
static void __hists__output_resort(struct hists *hists, bool threaded)
{
        struct rb_root *root;
        struct rb_node *next;
        struct hist_entry *n;
        u64 min_callchain_hits;

        min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

        if (sort__need_collapse || threaded)
                root = &hists->entries_collapsed;
        else
                root = hists->entries_in;

        next = rb_first(root);
        hists->entries = RB_ROOT;

        hists->nr_entries = 0;
        hists->stats.total_period = 0;
        hists__reset_col_len(hists);

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node_in);
                next = rb_next(&n->rb_node_in);

                __hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
                hists__inc_nr_entries(hists, n);
        }
}

void hists__output_resort(struct hists *hists)
{
        __hists__output_resort(hists, false);
}

void hists__output_resort_threaded(struct hists *hists)
{
        __hists__output_resort(hists, true);
}

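/*
 * Callchain rendering: the graph printers draw one '|' pipe per depth
 * level, and depth_mask is a bitmap telling which levels still need their
 * vertical link drawn.
 */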
static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
{
        int i;
        int ret = fprintf(fp, "            ");

        for (i = 0; i < left_margin; i++)
                ret += fprintf(fp, " ");

        return ret;
}

static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
                                          int left_margin)
{
        int i;
        size_t ret = callchain__fprintf_left_margin(fp, left_margin);

        for (i = 0; i < depth; i++)
                if (depth_mask & (1 << i))
                        ret += fprintf(fp, "|          ");
                else
                        ret += fprintf(fp, "           ");

        ret += fprintf(fp, "\n");

        return ret;
}

static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain,
                                     int depth, int depth_mask, int period,
                                     u64 total_samples, u64 hits,
                                     int left_margin)
{
        int i;
        size_t ret = 0;

        ret += callchain__fprintf_left_margin(fp, left_margin);
        for (i = 0; i < depth; i++) {
                if (depth_mask & (1 << i))
                        ret += fprintf(fp, "|");
                else
                        ret += fprintf(fp, " ");
                if (!period && i == depth - 1) {
                        double percent;

                        percent = hits * 100.0 / total_samples;
                        ret += percent_color_fprintf(fp, "--%2.2f%%-- ", percent);
                } else
                        ret += fprintf(fp, "%s", "          ");
        }
        if (chain->ms.sym)
                ret += fprintf(fp, "%s\n", chain->ms.sym->name);
        else
                ret += fprintf(fp, "%p\n", (void *)(long)chain->ip);

        return ret;
}

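/*
 * Fake "[...]" symbol used to print the share of hits that fell below the
 * minimum percentage in a relative-graph callchain.
 */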
static struct symbol *rem_sq_bracket;
static struct callchain_list rem_hits;

static void init_rem_hits(void)
{
        rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
        if (!rem_sq_bracket) {
                fprintf(stderr, "Not enough memory to display remaining hits\n");
                return;
        }

        strcpy(rem_sq_bracket->name, "[...]");
        rem_hits.ms.sym = rem_sq_bracket;
}

static size_t __callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
                                         u64 total_samples, int depth,
                                         int depth_mask, int left_margin)
{
        struct rb_node *node, *next;
        struct callchain_node *child;
        struct callchain_list *chain;
        int new_depth_mask = depth_mask;
        u64 new_total;
        u64 remaining;
        size_t ret = 0;
        int i;
        uint entries_printed = 0;

        if (callchain_param.mode == CHAIN_GRAPH_REL)
                new_total = self->children_hit;
        else
                new_total = total_samples;

        remaining = new_total;

        node = rb_first(&self->rb_root);
        while (node) {
                u64 cumul;

                child = rb_entry(node, struct callchain_node, rb_node);
                cumul = callchain_cumul_hits(child);
                remaining -= cumul;

                /*
                 * The depth mask manages the output of pipes that show
                 * the depth. We don't want to keep the pipes of the current
                 * level for the last child of this depth.
                 * Except if we have remaining filtered hits. They will
                 * supersede the last child.
                 */
                next = rb_next(node);
                if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
                        new_depth_mask &= ~(1 << (depth - 1));

                /*
                 * But we keep the older depth mask for the line separator
                 * to keep the level link until we reach the last child.
                 */
                ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
                                                   left_margin);
                i = 0;
                list_for_each_entry(chain, &child->val, list) {
                        ret += ipchain__fprintf_graph(fp, chain, depth,
                                                      new_depth_mask, i++,
                                                      new_total, cumul,
                                                      left_margin);
                }
                ret += __callchain__fprintf_graph(fp, child, new_total,
                                                  depth + 1,
                                                  new_depth_mask | (1 << depth),
                                                  left_margin);
                node = next;
                if (++entries_printed == callchain_param.print_limit)
                        break;
        }

        if (callchain_param.mode == CHAIN_GRAPH_REL &&
            remaining && remaining != new_total) {

                if (!rem_sq_bracket)
                        return ret;

                new_depth_mask &= ~(1 << (depth - 1));

                ret += ipchain__fprintf_graph(fp, &rem_hits, depth,
                                              new_depth_mask, 0, new_total,
                                              remaining, left_margin);
        }

        return ret;
}

static size_t callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
                                       u64 total_samples, int left_margin)
{
        struct callchain_list *chain;
        bool printed = false;
        int i = 0;
        size_t ret = 0;
        u32 entries_printed = 0;

        list_for_each_entry(chain, &self->val, list) {
                if (!i++ && sort__first_dimension == SORT_SYM)
                        continue;

                if (!printed) {
                        ret += callchain__fprintf_left_margin(fp, left_margin);
                        ret += fprintf(fp, "|\n");
                        ret += callchain__fprintf_left_margin(fp, left_margin);
                        ret += fprintf(fp, "---");

                        left_margin += 3;
                        printed = true;
                } else
                        ret += callchain__fprintf_left_margin(fp, left_margin);

                if (chain->ms.sym)
                        ret += fprintf(fp, " %s\n", chain->ms.sym->name);
                else
                        ret += fprintf(fp, " %p\n", (void *)(long)chain->ip);

                if (++entries_printed == callchain_param.print_limit)
                        break;
        }

        ret += __callchain__fprintf_graph(fp, self, total_samples, 1, 1, left_margin);

        return ret;
}

static size_t callchain__fprintf_flat(FILE *fp, struct callchain_node *self,
                                      u64 total_samples)
{
        struct callchain_list *chain;
        size_t ret = 0;

        if (!self)
                return 0;

        ret += callchain__fprintf_flat(fp, self->parent, total_samples);

        list_for_each_entry(chain, &self->val, list) {
                if (chain->ip >= PERF_CONTEXT_MAX)
                        continue;
                if (chain->ms.sym)
                        ret += fprintf(fp, "                %s\n", chain->ms.sym->name);
                else
                        ret += fprintf(fp, "                %p\n",
                                       (void *)(long)chain->ip);
        }

        return ret;
}

static size_t hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self,
                                            u64 total_samples, int left_margin)
{
        struct rb_node *rb_node;
        struct callchain_node *chain;
        size_t ret = 0;
        u32 entries_printed = 0;

        rb_node = rb_first(&self->sorted_chain);
        while (rb_node) {
                double percent;

                chain = rb_entry(rb_node, struct callchain_node, rb_node);
                percent = chain->hit * 100.0 / total_samples;
                switch (callchain_param.mode) {
                case CHAIN_FLAT:
                        ret += percent_color_fprintf(fp, "           %6.2f%%\n",
                                                     percent);
                        ret += callchain__fprintf_flat(fp, chain, total_samples);
                        break;
                case CHAIN_GRAPH_ABS: /* fall through */
                case CHAIN_GRAPH_REL:
                        ret += callchain__fprintf_graph(fp, chain, total_samples,
                                                        left_margin);
                case CHAIN_NONE:
                default:
                        break;
                }
                ret += fprintf(fp, "\n");
                if (++entries_printed == callchain_param.print_limit)
                        break;
                rb_node = rb_next(rb_node);
        }

        return ret;
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
        struct rb_node *next = rb_first(&hists->entries);
        struct hist_entry *n;
        int row = 0;

        hists__reset_col_len(hists);

        while (next && row++ < max_rows) {
                n = rb_entry(next, struct hist_entry, rb_node);
                if (!n->filtered)
                        hists__calc_col_len(hists, n);
                next = rb_next(&n->rb_node);
        }
}

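/*
 * Format the leading percentage/period columns for one entry. When
 * pair_hists is set (perf diff), the baseline numbers come from the paired
 * entry and a Delta column is emitted.
 */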
static int hist_entry__pcnt_snprintf(struct hist_entry *self, char *s,
                                     size_t size, struct hists *pair_hists,
                                     bool show_displacement, long displacement,
                                     bool color, u64 session_total)
{
        u64 period, total, period_sys, period_us, period_guest_sys, period_guest_us;
        u64 nr_events;
        const char *sep = symbol_conf.field_sep;
        int ret;

        if (symbol_conf.exclude_other && !self->parent)
                return 0;

        if (pair_hists) {
                period = self->pair ? self->pair->period : 0;
                nr_events = self->pair ? self->pair->nr_events : 0;
                total = pair_hists->stats.total_period;
                period_sys = self->pair ? self->pair->period_sys : 0;
                period_us = self->pair ? self->pair->period_us : 0;
                period_guest_sys = self->pair ? self->pair->period_guest_sys : 0;
                period_guest_us = self->pair ? self->pair->period_guest_us : 0;
        } else {
                period = self->period;
                nr_events = self->nr_events;
                total = session_total;
                period_sys = self->period_sys;
                period_us = self->period_us;
                period_guest_sys = self->period_guest_sys;
                period_guest_us = self->period_guest_us;
        }

        if (total) {
                if (color)
                        ret = percent_color_snprintf(s, size,
                                                     sep ? "%.2f" : "   %6.2f%%",
                                                     (period * 100.0) / total);
                else
                        ret = snprintf(s, size, sep ? "%.2f" : "   %6.2f%%",
                                       (period * 100.0) / total);
                if (symbol_conf.show_cpu_utilization) {
                        ret += percent_color_snprintf(s + ret, size - ret,
                                                      sep ? "%.2f" : "   %6.2f%%",
                                                      (period_sys * 100.0) / total);
                        ret += percent_color_snprintf(s + ret, size - ret,
                                                      sep ? "%.2f" : "   %6.2f%%",
                                                      (period_us * 100.0) / total);
                        if (perf_guest) {
                                ret += percent_color_snprintf(s + ret,
                                                              size - ret,
                                                              sep ? "%.2f" : "   %6.2f%%",
                                                              (period_guest_sys * 100.0) /
                                                              total);
                                ret += percent_color_snprintf(s + ret,
                                                              size - ret,
                                                              sep ? "%.2f" : "   %6.2f%%",
                                                              (period_guest_us * 100.0) /
                                                              total);
                        }
                }
        } else
                ret = snprintf(s, size, sep ? "%" PRIu64 : "%12" PRIu64 " ", period);

        if (symbol_conf.show_nr_samples) {
                if (sep)
                        ret += snprintf(s + ret, size - ret, "%c%" PRIu64, *sep, nr_events);
                else
                        ret += snprintf(s + ret, size - ret, "%11" PRIu64, nr_events);
        }

        if (symbol_conf.show_total_period) {
                if (sep)
                        ret += snprintf(s + ret, size - ret, "%c%" PRIu64, *sep, period);
                else
                        ret += snprintf(s + ret, size - ret, " %12" PRIu64, period);
        }

        if (pair_hists) {
                char bf[32];
                double old_percent = 0, new_percent = 0, diff;

                if (total > 0)
                        old_percent = (period * 100.0) / total;
                if (session_total > 0)
                        new_percent = (self->period * 100.0) / session_total;

                diff = new_percent - old_percent;

                if (fabs(diff) >= 0.01)
                        snprintf(bf, sizeof(bf), "%+4.2F%%", diff);
                else
                        snprintf(bf, sizeof(bf), " ");

                if (sep)
                        ret += snprintf(s + ret, size - ret, "%c%s", *sep, bf);
                else
                        ret += snprintf(s + ret, size - ret, "%11.11s", bf);

                if (show_displacement) {
                        if (displacement)
                                snprintf(bf, sizeof(bf), "%+4ld", displacement);
                        else
                                snprintf(bf, sizeof(bf), " ");

                        if (sep)
                                ret += snprintf(s + ret, size - ret, "%c%s", *sep, bf);
                        else
                                ret += snprintf(s + ret, size - ret, "%6.6s", bf);
                }
        }

        return ret;
}

int hist_entry__snprintf(struct hist_entry *he, char *s, size_t size,
                         struct hists *hists)
{
        const char *sep = symbol_conf.field_sep;
        struct sort_entry *se;
        int ret = 0;

        list_for_each_entry(se, &hist_entry__sort_list, list) {
                if (se->elide)
                        continue;

                ret += snprintf(s + ret, size - ret, "%s", sep ?: "  ");
                ret += se->se_snprintf(he, s + ret, size - ret,
                                       hists__col_len(hists, se->se_width_idx));
        }

        return ret;
}

int hist_entry__fprintf(struct hist_entry *he, size_t size, struct hists *hists,
                        struct hists *pair_hists, bool show_displacement,
                        long displacement, FILE *fp, u64 session_total)
{
        char bf[512];
        int ret;

        if (size == 0 || size > sizeof(bf))
                size = sizeof(bf);

        ret = hist_entry__pcnt_snprintf(he, bf, size, pair_hists,
                                        show_displacement, displacement,
                                        true, session_total);
        hist_entry__snprintf(he, bf + ret, size - ret, hists);
        return fprintf(fp, "%s\n", bf);
}

static size_t hist_entry__fprintf_callchain(struct hist_entry *self,
                                            struct hists *hists, FILE *fp,
                                            u64 session_total)
{
        int left_margin = 0;

        if (sort__first_dimension == SORT_COMM) {
                struct sort_entry *se = list_first_entry(&hist_entry__sort_list,
                                                         typeof(*se), list);
                left_margin = hists__col_len(hists, se->se_width_idx);
                left_margin -= thread__comm_len(self->thread);
        }

        return hist_entry_callchain__fprintf(fp, self, session_total,
                                             left_margin);
}

size_t hists__fprintf(struct hists *hists, struct hists *pair,
                      bool show_displacement, bool show_header, int max_rows,
                      int max_cols, FILE *fp)
{
        struct sort_entry *se;
        struct rb_node *nd;
        size_t ret = 0;
        unsigned long position = 1;
        long displacement = 0;
        unsigned int width;
        const char *sep = symbol_conf.field_sep;
        const char *col_width = symbol_conf.col_width_list_str;
        int nr_rows = 0;

        init_rem_hits();

        if (!show_header)
                goto print_entries;

        fprintf(fp, "# %s", pair ? "Baseline" : "Overhead");

        if (symbol_conf.show_nr_samples) {
                if (sep)
                        fprintf(fp, "%cSamples", *sep);
                else
                        fputs("  Samples  ", fp);
        }

        if (symbol_conf.show_total_period) {
                if (sep)
                        ret += fprintf(fp, "%cPeriod", *sep);
                else
                        ret += fprintf(fp, "   Period    ");
        }

        if (symbol_conf.show_cpu_utilization) {
                if (sep) {
                        ret += fprintf(fp, "%csys", *sep);
                        ret += fprintf(fp, "%cus", *sep);
                        if (perf_guest) {
                                ret += fprintf(fp, "%cguest sys", *sep);
                                ret += fprintf(fp, "%cguest us", *sep);
                        }
                } else {
                        ret += fprintf(fp, "     sys  ");
                        ret += fprintf(fp, "      us  ");
                        if (perf_guest) {
                                ret += fprintf(fp, "  guest sys  ");
                                ret += fprintf(fp, "  guest us  ");
                        }
                }
        }

        if (pair) {
                if (sep)
                        ret += fprintf(fp, "%cDelta", *sep);
                else
                        ret += fprintf(fp, "  Delta    ");

                if (show_displacement) {
                        if (sep)
                                ret += fprintf(fp, "%cDisplacement", *sep);
                        else
                                ret += fprintf(fp, " Displ");
                }
        }

        list_for_each_entry(se, &hist_entry__sort_list, list) {
                if (se->elide)
                        continue;
                if (sep) {
                        fprintf(fp, "%c%s", *sep, se->se_header);
                        continue;
                }
                width = strlen(se->se_header);
                if (symbol_conf.col_width_list_str) {
                        if (col_width) {
                                hists__set_col_len(hists, se->se_width_idx,
                                                   atoi(col_width));
                                col_width = strchr(col_width, ',');
                                if (col_width)
                                        ++col_width;
                        }
                }
                if (!hists__new_col_len(hists, se->se_width_idx, width))
                        width = hists__col_len(hists, se->se_width_idx);
                fprintf(fp, "  %*s", width, se->se_header);
        }

        fprintf(fp, "\n");
        if (max_rows && ++nr_rows >= max_rows)
                goto out;

        if (sep)
                goto print_entries;

        fprintf(fp, "# ........");
        if (symbol_conf.show_nr_samples)
                fprintf(fp, " ..........");
        if (symbol_conf.show_total_period)
                fprintf(fp, " ............");
        if (pair) {
                fprintf(fp, " ..........");
                if (show_displacement)
                        fprintf(fp, " .....");
        }

        list_for_each_entry(se, &hist_entry__sort_list, list) {
                unsigned int i;

                if (se->elide)
                        continue;

                fprintf(fp, "  ");
                width = hists__col_len(hists, se->se_width_idx);
                if (width == 0)
                        width = strlen(se->se_header);
                for (i = 0; i < width; i++)
                        fprintf(fp, ".");
        }

        fprintf(fp, "\n");
        if (max_rows && ++nr_rows >= max_rows)
                goto out;

        fprintf(fp, "#\n");
        if (max_rows && ++nr_rows >= max_rows)
                goto out;

print_entries:
        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (h->filtered)
                        continue;

                if (show_displacement) {
                        if (h->pair != NULL)
                                displacement = ((long)h->pair->position -
                                                (long)position);
                        else
                                displacement = 0;
                        ++position;
                }
                ret += hist_entry__fprintf(h, max_cols, hists, pair, show_displacement,
                                           displacement, fp, hists->stats.total_period);

                if (symbol_conf.use_callchain)
                        ret += hist_entry__fprintf_callchain(h, hists, fp,
                                                             hists->stats.total_period);
                if (max_rows && ++nr_rows >= max_rows)
                        goto out;

                if (h->ms.map == NULL && verbose > 1) {
                        __map_groups__fprintf_maps(&h->thread->mg,
                                                   MAP__FUNCTION, verbose, fp);
                        fprintf(fp, "%.10s end\n", graph_dotted_line);
                }
        }
out:
        free(rem_sq_bracket);

        return ret;
}

/*
 * See hists__fprintf to match the column widths
 */
unsigned int hists__sort_list_width(struct hists *hists)
{
        struct sort_entry *se;
        int ret = 9; /* total % */

        if (symbol_conf.show_cpu_utilization) {
                ret += 7; /* count_sys % */
                ret += 6; /* count_us % */
                if (perf_guest) {
                        ret += 13; /* count_guest_sys % */
                        ret += 12; /* count_guest_us % */
                }
        }

        if (symbol_conf.show_nr_samples)
                ret += 11;

        if (symbol_conf.show_total_period)
                ret += 13;

        list_for_each_entry(se, &hist_entry__sort_list, list)
                if (!se->elide)
                        ret += 2 + hists__col_len(hists, se->se_width_idx);

        if (verbose) /* Addr + origin */
                ret += 3 + BITS_PER_LONG / 4;

        return ret;
}

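/*
 * Filtering never removes entries from the tree: a filtered entry just has
 * a bit set in h->filtered, and is re-accounted into the totals here when
 * that filter is lifted.
 */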
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
                                       enum hist_filter filter)
{
        h->filtered &= ~(1 << filter);
        if (h->filtered)
                return;

        ++hists->nr_entries;
        if (h->ms.unfolded)
                hists->nr_entries += h->nr_rows;
        h->row_offset = 0;
        hists->stats.total_period += h->period;
        hists->stats.nr_events[PERF_RECORD_SAMPLE] += h->nr_events;

        hists__calc_col_len(hists, h);
}

static bool hists__filter_entry_by_dso(struct hists *hists,
                                       struct hist_entry *he)
{
        if (hists->dso_filter != NULL &&
            (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
                he->filtered |= (1 << HIST_FILTER__DSO);
                return true;
        }

        return false;
}

*hists
)
1130 hists
->nr_entries
= hists
->stats
.total_period
= 0;
1131 hists
->stats
.nr_events
[PERF_RECORD_SAMPLE
] = 0;
1132 hists__reset_col_len(hists
);
1134 for (nd
= rb_first(&hists
->entries
); nd
; nd
= rb_next(nd
)) {
1135 struct hist_entry
*h
= rb_entry(nd
, struct hist_entry
, rb_node
);
1137 if (symbol_conf
.exclude_other
&& !h
->parent
)
1140 if (hists__filter_entry_by_dso(hists
, h
))
1143 hists__remove_entry_filter(hists
, h
, HIST_FILTER__DSO
);
static bool hists__filter_entry_by_thread(struct hists *hists,
                                          struct hist_entry *he)
{
        if (hists->thread_filter != NULL &&
            he->thread != hists->thread_filter) {
                he->filtered |= (1 << HIST_FILTER__THREAD);
                return true;
        }

        return false;
}

void hists__filter_by_thread(struct hists *hists)
{
        struct rb_node *nd;

        hists->nr_entries = hists->stats.total_period = 0;
        hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
        hists__reset_col_len(hists);

        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (hists__filter_entry_by_thread(hists, h))
                        continue;

                hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
        }
}

int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
{
        return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
}

int hist_entry__annotate(struct hist_entry *he, size_t privsize)
{
        return symbol__annotate(he->ms.sym, he->ms.map, privsize);
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
        ++hists->stats.nr_events[0];
        ++hists->stats.nr_events[type];
}

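/*
 * Print a per-record-type event count summary, skipping empty counters and
 * types perf_event__name() does not know about.
 */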
size_t hists__fprintf_nr_events(struct hists *hists, FILE *fp)
{
        int i;
        size_t ret = 0;

        for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
                const char *name;

                if (hists->stats.nr_events[i] == 0)
                        continue;

                name = perf_event__name(i);
                if (!strcmp(name, "UNKNOWN"))
                        continue;

                ret += fprintf(fp, "%16s events: %10d\n", name,
                               hists->stats.nr_events[i]);
        }

        return ret;
}