#include "annotate.h"
#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include <math.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);

enum hist_filter {
	HIST_FILTER__DSO,
	HIST_FILTER__THREAD,
	HIST_FILTER__PARENT,
};

struct callchain_param	callchain_param = {
	.mode	= CHAIN_GRAPH_REL,
	.min_percent = 0.5,
	.order  = ORDER_CALLEE
};
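/*
 * Note: CHAIN_GRAPH_REL, the default mode above, prints each callchain
 * branch as a percentage of its parent's hits (see
 * __callchain__fprintf_graph(), which switches new_total to
 * self->children_hit in this mode), as opposed to CHAIN_GRAPH_ABS, where
 * percentages are relative to the total sample count.
 */
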
u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

static void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

static void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	u16 len;

	if (h->ms.sym)
		hists__new_col_len(hists, HISTC_SYMBOL, h->ms.sym->namelen);
	else {
		const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

		if (hists__col_len(hists, HISTC_DSO) < unresolved_col_width &&
		    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
		    !symbol_conf.dso_list)
			hists__set_col_len(hists, HISTC_DSO,
					   unresolved_col_width);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}
}

static void hist_entry__add_cpumode_period(struct hist_entry *he,
					   unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he->period_guest_us += period;
		break;
	default:
		break;
	}
}

static void hist_entry__decay(struct hist_entry *he)
{
	he->period = (he->period * 7) / 8;
	he->nr_events = (he->nr_events * 7) / 8;
}
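
/*
 * The decay above is a simple exponential moving average: each pass keeps
 * 7/8 of the accumulated period, so an entry that stops getting samples
 * fades by (7/8)^n after n passes -- e.g. a period of 1000 drops to 875,
 * then 765, then 669, ... and eventually reaches 0, at which point
 * hists__decay_entry() below reports it as ready to be removed.
 */
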
static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->period;

	if (prev_period == 0)
		return true;

	hist_entry__decay(he);

	if (!he->filtered)
		hists->stats.total_period -= prev_period - he->period;

	return he->period == 0;
}

static void __hists__decay_entries(struct hists *hists, bool zap_user,
				   bool zap_kernel, bool threaded)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		/*
		 * We may be annotating this, for instance, so keep it here
		 * in case it gets new samples; we'll eventually free it when
		 * the user stops browsing and it again gets fully decayed.
		 */
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n)) &&
		    !n->used) {
			rb_erase(&n->rb_node, &hists->entries);

			if (sort__need_collapse || threaded)
				rb_erase(&n->rb_node_in,
					 &hists->entries_collapsed);

			hist_entry__free(n);
			--hists->nr_entries;
		}
	}
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	__hists__decay_entries(hists, zap_user, zap_kernel, false);
}

void hists__decay_entries_threaded(struct hists *hists,
				   bool zap_user, bool zap_kernel)
{
	__hists__decay_entries(hists, zap_user, zap_kernel, true);
}

/*
 * histogram, sorted on item, collects periods
 */

static struct hist_entry *hist_entry__new(struct hist_entry *template)
{
	size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0;
	struct hist_entry *he = malloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;
		if (he->ms.map)
			he->ms.map->referenced = true;
		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);
	}

	return he;
}

static void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered) {
		hists__calc_col_len(hists, h);
		++hists->nr_entries;
		hists->stats.total_period += h->period;
	}
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

struct hist_entry *__hists__add_entry(struct hists *hists,
				      struct addr_location *al,
				      struct symbol *sym_parent, u64 period)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	struct hist_entry entry = {
		.thread	= al->thread,
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.cpu	= al->cpu,
		.ip	= al->addr,
		.level	= al->level,
		.period	= period,
		.nr_events = 1,
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
	};
	int64_t cmp;

	pthread_mutex_lock(&hists->lock);

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__cmp(&entry, he);

		if (!cmp) {
			he->period += period;
			++he->nr_events;

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry.ms.map) {
				he->ms.map = entry.ms.map;
				if (he->ms.map)
					he->ms.map->referenced = true;
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(&entry);
	if (!he)
		goto out_unlock;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	hist_entry__add_cpumode_period(he, al->cpumode, period);
out_unlock:
	pthread_mutex_unlock(&hists->lock);
	return he;
}
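
/*
 * Typical call site (a sketch, not taken from this file): a sample
 * handler that has already resolved the sample to an addr_location
 * would do something like
 *
 *	he = __hists__add_entry(hists, &al, parent, sample->period);
 *	if (he == NULL)
 *		return -ENOMEM;
 *
 * where 'al', 'parent' and 'sample' are assumed to come from the
 * caller's event-processing context.
 */
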
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		cmp = se->se_cmp(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		int64_t (*f)(struct hist_entry *, struct hist_entry *);

		f = se->se_collapse ?: se->se_cmp;

		cmp = f(left, right);
		if (cmp)
			break;
	}

	return cmp;
}
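
/*
 * The "?:" above is the GNU elvis operator: se->se_collapse ?: se->se_cmp
 * evaluates to se->se_collapse when it is non-NULL and to se->se_cmp
 * otherwise, i.e. a sort key falls back to its ordinary comparator when
 * it doesn't define a dedicated collapse comparator.
 */
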
void hist_entry__free(struct hist_entry *he)
{
	free(he);
}

/*
 * collapse the histogram
 */

static bool hists__collapse_insert_entry(struct hists *hists,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			iter->period += he->period;
			iter->nr_events += he->nr_events;
			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&hists->callchain_cursor);
				callchain_merge(&hists->callchain_cursor, iter->callchain,
						he->callchain);
			}
			hist_entry__free(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return true;
}
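
/*
 * When two entries compare equal above, the incoming entry is folded into
 * the resident one: periods and event counts are summed, callchains are
 * merged, and the duplicate is freed. The bool return tells the caller
 * whether the entry was actually linked into the tree (true) or consumed
 * by an existing node (false).
 */
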
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}
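
/*
 * hists->entries_in_array[] holds two input trees used double-buffer
 * style: under hists->lock the current tree is handed to the collapser
 * and hists->entries_in is flipped to the other slot, so new entries can
 * keep arriving while the previous batch is being collapsed.
 */
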
static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
}

static void __hists__collapse_resort(struct hists *hists, bool threaded)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse && !threaded)
		return;

	root = hists__get_rotate_entries_in(hists);
	next = rb_first(root);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
			/*
			 * If it wasn't combined with one of the entries
			 * already collapsed, we need to apply the filters
			 * that may have been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
	}
}

void hists__collapse_resort(struct hists *hists)
{
	__hists__collapse_resort(hists, false);
}

void hists__collapse_resort_threaded(struct hists *hists)
{
	__hists__collapse_resort(hists, true);
}

/*
 * reverse the map, sort on period.
 */

static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (symbol_conf.use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (he->period > iter->period)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}
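
/*
 * Note the comparison direction above: entries with a larger period go to
 * the left, so an in-order walk of the output tree (rb_first/rb_next)
 * yields entries sorted by period in descending order -- hottest first.
 */
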
static void __hists__output_resort(struct hists *hists, bool threaded)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;

	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse || threaded)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists->nr_entries = 0;
	hists->stats.total_period = 0;
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
		hists__inc_nr_entries(hists, n);
	}
}
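
/*
 * min_callchain_hits above converts callchain_param.min_percent into an
 * absolute hit count: e.g. with a total period of 200000 and min_percent
 * of 0.5, callchains accounting for fewer than 1000 hits get pruned by
 * the callchain_param.sort() call in __hists__insert_output_entry().
 */
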
void hists__output_resort(struct hists *hists)
{
	__hists__output_resort(hists, false);
}

void hists__output_resort_threaded(struct hists *hists)
{
	__hists__output_resort(hists, true);
}

static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
{
	int i;
	int ret = fprintf(fp, "            ");

	for (i = 0; i < left_margin; i++)
		ret += fprintf(fp, " ");

	return ret;
}

static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
					  int left_margin)
{
	int i;
	size_t ret = callchain__fprintf_left_margin(fp, left_margin);

	for (i = 0; i < depth; i++)
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "|          ");
		else
			ret += fprintf(fp, "           ");

	ret += fprintf(fp, "\n");

	return ret;
}

static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain,
				     int depth, int depth_mask, int period,
				     u64 total_samples, u64 hits,
				     int left_margin)
{
	int i;
	size_t ret = 0;

	ret += callchain__fprintf_left_margin(fp, left_margin);
	for (i = 0; i < depth; i++) {
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "|");
		else
			ret += fprintf(fp, " ");
		if (!period && i == depth - 1) {
			double percent;

			percent = hits * 100.0 / total_samples;
			ret += percent_color_fprintf(fp, "--%2.2f%%-- ", percent);
		} else
			ret += fprintf(fp, "%s", "          ");
	}
	if (chain->ms.sym)
		ret += fprintf(fp, "%s\n", chain->ms.sym->name);
	else
		ret += fprintf(fp, "%p\n", (void *)(long)chain->ip);

	return ret;
}
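
/*
 * depth_mask is a per-level bitmap of the tree being rendered: bit i set
 * means level i still has siblings below, so a '|' is printed to continue
 * that branch line, while a cleared bit prints a space. The "--X.XX%-- "
 * marker is only emitted at the deepest open level for the first entry of
 * a new branch (!period && i == depth - 1).
 */
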
static struct symbol *rem_sq_bracket;
static struct callchain_list rem_hits;

static void init_rem_hits(void)
{
	rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
	if (!rem_sq_bracket) {
		fprintf(stderr, "Not enough memory to display remaining hits\n");
		return;
	}

	strcpy(rem_sq_bracket->name, "[...]");
	rem_hits.ms.sym = rem_sq_bracket;
}

static size_t __callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
					 u64 total_samples, int depth,
					 int depth_mask, int left_margin)
{
	struct rb_node *node, *next;
	struct callchain_node *child;
	struct callchain_list *chain;
	int new_depth_mask = depth_mask;
	u64 new_total;
	u64 remaining;
	size_t ret = 0;
	int i;
	uint entries_printed = 0;

	if (callchain_param.mode == CHAIN_GRAPH_REL)
		new_total = self->children_hit;
	else
		new_total = total_samples;

	remaining = new_total;

	node = rb_first(&self->rb_root);
	while (node) {
		u64 cumul;

		child = rb_entry(node, struct callchain_node, rb_node);
		cumul = callchain_cumul_hits(child);
		remaining -= cumul;

		/*
		 * The depth mask manages the output of pipes that show
		 * the depth. We don't want to keep the pipes of the current
		 * level for the last child of this depth.
		 * Except if we have remaining filtered hits. They will
		 * supersede the last child.
		 */
		next = rb_next(node);
		if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
			new_depth_mask &= ~(1 << (depth - 1));

		/*
		 * But we keep the older depth mask for the line separator
		 * to keep the level link until we reach the last child.
		 */
		ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
						   left_margin);
		i = 0;
		list_for_each_entry(chain, &child->val, list) {
			ret += ipchain__fprintf_graph(fp, chain, depth,
						      new_depth_mask, i++,
						      new_total, cumul,
						      left_margin);
		}
		ret += __callchain__fprintf_graph(fp, child, new_total,
						  depth + 1,
						  new_depth_mask | (1 << depth),
						  left_margin);
		node = next;
		if (++entries_printed == callchain_param.print_limit)
			break;
	}

	if (callchain_param.mode == CHAIN_GRAPH_REL &&
	    remaining && remaining != new_total) {

		if (!rem_sq_bracket)
			return ret;

		new_depth_mask &= ~(1 << (depth - 1));
		ret += ipchain__fprintf_graph(fp, &rem_hits, depth,
					      new_depth_mask, 0, new_total,
					      remaining, left_margin);
	}

	return ret;
}

static size_t callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
				       u64 total_samples, int left_margin)
{
	struct callchain_list *chain;
	bool printed = false;
	int i = 0;
	int ret = 0;
	u32 entries_printed = 0;

	list_for_each_entry(chain, &self->val, list) {
		if (!i++ && sort__first_dimension == SORT_SYM)
			continue;

		if (!printed) {
			ret += callchain__fprintf_left_margin(fp, left_margin);
			ret += fprintf(fp, "|\n");
			ret += callchain__fprintf_left_margin(fp, left_margin);
			ret += fprintf(fp, "---");

			left_margin += 3;
			printed = true;
		} else
			ret += callchain__fprintf_left_margin(fp, left_margin);

		if (chain->ms.sym)
			ret += fprintf(fp, " %s\n", chain->ms.sym->name);
		else
			ret += fprintf(fp, " %p\n", (void *)(long)chain->ip);

		if (++entries_printed == callchain_param.print_limit)
			break;
	}

	ret += __callchain__fprintf_graph(fp, self, total_samples, 1, 1, left_margin);

	return ret;
}

static size_t callchain__fprintf_flat(FILE *fp, struct callchain_node *self,
				      u64 total_samples)
{
	struct callchain_list *chain;
	size_t ret = 0;

	if (!self)
		return 0;

	ret += callchain__fprintf_flat(fp, self->parent, total_samples);

	list_for_each_entry(chain, &self->val, list) {
		if (chain->ip >= PERF_CONTEXT_MAX)
			continue;
		if (chain->ms.sym)
			ret += fprintf(fp, "                %s\n", chain->ms.sym->name);
		else
			ret += fprintf(fp, "                %p\n",
				       (void *)(long)chain->ip);
	}

	return ret;
}

static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
					    u64 total_samples, int left_margin,
					    FILE *fp)
{
	struct rb_node *rb_node;
	struct callchain_node *chain;
	size_t ret = 0;
	u32 entries_printed = 0;

	rb_node = rb_first(&he->sorted_chain);
	while (rb_node) {
		double percent;

		chain = rb_entry(rb_node, struct callchain_node, rb_node);
		percent = chain->hit * 100.0 / total_samples;
		switch (callchain_param.mode) {
		case CHAIN_FLAT:
			ret += percent_color_fprintf(fp, "           %6.2f%%\n",
						     percent);
			ret += callchain__fprintf_flat(fp, chain, total_samples);
			break;
		case CHAIN_GRAPH_ABS: /* fall through */
		case CHAIN_GRAPH_REL:
			ret += callchain__fprintf_graph(fp, chain, total_samples,
							left_margin);
			break;
		case CHAIN_NONE:
		default:
			break;
		}
		ret += fprintf(fp, "\n");
		if (++entries_printed == callchain_param.print_limit)
			break;
		rb_node = rb_next(rb_node);
	}

	return ret;
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}

static int hist_entry__pcnt_snprintf(struct hist_entry *he, char *s,
				     size_t size, struct hists *pair_hists,
				     bool show_displacement, long displacement,
				     bool color, u64 total_period)
{
	u64 period, total, period_sys, period_us, period_guest_sys, period_guest_us;
	u64 nr_events;
	const char *sep = symbol_conf.field_sep;
	int ret;

	if (symbol_conf.exclude_other && !he->parent)
		return 0;

	if (pair_hists) {
		period = he->pair ? he->pair->period : 0;
		nr_events = he->pair ? he->pair->nr_events : 0;
		total = pair_hists->stats.total_period;
		period_sys = he->pair ? he->pair->period_sys : 0;
		period_us = he->pair ? he->pair->period_us : 0;
		period_guest_sys = he->pair ? he->pair->period_guest_sys : 0;
		period_guest_us = he->pair ? he->pair->period_guest_us : 0;
	} else {
		period = he->period;
		nr_events = he->nr_events;
		total = total_period;
		period_sys = he->period_sys;
		period_us = he->period_us;
		period_guest_sys = he->period_guest_sys;
		period_guest_us = he->period_guest_us;
	}

	if (total) {
		if (color)
			ret = percent_color_snprintf(s, size,
						     sep ? "%.2f" : "   %6.2f%%",
						     (period * 100.0) / total);
		else
			ret = scnprintf(s, size, sep ? "%.2f" : "   %6.2f%%",
					(period * 100.0) / total);
		if (symbol_conf.show_cpu_utilization) {
			ret += percent_color_snprintf(s + ret, size - ret,
					sep ? "%.2f" : "   %6.2f%%",
					(period_sys * 100.0) / total);
			ret += percent_color_snprintf(s + ret, size - ret,
					sep ? "%.2f" : "   %6.2f%%",
					(period_us * 100.0) / total);
			if (perf_guest) {
				ret += percent_color_snprintf(s + ret,
						size - ret,
						sep ? "%.2f" : "   %6.2f%%",
						(period_guest_sys * 100.0) /
								total);
				ret += percent_color_snprintf(s + ret,
						size - ret,
						sep ? "%.2f" : "   %6.2f%%",
						(period_guest_us * 100.0) /
								total);
			}
		}
	} else
		ret = scnprintf(s, size, sep ? "%" PRIu64 : "%12" PRIu64 " ", period);

	if (symbol_conf.show_nr_samples) {
		if (sep)
			ret += scnprintf(s + ret, size - ret, "%c%" PRIu64, *sep, nr_events);
		else
			ret += scnprintf(s + ret, size - ret, "%11" PRIu64, nr_events);
	}

	if (symbol_conf.show_total_period) {
		if (sep)
			ret += scnprintf(s + ret, size - ret, "%c%" PRIu64, *sep, period);
		else
			ret += scnprintf(s + ret, size - ret, " %12" PRIu64, period);
	}

	if (pair_hists) {
		char bf[32];
		double old_percent = 0, new_percent = 0, diff;

		if (total > 0)
			old_percent = (period * 100.0) / total;
		if (total_period > 0)
			new_percent = (he->period * 100.0) / total_period;

		diff = new_percent - old_percent;

		if (fabs(diff) >= 0.01)
			ret += scnprintf(bf, sizeof(bf), "%+4.2F%%", diff);
		else
			ret += scnprintf(bf, sizeof(bf), " ");

		if (sep)
			ret += scnprintf(s + ret, size - ret, "%c%s", *sep, bf);
		else
			ret += scnprintf(s + ret, size - ret, "%11.11s", bf);

		if (show_displacement) {
			if (displacement)
				ret += scnprintf(bf, sizeof(bf), "%+4ld", displacement);
			else
				ret += scnprintf(bf, sizeof(bf), " ");

			if (sep)
				ret += scnprintf(s + ret, size - ret, "%c%s", *sep, bf);
			else
				ret += scnprintf(s + ret, size - ret, "%6.6s", bf);
		}
	}

	return ret;
}
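
/*
 * The Delta column above (printed when comparing against pair_hists,
 * e.g. for perf diff style output) is the difference between this
 * entry's share of total_period and its pair's share of the baseline
 * total; differences below 0.01% are blanked out rather than printed
 * as +0.00%.
 */
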
int hist_entry__snprintf(struct hist_entry *he, char *s, size_t size,
			 struct hists *hists)
{
	const char *sep = symbol_conf.field_sep;
	struct sort_entry *se;
	int ret = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		if (se->elide)
			continue;

		ret += scnprintf(s + ret, size - ret, "%s", sep ?: "  ");
		ret += se->se_snprintf(he, s + ret, size - ret,
				       hists__col_len(hists, se->se_width_idx));
	}

	return ret;
}

static int hist_entry__fprintf(struct hist_entry *he, size_t size,
			       struct hists *hists, struct hists *pair_hists,
			       bool show_displacement, long displacement,
			       u64 total_period, FILE *fp)
{
	char bf[512];
	int ret;

	if (size == 0 || size > sizeof(bf))
		size = sizeof(bf);

	ret = hist_entry__pcnt_snprintf(he, bf, size, pair_hists,
					show_displacement, displacement,
					true, total_period);
	hist_entry__snprintf(he, bf + ret, size - ret, hists);
	return fprintf(fp, "%s\n", bf);
}

static size_t hist_entry__fprintf_callchain(struct hist_entry *he,
					    struct hists *hists,
					    u64 total_period, FILE *fp)
{
	int left_margin = 0;

	if (sort__first_dimension == SORT_COMM) {
		struct sort_entry *se = list_first_entry(&hist_entry__sort_list,
							 typeof(*se), list);
		left_margin = hists__col_len(hists, se->se_width_idx);
		left_margin -= thread__comm_len(he->thread);
	}

	return hist_entry_callchain__fprintf(he, total_period, left_margin, fp);
}

size_t hists__fprintf(struct hists *hists, struct hists *pair,
		      bool show_displacement, bool show_header, int max_rows,
		      int max_cols, FILE *fp)
{
	struct sort_entry *se;
	struct rb_node *nd;
	size_t ret = 0;
	u64 total_period;
	unsigned long position = 1;
	long displacement = 0;
	unsigned int width;
	const char *sep = symbol_conf.field_sep;
	const char *col_width = symbol_conf.col_width_list_str;
	int nr_rows = 0;

	init_rem_hits();

	if (!show_header)
		goto print_entries;

	fprintf(fp, "# %s", pair ? "Baseline" : "Overhead");

	if (symbol_conf.show_cpu_utilization) {
		if (sep) {
			ret += fprintf(fp, "%csys", *sep);
			ret += fprintf(fp, "%cus", *sep);
			if (perf_guest) {
				ret += fprintf(fp, "%cguest sys", *sep);
				ret += fprintf(fp, "%cguest us", *sep);
			}
		} else {
			ret += fprintf(fp, "     sys ");
			ret += fprintf(fp, "      us ");
			if (perf_guest) {
				ret += fprintf(fp, "  guest sys ");
				ret += fprintf(fp, "  guest us ");
			}
		}
	}

	if (symbol_conf.show_nr_samples) {
		if (sep)
			fprintf(fp, "%cSamples", *sep);
		else
			fputs("  Samples  ", fp);
	}

	if (symbol_conf.show_total_period) {
		if (sep)
			ret += fprintf(fp, "%cPeriod", *sep);
		else
			ret += fprintf(fp, "   Period    ");
	}

	if (pair) {
		if (sep)
			ret += fprintf(fp, "%cDelta", *sep);
		else
			ret += fprintf(fp, "  Delta    ");

		if (show_displacement) {
			if (sep)
				ret += fprintf(fp, "%cDisplacement", *sep);
			else
				ret += fprintf(fp, " Displ");
		}
	}

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		if (se->elide)
			continue;
		if (sep) {
			fprintf(fp, "%c%s", *sep, se->se_header);
			continue;
		}
		width = strlen(se->se_header);
		if (symbol_conf.col_width_list_str) {
			if (col_width) {
				hists__set_col_len(hists, se->se_width_idx,
						   atoi(col_width));
				col_width = strchr(col_width, ',');
				if (col_width)
					++col_width;
			}
		}
		if (!hists__new_col_len(hists, se->se_width_idx, width))
			width = hists__col_len(hists, se->se_width_idx);
		fprintf(fp, "  %*s", width, se->se_header);
	}

	fprintf(fp, "\n");
	if (max_rows && ++nr_rows >= max_rows)
		goto out;

	if (sep)
		goto print_entries;

	fprintf(fp, "# ........");
	if (symbol_conf.show_cpu_utilization)
		fprintf(fp, "   .......   .......");
	if (symbol_conf.show_nr_samples)
		fprintf(fp, " ..........");
	if (symbol_conf.show_total_period)
		fprintf(fp, " ............");
	if (pair) {
		fprintf(fp, " ..........");
		if (show_displacement)
			fprintf(fp, " .....");
	}
	list_for_each_entry(se, &hist_entry__sort_list, list) {
		unsigned int i;

		if (se->elide)
			continue;

		fprintf(fp, "  ");
		width = hists__col_len(hists, se->se_width_idx);
		if (width == 0)
			width = strlen(se->se_header);
		for (i = 0; i < width; i++)
			fprintf(fp, ".");
	}

	fprintf(fp, "\n");
	if (max_rows && ++nr_rows >= max_rows)
		goto out;

	fprintf(fp, "#\n");
	if (max_rows && ++nr_rows >= max_rows)
		goto out;

print_entries:
	total_period = hists->stats.total_period;

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (h->filtered)
			continue;

		if (show_displacement) {
			if (h->pair != NULL)
				displacement = ((long)h->pair->position -
						(long)position);
			else
				displacement = 0;
			++position;
		}
		ret += hist_entry__fprintf(h, max_cols, hists, pair, show_displacement,
					   displacement, total_period, fp);

		if (symbol_conf.use_callchain)
			ret += hist_entry__fprintf_callchain(h, hists, total_period, fp);
		if (max_rows && ++nr_rows >= max_rows)
			goto out;

		if (h->ms.map == NULL && verbose > 1) {
			__map_groups__fprintf_maps(&h->thread->mg,
						   MAP__FUNCTION, verbose, fp);
			fprintf(fp, "%.10s end\n", graph_dotted_line);
		}
	}
out:
	free(rem_sq_bracket);

	return ret;
}

/*
 * See hists__fprintf to match the column widths
 */
unsigned int hists__sort_list_width(struct hists *hists)
{
	struct sort_entry *se;
	int ret = 9; /* total % */

	if (symbol_conf.show_cpu_utilization) {
		ret += 7; /* count_sys % */
		ret += 6; /* count_us % */
		if (perf_guest) {
			ret += 13; /* count_guest_sys % */
			ret += 12; /* count_guest_us % */
		}
	}

	if (symbol_conf.show_nr_samples)
		ret += 11;

	if (symbol_conf.show_total_period)
		ret += 13;

	list_for_each_entry(se, &hist_entry__sort_list, list)
		if (!se->elide)
			ret += 2 + hists__col_len(hists, se->se_width_idx);

	if (verbose) /* Addr + origin */
		ret += 3 + BITS_PER_LONG / 4;

	return ret;
}

static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	++hists->nr_entries;
	if (h->ms.unfolded)
		hists->nr_entries += h->nr_rows;
	h->row_offset = 0;
	hists->stats.total_period += h->period;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] += h->nr_events;

	hists__calc_col_len(hists, h);
}
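
/*
 * Un-filtering bookkeeping: once the last filter bit is cleared above,
 * the entry becomes visible again, so it is re-counted in nr_entries,
 * the total period and the PERF_RECORD_SAMPLE event count, and the
 * column widths are recalculated to make room for it.
 */
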
static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

void hists__filter_by_dso(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (hists__filter_entry_by_dso(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
	}
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

void hists__filter_by_thread(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_thread(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
	}
}

int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
{
	return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
}

int hist_entry__annotate(struct hist_entry *he, size_t privsize)
{
	return symbol__annotate(he->ms.sym, he->ms.map, privsize);
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	++hists->stats.nr_events[0];
	++hists->stats.nr_events[type];
}

size_t hists__fprintf_nr_events(struct hists *hists, FILE *fp)
{
	int i;
	size_t ret = 0;

	for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
		const char *name;

		if (hists->stats.nr_events[i] == 0)
			continue;

		name = perf_event__name(i);
		if (!strcmp(name, "UNKNOWN"))
			continue;

		ret += fprintf(fp, "%16s events: %10d\n", name,
			       hists->stats.nr_events[i]);
	}

	return ret;
}