struct callchain_param	callchain_param = {
	.mode	= CHAIN_GRAPH_REL,
	.min_percent = 0.5,
	.order  = ORDER_CALLEE
};

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

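/*
 * Column widths only ever grow: hists__new_col_len() widens a column
 * when a longer value shows up and reports whether it did, so a caller
 * can size a dependent column in the same pass, e.g.:
 *
 *	if (hists__new_col_len(hists, HISTC_COMM, len))
 *		hists__set_col_len(hists, HISTC_THREAD, len + 6);
 *
 * (That caller is hists__calc_col_len() below; the + 6 presumably
 * reserves room for a pid suffix next to the comm.)
 */
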
static void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

static void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	u16 len;

	if (h->ms.sym)
		hists__new_col_len(hists, HISTC_SYMBOL, h->ms.sym->namelen);
	else {
		const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

		if (hists__col_len(hists, HISTC_DSO) < unresolved_col_width &&
		    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
		    !symbol_conf.dso_list)
			hists__set_col_len(hists, HISTC_DSO,
					   unresolved_col_width);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}
}

static void hist_entry__add_cpumode_period(struct hist_entry *self,
					   unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		self->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		self->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		self->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		self->period_guest_us += period;
		break;
	default:
		break;
	}
}

static void hist_entry__decay(struct hist_entry *he)
{
	he->period = (he->period * 7) / 8;
	he->nr_events = (he->nr_events * 7) / 8;
}

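/*
 * Worked example: each decay pass keeps 7/8 of the period, so an entry
 * that stops getting samples retains (7/8)^n of it after n passes,
 * about half after five passes (0.875^5 ~= 0.51). The integer division
 * also rounds down, so small counts eventually reach 0, at which point
 * hists__decay_entry() below reports the entry as fully decayed.
 */
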
static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	if (he->period == 0)
		return true;
	hists->stats.total_period -= he->period;
	hist_entry__decay(he);
	hists->stats.total_period += he->period;
	return he->period == 0;
}

static void __hists__decay_entries(struct hists *hists, bool threaded)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		/*
		 * We may be annotating this, for instance, so keep it
		 * here in case it gets new samples; we'll eventually
		 * free it when the user stops browsing and it again
		 * gets fully decayed.
		 */
		if (hists__decay_entry(hists, n) && !n->used) {
			rb_erase(&n->rb_node, &hists->entries);

			if (sort__need_collapse || threaded)
				rb_erase(&n->rb_node_in,
					 &hists->entries_collapsed);

			hist_entry__free(n);
			--hists->nr_entries;
		}
	}
}

void hists__decay_entries(struct hists *hists)
{
	return __hists__decay_entries(hists, false);
}

void hists__decay_entries_threaded(struct hists *hists)
{
	return __hists__decay_entries(hists, true);
}

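/*
 * Usage sketch, hypothetical caller (the real ones live in the perf
 * top/report browsers and may differ): a live view would decay between
 * screen refreshes, then rebuild the output tree:
 *
 *	hists__decay_entries(hists);
 *	hists__collapse_resort(hists);
 *	hists__output_resort(hists);
 */
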
/*
 * histogram, sorted on item, collects periods
 */

static struct hist_entry *hist_entry__new(struct hist_entry *template)
{
	size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0;
	struct hist_entry *self = malloc(sizeof(*self) + callchain_size);

	if (self != NULL) {
		*self = *template;
		self->nr_events = 1;
		if (self->ms.map)
			self->ms.map->referenced = true;
		if (symbol_conf.use_callchain)
			callchain_init(self->callchain);
	}

	return self;
}

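/*
 * Note the single-allocation trick above: when callchains are enabled,
 * the callchain_root is tacked onto the end of the same malloc() block
 * as the hist_entry itself, so self->callchain is expected to resolve
 * to that trailing storage and no second allocation (or free) is ever
 * needed for it.
 */
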
static void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered) {
		hists__calc_col_len(hists, h);
		++hists->nr_entries;
		hists->stats.total_period += h->period;
	}
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

struct hist_entry *__hists__add_entry(struct hists *hists,
				      struct addr_location *al,
				      struct symbol *sym_parent, u64 period)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	struct hist_entry entry = {
		.thread	= al->thread,
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.cpu	= al->cpu,
		.ip	= al->addr,
		.level	= al->level,
		.period	= period,
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
	};
	int64_t cmp;

	pthread_mutex_lock(&hists->lock);

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__cmp(&entry, he);

		if (!cmp) {
			he->period += period;
			++he->nr_events;
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(&entry);
	if (!he)
		goto out_unlock;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	hist_entry__add_cpumode_period(he, al->cpumode, period);
out_unlock:
	pthread_mutex_unlock(&hists->lock);
	return he;
}

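/*
 * In short: walk the tree ordered by hist_entry__cmp(); on a match just
 * accumulate the period into the existing entry, otherwise allocate and
 * link a new node. hists->lock is held so another thread can safely
 * rotate entries_in underneath us, see hists__get_rotate_entries_in()
 * below.
 */
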
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		cmp = se->se_cmp(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		int64_t (*f)(struct hist_entry *, struct hist_entry *);

		f = se->se_collapse ?: se->se_cmp;

		cmp = f(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

void hist_entry__free(struct hist_entry *he)
{
	free(he);
}

/*
 * collapse the histogram
 */

static bool hists__collapse_insert_entry(struct hists *hists,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			iter->period += he->period;
			iter->nr_events += he->nr_events;
			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&hists->callchain_cursor);
				callchain_merge(&hists->callchain_cursor, iter->callchain,
						he->callchain);
			}
			hist_entry__free(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return true;
}

static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}

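/*
 * entries_in_array[] acts as a two-slot double buffer: the consumer
 * takes the tree the producers have been filling and, under the same
 * lock the producers take, flips entries_in to the other slot. If
 * entries_in points at slot 0, the increment moves it to slot 1; from
 * slot 1 it walks past &entries_in_array[1] and is reset to slot 0.
 * Producers only ever contend on this brief pointer flip, never on the
 * collapse work itself.
 */
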
static void __hists__collapse_resort(struct hists *hists, bool threaded)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse && !threaded)
		return;

	root = hists__get_rotate_entries_in(hists);
	next = rb_first(root);
	hists->stats.total_period = 0;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n))
			hists__inc_nr_entries(hists, n);
	}
}

void hists__collapse_resort(struct hists *hists)
{
	return __hists__collapse_resort(hists, false);
}

void hists__collapse_resort_threaded(struct hists *hists)
{
	return __hists__collapse_resort(hists, true);
}

/*
 * reverse the map, sort on period.
 */

static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (symbol_conf.use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (he->period > iter->period)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}

static void __hists__output_resort(struct hists *hists, bool threaded)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;

	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse || threaded)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists->nr_entries = 0;
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
		hists__inc_nr_entries(hists, n);
	}
}

void hists__output_resort(struct hists *hists)
{
	return __hists__output_resort(hists, false);
}

void hists__output_resort_threaded(struct hists *hists)
{
	return __hists__output_resort(hists, true);
}

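/*
 * Taken together the three stages form a pipeline: __hists__add_entry()
 * accumulates samples into an entries_in tree, collapse_resort() merges
 * entries that hist_entry__collapse() considers equal into
 * entries_collapsed, and output_resort() rebuilds hists->entries sorted
 * by period for display.
 */
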
static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
{
	int i;
	int ret = fprintf(fp, " ");

	for (i = 0; i < left_margin; i++)
		ret += fprintf(fp, " ");

	return ret;
}

static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
					  int left_margin)
{
	int i;
	size_t ret = callchain__fprintf_left_margin(fp, left_margin);

	for (i = 0; i < depth; i++)
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "| ");
		else
			ret += fprintf(fp, " ");

	ret += fprintf(fp, "\n");

	return ret;
}

static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain,
				     int depth, int depth_mask, int period,
				     u64 total_samples, u64 hits,
				     int left_margin)
{
	int i;
	size_t ret = 0;

	ret += callchain__fprintf_left_margin(fp, left_margin);
	for (i = 0; i < depth; i++) {
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "|");
		else
			ret += fprintf(fp, " ");
		if (!period && i == depth - 1) {
			double percent;

			percent = hits * 100.0 / total_samples;
			ret += percent_color_fprintf(fp, "--%2.2f%%-- ", percent);
		} else
			ret += fprintf(fp, "%s", " ");
	}
	if (chain->ms.sym)
		ret += fprintf(fp, "%s\n", chain->ms.sym->name);
	else
		ret += fprintf(fp, "%p\n", (void *)(long)chain->ip);

	return ret;
}

static struct symbol *rem_sq_bracket;
static struct callchain_list rem_hits;

static void init_rem_hits(void)
{
	rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
	if (!rem_sq_bracket) {
		fprintf(stderr, "Not enough memory to display remaining hits\n");
		return;
	}

	strcpy(rem_sq_bracket->name, "[...]");
	rem_hits.ms.sym = rem_sq_bracket;
}

static size_t __callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
					 u64 total_samples, int depth,
					 int depth_mask, int left_margin)
{
	struct rb_node *node, *next;
	struct callchain_node *child;
	struct callchain_list *chain;
	int new_depth_mask = depth_mask;
	u64 new_total;
	u64 remaining;
	size_t ret = 0;
	int i;
	uint entries_printed = 0;

	if (callchain_param.mode == CHAIN_GRAPH_REL)
		new_total = self->children_hit;
	else
		new_total = total_samples;

	remaining = new_total;

	node = rb_first(&self->rb_root);
	while (node) {
		u64 cumul;

		child = rb_entry(node, struct callchain_node, rb_node);
		cumul = callchain_cumul_hits(child);
		remaining -= cumul;

		/*
		 * The depth mask manages the output of pipes that show
		 * the depth. We don't want to keep the pipes of the current
		 * level for the last child of this depth.
		 * Except if we have remaining filtered hits. They will
		 * supersede the last child
		 */
		next = rb_next(node);
		if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
			new_depth_mask &= ~(1 << (depth - 1));

		/*
		 * But we keep the older depth mask for the line separator
		 * to keep the level link until we reach the last child
		 */
		ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
						   left_margin);
		i = 0;
		list_for_each_entry(chain, &child->val, list) {
			ret += ipchain__fprintf_graph(fp, chain, depth,
						      new_depth_mask, i++,
						      new_total,
						      cumul,
						      left_margin);
		}
		ret += __callchain__fprintf_graph(fp, child, new_total,
						  depth + 1,
						  new_depth_mask | (1 << depth),
						  left_margin);
		node = next;
		if (++entries_printed == callchain_param.print_limit)
			break;
	}

	if (callchain_param.mode == CHAIN_GRAPH_REL &&
		remaining && remaining != new_total) {

		if (!rem_sq_bracket)
			return ret;

		new_depth_mask &= ~(1 << (depth - 1));

		ret += ipchain__fprintf_graph(fp, &rem_hits, depth,
					      new_depth_mask, 0, new_total,
					      remaining, left_margin);
	}

	return ret;
}

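/*
 * Depth mask worked example: printing at depth 3 with depth_mask 0b111,
 * every column draws its '|' pipe. When the last child is reached (and
 * no filtered hits remain), bit depth - 1 is cleared, giving 0b011, so
 * the current column stops drawing below that child and the branch
 * visually terminates. The recursion then ORs in (1 << depth) to open a
 * pipe column for the grandchildren one level further right.
 */
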
static size_t callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
				       u64 total_samples, int left_margin)
{
	struct callchain_list *chain;
	bool printed = false;
	int i = 0;
	int ret = 0;
	u32 entries_printed = 0;

	list_for_each_entry(chain, &self->val, list) {
		if (!i++ && sort__first_dimension == SORT_SYM)
			continue;

		if (!printed) {
			ret += callchain__fprintf_left_margin(fp, left_margin);
			ret += fprintf(fp, "|\n");
			ret += callchain__fprintf_left_margin(fp, left_margin);
			ret += fprintf(fp, "---");

			left_margin += 3;
			printed = true;
		} else
			ret += callchain__fprintf_left_margin(fp, left_margin);

		if (chain->ms.sym)
			ret += fprintf(fp, " %s\n", chain->ms.sym->name);
		else
			ret += fprintf(fp, " %p\n", (void *)(long)chain->ip);

		if (++entries_printed == callchain_param.print_limit)
			break;
	}

	ret += __callchain__fprintf_graph(fp, self, total_samples, 1, 1, left_margin);

	return ret;
}

static size_t callchain__fprintf_flat(FILE *fp, struct callchain_node *self,
				      u64 total_samples)
{
	struct callchain_list *chain;
	size_t ret = 0;

	if (!self)
		return 0;

	ret += callchain__fprintf_flat(fp, self->parent, total_samples);

	list_for_each_entry(chain, &self->val, list) {
		if (chain->ip >= PERF_CONTEXT_MAX)
			continue;
		if (chain->ms.sym)
			ret += fprintf(fp, " %s\n", chain->ms.sym->name);
		else
			ret += fprintf(fp, " %p\n",
					(void *)(long)chain->ip);
	}

	return ret;
}

static size_t hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self,
					    u64 total_samples, int left_margin)
{
	struct rb_node *rb_node;
	struct callchain_node *chain;
	size_t ret = 0;
	u32 entries_printed = 0;

	rb_node = rb_first(&self->sorted_chain);
	while (rb_node) {
		double percent;

		chain = rb_entry(rb_node, struct callchain_node, rb_node);
		percent = chain->hit * 100.0 / total_samples;
		switch (callchain_param.mode) {
		case CHAIN_FLAT:
			ret += percent_color_fprintf(fp, " %6.2f%%\n",
						     percent);
			ret += callchain__fprintf_flat(fp, chain, total_samples);
			break;
		case CHAIN_GRAPH_ABS: /* Fall through */
		case CHAIN_GRAPH_REL:
			ret += callchain__fprintf_graph(fp, chain, total_samples,
							left_margin);
		case CHAIN_NONE:
		default:
			break;
		}
		ret += fprintf(fp, "\n");
		if (++entries_printed == callchain_param.print_limit)
			break;
		rb_node = rb_next(rb_node);
	}

	return ret;
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}

int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size,
			 struct hists *hists, struct hists *pair_hists,
			 bool show_displacement, long displacement,
			 bool color, u64 session_total)
{
	struct sort_entry *se;
	u64 period, total, period_sys, period_us, period_guest_sys, period_guest_us;
	u64 nr_events;
	const char *sep = symbol_conf.field_sep;
	int ret;

	if (symbol_conf.exclude_other && !self->parent)
		return 0;

	if (pair_hists) {
		period = self->pair ? self->pair->period : 0;
		nr_events = self->pair ? self->pair->nr_events : 0;
		total = pair_hists->stats.total_period;
		period_sys = self->pair ? self->pair->period_sys : 0;
		period_us = self->pair ? self->pair->period_us : 0;
		period_guest_sys = self->pair ? self->pair->period_guest_sys : 0;
		period_guest_us = self->pair ? self->pair->period_guest_us : 0;
	} else {
		period = self->period;
		nr_events = self->nr_events;
		total = session_total;
		period_sys = self->period_sys;
		period_us = self->period_us;
		period_guest_sys = self->period_guest_sys;
		period_guest_us = self->period_guest_us;
	}

	if (total) {
		if (color)
			ret = percent_color_snprintf(s, size,
					sep ? "%.2f" : " %6.2f%%",
					(period * 100.0) / total);
		else
			ret = snprintf(s, size, sep ? "%.2f" : " %6.2f%%",
				       (period * 100.0) / total);
		if (symbol_conf.show_cpu_utilization) {
			ret += percent_color_snprintf(s + ret, size - ret,
					sep ? "%.2f" : " %6.2f%%",
					(period_sys * 100.0) / total);
			ret += percent_color_snprintf(s + ret, size - ret,
					sep ? "%.2f" : " %6.2f%%",
					(period_us * 100.0) / total);
			if (perf_guest) {
				ret += percent_color_snprintf(s + ret,
						size - ret,
						sep ? "%.2f" : " %6.2f%%",
						(period_guest_sys * 100.0) /
								total);
				ret += percent_color_snprintf(s + ret,
						size - ret,
						sep ? "%.2f" : " %6.2f%%",
						(period_guest_us * 100.0) /
								total);
			}
		}
	} else
		ret = snprintf(s, size, sep ? "%" PRIu64 : "%12" PRIu64 " ", period);

	if (symbol_conf.show_nr_samples) {
		if (sep)
			ret += snprintf(s + ret, size - ret, "%c%" PRIu64, *sep, nr_events);
		else
			ret += snprintf(s + ret, size - ret, "%11" PRIu64, nr_events);
	}

	if (symbol_conf.show_total_period) {
		if (sep)
			ret += snprintf(s + ret, size - ret, "%c%" PRIu64, *sep, period);
		else
			ret += snprintf(s + ret, size - ret, " %12" PRIu64, period);
	}

	if (pair_hists) {
		char bf[32];
		double old_percent = 0, new_percent = 0, diff;

		if (total > 0)
			old_percent = (period * 100.0) / total;
		if (session_total > 0)
			new_percent = (self->period * 100.0) / session_total;

		diff = new_percent - old_percent;

		if (fabs(diff) >= 0.01)
			snprintf(bf, sizeof(bf), "%+4.2F%%", diff);
		else
			snprintf(bf, sizeof(bf), " ");

		if (sep)
			ret += snprintf(s + ret, size - ret, "%c%s", *sep, bf);
		else
			ret += snprintf(s + ret, size - ret, "%11.11s", bf);

		if (show_displacement) {
			if (displacement)
				snprintf(bf, sizeof(bf), "%+4ld", displacement);
			else
				snprintf(bf, sizeof(bf), " ");

			if (sep)
				ret += snprintf(s + ret, size - ret, "%c%s", *sep, bf);
			else
				ret += snprintf(s + ret, size - ret, "%6.6s", bf);
		}
	}

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		if (se->elide)
			continue;

		ret += snprintf(s + ret, size - ret, "%s", sep ?: " ");
		ret += se->se_snprintf(self, s + ret, size - ret,
				       hists__col_len(hists, se->se_width_idx));
	}

	return ret;
}

int hist_entry__fprintf(struct hist_entry *he, size_t size, struct hists *hists,
			struct hists *pair_hists, bool show_displacement,
			long displacement, FILE *fp, u64 session_total)
{
	char bf[512];

	if (size == 0 || size > sizeof(bf))
		size = sizeof(bf);

	hist_entry__snprintf(he, bf, size, hists, pair_hists,
			     show_displacement, displacement,
			     true, session_total);
	return fprintf(fp, "%s\n", bf);
}

static size_t hist_entry__fprintf_callchain(struct hist_entry *self,
					    struct hists *hists, FILE *fp,
					    u64 session_total)
{
	int left_margin = 0;

	if (sort__first_dimension == SORT_COMM) {
		struct sort_entry *se = list_first_entry(&hist_entry__sort_list,
							 typeof(*se), list);
		left_margin = hists__col_len(hists, se->se_width_idx);
		left_margin -= thread__comm_len(self->thread);
	}

	return hist_entry_callchain__fprintf(fp, self, session_total,
					     left_margin);
}

size_t hists__fprintf(struct hists *hists, struct hists *pair,
		      bool show_displacement, bool show_header, int max_rows,
		      int max_cols, FILE *fp)
{
	struct sort_entry *se;
	struct rb_node *nd;
	size_t ret = 0;
	unsigned long position = 1;
	long displacement = 0;
	unsigned int width;
	const char *sep = symbol_conf.field_sep;
	const char *col_width = symbol_conf.col_width_list_str;
	int nr_rows = 0;

	init_rem_hits();

	if (!show_header)
		goto print_entries;

	fprintf(fp, "# %s", pair ? "Baseline" : "Overhead");

	if (symbol_conf.show_nr_samples) {
		if (sep)
			fprintf(fp, "%cSamples", *sep);
		else
			fputs(" Samples ", fp);
	}

	if (symbol_conf.show_total_period) {
		if (sep)
			ret += fprintf(fp, "%cPeriod", *sep);
		else
			ret += fprintf(fp, " Period ");
	}

	if (symbol_conf.show_cpu_utilization) {
		if (sep) {
			ret += fprintf(fp, "%csys", *sep);
			ret += fprintf(fp, "%cus", *sep);
			if (perf_guest) {
				ret += fprintf(fp, "%cguest sys", *sep);
				ret += fprintf(fp, "%cguest us", *sep);
			}
		} else {
			ret += fprintf(fp, " sys ");
			ret += fprintf(fp, " us ");
			if (perf_guest) {
				ret += fprintf(fp, " guest sys ");
				ret += fprintf(fp, " guest us ");
			}
		}
	}

	if (pair) {
		if (sep)
			ret += fprintf(fp, "%cDelta", *sep);
		else
			ret += fprintf(fp, " Delta ");

		if (show_displacement) {
			if (sep)
				ret += fprintf(fp, "%cDisplacement", *sep);
			else
				ret += fprintf(fp, " Displ");
		}
	}

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		if (se->elide)
			continue;
		if (sep) {
			fprintf(fp, "%c%s", *sep, se->se_header);
			continue;
		}
		width = strlen(se->se_header);
		if (symbol_conf.col_width_list_str) {
			if (col_width) {
				hists__set_col_len(hists, se->se_width_idx,
						   atoi(col_width));
				col_width = strchr(col_width, ',');
				if (col_width)
					++col_width;
			}
		}
		if (!hists__new_col_len(hists, se->se_width_idx, width))
			width = hists__col_len(hists, se->se_width_idx);
		fprintf(fp, " %*s", width, se->se_header);
	}

	fprintf(fp, "\n");
	if (max_rows && ++nr_rows >= max_rows)
		goto out;

	if (sep)
		goto print_entries;

	fprintf(fp, "# ........");
	if (symbol_conf.show_nr_samples)
		fprintf(fp, " ..........");
	if (symbol_conf.show_total_period)
		fprintf(fp, " ............");
	if (pair) {
		fprintf(fp, " ..........");
		if (show_displacement)
			fprintf(fp, " .....");
	}
	list_for_each_entry(se, &hist_entry__sort_list, list) {
		unsigned int i;

		if (se->elide)
			continue;

		fprintf(fp, "  ");
		width = hists__col_len(hists, se->se_width_idx);
		if (width == 0)
			width = strlen(se->se_header);
		for (i = 0; i < width; i++)
			fprintf(fp, ".");
	}

	fprintf(fp, "\n");
	if (max_rows && ++nr_rows >= max_rows)
		goto out;

	fprintf(fp, "#\n");
	if (max_rows && ++nr_rows >= max_rows)
		goto out;

print_entries:
	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (h->filtered)
			continue;

		if (show_displacement) {
			if (h->pair != NULL)
				displacement = ((long)h->pair->position -
						(long)position);
			else
				displacement = 0;
			++position;
		}
		ret += hist_entry__fprintf(h, max_cols, hists, pair, show_displacement,
					   displacement, fp, hists->stats.total_period);

		if (symbol_conf.use_callchain)
			ret += hist_entry__fprintf_callchain(h, hists, fp,
							     hists->stats.total_period);
		if (max_rows && ++nr_rows >= max_rows)
			goto out;

		if (h->ms.map == NULL && verbose > 1) {
			__map_groups__fprintf_maps(&h->thread->mg,
						   MAP__FUNCTION, verbose, fp);
			fprintf(fp, "%.10s end\n", graph_dotted_line);
		}
	}
out:
	free(rem_sq_bracket);

	return ret;
}

/*
 * See hists__fprintf to match the column widths
 */
unsigned int hists__sort_list_width(struct hists *hists)
{
	struct sort_entry *se;
	int ret = 9; /* total % */

	if (symbol_conf.show_cpu_utilization) {
		ret += 7; /* count_sys % */
		ret += 6; /* count_us % */
		if (perf_guest) {
			ret += 13; /* count_guest_sys % */
			ret += 12; /* count_guest_us % */
		}
	}

	if (symbol_conf.show_nr_samples)
		ret += 11;

	if (symbol_conf.show_total_period)
		ret += 13;

	list_for_each_entry(se, &hist_entry__sort_list, list)
		if (!se->elide)
			ret += 2 + hists__col_len(hists, se->se_width_idx);

	if (verbose) /* Addr + origin */
		ret += 3 + BITS_PER_LONG / 4;

	return ret;
}

static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	++hists->nr_entries;
	if (h->ms.unfolded)
		hists->nr_entries += h->nr_rows;
	h->row_offset = 0;
	hists->stats.total_period += h->period;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] += h->nr_events;

	hists__calc_col_len(hists, h);
}

void hists__filter_by_dso(struct hists *hists, const struct dso *dso)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (dso != NULL && (h->ms.map == NULL || h->ms.map->dso != dso)) {
			h->filtered |= (1 << HIST_FILTER__DSO);
			continue;
		}

		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
	}
}

void hists__filter_by_thread(struct hists *hists, const struct thread *thread)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (thread != NULL && h->thread != thread) {
			h->filtered |= (1 << HIST_FILTER__THREAD);
			continue;
		}

		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
	}
}

int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
{
	return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
}

int hist_entry__annotate(struct hist_entry *he, size_t privsize)
{
	return symbol__annotate(he->ms.sym, he->ms.map, privsize);
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	++hists->stats.nr_events[0];
	++hists->stats.nr_events[type];
}

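/*
 * Slot 0 of nr_events[] doubles as the grand total: it is bumped for
 * every event while nr_events[type] breaks the count out per
 * PERF_RECORD_* type, which is what hists__fprintf_nr_events() below
 * iterates over.
 */
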
size_t hists__fprintf_nr_events(struct hists *hists, FILE *fp)
{
	int i;
	size_t ret = 0;

	for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
		const char *name;

		if (hists->stats.nr_events[i] == 0)
			continue;

		name = perf_event__name(i);
		if (!strcmp(name, "UNKNOWN"))
			continue;

		ret += fprintf(fp, "%16s events: %10d\n", name,
			       hists->stats.nr_events[i]);
	}

	return ret;
}

void hists__init(struct hists *hists)
{
	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT;
	hists->entries = RB_ROOT;
	pthread_mutex_init(&hists->lock, NULL);
}
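
/*
 * A fresh struct hists must pass through hists__init() before the first
 * __hists__add_entry() call: it zeroes the stats, points entries_in at
 * slot 0 of the double buffer and initializes the lock that the add and
 * rotate paths rely on. Minimal sketch (the stack variable is just for
 * illustration):
 *
 *	struct hists hists;
 *
 *	hists__init(&hists);
 *	... feed samples via __hists__add_entry(&hists, ...) ...
 *	hists__collapse_resort(&hists);
 *	hists__output_resort(&hists);
 */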