// SPDX-License-Identifier: GPL-2.0
#include "map_symbol.h"
#include "mem-events.h"
#include "namespaces.h"
#include "ui/progress.h"
#include <sys/param.h>
#include <linux/rbtree.h>
#include <linux/string.h>
#include <linux/time64.h>
#include <linux/zalloc.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he);

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}

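/*
 * Grow the per-column maximum widths so that this entry's symbol, dso,
 * comm, branch and memory fields all fit when the histogram is printed.
 */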
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	if (h->ms.sym) {
		/*
		 * +4 accounts for '[x] ' priv level info
		 * +2 accounts for 0x prefix on raw addresses
		 * +3 accounts for ' y ' symtab origin info
		 */
		symlen = h->ms.sym->namelen + 4;
		if (verbose > 0)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 8);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			if (verbose > 0)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			if (verbose > 0)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}

		if (h->branch_info->srcline_from)
			hists__new_col_len(hists, HISTC_SRCLINE_FROM,
					   strlen(h->branch_info->srcline_from));
		if (h->branch_info->srcline_to)
			hists__new_col_len(hists, HISTC_SRCLINE_TO,
					   strlen(h->branch_info->srcline_to));
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.sym) {
			symlen = (int)h->mem_info->daddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE, symlen + 1);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE, symlen);
		}

		if (h->mem_info->iaddr.sym) {
			symlen = (int)h->mem_info->iaddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
		}

		if (h->mem_info->daddr.map) {
			symlen = dso__name_len(h->mem_info->daddr.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}

		hists__new_col_len(hists, HISTC_MEM_PHYS_DADDR,
				   unresolved_col_width + 4 + 2);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_CGROUP_ID, 20);
	hists__new_col_len(hists, HISTC_CPU, 3);
	hists__new_col_len(hists, HISTC_SOCKET, 6);
	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
	if (symbol_conf.nanosecs)
		hists__new_col_len(hists, HISTC_TIME, 16);
	else
		hists__new_col_len(hists, HISTC_TIME, 12);

	if (h->srcline) {
		len = MAX(strlen(h->srcline), strlen(sort_srcline.se_header));
		hists__new_col_len(hists, HISTC_SRCLINE, len);
	}

	if (h->srcfile)
		hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));

	if (h->transaction)
		hists__new_col_len(hists, HISTC_TRANSACTION,
				   hist_entry__transaction_len());

	if (h->trace_output)
		hists__new_col_len(hists, HISTC_TRACE, strlen(h->trace_output));
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}

static void he_stat__add_cpumode_period(struct he_stat *he_stat,
					unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he_stat->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he_stat->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he_stat->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he_stat->period_guest_us += period;
		break;
	default:
		break;
	}
}

static long hist_time(unsigned long htime)
{
	unsigned long time_quantum = symbol_conf.time_quantum;

	if (time_quantum)
		return (htime / time_quantum) * time_quantum;
	return htime;
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{
	he_stat->period		+= period;
	he_stat->weight		+= weight;
	he_stat->nr_events	+= 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period		+= src->period;
	dest->period_sys	+= src->period_sys;
	dest->period_us		+= src->period_us;
	dest->period_guest_sys	+= src->period_guest_sys;
	dest->period_guest_us	+= src->period_guest_us;
	dest->nr_events		+= src->nr_events;
	dest->weight		+= src->weight;
}

static void he_stat__decay(struct he_stat *he_stat)
{
	he_stat->period = (he_stat->period * 7) / 8;
	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he);

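/*
 * Ageing for live modes such as 'perf top': scale an entry's period down
 * by 7/8 and report whether it decayed to zero so the caller can prune it.
 */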
static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;
	u64 diff;

	if (prev_period == 0)
		return true;

	he_stat__decay(&he->stat);
	if (symbol_conf.cumulate_callchain)
		he_stat__decay(he->stat_acc);
	decay_callchain(he->callchain);

	diff = prev_period - he->stat.period;

	if (!he->depth) {
		hists->stats.total_period -= diff;
		if (!he->filtered)
			hists->stats.total_non_filtered_period -= diff;
	}

	if (!he->leaf) {
		struct hist_entry *child;
		struct rb_node *node = rb_first_cached(&he->hroot_out);

		while (node) {
			child = rb_entry(node, struct hist_entry, rb_node);
			node = rb_next(node);

			if (hists__decay_entry(hists, child))
				hists__delete_entry(hists, child);
		}
	}

	return he->stat.period == 0;
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
	struct rb_root_cached *root_in;
	struct rb_root_cached *root_out;

	if (he->parent_he) {
		root_in  = &he->parent_he->hroot_in;
		root_out = &he->parent_he->hroot_out;
	} else {
		if (hists__has(hists, need_collapse))
			root_in = &hists->entries_collapsed;
		else
			root_in = hists->entries_in;
		root_out = &hists->entries;
	}

	rb_erase_cached(&he->rb_node_in, root_in);
	rb_erase_cached(&he->rb_node, root_out);

	--hists->nr_entries;
	if (!he->filtered)
		--hists->nr_non_filtered_entries;

	hist_entry__delete(he);
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n))) {
			hists__delete_entry(hists, n);
		}
	}
}

void hists__delete_entries(struct hists *hists)
{
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		hists__delete_entry(hists, n);
	}
}

struct hist_entry *hists__get_entry(struct hists *hists, int idx)
{
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct hist_entry *n;
	int i = 0;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (i == idx)
			return n;

		next = rb_next(&n->rb_node);
		i++;
	}

	return NULL;
}

/*
 * histogram, sorted on item, collects periods
 */

static int hist_entry__init(struct hist_entry *he,
			    struct hist_entry *template,
			    bool sample_self,
			    size_t callchain_size)
{
	*he = *template;
	he->callchain_size = callchain_size;

	if (symbol_conf.cumulate_callchain) {
		he->stat_acc = malloc(sizeof(he->stat));
		if (he->stat_acc == NULL)
			return -ENOMEM;
		memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
		if (!sample_self)
			memset(&he->stat, 0, sizeof(he->stat));
	}

	map__get(he->ms.map);

	if (he->branch_info) {
		/*
		 * This branch info is (a part of) allocated from
		 * sample__resolve_bstack() and will be freed after
		 * adding new entries.  So we need to save a copy.
		 */
		he->branch_info = malloc(sizeof(*he->branch_info));
		if (he->branch_info == NULL)
			goto err;

		memcpy(he->branch_info, template->branch_info,
		       sizeof(*he->branch_info));

		map__get(he->branch_info->from.map);
		map__get(he->branch_info->to.map);
	}

	if (he->mem_info) {
		map__get(he->mem_info->iaddr.map);
		map__get(he->mem_info->daddr.map);
	}

	if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
		callchain_init(he->callchain);

	if (he->raw_data) {
		he->raw_data = memdup(he->raw_data, he->raw_size);
		if (he->raw_data == NULL)
			goto err_infos;
	}

	if (he->srcline) {
		he->srcline = strdup(he->srcline);
		if (he->srcline == NULL)
			goto err_rawdata;
	}

	if (symbol_conf.res_sample) {
		he->res_samples = calloc(sizeof(struct res_sample),
					 symbol_conf.res_sample);
		if (!he->res_samples)
			goto err_srcline;
	}

	INIT_LIST_HEAD(&he->pairs.node);
	thread__get(he->thread);
	he->hroot_in  = RB_ROOT_CACHED;
	he->hroot_out = RB_ROOT_CACHED;

	if (!symbol_conf.report_hierarchy)
		he->leaf = true;

	return 0;

err_srcline:
	zfree(&he->srcline);

err_rawdata:
	zfree(&he->raw_data);

err_infos:
	if (he->branch_info) {
		map__put(he->branch_info->from.map);
		map__put(he->branch_info->to.map);
		zfree(&he->branch_info);
	}
	if (he->mem_info) {
		map__put(he->mem_info->iaddr.map);
		map__put(he->mem_info->daddr.map);
	}
err:
	map__zput(he->ms.map);
	zfree(&he->stat_acc);
	return -ENOMEM;
}

static void *hist_entry__zalloc(size_t size)
{
	return zalloc(size + sizeof(struct hist_entry));
}

static void hist_entry__free(void *ptr)
{
	free(ptr);
}

static struct hist_entry_ops default_ops = {
	.new	= hist_entry__zalloc,
	.free	= hist_entry__free,
};

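/*
 * Allocate a hist_entry through the template's ops (or the default
 * zalloc/free pair above) and initialize it from the template.
 */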
static struct hist_entry *hist_entry__new(struct hist_entry *template,
					  bool sample_self)
{
	struct hist_entry_ops *ops = template->ops;
	size_t callchain_size = 0;
	struct hist_entry *he;
	int err = 0;

	if (!ops)
		ops = template->ops = &default_ops;

	if (symbol_conf.use_callchain)
		callchain_size = sizeof(struct callchain_root);

	he = ops->new(callchain_size);
	if (he) {
		err = hist_entry__init(he, template, sample_self, callchain_size);
		if (err) {
			ops->free(he);
			he = NULL;
		}
	}
	return he;
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

static void hist_entry__add_callchain_period(struct hist_entry *he, u64 period)
{
	if (!hist_entry__has_callchains(he) || !symbol_conf.use_callchain)
		return;

	he->hists->callchain_period += period;
	if (!he->filtered)
		he->hists->callchain_non_filtered_period += period;
}

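/*
 * Look up an entry matching 'entry' in the current input rbtree and
 * aggregate the period into it, or allocate and insert a new one when
 * no match is found.
 */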
static struct hist_entry *hists__findnew_entry(struct hists *hists,
					       struct hist_entry *entry,
					       struct addr_location *al,
					       bool sample_self)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	u64 period = entry->stat.period;
	u64 weight = entry->stat.weight;
	bool leftmost = true;

	p = &hists->entries_in->rb_root.rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in a same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			if (sample_self) {
				he_stat__add_period(&he->stat, period, weight);
				hist_entry__add_callchain_period(he, period);
			}
			if (symbol_conf.cumulate_callchain)
				he_stat__add_period(he->stat_acc, period, weight);

			/*
			 * This mem info was allocated from sample__resolve_mem
			 * and will not be used anymore.
			 */
			mem_info__zput(entry->mem_info);

			block_info__zput(entry->block_info);

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				map__put(he->ms.map);
				he->ms.map = map__get(entry->ms.map);
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	he = hist_entry__new(entry, sample_self);
	if (!he)
		return NULL;

	if (sample_self)
		hist_entry__add_callchain_period(he, period);
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color_cached(&he->rb_node_in, hists->entries_in, leftmost);
out:
	if (sample_self)
		he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
	if (symbol_conf.cumulate_callchain)
		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
	return he;
}

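/*
 * Reservoir sampling helpers: random_max() picks an unbiased index in
 * [0, high) and hists__res_sample() keeps up to symbol_conf.res_sample
 * representative samples per entry.
 */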
static unsigned random_max(unsigned high)
{
	unsigned thresh = -high % high;

	for (;;) {
		unsigned r = random();

		if (r >= thresh)
			return r % high;
	}
}

static void hists__res_sample(struct hist_entry *he, struct perf_sample *sample)
{
	struct res_sample *r;
	int j;

	if (he->num_res < symbol_conf.res_sample) {
		j = he->num_res++;
	} else {
		j = random_max(symbol_conf.res_sample);
	}
	r = &he->res_samples[j];
	r->time = sample->time;
	r->cpu = sample->cpu;
	r->tid = sample->tid;
}

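/*
 * Build a template hist_entry on the stack from the resolved sample and
 * hand it to hists__findnew_entry() for aggregation or insertion.
 */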
static struct hist_entry *
__hists__add_entry(struct hists *hists,
		   struct addr_location *al,
		   struct symbol *sym_parent,
		   struct branch_info *bi,
		   struct mem_info *mi,
		   struct block_info *block_info,
		   struct perf_sample *sample,
		   bool sample_self,
		   struct hist_entry_ops *ops)
{
	struct namespaces *ns = thread__namespaces(al->thread);
	struct hist_entry entry = {
		.thread	= al->thread,
		.comm = thread__comm(al->thread),
		.cgroup_id = {
			.dev = ns ? ns->link_info[CGROUP_NS_INDEX].dev : 0,
			.ino = ns ? ns->link_info[CGROUP_NS_INDEX].ino : 0,
		},
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.srcline = (char *) al->srcline,
		.socket	 = al->socket,
		.cpu	 = al->cpu,
		.cpumode = al->cpumode,
		.ip	 = al->addr,
		.level	 = al->level,
		.stat = {
			.nr_events = 1,
			.period	= sample->period,
			.weight = sample->weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
		.hists	= hists,
		.branch_info = bi,
		.mem_info = mi,
		.block_info = block_info,
		.transaction = sample->transaction,
		.raw_data = sample->raw_data,
		.raw_size = sample->raw_size,
		.ops = ops,
		.time = hist_time(sample->time),
	}, *he = hists__findnew_entry(hists, &entry, al, sample_self);

	if (!hists->has_callchains && he && he->callchain_size != 0)
		hists->has_callchains = true;
	if (he && symbol_conf.res_sample)
		hists__res_sample(he, sample);
	return he;
}

struct hist_entry *hists__add_entry(struct hists *hists,
				    struct addr_location *al,
				    struct symbol *sym_parent,
				    struct branch_info *bi,
				    struct mem_info *mi,
				    struct perf_sample *sample,
				    bool sample_self)
{
	return __hists__add_entry(hists, al, sym_parent, bi, mi, NULL,
				  sample, sample_self, NULL);
}

struct hist_entry *hists__add_entry_ops(struct hists *hists,
					struct hist_entry_ops *ops,
					struct addr_location *al,
					struct symbol *sym_parent,
					struct branch_info *bi,
					struct mem_info *mi,
					struct perf_sample *sample,
					bool sample_self)
{
	return __hists__add_entry(hists, al, sym_parent, bi, mi, NULL,
				  sample, sample_self, ops);
}

struct hist_entry *hists__add_entry_block(struct hists *hists,
					  struct addr_location *al,
					  struct block_info *block_info)
{
	struct hist_entry entry = {
		.block_info = block_info,
		.hists = hists,
	}, *he = hists__findnew_entry(hists, &entry, al, false);

	return he;
}

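/*
 * hist_entry_iter callbacks: each sample type (mem, branch, normal,
 * cumulative) provides prepare/add/next/finish hooks that are driven
 * by hist_entry_iter__add() further below.
 */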
static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
		    struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
			struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_sample *sample = iter->sample;
	struct mem_info *mi;

	mi = sample__resolve_mem(sample, al);
	if (mi == NULL)
		return -ENOMEM;

	iter->priv = mi;
	return 0;
}

static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	u64 cost;
	struct mem_info *mi = iter->priv;
	struct hists *hists = evsel__hists(iter->evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	if (mi == NULL)
		return -EINVAL;

	cost = sample->weight;
	if (!cost)
		cost = 1;

	/*
	 * must pass period=weight in order to get the correct
	 * sorting from hists__collapse_resort() which is solely
	 * based on periods. We want sorting be done on nr_events * weight
	 * and this is indirectly achieved by passing period=weight here
	 * and the he_stat__add_period() function.
	 */
	sample->period = cost;

	he = hists__add_entry(hists, al, iter->parent, NULL, mi,
			      sample, true);
	if (!he)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
		      struct addr_location *al __maybe_unused)
{
	struct evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he = iter->he;
	int err = -EINVAL;

	if (he == NULL)
		goto out;

	hists__inc_nr_samples(hists, he->filtered);

	err = hist_entry__append_callchain(he, iter->sample);

out:
	/*
	 * We don't need to free iter->priv (mem_info) here since the mem info
	 * was either already freed in hists__findnew_entry() or passed to a
	 * new hist entry by hist_entry__new().
	 */
	iter->priv = NULL;

	iter->he = NULL;
	return err;
}

static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_sample *sample = iter->sample;

	bi = sample__resolve_bstack(sample, al);
	if (!bi)
		return -ENOMEM;

	iter->curr = 0;
	iter->total = sample->branch_stack->nr;

	iter->priv = bi;
	return 0;
}

static int
iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused,
			     struct addr_location *al __maybe_unused)
{
	/* to avoid calling callback function */
	iter->he = NULL;

	return 0;
}

static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi = iter->priv;
	int i = iter->curr;

	if (bi == NULL)
		return 0;

	if (iter->curr >= iter->total)
		return 0;

	al->map = bi[i].to.map;
	al->sym = bi[i].to.sym;
	al->addr = bi[i].to.addr;
	return 1;
}

static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he = NULL;
	int i = iter->curr;
	int err = 0;

	bi = iter->priv;

	if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
		goto out;

	/*
	 * The report shows the percentage of total branches captured
	 * and not events sampled. Thus we use a pseudo period of 1.
	 */
	sample->period = 1;
	sample->weight = bi->flags.cycles ? bi->flags.cycles : 1;

	he = hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
			      sample, true);
	if (he == NULL)
		return -ENOMEM;

	hists__inc_nr_samples(hists, he->filtered);

out:
	iter->he = he;
	iter->curr++;
	return err;
}

static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return iter->curr >= iter->total ? 0 : -1;
}

static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
			  struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
			      sample, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	struct hist_entry *he = iter->he;
	struct evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;

	if (he == NULL)
		return 0;

	iter->he = NULL;

	hists__inc_nr_samples(evsel__hists(evsel), he->filtered);

	return hist_entry__append_callchain(he, sample);
}

static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
			      struct addr_location *al __maybe_unused)
{
	struct hist_entry **he_cache;

	callchain_cursor_commit(&callchain_cursor);

	/*
	 * This is for detecting cycles or recursions so that they're
	 * cumulated only one time to prevent entries more than 100%
	 * overhead.
	 */
	he_cache = malloc(sizeof(*he_cache) * (callchain_cursor.nr + 1));
	if (he_cache == NULL)
		return -ENOMEM;

	iter->priv = he_cache;
	iter->curr = 0;

	return 0;
}

static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
				 struct addr_location *al)
{
	struct evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	int err = 0;

	he = hists__add_entry(hists, al, iter->parent, NULL, NULL,
			      sample, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	hist_entry__append_callchain(he, sample);

	/*
	 * We need to re-initialize the cursor since callchain_append()
	 * advanced the cursor to the end.
	 */
	callchain_cursor_commit(&callchain_cursor);

	hists__inc_nr_samples(hists, he->filtered);

	return err;
}

static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
			   struct addr_location *al)
{
	struct callchain_cursor_node *node;

	node = callchain_cursor_current(&callchain_cursor);
	if (node == NULL)
		return 0;

	return fill_callchain_info(al, node, iter->hide_unresolved);
}

static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
			       struct addr_location *al)
{
	struct evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	struct hist_entry he_tmp = {
		.hists = evsel__hists(evsel),
		.cpu = al->cpu,
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ip = al->addr,
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.srcline = (char *) al->srcline,
		.parent = iter->parent,
		.raw_data = sample->raw_data,
		.raw_size = sample->raw_size,
	};
	int i;
	struct callchain_cursor cursor;

	callchain_cursor_snapshot(&cursor, &callchain_cursor);

	callchain_cursor_advance(&callchain_cursor);

	/*
	 * Check if there's duplicate entries in the callchain.
	 * It's possible that it has cycles or recursive calls.
	 */
	for (i = 0; i < iter->curr; i++) {
		if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
			/* to avoid calling callback function */
			iter->he = NULL;
			return 0;
		}
	}

	he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
			      sample, false);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
		callchain_append(he->callchain, &cursor, sample->period);
	return 0;
}

static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return 0;
}

const struct hist_iter_ops hist_iter_mem = {
	.prepare_entry 		= iter_prepare_mem_entry,
	.add_single_entry 	= iter_add_single_mem_entry,
	.next_entry 		= iter_next_nop_entry,
	.add_next_entry 	= iter_add_next_nop_entry,
	.finish_entry 		= iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
	.prepare_entry 		= iter_prepare_branch_entry,
	.add_single_entry 	= iter_add_single_branch_entry,
	.next_entry 		= iter_next_branch_entry,
	.add_next_entry 	= iter_add_next_branch_entry,
	.finish_entry 		= iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
	.prepare_entry 		= iter_prepare_normal_entry,
	.add_single_entry 	= iter_add_single_normal_entry,
	.next_entry 		= iter_next_nop_entry,
	.add_next_entry 	= iter_add_next_nop_entry,
	.finish_entry 		= iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
	.prepare_entry 		= iter_prepare_cumulative_entry,
	.add_single_entry 	= iter_add_single_cumulative_entry,
	.next_entry 		= iter_next_cumulative_entry,
	.add_next_entry 	= iter_add_next_cumulative_entry,
	.finish_entry 		= iter_finish_cumulative_entry,
};

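/*
 * Resolve the sample's callchain, then drive the iterator ops above for a
 * single sample: prepare, add the first entry, walk the remaining entries,
 * and finish, invoking the optional add_entry_cb after each insertion.
 */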
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
			 int max_stack_depth, void *arg)
{
	int err, err2;
	struct map *alm = NULL;

	if (al)
		alm = map__get(al->map);

	err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
					iter->evsel, al, max_stack_depth);
	if (err) {
		map__put(alm);
		return err;
	}

	err = iter->ops->prepare_entry(iter, al);
	if (err)
		goto out;

	err = iter->ops->add_single_entry(iter, al);
	if (err)
		goto out;

	if (iter->he && iter->add_entry_cb) {
		err = iter->add_entry_cb(iter, al, true, arg);
		if (err)
			goto out;
	}

	while (iter->ops->next_entry(iter, al)) {
		err = iter->ops->add_next_entry(iter, al);
		if (err)
			break;

		if (iter->he && iter->add_entry_cb) {
			err = iter->add_entry_cb(iter, al, false, arg);
			if (err)
				goto out;
		}
	}

out:
	err2 = iter->ops->finish_entry(iter, al);
	if (!err)
		err = err2;

	map__put(alm);

	return err;
}

int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct hists *hists = left->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    !perf_hpp__defined_dynamic_entry(fmt, hists))
			continue;

		cmp = fmt->cmp(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct hists *hists = left->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    !perf_hpp__defined_dynamic_entry(fmt, hists))
			continue;

		cmp = fmt->collapse(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}

void hist_entry__delete(struct hist_entry *he)
{
	struct hist_entry_ops *ops = he->ops;

	thread__zput(he->thread);
	map__zput(he->ms.map);

	if (he->branch_info) {
		map__zput(he->branch_info->from.map);
		map__zput(he->branch_info->to.map);
		free_srcline(he->branch_info->srcline_from);
		free_srcline(he->branch_info->srcline_to);
		zfree(&he->branch_info);
	}

	if (he->mem_info) {
		map__zput(he->mem_info->iaddr.map);
		map__zput(he->mem_info->daddr.map);
		mem_info__zput(he->mem_info);
	}

	if (he->block_info)
		block_info__zput(he->block_info);

	zfree(&he->res_samples);
	zfree(&he->stat_acc);
	free_srcline(he->srcline);
	if (he->srcfile && he->srcfile[0])
		zfree(&he->srcfile);
	free_callchain(he->callchain);
	zfree(&he->trace_output);
	zfree(&he->raw_data);
	ops->free(he);
}

/*
 * If this is not the last column, then we need to pad it according to the
 * pre-calculated max length for this column, otherwise don't bother adding
 * spaces because that would break viewing this with, for instance, 'less',
 * that would show tons of trailing spaces when a long C++ demangled method
 * name is sampled.
 */
int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
				   struct perf_hpp_fmt *fmt, int printed)
{
	if (!list_is_last(&fmt->list, &he->hists->hpp_list->fields)) {
		const int width = fmt->width(fmt, hpp, he->hists);

		if (printed < width) {
			advance_hpp(hpp, printed);
			printed = scnprintf(hpp->buf, hpp->size, "%-*s", width - printed, " ");
		}
	}

	return printed;
}

/*
 * collapse the histogram
 */

static void hists__apply_filters(struct hists *hists, struct hist_entry *he);
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *he,
				       enum hist_filter type);

typedef bool (*fmt_chk_fn)(struct perf_hpp_fmt *fmt);

static bool check_thread_entry(struct perf_hpp_fmt *fmt)
{
	return perf_hpp__is_thread_entry(fmt) || perf_hpp__is_comm_entry(fmt);
}

static void hist_entry__check_and_remove_filter(struct hist_entry *he,
						enum hist_filter type,
						fmt_chk_fn check)
{
	struct perf_hpp_fmt *fmt;
	bool type_match = false;
	struct hist_entry *parent = he->parent_he;

	switch (type) {
	case HIST_FILTER__THREAD:
		if (symbol_conf.comm_list == NULL &&
		    symbol_conf.pid_list == NULL &&
		    symbol_conf.tid_list == NULL)
			return;
		break;
	case HIST_FILTER__DSO:
		if (symbol_conf.dso_list == NULL)
			return;
		break;
	case HIST_FILTER__SYMBOL:
		if (symbol_conf.sym_list == NULL)
			return;
		break;
	case HIST_FILTER__PARENT:
	case HIST_FILTER__GUEST:
	case HIST_FILTER__HOST:
	case HIST_FILTER__SOCKET:
	case HIST_FILTER__C2C:
	default:
		return;
	}

	/* if it's filtered by own fmt, it has to have filter bits */
	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
		if (check(fmt)) {
			type_match = true;
			break;
		}
	}

	if (type_match) {
		/*
		 * If the filter is for current level entry, propagate
		 * filter marker to parents.  The marker bit was
		 * already set by default so it only needs to clear
		 * non-filtered entries.
		 */
		if (!(he->filtered & (1 << type))) {
			while (parent) {
				parent->filtered &= ~(1 << type);
				parent = parent->parent_he;
			}
		}
	} else {
		/*
		 * If current entry doesn't have matching formats, set
		 * filter marker for upper level entries.  it will be
		 * cleared if its lower level entries is not filtered.
		 *
		 * For lower-level entries, it inherits parent's
		 * filter bit so that lower level entries of a
		 * non-filtered entry won't set the filter marker.
		 */
		if (parent == NULL)
			he->filtered |= (1 << type);
		else
			he->filtered |= (parent->filtered & (1 << type));
	}
}

static void hist_entry__apply_hierarchy_filters(struct hist_entry *he)
{
	hist_entry__check_and_remove_filter(he, HIST_FILTER__THREAD,
					    check_thread_entry);

	hist_entry__check_and_remove_filter(he, HIST_FILTER__DSO,
					    perf_hpp__is_dso_entry);

	hist_entry__check_and_remove_filter(he, HIST_FILTER__SYMBOL,
					    perf_hpp__is_sym_entry);

	hists__apply_filters(he->hists, he);
}

static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
						 struct rb_root_cached *root,
						 struct hist_entry *he,
						 struct hist_entry *parent_he,
						 struct perf_hpp_list *hpp_list)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter, *new;
	struct perf_hpp_fmt *fmt;
	int64_t cmp;
	bool leftmost = true;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = 0;
		perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
			cmp = fmt->collapse(fmt, iter, he);
			if (cmp)
				break;
		}

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);
			return iter;
		}

		if (cmp < 0)
			p = &parent->rb_left;
		else {
			p = &parent->rb_right;
			leftmost = false;
		}
	}

	new = hist_entry__new(he, true);
	if (new == NULL)
		return NULL;

	hists->nr_entries++;

	/* save related format list for output */
	new->hpp_list = hpp_list;
	new->parent_he = parent_he;

	hist_entry__apply_hierarchy_filters(new);

	/* some fields are now passed to 'new' */
	perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
		if (perf_hpp__is_trace_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
			he->trace_output = NULL;
		else
			new->trace_output = NULL;

		if (perf_hpp__is_srcline_entry(fmt))
			he->srcline = NULL;
		else
			new->srcline = NULL;

		if (perf_hpp__is_srcfile_entry(fmt))
			he->srcfile = NULL;
		else
			new->srcfile = NULL;
	}

	rb_link_node(&new->rb_node_in, parent, p);
	rb_insert_color_cached(&new->rb_node_in, root, leftmost);
	return new;
}

static int hists__hierarchy_insert_entry(struct hists *hists,
					 struct rb_root_cached *root,
					 struct hist_entry *he)
{
	struct perf_hpp_list_node *node;
	struct hist_entry *new_he = NULL;
	struct hist_entry *parent = NULL;
	int depth = 0;
	int ret = 0;

	list_for_each_entry(node, &hists->hpp_formats, list) {
		/* skip period (overhead) and elided columns */
		if (node->level == 0 || node->skip)
			continue;

		/* insert copy of 'he' for each fmt into the hierarchy */
		new_he = hierarchy_insert_entry(hists, root, he, parent, &node->hpp);
		if (new_he == NULL) {
			ret = -1;
			break;
		}

		root = &new_he->hroot_in;
		new_he->depth = depth++;
		parent = new_he;
	}

	if (new_he) {
		new_he->leaf = true;

		if (hist_entry__has_callchains(new_he) &&
		    symbol_conf.use_callchain) {
			callchain_cursor_reset(&callchain_cursor);
			if (callchain_merge(&callchain_cursor,
					    new_he->callchain,
					    he->callchain) < 0)
				ret = -1;
		}
	}

	/* 'he' is no longer used */
	hist_entry__delete(he);

	/* return 0 (or -1) since it already applied filters */
	return ret;
}

static int hists__collapse_insert_entry(struct hists *hists,
					struct rb_root_cached *root,
					struct hist_entry *he)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;
	bool leftmost = true;

	if (symbol_conf.report_hierarchy)
		return hists__hierarchy_insert_entry(hists, root, he);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			int ret = 0;

			he_stat__add_stat(&iter->stat, &he->stat);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_stat(iter->stat_acc, he->stat_acc);

			if (hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				if (callchain_merge(&callchain_cursor,
						    iter->callchain,
						    he->callchain) < 0)
					ret = -1;
			}
			hist_entry__delete(he);
			return ret;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color_cached(&he->rb_node_in, root, leftmost);
	return 1;
}

struct rb_root_cached *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root_cached *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
	hists__filter_entry_by_socket(hists, he);
}

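/*
 * Merge entries that compare equal under the collapse keys into
 * hists->entries_collapsed, rotating the input tree under hists->lock.
 */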
int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root_cached *root;
	struct rb_node *next;
	struct hist_entry *n;
	int ret;

	if (!hists__has(hists, need_collapse))
		return 0;

	hists->nr_entries = 0;

	root = hists__get_rotate_entries_in(hists);

	next = rb_first_cached(root);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase_cached(&n->rb_node_in, root);
		ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n);
		if (ret < 0)
			return -1;

		if (ret) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
		if (prog)
			ui_progress__update(prog, 1);
	}
	return 0;
}

static int64_t hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
	struct hists *hists = a->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__should_skip(fmt, a->hists))
			continue;

		cmp = fmt->sort(fmt, a, b);
		if (cmp)
			break;
	}

	return cmp;
}

static void hists__reset_filter_stats(struct hists *hists)
{
	hists->nr_non_filtered_entries = 0;
	hists->stats.total_non_filtered_period = 0;
}

void hists__reset_stats(struct hists *hists)
{
	hists->nr_entries = 0;
	hists->stats.total_period = 0;

	hists__reset_filter_stats(hists);
}

static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
	hists->nr_non_filtered_entries++;
	hists->stats.total_non_filtered_period += h->stat.period;
}

void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered)
		hists__inc_filter_stats(hists, h);

	hists->nr_entries++;
	hists->stats.total_period += h->stat.period;
}

static void hierarchy_recalc_total_periods(struct hists *hists)
{
	struct rb_node *node;
	struct hist_entry *he;

	node = rb_first_cached(&hists->entries);

	hists->stats.total_period = 0;
	hists->stats.total_non_filtered_period = 0;

	/*
	 * recalculate total period using top-level entries only
	 * since lower level entries only see non-filtered entries
	 * but upper level entries have sum of both entries.
	 */
	while (node) {
		he = rb_entry(node, struct hist_entry, rb_node);
		node = rb_next(node);

		hists->stats.total_period += he->stat.period;
		if (!he->filtered)
			hists->stats.total_non_filtered_period += he->stat.period;
	}
}

static void hierarchy_insert_output_entry(struct rb_root_cached *root,
					  struct hist_entry *he)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct perf_hpp_fmt *fmt;
	bool leftmost = true;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &parent->rb_left;
		else {
			p = &parent->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color_cached(&he->rb_node, root, leftmost);

	/* update column width of dynamic entry */
	perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt))
			fmt->sort(fmt, he, NULL);
	}
}

static void hists__hierarchy_output_resort(struct hists *hists,
					   struct ui_progress *prog,
					   struct rb_root_cached *root_in,
					   struct rb_root_cached *root_out,
					   u64 min_callchain_hits,
					   bool use_callchain)
{
	struct rb_node *node;
	struct hist_entry *he;

	*root_out = RB_ROOT_CACHED;
	node = rb_first_cached(root_in);

	while (node) {
		he = rb_entry(node, struct hist_entry, rb_node_in);
		node = rb_next(node);

		hierarchy_insert_output_entry(root_out, he);

		if (prog)
			ui_progress__update(prog, 1);

		hists->nr_entries++;
		if (!he->filtered) {
			hists->nr_non_filtered_entries++;
			hists__calc_col_len(hists, he);
		}

		if (!he->leaf) {
			hists__hierarchy_output_resort(hists, prog,
						       &he->hroot_in,
						       &he->hroot_out,
						       min_callchain_hits,
						       use_callchain);
			continue;
		}

		if (!use_callchain)
			continue;

		if (callchain_param.mode == CHAIN_GRAPH_REL) {
			u64 total = he->stat.period;

			if (symbol_conf.cumulate_callchain)
				total = he->stat_acc->period;

			min_callchain_hits = total * (callchain_param.min_percent / 100);
		}

		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);
	}
}

static void __hists__insert_output_entry(struct rb_root_cached *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits,
					 bool use_callchain)
{
	struct rb_node **p = &entries->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct perf_hpp_fmt *fmt;
	bool leftmost = true;

	if (use_callchain) {
		if (callchain_param.mode == CHAIN_GRAPH_REL) {
			u64 total = he->stat.period;

			if (symbol_conf.cumulate_callchain)
				total = he->stat_acc->period;

			min_callchain_hits = total * (callchain_param.min_percent / 100);
		}
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);
	}

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color_cached(&he->rb_node, entries, leftmost);

	perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    perf_hpp__defined_dynamic_entry(fmt, he->hists))
			fmt->sort(fmt, he, NULL);	/* update column width */
	}
}

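/*
 * Sort the (collapsed) entries into hists->entries for output, updating
 * stats and column widths, with an optional per-entry callback to skip
 * entries.
 */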
static void output_resort(struct hists *hists, struct ui_progress *prog,
			  bool use_callchain, hists__resort_cb_t cb,
			  void *cb_arg)
{
	struct rb_root_cached *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 callchain_total;
	u64 min_callchain_hits;

	callchain_total = hists->callchain_period;
	if (symbol_conf.filter_relative)
		callchain_total = hists->callchain_non_filtered_period;

	min_callchain_hits = callchain_total * (callchain_param.min_percent / 100);

	hists__reset_stats(hists);
	hists__reset_col_len(hists);

	if (symbol_conf.report_hierarchy) {
		hists__hierarchy_output_resort(hists, prog,
					       &hists->entries_collapsed,
					       &hists->entries,
					       min_callchain_hits,
					       use_callchain);
		hierarchy_recalc_total_periods(hists);
		return;
	}

	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first_cached(root);
	hists->entries = RB_ROOT_CACHED;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		if (cb && cb(n, cb_arg))
			continue;

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
		hists__inc_stats(hists, n);

		if (!n->filtered)
			hists__calc_col_len(hists, n);

		if (prog)
			ui_progress__update(prog, 1);
	}
}

void perf_evsel__output_resort_cb(struct evsel *evsel, struct ui_progress *prog,
				  hists__resort_cb_t cb, void *cb_arg)
{
	bool use_callchain;

	if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
		use_callchain = evsel__has_callchain(evsel);
	else
		use_callchain = symbol_conf.use_callchain;

	use_callchain |= symbol_conf.show_branchflag_count;

	output_resort(evsel__hists(evsel), prog, use_callchain, cb, cb_arg);
}

void perf_evsel__output_resort(struct evsel *evsel, struct ui_progress *prog)
{
	return perf_evsel__output_resort_cb(evsel, prog, NULL, NULL);
}

void hists__output_resort(struct hists *hists, struct ui_progress *prog)
{
	output_resort(hists, prog, symbol_conf.use_callchain, NULL, NULL);
}

void hists__output_resort_cb(struct hists *hists, struct ui_progress *prog,
			     hists__resort_cb_t cb)
{
	output_resort(hists, prog, symbol_conf.use_callchain, cb, NULL);
}

static bool can_goto_child(struct hist_entry *he, enum hierarchy_move_dir hmd)
{
	if (he->leaf || hmd == HMD_FORCE_SIBLING)
		return false;

	if (he->unfolded || hmd == HMD_FORCE_CHILD)
		return true;

	return false;
}

struct rb_node *rb_hierarchy_last(struct rb_node *node)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	while (can_goto_child(he, HMD_NORMAL)) {
		node = rb_last(&he->hroot_out.rb_root);
		he = rb_entry(node, struct hist_entry, rb_node);
	}
	return node;
}

struct rb_node *__rb_hierarchy_next(struct rb_node *node, enum hierarchy_move_dir hmd)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	if (can_goto_child(he, hmd))
		node = rb_first_cached(&he->hroot_out);
	else
		node = rb_next(node);

	while (node == NULL) {
		he = he->parent_he;
		if (he == NULL)
			break;

		node = rb_next(&he->rb_node);
	}
	return node;
}

struct rb_node *rb_hierarchy_prev(struct rb_node *node)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	node = rb_prev(node);
	if (node)
		return rb_hierarchy_last(node);

	he = he->parent_he;
	if (he == NULL)
		return NULL;

	return &he->rb_node;
}

bool hist_entry__has_hierarchy_children(struct hist_entry *he, float limit)
{
	struct rb_node *node;
	struct hist_entry *child;
	float percent;

	if (he->leaf)
		return false;

	node = rb_first_cached(&he->hroot_out);
	child = rb_entry(node, struct hist_entry, rb_node);

	while (node && child->filtered) {
		node = rb_next(node);
		child = rb_entry(node, struct hist_entry, rb_node);
	}

	if (node)
		percent = hist_entry__get_percent_limit(child);
	else
		percent = 0;

	return node && percent >= limit;
}

static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);

	if (symbol_conf.report_hierarchy) {
		struct hist_entry *parent = h->parent_he;

		while (parent) {
			he_stat__add_stat(&parent->stat, &h->stat);

			parent->filtered &= ~(1 << filter);

			if (parent->filtered)
				goto next;

			/* force fold unfiltered entry for simplicity */
			parent->unfolded = false;
			parent->has_no_entry = false;
			parent->row_offset = 0;
			parent->nr_rows = 0;
next:
			parent = parent->parent_he;
		}
	}

	if (h->filtered)
		return;

	/* force fold unfiltered entry for simplicity */
	h->unfolded = false;
	h->has_no_entry = false;
	h->row_offset = 0;
	h->nr_rows = 0;

	hists->stats.nr_non_filtered_samples += h->stat.nr_events;

	hists__inc_filter_stats(hists, h);
	hists__calc_col_len(hists, h);
}

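/*
 * Per-entry filter predicates: each returns true (and sets the matching
 * filter bit) when the entry does not match the active dso/thread/symbol/
 * socket filter.
 */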
static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he)
{
	if ((hists->socket_filter > -1) &&
	    (he->socket != hists->socket_filter)) {
		he->filtered |= (1 << HIST_FILTER__SOCKET);
		return true;
	}

	return false;
}

typedef bool (*filter_fn_t)(struct hists *hists, struct hist_entry *he);

static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t filter)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first_cached(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (filter(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, type);
	}
}

static void resort_filtered_entry(struct rb_root_cached *root,
				  struct hist_entry *he)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct rb_root_cached new_root = RB_ROOT_CACHED;
	struct rb_node *nd;
	bool leftmost = true;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color_cached(&he->rb_node, root, leftmost);

	if (he->leaf || he->filtered)
		return;

	nd = rb_first_cached(&he->hroot_out);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		nd = rb_next(nd);
		rb_erase_cached(&h->rb_node, &he->hroot_out);

		resort_filtered_entry(&new_root, h);
	}

	he->hroot_out = new_root;
}

static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg)
{
	struct rb_node *nd;
	struct rb_root_cached new_root = RB_ROOT_CACHED;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	nd = rb_first_cached(&hists->entries);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
		int ret;

		ret = hist_entry__filter(h, type, arg);

		/*
		 * case 1. non-matching type
		 * zero out the period, set filter marker and move to child
		 */
		if (ret < 0) {
			memset(&h->stat, 0, sizeof(h->stat));
			h->filtered |= (1 << type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_CHILD);
		}
		/*
		 * case 2. matched type (filter out)
		 * set filter marker and move to next
		 */
		else if (ret == 1) {
			h->filtered |= (1 << type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
		}
		/*
		 * case 3. ok (not filtered)
		 * add period to hists and parents, erase the filter marker
		 * and move to next sibling
		 */
		else {
			hists__remove_entry_filter(hists, h, type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
		}
	}

	hierarchy_recalc_total_periods(hists);

	/*
	 * resort output after applying a new filter since filter in a lower
	 * hierarchy can change periods in a upper hierarchy.
	 */
	nd = rb_first_cached(&hists->entries);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		nd = rb_next(nd);
		rb_erase_cached(&h->rb_node, &hists->entries);

		resort_filtered_entry(&new_root, h);
	}

	hists->entries = new_root;
}

void hists__filter_by_thread(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__THREAD,
					hists->thread_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__THREAD,
				      hists__filter_entry_by_thread);
}

void hists__filter_by_dso(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__DSO,
					hists->dso_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__DSO,
				      hists__filter_entry_by_dso);
}

void hists__filter_by_symbol(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__SYMBOL,
					hists->symbol_filter_str);
	else
		hists__filter_by_type(hists, HIST_FILTER__SYMBOL,
				      hists__filter_entry_by_symbol);
}

void hists__filter_by_socket(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__SOCKET,
					&hists->socket_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__SOCKET,
				      hists__filter_entry_by_socket);
}

void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}

void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
	events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
	if (!filtered)
		hists->stats.nr_non_filtered_samples++;
}

static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root_cached *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	bool leftmost = true;

	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_root.rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	he = hist_entry__new(pair, true);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		if (symbol_conf.cumulate_callchain)
			memset(he->stat_acc, 0, sizeof(he->stat));
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color_cached(&he->rb_node_in, root, leftmost);
		hists__inc_stats(hists, he);
		he->dummy = true;
	}
out:
	return he;
}

static struct hist_entry *add_dummy_hierarchy_entry(struct hists *hists,
						    struct rb_root_cached *root,
						    struct hist_entry *pair)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	struct perf_hpp_fmt *fmt;
	bool leftmost = true;

	p = &root->rb_root.rb_node;
	while (*p != NULL) {
		int64_t cmp = 0;

		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
			cmp = fmt->collapse(fmt, he, pair);
			if (cmp)
				break;
		}
		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &parent->rb_left;
		else {
			p = &parent->rb_right;
			leftmost = false;
		}
	}

	he = hist_entry__new(pair, true);
	if (he) {
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color_cached(&he->rb_node_in, root, leftmost);

		he->dummy = true;
		he->hists = hists;
		memset(&he->stat, 0, sizeof(he->stat));
		hists__inc_stats(hists, he);
	}
out:
	return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (hists__has(hists, need_collapse))
		n = hists->entries_collapsed.rb_root.rb_node;
	else
		n = hists->entries_in->rb_root.rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

static struct hist_entry *hists__find_hierarchy_entry(struct rb_root_cached *root,
						      struct hist_entry *he)
{
	struct rb_node *n = root->rb_root.rb_node;

	while (n) {
		struct hist_entry *iter;
		struct perf_hpp_fmt *fmt;
		int64_t cmp = 0;

		iter = rb_entry(n, struct hist_entry, rb_node_in);
		perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
			cmp = fmt->collapse(fmt, iter, he);
			if (cmp)
				break;
		}

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

static void hists__match_hierarchy(struct rb_root_cached *leader_root,
				   struct rb_root_cached *other_root)
{
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	for (nd = rb_first_cached(leader_root); nd; nd = rb_next(nd)) {
		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_hierarchy_entry(other_root, pos);

		if (pair) {
			hist_entry__add_pair(pair, pos);
			hists__match_hierarchy(&pos->hroot_in, &pair->hroot_in);
		}
	}
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root_cached *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (symbol_conf.report_hierarchy) {
		/* hierarchy report always collapses entries */
		return hists__match_hierarchy(&leader->entries_collapsed,
					      &other->entries_collapsed);
	}

	if (hists__has(leader, need_collapse))
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}

static int hists__link_hierarchy(struct hists *leader_hists,
				 struct hist_entry *parent,
				 struct rb_root_cached *leader_root,
				 struct rb_root_cached *other_root)
{
	struct rb_node *nd;
	struct hist_entry *pos, *leader;

	for (nd = rb_first_cached(other_root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (hist_entry__has_pairs(pos)) {
			bool found = false;

			list_for_each_entry(leader, &pos->pairs.head, pairs.node) {
				if (leader->hists == leader_hists) {
					found = true;
					break;
				}
			}
			if (!found)
				return -1;
		} else {
			leader = add_dummy_hierarchy_entry(leader_hists,
							   leader_root, pos);
			if (leader == NULL)
				return -1;

			/* do not point parent in the pos */
			leader->parent_he = parent;

			hist_entry__add_pair(pos, leader);
		}

		if (!pos->leaf) {
			if (hists__link_hierarchy(leader_hists, leader,
						  &leader->hroot_in,
						  &pos->hroot_in) < 0)
				return -1;
		}
	}
	return 0;
}

/*
 * Look for entries in the other hists that are not present in the leader, if
 * we find them, just add a dummy entry on the leader hists, with period=0,
 * nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root_cached *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (symbol_conf.report_hierarchy) {
		/* hierarchy report always collapses entries */
		return hists__link_hierarchy(leader, NULL,
					     &leader->entries_collapsed,
					     &other->entries_collapsed);
	}

	if (hists__has(other, need_collapse))
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}

int hists__unlink(struct hists *hists)
{
	struct rb_root_cached *root;
	struct rb_node *nd;
	struct hist_entry *pos;

	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);
		list_del_init(&pos->pairs.node);
	}

	return 0;
}

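/*
 * Walk the sampled branch stack (stored newest first) and credit cycle
 * counts to the source of each branch for annotation.
 */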
void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
			  struct perf_sample *sample, bool nonany_branch_mode)
{
	struct branch_info *bi;

	/* If we have branch cycles always annotate them. */
	if (bs && bs->nr && bs->entries[0].flags.cycles) {
		int i;

		bi = sample__resolve_bstack(sample, al);
		if (bi) {
			struct addr_map_symbol *prev = NULL;

			/*
			 * Ignore errors, still want to process the
			 * other entries.
			 *
			 * For non standard branch modes always
			 * force no IPC (prev == NULL)
			 *
			 * Note that perf stores branches reversed from
			 * program order!
			 */
			for (i = bs->nr - 1; i >= 0; i--) {
				addr_map_symbol__account_cycles(&bi[i].from,
					nonany_branch_mode ? NULL : prev,
					bi[i].flags.cycles);
				prev = &bi[i].to;
			}
			free(bi);
		}
	}
}

size_t perf_evlist__fprintf_nr_events(struct evlist *evlist, FILE *fp)
{
	struct evsel *pos;
	size_t ret = 0;

	evlist__for_each_entry(evlist, pos) {
		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
		ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
	}

	return ret;
}

u64 hists__total_period(struct hists *hists)
{
	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
		hists->stats.total_period;
}

int __hists__scnprintf_title(struct hists *hists, char *bf, size_t size, bool show_freq)
{
	char unit;
	int printed;
	const struct dso *dso = hists->dso_filter;
	struct thread *thread = hists->thread_filter;
	int socket_id = hists->socket_filter;
	unsigned long nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE];
	u64 nr_events = hists->stats.total_period;
	struct evsel *evsel = hists_to_evsel(hists);
	const char *ev_name = perf_evsel__name(evsel);
	char buf[512], sample_freq_str[64] = "";
	size_t buflen = sizeof(buf);
	char ref[30] = " show reference callgraph, ";
	bool enable_ref = false;

	if (symbol_conf.filter_relative) {
		nr_samples = hists->stats.nr_non_filtered_samples;
		nr_events = hists->stats.total_non_filtered_period;
	}

	if (perf_evsel__is_group_event(evsel)) {
		struct evsel *pos;

		perf_evsel__group_desc(evsel, buf, buflen);
		ev_name = buf;

		for_each_group_member(pos, evsel) {
			struct hists *pos_hists = evsel__hists(pos);

			if (symbol_conf.filter_relative) {
				nr_samples += pos_hists->stats.nr_non_filtered_samples;
				nr_events += pos_hists->stats.total_non_filtered_period;
			} else {
				nr_samples += pos_hists->stats.nr_events[PERF_RECORD_SAMPLE];
				nr_events += pos_hists->stats.total_period;
			}
		}
	}

	if (symbol_conf.show_ref_callgraph &&
	    strstr(ev_name, "call-graph=no"))
		enable_ref = true;

	if (show_freq)
		scnprintf(sample_freq_str, sizeof(sample_freq_str), " %d Hz,", evsel->core.attr.sample_freq);

	nr_samples = convert_unit(nr_samples, &unit);
	printed = scnprintf(bf, size,
			    "Samples: %lu%c of event%s '%s',%s%sEvent count (approx.): %" PRIu64,
			    nr_samples, unit, evsel->core.nr_members > 1 ? "s" : "",
			    ev_name, sample_freq_str, enable_ref ? ref : " ", nr_events);

	if (hists->uid_filter_str)
		printed += snprintf(bf + printed, size - printed,
				    ", UID: %s", hists->uid_filter_str);

	if (thread) {
		if (hists__has(hists, thread)) {
			printed += scnprintf(bf + printed, size - printed,
					     ", Thread: %s(%d)",
					     (thread->comm_set ? thread__comm_str(thread) : ""),
					     thread->tid);
		} else {
			printed += scnprintf(bf + printed, size - printed,
					     ", Thread: %s",
					     (thread->comm_set ? thread__comm_str(thread) : ""));
		}
	}
	if (dso)
		printed += scnprintf(bf + printed, size - printed,
				     ", DSO: %s", dso->short_name);
	if (socket_id > -1)
		printed += scnprintf(bf + printed, size - printed,
				     ", Processor Socket: %d", socket_id);

	return printed;
}

int parse_filter_percentage(const struct option *opt __maybe_unused,
			    const char *arg, int unset __maybe_unused)
{
	if (!strcmp(arg, "relative"))
		symbol_conf.filter_relative = true;
	else if (!strcmp(arg, "absolute"))
		symbol_conf.filter_relative = false;
	else {
		pr_debug("Invalid percentage: %s\n", arg);
		return -1;
	}

	return 0;
}

int perf_hist_config(const char *var, const char *value)
{
	if (!strcmp(var, "hist.percentage"))
		return parse_filter_percentage(NULL, value, 0);

	return 0;
}

int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
{
	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT_CACHED;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT_CACHED;
	hists->entries = RB_ROOT_CACHED;
	pthread_mutex_init(&hists->lock, NULL);
	hists->socket_filter = -1;
	hists->hpp_list = hpp_list;
	INIT_LIST_HEAD(&hists->hpp_formats);
	return 0;
}

static void hists__delete_remaining_entries(struct rb_root_cached *root)
{
	struct rb_node *node;
	struct hist_entry *he;

	while (!RB_EMPTY_ROOT(&root->rb_root)) {
		node = rb_first_cached(root);
		rb_erase_cached(node, root);

		he = rb_entry(node, struct hist_entry, rb_node_in);
		hist_entry__delete(he);
	}
}

static void hists__delete_all_entries(struct hists *hists)
{
	hists__delete_entries(hists);
	hists__delete_remaining_entries(&hists->entries_in_array[0]);
	hists__delete_remaining_entries(&hists->entries_in_array[1]);
	hists__delete_remaining_entries(&hists->entries_collapsed);
}

static void hists_evsel__exit(struct evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);
	struct perf_hpp_fmt *fmt, *pos;
	struct perf_hpp_list_node *node, *tmp;

	hists__delete_all_entries(hists);

	list_for_each_entry_safe(node, tmp, &hists->hpp_formats, list) {
		perf_hpp_list__for_each_format_safe(&node->hpp, fmt, pos) {
			list_del_init(&fmt->list);
			free(fmt);
		}
		list_del_init(&node->list);
		free(node);
	}
}

static int hists_evsel__init(struct evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);

	__hists__init(hists, &perf_hpp_list);
	return 0;
}

/*
 * XXX We probably need a hists_evsel__exit() to free the hist_entries
 * stored in the rbtree...
 */

int hists__init(void)
{
	int err = perf_evsel__object_config(sizeof(struct hists_evsel),
					    hists_evsel__init,
					    hists_evsel__exit);
	if (err)
		fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);

	return err;
}

void perf_hpp_list__init(struct perf_hpp_list *list)
{
	INIT_LIST_HEAD(&list->fields);
	INIT_LIST_HEAD(&list->sorts);
}