// SPDX-License-Identifier: GPL-2.0
#include "map_symbol.h"
#include "mem-events.h"
#include "namespaces.h"
#include "block-info.h"
#include "ui/progress.h"
#include <sys/param.h>
#include <linux/rbtree.h>
#include <linux/string.h>
#include <linux/time64.h>
#include <linux/zalloc.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he);
u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose > 0)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 8);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.ms.sym) {
			symlen = (int)h->branch_info->from.ms.sym->namelen + 4;
			if (verbose > 0)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.ms.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.ms.sym) {
			symlen = (int)h->branch_info->to.ms.sym->namelen + 4;
			if (verbose > 0)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.ms.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}

		if (h->branch_info->srcline_from)
			hists__new_col_len(hists, HISTC_SRCLINE_FROM,
					   strlen(h->branch_info->srcline_from));
		if (h->branch_info->srcline_to)
			hists__new_col_len(hists, HISTC_SRCLINE_TO,
					   strlen(h->branch_info->srcline_to));
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.ms.sym) {
			symlen = (int)h->mem_info->daddr.ms.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen + 1);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen);
		}

		if (h->mem_info->iaddr.ms.sym) {
			symlen = (int)h->mem_info->iaddr.ms.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		}

		if (h->mem_info->daddr.ms.map) {
			symlen = dso__name_len(h->mem_info->daddr.ms.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}

		hists__new_col_len(hists, HISTC_MEM_PHYS_DADDR,
				   unresolved_col_width + 4 + 2);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_CGROUP_ID, 20);
	hists__new_col_len(hists, HISTC_CPU, 3);
	hists__new_col_len(hists, HISTC_SOCKET, 6);
	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
	if (symbol_conf.nanosecs)
		hists__new_col_len(hists, HISTC_TIME, 16);
	else
		hists__new_col_len(hists, HISTC_TIME, 12);

	if (h->srcline) {
		len = MAX(strlen(h->srcline), strlen(sort_srcline.se_header));
		hists__new_col_len(hists, HISTC_SRCLINE, len);
	}

	if (h->srcfile)
		hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));

	if (h->transaction)
		hists__new_col_len(hists, HISTC_TRANSACTION,
				   hist_entry__transaction_len());

	if (h->trace_output)
		hists__new_col_len(hists, HISTC_TRACE, strlen(h->trace_output));
}
void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}

static void he_stat__add_cpumode_period(struct he_stat *he_stat,
					unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he_stat->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he_stat->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he_stat->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he_stat->period_guest_us += period;
		break;
	default:
		break;
	}
}
static long hist_time(unsigned long htime)
{
	unsigned long time_quantum = symbol_conf.time_quantum;

	if (time_quantum)
		return (htime / time_quantum) * time_quantum;
	return htime;
}
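/*
 * hist_time() truncates a sample timestamp down to the start of its
 * time_quantum bucket (e.g. with a quantum of 100, timestamps 150 and 199
 * both map to 100), so entries sorted on time collect whole buckets rather
 * than one entry per raw timestamp.
 */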
static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{
	he_stat->period		+= period;
	he_stat->weight		+= weight;
	he_stat->nr_events	+= 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period		+= src->period;
	dest->period_sys	+= src->period_sys;
	dest->period_us		+= src->period_us;
	dest->period_guest_sys	+= src->period_guest_sys;
	dest->period_guest_us	+= src->period_guest_us;
	dest->nr_events		+= src->nr_events;
	dest->weight		+= src->weight;
}

static void he_stat__decay(struct he_stat *he_stat)
{
	he_stat->period = (he_stat->period * 7) / 8;
	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}
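/*
 * Each decay round keeps 7/8 of the accumulated period (integer math),
 * e.g. 1000 -> 875 -> 765 -> 669 ...  Live consumers such as 'perf top'
 * trigger this via hists__decay_entries() so that stale samples gradually
 * stop dominating the sort order and fully decayed entries get deleted.
 */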
static void hists__delete_entry(struct hists *hists, struct hist_entry *he);

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;
	u64 diff;

	if (prev_period == 0)
		return true;

	he_stat__decay(&he->stat);
	if (symbol_conf.cumulate_callchain)
		he_stat__decay(he->stat_acc);
	decay_callchain(he->callchain);

	diff = prev_period - he->stat.period;

	if (!he->depth) {
		hists->stats.total_period -= diff;
		if (!he->filtered)
			hists->stats.total_non_filtered_period -= diff;
	}

	if (!he->leaf) {
		struct hist_entry *child;
		struct rb_node *node = rb_first_cached(&he->hroot_out);

		while (node) {
			child = rb_entry(node, struct hist_entry, rb_node);
			node = rb_next(node);

			if (hists__decay_entry(hists, child))
				hists__delete_entry(hists, child);
		}
	}

	return he->stat.period == 0;
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
	struct rb_root_cached *root_in;
	struct rb_root_cached *root_out;

	if (he->parent_he) {
		root_in  = &he->parent_he->hroot_in;
		root_out = &he->parent_he->hroot_out;
	} else {
		if (hists__has(hists, need_collapse))
			root_in = &hists->entries_collapsed;
		else
			root_in = hists->entries_in;
		root_out = &hists->entries;
	}

	rb_erase_cached(&he->rb_node_in, root_in);
	rb_erase_cached(&he->rb_node, root_out);

	--hists->nr_entries;
	if (!he->filtered)
		--hists->nr_non_filtered_entries;

	hist_entry__delete(he);
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n))) {
			hists__delete_entry(hists, n);
		}
	}
}

void hists__delete_entries(struct hists *hists)
{
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		hists__delete_entry(hists, n);
	}
}

struct hist_entry *hists__get_entry(struct hists *hists, int idx)
{
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct hist_entry *n;
	int i = 0;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);

		if (i == idx)
			return n;

		next = rb_next(&n->rb_node);
		i++;
	}

	return NULL;
}

/*
 * histogram, sorted on item, collects periods
 */
static int hist_entry__init(struct hist_entry *he,
			    struct hist_entry *template,
			    bool sample_self,
			    size_t callchain_size)
{
	*he = *template;
	he->callchain_size = callchain_size;

	if (symbol_conf.cumulate_callchain) {
		he->stat_acc = malloc(sizeof(he->stat));
		if (he->stat_acc == NULL)
			return -ENOMEM;
		memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
		if (!sample_self)
			memset(&he->stat, 0, sizeof(he->stat));
	}

	map__get(he->ms.map);

	if (he->branch_info) {
		/*
		 * This branch info is (a part of) allocated from
		 * sample__resolve_bstack() and will be freed after
		 * adding new entries.  So we need to save a copy.
		 */
		he->branch_info = malloc(sizeof(*he->branch_info));
		if (he->branch_info == NULL)
			goto err;

		memcpy(he->branch_info, template->branch_info,
		       sizeof(*he->branch_info));

		map__get(he->branch_info->from.ms.map);
		map__get(he->branch_info->to.ms.map);
	}

	if (he->mem_info) {
		map__get(he->mem_info->iaddr.ms.map);
		map__get(he->mem_info->daddr.ms.map);
	}

	if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
		callchain_init(he->callchain);

	if (he->raw_data) {
		he->raw_data = memdup(he->raw_data, he->raw_size);
		if (he->raw_data == NULL)
			goto err_infos;
	}

	if (he->srcline) {
		he->srcline = strdup(he->srcline);
		if (he->srcline == NULL)
			goto err_rawdata;
	}

	if (symbol_conf.res_sample) {
		he->res_samples = calloc(sizeof(struct res_sample),
					 symbol_conf.res_sample);
		if (!he->res_samples)
			goto err_srcline;
	}

	INIT_LIST_HEAD(&he->pairs.node);
	thread__get(he->thread);
	he->hroot_in  = RB_ROOT_CACHED;
	he->hroot_out = RB_ROOT_CACHED;

	if (!symbol_conf.report_hierarchy)
		he->leaf = true;

	return 0;

err_srcline:
	zfree(&he->srcline);

err_rawdata:
	zfree(&he->raw_data);

err_infos:
	if (he->branch_info) {
		map__put(he->branch_info->from.ms.map);
		map__put(he->branch_info->to.ms.map);
		zfree(&he->branch_info);
	}
	if (he->mem_info) {
		map__put(he->mem_info->iaddr.ms.map);
		map__put(he->mem_info->daddr.ms.map);
	}
err:
	map__zput(he->ms.map);
	zfree(&he->stat_acc);
	return -ENOMEM;
}
static void *hist_entry__zalloc(size_t size)
{
	return zalloc(size + sizeof(struct hist_entry));
}

static void hist_entry__free(void *ptr)
{
	free(ptr);
}

static struct hist_entry_ops default_ops = {
	.new	= hist_entry__zalloc,
	.free	= hist_entry__free,
};

static struct hist_entry *hist_entry__new(struct hist_entry *template,
					   bool sample_self)
{
	struct hist_entry_ops *ops = template->ops;
	size_t callchain_size = 0;
	struct hist_entry *he;
	int err = 0;

	if (!ops)
		ops = template->ops = &default_ops;

	if (symbol_conf.use_callchain)
		callchain_size = sizeof(struct callchain_root);

	he = ops->new(callchain_size);
	if (he) {
		err = hist_entry__init(he, template, sample_self, callchain_size);
		if (err) {
			ops->free(he);
			he = NULL;
		}
	}
	return he;
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

static void hist_entry__add_callchain_period(struct hist_entry *he, u64 period)
{
	if (!hist_entry__has_callchains(he) || !symbol_conf.use_callchain)
		return;

	he->hists->callchain_period += period;
	if (!he->filtered)
		he->hists->callchain_non_filtered_period += period;
}
static struct hist_entry *hists__findnew_entry(struct hists *hists,
					       struct hist_entry *entry,
					       struct addr_location *al,
					       bool sample_self)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	u64 period = entry->stat.period;
	u64 weight = entry->stat.weight;
	bool leftmost = true;

	p = &hists->entries_in->rb_root.rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in a same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			if (sample_self) {
				he_stat__add_period(&he->stat, period, weight);
				hist_entry__add_callchain_period(he, period);
			}
			if (symbol_conf.cumulate_callchain)
				he_stat__add_period(he->stat_acc, period, weight);

			/*
			 * This mem info was allocated from sample__resolve_mem
			 * and will not be used anymore.
			 */
			mem_info__zput(entry->mem_info);

			block_info__zput(entry->block_info);

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				map__put(he->ms.map);
				he->ms.map = map__get(entry->ms.map);
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	he = hist_entry__new(entry, sample_self);
	if (!he)
		return NULL;

	if (sample_self)
		hist_entry__add_callchain_period(he, period);
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color_cached(&he->rb_node_in, hists->entries_in, leftmost);
out:
	if (sample_self)
		he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
	if (symbol_conf.cumulate_callchain)
		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
	return he;
}
static unsigned random_max(unsigned high)
{
	unsigned thresh = -high % high;

	for (;;) {
		unsigned r = random();

		if (r >= thresh)
			return r % high;
	}
}
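/*
 * random_max() avoids modulo bias: values below 'thresh' are rejected and
 * re-drawn so that every result in [0, high) is equally likely.  E.g. for
 * high == 10, thresh is ((unsigned)-10) % 10 == 6, so raw values 0..5 are
 * thrown away before taking r % high.
 */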
static void hists__res_sample(struct hist_entry *he, struct perf_sample *sample)
{
	struct res_sample *r;
	int j;

	if (he->num_res < symbol_conf.res_sample) {
		j = he->num_res++;
	} else {
		j = random_max(symbol_conf.res_sample);
	}
	r = &he->res_samples[j];
	r->time = sample->time;
	r->cpu = sample->cpu;
	r->tid = sample->tid;
}

static struct hist_entry *
__hists__add_entry(struct hists *hists,
		   struct addr_location *al,
		   struct symbol *sym_parent,
		   struct branch_info *bi,
		   struct mem_info *mi,
		   struct block_info *block_info,
		   struct perf_sample *sample,
		   bool sample_self,
		   struct hist_entry_ops *ops)
{
	struct namespaces *ns = thread__namespaces(al->thread);
	struct hist_entry entry = {
		.thread	= al->thread,
		.comm = thread__comm(al->thread),
		.cgroup_id = {
			.dev = ns ? ns->link_info[CGROUP_NS_INDEX].dev : 0,
			.ino = ns ? ns->link_info[CGROUP_NS_INDEX].ino : 0,
		},
		.ms = {
			.maps	= al->maps,
			.map	= al->map,
			.sym	= al->sym,
		},
		.srcline = (char *) al->srcline,
		.socket	 = al->socket,
		.cpu	 = al->cpu,
		.cpumode = al->cpumode,
		.ip	 = al->addr,
		.level	 = al->level,
		.stat = {
			.nr_events = 1,
			.period	= sample->period,
			.weight = sample->weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
		.hists	= hists,
		.branch_info = bi,
		.mem_info = mi,
		.block_info = block_info,
		.transaction = sample->transaction,
		.raw_data = sample->raw_data,
		.raw_size = sample->raw_size,
		.ops = ops,
		.time = hist_time(sample->time),
	}, *he = hists__findnew_entry(hists, &entry, al, sample_self);

	if (!hists->has_callchains && he && he->callchain_size != 0)
		hists->has_callchains = true;
	if (he && symbol_conf.res_sample)
		hists__res_sample(he, sample);
	return he;
}
struct hist_entry *hists__add_entry(struct hists *hists,
				    struct addr_location *al,
				    struct symbol *sym_parent,
				    struct branch_info *bi,
				    struct mem_info *mi,
				    struct perf_sample *sample,
				    bool sample_self)
{
	return __hists__add_entry(hists, al, sym_parent, bi, mi, NULL,
				  sample, sample_self, NULL);
}

struct hist_entry *hists__add_entry_ops(struct hists *hists,
					struct hist_entry_ops *ops,
					struct addr_location *al,
					struct symbol *sym_parent,
					struct branch_info *bi,
					struct mem_info *mi,
					struct perf_sample *sample,
					bool sample_self)
{
	return __hists__add_entry(hists, al, sym_parent, bi, mi, NULL,
				  sample, sample_self, ops);
}

struct hist_entry *hists__add_entry_block(struct hists *hists,
					  struct addr_location *al,
					  struct block_info *block_info)
{
	struct hist_entry entry = {
		.block_info = block_info,
		.hists = hists,
		.ms = {
			.maps = al->maps,
			.map = al->map,
			.sym = al->sym,
		},
	}, *he = hists__findnew_entry(hists, &entry, al, false);

	return he;
}

static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
		    struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
			struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_sample *sample = iter->sample;
	struct mem_info *mi;

	mi = sample__resolve_mem(sample, al);
	if (mi == NULL)
		return -ENOMEM;

	iter->priv = mi;
	return 0;
}

static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	u64 cost;
	struct mem_info *mi = iter->priv;
	struct hists *hists = evsel__hists(iter->evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	if (mi == NULL)
		return -EINVAL;

	cost = sample->weight;
	if (!cost)
		cost = 1;

	/*
	 * must pass period=weight in order to get the correct
	 * sorting from hists__collapse_resort() which is solely
	 * based on periods. We want sorting be done on nr_events * weight
	 * and this is indirectly achieved by passing period=weight here
	 * and the he_stat__add_period() function.
	 */
	sample->period = cost;

	he = hists__add_entry(hists, al, iter->parent, NULL, mi,
			      sample, true);
	if (!he)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
		      struct addr_location *al __maybe_unused)
{
	struct evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he = iter->he;
	int err = -EINVAL;

	if (he == NULL)
		goto out;

	hists__inc_nr_samples(hists, he->filtered);

	err = hist_entry__append_callchain(he, iter->sample);

out:
	/*
	 * We don't need to free iter->priv (mem_info) here since the mem info
	 * was either already freed in hists__findnew_entry() or passed to a
	 * new hist entry by hist_entry__new().
	 */
	iter->priv = NULL;

	iter->he = NULL;
	return err;
}
static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_sample *sample = iter->sample;

	bi = sample__resolve_bstack(sample, al);
	if (!bi)
		return -ENOMEM;

	iter->curr = 0;
	iter->total = sample->branch_stack->nr;

	iter->priv = bi;
	return 0;
}

static int
iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused,
			     struct addr_location *al __maybe_unused)
{
	/* to avoid calling callback function */
	iter->he = NULL;

	return 0;
}

static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi = iter->priv;
	int i = iter->curr;

	if (bi == NULL)
		return 0;

	if (iter->curr >= iter->total)
		return 0;

	al->maps = bi[i].to.ms.maps;
	al->map = bi[i].to.ms.map;
	al->sym = bi[i].to.ms.sym;
	al->addr = bi[i].to.addr;
	return 1;
}

static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he = NULL;
	int i = iter->curr;
	int err = 0;

	bi = iter->priv;

	if (iter->hide_unresolved && !(bi[i].from.ms.sym && bi[i].to.ms.sym))
		goto out;

	/*
	 * The report shows the percentage of total branches captured
	 * and not events sampled. Thus we use a pseudo period of 1.
	 */
	sample->period = 1;
	sample->weight = bi->flags.cycles ? bi->flags.cycles : 1;

	he = hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
			      sample, true);
	if (he == NULL)
		return -ENOMEM;

	hists__inc_nr_samples(hists, he->filtered);

out:
	iter->he = he;
	iter->curr++;
	return err;
}

static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return iter->curr >= iter->total ? 0 : -1;
}
static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
			  struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
			      sample, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	struct hist_entry *he = iter->he;
	struct evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;

	if (he == NULL)
		return 0;

	iter->he = NULL;

	hists__inc_nr_samples(evsel__hists(evsel), he->filtered);

	return hist_entry__append_callchain(he, sample);
}
static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
			      struct addr_location *al __maybe_unused)
{
	struct hist_entry **he_cache;

	callchain_cursor_commit(&callchain_cursor);

	/*
	 * This is for detecting cycles or recursions so that they're
	 * cumulated only one time to prevent entries more than 100%
	 * overhead.
	 */
	he_cache = malloc(sizeof(*he_cache) * (callchain_cursor.nr + 1));
	if (he_cache == NULL)
		return -ENOMEM;

	iter->priv = he_cache;
	iter->curr = 0;

	return 0;
}

static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
				 struct addr_location *al)
{
	struct evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	int err = 0;

	he = hists__add_entry(hists, al, iter->parent, NULL, NULL,
			      sample, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	hist_entry__append_callchain(he, sample);

	/*
	 * We need to re-initialize the cursor since callchain_append()
	 * advanced the cursor to the end.
	 */
	callchain_cursor_commit(&callchain_cursor);

	hists__inc_nr_samples(hists, he->filtered);

	return err;
}

static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
			   struct addr_location *al)
{
	struct callchain_cursor_node *node;

	node = callchain_cursor_current(&callchain_cursor);
	if (node == NULL)
		return 0;

	return fill_callchain_info(al, node, iter->hide_unresolved);
}

static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
			       struct addr_location *al)
{
	struct evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	struct hist_entry he_tmp = {
		.hists = evsel__hists(evsel),
		.cpu = al->cpu,
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ip = al->addr,
		.ms = {
			.maps = al->maps,
			.map = al->map,
			.sym = al->sym,
		},
		.srcline = (char *) al->srcline,
		.parent = iter->parent,
		.raw_data = sample->raw_data,
		.raw_size = sample->raw_size,
	};
	int i;
	struct callchain_cursor cursor;

	callchain_cursor_snapshot(&cursor, &callchain_cursor);

	callchain_cursor_advance(&callchain_cursor);

	/*
	 * Check if there's duplicate entries in the callchain.
	 * It's possible that it has cycles or recursive calls.
	 */
	for (i = 0; i < iter->curr; i++) {
		if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
			/* to avoid calling callback function */
			iter->he = NULL;
			return 0;
		}
	}

	he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
			      sample, false);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
		callchain_append(he->callchain, &cursor, sample->period);
	return 0;
}

static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return 0;
}
const struct hist_iter_ops hist_iter_mem = {
	.prepare_entry		= iter_prepare_mem_entry,
	.add_single_entry	= iter_add_single_mem_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
	.prepare_entry		= iter_prepare_branch_entry,
	.add_single_entry	= iter_add_single_branch_entry,
	.next_entry		= iter_next_branch_entry,
	.add_next_entry		= iter_add_next_branch_entry,
	.finish_entry		= iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
	.prepare_entry		= iter_prepare_normal_entry,
	.add_single_entry	= iter_add_single_normal_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
	.prepare_entry		= iter_prepare_cumulative_entry,
	.add_single_entry	= iter_add_single_cumulative_entry,
	.next_entry		= iter_next_cumulative_entry,
	.add_next_entry		= iter_add_next_cumulative_entry,
	.finish_entry		= iter_finish_cumulative_entry,
};
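/*
 * A hist_entry_iter walks one sample through the ops above in a fixed
 * order: prepare_entry(), add_single_entry(), then a next_entry()/
 * add_next_entry() loop (per branch-stack or callchain node), and finally
 * finish_entry().  See hist_entry_iter__add() below.
 */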
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
			 int max_stack_depth, void *arg)
{
	int err, err2;
	struct map *alm = NULL;

	if (al)
		alm = map__get(al->map);

	err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
					iter->evsel, al, max_stack_depth);
	if (err) {
		map__put(alm);
		return err;
	}

	err = iter->ops->prepare_entry(iter, al);
	if (err)
		goto out;

	err = iter->ops->add_single_entry(iter, al);
	if (err)
		goto out;

	if (iter->he && iter->add_entry_cb) {
		err = iter->add_entry_cb(iter, al, true, arg);
		if (err)
			goto out;
	}

	while (iter->ops->next_entry(iter, al)) {
		err = iter->ops->add_next_entry(iter, al);
		if (err)
			break;

		if (iter->he && iter->add_entry_cb) {
			err = iter->add_entry_cb(iter, al, false, arg);
			if (err)
				goto out;
		}
	}

out:
	err2 = iter->ops->finish_entry(iter, al);
	if (!err)
		err = err2;

	map__put(alm);

	return err;
}

int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct hists *hists = left->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    !perf_hpp__defined_dynamic_entry(fmt, hists))
			continue;

		cmp = fmt->cmp(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct hists *hists = left->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    !perf_hpp__defined_dynamic_entry(fmt, hists))
			continue;

		cmp = fmt->collapse(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}
void hist_entry__delete(struct hist_entry *he)
{
	struct hist_entry_ops *ops = he->ops;

	thread__zput(he->thread);
	map__zput(he->ms.map);

	if (he->branch_info) {
		map__zput(he->branch_info->from.ms.map);
		map__zput(he->branch_info->to.ms.map);
		free_srcline(he->branch_info->srcline_from);
		free_srcline(he->branch_info->srcline_to);
		zfree(&he->branch_info);
	}

	if (he->mem_info) {
		map__zput(he->mem_info->iaddr.ms.map);
		map__zput(he->mem_info->daddr.ms.map);
		mem_info__zput(he->mem_info);
	}

	if (he->block_info)
		block_info__zput(he->block_info);

	zfree(&he->res_samples);
	zfree(&he->stat_acc);
	free_srcline(he->srcline);
	if (he->srcfile && he->srcfile[0])
		zfree(&he->srcfile);
	free_callchain(he->callchain);
	zfree(&he->trace_output);
	zfree(&he->raw_data);
	ops->free(he);
}

/*
 * If this is not the last column, then we need to pad it according to the
 * pre-calculated max length for this column, otherwise don't bother adding
 * spaces because that would break viewing this with, for instance, 'less',
 * that would show tons of trailing spaces when a long C++ demangled method
 * name is sampled.
 */
int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
				   struct perf_hpp_fmt *fmt, int printed)
{
	if (!list_is_last(&fmt->list, &he->hists->hpp_list->fields)) {
		const int width = fmt->width(fmt, hpp, he->hists);

		if (printed < width) {
			advance_hpp(hpp, printed);
			printed = scnprintf(hpp->buf, hpp->size, "%-*s", width - printed, " ");
		}
	}

	return printed;
}
/*
 * collapse the histogram
 */

static void hists__apply_filters(struct hists *hists, struct hist_entry *he);
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *he,
				       enum hist_filter type);
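/*
 * Resorting happens in two passes: hists__collapse_resort() merges entries
 * that compare equal on the collapse keys into hists->entries_collapsed
 * (when collapsing is needed), then output_resort() rebuilds hists->entries
 * in the final display order.
 */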
typedef bool (*fmt_chk_fn)(struct perf_hpp_fmt *fmt);

static bool check_thread_entry(struct perf_hpp_fmt *fmt)
{
	return perf_hpp__is_thread_entry(fmt) || perf_hpp__is_comm_entry(fmt);
}

static void hist_entry__check_and_remove_filter(struct hist_entry *he,
						enum hist_filter type,
						fmt_chk_fn check)
{
	struct perf_hpp_fmt *fmt;
	bool type_match = false;
	struct hist_entry *parent = he->parent_he;

	switch (type) {
	case HIST_FILTER__THREAD:
		if (symbol_conf.comm_list == NULL &&
		    symbol_conf.pid_list == NULL &&
		    symbol_conf.tid_list == NULL)
			return;
		break;
	case HIST_FILTER__DSO:
		if (symbol_conf.dso_list == NULL)
			return;
		break;
	case HIST_FILTER__SYMBOL:
		if (symbol_conf.sym_list == NULL)
			return;
		break;
	case HIST_FILTER__PARENT:
	case HIST_FILTER__GUEST:
	case HIST_FILTER__HOST:
	case HIST_FILTER__SOCKET:
	case HIST_FILTER__C2C:
	default:
		return;
	}

	/* if it's filtered by own fmt, it has to have filter bits */
	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
		if (check(fmt)) {
			type_match = true;
			break;
		}
	}

	if (type_match) {
		/*
		 * If the filter is for current level entry, propagate
		 * filter marker to parents.  The marker bit was
		 * already set by default so it only needs to clear
		 * non-filtered entries.
		 */
		if (!(he->filtered & (1 << type))) {
			while (parent) {
				parent->filtered &= ~(1 << type);
				parent = parent->parent_he;
			}
		}
	} else {
		/*
		 * If current entry doesn't have matching formats, set
		 * filter marker for upper level entries.  it will be
		 * cleared if its lower level entries is not filtered.
		 *
		 * For lower-level entries, it inherits parent's
		 * filter bit so that lower level entries of a
		 * non-filtered entry won't set the filter marker.
		 */
		if (parent == NULL)
			he->filtered |= (1 << type);
		else
			he->filtered |= (parent->filtered & (1 << type));
	}
}

static void hist_entry__apply_hierarchy_filters(struct hist_entry *he)
{
	hist_entry__check_and_remove_filter(he, HIST_FILTER__THREAD,
					    check_thread_entry);

	hist_entry__check_and_remove_filter(he, HIST_FILTER__DSO,
					    perf_hpp__is_dso_entry);

	hist_entry__check_and_remove_filter(he, HIST_FILTER__SYMBOL,
					    perf_hpp__is_sym_entry);

	hists__apply_filters(he->hists, he);
}
static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
						 struct rb_root_cached *root,
						 struct hist_entry *he,
						 struct hist_entry *parent_he,
						 struct perf_hpp_list *hpp_list)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter, *new;
	struct perf_hpp_fmt *fmt;
	int64_t cmp;
	bool leftmost = true;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = 0;
		perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
			cmp = fmt->collapse(fmt, iter, he);
			if (cmp)
				break;
		}

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);
			return iter;
		}

		if (cmp < 0)
			p = &parent->rb_left;
		else {
			p = &parent->rb_right;
			leftmost = false;
		}
	}

	new = hist_entry__new(he, true);
	if (new == NULL)
		return NULL;

	hists->nr_entries++;

	/* save related format list for output */
	new->hpp_list = hpp_list;
	new->parent_he = parent_he;

	hist_entry__apply_hierarchy_filters(new);

	/* some fields are now passed to 'new' */
	perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
		if (perf_hpp__is_trace_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
			he->trace_output = NULL;
		else
			new->trace_output = NULL;

		if (perf_hpp__is_srcline_entry(fmt))
			he->srcline = NULL;
		else
			new->srcline = NULL;

		if (perf_hpp__is_srcfile_entry(fmt))
			he->srcfile = NULL;
		else
			new->srcfile = NULL;
	}

	rb_link_node(&new->rb_node_in, parent, p);
	rb_insert_color_cached(&new->rb_node_in, root, leftmost);
	return new;
}

static int hists__hierarchy_insert_entry(struct hists *hists,
					 struct rb_root_cached *root,
					 struct hist_entry *he)
{
	struct perf_hpp_list_node *node;
	struct hist_entry *new_he = NULL;
	struct hist_entry *parent = NULL;
	int depth = 0;
	int ret = 0;

	list_for_each_entry(node, &hists->hpp_formats, list) {
		/* skip period (overhead) and elided columns */
		if (node->level == 0 || node->skip)
			continue;

		/* insert copy of 'he' for each fmt into the hierarchy */
		new_he = hierarchy_insert_entry(hists, root, he, parent, &node->hpp);
		if (new_he == NULL) {
			ret = -1;
			break;
		}

		root = &new_he->hroot_in;
		new_he->depth = depth++;
		parent = new_he;
	}

	if (new_he) {
		new_he->leaf = true;

		if (hist_entry__has_callchains(new_he) &&
		    symbol_conf.use_callchain) {
			callchain_cursor_reset(&callchain_cursor);
			if (callchain_merge(&callchain_cursor,
					    new_he->callchain,
					    he->callchain) < 0)
				ret = -1;
		}
	}

	/* 'he' is no longer used */
	hist_entry__delete(he);

	/* return 0 (or -1) since it already applied filters */
	return ret;
}
static int hists__collapse_insert_entry(struct hists *hists,
					struct rb_root_cached *root,
					struct hist_entry *he)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;
	bool leftmost = true;

	if (symbol_conf.report_hierarchy)
		return hists__hierarchy_insert_entry(hists, root, he);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			int ret = 0;

			he_stat__add_stat(&iter->stat, &he->stat);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_stat(iter->stat_acc, he->stat_acc);

			if (hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				if (callchain_merge(&callchain_cursor,
						    iter->callchain,
						    he->callchain) < 0)
					ret = -1;
			}
			hist_entry__delete(he);
			return ret;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color_cached(&he->rb_node_in, root, leftmost);
	return 1;
}

struct rb_root_cached *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root_cached *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}
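/*
 * entries_in_array[] double-buffers the input tree: new samples keep being
 * added to hists->entries_in while the collapse pass consumes the tree
 * returned here, so live consumers can resort without blocking insertion.
 */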
static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
	hists__filter_entry_by_socket(hists, he);
}

int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root_cached *root;
	struct rb_node *next;
	struct hist_entry *n;
	int ret;

	if (!hists__has(hists, need_collapse))
		return 0;

	hists->nr_entries = 0;

	root = hists__get_rotate_entries_in(hists);

	next = rb_first_cached(root);

	while (next) {
		if (session_done())
			break;
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase_cached(&n->rb_node_in, root);
		ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n);
		if (ret < 0)
			return -1;

		if (ret) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
		if (prog)
			ui_progress__update(prog, 1);
	}
	return 0;
}

static int64_t hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
	struct hists *hists = a->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__should_skip(fmt, a->hists))
			continue;

		cmp = fmt->sort(fmt, a, b);
		if (cmp)
			break;
	}

	return cmp;
}

static void hists__reset_filter_stats(struct hists *hists)
{
	hists->nr_non_filtered_entries = 0;
	hists->stats.total_non_filtered_period = 0;
}

void hists__reset_stats(struct hists *hists)
{
	hists->nr_entries = 0;
	hists->stats.total_period = 0;

	hists__reset_filter_stats(hists);
}

static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
	hists->nr_non_filtered_entries++;
	hists->stats.total_non_filtered_period += h->stat.period;
}

void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered)
		hists__inc_filter_stats(hists, h);

	hists->nr_entries++;
	hists->stats.total_period += h->stat.period;
}

static void hierarchy_recalc_total_periods(struct hists *hists)
{
	struct rb_node *node;
	struct hist_entry *he;

	node = rb_first_cached(&hists->entries);

	hists->stats.total_period = 0;
	hists->stats.total_non_filtered_period = 0;

	/*
	 * recalculate total period using top-level entries only
	 * since lower level entries only see non-filtered entries
	 * but upper level entries have sum of both entries.
	 */
	while (node) {
		he = rb_entry(node, struct hist_entry, rb_node);
		node = rb_next(node);

		hists->stats.total_period += he->stat.period;
		if (!he->filtered)
			hists->stats.total_non_filtered_period += he->stat.period;
	}
}
static void hierarchy_insert_output_entry(struct rb_root_cached *root,
					  struct hist_entry *he)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct perf_hpp_fmt *fmt;
	bool leftmost = true;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &parent->rb_left;
		else {
			p = &parent->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color_cached(&he->rb_node, root, leftmost);

	/* update column width of dynamic entry */
	perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt))
			fmt->sort(fmt, he, NULL);
	}
}

static void hists__hierarchy_output_resort(struct hists *hists,
					   struct ui_progress *prog,
					   struct rb_root_cached *root_in,
					   struct rb_root_cached *root_out,
					   u64 min_callchain_hits,
					   bool use_callchain)
{
	struct rb_node *node;
	struct hist_entry *he;

	*root_out = RB_ROOT_CACHED;
	node = rb_first_cached(root_in);

	while (node) {
		he = rb_entry(node, struct hist_entry, rb_node_in);
		node = rb_next(node);

		hierarchy_insert_output_entry(root_out, he);

		if (prog)
			ui_progress__update(prog, 1);

		hists->nr_entries++;
		if (!he->filtered) {
			hists->nr_non_filtered_entries++;
			hists__calc_col_len(hists, he);
		}

		if (!he->leaf) {
			hists__hierarchy_output_resort(hists, prog,
						       &he->hroot_in,
						       &he->hroot_out,
						       min_callchain_hits,
						       use_callchain);
			continue;
		}

		if (!use_callchain)
			continue;

		if (callchain_param.mode == CHAIN_GRAPH_REL) {
			u64 total = he->stat.period;

			if (symbol_conf.cumulate_callchain)
				total = he->stat_acc->period;

			min_callchain_hits = total * (callchain_param.min_percent / 100);
		}

		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);
	}
}
static void __hists__insert_output_entry(struct rb_root_cached *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits,
					 bool use_callchain)
{
	struct rb_node **p = &entries->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct perf_hpp_fmt *fmt;
	bool leftmost = true;

	if (use_callchain) {
		if (callchain_param.mode == CHAIN_GRAPH_REL) {
			u64 total = he->stat.period;

			if (symbol_conf.cumulate_callchain)
				total = he->stat_acc->period;

			min_callchain_hits = total * (callchain_param.min_percent / 100);
		}
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);
	}

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color_cached(&he->rb_node, entries, leftmost);

	perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    perf_hpp__defined_dynamic_entry(fmt, he->hists))
			fmt->sort(fmt, he, NULL);  /* update column width */
	}
}

static void output_resort(struct hists *hists, struct ui_progress *prog,
			  bool use_callchain, hists__resort_cb_t cb,
			  void *cb_arg)
{
	struct rb_root_cached *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 callchain_total;
	u64 min_callchain_hits;

	callchain_total = hists->callchain_period;
	if (symbol_conf.filter_relative)
		callchain_total = hists->callchain_non_filtered_period;

	min_callchain_hits = callchain_total * (callchain_param.min_percent / 100);

	hists__reset_stats(hists);
	hists__reset_col_len(hists);

	if (symbol_conf.report_hierarchy) {
		hists__hierarchy_output_resort(hists, prog,
					       &hists->entries_collapsed,
					       &hists->entries,
					       min_callchain_hits,
					       use_callchain);
		hierarchy_recalc_total_periods(hists);
		return;
	}

	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first_cached(root);
	hists->entries = RB_ROOT_CACHED;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		if (cb && cb(n, cb_arg))
			continue;

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
		hists__inc_stats(hists, n);

		if (!n->filtered)
			hists__calc_col_len(hists, n);

		if (prog)
			ui_progress__update(prog, 1);
	}
}
void perf_evsel__output_resort_cb(struct evsel *evsel, struct ui_progress *prog,
				  hists__resort_cb_t cb, void *cb_arg)
{
	bool use_callchain;

	if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
		use_callchain = evsel__has_callchain(evsel);
	else
		use_callchain = symbol_conf.use_callchain;

	use_callchain |= symbol_conf.show_branchflag_count;

	output_resort(evsel__hists(evsel), prog, use_callchain, cb, cb_arg);
}

void perf_evsel__output_resort(struct evsel *evsel, struct ui_progress *prog)
{
	return perf_evsel__output_resort_cb(evsel, prog, NULL, NULL);
}

void hists__output_resort(struct hists *hists, struct ui_progress *prog)
{
	output_resort(hists, prog, symbol_conf.use_callchain, NULL, NULL);
}

void hists__output_resort_cb(struct hists *hists, struct ui_progress *prog,
			     hists__resort_cb_t cb)
{
	output_resort(hists, prog, symbol_conf.use_callchain, cb, NULL);
}

static bool can_goto_child(struct hist_entry *he, enum hierarchy_move_dir hmd)
{
	if (he->leaf || hmd == HMD_FORCE_SIBLING)
		return false;

	if (he->unfolded || hmd == HMD_FORCE_CHILD)
		return true;

	return false;
}

struct rb_node *rb_hierarchy_last(struct rb_node *node)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	while (can_goto_child(he, HMD_NORMAL)) {
		node = rb_last(&he->hroot_out.rb_root);
		he = rb_entry(node, struct hist_entry, rb_node);
	}
	return node;
}

struct rb_node *__rb_hierarchy_next(struct rb_node *node, enum hierarchy_move_dir hmd)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	if (can_goto_child(he, hmd))
		node = rb_first_cached(&he->hroot_out);
	else
		node = rb_next(node);

	while (node == NULL) {
		he = he->parent_he;
		if (he == NULL)
			break;

		node = rb_next(&he->rb_node);
	}
	return node;
}

struct rb_node *rb_hierarchy_prev(struct rb_node *node)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	node = rb_prev(node);
	if (node)
		return rb_hierarchy_last(node);

	he = he->parent_he;
	if (he == NULL)
		return NULL;

	return &he->rb_node;
}

bool hist_entry__has_hierarchy_children(struct hist_entry *he, float limit)
{
	struct rb_node *node;
	struct hist_entry *child;
	float percent;

	if (he->leaf)
		return false;

	node = rb_first_cached(&he->hroot_out);
	child = rb_entry(node, struct hist_entry, rb_node);

	while (node && child->filtered) {
		node = rb_next(node);
		child = rb_entry(node, struct hist_entry, rb_node);
	}

	if (node)
		percent = hist_entry__get_percent_limit(child);
	else
		percent = 0;

	return node && percent >= limit;
}
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);

	if (symbol_conf.report_hierarchy) {
		struct hist_entry *parent = h->parent_he;

		while (parent) {
			he_stat__add_stat(&parent->stat, &h->stat);

			parent->filtered &= ~(1 << filter);

			if (parent->filtered)
				goto next;

			/* force fold unfiltered entry for simplicity */
			parent->unfolded = false;
			parent->has_no_entry = false;
			parent->row_offset = 0;
			parent->nr_rows = 0;
next:
			parent = parent->parent_he;
		}
	}

	if (h->filtered)
		return;

	/* force fold unfiltered entry for simplicity */
	h->unfolded = false;
	h->has_no_entry = false;
	h->row_offset = 0;
	h->nr_rows = 0;

	hists->stats.nr_non_filtered_samples += h->stat.nr_events;

	hists__inc_filter_stats(hists, h);
	hists__calc_col_len(hists, h);
}

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he)
{
	if ((hists->socket_filter > -1) &&
	    (he->socket != hists->socket_filter)) {
		he->filtered |= (1 << HIST_FILTER__SOCKET);
		return true;
	}

	return false;
}
typedef bool (*filter_fn_t)(struct hists *hists, struct hist_entry *he);

static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t filter)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first_cached(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (filter(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, type);
	}
}

static void resort_filtered_entry(struct rb_root_cached *root,
				  struct hist_entry *he)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct rb_root_cached new_root = RB_ROOT_CACHED;
	struct rb_node *nd;
	bool leftmost = true;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color_cached(&he->rb_node, root, leftmost);

	if (he->leaf || he->filtered)
		return;

	nd = rb_first_cached(&he->hroot_out);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		nd = rb_next(nd);
		rb_erase_cached(&h->rb_node, &he->hroot_out);

		resort_filtered_entry(&new_root, h);
	}

	he->hroot_out = new_root;
}

static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg)
{
	struct rb_node *nd;
	struct rb_root_cached new_root = RB_ROOT_CACHED;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	nd = rb_first_cached(&hists->entries);

	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
		int ret;

		ret = hist_entry__filter(h, type, arg);

		/*
		 * case 1. non-matching type
		 * zero out the period, set filter marker and move to child
		 */
		if (ret < 0) {
			memset(&h->stat, 0, sizeof(h->stat));
			h->filtered |= (1 << type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_CHILD);
		}
		/*
		 * case 2. matched type (filter out)
		 * set filter marker and move to next
		 */
		else if (ret == 1) {
			h->filtered |= (1 << type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
		}
		/*
		 * case 3. ok (not filtered)
		 * add period to hists and parents, erase the filter marker
		 * and move to next sibling
		 */
		else {
			hists__remove_entry_filter(hists, h, type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
		}
	}

	hierarchy_recalc_total_periods(hists);

	/*
	 * resort output after applying a new filter since filter in a lower
	 * hierarchy can change periods in a upper hierarchy.
	 */
	nd = rb_first_cached(&hists->entries);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		nd = rb_next(nd);
		rb_erase_cached(&h->rb_node, &hists->entries);

		resort_filtered_entry(&new_root, h);
	}

	hists->entries = new_root;
}
void hists__filter_by_thread(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__THREAD,
					hists->thread_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__THREAD,
				      hists__filter_entry_by_thread);
}

void hists__filter_by_dso(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__DSO,
					hists->dso_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__DSO,
				      hists__filter_entry_by_dso);
}

void hists__filter_by_symbol(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__SYMBOL,
					hists->symbol_filter_str);
	else
		hists__filter_by_type(hists, HIST_FILTER__SYMBOL,
				      hists__filter_entry_by_symbol);
}

void hists__filter_by_socket(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__SOCKET,
					&hists->socket_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__SOCKET,
				      hists__filter_entry_by_socket);
}
void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}
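/* stats->nr_events[0] doubles as the running total across all record types. */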
void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}

void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
	events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
	if (!filtered)
		hists->stats.nr_non_filtered_samples++;
}
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root_cached *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	bool leftmost = true;

	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_root.rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	he = hist_entry__new(pair, true);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		if (symbol_conf.cumulate_callchain)
			memset(he->stat_acc, 0, sizeof(he->stat));
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color_cached(&he->rb_node_in, root, leftmost);
		hists__inc_stats(hists, he);
		he->dummy = true;
	}
out:
	return he;
}

static struct hist_entry *add_dummy_hierarchy_entry(struct hists *hists,
						    struct rb_root_cached *root,
						    struct hist_entry *pair)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	struct perf_hpp_fmt *fmt;
	bool leftmost = true;

	p = &root->rb_root.rb_node;
	while (*p != NULL) {
		int64_t cmp = 0;

		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
			cmp = fmt->collapse(fmt, he, pair);
			if (cmp)
				break;
		}

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &parent->rb_left;
		else {
			p = &parent->rb_right;
			leftmost = false;
		}
	}

	he = hist_entry__new(pair, true);
	if (he) {
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color_cached(&he->rb_node_in, root, leftmost);

		he->dummy = true;
		he->hists = hists;
		memset(&he->stat, 0, sizeof(he->stat));
		hists__inc_stats(hists, he);
	}
out:
	return he;
}
static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (hists__has(hists, need_collapse))
		n = hists->entries_collapsed.rb_root.rb_node;
	else
		n = hists->entries_in->rb_root.rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

static struct hist_entry *hists__find_hierarchy_entry(struct rb_root_cached *root,
						      struct hist_entry *he)
{
	struct rb_node *n = root->rb_root.rb_node;

	while (n) {
		struct hist_entry *iter;
		struct perf_hpp_fmt *fmt;
		int64_t cmp = 0;

		iter = rb_entry(n, struct hist_entry, rb_node_in);
		perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
			cmp = fmt->collapse(fmt, iter, he);
			if (cmp)
				break;
		}

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

static void hists__match_hierarchy(struct rb_root_cached *leader_root,
				   struct rb_root_cached *other_root)
{
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	for (nd = rb_first_cached(leader_root); nd; nd = rb_next(nd)) {
		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_hierarchy_entry(other_root, pos);

		if (pair) {
			hist_entry__add_pair(pair, pos);
			hists__match_hierarchy(&pos->hroot_in, &pair->hroot_in);
		}
	}
}
/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root_cached *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (symbol_conf.report_hierarchy) {
		/* hierarchy report always collapses entries */
		return hists__match_hierarchy(&leader->entries_collapsed,
					      &other->entries_collapsed);
	}

	if (hists__has(leader, need_collapse))
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}

static int hists__link_hierarchy(struct hists *leader_hists,
				 struct hist_entry *parent,
				 struct rb_root_cached *leader_root,
				 struct rb_root_cached *other_root)
{
	struct rb_node *nd;
	struct hist_entry *pos, *leader;

	for (nd = rb_first_cached(other_root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (hist_entry__has_pairs(pos)) {
			bool found = false;

			list_for_each_entry(leader, &pos->pairs.head, pairs.node) {
				if (leader->hists == leader_hists) {
					found = true;
					break;
				}
			}
			if (!found)
				return -1;
		} else {
			leader = add_dummy_hierarchy_entry(leader_hists,
							   leader_root, pos);
			if (leader == NULL)
				return -1;

			/* do not point parent in the pos */
			leader->parent_he = parent;

			hist_entry__add_pair(pos, leader);
		}

		if (!pos->leaf) {
			if (hists__link_hierarchy(leader_hists, leader,
						  &leader->hroot_in,
						  &pos->hroot_in) < 0)
				return -1;
		}
	}
	return 0;
}

/*
 * Look for entries in the other hists that are not present in the leader, if
 * we find them, just add a dummy entry on the leader hists, with period=0,
 * nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root_cached *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (symbol_conf.report_hierarchy) {
		/* hierarchy report always collapses entries */
		return hists__link_hierarchy(leader, NULL,
					     &leader->entries_collapsed,
					     &other->entries_collapsed);
	}

	if (hists__has(other, need_collapse))
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}

int hists__unlink(struct hists *hists)
{
	struct rb_root_cached *root;
	struct rb_node *nd;
	struct hist_entry *pos;

	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);
		list_del_init(&pos->pairs.node);
	}

	return 0;
}
void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
			  struct perf_sample *sample, bool nonany_branch_mode,
			  u64 *total_cycles)
{
	struct branch_info *bi;

	/* If we have branch cycles always annotate them. */
	if (bs && bs->nr && bs->entries[0].flags.cycles) {
		int i;

		bi = sample__resolve_bstack(sample, al);
		if (bi) {
			struct addr_map_symbol *prev = NULL;

			/*
			 * Ignore errors, still want to process the
			 * other entries.
			 *
			 * For non standard branch modes always
			 * force no IPC (prev == NULL)
			 *
			 * Note that perf stores branches reversed from
			 * program order!
			 */
			for (i = bs->nr - 1; i >= 0; i--) {
				addr_map_symbol__account_cycles(&bi[i].from,
					nonany_branch_mode ? NULL : prev,
					bi[i].flags.cycles);
				prev = &bi[i].to;

				if (total_cycles)
					*total_cycles += bi[i].flags.cycles;
			}
			free(bi);
		}
	}
}

size_t perf_evlist__fprintf_nr_events(struct evlist *evlist, FILE *fp)
{
	struct evsel *pos;
	size_t ret = 0;

	evlist__for_each_entry(evlist, pos) {
		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
		ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
	}

	return ret;
}

u64 hists__total_period(struct hists *hists)
{
	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
		hists->stats.total_period;
}
int __hists__scnprintf_title(struct hists *hists, char *bf, size_t size, bool show_freq)
{
	char unit;
	int printed;
	const struct dso *dso = hists->dso_filter;
	struct thread *thread = hists->thread_filter;
	int socket_id = hists->socket_filter;
	unsigned long nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE];
	u64 nr_events = hists->stats.total_period;
	struct evsel *evsel = hists_to_evsel(hists);
	const char *ev_name = perf_evsel__name(evsel);
	char buf[512], sample_freq_str[64] = "";
	size_t buflen = sizeof(buf);
	char ref[30] = " show reference callgraph, ";
	bool enable_ref = false;

	if (symbol_conf.filter_relative) {
		nr_samples = hists->stats.nr_non_filtered_samples;
		nr_events = hists->stats.total_non_filtered_period;
	}

	if (perf_evsel__is_group_event(evsel)) {
		struct evsel *pos;

		perf_evsel__group_desc(evsel, buf, buflen);
		ev_name = buf;

		for_each_group_member(pos, evsel) {
			struct hists *pos_hists = evsel__hists(pos);

			if (symbol_conf.filter_relative) {
				nr_samples += pos_hists->stats.nr_non_filtered_samples;
				nr_events += pos_hists->stats.total_non_filtered_period;
			} else {
				nr_samples += pos_hists->stats.nr_events[PERF_RECORD_SAMPLE];
				nr_events += pos_hists->stats.total_period;
			}
		}
	}

	if (symbol_conf.show_ref_callgraph &&
	    strstr(ev_name, "call-graph=no"))
		enable_ref = true;

	if (show_freq)
		scnprintf(sample_freq_str, sizeof(sample_freq_str), " %d Hz,", evsel->core.attr.sample_freq);

	nr_samples = convert_unit(nr_samples, &unit);
	printed = scnprintf(bf, size,
			    "Samples: %lu%c of event%s '%s',%s%sEvent count (approx.): %" PRIu64,
			    nr_samples, unit, evsel->core.nr_members > 1 ? "s" : "",
			    ev_name, sample_freq_str, enable_ref ? ref : " ", nr_events);

	if (hists->uid_filter_str)
		printed += snprintf(bf + printed, size - printed,
				    ", UID: %s", hists->uid_filter_str);

	if (thread) {
		if (hists__has(hists, thread)) {
			printed += scnprintf(bf + printed, size - printed,
					     ", Thread: %s(%d)",
					     (thread->comm_set ? thread__comm_str(thread) : ""),
					     thread->tid);
		} else {
			printed += scnprintf(bf + printed, size - printed,
					     ", Thread: %s",
					     (thread->comm_set ? thread__comm_str(thread) : ""));
		}
	}
	if (dso)
		printed += scnprintf(bf + printed, size - printed,
				     ", DSO: %s", dso->short_name);
	if (socket_id > -1)
		printed += scnprintf(bf + printed, size - printed,
				     ", Processor Socket: %d", socket_id);

	return printed;
}
int parse_filter_percentage(const struct option *opt __maybe_unused,
			    const char *arg, int unset __maybe_unused)
{
	if (!strcmp(arg, "relative"))
		symbol_conf.filter_relative = true;
	else if (!strcmp(arg, "absolute"))
		symbol_conf.filter_relative = false;
	else {
		pr_debug("Invalid percentage: %s\n", arg);
		return -1;
	}

	return 0;
}

int perf_hist_config(const char *var, const char *value)
{
	if (!strcmp(var, "hist.percentage"))
		return parse_filter_percentage(NULL, value, 0);

	return 0;
}

int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
{
	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT_CACHED;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT_CACHED;
	hists->entries = RB_ROOT_CACHED;
	pthread_mutex_init(&hists->lock, NULL);
	hists->socket_filter = -1;
	hists->hpp_list = hpp_list;
	INIT_LIST_HEAD(&hists->hpp_formats);
	return 0;
}

static void hists__delete_remaining_entries(struct rb_root_cached *root)
{
	struct rb_node *node;
	struct hist_entry *he;

	while (!RB_EMPTY_ROOT(&root->rb_root)) {
		node = rb_first_cached(root);
		rb_erase_cached(node, root);

		he = rb_entry(node, struct hist_entry, rb_node_in);
		hist_entry__delete(he);
	}
}

static void hists__delete_all_entries(struct hists *hists)
{
	hists__delete_entries(hists);
	hists__delete_remaining_entries(&hists->entries_in_array[0]);
	hists__delete_remaining_entries(&hists->entries_in_array[1]);
	hists__delete_remaining_entries(&hists->entries_collapsed);
}

static void hists_evsel__exit(struct evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);
	struct perf_hpp_fmt *fmt, *pos;
	struct perf_hpp_list_node *node, *tmp;

	hists__delete_all_entries(hists);

	list_for_each_entry_safe(node, tmp, &hists->hpp_formats, list) {
		perf_hpp_list__for_each_format_safe(&node->hpp, fmt, pos) {
			list_del_init(&fmt->list);
			free(fmt);
		}
		list_del_init(&node->list);
		free(node);
	}
}

static int hists_evsel__init(struct evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);

	__hists__init(hists, &perf_hpp_list);
	return 0;
}

/*
 * XXX We probably need a hists_evsel__exit() to free the hist_entries
 * stored in the rbtree...
 */
int hists__init(void)
{
	int err = perf_evsel__object_config(sizeof(struct hists_evsel),
					    hists_evsel__init,
					    hists_evsel__exit);
	if (err)
		fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);

	return err;
}

void perf_hpp_list__init(struct perf_hpp_list *list)
{
	INIT_LIST_HEAD(&list->fields);
	INIT_LIST_HEAD(&list->sorts);
}