// SPDX-License-Identifier: GPL-2.0
#include "map_symbol.h"
#include "mem-events.h"
#include "namespaces.h"
#include "block-info.h"
#include "ui/progress.h"
#include <sys/param.h>
#include <linux/rbtree.h>
#include <linux/string.h>
#include <linux/time64.h>
#include <linux/zalloc.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he);

u16 hists__col_len(struct hists *hists, enum hist_column col)
	return hists->col_len[col];

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
	hists->col_len[col] = len;

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);

void hists__reset_col_len(struct hists *hists)
	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
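
/*
 * Grow the recorded column widths so that every field of this entry
 * (symbol, dso, comm, branch/mem addresses, srcline, ...) fits when the
 * histogram is printed.
 */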
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
		symlen = h->ms.sym->namelen + 4;
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 8);

		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);

		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.ms.sym) {
			symlen = (int)h->branch_info->from.ms.sym->namelen + 4;
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.ms.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);

		if (h->branch_info->to.ms.sym) {
			symlen = (int)h->branch_info->to.ms.sym->namelen + 4;
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.ms.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);

		if (h->branch_info->srcline_from)
			hists__new_col_len(hists, HISTC_SRCLINE_FROM,
					   strlen(h->branch_info->srcline_from));
		if (h->branch_info->srcline_to)
			hists__new_col_len(hists, HISTC_SRCLINE_TO,
					   strlen(h->branch_info->srcline_to));

		if (h->mem_info->daddr.ms.sym) {
			symlen = (int)h->mem_info->daddr.ms.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,

		if (h->mem_info->iaddr.ms.sym) {
			symlen = (int)h->mem_info->iaddr.ms.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,

		if (h->mem_info->daddr.ms.map) {
			symlen = dso__name_len(h->mem_info->daddr.ms.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);

		hists__new_col_len(hists, HISTC_MEM_PHYS_DADDR,
				   unresolved_col_width + 4 + 2);
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);

	hists__new_col_len(hists, HISTC_CGROUP, 6);
	hists__new_col_len(hists, HISTC_CGROUP_ID, 20);
	hists__new_col_len(hists, HISTC_CPU, 3);
	hists__new_col_len(hists, HISTC_SOCKET, 6);
	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
	if (symbol_conf.nanosecs)
		hists__new_col_len(hists, HISTC_TIME, 16);
		hists__new_col_len(hists, HISTC_TIME, 12);

		len = MAX(strlen(h->srcline), strlen(sort_srcline.se_header));
		hists__new_col_len(hists, HISTC_SRCLINE, len);

		hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));

		hists__new_col_len(hists, HISTC_TRANSACTION,
				   hist_entry__transaction_len());

		hists__new_col_len(hists, HISTC_TRACE, strlen(h->trace_output));

		const char *cgrp_name = "unknown";
		struct cgroup *cgrp = cgroup__find(h->ms.maps->machine->env,
			cgrp_name = cgrp->name;

		hists__new_col_len(hists, HISTC_CGROUP, strlen(cgrp_name));
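
/* Recompute the column widths from only the first 'max_rows' output entries. */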
void hists__output_recalc_col_len(struct hists *hists, int max_rows)
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct hist_entry *n;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);

static void he_stat__add_cpumode_period(struct he_stat *he_stat,
					unsigned int cpumode, u64 period)
	case PERF_RECORD_MISC_KERNEL:
		he_stat->period_sys += period;
	case PERF_RECORD_MISC_USER:
		he_stat->period_us += period;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he_stat->period_guest_sys += period;
	case PERF_RECORD_MISC_GUEST_USER:
		he_stat->period_guest_us += period;

static long hist_time(unsigned long htime)
	unsigned long time_quantum = symbol_conf.time_quantum;

	return (htime / time_quantum) * time_quantum;

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
	he_stat->period		+= period;
	he_stat->weight		+= weight;
	he_stat->nr_events	+= 1;

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
	dest->period		+= src->period;
	dest->period_sys	+= src->period_sys;
	dest->period_us		+= src->period_us;
	dest->period_guest_sys	+= src->period_guest_sys;
	dest->period_guest_us	+= src->period_guest_us;
	dest->nr_events		+= src->nr_events;
	dest->weight		+= src->weight;

static void he_stat__decay(struct he_stat *he_stat)
	he_stat->period = (he_stat->period * 7) / 8;
	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
	/* XXX need decay for weight too? */
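
/*
 * Ageing: each decay pass keeps 7/8 of an entry's period, so in continuously
 * updating views entries that stop getting new samples gradually fade out
 * and are deleted once their period reaches zero.
 */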
static void hists__delete_entry(struct hists *hists, struct hist_entry *he);

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
	u64 prev_period = he->stat.period;

	if (prev_period == 0)

	he_stat__decay(&he->stat);
	if (symbol_conf.cumulate_callchain)
		he_stat__decay(he->stat_acc);
	decay_callchain(he->callchain);

	diff = prev_period - he->stat.period;

	hists->stats.total_period -= diff;
		hists->stats.total_non_filtered_period -= diff;

		struct hist_entry *child;
		struct rb_node *node = rb_first_cached(&he->hroot_out);
			child = rb_entry(node, struct hist_entry, rb_node);
			node = rb_next(node);

			if (hists__decay_entry(hists, child))
				hists__delete_entry(hists, child);

	return he->stat.period == 0;

static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
	struct rb_root_cached *root_in;
	struct rb_root_cached *root_out;

		root_in  = &he->parent_he->hroot_in;
		root_out = &he->parent_he->hroot_out;
		if (hists__has(hists, need_collapse))
			root_in = &hists->entries_collapsed;
			root_in = hists->entries_in;
		root_out = &hists->entries;

	rb_erase_cached(&he->rb_node_in, root_in);
	rb_erase_cached(&he->rb_node, root_out);

		--hists->nr_non_filtered_entries;

	hist_entry__delete(he);

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct hist_entry *n;

		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n))) {
			hists__delete_entry(hists, n);

void hists__delete_entries(struct hists *hists)
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct hist_entry *n;

		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		hists__delete_entry(hists, n);

struct hist_entry *hists__get_entry(struct hists *hists, int idx)
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct hist_entry *n;

		n = rb_entry(next, struct hist_entry, rb_node);

		next = rb_next(&n->rb_node);
/*
 * histogram, sorted on item, collects periods
 */

static int hist_entry__init(struct hist_entry *he,
			    struct hist_entry *template,
			    size_t callchain_size)
	he->callchain_size = callchain_size;

	if (symbol_conf.cumulate_callchain) {
		he->stat_acc = malloc(sizeof(he->stat));
		if (he->stat_acc == NULL)
		memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
			memset(&he->stat, 0, sizeof(he->stat));

	map__get(he->ms.map);

	if (he->branch_info) {
		/*
		 * This branch info is (a part of) allocated from
		 * sample__resolve_bstack() and will be freed after
		 * adding new entries.  So we need to save a copy.
		 */
		he->branch_info = malloc(sizeof(*he->branch_info));
		if (he->branch_info == NULL)

		memcpy(he->branch_info, template->branch_info,
		       sizeof(*he->branch_info));

		map__get(he->branch_info->from.ms.map);
		map__get(he->branch_info->to.ms.map);

		map__get(he->mem_info->iaddr.ms.map);
		map__get(he->mem_info->daddr.ms.map);

	if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
		callchain_init(he->callchain);

		he->raw_data = memdup(he->raw_data, he->raw_size);
		if (he->raw_data == NULL)

		he->srcline = strdup(he->srcline);
		if (he->srcline == NULL)

	if (symbol_conf.res_sample) {
		he->res_samples = calloc(sizeof(struct res_sample),
					 symbol_conf.res_sample);
		if (!he->res_samples)

	INIT_LIST_HEAD(&he->pairs.node);
	thread__get(he->thread);
	he->hroot_in  = RB_ROOT_CACHED;
	he->hroot_out = RB_ROOT_CACHED;

	if (!symbol_conf.report_hierarchy)

	zfree(&he->raw_data);

	if (he->branch_info) {
		map__put(he->branch_info->from.ms.map);
		map__put(he->branch_info->to.ms.map);
		zfree(&he->branch_info);
		map__put(he->mem_info->iaddr.ms.map);
		map__put(he->mem_info->daddr.ms.map);
	map__zput(he->ms.map);
	zfree(&he->stat_acc);

static void *hist_entry__zalloc(size_t size)
	return zalloc(size + sizeof(struct hist_entry));

static void hist_entry__free(void *ptr)

static struct hist_entry_ops default_ops = {
	.new	= hist_entry__zalloc,
	.free	= hist_entry__free,

static struct hist_entry *hist_entry__new(struct hist_entry *template,
	struct hist_entry_ops *ops = template->ops;
	size_t callchain_size = 0;
	struct hist_entry *he;

		ops = template->ops = &default_ops;

	if (symbol_conf.use_callchain)
		callchain_size = sizeof(struct callchain_root);

	he = ops->new(callchain_size);
		err = hist_entry__init(he, template, sample_self, callchain_size);

static u8 symbol__parent_filter(const struct symbol *parent)
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;

static void hist_entry__add_callchain_period(struct hist_entry *he, u64 period)
	if (!hist_entry__has_callchains(he) || !symbol_conf.use_callchain)

	he->hists->callchain_period += period;
		he->hists->callchain_non_filtered_period += period;
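
/*
 * Look the new sample's entry up in the current input tree: on a match just
 * accumulate the period into the existing node, otherwise allocate a fresh
 * hist_entry via hist_entry__new() and link it in.
 */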
static struct hist_entry *hists__findnew_entry(struct hists *hists,
					       struct hist_entry *entry,
					       struct addr_location *al,
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	u64 period = entry->stat.period;
	u64 weight = entry->stat.weight;
	bool leftmost = true;

	p = &hists->entries_in->rb_root.rb_node;

		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in a same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless which sort
		 */
		cmp = hist_entry__cmp(he, entry);

			he_stat__add_period(&he->stat, period, weight);
			hist_entry__add_callchain_period(he, period);

			if (symbol_conf.cumulate_callchain)
				he_stat__add_period(he->stat_acc, period, weight);

			/*
			 * This mem info was allocated from sample__resolve_mem
			 * and will not be used anymore.
			 */
			mem_info__zput(entry->mem_info);

			block_info__zput(entry->block_info);

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				map__put(he->ms.map);
				he->ms.map = map__get(entry->ms.map);

	he = hist_entry__new(entry, sample_self);

		hist_entry__add_callchain_period(he, period);

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color_cached(&he->rb_node_in, hists->entries_in, leftmost);

		he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
	if (symbol_conf.cumulate_callchain)
		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
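
/*
 * Pick a value uniformly in [0, high): 'thresh' below is the size of the
 * biased tail of random() that has to be rejected to keep the result uniform.
 */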
static unsigned random_max(unsigned high)
	unsigned thresh = -high % high;
		unsigned r = random();

static void hists__res_sample(struct hist_entry *he, struct perf_sample *sample)
	struct res_sample *r;

	if (he->num_res < symbol_conf.res_sample) {
		j = random_max(symbol_conf.res_sample);
	r = &he->res_samples[j];
	r->time = sample->time;
	r->cpu = sample->cpu;
	r->tid = sample->tid;

static struct hist_entry *
__hists__add_entry(struct hists *hists,
		   struct addr_location *al,
		   struct symbol *sym_parent,
		   struct branch_info *bi,
		   struct block_info *block_info,
		   struct perf_sample *sample,
		   struct hist_entry_ops *ops)
	struct namespaces *ns = thread__namespaces(al->thread);
	struct hist_entry entry = {
		.thread	= al->thread,
		.comm = thread__comm(al->thread),
			.dev = ns ? ns->link_info[CGROUP_NS_INDEX].dev : 0,
			.ino = ns ? ns->link_info[CGROUP_NS_INDEX].ino : 0,
		.cgroup = sample->cgroup,
		.srcline = (char *) al->srcline,
		.socket	 = al->socket,
		.cpumode = al->cpumode,
			.period	= sample->period,
			.weight = sample->weight,
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
		.block_info = block_info,
		.transaction = sample->transaction,
		.raw_data = sample->raw_data,
		.raw_size = sample->raw_size,
		.time = hist_time(sample->time),
	}, *he = hists__findnew_entry(hists, &entry, al, sample_self);

	if (!hists->has_callchains && he && he->callchain_size != 0)
		hists->has_callchains = true;
	if (he && symbol_conf.res_sample)
		hists__res_sample(he, sample);

struct hist_entry *hists__add_entry(struct hists *hists,
				    struct addr_location *al,
				    struct symbol *sym_parent,
				    struct branch_info *bi,
				    struct perf_sample *sample,
	return __hists__add_entry(hists, al, sym_parent, bi, mi, NULL,
				  sample, sample_self, NULL);

struct hist_entry *hists__add_entry_ops(struct hists *hists,
					struct hist_entry_ops *ops,
					struct addr_location *al,
					struct symbol *sym_parent,
					struct branch_info *bi,
					struct perf_sample *sample,
	return __hists__add_entry(hists, al, sym_parent, bi, mi, NULL,
				  sample, sample_self, ops);

struct hist_entry *hists__add_entry_block(struct hists *hists,
					  struct addr_location *al,
					  struct block_info *block_info)
	struct hist_entry entry = {
		.block_info = block_info,
	}, *he = hists__findnew_entry(hists, &entry, al, false);
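
/*
 * hist_entry_iter: each sample flavour (mem, branch, normal, cumulative
 * callchain) supplies prepare/add_single/next/add_next/finish callbacks,
 * which hist_entry_iter__add() drives for every sample.
 */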
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
		    struct addr_location *al __maybe_unused)

iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
			struct addr_location *al __maybe_unused)

iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
	struct perf_sample *sample = iter->sample;

	mi = sample__resolve_mem(sample, al);

iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
	struct mem_info *mi = iter->priv;
	struct hists *hists = evsel__hists(iter->evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	cost = sample->weight;

	/*
	 * must pass period=weight in order to get the correct
	 * sorting from hists__collapse_resort() which is solely
	 * based on periods. We want sorting be done on nr_events * weight
	 * and this is indirectly achieved by passing period=weight here
	 * and the he_stat__add_period() function.
	 */
	sample->period = cost;

	he = hists__add_entry(hists, al, iter->parent, NULL, mi,

iter_finish_mem_entry(struct hist_entry_iter *iter,
		      struct addr_location *al __maybe_unused)
	struct evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he = iter->he;

	hists__inc_nr_samples(hists, he->filtered);

	err = hist_entry__append_callchain(he, iter->sample);

	/*
	 * We don't need to free iter->priv (mem_info) here since the mem info
	 * was either already freed in hists__findnew_entry() or passed to a
	 * new hist entry by hist_entry__new().
	 */

iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
	struct branch_info *bi;
	struct perf_sample *sample = iter->sample;

	bi = sample__resolve_bstack(sample, al);

	iter->total = sample->branch_stack->nr;

iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused,
			     struct addr_location *al __maybe_unused)

iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
	struct branch_info *bi = iter->priv;

	if (iter->curr >= iter->total)

	al->maps = bi[i].to.ms.maps;
	al->map = bi[i].to.ms.map;
	al->sym = bi[i].to.ms.sym;
	al->addr = bi[i].to.addr;

iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
	struct branch_info *bi;
	struct evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he = NULL;

	if (iter->hide_unresolved && !(bi[i].from.ms.sym && bi[i].to.ms.sym))

	/*
	 * The report shows the percentage of total branches captured
	 * and not events sampled. Thus we use a pseudo period of 1.
	 */
	sample->weight = bi->flags.cycles ? bi->flags.cycles : 1;

	he = hists__add_entry(hists, al, iter->parent, &bi[i], NULL,

	hists__inc_nr_samples(hists, he->filtered);

iter_finish_branch_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)

	return iter->curr >= iter->total ? 0 : -1;

iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
			  struct addr_location *al __maybe_unused)

iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
	struct evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,

iter_finish_normal_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
	struct hist_entry *he = iter->he;
	struct evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;

	hists__inc_nr_samples(evsel__hists(evsel), he->filtered);

	return hist_entry__append_callchain(he, sample);
iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
			      struct addr_location *al __maybe_unused)
	struct hist_entry **he_cache;

	callchain_cursor_commit(&callchain_cursor);

	/*
	 * This is for detecting cycles or recursions so that they're
	 * cumulated only one time to prevent entries more than 100%
	 */
	he_cache = malloc(sizeof(*he_cache) * (callchain_cursor.nr + 1));
	if (he_cache == NULL)

	iter->priv = he_cache;

iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
				 struct addr_location *al)
	struct evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;

	he = hists__add_entry(hists, al, iter->parent, NULL, NULL,

	he_cache[iter->curr++] = he;

	hist_entry__append_callchain(he, sample);

	/*
	 * We need to re-initialize the cursor since callchain_append()
	 * advanced the cursor to the end.
	 */
	callchain_cursor_commit(&callchain_cursor);

	hists__inc_nr_samples(hists, he->filtered);

iter_next_cumulative_entry(struct hist_entry_iter *iter,
			   struct addr_location *al)
	struct callchain_cursor_node *node;

	node = callchain_cursor_current(&callchain_cursor);

	return fill_callchain_info(al, node, iter->hide_unresolved);

hist_entry__fast__sym_diff(struct hist_entry *left,
			   struct hist_entry *right)
	struct symbol *sym_l = left->ms.sym;
	struct symbol *sym_r = right->ms.sym;

	if (!sym_l && !sym_r)
		return left->ip != right->ip;

	return !!_sort__sym_cmp(sym_l, sym_r);

iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
			       struct addr_location *al)
	struct evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	struct hist_entry he_tmp = {
		.hists = evsel__hists(evsel),
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.srcline = (char *) al->srcline,
		.parent = iter->parent,
		.raw_data = sample->raw_data,
		.raw_size = sample->raw_size,
	struct callchain_cursor cursor;
	bool fast = hists__has(he_tmp.hists, sym);

	callchain_cursor_snapshot(&cursor, &callchain_cursor);

	callchain_cursor_advance(&callchain_cursor);

	/*
	 * Check if there's duplicate entries in the callchain.
	 * It's possible that it has cycles or recursive calls.
	 */
	for (i = 0; i < iter->curr; i++) {
		/*
		 * For most cases, there are no duplicate entries in callchain.
		 * The symbols are usually different. Do a quick check for
		 */
		if (fast && hist_entry__fast__sym_diff(he_cache[i], &he_tmp))

		if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
			/* to avoid calling callback function */

	he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,

	he_cache[iter->curr++] = he;

	if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
		callchain_append(he->callchain, &cursor, sample->period);

iter_finish_cumulative_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)

const struct hist_iter_ops hist_iter_mem = {
	.prepare_entry		= iter_prepare_mem_entry,
	.add_single_entry	= iter_add_single_mem_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_mem_entry,

const struct hist_iter_ops hist_iter_branch = {
	.prepare_entry		= iter_prepare_branch_entry,
	.add_single_entry	= iter_add_single_branch_entry,
	.next_entry		= iter_next_branch_entry,
	.add_next_entry		= iter_add_next_branch_entry,
	.finish_entry		= iter_finish_branch_entry,

const struct hist_iter_ops hist_iter_normal = {
	.prepare_entry		= iter_prepare_normal_entry,
	.add_single_entry	= iter_add_single_normal_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_normal_entry,

const struct hist_iter_ops hist_iter_cumulative = {
	.prepare_entry		= iter_prepare_cumulative_entry,
	.add_single_entry	= iter_add_single_cumulative_entry,
	.next_entry		= iter_next_cumulative_entry,
	.add_next_entry		= iter_add_next_cumulative_entry,
	.finish_entry		= iter_finish_cumulative_entry,
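
/*
 * Drive one sample through the chosen iterator: resolve its callchain, add
 * the primary entry, then let the iterator add any additional entries
 * (individual branches, cumulative callchain parents) it provides.
 */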
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
			 int max_stack_depth, void *arg)
	struct map *alm = NULL;

		alm = map__get(al->map);

	err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
					iter->evsel, al, max_stack_depth);

	err = iter->ops->prepare_entry(iter, al);

	err = iter->ops->add_single_entry(iter, al);

	if (iter->he && iter->add_entry_cb) {
		err = iter->add_entry_cb(iter, al, true, arg);

	while (iter->ops->next_entry(iter, al)) {
		err = iter->ops->add_next_entry(iter, al);

		if (iter->he && iter->add_entry_cb) {
			err = iter->add_entry_cb(iter, al, false, arg);

	err2 = iter->ops->finish_entry(iter, al);

hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
	struct hists *hists = left->hists;
	struct perf_hpp_fmt *fmt;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    !perf_hpp__defined_dynamic_entry(fmt, hists))

		cmp = fmt->cmp(fmt, left, right);

hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
	struct hists *hists = left->hists;
	struct perf_hpp_fmt *fmt;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    !perf_hpp__defined_dynamic_entry(fmt, hists))

		cmp = fmt->collapse(fmt, left, right);
void hist_entry__delete(struct hist_entry *he)
	struct hist_entry_ops *ops = he->ops;

	thread__zput(he->thread);
	map__zput(he->ms.map);

	if (he->branch_info) {
		map__zput(he->branch_info->from.ms.map);
		map__zput(he->branch_info->to.ms.map);
		free_srcline(he->branch_info->srcline_from);
		free_srcline(he->branch_info->srcline_to);
		zfree(&he->branch_info);

		map__zput(he->mem_info->iaddr.ms.map);
		map__zput(he->mem_info->daddr.ms.map);
		mem_info__zput(he->mem_info);

		block_info__zput(he->block_info);

	zfree(&he->res_samples);
	zfree(&he->stat_acc);
	free_srcline(he->srcline);
	if (he->srcfile && he->srcfile[0])
		zfree(&he->srcfile);
	free_callchain(he->callchain);
	zfree(&he->trace_output);
	zfree(&he->raw_data);

/*
 * If this is not the last column, then we need to pad it according to the
 * pre-calculated max length for this column, otherwise don't bother adding
 * spaces because that would break viewing this with, for instance, 'less',
 * that would show tons of trailing spaces when a long C++ demangled method
 */
int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
				   struct perf_hpp_fmt *fmt, int printed)
	if (!list_is_last(&fmt->list, &he->hists->hpp_list->fields)) {
		const int width = fmt->width(fmt, hpp, he->hists);
		if (printed < width) {
			advance_hpp(hpp, printed);
			printed = scnprintf(hpp->buf, hpp->size, "%-*s", width - printed, " ");
/*
 * collapse the histogram
 */

static void hists__apply_filters(struct hists *hists, struct hist_entry *he);
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *he,
				       enum hist_filter type);

typedef bool (*fmt_chk_fn)(struct perf_hpp_fmt *fmt);

static bool check_thread_entry(struct perf_hpp_fmt *fmt)
	return perf_hpp__is_thread_entry(fmt) || perf_hpp__is_comm_entry(fmt);

static void hist_entry__check_and_remove_filter(struct hist_entry *he,
						enum hist_filter type,
	struct perf_hpp_fmt *fmt;
	bool type_match = false;
	struct hist_entry *parent = he->parent_he;

	case HIST_FILTER__THREAD:
		if (symbol_conf.comm_list == NULL &&
		    symbol_conf.pid_list == NULL &&
		    symbol_conf.tid_list == NULL)
	case HIST_FILTER__DSO:
		if (symbol_conf.dso_list == NULL)
	case HIST_FILTER__SYMBOL:
		if (symbol_conf.sym_list == NULL)
	case HIST_FILTER__PARENT:
	case HIST_FILTER__GUEST:
	case HIST_FILTER__HOST:
	case HIST_FILTER__SOCKET:
	case HIST_FILTER__C2C:

	/* if it's filtered by own fmt, it has to have filter bits */
	perf_hpp_list__for_each_format(he->hpp_list, fmt) {

		/*
		 * If the filter is for current level entry, propagate
		 * filter marker to parents.  The marker bit was
		 * already set by default so it only needs to clear
		 * non-filtered entries.
		 */
		if (!(he->filtered & (1 << type))) {
				parent->filtered &= ~(1 << type);
				parent = parent->parent_he;
		/*
		 * If current entry doesn't have matching formats, set
		 * filter marker for upper level entries.  it will be
		 * cleared if its lower level entries is not filtered.
		 *
		 * For lower-level entries, it inherits parent's
		 * filter bit so that lower level entries of a
		 * non-filtered entry won't set the filter marker.
		 */
			he->filtered |= (1 << type);
			he->filtered |= (parent->filtered & (1 << type));

static void hist_entry__apply_hierarchy_filters(struct hist_entry *he)
	hist_entry__check_and_remove_filter(he, HIST_FILTER__THREAD,
					    check_thread_entry);

	hist_entry__check_and_remove_filter(he, HIST_FILTER__DSO,
					    perf_hpp__is_dso_entry);

	hist_entry__check_and_remove_filter(he, HIST_FILTER__SYMBOL,
					    perf_hpp__is_sym_entry);

	hists__apply_filters(he->hists, he);
static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
						 struct rb_root_cached *root,
						 struct hist_entry *he,
						 struct hist_entry *parent_he,
						 struct perf_hpp_list *hpp_list)
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter, *new;
	struct perf_hpp_fmt *fmt;
	bool leftmost = true;

	while (*p != NULL) {
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
			cmp = fmt->collapse(fmt, iter, he);

			he_stat__add_stat(&iter->stat, &he->stat);

			p = &parent->rb_left;
			p = &parent->rb_right;

	new = hist_entry__new(he, true);

	hists->nr_entries++;

	/* save related format list for output */
	new->hpp_list = hpp_list;
	new->parent_he = parent_he;

	hist_entry__apply_hierarchy_filters(new);

	/* some fields are now passed to 'new' */
	perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
		if (perf_hpp__is_trace_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
			he->trace_output = NULL;
			new->trace_output = NULL;

		if (perf_hpp__is_srcline_entry(fmt))
			new->srcline = NULL;

		if (perf_hpp__is_srcfile_entry(fmt))
			new->srcfile = NULL;

	rb_link_node(&new->rb_node_in, parent, p);
	rb_insert_color_cached(&new->rb_node_in, root, leftmost);

static int hists__hierarchy_insert_entry(struct hists *hists,
					 struct rb_root_cached *root,
					 struct hist_entry *he)
	struct perf_hpp_list_node *node;
	struct hist_entry *new_he = NULL;
	struct hist_entry *parent = NULL;

	list_for_each_entry(node, &hists->hpp_formats, list) {
		/* skip period (overhead) and elided columns */
		if (node->level == 0 || node->skip)

		/* insert copy of 'he' for each fmt into the hierarchy */
		new_he = hierarchy_insert_entry(hists, root, he, parent, &node->hpp);
		if (new_he == NULL) {

		root = &new_he->hroot_in;
		new_he->depth = depth++;

		new_he->leaf = true;

		if (hist_entry__has_callchains(new_he) &&
		    symbol_conf.use_callchain) {
			callchain_cursor_reset(&callchain_cursor);
			if (callchain_merge(&callchain_cursor,

	/* 'he' is no longer used */
	hist_entry__delete(he);

	/* return 0 (or -1) since it already applied filters */
static int hists__collapse_insert_entry(struct hists *hists,
					struct rb_root_cached *root,
					struct hist_entry *he)
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	bool leftmost = true;

	if (symbol_conf.report_hierarchy)
		return hists__hierarchy_insert_entry(hists, root, he);

	while (*p != NULL) {
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

			he_stat__add_stat(&iter->stat, &he->stat);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_stat(iter->stat_acc, he->stat_acc);

			if (hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				if (callchain_merge(&callchain_cursor,

			hist_entry__delete(he);

			p = &(*p)->rb_right;

	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color_cached(&he->rb_node_in, root, leftmost);
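
/*
 * Swap to the other input tree under hists->lock, so new samples keep landing
 * in a fresh tree while the caller collapses the one returned here.
 */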
struct rb_root_cached *hists__get_rotate_entries_in(struct hists *hists)
	struct rb_root_cached *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
	hists__filter_entry_by_socket(hists, he);
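
/*
 * Drain the current input tree into hists->entries_collapsed, merging entries
 * that compare equal under the collapse keys and re-applying any active
 * filters to the entries that were not merged away.
 */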
int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
	struct rb_root_cached *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!hists__has(hists, need_collapse))

	hists->nr_entries = 0;

	root = hists__get_rotate_entries_in(hists);

	next = rb_first_cached(root);

		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase_cached(&n->rb_node_in, root);
		ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n);

			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);

			ui_progress__update(prog, 1);

static int64_t hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
	struct hists *hists = a->hists;
	struct perf_hpp_fmt *fmt;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__should_skip(fmt, a->hists))

		cmp = fmt->sort(fmt, a, b);

static void hists__reset_filter_stats(struct hists *hists)
	hists->nr_non_filtered_entries = 0;
	hists->stats.total_non_filtered_period = 0;

void hists__reset_stats(struct hists *hists)
	hists->nr_entries = 0;
	hists->stats.total_period = 0;

	hists__reset_filter_stats(hists);

static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
	hists->nr_non_filtered_entries++;
	hists->stats.total_non_filtered_period += h->stat.period;

void hists__inc_stats(struct hists *hists, struct hist_entry *h)
		hists__inc_filter_stats(hists, h);

	hists->nr_entries++;
	hists->stats.total_period += h->stat.period;

static void hierarchy_recalc_total_periods(struct hists *hists)
	struct rb_node *node;
	struct hist_entry *he;

	node = rb_first_cached(&hists->entries);

	hists->stats.total_period = 0;
	hists->stats.total_non_filtered_period = 0;

	/*
	 * recalculate total period using top-level entries only
	 * since lower level entries only see non-filtered entries
	 * but upper level entries have sum of both entries.
	 */
		he = rb_entry(node, struct hist_entry, rb_node);
		node = rb_next(node);

		hists->stats.total_period += he->stat.period;
			hists->stats.total_non_filtered_period += he->stat.period;
static void hierarchy_insert_output_entry(struct rb_root_cached *root,
					  struct hist_entry *he)
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct perf_hpp_fmt *fmt;
	bool leftmost = true;

	while (*p != NULL) {
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &parent->rb_left;
			p = &parent->rb_right;

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color_cached(&he->rb_node, root, leftmost);

	/* update column width of dynamic entry */
	perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt))
			fmt->sort(fmt, he, NULL);

static void hists__hierarchy_output_resort(struct hists *hists,
					   struct ui_progress *prog,
					   struct rb_root_cached *root_in,
					   struct rb_root_cached *root_out,
					   u64 min_callchain_hits,
	struct rb_node *node;
	struct hist_entry *he;

	*root_out = RB_ROOT_CACHED;
	node = rb_first_cached(root_in);

		he = rb_entry(node, struct hist_entry, rb_node_in);
		node = rb_next(node);

		hierarchy_insert_output_entry(root_out, he);

			ui_progress__update(prog, 1);

		hists->nr_entries++;
		if (!he->filtered) {
			hists->nr_non_filtered_entries++;
			hists__calc_col_len(hists, he);

			hists__hierarchy_output_resort(hists, prog,

		if (callchain_param.mode == CHAIN_GRAPH_REL) {
			u64 total = he->stat.period;

			if (symbol_conf.cumulate_callchain)
				total = he->stat_acc->period;

			min_callchain_hits = total * (callchain_param.min_percent / 100);

		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);
static void __hists__insert_output_entry(struct rb_root_cached *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits,
	struct rb_node **p = &entries->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct perf_hpp_fmt *fmt;
	bool leftmost = true;

	if (use_callchain) {
		if (callchain_param.mode == CHAIN_GRAPH_REL) {
			u64 total = he->stat.period;

			if (symbol_conf.cumulate_callchain)
				total = he->stat_acc->period;

			min_callchain_hits = total * (callchain_param.min_percent / 100);

		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_right;

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color_cached(&he->rb_node, entries, leftmost);

	perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    perf_hpp__defined_dynamic_entry(fmt, he->hists))
			fmt->sort(fmt, he, NULL);	/* update column width */
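
/*
 * Rebuild hists->entries in display order from the collapsed (or input)
 * tree, refreshing stats and column widths as entries are inserted.
 */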
static void output_resort(struct hists *hists, struct ui_progress *prog,
			  bool use_callchain, hists__resort_cb_t cb,
	struct rb_root_cached *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 callchain_total;
	u64 min_callchain_hits;

	callchain_total = hists->callchain_period;
	if (symbol_conf.filter_relative)
		callchain_total = hists->callchain_non_filtered_period;

	min_callchain_hits = callchain_total * (callchain_param.min_percent / 100);

	hists__reset_stats(hists);
	hists__reset_col_len(hists);

	if (symbol_conf.report_hierarchy) {
		hists__hierarchy_output_resort(hists, prog,
					       &hists->entries_collapsed,

		hierarchy_recalc_total_periods(hists);

	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
		root = hists->entries_in;

	next = rb_first_cached(root);
	hists->entries = RB_ROOT_CACHED;

		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		if (cb && cb(n, cb_arg))

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
		hists__inc_stats(hists, n);

			hists__calc_col_len(hists, n);

			ui_progress__update(prog, 1);

void evsel__output_resort_cb(struct evsel *evsel, struct ui_progress *prog,
			     hists__resort_cb_t cb, void *cb_arg)
	if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
		use_callchain = evsel__has_callchain(evsel);
		use_callchain = symbol_conf.use_callchain;

	use_callchain |= symbol_conf.show_branchflag_count;

	output_resort(evsel__hists(evsel), prog, use_callchain, cb, cb_arg);

void evsel__output_resort(struct evsel *evsel, struct ui_progress *prog)
	return evsel__output_resort_cb(evsel, prog, NULL, NULL);

void hists__output_resort(struct hists *hists, struct ui_progress *prog)
	output_resort(hists, prog, symbol_conf.use_callchain, NULL, NULL);

void hists__output_resort_cb(struct hists *hists, struct ui_progress *prog,
			     hists__resort_cb_t cb)
	output_resort(hists, prog, symbol_conf.use_callchain, cb, NULL);
static bool can_goto_child(struct hist_entry *he, enum hierarchy_move_dir hmd)
	if (he->leaf || hmd == HMD_FORCE_SIBLING)

	if (he->unfolded || hmd == HMD_FORCE_CHILD)

struct rb_node *rb_hierarchy_last(struct rb_node *node)
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	while (can_goto_child(he, HMD_NORMAL)) {
		node = rb_last(&he->hroot_out.rb_root);
		he = rb_entry(node, struct hist_entry, rb_node);

struct rb_node *__rb_hierarchy_next(struct rb_node *node, enum hierarchy_move_dir hmd)
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	if (can_goto_child(he, hmd))
		node = rb_first_cached(&he->hroot_out);
		node = rb_next(node);

	while (node == NULL) {

		node = rb_next(&he->rb_node);

struct rb_node *rb_hierarchy_prev(struct rb_node *node)
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	node = rb_prev(node);
		return rb_hierarchy_last(node);

	return &he->rb_node;

bool hist_entry__has_hierarchy_children(struct hist_entry *he, float limit)
	struct rb_node *node;
	struct hist_entry *child;

	node = rb_first_cached(&he->hroot_out);
	child = rb_entry(node, struct hist_entry, rb_node);

	while (node && child->filtered) {
		node = rb_next(node);
		child = rb_entry(node, struct hist_entry, rb_node);

		percent = hist_entry__get_percent_limit(child);

	return node && percent >= limit;
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
	h->filtered &= ~(1 << filter);

	if (symbol_conf.report_hierarchy) {
		struct hist_entry *parent = h->parent_he;

			he_stat__add_stat(&parent->stat, &h->stat);

			parent->filtered &= ~(1 << filter);

			if (parent->filtered)

			/* force fold unfiltered entry for simplicity */
			parent->unfolded = false;
			parent->has_no_entry = false;
			parent->row_offset = 0;
			parent->nr_rows = 0;

			parent = parent->parent_he;

	/* force fold unfiltered entry for simplicity */
	h->unfolded = false;
	h->has_no_entry = false;

	hists->stats.nr_non_filtered_samples += h->stat.nr_events;

	hists__inc_filter_stats(hists, h);
	hists__calc_col_len(hists, h);

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);

static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he)
	if ((hists->socket_filter > -1) &&
	    (he->socket != hists->socket_filter)) {
		he->filtered |= (1 << HIST_FILTER__SOCKET);
typedef bool (*filter_fn_t)(struct hists *hists, struct hist_entry *he);

static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t filter)
	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first_cached(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (filter(hists, h))

		hists__remove_entry_filter(hists, h, type);

static void resort_filtered_entry(struct rb_root_cached *root,
				  struct hist_entry *he)
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct rb_root_cached new_root = RB_ROOT_CACHED;
	bool leftmost = true;

	while (*p != NULL) {
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_right;

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color_cached(&he->rb_node, root, leftmost);

	if (he->leaf || he->filtered)

	nd = rb_first_cached(&he->hroot_out);
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		rb_erase_cached(&h->rb_node, &he->hroot_out);

		resort_filtered_entry(&new_root, h);

	he->hroot_out = new_root;
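
/*
 * Hierarchy-mode filtering: walk the whole tree marking or clearing the
 * filter bit per entry (the three cases below), then resort the output tree
 * since parent periods may have changed.
 */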
static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg)
	struct rb_root_cached new_root = RB_ROOT_CACHED;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	nd = rb_first_cached(&hists->entries);
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		ret = hist_entry__filter(h, type, arg);

		/*
		 * case 1. non-matching type
		 * zero out the period, set filter marker and move to child
		 */
			memset(&h->stat, 0, sizeof(h->stat));
			h->filtered |= (1 << type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_CHILD);
		/*
		 * case 2. matched type (filter out)
		 * set filter marker and move to next
		 */
		else if (ret == 1) {
			h->filtered |= (1 << type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
		/*
		 * case 3. ok (not filtered)
		 * add period to hists and parents, erase the filter marker
		 * and move to next sibling
		 */
			hists__remove_entry_filter(hists, h, type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);

	hierarchy_recalc_total_periods(hists);

	/*
	 * resort output after applying a new filter since filter in a lower
	 * hierarchy can change periods in a upper hierarchy.
	 */
	nd = rb_first_cached(&hists->entries);
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		rb_erase_cached(&h->rb_node, &hists->entries);

		resort_filtered_entry(&new_root, h);

	hists->entries = new_root;

void hists__filter_by_thread(struct hists *hists)
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__THREAD,
					hists->thread_filter);
		hists__filter_by_type(hists, HIST_FILTER__THREAD,
				      hists__filter_entry_by_thread);

void hists__filter_by_dso(struct hists *hists)
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__DSO,
		hists__filter_by_type(hists, HIST_FILTER__DSO,
				      hists__filter_entry_by_dso);

void hists__filter_by_symbol(struct hists *hists)
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__SYMBOL,
					hists->symbol_filter_str);
		hists__filter_by_type(hists, HIST_FILTER__SYMBOL,
				      hists__filter_entry_by_symbol);

void hists__filter_by_socket(struct hists *hists)
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__SOCKET,
					&hists->socket_filter);
		hists__filter_by_type(hists, HIST_FILTER__SOCKET,
				      hists__filter_entry_by_socket);

void events_stats__inc(struct events_stats *stats, u32 type)
	++stats->nr_events[0];
	++stats->nr_events[type];

void hists__inc_nr_events(struct hists *hists, u32 type)
	events_stats__inc(&hists->stats, type);

void hists__inc_nr_samples(struct hists *hists, bool filtered)
	events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
		hists->stats.nr_non_filtered_samples++;
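
/*
 * Dummy entries carry period 0 and nr_events 0; they give the 'leader' hists
 * a node to pair against when an entry only exists in the 'other' hists (see
 * hists__link() below).
 */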
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
	struct rb_root_cached *root;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	bool leftmost = true;

	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
		root = hists->entries_in;

	p = &root->rb_root.rb_node;

	while (*p != NULL) {
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

			p = &(*p)->rb_right;

	he = hist_entry__new(pair, true);
		memset(&he->stat, 0, sizeof(he->stat));

		if (symbol_conf.cumulate_callchain)
			memset(he->stat_acc, 0, sizeof(he->stat));
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color_cached(&he->rb_node_in, root, leftmost);
		hists__inc_stats(hists, he);

static struct hist_entry *add_dummy_hierarchy_entry(struct hists *hists,
						    struct rb_root_cached *root,
						    struct hist_entry *pair)
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	struct perf_hpp_fmt *fmt;
	bool leftmost = true;

	p = &root->rb_root.rb_node;
	while (*p != NULL) {

		he = rb_entry(parent, struct hist_entry, rb_node_in);

		perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
			cmp = fmt->collapse(fmt, he, pair);

			p = &parent->rb_left;
			p = &parent->rb_right;

	he = hist_entry__new(pair, true);

		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color_cached(&he->rb_node_in, root, leftmost);

		memset(&he->stat, 0, sizeof(he->stat));
		hists__inc_stats(hists, he);

static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
	if (hists__has(hists, need_collapse))
		n = hists->entries_collapsed.rb_root.rb_node;
		n = hists->entries_in->rb_root.rb_node;

		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

static struct hist_entry *hists__find_hierarchy_entry(struct rb_root_cached *root,
						      struct hist_entry *he)
	struct rb_node *n = root->rb_root.rb_node;

		struct hist_entry *iter;
		struct perf_hpp_fmt *fmt;

		iter = rb_entry(n, struct hist_entry, rb_node_in);
		perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
			cmp = fmt->collapse(fmt, iter, he);

static void hists__match_hierarchy(struct rb_root_cached *leader_root,
				   struct rb_root_cached *other_root)
	struct hist_entry *pos, *pair;

	for (nd = rb_first_cached(leader_root); nd; nd = rb_next(nd)) {
		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_hierarchy_entry(other_root, pos);

			hist_entry__add_pair(pair, pos);
			hists__match_hierarchy(&pos->hroot_in, &pair->hroot_in);

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
	struct rb_root_cached *root;
	struct hist_entry *pos, *pair;

	if (symbol_conf.report_hierarchy) {
		/* hierarchy report always collapses entries */
		return hists__match_hierarchy(&leader->entries_collapsed,
					      &other->entries_collapsed);

	if (hists__has(leader, need_collapse))
		root = &leader->entries_collapsed;
		root = leader->entries_in;

	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

			hist_entry__add_pair(pair, pos);

static int hists__link_hierarchy(struct hists *leader_hists,
				 struct hist_entry *parent,
				 struct rb_root_cached *leader_root,
				 struct rb_root_cached *other_root)
	struct hist_entry *pos, *leader;

	for (nd = rb_first_cached(other_root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (hist_entry__has_pairs(pos)) {

			list_for_each_entry(leader, &pos->pairs.head, pairs.node) {
				if (leader->hists == leader_hists) {

			leader = add_dummy_hierarchy_entry(leader_hists,

			/* do not point parent in the pos */
			leader->parent_he = parent;

			hist_entry__add_pair(pos, leader);

			if (hists__link_hierarchy(leader_hists, leader,
						  &pos->hroot_in) < 0)

/*
 * Look for entries in the other hists that are not present in the leader, if
 * we find them, just add a dummy entry on the leader hists, with period=0,
 * nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
	struct rb_root_cached *root;
	struct hist_entry *pos, *pair;

	if (symbol_conf.report_hierarchy) {
		/* hierarchy report always collapses entries */
		return hists__link_hierarchy(leader, NULL,
					     &leader->entries_collapsed,
					     &other->entries_collapsed);

	if (hists__has(other, need_collapse))
		root = &other->entries_collapsed;
		root = other->entries_in;

	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			hist_entry__add_pair(pos, pair);

int hists__unlink(struct hists *hists)
	struct rb_root_cached *root;
	struct hist_entry *pos;

	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
		root = hists->entries_in;

	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);
		list_del_init(&pos->pairs.node);

void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
			  struct perf_sample *sample, bool nonany_branch_mode,
	struct branch_info *bi;
	struct branch_entry *entries = perf_sample__branch_entries(sample);

	/* If we have branch cycles always annotate them. */
	if (bs && bs->nr && entries[0].flags.cycles) {

		bi = sample__resolve_bstack(sample, al);
			struct addr_map_symbol *prev = NULL;

			/*
			 * Ignore errors, still want to process the
			 *
			 * For non standard branch modes always
			 * force no IPC (prev == NULL)
			 *
			 * Note that perf stores branches reversed from
			 */
			for (i = bs->nr - 1; i >= 0; i--) {
				addr_map_symbol__account_cycles(&bi[i].from,
					nonany_branch_mode ? NULL : prev,
					bi[i].flags.cycles);

					*total_cycles += bi[i].flags.cycles;

size_t perf_evlist__fprintf_nr_events(struct evlist *evlist, FILE *fp)
	evlist__for_each_entry(evlist, pos) {
		ret += fprintf(fp, "%s stats:\n", evsel__name(pos));
		ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);

u64 hists__total_period(struct hists *hists)
	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
		hists->stats.total_period;

int __hists__scnprintf_title(struct hists *hists, char *bf, size_t size, bool show_freq)
	const struct dso *dso = hists->dso_filter;
	struct thread *thread = hists->thread_filter;
	int socket_id = hists->socket_filter;
	unsigned long nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE];
	u64 nr_events = hists->stats.total_period;
	struct evsel *evsel = hists_to_evsel(hists);
	const char *ev_name = evsel__name(evsel);
	char buf[512], sample_freq_str[64] = "";
	size_t buflen = sizeof(buf);
	char ref[30] = " show reference callgraph, ";
	bool enable_ref = false;

	if (symbol_conf.filter_relative) {
		nr_samples = hists->stats.nr_non_filtered_samples;
		nr_events = hists->stats.total_non_filtered_period;

	if (evsel__is_group_event(evsel)) {

		evsel__group_desc(evsel, buf, buflen);

		for_each_group_member(pos, evsel) {
			struct hists *pos_hists = evsel__hists(pos);

			if (symbol_conf.filter_relative) {
				nr_samples += pos_hists->stats.nr_non_filtered_samples;
				nr_events += pos_hists->stats.total_non_filtered_period;
				nr_samples += pos_hists->stats.nr_events[PERF_RECORD_SAMPLE];
				nr_events += pos_hists->stats.total_period;

	if (symbol_conf.show_ref_callgraph &&
	    strstr(ev_name, "call-graph=no"))

		scnprintf(sample_freq_str, sizeof(sample_freq_str), " %d Hz,", evsel->core.attr.sample_freq);

	nr_samples = convert_unit(nr_samples, &unit);
	printed = scnprintf(bf, size,
			    "Samples: %lu%c of event%s '%s',%s%sEvent count (approx.): %" PRIu64,
			    nr_samples, unit, evsel->core.nr_members > 1 ? "s" : "",
			    ev_name, sample_freq_str, enable_ref ? ref : " ", nr_events);

	if (hists->uid_filter_str)
		printed += snprintf(bf + printed, size - printed,
				    ", UID: %s", hists->uid_filter_str);

	if (hists__has(hists, thread)) {
		printed += scnprintf(bf + printed, size - printed,
				    (thread->comm_set ? thread__comm_str(thread) : ""),
		printed += scnprintf(bf + printed, size - printed,
				    (thread->comm_set ? thread__comm_str(thread) : ""));

		printed += scnprintf(bf + printed, size - printed,
				    ", DSO: %s", dso->short_name);
		printed += scnprintf(bf + printed, size - printed,
				    ", Processor Socket: %d", socket_id);

int parse_filter_percentage(const struct option *opt __maybe_unused,
			    const char *arg, int unset __maybe_unused)
	if (!strcmp(arg, "relative"))
		symbol_conf.filter_relative = true;
	else if (!strcmp(arg, "absolute"))
		symbol_conf.filter_relative = false;
		pr_debug("Invalid percentage: %s\n", arg);

int perf_hist_config(const char *var, const char *value)
	if (!strcmp(var, "hist.percentage"))
		return parse_filter_percentage(NULL, value, 0);

int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT_CACHED;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT_CACHED;
	hists->entries = RB_ROOT_CACHED;
	pthread_mutex_init(&hists->lock, NULL);
	hists->socket_filter = -1;
	hists->hpp_list = hpp_list;
	INIT_LIST_HEAD(&hists->hpp_formats);

static void hists__delete_remaining_entries(struct rb_root_cached *root)
	struct rb_node *node;
	struct hist_entry *he;

	while (!RB_EMPTY_ROOT(&root->rb_root)) {
		node = rb_first_cached(root);
		rb_erase_cached(node, root);

		he = rb_entry(node, struct hist_entry, rb_node_in);
		hist_entry__delete(he);

static void hists__delete_all_entries(struct hists *hists)
	hists__delete_entries(hists);
	hists__delete_remaining_entries(&hists->entries_in_array[0]);
	hists__delete_remaining_entries(&hists->entries_in_array[1]);
	hists__delete_remaining_entries(&hists->entries_collapsed);

static void hists_evsel__exit(struct evsel *evsel)
	struct hists *hists = evsel__hists(evsel);
	struct perf_hpp_fmt *fmt, *pos;
	struct perf_hpp_list_node *node, *tmp;

	hists__delete_all_entries(hists);

	list_for_each_entry_safe(node, tmp, &hists->hpp_formats, list) {
		perf_hpp_list__for_each_format_safe(&node->hpp, fmt, pos) {
			list_del_init(&fmt->list);
		list_del_init(&node->list);

static int hists_evsel__init(struct evsel *evsel)
	struct hists *hists = evsel__hists(evsel);

	__hists__init(hists, &perf_hpp_list);

/*
 * XXX We probably need a hists_evsel__exit() to free the hist_entries
 * stored in the rbtree...
 */
int hists__init(void)
	int err = evsel__object_config(sizeof(struct hists_evsel),
				       hists_evsel__init, hists_evsel__exit);

		fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);

void perf_hpp_list__init(struct perf_hpp_list *list)
	INIT_LIST_HEAD(&list->fields);
	INIT_LIST_HEAD(&list->sorts);