// SPDX-License-Identifier: GPL-2.0
#include "namespaces.h"
#include "ui/progress.h"
#include <sys/param.h>
#include <linux/time64.h>
static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he);
u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}
bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}
void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}
static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}
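
/*
 * Illustrative note (not from the original sources): on a 64-bit build
 * BITS_PER_LONG / 4 is 16, the number of hex digits in a full address.
 * An unresolved-symbol column therefore gets 16 + 4 + 2 = 22 characters:
 * 16 for the address, 4 for the "[.] " privilege-level marker and 2 for
 * the "0x" prefix, matching the accounting comment in
 * hists__calc_col_len() below.
 */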
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose > 0)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 8);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			if (verbose > 0)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			if (verbose > 0)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}

		if (h->branch_info->srcline_from)
			hists__new_col_len(hists, HISTC_SRCLINE_FROM,
					   strlen(h->branch_info->srcline_from));
		if (h->branch_info->srcline_to)
			hists__new_col_len(hists, HISTC_SRCLINE_TO,
					   strlen(h->branch_info->srcline_to));
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.sym) {
			symlen = (int)h->mem_info->daddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen + 1);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen);
		}

		if (h->mem_info->iaddr.sym) {
			symlen = (int)h->mem_info->iaddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		}

		if (h->mem_info->daddr.map) {
			symlen = dso__name_len(h->mem_info->daddr.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}

		hists__new_col_len(hists, HISTC_MEM_PHYS_DADDR,
				   unresolved_col_width + 4 + 2);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_CGROUP_ID, 20);
	hists__new_col_len(hists, HISTC_CPU, 3);
	hists__new_col_len(hists, HISTC_SOCKET, 6);
	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_TIME, 12);

	if (h->srcline) {
		len = MAX(strlen(h->srcline), strlen(sort_srcline.se_header));
		hists__new_col_len(hists, HISTC_SRCLINE, len);
	}

	if (h->srcfile)
		hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));

	if (h->transaction)
		hists__new_col_len(hists, HISTC_TRANSACTION,
				   hist_entry__transaction_len());

	if (h->trace_output)
		hists__new_col_len(hists, HISTC_TRACE, strlen(h->trace_output));
}
void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}
static void he_stat__add_cpumode_period(struct he_stat *he_stat,
					unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he_stat->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he_stat->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he_stat->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he_stat->period_guest_us += period;
		break;
	default:
		break;
	}
}
static long hist_time(unsigned long htime)
{
	unsigned long time_quantum = symbol_conf.time_quantum;

	if (time_quantum)
		return (htime / time_quantum) * time_quantum;
	return htime;
}
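
/*
 * Illustrative example (not from the original sources): hist_time()
 * rounds a sample timestamp down to the start of its quantum, so with a
 * 100ms quantum (100000000ns) a timestamp of 1234567890ns maps to
 * (1234567890 / 100000000) * 100000000 = 1200000000ns.  All samples
 * falling in the same quantum share one HISTC_TIME key and aggregate
 * into the same entry.
 */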
static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{
	he_stat->period		+= period;
	he_stat->weight		+= weight;
	he_stat->nr_events	+= 1;
}
static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period		+= src->period;
	dest->period_sys	+= src->period_sys;
	dest->period_us		+= src->period_us;
	dest->period_guest_sys	+= src->period_guest_sys;
	dest->period_guest_us	+= src->period_guest_us;
	dest->nr_events		+= src->nr_events;
	dest->weight		+= src->weight;
}
static void he_stat__decay(struct he_stat *he_stat)
{
	he_stat->period = (he_stat->period * 7) / 8;
	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}
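
/*
 * Illustrative example (not from the original sources): the 7/8 factor
 * above makes an entry that stops receiving samples fade geometrically,
 * e.g. a period of 1000 decays as 1000 -> 875 -> 765 -> 669 -> ... and
 * reaches zero after a few dozen decay passes, at which point
 * hists__decay_entry() below reports it as ready for deletion.
 */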
static void hists__delete_entry(struct hists *hists, struct hist_entry *he);
static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;
	u64 diff;

	if (prev_period == 0)
		return true;

	he_stat__decay(&he->stat);
	if (symbol_conf.cumulate_callchain)
		he_stat__decay(he->stat_acc);
	decay_callchain(he->callchain);

	diff = prev_period - he->stat.period;

	if (!he->depth) {
		hists->stats.total_period -= diff;
		if (!he->filtered)
			hists->stats.total_non_filtered_period -= diff;
	}

	if (!he->leaf) {
		struct hist_entry *child;
		struct rb_node *node = rb_first_cached(&he->hroot_out);

		while (node) {
			child = rb_entry(node, struct hist_entry, rb_node);
			node = rb_next(node);

			if (hists__decay_entry(hists, child))
				hists__delete_entry(hists, child);
		}
	}

	return he->stat.period == 0;
}
static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
	struct rb_root_cached *root_in;
	struct rb_root_cached *root_out;

	if (he->parent_he) {
		root_in  = &he->parent_he->hroot_in;
		root_out = &he->parent_he->hroot_out;
	} else {
		if (hists__has(hists, need_collapse))
			root_in = &hists->entries_collapsed;
		else
			root_in = hists->entries_in;
		root_out = &hists->entries;
	}

	rb_erase_cached(&he->rb_node_in, root_in);
	rb_erase_cached(&he->rb_node, root_out);

	--hists->nr_entries;
	if (!he->filtered)
		--hists->nr_non_filtered_entries;

	hist_entry__delete(he);
}
void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n))) {
			hists__delete_entry(hists, n);
		}
	}
}
void hists__delete_entries(struct hists *hists)
{
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		hists__delete_entry(hists, n);
	}
}
/*
 * histogram, sorted on item, collects periods
 */

static int hist_entry__init(struct hist_entry *he,
			    struct hist_entry *template,
			    bool sample_self,
			    size_t callchain_size)
{
	*he = *template;
	he->callchain_size = callchain_size;

	if (symbol_conf.cumulate_callchain) {
		he->stat_acc = malloc(sizeof(he->stat));
		if (he->stat_acc == NULL)
			return -ENOMEM;
		memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
		if (!sample_self)
			memset(&he->stat, 0, sizeof(he->stat));
	}

	map__get(he->ms.map);

	if (he->branch_info) {
		/*
		 * This branch info is (a part of) allocated from
		 * sample__resolve_bstack() and will be freed after
		 * adding new entries.  So we need to save a copy.
		 */
		he->branch_info = malloc(sizeof(*he->branch_info));
		if (he->branch_info == NULL)
			goto err;

		memcpy(he->branch_info, template->branch_info,
		       sizeof(*he->branch_info));

		map__get(he->branch_info->from.map);
		map__get(he->branch_info->to.map);
	}

	if (he->mem_info) {
		map__get(he->mem_info->iaddr.map);
		map__get(he->mem_info->daddr.map);
	}

	if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
		callchain_init(he->callchain);

	if (he->raw_data) {
		he->raw_data = memdup(he->raw_data, he->raw_size);
		if (he->raw_data == NULL)
			goto err_infos;
	}

	if (he->srcline) {
		he->srcline = strdup(he->srcline);
		if (he->srcline == NULL)
			goto err_rawdata;
	}

	if (symbol_conf.res_sample) {
		he->res_samples = calloc(sizeof(struct res_sample),
					 symbol_conf.res_sample);
		if (!he->res_samples)
			goto err_srcline;
	}

	INIT_LIST_HEAD(&he->pairs.node);
	thread__get(he->thread);
	he->hroot_in  = RB_ROOT_CACHED;
	he->hroot_out = RB_ROOT_CACHED;

	if (!symbol_conf.report_hierarchy)
		he->leaf = true;

	return 0;

err_srcline:
	zfree(&he->srcline);

err_rawdata:
	zfree(&he->raw_data);

err_infos:
	if (he->branch_info) {
		map__put(he->branch_info->from.map);
		map__put(he->branch_info->to.map);
		free(he->branch_info);
	}
	if (he->mem_info) {
		map__put(he->mem_info->iaddr.map);
		map__put(he->mem_info->daddr.map);
	}
err:
	map__zput(he->ms.map);
	zfree(&he->stat_acc);
	return -ENOMEM;
}
static void *hist_entry__zalloc(size_t size)
{
	return zalloc(size + sizeof(struct hist_entry));
}

static void hist_entry__free(void *ptr)
{
	free(ptr);
}

static struct hist_entry_ops default_ops = {
	.new	= hist_entry__zalloc,
	.free	= hist_entry__free,
};
static struct hist_entry *hist_entry__new(struct hist_entry *template,
					  bool sample_self)
{
	struct hist_entry_ops *ops = template->ops;
	size_t callchain_size = 0;
	struct hist_entry *he;
	int err = 0;

	if (!ops)
		ops = template->ops = &default_ops;

	if (symbol_conf.use_callchain)
		callchain_size = sizeof(struct callchain_root);

	he = ops->new(callchain_size);
	if (he) {
		err = hist_entry__init(he, template, sample_self, callchain_size);
		if (err) {
			ops->free(he);
			he = NULL;
		}
	}
	return he;
}
static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}
static void hist_entry__add_callchain_period(struct hist_entry *he, u64 period)
{
	if (!hist_entry__has_callchains(he) || !symbol_conf.use_callchain)
		return;

	he->hists->callchain_period += period;
	if (!he->filtered)
		he->hists->callchain_non_filtered_period += period;
}
static struct hist_entry *hists__findnew_entry(struct hists *hists,
					       struct hist_entry *entry,
					       struct addr_location *al,
					       bool sample_self)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	u64 period = entry->stat.period;
	u64 weight = entry->stat.weight;
	bool leftmost = true;

	p = &hists->entries_in->rb_root.rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in a same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			if (sample_self) {
				he_stat__add_period(&he->stat, period, weight);
				hist_entry__add_callchain_period(he, period);
			}
			if (symbol_conf.cumulate_callchain)
				he_stat__add_period(he->stat_acc, period, weight);

			/*
			 * This mem info was allocated from sample__resolve_mem
			 * and will not be used anymore.
			 */
			mem_info__zput(entry->mem_info);

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				map__put(he->ms.map);
				he->ms.map = map__get(entry->ms.map);
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	he = hist_entry__new(entry, sample_self);
	if (!he)
		return NULL;

	if (sample_self)
		hist_entry__add_callchain_period(he, period);
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color_cached(&he->rb_node_in, hists->entries_in, leftmost);
out:
	if (sample_self)
		he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
	if (symbol_conf.cumulate_callchain)
		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
	return he;
}
static unsigned random_max(unsigned high)
{
	unsigned thresh = -high % high;

	for (;;) {
		unsigned r = random();

		if (r >= thresh)
			return r % high;
	}
}
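
/*
 * Illustrative note (not from the original sources), assuming random()
 * acts as a uniform source over the unsigned range: "-high % high" is
 * (UINT_MAX + 1) % high, the count of leftover values that would make a
 * plain "r % high" biased.  Rejecting r < thresh and retrying leaves an
 * exact multiple of 'high' accepted values, so the final "r % high" is
 * unbiased.  E.g. for high == 3, thresh == 1 and only r == 0 is rejected.
 */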
static void hists__res_sample(struct hist_entry *he, struct perf_sample *sample)
{
	struct res_sample *r;
	int j;

	if (he->num_res < symbol_conf.res_sample) {
		j = he->num_res++;
	} else {
		j = random_max(symbol_conf.res_sample);
	}
	r = &he->res_samples[j];
	r->time = sample->time;
	r->cpu = sample->cpu;
	r->tid = sample->tid;
}
static struct hist_entry *
__hists__add_entry(struct hists *hists,
		   struct addr_location *al,
		   struct symbol *sym_parent,
		   struct branch_info *bi,
		   struct mem_info *mi,
		   struct perf_sample *sample,
		   bool sample_self,
		   struct hist_entry_ops *ops)
{
	struct namespaces *ns = thread__namespaces(al->thread);
	struct hist_entry entry = {
		.thread	= al->thread,
		.comm = thread__comm(al->thread),
		.cgroup_id = {
			.dev = ns ? ns->link_info[CGROUP_NS_INDEX].dev : 0,
			.ino = ns ? ns->link_info[CGROUP_NS_INDEX].ino : 0,
		},
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.srcline = (char *) al->srcline,
		.socket	 = al->socket,
		.cpu	 = al->cpu,
		.cpumode = al->cpumode,
		.ip	 = al->addr,
		.level	 = al->level,
		.stat = {
			.nr_events = 1,
			.period	= sample->period,
			.weight = sample->weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
		.hists	= hists,
		.branch_info = bi,
		.mem_info = mi,
		.transaction = sample->transaction,
		.raw_data = sample->raw_data,
		.raw_size = sample->raw_size,
		.ops = ops,
		.time = hist_time(sample->time),
	}, *he = hists__findnew_entry(hists, &entry, al, sample_self);

	if (!hists->has_callchains && he && he->callchain_size != 0)
		hists->has_callchains = true;
	if (he && symbol_conf.res_sample)
		hists__res_sample(he, sample);
	return he;
}
struct hist_entry *hists__add_entry(struct hists *hists,
				    struct addr_location *al,
				    struct symbol *sym_parent,
				    struct branch_info *bi,
				    struct mem_info *mi,
				    struct perf_sample *sample,
				    bool sample_self)
{
	return __hists__add_entry(hists, al, sym_parent, bi, mi,
				  sample, sample_self, NULL);
}
struct hist_entry *hists__add_entry_ops(struct hists *hists,
					struct hist_entry_ops *ops,
					struct addr_location *al,
					struct symbol *sym_parent,
					struct branch_info *bi,
					struct mem_info *mi,
					struct perf_sample *sample,
					bool sample_self)
{
	return __hists__add_entry(hists, al, sym_parent, bi, mi,
				  sample, sample_self, ops);
}
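
/*
 * Illustrative sketch (not from the original sources): a tool that needs
 * private per-entry state can route allocation through
 * hists__add_entry_ops().  Roughly, assuming a wrapper struct that embeds
 * the hist_entry as its last member (the pattern 'perf c2c' follows,
 * since the callchain area is laid out right after the hist_entry):
 *
 *	struct my_entry {
 *		u64			priv_data;	// hypothetical field
 *		struct hist_entry	he;		// must be last
 *	};
 *
 *	static void *my_entry_new(size_t size)
 *	{
 *		struct my_entry *me = zalloc(size + sizeof(*me));
 *
 *		return me ? &me->he : NULL;	// 'size' extra bytes follow 'he'
 *	}
 *
 *	static void my_entry_free(void *ptr)
 *	{
 *		free(container_of(ptr, struct my_entry, he));
 *	}
 *
 *	static struct hist_entry_ops my_ops = {
 *		.new	= my_entry_new,
 *		.free	= my_entry_free,
 *	};
 *
 * and then call hists__add_entry_ops(hists, &my_ops, al, ...) instead of
 * hists__add_entry().
 */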
static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
		    struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
			struct addr_location *al __maybe_unused)
{
	return 0;
}
static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_sample *sample = iter->sample;
	struct mem_info *mi;

	mi = sample__resolve_mem(sample, al);
	if (mi == NULL)
		return -ENOMEM;

	iter->priv = mi;
	return 0;
}
static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	u64 cost;
	struct mem_info *mi = iter->priv;
	struct hists *hists = evsel__hists(iter->evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	if (mi == NULL)
		return -EINVAL;

	cost = sample->weight;
	if (!cost)
		cost = 1;

	/*
	 * must pass period=weight in order to get the correct
	 * sorting from hists__collapse_resort() which is solely
	 * based on periods. We want sorting be done on nr_events * weight
	 * and this is indirectly achieved by passing period=weight here
	 * and the he_stat__add_period() function.
	 */
	sample->period = cost;

	he = hists__add_entry(hists, al, iter->parent, NULL, mi,
			      sample, true);
	if (!he)
		return -ENOMEM;

	iter->he = he;
	return 0;
}
*iter
,
783 struct addr_location
*al __maybe_unused
)
785 struct perf_evsel
*evsel
= iter
->evsel
;
786 struct hists
*hists
= evsel__hists(evsel
);
787 struct hist_entry
*he
= iter
->he
;
793 hists__inc_nr_samples(hists
, he
->filtered
);
795 err
= hist_entry__append_callchain(he
, iter
->sample
);
799 * We don't need to free iter->priv (mem_info) here since the mem info
800 * was either already freed in hists__findnew_entry() or passed to a
801 * new hist entry by hist_entry__new().
static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_sample *sample = iter->sample;

	bi = sample__resolve_bstack(sample, al);
	if (!bi)
		return -ENOMEM;

	iter->curr = 0;
	iter->total = sample->branch_stack->nr;

	iter->priv = bi;
	return 0;
}
static int
iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused,
			     struct addr_location *al __maybe_unused)
{
	/* to avoid calling callback function */
	iter->he = NULL;

	return 0;
}
static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi = iter->priv;
	int i = iter->curr;

	if (bi == NULL)
		return 0;

	if (iter->curr >= iter->total)
		return 0;

	al->map = bi[i].to.map;
	al->sym = bi[i].to.sym;
	al->addr = bi[i].to.addr;

	return 1;
}
static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he = NULL;
	int i = iter->curr;
	int err = 0;

	bi = iter->priv;

	if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
		goto out;

	/*
	 * The report shows the percentage of total branches captured
	 * and not events sampled. Thus we use a pseudo period of 1.
	 */
	sample->period = 1;
	sample->weight = bi->flags.cycles ? bi->flags.cycles : 1;

	he = hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
			      sample, true);
	if (he == NULL)
		return -ENOMEM;

	hists__inc_nr_samples(hists, he->filtered);

out:
	iter->he = he;
	iter->curr++;
	return err;
}
static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return iter->curr >= iter->total ? 0 : -1;
}
static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
			  struct addr_location *al __maybe_unused)
{
	return 0;
}
static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
			      sample, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	return 0;
}
static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	struct hist_entry *he = iter->he;
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;

	if (he == NULL)
		return 0;

	iter->he = NULL;

	hists__inc_nr_samples(evsel__hists(evsel), he->filtered);

	return hist_entry__append_callchain(he, sample);
}
static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
			      struct addr_location *al __maybe_unused)
{
	struct hist_entry **he_cache;

	callchain_cursor_commit(&callchain_cursor);

	/*
	 * This is for detecting cycles or recursions so that they're
	 * cumulated only one time to prevent entries more than 100%
	 * overhead.
	 */
	he_cache = malloc(sizeof(*he_cache) * (callchain_cursor.nr + 1));
	if (he_cache == NULL)
		return -ENOMEM;

	iter->priv = he_cache;
	iter->curr = 0;

	return 0;
}
static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
				 struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	int err = 0;

	he = hists__add_entry(hists, al, iter->parent, NULL, NULL,
			      sample, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	hist_entry__append_callchain(he, sample);

	/*
	 * We need to re-initialize the cursor since callchain_append()
	 * advanced the cursor to the end.
	 */
	callchain_cursor_commit(&callchain_cursor);

	hists__inc_nr_samples(hists, he->filtered);

	return err;
}
static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
			   struct addr_location *al)
{
	struct callchain_cursor_node *node;

	node = callchain_cursor_current(&callchain_cursor);
	if (node == NULL)
		return 0;

	return fill_callchain_info(al, node, iter->hide_unresolved);
}
static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
			       struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	struct hist_entry he_tmp = {
		.hists = evsel__hists(evsel),
		.cpu = al->cpu,
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ip = al->addr,
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.srcline = (char *) al->srcline,
		.parent = iter->parent,
		.raw_data = sample->raw_data,
		.raw_size = sample->raw_size,
	};
	int i;
	struct callchain_cursor cursor;

	callchain_cursor_snapshot(&cursor, &callchain_cursor);

	callchain_cursor_advance(&callchain_cursor);

	/*
	 * Check if there's duplicate entries in the callchain.
	 * It's possible that it has cycles or recursive calls.
	 */
	for (i = 0; i < iter->curr; i++) {
		if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
			/* to avoid calling callback function */
			iter->he = NULL;
			return 0;
		}
	}

	he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
			      sample, false);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
		callchain_append(he->callchain, &cursor, sample->period);
	return 0;
}
static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return 0;
}
const struct hist_iter_ops hist_iter_mem = {
	.prepare_entry		= iter_prepare_mem_entry,
	.add_single_entry	= iter_add_single_mem_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
	.prepare_entry		= iter_prepare_branch_entry,
	.add_single_entry	= iter_add_single_branch_entry,
	.next_entry		= iter_next_branch_entry,
	.add_next_entry		= iter_add_next_branch_entry,
	.finish_entry		= iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
	.prepare_entry		= iter_prepare_normal_entry,
	.add_single_entry	= iter_add_single_normal_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
	.prepare_entry		= iter_prepare_cumulative_entry,
	.add_single_entry	= iter_add_single_cumulative_entry,
	.next_entry		= iter_next_cumulative_entry,
	.add_next_entry		= iter_add_next_cumulative_entry,
	.finish_entry		= iter_finish_cumulative_entry,
};
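
/*
 * Illustrative sketch (not from the original sources): these tables are the
 * strategies driven by hist_entry_iter__add() below.  A tool's per-sample
 * callback sets one of them up roughly like this (simplified from the way
 * 'perf report' wires it up; the branch and mem variants are chosen when
 * the sample carries a branch stack or memory info):
 *
 *	struct hist_entry_iter iter = {
 *		.evsel	= evsel,
 *		.sample	= sample,
 *		.ops	= symbol_conf.cumulate_callchain ?
 *			  &hist_iter_cumulative : &hist_iter_normal,
 *	};
 *
 *	err = hist_entry_iter__add(&iter, &al, max_stack, cb_arg);
 *
 * prepare_entry and add_single_entry run once per sample, next_entry and
 * add_next_entry then loop over branch-stack or callchain entries, and
 * finish_entry releases any iterator state.
 */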
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
			 int max_stack_depth, void *arg)
{
	int err, err2;
	struct map *alm = NULL;

	if (al)
		alm = map__get(al->map);

	err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
					iter->evsel, al, max_stack_depth);
	if (err) {
		map__put(alm);
		return err;
	}

	err = iter->ops->prepare_entry(iter, al);
	if (err)
		goto out;

	err = iter->ops->add_single_entry(iter, al);
	if (err)
		goto out;

	if (iter->he && iter->add_entry_cb) {
		err = iter->add_entry_cb(iter, al, true, arg);
		if (err)
			goto out;
	}

	while (iter->ops->next_entry(iter, al)) {
		err = iter->ops->add_next_entry(iter, al);
		if (err)
			break;

		if (iter->he && iter->add_entry_cb) {
			err = iter->add_entry_cb(iter, al, false, arg);
			if (err)
				goto out;
		}
	}

out:
	err2 = iter->ops->finish_entry(iter, al);
	if (!err)
		err = err2;

	map__put(alm);

	return err;
}
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct hists *hists = left->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    !perf_hpp__defined_dynamic_entry(fmt, hists))
			continue;

		cmp = fmt->cmp(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}
int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct hists *hists = left->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    !perf_hpp__defined_dynamic_entry(fmt, hists))
			continue;

		cmp = fmt->collapse(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}
void hist_entry__delete(struct hist_entry *he)
{
	struct hist_entry_ops *ops = he->ops;

	thread__zput(he->thread);
	map__zput(he->ms.map);

	if (he->branch_info) {
		map__zput(he->branch_info->from.map);
		map__zput(he->branch_info->to.map);
		free_srcline(he->branch_info->srcline_from);
		free_srcline(he->branch_info->srcline_to);
		zfree(&he->branch_info);
	}

	if (he->mem_info) {
		map__zput(he->mem_info->iaddr.map);
		map__zput(he->mem_info->daddr.map);
		mem_info__zput(he->mem_info);
	}

	zfree(&he->res_samples);
	zfree(&he->stat_acc);
	free_srcline(he->srcline);
	if (he->srcfile && he->srcfile[0])
		free(he->srcfile);
	free_callchain(he->callchain);
	free(he->trace_output);
	free(he->raw_data);
	ops->free(he);
}
/*
 * If this is not the last column, then we need to pad it according to the
 * pre-calculated max length for this column, otherwise don't bother adding
 * spaces because that would break viewing this with, for instance, 'less',
 * that would show tons of trailing spaces when a long C++ demangled method
 * name is sampled.
 */
int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
				   struct perf_hpp_fmt *fmt, int printed)
{
	if (!list_is_last(&fmt->list, &he->hists->hpp_list->fields)) {
		const int width = fmt->width(fmt, hpp, he->hists);
		if (printed < width) {
			advance_hpp(hpp, printed);
			printed = scnprintf(hpp->buf, hpp->size, "%-*s", width - printed, " ");
		}
	}

	return printed;
}
/*
 * collapse the histogram
 */

static void hists__apply_filters(struct hists *hists, struct hist_entry *he);
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *he,
				       enum hist_filter type);
typedef bool (*fmt_chk_fn)(struct perf_hpp_fmt *fmt);

static bool check_thread_entry(struct perf_hpp_fmt *fmt)
{
	return perf_hpp__is_thread_entry(fmt) || perf_hpp__is_comm_entry(fmt);
}
static void hist_entry__check_and_remove_filter(struct hist_entry *he,
						enum hist_filter type,
						fmt_chk_fn check)
{
	struct perf_hpp_fmt *fmt;
	bool type_match = false;
	struct hist_entry *parent = he->parent_he;

	switch (type) {
	case HIST_FILTER__THREAD:
		if (symbol_conf.comm_list == NULL &&
		    symbol_conf.pid_list == NULL &&
		    symbol_conf.tid_list == NULL)
			return;
		break;
	case HIST_FILTER__DSO:
		if (symbol_conf.dso_list == NULL)
			return;
		break;
	case HIST_FILTER__SYMBOL:
		if (symbol_conf.sym_list == NULL)
			return;
		break;
	case HIST_FILTER__PARENT:
	case HIST_FILTER__GUEST:
	case HIST_FILTER__HOST:
	case HIST_FILTER__SOCKET:
	case HIST_FILTER__C2C:
	default:
		return;
	}

	/* if it's filtered by own fmt, it has to have filter bits */
	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
		if (check(fmt)) {
			type_match = true;
			break;
		}
	}

	if (type_match) {
		/*
		 * If the filter is for current level entry, propagate
		 * filter marker to parents.  The marker bit was
		 * already set by default so it only needs to clear
		 * non-filtered entries.
		 */
		if (!(he->filtered & (1 << type))) {
			while (parent) {
				parent->filtered &= ~(1 << type);
				parent = parent->parent_he;
			}
		}
	} else {
		/*
		 * If current entry doesn't have matching formats, set
		 * filter marker for upper level entries.  it will be
		 * cleared if its lower level entries is not filtered.
		 *
		 * For lower-level entries, it inherits parent's
		 * filter bit so that lower level entries of a
		 * non-filtered entry won't set the filter marker.
		 */
		if (parent == NULL)
			he->filtered |= (1 << type);
		else
			he->filtered |= (parent->filtered & (1 << type));
	}
}
static void hist_entry__apply_hierarchy_filters(struct hist_entry *he)
{
	hist_entry__check_and_remove_filter(he, HIST_FILTER__THREAD,
					    check_thread_entry);

	hist_entry__check_and_remove_filter(he, HIST_FILTER__DSO,
					    perf_hpp__is_dso_entry);

	hist_entry__check_and_remove_filter(he, HIST_FILTER__SYMBOL,
					    perf_hpp__is_sym_entry);

	hists__apply_filters(he->hists, he);
}
static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
						 struct rb_root_cached *root,
						 struct hist_entry *he,
						 struct hist_entry *parent_he,
						 struct perf_hpp_list *hpp_list)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter, *new;
	struct perf_hpp_fmt *fmt;
	int64_t cmp;
	bool leftmost = true;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = 0;
		perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
			cmp = fmt->collapse(fmt, iter, he);
			if (cmp)
				break;
		}

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);
			return iter;
		}

		if (cmp < 0)
			p = &parent->rb_left;
		else {
			p = &parent->rb_right;
			leftmost = false;
		}
	}

	new = hist_entry__new(he, true);
	if (new == NULL)
		return NULL;

	hists->nr_entries++;

	/* save related format list for output */
	new->hpp_list = hpp_list;
	new->parent_he = parent_he;

	hist_entry__apply_hierarchy_filters(new);

	/* some fields are now passed to 'new' */
	perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
		if (perf_hpp__is_trace_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
			he->trace_output = NULL;
		else
			new->trace_output = NULL;

		if (perf_hpp__is_srcline_entry(fmt))
			he->srcline = NULL;
		else
			new->srcline = NULL;

		if (perf_hpp__is_srcfile_entry(fmt))
			he->srcfile = NULL;
		else
			new->srcfile = NULL;
	}

	rb_link_node(&new->rb_node_in, parent, p);
	rb_insert_color_cached(&new->rb_node_in, root, leftmost);
	return new;
}
static int hists__hierarchy_insert_entry(struct hists *hists,
					 struct rb_root_cached *root,
					 struct hist_entry *he)
{
	struct perf_hpp_list_node *node;
	struct hist_entry *new_he = NULL;
	struct hist_entry *parent = NULL;
	int depth = 0;
	int ret = 0;

	list_for_each_entry(node, &hists->hpp_formats, list) {
		/* skip period (overhead) and elided columns */
		if (node->level == 0 || node->skip)
			continue;

		/* insert copy of 'he' for each fmt into the hierarchy */
		new_he = hierarchy_insert_entry(hists, root, he, parent, &node->hpp);
		if (new_he == NULL) {
			ret = -1;
			break;
		}

		root = &new_he->hroot_in;
		new_he->depth = depth++;
		parent = new_he;
	}

	if (new_he) {
		new_he->leaf = true;

		if (hist_entry__has_callchains(new_he) &&
		    symbol_conf.use_callchain) {
			callchain_cursor_reset(&callchain_cursor);
			if (callchain_merge(&callchain_cursor,
					    new_he->callchain,
					    he->callchain) < 0)
				ret = -1;
		}
	}

	/* 'he' is no longer used */
	hist_entry__delete(he);

	/* return 0 (or -1) since it already applied filters */
	return ret;
}
static int hists__collapse_insert_entry(struct hists *hists,
					struct rb_root_cached *root,
					struct hist_entry *he)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;
	bool leftmost = true;

	if (symbol_conf.report_hierarchy)
		return hists__hierarchy_insert_entry(hists, root, he);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			int ret = 0;

			he_stat__add_stat(&iter->stat, &he->stat);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_stat(iter->stat_acc, he->stat_acc);

			if (hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				if (callchain_merge(&callchain_cursor,
						    iter->callchain,
						    he->callchain) < 0)
					ret = -1;
			}
			hist_entry__delete(he);
			return ret;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color_cached(&he->rb_node_in, root, leftmost);
	return 1;
}
struct rb_root_cached *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root_cached *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}
static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
	hists__filter_entry_by_socket(hists, he);
}
int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root_cached *root;
	struct rb_node *next;
	struct hist_entry *n;
	int ret;

	if (!hists__has(hists, need_collapse))
		return 0;

	hists->nr_entries = 0;

	root = hists__get_rotate_entries_in(hists);

	next = rb_first_cached(root);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase_cached(&n->rb_node_in, root);
		ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n);
		if (ret < 0)
			return -1;

		if (ret) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
		if (prog)
			ui_progress__update(prog, 1);
	}
	return 0;
}
static int64_t hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
	struct hists *hists = a->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__should_skip(fmt, a->hists))
			continue;

		cmp = fmt->sort(fmt, a, b);
		if (cmp)
			break;
	}

	return cmp;
}
static void hists__reset_filter_stats(struct hists *hists)
{
	hists->nr_non_filtered_entries = 0;
	hists->stats.total_non_filtered_period = 0;
}

void hists__reset_stats(struct hists *hists)
{
	hists->nr_entries = 0;
	hists->stats.total_period = 0;

	hists__reset_filter_stats(hists);
}

static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
	hists->nr_non_filtered_entries++;
	hists->stats.total_non_filtered_period += h->stat.period;
}

void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered)
		hists__inc_filter_stats(hists, h);

	hists->nr_entries++;
	hists->stats.total_period += h->stat.period;
}
static void hierarchy_recalc_total_periods(struct hists *hists)
{
	struct rb_node *node;
	struct hist_entry *he;

	node = rb_first_cached(&hists->entries);

	hists->stats.total_period = 0;
	hists->stats.total_non_filtered_period = 0;

	/*
	 * recalculate total period using top-level entries only
	 * since lower level entries only see non-filtered entries
	 * but upper level entries have sum of both entries.
	 */
	while (node) {
		he = rb_entry(node, struct hist_entry, rb_node);
		node = rb_next(node);

		hists->stats.total_period += he->stat.period;
		if (!he->filtered)
			hists->stats.total_non_filtered_period += he->stat.period;
	}
}
static void hierarchy_insert_output_entry(struct rb_root_cached *root,
					  struct hist_entry *he)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct perf_hpp_fmt *fmt;
	bool leftmost = true;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &parent->rb_left;
		else {
			p = &parent->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color_cached(&he->rb_node, root, leftmost);

	/* update column width of dynamic entry */
	perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt))
			fmt->sort(fmt, he, NULL);
	}
}
static void hists__hierarchy_output_resort(struct hists *hists,
					   struct ui_progress *prog,
					   struct rb_root_cached *root_in,
					   struct rb_root_cached *root_out,
					   u64 min_callchain_hits,
					   bool use_callchain)
{
	struct rb_node *node;
	struct hist_entry *he;

	*root_out = RB_ROOT_CACHED;
	node = rb_first_cached(root_in);

	while (node) {
		he = rb_entry(node, struct hist_entry, rb_node_in);
		node = rb_next(node);

		hierarchy_insert_output_entry(root_out, he);

		if (prog)
			ui_progress__update(prog, 1);

		hists->nr_entries++;
		if (!he->filtered) {
			hists->nr_non_filtered_entries++;
			hists__calc_col_len(hists, he);
		}

		if (!he->leaf) {
			hists__hierarchy_output_resort(hists, prog,
						       &he->hroot_in,
						       &he->hroot_out,
						       min_callchain_hits,
						       use_callchain);
			continue;
		}

		if (!use_callchain)
			continue;

		if (callchain_param.mode == CHAIN_GRAPH_REL) {
			u64 total = he->stat.period;

			if (symbol_conf.cumulate_callchain)
				total = he->stat_acc->period;

			min_callchain_hits = total * (callchain_param.min_percent / 100);
		}

		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);
	}
}
static void __hists__insert_output_entry(struct rb_root_cached *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits,
					 bool use_callchain)
{
	struct rb_node **p = &entries->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct perf_hpp_fmt *fmt;
	bool leftmost = true;

	if (use_callchain) {
		if (callchain_param.mode == CHAIN_GRAPH_REL) {
			u64 total = he->stat.period;

			if (symbol_conf.cumulate_callchain)
				total = he->stat_acc->period;

			min_callchain_hits = total * (callchain_param.min_percent / 100);
		}
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);
	}

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color_cached(&he->rb_node, entries, leftmost);

	perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    perf_hpp__defined_dynamic_entry(fmt, he->hists))
			fmt->sort(fmt, he, NULL);	/* update column width */
	}
}
static void output_resort(struct hists *hists, struct ui_progress *prog,
			  bool use_callchain, hists__resort_cb_t cb,
			  void *cb_arg)
{
	struct rb_root_cached *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 callchain_total;
	u64 min_callchain_hits;

	callchain_total = hists->callchain_period;
	if (symbol_conf.filter_relative)
		callchain_total = hists->callchain_non_filtered_period;

	min_callchain_hits = callchain_total * (callchain_param.min_percent / 100);

	hists__reset_stats(hists);
	hists__reset_col_len(hists);

	if (symbol_conf.report_hierarchy) {
		hists__hierarchy_output_resort(hists, prog,
					       &hists->entries_collapsed,
					       &hists->entries,
					       min_callchain_hits,
					       use_callchain);
		hierarchy_recalc_total_periods(hists);
		return;
	}

	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first_cached(root);
	hists->entries = RB_ROOT_CACHED;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		if (cb && cb(n, cb_arg))
			continue;

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
		hists__inc_stats(hists, n);

		if (!n->filtered)
			hists__calc_col_len(hists, n);

		if (prog)
			ui_progress__update(prog, 1);
	}
}
void perf_evsel__output_resort_cb(struct perf_evsel *evsel, struct ui_progress *prog,
				  hists__resort_cb_t cb, void *cb_arg)
{
	bool use_callchain;

	if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
		use_callchain = evsel__has_callchain(evsel);
	else
		use_callchain = symbol_conf.use_callchain;

	use_callchain |= symbol_conf.show_branchflag_count;

	output_resort(evsel__hists(evsel), prog, use_callchain, cb, cb_arg);
}

void perf_evsel__output_resort(struct perf_evsel *evsel, struct ui_progress *prog)
{
	return perf_evsel__output_resort_cb(evsel, prog, NULL, NULL);
}
void hists__output_resort(struct hists *hists, struct ui_progress *prog)
{
	output_resort(hists, prog, symbol_conf.use_callchain, NULL, NULL);
}

void hists__output_resort_cb(struct hists *hists, struct ui_progress *prog,
			     hists__resort_cb_t cb)
{
	output_resort(hists, prog, symbol_conf.use_callchain, cb, NULL);
}
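
/*
 * Illustrative sketch (not from the original sources): the usual
 * post-processing a tool runs after all samples have been added is a
 * collapse pass followed by an output resort, roughly:
 *
 *	evlist__for_each_entry(session->evlist, pos) {
 *		struct hists *hists = evsel__hists(pos);
 *
 *		hists__collapse_resort(hists, &prog);
 *		perf_evsel__output_resort(pos, &prog);
 *	}
 *
 * The collapse pass merges entries that compare equal under the sort keys
 * (hist_entry__collapse), the output pass orders the result for display
 * (hist_entry__sort) and recomputes column widths.
 */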
static bool can_goto_child(struct hist_entry *he, enum hierarchy_move_dir hmd)
{
	if (he->leaf || hmd == HMD_FORCE_SIBLING)
		return false;

	if (he->unfolded || hmd == HMD_FORCE_CHILD)
		return true;

	return false;
}

struct rb_node *rb_hierarchy_last(struct rb_node *node)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	while (can_goto_child(he, HMD_NORMAL)) {
		node = rb_last(&he->hroot_out.rb_root);
		he = rb_entry(node, struct hist_entry, rb_node);
	}
	return node;
}

struct rb_node *__rb_hierarchy_next(struct rb_node *node, enum hierarchy_move_dir hmd)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	if (can_goto_child(he, hmd))
		node = rb_first_cached(&he->hroot_out);
	else
		node = rb_next(node);

	while (node == NULL) {
		he = he->parent_he;
		if (he == NULL)
			break;

		node = rb_next(&he->rb_node);
	}
	return node;
}

struct rb_node *rb_hierarchy_prev(struct rb_node *node)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	node = rb_prev(node);
	if (node)
		return rb_hierarchy_last(node);

	he = he->parent_he;
	if (he == NULL)
		return NULL;

	return &he->rb_node;
}
bool hist_entry__has_hierarchy_children(struct hist_entry *he, float limit)
{
	struct rb_node *node;
	struct hist_entry *child;
	float percent;

	if (he->leaf)
		return false;

	node = rb_first_cached(&he->hroot_out);
	child = rb_entry(node, struct hist_entry, rb_node);

	while (node && child->filtered) {
		node = rb_next(node);
		child = rb_entry(node, struct hist_entry, rb_node);
	}

	if (node)
		percent = hist_entry__get_percent_limit(child);
	else
		percent = 0;

	return node && percent >= limit;
}
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);

	if (symbol_conf.report_hierarchy) {
		struct hist_entry *parent = h->parent_he;

		while (parent) {
			he_stat__add_stat(&parent->stat, &h->stat);

			parent->filtered &= ~(1 << filter);

			if (parent->filtered)
				goto next;

			/* force fold unfiltered entry for simplicity */
			parent->unfolded = false;
			parent->has_no_entry = false;
			parent->row_offset = 0;
			parent->nr_rows = 0;
next:
			parent = parent->parent_he;
		}
	}

	if (h->filtered)
		return;

	/* force fold unfiltered entry for simplicity */
	h->unfolded = false;
	h->has_no_entry = false;
	h->row_offset = 0;
	h->nr_rows = 0;

	hists->stats.nr_non_filtered_samples += h->stat.nr_events;

	hists__inc_filter_stats(hists, h);
	hists__calc_col_len(hists, h);
}
static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he)
{
	if ((hists->socket_filter > -1) &&
	    (he->socket != hists->socket_filter)) {
		he->filtered |= (1 << HIST_FILTER__SOCKET);
		return true;
	}

	return false;
}
typedef bool (*filter_fn_t)(struct hists *hists, struct hist_entry *he);

static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t filter)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first_cached(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (filter(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, type);
	}
}
static void resort_filtered_entry(struct rb_root_cached *root,
				  struct hist_entry *he)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct rb_root_cached new_root = RB_ROOT_CACHED;
	struct rb_node *nd;
	bool leftmost = true;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color_cached(&he->rb_node, root, leftmost);

	if (he->leaf || he->filtered)
		return;

	nd = rb_first_cached(&he->hroot_out);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		nd = rb_next(nd);
		rb_erase_cached(&h->rb_node, &he->hroot_out);

		resort_filtered_entry(&new_root, h);
	}

	he->hroot_out = new_root;
}
static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg)
{
	struct rb_node *nd;
	struct rb_root_cached new_root = RB_ROOT_CACHED;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	nd = rb_first_cached(&hists->entries);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
		int ret;

		ret = hist_entry__filter(h, type, arg);

		/*
		 * case 1. non-matching type
		 * zero out the period, set filter marker and move to child
		 */
		if (ret < 0) {
			memset(&h->stat, 0, sizeof(h->stat));
			h->filtered |= (1 << type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_CHILD);
		}
		/*
		 * case 2. matched type (filter out)
		 * set filter marker and move to next
		 */
		else if (ret == 1) {
			h->filtered |= (1 << type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
		}
		/*
		 * case 3. ok (not filtered)
		 * add period to hists and parents, erase the filter marker
		 * and move to next sibling
		 */
		else {
			hists__remove_entry_filter(hists, h, type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
		}
	}

	hierarchy_recalc_total_periods(hists);

	/*
	 * resort output after applying a new filter since filter in a lower
	 * hierarchy can change periods in a upper hierarchy.
	 */
	nd = rb_first_cached(&hists->entries);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		nd = rb_next(nd);
		rb_erase_cached(&h->rb_node, &hists->entries);

		resort_filtered_entry(&new_root, h);
	}

	hists->entries = new_root;
}
void hists__filter_by_thread(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__THREAD,
					hists->thread_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__THREAD,
				      hists__filter_entry_by_thread);
}

void hists__filter_by_dso(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__DSO,
					hists->dso_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__DSO,
				      hists__filter_entry_by_dso);
}

void hists__filter_by_symbol(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__SYMBOL,
					hists->symbol_filter_str);
	else
		hists__filter_by_type(hists, HIST_FILTER__SYMBOL,
				      hists__filter_entry_by_symbol);
}

void hists__filter_by_socket(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__SOCKET,
					&hists->socket_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__SOCKET,
				      hists__filter_entry_by_socket);
}
void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}

void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
	events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
	if (!filtered)
		hists->stats.nr_non_filtered_samples++;
}
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root_cached *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	bool leftmost = true;

	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_root.rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	he = hist_entry__new(pair, true);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		if (symbol_conf.cumulate_callchain)
			memset(he->stat_acc, 0, sizeof(he->stat));
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color_cached(&he->rb_node_in, root, leftmost);
		hists__inc_stats(hists, he);
		he->dummy = true;
	}
out:
	return he;
}
static struct hist_entry *add_dummy_hierarchy_entry(struct hists *hists,
						    struct rb_root_cached *root,
						    struct hist_entry *pair)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	struct perf_hpp_fmt *fmt;
	bool leftmost = true;

	p = &root->rb_root.rb_node;
	while (*p != NULL) {
		int64_t cmp = 0;

		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
			cmp = fmt->collapse(fmt, he, pair);
			if (cmp)
				break;
		}
		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &parent->rb_left;
		else {
			p = &parent->rb_right;
			leftmost = false;
		}
	}

	he = hist_entry__new(pair, true);
	if (he) {
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color_cached(&he->rb_node_in, root, leftmost);

		he->dummy = true;
		he->hists = hists;
		memset(&he->stat, 0, sizeof(he->stat));
		hists__inc_stats(hists, he);
	}
out:
	return he;
}
static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (hists__has(hists, need_collapse))
		n = hists->entries_collapsed.rb_root.rb_node;
	else
		n = hists->entries_in->rb_root.rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}
static struct hist_entry *hists__find_hierarchy_entry(struct rb_root_cached *root,
						      struct hist_entry *he)
{
	struct rb_node *n = root->rb_root.rb_node;

	while (n) {
		struct hist_entry *iter;
		struct perf_hpp_fmt *fmt;
		int64_t cmp = 0;

		iter = rb_entry(n, struct hist_entry, rb_node_in);
		perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
			cmp = fmt->collapse(fmt, iter, he);
			if (cmp)
				break;
		}

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}
static void hists__match_hierarchy(struct rb_root_cached *leader_root,
				   struct rb_root_cached *other_root)
{
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	for (nd = rb_first_cached(leader_root); nd; nd = rb_next(nd)) {
		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_hierarchy_entry(other_root, pos);

		if (pair) {
			hist_entry__add_pair(pair, pos);
			hists__match_hierarchy(&pos->hroot_in, &pair->hroot_in);
		}
	}
}
/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root_cached *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (symbol_conf.report_hierarchy) {
		/* hierarchy report always collapses entries */
		return hists__match_hierarchy(&leader->entries_collapsed,
					      &other->entries_collapsed);
	}

	if (hists__has(leader, need_collapse))
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}
static int hists__link_hierarchy(struct hists *leader_hists,
				 struct hist_entry *parent,
				 struct rb_root_cached *leader_root,
				 struct rb_root_cached *other_root)
{
	struct rb_node *nd;
	struct hist_entry *pos, *leader;

	for (nd = rb_first_cached(other_root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (hist_entry__has_pairs(pos)) {
			bool found = false;

			list_for_each_entry(leader, &pos->pairs.head, pairs.node) {
				if (leader->hists == leader_hists) {
					found = true;
					break;
				}
			}
			if (!found)
				return -1;
		} else {
			leader = add_dummy_hierarchy_entry(leader_hists,
							   leader_root, pos);
			if (leader == NULL)
				return -1;

			/* do not point parent in the pos */
			leader->parent_he = parent;

			hist_entry__add_pair(pos, leader);
		}

		if (!pos->leaf) {
			if (hists__link_hierarchy(leader_hists, leader,
						  &leader->hroot_in,
						  &pos->hroot_in) < 0)
				return -1;
		}
	}
	return 0;
}
/*
 * Look for entries in the other hists that are not present in the leader, if
 * we find them, just add a dummy entry on the leader hists, with period=0,
 * nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root_cached *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (symbol_conf.report_hierarchy) {
		/* hierarchy report always collapses entries */
		return hists__link_hierarchy(leader, NULL,
					     &leader->entries_collapsed,
					     &other->entries_collapsed);
	}

	if (hists__has(other, need_collapse))
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}
void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
			  struct perf_sample *sample, bool nonany_branch_mode)
{
	struct branch_info *bi;

	/* If we have branch cycles always annotate them. */
	if (bs && bs->nr && bs->entries[0].flags.cycles) {
		int i;

		bi = sample__resolve_bstack(sample, al);
		if (bi) {
			struct addr_map_symbol *prev = NULL;

			/*
			 * Ignore errors, still want to process the
			 * other entries.
			 *
			 * For non standard branch modes always
			 * force no IPC (prev == NULL)
			 *
			 * Note that perf stores branches reversed from
			 * program order!
			 */
			for (i = bs->nr - 1; i >= 0; i--) {
				addr_map_symbol__account_cycles(&bi[i].from,
					nonany_branch_mode ? NULL : prev,
					bi[i].flags.cycles);
				prev = &bi[i].to;
			}
			free(bi);
		}
	}
}
size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = 0;

	evlist__for_each_entry(evlist, pos) {
		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
		ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
	}

	return ret;
}
u64 hists__total_period(struct hists *hists)
{
	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
		hists->stats.total_period;
}
int __hists__scnprintf_title(struct hists *hists, char *bf, size_t size, bool show_freq)
{
	char unit;
	int printed;
	const struct dso *dso = hists->dso_filter;
	const struct thread *thread = hists->thread_filter;
	int socket_id = hists->socket_filter;
	unsigned long nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE];
	u64 nr_events = hists->stats.total_period;
	struct perf_evsel *evsel = hists_to_evsel(hists);
	const char *ev_name = perf_evsel__name(evsel);
	char buf[512], sample_freq_str[64] = "";
	size_t buflen = sizeof(buf);
	char ref[30] = " show reference callgraph, ";
	bool enable_ref = false;

	if (symbol_conf.filter_relative) {
		nr_samples = hists->stats.nr_non_filtered_samples;
		nr_events = hists->stats.total_non_filtered_period;
	}

	if (perf_evsel__is_group_event(evsel)) {
		struct perf_evsel *pos;

		perf_evsel__group_desc(evsel, buf, buflen);
		ev_name = buf;

		for_each_group_member(pos, evsel) {
			struct hists *pos_hists = evsel__hists(pos);

			if (symbol_conf.filter_relative) {
				nr_samples += pos_hists->stats.nr_non_filtered_samples;
				nr_events += pos_hists->stats.total_non_filtered_period;
			} else {
				nr_samples += pos_hists->stats.nr_events[PERF_RECORD_SAMPLE];
				nr_events += pos_hists->stats.total_period;
			}
		}
	}

	if (symbol_conf.show_ref_callgraph &&
	    strstr(ev_name, "call-graph=no"))
		enable_ref = true;

	if (show_freq)
		scnprintf(sample_freq_str, sizeof(sample_freq_str), " %d Hz,", evsel->attr.sample_freq);

	nr_samples = convert_unit(nr_samples, &unit);
	printed = scnprintf(bf, size,
			   "Samples: %lu%c of event%s '%s',%s%sEvent count (approx.): %" PRIu64,
			   nr_samples, unit, evsel->nr_members > 1 ? "s" : "",
			   ev_name, sample_freq_str, enable_ref ? ref : " ", nr_events);

	if (hists->uid_filter_str)
		printed += snprintf(bf + printed, size - printed,
				    ", UID: %s", hists->uid_filter_str);
	if (thread) {
		if (hists__has(hists, thread)) {
			printed += scnprintf(bf + printed, size - printed,
					     ", Thread: %s(%d)",
					     (thread->comm_set ? thread__comm_str(thread) : ""),
					     thread->tid);
		} else {
			printed += scnprintf(bf + printed, size - printed,
					     ", Thread: %s",
					     (thread->comm_set ? thread__comm_str(thread) : ""));
		}
	}
	if (dso)
		printed += scnprintf(bf + printed, size - printed,
				     ", DSO: %s", dso->short_name);
	if (socket_id > -1)
		printed += scnprintf(bf + printed, size - printed,
				     ", Processor Socket: %d", socket_id);

	return printed;
}
int parse_filter_percentage(const struct option *opt __maybe_unused,
			    const char *arg, int unset __maybe_unused)
{
	if (!strcmp(arg, "relative"))
		symbol_conf.filter_relative = true;
	else if (!strcmp(arg, "absolute"))
		symbol_conf.filter_relative = false;
	else {
		pr_debug("Invalid percentage: %s\n", arg);
		return -1;
	}

	return 0;
}
int perf_hist_config(const char *var, const char *value)
{
	if (!strcmp(var, "hist.percentage"))
		return parse_filter_percentage(NULL, value, 0);

	return 0;
}
int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
{
	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT_CACHED;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT_CACHED;
	hists->entries = RB_ROOT_CACHED;
	pthread_mutex_init(&hists->lock, NULL);
	hists->socket_filter = -1;
	hists->hpp_list = hpp_list;
	INIT_LIST_HEAD(&hists->hpp_formats);
	return 0;
}
static void hists__delete_remaining_entries(struct rb_root_cached *root)
{
	struct rb_node *node;
	struct hist_entry *he;

	while (!RB_EMPTY_ROOT(&root->rb_root)) {
		node = rb_first_cached(root);
		rb_erase_cached(node, root);

		he = rb_entry(node, struct hist_entry, rb_node_in);
		hist_entry__delete(he);
	}
}
static void hists__delete_all_entries(struct hists *hists)
{
	hists__delete_entries(hists);
	hists__delete_remaining_entries(&hists->entries_in_array[0]);
	hists__delete_remaining_entries(&hists->entries_in_array[1]);
	hists__delete_remaining_entries(&hists->entries_collapsed);
}
static void hists_evsel__exit(struct perf_evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);
	struct perf_hpp_fmt *fmt, *pos;
	struct perf_hpp_list_node *node, *tmp;

	hists__delete_all_entries(hists);

	list_for_each_entry_safe(node, tmp, &hists->hpp_formats, list) {
		perf_hpp_list__for_each_format_safe(&node->hpp, fmt, pos) {
			list_del(&fmt->list);
			free(fmt);
		}
		list_del(&node->list);
		free(node);
	}
}
static int hists_evsel__init(struct perf_evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);

	__hists__init(hists, &perf_hpp_list);
	return 0;
}
/*
 * XXX We probably need a hists_evsel__exit() to free the hist_entries
 * stored in the rbtree...
 */

int hists__init(void)
{
	int err = perf_evsel__object_config(sizeof(struct hists_evsel),
					    hists_evsel__init,
					    hists_evsel__exit);
	if (err)
		fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);

	return err;
}
void perf_hpp_list__init(struct perf_hpp_list *list)
{
	INIT_LIST_HEAD(&list->fields);
	INIT_LIST_HEAD(&list->sorts);
}