// SPDX-License-Identifier: GPL-2.0
#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "map.h"
#include "session.h"
#include "namespaces.h"
#include "sort.h"
#include "units.h"
#include "evlist.h"
#include "evsel.h"
#include "annotate.h"
#include "srcline.h"
#include "ui/progress.h"
#include <errno.h>
#include <math.h>
#include <sys/param.h>
static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he);
u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose > 0)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 8);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			if (verbose > 0)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			if (verbose > 0)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}

		if (h->branch_info->srcline_from)
			hists__new_col_len(hists, HISTC_SRCLINE_FROM,
					   strlen(h->branch_info->srcline_from));
		if (h->branch_info->srcline_to)
			hists__new_col_len(hists, HISTC_SRCLINE_TO,
					   strlen(h->branch_info->srcline_to));
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.sym) {
			symlen = (int)h->mem_info->daddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen + 1);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen);
		}

		if (h->mem_info->iaddr.sym) {
			symlen = (int)h->mem_info->iaddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		}

		if (h->mem_info->daddr.map) {
			symlen = dso__name_len(h->mem_info->daddr.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}

		hists__new_col_len(hists, HISTC_MEM_PHYS_DADDR,
				   unresolved_col_width + 4 + 2);

	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_CGROUP_ID, 20);
	hists__new_col_len(hists, HISTC_CPU, 3);
	hists__new_col_len(hists, HISTC_SOCKET, 6);
	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);

	if (h->srcline) {
		len = MAX(strlen(h->srcline), strlen(sort_srcline.se_header));
		hists__new_col_len(hists, HISTC_SRCLINE, len);
	}

	if (h->srcfile)
		hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));

	if (h->transaction)
		hists__new_col_len(hists, HISTC_TRANSACTION,
				   hist_entry__transaction_len());

	if (h->trace_output)
		hists__new_col_len(hists, HISTC_TRACE, strlen(h->trace_output));
}
void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}
static void he_stat__add_cpumode_period(struct he_stat *he_stat,
					unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he_stat->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he_stat->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he_stat->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he_stat->period_guest_us += period;
		break;
	default:
		break;
	}
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{
	he_stat->period		+= period;
	he_stat->weight		+= weight;
	he_stat->nr_events	+= 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period		+= src->period;
	dest->period_sys	+= src->period_sys;
	dest->period_us		+= src->period_us;
	dest->period_guest_sys	+= src->period_guest_sys;
	dest->period_guest_us	+= src->period_guest_us;
	dest->nr_events		+= src->nr_events;
	dest->weight		+= src->weight;
}
static void he_stat__decay(struct he_stat *he_stat)
{
	he_stat->period = (he_stat->period * 7) / 8;
	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}
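
/*
 * Decay behaviour, sketched (added note): each call scales the period
 * by 7/8 = 0.875, so after n decay rounds an idle entry keeps
 * 0.875^n of its period -- roughly half after five rounds
 * (0.875^5 ~= 0.51).  Integer division also drives any period below 8
 * down to 0 eventually, letting hists__decay_entry() report the entry
 * as dead so it can be deleted.
 */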
static void hists__delete_entry(struct hists *hists, struct hist_entry *he);
static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;
	u64 diff;

	if (prev_period == 0)
		return true;

	he_stat__decay(&he->stat);
	if (symbol_conf.cumulate_callchain)
		he_stat__decay(he->stat_acc);
	decay_callchain(he->callchain);

	diff = prev_period - he->stat.period;

	if (!he->depth) {
		hists->stats.total_period -= diff;
		if (!he->filtered)
			hists->stats.total_non_filtered_period -= diff;
	}

	if (!he->leaf) {
		struct hist_entry *child;
		struct rb_node *node = rb_first(&he->hroot_out);

		while (node) {
			child = rb_entry(node, struct hist_entry, rb_node);
			node = rb_next(node);

			if (hists__decay_entry(hists, child))
				hists__delete_entry(hists, child);
		}
	}

	return he->stat.period == 0;
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
	struct rb_root *root_in;
	struct rb_root *root_out;

	if (he->parent_he) {
		root_in  = &he->parent_he->hroot_in;
		root_out = &he->parent_he->hroot_out;
	} else {
		if (hists__has(hists, need_collapse))
			root_in = &hists->entries_collapsed;
		else
			root_in = hists->entries_in;
		root_out = &hists->entries;
	}

	rb_erase(&he->rb_node_in, root_in);
	rb_erase(&he->rb_node, root_out);

	--hists->nr_entries;
	if (!he->filtered)
		--hists->nr_non_filtered_entries;

	hist_entry__delete(he);
}
void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n))) {
			hists__delete_entry(hists, n);
		}
	}
}

void hists__delete_entries(struct hists *hists)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		hists__delete_entry(hists, n);
	}
}

/*
 * histogram, sorted on item, collects periods
 */
static int hist_entry__init(struct hist_entry *he,
			    struct hist_entry *template,
			    bool sample_self)
{
	*he = *template;

	if (symbol_conf.cumulate_callchain) {
		he->stat_acc = malloc(sizeof(he->stat));
		if (he->stat_acc == NULL)
			return -ENOMEM;
		memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
		if (!sample_self)
			memset(&he->stat, 0, sizeof(he->stat));
	}

	map__get(he->ms.map);

	if (he->branch_info) {
		/*
		 * This branch info is (a part of) allocated from
		 * sample__resolve_bstack() and will be freed after
		 * adding new entries.  So we need to save a copy.
		 */
		he->branch_info = malloc(sizeof(*he->branch_info));
		if (he->branch_info == NULL) {
			map__zput(he->ms.map);
			free(he->stat_acc);
			return -ENOMEM;
		}

		memcpy(he->branch_info, template->branch_info,
		       sizeof(*he->branch_info));

		map__get(he->branch_info->from.map);
		map__get(he->branch_info->to.map);
	}

	if (he->mem_info) {
		map__get(he->mem_info->iaddr.map);
		map__get(he->mem_info->daddr.map);
	}

	if (symbol_conf.use_callchain)
		callchain_init(he->callchain);

	if (he->raw_data) {
		he->raw_data = memdup(he->raw_data, he->raw_size);

		if (he->raw_data == NULL) {
			map__put(he->ms.map);
			if (he->branch_info) {
				map__put(he->branch_info->from.map);
				map__put(he->branch_info->to.map);
				free(he->branch_info);
			}
			if (he->mem_info) {
				map__put(he->mem_info->iaddr.map);
				map__put(he->mem_info->daddr.map);
			}
			free(he->stat_acc);
			return -ENOMEM;
		}
	}
	INIT_LIST_HEAD(&he->pairs.node);
	thread__get(he->thread);
	he->hroot_in  = RB_ROOT;
	he->hroot_out = RB_ROOT;

	if (!symbol_conf.report_hierarchy)
		he->leaf = true;

	return 0;
}
static void *hist_entry__zalloc(size_t size)
{
	return zalloc(size + sizeof(struct hist_entry));
}

static void hist_entry__free(void *ptr)
{
	free(ptr);
}

static struct hist_entry_ops default_ops = {
	.new	= hist_entry__zalloc,
	.free	= hist_entry__free,
};

static struct hist_entry *hist_entry__new(struct hist_entry *template,
					  bool sample_self)
{
	struct hist_entry_ops *ops = template->ops;
	size_t callchain_size = 0;
	struct hist_entry *he;
	int err = 0;

	if (!ops)
		ops = template->ops = &default_ops;

	if (symbol_conf.use_callchain)
		callchain_size = sizeof(struct callchain_root);

	he = ops->new(callchain_size);
	if (he) {
		err = hist_entry__init(he, template, sample_self);
		if (err) {
			ops->free(he);
			he = NULL;
		}
	}

	return he;
}
static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

static void hist_entry__add_callchain_period(struct hist_entry *he, u64 period)
{
	if (!symbol_conf.use_callchain)
		return;

	he->hists->callchain_period += period;
	if (!he->filtered)
		he->hists->callchain_non_filtered_period += period;
}
static struct hist_entry *hists__findnew_entry(struct hists *hists,
					       struct hist_entry *entry,
					       struct addr_location *al,
					       bool sample_self)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	u64 period = entry->stat.period;
	u64 weight = entry->stat.weight;

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in a same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			if (sample_self) {
				he_stat__add_period(&he->stat, period, weight);
				hist_entry__add_callchain_period(he, period);
			}
			if (symbol_conf.cumulate_callchain)
				he_stat__add_period(he->stat_acc, period, weight);

			/*
			 * This mem info was allocated from sample__resolve_mem
			 * and will not be used anymore.
			 */
			zfree(&entry->mem_info);

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				map__put(he->ms.map);
				he->ms.map = map__get(entry->ms.map);
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry, sample_self);
	if (!he)
		return NULL;

	if (sample_self)
		hist_entry__add_callchain_period(he, period);
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	if (sample_self)
		he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
	if (symbol_conf.cumulate_callchain)
		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
	return he;
}
static struct hist_entry*
__hists__add_entry(struct hists *hists,
		   struct addr_location *al,
		   struct symbol *sym_parent,
		   struct branch_info *bi,
		   struct mem_info *mi,
		   struct perf_sample *sample,
		   bool sample_self,
		   struct hist_entry_ops *ops)
{
	struct namespaces *ns = thread__namespaces(al->thread);
	struct hist_entry entry = {
		.thread	= al->thread,
		.comm = thread__comm(al->thread),
		.cgroup_id = {
			.dev = ns ? ns->link_info[CGROUP_NS_INDEX].dev : 0,
			.ino = ns ? ns->link_info[CGROUP_NS_INDEX].ino : 0,
		},
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.srcline = al->srcline ? strdup(al->srcline) : NULL,
		.socket	 = al->socket,
		.cpu	 = al->cpu,
		.cpumode = al->cpumode,
		.ip	 = al->addr,
		.level	 = al->level,
		.stat = {
			.nr_events = 1,
			.period	= sample->period,
			.weight = sample->weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
		.hists	= hists,
		.branch_info = bi,
		.mem_info = mi,
		.transaction = sample->transaction,
		.raw_data = sample->raw_data,
		.raw_size = sample->raw_size,
		.ops = ops,
	};

	return hists__findnew_entry(hists, &entry, al, sample_self);
}
struct hist_entry *hists__add_entry(struct hists *hists,
				    struct addr_location *al,
				    struct symbol *sym_parent,
				    struct branch_info *bi,
				    struct mem_info *mi,
				    struct perf_sample *sample,
				    bool sample_self)
{
	return __hists__add_entry(hists, al, sym_parent, bi, mi,
				  sample, sample_self, NULL);
}

struct hist_entry *hists__add_entry_ops(struct hists *hists,
					struct hist_entry_ops *ops,
					struct addr_location *al,
					struct symbol *sym_parent,
					struct branch_info *bi,
					struct mem_info *mi,
					struct perf_sample *sample,
					bool sample_self)
{
	return __hists__add_entry(hists, al, sym_parent, bi, mi,
				  sample, sample_self, ops);
}
static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
		    struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
			struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_sample *sample = iter->sample;
	struct mem_info *mi;

	mi = sample__resolve_mem(sample, al);
	if (mi == NULL)
		return -ENOMEM;

	iter->priv = mi;
	return 0;
}
static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	u64 cost;
	struct mem_info *mi = iter->priv;
	struct hists *hists = evsel__hists(iter->evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	if (mi == NULL)
		return -EINVAL;

	cost = sample->weight;
	if (!cost)
		cost = 1;

	/*
	 * must pass period=weight in order to get the correct
	 * sorting from hists__collapse_resort() which is solely
	 * based on periods. We want sorting to be done on
	 * nr_events * weight and this is indirectly achieved by
	 * passing period=weight here and in the
	 * he_stat__add_period() function.
	 */
	sample->period = cost;

	he = hists__add_entry(hists, al, iter->parent, NULL, mi,
			      sample, true);
	if (!he)
		return -ENOMEM;

	iter->he = he;
	return 0;
}
static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
		      struct addr_location *al __maybe_unused)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he = iter->he;
	int err = -EINVAL;

	if (he == NULL)
		goto out;

	hists__inc_nr_samples(hists, he->filtered);

	err = hist_entry__append_callchain(he, iter->sample);

out:
	/*
	 * We don't need to free iter->priv (mem_info) here since the mem info
	 * was either already freed in hists__findnew_entry() or passed to a
	 * new hist entry by hist_entry__new().
	 */
	iter->priv = NULL;

	iter->he = NULL;
	return err;
}
*iter
, struct addr_location
*al
)
743 struct branch_info
*bi
;
744 struct perf_sample
*sample
= iter
->sample
;
746 bi
= sample__resolve_bstack(sample
, al
);
751 iter
->total
= sample
->branch_stack
->nr
;
758 iter_add_single_branch_entry(struct hist_entry_iter
*iter __maybe_unused
,
759 struct addr_location
*al __maybe_unused
)
765 iter_next_branch_entry(struct hist_entry_iter
*iter
, struct addr_location
*al
)
767 struct branch_info
*bi
= iter
->priv
;
773 if (iter
->curr
>= iter
->total
)
776 al
->map
= bi
[i
].to
.map
;
777 al
->sym
= bi
[i
].to
.sym
;
778 al
->addr
= bi
[i
].to
.addr
;
static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he = NULL;
	int i = iter->curr;
	int err = 0;

	bi = iter->priv;

	if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
		goto out;

	/*
	 * The report shows the percentage of total branches captured
	 * and not events sampled. Thus we use a pseudo period of 1.
	 */
	sample->period = 1;
	sample->weight = bi->flags.cycles ? bi->flags.cycles : 1;

	he = hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
			      sample, true);
	if (he == NULL)
		return -ENOMEM;

	hists__inc_nr_samples(hists, he->filtered);

out:
	iter->he = he;
	iter->curr++;
	return err;
}

static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return iter->curr >= iter->total ? 0 : -1;
}
static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
			  struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
			      sample, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	struct hist_entry *he = iter->he;
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;

	if (he == NULL)
		return 0;

	iter->he = NULL;

	hists__inc_nr_samples(evsel__hists(evsel), he->filtered);

	return hist_entry__append_callchain(he, sample);
}
static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
			      struct addr_location *al __maybe_unused)
{
	struct hist_entry **he_cache;

	callchain_cursor_commit(&callchain_cursor);

	/*
	 * This is for detecting cycles or recursions so that they're
	 * cumulated only one time to prevent entries more than 100%
	 * overhead.
	 */
	he_cache = malloc(sizeof(*he_cache) * (iter->max_stack + 1));
	if (he_cache == NULL)
		return -ENOMEM;

	iter->priv = he_cache;
	iter->curr = 0;

	return 0;
}

static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
				 struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	int err = 0;

	he = hists__add_entry(hists, al, iter->parent, NULL, NULL,
			      sample, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	hist_entry__append_callchain(he, sample);

	/*
	 * We need to re-initialize the cursor since callchain_append()
	 * advanced the cursor to the end.
	 */
	callchain_cursor_commit(&callchain_cursor);

	hists__inc_nr_samples(hists, he->filtered);

	return err;
}
static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
			   struct addr_location *al)
{
	struct callchain_cursor_node *node;

	node = callchain_cursor_current(&callchain_cursor);
	if (node == NULL)
		return 0;

	return fill_callchain_info(al, node, iter->hide_unresolved);
}
static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
			       struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	struct hist_entry he_tmp = {
		.hists = evsel__hists(evsel),
		.cpu = al->cpu,
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ip = al->addr,
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.srcline = al->srcline ? strdup(al->srcline) : NULL,
		.parent = iter->parent,
		.raw_data = sample->raw_data,
		.raw_size = sample->raw_size,
	};
	int i;
	struct callchain_cursor cursor;

	callchain_cursor_snapshot(&cursor, &callchain_cursor);

	callchain_cursor_advance(&callchain_cursor);

	/*
	 * Check if there's duplicate entries in the callchain.
	 * It's possible that it has cycles or recursive calls.
	 */
	for (i = 0; i < iter->curr; i++) {
		if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
			/* to avoid calling callback function */
			iter->he = NULL;
			return 0;
		}
	}

	he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
			      sample, false);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	if (symbol_conf.use_callchain)
		callchain_append(he->callchain, &cursor, sample->period);
	return 0;
}
static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return 0;
}
const struct hist_iter_ops hist_iter_mem = {
	.prepare_entry		= iter_prepare_mem_entry,
	.add_single_entry	= iter_add_single_mem_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
	.prepare_entry		= iter_prepare_branch_entry,
	.add_single_entry	= iter_add_single_branch_entry,
	.next_entry		= iter_next_branch_entry,
	.add_next_entry		= iter_add_next_branch_entry,
	.finish_entry		= iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
	.prepare_entry		= iter_prepare_normal_entry,
	.add_single_entry	= iter_add_single_normal_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
	.prepare_entry		= iter_prepare_cumulative_entry,
	.add_single_entry	= iter_add_single_cumulative_entry,
	.next_entry		= iter_next_cumulative_entry,
	.add_next_entry		= iter_add_next_cumulative_entry,
	.finish_entry		= iter_finish_cumulative_entry,
};
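
/*
 * Usage sketch (added; modelled on builtin-report.c -- 'evsel',
 * 'sample', 'al' and 'max_stack' are assumed caller state, not
 * defined here): a sample is expanded into one or more hist entries
 * by picking the ops table matching the sampling mode and letting
 * hist_entry_iter__add() drive the prepare/add/next/finish callbacks:
 *
 *	struct hist_entry_iter iter = {
 *		.evsel	= evsel,
 *		.sample	= sample,
 *		.ops	= &hist_iter_branch,	// or _mem/_normal/_cumulative
 *	};
 *
 *	err = hist_entry_iter__add(&iter, &al, max_stack, NULL);
 */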
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
			 int max_stack_depth, void *arg)
{
	int err, err2;
	struct map *alm = NULL;

	if (al)
		alm = map__get(al->map);

	err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
					iter->evsel, al, max_stack_depth);
	if (err)
		return err;

	iter->max_stack = max_stack_depth;

	err = iter->ops->prepare_entry(iter, al);
	if (err)
		goto out;

	err = iter->ops->add_single_entry(iter, al);
	if (err)
		goto out;

	if (iter->he && iter->add_entry_cb) {
		err = iter->add_entry_cb(iter, al, true, arg);
		if (err)
			goto out;
	}

	while (iter->ops->next_entry(iter, al)) {
		err = iter->ops->add_next_entry(iter, al);
		if (err)
			break;

		if (iter->he && iter->add_entry_cb) {
			err = iter->add_entry_cb(iter, al, false, arg);
			if (err)
				goto out;
		}
	}

out:
	err2 = iter->ops->finish_entry(iter, al);
	if (!err)
		err = err2;

	map__put(alm);

	return err;
}
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct hists *hists = left->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    !perf_hpp__defined_dynamic_entry(fmt, hists))
			continue;

		cmp = fmt->cmp(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct hists *hists = left->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    !perf_hpp__defined_dynamic_entry(fmt, hists))
			continue;

		cmp = fmt->collapse(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}
void hist_entry__delete(struct hist_entry *he)
{
	struct hist_entry_ops *ops = he->ops;

	thread__zput(he->thread);
	map__zput(he->ms.map);

	if (he->branch_info) {
		map__zput(he->branch_info->from.map);
		map__zput(he->branch_info->to.map);
		free_srcline(he->branch_info->srcline_from);
		free_srcline(he->branch_info->srcline_to);
		zfree(&he->branch_info);
	}

	if (he->mem_info) {
		map__zput(he->mem_info->iaddr.map);
		map__zput(he->mem_info->daddr.map);
		zfree(&he->mem_info);
	}

	zfree(&he->stat_acc);
	free_srcline(he->srcline);
	if (he->srcfile && he->srcfile[0])
		free(he->srcfile);
	free_callchain(he->callchain);
	free(he->trace_output);
	free(he->raw_data);
	ops->free(he);
}
/*
 * If this is not the last column, then we need to pad it according to the
 * pre-calculated max length for this column, otherwise don't bother adding
 * spaces because that would break viewing this with, for instance, 'less',
 * that would show tons of trailing spaces when a long C++ demangled method
 * name is sampled.
 */
int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
				   struct perf_hpp_fmt *fmt, int printed)
{
	if (!list_is_last(&fmt->list, &he->hists->hpp_list->fields)) {
		const int width = fmt->width(fmt, hpp, he->hists);
		if (printed < width) {
			advance_hpp(hpp, printed);
			printed = scnprintf(hpp->buf, hpp->size, "%-*s", width - printed, " ");
		}
	}

	return printed;
}
/*
 * collapse the histogram
 */

static void hists__apply_filters(struct hists *hists, struct hist_entry *he);
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *he,
				       enum hist_filter type);

typedef bool (*fmt_chk_fn)(struct perf_hpp_fmt *fmt);

static bool check_thread_entry(struct perf_hpp_fmt *fmt)
{
	return perf_hpp__is_thread_entry(fmt) || perf_hpp__is_comm_entry(fmt);
}
static void hist_entry__check_and_remove_filter(struct hist_entry *he,
						enum hist_filter type,
						fmt_chk_fn check)
{
	struct perf_hpp_fmt *fmt;
	bool type_match = false;
	struct hist_entry *parent = he->parent_he;

	switch (type) {
	case HIST_FILTER__THREAD:
		if (symbol_conf.comm_list == NULL &&
		    symbol_conf.pid_list == NULL &&
		    symbol_conf.tid_list == NULL)
			return;
		break;
	case HIST_FILTER__DSO:
		if (symbol_conf.dso_list == NULL)
			return;
		break;
	case HIST_FILTER__SYMBOL:
		if (symbol_conf.sym_list == NULL)
			return;
		break;
	case HIST_FILTER__PARENT:
	case HIST_FILTER__GUEST:
	case HIST_FILTER__HOST:
	case HIST_FILTER__SOCKET:
	case HIST_FILTER__C2C:
	default:
		return;
	}

	/* if it's filtered by own fmt, it has to have filter bits */
	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
		if (check(fmt)) {
			type_match = true;
			break;
		}
	}

	if (type_match) {
		/*
		 * If the filter is for current level entry, propagate
		 * filter marker to parents.  The marker bit was
		 * already set by default so it only needs to clear
		 * non-filtered entries.
		 */
		if (!(he->filtered & (1 << type))) {
			while (parent) {
				parent->filtered &= ~(1 << type);
				parent = parent->parent_he;
			}
		}
	} else {
		/*
		 * If current entry doesn't have matching formats, set
		 * filter marker for upper level entries.  it will be
		 * cleared if its lower level entries is not filtered.
		 *
		 * For lower-level entries, it inherits parent's
		 * filter bit so that lower level entries of a
		 * non-filtered entry won't set the filter marker.
		 */
		if (parent == NULL)
			he->filtered |= (1 << type);
		else
			he->filtered |= (parent->filtered & (1 << type));
	}
}
static void hist_entry__apply_hierarchy_filters(struct hist_entry *he)
{
	hist_entry__check_and_remove_filter(he, HIST_FILTER__THREAD,
					    check_thread_entry);

	hist_entry__check_and_remove_filter(he, HIST_FILTER__DSO,
					    perf_hpp__is_dso_entry);

	hist_entry__check_and_remove_filter(he, HIST_FILTER__SYMBOL,
					    perf_hpp__is_sym_entry);

	hists__apply_filters(he->hists, he);
}
static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
						 struct rb_root *root,
						 struct hist_entry *he,
						 struct hist_entry *parent_he,
						 struct perf_hpp_list *hpp_list)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter, *new;
	struct perf_hpp_fmt *fmt;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = 0;
		perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
			cmp = fmt->collapse(fmt, iter, he);
			if (cmp)
				break;
		}

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);
			return iter;
		}

		if (cmp < 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}

	new = hist_entry__new(he, true);
	if (new == NULL)
		return NULL;

	hists->nr_entries++;

	/* save related format list for output */
	new->hpp_list = hpp_list;
	new->parent_he = parent_he;

	hist_entry__apply_hierarchy_filters(new);

	/* some fields are now passed to 'new' */
	perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
		if (perf_hpp__is_trace_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
			he->trace_output = NULL;
		else
			new->trace_output = NULL;

		if (perf_hpp__is_srcline_entry(fmt))
			he->srcline = NULL;
		else
			new->srcline = NULL;

		if (perf_hpp__is_srcfile_entry(fmt))
			he->srcfile = NULL;
		else
			new->srcfile = NULL;
	}

	rb_link_node(&new->rb_node_in, parent, p);
	rb_insert_color(&new->rb_node_in, root);
	return new;
}
static int hists__hierarchy_insert_entry(struct hists *hists,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct perf_hpp_list_node *node;
	struct hist_entry *new_he = NULL;
	struct hist_entry *parent = NULL;
	int depth = 0;
	int ret = 0;

	list_for_each_entry(node, &hists->hpp_formats, list) {
		/* skip period (overhead) and elided columns */
		if (node->level == 0 || node->skip)
			continue;

		/* insert copy of 'he' for each fmt into the hierarchy */
		new_he = hierarchy_insert_entry(hists, root, he, parent, &node->hpp);
		if (new_he == NULL) {
			ret = -1;
			break;
		}

		root = &new_he->hroot_in;
		new_he->depth = depth++;
		parent = new_he;
	}

	if (new_he) {
		new_he->leaf = true;

		if (symbol_conf.use_callchain) {
			callchain_cursor_reset(&callchain_cursor);
			if (callchain_merge(&callchain_cursor,
					    new_he->callchain,
					    he->callchain) < 0)
				ret = -1;
		}
	}

	/* 'he' is no longer used */
	hist_entry__delete(he);

	/* return 0 (or -1) since it already applied filters */
	return ret;
}
static int hists__collapse_insert_entry(struct hists *hists,
					struct rb_root *root,
					struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	if (symbol_conf.report_hierarchy)
		return hists__hierarchy_insert_entry(hists, root, he);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			int ret = 0;

			he_stat__add_stat(&iter->stat, &he->stat);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_stat(iter->stat_acc, he->stat_acc);

			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				if (callchain_merge(&callchain_cursor,
						    iter->callchain,
						    he->callchain) < 0)
					ret = -1;
			}
			hist_entry__delete(he);
			return ret;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return 1;
}
struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}
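
/*
 * Note (added): hists->entries_in_array[] holds two input trees and
 * ->entries_in flips between them under ->lock.  The caller drains the
 * tree returned here while new samples keep landing in the other one,
 * which is how 'perf top' collapses/resorts concurrently with sample
 * insertion.
 */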
static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
	hists__filter_entry_by_socket(hists, he);
}
int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	int ret;

	if (!hists__has(hists, need_collapse))
		return 0;

	hists->nr_entries = 0;

	root = hists__get_rotate_entries_in(hists);

	next = rb_first(root);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n);
		if (ret < 0)
			return -1;

		if (ret) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
		if (prog)
			ui_progress__update(prog, 1);
	}
	return 0;
}
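
/*
 * Note (added): this is the first half of the report pipeline.  A
 * typical caller runs, in order:
 *
 *	hists__collapse_resort(hists, prog);	// merge by sort keys
 *	perf_evsel__output_resort(evsel, prog);	// sort for display
 *
 * with the second step rebuilding hists->entries from the collapsed
 * tree via output_resort() below.
 */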
static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
	struct hists *hists = a->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__should_skip(fmt, a->hists))
			continue;

		cmp = fmt->sort(fmt, a, b);
		if (cmp)
			break;
	}

	return cmp;
}

static void hists__reset_filter_stats(struct hists *hists)
{
	hists->nr_non_filtered_entries = 0;
	hists->stats.total_non_filtered_period = 0;
}

void hists__reset_stats(struct hists *hists)
{
	hists->nr_entries = 0;
	hists->stats.total_period = 0;

	hists__reset_filter_stats(hists);
}

static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
	hists->nr_non_filtered_entries++;
	hists->stats.total_non_filtered_period += h->stat.period;
}

void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered)
		hists__inc_filter_stats(hists, h);

	hists->nr_entries++;
	hists->stats.total_period += h->stat.period;
}
static void hierarchy_recalc_total_periods(struct hists *hists)
{
	struct rb_node *node;
	struct hist_entry *he;

	node = rb_first(&hists->entries);

	hists->stats.total_period = 0;
	hists->stats.total_non_filtered_period = 0;

	/*
	 * recalculate total period using top-level entries only
	 * since lower level entries only see non-filtered entries
	 * but upper level entries have sum of both entries.
	 */
	while (node) {
		he = rb_entry(node, struct hist_entry, rb_node);
		node = rb_next(node);

		hists->stats.total_period += he->stat.period;
		if (!he->filtered)
			hists->stats.total_non_filtered_period += he->stat.period;
	}
}
static void hierarchy_insert_output_entry(struct rb_root *root,
					  struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct perf_hpp_fmt *fmt;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, root);

	/* update column width of dynamic entry */
	perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt))
			fmt->sort(fmt, he, NULL);
	}
}
static void hists__hierarchy_output_resort(struct hists *hists,
					   struct ui_progress *prog,
					   struct rb_root *root_in,
					   struct rb_root *root_out,
					   u64 min_callchain_hits,
					   bool use_callchain)
{
	struct rb_node *node;
	struct hist_entry *he;

	*root_out = RB_ROOT;
	node = rb_first(root_in);

	while (node) {
		he = rb_entry(node, struct hist_entry, rb_node_in);
		node = rb_next(node);

		hierarchy_insert_output_entry(root_out, he);

		if (prog)
			ui_progress__update(prog, 1);

		hists->nr_entries++;
		if (!he->filtered) {
			hists->nr_non_filtered_entries++;
			hists__calc_col_len(hists, he);
		}

		if (!he->leaf) {
			hists__hierarchy_output_resort(hists, prog,
						       &he->hroot_in,
						       &he->hroot_out,
						       min_callchain_hits,
						       use_callchain);
			continue;
		}

		if (!use_callchain)
			continue;

		if (callchain_param.mode == CHAIN_GRAPH_REL) {
			u64 total = he->stat.period;

			if (symbol_conf.cumulate_callchain)
				total = he->stat_acc->period;

			min_callchain_hits = total * (callchain_param.min_percent / 100);
		}

		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);
	}
}
static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits,
					 bool use_callchain)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct perf_hpp_fmt *fmt;

	if (use_callchain) {
		if (callchain_param.mode == CHAIN_GRAPH_REL) {
			u64 total = he->stat.period;

			if (symbol_conf.cumulate_callchain)
				total = he->stat_acc->period;

			min_callchain_hits = total * (callchain_param.min_percent / 100);
		}
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);
	}

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);

	perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    perf_hpp__defined_dynamic_entry(fmt, he->hists))
			fmt->sort(fmt, he, NULL);  /* update column width */
	}
}
static void output_resort(struct hists *hists, struct ui_progress *prog,
			  bool use_callchain, hists__resort_cb_t cb)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 callchain_total;
	u64 min_callchain_hits;

	callchain_total = hists->callchain_period;
	if (symbol_conf.filter_relative)
		callchain_total = hists->callchain_non_filtered_period;

	min_callchain_hits = callchain_total * (callchain_param.min_percent / 100);

	hists__reset_stats(hists);
	hists__reset_col_len(hists);

	if (symbol_conf.report_hierarchy) {
		hists__hierarchy_output_resort(hists, prog,
					       &hists->entries_collapsed,
					       &hists->entries,
					       min_callchain_hits,
					       use_callchain);
		hierarchy_recalc_total_periods(hists);
		return;
	}

	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		if (cb && cb(n))
			continue;

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
		hists__inc_stats(hists, n);

		if (!n->filtered)
			hists__calc_col_len(hists, n);

		if (prog)
			ui_progress__update(prog, 1);
	}
}
void perf_evsel__output_resort(struct perf_evsel *evsel, struct ui_progress *prog)
{
	bool use_callchain;

	if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
		use_callchain = evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN;
	else
		use_callchain = symbol_conf.use_callchain;

	use_callchain |= symbol_conf.show_branchflag_count;

	output_resort(evsel__hists(evsel), prog, use_callchain, NULL);
}

void hists__output_resort(struct hists *hists, struct ui_progress *prog)
{
	output_resort(hists, prog, symbol_conf.use_callchain, NULL);
}

void hists__output_resort_cb(struct hists *hists, struct ui_progress *prog,
			     hists__resort_cb_t cb)
{
	output_resort(hists, prog, symbol_conf.use_callchain, cb);
}
static bool can_goto_child(struct hist_entry *he, enum hierarchy_move_dir hmd)
{
	if (he->leaf || hmd == HMD_FORCE_SIBLING)
		return false;

	if (he->unfolded || hmd == HMD_FORCE_CHILD)
		return true;

	return false;
}

struct rb_node *rb_hierarchy_last(struct rb_node *node)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	while (can_goto_child(he, HMD_NORMAL)) {
		node = rb_last(&he->hroot_out);
		he = rb_entry(node, struct hist_entry, rb_node);
	}
	return node;
}

struct rb_node *__rb_hierarchy_next(struct rb_node *node, enum hierarchy_move_dir hmd)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	if (can_goto_child(he, hmd))
		node = rb_first(&he->hroot_out);
	else
		node = rb_next(node);

	while (node == NULL) {
		he = he->parent_he;
		if (he == NULL)
			break;

		node = rb_next(&he->rb_node);
	}
	return node;
}
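
/*
 * Traversal sketch (added): together with rb_hierarchy_last() and
 * rb_hierarchy_prev() below, this walks the hierarchy as a pre-order
 * DFS that respects folding.  A browser-style loop over all visible
 * entries would look roughly like:
 *
 *	for (nd = rb_first(&hists->entries); nd;
 *	     nd = __rb_hierarchy_next(nd, HMD_NORMAL)) {
 *		struct hist_entry *he = rb_entry(nd, struct hist_entry,
 *						 rb_node);
 *		// ... render he, indented by he->depth ...
 *	}
 */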
struct rb_node *rb_hierarchy_prev(struct rb_node *node)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	node = rb_prev(node);
	if (node)
		return rb_hierarchy_last(node);

	he = he->parent_he;
	if (he == NULL)
		return NULL;

	return &he->rb_node;
}

bool hist_entry__has_hierarchy_children(struct hist_entry *he, float limit)
{
	struct rb_node *node;
	struct hist_entry *child;
	float percent;

	if (he->leaf)
		return false;

	node = rb_first(&he->hroot_out);
	child = rb_entry(node, struct hist_entry, rb_node);

	while (node && child->filtered) {
		node = rb_next(node);
		child = rb_entry(node, struct hist_entry, rb_node);
	}

	if (node)
		percent = hist_entry__get_percent_limit(child);
	else
		percent = 0;

	return node && percent >= limit;
}
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);

	if (symbol_conf.report_hierarchy) {
		struct hist_entry *parent = h->parent_he;

		while (parent) {
			he_stat__add_stat(&parent->stat, &h->stat);

			parent->filtered &= ~(1 << filter);

			if (parent->filtered)
				goto next;

			/* force fold unfiltered entry for simplicity */
			parent->unfolded = false;
			parent->has_no_entry = false;
			parent->row_offset = 0;
			parent->nr_rows = 0;
next:
			parent = parent->parent_he;
		}
	}

	if (h->filtered)
		return;

	/* force fold unfiltered entry for simplicity */
	h->unfolded = false;
	h->has_no_entry = false;
	h->row_offset = 0;
	h->nr_rows = 0;

	hists->stats.nr_non_filtered_samples += h->stat.nr_events;

	hists__inc_filter_stats(hists, h);
	hists__calc_col_len(hists, h);
}
static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he)
{
	if ((hists->socket_filter > -1) &&
	    (he->socket != hists->socket_filter)) {
		he->filtered |= (1 << HIST_FILTER__SOCKET);
		return true;
	}

	return false;
}

typedef bool (*filter_fn_t)(struct hists *hists, struct hist_entry *he);
static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t filter)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (filter(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, type);
	}
}
static void resort_filtered_entry(struct rb_root *root, struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct rb_root new_root = RB_ROOT;
	struct rb_node *nd;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, root);

	if (he->leaf || he->filtered)
		return;

	nd = rb_first(&he->hroot_out);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		nd = rb_next(nd);
		rb_erase(&h->rb_node, &he->hroot_out);

		resort_filtered_entry(&new_root, h);
	}

	he->hroot_out = new_root;
}
static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg)
{
	struct rb_node *nd;
	struct rb_root new_root = RB_ROOT;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	nd = rb_first(&hists->entries);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
		int ret;

		ret = hist_entry__filter(h, type, arg);

		/*
		 * case 1. non-matching type
		 * zero out the period, set filter marker and move to child
		 */
		if (ret < 0) {
			memset(&h->stat, 0, sizeof(h->stat));
			h->filtered |= (1 << type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_CHILD);
		}
		/*
		 * case 2. matched type (filter out)
		 * set filter marker and move to next
		 */
		else if (ret == 1) {
			h->filtered |= (1 << type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
		}
		/*
		 * case 3. ok (not filtered)
		 * add period to hists and parents, erase the filter marker
		 * and move to next sibling
		 */
		else {
			hists__remove_entry_filter(hists, h, type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
		}
	}

	hierarchy_recalc_total_periods(hists);

	/*
	 * resort output after applying a new filter since filter in a lower
	 * hierarchy can change periods in an upper hierarchy.
	 */
	nd = rb_first(&hists->entries);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		nd = rb_next(nd);
		rb_erase(&h->rb_node, &hists->entries);

		resort_filtered_entry(&new_root, h);
	}

	hists->entries = new_root;
}
void hists__filter_by_thread(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__THREAD,
					hists->thread_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__THREAD,
				      hists__filter_entry_by_thread);
}

void hists__filter_by_dso(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__DSO,
					hists->dso_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__DSO,
				      hists__filter_entry_by_dso);
}

void hists__filter_by_symbol(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__SYMBOL,
					hists->symbol_filter_str);
	else
		hists__filter_by_type(hists, HIST_FILTER__SYMBOL,
				      hists__filter_entry_by_symbol);
}

void hists__filter_by_socket(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__SOCKET,
					&hists->socket_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__SOCKET,
				      hists__filter_entry_by_socket);
}
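
/*
 * Usage sketch (added; 'thread' is assumed caller state): the UI sets
 * the filter value on the hists and then invokes the matching
 * recompute, e.g.:
 *
 *	hists->thread_filter = thread;
 *	hists__filter_by_thread(hists);
 *
 * which re-marks every entry and rebuilds the non-filtered totals
 * used for percentages.
 */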
void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}

void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
	events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
	if (!filtered)
		hists->stats.nr_non_filtered_samples++;
}
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;

	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair, true);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		if (symbol_conf.cumulate_callchain)
			memset(he->stat_acc, 0, sizeof(he->stat));
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_stats(hists, he);
		he->dummy = true;
	}
out:
	return he;
}

static struct hist_entry *add_dummy_hierarchy_entry(struct hists *hists,
						    struct rb_root *root,
						    struct hist_entry *pair)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	struct perf_hpp_fmt *fmt;

	p = &root->rb_node;
	while (*p != NULL) {
		int64_t cmp = 0;

		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
			cmp = fmt->collapse(fmt, he, pair);
			if (cmp)
				break;
		}
		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}

	he = hist_entry__new(pair, true);
	if (he) {
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);

		he->dummy = true;
		he->hists = hists;
		memset(&he->stat, 0, sizeof(he->stat));
		hists__inc_stats(hists, he);
	}
out:
	return he;
}
static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (hists__has(hists, need_collapse))
		n = hists->entries_collapsed.rb_node;
	else
		n = hists->entries_in->rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

static struct hist_entry *hists__find_hierarchy_entry(struct rb_root *root,
						      struct hist_entry *he)
{
	struct rb_node *n = root->rb_node;

	while (n) {
		struct hist_entry *iter;
		struct perf_hpp_fmt *fmt;
		int64_t cmp = 0;

		iter = rb_entry(n, struct hist_entry, rb_node_in);
		perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
			cmp = fmt->collapse(fmt, iter, he);
			if (cmp)
				break;
		}

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}
static void hists__match_hierarchy(struct rb_root *leader_root,
				   struct rb_root *other_root)
{
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	for (nd = rb_first(leader_root); nd; nd = rb_next(nd)) {
		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_hierarchy_entry(other_root, pos);

		if (pair) {
			hist_entry__add_pair(pair, pos);
			hists__match_hierarchy(&pos->hroot_in, &pair->hroot_in);
		}
	}
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (symbol_conf.report_hierarchy) {
		/* hierarchy report always collapses entries */
		return hists__match_hierarchy(&leader->entries_collapsed,
					      &other->entries_collapsed);
	}

	if (hists__has(leader, need_collapse))
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}
static int hists__link_hierarchy(struct hists *leader_hists,
				 struct hist_entry *parent,
				 struct rb_root *leader_root,
				 struct rb_root *other_root)
{
	struct rb_node *nd;
	struct hist_entry *pos, *leader;

	for (nd = rb_first(other_root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (hist_entry__has_pairs(pos)) {
			bool found = false;

			list_for_each_entry(leader, &pos->pairs.head, pairs.node) {
				if (leader->hists == leader_hists) {
					found = true;
					break;
				}
			}
			if (!found)
				return -1;
		} else {
			leader = add_dummy_hierarchy_entry(leader_hists,
							   leader_root, pos);
			if (leader == NULL)
				return -1;

			/* do not point parent in the pos */
			leader->parent_he = parent;

			hist_entry__add_pair(pos, leader);
		}

		if (!pos->leaf) {
			if (hists__link_hierarchy(leader_hists, leader,
						  &leader->hroot_in,
						  &pos->hroot_in) < 0)
				return -1;
		}
	}
	return 0;
}
/*
 * Look for entries in the other hists that are not present in the leader, if
 * we find them, just add a dummy entry on the leader hists, with period=0,
 * nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (symbol_conf.report_hierarchy) {
		/* hierarchy report always collapses entries */
		return hists__link_hierarchy(leader, NULL,
					     &leader->entries_collapsed,
					     &other->entries_collapsed);
	}

	if (hists__has(other, need_collapse))
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}
void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
			  struct perf_sample *sample, bool nonany_branch_mode)
{
	struct branch_info *bi;

	/* If we have branch cycles always annotate them. */
	if (bs && bs->nr && bs->entries[0].flags.cycles) {
		int i;

		bi = sample__resolve_bstack(sample, al);
		if (bi) {
			struct addr_map_symbol *prev = NULL;

			/*
			 * Ignore errors, still want to process the
			 * other entries.
			 *
			 * For non standard branch modes always
			 * force no IPC (prev == NULL)
			 *
			 * Note that perf stores branches reversed from
			 * program order!
			 */
			for (i = bs->nr - 1; i >= 0; i--) {
				addr_map_symbol__account_cycles(&bi[i].from,
					nonany_branch_mode ? NULL : prev,
					bi[i].flags.cycles);
				prev = &bi[i].to;
			}
			free(bi);
		}
	}
}
size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = 0;

	evlist__for_each_entry(evlist, pos) {
		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
		ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
	}

	return ret;
}

u64 hists__total_period(struct hists *hists)
{
	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
		hists->stats.total_period;
}
int parse_filter_percentage(const struct option *opt __maybe_unused,
			    const char *arg, int unset __maybe_unused)
{
	if (!strcmp(arg, "relative"))
		symbol_conf.filter_relative = true;
	else if (!strcmp(arg, "absolute"))
		symbol_conf.filter_relative = false;
	else {
		pr_debug("Invalid percentage: %s\n", arg);
		return -1;
	}

	return 0;
}

int perf_hist_config(const char *var, const char *value)
{
	if (!strcmp(var, "hist.percentage"))
		return parse_filter_percentage(NULL, value, 0);

	return 0;
}
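
/*
 * Example (added): the equivalent ~/.perfconfig stanza handled above is
 *
 *	[hist]
 *		percentage = relative	# or "absolute"
 */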
int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
{
	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT;
	hists->entries = RB_ROOT;
	pthread_mutex_init(&hists->lock, NULL);
	hists->socket_filter = -1;
	hists->hpp_list = hpp_list;
	INIT_LIST_HEAD(&hists->hpp_formats);
	return 0;
}

static void hists__delete_remaining_entries(struct rb_root *root)
{
	struct rb_node *node;
	struct hist_entry *he;

	while (!RB_EMPTY_ROOT(root)) {
		node = rb_first(root);
		rb_erase(node, root);

		he = rb_entry(node, struct hist_entry, rb_node_in);
		hist_entry__delete(he);
	}
}

static void hists__delete_all_entries(struct hists *hists)
{
	hists__delete_entries(hists);
	hists__delete_remaining_entries(&hists->entries_in_array[0]);
	hists__delete_remaining_entries(&hists->entries_in_array[1]);
	hists__delete_remaining_entries(&hists->entries_collapsed);
}
static void hists_evsel__exit(struct perf_evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);
	struct perf_hpp_fmt *fmt, *pos;
	struct perf_hpp_list_node *node, *tmp;

	hists__delete_all_entries(hists);

	list_for_each_entry_safe(node, tmp, &hists->hpp_formats, list) {
		perf_hpp_list__for_each_format_safe(&node->hpp, fmt, pos) {
			list_del(&fmt->list);
			free(fmt);
		}
		list_del(&node->list);
		free(node);
	}
}

static int hists_evsel__init(struct perf_evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);

	__hists__init(hists, &perf_hpp_list);
	return 0;
}

/*
 * XXX We probably need a hists_evsel__exit() to free the hist_entries
 * stored in the rbtree...
 */

int hists__init(void)
{
	int err = perf_evsel__object_config(sizeof(struct hists_evsel),
					    hists_evsel__init,
					    hists_evsel__exit);
	if (err)
		fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);

	return err;
}

void perf_hpp_list__init(struct perf_hpp_list *list)
{
	INIT_LIST_HEAD(&list->fields);
	INIT_LIST_HEAD(&list->sorts);
}