#include "ui/progress.h"

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he);

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 8);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}

		if (h->branch_info->srcline_from)
			hists__new_col_len(hists, HISTC_SRCLINE_FROM,
					   strlen(h->branch_info->srcline_from));
		if (h->branch_info->srcline_to)
			hists__new_col_len(hists, HISTC_SRCLINE_TO,
					   strlen(h->branch_info->srcline_to));
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.sym) {
			symlen = (int)h->mem_info->daddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen);
		}

		if (h->mem_info->iaddr.sym) {
			symlen = (int)h->mem_info->iaddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		}

		if (h->mem_info->daddr.map) {
			symlen = dso__name_len(h->mem_info->daddr.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_CPU, 3);
	hists__new_col_len(hists, HISTC_SOCKET, 6);
	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);

	if (h->srcline)
		hists__new_col_len(hists, HISTC_SRCLINE, strlen(h->srcline));

	if (h->srcfile)
		hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));

	if (h->transaction)
		hists__new_col_len(hists, HISTC_TRANSACTION,
				   hist_entry__transaction_len());

	if (h->trace_output)
		hists__new_col_len(hists, HISTC_TRACE, strlen(h->trace_output));
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}

static void he_stat__add_cpumode_period(struct he_stat *he_stat,
					unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he_stat->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he_stat->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he_stat->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he_stat->period_guest_us += period;
		break;
	default:
		break;
	}
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{
	he_stat->period += period;
	he_stat->weight += weight;
	he_stat->nr_events += 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period += src->period;
	dest->period_sys += src->period_sys;
	dest->period_us += src->period_us;
	dest->period_guest_sys += src->period_guest_sys;
	dest->period_guest_us += src->period_guest_us;
	dest->nr_events += src->nr_events;
	dest->weight += src->weight;
}

static void he_stat__decay(struct he_stat *he_stat)
{
	he_stat->period = (he_stat->period * 7) / 8;
	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}
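/*
 * Example of the decay above: with the 7/8 factor a period of 800 becomes
 * 700 after one pass and 612 after two (integer division), i.e. roughly a
 * 12.5% exponential decay per pass, which is how entries that stop getting
 * samples gradually age out of a live 'perf top' style display.
 */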
static void hists__delete_entry(struct hists *hists, struct hist_entry *he);

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;
	u64 diff;

	if (prev_period == 0)
		return true;

	he_stat__decay(&he->stat);
	if (symbol_conf.cumulate_callchain)
		he_stat__decay(he->stat_acc);
	decay_callchain(he->callchain);

	diff = prev_period - he->stat.period;

	hists->stats.total_period -= diff;
	if (!he->filtered)
		hists->stats.total_non_filtered_period -= diff;

	if (!he->leaf) {
		struct hist_entry *child;
		struct rb_node *node = rb_first(&he->hroot_out);

		while (node) {
			child = rb_entry(node, struct hist_entry, rb_node);
			node = rb_next(node);

			if (hists__decay_entry(hists, child))
				hists__delete_entry(hists, child);
		}
	}

	return he->stat.period == 0;
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
	struct rb_root *root_in;
	struct rb_root *root_out;

	if (he->parent_he) {
		root_in  = &he->parent_he->hroot_in;
		root_out = &he->parent_he->hroot_out;
	} else {
		if (hists__has(hists, need_collapse))
			root_in = &hists->entries_collapsed;
		else
			root_in = hists->entries_in;
		root_out = &hists->entries;
	}

	rb_erase(&he->rb_node_in, root_in);
	rb_erase(&he->rb_node, root_out);

	if (!he->filtered)
		--hists->nr_non_filtered_entries;

	hist_entry__delete(he);
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n))) {
			hists__delete_entry(hists, n);
		}
	}
}

void hists__delete_entries(struct hists *hists)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		hists__delete_entry(hists, n);
	}
}

/*
 * histogram, sorted on item, collects periods
 */

static int hist_entry__init(struct hist_entry *he,
			    struct hist_entry *template,
			    bool sample_self)
{
	*he = *template;

	if (symbol_conf.cumulate_callchain) {
		he->stat_acc = malloc(sizeof(he->stat));
		if (he->stat_acc == NULL)
			return -ENOMEM;
		memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
		if (!sample_self)
			memset(&he->stat, 0, sizeof(he->stat));
	}
	map__get(he->ms.map);

	if (he->branch_info) {
		/*
		 * This branch info is (a part of) allocated from
		 * sample__resolve_bstack() and will be freed after
		 * adding new entries, so we need to save a copy.
		 */
		he->branch_info = malloc(sizeof(*he->branch_info));
		if (he->branch_info == NULL) {
			map__zput(he->ms.map);
			return -ENOMEM;
		}

		memcpy(he->branch_info, template->branch_info,
		       sizeof(*he->branch_info));

		map__get(he->branch_info->from.map);
		map__get(he->branch_info->to.map);
	}

	if (he->mem_info) {
		map__get(he->mem_info->iaddr.map);
		map__get(he->mem_info->daddr.map);
	}

	if (symbol_conf.use_callchain)
		callchain_init(he->callchain);

	if (he->raw_data) {
		he->raw_data = memdup(he->raw_data, he->raw_size);

		if (he->raw_data == NULL) {
			map__put(he->ms.map);
			if (he->branch_info) {
				map__put(he->branch_info->from.map);
				map__put(he->branch_info->to.map);
				free(he->branch_info);
			}
			if (he->mem_info) {
				map__put(he->mem_info->iaddr.map);
				map__put(he->mem_info->daddr.map);
			}
			return -ENOMEM;
		}
	}

	INIT_LIST_HEAD(&he->pairs.node);
	thread__get(he->thread);

	if (!symbol_conf.report_hierarchy)
		he->leaf = true;

	return 0;
}

static void *hist_entry__zalloc(size_t size)
{
	return zalloc(size + sizeof(struct hist_entry));
}

static void hist_entry__free(void *ptr)
{
	free(ptr);
}

static struct hist_entry_ops default_ops = {
	.new	= hist_entry__zalloc,
	.free	= hist_entry__free,
};

static struct hist_entry *hist_entry__new(struct hist_entry *template,
					  bool sample_self)
{
	struct hist_entry_ops *ops = template->ops;
	size_t callchain_size = 0;
	struct hist_entry *he;
	int err = 0;

	if (!ops)
		ops = template->ops = &default_ops;

	if (symbol_conf.use_callchain)
		callchain_size = sizeof(struct callchain_root);

	he = ops->new(callchain_size);
	if (he) {
		err = hist_entry__init(he, template, sample_self);
		if (err) {
			ops->free(he);
			he = NULL;
		}
	}

	return he;
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}
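/*
 * Each hist_filter enum value owns one bit in he->filtered; an entry only
 * counts as unfiltered while the whole mask is zero, so independent filters
 * (dso, thread, symbol, socket, parent, ...) can be set and cleared without
 * stepping on each other.
 */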
static void hist_entry__add_callchain_period(struct hist_entry *he, u64 period)
{
	if (!symbol_conf.use_callchain)
		return;

	he->hists->callchain_period += period;
	if (!he->filtered)
		he->hists->callchain_non_filtered_period += period;
}

static struct hist_entry *hists__findnew_entry(struct hists *hists,
					       struct hist_entry *entry,
					       struct addr_location *al,
					       bool sample_self)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	u64 period = entry->stat.period;
	u64 weight = entry->stat.weight;

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in the same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless of which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			if (sample_self) {
				he_stat__add_period(&he->stat, period, weight);
				hist_entry__add_callchain_period(he, period);
			}
			if (symbol_conf.cumulate_callchain)
				he_stat__add_period(he->stat_acc, period, weight);

			/*
			 * This mem info was allocated from sample__resolve_mem
			 * and will not be used anymore.
			 */
			zfree(&entry->mem_info);

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				map__put(he->ms.map);
				he->ms.map = map__get(entry->ms.map);
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry, sample_self);
	if (he == NULL)
		return NULL;

	hist_entry__add_callchain_period(he, period);

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
	if (symbol_conf.cumulate_callchain)
		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
	return he;
}

static struct hist_entry *
__hists__add_entry(struct hists *hists,
		   struct addr_location *al,
		   struct symbol *sym_parent,
		   struct branch_info *bi,
		   struct mem_info *mi,
		   struct perf_sample *sample,
		   bool sample_self,
		   struct hist_entry_ops *ops)
{
	struct hist_entry entry = {
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.socket = al->socket,
		.cpumode = al->cpumode,
		.stat = {
			.period = sample->period,
			.weight = sample->weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
		.transaction = sample->transaction,
		.raw_data = sample->raw_data,
		.raw_size = sample->raw_size,
	};

	return hists__findnew_entry(hists, &entry, al, sample_self);
}

struct hist_entry *hists__add_entry(struct hists *hists,
				    struct addr_location *al,
				    struct symbol *sym_parent,
				    struct branch_info *bi,
				    struct mem_info *mi,
				    struct perf_sample *sample,
				    bool sample_self)
{
	return __hists__add_entry(hists, al, sym_parent, bi, mi,
				  sample, sample_self, NULL);
}

struct hist_entry *hists__add_entry_ops(struct hists *hists,
					struct hist_entry_ops *ops,
					struct addr_location *al,
					struct symbol *sym_parent,
					struct branch_info *bi,
					struct mem_info *mi,
					struct perf_sample *sample,
					bool sample_self)
{
	return __hists__add_entry(hists, al, sym_parent, bi, mi,
				  sample, sample_self, ops);
}

static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
		    struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
			struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_sample *sample = iter->sample;
	struct mem_info *mi;

	mi = sample__resolve_mem(sample, al);
	if (mi == NULL)
		return -ENOMEM;

	iter->priv = mi;
	return 0;
}

static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	u64 cost;
	struct mem_info *mi = iter->priv;
	struct hists *hists = evsel__hists(iter->evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	cost = sample->weight;
	if (!cost)
		cost = 1;

	/*
	 * must pass period=weight in order to get the correct
	 * sorting from hists__collapse_resort() which is solely
	 * based on periods. We want sorting to be done on nr_events * weight
	 * and this is indirectly achieved by passing period=weight here
	 * and the he_stat__add_period() function.
	 */
	sample->period = cost;
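	/*
	 * For instance, two loads with weights (latencies) 40 and 60 end up
	 * as one entry with period 100 and nr_events 2, so a resort done
	 * purely on period effectively orders entries by total cost rather
	 * than by sample count.
	 */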
	he = hists__add_entry(hists, al, iter->parent, NULL, mi,
			      sample, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
		      struct addr_location *al __maybe_unused)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he = iter->he;
	int err = -EINVAL;

	if (he == NULL)
		goto out;

	hists__inc_nr_samples(hists, he->filtered);

	err = hist_entry__append_callchain(he, iter->sample);

out:
	/*
	 * We don't need to free iter->priv (mem_info) here since the mem info
	 * was either already freed in hists__findnew_entry() or passed to a
	 * new hist entry by hist_entry__new().
	 */
	iter->priv = NULL;

	iter->he = NULL;
	return err;
}

static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_sample *sample = iter->sample;

	bi = sample__resolve_bstack(sample, al);
	if (!bi)
		return -ENOMEM;

	iter->curr = 0;
	iter->total = sample->branch_stack->nr;

	iter->priv = bi;
	return 0;
}

static int
iter_add_single_branch_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	/* to avoid calling callback function */
	iter->he = NULL;

	return 0;
}

static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi = iter->priv;
	int i = iter->curr;

	if (bi == NULL)
		return 0;

	if (iter->curr >= iter->total)
		return 0;

	al->map = bi[i].to.map;
	al->sym = bi[i].to.sym;
	al->addr = bi[i].to.addr;

	return 1;
}

static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he = NULL;
	int i = iter->curr;
	int err = 0;

	bi = iter->priv;

	if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
		goto out;

	/*
	 * The report shows the percentage of total branches captured
	 * and not events sampled. Thus we use a pseudo period of 1.
	 */
	sample->period = 1;
	sample->weight = bi->flags.cycles ? bi->flags.cycles : 1;

	he = hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
			      sample, true);
	if (he == NULL)
		return -ENOMEM;

	hists__inc_nr_samples(hists, he->filtered);

out:
	iter->curr++;
	return err;
}

static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return iter->curr >= iter->total ? 0 : -1;
}

static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
			  struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
			      sample, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	struct hist_entry *he = iter->he;
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;

	if (he == NULL)
		return 0;

	iter->he = NULL;

	hists__inc_nr_samples(evsel__hists(evsel), he->filtered);

	return hist_entry__append_callchain(he, sample);
}

static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
			      struct addr_location *al __maybe_unused)
{
	struct hist_entry **he_cache;

	callchain_cursor_commit(&callchain_cursor);

	/*
	 * This is for detecting cycles or recursions so that they're
	 * cumulated only one time to prevent entries from exceeding 100%
	 * overhead.
	 */
	he_cache = malloc(sizeof(*he_cache) * (iter->max_stack + 1));
	if (he_cache == NULL)
		return -ENOMEM;

	iter->priv = he_cache;
	iter->curr = 0;

	return 0;
}
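/*
 * The he_cache array allocated above holds one hist_entry pointer per
 * callchain level already accounted for the current sample.  A recursive
 * chain such as a -> b -> a is therefore cumulated into 'a' only once,
 * keeping any single entry's cumulated overhead from exceeding 100%.
 */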
static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
				 struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	int err = 0;

	he = hists__add_entry(hists, al, iter->parent, NULL, NULL,
			      sample, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	hist_entry__append_callchain(he, sample);

	/*
	 * We need to re-initialize the cursor since callchain_append()
	 * advanced the cursor to the end.
	 */
	callchain_cursor_commit(&callchain_cursor);

	hists__inc_nr_samples(hists, he->filtered);

	return err;
}

static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
			   struct addr_location *al)
{
	struct callchain_cursor_node *node;

	node = callchain_cursor_current(&callchain_cursor);
	if (node == NULL)
		return 0;

	return fill_callchain_info(al, node, iter->hide_unresolved);
}

static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
			       struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	struct hist_entry he_tmp = {
		.hists = evsel__hists(evsel),
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.parent = iter->parent,
		.raw_data = sample->raw_data,
		.raw_size = sample->raw_size,
	};
	int i;
	struct callchain_cursor cursor;

	callchain_cursor_snapshot(&cursor, &callchain_cursor);

	callchain_cursor_advance(&callchain_cursor);

	/*
	 * Check if there's duplicate entries in the callchain.
	 * It's possible that it has cycles or recursive calls.
	 */
	for (i = 0; i < iter->curr; i++) {
		if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
			/* to avoid calling callback function */
			iter->he = NULL;
			return 0;
		}
	}

	he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
			      sample, false);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	if (symbol_conf.use_callchain)
		callchain_append(he->callchain, &cursor, sample->period);
	return 0;
}

static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return 0;
}

const struct hist_iter_ops hist_iter_mem = {
	.prepare_entry = iter_prepare_mem_entry,
	.add_single_entry = iter_add_single_mem_entry,
	.next_entry = iter_next_nop_entry,
	.add_next_entry = iter_add_next_nop_entry,
	.finish_entry = iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
	.prepare_entry = iter_prepare_branch_entry,
	.add_single_entry = iter_add_single_branch_entry,
	.next_entry = iter_next_branch_entry,
	.add_next_entry = iter_add_next_branch_entry,
	.finish_entry = iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
	.prepare_entry = iter_prepare_normal_entry,
	.add_single_entry = iter_add_single_normal_entry,
	.next_entry = iter_next_nop_entry,
	.add_next_entry = iter_add_next_nop_entry,
	.finish_entry = iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
	.prepare_entry = iter_prepare_cumulative_entry,
	.add_single_entry = iter_add_single_cumulative_entry,
	.next_entry = iter_next_cumulative_entry,
	.add_next_entry = iter_add_next_cumulative_entry,
	.finish_entry = iter_finish_cumulative_entry,
};
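/*
 * A minimal sketch of how a sample-processing tool drives one of the ops
 * tables above via hist_entry_iter__add() (the exact iterator setup in the
 * perf builtins differs; this only illustrates the flow):
 *
 *	struct hist_entry_iter iter = {
 *		.evsel	= evsel,
 *		.sample	= sample,
 *		.ops	= symbol_conf.cumulate_callchain ?
 *			  &hist_iter_cumulative : &hist_iter_normal,
 *	};
 *
 *	hist_entry_iter__add(&iter, &al, max_stack, NULL);
 *
 * prepare_entry/add_single_entry run once per sample, next_entry and
 * add_next_entry then loop over the remaining branch or callchain entries,
 * and finish_entry releases iter->priv.
 */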
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
			 int max_stack_depth, void *arg)
{
	int err, err2;

	err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
					iter->evsel, al, max_stack_depth);
	if (err)
		return err;

	iter->max_stack = max_stack_depth;

	err = iter->ops->prepare_entry(iter, al);
	if (err)
		goto out;

	err = iter->ops->add_single_entry(iter, al);
	if (err)
		goto out;

	if (iter->he && iter->add_entry_cb) {
		err = iter->add_entry_cb(iter, al, true, arg);
		if (err)
			goto out;
	}

	while (iter->ops->next_entry(iter, al)) {
		err = iter->ops->add_next_entry(iter, al);
		if (err)
			break;

		if (iter->he && iter->add_entry_cb) {
			err = iter->add_entry_cb(iter, al, false, arg);
			if (err)
				goto out;
		}
	}

out:
	err2 = iter->ops->finish_entry(iter, al);
	if (!err)
		err = err2;

	return err;
}

int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct hists *hists = left->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    !perf_hpp__defined_dynamic_entry(fmt, hists))
			continue;

		cmp = fmt->cmp(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct hists *hists = left->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    !perf_hpp__defined_dynamic_entry(fmt, hists))
			continue;

		cmp = fmt->collapse(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}

void hist_entry__delete(struct hist_entry *he)
{
	struct hist_entry_ops *ops = he->ops;

	thread__zput(he->thread);
	map__zput(he->ms.map);

	if (he->branch_info) {
		map__zput(he->branch_info->from.map);
		map__zput(he->branch_info->to.map);
		free_srcline(he->branch_info->srcline_from);
		free_srcline(he->branch_info->srcline_to);
		zfree(&he->branch_info);
	}

	if (he->mem_info) {
		map__zput(he->mem_info->iaddr.map);
		map__zput(he->mem_info->daddr.map);
		zfree(&he->mem_info);
	}

	zfree(&he->stat_acc);
	free_srcline(he->srcline);
	if (he->srcfile && he->srcfile[0])
		free(he->srcfile);
	free_callchain(he->callchain);
	free(he->trace_output);
	ops->free(he);
}

/*
 * If this is not the last column, then we need to pad it according to the
 * pre-calculated max length for this column, otherwise don't bother adding
 * spaces because that would break viewing this with, for instance, 'less',
 * that would show tons of trailing spaces when a long C++ demangled method
 * name is sampled.
 */
int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
				   struct perf_hpp_fmt *fmt, int printed)
{
	if (!list_is_last(&fmt->list, &he->hists->hpp_list->fields)) {
		const int width = fmt->width(fmt, hpp, he->hists);

		if (printed < width) {
			advance_hpp(hpp, printed);
			printed = scnprintf(hpp->buf, hpp->size, "%-*s", width - printed, " ");
		}
	}

	return printed;
}
/*
 * collapse the histogram
 */

static void hists__apply_filters(struct hists *hists, struct hist_entry *he);
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *he,
				       enum hist_filter type);

typedef bool (*fmt_chk_fn)(struct perf_hpp_fmt *fmt);

static bool check_thread_entry(struct perf_hpp_fmt *fmt)
{
	return perf_hpp__is_thread_entry(fmt) || perf_hpp__is_comm_entry(fmt);
}

static void hist_entry__check_and_remove_filter(struct hist_entry *he,
						enum hist_filter type,
						fmt_chk_fn check)
{
	struct perf_hpp_fmt *fmt;
	bool type_match = false;
	struct hist_entry *parent = he->parent_he;

	switch (type) {
	case HIST_FILTER__THREAD:
		if (symbol_conf.comm_list == NULL &&
		    symbol_conf.pid_list == NULL &&
		    symbol_conf.tid_list == NULL)
			return;
		break;
	case HIST_FILTER__DSO:
		if (symbol_conf.dso_list == NULL)
			return;
		break;
	case HIST_FILTER__SYMBOL:
		if (symbol_conf.sym_list == NULL)
			return;
		break;
	case HIST_FILTER__PARENT:
	case HIST_FILTER__GUEST:
	case HIST_FILTER__HOST:
	case HIST_FILTER__SOCKET:
	default:
		return;
	}

	/* if it's filtered by own fmt, it has to have filter bits */
	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
		if (check(fmt)) {
			type_match = true;
			break;
		}
	}

	if (type_match) {
		/*
		 * If the filter is for current level entry, propagate
		 * filter marker to parents.  The marker bit was
		 * already set by default so it only needs to clear
		 * non-filtered entries.
		 */
		if (!(he->filtered & (1 << type))) {
			while (parent) {
				parent->filtered &= ~(1 << type);
				parent = parent->parent_he;
			}
		}
	} else {
		/*
		 * If current entry doesn't have matching formats, set
		 * filter marker for upper level entries.  It will be
		 * cleared if its lower level entries are not filtered.
		 *
		 * For lower-level entries, it inherits the parent's
		 * filter bit so that lower level entries of a
		 * non-filtered entry won't set the filter marker.
		 */
		if (parent == NULL)
			he->filtered |= (1 << type);
		else
			he->filtered |= (parent->filtered & (1 << type));
	}
}

static void hist_entry__apply_hierarchy_filters(struct hist_entry *he)
{
	hist_entry__check_and_remove_filter(he, HIST_FILTER__THREAD,
					    check_thread_entry);

	hist_entry__check_and_remove_filter(he, HIST_FILTER__DSO,
					    perf_hpp__is_dso_entry);

	hist_entry__check_and_remove_filter(he, HIST_FILTER__SYMBOL,
					    perf_hpp__is_sym_entry);

	hists__apply_filters(he->hists, he);
}

static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
						 struct rb_root *root,
						 struct hist_entry *he,
						 struct hist_entry *parent_he,
						 struct perf_hpp_list *hpp_list)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter, *new;
	struct perf_hpp_fmt *fmt;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = 0;
		perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
			cmp = fmt->collapse(fmt, iter, he);
			if (cmp)
				break;
		}

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);
			return iter;
		}

		if (cmp < 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}

	new = hist_entry__new(he, true);
	if (new == NULL)
		return NULL;

	hists->nr_entries++;

	/* save related format list for output */
	new->hpp_list = hpp_list;
	new->parent_he = parent_he;

	hist_entry__apply_hierarchy_filters(new);

	/* some fields are now passed to 'new' */
	perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
		if (perf_hpp__is_trace_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
			he->trace_output = NULL;
		else
			new->trace_output = NULL;

		if (perf_hpp__is_srcline_entry(fmt))
			he->srcline = NULL;
		else
			new->srcline = NULL;

		if (perf_hpp__is_srcfile_entry(fmt))
			he->srcfile = NULL;
		else
			new->srcfile = NULL;
	}

	rb_link_node(&new->rb_node_in, parent, p);
	rb_insert_color(&new->rb_node_in, root);
	return new;
}

static int hists__hierarchy_insert_entry(struct hists *hists,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct perf_hpp_list_node *node;
	struct hist_entry *new_he = NULL;
	struct hist_entry *parent = NULL;
	int depth = 0;
	int ret = 0;

	list_for_each_entry(node, &hists->hpp_formats, list) {
		/* skip period (overhead) and elided columns */
		if (node->level == 0 || node->skip)
			continue;

		/* insert copy of 'he' for each fmt into the hierarchy */
		new_he = hierarchy_insert_entry(hists, root, he, parent, &node->hpp);
		if (new_he == NULL) {
			ret = -1;
			break;
		}

		root = &new_he->hroot_in;
		new_he->depth = depth++;
		parent = new_he;
	}

	if (new_he) {
		new_he->leaf = true;

		if (symbol_conf.use_callchain) {
			callchain_cursor_reset(&callchain_cursor);
			if (callchain_merge(&callchain_cursor,
					    new_he->callchain,
					    he->callchain) < 0)
				ret = -1;
		}
	}

	/* 'he' is no longer used */
	hist_entry__delete(he);

	/* return 0 (or -1) since it already applied filters */
	return ret;
}

static int hists__collapse_insert_entry(struct hists *hists,
					struct rb_root *root,
					struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	if (symbol_conf.report_hierarchy)
		return hists__hierarchy_insert_entry(hists, root, he);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			int ret = 0;

			he_stat__add_stat(&iter->stat, &he->stat);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_stat(iter->stat_acc, he->stat_acc);

			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				if (callchain_merge(&callchain_cursor,
						    iter->callchain,
						    he->callchain) < 0)
					ret = -1;
			}
			hist_entry__delete(he);
			return ret;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return 1;
}

struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}
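/*
 * hists->entries_in flips between the two elements of entries_in_array: the
 * collapse pass takes the tree that was being filled and, under hists->lock,
 * points new samples at the other one, so insertion can continue while the
 * previous batch is collapsed and resorted.
 */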
static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
	hists__filter_entry_by_socket(hists, he);
}

int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	int ret;

	if (!hists__has(hists, need_collapse))
		return 0;

	hists->nr_entries = 0;

	root = hists__get_rotate_entries_in(hists);

	next = rb_first(root);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n);
		if (ret < 0)
			return -1;

		if (ret) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
		if (prog)
			ui_progress__update(prog, 1);
	}
	return 0;
}

static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
	struct hists *hists = a->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__should_skip(fmt, a->hists))
			continue;

		cmp = fmt->sort(fmt, a, b);
		if (cmp)
			break;
	}

	return cmp;
}

static void hists__reset_filter_stats(struct hists *hists)
{
	hists->nr_non_filtered_entries = 0;
	hists->stats.total_non_filtered_period = 0;
}

void hists__reset_stats(struct hists *hists)
{
	hists->nr_entries = 0;
	hists->stats.total_period = 0;

	hists__reset_filter_stats(hists);
}

static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
	hists->nr_non_filtered_entries++;
	hists->stats.total_non_filtered_period += h->stat.period;
}

void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered)
		hists__inc_filter_stats(hists, h);

	hists->nr_entries++;
	hists->stats.total_period += h->stat.period;
}

static void hierarchy_recalc_total_periods(struct hists *hists)
{
	struct rb_node *node;
	struct hist_entry *he;

	node = rb_first(&hists->entries);

	hists->stats.total_period = 0;
	hists->stats.total_non_filtered_period = 0;

	/*
	 * recalculate total period using top-level entries only
	 * since lower level entries only see non-filtered entries
	 * but upper level entries have sum of both entries.
	 */
	while (node) {
		he = rb_entry(node, struct hist_entry, rb_node);
		node = rb_next(node);

		hists->stats.total_period += he->stat.period;
		if (!he->filtered)
			hists->stats.total_non_filtered_period += he->stat.period;
	}
}

static void hierarchy_insert_output_entry(struct rb_root *root,
					  struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct perf_hpp_fmt *fmt;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, root);

	/* update column width of dynamic entry */
	perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt))
			fmt->sort(fmt, he, NULL);
	}
}

static void hists__hierarchy_output_resort(struct hists *hists,
					   struct ui_progress *prog,
					   struct rb_root *root_in,
					   struct rb_root *root_out,
					   u64 min_callchain_hits,
					   bool use_callchain)
{
	struct rb_node *node;
	struct hist_entry *he;

	*root_out = RB_ROOT;
	node = rb_first(root_in);

	while (node) {
		he = rb_entry(node, struct hist_entry, rb_node_in);
		node = rb_next(node);

		hierarchy_insert_output_entry(root_out, he);

		if (prog)
			ui_progress__update(prog, 1);

		if (!he->leaf) {
			hists__hierarchy_output_resort(hists, prog,
						       &he->hroot_in,
						       &he->hroot_out,
						       min_callchain_hits,
						       use_callchain);
			continue;
		}

		hists->nr_entries++;
		if (!he->filtered) {
			hists->nr_non_filtered_entries++;
			hists__calc_col_len(hists, he);
		}

		if (!use_callchain)
			continue;

		if (callchain_param.mode == CHAIN_GRAPH_REL) {
			u64 total = he->stat.period;

			if (symbol_conf.cumulate_callchain)
				total = he->stat_acc->period;

			min_callchain_hits = total * (callchain_param.min_percent / 100);
		}

		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);
	}
}

static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits,
					 bool use_callchain)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct perf_hpp_fmt *fmt;

	if (use_callchain) {
		if (callchain_param.mode == CHAIN_GRAPH_REL) {
			u64 total = he->stat.period;

			if (symbol_conf.cumulate_callchain)
				total = he->stat_acc->period;

			min_callchain_hits = total * (callchain_param.min_percent / 100);
		}
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);
	}

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);

	perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    perf_hpp__defined_dynamic_entry(fmt, he->hists))
			fmt->sort(fmt, he, NULL);	/* update column width */
	}
}

static void output_resort(struct hists *hists, struct ui_progress *prog,
			  bool use_callchain, hists__resort_cb_t cb)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 callchain_total;
	u64 min_callchain_hits;

	callchain_total = hists->callchain_period;
	if (symbol_conf.filter_relative)
		callchain_total = hists->callchain_non_filtered_period;

	min_callchain_hits = callchain_total * (callchain_param.min_percent / 100);
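	/*
	 * E.g. with a total callchain period of 2,000,000 and
	 * callchain_param.min_percent = 0.5, only chains accounting for at
	 * least 10,000 period units survive the callchain sort/prune below.
	 */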
	hists__reset_stats(hists);
	hists__reset_col_len(hists);

	if (symbol_conf.report_hierarchy) {
		hists__hierarchy_output_resort(hists, prog,
					       &hists->entries_collapsed,
					       &hists->entries,
					       min_callchain_hits,
					       use_callchain);
		hierarchy_recalc_total_periods(hists);
		return;
	}

	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		if (cb && cb(n))
			continue;

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
		hists__inc_stats(hists, n);

		if (!n->filtered)
			hists__calc_col_len(hists, n);

		if (prog)
			ui_progress__update(prog, 1);
	}
}

void perf_evsel__output_resort(struct perf_evsel *evsel, struct ui_progress *prog)
{
	bool use_callchain;

	if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
		use_callchain = evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN;
	else
		use_callchain = symbol_conf.use_callchain;

	output_resort(evsel__hists(evsel), prog, use_callchain, NULL);
}

void hists__output_resort(struct hists *hists, struct ui_progress *prog)
{
	output_resort(hists, prog, symbol_conf.use_callchain, NULL);
}

void hists__output_resort_cb(struct hists *hists, struct ui_progress *prog,
			     hists__resort_cb_t cb)
{
	output_resort(hists, prog, symbol_conf.use_callchain, cb);
}

static bool can_goto_child(struct hist_entry *he, enum hierarchy_move_dir hmd)
{
	if (he->leaf || hmd == HMD_FORCE_SIBLING)
		return false;

	if (he->unfolded || hmd == HMD_FORCE_CHILD)
		return true;

	return false;
}

struct rb_node *rb_hierarchy_last(struct rb_node *node)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	while (can_goto_child(he, HMD_NORMAL)) {
		node = rb_last(&he->hroot_out);
		he = rb_entry(node, struct hist_entry, rb_node);
	}
	return node;
}

struct rb_node *__rb_hierarchy_next(struct rb_node *node, enum hierarchy_move_dir hmd)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	if (can_goto_child(he, hmd))
		node = rb_first(&he->hroot_out);
	else
		node = rb_next(node);

	while (node == NULL) {
		he = he->parent_he;
		if (he == NULL)
			break;

		node = rb_next(&he->rb_node);
	}
	return node;
}

struct rb_node *rb_hierarchy_prev(struct rb_node *node)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	node = rb_prev(node);
	if (node)
		return rb_hierarchy_last(node);

	he = he->parent_he;
	if (he == NULL)
		return NULL;

	return &he->rb_node;
}

bool hist_entry__has_hierarchy_children(struct hist_entry *he, float limit)
{
	struct rb_node *node;
	struct hist_entry *child;
	float percent;

	if (he->leaf)
		return false;

	node = rb_first(&he->hroot_out);
	child = rb_entry(node, struct hist_entry, rb_node);

	while (node && child->filtered) {
		node = rb_next(node);
		child = rb_entry(node, struct hist_entry, rb_node);
	}

	if (node)
		percent = hist_entry__get_percent_limit(child);
	else
		return false;

	return node && percent >= limit;
}

static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);

	if (symbol_conf.report_hierarchy) {
		struct hist_entry *parent = h->parent_he;

		while (parent) {
			he_stat__add_stat(&parent->stat, &h->stat);

			parent->filtered &= ~(1 << filter);

			if (parent->filtered)
				goto next;

			/* force fold unfiltered entry for simplicity */
			parent->unfolded = false;
			parent->has_no_entry = false;
			parent->row_offset = 0;
			parent->nr_rows = 0;
next:
			parent = parent->parent_he;
		}
	}

	if (h->filtered)
		return;

	/* force fold unfiltered entry for simplicity */
	h->unfolded = false;
	h->has_no_entry = false;

	hists->stats.nr_non_filtered_samples += h->stat.nr_events;

	hists__inc_filter_stats(hists, h);
	hists__calc_col_len(hists, h);
}
static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he)
{
	if ((hists->socket_filter > -1) &&
	    (he->socket != hists->socket_filter)) {
		he->filtered |= (1 << HIST_FILTER__SOCKET);
		return true;
	}

	return false;
}

typedef bool (*filter_fn_t)(struct hists *hists, struct hist_entry *he);

static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t filter)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (filter(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, type);
	}
}

static void resort_filtered_entry(struct rb_root *root, struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct rb_root new_root = RB_ROOT;
	struct rb_node *nd;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, root);

	if (he->leaf || he->filtered)
		return;

	nd = rb_first(&he->hroot_out);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		nd = rb_next(nd);
		rb_erase(&h->rb_node, &he->hroot_out);

		resort_filtered_entry(&new_root, h);
	}

	he->hroot_out = new_root;
}

static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg)
{
	struct rb_node *nd;
	struct rb_root new_root = RB_ROOT;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	nd = rb_first(&hists->entries);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
		int ret;

		ret = hist_entry__filter(h, type, arg);

		/*
		 * case 1. non-matching type
		 * zero out the period, set filter marker and move to child
		 */
		if (ret < 0) {
			memset(&h->stat, 0, sizeof(h->stat));
			h->filtered |= (1 << type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_CHILD);
		}
		/*
		 * case 2. matched type (filter out)
		 * set filter marker and move to next
		 */
		else if (ret == 1) {
			h->filtered |= (1 << type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
		}
		/*
		 * case 3. ok (not filtered)
		 * add period to hists and parents, erase the filter marker
		 * and move to next sibling
		 */
		else {
			hists__remove_entry_filter(hists, h, type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
		}
	}

	hierarchy_recalc_total_periods(hists);

	/*
	 * resort output after applying a new filter since filter in a lower
	 * hierarchy can change periods in an upper hierarchy.
	 */
	nd = rb_first(&hists->entries);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		nd = rb_next(nd);
		rb_erase(&h->rb_node, &hists->entries);

		resort_filtered_entry(&new_root, h);
	}

	hists->entries = new_root;
}

void hists__filter_by_thread(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__THREAD,
					hists->thread_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__THREAD,
				      hists__filter_entry_by_thread);
}

void hists__filter_by_dso(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__DSO,
					hists->dso_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__DSO,
				      hists__filter_entry_by_dso);
}

void hists__filter_by_symbol(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__SYMBOL,
					hists->symbol_filter_str);
	else
		hists__filter_by_type(hists, HIST_FILTER__SYMBOL,
				      hists__filter_entry_by_symbol);
}

void hists__filter_by_socket(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__SOCKET,
					&hists->socket_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__SOCKET,
				      hists__filter_entry_by_socket);
}
void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}

void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
	events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
	if (!filtered)
		hists->stats.nr_non_filtered_samples++;
}

static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;

	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);
		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair, true);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		if (symbol_conf.cumulate_callchain)
			memset(he->stat_acc, 0, sizeof(he->stat));
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_stats(hists, he);
	}
out:
	return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (hists__has(hists, need_collapse))
		n = hists->entries_collapsed.rb_node;
	else
		n = hists->entries_in->rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (hists__has(leader, need_collapse))
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}

/*
 * Look for entries in the other hists that are not present in the leader, if
 * we find them, just add a dummy entry on the leader hists, with period=0,
 * nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (hists__has(other, need_collapse))
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}
void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
			  struct perf_sample *sample, bool nonany_branch_mode)
{
	struct branch_info *bi;

	/* If we have branch cycles always annotate them. */
	if (bs && bs->nr && bs->entries[0].flags.cycles) {
		int i;

		bi = sample__resolve_bstack(sample, al);
		if (bi) {
			struct addr_map_symbol *prev = NULL;

			/*
			 * Ignore errors, still want to process the
			 * other entries.
			 *
			 * For non standard branch modes always
			 * force no IPC (prev == NULL)
			 *
			 * Note that perf stores branches reversed from
			 * program order!
			 */
			for (i = bs->nr - 1; i >= 0; i--) {
				addr_map_symbol__account_cycles(&bi[i].from,
					nonany_branch_mode ? NULL : prev,
					bi[i].flags.cycles);
				prev = &bi[i].to;
			}
			free(bi);
		}
	}
}

size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = 0;

	evlist__for_each_entry(evlist, pos) {
		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
		ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
	}

	return ret;
}

u64 hists__total_period(struct hists *hists)
{
	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
		hists->stats.total_period;
}

int parse_filter_percentage(const struct option *opt __maybe_unused,
			    const char *arg, int unset __maybe_unused)
{
	if (!strcmp(arg, "relative"))
		symbol_conf.filter_relative = true;
	else if (!strcmp(arg, "absolute"))
		symbol_conf.filter_relative = false;

	return 0;
}

int perf_hist_config(const char *var, const char *value)
{
	if (!strcmp(var, "hist.percentage"))
		return parse_filter_percentage(NULL, value, 0);

	return 0;
}

int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
{
	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT;
	hists->entries = RB_ROOT;
	pthread_mutex_init(&hists->lock, NULL);
	hists->socket_filter = -1;
	hists->hpp_list = hpp_list;
	INIT_LIST_HEAD(&hists->hpp_formats);
	return 0;
}

static void hists__delete_remaining_entries(struct rb_root *root)
{
	struct rb_node *node;
	struct hist_entry *he;

	while (!RB_EMPTY_ROOT(root)) {
		node = rb_first(root);
		rb_erase(node, root);

		he = rb_entry(node, struct hist_entry, rb_node_in);
		hist_entry__delete(he);
	}
}

static void hists__delete_all_entries(struct hists *hists)
{
	hists__delete_entries(hists);
	hists__delete_remaining_entries(&hists->entries_in_array[0]);
	hists__delete_remaining_entries(&hists->entries_in_array[1]);
	hists__delete_remaining_entries(&hists->entries_collapsed);
}

static void hists_evsel__exit(struct perf_evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);
	struct perf_hpp_fmt *fmt, *pos;
	struct perf_hpp_list_node *node, *tmp;

	hists__delete_all_entries(hists);

	list_for_each_entry_safe(node, tmp, &hists->hpp_formats, list) {
		perf_hpp_list__for_each_format_safe(&node->hpp, fmt, pos) {
			list_del(&fmt->list);
			free(fmt);
		}
		list_del(&node->list);
		free(node);
	}
}

static int hists_evsel__init(struct perf_evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);

	__hists__init(hists, &perf_hpp_list);
	return 0;
}

/*
 * XXX We probably need a hists_evsel__exit() to free the hist_entries
 * stored in the rbtree...
 */
int hists__init(void)
{
	int err = perf_evsel__object_config(sizeof(struct hists_evsel),
					    hists_evsel__init,
					    hists_evsel__exit);
	if (err)
		fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);

	return err;
}

void perf_hpp_list__init(struct perf_hpp_list *list)
{
	INIT_LIST_HEAD(&list->fields);
	INIT_LIST_HEAD(&list->sorts);
}