#include "ui/progress.h"
static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he);
u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}
void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}
bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}
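/*
 * For illustration: column widths only ever grow. If HISTC_COMM is
 * currently 8 and an entry needs 12, the width becomes 12 and true is
 * returned; a later entry needing only 10 leaves it at 12 and gets
 * false.
 */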
void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}
static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.sym) {
			symlen = (int)h->mem_info->daddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen + 1);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		}

		if (h->mem_info->iaddr.sym) {
			symlen = (int)h->mem_info->iaddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		}

		if (h->mem_info->daddr.map) {
			symlen = dso__name_len(h->mem_info->daddr.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_CPU, 3);
	hists__new_col_len(hists, HISTC_SOCKET, 6);
	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);

	if (h->srcline)
		hists__new_col_len(hists, HISTC_SRCLINE, strlen(h->srcline));

	if (h->srcfile)
		hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));

	if (h->transaction)
		hists__new_col_len(hists, HISTC_TRANSACTION,
				   hist_entry__transaction_len());
}
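/*
 * Worked example for the symbol column (assuming a 64-bit build, so
 * BITS_PER_LONG / 4 == 16): a resolved 10-character symbol needs
 * 10 + 4 = 14 columns, and 14 + 16 + 2 + 3 = 35 when the raw address
 * and the symtab origin marker are shown as well.
 */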
void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}
static void he_stat__add_cpumode_period(struct he_stat *he_stat,
					unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he_stat->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he_stat->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he_stat->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he_stat->period_guest_us += period;
		break;
	default:
		break;
	}
}
static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{
	he_stat->period		+= period;
	he_stat->weight		+= weight;
	he_stat->nr_events	+= 1;
}
static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period		+= src->period;
	dest->period_sys	+= src->period_sys;
	dest->period_us		+= src->period_us;
	dest->period_guest_sys	+= src->period_guest_sys;
	dest->period_guest_us	+= src->period_guest_us;
	dest->nr_events		+= src->nr_events;
	dest->weight		+= src->weight;
}
static void he_stat__decay(struct he_stat *he_stat)
{
	he_stat->period = (he_stat->period * 7) / 8;
	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}
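/*
 * e.g. a period of 800 decays to 700, then 612, then 535, ...: an
 * entry that stops receiving samples loses roughly 1/8 of its period
 * per pass until it reaches 0 and hists__decay_entry() can prune it.
 */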
static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;
	u64 diff;

	if (prev_period == 0)
		return true;

	he_stat__decay(&he->stat);
	if (symbol_conf.cumulate_callchain)
		he_stat__decay(he->stat_acc);
	decay_callchain(he->callchain);

	diff = prev_period - he->stat.period;

	hists->stats.total_period -= diff;
	if (!he->filtered)
		hists->stats.total_non_filtered_period -= diff;

	return he->stat.period == 0;
}
static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
	rb_erase(&he->rb_node, &hists->entries);

	if (sort__need_collapse)
		rb_erase(&he->rb_node_in, &hists->entries_collapsed);
	else
		rb_erase(&he->rb_node_in, hists->entries_in);

	--hists->nr_entries;
	if (!he->filtered)
		--hists->nr_non_filtered_entries;

	hist_entry__delete(he);
}
void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n))) {
			hists__delete_entry(hists, n);
		}
	}
}
void hists__delete_entries(struct hists *hists)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		hists__delete_entry(hists, n);
	}
}
/*
 * histogram, sorted on item, collects periods
 */

static struct hist_entry *hist_entry__new(struct hist_entry *template,
					  bool sample_self)
{
	size_t callchain_size = 0;
	struct hist_entry *he;

	if (symbol_conf.use_callchain)
		callchain_size = sizeof(struct callchain_root);

	he = zalloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;

		if (symbol_conf.cumulate_callchain) {
			he->stat_acc = malloc(sizeof(he->stat));
			if (he->stat_acc == NULL) {
				free(he);
				return NULL;
			}
			memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
			if (!sample_self)
				memset(&he->stat, 0, sizeof(he->stat));
		}

		map__get(he->ms.map);

		if (he->branch_info) {
			/*
			 * This branch info is (a part of) allocated from
			 * sample__resolve_bstack() and will be freed after
			 * adding new entries.  So we need to save a copy.
			 */
			he->branch_info = malloc(sizeof(*he->branch_info));
			if (he->branch_info == NULL) {
				map__zput(he->ms.map);
				free(he->stat_acc);
				free(he);
				return NULL;
			}

			memcpy(he->branch_info, template->branch_info,
			       sizeof(*he->branch_info));

			map__get(he->branch_info->from.map);
			map__get(he->branch_info->to.map);
		}

		if (he->mem_info) {
			map__get(he->mem_info->iaddr.map);
			map__get(he->mem_info->daddr.map);
		}

		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);

		if (he->raw_data) {
			he->raw_data = memdup(he->raw_data, he->raw_size);

			if (he->raw_data == NULL) {
				map__put(he->ms.map);
				if (he->branch_info) {
					map__put(he->branch_info->from.map);
					map__put(he->branch_info->to.map);
					free(he->branch_info);
				}
				if (he->mem_info) {
					map__put(he->mem_info->iaddr.map);
					map__put(he->mem_info->daddr.map);
				}
				free(he->stat_acc);
				free(he);
				return NULL;
			}
		}
		INIT_LIST_HEAD(&he->pairs.node);
		thread__get(he->thread);
	}

	return he;
}
static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}
static struct hist_entry *hists__findnew_entry(struct hists *hists,
					       struct hist_entry *entry,
					       struct addr_location *al,
					       bool sample_self)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	u64 period = entry->stat.period;
	u64 weight = entry->stat.weight;

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in a same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			if (sample_self)
				he_stat__add_period(&he->stat, period, weight);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_period(he->stat_acc, period, weight);

			/*
			 * This mem info was allocated from sample__resolve_mem
			 * and will not be used anymore.
			 */
			zfree(&entry->mem_info);

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				map__put(he->ms.map);
				he->ms.map = map__get(entry->ms.map);
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry, sample_self);
	if (!he)
		return NULL;

	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	if (sample_self)
		he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
	if (symbol_conf.cumulate_callchain)
		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
	return he;
}
struct hist_entry *__hists__add_entry(struct hists *hists,
				      struct addr_location *al,
				      struct symbol *sym_parent,
				      struct branch_info *bi,
				      struct mem_info *mi,
				      struct perf_sample *sample,
				      bool sample_self)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.comm = thread__comm(al->thread),
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.socket	 = al->socket,
		.cpu	 = al->cpu,
		.cpumode = al->cpumode,
		.ip	 = al->addr,
		.level	 = al->level,
		.stat = {
			.nr_events = 1,
			.period	= sample->period,
			.weight = sample->weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
		.hists	= hists,
		.branch_info = bi,
		.mem_info = mi,
		.transaction = sample->transaction,
		.raw_data = sample->raw_data,
		.raw_size = sample->raw_size,
	};

	return hists__findnew_entry(hists, &entry, al, sample_self);
}
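/*
 * A minimal caller sketch (illustrative only): with a resolved
 * addr_location and sample in hand,
 *
 *	he = __hists__add_entry(hists, &al, parent, NULL, NULL,
 *				sample, true);
 *
 * passing NULL for both bi and mi yields a plain (non-branch,
 * non-mem) entry, exactly as iter_add_single_normal_entry() does
 * below.
 */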
static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
		    struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
			struct addr_location *al __maybe_unused)
{
	return 0;
}
static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_sample *sample = iter->sample;
	struct mem_info *mi;

	mi = sample__resolve_mem(sample, al);
	if (mi == NULL)
		return -ENOMEM;

	iter->priv = mi;
	return 0;
}
static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	u64 cost;
	struct mem_info *mi = iter->priv;
	struct hists *hists = evsel__hists(iter->evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	if (mi == NULL)
		return -EINVAL;

	cost = sample->weight;
	if (!cost)
		cost = 1;

	/*
	 * must pass period=weight in order to get the correct
	 * sorting from hists__collapse_resort() which is solely
	 * based on periods. We want sorting be done on nr_events * weight
	 * and this is indirectly achieved by passing period=weight here
	 * and the he_stat__add_period() function.
	 */
	sample->period = cost;

	he = __hists__add_entry(hists, al, iter->parent, NULL, mi,
				sample, true);
	if (!he)
		return -ENOMEM;

	iter->he = he;
	return 0;
}
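/*
 * e.g. three samples with weights 40, 10 and 10 on one entry
 * accumulate period 60 and nr_events 3, so period-based sorting ranks
 * the entry by total cost rather than by raw sample count.
 */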
static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
		      struct addr_location *al __maybe_unused)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he = iter->he;
	int err = -EINVAL;

	if (he == NULL)
		goto out;

	hists__inc_nr_samples(hists, he->filtered);

	err = hist_entry__append_callchain(he, iter->sample);

out:
	/*
	 * We don't need to free iter->priv (mem_info) here since the mem info
	 * was either already freed in hists__findnew_entry() or passed to a
	 * new hist entry by hist_entry__new().
	 */
	iter->priv = NULL;

	iter->he = NULL;
	return err;
}
static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_sample *sample = iter->sample;

	bi = sample__resolve_bstack(sample, al);
	if (!bi)
		return -ENOMEM;

	iter->curr = 0;
	iter->total = sample->branch_stack->nr;

	iter->priv = bi;
	return 0;
}
static int
iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused,
			     struct addr_location *al __maybe_unused)
{
	/* to avoid calling callback function */
	iter->he = NULL;

	return 0;
}
static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi = iter->priv;
	int i = iter->curr;

	if (bi == NULL)
		return 0;

	if (iter->curr >= iter->total)
		return 0;

	al->map = bi[i].to.map;
	al->sym = bi[i].to.sym;
	al->addr = bi[i].to.addr;

	return 1;
}
static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he = NULL;
	int i = iter->curr;
	int err = 0;

	bi = iter->priv;

	if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
		goto out;

	/*
	 * The report shows the percentage of total branches captured
	 * and not events sampled. Thus we use a pseudo period of 1.
	 */
	sample->period = 1;
	sample->weight = bi->flags.cycles ? bi->flags.cycles : 1;

	he = __hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
				sample, true);
	if (he == NULL)
		return -ENOMEM;

	hists__inc_nr_samples(hists, he->filtered);

out:
	iter->he = he;
	iter->curr++;
	return err;
}
static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return iter->curr >= iter->total ? 0 : -1;
}
static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
			  struct addr_location *al __maybe_unused)
{
	return 0;
}
static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
				sample, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	return 0;
}
static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	struct hist_entry *he = iter->he;
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;

	if (he == NULL)
		return 0;

	iter->he = NULL;

	hists__inc_nr_samples(evsel__hists(evsel), he->filtered);

	return hist_entry__append_callchain(he, sample);
}
static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
			      struct addr_location *al __maybe_unused)
{
	struct hist_entry **he_cache;

	callchain_cursor_commit(&callchain_cursor);

	/*
	 * This is for detecting cycles or recursions so that they're
	 * cumulated only one time to prevent entries more than 100%
	 * overhead.
	 */
	he_cache = malloc(sizeof(*he_cache) * (iter->max_stack + 1));
	if (he_cache == NULL)
		return -ENOMEM;

	iter->priv = he_cache;
	iter->curr = 0;

	return 0;
}
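/*
 * e.g. with iter->max_stack == 127 the cache has 128 slots: one for
 * the entry added by iter_add_single_cumulative_entry() plus at most
 * one per callchain node, so it can never overflow.
 */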
static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
				 struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	int err = 0;

	he = __hists__add_entry(hists, al, iter->parent, NULL, NULL,
				sample, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	hist_entry__append_callchain(he, sample);

	/*
	 * We need to re-initialize the cursor since callchain_append()
	 * advanced the cursor to the end.
	 */
	callchain_cursor_commit(&callchain_cursor);

	hists__inc_nr_samples(hists, he->filtered);

	return err;
}
static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
			   struct addr_location *al)
{
	struct callchain_cursor_node *node;

	node = callchain_cursor_current(&callchain_cursor);
	if (node == NULL)
		return 0;

	return fill_callchain_info(al, node, iter->hide_unresolved);
}
static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
			       struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	struct hist_entry he_tmp = {
		.hists = evsel__hists(evsel),
		.cpu = al->cpu,
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ip = al->addr,
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.parent = iter->parent,
		.raw_data = sample->raw_data,
		.raw_size = sample->raw_size,
	};
	int i;
	struct callchain_cursor cursor;

	callchain_cursor_snapshot(&cursor, &callchain_cursor);

	callchain_cursor_advance(&callchain_cursor);

	/*
	 * Check if there's duplicate entries in the callchain.
	 * It's possible that it has cycles or recursive calls.
	 */
	for (i = 0; i < iter->curr; i++) {
		if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
			/* to avoid calling callback function */
			iter->he = NULL;
			return 0;
		}
	}

	he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
				sample, false);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	if (symbol_conf.use_callchain)
		callchain_append(he->callchain, &cursor, sample->period);
	return 0;
}
static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return 0;
}
const struct hist_iter_ops hist_iter_mem = {
	.prepare_entry		= iter_prepare_mem_entry,
	.add_single_entry	= iter_add_single_mem_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
	.prepare_entry		= iter_prepare_branch_entry,
	.add_single_entry	= iter_add_single_branch_entry,
	.next_entry		= iter_next_branch_entry,
	.add_next_entry		= iter_add_next_branch_entry,
	.finish_entry		= iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
	.prepare_entry		= iter_prepare_normal_entry,
	.add_single_entry	= iter_add_single_normal_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
	.prepare_entry		= iter_prepare_cumulative_entry,
	.add_single_entry	= iter_add_single_cumulative_entry,
	.next_entry		= iter_next_cumulative_entry,
	.add_next_entry		= iter_add_next_cumulative_entry,
	.finish_entry		= iter_finish_cumulative_entry,
};
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
			 int max_stack_depth, void *arg)
{
	int err, err2;

	err = sample__resolve_callchain(iter->sample, &iter->parent,
					iter->evsel, al, max_stack_depth);
	if (err)
		return err;

	iter->max_stack = max_stack_depth;

	err = iter->ops->prepare_entry(iter, al);
	if (err)
		goto out;

	err = iter->ops->add_single_entry(iter, al);
	if (err)
		goto out;

	if (iter->he && iter->add_entry_cb) {
		err = iter->add_entry_cb(iter, al, true, arg);
		if (err)
			goto out;
	}

	while (iter->ops->next_entry(iter, al)) {
		err = iter->ops->add_next_entry(iter, al);
		if (err)
			break;

		if (iter->he && iter->add_entry_cb) {
			err = iter->add_entry_cb(iter, al, false, arg);
			if (err)
				goto out;
		}
	}

out:
	err2 = iter->ops->finish_entry(iter, al);
	if (!err)
		err = err2;

	return err;
}
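/*
 * Typical driver sketch (illustrative, not a verbatim caller): pick
 * the ops matching the sample/sort mode and let the iterator run:
 *
 *	struct hist_entry_iter iter = {
 *		.evsel	= evsel,
 *		.sample	= sample,
 *		.ops	= &hist_iter_normal,
 *	};
 *
 *	err = hist_entry_iter__add(&iter, &al, PERF_MAX_STACK_DEPTH, NULL);
 *
 * perf report swaps in hist_iter_branch, hist_iter_mem or
 * hist_iter_cumulative for branch sorting, mem samples or --children
 * mode respectively.
 */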
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		cmp = fmt->cmp(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}
int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		cmp = fmt->collapse(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}
void hist_entry__delete(struct hist_entry *he)
{
	thread__zput(he->thread);
	map__zput(he->ms.map);

	if (he->branch_info) {
		map__zput(he->branch_info->from.map);
		map__zput(he->branch_info->to.map);
		zfree(&he->branch_info);
	}

	if (he->mem_info) {
		map__zput(he->mem_info->iaddr.map);
		map__zput(he->mem_info->daddr.map);
		zfree(&he->mem_info);
	}

	zfree(&he->stat_acc);
	free_srcline(he->srcline);
	if (he->srcfile && he->srcfile[0])
		free(he->srcfile);
	free_callchain(he->callchain);
	free(he->trace_output);
	free(he);
}
/*
 * collapse the histogram
 */

bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
				  struct rb_root *root, struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;

		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_stat(iter->stat_acc, he->stat_acc);

			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				callchain_merge(&callchain_cursor,
						iter->callchain,
						he->callchain);
			}
			hist_entry__delete(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return true;
}
struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}
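/*
 * For instance, while a collapse of entries_in_array[0] is in flight,
 * new entries keep landing in entries_in_array[1]: each call above
 * hands back the current tree and flips entries_in to the other slot
 * under hists->lock, so producers and the resort never walk the same
 * tree.
 */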
static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
	hists__filter_entry_by_socket(hists, he);
}
void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse)
		return;

	hists->nr_entries = 0;

	root = hists__get_rotate_entries_in(hists);

	next = rb_first(root);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
		if (prog)
			ui_progress__update(prog, 1);
	}
}
static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt, a->hists))
			continue;

		cmp = fmt->sort(fmt, a, b);
		if (cmp)
			break;
	}

	return cmp;
}
static void hists__reset_filter_stats(struct hists *hists)
{
	hists->nr_non_filtered_entries = 0;
	hists->stats.total_non_filtered_period = 0;
}

void hists__reset_stats(struct hists *hists)
{
	hists->nr_entries = 0;
	hists->stats.total_period = 0;

	hists__reset_filter_stats(hists);
}
static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
	hists->nr_non_filtered_entries++;
	hists->stats.total_non_filtered_period += h->stat.period;
}

void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered)
		hists__inc_filter_stats(hists, h);

	hists->nr_entries++;
	hists->stats.total_period += h->stat.period;
}
static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits,
					 bool use_callchain)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				      min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}
void hists__output_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;
	struct perf_evsel *evsel = hists_to_evsel(hists);
	bool use_callchain;

	if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
		use_callchain = evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN;
	else
		use_callchain = symbol_conf.use_callchain;

	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists__reset_stats(hists);
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
		hists__inc_stats(hists, n);

		if (!n->filtered)
			hists__calc_col_len(hists, n);

		if (prog)
			ui_progress__update(prog, 1);
	}
}
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	/* force fold unfiltered entry for simplicity */
	h->unfolded = false;
	h->row_offset = 0;
	h->nr_rows = 0;

	hists->stats.nr_non_filtered_samples += h->stat.nr_events;

	hists__inc_filter_stats(hists, h);
	hists__calc_col_len(hists, h);
}
static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}
void hists__filter_by_dso(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (hists__filter_entry_by_dso(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
	}
}
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

void hists__filter_by_thread(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_thread(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
	}
}
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

void hists__filter_by_symbol(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_symbol(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
	}
}
static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he)
{
	if ((hists->socket_filter > -1) &&
	    (he->socket != hists->socket_filter)) {
		he->filtered |= (1 << HIST_FILTER__SOCKET);
		return true;
	}

	return false;
}

void hists__filter_by_socket(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_socket(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SOCKET);
	}
}
void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}

void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
	events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
	if (!filtered)
		hists->stats.nr_non_filtered_samples++;
}
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair, true);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_stats(hists, he);
	}
out:
	return he;
}
static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (sort__need_collapse)
		n = hists->entries_collapsed.rb_node;
	else
		n = hists->entries_in->rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}
/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}
/*
 * Look for entries in the other hists that are not present in the leader, if
 * we find them, just add a dummy entry on the leader hists, with period=0,
 * nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}
void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
			  struct perf_sample *sample, bool nonany_branch_mode)
{
	struct branch_info *bi;

	/* If we have branch cycles always annotate them. */
	if (bs && bs->nr && bs->entries[0].flags.cycles) {
		int i;

		bi = sample__resolve_bstack(sample, al);
		if (bi) {
			struct addr_map_symbol *prev = NULL;

			/*
			 * Ignore errors, still want to process the
			 * other entries.
			 *
			 * For non standard branch modes always
			 * force no IPC (prev == NULL)
			 *
			 * Note that perf stores branches reversed from
			 * program order!
			 */
			for (i = bs->nr - 1; i >= 0; i--) {
				addr_map_symbol__account_cycles(&bi[i].from,
					nonany_branch_mode ? NULL : prev,
					bi[i].flags.cycles);
				prev = &bi[i].to;
			}
			free(bi);
		}
	}
}
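/*
 * Order example: with bs->nr == 3, entries[2] is the oldest branch
 * and entries[0] the newest, hence the backwards walk above so that
 * cycles are accounted in program order, with prev tracking the
 * previous branch target for the IPC computation.
 */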
size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = 0;

	evlist__for_each(evlist, pos) {
		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
		ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
	}

	return ret;
}
u64 hists__total_period(struct hists *hists)
{
	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
	       hists->stats.total_period;
}
int parse_filter_percentage(const struct option *opt __maybe_unused,
			    const char *arg, int unset __maybe_unused)
{
	if (!strcmp(arg, "relative"))
		symbol_conf.filter_relative = true;
	else if (!strcmp(arg, "absolute"))
		symbol_conf.filter_relative = false;
	else
		return -1;

	return 0;
}
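/*
 * e.g. both of these end up here and select what
 * hists__total_period() returns when filters are active:
 *
 *	perf report --percentage relative
 *	hist.percentage = absolute	(in perfconfig, via perf_hist_config())
 */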
int perf_hist_config(const char *var, const char *value)
{
	if (!strcmp(var, "hist.percentage"))
		return parse_filter_percentage(NULL, value, 0);

	return 0;
}
int __hists__init(struct hists *hists)
{
	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT;
	hists->entries = RB_ROOT;
	pthread_mutex_init(&hists->lock, NULL);
	hists->socket_filter = -1;
	return 0;
}
static void hists__delete_remaining_entries(struct rb_root *root)
{
	struct rb_node *node;
	struct hist_entry *he;

	while (!RB_EMPTY_ROOT(root)) {
		node = rb_first(root);
		rb_erase(node, root);

		he = rb_entry(node, struct hist_entry, rb_node_in);
		hist_entry__delete(he);
	}
}
static void hists__delete_all_entries(struct hists *hists)
{
	hists__delete_entries(hists);
	hists__delete_remaining_entries(&hists->entries_in_array[0]);
	hists__delete_remaining_entries(&hists->entries_in_array[1]);
	hists__delete_remaining_entries(&hists->entries_collapsed);
}
static void hists_evsel__exit(struct perf_evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);

	hists__delete_all_entries(hists);
}
static int hists_evsel__init(struct perf_evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);

	__hists__init(hists);
	return 0;
}
/*
 * XXX We probably need a hists_evsel__exit() to free the hist_entries
 * stored in the rbtree...
 */

int hists__init(void)
{
	int err = perf_evsel__object_config(sizeof(struct hists_evsel),
					    hists_evsel__init,
					    hists_evsel__exit);
	if (err)
		fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);

	return err;
}