#include "ui/progress.h"

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);
u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}
void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}
bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}
void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}
static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}
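
/*
 * Grow the per-column width bookkeeping for one entry.  Each struct hists
 * remembers the widest string seen so far for every column (symbol, dso,
 * comm, branch from/to, mem info, ...) so the TUI/stdio output can size
 * its columns; unresolved addresses fall back to a fixed hexadecimal
 * width of BITS_PER_LONG/4 characters.
 */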
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.sym) {
			symlen = (int)h->mem_info->daddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen + 1);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		}
		if (h->mem_info->daddr.map) {
			symlen = dso__name_len(h->mem_info->daddr.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);

	if (h->transaction)
		hists__new_col_len(hists, HISTC_TRANSACTION,
				   hist_entry__transaction_len());
}
void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}
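
/*
 * Attribute a sample's period to the right bucket (kernel, user, guest
 * kernel, guest user) based on the PERF_RECORD_MISC_* cpumode bits
 * carried with the sample.
 */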
static void he_stat__add_cpumode_period(struct he_stat *he_stat,
					unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he_stat->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he_stat->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he_stat->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he_stat->period_guest_us += period;
		break;
	default:
		break;
	}
}
static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{
	he_stat->period		+= period;
	he_stat->weight		+= weight;
	he_stat->nr_events	+= 1;
}
static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period		+= src->period;
	dest->period_sys	+= src->period_sys;
	dest->period_us		+= src->period_us;
	dest->period_guest_sys	+= src->period_guest_sys;
	dest->period_guest_us	+= src->period_guest_us;
	dest->nr_events		+= src->nr_events;
	dest->weight		+= src->weight;
}
static void he_stat__decay(struct he_stat *he_stat)
{
	he_stat->period = (he_stat->period * 7) / 8;
	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}
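
/*
 * Decay an entry by 7/8 on every refresh; entries whose period reaches
 * zero are reported back to the caller so they can be pruned.  This is
 * what keeps a continuously updating view (e.g. perf top) from growing
 * without bound.
 */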
static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;
	u64 diff;

	if (prev_period == 0)
		return true;

	he_stat__decay(&he->stat);
	if (symbol_conf.cumulate_callchain)
		he_stat__decay(he->stat_acc);

	diff = prev_period - he->stat.period;

	hists->stats.total_period -= diff;
	if (!he->filtered)
		hists->stats.total_non_filtered_period -= diff;

	return he->stat.period == 0;
}
static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
	rb_erase(&he->rb_node, &hists->entries);

	if (sort__need_collapse)
		rb_erase(&he->rb_node_in, &hists->entries_collapsed);

	--hists->nr_entries;
	if (!he->filtered)
		--hists->nr_non_filtered_entries;

	hist_entry__delete(he);
}
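
/*
 * Walk all entries, decaying each one.  zap_user/zap_kernel force
 * removal of user-level ('.') or kernel-level entries regardless of
 * their remaining period.
 */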
void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n))) {
			hists__delete_entry(hists, n);
		}
	}
}
void hists__delete_entries(struct hists *hists)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		hists__delete_entry(hists, n);
	}
}
/*
 * histogram, sorted on item, collects periods
 */

static struct hist_entry *hist_entry__new(struct hist_entry *template,
					  bool sample_self)
{
	size_t callchain_size = 0;
	struct hist_entry *he;

	if (symbol_conf.use_callchain)
		callchain_size = sizeof(struct callchain_root);

	he = zalloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;

		if (symbol_conf.cumulate_callchain) {
			he->stat_acc = malloc(sizeof(he->stat));
			if (he->stat_acc == NULL) {
				free(he);
				return NULL;
			}
			memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
			if (!sample_self)
				memset(&he->stat, 0, sizeof(he->stat));
		}

		if (he->ms.map)
			he->ms.map->referenced = true;

		if (he->branch_info) {
			/*
			 * This branch info is (a part of) allocated from
			 * sample__resolve_bstack() and will be freed after
			 * adding new entries.  So we need to save a copy.
			 */
			he->branch_info = malloc(sizeof(*he->branch_info));
			if (he->branch_info == NULL) {
				free(he->stat_acc);
				free(he);
				return NULL;
			}

			memcpy(he->branch_info, template->branch_info,
			       sizeof(*he->branch_info));

			if (he->branch_info->from.map)
				he->branch_info->from.map->referenced = true;
			if (he->branch_info->to.map)
				he->branch_info->to.map->referenced = true;
		}

		if (he->mem_info) {
			if (he->mem_info->iaddr.map)
				he->mem_info->iaddr.map->referenced = true;
			if (he->mem_info->daddr.map)
				he->mem_info->daddr.map->referenced = true;
		}

		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);

		INIT_LIST_HEAD(&he->pairs.node);
		thread__get(he->thread);
	}

	return he;
}
static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}
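
/*
 * Insert a new entry into hists->entries_in, or merge it into an
 * existing one when hist_entry__cmp() says the sort keys match.  On a
 * match the periods are accumulated and the template's mem_info is
 * dropped; otherwise a fresh hist_entry is allocated from the template.
 */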
static struct hist_entry *add_hist_entry(struct hists *hists,
					 struct hist_entry *entry,
					 struct addr_location *al,
					 bool sample_self)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	u64 period = entry->stat.period;
	u64 weight = entry->stat.weight;

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in the same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless of which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			if (sample_self)
				he_stat__add_period(&he->stat, period, weight);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_period(he->stat_acc, period, weight);

			/*
			 * This mem info was allocated from sample__resolve_mem
			 * and will not be used anymore.
			 */
			zfree(&entry->mem_info);

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				he->ms.map = entry->ms.map;
				if (he->ms.map)
					he->ms.map->referenced = true;
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry, sample_self);
	if (!he)
		return NULL;

	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	if (sample_self)
		he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
	if (symbol_conf.cumulate_callchain)
		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
	return he;
}
struct hist_entry *__hists__add_entry(struct hists *hists,
				      struct addr_location *al,
				      struct symbol *sym_parent,
				      struct branch_info *bi,
				      struct mem_info *mi,
				      u64 period, u64 weight, u64 transaction,
				      bool sample_self)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.comm = thread__comm(al->thread),
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.cpumode = al->cpumode,
		.ip	 = al->addr,
		.level	 = al->level,
		.stat = {
			.nr_events = 1,
			.period	= period,
			.weight = weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
		.hists	= hists,
		.branch_info = bi,
		.mem_info = mi,
		.transaction = transaction,
	};

	return add_hist_entry(hists, &entry, al, sample_self);
}
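
/*
 * hist_entry_iter callbacks.  A sample is turned into one or more hist
 * entries depending on the iterator flavour: a single entry for normal
 * samples, one per branch for branch stacks, one per data source for
 * mem samples, and one per callchain node in cumulative (children)
 * mode.  The nop variants below are used where a flavour has no
 * per-node work to do.
 */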
static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
		    struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
			struct addr_location *al __maybe_unused)
{
	return 0;
}
static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_sample *sample = iter->sample;
	struct mem_info *mi;

	mi = sample__resolve_mem(sample, al);
	if (mi == NULL)
		return -ENOMEM;

	iter->priv = mi;
	return 0;
}
static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	u64 cost;
	struct mem_info *mi = iter->priv;
	struct hists *hists = evsel__hists(iter->evsel);
	struct hist_entry *he;

	if (mi == NULL)
		return -EINVAL;

	cost = iter->sample->weight;
	if (!cost)
		cost = 1;

	/*
	 * must pass period=weight in order to get the correct
	 * sorting from hists__collapse_resort() which is solely
	 * based on periods. We want sorting to be done on nr_events * weight
	 * and this is indirectly achieved by passing period=weight here
	 * and the he_stat__add_period() function.
	 */
	he = __hists__add_entry(hists, al, iter->parent, NULL, mi,
				cost, cost, 0, true);
	if (!he)
		return -ENOMEM;

	iter->he = he;
	return 0;
}
static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
		      struct addr_location *al __maybe_unused)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he = iter->he;
	int err = -EINVAL;

	if (he == NULL)
		goto out;

	hists__inc_nr_samples(hists, he->filtered);

	err = hist_entry__append_callchain(he, iter->sample);

out:
	/*
	 * We don't need to free iter->priv (mem_info) here since
	 * the mem info was either already freed in add_hist_entry() or
	 * passed to a new hist entry by hist_entry__new().
	 */
	iter->priv = NULL;

	iter->he = NULL;
	return err;
}
static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_sample *sample = iter->sample;

	bi = sample__resolve_bstack(sample, al);
	if (!bi)
		return -ENOMEM;

	iter->curr = 0;
	iter->total = sample->branch_stack->nr;

	iter->priv = bi;
	return 0;
}
static int
iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused,
			     struct addr_location *al __maybe_unused)
{
	/* to avoid calling callback function */
	iter->he = NULL;

	return 0;
}
static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi = iter->priv;
	int i = iter->curr;

	if (bi == NULL)
		return 0;

	if (iter->curr >= iter->total)
		return 0;

	al->map = bi[i].to.map;
	al->sym = bi[i].to.sym;
	al->addr = bi[i].to.addr;

	return 1;
}
static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he = NULL;
	int i = iter->curr;
	int err = 0;

	bi = iter->priv;

	if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
		goto out;

	/*
	 * The report shows the percentage of total branches captured
	 * and not events sampled. Thus we use a pseudo period of 1.
	 */
	he = __hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
				1, 1, 0, true);
	if (he == NULL)
		return -ENOMEM;

	hists__inc_nr_samples(hists, he->filtered);

out:
	iter->he = he;
	iter->curr++;
	return err;
}
static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return iter->curr >= iter->total ? 0 : -1;
}
static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
			  struct addr_location *al __maybe_unused)
{
	return 0;
}
static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	return 0;
}
static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	struct hist_entry *he = iter->he;
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;

	if (he == NULL)
		return 0;

	iter->he = NULL;

	hists__inc_nr_samples(evsel__hists(evsel), he->filtered);

	return hist_entry__append_callchain(he, sample);
}
static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter __maybe_unused,
			      struct addr_location *al __maybe_unused)
{
	struct hist_entry **he_cache;

	callchain_cursor_commit(&callchain_cursor);

	/*
	 * This is for detecting cycles or recursions so that they're
	 * accumulated only once, preventing entries from exceeding
	 * 100% overhead.
	 */
	he_cache = malloc(sizeof(*he_cache) * (PERF_MAX_STACK_DEPTH + 1));
	if (he_cache == NULL)
		return -ENOMEM;

	iter->priv = he_cache;
	iter->curr = 0;

	return 0;
}
static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
				 struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	int err = 0;

	he = __hists__add_entry(hists, al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	hist_entry__append_callchain(he, sample);

	/*
	 * We need to re-initialize the cursor since callchain_append()
	 * advanced the cursor to the end.
	 */
	callchain_cursor_commit(&callchain_cursor);

	hists__inc_nr_samples(hists, he->filtered);

	return err;
}
static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
			   struct addr_location *al)
{
	struct callchain_cursor_node *node;

	node = callchain_cursor_current(&callchain_cursor);
	if (node == NULL)
		return 0;

	return fill_callchain_info(al, node, iter->hide_unresolved);
}
static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
			       struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	struct hist_entry he_tmp = {
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ip = al->addr,
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.parent = iter->parent,
	};
	int i;
	struct callchain_cursor cursor;

	callchain_cursor_snapshot(&cursor, &callchain_cursor);

	callchain_cursor_advance(&callchain_cursor);

	/*
	 * Check if there are duplicate entries in the callchain.
	 * It's possible that it has cycles or recursive calls.
	 */
	for (i = 0; i < iter->curr; i++) {
		if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
			/* to avoid calling callback function */
			iter->he = NULL;
			return 0;
		}
	}

	he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, false);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	if (symbol_conf.use_callchain)
		callchain_append(he->callchain, &cursor, sample->period);

	return 0;
}
static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return 0;
}
const struct hist_iter_ops hist_iter_mem = {
	.prepare_entry		= iter_prepare_mem_entry,
	.add_single_entry	= iter_add_single_mem_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
	.prepare_entry		= iter_prepare_branch_entry,
	.add_single_entry	= iter_add_single_branch_entry,
	.next_entry		= iter_next_branch_entry,
	.add_next_entry		= iter_add_next_branch_entry,
	.finish_entry		= iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
	.prepare_entry		= iter_prepare_normal_entry,
	.add_single_entry	= iter_add_single_normal_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
	.prepare_entry		= iter_prepare_cumulative_entry,
	.add_single_entry	= iter_add_single_cumulative_entry,
	.next_entry		= iter_next_cumulative_entry,
	.add_next_entry		= iter_add_next_cumulative_entry,
	.finish_entry		= iter_finish_cumulative_entry,
};
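
/*
 * Drive one sample through an iterator: resolve the callchain, then
 * call prepare_entry, add_single_entry, and add_next_entry for every
 * node reported by next_entry, finally letting finish_entry clean up.
 * The optional add_entry_cb is invoked after each added entry (true
 * for the single/first entry, false for the following ones).
 */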
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
			 struct perf_evsel *evsel, struct perf_sample *sample,
			 int max_stack_depth, void *arg)
{
	int err, err2;

	err = sample__resolve_callchain(sample, &iter->parent, evsel, al,
					max_stack_depth);
	if (err)
		return err;

	iter->evsel = evsel;
	iter->sample = sample;

	err = iter->ops->prepare_entry(iter, al);
	if (err)
		goto out;

	err = iter->ops->add_single_entry(iter, al);
	if (err)
		goto out;

	if (iter->he && iter->add_entry_cb) {
		err = iter->add_entry_cb(iter, al, true, arg);
		if (err)
			goto out;
	}

	while (iter->ops->next_entry(iter, al)) {
		err = iter->ops->add_next_entry(iter, al);
		if (err)
			break;

		if (iter->he && iter->add_entry_cb) {
			err = iter->add_entry_cb(iter, al, false, arg);
			if (err)
				goto out;
		}
	}

out:
	err2 = iter->ops->finish_entry(iter, al);
	if (!err)
		err = err2;

	return err;
}
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->cmp(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->collapse(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}
*he
)
939 thread__zput(he
->thread
);
940 zfree(&he
->branch_info
);
941 zfree(&he
->mem_info
);
942 zfree(&he
->stat_acc
);
943 free_srcline(he
->srcline
);
944 free_callchain(he
->callchain
);
/*
 * collapse the histogram
 */

static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_stat(iter->stat_acc, he->stat_acc);

			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				callchain_merge(&callchain_cursor,
						iter->callchain,
						he->callchain);
			}
			hist_entry__delete(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return true;
}
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}
static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
}
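
/*
 * Merge the current input tree (entries_in) into entries_collapsed,
 * combining entries that compare equal under the collapse keys.  Only
 * needed when the configured sort keys actually require collapsing
 * (sort__need_collapse).
 */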
void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse)
		return;

	hists->nr_entries = 0;

	root = hists__get_rotate_entries_in(hists);

	next = rb_first(root);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
		if (prog)
			ui_progress__update(prog, 1);
	}
}
static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->sort(fmt, a, b);
		if (cmp)
			break;
	}

	return cmp;
}
static void hists__reset_filter_stats(struct hists *hists)
{
	hists->nr_non_filtered_entries = 0;
	hists->stats.total_non_filtered_period = 0;
}

void hists__reset_stats(struct hists *hists)
{
	hists->nr_entries = 0;
	hists->stats.total_period = 0;

	hists__reset_filter_stats(hists);
}
static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
	hists->nr_non_filtered_entries++;
	hists->stats.total_non_filtered_period += h->stat.period;
}

void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered)
		hists__inc_filter_stats(hists, h);

	hists->nr_entries++;
	hists->stats.total_period += h->stat.period;
}
static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (symbol_conf.use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}
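
/*
 * Rebuild hists->entries in output order: take the collapsed (or raw
 * input) tree, order entries by the configured output keys via
 * hist_entry__sort(), and recompute stats and column widths while at
 * it.  Callchains below min_percent of the total period are filtered
 * by callchain_param.sort().
 */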
void hists__output_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;

	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists__reset_stats(hists);
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
		hists__inc_stats(hists, n);

		if (!n->filtered)
			hists__calc_col_len(hists, n);

		if (prog)
			ui_progress__update(prog, 1);
	}
}
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	/* force fold unfiltered entry for simplicity */
	h->ms.unfolded = false;

	hists->stats.nr_non_filtered_samples += h->stat.nr_events;

	hists__inc_filter_stats(hists, h);
	hists__calc_col_len(hists, h);
}
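
/*
 * Filtering works in two parts: hists__filter_entry_by_*() marks a
 * single entry's filtered bits against the current dso/thread/symbol
 * filter, and hists__filter_by_*() walks the whole tree re-applying
 * the filter and updating the non-filtered stats and column widths.
 */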
static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}
void hists__filter_by_dso(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (hists__filter_entry_by_dso(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
	}
}
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}
void hists__filter_by_thread(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_thread(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
	}
}
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}
void hists__filter_by_symbol(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_symbol(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
	}
}
void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}
void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
	events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
	if (!filtered)
		hists->stats.nr_non_filtered_samples++;
}
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair, true);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_stats(hists, he);
	}
out:
	return he;
}
static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (sort__need_collapse)
		n = hists->entries_collapsed.rb_node;
	else
		n = hists->entries_in->rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}
/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}
/*
 * Look for entries in the other hists that are not present in the leader.
 * If we find them, just add a dummy entry on the leader hists, with period=0,
 * nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}
size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = 0;

	evlist__for_each(evlist, pos) {
		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
		ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
	}

	return ret;
}
u64 hists__total_period(struct hists *hists)
{
	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
		hists->stats.total_period;
}
int parse_filter_percentage(const struct option *opt __maybe_unused,
			    const char *arg, int unset __maybe_unused)
{
	if (!strcmp(arg, "relative"))
		symbol_conf.filter_relative = true;
	else if (!strcmp(arg, "absolute"))
		symbol_conf.filter_relative = false;
	else
		return -1;

	return 0;
}
int perf_hist_config(const char *var, const char *value)
{
	if (!strcmp(var, "hist.percentage"))
		return parse_filter_percentage(NULL, value, 0);

	return 0;
}
static int hists_evsel__init(struct perf_evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);

	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT;
	hists->entries = RB_ROOT;
	pthread_mutex_init(&hists->lock, NULL);
	return 0;
}
/*
 * XXX We probably need a hists_evsel__exit() to free the hist_entries
 * stored in the rbtree...
 */

int hists__init(void)
{
	int err = perf_evsel__object_config(sizeof(struct hists_evsel),
					    hists_evsel__init, NULL);
	if (err)
		fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);

	return err;
}