// SPDX-License-Identifier: GPL-2.0
#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "map.h"
#include "session.h"
#include "namespaces.h"
#include "sort.h"
#include "units.h"
#include "evlist.h"
#include "evsel.h"
#include "annotate.h"
#include "srcline.h"
#include "thread.h"
#include "ui/progress.h"
#include <errno.h>
#include <math.h>
#include <sys/param.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he);

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}

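/*
 * Column widths only ever grow: hists__new_col_len() widens a column when
 * a longer value shows up and never shrinks it, so a single pass over the
 * entries yields the maximum width needed per column.
 */
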
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose > 0)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 8);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			if (verbose > 0)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			if (verbose > 0)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}

		if (h->branch_info->srcline_from)
			hists__new_col_len(hists, HISTC_SRCLINE_FROM,
					   strlen(h->branch_info->srcline_from));
		if (h->branch_info->srcline_to)
			hists__new_col_len(hists, HISTC_SRCLINE_TO,
					   strlen(h->branch_info->srcline_to));
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.sym) {
			symlen = (int)h->mem_info->daddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen + 1);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen);
		}

		if (h->mem_info->iaddr.sym) {
			symlen = (int)h->mem_info->iaddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		}

		if (h->mem_info->daddr.map) {
			symlen = dso__name_len(h->mem_info->daddr.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}

		hists__new_col_len(hists, HISTC_MEM_PHYS_DADDR,
				   unresolved_col_width + 4 + 2);

	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_CGROUP_ID, 20);
	hists__new_col_len(hists, HISTC_CPU, 3);
	hists__new_col_len(hists, HISTC_SOCKET, 6);
	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);

	if (h->srcline) {
		len = MAX(strlen(h->srcline), strlen(sort_srcline.se_header));
		hists__new_col_len(hists, HISTC_SRCLINE, len);
	}

	if (h->srcfile)
		hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));

	if (h->transaction)
		hists__new_col_len(hists, HISTC_TRANSACTION,
				   hist_entry__transaction_len());

	if (h->trace_output)
		hists__new_col_len(hists, HISTC_TRACE, strlen(h->trace_output));
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}

static void he_stat__add_cpumode_period(struct he_stat *he_stat,
					unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he_stat->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he_stat->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he_stat->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he_stat->period_guest_us += period;
		break;
	default:
		break;
	}
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{
	he_stat->period		+= period;
	he_stat->weight		+= weight;
	he_stat->nr_events	+= 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period		+= src->period;
	dest->period_sys	+= src->period_sys;
	dest->period_us		+= src->period_us;
	dest->period_guest_sys	+= src->period_guest_sys;
	dest->period_guest_us	+= src->period_guest_us;
	dest->nr_events		+= src->nr_events;
	dest->weight		+= src->weight;
}

static void he_stat__decay(struct he_stat *he_stat)
{
	he_stat->period = (he_stat->period * 7) / 8;
	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}

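/*
 * The 7/8 factor above implements an exponential decay with integer
 * arithmetic only: callers such as 'perf top' run it on every refresh so
 * that entries which stop receiving samples fade toward zero and are
 * eventually deleted (see hists__decay_entry() below).
 */
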
static void hists__delete_entry(struct hists *hists, struct hist_entry *he);

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;
	u64 diff;

	if (prev_period == 0)
		return true;

	he_stat__decay(&he->stat);
	if (symbol_conf.cumulate_callchain)
		he_stat__decay(he->stat_acc);
	decay_callchain(he->callchain);

	diff = prev_period - he->stat.period;

	if (!he->depth) {
		hists->stats.total_period -= diff;
		if (!he->filtered)
			hists->stats.total_non_filtered_period -= diff;
	}

	if (!he->leaf) {
		struct hist_entry *child;
		struct rb_node *node = rb_first(&he->hroot_out);

		while (node) {
			child = rb_entry(node, struct hist_entry, rb_node);
			node = rb_next(node);

			if (hists__decay_entry(hists, child))
				hists__delete_entry(hists, child);
		}
	}

	return he->stat.period == 0;
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
	struct rb_root *root_in;
	struct rb_root *root_out;

	if (he->parent_he) {
		root_in  = &he->parent_he->hroot_in;
		root_out = &he->parent_he->hroot_out;
	} else {
		if (hists__has(hists, need_collapse))
			root_in = &hists->entries_collapsed;
		else
			root_in = hists->entries_in;
		root_out = &hists->entries;
	}

	rb_erase(&he->rb_node_in, root_in);
	rb_erase(&he->rb_node, root_out);

	--hists->nr_entries;
	if (!he->filtered)
		--hists->nr_non_filtered_entries;

	hist_entry__delete(he);
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n))) {
			hists__delete_entry(hists, n);
		}
	}
}

void hists__delete_entries(struct hists *hists)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		hists__delete_entry(hists, n);
	}
}

/*
 * histogram, sorted on item, collects periods
 */

static int hist_entry__init(struct hist_entry *he,
			    struct hist_entry *template,
			    bool sample_self,
			    size_t callchain_size)
{
	*he = *template;
	he->callchain_size = callchain_size;

	if (symbol_conf.cumulate_callchain) {
		he->stat_acc = malloc(sizeof(he->stat));
		if (he->stat_acc == NULL)
			return -ENOMEM;
		memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
		if (!sample_self)
			memset(&he->stat, 0, sizeof(he->stat));
	}

	map__get(he->ms.map);

	if (he->branch_info) {
		/*
		 * This branch info is (a part of) allocated from
		 * sample__resolve_bstack() and will be freed after
		 * adding new entries.  So we need to save a copy.
		 */
		he->branch_info = malloc(sizeof(*he->branch_info));
		if (he->branch_info == NULL) {
			map__zput(he->ms.map);
			free(he->stat_acc);
			return -ENOMEM;
		}

		memcpy(he->branch_info, template->branch_info,
		       sizeof(*he->branch_info));

		map__get(he->branch_info->from.map);
		map__get(he->branch_info->to.map);
	}

	if (he->mem_info) {
		map__get(he->mem_info->iaddr.map);
		map__get(he->mem_info->daddr.map);
	}

	if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
		callchain_init(he->callchain);

	if (he->raw_data) {
		he->raw_data = memdup(he->raw_data, he->raw_size);

		if (he->raw_data == NULL) {
			map__put(he->ms.map);
			if (he->branch_info) {
				map__put(he->branch_info->from.map);
				map__put(he->branch_info->to.map);
				free(he->branch_info);
			}
			if (he->mem_info) {
				map__put(he->mem_info->iaddr.map);
				map__put(he->mem_info->daddr.map);
			}
			free(he->stat_acc);
			return -ENOMEM;
		}
	}
	INIT_LIST_HEAD(&he->pairs.node);
	thread__get(he->thread);
	he->hroot_in  = RB_ROOT;
	he->hroot_out = RB_ROOT;

	if (!symbol_conf.report_hierarchy)
		he->leaf = true;

	return 0;
}

static void *hist_entry__zalloc(size_t size)
{
	return zalloc(size + sizeof(struct hist_entry));
}

static void hist_entry__free(void *ptr)
{
	free(ptr);
}

static struct hist_entry_ops default_ops = {
	.new	= hist_entry__zalloc,
	.free	= hist_entry__free,
};

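/*
 * The hist_entry_ops indirection lets a tool embed a hist_entry inside a
 * larger private allocation: ->new receives the callchain size and may
 * over-allocate, ->free releases whatever ->new returned. 'perf c2c'
 * uses this, via hists__add_entry_ops(), to attach per-entry state.
 */
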
static struct hist_entry *hist_entry__new(struct hist_entry *template,
					  bool sample_self)
{
	struct hist_entry_ops *ops = template->ops;
	size_t callchain_size = 0;
	struct hist_entry *he;
	int err = 0;

	if (!ops)
		ops = template->ops = &default_ops;

	if (symbol_conf.use_callchain)
		callchain_size = sizeof(struct callchain_root);

	he = ops->new(callchain_size);
	if (he) {
		err = hist_entry__init(he, template, sample_self, callchain_size);
		if (err) {
			ops->free(he);
			he = NULL;
		}
	}

	return he;
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

static void hist_entry__add_callchain_period(struct hist_entry *he, u64 period)
{
	if (!hist_entry__has_callchains(he) || !symbol_conf.use_callchain)
		return;

	he->hists->callchain_period += period;
	if (!he->filtered)
		he->hists->callchain_non_filtered_period += period;
}

static struct hist_entry *hists__findnew_entry(struct hists *hists,
					       struct hist_entry *entry,
					       struct addr_location *al,
					       bool sample_self)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	u64 period = entry->stat.period;
	u64 weight = entry->stat.weight;

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in the same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			if (sample_self) {
				he_stat__add_period(&he->stat, period, weight);
				hist_entry__add_callchain_period(he, period);
			}
			if (symbol_conf.cumulate_callchain)
				he_stat__add_period(he->stat_acc, period, weight);

			/*
			 * This mem info was allocated from sample__resolve_mem
			 * and will not be used anymore.
			 */
			mem_info__zput(entry->mem_info);

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				map__put(he->ms.map);
				he->ms.map = map__get(entry->ms.map);
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry, sample_self);
	if (!he)
		return NULL;

	if (sample_self)
		hist_entry__add_callchain_period(he, period);
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	if (sample_self)
		he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
	if (symbol_conf.cumulate_callchain)
		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
	return he;
}

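/*
 * hists__findnew_entry() is the single aggregation point: a sample either
 * bumps the stats of an existing entry that compares equal under the
 * configured sort keys, or a new entry is allocated and linked into the
 * current hists->entries_in tree.
 */
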
static struct hist_entry*
__hists__add_entry(struct hists *hists,
		   struct addr_location *al,
		   struct symbol *sym_parent,
		   struct branch_info *bi,
		   struct mem_info *mi,
		   struct perf_sample *sample,
		   bool sample_self,
		   struct hist_entry_ops *ops)
{
	struct namespaces *ns = thread__namespaces(al->thread);
	struct hist_entry entry = {
		.thread	= al->thread,
		.comm = thread__comm(al->thread),
		.cgroup_id = {
			.dev = ns ? ns->link_info[CGROUP_NS_INDEX].dev : 0,
			.ino = ns ? ns->link_info[CGROUP_NS_INDEX].ino : 0,
		},
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.srcline = al->srcline ? strdup(al->srcline) : NULL,
		.socket	 = al->socket,
		.cpu	 = al->cpu,
		.cpumode = al->cpumode,
		.ip	 = al->addr,
		.level	 = al->level,
		.stat = {
			.nr_events = 1,
			.period	= sample->period,
			.weight = sample->weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
		.hists	= hists,
		.branch_info = bi,
		.mem_info = mi,
		.transaction = sample->transaction,
		.raw_data = sample->raw_data,
		.raw_size = sample->raw_size,
		.ops = ops,
	}, *he = hists__findnew_entry(hists, &entry, al, sample_self);

	if (!hists->has_callchains && he && he->callchain_size != 0)
		hists->has_callchains = true;
	return he;
}

struct hist_entry *hists__add_entry(struct hists *hists,
				    struct addr_location *al,
				    struct symbol *sym_parent,
				    struct branch_info *bi,
				    struct mem_info *mi,
				    struct perf_sample *sample,
				    bool sample_self)
{
	return __hists__add_entry(hists, al, sym_parent, bi, mi,
				  sample, sample_self, NULL);
}

struct hist_entry *hists__add_entry_ops(struct hists *hists,
					struct hist_entry_ops *ops,
					struct addr_location *al,
					struct symbol *sym_parent,
					struct branch_info *bi,
					struct mem_info *mi,
					struct perf_sample *sample,
					bool sample_self)
{
	return __hists__add_entry(hists, al, sym_parent, bi, mi,
				  sample, sample_self, ops);
}

static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
		    struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
			struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_sample *sample = iter->sample;
	struct mem_info *mi;

	mi = sample__resolve_mem(sample, al);
	if (mi == NULL)
		return -ENOMEM;

	iter->priv = mi;
	return 0;
}

static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	u64 cost;
	struct mem_info *mi = iter->priv;
	struct hists *hists = evsel__hists(iter->evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	if (mi == NULL)
		return -EINVAL;

	cost = sample->weight;
	if (!cost)
		cost = 1;

	/*
	 * must pass period=weight in order to get the correct
	 * sorting from hists__collapse_resort() which is solely
	 * based on periods. We want sorting to be done on nr_events * weight
	 * and this is indirectly achieved by passing period=weight here
	 * and the he_stat__add_period() function.
	 */
	sample->period = cost;

	he = hists__add_entry(hists, al, iter->parent, NULL, mi,
			      sample, true);
	if (!he)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
		      struct addr_location *al __maybe_unused)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he = iter->he;
	int err = -EINVAL;

	if (he == NULL)
		goto out;

	hists__inc_nr_samples(hists, he->filtered);

	err = hist_entry__append_callchain(he, iter->sample);

out:
	/*
	 * We don't need to free iter->priv (mem_info) here since the mem info
	 * was either already freed in hists__findnew_entry() or passed to a
	 * new hist entry by hist_entry__new().
	 */
	iter->priv = NULL;

	iter->he = NULL;
	return err;
}

static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_sample *sample = iter->sample;

	bi = sample__resolve_bstack(sample, al);
	if (!bi)
		return -ENOMEM;

	iter->curr = 0;
	iter->total = sample->branch_stack->nr;

	iter->priv = bi;
	return 0;
}

static int
iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused,
			     struct addr_location *al __maybe_unused)
{
	/* to avoid calling callback function */
	iter->he = NULL;

	return 0;
}

static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi = iter->priv;
	int i = iter->curr;

	if (bi == NULL)
		return 0;

	if (iter->curr >= iter->total)
		return 0;

	al->map = bi[i].to.map;
	al->sym = bi[i].to.sym;
	al->addr = bi[i].to.addr;
	return 1;
}

static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he = NULL;
	int i = iter->curr;
	int err = 0;

	bi = iter->priv;

	if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
		goto out;

	/*
	 * The report shows the percentage of total branches captured
	 * and not events sampled. Thus we use a pseudo period of 1.
	 */
	sample->period = 1;
	sample->weight = bi->flags.cycles ? bi->flags.cycles : 1;

	he = hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
			      sample, true);
	if (he == NULL)
		return -ENOMEM;

	hists__inc_nr_samples(hists, he->filtered);

out:
	iter->he = he;
	iter->curr++;
	return err;
}

static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return iter->curr >= iter->total ? 0 : -1;
}

static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
			  struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
			      sample, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	struct hist_entry *he = iter->he;
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;

	if (he == NULL)
		return 0;

	iter->he = NULL;

	hists__inc_nr_samples(evsel__hists(evsel), he->filtered);

	return hist_entry__append_callchain(he, sample);
}

static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
			      struct addr_location *al __maybe_unused)
{
	struct hist_entry **he_cache;

	callchain_cursor_commit(&callchain_cursor);

	/*
	 * This is for detecting cycles or recursions so that they're
	 * cumulated only one time to prevent entries more than 100%
	 * overhead.
	 */
	he_cache = malloc(sizeof(*he_cache) * (callchain_cursor.nr + 1));
	if (he_cache == NULL)
		return -ENOMEM;

	iter->priv = he_cache;
	iter->curr = 0;

	return 0;
}

static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
				 struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	int err = 0;

	he = hists__add_entry(hists, al, iter->parent, NULL, NULL,
			      sample, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	hist_entry__append_callchain(he, sample);

	/*
	 * We need to re-initialize the cursor since callchain_append()
	 * advanced the cursor to the end.
	 */
	callchain_cursor_commit(&callchain_cursor);

	hists__inc_nr_samples(hists, he->filtered);

	return err;
}

static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
			   struct addr_location *al)
{
	struct callchain_cursor_node *node;

	node = callchain_cursor_current(&callchain_cursor);
	if (node == NULL)
		return 0;

	return fill_callchain_info(al, node, iter->hide_unresolved);
}

static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
			       struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	struct hist_entry he_tmp = {
		.hists = evsel__hists(evsel),
		.cpu = al->cpu,
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ip = al->addr,
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.srcline = al->srcline ? strdup(al->srcline) : NULL,
		.parent = iter->parent,
		.raw_data = sample->raw_data,
		.raw_size = sample->raw_size,
	};
	int i;
	struct callchain_cursor cursor;

	callchain_cursor_snapshot(&cursor, &callchain_cursor);

	callchain_cursor_advance(&callchain_cursor);

	/*
	 * Check if there's duplicate entries in the callchain.
	 * It's possible that it has cycles or recursive calls.
	 */
	for (i = 0; i < iter->curr; i++) {
		if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
			/* to avoid calling callback function */
			iter->he = NULL;
			return 0;
		}
	}

	he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
			      sample, false);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
		callchain_append(he->callchain, &cursor, sample->period);
	return 0;
}

static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return 0;
}

const struct hist_iter_ops hist_iter_mem = {
	.prepare_entry		= iter_prepare_mem_entry,
	.add_single_entry	= iter_add_single_mem_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
	.prepare_entry		= iter_prepare_branch_entry,
	.add_single_entry	= iter_add_single_branch_entry,
	.next_entry		= iter_next_branch_entry,
	.add_next_entry		= iter_add_next_branch_entry,
	.finish_entry		= iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
	.prepare_entry		= iter_prepare_normal_entry,
	.add_single_entry	= iter_add_single_normal_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
	.prepare_entry		= iter_prepare_cumulative_entry,
	.add_single_entry	= iter_add_single_cumulative_entry,
	.next_entry		= iter_next_cumulative_entry,
	.add_next_entry		= iter_add_next_cumulative_entry,
	.finish_entry		= iter_finish_cumulative_entry,
};

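/*
 * A hist_entry_iter drives one sample through prepare_entry ->
 * add_single_entry -> (next_entry/add_next_entry)* -> finish_entry.
 * A minimal caller sketch (illustrative only; 'evsel', 'sample', 'al' and
 * 'max_stack' stand for values the caller has already resolved):
 *
 *	struct hist_entry_iter iter = {
 *		.evsel	= evsel,
 *		.sample	= sample,
 *		.ops	= &hist_iter_normal,
 *	};
 *	err = hist_entry_iter__add(&iter, &al, max_stack, NULL);
 */
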
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
			 int max_stack_depth, void *arg)
{
	int err, err2;
	struct map *alm = NULL;

	if (al)
		alm = map__get(al->map);

	err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
					iter->evsel, al, max_stack_depth);
	if (err)
		return err;

	err = iter->ops->prepare_entry(iter, al);
	if (err)
		goto out;

	err = iter->ops->add_single_entry(iter, al);
	if (err)
		goto out;

	if (iter->he && iter->add_entry_cb) {
		err = iter->add_entry_cb(iter, al, true, arg);
		if (err)
			goto out;
	}

	while (iter->ops->next_entry(iter, al)) {
		err = iter->ops->add_next_entry(iter, al);
		if (err)
			break;

		if (iter->he && iter->add_entry_cb) {
			err = iter->add_entry_cb(iter, al, false, arg);
			if (err)
				goto out;
		}
	}

out:
	err2 = iter->ops->finish_entry(iter, al);
	if (!err)
		err = err2;

	map__put(alm);

	return err;
}

int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct hists *hists = left->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    !perf_hpp__defined_dynamic_entry(fmt, hists))
			continue;

		cmp = fmt->cmp(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct hists *hists = left->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    !perf_hpp__defined_dynamic_entry(fmt, hists))
			continue;

		cmp = fmt->collapse(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}

void hist_entry__delete(struct hist_entry *he)
{
	struct hist_entry_ops *ops = he->ops;

	thread__zput(he->thread);
	map__zput(he->ms.map);

	if (he->branch_info) {
		map__zput(he->branch_info->from.map);
		map__zput(he->branch_info->to.map);
		free_srcline(he->branch_info->srcline_from);
		free_srcline(he->branch_info->srcline_to);
		zfree(&he->branch_info);
	}

	if (he->mem_info) {
		map__zput(he->mem_info->iaddr.map);
		map__zput(he->mem_info->daddr.map);
		mem_info__zput(he->mem_info);
	}

	zfree(&he->stat_acc);
	free_srcline(he->srcline);
	if (he->srcfile && he->srcfile[0])
		free(he->srcfile);
	free_callchain(he->callchain);
	free(he->trace_output);
	free(he->raw_data);
	ops->free(he);
}

/*
 * If this is not the last column, then we need to pad it according to the
 * pre-calculated max length for this column, otherwise don't bother adding
 * spaces because that would break viewing this with, for instance, 'less',
 * that would show tons of trailing spaces when a long C++ demangled method
 * name is sampled.
 */
int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
				   struct perf_hpp_fmt *fmt, int printed)
{
	if (!list_is_last(&fmt->list, &he->hists->hpp_list->fields)) {
		const int width = fmt->width(fmt, hpp, he->hists);
		if (printed < width) {
			advance_hpp(hpp, printed);
			printed = scnprintf(hpp->buf, hpp->size, "%-*s", width - printed, " ");
		}
	}

	return printed;
}

/*
 * collapse the histogram
 */

static void hists__apply_filters(struct hists *hists, struct hist_entry *he);
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *he,
				       enum hist_filter type);

typedef bool (*fmt_chk_fn)(struct perf_hpp_fmt *fmt);

static bool check_thread_entry(struct perf_hpp_fmt *fmt)
{
	return perf_hpp__is_thread_entry(fmt) || perf_hpp__is_comm_entry(fmt);
}

static void hist_entry__check_and_remove_filter(struct hist_entry *he,
						enum hist_filter type,
						fmt_chk_fn check)
{
	struct perf_hpp_fmt *fmt;
	bool type_match = false;
	struct hist_entry *parent = he->parent_he;

	switch (type) {
	case HIST_FILTER__THREAD:
		if (symbol_conf.comm_list == NULL &&
		    symbol_conf.pid_list == NULL &&
		    symbol_conf.tid_list == NULL)
			return;
		break;
	case HIST_FILTER__DSO:
		if (symbol_conf.dso_list == NULL)
			return;
		break;
	case HIST_FILTER__SYMBOL:
		if (symbol_conf.sym_list == NULL)
			return;
		break;
	case HIST_FILTER__PARENT:
	case HIST_FILTER__GUEST:
	case HIST_FILTER__HOST:
	case HIST_FILTER__SOCKET:
	case HIST_FILTER__C2C:
	default:
		return;
	}

	/* if it's filtered by own fmt, it has to have filter bits */
	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
		if (check(fmt)) {
			type_match = true;
			break;
		}
	}

	if (type_match) {
		/*
		 * If the filter is for current level entry, propagate
		 * filter marker to parents.  The marker bit was
		 * already set by default so it only needs to clear
		 * non-filtered entries.
		 */
		if (!(he->filtered & (1 << type))) {
			while (parent) {
				parent->filtered &= ~(1 << type);
				parent = parent->parent_he;
			}
		}
	} else {
		/*
		 * If current entry doesn't have matching formats, set
		 * filter marker for upper level entries.  It will be
		 * cleared if its lower level entries are not filtered.
		 *
		 * For lower-level entries, it inherits parent's
		 * filter bit so that lower level entries of a
		 * non-filtered entry won't set the filter marker.
		 */
		if (parent == NULL)
			he->filtered |= (1 << type);
		else
			he->filtered |= (parent->filtered & (1 << type));
	}
}

static void hist_entry__apply_hierarchy_filters(struct hist_entry *he)
{
	hist_entry__check_and_remove_filter(he, HIST_FILTER__THREAD,
					    check_thread_entry);

	hist_entry__check_and_remove_filter(he, HIST_FILTER__DSO,
					    perf_hpp__is_dso_entry);

	hist_entry__check_and_remove_filter(he, HIST_FILTER__SYMBOL,
					    perf_hpp__is_sym_entry);

	hists__apply_filters(he->hists, he);
}

static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
						 struct rb_root *root,
						 struct hist_entry *he,
						 struct hist_entry *parent_he,
						 struct perf_hpp_list *hpp_list)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter, *new;
	struct perf_hpp_fmt *fmt;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = 0;
		perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
			cmp = fmt->collapse(fmt, iter, he);
			if (cmp)
				break;
		}

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);
			return iter;
		}

		if (cmp < 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}

	new = hist_entry__new(he, true);
	if (new == NULL)
		return NULL;

	hists->nr_entries++;

	/* save related format list for output */
	new->hpp_list = hpp_list;
	new->parent_he = parent_he;

	hist_entry__apply_hierarchy_filters(new);

	/* some fields are now passed to 'new' */
	perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
		if (perf_hpp__is_trace_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
			he->trace_output = NULL;
		else
			new->trace_output = NULL;

		if (perf_hpp__is_srcline_entry(fmt))
			he->srcline = NULL;
		else
			new->srcline = NULL;

		if (perf_hpp__is_srcfile_entry(fmt))
			he->srcfile = NULL;
		else
			new->srcfile = NULL;
	}

	rb_link_node(&new->rb_node_in, parent, p);
	rb_insert_color(&new->rb_node_in, root);
	return new;
}

static int hists__hierarchy_insert_entry(struct hists *hists,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct perf_hpp_list_node *node;
	struct hist_entry *new_he = NULL;
	struct hist_entry *parent = NULL;
	int depth = 0;
	int ret = 0;

	list_for_each_entry(node, &hists->hpp_formats, list) {
		/* skip period (overhead) and elided columns */
		if (node->level == 0 || node->skip)
			continue;

		/* insert copy of 'he' for each fmt into the hierarchy */
		new_he = hierarchy_insert_entry(hists, root, he, parent, &node->hpp);
		if (new_he == NULL) {
			ret = -1;
			break;
		}

		root = &new_he->hroot_in;
		new_he->depth = depth++;
		parent = new_he;
	}

	if (new_he) {
		new_he->leaf = true;

		if (hist_entry__has_callchains(new_he) &&
		    symbol_conf.use_callchain) {
			callchain_cursor_reset(&callchain_cursor);
			if (callchain_merge(&callchain_cursor,
					    new_he->callchain,
					    he->callchain) < 0)
				ret = -1;
		}
	}

	/* 'he' is no longer used */
	hist_entry__delete(he);

	/* return 0 (or -1) since it already applied filters */
	return ret;
}

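/*
 * In hierarchy mode each sort key level gets its own rbtree: the loop
 * above inserts one copy of 'he' per perf_hpp_list_node, chained through
 * parent_he, so a flat entry becomes a root-to-leaf path in the output.
 */
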
static int hists__collapse_insert_entry(struct hists *hists,
					struct rb_root *root,
					struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	if (symbol_conf.report_hierarchy)
		return hists__hierarchy_insert_entry(hists, root, he);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			int ret = 0;

			he_stat__add_stat(&iter->stat, &he->stat);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_stat(iter->stat_acc, he->stat_acc);

			if (hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				if (callchain_merge(&callchain_cursor,
						    iter->callchain,
						    he->callchain) < 0)
					ret = -1;
			}
			hist_entry__delete(he);
			return ret;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return 1;
}

struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}

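/*
 * hists->entries_in_array[] double-buffers input: under hists->lock the
 * filled tree is handed to the collapser while new samples keep landing
 * in the other tree, which lets 'perf top' collapse and resort
 * concurrently with sample insertion.
 */
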
static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
	hists__filter_entry_by_socket(hists, he);
}

int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	int ret;

	if (!hists__has(hists, need_collapse))
		return 0;

	hists->nr_entries = 0;

	root = hists__get_rotate_entries_in(hists);

	next = rb_first(root);

	while (next) {
		if (session_done())
			break;
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n);
		if (ret < 0)
			return -1;

		if (ret) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
		if (prog)
			ui_progress__update(prog, 1);
	}
	return 0;
}

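/*
 * The full life cycle of a hists tree: samples accumulate in
 * hists->entries_in, hists__collapse_resort() merges duplicates into
 * hists->entries_collapsed, and output_resort() below builds the final
 * display order in hists->entries.
 */
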
static int64_t hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
	struct hists *hists = a->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__should_skip(fmt, a->hists))
			continue;

		cmp = fmt->sort(fmt, a, b);
		if (cmp)
			break;
	}

	return cmp;
}

static void hists__reset_filter_stats(struct hists *hists)
{
	hists->nr_non_filtered_entries = 0;
	hists->stats.total_non_filtered_period = 0;
}

void hists__reset_stats(struct hists *hists)
{
	hists->nr_entries = 0;
	hists->stats.total_period = 0;

	hists__reset_filter_stats(hists);
}

static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
	hists->nr_non_filtered_entries++;
	hists->stats.total_non_filtered_period += h->stat.period;
}

void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered)
		hists__inc_filter_stats(hists, h);

	hists->nr_entries++;
	hists->stats.total_period += h->stat.period;
}

static void hierarchy_recalc_total_periods(struct hists *hists)
{
	struct rb_node *node;
	struct hist_entry *he;

	node = rb_first(&hists->entries);

	hists->stats.total_period = 0;
	hists->stats.total_non_filtered_period = 0;

	/*
	 * recalculate total period using top-level entries only
	 * since lower level entries only see non-filtered entries
	 * but upper level entries have sum of both entries.
	 */
	while (node) {
		he = rb_entry(node, struct hist_entry, rb_node);
		node = rb_next(node);

		hists->stats.total_period += he->stat.period;
		if (!he->filtered)
			hists->stats.total_non_filtered_period += he->stat.period;
	}
}

static void hierarchy_insert_output_entry(struct rb_root *root,
					  struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct perf_hpp_fmt *fmt;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, root);

	/* update column width of dynamic entry */
	perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt))
			fmt->sort(fmt, he, NULL);
	}
}

static void hists__hierarchy_output_resort(struct hists *hists,
					   struct ui_progress *prog,
					   struct rb_root *root_in,
					   struct rb_root *root_out,
					   u64 min_callchain_hits,
					   bool use_callchain)
{
	struct rb_node *node;
	struct hist_entry *he;

	*root_out = RB_ROOT;
	node = rb_first(root_in);

	while (node) {
		he = rb_entry(node, struct hist_entry, rb_node_in);
		node = rb_next(node);

		hierarchy_insert_output_entry(root_out, he);

		if (prog)
			ui_progress__update(prog, 1);

		hists->nr_entries++;
		if (!he->filtered) {
			hists->nr_non_filtered_entries++;
			hists__calc_col_len(hists, he);
		}

		if (!he->leaf) {
			hists__hierarchy_output_resort(hists, prog,
						       &he->hroot_in,
						       &he->hroot_out,
						       min_callchain_hits,
						       use_callchain);
			continue;
		}

		if (!use_callchain)
			continue;

		if (callchain_param.mode == CHAIN_GRAPH_REL) {
			u64 total = he->stat.period;

			if (symbol_conf.cumulate_callchain)
				total = he->stat_acc->period;

			min_callchain_hits = total * (callchain_param.min_percent / 100);
		}

		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);
	}
}

static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits,
					 bool use_callchain)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct perf_hpp_fmt *fmt;

	if (use_callchain) {
		if (callchain_param.mode == CHAIN_GRAPH_REL) {
			u64 total = he->stat.period;

			if (symbol_conf.cumulate_callchain)
				total = he->stat_acc->period;

			min_callchain_hits = total * (callchain_param.min_percent / 100);
		}
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);
	}

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);

	perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    perf_hpp__defined_dynamic_entry(fmt, he->hists))
			fmt->sort(fmt, he, NULL);	/* update column width */
	}
}

static void output_resort(struct hists *hists, struct ui_progress *prog,
			  bool use_callchain, hists__resort_cb_t cb)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 callchain_total;
	u64 min_callchain_hits;

	callchain_total = hists->callchain_period;
	if (symbol_conf.filter_relative)
		callchain_total = hists->callchain_non_filtered_period;

	min_callchain_hits = callchain_total * (callchain_param.min_percent / 100);

	hists__reset_stats(hists);
	hists__reset_col_len(hists);

	if (symbol_conf.report_hierarchy) {
		hists__hierarchy_output_resort(hists, prog,
					       &hists->entries_collapsed,
					       &hists->entries,
					       min_callchain_hits,
					       use_callchain);
		hierarchy_recalc_total_periods(hists);
		return;
	}

	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		if (cb && cb(n))
			continue;

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
		hists__inc_stats(hists, n);

		if (!n->filtered)
			hists__calc_col_len(hists, n);

		if (prog)
			ui_progress__update(prog, 1);
	}
}

void perf_evsel__output_resort(struct perf_evsel *evsel, struct ui_progress *prog)
{
	bool use_callchain;

	if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
		use_callchain = evsel__has_callchain(evsel);
	else
		use_callchain = symbol_conf.use_callchain;

	use_callchain |= symbol_conf.show_branchflag_count;

	output_resort(evsel__hists(evsel), prog, use_callchain, NULL);
}

void hists__output_resort(struct hists *hists, struct ui_progress *prog)
{
	output_resort(hists, prog, symbol_conf.use_callchain, NULL);
}

void hists__output_resort_cb(struct hists *hists, struct ui_progress *prog,
			     hists__resort_cb_t cb)
{
	output_resort(hists, prog, symbol_conf.use_callchain, cb);
}

static bool can_goto_child(struct hist_entry *he, enum hierarchy_move_dir hmd)
{
	if (he->leaf || hmd == HMD_FORCE_SIBLING)
		return false;

	if (he->unfolded || hmd == HMD_FORCE_CHILD)
		return true;

	return false;
}

struct rb_node *rb_hierarchy_last(struct rb_node *node)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	while (can_goto_child(he, HMD_NORMAL)) {
		node = rb_last(&he->hroot_out);
		he = rb_entry(node, struct hist_entry, rb_node);
	}
	return node;
}

struct rb_node *__rb_hierarchy_next(struct rb_node *node, enum hierarchy_move_dir hmd)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	if (can_goto_child(he, hmd))
		node = rb_first(&he->hroot_out);
	else
		node = rb_next(node);

	while (node == NULL) {
		he = he->parent_he;
		if (he == NULL)
			break;

		node = rb_next(&he->rb_node);
	}
	return node;
}

struct rb_node *rb_hierarchy_prev(struct rb_node *node)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	node = rb_prev(node);
	if (node)
		return rb_hierarchy_last(node);

	he = he->parent_he;
	if (he == NULL)
		return NULL;

	return &he->rb_node;
}

bool hist_entry__has_hierarchy_children(struct hist_entry *he, float limit)
{
	struct rb_node *node;
	struct hist_entry *child;
	float percent;

	if (he->leaf)
		return false;

	node = rb_first(&he->hroot_out);
	child = rb_entry(node, struct hist_entry, rb_node);

	while (node && child->filtered) {
		node = rb_next(node);
		child = rb_entry(node, struct hist_entry, rb_node);
	}

	if (node)
		percent = hist_entry__get_percent_limit(child);
	else
		percent = 0;

	return node && percent >= limit;
}

static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);

	if (symbol_conf.report_hierarchy) {
		struct hist_entry *parent = h->parent_he;

		while (parent) {
			he_stat__add_stat(&parent->stat, &h->stat);

			parent->filtered &= ~(1 << filter);

			if (parent->filtered)
				goto next;

			/* force fold unfiltered entry for simplicity */
			parent->unfolded = false;
			parent->has_no_entry = false;
			parent->row_offset = 0;
			parent->nr_rows = 0;
next:
			parent = parent->parent_he;
		}
	}

	if (h->filtered)
		return;

	/* force fold unfiltered entry for simplicity */
	h->unfolded = false;
	h->has_no_entry = false;
	h->row_offset = 0;
	h->nr_rows = 0;

	hists->stats.nr_non_filtered_samples += h->stat.nr_events;

	hists__inc_filter_stats(hists, h);
	hists__calc_col_len(hists, h);
}

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he)
{
	if ((hists->socket_filter > -1) &&
	    (he->socket != hists->socket_filter)) {
		he->filtered |= (1 << HIST_FILTER__SOCKET);
		return true;
	}

	return false;
}

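/*
 * The predicates above only mark an entry with the corresponding filter
 * bit; nothing is removed from the tree. Lifting a filter clears the bit
 * and re-accounts the entry via hists__remove_entry_filter().
 */
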
typedef bool (*filter_fn_t)(struct hists *hists, struct hist_entry *he);

static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t filter)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (filter(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, type);
	}
}

static void resort_filtered_entry(struct rb_root *root, struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct rb_root new_root = RB_ROOT;
	struct rb_node *nd;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, root);

	if (he->leaf || he->filtered)
		return;

	nd = rb_first(&he->hroot_out);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		nd = rb_next(nd);
		rb_erase(&h->rb_node, &he->hroot_out);

		resort_filtered_entry(&new_root, h);
	}

	he->hroot_out = new_root;
}

static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg)
{
	struct rb_node *nd;
	struct rb_root new_root = RB_ROOT;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	nd = rb_first(&hists->entries);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
		int ret;

		ret = hist_entry__filter(h, type, arg);

		/*
		 * case 1. non-matching type
		 * zero out the period, set filter marker and move to child
		 */
		if (ret < 0) {
			memset(&h->stat, 0, sizeof(h->stat));
			h->filtered |= (1 << type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_CHILD);
		}
		/*
		 * case 2. matched type (filter out)
		 * set filter marker and move to next
		 */
		else if (ret == 1) {
			h->filtered |= (1 << type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
		}
		/*
		 * case 3. ok (not filtered)
		 * add period to hists and parents, erase the filter marker
		 * and move to next sibling
		 */
		else {
			hists__remove_entry_filter(hists, h, type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
		}
	}

	hierarchy_recalc_total_periods(hists);

	/*
	 * resort output after applying a new filter since filter in a lower
	 * hierarchy can change periods in an upper hierarchy.
	 */
	nd = rb_first(&hists->entries);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		nd = rb_next(nd);
		rb_erase(&h->rb_node, &hists->entries);

		resort_filtered_entry(&new_root, h);
	}

	hists->entries = new_root;
}

void hists__filter_by_thread(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__THREAD,
					hists->thread_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__THREAD,
				      hists__filter_entry_by_thread);
}

void hists__filter_by_dso(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__DSO,
					hists->dso_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__DSO,
				      hists__filter_entry_by_dso);
}

void hists__filter_by_symbol(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__SYMBOL,
					hists->symbol_filter_str);
	else
		hists__filter_by_type(hists, HIST_FILTER__SYMBOL,
				      hists__filter_entry_by_symbol);
}

void hists__filter_by_socket(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__SOCKET,
					&hists->socket_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__SOCKET,
				      hists__filter_entry_by_socket);
}

void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}

void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
	events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
	if (!filtered)
		hists->stats.nr_non_filtered_samples++;
}

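/*
 * nr_events[0] doubles as the grand total across all PERF_RECORD_* types;
 * the per-type counters use the record type itself as the index.
 */
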
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;

	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair, true);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		if (symbol_conf.cumulate_callchain)
			memset(he->stat_acc, 0, sizeof(he->stat));
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_stats(hists, he);
		he->dummy = true;
	}
out:
	return he;
}

static struct hist_entry *add_dummy_hierarchy_entry(struct hists *hists,
						    struct rb_root *root,
						    struct hist_entry *pair)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	struct perf_hpp_fmt *fmt;

	p = &root->rb_node;
	while (*p != NULL) {
		int64_t cmp = 0;

		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
			cmp = fmt->collapse(fmt, he, pair);
			if (cmp)
				break;
		}
		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}

	he = hist_entry__new(pair, true);
	if (he) {
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);

		he->dummy = true;
		he->hists = hists;
		memset(&he->stat, 0, sizeof(he->stat));
		hists__inc_stats(hists, he);
	}
out:
	return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (hists__has(hists, need_collapse))
		n = hists->entries_collapsed.rb_node;
	else
		n = hists->entries_in->rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

static struct hist_entry *hists__find_hierarchy_entry(struct rb_root *root,
						      struct hist_entry *he)
{
	struct rb_node *n = root->rb_node;

	while (n) {
		struct hist_entry *iter;
		struct perf_hpp_fmt *fmt;
		int64_t cmp = 0;

		iter = rb_entry(n, struct hist_entry, rb_node_in);
		perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
			cmp = fmt->collapse(fmt, iter, he);
			if (cmp)
				break;
		}

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

static void hists__match_hierarchy(struct rb_root *leader_root,
				   struct rb_root *other_root)
{
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	for (nd = rb_first(leader_root); nd; nd = rb_next(nd)) {
		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_hierarchy_entry(other_root, pos);

		if (pair) {
			hist_entry__add_pair(pair, pos);
			hists__match_hierarchy(&pos->hroot_in, &pair->hroot_in);
		}
	}
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (symbol_conf.report_hierarchy) {
		/* hierarchy report always collapses entries */
		return hists__match_hierarchy(&leader->entries_collapsed,
					      &other->entries_collapsed);
	}

	if (hists__has(leader, need_collapse))
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}

static int hists__link_hierarchy(struct hists *leader_hists,
				 struct hist_entry *parent,
				 struct rb_root *leader_root,
				 struct rb_root *other_root)
{
	struct rb_node *nd;
	struct hist_entry *pos, *leader;

	for (nd = rb_first(other_root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (hist_entry__has_pairs(pos)) {
			bool found = false;

			list_for_each_entry(leader, &pos->pairs.head, pairs.node) {
				if (leader->hists == leader_hists) {
					found = true;
					break;
				}
			}
			if (!found)
				return -1;
		} else {
			leader = add_dummy_hierarchy_entry(leader_hists,
							   leader_root, pos);
			if (leader == NULL)
				return -1;

			/* do not point parent in the pos */
			leader->parent_he = parent;

			hist_entry__add_pair(pos, leader);
		}

		if (!pos->leaf) {
			if (hists__link_hierarchy(leader_hists, leader,
						  &leader->hroot_in,
						  &pos->hroot_in) < 0)
				return -1;
		}
	}
	return 0;
}

/*
 * Look for entries in the other hists that are not present in the leader, if
 * we find them, just add a dummy entry on the leader hists, with period=0,
 * nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (symbol_conf.report_hierarchy) {
		/* hierarchy report always collapses entries */
		return hists__link_hierarchy(leader, NULL,
					     &leader->entries_collapsed,
					     &other->entries_collapsed);
	}

	if (hists__has(other, need_collapse))
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}

void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
			  struct perf_sample *sample, bool nonany_branch_mode)
{
	struct branch_info *bi;

	/* If we have branch cycles always annotate them. */
	if (bs && bs->nr && bs->entries[0].flags.cycles) {
		int i;

		bi = sample__resolve_bstack(sample, al);
		if (bi) {
			struct addr_map_symbol *prev = NULL;

			/*
			 * Ignore errors, still want to process the
			 * other entries.
			 *
			 * For non standard branch modes always
			 * force no IPC (prev == NULL)
			 *
			 * Note that perf stores branches reversed from
			 * program order!
			 */
			for (i = bs->nr - 1; i >= 0; i--) {
				addr_map_symbol__account_cycles(&bi[i].from,
					nonany_branch_mode ? NULL : prev,
					bi[i].flags.cycles);
				prev = &bi[i].to;
			}
			free(bi);
		}
	}
}

size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = 0;

	evlist__for_each_entry(evlist, pos) {
		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
		ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
	}

	return ret;
}

u64 hists__total_period(struct hists *hists)
{
	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
		hists->stats.total_period;
}

int __hists__scnprintf_title(struct hists *hists, char *bf, size_t size, bool show_freq)
{
	char unit;
	int printed;
	const struct dso *dso = hists->dso_filter;
	const struct thread *thread = hists->thread_filter;
	int socket_id = hists->socket_filter;
	unsigned long nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE];
	u64 nr_events = hists->stats.total_period;
	struct perf_evsel *evsel = hists_to_evsel(hists);
	const char *ev_name = perf_evsel__name(evsel);
	char buf[512], sample_freq_str[64] = "";
	size_t buflen = sizeof(buf);
	char ref[30] = " show reference callgraph, ";
	bool enable_ref = false;

	if (symbol_conf.filter_relative) {
		nr_samples = hists->stats.nr_non_filtered_samples;
		nr_events = hists->stats.total_non_filtered_period;
	}

	if (perf_evsel__is_group_event(evsel)) {
		struct perf_evsel *pos;

		perf_evsel__group_desc(evsel, buf, buflen);
		ev_name = buf;

		for_each_group_member(pos, evsel) {
			struct hists *pos_hists = evsel__hists(pos);

			if (symbol_conf.filter_relative) {
				nr_samples += pos_hists->stats.nr_non_filtered_samples;
				nr_events += pos_hists->stats.total_non_filtered_period;
			} else {
				nr_samples += pos_hists->stats.nr_events[PERF_RECORD_SAMPLE];
				nr_events += pos_hists->stats.total_period;
			}
		}
	}

	if (symbol_conf.show_ref_callgraph &&
	    strstr(ev_name, "call-graph=no"))
		enable_ref = true;

	if (show_freq)
		scnprintf(sample_freq_str, sizeof(sample_freq_str), " %d Hz,", evsel->attr.sample_freq);

	nr_samples = convert_unit(nr_samples, &unit);
	printed = scnprintf(bf, size,
			   "Samples: %lu%c of event%s '%s',%s%sEvent count (approx.): %" PRIu64,
			   nr_samples, unit, evsel->nr_members > 1 ? "s" : "",
			   ev_name, sample_freq_str, enable_ref ? ref : " ", nr_events);

	if (hists->uid_filter_str)
		printed += snprintf(bf + printed, size - printed,
				    ", UID: %s", hists->uid_filter_str);
	if (thread) {
		if (hists__has(hists, thread)) {
			printed += scnprintf(bf + printed, size - printed,
				    ", Thread: %s(%d)",
				     (thread->comm_set ? thread__comm_str(thread) : ""),
				    thread->tid);
		} else {
			printed += scnprintf(bf + printed, size - printed,
				    ", Thread: %s",
				     (thread->comm_set ? thread__comm_str(thread) : ""));
		}
	}
	if (dso)
		printed += scnprintf(bf + printed, size - printed,
				    ", DSO: %s", dso->short_name);
	if (socket_id > -1)
		printed += scnprintf(bf + printed, size - printed,
				    ", Processor Socket: %d", socket_id);

	return printed;
}

int parse_filter_percentage(const struct option *opt __maybe_unused,
			    const char *arg, int unset __maybe_unused)
{
	if (!strcmp(arg, "relative"))
		symbol_conf.filter_relative = true;
	else if (!strcmp(arg, "absolute"))
		symbol_conf.filter_relative = false;
	else {
		pr_debug("Invalid percentage: %s\n", arg);
		return -1;
	}

	return 0;
}

int perf_hist_config(const char *var, const char *value)
{
	if (!strcmp(var, "hist.percentage"))
		return parse_filter_percentage(NULL, value, 0);

	return 0;
}

int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
{
	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT;
	hists->entries = RB_ROOT;
	pthread_mutex_init(&hists->lock, NULL);
	hists->socket_filter = -1;
	hists->hpp_list = hpp_list;
	INIT_LIST_HEAD(&hists->hpp_formats);
	return 0;
}

static void hists__delete_remaining_entries(struct rb_root *root)
{
	struct rb_node *node;
	struct hist_entry *he;

	while (!RB_EMPTY_ROOT(root)) {
		node = rb_first(root);
		rb_erase(node, root);

		he = rb_entry(node, struct hist_entry, rb_node_in);
		hist_entry__delete(he);
	}
}

static void hists__delete_all_entries(struct hists *hists)
{
	hists__delete_entries(hists);
	hists__delete_remaining_entries(&hists->entries_in_array[0]);
	hists__delete_remaining_entries(&hists->entries_in_array[1]);
	hists__delete_remaining_entries(&hists->entries_collapsed);
}

static void hists_evsel__exit(struct perf_evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);
	struct perf_hpp_fmt *fmt, *pos;
	struct perf_hpp_list_node *node, *tmp;

	hists__delete_all_entries(hists);

	list_for_each_entry_safe(node, tmp, &hists->hpp_formats, list) {
		perf_hpp_list__for_each_format_safe(&node->hpp, fmt, pos) {
			list_del(&fmt->list);
			free(fmt);
		}
		list_del(&node->list);
		free(node);
	}
}

static int hists_evsel__init(struct perf_evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);

	__hists__init(hists, &perf_hpp_list);
	return 0;
}

/*
 * XXX We probably need a hists_evsel__exit() to free the hist_entries
 * stored in the rbtree...
 */

int hists__init(void)
{
	int err = perf_evsel__object_config(sizeof(struct hists_evsel),
					    hists_evsel__init,
					    hists_evsel__exit);
	if (err)
		fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);

	return err;
}

void perf_hpp_list__init(struct perf_hpp_list *list)
{
	INIT_LIST_HEAD(&list->fields);
	INIT_LIST_HEAD(&list->sorts);
}