// SPDX-License-Identifier: GPL-2.0
#include "callchain.h"
#include "debug.h"
#include "dso.h"
#include "build-id.h"
#include "hist.h"
#include "map.h"
#include "map_symbol.h"
#include "branch.h"
#include "mem-events.h"
#include "session.h"
#include "namespaces.h"
#include "sort.h"
#include "units.h"
#include "evlist.h"
#include "evsel.h"
#include "annotate.h"
#include "srcline.h"
#include "symbol.h"
#include "thread.h"
#include "ui/progress.h"
#include <errno.h>
#include <math.h>
#include <inttypes.h>
#include <sys/param.h>
#include <linux/rbtree.h>
#include <linux/string.h>
#include <linux/time64.h>
#include <linux/zalloc.h>
static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he);
u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose > 0)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 8);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			if (verbose > 0)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			if (verbose > 0)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}

		if (h->branch_info->srcline_from)
			hists__new_col_len(hists, HISTC_SRCLINE_FROM,
					   strlen(h->branch_info->srcline_from));
		if (h->branch_info->srcline_to)
			hists__new_col_len(hists, HISTC_SRCLINE_TO,
					   strlen(h->branch_info->srcline_to));
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.sym) {
			symlen = (int)h->mem_info->daddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen + 1);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen);
		}

		if (h->mem_info->iaddr.sym) {
			symlen = (int)h->mem_info->iaddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		}

		if (h->mem_info->daddr.map) {
			symlen = dso__name_len(h->mem_info->daddr.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}

		hists__new_col_len(hists, HISTC_MEM_PHYS_DADDR,
				   unresolved_col_width + 4 + 2);

	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_CGROUP_ID, 20);
	hists__new_col_len(hists, HISTC_CPU, 3);
	hists__new_col_len(hists, HISTC_SOCKET, 6);
	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);

	if (symbol_conf.nanosecs)
		hists__new_col_len(hists, HISTC_TIME, 16);
	else
		hists__new_col_len(hists, HISTC_TIME, 12);

	if (h->srcline) {
		len = MAX(strlen(h->srcline), strlen(sort_srcline.se_header));
		hists__new_col_len(hists, HISTC_SRCLINE, len);
	}

	if (h->srcfile)
		hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));

	if (h->transaction)
		hists__new_col_len(hists, HISTC_TRANSACTION,
				   hist_entry__transaction_len());

	if (h->trace_output)
		hists__new_col_len(hists, HISTC_TRACE, strlen(h->trace_output));
}
void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}
static void he_stat__add_cpumode_period(struct he_stat *he_stat,
					unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he_stat->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he_stat->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he_stat->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he_stat->period_guest_us += period;
		break;
	default:
		break;
	}
}

static long hist_time(unsigned long htime)
{
	unsigned long time_quantum = symbol_conf.time_quantum;
	if (time_quantum)
		return (htime / time_quantum) * time_quantum;
	return htime;
}
static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{
	he_stat->period += period;
	he_stat->weight += weight;
	he_stat->nr_events += 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period += src->period;
	dest->period_sys += src->period_sys;
	dest->period_us += src->period_us;
	dest->period_guest_sys += src->period_guest_sys;
	dest->period_guest_us += src->period_guest_us;
	dest->nr_events += src->nr_events;
	dest->weight += src->weight;
}
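
/*
 * Exponential decay: each pass scales the period and event count by
 * 7/8, so entries that stop receiving samples fade out over successive
 * refreshes (this is what ages entries in 'perf top').  The integer
 * division drives small counts to zero, at which point the entry can
 * be removed by hists__decay_entry()/hists__delete_entry() below.
 */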
static void he_stat__decay(struct he_stat *he_stat)
{
	he_stat->period = (he_stat->period * 7) / 8;
	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he);

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;
	u64 diff;

	if (prev_period == 0)
		return true;

	he_stat__decay(&he->stat);
	if (symbol_conf.cumulate_callchain)
		he_stat__decay(he->stat_acc);
	decay_callchain(he->callchain);

	diff = prev_period - he->stat.period;

	if (!he->depth) {
		hists->stats.total_period -= diff;
		if (!he->filtered)
			hists->stats.total_non_filtered_period -= diff;
	}

	if (!he->leaf) {
		struct hist_entry *child;
		struct rb_node *node = rb_first_cached(&he->hroot_out);
		while (node) {
			child = rb_entry(node, struct hist_entry, rb_node);
			node = rb_next(node);

			if (hists__decay_entry(hists, child))
				hists__delete_entry(hists, child);
		}
	}

	return he->stat.period == 0;
}
static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
	struct rb_root_cached *root_in;
	struct rb_root_cached *root_out;

	if (he->parent_he) {
		root_in  = &he->parent_he->hroot_in;
		root_out = &he->parent_he->hroot_out;
	} else {
		if (hists__has(hists, need_collapse))
			root_in = &hists->entries_collapsed;
		else
			root_in = hists->entries_in;
		root_out = &hists->entries;
	}

	rb_erase_cached(&he->rb_node_in, root_in);
	rb_erase_cached(&he->rb_node, root_out);

	--hists->nr_entries;
	if (!he->filtered)
		--hists->nr_non_filtered_entries;

	hist_entry__delete(he);
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n))) {
			hists__delete_entry(hists, n);
		}
	}
}

void hists__delete_entries(struct hists *hists)
{
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		hists__delete_entry(hists, n);
	}
}

struct hist_entry *hists__get_entry(struct hists *hists, int idx)
{
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct hist_entry *n;
	int i = 0;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (i == idx)
			return n;

		next = rb_next(&n->rb_node);
		i++;
	}

	return NULL;
}
/*
 * histogram, sorted on item, collects periods
 */

static int hist_entry__init(struct hist_entry *he,
			    struct hist_entry *template,
			    bool sample_self,
			    size_t callchain_size)
{
	*he = *template;
	he->callchain_size = callchain_size;

	if (symbol_conf.cumulate_callchain) {
		he->stat_acc = malloc(sizeof(he->stat));
		if (he->stat_acc == NULL)
			return -ENOMEM;
		memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
		if (!sample_self)
			memset(&he->stat, 0, sizeof(he->stat));
	}

	map__get(he->ms.map);

	if (he->branch_info) {
		/*
		 * This branch info is (a part of) allocated from
		 * sample__resolve_bstack() and will be freed after
		 * adding new entries.  So we need to save a copy.
		 */
		he->branch_info = malloc(sizeof(*he->branch_info));
		if (he->branch_info == NULL)
			goto err;

		memcpy(he->branch_info, template->branch_info,
		       sizeof(*he->branch_info));

		map__get(he->branch_info->from.map);
		map__get(he->branch_info->to.map);
	}

	if (he->mem_info) {
		map__get(he->mem_info->iaddr.map);
		map__get(he->mem_info->daddr.map);
	}

	if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
		callchain_init(he->callchain);

	if (he->raw_data) {
		he->raw_data = memdup(he->raw_data, he->raw_size);
		if (he->raw_data == NULL)
			goto err_infos;
	}

	if (he->srcline) {
		he->srcline = strdup(he->srcline);
		if (he->srcline == NULL)
			goto err_rawdata;
	}

	if (symbol_conf.res_sample) {
		he->res_samples = calloc(sizeof(struct res_sample),
					 symbol_conf.res_sample);
		if (!he->res_samples)
			goto err_srcline;
	}

	INIT_LIST_HEAD(&he->pairs.node);
	thread__get(he->thread);
	he->hroot_in  = RB_ROOT_CACHED;
	he->hroot_out = RB_ROOT_CACHED;

	if (!symbol_conf.report_hierarchy)
		he->leaf = true;

	return 0;

err_srcline:
	zfree(&he->srcline);

err_rawdata:
	zfree(&he->raw_data);

err_infos:
	if (he->branch_info) {
		map__put(he->branch_info->from.map);
		map__put(he->branch_info->to.map);
		zfree(&he->branch_info);
	}
	if (he->mem_info) {
		map__put(he->mem_info->iaddr.map);
		map__put(he->mem_info->daddr.map);
	}
err:
	map__zput(he->ms.map);
	zfree(&he->stat_acc);
	return -ENOMEM;
}
static void *hist_entry__zalloc(size_t size)
{
	return zalloc(size + sizeof(struct hist_entry));
}

static void hist_entry__free(void *ptr)
{
	free(ptr);
}

static struct hist_entry_ops default_ops = {
	.new	= hist_entry__zalloc,
	.free	= hist_entry__free,
};

static struct hist_entry *hist_entry__new(struct hist_entry *template,
					  bool sample_self)
{
	struct hist_entry_ops *ops = template->ops;
	size_t callchain_size = 0;
	struct hist_entry *he;
	int err = 0;

	if (!ops)
		ops = template->ops = &default_ops;

	if (symbol_conf.use_callchain)
		callchain_size = sizeof(struct callchain_root);

	he = ops->new(callchain_size);
	if (he) {
		err = hist_entry__init(he, template, sample_self, callchain_size);
		if (err) {
			ops->free(he);
			he = NULL;
		}
	}

	return he;
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

static void hist_entry__add_callchain_period(struct hist_entry *he, u64 period)
{
	if (!hist_entry__has_callchains(he) || !symbol_conf.use_callchain)
		return;

	he->hists->callchain_period += period;
	if (!he->filtered)
		he->hists->callchain_non_filtered_period += period;
}
static struct hist_entry *hists__findnew_entry(struct hists *hists,
					       struct hist_entry *entry,
					       struct addr_location *al,
					       bool sample_self)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	u64 period = entry->stat.period;
	u64 weight = entry->stat.weight;
	bool leftmost = true;

	p = &hists->entries_in->rb_root.rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in a same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			if (sample_self) {
				he_stat__add_period(&he->stat, period, weight);
				hist_entry__add_callchain_period(he, period);
			}
			if (symbol_conf.cumulate_callchain)
				he_stat__add_period(he->stat_acc, period, weight);

			/*
			 * This mem info was allocated from sample__resolve_mem
			 * and will not be used anymore.
			 */
			mem_info__zput(entry->mem_info);

			block_info__zput(entry->block_info);

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				map__put(he->ms.map);
				he->ms.map = map__get(entry->ms.map);
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	he = hist_entry__new(entry, sample_self);
	if (!he)
		return NULL;

	if (sample_self)
		hist_entry__add_callchain_period(he, period);
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color_cached(&he->rb_node_in, hists->entries_in, leftmost);
out:
	if (sample_self)
		he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
	if (symbol_conf.cumulate_callchain)
		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
	return he;
}
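
/*
 * Rejection sampling to avoid modulo bias when reducing random() to
 * the range [0, high): 'thresh' is 2^32 % high (computed as -high % high
 * in unsigned arithmetic), and draws below it are rejected so that the
 * surviving range divides evenly by 'high'.
 */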
static unsigned random_max(unsigned high)
{
	unsigned thresh = -high % high;
	for (;;) {
		unsigned r = random();
		if (r >= thresh)
			return r % high;
	}
}
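
/*
 * Record a (time, cpu, tid) tuple for this entry so a representative
 * raw sample can be looked up later.  The first symbol_conf.res_sample
 * samples fill the array; after that each new sample overwrites a
 * uniformly chosen slot, keeping a bounded, randomized subset of the
 * samples seen.
 */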
656 static void hists__res_sample(struct hist_entry *he, struct perf_sample *sample)
658 struct res_sample *r;
659 int j;
661 if (he->num_res < symbol_conf.res_sample) {
662 j = he->num_res++;
663 } else {
664 j = random_max(symbol_conf.res_sample);
666 r = &he->res_samples[j];
667 r->time = sample->time;
668 r->cpu = sample->cpu;
669 r->tid = sample->tid;
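
/*
 * Build a template hist_entry on the stack from the resolved sample
 * and hand it to hists__findnew_entry(), which either folds the period
 * into an existing entry or clones the template into a new one via
 * hist_entry__new().
 */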
static struct hist_entry*
__hists__add_entry(struct hists *hists,
		   struct addr_location *al,
		   struct symbol *sym_parent,
		   struct branch_info *bi,
		   struct mem_info *mi,
		   struct block_info *block_info,
		   struct perf_sample *sample,
		   bool sample_self,
		   struct hist_entry_ops *ops)
{
	struct namespaces *ns = thread__namespaces(al->thread);
	struct hist_entry entry = {
		.thread	= al->thread,
		.comm = thread__comm(al->thread),
		.cgroup_id = {
			.dev = ns ? ns->link_info[CGROUP_NS_INDEX].dev : 0,
			.ino = ns ? ns->link_info[CGROUP_NS_INDEX].ino : 0,
		},
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.srcline = (char *) al->srcline,
		.socket	 = al->socket,
		.cpu	 = al->cpu,
		.cpumode = al->cpumode,
		.ip	 = al->addr,
		.level	 = al->level,
		.stat = {
			.nr_events = 1,
			.period	= sample->period,
			.weight = sample->weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
		.hists	= hists,
		.branch_info = bi,
		.mem_info = mi,
		.block_info = block_info,
		.transaction = sample->transaction,
		.raw_data = sample->raw_data,
		.raw_size = sample->raw_size,
		.ops = ops,
		.time = hist_time(sample->time),
	}, *he = hists__findnew_entry(hists, &entry, al, sample_self);

	if (!hists->has_callchains && he && he->callchain_size != 0)
		hists->has_callchains = true;
	if (he && symbol_conf.res_sample)
		hists__res_sample(he, sample);
	return he;
}
struct hist_entry *hists__add_entry(struct hists *hists,
				    struct addr_location *al,
				    struct symbol *sym_parent,
				    struct branch_info *bi,
				    struct mem_info *mi,
				    struct perf_sample *sample,
				    bool sample_self)
{
	return __hists__add_entry(hists, al, sym_parent, bi, mi, NULL,
				  sample, sample_self, NULL);
}

struct hist_entry *hists__add_entry_ops(struct hists *hists,
					struct hist_entry_ops *ops,
					struct addr_location *al,
					struct symbol *sym_parent,
					struct branch_info *bi,
					struct mem_info *mi,
					struct perf_sample *sample,
					bool sample_self)
{
	return __hists__add_entry(hists, al, sym_parent, bi, mi, NULL,
				  sample, sample_self, ops);
}

struct hist_entry *hists__add_entry_block(struct hists *hists,
					  struct addr_location *al,
					  struct block_info *block_info)
{
	struct hist_entry entry = {
		.block_info = block_info,
		.hists = hists,
	}, *he = hists__findnew_entry(hists, &entry, al, false);

	return he;
}
static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
		    struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
			struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_sample *sample = iter->sample;
	struct mem_info *mi;

	mi = sample__resolve_mem(sample, al);
	if (mi == NULL)
		return -ENOMEM;

	iter->priv = mi;
	return 0;
}
static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	u64 cost;
	struct mem_info *mi = iter->priv;
	struct hists *hists = evsel__hists(iter->evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	if (mi == NULL)
		return -EINVAL;

	cost = sample->weight;
	if (!cost)
		cost = 1;

	/*
	 * must pass period=weight in order to get the correct
	 * sorting from hists__collapse_resort() which is solely
	 * based on periods.  We want sorting to be done on
	 * nr_events * weight, and this is indirectly achieved by
	 * passing period=weight here and via the
	 * he_stat__add_period() function.
	 */
	sample->period = cost;

	he = hists__add_entry(hists, al, iter->parent, NULL, mi,
			      sample, true);
	if (!he)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
		      struct addr_location *al __maybe_unused)
{
	struct evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he = iter->he;
	int err = -EINVAL;

	if (he == NULL)
		goto out;

	hists__inc_nr_samples(hists, he->filtered);

	err = hist_entry__append_callchain(he, iter->sample);

out:
	/*
	 * We don't need to free iter->priv (mem_info) here since the mem info
	 * was either already freed in hists__findnew_entry() or passed to a
	 * new hist entry by hist_entry__new().
	 */
	iter->priv = NULL;

	iter->he = NULL;
	return err;
}
static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_sample *sample = iter->sample;

	bi = sample__resolve_bstack(sample, al);
	if (!bi)
		return -ENOMEM;

	iter->curr = 0;
	iter->total = sample->branch_stack->nr;

	iter->priv = bi;
	return 0;
}

static int
iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused,
			     struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi = iter->priv;
	int i = iter->curr;

	if (bi == NULL)
		return 0;

	if (iter->curr >= iter->total)
		return 0;

	al->map = bi[i].to.map;
	al->sym = bi[i].to.sym;
	al->addr = bi[i].to.addr;
	return 1;
}

static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he = NULL;
	int i = iter->curr;
	int err = 0;

	bi = iter->priv;

	if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
		goto out;

	/*
	 * The report shows the percentage of total branches captured
	 * and not events sampled.  Thus we use a pseudo period of 1.
	 */
	sample->period = 1;
	sample->weight = bi->flags.cycles ? bi->flags.cycles : 1;

	he = hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
			      sample, true);
	if (he == NULL)
		return -ENOMEM;

	hists__inc_nr_samples(hists, he->filtered);

out:
	iter->he = he;
	iter->curr++;
	return err;
}

static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return iter->curr >= iter->total ? 0 : -1;
}
static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
			  struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
			      sample, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	struct hist_entry *he = iter->he;
	struct evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;

	if (he == NULL)
		return 0;

	iter->he = NULL;

	hists__inc_nr_samples(evsel__hists(evsel), he->filtered);

	return hist_entry__append_callchain(he, sample);
}
static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
			      struct addr_location *al __maybe_unused)
{
	struct hist_entry **he_cache;

	callchain_cursor_commit(&callchain_cursor);

	/*
	 * This is for detecting cycles or recursions so that they're
	 * cumulated only one time to prevent entries more than 100%
	 * overhead.
	 */
	he_cache = malloc(sizeof(*he_cache) * (callchain_cursor.nr + 1));
	if (he_cache == NULL)
		return -ENOMEM;

	iter->priv = he_cache;
	iter->curr = 0;

	return 0;
}

static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
				 struct addr_location *al)
{
	struct evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	int err = 0;

	he = hists__add_entry(hists, al, iter->parent, NULL, NULL,
			      sample, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	hist_entry__append_callchain(he, sample);

	/*
	 * We need to re-initialize the cursor since callchain_append()
	 * advanced the cursor to the end.
	 */
	callchain_cursor_commit(&callchain_cursor);

	hists__inc_nr_samples(hists, he->filtered);

	return err;
}

static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
			   struct addr_location *al)
{
	struct callchain_cursor_node *node;

	node = callchain_cursor_current(&callchain_cursor);
	if (node == NULL)
		return 0;

	return fill_callchain_info(al, node, iter->hide_unresolved);
}

static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
			       struct addr_location *al)
{
	struct evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	struct hist_entry he_tmp = {
		.hists = evsel__hists(evsel),
		.cpu = al->cpu,
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ip = al->addr,
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.srcline = (char *) al->srcline,
		.parent = iter->parent,
		.raw_data = sample->raw_data,
		.raw_size = sample->raw_size,
	};
	int i;
	struct callchain_cursor cursor;

	callchain_cursor_snapshot(&cursor, &callchain_cursor);

	callchain_cursor_advance(&callchain_cursor);

	/*
	 * Check if there's duplicate entries in the callchain.
	 * It's possible that it has cycles or recursive calls.
	 */
	for (i = 0; i < iter->curr; i++) {
		if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
			/* to avoid calling callback function */
			iter->he = NULL;
			return 0;
		}
	}

	he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
			      sample, false);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
		callchain_append(he->callchain, &cursor, sample->period);
	return 0;
}

static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return 0;
}
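
/*
 * Iterator ops tables: each sample type plugs its own callbacks into
 * hist_entry_iter__add() below.  prepare_entry() resolves per-sample
 * data, add_single_entry() adds the primary entry, next_entry() and
 * add_next_entry() walk any additional records (branch stack entries,
 * callchain nodes), and finish_entry() cleans up.
 */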
const struct hist_iter_ops hist_iter_mem = {
	.prepare_entry		= iter_prepare_mem_entry,
	.add_single_entry	= iter_add_single_mem_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
	.prepare_entry		= iter_prepare_branch_entry,
	.add_single_entry	= iter_add_single_branch_entry,
	.next_entry		= iter_next_branch_entry,
	.add_next_entry		= iter_add_next_branch_entry,
	.finish_entry		= iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
	.prepare_entry		= iter_prepare_normal_entry,
	.add_single_entry	= iter_add_single_normal_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
	.prepare_entry		= iter_prepare_cumulative_entry,
	.add_single_entry	= iter_add_single_cumulative_entry,
	.next_entry		= iter_next_cumulative_entry,
	.add_next_entry		= iter_add_next_cumulative_entry,
	.finish_entry		= iter_finish_cumulative_entry,
};
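
/*
 * Driver for the ops tables above: resolves the callchain once, then
 * runs prepare -> add_single -> (next/add_next)* -> finish, invoking
 * the optional add_entry_cb after each successfully added entry.
 * finish_entry() runs on both success and failure paths so per-sample
 * state is released.
 */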
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
			 int max_stack_depth, void *arg)
{
	int err, err2;
	struct map *alm = NULL;

	if (al)
		alm = map__get(al->map);

	err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
					iter->evsel, al, max_stack_depth);
	if (err) {
		map__put(alm);
		return err;
	}

	err = iter->ops->prepare_entry(iter, al);
	if (err)
		goto out;

	err = iter->ops->add_single_entry(iter, al);
	if (err)
		goto out;

	if (iter->he && iter->add_entry_cb) {
		err = iter->add_entry_cb(iter, al, true, arg);
		if (err)
			goto out;
	}

	while (iter->ops->next_entry(iter, al)) {
		err = iter->ops->add_next_entry(iter, al);
		if (err)
			break;

		if (iter->he && iter->add_entry_cb) {
			err = iter->add_entry_cb(iter, al, false, arg);
			if (err)
				goto out;
		}
	}

out:
	err2 = iter->ops->finish_entry(iter, al);
	if (!err)
		err = err2;

	map__put(alm);

	return err;
}
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct hists *hists = left->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    !perf_hpp__defined_dynamic_entry(fmt, hists))
			continue;

		cmp = fmt->cmp(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct hists *hists = left->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    !perf_hpp__defined_dynamic_entry(fmt, hists))
			continue;

		cmp = fmt->collapse(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}
void hist_entry__delete(struct hist_entry *he)
{
	struct hist_entry_ops *ops = he->ops;

	thread__zput(he->thread);
	map__zput(he->ms.map);

	if (he->branch_info) {
		map__zput(he->branch_info->from.map);
		map__zput(he->branch_info->to.map);
		free_srcline(he->branch_info->srcline_from);
		free_srcline(he->branch_info->srcline_to);
		zfree(&he->branch_info);
	}

	if (he->mem_info) {
		map__zput(he->mem_info->iaddr.map);
		map__zput(he->mem_info->daddr.map);
		mem_info__zput(he->mem_info);
	}

	if (he->block_info)
		block_info__zput(he->block_info);

	zfree(&he->res_samples);
	zfree(&he->stat_acc);
	free_srcline(he->srcline);
	if (he->srcfile && he->srcfile[0])
		zfree(&he->srcfile);
	free_callchain(he->callchain);
	zfree(&he->trace_output);
	zfree(&he->raw_data);
	ops->free(he);
}
/*
 * If this is not the last column, then we need to pad it according to the
 * pre-calculated max length for this column, otherwise don't bother adding
 * spaces because that would break viewing this with, for instance, 'less',
 * that would show tons of trailing spaces when a long C++ demangled method
 * name is sampled.
 */
int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
				   struct perf_hpp_fmt *fmt, int printed)
{
	if (!list_is_last(&fmt->list, &he->hists->hpp_list->fields)) {
		const int width = fmt->width(fmt, hpp, he->hists);
		if (printed < width) {
			advance_hpp(hpp, printed);
			printed = scnprintf(hpp->buf, hpp->size, "%-*s", width - printed, " ");
		}
	}

	return printed;
}
/*
 * collapse the histogram
 */

static void hists__apply_filters(struct hists *hists, struct hist_entry *he);
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *he,
				       enum hist_filter type);

typedef bool (*fmt_chk_fn)(struct perf_hpp_fmt *fmt);

static bool check_thread_entry(struct perf_hpp_fmt *fmt)
{
	return perf_hpp__is_thread_entry(fmt) || perf_hpp__is_comm_entry(fmt);
}
static void hist_entry__check_and_remove_filter(struct hist_entry *he,
						enum hist_filter type,
						fmt_chk_fn check)
{
	struct perf_hpp_fmt *fmt;
	bool type_match = false;
	struct hist_entry *parent = he->parent_he;

	switch (type) {
	case HIST_FILTER__THREAD:
		if (symbol_conf.comm_list == NULL &&
		    symbol_conf.pid_list == NULL &&
		    symbol_conf.tid_list == NULL)
			return;
		break;
	case HIST_FILTER__DSO:
		if (symbol_conf.dso_list == NULL)
			return;
		break;
	case HIST_FILTER__SYMBOL:
		if (symbol_conf.sym_list == NULL)
			return;
		break;
	case HIST_FILTER__PARENT:
	case HIST_FILTER__GUEST:
	case HIST_FILTER__HOST:
	case HIST_FILTER__SOCKET:
	case HIST_FILTER__C2C:
	default:
		return;
	}

	/* if it's filtered by own fmt, it has to have filter bits */
	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
		if (check(fmt)) {
			type_match = true;
			break;
		}
	}

	if (type_match) {
		/*
		 * If the filter is for the current level entry, propagate
		 * the filter marker to parents.  The marker bit was
		 * already set by default so it only needs to clear
		 * non-filtered entries.
		 */
		if (!(he->filtered & (1 << type))) {
			while (parent) {
				parent->filtered &= ~(1 << type);
				parent = parent->parent_he;
			}
		}
	} else {
		/*
		 * If the current entry doesn't have matching formats, set
		 * the filter marker for upper level entries.  It will be
		 * cleared if its lower level entries are not filtered.
		 *
		 * For lower-level entries, it inherits the parent's
		 * filter bit so that lower level entries of a
		 * non-filtered entry won't set the filter marker.
		 */
		if (parent == NULL)
			he->filtered |= (1 << type);
		else
			he->filtered |= (parent->filtered & (1 << type));
	}
}
static void hist_entry__apply_hierarchy_filters(struct hist_entry *he)
{
	hist_entry__check_and_remove_filter(he, HIST_FILTER__THREAD,
					    check_thread_entry);

	hist_entry__check_and_remove_filter(he, HIST_FILTER__DSO,
					    perf_hpp__is_dso_entry);

	hist_entry__check_and_remove_filter(he, HIST_FILTER__SYMBOL,
					    perf_hpp__is_sym_entry);

	hists__apply_filters(he->hists, he);
}
static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
						 struct rb_root_cached *root,
						 struct hist_entry *he,
						 struct hist_entry *parent_he,
						 struct perf_hpp_list *hpp_list)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter, *new;
	struct perf_hpp_fmt *fmt;
	int64_t cmp;
	bool leftmost = true;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = 0;
		perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
			cmp = fmt->collapse(fmt, iter, he);
			if (cmp)
				break;
		}

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);
			return iter;
		}

		if (cmp < 0)
			p = &parent->rb_left;
		else {
			p = &parent->rb_right;
			leftmost = false;
		}
	}

	new = hist_entry__new(he, true);
	if (new == NULL)
		return NULL;

	hists->nr_entries++;

	/* save related format list for output */
	new->hpp_list = hpp_list;
	new->parent_he = parent_he;

	hist_entry__apply_hierarchy_filters(new);

	/* some fields are now passed to 'new' */
	perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
		if (perf_hpp__is_trace_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
			he->trace_output = NULL;
		else
			new->trace_output = NULL;

		if (perf_hpp__is_srcline_entry(fmt))
			he->srcline = NULL;
		else
			new->srcline = NULL;

		if (perf_hpp__is_srcfile_entry(fmt))
			he->srcfile = NULL;
		else
			new->srcfile = NULL;
	}

	rb_link_node(&new->rb_node_in, parent, p);
	rb_insert_color_cached(&new->rb_node_in, root, leftmost);
	return new;
}
static int hists__hierarchy_insert_entry(struct hists *hists,
					 struct rb_root_cached *root,
					 struct hist_entry *he)
{
	struct perf_hpp_list_node *node;
	struct hist_entry *new_he = NULL;
	struct hist_entry *parent = NULL;
	int depth = 0;
	int ret = 0;

	list_for_each_entry(node, &hists->hpp_formats, list) {
		/* skip period (overhead) and elided columns */
		if (node->level == 0 || node->skip)
			continue;

		/* insert copy of 'he' for each fmt into the hierarchy */
		new_he = hierarchy_insert_entry(hists, root, he, parent, &node->hpp);
		if (new_he == NULL) {
			ret = -1;
			break;
		}

		root = &new_he->hroot_in;
		new_he->depth = depth++;
		parent = new_he;
	}

	if (new_he) {
		new_he->leaf = true;

		if (hist_entry__has_callchains(new_he) &&
		    symbol_conf.use_callchain) {
			callchain_cursor_reset(&callchain_cursor);
			if (callchain_merge(&callchain_cursor,
					    new_he->callchain,
					    he->callchain) < 0)
				ret = -1;
		}
	}

	/* 'he' is no longer used */
	hist_entry__delete(he);

	/* return 0 (or -1) since it already applied filters */
	return ret;
}
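
/*
 * Merge 'he' into the collapsed tree keyed by hist_entry__collapse().
 * On a match the stats (and callchains, if any) are folded into the
 * existing node and 'he' is freed.  Returns 1 if the entry was newly
 * inserted, 0 if merged, -1 on callchain merge failure.
 */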
static int hists__collapse_insert_entry(struct hists *hists,
					struct rb_root_cached *root,
					struct hist_entry *he)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;
	bool leftmost = true;

	if (symbol_conf.report_hierarchy)
		return hists__hierarchy_insert_entry(hists, root, he);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			int ret = 0;

			he_stat__add_stat(&iter->stat, &he->stat);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_stat(iter->stat_acc, he->stat_acc);

			if (hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				if (callchain_merge(&callchain_cursor,
						    iter->callchain,
						    he->callchain) < 0)
					ret = -1;
			}
			hist_entry__delete(he);
			return ret;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color_cached(&he->rb_node_in, root, leftmost);
	return 1;
}
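
/*
 * hists->entries_in double-buffers between entries_in_array[0] and [1]:
 * under hists->lock the current input tree is handed to the collapser
 * and new samples start flowing into the other tree, so collapsing can
 * proceed without blocking concurrent insertion.
 */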
struct rb_root_cached *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root_cached *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
	hists__filter_entry_by_socket(hists, he);
}
int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root_cached *root;
	struct rb_node *next;
	struct hist_entry *n;
	int ret;

	if (!hists__has(hists, need_collapse))
		return 0;

	hists->nr_entries = 0;

	root = hists__get_rotate_entries_in(hists);

	next = rb_first_cached(root);

	while (next) {
		if (session_done())
			break;
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase_cached(&n->rb_node_in, root);
		ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n);
		if (ret < 0)
			return -1;

		if (ret) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
		if (prog)
			ui_progress__update(prog, 1);
	}
	return 0;
}
static int64_t hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
	struct hists *hists = a->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__should_skip(fmt, a->hists))
			continue;

		cmp = fmt->sort(fmt, a, b);
		if (cmp)
			break;
	}

	return cmp;
}

static void hists__reset_filter_stats(struct hists *hists)
{
	hists->nr_non_filtered_entries = 0;
	hists->stats.total_non_filtered_period = 0;
}

void hists__reset_stats(struct hists *hists)
{
	hists->nr_entries = 0;
	hists->stats.total_period = 0;

	hists__reset_filter_stats(hists);
}

static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
	hists->nr_non_filtered_entries++;
	hists->stats.total_non_filtered_period += h->stat.period;
}

void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered)
		hists__inc_filter_stats(hists, h);

	hists->nr_entries++;
	hists->stats.total_period += h->stat.period;
}
static void hierarchy_recalc_total_periods(struct hists *hists)
{
	struct rb_node *node;
	struct hist_entry *he;

	node = rb_first_cached(&hists->entries);

	hists->stats.total_period = 0;
	hists->stats.total_non_filtered_period = 0;

	/*
	 * recalculate total period using top-level entries only
	 * since lower level entries only see non-filtered entries
	 * but upper level entries have sum of both entries.
	 */
	while (node) {
		he = rb_entry(node, struct hist_entry, rb_node);
		node = rb_next(node);

		hists->stats.total_period += he->stat.period;
		if (!he->filtered)
			hists->stats.total_non_filtered_period += he->stat.period;
	}
}
static void hierarchy_insert_output_entry(struct rb_root_cached *root,
					  struct hist_entry *he)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct perf_hpp_fmt *fmt;
	bool leftmost = true;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &parent->rb_left;
		else {
			p = &parent->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color_cached(&he->rb_node, root, leftmost);

	/* update column width of dynamic entry */
	perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt))
			fmt->sort(fmt, he, NULL);
	}
}
static void hists__hierarchy_output_resort(struct hists *hists,
					   struct ui_progress *prog,
					   struct rb_root_cached *root_in,
					   struct rb_root_cached *root_out,
					   u64 min_callchain_hits,
					   bool use_callchain)
{
	struct rb_node *node;
	struct hist_entry *he;

	*root_out = RB_ROOT_CACHED;
	node = rb_first_cached(root_in);

	while (node) {
		he = rb_entry(node, struct hist_entry, rb_node_in);
		node = rb_next(node);

		hierarchy_insert_output_entry(root_out, he);

		if (prog)
			ui_progress__update(prog, 1);

		hists->nr_entries++;
		if (!he->filtered) {
			hists->nr_non_filtered_entries++;
			hists__calc_col_len(hists, he);
		}

		if (!he->leaf) {
			hists__hierarchy_output_resort(hists, prog,
						       &he->hroot_in,
						       &he->hroot_out,
						       min_callchain_hits,
						       use_callchain);
			continue;
		}

		if (!use_callchain)
			continue;

		if (callchain_param.mode == CHAIN_GRAPH_REL) {
			u64 total = he->stat.period;

			if (symbol_conf.cumulate_callchain)
				total = he->stat_acc->period;

			min_callchain_hits = total * (callchain_param.min_percent / 100);
		}

		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);
	}
}
static void __hists__insert_output_entry(struct rb_root_cached *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits,
					 bool use_callchain)
{
	struct rb_node **p = &entries->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct perf_hpp_fmt *fmt;
	bool leftmost = true;

	if (use_callchain) {
		if (callchain_param.mode == CHAIN_GRAPH_REL) {
			u64 total = he->stat.period;

			if (symbol_conf.cumulate_callchain)
				total = he->stat_acc->period;

			min_callchain_hits = total * (callchain_param.min_percent / 100);
		}
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);
	}

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color_cached(&he->rb_node, entries, leftmost);

	perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    perf_hpp__defined_dynamic_entry(fmt, he->hists))
			fmt->sort(fmt, he, NULL);	/* update column width */
	}
}
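
/*
 * Second sort stage: move every collapsed entry into hists->entries,
 * ordered by the output sort keys (hist_entry__sort), recomputing
 * stats and column widths on the way.  min_callchain_hits prunes
 * callchains below callchain_param.min_percent of the relevant total.
 */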
static void output_resort(struct hists *hists, struct ui_progress *prog,
			  bool use_callchain, hists__resort_cb_t cb,
			  void *cb_arg)
{
	struct rb_root_cached *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 callchain_total;
	u64 min_callchain_hits;

	callchain_total = hists->callchain_period;
	if (symbol_conf.filter_relative)
		callchain_total = hists->callchain_non_filtered_period;

	min_callchain_hits = callchain_total * (callchain_param.min_percent / 100);

	hists__reset_stats(hists);
	hists__reset_col_len(hists);

	if (symbol_conf.report_hierarchy) {
		hists__hierarchy_output_resort(hists, prog,
					       &hists->entries_collapsed,
					       &hists->entries,
					       min_callchain_hits,
					       use_callchain);
		hierarchy_recalc_total_periods(hists);
		return;
	}

	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first_cached(root);
	hists->entries = RB_ROOT_CACHED;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		if (cb && cb(n, cb_arg))
			continue;

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
		hists__inc_stats(hists, n);

		if (!n->filtered)
			hists__calc_col_len(hists, n);

		if (prog)
			ui_progress__update(prog, 1);
	}
}
void perf_evsel__output_resort_cb(struct evsel *evsel, struct ui_progress *prog,
				  hists__resort_cb_t cb, void *cb_arg)
{
	bool use_callchain;

	if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
		use_callchain = evsel__has_callchain(evsel);
	else
		use_callchain = symbol_conf.use_callchain;

	use_callchain |= symbol_conf.show_branchflag_count;

	output_resort(evsel__hists(evsel), prog, use_callchain, cb, cb_arg);
}

void perf_evsel__output_resort(struct evsel *evsel, struct ui_progress *prog)
{
	return perf_evsel__output_resort_cb(evsel, prog, NULL, NULL);
}

void hists__output_resort(struct hists *hists, struct ui_progress *prog)
{
	output_resort(hists, prog, symbol_conf.use_callchain, NULL, NULL);
}

void hists__output_resort_cb(struct hists *hists, struct ui_progress *prog,
			     hists__resort_cb_t cb)
{
	output_resort(hists, prog, symbol_conf.use_callchain, cb, NULL);
}
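
/*
 * Hierarchy-mode tree navigation helpers for the browsers: they walk
 * the nested hroot_out trees in display order, descending into a child
 * level only when the entry is unfolded (or when forced via
 * HMD_FORCE_CHILD).
 */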
static bool can_goto_child(struct hist_entry *he, enum hierarchy_move_dir hmd)
{
	if (he->leaf || hmd == HMD_FORCE_SIBLING)
		return false;

	if (he->unfolded || hmd == HMD_FORCE_CHILD)
		return true;

	return false;
}

struct rb_node *rb_hierarchy_last(struct rb_node *node)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	while (can_goto_child(he, HMD_NORMAL)) {
		node = rb_last(&he->hroot_out.rb_root);
		he = rb_entry(node, struct hist_entry, rb_node);
	}
	return node;
}

struct rb_node *__rb_hierarchy_next(struct rb_node *node, enum hierarchy_move_dir hmd)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	if (can_goto_child(he, hmd))
		node = rb_first_cached(&he->hroot_out);
	else
		node = rb_next(node);

	while (node == NULL) {
		he = he->parent_he;
		if (he == NULL)
			break;

		node = rb_next(&he->rb_node);
	}
	return node;
}

struct rb_node *rb_hierarchy_prev(struct rb_node *node)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	node = rb_prev(node);
	if (node)
		return rb_hierarchy_last(node);

	he = he->parent_he;
	if (he == NULL)
		return NULL;

	return &he->rb_node;
}
bool hist_entry__has_hierarchy_children(struct hist_entry *he, float limit)
{
	struct rb_node *node;
	struct hist_entry *child;
	float percent;

	if (he->leaf)
		return false;

	node = rb_first_cached(&he->hroot_out);
	child = rb_entry(node, struct hist_entry, rb_node);

	while (node && child->filtered) {
		node = rb_next(node);
		child = rb_entry(node, struct hist_entry, rb_node);
	}

	if (node)
		percent = hist_entry__get_percent_limit(child);
	else
		percent = 0;

	return node && percent >= limit;
}
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);

	if (symbol_conf.report_hierarchy) {
		struct hist_entry *parent = h->parent_he;

		while (parent) {
			he_stat__add_stat(&parent->stat, &h->stat);

			parent->filtered &= ~(1 << filter);

			if (parent->filtered)
				goto next;

			/* force fold unfiltered entry for simplicity */
			parent->unfolded = false;
			parent->has_no_entry = false;
			parent->row_offset = 0;
			parent->nr_rows = 0;
next:
			parent = parent->parent_he;
		}
	}

	if (h->filtered)
		return;

	/* force fold unfiltered entry for simplicity */
	h->unfolded = false;
	h->has_no_entry = false;
	h->row_offset = 0;
	h->nr_rows = 0;

	hists->stats.nr_non_filtered_samples += h->stat.nr_events;

	hists__inc_filter_stats(hists, h);
	hists__calc_col_len(hists, h);
}
static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he)
{
	if ((hists->socket_filter > -1) &&
	    (he->socket != hists->socket_filter)) {
		he->filtered |= (1 << HIST_FILTER__SOCKET);
		return true;
	}

	return false;
}
typedef bool (*filter_fn_t)(struct hists *hists, struct hist_entry *he);

static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t filter)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first_cached(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (filter(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, type);
	}
}
static void resort_filtered_entry(struct rb_root_cached *root,
				  struct hist_entry *he)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct rb_root_cached new_root = RB_ROOT_CACHED;
	struct rb_node *nd;
	bool leftmost = true;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color_cached(&he->rb_node, root, leftmost);

	if (he->leaf || he->filtered)
		return;

	nd = rb_first_cached(&he->hroot_out);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		nd = rb_next(nd);
		rb_erase_cached(&h->rb_node, &he->hroot_out);

		resort_filtered_entry(&new_root, h);
	}

	he->hroot_out = new_root;
}
static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg)
{
	struct rb_node *nd;
	struct rb_root_cached new_root = RB_ROOT_CACHED;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	nd = rb_first_cached(&hists->entries);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
		int ret;

		ret = hist_entry__filter(h, type, arg);

		/*
		 * case 1. non-matching type
		 * zero out the period, set filter marker and move to child
		 */
		if (ret < 0) {
			memset(&h->stat, 0, sizeof(h->stat));
			h->filtered |= (1 << type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_CHILD);
		}
		/*
		 * case 2. matched type (filter out)
		 * set filter marker and move to next
		 */
		else if (ret == 1) {
			h->filtered |= (1 << type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
		}
		/*
		 * case 3. ok (not filtered)
		 * add period to hists and parents, erase the filter marker
		 * and move to next sibling
		 */
		else {
			hists__remove_entry_filter(hists, h, type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
		}
	}

	hierarchy_recalc_total_periods(hists);

	/*
	 * resort output after applying a new filter since a filter in a
	 * lower hierarchy can change periods in an upper hierarchy.
	 */
	nd = rb_first_cached(&hists->entries);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		nd = rb_next(nd);
		rb_erase_cached(&h->rb_node, &hists->entries);

		resort_filtered_entry(&new_root, h);
	}

	hists->entries = new_root;
}
void hists__filter_by_thread(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__THREAD,
					hists->thread_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__THREAD,
				      hists__filter_entry_by_thread);
}

void hists__filter_by_dso(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__DSO,
					hists->dso_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__DSO,
				      hists__filter_entry_by_dso);
}

void hists__filter_by_symbol(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__SYMBOL,
					hists->symbol_filter_str);
	else
		hists__filter_by_type(hists, HIST_FILTER__SYMBOL,
				      hists__filter_entry_by_symbol);
}

void hists__filter_by_socket(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__SOCKET,
					&hists->socket_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__SOCKET,
				      hists__filter_entry_by_socket);
}
void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}

void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
	events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
	if (!filtered)
		hists->stats.nr_non_filtered_samples++;
}
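
/*
 * Usage sketch: the iterator callbacks earlier in this file bump these
 * once per processed sample, roughly like
 *
 *	hists__inc_nr_samples(hists, he->filtered);
 *
 * so nr_events[0] counts every event, nr_events[PERF_RECORD_SAMPLE] only
 * samples, and nr_non_filtered_samples only samples passing all filters.
 */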
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root_cached *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	bool leftmost = true;

	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_root.rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	he = hist_entry__new(pair, true);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		if (symbol_conf.cumulate_callchain)
			memset(he->stat_acc, 0, sizeof(he->stat));
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color_cached(&he->rb_node_in, root, leftmost);
		hists__inc_stats(hists, he);
		he->dummy = true;
	}
out:
	return he;
}
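
/*
 * Note: the dummy entry's periods are zeroed before hists__inc_stats(), so
 * it bumps the entry counts without adding to total_period; the real
 * numbers come from the pair it will be linked to.
 */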
static struct hist_entry *add_dummy_hierarchy_entry(struct hists *hists,
						    struct rb_root_cached *root,
						    struct hist_entry *pair)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	struct perf_hpp_fmt *fmt;
	bool leftmost = true;

	p = &root->rb_root.rb_node;
	while (*p != NULL) {
		int64_t cmp = 0;

		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
			cmp = fmt->collapse(fmt, he, pair);
			if (cmp)
				break;
		}

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &parent->rb_left;
		else {
			p = &parent->rb_right;
			leftmost = false;
		}
	}

	he = hist_entry__new(pair, true);
	if (he) {
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color_cached(&he->rb_node_in, root, leftmost);

		he->dummy = true;
		he->hists = hists;
		memset(&he->stat, 0, sizeof(he->stat));
		hists__inc_stats(hists, he);
	}
out:
	return he;
}
static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (hists__has(hists, need_collapse))
		n = hists->entries_collapsed.rb_root.rb_node;
	else
		n = hists->entries_in->rb_root.rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}
static struct hist_entry *hists__find_hierarchy_entry(struct rb_root_cached *root,
						      struct hist_entry *he)
{
	struct rb_node *n = root->rb_root.rb_node;

	while (n) {
		struct hist_entry *iter;
		struct perf_hpp_fmt *fmt;
		int64_t cmp = 0;

		iter = rb_entry(n, struct hist_entry, rb_node_in);
		perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
			cmp = fmt->collapse(fmt, iter, he);
			if (cmp)
				break;
		}

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}
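
/*
 * Unlike hists__find_entry() above, this compares one hierarchy level at a
 * time: in hierarchy mode each entry's hpp_list carries only that level's
 * sort keys, so fmt->collapse() runs per key until one of them differs.
 */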
static void hists__match_hierarchy(struct rb_root_cached *leader_root,
				   struct rb_root_cached *other_root)
{
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	for (nd = rb_first_cached(leader_root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_hierarchy_entry(other_root, pos);

		if (pair) {
			hist_entry__add_pair(pair, pos);
			hists__match_hierarchy(&pos->hroot_in, &pair->hroot_in);
		}
	}
}
/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root_cached *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (symbol_conf.report_hierarchy) {
		/* hierarchy report always collapses entries */
		return hists__match_hierarchy(&leader->entries_collapsed,
					      &other->entries_collapsed);
	}

	if (hists__has(leader, need_collapse))
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}
static int hists__link_hierarchy(struct hists *leader_hists,
				 struct hist_entry *parent,
				 struct rb_root_cached *leader_root,
				 struct rb_root_cached *other_root)
{
	struct rb_node *nd;
	struct hist_entry *pos, *leader;

	for (nd = rb_first_cached(other_root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (hist_entry__has_pairs(pos)) {
			bool found = false;

			list_for_each_entry(leader, &pos->pairs.head, pairs.node) {
				if (leader->hists == leader_hists) {
					found = true;
					break;
				}
			}
			if (!found)
				return -1;
		} else {
			leader = add_dummy_hierarchy_entry(leader_hists,
							   leader_root, pos);
			if (leader == NULL)
				return -1;

			/* the parent must come from the leader hierarchy, not from pos */
			leader->parent_he = parent;

			hist_entry__add_pair(pos, leader);
		}

		if (!pos->leaf) {
			if (hists__link_hierarchy(leader_hists, leader,
						  &leader->hroot_in,
						  &pos->hroot_in) < 0)
				return -1;
		}
	}
	return 0;
}
/*
 * Look for entries in the other hists that are not present in the leader;
 * if we find them, add a dummy entry on the leader hists, with period=0,
 * nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root_cached *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (symbol_conf.report_hierarchy) {
		/* hierarchy report always collapses entries */
		return hists__link_hierarchy(leader, NULL,
					     &leader->entries_collapsed,
					     &other->entries_collapsed);
	}

	if (hists__has(other, need_collapse))
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}
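
/*
 * Usage sketch (roughly how 'perf diff' pairs a baseline with each other
 * data file; error handling trimmed): match the entries both sides have,
 * then link the ones only the other side has, so every row can be printed
 * for both:
 *
 *	hists__match(leader, other);
 *	hists__link(leader, other);
 */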
int hists__unlink(struct hists *hists)
{
	struct rb_root_cached *root;
	struct rb_node *nd;
	struct hist_entry *pos;

	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);
		list_del_init(&pos->pairs.node);
	}

	return 0;
}
void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
			  struct perf_sample *sample, bool nonany_branch_mode)
{
	struct branch_info *bi;

	/* If we have branch cycles, always annotate them. */
	if (bs && bs->nr && bs->entries[0].flags.cycles) {
		int i;

		bi = sample__resolve_bstack(sample, al);
		if (bi) {
			struct addr_map_symbol *prev = NULL;

			/*
			 * Ignore errors, we still want to process the
			 * other entries.
			 *
			 * For non-standard branch modes always
			 * force no IPC (prev == NULL).
			 *
			 * Note that perf stores branches reversed from
			 * program order!
			 */
			for (i = bs->nr - 1; i >= 0; i--) {
				addr_map_symbol__account_cycles(&bi[i].from,
					nonany_branch_mode ? NULL : prev,
					bi[i].flags.cycles);
				prev = &bi[i].to;
			}

			free(bi);
		}
	}
}
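
/*
 * Example of the reversed walk above, for a stack of three branches where
 * bi[0] is the most recent: the loop visits bi[2], bi[1], bi[0], i.e.
 * program order, and when accounting bi[1].from it passes prev = &bi[2].to,
 * so the cycles can be attributed to the block running from the previous
 * branch's target to the current branch's source.
 */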
size_t perf_evlist__fprintf_nr_events(struct evlist *evlist, FILE *fp)
{
	struct evsel *pos;
	size_t ret = 0;

	evlist__for_each_entry(evlist, pos) {
		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
		ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
	}

	return ret;
}
u64 hists__total_period(struct hists *hists)
{
	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
		hists->stats.total_period;
}
int __hists__scnprintf_title(struct hists *hists, char *bf, size_t size, bool show_freq)
{
	char unit;
	int printed;
	const struct dso *dso = hists->dso_filter;
	struct thread *thread = hists->thread_filter;
	int socket_id = hists->socket_filter;
	unsigned long nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE];
	u64 nr_events = hists->stats.total_period;
	struct evsel *evsel = hists_to_evsel(hists);
	const char *ev_name = perf_evsel__name(evsel);
	char buf[512], sample_freq_str[64] = "";
	size_t buflen = sizeof(buf);
	char ref[30] = " show reference callgraph, ";
	bool enable_ref = false;

	if (symbol_conf.filter_relative) {
		nr_samples = hists->stats.nr_non_filtered_samples;
		nr_events = hists->stats.total_non_filtered_period;
	}

	if (perf_evsel__is_group_event(evsel)) {
		struct evsel *pos;

		perf_evsel__group_desc(evsel, buf, buflen);
		ev_name = buf;

		for_each_group_member(pos, evsel) {
			struct hists *pos_hists = evsel__hists(pos);

			if (symbol_conf.filter_relative) {
				nr_samples += pos_hists->stats.nr_non_filtered_samples;
				nr_events += pos_hists->stats.total_non_filtered_period;
			} else {
				nr_samples += pos_hists->stats.nr_events[PERF_RECORD_SAMPLE];
				nr_events += pos_hists->stats.total_period;
			}
		}
	}

	if (symbol_conf.show_ref_callgraph &&
	    strstr(ev_name, "call-graph=no"))
		enable_ref = true;

	if (show_freq)
		scnprintf(sample_freq_str, sizeof(sample_freq_str), " %" PRIu64 " Hz,", evsel->core.attr.sample_freq);

	nr_samples = convert_unit(nr_samples, &unit);
	printed = scnprintf(bf, size,
			    "Samples: %lu%c of event%s '%s',%s%sEvent count (approx.): %" PRIu64,
			    nr_samples, unit, evsel->core.nr_members > 1 ? "s" : "",
			    ev_name, sample_freq_str, enable_ref ? ref : " ", nr_events);

	if (hists->uid_filter_str)
		printed += scnprintf(bf + printed, size - printed,
				     ", UID: %s", hists->uid_filter_str);
	if (thread) {
		if (hists__has(hists, thread)) {
			printed += scnprintf(bf + printed, size - printed,
					     ", Thread: %s(%d)",
					     (thread->comm_set ? thread__comm_str(thread) : ""),
					     thread->tid);
		} else {
			printed += scnprintf(bf + printed, size - printed,
					     ", Thread: %s",
					     (thread->comm_set ? thread__comm_str(thread) : ""));
		}
	}
	if (dso)
		printed += scnprintf(bf + printed, size - printed,
				     ", DSO: %s", dso->short_name);
	if (socket_id > -1)
		printed += scnprintf(bf + printed, size - printed,
				     ", Processor Socket: %d", socket_id);

	return printed;
}
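
/*
 * Example of the resulting title (illustrative values): with no filters
 * set this prints something like
 *
 *	Samples: 8K of event 'cycles:u', Event count (approx.): 9265133635
 *
 * and any active filters are appended, e.g. ", Thread: firefox(2919)" or
 * ", DSO: libc-2.31.so".
 */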
int parse_filter_percentage(const struct option *opt __maybe_unused,
			    const char *arg, int unset __maybe_unused)
{
	if (!strcmp(arg, "relative"))
		symbol_conf.filter_relative = true;
	else if (!strcmp(arg, "absolute"))
		symbol_conf.filter_relative = false;
	else {
		pr_debug("Invalid percentage: %s\n", arg);
		return -1;
	}

	return 0;
}
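
/*
 * This is wired up as the --percentage option callback, e.g.
 *
 *	perf report --percentage relative
 *
 * "relative" computes percentages against the filtered-in total,
 * "absolute" against the total of all samples.
 */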
int perf_hist_config(const char *var, const char *value)
{
	if (!strcmp(var, "hist.percentage"))
		return parse_filter_percentage(NULL, value, 0);

	return 0;
}
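
/*
 * The same knob can be set persistently via perfconfig; a sketch of the
 * stanza this function parses, e.g. in ~/.perfconfig:
 *
 *	[hist]
 *		percentage = absolute
 */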
int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
{
	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT_CACHED;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT_CACHED;
	hists->entries = RB_ROOT_CACHED;
	pthread_mutex_init(&hists->lock, NULL);
	hists->socket_filter = -1;
	hists->hpp_list = hpp_list;
	INIT_LIST_HEAD(&hists->hpp_formats);
	return 0;
}
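
/*
 * Note on the two input trees: entries_in_array[] double-buffers incoming
 * entries, so a collector can keep inserting into one tree, under
 * hists->lock, while another context rotates to and collapses the other
 * (as the live modes such as 'perf top' do); the merged result lands in
 * entries_collapsed and the sorted output in entries.
 */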
static void hists__delete_remaining_entries(struct rb_root_cached *root)
{
	struct rb_node *node;
	struct hist_entry *he;

	while (!RB_EMPTY_ROOT(&root->rb_root)) {
		node = rb_first_cached(root);
		rb_erase_cached(node, root);

		he = rb_entry(node, struct hist_entry, rb_node_in);
		hist_entry__delete(he);
	}
}

static void hists__delete_all_entries(struct hists *hists)
{
	hists__delete_entries(hists);
	hists__delete_remaining_entries(&hists->entries_in_array[0]);
	hists__delete_remaining_entries(&hists->entries_in_array[1]);
	hists__delete_remaining_entries(&hists->entries_collapsed);
}
static void hists_evsel__exit(struct evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);
	struct perf_hpp_fmt *fmt, *pos;
	struct perf_hpp_list_node *node, *tmp;

	hists__delete_all_entries(hists);

	list_for_each_entry_safe(node, tmp, &hists->hpp_formats, list) {
		perf_hpp_list__for_each_format_safe(&node->hpp, fmt, pos) {
			list_del_init(&fmt->list);
			free(fmt);
		}
		list_del_init(&node->list);
		free(node);
	}
}

static int hists_evsel__init(struct evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);

	__hists__init(hists, &perf_hpp_list);
	return 0;
}
int hists__init(void)
{
	int err = perf_evsel__object_config(sizeof(struct hists_evsel),
					    hists_evsel__init,
					    hists_evsel__exit);
	if (err)
		fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);

	return err;
}
void perf_hpp_list__init(struct perf_hpp_list *list)
{
	INIT_LIST_HEAD(&list->fields);
	INIT_LIST_HEAD(&list->sorts);
}