/* tools/perf/util/hist.c */
#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include "evlist.h"
#include "evsel.h"
#include "annotate.h"
#include "ui/progress.h"
#include <math.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
                                       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
                                          struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
                                          struct hist_entry *he);
u16 hists__col_len(struct hists *hists, enum hist_column col)
{
        return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
        hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
        if (len > hists__col_len(hists, col)) {
                hists__set_col_len(hists, col, len);
                return true;
        }
        return false;
}
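
/*
 * The col_len tracking above is grow-only within one resort cycle: callers
 * feed every entry's string widths through hists__new_col_len() and each
 * column ends up just wide enough for its longest value.  A minimal sketch
 * of the intended calling pattern (illustrative only; 'sym_name' is a
 * hypothetical string, not part of this file):
 *
 *      hists__reset_col_len(hists);
 *      // for each visible entry:
 *      //      hists__new_col_len(hists, HISTC_SYMBOL, strlen(sym_name));
 */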
void hists__reset_col_len(struct hists *hists)
{
        enum hist_column col;

        for (col = 0; col < HISTC_NR_COLS; ++col)
                hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
        const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

        if (hists__col_len(hists, dso) < unresolved_col_width &&
            !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
            !symbol_conf.dso_list)
                hists__set_col_len(hists, dso, unresolved_col_width);
}
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
        const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
        int symlen;
        u16 len;

        /*
         * +4 accounts for '[x] ' priv level info
         * +2 accounts for 0x prefix on raw addresses
         * +3 accounts for ' y ' symtab origin info
         */
        if (h->ms.sym) {
                symlen = h->ms.sym->namelen + 4;
                if (verbose)
                        symlen += BITS_PER_LONG / 4 + 2 + 3;
                hists__new_col_len(hists, HISTC_SYMBOL, symlen);
        } else {
                symlen = unresolved_col_width + 4 + 2;
                hists__new_col_len(hists, HISTC_SYMBOL, symlen);
                hists__set_unres_dso_col_len(hists, HISTC_DSO);
        }

        len = thread__comm_len(h->thread);
        if (hists__new_col_len(hists, HISTC_COMM, len))
                hists__set_col_len(hists, HISTC_THREAD, len + 6);

        if (h->ms.map) {
                len = dso__name_len(h->ms.map->dso);
                hists__new_col_len(hists, HISTC_DSO, len);
        }

        if (h->parent)
                hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

        if (h->branch_info) {
                if (h->branch_info->from.sym) {
                        symlen = (int)h->branch_info->from.sym->namelen + 4;
                        if (verbose)
                                symlen += BITS_PER_LONG / 4 + 2 + 3;
                        hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

                        symlen = dso__name_len(h->branch_info->from.map->dso);
                        hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
                        hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
                }

                if (h->branch_info->to.sym) {
                        symlen = (int)h->branch_info->to.sym->namelen + 4;
                        if (verbose)
                                symlen += BITS_PER_LONG / 4 + 2 + 3;
                        hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

                        symlen = dso__name_len(h->branch_info->to.map->dso);
                        hists__new_col_len(hists, HISTC_DSO_TO, symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
                        hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
                }
        }

        if (h->mem_info) {
                if (h->mem_info->daddr.sym) {
                        symlen = (int)h->mem_info->daddr.sym->namelen + 4
                               + unresolved_col_width + 2;
                        hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
                                           symlen);
                        hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
                                           symlen + 1);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
                                           symlen);
                }
                if (h->mem_info->daddr.map) {
                        symlen = dso__name_len(h->mem_info->daddr.map->dso);
                        hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
                                           symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
                }
        } else {
                symlen = unresolved_col_width + 4 + 2;
                hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
                hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
        }

        hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
        hists__new_col_len(hists, HISTC_MEM_TLB, 22);
        hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
        hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
        hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
        hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);

        if (h->transaction)
                hists__new_col_len(hists, HISTC_TRANSACTION,
                                   hist_entry__transaction_len());
}
void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
        struct rb_node *next = rb_first(&hists->entries);
        struct hist_entry *n;
        int row = 0;

        hists__reset_col_len(hists);

        while (next && row++ < max_rows) {
                n = rb_entry(next, struct hist_entry, rb_node);
                if (!n->filtered)
                        hists__calc_col_len(hists, n);
                next = rb_next(&n->rb_node);
        }
}
static void he_stat__add_cpumode_period(struct he_stat *he_stat,
                                        unsigned int cpumode, u64 period)
{
        switch (cpumode) {
        case PERF_RECORD_MISC_KERNEL:
                he_stat->period_sys += period;
                break;
        case PERF_RECORD_MISC_USER:
                he_stat->period_us += period;
                break;
        case PERF_RECORD_MISC_GUEST_KERNEL:
                he_stat->period_guest_sys += period;
                break;
        case PERF_RECORD_MISC_GUEST_USER:
                he_stat->period_guest_us += period;
                break;
        default:
                break;
        }
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
                                u64 weight)
{
        he_stat->period += period;
        he_stat->weight += weight;
        he_stat->nr_events += 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
        dest->period += src->period;
        dest->period_sys += src->period_sys;
        dest->period_us += src->period_us;
        dest->period_guest_sys += src->period_guest_sys;
        dest->period_guest_us += src->period_guest_us;
        dest->nr_events += src->nr_events;
        dest->weight += src->weight;
}
static void he_stat__decay(struct he_stat *he_stat)
{
        he_stat->period = (he_stat->period * 7) / 8;
        he_stat->nr_events = (he_stat->nr_events * 7) / 8;
        /* XXX need decay for weight too? */
}
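
/*
 * Each decay pass multiplies the period by 7/8, so after k passes an idle
 * entry retains (7/8)^k of its original period (roughly 26% after ten
 * passes).  Entries whose period reaches zero are reaped via
 * hists__decay_entries() below; live, top-like sessions rely on this to
 * age out symbols that have stopped producing samples.
 */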
static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
        u64 prev_period = he->stat.period;
        u64 diff;

        if (prev_period == 0)
                return true;

        he_stat__decay(&he->stat);
        if (symbol_conf.cumulate_callchain)
                he_stat__decay(he->stat_acc);

        diff = prev_period - he->stat.period;

        hists->stats.total_period -= diff;
        if (!he->filtered)
                hists->stats.total_non_filtered_period -= diff;

        return he->stat.period == 0;
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
        rb_erase(&he->rb_node, &hists->entries);

        if (sort__need_collapse)
                rb_erase(&he->rb_node_in, &hists->entries_collapsed);

        --hists->nr_entries;
        if (!he->filtered)
                --hists->nr_non_filtered_entries;

        hist_entry__delete(he);
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
        struct rb_node *next = rb_first(&hists->entries);
        struct hist_entry *n;

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node);
                next = rb_next(&n->rb_node);
                if (((zap_user && n->level == '.') ||
                     (zap_kernel && n->level != '.') ||
                     hists__decay_entry(hists, n))) {
                        hists__delete_entry(hists, n);
                }
        }
}

void hists__delete_entries(struct hists *hists)
{
        struct rb_node *next = rb_first(&hists->entries);
        struct hist_entry *n;

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node);
                next = rb_next(&n->rb_node);

                hists__delete_entry(hists, n);
        }
}
/*
 * histogram, sorted on item, collects periods
 */

static struct hist_entry *hist_entry__new(struct hist_entry *template,
                                          bool sample_self)
{
        size_t callchain_size = 0;
        struct hist_entry *he;

        if (symbol_conf.use_callchain)
                callchain_size = sizeof(struct callchain_root);

        he = zalloc(sizeof(*he) + callchain_size);

        if (he != NULL) {
                *he = *template;

                if (symbol_conf.cumulate_callchain) {
                        he->stat_acc = malloc(sizeof(he->stat));
                        if (he->stat_acc == NULL) {
                                free(he);
                                return NULL;
                        }
                        memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
                        if (!sample_self)
                                memset(&he->stat, 0, sizeof(he->stat));
                }

                if (he->ms.map)
                        he->ms.map->referenced = true;

                if (he->branch_info) {
                        /*
                         * This branch info is (a part of) allocated from
                         * sample__resolve_bstack() and will be freed after
                         * adding new entries.  So we need to save a copy.
                         */
                        he->branch_info = malloc(sizeof(*he->branch_info));
                        if (he->branch_info == NULL) {
                                free(he->stat_acc);
                                free(he);
                                return NULL;
                        }

                        memcpy(he->branch_info, template->branch_info,
                               sizeof(*he->branch_info));

                        if (he->branch_info->from.map)
                                he->branch_info->from.map->referenced = true;
                        if (he->branch_info->to.map)
                                he->branch_info->to.map->referenced = true;
                }

                if (he->mem_info) {
                        if (he->mem_info->iaddr.map)
                                he->mem_info->iaddr.map->referenced = true;
                        if (he->mem_info->daddr.map)
                                he->mem_info->daddr.map->referenced = true;
                }

                if (symbol_conf.use_callchain)
                        callchain_init(he->callchain);

                INIT_LIST_HEAD(&he->pairs.node);
                thread__get(he->thread);
        }

        return he;
}
static u8 symbol__parent_filter(const struct symbol *parent)
{
        if (symbol_conf.exclude_other && parent == NULL)
                return 1 << HIST_FILTER__PARENT;
        return 0;
}
static struct hist_entry *add_hist_entry(struct hists *hists,
                                         struct hist_entry *entry,
                                         struct addr_location *al,
                                         bool sample_self)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct hist_entry *he;
        int64_t cmp;
        u64 period = entry->stat.period;
        u64 weight = entry->stat.weight;

        p = &hists->entries_in->rb_node;

        while (*p != NULL) {
                parent = *p;
                he = rb_entry(parent, struct hist_entry, rb_node_in);

                /*
                 * Make sure that it receives arguments in the same order as
                 * hist_entry__collapse() so that we can use an appropriate
                 * function when searching an entry regardless of which sort
                 * keys were used.
                 */
                cmp = hist_entry__cmp(he, entry);

                if (!cmp) {
                        if (sample_self)
                                he_stat__add_period(&he->stat, period, weight);
                        if (symbol_conf.cumulate_callchain)
                                he_stat__add_period(he->stat_acc, period, weight);

                        /*
                         * This mem info was allocated from sample__resolve_mem
                         * and will not be used anymore.
                         */
                        zfree(&entry->mem_info);

                        /* If the map of an existing hist_entry has
                         * become out-of-date due to an exec() or
                         * similar, update it.  Otherwise we will
                         * mis-adjust symbol addresses when computing
                         * the history counter to increment.
                         */
                        if (he->ms.map != entry->ms.map) {
                                he->ms.map = entry->ms.map;
                                if (he->ms.map)
                                        he->ms.map->referenced = true;
                        }
                        goto out;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        he = hist_entry__new(entry, sample_self);
        if (!he)
                return NULL;

        hists->nr_entries++;

        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
        if (sample_self)
                he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
        if (symbol_conf.cumulate_callchain)
                he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
        return he;
}
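
/*
 * Note the ordering contract above: add_hist_entry() compares with
 * hist_entry__cmp(he, entry), tree node first, exactly like
 * hist_entry__collapse() does, so lookups behave the same no matter which
 * sort keys are active.  The resulting rbtree discipline, for reference:
 *
 *      cmp < 0  -> descend left
 *      cmp > 0  -> descend right
 *      cmp == 0 -> merge periods into the existing entry, no new node
 */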
struct hist_entry *__hists__add_entry(struct hists *hists,
                                      struct addr_location *al,
                                      struct symbol *sym_parent,
                                      struct branch_info *bi,
                                      struct mem_info *mi,
                                      u64 period, u64 weight, u64 transaction,
                                      bool sample_self)
{
        struct hist_entry entry = {
                .thread = al->thread,
                .comm = thread__comm(al->thread),
                .ms = {
                        .map = al->map,
                        .sym = al->sym,
                },
                .cpu = al->cpu,
                .cpumode = al->cpumode,
                .ip = al->addr,
                .level = al->level,
                .stat = {
                        .nr_events = 1,
                        .period = period,
                        .weight = weight,
                },
                .parent = sym_parent,
                .filtered = symbol__parent_filter(sym_parent) | al->filtered,
                .hists = hists,
                .branch_info = bi,
                .mem_info = mi,
                .transaction = transaction,
        };

        return add_hist_entry(hists, &entry, al, sample_self);
}
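
/*
 * Usage sketch for __hists__add_entry() (hypothetical caller, mirroring
 * iter_add_single_normal_entry() below; assumes 'al' is a resolved
 * struct addr_location and 'sample' a decoded struct perf_sample):
 *
 *      struct hist_entry *he;
 *
 *      he = __hists__add_entry(hists, &al, NULL, NULL, NULL,
 *                              sample->period, sample->weight,
 *                              sample->transaction, true);
 *      if (he == NULL)
 *              return -ENOMEM;
 */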
static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
                    struct addr_location *al __maybe_unused)
{
        return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
                        struct addr_location *al __maybe_unused)
{
        return 0;
}

static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct perf_sample *sample = iter->sample;
        struct mem_info *mi;

        mi = sample__resolve_mem(sample, al);
        if (mi == NULL)
                return -ENOMEM;

        iter->priv = mi;
        return 0;
}
static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        u64 cost;
        struct mem_info *mi = iter->priv;
        struct hists *hists = evsel__hists(iter->evsel);
        struct hist_entry *he;

        if (mi == NULL)
                return -EINVAL;

        cost = iter->sample->weight;
        if (!cost)
                cost = 1;

        /*
         * We must pass period=weight in order to get the correct
         * sorting from hists__collapse_resort(), which is solely
         * based on periods.  We want sorting to be done on
         * nr_events * weight, and this is indirectly achieved by
         * passing period=weight here and in he_stat__add_period().
         */
        he = __hists__add_entry(hists, al, iter->parent, NULL, mi,
                                cost, cost, 0, true);
        if (!he)
                return -ENOMEM;

        iter->he = he;
        return 0;
}
static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
                      struct addr_location *al __maybe_unused)
{
        struct perf_evsel *evsel = iter->evsel;
        struct hists *hists = evsel__hists(evsel);
        struct hist_entry *he = iter->he;
        int err = -EINVAL;

        if (he == NULL)
                goto out;

        hists__inc_nr_samples(hists, he->filtered);

        err = hist_entry__append_callchain(he, iter->sample);

out:
        /*
         * We don't need to free iter->priv (mem_info) here since
         * the mem info was either already freed in add_hist_entry() or
         * passed to a new hist entry by hist_entry__new().
         */
        iter->priv = NULL;

        iter->he = NULL;
        return err;
}

static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct branch_info *bi;
        struct perf_sample *sample = iter->sample;

        bi = sample__resolve_bstack(sample, al);
        if (!bi)
                return -ENOMEM;

        iter->curr = 0;
        iter->total = sample->branch_stack->nr;

        iter->priv = bi;
        return 0;
}
static int
iter_add_single_branch_entry(struct hist_entry_iter *iter,
                             struct addr_location *al __maybe_unused)
{
        /* to avoid calling callback function */
        iter->he = NULL;

        return 0;
}
static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct branch_info *bi = iter->priv;
        int i = iter->curr;

        if (bi == NULL)
                return 0;

        if (iter->curr >= iter->total)
                return 0;

        al->map = bi[i].to.map;
        al->sym = bi[i].to.sym;
        al->addr = bi[i].to.addr;
        return 1;
}

static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct branch_info *bi;
        struct perf_evsel *evsel = iter->evsel;
        struct hists *hists = evsel__hists(evsel);
        struct hist_entry *he = NULL;
        int i = iter->curr;
        int err = 0;

        bi = iter->priv;

        if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
                goto out;

        /*
         * The report shows the percentage of total branches captured
         * and not events sampled.  Thus we use a pseudo period of 1.
         */
        he = __hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
                                1, 1, 0, true);
        if (he == NULL)
                return -ENOMEM;

        hists__inc_nr_samples(hists, he->filtered);

out:
        iter->he = he;
        iter->curr++;
        return err;
}
static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
                         struct addr_location *al __maybe_unused)
{
        zfree(&iter->priv);
        iter->he = NULL;

        return iter->curr >= iter->total ? 0 : -1;
}
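
/*
 * The branch iterator protocol, for reference: prepare_entry resolves the
 * branch stack into a bi[] array, add_single_entry only clears iter->he so
 * no callback fires for the sample itself, and each next_entry/
 * add_next_entry round consumes bi[iter->curr] until iter->curr reaches
 * iter->total.  finish_entry returning -1 therefore flags an early abort
 * part-way through the branch stack.
 */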
static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
                          struct addr_location *al __maybe_unused)
{
        return 0;
}

static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct perf_evsel *evsel = iter->evsel;
        struct perf_sample *sample = iter->sample;
        struct hist_entry *he;

        he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
                                sample->period, sample->weight,
                                sample->transaction, true);
        if (he == NULL)
                return -ENOMEM;

        iter->he = he;
        return 0;
}

static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
                         struct addr_location *al __maybe_unused)
{
        struct hist_entry *he = iter->he;
        struct perf_evsel *evsel = iter->evsel;
        struct perf_sample *sample = iter->sample;

        if (he == NULL)
                return 0;

        iter->he = NULL;

        hists__inc_nr_samples(evsel__hists(evsel), he->filtered);

        return hist_entry__append_callchain(he, sample);
}
static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
                              struct addr_location *al __maybe_unused)
{
        struct hist_entry **he_cache;

        callchain_cursor_commit(&callchain_cursor);

        /*
         * This is for detecting cycles or recursions so that they're
         * cumulated only once, preventing entries from exceeding 100%
         * overhead.
         */
        he_cache = malloc(sizeof(*he_cache) * (PERF_MAX_STACK_DEPTH + 1));
        if (he_cache == NULL)
                return -ENOMEM;

        iter->priv = he_cache;
        iter->curr = 0;

        return 0;
}
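
/*
 * he_cache holds at most one hist_entry per callchain node plus one for
 * the sample itself, hence PERF_MAX_STACK_DEPTH + 1 slots.  This cache is
 * what bounds cumulative (children) overhead at 100%: a recursive chain
 * such as main -> f -> g -> f (an illustrative example) accumulates into
 * f's entry only once per sample.
 */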
static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
                                 struct addr_location *al)
{
        struct perf_evsel *evsel = iter->evsel;
        struct hists *hists = evsel__hists(evsel);
        struct perf_sample *sample = iter->sample;
        struct hist_entry **he_cache = iter->priv;
        struct hist_entry *he;
        int err = 0;

        he = __hists__add_entry(hists, al, iter->parent, NULL, NULL,
                                sample->period, sample->weight,
                                sample->transaction, true);
        if (he == NULL)
                return -ENOMEM;

        iter->he = he;
        he_cache[iter->curr++] = he;

        hist_entry__append_callchain(he, sample);

        /*
         * We need to re-initialize the cursor since callchain_append()
         * advanced the cursor to the end.
         */
        callchain_cursor_commit(&callchain_cursor);

        hists__inc_nr_samples(hists, he->filtered);

        return err;
}

static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
                           struct addr_location *al)
{
        struct callchain_cursor_node *node;

        node = callchain_cursor_current(&callchain_cursor);
        if (node == NULL)
                return 0;

        return fill_callchain_info(al, node, iter->hide_unresolved);
}
static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
                               struct addr_location *al)
{
        struct perf_evsel *evsel = iter->evsel;
        struct perf_sample *sample = iter->sample;
        struct hist_entry **he_cache = iter->priv;
        struct hist_entry *he;
        struct hist_entry he_tmp = {
                .cpu = al->cpu,
                .thread = al->thread,
                .comm = thread__comm(al->thread),
                .ip = al->addr,
                .ms = {
                        .map = al->map,
                        .sym = al->sym,
                },
                .parent = iter->parent,
        };
        int i;
        struct callchain_cursor cursor;

        callchain_cursor_snapshot(&cursor, &callchain_cursor);

        callchain_cursor_advance(&callchain_cursor);

        /*
         * Check if there are duplicate entries in the callchain.
         * It's possible that it has cycles or recursive calls.
         */
        for (i = 0; i < iter->curr; i++) {
                if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
                        /* to avoid calling callback function */
                        iter->he = NULL;
                        return 0;
                }
        }

        he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
                                sample->period, sample->weight,
                                sample->transaction, false);
        if (he == NULL)
                return -ENOMEM;

        iter->he = he;
        he_cache[iter->curr++] = he;

        if (symbol_conf.use_callchain)
                callchain_append(he->callchain, &cursor, sample->period);
        return 0;
}
static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
                             struct addr_location *al __maybe_unused)
{
        zfree(&iter->priv);
        iter->he = NULL;

        return 0;
}
const struct hist_iter_ops hist_iter_mem = {
        .prepare_entry          = iter_prepare_mem_entry,
        .add_single_entry       = iter_add_single_mem_entry,
        .next_entry             = iter_next_nop_entry,
        .add_next_entry         = iter_add_next_nop_entry,
        .finish_entry           = iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
        .prepare_entry          = iter_prepare_branch_entry,
        .add_single_entry       = iter_add_single_branch_entry,
        .next_entry             = iter_next_branch_entry,
        .add_next_entry         = iter_add_next_branch_entry,
        .finish_entry           = iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
        .prepare_entry          = iter_prepare_normal_entry,
        .add_single_entry       = iter_add_single_normal_entry,
        .next_entry             = iter_next_nop_entry,
        .add_next_entry         = iter_add_next_nop_entry,
        .finish_entry           = iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
        .prepare_entry          = iter_prepare_cumulative_entry,
        .add_single_entry       = iter_add_single_cumulative_entry,
        .next_entry             = iter_next_cumulative_entry,
        .add_next_entry         = iter_add_next_cumulative_entry,
        .finish_entry           = iter_finish_cumulative_entry,
};
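
/*
 * How the four ops tables differ, at a glance (summarizing the callbacks
 * wired above):
 *
 *      normal:     one entry per sample, callchain appended afterwards
 *      mem:        one entry per sample, period = access weight (cost)
 *      branch:     one entry per branch-stack slot, pseudo period of 1
 *      cumulative: one entry per callchain node, callers accumulated
 *                  into stat_acc via sample_self=false
 */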
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
                         struct perf_evsel *evsel, struct perf_sample *sample,
                         int max_stack_depth, void *arg)
{
        int err, err2;

        err = sample__resolve_callchain(sample, &iter->parent, evsel, al,
                                        max_stack_depth);
        if (err)
                return err;

        iter->evsel = evsel;
        iter->sample = sample;

        err = iter->ops->prepare_entry(iter, al);
        if (err)
                goto out;

        err = iter->ops->add_single_entry(iter, al);
        if (err)
                goto out;

        if (iter->he && iter->add_entry_cb) {
                err = iter->add_entry_cb(iter, al, true, arg);
                if (err)
                        goto out;
        }

        while (iter->ops->next_entry(iter, al)) {
                err = iter->ops->add_next_entry(iter, al);
                if (err)
                        break;

                if (iter->he && iter->add_entry_cb) {
                        err = iter->add_entry_cb(iter, al, false, arg);
                        if (err)
                                goto out;
                }
        }

out:
        err2 = iter->ops->finish_entry(iter, al);
        if (!err)
                err = err2;

        return err;
}
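
/*
 * A minimal driver sketch for hist_entry_iter__add() (hypothetical tool
 * code; assumes 'al', 'evsel' and 'sample' were resolved by the caller's
 * sample handler):
 *
 *      struct hist_entry_iter iter = {
 *              .ops = &hist_iter_normal,
 *      };
 *
 *      err = hist_entry_iter__add(&iter, &al, evsel, sample,
 *                                 PERF_MAX_STACK_DEPTH, NULL);
 *
 * Passing an add_entry_cb lets callers hook each created entry; the bool
 * argument distinguishes the single (first) entry from iterated ones.
 */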
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
        struct perf_hpp_fmt *fmt;
        int64_t cmp = 0;

        perf_hpp__for_each_sort_list(fmt) {
                if (perf_hpp__should_skip(fmt))
                        continue;

                cmp = fmt->cmp(fmt, left, right);
                if (cmp)
                        break;
        }

        return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
        struct perf_hpp_fmt *fmt;
        int64_t cmp = 0;

        perf_hpp__for_each_sort_list(fmt) {
                if (perf_hpp__should_skip(fmt))
                        continue;

                cmp = fmt->collapse(fmt, left, right);
                if (cmp)
                        break;
        }

        return cmp;
}

void hist_entry__delete(struct hist_entry *he)
{
        thread__zput(he->thread);
        zfree(&he->branch_info);
        zfree(&he->mem_info);
        zfree(&he->stat_acc);
        free_srcline(he->srcline);
        free_callchain(he->callchain);
        free(he);
}
/*
 * collapse the histogram
 */

static bool hists__collapse_insert_entry(struct hists *hists,
                                         struct rb_root *root,
                                         struct hist_entry *he)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;
        int64_t cmp;

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node_in);

                cmp = hist_entry__collapse(iter, he);

                if (!cmp) {
                        he_stat__add_stat(&iter->stat, &he->stat);
                        if (symbol_conf.cumulate_callchain)
                                he_stat__add_stat(iter->stat_acc, he->stat_acc);

                        if (symbol_conf.use_callchain) {
                                callchain_cursor_reset(&callchain_cursor);
                                callchain_merge(&callchain_cursor,
                                                iter->callchain,
                                                he->callchain);
                        }
                        hist_entry__delete(he);
                        return false;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }
        hists->nr_entries++;

        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color(&he->rb_node_in, root);
        return true;
}
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
        struct rb_root *root;

        pthread_mutex_lock(&hists->lock);

        root = hists->entries_in;
        if (++hists->entries_in > &hists->entries_in_array[1])
                hists->entries_in = &hists->entries_in_array[0];

        pthread_mutex_unlock(&hists->lock);

        return root;
}
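
/*
 * entries_in_array[2] acts as a double buffer: collapsing drains the tree
 * returned here while sample processing keeps inserting into the other
 * half, with hists->lock held only for the pointer flip.  This is what
 * lets a live session resort safely while events continue to stream in.
 */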
static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
        hists__filter_entry_by_dso(hists, he);
        hists__filter_entry_by_thread(hists, he);
        hists__filter_entry_by_symbol(hists, he);
}

void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
        struct rb_root *root;
        struct rb_node *next;
        struct hist_entry *n;

        if (!sort__need_collapse)
                return;

        hists->nr_entries = 0;

        root = hists__get_rotate_entries_in(hists);

        next = rb_first(root);

        while (next) {
                if (session_done())
                        break;
                n = rb_entry(next, struct hist_entry, rb_node_in);
                next = rb_next(&n->rb_node_in);

                rb_erase(&n->rb_node_in, root);
                if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
                        /*
                         * If it wasn't combined with one of the entries already
                         * collapsed, we need to apply the filters that may have
                         * been set by, say, the hist_browser.
                         */
                        hists__apply_filters(hists, n);
                }
                if (prog)
                        ui_progress__update(prog, 1);
        }
}

static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
        struct perf_hpp_fmt *fmt;
        int64_t cmp = 0;

        perf_hpp__for_each_sort_list(fmt) {
                if (perf_hpp__should_skip(fmt))
                        continue;

                cmp = fmt->sort(fmt, a, b);
                if (cmp)
                        break;
        }

        return cmp;
}

static void hists__reset_filter_stats(struct hists *hists)
{
        hists->nr_non_filtered_entries = 0;
        hists->stats.total_non_filtered_period = 0;
}

void hists__reset_stats(struct hists *hists)
{
        hists->nr_entries = 0;
        hists->stats.total_period = 0;

        hists__reset_filter_stats(hists);
}

static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
        hists->nr_non_filtered_entries++;
        hists->stats.total_non_filtered_period += h->stat.period;
}

void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
        if (!h->filtered)
                hists__inc_filter_stats(hists, h);

        hists->nr_entries++;
        hists->stats.total_period += h->stat.period;
}
static void __hists__insert_output_entry(struct rb_root *entries,
                                         struct hist_entry *he,
                                         u64 min_callchain_hits)
{
        struct rb_node **p = &entries->rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;

        if (symbol_conf.use_callchain)
                callchain_param.sort(&he->sorted_chain, he->callchain,
                                     min_callchain_hits, &callchain_param);

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node);

                if (hist_entry__sort(he, iter) > 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&he->rb_node, parent, p);
        rb_insert_color(&he->rb_node, entries);
}
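
/*
 * Note the inverted test above: entries that sort *higher* descend left,
 * so a plain rb_first()/rb_next() walk of hists->entries visits entries
 * in descending order (hottest first) without needing a separate reverse
 * traversal.
 */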
void hists__output_resort(struct hists *hists, struct ui_progress *prog)
{
        struct rb_root *root;
        struct rb_node *next;
        struct hist_entry *n;
        u64 min_callchain_hits;

        min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

        if (sort__need_collapse)
                root = &hists->entries_collapsed;
        else
                root = hists->entries_in;

        next = rb_first(root);
        hists->entries = RB_ROOT;

        hists__reset_stats(hists);
        hists__reset_col_len(hists);

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node_in);
                next = rb_next(&n->rb_node_in);

                __hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
                hists__inc_stats(hists, n);

                if (!n->filtered)
                        hists__calc_col_len(hists, n);

                if (prog)
                        ui_progress__update(prog, 1);
        }
}
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
                                       enum hist_filter filter)
{
        h->filtered &= ~(1 << filter);
        if (h->filtered)
                return;

        /* force fold unfiltered entry for simplicity */
        h->ms.unfolded = false;
        h->row_offset = 0;
        h->nr_rows = 0;

        hists->stats.nr_non_filtered_samples += h->stat.nr_events;

        hists__inc_filter_stats(hists, h);
        hists__calc_col_len(hists, h);
}
static bool hists__filter_entry_by_dso(struct hists *hists,
                                       struct hist_entry *he)
{
        if (hists->dso_filter != NULL &&
            (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
                he->filtered |= (1 << HIST_FILTER__DSO);
                return true;
        }

        return false;
}

void hists__filter_by_dso(struct hists *hists)
{
        struct rb_node *nd;

        hists->stats.nr_non_filtered_samples = 0;

        hists__reset_filter_stats(hists);
        hists__reset_col_len(hists);

        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (symbol_conf.exclude_other && !h->parent)
                        continue;

                if (hists__filter_entry_by_dso(hists, h))
                        continue;

                hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
        }
}

static bool hists__filter_entry_by_thread(struct hists *hists,
                                          struct hist_entry *he)
{
        if (hists->thread_filter != NULL &&
            he->thread != hists->thread_filter) {
                he->filtered |= (1 << HIST_FILTER__THREAD);
                return true;
        }

        return false;
}

void hists__filter_by_thread(struct hists *hists)
{
        struct rb_node *nd;

        hists->stats.nr_non_filtered_samples = 0;

        hists__reset_filter_stats(hists);
        hists__reset_col_len(hists);

        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (hists__filter_entry_by_thread(hists, h))
                        continue;

                hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
        }
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
                                          struct hist_entry *he)
{
        if (hists->symbol_filter_str != NULL &&
            (!he->ms.sym || strstr(he->ms.sym->name,
                                   hists->symbol_filter_str) == NULL)) {
                he->filtered |= (1 << HIST_FILTER__SYMBOL);
                return true;
        }

        return false;
}

void hists__filter_by_symbol(struct hists *hists)
{
        struct rb_node *nd;

        hists->stats.nr_non_filtered_samples = 0;

        hists__reset_filter_stats(hists);
        hists__reset_col_len(hists);

        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (hists__filter_entry_by_symbol(hists, h))
                        continue;

                hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
        }
}
void events_stats__inc(struct events_stats *stats, u32 type)
{
        ++stats->nr_events[0];
        ++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
        events_stats__inc(&hists->stats, type);
}

void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
        events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
        if (!filtered)
                hists->stats.nr_non_filtered_samples++;
}
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
                                                 struct hist_entry *pair)
{
        struct rb_root *root;
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct hist_entry *he;
        int64_t cmp;

        if (sort__need_collapse)
                root = &hists->entries_collapsed;
        else
                root = hists->entries_in;

        p = &root->rb_node;

        while (*p != NULL) {
                parent = *p;
                he = rb_entry(parent, struct hist_entry, rb_node_in);

                cmp = hist_entry__collapse(he, pair);

                if (!cmp)
                        goto out;

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        he = hist_entry__new(pair, true);
        if (he) {
                memset(&he->stat, 0, sizeof(he->stat));
                he->hists = hists;
                rb_link_node(&he->rb_node_in, parent, p);
                rb_insert_color(&he->rb_node_in, root);
                hists__inc_stats(hists, he);
                he->dummy = true;
        }
out:
        return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
                                            struct hist_entry *he)
{
        struct rb_node *n;

        if (sort__need_collapse)
                n = hists->entries_collapsed.rb_node;
        else
                n = hists->entries_in->rb_node;

        while (n) {
                struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
                int64_t cmp = hist_entry__collapse(iter, he);

                if (cmp < 0)
                        n = n->rb_left;
                else if (cmp > 0)
                        n = n->rb_right;
                else
                        return iter;
        }

        return NULL;
}
/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
        struct rb_root *root;
        struct rb_node *nd;
        struct hist_entry *pos, *pair;

        if (sort__need_collapse)
                root = &leader->entries_collapsed;
        else
                root = leader->entries_in;

        for (nd = rb_first(root); nd; nd = rb_next(nd)) {
                pos = rb_entry(nd, struct hist_entry, rb_node_in);
                pair = hists__find_entry(other, pos);

                if (pair)
                        hist_entry__add_pair(pair, pos);
        }
}
/*
 * Look for entries in the other hists that are not present in the leader;
 * if we find them, just add a dummy entry on the leader hists, with
 * period=0, nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
        struct rb_root *root;
        struct rb_node *nd;
        struct hist_entry *pos, *pair;

        if (sort__need_collapse)
                root = &other->entries_collapsed;
        else
                root = other->entries_in;

        for (nd = rb_first(root); nd; nd = rb_next(nd)) {
                pos = rb_entry(nd, struct hist_entry, rb_node_in);

                if (!hist_entry__has_pairs(pos)) {
                        pair = hists__add_dummy_entry(leader, pos);
                        if (pair == NULL)
                                return -1;
                        hist_entry__add_pair(pos, pair);
                }
        }

        return 0;
}
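
/*
 * hists__match()/hists__link() together implement the pairing used by
 * diff-style output: match links entries present in both hists, link then
 * backfills the leader with zeroed dummy entries for anything only the
 * other hists saw, so both sides iterate over the same row set.  Sketch
 * (illustrative, mirroring how a comparison tool would drive it):
 *
 *      hists__match(leader, other);
 *      if (hists__link(leader, other) < 0)
 *              return -1;
 */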
size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp)
{
        struct perf_evsel *pos;
        size_t ret = 0;

        evlist__for_each(evlist, pos) {
                ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
                ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
        }

        return ret;
}

u64 hists__total_period(struct hists *hists)
{
        return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
                hists->stats.total_period;
}
int parse_filter_percentage(const struct option *opt __maybe_unused,
                            const char *arg, int unset __maybe_unused)
{
        if (!strcmp(arg, "relative"))
                symbol_conf.filter_relative = true;
        else if (!strcmp(arg, "absolute"))
                symbol_conf.filter_relative = false;
        else
                return -1;

        return 0;
}
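
/*
 * parse_filter_percentage() backs the user-visible percentage-mode switch:
 * "relative" recomputes percentages against the filtered total, while
 * "absolute" keeps the full total_period as the base (see
 * hists__total_period() above).  Sketch of the corresponding option wiring
 * (hypothetical; the actual option tables live in the individual tools):
 *
 *      OPT_CALLBACK(0, "percentage", NULL, "relative|absolute",
 *                   "how to display percentage of filtered entries",
 *                   parse_filter_percentage),
 */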
int perf_hist_config(const char *var, const char *value)
{
        if (!strcmp(var, "hist.percentage"))
                return parse_filter_percentage(NULL, value, 0);

        return 0;
}

static int hists_evsel__init(struct perf_evsel *evsel)
{
        struct hists *hists = evsel__hists(evsel);

        memset(hists, 0, sizeof(*hists));
        hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
        hists->entries_in = &hists->entries_in_array[0];
        hists->entries_collapsed = RB_ROOT;
        hists->entries = RB_ROOT;
        pthread_mutex_init(&hists->lock, NULL);
        return 0;
}

/*
 * XXX We probably need a hists_evsel__exit() to free the hist_entries
 * stored in the rbtree...
 */
int hists__init(void)
{
        int err = perf_evsel__object_config(sizeof(struct hists_evsel),
                                            hists_evsel__init, NULL);
        if (err)
                fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);

        return err;
}