/* tools/perf/util/hist.c (zen-stable.git, as of the v3.3.7 merge) */
#include "annotate.h"
#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include <math.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);

enum hist_filter {
	HIST_FILTER__DSO,
	HIST_FILTER__THREAD,
	HIST_FILTER__PARENT,
};

struct callchain_param	callchain_param = {
	.mode	= CHAIN_GRAPH_REL,
	.min_percent = 0.5,
	.order  = ORDER_CALLEE
};

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

static void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}
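/*
 * Grow the column widths needed to display this entry: symbol name,
 * comm/thread and DSO name.  Unresolved symbols fall back to a fixed
 * width sized for a raw address (BITS_PER_LONG / 4 hex digits).
 */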
static void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	u16 len;

	if (h->ms.sym)
		hists__new_col_len(hists, HISTC_SYMBOL, h->ms.sym->namelen);
	else {
		const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

		if (hists__col_len(hists, HISTC_DSO) < unresolved_col_width &&
		    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
		    !symbol_conf.dso_list)
			hists__set_col_len(hists, HISTC_DSO,
					   unresolved_col_width);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}
}
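/*
 * Account the sample period to the bucket matching its cpumode:
 * kernel, user, guest kernel or guest user.
 */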
static void hist_entry__add_cpumode_period(struct hist_entry *he,
					   unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he->period_guest_us += period;
		break;
	default:
		break;
	}
}
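/*
 * Age an entry for live modes such as perf top: each decay pass keeps
 * 7/8 of the accumulated period and event count.
 */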
static void hist_entry__decay(struct hist_entry *he)
{
	he->period = (he->period * 7) / 8;
	he->nr_events = (he->nr_events * 7) / 8;
}

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->period;

	if (prev_period == 0)
		return true;

	hist_entry__decay(he);

	if (!he->filtered)
		hists->stats.total_period -= prev_period - he->period;

	return he->period == 0;
}
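/*
 * Decay every entry in the output tree; entries that decay to zero (or
 * match the zap_user/zap_kernel request) and are not marked in use are
 * unlinked and freed.
 */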
static void __hists__decay_entries(struct hists *hists, bool zap_user,
				   bool zap_kernel, bool threaded)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		/*
		 * We may be annotating this, for instance, so keep it here in
		 * case it gets new samples; we'll eventually free it when
		 * the user stops browsing and it again gets fully decayed.
		 */
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n)) &&
		    !n->used) {
			rb_erase(&n->rb_node, &hists->entries);

			if (sort__need_collapse || threaded)
				rb_erase(&n->rb_node_in, &hists->entries_collapsed);

			hist_entry__free(n);
			--hists->nr_entries;
		}
	}
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	return __hists__decay_entries(hists, zap_user, zap_kernel, false);
}

void hists__decay_entries_threaded(struct hists *hists,
				   bool zap_user, bool zap_kernel)
{
	return __hists__decay_entries(hists, zap_user, zap_kernel, true);
}

/*
 * histogram, sorted on item, collects periods
 */
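/*
 * Allocate a new hist_entry copied from 'template'.  When callchains
 * are in use, the callchain_root is allocated in the same block, right
 * after the entry, and initialized here.
 */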
static struct hist_entry *hist_entry__new(struct hist_entry *template)
{
	size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0;
	struct hist_entry *he = malloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;
		he->nr_events = 1;
		if (he->ms.map)
			he->ms.map->referenced = true;
		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);
	}

	return he;
}

static void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered) {
		hists__calc_col_len(hists, h);
		++hists->nr_entries;
		hists->stats.total_period += h->period;
	}
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}
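/*
 * Add a sample to the histogram: look up a matching entry in the input
 * rbtree (hists->entries_in) and fold the period into it, or insert a
 * freshly allocated entry.  hists->lock protects the input tree.
 */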
struct hist_entry *__hists__add_entry(struct hists *hists,
				      struct addr_location *al,
				      struct symbol *sym_parent, u64 period)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	struct hist_entry entry = {
		.thread	= al->thread,
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.cpu	= al->cpu,
		.ip	= al->addr,
		.level	= al->level,
		.period	= period,
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
	};
	int cmp;

	pthread_mutex_lock(&hists->lock);

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__cmp(&entry, he);

		if (!cmp) {
			he->period += period;
			++he->nr_events;

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry.ms.map) {
				he->ms.map = entry.ms.map;
				if (he->ms.map)
					he->ms.map->referenced = true;
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(&entry);
	if (!he)
		goto out_unlock;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	hist_entry__add_cpumode_period(he, al->cpumode, period);
out_unlock:
	pthread_mutex_unlock(&hists->lock);
	return he;
}

int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		cmp = se->se_cmp(left, right);
		if (cmp)
			break;
	}

	return cmp;
}
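/*
 * Like hist_entry__cmp(), but a sort key may provide a separate
 * se_collapse comparator used when merging entries into the collapsed
 * tree.
 */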
int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		int64_t (*f)(struct hist_entry *, struct hist_entry *);

		f = se->se_collapse ?: se->se_cmp;

		cmp = f(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

void hist_entry__free(struct hist_entry *he)
{
	free(he);
}

/*
 * collapse the histogram
 */
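/*
 * Insert 'he' into the collapsed rbtree.  If an equivalent entry is
 * already there, its period, event count and (optionally) callchain
 * absorb the new entry, which is then freed and false is returned;
 * true means a new node was linked in.
 */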
static bool hists__collapse_insert_entry(struct hists *hists,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			iter->period += he->period;
			iter->nr_events += he->nr_events;
			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&hists->callchain_cursor);
				callchain_merge(&hists->callchain_cursor, iter->callchain,
						he->callchain);
			}
			hist_entry__free(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return true;
}
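/*
 * entries_in is one of two input trees; rotate to the other one under
 * hists->lock and hand back the tree that was being filled, so that a
 * collector thread can keep adding samples while this one is resorted.
 */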
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
}

static void __hists__collapse_resort(struct hists *hists, bool threaded)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse && !threaded)
		return;

	root = hists__get_rotate_entries_in(hists);
	next = rb_first(root);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
	}
}

void hists__collapse_resort(struct hists *hists)
{
	return __hists__collapse_resort(hists, false);
}

void hists__collapse_resort_threaded(struct hists *hists)
{
	return __hists__collapse_resort(hists, true);
}

/*
 * reverse the map, sort on period.
 */
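/*
 * Link 'he' into the output rbtree, ordered by descending period.
 * When callchains are enabled, the entry's chain is sorted and pruned
 * against min_callchain_hits first.
 */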
static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (symbol_conf.use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (he->period > iter->period)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}
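/*
 * Rebuild the output tree from the collapsed (or input) tree, resetting
 * and recomputing nr_entries, total_period and the column widths as the
 * entries are inserted.
 */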
static void __hists__output_resort(struct hists *hists, bool threaded)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;

	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse || threaded)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists->nr_entries = 0;
	hists->stats.total_period = 0;
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
		hists__inc_nr_entries(hists, n);
	}
}

void hists__output_resort(struct hists *hists)
{
	return __hists__output_resort(hists, false);
}

void hists__output_resort_threaded(struct hists *hists)
{
	return __hists__output_resort(hists, true);
}

static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
{
	int i;
	int ret = fprintf(fp, " ");

	for (i = 0; i < left_margin; i++)
		ret += fprintf(fp, " ");

	return ret;
}

static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
					  int left_margin)
{
	int i;
	size_t ret = callchain__fprintf_left_margin(fp, left_margin);

	for (i = 0; i < depth; i++)
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "| ");
		else
			ret += fprintf(fp, " ");

	ret += fprintf(fp, "\n");

	return ret;
}

static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain,
				     int depth, int depth_mask, int period,
				     u64 total_samples, u64 hits,
				     int left_margin)
{
	int i;
	size_t ret = 0;

	ret += callchain__fprintf_left_margin(fp, left_margin);
	for (i = 0; i < depth; i++) {
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "|");
		else
			ret += fprintf(fp, " ");
		if (!period && i == depth - 1) {
			double percent;

			percent = hits * 100.0 / total_samples;
			ret += percent_color_fprintf(fp, "--%2.2f%%-- ", percent);
		} else
			ret += fprintf(fp, "%s", " ");
	}
	if (chain->ms.sym)
		ret += fprintf(fp, "%s\n", chain->ms.sym->name);
	else
		ret += fprintf(fp, "%p\n", (void *)(long)chain->ip);

	return ret;
}

static struct symbol *rem_sq_bracket;
static struct callchain_list rem_hits;

static void init_rem_hits(void)
{
	rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
	if (!rem_sq_bracket) {
		fprintf(stderr, "Not enough memory to display remaining hits\n");
		return;
	}

	strcpy(rem_sq_bracket->name, "[...]");
	rem_hits.ms.sym = rem_sq_bracket;
}
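/*
 * Recursively print a callchain subtree in graph mode.  depth_mask
 * tracks which ancestor levels still need a '|' connector; remaining
 * hits below the percent filter are summarized with the "[...]"
 * pseudo symbol set up by init_rem_hits().
 */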
static size_t __callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
					 u64 total_samples, int depth,
					 int depth_mask, int left_margin)
{
	struct rb_node *node, *next;
	struct callchain_node *child;
	struct callchain_list *chain;
	int new_depth_mask = depth_mask;
	u64 new_total;
	u64 remaining;
	size_t ret = 0;
	int i;
	uint entries_printed = 0;

	if (callchain_param.mode == CHAIN_GRAPH_REL)
		new_total = self->children_hit;
	else
		new_total = total_samples;

	remaining = new_total;

	node = rb_first(&self->rb_root);
	while (node) {
		u64 cumul;

		child = rb_entry(node, struct callchain_node, rb_node);
		cumul = callchain_cumul_hits(child);
		remaining -= cumul;

		/*
		 * The depth mask manages the output of pipes that show
		 * the depth. We don't want to keep the pipes of the current
		 * level for the last child of this depth.
		 * Except if we have remaining filtered hits. They will
		 * supersede the last child.
		 */
		next = rb_next(node);
		if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
			new_depth_mask &= ~(1 << (depth - 1));

		/*
		 * But we keep the older depth mask for the line separator
		 * to keep the level link until we reach the last child.
		 */
		ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
						   left_margin);
		i = 0;
		list_for_each_entry(chain, &child->val, list) {
			ret += ipchain__fprintf_graph(fp, chain, depth,
						      new_depth_mask, i++,
						      new_total,
						      cumul,
						      left_margin);
		}
		ret += __callchain__fprintf_graph(fp, child, new_total,
						  depth + 1,
						  new_depth_mask | (1 << depth),
						  left_margin);
		node = next;
		if (++entries_printed == callchain_param.print_limit)
			break;
	}

	if (callchain_param.mode == CHAIN_GRAPH_REL &&
	    remaining && remaining != new_total) {

		if (!rem_sq_bracket)
			return ret;

		new_depth_mask &= ~(1 << (depth - 1));

		ret += ipchain__fprintf_graph(fp, &rem_hits, depth,
					      new_depth_mask, 0, new_total,
					      remaining, left_margin);
	}

	return ret;
}

static size_t callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
				       u64 total_samples, int left_margin)
{
	struct callchain_list *chain;
	bool printed = false;
	int i = 0;
	int ret = 0;
	u32 entries_printed = 0;

	list_for_each_entry(chain, &self->val, list) {
		if (!i++ && sort__first_dimension == SORT_SYM)
			continue;

		if (!printed) {
			ret += callchain__fprintf_left_margin(fp, left_margin);
			ret += fprintf(fp, "|\n");
			ret += callchain__fprintf_left_margin(fp, left_margin);
			ret += fprintf(fp, "---");

			left_margin += 3;
			printed = true;
		} else
			ret += callchain__fprintf_left_margin(fp, left_margin);

		if (chain->ms.sym)
			ret += fprintf(fp, " %s\n", chain->ms.sym->name);
		else
			ret += fprintf(fp, " %p\n", (void *)(long)chain->ip);

		if (++entries_printed == callchain_param.print_limit)
			break;
	}

	ret += __callchain__fprintf_graph(fp, self, total_samples, 1, 1, left_margin);

	return ret;
}

static size_t callchain__fprintf_flat(FILE *fp, struct callchain_node *self,
				      u64 total_samples)
{
	struct callchain_list *chain;
	size_t ret = 0;

	if (!self)
		return 0;

	ret += callchain__fprintf_flat(fp, self->parent, total_samples);

	list_for_each_entry(chain, &self->val, list) {
		if (chain->ip >= PERF_CONTEXT_MAX)
			continue;
		if (chain->ms.sym)
			ret += fprintf(fp, " %s\n", chain->ms.sym->name);
		else
			ret += fprintf(fp, " %p\n",
				       (void *)(long)chain->ip);
	}

	return ret;
}

static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
					    u64 total_samples, int left_margin,
					    FILE *fp)
{
	struct rb_node *rb_node;
	struct callchain_node *chain;
	size_t ret = 0;
	u32 entries_printed = 0;

	rb_node = rb_first(&he->sorted_chain);
	while (rb_node) {
		double percent;

		chain = rb_entry(rb_node, struct callchain_node, rb_node);
		percent = chain->hit * 100.0 / total_samples;
		switch (callchain_param.mode) {
		case CHAIN_FLAT:
			ret += percent_color_fprintf(fp, " %6.2f%%\n",
						     percent);
			ret += callchain__fprintf_flat(fp, chain, total_samples);
			break;
		case CHAIN_GRAPH_ABS: /* Falldown */
		case CHAIN_GRAPH_REL:
			ret += callchain__fprintf_graph(fp, chain, total_samples,
							left_margin);
		case CHAIN_NONE:
		default:
			break;
		}
		ret += fprintf(fp, "\n");
		if (++entries_printed == callchain_param.print_limit)
			break;
		rb_node = rb_next(rb_node);
	}

	return ret;
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}
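/*
 * Format the leading columns for one entry: overhead percentage (or the
 * raw period when the total is zero), optional cpu utilization, sample
 * and period counts, plus the delta and displacement columns used when
 * comparing against a pair of histograms (e.g. perf diff).
 */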
static int hist_entry__pcnt_snprintf(struct hist_entry *he, char *s,
				     size_t size, struct hists *pair_hists,
				     bool show_displacement, long displacement,
				     bool color, u64 total_period)
{
	u64 period, total, period_sys, period_us, period_guest_sys, period_guest_us;
	u64 nr_events;
	const char *sep = symbol_conf.field_sep;
	int ret;

	if (symbol_conf.exclude_other && !he->parent)
		return 0;

	if (pair_hists) {
		period = he->pair ? he->pair->period : 0;
		nr_events = he->pair ? he->pair->nr_events : 0;
		total = pair_hists->stats.total_period;
		period_sys = he->pair ? he->pair->period_sys : 0;
		period_us = he->pair ? he->pair->period_us : 0;
		period_guest_sys = he->pair ? he->pair->period_guest_sys : 0;
		period_guest_us = he->pair ? he->pair->period_guest_us : 0;
	} else {
		period = he->period;
		nr_events = he->nr_events;
		total = total_period;
		period_sys = he->period_sys;
		period_us = he->period_us;
		period_guest_sys = he->period_guest_sys;
		period_guest_us = he->period_guest_us;
	}

	if (total) {
		if (color)
			ret = percent_color_snprintf(s, size,
						     sep ? "%.2f" : " %6.2f%%",
						     (period * 100.0) / total);
		else
			ret = scnprintf(s, size, sep ? "%.2f" : " %6.2f%%",
					(period * 100.0) / total);
		if (symbol_conf.show_cpu_utilization) {
			ret += percent_color_snprintf(s + ret, size - ret,
						      sep ? "%.2f" : " %6.2f%%",
						      (period_sys * 100.0) / total);
			ret += percent_color_snprintf(s + ret, size - ret,
						      sep ? "%.2f" : " %6.2f%%",
						      (period_us * 100.0) / total);
			if (perf_guest) {
				ret += percent_color_snprintf(s + ret,
							      size - ret,
							      sep ? "%.2f" : " %6.2f%%",
							      (period_guest_sys * 100.0) /
							      total);
				ret += percent_color_snprintf(s + ret,
							      size - ret,
							      sep ? "%.2f" : " %6.2f%%",
							      (period_guest_us * 100.0) /
							      total);
			}
		}
	} else
		ret = scnprintf(s, size, sep ? "%" PRIu64 : "%12" PRIu64 " ", period);

	if (symbol_conf.show_nr_samples) {
		if (sep)
			ret += scnprintf(s + ret, size - ret, "%c%" PRIu64, *sep, nr_events);
		else
			ret += scnprintf(s + ret, size - ret, "%11" PRIu64, nr_events);
	}

	if (symbol_conf.show_total_period) {
		if (sep)
			ret += scnprintf(s + ret, size - ret, "%c%" PRIu64, *sep, period);
		else
			ret += scnprintf(s + ret, size - ret, " %12" PRIu64, period);
	}

	if (pair_hists) {
		char bf[32];
		double old_percent = 0, new_percent = 0, diff;

		if (total > 0)
			old_percent = (period * 100.0) / total;
		if (total_period > 0)
			new_percent = (he->period * 100.0) / total_period;

		diff = new_percent - old_percent;

		if (fabs(diff) >= 0.01)
			ret += scnprintf(bf, sizeof(bf), "%+4.2F%%", diff);
		else
			ret += scnprintf(bf, sizeof(bf), " ");

		if (sep)
			ret += scnprintf(s + ret, size - ret, "%c%s", *sep, bf);
		else
			ret += scnprintf(s + ret, size - ret, "%11.11s", bf);

		if (show_displacement) {
			if (displacement)
				ret += scnprintf(bf, sizeof(bf), "%+4ld", displacement);
			else
				ret += scnprintf(bf, sizeof(bf), " ");

			if (sep)
				ret += scnprintf(s + ret, size - ret, "%c%s", *sep, bf);
			else
				ret += scnprintf(s + ret, size - ret, "%6.6s", bf);
		}
	}

	return ret;
}

int hist_entry__snprintf(struct hist_entry *he, char *s, size_t size,
			 struct hists *hists)
{
	const char *sep = symbol_conf.field_sep;
	struct sort_entry *se;
	int ret = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		if (se->elide)
			continue;

		ret += scnprintf(s + ret, size - ret, "%s", sep ?: " ");
		ret += se->se_snprintf(he, s + ret, size - ret,
				       hists__col_len(hists, se->se_width_idx));
	}

	return ret;
}

static int hist_entry__fprintf(struct hist_entry *he, size_t size,
			       struct hists *hists, struct hists *pair_hists,
			       bool show_displacement, long displacement,
			       u64 total_period, FILE *fp)
{
	char bf[512];
	int ret;

	if (size == 0 || size > sizeof(bf))
		size = sizeof(bf);

	ret = hist_entry__pcnt_snprintf(he, bf, size, pair_hists,
					show_displacement, displacement,
					true, total_period);
	hist_entry__snprintf(he, bf + ret, size - ret, hists);
	return fprintf(fp, "%s\n", bf);
}

static size_t hist_entry__fprintf_callchain(struct hist_entry *he,
					    struct hists *hists,
					    u64 total_period, FILE *fp)
{
	int left_margin = 0;

	if (sort__first_dimension == SORT_COMM) {
		struct sort_entry *se = list_first_entry(&hist_entry__sort_list,
							 typeof(*se), list);
		left_margin = hists__col_len(hists, se->se_width_idx);
		left_margin -= thread__comm_len(he->thread);
	}

	return hist_entry_callchain__fprintf(he, total_period, left_margin, fp);
}
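/*
 * Print the whole histogram to 'fp': an optional header describing the
 * enabled columns, then one line per unfiltered entry, honouring
 * max_rows and max_cols, with callchains appended when they are in use.
 */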
size_t hists__fprintf(struct hists *hists, struct hists *pair,
		      bool show_displacement, bool show_header, int max_rows,
		      int max_cols, FILE *fp)
{
	struct sort_entry *se;
	struct rb_node *nd;
	size_t ret = 0;
	u64 total_period;
	unsigned long position = 1;
	long displacement = 0;
	unsigned int width;
	const char *sep = symbol_conf.field_sep;
	const char *col_width = symbol_conf.col_width_list_str;
	int nr_rows = 0;

	init_rem_hits();

	if (!show_header)
		goto print_entries;

	fprintf(fp, "# %s", pair ? "Baseline" : "Overhead");

	if (symbol_conf.show_cpu_utilization) {
		if (sep) {
			ret += fprintf(fp, "%csys", *sep);
			ret += fprintf(fp, "%cus", *sep);
			if (perf_guest) {
				ret += fprintf(fp, "%cguest sys", *sep);
				ret += fprintf(fp, "%cguest us", *sep);
			}
		} else {
			ret += fprintf(fp, " sys ");
			ret += fprintf(fp, " us ");
			if (perf_guest) {
				ret += fprintf(fp, " guest sys ");
				ret += fprintf(fp, " guest us ");
			}
		}
	}

	if (symbol_conf.show_nr_samples) {
		if (sep)
			fprintf(fp, "%cSamples", *sep);
		else
			fputs(" Samples ", fp);
	}

	if (symbol_conf.show_total_period) {
		if (sep)
			ret += fprintf(fp, "%cPeriod", *sep);
		else
			ret += fprintf(fp, " Period ");
	}

	if (pair) {
		if (sep)
			ret += fprintf(fp, "%cDelta", *sep);
		else
			ret += fprintf(fp, " Delta ");

		if (show_displacement) {
			if (sep)
				ret += fprintf(fp, "%cDisplacement", *sep);
			else
				ret += fprintf(fp, " Displ");
		}
	}

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		if (se->elide)
			continue;
		if (sep) {
			fprintf(fp, "%c%s", *sep, se->se_header);
			continue;
		}
		width = strlen(se->se_header);
		if (symbol_conf.col_width_list_str) {
			if (col_width) {
				hists__set_col_len(hists, se->se_width_idx,
						   atoi(col_width));
				col_width = strchr(col_width, ',');
				if (col_width)
					++col_width;
			}
		}
		if (!hists__new_col_len(hists, se->se_width_idx, width))
			width = hists__col_len(hists, se->se_width_idx);
		fprintf(fp, " %*s", width, se->se_header);
	}

	fprintf(fp, "\n");
	if (max_rows && ++nr_rows >= max_rows)
		goto out;

	if (sep)
		goto print_entries;

	fprintf(fp, "# ........");
	if (symbol_conf.show_cpu_utilization)
		fprintf(fp, " ....... .......");
	if (symbol_conf.show_nr_samples)
		fprintf(fp, " ..........");
	if (symbol_conf.show_total_period)
		fprintf(fp, " ............");
	if (pair) {
		fprintf(fp, " ..........");
		if (show_displacement)
			fprintf(fp, " .....");
	}
	list_for_each_entry(se, &hist_entry__sort_list, list) {
		unsigned int i;

		if (se->elide)
			continue;

		fprintf(fp, " ");
		width = hists__col_len(hists, se->se_width_idx);
		if (width == 0)
			width = strlen(se->se_header);
		for (i = 0; i < width; i++)
			fprintf(fp, ".");
	}

	fprintf(fp, "\n");
	if (max_rows && ++nr_rows >= max_rows)
		goto out;

	fprintf(fp, "#\n");
	if (max_rows && ++nr_rows >= max_rows)
		goto out;

print_entries:
	total_period = hists->stats.total_period;

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (h->filtered)
			continue;

		if (show_displacement) {
			if (h->pair != NULL)
				displacement = ((long)h->pair->position -
						(long)position);
			else
				displacement = 0;
			++position;
		}
		ret += hist_entry__fprintf(h, max_cols, hists, pair, show_displacement,
					   displacement, total_period, fp);

		if (symbol_conf.use_callchain)
			ret += hist_entry__fprintf_callchain(h, hists, total_period, fp);
		if (max_rows && ++nr_rows >= max_rows)
			goto out;

		if (h->ms.map == NULL && verbose > 1) {
			__map_groups__fprintf_maps(&h->thread->mg,
						   MAP__FUNCTION, verbose, fp);
			fprintf(fp, "%.10s end\n", graph_dotted_line);
		}
	}
out:
	free(rem_sq_bracket);

	return ret;
}

/*
 * See hists__fprintf to match the column widths
 */
unsigned int hists__sort_list_width(struct hists *hists)
{
	struct sort_entry *se;
	int ret = 9; /* total % */

	if (symbol_conf.show_cpu_utilization) {
		ret += 7; /* count_sys % */
		ret += 6; /* count_us % */
		if (perf_guest) {
			ret += 13; /* count_guest_sys % */
			ret += 12; /* count_guest_us % */
		}
	}

	if (symbol_conf.show_nr_samples)
		ret += 11;

	if (symbol_conf.show_total_period)
		ret += 13;

	list_for_each_entry(se, &hist_entry__sort_list, list)
		if (!se->elide)
			ret += 2 + hists__col_len(hists, se->se_width_idx);

	if (verbose) /* Addr + origin */
		ret += 3 + BITS_PER_LONG / 4;

	return ret;
}
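/*
 * Clear one filter bit on an entry; if no other filter still hides it,
 * add it back into the entry count, total period and column widths.
 */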
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	++hists->nr_entries;
	if (h->ms.unfolded)
		hists->nr_entries += h->nr_rows;
	h->row_offset = 0;
	hists->stats.total_period += h->period;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] += h->nr_events;

	hists__calc_col_len(hists, h);
}

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

void hists__filter_by_dso(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (hists__filter_entry_by_dso(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
	}
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

void hists__filter_by_thread(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_thread(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
	}
}

int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
{
	return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
}

int hist_entry__annotate(struct hist_entry *he, size_t privsize)
{
	return symbol__annotate(he->ms.sym, he->ms.map, privsize);
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	++hists->stats.nr_events[0];
	++hists->stats.nr_events[type];
}
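/*
 * Print a summary line per PERF_RECORD_* type with a non-zero count,
 * skipping types that perf_event__name() reports as "UNKNOWN".
 */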
size_t hists__fprintf_nr_events(struct hists *hists, FILE *fp)
{
	int i;
	size_t ret = 0;

	for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
		const char *name;

		if (hists->stats.nr_events[i] == 0)
			continue;

		name = perf_event__name(i);
		if (!strcmp(name, "UNKNOWN"))
			continue;

		ret += fprintf(fp, "%16s events: %10d\n", name,
			       hists->stats.nr_events[i]);
	}

	return ret;
}