fs/ext4/inode.c: use pr_warn_ratelimited()
[linux/fpc-iii.git] / tools / perf / util / hist.c
blobc749ba6136a0ac33d7cdae8bf5b6604f1ee524af
1 #include "util.h"
2 #include "build-id.h"
3 #include "hist.h"
4 #include "session.h"
5 #include "sort.h"
6 #include <math.h>
/* Bit positions in hist_entry->filtered recording why an entry is hidden. */
enum hist_filter {
	HIST_FILTER__DSO,
	HIST_FILTER__THREAD,
	HIST_FILTER__PARENT,
};
14 struct callchain_param callchain_param = {
15 .mode = CHAIN_GRAPH_REL,
16 .min_percent = 0.5
19 u16 hists__col_len(struct hists *self, enum hist_column col)
21 return self->col_len[col];
24 void hists__set_col_len(struct hists *self, enum hist_column col, u16 len)
26 self->col_len[col] = len;
29 bool hists__new_col_len(struct hists *self, enum hist_column col, u16 len)
31 if (len > hists__col_len(self, col)) {
32 hists__set_col_len(self, col, len);
33 return true;
35 return false;
38 static void hists__reset_col_len(struct hists *self)
40 enum hist_column col;
42 for (col = 0; col < HISTC_NR_COLS; ++col)
43 hists__set_col_len(self, col, 0);
46 static void hists__calc_col_len(struct hists *self, struct hist_entry *h)
48 u16 len;
50 if (h->ms.sym)
51 hists__new_col_len(self, HISTC_SYMBOL, h->ms.sym->namelen);
53 len = thread__comm_len(h->thread);
54 if (hists__new_col_len(self, HISTC_COMM, len))
55 hists__set_col_len(self, HISTC_THREAD, len + 6);
57 if (h->ms.map) {
58 len = dso__name_len(h->ms.map->dso);
59 hists__new_col_len(self, HISTC_DSO, len);
63 static void hist_entry__add_cpumode_period(struct hist_entry *self,
64 unsigned int cpumode, u64 period)
66 switch (cpumode) {
67 case PERF_RECORD_MISC_KERNEL:
68 self->period_sys += period;
69 break;
70 case PERF_RECORD_MISC_USER:
71 self->period_us += period;
72 break;
73 case PERF_RECORD_MISC_GUEST_KERNEL:
74 self->period_guest_sys += period;
75 break;
76 case PERF_RECORD_MISC_GUEST_USER:
77 self->period_guest_us += period;
78 break;
79 default:
80 break;
85 * histogram, sorted on item, collects periods
88 static struct hist_entry *hist_entry__new(struct hist_entry *template)
90 size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0;
91 struct hist_entry *self = malloc(sizeof(*self) + callchain_size);
93 if (self != NULL) {
94 *self = *template;
95 self->nr_events = 1;
96 if (self->ms.map)
97 self->ms.map->referenced = true;
98 if (symbol_conf.use_callchain)
99 callchain_init(self->callchain);
102 return self;
105 static void hists__inc_nr_entries(struct hists *self, struct hist_entry *h)
107 if (!h->filtered) {
108 hists__calc_col_len(self, h);
109 ++self->nr_entries;
113 static u8 symbol__parent_filter(const struct symbol *parent)
115 if (symbol_conf.exclude_other && parent == NULL)
116 return 1 << HIST_FILTER__PARENT;
117 return 0;
120 struct hist_entry *__hists__add_entry(struct hists *self,
121 struct addr_location *al,
122 struct symbol *sym_parent, u64 period)
124 struct rb_node **p = &self->entries.rb_node;
125 struct rb_node *parent = NULL;
126 struct hist_entry *he;
127 struct hist_entry entry = {
128 .thread = al->thread,
129 .ms = {
130 .map = al->map,
131 .sym = al->sym,
133 .cpu = al->cpu,
134 .ip = al->addr,
135 .level = al->level,
136 .period = period,
137 .parent = sym_parent,
138 .filtered = symbol__parent_filter(sym_parent),
140 int cmp;
142 while (*p != NULL) {
143 parent = *p;
144 he = rb_entry(parent, struct hist_entry, rb_node);
146 cmp = hist_entry__cmp(&entry, he);
148 if (!cmp) {
149 he->period += period;
150 ++he->nr_events;
151 goto out;
154 if (cmp < 0)
155 p = &(*p)->rb_left;
156 else
157 p = &(*p)->rb_right;
160 he = hist_entry__new(&entry);
161 if (!he)
162 return NULL;
163 rb_link_node(&he->rb_node, parent, p);
164 rb_insert_color(&he->rb_node, &self->entries);
165 hists__inc_nr_entries(self, he);
166 out:
167 hist_entry__add_cpumode_period(he, al->cpumode, period);
168 return he;
171 int64_t
172 hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
174 struct sort_entry *se;
175 int64_t cmp = 0;
177 list_for_each_entry(se, &hist_entry__sort_list, list) {
178 cmp = se->se_cmp(left, right);
179 if (cmp)
180 break;
183 return cmp;
186 int64_t
187 hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
189 struct sort_entry *se;
190 int64_t cmp = 0;
192 list_for_each_entry(se, &hist_entry__sort_list, list) {
193 int64_t (*f)(struct hist_entry *, struct hist_entry *);
195 f = se->se_collapse ?: se->se_cmp;
197 cmp = f(left, right);
198 if (cmp)
199 break;
202 return cmp;
/* Release an entry allocated by hist_entry__new() (NULL-safe via free). */
void hist_entry__free(struct hist_entry *he)
{
	free(he);
}
211 * collapse the histogram
214 static bool collapse__insert_entry(struct rb_root *root, struct hist_entry *he)
216 struct rb_node **p = &root->rb_node;
217 struct rb_node *parent = NULL;
218 struct hist_entry *iter;
219 int64_t cmp;
221 while (*p != NULL) {
222 parent = *p;
223 iter = rb_entry(parent, struct hist_entry, rb_node);
225 cmp = hist_entry__collapse(iter, he);
227 if (!cmp) {
228 iter->period += he->period;
229 if (symbol_conf.use_callchain)
230 callchain_merge(iter->callchain, he->callchain);
231 hist_entry__free(he);
232 return false;
235 if (cmp < 0)
236 p = &(*p)->rb_left;
237 else
238 p = &(*p)->rb_right;
241 rb_link_node(&he->rb_node, parent, p);
242 rb_insert_color(&he->rb_node, root);
243 return true;
246 void hists__collapse_resort(struct hists *self)
248 struct rb_root tmp;
249 struct rb_node *next;
250 struct hist_entry *n;
252 if (!sort__need_collapse)
253 return;
255 tmp = RB_ROOT;
256 next = rb_first(&self->entries);
257 self->nr_entries = 0;
258 hists__reset_col_len(self);
260 while (next) {
261 n = rb_entry(next, struct hist_entry, rb_node);
262 next = rb_next(&n->rb_node);
264 rb_erase(&n->rb_node, &self->entries);
265 if (collapse__insert_entry(&tmp, n))
266 hists__inc_nr_entries(self, n);
269 self->entries = tmp;
273 * reverse the map, sort on period.
276 static void __hists__insert_output_entry(struct rb_root *entries,
277 struct hist_entry *he,
278 u64 min_callchain_hits)
280 struct rb_node **p = &entries->rb_node;
281 struct rb_node *parent = NULL;
282 struct hist_entry *iter;
284 if (symbol_conf.use_callchain)
285 callchain_param.sort(&he->sorted_chain, he->callchain,
286 min_callchain_hits, &callchain_param);
288 while (*p != NULL) {
289 parent = *p;
290 iter = rb_entry(parent, struct hist_entry, rb_node);
292 if (he->period > iter->period)
293 p = &(*p)->rb_left;
294 else
295 p = &(*p)->rb_right;
298 rb_link_node(&he->rb_node, parent, p);
299 rb_insert_color(&he->rb_node, entries);
302 void hists__output_resort(struct hists *self)
304 struct rb_root tmp;
305 struct rb_node *next;
306 struct hist_entry *n;
307 u64 min_callchain_hits;
309 min_callchain_hits = self->stats.total_period * (callchain_param.min_percent / 100);
311 tmp = RB_ROOT;
312 next = rb_first(&self->entries);
314 self->nr_entries = 0;
315 hists__reset_col_len(self);
317 while (next) {
318 n = rb_entry(next, struct hist_entry, rb_node);
319 next = rb_next(&n->rb_node);
321 rb_erase(&n->rb_node, &self->entries);
322 __hists__insert_output_entry(&tmp, n, min_callchain_hits);
323 hists__inc_nr_entries(self, n);
326 self->entries = tmp;
/*
 * Emit one leading space plus @left_margin further spaces.  Note that a
 * non-positive @left_margin still yields exactly one space, so this must
 * stay a loop rather than a "%*s" width (negative widths left-justify).
 * Returns the number of characters written.
 */
static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
{
	int col;
	int written = fprintf(fp, " ");

	for (col = 0; col < left_margin; col++)
		written += fprintf(fp, " ");

	return written;
}
/*
 * Print one separator line of the callchain graph: a "|" for every depth
 * level that still has pending siblings (per @depth_mask), padding
 * otherwise.  Returns the number of characters written.
 */
static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
					  int left_margin)
{
	int level;
	size_t written = callchain__fprintf_left_margin(fp, left_margin);

	for (level = 0; level < depth; level++) {
		if (depth_mask & (1 << level))
			written += fprintf(fp, "| ");
		else
			written += fprintf(fp, " ");
	}

	written += fprintf(fp, "\n");

	return written;
}
357 static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain,
358 int depth, int depth_mask, int period,
359 u64 total_samples, u64 hits,
360 int left_margin)
362 int i;
363 size_t ret = 0;
365 ret += callchain__fprintf_left_margin(fp, left_margin);
366 for (i = 0; i < depth; i++) {
367 if (depth_mask & (1 << i))
368 ret += fprintf(fp, "|");
369 else
370 ret += fprintf(fp, " ");
371 if (!period && i == depth - 1) {
372 double percent;
374 percent = hits * 100.0 / total_samples;
375 ret += percent_color_fprintf(fp, "--%2.2f%%-- ", percent);
376 } else
377 ret += fprintf(fp, "%s", " ");
379 if (chain->ms.sym)
380 ret += fprintf(fp, "%s\n", chain->ms.sym->name);
381 else
382 ret += fprintf(fp, "%p\n", (void *)(long)chain->ip);
384 return ret;
387 static struct symbol *rem_sq_bracket;
388 static struct callchain_list rem_hits;
390 static void init_rem_hits(void)
392 rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
393 if (!rem_sq_bracket) {
394 fprintf(stderr, "Not enough memory to display remaining hits\n");
395 return;
398 strcpy(rem_sq_bracket->name, "[...]");
399 rem_hits.ms.sym = rem_sq_bracket;
402 static size_t __callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
403 u64 total_samples, int depth,
404 int depth_mask, int left_margin)
406 struct rb_node *node, *next;
407 struct callchain_node *child;
408 struct callchain_list *chain;
409 int new_depth_mask = depth_mask;
410 u64 new_total;
411 u64 remaining;
412 size_t ret = 0;
413 int i;
414 uint entries_printed = 0;
416 if (callchain_param.mode == CHAIN_GRAPH_REL)
417 new_total = self->children_hit;
418 else
419 new_total = total_samples;
421 remaining = new_total;
423 node = rb_first(&self->rb_root);
424 while (node) {
425 u64 cumul;
427 child = rb_entry(node, struct callchain_node, rb_node);
428 cumul = cumul_hits(child);
429 remaining -= cumul;
432 * The depth mask manages the output of pipes that show
433 * the depth. We don't want to keep the pipes of the current
434 * level for the last child of this depth.
435 * Except if we have remaining filtered hits. They will
436 * supersede the last child
438 next = rb_next(node);
439 if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
440 new_depth_mask &= ~(1 << (depth - 1));
443 * But we keep the older depth mask for the line separator
444 * to keep the level link until we reach the last child
446 ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
447 left_margin);
448 i = 0;
449 list_for_each_entry(chain, &child->val, list) {
450 ret += ipchain__fprintf_graph(fp, chain, depth,
451 new_depth_mask, i++,
452 new_total,
453 cumul,
454 left_margin);
456 ret += __callchain__fprintf_graph(fp, child, new_total,
457 depth + 1,
458 new_depth_mask | (1 << depth),
459 left_margin);
460 node = next;
461 if (++entries_printed == callchain_param.print_limit)
462 break;
465 if (callchain_param.mode == CHAIN_GRAPH_REL &&
466 remaining && remaining != new_total) {
468 if (!rem_sq_bracket)
469 return ret;
471 new_depth_mask &= ~(1 << (depth - 1));
473 ret += ipchain__fprintf_graph(fp, &rem_hits, depth,
474 new_depth_mask, 0, new_total,
475 remaining, left_margin);
478 return ret;
481 static size_t callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
482 u64 total_samples, int left_margin)
484 struct callchain_list *chain;
485 bool printed = false;
486 int i = 0;
487 int ret = 0;
488 u32 entries_printed = 0;
490 list_for_each_entry(chain, &self->val, list) {
491 if (!i++ && sort__first_dimension == SORT_SYM)
492 continue;
494 if (!printed) {
495 ret += callchain__fprintf_left_margin(fp, left_margin);
496 ret += fprintf(fp, "|\n");
497 ret += callchain__fprintf_left_margin(fp, left_margin);
498 ret += fprintf(fp, "---");
500 left_margin += 3;
501 printed = true;
502 } else
503 ret += callchain__fprintf_left_margin(fp, left_margin);
505 if (chain->ms.sym)
506 ret += fprintf(fp, " %s\n", chain->ms.sym->name);
507 else
508 ret += fprintf(fp, " %p\n", (void *)(long)chain->ip);
510 if (++entries_printed == callchain_param.print_limit)
511 break;
514 ret += __callchain__fprintf_graph(fp, self, total_samples, 1, 1, left_margin);
516 return ret;
519 static size_t callchain__fprintf_flat(FILE *fp, struct callchain_node *self,
520 u64 total_samples)
522 struct callchain_list *chain;
523 size_t ret = 0;
525 if (!self)
526 return 0;
528 ret += callchain__fprintf_flat(fp, self->parent, total_samples);
531 list_for_each_entry(chain, &self->val, list) {
532 if (chain->ip >= PERF_CONTEXT_MAX)
533 continue;
534 if (chain->ms.sym)
535 ret += fprintf(fp, " %s\n", chain->ms.sym->name);
536 else
537 ret += fprintf(fp, " %p\n",
538 (void *)(long)chain->ip);
541 return ret;
544 static size_t hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self,
545 u64 total_samples, int left_margin)
547 struct rb_node *rb_node;
548 struct callchain_node *chain;
549 size_t ret = 0;
550 u32 entries_printed = 0;
552 rb_node = rb_first(&self->sorted_chain);
553 while (rb_node) {
554 double percent;
556 chain = rb_entry(rb_node, struct callchain_node, rb_node);
557 percent = chain->hit * 100.0 / total_samples;
558 switch (callchain_param.mode) {
559 case CHAIN_FLAT:
560 ret += percent_color_fprintf(fp, " %6.2f%%\n",
561 percent);
562 ret += callchain__fprintf_flat(fp, chain, total_samples);
563 break;
564 case CHAIN_GRAPH_ABS: /* Falldown */
565 case CHAIN_GRAPH_REL:
566 ret += callchain__fprintf_graph(fp, chain, total_samples,
567 left_margin);
568 case CHAIN_NONE:
569 default:
570 break;
572 ret += fprintf(fp, "\n");
573 if (++entries_printed == callchain_param.print_limit)
574 break;
575 rb_node = rb_next(rb_node);
578 return ret;
581 int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size,
582 struct hists *hists, struct hists *pair_hists,
583 bool show_displacement, long displacement,
584 bool color, u64 session_total)
586 struct sort_entry *se;
587 u64 period, total, period_sys, period_us, period_guest_sys, period_guest_us;
588 const char *sep = symbol_conf.field_sep;
589 int ret;
591 if (symbol_conf.exclude_other && !self->parent)
592 return 0;
594 if (pair_hists) {
595 period = self->pair ? self->pair->period : 0;
596 total = pair_hists->stats.total_period;
597 period_sys = self->pair ? self->pair->period_sys : 0;
598 period_us = self->pair ? self->pair->period_us : 0;
599 period_guest_sys = self->pair ? self->pair->period_guest_sys : 0;
600 period_guest_us = self->pair ? self->pair->period_guest_us : 0;
601 } else {
602 period = self->period;
603 total = session_total;
604 period_sys = self->period_sys;
605 period_us = self->period_us;
606 period_guest_sys = self->period_guest_sys;
607 period_guest_us = self->period_guest_us;
610 if (total) {
611 if (color)
612 ret = percent_color_snprintf(s, size,
613 sep ? "%.2f" : " %6.2f%%",
614 (period * 100.0) / total);
615 else
616 ret = snprintf(s, size, sep ? "%.2f" : " %6.2f%%",
617 (period * 100.0) / total);
618 if (symbol_conf.show_cpu_utilization) {
619 ret += percent_color_snprintf(s + ret, size - ret,
620 sep ? "%.2f" : " %6.2f%%",
621 (period_sys * 100.0) / total);
622 ret += percent_color_snprintf(s + ret, size - ret,
623 sep ? "%.2f" : " %6.2f%%",
624 (period_us * 100.0) / total);
625 if (perf_guest) {
626 ret += percent_color_snprintf(s + ret,
627 size - ret,
628 sep ? "%.2f" : " %6.2f%%",
629 (period_guest_sys * 100.0) /
630 total);
631 ret += percent_color_snprintf(s + ret,
632 size - ret,
633 sep ? "%.2f" : " %6.2f%%",
634 (period_guest_us * 100.0) /
635 total);
638 } else
639 ret = snprintf(s, size, sep ? "%lld" : "%12lld ", period);
641 if (symbol_conf.show_nr_samples) {
642 if (sep)
643 ret += snprintf(s + ret, size - ret, "%c%lld", *sep, period);
644 else
645 ret += snprintf(s + ret, size - ret, "%11lld", period);
648 if (pair_hists) {
649 char bf[32];
650 double old_percent = 0, new_percent = 0, diff;
652 if (total > 0)
653 old_percent = (period * 100.0) / total;
654 if (session_total > 0)
655 new_percent = (self->period * 100.0) / session_total;
657 diff = new_percent - old_percent;
659 if (fabs(diff) >= 0.01)
660 snprintf(bf, sizeof(bf), "%+4.2F%%", diff);
661 else
662 snprintf(bf, sizeof(bf), " ");
664 if (sep)
665 ret += snprintf(s + ret, size - ret, "%c%s", *sep, bf);
666 else
667 ret += snprintf(s + ret, size - ret, "%11.11s", bf);
669 if (show_displacement) {
670 if (displacement)
671 snprintf(bf, sizeof(bf), "%+4ld", displacement);
672 else
673 snprintf(bf, sizeof(bf), " ");
675 if (sep)
676 ret += snprintf(s + ret, size - ret, "%c%s", *sep, bf);
677 else
678 ret += snprintf(s + ret, size - ret, "%6.6s", bf);
682 list_for_each_entry(se, &hist_entry__sort_list, list) {
683 if (se->elide)
684 continue;
686 ret += snprintf(s + ret, size - ret, "%s", sep ?: " ");
687 ret += se->se_snprintf(self, s + ret, size - ret,
688 hists__col_len(hists, se->se_width_idx));
691 return ret;
694 int hist_entry__fprintf(struct hist_entry *self, struct hists *hists,
695 struct hists *pair_hists, bool show_displacement,
696 long displacement, FILE *fp, u64 session_total)
698 char bf[512];
699 hist_entry__snprintf(self, bf, sizeof(bf), hists, pair_hists,
700 show_displacement, displacement,
701 true, session_total);
702 return fprintf(fp, "%s\n", bf);
705 static size_t hist_entry__fprintf_callchain(struct hist_entry *self,
706 struct hists *hists, FILE *fp,
707 u64 session_total)
709 int left_margin = 0;
711 if (sort__first_dimension == SORT_COMM) {
712 struct sort_entry *se = list_first_entry(&hist_entry__sort_list,
713 typeof(*se), list);
714 left_margin = hists__col_len(hists, se->se_width_idx);
715 left_margin -= thread__comm_len(self->thread);
718 return hist_entry_callchain__fprintf(fp, self, session_total,
719 left_margin);
722 size_t hists__fprintf(struct hists *self, struct hists *pair,
723 bool show_displacement, FILE *fp)
725 struct sort_entry *se;
726 struct rb_node *nd;
727 size_t ret = 0;
728 unsigned long position = 1;
729 long displacement = 0;
730 unsigned int width;
731 const char *sep = symbol_conf.field_sep;
732 const char *col_width = symbol_conf.col_width_list_str;
734 init_rem_hits();
736 fprintf(fp, "# %s", pair ? "Baseline" : "Overhead");
738 if (symbol_conf.show_nr_samples) {
739 if (sep)
740 fprintf(fp, "%cSamples", *sep);
741 else
742 fputs(" Samples ", fp);
745 if (symbol_conf.show_cpu_utilization) {
746 if (sep) {
747 ret += fprintf(fp, "%csys", *sep);
748 ret += fprintf(fp, "%cus", *sep);
749 if (perf_guest) {
750 ret += fprintf(fp, "%cguest sys", *sep);
751 ret += fprintf(fp, "%cguest us", *sep);
753 } else {
754 ret += fprintf(fp, " sys ");
755 ret += fprintf(fp, " us ");
756 if (perf_guest) {
757 ret += fprintf(fp, " guest sys ");
758 ret += fprintf(fp, " guest us ");
763 if (pair) {
764 if (sep)
765 ret += fprintf(fp, "%cDelta", *sep);
766 else
767 ret += fprintf(fp, " Delta ");
769 if (show_displacement) {
770 if (sep)
771 ret += fprintf(fp, "%cDisplacement", *sep);
772 else
773 ret += fprintf(fp, " Displ");
777 list_for_each_entry(se, &hist_entry__sort_list, list) {
778 if (se->elide)
779 continue;
780 if (sep) {
781 fprintf(fp, "%c%s", *sep, se->se_header);
782 continue;
784 width = strlen(se->se_header);
785 if (symbol_conf.col_width_list_str) {
786 if (col_width) {
787 hists__set_col_len(self, se->se_width_idx,
788 atoi(col_width));
789 col_width = strchr(col_width, ',');
790 if (col_width)
791 ++col_width;
794 if (!hists__new_col_len(self, se->se_width_idx, width))
795 width = hists__col_len(self, se->se_width_idx);
796 fprintf(fp, " %*s", width, se->se_header);
798 fprintf(fp, "\n");
800 if (sep)
801 goto print_entries;
803 fprintf(fp, "# ........");
804 if (symbol_conf.show_nr_samples)
805 fprintf(fp, " ..........");
806 if (pair) {
807 fprintf(fp, " ..........");
808 if (show_displacement)
809 fprintf(fp, " .....");
811 list_for_each_entry(se, &hist_entry__sort_list, list) {
812 unsigned int i;
814 if (se->elide)
815 continue;
817 fprintf(fp, " ");
818 width = hists__col_len(self, se->se_width_idx);
819 if (width == 0)
820 width = strlen(se->se_header);
821 for (i = 0; i < width; i++)
822 fprintf(fp, ".");
825 fprintf(fp, "\n#\n");
827 print_entries:
828 for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
829 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
831 if (show_displacement) {
832 if (h->pair != NULL)
833 displacement = ((long)h->pair->position -
834 (long)position);
835 else
836 displacement = 0;
837 ++position;
839 ret += hist_entry__fprintf(h, self, pair, show_displacement,
840 displacement, fp, self->stats.total_period);
842 if (symbol_conf.use_callchain)
843 ret += hist_entry__fprintf_callchain(h, self, fp,
844 self->stats.total_period);
845 if (h->ms.map == NULL && verbose > 1) {
846 __map_groups__fprintf_maps(&h->thread->mg,
847 MAP__FUNCTION, verbose, fp);
848 fprintf(fp, "%.10s end\n", graph_dotted_line);
852 free(rem_sq_bracket);
854 return ret;
858 * See hists__fprintf to match the column widths
860 unsigned int hists__sort_list_width(struct hists *self)
862 struct sort_entry *se;
863 int ret = 9; /* total % */
865 if (symbol_conf.show_cpu_utilization) {
866 ret += 7; /* count_sys % */
867 ret += 6; /* count_us % */
868 if (perf_guest) {
869 ret += 13; /* count_guest_sys % */
870 ret += 12; /* count_guest_us % */
874 if (symbol_conf.show_nr_samples)
875 ret += 11;
877 list_for_each_entry(se, &hist_entry__sort_list, list)
878 if (!se->elide)
879 ret += 2 + hists__col_len(self, se->se_width_idx);
881 if (verbose) /* Addr + origin */
882 ret += 3 + BITS_PER_LONG / 4;
884 return ret;
887 static void hists__remove_entry_filter(struct hists *self, struct hist_entry *h,
888 enum hist_filter filter)
890 h->filtered &= ~(1 << filter);
891 if (h->filtered)
892 return;
894 ++self->nr_entries;
895 if (h->ms.unfolded)
896 self->nr_entries += h->nr_rows;
897 h->row_offset = 0;
898 self->stats.total_period += h->period;
899 self->stats.nr_events[PERF_RECORD_SAMPLE] += h->nr_events;
901 hists__calc_col_len(self, h);
904 void hists__filter_by_dso(struct hists *self, const struct dso *dso)
906 struct rb_node *nd;
908 self->nr_entries = self->stats.total_period = 0;
909 self->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
910 hists__reset_col_len(self);
912 for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
913 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
915 if (symbol_conf.exclude_other && !h->parent)
916 continue;
918 if (dso != NULL && (h->ms.map == NULL || h->ms.map->dso != dso)) {
919 h->filtered |= (1 << HIST_FILTER__DSO);
920 continue;
923 hists__remove_entry_filter(self, h, HIST_FILTER__DSO);
927 void hists__filter_by_thread(struct hists *self, const struct thread *thread)
929 struct rb_node *nd;
931 self->nr_entries = self->stats.total_period = 0;
932 self->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
933 hists__reset_col_len(self);
935 for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
936 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
938 if (thread != NULL && h->thread != thread) {
939 h->filtered |= (1 << HIST_FILTER__THREAD);
940 continue;
943 hists__remove_entry_filter(self, h, HIST_FILTER__THREAD);
947 static int symbol__alloc_hist(struct symbol *self)
949 struct sym_priv *priv = symbol__priv(self);
950 const int size = (sizeof(*priv->hist) +
951 (self->end - self->start) * sizeof(u64));
953 priv->hist = zalloc(size);
954 return priv->hist == NULL ? -1 : 0;
957 int hist_entry__inc_addr_samples(struct hist_entry *self, u64 ip)
959 unsigned int sym_size, offset;
960 struct symbol *sym = self->ms.sym;
961 struct sym_priv *priv;
962 struct sym_hist *h;
964 if (!sym || !self->ms.map)
965 return 0;
967 priv = symbol__priv(sym);
968 if (priv->hist == NULL && symbol__alloc_hist(sym) < 0)
969 return -ENOMEM;
971 sym_size = sym->end - sym->start;
972 offset = ip - sym->start;
974 pr_debug3("%s: ip=%#Lx\n", __func__, self->ms.map->unmap_ip(self->ms.map, ip));
976 if (offset >= sym_size)
977 return 0;
979 h = priv->hist;
980 h->sum++;
981 h->ip[offset]++;
983 pr_debug3("%#Lx %s: period++ [ip: %#Lx, %#Lx] => %Ld\n", self->ms.sym->start,
984 self->ms.sym->name, ip, ip - self->ms.sym->start, h->ip[offset]);
985 return 0;
988 static struct objdump_line *objdump_line__new(s64 offset, char *line, size_t privsize)
990 struct objdump_line *self = malloc(sizeof(*self) + privsize);
992 if (self != NULL) {
993 self->offset = offset;
994 self->line = line;
997 return self;
1000 void objdump_line__free(struct objdump_line *self)
1002 free(self->line);
1003 free(self);
1006 static void objdump__add_line(struct list_head *head, struct objdump_line *line)
1008 list_add_tail(&line->node, head);
1011 struct objdump_line *objdump__get_next_ip_line(struct list_head *head,
1012 struct objdump_line *pos)
1014 list_for_each_entry_continue(pos, head, node)
1015 if (pos->offset >= 0)
1016 return pos;
1018 return NULL;
1021 static int hist_entry__parse_objdump_line(struct hist_entry *self, FILE *file,
1022 struct list_head *head, size_t privsize)
1024 struct symbol *sym = self->ms.sym;
1025 struct objdump_line *objdump_line;
1026 char *line = NULL, *tmp, *tmp2, *c;
1027 size_t line_len;
1028 s64 line_ip, offset = -1;
1030 if (getline(&line, &line_len, file) < 0)
1031 return -1;
1033 if (!line)
1034 return -1;
1036 while (line_len != 0 && isspace(line[line_len - 1]))
1037 line[--line_len] = '\0';
1039 c = strchr(line, '\n');
1040 if (c)
1041 *c = 0;
1043 line_ip = -1;
1046 * Strip leading spaces:
1048 tmp = line;
1049 while (*tmp) {
1050 if (*tmp != ' ')
1051 break;
1052 tmp++;
1055 if (*tmp) {
1057 * Parse hexa addresses followed by ':'
1059 line_ip = strtoull(tmp, &tmp2, 16);
1060 if (*tmp2 != ':' || tmp == tmp2 || tmp2[1] == '\0')
1061 line_ip = -1;
1064 if (line_ip != -1) {
1065 u64 start = map__rip_2objdump(self->ms.map, sym->start),
1066 end = map__rip_2objdump(self->ms.map, sym->end);
1068 offset = line_ip - start;
1069 if (offset < 0 || (u64)line_ip > end)
1070 offset = -1;
1073 objdump_line = objdump_line__new(offset, line, privsize);
1074 if (objdump_line == NULL) {
1075 free(line);
1076 return -1;
1078 objdump__add_line(head, objdump_line);
1080 return 0;
1083 int hist_entry__annotate(struct hist_entry *self, struct list_head *head,
1084 size_t privsize)
1086 struct symbol *sym = self->ms.sym;
1087 struct map *map = self->ms.map;
1088 struct dso *dso = map->dso;
1089 char *filename = dso__build_id_filename(dso, NULL, 0);
1090 bool free_filename = true;
1091 char command[PATH_MAX * 2];
1092 FILE *file;
1093 int err = 0;
1094 u64 len;
1095 char symfs_filename[PATH_MAX];
1097 if (filename) {
1098 snprintf(symfs_filename, sizeof(symfs_filename), "%s%s",
1099 symbol_conf.symfs, filename);
1102 if (filename == NULL) {
1103 if (dso->has_build_id) {
1104 pr_err("Can't annotate %s: not enough memory\n",
1105 sym->name);
1106 return -ENOMEM;
1108 goto fallback;
1109 } else if (readlink(symfs_filename, command, sizeof(command)) < 0 ||
1110 strstr(command, "[kernel.kallsyms]") ||
1111 access(symfs_filename, R_OK)) {
1112 free(filename);
1113 fallback:
1115 * If we don't have build-ids or the build-id file isn't in the
1116 * cache, or is just a kallsyms file, well, lets hope that this
1117 * DSO is the same as when 'perf record' ran.
1119 filename = dso->long_name;
1120 snprintf(symfs_filename, sizeof(symfs_filename), "%s%s",
1121 symbol_conf.symfs, filename);
1122 free_filename = false;
1125 if (dso->origin == DSO__ORIG_KERNEL) {
1126 if (dso->annotate_warned)
1127 goto out_free_filename;
1128 err = -ENOENT;
1129 dso->annotate_warned = 1;
1130 pr_err("Can't annotate %s: No vmlinux file was found in the "
1131 "path\n", sym->name);
1132 goto out_free_filename;
1135 pr_debug("%s: filename=%s, sym=%s, start=%#Lx, end=%#Lx\n", __func__,
1136 filename, sym->name, map->unmap_ip(map, sym->start),
1137 map->unmap_ip(map, sym->end));
1139 len = sym->end - sym->start;
1141 pr_debug("annotating [%p] %30s : [%p] %30s\n",
1142 dso, dso->long_name, sym, sym->name);
1144 snprintf(command, sizeof(command),
1145 "objdump --start-address=0x%016Lx --stop-address=0x%016Lx -dS -C %s|grep -v %s|expand",
1146 map__rip_2objdump(map, sym->start),
1147 map__rip_2objdump(map, sym->end),
1148 symfs_filename, filename);
1150 pr_debug("Executing: %s\n", command);
1152 file = popen(command, "r");
1153 if (!file)
1154 goto out_free_filename;
1156 while (!feof(file))
1157 if (hist_entry__parse_objdump_line(self, file, head, privsize) < 0)
1158 break;
1160 pclose(file);
1161 out_free_filename:
1162 if (free_filename)
1163 free(filename);
1164 return err;
1167 void hists__inc_nr_events(struct hists *self, u32 type)
1169 ++self->stats.nr_events[0];
1170 ++self->stats.nr_events[type];
1173 size_t hists__fprintf_nr_events(struct hists *self, FILE *fp)
1175 int i;
1176 size_t ret = 0;
1178 for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
1179 const char *name = event__get_event_name(i);
1181 if (!strcmp(name, "UNKNOWN"))
1182 continue;
1184 ret += fprintf(fp, "%16s events: %10d\n", name,
1185 self->stats.nr_events[i]);
1188 return ret;