4 * Builtin report command: Analyze the perf.data input file,
5 * look up and read DSOs and symbol information and display
6 * a histogram of results, along various sorting keys.
10 #include "util/util.h"
12 #include "util/color.h"
13 #include <linux/list.h>
14 #include "util/cache.h"
15 #include <linux/rbtree.h>
16 #include "util/symbol.h"
17 #include "util/string.h"
18 #include "util/callchain.h"
19 #include "util/strlist.h"
22 #include "util/header.h"
24 #include "util/parse-options.h"
25 #include "util/parse-events.h"
31 static char const *input_name
= "perf.data";
32 static char *vmlinux
= NULL
;
34 static char default_sort_order
[] = "comm,dso,symbol";
35 static char *sort_order
= default_sort_order
;
36 static char *dso_list_str
, *comm_list_str
, *sym_list_str
,
38 static struct strlist
*dso_list
, *comm_list
, *sym_list
;
39 static char *field_sep
;
42 static int show_mask
= SHOW_KERNEL
| SHOW_USER
| SHOW_HV
;
44 static int dump_trace
= 0;
45 #define dprintf(x...) do { if (dump_trace) printf(x); } while (0)
46 #define cdprintf(x...) do { if (dump_trace) color_fprintf(stdout, color, x); } while (0)
49 #define eprintf(x...) do { if (verbose) fprintf(stderr, x); } while (0)
53 static int full_paths
;
54 static int show_nr_samples
;
56 static unsigned long page_size
;
57 static unsigned long mmap_window
= 32;
59 static char default_parent_pattern
[] = "^sys_|^do_page_fault";
60 static char *parent_pattern
= default_parent_pattern
;
61 static regex_t parent_regex
;
63 static int exclude_other
= 1;
65 static char callchain_default_opt
[] = "fractal,0.5";
70 struct callchain_param callchain_param
= {
71 .mode
= CHAIN_GRAPH_REL
,
75 static u64 sample_type
;
78 struct perf_event_header header
;
81 unsigned char __more_data
[];
85 struct perf_event_header header
;
90 char filename
[PATH_MAX
];
94 struct perf_event_header header
;
100 struct perf_event_header header
;
106 struct perf_event_header header
;
112 struct perf_event_header header
;
120 typedef union event_union
{
121 struct perf_event_header header
;
123 struct mmap_event mmap
;
124 struct comm_event comm
;
125 struct fork_event fork
;
126 struct lost_event lost
;
127 struct read_event read
;
130 static int repsep_fprintf(FILE *fp
, const char *fmt
, ...)
137 n
= vfprintf(fp
, fmt
, ap
);
140 n
= vasprintf(&bf
, fmt
, ap
);
144 sep
= strchr(sep
, *field_sep
);
157 static LIST_HEAD(dsos
);
158 static struct dso
*kernel_dso
;
159 static struct dso
*vdso
;
160 static struct dso
*hypervisor_dso
;
162 static void dsos__add(struct dso
*dso
)
164 list_add_tail(&dso
->node
, &dsos
);
167 static struct dso
*dsos__find(const char *name
)
171 list_for_each_entry(pos
, &dsos
, node
)
172 if (strcmp(pos
->name
, name
) == 0)
177 static struct dso
*dsos__findnew(const char *name
)
179 struct dso
*dso
= dsos__find(name
);
185 dso
= dso__new(name
, 0);
189 nr
= dso__load(dso
, NULL
, verbose
);
191 eprintf("Failed to open: %s\n", name
);
195 eprintf("No symbols found in: %s, maybe install a debug package?\n", name
);
206 static void dsos__fprintf(FILE *fp
)
210 list_for_each_entry(pos
, &dsos
, node
)
211 dso__fprintf(pos
, fp
);
214 static struct symbol
*vdso__find_symbol(struct dso
*dso
, u64 ip
)
216 return dso__find_symbol(dso
, ip
);
219 static int load_kernel(void)
223 kernel_dso
= dso__new("[kernel]", 0);
227 err
= dso__load_kernel(kernel_dso
, vmlinux
, NULL
, verbose
, modules
);
229 dso__delete(kernel_dso
);
232 dsos__add(kernel_dso
);
234 vdso
= dso__new("[vdso]", 0);
238 vdso
->find_symbol
= vdso__find_symbol
;
242 hypervisor_dso
= dso__new("[hypervisor]", 0);
245 dsos__add(hypervisor_dso
);
250 static char __cwd
[PATH_MAX
];
251 static char *cwd
= __cwd
;
254 static int strcommon(const char *pathname
)
258 while (n
< cwdlen
&& pathname
[n
] == cwd
[n
])
265 struct list_head node
;
269 u64 (*map_ip
)(struct map
*, u64
);
273 static u64
map__map_ip(struct map
*map
, u64 ip
)
275 return ip
- map
->start
+ map
->pgoff
;
278 static u64
vdso__map_ip(struct map
*map __used
, u64 ip
)
283 static inline int is_anon_memory(const char *filename
)
285 return strcmp(filename
, "//anon") == 0;
288 static struct map
*map__new(struct mmap_event
*event
)
290 struct map
*self
= malloc(sizeof(*self
));
293 const char *filename
= event
->filename
;
294 char newfilename
[PATH_MAX
];
298 int n
= strcommon(filename
);
301 snprintf(newfilename
, sizeof(newfilename
),
302 ".%s", filename
+ n
);
303 filename
= newfilename
;
307 anon
= is_anon_memory(filename
);
310 snprintf(newfilename
, sizeof(newfilename
), "/tmp/perf-%d.map", event
->pid
);
311 filename
= newfilename
;
314 self
->start
= event
->start
;
315 self
->end
= event
->start
+ event
->len
;
316 self
->pgoff
= event
->pgoff
;
318 self
->dso
= dsos__findnew(filename
);
319 if (self
->dso
== NULL
)
322 if (self
->dso
== vdso
|| anon
)
323 self
->map_ip
= vdso__map_ip
;
325 self
->map_ip
= map__map_ip
;
333 static struct map
*map__clone(struct map
*self
)
335 struct map
*map
= malloc(sizeof(*self
));
340 memcpy(map
, self
, sizeof(*self
));
345 static int map__overlap(struct map
*l
, struct map
*r
)
347 if (l
->start
> r
->start
) {
353 if (l
->end
> r
->start
)
359 static size_t map__fprintf(struct map
*self
, FILE *fp
)
361 return fprintf(fp
, " %Lx-%Lx %Lx %s\n",
362 self
->start
, self
->end
, self
->pgoff
, self
->dso
->name
);
367 struct rb_node rb_node
;
368 struct list_head maps
;
373 static struct thread
*thread__new(pid_t pid
)
375 struct thread
*self
= malloc(sizeof(*self
));
379 self
->comm
= malloc(32);
381 snprintf(self
->comm
, 32, ":%d", self
->pid
);
382 INIT_LIST_HEAD(&self
->maps
);
388 static unsigned int dsos__col_width
,
392 static int thread__set_comm(struct thread
*self
, const char *comm
)
396 self
->comm
= strdup(comm
);
400 if (!col_width_list_str
&& !field_sep
&&
401 (!comm_list
|| strlist__has_entry(comm_list
, comm
))) {
402 unsigned int slen
= strlen(comm
);
403 if (slen
> comms__col_width
) {
404 comms__col_width
= slen
;
405 threads__col_width
= slen
+ 6;
412 static size_t thread__fprintf(struct thread
*self
, FILE *fp
)
415 size_t ret
= fprintf(fp
, "Thread %d %s\n", self
->pid
, self
->comm
);
417 list_for_each_entry(pos
, &self
->maps
, node
)
418 ret
+= map__fprintf(pos
, fp
);
424 static struct rb_root threads
;
425 static struct thread
*last_match
;
427 static struct thread
*threads__findnew(pid_t pid
)
429 struct rb_node
**p
= &threads
.rb_node
;
430 struct rb_node
*parent
= NULL
;
434	 * Front-end cache - PID lookups come in blocks,
435	 * so most of the time we don't have to look up
438 if (last_match
&& last_match
->pid
== pid
)
443 th
= rb_entry(parent
, struct thread
, rb_node
);
445 if (th
->pid
== pid
) {
456 th
= thread__new(pid
);
458 rb_link_node(&th
->rb_node
, parent
, p
);
459 rb_insert_color(&th
->rb_node
, &threads
);
466 static void thread__insert_map(struct thread
*self
, struct map
*map
)
468 struct map
*pos
, *tmp
;
470 list_for_each_entry_safe(pos
, tmp
, &self
->maps
, node
) {
471 if (map__overlap(pos
, map
)) {
473 printf("overlapping maps:\n");
474 map__fprintf(map
, stdout
);
475 map__fprintf(pos
, stdout
);
478 if (map
->start
<= pos
->start
&& map
->end
> pos
->start
)
479 pos
->start
= map
->end
;
481 if (map
->end
>= pos
->end
&& map
->start
< pos
->end
)
482 pos
->end
= map
->start
;
485 printf("after collision:\n");
486 map__fprintf(pos
, stdout
);
489 if (pos
->start
>= pos
->end
) {
490 list_del_init(&pos
->node
);
496 list_add_tail(&map
->node
, &self
->maps
);
499 static int thread__fork(struct thread
*self
, struct thread
*parent
)
505 self
->comm
= strdup(parent
->comm
);
509 list_for_each_entry(map
, &parent
->maps
, node
) {
510 struct map
*new = map__clone(map
);
513 thread__insert_map(self
, new);
519 static struct map
*thread__find_map(struct thread
*self
, u64 ip
)
526 list_for_each_entry(pos
, &self
->maps
, node
)
527 if (ip
>= pos
->start
&& ip
<= pos
->end
)
533 static size_t threads__fprintf(FILE *fp
)
538 for (nd
= rb_first(&threads
); nd
; nd
= rb_next(nd
)) {
539 struct thread
*pos
= rb_entry(nd
, struct thread
, rb_node
);
541 ret
+= thread__fprintf(pos
, fp
);
548 * histogram, sorted on item, collects counts
551 static struct rb_root hist
;
554 struct rb_node rb_node
;
556 struct thread
*thread
;
560 struct symbol
*parent
;
563 struct callchain_node callchain
;
564 struct rb_root sorted_chain
;
570 * configurable sorting bits
574 struct list_head list
;
578 int64_t (*cmp
)(struct hist_entry
*, struct hist_entry
*);
579 int64_t (*collapse
)(struct hist_entry
*, struct hist_entry
*);
580 size_t (*print
)(FILE *fp
, struct hist_entry
*, unsigned int width
);
585 static int64_t cmp_null(void *l
, void *r
)
598 sort__thread_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
600 return right
->thread
->pid
- left
->thread
->pid
;
604 sort__thread_print(FILE *fp
, struct hist_entry
*self
, unsigned int width
)
606 return repsep_fprintf(fp
, "%*s:%5d", width
- 6,
607 self
->thread
->comm
?: "", self
->thread
->pid
);
610 static struct sort_entry sort_thread
= {
611 .header
= "Command: Pid",
612 .cmp
= sort__thread_cmp
,
613 .print
= sort__thread_print
,
614 .width
= &threads__col_width
,
620 sort__comm_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
622 return right
->thread
->pid
- left
->thread
->pid
;
626 sort__comm_collapse(struct hist_entry
*left
, struct hist_entry
*right
)
628 char *comm_l
= left
->thread
->comm
;
629 char *comm_r
= right
->thread
->comm
;
631 if (!comm_l
|| !comm_r
)
632 return cmp_null(comm_l
, comm_r
);
634 return strcmp(comm_l
, comm_r
);
638 sort__comm_print(FILE *fp
, struct hist_entry
*self
, unsigned int width
)
640 return repsep_fprintf(fp
, "%*s", width
, self
->thread
->comm
);
643 static struct sort_entry sort_comm
= {
645 .cmp
= sort__comm_cmp
,
646 .collapse
= sort__comm_collapse
,
647 .print
= sort__comm_print
,
648 .width
= &comms__col_width
,
654 sort__dso_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
656 struct dso
*dso_l
= left
->dso
;
657 struct dso
*dso_r
= right
->dso
;
659 if (!dso_l
|| !dso_r
)
660 return cmp_null(dso_l
, dso_r
);
662 return strcmp(dso_l
->name
, dso_r
->name
);
666 sort__dso_print(FILE *fp
, struct hist_entry
*self
, unsigned int width
)
669 return repsep_fprintf(fp
, "%-*s", width
, self
->dso
->name
);
671 return repsep_fprintf(fp
, "%*llx", width
, (u64
)self
->ip
);
674 static struct sort_entry sort_dso
= {
675 .header
= "Shared Object",
676 .cmp
= sort__dso_cmp
,
677 .print
= sort__dso_print
,
678 .width
= &dsos__col_width
,
684 sort__sym_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
688 if (left
->sym
== right
->sym
)
691 ip_l
= left
->sym
? left
->sym
->start
: left
->ip
;
692 ip_r
= right
->sym
? right
->sym
->start
: right
->ip
;
694 return (int64_t)(ip_r
- ip_l
);
698 sort__sym_print(FILE *fp
, struct hist_entry
*self
, unsigned int width __used
)
703 ret
+= repsep_fprintf(fp
, "%#018llx %c ", (u64
)self
->ip
,
704 dso__symtab_origin(self
->dso
));
706 ret
+= repsep_fprintf(fp
, "[%c] ", self
->level
);
708 ret
+= repsep_fprintf(fp
, "%s", self
->sym
->name
);
710 if (self
->sym
->module
)
711 ret
+= repsep_fprintf(fp
, "\t[%s]",
712 self
->sym
->module
->name
);
714 ret
+= repsep_fprintf(fp
, "%#016llx", (u64
)self
->ip
);
720 static struct sort_entry sort_sym
= {
722 .cmp
= sort__sym_cmp
,
723 .print
= sort__sym_print
,
729 sort__parent_cmp(struct hist_entry
*left
, struct hist_entry
*right
)
731 struct symbol
*sym_l
= left
->parent
;
732 struct symbol
*sym_r
= right
->parent
;
734 if (!sym_l
|| !sym_r
)
735 return cmp_null(sym_l
, sym_r
);
737 return strcmp(sym_l
->name
, sym_r
->name
);
741 sort__parent_print(FILE *fp
, struct hist_entry
*self
, unsigned int width
)
743 return repsep_fprintf(fp
, "%-*s", width
,
744 self
->parent
? self
->parent
->name
: "[other]");
747 static unsigned int parent_symbol__col_width
;
749 static struct sort_entry sort_parent
= {
750 .header
= "Parent symbol",
751 .cmp
= sort__parent_cmp
,
752 .print
= sort__parent_print
,
753 .width
= &parent_symbol__col_width
,
756 static int sort__need_collapse
= 0;
757 static int sort__has_parent
= 0;
759 struct sort_dimension
{
761 struct sort_entry
*entry
;
765 static struct sort_dimension sort_dimensions
[] = {
766 { .name
= "pid", .entry
= &sort_thread
, },
767 { .name
= "comm", .entry
= &sort_comm
, },
768 { .name
= "dso", .entry
= &sort_dso
, },
769 { .name
= "symbol", .entry
= &sort_sym
, },
770 { .name
= "parent", .entry
= &sort_parent
, },
773 static LIST_HEAD(hist_entry__sort_list
);
775 static int sort_dimension__add(char *tok
)
779 for (i
= 0; i
< ARRAY_SIZE(sort_dimensions
); i
++) {
780 struct sort_dimension
*sd
= &sort_dimensions
[i
];
785 if (strncasecmp(tok
, sd
->name
, strlen(tok
)))
788 if (sd
->entry
->collapse
)
789 sort__need_collapse
= 1;
791 if (sd
->entry
== &sort_parent
) {
792 int ret
= regcomp(&parent_regex
, parent_pattern
, REG_EXTENDED
);
796 regerror(ret
, &parent_regex
, err
, sizeof(err
));
797 fprintf(stderr
, "Invalid regex: %s\n%s",
798 parent_pattern
, err
);
801 sort__has_parent
= 1;
804 list_add_tail(&sd
->entry
->list
, &hist_entry__sort_list
);
814 hist_entry__cmp(struct hist_entry
*left
, struct hist_entry
*right
)
816 struct sort_entry
*se
;
819 list_for_each_entry(se
, &hist_entry__sort_list
, list
) {
820 cmp
= se
->cmp(left
, right
);
829 hist_entry__collapse(struct hist_entry
*left
, struct hist_entry
*right
)
831 struct sort_entry
*se
;
834 list_for_each_entry(se
, &hist_entry__sort_list
, list
) {
835 int64_t (*f
)(struct hist_entry
*, struct hist_entry
*);
837 f
= se
->collapse
?: se
->cmp
;
839 cmp
= f(left
, right
);
847 static size_t ipchain__fprintf_graph_line(FILE *fp
, int depth
, int depth_mask
)
852 ret
+= fprintf(fp
, "%s", " ");
854 for (i
= 0; i
< depth
; i
++)
855 if (depth_mask
& (1 << i
))
856 ret
+= fprintf(fp
, "| ");
858 ret
+= fprintf(fp
, " ");
860 ret
+= fprintf(fp
, "\n");
865 ipchain__fprintf_graph(FILE *fp
, struct callchain_list
*chain
, int depth
,
866 int depth_mask
, int count
, u64 total_samples
,
872 ret
+= fprintf(fp
, "%s", " ");
873 for (i
= 0; i
< depth
; i
++) {
874 if (depth_mask
& (1 << i
))
875 ret
+= fprintf(fp
, "|");
877 ret
+= fprintf(fp
, " ");
878 if (!count
&& i
== depth
- 1) {
881 percent
= hits
* 100.0 / total_samples
;
882 ret
+= percent_color_fprintf(fp
, "--%2.2f%%-- ", percent
);
884 ret
+= fprintf(fp
, "%s", " ");
887 ret
+= fprintf(fp
, "%s\n", chain
->sym
->name
);
889 ret
+= fprintf(fp
, "%p\n", (void *)(long)chain
->ip
);
894 static struct symbol
*rem_sq_bracket
;
895 static struct callchain_list rem_hits
;
897 static void init_rem_hits(void)
899 rem_sq_bracket
= malloc(sizeof(*rem_sq_bracket
) + 6);
900 if (!rem_sq_bracket
) {
901 fprintf(stderr
, "Not enough memory to display remaining hits\n");
905 strcpy(rem_sq_bracket
->name
, "[...]");
906 rem_hits
.sym
= rem_sq_bracket
;
910 callchain__fprintf_graph(FILE *fp
, struct callchain_node
*self
,
911 u64 total_samples
, int depth
, int depth_mask
)
913 struct rb_node
*node
, *next
;
914 struct callchain_node
*child
;
915 struct callchain_list
*chain
;
916 int new_depth_mask
= depth_mask
;
922 if (callchain_param
.mode
== CHAIN_GRAPH_REL
)
923 new_total
= self
->children_hit
;
925 new_total
= total_samples
;
927 remaining
= new_total
;
929 node
= rb_first(&self
->rb_root
);
933 child
= rb_entry(node
, struct callchain_node
, rb_node
);
934 cumul
= cumul_hits(child
);
938 * The depth mask manages the output of pipes that show
939 * the depth. We don't want to keep the pipes of the current
940 * level for the last child of this depth.
941 * Except if we have remaining filtered hits. They will
942 * supersede the last child
944 next
= rb_next(node
);
945 if (!next
&& (callchain_param
.mode
!= CHAIN_GRAPH_REL
|| !remaining
))
946 new_depth_mask
&= ~(1 << (depth
- 1));
949	 * But we keep the older depth mask for the line separator
950 * to keep the level link until we reach the last child
952 ret
+= ipchain__fprintf_graph_line(fp
, depth
, depth_mask
);
954 list_for_each_entry(chain
, &child
->val
, list
) {
955 if (chain
->ip
>= PERF_CONTEXT_MAX
)
957 ret
+= ipchain__fprintf_graph(fp
, chain
, depth
,
962 ret
+= callchain__fprintf_graph(fp
, child
, new_total
,
964 new_depth_mask
| (1 << depth
));
968 if (callchain_param
.mode
== CHAIN_GRAPH_REL
&&
969 remaining
&& remaining
!= new_total
) {
974 new_depth_mask
&= ~(1 << (depth
- 1));
976 ret
+= ipchain__fprintf_graph(fp
, &rem_hits
, depth
,
977 new_depth_mask
, 0, new_total
,
985 callchain__fprintf_flat(FILE *fp
, struct callchain_node
*self
,
988 struct callchain_list
*chain
;
994 ret
+= callchain__fprintf_flat(fp
, self
->parent
, total_samples
);
997 list_for_each_entry(chain
, &self
->val
, list
) {
998 if (chain
->ip
>= PERF_CONTEXT_MAX
)
1001 ret
+= fprintf(fp
, " %s\n", chain
->sym
->name
);
1003 ret
+= fprintf(fp
, " %p\n",
1004 (void *)(long)chain
->ip
);
1011 hist_entry_callchain__fprintf(FILE *fp
, struct hist_entry
*self
,
1014 struct rb_node
*rb_node
;
1015 struct callchain_node
*chain
;
1018 rb_node
= rb_first(&self
->sorted_chain
);
1022 chain
= rb_entry(rb_node
, struct callchain_node
, rb_node
);
1023 percent
= chain
->hit
* 100.0 / total_samples
;
1024 switch (callchain_param
.mode
) {
1026 ret
+= percent_color_fprintf(fp
, " %6.2f%%\n",
1028 ret
+= callchain__fprintf_flat(fp
, chain
, total_samples
);
1030 case CHAIN_GRAPH_ABS
: /* Falldown */
1031 case CHAIN_GRAPH_REL
:
1032 ret
+= callchain__fprintf_graph(fp
, chain
,
1033 total_samples
, 1, 1);
1037 ret
+= fprintf(fp
, "\n");
1038 rb_node
= rb_next(rb_node
);
1046 hist_entry__fprintf(FILE *fp
, struct hist_entry
*self
, u64 total_samples
)
1048 struct sort_entry
*se
;
1051 if (exclude_other
&& !self
->parent
)
1055 ret
= percent_color_fprintf(fp
,
1056 field_sep
? "%.2f" : " %6.2f%%",
1057 (self
->count
* 100.0) / total_samples
);
1059 ret
= fprintf(fp
, field_sep
? "%lld" : "%12lld ", self
->count
);
1061 if (show_nr_samples
) {
1063 fprintf(fp
, "%c%lld", *field_sep
, self
->count
);
1065 fprintf(fp
, "%11lld", self
->count
);
1068 list_for_each_entry(se
, &hist_entry__sort_list
, list
) {
1072 fprintf(fp
, "%s", field_sep
?: " ");
1073 ret
+= se
->print(fp
, self
, se
->width
? *se
->width
: 0);
1076 ret
+= fprintf(fp
, "\n");
1079 hist_entry_callchain__fprintf(fp
, self
, total_samples
);
1088 static void dso__calc_col_width(struct dso
*self
)
1090 if (!col_width_list_str
&& !field_sep
&&
1091 (!dso_list
|| strlist__has_entry(dso_list
, self
->name
))) {
1092 unsigned int slen
= strlen(self
->name
);
1093 if (slen
> dsos__col_width
)
1094 dsos__col_width
= slen
;
1097 self
->slen_calculated
= 1;
1100 static struct symbol
*
1101 resolve_symbol(struct thread
*thread
, struct map
**mapp
,
1102 struct dso
**dsop
, u64
*ipp
)
1104 struct dso
*dso
= dsop
? *dsop
: NULL
;
1105 struct map
*map
= mapp
? *mapp
: NULL
;
1117 map
= thread__find_map(thread
, ip
);
1120 * We have to do this here as we may have a dso
1121 * with no symbol hit that has a name longer than
1122 * the ones with symbols sampled.
1124 if (!sort_dso
.elide
&& !map
->dso
->slen_calculated
)
1125 dso__calc_col_width(map
->dso
);
1130 ip
= map
->map_ip(map
, ip
);
1135 * If this is outside of all known maps,
1136 * and is a negative address, try to look it
1137 * up in the kernel dso, as it might be a
1138 * vsyscall (which executes in user-mode):
1140 if ((long long)ip
< 0)
1143 dprintf(" ...... dso: %s\n", dso
? dso
->name
: "<not found>");
1144 dprintf(" ...... map: %Lx -> %Lx\n", *ipp
, ip
);
1153 return dso
->find_symbol(dso
, ip
);
1156 static int call__match(struct symbol
*sym
)
1158 if (sym
->name
&& !regexec(&parent_regex
, sym
->name
, 0, NULL
, 0))
1164 static struct symbol
**
1165 resolve_callchain(struct thread
*thread
, struct map
*map __used
,
1166 struct ip_callchain
*chain
, struct hist_entry
*entry
)
1168 u64 context
= PERF_CONTEXT_MAX
;
1169 struct symbol
**syms
= NULL
;
1173 syms
= calloc(chain
->nr
, sizeof(*syms
));
1175 fprintf(stderr
, "Can't allocate memory for symbols\n");
1180 for (i
= 0; i
< chain
->nr
; i
++) {
1181 u64 ip
= chain
->ips
[i
];
1182 struct dso
*dso
= NULL
;
1185 if (ip
>= PERF_CONTEXT_MAX
) {
1191 case PERF_CONTEXT_HV
:
1192 dso
= hypervisor_dso
;
1194 case PERF_CONTEXT_KERNEL
:
1201 sym
= resolve_symbol(thread
, NULL
, &dso
, &ip
);
1204 if (sort__has_parent
&& call__match(sym
) &&
1206 entry
->parent
= sym
;
1217 * collect histogram counts
1221 hist_entry__add(struct thread
*thread
, struct map
*map
, struct dso
*dso
,
1222 struct symbol
*sym
, u64 ip
, struct ip_callchain
*chain
,
1223 char level
, u64 count
)
1225 struct rb_node
**p
= &hist
.rb_node
;
1226 struct rb_node
*parent
= NULL
;
1227 struct hist_entry
*he
;
1228 struct symbol
**syms
= NULL
;
1229 struct hist_entry entry
= {
1238 .sorted_chain
= RB_ROOT
1242 if ((sort__has_parent
|| callchain
) && chain
)
1243 syms
= resolve_callchain(thread
, map
, chain
, &entry
);
1245 while (*p
!= NULL
) {
1247 he
= rb_entry(parent
, struct hist_entry
, rb_node
);
1249 cmp
= hist_entry__cmp(&entry
, he
);
1254 append_chain(&he
->callchain
, chain
, syms
);
1263 p
= &(*p
)->rb_right
;
1266 he
= malloc(sizeof(*he
));
1271 callchain_init(&he
->callchain
);
1272 append_chain(&he
->callchain
, chain
, syms
);
1275 rb_link_node(&he
->rb_node
, parent
, p
);
1276 rb_insert_color(&he
->rb_node
, &hist
);
1281 static void hist_entry__free(struct hist_entry
*he
)
1287 * collapse the histogram
1290 static struct rb_root collapse_hists
;
1292 static void collapse__insert_entry(struct hist_entry
*he
)
1294 struct rb_node
**p
= &collapse_hists
.rb_node
;
1295 struct rb_node
*parent
= NULL
;
1296 struct hist_entry
*iter
;
1299 while (*p
!= NULL
) {
1301 iter
= rb_entry(parent
, struct hist_entry
, rb_node
);
1303 cmp
= hist_entry__collapse(iter
, he
);
1306 iter
->count
+= he
->count
;
1307 hist_entry__free(he
);
1314 p
= &(*p
)->rb_right
;
1317 rb_link_node(&he
->rb_node
, parent
, p
);
1318 rb_insert_color(&he
->rb_node
, &collapse_hists
);
1321 static void collapse__resort(void)
1323 struct rb_node
*next
;
1324 struct hist_entry
*n
;
1326 if (!sort__need_collapse
)
1329 next
= rb_first(&hist
);
1331 n
= rb_entry(next
, struct hist_entry
, rb_node
);
1332 next
= rb_next(&n
->rb_node
);
1334 rb_erase(&n
->rb_node
, &hist
);
1335 collapse__insert_entry(n
);
1340 * reverse the map, sort on count.
1343 static struct rb_root output_hists
;
1345 static void output__insert_entry(struct hist_entry
*he
, u64 min_callchain_hits
)
1347 struct rb_node
**p
= &output_hists
.rb_node
;
1348 struct rb_node
*parent
= NULL
;
1349 struct hist_entry
*iter
;
1352 callchain_param
.sort(&he
->sorted_chain
, &he
->callchain
,
1353 min_callchain_hits
, &callchain_param
);
1355 while (*p
!= NULL
) {
1357 iter
= rb_entry(parent
, struct hist_entry
, rb_node
);
1359 if (he
->count
> iter
->count
)
1362 p
= &(*p
)->rb_right
;
1365 rb_link_node(&he
->rb_node
, parent
, p
);
1366 rb_insert_color(&he
->rb_node
, &output_hists
);
1369 static void output__resort(u64 total_samples
)
1371 struct rb_node
*next
;
1372 struct hist_entry
*n
;
1373 struct rb_root
*tree
= &hist
;
1374 u64 min_callchain_hits
;
1376 min_callchain_hits
= total_samples
* (callchain_param
.min_percent
/ 100);
1378 if (sort__need_collapse
)
1379 tree
= &collapse_hists
;
1381 next
= rb_first(tree
);
1384 n
= rb_entry(next
, struct hist_entry
, rb_node
);
1385 next
= rb_next(&n
->rb_node
);
1387 rb_erase(&n
->rb_node
, tree
);
1388 output__insert_entry(n
, min_callchain_hits
);
1392 static size_t output__fprintf(FILE *fp
, u64 total_samples
)
1394 struct hist_entry
*pos
;
1395 struct sort_entry
*se
;
1399 char *col_width
= col_width_list_str
;
1403 fprintf(fp
, "# Samples: %Ld\n", (u64
)total_samples
);
1406 fprintf(fp
, "# Overhead");
1407 if (show_nr_samples
) {
1409 fprintf(fp
, "%cSamples", *field_sep
);
1411 fputs(" Samples ", fp
);
1413 list_for_each_entry(se
, &hist_entry__sort_list
, list
) {
1417 fprintf(fp
, "%c%s", *field_sep
, se
->header
);
1420 width
= strlen(se
->header
);
1422 if (col_width_list_str
) {
1424 *se
->width
= atoi(col_width
);
1425 col_width
= strchr(col_width
, ',');
1430 width
= *se
->width
= max(*se
->width
, width
);
1432 fprintf(fp
, " %*s", width
, se
->header
);
1439 fprintf(fp
, "# ........");
1440 if (show_nr_samples
)
1441 fprintf(fp
, " ..........");
1442 list_for_each_entry(se
, &hist_entry__sort_list
, list
) {
1452 width
= strlen(se
->header
);
1453 for (i
= 0; i
< width
; i
++)
1461 for (nd
= rb_first(&output_hists
); nd
; nd
= rb_next(nd
)) {
1462 pos
= rb_entry(nd
, struct hist_entry
, rb_node
);
1463 ret
+= hist_entry__fprintf(fp
, pos
, total_samples
);
1466 if (sort_order
== default_sort_order
&&
1467 parent_pattern
== default_parent_pattern
) {
1469 fprintf(fp
, "# (For a higher level overview, try: perf report --sort comm,dso)\n");
1474 free(rem_sq_bracket
);
1479 static void register_idle_thread(void)
1481 struct thread
*thread
= threads__findnew(0);
1483 if (thread
== NULL
||
1484 thread__set_comm(thread
, "[idle]")) {
1485 fprintf(stderr
, "problem inserting idle task.\n");
1490 static unsigned long total
= 0,
1497 static int validate_chain(struct ip_callchain
*chain
, event_t
*event
)
1499 unsigned int chain_size
;
1501 chain_size
= event
->header
.size
;
1502 chain_size
-= (unsigned long)&event
->ip
.__more_data
- (unsigned long)event
;
1504 if (chain
->nr
*sizeof(u64
) > chain_size
)
1511 process_sample_event(event_t
*event
, unsigned long offset
, unsigned long head
)
1515 struct dso
*dso
= NULL
;
1516 struct thread
*thread
= threads__findnew(event
->ip
.pid
);
1517 u64 ip
= event
->ip
.ip
;
1519 struct map
*map
= NULL
;
1520 void *more_data
= event
->ip
.__more_data
;
1521 struct ip_callchain
*chain
= NULL
;
1524 if (sample_type
& PERF_SAMPLE_PERIOD
) {
1525 period
= *(u64
*)more_data
;
1526 more_data
+= sizeof(u64
);
1529 dprintf("%p [%p]: PERF_EVENT_SAMPLE (IP, %d): %d: %p period: %Ld\n",
1530 (void *)(offset
+ head
),
1531 (void *)(long)(event
->header
.size
),
1537 if (sample_type
& PERF_SAMPLE_CALLCHAIN
) {
1540 chain
= (void *)more_data
;
1542 dprintf("... chain: nr:%Lu\n", chain
->nr
);
1544 if (validate_chain(chain
, event
) < 0) {
1545 eprintf("call-chain problem with event, skipping it.\n");
1550 for (i
= 0; i
< chain
->nr
; i
++)
1551 dprintf("..... %2d: %016Lx\n", i
, chain
->ips
[i
]);
1555 dprintf(" ... thread: %s:%d\n", thread
->comm
, thread
->pid
);
1557 if (thread
== NULL
) {
1558 eprintf("problem processing %d event, skipping it.\n",
1559 event
->header
.type
);
1563 if (comm_list
&& !strlist__has_entry(comm_list
, thread
->comm
))
1566 cpumode
= event
->header
.misc
& PERF_EVENT_MISC_CPUMODE_MASK
;
1568 if (cpumode
== PERF_EVENT_MISC_KERNEL
) {
1574 dprintf(" ...... dso: %s\n", dso
->name
);
1576 } else if (cpumode
== PERF_EVENT_MISC_USER
) {
1585 dso
= hypervisor_dso
;
1587 dprintf(" ...... dso: [hypervisor]\n");
1590 if (show
& show_mask
) {
1591 struct symbol
*sym
= resolve_symbol(thread
, &map
, &dso
, &ip
);
1593 if (dso_list
&& dso
&& dso
->name
&& !strlist__has_entry(dso_list
, dso
->name
))
1596 if (sym_list
&& sym
&& !strlist__has_entry(sym_list
, sym
->name
))
1599 if (hist_entry__add(thread
, map
, dso
, sym
, ip
, chain
, level
, period
)) {
1600 eprintf("problem incrementing symbol count, skipping event\n");
1610 process_mmap_event(event_t
*event
, unsigned long offset
, unsigned long head
)
1612 struct thread
*thread
= threads__findnew(event
->mmap
.pid
);
1613 struct map
*map
= map__new(&event
->mmap
);
1615 dprintf("%p [%p]: PERF_EVENT_MMAP %d: [%p(%p) @ %p]: %s\n",
1616 (void *)(offset
+ head
),
1617 (void *)(long)(event
->header
.size
),
1619 (void *)(long)event
->mmap
.start
,
1620 (void *)(long)event
->mmap
.len
,
1621 (void *)(long)event
->mmap
.pgoff
,
1622 event
->mmap
.filename
);
1624 if (thread
== NULL
|| map
== NULL
) {
1625 dprintf("problem processing PERF_EVENT_MMAP, skipping event.\n");
1629 thread__insert_map(thread
, map
);
1636 process_comm_event(event_t
*event
, unsigned long offset
, unsigned long head
)
1638 struct thread
*thread
= threads__findnew(event
->comm
.pid
);
1640 dprintf("%p [%p]: PERF_EVENT_COMM: %s:%d\n",
1641 (void *)(offset
+ head
),
1642 (void *)(long)(event
->header
.size
),
1643 event
->comm
.comm
, event
->comm
.pid
);
1645 if (thread
== NULL
||
1646 thread__set_comm(thread
, event
->comm
.comm
)) {
1647 dprintf("problem processing PERF_EVENT_COMM, skipping event.\n");
1656 process_task_event(event_t
*event
, unsigned long offset
, unsigned long head
)
1658 struct thread
*thread
= threads__findnew(event
->fork
.pid
);
1659 struct thread
*parent
= threads__findnew(event
->fork
.ppid
);
1661 dprintf("%p [%p]: PERF_EVENT_%s: (%d:%d):(%d:%d)\n",
1662 (void *)(offset
+ head
),
1663 (void *)(long)(event
->header
.size
),
1664 event
->header
.type
== PERF_EVENT_FORK
? "FORK" : "EXIT",
1665 event
->fork
.pid
, event
->fork
.tid
,
1666 event
->fork
.ppid
, event
->fork
.ptid
);
1669 * A thread clone will have the same PID for both
1672 if (thread
== parent
)
1675 if (event
->header
.type
== PERF_EVENT_EXIT
)
1678 if (!thread
|| !parent
|| thread__fork(thread
, parent
)) {
1679 dprintf("problem processing PERF_EVENT_FORK, skipping event.\n");
1688 process_lost_event(event_t
*event
, unsigned long offset
, unsigned long head
)
1690 dprintf("%p [%p]: PERF_EVENT_LOST: id:%Ld: lost:%Ld\n",
1691 (void *)(offset
+ head
),
1692 (void *)(long)(event
->header
.size
),
1696 total_lost
+= event
->lost
.lost
;
1701 static void trace_event(event_t
*event
)
1703 unsigned char *raw_event
= (void *)event
;
1704 char *color
= PERF_COLOR_BLUE
;
1711 cdprintf("\n. ... raw event: size %d bytes\n", event
->header
.size
);
1713 for (i
= 0; i
< event
->header
.size
; i
++) {
1714 if ((i
& 15) == 0) {
1716 cdprintf(" %04x: ", i
);
1719 cdprintf(" %02x", raw_event
[i
]);
1721 if (((i
& 15) == 15) || i
== event
->header
.size
-1) {
1723 for (j
= 0; j
< 15-(i
& 15); j
++)
1725 for (j
= 0; j
< (i
& 15); j
++) {
1726 if (isprint(raw_event
[i
-15+j
]))
1727 cdprintf("%c", raw_event
[i
-15+j
]);
1737 static struct perf_header
*header
;
1739 static struct perf_counter_attr
*perf_header__find_attr(u64 id
)
1743 for (i
= 0; i
< header
->attrs
; i
++) {
1744 struct perf_header_attr
*attr
= header
->attr
[i
];
1747 for (j
= 0; j
< attr
->ids
; j
++) {
1748 if (attr
->id
[j
] == id
)
1757 process_read_event(event_t
*event
, unsigned long offset
, unsigned long head
)
1759 struct perf_counter_attr
*attr
= perf_header__find_attr(event
->read
.id
);
1761 dprintf("%p [%p]: PERF_EVENT_READ: %d %d %s %Lu\n",
1762 (void *)(offset
+ head
),
1763 (void *)(long)(event
->header
.size
),
1766 attr
? __event_name(attr
->type
, attr
->config
)
1774 process_event(event_t
*event
, unsigned long offset
, unsigned long head
)
1778 switch (event
->header
.type
) {
1779 case PERF_EVENT_SAMPLE
:
1780 return process_sample_event(event
, offset
, head
);
1782 case PERF_EVENT_MMAP
:
1783 return process_mmap_event(event
, offset
, head
);
1785 case PERF_EVENT_COMM
:
1786 return process_comm_event(event
, offset
, head
);
1788 case PERF_EVENT_FORK
:
1789 case PERF_EVENT_EXIT
:
1790 return process_task_event(event
, offset
, head
);
1792 case PERF_EVENT_LOST
:
1793 return process_lost_event(event
, offset
, head
);
1795 case PERF_EVENT_READ
:
1796 return process_read_event(event
, offset
, head
);
1799	 * We don't process them right now but they are fine:
1802 case PERF_EVENT_THROTTLE
:
1803 case PERF_EVENT_UNTHROTTLE
:
1813 static u64
perf_header__sample_type(void)
1815 u64 sample_type
= 0;
1818 for (i
= 0; i
< header
->attrs
; i
++) {
1819 struct perf_header_attr
*attr
= header
->attr
[i
];
1822 sample_type
= attr
->attr
.sample_type
;
1823 else if (sample_type
!= attr
->attr
.sample_type
)
1824 die("non matching sample_type");
/*
 * __cmd_report - main work loop of 'perf report'.
 *
 * Opens and validates the perf.data input, reads its header, sanity
 * checks the recorded sample_type against the requested callchain /
 * --sort parent options, loads kernel symbols, then walks the data
 * section through a sliding mmap window, dispatching each record via
 * process_event().  Finally dumps per-type totals (in dump mode) and
 * prints the sorted histogram.
 *
 * NOTE(review): many lines (braces, gotos, remap logic, returns) are
 * missing from this extraction; the visible fragments are kept verbatim
 * and the flow described here is inferred -- confirm against the full
 * source.
 */
1830 static int __cmd_report(void)
1832 int ret
, rc
= EXIT_FAILURE
;
1833 unsigned long offset
= 0;
1834 unsigned long head
, shift
;
/* Make sure pid 0 has a thread entry before any samples arrive. */
1840 register_idle_thread();
/* Open the input file (default "perf.data", see -i). */
1842 input
= open(input_name
, O_RDONLY
);
1844 fprintf(stderr
, " failed to open file: %s", input_name
);
/* Hint at 'perf record' only when the default filename was used. */
1845 if (!strcmp(input_name
, "perf.data"))
1846 fprintf(stderr
, " (try 'perf record' first)");
1847 fprintf(stderr
, "\n");
1851 ret
= fstat(input
, &stat
);
1853 perror("failed to stat file");
/* An empty data file means nothing was recorded. */
1857 if (!stat
.st_size
) {
1858 fprintf(stderr
, "zero-sized file, nothing to do!\n");
/* Parse the file header; data records start at data_offset. */
1862 header
= perf_header__read(input
);
1863 head
= header
->data_offset
;
1865 sample_type
= perf_header__sample_type();
/* Callchain-dependent options need PERF_SAMPLE_CALLCHAIN data. */
1867 if (!(sample_type
& PERF_SAMPLE_CALLCHAIN
)) {
1868 if (sort__has_parent
) {
1869 fprintf(stderr
, "selected --sort parent, but no"
1870 " callchain data. Did you call"
1871 " perf record without -g?\n");
1875 fprintf(stderr
, "selected -c but no callchain data."
1876 " Did you call perf record without"
/* Callchain data present: install the (possibly default) params. */
1880 } else if (callchain_param
.mode
!= CHAIN_NONE
&& !callchain
) {
1882 if (register_callchain_param(&callchain_param
) < 0) {
1883 fprintf(stderr
, "Can't register callchain"
1889 if (load_kernel() < 0) {
1890 perror("failed to load kernel symbols");
1891 return EXIT_FAILURE
;
/* Remember the cwd so DSO paths can be shortened (see -P). */
1895 if (getcwd(__cwd
, sizeof(__cwd
)) == NULL
) {
1896 perror("failed to get the current directory");
1897 return EXIT_FAILURE
;
1899 cwdlen
= strlen(cwd
);
/* Align the initial window on a page boundary below 'head'. */
1905 shift
= page_size
* (head
/ page_size
);
/* Map a window of mmap_window pages over the data section. */
1910 buf
= (char *)mmap(NULL
, page_size
* mmap_window
, PROT_READ
,
1911 MAP_SHARED
, input
, offset
);
1912 if (buf
== MAP_FAILED
) {
1913 perror("failed to mmap file");
/* Current record lives at buf + head within the window. */
1918 event
= (event_t
*)(buf
+ head
);
1920 size
= event
->header
.size
;
/* Record would cross the window end: slide the window forward. */
1924 if (head
+ event
->header
.size
>= page_size
* mmap_window
) {
1927 shift
= page_size
* (head
/ page_size
);
1929 ret
= munmap(buf
, page_size
* mmap_window
);
/* Re-read the size after any remap (event pointer moved). */
1937 size
= event
->header
.size
;
1939 dprintf("\n%p [%p]: event: %d\n",
1940 (void *)(offset
+ head
),
1941 (void *)(long)event
->header
.size
,
1942 event
->header
.type
);
/* Unknown/failed record: log it and try to resynchronize. */
1944 if (!size
|| process_event(event
, offset
, head
) < 0) {
1946 dprintf("%p [%p]: skipping unknown header type: %d\n",
1947 (void *)(offset
+ head
),
1948 (void *)(long)(event
->header
.size
),
1949 event
->header
.type
);
1954 * assume we lost track of the stream, check alignment, and
1955 * increment a single u64 in the hope to catch on again 'soon'.
1958 if (unlikely(head
& 7))
/* Stop once we've consumed the whole data section... */
1966 if (offset
+ head
>= header
->data_offset
+ header
->data_size
)
/* ...but keep going while file bytes remain (older files). */
1969 if (offset
+ head
< (unsigned long)stat
.st_size
)
/* Per-type record totals, shown only with --dump-raw-trace. */
1976 dprintf(" IP events: %10ld\n", total
);
1977 dprintf(" mmap events: %10ld\n", total_mmap
);
1978 dprintf(" comm events: %10ld\n", total_comm
);
1979 dprintf(" fork events: %10ld\n", total_fork
);
1980 dprintf(" lost events: %10ld\n", total_lost
);
1981 dprintf(" unknown events: %10ld\n", total_unknown
);
1987 threads__fprintf(stdout
);
1990 dsos__fprintf(stdout
);
/* Re-sort the collected hist entries and print the report. */
1993 output__resort(total
);
1994 output__fprintf(stdout
, total
);
/*
 * parse_callchain_opt - option callback for -g/--call-graph.
 *
 * Parses an "output_type,min_percent" argument (default "fractal,0.5"):
 * the first token selects the callchain display mode, the optional
 * second token the minimum percentage threshold.  Registers the
 * resulting callchain_param.
 *
 * NOTE(review): error paths, returns and some braces are missing from
 * this extraction; fragments kept verbatim.
 */
2000 parse_callchain_opt(const struct option
*opt __used
, const char *arg
,
/* NOTE(review): strtok mutates arg in place -- the const is cast away. */
2011 tok
= strtok((char *)arg
, ",");
2015 /* get the output mode */
2016 if (!strncmp(tok
, "graph", strlen(arg
)))
2017 callchain_param
.mode
= CHAIN_GRAPH_ABS
;
2019 else if (!strncmp(tok
, "flat", strlen(arg
)))
2020 callchain_param
.mode
= CHAIN_FLAT
;
2022 else if (!strncmp(tok
, "fractal", strlen(arg
)))
2023 callchain_param
.mode
= CHAIN_GRAPH_REL
;
2025 else if (!strncmp(tok
, "none", strlen(arg
))) {
2026 callchain_param
.mode
= CHAIN_NONE
;
2035 /* get the min percentage */
2036 tok
= strtok(NULL
, ",");
/* NOTE(review): strtod endptr/error handling not visible here. */
2040 callchain_param
.min_percent
= strtod(tok
, &endptr
);
2045 if (register_callchain_param(&callchain_param
) < 0) {
2046 fprintf(stderr
, "Can't register callchain params\n");
/* Usage string(s) for 'perf report', shown by usage_with_options(). */
2052 static const char * const report_usage
[] = {
2053 "perf report [<options>] <command>",
/*
 * Command-line option table for 'perf report' (parse-options format).
 * Each entry binds a short/long option to one of the file-scope
 * configuration variables declared at the top of this file.
 * NOTE(review): the terminating OPT_END() entry is not visible in this
 * extraction.
 */
2057 static const struct option options
[] = {
2058 OPT_STRING('i', "input", &input_name
, "file",
2060 OPT_BOOLEAN('v', "verbose", &verbose
,
2061 "be more verbose (show symbol address, etc)"),
2062 OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace
,
2063 "dump raw trace in ASCII"),
2064 OPT_STRING('k', "vmlinux", &vmlinux
, "file", "vmlinux pathname"),
2065 OPT_BOOLEAN('m', "modules", &modules
,
2066 "load module symbols - WARNING: use only with -k and LIVE kernel"),
2067 OPT_BOOLEAN('n', "show-nr-samples", &show_nr_samples
,
2068 "Show a column with the number of samples"),
2069 OPT_STRING('s', "sort", &sort_order
, "key[,key2...]",
2070 "sort by key(s): pid, comm, dso, symbol, parent"),
2071 OPT_BOOLEAN('P', "full-paths", &full_paths
,
2072 "Don't shorten the pathnames taking into account the cwd"),
2073 OPT_STRING('p', "parent", &parent_pattern
, "regex",
2074 "regex filter to identify parent, see: '--sort parent'"),
2075 OPT_BOOLEAN('x', "exclude-other", &exclude_other
,
2076 "Only display entries with parent-match"),
2077 OPT_CALLBACK_DEFAULT('g', "call-graph", NULL
, "output_type,min_percent",
2078 "Display callchains using output_type and min percent threshold. "
2079 "Default: fractal,0.5", &parse_callchain_opt
, callchain_default_opt
),
2080 OPT_STRING('d', "dsos", &dso_list_str
, "dso[,dso...]",
2081 "only consider symbols in these dsos"),
2082 OPT_STRING('C', "comms", &comm_list_str
, "comm[,comm...]",
2083 "only consider symbols in these comms"),
2084 OPT_STRING('S', "symbols", &sym_list_str
, "symbol[,symbol...]",
2085 "only consider these symbols"),
2086 OPT_STRING('w', "column-widths", &col_width_list_str
,
2088 "don't try to adjust column width, use these fixed values"),
2089 OPT_STRING('t', "field-separator", &field_sep
, "separator",
2090 "separator for columns, no spaces will be added between "
2091 "columns '.' is reserved."),
/*
 * setup_sorting - install the sort keys requested via -s/--sort.
 *
 * Splits sort_order (default "comm,dso,symbol") on commas/spaces and
 * registers each key with sort_dimension__add(); an unknown key is a
 * usage error.  NOTE(review): the free() of the strdup'd string and
 * closing braces are not visible in this extraction.
 */
2095 static void setup_sorting(void)
/* Work on a private copy: strtok_r mutates the string it scans. */
2097 char *tmp
, *tok
, *str
= strdup(sort_order
);
2099 for (tok
= strtok_r(str
, ", ", &tmp
);
2100 tok
; tok
= strtok_r(NULL
, ", ", &tmp
)) {
2101 if (sort_dimension__add(tok
) < 0) {
2102 error("Unknown --sort key: `%s'", tok
);
/* usage_with_options() does not return. */
2103 usage_with_options(report_usage
, options
);
/*
 * setup_list - build a filter strlist from a -d/-C/-S option string.
 *
 * Parses the comma-separated @list_str into *@list.  When the list has
 * exactly one entry, it is echoed as a "# name: value" report-header
 * line.  NOTE(review): the empty-list early return, the se->elide
 * handling and the fp parameter declaration are not visible in this
 * extraction.
 */
2110 static void setup_list(struct strlist
**list
, const char *list_str
,
2111 struct sort_entry
*se
, const char *list_name
,
/* strlist__new(true, ...) -- presumably dupes the strings; confirm. */
2115 *list
= strlist__new(true, list_str
);
2117 fprintf(stderr
, "problems parsing %s list\n",
/* Single-entry filter: record it in the report header output. */
2121 if (strlist__nr_entries(*list
) == 1) {
2122 fprintf(fp
, "# %s: %s\n", list_name
,
2123 strlist__entry(*list
, 0)->s
);
2129 int cmd_report(int argc
, const char **argv
, const char *prefix __used
)
2133 page_size
= getpagesize();
2135 argc
= parse_options(argc
, argv
, options
, report_usage
, 0);
2139 if (parent_pattern
!= default_parent_pattern
) {
2140 sort_dimension__add("parent");
2141 sort_parent
.elide
= 1;
2146 * Any (unrecognized) arguments left?
2149 usage_with_options(report_usage
, options
);
2153 setup_list(&dso_list
, dso_list_str
, &sort_dso
, "dso", stdout
);
2154 setup_list(&comm_list
, comm_list_str
, &sort_comm
, "comm", stdout
);
2155 setup_list(&sym_list
, sym_list_str
, &sort_sym
, "symbol", stdout
);
2157 if (field_sep
&& *field_sep
== '.') {
2158 fputs("'.' is the only non valid --field-separator argument\n",
2163 return __cmd_report();