/*
 * builtin-annotate.c
 *
 * Builtin annotate command: Analyze the perf.data input file,
 * look up and read DSOs and symbol information and display
 * a histogram of results, along various sorting keys.
 */
#include "builtin.h"

#include "util/util.h"

#include "util/color.h"
#include <linux/list.h>
#include "util/cache.h"
#include <linux/rbtree.h>
#include "util/symbol.h"
#include "util/string.h"

#include "perf.h"

#include "util/parse-options.h"
#include "util/parse-events.h"
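/*
 * Example usage (illustrative; the symbol name is a placeholder):
 *
 *	perf annotate -i perf.data -s my_function
 *
 * -i selects the input file (default: perf.data) and -s the symbol whose
 * disassembly should be annotated; see the option table at the end of
 * this file for the remaining flags.
 */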
#define SHOW_KERNEL	1
#define SHOW_USER	2
#define SHOW_HV		4

static char		const *input_name = "perf.data";
static char		*vmlinux = "vmlinux";

static char		default_sort_order[] = "comm,symbol";
static char		*sort_order = default_sort_order;

static const char	*sym_hist_filter;

static int		input;
static int		show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV;

static int		dump_trace = 0;
#define dprintf(x...)	do { if (dump_trace) printf(x); } while (0)

static int		verbose;

static int		modules;

static int		full_paths;

static int		print_line;

static unsigned long	page_size;
static unsigned long	mmap_window = 32;
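/*
 * The input file is not read() into memory; __cmd_annotate() below maps it
 * with mmap() in windows of mmap_window pages (page_size * mmap_window
 * bytes) and slides that window forward as events are consumed.
 */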
struct ip_event {
	struct perf_event_header header;
	u64 ip;
	u32 pid, tid;
};

struct mmap_event {
	struct perf_event_header header;
	u32 pid, tid;
	u64 start;
	u64 len;
	u64 pgoff;
	char filename[PATH_MAX];
};

struct comm_event {
	struct perf_event_header header;
	u32 pid, tid;
	char comm[16];
};

struct fork_event {
	struct perf_event_header header;
	u32 pid, ppid;
};

typedef union event_union {
	struct perf_event_header	header;
	struct ip_event			ip;
	struct mmap_event		mmap;
	struct comm_event		comm;
	struct fork_event		fork;
} event_t;
static LIST_HEAD(dsos);
static struct dso *kernel_dso;
static struct dso *vdso;
static void dsos__add(struct dso *dso)
{
	list_add_tail(&dso->node, &dsos);
}
static struct dso *dsos__find(const char *name)
{
	struct dso *pos;

	list_for_each_entry(pos, &dsos, node)
		if (strcmp(pos->name, name) == 0)
			return pos;
	return NULL;
}
static struct dso *dsos__findnew(const char *name)
{
	struct dso *dso = dsos__find(name);
	int nr;

	if (dso)
		return dso;

	dso = dso__new(name, 0);
	if (!dso)
		goto out_delete_dso;

	nr = dso__load(dso, NULL, verbose);
	if (nr < 0) {
		if (verbose)
			fprintf(stderr, "Failed to open: %s\n", name);
		goto out_delete_dso;
	}
	if (!nr && verbose) {
		fprintf(stderr,
		"No symbols found in: %s, maybe install a debug package?\n",
				name);
	}

	dsos__add(dso);

	return dso;

out_delete_dso:
	dso__delete(dso);
	return NULL;
}
static void dsos__fprintf(FILE *fp)
{
	struct dso *pos;

	list_for_each_entry(pos, &dsos, node)
		dso__fprintf(pos, fp);
}
static struct symbol *vdso__find_symbol(struct dso *dso, u64 ip)
{
	return dso__find_symbol(dso, ip);
}
static int load_kernel(void)
{
	int err;

	kernel_dso = dso__new("[kernel]", 0);
	if (!kernel_dso)
		return -1;

	err = dso__load_kernel(kernel_dso, vmlinux, NULL, verbose, modules);
	if (err <= 0) {
		dso__delete(kernel_dso);
		kernel_dso = NULL;
	} else
		dsos__add(kernel_dso);

	vdso = dso__new("[vdso]", 0);
	if (!vdso)
		return -1;

	vdso->find_symbol = vdso__find_symbol;

	dsos__add(vdso);

	return err;
}
struct map {
	struct list_head node;
	u64		 start;
	u64		 end;
	u64		 pgoff;
	u64		 (*map_ip)(struct map *, u64);
	struct dso	 *dso;
};

static u64 map__map_ip(struct map *map, u64 ip)
{
	return ip - map->start + map->pgoff;
}

static u64 vdso__map_ip(struct map *map __used, u64 ip)
{
	return ip;
}
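/*
 * map__map_ip() translates an absolute sampled address into an offset
 * relative to the DSO the map was created from: ip - map->start + map->pgoff.
 * For example (illustrative numbers), a sample at 0x7f0000001234 inside a
 * mapping starting at 0x7f0000000000 with pgoff 0 becomes 0x1234, which is
 * the address space the DSO's symbol table is searched in. The vdso is
 * special-cased with an identity mapping.
 */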
static struct map *map__new(struct mmap_event *event)
{
	struct map *self = malloc(sizeof(*self));

	if (self != NULL) {
		const char *filename = event->filename;

		self->start = event->start;
		self->end   = event->start + event->len;
		self->pgoff = event->pgoff;

		self->dso = dsos__findnew(filename);
		if (self->dso == NULL)
			goto out_delete;

		if (self->dso == vdso)
			self->map_ip = vdso__map_ip;
		else
			self->map_ip = map__map_ip;
	}
	return self;
out_delete:
	free(self);
	return NULL;
}
static struct map *map__clone(struct map *self)
{
	struct map *map = malloc(sizeof(*self));

	if (!map)
		return NULL;

	memcpy(map, self, sizeof(*self));

	return map;
}
static int map__overlap(struct map *l, struct map *r)
{
	if (l->start > r->start) {
		struct map *t = l;
		l = r;
		r = t;
	}

	if (l->end > r->start)
		return 1;

	return 0;
}
static size_t map__fprintf(struct map *self, FILE *fp)
{
	return fprintf(fp, " %Lx-%Lx %Lx %s\n",
		       self->start, self->end, self->pgoff, self->dso->name);
}
struct thread {
	struct rb_node	 rb_node;
	struct list_head maps;
	pid_t		 pid;
	char		 *comm;
};

static struct thread *thread__new(pid_t pid)
{
	struct thread *self = malloc(sizeof(*self));

	if (self != NULL) {
		self->pid = pid;
		self->comm = malloc(32);
		if (self->comm)
			snprintf(self->comm, 32, ":%d", self->pid);
		INIT_LIST_HEAD(&self->maps);
	}

	return self;
}
static int thread__set_comm(struct thread *self, const char *comm)
{
	if (self->comm)
		free(self->comm);
	self->comm = strdup(comm);
	return self->comm ? 0 : -ENOMEM;
}
static size_t thread__fprintf(struct thread *self, FILE *fp)
{
	struct map *pos;
	size_t ret = fprintf(fp, "Thread %d %s\n", self->pid, self->comm);

	list_for_each_entry(pos, &self->maps, node)
		ret += map__fprintf(pos, fp);

	return ret;
}
static struct rb_root threads;
static struct thread *last_match;

static struct thread *threads__findnew(pid_t pid)
{
	struct rb_node **p = &threads.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;

	/*
	 * Front-end cache - PID lookups come in blocks,
	 * so most of the time we don't have to look up
	 * the full rbtree:
	 */
	if (last_match && last_match->pid == pid)
		return last_match;

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->pid == pid) {
			last_match = th;
			return th;
		}

		if (pid < th->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	th = thread__new(pid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color(&th->rb_node, &threads);
		last_match = th;
	}

	return th;
}
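/*
 * Threads live in a single rbtree keyed by pid; last_match is a one-entry
 * cache in front of it, which pays off because consecutive events in
 * perf.data usually belong to the same pid.
 */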
static void thread__insert_map(struct thread *self, struct map *map)
{
	struct map *pos, *tmp;

	list_for_each_entry_safe(pos, tmp, &self->maps, node) {
		if (map__overlap(pos, map)) {
			list_del_init(&pos->node);
			/* XXX leaks dsos */
			free(pos);
		}
	}

	list_add_tail(&map->node, &self->maps);
}
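/*
 * A new mmap over an address range that is already covered replaces the old
 * mapping: any overlapping map is unlinked and freed (its dso is not
 * reference-counted here) before the new one is appended to the thread's
 * map list.
 */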
static int thread__fork(struct thread *self, struct thread *parent)
{
	struct map *map;

	if (self->comm)
		free(self->comm);
	self->comm = strdup(parent->comm);
	if (!self->comm)
		return -ENOMEM;

	list_for_each_entry(map, &parent->maps, node) {
		struct map *new = map__clone(map);
		if (!new)
			return -ENOMEM;
		thread__insert_map(self, new);
	}

	return 0;
}
static struct map *thread__find_map(struct thread *self, u64 ip)
{
	struct map *pos;

	if (self == NULL)
		return NULL;

	list_for_each_entry(pos, &self->maps, node)
		if (ip >= pos->start && ip <= pos->end)
			return pos;

	return NULL;
}
static size_t threads__fprintf(FILE *fp)
{
	size_t ret = 0;
	struct rb_node *nd;

	for (nd = rb_first(&threads); nd; nd = rb_next(nd)) {
		struct thread *pos = rb_entry(nd, struct thread, rb_node);

		ret += thread__fprintf(pos, fp);
	}

	return ret;
}
/*
 * histogram, sorted on item, collects counts
 */

static struct rb_root hist;

struct hist_entry {
	struct rb_node	 rb_node;

	struct thread	 *thread;
	struct map	 *map;
	struct dso	 *dso;
	struct symbol	 *sym;
	u64		 ip;
	char		 level;

	uint32_t	 count;
};
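/*
 * The data flows through up to three rbtrees: samples are first accumulated
 * into 'hist' (keyed by the configured sort entries), optionally merged into
 * 'collapse_hists' when a sort key defines a collapse operation, and finally
 * re-inserted into 'output_hists' ordered by count.
 */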
/*
 * configurable sorting bits
 */

struct sort_entry {
	struct list_head list;

	char *header;

	int64_t (*cmp)(struct hist_entry *, struct hist_entry *);
	int64_t (*collapse)(struct hist_entry *, struct hist_entry *);
	size_t	(*print)(FILE *fp, struct hist_entry *);
};
/* --sort pid */

static int64_t
sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->thread->pid - left->thread->pid;
}

static size_t
sort__thread_print(FILE *fp, struct hist_entry *self)
{
	return fprintf(fp, "%16s:%5d", self->thread->comm ?: "", self->thread->pid);
}

static struct sort_entry sort_thread = {
	.header = "         Command:  Pid",
	.cmp	= sort__thread_cmp,
	.print	= sort__thread_print,
};
/* --sort comm */

static int64_t
sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->thread->pid - left->thread->pid;
}

static int64_t
sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
{
	char *comm_l = left->thread->comm;
	char *comm_r = right->thread->comm;

	if (!comm_l || !comm_r) {
		if (!comm_l && !comm_r)
			return 0;
		else if (!comm_l)
			return -1;
		else
			return 1;
	}

	return strcmp(comm_l, comm_r);
}

static size_t
sort__comm_print(FILE *fp, struct hist_entry *self)
{
	return fprintf(fp, "%16s", self->thread->comm);
}

static struct sort_entry sort_comm = {
	.header		= "         Command",
	.cmp		= sort__comm_cmp,
	.collapse	= sort__comm_collapse,
	.print		= sort__comm_print,
};
/* --sort dso */

static int64_t
sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct dso *dso_l = left->dso;
	struct dso *dso_r = right->dso;

	if (!dso_l || !dso_r) {
		if (!dso_l && !dso_r)
			return 0;
		else if (!dso_l)
			return -1;
		else
			return 1;
	}

	return strcmp(dso_l->name, dso_r->name);
}

static size_t
sort__dso_print(FILE *fp, struct hist_entry *self)
{
	if (self->dso)
		return fprintf(fp, "%-25s", self->dso->name);

	return fprintf(fp, "%016llx         ", (u64)self->ip);
}

static struct sort_entry sort_dso = {
	.header = "Shared Object            ",
	.cmp	= sort__dso_cmp,
	.print	= sort__dso_print,
};
/* --sort symbol */

static int64_t
sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
{
	u64 ip_l, ip_r;

	if (left->sym == right->sym)
		return 0;

	ip_l = left->sym ? left->sym->start : left->ip;
	ip_r = right->sym ? right->sym->start : right->ip;

	return (int64_t)(ip_r - ip_l);
}

static size_t
sort__sym_print(FILE *fp, struct hist_entry *self)
{
	size_t ret = 0;

	if (verbose)
		ret += fprintf(fp, "%#018llx  ", (u64)self->ip);

	if (self->sym) {
		ret += fprintf(fp, "[%c] %s",
			self->dso == kernel_dso ? 'k' : '.', self->sym->name);
	} else {
		ret += fprintf(fp, "%#016llx", (u64)self->ip);
	}

	return ret;
}

static struct sort_entry sort_sym = {
	.header = "Symbol",
	.cmp	= sort__sym_cmp,
	.print	= sort__sym_print,
};

static int sort__need_collapse = 0;
struct sort_dimension {
	char			*name;
	struct sort_entry	*entry;
	int			taken;
};

static struct sort_dimension sort_dimensions[] = {
	{ .name = "pid",	.entry = &sort_thread,	},
	{ .name = "comm",	.entry = &sort_comm,	},
	{ .name = "dso",	.entry = &sort_dso,	},
	{ .name = "symbol",	.entry = &sort_sym,	},
};

static LIST_HEAD(hist_entry__sort_list);
static int sort_dimension__add(char *tok)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(sort_dimensions); i++) {
		struct sort_dimension *sd = &sort_dimensions[i];

		if (sd->taken)
			continue;

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sd->entry->collapse)
			sort__need_collapse = 1;

		list_add_tail(&sd->entry->list, &hist_entry__sort_list);
		sd->taken = 1;

		return 0;
	}

	return -ESRCH;
}
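/*
 * setup_sorting() near the end of this file turns the sort_order string
 * (default "comm,symbol") into a list of sort_entry structures on
 * hist_entry__sort_list; hist_entry__cmp() and hist_entry__collapse() below
 * simply walk that list and return the first non-zero comparison, so the
 * order of the keys decides the ordering of the histogram.
 */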
static int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		cmp = se->cmp(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

static int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		int64_t (*f)(struct hist_entry *, struct hist_entry *);

		f = se->collapse ?: se->cmp;

		cmp = f(left, right);
		if (cmp)
			break;
	}

	return cmp;
}
/*
 * collect histogram counts
 */
static void hist_hit(struct hist_entry *he, u64 ip)
{
	unsigned int sym_size, offset;
	struct symbol *sym = he->sym;

	he->count++;

	if (!sym || !sym->hist)
		return;

	sym_size = sym->end - sym->start;
	offset = ip - sym->start;

	if (offset >= sym_size)
		return;

	sym->hist_sum++;
	sym->hist[offset]++;

	if (verbose >= 3)
		printf("%p %s: count++ [ip: %p, %08Lx] => %Ld\n",
			(void *)(unsigned long)he->sym->start,
			he->sym->name,
			(void *)(unsigned long)ip, ip - he->sym->start,
			sym->hist[offset]);
}
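/*
 * Besides the per-entry count, each symbol that is being annotated carries a
 * hist[] array with one slot per byte offset into the symbol, plus a
 * hist_sum total; hist_hit() fills those, and parse_line() later turns them
 * into the percentage column shown next to each disassembled instruction.
 */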
static int
hist_entry__add(struct thread *thread, struct map *map, struct dso *dso,
		struct symbol *sym, u64 ip, char level)
{
	struct rb_node **p = &hist.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	struct hist_entry entry = {
		.thread	= thread,
		.map	= map,
		.dso	= dso,
		.sym	= sym,
		.ip	= ip,
		.level	= level,
		.count	= 1,
	};
	int cmp;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node);

		cmp = hist_entry__cmp(&entry, he);

		if (!cmp) {
			hist_hit(he, ip);
			return 0;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = malloc(sizeof(*he));
	if (!he)
		return -ENOMEM;
	*he = entry;
	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, &hist);

	return 0;
}

static void hist_entry__free(struct hist_entry *he)
{
	free(he);
}
/*
 * collapse the histogram
 */

static struct rb_root collapse_hists;

static void collapse__insert_entry(struct hist_entry *he)
{
	struct rb_node **p = &collapse_hists.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			iter->count += he->count;
			hist_entry__free(he);
			return;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, &collapse_hists);
}

static void collapse__resort(void)
{
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse)
		return;

	next = rb_first(&hist);
	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		rb_erase(&n->rb_node, &hist);
		collapse__insert_entry(n);
	}
}
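/*
 * Moving entries between rbtrees follows the usual drain pattern: take
 * rb_first(), remember rb_next() before rb_erase(), then re-insert the node
 * into the destination tree. collapse__resort() above and output__resort()
 * below both use it.
 */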
/*
 * reverse the map, sort on count.
 */

static struct rb_root output_hists;

static void output__insert_entry(struct hist_entry *he)
{
	struct rb_node **p = &output_hists.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (he->count > iter->count)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, &output_hists);
}

static void output__resort(void)
{
	struct rb_node *next;
	struct hist_entry *n;
	struct rb_root *tree = &hist;

	if (sort__need_collapse)
		tree = &collapse_hists;

	next = rb_first(tree);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		rb_erase(&n->rb_node, tree);
		output__insert_entry(n);
	}
}
static void register_idle_thread(void)
{
	struct thread *thread = threads__findnew(0);

	if (thread == NULL ||
			thread__set_comm(thread, "[idle]")) {
		fprintf(stderr, "problem inserting idle task.\n");
		exit(-1);
	}
}

static unsigned long total = 0,
		     total_mmap = 0,
		     total_comm = 0,
		     total_fork = 0,
		     total_unknown = 0;
static int
process_sample_event(event_t *event, unsigned long offset, unsigned long head)
{
	char level;
	int show = 0;
	struct dso *dso = NULL;
	struct thread *thread = threads__findnew(event->ip.pid);
	u64 ip = event->ip.ip;
	struct map *map = NULL;

	dprintf("%p [%p]: PERF_EVENT (IP, %d): %d: %p\n",
		(void *)(offset + head),
		(void *)(long)(event->header.size),
		event->header.misc,
		event->ip.pid,
		(void *)(long)ip);

	if (thread == NULL) {
		fprintf(stderr, "problem processing %d event, skipping it.\n",
			event->header.type);
		return -1;
	}

	dprintf(" ... thread: %s:%d\n", thread->comm, thread->pid);

	if (event->header.misc & PERF_EVENT_MISC_KERNEL) {
		show = SHOW_KERNEL;
		level = 'k';

		dso = kernel_dso;

		dprintf(" ...... dso: %s\n", dso->name);

	} else if (event->header.misc & PERF_EVENT_MISC_USER) {

		show = SHOW_USER;
		level = '.';

		map = thread__find_map(thread, ip);
		if (map != NULL) {
			ip = map->map_ip(map, ip);
			dso = map->dso;
		} else {
			/*
			 * If this is outside of all known maps,
			 * and is a negative address, try to look it
			 * up in the kernel dso, as it might be a
			 * vsyscall (which executes in user-mode):
			 */
			if ((long long)ip < 0)
				dso = kernel_dso;
		}
		dprintf(" ...... dso: %s\n", dso ? dso->name : "<not found>");

	} else {
		show = SHOW_HV;
		level = 'H';
		dprintf(" ...... dso: [hypervisor]\n");
	}

	if (show & show_mask) {
		struct symbol *sym = NULL;

		if (dso)
			sym = dso->find_symbol(dso, ip);

		if (hist_entry__add(thread, map, dso, sym, ip, level)) {
			fprintf(stderr,
		"problem incrementing symbol count, skipping event\n");
			return -1;
		}
	}
	total++;

	return 0;
}
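/*
 * Each sample is attributed to a privilege level: 'k' for kernel, '.' for
 * user space and 'H' for hypervisor samples; only levels enabled in
 * show_mask (all three by default) make it into the histogram.
 */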
static int
process_mmap_event(event_t *event, unsigned long offset, unsigned long head)
{
	struct thread *thread = threads__findnew(event->mmap.pid);
	struct map *map = map__new(&event->mmap);

	dprintf("%p [%p]: PERF_EVENT_MMAP %d: [%p(%p) @ %p]: %s\n",
		(void *)(offset + head),
		(void *)(long)(event->header.size),
		event->mmap.pid,
		(void *)(long)event->mmap.start,
		(void *)(long)event->mmap.len,
		(void *)(long)event->mmap.pgoff,
		event->mmap.filename);

	if (thread == NULL || map == NULL) {
		dprintf("problem processing PERF_EVENT_MMAP, skipping event.\n");
		return 0;
	}

	thread__insert_map(thread, map);
	total_mmap++;

	return 0;
}
static int
process_comm_event(event_t *event, unsigned long offset, unsigned long head)
{
	struct thread *thread = threads__findnew(event->comm.pid);

	dprintf("%p [%p]: PERF_EVENT_COMM: %s:%d\n",
		(void *)(offset + head),
		(void *)(long)(event->header.size),
		event->comm.comm, event->comm.pid);

	if (thread == NULL ||
	    thread__set_comm(thread, event->comm.comm)) {
		dprintf("problem processing PERF_EVENT_COMM, skipping event.\n");
		return -1;
	}
	total_comm++;

	return 0;
}
static int
process_fork_event(event_t *event, unsigned long offset, unsigned long head)
{
	struct thread *thread = threads__findnew(event->fork.pid);
	struct thread *parent = threads__findnew(event->fork.ppid);

	dprintf("%p [%p]: PERF_EVENT_FORK: %d:%d\n",
		(void *)(offset + head),
		(void *)(long)(event->header.size),
		event->fork.pid, event->fork.ppid);

	if (!thread || !parent || thread__fork(thread, parent)) {
		dprintf("problem processing PERF_EVENT_FORK, skipping event.\n");
		return -1;
	}
	total_fork++;

	return 0;
}
static int
process_event(event_t *event, unsigned long offset, unsigned long head)
{
	switch (event->header.type) {
	case PERF_EVENT_SAMPLE:
		return process_sample_event(event, offset, head);

	case PERF_EVENT_MMAP:
		return process_mmap_event(event, offset, head);

	case PERF_EVENT_COMM:
		return process_comm_event(event, offset, head);

	case PERF_EVENT_FORK:
		return process_fork_event(event, offset, head);

	/*
	 * We don't process them right now but they are fine:
	 */
	case PERF_EVENT_THROTTLE:
	case PERF_EVENT_UNTHROTTLE:
		return 0;

	default:
		return -1;
	}

	return 0;
}
struct sym_ext {
	struct rb_node	node;
	double		percent;
	char		*path;
};

static int
parse_line(FILE *file, struct symbol *sym, u64 start, u64 len)
{
	char *line = NULL, *tmp, *tmp2;
	static const char *prev_line;
	static const char *prev_color;
	unsigned int offset;
	size_t line_len;
	u64 line_ip;
	char *c;

	if (getline(&line, &line_len, file) < 0)
		return -1;
	if (!line)
		return -1;

	c = strchr(line, '\n');
	if (c)
		*c = 0;

	line_ip = -1;
	offset = 0;

	/*
	 * Strip leading spaces:
	 */
	tmp = line;
	while (*tmp) {
		if (*tmp != ' ')
			break;
		tmp++;
	}

	if (*tmp) {
		/*
		 * Parse hex addresses followed by ':'
		 */
		line_ip = strtoull(tmp, &tmp2, 16);
		if (*tmp2 != ':')
			line_ip = -1;
	}

	if (line_ip != -1) {
		const char *path = NULL;
		unsigned int hits = 0;
		double percent = 0.0;
		const char *color;
		struct sym_ext *sym_ext = sym->priv;

		offset = line_ip - start;
		if (offset < len)
			hits = sym->hist[offset];

		if (offset < len && sym_ext) {
			path = sym_ext[offset].path;
			percent = sym_ext[offset].percent;
		} else if (sym->hist_sum)
			percent = 100.0 * hits / sym->hist_sum;

		color = get_percent_color(percent);

		/*
		 * Also color the filename and line if needed, with
		 * the same color as the percentage. Don't print it
		 * twice for adjacent entries with the same filename:line.
		 */
		if (path) {
			if (!prev_line || strcmp(prev_line, path)
				       || color != prev_color) {
				color_fprintf(stdout, color, " %s", path);
				prev_line = path;
				prev_color = color;
			}
		}

		color_fprintf(stdout, color, " %7.2f", percent);
		printf(" :	");
		color_fprintf(stdout, PERF_COLOR_BLUE, "%s\n", line);
	} else {
		if (!*line)
			printf("         :\n");
		else
			printf("         :	%s\n", line);
	}

	return 0;
}
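/*
 * The resulting annotated listing interleaves the percentage column with the
 * raw objdump output, roughly like this (illustrative values):
 *
 *	    0.00 :   4004f4:	push   %rbp
 *	   12.34 :   4004f5:	mov    %rsp,%rbp
 *
 * Lines that do not start with a hex address (source lines from -dS, labels)
 * are printed unchanged after the " : " separator.
 */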
static struct rb_root root_sym_ext;

static void insert_source_line(struct sym_ext *sym_ext)
{
	struct sym_ext *iter;
	struct rb_node **p = &root_sym_ext.rb_node;
	struct rb_node *parent = NULL;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct sym_ext, node);

		if (sym_ext->percent > iter->percent)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&sym_ext->node, parent, p);
	rb_insert_color(&sym_ext->node, &root_sym_ext);
}
static void free_source_line(struct symbol *sym, int len)
{
	struct sym_ext *sym_ext = sym->priv;
	int i;

	if (!sym_ext)
		return;

	for (i = 0; i < len; i++)
		free(sym_ext[i].path);
	free(sym_ext);

	sym->priv = NULL;
	root_sym_ext = RB_ROOT;
}
/* Get the filename:line for the colored entries */
static void
get_source_line(struct symbol *sym, u64 start, int len, char *filename)
{
	int i;
	char cmd[PATH_MAX * 2];
	struct sym_ext *sym_ext;

	if (!sym->hist_sum)
		return;

	sym->priv = calloc(len, sizeof(struct sym_ext));
	if (!sym->priv)
		return;

	sym_ext = sym->priv;

	for (i = 0; i < len; i++) {
		char *path = NULL;
		size_t line_len;
		u64 offset;
		FILE *fp;

		sym_ext[i].percent = 100.0 * sym->hist[i] / sym->hist_sum;
		if (sym_ext[i].percent <= 0.5)
			continue;

		offset = start + i;
		sprintf(cmd, "addr2line -e %s %016llx", filename, offset);
		fp = popen(cmd, "r");
		if (!fp)
			continue;

		if (getline(&path, &line_len, fp) < 0 || !line_len)
			goto next;

		sym_ext[i].path = malloc(sizeof(char) * line_len + 1);
		if (!sym_ext[i].path)
			goto next;

		strcpy(sym_ext[i].path, path);
		insert_source_line(&sym_ext[i]);

	next:
		pclose(fp);
	}
}
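/*
 * For every instruction slot that accounts for more than 0.5% of the hits,
 * the loop above shells out to addr2line, e.g. (illustrative path and
 * address):
 *
 *	addr2line -e /lib/libc-2.9.so 00000000000771a0
 *
 * The returned "file:line" string is what print_summary() and parse_line()
 * display next to the hot instructions.
 */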
static void print_summary(char *filename)
{
	struct sym_ext *sym_ext;
	struct rb_node *node;

	printf("\nSorted summary for file %s\n", filename);
	printf("----------------------------------------------\n\n");

	if (RB_EMPTY_ROOT(&root_sym_ext)) {
		printf(" Nothing higher than %1.1f%%\n", MIN_GREEN);
		return;
	}

	node = rb_first(&root_sym_ext);
	while (node) {
		double percent;
		const char *color;
		char *path;

		sym_ext = rb_entry(node, struct sym_ext, node);
		percent = sym_ext->percent;
		color = get_percent_color(percent);
		path = sym_ext->path;

		color_fprintf(stdout, color, " %7.2f %s", percent, path);
		node = rb_next(node);
	}
}
static void annotate_sym(struct dso *dso, struct symbol *sym)
{
	char *filename = dso->name, *d_filename;
	u64 start, end, len;
	char command[PATH_MAX*2];
	FILE *file;

	if (!filename)
		return;
	if (sym->module)
		filename = sym->module->path;
	else if (dso == kernel_dso)
		filename = vmlinux;

	start = sym->obj_start;
	if (!start)
		start = sym->start;
	if (full_paths)
		d_filename = filename;
	else
		d_filename = basename(filename);

	end = start + sym->end - sym->start + 1;
	len = sym->end - sym->start;

	if (print_line) {
		get_source_line(sym, start, len, filename);
		print_summary(filename);
	}

	printf("\n\n------------------------------------------------\n");
	printf(" Percent |	Source code & Disassembly of %s\n", d_filename);
	printf("------------------------------------------------\n");

	if (verbose >= 2)
		printf("annotating [%p] %30s : [%p] %30s\n", dso, dso->name, sym, sym->name);

	sprintf(command, "objdump --start-address=0x%016Lx --stop-address=0x%016Lx -dS %s|grep -v %s",
			(u64)start, (u64)end, filename, filename);

	if (verbose >= 3)
		printf("doing: %s\n", command);

	file = popen(command, "r");
	if (!file)
		return;

	while (!feof(file)) {
		if (parse_line(file, sym, start, len) < 0)
			break;
	}

	pclose(file);
	if (print_line)
		free_source_line(sym, len);
}
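/*
 * The disassembly itself comes from objdump: the command built above asks
 * for mixed source and disassembly (-dS) restricted to the symbol's address
 * range, and pipes it through grep -v to drop the lines that contain the
 * file name (objdump's header output). parse_line() then consumes that
 * stream one line at a time.
 */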
static void find_annotations(void)
{
	struct rb_node *nd;
	struct dso *dso;
	int count = 0;

	list_for_each_entry(dso, &dsos, node) {

		for (nd = rb_first(&dso->syms); nd; nd = rb_next(nd)) {
			struct symbol *sym = rb_entry(nd, struct symbol, rb_node);

			if (sym->hist) {
				annotate_sym(dso, sym);
				count++;
			}
		}
	}

	if (!count)
		printf(" Error: symbol '%s' not present amongst the samples.\n", sym_hist_filter);
}
static int __cmd_annotate(void)
{
	int ret, rc = EXIT_FAILURE;
	unsigned long offset = 0;
	unsigned long head = 0;
	struct stat stat;
	event_t *event;
	uint32_t size;
	char *buf;

	register_idle_thread();

	input = open(input_name, O_RDONLY);
	if (input < 0) {
		perror("failed to open file");
		exit(-1);
	}

	ret = fstat(input, &stat);
	if (ret < 0) {
		perror("failed to stat file");
		exit(-1);
	}

	if (!stat.st_size) {
		fprintf(stderr, "zero-sized file, nothing to do!\n");
		exit(0);
	}

	if (load_kernel() < 0) {
		perror("failed to load kernel symbols");
		return EXIT_FAILURE;
	}

remap:
	buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ,
			   MAP_SHARED, input, offset);
	if (buf == MAP_FAILED) {
		perror("failed to mmap file");
		exit(-1);
	}

more:
	event = (event_t *)(buf + head);

	size = event->header.size;
	if (!size)
		size = 8;

	if (head + event->header.size >= page_size * mmap_window) {
		unsigned long shift = page_size * (head / page_size);
		int ret;

		ret = munmap(buf, page_size * mmap_window);
		assert(ret == 0);

		offset += shift;
		head -= shift;
		goto remap;
	}

	size = event->header.size;

	dprintf("%p [%p]: event: %d\n",
			(void *)(offset + head),
			(void *)(long)event->header.size,
			event->header.type);

	if (!size || process_event(event, offset, head) < 0) {

		dprintf("%p [%p]: skipping unknown header type: %d\n",
			(void *)(offset + head),
			(void *)(long)(event->header.size),
			event->header.type);

		total_unknown++;

		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope of catching on again 'soon'.
		 */

		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	if (offset + head < (unsigned long)stat.st_size)
		goto more;

	rc = EXIT_SUCCESS;
	close(input);

	dprintf("      IP events: %10ld\n", total);
	dprintf("    mmap events: %10ld\n", total_mmap);
	dprintf("    comm events: %10ld\n", total_comm);
	dprintf("    fork events: %10ld\n", total_fork);
	dprintf(" unknown events: %10ld\n", total_unknown);

	if (dump_trace)
		return 0;

	if (verbose >= 3)
		threads__fprintf(stdout);

	if (verbose >= 2)
		dsos__fprintf(stdout);

	collapse__resort();
	output__resort();

	find_annotations();

	return rc;
}
static const char * const annotate_usage[] = {
	"perf annotate [<options>] <command>",
	NULL
};

static const struct option options[] = {
	OPT_STRING('i', "input", &input_name, "file",
		    "input file name"),
	OPT_STRING('s', "symbol", &sym_hist_filter, "symbol",
		    "symbol to annotate"),
	OPT_BOOLEAN('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_STRING('k', "vmlinux", &vmlinux, "file", "vmlinux pathname"),
	OPT_BOOLEAN('m', "modules", &modules,
		    "load module symbols - WARNING: use only with -k and LIVE kernel"),
	OPT_BOOLEAN('l', "print-line", &print_line,
		    "print matching source lines (may be slow)"),
	OPT_BOOLEAN('P', "full-paths", &full_paths,
		    "Don't shorten the displayed pathnames"),
	OPT_END()
};
static void setup_sorting(void)
{
	char *tmp, *tok, *str = strdup(sort_order);

	for (tok = strtok_r(str, ", ", &tmp);
			tok; tok = strtok_r(NULL, ", ", &tmp)) {
		if (sort_dimension__add(tok) < 0) {
			error("Unknown --sort key: `%s'", tok);
			usage_with_options(annotate_usage, options);
		}
	}

	free(str);
}
int cmd_annotate(int argc, const char **argv, const char *prefix __used)
{
	symbol__init();

	page_size = getpagesize();

	argc = parse_options(argc, argv, options, annotate_usage, 0);

	setup_sorting();

	if (argc) {
		/*
		 * Special case: if there's an argument left then assume that
		 * it's a symbol filter:
		 */
		if (argc > 1)
			usage_with_options(annotate_usage, options);

		sym_hist_filter = argv[0];
	}

	if (!sym_hist_filter)
		usage_with_options(annotate_usage, options);

	setup_pager();

	return __cmd_annotate();
}