/*
 * tools/perf/util/annotate.c
 *
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-annotate.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include "util.h"
#include "ui/ui.h"
#include "sort.h"
#include "build-id.h"
#include "color.h"
#include "cache.h"
#include "symbol.h"
#include "debug.h"
#include "annotate.h"
#include "evsel.h"
#include "block-range.h"
#include "arch/common.h"
#include <regex.h>
#include <pthread.h>
#include <linux/bitops.h>
#include <sys/utsname.h>

const char	*disassembler_style;
const char	*objdump_path;
static regex_t	 file_lineno;

static struct ins_ops *ins__find(struct arch *arch, const char *name);
static void ins__sort(struct arch *arch);
static int disasm_line__parse(char *line, const char **namep, char **rawp);

struct arch {
	const char	*name;
	struct ins	*instructions;
	size_t		nr_instructions;
	size_t		nr_instructions_allocated;
	struct ins_ops  *(*associate_instruction_ops)(struct arch *arch, const char *name);
	bool		sorted_instructions;
	bool		initialized;
	void		*priv;
	int		(*init)(struct arch *arch);
	struct		{
		char comment_char;
		char skip_functions_char;
	} objdump;
};

static struct ins_ops call_ops;
static struct ins_ops dec_ops;
static struct ins_ops jump_ops;
static struct ins_ops mov_ops;
static struct ins_ops nop_ops;
static struct ins_ops lock_ops;
static struct ins_ops ret_ops;

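/*
 * Grow the per-arch instruction table in 128-entry steps. A table that still
 * points at a static array (e.g. x86__instructions) is first copied into a
 * heap-allocated one so it can be extended.
 */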
static int arch__grow_instructions(struct arch *arch)
{
	struct ins *new_instructions;
	size_t new_nr_allocated;

	if (arch->nr_instructions_allocated == 0 && arch->instructions)
		goto grow_from_non_allocated_table;

	new_nr_allocated = arch->nr_instructions_allocated + 128;
	new_instructions = realloc(arch->instructions, new_nr_allocated * sizeof(struct ins));
	if (new_instructions == NULL)
		return -1;

out_update_instructions:
	arch->instructions = new_instructions;
	arch->nr_instructions_allocated = new_nr_allocated;
	return 0;

grow_from_non_allocated_table:
	new_nr_allocated = arch->nr_instructions + 128;
	new_instructions = calloc(new_nr_allocated, sizeof(struct ins));
	if (new_instructions == NULL)
		return -1;

	/* Copy the existing entries in full, not just nr_instructions bytes. */
	memcpy(new_instructions, arch->instructions, arch->nr_instructions * sizeof(struct ins));
	goto out_update_instructions;
}

static int arch__associate_ins_ops(struct arch* arch, const char *name, struct ins_ops *ops)
{
	struct ins *ins;

	if (arch->nr_instructions == arch->nr_instructions_allocated &&
	    arch__grow_instructions(arch))
		return -1;

	ins = &arch->instructions[arch->nr_instructions];
	ins->name = strdup(name);
	if (!ins->name)
		return -1;

	ins->ops  = ops;
	arch->nr_instructions++;

	ins__sort(arch);
	return 0;
}

#include "arch/arm/annotate/instructions.c"
#include "arch/arm64/annotate/instructions.c"
#include "arch/x86/annotate/instructions.c"
#include "arch/powerpc/annotate/instructions.c"

static struct arch architectures[] = {
	{
		.name = "arm",
		.init = arm__annotate_init,
	},
	{
		.name = "arm64",
		.init = arm64__annotate_init,
	},
	{
		.name = "x86",
		.instructions = x86__instructions,
		.nr_instructions = ARRAY_SIZE(x86__instructions),
		.objdump =  {
			.comment_char = '#',
		},
	},
	{
		.name = "powerpc",
		.init = powerpc__annotate_init,
	},
};

static void ins__delete(struct ins_operands *ops)
{
	if (ops == NULL)
		return;
	zfree(&ops->source.raw);
	zfree(&ops->source.name);
	zfree(&ops->target.raw);
	zfree(&ops->target.name);
}

static int ins__raw_scnprintf(struct ins *ins, char *bf, size_t size,
			      struct ins_operands *ops)
{
	return scnprintf(bf, size, "%-6.6s %s", ins->name, ops->raw);
}

int ins__scnprintf(struct ins *ins, char *bf, size_t size,
		   struct ins_operands *ops)
{
	if (ins->ops->scnprintf)
		return ins->ops->scnprintf(ins, bf, size, ops);

	return ins__raw_scnprintf(ins, bf, size, ops);
}

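/*
 * Parse the target of a call instruction from objdump output: either a
 * "<symbol>" operand, an indirect "*addr" operand, or a raw address that is
 * resolved to a symbol name through the map.
 */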
static int call__parse(struct arch *arch, struct ins_operands *ops, struct map *map)
{
	char *endptr, *tok, *name;

	ops->target.addr = strtoull(ops->raw, &endptr, 16);

	name = strchr(endptr, '<');
	if (name == NULL)
		goto indirect_call;

	name++;

	if (arch->objdump.skip_functions_char &&
	    strchr(name, arch->objdump.skip_functions_char))
		return -1;

	tok = strchr(name, '>');
	if (tok == NULL)
		return -1;

	*tok = '\0';
	ops->target.name = strdup(name);
	*tok = '>';

	return ops->target.name == NULL ? -1 : 0;

indirect_call:
	tok = strchr(endptr, '*');
	if (tok == NULL) {
		struct symbol *sym = map__find_symbol(map, map->map_ip(map, ops->target.addr));
		if (sym != NULL)
			ops->target.name = strdup(sym->name);
		else
			ops->target.addr = 0;
		return 0;
	}

	ops->target.addr = strtoull(tok + 1, NULL, 16);
	return 0;
}

static int call__scnprintf(struct ins *ins, char *bf, size_t size,
			   struct ins_operands *ops)
{
	if (ops->target.name)
		return scnprintf(bf, size, "%-6.6s %s", ins->name, ops->target.name);

	if (ops->target.addr == 0)
		return ins__raw_scnprintf(ins, bf, size, ops);

	return scnprintf(bf, size, "%-6.6s *%" PRIx64, ins->name, ops->target.addr);
}

static struct ins_ops call_ops = {
	.parse	   = call__parse,
	.scnprintf = call__scnprintf,
};

bool ins__is_call(const struct ins *ins)
{
	return ins->ops == &call_ops;
}

static int jump__parse(struct arch *arch __maybe_unused, struct ins_operands *ops, struct map *map __maybe_unused)
{
	const char *s = strchr(ops->raw, '+');
	const char *c = strchr(ops->raw, ',');

	if (c++ != NULL)
		ops->target.addr = strtoull(c, NULL, 16);
	else
		ops->target.addr = strtoull(ops->raw, NULL, 16);

	if (s++ != NULL) {
		ops->target.offset = strtoull(s, NULL, 16);
		ops->target.offset_avail = true;
	} else {
		ops->target.offset_avail = false;
	}

	return 0;
}

static int jump__scnprintf(struct ins *ins, char *bf, size_t size,
			   struct ins_operands *ops)
{
	if (!ops->target.addr || ops->target.offset < 0)
		return ins__raw_scnprintf(ins, bf, size, ops);

	return scnprintf(bf, size, "%-6.6s %" PRIx64, ins->name, ops->target.offset);
}

static struct ins_ops jump_ops = {
	.parse	   = jump__parse,
	.scnprintf = jump__scnprintf,
};

bool ins__is_jump(const struct ins *ins)
{
	return ins->ops == &jump_ops;
}

static int comment__symbol(char *raw, char *comment, u64 *addrp, char **namep)
{
	char *endptr, *name, *t;

	if (strstr(raw, "(%rip)") == NULL)
		return 0;

	*addrp = strtoull(comment, &endptr, 16);
	name = strchr(endptr, '<');
	if (name == NULL)
		return -1;

	name++;

	t = strchr(name, '>');
	if (t == NULL)
		return 0;

	*t = '\0';
	*namep = strdup(name);
	*t = '>';

	return 0;
}

static int lock__parse(struct arch *arch, struct ins_operands *ops, struct map *map)
{
	ops->locked.ops = zalloc(sizeof(*ops->locked.ops));
	if (ops->locked.ops == NULL)
		return 0;

	if (disasm_line__parse(ops->raw, &ops->locked.ins.name, &ops->locked.ops->raw) < 0)
		goto out_free_ops;

	ops->locked.ins.ops = ins__find(arch, ops->locked.ins.name);

	if (ops->locked.ins.ops == NULL)
		goto out_free_ops;

	if (ops->locked.ins.ops->parse &&
	    ops->locked.ins.ops->parse(arch, ops->locked.ops, map) < 0)
		goto out_free_ops;

	return 0;

out_free_ops:
	zfree(&ops->locked.ops);
	return 0;
}

static int lock__scnprintf(struct ins *ins, char *bf, size_t size,
			   struct ins_operands *ops)
{
	int printed;

	if (ops->locked.ins.ops == NULL)
		return ins__raw_scnprintf(ins, bf, size, ops);

	printed = scnprintf(bf, size, "%-6.6s ", ins->name);
	return printed + ins__scnprintf(&ops->locked.ins, bf + printed,
					size - printed, ops->locked.ops);
}

static void lock__delete(struct ins_operands *ops)
{
	struct ins *ins = &ops->locked.ins;

	if (ins->ops && ins->ops->free)
		ins->ops->free(ops->locked.ops);
	else
		ins__delete(ops->locked.ops);

	zfree(&ops->locked.ops);
	zfree(&ops->target.raw);
	zfree(&ops->target.name);
}

static struct ins_ops lock_ops = {
	.free	   = lock__delete,
	.parse	   = lock__parse,
	.scnprintf = lock__scnprintf,
};

static int mov__parse(struct arch *arch, struct ins_operands *ops, struct map *map __maybe_unused)
{
	char *s = strchr(ops->raw, ','), *target, *comment, prev;

	if (s == NULL)
		return -1;

	*s = '\0';
	ops->source.raw = strdup(ops->raw);
	*s = ',';

	if (ops->source.raw == NULL)
		return -1;

	target = ++s;
	comment = strchr(s, arch->objdump.comment_char);

	if (comment != NULL)
		s = comment - 1;
	else
		s = strchr(s, '\0') - 1;

	while (s > target && isspace(s[0]))
		--s;
	s++;
	prev = *s;
	*s = '\0';

	ops->target.raw = strdup(target);
	*s = prev;

	if (ops->target.raw == NULL)
		goto out_free_source;

	if (comment == NULL)
		return 0;

	while (comment[0] != '\0' && isspace(comment[0]))
		++comment;

	comment__symbol(ops->source.raw, comment, &ops->source.addr, &ops->source.name);
	comment__symbol(ops->target.raw, comment, &ops->target.addr, &ops->target.name);

	return 0;

out_free_source:
	zfree(&ops->source.raw);
	return -1;
}

static int mov__scnprintf(struct ins *ins, char *bf, size_t size,
			  struct ins_operands *ops)
{
	return scnprintf(bf, size, "%-6.6s %s,%s", ins->name,
			 ops->source.name ?: ops->source.raw,
			 ops->target.name ?: ops->target.raw);
}

static struct ins_ops mov_ops = {
	.parse	   = mov__parse,
	.scnprintf = mov__scnprintf,
};

static int dec__parse(struct arch *arch __maybe_unused, struct ins_operands *ops, struct map *map __maybe_unused)
{
	char *target, *comment, *s, prev;

	target = s = ops->raw;

	while (s[0] != '\0' && !isspace(s[0]))
		++s;
	prev = *s;
	*s = '\0';

	ops->target.raw = strdup(target);
	*s = prev;

	if (ops->target.raw == NULL)
		return -1;

	comment = strchr(s, arch->objdump.comment_char);
	if (comment == NULL)
		return 0;

	while (comment[0] != '\0' && isspace(comment[0]))
		++comment;

	comment__symbol(ops->target.raw, comment, &ops->target.addr, &ops->target.name);

	return 0;
}

static int dec__scnprintf(struct ins *ins, char *bf, size_t size,
			  struct ins_operands *ops)
{
	return scnprintf(bf, size, "%-6.6s %s", ins->name,
			 ops->target.name ?: ops->target.raw);
}

static struct ins_ops dec_ops = {
	.parse	   = dec__parse,
	.scnprintf = dec__scnprintf,
};

static int nop__scnprintf(struct ins *ins __maybe_unused, char *bf, size_t size,
			  struct ins_operands *ops __maybe_unused)
{
	return scnprintf(bf, size, "%-6.6s", "nop");
}

static struct ins_ops nop_ops = {
	.scnprintf = nop__scnprintf,
};

static struct ins_ops ret_ops = {
	.scnprintf = ins__raw_scnprintf,
};

bool ins__is_ret(const struct ins *ins)
{
	return ins->ops == &ret_ops;
}

static int ins__key_cmp(const void *name, const void *insp)
{
	const struct ins *ins = insp;

	return strcmp(name, ins->name);
}

static int ins__cmp(const void *a, const void *b)
{
	const struct ins *ia = a;
	const struct ins *ib = b;

	return strcmp(ia->name, ib->name);
}

static void ins__sort(struct arch *arch)
{
	const int nmemb = arch->nr_instructions;

	qsort(arch->instructions, nmemb, sizeof(struct ins), ins__cmp);
}

static struct ins_ops *__ins__find(struct arch *arch, const char *name)
{
	struct ins *ins;
	const int nmemb = arch->nr_instructions;

	if (!arch->sorted_instructions) {
		ins__sort(arch);
		arch->sorted_instructions = true;
	}

	ins = bsearch(name, arch->instructions, nmemb, sizeof(struct ins), ins__key_cmp);
	return ins ? ins->ops : NULL;
}

static struct ins_ops *ins__find(struct arch *arch, const char *name)
{
	struct ins_ops *ops = __ins__find(arch, name);

	if (!ops && arch->associate_instruction_ops)
		ops = arch->associate_instruction_ops(arch, name);

	return ops;
}

static int arch__key_cmp(const void *name, const void *archp)
{
	const struct arch *arch = archp;

	return strcmp(name, arch->name);
}

static int arch__cmp(const void *a, const void *b)
{
	const struct arch *aa = a;
	const struct arch *ab = b;

	return strcmp(aa->name, ab->name);
}

static void arch__sort(void)
{
	const int nmemb = ARRAY_SIZE(architectures);

	qsort(architectures, nmemb, sizeof(struct arch), arch__cmp);
}

static struct arch *arch__find(const char *name)
{
	const int nmemb = ARRAY_SIZE(architectures);
	static bool sorted;

	if (!sorted) {
		arch__sort();
		sorted = true;
	}

	return bsearch(name, architectures, nmemb, sizeof(struct arch), arch__key_cmp);
}

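/*
 * Per-symbol sample histograms: one struct sym_hist per event, each with one
 * u64 counter per byte of the symbol, allocated lazily when the first sample
 * for the symbol is seen.
 */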
int symbol__alloc_hist(struct symbol *sym)
{
	struct annotation *notes = symbol__annotation(sym);
	const size_t size = symbol__size(sym);
	size_t sizeof_sym_hist;

	/* Check for overflow when calculating sizeof_sym_hist */
	if (size > (SIZE_MAX - sizeof(struct sym_hist)) / sizeof(u64))
		return -1;

	sizeof_sym_hist = (sizeof(struct sym_hist) + size * sizeof(u64));

	/* Check for overflow in zalloc argument */
	if (sizeof_sym_hist > (SIZE_MAX - sizeof(*notes->src))
				/ symbol_conf.nr_events)
		return -1;

	notes->src = zalloc(sizeof(*notes->src) + symbol_conf.nr_events * sizeof_sym_hist);
	if (notes->src == NULL)
		return -1;
	notes->src->sizeof_sym_hist = sizeof_sym_hist;
	notes->src->nr_histograms   = symbol_conf.nr_events;
	INIT_LIST_HEAD(&notes->src->source);
	return 0;
}

/* The cycles histogram is lazily allocated. */
static int symbol__alloc_hist_cycles(struct symbol *sym)
{
	struct annotation *notes = symbol__annotation(sym);
	const size_t size = symbol__size(sym);

	notes->src->cycles_hist = calloc(size, sizeof(struct cyc_hist));
	if (notes->src->cycles_hist == NULL)
		return -1;
	return 0;
}

void symbol__annotate_zero_histograms(struct symbol *sym)
{
	struct annotation *notes = symbol__annotation(sym);

	pthread_mutex_lock(&notes->lock);
	if (notes->src != NULL) {
		memset(notes->src->histograms, 0,
		       notes->src->nr_histograms * notes->src->sizeof_sym_hist);
		if (notes->src->cycles_hist)
			memset(notes->src->cycles_hist, 0,
			       symbol__size(sym) * sizeof(struct cyc_hist));
	}
	pthread_mutex_unlock(&notes->lock);
}

static int __symbol__account_cycles(struct annotation *notes,
				    u64 start,
				    unsigned offset, unsigned cycles,
				    unsigned have_start)
{
	struct cyc_hist *ch;

	ch = notes->src->cycles_hist;
	/*
	 * For now we can only account one basic block per
	 * final jump. But multiple could be overlapping.
	 * Always account the longest one. So when
	 * a shorter one has been already seen throw it away.
	 *
	 * We separately always account the full cycles.
	 */
	ch[offset].num_aggr++;
	ch[offset].cycles_aggr += cycles;

	if (!have_start && ch[offset].have_start)
		return 0;
	if (ch[offset].num) {
		if (have_start && (!ch[offset].have_start ||
				   ch[offset].start > start)) {
			ch[offset].have_start = 0;
			ch[offset].cycles = 0;
			ch[offset].num = 0;
			if (ch[offset].reset < 0xffff)
				ch[offset].reset++;
		} else if (have_start &&
			   ch[offset].start < start)
			return 0;
	}
	ch[offset].have_start = have_start;
	ch[offset].start = start;
	ch[offset].cycles += cycles;
	ch[offset].num++;
	return 0;
}

static int __symbol__inc_addr_samples(struct symbol *sym, struct map *map,
				      struct annotation *notes, int evidx, u64 addr)
{
	unsigned offset;
	struct sym_hist *h;

	pr_debug3("%s: addr=%#" PRIx64 "\n", __func__, map->unmap_ip(map, addr));

	if ((addr < sym->start || addr >= sym->end) &&
	    (addr != sym->end || sym->start != sym->end)) {
		pr_debug("%s(%d): ERANGE! sym->name=%s, start=%#" PRIx64 ", addr=%#" PRIx64 ", end=%#" PRIx64 "\n",
			 __func__, __LINE__, sym->name, sym->start, addr, sym->end);
		return -ERANGE;
	}

	offset = addr - sym->start;
	h = annotation__histogram(notes, evidx);
	h->sum++;
	h->addr[offset]++;

	pr_debug3("%#" PRIx64 " %s: period++ [addr: %#" PRIx64 ", %#" PRIx64
		  ", evidx=%d] => %" PRIu64 "\n", sym->start, sym->name,
		  addr, addr - sym->start, evidx, h->addr[offset]);
	return 0;
}

static struct annotation *symbol__get_annotation(struct symbol *sym, bool cycles)
{
	struct annotation *notes = symbol__annotation(sym);

	if (notes->src == NULL) {
		if (symbol__alloc_hist(sym) < 0)
			return NULL;
	}
	if (!notes->src->cycles_hist && cycles) {
		if (symbol__alloc_hist_cycles(sym) < 0)
			return NULL;
	}
	return notes;
}

static int symbol__inc_addr_samples(struct symbol *sym, struct map *map,
				    int evidx, u64 addr)
{
	struct annotation *notes;

	if (sym == NULL)
		return 0;
	notes = symbol__get_annotation(sym, false);
	if (notes == NULL)
		return -ENOMEM;
	return __symbol__inc_addr_samples(sym, map, notes, evidx, addr);
}

static int symbol__account_cycles(u64 addr, u64 start,
				  struct symbol *sym, unsigned cycles)
{
	struct annotation *notes;
	unsigned offset;

	if (sym == NULL)
		return 0;
	notes = symbol__get_annotation(sym, true);
	if (notes == NULL)
		return -ENOMEM;
	if (addr < sym->start || addr >= sym->end)
		return -ERANGE;

	if (start) {
		if (start < sym->start || start >= sym->end)
			return -ERANGE;
		if (start >= addr)
			start = 0;
	}
	offset = addr - sym->start;
	return __symbol__account_cycles(notes,
					start ? start - sym->start : 0,
					offset, cycles,
					!!start);
}

int addr_map_symbol__account_cycles(struct addr_map_symbol *ams,
				    struct addr_map_symbol *start,
				    unsigned cycles)
{
	u64 saddr = 0;
	int err;

	if (!cycles)
		return 0;

	/*
	 * Only set start when IPC can be computed. We can only
	 * compute it when the basic block is completely in a single
	 * function.
	 * Special case the case when the jump is elsewhere, but
	 * it starts on the function start.
	 */
	if (start &&
		(start->sym == ams->sym ||
		 (ams->sym &&
		   start->addr == ams->sym->start + ams->map->start)))
		saddr = start->al_addr;
	if (saddr == 0)
		pr_debug2("BB with bad start: addr %"PRIx64" start %"PRIx64" sym %"PRIx64" saddr %"PRIx64"\n",
			ams->addr,
			start ? start->addr : 0,
			ams->sym ? ams->sym->start + ams->map->start : 0,
			saddr);
	err = symbol__account_cycles(ams->al_addr, saddr, ams->sym, cycles);
	if (err)
		pr_debug2("account_cycles failed %d\n", err);
	return err;
}

int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, int evidx)
{
	return symbol__inc_addr_samples(ams->sym, ams->map, evidx, ams->al_addr);
}

int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
{
	return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
}

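/*
 * Look up the ins_ops for an already split line and run its operand parser;
 * disasm_line__parse() below does the splitting of a raw objdump line into
 * instruction name and raw operands.
 */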
static void disasm_line__init_ins(struct disasm_line *dl, struct arch *arch, struct map *map)
{
	dl->ins.ops = ins__find(arch, dl->ins.name);

	if (!dl->ins.ops)
		return;

	if (dl->ins.ops->parse && dl->ins.ops->parse(arch, &dl->ops, map) < 0)
		dl->ins.ops = NULL;
}

static int disasm_line__parse(char *line, const char **namep, char **rawp)
{
	char *name = line, tmp;

	while (isspace(name[0]))
		++name;

	if (name[0] == '\0')
		return -1;

	*rawp = name + 1;

	while ((*rawp)[0] != '\0' && !isspace((*rawp)[0]))
		++*rawp;

	tmp = (*rawp)[0];
	(*rawp)[0] = '\0';
	*namep = strdup(name);

	if (*namep == NULL)
		goto out;

	(*rawp)[0] = tmp;

	if ((*rawp)[0] != '\0') {
		(*rawp)++;
		while (isspace((*rawp)[0]))
			++(*rawp);
	}

	return 0;

out:
	/* strdup() failed, so there is nothing to free here. */
	*namep = NULL;
	return -1;
}

static struct disasm_line *disasm_line__new(s64 offset, char *line,
					    size_t privsize, int line_nr,
					    struct arch *arch,
					    struct map *map)
{
	struct disasm_line *dl = zalloc(sizeof(*dl) + privsize);

	if (dl != NULL) {
		dl->offset = offset;
		dl->line = strdup(line);
		dl->line_nr = line_nr;
		if (dl->line == NULL)
			goto out_delete;

		if (offset != -1) {
			if (disasm_line__parse(dl->line, &dl->ins.name, &dl->ops.raw) < 0)
				goto out_free_line;

			disasm_line__init_ins(dl, arch, map);
		}
	}

	return dl;

out_free_line:
	zfree(&dl->line);
out_delete:
	free(dl);
	return NULL;
}

void disasm_line__free(struct disasm_line *dl)
{
	zfree(&dl->line);
	if (dl->ins.ops && dl->ins.ops->free)
		dl->ins.ops->free(&dl->ops);
	else
		ins__delete(&dl->ops);
	free((void *)dl->ins.name);
	dl->ins.name = NULL;
	free(dl);
}

int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw)
{
	if (raw || !dl->ins.ops)
		return scnprintf(bf, size, "%-6.6s %s", dl->ins.name, dl->ops.raw);

	return ins__scnprintf(&dl->ins, bf, size, &dl->ops);
}

static void disasm__add(struct list_head *head, struct disasm_line *line)
{
	list_add_tail(&line->node, head);
}

struct disasm_line *disasm__get_next_ip_line(struct list_head *head, struct disasm_line *pos)
{
	list_for_each_entry_continue(pos, head, node)
		if (pos->offset >= 0)
			return pos;

	return NULL;
}

double disasm__calc_percent(struct annotation *notes, int evidx, s64 offset,
			    s64 end, const char **path, u64 *nr_samples)
{
	struct source_line *src_line = notes->src->lines;
	double percent = 0.0;
	*nr_samples = 0;

	if (src_line) {
		size_t sizeof_src_line = sizeof(*src_line) +
				sizeof(src_line->samples) * (src_line->nr_pcnt - 1);

		while (offset < end) {
			src_line = (void *)notes->src->lines +
					(sizeof_src_line * offset);

			if (*path == NULL)
				*path = src_line->path;

			percent += src_line->samples[evidx].percent;
			*nr_samples += src_line->samples[evidx].nr;
			offset++;
		}
	} else {
		struct sym_hist *h = annotation__histogram(notes, evidx);
		unsigned int hits = 0;

		while (offset < end)
			hits += h->addr[offset++];

		if (h->sum) {
			*nr_samples = hits;
			percent = 100.0 * hits / h->sum;
		}
	}

	return percent;
}

static const char *annotate__address_color(struct block_range *br)
{
	double cov = block_range__coverage(br);

	if (cov >= 0) {
		/* mark red for >75% coverage */
		if (cov > 0.75)
			return PERF_COLOR_RED;

		/* mark dull for <1% coverage */
		if (cov < 0.01)
			return PERF_COLOR_NORMAL;
	}

	return PERF_COLOR_MAGENTA;
}

static const char *annotate__asm_color(struct block_range *br)
{
	double cov = block_range__coverage(br);

	if (cov >= 0) {
		/* mark dull for <1% coverage */
		if (cov < 0.01)
			return PERF_COLOR_NORMAL;
	}

	return PERF_COLOR_BLUE;
}

static void annotate__branch_printf(struct block_range *br, u64 addr)
{
	bool emit_comment = true;

	if (!br)
		return;

#if 1
	if (br->is_target && br->start == addr) {
		struct block_range *branch = br;
		double p;

		/*
		 * Find matching branch to our target.
		 */
		while (!branch->is_branch)
			branch = block_range__next(branch);

		p = 100 *(double)br->entry / branch->coverage;

		if (p > 0.1) {
			if (emit_comment) {
				emit_comment = false;
				printf("\t#");
			}

			/*
			 * The percentage of coverage joined at this target in relation
			 * to the next branch.
			 */
			printf(" +%.2f%%", p);
		}
	}
#endif
	if (br->is_branch && br->end == addr) {
		double p = 100*(double)br->taken / br->coverage;

		if (p > 0.1) {
			if (emit_comment) {
				emit_comment = false;
				printf("\t#");
			}

			/*
			 * The percentage of coverage leaving at this branch, and
			 * its prediction ratio.
			 */
			printf(" -%.2f%% (p:%.2f%%)", p, 100*(double)br->pred / br->taken);
		}
	}
}

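/*
 * Print one disassembly line with its per-event percentages (or sample
 * counts), coloring the source path, address and instruction according to
 * the sample and branch coverage data.
 */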
static int disasm_line__print(struct disasm_line *dl, struct symbol *sym, u64 start,
		      struct perf_evsel *evsel, u64 len, int min_pcnt, int printed,
		      int max_lines, struct disasm_line *queue)
{
	static const char *prev_line;
	static const char *prev_color;

	if (dl->offset != -1) {
		const char *path = NULL;
		u64 nr_samples;
		double percent, max_percent = 0.0;
		double *ppercents = &percent;
		u64 *psamples = &nr_samples;
		int i, nr_percent = 1;
		const char *color;
		struct annotation *notes = symbol__annotation(sym);
		s64 offset = dl->offset;
		const u64 addr = start + offset;
		struct disasm_line *next;
		struct block_range *br;

		next = disasm__get_next_ip_line(&notes->src->source, dl);

		if (perf_evsel__is_group_event(evsel)) {
			nr_percent = evsel->nr_members;
			ppercents = calloc(nr_percent, sizeof(double));
			psamples = calloc(nr_percent, sizeof(u64));
			if (ppercents == NULL || psamples == NULL) {
				return -1;
			}
		}

		for (i = 0; i < nr_percent; i++) {
			percent = disasm__calc_percent(notes,
					notes->src->lines ? i : evsel->idx + i,
					offset,
					next ? next->offset : (s64) len,
					&path, &nr_samples);

			ppercents[i] = percent;
			psamples[i] = nr_samples;
			if (percent > max_percent)
				max_percent = percent;
		}

		if (max_percent < min_pcnt)
			return -1;

		if (max_lines && printed >= max_lines)
			return 1;

		if (queue != NULL) {
			list_for_each_entry_from(queue, &notes->src->source, node) {
				if (queue == dl)
					break;
				disasm_line__print(queue, sym, start, evsel, len,
						   0, 0, 1, NULL);
			}
		}

		color = get_percent_color(max_percent);

		/*
		 * Also color the filename and line if needed, with
		 * the same color than the percentage. Don't print it
		 * twice for close colored addr with the same filename:line
		 */
		if (path) {
			if (!prev_line || strcmp(prev_line, path)
				       || color != prev_color) {
				color_fprintf(stdout, color, " %s", path);
				prev_line = path;
				prev_color = color;
			}
		}

		for (i = 0; i < nr_percent; i++) {
			percent = ppercents[i];
			nr_samples = psamples[i];
			color = get_percent_color(percent);

			if (symbol_conf.show_total_period)
				color_fprintf(stdout, color, " %7" PRIu64,
					      nr_samples);
			else
				color_fprintf(stdout, color, " %7.2f", percent);
		}

		printf(" : ");

		br = block_range__find(addr);
		color_fprintf(stdout, annotate__address_color(br), " %" PRIx64 ":", addr);
		color_fprintf(stdout, annotate__asm_color(br), "%s", dl->line);
		annotate__branch_printf(br, addr);
		printf("\n");

		if (ppercents != &percent)
			free(ppercents);

		if (psamples != &nr_samples)
			free(psamples);

	} else if (max_lines && printed >= max_lines)
		return 1;
	else {
		int width = 8;

		if (queue)
			return -1;

		if (perf_evsel__is_group_event(evsel))
			width *= evsel->nr_members;

		if (!*dl->line)
			printf(" %*s:\n", width, " ");
		else
			printf(" %*s: %s\n", width, " ", dl->line);
	}

	return 0;
}

/*
 * symbol__parse_objdump_line() parses objdump output (with -d --no-show-raw)
 * which looks like following
 *
 *  0000000000415500 <_init>:
 *    415500: sub $0x8,%rsp
 *    415504: mov 0x2f5ad5(%rip),%rax   # 70afe0 <_DYNAMIC+0x2f8>
 *    41550b: test %rax,%rax
 *    41550e: je 415515 <_init+0x15>
 *    415510: callq 416e70 <__gmon_start__@plt>
 *    415515: add $0x8,%rsp
 *    415519: retq
 *
 * it will be parsed and saved into struct disasm_line as
 *  <offset> <name> <ops.raw>
 *
 * The offset will be a relative offset from the start of the symbol and -1
 * means that it's not a disassembly line so should be treated differently.
 * The ops.raw part will be parsed further according to type of the instruction.
 */
static int symbol__parse_objdump_line(struct symbol *sym, struct map *map,
				      struct arch *arch,
				      FILE *file, size_t privsize,
				      int *line_nr)
{
	struct annotation *notes = symbol__annotation(sym);
	struct disasm_line *dl;
	char *line = NULL, *parsed_line, *tmp, *tmp2, *c;
	size_t line_len;
	s64 line_ip, offset = -1;
	regmatch_t match[2];

	if (getline(&line, &line_len, file) < 0)
		return -1;

	if (!line)
		return -1;

	while (line_len != 0 && isspace(line[line_len - 1]))
		line[--line_len] = '\0';

	c = strchr(line, '\n');
	if (c)
		*c = 0;

	line_ip = -1;
	parsed_line = line;

	/* /filename:linenr ? Save line number and ignore. */
	if (regexec(&file_lineno, line, 2, match, 0) == 0) {
		*line_nr = atoi(line + match[1].rm_so);
		return 0;
	}

	/*
	 * Strip leading spaces:
	 */
	tmp = line;
	while (*tmp) {
		if (*tmp != ' ')
			break;
		tmp++;
	}

	if (*tmp) {
		/*
		 * Parse hexa addresses followed by ':'
		 */
		line_ip = strtoull(tmp, &tmp2, 16);
		if (*tmp2 != ':' || tmp == tmp2 || tmp2[1] == '\0')
			line_ip = -1;
	}

	if (line_ip != -1) {
		u64 start = map__rip_2objdump(map, sym->start),
		    end = map__rip_2objdump(map, sym->end);

		offset = line_ip - start;
		if ((u64)line_ip < start || (u64)line_ip >= end)
			offset = -1;
		else
			parsed_line = tmp2 + 1;
	}

	dl = disasm_line__new(offset, parsed_line, privsize, *line_nr, arch, map);
	free(line);
	(*line_nr)++;

	if (dl == NULL)
		return -1;

	if (!disasm_line__has_offset(dl)) {
		dl->ops.target.offset = dl->ops.target.addr -
					map__rip_2objdump(map, sym->start);
		dl->ops.target.offset_avail = true;
	}

	/* kcore has no symbols, so add the call target name */
	if (dl->ins.ops && ins__is_call(&dl->ins) && !dl->ops.target.name) {
		struct addr_map_symbol target = {
			.map = map,
			.addr = dl->ops.target.addr,
		};

		if (!map_groups__find_ams(&target) &&
		    target.sym->start == target.al_addr)
			dl->ops.target.name = strdup(target.sym->name);
	}

	disasm__add(&notes->src->source, dl);

	return 0;
}

static __attribute__((constructor)) void symbol__init_regexpr(void)
{
	regcomp(&file_lineno, "^/[^:]+:([0-9]+)", REG_EXTENDED);
}

static void delete_last_nop(struct symbol *sym)
{
	struct annotation *notes = symbol__annotation(sym);
	struct list_head *list = &notes->src->source;
	struct disasm_line *dl;

	while (!list_empty(list)) {
		dl = list_entry(list->prev, struct disasm_line, node);

		if (dl->ins.ops) {
			if (dl->ins.ops != &nop_ops)
				return;
		} else {
			if (!strstr(dl->line, " nop ") &&
			    !strstr(dl->line, " nopl ") &&
			    !strstr(dl->line, " nopw "))
				return;
		}

		list_del(&dl->node);
		disasm_line__free(dl);
	}
}

int symbol__strerror_disassemble(struct symbol *sym __maybe_unused, struct map *map,
				 int errnum, char *buf, size_t buflen)
{
	struct dso *dso = map->dso;

	BUG_ON(buflen == 0);

	if (errnum >= 0) {
		str_error_r(errnum, buf, buflen);
		return 0;
	}

	switch (errnum) {
	case SYMBOL_ANNOTATE_ERRNO__NO_VMLINUX: {
		char bf[SBUILD_ID_SIZE + 15] = " with build id ";
		char *build_id_msg = NULL;

		if (dso->has_build_id) {
			build_id__sprintf(dso->build_id,
					  sizeof(dso->build_id), bf + 15);
			build_id_msg = bf;
		}
		scnprintf(buf, buflen,
			  "No vmlinux file%s\nwas found in the path.\n\n"
			  "Note that annotation using /proc/kcore requires CAP_SYS_RAWIO capability.\n\n"
			  "Please use:\n\n"
			  "  perf buildid-cache -vu vmlinux\n\n"
			  "or:\n\n"
			  "  --vmlinux vmlinux\n", build_id_msg ?: "");
	}
		break;
	default:
		scnprintf(buf, buflen, "Internal error: Invalid %d error code\n", errnum);
		break;
	}

	return 0;
}

static int dso__disassemble_filename(struct dso *dso, char *filename, size_t filename_size)
{
	char linkname[PATH_MAX];
	char *build_id_filename;

	if (dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS &&
	    !dso__is_kcore(dso))
		return SYMBOL_ANNOTATE_ERRNO__NO_VMLINUX;

	build_id_filename = dso__build_id_filename(dso, NULL, 0);
	if (build_id_filename) {
		__symbol__join_symfs(filename, filename_size, build_id_filename);
		free(build_id_filename);
	} else {
		if (dso->has_build_id)
			return ENOMEM;
		goto fallback;
	}

	if (dso__is_kcore(dso) ||
	    readlink(filename, linkname, sizeof(linkname)) < 0 ||
	    strstr(linkname, DSO__NAME_KALLSYMS) ||
	    access(filename, R_OK)) {
fallback:
		/*
		 * If we don't have build-ids or the build-id file isn't in the
		 * cache, or is just a kallsyms file, well, lets hope that this
		 * DSO is the same as when 'perf record' ran.
		 */
		__symbol__join_symfs(filename, filename_size, dso->long_name);
	}

	return 0;
}

static const char *annotate__norm_arch(const char *arch_name)
{
	struct utsname uts;

	if (!arch_name) { /* Assume we are annotating locally. */
		if (uname(&uts) < 0)
			return NULL;
		arch_name = uts.machine;
	}
	return normalize_arch((char *)arch_name);
}

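/*
 * Disassemble one symbol: resolve which file to disassemble (build-id cache,
 * extracted kcore region or decompressed kernel module), run objdump on the
 * symbol's address range through a pipe, and parse each output line into the
 * annotation's list of disasm_lines.
 */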
int symbol__disassemble(struct symbol *sym, struct map *map, const char *arch_name, size_t privsize)
{
	struct dso *dso = map->dso;
	char command[PATH_MAX * 2];
	struct arch *arch = NULL;
	FILE *file;
	char symfs_filename[PATH_MAX];
	struct kcore_extract kce;
	bool delete_extract = false;
	int stdout_fd[2];
	int lineno = 0;
	int nline;
	pid_t pid;
	int err = dso__disassemble_filename(dso, symfs_filename, sizeof(symfs_filename));

	if (err)
		return err;

	arch_name = annotate__norm_arch(arch_name);
	if (!arch_name)
		return -1;

	arch = arch__find(arch_name);
	if (arch == NULL)
		return -ENOTSUP;

	if (arch->init) {
		err = arch->init(arch);
		if (err) {
			pr_err("%s: failed to initialize %s arch priv area\n", __func__, arch->name);
			return err;
		}
	}

	pr_debug("%s: filename=%s, sym=%s, start=%#" PRIx64 ", end=%#" PRIx64 "\n", __func__,
		 symfs_filename, sym->name, map->unmap_ip(map, sym->start),
		 map->unmap_ip(map, sym->end));

	pr_debug("annotating [%p] %30s : [%p] %30s\n",
		 dso, dso->long_name, sym, sym->name);

	if (dso__is_kcore(dso)) {
		kce.kcore_filename = symfs_filename;
		kce.addr = map__rip_2objdump(map, sym->start);
		kce.offs = sym->start;
		kce.len = sym->end - sym->start;
		if (!kcore_extract__create(&kce)) {
			delete_extract = true;
			strlcpy(symfs_filename, kce.extract_filename,
				sizeof(symfs_filename));
		}
	} else if (dso__needs_decompress(dso)) {
		char tmp[PATH_MAX];
		struct kmod_path m;
		int fd;
		bool ret;

		if (kmod_path__parse_ext(&m, symfs_filename))
			goto out;

		snprintf(tmp, PATH_MAX, "/tmp/perf-kmod-XXXXXX");

		fd = mkstemp(tmp);
		if (fd < 0) {
			free(m.ext);
			goto out;
		}

		ret = decompress_to_file(m.ext, symfs_filename, fd);

		if (ret)
			pr_err("Cannot decompress %s %s\n", m.ext, symfs_filename);

		free(m.ext);
		close(fd);

		if (!ret)
			goto out;

		strcpy(symfs_filename, tmp);
	}

	snprintf(command, sizeof(command),
		 "%s %s%s --start-address=0x%016" PRIx64
		 " --stop-address=0x%016" PRIx64
		 " -l -d %s %s -C %s 2>/dev/null|grep -v %s|expand",
		 objdump_path ? objdump_path : "objdump",
		 disassembler_style ? "-M " : "",
		 disassembler_style ? disassembler_style : "",
		 map__rip_2objdump(map, sym->start),
		 map__rip_2objdump(map, sym->end),
		 symbol_conf.annotate_asm_raw ? "" : "--no-show-raw",
		 symbol_conf.annotate_src ? "-S" : "",
		 symfs_filename, symfs_filename);

	pr_debug("Executing: %s\n", command);

	err = -1;
	if (pipe(stdout_fd) < 0) {
		pr_err("Failure creating the pipe to run %s\n", command);
		goto out_remove_tmp;
	}

	pid = fork();
	if (pid < 0) {
		pr_err("Failure forking to run %s\n", command);
		goto out_close_stdout;
	}

	if (pid == 0) {
		close(stdout_fd[0]);
		dup2(stdout_fd[1], 1);
		close(stdout_fd[1]);
		execl("/bin/sh", "sh", "-c", command, NULL);
		perror(command);
		exit(-1);
	}

	close(stdout_fd[1]);

	file = fdopen(stdout_fd[0], "r");
	if (!file) {
		pr_err("Failure creating FILE stream for %s\n", command);
		/*
		 * If we were using debug info should retry with
		 * original binary.
		 */
		goto out_remove_tmp;
	}

	nline = 0;
	while (!feof(file)) {
		if (symbol__parse_objdump_line(sym, map, arch, file, privsize,
			    &lineno) < 0)
			break;
		nline++;
	}

	if (nline == 0)
		pr_err("No output from %s\n", command);

	/*
	 * kallsyms does not have symbol sizes so there may a nop at the end.
	 * Remove it.
	 */
	if (dso__is_kcore(dso))
		delete_last_nop(sym);

	fclose(file);
	err = 0;
out_remove_tmp:
	close(stdout_fd[0]);

	if (dso__needs_decompress(dso))
		unlink(symfs_filename);

	if (delete_extract)
		kcore_extract__delete(&kce);
out:
	return err;

out_close_stdout:
	close(stdout_fd[1]);
	goto out_remove_tmp;
}

static void insert_source_line(struct rb_root *root, struct source_line *src_line)
{
	struct source_line *iter;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	int i, ret;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct source_line, node);

		ret = strcmp(iter->path, src_line->path);
		if (ret == 0) {
			for (i = 0; i < src_line->nr_pcnt; i++)
				iter->samples[i].percent_sum += src_line->samples[i].percent;
			return;
		}

		if (ret < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	for (i = 0; i < src_line->nr_pcnt; i++)
		src_line->samples[i].percent_sum = src_line->samples[i].percent;

	rb_link_node(&src_line->node, parent, p);
	rb_insert_color(&src_line->node, root);
}

static int cmp_source_line(struct source_line *a, struct source_line *b)
{
	int i;

	for (i = 0; i < a->nr_pcnt; i++) {
		if (a->samples[i].percent_sum == b->samples[i].percent_sum)
			continue;
		return a->samples[i].percent_sum > b->samples[i].percent_sum;
	}

	return 0;
}

static void __resort_source_line(struct rb_root *root, struct source_line *src_line)
{
	struct source_line *iter;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct source_line, node);

		if (cmp_source_line(src_line, iter))
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&src_line->node, parent, p);
	rb_insert_color(&src_line->node, root);
}

static void resort_source_line(struct rb_root *dest_root, struct rb_root *src_root)
{
	struct source_line *src_line;
	struct rb_node *node;

	node = rb_first(src_root);
	while (node) {
		struct rb_node *next;

		src_line = rb_entry(node, struct source_line, node);
		next = rb_next(node);
		rb_erase(node, src_root);

		__resort_source_line(dest_root, src_line);
		node = next;
	}
}

static void symbol__free_source_line(struct symbol *sym, int len)
{
	struct annotation *notes = symbol__annotation(sym);
	struct source_line *src_line = notes->src->lines;
	size_t sizeof_src_line;
	int i;

	sizeof_src_line = sizeof(*src_line) +
			  (sizeof(src_line->samples) * (src_line->nr_pcnt - 1));

	for (i = 0; i < len; i++) {
		free_srcline(src_line->path);
		src_line = (void *)src_line + sizeof_src_line;
	}

	zfree(&notes->src->lines);
}

/* Get the filename:line for the colored entries */
static int symbol__get_source_line(struct symbol *sym, struct map *map,
				   struct perf_evsel *evsel,
				   struct rb_root *root, int len)
{
	u64 start;
	int i, k;
	int evidx = evsel->idx;
	struct source_line *src_line;
	struct annotation *notes = symbol__annotation(sym);
	struct sym_hist *h = annotation__histogram(notes, evidx);
	struct rb_root tmp_root = RB_ROOT;
	int nr_pcnt = 1;
	u64 h_sum = h->sum;
	size_t sizeof_src_line = sizeof(struct source_line);

	if (perf_evsel__is_group_event(evsel)) {
		for (i = 1; i < evsel->nr_members; i++) {
			h = annotation__histogram(notes, evidx + i);
			h_sum += h->sum;
		}
		nr_pcnt = evsel->nr_members;
		sizeof_src_line += (nr_pcnt - 1) * sizeof(src_line->samples);
	}

	if (!h_sum)
		return 0;

	src_line = notes->src->lines = calloc(len, sizeof_src_line);
	if (!notes->src->lines)
		return -1;

	start = map__rip_2objdump(map, sym->start);

	for (i = 0; i < len; i++) {
		u64 offset;
		double percent_max = 0.0;

		src_line->nr_pcnt = nr_pcnt;

		for (k = 0; k < nr_pcnt; k++) {
			h = annotation__histogram(notes, evidx + k);
			src_line->samples[k].percent = 100.0 * h->addr[i] / h->sum;

			if (src_line->samples[k].percent > percent_max)
				percent_max = src_line->samples[k].percent;
		}

		if (percent_max <= 0.5)
			goto next;

		offset = start + i;
		src_line->path = get_srcline(map->dso, offset, NULL, false);
		insert_source_line(&tmp_root, src_line);

	next:
		src_line = (void *)src_line + sizeof_src_line;
	}

	resort_source_line(root, &tmp_root);
	return 0;
}

static void print_summary(struct rb_root *root, const char *filename)
{
	struct source_line *src_line;
	struct rb_node *node;

	printf("\nSorted summary for file %s\n", filename);
	printf("----------------------------------------------\n\n");

	if (RB_EMPTY_ROOT(root)) {
		printf(" Nothing higher than %1.1f%%\n", MIN_GREEN);
		return;
	}

	node = rb_first(root);
	while (node) {
		double percent, percent_max = 0.0;
		const char *color;
		char *path;
		int i;

		src_line = rb_entry(node, struct source_line, node);
		for (i = 0; i < src_line->nr_pcnt; i++) {
			percent = src_line->samples[i].percent_sum;
			color = get_percent_color(percent);
			color_fprintf(stdout, color, " %7.2f", percent);

			if (percent > percent_max)
				percent_max = percent;
		}

		path = src_line->path;
		color = get_percent_color(percent_max);
		color_fprintf(stdout, color, " %s\n", path);

		node = rb_next(node);
	}
}

static void symbol__annotate_hits(struct symbol *sym, struct perf_evsel *evsel)
{
	struct annotation *notes = symbol__annotation(sym);
	struct sym_hist *h = annotation__histogram(notes, evsel->idx);
	u64 len = symbol__size(sym), offset;

	for (offset = 0; offset < len; ++offset)
		if (h->addr[offset] != 0)
			printf("%*" PRIx64 ": %" PRIu64 "\n", BITS_PER_LONG / 2,
			       sym->start + offset, h->addr[offset]);
	printf("%*s: %" PRIu64 "\n", BITS_PER_LONG / 2, "h->sum", h->sum);
}

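/*
 * Print the annotated disassembly for one symbol to stdout, honoring the
 * min_pcnt, max_lines and context filters, and return how many lines were
 * suppressed by the max_lines limit.
 */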
int symbol__annotate_printf(struct symbol *sym, struct map *map,
			    struct perf_evsel *evsel, bool full_paths,
			    int min_pcnt, int max_lines, int context)
{
	struct dso *dso = map->dso;
	char *filename;
	const char *d_filename;
	const char *evsel_name = perf_evsel__name(evsel);
	struct annotation *notes = symbol__annotation(sym);
	struct sym_hist *h = annotation__histogram(notes, evsel->idx);
	struct disasm_line *pos, *queue = NULL;
	u64 start = map__rip_2objdump(map, sym->start);
	int printed = 2, queue_len = 0;
	int more = 0;
	u64 len;
	int width = 8;
	int graph_dotted_len;

	filename = strdup(dso->long_name);
	if (!filename)
		return -ENOMEM;

	if (full_paths)
		d_filename = filename;
	else
		d_filename = basename(filename);

	len = symbol__size(sym);

	if (perf_evsel__is_group_event(evsel))
		width *= evsel->nr_members;

	graph_dotted_len = printf(" %-*.*s| Source code & Disassembly of %s for %s (%" PRIu64 " samples)\n",
				  width, width, "Percent", d_filename, evsel_name, h->sum);

	printf("%-*.*s----\n",
	       graph_dotted_len, graph_dotted_len, graph_dotted_line);

	if (verbose)
		symbol__annotate_hits(sym, evsel);

	list_for_each_entry(pos, &notes->src->source, node) {
		if (context && queue == NULL) {
			queue = pos;
			queue_len = 0;
		}

		switch (disasm_line__print(pos, sym, start, evsel, len,
					   min_pcnt, printed, max_lines,
					   queue)) {
		case 0:
			++printed;
			if (context) {
				printed += queue_len;
				queue = NULL;
				queue_len = 0;
			}
			break;
		case 1:
			/* filtered by max_lines */
			++more;
			break;
		case -1:
		default:
			/*
			 * Filtered by min_pcnt or non IP lines when
			 * context != 0
			 */
			if (!context)
				break;
			if (queue_len == context)
				queue = list_entry(queue->node.next, typeof(*queue), node);
			else
				++queue_len;
			break;
		}
	}

	free(filename);

	return more;
}

void symbol__annotate_zero_histogram(struct symbol *sym, int evidx)
{
	struct annotation *notes = symbol__annotation(sym);
	struct sym_hist *h = annotation__histogram(notes, evidx);

	memset(h, 0, notes->src->sizeof_sym_hist);
}

void symbol__annotate_decay_histogram(struct symbol *sym, int evidx)
{
	struct annotation *notes = symbol__annotation(sym);
	struct sym_hist *h = annotation__histogram(notes, evidx);
	int len = symbol__size(sym), offset;

	h->sum = 0;
	for (offset = 0; offset < len; ++offset) {
		h->addr[offset] = h->addr[offset] * 7 / 8;
		h->sum += h->addr[offset];
	}
}

void disasm__purge(struct list_head *head)
{
	struct disasm_line *pos, *n;

	list_for_each_entry_safe(pos, n, head, node) {
		list_del(&pos->node);
		disasm_line__free(pos);
	}
}

static size_t disasm_line__fprintf(struct disasm_line *dl, FILE *fp)
{
	size_t printed;

	if (dl->offset == -1)
		return fprintf(fp, "%s\n", dl->line);

	printed = fprintf(fp, "%#" PRIx64 " %s", dl->offset, dl->ins.name);

	if (dl->ops.raw[0] != '\0') {
		printed += fprintf(fp, "%.*s %s\n", 6 - (int)printed, " ",
				   dl->ops.raw);
	}

	return printed + fprintf(fp, "\n");
}

size_t disasm__fprintf(struct list_head *head, FILE *fp)
{
	struct disasm_line *pos;
	size_t printed = 0;

	list_for_each_entry(pos, head, node)
		printed += disasm_line__fprintf(pos, fp);

	return printed;
}

int symbol__tty_annotate(struct symbol *sym, struct map *map,
			 struct perf_evsel *evsel, bool print_lines,
			 bool full_paths, int min_pcnt, int max_lines)
{
	struct dso *dso = map->dso;
	struct rb_root source_line = RB_ROOT;
	u64 len;

	if (symbol__disassemble(sym, map, perf_evsel__env_arch(evsel), 0) < 0)
		return -1;

	len = symbol__size(sym);

	if (print_lines) {
		srcline_full_filename = full_paths;
		symbol__get_source_line(sym, map, evsel, &source_line, len);
		print_summary(&source_line, dso->long_name);
	}

	symbol__annotate_printf(sym, map, evsel, full_paths,
				min_pcnt, max_lines, 0);
	if (print_lines)
		symbol__free_source_line(sym, len);

	disasm__purge(&symbol__annotation(sym)->src->source);

	return 0;
}

bool ui__has_annotation(void)
{
	return use_browser == 1 && perf_hpp_list.sym;
}