#include <sys/utsname.h>

#define KSYM_NAME_LEN 256

static int dso__load_kernel_sym(struct dso *dso, struct map *map,
				symbol_filter_t filter);
static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map,
				      symbol_filter_t filter);
int vmlinux_path__nr_entries;

struct symbol_conf symbol_conf = {
	.try_vmlinux_path = true,
};
static enum dso_binary_type binary_type_symtab[] = {
	DSO_BINARY_TYPE__KALLSYMS,
	DSO_BINARY_TYPE__GUEST_KALLSYMS,
	DSO_BINARY_TYPE__JAVA_JIT,
	DSO_BINARY_TYPE__DEBUGLINK,
	DSO_BINARY_TYPE__BUILD_ID_CACHE,
	DSO_BINARY_TYPE__FEDORA_DEBUGINFO,
	DSO_BINARY_TYPE__UBUNTU_DEBUGINFO,
	DSO_BINARY_TYPE__BUILDID_DEBUGINFO,
	DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
	DSO_BINARY_TYPE__GUEST_KMODULE,
	DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE,
	DSO_BINARY_TYPE__NOT_FOUND,
};

#define DSO_BINARY_TYPE__SYMTAB_CNT ARRAY_SIZE(binary_type_symtab)
bool symbol_type__is_a(char symbol_type, enum map_type map_type)
{
	symbol_type = toupper(symbol_type);

	switch (map_type) {
	case MAP__FUNCTION:
		return symbol_type == 'T' || symbol_type == 'W';
	case MAP__VARIABLE:
		return symbol_type == 'D';
	default:
		return false;
	}
}
static int prefix_underscores_count(const char *str)
{
	const char *tail = str;

	while (*tail == '_')
		tail++;

	return tail - str;
}
static int choose_best_symbol(struct symbol *syma, struct symbol *symb)
{
	s64 a;
	s64 b;
	size_t na, nb;

	/* Prefer a symbol with non zero length */
	a = syma->end - syma->start;
	b = symb->end - symb->start;
	if ((b == 0) && (a > 0))
		return SYMBOL_A;
	else if ((a == 0) && (b > 0))
		return SYMBOL_B;

	/* Prefer a non weak symbol over a weak one */
	a = syma->binding == STB_WEAK;
	b = symb->binding == STB_WEAK;
	if (b && !a)
		return SYMBOL_A;
	if (a && !b)
		return SYMBOL_B;

	/* Prefer a global symbol over a non global one */
	a = syma->binding == STB_GLOBAL;
	b = symb->binding == STB_GLOBAL;
	if (a && !b)
		return SYMBOL_A;
	if (b && !a)
		return SYMBOL_B;

	/* Prefer a symbol with less underscores */
	a = prefix_underscores_count(syma->name);
	b = prefix_underscores_count(symb->name);
	if (b > a)
		return SYMBOL_A;
	else if (a > b)
		return SYMBOL_B;

	/* Choose the symbol with the longest name */
	na = strlen(syma->name);
	nb = strlen(symb->name);

	if (na > nb)
		return SYMBOL_A;
	else if (na < nb)
		return SYMBOL_B;

	/* Avoid "SyS" kernel syscall aliases */
	if (na >= 3 && !strncmp(syma->name, "SyS", 3))
		return SYMBOL_B;
	if (na >= 10 && !strncmp(syma->name, "compat_SyS", 10))
		return SYMBOL_B;

	return SYMBOL_A;
}
void symbols__fixup_duplicate(struct rb_root *symbols)
{
	struct rb_node *nd;
	struct symbol *curr, *next;

	nd = rb_first(symbols);

	while (nd) {
		curr = rb_entry(nd, struct symbol, rb_node);
again:
		nd = rb_next(&curr->rb_node);
		next = rb_entry(nd, struct symbol, rb_node);

		if (!nd)
			break;

		if (curr->start != next->start)
			continue;

		if (choose_best_symbol(curr, next) == SYMBOL_A) {
			rb_erase(&next->rb_node, symbols);
			goto again;
		} else {
			nd = rb_next(&curr->rb_node);
			rb_erase(&curr->rb_node, symbols);
		}
	}
}
void symbols__fixup_end(struct rb_root *symbols)
{
	struct rb_node *nd, *prevnd = rb_first(symbols);
	struct symbol *curr, *prev;

	if (prevnd == NULL)
		return;

	curr = rb_entry(prevnd, struct symbol, rb_node);

	for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) {
		prev = curr;
		curr = rb_entry(nd, struct symbol, rb_node);

		if (prev->end == prev->start && prev->end != curr->start)
			prev->end = curr->start - 1;
	}

	/* Last entry */
	if (curr->end == curr->start)
		curr->end = roundup(curr->start, 4096);
}
void __map_groups__fixup_end(struct map_groups *mg, enum map_type type)
{
	struct map *prev, *curr;
	struct rb_node *nd, *prevnd = rb_first(&mg->maps[type]);

	if (prevnd == NULL)
		return;

	curr = rb_entry(prevnd, struct map, rb_node);

	for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) {
		prev = curr;
		curr = rb_entry(nd, struct map, rb_node);
		prev->end = curr->start - 1;
	}

	/*
	 * We still haven't the actual symbols, so guess the
	 * last map final address.
	 */
	curr->end = ~0ULL;
}
struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name)
{
	size_t namelen = strlen(name) + 1;
	struct symbol *sym = calloc(1, (symbol_conf.priv_size +
					sizeof(*sym) + namelen));
	if (sym == NULL)
		return NULL;

	if (symbol_conf.priv_size)
		sym = ((void *)sym) + symbol_conf.priv_size;

	sym->start   = start;
	sym->end     = len ? start + len - 1 : start;
	sym->binding = binding;
	sym->namelen = namelen - 1;

	pr_debug4("%s: %s %#" PRIx64 "-%#" PRIx64 "\n",
		  __func__, name, start, sym->end);

	memcpy(sym->name, name, namelen);

	return sym;
}
void symbol__delete(struct symbol *sym)
{
	free(((void *)sym) - symbol_conf.priv_size);
}
size_t symbol__fprintf(struct symbol *sym, FILE *fp)
{
	return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %c %s\n",
		       sym->start, sym->end,
		       sym->binding == STB_GLOBAL ? 'g' :
		       sym->binding == STB_LOCAL  ? 'l' : 'w',
		       sym->name);
}
size_t symbol__fprintf_symname_offs(const struct symbol *sym,
				    const struct addr_location *al, FILE *fp)
{
	unsigned long offset;
	size_t length;

	if (sym && sym->name) {
		length = fprintf(fp, "%s", sym->name);
		if (al) {
			if (al->addr < sym->end)
				offset = al->addr - sym->start;
			else
				offset = al->addr - al->map->start - sym->start;
			length += fprintf(fp, "+0x%lx", offset);
		}
		return length;
	} else
		return fprintf(fp, "[unknown]");
}
size_t symbol__fprintf_symname(const struct symbol *sym, FILE *fp)
{
	return symbol__fprintf_symname_offs(sym, NULL, fp);
}
void symbols__delete(struct rb_root *symbols)
{
	struct symbol *pos;
	struct rb_node *next = rb_first(symbols);

	while (next) {
		pos = rb_entry(next, struct symbol, rb_node);
		next = rb_next(&pos->rb_node);
		rb_erase(&pos->rb_node, symbols);
		symbol__delete(pos);
	}
}
void symbols__insert(struct rb_root *symbols, struct symbol *sym)
{
	struct rb_node **p = &symbols->rb_node;
	struct rb_node *parent = NULL;
	const u64 ip = sym->start;
	struct symbol *s;

	while (*p != NULL) {
		parent = *p;
		s = rb_entry(parent, struct symbol, rb_node);
		if (ip < s->start)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&sym->rb_node, parent, p);
	rb_insert_color(&sym->rb_node, symbols);
}
static struct symbol *symbols__find(struct rb_root *symbols, u64 ip)
{
	struct rb_node *n;

	if (symbols == NULL)
		return NULL;

	n = symbols->rb_node;

	while (n) {
		struct symbol *s = rb_entry(n, struct symbol, rb_node);

		if (ip < s->start)
			n = n->rb_left;
		else if (ip > s->end)
			n = n->rb_right;
		else
			return s;
	}

	return NULL;
}

static struct symbol *symbols__first(struct rb_root *symbols)
{
	struct rb_node *n = rb_first(symbols);

	if (n)
		return rb_entry(n, struct symbol, rb_node);

	return NULL;
}
struct symbol_name_rb_node {
	struct rb_node	rb_node;
	struct symbol	sym;
};
static void symbols__insert_by_name(struct rb_root *symbols, struct symbol *sym)
{
	struct rb_node **p = &symbols->rb_node;
	struct rb_node *parent = NULL;
	struct symbol_name_rb_node *symn, *s;

	symn = container_of(sym, struct symbol_name_rb_node, sym);

	while (*p != NULL) {
		parent = *p;
		s = rb_entry(parent, struct symbol_name_rb_node, rb_node);
		if (strcmp(sym->name, s->sym.name) < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&symn->rb_node, parent, p);
	rb_insert_color(&symn->rb_node, symbols);
}
static void symbols__sort_by_name(struct rb_root *symbols,
				  struct rb_root *source)
{
	struct rb_node *nd;

	for (nd = rb_first(source); nd; nd = rb_next(nd)) {
		struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
		symbols__insert_by_name(symbols, pos);
	}
}
static struct symbol *symbols__find_by_name(struct rb_root *symbols,
					    const char *name)
{
	struct rb_node *n;

	if (symbols == NULL)
		return NULL;

	n = symbols->rb_node;

	while (n) {
		struct symbol_name_rb_node *s;
		int cmp;

		s = rb_entry(n, struct symbol_name_rb_node, rb_node);
		cmp = strcmp(name, s->sym.name);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return &s->sym;
	}

	return NULL;
}
struct symbol *dso__find_symbol(struct dso *dso,
				enum map_type type, u64 addr)
{
	return symbols__find(&dso->symbols[type], addr);
}

struct symbol *dso__first_symbol(struct dso *dso, enum map_type type)
{
	return symbols__first(&dso->symbols[type]);
}

struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type,
					const char *name)
{
	return symbols__find_by_name(&dso->symbol_names[type], name);
}
void dso__sort_by_name(struct dso *dso, enum map_type type)
{
	dso__set_sorted_by_name(dso, type);
	return symbols__sort_by_name(&dso->symbol_names[type],
				     &dso->symbols[type]);
}
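/*
 * Illustrative sketch, not part of the original file: how the dso-level
 * wrappers above are meant to be used.  "dso" and "addr" are assumed to come
 * from the caller; note that the by-name lookup only works after
 * dso__sort_by_name() has populated dso->symbol_names[type].
 *
 *	struct symbol *sym = dso__find_symbol(dso, MAP__FUNCTION, addr);
 *
 *	dso__sort_by_name(dso, MAP__FUNCTION);
 *	sym = dso__find_symbol_by_name(dso, MAP__FUNCTION, "schedule");
 */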
size_t dso__fprintf_symbols_by_name(struct dso *dso,
				    enum map_type type, FILE *fp)
{
	size_t ret = 0;
	struct rb_node *nd;
	struct symbol_name_rb_node *pos;

	for (nd = rb_first(&dso->symbol_names[type]); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct symbol_name_rb_node, rb_node);
		fprintf(fp, "%s\n", pos->sym.name);
	}

	return ret;
}
int kallsyms__parse(const char *filename, void *arg,
		    int (*process_symbol)(void *arg, const char *name,
					  char type, u64 start))
{
	char *line = NULL;
	size_t n;
	int err = -1;
	FILE *file = fopen(filename, "r");

	if (file == NULL)
		goto out_failure;

	err = 0;

	while (!feof(file)) {
		u64 start;
		int line_len, len;
		char symbol_type;
		char *symbol_name;

		line_len = getline(&line, &n, file);
		if (line_len < 0 || !line)
			break;

		line[--line_len] = '\0'; /* \n */

		len = hex2u64(line, &start);

		len++;
		if (len + 2 >= line_len)
			continue;

		symbol_type = line[len];
		len += 2;
		symbol_name = line + len;
		len = line_len - len;

		if (len >= KSYM_NAME_LEN) {
			err = -1;
			break;
		}

		err = process_symbol(arg, symbol_name,
				     symbol_type, start);
		if (err)
			break;
	}

	free(line);
	fclose(file);
	return err;

out_failure:
	return -1;
}
{
507 static u8
kallsyms2elf_type(char type
)
512 return isupper(type
) ? STB_GLOBAL
: STB_LOCAL
;
static int map__process_kallsym_symbol(void *arg, const char *name,
					char type, u64 start)
{
	struct symbol *sym;
	struct process_kallsyms_args *a = arg;
	struct rb_root *root = &a->dso->symbols[a->map->type];

	if (!symbol_type__is_a(type, a->map->type))
		return 0;

	/*
	 * module symbols are not sorted so we add all
	 * symbols, setting length to 0, and rely on
	 * symbols__fixup_end() to fix it up.
	 */
	sym = symbol__new(start, 0, kallsyms2elf_type(type), name);
	if (sym == NULL)
		return -ENOMEM;
	/*
	 * We will pass the symbols to the filter later, in
	 * map__split_kallsyms, when we have split the maps per module
	 */
	symbols__insert(root, sym);

	return 0;
}
/*
 * Loads the function entries in /proc/kallsyms into kernel_map->dso,
 * so that we can in the next step set the symbol ->end address and then
 * call kernel_maps__split_kallsyms.
 */
static int dso__load_all_kallsyms(struct dso *dso, const char *filename,
				  struct map *map)
{
	struct process_kallsyms_args args = { .map = map, .dso = dso, };
	return kallsyms__parse(filename, &args, map__process_kallsym_symbol);
}
static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map,
					 symbol_filter_t filter)
{
	struct map_groups *kmaps = map__kmap(map)->kmaps;
	struct map *curr_map;
	struct symbol *pos;
	int count = 0, moved = 0;
	struct rb_root *root = &dso->symbols[map->type];
	struct rb_node *next = rb_first(root);

	while (next) {
		char *module;

		pos = rb_entry(next, struct symbol, rb_node);
		next = rb_next(&pos->rb_node);

		module = strchr(pos->name, '\t');
		if (module)
			*module = '\0';

		curr_map = map_groups__find(kmaps, map->type, pos->start);

		if (!curr_map || (filter && filter(curr_map, pos))) {
			rb_erase(&pos->rb_node, root);
			symbol__delete(pos);
		} else {
			pos->start -= curr_map->start - curr_map->pgoff;
			if (pos->end)
				pos->end -= curr_map->start - curr_map->pgoff;
			if (curr_map != map) {
				rb_erase(&pos->rb_node, root);
				symbols__insert(
					&curr_map->dso->symbols[curr_map->type],
					pos);
				++moved;
			} else
				++count;
		}
	}

	/* Symbols have been adjusted */
	dso->adjust_symbols = 1;

	return count + moved;
}
/*
 * Split the symbols into maps, making sure there are no overlaps, i.e. the
 * kernel range is broken in several maps, named [kernel].N, as we don't have
 * the original ELF section names vmlinux have.
 */
static int dso__split_kallsyms(struct dso *dso, struct map *map,
			       symbol_filter_t filter)
{
	struct map_groups *kmaps = map__kmap(map)->kmaps;
	struct machine *machine = kmaps->machine;
	struct map *curr_map = map;
	struct symbol *pos;
	int count = 0, moved = 0;
	struct rb_root *root = &dso->symbols[map->type];
	struct rb_node *next = rb_first(root);
	int kernel_range = 0;

	while (next) {
		char *module;

		pos = rb_entry(next, struct symbol, rb_node);
		next = rb_next(&pos->rb_node);

		module = strchr(pos->name, '\t');
		if (module) {
			if (!symbol_conf.use_modules)
				goto discard_symbol;

			*module++ = '\0';

			if (strcmp(curr_map->dso->short_name, module)) {
				if (curr_map != map &&
				    dso->kernel == DSO_TYPE_GUEST_KERNEL &&
				    machine__is_default_guest(machine)) {
					/*
					 * We assume all symbols of a module are
					 * continuous in * kallsyms, so curr_map
					 * points to a module and all its
					 * symbols are in its kmap. Mark it as
					 * loaded.
					 */
					dso__set_loaded(curr_map->dso,
							curr_map->type);
				}

				curr_map = map_groups__find_by_name(kmaps,
							map->type, module);
				if (curr_map == NULL) {
					pr_debug("%s/proc/{kallsyms,modules} "
						 "inconsistency while looking "
						 "for \"%s\" module!\n",
						 machine->root_dir, module);
					curr_map = map;
					goto discard_symbol;
				}

				if (curr_map->dso->loaded &&
				    !machine__is_default_guest(machine))
					goto discard_symbol;
			}
			/*
			 * So that we look just like we get from .ko files,
			 * i.e. not prelinked, relative to map->start.
			 */
			pos->start = curr_map->map_ip(curr_map, pos->start);
			pos->end   = curr_map->map_ip(curr_map, pos->end);
		} else if (curr_map != map) {
			char dso_name[PATH_MAX];
			struct dso *ndso;

			if (count == 0) {
				curr_map = map;
				goto filter_symbol;
			}

			if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
				snprintf(dso_name, sizeof(dso_name),
					 "[guest.kernel].%d",
					 kernel_range++);
			else
				snprintf(dso_name, sizeof(dso_name),
					 "[kernel].%d",
					 kernel_range++);

			ndso = dso__new(dso_name);
			if (ndso == NULL)
				return -1;

			ndso->kernel = dso->kernel;

			curr_map = map__new2(pos->start, ndso, map->type);
			if (curr_map == NULL) {
				dso__delete(ndso);
				return -1;
			}

			curr_map->map_ip = curr_map->unmap_ip = identity__map_ip;
			map_groups__insert(kmaps, curr_map);
			++kernel_range;
		}
filter_symbol:
		if (filter && filter(curr_map, pos)) {
discard_symbol:		rb_erase(&pos->rb_node, root);
			symbol__delete(pos);
		} else {
			if (curr_map != map) {
				rb_erase(&pos->rb_node, root);
				symbols__insert(&curr_map->dso->symbols[curr_map->type], pos);
				++moved;
			} else
				++count;
		}
	}

	if (curr_map != map &&
	    dso->kernel == DSO_TYPE_GUEST_KERNEL &&
	    machine__is_default_guest(kmaps->machine)) {
		dso__set_loaded(curr_map->dso, curr_map->type);
	}

	return count + moved;
}
bool symbol__restricted_filename(const char *filename,
				 const char *restricted_filename)
{
	bool restricted = false;

	if (symbol_conf.kptr_restrict) {
		char *r = realpath(filename, NULL);

		if (r != NULL) {
			restricted = strcmp(r, restricted_filename) == 0;
			free(r);
		}
	}

	return restricted;
}
struct kcore_mapfn_data {
	struct dso *dso;
	enum map_type type;
	struct list_head maps;
};

static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data)
{
	struct kcore_mapfn_data *md = data;
	struct map *map;

	map = map__new2(start, md->dso, md->type);
	if (map == NULL)
		return -ENOMEM;

	map->end = map->start + len;
	map->pgoff = pgoff;

	list_add(&map->node, &md->maps);

	return 0;
}
/*
 * If kallsyms is referenced by name then we look for kcore in the same
 * directory.
 */
static bool kcore_filename_from_kallsyms_filename(char *kcore_filename,
						  const char *kallsyms_filename)
{
	char *name;

	strcpy(kcore_filename, kallsyms_filename);
	name = strrchr(kcore_filename, '/');
	if (!name)
		return false;

	if (!strcmp(name, "/kallsyms")) {
		strcpy(name, "/kcore");
		return true;
	}

	return false;
}
static int dso__load_kcore(struct dso *dso, struct map *map,
			   const char *kallsyms_filename)
{
	struct map_groups *kmaps = map__kmap(map)->kmaps;
	struct machine *machine = kmaps->machine;
	struct kcore_mapfn_data md;
	struct map *old_map, *new_map, *replacement_map = NULL;
	struct symbol *sym;
	bool is_64_bit;
	int err, fd;
	char kcore_filename[PATH_MAX];

	/* This function requires that the map is the kernel map */
	if (map != machine->vmlinux_maps[map->type])
		return -EINVAL;

	if (!kcore_filename_from_kallsyms_filename(kcore_filename,
						   kallsyms_filename))
		return -EINVAL;

	md.dso = dso;
	md.type = map->type;
	INIT_LIST_HEAD(&md.maps);

	fd = open(kcore_filename, O_RDONLY);
	if (fd < 0)
		return -EINVAL;

	/* Read new maps into temporary lists */
	err = file__read_maps(fd, md.type == MAP__FUNCTION, kcore_mapfn, &md,
			      &is_64_bit);
	if (err)
		goto out_err;

	if (list_empty(&md.maps)) {
		err = -EINVAL;
		goto out_err;
	}

	/* Remove old maps */
	old_map = map_groups__first(kmaps, map->type);
	while (old_map) {
		struct map *next = map_groups__next(old_map);

		if (old_map != map)
			map_groups__remove(kmaps, old_map);
		old_map = next;
	}

	/* Find the kernel map using the first symbol */
	sym = dso__first_symbol(dso, map->type);
	list_for_each_entry(new_map, &md.maps, node) {
		if (sym && sym->start >= new_map->start &&
		    sym->start < new_map->end) {
			replacement_map = new_map;
			break;
		}
	}

	if (!replacement_map)
		replacement_map = list_entry(md.maps.next, struct map, node);

	/* Add new maps */
	while (!list_empty(&md.maps)) {
		new_map = list_entry(md.maps.next, struct map, node);
		list_del(&new_map->node);
		if (new_map == replacement_map) {
			map->start	= new_map->start;
			map->end	= new_map->end;
			map->pgoff	= new_map->pgoff;
			map->map_ip	= new_map->map_ip;
			map->unmap_ip	= new_map->unmap_ip;
			map__delete(new_map);
			/* Ensure maps are correctly ordered */
			map_groups__remove(kmaps, map);
			map_groups__insert(kmaps, map);
		} else {
			map_groups__insert(kmaps, new_map);
		}
	}

	/*
	 * Set the data type and long name so that kcore can be read via
	 * dso__data_read_addr().
	 */
	if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
		dso->data_type = DSO_BINARY_TYPE__GUEST_KCORE;
	else
		dso->data_type = DSO_BINARY_TYPE__KCORE;
	dso__set_long_name(dso, strdup(kcore_filename));

	close(fd);

	if (map->type == MAP__FUNCTION)
		pr_debug("Using %s for kernel object code\n", kcore_filename);
	else
		pr_debug("Using %s for kernel data\n", kcore_filename);

	return 0;

out_err:
	while (!list_empty(&md.maps)) {
		map = list_entry(md.maps.next, struct map, node);
		list_del(&map->node);
		map__delete(map);
	}
	close(fd);
	return -EINVAL;
}
int dso__load_kallsyms(struct dso *dso, const char *filename,
		       struct map *map, symbol_filter_t filter)
{
	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
		return -1;

	if (dso__load_all_kallsyms(dso, filename, map) < 0)
		return -1;

	symbols__fixup_duplicate(&dso->symbols[map->type]);
	symbols__fixup_end(&dso->symbols[map->type]);

	if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
		dso->symtab_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
	else
		dso->symtab_type = DSO_BINARY_TYPE__KALLSYMS;

	if (!dso__load_kcore(dso, map, filename))
		return dso__split_kallsyms_for_kcore(dso, map, filter);
	else
		return dso__split_kallsyms(dso, map, filter);
}
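/*
 * Illustrative sketch, not part of the original file: the typical way the
 * kallsyms loader above is driven for the host kernel.  "dso" and "map" are
 * assumed to be the kernel dso and kernel map set up by the caller; passing a
 * NULL filter keeps every symbol.
 *
 *	int nr = dso__load_kallsyms(dso, "/proc/kallsyms", map, NULL);
 *
 *	if (nr > 0)
 *		pr_debug("loaded %d kernel symbols\n", nr);
 */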
static int dso__load_perf_map(struct dso *dso, struct map *map,
			      symbol_filter_t filter)
{
	char *line = NULL;
	size_t n;
	FILE *file;
	int nr_syms = 0;

	file = fopen(dso->long_name, "r");
	if (file == NULL)
		goto out_failure;

	while (!feof(file)) {
		u64 start, size;
		struct symbol *sym;
		int line_len, len;

		line_len = getline(&line, &n, file);
		if (line_len < 0)
			break;

		if (!line)
			goto out_failure;

		line[--line_len] = '\0'; /* \n */

		len = hex2u64(line, &start);

		len++;
		if (len + 2 >= line_len)
			continue;

		len += hex2u64(line + len, &size);

		len++;
		if (len + 2 >= line_len)
			continue;

		sym = symbol__new(start, size, STB_GLOBAL, line + len);

		if (sym == NULL)
			goto out_delete_line;

		if (filter && filter(map, sym))
			symbol__delete(sym);
		else {
			symbols__insert(&dso->symbols[map->type], sym);
			nr_syms++;
		}
	}

	free(line);
	fclose(file);

	return nr_syms;

out_delete_line:
	free(line);
out_failure:
	return -1;
}
int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
{
	char *name;
	int ret = -1;
	u_int i;
	struct machine *machine;
	char *root_dir = (char *) "";
	int ss_pos = 0;
	struct symsrc ss_[2];
	struct symsrc *syms_ss = NULL, *runtime_ss = NULL;

	dso__set_loaded(dso, map->type);

	if (dso->kernel == DSO_TYPE_KERNEL)
		return dso__load_kernel_sym(dso, map, filter);
	else if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
		return dso__load_guest_kernel_sym(dso, map, filter);

	if (map->groups && map->groups->machine)
		machine = map->groups->machine;
	else
		machine = NULL;

	dso->adjust_symbols = 0;

	if (strncmp(dso->name, "/tmp/perf-", 10) == 0) {
		struct stat st;

		if (lstat(dso->name, &st) < 0)
			return -1;

		if (st.st_uid && (st.st_uid != geteuid())) {
			pr_warning("File %s not owned by current user or root, "
				   "ignoring it.\n", dso->name);
			return -1;
		}

		ret = dso__load_perf_map(dso, map, filter);
		dso->symtab_type = ret > 0 ? DSO_BINARY_TYPE__JAVA_JIT :
					     DSO_BINARY_TYPE__NOT_FOUND;
		return ret;
	}

	if (machine)
		root_dir = machine->root_dir;

	name = malloc(PATH_MAX);
	if (!name)
		return -1;

	/* Iterate over candidate debug images.
	 * Keep track of "interesting" ones (those which have a symtab, dynsym,
	 * and/or opd section) for processing.
	 */
	for (i = 0; i < DSO_BINARY_TYPE__SYMTAB_CNT; i++) {
		struct symsrc *ss = &ss_[ss_pos];
		bool next_slot = false;

		enum dso_binary_type symtab_type = binary_type_symtab[i];

		if (dso__binary_type_file(dso, symtab_type,
					  root_dir, name, PATH_MAX))
			continue;

		/* Name is now the name of the next image to try */
		if (symsrc__init(ss, dso, name, symtab_type) < 0)
			continue;

		if (!syms_ss && symsrc__has_symtab(ss)) {
			syms_ss = ss;
			next_slot = true;
		}

		if (!runtime_ss && symsrc__possibly_runtime(ss)) {
			runtime_ss = ss;
			next_slot = true;
		}

		if (next_slot) {
			ss_pos++;

			if (syms_ss && runtime_ss)
				break;
		}
	}

	if (!runtime_ss && !syms_ss)
		goto out_free;

	if (runtime_ss && !syms_ss) {
		syms_ss = runtime_ss;
	}

	/* We'll have to hope for the best */
	if (!runtime_ss && syms_ss)
		runtime_ss = syms_ss;

	if (syms_ss) {
		int km;

		km = dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
		     dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE;
		ret = dso__load_sym(dso, map, syms_ss, runtime_ss, filter, km);
	} else {
		ret = -1;
	}

	if (ret > 0) {
		int nr_plt;

		nr_plt = dso__synthesize_plt_symbols(dso, runtime_ss, map, filter);
		if (nr_plt > 0)
			ret += nr_plt;
	}

	for (; ss_pos > 0; ss_pos--)
		symsrc__destroy(&ss_[ss_pos - 1]);
out_free:
	free(name);
	if (ret < 0 && strstr(dso->name, " (deleted)") != NULL)
		return 0;
	return ret;
}
*map_groups__find_by_name(struct map_groups
*mg
,
1108 enum map_type type
, const char *name
)
1112 for (nd
= rb_first(&mg
->maps
[type
]); nd
; nd
= rb_next(nd
)) {
1113 struct map
*map
= rb_entry(nd
, struct map
, rb_node
);
1115 if (map
->dso
&& strcmp(map
->dso
->short_name
, name
) == 0)
int dso__load_vmlinux(struct dso *dso, struct map *map,
		      const char *vmlinux, symbol_filter_t filter)
{
	int err = -1;
	struct symsrc ss;
	char symfs_vmlinux[PATH_MAX];
	enum dso_binary_type symtab_type;

	if (vmlinux[0] == '/')
		snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s", vmlinux);
	else
		snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s%s",
			 symbol_conf.symfs, vmlinux);

	if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
		symtab_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
	else
		symtab_type = DSO_BINARY_TYPE__VMLINUX;

	if (symsrc__init(&ss, dso, symfs_vmlinux, symtab_type))
		return -1;

	err = dso__load_sym(dso, map, &ss, &ss, filter, 0);
	symsrc__destroy(&ss);

	if (err > 0) {
		if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
			dso->data_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
		else
			dso->data_type = DSO_BINARY_TYPE__VMLINUX;
		dso__set_long_name(dso, (char *)vmlinux);
		dso__set_loaded(dso, map->type);
		pr_debug("Using %s for symbols\n", symfs_vmlinux);
	}

	return err;
}
int dso__load_vmlinux_path(struct dso *dso, struct map *map,
			   symbol_filter_t filter)
{
	int i, err = 0;
	char *filename;

	pr_debug("Looking at the vmlinux_path (%d entries long)\n",
		 vmlinux_path__nr_entries + 1);

	filename = dso__build_id_filename(dso, NULL, 0);
	if (filename != NULL) {
		err = dso__load_vmlinux(dso, map, filename, filter);
		if (err > 0) {
			dso->lname_alloc = 1;
			goto out;
		}
		free(filename);
	}

	for (i = 0; i < vmlinux_path__nr_entries; ++i) {
		err = dso__load_vmlinux(dso, map, vmlinux_path[i], filter);
		if (err > 0) {
			dso__set_long_name(dso, strdup(vmlinux_path[i]));
			dso->lname_alloc = 1;
			break;
		}
	}
out:
	return err;
}
*dso
, struct map
*map
,
1192 symbol_filter_t filter
)
1195 const char *kallsyms_filename
= NULL
;
1196 char *kallsyms_allocated_filename
= NULL
;
1198 * Step 1: if the user specified a kallsyms or vmlinux filename, use
1199 * it and only it, reporting errors to the user if it cannot be used.
1201 * For instance, try to analyse an ARM perf.data file _without_ a
1202 * build-id, or if the user specifies the wrong path to the right
1203 * vmlinux file, obviously we can't fallback to another vmlinux (a
1204 * x86_86 one, on the machine where analysis is being performed, say),
1205 * or worse, /proc/kallsyms.
1207 * If the specified file _has_ a build-id and there is a build-id
1208 * section in the perf.data file, we will still do the expected
1209 * validation in dso__load_vmlinux and will bail out if they don't
1212 if (symbol_conf
.kallsyms_name
!= NULL
) {
1213 kallsyms_filename
= symbol_conf
.kallsyms_name
;
1217 if (symbol_conf
.vmlinux_name
!= NULL
) {
1218 err
= dso__load_vmlinux(dso
, map
,
1219 symbol_conf
.vmlinux_name
, filter
);
1221 dso__set_long_name(dso
,
1222 strdup(symbol_conf
.vmlinux_name
));
1223 dso
->lname_alloc
= 1;
1229 if (vmlinux_path
!= NULL
) {
1230 err
= dso__load_vmlinux_path(dso
, map
, filter
);
1235 /* do not try local files if a symfs was given */
1236 if (symbol_conf
.symfs
[0] != 0)
1240 * Say the kernel DSO was created when processing the build-id header table,
1241 * we have a build-id, so check if it is the same as the running kernel,
1242 * using it if it is.
1244 if (dso
->has_build_id
) {
1245 u8 kallsyms_build_id
[BUILD_ID_SIZE
];
1246 char sbuild_id
[BUILD_ID_SIZE
* 2 + 1];
1248 if (sysfs__read_build_id("/sys/kernel/notes", kallsyms_build_id
,
1249 sizeof(kallsyms_build_id
)) == 0) {
1250 if (dso__build_id_equal(dso
, kallsyms_build_id
)) {
1251 kallsyms_filename
= "/proc/kallsyms";
1256 * Now look if we have it on the build-id cache in
1257 * $HOME/.debug/[kernel.kallsyms].
1259 build_id__sprintf(dso
->build_id
, sizeof(dso
->build_id
),
1262 if (asprintf(&kallsyms_allocated_filename
,
1263 "%s/.debug/[kernel.kallsyms]/%s",
1264 getenv("HOME"), sbuild_id
) == -1) {
1265 pr_err("Not enough memory for kallsyms file lookup\n");
1269 kallsyms_filename
= kallsyms_allocated_filename
;
1271 if (access(kallsyms_filename
, F_OK
)) {
1272 pr_err("No kallsyms or vmlinux with build-id %s "
1273 "was found\n", sbuild_id
);
1274 free(kallsyms_allocated_filename
);
1279 * Last resort, if we don't have a build-id and couldn't find
1280 * any vmlinux file, try the running kernel kallsyms table.
1282 kallsyms_filename
= "/proc/kallsyms";
1286 err
= dso__load_kallsyms(dso
, kallsyms_filename
, map
, filter
);
1288 pr_debug("Using %s for symbols\n", kallsyms_filename
);
1289 free(kallsyms_allocated_filename
);
1291 if (err
> 0 && !dso__is_kcore(dso
)) {
1292 dso__set_long_name(dso
, strdup("[kernel.kallsyms]"));
1293 map__fixup_start(map
);
1294 map__fixup_end(map
);
static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map,
				      symbol_filter_t filter)
{
	int err;
	const char *kallsyms_filename = NULL;
	struct machine *machine;
	char path[PATH_MAX];

	if (!map->groups) {
		pr_debug("Guest kernel map hasn't the point to groups\n");
		return -1;
	}
	machine = map->groups->machine;

	if (machine__is_default_guest(machine)) {
		/*
		 * if the user specified a vmlinux filename, use it and only
		 * it, reporting errors to the user if it cannot be used.
		 * Or use file guest_kallsyms inputted by user on commandline
		 */
		if (symbol_conf.default_guest_vmlinux_name != NULL) {
			err = dso__load_vmlinux(dso, map,
				symbol_conf.default_guest_vmlinux_name, filter);
			return err;
		}

		kallsyms_filename = symbol_conf.default_guest_kallsyms;
		if (!kallsyms_filename)
			return -1;
	} else {
		sprintf(path, "%s/proc/kallsyms", machine->root_dir);
		kallsyms_filename = path;
	}

	err = dso__load_kallsyms(dso, kallsyms_filename, map, filter);
	if (err > 0)
		pr_debug("Using %s for symbols\n", kallsyms_filename);
	if (err > 0 && !dso__is_kcore(dso)) {
		machine__mmap_name(machine, path, sizeof(path));
		dso__set_long_name(dso, strdup(path));
		map__fixup_start(map);
		map__fixup_end(map);
	}

	return err;
}
static void vmlinux_path__exit(void)
{
	while (--vmlinux_path__nr_entries >= 0) {
		free(vmlinux_path[vmlinux_path__nr_entries]);
		vmlinux_path[vmlinux_path__nr_entries] = NULL;
	}

	free(vmlinux_path);
	vmlinux_path = NULL;
}
static int vmlinux_path__init(void)
{
	struct utsname uts;
	char bf[PATH_MAX];

	vmlinux_path = malloc(sizeof(char *) * 5);
	if (vmlinux_path == NULL)
		return -1;

	vmlinux_path[vmlinux_path__nr_entries] = strdup("vmlinux");
	if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
		goto out_fail;
	++vmlinux_path__nr_entries;
	vmlinux_path[vmlinux_path__nr_entries] = strdup("/boot/vmlinux");
	if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
		goto out_fail;
	++vmlinux_path__nr_entries;

	/* only try running kernel version if no symfs was given */
	if (symbol_conf.symfs[0] != 0)
		return 0;

	if (uname(&uts) < 0)
		return -1;

	snprintf(bf, sizeof(bf), "/boot/vmlinux-%s", uts.release);
	vmlinux_path[vmlinux_path__nr_entries] = strdup(bf);
	if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
		goto out_fail;
	++vmlinux_path__nr_entries;
	snprintf(bf, sizeof(bf), "/lib/modules/%s/build/vmlinux", uts.release);
	vmlinux_path[vmlinux_path__nr_entries] = strdup(bf);
	if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
		goto out_fail;
	++vmlinux_path__nr_entries;
	snprintf(bf, sizeof(bf), "/usr/lib/debug/lib/modules/%s/vmlinux",
		 uts.release);
	vmlinux_path[vmlinux_path__nr_entries] = strdup(bf);
	if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
		goto out_fail;
	++vmlinux_path__nr_entries;

	return 0;

out_fail:
	vmlinux_path__exit();
	return -1;
}
static int setup_list(struct strlist **list, const char *list_str,
		      const char *list_name)
{
	if (list_str == NULL)
		return 0;

	*list = strlist__new(true, list_str);
	if (!*list) {
		pr_err("problems parsing %s list\n", list_name);
		return -1;
	}
	return 0;
}
static bool symbol__read_kptr_restrict(void)
{
	bool value = false;

	if (geteuid() != 0) {
		FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r");
		if (fp != NULL) {
			char line[8];

			if (fgets(line, sizeof(line), fp) != NULL)
				value = atoi(line) != 0;

			fclose(fp);
		}
	}

	return value;
}
int symbol__init(void)
{
	const char *symfs;

	if (symbol_conf.initialized)
		return 0;

	symbol_conf.priv_size = PERF_ALIGN(symbol_conf.priv_size, sizeof(u64));

	symbol__elf_init();

	if (symbol_conf.sort_by_name)
		symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) -
					  sizeof(struct symbol));

	if (symbol_conf.try_vmlinux_path && vmlinux_path__init() < 0)
		return -1;

	if (symbol_conf.field_sep && *symbol_conf.field_sep == '.') {
		pr_err("'.' is the only non valid --field-separator argument\n");
		return -1;
	}

	if (setup_list(&symbol_conf.dso_list,
		       symbol_conf.dso_list_str, "dso") < 0)
		return -1;

	if (setup_list(&symbol_conf.comm_list,
		       symbol_conf.comm_list_str, "comm") < 0)
		goto out_free_dso_list;

	if (setup_list(&symbol_conf.sym_list,
		       symbol_conf.sym_list_str, "symbol") < 0)
		goto out_free_comm_list;

	/*
	 * A path to symbols of "/" is identical to ""
	 * reset here for simplicity.
	 */
	symfs = realpath(symbol_conf.symfs, NULL);
	if (symfs == NULL)
		symfs = symbol_conf.symfs;
	if (strcmp(symfs, "/") == 0)
		symbol_conf.symfs = "";
	if (symfs != symbol_conf.symfs)
		free((void *)symfs);

	symbol_conf.kptr_restrict = symbol__read_kptr_restrict();

	symbol_conf.initialized = true;
	return 0;

out_free_comm_list:
	strlist__delete(symbol_conf.comm_list);
out_free_dso_list:
	strlist__delete(symbol_conf.dso_list);
	return -1;
}
void symbol__exit(void)
{
	if (!symbol_conf.initialized)
		return;
	strlist__delete(symbol_conf.sym_list);
	strlist__delete(symbol_conf.dso_list);
	strlist__delete(symbol_conf.comm_list);
	vmlinux_path__exit();
	symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL;
	symbol_conf.initialized = false;
}