tools/perf/util/symbol.c
1 // SPDX-License-Identifier: GPL-2.0
2 #include <dirent.h>
3 #include <errno.h>
4 #include <stdlib.h>
5 #include <stdio.h>
6 #include <string.h>
7 #include <linux/kernel.h>
8 #include <linux/mman.h>
9 #include <linux/time64.h>
10 #include <sys/types.h>
11 #include <sys/stat.h>
12 #include <sys/param.h>
13 #include <fcntl.h>
14 #include <unistd.h>
15 #include <inttypes.h>
16 #include "annotate.h"
17 #include "build-id.h"
18 #include "util.h"
19 #include "debug.h"
20 #include "machine.h"
21 #include "map.h"
22 #include "symbol.h"
23 #include "strlist.h"
24 #include "intlist.h"
25 #include "namespaces.h"
26 #include "header.h"
27 #include "path.h"
28 #include "sane_ctype.h"
30 #include <elf.h>
31 #include <limits.h>
32 #include <symbol/kallsyms.h>
33 #include <sys/utsname.h>
35 static int dso__load_kernel_sym(struct dso *dso, struct map *map);
36 static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map);
37 static bool symbol__is_idle(const char *name);
39 int vmlinux_path__nr_entries;
40 char **vmlinux_path;
42 struct symbol_conf symbol_conf = {
43 .nanosecs = false,
44 .use_modules = true,
45 .try_vmlinux_path = true,
46 .demangle = true,
47 .demangle_kernel = false,
48 .cumulate_callchain = true,
49 .time_quantum = 100 * NSEC_PER_MSEC, /* 100ms */
50 .show_hist_headers = true,
51 .symfs = "",
52 .event_group = true,
53 .inline_name = true,
54 .res_sample = 0,
57 static enum dso_binary_type binary_type_symtab[] = {
58 DSO_BINARY_TYPE__KALLSYMS,
59 DSO_BINARY_TYPE__GUEST_KALLSYMS,
60 DSO_BINARY_TYPE__JAVA_JIT,
61 DSO_BINARY_TYPE__DEBUGLINK,
62 DSO_BINARY_TYPE__BUILD_ID_CACHE,
63 DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO,
64 DSO_BINARY_TYPE__FEDORA_DEBUGINFO,
65 DSO_BINARY_TYPE__UBUNTU_DEBUGINFO,
66 DSO_BINARY_TYPE__BUILDID_DEBUGINFO,
67 DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
68 DSO_BINARY_TYPE__GUEST_KMODULE,
69 DSO_BINARY_TYPE__GUEST_KMODULE_COMP,
70 DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE,
71 DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP,
72 DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO,
73 DSO_BINARY_TYPE__NOT_FOUND,
76 #define DSO_BINARY_TYPE__SYMTAB_CNT ARRAY_SIZE(binary_type_symtab)
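/*
 * Keep only the kallsyms/nm symbol types we care about: text ('T'),
 * weak ('W'), data ('D') and BSS ('B'), compared case-insensitively.
 */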
78 static bool symbol_type__filter(char symbol_type)
80 symbol_type = toupper(symbol_type);
81 return symbol_type == 'T' || symbol_type == 'W' || symbol_type == 'D' || symbol_type == 'B';
84 static int prefix_underscores_count(const char *str)
86 const char *tail = str;
88 while (*tail == '_')
89 tail++;
91 return tail - str;
94 const char * __weak arch__normalize_symbol_name(const char *name)
96 return name;
99 int __weak arch__compare_symbol_names(const char *namea, const char *nameb)
101 return strcmp(namea, nameb);
104 int __weak arch__compare_symbol_names_n(const char *namea, const char *nameb,
105 unsigned int n)
107 return strncmp(namea, nameb, n);
110 int __weak arch__choose_best_symbol(struct symbol *syma,
111 struct symbol *symb __maybe_unused)
113 /* Avoid "SyS" kernel syscall aliases */
114 if (strlen(syma->name) >= 3 && !strncmp(syma->name, "SyS", 3))
115 return SYMBOL_B;
116 if (strlen(syma->name) >= 10 && !strncmp(syma->name, "compat_SyS", 10))
117 return SYMBOL_B;
119 return SYMBOL_A;
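/*
 * Pick the better of two symbols that share a start address: prefer the
 * one with non-zero size, then a non-weak one, then a global one, then
 * the one with fewer leading underscores, then the longer name, and
 * finally let the architecture-specific hook break the tie.
 */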
122 static int choose_best_symbol(struct symbol *syma, struct symbol *symb)
124 s64 a;
125 s64 b;
126 size_t na, nb;
128 /* Prefer a symbol with non zero length */
129 a = syma->end - syma->start;
130 b = symb->end - symb->start;
131 if ((b == 0) && (a > 0))
132 return SYMBOL_A;
133 else if ((a == 0) && (b > 0))
134 return SYMBOL_B;
136 /* Prefer a non weak symbol over a weak one */
137 a = syma->binding == STB_WEAK;
138 b = symb->binding == STB_WEAK;
139 if (b && !a)
140 return SYMBOL_A;
141 if (a && !b)
142 return SYMBOL_B;
144 /* Prefer a global symbol over a non global one */
145 a = syma->binding == STB_GLOBAL;
146 b = symb->binding == STB_GLOBAL;
147 if (a && !b)
148 return SYMBOL_A;
149 if (b && !a)
150 return SYMBOL_B;
152 /* Prefer a symbol with less underscores */
153 a = prefix_underscores_count(syma->name);
154 b = prefix_underscores_count(symb->name);
155 if (b > a)
156 return SYMBOL_A;
157 else if (a > b)
158 return SYMBOL_B;
160 /* Choose the symbol with the longest name */
161 na = strlen(syma->name);
162 nb = strlen(symb->name);
163 if (na > nb)
164 return SYMBOL_A;
165 else if (na < nb)
166 return SYMBOL_B;
168 return arch__choose_best_symbol(syma, symb);
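/*
 * Remove duplicate entries that share a start address, keeping only the
 * symbol picked by choose_best_symbol(). Does nothing when symbol
 * aliases are allowed.
 */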
171 void symbols__fixup_duplicate(struct rb_root_cached *symbols)
173 struct rb_node *nd;
174 struct symbol *curr, *next;
176 if (symbol_conf.allow_aliases)
177 return;
179 nd = rb_first_cached(symbols);
181 while (nd) {
182 curr = rb_entry(nd, struct symbol, rb_node);
183 again:
184 nd = rb_next(&curr->rb_node);
185 next = rb_entry(nd, struct symbol, rb_node);
187 if (!nd)
188 break;
190 if (curr->start != next->start)
191 continue;
193 if (choose_best_symbol(curr, next) == SYMBOL_A) {
194 rb_erase_cached(&next->rb_node, symbols);
195 symbol__delete(next);
196 goto again;
197 } else {
198 nd = rb_next(&curr->rb_node);
199 rb_erase_cached(&curr->rb_node, symbols);
200 symbol__delete(curr);
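/*
 * Give zero-sized symbols an end address: use the start of the next
 * symbol, and for the last one fall back to roundup(start, 4096) + 4096.
 */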
205 void symbols__fixup_end(struct rb_root_cached *symbols)
207 struct rb_node *nd, *prevnd = rb_first_cached(symbols);
208 struct symbol *curr, *prev;
210 if (prevnd == NULL)
211 return;
213 curr = rb_entry(prevnd, struct symbol, rb_node);
215 for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) {
216 prev = curr;
217 curr = rb_entry(nd, struct symbol, rb_node);
219 if (prev->end == prev->start && prev->end != curr->start)
220 prev->end = curr->start;
223 /* Last entry */
224 if (curr->end == curr->start)
225 curr->end = roundup(curr->start, 4096) + 4096;
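/*
 * Fill in missing map end addresses using the start of the following
 * map; the last map is left open-ended until real symbols are loaded.
 */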
228 void map_groups__fixup_end(struct map_groups *mg)
230 struct maps *maps = &mg->maps;
231 struct map *next, *curr;
233 down_write(&maps->lock);
235 curr = maps__first(maps);
236 if (curr == NULL)
237 goto out_unlock;
239 for (next = map__next(curr); next; next = map__next(curr)) {
240 if (!curr->end)
241 curr->end = next->start;
242 curr = next;
245 /*
246 * We still don't have the actual symbols, so guess the
247 * last map's final address.
248 */
249 if (!curr->end)
250 curr->end = ~0ULL;
252 out_unlock:
253 up_write(&maps->lock);
256 struct symbol *symbol__new(u64 start, u64 len, u8 binding, u8 type, const char *name)
258 size_t namelen = strlen(name) + 1;
259 struct symbol *sym = calloc(1, (symbol_conf.priv_size +
260 sizeof(*sym) + namelen));
261 if (sym == NULL)
262 return NULL;
264 if (symbol_conf.priv_size) {
265 if (symbol_conf.init_annotation) {
266 struct annotation *notes = (void *)sym;
267 pthread_mutex_init(&notes->lock, NULL);
269 sym = ((void *)sym) + symbol_conf.priv_size;
272 sym->start = start;
273 sym->end = len ? start + len : start;
274 sym->type = type;
275 sym->binding = binding;
276 sym->namelen = namelen - 1;
278 pr_debug4("%s: %s %#" PRIx64 "-%#" PRIx64 "\n",
279 __func__, name, start, sym->end);
280 memcpy(sym->name, name, namelen);
282 return sym;
285 void symbol__delete(struct symbol *sym)
287 free(((void *)sym) - symbol_conf.priv_size);
290 void symbols__delete(struct rb_root_cached *symbols)
292 struct symbol *pos;
293 struct rb_node *next = rb_first_cached(symbols);
295 while (next) {
296 pos = rb_entry(next, struct symbol, rb_node);
297 next = rb_next(&pos->rb_node);
298 rb_erase_cached(&pos->rb_node, symbols);
299 symbol__delete(pos);
303 void __symbols__insert(struct rb_root_cached *symbols,
304 struct symbol *sym, bool kernel)
306 struct rb_node **p = &symbols->rb_root.rb_node;
307 struct rb_node *parent = NULL;
308 const u64 ip = sym->start;
309 struct symbol *s;
310 bool leftmost = true;
312 if (kernel) {
313 const char *name = sym->name;
314 /*
315 * ppc64 uses function descriptors and prepends a '.' to the
316 * name of every function entry. Remove it.
317 */
318 if (name[0] == '.')
319 name++;
320 sym->idle = symbol__is_idle(name);
323 while (*p != NULL) {
324 parent = *p;
325 s = rb_entry(parent, struct symbol, rb_node);
326 if (ip < s->start)
327 p = &(*p)->rb_left;
328 else {
329 p = &(*p)->rb_right;
330 leftmost = false;
333 rb_link_node(&sym->rb_node, parent, p);
334 rb_insert_color_cached(&sym->rb_node, symbols, leftmost);
337 void symbols__insert(struct rb_root_cached *symbols, struct symbol *sym)
339 __symbols__insert(symbols, sym, false);
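/*
 * Binary search for the symbol containing @ip. A symbol's end address
 * only matches when the symbol is zero-sized.
 */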
342 static struct symbol *symbols__find(struct rb_root_cached *symbols, u64 ip)
344 struct rb_node *n;
346 if (symbols == NULL)
347 return NULL;
349 n = symbols->rb_root.rb_node;
351 while (n) {
352 struct symbol *s = rb_entry(n, struct symbol, rb_node);
354 if (ip < s->start)
355 n = n->rb_left;
356 else if (ip > s->end || (ip == s->end && ip != s->start))
357 n = n->rb_right;
358 else
359 return s;
362 return NULL;
365 static struct symbol *symbols__first(struct rb_root_cached *symbols)
367 struct rb_node *n = rb_first_cached(symbols);
369 if (n)
370 return rb_entry(n, struct symbol, rb_node);
372 return NULL;
375 static struct symbol *symbols__last(struct rb_root_cached *symbols)
377 struct rb_node *n = rb_last(&symbols->rb_root);
379 if (n)
380 return rb_entry(n, struct symbol, rb_node);
382 return NULL;
385 static struct symbol *symbols__next(struct symbol *sym)
387 struct rb_node *n = rb_next(&sym->rb_node);
389 if (n)
390 return rb_entry(n, struct symbol, rb_node);
392 return NULL;
395 static void symbols__insert_by_name(struct rb_root_cached *symbols, struct symbol *sym)
397 struct rb_node **p = &symbols->rb_root.rb_node;
398 struct rb_node *parent = NULL;
399 struct symbol_name_rb_node *symn, *s;
400 bool leftmost = true;
402 symn = container_of(sym, struct symbol_name_rb_node, sym);
404 while (*p != NULL) {
405 parent = *p;
406 s = rb_entry(parent, struct symbol_name_rb_node, rb_node);
407 if (strcmp(sym->name, s->sym.name) < 0)
408 p = &(*p)->rb_left;
409 else {
410 p = &(*p)->rb_right;
411 leftmost = false;
414 rb_link_node(&symn->rb_node, parent, p);
415 rb_insert_color_cached(&symn->rb_node, symbols, leftmost);
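/*
 * Build a second rb-tree of the same symbols sorted by name, used for
 * the by-name lookups below.
 */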
418 static void symbols__sort_by_name(struct rb_root_cached *symbols,
419 struct rb_root_cached *source)
421 struct rb_node *nd;
423 for (nd = rb_first_cached(source); nd; nd = rb_next(nd)) {
424 struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
425 symbols__insert_by_name(symbols, pos);
429 int symbol__match_symbol_name(const char *name, const char *str,
430 enum symbol_tag_include includes)
432 const char *versioning;
434 if (includes == SYMBOL_TAG_INCLUDE__DEFAULT_ONLY &&
435 (versioning = strstr(name, "@@"))) {
436 int len = strlen(str);
438 if (len < versioning - name)
439 len = versioning - name;
441 return arch__compare_symbol_names_n(name, str, len);
442 } else
443 return arch__compare_symbol_names(name, str);
446 static struct symbol *symbols__find_by_name(struct rb_root_cached *symbols,
447 const char *name,
448 enum symbol_tag_include includes)
450 struct rb_node *n;
451 struct symbol_name_rb_node *s = NULL;
453 if (symbols == NULL)
454 return NULL;
456 n = symbols->rb_root.rb_node;
458 while (n) {
459 int cmp;
461 s = rb_entry(n, struct symbol_name_rb_node, rb_node);
462 cmp = symbol__match_symbol_name(s->sym.name, name, includes);
464 if (cmp > 0)
465 n = n->rb_left;
466 else if (cmp < 0)
467 n = n->rb_right;
468 else
469 break;
472 if (n == NULL)
473 return NULL;
475 if (includes != SYMBOL_TAG_INCLUDE__DEFAULT_ONLY)
476 /* return first symbol that has same name (if any) */
477 for (n = rb_prev(n); n; n = rb_prev(n)) {
478 struct symbol_name_rb_node *tmp;
480 tmp = rb_entry(n, struct symbol_name_rb_node, rb_node);
481 if (arch__compare_symbol_names(tmp->sym.name, s->sym.name))
482 break;
484 s = tmp;
487 return &s->sym;
490 void dso__reset_find_symbol_cache(struct dso *dso)
492 dso->last_find_result.addr = 0;
493 dso->last_find_result.symbol = NULL;
496 void dso__insert_symbol(struct dso *dso, struct symbol *sym)
498 __symbols__insert(&dso->symbols, sym, dso->kernel);
500 /* update the symbol cache if necessary */
501 if (dso->last_find_result.addr >= sym->start &&
502 (dso->last_find_result.addr < sym->end ||
503 sym->start == sym->end)) {
504 dso->last_find_result.symbol = sym;
508 struct symbol *dso__find_symbol(struct dso *dso, u64 addr)
510 if (dso->last_find_result.addr != addr || dso->last_find_result.symbol == NULL) {
511 dso->last_find_result.addr = addr;
512 dso->last_find_result.symbol = symbols__find(&dso->symbols, addr);
515 return dso->last_find_result.symbol;
518 struct symbol *dso__first_symbol(struct dso *dso)
520 return symbols__first(&dso->symbols);
523 struct symbol *dso__last_symbol(struct dso *dso)
525 return symbols__last(&dso->symbols);
528 struct symbol *dso__next_symbol(struct symbol *sym)
530 return symbols__next(sym);
533 struct symbol *symbol__next_by_name(struct symbol *sym)
535 struct symbol_name_rb_node *s = container_of(sym, struct symbol_name_rb_node, sym);
536 struct rb_node *n = rb_next(&s->rb_node);
538 return n ? &rb_entry(n, struct symbol_name_rb_node, rb_node)->sym : NULL;
542 * Returns first symbol that matched with @name.
544 struct symbol *dso__find_symbol_by_name(struct dso *dso, const char *name)
546 struct symbol *s = symbols__find_by_name(&dso->symbol_names, name,
547 SYMBOL_TAG_INCLUDE__NONE);
548 if (!s)
549 s = symbols__find_by_name(&dso->symbol_names, name,
550 SYMBOL_TAG_INCLUDE__DEFAULT_ONLY);
551 return s;
554 void dso__sort_by_name(struct dso *dso)
556 dso__set_sorted_by_name(dso);
557 return symbols__sort_by_name(&dso->symbol_names, &dso->symbols);
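/*
 * Parse a /proc/modules style file and invoke @process_module for each
 * module with its name (wrapped in brackets), start address and size.
 */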
560 int modules__parse(const char *filename, void *arg,
561 int (*process_module)(void *arg, const char *name,
562 u64 start, u64 size))
564 char *line = NULL;
565 size_t n;
566 FILE *file;
567 int err = 0;
569 file = fopen(filename, "r");
570 if (file == NULL)
571 return -1;
573 while (1) {
574 char name[PATH_MAX];
575 u64 start, size;
576 char *sep, *endptr;
577 ssize_t line_len;
579 line_len = getline(&line, &n, file);
580 if (line_len < 0) {
581 if (feof(file))
582 break;
583 err = -1;
584 goto out;
587 if (!line) {
588 err = -1;
589 goto out;
592 line[--line_len] = '\0'; /* \n */
594 sep = strrchr(line, 'x');
595 if (sep == NULL)
596 continue;
598 hex2u64(sep + 1, &start);
600 sep = strchr(line, ' ');
601 if (sep == NULL)
602 continue;
604 *sep = '\0';
606 scnprintf(name, sizeof(name), "[%s]", line);
608 size = strtoul(sep + 1, &endptr, 0);
609 if (*endptr != ' ' && *endptr != '\t')
610 continue;
612 err = process_module(arg, name, start, size);
613 if (err)
614 break;
616 out:
617 free(line);
618 fclose(file);
619 return err;
622 /*
623 * These are symbols in the kernel image, so make sure that
624 * sym is from a kernel DSO.
625 */
626 static bool symbol__is_idle(const char *name)
628 const char * const idle_symbols[] = {
629 "arch_cpu_idle",
630 "cpu_idle",
631 "cpu_startup_entry",
632 "intel_idle",
633 "default_idle",
634 "native_safe_halt",
635 "enter_idle",
636 "exit_idle",
637 "mwait_idle",
638 "mwait_idle_with_hints",
639 "poll_idle",
640 "ppc64_runlatch_off",
641 "pseries_dedicated_idle_sleep",
642 NULL
644 int i;
646 for (i = 0; idle_symbols[i]; i++) {
647 if (!strcmp(idle_symbols[i], name))
648 return true;
651 return false;
654 static int map__process_kallsym_symbol(void *arg, const char *name,
655 char type, u64 start)
657 struct symbol *sym;
658 struct dso *dso = arg;
659 struct rb_root_cached *root = &dso->symbols;
661 if (!symbol_type__filter(type))
662 return 0;
664 /*
665 * module symbols are not sorted so we add all
666 * symbols, setting length to 0, and rely on
667 * symbols__fixup_end() to fix it up.
668 */
669 sym = symbol__new(start, 0, kallsyms2elf_binding(type), kallsyms2elf_type(type), name);
670 if (sym == NULL)
671 return -ENOMEM;
672 /*
673 * We will pass the symbols to the filter later, in
674 * map__split_kallsyms, when we have split the maps per module.
675 */
676 __symbols__insert(root, sym, !strchr(name, '['));
678 return 0;
681 /*
682 * Loads the function entries in /proc/kallsyms into kernel_map->dso,
683 * so that in the next step we can set the symbol ->end address and then
684 * call kernel_maps__split_kallsyms.
685 */
686 static int dso__load_all_kallsyms(struct dso *dso, const char *filename)
688 return kallsyms__parse(filename, dso, map__process_kallsym_symbol);
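/*
 * Move the kallsyms symbols into the kcore maps they fall into,
 * rebasing their addresses from kernel addresses to dso-relative ones,
 * and drop symbols that land outside any map.
 */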
691 static int map_groups__split_kallsyms_for_kcore(struct map_groups *kmaps, struct dso *dso)
693 struct map *curr_map;
694 struct symbol *pos;
695 int count = 0;
696 struct rb_root_cached old_root = dso->symbols;
697 struct rb_root_cached *root = &dso->symbols;
698 struct rb_node *next = rb_first_cached(root);
700 if (!kmaps)
701 return -1;
703 *root = RB_ROOT_CACHED;
705 while (next) {
706 char *module;
708 pos = rb_entry(next, struct symbol, rb_node);
709 next = rb_next(&pos->rb_node);
711 rb_erase_cached(&pos->rb_node, &old_root);
712 RB_CLEAR_NODE(&pos->rb_node);
713 module = strchr(pos->name, '\t');
714 if (module)
715 *module = '\0';
717 curr_map = map_groups__find(kmaps, pos->start);
719 if (!curr_map) {
720 symbol__delete(pos);
721 continue;
724 pos->start -= curr_map->start - curr_map->pgoff;
725 if (pos->end > curr_map->end)
726 pos->end = curr_map->end;
727 if (pos->end)
728 pos->end -= curr_map->start - curr_map->pgoff;
729 symbols__insert(&curr_map->dso->symbols, pos);
730 ++count;
733 /* Symbols have been adjusted */
734 dso->adjust_symbols = 1;
736 return count;
739 /*
740 * Split the symbols into maps, making sure there are no overlaps, i.e. the
741 * kernel range is broken into several maps, named [kernel].N, as we don't have
742 * the original ELF section names that vmlinux has.
743 */
744 static int map_groups__split_kallsyms(struct map_groups *kmaps, struct dso *dso, u64 delta,
745 struct map *initial_map)
747 struct machine *machine;
748 struct map *curr_map = initial_map;
749 struct symbol *pos;
750 int count = 0, moved = 0;
751 struct rb_root_cached *root = &dso->symbols;
752 struct rb_node *next = rb_first_cached(root);
753 int kernel_range = 0;
754 bool x86_64;
756 if (!kmaps)
757 return -1;
759 machine = kmaps->machine;
761 x86_64 = machine__is(machine, "x86_64");
763 while (next) {
764 char *module;
766 pos = rb_entry(next, struct symbol, rb_node);
767 next = rb_next(&pos->rb_node);
769 module = strchr(pos->name, '\t');
770 if (module) {
771 if (!symbol_conf.use_modules)
772 goto discard_symbol;
774 *module++ = '\0';
776 if (strcmp(curr_map->dso->short_name, module)) {
777 if (curr_map != initial_map &&
778 dso->kernel == DSO_TYPE_GUEST_KERNEL &&
779 machine__is_default_guest(machine)) {
780 /*
781 * We assume all symbols of a module are
782 * contiguous in kallsyms, so curr_map
783 * points to a module and all its
784 * symbols are in its kmap. Mark it as
785 * loaded.
786 */
787 dso__set_loaded(curr_map->dso);
790 curr_map = map_groups__find_by_name(kmaps, module);
791 if (curr_map == NULL) {
792 pr_debug("%s/proc/{kallsyms,modules} "
793 "inconsistency while looking "
794 "for \"%s\" module!\n",
795 machine->root_dir, module);
796 curr_map = initial_map;
797 goto discard_symbol;
800 if (curr_map->dso->loaded &&
801 !machine__is_default_guest(machine))
802 goto discard_symbol;
804 /*
805 * So that we look just like we get from .ko files,
806 * i.e. not prelinked, relative to initial_map->start.
807 */
808 pos->start = curr_map->map_ip(curr_map, pos->start);
809 pos->end = curr_map->map_ip(curr_map, pos->end);
810 } else if (x86_64 && is_entry_trampoline(pos->name)) {
811 /*
812 * These symbols are not needed anymore since the
813 * trampoline maps refer to the text section and its
814 * symbols instead. Avoid having to deal with
815 * relocations, and the assumption that the first symbol
816 * is the start of kernel text, by simply removing the
817 * symbols at this point.
818 */
819 goto discard_symbol;
820 } else if (curr_map != initial_map) {
821 char dso_name[PATH_MAX];
822 struct dso *ndso;
824 if (delta) {
825 /* Kernel was relocated at boot time */
826 pos->start -= delta;
827 pos->end -= delta;
830 if (count == 0) {
831 curr_map = initial_map;
832 goto add_symbol;
835 if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
836 snprintf(dso_name, sizeof(dso_name),
837 "[guest.kernel].%d",
838 kernel_range++);
839 else
840 snprintf(dso_name, sizeof(dso_name),
841 "[kernel].%d",
842 kernel_range++);
844 ndso = dso__new(dso_name);
845 if (ndso == NULL)
846 return -1;
848 ndso->kernel = dso->kernel;
850 curr_map = map__new2(pos->start, ndso);
851 if (curr_map == NULL) {
852 dso__put(ndso);
853 return -1;
856 curr_map->map_ip = curr_map->unmap_ip = identity__map_ip;
857 map_groups__insert(kmaps, curr_map);
858 ++kernel_range;
859 } else if (delta) {
860 /* Kernel was relocated at boot time */
861 pos->start -= delta;
862 pos->end -= delta;
864 add_symbol:
865 if (curr_map != initial_map) {
866 rb_erase_cached(&pos->rb_node, root);
867 symbols__insert(&curr_map->dso->symbols, pos);
868 ++moved;
869 } else
870 ++count;
872 continue;
873 discard_symbol:
874 rb_erase_cached(&pos->rb_node, root);
875 symbol__delete(pos);
878 if (curr_map != initial_map &&
879 dso->kernel == DSO_TYPE_GUEST_KERNEL &&
880 machine__is_default_guest(kmaps->machine)) {
881 dso__set_loaded(curr_map->dso);
884 return count + moved;
887 bool symbol__restricted_filename(const char *filename,
888 const char *restricted_filename)
890 bool restricted = false;
892 if (symbol_conf.kptr_restrict) {
893 char *r = realpath(filename, NULL);
895 if (r != NULL) {
896 restricted = strcmp(r, restricted_filename) == 0;
897 free(r);
898 return restricted;
902 return restricted;
905 struct module_info {
906 struct rb_node rb_node;
907 char *name;
908 u64 start;
911 static void add_module(struct module_info *mi, struct rb_root *modules)
913 struct rb_node **p = &modules->rb_node;
914 struct rb_node *parent = NULL;
915 struct module_info *m;
917 while (*p != NULL) {
918 parent = *p;
919 m = rb_entry(parent, struct module_info, rb_node);
920 if (strcmp(mi->name, m->name) < 0)
921 p = &(*p)->rb_left;
922 else
923 p = &(*p)->rb_right;
925 rb_link_node(&mi->rb_node, parent, p);
926 rb_insert_color(&mi->rb_node, modules);
929 static void delete_modules(struct rb_root *modules)
931 struct module_info *mi;
932 struct rb_node *next = rb_first(modules);
934 while (next) {
935 mi = rb_entry(next, struct module_info, rb_node);
936 next = rb_next(&mi->rb_node);
937 rb_erase(&mi->rb_node, modules);
938 zfree(&mi->name);
939 free(mi);
943 static struct module_info *find_module(const char *name,
944 struct rb_root *modules)
946 struct rb_node *n = modules->rb_node;
948 while (n) {
949 struct module_info *m;
950 int cmp;
952 m = rb_entry(n, struct module_info, rb_node);
953 cmp = strcmp(name, m->name);
954 if (cmp < 0)
955 n = n->rb_left;
956 else if (cmp > 0)
957 n = n->rb_right;
958 else
959 return m;
962 return NULL;
965 static int __read_proc_modules(void *arg, const char *name, u64 start,
966 u64 size __maybe_unused)
968 struct rb_root *modules = arg;
969 struct module_info *mi;
971 mi = zalloc(sizeof(struct module_info));
972 if (!mi)
973 return -ENOMEM;
975 mi->name = strdup(name);
976 mi->start = start;
978 if (!mi->name) {
979 free(mi);
980 return -ENOMEM;
983 add_module(mi, modules);
985 return 0;
988 static int read_proc_modules(const char *filename, struct rb_root *modules)
990 if (symbol__restricted_filename(filename, "/proc/modules"))
991 return -1;
993 if (modules__parse(filename, modules, __read_proc_modules)) {
994 delete_modules(modules);
995 return -1;
998 return 0;
1001 int compare_proc_modules(const char *from, const char *to)
1003 struct rb_root from_modules = RB_ROOT;
1004 struct rb_root to_modules = RB_ROOT;
1005 struct rb_node *from_node, *to_node;
1006 struct module_info *from_m, *to_m;
1007 int ret = -1;
1009 if (read_proc_modules(from, &from_modules))
1010 return -1;
1012 if (read_proc_modules(to, &to_modules))
1013 goto out_delete_from;
1015 from_node = rb_first(&from_modules);
1016 to_node = rb_first(&to_modules);
1017 while (from_node) {
1018 if (!to_node)
1019 break;
1021 from_m = rb_entry(from_node, struct module_info, rb_node);
1022 to_m = rb_entry(to_node, struct module_info, rb_node);
1024 if (from_m->start != to_m->start ||
1025 strcmp(from_m->name, to_m->name))
1026 break;
1028 from_node = rb_next(from_node);
1029 to_node = rb_next(to_node);
1032 if (!from_node && !to_node)
1033 ret = 0;
1035 delete_modules(&to_modules);
1036 out_delete_from:
1037 delete_modules(&from_modules);
1039 return ret;
1042 struct map *map_groups__first(struct map_groups *mg)
1044 return maps__first(&mg->maps);
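/*
 * Check that every module map in @kmaps appears in the given modules
 * file at the same start address.
 */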
1047 static int do_validate_kcore_modules(const char *filename,
1048 struct map_groups *kmaps)
1050 struct rb_root modules = RB_ROOT;
1051 struct map *old_map;
1052 int err;
1054 err = read_proc_modules(filename, &modules);
1055 if (err)
1056 return err;
1058 old_map = map_groups__first(kmaps);
1059 while (old_map) {
1060 struct map *next = map_groups__next(old_map);
1061 struct module_info *mi;
1063 if (!__map__is_kmodule(old_map)) {
1064 old_map = next;
1065 continue;
1068 /* Module must be in memory at the same address */
1069 mi = find_module(old_map->dso->short_name, &modules);
1070 if (!mi || mi->start != old_map->start) {
1071 err = -EINVAL;
1072 goto out;
1075 old_map = next;
1077 out:
1078 delete_modules(&modules);
1079 return err;
1082 /*
1083 * If kallsyms is referenced by name then we look for filename in the same
1084 * directory.
1085 */
1086 static bool filename_from_kallsyms_filename(char *filename,
1087 const char *base_name,
1088 const char *kallsyms_filename)
1090 char *name;
1092 strcpy(filename, kallsyms_filename);
1093 name = strrchr(filename, '/');
1094 if (!name)
1095 return false;
1097 name += 1;
1099 if (!strcmp(name, "kallsyms")) {
1100 strcpy(name, base_name);
1101 return true;
1104 return false;
1107 static int validate_kcore_modules(const char *kallsyms_filename,
1108 struct map *map)
1110 struct map_groups *kmaps = map__kmaps(map);
1111 char modules_filename[PATH_MAX];
1113 if (!kmaps)
1114 return -EINVAL;
1116 if (!filename_from_kallsyms_filename(modules_filename, "modules",
1117 kallsyms_filename))
1118 return -EINVAL;
1120 if (do_validate_kcore_modules(modules_filename, kmaps))
1121 return -EINVAL;
1123 return 0;
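/*
 * Verify that @kallsyms_filename describes the same kernel as the
 * current maps: the reference relocation symbol and the module load
 * addresses must match.
 */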
1126 static int validate_kcore_addresses(const char *kallsyms_filename,
1127 struct map *map)
1129 struct kmap *kmap = map__kmap(map);
1131 if (!kmap)
1132 return -EINVAL;
1134 if (kmap->ref_reloc_sym && kmap->ref_reloc_sym->name) {
1135 u64 start;
1137 if (kallsyms__get_function_start(kallsyms_filename,
1138 kmap->ref_reloc_sym->name, &start))
1139 return -ENOENT;
1140 if (start != kmap->ref_reloc_sym->addr)
1141 return -EINVAL;
1144 return validate_kcore_modules(kallsyms_filename, map);
1147 struct kcore_mapfn_data {
1148 struct dso *dso;
1149 struct list_head maps;
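/*
 * Callback for reading maps out of kcore: create a map for each region
 * and queue it on md->maps for later insertion.
 */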
1152 static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data)
1154 struct kcore_mapfn_data *md = data;
1155 struct map *map;
1157 map = map__new2(start, md->dso);
1158 if (map == NULL)
1159 return -ENOMEM;
1161 map->end = map->start + len;
1162 map->pgoff = pgoff;
1164 list_add(&map->node, &md->maps);
1166 return 0;
1169 /*
1170 * Merges map into map_groups by splitting the new map
1171 * within the existing map regions.
1172 */
1173 int map_groups__merge_in(struct map_groups *kmaps, struct map *new_map)
1175 struct map *old_map;
1176 LIST_HEAD(merged);
1178 for (old_map = map_groups__first(kmaps); old_map;
1179 old_map = map_groups__next(old_map)) {
1181 /* no overload with this one */
1182 if (new_map->end < old_map->start ||
1183 new_map->start >= old_map->end)
1184 continue;
1186 if (new_map->start < old_map->start) {
1187 /*
1188 * |new......
1189 *       |old....
1190 */
1191 if (new_map->end < old_map->end) {
1192 /*
1193 * |new......|     -> |new..|
1194 *       |old....| -> |old....|
1195 */
1196 new_map->end = old_map->start;
1197 } else {
1198 /*
1199 * |new.............| -> |new..|       |new..|
1200 *       |old....|    ->       |old....|
1201 */
1202 struct map *m = map__clone(new_map);
1204 if (!m)
1205 return -ENOMEM;
1207 m->end = old_map->start;
1208 list_add_tail(&m->node, &merged);
1209 new_map->start = old_map->end;
1211 } else {
1212 /*
1213 *      |new......
1214 * |old....
1215 */
1216 if (new_map->end < old_map->end) {
1217 /*
1218 *      |new..|   -> x
1219 * |old.........| -> |old.........|
1220 */
1221 map__put(new_map);
1222 new_map = NULL;
1223 break;
1224 } else {
1225 /*
1226 *      |new......| ->         |new...|
1227 * |old....|        -> |old....|
1228 */
1229 new_map->start = old_map->end;
1234 while (!list_empty(&merged)) {
1235 old_map = list_entry(merged.next, struct map, node);
1236 list_del_init(&old_map->node);
1237 map_groups__insert(kmaps, old_map);
1238 map__put(old_map);
1241 if (new_map) {
1242 map_groups__insert(kmaps, new_map);
1243 map__put(new_map);
1245 return 0;
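/*
 * Replace the kernel and module maps with maps read from kcore so that
 * object code can later be read through dso__data_read_addr(). kcore is
 * only used if its addresses match the kallsyms file, and eBPF maps are
 * preserved.
 */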
1248 static int dso__load_kcore(struct dso *dso, struct map *map,
1249 const char *kallsyms_filename)
1251 struct map_groups *kmaps = map__kmaps(map);
1252 struct kcore_mapfn_data md;
1253 struct map *old_map, *new_map, *replacement_map = NULL;
1254 struct machine *machine;
1255 bool is_64_bit;
1256 int err, fd;
1257 char kcore_filename[PATH_MAX];
1258 u64 stext;
1260 if (!kmaps)
1261 return -EINVAL;
1263 machine = kmaps->machine;
1265 /* This function requires that the map is the kernel map */
1266 if (!__map__is_kernel(map))
1267 return -EINVAL;
1269 if (!filename_from_kallsyms_filename(kcore_filename, "kcore",
1270 kallsyms_filename))
1271 return -EINVAL;
1273 /* Modules and kernel must be present at their original addresses */
1274 if (validate_kcore_addresses(kallsyms_filename, map))
1275 return -EINVAL;
1277 md.dso = dso;
1278 INIT_LIST_HEAD(&md.maps);
1280 fd = open(kcore_filename, O_RDONLY);
1281 if (fd < 0) {
1282 pr_debug("Failed to open %s. Note /proc/kcore requires CAP_SYS_RAWIO capability to access.\n",
1283 kcore_filename);
1284 return -EINVAL;
1287 /* Read new maps into temporary lists */
1288 err = file__read_maps(fd, map->prot & PROT_EXEC, kcore_mapfn, &md,
1289 &is_64_bit);
1290 if (err)
1291 goto out_err;
1292 dso->is_64_bit = is_64_bit;
1294 if (list_empty(&md.maps)) {
1295 err = -EINVAL;
1296 goto out_err;
1299 /* Remove old maps */
1300 old_map = map_groups__first(kmaps);
1301 while (old_map) {
1302 struct map *next = map_groups__next(old_map);
1304 /*
1305 * We need to preserve eBPF maps even if they are
1306 * covered by kcore, because we need to access
1307 * eBPF dso for source data.
1308 */
1309 if (old_map != map && !__map__is_bpf_prog(old_map))
1310 map_groups__remove(kmaps, old_map);
1311 old_map = next;
1313 machine->trampolines_mapped = false;
1315 /* Find the kernel map using the '_stext' symbol */
1316 if (!kallsyms__get_function_start(kallsyms_filename, "_stext", &stext)) {
1317 list_for_each_entry(new_map, &md.maps, node) {
1318 if (stext >= new_map->start && stext < new_map->end) {
1319 replacement_map = new_map;
1320 break;
1325 if (!replacement_map)
1326 replacement_map = list_entry(md.maps.next, struct map, node);
1328 /* Add new maps */
1329 while (!list_empty(&md.maps)) {
1330 new_map = list_entry(md.maps.next, struct map, node);
1331 list_del_init(&new_map->node);
1332 if (new_map == replacement_map) {
1333 map->start = new_map->start;
1334 map->end = new_map->end;
1335 map->pgoff = new_map->pgoff;
1336 map->map_ip = new_map->map_ip;
1337 map->unmap_ip = new_map->unmap_ip;
1338 /* Ensure maps are correctly ordered */
1339 map__get(map);
1340 map_groups__remove(kmaps, map);
1341 map_groups__insert(kmaps, map);
1342 map__put(map);
1343 map__put(new_map);
1344 } else {
1345 /*
1346 * Merge kcore map into existing maps,
1347 * and ensure that current maps (eBPF)
1348 * stay intact.
1349 */
1350 if (map_groups__merge_in(kmaps, new_map))
1351 goto out_err;
1355 if (machine__is(machine, "x86_64")) {
1356 u64 addr;
1358 /*
1359 * If one of the corresponding symbols is there, assume the
1360 * entry trampoline maps are too.
1361 */
1362 if (!kallsyms__get_function_start(kallsyms_filename,
1363 ENTRY_TRAMPOLINE_NAME,
1364 &addr))
1365 machine->trampolines_mapped = true;
1368 /*
1369 * Set the data type and long name so that kcore can be read via
1370 * dso__data_read_addr().
1371 */
1372 if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
1373 dso->binary_type = DSO_BINARY_TYPE__GUEST_KCORE;
1374 else
1375 dso->binary_type = DSO_BINARY_TYPE__KCORE;
1376 dso__set_long_name(dso, strdup(kcore_filename), true);
1378 close(fd);
1380 if (map->prot & PROT_EXEC)
1381 pr_debug("Using %s for kernel object code\n", kcore_filename);
1382 else
1383 pr_debug("Using %s for kernel data\n", kcore_filename);
1385 return 0;
1387 out_err:
1388 while (!list_empty(&md.maps)) {
1389 map = list_entry(md.maps.next, struct map, node);
1390 list_del_init(&map->node);
1391 map__put(map);
1393 close(fd);
1394 return -EINVAL;
1397 /*
1398 * If the kernel is relocated at boot time, kallsyms won't match. Compute the
1399 * delta based on the relocation reference symbol.
1400 */
1401 static int kallsyms__delta(struct kmap *kmap, const char *filename, u64 *delta)
1403 u64 addr;
1405 if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->name)
1406 return 0;
1408 if (kallsyms__get_function_start(filename, kmap->ref_reloc_sym->name, &addr))
1409 return -1;
1411 *delta = addr - kmap->ref_reloc_sym->addr;
1412 return 0;
1415 int __dso__load_kallsyms(struct dso *dso, const char *filename,
1416 struct map *map, bool no_kcore)
1418 struct kmap *kmap = map__kmap(map);
1419 u64 delta = 0;
1421 if (symbol__restricted_filename(filename, "/proc/kallsyms"))
1422 return -1;
1424 if (!kmap || !kmap->kmaps)
1425 return -1;
1427 if (dso__load_all_kallsyms(dso, filename) < 0)
1428 return -1;
1430 if (kallsyms__delta(kmap, filename, &delta))
1431 return -1;
1433 symbols__fixup_end(&dso->symbols);
1434 symbols__fixup_duplicate(&dso->symbols);
1436 if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
1437 dso->symtab_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
1438 else
1439 dso->symtab_type = DSO_BINARY_TYPE__KALLSYMS;
1441 if (!no_kcore && !dso__load_kcore(dso, map, filename))
1442 return map_groups__split_kallsyms_for_kcore(kmap->kmaps, dso);
1443 else
1444 return map_groups__split_kallsyms(kmap->kmaps, dso, delta, map);
1447 int dso__load_kallsyms(struct dso *dso, const char *filename,
1448 struct map *map)
1450 return __dso__load_kallsyms(dso, filename, map, false);
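/*
 * Parse a /tmp/perf-<pid>.map file, where each line is
 * "<start> <size> <symbol name>" with hex start and size. Returns the
 * number of symbols added, or -1 on error.
 */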
1453 static int dso__load_perf_map(const char *map_path, struct dso *dso)
1455 char *line = NULL;
1456 size_t n;
1457 FILE *file;
1458 int nr_syms = 0;
1460 file = fopen(map_path, "r");
1461 if (file == NULL)
1462 goto out_failure;
1464 while (!feof(file)) {
1465 u64 start, size;
1466 struct symbol *sym;
1467 int line_len, len;
1469 line_len = getline(&line, &n, file);
1470 if (line_len < 0)
1471 break;
1473 if (!line)
1474 goto out_failure;
1476 line[--line_len] = '\0'; /* \n */
1478 len = hex2u64(line, &start);
1480 len++;
1481 if (len + 2 >= line_len)
1482 continue;
1484 len += hex2u64(line + len, &size);
1486 len++;
1487 if (len + 2 >= line_len)
1488 continue;
1490 sym = symbol__new(start, size, STB_GLOBAL, STT_FUNC, line + len);
1492 if (sym == NULL)
1493 goto out_delete_line;
1495 symbols__insert(&dso->symbols, sym);
1496 nr_syms++;
1499 free(line);
1500 fclose(file);
1502 return nr_syms;
1504 out_delete_line:
1505 free(line);
1506 out_failure:
1507 return -1;
1510 static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod,
1511 enum dso_binary_type type)
1513 switch (type) {
1514 case DSO_BINARY_TYPE__JAVA_JIT:
1515 case DSO_BINARY_TYPE__DEBUGLINK:
1516 case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
1517 case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
1518 case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
1519 case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
1520 case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
1521 return !kmod && dso->kernel == DSO_TYPE_USER;
1523 case DSO_BINARY_TYPE__KALLSYMS:
1524 case DSO_BINARY_TYPE__VMLINUX:
1525 case DSO_BINARY_TYPE__KCORE:
1526 return dso->kernel == DSO_TYPE_KERNEL;
1528 case DSO_BINARY_TYPE__GUEST_KALLSYMS:
1529 case DSO_BINARY_TYPE__GUEST_VMLINUX:
1530 case DSO_BINARY_TYPE__GUEST_KCORE:
1531 return dso->kernel == DSO_TYPE_GUEST_KERNEL;
1533 case DSO_BINARY_TYPE__GUEST_KMODULE:
1534 case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
1535 case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
1536 case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
1537 /*
1538 * kernel modules know their symtab type - it's set when
1539 * creating a module dso in machine__findnew_module_map().
1540 */
1541 return kmod && dso->symtab_type == type;
1543 case DSO_BINARY_TYPE__BUILD_ID_CACHE:
1544 case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
1545 return true;
1547 case DSO_BINARY_TYPE__BPF_PROG_INFO:
1548 case DSO_BINARY_TYPE__NOT_FOUND:
1549 default:
1550 return false;
1554 /* Checks for the existence of the perf-<pid>.map file in two different
1555 * locations. First, if the process is in a separate mount namespace, check in
1556 * that namespace using the pid of the innermost pid namespace. If it's not in a
1557 * namespace, or the file can't be found there, try in the mount namespace of
1558 * the tracing process using our view of its pid.
1559 */
1560 static int dso__find_perf_map(char *filebuf, size_t bufsz,
1561 struct nsinfo **nsip)
1563 struct nscookie nsc;
1564 struct nsinfo *nsi;
1565 struct nsinfo *nnsi;
1566 int rc = -1;
1568 nsi = *nsip;
1570 if (nsi->need_setns) {
1571 snprintf(filebuf, bufsz, "/tmp/perf-%d.map", nsi->nstgid);
1572 nsinfo__mountns_enter(nsi, &nsc);
1573 rc = access(filebuf, R_OK);
1574 nsinfo__mountns_exit(&nsc);
1575 if (rc == 0)
1576 return rc;
1579 nnsi = nsinfo__copy(nsi);
1580 if (nnsi) {
1581 nsinfo__put(nsi);
1583 nnsi->need_setns = false;
1584 snprintf(filebuf, bufsz, "/tmp/perf-%d.map", nnsi->tgid);
1585 *nsip = nnsi;
1586 rc = 0;
1589 return rc;
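/*
 * Main entry point for loading a dso's symbols: kernel and guest kernel
 * dsos are handled specially, /tmp/perf-<pid>.map files are parsed
 * directly, and everything else walks the binary_type_symtab[]
 * candidates looking for a symbol table and a runtime image.
 */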
1592 int dso__load(struct dso *dso, struct map *map)
1594 char *name;
1595 int ret = -1;
1596 u_int i;
1597 struct machine *machine;
1598 char *root_dir = (char *) "";
1599 int ss_pos = 0;
1600 struct symsrc ss_[2];
1601 struct symsrc *syms_ss = NULL, *runtime_ss = NULL;
1602 bool kmod;
1603 bool perfmap;
1604 unsigned char build_id[BUILD_ID_SIZE];
1605 struct nscookie nsc;
1606 char newmapname[PATH_MAX];
1607 const char *map_path = dso->long_name;
1609 perfmap = strncmp(dso->name, "/tmp/perf-", 10) == 0;
1610 if (perfmap) {
1611 if (dso->nsinfo && (dso__find_perf_map(newmapname,
1612 sizeof(newmapname), &dso->nsinfo) == 0)) {
1613 map_path = newmapname;
1617 nsinfo__mountns_enter(dso->nsinfo, &nsc);
1618 pthread_mutex_lock(&dso->lock);
1620 /* check again under the dso->lock */
1621 if (dso__loaded(dso)) {
1622 ret = 1;
1623 goto out;
1626 if (map->groups && map->groups->machine)
1627 machine = map->groups->machine;
1628 else
1629 machine = NULL;
1631 if (dso->kernel) {
1632 if (dso->kernel == DSO_TYPE_KERNEL)
1633 ret = dso__load_kernel_sym(dso, map);
1634 else if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
1635 ret = dso__load_guest_kernel_sym(dso, map);
1637 if (machine__is(machine, "x86_64"))
1638 machine__map_x86_64_entry_trampolines(machine, dso);
1639 goto out;
1642 dso->adjust_symbols = 0;
1644 if (perfmap) {
1645 ret = dso__load_perf_map(map_path, dso);
1646 dso->symtab_type = ret > 0 ? DSO_BINARY_TYPE__JAVA_JIT :
1647 DSO_BINARY_TYPE__NOT_FOUND;
1648 goto out;
1651 if (machine)
1652 root_dir = machine->root_dir;
1654 name = malloc(PATH_MAX);
1655 if (!name)
1656 goto out;
1658 kmod = dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
1659 dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
1660 dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE ||
1661 dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
1664 /*
1665 * Read the build id if possible. This is required for
1666 * DSO_BINARY_TYPE__BUILDID_DEBUGINFO to work.
1667 */
1668 if (!dso->has_build_id &&
1669 is_regular_file(dso->long_name)) {
1670 __symbol__join_symfs(name, PATH_MAX, dso->long_name);
1671 if (filename__read_build_id(name, build_id, BUILD_ID_SIZE) > 0)
1672 dso__set_build_id(dso, build_id);
1675 /*
1676 * Iterate over candidate debug images.
1677 * Keep track of "interesting" ones (those which have a symtab, dynsym,
1678 * and/or opd section) for processing.
1679 */
1680 for (i = 0; i < DSO_BINARY_TYPE__SYMTAB_CNT; i++) {
1681 struct symsrc *ss = &ss_[ss_pos];
1682 bool next_slot = false;
1683 bool is_reg;
1684 bool nsexit;
1685 int sirc = -1;
1687 enum dso_binary_type symtab_type = binary_type_symtab[i];
1689 nsexit = (symtab_type == DSO_BINARY_TYPE__BUILD_ID_CACHE ||
1690 symtab_type == DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO);
1692 if (!dso__is_compatible_symtab_type(dso, kmod, symtab_type))
1693 continue;
1695 if (dso__read_binary_type_filename(dso, symtab_type,
1696 root_dir, name, PATH_MAX))
1697 continue;
1699 if (nsexit)
1700 nsinfo__mountns_exit(&nsc);
1702 is_reg = is_regular_file(name);
1703 if (is_reg)
1704 sirc = symsrc__init(ss, dso, name, symtab_type);
1706 if (nsexit)
1707 nsinfo__mountns_enter(dso->nsinfo, &nsc);
1709 if (!is_reg || sirc < 0)
1710 continue;
1712 if (!syms_ss && symsrc__has_symtab(ss)) {
1713 syms_ss = ss;
1714 next_slot = true;
1715 if (!dso->symsrc_filename)
1716 dso->symsrc_filename = strdup(name);
1719 if (!runtime_ss && symsrc__possibly_runtime(ss)) {
1720 runtime_ss = ss;
1721 next_slot = true;
1724 if (next_slot) {
1725 ss_pos++;
1727 if (syms_ss && runtime_ss)
1728 break;
1729 } else {
1730 symsrc__destroy(ss);
1735 if (!runtime_ss && !syms_ss)
1736 goto out_free;
1738 if (runtime_ss && !syms_ss) {
1739 syms_ss = runtime_ss;
1742 /* We'll have to hope for the best */
1743 if (!runtime_ss && syms_ss)
1744 runtime_ss = syms_ss;
1746 if (syms_ss)
1747 ret = dso__load_sym(dso, map, syms_ss, runtime_ss, kmod);
1748 else
1749 ret = -1;
1751 if (ret > 0) {
1752 int nr_plt;
1754 nr_plt = dso__synthesize_plt_symbols(dso, runtime_ss);
1755 if (nr_plt > 0)
1756 ret += nr_plt;
1759 for (; ss_pos > 0; ss_pos--)
1760 symsrc__destroy(&ss_[ss_pos - 1]);
1761 out_free:
1762 free(name);
1763 if (ret < 0 && strstr(dso->name, " (deleted)") != NULL)
1764 ret = 0;
1765 out:
1766 dso__set_loaded(dso);
1767 pthread_mutex_unlock(&dso->lock);
1768 nsinfo__mountns_exit(&nsc);
1770 return ret;
1773 struct map *map_groups__find_by_name(struct map_groups *mg, const char *name)
1775 struct maps *maps = &mg->maps;
1776 struct map *map;
1777 struct rb_node *node;
1779 down_read(&maps->lock);
1781 for (node = maps->names.rb_node; node; ) {
1782 int rc;
1784 map = rb_entry(node, struct map, rb_node_name);
1786 rc = strcmp(map->dso->short_name, name);
1787 if (rc < 0)
1788 node = node->rb_left;
1789 else if (rc > 0)
1790 node = node->rb_right;
1791 else
1793 goto out_unlock;
1796 map = NULL;
1798 out_unlock:
1799 up_read(&maps->lock);
1800 return map;
1803 int dso__load_vmlinux(struct dso *dso, struct map *map,
1804 const char *vmlinux, bool vmlinux_allocated)
1806 int err = -1;
1807 struct symsrc ss;
1808 char symfs_vmlinux[PATH_MAX];
1809 enum dso_binary_type symtab_type;
1811 if (vmlinux[0] == '/')
1812 snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s", vmlinux);
1813 else
1814 symbol__join_symfs(symfs_vmlinux, vmlinux);
1816 if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
1817 symtab_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
1818 else
1819 symtab_type = DSO_BINARY_TYPE__VMLINUX;
1821 if (symsrc__init(&ss, dso, symfs_vmlinux, symtab_type))
1822 return -1;
1824 err = dso__load_sym(dso, map, &ss, &ss, 0);
1825 symsrc__destroy(&ss);
1827 if (err > 0) {
1828 if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
1829 dso->binary_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
1830 else
1831 dso->binary_type = DSO_BINARY_TYPE__VMLINUX;
1832 dso__set_long_name(dso, vmlinux, vmlinux_allocated);
1833 dso__set_loaded(dso);
1834 pr_debug("Using %s for symbols\n", symfs_vmlinux);
1837 return err;
1840 int dso__load_vmlinux_path(struct dso *dso, struct map *map)
1842 int i, err = 0;
1843 char *filename = NULL;
1845 pr_debug("Looking at the vmlinux_path (%d entries long)\n",
1846 vmlinux_path__nr_entries + 1);
1848 for (i = 0; i < vmlinux_path__nr_entries; ++i) {
1849 err = dso__load_vmlinux(dso, map, vmlinux_path[i], false);
1850 if (err > 0)
1851 goto out;
1854 if (!symbol_conf.ignore_vmlinux_buildid)
1855 filename = dso__build_id_filename(dso, NULL, 0, false);
1856 if (filename != NULL) {
1857 err = dso__load_vmlinux(dso, map, filename, true);
1858 if (err > 0)
1859 goto out;
1860 free(filename);
1862 out:
1863 return err;
1866 static bool visible_dir_filter(const char *name, struct dirent *d)
1868 if (d->d_type != DT_DIR)
1869 return false;
1870 return lsdir_no_dot_filter(name, d);
1873 static int find_matching_kcore(struct map *map, char *dir, size_t dir_sz)
1875 char kallsyms_filename[PATH_MAX];
1876 int ret = -1;
1877 struct strlist *dirs;
1878 struct str_node *nd;
1880 dirs = lsdir(dir, visible_dir_filter);
1881 if (!dirs)
1882 return -1;
1884 strlist__for_each_entry(nd, dirs) {
1885 scnprintf(kallsyms_filename, sizeof(kallsyms_filename),
1886 "%s/%s/kallsyms", dir, nd->s);
1887 if (!validate_kcore_addresses(kallsyms_filename, map)) {
1888 strlcpy(dir, kallsyms_filename, dir_sz);
1889 ret = 0;
1890 break;
1894 strlist__delete(dirs);
1896 return ret;
1899 /*
1900 * Use open(O_RDONLY) to check readability directly instead of access(R_OK),
1901 * since access(R_OK) only checks with the real UID/GID but open() uses the
1902 * effective UID/GID and actual capabilities (e.g. /proc/kcore requires CAP_SYS_RAWIO).
1903 */
1904 static bool filename__readable(const char *file)
1906 int fd = open(file, O_RDONLY);
1907 if (fd < 0)
1908 return false;
1909 close(fd);
1910 return true;
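/*
 * Find a kallsyms file matching the dso's build-id: try the running
 * kernel's /proc/kallsyms (and /proc/kcore) when the build-ids match,
 * then the build-id cache. Returns an allocated path or NULL.
 */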
1913 static char *dso__find_kallsyms(struct dso *dso, struct map *map)
1915 u8 host_build_id[BUILD_ID_SIZE];
1916 char sbuild_id[SBUILD_ID_SIZE];
1917 bool is_host = false;
1918 char path[PATH_MAX];
1920 if (!dso->has_build_id) {
1921 /*
1922 * Last resort, if we don't have a build-id and couldn't find
1923 * any vmlinux file, try the running kernel kallsyms table.
1924 */
1925 goto proc_kallsyms;
1928 if (sysfs__read_build_id("/sys/kernel/notes", host_build_id,
1929 sizeof(host_build_id)) == 0)
1930 is_host = dso__build_id_equal(dso, host_build_id);
1932 /* Try a fast path for /proc/kallsyms if possible */
1933 if (is_host) {
1934 /*
1935 * Do not check the build-id cache, unless we know we cannot use
1936 * /proc/kcore or module maps don't match to /proc/kallsyms.
1937 * To check readability of /proc/kcore, do not use access(R_OK)
1938 * since /proc/kcore requires CAP_SYS_RAWIO to read and access
1939 * can't check it.
1940 */
1941 if (filename__readable("/proc/kcore") &&
1942 !validate_kcore_addresses("/proc/kallsyms", map))
1943 goto proc_kallsyms;
1946 build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
1948 /* Find kallsyms in build-id cache with kcore */
1949 scnprintf(path, sizeof(path), "%s/%s/%s",
1950 buildid_dir, DSO__NAME_KCORE, sbuild_id);
1952 if (!find_matching_kcore(map, path, sizeof(path)))
1953 return strdup(path);
1955 /* Use current /proc/kallsyms if possible */
1956 if (is_host) {
1957 proc_kallsyms:
1958 return strdup("/proc/kallsyms");
1961 /* Finally, find a cache of kallsyms */
1962 if (!build_id_cache__kallsyms_path(sbuild_id, path, sizeof(path))) {
1963 pr_err("No kallsyms or vmlinux with build-id %s was found\n",
1964 sbuild_id);
1965 return NULL;
1968 return strdup(path);
1971 static int dso__load_kernel_sym(struct dso *dso, struct map *map)
1973 int err;
1974 const char *kallsyms_filename = NULL;
1975 char *kallsyms_allocated_filename = NULL;
1976 /*
1977 * Step 1: if the user specified a kallsyms or vmlinux filename, use
1978 * it and only it, reporting errors to the user if it cannot be used.
1979 *
1980 * For instance, try to analyse an ARM perf.data file _without_ a
1981 * build-id, or if the user specifies the wrong path to the right
1982 * vmlinux file, obviously we can't fall back to another vmlinux (an
1983 * x86_64 one, on the machine where analysis is being performed, say),
1984 * or worse, /proc/kallsyms.
1985 *
1986 * If the specified file _has_ a build-id and there is a build-id
1987 * section in the perf.data file, we will still do the expected
1988 * validation in dso__load_vmlinux and will bail out if they don't
1989 * match.
1990 */
1991 if (symbol_conf.kallsyms_name != NULL) {
1992 kallsyms_filename = symbol_conf.kallsyms_name;
1993 goto do_kallsyms;
1996 if (!symbol_conf.ignore_vmlinux && symbol_conf.vmlinux_name != NULL) {
1997 return dso__load_vmlinux(dso, map, symbol_conf.vmlinux_name, false);
2000 if (!symbol_conf.ignore_vmlinux && vmlinux_path != NULL) {
2001 err = dso__load_vmlinux_path(dso, map);
2002 if (err > 0)
2003 return err;
2006 /* do not try local files if a symfs was given */
2007 if (symbol_conf.symfs[0] != 0)
2008 return -1;
2010 kallsyms_allocated_filename = dso__find_kallsyms(dso, map);
2011 if (!kallsyms_allocated_filename)
2012 return -1;
2014 kallsyms_filename = kallsyms_allocated_filename;
2016 do_kallsyms:
2017 err = dso__load_kallsyms(dso, kallsyms_filename, map);
2018 if (err > 0)
2019 pr_debug("Using %s for symbols\n", kallsyms_filename);
2020 free(kallsyms_allocated_filename);
2022 if (err > 0 && !dso__is_kcore(dso)) {
2023 dso->binary_type = DSO_BINARY_TYPE__KALLSYMS;
2024 dso__set_long_name(dso, DSO__NAME_KALLSYMS, false);
2025 map__fixup_start(map);
2026 map__fixup_end(map);
2029 return err;
2032 static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map)
2034 int err;
2035 const char *kallsyms_filename = NULL;
2036 struct machine *machine;
2037 char path[PATH_MAX];
2039 if (!map->groups) {
2040 pr_debug("Guest kernel map hasn't the point to groups\n");
2041 return -1;
2043 machine = map->groups->machine;
2045 if (machine__is_default_guest(machine)) {
2046 /*
2047 * if the user specified a vmlinux filename, use it and only
2048 * it, reporting errors to the user if it cannot be used.
2049 * Or use the guest_kallsyms file the user specified on the command line.
2050 */
2051 if (symbol_conf.default_guest_vmlinux_name != NULL) {
2052 err = dso__load_vmlinux(dso, map,
2053 symbol_conf.default_guest_vmlinux_name,
2054 false);
2055 return err;
2058 kallsyms_filename = symbol_conf.default_guest_kallsyms;
2059 if (!kallsyms_filename)
2060 return -1;
2061 } else {
2062 sprintf(path, "%s/proc/kallsyms", machine->root_dir);
2063 kallsyms_filename = path;
2066 err = dso__load_kallsyms(dso, kallsyms_filename, map);
2067 if (err > 0)
2068 pr_debug("Using %s for symbols\n", kallsyms_filename);
2069 if (err > 0 && !dso__is_kcore(dso)) {
2070 dso->binary_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
2071 dso__set_long_name(dso, machine->mmap_name, false);
2072 map__fixup_start(map);
2073 map__fixup_end(map);
2076 return err;
2079 static void vmlinux_path__exit(void)
2081 while (--vmlinux_path__nr_entries >= 0)
2082 zfree(&vmlinux_path[vmlinux_path__nr_entries]);
2083 vmlinux_path__nr_entries = 0;
2085 zfree(&vmlinux_path);
2088 static const char * const vmlinux_paths[] = {
2089 "vmlinux",
2090 "/boot/vmlinux"
2093 static const char * const vmlinux_paths_upd[] = {
2094 "/boot/vmlinux-%s",
2095 "/usr/lib/debug/boot/vmlinux-%s",
2096 "/lib/modules/%s/build/vmlinux",
2097 "/usr/lib/debug/lib/modules/%s/vmlinux",
2098 "/usr/lib/debug/boot/vmlinux-%s.debug"
2101 static int vmlinux_path__add(const char *new_entry)
2103 vmlinux_path[vmlinux_path__nr_entries] = strdup(new_entry);
2104 if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
2105 return -1;
2106 ++vmlinux_path__nr_entries;
2108 return 0;
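/*
 * Build the default list of vmlinux locations, appending
 * kernel-version specific paths unless a symfs was given.
 */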
2111 static int vmlinux_path__init(struct perf_env *env)
2113 struct utsname uts;
2114 char bf[PATH_MAX];
2115 char *kernel_version;
2116 unsigned int i;
2118 vmlinux_path = malloc(sizeof(char *) * (ARRAY_SIZE(vmlinux_paths) +
2119 ARRAY_SIZE(vmlinux_paths_upd)));
2120 if (vmlinux_path == NULL)
2121 return -1;
2123 for (i = 0; i < ARRAY_SIZE(vmlinux_paths); i++)
2124 if (vmlinux_path__add(vmlinux_paths[i]) < 0)
2125 goto out_fail;
2127 /* only try kernel version if no symfs was given */
2128 if (symbol_conf.symfs[0] != 0)
2129 return 0;
2131 if (env) {
2132 kernel_version = env->os_release;
2133 } else {
2134 if (uname(&uts) < 0)
2135 goto out_fail;
2137 kernel_version = uts.release;
2140 for (i = 0; i < ARRAY_SIZE(vmlinux_paths_upd); i++) {
2141 snprintf(bf, sizeof(bf), vmlinux_paths_upd[i], kernel_version);
2142 if (vmlinux_path__add(bf) < 0)
2143 goto out_fail;
2146 return 0;
2148 out_fail:
2149 vmlinux_path__exit();
2150 return -1;
2153 int setup_list(struct strlist **list, const char *list_str,
2154 const char *list_name)
2156 if (list_str == NULL)
2157 return 0;
2159 *list = strlist__new(list_str, NULL);
2160 if (!*list) {
2161 pr_err("problems parsing %s list\n", list_name);
2162 return -1;
2165 symbol_conf.has_filter = true;
2166 return 0;
2169 int setup_intlist(struct intlist **list, const char *list_str,
2170 const char *list_name)
2172 if (list_str == NULL)
2173 return 0;
2175 *list = intlist__new(list_str);
2176 if (!*list) {
2177 pr_err("problems parsing %s list\n", list_name);
2178 return -1;
2180 return 0;
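/*
 * Read /proc/sys/kernel/kptr_restrict to decide whether kernel symbol
 * addresses are hidden from us: any non-zero value for unprivileged
 * users, or 2 even for root.
 */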
2183 static bool symbol__read_kptr_restrict(void)
2185 bool value = false;
2186 FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r");
2188 if (fp != NULL) {
2189 char line[8];
2191 if (fgets(line, sizeof(line), fp) != NULL)
2192 value = ((geteuid() != 0) || (getuid() != 0)) ?
2193 (atoi(line) != 0) :
2194 (atoi(line) == 2);
2196 fclose(fp);
2199 return value;
2202 int symbol__annotation_init(void)
2204 if (symbol_conf.init_annotation)
2205 return 0;
2207 if (symbol_conf.initialized) {
2208 pr_err("Annotation needs to be init before symbol__init()\n");
2209 return -1;
2212 symbol_conf.priv_size += sizeof(struct annotation);
2213 symbol_conf.init_annotation = true;
2214 return 0;
2217 int symbol__init(struct perf_env *env)
2219 const char *symfs;
2221 if (symbol_conf.initialized)
2222 return 0;
2224 symbol_conf.priv_size = PERF_ALIGN(symbol_conf.priv_size, sizeof(u64));
2226 symbol__elf_init();
2228 if (symbol_conf.sort_by_name)
2229 symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) -
2230 sizeof(struct symbol));
2232 if (symbol_conf.try_vmlinux_path && vmlinux_path__init(env) < 0)
2233 return -1;
2235 if (symbol_conf.field_sep && *symbol_conf.field_sep == '.') {
2236 pr_err("'.' is the only non valid --field-separator argument\n");
2237 return -1;
2240 if (setup_list(&symbol_conf.dso_list,
2241 symbol_conf.dso_list_str, "dso") < 0)
2242 return -1;
2244 if (setup_list(&symbol_conf.comm_list,
2245 symbol_conf.comm_list_str, "comm") < 0)
2246 goto out_free_dso_list;
2248 if (setup_intlist(&symbol_conf.pid_list,
2249 symbol_conf.pid_list_str, "pid") < 0)
2250 goto out_free_comm_list;
2252 if (setup_intlist(&symbol_conf.tid_list,
2253 symbol_conf.tid_list_str, "tid") < 0)
2254 goto out_free_pid_list;
2256 if (setup_list(&symbol_conf.sym_list,
2257 symbol_conf.sym_list_str, "symbol") < 0)
2258 goto out_free_tid_list;
2260 if (setup_list(&symbol_conf.bt_stop_list,
2261 symbol_conf.bt_stop_list_str, "symbol") < 0)
2262 goto out_free_sym_list;
2264 /*
2265 * A path to symbols of "/" is identical to "";
2266 * reset it here for simplicity.
2267 */
2268 symfs = realpath(symbol_conf.symfs, NULL);
2269 if (symfs == NULL)
2270 symfs = symbol_conf.symfs;
2271 if (strcmp(symfs, "/") == 0)
2272 symbol_conf.symfs = "";
2273 if (symfs != symbol_conf.symfs)
2274 free((void *)symfs);
2276 symbol_conf.kptr_restrict = symbol__read_kptr_restrict();
2278 symbol_conf.initialized = true;
2279 return 0;
2281 out_free_sym_list:
2282 strlist__delete(symbol_conf.sym_list);
2283 out_free_tid_list:
2284 intlist__delete(symbol_conf.tid_list);
2285 out_free_pid_list:
2286 intlist__delete(symbol_conf.pid_list);
2287 out_free_comm_list:
2288 strlist__delete(symbol_conf.comm_list);
2289 out_free_dso_list:
2290 strlist__delete(symbol_conf.dso_list);
2291 return -1;
2294 void symbol__exit(void)
2296 if (!symbol_conf.initialized)
2297 return;
2298 strlist__delete(symbol_conf.bt_stop_list);
2299 strlist__delete(symbol_conf.sym_list);
2300 strlist__delete(symbol_conf.dso_list);
2301 strlist__delete(symbol_conf.comm_list);
2302 intlist__delete(symbol_conf.tid_list);
2303 intlist__delete(symbol_conf.pid_list);
2304 vmlinux_path__exit();
2305 symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL;
2306 symbol_conf.bt_stop_list = NULL;
2307 symbol_conf.initialized = false;
2310 int symbol__config_symfs(const struct option *opt __maybe_unused,
2311 const char *dir, int unset __maybe_unused)
2313 char *bf = NULL;
2314 int ret;
2316 symbol_conf.symfs = strdup(dir);
2317 if (symbol_conf.symfs == NULL)
2318 return -ENOMEM;
2320 /* skip the locally configured cache if a symfs is given, and
2321 * configure the buildid dir to symfs/.debug
2322 */
2323 ret = asprintf(&bf, "%s/%s", dir, ".debug");
2324 if (ret < 0)
2325 return -ENOMEM;
2327 set_buildid_dir(bf);
2329 free(bf);
2330 return 0;
2333 struct mem_info *mem_info__get(struct mem_info *mi)
2335 if (mi)
2336 refcount_inc(&mi->refcnt);
2337 return mi;
2340 void mem_info__put(struct mem_info *mi)
2342 if (mi && refcount_dec_and_test(&mi->refcnt))
2343 free(mi);
2346 struct mem_info *mem_info__new(void)
2348 struct mem_info *mi = zalloc(sizeof(*mi));
2350 if (mi)
2351 refcount_set(&mi->refcnt, 1);
2352 return mi;