9 #include "demangle-java.h"
12 #include <symbol/kallsyms.h>
16 #define EM_AARCH64 183 /* ARM 64 bit */
20 #ifdef HAVE_CPLUS_DEMANGLE_SUPPORT
21 extern char *cplus_demangle(const char *, int);
23 static inline char *bfd_demangle(void __maybe_unused
*v
, const char *c
, int i
)
25 return cplus_demangle(c
, i
);
29 static inline char *bfd_demangle(void __maybe_unused
*v
,
30 const char __maybe_unused
*c
,
36 #define PACKAGE 'perf'
41 #ifndef HAVE_ELF_GETPHDRNUM_SUPPORT
42 static int elf_getphdrnum(Elf
*elf
, size_t *dst
)
47 ehdr
= gelf_getehdr(elf
, &gehdr
);
#ifndef NT_GNU_BUILD_ID
#define NT_GNU_BUILD_ID 3
#endif
/**
 * elf_symtab__for_each_symbol - iterate thru all the symbols
 *
 * @syms: struct elf_symtab instance to iterate
 * @idx: uint32_t idx
 * @sym: GElf_Sym iterator
 */
#define elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) \
        for (idx = 0, gelf_getsym(syms, idx, &sym);\
             idx < nr_syms; \
             idx++, gelf_getsym(syms, idx, &sym))
73 static inline uint8_t elf_sym__type(const GElf_Sym
*sym
)
75 return GELF_ST_TYPE(sym
->st_info
);
#ifndef STT_GNU_IFUNC
#define STT_GNU_IFUNC 10
#endif
82 static inline int elf_sym__is_function(const GElf_Sym
*sym
)
84 return (elf_sym__type(sym
) == STT_FUNC
||
85 elf_sym__type(sym
) == STT_GNU_IFUNC
) &&
87 sym
->st_shndx
!= SHN_UNDEF
;
90 static inline bool elf_sym__is_object(const GElf_Sym
*sym
)
92 return elf_sym__type(sym
) == STT_OBJECT
&&
94 sym
->st_shndx
!= SHN_UNDEF
;
97 static inline int elf_sym__is_label(const GElf_Sym
*sym
)
99 return elf_sym__type(sym
) == STT_NOTYPE
&&
101 sym
->st_shndx
!= SHN_UNDEF
&&
102 sym
->st_shndx
!= SHN_ABS
;
105 static bool elf_sym__is_a(GElf_Sym
*sym
, enum map_type type
)
109 return elf_sym__is_function(sym
);
111 return elf_sym__is_object(sym
);
117 static inline const char *elf_sym__name(const GElf_Sym
*sym
,
118 const Elf_Data
*symstrs
)
120 return symstrs
->d_buf
+ sym
->st_name
;
123 static inline const char *elf_sec__name(const GElf_Shdr
*shdr
,
124 const Elf_Data
*secstrs
)
126 return secstrs
->d_buf
+ shdr
->sh_name
;
129 static inline int elf_sec__is_text(const GElf_Shdr
*shdr
,
130 const Elf_Data
*secstrs
)
132 return strstr(elf_sec__name(shdr
, secstrs
), "text") != NULL
;
135 static inline bool elf_sec__is_data(const GElf_Shdr
*shdr
,
136 const Elf_Data
*secstrs
)
138 return strstr(elf_sec__name(shdr
, secstrs
), "data") != NULL
;
141 static bool elf_sec__is_a(GElf_Shdr
*shdr
, Elf_Data
*secstrs
,
146 return elf_sec__is_text(shdr
, secstrs
);
148 return elf_sec__is_data(shdr
, secstrs
);
154 static size_t elf_addr_to_index(Elf
*elf
, GElf_Addr addr
)
160 while ((sec
= elf_nextscn(elf
, sec
)) != NULL
) {
161 gelf_getshdr(sec
, &shdr
);
163 if ((addr
>= shdr
.sh_addr
) &&
164 (addr
< (shdr
.sh_addr
+ shdr
.sh_size
)))
173 Elf_Scn
*elf_section_by_name(Elf
*elf
, GElf_Ehdr
*ep
,
174 GElf_Shdr
*shp
, const char *name
, size_t *idx
)
179 /* Elf is corrupted/truncated, avoid calling elf_strptr. */
180 if (!elf_rawdata(elf_getscn(elf
, ep
->e_shstrndx
), NULL
))
183 while ((sec
= elf_nextscn(elf
, sec
)) != NULL
) {
186 gelf_getshdr(sec
, shp
);
187 str
= elf_strptr(elf
, ep
->e_shstrndx
, shp
->sh_name
);
188 if (str
&& !strcmp(name
, str
)) {
/* Iterate the REL entries of @reldata, with @pos_mem as iterator storage. */
#define elf_section__for_each_rel(reldata, pos, pos_mem, idx, nr_entries) \
        for (idx = 0, pos = gelf_getrel(reldata, 0, &pos_mem); \
             idx < nr_entries; \
             ++idx, pos = gelf_getrel(reldata, idx, &pos_mem))
/* Iterate the RELA entries of @reldata, with @pos_mem as iterator storage. */
#define elf_section__for_each_rela(reldata, pos, pos_mem, idx, nr_entries) \
        for (idx = 0, pos = gelf_getrela(reldata, 0, &pos_mem); \
             idx < nr_entries; \
             ++idx, pos = gelf_getrela(reldata, idx, &pos_mem))
210 * We need to check if we have a .dynsym, so that we can handle the
211 * .plt, synthesizing its symbols, that aren't on the symtabs (be it
212 * .dynsym or .symtab).
213 * And always look at the original dso, not at debuginfo packages, that
214 * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS).
216 int dso__synthesize_plt_symbols(struct dso
*dso
, struct symsrc
*ss
, struct map
*map
,
217 symbol_filter_t filter
)
219 uint32_t nr_rel_entries
, idx
;
224 GElf_Shdr shdr_rel_plt
, shdr_dynsym
;
225 Elf_Data
*reldata
, *syms
, *symstrs
;
226 Elf_Scn
*scn_plt_rel
, *scn_symstrs
, *scn_dynsym
;
229 char sympltname
[1024];
231 int nr
= 0, symidx
, err
= 0;
239 scn_dynsym
= ss
->dynsym
;
240 shdr_dynsym
= ss
->dynshdr
;
241 dynsym_idx
= ss
->dynsym_idx
;
243 if (scn_dynsym
== NULL
)
246 scn_plt_rel
= elf_section_by_name(elf
, &ehdr
, &shdr_rel_plt
,
248 if (scn_plt_rel
== NULL
) {
249 scn_plt_rel
= elf_section_by_name(elf
, &ehdr
, &shdr_rel_plt
,
251 if (scn_plt_rel
== NULL
)
257 if (shdr_rel_plt
.sh_link
!= dynsym_idx
)
260 if (elf_section_by_name(elf
, &ehdr
, &shdr_plt
, ".plt", NULL
) == NULL
)
264 * Fetch the relocation section to find the idxes to the GOT
265 * and the symbols in the .dynsym they refer to.
267 reldata
= elf_getdata(scn_plt_rel
, NULL
);
271 syms
= elf_getdata(scn_dynsym
, NULL
);
275 scn_symstrs
= elf_getscn(elf
, shdr_dynsym
.sh_link
);
276 if (scn_symstrs
== NULL
)
279 symstrs
= elf_getdata(scn_symstrs
, NULL
);
283 if (symstrs
->d_size
== 0)
286 nr_rel_entries
= shdr_rel_plt
.sh_size
/ shdr_rel_plt
.sh_entsize
;
287 plt_offset
= shdr_plt
.sh_offset
;
289 if (shdr_rel_plt
.sh_type
== SHT_RELA
) {
290 GElf_Rela pos_mem
, *pos
;
292 elf_section__for_each_rela(reldata
, pos
, pos_mem
, idx
,
294 symidx
= GELF_R_SYM(pos
->r_info
);
295 plt_offset
+= shdr_plt
.sh_entsize
;
296 gelf_getsym(syms
, symidx
, &sym
);
297 snprintf(sympltname
, sizeof(sympltname
),
298 "%s@plt", elf_sym__name(&sym
, symstrs
));
300 f
= symbol__new(plt_offset
, shdr_plt
.sh_entsize
,
301 STB_GLOBAL
, sympltname
);
305 if (filter
&& filter(map
, f
))
308 symbols__insert(&dso
->symbols
[map
->type
], f
);
312 } else if (shdr_rel_plt
.sh_type
== SHT_REL
) {
313 GElf_Rel pos_mem
, *pos
;
314 elf_section__for_each_rel(reldata
, pos
, pos_mem
, idx
,
316 symidx
= GELF_R_SYM(pos
->r_info
);
317 plt_offset
+= shdr_plt
.sh_entsize
;
318 gelf_getsym(syms
, symidx
, &sym
);
319 snprintf(sympltname
, sizeof(sympltname
),
320 "%s@plt", elf_sym__name(&sym
, symstrs
));
322 f
= symbol__new(plt_offset
, shdr_plt
.sh_entsize
,
323 STB_GLOBAL
, sympltname
);
327 if (filter
&& filter(map
, f
))
330 symbols__insert(&dso
->symbols
[map
->type
], f
);
340 pr_debug("%s: problems reading %s PLT info.\n",
341 __func__
, dso
->long_name
);
346 * Align offset to 4 bytes as needed for note name and descriptor data.
348 #define NOTE_ALIGN(n) (((n) + 3) & -4U)
350 static int elf_read_build_id(Elf
*elf
, void *bf
, size_t size
)
360 if (size
< BUILD_ID_SIZE
)
367 if (gelf_getehdr(elf
, &ehdr
) == NULL
) {
368 pr_err("%s: cannot get elf header.\n", __func__
);
373 * Check following sections for notes:
374 * '.note.gnu.build-id'
376 * '.note' (VDSO specific)
379 sec
= elf_section_by_name(elf
, &ehdr
, &shdr
,
380 ".note.gnu.build-id", NULL
);
384 sec
= elf_section_by_name(elf
, &ehdr
, &shdr
,
389 sec
= elf_section_by_name(elf
, &ehdr
, &shdr
,
398 data
= elf_getdata(sec
, NULL
);
403 while (ptr
< (data
->d_buf
+ data
->d_size
)) {
404 GElf_Nhdr
*nhdr
= ptr
;
405 size_t namesz
= NOTE_ALIGN(nhdr
->n_namesz
),
406 descsz
= NOTE_ALIGN(nhdr
->n_descsz
);
409 ptr
+= sizeof(*nhdr
);
412 if (nhdr
->n_type
== NT_GNU_BUILD_ID
&&
413 nhdr
->n_namesz
== sizeof("GNU")) {
414 if (memcmp(name
, "GNU", sizeof("GNU")) == 0) {
415 size_t sz
= min(size
, descsz
);
417 memset(bf
+ sz
, 0, size
- sz
);
429 int filename__read_build_id(const char *filename
, void *bf
, size_t size
)
434 if (size
< BUILD_ID_SIZE
)
437 fd
= open(filename
, O_RDONLY
);
441 elf
= elf_begin(fd
, PERF_ELF_C_READ_MMAP
, NULL
);
443 pr_debug2("%s: cannot read %s ELF file.\n", __func__
, filename
);
447 err
= elf_read_build_id(elf
, bf
, size
);
456 int sysfs__read_build_id(const char *filename
, void *build_id
, size_t size
)
460 if (size
< BUILD_ID_SIZE
)
463 fd
= open(filename
, O_RDONLY
);
470 size_t namesz
, descsz
;
472 if (read(fd
, &nhdr
, sizeof(nhdr
)) != sizeof(nhdr
))
475 namesz
= NOTE_ALIGN(nhdr
.n_namesz
);
476 descsz
= NOTE_ALIGN(nhdr
.n_descsz
);
477 if (nhdr
.n_type
== NT_GNU_BUILD_ID
&&
478 nhdr
.n_namesz
== sizeof("GNU")) {
479 if (read(fd
, bf
, namesz
) != (ssize_t
)namesz
)
481 if (memcmp(bf
, "GNU", sizeof("GNU")) == 0) {
482 size_t sz
= min(descsz
, size
);
483 if (read(fd
, build_id
, sz
) == (ssize_t
)sz
) {
484 memset(build_id
+ sz
, 0, size
- sz
);
488 } else if (read(fd
, bf
, descsz
) != (ssize_t
)descsz
)
491 int n
= namesz
+ descsz
;
492 if (read(fd
, bf
, n
) != n
)
501 int filename__read_debuglink(const char *filename
, char *debuglink
,
512 fd
= open(filename
, O_RDONLY
);
516 elf
= elf_begin(fd
, PERF_ELF_C_READ_MMAP
, NULL
);
518 pr_debug2("%s: cannot read %s ELF file.\n", __func__
, filename
);
526 if (gelf_getehdr(elf
, &ehdr
) == NULL
) {
527 pr_err("%s: cannot get elf header.\n", __func__
);
531 sec
= elf_section_by_name(elf
, &ehdr
, &shdr
,
532 ".gnu_debuglink", NULL
);
536 data
= elf_getdata(sec
, NULL
);
540 /* the start of this section is a zero-terminated string */
541 strncpy(debuglink
, data
->d_buf
, size
);
553 static int dso__swap_init(struct dso
*dso
, unsigned char eidata
)
555 static unsigned int const endian
= 1;
557 dso
->needs_swap
= DSO_SWAP__NO
;
561 /* We are big endian, DSO is little endian. */
562 if (*(unsigned char const *)&endian
!= 1)
563 dso
->needs_swap
= DSO_SWAP__YES
;
567 /* We are little endian, DSO is big endian. */
568 if (*(unsigned char const *)&endian
!= 0)
569 dso
->needs_swap
= DSO_SWAP__YES
;
573 pr_err("unrecognized DSO data encoding %d\n", eidata
);
580 static int decompress_kmodule(struct dso
*dso
, const char *name
,
581 enum dso_binary_type type
)
584 char tmpbuf
[] = "/tmp/perf-kmod-XXXXXX";
587 if (type
!= DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP
&&
588 type
!= DSO_BINARY_TYPE__GUEST_KMODULE_COMP
&&
589 type
!= DSO_BINARY_TYPE__BUILD_ID_CACHE
)
592 if (type
== DSO_BINARY_TYPE__BUILD_ID_CACHE
)
593 name
= dso
->long_name
;
595 if (kmod_path__parse_ext(&m
, name
) || !m
.comp
)
598 fd
= mkstemp(tmpbuf
);
600 dso
->load_errno
= errno
;
604 if (!decompress_to_file(m
.ext
, name
, fd
)) {
605 dso
->load_errno
= DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE
;
617 bool symsrc__possibly_runtime(struct symsrc
*ss
)
619 return ss
->dynsym
|| ss
->opdsec
;
622 bool symsrc__has_symtab(struct symsrc
*ss
)
624 return ss
->symtab
!= NULL
;
627 void symsrc__destroy(struct symsrc
*ss
)
634 bool __weak
elf__needs_adjust_symbols(GElf_Ehdr ehdr
)
636 return ehdr
.e_type
== ET_EXEC
|| ehdr
.e_type
== ET_REL
;
639 int symsrc__init(struct symsrc
*ss
, struct dso
*dso
, const char *name
,
640 enum dso_binary_type type
)
647 if (dso__needs_decompress(dso
)) {
648 fd
= decompress_kmodule(dso
, name
, type
);
652 fd
= open(name
, O_RDONLY
);
654 dso
->load_errno
= errno
;
659 elf
= elf_begin(fd
, PERF_ELF_C_READ_MMAP
, NULL
);
661 pr_debug("%s: cannot read %s ELF file.\n", __func__
, name
);
662 dso
->load_errno
= DSO_LOAD_ERRNO__INVALID_ELF
;
666 if (gelf_getehdr(elf
, &ehdr
) == NULL
) {
667 dso
->load_errno
= DSO_LOAD_ERRNO__INVALID_ELF
;
668 pr_debug("%s: cannot get elf header.\n", __func__
);
672 if (dso__swap_init(dso
, ehdr
.e_ident
[EI_DATA
])) {
673 dso
->load_errno
= DSO_LOAD_ERRNO__INTERNAL_ERROR
;
677 /* Always reject images with a mismatched build-id: */
678 if (dso
->has_build_id
) {
679 u8 build_id
[BUILD_ID_SIZE
];
681 if (elf_read_build_id(elf
, build_id
, BUILD_ID_SIZE
) < 0) {
682 dso
->load_errno
= DSO_LOAD_ERRNO__CANNOT_READ_BUILDID
;
686 if (!dso__build_id_equal(dso
, build_id
)) {
687 pr_debug("%s: build id mismatch for %s.\n", __func__
, name
);
688 dso
->load_errno
= DSO_LOAD_ERRNO__MISMATCHING_BUILDID
;
693 ss
->is_64_bit
= (gelf_getclass(elf
) == ELFCLASS64
);
695 ss
->symtab
= elf_section_by_name(elf
, &ehdr
, &ss
->symshdr
, ".symtab",
697 if (ss
->symshdr
.sh_type
!= SHT_SYMTAB
)
701 ss
->dynsym
= elf_section_by_name(elf
, &ehdr
, &ss
->dynshdr
, ".dynsym",
703 if (ss
->dynshdr
.sh_type
!= SHT_DYNSYM
)
707 ss
->opdsec
= elf_section_by_name(elf
, &ehdr
, &ss
->opdshdr
, ".opd",
709 if (ss
->opdshdr
.sh_type
!= SHT_PROGBITS
)
712 if (dso
->kernel
== DSO_TYPE_USER
) {
714 ss
->adjust_symbols
= (ehdr
.e_type
== ET_EXEC
||
715 ehdr
.e_type
== ET_REL
||
717 elf_section_by_name(elf
, &ehdr
, &shdr
,
721 ss
->adjust_symbols
= elf__needs_adjust_symbols(ehdr
);
724 ss
->name
= strdup(name
);
726 dso
->load_errno
= errno
;
745 * ref_reloc_sym_not_found - has kernel relocation symbol been found.
746 * @kmap: kernel maps and relocation reference symbol
748 * This function returns %true if we are dealing with the kernel maps and the
749 * relocation reference symbol has not yet been found. Otherwise %false is
752 static bool ref_reloc_sym_not_found(struct kmap
*kmap
)
754 return kmap
&& kmap
->ref_reloc_sym
&& kmap
->ref_reloc_sym
->name
&&
755 !kmap
->ref_reloc_sym
->unrelocated_addr
;
759 * ref_reloc - kernel relocation offset.
760 * @kmap: kernel maps and relocation reference symbol
762 * This function returns the offset of kernel addresses as determined by using
763 * the relocation reference symbol i.e. if the kernel has not been relocated
764 * then the return value is zero.
766 static u64
ref_reloc(struct kmap
*kmap
)
768 if (kmap
&& kmap
->ref_reloc_sym
&&
769 kmap
->ref_reloc_sym
->unrelocated_addr
)
770 return kmap
->ref_reloc_sym
->addr
-
771 kmap
->ref_reloc_sym
->unrelocated_addr
;
775 static bool want_demangle(bool is_kernel_sym
)
777 return is_kernel_sym
? symbol_conf
.demangle_kernel
: symbol_conf
.demangle
;
780 void __weak
arch__elf_sym_adjust(GElf_Sym
*sym __maybe_unused
) { }
782 int dso__load_sym(struct dso
*dso
, struct map
*map
,
783 struct symsrc
*syms_ss
, struct symsrc
*runtime_ss
,
784 symbol_filter_t filter
, int kmodule
)
786 struct kmap
*kmap
= dso
->kernel
? map__kmap(map
) : NULL
;
787 struct map_groups
*kmaps
= kmap
? map__kmaps(map
) : NULL
;
788 struct map
*curr_map
= map
;
789 struct dso
*curr_dso
= dso
;
790 Elf_Data
*symstrs
, *secstrs
;
797 Elf_Data
*syms
, *opddata
= NULL
;
799 Elf_Scn
*sec
, *sec_strndx
;
802 bool remap_kernel
= false, adjust_kernel_syms
= false;
807 dso
->symtab_type
= syms_ss
->type
;
808 dso
->is_64_bit
= syms_ss
->is_64_bit
;
809 dso
->rel
= syms_ss
->ehdr
.e_type
== ET_REL
;
812 * Modules may already have symbols from kallsyms, but those symbols
813 * have the wrong values for the dso maps, so remove them.
815 if (kmodule
&& syms_ss
->symtab
)
816 symbols__delete(&dso
->symbols
[map
->type
]);
818 if (!syms_ss
->symtab
) {
820 * If the vmlinux is stripped, fail so we will fall back
821 * to using kallsyms. The vmlinux runtime symbols aren't
827 syms_ss
->symtab
= syms_ss
->dynsym
;
828 syms_ss
->symshdr
= syms_ss
->dynshdr
;
832 ehdr
= syms_ss
->ehdr
;
833 sec
= syms_ss
->symtab
;
834 shdr
= syms_ss
->symshdr
;
836 if (elf_section_by_name(elf
, &ehdr
, &tshdr
, ".text", NULL
))
837 dso
->text_offset
= tshdr
.sh_addr
- tshdr
.sh_offset
;
839 if (runtime_ss
->opdsec
)
840 opddata
= elf_rawdata(runtime_ss
->opdsec
, NULL
);
842 syms
= elf_getdata(sec
, NULL
);
846 sec
= elf_getscn(elf
, shdr
.sh_link
);
850 symstrs
= elf_getdata(sec
, NULL
);
854 sec_strndx
= elf_getscn(runtime_ss
->elf
, runtime_ss
->ehdr
.e_shstrndx
);
855 if (sec_strndx
== NULL
)
858 secstrs
= elf_getdata(sec_strndx
, NULL
);
862 nr_syms
= shdr
.sh_size
/ shdr
.sh_entsize
;
864 memset(&sym
, 0, sizeof(sym
));
867 * The kernel relocation symbol is needed in advance in order to adjust
868 * kernel maps correctly.
870 if (ref_reloc_sym_not_found(kmap
)) {
871 elf_symtab__for_each_symbol(syms
, nr_syms
, idx
, sym
) {
872 const char *elf_name
= elf_sym__name(&sym
, symstrs
);
874 if (strcmp(elf_name
, kmap
->ref_reloc_sym
->name
))
876 kmap
->ref_reloc_sym
->unrelocated_addr
= sym
.st_value
;
877 map
->reloc
= kmap
->ref_reloc_sym
->addr
-
878 kmap
->ref_reloc_sym
->unrelocated_addr
;
884 * Handle any relocation of vdso necessary because older kernels
885 * attempted to prelink vdso to its virtual address.
887 if (dso__is_vdso(dso
))
888 map
->reloc
= map
->start
- dso
->text_offset
;
890 dso
->adjust_symbols
= runtime_ss
->adjust_symbols
|| ref_reloc(kmap
);
892 * Initial kernel and module mappings do not map to the dso. For
893 * function mappings, flag the fixups.
895 if (map
->type
== MAP__FUNCTION
&& (dso
->kernel
|| kmodule
)) {
897 adjust_kernel_syms
= dso
->adjust_symbols
;
899 elf_symtab__for_each_symbol(syms
, nr_syms
, idx
, sym
) {
901 const char *elf_name
= elf_sym__name(&sym
, symstrs
);
902 char *demangled
= NULL
;
903 int is_label
= elf_sym__is_label(&sym
);
904 const char *section_name
;
905 bool used_opd
= false;
907 if (!is_label
&& !elf_sym__is_a(&sym
, map
->type
))
910 /* Reject ARM ELF "mapping symbols": these aren't unique and
911 * don't identify functions, so will confuse the profile
913 if (ehdr
.e_machine
== EM_ARM
|| ehdr
.e_machine
== EM_AARCH64
) {
914 if (elf_name
[0] == '$' && strchr("adtx", elf_name
[1])
915 && (elf_name
[2] == '\0' || elf_name
[2] == '.'))
919 if (runtime_ss
->opdsec
&& sym
.st_shndx
== runtime_ss
->opdidx
) {
920 u32 offset
= sym
.st_value
- syms_ss
->opdshdr
.sh_addr
;
921 u64
*opd
= opddata
->d_buf
+ offset
;
922 sym
.st_value
= DSO__SWAP(dso
, u64
, *opd
);
923 sym
.st_shndx
= elf_addr_to_index(runtime_ss
->elf
,
928 * When loading symbols in a data mapping, ABS symbols (which
929 * has a value of SHN_ABS in its st_shndx) failed at
930 * elf_getscn(). And it marks the loading as a failure so
931 * already loaded symbols cannot be fixed up.
933 * I'm not sure what should be done. Just ignore them for now.
936 if (sym
.st_shndx
== SHN_ABS
)
939 sec
= elf_getscn(runtime_ss
->elf
, sym
.st_shndx
);
943 gelf_getshdr(sec
, &shdr
);
945 if (is_label
&& !elf_sec__is_a(&shdr
, secstrs
, map
->type
))
948 section_name
= elf_sec__name(&shdr
, secstrs
);
950 /* On ARM, symbols for thumb functions have 1 added to
951 * the symbol address as a flag - remove it */
952 if ((ehdr
.e_machine
== EM_ARM
) &&
953 (map
->type
== MAP__FUNCTION
) &&
957 arch__elf_sym_adjust(&sym
);
959 if (dso
->kernel
|| kmodule
) {
960 char dso_name
[PATH_MAX
];
962 /* Adjust symbol to map to file offset */
963 if (adjust_kernel_syms
)
964 sym
.st_value
-= shdr
.sh_addr
- shdr
.sh_offset
;
966 if (strcmp(section_name
,
967 (curr_dso
->short_name
+
968 dso
->short_name_len
)) == 0)
971 if (strcmp(section_name
, ".text") == 0) {
973 * The initial kernel mapping is based on
974 * kallsyms and identity maps. Overwrite it to
975 * map to the kernel dso.
977 if (remap_kernel
&& dso
->kernel
) {
978 remap_kernel
= false;
979 map
->start
= shdr
.sh_addr
+
981 map
->end
= map
->start
+ shdr
.sh_size
;
982 map
->pgoff
= shdr
.sh_offset
;
983 map
->map_ip
= map__map_ip
;
984 map
->unmap_ip
= map__unmap_ip
;
985 /* Ensure maps are correctly ordered */
988 map_groups__remove(kmaps
, map
);
989 map_groups__insert(kmaps
, map
);
995 * The initial module mapping is based on
996 * /proc/modules mapped to offset zero.
997 * Overwrite it to map to the module dso.
999 if (remap_kernel
&& kmodule
) {
1000 remap_kernel
= false;
1001 map
->pgoff
= shdr
.sh_offset
;
1012 snprintf(dso_name
, sizeof(dso_name
),
1013 "%s%s", dso
->short_name
, section_name
);
1015 curr_map
= map_groups__find_by_name(kmaps
, map
->type
, dso_name
);
1016 if (curr_map
== NULL
) {
1017 u64 start
= sym
.st_value
;
1020 start
+= map
->start
+ shdr
.sh_offset
;
1022 curr_dso
= dso__new(dso_name
);
1023 if (curr_dso
== NULL
)
1025 curr_dso
->kernel
= dso
->kernel
;
1026 curr_dso
->long_name
= dso
->long_name
;
1027 curr_dso
->long_name_len
= dso
->long_name_len
;
1028 curr_map
= map__new2(start
, curr_dso
,
1031 if (curr_map
== NULL
) {
1034 if (adjust_kernel_syms
) {
1035 curr_map
->start
= shdr
.sh_addr
+
1037 curr_map
->end
= curr_map
->start
+
1039 curr_map
->pgoff
= shdr
.sh_offset
;
1041 curr_map
->map_ip
= identity__map_ip
;
1042 curr_map
->unmap_ip
= identity__map_ip
;
1044 curr_dso
->symtab_type
= dso
->symtab_type
;
1045 map_groups__insert(kmaps
, curr_map
);
1047 * Add it before we drop the referece to curr_map,
1048 * i.e. while we still are sure to have a reference
1049 * to this DSO via curr_map->dso.
1051 dsos__add(&map
->groups
->machine
->dsos
, curr_dso
);
1052 /* kmaps already got it */
1054 dso__set_loaded(curr_dso
, map
->type
);
1056 curr_dso
= curr_map
->dso
;
1061 if ((used_opd
&& runtime_ss
->adjust_symbols
)
1062 || (!used_opd
&& syms_ss
->adjust_symbols
)) {
1063 pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64
" "
1064 "sh_addr: %#" PRIx64
" sh_offset: %#" PRIx64
"\n", __func__
,
1065 (u64
)sym
.st_value
, (u64
)shdr
.sh_addr
,
1066 (u64
)shdr
.sh_offset
);
1067 sym
.st_value
-= shdr
.sh_addr
- shdr
.sh_offset
;
1071 * We need to figure out if the object was created from C++ sources
1072 * DWARF DW_compile_unit has this, but we don't always have access
1075 if (want_demangle(dso
->kernel
|| kmodule
)) {
1076 int demangle_flags
= DMGL_NO_OPTS
;
1078 demangle_flags
= DMGL_PARAMS
| DMGL_ANSI
;
1080 demangled
= bfd_demangle(NULL
, elf_name
, demangle_flags
);
1081 if (demangled
== NULL
)
1082 demangled
= java_demangle_sym(elf_name
, JAVA_DEMANGLE_NORET
);
1083 if (demangled
!= NULL
)
1084 elf_name
= demangled
;
1086 f
= symbol__new(sym
.st_value
, sym
.st_size
,
1087 GELF_ST_BIND(sym
.st_info
), elf_name
);
1092 if (filter
&& filter(curr_map
, f
))
1095 symbols__insert(&curr_dso
->symbols
[curr_map
->type
], f
);
1101 * For misannotated, zeroed, ASM function sizes.
1104 if (!symbol_conf
.allow_aliases
)
1105 symbols__fixup_duplicate(&dso
->symbols
[map
->type
]);
1106 symbols__fixup_end(&dso
->symbols
[map
->type
]);
1109 * We need to fixup this here too because we create new
1110 * maps here, for things like vsyscall sections.
1112 __map_groups__fixup_end(kmaps
, map
->type
);
1120 static int elf_read_maps(Elf
*elf
, bool exe
, mapfn_t mapfn
, void *data
)
1127 if (elf_getphdrnum(elf
, &phdrnum
))
1130 for (i
= 0; i
< phdrnum
; i
++) {
1131 if (gelf_getphdr(elf
, i
, &phdr
) == NULL
)
1133 if (phdr
.p_type
!= PT_LOAD
)
1136 if (!(phdr
.p_flags
& PF_X
))
1139 if (!(phdr
.p_flags
& PF_R
))
1142 sz
= min(phdr
.p_memsz
, phdr
.p_filesz
);
1145 err
= mapfn(phdr
.p_vaddr
, sz
, phdr
.p_offset
, data
);
1152 int file__read_maps(int fd
, bool exe
, mapfn_t mapfn
, void *data
,
1158 elf
= elf_begin(fd
, PERF_ELF_C_READ_MMAP
, NULL
);
1163 *is_64_bit
= (gelf_getclass(elf
) == ELFCLASS64
);
1165 err
= elf_read_maps(elf
, exe
, mapfn
, data
);
1171 enum dso_type
dso__type_fd(int fd
)
1173 enum dso_type dso_type
= DSO__TYPE_UNKNOWN
;
1178 elf
= elf_begin(fd
, PERF_ELF_C_READ_MMAP
, NULL
);
1183 if (ek
!= ELF_K_ELF
)
1186 if (gelf_getclass(elf
) == ELFCLASS64
) {
1187 dso_type
= DSO__TYPE_64BIT
;
1191 if (gelf_getehdr(elf
, &ehdr
) == NULL
)
1194 if (ehdr
.e_machine
== EM_X86_64
)
1195 dso_type
= DSO__TYPE_X32BIT
;
1197 dso_type
= DSO__TYPE_32BIT
;
1204 static int copy_bytes(int from
, off_t from_offs
, int to
, off_t to_offs
, u64 len
)
1209 char *buf
= malloc(page_size
);
1214 if (lseek(to
, to_offs
, SEEK_SET
) != to_offs
)
1217 if (lseek(from
, from_offs
, SEEK_SET
) != from_offs
)
1224 /* Use read because mmap won't work on proc files */
1225 r
= read(from
, buf
, n
);
1231 r
= write(to
, buf
, n
);
1252 static int kcore__open(struct kcore
*kcore
, const char *filename
)
1256 kcore
->fd
= open(filename
, O_RDONLY
);
1257 if (kcore
->fd
== -1)
1260 kcore
->elf
= elf_begin(kcore
->fd
, ELF_C_READ
, NULL
);
1264 kcore
->elfclass
= gelf_getclass(kcore
->elf
);
1265 if (kcore
->elfclass
== ELFCLASSNONE
)
1268 ehdr
= gelf_getehdr(kcore
->elf
, &kcore
->ehdr
);
1275 elf_end(kcore
->elf
);
1281 static int kcore__init(struct kcore
*kcore
, char *filename
, int elfclass
,
1284 kcore
->elfclass
= elfclass
;
1287 kcore
->fd
= mkstemp(filename
);
1289 kcore
->fd
= open(filename
, O_WRONLY
| O_CREAT
| O_EXCL
, 0400);
1290 if (kcore
->fd
== -1)
1293 kcore
->elf
= elf_begin(kcore
->fd
, ELF_C_WRITE
, NULL
);
1297 if (!gelf_newehdr(kcore
->elf
, elfclass
))
1300 memset(&kcore
->ehdr
, 0, sizeof(GElf_Ehdr
));
1305 elf_end(kcore
->elf
);
1312 static void kcore__close(struct kcore
*kcore
)
1314 elf_end(kcore
->elf
);
1318 static int kcore__copy_hdr(struct kcore
*from
, struct kcore
*to
, size_t count
)
1320 GElf_Ehdr
*ehdr
= &to
->ehdr
;
1321 GElf_Ehdr
*kehdr
= &from
->ehdr
;
1323 memcpy(ehdr
->e_ident
, kehdr
->e_ident
, EI_NIDENT
);
1324 ehdr
->e_type
= kehdr
->e_type
;
1325 ehdr
->e_machine
= kehdr
->e_machine
;
1326 ehdr
->e_version
= kehdr
->e_version
;
1329 ehdr
->e_flags
= kehdr
->e_flags
;
1330 ehdr
->e_phnum
= count
;
1331 ehdr
->e_shentsize
= 0;
1333 ehdr
->e_shstrndx
= 0;
1335 if (from
->elfclass
== ELFCLASS32
) {
1336 ehdr
->e_phoff
= sizeof(Elf32_Ehdr
);
1337 ehdr
->e_ehsize
= sizeof(Elf32_Ehdr
);
1338 ehdr
->e_phentsize
= sizeof(Elf32_Phdr
);
1340 ehdr
->e_phoff
= sizeof(Elf64_Ehdr
);
1341 ehdr
->e_ehsize
= sizeof(Elf64_Ehdr
);
1342 ehdr
->e_phentsize
= sizeof(Elf64_Phdr
);
1345 if (!gelf_update_ehdr(to
->elf
, ehdr
))
1348 if (!gelf_newphdr(to
->elf
, count
))
1354 static int kcore__add_phdr(struct kcore
*kcore
, int idx
, off_t offset
,
1359 .p_flags
= PF_R
| PF_W
| PF_X
,
1365 .p_align
= page_size
,
1368 if (!gelf_update_phdr(kcore
->elf
, idx
, &phdr
))
1374 static off_t
kcore__write(struct kcore
*kcore
)
1376 return elf_update(kcore
->elf
, ELF_C_WRITE
);
1385 struct kcore_copy_info
{
1391 u64 last_module_symbol
;
1392 struct phdr_data kernel_map
;
1393 struct phdr_data modules_map
;
1396 static int kcore_copy__process_kallsyms(void *arg
, const char *name
, char type
,
1399 struct kcore_copy_info
*kci
= arg
;
1401 if (!symbol_type__is_a(type
, MAP__FUNCTION
))
1404 if (strchr(name
, '[')) {
1405 if (start
> kci
->last_module_symbol
)
1406 kci
->last_module_symbol
= start
;
1410 if (!kci
->first_symbol
|| start
< kci
->first_symbol
)
1411 kci
->first_symbol
= start
;
1413 if (!kci
->last_symbol
|| start
> kci
->last_symbol
)
1414 kci
->last_symbol
= start
;
1416 if (!strcmp(name
, "_stext")) {
1421 if (!strcmp(name
, "_etext")) {
1429 static int kcore_copy__parse_kallsyms(struct kcore_copy_info
*kci
,
1432 char kallsyms_filename
[PATH_MAX
];
1434 scnprintf(kallsyms_filename
, PATH_MAX
, "%s/kallsyms", dir
);
1436 if (symbol__restricted_filename(kallsyms_filename
, "/proc/kallsyms"))
1439 if (kallsyms__parse(kallsyms_filename
, kci
,
1440 kcore_copy__process_kallsyms
) < 0)
1446 static int kcore_copy__process_modules(void *arg
,
1447 const char *name __maybe_unused
,
1450 struct kcore_copy_info
*kci
= arg
;
1452 if (!kci
->first_module
|| start
< kci
->first_module
)
1453 kci
->first_module
= start
;
1458 static int kcore_copy__parse_modules(struct kcore_copy_info
*kci
,
1461 char modules_filename
[PATH_MAX
];
1463 scnprintf(modules_filename
, PATH_MAX
, "%s/modules", dir
);
1465 if (symbol__restricted_filename(modules_filename
, "/proc/modules"))
1468 if (modules__parse(modules_filename
, kci
,
1469 kcore_copy__process_modules
) < 0)
1475 static void kcore_copy__map(struct phdr_data
*p
, u64 start
, u64 end
, u64 pgoff
,
1478 if (p
->addr
|| s
< start
|| s
>= end
)
1482 p
->offset
= (s
- start
) + pgoff
;
1483 p
->len
= e
< end
? e
- s
: end
- s
;
1486 static int kcore_copy__read_map(u64 start
, u64 len
, u64 pgoff
, void *data
)
1488 struct kcore_copy_info
*kci
= data
;
1489 u64 end
= start
+ len
;
1491 kcore_copy__map(&kci
->kernel_map
, start
, end
, pgoff
, kci
->stext
,
1494 kcore_copy__map(&kci
->modules_map
, start
, end
, pgoff
, kci
->first_module
,
1495 kci
->last_module_symbol
);
1500 static int kcore_copy__read_maps(struct kcore_copy_info
*kci
, Elf
*elf
)
1502 if (elf_read_maps(elf
, true, kcore_copy__read_map
, kci
) < 0)
1508 static int kcore_copy__calc_maps(struct kcore_copy_info
*kci
, const char *dir
,
1511 if (kcore_copy__parse_kallsyms(kci
, dir
))
1514 if (kcore_copy__parse_modules(kci
, dir
))
1518 kci
->stext
= round_down(kci
->stext
, page_size
);
1520 kci
->stext
= round_down(kci
->first_symbol
, page_size
);
1523 kci
->etext
= round_up(kci
->etext
, page_size
);
1524 } else if (kci
->last_symbol
) {
1525 kci
->etext
= round_up(kci
->last_symbol
, page_size
);
1526 kci
->etext
+= page_size
;
1529 kci
->first_module
= round_down(kci
->first_module
, page_size
);
1531 if (kci
->last_module_symbol
) {
1532 kci
->last_module_symbol
= round_up(kci
->last_module_symbol
,
1534 kci
->last_module_symbol
+= page_size
;
1537 if (!kci
->stext
|| !kci
->etext
)
1540 if (kci
->first_module
&& !kci
->last_module_symbol
)
1543 return kcore_copy__read_maps(kci
, elf
);
1546 static int kcore_copy__copy_file(const char *from_dir
, const char *to_dir
,
1549 char from_filename
[PATH_MAX
];
1550 char to_filename
[PATH_MAX
];
1552 scnprintf(from_filename
, PATH_MAX
, "%s/%s", from_dir
, name
);
1553 scnprintf(to_filename
, PATH_MAX
, "%s/%s", to_dir
, name
);
1555 return copyfile_mode(from_filename
, to_filename
, 0400);
1558 static int kcore_copy__unlink(const char *dir
, const char *name
)
1560 char filename
[PATH_MAX
];
1562 scnprintf(filename
, PATH_MAX
, "%s/%s", dir
, name
);
1564 return unlink(filename
);
1567 static int kcore_copy__compare_fds(int from
, int to
)
1575 buf_from
= malloc(page_size
);
1576 buf_to
= malloc(page_size
);
1577 if (!buf_from
|| !buf_to
)
1581 /* Use read because mmap won't work on proc files */
1582 ret
= read(from
, buf_from
, page_size
);
1591 if (readn(to
, buf_to
, len
) != (int)len
)
1594 if (memcmp(buf_from
, buf_to
, len
))
/* Open both paths read-only and compare their contents.  0 if equal. */
static int kcore_copy__compare_files(const char *from_filename,
                                     const char *to_filename)
{
        int from, to, err = -1;

        from = open(from_filename, O_RDONLY);
        if (from < 0)
                return -1;

        to = open(to_filename, O_RDONLY);
        if (to < 0)
                goto out_close_from;

        err = kcore_copy__compare_fds(from, to);

        close(to);
out_close_from:
        close(from);
        return err;
}
1626 static int kcore_copy__compare_file(const char *from_dir
, const char *to_dir
,
1629 char from_filename
[PATH_MAX
];
1630 char to_filename
[PATH_MAX
];
1632 scnprintf(from_filename
, PATH_MAX
, "%s/%s", from_dir
, name
);
1633 scnprintf(to_filename
, PATH_MAX
, "%s/%s", to_dir
, name
);
1635 return kcore_copy__compare_files(from_filename
, to_filename
);
1639 * kcore_copy - copy kallsyms, modules and kcore from one directory to another.
1640 * @from_dir: from directory
1641 * @to_dir: to directory
1643 * This function copies kallsyms, modules and kcore files from one directory to
1644 * another. kallsyms and modules are copied entirely. Only code segments are
1645 * copied from kcore. It is assumed that two segments suffice: one for the
1646 * kernel proper and one for all the modules. The code segments are determined
1647 * from kallsyms and modules files. The kernel map starts at _stext or the
1648 * lowest function symbol, and ends at _etext or the highest function symbol.
1649 * The module map starts at the lowest module address and ends at the highest
1650 * module symbol. Start addresses are rounded down to the nearest page. End
1651 * addresses are rounded up to the nearest page. An extra page is added to the
1652 * highest kernel symbol and highest module symbol to, hopefully, encompass that
1653 * symbol too. Because it contains only code sections, the resulting kcore is
1654 * unusual. One significant peculiarity is that the mapping (start -> pgoff)
1655 * is not the same for the kernel map and the modules map. That happens because
1656 * the data is copied adjacently whereas the original kcore has gaps. Finally,
1657 * kallsyms and modules files are compared with their copies to check that
1658 * modules have not been loaded or unloaded while the copies were taking place.
1660 * Return: %0 on success, %-1 on failure.
1662 int kcore_copy(const char *from_dir
, const char *to_dir
)
1665 struct kcore extract
;
1667 int idx
= 0, err
= -1;
1668 off_t offset
= page_size
, sz
, modules_offset
= 0;
1669 struct kcore_copy_info kci
= { .stext
= 0, };
1670 char kcore_filename
[PATH_MAX
];
1671 char extract_filename
[PATH_MAX
];
1673 if (kcore_copy__copy_file(from_dir
, to_dir
, "kallsyms"))
1676 if (kcore_copy__copy_file(from_dir
, to_dir
, "modules"))
1677 goto out_unlink_kallsyms
;
1679 scnprintf(kcore_filename
, PATH_MAX
, "%s/kcore", from_dir
);
1680 scnprintf(extract_filename
, PATH_MAX
, "%s/kcore", to_dir
);
1682 if (kcore__open(&kcore
, kcore_filename
))
1683 goto out_unlink_modules
;
1685 if (kcore_copy__calc_maps(&kci
, from_dir
, kcore
.elf
))
1686 goto out_kcore_close
;
1688 if (kcore__init(&extract
, extract_filename
, kcore
.elfclass
, false))
1689 goto out_kcore_close
;
1691 if (!kci
.modules_map
.addr
)
1694 if (kcore__copy_hdr(&kcore
, &extract
, count
))
1695 goto out_extract_close
;
1697 if (kcore__add_phdr(&extract
, idx
++, offset
, kci
.kernel_map
.addr
,
1698 kci
.kernel_map
.len
))
1699 goto out_extract_close
;
1701 if (kci
.modules_map
.addr
) {
1702 modules_offset
= offset
+ kci
.kernel_map
.len
;
1703 if (kcore__add_phdr(&extract
, idx
, modules_offset
,
1704 kci
.modules_map
.addr
, kci
.modules_map
.len
))
1705 goto out_extract_close
;
1708 sz
= kcore__write(&extract
);
1709 if (sz
< 0 || sz
> offset
)
1710 goto out_extract_close
;
1712 if (copy_bytes(kcore
.fd
, kci
.kernel_map
.offset
, extract
.fd
, offset
,
1713 kci
.kernel_map
.len
))
1714 goto out_extract_close
;
1716 if (modules_offset
&& copy_bytes(kcore
.fd
, kci
.modules_map
.offset
,
1717 extract
.fd
, modules_offset
,
1718 kci
.modules_map
.len
))
1719 goto out_extract_close
;
1721 if (kcore_copy__compare_file(from_dir
, to_dir
, "modules"))
1722 goto out_extract_close
;
1724 if (kcore_copy__compare_file(from_dir
, to_dir
, "kallsyms"))
1725 goto out_extract_close
;
1730 kcore__close(&extract
);
1732 unlink(extract_filename
);
1734 kcore__close(&kcore
);
1737 kcore_copy__unlink(to_dir
, "modules");
1738 out_unlink_kallsyms
:
1740 kcore_copy__unlink(to_dir
, "kallsyms");
1745 int kcore_extract__create(struct kcore_extract
*kce
)
1748 struct kcore extract
;
1750 int idx
= 0, err
= -1;
1751 off_t offset
= page_size
, sz
;
1753 if (kcore__open(&kcore
, kce
->kcore_filename
))
1756 strcpy(kce
->extract_filename
, PERF_KCORE_EXTRACT
);
1757 if (kcore__init(&extract
, kce
->extract_filename
, kcore
.elfclass
, true))
1758 goto out_kcore_close
;
1760 if (kcore__copy_hdr(&kcore
, &extract
, count
))
1761 goto out_extract_close
;
1763 if (kcore__add_phdr(&extract
, idx
, offset
, kce
->addr
, kce
->len
))
1764 goto out_extract_close
;
1766 sz
= kcore__write(&extract
);
1767 if (sz
< 0 || sz
> offset
)
1768 goto out_extract_close
;
1770 if (copy_bytes(kcore
.fd
, kce
->offs
, extract
.fd
, offset
, kce
->len
))
1771 goto out_extract_close
;
1776 kcore__close(&extract
);
1778 unlink(kce
->extract_filename
);
1780 kcore__close(&kcore
);
1785 void kcore_extract__delete(struct kcore_extract
*kce
)
1787 unlink(kce
->extract_filename
);
1790 void symbol__elf_init(void)
1792 elf_version(EV_CURRENT
);