11 #include <symbol/kallsyms.h>
15 #define EM_AARCH64 183 /* ARM 64 bit */
19 #ifdef HAVE_CPLUS_DEMANGLE_SUPPORT
20 extern char *cplus_demangle(const char *, int);
22 static inline char *bfd_demangle(void __maybe_unused
*v
, const char *c
, int i
)
24 return cplus_demangle(c
, i
);
/*
 * No-demangle stub of bfd_demangle().
 * NOTE(review): the remainder of this signature and its body (presumably
 * "return NULL;") are elided in this extract - confirm against full source.
 */
28 static inline char *bfd_demangle(void __maybe_unused
*v
,
29 const char __maybe_unused
*c
,
35 #define PACKAGE 'perf'
40 #ifndef HAVE_ELF_GETPHDRNUM_SUPPORT
41 static int elf_getphdrnum(Elf
*elf
, size_t *dst
)
46 ehdr
= gelf_getehdr(elf
, &gehdr
);
56 #ifndef NT_GNU_BUILD_ID
57 #define NT_GNU_BUILD_ID 3
61 * elf_symtab__for_each_symbol - iterate thru all the symbols
63 * @syms: struct elf_symtab instance to iterate
65 * @sym: GElf_Sym iterator
67 #define elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) \
68 for (idx = 0, gelf_getsym(syms, idx, &sym);\
70 idx++, gelf_getsym(syms, idx, &sym))
72 static inline uint8_t elf_sym__type(const GElf_Sym
*sym
)
74 return GELF_ST_TYPE(sym
->st_info
);
78 #define STT_GNU_IFUNC 10
81 static inline int elf_sym__is_function(const GElf_Sym
*sym
)
83 return (elf_sym__type(sym
) == STT_FUNC
||
84 elf_sym__type(sym
) == STT_GNU_IFUNC
) &&
86 sym
->st_shndx
!= SHN_UNDEF
;
89 static inline bool elf_sym__is_object(const GElf_Sym
*sym
)
91 return elf_sym__type(sym
) == STT_OBJECT
&&
93 sym
->st_shndx
!= SHN_UNDEF
;
96 static inline int elf_sym__is_label(const GElf_Sym
*sym
)
98 return elf_sym__type(sym
) == STT_NOTYPE
&&
100 sym
->st_shndx
!= SHN_UNDEF
&&
101 sym
->st_shndx
!= SHN_ABS
;
/*
 * elf_sym__is_a - does @sym match the given map type?
 * NOTE(review): the switch on @type (and its default branch) is elided in
 * this extract; the two returns below appear to be the MAP__FUNCTION and
 * MAP__VARIABLE arms respectively - confirm against the full source.
 */
104 static bool elf_sym__is_a(GElf_Sym
*sym
, enum map_type type
)
108 return elf_sym__is_function(sym
);
110 return elf_sym__is_object(sym
);
116 static inline const char *elf_sym__name(const GElf_Sym
*sym
,
117 const Elf_Data
*symstrs
)
119 return symstrs
->d_buf
+ sym
->st_name
;
122 static inline const char *elf_sec__name(const GElf_Shdr
*shdr
,
123 const Elf_Data
*secstrs
)
125 return secstrs
->d_buf
+ shdr
->sh_name
;
128 static inline int elf_sec__is_text(const GElf_Shdr
*shdr
,
129 const Elf_Data
*secstrs
)
131 return strstr(elf_sec__name(shdr
, secstrs
), "text") != NULL
;
134 static inline bool elf_sec__is_data(const GElf_Shdr
*shdr
,
135 const Elf_Data
*secstrs
)
137 return strstr(elf_sec__name(shdr
, secstrs
), "data") != NULL
;
/*
 * elf_sec__is_a - does section @shdr match the given map type?
 * NOTE(review): the trailing parameter (presumably the map type) and the
 * switch dispatching to the two returns below are elided in this extract -
 * confirm against the full source.
 */
140 static bool elf_sec__is_a(GElf_Shdr
*shdr
, Elf_Data
*secstrs
,
145 return elf_sec__is_text(shdr
, secstrs
);
147 return elf_sec__is_data(shdr
, secstrs
);
/*
 * elf_addr_to_index - find the index of the section containing @addr.
 * Walks every section and tests addr against [sh_addr, sh_addr + sh_size).
 * NOTE(review): local declarations (sec, shdr, the index counter), the body
 * of the match branch, and the not-found return are elided in this extract -
 * confirm against the full source.
 */
153 static size_t elf_addr_to_index(Elf
*elf
, GElf_Addr addr
)
159 while ((sec
= elf_nextscn(elf
, sec
)) != NULL
) {
160 gelf_getshdr(sec
, &shdr
);
162 if ((addr
>= shdr
.sh_addr
) &&
163 (addr
< (shdr
.sh_addr
+ shdr
.sh_size
)))
172 Elf_Scn
*elf_section_by_name(Elf
*elf
, GElf_Ehdr
*ep
,
173 GElf_Shdr
*shp
, const char *name
, size_t *idx
)
178 /* Elf is corrupted/truncated, avoid calling elf_strptr. */
179 if (!elf_rawdata(elf_getscn(elf
, ep
->e_shstrndx
), NULL
))
182 while ((sec
= elf_nextscn(elf
, sec
)) != NULL
) {
185 gelf_getshdr(sec
, shp
);
186 str
= elf_strptr(elf
, ep
->e_shstrndx
, shp
->sh_name
);
187 if (str
&& !strcmp(name
, str
)) {
198 #define elf_section__for_each_rel(reldata, pos, pos_mem, idx, nr_entries) \
199 for (idx = 0, pos = gelf_getrel(reldata, 0, &pos_mem); \
201 ++idx, pos = gelf_getrel(reldata, idx, &pos_mem))
203 #define elf_section__for_each_rela(reldata, pos, pos_mem, idx, nr_entries) \
204 for (idx = 0, pos = gelf_getrela(reldata, 0, &pos_mem); \
206 ++idx, pos = gelf_getrela(reldata, idx, &pos_mem))
209 * We need to check if we have a .dynsym, so that we can handle the
210 * .plt, synthesizing its symbols, that aren't on the symtabs (be it
211 * .dynsym or .symtab).
212 * And always look at the original dso, not at debuginfo packages, that
213 * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS).
215 int dso__synthesize_plt_symbols(struct dso
*dso
, struct symsrc
*ss
, struct map
*map
,
216 symbol_filter_t filter
)
218 uint32_t nr_rel_entries
, idx
;
223 GElf_Shdr shdr_rel_plt
, shdr_dynsym
;
224 Elf_Data
*reldata
, *syms
, *symstrs
;
225 Elf_Scn
*scn_plt_rel
, *scn_symstrs
, *scn_dynsym
;
228 char sympltname
[1024];
230 int nr
= 0, symidx
, err
= 0;
238 scn_dynsym
= ss
->dynsym
;
239 shdr_dynsym
= ss
->dynshdr
;
240 dynsym_idx
= ss
->dynsym_idx
;
242 if (scn_dynsym
== NULL
)
245 scn_plt_rel
= elf_section_by_name(elf
, &ehdr
, &shdr_rel_plt
,
247 if (scn_plt_rel
== NULL
) {
248 scn_plt_rel
= elf_section_by_name(elf
, &ehdr
, &shdr_rel_plt
,
250 if (scn_plt_rel
== NULL
)
256 if (shdr_rel_plt
.sh_link
!= dynsym_idx
)
259 if (elf_section_by_name(elf
, &ehdr
, &shdr_plt
, ".plt", NULL
) == NULL
)
263 * Fetch the relocation section to find the idxes to the GOT
264 * and the symbols in the .dynsym they refer to.
266 reldata
= elf_getdata(scn_plt_rel
, NULL
);
270 syms
= elf_getdata(scn_dynsym
, NULL
);
274 scn_symstrs
= elf_getscn(elf
, shdr_dynsym
.sh_link
);
275 if (scn_symstrs
== NULL
)
278 symstrs
= elf_getdata(scn_symstrs
, NULL
);
282 if (symstrs
->d_size
== 0)
285 nr_rel_entries
= shdr_rel_plt
.sh_size
/ shdr_rel_plt
.sh_entsize
;
286 plt_offset
= shdr_plt
.sh_offset
;
288 if (shdr_rel_plt
.sh_type
== SHT_RELA
) {
289 GElf_Rela pos_mem
, *pos
;
291 elf_section__for_each_rela(reldata
, pos
, pos_mem
, idx
,
293 symidx
= GELF_R_SYM(pos
->r_info
);
294 plt_offset
+= shdr_plt
.sh_entsize
;
295 gelf_getsym(syms
, symidx
, &sym
);
296 snprintf(sympltname
, sizeof(sympltname
),
297 "%s@plt", elf_sym__name(&sym
, symstrs
));
299 f
= symbol__new(plt_offset
, shdr_plt
.sh_entsize
,
300 STB_GLOBAL
, sympltname
);
304 if (filter
&& filter(map
, f
))
307 symbols__insert(&dso
->symbols
[map
->type
], f
);
311 } else if (shdr_rel_plt
.sh_type
== SHT_REL
) {
312 GElf_Rel pos_mem
, *pos
;
313 elf_section__for_each_rel(reldata
, pos
, pos_mem
, idx
,
315 symidx
= GELF_R_SYM(pos
->r_info
);
316 plt_offset
+= shdr_plt
.sh_entsize
;
317 gelf_getsym(syms
, symidx
, &sym
);
318 snprintf(sympltname
, sizeof(sympltname
),
319 "%s@plt", elf_sym__name(&sym
, symstrs
));
321 f
= symbol__new(plt_offset
, shdr_plt
.sh_entsize
,
322 STB_GLOBAL
, sympltname
);
326 if (filter
&& filter(map
, f
))
329 symbols__insert(&dso
->symbols
[map
->type
], f
);
339 pr_debug("%s: problems reading %s PLT info.\n",
340 __func__
, dso
->long_name
);
345 * Align offset to 4 bytes as needed for note name and descriptor data.
347 #define NOTE_ALIGN(n) (((n) + 3) & -4U)
349 static int elf_read_build_id(Elf
*elf
, void *bf
, size_t size
)
359 if (size
< BUILD_ID_SIZE
)
366 if (gelf_getehdr(elf
, &ehdr
) == NULL
) {
367 pr_err("%s: cannot get elf header.\n", __func__
);
372 * Check following sections for notes:
373 * '.note.gnu.build-id'
375 * '.note' (VDSO specific)
378 sec
= elf_section_by_name(elf
, &ehdr
, &shdr
,
379 ".note.gnu.build-id", NULL
);
383 sec
= elf_section_by_name(elf
, &ehdr
, &shdr
,
388 sec
= elf_section_by_name(elf
, &ehdr
, &shdr
,
397 data
= elf_getdata(sec
, NULL
);
402 while (ptr
< (data
->d_buf
+ data
->d_size
)) {
403 GElf_Nhdr
*nhdr
= ptr
;
404 size_t namesz
= NOTE_ALIGN(nhdr
->n_namesz
),
405 descsz
= NOTE_ALIGN(nhdr
->n_descsz
);
408 ptr
+= sizeof(*nhdr
);
411 if (nhdr
->n_type
== NT_GNU_BUILD_ID
&&
412 nhdr
->n_namesz
== sizeof("GNU")) {
413 if (memcmp(name
, "GNU", sizeof("GNU")) == 0) {
414 size_t sz
= min(size
, descsz
);
416 memset(bf
+ sz
, 0, size
- sz
);
428 int filename__read_build_id(const char *filename
, void *bf
, size_t size
)
433 if (size
< BUILD_ID_SIZE
)
436 fd
= open(filename
, O_RDONLY
);
440 elf
= elf_begin(fd
, PERF_ELF_C_READ_MMAP
, NULL
);
442 pr_debug2("%s: cannot read %s ELF file.\n", __func__
, filename
);
446 err
= elf_read_build_id(elf
, bf
, size
);
455 int sysfs__read_build_id(const char *filename
, void *build_id
, size_t size
)
459 if (size
< BUILD_ID_SIZE
)
462 fd
= open(filename
, O_RDONLY
);
469 size_t namesz
, descsz
;
471 if (read(fd
, &nhdr
, sizeof(nhdr
)) != sizeof(nhdr
))
474 namesz
= NOTE_ALIGN(nhdr
.n_namesz
);
475 descsz
= NOTE_ALIGN(nhdr
.n_descsz
);
476 if (nhdr
.n_type
== NT_GNU_BUILD_ID
&&
477 nhdr
.n_namesz
== sizeof("GNU")) {
478 if (read(fd
, bf
, namesz
) != (ssize_t
)namesz
)
480 if (memcmp(bf
, "GNU", sizeof("GNU")) == 0) {
481 size_t sz
= min(descsz
, size
);
482 if (read(fd
, build_id
, sz
) == (ssize_t
)sz
) {
483 memset(build_id
+ sz
, 0, size
- sz
);
487 } else if (read(fd
, bf
, descsz
) != (ssize_t
)descsz
)
490 int n
= namesz
+ descsz
;
491 if (read(fd
, bf
, n
) != n
)
500 int filename__read_debuglink(const char *filename
, char *debuglink
,
511 fd
= open(filename
, O_RDONLY
);
515 elf
= elf_begin(fd
, PERF_ELF_C_READ_MMAP
, NULL
);
517 pr_debug2("%s: cannot read %s ELF file.\n", __func__
, filename
);
525 if (gelf_getehdr(elf
, &ehdr
) == NULL
) {
526 pr_err("%s: cannot get elf header.\n", __func__
);
530 sec
= elf_section_by_name(elf
, &ehdr
, &shdr
,
531 ".gnu_debuglink", NULL
);
535 data
= elf_getdata(sec
, NULL
);
539 /* the start of this section is a zero-terminated string */
540 strncpy(debuglink
, data
->d_buf
, size
);
/*
 * dso__swap_init - decide whether DSO data needs byte-swapping by comparing
 * the host endianness (probed via the low byte of a constant 1) with the
 * DSO's EI_DATA encoding byte @eidata.
 * NOTE(review): the switch on @eidata (ELFDATA2LSB/ELFDATA2MSB arms and the
 * error default) plus the return statements are elided in this extract -
 * confirm against the full source.
 */
552 static int dso__swap_init(struct dso
*dso
, unsigned char eidata
)
554 static unsigned int const endian
= 1;
556 dso
->needs_swap
= DSO_SWAP__NO
;
560 /* We are big endian, DSO is little endian. */
561 if (*(unsigned char const *)&endian
!= 1)
562 dso
->needs_swap
= DSO_SWAP__YES
;
566 /* We are little endian, DSO is big endian. */
567 if (*(unsigned char const *)&endian
!= 0)
568 dso
->needs_swap
= DSO_SWAP__YES
;
572 pr_err("unrecognized DSO data encoding %d\n", eidata
);
579 static int decompress_kmodule(struct dso
*dso
, const char *name
,
580 enum dso_binary_type type
)
583 char tmpbuf
[] = "/tmp/perf-kmod-XXXXXX";
586 if (type
!= DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP
&&
587 type
!= DSO_BINARY_TYPE__GUEST_KMODULE_COMP
&&
588 type
!= DSO_BINARY_TYPE__BUILD_ID_CACHE
)
591 if (type
== DSO_BINARY_TYPE__BUILD_ID_CACHE
)
592 name
= dso
->long_name
;
594 if (kmod_path__parse_ext(&m
, name
) || !m
.comp
)
597 fd
= mkstemp(tmpbuf
);
599 dso
->load_errno
= errno
;
603 if (!decompress_to_file(m
.ext
, name
, fd
)) {
604 dso
->load_errno
= DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE
;
616 bool symsrc__possibly_runtime(struct symsrc
*ss
)
618 return ss
->dynsym
|| ss
->opdsec
;
621 bool symsrc__has_symtab(struct symsrc
*ss
)
623 return ss
->symtab
!= NULL
;
626 void symsrc__destroy(struct symsrc
*ss
)
633 bool __weak
elf__needs_adjust_symbols(GElf_Ehdr ehdr
)
635 return ehdr
.e_type
== ET_EXEC
|| ehdr
.e_type
== ET_REL
;
638 int symsrc__init(struct symsrc
*ss
, struct dso
*dso
, const char *name
,
639 enum dso_binary_type type
)
646 if (dso__needs_decompress(dso
)) {
647 fd
= decompress_kmodule(dso
, name
, type
);
651 fd
= open(name
, O_RDONLY
);
653 dso
->load_errno
= errno
;
658 elf
= elf_begin(fd
, PERF_ELF_C_READ_MMAP
, NULL
);
660 pr_debug("%s: cannot read %s ELF file.\n", __func__
, name
);
661 dso
->load_errno
= DSO_LOAD_ERRNO__INVALID_ELF
;
665 if (gelf_getehdr(elf
, &ehdr
) == NULL
) {
666 dso
->load_errno
= DSO_LOAD_ERRNO__INVALID_ELF
;
667 pr_debug("%s: cannot get elf header.\n", __func__
);
671 if (dso__swap_init(dso
, ehdr
.e_ident
[EI_DATA
])) {
672 dso
->load_errno
= DSO_LOAD_ERRNO__INTERNAL_ERROR
;
676 /* Always reject images with a mismatched build-id: */
677 if (dso
->has_build_id
) {
678 u8 build_id
[BUILD_ID_SIZE
];
680 if (elf_read_build_id(elf
, build_id
, BUILD_ID_SIZE
) < 0) {
681 dso
->load_errno
= DSO_LOAD_ERRNO__CANNOT_READ_BUILDID
;
685 if (!dso__build_id_equal(dso
, build_id
)) {
686 pr_debug("%s: build id mismatch for %s.\n", __func__
, name
);
687 dso
->load_errno
= DSO_LOAD_ERRNO__MISMATCHING_BUILDID
;
692 ss
->is_64_bit
= (gelf_getclass(elf
) == ELFCLASS64
);
694 ss
->symtab
= elf_section_by_name(elf
, &ehdr
, &ss
->symshdr
, ".symtab",
696 if (ss
->symshdr
.sh_type
!= SHT_SYMTAB
)
700 ss
->dynsym
= elf_section_by_name(elf
, &ehdr
, &ss
->dynshdr
, ".dynsym",
702 if (ss
->dynshdr
.sh_type
!= SHT_DYNSYM
)
706 ss
->opdsec
= elf_section_by_name(elf
, &ehdr
, &ss
->opdshdr
, ".opd",
708 if (ss
->opdshdr
.sh_type
!= SHT_PROGBITS
)
711 if (dso
->kernel
== DSO_TYPE_USER
) {
713 ss
->adjust_symbols
= (ehdr
.e_type
== ET_EXEC
||
714 ehdr
.e_type
== ET_REL
||
716 elf_section_by_name(elf
, &ehdr
, &shdr
,
720 ss
->adjust_symbols
= elf__needs_adjust_symbols(ehdr
);
723 ss
->name
= strdup(name
);
725 dso
->load_errno
= errno
;
744 * ref_reloc_sym_not_found - has kernel relocation symbol been found.
745 * @kmap: kernel maps and relocation reference symbol
747 * This function returns %true if we are dealing with the kernel maps and the
748 * relocation reference symbol has not yet been found. Otherwise %false is
751 static bool ref_reloc_sym_not_found(struct kmap
*kmap
)
753 return kmap
&& kmap
->ref_reloc_sym
&& kmap
->ref_reloc_sym
->name
&&
754 !kmap
->ref_reloc_sym
->unrelocated_addr
;
758 * ref_reloc - kernel relocation offset.
759 * @kmap: kernel maps and relocation reference symbol
761 * This function returns the offset of kernel addresses as determined by using
762 * the relocation reference symbol i.e. if the kernel has not been relocated
763 * then the return value is zero.
/*
 * ref_reloc - kernel relocation offset: the reference symbol's current
 * address minus its unrelocated address.
 * NOTE(review): the fallthrough path (presumably "return 0;" when no
 * relocation info is available) is elided in this extract - confirm against
 * the full source.
 */
765 static u64
ref_reloc(struct kmap
*kmap
)
767 if (kmap
&& kmap
->ref_reloc_sym
&&
768 kmap
->ref_reloc_sym
->unrelocated_addr
)
769 return kmap
->ref_reloc_sym
->addr
-
770 kmap
->ref_reloc_sym
->unrelocated_addr
;
774 static bool want_demangle(bool is_kernel_sym
)
776 return is_kernel_sym
? symbol_conf
.demangle_kernel
: symbol_conf
.demangle
;
/* Weak no-op hook: architecture code may override to tweak ELF symbols. */
779 void __weak
arch__elf_sym_adjust(GElf_Sym
*sym __maybe_unused
) { }
781 int dso__load_sym(struct dso
*dso
, struct map
*map
,
782 struct symsrc
*syms_ss
, struct symsrc
*runtime_ss
,
783 symbol_filter_t filter
, int kmodule
)
785 struct kmap
*kmap
= dso
->kernel
? map__kmap(map
) : NULL
;
786 struct map_groups
*kmaps
= kmap
? map__kmaps(map
) : NULL
;
787 struct map
*curr_map
= map
;
788 struct dso
*curr_dso
= dso
;
789 Elf_Data
*symstrs
, *secstrs
;
795 Elf_Data
*syms
, *opddata
= NULL
;
797 Elf_Scn
*sec
, *sec_strndx
;
800 bool remap_kernel
= false, adjust_kernel_syms
= false;
805 dso
->symtab_type
= syms_ss
->type
;
806 dso
->is_64_bit
= syms_ss
->is_64_bit
;
807 dso
->rel
= syms_ss
->ehdr
.e_type
== ET_REL
;
810 * Modules may already have symbols from kallsyms, but those symbols
811 * have the wrong values for the dso maps, so remove them.
813 if (kmodule
&& syms_ss
->symtab
)
814 symbols__delete(&dso
->symbols
[map
->type
]);
816 if (!syms_ss
->symtab
) {
818 * If the vmlinux is stripped, fail so we will fall back
819 * to using kallsyms. The vmlinux runtime symbols aren't
825 syms_ss
->symtab
= syms_ss
->dynsym
;
826 syms_ss
->symshdr
= syms_ss
->dynshdr
;
830 ehdr
= syms_ss
->ehdr
;
831 sec
= syms_ss
->symtab
;
832 shdr
= syms_ss
->symshdr
;
834 if (runtime_ss
->opdsec
)
835 opddata
= elf_rawdata(runtime_ss
->opdsec
, NULL
);
837 syms
= elf_getdata(sec
, NULL
);
841 sec
= elf_getscn(elf
, shdr
.sh_link
);
845 symstrs
= elf_getdata(sec
, NULL
);
849 sec_strndx
= elf_getscn(runtime_ss
->elf
, runtime_ss
->ehdr
.e_shstrndx
);
850 if (sec_strndx
== NULL
)
853 secstrs
= elf_getdata(sec_strndx
, NULL
);
857 nr_syms
= shdr
.sh_size
/ shdr
.sh_entsize
;
859 memset(&sym
, 0, sizeof(sym
));
862 * The kernel relocation symbol is needed in advance in order to adjust
863 * kernel maps correctly.
865 if (ref_reloc_sym_not_found(kmap
)) {
866 elf_symtab__for_each_symbol(syms
, nr_syms
, idx
, sym
) {
867 const char *elf_name
= elf_sym__name(&sym
, symstrs
);
869 if (strcmp(elf_name
, kmap
->ref_reloc_sym
->name
))
871 kmap
->ref_reloc_sym
->unrelocated_addr
= sym
.st_value
;
872 map
->reloc
= kmap
->ref_reloc_sym
->addr
-
873 kmap
->ref_reloc_sym
->unrelocated_addr
;
879 * Handle any relocation of vdso necessary because older kernels
880 * attempted to prelink vdso to its virtual address.
882 if (dso__is_vdso(dso
)) {
885 if (elf_section_by_name(elf
, &ehdr
, &tshdr
, ".text", NULL
))
886 map
->reloc
= map
->start
- tshdr
.sh_addr
+ tshdr
.sh_offset
;
889 dso
->adjust_symbols
= runtime_ss
->adjust_symbols
|| ref_reloc(kmap
);
891 * Initial kernel and module mappings do not map to the dso. For
892 * function mappings, flag the fixups.
894 if (map
->type
== MAP__FUNCTION
&& (dso
->kernel
|| kmodule
)) {
896 adjust_kernel_syms
= dso
->adjust_symbols
;
898 elf_symtab__for_each_symbol(syms
, nr_syms
, idx
, sym
) {
900 const char *elf_name
= elf_sym__name(&sym
, symstrs
);
901 char *demangled
= NULL
;
902 int is_label
= elf_sym__is_label(&sym
);
903 const char *section_name
;
904 bool used_opd
= false;
906 if (!is_label
&& !elf_sym__is_a(&sym
, map
->type
))
909 /* Reject ARM ELF "mapping symbols": these aren't unique and
910 * don't identify functions, so will confuse the profile
912 if (ehdr
.e_machine
== EM_ARM
|| ehdr
.e_machine
== EM_AARCH64
) {
913 if (elf_name
[0] == '$' && strchr("adtx", elf_name
[1])
914 && (elf_name
[2] == '\0' || elf_name
[2] == '.'))
918 if (runtime_ss
->opdsec
&& sym
.st_shndx
== runtime_ss
->opdidx
) {
919 u32 offset
= sym
.st_value
- syms_ss
->opdshdr
.sh_addr
;
920 u64
*opd
= opddata
->d_buf
+ offset
;
921 sym
.st_value
= DSO__SWAP(dso
, u64
, *opd
);
922 sym
.st_shndx
= elf_addr_to_index(runtime_ss
->elf
,
927 * When loading symbols in a data mapping, ABS symbols (which
928 * has a value of SHN_ABS in its st_shndx) failed at
929 * elf_getscn(). And it marks the loading as a failure so
930 * already loaded symbols cannot be fixed up.
932 * I'm not sure what should be done. Just ignore them for now.
935 if (sym
.st_shndx
== SHN_ABS
)
938 sec
= elf_getscn(runtime_ss
->elf
, sym
.st_shndx
);
942 gelf_getshdr(sec
, &shdr
);
944 if (is_label
&& !elf_sec__is_a(&shdr
, secstrs
, map
->type
))
947 section_name
= elf_sec__name(&shdr
, secstrs
);
949 /* On ARM, symbols for thumb functions have 1 added to
950 * the symbol address as a flag - remove it */
951 if ((ehdr
.e_machine
== EM_ARM
) &&
952 (map
->type
== MAP__FUNCTION
) &&
956 arch__elf_sym_adjust(&sym
);
958 if (dso
->kernel
|| kmodule
) {
959 char dso_name
[PATH_MAX
];
961 /* Adjust symbol to map to file offset */
962 if (adjust_kernel_syms
)
963 sym
.st_value
-= shdr
.sh_addr
- shdr
.sh_offset
;
965 if (strcmp(section_name
,
966 (curr_dso
->short_name
+
967 dso
->short_name_len
)) == 0)
970 if (strcmp(section_name
, ".text") == 0) {
972 * The initial kernel mapping is based on
973 * kallsyms and identity maps. Overwrite it to
974 * map to the kernel dso.
976 if (remap_kernel
&& dso
->kernel
) {
977 remap_kernel
= false;
978 map
->start
= shdr
.sh_addr
+
980 map
->end
= map
->start
+ shdr
.sh_size
;
981 map
->pgoff
= shdr
.sh_offset
;
982 map
->map_ip
= map__map_ip
;
983 map
->unmap_ip
= map__unmap_ip
;
984 /* Ensure maps are correctly ordered */
987 map_groups__remove(kmaps
, map
);
988 map_groups__insert(kmaps
, map
);
994 * The initial module mapping is based on
995 * /proc/modules mapped to offset zero.
996 * Overwrite it to map to the module dso.
998 if (remap_kernel
&& kmodule
) {
999 remap_kernel
= false;
1000 map
->pgoff
= shdr
.sh_offset
;
1011 snprintf(dso_name
, sizeof(dso_name
),
1012 "%s%s", dso
->short_name
, section_name
);
1014 curr_map
= map_groups__find_by_name(kmaps
, map
->type
, dso_name
);
1015 if (curr_map
== NULL
) {
1016 u64 start
= sym
.st_value
;
1019 start
+= map
->start
+ shdr
.sh_offset
;
1021 curr_dso
= dso__new(dso_name
);
1022 if (curr_dso
== NULL
)
1024 curr_dso
->kernel
= dso
->kernel
;
1025 curr_dso
->long_name
= dso
->long_name
;
1026 curr_dso
->long_name_len
= dso
->long_name_len
;
1027 curr_map
= map__new2(start
, curr_dso
,
1029 if (curr_map
== NULL
) {
1033 if (adjust_kernel_syms
) {
1034 curr_map
->start
= shdr
.sh_addr
+
1036 curr_map
->end
= curr_map
->start
+
1038 curr_map
->pgoff
= shdr
.sh_offset
;
1040 curr_map
->map_ip
= identity__map_ip
;
1041 curr_map
->unmap_ip
= identity__map_ip
;
1043 curr_dso
->symtab_type
= dso
->symtab_type
;
1044 map_groups__insert(kmaps
, curr_map
);
1045 dsos__add(&map
->groups
->machine
->dsos
, curr_dso
);
1046 dso__set_loaded(curr_dso
, map
->type
);
1048 curr_dso
= curr_map
->dso
;
1053 if ((used_opd
&& runtime_ss
->adjust_symbols
)
1054 || (!used_opd
&& syms_ss
->adjust_symbols
)) {
1055 pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64
" "
1056 "sh_addr: %#" PRIx64
" sh_offset: %#" PRIx64
"\n", __func__
,
1057 (u64
)sym
.st_value
, (u64
)shdr
.sh_addr
,
1058 (u64
)shdr
.sh_offset
);
1059 sym
.st_value
-= shdr
.sh_addr
- shdr
.sh_offset
;
1063 * We need to figure out if the object was created from C++ sources
1064 * DWARF DW_compile_unit has this, but we don't always have access
1067 if (want_demangle(dso
->kernel
|| kmodule
)) {
1068 int demangle_flags
= DMGL_NO_OPTS
;
1070 demangle_flags
= DMGL_PARAMS
| DMGL_ANSI
;
1072 demangled
= bfd_demangle(NULL
, elf_name
, demangle_flags
);
1073 if (demangled
!= NULL
)
1074 elf_name
= demangled
;
1076 f
= symbol__new(sym
.st_value
, sym
.st_size
,
1077 GELF_ST_BIND(sym
.st_info
), elf_name
);
1082 if (filter
&& filter(curr_map
, f
))
1085 symbols__insert(&curr_dso
->symbols
[curr_map
->type
], f
);
1091 * For misannotated, zeroed, ASM function sizes.
1094 if (!symbol_conf
.allow_aliases
)
1095 symbols__fixup_duplicate(&dso
->symbols
[map
->type
]);
1096 symbols__fixup_end(&dso
->symbols
[map
->type
]);
1099 * We need to fixup this here too because we create new
1100 * maps here, for things like vsyscall sections.
1102 __map_groups__fixup_end(kmaps
, map
->type
);
1110 static int elf_read_maps(Elf
*elf
, bool exe
, mapfn_t mapfn
, void *data
)
1117 if (elf_getphdrnum(elf
, &phdrnum
))
1120 for (i
= 0; i
< phdrnum
; i
++) {
1121 if (gelf_getphdr(elf
, i
, &phdr
) == NULL
)
1123 if (phdr
.p_type
!= PT_LOAD
)
1126 if (!(phdr
.p_flags
& PF_X
))
1129 if (!(phdr
.p_flags
& PF_R
))
1132 sz
= min(phdr
.p_memsz
, phdr
.p_filesz
);
1135 err
= mapfn(phdr
.p_vaddr
, sz
, phdr
.p_offset
, data
);
1142 int file__read_maps(int fd
, bool exe
, mapfn_t mapfn
, void *data
,
1148 elf
= elf_begin(fd
, PERF_ELF_C_READ_MMAP
, NULL
);
1153 *is_64_bit
= (gelf_getclass(elf
) == ELFCLASS64
);
1155 err
= elf_read_maps(elf
, exe
, mapfn
, data
);
1161 enum dso_type
dso__type_fd(int fd
)
1163 enum dso_type dso_type
= DSO__TYPE_UNKNOWN
;
1168 elf
= elf_begin(fd
, PERF_ELF_C_READ_MMAP
, NULL
);
1173 if (ek
!= ELF_K_ELF
)
1176 if (gelf_getclass(elf
) == ELFCLASS64
) {
1177 dso_type
= DSO__TYPE_64BIT
;
1181 if (gelf_getehdr(elf
, &ehdr
) == NULL
)
1184 if (ehdr
.e_machine
== EM_X86_64
)
1185 dso_type
= DSO__TYPE_X32BIT
;
1187 dso_type
= DSO__TYPE_32BIT
;
1194 static int copy_bytes(int from
, off_t from_offs
, int to
, off_t to_offs
, u64 len
)
1199 char *buf
= malloc(page_size
);
1204 if (lseek(to
, to_offs
, SEEK_SET
) != to_offs
)
1207 if (lseek(from
, from_offs
, SEEK_SET
) != from_offs
)
1214 /* Use read because mmap won't work on proc files */
1215 r
= read(from
, buf
, n
);
1221 r
= write(to
, buf
, n
);
1242 static int kcore__open(struct kcore
*kcore
, const char *filename
)
1246 kcore
->fd
= open(filename
, O_RDONLY
);
1247 if (kcore
->fd
== -1)
1250 kcore
->elf
= elf_begin(kcore
->fd
, ELF_C_READ
, NULL
);
1254 kcore
->elfclass
= gelf_getclass(kcore
->elf
);
1255 if (kcore
->elfclass
== ELFCLASSNONE
)
1258 ehdr
= gelf_getehdr(kcore
->elf
, &kcore
->ehdr
);
1265 elf_end(kcore
->elf
);
1271 static int kcore__init(struct kcore
*kcore
, char *filename
, int elfclass
,
1274 kcore
->elfclass
= elfclass
;
1277 kcore
->fd
= mkstemp(filename
);
1279 kcore
->fd
= open(filename
, O_WRONLY
| O_CREAT
| O_EXCL
, 0400);
1280 if (kcore
->fd
== -1)
1283 kcore
->elf
= elf_begin(kcore
->fd
, ELF_C_WRITE
, NULL
);
1287 if (!gelf_newehdr(kcore
->elf
, elfclass
))
1290 memset(&kcore
->ehdr
, 0, sizeof(GElf_Ehdr
));
1295 elf_end(kcore
->elf
);
1302 static void kcore__close(struct kcore
*kcore
)
1304 elf_end(kcore
->elf
);
1308 static int kcore__copy_hdr(struct kcore
*from
, struct kcore
*to
, size_t count
)
1310 GElf_Ehdr
*ehdr
= &to
->ehdr
;
1311 GElf_Ehdr
*kehdr
= &from
->ehdr
;
1313 memcpy(ehdr
->e_ident
, kehdr
->e_ident
, EI_NIDENT
);
1314 ehdr
->e_type
= kehdr
->e_type
;
1315 ehdr
->e_machine
= kehdr
->e_machine
;
1316 ehdr
->e_version
= kehdr
->e_version
;
1319 ehdr
->e_flags
= kehdr
->e_flags
;
1320 ehdr
->e_phnum
= count
;
1321 ehdr
->e_shentsize
= 0;
1323 ehdr
->e_shstrndx
= 0;
1325 if (from
->elfclass
== ELFCLASS32
) {
1326 ehdr
->e_phoff
= sizeof(Elf32_Ehdr
);
1327 ehdr
->e_ehsize
= sizeof(Elf32_Ehdr
);
1328 ehdr
->e_phentsize
= sizeof(Elf32_Phdr
);
1330 ehdr
->e_phoff
= sizeof(Elf64_Ehdr
);
1331 ehdr
->e_ehsize
= sizeof(Elf64_Ehdr
);
1332 ehdr
->e_phentsize
= sizeof(Elf64_Phdr
);
1335 if (!gelf_update_ehdr(to
->elf
, ehdr
))
1338 if (!gelf_newphdr(to
->elf
, count
))
1344 static int kcore__add_phdr(struct kcore
*kcore
, int idx
, off_t offset
,
1349 .p_flags
= PF_R
| PF_W
| PF_X
,
1355 .p_align
= page_size
,
1358 if (!gelf_update_phdr(kcore
->elf
, idx
, &phdr
))
1364 static off_t
kcore__write(struct kcore
*kcore
)
1366 return elf_update(kcore
->elf
, ELF_C_WRITE
);
1375 struct kcore_copy_info
{
1381 u64 last_module_symbol
;
1382 struct phdr_data kernel_map
;
1383 struct phdr_data modules_map
;
1386 static int kcore_copy__process_kallsyms(void *arg
, const char *name
, char type
,
1389 struct kcore_copy_info
*kci
= arg
;
1391 if (!symbol_type__is_a(type
, MAP__FUNCTION
))
1394 if (strchr(name
, '[')) {
1395 if (start
> kci
->last_module_symbol
)
1396 kci
->last_module_symbol
= start
;
1400 if (!kci
->first_symbol
|| start
< kci
->first_symbol
)
1401 kci
->first_symbol
= start
;
1403 if (!kci
->last_symbol
|| start
> kci
->last_symbol
)
1404 kci
->last_symbol
= start
;
1406 if (!strcmp(name
, "_stext")) {
1411 if (!strcmp(name
, "_etext")) {
1419 static int kcore_copy__parse_kallsyms(struct kcore_copy_info
*kci
,
1422 char kallsyms_filename
[PATH_MAX
];
1424 scnprintf(kallsyms_filename
, PATH_MAX
, "%s/kallsyms", dir
);
1426 if (symbol__restricted_filename(kallsyms_filename
, "/proc/kallsyms"))
1429 if (kallsyms__parse(kallsyms_filename
, kci
,
1430 kcore_copy__process_kallsyms
) < 0)
1436 static int kcore_copy__process_modules(void *arg
,
1437 const char *name __maybe_unused
,
1440 struct kcore_copy_info
*kci
= arg
;
1442 if (!kci
->first_module
|| start
< kci
->first_module
)
1443 kci
->first_module
= start
;
1448 static int kcore_copy__parse_modules(struct kcore_copy_info
*kci
,
1451 char modules_filename
[PATH_MAX
];
1453 scnprintf(modules_filename
, PATH_MAX
, "%s/modules", dir
);
1455 if (symbol__restricted_filename(modules_filename
, "/proc/modules"))
1458 if (modules__parse(modules_filename
, kci
,
1459 kcore_copy__process_modules
) < 0)
1465 static void kcore_copy__map(struct phdr_data
*p
, u64 start
, u64 end
, u64 pgoff
,
1468 if (p
->addr
|| s
< start
|| s
>= end
)
1472 p
->offset
= (s
- start
) + pgoff
;
1473 p
->len
= e
< end
? e
- s
: end
- s
;
1476 static int kcore_copy__read_map(u64 start
, u64 len
, u64 pgoff
, void *data
)
1478 struct kcore_copy_info
*kci
= data
;
1479 u64 end
= start
+ len
;
1481 kcore_copy__map(&kci
->kernel_map
, start
, end
, pgoff
, kci
->stext
,
1484 kcore_copy__map(&kci
->modules_map
, start
, end
, pgoff
, kci
->first_module
,
1485 kci
->last_module_symbol
);
1490 static int kcore_copy__read_maps(struct kcore_copy_info
*kci
, Elf
*elf
)
1492 if (elf_read_maps(elf
, true, kcore_copy__read_map
, kci
) < 0)
1498 static int kcore_copy__calc_maps(struct kcore_copy_info
*kci
, const char *dir
,
1501 if (kcore_copy__parse_kallsyms(kci
, dir
))
1504 if (kcore_copy__parse_modules(kci
, dir
))
1508 kci
->stext
= round_down(kci
->stext
, page_size
);
1510 kci
->stext
= round_down(kci
->first_symbol
, page_size
);
1513 kci
->etext
= round_up(kci
->etext
, page_size
);
1514 } else if (kci
->last_symbol
) {
1515 kci
->etext
= round_up(kci
->last_symbol
, page_size
);
1516 kci
->etext
+= page_size
;
1519 kci
->first_module
= round_down(kci
->first_module
, page_size
);
1521 if (kci
->last_module_symbol
) {
1522 kci
->last_module_symbol
= round_up(kci
->last_module_symbol
,
1524 kci
->last_module_symbol
+= page_size
;
1527 if (!kci
->stext
|| !kci
->etext
)
1530 if (kci
->first_module
&& !kci
->last_module_symbol
)
1533 return kcore_copy__read_maps(kci
, elf
);
1536 static int kcore_copy__copy_file(const char *from_dir
, const char *to_dir
,
1539 char from_filename
[PATH_MAX
];
1540 char to_filename
[PATH_MAX
];
1542 scnprintf(from_filename
, PATH_MAX
, "%s/%s", from_dir
, name
);
1543 scnprintf(to_filename
, PATH_MAX
, "%s/%s", to_dir
, name
);
1545 return copyfile_mode(from_filename
, to_filename
, 0400);
1548 static int kcore_copy__unlink(const char *dir
, const char *name
)
1550 char filename
[PATH_MAX
];
1552 scnprintf(filename
, PATH_MAX
, "%s/%s", dir
, name
);
1554 return unlink(filename
);
1557 static int kcore_copy__compare_fds(int from
, int to
)
1565 buf_from
= malloc(page_size
);
1566 buf_to
= malloc(page_size
);
1567 if (!buf_from
|| !buf_to
)
1571 /* Use read because mmap won't work on proc files */
1572 ret
= read(from
, buf_from
, page_size
);
1581 if (readn(to
, buf_to
, len
) != (int)len
)
1584 if (memcmp(buf_from
, buf_to
, len
))
1595 static int kcore_copy__compare_files(const char *from_filename
,
1596 const char *to_filename
)
1598 int from
, to
, err
= -1;
1600 from
= open(from_filename
, O_RDONLY
);
1604 to
= open(to_filename
, O_RDONLY
);
1606 goto out_close_from
;
1608 err
= kcore_copy__compare_fds(from
, to
);
1616 static int kcore_copy__compare_file(const char *from_dir
, const char *to_dir
,
1619 char from_filename
[PATH_MAX
];
1620 char to_filename
[PATH_MAX
];
1622 scnprintf(from_filename
, PATH_MAX
, "%s/%s", from_dir
, name
);
1623 scnprintf(to_filename
, PATH_MAX
, "%s/%s", to_dir
, name
);
1625 return kcore_copy__compare_files(from_filename
, to_filename
);
1629 * kcore_copy - copy kallsyms, modules and kcore from one directory to another.
1630 * @from_dir: from directory
1631 * @to_dir: to directory
1633 * This function copies kallsyms, modules and kcore files from one directory to
1634 * another. kallsyms and modules are copied entirely. Only code segments are
1635 * copied from kcore. It is assumed that two segments suffice: one for the
1636 * kernel proper and one for all the modules. The code segments are determined
1637 * from kallsyms and modules files. The kernel map starts at _stext or the
1638 * lowest function symbol, and ends at _etext or the highest function symbol.
1639 * The module map starts at the lowest module address and ends at the highest
1640 * module symbol. Start addresses are rounded down to the nearest page. End
1641 * addresses are rounded up to the nearest page. An extra page is added to the
1642 * highest kernel symbol and highest module symbol to, hopefully, encompass that
1643 * symbol too. Because it contains only code sections, the resulting kcore is
1644 * unusual. One significant peculiarity is that the mapping (start -> pgoff)
1645 * is not the same for the kernel map and the modules map. That happens because
1646 * the data is copied adjacently whereas the original kcore has gaps. Finally,
1647 * kallsyms and modules files are compared with their copies to check that
1648 * modules have not been loaded or unloaded while the copies were taking place.
1650 * Return: %0 on success, %-1 on failure.
1652 int kcore_copy(const char *from_dir
, const char *to_dir
)
1655 struct kcore extract
;
1657 int idx
= 0, err
= -1;
1658 off_t offset
= page_size
, sz
, modules_offset
= 0;
1659 struct kcore_copy_info kci
= { .stext
= 0, };
1660 char kcore_filename
[PATH_MAX
];
1661 char extract_filename
[PATH_MAX
];
1663 if (kcore_copy__copy_file(from_dir
, to_dir
, "kallsyms"))
1666 if (kcore_copy__copy_file(from_dir
, to_dir
, "modules"))
1667 goto out_unlink_kallsyms
;
1669 scnprintf(kcore_filename
, PATH_MAX
, "%s/kcore", from_dir
);
1670 scnprintf(extract_filename
, PATH_MAX
, "%s/kcore", to_dir
);
1672 if (kcore__open(&kcore
, kcore_filename
))
1673 goto out_unlink_modules
;
1675 if (kcore_copy__calc_maps(&kci
, from_dir
, kcore
.elf
))
1676 goto out_kcore_close
;
1678 if (kcore__init(&extract
, extract_filename
, kcore
.elfclass
, false))
1679 goto out_kcore_close
;
1681 if (!kci
.modules_map
.addr
)
1684 if (kcore__copy_hdr(&kcore
, &extract
, count
))
1685 goto out_extract_close
;
1687 if (kcore__add_phdr(&extract
, idx
++, offset
, kci
.kernel_map
.addr
,
1688 kci
.kernel_map
.len
))
1689 goto out_extract_close
;
1691 if (kci
.modules_map
.addr
) {
1692 modules_offset
= offset
+ kci
.kernel_map
.len
;
1693 if (kcore__add_phdr(&extract
, idx
, modules_offset
,
1694 kci
.modules_map
.addr
, kci
.modules_map
.len
))
1695 goto out_extract_close
;
1698 sz
= kcore__write(&extract
);
1699 if (sz
< 0 || sz
> offset
)
1700 goto out_extract_close
;
1702 if (copy_bytes(kcore
.fd
, kci
.kernel_map
.offset
, extract
.fd
, offset
,
1703 kci
.kernel_map
.len
))
1704 goto out_extract_close
;
1706 if (modules_offset
&& copy_bytes(kcore
.fd
, kci
.modules_map
.offset
,
1707 extract
.fd
, modules_offset
,
1708 kci
.modules_map
.len
))
1709 goto out_extract_close
;
1711 if (kcore_copy__compare_file(from_dir
, to_dir
, "modules"))
1712 goto out_extract_close
;
1714 if (kcore_copy__compare_file(from_dir
, to_dir
, "kallsyms"))
1715 goto out_extract_close
;
1720 kcore__close(&extract
);
1722 unlink(extract_filename
);
1724 kcore__close(&kcore
);
1727 kcore_copy__unlink(to_dir
, "modules");
1728 out_unlink_kallsyms
:
1730 kcore_copy__unlink(to_dir
, "kallsyms");
1735 int kcore_extract__create(struct kcore_extract
*kce
)
1738 struct kcore extract
;
1740 int idx
= 0, err
= -1;
1741 off_t offset
= page_size
, sz
;
1743 if (kcore__open(&kcore
, kce
->kcore_filename
))
1746 strcpy(kce
->extract_filename
, PERF_KCORE_EXTRACT
);
1747 if (kcore__init(&extract
, kce
->extract_filename
, kcore
.elfclass
, true))
1748 goto out_kcore_close
;
1750 if (kcore__copy_hdr(&kcore
, &extract
, count
))
1751 goto out_extract_close
;
1753 if (kcore__add_phdr(&extract
, idx
, offset
, kce
->addr
, kce
->len
))
1754 goto out_extract_close
;
1756 sz
= kcore__write(&extract
);
1757 if (sz
< 0 || sz
> offset
)
1758 goto out_extract_close
;
1760 if (copy_bytes(kcore
.fd
, kce
->offs
, extract
.fd
, offset
, kce
->len
))
1761 goto out_extract_close
;
1766 kcore__close(&extract
);
1768 unlink(kce
->extract_filename
);
1770 kcore__close(&kcore
);
1775 void kcore_extract__delete(struct kcore_extract
*kce
)
1777 unlink(kce
->extract_filename
);
1780 void symbol__elf_init(void)
1782 elf_version(EV_CURRENT
);