1 // SPDX-License-Identifier: GPL-2.0
11 #include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
14 #include "map_symbol.h"
20 #include <linux/string.h>
21 #include <linux/zalloc.h>
23 #include "namespaces.h"
28 static void __maps__insert(struct maps
*maps
, struct map
*map
);
/* Report whether @filename lives in one of the Android library dirs. */
static inline int is_android_lib(const char *filename)
{
	if (strstarts(filename, "/data/app-lib/"))
		return 1;

	return strstarts(filename, "/system/lib/");
}
36 static inline bool replace_android_lib(const char *filename
, char *newfilename
)
40 size_t app_abi_length
, new_length
;
41 size_t lib_length
= 0;
43 libname
= strrchr(filename
, '/');
45 lib_length
= strlen(libname
);
47 app_abi
= getenv("APP_ABI");
51 app_abi_length
= strlen(app_abi
);
53 if (strstarts(filename
, "/data/app-lib/")) {
59 new_length
= 7 + app_abi_length
+ lib_length
;
61 apk_path
= getenv("APK_PATH");
63 new_length
+= strlen(apk_path
) + 1;
64 if (new_length
> PATH_MAX
)
66 snprintf(newfilename
, new_length
,
67 "%s/libs/%s/%s", apk_path
, app_abi
, libname
);
69 if (new_length
> PATH_MAX
)
71 snprintf(newfilename
, new_length
,
72 "libs/%s/%s", app_abi
, libname
);
77 if (strstarts(filename
, "/system/lib/")) {
83 ndk
= getenv("NDK_ROOT");
84 app
= getenv("APP_PLATFORM");
89 ndk_length
= strlen(ndk
);
90 app_length
= strlen(app
);
92 if (!(ndk_length
&& app_length
&& app_abi_length
))
95 arch
= !strncmp(app_abi
, "arm", 3) ? "arm" :
96 !strncmp(app_abi
, "mips", 4) ? "mips" :
97 !strncmp(app_abi
, "x86", 3) ? "x86" : NULL
;
102 new_length
= 27 + ndk_length
+
103 app_length
+ lib_length
106 if (new_length
> PATH_MAX
)
108 snprintf(newfilename
, new_length
,
109 "%s/platforms/%s/arch-%s/usr/lib/%s",
110 ndk
, app
, arch
, libname
);
117 void map__init(struct map
*map
, u64 start
, u64 end
, u64 pgoff
, struct dso
*dso
)
123 map
->dso
= dso__get(dso
);
124 map
->map_ip
= map__map_ip
;
125 map
->unmap_ip
= map__unmap_ip
;
126 RB_CLEAR_NODE(&map
->rb_node
);
127 map
->erange_warned
= false;
128 refcount_set(&map
->refcnt
, 1);
131 struct map
*map__new(struct machine
*machine
, u64 start
, u64 len
,
132 u64 pgoff
, struct dso_id
*id
,
133 u32 prot
, u32 flags
, char *filename
,
134 struct thread
*thread
)
136 struct map
*map
= malloc(sizeof(*map
));
137 struct nsinfo
*nsi
= NULL
;
141 char newfilename
[PATH_MAX
];
143 int anon
, no_dso
, vdso
, android
;
145 android
= is_android_lib(filename
);
146 anon
= is_anon_memory(filename
) || flags
& MAP_HUGETLB
;
147 vdso
= is_vdso_map(filename
);
148 no_dso
= is_no_dso_memory(filename
);
151 nsi
= nsinfo__get(thread
->nsinfo
);
153 if ((anon
|| no_dso
) && nsi
&& (prot
& PROT_EXEC
)) {
154 snprintf(newfilename
, sizeof(newfilename
),
155 "/tmp/perf-%d.map", nsi
->pid
);
156 filename
= newfilename
;
160 if (replace_android_lib(filename
, newfilename
))
161 filename
= newfilename
;
165 /* The vdso maps are always on the host and not the
166 * container. Ensure that we don't use setns to look
169 nnsi
= nsinfo__copy(nsi
);
172 nnsi
->need_setns
= false;
176 dso
= machine__findnew_vdso(machine
, thread
);
178 dso
= machine__findnew_dso_id(machine
, filename
, id
);
183 map__init(map
, start
, start
+ len
, pgoff
, dso
);
185 if (anon
|| no_dso
) {
186 map
->map_ip
= map
->unmap_ip
= identity__map_ip
;
189 * Set memory without DSO as loaded. All map__find_*
190 * functions still return NULL, and we avoid the
191 * unnecessary map__load warning.
193 if (!(prot
& PROT_EXEC
))
194 dso__set_loaded(dso
);
207 * Constructor variant for modules (where we know from /proc/modules where
208 * they are loaded) and for vmlinux, where only after we load all the
209 * symbols we'll know where it starts and ends.
211 struct map
*map__new2(u64 start
, struct dso
*dso
)
213 struct map
*map
= calloc(1, (sizeof(*map
) +
214 (dso
->kernel
? sizeof(struct kmap
) : 0)));
217 * ->end will be filled after we load all the symbols
219 map__init(map
, start
, 0, 0, dso
);
225 bool __map__is_kernel(const struct map
*map
)
227 if (!map
->dso
->kernel
)
229 return machine__kernel_map(map__kmaps((struct map
*)map
)->machine
) == map
;
232 bool __map__is_extra_kernel_map(const struct map
*map
)
234 struct kmap
*kmap
= __map__kmap((struct map
*)map
);
236 return kmap
&& kmap
->name
[0];
239 bool __map__is_bpf_prog(const struct map
*map
)
243 if (map
->dso
->binary_type
== DSO_BINARY_TYPE__BPF_PROG_INFO
)
247 * If PERF_RECORD_BPF_EVENT is not included, the dso will not have
248 * type of DSO_BINARY_TYPE__BPF_PROG_INFO. In such cases, we can
249 * guess the type based on name.
251 name
= map
->dso
->short_name
;
252 return name
&& (strstr(name
, "bpf_prog_") == name
);
255 bool __map__is_bpf_image(const struct map
*map
)
259 if (map
->dso
->binary_type
== DSO_BINARY_TYPE__BPF_IMAGE
)
263 * If PERF_RECORD_KSYMBOL is not included, the dso will not have
264 * type of DSO_BINARY_TYPE__BPF_IMAGE. In such cases, we can
265 * guess the type based on name.
267 name
= map
->dso
->short_name
;
268 return name
&& is_bpf_image(name
);
271 bool __map__is_ool(const struct map
*map
)
273 return map
->dso
&& map
->dso
->binary_type
== DSO_BINARY_TYPE__OOL
;
276 bool map__has_symbols(const struct map
*map
)
278 return dso__has_symbols(map
->dso
);
281 static void map__exit(struct map
*map
)
283 BUG_ON(refcount_read(&map
->refcnt
) != 0);
287 void map__delete(struct map
*map
)
293 void map__put(struct map
*map
)
295 if (map
&& refcount_dec_and_test(&map
->refcnt
))
299 void map__fixup_start(struct map
*map
)
301 struct rb_root_cached
*symbols
= &map
->dso
->symbols
;
302 struct rb_node
*nd
= rb_first_cached(symbols
);
304 struct symbol
*sym
= rb_entry(nd
, struct symbol
, rb_node
);
305 map
->start
= sym
->start
;
309 void map__fixup_end(struct map
*map
)
311 struct rb_root_cached
*symbols
= &map
->dso
->symbols
;
312 struct rb_node
*nd
= rb_last(&symbols
->rb_root
);
314 struct symbol
*sym
= rb_entry(nd
, struct symbol
, rb_node
);
319 #define DSO__DELETED "(deleted)"
321 int map__load(struct map
*map
)
323 const char *name
= map
->dso
->long_name
;
326 if (dso__loaded(map
->dso
))
329 nr
= dso__load(map
->dso
, map
);
331 if (map
->dso
->has_build_id
) {
332 char sbuild_id
[SBUILD_ID_SIZE
];
334 build_id__sprintf(&map
->dso
->bid
, sbuild_id
);
335 pr_debug("%s with build id %s not found", name
, sbuild_id
);
337 pr_debug("Failed to open %s", name
);
339 pr_debug(", continuing without symbols\n");
341 } else if (nr
== 0) {
342 #ifdef HAVE_LIBELF_SUPPORT
343 const size_t len
= strlen(name
);
344 const size_t real_len
= len
- sizeof(DSO__DELETED
);
346 if (len
> sizeof(DSO__DELETED
) &&
347 strcmp(name
+ real_len
+ 1, DSO__DELETED
) == 0) {
348 pr_debug("%.*s was updated (is prelink enabled?). "
349 "Restart the long running apps that use it!\n",
350 (int)real_len
, name
);
352 pr_debug("no symbols found in %s, maybe install a debug package?\n", name
);
361 struct symbol
*map__find_symbol(struct map
*map
, u64 addr
)
363 if (map__load(map
) < 0)
366 return dso__find_symbol(map
->dso
, addr
);
369 struct symbol
*map__find_symbol_by_name(struct map
*map
, const char *name
)
371 if (map__load(map
) < 0)
374 if (!dso__sorted_by_name(map
->dso
))
375 dso__sort_by_name(map
->dso
);
377 return dso__find_symbol_by_name(map
->dso
, name
);
380 struct map
*map__clone(struct map
*from
)
382 size_t size
= sizeof(struct map
);
385 if (from
->dso
&& from
->dso
->kernel
)
386 size
+= sizeof(struct kmap
);
388 map
= memdup(from
, size
);
390 refcount_set(&map
->refcnt
, 1);
391 RB_CLEAR_NODE(&map
->rb_node
);
398 size_t map__fprintf(struct map
*map
, FILE *fp
)
400 return fprintf(fp
, " %" PRIx64
"-%" PRIx64
" %" PRIx64
" %s\n",
401 map
->start
, map
->end
, map
->pgoff
, map
->dso
->name
);
404 size_t map__fprintf_dsoname(struct map
*map
, FILE *fp
)
406 char buf
[symbol_conf
.pad_output_len_dso
+ 1];
407 const char *dsoname
= "[unknown]";
409 if (map
&& map
->dso
) {
410 if (symbol_conf
.show_kernel_path
&& map
->dso
->long_name
)
411 dsoname
= map
->dso
->long_name
;
413 dsoname
= map
->dso
->name
;
416 if (symbol_conf
.pad_output_len_dso
) {
417 scnprintf_pad(buf
, symbol_conf
.pad_output_len_dso
, "%s", dsoname
);
421 return fprintf(fp
, "%s", dsoname
);
424 char *map__srcline(struct map
*map
, u64 addr
, struct symbol
*sym
)
427 return SRCLINE_UNKNOWN
;
428 return get_srcline(map
->dso
, map__rip_2objdump(map
, addr
), sym
, true, true, addr
);
431 int map__fprintf_srcline(struct map
*map
, u64 addr
, const char *prefix
,
436 if (map
&& map
->dso
) {
437 char *srcline
= map__srcline(map
, addr
, NULL
);
438 if (strncmp(srcline
, SRCLINE_UNKNOWN
, strlen(SRCLINE_UNKNOWN
)) != 0)
439 ret
= fprintf(fp
, "%s%s", prefix
, srcline
);
440 free_srcline(srcline
);
445 void srccode_state_free(struct srccode_state
*state
)
447 zfree(&state
->srcfile
);
452 * map__rip_2objdump - convert symbol start address to objdump address.
454 * @rip: symbol start address
456 * objdump wants/reports absolute IPs for ET_EXEC, and RIPs for ET_DYN.
457 * map->dso->adjust_symbols==1 for ET_EXEC-like cases except ET_REL which is
458 * relative to section start.
460 * Return: Address suitable for passing to "objdump --start-address="
462 u64
map__rip_2objdump(struct map
*map
, u64 rip
)
464 struct kmap
*kmap
= __map__kmap(map
);
467 * vmlinux does not have program headers for PTI entry trampolines and
468 * kcore may not either. However the trampoline object code is on the
469 * main kernel map, so just use that instead.
471 if (kmap
&& is_entry_trampoline(kmap
->name
) && kmap
->kmaps
&& kmap
->kmaps
->machine
) {
472 struct map
*kernel_map
= machine__kernel_map(kmap
->kmaps
->machine
);
478 if (!map
->dso
->adjust_symbols
)
482 return rip
- map
->pgoff
;
485 * kernel modules also have DSO_TYPE_USER in dso->kernel,
486 * but all kernel modules are ET_REL, so won't get here.
488 if (map
->dso
->kernel
== DSO_SPACE__USER
)
489 return rip
+ map
->dso
->text_offset
;
491 return map
->unmap_ip(map
, rip
) - map
->reloc
;
495 * map__objdump_2mem - convert objdump address to a memory address.
497 * @ip: objdump address
499 * Closely related to map__rip_2objdump(), this function takes an address from
500 * objdump and converts it to a memory address. Note this assumes that @map
501 * contains the address. To be sure the result is valid, check it forwards
502 * e.g. map__rip_2objdump(map->map_ip(map, map__objdump_2mem(map, ip))) == ip
504 * Return: Memory address.
506 u64
map__objdump_2mem(struct map
*map
, u64 ip
)
508 if (!map
->dso
->adjust_symbols
)
509 return map
->unmap_ip(map
, ip
);
512 return map
->unmap_ip(map
, ip
+ map
->pgoff
);
515 * kernel modules also have DSO_TYPE_USER in dso->kernel,
516 * but all kernel modules are ET_REL, so won't get here.
518 if (map
->dso
->kernel
== DSO_SPACE__USER
)
519 return map
->unmap_ip(map
, ip
- map
->dso
->text_offset
);
521 return ip
+ map
->reloc
;
524 void maps__init(struct maps
*maps
, struct machine
*machine
)
526 maps
->entries
= RB_ROOT
;
527 init_rwsem(&maps
->lock
);
528 maps
->machine
= machine
;
529 maps
->last_search_by_name
= NULL
;
531 maps
->maps_by_name
= NULL
;
532 refcount_set(&maps
->refcnt
, 1);
535 static void __maps__free_maps_by_name(struct maps
*maps
)
538 * Free everything to try to do it from the rbtree in the next search
540 zfree(&maps
->maps_by_name
);
541 maps
->nr_maps_allocated
= 0;
544 void maps__insert(struct maps
*maps
, struct map
*map
)
546 down_write(&maps
->lock
);
547 __maps__insert(maps
, map
);
550 if (map
->dso
&& map
->dso
->kernel
) {
551 struct kmap
*kmap
= map__kmap(map
);
556 pr_err("Internal error: kernel dso with non kernel map\n");
561 * If we already performed some search by name, then we need to add the just
562 * inserted map and resort.
564 if (maps
->maps_by_name
) {
565 if (maps
->nr_maps
> maps
->nr_maps_allocated
) {
566 int nr_allocate
= maps
->nr_maps
* 2;
567 struct map
**maps_by_name
= realloc(maps
->maps_by_name
, nr_allocate
* sizeof(map
));
569 if (maps_by_name
== NULL
) {
570 __maps__free_maps_by_name(maps
);
571 up_write(&maps
->lock
);
575 maps
->maps_by_name
= maps_by_name
;
576 maps
->nr_maps_allocated
= nr_allocate
;
578 maps
->maps_by_name
[maps
->nr_maps
- 1] = map
;
579 __maps__sort_by_name(maps
);
581 up_write(&maps
->lock
);
584 static void __maps__remove(struct maps
*maps
, struct map
*map
)
586 rb_erase_init(&map
->rb_node
, &maps
->entries
);
590 void maps__remove(struct maps
*maps
, struct map
*map
)
592 down_write(&maps
->lock
);
593 if (maps
->last_search_by_name
== map
)
594 maps
->last_search_by_name
= NULL
;
596 __maps__remove(maps
, map
);
598 if (maps
->maps_by_name
)
599 __maps__free_maps_by_name(maps
);
600 up_write(&maps
->lock
);
603 static void __maps__purge(struct maps
*maps
)
605 struct map
*pos
, *next
;
607 maps__for_each_entry_safe(maps
, pos
, next
) {
608 rb_erase_init(&pos
->rb_node
, &maps
->entries
);
613 void maps__exit(struct maps
*maps
)
615 down_write(&maps
->lock
);
617 up_write(&maps
->lock
);
620 bool maps__empty(struct maps
*maps
)
622 return !maps__first(maps
);
625 struct maps
*maps__new(struct machine
*machine
)
627 struct maps
*maps
= zalloc(sizeof(*maps
));
630 maps__init(maps
, machine
);
635 void maps__delete(struct maps
*maps
)
638 unwind__finish_access(maps
);
642 void maps__put(struct maps
*maps
)
644 if (maps
&& refcount_dec_and_test(&maps
->refcnt
))
648 struct symbol
*maps__find_symbol(struct maps
*maps
, u64 addr
, struct map
**mapp
)
650 struct map
*map
= maps__find(maps
, addr
);
652 /* Ensure map is loaded before using map->map_ip */
653 if (map
!= NULL
&& map__load(map
) >= 0) {
656 return map__find_symbol(map
, map
->map_ip(map
, addr
));
662 static bool map__contains_symbol(struct map
*map
, struct symbol
*sym
)
664 u64 ip
= map
->unmap_ip(map
, sym
->start
);
666 return ip
>= map
->start
&& ip
< map
->end
;
669 struct symbol
*maps__find_symbol_by_name(struct maps
*maps
, const char *name
, struct map
**mapp
)
674 down_read(&maps
->lock
);
676 maps__for_each_entry(maps
, pos
) {
677 sym
= map__find_symbol_by_name(pos
, name
);
681 if (!map__contains_symbol(pos
, sym
)) {
692 up_read(&maps
->lock
);
696 int maps__find_ams(struct maps
*maps
, struct addr_map_symbol
*ams
)
698 if (ams
->addr
< ams
->ms
.map
->start
|| ams
->addr
>= ams
->ms
.map
->end
) {
701 ams
->ms
.map
= maps__find(maps
, ams
->addr
);
702 if (ams
->ms
.map
== NULL
)
706 ams
->al_addr
= ams
->ms
.map
->map_ip(ams
->ms
.map
, ams
->addr
);
707 ams
->ms
.sym
= map__find_symbol(ams
->ms
.map
, ams
->al_addr
);
709 return ams
->ms
.sym
? 0 : -1;
712 size_t maps__fprintf(struct maps
*maps
, FILE *fp
)
717 down_read(&maps
->lock
);
719 maps__for_each_entry(maps
, pos
) {
720 printed
+= fprintf(fp
, "Map:");
721 printed
+= map__fprintf(pos
, fp
);
723 printed
+= dso__fprintf(pos
->dso
, fp
);
724 printed
+= fprintf(fp
, "--\n");
728 up_read(&maps
->lock
);
733 int maps__fixup_overlappings(struct maps
*maps
, struct map
*map
, FILE *fp
)
735 struct rb_root
*root
;
736 struct rb_node
*next
, *first
;
739 down_write(&maps
->lock
);
741 root
= &maps
->entries
;
744 * Find first map where end > map->start.
745 * Same as find_vma() in kernel.
747 next
= root
->rb_node
;
750 struct map
*pos
= rb_entry(next
, struct map
, rb_node
);
752 if (pos
->end
> map
->start
) {
754 if (pos
->start
<= map
->start
)
756 next
= next
->rb_left
;
758 next
= next
->rb_right
;
763 struct map
*pos
= rb_entry(next
, struct map
, rb_node
);
764 next
= rb_next(&pos
->rb_node
);
767 * Stop if current map starts after map->end.
768 * Maps are ordered by start: next will not overlap for sure.
770 if (pos
->start
>= map
->end
)
776 pr_debug("overlapping maps in %s (disable tui for more info)\n",
779 fputs("overlapping maps:\n", fp
);
780 map__fprintf(map
, fp
);
781 map__fprintf(pos
, fp
);
785 rb_erase_init(&pos
->rb_node
, root
);
787 * Now check if we need to create new maps for areas not
788 * overlapped by the new map:
790 if (map
->start
> pos
->start
) {
791 struct map
*before
= map__clone(pos
);
793 if (before
== NULL
) {
798 before
->end
= map
->start
;
799 __maps__insert(maps
, before
);
800 if (verbose
>= 2 && !use_browser
)
801 map__fprintf(before
, fp
);
805 if (map
->end
< pos
->end
) {
806 struct map
*after
= map__clone(pos
);
813 after
->start
= map
->end
;
814 after
->pgoff
+= map
->end
- pos
->start
;
815 assert(pos
->map_ip(pos
, map
->end
) == after
->map_ip(after
, map
->end
));
816 __maps__insert(maps
, after
);
817 if (verbose
>= 2 && !use_browser
)
818 map__fprintf(after
, fp
);
830 up_write(&maps
->lock
);
835 * XXX This should not really _copy_ te maps, but refcount them.
837 int maps__clone(struct thread
*thread
, struct maps
*parent
)
839 struct maps
*maps
= thread
->maps
;
843 down_read(&parent
->lock
);
845 maps__for_each_entry(parent
, map
) {
846 struct map
*new = map__clone(map
);
850 err
= unwind__prepare_access(maps
, new, NULL
);
854 maps__insert(maps
, new);
860 up_read(&parent
->lock
);
864 static void __maps__insert(struct maps
*maps
, struct map
*map
)
866 struct rb_node
**p
= &maps
->entries
.rb_node
;
867 struct rb_node
*parent
= NULL
;
868 const u64 ip
= map
->start
;
873 m
= rb_entry(parent
, struct map
, rb_node
);
880 rb_link_node(&map
->rb_node
, parent
, p
);
881 rb_insert_color(&map
->rb_node
, &maps
->entries
);
885 struct map
*maps__find(struct maps
*maps
, u64 ip
)
890 down_read(&maps
->lock
);
892 p
= maps
->entries
.rb_node
;
894 m
= rb_entry(p
, struct map
, rb_node
);
897 else if (ip
>= m
->end
)
905 up_read(&maps
->lock
);
909 struct map
*maps__first(struct maps
*maps
)
911 struct rb_node
*first
= rb_first(&maps
->entries
);
914 return rb_entry(first
, struct map
, rb_node
);
918 static struct map
*__map__next(struct map
*map
)
920 struct rb_node
*next
= rb_next(&map
->rb_node
);
923 return rb_entry(next
, struct map
, rb_node
);
927 struct map
*map__next(struct map
*map
)
929 return map
? __map__next(map
) : NULL
;
932 struct kmap
*__map__kmap(struct map
*map
)
934 if (!map
->dso
|| !map
->dso
->kernel
)
936 return (struct kmap
*)(map
+ 1);
939 struct kmap
*map__kmap(struct map
*map
)
941 struct kmap
*kmap
= __map__kmap(map
);
944 pr_err("Internal error: map__kmap with a non-kernel map\n");
948 struct maps
*map__kmaps(struct map
*map
)
950 struct kmap
*kmap
= map__kmap(map
);
952 if (!kmap
|| !kmap
->kmaps
) {
953 pr_err("Internal error: map__kmaps with a non-kernel map\n");