1 // SPDX-License-Identifier: GPL-2.0
11 #include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
14 #include "map_symbol.h"
20 #include <linux/string.h>
21 #include <linux/zalloc.h>
23 #include "namespaces.h"
28 static void __maps__insert(struct maps
*maps
, struct map
*map
);
30 static inline int is_anon_memory(const char *filename
, u32 flags
)
32 return flags
& MAP_HUGETLB
||
33 !strcmp(filename
, "//anon") ||
34 !strncmp(filename
, "/dev/zero", sizeof("/dev/zero") - 1) ||
35 !strncmp(filename
, "/anon_hugepage", sizeof("/anon_hugepage") - 1);
/*
 * is_no_dso_memory - is @filename a memory region with no backing DSO?
 *
 * Matches the stack ("[stack" also covers per-thread "[stack:<tid>]"),
 * SysV shared memory segments ("/SYSV<key>") and the heap.
 *
 * Fix: replace the hand-counted magic lengths (6, 5) with the
 * sizeof(literal)-1 idiom already used by the sibling is_anon_memory(),
 * so the length can never drift from the literal.
 */
static inline int is_no_dso_memory(const char *filename)
{
	return !strncmp(filename, "[stack", sizeof("[stack") - 1) ||
	       !strncmp(filename, "/SYSV", sizeof("/SYSV") - 1) ||
	       !strcmp(filename, "[heap]");
}
/*
 * is_android_lib - is @filename under one of the well-known Android
 * library roots (per-app or system libraries)?
 */
static inline int is_android_lib(const char *filename)
{
	if (strstarts(filename, "/data/app-lib/"))
		return 1;

	return strstarts(filename, "/system/lib/");
}
/*
 * replace_android_lib - rewrite an Android library path into a host-visible
 * one, using the APP_ABI/APK_PATH/NDK_ROOT/APP_PLATFORM environment
 * variables: "/data/app-lib/..." is mapped into the APK's libs/<abi>/
 * directory, "/system/lib/..." into the NDK platform sysroot.
 *
 * NOTE(review): this extraction dropped interior lines (declarations for
 * libname/app_abi/apk_path/ndk/app/arch, the early returns, and closing
 * braces — original line numbering skips). Fragments below are kept
 * byte-identical; do not treat them as compilable.
 */
51 static inline bool replace_android_lib(const char *filename
, char *newfilename
)
55 size_t app_abi_length
, new_length
;
56 size_t lib_length
= 0;
/* basename of the library being remapped */
58 libname
= strrchr(filename
, '/');
60 lib_length
= strlen(libname
);
62 app_abi
= getenv("APP_ABI");
66 app_abi_length
= strlen(app_abi
);
/* Case 1: per-app library -> <APK_PATH>/libs/<abi>/<lib> */
68 if (strstarts(filename
, "/data/app-lib/")) {
74 new_length
= 7 + app_abi_length
+ lib_length
;
76 apk_path
= getenv("APK_PATH");
78 new_length
+= strlen(apk_path
) + 1;
79 if (new_length
> PATH_MAX
)
81 snprintf(newfilename
, new_length
,
82 "%s/libs/%s/%s", apk_path
, app_abi
, libname
);
84 if (new_length
> PATH_MAX
)
86 snprintf(newfilename
, new_length
,
87 "libs/%s/%s", app_abi
, libname
);
/* Case 2: system library -> NDK platform sysroot for the target arch */
92 if (strstarts(filename
, "/system/lib/")) {
98 ndk
= getenv("NDK_ROOT");
99 app
= getenv("APP_PLATFORM");
104 ndk_length
= strlen(ndk
);
105 app_length
= strlen(app
);
107 if (!(ndk_length
&& app_length
&& app_abi_length
))
/* map the ABI string onto the NDK arch directory name */
110 arch
= !strncmp(app_abi
, "arm", 3) ? "arm" :
111 !strncmp(app_abi
, "mips", 4) ? "mips" :
112 !strncmp(app_abi
, "x86", 3) ? "x86" : NULL
;
117 new_length
= 27 + ndk_length
+
118 app_length
+ lib_length
121 if (new_length
> PATH_MAX
)
123 snprintf(newfilename
, new_length
,
124 "%s/platforms/%s/arch-%s/usr/lib/%s",
125 ndk
, app
, arch
, libname
);
/*
 * map__init - initialize an already-allocated struct map: take a reference
 * on @dso, install the default map_ip/unmap_ip translation callbacks,
 * clear the rbtree node and set the refcount to 1.
 *
 * NOTE(review): the extraction dropped the start/end/pgoff field
 * assignments (original lines 133-137 are missing here); fragments kept
 * byte-identical.
 */
132 void map__init(struct map
*map
, u64 start
, u64 end
, u64 pgoff
, struct dso
*dso
)
138 map
->dso
= dso__get(dso
);
139 map
->map_ip
= map__map_ip
;
140 map
->unmap_ip
= map__unmap_ip
;
141 RB_CLEAR_NODE(&map
->rb_node
);
142 map
->erange_warned
= false;
143 refcount_set(&map
->refcnt
, 1);
/*
 * map__new - constructor for a map coming from a PERF_RECORD_MMAP-style
 * event: classifies the filename (anonymous / no-dso / vdso / Android
 * library), applies namespace- and Android-specific filename rewrites,
 * looks up or creates the backing dso and initializes the map over
 * [start, start + len).
 *
 * NOTE(review): many interior lines are missing from this extraction
 * (NULL checks, braces, the vdso/else branching, return paths — original
 * numbering skips). Fragments below are kept byte-identical.
 */
146 struct map
*map__new(struct machine
*machine
, u64 start
, u64 len
,
147 u64 pgoff
, struct dso_id
*id
,
148 u32 prot
, u32 flags
, char *filename
,
149 struct thread
*thread
)
151 struct map
*map
= malloc(sizeof(*map
));
152 struct nsinfo
*nsi
= NULL
;
156 char newfilename
[PATH_MAX
];
158 int anon
, no_dso
, vdso
, android
;
/* classify the mapping by its filename (and flags for hugetlb) */
160 android
= is_android_lib(filename
);
161 anon
= is_anon_memory(filename
, flags
);
162 vdso
= is_vdso_map(filename
);
163 no_dso
= is_no_dso_memory(filename
);
166 nsi
= nsinfo__get(thread
->nsinfo
);
/* executable anon memory: point at the JIT /tmp/perf-<pid>.map file */
168 if ((anon
|| no_dso
) && nsi
&& (prot
& PROT_EXEC
)) {
169 snprintf(newfilename
, sizeof(newfilename
),
170 "/tmp/perf-%d.map", nsi
->pid
);
171 filename
= newfilename
;
175 if (replace_android_lib(filename
, newfilename
))
176 filename
= newfilename
;
180 /* The vdso maps are always on the host and not the
181 * container. Ensure that we don't use setns to look
184 nnsi
= nsinfo__copy(nsi
);
187 nnsi
->need_setns
= false;
191 dso
= machine__findnew_vdso(machine
, thread
);
193 dso
= machine__findnew_dso_id(machine
, filename
, id
);
198 map__init(map
, start
, start
+ len
, pgoff
, dso
);
200 if (anon
|| no_dso
) {
201 map
->map_ip
= map
->unmap_ip
= identity__map_ip
;
204 * Set memory without DSO as loaded. All map__find_*
205 * functions still return NULL, and we avoid the
206 * unnecessary map__load warning.
208 if (!(prot
& PROT_EXEC
))
209 dso__set_loaded(dso
);
/*
 * NOTE(review): allocation NULL check and return statement are missing
 * from this extraction; fragments kept byte-identical. The calloc also
 * reserves trailing space for a struct kmap when the dso is a kernel one
 * (see __map__kmap, which relies on that layout).
 */
222 * Constructor variant for modules (where we know from /proc/modules where
223 * they are loaded) and for vmlinux, where only after we load all the
224 * symbols we'll know where it starts and ends.
226 struct map
*map__new2(u64 start
, struct dso
*dso
)
228 struct map
*map
= calloc(1, (sizeof(*map
) +
229 (dso
->kernel
? sizeof(struct kmap
) : 0)));
232 * ->end will be filled after we load all the symbols
234 map__init(map
, start
, 0, 0, dso
);
240 bool __map__is_kernel(const struct map
*map
)
242 if (!map
->dso
->kernel
)
244 return machine__kernel_map(map__kmaps((struct map
*)map
)->machine
) == map
;
247 bool __map__is_extra_kernel_map(const struct map
*map
)
249 struct kmap
*kmap
= __map__kmap((struct map
*)map
);
251 return kmap
&& kmap
->name
[0];
254 bool __map__is_bpf_prog(const struct map
*map
)
258 if (map
->dso
->binary_type
== DSO_BINARY_TYPE__BPF_PROG_INFO
)
262 * If PERF_RECORD_BPF_EVENT is not included, the dso will not have
263 * type of DSO_BINARY_TYPE__BPF_PROG_INFO. In such cases, we can
264 * guess the type based on name.
266 name
= map
->dso
->short_name
;
267 return name
&& (strstr(name
, "bpf_prog_") == name
);
270 bool __map__is_ool(const struct map
*map
)
272 return map
->dso
&& map
->dso
->binary_type
== DSO_BINARY_TYPE__OOL
;
275 bool map__has_symbols(const struct map
*map
)
277 return dso__has_symbols(map
->dso
);
/*
 * map__exit - tear down a map's contents; must only run once the
 * refcount has dropped to zero (asserted below).
 * NOTE(review): the dso release line is missing from this extraction.
 */
280 static void map__exit(struct map
*map
)
282 BUG_ON(refcount_read(&map
->refcnt
) != 0);
/*
 * map__delete - destroy and free a map.
 * NOTE(review): the body (exit + free) is missing from this extraction.
 */
286 void map__delete(struct map
*map
)
292 void map__put(struct map
*map
)
294 if (map
&& refcount_dec_and_test(&map
->refcnt
))
298 void map__fixup_start(struct map
*map
)
300 struct rb_root_cached
*symbols
= &map
->dso
->symbols
;
301 struct rb_node
*nd
= rb_first_cached(symbols
);
303 struct symbol
*sym
= rb_entry(nd
, struct symbol
, rb_node
);
304 map
->start
= sym
->start
;
308 void map__fixup_end(struct map
*map
)
310 struct rb_root_cached
*symbols
= &map
->dso
->symbols
;
311 struct rb_node
*nd
= rb_last(&symbols
->rb_root
);
313 struct symbol
*sym
= rb_entry(nd
, struct symbol
, rb_node
);
318 #define DSO__DELETED "(deleted)"
/*
 * map__load - load the dso's symbols for this map (idempotent: returns
 * early if already loaded). On failure, emits pr_debug diagnostics that
 * distinguish a missing build-id dso from a plain open failure, and —
 * with libelf — detects the "(deleted)" suffix left by prelink-style
 * updates of a long-running binary.
 *
 * NOTE(review): interior lines are missing from this extraction
 * (the `int nr;` declaration, error branching, build-id formatting
 * argument, #endif, return statements). Fragments kept byte-identical.
 */
320 int map__load(struct map
*map
)
322 const char *name
= map
->dso
->long_name
;
325 if (dso__loaded(map
->dso
))
328 nr
= dso__load(map
->dso
, map
);
/* dso__load failed: explain why as precisely as we can */
330 if (map
->dso
->has_build_id
) {
331 char sbuild_id
[SBUILD_ID_SIZE
];
333 build_id__sprintf(map
->dso
->build_id
,
334 sizeof(map
->dso
->build_id
),
336 pr_debug("%s with build id %s not found", name
, sbuild_id
);
338 pr_debug("Failed to open %s", name
);
340 pr_debug(", continuing without symbols\n");
342 } else if (nr
== 0) {
343 #ifdef HAVE_LIBELF_SUPPORT
344 const size_t len
= strlen(name
);
345 const size_t real_len
= len
- sizeof(DSO__DELETED
);
/* file replaced on disk while mapped: name ends in " (deleted)" */
347 if (len
> sizeof(DSO__DELETED
) &&
348 strcmp(name
+ real_len
+ 1, DSO__DELETED
) == 0) {
349 pr_debug("%.*s was updated (is prelink enabled?). "
350 "Restart the long running apps that use it!\n",
351 (int)real_len
, name
);
353 pr_debug("no symbols found in %s, maybe install a debug package?\n", name
);
362 struct symbol
*map__find_symbol(struct map
*map
, u64 addr
)
364 if (map__load(map
) < 0)
367 return dso__find_symbol(map
->dso
, addr
);
370 struct symbol
*map__find_symbol_by_name(struct map
*map
, const char *name
)
372 if (map__load(map
) < 0)
375 if (!dso__sorted_by_name(map
->dso
))
376 dso__sort_by_name(map
->dso
);
378 return dso__find_symbol_by_name(map
->dso
, name
);
/*
 * map__clone - duplicate a map via memdup (including the trailing kmap
 * for kernel dsos) and reset its refcount/rbtree linkage so the clone is
 * an independent object.
 * NOTE(review): the NULL check after memdup, the dso__get on the clone
 * and the return statement are missing from this extraction.
 */
381 struct map
*map__clone(struct map
*from
)
383 size_t size
= sizeof(struct map
);
/* kernel maps carry a struct kmap immediately after the map */
386 if (from
->dso
&& from
->dso
->kernel
)
387 size
+= sizeof(struct kmap
);
389 map
= memdup(from
, size
);
391 refcount_set(&map
->refcnt
, 1);
392 RB_CLEAR_NODE(&map
->rb_node
);
399 size_t map__fprintf(struct map
*map
, FILE *fp
)
401 return fprintf(fp
, " %" PRIx64
"-%" PRIx64
" %" PRIx64
" %s\n",
402 map
->start
, map
->end
, map
->pgoff
, map
->dso
->name
);
/*
 * map__fprintf_dsoname - print the dso name for @map to @fp, preferring
 * the full path when symbol_conf.show_kernel_path is set, padding the
 * output to symbol_conf.pad_output_len_dso when configured, and falling
 * back to "[unknown]" for NULL map/dso.
 * NOTE(review): the else branch and the `dsoname = buf;` line after the
 * padding are missing from this extraction; fragments kept byte-identical.
 */
405 size_t map__fprintf_dsoname(struct map
*map
, FILE *fp
)
407 char buf
[symbol_conf
.pad_output_len_dso
+ 1];
408 const char *dsoname
= "[unknown]";
410 if (map
&& map
->dso
) {
411 if (symbol_conf
.show_kernel_path
&& map
->dso
->long_name
)
412 dsoname
= map
->dso
->long_name
;
414 dsoname
= map
->dso
->name
;
/* optionally pad the name to a fixed column width */
417 if (symbol_conf
.pad_output_len_dso
) {
418 scnprintf_pad(buf
, symbol_conf
.pad_output_len_dso
, "%s", dsoname
);
422 return fprintf(fp
, "%s", dsoname
);
/*
 * map__srcline - resolve @addr in @map to a "file:line" string via the
 * dso's debug info; returns SRCLINE_UNKNOWN when unresolvable.
 * NOTE(review): the guard condition before the SRCLINE_UNKNOWN return is
 * missing from this extraction (presumably a NULL map/dso check — TODO
 * confirm against the full file).
 */
425 char *map__srcline(struct map
*map
, u64 addr
, struct symbol
*sym
)
428 return SRCLINE_UNKNOWN
;
429 return get_srcline(map
->dso
, map__rip_2objdump(map
, addr
), sym
, true, true, addr
);
/*
 * map__fprintf_srcline - print "<prefix><file:line>" for @addr if the
 * srcline can be resolved; returns the number of characters printed
 * (0 when nothing was resolved). Always frees the srcline string.
 * NOTE(review): the trailing parameter line (the FILE * argument used as
 * `fp` below), the `ret` initialization and the return are missing from
 * this extraction.
 */
432 int map__fprintf_srcline(struct map
*map
, u64 addr
, const char *prefix
,
437 if (map
&& map
->dso
) {
438 char *srcline
= map__srcline(map
, addr
, NULL
);
/* only print when the lookup produced something other than the sentinel */
439 if (strncmp(srcline
, SRCLINE_UNKNOWN
, strlen(SRCLINE_UNKNOWN
)) != 0)
440 ret
= fprintf(fp
, "%s%s", prefix
, srcline
);
441 free_srcline(srcline
);
/*
 * srccode_state_free - release the srcfile string held by @state.
 * NOTE(review): one trailing statement is missing from this extraction.
 */
446 void srccode_state_free(struct srccode_state
*state
)
448 zfree(&state
->srcfile
);
/*
 * NOTE(review): interior lines are missing from this extraction (the
 * trampoline-branch return, the adjust-symbols early return path, the
 * dso->rel branch — original numbering skips). Fragments kept
 * byte-identical below the original kernel-doc comment.
 */
453 * map__rip_2objdump - convert symbol start address to objdump address.
455 * @rip: symbol start address
457 * objdump wants/reports absolute IPs for ET_EXEC, and RIPs for ET_DYN.
458 * map->dso->adjust_symbols==1 for ET_EXEC-like cases except ET_REL which is
459 * relative to section start.
461 * Return: Address suitable for passing to "objdump --start-address="
463 u64
map__rip_2objdump(struct map
*map
, u64 rip
)
465 struct kmap
*kmap
= __map__kmap(map
);
468 * vmlinux does not have program headers for PTI entry trampolines and
469 * kcore may not either. However the trampoline object code is on the
470 * main kernel map, so just use that instead.
472 if (kmap
&& is_entry_trampoline(kmap
->name
) && kmap
->kmaps
&& kmap
->kmaps
->machine
) {
473 struct map
*kernel_map
= machine__kernel_map(kmap
->kmaps
->machine
);
479 if (!map
->dso
->adjust_symbols
)
483 return rip
- map
->pgoff
;
486 * kernel modules also have DSO_TYPE_USER in dso->kernel,
487 * but all kernel modules are ET_REL, so won't get here.
489 if (map
->dso
->kernel
== DSO_SPACE__USER
)
490 return rip
+ map
->dso
->text_offset
;
492 return map
->unmap_ip(map
, rip
) - map
->reloc
;
/*
 * NOTE(review): interior condition lines are missing from this
 * extraction (the branch selecting the `ip + map->pgoff` form —
 * presumably a dso->rel test, TODO confirm). Fragments kept
 * byte-identical below the original kernel-doc comment.
 */
496 * map__objdump_2mem - convert objdump address to a memory address.
498 * @ip: objdump address
500 * Closely related to map__rip_2objdump(), this function takes an address from
501 * objdump and converts it to a memory address. Note this assumes that @map
502 * contains the address. To be sure the result is valid, check it forwards
503 * e.g. map__rip_2objdump(map->map_ip(map, map__objdump_2mem(map, ip))) == ip
505 * Return: Memory address.
507 u64
map__objdump_2mem(struct map
*map
, u64 ip
)
509 if (!map
->dso
->adjust_symbols
)
510 return map
->unmap_ip(map
, ip
);
513 return map
->unmap_ip(map
, ip
+ map
->pgoff
);
516 * kernel modules also have DSO_TYPE_USER in dso->kernel,
517 * but all kernel modules are ET_REL, so won't get here.
519 if (map
->dso
->kernel
== DSO_SPACE__USER
)
520 return map
->unmap_ip(map
, ip
- map
->dso
->text_offset
);
522 return ip
+ map
->reloc
;
/*
 * maps__init - initialize a maps container: empty rbtree, fresh rwsem,
 * owning machine back-pointer, cleared by-name cache and refcount 1.
 * NOTE(review): at least one field initialization line is missing from
 * this extraction (original numbering skips 531).
 */
525 void maps__init(struct maps
*maps
, struct machine
*machine
)
527 maps
->entries
= RB_ROOT
;
528 init_rwsem(&maps
->lock
);
529 maps
->machine
= machine
;
530 maps
->last_search_by_name
= NULL
;
532 maps
->maps_by_name
= NULL
;
533 refcount_set(&maps
->refcnt
, 1);
536 static void __maps__free_maps_by_name(struct maps
*maps
)
539 * Free everything to try to do it from the rbtree in the next search
541 zfree(&maps
->maps_by_name
);
542 maps
->nr_maps_allocated
= 0;
/*
 * maps__insert - insert @map into @maps under the write lock, sanity
 * checking that kernel dsos carry a kmap, and keeping the optional
 * sorted by-name array in sync (growing it geometrically and re-sorting
 * after the append).
 * NOTE(review): interior lines are missing from this extraction
 * (nr_maps increment, the kmap NULL branch, braces, the error return
 * after the realloc failure — original numbering skips).
 */
545 void maps__insert(struct maps
*maps
, struct map
*map
)
547 down_write(&maps
->lock
);
548 __maps__insert(maps
, map
);
/* kernel dsos must have been allocated with a trailing kmap */
551 if (map
->dso
&& map
->dso
->kernel
) {
552 struct kmap
*kmap
= map__kmap(map
);
557 pr_err("Internal error: kernel dso with non kernel map\n");
562 * If we already performed some search by name, then we need to add the just
563 * inserted map and resort.
565 if (maps
->maps_by_name
) {
566 if (maps
->nr_maps
> maps
->nr_maps_allocated
) {
/* grow geometrically to amortize repeated inserts */
567 int nr_allocate
= maps
->nr_maps
* 2;
568 struct map
**maps_by_name
= realloc(maps
->maps_by_name
, nr_allocate
* sizeof(map
));
570 if (maps_by_name
== NULL
) {
571 __maps__free_maps_by_name(maps
);
572 up_write(&maps
->lock
);
576 maps
->maps_by_name
= maps_by_name
;
577 maps
->nr_maps_allocated
= nr_allocate
;
579 maps
->maps_by_name
[maps
->nr_maps
- 1] = map
;
580 __maps__sort_by_name(maps
);
582 up_write(&maps
->lock
);
/*
 * __maps__remove - unlink @map from the rbtree; caller holds the write
 * lock. NOTE(review): the reference drop on @map is missing from this
 * extraction.
 */
585 static void __maps__remove(struct maps
*maps
, struct map
*map
)
587 rb_erase_init(&map
->rb_node
, &maps
->entries
);
/*
 * maps__remove - remove @map from @maps under the write lock,
 * invalidating the cached last-by-name hit if it pointed at @map and
 * dropping the whole by-name array (rebuilt lazily on the next search).
 * NOTE(review): the nr_maps decrement is missing from this extraction.
 */
591 void maps__remove(struct maps
*maps
, struct map
*map
)
593 down_write(&maps
->lock
);
/* don't let the by-name cache dangle on the removed map */
594 if (maps
->last_search_by_name
== map
)
595 maps
->last_search_by_name
= NULL
;
597 __maps__remove(maps
, map
);
599 if (maps
->maps_by_name
)
600 __maps__free_maps_by_name(maps
);
601 up_write(&maps
->lock
);
/*
 * __maps__purge - erase every map from the rbtree using the _safe
 * iterator (the node is unlinked while walking).
 * NOTE(review): the per-entry reference drop is missing from this
 * extraction.
 */
604 static void __maps__purge(struct maps
*maps
)
606 struct map
*pos
, *next
;
608 maps__for_each_entry_safe(maps
, pos
, next
) {
609 rb_erase_init(&pos
->rb_node
, &maps
->entries
);
/*
 * maps__exit - tear down the container under the write lock.
 * NOTE(review): the purge call between the lock/unlock pair is missing
 * from this extraction.
 */
614 void maps__exit(struct maps
*maps
)
616 down_write(&maps
->lock
);
618 up_write(&maps
->lock
);
621 bool maps__empty(struct maps
*maps
)
623 return !maps__first(maps
);
/*
 * maps__new - allocate (zeroed) and initialize a maps container for
 * @machine. NOTE(review): the NULL check on the allocation and the
 * return are missing from this extraction.
 */
626 struct maps
*maps__new(struct machine
*machine
)
628 struct maps
*maps
= zalloc(sizeof(*maps
));
631 maps__init(maps
, machine
);
/*
 * maps__delete - destroy a maps container, tearing down its unwind
 * access state. NOTE(review): the exit/free calls are missing from
 * this extraction.
 */
636 void maps__delete(struct maps
*maps
)
639 unwind__finish_access(maps
);
643 void maps__put(struct maps
*maps
)
645 if (maps
&& refcount_dec_and_test(&maps
->refcnt
))
/*
 * maps__find_symbol - find the map covering @addr, then the symbol at
 * the map-relative address within it; optionally reports the map via
 * @mapp. NOTE(review): the *mapp assignment and the NULL fall-through
 * return are missing from this extraction.
 */
649 struct symbol
*maps__find_symbol(struct maps
*maps
, u64 addr
, struct map
**mapp
)
651 struct map
*map
= maps__find(maps
, addr
);
653 /* Ensure map is loaded before using map->map_ip */
654 if (map
!= NULL
&& map__load(map
) >= 0) {
657 return map__find_symbol(map
, map
->map_ip(map
, addr
));
663 static bool map__contains_symbol(struct map
*map
, struct symbol
*sym
)
665 u64 ip
= map
->unmap_ip(map
, sym
->start
);
667 return ip
>= map
->start
&& ip
< map
->end
;
/*
 * maps__find_symbol_by_name - search every map (under the read lock) for
 * a symbol named @name, rejecting hits whose address does not actually
 * fall inside the map that produced them.
 * NOTE(review): declarations, the *mapp handling, the continue/found
 * paths and the returns are missing from this extraction.
 */
670 struct symbol
*maps__find_symbol_by_name(struct maps
*maps
, const char *name
, struct map
**mapp
)
675 down_read(&maps
->lock
);
677 maps__for_each_entry(maps
, pos
) {
678 sym
= map__find_symbol_by_name(pos
, name
);
/* same-named symbol may live in another map; verify containment */
682 if (!map__contains_symbol(pos
, sym
)) {
693 up_read(&maps
->lock
);
/*
 * maps__find_ams - (re)resolve an addr_map_symbol: if ams->addr no longer
 * falls inside its cached map, look the map up again in @maps, then
 * compute the map-relative address and resolve its symbol.
 * Returns 0 on success, -1 when no symbol could be resolved.
 * NOTE(review): braces and the error return in the map==NULL path are
 * missing from this extraction.
 */
698 int maps__find_ams(struct maps
*maps
, struct addr_map_symbol
*ams
)
/* cached map stale or absent? re-lookup by address */
699 if (ams
->addr
< ams
->ms
.map
->start
|| ams
->addr
>= ams
->ms
.map
->end
) {
702 ams
->ms
.map
= maps__find(maps
, ams
->addr
);
703 if (ams
->ms
.map
== NULL
)
707 ams
->al_addr
= ams
->ms
.map
->map_ip(ams
->ms
.map
, ams
->addr
);
708 ams
->ms
.sym
= map__find_symbol(ams
->ms
.map
, ams
->al_addr
);
710 return ams
->ms
.sym
? 0 : -1;
/*
 * maps__fprintf - dump every map (and, conditionally, its dso) to @fp
 * under the read lock, returning the total character count.
 * NOTE(review): the `printed` declaration/initialization, the verbose
 * gate around the dso dump and the return are missing from this
 * extraction.
 */
713 size_t maps__fprintf(struct maps
*maps
, FILE *fp
)
718 down_read(&maps
->lock
);
720 maps__for_each_entry(maps
, pos
) {
721 printed
+= fprintf(fp
, "Map:");
722 printed
+= map__fprintf(pos
, fp
);
724 printed
+= dso__fprintf(pos
->dso
, fp
);
725 printed
+= fprintf(fp
, "--\n");
729 up_read(&maps
->lock
);
/*
 * maps__fixup_overlappings - make room for @map in @maps: find every
 * existing map overlapping [map->start, map->end), remove it, and
 * re-insert clones trimmed to the non-overlapped head ("before") and
 * tail ("after") portions. Diagnostics go to @fp at high verbosity.
 *
 * NOTE(review): substantial interior lines are missing from this
 * extraction (the while loops, error paths/goto labels, map__put calls,
 * the after==NULL check, the return — original numbering skips).
 * Fragments kept byte-identical.
 */
734 int maps__fixup_overlappings(struct maps
*maps
, struct map
*map
, FILE *fp
)
736 struct rb_root
*root
;
737 struct rb_node
*next
, *first
;
740 down_write(&maps
->lock
);
742 root
= &maps
->entries
;
745 * Find first map where end > map->start.
746 * Same as find_vma() in kernel.
748 next
= root
->rb_node
;
751 struct map
*pos
= rb_entry(next
, struct map
, rb_node
);
753 if (pos
->end
> map
->start
) {
755 if (pos
->start
<= map
->start
)
757 next
= next
->rb_left
;
759 next
= next
->rb_right
;
/* walk forward from the first overlap */
764 struct map
*pos
= rb_entry(next
, struct map
, rb_node
);
765 next
= rb_next(&pos
->rb_node
);
768 * Stop if current map starts after map->end.
769 * Maps are ordered by start: next will not overlap for sure.
771 if (pos
->start
>= map
->end
)
777 pr_debug("overlapping maps in %s (disable tui for more info)\n",
780 fputs("overlapping maps:\n", fp
);
781 map__fprintf(map
, fp
);
782 map__fprintf(pos
, fp
);
786 rb_erase_init(&pos
->rb_node
, root
);
788 * Now check if we need to create new maps for areas not
789 * overlapped by the new map:
791 if (map
->start
> pos
->start
) {
792 struct map
*before
= map__clone(pos
);
794 if (before
== NULL
) {
/* keep the head of the old map, ending where the new one begins */
799 before
->end
= map
->start
;
800 __maps__insert(maps
, before
);
801 if (verbose
>= 2 && !use_browser
)
802 map__fprintf(before
, fp
);
806 if (map
->end
< pos
->end
) {
807 struct map
*after
= map__clone(pos
);
/* keep the tail of the old map, starting where the new one ends */
814 after
->start
= map
->end
;
815 after
->pgoff
+= map
->end
- pos
->start
;
816 assert(pos
->map_ip(pos
, map
->end
) == after
->map_ip(after
, map
->end
));
817 __maps__insert(maps
, after
);
818 if (verbose
>= 2 && !use_browser
)
819 map__fprintf(after
, fp
);
831 up_write(&maps
->lock
);
/*
 * maps__clone - copy every map of @parent into @thread's maps container
 * (cloning each map and preparing unwind access) while holding the
 * parent's read lock.
 * NOTE(review): error handling (out labels, err initialization),
 * braces and the return are missing from this extraction.
 */
836 * XXX This should not really _copy_ the maps, but refcount them.
838 int maps__clone(struct thread
*thread
, struct maps
*parent
)
840 struct maps
*maps
= thread
->maps
;
844 down_read(&parent
->lock
);
846 maps__for_each_entry(parent
, map
) {
847 struct map
*new = map__clone(map
);
851 err
= unwind__prepare_access(maps
, new, NULL
);
855 maps__insert(maps
, new);
861 up_read(&parent
->lock
);
/*
 * __maps__insert - link @map into the rbtree keyed by start address;
 * caller holds the write lock. Standard rbtree insertion: descend to
 * find the link slot, then link and rebalance.
 * NOTE(review): the descent loop (while/comparison/branch lines) and
 * the reference take are missing from this extraction.
 */
865 static void __maps__insert(struct maps
*maps
, struct map
*map
)
867 struct rb_node
**p
= &maps
->entries
.rb_node
;
868 struct rb_node
*parent
= NULL
;
869 const u64 ip
= map
->start
;
874 m
= rb_entry(parent
, struct map
, rb_node
);
881 rb_link_node(&map
->rb_node
, parent
, p
);
882 rb_insert_color(&map
->rb_node
, &maps
->entries
);
/*
 * maps__find - binary-search the rbtree (under the read lock) for the
 * map whose [start, end) range contains @ip; NULL when none does.
 * NOTE(review): the loop, the found/left-branch cases and the return are
 * missing from this extraction.
 */
886 struct map
*maps__find(struct maps
*maps
, u64 ip
)
891 down_read(&maps
->lock
);
893 p
= maps
->entries
.rb_node
;
895 m
= rb_entry(p
, struct map
, rb_node
);
/* ip beyond this map: descend right; (left/hit cases not in view) */
898 else if (ip
>= m
->end
)
906 up_read(&maps
->lock
);
910 struct map
*maps__first(struct maps
*maps
)
912 struct rb_node
*first
= rb_first(&maps
->entries
);
915 return rb_entry(first
, struct map
, rb_node
);
919 static struct map
*__map__next(struct map
*map
)
921 struct rb_node
*next
= rb_next(&map
->rb_node
);
924 return rb_entry(next
, struct map
, rb_node
);
928 struct map
*map__next(struct map
*map
)
930 return map
? __map__next(map
) : NULL
;
933 struct kmap
*__map__kmap(struct map
*map
)
935 if (!map
->dso
|| !map
->dso
->kernel
)
937 return (struct kmap
*)(map
+ 1);
/*
 * map__kmap - like __map__kmap but loudly reports misuse on a
 * non-kernel map. NOTE(review): the NULL test guarding the pr_err and
 * the return are missing from this extraction.
 */
940 struct kmap
*map__kmap(struct map
*map
)
942 struct kmap
*kmap
= __map__kmap(map
);
945 pr_err("Internal error: map__kmap with a non-kernel map\n");
949 struct maps
*map__kmaps(struct map
*map
)
951 struct kmap
*kmap
= map__kmap(map
);
953 if (!kmap
|| !kmap
->kmaps
) {
954 pr_err("Internal error: map__kmaps with a non-kernel map\n");