1 // SPDX-License-Identifier: GPL-2.0
11 #include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
14 #include "map_symbol.h"
20 #include <linux/string.h>
21 #include <linux/zalloc.h>
23 #include "namespaces.h"
28 static void __maps__insert(struct maps
*maps
, struct map
*map
);
30 static inline int is_anon_memory(const char *filename
, u32 flags
)
32 return flags
& MAP_HUGETLB
||
33 !strcmp(filename
, "//anon") ||
34 !strncmp(filename
, "/dev/zero", sizeof("/dev/zero") - 1) ||
35 !strncmp(filename
, "/anon_hugepage", sizeof("/anon_hugepage") - 1);
/*
 * Special pseudo-files ("[stack]", "[stack:tid]", SysV shm segments,
 * "[heap]") that can never be resolved to a DSO on disk.
 */
static inline int is_no_dso_memory(const char *filename)
{
	if (!strncmp(filename, "[stack", 6))
		return 1;
	if (!strncmp(filename, "/SYSV", 5))
		return 1;
	return !strcmp(filename, "[heap]") ? 1 : 0;
}
/* Android system/app library paths that may need APK/NDK translation. */
static inline int is_android_lib(const char *filename)
{
	static const char app_lib[] = "/data/app-lib";
	static const char sys_lib[] = "/system/lib";

	return !strncmp(filename, app_lib, sizeof(app_lib) - 1) ||
	       !strncmp(filename, sys_lib, sizeof(sys_lib) - 1);
}
51 static inline bool replace_android_lib(const char *filename
, char *newfilename
)
55 size_t app_abi_length
, new_length
;
56 size_t lib_length
= 0;
58 libname
= strrchr(filename
, '/');
60 lib_length
= strlen(libname
);
62 app_abi
= getenv("APP_ABI");
66 app_abi_length
= strlen(app_abi
);
68 if (!strncmp(filename
, "/data/app-lib", 13)) {
74 new_length
= 7 + app_abi_length
+ lib_length
;
76 apk_path
= getenv("APK_PATH");
78 new_length
+= strlen(apk_path
) + 1;
79 if (new_length
> PATH_MAX
)
81 snprintf(newfilename
, new_length
,
82 "%s/libs/%s/%s", apk_path
, app_abi
, libname
);
84 if (new_length
> PATH_MAX
)
86 snprintf(newfilename
, new_length
,
87 "libs/%s/%s", app_abi
, libname
);
92 if (!strncmp(filename
, "/system/lib/", 11)) {
98 ndk
= getenv("NDK_ROOT");
99 app
= getenv("APP_PLATFORM");
104 ndk_length
= strlen(ndk
);
105 app_length
= strlen(app
);
107 if (!(ndk_length
&& app_length
&& app_abi_length
))
110 arch
= !strncmp(app_abi
, "arm", 3) ? "arm" :
111 !strncmp(app_abi
, "mips", 4) ? "mips" :
112 !strncmp(app_abi
, "x86", 3) ? "x86" : NULL
;
117 new_length
= 27 + ndk_length
+
118 app_length
+ lib_length
121 if (new_length
> PATH_MAX
)
123 snprintf(newfilename
, new_length
,
124 "%s/platforms/%s/arch-%s/usr/lib/%s",
125 ndk
, app
, arch
, libname
);
132 void map__init(struct map
*map
, u64 start
, u64 end
, u64 pgoff
, struct dso
*dso
)
138 map
->dso
= dso__get(dso
);
139 map
->map_ip
= map__map_ip
;
140 map
->unmap_ip
= map__unmap_ip
;
141 RB_CLEAR_NODE(&map
->rb_node
);
142 map
->erange_warned
= false;
143 refcount_set(&map
->refcnt
, 1);
146 struct map
*map__new(struct machine
*machine
, u64 start
, u64 len
,
147 u64 pgoff
, struct dso_id
*id
,
148 u32 prot
, u32 flags
, char *filename
,
149 struct thread
*thread
)
151 struct map
*map
= malloc(sizeof(*map
));
152 struct nsinfo
*nsi
= NULL
;
156 char newfilename
[PATH_MAX
];
158 int anon
, no_dso
, vdso
, android
;
160 android
= is_android_lib(filename
);
161 anon
= is_anon_memory(filename
, flags
);
162 vdso
= is_vdso_map(filename
);
163 no_dso
= is_no_dso_memory(filename
);
166 nsi
= nsinfo__get(thread
->nsinfo
);
168 if ((anon
|| no_dso
) && nsi
&& (prot
& PROT_EXEC
)) {
169 snprintf(newfilename
, sizeof(newfilename
),
170 "/tmp/perf-%d.map", nsi
->pid
);
171 filename
= newfilename
;
175 if (replace_android_lib(filename
, newfilename
))
176 filename
= newfilename
;
180 /* The vdso maps are always on the host and not the
181 * container. Ensure that we don't use setns to look
184 nnsi
= nsinfo__copy(nsi
);
187 nnsi
->need_setns
= false;
191 dso
= machine__findnew_vdso(machine
, thread
);
193 dso
= machine__findnew_dso_id(machine
, filename
, id
);
198 map__init(map
, start
, start
+ len
, pgoff
, dso
);
200 if (anon
|| no_dso
) {
201 map
->map_ip
= map
->unmap_ip
= identity__map_ip
;
204 * Set memory without DSO as loaded. All map__find_*
205 * functions still return NULL, and we avoid the
206 * unnecessary map__load warning.
208 if (!(prot
& PROT_EXEC
))
209 dso__set_loaded(dso
);
222 * Constructor variant for modules (where we know from /proc/modules where
223 * they are loaded) and for vmlinux, where only after we load all the
224 * symbols we'll know where it starts and ends.
226 struct map
*map__new2(u64 start
, struct dso
*dso
)
228 struct map
*map
= calloc(1, (sizeof(*map
) +
229 (dso
->kernel
? sizeof(struct kmap
) : 0)));
232 * ->end will be filled after we load all the symbols
234 map__init(map
, start
, 0, 0, dso
);
240 bool __map__is_kernel(const struct map
*map
)
242 if (!map
->dso
->kernel
)
244 return machine__kernel_map(map__kmaps((struct map
*)map
)->machine
) == map
;
247 bool __map__is_extra_kernel_map(const struct map
*map
)
249 struct kmap
*kmap
= __map__kmap((struct map
*)map
);
251 return kmap
&& kmap
->name
[0];
254 bool __map__is_bpf_prog(const struct map
*map
)
258 if (map
->dso
->binary_type
== DSO_BINARY_TYPE__BPF_PROG_INFO
)
262 * If PERF_RECORD_BPF_EVENT is not included, the dso will not have
263 * type of DSO_BINARY_TYPE__BPF_PROG_INFO. In such cases, we can
264 * guess the type based on name.
266 name
= map
->dso
->short_name
;
267 return name
&& (strstr(name
, "bpf_prog_") == name
);
270 bool map__has_symbols(const struct map
*map
)
272 return dso__has_symbols(map
->dso
);
275 static void map__exit(struct map
*map
)
277 BUG_ON(refcount_read(&map
->refcnt
) != 0);
281 void map__delete(struct map
*map
)
/*
 * Drop one reference on @map; NULL-safe. When the refcount reaches zero
 * the map is destroyed (the destruction call itself is elided from this
 * view of the file — presumably map__delete(); confirm against full source).
 */
287 void map__put(struct map
*map
)
289 if (map
&& refcount_dec_and_test(&map
->refcnt
))
293 void map__fixup_start(struct map
*map
)
295 struct rb_root_cached
*symbols
= &map
->dso
->symbols
;
296 struct rb_node
*nd
= rb_first_cached(symbols
);
298 struct symbol
*sym
= rb_entry(nd
, struct symbol
, rb_node
);
299 map
->start
= sym
->start
;
303 void map__fixup_end(struct map
*map
)
305 struct rb_root_cached
*symbols
= &map
->dso
->symbols
;
306 struct rb_node
*nd
= rb_last(&symbols
->rb_root
);
308 struct symbol
*sym
= rb_entry(nd
, struct symbol
, rb_node
);
313 #define DSO__DELETED "(deleted)"
315 int map__load(struct map
*map
)
317 const char *name
= map
->dso
->long_name
;
320 if (dso__loaded(map
->dso
))
323 nr
= dso__load(map
->dso
, map
);
325 if (map
->dso
->has_build_id
) {
326 char sbuild_id
[SBUILD_ID_SIZE
];
328 build_id__sprintf(map
->dso
->build_id
,
329 sizeof(map
->dso
->build_id
),
331 pr_debug("%s with build id %s not found", name
, sbuild_id
);
333 pr_debug("Failed to open %s", name
);
335 pr_debug(", continuing without symbols\n");
337 } else if (nr
== 0) {
338 #ifdef HAVE_LIBELF_SUPPORT
339 const size_t len
= strlen(name
);
340 const size_t real_len
= len
- sizeof(DSO__DELETED
);
342 if (len
> sizeof(DSO__DELETED
) &&
343 strcmp(name
+ real_len
+ 1, DSO__DELETED
) == 0) {
344 pr_debug("%.*s was updated (is prelink enabled?). "
345 "Restart the long running apps that use it!\n",
346 (int)real_len
, name
);
348 pr_debug("no symbols found in %s, maybe install a debug package?\n", name
);
357 struct symbol
*map__find_symbol(struct map
*map
, u64 addr
)
359 if (map__load(map
) < 0)
362 return dso__find_symbol(map
->dso
, addr
);
365 struct symbol
*map__find_symbol_by_name(struct map
*map
, const char *name
)
367 if (map__load(map
) < 0)
370 if (!dso__sorted_by_name(map
->dso
))
371 dso__sort_by_name(map
->dso
);
373 return dso__find_symbol_by_name(map
->dso
, name
);
376 struct map
*map__clone(struct map
*from
)
378 struct map
*map
= memdup(from
, sizeof(*map
));
381 refcount_set(&map
->refcnt
, 1);
382 RB_CLEAR_NODE(&map
->rb_node
);
389 size_t map__fprintf(struct map
*map
, FILE *fp
)
391 return fprintf(fp
, " %" PRIx64
"-%" PRIx64
" %" PRIx64
" %s\n",
392 map
->start
, map
->end
, map
->pgoff
, map
->dso
->name
);
395 size_t map__fprintf_dsoname(struct map
*map
, FILE *fp
)
397 char buf
[symbol_conf
.pad_output_len_dso
+ 1];
398 const char *dsoname
= "[unknown]";
400 if (map
&& map
->dso
) {
401 if (symbol_conf
.show_kernel_path
&& map
->dso
->long_name
)
402 dsoname
= map
->dso
->long_name
;
404 dsoname
= map
->dso
->name
;
407 if (symbol_conf
.pad_output_len_dso
) {
408 scnprintf_pad(buf
, symbol_conf
.pad_output_len_dso
, "%s", dsoname
);
412 return fprintf(fp
, "%s", dsoname
);
415 char *map__srcline(struct map
*map
, u64 addr
, struct symbol
*sym
)
418 return SRCLINE_UNKNOWN
;
419 return get_srcline(map
->dso
, map__rip_2objdump(map
, addr
), sym
, true, true, addr
);
422 int map__fprintf_srcline(struct map
*map
, u64 addr
, const char *prefix
,
427 if (map
&& map
->dso
) {
428 char *srcline
= map__srcline(map
, addr
, NULL
);
429 if (srcline
!= SRCLINE_UNKNOWN
)
430 ret
= fprintf(fp
, "%s%s", prefix
, srcline
);
431 free_srcline(srcline
);
/*
 * Release the source-file name held by a srccode_state. zfree() frees the
 * string and NULLs the pointer. (Any further field resets are elided from
 * this view of the file — confirm against full source.)
 */
436 void srccode_state_free(struct srccode_state
*state
)
438 zfree(&state
->srcfile
);
443 * map__rip_2objdump - convert symbol start address to objdump address.
445 * @rip: symbol start address
447 * objdump wants/reports absolute IPs for ET_EXEC, and RIPs for ET_DYN.
448 * map->dso->adjust_symbols==1 for ET_EXEC-like cases except ET_REL which is
449 * relative to section start.
451 * Return: Address suitable for passing to "objdump --start-address="
453 u64
map__rip_2objdump(struct map
*map
, u64 rip
)
455 struct kmap
*kmap
= __map__kmap(map
);
458 * vmlinux does not have program headers for PTI entry trampolines and
459 * kcore may not either. However the trampoline object code is on the
460 * main kernel map, so just use that instead.
462 if (kmap
&& is_entry_trampoline(kmap
->name
) && kmap
->kmaps
&& kmap
->kmaps
->machine
) {
463 struct map
*kernel_map
= machine__kernel_map(kmap
->kmaps
->machine
);
469 if (!map
->dso
->adjust_symbols
)
473 return rip
- map
->pgoff
;
476 * kernel modules also have DSO_TYPE_USER in dso->kernel,
477 * but all kernel modules are ET_REL, so won't get here.
479 if (map
->dso
->kernel
== DSO_TYPE_USER
)
480 return rip
+ map
->dso
->text_offset
;
482 return map
->unmap_ip(map
, rip
) - map
->reloc
;
486 * map__objdump_2mem - convert objdump address to a memory address.
488 * @ip: objdump address
490 * Closely related to map__rip_2objdump(), this function takes an address from
491 * objdump and converts it to a memory address. Note this assumes that @map
492 * contains the address. To be sure the result is valid, check it forwards
493 * e.g. map__rip_2objdump(map->map_ip(map, map__objdump_2mem(map, ip))) == ip
495 * Return: Memory address.
497 u64
map__objdump_2mem(struct map
*map
, u64 ip
)
499 if (!map
->dso
->adjust_symbols
)
500 return map
->unmap_ip(map
, ip
);
503 return map
->unmap_ip(map
, ip
+ map
->pgoff
);
506 * kernel modules also have DSO_TYPE_USER in dso->kernel,
507 * but all kernel modules are ET_REL, so won't get here.
509 if (map
->dso
->kernel
== DSO_TYPE_USER
)
510 return map
->unmap_ip(map
, ip
- map
->dso
->text_offset
);
512 return ip
+ map
->reloc
;
515 void maps__init(struct maps
*maps
, struct machine
*machine
)
517 maps
->entries
= RB_ROOT
;
518 init_rwsem(&maps
->lock
);
519 maps
->machine
= machine
;
520 maps
->last_search_by_name
= NULL
;
522 maps
->maps_by_name
= NULL
;
523 refcount_set(&maps
->refcnt
, 1);
526 static void __maps__free_maps_by_name(struct maps
*maps
)
529 * Free everything to try to do it from the rbtree in the next search
531 zfree(&maps
->maps_by_name
);
532 maps
->nr_maps_allocated
= 0;
535 void maps__insert(struct maps
*maps
, struct map
*map
)
537 down_write(&maps
->lock
);
538 __maps__insert(maps
, map
);
542 * If we already performed some search by name, then we need to add the just
543 * inserted map and resort.
545 if (maps
->maps_by_name
) {
546 if (maps
->nr_maps
> maps
->nr_maps_allocated
) {
547 int nr_allocate
= maps
->nr_maps
* 2;
548 struct map
**maps_by_name
= realloc(maps
->maps_by_name
, nr_allocate
* sizeof(map
));
550 if (maps_by_name
== NULL
) {
551 __maps__free_maps_by_name(maps
);
555 maps
->maps_by_name
= maps_by_name
;
556 maps
->nr_maps_allocated
= nr_allocate
;
558 maps
->maps_by_name
[maps
->nr_maps
- 1] = map
;
559 __maps__sort_by_name(maps
);
561 up_write(&maps
->lock
);
564 static void __maps__remove(struct maps
*maps
, struct map
*map
)
566 rb_erase_init(&map
->rb_node
, &maps
->entries
);
570 void maps__remove(struct maps
*maps
, struct map
*map
)
572 down_write(&maps
->lock
);
573 if (maps
->last_search_by_name
== map
)
574 maps
->last_search_by_name
= NULL
;
576 __maps__remove(maps
, map
);
578 if (maps
->maps_by_name
)
579 __maps__free_maps_by_name(maps
);
580 up_write(&maps
->lock
);
583 static void __maps__purge(struct maps
*maps
)
585 struct map
*pos
, *next
;
587 maps__for_each_entry_safe(maps
, pos
, next
) {
588 rb_erase_init(&pos
->rb_node
, &maps
->entries
);
593 void maps__exit(struct maps
*maps
)
595 down_write(&maps
->lock
);
597 up_write(&maps
->lock
);
600 bool maps__empty(struct maps
*maps
)
602 return !maps__first(maps
);
605 struct maps
*maps__new(struct machine
*machine
)
607 struct maps
*maps
= zalloc(sizeof(*maps
));
610 maps__init(maps
, machine
);
615 void maps__delete(struct maps
*maps
)
618 unwind__finish_access(maps
);
622 void maps__put(struct maps
*maps
)
624 if (maps
&& refcount_dec_and_test(&maps
->refcnt
))
628 struct symbol
*maps__find_symbol(struct maps
*maps
, u64 addr
, struct map
**mapp
)
630 struct map
*map
= maps__find(maps
, addr
);
632 /* Ensure map is loaded before using map->map_ip */
633 if (map
!= NULL
&& map__load(map
) >= 0) {
636 return map__find_symbol(map
, map
->map_ip(map
, addr
));
642 static bool map__contains_symbol(struct map
*map
, struct symbol
*sym
)
644 u64 ip
= map
->unmap_ip(map
, sym
->start
);
646 return ip
>= map
->start
&& ip
< map
->end
;
649 struct symbol
*maps__find_symbol_by_name(struct maps
*maps
, const char *name
, struct map
**mapp
)
654 down_read(&maps
->lock
);
656 maps__for_each_entry(maps
, pos
) {
657 sym
= map__find_symbol_by_name(pos
, name
);
661 if (!map__contains_symbol(pos
, sym
)) {
672 up_read(&maps
->lock
);
676 int maps__find_ams(struct maps
*maps
, struct addr_map_symbol
*ams
)
678 if (ams
->addr
< ams
->ms
.map
->start
|| ams
->addr
>= ams
->ms
.map
->end
) {
681 ams
->ms
.map
= maps__find(maps
, ams
->addr
);
682 if (ams
->ms
.map
== NULL
)
686 ams
->al_addr
= ams
->ms
.map
->map_ip(ams
->ms
.map
, ams
->addr
);
687 ams
->ms
.sym
= map__find_symbol(ams
->ms
.map
, ams
->al_addr
);
689 return ams
->ms
.sym
? 0 : -1;
692 size_t maps__fprintf(struct maps
*maps
, FILE *fp
)
697 down_read(&maps
->lock
);
699 maps__for_each_entry(maps
, pos
) {
700 printed
+= fprintf(fp
, "Map:");
701 printed
+= map__fprintf(pos
, fp
);
703 printed
+= dso__fprintf(pos
->dso
, fp
);
704 printed
+= fprintf(fp
, "--\n");
708 up_read(&maps
->lock
);
713 int maps__fixup_overlappings(struct maps
*maps
, struct map
*map
, FILE *fp
)
715 struct rb_root
*root
;
716 struct rb_node
*next
, *first
;
719 down_write(&maps
->lock
);
721 root
= &maps
->entries
;
724 * Find first map where end > map->start.
725 * Same as find_vma() in kernel.
727 next
= root
->rb_node
;
730 struct map
*pos
= rb_entry(next
, struct map
, rb_node
);
732 if (pos
->end
> map
->start
) {
734 if (pos
->start
<= map
->start
)
736 next
= next
->rb_left
;
738 next
= next
->rb_right
;
743 struct map
*pos
= rb_entry(next
, struct map
, rb_node
);
744 next
= rb_next(&pos
->rb_node
);
747 * Stop if current map starts after map->end.
748 * Maps are ordered by start: next will not overlap for sure.
750 if (pos
->start
>= map
->end
)
756 pr_debug("overlapping maps in %s (disable tui for more info)\n",
759 fputs("overlapping maps:\n", fp
);
760 map__fprintf(map
, fp
);
761 map__fprintf(pos
, fp
);
765 rb_erase_init(&pos
->rb_node
, root
);
767 * Now check if we need to create new maps for areas not
768 * overlapped by the new map:
770 if (map
->start
> pos
->start
) {
771 struct map
*before
= map__clone(pos
);
773 if (before
== NULL
) {
778 before
->end
= map
->start
;
779 __maps__insert(maps
, before
);
780 if (verbose
>= 2 && !use_browser
)
781 map__fprintf(before
, fp
);
785 if (map
->end
< pos
->end
) {
786 struct map
*after
= map__clone(pos
);
793 after
->start
= map
->end
;
794 after
->pgoff
+= map
->end
- pos
->start
;
795 assert(pos
->map_ip(pos
, map
->end
) == after
->map_ip(after
, map
->end
));
796 __maps__insert(maps
, after
);
797 if (verbose
>= 2 && !use_browser
)
798 map__fprintf(after
, fp
);
810 up_write(&maps
->lock
);
815 * XXX This should not really _copy_ te maps, but refcount them.
817 int maps__clone(struct thread
*thread
, struct maps
*parent
)
819 struct maps
*maps
= thread
->maps
;
823 down_read(&parent
->lock
);
825 maps__for_each_entry(parent
, map
) {
826 struct map
*new = map__clone(map
);
830 err
= unwind__prepare_access(maps
, new, NULL
);
834 maps__insert(maps
, new);
840 up_read(&parent
->lock
);
844 static void __maps__insert(struct maps
*maps
, struct map
*map
)
846 struct rb_node
**p
= &maps
->entries
.rb_node
;
847 struct rb_node
*parent
= NULL
;
848 const u64 ip
= map
->start
;
853 m
= rb_entry(parent
, struct map
, rb_node
);
860 rb_link_node(&map
->rb_node
, parent
, p
);
861 rb_insert_color(&map
->rb_node
, &maps
->entries
);
865 struct map
*maps__find(struct maps
*maps
, u64 ip
)
870 down_read(&maps
->lock
);
872 p
= maps
->entries
.rb_node
;
874 m
= rb_entry(p
, struct map
, rb_node
);
877 else if (ip
>= m
->end
)
885 up_read(&maps
->lock
);
889 struct map
*maps__first(struct maps
*maps
)
891 struct rb_node
*first
= rb_first(&maps
->entries
);
894 return rb_entry(first
, struct map
, rb_node
);
898 static struct map
*__map__next(struct map
*map
)
900 struct rb_node
*next
= rb_next(&map
->rb_node
);
903 return rb_entry(next
, struct map
, rb_node
);
/* NULL-safe wrapper: advance to the next map in the rbtree, or NULL. */
struct map *map__next(struct map *map)
{
	if (map == NULL)
		return NULL;

	return __map__next(map);
}
912 struct kmap
*__map__kmap(struct map
*map
)
914 if (!map
->dso
|| !map
->dso
->kernel
)
916 return (struct kmap
*)(map
+ 1);
919 struct kmap
*map__kmap(struct map
*map
)
921 struct kmap
*kmap
= __map__kmap(map
);
924 pr_err("Internal error: map__kmap with a non-kernel map\n");
928 struct maps
*map__kmaps(struct map
*map
)
930 struct kmap
*kmap
= map__kmap(map
);
932 if (!kmap
|| !kmap
->kmaps
) {
933 pr_err("Internal error: map__kmaps with a non-kernel map\n");