// SPDX-License-Identifier: GPL-2.0
#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
#include <linux/string.h>
#include "namespaces.h"

static void __maps__insert(struct maps *maps, struct map *map);
static void __maps__insert_name(struct maps *maps, struct map *map);

static inline int is_anon_memory(const char *filename, u32 flags)
{
        return flags & MAP_HUGETLB ||
               !strcmp(filename, "//anon") ||
               !strncmp(filename, "/dev/zero", sizeof("/dev/zero") - 1) ||
               !strncmp(filename, "/anon_hugepage", sizeof("/anon_hugepage") - 1);
}

static inline int is_no_dso_memory(const char *filename)
{
        return !strncmp(filename, "[stack", 6) ||
               !strncmp(filename, "/SYSV", 5)  ||
               !strcmp(filename, "[heap]");
}

static inline int is_android_lib(const char *filename)
{
        return !strncmp(filename, "/data/app-lib", 13) ||
               !strncmp(filename, "/system/lib", 11);
}

static inline bool replace_android_lib(const char *filename, char *newfilename)
{
        const char *libname;
        char *app_abi;
        size_t app_abi_length, new_length;
        size_t lib_length = 0;

        libname = strrchr(filename, '/');
        if (libname)
                lib_length = strlen(libname);

        app_abi = getenv("APP_ABI");
        if (!app_abi)
                return false;

        app_abi_length = strlen(app_abi);

        if (!strncmp(filename, "/data/app-lib", 13)) {
                char *apk_path;

                if (!app_abi_length)
                        return false;

                new_length = 7 + app_abi_length + lib_length;

                apk_path = getenv("APK_PATH");
                if (apk_path) {
                        new_length += strlen(apk_path) + 1;
                        if (new_length > PATH_MAX)
                                return false;
                        snprintf(newfilename, new_length,
                                 "%s/libs/%s/%s", apk_path, app_abi, libname);
                } else {
                        if (new_length > PATH_MAX)
                                return false;
                        snprintf(newfilename, new_length,
                                 "libs/%s/%s", app_abi, libname);
                }
                return true;
        }

        if (!strncmp(filename, "/system/lib/", 11)) {
                char *ndk, *app;
                const char *arch;
                size_t ndk_length, app_length;

                ndk = getenv("NDK_ROOT");
                app = getenv("APP_PLATFORM");

                if (!(ndk && app))
                        return false;

                ndk_length = strlen(ndk);
                app_length = strlen(app);

                if (!(ndk_length && app_length && app_abi_length))
                        return false;

                arch = !strncmp(app_abi, "arm", 3) ? "arm" :
                       !strncmp(app_abi, "mips", 4) ? "mips" :
                       !strncmp(app_abi, "x86", 3) ? "x86" : NULL;

                if (!arch)
                        return false;

                new_length = 27 + ndk_length +
                             app_length + lib_length;

                if (new_length > PATH_MAX)
                        return false;
                snprintf(newfilename, new_length,
                         "%s/platforms/%s/arch-%s/usr/lib/%s",
                         ndk, app, arch, libname);

                return true;
        }
        return false;
}

void map__init(struct map *map, u64 start, u64 end, u64 pgoff, struct dso *dso)
{
        map->start = start;
        map->end = end;
        map->pgoff = pgoff;
        map->dso = dso__get(dso);
        map->map_ip = map__map_ip;
        map->unmap_ip = map__unmap_ip;
        RB_CLEAR_NODE(&map->rb_node);
        map->groups = NULL;
        map->erange_warned = false;
        refcount_set(&map->refcnt, 1);
}

struct map *map__new(struct machine *machine, u64 start, u64 len,
                     u64 pgoff, u32 d_maj, u32 d_min, u64 ino,
                     u64 ino_gen, u32 prot, u32 flags, char *filename,
                     struct thread *thread)
{
        struct map *map = malloc(sizeof(*map));
        struct nsinfo *nsi = NULL;
        struct nsinfo *nnsi;

        if (map != NULL) {
                char newfilename[PATH_MAX];
                struct dso *dso;
                int anon, no_dso, vdso, android;

                android = is_android_lib(filename);
                anon = is_anon_memory(filename, flags);
                vdso = is_vdso_map(filename);
                no_dso = is_no_dso_memory(filename);

                map->maj = d_maj;
                map->min = d_min;
                map->ino = ino;
                map->ino_generation = ino_gen;
                map->prot = prot;
                map->flags = flags;
                nsi = nsinfo__get(thread->nsinfo);

                if ((anon || no_dso) && nsi && (prot & PROT_EXEC)) {
                        snprintf(newfilename, sizeof(newfilename),
                                 "/tmp/perf-%d.map", nsi->pid);
                        filename = newfilename;
                }

                if (android) {
                        if (replace_android_lib(filename, newfilename))
                                filename = newfilename;
                }

                if (vdso) {
                        /* The vdso maps are always on the host and not the
                         * container.  Ensure that we don't use setns to look
                         * them up.
                         */
                        nnsi = nsinfo__copy(nsi);
                        if (nnsi) {
                                nsinfo__put(nsi);
                                nnsi->need_setns = false;
                                nsi = nnsi;
                        }
                        dso = machine__findnew_vdso(machine, thread);
                } else
                        dso = machine__findnew_dso(machine, filename);

                map__init(map, start, start + len, pgoff, dso);

                if (anon || no_dso) {
                        map->map_ip = map->unmap_ip = identity__map_ip;

                        /*
                         * Set memory without DSO as loaded. All map__find_*
                         * functions still return NULL, and we avoid the
                         * unnecessary map__load warning.
                         */
                        if (!(prot & PROT_EXEC))
                                dso__set_loaded(dso);
                }
        }
        return map;
}
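
/*
 * Note: the /tmp/perf-<pid>.map name synthesized above for executable
 * anonymous memory follows perf's convention for JIT-generated code, where a
 * runtime can export symbols for otherwise DSO-less mappings.
 */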

/*
 * Constructor variant for modules (where we know from /proc/modules where
 * they are loaded) and for vmlinux, where only after we load all the
 * symbols we'll know where it starts and ends.
 */
struct map *map__new2(u64 start, struct dso *dso)
{
        struct map *map = calloc(1, (sizeof(*map) +
                                     (dso->kernel ? sizeof(struct kmap) : 0)));
        if (map != NULL) {
                /*
                 * ->end will be filled after we load all the symbols
                 */
                map__init(map, start, 0, 0, dso);
        }

        return map;
}

/*
 * Use this and __map__is_kmodule() for map instances that are in
 * machine->kmaps, and thus have map->groups->machine all properly set, to
 * disambiguate between the kernel and modules.
 *
 * When the need arises, introduce map__is_{kernel,kmodule}() that
 * checks (map->groups != NULL && map->groups->machine != NULL &&
 * map->dso->kernel) before calling __map__is_{kernel,kmodule}().
 */
bool __map__is_kernel(const struct map *map)
{
        return machine__kernel_map(map->groups->machine) == map;
}
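
/*
 * For example, the map backed by the "[kernel.kallsyms]" dso is the one
 * returned by machine__kernel_map(), so it is the only map in machine->kmaps
 * for which __map__is_kernel() is true; module maps compare unequal and are
 * handled by the __map__is_kmodule() counterpart.
 */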

bool __map__is_extra_kernel_map(const struct map *map)
{
        struct kmap *kmap = __map__kmap((struct map *)map);

        return kmap && kmap->name[0];
}

bool __map__is_bpf_prog(const struct map *map)
{
        const char *name;

        if (map->dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO)
                return true;

        /*
         * If PERF_RECORD_BPF_EVENT is not included, the dso will not have
         * type of DSO_BINARY_TYPE__BPF_PROG_INFO. In such cases, we can
         * guess the type based on name.
         */
        name = map->dso->short_name;
        return name && (strstr(name, "bpf_prog_") == name);
}
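
/*
 * The name check above matches short names of the form bpf_prog_<tag> or
 * bpf_prog_<tag>_<name>, which is how the kernel reports symbols for BPF
 * programs.
 */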

bool map__has_symbols(const struct map *map)
{
        return dso__has_symbols(map->dso);
}

static void map__exit(struct map *map)
{
        BUG_ON(!RB_EMPTY_NODE(&map->rb_node));
        dso__zput(map->dso);
}

void map__delete(struct map *map)
{
        map__exit(map);
        free(map);
}

void map__put(struct map *map)
{
        if (map && refcount_dec_and_test(&map->refcnt))
                map__delete(map);
}

void map__fixup_start(struct map *map)
{
        struct rb_root_cached *symbols = &map->dso->symbols;
        struct rb_node *nd = rb_first_cached(symbols);

        if (nd != NULL) {
                struct symbol *sym = rb_entry(nd, struct symbol, rb_node);

                map->start = sym->start;
        }
}

void map__fixup_end(struct map *map)
{
        struct rb_root_cached *symbols = &map->dso->symbols;
        struct rb_node *nd = rb_last(&symbols->rb_root);

        if (nd != NULL) {
                struct symbol *sym = rb_entry(nd, struct symbol, rb_node);

                map->end = sym->end;
        }
}

#define DSO__DELETED "(deleted)"

int map__load(struct map *map)
{
        const char *name = map->dso->long_name;
        int nr;

        if (dso__loaded(map->dso))
                return 0;

        nr = dso__load(map->dso, map);
        if (nr < 0) {
                if (map->dso->has_build_id) {
                        char sbuild_id[SBUILD_ID_SIZE];

                        build_id__sprintf(map->dso->build_id,
                                          sizeof(map->dso->build_id),
                                          sbuild_id);
                        pr_debug("%s with build id %s not found", name, sbuild_id);
                } else
                        pr_debug("Failed to open %s", name);

                pr_debug(", continuing without symbols\n");
                return -1;
        } else if (nr == 0) {
#ifdef HAVE_LIBELF_SUPPORT
                const size_t len = strlen(name);
                const size_t real_len = len - sizeof(DSO__DELETED);

                if (len > sizeof(DSO__DELETED) &&
                    strcmp(name + real_len + 1, DSO__DELETED) == 0) {
                        pr_debug("%.*s was updated (is prelink enabled?). "
                                 "Restart the long running apps that use it!\n",
                                 (int)real_len, name);
                } else {
                        pr_debug("no symbols found in %s, maybe install a debug package?\n", name);
                }
#endif
                return -1;
        }

        return 0;
}

struct symbol *map__find_symbol(struct map *map, u64 addr)
{
        if (map__load(map) < 0)
                return NULL;

        return dso__find_symbol(map->dso, addr);
}

struct symbol *map__find_symbol_by_name(struct map *map, const char *name)
{
        if (map__load(map) < 0)
                return NULL;

        if (!dso__sorted_by_name(map->dso))
                dso__sort_by_name(map->dso);

        return dso__find_symbol_by_name(map->dso, name);
}

struct map *map__clone(struct map *from)
{
        struct map *map = memdup(from, sizeof(*map));

        if (map != NULL) {
                refcount_set(&map->refcnt, 1);
                RB_CLEAR_NODE(&map->rb_node);
                dso__get(map->dso);
                map->groups = NULL;
        }

        return map;
}

size_t map__fprintf(struct map *map, FILE *fp)
{
        return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s\n",
                       map->start, map->end, map->pgoff, map->dso->name);
}

size_t map__fprintf_dsoname(struct map *map, FILE *fp)
{
        const char *dsoname = "[unknown]";

        if (map && map->dso) {
                if (symbol_conf.show_kernel_path && map->dso->long_name)
                        dsoname = map->dso->long_name;
                else
                        dsoname = map->dso->name;
        }

        return fprintf(fp, "%s", dsoname);
}

char *map__srcline(struct map *map, u64 addr, struct symbol *sym)
{
        if (map == NULL)
                return SRCLINE_UNKNOWN;
        return get_srcline(map->dso, map__rip_2objdump(map, addr), sym, true, true, addr);
}

int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix,
                         FILE *fp)
{
        int ret = 0;

        if (map && map->dso) {
                char *srcline = map__srcline(map, addr, NULL);
                if (srcline != SRCLINE_UNKNOWN)
                        ret = fprintf(fp, "%s%s", prefix, srcline);
                free_srcline(srcline);
        }
        return ret;
}

int map__fprintf_srccode(struct map *map, u64 addr,
                         FILE *fp,
                         struct srccode_state *state)
{
        char *srcfile;
        char *srccode;
        unsigned line;
        int len;
        int ret = 0;

        if (!map || !map->dso)
                return 0;
        srcfile = get_srcline_split(map->dso,
                                    map__rip_2objdump(map, addr),
                                    &line);
        if (!srcfile)
                return 0;

        /* Avoid redundant printing */
        if (state &&
            state->srcfile &&
            !strcmp(state->srcfile, srcfile) &&
            state->line == line) {
                free(srcfile);
                return 0;
        }

        srccode = find_sourceline(srcfile, line, &len);
        if (!srccode) {
                free(srcfile);
                return ret;
        }

        ret = fprintf(fp, "|%-8d %.*s", line, len, srccode);
        state->srcfile = srcfile;
        state->line = line;
        return ret;
}

void srccode_state_free(struct srccode_state *state)
{
        zfree(&state->srcfile);
        state->line = 0;
}

/**
 * map__rip_2objdump - convert symbol start address to objdump address.
 * @map: memory map
 * @rip: symbol start address
 *
 * objdump wants/reports absolute IPs for ET_EXEC, and RIPs for ET_DYN.
 * map->dso->adjust_symbols==1 for ET_EXEC-like cases except ET_REL which is
 * relative to section start.
 *
 * Return: Address suitable for passing to "objdump --start-address="
 */
u64 map__rip_2objdump(struct map *map, u64 rip)
{
        struct kmap *kmap = __map__kmap(map);

        /*
         * vmlinux does not have program headers for PTI entry trampolines and
         * kcore may not either. However the trampoline object code is on the
         * main kernel map, so just use that instead.
         */
        if (kmap && is_entry_trampoline(kmap->name) && kmap->kmaps && kmap->kmaps->machine) {
                struct map *kernel_map = machine__kernel_map(kmap->kmaps->machine);

                if (kernel_map)
                        map = kernel_map;
        }

        if (!map->dso->adjust_symbols)
                return rip;

        if (map->dso->rel)
                return rip - map->pgoff;

        /*
         * kernel modules also have DSO_TYPE_USER in dso->kernel,
         * but all kernel modules are ET_REL, so won't get here.
         */
        if (map->dso->kernel == DSO_TYPE_USER)
                return rip + map->dso->text_offset;

        return map->unmap_ip(map, rip) - map->reloc;
}
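
/*
 * The inverse direction is handled by map__objdump_2mem() below; the two
 * conversions are meant to round-trip, see the "check it forwards" note in
 * that function's comment.
 */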

/**
 * map__objdump_2mem - convert objdump address to a memory address.
 * @map: memory map
 * @ip: objdump address
 *
 * Closely related to map__rip_2objdump(), this function takes an address from
 * objdump and converts it to a memory address. Note this assumes that @map
 * contains the address. To be sure the result is valid, check it forwards
 * e.g. map__rip_2objdump(map->map_ip(map, map__objdump_2mem(map, ip))) == ip
 *
 * Return: Memory address.
 */
u64 map__objdump_2mem(struct map *map, u64 ip)
{
        if (!map->dso->adjust_symbols)
                return map->unmap_ip(map, ip);

        if (map->dso->rel)
                return map->unmap_ip(map, ip + map->pgoff);

        /*
         * kernel modules also have DSO_TYPE_USER in dso->kernel,
         * but all kernel modules are ET_REL, so won't get here.
         */
        if (map->dso->kernel == DSO_TYPE_USER)
                return map->unmap_ip(map, ip - map->dso->text_offset);

        return ip + map->reloc;
}
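
/*
 * Note the symmetry with map__rip_2objdump(): each branch here undoes the
 * corresponding branch there (pgoff is added back for ET_REL, text_offset is
 * subtracted again for user-space kernel dsos, and reloc is re-applied in the
 * default case).
 */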

static void maps__init(struct maps *maps)
{
        maps->entries = RB_ROOT;
        maps->names = RB_ROOT;
        init_rwsem(&maps->lock);
}

void map_groups__init(struct map_groups *mg, struct machine *machine)
{
        maps__init(&mg->maps);
        mg->machine = machine;
        refcount_set(&mg->refcnt, 1);
}

void map_groups__insert(struct map_groups *mg, struct map *map)
{
        maps__insert(&mg->maps, map);
        map->groups = mg;
}

static void __maps__purge(struct maps *maps)
{
        struct rb_root *root = &maps->entries;
        struct rb_node *next = rb_first(root);

        while (next) {
                struct map *pos = rb_entry(next, struct map, rb_node);

                next = rb_next(&pos->rb_node);
                rb_erase_init(&pos->rb_node, root);
                map__put(pos);
        }
}

static void __maps__purge_names(struct maps *maps)
{
        struct rb_root *root = &maps->names;
        struct rb_node *next = rb_first(root);

        while (next) {
                struct map *pos = rb_entry(next, struct map, rb_node_name);

                next = rb_next(&pos->rb_node_name);
                rb_erase_init(&pos->rb_node_name, root);
                map__put(pos);
        }
}

static void maps__exit(struct maps *maps)
{
        down_write(&maps->lock);
        __maps__purge(maps);
        __maps__purge_names(maps);
        up_write(&maps->lock);
}

void map_groups__exit(struct map_groups *mg)
{
        maps__exit(&mg->maps);
}

bool map_groups__empty(struct map_groups *mg)
{
        return !maps__first(&mg->maps);
}

struct map_groups *map_groups__new(struct machine *machine)
{
        struct map_groups *mg = malloc(sizeof(*mg));

        if (mg != NULL)
                map_groups__init(mg, machine);

        return mg;
}

void map_groups__delete(struct map_groups *mg)
{
        map_groups__exit(mg);
        free(mg);
}

void map_groups__put(struct map_groups *mg)
{
        if (mg && refcount_dec_and_test(&mg->refcnt))
                map_groups__delete(mg);
}

struct symbol *map_groups__find_symbol(struct map_groups *mg,
                                       u64 addr, struct map **mapp)
{
        struct map *map = map_groups__find(mg, addr);

        /* Ensure map is loaded before using map->map_ip */
        if (map != NULL && map__load(map) >= 0) {
                if (mapp != NULL)
                        *mapp = map;
                return map__find_symbol(map, map->map_ip(map, addr));
        }

        return NULL;
}
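
/*
 * Typical usage:
 *
 *	struct map *map;
 *	struct symbol *sym = map_groups__find_symbol(mg, addr, &map);
 *
 * addr is a memory address; it is translated to a dso-relative address with
 * map->map_ip() before the symbol lookup, as done above.
 */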

static bool map__contains_symbol(struct map *map, struct symbol *sym)
{
        u64 ip = map->unmap_ip(map, sym->start);

        return ip >= map->start && ip < map->end;
}

struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name,
                                         struct map **mapp)
{
        struct symbol *sym;
        struct rb_node *nd;

        down_read(&maps->lock);

        for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) {
                struct map *pos = rb_entry(nd, struct map, rb_node);

                sym = map__find_symbol_by_name(pos, name);

                if (sym == NULL)
                        continue;
                if (!map__contains_symbol(pos, sym)) {
                        sym = NULL;
                        continue;
                }
                if (mapp != NULL)
                        *mapp = pos;
                goto out;
        }

        sym = NULL;
out:
        up_read(&maps->lock);
        return sym;
}

struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg,
                                               const char *name,
                                               struct map **mapp)
{
        return maps__find_symbol_by_name(&mg->maps, name, mapp);
}

int map_groups__find_ams(struct addr_map_symbol *ams)
{
        if (ams->addr < ams->map->start || ams->addr >= ams->map->end) {
                if (ams->map->groups == NULL)
                        return -1;
                ams->map = map_groups__find(ams->map->groups, ams->addr);
                if (ams->map == NULL)
                        return -1;
        }

        ams->al_addr = ams->map->map_ip(ams->map, ams->addr);
        ams->sym = map__find_symbol(ams->map, ams->al_addr);

        return ams->sym ? 0 : -1;
}

static size_t maps__fprintf(struct maps *maps, FILE *fp)
{
        size_t printed = 0;
        struct rb_node *nd;

        down_read(&maps->lock);

        for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) {
                struct map *pos = rb_entry(nd, struct map, rb_node);

                printed += fprintf(fp, "Map:");
                printed += map__fprintf(pos, fp);
                if (verbose > 2) {
                        printed += dso__fprintf(pos->dso, fp);
                        printed += fprintf(fp, "--\n");
                }
        }

        up_read(&maps->lock);

        return printed;
}

size_t map_groups__fprintf(struct map_groups *mg, FILE *fp)
{
        return maps__fprintf(&mg->maps, fp);
}

static void __map_groups__insert(struct map_groups *mg, struct map *map)
{
        __maps__insert(&mg->maps, map);
        __maps__insert_name(&mg->maps, map);
        map->groups = mg;
}

static int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp)
{
        struct rb_root *root;
        struct rb_node *next, *first;
        int err = 0;

        down_write(&maps->lock);

        root = &maps->entries;

        /*
         * Find first map where end > map->start.
         * Same as find_vma() in kernel.
         */
        next = root->rb_node;
        first = NULL;
        while (next) {
                struct map *pos = rb_entry(next, struct map, rb_node);

                if (pos->end > map->start) {
                        first = next;
                        if (pos->start <= map->start)
                                break;
                        next = next->rb_left;
                } else
                        next = next->rb_right;
        }

        next = first;
        while (next) {
                struct map *pos = rb_entry(next, struct map, rb_node);
                next = rb_next(&pos->rb_node);

                /*
                 * Stop if current map starts after map->end.
                 * Maps are ordered by start: next will not overlap for sure.
                 */
                if (pos->start >= map->end)
                        break;

                if (verbose >= 2) {
                        if (use_browser) {
                                pr_debug("overlapping maps in %s (disable tui for more info)\n",
                                         map->dso->name);
                        } else {
                                fputs("overlapping maps:\n", fp);
                                map__fprintf(map, fp);
                                map__fprintf(pos, fp);
                        }
                }

                rb_erase_init(&pos->rb_node, root);
                /*
                 * Now check if we need to create new maps for areas not
                 * overlapped by the new map:
                 */
                if (map->start > pos->start) {
                        struct map *before = map__clone(pos);

                        if (before == NULL) {
                                err = -ENOMEM;
                                goto put_map;
                        }

                        before->end = map->start;
                        __map_groups__insert(pos->groups, before);
                        if (verbose >= 2 && !use_browser)
                                map__fprintf(before, fp);
                        map__put(before);
                }

                if (map->end < pos->end) {
                        struct map *after = map__clone(pos);

                        if (after == NULL) {
                                err = -ENOMEM;
                                goto put_map;
                        }

                        after->start = map->end;
                        __map_groups__insert(pos->groups, after);
                        if (verbose >= 2 && !use_browser)
                                map__fprintf(after, fp);
                        map__put(after);
                }
put_map:
                map__put(pos);

                if (err)
                        goto out;
        }

        err = 0;
out:
        up_write(&maps->lock);
        return err;
}
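
/*
 * In other words, inserting a new map evicts every existing map it overlaps;
 * the non-overlapped leading/trailing parts of an evicted map are re-inserted
 * as clones clipped to the new map's boundaries, which is what the
 * "before"/"after" clones above implement.
 */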

int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
                                   FILE *fp)
{
        return maps__fixup_overlappings(&mg->maps, map, fp);
}

/*
 * XXX This should not really _copy_ the maps, but refcount them.
 */
int map_groups__clone(struct thread *thread, struct map_groups *parent)
{
        struct map_groups *mg = thread->mg;
        int err = -ENOMEM;
        struct map *map;
        struct maps *maps = &parent->maps;

        down_read(&maps->lock);

        for (map = maps__first(maps); map; map = map__next(map)) {
                struct map *new = map__clone(map);

                if (new == NULL)
                        goto out_unlock;

                err = unwind__prepare_access(thread, new, NULL);
                if (err)
                        goto out_unlock;

                map_groups__insert(mg, new);
                map__put(new);
        }

        err = 0;
out_unlock:
        up_read(&maps->lock);
        return err;
}
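
/*
 * map_groups__clone() is used when a new thread inherits its parent's address
 * space (e.g. on fork), so every map of the parent is cloned into the child's
 * map_groups.
 */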

static void __maps__insert(struct maps *maps, struct map *map)
{
        struct rb_node **p = &maps->entries.rb_node;
        struct rb_node *parent = NULL;
        const u64 ip = map->start;
        struct map *m;

        while (*p != NULL) {
                parent = *p;
                m = rb_entry(parent, struct map, rb_node);
                if (ip < m->start)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&map->rb_node, parent, p);
        rb_insert_color(&map->rb_node, &maps->entries);
        map__get(map);
}

static void __maps__insert_name(struct maps *maps, struct map *map)
{
        struct rb_node **p = &maps->names.rb_node;
        struct rb_node *parent = NULL;
        struct map *m;
        int rc;

        while (*p != NULL) {
                parent = *p;
                m = rb_entry(parent, struct map, rb_node_name);
                rc = strcmp(m->dso->short_name, map->dso->short_name);
                if (rc < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }
        rb_link_node(&map->rb_node_name, parent, p);
        rb_insert_color(&map->rb_node_name, &maps->names);
        map__get(map);
}

void maps__insert(struct maps *maps, struct map *map)
{
        down_write(&maps->lock);
        __maps__insert(maps, map);
        __maps__insert_name(maps, map);
        up_write(&maps->lock);
}
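
/*
 * maps__insert()/maps__remove()/maps__find() take maps->lock themselves; the
 * __maps__*() variants assume the caller already holds the lock for writing,
 * as maps__fixup_overlappings() does.
 */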

static void __maps__remove(struct maps *maps, struct map *map)
{
        rb_erase_init(&map->rb_node, &maps->entries);
        map__put(map);

        rb_erase_init(&map->rb_node_name, &maps->names);
        map__put(map);
}

void maps__remove(struct maps *maps, struct map *map)
{
        down_write(&maps->lock);
        __maps__remove(maps, map);
        up_write(&maps->lock);
}

struct map *maps__find(struct maps *maps, u64 ip)
{
        struct rb_node *p;
        struct map *m;

        down_read(&maps->lock);

        p = maps->entries.rb_node;
        while (p != NULL) {
                m = rb_entry(p, struct map, rb_node);
                if (ip < m->start)
                        p = p->rb_left;
                else if (ip >= m->end)
                        p = p->rb_right;
                else
                        goto out;
        }

        m = NULL;
out:
        up_read(&maps->lock);
        return m;
}

struct map *maps__first(struct maps *maps)
{
        struct rb_node *first = rb_first(&maps->entries);

        if (first)
                return rb_entry(first, struct map, rb_node);
        return NULL;
}

struct map *map__next(struct map *map)
{
        struct rb_node *next = rb_next(&map->rb_node);

        if (next)
                return rb_entry(next, struct map, rb_node);
        return NULL;
}

struct kmap *__map__kmap(struct map *map)
{
        if (!map->dso || !map->dso->kernel)
                return NULL;
        return (struct kmap *)(map + 1);
}

struct kmap *map__kmap(struct map *map)
{
        struct kmap *kmap = __map__kmap(map);

        if (!kmap)
                pr_err("Internal error: map__kmap with a non-kernel map\n");
        return kmap;
}

struct map_groups *map__kmaps(struct map *map)
{
        struct kmap *kmap = map__kmap(map);

        if (!kmap || !kmap->kmaps) {
                pr_err("Internal error: map__kmaps with a non-kernel map\n");
                return NULL;
        }
        return kmap->kmaps;
}