perf map: Fix overlapped map handling
tools/perf/util/map.c
#include "symbol.h"
#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>
#include "map.h"
#include "thread.h"
#include "strlist.h"
#include "vdso.h"
#include "build-id.h"
#include "util.h"
#include "debug.h"
#include "machine.h"
#include <linux/string.h>
static void __maps__insert(struct maps *maps, struct map *map);

const char *map_type__name[MAP__NR_TYPES] = {
	[MAP__FUNCTION] = "Functions",
	[MAP__VARIABLE] = "Variables",
};
static inline int is_anon_memory(const char *filename)
{
	return !strcmp(filename, "//anon") ||
	       !strcmp(filename, "/dev/zero (deleted)") ||
	       !strcmp(filename, "/anon_hugepage (deleted)");
}

static inline int is_no_dso_memory(const char *filename)
{
	return !strncmp(filename, "[stack", 6) ||
	       !strncmp(filename, "/SYSV", 5) ||
	       !strcmp(filename, "[heap]");
}

static inline int is_android_lib(const char *filename)
{
	return !strncmp(filename, "/data/app-lib", 13) ||
	       !strncmp(filename, "/system/lib", 11);
}
static inline bool replace_android_lib(const char *filename, char *newfilename)
{
	const char *libname;
	char *app_abi;
	size_t app_abi_length, new_length;
	size_t lib_length = 0;

	libname = strrchr(filename, '/');
	if (libname)
		lib_length = strlen(libname);

	app_abi = getenv("APP_ABI");
	if (!app_abi)
		return false;

	app_abi_length = strlen(app_abi);

	if (!strncmp(filename, "/data/app-lib", 13)) {
		char *apk_path;

		if (!app_abi_length)
			return false;

		new_length = 7 + app_abi_length + lib_length;

		apk_path = getenv("APK_PATH");
		if (apk_path) {
			new_length += strlen(apk_path) + 1;
			if (new_length > PATH_MAX)
				return false;
			snprintf(newfilename, new_length,
				 "%s/libs/%s/%s", apk_path, app_abi, libname);
		} else {
			if (new_length > PATH_MAX)
				return false;
			snprintf(newfilename, new_length,
				 "libs/%s/%s", app_abi, libname);
		}
		return true;
	}

	if (!strncmp(filename, "/system/lib/", 11)) {
		char *ndk, *app;
		const char *arch;
		size_t ndk_length;
		size_t app_length;

		ndk = getenv("NDK_ROOT");
		app = getenv("APP_PLATFORM");

		if (!(ndk && app))
			return false;

		ndk_length = strlen(ndk);
		app_length = strlen(app);

		if (!(ndk_length && app_length && app_abi_length))
			return false;

		arch = !strncmp(app_abi, "arm", 3) ? "arm" :
		       !strncmp(app_abi, "mips", 4) ? "mips" :
		       !strncmp(app_abi, "x86", 3) ? "x86" : NULL;

		if (!arch)
			return false;

		new_length = 27 + ndk_length +
			     app_length + lib_length +
			     strlen(arch);

		if (new_length > PATH_MAX)
			return false;
		snprintf(newfilename, new_length,
			 "%s/platforms/%s/arch-%s/usr/lib/%s",
			 ndk, app, arch, libname);

		return true;
	}
	return false;
}
void map__init(struct map *map, enum map_type type,
	       u64 start, u64 end, u64 pgoff, struct dso *dso)
{
	map->type     = type;
	map->start    = start;
	map->end      = end;
	map->pgoff    = pgoff;
	map->reloc    = 0;
	map->dso      = dso__get(dso);
	map->map_ip   = map__map_ip;
	map->unmap_ip = map__unmap_ip;
	RB_CLEAR_NODE(&map->rb_node);
	map->groups   = NULL;
	map->erange_warned = false;
	atomic_set(&map->refcnt, 1);
}
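
/*
 * map__new() builds a map covering [start, start + len) for an mmap-style
 * event: anonymous and no-DSO regions are redirected to /tmp/perf-<pid>.map,
 * Android library paths are rewritten, the vdso is special-cased, and the
 * backing DSO is looked up (or created) in the machine's DSO list.
 */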
struct map *map__new(struct machine *machine, u64 start, u64 len,
		     u64 pgoff, u32 pid, u32 d_maj, u32 d_min, u64 ino,
		     u64 ino_gen, u32 prot, u32 flags, char *filename,
		     enum map_type type, struct thread *thread)
{
	struct map *map = malloc(sizeof(*map));

	if (map != NULL) {
		char newfilename[PATH_MAX];
		struct dso *dso;
		int anon, no_dso, vdso, android;

		android = is_android_lib(filename);
		anon = is_anon_memory(filename);
		vdso = is_vdso_map(filename);
		no_dso = is_no_dso_memory(filename);

		map->maj = d_maj;
		map->min = d_min;
		map->ino = ino;
		map->ino_generation = ino_gen;
		map->prot = prot;
		map->flags = flags;

		if ((anon || no_dso) && type == MAP__FUNCTION) {
			snprintf(newfilename, sizeof(newfilename), "/tmp/perf-%d.map", pid);
			filename = newfilename;
		}

		if (android) {
			if (replace_android_lib(filename, newfilename))
				filename = newfilename;
		}

		if (vdso) {
			pgoff = 0;
			dso = machine__findnew_vdso(machine, thread);
		} else
			dso = machine__findnew_dso(machine, filename);

		if (dso == NULL)
			goto out_delete;

		map__init(map, type, start, start + len, pgoff, dso);

		if (anon || no_dso) {
			map->map_ip = map->unmap_ip = identity__map_ip;

			/*
			 * Set memory without DSO as loaded. All map__find_*
			 * functions still return NULL, and we avoid the
			 * unnecessary map__load warning.
			 */
			if (type != MAP__FUNCTION)
				dso__set_loaded(dso, map->type);
		}
		dso__put(dso);
	}
	return map;
out_delete:
	free(map);
	return NULL;
}
/*
 * Constructor variant for modules (where we know from /proc/modules where
 * they are loaded) and for vmlinux, where only after we load all the
 * symbols we'll know where it starts and ends.
 */
struct map *map__new2(u64 start, struct dso *dso, enum map_type type)
{
	struct map *map = calloc(1, (sizeof(*map) +
				     (dso->kernel ? sizeof(struct kmap) : 0)));
	if (map != NULL) {
		/*
		 * ->end will be filled after we load all the symbols
		 */
		map__init(map, type, start, 0, 0, dso);
	}

	return map;
}
/*
 * Use this and __map__is_kmodule() for map instances that are in
 * machine->kmaps, and thus have map->groups->machine all properly set, to
 * disambiguate between the kernel and modules.
 *
 * When the need arises, introduce map__is_{kernel,kmodule}() that
 * checks (map->groups != NULL && map->groups->machine != NULL &&
 * map->dso->kernel) before calling __map__is_{kernel,kmodule}().
 */
bool __map__is_kernel(const struct map *map)
{
	return __machine__kernel_map(map->groups->machine, map->type) == map;
}
static void map__exit(struct map *map)
{
	BUG_ON(!RB_EMPTY_NODE(&map->rb_node));
	dso__zput(map->dso);
}

void map__delete(struct map *map)
{
	map__exit(map);
	free(map);
}

void map__put(struct map *map)
{
	if (map && atomic_dec_and_test(&map->refcnt))
		map__delete(map);
}

void map__fixup_start(struct map *map)
{
	struct rb_root *symbols = &map->dso->symbols[map->type];
	struct rb_node *nd = rb_first(symbols);
	if (nd != NULL) {
		struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
		map->start = sym->start;
	}
}

void map__fixup_end(struct map *map)
{
	struct rb_root *symbols = &map->dso->symbols[map->type];
	struct rb_node *nd = rb_last(symbols);
	if (nd != NULL) {
		struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
		map->end = sym->end;
	}
}

#define DSO__DELETED "(deleted)"
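
/*
 * map__load(): load the symbol table for map->dso if it hasn't been loaded
 * yet. Returns 0 when symbols are (already) available, -1 after warning the
 * user when none could be found.
 */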
int map__load(struct map *map, symbol_filter_t filter)
{
	const char *name = map->dso->long_name;
	int nr;

	if (dso__loaded(map->dso, map->type))
		return 0;

	nr = dso__load(map->dso, map, filter);
	if (nr < 0) {
		if (map->dso->has_build_id) {
			char sbuild_id[BUILD_ID_SIZE * 2 + 1];

			build_id__sprintf(map->dso->build_id,
					  sizeof(map->dso->build_id),
					  sbuild_id);
			pr_warning("%s with build id %s not found",
				   name, sbuild_id);
		} else
			pr_warning("Failed to open %s", name);

		pr_warning(", continuing without symbols\n");
		return -1;
	} else if (nr == 0) {
#ifdef HAVE_LIBELF_SUPPORT
		const size_t len = strlen(name);
		const size_t real_len = len - sizeof(DSO__DELETED);

		if (len > sizeof(DSO__DELETED) &&
		    strcmp(name + real_len + 1, DSO__DELETED) == 0) {
			pr_warning("%.*s was updated (is prelink enabled?). "
				   "Restart the long running apps that use it!\n",
				   (int)real_len, name);
		} else {
			pr_warning("no symbols found in %s, maybe install "
				   "a debug package?\n", name);
		}
#endif
		return -1;
	}

	return 0;
}
int __weak arch__compare_symbol_names(const char *namea, const char *nameb)
{
	return strcmp(namea, nameb);
}

struct symbol *map__find_symbol(struct map *map, u64 addr,
				symbol_filter_t filter)
{
	if (map__load(map, filter) < 0)
		return NULL;

	return dso__find_symbol(map->dso, map->type, addr);
}

struct symbol *map__find_symbol_by_name(struct map *map, const char *name,
					symbol_filter_t filter)
{
	if (map__load(map, filter) < 0)
		return NULL;

	if (!dso__sorted_by_name(map->dso, map->type))
		dso__sort_by_name(map->dso, map->type);

	return dso__find_symbol_by_name(map->dso, map->type, name);
}
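
/*
 * map__clone(): duplicate @from. The copy starts with its own refcount of
 * one, is not linked into any rb-tree or map_groups, and takes a new
 * reference on the underlying dso.
 */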
struct map *map__clone(struct map *from)
{
	struct map *map = memdup(from, sizeof(*map));

	if (map != NULL) {
		atomic_set(&map->refcnt, 1);
		RB_CLEAR_NODE(&map->rb_node);
		dso__get(map->dso);
		map->groups = NULL;
	}

	return map;
}

int map__overlap(struct map *l, struct map *r)
{
	if (l->start > r->start) {
		struct map *t = l;
		l = r;
		r = t;
	}

	if (l->end > r->start)
		return 1;

	return 0;
}
size_t map__fprintf(struct map *map, FILE *fp)
{
	return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s\n",
		       map->start, map->end, map->pgoff, map->dso->name);
}

size_t map__fprintf_dsoname(struct map *map, FILE *fp)
{
	const char *dsoname = "[unknown]";

	if (map && map->dso && (map->dso->name || map->dso->long_name)) {
		if (symbol_conf.show_kernel_path && map->dso->long_name)
			dsoname = map->dso->long_name;
		else if (map->dso->name)
			dsoname = map->dso->name;
	}

	return fprintf(fp, "%s", dsoname);
}

int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix,
			 FILE *fp)
{
	char *srcline;
	int ret = 0;

	if (map && map->dso) {
		srcline = get_srcline(map->dso,
				      map__rip_2objdump(map, addr), NULL, true);
		if (srcline != SRCLINE_UNKNOWN)
			ret = fprintf(fp, "%s%s", prefix, srcline);
		free_srcline(srcline);
	}
	return ret;
}
/**
 * map__rip_2objdump - convert symbol start address to objdump address.
 * @map: memory map
 * @rip: symbol start address
 *
 * objdump wants/reports absolute IPs for ET_EXEC, and RIPs for ET_DYN.
 * map->dso->adjust_symbols==1 for ET_EXEC-like cases except ET_REL which is
 * relative to section start.
 *
 * Return: Address suitable for passing to "objdump --start-address="
 */
u64 map__rip_2objdump(struct map *map, u64 rip)
{
	if (!map->dso->adjust_symbols)
		return rip;

	if (map->dso->rel)
		return rip - map->pgoff;

	return map->unmap_ip(map, rip) - map->reloc;
}

/**
 * map__objdump_2mem - convert objdump address to a memory address.
 * @map: memory map
 * @ip: objdump address
 *
 * Closely related to map__rip_2objdump(), this function takes an address from
 * objdump and converts it to a memory address. Note this assumes that @map
 * contains the address. To be sure the result is valid, check it forwards
 * e.g. map__rip_2objdump(map->map_ip(map, map__objdump_2mem(map, ip))) == ip
 *
 * Return: Memory address.
 */
u64 map__objdump_2mem(struct map *map, u64 ip)
{
	if (!map->dso->adjust_symbols)
		return map->unmap_ip(map, ip);

	if (map->dso->rel)
		return map->unmap_ip(map, ip + map->pgoff);

	return ip + map->reloc;
}
static void maps__init(struct maps *maps)
{
	maps->entries = RB_ROOT;
	pthread_rwlock_init(&maps->lock, NULL);
}

void map_groups__init(struct map_groups *mg, struct machine *machine)
{
	int i;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		maps__init(&mg->maps[i]);
	}
	mg->machine = machine;
	atomic_set(&mg->refcnt, 1);
}

static void __maps__purge(struct maps *maps)
{
	struct rb_root *root = &maps->entries;
	struct rb_node *next = rb_first(root);

	while (next) {
		struct map *pos = rb_entry(next, struct map, rb_node);

		next = rb_next(&pos->rb_node);
		rb_erase_init(&pos->rb_node, root);
		map__put(pos);
	}
}

static void maps__exit(struct maps *maps)
{
	pthread_rwlock_wrlock(&maps->lock);
	__maps__purge(maps);
	pthread_rwlock_unlock(&maps->lock);
}

void map_groups__exit(struct map_groups *mg)
{
	int i;

	for (i = 0; i < MAP__NR_TYPES; ++i)
		maps__exit(&mg->maps[i]);
}

bool map_groups__empty(struct map_groups *mg)
{
	int i;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		if (maps__first(&mg->maps[i]))
			return false;
	}

	return true;
}
struct map_groups *map_groups__new(struct machine *machine)
{
	struct map_groups *mg = malloc(sizeof(*mg));

	if (mg != NULL)
		map_groups__init(mg, machine);

	return mg;
}

void map_groups__delete(struct map_groups *mg)
{
	map_groups__exit(mg);
	free(mg);
}

void map_groups__put(struct map_groups *mg)
{
	if (mg && atomic_dec_and_test(&mg->refcnt))
		map_groups__delete(mg);
}

struct symbol *map_groups__find_symbol(struct map_groups *mg,
				       enum map_type type, u64 addr,
				       struct map **mapp,
				       symbol_filter_t filter)
{
	struct map *map = map_groups__find(mg, type, addr);

	/* Ensure map is loaded before using map->map_ip */
	if (map != NULL && map__load(map, filter) >= 0) {
		if (mapp != NULL)
			*mapp = map;
		return map__find_symbol(map, map->map_ip(map, addr), filter);
	}

	return NULL;
}
struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name,
					 struct map **mapp, symbol_filter_t filter)
{
	struct symbol *sym;
	struct rb_node *nd;

	pthread_rwlock_rdlock(&maps->lock);

	for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node);

		sym = map__find_symbol_by_name(pos, name, filter);

		if (sym == NULL)
			continue;
		if (mapp != NULL)
			*mapp = pos;
		goto out;
	}

	sym = NULL;
out:
	pthread_rwlock_unlock(&maps->lock);
	return sym;
}

struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg,
					       enum map_type type,
					       const char *name,
					       struct map **mapp,
					       symbol_filter_t filter)
{
	struct symbol *sym = maps__find_symbol_by_name(&mg->maps[type], name, mapp, filter);

	return sym;
}
int map_groups__find_ams(struct addr_map_symbol *ams, symbol_filter_t filter)
{
	if (ams->addr < ams->map->start || ams->addr >= ams->map->end) {
		if (ams->map->groups == NULL)
			return -1;
		ams->map = map_groups__find(ams->map->groups, ams->map->type,
					    ams->addr);
		if (ams->map == NULL)
			return -1;
	}

	ams->al_addr = ams->map->map_ip(ams->map, ams->addr);
	ams->sym = map__find_symbol(ams->map, ams->al_addr, filter);

	return ams->sym ? 0 : -1;
}

static size_t maps__fprintf(struct maps *maps, FILE *fp)
{
	size_t printed = 0;
	struct rb_node *nd;

	pthread_rwlock_rdlock(&maps->lock);

	for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node);
		printed += fprintf(fp, "Map:");
		printed += map__fprintf(pos, fp);
		if (verbose > 2) {
			printed += dso__fprintf(pos->dso, pos->type, fp);
			printed += fprintf(fp, "--\n");
		}
	}

	pthread_rwlock_unlock(&maps->lock);

	return printed;
}

size_t __map_groups__fprintf_maps(struct map_groups *mg, enum map_type type,
				  FILE *fp)
{
	size_t printed = fprintf(fp, "%s:\n", map_type__name[type]);
	return printed += maps__fprintf(&mg->maps[type], fp);
}

size_t map_groups__fprintf(struct map_groups *mg, FILE *fp)
{
	size_t printed = 0, i;

	for (i = 0; i < MAP__NR_TYPES; ++i)
		printed += __map_groups__fprintf_maps(mg, i, fp);
	return printed;
}

static void __map_groups__insert(struct map_groups *mg, struct map *map)
{
	__maps__insert(&mg->maps[map->type], map);
	map->groups = mg;
}
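
/*
 * maps__fixup_overlappings(): remove from @maps every map that overlaps the
 * incoming @map and, for the address ranges of the old map that the new one
 * does not cover, re-insert clipped clones (one before and one after the new
 * map), so that addresses outside the new mapping keep resolving to the old
 * DSO.
 */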
static int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp)
{
	struct rb_root *root;
	struct rb_node *next;
	int err = 0;

	pthread_rwlock_wrlock(&maps->lock);

	root = &maps->entries;
	next = rb_first(root);

	while (next) {
		struct map *pos = rb_entry(next, struct map, rb_node);
		next = rb_next(&pos->rb_node);

		if (!map__overlap(pos, map))
			continue;

		if (verbose >= 2) {
			fputs("overlapping maps:\n", fp);
			map__fprintf(map, fp);
			map__fprintf(pos, fp);
		}

		rb_erase_init(&pos->rb_node, root);
		/*
		 * Now check if we need to create new maps for areas not
		 * overlapped by the new map:
		 */
		if (map->start > pos->start) {
			struct map *before = map__clone(pos);

			if (before == NULL) {
				err = -ENOMEM;
				goto put_map;
			}

			before->end = map->start;
			__map_groups__insert(pos->groups, before);
			if (verbose >= 2)
				map__fprintf(before, fp);
		}

		if (map->end < pos->end) {
			struct map *after = map__clone(pos);

			if (after == NULL) {
				err = -ENOMEM;
				goto put_map;
			}

			after->start = map->end;
			after->pgoff += map->end - pos->start;
			assert(pos->map_ip(pos, map->end) == after->map_ip(after, map->end));
			__map_groups__insert(pos->groups, after);
			if (verbose >= 2)
				map__fprintf(after, fp);
		}
put_map:
		map__put(pos);

		if (err)
			goto out;
	}

	err = 0;
out:
	pthread_rwlock_unlock(&maps->lock);
	return err;
}

int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
				   FILE *fp)
{
	return maps__fixup_overlappings(&mg->maps[map->type], map, fp);
}
/*
 * XXX This should not really _copy_ the maps, but refcount them.
 */
int map_groups__clone(struct map_groups *mg,
		      struct map_groups *parent, enum map_type type)
{
	int err = -ENOMEM;
	struct map *map;
	struct maps *maps = &parent->maps[type];

	pthread_rwlock_rdlock(&maps->lock);

	for (map = maps__first(maps); map; map = map__next(map)) {
		struct map *new = map__clone(map);

		if (new == NULL)
			goto out_unlock;

		map_groups__insert(mg, new);
	}

	err = 0;
out_unlock:
	pthread_rwlock_unlock(&maps->lock);
	return err;
}
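
/*
 * __maps__insert(): link @map into the rb-tree keyed by map->start and take
 * a reference on it. Callers must hold maps->lock for writing; the
 * maps__insert() wrapper below takes care of that.
 */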
static void __maps__insert(struct maps *maps, struct map *map)
{
	struct rb_node **p = &maps->entries.rb_node;
	struct rb_node *parent = NULL;
	const u64 ip = map->start;
	struct map *m;

	while (*p != NULL) {
		parent = *p;
		m = rb_entry(parent, struct map, rb_node);
		if (ip < m->start)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&map->rb_node, parent, p);
	rb_insert_color(&map->rb_node, &maps->entries);
	map__get(map);
}

void maps__insert(struct maps *maps, struct map *map)
{
	pthread_rwlock_wrlock(&maps->lock);
	__maps__insert(maps, map);
	pthread_rwlock_unlock(&maps->lock);
}

static void __maps__remove(struct maps *maps, struct map *map)
{
	rb_erase_init(&map->rb_node, &maps->entries);
	map__put(map);
}

void maps__remove(struct maps *maps, struct map *map)
{
	pthread_rwlock_wrlock(&maps->lock);
	__maps__remove(maps, map);
	pthread_rwlock_unlock(&maps->lock);
}
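
/*
 * maps__find(): return the map whose [start, end) range contains @ip, or
 * NULL if no such map exists. The lookup walks the rb-tree ordered by start
 * address while holding the read lock.
 */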
struct map *maps__find(struct maps *maps, u64 ip)
{
	struct rb_node **p, *parent = NULL;
	struct map *m;

	pthread_rwlock_rdlock(&maps->lock);

	p = &maps->entries.rb_node;
	while (*p != NULL) {
		parent = *p;
		m = rb_entry(parent, struct map, rb_node);
		if (ip < m->start)
			p = &(*p)->rb_left;
		else if (ip >= m->end)
			p = &(*p)->rb_right;
		else
			goto out;
	}

	m = NULL;
out:
	pthread_rwlock_unlock(&maps->lock);
	return m;
}

struct map *maps__first(struct maps *maps)
{
	struct rb_node *first = rb_first(&maps->entries);

	if (first)
		return rb_entry(first, struct map, rb_node);
	return NULL;
}

struct map *map__next(struct map *map)
{
	struct rb_node *next = rb_next(&map->rb_node);

	if (next)
		return rb_entry(next, struct map, rb_node);
	return NULL;
}

struct kmap *map__kmap(struct map *map)
{
	if (!map->dso || !map->dso->kernel) {
		pr_err("Internal error: map__kmap with a non-kernel map\n");
		return NULL;
	}
	return (struct kmap *)(map + 1);
}

struct map_groups *map__kmaps(struct map *map)
{
	struct kmap *kmap = map__kmap(map);

	if (!kmap || !kmap->kmaps) {
		pr_err("Internal error: map__kmaps with a non-kernel map\n");
		return NULL;
	}
	return kmap->kmaps;
}