const char *map_type__name[MAP__NR_TYPES] = {
        [MAP__FUNCTION] = "Functions",
        [MAP__VARIABLE] = "Variables",
};

static inline int is_anon_memory(const char *filename)
{
        return strcmp(filename, "//anon") == 0;
}

static inline int is_no_dso_memory(const char *filename)
{
        return !strcmp(filename, "[stack]") ||
               !strcmp(filename, "[vdso]")  ||
               !strcmp(filename, "[heap]");
}

void map__init(struct map *self, enum map_type type,
               u64 start, u64 end, u64 pgoff, struct dso *dso)
{
        self->type     = type;
        self->start    = start;
        self->end      = end;
        self->pgoff    = pgoff;
        self->dso      = dso;
        self->map_ip   = map__map_ip;
        self->unmap_ip = map__unmap_ip;
        RB_CLEAR_NODE(&self->rb_node);
        self->groups   = NULL;
        self->referenced = false;
        self->erange_warned = false;
}

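/*
 * Allocate and initialize a new map covering [start, start + len).
 * Anonymous executable mappings get their filename rewritten to
 * /tmp/perf-<pid>.map so that symbols can still be looked up for them.
 */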
struct map *map__new(struct list_head *dsos__list, u64 start, u64 len,
                     u64 pgoff, u32 pid, char *filename,
                     enum map_type type)
{
        struct map *self = malloc(sizeof(*self));

        if (self != NULL) {
                char newfilename[PATH_MAX];
                struct dso *dso;
                int anon, no_dso;

                anon = is_anon_memory(filename);
                no_dso = is_no_dso_memory(filename);

                if (anon) {
                        snprintf(newfilename, sizeof(newfilename), "/tmp/perf-%d.map", pid);
                        filename = newfilename;
                }

                dso = __dsos__findnew(dsos__list, filename);
                if (dso == NULL)
                        goto out_delete;

                map__init(self, type, start, start + len, pgoff, dso);

                if (anon || no_dso) {
                        self->map_ip = self->unmap_ip = identity__map_ip;

                        /*
                         * Set memory without DSO as loaded. All map__find_*
                         * functions still return NULL, and we avoid the
                         * unnecessary map__load warning.
                         */
                        if (no_dso)
                                dso__set_loaded(dso, self->type);
                }
        }
        return self;
out_delete:
        free(self);
        return NULL;
}

void map__delete(struct map *self)
{
        free(self);
}

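/*
 * Clamp the map boundaries to the first/last symbol actually loaded
 * for the backing DSO.
 */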
void map__fixup_start(struct map *self)
{
        struct rb_root *symbols = &self->dso->symbols[self->type];
        struct rb_node *nd = rb_first(symbols);
        if (nd != NULL) {
                struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
                self->start = sym->start;
        }
}

void map__fixup_end(struct map *self)
{
        struct rb_root *symbols = &self->dso->symbols[self->type];
        struct rb_node *nd = rb_last(symbols);
        if (nd != NULL) {
                struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
                self->end = sym->end;
        }
}

#define DSO__DELETED "(deleted)"

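/*
 * Lazily load the symbol table of the map's DSO, emitting a warning when
 * symbols are missing or stale; returns 0 on success, -1 otherwise.
 */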
int map__load(struct map *self, symbol_filter_t filter)
{
        const char *name = self->dso->long_name;
        int nr;

        if (dso__loaded(self->dso, self->type))
                return 0;

        nr = dso__load(self->dso, self, filter);
        if (nr < 0) {
                if (self->dso->has_build_id) {
                        char sbuild_id[BUILD_ID_SIZE * 2 + 1];

                        build_id__sprintf(self->dso->build_id,
                                          sizeof(self->dso->build_id),
                                          sbuild_id);
                        pr_warning("%s with build id %s not found",
                                   name, sbuild_id);
                } else
                        pr_warning("Failed to open %s", name);

                pr_warning(", continuing without symbols\n");
                return -1;
        } else if (nr == 0) {
                const size_t len = strlen(name);
                const size_t real_len = len - sizeof(DSO__DELETED);

                if (len > sizeof(DSO__DELETED) &&
                    strcmp(name + real_len + 1, DSO__DELETED) == 0) {
                        pr_warning("%.*s was updated (is prelink enabled?). "
                                   "Restart the long running apps that use it!\n",
                                   (int)real_len, name);
                } else {
                        pr_warning("no symbols found in %s, maybe install "
                                   "a debug package?\n", name);
                }

                return -1;
        }
        /*
         * Only applies to the kernel, as its symtabs aren't relative like the
         * module ones.
         */
        if (self->dso->kernel)
                map__reloc_vmlinux(self);

        return 0;
}

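/*
 * Both lookup helpers below trigger a lazy map__load() before searching
 * the DSO's symbol tree.
 */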
struct symbol *map__find_symbol(struct map *self, u64 addr,
                                symbol_filter_t filter)
{
        if (map__load(self, filter) < 0)
                return NULL;

        return dso__find_symbol(self->dso, self->type, addr);
}

struct symbol *map__find_symbol_by_name(struct map *self, const char *name,
                                        symbol_filter_t filter)
{
        if (map__load(self, filter) < 0)
                return NULL;

        if (!dso__sorted_by_name(self->dso, self->type))
                dso__sort_by_name(self->dso, self->type);

        return dso__find_symbol_by_name(self->dso, self->type, name);
}

struct map *map__clone(struct map *self)
{
        struct map *map = malloc(sizeof(*self));

        if (!map)
                return NULL;

        memcpy(map, self, sizeof(*self));

        return map;
}

int map__overlap(struct map *l, struct map *r)
{
        if (l->start > r->start) {
                struct map *t = l;
                l = r;
                r = t;
        }

        if (l->end > r->start)
                return 1;

        return 0;
}

size_t map__fprintf(struct map *self, FILE *fp)
{
        return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s\n",
                       self->start, self->end, self->pgoff, self->dso->name);
}

size_t map__fprintf_dsoname(struct map *map, FILE *fp)
{
        const char *dsoname;

        if (map && map->dso && (map->dso->name || map->dso->long_name)) {
                if (symbol_conf.show_kernel_path && map->dso->long_name)
                        dsoname = map->dso->long_name;
                else if (map->dso->name)
                        dsoname = map->dso->name;
        } else
                dsoname = "[unknown]";

        return fprintf(fp, "%s", dsoname);
}

/*
 * objdump wants/reports absolute IPs for ET_EXEC, and RIPs for ET_DYN.
 * map->dso->adjust_symbols==1 for ET_EXEC-like cases.
 */
u64 map__rip_2objdump(struct map *map, u64 rip)
{
        u64 addr = map->dso->adjust_symbols ?
                        map->unmap_ip(map, rip) :       /* RIP -> IP */
                        rip;
        return addr;
}

u64 map__objdump_2ip(struct map *map, u64 addr)
{
        u64 ip = map->dso->adjust_symbols ?
                        addr :
                        map->unmap_ip(map, addr);       /* RIP -> IP */
        return ip;
}

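/*
 * A map_groups keeps one rb-tree of live maps and one list of removed
 * maps per map type (functions, variables).
 */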
void map_groups__init(struct map_groups *mg)
{
        int i;
        for (i = 0; i < MAP__NR_TYPES; ++i) {
                mg->maps[i] = RB_ROOT;
                INIT_LIST_HEAD(&mg->removed_maps[i]);
        }
        mg->machine = NULL;
}

static void maps__delete(struct rb_root *maps)
{
        struct rb_node *next = rb_first(maps);

        while (next) {
                struct map *pos = rb_entry(next, struct map, rb_node);

                next = rb_next(&pos->rb_node);
                rb_erase(&pos->rb_node, maps);
                map__delete(pos);
        }
}

static void maps__delete_removed(struct list_head *maps)
{
        struct map *pos, *n;

        list_for_each_entry_safe(pos, n, maps, node) {
                list_del(&pos->node);
                map__delete(pos);
        }
}

void map_groups__exit(struct map_groups *mg)
{
        int i;

        for (i = 0; i < MAP__NR_TYPES; ++i) {
                maps__delete(&mg->maps[i]);
                maps__delete_removed(&mg->removed_maps[i]);
        }
}

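/*
 * Empty the map trees, parking the maps on the removed_maps lists
 * instead of freeing them, since other code may still reference them.
 */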
void map_groups__flush(struct map_groups *mg)
{
        int type;

        for (type = 0; type < MAP__NR_TYPES; type++) {
                struct rb_root *root = &mg->maps[type];
                struct rb_node *next = rb_first(root);

                while (next) {
                        struct map *pos = rb_entry(next, struct map, rb_node);
                        next = rb_next(&pos->rb_node);
                        rb_erase(&pos->rb_node, root);
                        /*
                         * We may have references to this map, for
                         * instance in some hist_entry instances, so
                         * just move them to a separate list.
                         */
                        list_add_tail(&pos->node, &mg->removed_maps[pos->type]);
                }
        }
}

struct symbol *map_groups__find_symbol(struct map_groups *mg,
                                       enum map_type type, u64 addr,
                                       struct map **mapp,
                                       symbol_filter_t filter)
{
        struct map *map = map_groups__find(mg, type, addr);

        if (map != NULL) {
                if (mapp != NULL)
                        *mapp = map;
                return map__find_symbol(map, map->map_ip(map, addr), filter);
        }

        return NULL;
}

struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg,
                                               enum map_type type,
                                               const char *name,
                                               struct map **mapp,
                                               symbol_filter_t filter)
{
        struct rb_node *nd;

        for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) {
                struct map *pos = rb_entry(nd, struct map, rb_node);
                struct symbol *sym = map__find_symbol_by_name(pos, name, filter);

                if (sym == NULL)
                        continue;
                if (mapp != NULL)
                        *mapp = pos;
                return sym;
        }

        return NULL;
}

size_t __map_groups__fprintf_maps(struct map_groups *mg,
                                  enum map_type type, int verbose, FILE *fp)
{
        size_t printed = fprintf(fp, "%s:\n", map_type__name[type]);
        struct rb_node *nd;

        for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) {
                struct map *pos = rb_entry(nd, struct map, rb_node);
                printed += fprintf(fp, "Map:");
                printed += map__fprintf(pos, fp);
                if (verbose > 2) {
                        printed += dso__fprintf(pos->dso, type, fp);
                        printed += fprintf(fp, "--\n");
                }
        }

        return printed;
}

size_t map_groups__fprintf_maps(struct map_groups *mg, int verbose, FILE *fp)
{
        size_t printed = 0, i;

        for (i = 0; i < MAP__NR_TYPES; ++i)
                printed += __map_groups__fprintf_maps(mg, i, verbose, fp);

        return printed;
}

static size_t __map_groups__fprintf_removed_maps(struct map_groups *mg,
                                                 enum map_type type,
                                                 int verbose, FILE *fp)
{
        struct map *pos;
        size_t printed = 0;

        list_for_each_entry(pos, &mg->removed_maps[type], node) {
                printed += fprintf(fp, "Map:");
                printed += map__fprintf(pos, fp);
                if (verbose > 1) {
                        printed += dso__fprintf(pos->dso, type, fp);
                        printed += fprintf(fp, "--\n");
                }
        }
        return printed;
}

static size_t map_groups__fprintf_removed_maps(struct map_groups *mg,
                                               int verbose, FILE *fp)
{
        size_t printed = 0, i;

        for (i = 0; i < MAP__NR_TYPES; ++i)
                printed += __map_groups__fprintf_removed_maps(mg, i, verbose, fp);

        return printed;
}

size_t map_groups__fprintf(struct map_groups *mg, int verbose, FILE *fp)
{
        size_t printed = map_groups__fprintf_maps(mg, verbose, fp);
        printed += fprintf(fp, "Removed maps:\n");
        return printed + map_groups__fprintf_removed_maps(mg, verbose, fp);
}

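/*
 * When a new map overlaps existing ones, the overlapped maps are taken
 * out of the tree and clones covering the non-overlapped leading and
 * trailing ranges are inserted in their place.
 */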
int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
                                   int verbose, FILE *fp)
{
        struct rb_root *root = &mg->maps[map->type];
        struct rb_node *next = rb_first(root);
        int err = 0;

        while (next) {
                struct map *pos = rb_entry(next, struct map, rb_node);
                next = rb_next(&pos->rb_node);

                if (!map__overlap(pos, map))
                        continue;

                if (verbose >= 2) {
                        fputs("overlapping maps:\n", fp);
                        map__fprintf(map, fp);
                        map__fprintf(pos, fp);
                }

                rb_erase(&pos->rb_node, root);
                /*
                 * Now check if we need to create new maps for areas not
                 * overlapped by the new map:
                 */
                if (map->start > pos->start) {
                        struct map *before = map__clone(pos);

                        if (before == NULL) {
                                err = -ENOMEM;
                                goto move_map;
                        }

                        before->end = map->start - 1;
                        map_groups__insert(mg, before);
                        if (verbose >= 2)
                                map__fprintf(before, fp);
                }

                if (map->end < pos->end) {
                        struct map *after = map__clone(pos);

                        if (after == NULL) {
                                err = -ENOMEM;
                                goto move_map;
                        }

                        after->start = map->end + 1;
                        map_groups__insert(mg, after);
                        if (verbose >= 2)
                                map__fprintf(after, fp);
                }
move_map:
                /*
                 * If we have references, just move them to a separate list.
                 */
                if (pos->referenced)
                        list_add_tail(&pos->node, &mg->removed_maps[map->type]);
                else
                        map__delete(pos);

                if (err)
                        return err;
        }

        return 0;
}

/*
 * XXX This should not really _copy_ the maps, but refcount them.
 */
int map_groups__clone(struct map_groups *mg,
                      struct map_groups *parent, enum map_type type)
{
        struct rb_node *nd;
        for (nd = rb_first(&parent->maps[type]); nd; nd = rb_next(nd)) {
                struct map *map = rb_entry(nd, struct map, rb_node);
                struct map *new = map__clone(map);
                if (new == NULL)
                        return -ENOMEM;
                map_groups__insert(mg, new);
        }
        return 0;
}

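/*
 * For a relocated kernel map, pgoff holds the relocation offset
 * applied by map__reloc_vmlinux() below.
 */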
static u64 map__reloc_map_ip(struct map *map, u64 ip)
{
        return ip + (s64)map->pgoff;
}

static u64 map__reloc_unmap_ip(struct map *map, u64 ip)
{
        return ip - (s64)map->pgoff;
}

void map__reloc_vmlinux(struct map *self)
{
        struct kmap *kmap = map__kmap(self);
        s64 reloc;

        if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->unrelocated_addr)
                return;

        reloc = (kmap->ref_reloc_sym->unrelocated_addr -
                 kmap->ref_reloc_sym->addr);

        if (!reloc)
                return;

        self->map_ip   = map__reloc_map_ip;
        self->unmap_ip = map__reloc_unmap_ip;
        self->pgoff    = reloc;
}

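/*
 * Maps are kept in an rb-tree keyed by start address; maps__find()
 * walks it to locate the map covering a given ip.
 */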
void maps__insert(struct rb_root *maps, struct map *map)
{
        struct rb_node **p = &maps->rb_node;
        struct rb_node *parent = NULL;
        const u64 ip = map->start;
        struct map *m;

        while (*p != NULL) {
                parent = *p;
                m = rb_entry(parent, struct map, rb_node);
                if (ip < m->start)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&map->rb_node, parent, p);
        rb_insert_color(&map->rb_node, maps);
}

void maps__remove(struct rb_root *self, struct map *map)
{
        rb_erase(&map->rb_node, self);
}

struct map *maps__find(struct rb_root *maps, u64 ip)
{
        struct rb_node **p = &maps->rb_node;
        struct rb_node *parent = NULL;
        struct map *m;

        while (*p != NULL) {
                parent = *p;
                m = rb_entry(parent, struct map, rb_node);
                if (ip < m->start)
                        p = &(*p)->rb_left;
                else if (ip > m->end)
                        p = &(*p)->rb_right;
                else
                        return m;
        }

        return NULL;
}

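/*
 * A machine bundles the kernel maps, DSO lists and threads of either
 * the host or a guest, keyed by pid.
 */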
int machine__init(struct machine *self, const char *root_dir, pid_t pid)
{
        map_groups__init(&self->kmaps);
        RB_CLEAR_NODE(&self->rb_node);
        INIT_LIST_HEAD(&self->user_dsos);
        INIT_LIST_HEAD(&self->kernel_dsos);

        self->threads = RB_ROOT;
        INIT_LIST_HEAD(&self->dead_threads);
        self->last_match = NULL;

        self->kmaps.machine = self;
        self->pid           = pid;
        self->root_dir      = strdup(root_dir);
        if (self->root_dir == NULL)
                return -ENOMEM;

        if (pid != HOST_KERNEL_ID) {
                struct thread *thread = machine__findnew_thread(self, pid);
                char comm[64];

                if (!thread)
                        return -ENOMEM;

                snprintf(comm, sizeof(comm), "[guest/%d]", pid);
                thread__set_comm(thread, comm);
        }

        return 0;
}

static void dsos__delete(struct list_head *self)
{
        struct dso *pos, *n;

        list_for_each_entry_safe(pos, n, self, node) {
                list_del(&pos->node);
                dso__delete(pos);
        }
}

void machine__exit(struct machine *self)
{
        map_groups__exit(&self->kmaps);
        dsos__delete(&self->user_dsos);
        dsos__delete(&self->kernel_dsos);
        free(self->root_dir);
        self->root_dir = NULL;
}

void machine__delete(struct machine *self)
{
        machine__exit(self);
        free(self);
}

struct machine *machines__add(struct rb_root *self, pid_t pid,
                              const char *root_dir)
{
        struct rb_node **p = &self->rb_node;
        struct rb_node *parent = NULL;
        struct machine *pos, *machine = malloc(sizeof(*machine));

        if (!machine)
                return NULL;

        if (machine__init(machine, root_dir, pid) != 0) {
                free(machine);
                return NULL;
        }

        while (*p != NULL) {
                parent = *p;
                pos = rb_entry(parent, struct machine, rb_node);
                if (pid < pos->pid)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&machine->rb_node, parent, p);
        rb_insert_color(&machine->rb_node, self);

        return machine;
}

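/*
 * The machine with pid 0 is remembered as the default machine and is
 * returned when no exact pid match exists.
 */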
struct machine *machines__find(struct rb_root *self, pid_t pid)
{
        struct rb_node **p = &self->rb_node;
        struct rb_node *parent = NULL;
        struct machine *machine;
        struct machine *default_machine = NULL;

        while (*p != NULL) {
                parent = *p;
                machine = rb_entry(parent, struct machine, rb_node);
                if (pid < machine->pid)
                        p = &(*p)->rb_left;
                else if (pid > machine->pid)
                        p = &(*p)->rb_right;
                else
                        return machine;
                if (!machine->pid)
                        default_machine = machine;
        }

        return default_machine;
}

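/*
 * Look up a machine by pid, creating it if needed. For guest pids the
 * root directory is taken from symbol_conf.guestmount/<pid>; paths that
 * cannot be accessed are reported only once via the 'seen' strlist.
 */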
struct machine *machines__findnew(struct rb_root *self, pid_t pid)
{
        char path[PATH_MAX];
        const char *root_dir = "";
        struct machine *machine = machines__find(self, pid);

        if (machine && (machine->pid == pid))
                goto out;

        if ((pid != HOST_KERNEL_ID) &&
            (pid != DEFAULT_GUEST_KERNEL_ID) &&
            (symbol_conf.guestmount)) {
                sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
                if (access(path, R_OK)) {
                        static struct strlist *seen;

                        if (!seen)
                                seen = strlist__new(true, NULL);

                        if (!strlist__has_entry(seen, path)) {
                                pr_err("Can't access file %s\n", path);
                                strlist__add(seen, path);
                        }
                        machine = NULL;
                        goto out;
                }
                root_dir = path;
        }

        machine = machines__add(self, pid, root_dir);
out:
        return machine;
}

void machines__process(struct rb_root *self, machine__process_t process, void *data)
{
        struct rb_node *nd;

        for (nd = rb_first(self); nd; nd = rb_next(nd)) {
                struct machine *pos = rb_entry(nd, struct machine, rb_node);
                process(pos, data);
        }
}

char *machine__mmap_name(struct machine *self, char *bf, size_t size)
{
        if (machine__is_host(self))
                snprintf(bf, size, "[%s]", "kernel.kallsyms");
        else if (machine__is_default_guest(self))
                snprintf(bf, size, "[%s]", "guest.kernel.kallsyms");
        else
                snprintf(bf, size, "[%s.%d]", "guest.kernel.kallsyms", self->pid);

        return bf;
}

void machines__set_id_hdr_size(struct rb_root *machines, u16 id_hdr_size)
{
        struct rb_node *node;
        struct machine *machine;

        for (node = rb_first(machines); node; node = rb_next(node)) {
                machine = rb_entry(node, struct machine, rb_node);
                machine->id_hdr_size = id_hdr_size;
        }
}