const char *map_type__name[MAP__NR_TYPES] = {
        [MAP__FUNCTION] = "Functions",
        [MAP__VARIABLE] = "Variables",
};

static inline int is_anon_memory(const char *filename)
{
        return strcmp(filename, "//anon") == 0;
}

static inline int is_no_dso_memory(const char *filename)
{
        return !strcmp(filename, "[stack]") ||
               !strcmp(filename, "[vdso]")  ||
               !strcmp(filename, "[heap]");
}
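
/*
 * map__init() fills in an already allocated struct map from its arguments
 * and installs the default map_ip()/unmap_ip() address translation helpers.
 */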
void map__init(struct map *self, enum map_type type,
               u64 start, u64 end, u64 pgoff, struct dso *dso)

        self->map_ip   = map__map_ip;
        self->unmap_ip = map__unmap_ip;
        RB_CLEAR_NODE(&self->rb_node);

        self->referenced = false;
struct map *map__new(struct list_head *dsos__list, u64 start, u64 len,
                     u64 pgoff, u32 pid, char *filename,

        struct map *self = malloc(sizeof(*self));

                char newfilename[PATH_MAX];

                anon = is_anon_memory(filename);
                no_dso = is_no_dso_memory(filename);

                        snprintf(newfilename, sizeof(newfilename), "/tmp/perf-%d.map", pid);
                        filename = newfilename;

                dso = __dsos__findnew(dsos__list, filename);

                map__init(self, type, start, start + len, pgoff, dso);

                        self->map_ip = self->unmap_ip = identity__map_ip;

                        /*
                         * Set memory without DSO as loaded. All map__find_*
                         * functions still return NULL, and we avoid the
                         * unnecessary map__load warning.
                         */
                        dso__set_loaded(dso, self->type);
void map__delete(struct map *self)
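
/*
 * Clamp the map to the symbols it actually contains: map__fixup_start()
 * resets ->start to the first symbol's start address and map__fixup_end()
 * resets ->end to the last symbol's end address.
 */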
void map__fixup_start(struct map *self)

        struct rb_root *symbols = &self->dso->symbols[self->type];
        struct rb_node *nd = rb_first(symbols);

                struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
                self->start = sym->start;
void map__fixup_end(struct map *self)

        struct rb_root *symbols = &self->dso->symbols[self->type];
        struct rb_node *nd = rb_last(symbols);

                struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
                self->end = sym->end;
#define DSO__DELETED "(deleted)"
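
/*
 * map__load() lazily loads the symbol table of the map's DSO (dso__loaded()
 * guards against doing it twice) and warns when no usable symbols are found,
 * e.g. for stripped binaries or ones that were deleted/updated on disk.
 */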
int map__load(struct map *self, symbol_filter_t filter)

        const char *name = self->dso->long_name;

        if (dso__loaded(self->dso, self->type))

        nr = dso__load(self->dso, self, filter);

                if (self->dso->has_build_id) {
                        char sbuild_id[BUILD_ID_SIZE * 2 + 1];

                        build_id__sprintf(self->dso->build_id,
                                          sizeof(self->dso->build_id),

                        pr_warning("%s with build id %s not found",

                        pr_warning("Failed to open %s", name);

                pr_warning(", continuing without symbols\n");

        } else if (nr == 0) {
                const size_t len = strlen(name);
                const size_t real_len = len - sizeof(DSO__DELETED);

                if (len > sizeof(DSO__DELETED) &&
                    strcmp(name + real_len + 1, DSO__DELETED) == 0) {
                        pr_warning("%.*s was updated, restart the long "
                                   "running apps that use it!\n",
                                   (int)real_len, name);

                        pr_warning("no symbols found in %s, maybe install "
                                   "a debug package?\n", name);

        /*
         * Only applies to the kernel, as its symtabs aren't relative like the
         * module ones.
         */
        if (self->dso->kernel)
                map__reloc_vmlinux(self);
struct symbol *map__find_symbol(struct map *self, u64 addr,
                                symbol_filter_t filter)

        if (map__load(self, filter) < 0)

        return dso__find_symbol(self->dso, self->type, addr);
struct symbol *map__find_symbol_by_name(struct map *self, const char *name,
                                        symbol_filter_t filter)

        if (map__load(self, filter) < 0)

        if (!dso__sorted_by_name(self->dso, self->type))
                dso__sort_by_name(self->dso, self->type);

        return dso__find_symbol_by_name(self->dso, self->type, name);
struct map *map__clone(struct map *self)

        struct map *map = malloc(sizeof(*self));

        memcpy(map, self, sizeof(*self));
int map__overlap(struct map *l, struct map *r)

        if (l->start > r->start) {

        if (l->end > r->start)
size_t map__fprintf(struct map *self, FILE *fp)
{
        return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s\n",
                       self->start, self->end, self->pgoff, self->dso->name);
}
/*
 * objdump wants/reports absolute IPs for ET_EXEC, and RIPs for ET_DYN.
 * map->dso->adjust_symbols==1 for ET_EXEC-like cases.
 */
u64 map__rip_2objdump(struct map *map, u64 rip)

        u64 addr = map->dso->adjust_symbols ?
                        map->unmap_ip(map, rip) :       /* RIP -> IP */
u64 map__objdump_2ip(struct map *map, u64 addr)

        u64 ip = map->dso->adjust_symbols ?
                        map->unmap_ip(map, addr);       /* RIP -> IP */
void map_groups__init(struct map_groups *mg)

        for (i = 0; i < MAP__NR_TYPES; ++i) {
                mg->maps[i] = RB_ROOT;
                INIT_LIST_HEAD(&mg->removed_maps[i]);
static void maps__delete(struct rb_root *maps)

        struct rb_node *next = rb_first(maps);

                struct map *pos = rb_entry(next, struct map, rb_node);

                next = rb_next(&pos->rb_node);
                rb_erase(&pos->rb_node, maps);
static void maps__delete_removed(struct list_head *maps)

        list_for_each_entry_safe(pos, n, maps, node) {
                list_del(&pos->node);
void map_groups__exit(struct map_groups *mg)

        for (i = 0; i < MAP__NR_TYPES; ++i) {
                maps__delete(&mg->maps[i]);
                maps__delete_removed(&mg->removed_maps[i]);
void map_groups__flush(struct map_groups *mg)

        for (type = 0; type < MAP__NR_TYPES; type++) {
                struct rb_root *root = &mg->maps[type];
                struct rb_node *next = rb_first(root);

                        struct map *pos = rb_entry(next, struct map, rb_node);
                        next = rb_next(&pos->rb_node);
                        rb_erase(&pos->rb_node, root);
                        /*
                         * We may have references to this map, for
                         * instance in some hist_entry instances, so
                         * just move them to a separate list.
                         */
                        list_add_tail(&pos->node, &mg->removed_maps[pos->type]);
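
/*
 * map_groups__find_symbol() first looks up the map of the given type that
 * covers addr, then resolves the symbol inside it after translating addr
 * with the map's map_ip() helper.
 */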
struct symbol *map_groups__find_symbol(struct map_groups *mg,
                                       enum map_type type, u64 addr,
                                       symbol_filter_t filter)

        struct map *map = map_groups__find(mg, type, addr);

                return map__find_symbol(map, map->map_ip(map, addr), filter);
struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg,
                                               symbol_filter_t filter)

        for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) {
                struct map *pos = rb_entry(nd, struct map, rb_node);
                struct symbol *sym = map__find_symbol_by_name(pos, name, filter);
size_t __map_groups__fprintf_maps(struct map_groups *mg,
                                  enum map_type type, int verbose, FILE *fp)

        size_t printed = fprintf(fp, "%s:\n", map_type__name[type]);

        for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) {
                struct map *pos = rb_entry(nd, struct map, rb_node);
                printed += fprintf(fp, "Map:");
                printed += map__fprintf(pos, fp);

                        printed += dso__fprintf(pos->dso, type, fp);
                        printed += fprintf(fp, "--\n");
size_t map_groups__fprintf_maps(struct map_groups *mg, int verbose, FILE *fp)

        size_t printed = 0, i;

        for (i = 0; i < MAP__NR_TYPES; ++i)
                printed += __map_groups__fprintf_maps(mg, i, verbose, fp);
static size_t __map_groups__fprintf_removed_maps(struct map_groups *mg,
                                                 int verbose, FILE *fp)

        list_for_each_entry(pos, &mg->removed_maps[type], node) {
                printed += fprintf(fp, "Map:");
                printed += map__fprintf(pos, fp);

                        printed += dso__fprintf(pos->dso, type, fp);
                        printed += fprintf(fp, "--\n");
static size_t map_groups__fprintf_removed_maps(struct map_groups *mg,
                                               int verbose, FILE *fp)

        size_t printed = 0, i;

        for (i = 0; i < MAP__NR_TYPES; ++i)
                printed += __map_groups__fprintf_removed_maps(mg, i, verbose, fp);
size_t map_groups__fprintf(struct map_groups *mg, int verbose, FILE *fp)

        size_t printed = map_groups__fprintf_maps(mg, verbose, fp);
        printed += fprintf(fp, "Removed maps:\n");
        return printed + map_groups__fprintf_removed_maps(mg, verbose, fp);
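
/*
 * When a new map overlaps existing ones, each overlapped map is taken out of
 * the tree, any non-overlapped head and/or tail of it is re-inserted as a
 * cloned map, and the victim itself is parked on the removed_maps list in
 * case something still holds a reference to it.
 */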
int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
                                   int verbose, FILE *fp)

        struct rb_root *root = &mg->maps[map->type];
        struct rb_node *next = rb_first(root);

                struct map *pos = rb_entry(next, struct map, rb_node);
                next = rb_next(&pos->rb_node);

                if (!map__overlap(pos, map))

                        fputs("overlapping maps:\n", fp);
                        map__fprintf(map, fp);
                        map__fprintf(pos, fp);

                rb_erase(&pos->rb_node, root);
                /*
                 * Now check if we need to create new maps for areas not
                 * overlapped by the new map:
                 */
                if (map->start > pos->start) {
                        struct map *before = map__clone(pos);

                        if (before == NULL) {

                        before->end = map->start - 1;
                        map_groups__insert(mg, before);

                                map__fprintf(before, fp);

                if (map->end < pos->end) {
                        struct map *after = map__clone(pos);

                        after->start = map->end + 1;
                        map_groups__insert(mg, after);

                                map__fprintf(after, fp);

                /*
                 * If we have references, just move them to a separate list.
                 */
                        list_add_tail(&pos->node, &mg->removed_maps[map->type]);
/*
 * XXX This should not really _copy_ the maps, but refcount them.
 */
int map_groups__clone(struct map_groups *mg,
                      struct map_groups *parent, enum map_type type)

        for (nd = rb_first(&parent->maps[type]); nd; nd = rb_next(nd)) {
                struct map *map = rb_entry(nd, struct map, rb_node);
                struct map *new = map__clone(map);

                map_groups__insert(mg, new);
static u64 map__reloc_map_ip(struct map *map, u64 ip)
{
        return ip + (s64)map->pgoff;
}

static u64 map__reloc_unmap_ip(struct map *map, u64 ip)
{
        return ip - (s64)map->pgoff;
}
void map__reloc_vmlinux(struct map *self)

        struct kmap *kmap = map__kmap(self);

        if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->unrelocated_addr)

        reloc = (kmap->ref_reloc_sym->unrelocated_addr -
                 kmap->ref_reloc_sym->addr);

        self->map_ip   = map__reloc_map_ip;
        self->unmap_ip = map__reloc_unmap_ip;
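
/*
 * The maps rb-tree is keyed by the map start address: maps__insert() links a
 * map in the usual rb-tree way and maps__find() returns the map whose
 * [start, end] range contains ip.
 */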
void maps__insert(struct rb_root *maps, struct map *map)

        struct rb_node **p = &maps->rb_node;
        struct rb_node *parent = NULL;
        const u64 ip = map->start;

                m = rb_entry(parent, struct map, rb_node);

        rb_link_node(&map->rb_node, parent, p);
        rb_insert_color(&map->rb_node, maps);
void maps__remove(struct rb_root *self, struct map *map)
{
        rb_erase(&map->rb_node, self);
}
struct map *maps__find(struct rb_root *maps, u64 ip)

        struct rb_node **p = &maps->rb_node;
        struct rb_node *parent = NULL;

                m = rb_entry(parent, struct map, rb_node);

                else if (ip > m->end)
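
/*
 * A struct machine groups the kernel maps and the user/kernel DSO lists of
 * one host or guest; root_dir is the filesystem root (e.g. a guest mount)
 * under which its binaries are looked up.
 */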
int machine__init(struct machine *self, const char *root_dir, pid_t pid)

        map_groups__init(&self->kmaps);
        RB_CLEAR_NODE(&self->rb_node);
        INIT_LIST_HEAD(&self->user_dsos);
        INIT_LIST_HEAD(&self->kernel_dsos);

        self->kmaps.machine = self;

        self->root_dir = strdup(root_dir);
        return self->root_dir == NULL ? -ENOMEM : 0;
static void dsos__delete(struct list_head *self)

        list_for_each_entry_safe(pos, n, self, node) {
                list_del(&pos->node);
void machine__exit(struct machine *self)

        map_groups__exit(&self->kmaps);
        dsos__delete(&self->user_dsos);
        dsos__delete(&self->kernel_dsos);
        free(self->root_dir);
        self->root_dir = NULL;

void machine__delete(struct machine *self)
struct machine *machines__add(struct rb_root *self, pid_t pid,
                              const char *root_dir)

        struct rb_node **p = &self->rb_node;
        struct rb_node *parent = NULL;
        struct machine *pos, *machine = malloc(sizeof(*machine));

        if (machine__init(machine, root_dir, pid) != 0) {

                pos = rb_entry(parent, struct machine, rb_node);

        rb_link_node(&machine->rb_node, parent, p);
        rb_insert_color(&machine->rb_node, self);
struct machine *machines__find(struct rb_root *self, pid_t pid)

        struct rb_node **p = &self->rb_node;
        struct rb_node *parent = NULL;
        struct machine *machine;
        struct machine *default_machine = NULL;

                machine = rb_entry(parent, struct machine, rb_node);
                if (pid < machine->pid)

                else if (pid > machine->pid)

                        default_machine = machine;

        return default_machine;
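
/*
 * machines__findnew() returns the machine for pid, creating it on demand;
 * guest machines get a per-pid root directory under symbol_conf.guestmount,
 * which must exist and be readable.
 */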
struct machine *machines__findnew(struct rb_root *self, pid_t pid)

        const char *root_dir;
        struct machine *machine = machines__find(self, pid);

        if (!machine || machine->pid != pid) {
                if (pid == HOST_KERNEL_ID || pid == DEFAULT_GUEST_KERNEL_ID)

                        if (!symbol_conf.guestmount)

                        sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
                        if (access(path, R_OK)) {
                                pr_err("Can't access file %s\n", path);

                machine = machines__add(self, pid, root_dir);
void machines__process(struct rb_root *self, machine__process_t process, void *data)

        for (nd = rb_first(self); nd; nd = rb_next(nd)) {
                struct machine *pos = rb_entry(nd, struct machine, rb_node);
char *machine__mmap_name(struct machine *self, char *bf, size_t size)

        if (machine__is_host(self))
                snprintf(bf, size, "[%s]", "kernel.kallsyms");
        else if (machine__is_default_guest(self))
                snprintf(bf, size, "[%s]", "guest.kernel.kallsyms");

                snprintf(bf, size, "[%s.%d]", "guest.kernel.kallsyms", self->pid);