/* tools/perf/util/map.c */
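/*
 * Address-space bookkeeping for perf: a "map" describes one mapped range
 * backed by a DSO, a "map_groups" holds per-type rb-trees of maps
 * (functions/variables) plus a list of removed-but-referenced maps, and a
 * "machine" groups the kernel and user DSOs of a host or guest instance.
 */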
#include "symbol.h"
#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>
#include "map.h"

const char *map_type__name[MAP__NR_TYPES] = {
	[MAP__FUNCTION] = "Functions",
	[MAP__VARIABLE] = "Variables",
};
static inline int is_anon_memory(const char *filename)
{
	return strcmp(filename, "//anon") == 0;
}

static inline int is_no_dso_memory(const char *filename)
{
	return !strcmp(filename, "[stack]") ||
	       !strcmp(filename, "[vdso]")  ||
	       !strcmp(filename, "[heap]");
}
void map__init(struct map *self, enum map_type type,
	       u64 start, u64 end, u64 pgoff, struct dso *dso)
{
	self->type     = type;
	self->start    = start;
	self->end      = end;
	self->pgoff    = pgoff;
	self->dso      = dso;
	self->map_ip   = map__map_ip;
	self->unmap_ip = map__unmap_ip;
	RB_CLEAR_NODE(&self->rb_node);
	self->groups   = NULL;
	self->referenced = false;
}
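/*
 * Allocate and initialize a map covering [start, start + len). Anonymous
 * mappings ("//anon") are redirected to /tmp/perf-<pid>.map (the file perf
 * uses for JIT symbol maps); for those and for [stack], [vdso] and [heap]
 * the identity ip translation is used, and the latter are marked loaded up
 * front so map__load() won't warn about missing symbols. Returns NULL if
 * allocation or the DSO lookup fails.
 */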
struct map *map__new(struct list_head *dsos__list, u64 start, u64 len,
		     u64 pgoff, u32 pid, char *filename,
		     enum map_type type)
{
	struct map *self = malloc(sizeof(*self));

	if (self != NULL) {
		char newfilename[PATH_MAX];
		struct dso *dso;
		int anon, no_dso;

		anon = is_anon_memory(filename);
		no_dso = is_no_dso_memory(filename);

		if (anon) {
			snprintf(newfilename, sizeof(newfilename), "/tmp/perf-%d.map", pid);
			filename = newfilename;
		}

		dso = __dsos__findnew(dsos__list, filename);
		if (dso == NULL)
			goto out_delete;

		map__init(self, type, start, start + len, pgoff, dso);

		if (anon || no_dso) {
			self->map_ip = self->unmap_ip = identity__map_ip;

			/*
			 * Set memory without DSO as loaded. All map__find_*
			 * functions still return NULL, and we avoid the
			 * unnecessary map__load warning.
			 */
			if (no_dso)
				dso__set_loaded(dso, self->type);
		}
	}
	return self;
out_delete:
	free(self);
	return NULL;
}
void map__delete(struct map *self)
{
	free(self);
}

void map__fixup_start(struct map *self)
{
	struct rb_root *symbols = &self->dso->symbols[self->type];
	struct rb_node *nd = rb_first(symbols);
	if (nd != NULL) {
		struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
		self->start = sym->start;
	}
}

void map__fixup_end(struct map *self)
{
	struct rb_root *symbols = &self->dso->symbols[self->type];
	struct rb_node *nd = rb_last(symbols);
	if (nd != NULL) {
		struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
		self->end = sym->end;
	}
}
#define DSO__DELETED "(deleted)"
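/*
 * Lazily load the symbol table of the map's DSO. Returns 0 when symbols
 * are (already) loaded and -1 otherwise, warning about missing build ids,
 * prelinked/replaced ("deleted") binaries or DSOs without symbols. Kernel
 * maps also get their ip translation adjusted, see map__reloc_vmlinux().
 */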
int map__load(struct map *self, symbol_filter_t filter)
{
	const char *name = self->dso->long_name;
	int nr;

	if (dso__loaded(self->dso, self->type))
		return 0;

	nr = dso__load(self->dso, self, filter);
	if (nr < 0) {
		if (self->dso->has_build_id) {
			char sbuild_id[BUILD_ID_SIZE * 2 + 1];

			build_id__sprintf(self->dso->build_id,
					  sizeof(self->dso->build_id),
					  sbuild_id);
			pr_warning("%s with build id %s not found",
				   name, sbuild_id);
		} else
			pr_warning("Failed to open %s", name);

		pr_warning(", continuing without symbols\n");
		return -1;
	} else if (nr == 0) {
		const size_t len = strlen(name);
		const size_t real_len = len - sizeof(DSO__DELETED);

		if (len > sizeof(DSO__DELETED) &&
		    strcmp(name + real_len + 1, DSO__DELETED) == 0) {
			pr_warning("%.*s was updated (is prelink enabled?). "
				   "Restart the long running apps that use it!\n",
				   (int)real_len, name);
		} else {
			pr_warning("no symbols found in %s, maybe install "
				   "a debug package?\n", name);
		}

		return -1;
	}
	/*
	 * Only applies to the kernel, as its symtabs aren't relative like the
	 * module ones.
	 */
	if (self->dso->kernel)
		map__reloc_vmlinux(self);

	return 0;
}
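/*
 * Look up the symbol covering 'addr'. The address must already be
 * DSO-relative, i.e. translated with map->map_ip() as done in
 * map_groups__find_symbol() below.
 */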
struct symbol *map__find_symbol(struct map *self, u64 addr,
				symbol_filter_t filter)
{
	if (map__load(self, filter) < 0)
		return NULL;

	return dso__find_symbol(self->dso, self->type, addr);
}

struct symbol *map__find_symbol_by_name(struct map *self, const char *name,
					symbol_filter_t filter)
{
	if (map__load(self, filter) < 0)
		return NULL;

	if (!dso__sorted_by_name(self->dso, self->type))
		dso__sort_by_name(self->dso, self->type);

	return dso__find_symbol_by_name(self->dso, self->type, name);
}
struct map *map__clone(struct map *self)
{
	struct map *map = malloc(sizeof(*self));

	if (!map)
		return NULL;

	memcpy(map, self, sizeof(*self));

	return map;
}

int map__overlap(struct map *l, struct map *r)
{
	if (l->start > r->start) {
		struct map *t = l;
		l = r;
		r = t;
	}

	if (l->end > r->start)
		return 1;

	return 0;
}

size_t map__fprintf(struct map *self, FILE *fp)
{
	return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s\n",
		       self->start, self->end, self->pgoff, self->dso->name);
}
/*
 * objdump wants/reports absolute IPs for ET_EXEC, and RIPs for ET_DYN.
 * map->dso->adjust_symbols==1 for ET_EXEC-like cases.
 */
u64 map__rip_2objdump(struct map *map, u64 rip)
{
	u64 addr = map->dso->adjust_symbols ?
			map->unmap_ip(map, rip) :	/* RIP -> IP */
			rip;
	return addr;
}

u64 map__objdump_2ip(struct map *map, u64 addr)
{
	u64 ip = map->dso->adjust_symbols ?
			addr :
			map->unmap_ip(map, addr);	/* RIP -> IP */
	return ip;
}
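/*
 * A map_groups instance keeps one rb-tree of maps per map_type, plus a
 * "removed_maps" list for maps that were replaced but may still be
 * referenced (e.g. by hist_entry instances).
 */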
void map_groups__init(struct map_groups *mg)
{
	int i;
	for (i = 0; i < MAP__NR_TYPES; ++i) {
		mg->maps[i] = RB_ROOT;
		INIT_LIST_HEAD(&mg->removed_maps[i]);
	}
	mg->machine = NULL;
}

static void maps__delete(struct rb_root *maps)
{
	struct rb_node *next = rb_first(maps);

	while (next) {
		struct map *pos = rb_entry(next, struct map, rb_node);

		next = rb_next(&pos->rb_node);
		rb_erase(&pos->rb_node, maps);
		map__delete(pos);
	}
}

static void maps__delete_removed(struct list_head *maps)
{
	struct map *pos, *n;

	list_for_each_entry_safe(pos, n, maps, node) {
		list_del(&pos->node);
		map__delete(pos);
	}
}

void map_groups__exit(struct map_groups *mg)
{
	int i;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		maps__delete(&mg->maps[i]);
		maps__delete_removed(&mg->removed_maps[i]);
	}
}
void map_groups__flush(struct map_groups *mg)
{
	int type;

	for (type = 0; type < MAP__NR_TYPES; type++) {
		struct rb_root *root = &mg->maps[type];
		struct rb_node *next = rb_first(root);

		while (next) {
			struct map *pos = rb_entry(next, struct map, rb_node);
			next = rb_next(&pos->rb_node);
			rb_erase(&pos->rb_node, root);
			/*
			 * We may have references to this map, for
			 * instance in some hist_entry instances, so
			 * just move them to a separate list.
			 */
			list_add_tail(&pos->node, &mg->removed_maps[pos->type]);
		}
	}
}
struct symbol *map_groups__find_symbol(struct map_groups *mg,
				       enum map_type type, u64 addr,
				       struct map **mapp,
				       symbol_filter_t filter)
{
	struct map *map = map_groups__find(mg, type, addr);

	if (map != NULL) {
		if (mapp != NULL)
			*mapp = map;
		return map__find_symbol(map, map->map_ip(map, addr), filter);
	}

	return NULL;
}
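/*
 * Typical use, sketched under the assumption that 'mg' has already been
 * populated with the maps of the sampled thread or kernel:
 *
 *	struct map *map;
 *	struct symbol *sym;
 *
 *	sym = map_groups__find_symbol(mg, MAP__FUNCTION, sample_ip,
 *				      &map, NULL);
 *	if (sym != NULL)
 *		printf("%s\n", sym->name);
 */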
struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg,
					       enum map_type type,
					       const char *name,
					       struct map **mapp,
					       symbol_filter_t filter)
{
	struct rb_node *nd;

	for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node);
		struct symbol *sym = map__find_symbol_by_name(pos, name, filter);

		if (sym == NULL)
			continue;
		if (mapp != NULL)
			*mapp = pos;
		return sym;
	}

	return NULL;
}
size_t __map_groups__fprintf_maps(struct map_groups *mg,
				  enum map_type type, int verbose, FILE *fp)
{
	size_t printed = fprintf(fp, "%s:\n", map_type__name[type]);
	struct rb_node *nd;

	for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node);
		printed += fprintf(fp, "Map:");
		printed += map__fprintf(pos, fp);
		if (verbose > 2) {
			printed += dso__fprintf(pos->dso, type, fp);
			printed += fprintf(fp, "--\n");
		}
	}

	return printed;
}

size_t map_groups__fprintf_maps(struct map_groups *mg, int verbose, FILE *fp)
{
	size_t printed = 0, i;
	for (i = 0; i < MAP__NR_TYPES; ++i)
		printed += __map_groups__fprintf_maps(mg, i, verbose, fp);
	return printed;
}

static size_t __map_groups__fprintf_removed_maps(struct map_groups *mg,
						 enum map_type type,
						 int verbose, FILE *fp)
{
	struct map *pos;
	size_t printed = 0;

	list_for_each_entry(pos, &mg->removed_maps[type], node) {
		printed += fprintf(fp, "Map:");
		printed += map__fprintf(pos, fp);
		if (verbose > 1) {
			printed += dso__fprintf(pos->dso, type, fp);
			printed += fprintf(fp, "--\n");
		}
	}
	return printed;
}

static size_t map_groups__fprintf_removed_maps(struct map_groups *mg,
					       int verbose, FILE *fp)
{
	size_t printed = 0, i;
	for (i = 0; i < MAP__NR_TYPES; ++i)
		printed += __map_groups__fprintf_removed_maps(mg, i, verbose, fp);
	return printed;
}

size_t map_groups__fprintf(struct map_groups *mg, int verbose, FILE *fp)
{
	size_t printed = map_groups__fprintf_maps(mg, verbose, fp);
	printed += fprintf(fp, "Removed maps:\n");
	return printed + map_groups__fprintf_removed_maps(mg, verbose, fp);
}
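/*
 * When a new map overlaps existing ones, carve the old maps up: clone a
 * "before" and/or "after" piece for the parts the new map does not cover,
 * insert those back, and either delete the old map or park it on
 * removed_maps if it is still referenced.
 */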
int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
				   int verbose, FILE *fp)
{
	struct rb_root *root = &mg->maps[map->type];
	struct rb_node *next = rb_first(root);
	int err = 0;

	while (next) {
		struct map *pos = rb_entry(next, struct map, rb_node);
		next = rb_next(&pos->rb_node);

		if (!map__overlap(pos, map))
			continue;

		if (verbose >= 2) {
			fputs("overlapping maps:\n", fp);
			map__fprintf(map, fp);
			map__fprintf(pos, fp);
		}

		rb_erase(&pos->rb_node, root);
		/*
		 * Now check if we need to create new maps for areas not
		 * overlapped by the new map:
		 */
		if (map->start > pos->start) {
			struct map *before = map__clone(pos);

			if (before == NULL) {
				err = -ENOMEM;
				goto move_map;
			}

			before->end = map->start - 1;
			map_groups__insert(mg, before);
			if (verbose >= 2)
				map__fprintf(before, fp);
		}

		if (map->end < pos->end) {
			struct map *after = map__clone(pos);

			if (after == NULL) {
				err = -ENOMEM;
				goto move_map;
			}

			after->start = map->end + 1;
			map_groups__insert(mg, after);
			if (verbose >= 2)
				map__fprintf(after, fp);
		}
move_map:
		/*
		 * If we have references, just move them to a separate list.
		 */
		if (pos->referenced)
			list_add_tail(&pos->node, &mg->removed_maps[map->type]);
		else
			map__delete(pos);

		if (err)
			return err;
	}

	return 0;
}
/*
 * XXX This should not really _copy_ the maps, but refcount them.
 */
int map_groups__clone(struct map_groups *mg,
		      struct map_groups *parent, enum map_type type)
{
	struct rb_node *nd;
	for (nd = rb_first(&parent->maps[type]); nd; nd = rb_next(nd)) {
		struct map *map = rb_entry(nd, struct map, rb_node);
		struct map *new = map__clone(map);
		if (new == NULL)
			return -ENOMEM;
		map_groups__insert(mg, new);
	}
	return 0;
}
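/*
 * When the running kernel is relocated relative to the vmlinux symtab
 * (see the ref_reloc_sym check below), ip translation goes through these
 * pgoff-based helpers instead of the default map__map_ip/map__unmap_ip.
 */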
static u64 map__reloc_map_ip(struct map *map, u64 ip)
{
	return ip + (s64)map->pgoff;
}

static u64 map__reloc_unmap_ip(struct map *map, u64 ip)
{
	return ip - (s64)map->pgoff;
}

void map__reloc_vmlinux(struct map *self)
{
	struct kmap *kmap = map__kmap(self);
	s64 reloc;

	if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->unrelocated_addr)
		return;

	reloc = (kmap->ref_reloc_sym->unrelocated_addr -
		 kmap->ref_reloc_sym->addr);

	if (!reloc)
		return;

	self->map_ip   = map__reloc_map_ip;
	self->unmap_ip = map__reloc_unmap_ip;
	self->pgoff    = reloc;
}
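/*
 * The per-type map trees are ordered by map->start; maps__find() walks
 * the tree and returns the map whose [start, end] range contains ip,
 * or NULL when nothing covers it.
 */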
void maps__insert(struct rb_root *maps, struct map *map)
{
	struct rb_node **p = &maps->rb_node;
	struct rb_node *parent = NULL;
	const u64 ip = map->start;
	struct map *m;

	while (*p != NULL) {
		parent = *p;
		m = rb_entry(parent, struct map, rb_node);
		if (ip < m->start)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&map->rb_node, parent, p);
	rb_insert_color(&map->rb_node, maps);
}

void maps__remove(struct rb_root *self, struct map *map)
{
	rb_erase(&map->rb_node, self);
}

struct map *maps__find(struct rb_root *maps, u64 ip)
{
	struct rb_node **p = &maps->rb_node;
	struct rb_node *parent = NULL;
	struct map *m;

	while (*p != NULL) {
		parent = *p;
		m = rb_entry(parent, struct map, rb_node);
		if (ip < m->start)
			p = &(*p)->rb_left;
		else if (ip > m->end)
			p = &(*p)->rb_right;
		else
			return m;
	}

	return NULL;
}
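/*
 * A "machine" represents one kernel instance: the host, the default
 * guest, or a specific guest identified by pid. Machines live in an
 * rb-tree keyed by pid and carry their own kernel maps, DSO lists and
 * thread tree.
 */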
int machine__init(struct machine *self, const char *root_dir, pid_t pid)
{
	map_groups__init(&self->kmaps);
	RB_CLEAR_NODE(&self->rb_node);
	INIT_LIST_HEAD(&self->user_dsos);
	INIT_LIST_HEAD(&self->kernel_dsos);

	self->threads = RB_ROOT;
	INIT_LIST_HEAD(&self->dead_threads);
	self->last_match = NULL;

	self->kmaps.machine = self;
	self->pid = pid;
	self->root_dir = strdup(root_dir);
	return self->root_dir == NULL ? -ENOMEM : 0;
}

static void dsos__delete(struct list_head *self)
{
	struct dso *pos, *n;

	list_for_each_entry_safe(pos, n, self, node) {
		list_del(&pos->node);
		dso__delete(pos);
	}
}

void machine__exit(struct machine *self)
{
	map_groups__exit(&self->kmaps);
	dsos__delete(&self->user_dsos);
	dsos__delete(&self->kernel_dsos);
	free(self->root_dir);
	self->root_dir = NULL;
}

void machine__delete(struct machine *self)
{
	machine__exit(self);
	free(self);
}
struct machine *machines__add(struct rb_root *self, pid_t pid,
			      const char *root_dir)
{
	struct rb_node **p = &self->rb_node;
	struct rb_node *parent = NULL;
	struct machine *pos, *machine = malloc(sizeof(*machine));

	if (!machine)
		return NULL;

	if (machine__init(machine, root_dir, pid) != 0) {
		free(machine);
		return NULL;
	}

	while (*p != NULL) {
		parent = *p;
		pos = rb_entry(parent, struct machine, rb_node);
		if (pid < pos->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&machine->rb_node, parent, p);
	rb_insert_color(&machine->rb_node, self);

	return machine;
}
struct machine *machines__find(struct rb_root *self, pid_t pid)
{
	struct rb_node **p = &self->rb_node;
	struct rb_node *parent = NULL;
	struct machine *machine;
	struct machine *default_machine = NULL;

	while (*p != NULL) {
		parent = *p;
		machine = rb_entry(parent, struct machine, rb_node);
		if (pid < machine->pid)
			p = &(*p)->rb_left;
		else if (pid > machine->pid)
			p = &(*p)->rb_right;
		else
			return machine;
		if (!machine->pid)
			default_machine = machine;
	}

	return default_machine;
}
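/*
 * Find the machine for 'pid', creating it on first use. For guest
 * kernels other than the default one, symbol_conf.guestmount must be
 * set and <guestmount>/<pid> must be accessible; if not, the result of
 * the plain lookup (possibly NULL) is returned unchanged.
 */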
struct machine *machines__findnew(struct rb_root *self, pid_t pid)
{
	char path[PATH_MAX];
	const char *root_dir;
	struct machine *machine = machines__find(self, pid);

	if (!machine || machine->pid != pid) {
		if (pid == HOST_KERNEL_ID || pid == DEFAULT_GUEST_KERNEL_ID)
			root_dir = "";
		else {
			if (!symbol_conf.guestmount)
				goto out;
			sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
			if (access(path, R_OK)) {
				pr_err("Can't access file %s\n", path);
				goto out;
			}
			root_dir = path;
		}
		machine = machines__add(self, pid, root_dir);
	}
out:
	return machine;
}
void machines__process(struct rb_root *self, machine__process_t process, void *data)
{
	struct rb_node *nd;

	for (nd = rb_first(self); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		process(pos, data);
	}
}
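/*
 * Build the name used for this machine's kernel map: [kernel.kallsyms]
 * for the host, [guest.kernel.kallsyms] for the default guest, and
 * [guest.kernel.kallsyms.<pid>] for a specific guest.
 */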
char *machine__mmap_name(struct machine *self, char *bf, size_t size)
{
	if (machine__is_host(self))
		snprintf(bf, size, "[%s]", "kernel.kallsyms");
	else if (machine__is_default_guest(self))
		snprintf(bf, size, "[%s]", "guest.kernel.kallsyms");
	else
		snprintf(bf, size, "[%s.%d]", "guest.kernel.kallsyms", self->pid);

	return bf;
}