/* Headers below are a reconstruction; only <symbol/kallsyms.h> survived extraction. */
#include "callchain.h"
#include "debug.h"
#include "event.h"
#include "evsel.h"
#include "hist.h"
#include "machine.h"
#include "map.h"
#include "sort.h"
#include "strlist.h"
#include "thread.h"
#include <stdbool.h>
#include <symbol/kallsyms.h>
#include "unwind.h"
int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
{
	map_groups__init(&machine->kmaps);
	RB_CLEAR_NODE(&machine->rb_node);
	INIT_LIST_HEAD(&machine->user_dsos);
	INIT_LIST_HEAD(&machine->kernel_dsos);

	machine->threads = RB_ROOT;
	INIT_LIST_HEAD(&machine->dead_threads);
	machine->last_match = NULL;

	machine->kmaps.machine = machine;
	machine->pid = pid;

	machine->symbol_filter = NULL;
	machine->id_hdr_size = 0;

	machine->root_dir = strdup(root_dir);
	if (machine->root_dir == NULL)
		return -ENOMEM;

	if (pid != HOST_KERNEL_ID) {
		struct thread *thread = machine__findnew_thread(machine, 0,
								pid);
		char comm[64];

		if (thread == NULL)
			return -ENOMEM;

		snprintf(comm, sizeof(comm), "[guest/%d]", pid);
		thread__set_comm(thread, comm, 0);
	}

	return 0;
}
struct machine *machine__new_host(void)
{
	struct machine *machine = malloc(sizeof(*machine));

	if (machine != NULL) {
		machine__init(machine, "", HOST_KERNEL_ID);

		if (machine__create_kernel_maps(machine) < 0)
			goto out_delete;
	}

	return machine;
out_delete:
	free(machine);
	return NULL;
}
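/*
 * Typical lifecycle (illustrative sketch, using only functions defined in
 * this file):
 *
 *	struct machine *machine = machine__new_host();
 *
 *	if (machine != NULL) {
 *		... process events against the host machine ...
 *		machine__delete(machine);
 *	}
 */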
static void dsos__delete(struct list_head *dsos)
{
	struct dso *pos, *n;

	list_for_each_entry_safe(pos, n, dsos, node) {
		list_del(&pos->node);
		dso__delete(pos);
	}
}
void machine__delete_dead_threads(struct machine *machine)
{
	struct thread *t, *n;

	list_for_each_entry_safe(t, n, &machine->dead_threads, node) {
		list_del(&t->node);
		thread__delete(t);
	}
}
void machine__delete_threads(struct machine *machine)
{
	struct rb_node *nd = rb_first(&machine->threads);

	while (nd) {
		struct thread *t = rb_entry(nd, struct thread, rb_node);

		rb_erase(&t->rb_node, &machine->threads);
		nd = rb_next(nd);
		thread__delete(t);
	}
}
void machine__exit(struct machine *machine)
{
	map_groups__exit(&machine->kmaps);
	dsos__delete(&machine->user_dsos);
	dsos__delete(&machine->kernel_dsos);
	zfree(&machine->root_dir);
}
void machine__delete(struct machine *machine)
{
	machine__exit(machine);
	free(machine);
}
void machines__init(struct machines *machines)
{
	machine__init(&machines->host, "", HOST_KERNEL_ID);
	machines->guests = RB_ROOT;
	machines->symbol_filter = NULL;
}
void machines__exit(struct machines *machines)
{
	machine__exit(&machines->host);
}
struct machine *machines__add(struct machines *machines, pid_t pid,
			      const char *root_dir)
{
	struct rb_node **p = &machines->guests.rb_node;
	struct rb_node *parent = NULL;
	struct machine *pos, *machine = malloc(sizeof(*machine));

	if (machine == NULL)
		return NULL;

	if (machine__init(machine, root_dir, pid) != 0) {
		free(machine);
		return NULL;
	}

	machine->symbol_filter = machines->symbol_filter;

	while (*p != NULL) {
		parent = *p;
		pos = rb_entry(parent, struct machine, rb_node);
		if (pid < pos->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&machine->rb_node, parent, p);
	rb_insert_color(&machine->rb_node, &machines->guests);

	return machine;
}
void machines__set_symbol_filter(struct machines *machines,
				 symbol_filter_t symbol_filter)
{
	struct rb_node *nd;

	machines->symbol_filter = symbol_filter;
	machines->host.symbol_filter = symbol_filter;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		machine->symbol_filter = symbol_filter;
	}
}
struct machine *machines__find(struct machines *machines, pid_t pid)
{
	struct rb_node **p = &machines->guests.rb_node;
	struct rb_node *parent = NULL;
	struct machine *machine;
	struct machine *default_machine = NULL;

	if (pid == HOST_KERNEL_ID)
		return &machines->host;

	while (*p != NULL) {
		parent = *p;
		machine = rb_entry(parent, struct machine, rb_node);
		if (pid < machine->pid)
			p = &(*p)->rb_left;
		else if (pid > machine->pid)
			p = &(*p)->rb_right;
		else
			return machine;
		if (!machine->pid)
			default_machine = machine;
	}

	return default_machine;
}
struct machine *machines__findnew(struct machines *machines, pid_t pid)
{
	char path[PATH_MAX];
	const char *root_dir = "";
	struct machine *machine = machines__find(machines, pid);

	if (machine && (machine->pid == pid))
		goto out;

	if ((pid != HOST_KERNEL_ID) &&
	    (pid != DEFAULT_GUEST_KERNEL_ID) &&
	    (symbol_conf.guestmount)) {
		sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
		if (access(path, R_OK)) {
			static struct strlist *seen;

			if (!seen)
				seen = strlist__new(true, NULL);

			if (!strlist__has_entry(seen, path)) {
				pr_err("Can't access file %s\n", path);
				strlist__add(seen, path);
			}
			machine = NULL;
			goto out;
		}
		root_dir = path;
	}

	machine = machines__add(machines, pid, root_dir);
out:
	return machine;
}
void machines__process_guests(struct machines *machines,
			      machine__process_t process, void *data)
{
	struct rb_node *nd;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		process(pos, data);
	}
}
char *machine__mmap_name(struct machine *machine, char *bf, size_t size)
{
	if (machine__is_host(machine))
		snprintf(bf, size, "[%s]", "kernel.kallsyms");
	else if (machine__is_default_guest(machine))
		snprintf(bf, size, "[%s]", "guest.kernel.kallsyms");
	else
		snprintf(bf, size, "[%s.%d]", "guest.kernel.kallsyms",
			 machine->pid);

	return bf;
}
void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
{
	struct rb_node *node;
	struct machine *machine;

	machines->host.id_hdr_size = id_hdr_size;

	for (node = rb_first(&machines->guests); node; node = rb_next(node)) {
		machine = rb_entry(node, struct machine, rb_node);
		machine->id_hdr_size = id_hdr_size;
	}
}
static struct thread *__machine__findnew_thread(struct machine *machine,
						pid_t pid, pid_t tid,
						bool create)
{
	struct rb_node **p = &machine->threads.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;

	/*
	 * Front-end cache - TID lookups come in blocks,
	 * so most of the time we don't have to look up
	 * the full rbtree:
	 */
	if (machine->last_match && machine->last_match->tid == tid) {
		if (pid && pid != machine->last_match->pid_)
			machine->last_match->pid_ = pid;
		return machine->last_match;
	}

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->tid == tid) {
			machine->last_match = th;
			if (pid && pid != th->pid_)
				th->pid_ = pid;
			return th;
		}

		if (tid < th->tid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	if (!create)
		return NULL;

	th = thread__new(pid, tid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color(&th->rb_node, &machine->threads);
		machine->last_match = th;
	}

	return th;
}
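/*
 * Illustration of the last_match cache above (sketch): consecutive lookups
 * for the same tid walk the rbtree only once.
 *
 *	struct thread *a = machine__findnew_thread(machine, pid, tid);
 *	struct thread *b = machine__findnew_thread(machine, pid, tid);
 *
 * The second call is answered from machine->last_match without touching
 * the tree, so a == b.
 */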
struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
				       pid_t tid)
{
	return __machine__findnew_thread(machine, pid, tid, true);
}

struct thread *machine__find_thread(struct machine *machine, pid_t pid,
				    pid_t tid)
{
	return __machine__findnew_thread(machine, pid, tid, false);
}
int machine__process_comm_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine,
							event->comm.pid,
							event->comm.tid);

	if (dump_trace)
		perf_event__fprintf_comm(event, stdout);

	if (thread == NULL || thread__set_comm(thread, event->comm.comm, sample->time)) {
		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
		return -1;
	}

	return 0;
}
int machine__process_lost_event(struct machine *machine __maybe_unused,
				union perf_event *event, struct perf_sample *sample __maybe_unused)
{
	dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
		    event->lost.id, event->lost.lost);
	return 0;
}
struct map *machine__new_module(struct machine *machine, u64 start,
				const char *filename)
{
	struct map *map;
	struct dso *dso = __dsos__findnew(&machine->kernel_dsos, filename);

	if (dso == NULL)
		return NULL;

	map = map__new2(start, dso, MAP__FUNCTION);
	if (map == NULL)
		return NULL;

	if (machine__is_host(machine))
		dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
	else
		dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;
	map_groups__insert(&machine->kmaps, map);
	return map;
}
size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
{
	struct rb_node *nd;
	size_t ret = __dsos__fprintf(&machines->host.kernel_dsos, fp) +
		     __dsos__fprintf(&machines->host.user_dsos, fp);

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += __dsos__fprintf(&pos->kernel_dsos, fp);
		ret += __dsos__fprintf(&pos->user_dsos, fp);
	}

	return ret;
}
size_t machine__fprintf_dsos_buildid(struct machine *machine, FILE *fp,
				     bool (skip)(struct dso *dso, int parm), int parm)
{
	return __dsos__fprintf_buildid(&machine->kernel_dsos, fp, skip, parm) +
	       __dsos__fprintf_buildid(&machine->user_dsos, fp, skip, parm);
}
size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
				      bool (skip)(struct dso *dso, int parm), int parm)
{
	struct rb_node *nd;
	size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
	}
	return ret;
}
size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
{
	int i;
	size_t printed = 0;
	struct dso *kdso = machine->vmlinux_maps[MAP__FUNCTION]->dso;

	if (kdso->has_build_id) {
		char filename[PATH_MAX];
		if (dso__build_id_filename(kdso, filename, sizeof(filename)))
			printed += fprintf(fp, "[0] %s\n", filename);
	}

	for (i = 0; i < vmlinux_path__nr_entries; ++i)
		printed += fprintf(fp, "[%d] %s\n",
				   i + kdso->has_build_id, vmlinux_path[i]);

	return printed;
}
size_t machine__fprintf(struct machine *machine, FILE *fp)
{
	size_t ret = 0;
	struct rb_node *nd;

	for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
		struct thread *pos = rb_entry(nd, struct thread, rb_node);

		ret += thread__fprintf(pos, fp);
	}

	return ret;
}
static struct dso *machine__get_kernel(struct machine *machine)
{
	const char *vmlinux_name = NULL;
	struct dso *kernel;

	if (machine__is_host(machine)) {
		vmlinux_name = symbol_conf.vmlinux_name;
		if (!vmlinux_name)
			vmlinux_name = "[kernel.kallsyms]";

		kernel = dso__kernel_findnew(machine, vmlinux_name,
					     "[kernel]",
					     DSO_TYPE_KERNEL);
	} else {
		char bf[PATH_MAX];

		if (machine__is_default_guest(machine))
			vmlinux_name = symbol_conf.default_guest_vmlinux_name;
		if (!vmlinux_name)
			vmlinux_name = machine__mmap_name(machine, bf,
							  sizeof(bf));

		kernel = dso__kernel_findnew(machine, vmlinux_name,
					     "[guest.kernel]",
					     DSO_TYPE_GUEST_KERNEL);
	}

	if (kernel != NULL && (!kernel->has_build_id))
		dso__read_running_kernel_build_id(kernel, machine);

	return kernel;
}
struct process_args {
	u64 start;
};

static int symbol__in_kernel(void *arg, const char *name,
			     char type __maybe_unused, u64 start)
{
	struct process_args *args = arg;

	if (strchr(name, '['))
		return 0;

	args->start = start;
	return 1;
}
*machine
, char *buf
,
503 if (machine__is_default_guest(machine
))
504 scnprintf(buf
, bufsz
, "%s", symbol_conf
.default_guest_kallsyms
);
506 scnprintf(buf
, bufsz
, "%s/proc/kallsyms", machine
->root_dir
);
/* Figure out the start address of kernel map from /proc/kallsyms */
static u64 machine__get_kernel_start_addr(struct machine *machine)
{
	char filename[PATH_MAX];
	struct process_args args;

	machine__get_kallsyms_filename(machine, filename, PATH_MAX);

	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
		return 0;

	if (kallsyms__parse(filename, &args, symbol__in_kernel) <= 0)
		return 0;

	return args.start;
}
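/*
 * Example (illustrative): given /proc/kallsyms lines such as
 *
 *	ffffffff81000000 T _text
 *	ffffffffa0000000 t e1000_get_regs	[e1000]
 *
 * symbol__in_kernel() skips entries whose name field carries a module
 * annotation (it contains '[') and stops at the first kernel symbol,
 * recording its start address, here 0xffffffff81000000.
 */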
int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
{
	int type;
	u64 start = machine__get_kernel_start_addr(machine);

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct kmap *kmap;

		machine->vmlinux_maps[type] = map__new2(start, kernel, type);
		if (machine->vmlinux_maps[type] == NULL)
			return -1;

		machine->vmlinux_maps[type]->map_ip =
			machine->vmlinux_maps[type]->unmap_ip =
				identity__map_ip;
		kmap = map__kmap(machine->vmlinux_maps[type]);
		kmap->kmaps = &machine->kmaps;
		map_groups__insert(&machine->kmaps,
				   machine->vmlinux_maps[type]);
	}

	return 0;
}
void machine__destroy_kernel_maps(struct machine *machine)
{
	int type;

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct kmap *kmap;

		if (machine->vmlinux_maps[type] == NULL)
			continue;

		kmap = map__kmap(machine->vmlinux_maps[type]);
		map_groups__remove(&machine->kmaps,
				   machine->vmlinux_maps[type]);
		if (kmap->ref_reloc_sym) {
			/*
			 * ref_reloc_sym is shared among all maps, so free just
			 * on one of them.
			 */
			if (type == MAP__FUNCTION) {
				zfree((char **)&kmap->ref_reloc_sym->name);
				zfree(&kmap->ref_reloc_sym);
			} else
				kmap->ref_reloc_sym = NULL;
		}

		map__delete(machine->vmlinux_maps[type]);
		machine->vmlinux_maps[type] = NULL;
	}
}
int machines__create_guest_kernel_maps(struct machines *machines)
{
	int ret = 0;
	struct dirent **namelist = NULL;
	int i, items = 0;
	char path[PATH_MAX];
	pid_t pid;
	char *endp;

	if (symbol_conf.default_guest_vmlinux_name ||
	    symbol_conf.default_guest_modules ||
	    symbol_conf.default_guest_kallsyms) {
		machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
	}

	if (symbol_conf.guestmount) {
		items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
		if (items <= 0)
			return -ENOENT;
		for (i = 0; i < items; i++) {
			if (!isdigit(namelist[i]->d_name[0])) {
				/* Filter out . and .. */
				continue;
			}
			pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
			if ((*endp != '\0') ||
			    (endp == namelist[i]->d_name) ||
			    (errno == ERANGE)) {
				pr_debug("invalid directory (%s). Skipping.\n",
					 namelist[i]->d_name);
				continue;
			}
			sprintf(path, "%s/%s/proc/kallsyms",
				symbol_conf.guestmount,
				namelist[i]->d_name);
			ret = access(path, R_OK);
			if (ret) {
				pr_debug("Can't access file %s\n", path);
				goto failure;
			}
			machines__create_kernel_maps(machines, pid);
		}
failure:
		free(namelist);
	}

	return ret;
}
void machines__destroy_kernel_maps(struct machines *machines)
{
	struct rb_node *next = rb_first(&machines->guests);

	machine__destroy_kernel_maps(&machines->host);

	while (next) {
		struct machine *pos = rb_entry(next, struct machine, rb_node);

		next = rb_next(&pos->rb_node);
		rb_erase(&pos->rb_node, &machines->guests);
		machine__delete(pos);
	}
}
int machines__create_kernel_maps(struct machines *machines, pid_t pid)
{
	struct machine *machine = machines__findnew(machines, pid);

	if (machine == NULL)
		return -1;

	return machine__create_kernel_maps(machine);
}
int machine__load_kallsyms(struct machine *machine, const char *filename,
			   enum map_type type, symbol_filter_t filter)
{
	struct map *map = machine->vmlinux_maps[type];
	int ret = dso__load_kallsyms(map->dso, filename, map, filter);

	if (ret > 0) {
		dso__set_loaded(map->dso, type);
		/*
		 * Since /proc/kallsyms will have multiple sections for the
		 * kernel, with modules between them, fixup the end of all
		 * sections.
		 */
		__map_groups__fixup_end(&machine->kmaps, type);
	}

	return ret;
}
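/*
 * Layout note (illustrative): /proc/kallsyms interleaves kernel and module
 * symbols, e.g.
 *
 *	... kernel text symbols ...
 *	... symbols of module A ...
 *	... symbols of module B ...
 *
 * so a symbol's end address is only known once all of them have been read,
 * which is why __map_groups__fixup_end() runs after the parse above.
 */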
int machine__load_vmlinux_path(struct machine *machine, enum map_type type,
			       symbol_filter_t filter)
{
	struct map *map = machine->vmlinux_maps[type];
	int ret = dso__load_vmlinux_path(map->dso, map, filter);

	if (ret > 0)
		dso__set_loaded(map->dso, type);

	return ret;
}
static void map_groups__fixup_end(struct map_groups *mg)
{
	int i;
	for (i = 0; i < MAP__NR_TYPES; ++i)
		__map_groups__fixup_end(mg, i);
}
static char *get_kernel_version(const char *root_dir)
{
	char version[PATH_MAX];
	FILE *file;
	char *name, *tmp;
	const char *prefix = "Linux version ";

	sprintf(version, "%s/proc/version", root_dir);
	file = fopen(version, "r");
	if (!file)
		return NULL;

	version[0] = '\0';
	tmp = fgets(version, sizeof(version), file);
	fclose(file);

	name = strstr(version, prefix);
	if (!name)
		return NULL;
	name += strlen(prefix);
	tmp = strchr(name, ' ');
	if (tmp)
		*tmp = '\0';

	return strdup(name);
}
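/*
 * Example (illustrative): with /proc/version containing
 *
 *	Linux version 3.15.0 (user@host) (gcc ...) #1 SMP ...
 *
 * get_kernel_version() returns a strdup()ed "3.15.0"; the caller is
 * responsible for freeing it.
 */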
static int map_groups__set_modules_path_dir(struct map_groups *mg,
					    const char *dir_name, int depth)
{
	struct dirent *dent;
	DIR *dir = opendir(dir_name);
	int ret = 0;

	if (!dir) {
		pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
		return -1;
	}

	while ((dent = readdir(dir)) != NULL) {
		char path[PATH_MAX];
		struct stat st;

		/*sshfs might return bad dent->d_type, so we have to stat*/
		snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
		if (stat(path, &st))
			continue;

		if (S_ISDIR(st.st_mode)) {
			if (!strcmp(dent->d_name, ".") ||
			    !strcmp(dent->d_name, ".."))
				continue;

			/* Do not follow top-level source and build symlinks */
			if (depth == 0) {
				if (!strcmp(dent->d_name, "source") ||
				    !strcmp(dent->d_name, "build"))
					continue;
			}

			ret = map_groups__set_modules_path_dir(mg, path,
							       depth + 1);
			if (ret < 0)
				goto out;
		} else {
			char *dot = strrchr(dent->d_name, '.'),
			     dso_name[PATH_MAX];
			struct map *map;
			char *long_name;

			if (dot == NULL || strcmp(dot, ".ko"))
				continue;
			snprintf(dso_name, sizeof(dso_name), "[%.*s]",
				 (int)(dot - dent->d_name), dent->d_name);

			strxfrchar(dso_name, '-', '_');
			map = map_groups__find_by_name(mg, MAP__FUNCTION,
						       dso_name);
			if (map == NULL)
				continue;

			long_name = strdup(path);
			if (long_name == NULL) {
				ret = -1;
				goto out;
			}
			dso__set_long_name(map->dso, long_name, true);
			dso__kernel_module_get_build_id(map->dso, "");
		}
	}

out:
	closedir(dir);
	return ret;
}
static int machine__set_modules_path(struct machine *machine)
{
	char *version;
	char modules_path[PATH_MAX];

	version = get_kernel_version(machine->root_dir);
	if (!version)
		return -1;

	snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
		 machine->root_dir, version);
	free(version);

	return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0);
}
static int machine__create_module(void *arg, const char *name, u64 start)
{
	struct machine *machine = arg;
	struct map *map;

	map = machine__new_module(machine, start, name);
	if (map == NULL)
		return -1;

	dso__kernel_module_get_build_id(map->dso, machine->root_dir);

	return 0;
}
static int machine__create_modules(struct machine *machine)
{
	const char *modules;
	char path[PATH_MAX];

	if (machine__is_default_guest(machine)) {
		modules = symbol_conf.default_guest_modules;
	} else {
		snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);
		modules = path;
	}

	if (symbol__restricted_filename(modules, "/proc/modules"))
		return -1;

	if (modules__parse(modules, machine, machine__create_module))
		return -1;

	if (!machine__set_modules_path(machine))
		return 0;

	pr_debug("Problems setting modules path maps, continuing anyway...\n");

	return 0;
}
const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};

int machine__create_kernel_maps(struct machine *machine)
{
	struct dso *kernel = machine__get_kernel(machine);
	char filename[PATH_MAX];
	const char *name;
	u64 addr = 0;
	int i;

	machine__get_kallsyms_filename(machine, filename, PATH_MAX);

	for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
		addr = kallsyms__get_function_start(filename, name);
		if (addr)
			break;
	}
	if (!addr)
		return -1;

	if (kernel == NULL ||
	    __machine__create_kernel_maps(machine, kernel) < 0)
		return -1;

	if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
		if (machine__is_host(machine))
			pr_debug("Problems creating module maps, "
				 "continuing anyway...\n");
		else
			pr_debug("Problems creating module maps for guest %d, "
				 "continuing anyway...\n", machine->pid);
	}

	/*
	 * Now that we have all the maps created, just set the ->end of them:
	 */
	map_groups__fixup_end(&machine->kmaps);

	if (maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name,
					     addr)) {
		machine__destroy_kernel_maps(machine);
		return -1;
	}

	return 0;
}
static void machine__set_kernel_mmap_len(struct machine *machine,
					 union perf_event *event)
{
	int i;

	for (i = 0; i < MAP__NR_TYPES; i++) {
		machine->vmlinux_maps[i]->start = event->mmap.start;
		machine->vmlinux_maps[i]->end   = (event->mmap.start +
						   event->mmap.len);
		/*
		 * Be a bit paranoid here, some perf.data files come with
		 * a zero sized synthesized MMAP event for the kernel.
		 */
		if (machine->vmlinux_maps[i]->end == 0)
			machine->vmlinux_maps[i]->end = ~0ULL;
	}
}
static bool machine__uses_kcore(struct machine *machine)
{
	struct dso *dso;

	list_for_each_entry(dso, &machine->kernel_dsos, node) {
		if (dso__is_kcore(dso))
			return true;
	}

	return false;
}
static int machine__process_kernel_mmap_event(struct machine *machine,
					      union perf_event *event)
{
	struct map *map;
	char kmmap_prefix[PATH_MAX];
	enum dso_kernel_type kernel_type;
	bool is_kernel_mmap;

	/* If we have maps from kcore then we do not need or want any others */
	if (machine__uses_kcore(machine))
		return 0;

	machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
	if (machine__is_host(machine))
		kernel_type = DSO_TYPE_KERNEL;
	else
		kernel_type = DSO_TYPE_GUEST_KERNEL;

	is_kernel_mmap = memcmp(event->mmap.filename,
				kmmap_prefix,
				strlen(kmmap_prefix) - 1) == 0;
	if (event->mmap.filename[0] == '/' ||
	    (!is_kernel_mmap && event->mmap.filename[0] == '[')) {
		char short_module_name[1024];
		char *name, *dot;

		if (event->mmap.filename[0] == '/') {
			name = strrchr(event->mmap.filename, '/');
			if (name == NULL)
				goto out_problem;

			++name; /* skip / */
			dot = strrchr(name, '.');
			if (dot == NULL)
				goto out_problem;
			snprintf(short_module_name, sizeof(short_module_name),
				 "[%.*s]", (int)(dot - name), name);
			strxfrchar(short_module_name, '-', '_');
		} else
			strcpy(short_module_name, event->mmap.filename);

		map = machine__new_module(machine, event->mmap.start,
					  event->mmap.filename);
		if (map == NULL)
			goto out_problem;

		name = strdup(short_module_name);
		if (name == NULL)
			goto out_problem;

		dso__set_short_name(map->dso, name, true);
		map->end = map->start + event->mmap.len;
	} else if (is_kernel_mmap) {
		const char *symbol_name = (event->mmap.filename +
					   strlen(kmmap_prefix));
		/*
		 * Should be there already, from the build-id table in
		 * the header.
		 */
		struct dso *kernel = __dsos__findnew(&machine->kernel_dsos,
						     kmmap_prefix);
		if (kernel == NULL)
			goto out_problem;

		kernel->kernel = kernel_type;
		if (__machine__create_kernel_maps(machine, kernel) < 0)
			goto out_problem;

		machine__set_kernel_mmap_len(machine, event);

		/*
		 * Avoid using a zero address (kptr_restrict) for the ref reloc
		 * symbol. Effectively having zero here means that at record
		 * time /proc/sys/kernel/kptr_restrict was non zero.
		 */
		if (event->mmap.pgoff != 0) {
			maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
							 symbol_name,
							 event->mmap.pgoff);
		}

		if (machine__is_default_guest(machine)) {
			/*
			 * preload dso of guest kernel and modules
			 */
			dso__load(kernel, machine->vmlinux_maps[MAP__FUNCTION],
				  NULL);
		}
	}
	return 0;
out_problem:
	return -1;
}
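/*
 * For reference (sketch): a synthesized kernel mmap event carries a
 * filename like "[kernel.kallsyms]_text", so symbol_name above points at
 * "_text", the ref reloc symbol used to relocate kallsyms addresses.
 */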
int machine__process_mmap2_event(struct machine *machine,
				 union perf_event *event,
				 struct perf_sample *sample __maybe_unused)
{
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct thread *thread;
	struct map *map;
	enum map_type type;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap2(event, stdout);

	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap2.pid,
					 event->mmap2.tid);
	if (thread == NULL)
		goto out_problem;

	if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
		type = MAP__VARIABLE;
	else
		type = MAP__FUNCTION;

	map = map__new(&machine->user_dsos, event->mmap2.start,
		       event->mmap2.len, event->mmap2.pgoff,
		       event->mmap2.pid, event->mmap2.maj,
		       event->mmap2.min, event->mmap2.ino,
		       event->mmap2.ino_generation,
		       event->mmap2.filename, type);

	if (map == NULL)
		goto out_problem;

	thread__insert_map(thread, map);
	return 0;

out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
	return 0;
}
int machine__process_mmap_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample __maybe_unused)
{
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct thread *thread;
	struct map *map;
	enum map_type type;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap(event, stdout);

	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap.pid,
					 event->mmap.tid);
	if (thread == NULL)
		goto out_problem;

	if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
		type = MAP__VARIABLE;
	else
		type = MAP__FUNCTION;

	map = map__new(&machine->user_dsos, event->mmap.start,
		       event->mmap.len, event->mmap.pgoff,
		       event->mmap.pid, 0, 0, 0, 0,
		       event->mmap.filename,
		       type);

	if (map == NULL)
		goto out_problem;

	thread__insert_map(thread, map);
	return 0;

out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
	return 0;
}
static void machine__remove_thread(struct machine *machine, struct thread *th)
{
	machine->last_match = NULL;
	rb_erase(&th->rb_node, &machine->threads);
	/*
	 * We may have references to this thread, for instance in some hist_entry
	 * instances, so just move them to a separate list.
	 */
	list_add_tail(&th->node, &machine->dead_threads);
}
int machine__process_fork_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);
	struct thread *parent = machine__findnew_thread(machine,
							event->fork.ppid,
							event->fork.ptid);

	/* if a thread currently exists for the thread id remove it */
	if (thread != NULL)
		machine__remove_thread(machine, thread);

	thread = machine__findnew_thread(machine, event->fork.pid,
					 event->fork.tid);
	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	if (thread == NULL || parent == NULL ||
	    thread__fork(thread, parent, sample->time) < 0) {
		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
		return -1;
	}

	return 0;
}
int machine__process_exit_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample __maybe_unused)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);

	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	if (thread != NULL)
		thread__exited(thread);

	return 0;
}
int machine__process_event(struct machine *machine, union perf_event *event,
			   struct perf_sample *sample)
{
	int ret;

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret = machine__process_comm_event(machine, event, sample); break;
	case PERF_RECORD_MMAP:
		ret = machine__process_mmap_event(machine, event, sample); break;
	case PERF_RECORD_MMAP2:
		ret = machine__process_mmap2_event(machine, event, sample); break;
	case PERF_RECORD_FORK:
		ret = machine__process_fork_event(machine, event, sample); break;
	case PERF_RECORD_EXIT:
		ret = machine__process_exit_event(machine, event, sample); break;
	case PERF_RECORD_LOST:
		ret = machine__process_lost_event(machine, event, sample); break;
	default:
		ret = -1;
		break;
	}

	return ret;
}
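/*
 * Usage sketch (the surrounding session code is assumed, not shown here):
 * a reader of a perf.data file resolves the machine for each event and
 * dispatches it through this single entry point:
 *
 *	if (machine__process_event(machine, event, &sample) < 0)
 *		pr_debug("failed to process event\n");
 */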
static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
{
	if (sym->name && !regexec(regex, sym->name, 0, NULL, 0))
		return 1;
	return 0;
}
static void ip__resolve_ams(struct machine *machine, struct thread *thread,
			    struct addr_map_symbol *ams,
			    u64 ip)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));
	/*
	 * We cannot use the header.misc hint to determine whether a
	 * branch stack address is user, kernel, guest, hypervisor.
	 * Branches may straddle the kernel/user/hypervisor boundaries.
	 * Thus, we have to try consecutively until we find a match
	 * or else, the symbol is unknown
	 */
	thread__find_cpumode_addr_location(thread, machine, MAP__FUNCTION, ip, &al);

	ams->addr = ip;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
}
static void ip__resolve_data(struct machine *machine, struct thread *thread,
			     u8 m, struct addr_map_symbol *ams, u64 addr)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));

	thread__find_addr_location(thread, machine, m, MAP__VARIABLE, addr,
				   &al);
	ams->addr = addr;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
}
struct mem_info *sample__resolve_mem(struct perf_sample *sample,
				     struct addr_location *al)
{
	struct mem_info *mi = zalloc(sizeof(*mi));

	if (!mi)
		return NULL;

	ip__resolve_ams(al->machine, al->thread, &mi->iaddr, sample->ip);
	ip__resolve_data(al->machine, al->thread, al->cpumode,
			 &mi->daddr, sample->addr);
	mi->data_src.val = sample->data_src;

	return mi;
}
struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
					   struct addr_location *al)
{
	unsigned int i;
	const struct branch_stack *bs = sample->branch_stack;
	struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));

	if (!bi)
		return NULL;

	for (i = 0; i < bs->nr; i++) {
		ip__resolve_ams(al->machine, al->thread, &bi[i].to, bs->entries[i].to);
		ip__resolve_ams(al->machine, al->thread, &bi[i].from, bs->entries[i].from);
		bi[i].flags = bs->entries[i].flags;
	}

	return bi;
}
static int machine__resolve_callchain_sample(struct machine *machine,
					     struct thread *thread,
					     struct ip_callchain *chain,
					     struct symbol **parent,
					     struct addr_location *root_al,
					     int max_stack)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	int chain_nr = min(max_stack, (int)chain->nr);
	int i;
	int err;

	callchain_cursor_reset(&callchain_cursor);

	if (chain->nr > PERF_MAX_STACK_DEPTH) {
		pr_warning("corrupted callchain. skipping...\n");
		return 0;
	}

	for (i = 0; i < chain_nr; i++) {
		u64 ip;
		struct addr_location al;

		if (callchain_param.order == ORDER_CALLEE)
			ip = chain->ips[i];
		else
			ip = chain->ips[chain->nr - i - 1];

		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR;
				break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL;
				break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER;
				break;
			default:
				pr_debug("invalid callchain context: "
					 "%"PRId64"\n", (s64) ip);
				/*
				 * It seems the callchain is corrupted.
				 * Discard all.
				 */
				callchain_cursor_reset(&callchain_cursor);
				return 0;
			}
			continue;
		}

		al.filtered = 0;
		thread__find_addr_location(thread, machine, cpumode,
					   MAP__FUNCTION, ip, &al);
		if (al.sym != NULL) {
			if (sort__has_parent && !*parent &&
			    symbol__match_regex(al.sym, &parent_regex))
				*parent = al.sym;
			else if (have_ignore_callees && root_al &&
				 symbol__match_regex(al.sym, &ignore_callees_regex)) {
				/* Treat this symbol as the root,
				   forgetting its callees. */
				*root_al = al;
				callchain_cursor_reset(&callchain_cursor);
			}
		}

		err = callchain_cursor_append(&callchain_cursor,
					      ip, al.map, al.sym);
		if (err)
			return err;
	}

	return 0;
}
static int unwind_entry(struct unwind_entry *entry, void *arg)
{
	struct callchain_cursor *cursor = arg;
	return callchain_cursor_append(cursor, entry->ip,
				       entry->map, entry->sym);
}
int machine__resolve_callchain(struct machine *machine,
			       struct perf_evsel *evsel,
			       struct thread *thread,
			       struct perf_sample *sample,
			       struct symbol **parent,
			       struct addr_location *root_al,
			       int max_stack)
{
	int ret;

	ret = machine__resolve_callchain_sample(machine, thread,
						sample->callchain, parent,
						root_al, max_stack);
	if (ret)
		return ret;

	/* Can we do dwarf post unwind? */
	if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) &&
	      (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER)))
		return 0;

	/* Bail out if nothing was captured. */
	if ((!sample->user_regs.regs) ||
	    (!sample->user_stack.size))
		return 0;

	return unwind__get_entries(unwind_entry, &callchain_cursor, machine,
				   thread, sample, max_stack);
}
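/*
 * Note: resolution is two-stage. The kernel-sampled callchain is appended
 * to callchain_cursor first; DWARF post-unwind via unwind__get_entries()
 * only runs when both user registers and user stack were captured
 * (PERF_SAMPLE_REGS_USER and PERF_SAMPLE_STACK_USER).
 */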
int machine__for_each_thread(struct machine *machine,
			     int (*fn)(struct thread *thread, void *p),
			     void *priv)
{
	struct rb_node *nd;
	struct thread *thread;
	int rc = 0;

	for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
		thread = rb_entry(nd, struct thread, rb_node);
		rc = fn(thread, priv);
		if (rc != 0)
			return rc;
	}

	list_for_each_entry(thread, &machine->dead_threads, node) {
		rc = fn(thread, priv);
		if (rc != 0)
			return rc;
	}
	return rc;
}
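/*
 * Example (sketch, with a hypothetical callback): count live and dead
 * threads.
 *
 *	static int count_thread(struct thread *thread __maybe_unused, void *p)
 *	{
 *		++*(int *)p;
 *		return 0;
 *	}
 *
 *	int nr = 0;
 *	machine__for_each_thread(machine, count_thread, &nr);
 */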
int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
				  struct target *target, struct thread_map *threads,
				  perf_event__handler_t process, bool data_mmap)
{
	if (target__has_task(target))
		return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap);
	else if (target__has_cpu(target))
		return perf_event__synthesize_threads(tool, process, machine, data_mmap);
	/* command specified */
	return 0;
}