#include <symbol/kallsyms.h>

int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
	map_groups__init(&machine->kmaps);
	RB_CLEAR_NODE(&machine->rb_node);
	INIT_LIST_HEAD(&machine->user_dsos);
	INIT_LIST_HEAD(&machine->kernel_dsos);

	machine->threads = RB_ROOT;
	INIT_LIST_HEAD(&machine->dead_threads);
	machine->last_match = NULL;

	machine->kmaps.machine = machine;

	machine->symbol_filter = NULL;
	machine->id_hdr_size = 0;

	machine->root_dir = strdup(root_dir);
	if (machine->root_dir == NULL)

	if (pid != HOST_KERNEL_ID) {
		struct thread *thread = machine__findnew_thread(machine, 0,

		snprintf(comm, sizeof(comm), "[guest/%d]", pid);
		thread__set_comm(thread, comm, 0);
struct machine *machine__new_host(void)
	struct machine *machine = malloc(sizeof(*machine));

	if (machine != NULL) {
		machine__init(machine, "", HOST_KERNEL_ID);

		if (machine__create_kernel_maps(machine) < 0)
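/*
 * Illustrative usage sketch (not part of the original file): callers
 * typically pair machine__new_host() with machine__delete(), e.g.:
 *
 *	struct machine *host = machine__new_host();
 *
 *	if (host == NULL)
 *		return -1;
 *	...feed events to machine__process_event(host, event, sample)...
 *	machine__delete(host);
 */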
static void dsos__delete(struct list_head *dsos)
	list_for_each_entry_safe(pos, n, dsos, node) {
void machine__delete_dead_threads(struct machine *machine)
	list_for_each_entry_safe(t, n, &machine->dead_threads, node) {
void machine__delete_threads(struct machine *machine)
	struct rb_node *nd = rb_first(&machine->threads);

		struct thread *t = rb_entry(nd, struct thread, rb_node);

		rb_erase(&t->rb_node, &machine->threads);
void machine__exit(struct machine *machine)
	map_groups__exit(&machine->kmaps);
	dsos__delete(&machine->user_dsos);
	dsos__delete(&machine->kernel_dsos);
	zfree(&machine->root_dir);

void machine__delete(struct machine *machine)
	machine__exit(machine);
void machines__init(struct machines *machines)
	machine__init(&machines->host, "", HOST_KERNEL_ID);
	machines->guests = RB_ROOT;
	machines->symbol_filter = NULL;

void machines__exit(struct machines *machines)
	machine__exit(&machines->host);
struct machine *machines__add(struct machines *machines, pid_t pid,
			      const char *root_dir)
	struct rb_node **p = &machines->guests.rb_node;
	struct rb_node *parent = NULL;
	struct machine *pos, *machine = malloc(sizeof(*machine));

	if (machine__init(machine, root_dir, pid) != 0) {

	machine->symbol_filter = machines->symbol_filter;

		pos = rb_entry(parent, struct machine, rb_node);

	rb_link_node(&machine->rb_node, parent, p);
	rb_insert_color(&machine->rb_node, &machines->guests);
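/*
 * Sketch (assumed, standard rbtree idiom; not in the excerpt): the
 * elided descent that computes "parent" and "p" above normally
 * looks like:
 *
 *	while (*p != NULL) {
 *		parent = *p;
 *		pos = rb_entry(parent, struct machine, rb_node);
 *		if (pid < pos->pid)
 *			p = &(*p)->rb_left;
 *		else
 *			p = &(*p)->rb_right;
 *	}
 */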
void machines__set_symbol_filter(struct machines *machines,
				 symbol_filter_t symbol_filter)
	machines->symbol_filter = symbol_filter;
	machines->host.symbol_filter = symbol_filter;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		machine->symbol_filter = symbol_filter;
struct machine *machines__find(struct machines *machines, pid_t pid)
	struct rb_node **p = &machines->guests.rb_node;
	struct rb_node *parent = NULL;
	struct machine *machine;
	struct machine *default_machine = NULL;

	if (pid == HOST_KERNEL_ID)
		return &machines->host;

		machine = rb_entry(parent, struct machine, rb_node);
		if (pid < machine->pid)
		else if (pid > machine->pid)

			default_machine = machine;

	return default_machine;
struct machine *machines__findnew(struct machines *machines, pid_t pid)
	const char *root_dir = "";
	struct machine *machine = machines__find(machines, pid);

	if (machine && (machine->pid == pid))

	if ((pid != HOST_KERNEL_ID) &&
	    (pid != DEFAULT_GUEST_KERNEL_ID) &&
	    (symbol_conf.guestmount)) {
		sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
		if (access(path, R_OK)) {
			static struct strlist *seen;

				seen = strlist__new(true, NULL);

			if (!strlist__has_entry(seen, path)) {
				pr_err("Can't access file %s\n", path);
				strlist__add(seen, path);

	machine = machines__add(machines, pid, root_dir);
void machines__process_guests(struct machines *machines,
			      machine__process_t process, void *data)
	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
char *machine__mmap_name(struct machine *machine, char *bf, size_t size)
	if (machine__is_host(machine))
		snprintf(bf, size, "[%s]", "kernel.kallsyms");
	else if (machine__is_default_guest(machine))
		snprintf(bf, size, "[%s]", "guest.kernel.kallsyms");

		snprintf(bf, size, "[%s.%d]", "guest.kernel.kallsyms",
void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
	struct rb_node *node;
	struct machine *machine;

	machines->host.id_hdr_size = id_hdr_size;

	for (node = rb_first(&machines->guests); node; node = rb_next(node)) {
		machine = rb_entry(node, struct machine, rb_node);
		machine->id_hdr_size = id_hdr_size;
static struct thread *__machine__findnew_thread(struct machine *machine,
						pid_t pid, pid_t tid,
	struct rb_node **p = &machine->threads.rb_node;
	struct rb_node *parent = NULL;

	/*
	 * Front-end cache - TID lookups come in blocks, so most of the
	 * time we don't have to look up the full rbtree.
	 */
	if (machine->last_match && machine->last_match->tid == tid) {
		if (pid && pid != machine->last_match->pid_)
			machine->last_match->pid_ = pid;
		return machine->last_match;

		th = rb_entry(parent, struct thread, rb_node);

		if (th->tid == tid) {
			machine->last_match = th;
			if (pid && pid != th->pid_)

	th = thread__new(pid, tid);

		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color(&th->rb_node, &machine->threads);
		machine->last_match = th;
struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
	return __machine__findnew_thread(machine, pid, tid, true);

struct thread *machine__find_thread(struct machine *machine, pid_t tid)
	return __machine__findnew_thread(machine, 0, tid, false);
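/*
 * Illustrative difference between the two wrappers: "findnew" creates
 * the thread on demand, "find" does not.
 *
 *	struct thread *t1 = machine__findnew_thread(machine, pid, tid);
 *	struct thread *t2 = machine__find_thread(machine, tid);
 *
 * t1 is NULL only on allocation failure; t2 is NULL whenever tid has
 * never been seen on this machine.
 */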
int machine__process_comm_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
	struct thread *thread = machine__findnew_thread(machine,

		perf_event__fprintf_comm(event, stdout);

	if (thread == NULL || thread__set_comm(thread, event->comm.comm, sample->time)) {
		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
int machine__process_lost_event(struct machine *machine __maybe_unused,
				union perf_event *event, struct perf_sample *sample __maybe_unused)
	dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
		    event->lost.id, event->lost.lost);
struct map *machine__new_module(struct machine *machine, u64 start,
				const char *filename)
	struct dso *dso = __dsos__findnew(&machine->kernel_dsos, filename);

	map = map__new2(start, dso, MAP__FUNCTION);

	if (machine__is_host(machine))
		dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;

		dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;
	map_groups__insert(&machine->kmaps, map);
size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
	size_t ret = __dsos__fprintf(&machines->host.kernel_dsos, fp) +
		     __dsos__fprintf(&machines->host.user_dsos, fp);

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += __dsos__fprintf(&pos->kernel_dsos, fp);
		ret += __dsos__fprintf(&pos->user_dsos, fp);
size_t machine__fprintf_dsos_buildid(struct machine *machine, FILE *fp,
				     bool (skip)(struct dso *dso, int parm), int parm)
	return __dsos__fprintf_buildid(&machine->kernel_dsos, fp, skip, parm) +
	       __dsos__fprintf_buildid(&machine->user_dsos, fp, skip, parm);

size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
				      bool (skip)(struct dso *dso, int parm), int parm)
	size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
	struct dso *kdso = machine->vmlinux_maps[MAP__FUNCTION]->dso;

	if (kdso->has_build_id) {
		char filename[PATH_MAX];
		if (dso__build_id_filename(kdso, filename, sizeof(filename)))
			printed += fprintf(fp, "[0] %s\n", filename);

	for (i = 0; i < vmlinux_path__nr_entries; ++i)
		printed += fprintf(fp, "[%d] %s\n",
				   i + kdso->has_build_id, vmlinux_path[i]);
size_t machine__fprintf(struct machine *machine, FILE *fp)
	for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
		struct thread *pos = rb_entry(nd, struct thread, rb_node);

		ret += thread__fprintf(pos, fp);
static struct dso *machine__get_kernel(struct machine *machine)
	const char *vmlinux_name = NULL;

	if (machine__is_host(machine)) {
		vmlinux_name = symbol_conf.vmlinux_name;

			vmlinux_name = "[kernel.kallsyms]";

		kernel = dso__kernel_findnew(machine, vmlinux_name,

		if (machine__is_default_guest(machine))
			vmlinux_name = symbol_conf.default_guest_vmlinux_name;

			vmlinux_name = machine__mmap_name(machine, bf,

		kernel = dso__kernel_findnew(machine, vmlinux_name,
					     DSO_TYPE_GUEST_KERNEL);

	if (kernel != NULL && (!kernel->has_build_id))
		dso__read_running_kernel_build_id(kernel, machine);
struct process_args {

static int symbol__in_kernel(void *arg, const char *name,
			     char type __maybe_unused, u64 start)
	struct process_args *args = arg;

	if (strchr(name, '['))
/* Figure out the start address of kernel map from /proc/kallsyms */
static u64 machine__get_kernel_start_addr(struct machine *machine)
	const char *filename;

	struct process_args args;

	if (machine__is_default_guest(machine))
		filename = (char *)symbol_conf.default_guest_kallsyms;

		sprintf(path, "%s/proc/kallsyms", machine->root_dir);

	if (symbol__restricted_filename(filename, "/proc/kallsyms"))

	if (kallsyms__parse(filename, &args, symbol__in_kernel) <= 0)
int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
	u64 start = machine__get_kernel_start_addr(machine);

	for (type = 0; type < MAP__NR_TYPES; ++type) {

		machine->vmlinux_maps[type] = map__new2(start, kernel, type);
		if (machine->vmlinux_maps[type] == NULL)

		machine->vmlinux_maps[type]->map_ip =
			machine->vmlinux_maps[type]->unmap_ip =
				identity__map_ip;
		kmap = map__kmap(machine->vmlinux_maps[type]);
		kmap->kmaps = &machine->kmaps;
		map_groups__insert(&machine->kmaps,
				   machine->vmlinux_maps[type]);
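/*
 * Note (illustrative): the kernel maps are identity-mapped, so for
 * these maps translating an ip is a no-op, since kallsyms/vmlinux
 * addresses are already absolute. A hypothetical check:
 *
 *	struct map *map = machine->vmlinux_maps[MAP__FUNCTION];
 *	u64 ip = 0xffffffff81000000ULL;
 *	assert(map->map_ip(map, ip) == ip);
 */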
void machine__destroy_kernel_maps(struct machine *machine)
	for (type = 0; type < MAP__NR_TYPES; ++type) {

		if (machine->vmlinux_maps[type] == NULL)

		kmap = map__kmap(machine->vmlinux_maps[type]);
		map_groups__remove(&machine->kmaps,
				   machine->vmlinux_maps[type]);
		if (kmap->ref_reloc_sym) {
			/*
			 * ref_reloc_sym is shared among all maps, so free it
			 * just once.
			 */
			if (type == MAP__FUNCTION) {
				zfree((char **)&kmap->ref_reloc_sym->name);
				zfree(&kmap->ref_reloc_sym);

				kmap->ref_reloc_sym = NULL;

		map__delete(machine->vmlinux_maps[type]);
		machine->vmlinux_maps[type] = NULL;
int machines__create_guest_kernel_maps(struct machines *machines)
	struct dirent **namelist = NULL;

	if (symbol_conf.default_guest_vmlinux_name ||
	    symbol_conf.default_guest_modules ||
	    symbol_conf.default_guest_kallsyms) {
		machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);

	if (symbol_conf.guestmount) {
		items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);

		for (i = 0; i < items; i++) {
			if (!isdigit(namelist[i]->d_name[0])) {
				/* Filter out . and .. */

			pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
			if ((*endp != '\0') ||
			    (endp == namelist[i]->d_name) ||
				pr_debug("invalid directory (%s). Skipping.\n",
					 namelist[i]->d_name);

			sprintf(path, "%s/%s/proc/kallsyms",
				symbol_conf.guestmount,
				namelist[i]->d_name);
			ret = access(path, R_OK);

				pr_debug("Can't access file %s\n", path);

			machines__create_kernel_maps(machines, pid);
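/*
 * Illustrative guestmount layout assumed by the loop above: one
 * directory per guest pid, e.g. with --guestmount=/guests:
 *
 *	/guests/1234/proc/kallsyms
 *	/guests/1234/proc/modules
 *	/guests/5678/proc/kallsyms
 */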
void machines__destroy_kernel_maps(struct machines *machines)
	struct rb_node *next = rb_first(&machines->guests);

	machine__destroy_kernel_maps(&machines->host);

		struct machine *pos = rb_entry(next, struct machine, rb_node);

		next = rb_next(&pos->rb_node);
		rb_erase(&pos->rb_node, &machines->guests);
		machine__delete(pos);
int machines__create_kernel_maps(struct machines *machines, pid_t pid)
	struct machine *machine = machines__findnew(machines, pid);

	return machine__create_kernel_maps(machine);
int machine__load_kallsyms(struct machine *machine, const char *filename,
			   enum map_type type, symbol_filter_t filter)
	struct map *map = machine->vmlinux_maps[type];
	int ret = dso__load_kallsyms(map->dso, filename, map, filter);

		dso__set_loaded(map->dso, type);
		/*
		 * Since /proc/kallsyms will have multiple sections for the
		 * kernel, with modules between them, fixup the end of all
		 * sections.
		 */
		__map_groups__fixup_end(&machine->kmaps, type);
int machine__load_vmlinux_path(struct machine *machine, enum map_type type,
			       symbol_filter_t filter)
	struct map *map = machine->vmlinux_maps[type];
	int ret = dso__load_vmlinux_path(map->dso, map, filter);

		dso__set_loaded(map->dso, type);

static void map_groups__fixup_end(struct map_groups *mg)
	for (i = 0; i < MAP__NR_TYPES; ++i)
		__map_groups__fixup_end(mg, i);
static char *get_kernel_version(const char *root_dir)
	char version[PATH_MAX];

	const char *prefix = "Linux version ";

	sprintf(version, "%s/proc/version", root_dir);
	file = fopen(version, "r");

	tmp = fgets(version, sizeof(version), file);

	name = strstr(version, prefix);

	name += strlen(prefix);
	tmp = strchr(name, ' ');
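/*
 * Illustrative input/output: given a /proc/version line such as
 *
 *	Linux version 3.13.0-rc1 (gcc version 4.8.1 ...) #1 SMP ...
 *
 * the code above points "name" past the "Linux version " prefix and
 * cuts the string at the first space, yielding "3.13.0-rc1".
 */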
static int map_groups__set_modules_path_dir(struct map_groups *mg,
					    const char *dir_name)
	DIR *dir = opendir(dir_name);

		pr_debug("%s: cannot open %s dir\n", __func__, dir_name);

	while ((dent = readdir(dir)) != NULL) {

		/* sshfs might return bad dent->d_type, so we have to stat */
		snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);

		if (S_ISDIR(st.st_mode)) {
			if (!strcmp(dent->d_name, ".") ||
			    !strcmp(dent->d_name, ".."))

			ret = map_groups__set_modules_path_dir(mg, path);

			char *dot = strrchr(dent->d_name, '.'),

			if (dot == NULL || strcmp(dot, ".ko"))

			snprintf(dso_name, sizeof(dso_name), "[%.*s]",
				 (int)(dot - dent->d_name), dent->d_name);

			strxfrchar(dso_name, '-', '_');
			map = map_groups__find_by_name(mg, MAP__FUNCTION,

			long_name = strdup(path);
			if (long_name == NULL) {

			dso__set_long_name(map->dso, long_name, true);
			dso__kernel_module_get_build_id(map->dso, "");
static int machine__set_modules_path(struct machine *machine)
	char modules_path[PATH_MAX];

	version = get_kernel_version(machine->root_dir);

	snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s/kernel",
		 machine->root_dir, version);

	return map_groups__set_modules_path_dir(&machine->kmaps, modules_path);
static int machine__create_module(void *arg, const char *name, u64 start)
	struct machine *machine = arg;

	map = machine__new_module(machine, start, name);

	dso__kernel_module_get_build_id(map->dso, machine->root_dir);

static int machine__create_modules(struct machine *machine)
	if (machine__is_default_guest(machine)) {
		modules = symbol_conf.default_guest_modules;

		snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);

	if (symbol__restricted_filename(modules, "/proc/modules"))

	if (modules__parse(modules, machine, machine__create_module))

	if (!machine__set_modules_path(machine))

	pr_debug("Problems setting modules path maps, continuing anyway...\n");
int machine__create_kernel_maps(struct machine *machine)
	struct dso *kernel = machine__get_kernel(machine);

	if (kernel == NULL ||
	    __machine__create_kernel_maps(machine, kernel) < 0)

	if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
		if (machine__is_host(machine))
			pr_debug("Problems creating module maps, "
				 "continuing anyway...\n");

			pr_debug("Problems creating module maps for guest %d, "
				 "continuing anyway...\n", machine->pid);

	/*
	 * Now that we have all the maps created, just set the ->end of them:
	 */
	map_groups__fixup_end(&machine->kmaps);
static void machine__set_kernel_mmap_len(struct machine *machine,
					 union perf_event *event)
	for (i = 0; i < MAP__NR_TYPES; i++) {
		machine->vmlinux_maps[i]->start = event->mmap.start;
		machine->vmlinux_maps[i]->end = (event->mmap.start +

		/*
		 * Be a bit paranoid here: some perf.data files have come with
		 * a zero-sized, synthesized MMAP event for the kernel.
		 */
		if (machine->vmlinux_maps[i]->end == 0)
			machine->vmlinux_maps[i]->end = ~0ULL;
static bool machine__uses_kcore(struct machine *machine)
	list_for_each_entry(dso, &machine->kernel_dsos, node) {
		if (dso__is_kcore(dso))
static int machine__process_kernel_mmap_event(struct machine *machine,
					      union perf_event *event)
	char kmmap_prefix[PATH_MAX];
	enum dso_kernel_type kernel_type;

	/* If we have maps from kcore then we do not need or want any others */
	if (machine__uses_kcore(machine))

	machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
	if (machine__is_host(machine))
		kernel_type = DSO_TYPE_KERNEL;

		kernel_type = DSO_TYPE_GUEST_KERNEL;

	is_kernel_mmap = memcmp(event->mmap.filename, kmmap_prefix,
				strlen(kmmap_prefix) - 1) == 0;
	if (event->mmap.filename[0] == '/' ||
	    (!is_kernel_mmap && event->mmap.filename[0] == '[')) {

		char short_module_name[1024];

		if (event->mmap.filename[0] == '/') {
			name = strrchr(event->mmap.filename, '/');

			dot = strrchr(name, '.');

			snprintf(short_module_name, sizeof(short_module_name),
				 "[%.*s]", (int)(dot - name), name);
			strxfrchar(short_module_name, '-', '_');

			strcpy(short_module_name, event->mmap.filename);

		map = machine__new_module(machine, event->mmap.start,
					  event->mmap.filename);

		name = strdup(short_module_name);

		dso__set_short_name(map->dso, name, true);
		map->end = map->start + event->mmap.len;
	} else if (is_kernel_mmap) {
		const char *symbol_name = (event->mmap.filename +
					   strlen(kmmap_prefix));
		/*
		 * Should be there already, from the build-id table in
		 */
		struct dso *kernel = __dsos__findnew(&machine->kernel_dsos,

		kernel->kernel = kernel_type;
		if (__machine__create_kernel_maps(machine, kernel) < 0)

		machine__set_kernel_mmap_len(machine, event);

		/*
		 * Avoid using a zero address (kptr_restrict) for the ref reloc
		 * symbol. Effectively having zero here means that at record
		 * time /proc/sys/kernel/kptr_restrict was non zero.
		 */
		if (event->mmap.pgoff != 0) {
			maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,

		if (machine__is_default_guest(machine)) {
			/*
			 * preload dso of guest kernel and modules
			 */
			dso__load(kernel, machine->vmlinux_maps[MAP__FUNCTION],
int machine__process_mmap2_event(struct machine *machine,
				 union perf_event *event,
				 struct perf_sample *sample __maybe_unused)
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct thread *thread;

		perf_event__fprintf_mmap2(event, stdout);

	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);

	thread = machine__findnew_thread(machine, event->mmap2.pid,

	if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
		type = MAP__VARIABLE;

		type = MAP__FUNCTION;

	map = map__new(&machine->user_dsos, event->mmap2.start,
			event->mmap2.len, event->mmap2.pgoff,
			event->mmap2.pid, event->mmap2.maj,
			event->mmap2.min, event->mmap2.ino,
			event->mmap2.ino_generation,
			event->mmap2.filename, type);

	thread__insert_map(thread, map);

	dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
int machine__process_mmap_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample __maybe_unused)
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct thread *thread;

		perf_event__fprintf_mmap(event, stdout);

	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);

	thread = machine__findnew_thread(machine, event->mmap.pid,

	if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
		type = MAP__VARIABLE;

		type = MAP__FUNCTION;

	map = map__new(&machine->user_dsos, event->mmap.start,
			event->mmap.len, event->mmap.pgoff,
			event->mmap.pid, 0, 0, 0, 0,
			event->mmap.filename,

	thread__insert_map(thread, map);

	dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
static void machine__remove_thread(struct machine *machine, struct thread *th)
	machine->last_match = NULL;
	rb_erase(&th->rb_node, &machine->threads);
	/*
	 * We may have references to this thread, for instance in some
	 * hist_entry instances, so just move them to a separate list.
	 */
	list_add_tail(&th->node, &machine->dead_threads);
int machine__process_fork_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
	struct thread *thread = machine__find_thread(machine, event->fork.tid);
	struct thread *parent = machine__findnew_thread(machine,

	/* if a thread currently exists for the thread id remove it */

		machine__remove_thread(machine, thread);

	thread = machine__findnew_thread(machine, event->fork.pid,

		perf_event__fprintf_task(event, stdout);

	if (thread == NULL || parent == NULL ||
	    thread__fork(thread, parent, sample->time) < 0) {
		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
int machine__process_exit_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample __maybe_unused)
	struct thread *thread = machine__find_thread(machine, event->fork.tid);

		perf_event__fprintf_task(event, stdout);

		thread__exited(thread);
int machine__process_event(struct machine *machine, union perf_event *event,
			   struct perf_sample *sample)
	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret = machine__process_comm_event(machine, event, sample); break;
	case PERF_RECORD_MMAP:
		ret = machine__process_mmap_event(machine, event, sample); break;
	case PERF_RECORD_MMAP2:
		ret = machine__process_mmap2_event(machine, event, sample); break;
	case PERF_RECORD_FORK:
		ret = machine__process_fork_event(machine, event, sample); break;
	case PERF_RECORD_EXIT:
		ret = machine__process_exit_event(machine, event, sample); break;
	case PERF_RECORD_LOST:
		ret = machine__process_lost_event(machine, event, sample); break;
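/*
 * Illustrative caller (assumed, not from this file): a session loop
 * hands every decoded event to this dispatcher.
 *
 *	while ((event = next_event(session)) != NULL) {
 *		if (machine__process_event(machine, event, &sample) < 0)
 *			pr_debug("failed to process event\n");
 *	}
 *
 * "next_event" and "session" are hypothetical stand-ins for the
 * caller's event source.
 */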
static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
	if (sym->name && !regexec(regex, sym->name, 0, NULL, 0))

static const u8 cpumodes[] = {
	PERF_RECORD_MISC_USER,
	PERF_RECORD_MISC_KERNEL,
	PERF_RECORD_MISC_GUEST_USER,
	PERF_RECORD_MISC_GUEST_KERNEL
};

#define NCPUMODES (sizeof(cpumodes)/sizeof(u8))
static void ip__resolve_ams(struct machine *machine, struct thread *thread,
			    struct addr_map_symbol *ams,
	struct addr_location al;

	memset(&al, 0, sizeof(al));

	for (i = 0; i < NCPUMODES; i++) {

		/*
		 * We cannot use the header.misc hint to determine whether a
		 * branch stack address is user, kernel, guest, or hypervisor.
		 * Branches may straddle the kernel/user/hypervisor boundaries.
		 * Thus, we have to try consecutively until we find a match,
		 * or else the symbol is unknown.
		 */
		thread__find_addr_location(thread, machine, m, MAP__FUNCTION,

	ams->al_addr = al.addr;
static void ip__resolve_data(struct machine *machine, struct thread *thread,
			     u8 m, struct addr_map_symbol *ams, u64 addr)
	struct addr_location al;

	memset(&al, 0, sizeof(al));

	thread__find_addr_location(thread, machine, m, MAP__VARIABLE, addr,

	ams->al_addr = al.addr;
struct mem_info *machine__resolve_mem(struct machine *machine,
				      struct perf_sample *sample,
	struct mem_info *mi = zalloc(sizeof(*mi));

	ip__resolve_ams(machine, thr, &mi->iaddr, sample->ip);
	ip__resolve_data(machine, thr, cpumode, &mi->daddr, sample->addr);
	mi->data_src.val = sample->data_src;
struct branch_info *machine__resolve_bstack(struct machine *machine,
					    struct branch_stack *bs)
	struct branch_info *bi;

	bi = calloc(bs->nr, sizeof(struct branch_info));

	for (i = 0; i < bs->nr; i++) {
		ip__resolve_ams(machine, thr, &bi[i].to, bs->entries[i].to);
		ip__resolve_ams(machine, thr, &bi[i].from, bs->entries[i].from);
		bi[i].flags = bs->entries[i].flags;
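/*
 * Illustrative usage (assumed): resolving a sampled branch stack into
 * symbolized from/to pairs.
 *
 *	struct branch_info *bi = machine__resolve_bstack(machine, thread,
 *							 sample->branch_stack);
 *	if (bi != NULL) {
 *		... bi[i].from.sym / bi[i].to.sym now carry symbols ...
 *		free(bi);
 *	}
 */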
static int machine__resolve_callchain_sample(struct machine *machine,
					     struct thread *thread,
					     struct ip_callchain *chain,
					     struct symbol **parent,
					     struct addr_location *root_al,
	u8 cpumode = PERF_RECORD_MISC_USER;
	int chain_nr = min(max_stack, (int)chain->nr);

	callchain_cursor_reset(&callchain_cursor);

	if (chain->nr > PERF_MAX_STACK_DEPTH) {
		pr_warning("corrupted callchain. skipping...\n");

	for (i = 0; i < chain_nr; i++) {

		struct addr_location al;

		if (callchain_param.order == ORDER_CALLEE)

			ip = chain->ips[chain->nr - i - 1];

		if (ip >= PERF_CONTEXT_MAX) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER;

				pr_debug("invalid callchain context: "
					 "%"PRId64"\n", (s64) ip);
				/*
				 * It seems the callchain is corrupted.
				 */
				callchain_cursor_reset(&callchain_cursor);

		al.filtered = false;
		thread__find_addr_location(thread, machine, cpumode,
					   MAP__FUNCTION, ip, &al);
		if (al.sym != NULL) {
			if (sort__has_parent && !*parent &&
			    symbol__match_regex(al.sym, &parent_regex))

			else if (have_ignore_callees && root_al &&
				 symbol__match_regex(al.sym, &ignore_callees_regex)) {
				/*
				 * Treat this symbol as the root,
				 * forgetting its callees.
				 */

				callchain_cursor_reset(&callchain_cursor);

		err = callchain_cursor_append(&callchain_cursor,
					      ip, al.map, al.sym);
static int unwind_entry(struct unwind_entry *entry, void *arg)
	struct callchain_cursor *cursor = arg;
	return callchain_cursor_append(cursor, entry->ip,
				       entry->map, entry->sym);
int machine__resolve_callchain(struct machine *machine,
			       struct perf_evsel *evsel,
			       struct thread *thread,
			       struct perf_sample *sample,
			       struct symbol **parent,
			       struct addr_location *root_al,
	ret = machine__resolve_callchain_sample(machine, thread,
						sample->callchain, parent,
						root_al, max_stack);

	/* Can we do dwarf post unwind? */
	if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) &&
	      (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER)))

	/* Bail out if nothing was captured. */
	if ((!sample->user_regs.regs) ||
	    (!sample->user_stack.size))

	return unwind__get_entries(unwind_entry, &callchain_cursor, machine,
				   thread, evsel->attr.sample_regs_user,
int machine__for_each_thread(struct machine *machine,
			     int (*fn)(struct thread *thread, void *p),
	struct thread *thread;

	for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
		thread = rb_entry(nd, struct thread, rb_node);
		rc = fn(thread, priv);

	list_for_each_entry(thread, &machine->dead_threads, node) {
		rc = fn(thread, priv);
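/*
 * Illustrative callback (assumed) for machine__for_each_thread(),
 * which walks live threads first and then the dead_threads list:
 *
 *	static int count_thread(struct thread *thread __maybe_unused, void *p)
 *	{
 *		(*(int *)p)++;
 *		return 0;
 *	}
 *
 *	int n = 0;
 *	machine__for_each_thread(machine, count_thread, &n);
 */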
int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
				  struct target *target, struct thread_map *threads,
				  perf_event__handler_t process, bool data_mmap)
	if (target__has_task(target))
		return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap);
	else if (target__has_cpu(target))
		return perf_event__synthesize_threads(tool, process, machine, data_mmap);
	/* command specified */