14 int machine__init(struct machine
*machine
, const char *root_dir
, pid_t pid
)
16 map_groups__init(&machine
->kmaps
);
17 RB_CLEAR_NODE(&machine
->rb_node
);
18 INIT_LIST_HEAD(&machine
->user_dsos
);
19 INIT_LIST_HEAD(&machine
->kernel_dsos
);
21 machine
->threads
= RB_ROOT
;
22 INIT_LIST_HEAD(&machine
->dead_threads
);
23 machine
->last_match
= NULL
;
25 machine
->kmaps
.machine
= machine
;
28 machine
->symbol_filter
= NULL
;
30 machine
->root_dir
= strdup(root_dir
);
31 if (machine
->root_dir
== NULL
)
34 if (pid
!= HOST_KERNEL_ID
) {
35 struct thread
*thread
= machine__findnew_thread(machine
, 0,
42 snprintf(comm
, sizeof(comm
), "[guest/%d]", pid
);
43 thread__set_comm(thread
, comm
);
49 static void dsos__delete(struct list_head
*dsos
)
53 list_for_each_entry_safe(pos
, n
, dsos
, node
) {
59 void machine__delete_dead_threads(struct machine
*machine
)
63 list_for_each_entry_safe(t
, n
, &machine
->dead_threads
, node
) {
69 void machine__delete_threads(struct machine
*machine
)
71 struct rb_node
*nd
= rb_first(&machine
->threads
);
74 struct thread
*t
= rb_entry(nd
, struct thread
, rb_node
);
76 rb_erase(&t
->rb_node
, &machine
->threads
);
82 void machine__exit(struct machine
*machine
)
84 map_groups__exit(&machine
->kmaps
);
85 dsos__delete(&machine
->user_dsos
);
86 dsos__delete(&machine
->kernel_dsos
);
87 free(machine
->root_dir
);
88 machine
->root_dir
= NULL
;
/* Full destructor: tear the machine down, then release the object. */
void machine__delete(struct machine *machine)
{
	machine__exit(machine);
	free(machine);
}
97 void machines__init(struct machines
*machines
)
99 machine__init(&machines
->host
, "", HOST_KERNEL_ID
);
100 machines
->guests
= RB_ROOT
;
101 machines
->symbol_filter
= NULL
;
104 void machines__exit(struct machines
*machines
)
106 machine__exit(&machines
->host
);
110 struct machine
*machines__add(struct machines
*machines
, pid_t pid
,
111 const char *root_dir
)
113 struct rb_node
**p
= &machines
->guests
.rb_node
;
114 struct rb_node
*parent
= NULL
;
115 struct machine
*pos
, *machine
= malloc(sizeof(*machine
));
120 if (machine__init(machine
, root_dir
, pid
) != 0) {
125 machine
->symbol_filter
= machines
->symbol_filter
;
129 pos
= rb_entry(parent
, struct machine
, rb_node
);
136 rb_link_node(&machine
->rb_node
, parent
, p
);
137 rb_insert_color(&machine
->rb_node
, &machines
->guests
);
142 void machines__set_symbol_filter(struct machines
*machines
,
143 symbol_filter_t symbol_filter
)
147 machines
->symbol_filter
= symbol_filter
;
148 machines
->host
.symbol_filter
= symbol_filter
;
150 for (nd
= rb_first(&machines
->guests
); nd
; nd
= rb_next(nd
)) {
151 struct machine
*machine
= rb_entry(nd
, struct machine
, rb_node
);
153 machine
->symbol_filter
= symbol_filter
;
157 struct machine
*machines__find(struct machines
*machines
, pid_t pid
)
159 struct rb_node
**p
= &machines
->guests
.rb_node
;
160 struct rb_node
*parent
= NULL
;
161 struct machine
*machine
;
162 struct machine
*default_machine
= NULL
;
164 if (pid
== HOST_KERNEL_ID
)
165 return &machines
->host
;
169 machine
= rb_entry(parent
, struct machine
, rb_node
);
170 if (pid
< machine
->pid
)
172 else if (pid
> machine
->pid
)
177 default_machine
= machine
;
180 return default_machine
;
183 struct machine
*machines__findnew(struct machines
*machines
, pid_t pid
)
186 const char *root_dir
= "";
187 struct machine
*machine
= machines__find(machines
, pid
);
189 if (machine
&& (machine
->pid
== pid
))
192 if ((pid
!= HOST_KERNEL_ID
) &&
193 (pid
!= DEFAULT_GUEST_KERNEL_ID
) &&
194 (symbol_conf
.guestmount
)) {
195 sprintf(path
, "%s/%d", symbol_conf
.guestmount
, pid
);
196 if (access(path
, R_OK
)) {
197 static struct strlist
*seen
;
200 seen
= strlist__new(true, NULL
);
202 if (!strlist__has_entry(seen
, path
)) {
203 pr_err("Can't access file %s\n", path
);
204 strlist__add(seen
, path
);
212 machine
= machines__add(machines
, pid
, root_dir
);
217 void machines__process_guests(struct machines
*machines
,
218 machine__process_t process
, void *data
)
222 for (nd
= rb_first(&machines
->guests
); nd
; nd
= rb_next(nd
)) {
223 struct machine
*pos
= rb_entry(nd
, struct machine
, rb_node
);
228 char *machine__mmap_name(struct machine
*machine
, char *bf
, size_t size
)
230 if (machine__is_host(machine
))
231 snprintf(bf
, size
, "[%s]", "kernel.kallsyms");
232 else if (machine__is_default_guest(machine
))
233 snprintf(bf
, size
, "[%s]", "guest.kernel.kallsyms");
235 snprintf(bf
, size
, "[%s.%d]", "guest.kernel.kallsyms",
242 void machines__set_id_hdr_size(struct machines
*machines
, u16 id_hdr_size
)
244 struct rb_node
*node
;
245 struct machine
*machine
;
247 machines
->host
.id_hdr_size
= id_hdr_size
;
249 for (node
= rb_first(&machines
->guests
); node
; node
= rb_next(node
)) {
250 machine
= rb_entry(node
, struct machine
, rb_node
);
251 machine
->id_hdr_size
= id_hdr_size
;
257 static struct thread
*__machine__findnew_thread(struct machine
*machine
,
258 pid_t pid
, pid_t tid
,
261 struct rb_node
**p
= &machine
->threads
.rb_node
;
262 struct rb_node
*parent
= NULL
;
266 * Front-end cache - TID lookups come in blocks,
267 * so most of the time we dont have to look up
270 if (machine
->last_match
&& machine
->last_match
->tid
== tid
) {
271 if (pid
&& pid
!= machine
->last_match
->pid_
)
272 machine
->last_match
->pid_
= pid
;
273 return machine
->last_match
;
278 th
= rb_entry(parent
, struct thread
, rb_node
);
280 if (th
->tid
== tid
) {
281 machine
->last_match
= th
;
282 if (pid
&& pid
!= th
->pid_
)
296 th
= thread__new(pid
, tid
);
298 rb_link_node(&th
->rb_node
, parent
, p
);
299 rb_insert_color(&th
->rb_node
, &machine
->threads
);
300 machine
->last_match
= th
;
306 struct thread
*machine__findnew_thread(struct machine
*machine
, pid_t pid
,
309 return __machine__findnew_thread(machine
, pid
, tid
, true);
312 struct thread
*machine__find_thread(struct machine
*machine
, pid_t tid
)
314 return __machine__findnew_thread(machine
, 0, tid
, false);
317 int machine__process_comm_event(struct machine
*machine
, union perf_event
*event
)
319 struct thread
*thread
= machine__findnew_thread(machine
,
324 perf_event__fprintf_comm(event
, stdout
);
326 if (thread
== NULL
|| thread__set_comm(thread
, event
->comm
.comm
)) {
327 dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
334 int machine__process_lost_event(struct machine
*machine __maybe_unused
,
335 union perf_event
*event
)
337 dump_printf(": id:%" PRIu64
": lost:%" PRIu64
"\n",
338 event
->lost
.id
, event
->lost
.lost
);
342 struct map
*machine__new_module(struct machine
*machine
, u64 start
,
343 const char *filename
)
346 struct dso
*dso
= __dsos__findnew(&machine
->kernel_dsos
, filename
);
351 map
= map__new2(start
, dso
, MAP__FUNCTION
);
355 if (machine__is_host(machine
))
356 dso
->symtab_type
= DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE
;
358 dso
->symtab_type
= DSO_BINARY_TYPE__GUEST_KMODULE
;
359 map_groups__insert(&machine
->kmaps
, map
);
363 size_t machines__fprintf_dsos(struct machines
*machines
, FILE *fp
)
366 size_t ret
= __dsos__fprintf(&machines
->host
.kernel_dsos
, fp
) +
367 __dsos__fprintf(&machines
->host
.user_dsos
, fp
);
369 for (nd
= rb_first(&machines
->guests
); nd
; nd
= rb_next(nd
)) {
370 struct machine
*pos
= rb_entry(nd
, struct machine
, rb_node
);
371 ret
+= __dsos__fprintf(&pos
->kernel_dsos
, fp
);
372 ret
+= __dsos__fprintf(&pos
->user_dsos
, fp
);
378 size_t machine__fprintf_dsos_buildid(struct machine
*machine
, FILE *fp
,
379 bool (skip
)(struct dso
*dso
, int parm
), int parm
)
381 return __dsos__fprintf_buildid(&machine
->kernel_dsos
, fp
, skip
, parm
) +
382 __dsos__fprintf_buildid(&machine
->user_dsos
, fp
, skip
, parm
);
385 size_t machines__fprintf_dsos_buildid(struct machines
*machines
, FILE *fp
,
386 bool (skip
)(struct dso
*dso
, int parm
), int parm
)
389 size_t ret
= machine__fprintf_dsos_buildid(&machines
->host
, fp
, skip
, parm
);
391 for (nd
= rb_first(&machines
->guests
); nd
; nd
= rb_next(nd
)) {
392 struct machine
*pos
= rb_entry(nd
, struct machine
, rb_node
);
393 ret
+= machine__fprintf_dsos_buildid(pos
, fp
, skip
, parm
);
398 size_t machine__fprintf_vmlinux_path(struct machine
*machine
, FILE *fp
)
402 struct dso
*kdso
= machine
->vmlinux_maps
[MAP__FUNCTION
]->dso
;
404 if (kdso
->has_build_id
) {
405 char filename
[PATH_MAX
];
406 if (dso__build_id_filename(kdso
, filename
, sizeof(filename
)))
407 printed
+= fprintf(fp
, "[0] %s\n", filename
);
410 for (i
= 0; i
< vmlinux_path__nr_entries
; ++i
)
411 printed
+= fprintf(fp
, "[%d] %s\n",
412 i
+ kdso
->has_build_id
, vmlinux_path
[i
]);
417 size_t machine__fprintf(struct machine
*machine
, FILE *fp
)
422 for (nd
= rb_first(&machine
->threads
); nd
; nd
= rb_next(nd
)) {
423 struct thread
*pos
= rb_entry(nd
, struct thread
, rb_node
);
425 ret
+= thread__fprintf(pos
, fp
);
431 static struct dso
*machine__get_kernel(struct machine
*machine
)
433 const char *vmlinux_name
= NULL
;
436 if (machine__is_host(machine
)) {
437 vmlinux_name
= symbol_conf
.vmlinux_name
;
439 vmlinux_name
= "[kernel.kallsyms]";
441 kernel
= dso__kernel_findnew(machine
, vmlinux_name
,
447 if (machine__is_default_guest(machine
))
448 vmlinux_name
= symbol_conf
.default_guest_vmlinux_name
;
450 vmlinux_name
= machine__mmap_name(machine
, bf
,
453 kernel
= dso__kernel_findnew(machine
, vmlinux_name
,
455 DSO_TYPE_GUEST_KERNEL
);
458 if (kernel
!= NULL
&& (!kernel
->has_build_id
))
459 dso__read_running_kernel_build_id(kernel
, machine
);
464 struct process_args
{
468 static int symbol__in_kernel(void *arg
, const char *name
,
469 char type __maybe_unused
, u64 start
)
471 struct process_args
*args
= arg
;
473 if (strchr(name
, '['))
480 /* Figure out the start address of kernel map from /proc/kallsyms */
481 static u64
machine__get_kernel_start_addr(struct machine
*machine
)
483 const char *filename
;
485 struct process_args args
;
487 if (machine__is_host(machine
)) {
488 filename
= "/proc/kallsyms";
490 if (machine__is_default_guest(machine
))
491 filename
= (char *)symbol_conf
.default_guest_kallsyms
;
493 sprintf(path
, "%s/proc/kallsyms", machine
->root_dir
);
498 if (symbol__restricted_filename(filename
, "/proc/kallsyms"))
501 if (kallsyms__parse(filename
, &args
, symbol__in_kernel
) <= 0)
507 int __machine__create_kernel_maps(struct machine
*machine
, struct dso
*kernel
)
510 u64 start
= machine__get_kernel_start_addr(machine
);
512 for (type
= 0; type
< MAP__NR_TYPES
; ++type
) {
515 machine
->vmlinux_maps
[type
] = map__new2(start
, kernel
, type
);
516 if (machine
->vmlinux_maps
[type
] == NULL
)
519 machine
->vmlinux_maps
[type
]->map_ip
=
520 machine
->vmlinux_maps
[type
]->unmap_ip
=
522 kmap
= map__kmap(machine
->vmlinux_maps
[type
]);
523 kmap
->kmaps
= &machine
->kmaps
;
524 map_groups__insert(&machine
->kmaps
,
525 machine
->vmlinux_maps
[type
]);
531 void machine__destroy_kernel_maps(struct machine
*machine
)
535 for (type
= 0; type
< MAP__NR_TYPES
; ++type
) {
538 if (machine
->vmlinux_maps
[type
] == NULL
)
541 kmap
= map__kmap(machine
->vmlinux_maps
[type
]);
542 map_groups__remove(&machine
->kmaps
,
543 machine
->vmlinux_maps
[type
]);
544 if (kmap
->ref_reloc_sym
) {
546 * ref_reloc_sym is shared among all maps, so free just
549 if (type
== MAP__FUNCTION
) {
550 free((char *)kmap
->ref_reloc_sym
->name
);
551 kmap
->ref_reloc_sym
->name
= NULL
;
552 free(kmap
->ref_reloc_sym
);
554 kmap
->ref_reloc_sym
= NULL
;
557 map__delete(machine
->vmlinux_maps
[type
]);
558 machine
->vmlinux_maps
[type
] = NULL
;
562 int machines__create_guest_kernel_maps(struct machines
*machines
)
565 struct dirent
**namelist
= NULL
;
571 if (symbol_conf
.default_guest_vmlinux_name
||
572 symbol_conf
.default_guest_modules
||
573 symbol_conf
.default_guest_kallsyms
) {
574 machines__create_kernel_maps(machines
, DEFAULT_GUEST_KERNEL_ID
);
577 if (symbol_conf
.guestmount
) {
578 items
= scandir(symbol_conf
.guestmount
, &namelist
, NULL
, NULL
);
581 for (i
= 0; i
< items
; i
++) {
582 if (!isdigit(namelist
[i
]->d_name
[0])) {
583 /* Filter out . and .. */
586 pid
= (pid_t
)strtol(namelist
[i
]->d_name
, &endp
, 10);
587 if ((*endp
!= '\0') ||
588 (endp
== namelist
[i
]->d_name
) ||
590 pr_debug("invalid directory (%s). Skipping.\n",
591 namelist
[i
]->d_name
);
594 sprintf(path
, "%s/%s/proc/kallsyms",
595 symbol_conf
.guestmount
,
596 namelist
[i
]->d_name
);
597 ret
= access(path
, R_OK
);
599 pr_debug("Can't access file %s\n", path
);
602 machines__create_kernel_maps(machines
, pid
);
611 void machines__destroy_kernel_maps(struct machines
*machines
)
613 struct rb_node
*next
= rb_first(&machines
->guests
);
615 machine__destroy_kernel_maps(&machines
->host
);
618 struct machine
*pos
= rb_entry(next
, struct machine
, rb_node
);
620 next
= rb_next(&pos
->rb_node
);
621 rb_erase(&pos
->rb_node
, &machines
->guests
);
622 machine__delete(pos
);
626 int machines__create_kernel_maps(struct machines
*machines
, pid_t pid
)
628 struct machine
*machine
= machines__findnew(machines
, pid
);
633 return machine__create_kernel_maps(machine
);
636 int machine__load_kallsyms(struct machine
*machine
, const char *filename
,
637 enum map_type type
, symbol_filter_t filter
)
639 struct map
*map
= machine
->vmlinux_maps
[type
];
640 int ret
= dso__load_kallsyms(map
->dso
, filename
, map
, filter
);
643 dso__set_loaded(map
->dso
, type
);
645 * Since /proc/kallsyms will have multiple sessions for the
646 * kernel, with modules between them, fixup the end of all
649 __map_groups__fixup_end(&machine
->kmaps
, type
);
655 int machine__load_vmlinux_path(struct machine
*machine
, enum map_type type
,
656 symbol_filter_t filter
)
658 struct map
*map
= machine
->vmlinux_maps
[type
];
659 int ret
= dso__load_vmlinux_path(map
->dso
, map
, filter
);
662 dso__set_loaded(map
->dso
, type
);
667 static void map_groups__fixup_end(struct map_groups
*mg
)
670 for (i
= 0; i
< MAP__NR_TYPES
; ++i
)
671 __map_groups__fixup_end(mg
, i
);
674 static char *get_kernel_version(const char *root_dir
)
676 char version
[PATH_MAX
];
679 const char *prefix
= "Linux version ";
681 sprintf(version
, "%s/proc/version", root_dir
);
682 file
= fopen(version
, "r");
687 tmp
= fgets(version
, sizeof(version
), file
);
690 name
= strstr(version
, prefix
);
693 name
+= strlen(prefix
);
694 tmp
= strchr(name
, ' ');
701 static int map_groups__set_modules_path_dir(struct map_groups
*mg
,
702 const char *dir_name
)
705 DIR *dir
= opendir(dir_name
);
709 pr_debug("%s: cannot open %s dir\n", __func__
, dir_name
);
713 while ((dent
= readdir(dir
)) != NULL
) {
717 /*sshfs might return bad dent->d_type, so we have to stat*/
718 snprintf(path
, sizeof(path
), "%s/%s", dir_name
, dent
->d_name
);
722 if (S_ISDIR(st
.st_mode
)) {
723 if (!strcmp(dent
->d_name
, ".") ||
724 !strcmp(dent
->d_name
, ".."))
727 ret
= map_groups__set_modules_path_dir(mg
, path
);
731 char *dot
= strrchr(dent
->d_name
, '.'),
736 if (dot
== NULL
|| strcmp(dot
, ".ko"))
738 snprintf(dso_name
, sizeof(dso_name
), "[%.*s]",
739 (int)(dot
- dent
->d_name
), dent
->d_name
);
741 strxfrchar(dso_name
, '-', '_');
742 map
= map_groups__find_by_name(mg
, MAP__FUNCTION
,
747 long_name
= strdup(path
);
748 if (long_name
== NULL
) {
752 dso__set_long_name(map
->dso
, long_name
);
753 map
->dso
->lname_alloc
= 1;
754 dso__kernel_module_get_build_id(map
->dso
, "");
763 static int machine__set_modules_path(struct machine
*machine
)
766 char modules_path
[PATH_MAX
];
768 version
= get_kernel_version(machine
->root_dir
);
772 snprintf(modules_path
, sizeof(modules_path
), "%s/lib/modules/%s/kernel",
773 machine
->root_dir
, version
);
776 return map_groups__set_modules_path_dir(&machine
->kmaps
, modules_path
);
779 static int machine__create_modules(struct machine
*machine
)
788 if (machine__is_default_guest(machine
))
789 modules
= symbol_conf
.default_guest_modules
;
791 sprintf(path
, "%s/proc/modules", machine
->root_dir
);
795 if (symbol__restricted_filename(modules
, "/proc/modules"))
798 file
= fopen(modules
, "r");
802 while (!feof(file
)) {
808 line_len
= getline(&line
, &n
, file
);
815 line
[--line_len
] = '\0'; /* \n */
817 sep
= strrchr(line
, 'x');
821 hex2u64(sep
+ 1, &start
);
823 sep
= strchr(line
, ' ');
829 snprintf(name
, sizeof(name
), "[%s]", line
);
830 map
= machine__new_module(machine
, start
, name
);
832 goto out_delete_line
;
833 dso__kernel_module_get_build_id(map
->dso
, machine
->root_dir
);
839 if (machine__set_modules_path(machine
) < 0) {
840 pr_debug("Problems setting modules path maps, continuing anyway...\n");
850 int machine__create_kernel_maps(struct machine
*machine
)
852 struct dso
*kernel
= machine__get_kernel(machine
);
854 if (kernel
== NULL
||
855 __machine__create_kernel_maps(machine
, kernel
) < 0)
858 if (symbol_conf
.use_modules
&& machine__create_modules(machine
) < 0) {
859 if (machine__is_host(machine
))
860 pr_debug("Problems creating module maps, "
861 "continuing anyway...\n");
863 pr_debug("Problems creating module maps for guest %d, "
864 "continuing anyway...\n", machine
->pid
);
868 * Now that we have all the maps created, just set the ->end of them:
870 map_groups__fixup_end(&machine
->kmaps
);
874 static void machine__set_kernel_mmap_len(struct machine
*machine
,
875 union perf_event
*event
)
879 for (i
= 0; i
< MAP__NR_TYPES
; i
++) {
880 machine
->vmlinux_maps
[i
]->start
= event
->mmap
.start
;
881 machine
->vmlinux_maps
[i
]->end
= (event
->mmap
.start
+
884 * Be a bit paranoid here, some perf.data file came with
885 * a zero sized synthesized MMAP event for the kernel.
887 if (machine
->vmlinux_maps
[i
]->end
== 0)
888 machine
->vmlinux_maps
[i
]->end
= ~0ULL;
892 static bool machine__uses_kcore(struct machine
*machine
)
896 list_for_each_entry(dso
, &machine
->kernel_dsos
, node
) {
897 if (dso__is_kcore(dso
))
904 static int machine__process_kernel_mmap_event(struct machine
*machine
,
905 union perf_event
*event
)
908 char kmmap_prefix
[PATH_MAX
];
909 enum dso_kernel_type kernel_type
;
912 /* If we have maps from kcore then we do not need or want any others */
913 if (machine__uses_kcore(machine
))
916 machine__mmap_name(machine
, kmmap_prefix
, sizeof(kmmap_prefix
));
917 if (machine__is_host(machine
))
918 kernel_type
= DSO_TYPE_KERNEL
;
920 kernel_type
= DSO_TYPE_GUEST_KERNEL
;
922 is_kernel_mmap
= memcmp(event
->mmap
.filename
,
924 strlen(kmmap_prefix
) - 1) == 0;
925 if (event
->mmap
.filename
[0] == '/' ||
926 (!is_kernel_mmap
&& event
->mmap
.filename
[0] == '[')) {
928 char short_module_name
[1024];
931 if (event
->mmap
.filename
[0] == '/') {
932 name
= strrchr(event
->mmap
.filename
, '/');
937 dot
= strrchr(name
, '.');
940 snprintf(short_module_name
, sizeof(short_module_name
),
941 "[%.*s]", (int)(dot
- name
), name
);
942 strxfrchar(short_module_name
, '-', '_');
944 strcpy(short_module_name
, event
->mmap
.filename
);
946 map
= machine__new_module(machine
, event
->mmap
.start
,
947 event
->mmap
.filename
);
951 name
= strdup(short_module_name
);
955 map
->dso
->short_name
= name
;
956 map
->dso
->sname_alloc
= 1;
957 map
->end
= map
->start
+ event
->mmap
.len
;
958 } else if (is_kernel_mmap
) {
959 const char *symbol_name
= (event
->mmap
.filename
+
960 strlen(kmmap_prefix
));
962 * Should be there already, from the build-id table in
965 struct dso
*kernel
= __dsos__findnew(&machine
->kernel_dsos
,
970 kernel
->kernel
= kernel_type
;
971 if (__machine__create_kernel_maps(machine
, kernel
) < 0)
974 machine__set_kernel_mmap_len(machine
, event
);
977 * Avoid using a zero address (kptr_restrict) for the ref reloc
978 * symbol. Effectively having zero here means that at record
979 * time /proc/sys/kernel/kptr_restrict was non zero.
981 if (event
->mmap
.pgoff
!= 0) {
982 maps__set_kallsyms_ref_reloc_sym(machine
->vmlinux_maps
,
987 if (machine__is_default_guest(machine
)) {
989 * preload dso of guest kernel and modules
991 dso__load(kernel
, machine
->vmlinux_maps
[MAP__FUNCTION
],
1000 int machine__process_mmap2_event(struct machine
*machine
,
1001 union perf_event
*event
)
1003 u8 cpumode
= event
->header
.misc
& PERF_RECORD_MISC_CPUMODE_MASK
;
1004 struct thread
*thread
;
1010 perf_event__fprintf_mmap2(event
, stdout
);
1012 if (cpumode
== PERF_RECORD_MISC_GUEST_KERNEL
||
1013 cpumode
== PERF_RECORD_MISC_KERNEL
) {
1014 ret
= machine__process_kernel_mmap_event(machine
, event
);
1020 thread
= machine__findnew_thread(machine
, event
->mmap2
.pid
,
1025 if (event
->header
.misc
& PERF_RECORD_MISC_MMAP_DATA
)
1026 type
= MAP__VARIABLE
;
1028 type
= MAP__FUNCTION
;
1030 map
= map__new(&machine
->user_dsos
, event
->mmap2
.start
,
1031 event
->mmap2
.len
, event
->mmap2
.pgoff
,
1032 event
->mmap2
.pid
, event
->mmap2
.maj
,
1033 event
->mmap2
.min
, event
->mmap2
.ino
,
1034 event
->mmap2
.ino_generation
,
1035 event
->mmap2
.filename
, type
);
1040 thread__insert_map(thread
, map
);
1044 dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
1048 int machine__process_mmap_event(struct machine
*machine
, union perf_event
*event
)
1050 u8 cpumode
= event
->header
.misc
& PERF_RECORD_MISC_CPUMODE_MASK
;
1051 struct thread
*thread
;
1057 perf_event__fprintf_mmap(event
, stdout
);
1059 if (cpumode
== PERF_RECORD_MISC_GUEST_KERNEL
||
1060 cpumode
== PERF_RECORD_MISC_KERNEL
) {
1061 ret
= machine__process_kernel_mmap_event(machine
, event
);
1067 thread
= machine__findnew_thread(machine
, event
->mmap
.pid
,
1072 if (event
->header
.misc
& PERF_RECORD_MISC_MMAP_DATA
)
1073 type
= MAP__VARIABLE
;
1075 type
= MAP__FUNCTION
;
1077 map
= map__new(&machine
->user_dsos
, event
->mmap
.start
,
1078 event
->mmap
.len
, event
->mmap
.pgoff
,
1079 event
->mmap
.pid
, 0, 0, 0, 0,
1080 event
->mmap
.filename
,
1086 thread__insert_map(thread
, map
);
1090 dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
1094 static void machine__remove_thread(struct machine
*machine
, struct thread
*th
)
1096 machine
->last_match
= NULL
;
1097 rb_erase(&th
->rb_node
, &machine
->threads
);
1099 * We may have references to this thread, for instance in some hist_entry
1100 * instances, so just move them to a separate list.
1102 list_add_tail(&th
->node
, &machine
->dead_threads
);
1105 int machine__process_fork_event(struct machine
*machine
, union perf_event
*event
)
1107 struct thread
*thread
= machine__find_thread(machine
, event
->fork
.tid
);
1108 struct thread
*parent
= machine__findnew_thread(machine
,
1112 /* if a thread currently exists for the thread id remove it */
1114 machine__remove_thread(machine
, thread
);
1116 thread
= machine__findnew_thread(machine
, event
->fork
.pid
,
1119 perf_event__fprintf_task(event
, stdout
);
1121 if (thread
== NULL
|| parent
== NULL
||
1122 thread__fork(thread
, parent
) < 0) {
1123 dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
1130 int machine__process_exit_event(struct machine
*machine __maybe_unused
,
1131 union perf_event
*event
)
1133 struct thread
*thread
= machine__find_thread(machine
, event
->fork
.tid
);
1136 perf_event__fprintf_task(event
, stdout
);
1139 thread__exited(thread
);
1144 int machine__process_event(struct machine
*machine
, union perf_event
*event
)
1148 switch (event
->header
.type
) {
1149 case PERF_RECORD_COMM
:
1150 ret
= machine__process_comm_event(machine
, event
); break;
1151 case PERF_RECORD_MMAP
:
1152 ret
= machine__process_mmap_event(machine
, event
); break;
1153 case PERF_RECORD_MMAP2
:
1154 ret
= machine__process_mmap2_event(machine
, event
); break;
1155 case PERF_RECORD_FORK
:
1156 ret
= machine__process_fork_event(machine
, event
); break;
1157 case PERF_RECORD_EXIT
:
1158 ret
= machine__process_exit_event(machine
, event
); break;
1159 case PERF_RECORD_LOST
:
1160 ret
= machine__process_lost_event(machine
, event
); break;
1169 static bool symbol__match_regex(struct symbol
*sym
, regex_t
*regex
)
1171 if (sym
->name
&& !regexec(regex
, sym
->name
, 0, NULL
, 0))
1176 static const u8 cpumodes
[] = {
1177 PERF_RECORD_MISC_USER
,
1178 PERF_RECORD_MISC_KERNEL
,
1179 PERF_RECORD_MISC_GUEST_USER
,
1180 PERF_RECORD_MISC_GUEST_KERNEL
1182 #define NCPUMODES (sizeof(cpumodes)/sizeof(u8))
1184 static void ip__resolve_ams(struct machine
*machine
, struct thread
*thread
,
1185 struct addr_map_symbol
*ams
,
1188 struct addr_location al
;
1192 memset(&al
, 0, sizeof(al
));
1194 for (i
= 0; i
< NCPUMODES
; i
++) {
1197 * We cannot use the header.misc hint to determine whether a
1198 * branch stack address is user, kernel, guest, hypervisor.
1199 * Branches may straddle the kernel/user/hypervisor boundaries.
1200 * Thus, we have to try consecutively until we find a match
1201 * or else, the symbol is unknown
1203 thread__find_addr_location(thread
, machine
, m
, MAP__FUNCTION
,
1210 ams
->al_addr
= al
.addr
;
1215 static void ip__resolve_data(struct machine
*machine
, struct thread
*thread
,
1216 u8 m
, struct addr_map_symbol
*ams
, u64 addr
)
1218 struct addr_location al
;
1220 memset(&al
, 0, sizeof(al
));
1222 thread__find_addr_location(thread
, machine
, m
, MAP__VARIABLE
, addr
,
1225 ams
->al_addr
= al
.addr
;
1230 struct mem_info
*machine__resolve_mem(struct machine
*machine
,
1232 struct perf_sample
*sample
,
1235 struct mem_info
*mi
= zalloc(sizeof(*mi
));
1240 ip__resolve_ams(machine
, thr
, &mi
->iaddr
, sample
->ip
);
1241 ip__resolve_data(machine
, thr
, cpumode
, &mi
->daddr
, sample
->addr
);
1242 mi
->data_src
.val
= sample
->data_src
;
1247 struct branch_info
*machine__resolve_bstack(struct machine
*machine
,
1249 struct branch_stack
*bs
)
1251 struct branch_info
*bi
;
1254 bi
= calloc(bs
->nr
, sizeof(struct branch_info
));
1258 for (i
= 0; i
< bs
->nr
; i
++) {
1259 ip__resolve_ams(machine
, thr
, &bi
[i
].to
, bs
->entries
[i
].to
);
1260 ip__resolve_ams(machine
, thr
, &bi
[i
].from
, bs
->entries
[i
].from
);
1261 bi
[i
].flags
= bs
->entries
[i
].flags
;
1266 static int machine__resolve_callchain_sample(struct machine
*machine
,
1267 struct thread
*thread
,
1268 struct ip_callchain
*chain
,
1269 struct symbol
**parent
,
1270 struct addr_location
*root_al
)
1272 u8 cpumode
= PERF_RECORD_MISC_USER
;
1276 callchain_cursor_reset(&callchain_cursor
);
1278 if (chain
->nr
> PERF_MAX_STACK_DEPTH
) {
1279 pr_warning("corrupted callchain. skipping...\n");
1283 for (i
= 0; i
< chain
->nr
; i
++) {
1285 struct addr_location al
;
1287 if (callchain_param
.order
== ORDER_CALLEE
)
1290 ip
= chain
->ips
[chain
->nr
- i
- 1];
1292 if (ip
>= PERF_CONTEXT_MAX
) {
1294 case PERF_CONTEXT_HV
:
1295 cpumode
= PERF_RECORD_MISC_HYPERVISOR
;
1297 case PERF_CONTEXT_KERNEL
:
1298 cpumode
= PERF_RECORD_MISC_KERNEL
;
1300 case PERF_CONTEXT_USER
:
1301 cpumode
= PERF_RECORD_MISC_USER
;
1304 pr_debug("invalid callchain context: "
1305 "%"PRId64
"\n", (s64
) ip
);
1307 * It seems the callchain is corrupted.
1310 callchain_cursor_reset(&callchain_cursor
);
1316 al
.filtered
= false;
1317 thread__find_addr_location(thread
, machine
, cpumode
,
1318 MAP__FUNCTION
, ip
, &al
);
1319 if (al
.sym
!= NULL
) {
1320 if (sort__has_parent
&& !*parent
&&
1321 symbol__match_regex(al
.sym
, &parent_regex
))
1323 else if (have_ignore_callees
&& root_al
&&
1324 symbol__match_regex(al
.sym
, &ignore_callees_regex
)) {
1325 /* Treat this symbol as the root,
1326 forgetting its callees. */
1328 callchain_cursor_reset(&callchain_cursor
);
1330 if (!symbol_conf
.use_callchain
)
1334 err
= callchain_cursor_append(&callchain_cursor
,
1335 ip
, al
.map
, al
.sym
);
1343 static int unwind_entry(struct unwind_entry
*entry
, void *arg
)
1345 struct callchain_cursor
*cursor
= arg
;
1346 return callchain_cursor_append(cursor
, entry
->ip
,
1347 entry
->map
, entry
->sym
);
1350 int machine__resolve_callchain(struct machine
*machine
,
1351 struct perf_evsel
*evsel
,
1352 struct thread
*thread
,
1353 struct perf_sample
*sample
,
1354 struct symbol
**parent
,
1355 struct addr_location
*root_al
)
1359 ret
= machine__resolve_callchain_sample(machine
, thread
,
1360 sample
->callchain
, parent
, root_al
);
1364 /* Can we do dwarf post unwind? */
1365 if (!((evsel
->attr
.sample_type
& PERF_SAMPLE_REGS_USER
) &&
1366 (evsel
->attr
.sample_type
& PERF_SAMPLE_STACK_USER
)))
1369 /* Bail out if nothing was captured. */
1370 if ((!sample
->user_regs
.regs
) ||
1371 (!sample
->user_stack
.size
))
1374 return unwind__get_entries(unwind_entry
, &callchain_cursor
, machine
,
1375 thread
, evsel
->attr
.sample_regs_user
,