// SPDX-License-Identifier: GPL-2.0
#include "map_symbol.h"
#include "mem-events.h"
#include <sys/types.h>
#include "linux/hash.h"
#include "bpf-event.h"
#include <internal/lib.h> // page_size

#include <linux/ctype.h>
#include <symbol/kallsyms.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/zalloc.h>

static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock);

static struct dso *machine__kernel_dso(struct machine *machine)
{
	return machine->vmlinux_map->dso;
}

static void dsos__init(struct dsos *dsos)
{
	INIT_LIST_HEAD(&dsos->head);
	init_rwsem(&dsos->lock);
}

static void machine__threads_init(struct machine *machine)
{
	int i;

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads *threads = &machine->threads[i];
		threads->entries = RB_ROOT_CACHED;
		init_rwsem(&threads->lock);
		threads->nr = 0;
		INIT_LIST_HEAD(&threads->dead);
		threads->last_match = NULL;
	}
}
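
/*
 * Editorial note, with a sketch of the bucket selection (the helper itself
 * lives in machine.h, not in this file): each tid is assumed to map to one
 * of the THREADS__TABLE_SIZE buckets, e.g.
 *
 *	static inline struct threads *machine__threads(struct machine *machine,
 *						       pid_t tid)
 *	{
 *		// the cast handles tid == -1; one rwsem per bucket limits
 *		// contention between concurrent lookups and inserts
 *		return &machine->threads[(unsigned int)tid % THREADS__TABLE_SIZE];
 *	}
 */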

static int machine__set_mmap_name(struct machine *machine)
{
	if (machine__is_host(machine))
		machine->mmap_name = strdup("[kernel.kallsyms]");
	else if (machine__is_default_guest(machine))
		machine->mmap_name = strdup("[guest.kernel.kallsyms]");
	else if (asprintf(&machine->mmap_name, "[guest.kernel.kallsyms.%d]",
			  machine->pid) < 0)
		machine->mmap_name = NULL;

	return machine->mmap_name ? 0 : -ENOMEM;
}
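
/*
 * Illustrative examples of the names produced above: the host machine gets
 * "[kernel.kallsyms]", the default guest "[guest.kernel.kallsyms]", and a
 * guest with pid 4711 "[guest.kernel.kallsyms.4711]".
 */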

int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
{
	int err = -ENOMEM;

	memset(machine, 0, sizeof(*machine));
	maps__init(&machine->kmaps, machine);
	RB_CLEAR_NODE(&machine->rb_node);
	dsos__init(&machine->dsos);

	machine__threads_init(machine);

	machine->vdso_info = NULL;
	machine->env = NULL;

	machine->pid = pid;

	machine->id_hdr_size = 0;
	machine->kptr_restrict_warned = false;
	machine->comm_exec = false;
	machine->kernel_start = 0;
	machine->vmlinux_map = NULL;

	machine->root_dir = strdup(root_dir);
	if (machine->root_dir == NULL)
		return -ENOMEM;

	if (machine__set_mmap_name(machine))
		goto out;

	if (pid != HOST_KERNEL_ID) {
		struct thread *thread = machine__findnew_thread(machine, -1,
								pid);
		char comm[64];

		if (thread == NULL)
			goto out;

		snprintf(comm, sizeof(comm), "[guest/%d]", pid);
		thread__set_comm(thread, comm, 0);
		thread__put(thread);
	}

	machine->current_tid = NULL;
	err = 0;

out:
	if (err) {
		zfree(&machine->root_dir);
		zfree(&machine->mmap_name);
	}
	return err;
}
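
/*
 * Illustrative usage sketch (not from this file): callers that embed a
 * struct machine pair machine__init() with machine__exit(), e.g.
 *
 *	struct machine host;
 *
 *	if (machine__init(&host, "", HOST_KERNEL_ID) == 0) {
 *		// ... resolve symbols, process events ...
 *		machine__exit(&host);
 *	}
 *
 * Heap users go through machine__new_host()/machine__delete() below instead.
 */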

struct machine *machine__new_host(void)
{
	struct machine *machine = malloc(sizeof(*machine));

	if (machine != NULL) {
		machine__init(machine, "", HOST_KERNEL_ID);

		if (machine__create_kernel_maps(machine) < 0)
			goto out_delete;
	}

	return machine;
out_delete:
	free(machine);
	return NULL;
}

struct machine *machine__new_kallsyms(void)
{
	struct machine *machine = machine__new_host();
	/*
	 * FIXME:
	 * 1) We should switch to machine__load_kallsyms(), i.e. not explicitly
	 *    ask for not using the kcore parsing code, once this one is fixed
	 *    to create a map per module.
	 */
	if (machine && machine__load_kallsyms(machine, "/proc/kallsyms") <= 0) {
		machine__delete(machine);
		machine = NULL;
	}

	return machine;
}

static void dsos__purge(struct dsos *dsos)
{
	struct dso *pos, *n;

	down_write(&dsos->lock);

	list_for_each_entry_safe(pos, n, &dsos->head, node) {
		RB_CLEAR_NODE(&pos->rb_node);
		list_del_init(&pos->node);
		dso__put(pos);
	}

	up_write(&dsos->lock);
}

static void dsos__exit(struct dsos *dsos)
{
	dsos__purge(dsos);
	exit_rwsem(&dsos->lock);
}

void machine__delete_threads(struct machine *machine)
{
	struct rb_node *nd;
	int i;

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads *threads = &machine->threads[i];
		down_write(&threads->lock);
		nd = rb_first_cached(&threads->entries);
		while (nd) {
			struct thread *t = rb_entry(nd, struct thread, rb_node);

			nd = rb_next(nd);
			__machine__remove_thread(machine, t, false);
		}
		up_write(&threads->lock);
	}
}

void machine__exit(struct machine *machine)
{
	int i;

	if (machine == NULL)
		return;

	machine__destroy_kernel_maps(machine);
	maps__exit(&machine->kmaps);
	dsos__exit(&machine->dsos);
	machine__exit_vdso(machine);
	zfree(&machine->root_dir);
	zfree(&machine->mmap_name);
	zfree(&machine->current_tid);

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads *threads = &machine->threads[i];
		struct thread *thread, *n;
		/*
		 * Forget about the dead: at this point whatever threads were
		 * left in the dead lists better have a reference count taken
		 * by whoever is using them, and then, when they drop those
		 * references and it finally hits zero, thread__put() will check
		 * and see that it's not in the dead threads list and will not
		 * try to remove it from there, just calling thread__delete()
		 * straight away.
		 */
		list_for_each_entry_safe(thread, n, &threads->dead, node)
			list_del_init(&thread->node);

		exit_rwsem(&threads->lock);
	}
}

void machine__delete(struct machine *machine)
{
	if (machine) {
		machine__exit(machine);
		free(machine);
	}
}

void machines__init(struct machines *machines)
{
	machine__init(&machines->host, "", HOST_KERNEL_ID);
	machines->guests = RB_ROOT_CACHED;
}

void machines__exit(struct machines *machines)
{
	machine__exit(&machines->host);
}

struct machine *machines__add(struct machines *machines, pid_t pid,
			      const char *root_dir)
{
	struct rb_node **p = &machines->guests.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct machine *pos, *machine = malloc(sizeof(*machine));
	bool leftmost = true;

	if (machine == NULL)
		return NULL;

	if (machine__init(machine, root_dir, pid) != 0) {
		free(machine);
		return NULL;
	}

	while (*p != NULL) {
		parent = *p;
		pos = rb_entry(parent, struct machine, rb_node);
		if (pid < pos->pid)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&machine->rb_node, parent, p);
	rb_insert_color_cached(&machine->rb_node, &machines->guests, leftmost);

	return machine;
}

void machines__set_comm_exec(struct machines *machines, bool comm_exec)
{
	struct rb_node *nd;

	machines->host.comm_exec = comm_exec;

	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		machine->comm_exec = comm_exec;
	}
}

struct machine *machines__find(struct machines *machines, pid_t pid)
{
	struct rb_node **p = &machines->guests.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct machine *machine;
	struct machine *default_machine = NULL;

	if (pid == HOST_KERNEL_ID)
		return &machines->host;

	while (*p != NULL) {
		parent = *p;
		machine = rb_entry(parent, struct machine, rb_node);
		if (pid < machine->pid)
			p = &(*p)->rb_left;
		else if (pid > machine->pid)
			p = &(*p)->rb_right;
		else
			return machine;
		if (!machine->pid)
			default_machine = machine;
	}

	return default_machine;
}

struct machine *machines__findnew(struct machines *machines, pid_t pid)
{
	char path[PATH_MAX];
	const char *root_dir = "";
	struct machine *machine = machines__find(machines, pid);

	if (machine && (machine->pid == pid))
		goto out;

	if ((pid != HOST_KERNEL_ID) &&
	    (pid != DEFAULT_GUEST_KERNEL_ID) &&
	    (symbol_conf.guestmount)) {
		sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
		if (access(path, R_OK)) {
			static struct strlist *seen;

			if (!seen)
				seen = strlist__new(NULL, NULL);

			if (!strlist__has_entry(seen, path)) {
				pr_err("Can't access file %s\n", path);
				strlist__add(seen, path);
			}
			machine = NULL;
			goto out;
		}
		root_dir = path;
	}

	machine = machines__add(machines, pid, root_dir);
out:
	return machine;
}
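
/*
 * Illustrative usage sketch (hypothetical caller): session code resolves the
 * machine for an event once per sample, letting guests be created lazily:
 *
 *	struct machine *m = machines__findnew(machines, sample_machine_pid);
 *
 *	if (m == NULL)
 *		return -1;	// guestmount path missing or OOM
 *
 * HOST_KERNEL_ID short-circuits to &machines->host via machines__find().
 */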

void machines__process_guests(struct machines *machines,
			      machine__process_t process, void *data)
{
	struct rb_node *nd;

	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);

		process(pos, data);
	}
}

void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
{
	struct rb_node *node;
	struct machine *machine;

	machines->host.id_hdr_size = id_hdr_size;

	for (node = rb_first_cached(&machines->guests); node;
	     node = rb_next(node)) {
		machine = rb_entry(node, struct machine, rb_node);
		machine->id_hdr_size = id_hdr_size;
	}
}

static void machine__update_thread_pid(struct machine *machine,
				       struct thread *th, pid_t pid)
{
	struct thread *leader;

	if (pid == th->pid_ || pid == -1 || th->pid_ != -1)
		return;

	th->pid_ = pid;

	if (th->pid_ == th->tid)
		return;

	leader = __machine__findnew_thread(machine, th->pid_, th->pid_);
	if (!leader)
		goto out_err;

	if (!leader->maps)
		leader->maps = maps__new(machine);

	if (!leader->maps)
		goto out_err;

	if (th->maps == leader->maps)
		return;

	if (th->maps) {
		/*
		 * Maps are created from MMAP events which provide the pid and
		 * tid. Consequently there never should be any maps on a thread
		 * with an unknown pid. Just print an error if there are.
		 */
		if (!maps__empty(th->maps))
			pr_err("Discarding thread maps for %d:%d\n",
			       th->pid_, th->tid);
		maps__put(th->maps);
	}

	th->maps = maps__get(leader->maps);
out_put:
	thread__put(leader);
	return;
out_err:
	pr_err("Failed to join map groups for %d:%d\n", th->pid_, th->tid);
	goto out_put;
}

/*
 * Front-end cache - TID lookups come in blocks,
 * so most of the time we don't have to look up
 * the full rbtree:
 */
static struct thread*
__threads__get_last_match(struct threads *threads, struct machine *machine,
			  int pid, int tid)
{
	struct thread *th;

	th = threads->last_match;
	if (th != NULL) {
		if (th->tid == tid) {
			machine__update_thread_pid(machine, th, pid);
			return thread__get(th);
		}

		threads->last_match = NULL;
	}

	return NULL;
}

static struct thread*
threads__get_last_match(struct threads *threads, struct machine *machine,
			int pid, int tid)
{
	struct thread *th = NULL;

	if (perf_singlethreaded)
		th = __threads__get_last_match(threads, machine, pid, tid);

	return th;
}

static void
__threads__set_last_match(struct threads *threads, struct thread *th)
{
	threads->last_match = th;
}

static void
threads__set_last_match(struct threads *threads, struct thread *th)
{
	if (perf_singlethreaded)
		__threads__set_last_match(threads, th);
}

/*
 * Caller must eventually drop thread->refcnt returned with a successful
 * lookup/new thread inserted.
 */
static struct thread *____machine__findnew_thread(struct machine *machine,
						  struct threads *threads,
						  pid_t pid, pid_t tid,
						  bool create)
{
	struct rb_node **p = &threads->entries.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;
	bool leftmost = true;

	th = threads__get_last_match(threads, machine, pid, tid);
	if (th)
		return th;

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->tid == tid) {
			threads__set_last_match(threads, th);
			machine__update_thread_pid(machine, th, pid);
			return thread__get(th);
		}

		if (tid < th->tid)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	if (!create)
		return NULL;

	th = thread__new(pid, tid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color_cached(&th->rb_node, &threads->entries, leftmost);

		/*
		 * We have to initialize maps separately after the rb tree is
		 * updated.
		 *
		 * The reason is that we call machine__findnew_thread within
		 * thread__init_maps to find the thread leader and that would
		 * screw up the rb tree.
		 */
		if (thread__init_maps(th, machine)) {
			rb_erase_cached(&th->rb_node, &threads->entries);
			RB_CLEAR_NODE(&th->rb_node);
			thread__put(th);
			return NULL;
		}
		/*
		 * It is now in the rbtree, get a ref
		 */
		thread__get(th);
		threads__set_last_match(threads, th);
		++threads->nr;
	}

	return th;
}
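
/*
 * Editorial note: the function above is the canonical rb_root_cached insert
 * pattern -- remember the parent while descending, keep `leftmost` true only
 * while going left, then link and recolor. A minimal sketch with a
 * hypothetical node type:
 *
 *	struct item { struct rb_node rb; pid_t key; };
 *
 *	struct rb_node **p = &root->rb_root.rb_node, *parent = NULL;
 *	bool leftmost = true;
 *
 *	while (*p) {
 *		parent = *p;
 *		if (key < rb_entry(parent, struct item, rb)->key)
 *			p = &(*p)->rb_left;
 *		else {
 *			p = &(*p)->rb_right;
 *			leftmost = false;
 *		}
 *	}
 *	rb_link_node(&new->rb, parent, p);
 *	rb_insert_color_cached(&new->rb, root, leftmost);
 */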

struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid)
{
	return ____machine__findnew_thread(machine, machine__threads(machine, tid), pid, tid, true);
}

struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
				       pid_t tid)
{
	struct threads *threads = machine__threads(machine, tid);
	struct thread *th;

	down_write(&threads->lock);
	th = __machine__findnew_thread(machine, pid, tid);
	up_write(&threads->lock);
	return th;
}

struct thread *machine__find_thread(struct machine *machine, pid_t pid,
				    pid_t tid)
{
	struct threads *threads = machine__threads(machine, tid);
	struct thread *th;

	down_read(&threads->lock);
	th = ____machine__findnew_thread(machine, threads, pid, tid, false);
	up_read(&threads->lock);
	return th;
}

struct comm *machine__thread_exec_comm(struct machine *machine,
				       struct thread *thread)
{
	if (machine->comm_exec)
		return thread__exec_comm(thread);
	else
		return thread__comm(thread);
}

int machine__process_comm_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine,
							event->comm.pid,
							event->comm.tid);
	bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC;
	int err = 0;

	if (exec)
		machine->comm_exec = true;

	if (dump_trace)
		perf_event__fprintf_comm(event, stdout);

	if (thread == NULL ||
	    __thread__set_comm(thread, event->comm.comm, sample->time, exec)) {
		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
		err = -1;
	}

	thread__put(thread);

	return err;
}

int machine__process_namespaces_event(struct machine *machine __maybe_unused,
				      union perf_event *event,
				      struct perf_sample *sample __maybe_unused)
{
	struct thread *thread = machine__findnew_thread(machine,
							event->namespaces.pid,
							event->namespaces.tid);
	int err = 0;

	WARN_ONCE(event->namespaces.nr_namespaces > NR_NAMESPACES,
		  "\nWARNING: kernel seems to support more namespaces than perf"
		  " tool.\nTry updating the perf tool..\n\n");

	WARN_ONCE(event->namespaces.nr_namespaces < NR_NAMESPACES,
		  "\nWARNING: perf tool seems to support more namespaces than"
		  " the kernel.\nTry updating the kernel..\n\n");

	if (dump_trace)
		perf_event__fprintf_namespaces(event, stdout);

	if (thread == NULL ||
	    thread__set_namespaces(thread, sample->time, &event->namespaces)) {
		dump_printf("problem processing PERF_RECORD_NAMESPACES, skipping event.\n");
		err = -1;
	}

	thread__put(thread);

	return err;
}

int machine__process_cgroup_event(struct machine *machine,
				  union perf_event *event,
				  struct perf_sample *sample __maybe_unused)
{
	struct cgroup *cgrp;

	if (dump_trace)
		perf_event__fprintf_cgroup(event, stdout);

	cgrp = cgroup__findnew(machine->env, event->cgroup.id, event->cgroup.path);
	if (cgrp == NULL)
		return -ENOMEM;

	return 0;
}

int machine__process_lost_event(struct machine *machine __maybe_unused,
				union perf_event *event, struct perf_sample *sample __maybe_unused)
{
	dump_printf(": id:%" PRI_lu64 ": lost:%" PRI_lu64 "\n",
		    event->lost.id, event->lost.lost);
	return 0;
}

int machine__process_lost_samples_event(struct machine *machine __maybe_unused,
					union perf_event *event, struct perf_sample *sample)
{
	dump_printf(": id:%" PRIu64 ": lost samples :%" PRI_lu64 "\n",
		    sample->id, event->lost_samples.lost);
	return 0;
}

static struct dso *machine__findnew_module_dso(struct machine *machine,
					       struct kmod_path *m,
					       const char *filename)
{
	struct dso *dso;

	down_write(&machine->dsos.lock);

	dso = __dsos__find(&machine->dsos, m->name, true);
	if (!dso) {
		dso = __dsos__addnew(&machine->dsos, m->name);
		if (dso == NULL)
			goto out_unlock;

		dso__set_module_info(dso, m, machine);
		dso__set_long_name(dso, strdup(filename), true);
		dso->kernel = DSO_SPACE__KERNEL;
	}

	dso__get(dso);
out_unlock:
	up_write(&machine->dsos.lock);
	return dso;
}

int machine__process_aux_event(struct machine *machine __maybe_unused,
			       union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_aux(event, stdout);
	return 0;
}

int machine__process_itrace_start_event(struct machine *machine __maybe_unused,
					union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_itrace_start(event, stdout);
	return 0;
}

int machine__process_switch_event(struct machine *machine __maybe_unused,
				  union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_switch(event, stdout);
	return 0;
}

static int is_bpf_image(const char *name)
{
	return strncmp(name, "bpf_trampoline_", sizeof("bpf_trampoline_") - 1) == 0 ||
	       strncmp(name, "bpf_dispatcher_", sizeof("bpf_dispatcher_") - 1) == 0;
}

static int machine__process_ksymbol_register(struct machine *machine,
					     union perf_event *event,
					     struct perf_sample *sample __maybe_unused)
{
	struct symbol *sym;
	struct map *map = maps__find(&machine->kmaps, event->ksymbol.addr);

	if (!map) {
		struct dso *dso = dso__new(event->ksymbol.name);

		if (dso) {
			dso->kernel = DSO_SPACE__KERNEL;
			map = map__new2(0, dso);
		}

		if (!dso || !map) {
			dso__put(dso);
			return -ENOMEM;
		}

		if (event->ksymbol.ksym_type == PERF_RECORD_KSYMBOL_TYPE_OOL) {
			map->dso->binary_type = DSO_BINARY_TYPE__OOL;
			map->dso->data.file_size = event->ksymbol.len;
			dso__set_loaded(map->dso);
		}

		map->start = event->ksymbol.addr;
		map->end = map->start + event->ksymbol.len;
		maps__insert(&machine->kmaps, map);
		dso__set_loaded(dso);

		if (is_bpf_image(event->ksymbol.name)) {
			dso->binary_type = DSO_BINARY_TYPE__BPF_IMAGE;
			dso__set_long_name(dso, "", false);
		}
	}

	sym = symbol__new(map->map_ip(map, map->start),
			  event->ksymbol.len,
			  0, 0, event->ksymbol.name);
	if (!sym)
		return -ENOMEM;
	dso__insert_symbol(map->dso, sym);
	return 0;
}

static int machine__process_ksymbol_unregister(struct machine *machine,
					       union perf_event *event,
					       struct perf_sample *sample __maybe_unused)
{
	struct map *map;

	map = maps__find(&machine->kmaps, event->ksymbol.addr);
	if (map)
		maps__remove(&machine->kmaps, map);

	return 0;
}

int machine__process_ksymbol(struct machine *machine __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample)
{
	if (dump_trace)
		perf_event__fprintf_ksymbol(event, stdout);

	if (event->ksymbol.flags & PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER)
		return machine__process_ksymbol_unregister(machine, event,
							   sample);
	return machine__process_ksymbol_register(machine, event, sample);
}

int machine__process_text_poke(struct machine *machine, union perf_event *event,
			       struct perf_sample *sample __maybe_unused)
{
	struct map *map = maps__find(&machine->kmaps, event->text_poke.addr);
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	if (dump_trace)
		perf_event__fprintf_text_poke(event, machine, stdout);

	if (!event->text_poke.new_len)
		return 0;

	if (cpumode != PERF_RECORD_MISC_KERNEL) {
		pr_debug("%s: unsupported cpumode - ignoring\n", __func__);
		return 0;
	}

	if (map && map->dso) {
		u8 *new_bytes = event->text_poke.bytes + event->text_poke.old_len;
		int ret;

		/*
		 * Kernel maps might be changed when loading symbols so loading
		 * must be done prior to using kernel maps.
		 */
		map__load(map);
		ret = dso__data_write_cache_addr(map->dso, map, machine,
						 event->text_poke.addr,
						 new_bytes,
						 event->text_poke.new_len);
		if (ret != event->text_poke.new_len)
			pr_debug("Failed to write kernel text poke at %#" PRI_lx64 "\n",
				 event->text_poke.addr);
	} else {
		pr_debug("Failed to find kernel text poke address map for %#" PRI_lx64 "\n",
			 event->text_poke.addr);
	}

	return 0;
}

static struct map *machine__addnew_module_map(struct machine *machine, u64 start,
					      const char *filename)
{
	struct map *map = NULL;
	struct kmod_path m;
	struct dso *dso;

	if (kmod_path__parse_name(&m, filename))
		return NULL;

	dso = machine__findnew_module_dso(machine, &m, filename);
	if (dso == NULL)
		goto out;

	map = map__new2(start, dso);
	if (map == NULL)
		goto out;

	maps__insert(&machine->kmaps, map);

	/* Put the map here because maps__insert already got it */
	map__put(map);
out:
	/* put the dso here, corresponding to machine__findnew_module_dso */
	dso__put(dso);
	zfree(&m.name);
	return map;
}

size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
{
	struct rb_node *nd;
	size_t ret = __dsos__fprintf(&machines->host.dsos.head, fp);

	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += __dsos__fprintf(&pos->dsos.head, fp);
	}

	return ret;
}

size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp,
				     bool (skip)(struct dso *dso, int parm), int parm)
{
	return __dsos__fprintf_buildid(&m->dsos.head, fp, skip, parm);
}

size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
				      bool (skip)(struct dso *dso, int parm), int parm)
{
	struct rb_node *nd;
	size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);

	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
	}

	return ret;
}

size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
{
	int i;
	size_t printed = 0;
	struct dso *kdso = machine__kernel_dso(machine);

	if (kdso->has_build_id) {
		char filename[PATH_MAX];
		if (dso__build_id_filename(kdso, filename, sizeof(filename),
					   false))
			printed += fprintf(fp, "[0] %s\n", filename);
	}

	for (i = 0; i < vmlinux_path__nr_entries; ++i)
		printed += fprintf(fp, "[%d] %s\n",
				   i + kdso->has_build_id, vmlinux_path[i]);

	return printed;
}

size_t machine__fprintf(struct machine *machine, FILE *fp)
{
	struct rb_node *nd;
	size_t ret;
	int i;

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads *threads = &machine->threads[i];

		down_read(&threads->lock);

		ret = fprintf(fp, "Threads: %u\n", threads->nr);

		for (nd = rb_first_cached(&threads->entries); nd;
		     nd = rb_next(nd)) {
			struct thread *pos = rb_entry(nd, struct thread, rb_node);

			ret += thread__fprintf(pos, fp);
		}

		up_read(&threads->lock);
	}

	return ret;
}

static struct dso *machine__get_kernel(struct machine *machine)
{
	const char *vmlinux_name = machine->mmap_name;
	struct dso *kernel;

	if (machine__is_host(machine)) {
		if (symbol_conf.vmlinux_name)
			vmlinux_name = symbol_conf.vmlinux_name;

		kernel = machine__findnew_kernel(machine, vmlinux_name,
						 "[kernel]", DSO_SPACE__KERNEL);
	} else {
		if (symbol_conf.default_guest_vmlinux_name)
			vmlinux_name = symbol_conf.default_guest_vmlinux_name;

		kernel = machine__findnew_kernel(machine, vmlinux_name,
						 "[guest.kernel]",
						 DSO_SPACE__KERNEL_GUEST);
	}

	if (kernel != NULL && (!kernel->has_build_id))
		dso__read_running_kernel_build_id(kernel, machine);

	return kernel;
}

struct process_args {
	u64 start;
};

void machine__get_kallsyms_filename(struct machine *machine, char *buf,
				    size_t bufsz)
{
	if (machine__is_default_guest(machine))
		scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
	else
		scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
}

const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};

/* Figure out the start address of kernel map from /proc/kallsyms.
 * Returns the name of the start symbol in *symbol_name. Pass in NULL as
 * symbol_name if it's not that important.
 */
static int machine__get_running_kernel_start(struct machine *machine,
					     const char **symbol_name,
					     u64 *start, u64 *end)
{
	char filename[PATH_MAX];
	int i, err = -1;
	const char *name;
	u64 addr = 0;

	machine__get_kallsyms_filename(machine, filename, PATH_MAX);

	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
		return 0;

	for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
		err = kallsyms__get_function_start(filename, name, &addr);
		if (!err)
			break;
	}

	if (err)
		return -1;

	if (symbol_name)
		*symbol_name = name;

	*start = addr;

	err = kallsyms__get_function_start(filename, "_etext", &addr);
	if (!err)
		*end = addr;

	return 0;
}

int machine__create_extra_kernel_map(struct machine *machine,
				     struct dso *kernel,
				     struct extra_kernel_map *xm)
{
	struct kmap *kmap;
	struct map *map;

	map = map__new2(xm->start, kernel);
	if (!map)
		return -1;

	map->end   = xm->end;
	map->pgoff = xm->pgoff;

	kmap = map__kmap(map);

	strlcpy(kmap->name, xm->name, KMAP_NAME_LEN);

	maps__insert(&machine->kmaps, map);

	pr_debug2("Added extra kernel map %s %" PRIx64 "-%" PRIx64 "\n",
		  kmap->name, map->start, map->end);

	map__put(map);

	return 0;
}

static u64 find_entry_trampoline(struct dso *dso)
{
	/* Duplicates are removed so lookup all aliases */
	const char *syms[] = {
		"_entry_trampoline",
		"__entry_trampoline_start",
		"entry_SYSCALL_64_trampoline",
	};
	struct symbol *sym = dso__first_symbol(dso);
	unsigned int i;

	for (; sym; sym = dso__next_symbol(sym)) {
		if (sym->binding != STB_GLOBAL)
			continue;
		for (i = 0; i < ARRAY_SIZE(syms); i++) {
			if (!strcmp(sym->name, syms[i]))
				return sym->start;
		}
	}

	return 0;
}

/*
 * These values can be used for kernels that do not have symbols for the entry
 * trampolines in kallsyms.
 */
#define X86_64_CPU_ENTRY_AREA_PER_CPU	0xfffffe0000000000ULL
#define X86_64_CPU_ENTRY_AREA_SIZE	0x2c000
#define X86_64_ENTRY_TRAMPOLINE		0x6000
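
/*
 * Worked example (derived from the constants above): the trampoline page of
 * CPU 1 sits at 0xfffffe0000000000 + 1 * 0x2c000 + 0x6000 =
 * 0xfffffe0000032000, which is exactly the `va` computed per CPU below.
 */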

/* Map x86_64 PTI entry trampolines */
int machine__map_x86_64_entry_trampolines(struct machine *machine,
					  struct dso *kernel)
{
	struct maps *kmaps = &machine->kmaps;
	int nr_cpus_avail, cpu;
	bool found = false;
	struct map *map;
	u64 pgoff;

	/*
	 * In the vmlinux case, pgoff is a virtual address which must now be
	 * mapped to a vmlinux offset.
	 */
	maps__for_each_entry(kmaps, map) {
		struct kmap *kmap = __map__kmap(map);
		struct map *dest_map;

		if (!kmap || !is_entry_trampoline(kmap->name))
			continue;

		dest_map = maps__find(kmaps, map->pgoff);
		if (dest_map != map)
			map->pgoff = dest_map->map_ip(dest_map, map->pgoff);
		found = true;
	}
	if (found || machine->trampolines_mapped)
		return 0;

	pgoff = find_entry_trampoline(kernel);
	if (!pgoff)
		return 0;

	nr_cpus_avail = machine__nr_cpus_avail(machine);

	/* Add a 1 page map for each CPU's entry trampoline */
	for (cpu = 0; cpu < nr_cpus_avail; cpu++) {
		u64 va = X86_64_CPU_ENTRY_AREA_PER_CPU +
			 cpu * X86_64_CPU_ENTRY_AREA_SIZE +
			 X86_64_ENTRY_TRAMPOLINE;
		struct extra_kernel_map xm = {
			.start = va,
			.end   = va + page_size,
			.pgoff = pgoff,
		};

		strlcpy(xm.name, ENTRY_TRAMPOLINE_NAME, KMAP_NAME_LEN);

		if (machine__create_extra_kernel_map(machine, kernel, &xm) < 0)
			return -1;
	}

	machine->trampolines_mapped = nr_cpus_avail;

	return 0;
}

int __weak machine__create_extra_kernel_maps(struct machine *machine __maybe_unused,
					     struct dso *kernel __maybe_unused)
{
	return 0;
}

static int
__machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
{
	/* If the kernel map is being renewed, destroy the previous one */
	machine__destroy_kernel_maps(machine);

	machine->vmlinux_map = map__new2(0, kernel);
	if (machine->vmlinux_map == NULL)
		return -1;

	machine->vmlinux_map->map_ip = machine->vmlinux_map->unmap_ip = identity__map_ip;
	maps__insert(&machine->kmaps, machine->vmlinux_map);
	return 0;
}

void machine__destroy_kernel_maps(struct machine *machine)
{
	struct kmap *kmap;
	struct map *map = machine__kernel_map(machine);

	if (map == NULL)
		return;

	kmap = map__kmap(map);
	maps__remove(&machine->kmaps, map);
	if (kmap && kmap->ref_reloc_sym) {
		zfree((char **)&kmap->ref_reloc_sym->name);
		zfree(&kmap->ref_reloc_sym);
	}

	map__zput(machine->vmlinux_map);
}

int machines__create_guest_kernel_maps(struct machines *machines)
{
	int ret = 0;
	struct dirent **namelist = NULL;
	int i, items = 0;
	char path[PATH_MAX];
	pid_t pid;
	char *endp;

	if (symbol_conf.default_guest_vmlinux_name ||
	    symbol_conf.default_guest_modules ||
	    symbol_conf.default_guest_kallsyms) {
		machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
	}

	if (symbol_conf.guestmount) {
		items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
		if (items <= 0)
			return -ENOENT;
		for (i = 0; i < items; i++) {
			if (!isdigit(namelist[i]->d_name[0])) {
				/* Filter out . and .. */
				continue;
			}
			pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
			if ((*endp != '\0') ||
			    (endp == namelist[i]->d_name) ||
			    (errno == ERANGE)) {
				pr_debug("invalid directory (%s). Skipping.\n",
					 namelist[i]->d_name);
				continue;
			}
			sprintf(path, "%s/%s/proc/kallsyms",
				symbol_conf.guestmount,
				namelist[i]->d_name);
			ret = access(path, R_OK);
			if (ret) {
				pr_debug("Can't access file %s\n", path);
				goto failure;
			}
			machines__create_kernel_maps(machines, pid);
		}
failure:
		free(namelist);
	}

	return ret;
}

void machines__destroy_kernel_maps(struct machines *machines)
{
	struct rb_node *next = rb_first_cached(&machines->guests);

	machine__destroy_kernel_maps(&machines->host);

	while (next) {
		struct machine *pos = rb_entry(next, struct machine, rb_node);

		next = rb_next(&pos->rb_node);
		rb_erase_cached(&pos->rb_node, &machines->guests);
		machine__delete(pos);
	}
}

int machines__create_kernel_maps(struct machines *machines, pid_t pid)
{
	struct machine *machine = machines__findnew(machines, pid);

	if (machine == NULL)
		return -1;

	return machine__create_kernel_maps(machine);
}

int machine__load_kallsyms(struct machine *machine, const char *filename)
{
	struct map *map = machine__kernel_map(machine);
	int ret = __dso__load_kallsyms(map->dso, filename, map, true);

	if (ret > 0) {
		dso__set_loaded(map->dso);
		/*
		 * Since /proc/kallsyms will have multiple sessions for the
		 * kernel, with modules between them, fixup the end of all
		 * sections.
		 */
		maps__fixup_end(&machine->kmaps);
	}

	return ret;
}

int machine__load_vmlinux_path(struct machine *machine)
{
	struct map *map = machine__kernel_map(machine);
	int ret = dso__load_vmlinux_path(map->dso, map);

	if (ret > 0)
		dso__set_loaded(map->dso);

	return ret;
}

static char *get_kernel_version(const char *root_dir)
{
	char version[PATH_MAX];
	FILE *file;
	char *name, *tmp;
	const char *prefix = "Linux version ";

	sprintf(version, "%s/proc/version", root_dir);
	file = fopen(version, "r");
	if (!file)
		return NULL;

	tmp = fgets(version, sizeof(version), file);
	fclose(file);
	if (!tmp)
		return NULL;

	name = strstr(version, prefix);
	if (!name)
		return NULL;
	name += strlen(prefix);
	tmp = strchr(name, ' ');
	if (tmp)
		*tmp = '\0';

	return strdup(name);
}
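
/*
 * Illustrative example (hypothetical contents): for a /proc/version line of
 * "Linux version 5.9.0-rc1 (user@host) (gcc ...) ...", this returns a
 * strdup()ed "5.9.0-rc1", which machine__set_modules_path() below turns into
 * "<root_dir>/lib/modules/5.9.0-rc1".
 */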

static bool is_kmod_dso(struct dso *dso)
{
	return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
	       dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE;
}

static int maps__set_module_path(struct maps *maps, const char *path, struct kmod_path *m)
{
	char *long_name;
	struct map *map = maps__find_by_name(maps, m->name);

	if (map == NULL)
		return 0;

	long_name = strdup(path);
	if (long_name == NULL)
		return -ENOMEM;

	dso__set_long_name(map->dso, long_name, true);
	dso__kernel_module_get_build_id(map->dso, "");

	/*
	 * The full name could reveal kmod compression, so
	 * we need to update the symtab_type if needed.
	 */
	if (m->comp && is_kmod_dso(map->dso)) {
		map->dso->symtab_type++;
		map->dso->comp = m->comp;
	}

	return 0;
}

static int maps__set_modules_path_dir(struct maps *maps, const char *dir_name, int depth)
{
	struct dirent *dent;
	DIR *dir = opendir(dir_name);
	int ret = 0;

	if (!dir) {
		pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
		return -1;
	}

	while ((dent = readdir(dir)) != NULL) {
		char path[PATH_MAX];
		struct stat st;

		/* sshfs might return bad dent->d_type, so we have to stat */
		snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
		if (stat(path, &st))
			continue;

		if (S_ISDIR(st.st_mode)) {
			if (!strcmp(dent->d_name, ".") ||
			    !strcmp(dent->d_name, ".."))
				continue;

			/* Do not follow top-level source and build symlinks */
			if (depth == 0) {
				if (!strcmp(dent->d_name, "source") ||
				    !strcmp(dent->d_name, "build"))
					continue;
			}

			ret = maps__set_modules_path_dir(maps, path, depth + 1);
			if (ret < 0)
				goto out;
		} else {
			struct kmod_path m;

			ret = kmod_path__parse_name(&m, dent->d_name);
			if (ret)
				goto out;

			if (m.kmod)
				ret = maps__set_module_path(maps, path, &m);

			zfree(&m.name);

			if (ret)
				goto out;
		}
	}

out:
	closedir(dir);
	return ret;
}
*machine
)
1423 char modules_path
[PATH_MAX
];
1425 version
= get_kernel_version(machine
->root_dir
);
1429 snprintf(modules_path
, sizeof(modules_path
), "%s/lib/modules/%s",
1430 machine
->root_dir
, version
);
1433 return maps__set_modules_path_dir(&machine
->kmaps
, modules_path
, 0);

int __weak arch__fix_module_text_start(u64 *start __maybe_unused,
				       u64 *size __maybe_unused,
				       const char *name __maybe_unused)
{
	return 0;
}

static int machine__create_module(void *arg, const char *name, u64 start,
				  u64 size)
{
	struct machine *machine = arg;
	struct map *map;

	if (arch__fix_module_text_start(&start, &size, name) < 0)
		return -1;

	map = machine__addnew_module_map(machine, start, name);
	if (map == NULL)
		return -1;
	map->end = start + size;

	dso__kernel_module_get_build_id(map->dso, machine->root_dir);

	return 0;
}

static int machine__create_modules(struct machine *machine)
{
	const char *modules;
	char path[PATH_MAX];

	if (machine__is_default_guest(machine)) {
		modules = symbol_conf.default_guest_modules;
	} else {
		snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);
		modules = path;
	}

	if (symbol__restricted_filename(modules, "/proc/modules"))
		return -1;

	if (modules__parse(modules, machine, machine__create_module))
		return -1;

	if (!machine__set_modules_path(machine))
		return 0;

	pr_debug("Problems setting modules path maps, continuing anyway...\n");

	return 0;
}

static void machine__set_kernel_mmap(struct machine *machine,
				     u64 start, u64 end)
{
	machine->vmlinux_map->start = start;
	machine->vmlinux_map->end   = end;
	/*
	 * Be a bit paranoid here: some perf.data files came with
	 * a zero sized synthesized MMAP event for the kernel.
	 */
	if (start == 0 && end == 0)
		machine->vmlinux_map->end = ~0ULL;
}

static void machine__update_kernel_mmap(struct machine *machine,
					u64 start, u64 end)
{
	struct map *map = machine__kernel_map(machine);

	map__get(map);
	maps__remove(&machine->kmaps, map);

	machine__set_kernel_mmap(machine, start, end);

	maps__insert(&machine->kmaps, map);
	map__put(map);
}
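
/*
 * Editorial note: kmaps keeps its maps ordered by address, so the kernel map
 * cannot simply be resized in place -- it is taken out, re-ranged via
 * machine__set_kernel_mmap(), and re-inserted so the ordering invariant
 * holds; the get/put pair keeps it alive across the removal.
 */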

int machine__create_kernel_maps(struct machine *machine)
{
	struct dso *kernel = machine__get_kernel(machine);
	const char *name = NULL;
	struct map *map;
	u64 start = 0, end = ~0ULL;
	int ret;

	if (kernel == NULL)
		return -1;

	ret = __machine__create_kernel_maps(machine, kernel);
	if (ret < 0)
		goto out_put;

	if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
		if (machine__is_host(machine))
			pr_debug("Problems creating module maps, "
				 "continuing anyway...\n");
		else
			pr_debug("Problems creating module maps for guest %d, "
				 "continuing anyway...\n", machine->pid);
	}

	if (!machine__get_running_kernel_start(machine, &name, &start, &end)) {
		if (name &&
		    map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map, name, start)) {
			machine__destroy_kernel_maps(machine);
			ret = -1;
			goto out_put;
		}

		/*
		 * We have a real start address now, so re-order the kmaps,
		 * assuming it's the last in the kmaps.
		 */
		machine__update_kernel_mmap(machine, start, end);
	}

	if (machine__create_extra_kernel_maps(machine, kernel))
		pr_debug("Problems creating extra kernel maps, continuing anyway...\n");

	if (end == ~0ULL) {
		/* update end address of the kernel map using adjacent module address */
		map = map__next(machine__kernel_map(machine));
		if (map)
			machine__set_kernel_mmap(machine, start, map->start);
	}

out_put:
	dso__put(kernel);
	return ret;
}

static bool machine__uses_kcore(struct machine *machine)
{
	struct dso *dso;

	list_for_each_entry(dso, &machine->dsos.head, node) {
		if (dso__is_kcore(dso))
			return true;
	}

	return false;
}

static bool perf_event__is_extra_kernel_mmap(struct machine *machine,
					     union perf_event *event)
{
	return machine__is(machine, "x86_64") &&
	       is_entry_trampoline(event->mmap.filename);
}

static int machine__process_extra_kernel_map(struct machine *machine,
					     union perf_event *event)
{
	struct dso *kernel = machine__kernel_dso(machine);
	struct extra_kernel_map xm = {
		.start = event->mmap.start,
		.end   = event->mmap.start + event->mmap.len,
		.pgoff = event->mmap.pgoff,
	};

	if (kernel == NULL)
		return -1;

	strlcpy(xm.name, event->mmap.filename, KMAP_NAME_LEN);

	return machine__create_extra_kernel_map(machine, kernel, &xm);
}

static int machine__process_kernel_mmap_event(struct machine *machine,
					      union perf_event *event)
{
	struct map *map;
	enum dso_space_type dso_space;
	bool is_kernel_mmap;

	/* If we have maps from kcore then we do not need or want any others */
	if (machine__uses_kcore(machine))
		return 0;

	if (machine__is_host(machine))
		dso_space = DSO_SPACE__KERNEL;
	else
		dso_space = DSO_SPACE__KERNEL_GUEST;

	is_kernel_mmap = memcmp(event->mmap.filename,
				machine->mmap_name,
				strlen(machine->mmap_name) - 1) == 0;
	if (event->mmap.filename[0] == '/' ||
	    (!is_kernel_mmap && event->mmap.filename[0] == '[')) {
		map = machine__addnew_module_map(machine, event->mmap.start,
						 event->mmap.filename);
		if (map == NULL)
			goto out_problem;

		map->end = map->start + event->mmap.len;
	} else if (is_kernel_mmap) {
		const char *symbol_name = (event->mmap.filename +
				strlen(machine->mmap_name));
		/*
		 * Should be there already, from the build-id table in
		 * the header.
		 */
		struct dso *kernel = NULL;
		struct dso *dso;

		down_read(&machine->dsos.lock);

		list_for_each_entry(dso, &machine->dsos.head, node) {

			/*
			 * The cpumode passed to is_kernel_module is not the
			 * cpumode of *this* event. If we insist on passing
			 * the correct cpumode to is_kernel_module, we should
			 * record the cpumode when adding this dso to the
			 * linked list.
			 *
			 * However we don't really need to pass the correct
			 * cpumode. We know the correct cpumode must be kernel
			 * mode (if not, we should not link it onto the
			 * kernel_dsos list).
			 *
			 * Therefore, we pass PERF_RECORD_MISC_CPUMODE_UNKNOWN.
			 * is_kernel_module() treats it as a kernel cpumode.
			 */
			if (!dso->kernel ||
			    is_kernel_module(dso->long_name,
					     PERF_RECORD_MISC_CPUMODE_UNKNOWN))
				continue;

			kernel = dso;
			break;
		}

		up_read(&machine->dsos.lock);

		if (kernel == NULL)
			kernel = machine__findnew_dso(machine, machine->mmap_name);
		if (kernel == NULL)
			goto out_problem;

		kernel->kernel = dso_space;
		if (__machine__create_kernel_maps(machine, kernel) < 0) {
			dso__put(kernel);
			goto out_problem;
		}

		if (strstr(kernel->long_name, "vmlinux"))
			dso__set_short_name(kernel, "[kernel.vmlinux]", false);

		machine__update_kernel_mmap(machine, event->mmap.start,
					    event->mmap.start + event->mmap.len);

		/*
		 * Avoid using a zero address (kptr_restrict) for the ref reloc
		 * symbol. Effectively having zero here means that at record
		 * time /proc/sys/kernel/kptr_restrict was non zero.
		 */
		if (event->mmap.pgoff != 0) {
			map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map,
							symbol_name,
							event->mmap.pgoff);
		}

		if (machine__is_default_guest(machine)) {
			/*
			 * preload dso of guest kernel and modules
			 */
			dso__load(kernel, machine__kernel_map(machine));
		}
	} else if (perf_event__is_extra_kernel_mmap(machine, event)) {
		return machine__process_extra_kernel_map(machine, event);
	}
	return 0;
out_problem:
	return -1;
}

int machine__process_mmap2_event(struct machine *machine,
				 union perf_event *event,
				 struct perf_sample *sample)
{
	struct thread *thread;
	struct map *map;
	struct dso_id dso_id = {
		.maj = event->mmap2.maj,
		.min = event->mmap2.min,
		.ino = event->mmap2.ino,
		.ino_generation = event->mmap2.ino_generation,
	};
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap2(event, stdout);

	if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    sample->cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap2.pid,
					 event->mmap2.tid);
	if (thread == NULL)
		goto out_problem;

	map = map__new(machine, event->mmap2.start,
		       event->mmap2.len, event->mmap2.pgoff,
		       &dso_id, event->mmap2.prot,
		       event->mmap2.flags,
		       event->mmap2.filename, thread);

	if (map == NULL)
		goto out_problem_map;

	ret = thread__insert_map(thread, map);
	if (ret)
		goto out_problem_insert;

	thread__put(thread);
	map__put(map);
	return 0;

out_problem_insert:
	map__put(map);
out_problem_map:
	thread__put(thread);
out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
	return 0;
}

int machine__process_mmap_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread;
	struct map *map;
	u32 prot = 0;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap(event, stdout);

	if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    sample->cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap.pid,
					 event->mmap.tid);
	if (thread == NULL)
		goto out_problem;

	if (!(event->header.misc & PERF_RECORD_MISC_MMAP_DATA))
		prot = PROT_EXEC;

	map = map__new(machine, event->mmap.start,
		       event->mmap.len, event->mmap.pgoff,
		       NULL, prot, 0, event->mmap.filename, thread);

	if (map == NULL)
		goto out_problem_map;

	ret = thread__insert_map(thread, map);
	if (ret)
		goto out_problem_insert;

	thread__put(thread);
	map__put(map);
	return 0;

out_problem_insert:
	map__put(map);
out_problem_map:
	thread__put(thread);
out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
	return 0;
}

static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock)
{
	struct threads *threads = machine__threads(machine, th->tid);

	if (threads->last_match == th)
		threads__set_last_match(threads, NULL);

	if (lock)
		down_write(&threads->lock);

	BUG_ON(refcount_read(&th->refcnt) == 0);

	rb_erase_cached(&th->rb_node, &threads->entries);
	RB_CLEAR_NODE(&th->rb_node);
	--threads->nr;
	/*
	 * Move it first to the dead_threads list, then drop the reference:
	 * if this is the last reference, then the thread__delete destructor
	 * will be called and we will remove it from the dead_threads list.
	 */
	list_add_tail(&th->node, &threads->dead);

	/*
	 * We need to do the put here because if this is the last refcount,
	 * then we will be touching the threads->dead head when removing the
	 * thread.
	 */
	thread__put(th);

	if (lock)
		up_write(&threads->lock);
}

void machine__remove_thread(struct machine *machine, struct thread *th)
{
	return __machine__remove_thread(machine, th, true);
}

int machine__process_fork_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);
	struct thread *parent = machine__findnew_thread(machine,
							event->fork.ppid,
							event->fork.ptid);
	bool do_maps_clone = true;
	int err = 0;

	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	/*
	 * There may be an existing thread that is not actually the parent,
	 * either because we are processing events out of order, or because the
	 * (fork) event that would have removed the thread was lost. Assume the
	 * latter case and continue on as best we can.
	 */
	if (parent->pid_ != (pid_t)event->fork.ppid) {
		dump_printf("removing erroneous parent thread %d/%d\n",
			    parent->pid_, parent->tid);
		machine__remove_thread(machine, parent);
		thread__put(parent);
		parent = machine__findnew_thread(machine, event->fork.ppid,
						 event->fork.ptid);
	}

	/* if a thread currently exists for the thread id remove it */
	if (thread != NULL) {
		machine__remove_thread(machine, thread);
		thread__put(thread);
	}

	thread = machine__findnew_thread(machine, event->fork.pid,
					 event->fork.tid);
	/*
	 * When synthesizing FORK events, we are trying to create thread
	 * objects for the already running tasks on the machine.
	 *
	 * Normally, for a kernel FORK event, we want to clone the parent's
	 * maps because that is what the kernel just did.
	 *
	 * But when synthesizing, this should not be done. If we do, we end up
	 * with overlapping maps as we process the synthesized MMAP2 events that
	 * get delivered shortly thereafter.
	 *
	 * Use the FORK event misc flags in an internal way to signal this
	 * situation, so we can elide the map clone when appropriate.
	 */
	if (event->fork.header.misc & PERF_RECORD_MISC_FORK_EXEC)
		do_maps_clone = false;

	if (thread == NULL || parent == NULL ||
	    thread__fork(thread, parent, sample->time, do_maps_clone) < 0) {
		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
		err = -1;
	}
	thread__put(thread);
	thread__put(parent);

	return err;
}

int machine__process_exit_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample __maybe_unused)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);

	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	if (thread != NULL) {
		thread__exited(thread);
		thread__put(thread);
	}

	return 0;
}
*machine
, union perf_event
*event
,
1946 struct perf_sample
*sample
)
1950 switch (event
->header
.type
) {
1951 case PERF_RECORD_COMM
:
1952 ret
= machine__process_comm_event(machine
, event
, sample
); break;
1953 case PERF_RECORD_MMAP
:
1954 ret
= machine__process_mmap_event(machine
, event
, sample
); break;
1955 case PERF_RECORD_NAMESPACES
:
1956 ret
= machine__process_namespaces_event(machine
, event
, sample
); break;
1957 case PERF_RECORD_CGROUP
:
1958 ret
= machine__process_cgroup_event(machine
, event
, sample
); break;
1959 case PERF_RECORD_MMAP2
:
1960 ret
= machine__process_mmap2_event(machine
, event
, sample
); break;
1961 case PERF_RECORD_FORK
:
1962 ret
= machine__process_fork_event(machine
, event
, sample
); break;
1963 case PERF_RECORD_EXIT
:
1964 ret
= machine__process_exit_event(machine
, event
, sample
); break;
1965 case PERF_RECORD_LOST
:
1966 ret
= machine__process_lost_event(machine
, event
, sample
); break;
1967 case PERF_RECORD_AUX
:
1968 ret
= machine__process_aux_event(machine
, event
); break;
1969 case PERF_RECORD_ITRACE_START
:
1970 ret
= machine__process_itrace_start_event(machine
, event
); break;
1971 case PERF_RECORD_LOST_SAMPLES
:
1972 ret
= machine__process_lost_samples_event(machine
, event
, sample
); break;
1973 case PERF_RECORD_SWITCH
:
1974 case PERF_RECORD_SWITCH_CPU_WIDE
:
1975 ret
= machine__process_switch_event(machine
, event
); break;
1976 case PERF_RECORD_KSYMBOL
:
1977 ret
= machine__process_ksymbol(machine
, event
, sample
); break;
1978 case PERF_RECORD_BPF_EVENT
:
1979 ret
= machine__process_bpf(machine
, event
, sample
); break;
1980 case PERF_RECORD_TEXT_POKE
:
1981 ret
= machine__process_text_poke(machine
, event
, sample
); break;
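
/*
 * Illustrative usage sketch (hypothetical caller): a session-level
 * dispatcher typically forwards each parsed event here:
 *
 *	if (machine__process_event(machine, event, &sample) < 0)
 *		pr_debug("failed to process event type %d\n",
 *			 event->header.type);
 */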

static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
{
	if (!regexec(regex, sym->name, 0, NULL, 0))
		return true;
	return false;
}

static void ip__resolve_ams(struct thread *thread,
			    struct addr_map_symbol *ams,
			    u64 ip)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));
	/*
	 * We cannot use the header.misc hint to determine whether a
	 * branch stack address is user, kernel, guest, hypervisor.
	 * Branches may straddle the kernel/user/hypervisor boundaries.
	 * Thus, we have to try consecutively until we find a match
	 * or else, the symbol is unknown.
	 */
	thread__find_cpumode_addr_location(thread, ip, &al);

	ams->addr = ip;
	ams->al_addr = al.addr;
	ams->ms.maps = al.maps;
	ams->ms.sym = al.sym;
	ams->ms.map = al.map;
	ams->phys_addr = 0;
}

static void ip__resolve_data(struct thread *thread,
			     u8 m, struct addr_map_symbol *ams,
			     u64 addr, u64 phys_addr)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));

	thread__find_symbol(thread, m, addr, &al);

	ams->addr = addr;
	ams->al_addr = al.addr;
	ams->ms.maps = al.maps;
	ams->ms.sym = al.sym;
	ams->ms.map = al.map;
	ams->phys_addr = phys_addr;
}

struct mem_info *sample__resolve_mem(struct perf_sample *sample,
				     struct addr_location *al)
{
	struct mem_info *mi = mem_info__new();

	if (!mi)
		return NULL;

	ip__resolve_ams(al->thread, &mi->iaddr, sample->ip);
	ip__resolve_data(al->thread, al->cpumode, &mi->daddr,
			 sample->addr, sample->phys_addr);
	mi->data_src.val = sample->data_src;

	return mi;
}

static char *callchain_srcline(struct map_symbol *ms, u64 ip)
{
	struct map *map = ms->map;
	char *srcline = NULL;

	if (!map || callchain_param.key == CCKEY_FUNCTION)
		return srcline;

	srcline = srcline__tree_find(&map->dso->srclines, ip);
	if (!srcline) {
		bool show_sym = false;
		bool show_addr = callchain_param.key == CCKEY_ADDRESS;

		srcline = get_srcline(map->dso, map__rip_2objdump(map, ip),
				      ms->sym, show_sym, show_addr, ip);
		srcline__tree_insert(&map->dso->srclines, ip, srcline);
	}

	return srcline;
}

struct iterations {
	int nr_loop_iter;
	u64 cycles;
};

static int add_callchain_ip(struct thread *thread,
			    struct callchain_cursor *cursor,
			    struct symbol **parent,
			    struct addr_location *root_al,
			    u8 *cpumode,
			    u64 ip,
			    bool branch,
			    struct branch_flags *flags,
			    struct iterations *iter,
			    u64 branch_from)
{
	struct map_symbol ms;
	struct addr_location al;
	int nr_loop_iter = 0;
	u64 iter_cycles = 0;
	const char *srcline = NULL;

	al.filtered = 0;
	al.sym = NULL;
	if (!cpumode) {
		thread__find_cpumode_addr_location(thread, ip, &al);
	} else {
		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				*cpumode = PERF_RECORD_MISC_HYPERVISOR;
				break;
			case PERF_CONTEXT_KERNEL:
				*cpumode = PERF_RECORD_MISC_KERNEL;
				break;
			case PERF_CONTEXT_USER:
				*cpumode = PERF_RECORD_MISC_USER;
				break;
			default:
				pr_debug("invalid callchain context: "
					 "%"PRId64"\n", (s64) ip);
				/*
				 * It seems the callchain is corrupted.
				 * Discard all.
				 */
				callchain_cursor_reset(cursor);
				return 1;
			}
			return 0;
		}
		thread__find_symbol(thread, *cpumode, ip, &al);
	}

	if (al.sym != NULL) {
		if (perf_hpp_list.parent && !*parent &&
		    symbol__match_regex(al.sym, &parent_regex))
			*parent = al.sym;
		else if (have_ignore_callees && root_al &&
			 symbol__match_regex(al.sym, &ignore_callees_regex)) {
			/* Treat this symbol as the root,
			   forgetting its callees. */
			*root_al = al;
			callchain_cursor_reset(cursor);
		}
	}

	if (symbol_conf.hide_unresolved && al.sym == NULL)
		return 0;

	if (iter) {
		nr_loop_iter = iter->nr_loop_iter;
		iter_cycles = iter->cycles;
	}

	ms.maps = al.maps;
	ms.map = al.map;
	ms.sym = al.sym;
	srcline = callchain_srcline(&ms, al.addr);
	return callchain_cursor_append(cursor, ip, &ms,
				       branch, flags, nr_loop_iter,
				       iter_cycles, branch_from, srcline);
}

struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
					   struct addr_location *al)
{
	unsigned int i;
	const struct branch_stack *bs = sample->branch_stack;
	struct branch_entry *entries = perf_sample__branch_entries(sample);
	struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));

	if (!bi)
		return NULL;

	for (i = 0; i < bs->nr; i++) {
		ip__resolve_ams(al->thread, &bi[i].to, entries[i].to);
		ip__resolve_ams(al->thread, &bi[i].from, entries[i].from);
		bi[i].flags = entries[i].flags;
	}

	return bi;
}

static void save_iterations(struct iterations *iter,
			    struct branch_entry *be, int nr)
{
	int i;

	iter->nr_loop_iter++;
	iter->cycles = 0;

	for (i = 0; i < nr; i++)
		iter->cycles += be[i].flags.cycles;
}

#define CHASHSZ 127
#define CHASHBITS 7
#define NO_ENTRY 0xff

#define PERF_MAX_BRANCH_DEPTH 127

/* Remove loops. */
static int remove_loops(struct branch_entry *l, int nr,
			struct iterations *iter)
{
	int i, j, off;
	unsigned char chash[CHASHSZ];

	memset(chash, NO_ENTRY, sizeof(chash));

	BUG_ON(PERF_MAX_BRANCH_DEPTH > 255);

	for (i = 0; i < nr; i++) {
		int h = hash_64(l[i].from, CHASHBITS) % CHASHSZ;

		/* no collision handling for now */
		if (chash[h] == NO_ENTRY) {
			chash[h] = i;
		} else if (l[chash[h]].from == l[i].from) {
			bool is_loop = true;
			/* check if it is a real loop */
			off = 0;
			for (j = chash[h]; j < i && i + off < nr; j++, off++)
				if (l[j].from != l[i + off].from) {
					is_loop = false;
					break;
				}
			if (is_loop) {
				j = nr - (i + off);
				if (j > 0) {
					save_iterations(iter + i + off,
							l + i, off);

					memmove(iter + i, iter + i + off,
						j * sizeof(*iter));

					memmove(l + i, l + i + off,
						j * sizeof(*l));
				}

				nr = nr - off;
			}
		}
	}
	return nr;
}
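
/*
 * Editorial sketch of the idea above (not traced from real data): for a
 * `from` sequence like A B C A B C ... D, the second A hashes into the slot
 * already holding the first A, the window comparison confirms the repeating
 * A B C body, save_iterations() accumulates the iteration count and cycle
 * total onto the surviving entry, and the memmove()s splice the repeated
 * window out, so the callchain shows one loop body with iteration counts
 * instead of many unrolled copies.
 */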

static int lbr_callchain_add_kernel_ip(struct thread *thread,
				       struct callchain_cursor *cursor,
				       struct perf_sample *sample,
				       struct symbol **parent,
				       struct addr_location *root_al,
				       u64 branch_from,
				       bool callee, int end)
{
	struct ip_callchain *chain = sample->callchain;
	u8 cpumode = PERF_RECORD_MISC_USER;
	int err, i;

	if (callee) {
		/* Add kernel ip */
		for (i = 0; i < end + 1; i++) {
			err = add_callchain_ip(thread, cursor, parent,
					       root_al, &cpumode, chain->ips[i],
					       false, NULL, NULL, branch_from);
			if (err)
				return err;
		}
		return 0;
	}

	/* Add kernel ip */
	for (i = end; i >= 0; i--) {
		err = add_callchain_ip(thread, cursor, parent,
				       root_al, &cpumode, chain->ips[i],
				       false, NULL, NULL, branch_from);
		if (err)
			return err;
	}
	return 0;
}

static void save_lbr_cursor_node(struct thread *thread,
				 struct callchain_cursor *cursor,
				 int idx)
{
	struct lbr_stitch *lbr_stitch = thread->lbr_stitch;

	if (!lbr_stitch)
		return;

	if (cursor->pos == cursor->nr) {
		lbr_stitch->prev_lbr_cursor[idx].valid = false;
		return;
	}

	if (!cursor->curr)
		cursor->curr = cursor->first;
	else
		cursor->curr = cursor->curr->next;
	memcpy(&lbr_stitch->prev_lbr_cursor[idx], cursor->curr,
	       sizeof(struct callchain_cursor_node));

	lbr_stitch->prev_lbr_cursor[idx].valid = true;
	cursor->pos++;
}

static int lbr_callchain_add_lbr_ip(struct thread *thread,
				    struct callchain_cursor *cursor,
				    struct perf_sample *sample,
				    struct symbol **parent,
				    struct addr_location *root_al,
				    u64 *branch_from,
				    bool callee)
{
	struct branch_stack *lbr_stack = sample->branch_stack;
	struct branch_entry *entries = perf_sample__branch_entries(sample);
	u8 cpumode = PERF_RECORD_MISC_USER;
	int lbr_nr = lbr_stack->nr;
	struct branch_flags *flags;
	int err, i;
	u64 ip;

	/*
	 * The curr and pos are not used in the writing session. They are
	 * cleared in callchain_cursor_commit() when the writing session is
	 * closed. Use curr and pos to track the current cursor node.
	 */
	if (thread->lbr_stitch) {
		cursor->curr = NULL;
		cursor->pos = cursor->nr;
		if (cursor->nr) {
			cursor->curr = cursor->first;
			for (i = 0; i < (int)(cursor->nr - 1); i++)
				cursor->curr = cursor->curr->next;
		}
	}

	if (callee) {
		/* Add LBR ip from first entries.to */
		ip = entries[0].to;
		flags = &entries[0].flags;
		*branch_from = entries[0].from;
		err = add_callchain_ip(thread, cursor, parent,
				       root_al, &cpumode, ip,
				       true, flags, NULL,
				       *branch_from);
		if (err)
			return err;

		/*
		 * The number of cursor nodes increases. Move the current
		 * cursor node, but there is no need to save it for entry 0:
		 * it's impossible to stitch the whole LBRs of the previous
		 * sample.
		 */
		if (thread->lbr_stitch && (cursor->pos != cursor->nr)) {
			if (!cursor->curr)
				cursor->curr = cursor->first;
			else
				cursor->curr = cursor->curr->next;
			cursor->pos++;
		}

		/* Add LBR ip from entries.from one by one. */
		for (i = 0; i < lbr_nr; i++) {
			ip = entries[i].from;
			flags = &entries[i].flags;
			err = add_callchain_ip(thread, cursor, parent,
					       root_al, &cpumode, ip,
					       true, flags, NULL,
					       *branch_from);
			if (err)
				return err;
			save_lbr_cursor_node(thread, cursor, i);
		}
		return 0;
	}

	/* Add LBR ip from entries.from one by one. */
	for (i = lbr_nr - 1; i >= 0; i--) {
		ip = entries[i].from;
		flags = &entries[i].flags;
		err = add_callchain_ip(thread, cursor, parent,
				       root_al, &cpumode, ip,
				       true, flags, NULL,
				       *branch_from);
		if (err)
			return err;
		save_lbr_cursor_node(thread, cursor, i);
	}

	/* Add LBR ip from first entries.to */
	ip = entries[0].to;
	flags = &entries[0].flags;
	*branch_from = entries[0].from;
	err = add_callchain_ip(thread, cursor, parent,
			       root_al, &cpumode, ip,
			       true, flags, NULL,
			       *branch_from);
	if (err)
		return err;

	return 0;
}

static int lbr_callchain_add_stitched_lbr_ip(struct thread *thread,
					     struct callchain_cursor *cursor)
{
	struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
	struct callchain_cursor_node *cnode;
	struct stitch_list *stitch_node;
	int err;

	list_for_each_entry(stitch_node, &lbr_stitch->lists, node) {
		cnode = &stitch_node->cursor;

		err = callchain_cursor_append(cursor, cnode->ip,
					      &cnode->ms,
					      cnode->branch,
					      &cnode->branch_flags,
					      cnode->nr_loop_iter,
					      cnode->iter_cycles,
					      cnode->branch_from,
					      cnode->srcline);
		if (err)
			return err;
	}
	return 0;
}

static struct stitch_list *get_stitch_node(struct thread *thread)
{
	struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
	struct stitch_list *stitch_node;

	if (!list_empty(&lbr_stitch->free_lists)) {
		stitch_node = list_first_entry(&lbr_stitch->free_lists,
					       struct stitch_list, node);
		list_del(&stitch_node->node);

		return stitch_node;
	}

	return malloc(sizeof(struct stitch_list));
}

static bool has_stitched_lbr(struct thread *thread,
			     struct perf_sample *cur,
			     struct perf_sample *prev,
			     unsigned int max_lbr,
			     bool callee)
{
	struct branch_stack *cur_stack = cur->branch_stack;
	struct branch_entry *cur_entries = perf_sample__branch_entries(cur);
	struct branch_stack *prev_stack = prev->branch_stack;
	struct branch_entry *prev_entries = perf_sample__branch_entries(prev);
	struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
	int i, j, nr_identical_branches = 0;
	struct stitch_list *stitch_node;
	u64 cur_base, distance;

	if (!cur_stack || !prev_stack)
		return false;

	/* Find the physical index of the base-of-stack for current sample. */
	cur_base = max_lbr - cur_stack->nr + cur_stack->hw_idx + 1;

	distance = (prev_stack->hw_idx > cur_base) ? (prev_stack->hw_idx - cur_base) :
						     (max_lbr + prev_stack->hw_idx - cur_base);
	/* Previous sample has shorter stack. Nothing can be stitched. */
	if (distance + 1 > prev_stack->nr)
		return false;

	/*
	 * Check if there are identical LBRs between two samples.
	 * Identical LBRs must have the same from, to and flags values. Also,
	 * they have to be saved in the same LBR registers (same physical
	 * index).
	 *
	 * Starts from the base-of-stack of current sample.
	 */
	for (i = distance, j = cur_stack->nr - 1; (i >= 0) && (j >= 0); i--, j--) {
		if ((prev_entries[i].from != cur_entries[j].from) ||
		    (prev_entries[i].to != cur_entries[j].to) ||
		    (prev_entries[i].flags.value != cur_entries[j].flags.value))
			break;
		nr_identical_branches++;
	}

	if (!nr_identical_branches)
		return false;

	/*
	 * Save the LBRs between the base-of-stack of previous sample
	 * and the base-of-stack of current sample into lbr_stitch->lists.
	 * These LBRs will be stitched later.
	 */
	for (i = prev_stack->nr - 1; i > (int)distance; i--) {

		if (!lbr_stitch->prev_lbr_cursor[i].valid)
			continue;

		stitch_node = get_stitch_node(thread);
		if (!stitch_node)
			return false;

		memcpy(&stitch_node->cursor, &lbr_stitch->prev_lbr_cursor[i],
		       sizeof(struct callchain_cursor_node));

		if (callee)
			list_add(&stitch_node->node, &lbr_stitch->lists);
		else
			list_add_tail(&stitch_node->node, &lbr_stitch->lists);
	}

	return true;
}

static bool alloc_lbr_stitch(struct thread *thread, unsigned int max_lbr)
{
	if (thread->lbr_stitch)
		return true;

	thread->lbr_stitch = zalloc(sizeof(*thread->lbr_stitch));
	if (!thread->lbr_stitch)
		goto err;

	thread->lbr_stitch->prev_lbr_cursor = calloc(max_lbr + 1, sizeof(struct callchain_cursor_node));
	if (!thread->lbr_stitch->prev_lbr_cursor)
		goto free_lbr_stitch;

	INIT_LIST_HEAD(&thread->lbr_stitch->lists);
	INIT_LIST_HEAD(&thread->lbr_stitch->free_lists);

	return true;

free_lbr_stitch:
	zfree(&thread->lbr_stitch);
err:
	pr_warning("Failed to allocate space for stitched LBRs. Disable LBR stitch\n");
	thread->lbr_stitch_enable = false;
	return false;
}
2540 * Recolve LBR callstack chain sample
2542 * 1 on success get LBR callchain information
2543 * 0 no available LBR callchain information, should try fp
2544 * negative error code on other errors.
2546 static int resolve_lbr_callchain_sample(struct thread
*thread
,
2547 struct callchain_cursor
*cursor
,
2548 struct perf_sample
*sample
,
2549 struct symbol
**parent
,
2550 struct addr_location
*root_al
,
2552 unsigned int max_lbr
)
2554 bool callee
= (callchain_param
.order
== ORDER_CALLEE
);
2555 struct ip_callchain
*chain
= sample
->callchain
;
2556 int chain_nr
= min(max_stack
, (int)chain
->nr
), i
;
2557 struct lbr_stitch
*lbr_stitch
;
2558 bool stitched_lbr
= false;
2559 u64 branch_from
= 0;
2562 for (i
= 0; i
< chain_nr
; i
++) {
2563 if (chain
->ips
[i
] == PERF_CONTEXT_USER
)
2567 /* LBR only affects the user callchain */
2571 if (thread
->lbr_stitch_enable
&& !sample
->no_hw_idx
&&
2572 (max_lbr
> 0) && alloc_lbr_stitch(thread
, max_lbr
)) {
2573 lbr_stitch
= thread
->lbr_stitch
;
2575 stitched_lbr
= has_stitched_lbr(thread
, sample
,
2576 &lbr_stitch
->prev_sample
,
2579 if (!stitched_lbr
&& !list_empty(&lbr_stitch
->lists
)) {
2580 list_replace_init(&lbr_stitch
->lists
,
2581 &lbr_stitch
->free_lists
);
2583 memcpy(&lbr_stitch
->prev_sample
, sample
, sizeof(*sample
));
	if (callee) {
		/* Add kernel ip */
		err = lbr_callchain_add_kernel_ip(thread, cursor, sample,
						  parent, root_al, branch_from,
						  true, i);
		if (err)
			goto error;

		err = lbr_callchain_add_lbr_ip(thread, cursor, sample, parent,
					       root_al, &branch_from, true);
		if (err)
			goto error;

		if (stitched_lbr) {
			err = lbr_callchain_add_stitched_lbr_ip(thread, cursor);
			if (err)
				goto error;
		}
	} else {
		if (stitched_lbr) {
			err = lbr_callchain_add_stitched_lbr_ip(thread, cursor);
			if (err)
				goto error;
		}
		err = lbr_callchain_add_lbr_ip(thread, cursor, sample, parent,
					       root_al, &branch_from, false);
		if (err)
			goto error;

		/* Add kernel ip */
		err = lbr_callchain_add_kernel_ip(thread, cursor, sample,
						  parent, root_al, branch_from,
						  false, i);
		if (err)
			goto error;
	}
	return 1;

error:
	return (err < 0) ? err : 0;
}
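/*
 * Added commentary: a sampled callchain interleaves context markers with
 * addresses, e.g. { PERF_CONTEXT_KERNEL, k1, k2, PERF_CONTEXT_USER, u1 }
 * (illustrative layout). Context markers are encoded as values >=
 * PERF_CONTEXT_MAX, so the helper below scans backwards from position
 * 'ent' until it finds the nearest marker and lets add_callchain_ip()
 * update the cpumode accordingly.
 */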
static int find_prev_cpumode(struct ip_callchain *chain, struct thread *thread,
			     struct callchain_cursor *cursor,
			     struct symbol **parent,
			     struct addr_location *root_al,
			     u8 *cpumode, int ent)
{
	int err = 0;

	while (--ent >= 0) {
		u64 ip = chain->ips[ent];

		if (ip >= PERF_CONTEXT_MAX) {
			err = add_callchain_ip(thread, cursor, parent,
					       root_al, cpumode, ip,
					       false, NULL, NULL, 0);
			break;
		}
	}
	return err;
}
static int thread__resolve_callchain_sample(struct thread *thread,
					    struct callchain_cursor *cursor,
					    struct evsel *evsel,
					    struct perf_sample *sample,
					    struct symbol **parent,
					    struct addr_location *root_al,
					    int max_stack)
{
	struct branch_stack *branch = sample->branch_stack;
	struct branch_entry *entries = perf_sample__branch_entries(sample);
	struct ip_callchain *chain = sample->callchain;
	int chain_nr = 0;
	u8 cpumode = PERF_RECORD_MISC_USER;
	int i, j, err, nr_entries;
	int skip_idx = -1;
	int first_call = 0;

	if (chain)
		chain_nr = chain->nr;
	if (evsel__has_branch_callstack(evsel)) {
		struct perf_env *env = evsel__env(evsel);

		err = resolve_lbr_callchain_sample(thread, cursor, sample, parent,
						   root_al, max_stack,
						   !env ? 0 : env->max_branches);
		if (err)
			return (err < 0) ? err : 0;
	}
	/*
	 * Based on DWARF debug information, some architectures skip
	 * a callchain entry saved by the kernel.
	 */
	skip_idx = arch_skip_callchain_idx(thread, chain);

	/*
	 * Add branches to call stack for easier browsing. This gives
	 * more context for a sample than just the callers.
	 *
	 * This uses individual histograms of paths compared to the
	 * aggregated histograms the normal LBR mode uses.
	 *
	 * Limitations for now:
	 * - No extra filters
	 * - No annotations (should annotate somehow)
	 */
	if (branch && callchain_param.branch_callstack) {
		int nr = min(max_stack, (int)branch->nr);
		struct branch_entry be[nr];
		struct iterations iter[nr];

		if (branch->nr > PERF_MAX_BRANCH_DEPTH) {
			pr_warning("corrupted branch chain. skipping...\n");
			goto check_calls;
		}

		for (i = 0; i < nr; i++) {
			if (callchain_param.order == ORDER_CALLEE) {
				be[i] = entries[i];

				if (!chain)
					continue;

				/*
				 * Check for overlap into the callchain.
				 * The return address is one off compared to
				 * the branch entry. To adjust for this
				 * assume the calling instruction is not longer
				 * than 8 bytes.
				 */
				if (i == skip_idx ||
				    chain->ips[first_call] >= PERF_CONTEXT_MAX)
					first_call++;
				else if (be[i].from < chain->ips[first_call] &&
					 be[i].from >= chain->ips[first_call] - 8)
					first_call++;
			} else
				be[i] = entries[branch->nr - i - 1];
		}
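		/*
		 * Added commentary, illustrative numbers: if a call at
		 * 0x1000 (be[i].from) pushed the return address 0x1005
		 * (chain->ips[first_call]), then 0x1000 < 0x1005 and
		 * 0x1000 >= 0x1005 - 8, so the branch entry and the
		 * callchain entry describe the same frame and first_call
		 * advances past the duplicate. The 8-byte bound is the
		 * assumed maximum length of the calling instruction.
		 */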
		memset(iter, 0, sizeof(struct iterations) * nr);
		nr = remove_loops(be, nr, iter);

		for (i = 0; i < nr; i++) {
			err = add_callchain_ip(thread, cursor, parent,
					       root_al,
					       NULL, be[i].to,
					       true, &be[i].flags,
					       NULL, be[i].from);

			if (!err)
				err = add_callchain_ip(thread, cursor, parent, root_al,
						       NULL, be[i].from,
						       true, &be[i].flags,
						       &iter[i], 0);
			if (err == -EINVAL)
				break;
			if (err)
				return err;
		}

		if (chain_nr == 0)
			return 0;

		chain_nr -= nr;
	}

check_calls:
	if (chain && callchain_param.order != ORDER_CALLEE) {
		err = find_prev_cpumode(chain, thread, cursor, parent, root_al,
					&cpumode, chain->nr - first_call);
		if (err)
			return (err < 0) ? err : 0;
	}
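	/*
	 * Added commentary: in callee order the chain is consumed front to
	 * back (j = i); in caller order it is walked back to front, so for
	 * a chain of nr = 10 entries, i = 0 maps to j = 9, i = 1 to j = 8,
	 * and so on (j = chain->nr - i - 1).
	 */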
	for (i = first_call, nr_entries = 0;
	     i < chain_nr && nr_entries < max_stack; i++) {
		u64 ip;

		if (callchain_param.order == ORDER_CALLEE)
			j = i;
		else
			j = chain->nr - i - 1;

#ifdef HAVE_SKIP_CALLCHAIN_IDX
		if (j == skip_idx)
			continue;
#endif
		ip = chain->ips[j];

		if (ip < PERF_CONTEXT_MAX)
			++nr_entries;
		else if (callchain_param.order != ORDER_CALLEE) {
			err = find_prev_cpumode(chain, thread, cursor, parent,
						root_al, &cpumode, j);
			if (err)
				return (err < 0) ? err : 0;
			continue;
		}

		err = add_callchain_ip(thread, cursor, parent,
				       root_al, &cpumode, ip,
				       false, NULL, NULL, 0);

		if (err)
			return (err < 0) ? err : 0;
	}

	return 0;
}
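/*
 * Added commentary: when inline information is requested
 * (symbol_conf.inline_name) and present in the debug data, a single
 * sampled address expands into one cursor entry per inlined frame. For
 * example, if bar() was inlined into foo() (names illustrative), the
 * address yields entries for both bar and foo instead of just the
 * outermost symbol. Parsed results are cached in the dso's
 * inlined_nodes tree so each address is parsed at most once.
 */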
static int append_inlines(struct callchain_cursor *cursor, struct map_symbol *ms, u64 ip)
{
	struct symbol *sym = ms->sym;
	struct map *map = ms->map;
	struct inline_node *inline_node;
	struct inline_list *ilist;
	u64 addr;
	int ret = 1;

	if (!symbol_conf.inline_name || !map || !sym)
		return ret;

	addr = map__map_ip(map, ip);
	addr = map__rip_2objdump(map, addr);

	inline_node = inlines__tree_find(&map->dso->inlined_nodes, addr);
	if (!inline_node) {
		inline_node = dso__parse_addr_inlines(map->dso, addr, sym);
		if (!inline_node)
			return ret;
		inlines__tree_insert(&map->dso->inlined_nodes, inline_node);
	}

	list_for_each_entry(ilist, &inline_node->val, list) {
		struct map_symbol ilist_ms = {
			.maps = ms->maps,
			.map = map,
			.sym = ilist->symbol,
		};
		ret = callchain_cursor_append(cursor, ip, &ilist_ms, false,
					      NULL, 0, 0, 0, ilist->srcline);

		if (ret != 0)
			return ret;
	}

	return ret;
}
static int unwind_entry(struct unwind_entry *entry, void *arg)
{
	struct callchain_cursor *cursor = arg;
	const char *srcline = NULL;
	u64 addr = entry->ip;

	if (symbol_conf.hide_unresolved && entry->ms.sym == NULL)
		return 0;

	if (append_inlines(cursor, &entry->ms, entry->ip) == 0)
		return 0;

	/*
	 * Convert entry->ip from a virtual address to an offset in
	 * its corresponding binary.
	 */
	if (entry->ms.map)
		addr = map__map_ip(entry->ms.map, entry->ip);

	srcline = callchain_srcline(&entry->ms, addr);
	return callchain_cursor_append(cursor, entry->ip, &entry->ms,
				       false, NULL, 0, 0, 0, srcline);
}
static int thread__resolve_callchain_unwind(struct thread *thread,
					    struct callchain_cursor *cursor,
					    struct evsel *evsel,
					    struct perf_sample *sample,
					    int max_stack)
{
	/* Can we do dwarf post unwind? */
	if (!((evsel->core.attr.sample_type & PERF_SAMPLE_REGS_USER) &&
	      (evsel->core.attr.sample_type & PERF_SAMPLE_STACK_USER)))
		return 0;

	/* Bail out if nothing was captured. */
	if ((!sample->user_regs.regs) ||
	    (!sample->user_stack.size))
		return 0;

	return unwind__get_entries(unwind_entry, cursor,
				   thread, sample, max_stack);
}
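/*
 * Added commentary: the resolvers below run in an order that follows
 * callchain_param.order, so cursor entries come out callee-first for
 * ORDER_CALLEE and caller-first otherwise. A non-zero result from the
 * first resolver short-circuits the second.
 */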
int thread__resolve_callchain(struct thread *thread,
			      struct callchain_cursor *cursor,
			      struct evsel *evsel,
			      struct perf_sample *sample,
			      struct symbol **parent,
			      struct addr_location *root_al,
			      int max_stack)
{
	int ret = 0;

	callchain_cursor_reset(cursor);

	if (callchain_param.order == ORDER_CALLEE) {
		ret = thread__resolve_callchain_sample(thread, cursor,
						       evsel, sample,
						       parent, root_al,
						       max_stack);
		if (ret)
			return ret;
		ret = thread__resolve_callchain_unwind(thread, cursor,
						       evsel, sample,
						       max_stack);
	} else {
		ret = thread__resolve_callchain_unwind(thread, cursor,
						       evsel, sample,
						       max_stack);
		if (ret)
			return ret;
		ret = thread__resolve_callchain_sample(thread, cursor,
						       evsel, sample,
						       parent, root_al,
						       max_stack);
	}

	return ret;
}
int machine__for_each_thread(struct machine *machine,
			     int (*fn)(struct thread *thread, void *p),
			     void *priv)
{
	struct threads *threads;
	struct rb_node *nd;
	struct thread *thread;
	int rc = 0;
	int i;

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		threads = &machine->threads[i];
		for (nd = rb_first_cached(&threads->entries); nd;
		     nd = rb_next(nd)) {
			thread = rb_entry(nd, struct thread, rb_node);
			rc = fn(thread, priv);
			if (rc != 0)
				return rc;
		}

		list_for_each_entry(thread, &threads->dead, node) {
			rc = fn(thread, priv);
			if (rc != 0)
				return rc;
		}
	}
	return rc;
}
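/*
 * Usage sketch (hypothetical callback, not part of this file): count
 * every thread known to a machine by threading a counter through the
 * opaque 'priv' pointer. A non-zero return from the callback stops the
 * walk and is propagated to the caller.
 *
 *	static int count_thread(struct thread *thread __maybe_unused, void *p)
 *	{
 *		(*(int *)p)++;
 *		return 0;
 *	}
 *
 *	int n = 0;
 *	machine__for_each_thread(machine, count_thread, &n);
 */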
int machines__for_each_thread(struct machines *machines,
			      int (*fn)(struct thread *thread, void *p),
			      void *priv)
{
	struct rb_node *nd;
	int rc = 0;

	rc = machine__for_each_thread(&machines->host, fn, priv);
	if (rc != 0)
		return rc;

	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		rc = machine__for_each_thread(machine, fn, priv);
		if (rc != 0)
			return rc;
	}
	return rc;
}
pid_t machine__get_current_tid(struct machine *machine, int cpu)
{
	int nr_cpus = min(machine->env->nr_cpus_online, MAX_NR_CPUS);

	if (cpu < 0 || cpu >= nr_cpus || !machine->current_tid)
		return -1;

	return machine->current_tid[cpu];
}
int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
			     pid_t tid)
{
	struct thread *thread;
	int nr_cpus = min(machine->env->nr_cpus_online, MAX_NR_CPUS);

	if (cpu < 0)
		return -EINVAL;

	if (!machine->current_tid) {
		int i;

		machine->current_tid = calloc(nr_cpus, sizeof(pid_t));
		if (!machine->current_tid)
			return -ENOMEM;
		for (i = 0; i < nr_cpus; i++)
			machine->current_tid[i] = -1;
	}

	if (cpu >= nr_cpus) {
		pr_err("Requested CPU %d too large. ", cpu);
		pr_err("Consider raising MAX_NR_CPUS\n");
		return -EINVAL;
	}

	machine->current_tid[cpu] = tid;

	thread = machine__findnew_thread(machine, pid, tid);
	if (!thread)
		return -ENOMEM;

	thread->cpu = cpu;
	thread__put(thread);

	return 0;
}
/*
 * Compares the raw arch string. N.B. see instead perf_env__arch() if a
 * normalized arch is needed.
 */
bool machine__is(struct machine *machine, const char *arch)
{
	return machine && !strcmp(perf_env__raw_arch(machine->env), arch);
}

int machine__nr_cpus_avail(struct machine *machine)
{
	return machine ? perf_env__nr_cpus_avail(machine->env) : 0;
}
int machine__get_kernel_start(struct machine *machine)
{
	struct map *map = machine__kernel_map(machine);
	int err = 0;

	/*
	 * The only addresses above 2^63 are kernel addresses of a 64-bit
	 * kernel. Note that addresses are unsigned so that on a 32-bit system
	 * all addresses including kernel addresses are less than 2^32. In
	 * that case (32-bit system), if the kernel mapping is unknown, all
	 * addresses will be assumed to be in user space - see
	 * machine__kernel_ip().
	 */
	machine->kernel_start = 1ULL << 63;
	if (map) {
		err = map__load(map);
		/*
		 * On x86_64, PTI entry trampolines are less than the
		 * start of kernel text, but still above 2^63. So leave
		 * kernel_start = 1ULL << 63 for x86_64.
		 */
		if (!err && !machine__is(machine, "x86_64"))
			machine->kernel_start = map->start;
	}
	return err;
}
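/*
 * Added commentary: when the trace was recorded in a single address
 * space (machine->single_address_space), the event's cpumode cannot be
 * trusted for an arbitrary address, so the function below reclassifies
 * it from the address itself: addresses at or above kernel_start map to
 * the kernel cpumode, everything else to user, preserving the
 * host/guest distinction.
 */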
u8 machine__addr_cpumode(struct machine *machine, u8 cpumode, u64 addr)
{
	u8 addr_cpumode = cpumode;
	bool kernel_ip;

	if (!machine->single_address_space)
		goto out;

	kernel_ip = machine__kernel_ip(machine, addr);
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
	case PERF_RECORD_MISC_USER:
		addr_cpumode = kernel_ip ? PERF_RECORD_MISC_KERNEL :
					   PERF_RECORD_MISC_USER;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
	case PERF_RECORD_MISC_GUEST_USER:
		addr_cpumode = kernel_ip ? PERF_RECORD_MISC_GUEST_KERNEL :
					   PERF_RECORD_MISC_GUEST_USER;
		break;
	default:
		break;
	}
out:
	return addr_cpumode;
}
struct dso *machine__findnew_dso_id(struct machine *machine, const char *filename, struct dso_id *id)
{
	return dsos__findnew_id(&machine->dsos, filename, id);
}

struct dso *machine__findnew_dso(struct machine *machine, const char *filename)
{
	return machine__findnew_dso_id(machine, filename, NULL);
}
char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
{
	struct machine *machine = vmachine;
	struct map *map;
	struct symbol *sym = machine__find_kernel_symbol(machine, *addrp, &map);

	if (sym == NULL)
		return NULL;

	*modp = __map__is_kmodule(map) ? (char *)map->dso->short_name : NULL;
	*addrp = map->unmap_ip(map, sym->start);