// SPDX-License-Identifier: GPL-2.0
#include <dirent.h>
#include <errno.h>
#include <regex.h>
#include <stdlib.h>
#include "callchain.h"
#include "debug.h"
#include "dso.h"
#include "env.h"
#include "event.h"
#include "machine.h"
#include "map.h"
#include "map_symbol.h"
#include "branch.h"
#include "mem-events.h"
#include "mem-info.h"
#include "path.h"
#include "srcline.h"
#include "symbol.h"
#include "sort.h"
#include "strlist.h"
#include "thread.h"
#include "vdso.h"
#include <stdbool.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include "unwind.h"
#include "linux/hash.h"
#include "asm/bug.h"
#include "bpf-event.h"
#include <internal/lib.h> // page_size
#include "cgroup.h"
#include "arm64-frame-pointer-unwind-support.h"

#include <linux/ctype.h>
#include <symbol/kallsyms.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/zalloc.h>
static struct dso *machine__kernel_dso(struct machine *machine)
{
	return map__dso(machine->vmlinux_map);
}
static int machine__set_mmap_name(struct machine *machine)
{
	if (machine__is_host(machine))
		machine->mmap_name = strdup("[kernel.kallsyms]");
	else if (machine__is_default_guest(machine))
		machine->mmap_name = strdup("[guest.kernel.kallsyms]");
	else if (asprintf(&machine->mmap_name, "[guest.kernel.kallsyms.%d]",
			  machine->pid) < 0)
		machine->mmap_name = NULL;

	return machine->mmap_name ? 0 : -ENOMEM;
}
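/*
 * For illustration: on the host this yields "[kernel.kallsyms]", for the
 * default guest "[guest.kernel.kallsyms]", and for a guest with, say,
 * pid 1234, "[guest.kernel.kallsyms.1234]".
 */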
static void thread__set_guest_comm(struct thread *thread, pid_t pid)
{
	char comm[64];

	snprintf(comm, sizeof(comm), "[guest/%d]", pid);
	thread__set_comm(thread, comm, 0);
}
int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
{
	int err = -ENOMEM;

	memset(machine, 0, sizeof(*machine));
	machine->kmaps = maps__new(machine);
	if (machine->kmaps == NULL)
		return -ENOMEM;

	RB_CLEAR_NODE(&machine->rb_node);
	dsos__init(&machine->dsos);

	threads__init(&machine->threads);

	machine->vdso_info = NULL;
	machine->pid = pid;

	machine->id_hdr_size = 0;
	machine->kptr_restrict_warned = false;
	machine->comm_exec = false;
	machine->kernel_start = 0;
	machine->vmlinux_map = NULL;

	machine->root_dir = strdup(root_dir);
	if (machine->root_dir == NULL)
		goto out;

	if (machine__set_mmap_name(machine))
		goto out;

	if (pid != HOST_KERNEL_ID) {
		struct thread *thread = machine__findnew_thread(machine, -1,
								pid);

		if (thread == NULL)
			goto out;

		thread__set_guest_comm(thread, pid);
		thread__put(thread);
	}

	machine->current_tid = NULL;
	err = 0;

out:
	if (err) {
		zfree(&machine->kmaps);
		zfree(&machine->root_dir);
		zfree(&machine->mmap_name);
	}
	return err;
}
struct machine *machine__new_host(void)
{
	struct machine *machine = malloc(sizeof(*machine));

	if (machine != NULL) {
		machine__init(machine, "", HOST_KERNEL_ID);

		if (machine__create_kernel_maps(machine) < 0)
			goto out_delete;
	}

	return machine;
out_delete:
	free(machine);
	return NULL;
}
struct machine *machine__new_kallsyms(void)
{
	struct machine *machine = machine__new_host();
	/*
	 * FIXME:
	 * 1) We should switch to machine__load_kallsyms(), i.e. not explicitly
	 *    ask for not using the kcore parsing code, once this one is fixed
	 *    to create a map per module.
	 */
	if (machine && machine__load_kallsyms(machine, "/proc/kallsyms") <= 0) {
		machine__delete(machine);
		machine = NULL;
	}

	return machine;
}
void machine__delete_threads(struct machine *machine)
{
	threads__remove_all_threads(&machine->threads);
}
void machine__exit(struct machine *machine)
{
	if (machine == NULL)
		return;

	machine__destroy_kernel_maps(machine);
	maps__zput(machine->kmaps);
	dsos__exit(&machine->dsos);
	machine__exit_vdso(machine);
	zfree(&machine->root_dir);
	zfree(&machine->mmap_name);
	zfree(&machine->current_tid);
	zfree(&machine->kallsyms_filename);

	threads__exit(&machine->threads);
}
void machine__delete(struct machine *machine)
{
	if (machine) {
		machine__exit(machine);
		free(machine);
	}
}
void machines__init(struct machines *machines)
{
	machine__init(&machines->host, "", HOST_KERNEL_ID);
	machines->guests = RB_ROOT_CACHED;
}
void machines__exit(struct machines *machines)
{
	machine__exit(&machines->host);
}
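/*
 * Lifetime sketch (illustrative): heap-allocated machines pair
 * machine__new_host() with machine__delete(), while embedded ones such as
 * machines->host pair machine__init() (via machines__init()) with
 * machine__exit() (via machines__exit()).
 */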
struct machine *machines__add(struct machines *machines, pid_t pid,
			      const char *root_dir)
{
	struct rb_node **p = &machines->guests.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct machine *pos, *machine = malloc(sizeof(*machine));
	bool leftmost = true;

	if (machine == NULL)
		return NULL;

	if (machine__init(machine, root_dir, pid) != 0) {
		free(machine);
		return NULL;
	}

	while (*p != NULL) {
		parent = *p;
		pos = rb_entry(parent, struct machine, rb_node);
		if (pid < pos->pid)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&machine->rb_node, parent, p);
	rb_insert_color_cached(&machine->rb_node, &machines->guests, leftmost);

	machine->machines = machines;

	return machine;
}
void machines__set_comm_exec(struct machines *machines, bool comm_exec)
{
	struct rb_node *nd;

	machines->host.comm_exec = comm_exec;

	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		machine->comm_exec = comm_exec;
	}
}
struct machine *machines__find(struct machines *machines, pid_t pid)
{
	struct rb_node **p = &machines->guests.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct machine *machine;
	struct machine *default_machine = NULL;

	if (pid == HOST_KERNEL_ID)
		return &machines->host;

	while (*p != NULL) {
		parent = *p;
		machine = rb_entry(parent, struct machine, rb_node);
		if (pid < machine->pid)
			p = &(*p)->rb_left;
		else if (pid > machine->pid)
			p = &(*p)->rb_right;
		else
			return machine;
		if (!machine->pid)
			default_machine = machine;
	}

	return default_machine;
}
struct machine *machines__findnew(struct machines *machines, pid_t pid)
{
	char path[PATH_MAX];
	const char *root_dir = "";
	struct machine *machine = machines__find(machines, pid);

	if (machine && (machine->pid == pid))
		goto out;

	if ((pid != HOST_KERNEL_ID) &&
	    (pid != DEFAULT_GUEST_KERNEL_ID) &&
	    (symbol_conf.guestmount)) {
		sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
		if (access(path, R_OK)) {
			static struct strlist *seen;

			if (!seen)
				seen = strlist__new(NULL, NULL);

			if (!strlist__has_entry(seen, path)) {
				pr_err("Can't access file %s\n", path);
				strlist__add(seen, path);
			}
			machine = NULL;
			goto out;
		}
		root_dir = path;
	}

	machine = machines__add(machines, pid, root_dir);
out:
	return machine;
}
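/*
 * Illustrative example of the layout assumed above: with
 * --guestmount=/tmp/guests, a guest with pid 1234 is expected to expose its
 * filesystem under /tmp/guests/1234, which then becomes the root_dir of the
 * corresponding guest machine.
 */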
struct machine *machines__find_guest(struct machines *machines, pid_t pid)
{
	struct machine *machine = machines__find(machines, pid);

	if (!machine)
		machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
	return machine;
}
/*
 * A common case for KVM test programs is that the test program acts as the
 * hypervisor, creating, running and destroying the virtual machine, and
 * providing the guest object code from its own object code. In this case,
 * the VM is not running an OS, but only the functions loaded into it by the
 * hypervisor test program, and conveniently, loaded at the same virtual
 * addresses.
 *
 * Normally to resolve addresses, MMAP events are needed to map addresses
 * back to the object code and debug symbols for that object code.
 *
 * Currently, there is no way to get such mapping information from guests
 * but, in the scenario described above, the guest has the same mappings
 * as the hypervisor, so support for that scenario can be achieved.
 *
 * To support that, copy the host thread's maps to the guest thread's maps.
 * Note, we do not discover the guest until we encounter a guest event,
 * which works well because it is not until then that we know that the host
 * thread's maps have been set up.
 *
 * This function returns the guest thread. Apart from keeping the data
 * structures sane, using a thread belonging to the guest machine, instead
 * of the host thread, allows it to have its own comm (refer
 * thread__set_guest_comm()).
 */
static struct thread *findnew_guest_code(struct machine *machine,
					 struct machine *host_machine,
					 pid_t pid)
{
	struct thread *host_thread;
	struct thread *thread;
	int err;

	if (!machine)
		return NULL;

	thread = machine__findnew_thread(machine, -1, pid);
	if (!thread)
		return NULL;

	/* Assume maps are set up if there are any */
	if (!maps__empty(thread__maps(thread)))
		return thread;

	host_thread = machine__find_thread(host_machine, -1, pid);
	if (!host_thread)
		goto out_err;

	thread__set_guest_comm(thread, pid);

	/*
	 * Guest code can be found in hypervisor process at the same address
	 * as in the hypervisor process itself.
	 */
	err = maps__copy_from(thread__maps(thread), thread__maps(host_thread));
	thread__put(host_thread);
	if (err)
		goto out_err;

	return thread;

out_err:
	thread__zput(thread);
	return NULL;
}
struct thread *machines__findnew_guest_code(struct machines *machines, pid_t pid)
{
	struct machine *host_machine = machines__find(machines, HOST_KERNEL_ID);
	struct machine *machine = machines__findnew(machines, pid);

	return findnew_guest_code(machine, host_machine, pid);
}
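/*
 * Caller-side sketch (illustrative, not actual perf code): consumers of a
 * guest sample would do roughly
 *
 *	struct thread *gthread = machines__findnew_guest_code(machines, pid);
 *
 *	if (gthread) {
 *		... resolve sample addresses against thread__maps(gthread) ...
 *		thread__put(gthread);
 *	}
 *
 * i.e. the returned thread is refcounted and must be put by the caller.
 */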
struct thread *machine__findnew_guest_code(struct machine *machine, pid_t pid)
{
	struct machines *machines = machine->machines;
	struct machine *host_machine;

	if (!machines)
		return NULL;

	host_machine = machines__find(machines, HOST_KERNEL_ID);

	return findnew_guest_code(machine, host_machine, pid);
}
void machines__process_guests(struct machines *machines,
			      machine__process_t process, void *data)
{
	struct rb_node *nd;

	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);

		process(pos, data);
	}
}
void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
{
	struct rb_node *node;
	struct machine *machine;

	machines->host.id_hdr_size = id_hdr_size;

	for (node = rb_first_cached(&machines->guests); node;
	     node = rb_next(node)) {
		machine = rb_entry(node, struct machine, rb_node);
		machine->id_hdr_size = id_hdr_size;
	}
}
static void machine__update_thread_pid(struct machine *machine,
				       struct thread *th, pid_t pid)
{
	struct thread *leader;

	if (pid == thread__pid(th) || pid == -1 || thread__pid(th) != -1)
		return;

	thread__set_pid(th, pid);

	if (thread__pid(th) == thread__tid(th))
		return;

	leader = machine__findnew_thread(machine, thread__pid(th), thread__pid(th));
	if (!leader)
		goto out_err;

	if (!thread__maps(leader))
		thread__set_maps(leader, maps__new(machine));

	if (!thread__maps(leader))
		goto out_err;

	if (thread__maps(th) == thread__maps(leader))
		goto out_put;

	if (thread__maps(th)) {
		/*
		 * Maps are created from MMAP events which provide the pid and
		 * tid. Consequently there never should be any maps on a thread
		 * with an unknown pid. Just print an error if there are.
		 */
		if (!maps__empty(thread__maps(th)))
			pr_err("Discarding thread maps for %d:%d\n",
			       thread__pid(th), thread__tid(th));
		maps__put(thread__maps(th));
	}

	thread__set_maps(th, maps__get(thread__maps(leader)));
out_put:
	thread__put(leader);
	return;
out_err:
	pr_err("Failed to join map groups for %d:%d\n", thread__pid(th), thread__tid(th));
	goto out_put;
}
/*
 * Caller must eventually drop the thread->refcnt returned with a successful
 * lookup/new thread inserted.
 */
static struct thread *__machine__findnew_thread(struct machine *machine,
						pid_t pid,
						pid_t tid,
						bool create)
{
	struct thread *th = threads__find(&machine->threads, tid);
	bool created;

	if (th) {
		machine__update_thread_pid(machine, th, pid);
		return th;
	}
	if (!create)
		return NULL;

	th = threads__findnew(&machine->threads, pid, tid, &created);
	if (created) {
		/*
		 * We have to initialize maps separately after the rb tree is
		 * updated.
		 *
		 * The reason is that we call machine__findnew_thread within
		 * thread__init_maps to find the thread leader, and that would
		 * screw up the rb tree.
		 */
		if (thread__init_maps(th, machine)) {
			pr_err("Thread init failed thread %d\n", pid);
			threads__remove(&machine->threads, th);
			thread__put(th);
			return NULL;
		}
	} else
		machine__update_thread_pid(machine, th, pid);

	return th;
}
struct thread *machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid)
{
	return __machine__findnew_thread(machine, pid, tid, /*create=*/true);
}
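/*
 * Typical usage (sketch): the caller owns one reference on the returned
 * thread and must drop it when done, e.g.
 *
 *	struct thread *th = machine__findnew_thread(machine, pid, tid);
 *
 *	if (th != NULL) {
 *		... use th ...
 *		thread__put(th);
 *	}
 */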
struct thread *machine__find_thread(struct machine *machine, pid_t pid,
				    pid_t tid)
{
	return __machine__findnew_thread(machine, pid, tid, /*create=*/false);
}
/*
 * Threads are identified by pid and tid, and the idle task has pid == tid == 0.
 * So here a single thread is created for that, but actually there is a separate
 * idle task per cpu, so there should be one 'struct thread' per cpu, but there
 * is only 1. That causes problems for some tools, requiring workarounds. For
 * example get_idle_thread() in builtin-sched.c, or thread_stack__per_cpu().
 */
struct thread *machine__idle_thread(struct machine *machine)
{
	struct thread *thread = machine__findnew_thread(machine, 0, 0);

	if (!thread || thread__set_comm(thread, "swapper", 0) ||
	    thread__set_namespaces(thread, 0, NULL))
		pr_err("problem inserting idle task for machine pid %d\n", machine->pid);

	return thread;
}
struct comm *machine__thread_exec_comm(struct machine *machine,
				       struct thread *thread)
{
	if (machine->comm_exec)
		return thread__exec_comm(thread);
	else
		return thread__comm(thread);
}
int machine__process_comm_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine,
							event->comm.pid,
							event->comm.tid);
	bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC;
	int err = 0;

	if (exec)
		machine->comm_exec = true;

	if (dump_trace)
		perf_event__fprintf_comm(event, stdout);

	if (thread == NULL ||
	    __thread__set_comm(thread, event->comm.comm, sample->time, exec)) {
		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
		err = -1;
	}

	thread__put(thread);

	return err;
}
int machine__process_namespaces_event(struct machine *machine __maybe_unused,
				      union perf_event *event,
				      struct perf_sample *sample __maybe_unused)
{
	struct thread *thread = machine__findnew_thread(machine,
							event->namespaces.pid,
							event->namespaces.tid);
	int err = 0;

	WARN_ONCE(event->namespaces.nr_namespaces > NR_NAMESPACES,
		  "\nWARNING: kernel seems to support more namespaces than perf"
		  " tool.\nTry updating the perf tool..\n\n");

	WARN_ONCE(event->namespaces.nr_namespaces < NR_NAMESPACES,
		  "\nWARNING: perf tool seems to support more namespaces than"
		  " the kernel.\nTry updating the kernel..\n\n");

	if (dump_trace)
		perf_event__fprintf_namespaces(event, stdout);

	if (thread == NULL ||
	    thread__set_namespaces(thread, sample->time, &event->namespaces)) {
		dump_printf("problem processing PERF_RECORD_NAMESPACES, skipping event.\n");
		err = -1;
	}

	thread__put(thread);

	return err;
}
int machine__process_cgroup_event(struct machine *machine,
				  union perf_event *event,
				  struct perf_sample *sample __maybe_unused)
{
	struct cgroup *cgrp;

	if (dump_trace)
		perf_event__fprintf_cgroup(event, stdout);

	cgrp = cgroup__findnew(machine->env, event->cgroup.id, event->cgroup.path);
	if (cgrp == NULL)
		return -ENOMEM;

	return 0;
}
int machine__process_lost_event(struct machine *machine __maybe_unused,
				union perf_event *event, struct perf_sample *sample __maybe_unused)
{
	dump_printf(": id:%" PRI_lu64 ": lost:%" PRI_lu64 "\n",
		    event->lost.id, event->lost.lost);
	return 0;
}
int machine__process_lost_samples_event(struct machine *machine __maybe_unused,
					union perf_event *event, struct perf_sample *sample)
{
	dump_printf(": id:%" PRIu64 ": lost samples :%" PRI_lu64 "%s\n",
		    sample->id, event->lost_samples.lost,
		    event->header.misc & PERF_RECORD_MISC_LOST_SAMPLES_BPF ? " (BPF)" : "");
	return 0;
}
int machine__process_aux_event(struct machine *machine __maybe_unused,
			       union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_aux(event, stdout);
	return 0;
}

int machine__process_itrace_start_event(struct machine *machine __maybe_unused,
					union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_itrace_start(event, stdout);
	return 0;
}

int machine__process_aux_output_hw_id_event(struct machine *machine __maybe_unused,
					    union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_aux_output_hw_id(event, stdout);
	return 0;
}

int machine__process_switch_event(struct machine *machine __maybe_unused,
				  union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_switch(event, stdout);
	return 0;
}
static int machine__process_ksymbol_register(struct machine *machine,
					     union perf_event *event,
					     struct perf_sample *sample __maybe_unused)
{
	struct symbol *sym;
	struct dso *dso = NULL;
	struct map *map = maps__find(machine__kernel_maps(machine), event->ksymbol.addr);
	int err = 0;

	if (!map) {
		dso = dso__new(event->ksymbol.name);

		if (!dso) {
			err = -ENOMEM;
			goto out;
		}
		dso__set_kernel(dso, DSO_SPACE__KERNEL);
		map = map__new2(0, dso);
		if (!map) {
			err = -ENOMEM;
			goto out;
		}
		if (event->ksymbol.ksym_type == PERF_RECORD_KSYMBOL_TYPE_OOL) {
			dso__set_binary_type(dso, DSO_BINARY_TYPE__OOL);
			dso__data(dso)->file_size = event->ksymbol.len;
			dso__set_loaded(dso);
		}

		map__set_start(map, event->ksymbol.addr);
		map__set_end(map, map__start(map) + event->ksymbol.len);
		err = maps__insert(machine__kernel_maps(machine), map);
		if (err) {
			err = -ENOMEM;
			goto out;
		}

		dso__set_loaded(dso);

		if (is_bpf_image(event->ksymbol.name)) {
			dso__set_binary_type(dso, DSO_BINARY_TYPE__BPF_IMAGE);
			dso__set_long_name(dso, "", false);
		}
	} else {
		dso = dso__get(map__dso(map));
	}

	sym = symbol__new(map__map_ip(map, map__start(map)),
			  event->ksymbol.len,
			  0, 0, event->ksymbol.name);
	if (!sym) {
		err = -ENOMEM;
		goto out;
	}
	dso__insert_symbol(dso, sym);
out:
	map__put(map);
	dso__put(dso);
	return err;
}
static int machine__process_ksymbol_unregister(struct machine *machine,
					       union perf_event *event,
					       struct perf_sample *sample __maybe_unused)
{
	struct symbol *sym;
	struct map *map;

	map = maps__find(machine__kernel_maps(machine), event->ksymbol.addr);
	if (!map)
		return 0;

	if (!RC_CHK_EQUAL(map, machine->vmlinux_map))
		maps__remove(machine__kernel_maps(machine), map);
	else {
		struct dso *dso = map__dso(map);

		sym = dso__find_symbol(dso, map__map_ip(map, map__start(map)));
		if (sym)
			dso__delete_symbol(dso, sym);
	}
	map__put(map);
	return 0;
}
int machine__process_ksymbol(struct machine *machine __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample)
{
	if (dump_trace)
		perf_event__fprintf_ksymbol(event, stdout);

	if (event->ksymbol.flags & PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER)
		return machine__process_ksymbol_unregister(machine, event,
							   sample);
	return machine__process_ksymbol_register(machine, event, sample);
}
int machine__process_text_poke(struct machine *machine, union perf_event *event,
			       struct perf_sample *sample __maybe_unused)
{
	struct map *map = maps__find(machine__kernel_maps(machine), event->text_poke.addr);
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct dso *dso = map ? map__dso(map) : NULL;

	if (dump_trace)
		perf_event__fprintf_text_poke(event, machine, stdout);

	if (!event->text_poke.new_len)
		goto out;

	if (cpumode != PERF_RECORD_MISC_KERNEL) {
		pr_debug("%s: unsupported cpumode - ignoring\n", __func__);
		goto out;
	}

	if (dso) {
		u8 *new_bytes = event->text_poke.bytes + event->text_poke.old_len;
		int ret;

		/*
		 * Kernel maps might be changed when loading symbols so loading
		 * must be done prior to using kernel maps.
		 */
		map__load(map);
		ret = dso__data_write_cache_addr(dso, map, machine,
						 event->text_poke.addr,
						 new_bytes,
						 event->text_poke.new_len);
		if (ret != event->text_poke.new_len)
			pr_debug("Failed to write kernel text poke at %#" PRI_lx64 "\n",
				 event->text_poke.addr);
	} else {
		pr_debug("Failed to find kernel text poke address map for %#" PRI_lx64 "\n",
			 event->text_poke.addr);
	}
out:
	map__put(map);
	return 0;
}
static struct map *machine__addnew_module_map(struct machine *machine, u64 start,
					      const char *filename)
{
	struct map *map = NULL;
	struct kmod_path m;
	struct dso *dso;
	int err;

	if (kmod_path__parse_name(&m, filename))
		return NULL;

	dso = dsos__findnew_module_dso(&machine->dsos, machine, &m, filename);
	if (dso == NULL)
		goto out;

	map = map__new2(start, dso);
	if (map == NULL)
		goto out;

	err = maps__insert(machine__kernel_maps(machine), map);
	/* If maps__insert failed, return NULL. */
	if (err) {
		map__put(map);
		map = NULL;
	}
out:
	/* put the dso here, corresponding to machine__findnew_module_dso */
	dso__put(dso);
	zfree(&m.name);
	return map;
}
size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
{
	struct rb_node *nd;
	size_t ret = dsos__fprintf(&machines->host.dsos, fp);

	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);

		ret += dsos__fprintf(&pos->dsos, fp);
	}

	return ret;
}
size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp,
				     bool (skip)(struct dso *dso, int parm), int parm)
{
	return dsos__fprintf_buildid(&m->dsos, fp, skip, parm);
}
size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
				      bool (skip)(struct dso *dso, int parm), int parm)
{
	struct rb_node *nd;
	size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);

	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);

		ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
	}

	return ret;
}
size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
{
	int i;
	size_t printed = 0;
	struct dso *kdso = machine__kernel_dso(machine);

	if (dso__has_build_id(kdso)) {
		char filename[PATH_MAX];

		if (dso__build_id_filename(kdso, filename, sizeof(filename), false))
			printed += fprintf(fp, "[0] %s\n", filename);
	}

	for (i = 0; i < vmlinux_path__nr_entries; ++i) {
		printed += fprintf(fp, "[%d] %s\n", i + dso__has_build_id(kdso),
				   vmlinux_path[i]);
	}

	return printed;
}
{
912 static int machine_fprintf_cb(struct thread
*thread
, void *data
)
914 struct machine_fprintf_cb_args
*args
= data
;
916 /* TODO: handle fprintf errors. */
917 args
->printed
+= thread__fprintf(thread
, args
->fp
);
921 size_t machine__fprintf(struct machine
*machine
, FILE *fp
)
923 struct machine_fprintf_cb_args args
= {
927 size_t ret
= fprintf(fp
, "Threads: %zu\n", threads__nr(&machine
->threads
));
929 machine__for_each_thread(machine
, machine_fprintf_cb
, &args
);
930 return ret
+ args
.printed
;
static struct dso *machine__get_kernel(struct machine *machine)
{
	const char *vmlinux_name = machine->mmap_name;
	struct dso *kernel;

	if (machine__is_host(machine)) {
		if (symbol_conf.vmlinux_name)
			vmlinux_name = symbol_conf.vmlinux_name;

		kernel = machine__findnew_kernel(machine, vmlinux_name,
						 "[kernel]", DSO_SPACE__KERNEL);
	} else {
		if (symbol_conf.default_guest_vmlinux_name)
			vmlinux_name = symbol_conf.default_guest_vmlinux_name;

		kernel = machine__findnew_kernel(machine, vmlinux_name,
						 "[guest.kernel]",
						 DSO_SPACE__KERNEL_GUEST);
	}

	if (kernel != NULL && (!dso__has_build_id(kernel)))
		dso__read_running_kernel_build_id(kernel, machine);

	return kernel;
}
void machine__get_kallsyms_filename(struct machine *machine, char *buf,
				    size_t bufsz)
{
	if (machine__is_default_guest(machine))
		scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
	else
		scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
}
const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};
/* Figure out the start address of kernel map from /proc/kallsyms.
 * Returns the name of the start symbol in *symbol_name. Pass in NULL as
 * symbol_name if it's not that important.
 */
static int machine__get_running_kernel_start(struct machine *machine,
					     const char **symbol_name,
					     u64 *start, u64 *end)
{
	char filename[PATH_MAX];
	int i, err = -1;
	const char *name;
	u64 addr = 0;

	machine__get_kallsyms_filename(machine, filename, PATH_MAX);

	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
		return 0;

	for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
		err = kallsyms__get_function_start(filename, name, &addr);
		if (!err)
			break;
	}

	if (err)
		return -1;

	if (symbol_name)
		*symbol_name = name;

	*start = addr;

	err = kallsyms__get_symbol_start(filename, "_edata", &addr);
	if (err)
		err = kallsyms__get_function_start(filename, "_etext", &addr);
	if (!err)
		*end = addr;

	return 0;
}
int machine__create_extra_kernel_map(struct machine *machine,
				     struct dso *kernel,
				     struct extra_kernel_map *xm)
{
	struct kmap *kmap;
	struct map *map;
	int err;

	map = map__new2(xm->start, kernel);
	if (!map)
		return -ENOMEM;

	map__set_end(map, xm->end);
	map__set_pgoff(map, xm->pgoff);

	kmap = map__kmap(map);

	strlcpy(kmap->name, xm->name, KMAP_NAME_LEN);

	err = maps__insert(machine__kernel_maps(machine), map);

	if (!err) {
		pr_debug2("Added extra kernel map %s %" PRIx64 "-%" PRIx64 "\n",
			  kmap->name, map__start(map), map__end(map));
	}

	map__put(map);
	return err;
}
static u64 find_entry_trampoline(struct dso *dso)
{
	/* Duplicates are removed so lookup all aliases */
	const char *syms[] = {
		"_entry_trampoline",
		"__entry_trampoline_start",
		"entry_SYSCALL_64_trampoline",
	};
	struct symbol *sym = dso__first_symbol(dso);
	unsigned int i;

	for (; sym; sym = dso__next_symbol(sym)) {
		if (sym->binding != STB_GLOBAL)
			continue;
		for (i = 0; i < ARRAY_SIZE(syms); i++) {
			if (!strcmp(sym->name, syms[i]))
				return sym->start;
		}
	}

	return 0;
}
/*
 * These values can be used for kernels that do not have symbols for the entry
 * trampolines in kallsyms.
 */
#define X86_64_CPU_ENTRY_AREA_PER_CPU	0xfffffe0000000000ULL
#define X86_64_CPU_ENTRY_AREA_SIZE	0x2c000
#define X86_64_ENTRY_TRAMPOLINE		0x6000
struct machine__map_x86_64_entry_trampolines_args {
	struct maps *kmaps;
	bool found;
};

static int machine__map_x86_64_entry_trampolines_cb(struct map *map, void *data)
{
	struct machine__map_x86_64_entry_trampolines_args *args = data;
	struct map *dest_map;
	struct kmap *kmap = __map__kmap(map);

	if (!kmap || !is_entry_trampoline(kmap->name))
		return 0;

	dest_map = maps__find(args->kmaps, map__pgoff(map));
	if (RC_CHK_ACCESS(dest_map) != RC_CHK_ACCESS(map))
		map__set_pgoff(map, map__map_ip(dest_map, map__pgoff(map)));

	map__put(dest_map);
	args->found = true;
	return 0;
}
/* Map x86_64 PTI entry trampolines */
int machine__map_x86_64_entry_trampolines(struct machine *machine,
					  struct dso *kernel)
{
	struct machine__map_x86_64_entry_trampolines_args args = {
		.kmaps = machine__kernel_maps(machine),
		.found = false,
	};
	int nr_cpus_avail, cpu;
	u64 pgoff;

	/*
	 * In the vmlinux case, pgoff is a virtual address which must now be
	 * mapped to a vmlinux offset.
	 */
	maps__for_each_map(args.kmaps, machine__map_x86_64_entry_trampolines_cb, &args);

	if (args.found || machine->trampolines_mapped)
		return 0;

	pgoff = find_entry_trampoline(kernel);
	if (!pgoff)
		return 0;

	nr_cpus_avail = machine__nr_cpus_avail(machine);

	/* Add a 1 page map for each CPU's entry trampoline */
	for (cpu = 0; cpu < nr_cpus_avail; cpu++) {
		u64 va = X86_64_CPU_ENTRY_AREA_PER_CPU +
			 cpu * X86_64_CPU_ENTRY_AREA_SIZE +
			 X86_64_ENTRY_TRAMPOLINE;
		struct extra_kernel_map xm = {
			.start = va,
			.end   = va + page_size,
			.pgoff = pgoff,
		};

		strlcpy(xm.name, ENTRY_TRAMPOLINE_NAME, KMAP_NAME_LEN);

		if (machine__create_extra_kernel_map(machine, kernel, &xm) < 0)
			return -1;
	}

	machine->trampolines_mapped = nr_cpus_avail;

	return 0;
}
int __weak machine__create_extra_kernel_maps(struct machine *machine __maybe_unused,
					     struct dso *kernel __maybe_unused)
{
	return 0;
}
static int
__machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
{
	/* If the kernel map is being renewed, destroy the previous one first */
	machine__destroy_kernel_maps(machine);

	map__put(machine->vmlinux_map);
	machine->vmlinux_map = map__new2(0, kernel);
	if (machine->vmlinux_map == NULL)
		return -ENOMEM;

	map__set_mapping_type(machine->vmlinux_map, MAPPING_TYPE__IDENTITY);
	return maps__insert(machine__kernel_maps(machine), machine->vmlinux_map);
}
void machine__destroy_kernel_maps(struct machine *machine)
{
	struct kmap *kmap;
	struct map *map = machine__kernel_map(machine);

	if (map == NULL)
		return;

	kmap = map__kmap(map);
	maps__remove(machine__kernel_maps(machine), map);
	if (kmap && kmap->ref_reloc_sym) {
		zfree((char **)&kmap->ref_reloc_sym->name);
		zfree(&kmap->ref_reloc_sym);
	}

	map__zput(machine->vmlinux_map);
}
int machines__create_guest_kernel_maps(struct machines *machines)
{
	int ret = 0;
	struct dirent **namelist = NULL;
	int i, items = 0;
	char path[PATH_MAX];
	pid_t pid;
	char *endp;

	if (symbol_conf.default_guest_vmlinux_name ||
	    symbol_conf.default_guest_modules ||
	    symbol_conf.default_guest_kallsyms) {
		machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
	}

	if (symbol_conf.guestmount) {
		items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
		if (items <= 0)
			return -ENOENT;
		for (i = 0; i < items; i++) {
			if (!isdigit(namelist[i]->d_name[0])) {
				/* Filter out . and .. */
				continue;
			}
			pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
			if ((*endp != '\0') ||
			    (endp == namelist[i]->d_name) ||
			    (errno == ERANGE)) {
				pr_debug("invalid directory (%s). Skipping.\n",
					 namelist[i]->d_name);
				continue;
			}
			sprintf(path, "%s/%s/proc/kallsyms",
				symbol_conf.guestmount,
				namelist[i]->d_name);
			ret = access(path, R_OK);
			if (ret) {
				pr_debug("Can't access file %s\n", path);
				goto failure;
			}
			machines__create_kernel_maps(machines, pid);
		}
failure:
		free(namelist);
	}

	return ret;
}
*machines
)
1234 struct rb_node
*next
= rb_first_cached(&machines
->guests
);
1236 machine__destroy_kernel_maps(&machines
->host
);
1239 struct machine
*pos
= rb_entry(next
, struct machine
, rb_node
);
1241 next
= rb_next(&pos
->rb_node
);
1242 rb_erase_cached(&pos
->rb_node
, &machines
->guests
);
1243 machine__delete(pos
);
int machines__create_kernel_maps(struct machines *machines, pid_t pid)
{
	struct machine *machine = machines__findnew(machines, pid);

	if (machine == NULL)
		return -1;

	return machine__create_kernel_maps(machine);
}
int machine__load_kallsyms(struct machine *machine, const char *filename)
{
	struct map *map = machine__kernel_map(machine);
	struct dso *dso = map__dso(map);
	int ret = __dso__load_kallsyms(dso, filename, map, true);

	if (ret > 0) {
		dso__set_loaded(dso);
		/*
		 * Since /proc/kallsyms will have multiple sections for the
		 * kernel, with modules between them, fixup the end of all
		 * sections.
		 */
		maps__fixup_end(machine__kernel_maps(machine));
	}

	return ret;
}
int machine__load_vmlinux_path(struct machine *machine)
{
	struct map *map = machine__kernel_map(machine);
	struct dso *dso = map__dso(map);
	int ret = dso__load_vmlinux_path(dso, map);

	if (ret > 0)
		dso__set_loaded(dso);

	return ret;
}
static char *get_kernel_version(const char *root_dir)
{
	char version[PATH_MAX];
	FILE *file;
	char *name, *tmp;
	const char *prefix = "Linux version ";

	sprintf(version, "%s/proc/version", root_dir);
	file = fopen(version, "r");
	if (!file)
		return NULL;

	tmp = fgets(version, sizeof(version), file);
	fclose(file);
	if (!tmp)
		return NULL;

	name = strstr(version, prefix);
	if (!name)
		return NULL;
	name += strlen(prefix);
	tmp = strchr(name, ' ');
	if (tmp)
		*tmp = '\0';

	return strdup(name);
}
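/*
 * Example: for a /proc/version line such as
 * "Linux version 6.9.0 (gcc ...) #1 SMP ...", the function above returns a
 * strdup()ed "6.9.0" (the text between the "Linux version " prefix and the
 * next space), or NULL on any failure.
 */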
static bool is_kmod_dso(struct dso *dso)
{
	return dso__symtab_type(dso) == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
	       dso__symtab_type(dso) == DSO_BINARY_TYPE__GUEST_KMODULE;
}
static int maps__set_module_path(struct maps *maps, const char *path, struct kmod_path *m)
{
	char *long_name;
	struct dso *dso;
	struct map *map = maps__find_by_name(maps, m->name);

	if (map == NULL)
		return 0;

	long_name = strdup(path);
	if (long_name == NULL) {
		map__put(map);
		return -ENOMEM;
	}

	dso = map__dso(map);
	dso__set_long_name(dso, long_name, true);
	dso__kernel_module_get_build_id(dso, "");

	/*
	 * The full name can reveal kmod compression, so
	 * we need to update the symtab_type if needed.
	 */
	if (m->comp && is_kmod_dso(dso)) {
		dso__set_symtab_type(dso, dso__symtab_type(dso) + 1);
		dso__set_comp(dso, m->comp);
	}
	map__put(map);
	return 0;
}
static int maps__set_modules_path_dir(struct maps *maps, const char *dir_name, int depth)
{
	struct dirent *dent;
	DIR *dir = opendir(dir_name);
	int ret = 0;

	if (!dir) {
		pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
		return -1;
	}

	while ((dent = readdir(dir)) != NULL) {
		char path[PATH_MAX];
		struct stat st;

		/* sshfs might return bad dent->d_type, so we have to stat */
		path__join(path, sizeof(path), dir_name, dent->d_name);
		if (stat(path, &st))
			continue;

		if (S_ISDIR(st.st_mode)) {
			if (!strcmp(dent->d_name, ".") ||
			    !strcmp(dent->d_name, ".."))
				continue;

			/* Do not follow top-level source and build symlinks */
			if (depth == 0) {
				if (!strcmp(dent->d_name, "source") ||
				    !strcmp(dent->d_name, "build"))
					continue;
			}

			ret = maps__set_modules_path_dir(maps, path, depth + 1);
			if (ret < 0)
				goto out;
		} else {
			struct kmod_path m;

			ret = kmod_path__parse_name(&m, dent->d_name);
			if (ret)
				goto out;

			if (m.kmod)
				ret = maps__set_module_path(maps, path, &m);

			zfree(&m.name);

			if (ret)
				goto out;
		}
	}

out:
	closedir(dir);
	return ret;
}
static int machine__set_modules_path(struct machine *machine)
{
	char *version;
	char modules_path[PATH_MAX];

	version = get_kernel_version(machine->root_dir);
	if (!version)
		return -1;

	snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
		 machine->root_dir, version);
	free(version);

	return maps__set_modules_path_dir(machine__kernel_maps(machine), modules_path, 0);
}
int __weak arch__fix_module_text_start(u64 *start __maybe_unused,
				       u64 *size __maybe_unused,
				       const char *name __maybe_unused)
{
	return 0;
}
static int machine__create_module(void *arg, const char *name, u64 start,
				  u64 size)
{
	struct machine *machine = arg;
	struct map *map;

	if (arch__fix_module_text_start(&start, &size, name) < 0)
		return -1;

	map = machine__addnew_module_map(machine, start, name);
	if (map == NULL)
		return -1;
	map__set_end(map, start + size);

	dso__kernel_module_get_build_id(map__dso(map), machine->root_dir);
	map__put(map);
	return 0;
}
static int machine__create_modules(struct machine *machine)
{
	const char *modules;
	char path[PATH_MAX];

	if (machine__is_default_guest(machine)) {
		modules = symbol_conf.default_guest_modules;
	} else {
		snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);
		modules = path;
	}

	if (symbol__restricted_filename(modules, "/proc/modules"))
		return -1;

	if (modules__parse(modules, machine, machine__create_module))
		return -1;

	if (!machine__set_modules_path(machine))
		return 0;

	pr_debug("Problems setting modules path maps, continuing anyway...\n");

	return 0;
}
static void machine__set_kernel_mmap(struct machine *machine,
				     u64 start, u64 end)
{
	map__set_start(machine->vmlinux_map, start);
	map__set_end(machine->vmlinux_map, end);
	/*
	 * Be a bit paranoid here, some perf.data files come with
	 * a zero sized synthesized MMAP event for the kernel.
	 */
	if (start == 0 && end == 0)
		map__set_end(machine->vmlinux_map, ~0ULL);
}
static int machine__update_kernel_mmap(struct machine *machine,
				       u64 start, u64 end)
{
	struct map *orig, *updated;
	int err;

	orig = machine->vmlinux_map;
	updated = map__get(orig);

	machine->vmlinux_map = updated;
	maps__remove(machine__kernel_maps(machine), orig);
	machine__set_kernel_mmap(machine, start, end);
	err = maps__insert(machine__kernel_maps(machine), updated);
	map__put(orig);

	return err;
}
int machine__create_kernel_maps(struct machine *machine)
{
	struct dso *kernel = machine__get_kernel(machine);
	const char *name = NULL;
	u64 start = 0, end = ~0ULL;
	int ret;

	if (kernel == NULL)
		return -1;

	ret = __machine__create_kernel_maps(machine, kernel);
	if (ret < 0)
		goto out_put;

	if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
		if (machine__is_host(machine))
			pr_debug("Problems creating module maps, "
				 "continuing anyway...\n");
		else
			pr_debug("Problems creating module maps for guest %d, "
				 "continuing anyway...\n", machine->pid);
	}

	if (!machine__get_running_kernel_start(machine, &name, &start, &end)) {
		if (name &&
		    map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map, name, start)) {
			machine__destroy_kernel_maps(machine);
			ret = -1;
			goto out_put;
		}

		/*
		 * we have a real start address now, so re-order the kmaps
		 * assume it's the last in the kmaps
		 */
		ret = machine__update_kernel_mmap(machine, start, end);
		if (ret < 0)
			goto out_put;
	}

	if (machine__create_extra_kernel_maps(machine, kernel))
		pr_debug("Problems creating extra kernel maps, continuing anyway...\n");

	if (end == ~0ULL) {
		/* update end address of the kernel map using adjacent module address */
		struct map *next = maps__find_next_entry(machine__kernel_maps(machine),
							 machine__kernel_map(machine));

		if (next) {
			machine__set_kernel_mmap(machine, start, map__start(next));
			map__put(next);
		}
	}

out_put:
	dso__put(kernel);
	return ret;
}
static int machine__uses_kcore_cb(struct dso *dso, void *data __maybe_unused)
{
	return dso__is_kcore(dso) ? 1 : 0;
}
static bool machine__uses_kcore(struct machine *machine)
{
	return dsos__for_each_dso(&machine->dsos, machine__uses_kcore_cb, NULL) != 0 ? true : false;
}
static bool perf_event__is_extra_kernel_mmap(struct machine *machine,
					     struct extra_kernel_map *xm)
{
	return machine__is(machine, "x86_64") &&
	       is_entry_trampoline(xm->name);
}
static int machine__process_extra_kernel_map(struct machine *machine,
					     struct extra_kernel_map *xm)
{
	struct dso *kernel = machine__kernel_dso(machine);

	if (kernel == NULL)
		return -1;

	return machine__create_extra_kernel_map(machine, kernel, xm);
}
static int machine__process_kernel_mmap_event(struct machine *machine,
					      struct extra_kernel_map *xm,
					      struct build_id *bid)
{
	enum dso_space_type dso_space;
	bool is_kernel_mmap;
	const char *mmap_name = machine->mmap_name;

	/* If we have maps from kcore then we do not need or want any others */
	if (machine__uses_kcore(machine))
		return 0;

	if (machine__is_host(machine))
		dso_space = DSO_SPACE__KERNEL;
	else
		dso_space = DSO_SPACE__KERNEL_GUEST;

	is_kernel_mmap = memcmp(xm->name, mmap_name, strlen(mmap_name) - 1) == 0;
	if (!is_kernel_mmap && !machine__is_host(machine)) {
		/*
		 * If the event was recorded inside the guest and injected into
		 * the host perf.data file, then it will match a host mmap_name,
		 * so try that - see machine__set_mmap_name().
		 */
		mmap_name = "[kernel.kallsyms]";
		is_kernel_mmap = memcmp(xm->name, mmap_name, strlen(mmap_name) - 1) == 0;
	}
	if (xm->name[0] == '/' ||
	    (!is_kernel_mmap && xm->name[0] == '[')) {
		struct map *map = machine__addnew_module_map(machine, xm->start, xm->name);

		if (map == NULL)
			goto out_problem;

		map__set_end(map, map__start(map) + xm->end - xm->start);

		if (build_id__is_defined(bid))
			dso__set_build_id(map__dso(map), bid);

		map__put(map);
	} else if (is_kernel_mmap) {
		const char *symbol_name = xm->name + strlen(mmap_name);
		/*
		 * Should be there already, from the build-id table in
		 * the header.
		 */
		struct dso *kernel = dsos__find_kernel_dso(&machine->dsos);

		if (kernel == NULL)
			kernel = machine__findnew_dso(machine, machine->mmap_name);
		if (kernel == NULL)
			goto out_problem;

		dso__set_kernel(kernel, dso_space);
		if (__machine__create_kernel_maps(machine, kernel) < 0) {
			dso__put(kernel);
			goto out_problem;
		}

		if (strstr(dso__long_name(kernel), "vmlinux"))
			dso__set_short_name(kernel, "[kernel.vmlinux]", false);

		if (machine__update_kernel_mmap(machine, xm->start, xm->end) < 0) {
			dso__put(kernel);
			goto out_problem;
		}

		if (build_id__is_defined(bid))
			dso__set_build_id(kernel, bid);

		/*
		 * Avoid using a zero address (kptr_restrict) for the ref reloc
		 * symbol. Effectively having zero here means that at record
		 * time /proc/sys/kernel/kptr_restrict was non zero.
		 */
		if (xm->pgoff != 0) {
			map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map,
							symbol_name,
							xm->pgoff);
		}

		if (machine__is_default_guest(machine)) {
			/*
			 * preload dso of guest kernel and modules
			 */
			dso__load(kernel, machine__kernel_map(machine));
		}

		dso__put(kernel);
	} else if (perf_event__is_extra_kernel_mmap(machine, xm)) {
		return machine__process_extra_kernel_map(machine, xm);
	}
	return 0;
out_problem:
	return -1;
}
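/*
 * For illustration, the name matching above distinguishes, e.g.:
 *
 *	"/lib/modules/.../ext4.ko"	-> module map (name starts with '/')
 *	"[some_module]"			-> module-style map (bracketed name
 *					   that does not match mmap_name)
 *	"[kernel.kallsyms]_text"	-> the main kernel map; the remainder
 *					   after the prefix ("_text") is used
 *					   as the ref reloc symbol name.
 */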
int machine__process_mmap2_event(struct machine *machine,
				 union perf_event *event,
				 struct perf_sample *sample)
{
	struct thread *thread;
	struct map *map;
	struct dso_id dso_id = {
		.maj = event->mmap2.maj,
		.min = event->mmap2.min,
		.ino = event->mmap2.ino,
		.ino_generation = event->mmap2.ino_generation,
	};
	struct build_id __bid, *bid = NULL;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap2(event, stdout);

	if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
		bid = &__bid;
		build_id__init(bid, event->mmap2.build_id, event->mmap2.build_id_size);
	}

	if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    sample->cpumode == PERF_RECORD_MISC_KERNEL) {
		struct extra_kernel_map xm = {
			.start = event->mmap2.start,
			.end   = event->mmap2.start + event->mmap2.len,
			.pgoff = event->mmap2.pgoff,
		};

		strlcpy(xm.name, event->mmap2.filename, KMAP_NAME_LEN);
		ret = machine__process_kernel_mmap_event(machine, &xm, bid);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap2.pid,
					 event->mmap2.tid);
	if (thread == NULL)
		goto out_problem;

	map = map__new(machine, event->mmap2.start,
		       event->mmap2.len, event->mmap2.pgoff,
		       &dso_id, event->mmap2.prot,
		       event->mmap2.flags, bid,
		       event->mmap2.filename, thread);

	if (map == NULL)
		goto out_problem_map;

	ret = thread__insert_map(thread, map);
	if (ret)
		goto out_problem_insert;

	thread__put(thread);
	map__put(map);
	return 0;

out_problem_insert:
	map__put(map);
out_problem_map:
	thread__put(thread);
out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
	return 0;
}
int machine__process_mmap_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread;
	struct map *map;
	u32 prot = 0;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap(event, stdout);

	if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    sample->cpumode == PERF_RECORD_MISC_KERNEL) {
		struct extra_kernel_map xm = {
			.start = event->mmap.start,
			.end   = event->mmap.start + event->mmap.len,
			.pgoff = event->mmap.pgoff,
		};

		strlcpy(xm.name, event->mmap.filename, KMAP_NAME_LEN);
		ret = machine__process_kernel_mmap_event(machine, &xm, NULL);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap.pid,
					 event->mmap.tid);
	if (thread == NULL)
		goto out_problem;

	if (!(event->header.misc & PERF_RECORD_MISC_MMAP_DATA))
		prot = PROT_EXEC;

	map = map__new(machine, event->mmap.start,
		       event->mmap.len, event->mmap.pgoff,
		       NULL, prot, 0, NULL, event->mmap.filename, thread);

	if (map == NULL)
		goto out_problem_map;

	ret = thread__insert_map(thread, map);
	if (ret)
		goto out_problem_insert;

	thread__put(thread);
	map__put(map);
	return 0;

out_problem_insert:
	map__put(map);
out_problem_map:
	thread__put(thread);
out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
	return 0;
}
void machine__remove_thread(struct machine *machine, struct thread *th)
{
	return threads__remove(&machine->threads, th);
}
int machine__process_fork_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);
	struct thread *parent = machine__findnew_thread(machine,
							event->fork.ppid,
							event->fork.ptid);
	bool do_maps_clone = true;
	int err = 0;

	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	/*
	 * There may be an existing thread that is not actually the parent,
	 * either because we are processing events out of order, or because the
	 * (fork) event that would have removed the thread was lost. Assume the
	 * latter case and continue on as best we can.
	 */
	if (thread__pid(parent) != (pid_t)event->fork.ppid) {
		dump_printf("removing erroneous parent thread %d/%d\n",
			    thread__pid(parent), thread__tid(parent));
		machine__remove_thread(machine, parent);
		thread__put(parent);
		parent = machine__findnew_thread(machine, event->fork.ppid,
						 event->fork.ptid);
	}

	/* if a thread currently exists for the thread id remove it */
	if (thread != NULL) {
		machine__remove_thread(machine, thread);
		thread__put(thread);
	}

	thread = machine__findnew_thread(machine, event->fork.pid,
					 event->fork.tid);
	/*
	 * When synthesizing FORK events, we are trying to create thread
	 * objects for the already running tasks on the machine.
	 *
	 * Normally, for a kernel FORK event, we want to clone the parent's
	 * maps because that is what the kernel just did.
	 *
	 * But when synthesizing, this should not be done. If we do, we end up
	 * with overlapping maps as we process the synthesized MMAP2 events that
	 * get delivered shortly thereafter.
	 *
	 * Use the FORK event misc flags in an internal way to signal this
	 * situation, so we can elide the map clone when appropriate.
	 */
	if (event->fork.header.misc & PERF_RECORD_MISC_FORK_EXEC)
		do_maps_clone = false;

	if (thread == NULL || parent == NULL ||
	    thread__fork(thread, parent, sample->time, do_maps_clone) < 0) {
		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
		err = -1;
	}
	thread__put(thread);
	thread__put(parent);

	return err;
}
int machine__process_exit_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample __maybe_unused)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);

	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	if (thread != NULL) {
		if (symbol_conf.keep_exited_threads)
			thread__set_exited(thread, /*exited=*/true);
		else
			machine__remove_thread(machine, thread);
	}
	thread__put(thread);
	return 0;
}
int machine__process_event(struct machine *machine, union perf_event *event,
			   struct perf_sample *sample)
{
	int ret;

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret = machine__process_comm_event(machine, event, sample); break;
	case PERF_RECORD_MMAP:
		ret = machine__process_mmap_event(machine, event, sample); break;
	case PERF_RECORD_NAMESPACES:
		ret = machine__process_namespaces_event(machine, event, sample); break;
	case PERF_RECORD_CGROUP:
		ret = machine__process_cgroup_event(machine, event, sample); break;
	case PERF_RECORD_MMAP2:
		ret = machine__process_mmap2_event(machine, event, sample); break;
	case PERF_RECORD_FORK:
		ret = machine__process_fork_event(machine, event, sample); break;
	case PERF_RECORD_EXIT:
		ret = machine__process_exit_event(machine, event, sample); break;
	case PERF_RECORD_LOST:
		ret = machine__process_lost_event(machine, event, sample); break;
	case PERF_RECORD_AUX:
		ret = machine__process_aux_event(machine, event); break;
	case PERF_RECORD_ITRACE_START:
		ret = machine__process_itrace_start_event(machine, event); break;
	case PERF_RECORD_LOST_SAMPLES:
		ret = machine__process_lost_samples_event(machine, event, sample); break;
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		ret = machine__process_switch_event(machine, event); break;
	case PERF_RECORD_KSYMBOL:
		ret = machine__process_ksymbol(machine, event, sample); break;
	case PERF_RECORD_BPF_EVENT:
		ret = machine__process_bpf(machine, event, sample); break;
	case PERF_RECORD_TEXT_POKE:
		ret = machine__process_text_poke(machine, event, sample); break;
	case PERF_RECORD_AUX_OUTPUT_HW_ID:
		ret = machine__process_aux_output_hw_id_event(machine, event); break;
	default:
		ret = -1;
		break;
	}

	return ret;
}
static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
{
	return regexec(regex, sym->name, 0, NULL, 0) == 0;
}
static void ip__resolve_ams(struct thread *thread,
			    struct addr_map_symbol *ams,
			    u64 ip)
{
	struct addr_location al;

	addr_location__init(&al);
	/*
	 * We cannot use the header.misc hint to determine whether a
	 * branch stack address is user, kernel, guest, hypervisor.
	 * Branches may straddle the kernel/user/hypervisor boundaries.
	 * Thus, we have to try consecutively until we find a match
	 * or else, the symbol is unknown
	 */
	thread__find_cpumode_addr_location(thread, ip, &al);

	ams->addr = ip;
	ams->al_addr = al.addr;
	ams->al_level = al.level;
	ams->ms.maps = maps__get(al.maps);
	ams->ms.sym = al.sym;
	ams->ms.map = map__get(al.map);
	ams->phys_addr = 0;
	ams->data_page_size = 0;
	addr_location__exit(&al);
}
static void ip__resolve_data(struct thread *thread,
			     u8 m, struct addr_map_symbol *ams,
			     u64 addr, u64 phys_addr, u64 daddr_page_size)
{
	struct addr_location al;

	addr_location__init(&al);

	thread__find_symbol(thread, m, addr, &al);

	ams->addr = addr;
	ams->al_addr = al.addr;
	ams->al_level = al.level;
	ams->ms.maps = maps__get(al.maps);
	ams->ms.sym = al.sym;
	ams->ms.map = map__get(al.map);
	ams->phys_addr = phys_addr;
	ams->data_page_size = daddr_page_size;
	addr_location__exit(&al);
}
struct mem_info *sample__resolve_mem(struct perf_sample *sample,
				     struct addr_location *al)
{
	struct mem_info *mi = mem_info__new();

	if (!mi)
		return NULL;

	ip__resolve_ams(al->thread, mem_info__iaddr(mi), sample->ip);
	ip__resolve_data(al->thread, al->cpumode, mem_info__daddr(mi),
			 sample->addr, sample->phys_addr,
			 sample->data_page_size);
	mem_info__data_src(mi)->val = sample->data_src;

	return mi;
}
static char *callchain_srcline(struct map_symbol *ms, u64 ip)
{
	struct map *map = ms->map;
	char *srcline = NULL;
	struct dso *dso;

	if (!map || callchain_param.key == CCKEY_FUNCTION)
		return srcline;

	dso = map__dso(map);
	srcline = srcline__tree_find(dso__srclines(dso), ip);
	if (!srcline) {
		bool show_sym = false;
		bool show_addr = callchain_param.key == CCKEY_ADDRESS;

		srcline = get_srcline(dso, map__rip_2objdump(map, ip),
				      ms->sym, show_sym, show_addr, ip);
		srcline__tree_insert(dso__srclines(dso), ip, srcline);
	}

	return srcline;
}
struct iterations {
	int nr_loop_iter;
	u64 cycles;
};

static int add_callchain_ip(struct thread *thread,
			    struct callchain_cursor *cursor,
			    struct symbol **parent,
			    struct addr_location *root_al,
			    u8 *cpumode,
			    u64 ip,
			    bool branch,
			    struct branch_flags *flags,
			    struct iterations *iter,
			    u64 branch_from,
			    bool symbols)
{
	struct map_symbol ms = {};
	struct addr_location al;
	int nr_loop_iter = 0, err = 0;
	u64 iter_cycles = 0;
	const char *srcline = NULL;

	addr_location__init(&al);
	if (!cpumode) {
		thread__find_cpumode_addr_location(thread, ip, &al);
	} else {
		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				*cpumode = PERF_RECORD_MISC_HYPERVISOR;
				break;
			case PERF_CONTEXT_KERNEL:
				*cpumode = PERF_RECORD_MISC_KERNEL;
				break;
			case PERF_CONTEXT_USER:
				*cpumode = PERF_RECORD_MISC_USER;
				break;
			default:
				pr_debug("invalid callchain context: "
					 "%"PRId64"\n", (s64) ip);
				/*
				 * It seems the callchain is corrupted.
				 * Discard all.
				 */
				callchain_cursor_reset(cursor);
				err = 1;
				goto out;
			}
			goto out;
		}
		if (symbols)
			thread__find_symbol(thread, *cpumode, ip, &al);
	}

	if (al.sym != NULL) {
		if (perf_hpp_list.parent && !*parent &&
		    symbol__match_regex(al.sym, &parent_regex))
			*parent = al.sym;
		else if (have_ignore_callees && root_al &&
			 symbol__match_regex(al.sym, &ignore_callees_regex)) {
			/* Treat this symbol as the root,
			   forgetting its callees. */
			addr_location__copy(root_al, &al);
			callchain_cursor_reset(cursor);
		}
	}

	if (symbol_conf.hide_unresolved && al.sym == NULL)
		goto out;

	if (iter) {
		nr_loop_iter = iter->nr_loop_iter;
		iter_cycles = iter->cycles;
	}

	ms.maps = maps__get(al.maps);
	ms.map = map__get(al.map);
	ms.sym = al.sym;
	srcline = callchain_srcline(&ms, al.addr);
	err = callchain_cursor_append(cursor, ip, &ms,
				      branch, flags, nr_loop_iter,
				      iter_cycles, branch_from, srcline);
out:
	addr_location__exit(&al);
	map_symbol__exit(&ms);
	return err;
}
struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
					   struct addr_location *al)
{
	unsigned int i;
	const struct branch_stack *bs = sample->branch_stack;
	struct branch_entry *entries = perf_sample__branch_entries(sample);
	u64 *branch_stack_cntr = sample->branch_stack_cntr;
	struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));

	if (!bi)
		return NULL;

	for (i = 0; i < bs->nr; i++) {
		ip__resolve_ams(al->thread, &bi[i].to, entries[i].to);
		ip__resolve_ams(al->thread, &bi[i].from, entries[i].from);
		bi[i].flags = entries[i].flags;
		if (branch_stack_cntr)
			bi[i].branch_stack_cntr = branch_stack_cntr[i];
	}

	return bi;
}
static void save_iterations(struct iterations *iter,
			    struct branch_entry *be, int nr)
{
	int i;

	iter->nr_loop_iter++;
	iter->cycles = 0;

	for (i = 0; i < nr; i++)
		iter->cycles += be[i].flags.cycles;
}
#define CHASHSZ 127
#define CHASHBITS 7
#define NO_ENTRY 0xff

#define PERF_MAX_BRANCH_DEPTH 127
/* Remove loops. */
static int remove_loops(struct branch_entry *l, int nr,
			struct iterations *iter)
{
	int i, j, off;
	unsigned char chash[CHASHSZ];

	memset(chash, NO_ENTRY, sizeof(chash));

	BUG_ON(PERF_MAX_BRANCH_DEPTH > 255);

	for (i = 0; i < nr; i++) {
		int h = hash_64(l[i].from, CHASHBITS) % CHASHSZ;

		/* no collision handling for now */
		if (chash[h] == NO_ENTRY) {
			chash[h] = i;
		} else if (l[chash[h]].from == l[i].from) {
			bool is_loop = true;
			/* check if it is a real loop */
			off = 0;
			for (j = chash[h]; j < i && i + off < nr; j++, off++)
				if (l[j].from != l[i + off].from) {
					is_loop = false;
					break;
				}
			if (is_loop) {
				j = nr - (i + off);
				if (j > 0) {
					save_iterations(iter + i + off,
							l + i, off);

					memmove(iter + i, iter + i + off,
						j * sizeof(*iter));

					memmove(l + i, l + i + off,
						j * sizeof(*l));
				}

				nr -= off;
			}
		}
	}
	return nr;
}
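/*
 * Worked example (illustrative): for a branch history whose "from" addresses
 * are A B C A B C A B C D, the repeated A B C runs are detected via chash[]
 * and collapsed, leaving roughly A B C D, while save_iterations() records the
 * number of removed iterations (and their accumulated cycles) in iter[] at
 * the surviving entries.
 */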
static int lbr_callchain_add_kernel_ip(struct thread *thread,
				       struct callchain_cursor *cursor,
				       struct perf_sample *sample,
				       struct symbol **parent,
				       struct addr_location *root_al,
				       u64 branch_from,
				       bool callee, int end,
				       bool symbols)
{
	struct ip_callchain *chain = sample->callchain;
	u8 cpumode = PERF_RECORD_MISC_USER;
	int err, i;

	if (callee) {
		/* Add kernel ip */
		for (i = 0; i < end + 1; i++) {
			err = add_callchain_ip(thread, cursor, parent,
					       root_al, &cpumode, chain->ips[i],
					       false, NULL, NULL, branch_from,
					       symbols);
			if (err)
				return err;
		}
		return 0;
	}

	/* Add kernel ip */
	for (i = end; i >= 0; i--) {
		err = add_callchain_ip(thread, cursor, parent,
				       root_al, &cpumode, chain->ips[i],
				       false, NULL, NULL, branch_from,
				       symbols);
		if (err)
			return err;
	}
	return 0;
}
static void save_lbr_cursor_node(struct thread *thread,
				 struct callchain_cursor *cursor,
				 int idx)
{
	struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);

	if (!lbr_stitch)
		return;

	if (cursor->pos == cursor->nr) {
		lbr_stitch->prev_lbr_cursor[idx].valid = false;
		return;
	}

	if (!cursor->curr)
		cursor->curr = cursor->first;
	else
		cursor->curr = cursor->curr->next;

	map_symbol__exit(&lbr_stitch->prev_lbr_cursor[idx].ms);
	memcpy(&lbr_stitch->prev_lbr_cursor[idx], cursor->curr,
	       sizeof(struct callchain_cursor_node));
	lbr_stitch->prev_lbr_cursor[idx].ms.maps = maps__get(cursor->curr->ms.maps);
	lbr_stitch->prev_lbr_cursor[idx].ms.map = map__get(cursor->curr->ms.map);

	lbr_stitch->prev_lbr_cursor[idx].valid = true;
	cursor->pos++;
}
static int lbr_callchain_add_lbr_ip(struct thread *thread,
				    struct callchain_cursor *cursor,
				    struct perf_sample *sample,
				    struct symbol **parent,
				    struct addr_location *root_al,
				    u64 *branch_from,
				    bool callee,
				    bool symbols)
{
	struct branch_stack *lbr_stack = sample->branch_stack;
	struct branch_entry *entries = perf_sample__branch_entries(sample);
	u8 cpumode = PERF_RECORD_MISC_USER;
	int lbr_nr = lbr_stack->nr;
	struct branch_flags *flags;
	int err, i;
	u64 ip;

	/*
	 * The curr and pos are not used in the writing session. They are
	 * cleared in callchain_cursor_commit() when the writing session is
	 * closed. Use curr and pos to track the current cursor node.
	 */
	if (thread__lbr_stitch(thread)) {
		cursor->curr = NULL;
		cursor->pos = cursor->nr;
		if (cursor->nr) {
			cursor->curr = cursor->first;
			for (i = 0; i < (int)(cursor->nr - 1); i++)
				cursor->curr = cursor->curr->next;
		}
	}

	if (callee) {
		/* Add LBR ip from first entries.to */
		ip = entries[0].to;
		flags = &entries[0].flags;
		*branch_from = entries[0].from;
		err = add_callchain_ip(thread, cursor, parent,
				       root_al, &cpumode, ip,
				       true, flags, NULL,
				       *branch_from, symbols);
		if (err)
			return err;

		/*
		 * The number of cursor nodes increases.
		 * Move the current cursor node.
		 * But there is no need to save the current cursor node for
		 * entry 0. It's impossible to stitch the whole LBRs of the
		 * previous sample.
		 */
		if (thread__lbr_stitch(thread) && (cursor->pos != cursor->nr)) {
			if (!cursor->curr)
				cursor->curr = cursor->first;
			else
				cursor->curr = cursor->curr->next;
			cursor->pos++;
		}

		/* Add LBR ip from entries.from one by one. */
		for (i = 0; i < lbr_nr; i++) {
			ip = entries[i].from;
			flags = &entries[i].flags;
			err = add_callchain_ip(thread, cursor, parent,
					       root_al, &cpumode, ip,
					       true, flags, NULL,
					       *branch_from, symbols);
			if (err)
				return err;
			save_lbr_cursor_node(thread, cursor, i);
		}
		return 0;
	}

	/* Add LBR ip from entries.from one by one. */
	for (i = lbr_nr - 1; i >= 0; i--) {
		ip = entries[i].from;
		flags = &entries[i].flags;
		err = add_callchain_ip(thread, cursor, parent,
				       root_al, &cpumode, ip,
				       true, flags, NULL,
				       *branch_from, symbols);
		if (err)
			return err;
		save_lbr_cursor_node(thread, cursor, i);
	}

	/* Add LBR ip from first entries.to */
	ip = entries[0].to;
	flags = &entries[0].flags;
	*branch_from = entries[0].from;
	err = add_callchain_ip(thread, cursor, parent,
			       root_al, &cpumode, ip,
			       true, flags, NULL,
			       *branch_from, symbols);

	return err;
}
static int lbr_callchain_add_stitched_lbr_ip(struct thread *thread,
					     struct callchain_cursor *cursor)
{
	struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
	struct callchain_cursor_node *cnode;
	struct stitch_list *stitch_node;
	int err;

	list_for_each_entry(stitch_node, &lbr_stitch->lists, node) {
		cnode = &stitch_node->cursor;

		err = callchain_cursor_append(cursor, cnode->ip,
					      &cnode->ms,
					      cnode->branch,
					      &cnode->branch_flags,
					      cnode->nr_loop_iter,
					      cnode->iter_cycles,
					      cnode->branch_from,
					      cnode->srcline);
		if (err)
			return err;
	}
	return 0;
}
static struct stitch_list *get_stitch_node(struct thread *thread)
{
	struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
	struct stitch_list *stitch_node;

	if (!list_empty(&lbr_stitch->free_lists)) {
		stitch_node = list_first_entry(&lbr_stitch->free_lists,
					       struct stitch_list, node);
		list_del(&stitch_node->node);

		return stitch_node;
	}

	return malloc(sizeof(struct stitch_list));
}
static bool has_stitched_lbr(struct thread *thread,
			     struct perf_sample *cur,
			     struct perf_sample *prev,
			     unsigned int max_lbr,
			     bool callee)
{
	struct branch_stack *cur_stack = cur->branch_stack;
	struct branch_entry *cur_entries = perf_sample__branch_entries(cur);
	struct branch_stack *prev_stack = prev->branch_stack;
	struct branch_entry *prev_entries = perf_sample__branch_entries(prev);
	struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
	int i, j, nr_identical_branches = 0;
	struct stitch_list *stitch_node;
	u64 cur_base, distance;

	if (!cur_stack || !prev_stack)
		return false;

	/* Find the physical index of the base-of-stack for current sample. */
	cur_base = max_lbr - cur_stack->nr + cur_stack->hw_idx + 1;

	distance = (prev_stack->hw_idx > cur_base) ? (prev_stack->hw_idx - cur_base) :
						     (max_lbr + prev_stack->hw_idx - cur_base);
	/* Previous sample has shorter stack. Nothing can be stitched. */
	if (distance + 1 > prev_stack->nr)
		return false;

	/*
	 * Check if there are identical LBRs between two samples.
	 * Identical LBRs must have same from, to and flags values. Also,
	 * they have to be saved in the same LBR registers (same physical
	 * index).
	 *
	 * Starts from the base-of-stack of current sample.
	 */
	for (i = distance, j = cur_stack->nr - 1; (i >= 0) && (j >= 0); i--, j--) {
		if ((prev_entries[i].from != cur_entries[j].from) ||
		    (prev_entries[i].to != cur_entries[j].to) ||
		    (prev_entries[i].flags.value != cur_entries[j].flags.value))
			break;

		nr_identical_branches++;
	}

	if (!nr_identical_branches)
		return false;

	/*
	 * Save the LBRs between the base-of-stack of previous sample
	 * and the base-of-stack of current sample into lbr_stitch->lists.
	 * These LBRs will be stitched later.
	 */
	for (i = prev_stack->nr - 1; i > (int)distance; i--) {

		if (!lbr_stitch->prev_lbr_cursor[i].valid)
			continue;

		stitch_node = get_stitch_node(thread);
		if (!stitch_node)
			return false;

		memcpy(&stitch_node->cursor, &lbr_stitch->prev_lbr_cursor[i],
		       sizeof(struct callchain_cursor_node));

		stitch_node->cursor.ms.maps = maps__get(lbr_stitch->prev_lbr_cursor[i].ms.maps);
		stitch_node->cursor.ms.map = map__get(lbr_stitch->prev_lbr_cursor[i].ms.map);

		if (callee)
			list_add(&stitch_node->node, &lbr_stitch->lists);
		else
			list_add_tail(&stitch_node->node, &lbr_stitch->lists);
	}

	return true;
}

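/*
 * Worked example for the index arithmetic above (numbers invented for
 * illustration): say max_lbr = 32, the previous sample captured a full
 * stack (prev_stack->nr == 32) and the current one was truncated to
 * cur_stack->nr == 8 with the same hw_idx h. Then cur_base == h + 25 and
 * distance == 7, so the comparison loop checks 8 entry pairs, and on
 * success the save loop queues prev entries 31..8 (the frames the current
 * sample lost) for stitching.
 */
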
static bool alloc_lbr_stitch(struct thread *thread, unsigned int max_lbr)
{
	if (thread__lbr_stitch(thread))
		return true;

	thread__set_lbr_stitch(thread, zalloc(sizeof(struct lbr_stitch)));
	if (!thread__lbr_stitch(thread))
		goto err;

	thread__lbr_stitch(thread)->prev_lbr_cursor =
		calloc(max_lbr + 1, sizeof(struct callchain_cursor_node));
	if (!thread__lbr_stitch(thread)->prev_lbr_cursor)
		goto free_lbr_stitch;

	thread__lbr_stitch(thread)->prev_lbr_cursor_size = max_lbr + 1;

	INIT_LIST_HEAD(&thread__lbr_stitch(thread)->lists);
	INIT_LIST_HEAD(&thread__lbr_stitch(thread)->free_lists);

	return true;

free_lbr_stitch:
	free(thread__lbr_stitch(thread));
	thread__set_lbr_stitch(thread, NULL);
err:
	pr_warning("Failed to allocate space for stitched LBRs. Disable LBR stitch\n");
	thread__set_lbr_stitch_enable(thread, false);
	return false;
}

/*
 * Resolve LBR callstack chain sample
 * Return:
 * 1 on success get LBR callchain information
 * 0 no available LBR callchain information, should try fp
 * negative error code on other errors.
 */
static int resolve_lbr_callchain_sample(struct thread *thread,
					struct callchain_cursor *cursor,
					struct perf_sample *sample,
					struct symbol **parent,
					struct addr_location *root_al,
					int max_stack,
					unsigned int max_lbr,
					bool symbols)
{
	bool callee = (callchain_param.order == ORDER_CALLEE);
	struct ip_callchain *chain = sample->callchain;
	int chain_nr = min(max_stack, (int)chain->nr), i;
	struct lbr_stitch *lbr_stitch;
	bool stitched_lbr = false;
	u64 branch_from = 0;
	int err;

	for (i = 0; i < chain_nr; i++) {
		if (chain->ips[i] == PERF_CONTEXT_USER)
			break;
	}

	/* LBR only affects the user callchain */
	if (i == chain_nr)
		return 0;

	if (thread__lbr_stitch_enable(thread) && !sample->no_hw_idx &&
	    (max_lbr > 0) && alloc_lbr_stitch(thread, max_lbr)) {
		lbr_stitch = thread__lbr_stitch(thread);

		stitched_lbr = has_stitched_lbr(thread, sample,
						&lbr_stitch->prev_sample,
						max_lbr, callee);

		if (!stitched_lbr && !list_empty(&lbr_stitch->lists)) {
			struct stitch_list *stitch_node;

			list_for_each_entry(stitch_node, &lbr_stitch->lists, node)
				map_symbol__exit(&stitch_node->cursor.ms);

			list_splice_init(&lbr_stitch->lists, &lbr_stitch->free_lists);
		}
		memcpy(&lbr_stitch->prev_sample, sample, sizeof(*sample));
	}

	if (callee) {
		/* Add kernel ip */
		err = lbr_callchain_add_kernel_ip(thread, cursor, sample,
						  parent, root_al, branch_from,
						  true, i, symbols);
		if (err)
			goto error;

		err = lbr_callchain_add_lbr_ip(thread, cursor, sample, parent,
					       root_al, &branch_from, true, symbols);
		if (err)
			goto error;

		if (stitched_lbr) {
			err = lbr_callchain_add_stitched_lbr_ip(thread, cursor);
			if (err)
				goto error;
		}
	} else {
		if (stitched_lbr) {
			err = lbr_callchain_add_stitched_lbr_ip(thread, cursor);
			if (err)
				goto error;
		}
		err = lbr_callchain_add_lbr_ip(thread, cursor, sample, parent,
					       root_al, &branch_from, false, symbols);
		if (err)
			goto error;

		/* Add kernel ip */
		err = lbr_callchain_add_kernel_ip(thread, cursor, sample,
						  parent, root_al, branch_from,
						  false, i, symbols);
		if (err)
			goto error;
	}
	return 1;

error:
	return (err < 0) ? err : 0;
}

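/*
 * Note on ordering: in callee order the stitched (older) frames are
 * appended after the current sample's LBR frames, i.e. closest to the
 * root of the call stack; in caller order they are appended first.
 * has_stitched_lbr() mirrors this by choosing list_add() vs
 * list_add_tail() when queuing the nodes.
 */
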
static int find_prev_cpumode(struct ip_callchain *chain, struct thread *thread,
			     struct callchain_cursor *cursor,
			     struct symbol **parent,
			     struct addr_location *root_al,
			     u8 *cpumode, int ent, bool symbols)
{
	int err = 0;

	while (--ent >= 0) {
		u64 ip = chain->ips[ent];

		if (ip >= PERF_CONTEXT_MAX) {
			err = add_callchain_ip(thread, cursor, parent,
					       root_al, cpumode, ip,
					       false, NULL, NULL, 0, symbols);
			break;
		}
	}
	return err;
}

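/*
 * Background: entries >= PERF_CONTEXT_MAX in a raw callchain are not
 * addresses but context markers (e.g. PERF_CONTEXT_KERNEL,
 * PERF_CONTEXT_USER) telling us which cpumode the following ips belong
 * to. Walking backwards from 'ent' finds the marker that governs the
 * entry about to be resolved.
 */
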
static u64 get_leaf_frame_caller(struct perf_sample *sample,
				 struct thread *thread, int usr_idx)
{
	if (machine__normalized_is(maps__machine(thread__maps(thread)), "arm64"))
		return get_leaf_frame_caller_aarch64(sample, thread, usr_idx);
	else
		return 0;
}

static int thread__resolve_callchain_sample(struct thread *thread,
					    struct callchain_cursor *cursor,
					    struct evsel *evsel,
					    struct perf_sample *sample,
					    struct symbol **parent,
					    struct addr_location *root_al,
					    int max_stack, bool symbols)
{
	struct branch_stack *branch = sample->branch_stack;
	struct branch_entry *entries = perf_sample__branch_entries(sample);
	struct ip_callchain *chain = sample->callchain;
	int chain_nr = 0;
	u8 cpumode = PERF_RECORD_MISC_USER;
	int i, j, err, nr_entries, usr_idx;
	int skip_idx = -1;
	int first_call = 0;
	u64 leaf_frame_caller;

	if (chain)
		chain_nr = chain->nr;

	if (evsel__has_branch_callstack(evsel)) {
		struct perf_env *env = evsel__env(evsel);

		err = resolve_lbr_callchain_sample(thread, cursor, sample, parent,
						   root_al, max_stack,
						   !env ? 0 : env->max_branches,
						   symbols);
		if (err)
			return (err < 0) ? err : 0;
	}

	/*
	 * Based on DWARF debug information, some architectures skip
	 * a callchain entry saved by the kernel.
	 */
	skip_idx = arch_skip_callchain_idx(thread, chain);

	/*
	 * Add branches to call stack for easier browsing. This gives
	 * more context for a sample than just the callers.
	 *
	 * This uses individual histograms of paths compared to the
	 * aggregated histograms the normal LBR mode uses.
	 *
	 * Limitations for now:
	 * - No extra filters
	 * - No annotations (should annotate somehow)
	 */

	if (branch && callchain_param.branch_callstack) {
		int nr = min(max_stack, (int)branch->nr);
		struct branch_entry be[nr];
		struct iterations iter[nr];

		if (branch->nr > PERF_MAX_BRANCH_DEPTH) {
			pr_warning("corrupted branch chain. skipping...\n");
			goto check_calls;
		}

		for (i = 0; i < nr; i++) {
			if (callchain_param.order == ORDER_CALLEE) {
				be[i] = entries[i];

				if (chain == NULL)
					continue;

				/*
				 * Check for overlap into the callchain.
				 * The return address is one off compared to
				 * the branch entry. To adjust for this
				 * assume the calling instruction is not longer
				 * than 8 bytes.
				 */
				if (i == skip_idx ||
				    chain->ips[first_call] >= PERF_CONTEXT_MAX)
					first_call++;
				else if (be[i].from < chain->ips[first_call] &&
					 be[i].from >= chain->ips[first_call] - 8)
					first_call++;
			} else
				be[i] = entries[branch->nr - i - 1];
		}

		memset(iter, 0, sizeof(struct iterations) * nr);
		nr = remove_loops(be, nr, iter);

		for (i = 0; i < nr; i++) {
			err = add_callchain_ip(thread, cursor, parent,
					       root_al,
					       NULL, be[i].to,
					       true, &be[i].flags,
					       NULL, be[i].from, symbols);

			if (!err) {
				err = add_callchain_ip(thread, cursor, parent, root_al,
						       NULL, be[i].from,
						       true, &be[i].flags,
						       &iter[i], 0, symbols);
			}
			if (err == -EINVAL)
				break;
			if (err)
				return err;
		}

		if (chain_nr == 0)
			return 0;

		chain_nr -= nr;
	}

check_calls:
	if (chain && callchain_param.order != ORDER_CALLEE) {
		err = find_prev_cpumode(chain, thread, cursor, parent, root_al,
					&cpumode, chain->nr - first_call, symbols);
		if (err)
			return (err < 0) ? err : 0;
	}
	for (i = first_call, nr_entries = 0;
	     i < chain_nr && nr_entries < max_stack; i++) {
		u64 ip;

		if (callchain_param.order == ORDER_CALLEE)
			j = i;
		else
			j = chain->nr - i - 1;

#ifdef HAVE_SKIP_CALLCHAIN_IDX
		if (j == skip_idx)
			continue;
#endif
		ip = chain->ips[j];
		if (ip < PERF_CONTEXT_MAX)
			++nr_entries;
		else if (callchain_param.order != ORDER_CALLEE) {
			err = find_prev_cpumode(chain, thread, cursor, parent,
						root_al, &cpumode, j, symbols);
			if (err)
				return (err < 0) ? err : 0;
			continue;
		}

		/*
		 * PERF_CONTEXT_USER allows us to locate where the user stack ends.
		 * Depending on callchain_param.order and the position of PERF_CONTEXT_USER,
		 * the index will be different in order to add the missing frame
		 * at the right place.
		 */
		usr_idx = callchain_param.order == ORDER_CALLEE ? j - 2 : j - 1;

		if (usr_idx >= 0 && chain->ips[usr_idx] == PERF_CONTEXT_USER) {

			leaf_frame_caller = get_leaf_frame_caller(sample, thread, usr_idx);

			/*
			 * Check if leaf_frame_caller != ip to not add the same
			 * value twice.
			 */
			if (leaf_frame_caller && leaf_frame_caller != ip) {

				err = add_callchain_ip(thread, cursor, parent,
						       root_al, &cpumode, leaf_frame_caller,
						       false, NULL, NULL, 0, symbols);
				if (err)
					return (err < 0) ? err : 0;
			}
		}

		err = add_callchain_ip(thread, cursor, parent,
				       root_al, &cpumode, ip,
				       false, NULL, NULL, 0, symbols);

		if (err)
			return (err < 0) ? err : 0;
	}

	return 0;
}

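/*
 * Illustrative walk of the usr_idx logic above (invented chain): with
 * chain->ips = { ..., PERF_CONTEXT_USER, leaf, caller1, ... } in callee
 * order, while appending caller1 (j == m + 2, where m is the marker's
 * index) we get usr_idx == m, so the marker is found and the leaf's
 * caller recovered from the link register (on arm64) is inserted between
 * leaf and caller1, unless it equals caller1's ip.
 */
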
static int append_inlines(struct callchain_cursor *cursor, struct map_symbol *ms, u64 ip)
{
	struct symbol *sym = ms->sym;
	struct map *map = ms->map;
	struct inline_node *inline_node;
	struct inline_list *ilist;
	struct dso *dso;
	u64 addr;
	int ret = 1;
	struct map_symbol ilist_ms;

	if (!symbol_conf.inline_name || !map || !sym)
		return ret;

	addr = map__dso_map_ip(map, ip);
	addr = map__rip_2objdump(map, addr);
	dso = map__dso(map);

	inline_node = inlines__tree_find(dso__inlined_nodes(dso), addr);
	if (!inline_node) {
		inline_node = dso__parse_addr_inlines(dso, addr, sym);
		if (!inline_node)
			return ret;
		inlines__tree_insert(dso__inlined_nodes(dso), inline_node);
	}

	ilist_ms = (struct map_symbol) {
		.maps = maps__get(ms->maps),
		.map = map__get(map),
	};
	list_for_each_entry(ilist, &inline_node->val, list) {
		ilist_ms.sym = ilist->symbol;
		ret = callchain_cursor_append(cursor, ip, &ilist_ms, false,
					      NULL, 0, 0, 0, ilist->srcline);

		if (ret != 0)
			return ret;
	}
	map_symbol__exit(&ilist_ms);

	return ret;
}

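/*
 * Return convention: append_inlines() starts with ret = 1, so "no inline
 * information available" reports non-zero and the caller (unwind_entry)
 * falls back to appending the plain entry; 0 means the inline chain was
 * appended to the cursor in its place.
 */
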
static int unwind_entry(struct unwind_entry *entry, void *arg)
{
	struct callchain_cursor *cursor = arg;
	const char *srcline = NULL;
	u64 addr = entry->ip;

	if (symbol_conf.hide_unresolved && entry->ms.sym == NULL)
		return 0;

	if (append_inlines(cursor, &entry->ms, entry->ip) == 0)
		return 0;

	/*
	 * Convert entry->ip from a virtual address to an offset in
	 * its corresponding binary.
	 */
	if (entry->ms.map)
		addr = map__dso_map_ip(entry->ms.map, entry->ip);

	srcline = callchain_srcline(&entry->ms, addr);
	return callchain_cursor_append(cursor, entry->ip, &entry->ms,
				       false, NULL, 0, 0, 0, srcline);
}

static int thread__resolve_callchain_unwind(struct thread *thread,
					    struct callchain_cursor *cursor,
					    struct evsel *evsel,
					    struct perf_sample *sample,
					    int max_stack, bool symbols)
{
	/* Can we do dwarf post unwind? */
	if (!((evsel->core.attr.sample_type & PERF_SAMPLE_REGS_USER) &&
	      (evsel->core.attr.sample_type & PERF_SAMPLE_STACK_USER)))
		return 0;

	/* Bail out if nothing was captured. */
	if ((!sample->user_regs.regs) ||
	    (!sample->user_stack.size))
		return 0;

	if (!symbols)
		pr_debug("Not resolving symbols with an unwinder isn't currently supported\n");

	return unwind__get_entries(unwind_entry, cursor,
				   thread, sample, max_stack, false);
}

int __thread__resolve_callchain(struct thread *thread,
				struct callchain_cursor *cursor,
				struct evsel *evsel,
				struct perf_sample *sample,
				struct symbol **parent,
				struct addr_location *root_al,
				int max_stack,
				bool symbols)
{
	int ret = 0;

	if (cursor == NULL)
		return -ENOMEM;

	callchain_cursor_reset(cursor);

	if (callchain_param.order == ORDER_CALLEE) {
		ret = thread__resolve_callchain_sample(thread, cursor,
						       evsel, sample,
						       parent, root_al,
						       max_stack, symbols);
		if (ret)
			return ret;
		ret = thread__resolve_callchain_unwind(thread, cursor,
						       evsel, sample,
						       max_stack, symbols);
	} else {
		ret = thread__resolve_callchain_unwind(thread, cursor,
						       evsel, sample,
						       max_stack, symbols);
		if (ret)
			return ret;
		ret = thread__resolve_callchain_sample(thread, cursor,
						       evsel, sample,
						       parent, root_al,
						       max_stack, symbols);
	}

	return ret;
}

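/*
 * Ordering rationale: the cursor is filled in display order. For
 * ORDER_CALLEE the sampled chain (leaf first) goes in before the DWARF
 * post-unwind frames that extend it towards the root; for caller order
 * the unwinder's frames must go in first. A non-zero result from the
 * first resolver short-circuits the second.
 */
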
int machine__for_each_thread(struct machine *machine,
			     int (*fn)(struct thread *thread, void *p),
			     void *priv)
{
	return threads__for_each_thread(&machine->threads, fn, priv);
}

int machines__for_each_thread(struct machines *machines,
			      int (*fn)(struct thread *thread, void *p),
			      void *priv)
{
	struct rb_node *nd;
	int rc = 0;

	rc = machine__for_each_thread(&machines->host, fn, priv);
	if (rc != 0)
		return rc;

	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		rc = machine__for_each_thread(machine, fn, priv);
		if (rc != 0)
			return rc;
	}
	return rc;
}

static int thread_list_cb(struct thread *thread, void *data)
{
	struct list_head *list = data;
	struct thread_list *entry = malloc(sizeof(*entry));

	if (!entry)
		return -ENOMEM;

	entry->thread = thread__get(thread);
	list_add_tail(&entry->list, list);
	return 0;
}

int machine__thread_list(struct machine *machine, struct list_head *list)
{
	return machine__for_each_thread(machine, thread_list_cb, list);
}

void thread_list__delete(struct list_head *list)
{
	struct thread_list *pos, *next;

	list_for_each_entry_safe(pos, next, list, list) {
		thread__zput(pos->thread);
		list_del(&pos->list);
		free(pos);
	}
}

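/*
 * Typical usage (a minimal sketch, not from this file; do_something()
 * stands in for the caller's per-thread work):
 *
 *	LIST_HEAD(threads);
 *	struct thread_list *pos;
 *
 *	if (machine__thread_list(machine, &threads) == 0) {
 *		list_for_each_entry(pos, &threads, list)
 *			do_something(pos->thread);
 *	}
 *	thread_list__delete(&threads);
 *
 * Each entry owns a reference (thread__get() in thread_list_cb), so the
 * list stays valid even if the machine's thread table changes, and
 * thread_list__delete() drops those references.
 */
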
pid_t machine__get_current_tid(struct machine *machine, int cpu)
{
	if (cpu < 0 || (size_t)cpu >= machine->current_tid_sz)
		return -1;

	return machine->current_tid[cpu];
}

int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
			     pid_t tid)
{
	struct thread *thread;
	const pid_t init_val = -1;

	if (cpu < 0)
		return -EINVAL;

	if (realloc_array_as_needed(machine->current_tid,
				    machine->current_tid_sz,
				    (unsigned int)cpu,
				    &init_val))
		return -ENOMEM;

	machine->current_tid[cpu] = tid;

	thread = machine__findnew_thread(machine, pid, tid);
	if (!thread)
		return -ENOMEM;

	thread__set_cpu(thread, cpu);
	thread__put(thread);

	return 0;
}

/*
 * Compares the raw arch string. N.B. see instead perf_env__arch() or
 * machine__normalized_is() if a normalized arch is needed.
 */
bool machine__is(struct machine *machine, const char *arch)
{
	return machine && !strcmp(perf_env__raw_arch(machine->env), arch);
}

bool machine__normalized_is(struct machine *machine, const char *arch)
{
	return machine && !strcmp(perf_env__arch(machine->env), arch);
}

int machine__nr_cpus_avail(struct machine *machine)
{
	return machine ? perf_env__nr_cpus_avail(machine->env) : 0;
}

int machine__get_kernel_start(struct machine *machine)
{
	struct map *map = machine__kernel_map(machine);
	int err = 0;

	/*
	 * The only addresses above 2^63 are kernel addresses of a 64-bit
	 * kernel. Note that addresses are unsigned so that on a 32-bit system
	 * all addresses including kernel addresses are less than 2^32. In
	 * that case (32-bit system), if the kernel mapping is unknown, all
	 * addresses will be assumed to be in user space - see
	 * machine__kernel_ip().
	 */
	machine->kernel_start = 1ULL << 63;
	if (map) {
		err = map__load(map);
		/*
		 * On x86_64, PTI entry trampolines are less than the
		 * start of kernel text, but still above 2^63. So leave
		 * kernel_start = 1ULL << 63 for x86_64.
		 */
		if (!err && !machine__is(machine, "x86_64"))
			machine->kernel_start = map__start(map);
	}
	return err;
}

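/*
 * Consequence (illustrative numbers): with kernel_start = 1ULL << 63, a
 * classifier such as machine__kernel_ip() reduces to a single unsigned
 * comparison, e.g. 0xffffffff81000000 (a typical x86_64 kernel text
 * address) >= 0x8000000000000000 holds, while any canonical user-space
 * address falls below the threshold.
 */
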
u8 machine__addr_cpumode(struct machine *machine, u8 cpumode, u64 addr)
{
	u8 addr_cpumode = cpumode;
	bool kernel_ip;

	if (!machine->single_address_space)
		goto out;

	kernel_ip = machine__kernel_ip(machine, addr);
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
	case PERF_RECORD_MISC_USER:
		addr_cpumode = kernel_ip ? PERF_RECORD_MISC_KERNEL :
					   PERF_RECORD_MISC_USER;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
	case PERF_RECORD_MISC_GUEST_USER:
		addr_cpumode = kernel_ip ? PERF_RECORD_MISC_GUEST_KERNEL :
					   PERF_RECORD_MISC_GUEST_USER;
		break;
	default:
		break;
	}
out:
	return addr_cpumode;
}

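/*
 * Example (invented values): on an arch with a single address space, a
 * PERF_RECORD_MISC_USER sample whose referenced data address lands in the
 * kernel range is re-classified as PERF_RECORD_MISC_KERNEL here. This
 * matters for data addresses, where the sampled ip and the address it
 * touches may live in different privilege domains.
 */
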
struct dso *machine__findnew_dso_id(struct machine *machine, const char *filename,
				    const struct dso_id *id)
{
	return dsos__findnew_id(&machine->dsos, filename, id);
}

struct dso *machine__findnew_dso(struct machine *machine, const char *filename)
{
	return machine__findnew_dso_id(machine, filename, NULL);
}

char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
{
	struct machine *machine = vmachine;
	struct map *map;
	struct symbol *sym = machine__find_kernel_symbol(machine, *addrp, &map);

	if (sym == NULL)
		return NULL;

	*modp = __map__is_kmodule(map) ? (char *)dso__short_name(map__dso(map)) : NULL;
	*addrp = map__unmap_ip(map, sym->start);
	return sym->name;
}

struct machine__for_each_dso_cb_args {
	struct machine *machine;
	machine__dso_t fn;
	void *priv;
};

static int machine__for_each_dso_cb(struct dso *dso, void *data)
{
	struct machine__for_each_dso_cb_args *args = data;

	return args->fn(dso, args->machine, args->priv);
}

int machine__for_each_dso(struct machine *machine, machine__dso_t fn, void *priv)
{
	struct machine__for_each_dso_cb_args args = {
		.machine = machine,
		.fn = fn,
		.priv = priv,
	};

	return dsos__for_each_dso(&machine->dsos, machine__for_each_dso_cb, &args);
}

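/*
 * Usage sketch (hypothetical caller, not from this file): a callback
 * matching machine__dso_t returns non-zero to stop the iteration early.
 *
 *	static int count_dso(struct dso *dso __maybe_unused,
 *			     struct machine *machine __maybe_unused,
 *			     void *priv)
 *	{
 *		(*(int *)priv)++;
 *		return 0;	(keep iterating)
 *	}
 *
 *	int nr = 0;
 *	machine__for_each_dso(machine, count_dso, &nr);
 */
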
int machine__for_each_kernel_map(struct machine *machine, machine__map_t fn, void *priv)
{
	struct maps *maps = machine__kernel_maps(machine);

	return maps__for_each_map(maps, fn, priv);
}

bool machine__is_lock_function(struct machine *machine, u64 addr)
{
	if (!machine->sched.text_start) {
		struct map *kmap;
		struct symbol *sym = machine__find_kernel_symbol_by_name(machine, "__sched_text_start", &kmap);

		if (!sym) {
			/* to avoid retry */
			machine->sched.text_start = 1;
			return false;
		}

		machine->sched.text_start = map__unmap_ip(kmap, sym->start);

		/* should not fail from here */
		sym = machine__find_kernel_symbol_by_name(machine, "__sched_text_end", &kmap);
		machine->sched.text_end = map__unmap_ip(kmap, sym->start);

		sym = machine__find_kernel_symbol_by_name(machine, "__lock_text_start", &kmap);
		machine->lock.text_start = map__unmap_ip(kmap, sym->start);

		sym = machine__find_kernel_symbol_by_name(machine, "__lock_text_end", &kmap);
		machine->lock.text_end = map__unmap_ip(kmap, sym->start);

		sym = machine__find_kernel_symbol_by_name(machine, "__traceiter_contention_begin", &kmap);
		if (sym) {
			machine->traceiter.text_start = map__unmap_ip(kmap, sym->start);
			machine->traceiter.text_end = map__unmap_ip(kmap, sym->end);
		}
		sym = machine__find_kernel_symbol_by_name(machine, "trace_contention_begin", &kmap);
		if (sym) {
			machine->trace.text_start = map__unmap_ip(kmap, sym->start);
			machine->trace.text_end = map__unmap_ip(kmap, sym->end);
		}
	}

	/* failed to get kernel symbols */
	if (machine->sched.text_start == 1)
		return false;

	/* mutex and rwsem functions are in sched text section */
	if (machine->sched.text_start <= addr && addr < machine->sched.text_end)
		return true;

	/* spinlock functions are in lock text section */
	if (machine->lock.text_start <= addr && addr < machine->lock.text_end)
		return true;

	/*
	 * traceiter functions currently don't have their own section
	 * but we consider them lock functions
	 */
	if (machine->traceiter.text_start != 0) {
		if (machine->traceiter.text_start <= addr && addr < machine->traceiter.text_end)
			return true;
	}

	if (machine->trace.text_start != 0) {
		if (machine->trace.text_start <= addr && addr < machine->trace.text_end)
			return true;
	}

	return false;
}

int machine__hit_all_dsos(struct machine *machine)
{
	return dsos__hit_all(&machine->dsos);
}