// SPDX-License-Identifier: GPL-2.0-only
/*
 * arm64 callchain support
 *
 * Copyright (C) 2015 ARM Limited
 */
7 #include <linux/perf_event.h>
8 #include <linux/uaccess.h>
10 #include <asm/pointer_auth.h>
11 #include <asm/stacktrace.h>
14 struct frame_tail __user
*fp
;
16 } __attribute__((packed
));
19 * Get the return address for a single stackframe and return a pointer to the
22 static struct frame_tail __user
*
23 user_backtrace(struct frame_tail __user
*tail
,
24 struct perf_callchain_entry_ctx
*entry
)
26 struct frame_tail buftail
;
30 /* Also check accessibility of one struct frame_tail beyond */
31 if (!access_ok(tail
, sizeof(buftail
)))
35 err
= __copy_from_user_inatomic(&buftail
, tail
, sizeof(buftail
));
41 lr
= ptrauth_strip_insn_pac(buftail
.lr
);
43 perf_callchain_store(entry
, lr
);
46 * Frame pointers should strictly progress back up the stack
47 * (towards higher addresses).
49 if (tail
>= buftail
.fp
)
57 * The registers we're interested in are at the end of the variable
58 * length saved register structure. The fp points at the end of this
59 * structure so the address of this struct is:
60 * (struct compat_frame_tail *)(xxx->fp)-1
62 * This code has been adapted from the ARM OProfile support.
64 struct compat_frame_tail
{
65 compat_uptr_t fp
; /* a (struct compat_frame_tail *) in compat mode */
68 } __attribute__((packed
));
70 static struct compat_frame_tail __user
*
71 compat_user_backtrace(struct compat_frame_tail __user
*tail
,
72 struct perf_callchain_entry_ctx
*entry
)
74 struct compat_frame_tail buftail
;
77 /* Also check accessibility of one struct frame_tail beyond */
78 if (!access_ok(tail
, sizeof(buftail
)))
82 err
= __copy_from_user_inatomic(&buftail
, tail
, sizeof(buftail
));
88 perf_callchain_store(entry
, buftail
.lr
);
91 * Frame pointers should strictly progress back up the stack
92 * (towards higher addresses).
94 if (tail
+ 1 >= (struct compat_frame_tail __user
*)
95 compat_ptr(buftail
.fp
))
98 return (struct compat_frame_tail __user
*)compat_ptr(buftail
.fp
) - 1;
100 #endif /* CONFIG_COMPAT */
102 void perf_callchain_user(struct perf_callchain_entry_ctx
*entry
,
103 struct pt_regs
*regs
)
105 if (perf_guest_cbs
&& perf_guest_cbs
->is_in_guest()) {
106 /* We don't support guest os callchain now */
110 perf_callchain_store(entry
, regs
->pc
);
112 if (!compat_user_mode(regs
)) {
114 struct frame_tail __user
*tail
;
116 tail
= (struct frame_tail __user
*)regs
->regs
[29];
118 while (entry
->nr
< entry
->max_stack
&&
119 tail
&& !((unsigned long)tail
& 0xf))
120 tail
= user_backtrace(tail
, entry
);
123 /* AARCH32 compat mode */
124 struct compat_frame_tail __user
*tail
;
126 tail
= (struct compat_frame_tail __user
*)regs
->compat_fp
- 1;
128 while ((entry
->nr
< entry
->max_stack
) &&
129 tail
&& !((unsigned long)tail
& 0x3))
130 tail
= compat_user_backtrace(tail
, entry
);
136 * Gets called by walk_stackframe() for every stackframe. This will be called
137 * whist unwinding the stackframe and is like a subroutine return so we use
140 static bool callchain_trace(void *data
, unsigned long pc
)
142 struct perf_callchain_entry_ctx
*entry
= data
;
143 perf_callchain_store(entry
, pc
);
147 void perf_callchain_kernel(struct perf_callchain_entry_ctx
*entry
,
148 struct pt_regs
*regs
)
150 struct stackframe frame
;
152 if (perf_guest_cbs
&& perf_guest_cbs
->is_in_guest()) {
153 /* We don't support guest os callchain now */
157 start_backtrace(&frame
, regs
->regs
[29], regs
->pc
);
158 walk_stackframe(current
, &frame
, callchain_trace
, entry
);
161 unsigned long perf_instruction_pointer(struct pt_regs
*regs
)
163 if (perf_guest_cbs
&& perf_guest_cbs
->is_in_guest())
164 return perf_guest_cbs
->get_guest_ip();
166 return instruction_pointer(regs
);
169 unsigned long perf_misc_flags(struct pt_regs
*regs
)
173 if (perf_guest_cbs
&& perf_guest_cbs
->is_in_guest()) {
174 if (perf_guest_cbs
->is_user_mode())
175 misc
|= PERF_RECORD_MISC_GUEST_USER
;
177 misc
|= PERF_RECORD_MISC_GUEST_KERNEL
;
180 misc
|= PERF_RECORD_MISC_USER
;
182 misc
|= PERF_RECORD_MISC_KERNEL
;