// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Performance counter callchain support - powerpc architecture code
 *
 * Copyright © 2009 Paul Mackerras, IBM Corporation.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <asm/ptrace.h>
#include <asm/pgtable.h>
#include <asm/sigcontext.h>
#include <asm/ucontext.h>
#include <asm/vdso.h>
#ifdef CONFIG_PPC64
#include "../kernel/ppc32.h"
#endif
#include <asm/pte-walk.h>

/*
 * Is sp valid as the address of the next kernel stack frame after prev_sp?
 * The next frame may be in a different stack area but should not go
 * back down in the same stack area.
 */
static int valid_next_sp(unsigned long sp, unsigned long prev_sp)
{
	if (sp & 0xf)
		return 0;		/* must be 16-byte aligned */
	if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD))
		return 0;
	if (sp >= prev_sp + STACK_FRAME_MIN_SIZE)
		return 1;
	/*
	 * sp could decrease when we jump off an interrupt stack
	 * back to the regular process stack.
	 */
	if ((sp & ~(THREAD_SIZE - 1)) != (prev_sp & ~(THREAD_SIZE - 1)))
		return 1;
	return 0;
}
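
/*
 * Walk the kernel stack using the back chain at fp[0].  An interrupt
 * frame is recognised by STACK_FRAME_REGS_MARKER, so the walk can cross
 * it and continue from the register state it saved.
 */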
void
perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
	unsigned long sp, next_sp;
	unsigned long next_ip;
	unsigned long lr;
	long level = 0;
	unsigned long *fp;

	lr = regs->link;
	sp = regs->gpr[1];
	perf_callchain_store(entry, perf_instruction_pointer(regs));

	if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD))
		return;

	for (;;) {
		fp = (unsigned long *) sp;
		next_sp = fp[0];

		if (next_sp == sp + STACK_INT_FRAME_SIZE &&
		    fp[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
			/*
			 * This looks like an interrupt frame for an
			 * interrupt that occurred in the kernel
			 */
			regs = (struct pt_regs *)(sp + STACK_FRAME_OVERHEAD);
			next_ip = regs->nip;
			lr = regs->link;
			level = 0;
			perf_callchain_store_context(entry, PERF_CONTEXT_KERNEL);

		} else {
			if (level == 0)
				next_ip = lr;
			else
				next_ip = fp[STACK_FRAME_LR_SAVE];

			/*
			 * We can't tell which of the first two addresses
			 * we get are valid, but we can filter out the
			 * obviously bogus ones here.  We replace them
			 * with 0 rather than removing them entirely so
			 * that userspace can tell which is which.
			 */
			if ((level == 1 && next_ip == lr) ||
			    (level <= 1 && !kernel_text_address(next_ip)))
				next_ip = 0;

			++level;
		}

		perf_callchain_store(entry, next_ip);
		if (!valid_next_sp(next_sp, sp))
			return;
		sp = next_sp;
	}
}

#ifdef CONFIG_PPC64
/*
 * On 64-bit we don't want to invoke hash_page on user addresses from
 * interrupt context, so if the access faults, we read the page tables
 * to find which page (if any) is mapped and access it directly.
 */
static int read_user_stack_slow(void __user *ptr, void *buf, int nb)
{
	int ret = -EFAULT;
	pgd_t *pgdir;
	pte_t *ptep, pte;
	unsigned shift;
	unsigned long addr = (unsigned long) ptr;
	unsigned long offset;
	unsigned long pfn, flags;
	void *kaddr;

	pgdir = current->mm->pgd;
	if (!pgdir)
		return -EFAULT;

	local_irq_save(flags);
	ptep = find_current_mm_pte(pgdir, addr, NULL, &shift);
	if (!ptep)
		goto err_out;
	if (!shift)
		shift = PAGE_SHIFT;

	/* align address to page boundary */
	offset = addr & ((1UL << shift) - 1);

	pte = READ_ONCE(*ptep);
	if (!pte_present(pte) || !pte_user(pte))
		goto err_out;
	pfn = pte_pfn(pte);
	if (!page_is_ram(pfn))
		goto err_out;

	/* no highmem to worry about here */
	kaddr = pfn_to_kaddr(pfn);
	memcpy(buf, kaddr + offset, nb);
	ret = 0;
err_out:
	local_irq_restore(flags);
	return ret;
}
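
/*
 * Read one word of the user stack: try a fast __get_user_inatomic()
 * first and, if that faults, fall back to read_user_stack_slow(),
 * which walks the page tables directly.
 */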
static int read_user_stack_64(unsigned long __user *ptr, unsigned long *ret)
{
	if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned long) ||
	    ((unsigned long)ptr & 7))
		return -EFAULT;

	pagefault_disable();
	if (!__get_user_inatomic(*ret, ptr)) {
		pagefault_enable();
		return 0;
	}
	pagefault_enable();

	return read_user_stack_slow(ptr, ret, 8);
}

static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
{
	if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
	    ((unsigned long)ptr & 3))
		return -EFAULT;

	pagefault_disable();
	if (!__get_user_inatomic(*ret, ptr)) {
		pagefault_enable();
		return 0;
	}
	pagefault_enable();

	return read_user_stack_slow(ptr, ret, 4);
}
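
/*
 * A plausible user stack pointer is non-zero, 8-byte aligned and lies
 * below the address-space limit, leaving room for at least a minimal
 * stack frame.
 */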
static inline int valid_user_sp(unsigned long sp, int is_64)
{
	if (!sp || (sp & 7) || sp > (is_64 ? TASK_SIZE : 0x100000000UL) - 32)
		return 0;
	return 1;
}
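
/*
 * The layout below mirrors the frame set up by the 64-bit signal
 * delivery code; only the fields needed for the sanity checks and for
 * recovering the saved NIP/LR/R1 are used here.
 */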
/*
 * 64-bit user processes use the same stack frame for RT and non-RT signals.
 */
struct signal_frame_64 {
	char		dummy[__SIGNAL_FRAMESIZE];
	struct ucontext	uc;
	unsigned long	unused[2];
	unsigned int	tramp[6];
	struct siginfo	*pinfo;
	void		*puc;
	struct siginfo	info;
	char		abigap[288];
};

static int is_sigreturn_64_address(unsigned long nip, unsigned long fp)
{
	if (nip == fp + offsetof(struct signal_frame_64, tramp))
		return 1;
	if (vdso64_rt_sigtramp && current->mm->context.vdso_base &&
	    nip == current->mm->context.vdso_base + vdso64_rt_sigtramp)
		return 1;
	return 0;
}

/*
 * Do some sanity checking on the signal frame pointed to by sp.
 * We check the pinfo and puc pointers in the frame.
 */
static int sane_signal_64_frame(unsigned long sp)
{
	struct signal_frame_64 __user *sf;
	unsigned long pinfo, puc;

	sf = (struct signal_frame_64 __user *) sp;
	if (read_user_stack_64((unsigned long __user *) &sf->pinfo, &pinfo) ||
	    read_user_stack_64((unsigned long __user *) &sf->puc, &puc))
		return 0;
	return pinfo == (unsigned long) &sf->info &&
		puc == (unsigned long) &sf->uc;
}
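
/*
 * Walk the 64-bit user stack: follow the back chain at fp[0], take the
 * saved LR from fp[2], and restart from the saved registers whenever a
 * signal frame is detected.
 */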
static void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
				   struct pt_regs *regs)
{
	unsigned long sp, next_sp;
	unsigned long next_ip;
	unsigned long lr;
	long level = 0;
	struct signal_frame_64 __user *sigframe;
	unsigned long __user *fp, *uregs;

	next_ip = perf_instruction_pointer(regs);
	lr = regs->link;
	sp = regs->gpr[1];
	perf_callchain_store(entry, next_ip);

	while (entry->nr < entry->max_stack) {
		fp = (unsigned long __user *) sp;
		if (!valid_user_sp(sp, 1) || read_user_stack_64(fp, &next_sp))
			return;
		if (level > 0 && read_user_stack_64(&fp[2], &next_ip))
			return;

		/*
		 * Note: the next_sp - sp >= signal frame size check
		 * is true when next_sp < sp, which can happen when
		 * transitioning from an alternate signal stack to the
		 * normal stack.
		 */
		if (next_sp - sp >= sizeof(struct signal_frame_64) &&
		    (is_sigreturn_64_address(next_ip, sp) ||
		     (level <= 1 && is_sigreturn_64_address(lr, sp))) &&
		    sane_signal_64_frame(sp)) {
			/*
			 * This looks like a signal frame
			 */
			sigframe = (struct signal_frame_64 __user *) sp;
			uregs = sigframe->uc.uc_mcontext.gp_regs;
			if (read_user_stack_64(&uregs[PT_NIP], &next_ip) ||
			    read_user_stack_64(&uregs[PT_LNK], &lr) ||
			    read_user_stack_64(&uregs[PT_R1], &sp))
				return;
			level = 0;
			perf_callchain_store_context(entry, PERF_CONTEXT_USER);
			perf_callchain_store(entry, next_ip);
			continue;
		}

		if (level == 0)
			next_ip = lr;
		perf_callchain_store(entry, next_ip);
		++level;
		sp = next_sp;
	}
}

#else  /* CONFIG_PPC64 */
/*
 * On 32-bit we just access the address and let hash_page create a
 * HPTE if necessary, so there is no need to fall back to reading
 * the page tables.  Since this is called at interrupt level,
 * do_page_fault() won't treat a DSI as a page fault.
 */
static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
{
	int rc;

	if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
	    ((unsigned long)ptr & 3))
		return -EFAULT;

	pagefault_disable();
	rc = __get_user_inatomic(*ret, ptr);
	pagefault_enable();

	return rc;
}

static inline void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
					  struct pt_regs *regs)
{
}

static inline int valid_user_sp(unsigned long sp, int is_64)
{
	if (!sp || (sp & 7) || sp > TASK_SIZE - 32)
		return 0;
	return 1;
}
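
/*
 * On a 32-bit kernel the compat signal structures are just the native
 * ones, so alias the names used by the common code below.
 */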
#define __SIGNAL_FRAMESIZE32	__SIGNAL_FRAMESIZE
#define sigcontext32		sigcontext
#define mcontext32		mcontext
#define ucontext32		ucontext
#define compat_siginfo_t	struct siginfo

#endif /* CONFIG_PPC64 */

/*
 * Layout for non-RT signal frames
 */
struct signal_frame_32 {
	char			dummy[__SIGNAL_FRAMESIZE32];
	struct sigcontext32	sctx;
	struct mcontext32	mctx;
	int			abigap[56];
};

/*
 * Layout for RT signal frames
 */
struct rt_signal_frame_32 {
	char			dummy[__SIGNAL_FRAMESIZE32 + 16];
	compat_siginfo_t	info;
	struct ucontext32	uc;
	int			abigap[56];
};

static int is_sigreturn_32_address(unsigned int nip, unsigned int fp)
{
	if (nip == fp + offsetof(struct signal_frame_32, mctx.mc_pad))
		return 1;
	if (vdso32_sigtramp && current->mm->context.vdso_base &&
	    nip == current->mm->context.vdso_base + vdso32_sigtramp)
		return 1;
	return 0;
}

static int is_rt_sigreturn_32_address(unsigned int nip, unsigned int fp)
{
	if (nip == fp + offsetof(struct rt_signal_frame_32,
				 uc.uc_mcontext.mc_pad))
		return 1;
	if (vdso32_rt_sigtramp && current->mm->context.vdso_base &&
	    nip == current->mm->context.vdso_base + vdso32_rt_sigtramp)
		return 1;
	return 0;
}

static int sane_signal_32_frame(unsigned int sp)
{
	struct signal_frame_32 __user *sf;
	unsigned int regs;

	sf = (struct signal_frame_32 __user *) (unsigned long) sp;
	if (read_user_stack_32((unsigned int __user *) &sf->sctx.regs, &regs))
		return 0;
	return regs == (unsigned long) &sf->mctx;
}

static int sane_rt_signal_32_frame(unsigned int sp)
{
	struct rt_signal_frame_32 __user *sf;
	unsigned int regs;

	sf = (struct rt_signal_frame_32 __user *) (unsigned long) sp;
	if (read_user_stack_32((unsigned int __user *) &sf->uc.uc_regs, &regs))
		return 0;
	return regs == (unsigned long) &sf->uc.uc_mcontext;
}
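
/*
 * If sp points at a (RT or non-RT) 32-bit signal frame for which next_ip
 * is the sigreturn address, return a pointer to the saved GP registers
 * in it, otherwise return NULL.
 */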
static unsigned int __user *signal_frame_32_regs(unsigned int sp,
				unsigned int next_sp, unsigned int next_ip)
{
	struct mcontext32 __user *mctx = NULL;
	struct signal_frame_32 __user *sf;
	struct rt_signal_frame_32 __user *rt_sf;

	/*
	 * Note: the next_sp - sp >= signal frame size check
	 * is true when next_sp < sp, for example, when
	 * transitioning from an alternate signal stack to the
	 * normal stack.
	 */
	if (next_sp - sp >= sizeof(struct signal_frame_32) &&
	    is_sigreturn_32_address(next_ip, sp) &&
	    sane_signal_32_frame(sp)) {
		sf = (struct signal_frame_32 __user *) (unsigned long) sp;
		mctx = &sf->mctx;
	}

	if (!mctx && next_sp - sp >= sizeof(struct rt_signal_frame_32) &&
	    is_rt_sigreturn_32_address(next_ip, sp) &&
	    sane_rt_signal_32_frame(sp)) {
		rt_sf = (struct rt_signal_frame_32 __user *) (unsigned long) sp;
		mctx = &rt_sf->uc.uc_mcontext;
	}

	if (!mctx)
		return NULL;
	return mctx->mc_gregs;
}
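
/*
 * Same walk as perf_callchain_user_64() but for 32-bit user frames:
 * back chain at fp[0], saved LR at fp[1].
 */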
static void perf_callchain_user_32(struct perf_callchain_entry_ctx *entry,
				   struct pt_regs *regs)
{
	unsigned int sp, next_sp;
	unsigned int next_ip;
	unsigned int lr;
	long level = 0;
	unsigned int __user *fp, *uregs;

	next_ip = perf_instruction_pointer(regs);
	lr = regs->link;
	sp = regs->gpr[1];
	perf_callchain_store(entry, next_ip);

	while (entry->nr < entry->max_stack) {
		fp = (unsigned int __user *) (unsigned long) sp;
		if (!valid_user_sp(sp, 0) || read_user_stack_32(fp, &next_sp))
			return;
		if (level > 0 && read_user_stack_32(&fp[1], &next_ip))
			return;

		uregs = signal_frame_32_regs(sp, next_sp, next_ip);
		if (!uregs && level <= 1)
			uregs = signal_frame_32_regs(sp, next_sp, lr);
		if (uregs) {
			/*
			 * This looks like a signal frame, so restart
			 * the stack trace with the values in it.
			 */
			if (read_user_stack_32(&uregs[PT_NIP], &next_ip) ||
			    read_user_stack_32(&uregs[PT_LNK], &lr) ||
			    read_user_stack_32(&uregs[PT_R1], &sp))
				return;
			level = 0;
			perf_callchain_store_context(entry, PERF_CONTEXT_USER);
			perf_callchain_store(entry, next_ip);
			continue;
		}

		if (level == 0)
			next_ip = lr;
		perf_callchain_store(entry, next_ip);
		++level;
		sp = next_sp;
	}
}

void
perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
	if (!is_32bit_task())
		perf_callchain_user_64(entry, regs);
	else
		perf_callchain_user_32(entry, regs);
}