/*
 * Performance counter callchain support - powerpc architecture code
 *
 * Copyright © 2009 Paul Mackerras, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <asm/ptrace.h>
#include <asm/pgtable.h>
#include <asm/sigcontext.h>
#include <asm/ucontext.h>
#include <asm/vdso.h>
#ifdef CONFIG_PPC64
#include "../kernel/ppc32.h"
#endif
#include <asm/pte-walk.h>
/*
 * Is sp valid as the address of the next kernel stack frame after prev_sp?
 * The next frame may be in a different stack area but should not go
 * back down in the same stack area.
 */
static int valid_next_sp(unsigned long sp, unsigned long prev_sp)
{
        if (sp & 0xf)
                return 0;               /* must be 16-byte aligned */
        if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD))
                return 0;
        if (sp >= prev_sp + STACK_FRAME_MIN_SIZE)
                return 1;
        /*
         * sp could decrease when we jump off an interrupt stack
         * back to the regular process stack.
         */
        if ((sp & ~(THREAD_SIZE - 1)) != (prev_sp & ~(THREAD_SIZE - 1)))
                return 1;
        return 0;
}
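
/*
 * The walkers below follow the basic PowerPC stack frame layout: the
 * first word of each frame is a "back chain" pointer to the caller's
 * frame, and the caller's saved LR sits at a fixed offset from it
 * (STACK_FRAME_LR_SAVE words in, for kernel frames).  Roughly:
 *
 *      sp --> +---------------------+
 *             | back chain          | --> caller's frame (next_sp)
 *             | ...                 |
 *             | saved LR            | <-- fp[STACK_FRAME_LR_SAVE]
 *             +---------------------+
 */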
void
perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
        unsigned long sp, next_sp;
        unsigned long next_ip;
        unsigned long lr;
        long level = 0;
        unsigned long *fp;

        lr = regs->link;
        sp = regs->gpr[1];
        perf_callchain_store(entry, perf_instruction_pointer(regs));

        if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD))
                return;

        for (;;) {
                fp = (unsigned long *) sp;
                next_sp = fp[0];

                if (next_sp == sp + STACK_INT_FRAME_SIZE &&
                    fp[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
                        /*
                         * This looks like an interrupt frame for an
                         * interrupt that occurred in the kernel
                         */
                        regs = (struct pt_regs *)(sp + STACK_FRAME_OVERHEAD);
                        next_ip = regs->nip;
                        lr = regs->link;
                        level = 0;
                        perf_callchain_store_context(entry, PERF_CONTEXT_KERNEL);

                } else {
                        if (level == 0)
                                next_ip = lr;
                        else
                                next_ip = fp[STACK_FRAME_LR_SAVE];

                        /*
                         * We can't tell which of the first two addresses
                         * we get are valid, but we can filter out the
                         * obviously bogus ones here.  We replace them
                         * with 0 rather than removing them entirely so
                         * that userspace can tell which is which.
                         */
                        if ((level == 1 && next_ip == lr) ||
                            (level <= 1 && !kernel_text_address(next_ip)))
                                next_ip = 0;

                        ++level;
                }

                perf_callchain_store(entry, next_ip);
                if (!valid_next_sp(next_sp, sp))
                        return;
                sp = next_sp;
        }
}
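
/*
 * Everything below deals with unwinding user stacks.  We may be called
 * from interrupt context, so reads of the user stack can fault and
 * must not take the normal page fault path; the read_user_stack_*
 * helpers below handle this safely.
 */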
#ifdef CONFIG_PPC64
/*
 * On 64-bit we don't want to invoke hash_page on user addresses from
 * interrupt context, so if the access faults, we read the page tables
 * to find which page (if any) is mapped and access it directly.
 */
static int read_user_stack_slow(void __user *ptr, void *buf, int nb)
{
        int ret = -EFAULT;
        pgd_t *pgdir;
        pte_t *ptep, pte;
        unsigned int shift;
        unsigned long addr = (unsigned long) ptr;
        unsigned long offset;
        unsigned long pfn, flags;
        void *kaddr;

        pgdir = current->mm->pgd;
        if (!pgdir)
                return -EFAULT;

        local_irq_save(flags);
        ptep = find_current_mm_pte(pgdir, addr, NULL, &shift);
        if (!ptep)
                goto err_out;
        if (!shift)
                shift = PAGE_SHIFT;

        /* offset of the address within the (possibly huge) page */
        offset = addr & ((1UL << shift) - 1);

        pte = READ_ONCE(*ptep);
        if (!pte_present(pte) || !pte_user(pte))
                goto err_out;
        pfn = pte_pfn(pte);
        if (!page_is_ram(pfn))
                goto err_out;

        /* no highmem to worry about here */
        kaddr = pfn_to_kaddr(pfn);
        memcpy(buf, kaddr + offset, nb);
        ret = 0;
err_out:
        local_irq_restore(flags);
        return ret;
}
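
/*
 * Fast path: try a __get_user_inatomic() with page faults disabled;
 * if that faults, fall back to read_user_stack_slow(), which walks
 * the page tables directly.
 */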
static int read_user_stack_64(unsigned long __user *ptr, unsigned long *ret)
{
        if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned long) ||
            ((unsigned long)ptr & 7))
                return -EFAULT;

        pagefault_disable();
        if (!__get_user_inatomic(*ret, ptr)) {
                pagefault_enable();
                return 0;
        }
        pagefault_enable();

        return read_user_stack_slow(ptr, ret, 8);
}
static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
{
        if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
            ((unsigned long)ptr & 3))
                return -EFAULT;

        pagefault_disable();
        if (!__get_user_inatomic(*ret, ptr)) {
                pagefault_enable();
                return 0;
        }
        pagefault_enable();

        return read_user_stack_slow(ptr, ret, 4);
}
static inline int valid_user_sp(unsigned long sp, int is_64)
{
        /* 32-bit tasks keep their stack below 4GB */
        if (!sp || (sp & 7) || sp > (is_64 ? TASK_SIZE : 0x100000000UL) - 32)
                return 0;
        return 1;
}
/*
 * 64-bit user processes use the same stack frame for RT and non-RT signals.
 */
struct signal_frame_64 {
        char            dummy[__SIGNAL_FRAMESIZE];
        struct ucontext uc;
        unsigned long   unused[2];
        unsigned int    tramp[6];
        struct siginfo  *pinfo;
        void            *puc;
        struct siginfo  info;
        char            abigap[288];
};
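
/*
 * A sigreturn address can point either at the trampoline the kernel
 * writes into the signal frame itself (the tramp[] words above) or at
 * the sigreturn trampoline in the vDSO, so both are checked below.
 */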
static int is_sigreturn_64_address(unsigned long nip, unsigned long fp)
{
        if (nip == fp + offsetof(struct signal_frame_64, tramp))
                return 1;
        if (vdso64_rt_sigtramp && current->mm->context.vdso_base &&
            nip == current->mm->context.vdso_base + vdso64_rt_sigtramp)
                return 1;
        return 0;
}
/*
 * Do some sanity checking on the signal frame pointed to by sp.
 * We check the pinfo and puc pointers in the frame.
 */
static int sane_signal_64_frame(unsigned long sp)
{
        struct signal_frame_64 __user *sf;
        unsigned long pinfo, puc;

        sf = (struct signal_frame_64 __user *) sp;
        if (read_user_stack_64((unsigned long __user *) &sf->pinfo, &pinfo) ||
            read_user_stack_64((unsigned long __user *) &sf->puc, &puc))
                return 0;
        return pinfo == (unsigned long) &sf->info &&
                puc == (unsigned long) &sf->uc;
}
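
/*
 * In the 64-bit ABI the saved LR lives two words into the caller's
 * frame (SP + 16), hence the fp[2] read in the walker below; at level
 * 0 the live LR from the regs is used instead, since a leaf function
 * may not have saved it yet.
 */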
static void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
                                   struct pt_regs *regs)
{
        unsigned long sp, next_sp;
        unsigned long next_ip;
        unsigned long lr;
        long level = 0;
        struct signal_frame_64 __user *sigframe;
        unsigned long __user *fp, *uregs;

        next_ip = perf_instruction_pointer(regs);
        lr = regs->link;
        sp = regs->gpr[1];
        perf_callchain_store(entry, next_ip);

        while (entry->nr < entry->max_stack) {
                fp = (unsigned long __user *) sp;
                if (!valid_user_sp(sp, 1) || read_user_stack_64(fp, &next_sp))
                        return;
                if (level > 0 && read_user_stack_64(&fp[2], &next_ip))
                        return;

                /*
                 * Note: the next_sp - sp >= signal frame size check
                 * is true when next_sp < sp, which can happen when
                 * transitioning from an alternate signal stack to the
                 * normal stack.
                 */
                if (next_sp - sp >= sizeof(struct signal_frame_64) &&
                    (is_sigreturn_64_address(next_ip, sp) ||
                     (level <= 1 && is_sigreturn_64_address(lr, sp))) &&
                    sane_signal_64_frame(sp)) {
                        /*
                         * This looks like a signal frame
                         */
                        sigframe = (struct signal_frame_64 __user *) sp;
                        uregs = sigframe->uc.uc_mcontext.gp_regs;
                        if (read_user_stack_64(&uregs[PT_NIP], &next_ip) ||
                            read_user_stack_64(&uregs[PT_LNK], &lr) ||
                            read_user_stack_64(&uregs[PT_R1], &sp))
                                return;
                        level = 0;
                        perf_callchain_store_context(entry, PERF_CONTEXT_USER);
                        perf_callchain_store(entry, next_ip);
                        continue;
                }

                if (level == 0)
                        next_ip = lr;
                perf_callchain_store(entry, next_ip);
                ++level;
                sp = next_sp;
        }
}
static inline int current_is_64bit(void)
{
        /*
         * We can't use test_thread_flag() here because we may be on an
         * interrupt stack, and the thread flags don't get copied over
         * from the thread_info on the main stack to the interrupt stack.
         */
        return !test_ti_thread_flag(task_thread_info(current), TIF_32BIT);
}
#else  /* CONFIG_PPC64 */
/*
 * On 32-bit we just access the address and let hash_page create a
 * HPTE if necessary, so there is no need to fall back to reading
 * the page tables.  Since this is called at interrupt level,
 * do_page_fault() won't treat a DSI as a page fault.
 */
static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
{
        int rc;

        if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
            ((unsigned long)ptr & 3))
                return -EFAULT;

        pagefault_disable();
        rc = __get_user_inatomic(*ret, ptr);
        pagefault_enable();

        return rc;
}
static inline void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
                                          struct pt_regs *regs)
{
}
static inline int current_is_64bit(void)
{
        return 0;
}
static inline int valid_user_sp(unsigned long sp, int is_64)
{
        if (!sp || (sp & 7) || sp > TASK_SIZE - 32)
                return 0;
        return 1;
}
#define __SIGNAL_FRAMESIZE32    __SIGNAL_FRAMESIZE
#define sigcontext32            sigcontext
#define mcontext32              mcontext
#define ucontext32              ucontext
#define compat_siginfo_t        struct siginfo

#endif /* CONFIG_PPC64 */
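
/*
 * On 64-bit kernels the *32 types below are the compat layouts used
 * by 32-bit user processes; on 32-bit kernels the #defines above map
 * them to the native types, so the same code handles both cases.
 */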
/*
 * Layout for non-RT signal frames
 */
struct signal_frame_32 {
        char                    dummy[__SIGNAL_FRAMESIZE32];
        struct sigcontext32     sctx;
        struct mcontext32       mctx;
        int                     abigap[56];
};
/*
 * Layout for RT signal frames
 */
struct rt_signal_frame_32 {
        char                    dummy[__SIGNAL_FRAMESIZE32 + 16];
        compat_siginfo_t        info;
        struct ucontext32       uc;
        int                     abigap[56];
};
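
/*
 * For 32-bit frames the kernel writes the sigreturn trampoline into
 * the mc_pad words of the mcontext, so a return address that points
 * there (or at the matching vDSO trampoline) identifies the frame.
 */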
static int is_sigreturn_32_address(unsigned int nip, unsigned int fp)
{
        if (nip == fp + offsetof(struct signal_frame_32, mctx.mc_pad))
                return 1;
        if (vdso32_sigtramp && current->mm->context.vdso_base &&
            nip == current->mm->context.vdso_base + vdso32_sigtramp)
                return 1;
        return 0;
}
static int is_rt_sigreturn_32_address(unsigned int nip, unsigned int fp)
{
        if (nip == fp + offsetof(struct rt_signal_frame_32,
                                 uc.uc_mcontext.mc_pad))
                return 1;
        if (vdso32_rt_sigtramp && current->mm->context.vdso_base &&
            nip == current->mm->context.vdso_base + vdso32_rt_sigtramp)
                return 1;
        return 0;
}
static int sane_signal_32_frame(unsigned int sp)
{
        struct signal_frame_32 __user *sf;
        unsigned int regs;

        sf = (struct signal_frame_32 __user *) (unsigned long) sp;
        if (read_user_stack_32((unsigned int __user *) &sf->sctx.regs, &regs))
                return 0;
        return regs == (unsigned long) &sf->mctx;
}
static int sane_rt_signal_32_frame(unsigned int sp)
{
        struct rt_signal_frame_32 __user *sf;
        unsigned int regs;

        sf = (struct rt_signal_frame_32 __user *) (unsigned long) sp;
        if (read_user_stack_32((unsigned int __user *) &sf->uc.uc_regs, &regs))
                return 0;
        return regs == (unsigned long) &sf->uc.uc_mcontext;
}
static unsigned int __user *signal_frame_32_regs(unsigned int sp,
                                unsigned int next_sp, unsigned int next_ip)
{
        struct mcontext32 __user *mctx = NULL;
        struct signal_frame_32 __user *sf;
        struct rt_signal_frame_32 __user *rt_sf;

        /*
         * Note: the next_sp - sp >= signal frame size check
         * is true when next_sp < sp, for example, when
         * transitioning from an alternate signal stack to the
         * normal stack.
         */
        if (next_sp - sp >= sizeof(struct signal_frame_32) &&
            is_sigreturn_32_address(next_ip, sp) &&
            sane_signal_32_frame(sp)) {
                sf = (struct signal_frame_32 __user *) (unsigned long) sp;
                mctx = &sf->mctx;
        }

        if (!mctx && next_sp - sp >= sizeof(struct rt_signal_frame_32) &&
            is_rt_sigreturn_32_address(next_ip, sp) &&
            sane_rt_signal_32_frame(sp)) {
                rt_sf = (struct rt_signal_frame_32 __user *) (unsigned long) sp;
                mctx = &rt_sf->uc.uc_mcontext;
        }

        if (!mctx)
                return NULL;
        return mctx->mc_gregs;
}
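
/*
 * The 32-bit ABI saves the LR one word into the caller's frame
 * (SP + 4), hence the fp[1] read below; as on 64-bit, level 0 takes
 * the LR straight from the regs.
 */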
static void perf_callchain_user_32(struct perf_callchain_entry_ctx *entry,
                                   struct pt_regs *regs)
{
        unsigned int sp, next_sp;
        unsigned int next_ip;
        unsigned int lr;
        long level = 0;
        unsigned int __user *fp, *uregs;

        next_ip = perf_instruction_pointer(regs);
        lr = regs->link;
        sp = regs->gpr[1];
        perf_callchain_store(entry, next_ip);

        while (entry->nr < entry->max_stack) {
                fp = (unsigned int __user *) (unsigned long) sp;
                if (!valid_user_sp(sp, 0) || read_user_stack_32(fp, &next_sp))
                        return;
                if (level > 0 && read_user_stack_32(&fp[1], &next_ip))
                        return;

                uregs = signal_frame_32_regs(sp, next_sp, next_ip);
                if (!uregs && level <= 1)
                        uregs = signal_frame_32_regs(sp, next_sp, lr);
                if (uregs) {
                        /*
                         * This looks like a signal frame, so restart
                         * the stack trace with the values in it.
                         */
                        if (read_user_stack_32(&uregs[PT_NIP], &next_ip) ||
                            read_user_stack_32(&uregs[PT_LNK], &lr) ||
                            read_user_stack_32(&uregs[PT_R1], &sp))
                                return;
                        level = 0;
                        perf_callchain_store_context(entry, PERF_CONTEXT_USER);
                        perf_callchain_store(entry, next_ip);
                        continue;
                }

                if (level == 0)
                        next_ip = lr;
                perf_callchain_store(entry, next_ip);
                ++level;
                sp = next_sp;
        }
}
void
perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
        if (current_is_64bit())
                perf_callchain_user_64(entry, regs);
        else
                perf_callchain_user_32(entry, regs);
}