x86/mm/pat: Don't report PAT on CPUs that don't support it
[linux/fpc-iii.git] / arch / arm / kernel / perf_callchain.c
blob22bf1f64d99a44291bd037a74b61ec1d249ec01e
/*
 * ARM callchain support
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * This code is based on the ARM OProfile backtrace code.
 */
#include <linux/perf_event.h>
#include <linux/uaccess.h>

#include <asm/stacktrace.h>
/*
 * The registers we're interested in are at the end of the variable
 * length saved register structure. The fp points at the end of this
 * structure so the address of this struct is:
 * (struct frame_tail *)(xxx->fp)-1
 *
 * This code has been adapted from the ARM OProfile support.
 */
22 struct frame_tail {
23 struct frame_tail __user *fp;
24 unsigned long sp;
25 unsigned long lr;
26 } __attribute__((packed));
/*
 * Get the return address for a single stackframe and return a pointer to the
 * next frame tail.
 */
32 static struct frame_tail __user *
33 user_backtrace(struct frame_tail __user *tail,
34 struct perf_callchain_entry_ctx *entry)
36 struct frame_tail buftail;
37 unsigned long err;
39 if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
40 return NULL;
42 pagefault_disable();
43 err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
44 pagefault_enable();
46 if (err)
47 return NULL;
49 perf_callchain_store(entry, buftail.lr);
52 * Frame pointers should strictly progress back up the stack
53 * (towards higher addresses).
55 if (tail + 1 >= buftail.fp)
56 return NULL;
58 return buftail.fp - 1;
61 void
62 perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
64 struct frame_tail __user *tail;
66 if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
67 /* We don't support guest os callchain now */
68 return;
71 perf_callchain_store(entry, regs->ARM_pc);
73 if (!current->mm)
74 return;
76 tail = (struct frame_tail __user *)regs->ARM_fp - 1;
78 while ((entry->nr < entry->max_stack) &&
79 tail && !((unsigned long)tail & 0x3))
80 tail = user_backtrace(tail, entry);
/*
 * Gets called by walk_stackframe() for every stackframe. This will be called
 * whilst unwinding the stackframe and is like a subroutine return so we use
 * the PC.
 */
88 static int
89 callchain_trace(struct stackframe *fr,
90 void *data)
92 struct perf_callchain_entry_ctx *entry = data;
93 perf_callchain_store(entry, fr->pc);
94 return 0;
97 void
98 perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
100 struct stackframe fr;
102 if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
103 /* We don't support guest os callchain now */
104 return;
107 arm_get_current_stackframe(regs, &fr);
108 walk_stackframe(&fr, callchain_trace, entry);
111 unsigned long perf_instruction_pointer(struct pt_regs *regs)
113 if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
114 return perf_guest_cbs->get_guest_ip();
116 return instruction_pointer(regs);
119 unsigned long perf_misc_flags(struct pt_regs *regs)
121 int misc = 0;
123 if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
124 if (perf_guest_cbs->is_user_mode())
125 misc |= PERF_RECORD_MISC_GUEST_USER;
126 else
127 misc |= PERF_RECORD_MISC_GUEST_KERNEL;
128 } else {
129 if (user_mode(regs))
130 misc |= PERF_RECORD_MISC_USER;
131 else
132 misc |= PERF_RECORD_MISC_KERNEL;
135 return misc;