arch/arm/kernel/perf_callchain.c
// SPDX-License-Identifier: GPL-2.0
/*
 * ARM callchain support
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * This code is based on the ARM OProfile backtrace code.
 */

#include <linux/perf_event.h>
#include <linux/uaccess.h>

#include <asm/stacktrace.h>

/*
 * The registers we're interested in are at the end of the variable
 * length saved register structure. The fp points at the end of this
 * structure so the address of this struct is:
 * (struct frame_tail *)(xxx->fp)-1
 *
 * This code has been adapted from the ARM OProfile support.
 */
struct frame_tail {
        struct frame_tail __user *fp;
        unsigned long sp;
        unsigned long lr;
} __attribute__((packed));

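/*
 * A sketch of the frame layout this code assumes (see the comment above;
 * higher addresses towards the top, the stack grows downwards):
 *
 *            |         ...        |
 *   fp ----> +--------------------+
 *            | lr                 |
 *            | sp                 |
 *            | fp (caller's tail) |  <- tail = (struct frame_tail *)fp - 1
 *            +--------------------+
 *            |         ...        |
 */
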
/*
 * Get the return address for a single stackframe and return a pointer to the
 * next frame tail.
 */
static struct frame_tail __user *
user_backtrace(struct frame_tail __user *tail,
               struct perf_callchain_entry_ctx *entry)
{
        struct frame_tail buftail;
        unsigned long err;

        if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
                return NULL;

        /*
         * Copy the frame tail with page faults disabled: this runs in
         * the perf sampling context, so a non-resident page makes the
         * copy fail rather than sleep.
         */
        pagefault_disable();
        err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
        pagefault_enable();

        if (err)
                return NULL;

        perf_callchain_store(entry, buftail.lr);

        /*
         * Frame pointers should strictly progress back up the stack
         * (towards higher addresses).
         */
        if (tail + 1 >= buftail.fp)
                return NULL;

        return buftail.fp - 1;
}

void
perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
        struct frame_tail __user *tail;

        if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
                /* We don't support guest OS callchains yet */
                return;
        }

        perf_callchain_store(entry, regs->ARM_pc);

        if (!current->mm)
                return;

        tail = (struct frame_tail __user *)regs->ARM_fp - 1;

        /* Walk at most max_stack frames; stop on a NULL or unaligned fp. */
        while ((entry->nr < entry->max_stack) &&
               tail && !((unsigned long)tail & 0x3))
                tail = user_backtrace(tail, entry);
}

/*
 * Gets called by walk_stackframe() for every stackframe. This will be called
 * whilst unwinding the stackframe and is like a subroutine return so we use
 * the PC.
 */
static int
callchain_trace(struct stackframe *fr,
                void *data)
{
        struct perf_callchain_entry_ctx *entry = data;
        perf_callchain_store(entry, fr->pc);
        return 0;
}

void
perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
        struct stackframe fr;

        if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
                /* We don't support guest OS callchains yet */
                return;
        }

        arm_get_current_stackframe(regs, &fr);
        walk_stackframe(&fr, callchain_trace, entry);
}

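/*
 * Unlike perf_callchain_user() above, the kernel-side walk does not read
 * saved frames from user memory; it reuses the kernel's own unwinder via
 * walk_stackframe(), feeding each frame's PC to callchain_trace().
 */
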
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
        if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
                return perf_guest_cbs->get_guest_ip();

        return instruction_pointer(regs);
}

unsigned long perf_misc_flags(struct pt_regs *regs)
{
        int misc = 0;

        if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
                if (perf_guest_cbs->is_user_mode())
                        misc |= PERF_RECORD_MISC_GUEST_USER;
                else
                        misc |= PERF_RECORD_MISC_GUEST_KERNEL;
        } else {
                if (user_mode(regs))
                        misc |= PERF_RECORD_MISC_USER;
                else
                        misc |= PERF_RECORD_MISC_KERNEL;
        }

        return misc;
}
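
/*
 * Usage sketch: these are the ARM hooks behind perf callchain sampling.
 * When an event requests PERF_SAMPLE_CALLCHAIN (e.g. "perf record -g"),
 * the core perf code calls perf_callchain_kernel() and
 * perf_callchain_user() above to fill in the sampled call stack.
 */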