arch/arm/kernel/ftrace.c

/*
 * Dynamic function tracing support.
 *
 * Copyright (C) 2008 Abhishek Sagar <sagar.abhishek@gmail.com>
 * Copyright (C) 2010 Rabin Vincent <rabin@rab.in>
 *
 * For licensing details, see COPYING.
 *
 * Defines low-level handling of mcount calls when the kernel
 * is compiled with the -pg flag. When using dynamic ftrace, the
 * mcount call-sites get patched with a NOP until they are enabled.
 * All code mutation routines here are called under stop_machine().
 */

#include <linux/ftrace.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/opcodes.h>
#include <asm/ftrace.h>

#include "insn.h"

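/*
 * Illustration (assuming the EABI __gnu_mcount_nc scheme used by newer
 * toolchains): a function built with -pg typically begins with
 *
 *	push	{lr}
 *	bl	__gnu_mcount_nc
 *
 * Dynamic ftrace only rewrites the bl instruction, so the "NOP" it
 * installs must also undo the push - hence pop {lr} below rather than
 * a real no-op.
 */
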
#ifdef CONFIG_THUMB2_KERNEL
#define	NOP		0xf85deb04	/* pop.w {lr} */
#else
#define	NOP		0xe8bd4000	/* pop {lr} */
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
#ifdef CONFIG_OLD_MCOUNT
#define OLD_MCOUNT_ADDR	((unsigned long) mcount)
#define OLD_FTRACE_ADDR	((unsigned long) ftrace_caller_old)

#define	OLD_NOP		0xe1a00000	/* mov r0, r0 */

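/*
 * CONFIG_OLD_MCOUNT adds compatibility with objects built by older
 * toolchains that emit a plain call to mcount; those call sites are
 * NOPed with "mov r0, r0" instead and are redirected to the *_old
 * trampolines by adjust_address() below.
 */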
static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
{
	return rec->arch.old_mcount ? OLD_NOP : NOP;
}

static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
{
	if (!rec->arch.old_mcount)
		return addr;

	if (addr == MCOUNT_ADDR)
		addr = OLD_MCOUNT_ADDR;
	else if (addr == FTRACE_ADDR)
		addr = OLD_FTRACE_ADDR;

	return addr;
}
#else
static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
{
	return NOP;
}

static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
{
	return addr;
}
#endif

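/*
 * Build the branch-and-link instruction that a patched call site will
 * use to reach the ftrace trampoline at 'addr'.
 */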
static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
{
	return arm_gen_branch_link(pc, addr);
}

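/*
 * Patch one 32-bit instruction at 'pc': convert both opcodes to their
 * in-memory representation (endianness, Thumb-2 halfword order),
 * optionally check that the instruction currently present matches 'old',
 * write 'new' and flush the icache for that range.
 */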
static int ftrace_modify_code(unsigned long pc, unsigned long old,
			      unsigned long new, bool validate)
{
	unsigned long replaced;

	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		old = __opcode_to_mem_thumb32(old);
		new = __opcode_to_mem_thumb32(new);
	} else {
		old = __opcode_to_mem_arm(old);
		new = __opcode_to_mem_arm(new);
	}

	if (validate) {
		if (probe_kernel_read(&replaced, (void *)pc, MCOUNT_INSN_SIZE))
			return -EFAULT;

		if (replaced != old)
			return -EINVAL;
	}

	if (probe_kernel_write((void *)pc, &new, MCOUNT_INSN_SIZE))
		return -EPERM;

	flush_icache_range(pc, pc + MCOUNT_INSN_SIZE);

	return 0;
}

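/*
 * Point the call instruction inside ftrace_caller() (and, with
 * CONFIG_OLD_MCOUNT, ftrace_caller_old()) at the currently selected
 * tracer.  No validation is done: the existing instruction is whatever
 * tracer was installed before.
 */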
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long pc;
	unsigned long new;
	int ret;

	pc = (unsigned long)&ftrace_call;
	new = ftrace_call_replace(pc, (unsigned long)func);

	ret = ftrace_modify_code(pc, 0, new, false);

#ifdef CONFIG_OLD_MCOUNT
	if (!ret) {
		pc = (unsigned long)&ftrace_call_old;
		new = ftrace_call_replace(pc, (unsigned long)func);

		ret = ftrace_modify_code(pc, 0, new, false);
	}
#endif

	return ret;
}

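/* Enable one call site: replace its NOP with a branch to the trampoline. */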
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long new, old;
	unsigned long ip = rec->ip;

	old = ftrace_nop_replace(rec);
	new = ftrace_call_replace(ip, adjust_address(rec, addr));

	return ftrace_modify_code(rec->ip, old, new, true);
}

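/*
 * Disable one call site: replace the branch with the NOP.  If validation
 * fails on the initial conversion from MCOUNT_ADDR, assume the site was
 * built against the old mcount ABI and retry with the old opcodes.
 */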
int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned long old;
	unsigned long new;
	int ret;

	old = ftrace_call_replace(ip, adjust_address(rec, addr));
	new = ftrace_nop_replace(rec);
	ret = ftrace_modify_code(ip, old, new, true);

#ifdef CONFIG_OLD_MCOUNT
	if (ret == -EINVAL && addr == MCOUNT_ADDR) {
		rec->arch.old_mcount = true;

		old = ftrace_call_replace(ip, adjust_address(rec, addr));
		new = ftrace_nop_replace(rec);
		ret = ftrace_modify_code(ip, old, new, true);
	}
#endif

	return ret;
}

int __init ftrace_dyn_arch_init(void *data)
{
	*(unsigned long *)data = 0;

	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
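/*
 * Called from the ftrace/graph caller assembly with a pointer to the
 * saved return address ('parent').  Replace it with return_to_handler
 * so the graph tracer regains control when the traced function returns,
 * after recording the real return address on the return stack.
 */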
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long) &return_to_handler;
	struct ftrace_graph_ent trace;
	unsigned long old;
	int err;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	old = *parent;
	*parent = return_hooker;

	trace.func = self_addr;
	trace.depth = current->curr_ret_stack + 1;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		*parent = old;
		return;
	}

	err = ftrace_push_return_trace(old, self_addr, &trace.depth,
				       frame_pointer);
	if (err == -EBUSY) {
		*parent = old;
		return;
	}
}

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_graph_call;
extern unsigned long ftrace_graph_call_old;
extern void ftrace_graph_caller_old(void);

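/*
 * Flip one patch site inside the ftrace caller between a NOP and a
 * branch to the graph caller, validating the instruction being replaced.
 */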
static int __ftrace_modify_caller(unsigned long *callsite,
				  void (*func) (void), bool enable)
{
	unsigned long caller_fn = (unsigned long) func;
	unsigned long pc = (unsigned long) callsite;
	unsigned long branch = arm_gen_branch(pc, caller_fn);
	unsigned long nop = 0xe1a00000;	/* mov r0, r0 */
	unsigned long old = enable ? nop : branch;
	unsigned long new = enable ? branch : nop;

	return ftrace_modify_code(pc, old, new, true);
}

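/*
 * Enable or disable the jump to ftrace_graph_caller (and, with
 * CONFIG_OLD_MCOUNT, ftrace_graph_caller_old) in the tracer trampolines.
 */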
static int ftrace_modify_graph_caller(bool enable)
{
	int ret;

	ret = __ftrace_modify_caller(&ftrace_graph_call,
				     ftrace_graph_caller,
				     enable);

#ifdef CONFIG_OLD_MCOUNT
	if (!ret)
		ret = __ftrace_modify_caller(&ftrace_graph_call_old,
					     ftrace_graph_caller_old,
					     enable);
#endif

	return ret;
}

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(false);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */