/*
 * Dynamic function tracing support.
 *
 * Copyright (C) 2008 Abhishek Sagar <sagar.abhishek@gmail.com>
 * Copyright (C) 2010 Rabin Vincent <rabin@rab.in>
 *
 * For licensing details, see COPYING.
 *
 * Defines low-level handling of mcount calls when the kernel
 * is compiled with the -pg flag. When using dynamic ftrace, the
 * mcount call-sites are patched with a NOP until they are enabled.
 * All code mutation routines here are called under stop_machine().
 */
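
/*
 * Overview: ftrace_make_call()/ftrace_make_nop() flip individual
 * call-sites between a branch-and-link into the ftrace entry code and
 * the NOP defined below, ftrace_update_ftrace_func() redirects the
 * shared ftrace_call dispatch site, and ftrace_modify_code() performs
 * the actual instruction rewrite.
 */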
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/stop_machine.h>

#include <asm/cacheflush.h>
#include <asm/opcodes.h>
#include <asm/ftrace.h>
#include <asm/insn.h>
#include <asm/set_memory.h>

#ifdef CONFIG_THUMB2_KERNEL
#define	NOP		0xf85deb04	/* pop.w {lr} */
#else
#define	NOP		0xe8bd4000	/* pop {lr} */
#endif
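
/*
 * A rough sketch of an instrumented call-site with the old mcount ABI
 * (exact code generation is compiler-dependent):
 *
 *	push	{lr}
 *	bl	__gnu_mcount_nc		@ rewritten to the NOP above when
 *					@ tracing of this site is disabled
 *
 * The "NOP" is really a pop of lr, undoing the push in the prologue.
 */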

#ifdef CONFIG_DYNAMIC_FTRACE

static int __ftrace_modify_code(void *data)
{
	int *command = data;

	ftrace_modify_all_code(*command);

	return 0;
}

void arch_ftrace_update_code(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}

static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
{
	return NOP;
}

static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
{
	return addr;
}

int ftrace_arch_code_modify_prepare(void)
{
	set_all_modules_text_rw();
	return 0;
}

int ftrace_arch_code_modify_post_process(void)
{
	set_all_modules_text_ro();
	/* Make sure any TLB misses during machine stop are cleared. */
	flush_tlb_all();
	return 0;
}
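
/* Build the branch-and-link that will sit at an enabled call-site. */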
static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
{
	return arm_gen_branch_link(pc, addr);
}
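
/*
 * Rewrite one instruction at 'pc'. With 'validate' set, the current
 * contents are read back and must match 'old' before anything is
 * written; the dispatch sites are patched without validation.
 */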
static int ftrace_modify_code(unsigned long pc, unsigned long old,
			      unsigned long new, bool validate)
{
	unsigned long replaced;

	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		old = __opcode_to_mem_thumb32(old);
		new = __opcode_to_mem_thumb32(new);
	} else {
		old = __opcode_to_mem_arm(old);
		new = __opcode_to_mem_arm(new);
	}

	if (validate) {
		if (probe_kernel_read(&replaced, (void *)pc, MCOUNT_INSN_SIZE))
			return -EFAULT;

		if (replaced != old)
			return -EINVAL;
	}

	if (probe_kernel_write((void *)pc, &new, MCOUNT_INSN_SIZE))
		return -EPERM;

	flush_icache_range(pc, pc + MCOUNT_INSN_SIZE);

	return 0;
}
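
/*
 * Redirect the ftrace_call (and, when DYNAMIC_FTRACE_WITH_REGS is
 * enabled, the ftrace_regs_call) dispatch site to the newly selected
 * tracer function.
 */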
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long pc;
	unsigned long new;
	int ret;

	pc = (unsigned long)&ftrace_call;
	new = ftrace_call_replace(pc, (unsigned long)func);

	ret = ftrace_modify_code(pc, 0, new, false);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	if (!ret) {
		pc = (unsigned long)&ftrace_regs_call;
		new = ftrace_call_replace(pc, (unsigned long)func);

		ret = ftrace_modify_code(pc, 0, new, false);
	}
#endif

	return ret;
}
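
/* Enable tracing of a single call-site: replace its NOP with a call. */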
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long new, old;
	unsigned long ip = rec->ip;

	old = ftrace_nop_replace(rec);

	new = ftrace_call_replace(ip, adjust_address(rec, addr));

	return ftrace_modify_code(rec->ip, old, new, true);
}
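
/*
 * With DYNAMIC_FTRACE_WITH_REGS, ftrace_modify_call() retargets an
 * already-enabled call-site from old_addr to addr.
 */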
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS

int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	unsigned long new, old;
	unsigned long ip = rec->ip;

	old = ftrace_call_replace(ip, adjust_address(rec, old_addr));

	new = ftrace_call_replace(ip, adjust_address(rec, addr));

	return ftrace_modify_code(rec->ip, old, new, true);
}

#endif
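
/* Disable tracing of a single call-site: replace its call with the NOP. */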
int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned long old;
	unsigned long new;
	int ret;

	old = ftrace_call_replace(ip, adjust_address(rec, addr));
	new = ftrace_nop_replace(rec);
	ret = ftrace_modify_code(ip, old, new, true);

	return ret;
}

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
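
/*
 * Function graph tracing: prepare_ftrace_return() replaces the saved
 * return address of the instrumented function with return_to_handler()
 * so that the exit can be traced as well.
 */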
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long) &return_to_handler;
	struct ftrace_graph_ent trace;
	unsigned long old;
	int err;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	old = *parent;
	*parent = return_hooker;

	trace.func = self_addr;
	trace.depth = current->curr_ret_stack + 1;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		*parent = old;
		return;
	}

	err = ftrace_push_return_trace(old, self_addr, &trace.depth,
				       frame_pointer, NULL);
	if (err == -EBUSY) {
		*parent = old;
		return;
	}
}
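
/*
 * The graph caller is switched on and off by patching the
 * ftrace_graph_call (and ftrace_graph_regs_call) site between a plain
 * "mov r0, r0" NOP and a branch to the graph caller.
 */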
#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_graph_call;
extern unsigned long ftrace_graph_call_old;
extern void ftrace_graph_caller_old(void);
extern unsigned long ftrace_graph_regs_call;
extern void ftrace_graph_regs_caller(void);

static int __ftrace_modify_caller(unsigned long *callsite,
				  void (*func) (void), bool enable)
{
	unsigned long caller_fn = (unsigned long) func;
	unsigned long pc = (unsigned long) callsite;
	unsigned long branch = arm_gen_branch(pc, caller_fn);
	unsigned long nop = 0xe1a00000;	/* mov r0, r0 */
	unsigned long old = enable ? nop : branch;
	unsigned long new = enable ? branch : nop;

	return ftrace_modify_code(pc, old, new, true);
}
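
/*
 * Patch both the plain and, when configured, the regs-saving graph
 * call-sites; the second patch is skipped if the first one fails.
 */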
static int ftrace_modify_graph_caller(bool enable)
{
	int ret;

	ret = __ftrace_modify_caller(&ftrace_graph_call,
				     ftrace_graph_caller,
				     enable);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	if (!ret)
		ret = __ftrace_modify_caller(&ftrace_graph_regs_call,
					     ftrace_graph_regs_caller,
					     enable);
#endif

	return ret;
}

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(false);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */