/*
 * Dynamic function tracing support.
 *
 * Copyright (C) 2008 Abhishek Sagar <sagar.abhishek@gmail.com>
 * Copyright (C) 2010 Rabin Vincent <rabin@rab.in>
 *
 * For licensing details, see COPYING.
 *
 * Defines low-level handling of mcount calls when the kernel
 * is compiled with the -pg flag. When using dynamic ftrace, the
 * mcount call sites are patched with a NOP until they are enabled.
 * All code mutation routines here are called under stop_machine().
 */

#include <linux/ftrace.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/ftrace.h>

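/*
 * When a call site is disabled, the patched-in "NOP" must also undo the
 * "push {lr}" done by the -pg call sequence before it branches to
 * __gnu_mcount_nc, which is why the replacement instruction is a pop of
 * lr rather than a literal no-op.
 */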
#ifdef CONFIG_THUMB2_KERNEL
#define NOP		0xeb04f85d	/* pop.w {lr} */
#else
#define NOP		0xe8bd4000	/* pop {lr} */
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
#ifdef CONFIG_OLD_MCOUNT
#define OLD_MCOUNT_ADDR	((unsigned long) mcount)
#define OLD_FTRACE_ADDR	((unsigned long) ftrace_caller_old)

#define OLD_NOP		0xe1a00000	/* mov r0, r0 */

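/*
 * CONFIG_OLD_MCOUNT adds support for call sites built against the old
 * mcount ABI.  Those sites are disabled with a plain "mov r0, r0", and
 * adjust_address() redirects the generic MCOUNT_ADDR/FTRACE_ADDR to the
 * old-ABI entry points for records flagged as old_mcount.
 */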
static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
{
        return rec->arch.old_mcount ? OLD_NOP : NOP;
}

static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
{
        if (!rec->arch.old_mcount)
                return addr;

        if (addr == MCOUNT_ADDR)
                addr = OLD_MCOUNT_ADDR;
        else if (addr == FTRACE_ADDR)
                addr = OLD_FTRACE_ADDR;

        return addr;
}
#else
static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
{
        return NOP;
}

static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
{
        return addr;
}
#endif

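/*
 * ftrace_gen_branch() encodes an unconditional branch (branch-and-link if
 * 'link' is set) from 'pc' to 'addr'.  The Thumb-2 variant assembles the
 * 32-bit B.W/BL encoding from the S, J1/J2, imm10 and imm11 fields and
 * covers a +/-16 MB range; the ARM variant uses the 24-bit word offset
 * form with a +/-32 MB range.
 */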
#ifdef CONFIG_THUMB2_KERNEL
static unsigned long ftrace_gen_branch(unsigned long pc, unsigned long addr,
                                       bool link)
{
        unsigned long s, j1, j2, i1, i2, imm10, imm11;
        unsigned long first, second;
        long offset;

        offset = (long)addr - (long)(pc + 4);
        if (offset < -16777216 || offset > 16777214) {
                WARN_ON_ONCE(1);
                return 0;
        }

        s     = (offset >> 24) & 0x1;
        i1    = (offset >> 23) & 0x1;
        i2    = (offset >> 22) & 0x1;
        imm10 = (offset >> 12) & 0x3ff;
        imm11 = (offset >>  1) & 0x7ff;

        j1 = (!i1) ^ s;
        j2 = (!i2) ^ s;

        first = 0xf000 | (s << 10) | imm10;
        second = 0x9000 | (j1 << 13) | (j2 << 11) | imm11;
        if (link)
                second |= 1 << 14;

        return (second << 16) | first;
}
#else
static unsigned long ftrace_gen_branch(unsigned long pc, unsigned long addr,
                                       bool link)
{
        unsigned long opcode = 0xea000000;
        long offset;

        if (link)
                opcode |= 1 << 24;

        offset = (long)addr - (long)(pc + 8);
        if (unlikely(offset < -33554432 || offset > 33554428)) {
                /* Can't generate branches that far (see the ARM ARM). Ftrace
                 * doesn't generate branches outside of kernel text.
                 */
                WARN_ON_ONCE(1);
                return 0;
        }

        offset = (offset >> 2) & 0x00ffffff;

        return opcode | offset;
}
#endif

static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
{
        return ftrace_gen_branch(pc, addr, true);
}

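/*
 * Patch one instruction: read back what is currently at 'pc', check that
 * it matches the expected 'old' value, write 'new' in its place and flush
 * the icache so the updated text is fetched by the CPU.
 */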
static int ftrace_modify_code(unsigned long pc, unsigned long old,
                              unsigned long new)
{
        unsigned long replaced;

        if (probe_kernel_read(&replaced, (void *)pc, MCOUNT_INSN_SIZE))
                return -EFAULT;

        if (replaced != old)
                return -EINVAL;

        if (probe_kernel_write((void *)pc, &new, MCOUNT_INSN_SIZE))
                return -EPERM;

        flush_icache_range(pc, pc + MCOUNT_INSN_SIZE);

        return 0;
}

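/*
 * Repoint the call slot inside the ftrace_caller trampoline (the
 * ftrace_call site) at the currently registered tracer.  With
 * CONFIG_OLD_MCOUNT the slot in the old-ABI trampoline is updated too.
 */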
int ftrace_update_ftrace_func(ftrace_func_t func)
{
        unsigned long pc, old;
        unsigned long new;
        int ret;

        pc = (unsigned long)&ftrace_call;
        memcpy(&old, &ftrace_call, MCOUNT_INSN_SIZE);
        new = ftrace_call_replace(pc, (unsigned long)func);

        ret = ftrace_modify_code(pc, old, new);

#ifdef CONFIG_OLD_MCOUNT
        if (!ret) {
                pc = (unsigned long)&ftrace_call_old;
                memcpy(&old, &ftrace_call_old, MCOUNT_INSN_SIZE);
                new = ftrace_call_replace(pc, (unsigned long)func);

                ret = ftrace_modify_code(pc, old, new);
        }
#endif

        return ret;
}

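/*
 * Enable a call site: replace its NOP with a branch-and-link to addr,
 * adjusted for old-mcount records where necessary.
 */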
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned long new, old;
        unsigned long ip = rec->ip;

        old = ftrace_nop_replace(rec);
        new = ftrace_call_replace(ip, adjust_address(rec, addr));

        return ftrace_modify_code(rec->ip, old, new);
}

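/*
 * Disable a call site.  If the expected new-ABI call isn't found the first
 * time round (-EINVAL), assume the site uses the old mcount ABI, flag the
 * record and retry with the old-ABI call and NOP.
 */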
int ftrace_make_nop(struct module *mod,
                    struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned long ip = rec->ip;
        unsigned long old;
        unsigned long new;
        int ret;

        old = ftrace_call_replace(ip, adjust_address(rec, addr));
        new = ftrace_nop_replace(rec);
        ret = ftrace_modify_code(ip, old, new);

#ifdef CONFIG_OLD_MCOUNT
        if (ret == -EINVAL && addr == MCOUNT_ADDR) {
                rec->arch.old_mcount = true;

                old = ftrace_call_replace(ip, adjust_address(rec, addr));
                new = ftrace_nop_replace(rec);
                ret = ftrace_modify_code(ip, old, new);
        }
#endif

        return ret;
}

int __init ftrace_dyn_arch_init(void *data)
{
        *(unsigned long *)data = 0;

        return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
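/*
 * Called on function entry when the graph tracer is active: push the real
 * return address onto the per-task return stack and replace it in the
 * parent frame with return_to_handler, so function exit can be traced too.
 */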
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
                           unsigned long frame_pointer)
{
        unsigned long return_hooker = (unsigned long) &return_to_handler;
        struct ftrace_graph_ent trace;
        unsigned long old;
        int err;

        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                return;

        old = *parent;
        *parent = return_hooker;

        err = ftrace_push_return_trace(old, self_addr, &trace.depth,
                                       frame_pointer);
        if (err == -EBUSY) {
                *parent = old;
                return;
        }

        trace.func = self_addr;

        /* Only trace if the calling function expects to */
        if (!ftrace_graph_entry(&trace)) {
                current->curr_ret_stack--;
                *parent = old;
        }
}

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_graph_call;
extern unsigned long ftrace_graph_call_old;
extern void ftrace_graph_caller_old(void);

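/*
 * ftrace_graph_call (and its old-ABI twin) is a patchable slot in the
 * ftrace trampoline: enabling the graph tracer replaces the "mov r0, r0"
 * there with a branch to ftrace_graph_caller, disabling it restores the
 * nop.
 */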
static int __ftrace_modify_caller(unsigned long *callsite,
                                  void (*func) (void), bool enable)
{
        unsigned long caller_fn = (unsigned long) func;
        unsigned long pc = (unsigned long) callsite;
        unsigned long branch = ftrace_gen_branch(pc, caller_fn, false);
        unsigned long nop = 0xe1a00000;	/* mov r0, r0 */
        unsigned long old = enable ? nop : branch;
        unsigned long new = enable ? branch : nop;

        return ftrace_modify_code(pc, old, new);
}

static int ftrace_modify_graph_caller(bool enable)
{
        int ret;

        ret = __ftrace_modify_caller(&ftrace_graph_call,
                                     ftrace_graph_caller,
                                     enable);

#ifdef CONFIG_OLD_MCOUNT
        if (!ret)
                ret = __ftrace_modify_caller(&ftrace_graph_call_old,
                                             ftrace_graph_caller_old,
                                             enable);
#endif

        return ret;
}

int ftrace_enable_ftrace_graph_caller(void)
{
        return ftrace_modify_graph_caller(true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
        return ftrace_modify_graph_caller(false);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */