/*
 * Copyright (C) 2008 Matt Fleming <matt@console-pimps.org>
 * Copyright (C) 2008 Paul Mundt <lethal@linux-sh.org>
 *
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks go to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the run.
 */
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <asm/ftrace.h>
#include <asm/cacheflush.h>
#include <asm/unistd.h>
#include <trace/syscall.h>

#ifdef CONFIG_DYNAMIC_FTRACE
static unsigned char ftrace_replaced_code[MCOUNT_INSN_SIZE];

static unsigned char ftrace_nop[4];
/*
 * If we're trying to nop out a call to a function, we instead
 * place a call to the address after the memory table.
 *
 * 8c011060 <a>:
 * 8c011060:       02 d1           mov.l   8c01106c <a+0xc>,r1
 * 8c011062:       22 4f           sts.l   pr,@-r15
 * 8c011064:       02 c7           mova    8c011070 <a+0x10>,r0
 * 8c011066:       2b 41           jmp     @r1
 * 8c011068:       2a 40           lds     r0,pr
 * 8c01106a:       09 00           nop
 * 8c01106c:       68 24           .word 0x2468     <--- ip
 * 8c01106e:       1d 8c           .word 0x8c1d
 * 8c011070:       26 4f           lds.l   @r15+,pr <--- ip + MCOUNT_INSN_SIZE
 *
 * We write 0x8c011070 to 0x8c01106c so that on entry to a() we branch
 * past the _mcount call and continue executing code like normal.
 */
static unsigned char *ftrace_nop_replace(unsigned long ip)
{
	__raw_writel(ip + MCOUNT_INSN_SIZE, ftrace_nop);

	return ftrace_nop;
}

static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	/* Place the address in the memory table. */
	__raw_writel(addr, ftrace_replaced_code);

	/*
	 * No locking needed, this must be called via kstop_machine
	 * which in essence is like running on a uniprocessor machine.
	 */
	return ftrace_replaced_code;
}

static int ftrace_modify_code(unsigned long ip, unsigned char *old_code,
			      unsigned char *new_code)
{
	unsigned char replaced[MCOUNT_INSN_SIZE];

	/*
	 * Note: Due to modules and __init, code can
	 * disappear and change, so we need to protect against faulting
	 * as well as code changing. We do this by using the
	 * probe_kernel_* functions.
	 *
	 * No real locking needed, this code is run through
	 * kstop_machine, or before SMP starts.
	 */

	/* read the text we want to modify */
	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure it is what we expect it to be */
	if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
		return -EINVAL;

	/* replace the text with the new text */
	if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
		return -EPERM;

	flush_icache_range(ip, ip + MCOUNT_INSN_SIZE);

	return 0;
}

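/*
 * Swap the tracer callback invoked from the mcount entry code:
 * ftrace_call is the patchable slot, and MCOUNT_INSN_OFFSET locates
 * its memory table entry.
 */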
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call) + MCOUNT_INSN_OFFSET;
	unsigned char old[MCOUNT_INSN_SIZE], *new;

	memcpy(old, (unsigned char *)ip, MCOUNT_INSN_SIZE);
	new = ftrace_call_replace(ip, (unsigned long)func);

	return ftrace_modify_code(ip, old, new);
}

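/*
 * Turn the call at rec->ip into a nop: "old" is the expected call to
 * @addr, "new" branches straight past the memory table (see the
 * comment above ftrace_nop_replace()).
 */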
int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned char *new, *old;
	unsigned long ip = rec->ip;

	old = ftrace_call_replace(ip, addr);
	new = ftrace_nop_replace(ip);

	return ftrace_modify_code(rec->ip, old, new);
}

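/*
 * The inverse of ftrace_make_nop(): re-enable tracing at rec->ip by
 * placing @addr back in the call site's memory table.
 */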
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned char *new, *old;
	unsigned long ip = rec->ip;

	old = ftrace_nop_replace(ip);
	new = ftrace_call_replace(ip, addr);

	return ftrace_modify_code(rec->ip, old, new);
}

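/* Arch-specific dynamic ftrace setup; success is reported through *data. */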
int __init ftrace_dyn_arch_init(void *data)
{
	/* The return code is returned via data */
	__raw_writel(0, (unsigned long)data);

	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);

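/*
 * Exchange one address in a memory table slot for another, after
 * verifying that the slot still holds @old_addr.
 */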
static int ftrace_mod(unsigned long ip, unsigned long old_addr,
		      unsigned long new_addr)
{
	unsigned char code[MCOUNT_INSN_SIZE];

	if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	if (old_addr != __raw_readl((unsigned long *)code))
		return -EINVAL;

	__raw_writel(new_addr, ip);
	return 0;
}

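/*
 * Enable/disable the function graph tracer by switching the
 * ftrace_graph_call site between ftrace_graph_caller and skip_trace.
 */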
int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned long ip, old_addr, new_addr;

	ip = (unsigned long)(&ftrace_graph_call) + GRAPH_INSN_OFFSET;
	old_addr = (unsigned long)(&skip_trace);
	new_addr = (unsigned long)(&ftrace_graph_caller);

	return ftrace_mod(ip, old_addr, new_addr);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned long ip, old_addr, new_addr;

	ip = (unsigned long)(&ftrace_graph_call) + GRAPH_INSN_OFFSET;
	old_addr = (unsigned long)(&ftrace_graph_caller);
	new_addr = (unsigned long)(&skip_trace);

	return ftrace_mod(ip, old_addr, new_addr);
}
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Hook the return address and push it in the stack of return addrs
 * in the current thread info.
 *
 * This is the main routine for the function graph tracer. The function
 * graph tracer essentially works like this:
 *
 * parent is the stack address containing self_addr's return address.
 * We pull the real return address out of parent and store it in
 * current's ret_stack. Then, we replace the return address on the stack
 * with the address of return_to_handler. self_addr is the function that
 * called mcount.
 *
 * When self_addr returns, it will jump to return_to_handler which calls
 * ftrace_return_to_handler. ftrace_return_to_handler will pull the real
 * return address off of current's ret_stack and jump to it.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
	unsigned long old;
	int faulted, err;
	struct ftrace_graph_ent trace;
	unsigned long return_hooker = (unsigned long)&return_to_handler;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * Protect against a fault, even if it shouldn't
	 * happen. This tool is too intrusive to
	 * ignore such a protection.
	 */
	__asm__ __volatile__(
		"1:						\n\t"
		"mov.l		@%2, %0				\n\t"
		"2:						\n\t"
		"mov.l		%3, @%2				\n\t"
		"mov		#0, %1				\n\t"
		"3:						\n\t"
		".section .fixup, \"ax\"			\n\t"
		"4:						\n\t"
		"mov.l		5f, %0				\n\t"
		"jmp		@%0				\n\t"
		" mov		#1, %1				\n\t"
		".balign 4					\n\t"
		"5:	.long 3b				\n\t"
		".previous					\n\t"
		".section __ex_table,\"a\"			\n\t"
		".long 1b, 4b					\n\t"
		".long 2b, 4b					\n\t"
		".previous					\n\t"
		: "=&r" (old), "=r" (faulted)
		: "r" (parent), "r" (return_hooker)
	);
	if (unlikely(faulted)) {
		ftrace_graph_stop();
		WARN_ON(1);
		return;
	}

	err = ftrace_push_return_trace(old, self_addr, &trace.depth, 0);
	if (err == -EBUSY) {
		__raw_writel(old, parent);
		return;
	}

	trace.func = self_addr;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		__raw_writel(old, parent);
	}
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_FTRACE_SYSCALLS

extern unsigned long __start_syscalls_metadata[];
extern unsigned long __stop_syscalls_metadata[];
extern unsigned long *sys_call_table;

static struct syscall_metadata **syscalls_metadata;

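/*
 * Match a sys_call_table entry to its metadata by resolving the
 * handler address to a symbol name via kallsyms and comparing it
 * against the recorded syscall names.
 */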
static struct syscall_metadata *find_syscall_meta(unsigned long *syscall)
{
	struct syscall_metadata *start;
	struct syscall_metadata *stop;
	char str[KSYM_SYMBOL_LEN];

	start = (struct syscall_metadata *)__start_syscalls_metadata;
	stop = (struct syscall_metadata *)__stop_syscalls_metadata;
	kallsyms_lookup((unsigned long) syscall, NULL, NULL, NULL, str);

	for ( ; start < stop; start++) {
		if (start->name && !strcmp(start->name, str))
			return start;
	}

	return NULL;
}

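/* Bounds-checked syscall number -> metadata lookup. */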
struct syscall_metadata *syscall_nr_to_meta(int nr)
{
	if (!syscalls_metadata || nr >= FTRACE_SYSCALL_MAX || nr < 0)
		return NULL;

	return syscalls_metadata[nr];
}

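/* Linear scan of the metadata table; returns the syscall number or -1. */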
int syscall_name_to_nr(char *name)
{
	int i;

	if (!syscalls_metadata)
		return -1;

	for (i = 0; i < NR_syscalls; i++) {
		if (syscalls_metadata[i])
			if (!strcmp(syscalls_metadata[i]->name, name))
				return i;
	}

	return -1;
}

void set_syscall_enter_id(int num, int id)
{
	syscalls_metadata[num]->enter_id = id;
}

void set_syscall_exit_id(int num, int id)
{
	syscalls_metadata[num]->exit_id = id;
}

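/*
 * Build the syscall number -> metadata table at boot by resolving
 * every sys_call_table entry with find_syscall_meta().
 */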
static int __init arch_init_ftrace_syscalls(void)
{
	int i;
	struct syscall_metadata *meta;
	unsigned long **psys_syscall_table = &sys_call_table;

	syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) *
					FTRACE_SYSCALL_MAX, GFP_KERNEL);
	if (!syscalls_metadata) {
		WARN_ON(1);
		return -ENOMEM;
	}

	for (i = 0; i < FTRACE_SYSCALL_MAX; i++) {
		meta = find_syscall_meta(psys_syscall_table[i]);
		syscalls_metadata[i] = meta;
	}

	return 0;
}
arch_initcall(arch_init_ftrace_syscalls);
#endif /* CONFIG_FTRACE_SYSCALLS */