/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks go out to P.A. Semi, Inc for supplying me with a PPC64 box.
 *
 * Added function graph tracer code, taken from x86 that was written
 * by Frederic Weisbecker, and ported to PPC by Steven Rostedt.
 */
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/list.h>

#include <asm/cacheflush.h>
#include <asm/code-patching.h>
#include <asm/ftrace.h>
#include <asm/syscall.h>
#ifdef CONFIG_DYNAMIC_FTRACE
static unsigned int
ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
{
	unsigned int op;

	addr = ppc_function_entry((void *)addr);

	/* if (link) set op to 'bl' else 'b' */
	op = create_branch((unsigned int *)ip, addr, link ? 1 : 0);

	return op;
}
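/*
 * For example, with a target 0x30 bytes past ip, create_branch() above
 * should yield the I-form instruction 0x48000030 for 'b' (link == 0) or
 * 0x48000031 for 'bl' (link == 1): primary opcode 18 in the top six
 * bits, the signed byte offset in the LI field (low two bits zero), and
 * the LK bit selecting branch-and-link.
 */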
static int
ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new)
{
	unsigned int replaced;

	/*
	 * Note: Due to modules and __init, code can disappear and
	 * change; we need to protect against faulting as well as code
	 * changing. We do this by using the probe_kernel_* functions.
	 *
	 * No real locking needed, this code is run through
	 * kstop_machine, or before SMP starts.
	 */

	/* read the text we want to modify */
	if (probe_kernel_read(&replaced, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure it is what we expect it to be */
	if (replaced != old)
		return -EINVAL;

	/* replace the text with the new text */
	if (patch_instruction((unsigned int *)ip, new))
		return -EPERM;

	return 0;
}
/*
 * Helper functions that are the same for both PPC64 and PPC32.
 */
static int test_24bit_addr(unsigned long ip, unsigned long addr)
{
	addr = ppc_function_entry((void *)addr);

	/* use create_branch() to verify that this offset is within branch range */
	return create_branch((unsigned int *)ip, addr, 0);
}
#ifdef CONFIG_MODULES

static int is_bl_op(unsigned int op)
{
	return (op & 0xfc000003) == 0x48000001;
}
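/*
 * The mask 0xfc000003 keeps the primary opcode (top six bits) plus the
 * AA and LK bits; 0x48000001 is opcode 18 with AA=0 and LK=1, i.e. a
 * relative 'bl'. Any 'bl' matches regardless of its 24-bit offset: for
 * example, 0x4bfffff1 (bl .-16) & 0xfc000003 == 0x48000001.
 */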
static unsigned long find_bl_target(unsigned long ip, unsigned int op)
{
	int offset;

	offset = (op & 0x03fffffc);
	/* make it signed */
	if (offset & 0x02000000)
		offset |= 0xfe000000;

	return ip + (long)offset;
}
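/*
 * Example: for op == 0x4bfffff1 (bl .-16) the offset field is
 * 0x03fffff0. Bit 0x02000000 is set, so the top bits are filled in
 * with 0xfe000000, giving 0xfffffff0 == -16, and the returned target
 * is ip - 16.
 */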
#ifdef CONFIG_PPC64
static int
__ftrace_make_nop(struct module *mod,
		  struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op;
	unsigned long ptr;
	unsigned long ip = rec->ip;
	void *tramp;

	/* read where this goes */
	if (probe_kernel_read(&op, (void *)ip, sizeof(int)))
		return -EFAULT;

	/* Make sure that this is still a 24-bit jump */
	if (!is_bl_op(op)) {
		printk(KERN_ERR "Not expected bl: opcode is %x\n", op);
		return -EINVAL;
	}

	/* let's find where the pointer goes */
	tramp = (void *)find_bl_target(ip, op);

	pr_devel("ip:%lx jumps to %p", ip, tramp);

	if (!is_module_trampoline(tramp)) {
		printk(KERN_ERR "Not a trampoline\n");
		return -EINVAL;
	}

	if (module_trampoline_target(mod, tramp, &ptr)) {
		printk(KERN_ERR "Failed to get trampoline target\n");
		return -EFAULT;
	}

	pr_devel("trampoline target %lx", ptr);

	/* This should match what was called */
	if (ptr != ppc_function_entry((void *)addr)) {
		printk(KERN_ERR "addr %lx does not match expected %lx\n",
			ptr, ppc_function_entry((void *)addr));
		return -EINVAL;
	}

	/*
	 * Our original call site looks like:
	 *
	 * bl <tramp>
	 * ld r2,XX(r1)
	 *
	 * Milton Miller pointed out that we cannot simply nop the branch.
	 * If a task was preempted when calling a trace function, the nops
	 * will remove the way to restore the TOC in r2 and the r2 TOC will
	 * get corrupted.
	 *
	 * Use a b +8 to jump over the load.
	 */
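	/*
	 * 0x48000008 is an I-form branch with primary opcode 18,
	 * offset +8 and AA = LK = 0: an unconditional 'b' over the
	 * TOC reload that follows the call site.
	 */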
	op = 0x48000008;	/* b +8 */

	if (patch_instruction((unsigned int *)ip, op))
		return -EPERM;

	return 0;
}
#else /* !PPC64 */
static int
__ftrace_make_nop(struct module *mod,
		  struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op;
	unsigned int jmp[4];
	unsigned long ip = rec->ip;
	unsigned long tramp;

	if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure that this is still a 24-bit jump */
	if (!is_bl_op(op)) {
		printk(KERN_ERR "Not expected bl: opcode is %x\n", op);
		return -EINVAL;
	}

	/* let's find where the pointer goes */
	tramp = find_bl_target(ip, op);

	/*
	 * On PPC32 the trampoline looks like:
	 *  0x3d, 0x80, 0x00, 0x00  lis r12,sym@ha
	 *  0x39, 0x8c, 0x00, 0x00  addi r12,r12,sym@l
	 *  0x7d, 0x89, 0x03, 0xa6  mtctr r12
	 *  0x4e, 0x80, 0x04, 0x20  bctr
	 */

	pr_devel("ip:%lx jumps to %lx", ip, tramp);

	/* Find where the trampoline jumps to */
	if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) {
		printk(KERN_ERR "Failed to read %lx\n", tramp);
		return -EFAULT;
	}

	pr_devel(" %08x %08x ", jmp[0], jmp[1]);

	/* verify that this is what we expect it to be */
	if (((jmp[0] & 0xffff0000) != 0x3d800000) ||
	    ((jmp[1] & 0xffff0000) != 0x398c0000) ||
	    (jmp[2] != 0x7d8903a6) ||
	    (jmp[3] != 0x4e800420)) {
		printk(KERN_ERR "Not a trampoline\n");
		return -EINVAL;
	}
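	/*
	 * Recover sym from the lis/addi pair. addi sign-extends its
	 * immediate, so when sym@l has bit 15 set, sym@ha is sym's high
	 * half rounded up by one; pasting the two 16-bit immediates
	 * together then overshoots by 0x10000. Example: for sym
	 * 0xc0019000, lis holds 0xc002 and addi holds 0x9000, the
	 * concatenation gives 0xc0029000, and subtracting 0x10000
	 * restores 0xc0019000.
	 */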
	tramp = (jmp[1] & 0xffff) |
		((jmp[0] & 0xffff) << 16);
	if (tramp & 0x8000)
		tramp -= 0x10000;

	pr_devel(" %lx ", tramp);

	if (tramp != addr) {
		printk(KERN_ERR
		       "Trampoline location %08lx does not match addr\n",
		       tramp);
		return -EINVAL;
	}

	op = PPC_INST_NOP;

	if (patch_instruction((unsigned int *)ip, op))
		return -EPERM;

	return 0;
}
#endif /* PPC64 */
#endif /* CONFIG_MODULES */
int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned int old, new;

	/*
	 * If the calling address is more than 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		old = ftrace_call_replace(ip, addr, 1);
		new = PPC_INST_NOP;
		return ftrace_modify_code(ip, old, new);
	}

#ifdef CONFIG_MODULES
	/*
	 * Out of range jumps are called from modules.
	 * We should either already have a pointer to the module
	 * or it has been passed in.
	 */
	if (!rec->arch.mod) {
		if (!mod) {
			printk(KERN_ERR "No module loaded addr=%lx\n",
			       addr);
			return -EFAULT;
		}
		rec->arch.mod = mod;
	} else if (mod) {
		if (mod != rec->arch.mod) {
			printk(KERN_ERR
			       "Record mod %p not equal to passed in mod %p\n",
			       rec->arch.mod, mod);
			return -EINVAL;
		}
		/* nothing to do if mod == rec->arch.mod */
	} else
		mod = rec->arch.mod;

	return __ftrace_make_nop(mod, rec, addr);
#else
	/* We should not get here without modules */
	return -EINVAL;
#endif /* CONFIG_MODULES */
}
#ifdef CONFIG_MODULES
#ifdef CONFIG_PPC64
static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op[2];
	void *ip = (void *)rec->ip;

	/* read where this goes */
	if (probe_kernel_read(op, ip, sizeof(op)))
		return -EFAULT;

	/*
	 * We expect to see:
	 *
	 * b +8
	 * ld r2,XX(r1)
	 *
	 * The load offset is different depending on the ABI. For simplicity
	 * just mask it out when doing the compare.
	 */
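	/*
	 * 0xe8410000 is 'ld r2,0(r1)' with the displacement cleared:
	 * opcode 58, RT=2, RA=1. Masking op[1] with 0xffff0000 drops
	 * the DS displacement field, so both the ELFv1 TOC save slot
	 * (ld r2,40(r1) == 0xe8410028) and the ELFv2 one
	 * (ld r2,24(r1) == 0xe8410018) should match.
	 */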
	if ((op[0] != 0x48000008) || ((op[1] & 0xffff0000) != 0xe8410000)) {
		printk(KERN_ERR "Unexpected call sequence: %x %x\n",
			op[0], op[1]);
		return -EINVAL;
	}

	/* If we never set up a trampoline to ftrace_caller, then bail */
	if (!rec->arch.mod->arch.tramp) {
		printk(KERN_ERR "No ftrace trampoline\n");
		return -EINVAL;
	}

	/* Ensure branch is within 24 bits; create_branch() returns 0 on failure */
	if (!create_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) {
		printk(KERN_ERR "Branch out of range\n");
		return -EINVAL;
	}

	if (patch_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) {
		printk(KERN_ERR "REL24 out of range!\n");
		return -EINVAL;
	}

	return 0;
}
#else
static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op;
	unsigned long ip = rec->ip;

	/* read where this goes */
	if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* It should be pointing to a nop */
	if (op != PPC_INST_NOP) {
		printk(KERN_ERR "Expected NOP but have %x\n", op);
		return -EINVAL;
	}

	/* If we never set up a trampoline to ftrace_caller, then bail */
	if (!rec->arch.mod->arch.tramp) {
		printk(KERN_ERR "No ftrace trampoline\n");
		return -EINVAL;
	}

	/* create the branch to the trampoline */
	op = create_branch((unsigned int *)ip,
			   rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
	if (!op) {
		printk(KERN_ERR "REL24 out of range!\n");
		return -EINVAL;
	}

	pr_devel("write to %lx\n", rec->ip);

	if (patch_instruction((unsigned int *)ip, op))
		return -EPERM;

	return 0;
}
#endif /* CONFIG_PPC64 */
#endif /* CONFIG_MODULES */

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned int old, new;

	/*
	 * If the calling address is more than 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		old = PPC_INST_NOP;
		new = ftrace_call_replace(ip, addr, 1);
		return ftrace_modify_code(ip, old, new);
	}

#ifdef CONFIG_MODULES
	/*
	 * Out of range jumps are called from modules.
	 * Since we are converting from a nop, the record had better
	 * already have a module defined.
	 */
	if (!rec->arch.mod) {
		printk(KERN_ERR "No module loaded\n");
		return -EINVAL;
	}

	return __ftrace_make_call(rec, addr);
#else
	/* We should not get here without modules */
	return -EINVAL;
#endif /* CONFIG_MODULES */
}
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
	unsigned int old, new;
	int ret;

	old = *(unsigned int *)&ftrace_call;
	new = ftrace_call_replace(ip, (unsigned long)func, 1);
	ret = ftrace_modify_code(ip, old, new);

	return ret;
}
static int __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_addr = (unsigned long)FTRACE_ADDR;
	int ret;

	ret = ftrace_update_record(rec, enable);

	switch (ret) {
	case FTRACE_UPDATE_IGNORE:
		return 0;
	case FTRACE_UPDATE_MAKE_CALL:
		return ftrace_make_call(rec, ftrace_addr);
	case FTRACE_UPDATE_MAKE_NOP:
		return ftrace_make_nop(NULL, rec, ftrace_addr);
	}

	return 0;
}
void ftrace_replace_code(int enable)
{
	struct ftrace_rec_iter *iter;
	struct dyn_ftrace *rec;
	int ret;

	for (iter = ftrace_rec_iter_start(); iter;
	     iter = ftrace_rec_iter_next(iter)) {
		rec = ftrace_rec_iter_record(iter);
		ret = __ftrace_replace_code(rec, enable);
		if (ret) {
			ftrace_bug(ret, rec->ip);
			return;
		}
	}
}
void arch_ftrace_update_code(int command)
{
	if (command & FTRACE_UPDATE_CALLS)
		ftrace_replace_code(1);
	else if (command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (command & FTRACE_START_FUNC_RET)
		ftrace_enable_ftrace_graph_caller();
	else if (command & FTRACE_STOP_FUNC_RET)
		ftrace_disable_ftrace_graph_caller();
}

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);
extern void ftrace_graph_stub(void);
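/*
 * ftrace_graph_call labels a branch site in the ftrace_caller assembly
 * (see entry_*.S). Enabling the graph tracer repoints that one branch
 * from ftrace_graph_stub to ftrace_graph_caller; disabling it points
 * the branch back at the stub.
 */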
int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);
	unsigned long addr = (unsigned long)(&ftrace_graph_caller);
	unsigned long stub = (unsigned long)(&ftrace_graph_stub);
	unsigned int old, new;

	old = ftrace_call_replace(ip, stub, 0);
	new = ftrace_call_replace(ip, addr, 0);

	return ftrace_modify_code(ip, old, new);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);
	unsigned long addr = (unsigned long)(&ftrace_graph_caller);
	unsigned long stub = (unsigned long)(&ftrace_graph_stub);
	unsigned int old, new;

	old = ftrace_call_replace(ip, addr, 0);
	new = ftrace_call_replace(ip, stub, 0);

	return ftrace_modify_code(ip, old, new);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_PPC64
extern void mod_return_to_handler(void);
#endif

/*
 * Hook the return address and push it on the stack of return addrs
 * in current thread info.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
	unsigned long old;
	int faulted;
	struct ftrace_graph_ent trace;
	unsigned long return_hooker = (unsigned long)&return_to_handler;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

#ifdef CONFIG_PPC64
	/* non core kernel code needs to save and restore the TOC */
	if (REGION_ID(self_addr) != KERNEL_REGION_ID)
		return_hooker = (unsigned long)&mod_return_to_handler;
#endif

	return_hooker = ppc_function_entry((void *)return_hooker);

	/*
	 * Protect against a fault, even though one shouldn't
	 * happen. This tool is too intrusive to forgo such
	 * protection.
	 */
	asm volatile(
		"1: " PPC_LL "%[old], 0(%[parent])\n"
		"2: " PPC_STL "%[return_hooker], 0(%[parent])\n"
		"   li %[faulted], 0\n"
		"3:\n"

		".section .fixup, \"ax\"\n"
		"4: li %[faulted], 1\n"
		"   b 3b\n"
		".previous\n"

		".section __ex_table,\"a\"\n"
			PPC_LONG_ALIGN "\n"
			PPC_LONG "1b,4b\n"
			PPC_LONG "2b,4b\n"
		".previous"

		: [old] "=&r" (old), [faulted] "=r" (faulted)
		: [parent] "r" (parent), [return_hooker] "r" (return_hooker)
		: "memory"
	);
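	/*
	 * The __ex_table entries pair each potentially faulting access
	 * (labels 1 and 2: the load of the saved return address and the
	 * store of the hook address) with the fixup at label 4, which
	 * sets 'faulted' and branches back to label 3. On success the
	 * straight-line path leaves faulted == 0.
	 */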
	if (unlikely(faulted)) {
		ftrace_graph_stop();
		WARN_ON(1);
		return;
	}

	trace.func = self_addr;
	trace.depth = current->curr_ret_stack + 1;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		*parent = old;
		return;
	}

	if (ftrace_push_return_trace(old, self_addr, &trace.depth, 0) == -EBUSY)
		*parent = old;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_PPC64)
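/*
 * The PPC64 sys_call_table lays out each syscall as a pair of entries,
 * the native handler followed by the 32-bit compat one, so the native
 * entry for syscall nr should live at index nr * 2.
 */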
unsigned long __init arch_syscall_addr(int nr)
{
	return sys_call_table[nr*2];
}
#endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_PPC64 */