arch/powerpc/kernel/ftrace.c
/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks goes out to P.A. Semi, Inc for supplying me with a PPC64 box.
 *
 * Added function graph tracer code, taken from x86 that was written
 * by Frederic Weisbecker, and ported to PPC by Steven Rostedt.
 */
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/list.h>

#include <asm/cacheflush.h>
#include <asm/code-patching.h>
#include <asm/ftrace.h>
#include <asm/syscall.h>

#ifdef CONFIG_DYNAMIC_FTRACE
static unsigned int
ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
{
	unsigned int op;

	addr = ppc_function_entry((void *)addr);

	/* if (link) set op to 'bl' else 'b' */
	op = create_branch((unsigned int *)ip, addr, link ? 1 : 0);

	return op;
}
static int
ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new)
{
	unsigned int replaced;

	/*
	 * Note: Due to modules and __init, code can disappear and change;
	 * we need to protect against faulting as well as code changing.
	 * We do this by using the probe_kernel_* functions.
	 *
	 * No real locking needed: this code is run through
	 * stop_machine(), or before SMP starts.
	 */

	/* read the text we want to modify */
	if (probe_kernel_read(&replaced, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure it is what we expect it to be */
	if (replaced != old)
		return -EINVAL;

	/* replace the text with the new text */
	if (patch_instruction((unsigned int *)ip, new))
		return -EPERM;

	return 0;
}
/*
 * Helper functions that are the same for both PPC64 and PPC32.
 */
static int test_24bit_addr(unsigned long ip, unsigned long addr)
{
	/* use create_branch to verify that this offset can be branched */
	return create_branch((unsigned int *)ip, addr, 0);
}
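
/*
 * A relative "b"/"bl" on powerpc encodes a signed 26-bit byte offset
 * (a 24-bit field shifted left by 2), giving a reach of roughly +/-32MB
 * from the call site.  create_branch() returns 0 when the target cannot
 * be encoded, so a non-zero result here means a direct branch will do.
 */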

#ifdef CONFIG_MODULES

static int is_bl_op(unsigned int op)
{
	return (op & 0xfc000003) == 0x48000001;
}

static unsigned long find_bl_target(unsigned long ip, unsigned int op)
{
	int offset;

	offset = (op & 0x03fffffc);
	/* make it signed */
	if (offset & 0x02000000)
		offset |= 0xfe000000;

	return ip + (long)offset;
}
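
/*
 * Worked example of the decode above: op = 0x48000009 is "bl +8"
 * (opcode bits 0x48000001 with LK=1, offset field 0x00000008), while
 * op = 0x4bfffff1 is "bl -16": its offset field is 0x03fffff0, the
 * sign bit 0x02000000 is set, and or-ing in 0xfe000000 yields -16.
 */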

#ifdef CONFIG_PPC64
static int
__ftrace_make_nop(struct module *mod,
		  struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op;
	unsigned int jmp[5];
	unsigned long ptr;
	unsigned long ip = rec->ip;
	unsigned long tramp;
	int offset;

	/* read where this goes */
	if (probe_kernel_read(&op, (void *)ip, sizeof(int)))
		return -EFAULT;

	/* Make sure that this is still a 24-bit jump */
	if (!is_bl_op(op)) {
		printk(KERN_ERR "Not expected bl: opcode is %x\n", op);
		return -EINVAL;
	}

	/* let's find where the pointer goes */
	tramp = find_bl_target(ip, op);

	/*
	 * On PPC64 the trampoline looks like:
	 * 0x3d, 0x82, 0x00, 0x00,	addis	r12,r2, <high>
	 * 0x39, 0x8c, 0x00, 0x00,	addi	r12,r12, <low>
	 *   Where the bytes 2,3,6 and 7 make up the 32-bit offset
	 *   into the TOC that holds the pointer to jump to.
	 * 0xf8, 0x41, 0x00, 0x28,	std	r2,40(r1)
	 * 0xe9, 0x6c, 0x00, 0x20,	ld	r11,32(r12)
	 *   The actual address is 32 bytes from the offset
	 *   into the TOC.
	 * 0xe8, 0x4c, 0x00, 0x28,	ld	r2,40(r12)
	 */

	pr_devel("ip:%lx jumps to %lx r2: %lx", ip, tramp, mod->arch.toc);

	/* Find where the trampoline jumps to */
	if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) {
		printk(KERN_ERR "Failed to read %lx\n", tramp);
		return -EFAULT;
	}

	pr_devel(" %08x %08x", jmp[0], jmp[1]);

	/* verify that this is what we expect it to be */
	if (((jmp[0] & 0xffff0000) != 0x3d820000) ||
	    ((jmp[1] & 0xffff0000) != 0x398c0000) ||
	    (jmp[2] != 0xf8410028) ||
	    (jmp[3] != 0xe96c0020) ||
	    (jmp[4] != 0xe84c0028)) {
		printk(KERN_ERR "Not a trampoline\n");
		return -EINVAL;
	}

	/* The bottom half is sign extended */
	offset = ((unsigned)((unsigned short)jmp[0]) << 16) +
		 (int)((short)jmp[1]);
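
	/*
	 * For example, a TOC offset of 0x18000 is encoded as <high> =
	 * 0x0002 and <low> = 0x8000: addi sign-extends its immediate,
	 * so 0x00020000 + (short)0x8000 = 0x00020000 - 0x8000 = 0x18000.
	 * That is why the low half must be sign-extended here while the
	 * high half is used as-is.
	 */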

	pr_devel(" %x ", offset);

	/* get the address this jumps to */
	tramp = mod->arch.toc + offset + 32;
	pr_devel("toc: %lx", tramp);

	if (probe_kernel_read(jmp, (void *)tramp, 8)) {
		printk(KERN_ERR "Failed to read %lx\n", tramp);
		return -EFAULT;
	}

	pr_devel(" %08x %08x\n", jmp[0], jmp[1]);

	ptr = ((unsigned long)jmp[0] << 32) + jmp[1];

	/* This should match what was called */
	if (ptr != ppc_function_entry((void *)addr)) {
		printk(KERN_ERR "addr does not match %lx\n", ptr);
		return -EINVAL;
	}

	/*
	 * We want to nop the instruction, but the next one is
	 *  0xe8, 0x41, 0x00, 0x28	ld r2,40(r1)
	 * and needs to be turned into a nop too.
	 */
	if (probe_kernel_read(&op, (void *)(ip+4), MCOUNT_INSN_SIZE))
		return -EFAULT;

	if (op != 0xe8410028) {
		printk(KERN_ERR "Next line is not ld! (%08x)\n", op);
		return -EINVAL;
	}

	/*
	 * Milton Miller pointed out that we can not blindly do nops.
	 * If a task was preempted when calling a trace function,
	 * the nops will remove the way to restore the TOC in r2
	 * and the r2 TOC will get corrupted.
	 */

	/*
	 * Replace:
	 *   bl <tramp>  <==== will be replaced with "b 1f"
	 *   ld r2,40(r1)
	 * 1:
	 */
	op = 0x48000008;	/* b +8 */

	if (patch_instruction((unsigned int *)ip, op))
		return -EPERM;

	return 0;
}

#else /* !PPC64 */
static int
__ftrace_make_nop(struct module *mod,
		  struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op;
	unsigned int jmp[4];
	unsigned long ip = rec->ip;
	unsigned long tramp;

	if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure that this is still a 24-bit jump */
	if (!is_bl_op(op)) {
		printk(KERN_ERR "Not expected bl: opcode is %x\n", op);
		return -EINVAL;
	}

	/* let's find where the pointer goes */
	tramp = find_bl_target(ip, op);

	/*
	 * On PPC32 the trampoline looks like:
	 *  0x3d, 0x80, 0x00, 0x00  lis r12,sym@ha
	 *  0x39, 0x8c, 0x00, 0x00  addi r12,r12,sym@l
	 *  0x7d, 0x89, 0x03, 0xa6  mtctr r12
	 *  0x4e, 0x80, 0x04, 0x20  bctr
	 */

	pr_devel("ip:%lx jumps to %lx", ip, tramp);

	/* Find where the trampoline jumps to */
	if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) {
		printk(KERN_ERR "Failed to read %lx\n", tramp);
		return -EFAULT;
	}

	pr_devel(" %08x %08x ", jmp[0], jmp[1]);

	/* verify that this is what we expect it to be */
	if (((jmp[0] & 0xffff0000) != 0x3d800000) ||
	    ((jmp[1] & 0xffff0000) != 0x398c0000) ||
	    (jmp[2] != 0x7d8903a6) ||
	    (jmp[3] != 0x4e800420)) {
		printk(KERN_ERR "Not a trampoline\n");
		return -EINVAL;
	}

	tramp = (jmp[1] & 0xffff) |
		((jmp[0] & 0xffff) << 16);
	if (tramp & 0x8000)
		tramp -= 0x10000;
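
	/*
	 * The @ha/@l pair is reassembled the same way as on ppc64: lis
	 * loads the high half, addi adds a sign-extended low half.  E.g.
	 * for sym = 0x00018000 the pair is lis r12,0x2; addi r12,r12,-0x8000,
	 * so when bit 15 of the low half is set the naive (high << 16) | low
	 * value must be corrected down by 0x10000, as above.
	 */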

	pr_devel(" %lx ", tramp);

	if (tramp != addr) {
		printk(KERN_ERR
		       "Trampoline location %08lx does not match addr\n",
		       tramp);
		return -EINVAL;
	}

	op = PPC_INST_NOP;

	if (patch_instruction((unsigned int *)ip, op))
		return -EPERM;

	return 0;
}
#endif /* PPC64 */
#endif /* CONFIG_MODULES */

int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned int old, new;

	/*
	 * If the calling address is more than 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		old = ftrace_call_replace(ip, addr, 1);
		new = PPC_INST_NOP;
		return ftrace_modify_code(ip, old, new);
	}
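
	/*
	 * In the in-range case above, "old" is the expected "bl <addr>"
	 * currently at the call site (e.g. the "bl _mcount" emitted by
	 * the compiler), which ftrace_modify_code() verifies before
	 * overwriting it with a nop.
	 */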

#ifdef CONFIG_MODULES
	/*
	 * Out of range jumps are called from modules.
	 * We should either already have a pointer to the module
	 * or it has been passed in.
	 */
	if (!rec->arch.mod) {
		if (!mod) {
			printk(KERN_ERR "No module loaded addr=%lx\n",
			       addr);
			return -EFAULT;
		}
		rec->arch.mod = mod;
	} else if (mod) {
		if (mod != rec->arch.mod) {
			printk(KERN_ERR
			       "Record mod %p not equal to passed in mod %p\n",
			       rec->arch.mod, mod);
			return -EINVAL;
		}
		/* nothing to do if mod == rec->arch.mod */
	} else
		mod = rec->arch.mod;

	return __ftrace_make_nop(mod, rec, addr);
#else
	/* We should not get here without modules */
	return -EINVAL;
#endif /* CONFIG_MODULES */
}

#ifdef CONFIG_MODULES
#ifdef CONFIG_PPC64
static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op[2];
	unsigned long ip = rec->ip;

	/* read where this goes */
	if (probe_kernel_read(op, (void *)ip, MCOUNT_INSN_SIZE * 2))
		return -EFAULT;

	/*
	 * It should be pointing to two nops or
	 *   b +8; ld r2,40(r1)
	 */
	if (((op[0] != 0x48000008) || (op[1] != 0xe8410028)) &&
	    ((op[0] != PPC_INST_NOP) || (op[1] != PPC_INST_NOP))) {
		printk(KERN_ERR "Expected NOPs but have %x %x\n", op[0], op[1]);
		return -EINVAL;
	}

	/* If we never set up a trampoline to ftrace_caller, then bail */
	if (!rec->arch.mod->arch.tramp) {
		printk(KERN_ERR "No ftrace trampoline\n");
		return -EINVAL;
	}

	/* create the branch to the trampoline */
	op[0] = create_branch((unsigned int *)ip,
			      rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
	if (!op[0]) {
		printk(KERN_ERR "REL24 out of range!\n");
		return -EINVAL;
	}

	/* ld r2,40(r1) */
	op[1] = 0xe8410028;

	pr_devel("write to %lx\n", rec->ip);

	if (probe_kernel_write((void *)ip, op, MCOUNT_INSN_SIZE * 2))
		return -EPERM;

	flush_icache_range(ip, ip + 8);

	return 0;
}
#else
static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op;
	unsigned long ip = rec->ip;

	/* read where this goes */
	if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* It should be pointing to a nop */
	if (op != PPC_INST_NOP) {
		printk(KERN_ERR "Expected NOP but have %x\n", op);
		return -EINVAL;
	}

	/* If we never set up a trampoline to ftrace_caller, then bail */
	if (!rec->arch.mod->arch.tramp) {
		printk(KERN_ERR "No ftrace trampoline\n");
		return -EINVAL;
	}

	/* create the branch to the trampoline */
	op = create_branch((unsigned int *)ip,
			   rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
	if (!op) {
		printk(KERN_ERR "REL24 out of range!\n");
		return -EINVAL;
	}

	pr_devel("write to %lx\n", rec->ip);

	if (patch_instruction((unsigned int *)ip, op))
		return -EPERM;

	return 0;
}
#endif /* CONFIG_PPC64 */
#endif /* CONFIG_MODULES */

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned int old, new;

	/*
	 * If the calling address is more than 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		old = PPC_INST_NOP;
		new = ftrace_call_replace(ip, addr, 1);
		return ftrace_modify_code(ip, old, new);
	}

#ifdef CONFIG_MODULES
	/*
	 * Out of range jumps are called from modules.
	 * Since we are converting from a nop, the record had better
	 * already have a module defined.
	 */
	if (!rec->arch.mod) {
		printk(KERN_ERR "No module loaded\n");
		return -EINVAL;
	}

	return __ftrace_make_call(rec, addr);
#else
	/* We should not get here without modules */
	return -EINVAL;
#endif /* CONFIG_MODULES */
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
	unsigned int old, new;
	int ret;

	old = *(unsigned int *)&ftrace_call;
	new = ftrace_call_replace(ip, (unsigned long)func, 1);
	ret = ftrace_modify_code(ip, old, new);

	return ret;
}

static int __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_addr = (unsigned long)FTRACE_ADDR;
	int ret;

	ret = ftrace_update_record(rec, enable);

	switch (ret) {
	case FTRACE_UPDATE_IGNORE:
		return 0;
	case FTRACE_UPDATE_MAKE_CALL:
		return ftrace_make_call(rec, ftrace_addr);
	case FTRACE_UPDATE_MAKE_NOP:
		return ftrace_make_nop(NULL, rec, ftrace_addr);
	}

	return 0;
}

void ftrace_replace_code(int enable)
{
	struct ftrace_rec_iter *iter;
	struct dyn_ftrace *rec;
	int ret;

	for (iter = ftrace_rec_iter_start(); iter;
	     iter = ftrace_rec_iter_next(iter)) {
		rec = ftrace_rec_iter_record(iter);
		ret = __ftrace_replace_code(rec, enable);
		if (ret) {
			ftrace_bug(ret, rec->ip);
			return;
		}
	}
}

void arch_ftrace_update_code(int command)
{
	if (command & FTRACE_UPDATE_CALLS)
		ftrace_replace_code(1);
	else if (command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (command & FTRACE_START_FUNC_RET)
		ftrace_enable_ftrace_graph_caller();
	else if (command & FTRACE_STOP_FUNC_RET)
		ftrace_disable_ftrace_graph_caller();
}

int __init ftrace_dyn_arch_init(void *data)
{
	/* caller expects data to be zero */
	unsigned long *p = data;

	*p = 0;

	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);
extern void ftrace_graph_stub(void);
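
/*
 * ftrace_graph_call labels a branch site inside the ftrace_caller
 * assembly (see entry_*.S): it branches either to ftrace_graph_caller
 * or to the fall-through label ftrace_graph_stub.  Enabling or
 * disabling the graph tracer is therefore just a matter of repointing
 * that one branch, as the two functions below do.
 */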

int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);
	unsigned long addr = (unsigned long)(&ftrace_graph_caller);
	unsigned long stub = (unsigned long)(&ftrace_graph_stub);
	unsigned int old, new;

	old = ftrace_call_replace(ip, stub, 0);
	new = ftrace_call_replace(ip, addr, 0);

	return ftrace_modify_code(ip, old, new);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);
	unsigned long addr = (unsigned long)(&ftrace_graph_caller);
	unsigned long stub = (unsigned long)(&ftrace_graph_stub);
	unsigned int old, new;

	old = ftrace_call_replace(ip, addr, 0);
	new = ftrace_call_replace(ip, stub, 0);

	return ftrace_modify_code(ip, old, new);
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_PPC64
extern void mod_return_to_handler(void);
#endif

/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
	unsigned long old;
	int faulted;
	struct ftrace_graph_ent trace;
	unsigned long return_hooker = (unsigned long)&return_to_handler;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

#ifdef CONFIG_PPC64
	/* non core kernel code needs to save and restore the TOC */
	if (REGION_ID(self_addr) != KERNEL_REGION_ID)
		return_hooker = (unsigned long)&mod_return_to_handler;
#endif

	return_hooker = ppc_function_entry((void *)return_hooker);

	/*
	 * Protect against a fault, even if it shouldn't
	 * happen. This tool is too intrusive to go
	 * without such protection.
	 */
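	/*
	 * The asm below reads the caller's return address from *parent
	 * and replaces it with return_hooker.  The two __ex_table entries
	 * map the load at label 1 and the store at label 2 to the fixup
	 * at label 4, which sets "faulted" and resumes at label 3.
	 */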
	asm volatile(
		"1: " PPC_LL "%[old], 0(%[parent])\n"
		"2: " PPC_STL "%[return_hooker], 0(%[parent])\n"
		"   li %[faulted], 0\n"
		"3:\n"

		".section .fixup, \"ax\"\n"
		"4: li %[faulted], 1\n"
		"   b 3b\n"
		".previous\n"

		".section __ex_table,\"a\"\n"
			PPC_LONG_ALIGN "\n"
			PPC_LONG "1b,4b\n"
			PPC_LONG "2b,4b\n"
		".previous"

		: [old] "=&r" (old), [faulted] "=r" (faulted)
		: [parent] "r" (parent), [return_hooker] "r" (return_hooker)
		: "memory"
	);

	if (unlikely(faulted)) {
		ftrace_graph_stop();
		WARN_ON(1);
		return;
	}

	trace.func = self_addr;
	trace.depth = current->curr_ret_stack + 1;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		*parent = old;
		return;
	}

	if (ftrace_push_return_trace(old, self_addr, &trace.depth, 0) == -EBUSY)
		*parent = old;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_PPC64)
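/*
 * On ppc64 of this vintage, sys_call_table interleaves a 64-bit and a
 * 32-bit (compat) entry per syscall, so the native entry for syscall
 * "nr" lives at index nr * 2.
 */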
unsigned long __init arch_syscall_addr(int nr)
{
	return sys_call_table[nr*2];
}
#endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_PPC64 */