/*
 * Copyright (C) 2009-2010 Analog Devices Inc.
 * Licensed under the GPL-2 or later.
 */

#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/atomic.h>
#include <asm/cacheflush.h>

#ifdef CONFIG_DYNAMIC_FTRACE

static const unsigned char mnop[] = {
	0x03, 0xc0, 0x00, 0x18, /* MNOP; */
	0x03, 0xc0, 0x00, 0x18, /* MNOP; */
};
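
/*
 * Build the 4-byte body of a 24-bit PC-relative CALL at 'insn'.  The offset
 * is counted in 16-bit instruction words, hence the >> 1 on the byte
 * distance between the call site and its target.
 */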
static void bfin_make_pcrel24(unsigned char *insn, unsigned long src,
			      unsigned long dst)
{
	uint32_t pcrel = (dst - src) >> 1;
	insn[0] = pcrel >> 16;
	insn[1] = 0xe3;
	insn[2] = pcrel;
	insn[3] = pcrel >> 8;
}

#define bfin_make_pcrel24(insn, src, dst) bfin_make_pcrel24(insn, src, (unsigned long)(dst))
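
/*
 * Patch 'len' bytes of kernel text at 'ip' and flush the icache so the core
 * fetches the new instructions.  probe_kernel_write() is used so a faulting
 * write is reported via the return value instead of oopsing.
 */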
static int ftrace_modify_code(unsigned long ip, const unsigned char *code,
			      unsigned long len)
{
	int ret = probe_kernel_write((void *)ip, (void *)code, len);
	flush_icache_range(ip, ip + len);
	return ret;
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	/* Turn the mcount call site into two MNOPs as those are 32bit insns */
	return ftrace_modify_code(rec->ip, mnop, sizeof(mnop));
}
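
/*
 * Re-arm a patched-out call site.  The 8-byte sequence saves RETS, does a
 * pcrel24 CALL to 'addr', and restores RETS, as the per-byte comments below
 * spell out.
 */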
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	/* Restore the mcount call site */
	unsigned char call[8];
	call[0] = 0x67;		/* [--SP] = RETS; */
	call[1] = 0x01;
	bfin_make_pcrel24(&call[2], rec->ip + 2, addr);
	call[6] = 0x27;		/* RETS = [SP++]; */
	call[7] = 0x01;

	return ftrace_modify_code(rec->ip, call, sizeof(call));
}
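
/*
 * Re-point the patched CALL at the ftrace_call site to the newly selected
 * tracer function 'func'.
 */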
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned char call[4];
	unsigned long ip = (unsigned long)&ftrace_call;
	bfin_make_pcrel24(call, ip, func);
	return ftrace_modify_code(ip, call, sizeof(call));
}

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}

#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

# ifdef CONFIG_DYNAMIC_FTRACE

extern void ftrace_graph_call(void);
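
/*
 * Turn the graph-tracer hook on by patching the placeholder instruction at
 * ftrace_graph_call with a 16-bit PC-relative jump to ftrace_graph_caller:
 * opcode bits 0x2000 combined with a pcrel12 offset counted in instruction
 * words.
 */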
int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)&ftrace_graph_call;
	uint16_t jump_pcrel12 = ((unsigned long)&ftrace_graph_caller - ip) >> 1;
	jump_pcrel12 |= 0x2000;
	return ftrace_modify_code(ip, (void *)&jump_pcrel12, sizeof(jump_pcrel12));
}
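
/*
 * Turn the hook back off by overwriting the patched jump with two zero
 * bytes taken from empty_zero_page.
 */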
int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_code((unsigned long)&ftrace_graph_call, empty_zero_page, 2);
}

# endif

/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long frame_pointer)
{
	struct ftrace_graph_ent trace;
	unsigned long return_hooker = (unsigned long)&return_to_handler;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	if (ftrace_push_return_trace(*parent, self_addr, &trace.depth,
				     frame_pointer, NULL) == -EBUSY)
		return;

	trace.func = self_addr;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		return;
	}

	/* all is well in the world ! hijack RETS ... */
	*parent = return_hooker;
}

#endif