/*
 * Dynamic function tracer architecture backend.
 *
 * Copyright IBM Corp. 2009,2014
 *
 *   Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *		Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/moduleloader.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/kprobes.h>
#include <trace/syscall.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include "entry.h"

/*
 * The mcount code looks like this:
 *	stg	%r14,8(%r15)		# offset 0
 *	larl	%r1,<&counter>		# offset 6
 *	brasl	%r14,_mcount		# offset 12
 *	lg	%r14,8(%r15)		# offset 18
 * Total length is 24 bytes. Only the first instruction will be patched
 * by ftrace_make_call / ftrace_make_nop.
 * The enabled ftrace code block looks like this:
 * >	brasl	%r0,ftrace_caller	# offset 0
 *	larl	%r1,<&counter>		# offset 6
 *	brasl	%r14,_mcount		# offset 12
 *	lg	%r14,8(%r15)		# offset 18
 * The ftrace function gets called with a non-standard C function call
 * ABI where r0 contains the return address. It is also expected that
 * the called function only clobbers r0 and r1, but restores r2-r15.
 * For module code we can't jump directly to the ftrace caller, but need
 * a trampoline (ftrace_plt), which also clobbers r1.
 * The return point of the ftrace function has offset 24, so execution
 * continues behind the mcount block.
 * The disabled ftrace code block looks like this:
 * >	jg	.+24			# offset 0
 *	larl	%r1,<&counter>		# offset 6
 *	brasl	%r14,_mcount		# offset 12
 *	lg	%r14,8(%r15)		# offset 18
 * The jg instruction branches to offset 24 to skip as many instructions
 * as possible.
 * In case we use gcc's hotpatch feature the original and also the
 * disabled function prologue contains only a single six byte instruction
 * and looks like this:
 * >	brcl	0,0			# offset 0
 * To enable ftrace the code gets patched like above and afterwards looks
 * like this:
 * >	brasl	%r0,ftrace_caller	# offset 0
 */

unsigned long ftrace_plt;

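/*
 * For reference: the generators below operate on struct ftrace_insn,
 * which (in <asm/ftrace.h> of this kernel generation) models the six
 * byte patch site as a packed pair of fields:
 *
 *	u16 opc;	# first two instruction bytes
 *	s32 disp;	# remaining four bytes
 */
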
static inline void ftrace_generate_orig_insn(struct ftrace_insn *insn)
{
#ifdef CC_USING_HOTPATCH
	/* brcl 0,0 */
	insn->opc = 0xc004;
	insn->disp = 0;
#else
	/* stg r14,8(r15) */
	insn->opc = 0xe3e0;
	insn->disp = 0xf0080024;
#endif
}

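/*
 * The values above reassemble the original instructions byte for byte:
 * 0xc004 with a zero displacement is brcl 0,0, and 0xe3e0/0xf0080024 is
 * the RXY encoding of stg %r14,8(%r15) (register 14, base register 15,
 * displacement 8, opcode byte 0x24).
 */
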
static inline int is_kprobe_on_ftrace(struct ftrace_insn *insn)
{
#ifdef CONFIG_KPROBES
	if (insn->opc == BREAKPOINT_INSTRUCTION)
		return 1;
#endif
	return 0;
}

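/*
 * If a kprobe is placed on an mcount site, the first two bytes hold the
 * breakpoint instruction. The two generators below fill the remaining
 * four bytes with a KPROBE_ON_FTRACE_* marker, which tells the kprobe
 * handler whether to emulate a nop or the call to ftrace_caller when
 * the breakpoint fires.
 */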
static inline void ftrace_generate_kprobe_nop_insn(struct ftrace_insn *insn)
{
#ifdef CONFIG_KPROBES
	insn->opc = BREAKPOINT_INSTRUCTION;
	insn->disp = KPROBE_ON_FTRACE_NOP;
#endif
}

static inline void ftrace_generate_kprobe_call_insn(struct ftrace_insn *insn)
{
#ifdef CONFIG_KPROBES
	insn->opc = BREAKPOINT_INSTRUCTION;
	insn->disp = KPROBE_ON_FTRACE_CALL;
#endif
}

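/*
 * ftrace_modify_call can be a no-op here: on s390 every patched call
 * site branches to the common ftrace_caller (or the ftrace_plt
 * trampoline), so the call site itself never encodes the ftrace
 * function address and nothing needs to be changed when the ftrace
 * function is switched.
 */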
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	return 0;
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	struct ftrace_insn orig, new, old;

	if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old)))
		return -EFAULT;
	if (addr == MCOUNT_ADDR) {
		/* Initial code replacement */
		ftrace_generate_orig_insn(&orig);
		ftrace_generate_nop_insn(&new);
	} else if (is_kprobe_on_ftrace(&old)) {
		/*
		 * If we find a breakpoint instruction, a kprobe has been
		 * placed at the beginning of the function. We write the
		 * constant KPROBE_ON_FTRACE_NOP into the remaining four
		 * bytes of the original instruction so that the kprobes
		 * handler can execute a nop if it reaches this breakpoint.
		 */
		ftrace_generate_kprobe_call_insn(&orig);
		ftrace_generate_kprobe_nop_insn(&new);
	} else {
		/* Replace ftrace call with a nop. */
		ftrace_generate_call_insn(&orig, rec->ip);
		ftrace_generate_nop_insn(&new);
	}
	/* Verify that the code to be replaced matches what we expect. */
	if (memcmp(&orig, &old, sizeof(old)))
		return -EINVAL;
	s390_kernel_write((void *) rec->ip, &new, sizeof(new));
	return 0;
}

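/*
 * Note on the MCOUNT_ADDR case above: during early boot ftrace converts
 * each mcount site exactly once with addr == MCOUNT_ADDR. At that point
 * the site still contains the compiler generated code, which is why the
 * expected old code is built with ftrace_generate_orig_insn() instead
 * of ftrace_generate_call_insn().
 */
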
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	struct ftrace_insn orig, new, old;

	if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old)))
		return -EFAULT;
	if (is_kprobe_on_ftrace(&old)) {
		/*
		 * If we find a breakpoint instruction, a kprobe has been
		 * placed at the beginning of the function. We write the
		 * constant KPROBE_ON_FTRACE_CALL into the remaining four
		 * bytes of the original instruction so that the kprobes
		 * handler can execute a brasl if it reaches this breakpoint.
		 */
		ftrace_generate_kprobe_nop_insn(&orig);
		ftrace_generate_kprobe_call_insn(&new);
	} else {
		/* Replace nop with an ftrace call. */
		ftrace_generate_nop_insn(&orig);
		ftrace_generate_call_insn(&new, rec->ip);
	}
	/* Verify that the code to be replaced matches what we expect. */
	if (memcmp(&orig, &old, sizeof(old)))
		return -EINVAL;
	s390_kernel_write((void *) rec->ip, &new, sizeof(new));
	return 0;
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	return 0;
}

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}

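/*
 * The trampoline built below is hand assembled into a module_alloc()
 * page so that it is within branch range of module code. Its layout:
 *
 *	basr	%r1,0		# offset 0:  r1 = trampoline address + 2
 *	lg	%r1,10(%r1)	# offset 2:  load FTRACE_ADDR from offset 12
 *	br	%r1		# offset 8:  branch to ftrace_caller
 *	<2 padding bytes>	# offset 10
 *	<FTRACE_ADDR>		# offset 12: eight byte target address
 *
 * This is also why the trampoline clobbers r1 in addition to r0.
 */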
static int __init ftrace_plt_init(void)
{
	unsigned int *ip;

	ftrace_plt = (unsigned long) module_alloc(PAGE_SIZE);
	if (!ftrace_plt)
		panic("cannot allocate ftrace plt\n");
	ip = (unsigned int *) ftrace_plt;
	ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */
	ip[1] = 0x100a0004;
	ip[2] = 0x07f10000;
	ip[3] = FTRACE_ADDR >> 32;
	ip[4] = FTRACE_ADDR & 0xffffffff;
	set_memory_ro(ftrace_plt, 1);
	return 0;
}
device_initcall(ftrace_plt_init);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Hook the return address and push it on the stack of return addresses
 * in the current thread info.
 */
unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip)
{
	struct ftrace_graph_ent trace;

	if (unlikely(ftrace_graph_is_dead()))
		goto out;
	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;
	ip -= MCOUNT_INSN_SIZE;
	trace.func = ip;
	trace.depth = current->curr_ret_stack + 1;
	/* Only trace if the calling function expects to. */
	if (!ftrace_graph_entry(&trace))
		goto out;
	if (ftrace_push_return_trace(parent, ip, &trace.depth, 0,
				     NULL) == -EBUSY)
		goto out;
	parent = (unsigned long) return_to_handler;
out:
	return parent;
}
NOKPROBE_SYMBOL(prepare_ftrace_return);

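/*
 * With parent redirected, the traced function "returns" into
 * return_to_handler, which pops the real return address off the return
 * stack, runs the graph return handlers and then branches back to the
 * original caller.
 */
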
/*
 * Patch the kernel code at the ftrace_graph_caller location. The
 * instruction there is a branch relative on condition. To enable the
 * ftrace graph code block, we simply patch the mask field of the
 * instruction to zero, which turns the instruction into a nop.
 * To disable the ftrace graph code, the mask field is patched to all
 * ones, which turns the instruction into an unconditional branch.
 */
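/*
 * The single byte write works because brc is encoded as a7 m 4 iiii,
 * with the condition mask in the high nibble of the second byte:
 * writing 0x04 sets mask 0 (never taken, a nop), writing 0xf4 sets
 * mask 15 (always taken), and the branch target is left untouched.
 */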
int ftrace_enable_ftrace_graph_caller(void)
{
	u8 op = 0x04; /* set mask field to zero */

	s390_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
	return 0;
}

int ftrace_disable_ftrace_graph_caller(void)
{
	u8 op = 0xf4; /* set mask field to all ones */

	s390_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
	return 0;
}

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */