1 /* SPDX-License-Identifier: GPL-2.0-only */
3 * arch/arm64/kernel/entry-ftrace.S
5 * Copyright (C) 2013 Linaro Limited
6 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
9 #include <linux/linkage.h>
10 #include <asm/asm-offsets.h>
11 #include <asm/assembler.h>
12 #include <asm/ftrace.h>
15 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
17 * Due to -fpatchable-function-entry=2, the compiler has placed two NOPs before
18 * the regular function prologue. For an enabled callsite, ftrace_init_nop() and
19 * ftrace_make_call() have patched those NOPs to:
24 * ... where <entry> is either ftrace_caller or ftrace_regs_caller.
26 * Each instrumented function follows the AAPCS, so here x0-x8 and x19-x30 are
27 * live, and x9-x18 are safe to clobber.
29 * We save the callsite's context into a pt_regs before invoking any ftrace
30 * callbacks. So that we can get a sensible backtrace, we create a stack record
31 * for the callsite and the ftrace entry assembly. This is not sufficient for
32 * reliable stacktrace: until we create the callsite stack record, its caller
33 * is missing from the LR and existing chain of frame records.
/*
 * NOTE(review): the embedded original line numbers in this chunk are
 * non-contiguous, so parts of this macro — including the \allregs test
 * that should guard the callee-saved stores, the S_PC save, and the
 * closing .endm — are not visible here. Confirm against the full file.
 *
 * Builds a pt_regs (plus a 16-byte callee frame) on the stack so ftrace
 * callbacks can inspect/modify the callsite's register state.
 */
35 .macro ftrace_regs_entry, allregs=0
36 /* Make room for pt_regs, plus a callee frame */
37 sub sp, sp, #(S_FRAME_SIZE + 16)
39 /* Save function arguments (and x9 for simplicity) */
40 stp x0, x1, [sp, #S_X0]
41 stp x2, x3, [sp, #S_X2]
42 stp x4, x5, [sp, #S_X4]
43 stp x6, x7, [sp, #S_X6]
44 stp x8, x9, [sp, #S_X8]
46 /* Optionally save the callee-saved registers, always save the FP */
/*
 * NOTE(review): per the comment above, x10-x28 should be saved only when
 * \allregs is set; the guarding conditional is not visible in this
 * fragment — verify it was not lost in extraction.
 */
48 stp x10, x11, [sp, #S_X10]
49 stp x12, x13, [sp, #S_X12]
50 stp x14, x15, [sp, #S_X14]
51 stp x16, x17, [sp, #S_X16]
52 stp x18, x19, [sp, #S_X18]
53 stp x20, x21, [sp, #S_X20]
54 stp x22, x23, [sp, #S_X22]
55 stp x24, x25, [sp, #S_X24]
56 stp x26, x27, [sp, #S_X26]
57 stp x28, x29, [sp, #S_X28]
62 /* Save the callsite's SP and LR */
/* x10 = callsite SP, i.e. sp before the allocation at the top of this macro */
63 add x10, sp, #(S_FRAME_SIZE + 16)
/* x9 holds the callsite's LR at this point (it is stored into S_LR) */
64 stp x9, x10, [sp, #S_LR]
66 /* Save the PC after the ftrace callsite */
/* NOTE(review): the S_PC store itself is not visible in this fragment */
69 /* Create a frame record for the callsite above pt_regs */
70 stp x29, x9, [sp, #S_FRAME_SIZE]
71 add x29, sp, #S_FRAME_SIZE
73 /* Create our frame record within pt_regs. */
74 stp x29, x30, [sp, #S_STACKFRAME]
75 add x29, sp, #S_STACKFRAME
/*
 * NOTE(review): the bodies of ftrace_regs_caller and the start of
 * ftrace_caller were dropped by extraction (original line numbering
 * jumps 78 -> 81 -> 86). Presumably each invoked ftrace_regs_entry and
 * fell through/branched to the common handler — confirm against the
 * full file before relying on this.
 */
78 ENTRY(ftrace_regs_caller)
81 ENDPROC(ftrace_regs_caller)
/* NOTE(review): ENTRY(ftrace_caller) and its body are not visible here */
86 ENDPROC(ftrace_caller)
/*
 * NOTE(review): this is a fragment of ftrace_common — its ENTRY line,
 * the GLOBAL(ftrace_call) patch site, the x0/x1 argument restore, the
 * FP/LR/PC restores, and the final ret are all missing from view
 * (original line numbering is non-contiguous). Annotations below cover
 * only what is visible.
 */
/* Set up arguments for the tracer callback: (ip, parent_ip, op, regs) */
89 sub x0, x30, #AARCH64_INSN_SIZE // ip (callsite's BL insn)
90 mov x1, x9 // parent_ip (callsite's LR)
91 ldr_l x2, function_trace_op // op
97 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* Patchable NOP: turned into "b ftrace_graph_caller" when graph tracing is on */
98 GLOBAL(ftrace_graph_call) // ftrace_graph_caller();
99 nop // If enabled, this will be replaced
100 // "b ftrace_graph_caller"
104 * At the callsite x0-x8 and x19-x30 were live. Any C code will have preserved
105 * x19-x29 per the AAPCS, and we created frame records upon entry, so we need
106 * to restore x0-x8, x29, and x30.
108 ftrace_common_return:
109 /* Restore function arguments */
/* NOTE(review): the ldp x0, x1, [sp, #S_X0] pair is not visible in this fragment */
111 ldp x2, x3, [sp, #S_X2]
112 ldp x4, x5, [sp, #S_X4]
113 ldp x6, x7, [sp, #S_X6]
116 /* Restore the callsite's FP, LR, PC */
/* NOTE(review): the actual FP/LR/PC restore instructions are not visible here */
121 /* Restore the callsite's SP */
/* Undo the (S_FRAME_SIZE + 16) allocation made by ftrace_regs_entry */
122 add sp, sp, #S_FRAME_SIZE + 16
125 ENDPROC(ftrace_common)
127 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Graph-tracer hook for the DYNAMIC_FTRACE_WITH_REGS path: passes the
 * callsite ip, a pointer to the saved LR in pt_regs, and the callsite's
 * FP to prepare_ftrace_return(), then resumes the common return path.
 *
 * NOTE(review): the instruction that loads x0 before the sub below
 * (presumably from the saved PC in pt_regs) is missing from this
 * fragment — confirm against the full file.
 */
128 ENTRY(ftrace_graph_caller)
130 sub x0, x0, #AARCH64_INSN_SIZE // ip (callsite's BL insn)
/* x1 points at the S_LR slot so prepare_ftrace_return can rewrite the LR */
131 add x1, sp, #S_LR // parent_ip (callsite's LR)
132 ldr x2, [sp, #S_FRAME_SIZE] // parent fp (callsite's FP)
133 bl prepare_ftrace_return
134 b ftrace_common_return
135 ENDPROC(ftrace_graph_caller)
138 #else /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
141 * Gcc with -pg will put the following code in the beginning of each function:
144 * [function's body ...]
145 * "bl _mcount" may be replaced to "bl ftrace_caller" or NOP if dynamic
148 * Please note that x0 as an argument will not be used here because we can
149 * get lr(x30) of instrumented function at any time by winding up call stack
150 * as long as the kernel is compiled without -fomit-frame-pointer.
151 * (or CONFIG_FRAME_POINTER, this is forced on arm64)
153 * stack layout after mcount_enter in _mcount():
155 * current sp/fp => 0:+-----+
156 * in _mcount() | x29 | -> instrumented function's fp
158 * | x30 | -> _mcount()'s lr (= instrumented function's pc)
159 * old sp => +16:+-----+
160 * when instrumented | |
161 * function calls | ... |
164 * instrumented => +xx:+-----+
165 * function's fp | x29 | -> parent's fp
167 * | x30 | -> instrumented function's lr (= parent's pc)
/*
 * NOTE(review): the mcount helper macros below are heavily truncated by
 * extraction — the .macro headers for mcount_enter/mcount_exit, most
 * macro bodies, and every .endm are missing (original line numbering
 * jumps throughout). Only the surviving lines are annotated; confirm
 * each macro against the full file.
 */
/* mcount_enter body fragment: push a frame record (x29/x30) */
173 stp x29, x30, [sp, #-16]!
/* mcount_exit body fragment: pop the frame record (the ret is not visible) */
178 ldp x29, x30, [sp], #16
/* \rd = \rn - insn size: rewind past the BL to get the callsite address */
182 .macro mcount_adjust_addr rd, rn
183 sub \rd, \rn, #AARCH64_INSN_SIZE
186 /* for instrumented function's parent */
/* NOTE(review): body not visible — presumably loads the parent fp via x29 */
187 .macro mcount_get_parent_fp reg
192 /* for instrumented function */
/* pc0: derive the instrumented function's pc directly from x30 */
193 .macro mcount_get_pc0 reg
194 mcount_adjust_addr \reg, x30
/* NOTE(review): the load feeding \reg before this adjust is not visible */
197 .macro mcount_get_pc reg
199 mcount_adjust_addr \reg, \reg
/* NOTE(review): bodies of the two macros below are not visible */
202 .macro mcount_get_lr reg
207 .macro mcount_get_lr_addr reg
212 #ifndef CONFIG_DYNAMIC_FTRACE
214 * void _mcount(unsigned long return_address)
215 * @return_address: return address to instrumented function
217 * This function makes calls, if enabled, to:
218 * - tracer function to probe instrumented function's entry,
219 * - ftrace_graph_caller to set up an exit hook
/*
 * NOTE(review): fragment of _mcount (static, !CONFIG_DYNAMIC_FTRACE).
 * The ENTRY(_mcount) line, mcount_enter, the instruction that loads x0
 * with the address of ftrace_stub (which the cmp instructions below
 * compare against), and the mcount_exit/ret tail are all missing from
 * view — confirm against the full file.
 *
 * Calls the registered tracer (if any), then tests whether the graph
 * tracer hooks are installed and branches to ftrace_graph_caller if so.
 */
224 ldr_l x2, ftrace_trace_function
/* NOTE(review): x0 is presumably the address of ftrace_stub here — the load is not visible */
226 cmp x0, x2 // if (ftrace_trace_function
227 b.eq skip_ftrace_call // != ftrace_stub) {
229 mcount_get_pc x0 // function's pc
230 mcount_get_lr x1 // function's lr (= parent's pc)
231 blr x2 // (*ftrace_trace_function)(pc, lr);
233 skip_ftrace_call: // }
234 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
235 ldr_l x2, ftrace_graph_return
236 cmp x0, x2 // if ((ftrace_graph_return
237 b.ne ftrace_graph_caller // != ftrace_stub)
239 ldr_l x2, ftrace_graph_entry // || (ftrace_graph_entry
240 adr_l x0, ftrace_graph_entry_stub // != ftrace_graph_entry_stub))
/* NOTE(review): the cmp between x0 and x2 expected before this b.ne is not visible */
242 b.ne ftrace_graph_caller // ftrace_graph_caller();
243 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
246 EXPORT_SYMBOL(_mcount)
249 #else /* CONFIG_DYNAMIC_FTRACE */
251 * _mcount() is used to build the kernel with -pg option, but all the branch
252 * instructions to _mcount() are replaced to NOP initially at kernel start up,
253 * and later on, NOP to branch to ftrace_caller() when enabled or branch to
254 * NOP when disabled per-function base.
/*
 * NOTE(review): ENTRY(_mcount) and its body for the CONFIG_DYNAMIC_FTRACE
 * case (per the comment above, callsites are NOP-patched, so this stub
 * presumably just returns) are not visible — only the export survived
 * extraction. Confirm against the full file.
 */
259 EXPORT_SYMBOL(_mcount)
263 * void ftrace_caller(unsigned long return_address)
264 * @return_address: return address to instrumented function
266 * This function is a counterpart of _mcount() in 'static' ftrace, and
268 * - tracer function to probe instrumented function's entry,
269 * - ftrace_graph_caller to set up an exit hook
/*
 * NOTE(review): fragment of ftrace_caller (CONFIG_DYNAMIC_FTRACE,
 * without WITH_REGS). The ENTRY(ftrace_caller) line, mcount_enter, and
 * the mcount_exit tail are missing from view — confirm against the
 * full file.
 *
 * Sets up (pc, lr) arguments, then falls through two patchable NOPs:
 * ftrace_call is patched to "bl <tracer>", and ftrace_graph_call to
 * "b ftrace_graph_caller", by the dynamic-ftrace machinery.
 */
274 mcount_get_pc0 x0 // function's pc
275 mcount_get_lr x1 // function's lr
277 GLOBAL(ftrace_call) // tracer(pc, lr);
278 nop // This will be replaced with "bl xxx"
279 // where xxx can be any kind of tracer.
281 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
282 GLOBAL(ftrace_graph_call) // ftrace_graph_caller();
283 nop // If enabled, this will be replaced
284 // "b ftrace_graph_caller"
288 ENDPROC(ftrace_caller)
289 #endif /* CONFIG_DYNAMIC_FTRACE */
291 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
293 * void ftrace_graph_caller(void)
295 * Called from _mcount() or ftrace_caller() when function_graph tracer is
297 * This function w/ prepare_ftrace_return() fakes link register's value on
298 * the call stack in order to intercept instrumented function's return path
299 * and run return_to_handler() later on its exit.
/*
 * Graph-tracer hook for the mcount-based paths: gathers the callsite pc,
 * the address of the saved lr, and the parent fp, then lets
 * prepare_ftrace_return() redirect the return path to return_to_handler.
 *
 * NOTE(review): the exit sequence between the bl and ENDPROC (presumably
 * mcount_exit) is missing from this fragment — confirm against the full
 * file.
 */
301 ENTRY(ftrace_graph_caller)
302 mcount_get_pc x0 // function's pc
303 mcount_get_lr_addr x1 // pointer to function's saved lr
304 mcount_get_parent_fp x2 // parent's fp
305 bl prepare_ftrace_return // prepare_ftrace_return(pc, &lr, fp)
308 ENDPROC(ftrace_graph_caller)
309 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
310 #endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
316 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
318 * void return_to_handler(void)
320 * Run ftrace_return_to_handler() before going back to parent.
321 * @fp is checked against the value passed by ftrace_graph_caller().
/*
 * Return trampoline installed by the graph tracer: preserves the return
 * value registers, asks ftrace_return_to_handler() for the original
 * return address, restores x30 from it, and (not visible here) returns.
 *
 * NOTE(review): the "stp x0, x1, [sp, #-64]!" / "ldp x0, x1, [sp], #64"
 * pair bracketing these saves, and the final ret, are missing from this
 * fragment (original line numbering jumps 324 -> 327 and 339 -> 343) —
 * confirm against the full file.
 */
323 ENTRY(return_to_handler)
324 /* save return value regs */
327 stp x2, x3, [sp, #16]
328 stp x4, x5, [sp, #32]
329 stp x6, x7, [sp, #48]
331 mov x0, x29 // parent's fp
332 bl ftrace_return_to_handler // addr = ftrace_return_to_handler(fp);
333 mov x30, x0 // restore the original return address
335 /* restore return value regs */
337 ldp x2, x3, [sp, #16]
338 ldp x4, x5, [sp, #32]
339 ldp x6, x7, [sp, #48]
343 END(return_to_handler)
344 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */