// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm64/kernel/ftrace.c
 *
 * Copyright (C) 2013 Linaro Limited
 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 */

#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/swab.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/ftrace.h>
#include <asm/insn.h>
#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Replace a single instruction, which may be a branch or NOP.
 * If @validate == true, a replaced instruction is checked against 'old'.
 */
static int ftrace_modify_code(unsigned long pc, u32 old, u32 new,
			      bool validate)
{
	u32 replaced;

	/*
	 * Note:
	 * We are paranoid about modifying text, as if a bug were to happen, it
	 * could cause us to read or write to someplace that could cause harm.
	 * Carefully read and modify the code with aarch64_insn_*() which uses
	 * probe_kernel_*(), and make sure what we read is what we expected it
	 * to be before modifying it.
	 */
	if (validate) {
		if (aarch64_insn_read((void *)pc, &replaced))
			return -EFAULT;

		if (replaced != old)
			return -EINVAL;
	}
	if (aarch64_insn_patch_text_nosync((void *)pc, new))
		return -EPERM;

	return 0;
}
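
/*
 * Illustrative usage sketch: callers below normally validate the instruction
 * they expect to overwrite, roughly:
 *
 *	old = aarch64_insn_gen_nop();
 *	new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);
 *	ret = ftrace_modify_code(pc, old, new, true);
 *
 * ftrace_update_ftrace_func() is the exception: it passes validate == false,
 * since the branch currently at ftrace_call may target any previous tracer.
 */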

/*
 * Replace tracer function in ftrace_caller()
 */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long pc;
	u32 new;

	pc = (unsigned long)&ftrace_call;
	new = aarch64_insn_gen_branch_imm(pc, (unsigned long)func,
					  AARCH64_INSN_BRANCH_LINK);

	return ftrace_modify_code(pc, 0, new, false);
}

static struct plt_entry *get_ftrace_plt(struct module *mod, unsigned long addr)
{
#ifdef CONFIG_ARM64_MODULE_PLTS
	struct plt_entry *plt = mod->arch.ftrace_trampolines;

	if (addr == FTRACE_ADDR)
		return &plt[FTRACE_PLT_IDX];
	if (addr == FTRACE_REGS_ADDR &&
	    IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
		return &plt[FTRACE_REGS_PLT_IDX];
#endif
	return NULL;
}

/*
 * Turn on the call to ftrace_caller() in instrumented function
 */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long pc = rec->ip;
	u32 old, new;
	long offset = (long)pc - (long)addr;

	if (offset < -SZ_128M || offset >= SZ_128M) {
		struct module *mod;
		struct plt_entry *plt;

		if (!IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
			return -EINVAL;

		/*
		 * On kernels that support module PLTs, the offset between the
		 * branch instruction and its target may legally exceed the
		 * range of an ordinary relative 'bl' opcode. In this case, we
		 * need to branch via a trampoline in the module.
		 *
		 * NOTE: __module_text_address() must be called with preemption
		 * disabled, but we can rely on ftrace_lock to ensure that 'mod'
		 * retains its validity throughout the remainder of this code.
		 */
		preempt_disable();
		mod = __module_text_address(pc);
		preempt_enable();

		if (WARN_ON(!mod))
			return -EINVAL;

		plt = get_ftrace_plt(mod, addr);
		if (!plt) {
			pr_err("ftrace: no module PLT for %ps\n", (void *)addr);
			return -EINVAL;
		}

		addr = (unsigned long)plt;
	}

	old = aarch64_insn_gen_nop();
	new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);

	return ftrace_modify_code(pc, old, new, true);
}
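
/*
 * Background note on the SZ_128M check above: an AArch64 BL instruction
 * encodes a signed 26-bit immediate in units of 4 bytes, so it can only
 * reach targets within +/-128MiB of the branch itself. Targets further
 * away must be reached via a module PLT trampoline, as handled above.
 */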

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	unsigned long pc = rec->ip;
	u32 old, new;

	old = aarch64_insn_gen_branch_imm(pc, old_addr,
					  AARCH64_INSN_BRANCH_LINK);
	new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);

	return ftrace_modify_code(pc, old, new, true);
}

/*
 * The compiler has inserted two NOPs before the regular function prologue.
 * All instrumented functions follow the AAPCS, so x0-x8 and x19-x30 are live,
 * and x9-x18 are free for our use.
 *
 * At runtime we want to be able to swing a single NOP <-> BL to enable or
 * disable the ftrace call. The BL requires us to save the original LR value,
 * so here we insert a <MOV X9, LR> over the first NOP so the instructions
 * before the regular prologue are:
 *
 * | Compiled | Disabled   | Enabled    |
 * +----------+------------+------------+
 * | NOP      | MOV X9, LR | MOV X9, LR |
 * | NOP      | NOP        | BL <entry> |
 *
 * The LR value will be recovered by ftrace_regs_entry, and restored into LR
 * before returning to the regular function prologue. When a function is not
 * being traced, the MOV is not harmful given x9 is not live per the AAPCS.
 *
 * Note: ftrace_process_locs() has pre-adjusted rec->ip to be the address of
 * the BL.
 */
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	unsigned long pc = rec->ip - AARCH64_INSN_SIZE;
	u32 old, new;

	old = aarch64_insn_gen_nop();
	new = aarch64_insn_gen_move_reg(AARCH64_INSN_REG_9,
					AARCH64_INSN_REG_LR,
					AARCH64_INSN_VARIANT_64BIT);
	return ftrace_modify_code(pc, old, new, true);
}
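
/*
 * Illustrative sketch: with tracing enabled, the entry of an instrumented
 * function therefore looks roughly like:
 *
 *	mov	x9, x30		// written over the first NOP by ftrace_init_nop()
 *	bl	<ftrace entry>	// written over the second NOP by ftrace_make_call()
 *	<regular function prologue>
 *
 * where <ftrace entry> is the ftrace trampoline, possibly reached via a
 * module PLT as described in ftrace_make_call().
 */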
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */

/*
 * Turn off the call to ftrace_caller() in instrumented function
 */
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	unsigned long pc = rec->ip;
	bool validate = true;
	u32 old = 0, new;
	long offset = (long)pc - (long)addr;

	if (offset < -SZ_128M || offset >= SZ_128M) {
		u32 replaced;

		if (!IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
			return -EINVAL;

		/*
		 * 'mod' is only set at module load time, but if we end up
		 * dealing with an out-of-range condition, we can assume it
		 * is due to a module being loaded far away from the kernel.
		 */
		if (!mod) {
			preempt_disable();
			mod = __module_text_address(pc);
			preempt_enable();

			if (WARN_ON(!mod))
				return -EINVAL;
		}

		/*
		 * The instruction we are about to patch may be a branch and
		 * link instruction that was redirected via a PLT entry. In
		 * this case, the normal validation will fail, but we can at
		 * least check that we are dealing with a branch and link
		 * instruction that points into the right module.
		 */
		if (aarch64_insn_read((void *)pc, &replaced))
			return -EFAULT;

		if (!aarch64_insn_is_bl(replaced) ||
		    !within_module(pc + aarch64_get_branch_offset(replaced),
				   mod))
			return -EINVAL;

		validate = false;
	} else {
		old = aarch64_insn_gen_branch_imm(pc, addr,
						  AARCH64_INSN_BRANCH_LINK);
	}

	new = aarch64_insn_gen_nop();

	return ftrace_modify_code(pc, old, new, validate);
}

void arch_ftrace_update_code(int command)
{
	command |= FTRACE_MAY_SLEEP;
	ftrace_modify_all_code(command);
}

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * function_graph tracer expects ftrace_return_to_handler() to be called
 * on the way back to parent. For this purpose, this function is called
 * in _mcount() or ftrace_caller() to replace the return address (*parent)
 * on the call stack with return_to_handler.
 *
 * Note that @frame_pointer is used only for a sanity check later.
 */
void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	unsigned long old;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * Note:
	 * No protection against faulting at *parent, which may be seen
	 * on other archs. It's unlikely on AArch64.
	 */
	old = *parent;

	if (!function_graph_enter(old, self_addr, frame_pointer, NULL))
		*parent = return_hooker;
}
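
/*
 * Illustrative sketch: once hooked, the saved return address of a traced
 * function is rewritten on entry, roughly:
 *
 *	caller() --bl--> traced_func()
 *	                   saved LR slot = return_to_handler  (was: caller)
 *
 * so that when traced_func() returns, return_to_handler() runs and the
 * function_graph tracer recovers and jumps back to the original address.
 */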

#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Turn on/off the call to ftrace_graph_caller() in ftrace_caller()
 * depending on @enable.
 */
static int ftrace_modify_graph_caller(bool enable)
{
	unsigned long pc = (unsigned long)&ftrace_graph_call;
	u32 branch, nop;

	branch = aarch64_insn_gen_branch_imm(pc,
					     (unsigned long)ftrace_graph_caller,
					     AARCH64_INSN_BRANCH_NOLINK);
	nop = aarch64_insn_gen_nop();

	if (enable)
		return ftrace_modify_code(pc, nop, branch, true);
	else
		return ftrace_modify_code(pc, branch, nop, true);
}

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(false);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */