arch/nds32/kernel/ftrace.c

// SPDX-License-Identifier: GPL-2.0

#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>

#ifndef CONFIG_DYNAMIC_FTRACE
extern void (*ftrace_trace_function)(unsigned long, unsigned long,
				     struct ftrace_ops*, struct pt_regs*);
extern void ftrace_graph_caller(void);

noinline void __naked ftrace_stub(unsigned long ip, unsigned long parent_ip,
				  struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	__asm__ ("");	/* avoid being optimized into a pure function */
}
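
/*
 * Without CONFIG_DYNAMIC_FTRACE the compiler-generated prologue of every
 * traced function calls _mcount() directly.  The comparisons against
 * ftrace_stub / ftrace_graph_entry_stub below keep the overhead low while
 * no tracer is installed.
 */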
noinline void _mcount(unsigned long parent_ip)
{
	/* save all state by the compiler prologue */

	unsigned long ip = (unsigned long)__builtin_return_address(0);

	if (ftrace_trace_function != ftrace_stub)
		ftrace_trace_function(ip - MCOUNT_INSN_SIZE, parent_ip,
				      NULL, NULL);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (ftrace_graph_return != (trace_func_graph_ret_t)ftrace_stub
	    || ftrace_graph_entry != ftrace_graph_entry_stub)
		ftrace_graph_caller();
#endif

	/* restore all state by the compiler epilogue */
}
EXPORT_SYMBOL(_mcount);

#else /* CONFIG_DYNAMIC_FTRACE */

noinline void __naked ftrace_stub(unsigned long ip, unsigned long parent_ip,
				  struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	__asm__ ("");	/* avoid being optimized into a pure function */
}

noinline void __naked _mcount(unsigned long parent_ip)
{
	__asm__ ("");	/* avoid being optimized into a pure function */
}
EXPORT_SYMBOL(_mcount);

#define XSTR(s) STR(s)
#define STR(s) #s
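
/*
 * Dynamic-ftrace trampoline.  Patched mcount call sites branch to the
 * address the ftrace core hands to ftrace_make_call(), which on this port
 * is expected to be this routine.  The "ftrace_call" and
 * "ftrace_graph_call" labels below are deliberate three-nop placeholders
 * sized like the call sequence built by ftrace_gen_call_insn();
 * ftrace_update_ftrace_func() and ftrace_modify_graph_caller() overwrite
 * them with real calls at runtime.
 */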
void _ftrace_caller(unsigned long parent_ip)
{
	/* save all state needed by the compiler prologue */

	/*
	 * prepare arguments for real tracing function
	 * first arg  : __builtin_return_address(0) - MCOUNT_INSN_SIZE
	 * second arg : parent_ip
	 */
	__asm__ __volatile__ (
		"move $r1, %0				   \n\t"
		"addi $r0, %1, #-" XSTR(MCOUNT_INSN_SIZE) "\n\t"
		:
		: "r" (parent_ip), "r" (__builtin_return_address(0)));

	/* a placeholder for the call to a real tracing function */
	__asm__ __volatile__ (
		"ftrace_call:		\n\t"
		"nop			\n\t"
		"nop			\n\t"
		"nop			\n\t");

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* a placeholder for the call to ftrace_graph_caller */
	__asm__ __volatile__ (
		"ftrace_graph_call:	\n\t"
		"nop			\n\t"
		"nop			\n\t"
		"nop			\n\t");
#endif
	/* restore all state needed by the compiler epilogue */
}

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
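
/*
 * Call-sequence encoders.  A patched call occupies MCOUNT_INSN_SIZE bytes
 * and, per the comments in ftrace_gen_call_insn(), consists of
 *
 *	sethi	$r15, hi20(addr)	; load the upper bits of the target
 *	ori	$r15, $r15, lo12(addr)	; fill in the low bits
 *	jral	$lp, $r15		; call, return address goes to $lp
 *
 * The gen_*_insn() helpers below assemble those machine words by hand and
 * pass them through ENDIAN_CONVERT so the bytes land in memory in the
 * CPU's instruction byte order.
 */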
static unsigned long gen_sethi_insn(unsigned long addr)
{
	unsigned long opcode = 0x46000000;
	unsigned long imm = addr >> 12;
	unsigned long rt_num = 0xf << 20;

	return ENDIAN_CONVERT(opcode | rt_num | imm);
}

static unsigned long gen_ori_insn(unsigned long addr)
{
	unsigned long opcode = 0x58000000;
	unsigned long imm = addr & 0x0000fff;
	unsigned long rt_num = 0xf << 20;
	unsigned long ra_num = 0xf << 15;

	return ENDIAN_CONVERT(opcode | rt_num | ra_num | imm);
}

static unsigned long gen_jral_insn(unsigned long addr)
{
	unsigned long opcode = 0x4a000001;
	unsigned long rt_num = 0x1e << 20;
	unsigned long rb_num = 0xf << 10;

	return ENDIAN_CONVERT(opcode | rt_num | rb_num);
}

static void ftrace_gen_call_insn(unsigned long *call_insns,
				 unsigned long addr)
{
	call_insns[0] = gen_sethi_insn(addr);	/* sethi $r15, imm20u       */
	call_insns[1] = gen_ori_insn(addr);	/* ori   $r15, $r15, imm15u */
	call_insns[2] = gen_jral_insn(addr);	/* jral  $lp,  $r15         */
}
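
/*
 * Illustrative example (hypothetical target address): for addr == 0xc0123456
 * the generated words encode
 *
 *	sethi	$r15, 0xc0123		; $r15 = 0xc0123000
 *	ori	$r15, $r15, 0x456	; $r15 = 0xc0123456
 *	jral	$lp,  $r15		; branch to 0xc0123456
 */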

static int __ftrace_modify_code(unsigned long pc, unsigned long *old_insn,
				unsigned long *new_insn, bool validate)
{
	unsigned long orig_insn[3];

	if (validate) {
		if (copy_from_kernel_nofault(orig_insn, (void *)pc,
					     MCOUNT_INSN_SIZE))
			return -EFAULT;
		if (memcmp(orig_insn, old_insn, MCOUNT_INSN_SIZE))
			return -EINVAL;
	}

	if (copy_to_kernel_nofault((void *)pc, new_insn, MCOUNT_INSN_SIZE))
		return -EPERM;

	return 0;
}
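
/*
 * Same as __ftrace_modify_code() but also flushes the instruction cache
 * over the patched range so the CPU fetches the new instructions rather
 * than stale cached ones.
 */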
static int ftrace_modify_code(unsigned long pc, unsigned long *old_insn,
			      unsigned long *new_insn, bool validate)
{
	int ret;

	ret = __ftrace_modify_code(pc, old_insn, new_insn, validate);
	if (ret)
		return ret;

	flush_icache_range(pc, pc + MCOUNT_INSN_SIZE);

	return ret;
}
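
/*
 * Point the "ftrace_call" placeholder in _ftrace_caller() at the tracer
 * the core passes in, or leave it as nops when the tracer is ftrace_stub.
 * No validation here: the placeholder's current contents depend on which
 * tracer was installed before.
 */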
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long pc = (unsigned long)&ftrace_call;
	unsigned long old_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
	unsigned long new_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};

	if (func != ftrace_stub)
		ftrace_gen_call_insn(new_insn, (unsigned long)func);

	return ftrace_modify_code(pc, old_insn, new_insn, false);
}
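
/*
 * Per-callsite patching: ftrace_make_call() turns the recorded mcount
 * site into a call to @addr, ftrace_make_nop() turns it back into nops.
 * Both validate the old contents first so an unexpected instruction
 * sequence fails with -EINVAL instead of being silently overwritten.
 */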
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long pc = rec->ip;
	unsigned long nop_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
	unsigned long call_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};

	ftrace_gen_call_insn(call_insn, addr);

	return ftrace_modify_code(pc, nop_insn, call_insn, true);
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	unsigned long pc = rec->ip;
	unsigned long nop_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
	unsigned long call_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};

	ftrace_gen_call_insn(call_insn, addr);

	return ftrace_modify_code(pc, call_insn, nop_insn, true);
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
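/*
 * Function-graph tracing: replace the traced function's saved return
 * address with return_to_handler() so the exit can be recorded, unless
 * graph tracing is paused or function_graph_enter() rejects the entry.
 */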
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	unsigned long old;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	old = *parent;

	if (!function_graph_enter(old, self_addr, frame_pointer, NULL))
		*parent = return_hooker;
}
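
/*
 * The __builtin_frame_address()/__builtin_return_address() levels below
 * assume the fixed call chain traced function -> trampoline -> this
 * function, which is why this helper is kept noinline.
 */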
noinline void ftrace_graph_caller(void)
{
	unsigned long *parent_ip =
		(unsigned long *)(__builtin_frame_address(2) - 4);

	unsigned long selfpc =
		(unsigned long)(__builtin_return_address(1) - MCOUNT_INSN_SIZE);

	unsigned long frame_pointer =
		(unsigned long)__builtin_frame_address(3);

	prepare_ftrace_return(parent_ip, selfpc, frame_pointer);
}
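
/*
 * Executed in place of the traced function's original return: save the
 * return-value registers, pass the frame pointer to
 * ftrace_return_to_handler() (which records the exit and hands back the
 * real return address), move that address into $lp and restore the
 * registers so the final return lands in the original caller.
 */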
extern unsigned long ftrace_return_to_handler(unsigned long frame_pointer);
void __naked return_to_handler(void)
{
	__asm__ __volatile__ (
		/* save state needed by the ABI */
		"smw.adm $r0,[$sp],$r1,#0x0  \n\t"

		/* get original return address */
		"move $r0, $fp               \n\t"
		"bal ftrace_return_to_handler\n\t"
		"move $lp, $r0               \n\t"

		/* restore state needed by the ABI */
		"lmw.bim $r0,[$sp],$r1,#0x0  \n\t");
}

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_graph_call;
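
/*
 * Toggle the "ftrace_graph_call" placeholder in _ftrace_caller() between
 * three nops and a call to ftrace_graph_caller(), validating the old
 * contents either way.
 */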
static int ftrace_modify_graph_caller(bool enable)
{
	unsigned long pc = (unsigned long)&ftrace_graph_call;
	unsigned long nop_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
	unsigned long call_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};

	ftrace_gen_call_insn(call_insn, (unsigned long)ftrace_graph_caller);

	if (enable)
		return ftrace_modify_code(pc, nop_insn, call_insn, true);
	else
		return ftrace_modify_code(pc, call_insn, nop_insn, true);
}

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(false);
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
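
/*
 * Out-of-line irq-flags tracing hooks; presumably kept noinline so that
 * low-level assembly (exception entry/exit paths) has stable symbols to
 * call.
 */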
#ifdef CONFIG_TRACE_IRQFLAGS
noinline void __trace_hardirqs_off(void)
{
	trace_hardirqs_off();
}

noinline void __trace_hardirqs_on(void)
{
	trace_hardirqs_on();
}
#endif /* CONFIG_TRACE_IRQFLAGS */