1 // SPDX-License-Identifier: GPL-2.0
/*
 * Code for tracing calls in Linux kernel.
 * Copyright (C) 2009-2016 Helge Deller <deller@gmx.de>
 *
 * based on code for x86 which is:
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * future possible enhancements:
 *	- add CONFIG_STACK_TRACER
 */
13 #include <linux/init.h>
14 #include <linux/ftrace.h>
15 #include <linux/uaccess.h>
16 #include <linux/kprobes.h>
17 #include <linux/ptrace.h>
18 #include <linux/jump_label.h>
20 #include <asm/assembly.h>
21 #include <asm/sections.h>
22 #include <asm/ftrace.h>
23 #include <asm/patch.h>
25 #define __hot __section(".text.hot")
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* Flipped by ftrace_{enable,disable}_ftrace_graph_caller() below. */
static DEFINE_STATIC_KEY_FALSE(ftrace_graph_enable);

/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 *
 * @parent:    pointer to the stack slot holding the caller's return address
 * @self_addr: address of the instrumented function
 */
static void __hot prepare_ftrace_return(unsigned long *parent,
					unsigned long self_addr)
{
	unsigned long old;
	extern int parisc_return_to_handler;

	/* Graph tracer has been torn down; nothing to hook. */
	if (unlikely(ftrace_graph_is_dead()))
		return;

	/* Per-task pause of graph tracing is in effect. */
	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	old = *parent;

	if (!function_graph_enter(old, self_addr, 0, NULL))
		/* activate parisc_return_to_handler() as return point */
		*parent = (unsigned long) &parisc_return_to_handler;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
/*
 * Current ftrace callback, invoked from ftrace_function_trampoline();
 * presumably installed via ftrace_update_ftrace_func() — body not visible here.
 */
static ftrace_func_t ftrace_func;
56 asmlinkage
void notrace __hot
ftrace_function_trampoline(unsigned long parent
,
57 unsigned long self_addr
,
58 unsigned long org_sp_gr3
,
59 struct ftrace_regs
*fregs
)
61 extern struct ftrace_ops
*function_trace_op
;
63 ftrace_func(self_addr
, parent
, function_trace_op
, fregs
);
65 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
66 if (static_branch_unlikely(&ftrace_graph_enable
)) {
67 unsigned long *parent_rp
;
69 /* calculate pointer to %rp in stack */
70 parent_rp
= (unsigned long *) (org_sp_gr3
- RP_OFFSET
);
71 /* sanity check: parent_rp should hold parent */
72 if (*parent_rp
!= parent
)
75 prepare_ftrace_return(parent_rp
, self_addr
);
#if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_FUNCTION_GRAPH_TRACER)
/* Turn on the graph-tracer hook in ftrace_function_trampoline(). */
int ftrace_enable_ftrace_graph_caller(void)
{
	static_key_enable(&ftrace_graph_enable.key);
	return 0;
}

/*
 * Turn the graph-tracer hook back off.  Must use static_key_disable():
 * calling static_key_enable() here (as the original did) would leave the
 * graph caller permanently enabled once activated.
 */
int ftrace_disable_ftrace_graph_caller(void)
{
	static_key_disable(&ftrace_graph_enable.key);
	return 0;
}
#endif
95 #ifdef CONFIG_DYNAMIC_FTRACE
96 int ftrace_update_ftrace_func(ftrace_func_t func
)
/*
 * Called when a traced call site changes target.  Dispatch on parisc goes
 * indirectly through ftrace_func (see ftrace_function_trampoline()), so no
 * per-site instruction patching is required here.
 */
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	return 0;
}
108 unsigned long ftrace_call_adjust(unsigned long addr
)
110 return addr
+(FTRACE_PATCHABLE_FUNCTION_SIZE
-1)*4;
113 int ftrace_make_call(struct dyn_ftrace
*rec
, unsigned long addr
)
115 u32 insn
[FTRACE_PATCHABLE_FUNCTION_SIZE
];
121 unsigned long addr2
=
122 (unsigned long)dereference_function_descriptor((void *)addr
);
124 u32 ftrace_trampoline
[] = {
125 0x73c10208, /* std,ma r1,100(sp) */
126 0x0c2110c1, /* ldd -10(r1),r1 */
127 0xe820d002, /* bve,n (r1) */
130 0xe83f1fd7, /* b,l,n .-14,r1 */
133 u32 ftrace_trampoline_unaligned
[] = {
136 0x37de0200, /* ldo 100(sp),sp */
137 0x73c13e01, /* std r1,-100(sp) */
138 0x34213ff9, /* ldo -4(r1),r1 */
139 0x50213fc1, /* ldd -20(r1),r1 */
140 0xe820d002, /* bve,n (r1) */
141 0xe83f1fcf, /* b,l,n .-20,r1 */
144 BUILD_BUG_ON(ARRAY_SIZE(ftrace_trampoline_unaligned
) >
145 FTRACE_PATCHABLE_FUNCTION_SIZE
);
147 u32 ftrace_trampoline
[] = {
149 0x6fc10080, /* stw,ma r1,40(sp) */
150 0x48213fd1, /* ldw -18(r1),r1 */
151 0xe820c002, /* bv,n r0(r1) */
152 0xe83f1fdf, /* b,l,n .-c,r1 */
156 BUILD_BUG_ON(ARRAY_SIZE(ftrace_trampoline
) >
157 FTRACE_PATCHABLE_FUNCTION_SIZE
);
159 size
= sizeof(ftrace_trampoline
);
160 tramp
= ftrace_trampoline
;
164 size
= sizeof(ftrace_trampoline_unaligned
);
165 tramp
= ftrace_trampoline_unaligned
;
169 ip
= (void *)(rec
->ip
+ 4 - size
);
171 ret
= copy_from_kernel_nofault(insn
, ip
, size
);
175 for (i
= 0; i
< size
/ 4; i
++) {
176 if (insn
[i
] != INSN_NOP
)
180 __patch_text_multiple(ip
, tramp
, size
);
184 int ftrace_make_nop(struct module
*mod
, struct dyn_ftrace
*rec
,
187 u32 insn
[FTRACE_PATCHABLE_FUNCTION_SIZE
];
190 for (i
= 0; i
< ARRAY_SIZE(insn
); i
++)
193 __patch_text((void *)rec
->ip
, INSN_NOP
);
194 __patch_text_multiple((void *)rec
->ip
+ 4 - sizeof(insn
),
195 insn
, sizeof(insn
)-4);
200 #ifdef CONFIG_KPROBES_ON_FTRACE
201 void kprobe_ftrace_handler(unsigned long ip
, unsigned long parent_ip
,
202 struct ftrace_ops
*ops
, struct ftrace_regs
*fregs
)
204 struct kprobe_ctlblk
*kcb
;
205 struct pt_regs
*regs
;
209 if (unlikely(kprobe_ftrace_disabled
))
212 bit
= ftrace_test_recursion_trylock(ip
, parent_ip
);
216 regs
= ftrace_get_regs(fregs
);
217 p
= get_kprobe((kprobe_opcode_t
*)ip
);
218 if (unlikely(!p
) || kprobe_disabled(p
))
221 if (kprobe_running()) {
222 kprobes_inc_nmissed_count(p
);
226 __this_cpu_write(current_kprobe
, p
);
228 kcb
= get_kprobe_ctlblk();
229 kcb
->kprobe_status
= KPROBE_HIT_ACTIVE
;
232 regs
->iaoq
[1] = ip
+ 4;
234 if (!p
->pre_handler
|| !p
->pre_handler(p
, regs
)) {
235 regs
->iaoq
[0] = ip
+ 4;
236 regs
->iaoq
[1] = ip
+ 8;
238 if (unlikely(p
->post_handler
)) {
239 kcb
->kprobe_status
= KPROBE_HIT_SSDONE
;
240 p
->post_handler(p
, regs
, 0);
243 __this_cpu_write(current_kprobe
, NULL
);
245 ftrace_test_recursion_unlock(bit
);
247 NOKPROBE_SYMBOL(kprobe_ftrace_handler
);
249 int arch_prepare_kprobe_ftrace(struct kprobe
*p
)
251 p
->ainsn
.insn
= NULL
;