/*
 * Dynamic function tracer architecture backend.
 *
 * Copyright IBM Corp. 2009
 *
 *   Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 */

#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <trace/syscall.h>
#include <asm/lowcore.h>

#ifdef CONFIG_DYNAMIC_FTRACE

void ftrace_disable_code(void);
void ftrace_disable_return(void);
void ftrace_call_code(void);
void ftrace_nop_code(void);

#define FTRACE_INSN_SIZE 4

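/*
 * Code templates for the patched mcount call sites, defined as inline
 * assembly below: ftrace_call_code and ftrace_nop_code are single
 * 4 byte (FTRACE_INSN_SIZE) instructions used to toggle tracing at a
 * call site, while ftrace_disable_code covers the complete
 * MCOUNT_INSN_SIZE call sequence and fetches the tracer address from
 * the lowcore (__LC_FTRACE_FUNC).
 */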
31 "ftrace_disable_code:\n"
34 " lg %r1,"__stringify(__LC_FTRACE_FUNC
)"\n"
36 "ftrace_disable_return:\n"
44 " j .+"__stringify(MCOUNT_INSN_SIZE
)"\n");
49 " stg %r14,8(%r15)\n");
#else /* CONFIG_64BIT */

asm("ftrace_disable_code:\n"
    "   j     .+"__stringify(MCOUNT_INSN_SIZE)"\n"
    "   l     %r1,"__stringify(__LC_FTRACE_FUNC)"\n"
    "   basr  %r14,%r1\n"
    "ftrace_disable_return:\n"
    "   l     %r14,4(%r15)\n");

asm("ftrace_nop_code:\n"
    "   j     .+"__stringify(MCOUNT_INSN_SIZE)"\n");

asm("ftrace_call_code:\n"
    "   st    %r14,4(%r15)\n");

#endif /* CONFIG_64BIT */

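/*
 * Replace old_size bytes at ip with new_code, but only if the current
 * contents still match old_code.
 */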
static int ftrace_modify_code(unsigned long ip,
                              void *old_code, int old_size,
                              void *new_code, int new_size)
{
        unsigned char replaced[MCOUNT_INSN_SIZE];

        /*
         * Note: Due to modules, code can disappear and change.
         * We need to protect against faulting as well as code
         * changing. We do this by using the probe_kernel_*
         * functions.
         * This however is just a simple sanity check.
         */
        if (probe_kernel_read(replaced, (void *)ip, old_size))
                return -EFAULT;
        if (memcmp(replaced, old_code, old_size) != 0)
                return -EINVAL;
        if (probe_kernel_write((void *)ip, new_code, new_size))
                return -EPERM;
        return 0;
}

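/*
 * Initial conversion of an mcount call site: replace the compiler
 * generated call sequence with the disabled ftrace_disable_code
 * template.
 */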
static int ftrace_make_initial_nop(struct module *mod, struct dyn_ftrace *rec,
                                   unsigned long addr)
{
        return ftrace_modify_code(rec->ip,
                                  ftrace_call_code, FTRACE_INSN_SIZE,
                                  ftrace_disable_code, MCOUNT_INSN_SIZE);
}

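/*
 * Turn a call site into a nop. MCOUNT_ADDR as target address marks the
 * initial conversion; afterwards only the first instruction of the
 * call sequence is replaced with a jump over the whole sequence.
 */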
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
                    unsigned long addr)
{
        if (addr == MCOUNT_ADDR)
                return ftrace_make_initial_nop(mod, rec, addr);
        return ftrace_modify_code(rec->ip,
                                  ftrace_call_code, FTRACE_INSN_SIZE,
                                  ftrace_nop_code, FTRACE_INSN_SIZE);
}

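/*
 * Re-enable a call site: replace the jump (nop) with the store
 * instruction that starts the call sequence again.
 */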
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
        return ftrace_modify_code(rec->ip,
                                  ftrace_nop_code, FTRACE_INSN_SIZE,
                                  ftrace_call_code, FTRACE_INSN_SIZE);
}

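/*
 * Switching the tracer does not require re-patching the call sites:
 * the patched sequences fetch the tracer address indirectly
 * (presumably via the __LC_FTRACE_FUNC lowcore slot loaded above), so
 * recording the new address in ftrace_dyn_func is sufficient.
 */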
int ftrace_update_ftrace_func(ftrace_func_t func)
{
        ftrace_dyn_func = (unsigned long)func;
        return 0;
}

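/*
 * No architecture specific setup is needed; just clear the value
 * handed in by the ftrace core and report success.
 */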
int __init ftrace_dyn_arch_init(void *data)
{
        *(unsigned long *)data = 0;
        return 0;
}

#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Patch the kernel code at the ftrace_graph_caller location:
 * the instruction there is a branch relative on condition. The condition mask
 * is either all ones (always branch, i.e. disable ftrace_graph_caller) or all
 * zeroes (nop, i.e. enable ftrace_graph_caller).
 * The instruction format for brc is a7m4xxxx, where m is the condition mask.
 */
int ftrace_enable_ftrace_graph_caller(void)
{
        unsigned short opcode = 0xa704;

        return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode));
}

int ftrace_disable_ftrace_graph_caller(void)
{
        unsigned short opcode = 0xa7f4;

        return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode));
}

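/*
 * With dynamic ftrace the address seen by prepare_ftrace_return points
 * behind the call inside the patched sequence; subtract the offset of
 * ftrace_disable_return within the template to get back to the start
 * of the call site.
 */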
static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr)
{
        return addr - (ftrace_disable_return - ftrace_disable_code);
}

#else /* CONFIG_DYNAMIC_FTRACE */

static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr)
{
        return addr - MCOUNT_OFFSET_RET;
}

#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Hook the return address and push it on the stack of return addresses
 * in the current thread info.
 */
unsigned long prepare_ftrace_return(unsigned long ip, unsigned long parent)
{
        struct ftrace_graph_ent trace;

        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                goto out;
        if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
                goto out;
        trace.func = ftrace_mcount_call_adjust(ip) & PSW_ADDR_INSN;
        /* Only trace if the calling function expects to. */
        if (!ftrace_graph_entry(&trace)) {
                /* The caller does not want to be traced: undo the push. */
                current->curr_ret_stack--;
                goto out;
        }
        parent = (unsigned long)return_to_handler;
out:
        return parent;
}

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_FTRACE_SYSCALLS

extern unsigned long __start_syscalls_metadata[];
extern unsigned long __stop_syscalls_metadata[];
extern unsigned int sys_call_table[];

static struct syscall_metadata **syscalls_metadata;

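/* Return the metadata entry for syscall number nr, or NULL if unknown. */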
struct syscall_metadata *syscall_nr_to_meta(int nr)
{
        if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
                return NULL;

        return syscalls_metadata[nr];
}

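/* Reverse lookup: linear scan for the syscall number belonging to name. */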
int syscall_name_to_nr(char *name)
{
        int i;

        if (!syscalls_metadata)
                return -1;
        for (i = 0; i < NR_syscalls; i++)
                if (syscalls_metadata[i])
                        if (!strcmp(syscalls_metadata[i]->name, name))
                                return i;
        return -1;
}

void set_syscall_enter_id(int num, int id)
{
        syscalls_metadata[num]->enter_id = id;
}

void set_syscall_exit_id(int num, int id)
{
        syscalls_metadata[num]->exit_id = id;
}

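/*
 * Find the metadata entry for the syscall at the given sys_call_table
 * address. The first three characters of the symbol name are skipped
 * in the comparison, presumably to tolerate the "sys_" vs. "SyS_"
 * spelling of syscall wrapper aliases.
 */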
static struct syscall_metadata *find_syscall_meta(unsigned long syscall)
{
        struct syscall_metadata *start;
        struct syscall_metadata *stop;
        char str[KSYM_SYMBOL_LEN];

        start = (struct syscall_metadata *)__start_syscalls_metadata;
        stop = (struct syscall_metadata *)__stop_syscalls_metadata;
        kallsyms_lookup(syscall, NULL, NULL, NULL, str);

        for ( ; start < stop; start++) {
                if (start->name && !strcmp(start->name + 3, str + 3))
                        return start;
        }
        return NULL;
}

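/*
 * Allocate the syscall number -> metadata mapping and fill it for all
 * NR_syscalls entries of sys_call_table.
 */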
static int __init arch_init_ftrace_syscalls(void)
{
        struct syscall_metadata *meta;
        int i;

        syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) * NR_syscalls,
                                    GFP_KERNEL);
        if (!syscalls_metadata)
                return -ENOMEM;
        for (i = 0; i < NR_syscalls; i++) {
                meta = find_syscall_meta((unsigned long)sys_call_table[i]);
                syscalls_metadata[i] = meta;
        }
        return 0;
}
arch_initcall(arch_init_ftrace_syscalls);

#endif /* CONFIG_FTRACE_SYSCALLS */