/*
 * Scraper provenance (was bare page text, breaking compilation):
 * debugfs: Modified default dir of debugfs for debugging UHCI.
 * [linux/fpc-iii.git] / arch / s390 / kernel / ftrace.c
 * blob 57bdcb1e3cdf0391928565dc26118e5272b3a21b
 */
/*
 * Dynamic function tracer architecture backend.
 *
 * Copyright IBM Corp. 2009
 *
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 */
10 #include <linux/hardirq.h>
11 #include <linux/uaccess.h>
12 #include <linux/ftrace.h>
13 #include <linux/kernel.h>
14 #include <linux/types.h>
15 #include <trace/syscall.h>
16 #include <asm/lowcore.h>
18 #ifdef CONFIG_DYNAMIC_FTRACE
20 void ftrace_disable_code(void);
21 void ftrace_disable_return(void);
22 void ftrace_call_code(void);
23 void ftrace_nop_code(void);
25 #define FTRACE_INSN_SIZE 4
27 #ifdef CONFIG_64BIT
29 asm(
30 " .align 4\n"
31 "ftrace_disable_code:\n"
32 " j 0f\n"
33 " .word 0x0024\n"
34 " lg %r1,"__stringify(__LC_FTRACE_FUNC)"\n"
35 " basr %r14,%r1\n"
36 "ftrace_disable_return:\n"
37 " lg %r14,8(15)\n"
38 " lgr %r0,%r0\n"
39 "0:\n");
41 asm(
42 " .align 4\n"
43 "ftrace_nop_code:\n"
44 " j .+"__stringify(MCOUNT_INSN_SIZE)"\n");
46 asm(
47 " .align 4\n"
48 "ftrace_call_code:\n"
49 " stg %r14,8(%r15)\n");
51 #else /* CONFIG_64BIT */
53 asm(
54 " .align 4\n"
55 "ftrace_disable_code:\n"
56 " j 0f\n"
57 " l %r1,"__stringify(__LC_FTRACE_FUNC)"\n"
58 " basr %r14,%r1\n"
59 "ftrace_disable_return:\n"
60 " l %r14,4(%r15)\n"
61 " j 0f\n"
62 " bcr 0,%r7\n"
63 " bcr 0,%r7\n"
64 " bcr 0,%r7\n"
65 " bcr 0,%r7\n"
66 " bcr 0,%r7\n"
67 " bcr 0,%r7\n"
68 "0:\n");
70 asm(
71 " .align 4\n"
72 "ftrace_nop_code:\n"
73 " j .+"__stringify(MCOUNT_INSN_SIZE)"\n");
75 asm(
76 " .align 4\n"
77 "ftrace_call_code:\n"
78 " st %r14,4(%r15)\n");
80 #endif /* CONFIG_64BIT */
82 static int ftrace_modify_code(unsigned long ip,
83 void *old_code, int old_size,
84 void *new_code, int new_size)
86 unsigned char replaced[MCOUNT_INSN_SIZE];
89 * Note: Due to modules code can disappear and change.
90 * We need to protect against faulting as well as code
91 * changing. We do this by using the probe_kernel_*
92 * functions.
93 * This however is just a simple sanity check.
95 if (probe_kernel_read(replaced, (void *)ip, old_size))
96 return -EFAULT;
97 if (memcmp(replaced, old_code, old_size) != 0)
98 return -EINVAL;
99 if (probe_kernel_write((void *)ip, new_code, new_size))
100 return -EPERM;
101 return 0;
104 static int ftrace_make_initial_nop(struct module *mod, struct dyn_ftrace *rec,
105 unsigned long addr)
107 return ftrace_modify_code(rec->ip,
108 ftrace_call_code, FTRACE_INSN_SIZE,
109 ftrace_disable_code, MCOUNT_INSN_SIZE);
112 int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
113 unsigned long addr)
115 if (addr == MCOUNT_ADDR)
116 return ftrace_make_initial_nop(mod, rec, addr);
117 return ftrace_modify_code(rec->ip,
118 ftrace_call_code, FTRACE_INSN_SIZE,
119 ftrace_nop_code, FTRACE_INSN_SIZE);
122 int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
124 return ftrace_modify_code(rec->ip,
125 ftrace_nop_code, FTRACE_INSN_SIZE,
126 ftrace_call_code, FTRACE_INSN_SIZE);
129 int ftrace_update_ftrace_func(ftrace_func_t func)
131 ftrace_dyn_func = (unsigned long)func;
132 return 0;
135 int __init ftrace_dyn_arch_init(void *data)
137 *(unsigned long *)data = 0;
138 return 0;
141 #endif /* CONFIG_DYNAMIC_FTRACE */
143 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
144 #ifdef CONFIG_DYNAMIC_FTRACE
146 * Patch the kernel code at ftrace_graph_caller location:
147 * The instruction there is branch relative on condition. The condition mask
148 * is either all ones (always branch aka disable ftrace_graph_caller) or all
149 * zeroes (nop aka enable ftrace_graph_caller).
150 * Instruction format for brc is a7m4xxxx where m is the condition mask.
152 int ftrace_enable_ftrace_graph_caller(void)
154 unsigned short opcode = 0xa704;
156 return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode));
159 int ftrace_disable_ftrace_graph_caller(void)
161 unsigned short opcode = 0xa7f4;
163 return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode));
166 static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr)
168 return addr - (ftrace_disable_return - ftrace_disable_code);
171 #else /* CONFIG_DYNAMIC_FTRACE */
173 static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr)
175 return addr - MCOUNT_OFFSET_RET;
178 #endif /* CONFIG_DYNAMIC_FTRACE */
181 * Hook the return address and push it in the stack of return addresses
182 * in current thread info.
184 unsigned long prepare_ftrace_return(unsigned long ip, unsigned long parent)
186 struct ftrace_graph_ent trace;
188 /* Nmi's are currently unsupported. */
189 if (unlikely(in_nmi()))
190 goto out;
191 if (unlikely(atomic_read(&current->tracing_graph_pause)))
192 goto out;
193 if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
194 goto out;
195 trace.func = ftrace_mcount_call_adjust(ip) & PSW_ADDR_INSN;
196 /* Only trace if the calling function expects to. */
197 if (!ftrace_graph_entry(&trace)) {
198 current->curr_ret_stack--;
199 goto out;
201 parent = (unsigned long)return_to_handler;
202 out:
203 return parent;
205 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
207 #ifdef CONFIG_FTRACE_SYSCALLS
209 extern unsigned long __start_syscalls_metadata[];
210 extern unsigned long __stop_syscalls_metadata[];
211 extern unsigned int sys_call_table[];
213 static struct syscall_metadata **syscalls_metadata;
215 struct syscall_metadata *syscall_nr_to_meta(int nr)
217 if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
218 return NULL;
220 return syscalls_metadata[nr];
223 int syscall_name_to_nr(char *name)
225 int i;
227 if (!syscalls_metadata)
228 return -1;
229 for (i = 0; i < NR_syscalls; i++)
230 if (syscalls_metadata[i])
231 if (!strcmp(syscalls_metadata[i]->name, name))
232 return i;
233 return -1;
236 void set_syscall_enter_id(int num, int id)
238 syscalls_metadata[num]->enter_id = id;
241 void set_syscall_exit_id(int num, int id)
243 syscalls_metadata[num]->exit_id = id;
246 static struct syscall_metadata *find_syscall_meta(unsigned long syscall)
248 struct syscall_metadata *start;
249 struct syscall_metadata *stop;
250 char str[KSYM_SYMBOL_LEN];
252 start = (struct syscall_metadata *)__start_syscalls_metadata;
253 stop = (struct syscall_metadata *)__stop_syscalls_metadata;
254 kallsyms_lookup(syscall, NULL, NULL, NULL, str);
256 for ( ; start < stop; start++) {
257 if (start->name && !strcmp(start->name + 3, str + 3))
258 return start;
260 return NULL;
263 static int __init arch_init_ftrace_syscalls(void)
265 struct syscall_metadata *meta;
266 int i;
267 syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) * NR_syscalls,
268 GFP_KERNEL);
269 if (!syscalls_metadata)
270 return -ENOMEM;
271 for (i = 0; i < NR_syscalls; i++) {
272 meta = find_syscall_meta((unsigned long)sys_call_table[i]);
273 syscalls_metadata[i] = meta;
275 return 0;
277 arch_initcall(arch_init_ftrace_syscalls);
278 #endif