Linux 3.7.1
kernel/trace/trace_functions.c

/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

/* function tracing enabled */
static int ftrace_function_enabled;

static struct trace_array *func_trace;

static void tracing_start_function_trace(void);
static void tracing_stop_function_trace(void);

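/*
 * Tracer lifecycle: init runs when "function" is written to
 * current_tracer, reset when another tracer replaces it; start
 * clears the per-CPU ring buffers when tracing is re-enabled.
 */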
static int function_trace_init(struct trace_array *tr)
{
	func_trace = tr;
	tr->cpu = get_cpu();
	put_cpu();

	tracing_start_cmdline_record();
	tracing_start_function_trace();
	return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace();
	tracing_stop_cmdline_record();
}

static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(tr);
}

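/*
 * Variant of the main callback used when the TRACE_ITER_PREEMPTONLY
 * trace flag is set: it only disables preemption (rather than hard
 * interrupts) around the per-CPU nesting check, so interrupts stay
 * enabled while the event is recorded.
 */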
static void
function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip,
				 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	pc = preempt_count();
	preempt_disable_notrace();
	local_save_flags(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		trace_function(tr, ip, parent_ip, flags, pc);

	atomic_dec(&data->disabled);
	preempt_enable_notrace();
}

/* Our option */
enum {
	TRACE_FUNC_OPT_STACK	= 0x1,
};

static struct tracer_flags func_flags;

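/*
 * The per-CPU "disabled" counter below guards against nested entry on
 * the same CPU (e.g. an interrupt firing while this callback is already
 * recording): only the outermost caller, which sees the counter at 1,
 * writes the event; nested callers just back out.
 */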
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
		/*
		 * skip over 5 funcs:
		 *    __ftrace_trace_stack,
		 *    __trace_stack,
		 *    function_stack_trace_call
		 *    ftrace_list_func
		 *    ftrace_call
		 */
		__trace_stack(tr, flags, 5, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = function_trace_call,
	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops trace_stack_ops __read_mostly =
{
	.func = function_stack_trace_call,
	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = func_opts
};

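/*
 * Usage sketch (assuming debugfs is mounted at /sys/kernel/debug and
 * CONFIG_STACKTRACE is set): the option above shows up in
 * trace_options, so
 *
 *	echo func_stack_trace > /sys/kernel/debug/tracing/trace_options
 *
 * makes every traced function entry record a stack trace, and the
 * "nofunc_stack_trace" form switches it back off.
 */
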
static void tracing_start_function_trace(void)
{
	ftrace_function_enabled = 0;

	if (trace_flags & TRACE_ITER_PREEMPTONLY)
		trace_ops.func = function_trace_call_preempt_only;
	else
		trace_ops.func = function_trace_call;

	if (func_flags.val & TRACE_FUNC_OPT_STACK)
		register_ftrace_function(&trace_stack_ops);
	else
		register_ftrace_function(&trace_ops);

	ftrace_function_enabled = 1;
}

static void tracing_stop_function_trace(void)
{
	ftrace_function_enabled = 0;

	if (func_flags.val & TRACE_FUNC_OPT_STACK)
		unregister_ftrace_function(&trace_stack_ops);
	else
		unregister_ftrace_function(&trace_ops);
}

static int func_set_flag(u32 old_flags, u32 bit, int set)
{
	switch (bit) {
	case TRACE_FUNC_OPT_STACK:
		/* do nothing if already set */
		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
			break;

		if (set) {
			unregister_ftrace_function(&trace_ops);
			register_ftrace_function(&trace_stack_ops);
		} else {
			unregister_ftrace_function(&trace_stack_ops);
			register_ftrace_function(&trace_ops);
		}

		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static struct tracer function_trace __read_mostly =
{
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.wait_pipe	= poll_wait_pipe,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function,
#endif
};

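/*
 * Usage sketch (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *	echo function > /sys/kernel/debug/tracing/current_tracer
 *	cat /sys/kernel/debug/tracing/trace
 *
 * selects this tracer and reads back the recorded events.
 */
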
#ifdef CONFIG_DYNAMIC_FTRACE
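/*
 * traceon/traceoff probes. The count passed via *data controls how
 * many times a probe may fire: -1 means unlimited, any other value is
 * decremented on each hit and the probe becomes a no-op once it
 * reaches zero.
 */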
static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
{
	long *count = (long *)data;

	if (tracing_is_on())
		return;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_on();
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
{
	long *count = (long *)data;

	if (!tracing_is_on())
		return;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_off();
}

static int
ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
			 struct ftrace_probe_ops *ops, void *data);

static struct ftrace_probe_ops traceon_probe_ops = {
	.func			= ftrace_traceon,
	.print			= ftrace_trace_onoff_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func			= ftrace_traceoff,
	.print			= ftrace_trace_onoff_print,
};

static int
ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
			 struct ftrace_probe_ops *ops, void *data)
{
	long count = (long)data;

	seq_printf(m, "%ps:", (void *)ip);

	if (ops == &traceon_probe_ops)
		seq_printf(m, "traceon");
	else
		seq_printf(m, "traceoff");

	if (count == -1)
		seq_printf(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", count);

	return 0;
}

static int
ftrace_trace_onoff_unreg(char *glob, char *cmd, char *param)
{
	struct ftrace_probe_ops *ops;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = &traceon_probe_ops;
	else
		ops = &traceoff_probe_ops;

	unregister_ftrace_function_probe_func(glob, ops);

	return 0;
}

static int
ftrace_trace_onoff_callback(struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!')
		return ftrace_trace_onoff_unreg(glob+1, cmd, param);

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = &traceon_probe_ops;
	else
		ops = &traceoff_probe_ops;

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = strict_strtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, ops, count);

	return ret < 0 ? ret : 0;
}

static struct ftrace_func_command ftrace_traceon_cmd = {
	.name			= "traceon",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name			= "traceoff",
	.func			= ftrace_trace_onoff_callback,
};

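/*
 * Usage sketch (assuming debugfs is mounted at /sys/kernel/debug): the
 * commands above are parsed out of set_ftrace_filter, so
 *
 *	echo 'schedule:traceoff:3' > /sys/kernel/debug/tracing/set_ftrace_filter
 *
 * turns tracing off the first three times schedule() is hit, and
 *
 *	echo '!schedule:traceoff' > /sys/kernel/debug/tracing/set_ftrace_filter
 *
 * removes the probe again (the '!' prefix takes the unreg path in
 * ftrace_trace_onoff_callback above).
 */
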
static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		unregister_ftrace_command(&ftrace_traceoff_cmd);
	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

static __init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}
device_initcall(init_function_trace);