/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"
/* function tracing enabled */
static int ftrace_function_enabled;

static struct trace_array *func_trace;
static void tracing_start_function_trace(void);
static void tracing_stop_function_trace(void);
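/*
 * Tracer lifecycle callbacks: init starts cmdline recording and the
 * function callback, reset undoes both, and start clears the per-cpu
 * ring buffers for a fresh trace.
 */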
static int function_trace_init(struct trace_array *tr)
{
        func_trace = tr;

        tracing_start_cmdline_record();
        tracing_start_function_trace();
        return 0;
}
static void function_trace_reset(struct trace_array *tr)
{
        tracing_stop_function_trace();
        tracing_stop_cmdline_record();
}
static void function_trace_start(struct trace_array *tr)
{
        tracing_reset_online_cpus(tr);
}
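/*
 * Callback used when the PREEMPTONLY trace option is set: it only
 * disables preemption (not interrupts) around the record.  The per-cpu
 * "disabled" counter keeps a recursive or nested entry from writing a
 * second event while one is already in flight on this CPU.
 */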
static void
function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip,
                                 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
        struct trace_array *tr = func_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        if (unlikely(!ftrace_function_enabled))
                return;

        pc = preempt_count();
        preempt_disable_notrace();
        local_save_flags(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1))
                trace_function(tr, ip, parent_ip, flags, pc);

        atomic_dec(&data->disabled);
        preempt_enable_notrace();
}
/* Our option */
enum {
        TRACE_FUNC_OPT_STACK = 0x1,
};

static struct tracer_flags func_flags;
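/*
 * Default function entry callback: interrupts are disabled with the
 * raw local_irq_save() because this can be called before ftrace's own
 * recursion protection has run (see the comment in the body).
 */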
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
                    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
        struct trace_array *tr = func_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        if (unlikely(!ftrace_function_enabled))
                return;

        /*
         * Need to use raw, since this must be called before the
         * recursive protection is performed.
         */
        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1)) {
                pc = preempt_count();
                trace_function(tr, ip, parent_ip, flags, pc);
        }

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}
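/*
 * Same as function_trace_call(), but also records a stack trace for
 * each function entry; used when the func_stack_trace option is set.
 */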
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
                          struct ftrace_ops *op, struct pt_regs *pt_regs)
{
        struct trace_array *tr = func_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        if (unlikely(!ftrace_function_enabled))
                return;

        /*
         * Need to use raw, since this must be called before the
         * recursive protection is performed.
         */
        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1)) {
                pc = preempt_count();
                trace_function(tr, ip, parent_ip, flags, pc);
                /*
                 * skip over 5 funcs:
                 *    __ftrace_trace_stack,
                 *    __trace_stack,
                 *    function_stack_trace_call
                 *    ftrace_list_func
                 *    ftrace_call
                 */
                __trace_stack(tr, flags, 5, pc);
        }

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}
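/*
 * The two ftrace_ops that can be registered: plain function tracing
 * and function tracing plus stack traces.  Only one of them is
 * registered at a time; tracing_start_function_trace() and
 * func_set_flag() pick which.
 */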
static struct ftrace_ops trace_ops __read_mostly =
{
        .func  = function_trace_call,
        .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};
static struct ftrace_ops trace_stack_ops __read_mostly =
{
        .func  = function_stack_trace_call,
        .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};
static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
        { TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
        { } /* Always set a last empty entry */
};
static struct tracer_flags func_flags = {
        .val  = 0, /* By default: all flags disabled */
        .opts = func_opts
};
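/*
 * Register the appropriate ftrace_ops: the preempt-only callback if
 * the PREEMPTONLY trace option is set, and the stack-tracing ops if
 * the func_stack_trace option is enabled.  ftrace_function_enabled is
 * cleared around the switch so the callbacks stay quiet meanwhile.
 */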
static void tracing_start_function_trace(void)
{
        ftrace_function_enabled = 0;

        if (trace_flags & TRACE_ITER_PREEMPTONLY)
                trace_ops.func = function_trace_call_preempt_only;
        else
                trace_ops.func = function_trace_call;

        if (func_flags.val & TRACE_FUNC_OPT_STACK)
                register_ftrace_function(&trace_stack_ops);
        else
                register_ftrace_function(&trace_ops);

        ftrace_function_enabled = 1;
}
static void tracing_stop_function_trace(void)
{
        ftrace_function_enabled = 0;

        if (func_flags.val & TRACE_FUNC_OPT_STACK)
                unregister_ftrace_function(&trace_stack_ops);
        else
                unregister_ftrace_function(&trace_ops);
}
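/*
 * Runtime toggle for the func_stack_trace option: flipping the option
 * unregisters the currently registered ftrace_ops and registers the
 * other one.
 */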
static int func_set_flag(u32 old_flags, u32 bit, int set)
{
        switch (bit) {
        case TRACE_FUNC_OPT_STACK:
                /* do nothing if already set */
                if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
                        break;

                if (set) {
                        unregister_ftrace_function(&trace_ops);
                        register_ftrace_function(&trace_stack_ops);
                } else {
                        unregister_ftrace_function(&trace_stack_ops);
                        register_ftrace_function(&trace_ops);
                }

                break;
        default:
                return -EINVAL;
        }

        return 0;
}
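/* The "function" tracer as registered with the tracing core. */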
static struct tracer function_trace __read_mostly =
{
        .name      = "function",
        .init      = function_trace_init,
        .reset     = function_trace_reset,
        .start     = function_trace_start,
        .wait_pipe = poll_wait_pipe,
        .flags     = &func_flags,
        .set_flag  = func_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest  = trace_selftest_startup_function,
#endif
};
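/*
 * With CONFIG_DYNAMIC_FTRACE, "traceon" and "traceoff" probes can be
 * attached to individual functions through set_ftrace_filter, e.g.
 * (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *   echo 'schedule:traceoff:5' > /sys/kernel/debug/tracing/set_ftrace_filter
 *
 * which turns tracing off the first five times schedule() is hit.
 */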
#ifdef CONFIG_DYNAMIC_FTRACE
static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
{
        long *count = (long *)data;

        if (tracing_is_on())
                return;
        if (!*count)
                return;
        if (*count != -1)
                (*count)--;
        tracing_on();
}
static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
{
        long *count = (long *)data;

        if (!tracing_is_on())
                return;
        if (!*count)
                return;
        if (*count != -1)
                (*count)--;
        tracing_off();
}
static int
ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
                         struct ftrace_probe_ops *ops, void *data);
static struct ftrace_probe_ops traceon_probe_ops = {
        .func  = ftrace_traceon,
        .print = ftrace_trace_onoff_print,
};
static struct ftrace_probe_ops traceoff_probe_ops = {
        .func  = ftrace_traceoff,
        .print = ftrace_trace_onoff_print,
};
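/*
 * ->print callback shared by both probes: shown when reading
 * set_ftrace_filter, in the form "<func>:traceon:count=N" or
 * "<func>:traceoff:unlimited".
 */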
static int
ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
                         struct ftrace_probe_ops *ops, void *data)
{
        long count = (long)data;

        seq_printf(m, "%ps:", (void *)ip);

        if (ops == &traceon_probe_ops)
                seq_printf(m, "traceon");
        else
                seq_printf(m, "traceoff");

        if (count == -1)
                seq_printf(m, ":unlimited\n");
        else
                seq_printf(m, ":count=%ld\n", count);

        return 0;
}
static int
ftrace_trace_onoff_unreg(char *glob, char *cmd, char *param)
{
        struct ftrace_probe_ops *ops;

        /* we register both traceon and traceoff to this callback */
        if (strcmp(cmd, "traceon") == 0)
                ops = &traceon_probe_ops;
        else
                ops = &traceoff_probe_ops;

        unregister_ftrace_function_probe_func(glob, ops);

        return 0;
}
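/*
 * Parses "<func>:traceon[:count]" / "<func>:traceoff[:count]" written
 * to set_ftrace_filter and registers the matching probe; a leading '!'
 * removes it instead.  The optional count is carried in the probe's
 * data pointer.
 */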
static int
ftrace_trace_onoff_callback(struct ftrace_hash *hash,
                            char *glob, char *cmd, char *param, int enable)
{
        struct ftrace_probe_ops *ops;
        void *count = (void *)-1;
        char *number;
        int ret;

        /* hash funcs only work with set_ftrace_filter */
        if (!enable)
                return -EINVAL;

        /* a leading '!' means "remove the probe" */
        if (glob[0] == '!')
                return ftrace_trace_onoff_unreg(glob+1, cmd, param);

        /* we register both traceon and traceoff to this callback */
        if (strcmp(cmd, "traceon") == 0)
                ops = &traceon_probe_ops;
        else
                ops = &traceoff_probe_ops;

        if (!param)
                goto out_reg;

        number = strsep(&param, ":");
        if (!strlen(number))
                goto out_reg;

        /*
         * We use the callback data field (which is a pointer)
         * as our counter.
         */
        ret = kstrtoul(number, 0, (unsigned long *)&count);
        if (ret)
                return ret;

 out_reg:
        ret = register_ftrace_function_probe(glob, ops, count);

        return ret < 0 ? ret : 0;
}
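/*
 * Register "traceon" and "traceoff" as ftrace filter commands; both
 * commands funnel into ftrace_trace_onoff_callback() above.
 */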
static struct ftrace_func_command ftrace_traceon_cmd = {
        .name = "traceon",
        .func = ftrace_trace_onoff_callback,
};
static struct ftrace_func_command ftrace_traceoff_cmd = {
        .name = "traceoff",
        .func = ftrace_trace_onoff_callback,
};
static int __init init_func_cmd_traceon(void)
{
        int ret;

        ret = register_ftrace_command(&ftrace_traceoff_cmd);
        if (ret)
                return ret;

        ret = register_ftrace_command(&ftrace_traceon_cmd);
        if (ret)
                unregister_ftrace_command(&ftrace_traceoff_cmd);
        return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
        return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
static __init int init_function_trace(void)
{
        init_func_cmd_traceon();
        return register_tracer(&function_trace);
}
core_initcall(init_function_trace);