/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/pstore.h>
#include <linux/fs.h>

#include "trace.h"
/* function tracing enabled */
static int ftrace_function_enabled;

static struct trace_array *func_trace;

static void tracing_start_function_trace(void);
static void tracing_stop_function_trace(void);
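/*
 * Tracer lifecycle callbacks.  function_trace_init() is called when this
 * tracer is selected via the 'current_tracer' file: it records which
 * trace_array to log into, starts cmdline recording and registers the
 * function entry callback.  function_trace_reset() undoes both steps when
 * another tracer is chosen.
 */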
static int function_trace_init(struct trace_array *tr)
{
        func_trace = tr;

        tracing_start_cmdline_record();
        tracing_start_function_trace();
        return 0;
}
static void function_trace_reset(struct trace_array *tr)
{
        tracing_stop_function_trace();
        tracing_stop_cmdline_record();
}
static void function_trace_start(struct trace_array *tr)
{
        tracing_reset_online_cpus(tr);
}
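/*
 * Callback used when the TRACE_ITER_PREEMPTONLY trace flag is set: only
 * preemption is disabled around the ring buffer write, interrupts stay
 * enabled.  The per-cpu 'disabled' counter suppresses recursive entries
 * on the same CPU.
 */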
static void
function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
{
        struct trace_array *tr = func_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        if (unlikely(!ftrace_function_enabled))
                return;

        pc = preempt_count();
        preempt_disable_notrace();
        local_save_flags(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1))
                trace_function(tr, ip, parent_ip, flags, pc);

        atomic_dec(&data->disabled);
        preempt_enable_notrace();
}
enum {
        TRACE_FUNC_OPT_STACK    = 0x1,
        TRACE_FUNC_OPT_PSTORE   = 0x2,
};

static struct tracer_flags func_flags;
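/*
 * Default function entry callback.  Interrupts are disabled for the
 * duration of the record so that the per-cpu 'disabled' counter also
 * catches recursion coming from interrupt context.
 */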
static void
function_trace_call(unsigned long ip, unsigned long parent_ip)
{
        struct trace_array *tr = func_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        if (unlikely(!ftrace_function_enabled))
                return;

        /*
         * Need to use raw, since this must be called before the
         * recursive protection is performed.
         */
        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1)) {
                /*
                 * So far tracing doesn't support multiple buffers, so
                 * we make an explicit call for now.
                 */
                if (unlikely(func_flags.val & TRACE_FUNC_OPT_PSTORE))
                        pstore_ftrace_call(ip, parent_ip);
                pc = preempt_count();
                trace_function(tr, ip, parent_ip, flags, pc);
        }

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}
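/*
 * Callback used while the func_stack_trace option is set: besides the
 * function entry it records a stack trace, skipping the tracer's own
 * frames so the dump starts at the traced function's caller.
 */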
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
        struct trace_array *tr = func_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        if (unlikely(!ftrace_function_enabled))
                return;

        /*
         * Need to use raw, since this must be called before the
         * recursive protection is performed.
         */
        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1)) {
                pc = preempt_count();
                trace_function(tr, ip, parent_ip, flags, pc);
                /*
                 * skip over 5 funcs:
                 *    __ftrace_trace_stack,
                 *    __trace_stack,
                 *    function_stack_trace_call
                 *    ftrace_list_func
                 *    ftrace_call
                 */
                __trace_stack(tr, flags, 5, pc);
        }

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}
static struct ftrace_ops trace_ops __read_mostly =
{
        .func = function_trace_call,
        .flags = FTRACE_OPS_FL_GLOBAL,
};

static struct ftrace_ops trace_stack_ops __read_mostly =
{
        .func = function_stack_trace_call,
        .flags = FTRACE_OPS_FL_GLOBAL,
};
static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
        { TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
#ifdef CONFIG_PSTORE_FTRACE
        { TRACER_OPT(func_pstore, TRACE_FUNC_OPT_PSTORE) },
#endif
        { } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
        .val = 0, /* By default: all flags disabled */
        .opts = func_opts
};
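/*
 * The options above are toggled through the tracing directory, e.g.
 * (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *      echo func_stack_trace > /sys/kernel/debug/tracing/trace_options
 */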
static void tracing_start_function_trace(void)
{
        ftrace_function_enabled = 0;

        if (trace_flags & TRACE_ITER_PREEMPTONLY)
                trace_ops.func = function_trace_call_preempt_only;
        else
                trace_ops.func = function_trace_call;

        if (func_flags.val & TRACE_FUNC_OPT_STACK)
                register_ftrace_function(&trace_stack_ops);
        else
                register_ftrace_function(&trace_ops);

        ftrace_function_enabled = 1;
}
static void tracing_stop_function_trace(void)
{
        ftrace_function_enabled = 0;

        if (func_flags.val & TRACE_FUNC_OPT_STACK)
                unregister_ftrace_function(&trace_stack_ops);
        else
                unregister_ftrace_function(&trace_ops);
}
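/*
 * Runtime option handler: flipping func_stack_trace swaps which ftrace_ops
 * is registered, so the change takes effect without restarting the tracer.
 * The pstore option needs no action here; it is checked on every call.
 */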
static int func_set_flag(u32 old_flags, u32 bit, int set)
{
        switch (bit) {
        case TRACE_FUNC_OPT_STACK:
                /* do nothing if already set */
                if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
                        break;

                if (set) {
                        unregister_ftrace_function(&trace_ops);
                        register_ftrace_function(&trace_stack_ops);
                } else {
                        unregister_ftrace_function(&trace_stack_ops);
                        register_ftrace_function(&trace_ops);
                }

                break;
        case TRACE_FUNC_OPT_PSTORE:
                break;
        default:
                return -EINVAL;
        }

        return 0;
}
static struct tracer function_trace __read_mostly =
{
        .name           = "function",
        .init           = function_trace_init,
        .reset          = function_trace_reset,
        .start          = function_trace_start,
        .wait_pipe      = poll_wait_pipe,
        .flags          = &func_flags,
        .set_flag       = func_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_function,
#endif
};
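/*
 * The traceon/traceoff probes below depend on dynamic ftrace, since they
 * attach to individual functions through set_ftrace_filter.
 */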
#ifdef CONFIG_DYNAMIC_FTRACE
static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
{
        long *count = (long *)data;

        if (tracing_is_on())
                return;

        if (!*count)
                return;

        if (*count != -1)
                (*count)--;

        tracing_on();
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
{
        long *count = (long *)data;

        if (!tracing_is_on())
                return;

        if (!*count)
                return;

        if (*count != -1)
                (*count)--;

        tracing_off();
}
static int
ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
                         struct ftrace_probe_ops *ops, void *data);
static struct ftrace_probe_ops traceon_probe_ops = {
        .func                   = ftrace_traceon,
        .print                  = ftrace_trace_onoff_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
        .func                   = ftrace_traceoff,
        .print                  = ftrace_trace_onoff_print,
};
static int
ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
                         struct ftrace_probe_ops *ops, void *data)
{
        long count = (long)data;

        seq_printf(m, "%ps:", (void *)ip);

        if (ops == &traceon_probe_ops)
                seq_printf(m, "traceon");
        else
                seq_printf(m, "traceoff");

        if (count == -1)
                seq_printf(m, ":unlimited\n");
        else
                seq_printf(m, ":count=%ld\n", count);

        return 0;
}
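/*
 * Removal path, reached when the probe spec written to set_ftrace_filter
 * is prefixed with '!'.
 */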
static int
ftrace_trace_onoff_unreg(char *glob, char *cmd, char *param)
{
        struct ftrace_probe_ops *ops;

        /* we register both traceon and traceoff to this callback */
        if (strcmp(cmd, "traceon") == 0)
                ops = &traceon_probe_ops;
        else
                ops = &traceoff_probe_ops;

        unregister_ftrace_function_probe_func(glob, ops);

        return 0;
}
static int
ftrace_trace_onoff_callback(struct ftrace_hash *hash,
                            char *glob, char *cmd, char *param, int enable)
{
        struct ftrace_probe_ops *ops;
        void *count = (void *)-1;
        char *number;
        int ret;

        /* hash funcs only work with set_ftrace_filter */
        if (!enable)
                return -EINVAL;

        /* a leading '!' removes a previously installed probe */
        if (glob[0] == '!')
                return ftrace_trace_onoff_unreg(glob+1, cmd, param);

        /* we register both traceon and traceoff to this callback */
        if (strcmp(cmd, "traceon") == 0)
                ops = &traceon_probe_ops;
        else
                ops = &traceoff_probe_ops;

        if (!param)
                goto out_reg;

        number = strsep(&param, ":");
        if (!strlen(number))
                goto out_reg;

        /*
         * We use the callback data field (which is a pointer)
         * as our counter.
         */
        ret = strict_strtoul(number, 0, (unsigned long *)&count);
        if (ret)
                return ret;

 out_reg:
        ret = register_ftrace_function_probe(glob, ops, count);

        return ret < 0 ? ret : 0;
}
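/*
 * Command definitions that bind the "traceon" and "traceoff" keywords in
 * set_ftrace_filter to the parsing callback above.
 */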
static struct ftrace_func_command ftrace_traceon_cmd = {
        .name                   = "traceon",
        .func                   = ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
        .name                   = "traceoff",
        .func                   = ftrace_trace_onoff_callback,
};
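/*
 * Example usage from user space (paths assume the tracing directory at
 * /sys/kernel/debug/tracing):
 *
 *      echo '__schedule_bug:traceon' > set_ftrace_filter
 *      echo 'do_fork:traceoff:5' > set_ftrace_filter
 *
 * An optional ':count' limits how many times the probe fires; without it
 * the probe stays armed indefinitely.  Prefixing the glob with '!' removes
 * a previously installed probe.
 */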
static int __init init_func_cmd_traceon(void)
{
        int ret;

        ret = register_ftrace_command(&ftrace_traceoff_cmd);
        if (ret)
                return ret;

        ret = register_ftrace_command(&ftrace_traceon_cmd);
        if (ret)
                unregister_ftrace_command(&ftrace_traceoff_cmd);
        return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
        return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
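/*
 * Boot-time registration: make the "function" tracer selectable through
 * 'current_tracer' and, with dynamic ftrace, register the probe commands.
 */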
static __init int init_function_trace(void)
{
        init_func_cmd_traceon();
        return register_tracer(&function_trace);
}
device_initcall(init_function_trace);