/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs);
static struct tracer_flags func_flags;
enum {
	TRACE_FUNC_OPT_STACK	= 0x1,
};
static int allocate_ftrace_ops(struct trace_array *tr)
{
	struct ftrace_ops *ops;

	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	/* Currently only the non stack version is supported */
	ops->func = function_trace_call;
	ops->flags = FTRACE_OPS_FL_RECURSION_SAFE;

	tr->ops = ops;
	ops->private = tr;
	return 0;
}
int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent)
{
	int ret;

	/*
	 * The top level array uses the "global_ops", and the files are
	 * created on boot up.
	 */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	ret = allocate_ftrace_ops(tr);
	if (ret)
		return ret;

	ftrace_create_filter_files(tr->ops, parent);

	return 0;
}

void ftrace_destroy_function_files(struct trace_array *tr)
{
	ftrace_destroy_filter_files(tr->ops);
	kfree(tr->ops);
	tr->ops = NULL;
}
static int function_trace_init(struct trace_array *tr)
{
	ftrace_func_t func;

	/*
	 * Instance trace_arrays get their ops allocated
	 * at instance creation. Unless it failed
	 * the allocation.
	 */
	if (!tr->ops)
		return -ENOMEM;

	/* Currently only the global instance can do stack tracing */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
	    func_flags.val & TRACE_FUNC_OPT_STACK)
		func = function_stack_trace_call;
	else
		func = function_trace_call;

	ftrace_init_array_ops(tr, func);

	tr->trace_buffer.cpu = get_cpu();
	put_cpu();

	tracing_start_cmdline_record();
	tracing_start_function_trace(tr);
	return 0;
}
static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace(tr);
	tracing_stop_cmdline_record();
	ftrace_reset_array_ops(tr);
}
static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->trace_buffer);
}
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	int bit;
	int cpu;
	int pc;

	if (unlikely(!tr->function_enabled))
		return;

	pc = preempt_count();
	preempt_disable_notrace();

	bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
	if (bit < 0)
		goto out;

	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	if (!atomic_read(&data->disabled)) {
		local_save_flags(flags);
		trace_function(tr, ip, parent_ip, flags, pc);
	}
	trace_clear_recursion(bit);

 out:
	preempt_enable_notrace();
}
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
		/*
		 * skip over 5 funcs:
		 *    __ftrace_trace_stack,
		 *    __trace_stack,
		 *    function_stack_trace_call
		 *    ftrace_list_func
		 *    ftrace_call
		 */
		__trace_stack(tr, flags, 5, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = func_opts
};
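/*
 * Example usage (assuming tracefs is mounted at /sys/kernel/tracing):
 * while the function tracer is active, the flag above is exposed as a
 * tracer option and toggled through func_set_flag() below:
 *
 *   echo 1 > /sys/kernel/tracing/options/func_stack_trace
 */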
static void tracing_start_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	register_ftrace_function(tr->ops);
	tr->function_enabled = 1;
}

static void tracing_stop_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	unregister_ftrace_function(tr->ops);
}
static struct tracer function_trace;

static int
func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	switch (bit) {
	case TRACE_FUNC_OPT_STACK:
		/* do nothing if already set */
		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
			break;

		/* We can change this flag when not running. */
		if (tr->current_trace != &function_trace)
			break;

		unregister_ftrace_function(tr->ops);

		if (set) {
			tr->ops->func = function_stack_trace_call;
			register_ftrace_function(tr->ops);
		} else {
			tr->ops->func = function_trace_call;
			register_ftrace_function(tr->ops);
		}

		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static struct tracer function_trace __tracer_data =
{
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
	.allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function,
#endif
};
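/*
 * Example usage (assuming tracefs is mounted at /sys/kernel/tracing):
 * select the tracer registered above and read its output:
 *
 *   echo function > /sys/kernel/tracing/current_tracer
 *   cat /sys/kernel/tracing/trace
 */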
#ifdef CONFIG_DYNAMIC_FTRACE
static void update_traceon_count(void **data, bool on)
{
	long *count = (long *)data;
	long old_count = *count;

	/*
	 * Tracing gets disabled (or enabled) once per count.
	 * This function can be called at the same time on multiple CPUs.
	 * It is fine if both disable (or enable) tracing, as disabling
	 * (or enabling) the second time doesn't do anything as the
	 * state of the tracer is already disabled (or enabled).
	 * What needs to be synchronized in this case is that the count
	 * only gets decremented once, even if the tracer is disabled
	 * (or enabled) twice, as the second one is really a nop.
	 *
	 * The memory barriers guarantee that we only decrement the
	 * counter once. First the count is read to a local variable
	 * and a read barrier is used to make sure that it is loaded
	 * before checking if the tracer is in the state we want.
	 * If the tracer is not in the state we want, then the count
	 * is guaranteed to be the old count.
	 *
	 * Next the tracer is set to the state we want (disabled or enabled)
	 * then a write memory barrier is used to make sure that
	 * the new state is visible before changing the counter by
	 * one minus the old counter. This guarantees that another CPU
	 * executing this code will see the new state before seeing
	 * the new counter value, and would not do anything if the new
	 * counter is seen.
	 *
	 * Note, there is no synchronization between this and a user
	 * setting the tracing_on file. But we currently don't care
	 * about that.
	 */
	if (!old_count)
		return;

	/* Make sure we see count before checking tracing state */
	smp_rmb();

	if (on == !!tracing_is_on())
		return;

	if (on)
		tracing_on();
	else
		tracing_off();

	/* unlimited? */
	if (old_count == -1)
		return;

	/* Make sure tracing state is visible before updating count */
	smp_wmb();

	*count = old_count - 1;
}
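/*
 * Illustrative interleavings for the barrier pairing above, assuming
 * two CPUs hit a traceoff probe while *count == 2:
 *
 *  - Both CPUs read old_count == 2 and both still see tracing on: both
 *    call tracing_off() (the second call is a nop) and both store
 *    *count = 2 - 1, so the count still drops by exactly one.
 *
 *  - A CPU that reads the already-decremented count is guaranteed by
 *    the smp_wmb()/smp_rmb() pairing to also see the new tracing state,
 *    so the state check returns early and the count is not decremented
 *    a second time for the same event.
 */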
static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip, void **data)
{
	update_traceon_count(data, 1);
}

static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip, void **data)
{
	update_traceon_count(data, 0);
}
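/*
 * Example usage (assuming tracefs is mounted at /sys/kernel/tracing):
 * arm a traceoff probe that fires at most three times:
 *
 *   echo 'schedule:traceoff:3' > /sys/kernel/tracing/set_ftrace_filter
 *
 * Without the ":3" count the probe is unlimited, stored as -1 in the
 * callback data.
 */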
static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (tracing_is_on())
		return;

	tracing_on();
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (!tracing_is_on())
		return;

	tracing_off();
}
/*
 * Skip 4:
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *   ftrace_ops_list_func()
 *   ftrace_call()
 */
#define STACK_SKIP 4

static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip, void **data)
{
	trace_dump_stack(STACK_SKIP);
}
static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip, void **data)
{
	long *count = (long *)data;
	long old_count;
	long new_count;

	/*
	 * Stack traces should only execute the number of times the
	 * user specified in the counter.
	 */
	do {
		if (!tracing_is_on())
			return;

		old_count = *count;
		if (!old_count)
			return;

		/* unlimited? */
		if (old_count == -1) {
			trace_dump_stack(STACK_SKIP);
			return;
		}

		new_count = old_count - 1;
		new_count = cmpxchg(count, old_count, new_count);
		if (new_count == old_count)
			trace_dump_stack(STACK_SKIP);
	} while (new_count != old_count);
}
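/*
 * Example usage (assuming tracefs is mounted at /sys/kernel/tracing):
 * dump a stack trace into the ring buffer the first five times kfree()
 * is hit:
 *
 *   echo 'kfree:stacktrace:5' > /sys/kernel/tracing/set_ftrace_filter
 *   cat /sys/kernel/tracing/trace
 */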
static int update_count(void **data)
{
	unsigned long *count = (long *)data;

	if (!*count)
		return 0;

	if (*count != -1)
		(*count)--;

	return 1;
}
static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (update_count(data))
		ftrace_dump(DUMP_ALL);
}

/* Only dump the current CPU buffer. */
static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (update_count(data))
		ftrace_dump(DUMP_ORIG);
}
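/*
 * Example usage (assuming tracefs is mounted at /sys/kernel/tracing;
 * oops_enter is just an illustrative target function):
 *
 *   echo 'oops_enter:dump' > /sys/kernel/tracing/set_ftrace_filter
 *   echo 'oops_enter:cpudump' > /sys/kernel/tracing/set_ftrace_filter
 *
 * "dump" writes every CPU's ring buffer to the console via
 * ftrace_dump(DUMP_ALL), while "cpudump" limits it to the CPU that hit
 * the probe (DUMP_ORIG). Both are registered below as one-shot probes.
 */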
static int
ftrace_probe_print(const char *name, struct seq_file *m,
		   unsigned long ip, void *data)
{
	long count = (long)data;

	seq_printf(m, "%ps:%s", (void *)ip, name);

	if (count == -1)
		seq_puts(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", count);

	return 0;
}
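/*
 * Example of the resulting line when reading set_ftrace_filter with a
 * traceoff probe installed on schedule() and a remaining count of 3:
 *
 *   schedule:traceoff:count=3
 *
 * An unlimited probe prints ":unlimited" instead of the count.
 */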
static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceon", m, ip, data);
}

static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceoff", m, ip, data);
}

static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("stacktrace", m, ip, data);
}

static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
		  struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("dump", m, ip, data);
}

static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("cpudump", m, ip, data);
}
static struct ftrace_probe_ops traceon_count_probe_ops = {
	.func			= ftrace_traceon_count,
	.print			= ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_count_probe_ops = {
	.func			= ftrace_traceoff_count,
	.print			= ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_count_probe_ops = {
	.func			= ftrace_stacktrace_count,
	.print			= ftrace_stacktrace_print,
};

static struct ftrace_probe_ops dump_probe_ops = {
	.func			= ftrace_dump_probe,
	.print			= ftrace_dump_print,
};

static struct ftrace_probe_ops cpudump_probe_ops = {
	.func			= ftrace_cpudump_probe,
	.print			= ftrace_cpudump_print,
};

static struct ftrace_probe_ops traceon_probe_ops = {
	.func			= ftrace_traceon,
	.print			= ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func			= ftrace_traceoff,
	.print			= ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_probe_ops = {
	.func			= ftrace_stacktrace,
	.print			= ftrace_stacktrace_print,
};
static int
ftrace_trace_probe_callback(struct ftrace_probe_ops *ops,
			    struct ftrace_hash *hash, char *glob,
			    char *cmd, char *param, int enable)
{
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!') {
		unregister_ftrace_function_probe_func(glob+1, ops);
		return 0;
	}

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, ops, count);

	return ret < 0 ? ret : 0;
}
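/*
 * Example of the parsing above for the filter string
 * "schedule:traceoff:3": "glob" is "schedule", "cmd" is "traceoff" and
 * "param" is "3", which kstrtoul() stores into the pointer-sized
 * "count" that later rides along as the probe's callback data.
 * A leading '!' ("!schedule:traceoff") removes the probe instead.
 */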
static int
ftrace_trace_onoff_callback(struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
	else
		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;

	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
					   param, enable);
}
static int
ftrace_stacktrace_callback(struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;

	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
					   param, enable);
}
static int
ftrace_dump_callback(struct ftrace_hash *hash,
		     char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	ops = &dump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
					   "1", enable);
}
static int
ftrace_cpudump_callback(struct ftrace_hash *hash,
			char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	ops = &cpudump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
					   "1", enable);
}
static struct ftrace_func_command ftrace_traceon_cmd = {
	.name			= "traceon",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name			= "traceoff",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_stacktrace_cmd = {
	.name			= "stacktrace",
	.func			= ftrace_stacktrace_callback,
};

static struct ftrace_func_command ftrace_dump_cmd = {
	.name			= "dump",
	.func			= ftrace_dump_callback,
};

static struct ftrace_func_command ftrace_cpudump_cmd = {
	.name			= "cpudump",
	.func			= ftrace_cpudump_callback,
};
static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		goto out_free_traceoff;

	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
	if (ret)
		goto out_free_traceon;

	ret = register_ftrace_command(&ftrace_dump_cmd);
	if (ret)
		goto out_free_stacktrace;

	ret = register_ftrace_command(&ftrace_cpudump_cmd);
	if (ret)
		goto out_free_dump;

	return 0;

 out_free_dump:
	unregister_ftrace_command(&ftrace_dump_cmd);
 out_free_stacktrace:
	unregister_ftrace_command(&ftrace_stacktrace_cmd);
 out_free_traceon:
	unregister_ftrace_command(&ftrace_traceon_cmd);
 out_free_traceoff:
	unregister_ftrace_command(&ftrace_traceoff_cmd);

	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
static __init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}
core_initcall(init_function_trace);