// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs);
static struct tracer_flags func_flags;

/* Our option */
enum {
	TRACE_FUNC_OPT_STACK	= 0x1,
};
int ftrace_allocate_ftrace_ops(struct trace_array *tr)
{
	struct ftrace_ops *ops;

	/* The top level array uses the "global_ops" */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	/* Currently only the non stack version is supported */
	ops->func = function_trace_call;
	ops->flags = FTRACE_OPS_FL_PID;

	tr->ops = ops;
	ops->private = tr;

	return 0;
}

void ftrace_free_ftrace_ops(struct trace_array *tr)
{
	kfree(tr->ops);
	tr->ops = NULL;
}
int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent)
{
	/*
	 * The top level array uses the "global_ops", and the files are
	 * created on boot up.
	 */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	if (!tr->ops)
		return -EINVAL;

	ftrace_create_filter_files(tr->ops, parent);

	return 0;
}

void ftrace_destroy_function_files(struct trace_array *tr)
{
	ftrace_destroy_filter_files(tr->ops);
	ftrace_free_ftrace_ops(tr);
}
static int function_trace_init(struct trace_array *tr)
{
	ftrace_func_t func;

	/*
	 * Instance trace_arrays get their ops allocated
	 * at instance creation. Unless it failed
	 * the allocation.
	 */
	if (!tr->ops)
		return -ENOMEM;

	/* Currently only the global instance can do stack tracing */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
	    func_flags.val & TRACE_FUNC_OPT_STACK)
		func = function_stack_trace_call;
	else
		func = function_trace_call;

	ftrace_init_array_ops(tr, func);

	tr->array_buffer.cpu = get_cpu();
	put_cpu();

	tracing_start_cmdline_record();
	tracing_start_function_trace(tr);
	return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace(tr);
	tracing_stop_cmdline_record();
	ftrace_reset_array_ops(tr);
}
static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->array_buffer);
}
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	int bit;
	int cpu;
	int pc;

	if (unlikely(!tr->function_enabled))
		return;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	pc = preempt_count();
	preempt_disable_notrace();

	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	if (!atomic_read(&data->disabled)) {
		local_save_flags(flags);
		trace_function(tr, ip, parent_ip, flags, pc);
	}
	ftrace_test_recursion_unlock(bit);
	preempt_enable_notrace();
}
#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 2:
 *
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 2
#else
/*
 * Skip 3:
 *
 *   __trace_stack()
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 3
#endif
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
		__trace_stack(tr, flags, STACK_SKIP, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = func_opts
};
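
/*
 * Illustrative usage sketch, assuming tracefs is mounted at
 * /sys/kernel/tracing: the option defined above shows up as a tracer
 * option file, so stack tracing for the function tracer can be toggled
 * with something like:
 *
 *	echo function > /sys/kernel/tracing/current_tracer
 *	echo 1 > /sys/kernel/tracing/options/func_stack_trace
 *
 * That write lands in func_set_flag() below with bit == TRACE_FUNC_OPT_STACK.
 */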
static void tracing_start_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	register_ftrace_function(tr->ops);
	tr->function_enabled = 1;
}

static void tracing_stop_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	unregister_ftrace_function(tr->ops);
}

static struct tracer function_trace;
static int
func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	switch (bit) {
	case TRACE_FUNC_OPT_STACK:
		/* do nothing if already set */
		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
			break;

		/* We can change this flag when not running. */
		if (tr->current_trace != &function_trace)
			break;

		unregister_ftrace_function(tr->ops);

		if (set) {
			tr->ops->func = function_stack_trace_call;
			register_ftrace_function(tr->ops);
		} else {
			tr->ops->func = function_trace_call;
			register_ftrace_function(tr->ops);
		}

		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static struct tracer function_trace __tracer_data =
{
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
	.allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function,
#endif
};
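
/*
 * Illustrative sketch, assuming tracefs at /sys/kernel/tracing: because
 * .allow_instances is set, the function tracer can also run inside a
 * trace instance, which gets its own ftrace_ops from
 * ftrace_allocate_ftrace_ops():
 *
 *	mkdir /sys/kernel/tracing/instances/foo
 *	echo function > /sys/kernel/tracing/instances/foo/current_tracer
 */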
#ifdef CONFIG_DYNAMIC_FTRACE
static void update_traceon_count(struct ftrace_probe_ops *ops,
				 unsigned long ip,
				 struct trace_array *tr, bool on,
				 void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;

	/*
	 * Tracing gets disabled (or enabled) once per count.
	 * This function can be called at the same time on multiple CPUs.
	 * It is fine if both disable (or enable) tracing, as disabling
	 * (or enabling) the second time doesn't do anything as the
	 * state of the tracer is already disabled (or enabled).
	 * What needs to be synchronized in this case is that the count
	 * only gets decremented once, even if the tracer is disabled
	 * (or enabled) twice, as the second one is really a nop.
	 *
	 * The memory barriers guarantee that we only decrement the
	 * counter once. First the count is read to a local variable
	 * and a read barrier is used to make sure that it is loaded
	 * before checking if the tracer is in the state we want.
	 * If the tracer is not in the state we want, then the count
	 * is guaranteed to be the old count.
	 *
	 * Next the tracer is set to the state we want (disabled or enabled)
	 * then a write memory barrier is used to make sure that
	 * the new state is visible before changing the counter by
	 * one minus the old counter. This guarantees that another CPU
	 * executing this code will see the new state before seeing
	 * the new counter value, and would not do anything if the new
	 * counter is seen.
	 *
	 * Note, there is no synchronization between this and a user
	 * setting the tracing_on file. But we currently don't care
	 * about that.
	 */
	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
	old_count = *count;

	if (old_count <= 0)
		return;

	/* Make sure we see count before checking tracing state */
	smp_rmb();

	if (on == !!tracer_tracing_is_on(tr))
		return;

	if (on)
		tracer_tracing_on(tr);
	else
		tracer_tracing_off(tr);

	/* Make sure tracing state is visible before updating count */
	smp_wmb();

	*count = old_count - 1;
}
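
/*
 * Illustrative example of the counting behaviour described above: for a
 * probe such as "schedule:traceoff:3", *count starts at 3 and tracing is
 * switched off at most three times; the count is only decremented when
 * update_traceon_count() actually changed the tracing state.
 */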
static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	update_traceon_count(ops, ip, tr, 1, data);
}

static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip,
		      struct trace_array *tr, struct ftrace_probe_ops *ops,
		      void *data)
{
	update_traceon_count(ops, ip, tr, 0, data);
}

static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip,
	       struct trace_array *tr, struct ftrace_probe_ops *ops,
	       void *data)
{
	if (tracer_tracing_is_on(tr))
		return;

	tracer_tracing_on(tr);
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
		struct trace_array *tr, struct ftrace_probe_ops *ops,
		void *data)
{
	if (!tracer_tracing_is_on(tr))
		return;

	tracer_tracing_off(tr);
}
#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 3:
 *
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 3
#else
/*
 * Skip 5:
 *
 *   __trace_stack()
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 5
#endif
static __always_inline void trace_stack(struct trace_array *tr)
{
	unsigned long flags;
	int pc;

	local_save_flags(flags);
	pc = preempt_count();

	__trace_stack(tr, flags, FTRACE_STACK_SKIP, pc);
}

static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	trace_stack(tr);
}
static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip,
			struct trace_array *tr, struct ftrace_probe_ops *ops,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;
	long new_count;

	if (!tracing_is_on())
		return;

	/* unlimited? */
	if (!mapper) {
		trace_stack(tr);
		return;
	}

	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	/*
	 * Stack traces should only execute the number of times the
	 * user specified in the counter.
	 */
	do {
		old_count = *count;

		if (!old_count)
			return;

		new_count = old_count - 1;
		new_count = cmpxchg(count, old_count, new_count);
		if (new_count == old_count)
			trace_stack(tr);

		if (!tracing_is_on())
			return;

	} while (new_count != old_count);
}
static int update_count(struct ftrace_probe_ops *ops, unsigned long ip,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count) {
		if (*count <= 0)
			return 0;
		(*count)--;
	}

	return 1;
}
static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ALL);
}

/* Only dump the current CPU buffer. */
static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ORIG);
}
static int
ftrace_probe_print(const char *name, struct seq_file *m,
		   unsigned long ip, struct ftrace_probe_ops *ops,
		   void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	seq_printf(m, "%ps:%s", (void *)ip, name);

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count)
		seq_printf(m, ":count=%ld\n", *count);
	else
		seq_puts(m, ":unlimited\n");

	return 0;
}
static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops,
		     void *data)
{
	return ftrace_probe_print("traceon", m, ip, ops, data);
}

static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceoff", m, ip, ops, data);
}

static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("stacktrace", m, ip, ops, data);
}

static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
		  struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("dump", m, ip, ops, data);
}

static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("cpudump", m, ip, ops, data);
}
static int
ftrace_count_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *init_data, void **data)
{
	struct ftrace_func_mapper *mapper = *data;

	if (!mapper) {
		mapper = allocate_ftrace_func_mapper();
		if (!mapper)
			return -ENOMEM;
		*data = mapper;
	}

	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
}

static void
ftrace_count_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *data)
{
	struct ftrace_func_mapper *mapper = data;

	if (!ip) {
		free_ftrace_func_mapper(mapper, NULL);
		return;
	}

	ftrace_func_mapper_remove_ip(mapper, ip);
}
static struct ftrace_probe_ops traceon_count_probe_ops = {
	.func			= ftrace_traceon_count,
	.print			= ftrace_traceon_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops traceoff_count_probe_ops = {
	.func			= ftrace_traceoff_count,
	.print			= ftrace_traceoff_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops stacktrace_count_probe_ops = {
	.func			= ftrace_stacktrace_count,
	.print			= ftrace_stacktrace_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops dump_probe_ops = {
	.func			= ftrace_dump_probe,
	.print			= ftrace_dump_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops cpudump_probe_ops = {
	.func			= ftrace_cpudump_probe,
	.print			= ftrace_cpudump_print,
};

static struct ftrace_probe_ops traceon_probe_ops = {
	.func			= ftrace_traceon,
	.print			= ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func			= ftrace_traceoff,
	.print			= ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_probe_ops = {
	.func			= ftrace_stacktrace,
	.print			= ftrace_stacktrace_print,
};
static int
ftrace_trace_probe_callback(struct trace_array *tr,
			    struct ftrace_probe_ops *ops,
			    struct ftrace_hash *hash, char *glob,
			    char *cmd, char *param, int enable)
{
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!')
		return unregister_ftrace_function_probe_func(glob+1, tr, ops);

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, tr, ops, count);

	return ret < 0 ? ret : 0;
}
static int
ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
	else
		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}
static int
ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash,
		     char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &dump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}

static int
ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash,
			char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &cpudump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}
static struct ftrace_func_command ftrace_traceon_cmd = {
	.name			= "traceon",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name			= "traceoff",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_stacktrace_cmd = {
	.name			= "stacktrace",
	.func			= ftrace_stacktrace_callback,
};

static struct ftrace_func_command ftrace_dump_cmd = {
	.name			= "dump",
	.func			= ftrace_dump_callback,
};

static struct ftrace_func_command ftrace_cpudump_cmd = {
	.name			= "cpudump",
	.func			= ftrace_cpudump_callback,
};
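
/*
 * Illustrative usage sketch, assuming tracefs at /sys/kernel/tracing:
 * the commands registered below are written into set_ftrace_filter as
 * "<function>:<command>[:<count>]", for example:
 *
 *	echo 'schedule:traceoff' > /sys/kernel/tracing/set_ftrace_filter
 *	echo 'vfs_read:stacktrace:5' > /sys/kernel/tracing/set_ftrace_filter
 *
 * Prefixing the function name with '!' removes a previously installed probe.
 */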
static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		goto out_free_traceoff;

	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
	if (ret)
		goto out_free_traceon;

	ret = register_ftrace_command(&ftrace_dump_cmd);
	if (ret)
		goto out_free_stacktrace;

	ret = register_ftrace_command(&ftrace_cpudump_cmd);
	if (ret)
		goto out_free_dump;

	return 0;

 out_free_dump:
	unregister_ftrace_command(&ftrace_dump_cmd);
 out_free_stacktrace:
	unregister_ftrace_command(&ftrace_stacktrace_cmd);
 out_free_traceon:
	unregister_ftrace_command(&ftrace_traceon_cmd);
 out_free_traceoff:
	unregister_ftrace_command(&ftrace_traceoff_cmd);

	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

__init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}