// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs);
static struct tracer_flags func_flags;

/* Our option */
enum {
	TRACE_FUNC_OPT_STACK	= 0x1,
};
static int allocate_ftrace_ops(struct trace_array *tr)
{
	struct ftrace_ops *ops;

	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	/* Currently only the non stack version is supported */
	ops->func = function_trace_call;
	ops->flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_PID;

	tr->ops = ops;
	ops->private = tr;
	return 0;
}
int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent)
{
	int ret;

	/*
	 * The top level array uses the "global_ops", and the files are
	 * created on boot up.
	 */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	ret = allocate_ftrace_ops(tr);
	if (ret)
		return ret;

	ftrace_create_filter_files(tr->ops, parent);

	return 0;
}
void ftrace_destroy_function_files(struct trace_array *tr)
{
	ftrace_destroy_filter_files(tr->ops);
	kfree(tr->ops);
	tr->ops = NULL;
}
static int function_trace_init(struct trace_array *tr)
{
	ftrace_func_t func;

	/*
	 * Instance trace_arrays get their ops allocated
	 * at instance creation. Unless it failed
	 * the allocation.
	 */
	if (!tr->ops)
		return -ENOMEM;

	/* Currently only the global instance can do stack tracing */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
	    func_flags.val & TRACE_FUNC_OPT_STACK)
		func = function_stack_trace_call;
	else
		func = function_trace_call;

	ftrace_init_array_ops(tr, func);

	tr->trace_buffer.cpu = get_cpu();
	put_cpu();

	tracing_start_cmdline_record();
	tracing_start_function_trace(tr);
	return 0;
}
static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace(tr);
	tracing_stop_cmdline_record();
	ftrace_reset_array_ops(tr);
}
static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->trace_buffer);
}
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	int bit;
	int cpu;
	int pc;

	if (unlikely(!tr->function_enabled))
		return;

	pc = preempt_count();
	preempt_disable_notrace();

	bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
	if (bit < 0)
		goto out;

	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	if (!atomic_read(&data->disabled)) {
		local_save_flags(flags);
		trace_function(tr, ip, parent_ip, flags, pc);
	}
	trace_clear_recursion(bit);

 out:
	preempt_enable_notrace();
}
#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 2:
 *
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 2
#else
/*
 * Skip 3:
 *
 *   __trace_stack()
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 3
#endif
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
		__trace_stack(tr, flags, STACK_SKIP, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = func_opts
};
static void tracing_start_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	register_ftrace_function(tr->ops);
	tr->function_enabled = 1;
}

static void tracing_stop_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	unregister_ftrace_function(tr->ops);
}
static struct tracer function_trace;

static int
func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	switch (bit) {
	case TRACE_FUNC_OPT_STACK:
		/* do nothing if already set */
		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
			break;

		/* We can change this flag when not running. */
		if (tr->current_trace != &function_trace)
			break;

		unregister_ftrace_function(tr->ops);

		if (set) {
			tr->ops->func = function_stack_trace_call;
			register_ftrace_function(tr->ops);
		} else {
			tr->ops->func = function_trace_call;
			register_ftrace_function(tr->ops);
		}

		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static struct tracer function_trace __tracer_data =
{
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
	.allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function,
#endif
};
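/*
 * Illustrative usage of this tracer from user space, via the standard
 * tracefs interface (the mount point may vary by system):
 *
 *   # echo function > /sys/kernel/tracing/current_tracer
 *   # echo 1 > /sys/kernel/tracing/options/func_stack_trace
 *   # cat /sys/kernel/tracing/trace
 */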
#ifdef CONFIG_DYNAMIC_FTRACE
static void update_traceon_count(struct ftrace_probe_ops *ops,
				 unsigned long ip,
				 struct trace_array *tr, bool on,
				 void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;

	/*
	 * Tracing gets disabled (or enabled) once per count.
	 * This function can be called at the same time on multiple CPUs.
	 * It is fine if both disable (or enable) tracing, as disabling
	 * (or enabling) the second time doesn't do anything as the
	 * state of the tracer is already disabled (or enabled).
	 * What needs to be synchronized in this case is that the count
	 * only gets decremented once, even if the tracer is disabled
	 * (or enabled) twice, as the second one is really a nop.
	 *
	 * The memory barriers guarantee that we only decrement the
	 * counter once. First the count is read to a local variable
	 * and a read barrier is used to make sure that it is loaded
	 * before checking if the tracer is in the state we want.
	 * If the tracer is not in the state we want, then the count
	 * is guaranteed to be the old count.
	 *
	 * Next the tracer is set to the state we want (disabled or enabled)
	 * then a write memory barrier is used to make sure that
	 * the new state is visible before changing the counter by
	 * one minus the old counter. This guarantees that another CPU
	 * executing this code will see the new state before seeing
	 * the new counter value, and would not do anything if the new
	 * counter is seen.
	 *
	 * Note, there is no synchronization between this and a user
	 * setting the tracing_on file. But we currently don't care
	 * about that.
	 */
	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
	old_count = *count;

	if (old_count <= 0)
		return;

	/* Make sure we see count before checking tracing state */
	smp_rmb();

	if (on == !!tracer_tracing_is_on(tr))
		return;

	if (on)
		tracer_tracing_on(tr);
	else
		tracer_tracing_off(tr);

	/* Make sure tracing state is visible before updating count */
	smp_wmb();

	*count = old_count - 1;
}
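/*
 * To restate the barrier pairing above: the smp_rmb() in one CPU's
 * invocation pairs with the smp_wmb() in another's, so a CPU that
 * observes the decremented count is guaranteed to also observe the
 * updated tracing state, and bails out before decrementing again.
 */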
static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	update_traceon_count(ops, ip, tr, 1, data);
}
static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip,
		      struct trace_array *tr, struct ftrace_probe_ops *ops,
		      void *data)
{
	update_traceon_count(ops, ip, tr, 0, data);
}
static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip,
	       struct trace_array *tr, struct ftrace_probe_ops *ops,
	       void *data)
{
	if (tracer_tracing_is_on(tr))
		return;

	tracer_tracing_on(tr);
}
static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
		struct trace_array *tr, struct ftrace_probe_ops *ops,
		void *data)
{
	if (!tracer_tracing_is_on(tr))
		return;

	tracer_tracing_off(tr);
}
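/*
 * Illustrative use of the traceon/traceoff probes from tracefs; an
 * optional ":count" suffix limits how many times the probe fires:
 *
 *   # echo 'schedule:traceoff' > /sys/kernel/tracing/set_ftrace_filter
 *   # echo 'schedule:traceon:5' > /sys/kernel/tracing/set_ftrace_filter
 */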
#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 3:
 *
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 3
#else
/*
 * Skip 5:
 *
 *   __trace_stack()
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 5
#endif
static __always_inline void trace_stack(struct trace_array *tr)
{
	unsigned long flags;
	int pc;

	local_save_flags(flags);
	pc = preempt_count();

	__trace_stack(tr, flags, FTRACE_STACK_SKIP, pc);
}
static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	trace_stack(tr);
}
static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip,
			struct trace_array *tr, struct ftrace_probe_ops *ops,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;
	long new_count;

	if (!tracing_is_on())
		return;

	/* unlimited? */
	if (!mapper) {
		trace_stack(tr);
		return;
	}

	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	/*
	 * Stack traces should only execute the number of times the
	 * user specified in the counter.
	 */
	do {
		old_count = *count;

		if (!old_count)
			return;

		new_count = old_count - 1;
		new_count = cmpxchg(count, old_count, new_count);
		if (new_count == old_count)
			trace_stack(tr);

		if (!tracing_is_on())
			return;

	} while (new_count != old_count);
}
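/*
 * Illustrative use of the stacktrace probe (here ":5" caps it at five
 * hits; the function name is only an example):
 *
 *   # echo 'kfree:stacktrace:5' > /sys/kernel/tracing/set_ftrace_filter
 */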
static int update_count(struct ftrace_probe_ops *ops, unsigned long ip,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count) {
		if (*count <= 0)
			return 0;
		(*count)--;
	}

	return 1;
}
static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ALL);
}
/* Only dump the current CPU buffer. */
static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ORIG);
}
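/*
 * Illustrative use of the dump/cpudump probes (each is registered with
 * a count of one below, so the buffers are dumped only once; the
 * function name is only an example):
 *
 *   # echo 'vfs_read:dump' > /sys/kernel/tracing/set_ftrace_filter
 *   # echo 'vfs_read:cpudump' > /sys/kernel/tracing/set_ftrace_filter
 */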
static int
ftrace_probe_print(const char *name, struct seq_file *m,
		   unsigned long ip, struct ftrace_probe_ops *ops,
		   void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	seq_printf(m, "%ps:%s", (void *)ip, name);

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count)
		seq_printf(m, ":count=%ld\n", *count);
	else
		seq_puts(m, ":unlimited\n");

	return 0;
}
static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops,
		     void *data)
{
	return ftrace_probe_print("traceon", m, ip, ops, data);
}

static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceoff", m, ip, ops, data);
}

static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("stacktrace", m, ip, ops, data);
}

static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
		  struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("dump", m, ip, ops, data);
}

static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("cpudump", m, ip, ops, data);
}
static int
ftrace_count_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *init_data, void **data)
{
	struct ftrace_func_mapper *mapper = *data;

	if (!mapper) {
		mapper = allocate_ftrace_func_mapper();
		if (!mapper)
			return -ENOMEM;
		*data = mapper;
	}

	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
}
static void
ftrace_count_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *data)
{
	struct ftrace_func_mapper *mapper = data;

	if (!ip) {
		free_ftrace_func_mapper(mapper, NULL);
		return;
	}

	ftrace_func_mapper_remove_ip(mapper, ip);
}
static struct ftrace_probe_ops traceon_count_probe_ops = {
	.func			= ftrace_traceon_count,
	.print			= ftrace_traceon_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops traceoff_count_probe_ops = {
	.func			= ftrace_traceoff_count,
	.print			= ftrace_traceoff_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops stacktrace_count_probe_ops = {
	.func			= ftrace_stacktrace_count,
	.print			= ftrace_stacktrace_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops dump_probe_ops = {
	.func			= ftrace_dump_probe,
	.print			= ftrace_dump_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops cpudump_probe_ops = {
	.func			= ftrace_cpudump_probe,
	.print			= ftrace_cpudump_print,
};

static struct ftrace_probe_ops traceon_probe_ops = {
	.func			= ftrace_traceon,
	.print			= ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func			= ftrace_traceoff,
	.print			= ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_probe_ops = {
	.func			= ftrace_stacktrace,
	.print			= ftrace_stacktrace_print,
};
static int
ftrace_trace_probe_callback(struct trace_array *tr,
			    struct ftrace_probe_ops *ops,
			    struct ftrace_hash *hash, char *glob,
			    char *cmd, char *param, int enable)
{
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!')
		return unregister_ftrace_function_probe_func(glob+1, tr, ops);

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, tr, ops, count);

	return ret < 0 ? ret : 0;
}
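/*
 * The commands parsed above follow the set_ftrace_filter grammar
 * <function>:<command>[:<count>]; a leading '!' removes a previously
 * installed probe, e.g.:
 *
 *   # echo '!schedule:traceoff' > /sys/kernel/tracing/set_ftrace_filter
 */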
static int
ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
	else
		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}
static int
ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}
static int
ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &dump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}
static int
ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &cpudump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}
static struct ftrace_func_command ftrace_traceon_cmd = {
	.name			= "traceon",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name			= "traceoff",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_stacktrace_cmd = {
	.name			= "stacktrace",
	.func			= ftrace_stacktrace_callback,
};

static struct ftrace_func_command ftrace_dump_cmd = {
	.name			= "dump",
	.func			= ftrace_dump_callback,
};

static struct ftrace_func_command ftrace_cpudump_cmd = {
	.name			= "cpudump",
	.func			= ftrace_cpudump_callback,
};
static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		goto out_free_traceoff;

	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
	if (ret)
		goto out_free_traceon;

	ret = register_ftrace_command(&ftrace_dump_cmd);
	if (ret)
		goto out_free_stacktrace;

	ret = register_ftrace_command(&ftrace_cpudump_cmd);
	if (ret)
		goto out_free_dump;

	return 0;

 out_free_dump:
	unregister_ftrace_command(&ftrace_dump_cmd);
 out_free_stacktrace:
	unregister_ftrace_command(&ftrace_stacktrace_cmd);
 out_free_traceon:
	unregister_ftrace_command(&ftrace_traceon_cmd);
 out_free_traceoff:
	unregister_ftrace_command(&ftrace_traceoff_cmd);

	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
__init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}