// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
			       struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
				     struct ftrace_ops *op,
				     struct ftrace_regs *fregs);
static struct tracer_flags func_flags;
/* Our option */
enum {
	TRACE_FUNC_NO_OPTS		= 0x0, /* No flags set. */
	TRACE_FUNC_OPT_STACK		= 0x1,
	TRACE_FUNC_OPT_NO_REPEATS	= 0x2,

	/* Update this to next highest bit. */
	TRACE_FUNC_OPT_HIGHEST_BIT	= 0x4
};

#define TRACE_FUNC_OPT_MASK	(TRACE_FUNC_OPT_HIGHEST_BIT - 1)
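/*
 * Worked example (added comment, not in the original source): with
 * TRACE_FUNC_OPT_HIGHEST_BIT == 0x4 the mask is 0x4 - 1 == 0x3, i.e.
 * exactly TRACE_FUNC_OPT_STACK | TRACE_FUNC_OPT_NO_REPEATS. Adding a new
 * option bit only requires bumping TRACE_FUNC_OPT_HIGHEST_BIT.
 */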
int ftrace_allocate_ftrace_ops(struct trace_array *tr)
{
	struct ftrace_ops *ops;

	/* The top level array uses the "global_ops" */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	/* Currently only the non stack version is supported */
	ops->func = function_trace_call;
	ops->flags = FTRACE_OPS_FL_PID;

	tr->ops = ops;
	ops->private = tr;

	return 0;
}
void ftrace_free_ftrace_ops(struct trace_array *tr)
{
	kfree(tr->ops);
	tr->ops = NULL;
}
int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent)
{
	int ret;

	/*
	 * The top level array uses the "global_ops", and the files are
	 * created on boot up.
	 */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	if (!tr->ops)
		return -EINVAL;

	ret = allocate_fgraph_ops(tr, tr->ops);
	if (ret) {
		kfree(tr->ops);
		return ret;
	}

	ftrace_create_filter_files(tr->ops, parent);

	return 0;
}
void ftrace_destroy_function_files(struct trace_array *tr)
{
	ftrace_destroy_filter_files(tr->ops);
	ftrace_free_ftrace_ops(tr);
}
static ftrace_func_t select_trace_function(u32 flags_val)
{
	switch (flags_val & TRACE_FUNC_OPT_MASK) {
	case TRACE_FUNC_NO_OPTS:
		return function_trace_call;
	case TRACE_FUNC_OPT_STACK:
		return function_stack_trace_call;
	case TRACE_FUNC_OPT_NO_REPEATS:
		return function_no_repeats_trace_call;
	case TRACE_FUNC_OPT_STACK | TRACE_FUNC_OPT_NO_REPEATS:
		return function_stack_no_repeats_trace_call;
	default:
		return NULL;
	}
}
static bool handle_func_repeats(struct trace_array *tr, u32 flags_val)
{
	if (!tr->last_func_repeats &&
	    (flags_val & TRACE_FUNC_OPT_NO_REPEATS)) {
		tr->last_func_repeats = alloc_percpu(struct trace_func_repeats);
		if (!tr->last_func_repeats)
			return false;
	}

	return true;
}
static int function_trace_init(struct trace_array *tr)
{
	ftrace_func_t func;

	/*
	 * Instance trace_arrays get their ops allocated
	 * at instance creation. Unless it failed
	 * the allocation.
	 */
	if (!tr->ops)
		return -ENOMEM;

	func = select_trace_function(func_flags.val);
	if (!func)
		return -EINVAL;

	if (!handle_func_repeats(tr, func_flags.val))
		return -ENOMEM;

	ftrace_init_array_ops(tr, func);

	tr->array_buffer.cpu = raw_smp_processor_id();

	tracing_start_cmdline_record();
	tracing_start_function_trace(tr);
	return 0;
}
static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace(tr);
	tracing_stop_cmdline_record();
	ftrace_reset_array_ops(tr);
}
static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->array_buffer);
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static __always_inline unsigned long
function_get_true_parent_ip(unsigned long parent_ip, struct ftrace_regs *fregs)
{
	unsigned long true_parent_ip;
	int idx = 0;

	true_parent_ip = parent_ip;
	if (unlikely(parent_ip == (unsigned long)&return_to_handler) && fregs)
		true_parent_ip = ftrace_graph_ret_addr(current, &idx, parent_ip,
				(unsigned long *)ftrace_regs_get_stack_pointer(fregs));
	return true_parent_ip;
}
#else
static __always_inline unsigned long
function_get_true_parent_ip(unsigned long parent_ip, struct ftrace_regs *fregs)
{
	return parent_ip;
}
#endif
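/*
 * Descriptive note (added comment, not in the original source):
 * function_trace_call() below is the default callback installed for the
 * "function" tracer. It records one trace entry per traced function call,
 * guarded by the ftrace recursion lock and by the per-cpu ->disabled
 * counter of the trace buffer.
 */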
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned int trace_ctx;
	int bit;

	if (unlikely(!tr->function_enabled))
		return;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	parent_ip = function_get_true_parent_ip(parent_ip, fregs);

	trace_ctx = tracing_gen_ctx();

	data = this_cpu_ptr(tr->array_buffer.data);
	if (!atomic_read(&data->disabled))
		trace_function(tr, ip, parent_ip, trace_ctx);

	ftrace_test_recursion_unlock(bit);
}
#ifdef CONFIG_UNWINDER_ORC
/* Skip 2: function_stack_trace_call() and ftrace_call() */
#define STACK_SKIP 2
#else
/* Skip 3: __trace_stack(), function_stack_trace_call(), ftrace_call() */
#define STACK_SKIP 3
#endif
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	int cpu;
	int disabled;
	unsigned int trace_ctx;
	int skip = STACK_SKIP;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	parent_ip = function_get_true_parent_ip(parent_ip, fregs);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		trace_ctx = tracing_gen_ctx_flags(flags);
		trace_function(tr, ip, parent_ip, trace_ctx);
#ifdef CONFIG_UNWINDER_FRAME_POINTER
		if (ftrace_pids_enabled(op))
			skip++;
#endif
		__trace_stack(tr, trace_ctx, skip);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
static inline bool is_repeat_check(struct trace_array *tr,
				   struct trace_func_repeats *last_info,
				   unsigned long ip, unsigned long parent_ip)
{
	if (last_info->ip == ip &&
	    last_info->parent_ip == parent_ip &&
	    last_info->count < U16_MAX) {
		last_info->ts_last_call =
			ring_buffer_time_stamp(tr->array_buffer.buffer);
		last_info->count++;
		return true;
	}

	return false;
}
static inline void process_repeats(struct trace_array *tr,
				   unsigned long ip, unsigned long parent_ip,
				   struct trace_func_repeats *last_info,
				   unsigned int trace_ctx)
{
	if (last_info->count) {
		trace_last_func_repeats(tr, last_info, trace_ctx);
		last_info->count = 0;
	}

	last_info->ip = ip;
	last_info->parent_ip = parent_ip;
}
static void
function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
			       struct ftrace_ops *op,
			       struct ftrace_regs *fregs)
{
	struct trace_func_repeats *last_info;
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned int trace_ctx;
	unsigned long flags;
	int bit;

	if (unlikely(!tr->function_enabled))
		return;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	parent_ip = function_get_true_parent_ip(parent_ip, fregs);
	data = this_cpu_ptr(tr->array_buffer.data);
	if (atomic_read(&data->disabled))
		goto out;

	/*
	 * An interrupt may happen at any place here. But as far as I can see,
	 * the only damage that this can cause is to mess up the repetition
	 * counter without valuable data being lost.
	 * TODO: think about a solution that is better than just hoping to be
	 * lucky.
	 */
	last_info = this_cpu_ptr(tr->last_func_repeats);
	if (is_repeat_check(tr, last_info, ip, parent_ip))
		goto out;

	local_save_flags(flags);
	trace_ctx = tracing_gen_ctx_flags(flags);
	process_repeats(tr, ip, parent_ip, last_info, trace_ctx);

	trace_function(tr, ip, parent_ip, trace_ctx);

out:
	ftrace_test_recursion_unlock(bit);
}
static void
function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
				     struct ftrace_ops *op,
				     struct ftrace_regs *fregs)
{
	struct trace_func_repeats *last_info;
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	int cpu;
	int disabled;
	unsigned int trace_ctx;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	parent_ip = function_get_true_parent_ip(parent_ip, fregs);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		last_info = per_cpu_ptr(tr->last_func_repeats, cpu);
		if (is_repeat_check(tr, last_info, ip, parent_ip))
			goto out;

		trace_ctx = tracing_gen_ctx_flags(flags);
		process_repeats(tr, ip, parent_ip, last_info, trace_ctx);

		trace_function(tr, ip, parent_ip, trace_ctx);
		__trace_stack(tr, trace_ctx, STACK_SKIP);
	}

out:
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ TRACER_OPT(func-no-repeats, TRACE_FUNC_OPT_NO_REPEATS) },
	{ } /* Always set a last empty entry */
};
static struct tracer_flags func_flags = {
	.val = TRACE_FUNC_NO_OPTS, /* By default: all flags disabled */
	.opts = func_opts
};
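/*
 * Usage sketch (added comment, not from the original file): while the
 * function tracer is active, these options are typically toggled from
 * user space through tracefs, e.g.:
 *
 *   echo func_stack_trace > /sys/kernel/tracing/trace_options
 *   echo func-no-repeats  > /sys/kernel/tracing/trace_options
 *
 * (paths assume the usual tracefs mount point).
 */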
static void tracing_start_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	register_ftrace_function(tr->ops);
	tr->function_enabled = 1;
}
static void tracing_stop_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	unregister_ftrace_function(tr->ops);
}
static struct tracer function_trace;
static int
func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	ftrace_func_t func;
	u32 new_flags;

	/* Do nothing if already set. */
	if (!!set == !!(func_flags.val & bit))
		return 0;

	/* We can change this flag only when not running. */
	if (tr->current_trace != &function_trace)
		return 0;

	new_flags = (func_flags.val & ~bit) | (set ? bit : 0);
	func = select_trace_function(new_flags);
	if (!func)
		return -EINVAL;

	/* Check if there's anything to change. */
	if (tr->ops->func == func)
		return 0;

	if (!handle_func_repeats(tr, new_flags))
		return -ENOMEM;

	unregister_ftrace_function(tr->ops);
	tr->ops->func = func;
	register_ftrace_function(tr->ops);

	return 0;
}
static struct tracer function_trace __tracer_data =
{
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
	.allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function,
#endif
};
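/*
 * Usage sketch (added comment, not from the original file): the tracer
 * registered below is selected from user space with e.g.
 *
 *   echo function > /sys/kernel/tracing/current_tracer
 *
 * and its per-call output is read back from the "trace" or "trace_pipe"
 * files in the same directory.
 */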
#ifdef CONFIG_DYNAMIC_FTRACE
static void update_traceon_count(struct ftrace_probe_ops *ops,
				 unsigned long ip,
				 struct trace_array *tr, bool on,
				 void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;
	/*
	 * Tracing gets disabled (or enabled) once per count.
	 * This function can be called at the same time on multiple CPUs.
	 * It is fine if both disable (or enable) tracing, as disabling
	 * (or enabling) the second time doesn't do anything as the
	 * state of the tracer is already disabled (or enabled).
	 * What needs to be synchronized in this case is that the count
	 * only gets decremented once, even if the tracer is disabled
	 * (or enabled) twice, as the second one is really a nop.
	 *
	 * The memory barriers guarantee that we only decrement the
	 * counter once. First the count is read to a local variable
	 * and a read barrier is used to make sure that it is loaded
	 * before checking if the tracer is in the state we want.
	 * If the tracer is not in the state we want, then the count
	 * is guaranteed to be the old count.
	 *
	 * Next the tracer is set to the state we want (disabled or enabled)
	 * then a write memory barrier is used to make sure that
	 * the new state is visible before changing the counter by
	 * one minus the old counter. This guarantees that another CPU
	 * executing this code will see the new state before seeing
	 * the new counter value, and would not do anything if the new
	 * counter is seen.
	 *
	 * Note, there is no synchronization between this and a user
	 * setting the tracing_on file. But we currently don't care
	 * about that.
	 */
	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
	old_count = *count;

	if (old_count <= 0)
		return;

	/* Make sure we see count before checking tracing state */
	smp_rmb();

	if (on == !!tracer_tracing_is_on(tr))
		return;

	if (on)
		tracer_tracing_on(tr);
	else
		tracer_tracing_off(tr);

	/* Make sure tracing state is visible before updating count */
	smp_wmb();

	*count = old_count - 1;
}
static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	update_traceon_count(ops, ip, tr, 1, data);
}
static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip,
		      struct trace_array *tr, struct ftrace_probe_ops *ops,
		      void *data)
{
	update_traceon_count(ops, ip, tr, 0, data);
}
static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip,
	       struct trace_array *tr, struct ftrace_probe_ops *ops,
	       void *data)
{
	if (tracer_tracing_is_on(tr))
		return;

	tracer_tracing_on(tr);
}
static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
		struct trace_array *tr, struct ftrace_probe_ops *ops,
		void *data)
{
	if (!tracer_tracing_is_on(tr))
		return;

	tracer_tracing_off(tr);
}
#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 3:
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 3
#else
/*
 * Skip 5: also __trace_stack() and ftrace_stacktrace() above the three
 * frames listed for the ORC case.
 */
#define FTRACE_STACK_SKIP 5
#endif
static __always_inline void trace_stack(struct trace_array *tr)
{
	unsigned int trace_ctx;

	trace_ctx = tracing_gen_ctx();

	__trace_stack(tr, trace_ctx, FTRACE_STACK_SKIP);
}
static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	trace_stack(tr);
}
static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip,
			struct trace_array *tr, struct ftrace_probe_ops *ops,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;
	long new_count;

	if (!tracing_is_on())
		return;

	/* unlimited? */
	if (!mapper) {
		trace_stack(tr);
		return;
	}

	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	/*
	 * Stack traces should only execute the number of times the
	 * user specified in the counter.
	 */
	do {
		old_count = *count;

		if (!old_count)
			return;

		new_count = old_count - 1;
		new_count = cmpxchg(count, old_count, new_count);
		if (new_count == old_count)
			trace_stack(tr);

		if (!tracing_is_on())
			return;

	} while (new_count != old_count);
}
static int update_count(struct ftrace_probe_ops *ops, unsigned long ip,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count) {
		if (*count <= 0)
			return 0;
		(*count)--;
	}

	return 1;
}
static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ALL);
}
/* Only dump the current CPU buffer. */
static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ORIG);
}
static int
ftrace_probe_print(const char *name, struct seq_file *m,
		   unsigned long ip, struct ftrace_probe_ops *ops,
		   void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	seq_printf(m, "%ps:%s", (void *)ip, name);

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count)
		seq_printf(m, ":count=%ld\n", *count);
	else
		seq_puts(m, ":unlimited\n");

	return 0;
}
static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops,
		     void *data)
{
	return ftrace_probe_print("traceon", m, ip, ops, data);
}
static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceoff", m, ip, ops, data);
}
static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("stacktrace", m, ip, ops, data);
}
static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
		  struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("dump", m, ip, ops, data);
}
static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("cpudump", m, ip, ops, data);
}
static int
ftrace_count_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *init_data, void **data)
{
	struct ftrace_func_mapper *mapper = *data;

	if (!mapper) {
		mapper = allocate_ftrace_func_mapper();
		if (!mapper)
			return -ENOMEM;
		*data = mapper;
	}

	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
}
static void
ftrace_count_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *data)
{
	struct ftrace_func_mapper *mapper = data;

	if (!ip) {
		free_ftrace_func_mapper(mapper, NULL);
		return;
	}

	ftrace_func_mapper_remove_ip(mapper, ip);
}
static struct ftrace_probe_ops traceon_count_probe_ops = {
	.func			= ftrace_traceon_count,
	.print			= ftrace_traceon_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};
static struct ftrace_probe_ops traceoff_count_probe_ops = {
	.func			= ftrace_traceoff_count,
	.print			= ftrace_traceoff_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};
static struct ftrace_probe_ops stacktrace_count_probe_ops = {
	.func			= ftrace_stacktrace_count,
	.print			= ftrace_stacktrace_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};
static struct ftrace_probe_ops dump_probe_ops = {
	.func			= ftrace_dump_probe,
	.print			= ftrace_dump_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};
static struct ftrace_probe_ops cpudump_probe_ops = {
	.func			= ftrace_cpudump_probe,
	.print			= ftrace_cpudump_print,
};
static struct ftrace_probe_ops traceon_probe_ops = {
	.func			= ftrace_traceon,
	.print			= ftrace_traceon_print,
};
static struct ftrace_probe_ops traceoff_probe_ops = {
	.func			= ftrace_traceoff,
	.print			= ftrace_traceoff_print,
};
static struct ftrace_probe_ops stacktrace_probe_ops = {
	.func			= ftrace_stacktrace,
	.print			= ftrace_stacktrace_print,
};
static int
ftrace_trace_probe_callback(struct trace_array *tr,
			    struct ftrace_probe_ops *ops,
			    struct ftrace_hash *hash, char *glob,
			    char *cmd, char *param, int enable)
{
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!')
		return unregister_ftrace_function_probe_func(glob+1, tr, ops);

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, tr, ops, count);

	return ret < 0 ? ret : 0;
}
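/*
 * Usage sketch (added comment, not from the original file): the probe
 * commands routed through the helper above are written to
 * set_ftrace_filter using the "<function>:<command>[:<count>]" syntax,
 * e.g.:
 *
 *   echo 'schedule:traceoff:5' > /sys/kernel/tracing/set_ftrace_filter
 *   echo '!schedule:traceoff'  > /sys/kernel/tracing/set_ftrace_filter
 *
 * An optional count selects the *_count_probe_ops variants above.
 */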
static int
ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
	else
		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}
static int
ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}
static int
ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash,
		     char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &dump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}
static int
ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash,
			char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &cpudump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}
static struct ftrace_func_command ftrace_traceon_cmd = {
	.name			= "traceon",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name			= "traceoff",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_stacktrace_cmd = {
	.name			= "stacktrace",
	.func			= ftrace_stacktrace_callback,
};

static struct ftrace_func_command ftrace_dump_cmd = {
	.name			= "dump",
	.func			= ftrace_dump_callback,
};

static struct ftrace_func_command ftrace_cpudump_cmd = {
	.name			= "cpudump",
	.func			= ftrace_cpudump_callback,
};
static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		goto out_free_traceoff;

	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
	if (ret)
		goto out_free_traceon;

	ret = register_ftrace_command(&ftrace_dump_cmd);
	if (ret)
		goto out_free_stacktrace;

	ret = register_ftrace_command(&ftrace_cpudump_cmd);
	if (ret)
		goto out_free_dump;

	return 0;

 out_free_dump:
	unregister_ftrace_command(&ftrace_dump_cmd);
 out_free_stacktrace:
	unregister_ftrace_command(&ftrace_stacktrace_cmd);
 out_free_traceon:
	unregister_ftrace_command(&ftrace_traceon_cmd);
 out_free_traceoff:
	unregister_ftrace_command(&ftrace_traceoff_cmd);

	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
__init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}