// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>

#include "trace.h"
static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
			       struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
				     struct ftrace_ops *op,
				     struct ftrace_regs *fregs);
static struct tracer_flags func_flags;
enum {
	TRACE_FUNC_NO_OPTS		= 0x0, /* No flags set. */
	TRACE_FUNC_OPT_STACK		= 0x1,
	TRACE_FUNC_OPT_NO_REPEATS	= 0x2,

	/* Update this to next highest bit. */
	TRACE_FUNC_OPT_HIGHEST_BIT	= 0x4
};

#define TRACE_FUNC_OPT_MASK	(TRACE_FUNC_OPT_HIGHEST_BIT - 1)
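
/*
 * TRACE_FUNC_OPT_MASK covers every option bit currently defined (all bits
 * below TRACE_FUNC_OPT_HIGHEST_BIT); select_trace_function() below only
 * looks at the masked value when picking a callback.
 */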
int ftrace_allocate_ftrace_ops(struct trace_array *tr)
{
	struct ftrace_ops *ops;

	/* The top level array uses the "global_ops" */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	/* Currently only the non stack version is supported */
	ops->func = function_trace_call;
	ops->flags = FTRACE_OPS_FL_PID;

	tr->ops = ops;
	ops->private = tr;

	return 0;
}
void ftrace_free_ftrace_ops(struct trace_array *tr)
{
	kfree(tr->ops);
	tr->ops = NULL;
}
int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent)
{
	int ret;

	/*
	 * The top level array uses the "global_ops", and the files are
	 * created on boot up.
	 */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	if (!tr->ops)
		return -EINVAL;

	ret = allocate_fgraph_ops(tr, tr->ops);
	if (ret) {
		kfree(tr->ops);
		return ret;
	}

	ftrace_create_filter_files(tr->ops, parent);

	return 0;
}
void ftrace_destroy_function_files(struct trace_array *tr)
{
	ftrace_destroy_filter_files(tr->ops);
	ftrace_free_ftrace_ops(tr);
}
static ftrace_func_t select_trace_function(u32 flags_val)
{
	switch (flags_val & TRACE_FUNC_OPT_MASK) {
	case TRACE_FUNC_NO_OPTS:
		return function_trace_call;
	case TRACE_FUNC_OPT_STACK:
		return function_stack_trace_call;
	case TRACE_FUNC_OPT_NO_REPEATS:
		return function_no_repeats_trace_call;
	case TRACE_FUNC_OPT_STACK | TRACE_FUNC_OPT_NO_REPEATS:
		return function_stack_no_repeats_trace_call;
	default:
		return NULL;
	}
}
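
/*
 * The "no-repeats" callbacks need a per-CPU record of the last traced
 * call. Allocate it lazily the first time the option is requested; the
 * !tr->last_func_repeats check means it is allocated at most once and
 * then reused on later toggles.
 */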
static bool handle_func_repeats(struct trace_array *tr, u32 flags_val)
{
	if (!tr->last_func_repeats &&
	    (flags_val & TRACE_FUNC_OPT_NO_REPEATS)) {
		tr->last_func_repeats = alloc_percpu(struct trace_func_repeats);
		if (!tr->last_func_repeats)
			return false;
	}

	return true;
}
static int function_trace_init(struct trace_array *tr)
{
	ftrace_func_t func;

	/*
	 * Instance trace_arrays get their ops allocated
	 * at instance creation. Unless it failed
	 * to allocate.
	 */
	if (!tr->ops)
		return -ENOMEM;

	func = select_trace_function(func_flags.val);
	if (!func)
		return -EINVAL;

	if (!handle_func_repeats(tr, func_flags.val))
		return -ENOMEM;

	ftrace_init_array_ops(tr, func);

	tr->array_buffer.cpu = raw_smp_processor_id();

	tracing_start_cmdline_record();
	tracing_start_function_trace(tr);
	return 0;
}
static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace(tr);
	tracing_stop_cmdline_record();
	ftrace_reset_array_ops(tr);
}
static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->array_buffer);
}
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned int trace_ctx;
	int bit;
	int cpu;

	if (unlikely(!tr->function_enabled))
		return;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	trace_ctx = tracing_gen_ctx();

	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	if (!atomic_read(&data->disabled))
		trace_function(tr, ip, parent_ip, trace_ctx);

	ftrace_test_recursion_unlock(bit);
}
#ifdef CONFIG_UNWINDER_ORC
/* Skip: function_stack_trace_call(), ftrace_call() */
#define STACK_SKIP 2
#else
/* Skip: __trace_stack(), function_stack_trace_call(), ftrace_call() */
#define STACK_SKIP 3
#endif
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	unsigned int trace_ctx;
	int skip = STACK_SKIP;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		trace_ctx = tracing_gen_ctx_flags(flags);
		trace_function(tr, ip, parent_ip, trace_ctx);
#ifdef CONFIG_UNWINDER_FRAME_POINTER
		if (ftrace_pids_enabled(op))
			skip++;
#endif
		__trace_stack(tr, trace_ctx, skip);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
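
/*
 * A call counts as a repeat of the previous one when both ip and
 * parent_ip match and the saturating 16-bit counter still has room; in
 * that case only the timestamp and the counter are updated and no new
 * event is written to the ring buffer.
 */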
static inline bool is_repeat_check(struct trace_array *tr,
				   struct trace_func_repeats *last_info,
				   unsigned long ip, unsigned long parent_ip)
{
	if (last_info->ip == ip &&
	    last_info->parent_ip == parent_ip &&
	    last_info->count < U16_MAX) {
		last_info->ts_last_call =
			ring_buffer_time_stamp(tr->array_buffer.buffer);
		last_info->count++;
		return true;
	}

	return false;
}
static inline void process_repeats(struct trace_array *tr,
				   unsigned long ip, unsigned long parent_ip,
				   struct trace_func_repeats *last_info,
				   unsigned int trace_ctx)
{
	if (last_info->count) {
		trace_last_func_repeats(tr, last_info, trace_ctx);
		last_info->count = 0;
	}

	last_info->ip = ip;
	last_info->parent_ip = parent_ip;
}
static void
function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
			       struct ftrace_ops *op,
			       struct ftrace_regs *fregs)
{
	struct trace_func_repeats *last_info;
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned int trace_ctx;
	unsigned long flags;
	int bit;
	int cpu;

	if (unlikely(!tr->function_enabled))
		return;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	if (atomic_read(&data->disabled))
		goto out;

	/*
	 * An interrupt may happen at any place here. But as far as I can see,
	 * the only damage that this can cause is to mess up the repetition
	 * counter without valuable data being lost.
	 * TODO: think about a solution that is better than just hoping to be
	 * lucky.
	 */
	last_info = per_cpu_ptr(tr->last_func_repeats, cpu);
	if (is_repeat_check(tr, last_info, ip, parent_ip))
		goto out;

	local_save_flags(flags);
	trace_ctx = tracing_gen_ctx_flags(flags);
	process_repeats(tr, ip, parent_ip, last_info, trace_ctx);

	trace_function(tr, ip, parent_ip, trace_ctx);

out:
	ftrace_test_recursion_unlock(bit);
}
static void
function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
				     struct ftrace_ops *op,
				     struct ftrace_regs *fregs)
{
	struct trace_func_repeats *last_info;
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	unsigned int trace_ctx;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		last_info = per_cpu_ptr(tr->last_func_repeats, cpu);
		if (is_repeat_check(tr, last_info, ip, parent_ip))
			goto out;

		trace_ctx = tracing_gen_ctx_flags(flags);
		process_repeats(tr, ip, parent_ip, last_info, trace_ctx);

		trace_function(tr, ip, parent_ip, trace_ctx);
		__trace_stack(tr, trace_ctx, STACK_SKIP);
	}

 out:
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ TRACER_OPT(func-no-repeats, TRACE_FUNC_OPT_NO_REPEATS) },
	{ } /* Always set a last empty entry */
};
static struct tracer_flags func_flags = {
	.val = TRACE_FUNC_NO_OPTS, /* By default: all flags disabled */
	.opts = func_opts
};
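
/*
 * These options appear as the "func_stack_trace" and "func-no-repeats"
 * entries under the tracer's options/ directory in tracefs.
 */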
static void tracing_start_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	register_ftrace_function(tr->ops);
	tr->function_enabled = 1;
}
static void tracing_stop_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	unregister_ftrace_function(tr->ops);
}
static struct tracer function_trace;
static int
func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	ftrace_func_t func;
	u32 new_flags;

	/* Do nothing if already set. */
	if (!!set == !!(func_flags.val & bit))
		return 0;

	/* We can change this flag only when not running. */
	if (tr->current_trace != &function_trace)
		return 0;

	new_flags = (func_flags.val & ~bit) | (set ? bit : 0);
	func = select_trace_function(new_flags);
	if (!func)
		return -EINVAL;

	/* Check if there's anything to change. */
	if (tr->ops->func == func)
		return 0;

	if (!handle_func_repeats(tr, new_flags))
		return -ENOMEM;

	unregister_ftrace_function(tr->ops);
	tr->ops->func = func;
	register_ftrace_function(tr->ops);

	return 0;
}
static struct tracer function_trace __tracer_data =
{
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
	.allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function,
#endif
};
#ifdef CONFIG_DYNAMIC_FTRACE
static void update_traceon_count(struct ftrace_probe_ops *ops,
				 unsigned long ip,
				 struct trace_array *tr, bool on,
				 void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;
	/*
	 * Tracing gets disabled (or enabled) once per count.
	 * This function can be called at the same time on multiple CPUs.
	 * It is fine if both disable (or enable) tracing, as disabling
	 * (or enabling) the second time doesn't do anything as the
	 * state of the tracer is already disabled (or enabled).
	 * What needs to be synchronized in this case is that the count
	 * only gets decremented once, even if the tracer is disabled
	 * (or enabled) twice, as the second one is really a nop.
	 *
	 * The memory barriers guarantee that we only decrement the
	 * counter once. First the count is read to a local variable
	 * and a read barrier is used to make sure that it is loaded
	 * before checking if the tracer is in the state we want.
	 * If the tracer is not in the state we want, then the count
	 * is guaranteed to be the old count.
	 *
	 * Next the tracer is set to the state we want (disabled or enabled)
	 * then a write memory barrier is used to make sure that
	 * the new state is visible before changing the counter by
	 * one minus the old counter. This guarantees that another CPU
	 * executing this code will see the new state before seeing
	 * the new counter value, and would not do anything if the new
	 * counter is seen.
	 *
	 * Note, there is no synchronization between this and a user
	 * setting the tracing_on file. But we currently don't care
	 * about that.
	 */
	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
	old_count = *count;

	if (old_count <= 0)
		return;

	/* Make sure we see count before checking tracing state */
	smp_rmb();

	if (on == !!tracer_tracing_is_on(tr))
		return;

	if (on)
		tracer_tracing_on(tr);
	else
		tracer_tracing_off(tr);

	/* Make sure tracing state is visible before updating count */
	smp_wmb();

	*count = old_count - 1;
}
static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	update_traceon_count(ops, ip, tr, 1, data);
}
static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip,
		      struct trace_array *tr, struct ftrace_probe_ops *ops,
		      void *data)
{
	update_traceon_count(ops, ip, tr, 0, data);
}
static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip,
	       struct trace_array *tr, struct ftrace_probe_ops *ops,
	       void *data)
{
	if (tracer_tracing_is_on(tr))
		return;

	tracer_tracing_on(tr);
}
static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
		struct trace_array *tr, struct ftrace_probe_ops *ops,
		void *data)
{
	if (!tracer_tracing_is_on(tr))
		return;

	tracer_tracing_off(tr);
}
#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 3:
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 3
#else
/*
 * Skip 5:
 *   __trace_stack()
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 5
#endif
static __always_inline void trace_stack(struct trace_array *tr)
{
	unsigned int trace_ctx;

	trace_ctx = tracing_gen_ctx();

	__trace_stack(tr, trace_ctx, FTRACE_STACK_SKIP);
}
static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	trace_stack(tr);
}
static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip,
			struct trace_array *tr, struct ftrace_probe_ops *ops,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;
	long new_count;

	if (!tracing_is_on())
		return;

	/* unlimited? */
	if (!mapper) {
		trace_stack(tr);
		return;
	}

	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	/*
	 * Stack traces should only execute the number of times the
	 * user specified in the counter.
	 */
	do {
		old_count = *count;

		if (!old_count)
			return;

		new_count = old_count - 1;
		new_count = cmpxchg(count, old_count, new_count);
		if (new_count == old_count)
			trace_stack(tr);

		if (!tracing_is_on())
			return;

	} while (new_count != old_count);
}
static int update_count(struct ftrace_probe_ops *ops, unsigned long ip,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count) {
		if (*count <= 0)
			return 0;
		(*count)--;
	}

	return 1;
}
static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ALL);
}
/* Only dump the current CPU buffer. */
static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ORIG);
}
static int
ftrace_probe_print(const char *name, struct seq_file *m,
		   unsigned long ip, struct ftrace_probe_ops *ops,
		   void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	seq_printf(m, "%ps:%s", (void *)ip, name);

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count)
		seq_printf(m, ":count=%ld\n", *count);
	else
		seq_puts(m, ":unlimited\n");

	return 0;
}
static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops,
		     void *data)
{
	return ftrace_probe_print("traceon", m, ip, ops, data);
}
static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceoff", m, ip, ops, data);
}
static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("stacktrace", m, ip, ops, data);
}
static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
		  struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("dump", m, ip, ops, data);
}
static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("cpudump", m, ip, ops, data);
}
static int
ftrace_count_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *init_data, void **data)
{
	struct ftrace_func_mapper *mapper = *data;

	if (!mapper) {
		mapper = allocate_ftrace_func_mapper();
		if (!mapper)
			return -ENOMEM;
		*data = mapper;
	}

	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
}
static void
ftrace_count_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *data)
{
	struct ftrace_func_mapper *mapper = data;

	if (!ip) {
		free_ftrace_func_mapper(mapper, NULL);
		return;
	}

	ftrace_func_mapper_remove_ip(mapper, ip);
}
static struct ftrace_probe_ops traceon_count_probe_ops = {
	.func			= ftrace_traceon_count,
	.print			= ftrace_traceon_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};
static struct ftrace_probe_ops traceoff_count_probe_ops = {
	.func			= ftrace_traceoff_count,
	.print			= ftrace_traceoff_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};
static struct ftrace_probe_ops stacktrace_count_probe_ops = {
	.func			= ftrace_stacktrace_count,
	.print			= ftrace_stacktrace_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};
static struct ftrace_probe_ops dump_probe_ops = {
	.func			= ftrace_dump_probe,
	.print			= ftrace_dump_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};
static struct ftrace_probe_ops cpudump_probe_ops = {
	.func			= ftrace_cpudump_probe,
	.print			= ftrace_cpudump_print,
};
static struct ftrace_probe_ops traceon_probe_ops = {
	.func			= ftrace_traceon,
	.print			= ftrace_traceon_print,
};
static struct ftrace_probe_ops traceoff_probe_ops = {
	.func			= ftrace_traceoff,
	.print			= ftrace_traceoff_print,
};
static struct ftrace_probe_ops stacktrace_probe_ops = {
	.func			= ftrace_stacktrace,
	.print			= ftrace_stacktrace_print,
};
static int
ftrace_trace_probe_callback(struct trace_array *tr,
			    struct ftrace_probe_ops *ops,
			    struct ftrace_hash *hash, char *glob,
			    char *cmd, char *param, int enable)
{
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!')
		return unregister_ftrace_function_probe_func(glob+1, tr, ops);

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, tr, ops, count);

	return ret < 0 ? ret : 0;
}
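
/*
 * Typical usage from tracefs (the function name is only an example):
 *
 *   echo '<function>:traceon[:count]' > set_ftrace_filter
 *   echo '<function>:traceoff[:count]' > set_ftrace_filter
 *
 * e.g. "echo 'schedule:traceoff:5' > set_ftrace_filter" arms a probe that
 * turns tracing off at most 5 times when schedule() is hit; without a
 * count the probe fires every time.
 */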
static int
ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
	else
		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}
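
/*
 * e.g. "echo 'kfree:stacktrace:10' > set_ftrace_filter" records a stack
 * trace for (at most) the first 10 calls to kfree(); with no count a
 * stack trace is recorded on every call. (kfree is only an example.)
 */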
static int
ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}
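
/*
 * The "dump" command snapshots all CPU ring buffers to the console via
 * ftrace_dump(DUMP_ALL), "cpudump" only the buffer of the CPU that hit
 * the probe (DUMP_ORIG). Both are registered as one-shot, hence the
 * "Only dump once" comments below.
 */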
static int
ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash,
		     char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &dump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}
static int
ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash,
			char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &cpudump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}
static struct ftrace_func_command ftrace_traceon_cmd = {
	.name			= "traceon",
	.func			= ftrace_trace_onoff_callback,
};
static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name			= "traceoff",
	.func			= ftrace_trace_onoff_callback,
};
static struct ftrace_func_command ftrace_stacktrace_cmd = {
	.name			= "stacktrace",
	.func			= ftrace_stacktrace_callback,
};
static struct ftrace_func_command ftrace_dump_cmd = {
	.name			= "dump",
	.func			= ftrace_dump_callback,
};
static struct ftrace_func_command ftrace_cpudump_cmd = {
	.name			= "cpudump",
	.func			= ftrace_cpudump_callback,
};
static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		goto out_free_traceoff;

	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
	if (ret)
		goto out_free_traceon;

	ret = register_ftrace_command(&ftrace_dump_cmd);
	if (ret)
		goto out_free_stacktrace;

	ret = register_ftrace_command(&ftrace_cpudump_cmd);
	if (ret)
		goto out_free_dump;

	return 0;

 out_free_dump:
	unregister_ftrace_command(&ftrace_dump_cmd);
 out_free_stacktrace:
	unregister_ftrace_command(&ftrace_stacktrace_cmd);
 out_free_traceon:
	unregister_ftrace_command(&ftrace_traceon_cmd);
 out_free_traceoff:
	unregister_ftrace_command(&ftrace_traceoff_cmd);

	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
__init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}