kernel/trace/trace_functions.c
// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"

static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
			       struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
				     struct ftrace_ops *op,
				     struct ftrace_regs *fregs);
static struct tracer_flags func_flags;

/* Our option */
enum {
	TRACE_FUNC_NO_OPTS		= 0x0, /* No flags set. */
	TRACE_FUNC_OPT_STACK		= 0x1,
	TRACE_FUNC_OPT_NO_REPEATS	= 0x2,

	/* Update this to next highest bit. */
	TRACE_FUNC_OPT_HIGHEST_BIT	= 0x4
};

#define TRACE_FUNC_OPT_MASK	(TRACE_FUNC_OPT_HIGHEST_BIT - 1)

int ftrace_allocate_ftrace_ops(struct trace_array *tr)
{
	struct ftrace_ops *ops;

	/* The top level array uses the "global_ops" */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	/* Currently only the non stack version is supported */
	ops->func = function_trace_call;
	ops->flags = FTRACE_OPS_FL_PID;

	tr->ops = ops;
	ops->private = tr;

	return 0;
}

void ftrace_free_ftrace_ops(struct trace_array *tr)
{
	kfree(tr->ops);
	tr->ops = NULL;
}

int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent)
{
	int ret;
	/*
	 * The top level array uses the "global_ops", and the files are
	 * created on boot up.
	 */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	if (!tr->ops)
		return -EINVAL;

	ret = allocate_fgraph_ops(tr, tr->ops);
	if (ret) {
		kfree(tr->ops);
		return ret;
	}

	ftrace_create_filter_files(tr->ops, parent);

	return 0;
}

void ftrace_destroy_function_files(struct trace_array *tr)
{
	ftrace_destroy_filter_files(tr->ops);
	ftrace_free_ftrace_ops(tr);
	free_fgraph_ops(tr);
}

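/*
 * Map the currently set option bits to the matching trace callback.
 * Returns NULL for an unsupported combination, which callers treat
 * as -EINVAL.
 */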
static ftrace_func_t select_trace_function(u32 flags_val)
{
	switch (flags_val & TRACE_FUNC_OPT_MASK) {
	case TRACE_FUNC_NO_OPTS:
		return function_trace_call;
	case TRACE_FUNC_OPT_STACK:
		return function_stack_trace_call;
	case TRACE_FUNC_OPT_NO_REPEATS:
		return function_no_repeats_trace_call;
	case TRACE_FUNC_OPT_STACK | TRACE_FUNC_OPT_NO_REPEATS:
		return function_stack_no_repeats_trace_call;
	default:
		return NULL;
	}
}

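/*
 * Lazily allocate the per-CPU buffer used to track repeated calls the
 * first time the "func-no-repeats" option is enabled.
 */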
static bool handle_func_repeats(struct trace_array *tr, u32 flags_val)
{
	if (!tr->last_func_repeats &&
	    (flags_val & TRACE_FUNC_OPT_NO_REPEATS)) {
		tr->last_func_repeats = alloc_percpu(struct trace_func_repeats);
		if (!tr->last_func_repeats)
			return false;
	}

	return true;
}

static int function_trace_init(struct trace_array *tr)
{
	ftrace_func_t func;
	/*
	 * Instance trace_arrays get their ops allocated
	 * at instance creation. Unless it failed
	 * the allocation.
	 */
	if (!tr->ops)
		return -ENOMEM;

	func = select_trace_function(func_flags.val);
	if (!func)
		return -EINVAL;

	if (!handle_func_repeats(tr, func_flags.val))
		return -ENOMEM;

	ftrace_init_array_ops(tr, func);

	tr->array_buffer.cpu = raw_smp_processor_id();

	tracing_start_cmdline_record();
	tracing_start_function_trace(tr);
	return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace(tr);
	tracing_stop_cmdline_record();
	ftrace_reset_array_ops(tr);
}

static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->array_buffer);
}

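/*
 * When the function graph tracer is active, the parent ip reported to
 * the callbacks below may be the graph trampoline (return_to_handler)
 * rather than the real caller. Recover the original return address
 * from the shadow stack in that case.
 */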
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static __always_inline unsigned long
function_get_true_parent_ip(unsigned long parent_ip, struct ftrace_regs *fregs)
{
	unsigned long true_parent_ip;
	int idx = 0;

	true_parent_ip = parent_ip;
	if (unlikely(parent_ip == (unsigned long)&return_to_handler) && fregs)
		true_parent_ip = ftrace_graph_ret_addr(current, &idx, parent_ip,
				(unsigned long *)ftrace_regs_get_stack_pointer(fregs));
	return true_parent_ip;
}
#else
static __always_inline unsigned long
function_get_true_parent_ip(unsigned long parent_ip, struct ftrace_regs *fregs)
{
	return parent_ip;
}
#endif

static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned int trace_ctx;
	int bit;

	if (unlikely(!tr->function_enabled))
		return;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	parent_ip = function_get_true_parent_ip(parent_ip, fregs);

	trace_ctx = tracing_gen_ctx();

	data = this_cpu_ptr(tr->array_buffer.data);
	if (!atomic_read(&data->disabled))
		trace_function(tr, ip, parent_ip, trace_ctx);

	ftrace_test_recursion_unlock(bit);
}

#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 2:
 *
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 2
#else
/*
 * Skip 3:
 *   __trace_stack()
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 3
#endif

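/*
 * Same as function_trace_call() but also records a stack trace event,
 * skipping the STACK_SKIP frames of tracing machinery listed above.
 */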
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	unsigned int trace_ctx;
	int skip = STACK_SKIP;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	parent_ip = function_get_true_parent_ip(parent_ip, fregs);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		trace_ctx = tracing_gen_ctx_flags(flags);
		trace_function(tr, ip, parent_ip, trace_ctx);
#ifdef CONFIG_UNWINDER_FRAME_POINTER
		if (ftrace_pids_enabled(op))
			skip++;
#endif
		__trace_stack(tr, trace_ctx, skip);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

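/*
 * If this call repeats the last recorded (ip, parent_ip) pair, bump the
 * per-CPU repeat counter and refresh the timestamp instead of writing a
 * new event. Once the counter would exceed U16_MAX the call is traced
 * as a fresh event again.
 */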
static inline bool is_repeat_check(struct trace_array *tr,
				   struct trace_func_repeats *last_info,
				   unsigned long ip, unsigned long parent_ip)
{
	if (last_info->ip == ip &&
	    last_info->parent_ip == parent_ip &&
	    last_info->count < U16_MAX) {
		last_info->ts_last_call =
			ring_buffer_time_stamp(tr->array_buffer.buffer);
		last_info->count++;
		return true;
	}

	return false;
}

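/*
 * Flush any pending repeat count as a "last func repeats" event, then
 * remember the new (ip, parent_ip) pair for the next comparison.
 */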
static inline void process_repeats(struct trace_array *tr,
				   unsigned long ip, unsigned long parent_ip,
				   struct trace_func_repeats *last_info,
				   unsigned int trace_ctx)
{
	if (last_info->count) {
		trace_last_func_repeats(tr, last_info, trace_ctx);
		last_info->count = 0;
	}

	last_info->ip = ip;
	last_info->parent_ip = parent_ip;
}

static void
function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
			       struct ftrace_ops *op,
			       struct ftrace_regs *fregs)
{
	struct trace_func_repeats *last_info;
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned int trace_ctx;
	unsigned long flags;
	int bit;

	if (unlikely(!tr->function_enabled))
		return;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	parent_ip = function_get_true_parent_ip(parent_ip, fregs);
	data = this_cpu_ptr(tr->array_buffer.data);
	if (atomic_read(&data->disabled))
		goto out;

	/*
	 * An interrupt may happen at any place here. But as far as I can see,
	 * the only damage that this can cause is to mess up the repetition
	 * counter without valuable data being lost.
	 * TODO: think about a solution that is better than just hoping to be
	 * lucky.
	 */
	last_info = this_cpu_ptr(tr->last_func_repeats);
	if (is_repeat_check(tr, last_info, ip, parent_ip))
		goto out;

	local_save_flags(flags);
	trace_ctx = tracing_gen_ctx_flags(flags);
	process_repeats(tr, ip, parent_ip, last_info, trace_ctx);

	trace_function(tr, ip, parent_ip, trace_ctx);

out:
	ftrace_test_recursion_unlock(bit);
}

static void
function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
				     struct ftrace_ops *op,
				     struct ftrace_regs *fregs)
{
	struct trace_func_repeats *last_info;
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	unsigned int trace_ctx;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	parent_ip = function_get_true_parent_ip(parent_ip, fregs);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		last_info = per_cpu_ptr(tr->last_func_repeats, cpu);
		if (is_repeat_check(tr, last_info, ip, parent_ip))
			goto out;

		trace_ctx = tracing_gen_ctx_flags(flags);
		process_repeats(tr, ip, parent_ip, last_info, trace_ctx);

		trace_function(tr, ip, parent_ip, trace_ctx);
		__trace_stack(tr, trace_ctx, STACK_SKIP);
	}

out:
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ TRACER_OPT(func-no-repeats, TRACE_FUNC_OPT_NO_REPEATS) },
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = TRACE_FUNC_NO_OPTS, /* By default: all flags disabled */
	.opts = func_opts
};

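/*
 * function_enabled gates the callbacks above: it is kept clear while
 * tr->ops is being (un)registered, so a callback that fires mid-update
 * bails out early instead of logging events.
 */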
static void tracing_start_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	register_ftrace_function(tr->ops);
	tr->function_enabled = 1;
}

static void tracing_stop_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	unregister_ftrace_function(tr->ops);
}

static struct tracer function_trace;

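/*
 * Toggle a tracer option at runtime. Note that swapping the live
 * callback is done with a full unregister/register cycle on tr->ops
 * rather than by writing ops->func in place.
 */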
static int
func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	ftrace_func_t func;
	u32 new_flags;

	/* Do nothing if already set. */
	if (!!set == !!(func_flags.val & bit))
		return 0;

	/* We can change this flag only when not running. */
	if (tr->current_trace != &function_trace)
		return 0;

	new_flags = (func_flags.val & ~bit) | (set ? bit : 0);
	func = select_trace_function(new_flags);
	if (!func)
		return -EINVAL;

	/* Check if there's anything to change. */
	if (tr->ops->func == func)
		return 0;

	if (!handle_func_repeats(tr, new_flags))
		return -ENOMEM;

	unregister_ftrace_function(tr->ops);
	tr->ops->func = func;
	register_ftrace_function(tr->ops);

	return 0;
}

static struct tracer function_trace __tracer_data =
{
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
	.allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function,
#endif
};

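/*
 * Everything below implements the set_ftrace_filter probe commands
 * (traceon, traceoff, stacktrace, dump, cpudump). They depend on
 * dynamic ftrace to attach a probe to individual functions.
 */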
#ifdef CONFIG_DYNAMIC_FTRACE
static void update_traceon_count(struct ftrace_probe_ops *ops,
				 unsigned long ip,
				 struct trace_array *tr, bool on,
				 void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;

	/*
	 * Tracing gets disabled (or enabled) once per count.
	 * This function can be called at the same time on multiple CPUs.
	 * It is fine if both disable (or enable) tracing, as disabling
	 * (or enabling) the second time doesn't do anything as the
	 * state of the tracer is already disabled (or enabled).
	 * What needs to be synchronized in this case is that the count
	 * only gets decremented once, even if the tracer is disabled
	 * (or enabled) twice, as the second one is really a nop.
	 *
	 * The memory barriers guarantee that we only decrement the
	 * counter once. First the count is read to a local variable
	 * and a read barrier is used to make sure that it is loaded
	 * before checking if the tracer is in the state we want.
	 * If the tracer is not in the state we want, then the count
	 * is guaranteed to be the old count.
	 *
	 * Next the tracer is set to the state we want (disabled or enabled)
	 * then a write memory barrier is used to make sure that
	 * the new state is visible before changing the counter by
	 * one minus the old counter. This guarantees that another CPU
	 * executing this code will see the new state before seeing
	 * the new counter value, and would not do anything if the new
	 * counter is seen.
	 *
	 * Note, there is no synchronization between this and a user
	 * setting the tracing_on file. But we currently don't care
	 * about that.
	 */
	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
	old_count = *count;

	if (old_count <= 0)
		return;

	/* Make sure we see count before checking tracing state */
	smp_rmb();

	if (on == !!tracer_tracing_is_on(tr))
		return;

	if (on)
		tracer_tracing_on(tr);
	else
		tracer_tracing_off(tr);

	/* Make sure tracing state is visible before updating count */
	smp_wmb();

	*count = old_count - 1;
}

static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	update_traceon_count(ops, ip, tr, 1, data);
}

static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip,
		      struct trace_array *tr, struct ftrace_probe_ops *ops,
		      void *data)
{
	update_traceon_count(ops, ip, tr, 0, data);
}

static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip,
	       struct trace_array *tr, struct ftrace_probe_ops *ops,
	       void *data)
{
	if (tracer_tracing_is_on(tr))
		return;

	tracer_tracing_on(tr);
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
		struct trace_array *tr, struct ftrace_probe_ops *ops,
		void *data)
{
	if (!tracer_tracing_is_on(tr))
		return;

	tracer_tracing_off(tr);
}

#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 3:
 *
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 3
#else
/*
 * Skip 5:
 *
 *   __trace_stack()
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 5
#endif

static __always_inline void trace_stack(struct trace_array *tr)
{
	unsigned int trace_ctx;

	trace_ctx = tracing_gen_ctx();

	__trace_stack(tr, trace_ctx, FTRACE_STACK_SKIP);
}

static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	trace_stack(tr);
}

static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip,
			struct trace_array *tr, struct ftrace_probe_ops *ops,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;
	long new_count;

	if (!tracing_is_on())
		return;

	/* unlimited? */
	if (!mapper) {
		trace_stack(tr);
		return;
	}

	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	/*
	 * Stack traces should only execute the number of times the
	 * user specified in the counter.
	 */
	do {
		old_count = *count;

		if (!old_count)
			return;

		new_count = old_count - 1;
		new_count = cmpxchg(count, old_count, new_count);
		if (new_count == old_count)
			trace_stack(tr);

		if (!tracing_is_on())
			return;

	} while (new_count != old_count);
}

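/*
 * Consume one shot from the per-ip counter, if any. Returns 1 while
 * the probe should still fire (or when it is unlimited), 0 once the
 * count is exhausted.
 */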
static int update_count(struct ftrace_probe_ops *ops, unsigned long ip,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count) {
		if (*count <= 0)
			return 0;
		(*count)--;
	}

	return 1;
}

static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ALL);
}

/* Only dump the current CPU buffer. */
static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ORIG);
}

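/*
 * Common seq_file output for the probe listings shown by
 * set_ftrace_filter: "<function>:<command>:count=<N>", or
 * ":unlimited" when no counter was given.
 */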
static int
ftrace_probe_print(const char *name, struct seq_file *m,
		   unsigned long ip, struct ftrace_probe_ops *ops,
		   void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	seq_printf(m, "%ps:%s", (void *)ip, name);

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count)
		seq_printf(m, ":count=%ld\n", *count);
	else
		seq_puts(m, ":unlimited\n");

	return 0;
}

static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops,
		     void *data)
{
	return ftrace_probe_print("traceon", m, ip, ops, data);
}

static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceoff", m, ip, ops, data);
}

static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("stacktrace", m, ip, ops, data);
}

static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
		  struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("dump", m, ip, ops, data);
}

static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("cpudump", m, ip, ops, data);
}

static int
ftrace_count_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *init_data, void **data)
{
	struct ftrace_func_mapper *mapper = *data;

	if (!mapper) {
		mapper = allocate_ftrace_func_mapper();
		if (!mapper)
			return -ENOMEM;
		*data = mapper;
	}

	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
}

static void
ftrace_count_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *data)
{
	struct ftrace_func_mapper *mapper = data;

	if (!ip) {
		free_ftrace_func_mapper(mapper, NULL);
		return;
	}

	ftrace_func_mapper_remove_ip(mapper, ip);
}

static struct ftrace_probe_ops traceon_count_probe_ops = {
	.func			= ftrace_traceon_count,
	.print			= ftrace_traceon_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops traceoff_count_probe_ops = {
	.func			= ftrace_traceoff_count,
	.print			= ftrace_traceoff_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops stacktrace_count_probe_ops = {
	.func			= ftrace_stacktrace_count,
	.print			= ftrace_stacktrace_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops dump_probe_ops = {
	.func			= ftrace_dump_probe,
	.print			= ftrace_dump_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops cpudump_probe_ops = {
	.func			= ftrace_cpudump_probe,
	.print			= ftrace_cpudump_print,
};

static struct ftrace_probe_ops traceon_probe_ops = {
	.func			= ftrace_traceon,
	.print			= ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func			= ftrace_traceoff,
	.print			= ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_probe_ops = {
	.func			= ftrace_stacktrace,
	.print			= ftrace_stacktrace_print,
};

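/*
 * Parse and register a probe set up through set_ftrace_filter, e.g.:
 *
 *   echo 'schedule:traceoff' > set_ftrace_filter
 *   echo 'schedule:traceoff:3' > set_ftrace_filter
 *   echo '!schedule:traceoff' > set_ftrace_filter
 *
 * An optional ":count" limits how many times the command fires, and a
 * leading '!' removes a previously registered probe.
 */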
static int
ftrace_trace_probe_callback(struct trace_array *tr,
			    struct ftrace_probe_ops *ops,
			    struct ftrace_hash *hash, char *glob,
			    char *cmd, char *param, int enable)
{
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!')
		return unregister_ftrace_function_probe_func(glob+1, tr, ops);

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, tr, ops, count);

	return ret < 0 ? ret : 0;
}

static int
ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
	else
		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash,
		     char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &dump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}

static int
ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash,
			char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &cpudump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}

static struct ftrace_func_command ftrace_traceon_cmd = {
	.name			= "traceon",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name			= "traceoff",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_stacktrace_cmd = {
	.name			= "stacktrace",
	.func			= ftrace_stacktrace_callback,
};

static struct ftrace_func_command ftrace_dump_cmd = {
	.name			= "dump",
	.func			= ftrace_dump_callback,
};

static struct ftrace_func_command ftrace_cpudump_cmd = {
	.name			= "cpudump",
	.func			= ftrace_cpudump_callback,
};

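/*
 * Register the five probe commands at boot. On failure, the commands
 * registered so far are unwound in reverse order.
 */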
static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		goto out_free_traceoff;

	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
	if (ret)
		goto out_free_traceon;

	ret = register_ftrace_command(&ftrace_dump_cmd);
	if (ret)
		goto out_free_stacktrace;

	ret = register_ftrace_command(&ftrace_cpudump_cmd);
	if (ret)
		goto out_free_dump;

	return 0;

 out_free_dump:
	unregister_ftrace_command(&ftrace_dump_cmd);
 out_free_stacktrace:
	unregister_ftrace_command(&ftrace_stacktrace_cmd);
 out_free_traceon:
	unregister_ftrace_command(&ftrace_traceon_cmd);
 out_free_traceoff:
	unregister_ftrace_command(&ftrace_traceoff_cmd);

	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

__init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}