/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"

static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs);
static struct tracer_flags func_flags;

/* Our option */
enum {
	TRACE_FUNC_OPT_STACK	= 0x1,
};

static int allocate_ftrace_ops(struct trace_array *tr)
{
	struct ftrace_ops *ops;

	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	/* Currently only the non-stack version is supported */
	ops->func = function_trace_call;
	ops->flags = FTRACE_OPS_FL_RECURSION_SAFE;

	tr->ops = ops;
	ops->private = tr;
	return 0;
}

int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent)
{
	int ret;

	/*
	 * The top level array uses the "global_ops", and the files are
	 * created on boot up.
	 */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	ret = allocate_ftrace_ops(tr);
	if (ret)
		return ret;

	ftrace_create_filter_files(tr->ops, parent);

	return 0;
}

void ftrace_destroy_function_files(struct trace_array *tr)
{
	ftrace_destroy_filter_files(tr->ops);
	kfree(tr->ops);
	tr->ops = NULL;
}

static int function_trace_init(struct trace_array *tr)
{
	ftrace_func_t func;

	/*
	 * Instance trace_arrays get their ops allocated
	 * at instance creation, unless that allocation failed.
	 */
	if (!tr->ops)
		return -ENOMEM;

	/* Currently only the global instance can do stack tracing */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
	    func_flags.val & TRACE_FUNC_OPT_STACK)
		func = function_stack_trace_call;
	else
		func = function_trace_call;

	ftrace_init_array_ops(tr, func);

	tr->trace_buffer.cpu = get_cpu();
	put_cpu();

	tracing_start_cmdline_record();
	tracing_start_function_trace(tr);
	return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace(tr);
	tracing_stop_cmdline_record();
	ftrace_reset_array_ops(tr);
}

static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->trace_buffer);
}

static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	int bit;
	int cpu;
	int pc;

	if (unlikely(!tr->function_enabled))
		return;

	pc = preempt_count();
	preempt_disable_notrace();

	bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
	if (bit < 0)
		goto out;

	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	if (!atomic_read(&data->disabled)) {
		local_save_flags(flags);
		trace_function(tr, ip, parent_ip, flags, pc);
	}
	trace_clear_recursion(bit);

 out:
	preempt_enable_notrace();
}

static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
		/*
		 * skip over 5 funcs:
		 *    __ftrace_trace_stack,
		 *    __trace_stack,
		 *    function_stack_trace_call
		 *    ftrace_list_func
		 *    ftrace_call
		 */
		__trace_stack(tr, flags, 5, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = func_opts
};

static void tracing_start_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	register_ftrace_function(tr->ops);
	tr->function_enabled = 1;
}

static void tracing_stop_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	unregister_ftrace_function(tr->ops);
}

static int
func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	switch (bit) {
	case TRACE_FUNC_OPT_STACK:
		/* do nothing if already set */
		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
			break;

		unregister_ftrace_function(tr->ops);

		if (set) {
			tr->ops->func = function_stack_trace_call;
			register_ftrace_function(tr->ops);
		} else {
			tr->ops->func = function_trace_call;
			register_ftrace_function(tr->ops);
		}

		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static struct tracer function_trace __tracer_data =
{
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
	.allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function,
#endif
};

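/*
 * Typical usage from user space (a sketch based on the tracefs interface
 * described in Documentation/trace/ftrace.txt; the mount point may be
 * /sys/kernel/debug/tracing on some systems):
 *
 *   # echo function > /sys/kernel/tracing/current_tracer
 *   # echo 1 > /sys/kernel/tracing/options/func_stack_trace
 *   # cat /sys/kernel/tracing/trace
 *
 * The second write flips TRACE_FUNC_OPT_STACK via func_set_flag() above,
 * swapping tr->ops->func over to function_stack_trace_call().
 */
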
#ifdef CONFIG_DYNAMIC_FTRACE
static void update_traceon_count(void **data, bool on)
{
	long *count = (long *)data;
	long old_count = *count;

	/*
	 * Tracing gets disabled (or enabled) once per count.
	 * This function can be called at the same time on multiple CPUs.
	 * It is fine if both disable (or enable) tracing, as disabling
	 * (or enabling) the second time doesn't do anything as the
	 * state of the tracer is already disabled (or enabled).
	 * What needs to be synchronized in this case is that the count
	 * only gets decremented once, even if the tracer is disabled
	 * (or enabled) twice, as the second one is really a nop.
	 *
	 * The memory barriers guarantee that we only decrement the
	 * counter once. First the count is read to a local variable
	 * and a read barrier is used to make sure that it is loaded
	 * before checking if the tracer is in the state we want.
	 * If the tracer is not in the state we want, then the count
	 * is guaranteed to be the old count.
	 *
	 * Next the tracer is set to the state we want (disabled or enabled)
	 * then a write memory barrier is used to make sure that
	 * the new state is visible before the counter is updated to
	 * one less than the old count. This guarantees that another CPU
	 * executing this code will see the new state before seeing
	 * the new counter value, and would not do anything if the new
	 * counter is seen.
	 *
	 * Note, there is no synchronization between this and a user
	 * setting the tracing_on file. But we currently don't care
	 * about that.
	 */
	if (!old_count)
		return;

	/* Make sure we see count before checking tracing state */
	smp_rmb();

	if (on == !!tracing_is_on())
		return;

	if (on)
		tracing_on();
	else
		tracing_off();

	/* unlimited? */
	if (old_count == -1)
		return;

	/* Make sure tracing state is visible before updating count */
	smp_wmb();

	*count = old_count - 1;
}

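/*
 * Worked example of the barrier dance above (an illustration, not part of
 * the original source): two CPUs enter update_traceon_count() while
 * *count == N and tracing is in the "wrong" state. Both may read
 * old_count == N, both may flip the tracing state (the second flip is a
 * nop), and both store N - 1, so the count still drops by exactly one.
 * If instead one CPU already sees the new count of N - 1, the
 * smp_wmb()/smp_rmb() pairing guarantees it also sees the new tracing
 * state and bails out at the state check, so it can never store N - 2.
 */
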
static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip, void **data)
{
	update_traceon_count(data, 1);
}

static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip, void **data)
{
	update_traceon_count(data, 0);
}

static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (tracing_is_on())
		return;

	tracing_on();
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (!tracing_is_on())
		return;

	tracing_off();
}

/*
 * Skip 4:
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *   ftrace_ops_list_func()
 *   ftrace_call()
 */
#define STACK_SKIP 4

static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip, void **data)
{
	trace_dump_stack(STACK_SKIP);
}

static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip, void **data)
{
	long *count = (long *)data;
	long old_count;
	long new_count;

	/*
	 * Stack traces should only execute the number of times the
	 * user specified in the counter.
	 */
	do {

		if (!tracing_is_on())
			return;

		old_count = *count;

		if (!old_count)
			return;

		/* unlimited? */
		if (old_count == -1) {
			trace_dump_stack(STACK_SKIP);
			return;
		}

		new_count = old_count - 1;
		new_count = cmpxchg(count, old_count, new_count);
		if (new_count == old_count)
			trace_dump_stack(STACK_SKIP);

	} while (new_count != old_count);
}

static int update_count(void **data)
{
	unsigned long *count = (long *)data;

	if (!*count)
		return 0;

	if (*count != -1)
		(*count)--;

	return 1;
}

static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (update_count(data))
		ftrace_dump(DUMP_ALL);
}

/* Only dump the current CPU buffer. */
static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (update_count(data))
		ftrace_dump(DUMP_ORIG);
}

static int
ftrace_probe_print(const char *name, struct seq_file *m,
		   unsigned long ip, void *data)
{
	long count = (long)data;

	seq_printf(m, "%ps:%s", (void *)ip, name);

	if (count == -1)
		seq_puts(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", count);

	return 0;
}

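/*
 * Illustrative output (assuming, hypothetically, probes on schedule()):
 * reading set_ftrace_filter would then show lines such as:
 *
 *   schedule:traceoff:count=5
 *   schedule:stacktrace:unlimited
 */
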
static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceon", m, ip, data);
}

static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceoff", m, ip, data);
}

static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("stacktrace", m, ip, data);
}

static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
		  struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("dump", m, ip, data);
}

static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("cpudump", m, ip, data);
}

static struct ftrace_probe_ops traceon_count_probe_ops = {
	.func			= ftrace_traceon_count,
	.print			= ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_count_probe_ops = {
	.func			= ftrace_traceoff_count,
	.print			= ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_count_probe_ops = {
	.func			= ftrace_stacktrace_count,
	.print			= ftrace_stacktrace_print,
};

static struct ftrace_probe_ops dump_probe_ops = {
	.func			= ftrace_dump_probe,
	.print			= ftrace_dump_print,
};

static struct ftrace_probe_ops cpudump_probe_ops = {
	.func			= ftrace_cpudump_probe,
	.print			= ftrace_cpudump_print,
};

static struct ftrace_probe_ops traceon_probe_ops = {
	.func			= ftrace_traceon,
	.print			= ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func			= ftrace_traceoff,
	.print			= ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_probe_ops = {
	.func			= ftrace_stacktrace,
	.print			= ftrace_stacktrace_print,
};

static int
ftrace_trace_probe_callback(struct ftrace_probe_ops *ops,
			    struct ftrace_hash *hash, char *glob,
			    char *cmd, char *param, int enable)
{
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!') {
		unregister_ftrace_function_probe_func(glob+1, ops);
		return 0;
	}

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, ops, count);

	return ret < 0 ? ret : 0;
}

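/*
 * The commands registered below all funnel into the callback above.
 * Example usage (a sketch of the set_ftrace_filter syntax from the
 * ftrace documentation, with schedule() as an arbitrary target; the
 * count after the second ':' is optional):
 *
 *   # echo 'schedule:traceoff:3' > set_ftrace_filter
 *   # echo 'schedule:stacktrace' > set_ftrace_filter
 *   # echo '!schedule:traceoff:3' > set_ftrace_filter    (remove probe)
 */
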
static int
ftrace_trace_onoff_callback(struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
	else
		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;

	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_stacktrace_callback(struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;

	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_dump_callback(struct ftrace_hash *hash,
		     char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	ops = &dump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
					   "1", enable);
}

static int
ftrace_cpudump_callback(struct ftrace_hash *hash,
			char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	ops = &cpudump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
					   "1", enable);
}

static struct ftrace_func_command ftrace_traceon_cmd = {
	.name			= "traceon",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name			= "traceoff",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_stacktrace_cmd = {
	.name			= "stacktrace",
	.func			= ftrace_stacktrace_callback,
};

static struct ftrace_func_command ftrace_dump_cmd = {
	.name			= "dump",
	.func			= ftrace_dump_callback,
};

static struct ftrace_func_command ftrace_cpudump_cmd = {
	.name			= "cpudump",
	.func			= ftrace_cpudump_callback,
};

static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		goto out_free_traceoff;

	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
	if (ret)
		goto out_free_traceon;

	ret = register_ftrace_command(&ftrace_dump_cmd);
	if (ret)
		goto out_free_stacktrace;

	ret = register_ftrace_command(&ftrace_cpudump_cmd);
	if (ret)
		goto out_free_dump;

	return 0;

 out_free_dump:
	unregister_ftrace_command(&ftrace_dump_cmd);
 out_free_stacktrace:
	unregister_ftrace_command(&ftrace_stacktrace_cmd);
 out_free_traceon:
	unregister_ftrace_command(&ftrace_traceon_cmd);
 out_free_traceoff:
	unregister_ftrace_command(&ftrace_traceoff_cmd);

	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

static __init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}
core_initcall(init_function_trace);