kernel/trace/trace_sched_wakeup.c
/*
 * trace task wakeup timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <trace/events/sched.h>
#include "trace.h"
static struct trace_array	*wakeup_trace;
static int __read_mostly	tracer_enabled;

static struct task_struct	*wakeup_task;
static int			wakeup_cpu;
static int			wakeup_current_cpu;
static unsigned			wakeup_prio = -1;
static int			wakeup_rt;
static int			wakeup_dl;
static int			tracing_dl = 0;

static arch_spinlock_t wakeup_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static void wakeup_reset(struct trace_array *tr);
static void __wakeup_reset(struct trace_array *tr);
static int wakeup_graph_entry(struct ftrace_graph_ent *trace);
static void wakeup_graph_return(struct ftrace_graph_ret *trace);

static int save_flags;
static bool function_enabled;
#define TRACE_DISPLAY_GRAPH	1

static struct tracer_opt trace_opts[] = {
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* display latency trace as call graph */
	{ TRACER_OPT(display-graph, TRACE_DISPLAY_GRAPH) },
#endif
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	.val  = 0,
	.opts = trace_opts,
};

#define is_graph() (tracer_flags.val & TRACE_DISPLAY_GRAPH)
#ifdef CONFIG_FUNCTION_TRACER

/*
 * Prologue for the wakeup function tracers.
 *
 * Returns 1 if it is OK to continue, and preemption
 * is disabled and data->disabled is incremented.
 * 0 if the trace is to be ignored, and preemption
 * is not disabled and data->disabled is
 * kept the same.
 *
 * Note, this function is also used outside this ifdef but
 *  inside the #ifdef of the function graph tracer below.
 *  This is OK, since the function graph tracer is
 *  dependent on the function tracer.
 */
static int
func_prolog_preempt_disable(struct trace_array *tr,
			    struct trace_array_cpu **data,
			    int *pc)
{
	long disabled;
	int cpu;

	if (likely(!wakeup_task))
		return 0;

	*pc = preempt_count();
	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	if (cpu != wakeup_current_cpu)
		goto out_enable;

	*data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&(*data)->disabled);
	if (unlikely(disabled != 1))
		goto out;

	return 1;

out:
	atomic_dec(&(*data)->disabled);

out_enable:
	preempt_enable_notrace();
	return 0;
}
/*
 * wakeup uses its own tracer function to keep the overhead down:
 */
static void
wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
		   struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return;

	local_irq_save(flags);
	trace_function(tr, ip, parent_ip, flags, pc);
	local_irq_restore(flags);

	atomic_dec(&data->disabled);
	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = wakeup_tracer_call,
	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};
#endif /* CONFIG_FUNCTION_TRACER */
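
/*
 * The helpers below attach either the function-graph callbacks or the
 * plain function tracer callback, depending on the 'graph' argument,
 * and only when the TRACE_ITER_FUNCTION flag is (or is about to be)
 * set. 'function_enabled' remembers which state we are in.
 */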
static int register_wakeup_function(int graph, int set)
{
	int ret;

	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
	if (function_enabled || (!set && !(trace_flags & TRACE_ITER_FUNCTION)))
		return 0;

	if (graph)
		ret = register_ftrace_graph(&wakeup_graph_return,
					    &wakeup_graph_entry);
	else
		ret = register_ftrace_function(&trace_ops);

	if (!ret)
		function_enabled = true;

	return ret;
}

static void unregister_wakeup_function(int graph)
{
	if (!function_enabled)
		return;

	if (graph)
		unregister_ftrace_graph();
	else
		unregister_ftrace_function(&trace_ops);

	function_enabled = false;
}

static void wakeup_function_set(int set)
{
	if (set)
		register_wakeup_function(is_graph(), 1);
	else
		unregister_wakeup_function(is_graph());
}

static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set)
{
	struct tracer *tracer = tr->current_trace;

	if (mask & TRACE_ITER_FUNCTION)
		wakeup_function_set(set);

	return trace_keep_overwrite(tracer, mask, set);
}

static int start_func_tracer(int graph)
{
	int ret;

	ret = register_wakeup_function(graph, 0);

	if (!ret && tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return ret;
}

static void stop_func_tracer(int graph)
{
	tracer_enabled = 0;

	unregister_wakeup_function(graph);
}
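
/*
 * When the function-graph tracer is available, the wakeup tracers can
 * display their output as a call graph. The section below toggles the
 * "display-graph" option (stopping, resetting and restarting tracing in
 * the requested mode) and routes printing to the graph output helpers.
 */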
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int
wakeup_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	if (!(bit & TRACE_DISPLAY_GRAPH))
		return -EINVAL;

	if (!(is_graph() ^ set))
		return 0;

	stop_func_tracer(!set);

	wakeup_reset(wakeup_trace);
	tracing_max_latency = 0;

	return start_func_tracer(set);
}

static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc, ret = 0;

	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return 0;

	local_save_flags(flags);
	ret = __trace_graph_entry(tr, trace, flags, pc);
	atomic_dec(&data->disabled);
	preempt_enable_notrace();

	return ret;
}

static void wakeup_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return;

	local_save_flags(flags);
	__trace_graph_return(tr, trace, flags, pc);
	atomic_dec(&data->disabled);

	preempt_enable_notrace();
	return;
}

static void wakeup_trace_open(struct trace_iterator *iter)
{
	if (is_graph())
		graph_trace_open(iter);
}

static void wakeup_trace_close(struct trace_iterator *iter)
{
	if (iter->private)
		graph_trace_close(iter);
}
#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC | \
			    TRACE_GRAPH_PRINT_ABS_TIME | \
			    TRACE_GRAPH_PRINT_DURATION)

static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
	/*
	 * In graph mode call the graph tracer output function,
	 * otherwise go with the TRACE_FN event handler
	 */
	if (is_graph())
		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

	return TRACE_TYPE_UNHANDLED;
}

static void wakeup_print_header(struct seq_file *s)
{
	if (is_graph())
		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
	else
		trace_default_header(s);
}

static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned long flags, int pc)
{
	if (is_graph())
		trace_graph_function(tr, ip, parent_ip, flags, pc);
	else
		trace_function(tr, ip, parent_ip, flags, pc);
}
#else
#define __trace_function trace_function
static int
wakeup_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return -EINVAL;
}

static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
{
	return -1;
}

static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

static void wakeup_graph_return(struct ftrace_graph_ret *trace) { }
static void wakeup_trace_open(struct trace_iterator *iter) { }
static void wakeup_trace_close(struct trace_iterator *iter) { }

#ifdef CONFIG_FUNCTION_TRACER
static void wakeup_print_header(struct seq_file *s)
{
	trace_default_header(s);
}
#else
static void wakeup_print_header(struct seq_file *s)
{
	trace_latency_header(s);
}
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
/*
 * Should this new latency be reported/recorded?
 */
static int report_latency(cycle_t delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return 0;
	} else {
		if (delta <= tracing_max_latency)
			return 0;
	}
	return 1;
}
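
/*
 * If the task we are timing migrates to another CPU, follow it so that
 * the per-CPU function tracing above keeps tracking the right CPU.
 */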
static void
probe_wakeup_migrate_task(void *ignore, struct task_struct *task, int cpu)
{
	if (task != wakeup_task)
		return;

	wakeup_current_cpu = cpu;
}
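
/*
 * Called on every context switch. If the task being switched in is the
 * one whose wakeup we timestamped, compute the wakeup-to-schedule delta
 * and, when it qualifies per report_latency(), record it as the new max.
 */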
static void notrace
probe_wakeup_sched_switch(void *ignore,
			  struct task_struct *prev, struct task_struct *next)
{
	struct trace_array_cpu *data;
	cycle_t T0, T1, delta;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	tracing_record_cmdline(prev);

	if (unlikely(!tracer_enabled))
		return;

	/*
	 * When we start a new trace, we set wakeup_task to NULL
	 * and then set tracer_enabled = 1. We want to make sure
	 * that another CPU does not see the tracer_enabled = 1
	 * and the wakeup_task with an older task, that might
	 * actually be the same as next.
	 */
	smp_rmb();

	if (next != wakeup_task)
		return;

	pc = preempt_count();

	/* disable local data, not wakeup_cpu data */
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
	if (likely(disabled != 1))
		goto out;

	local_irq_save(flags);
	arch_spin_lock(&wakeup_lock);

	/* We could race with grabbing wakeup_lock */
	if (unlikely(!tracer_enabled || next != wakeup_task))
		goto out_unlock;

	/* The task we are waiting for is waking up */
	data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);

	__trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
	tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1-T0;

	if (!report_latency(delta))
		goto out_unlock;

	if (likely(!is_tracing_stopped())) {
		tracing_max_latency = delta;
		update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu);
	}

out_unlock:
	__wakeup_reset(wakeup_trace);
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
out:
	atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
}
static void __wakeup_reset(struct trace_array *tr)
{
	wakeup_cpu = -1;
	wakeup_prio = -1;
	tracing_dl = 0;

	if (wakeup_task)
		put_task_struct(wakeup_task);

	wakeup_task = NULL;
}

static void wakeup_reset(struct trace_array *tr)
{
	unsigned long flags;

	tracing_reset_online_cpus(&tr->trace_buffer);

	local_irq_save(flags);
	arch_spin_lock(&wakeup_lock);
	__wakeup_reset(tr);
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
}
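
/*
 * sched_wakeup tracepoint handler: decide whether the task that is
 * waking up should become the task we time, based on the tracer flavor
 * (wakeup, wakeup_rt, wakeup_dl) and on its priority relative to the
 * task currently being tracked, then record the wakeup timestamp.
 */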
static void
probe_wakeup(void *ignore, struct task_struct *p, int success)
{
	struct trace_array_cpu *data;
	int cpu = smp_processor_id();
	unsigned long flags;
	long disabled;
	int pc;

	if (likely(!tracer_enabled))
		return;

	tracing_record_cmdline(p);
	tracing_record_cmdline(current);

	/*
	 * Semantic is like this:
	 *  - wakeup tracer handles all tasks in the system, independently
	 *    from their scheduling class;
	 *  - wakeup_rt tracer handles tasks belonging to sched_dl and
	 *    sched_rt class;
	 *  - wakeup_dl handles tasks belonging to sched_dl class only.
	 */
	if (tracing_dl || (wakeup_dl && !dl_task(p)) ||
	    (wakeup_rt && !dl_task(p) && !rt_task(p)) ||
	    (!dl_task(p) && (p->prio >= wakeup_prio || p->prio >= current->prio)))
		return;

	pc = preempt_count();
	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
	if (unlikely(disabled != 1))
		goto out;

	/* interrupts should be off from try_to_wake_up */
	arch_spin_lock(&wakeup_lock);

	/* check for races. */
	if (!tracer_enabled || tracing_dl ||
	    (!dl_task(p) && p->prio >= wakeup_prio))
		goto out_locked;

	/* reset the trace */
	__wakeup_reset(wakeup_trace);

	wakeup_cpu = task_cpu(p);
	wakeup_current_cpu = wakeup_cpu;
	wakeup_prio = p->prio;

	/*
	 * Once you start tracing a -deadline task, don't bother tracing
	 * another task until the first one wakes up.
	 */
	if (dl_task(p))
		tracing_dl = 1;
	else
		tracing_dl = 0;

	wakeup_task = p;
	get_task_struct(wakeup_task);

	local_save_flags(flags);

	data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);
	data->preempt_timestamp = ftrace_now(cpu);
	tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);

	/*
	 * We must be careful in using CALLER_ADDR2. But since wake_up
	 * is not called by an assembly function (where as schedule is)
	 * it should be safe to use it here.
	 */
	__trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);

out_locked:
	arch_spin_unlock(&wakeup_lock);
out:
	atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
}
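
/*
 * Attach the probes above to the sched_wakeup, sched_wakeup_new,
 * sched_switch and sched_migrate_task tracepoints, then arm the
 * function (or function-graph) tracer.
 */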
static void start_wakeup_tracer(struct trace_array *tr)
{
	int ret;

	ret = register_trace_sched_wakeup(probe_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup\n");
		return;
	}

	ret = register_trace_sched_wakeup_new(probe_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = register_trace_sched_switch(probe_wakeup_sched_switch, NULL);
	if (ret) {
		pr_info("sched trace: Couldn't activate tracepoint"
			" probe to kernel_sched_switch\n");
		goto fail_deprobe_wake_new;
	}

	ret = register_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_migrate_task\n");
		return;
	}

	wakeup_reset(tr);

	/*
	 * Don't let the tracer_enabled = 1 show up before
	 * the wakeup_task is reset. This may be overkill since
	 * wakeup_reset does a spin_unlock after setting the
	 * wakeup_task to NULL, but I want to be safe.
	 * This is a slow path anyway.
	 */
	smp_wmb();

	if (start_func_tracer(is_graph()))
		printk(KERN_ERR "failed to start wakeup tracer\n");

	return;
fail_deprobe_wake_new:
	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
fail_deprobe:
	unregister_trace_sched_wakeup(probe_wakeup, NULL);
}
static void stop_wakeup_tracer(struct trace_array *tr)
{
	tracer_enabled = 0;
	stop_func_tracer(is_graph());
	unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
	unregister_trace_sched_wakeup(probe_wakeup, NULL);
	unregister_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
}

static int __wakeup_tracer_init(struct trace_array *tr)
{
	save_flags = trace_flags;

	/* non overwrite screws up the latency tracers */
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

	tracing_max_latency = 0;
	wakeup_trace = tr;
	start_wakeup_tracer(tr);
	return 0;
}
static int wakeup_tracer_init(struct trace_array *tr)
{
	wakeup_dl = 0;
	wakeup_rt = 0;
	return __wakeup_tracer_init(tr);
}

static int wakeup_rt_tracer_init(struct trace_array *tr)
{
	wakeup_dl = 0;
	wakeup_rt = 1;
	return __wakeup_tracer_init(tr);
}

static int wakeup_dl_tracer_init(struct trace_array *tr)
{
	wakeup_dl = 1;
	wakeup_rt = 0;
	return __wakeup_tracer_init(tr);
}

static void wakeup_tracer_reset(struct trace_array *tr)
{
	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

	stop_wakeup_tracer(tr);
	/* make sure we put back any tasks we are tracing */
	wakeup_reset(tr);

	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
}

static void wakeup_tracer_start(struct trace_array *tr)
{
	wakeup_reset(tr);
	tracer_enabled = 1;
}

static void wakeup_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}
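
/*
 * The three tracer flavors below share all of the machinery above; they
 * differ only in which scheduling classes probe_wakeup() considers:
 * "wakeup" times any task, "wakeup_rt" only RT and deadline tasks, and
 * "wakeup_dl" only deadline tasks.
 */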
static struct tracer wakeup_tracer __read_mostly =
{
	.name		= "wakeup",
	.init		= wakeup_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.print_max	= true,
	.print_header	= wakeup_print_header,
	.print_line	= wakeup_print_line,
	.flags		= &tracer_flags,
	.set_flag	= wakeup_set_flag,
	.flag_changed	= wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
	.open		= wakeup_trace_open,
	.close		= wakeup_trace_close,
	.use_max_tr	= true,
};

static struct tracer wakeup_rt_tracer __read_mostly =
{
	.name		= "wakeup_rt",
	.init		= wakeup_rt_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.wait_pipe	= poll_wait_pipe,
	.print_max	= true,
	.print_header	= wakeup_print_header,
	.print_line	= wakeup_print_line,
	.flags		= &tracer_flags,
	.set_flag	= wakeup_set_flag,
	.flag_changed	= wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
	.open		= wakeup_trace_open,
	.close		= wakeup_trace_close,
	.use_max_tr	= true,
};

static struct tracer wakeup_dl_tracer __read_mostly =
{
	.name		= "wakeup_dl",
	.init		= wakeup_dl_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.wait_pipe	= poll_wait_pipe,
	.print_max	= true,
	.print_header	= wakeup_print_header,
	.print_line	= wakeup_print_line,
	.flags		= &tracer_flags,
	.set_flag	= wakeup_set_flag,
	.flag_changed	= wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
	.open		= wakeup_trace_open,
	.close		= wakeup_trace_close,
	.use_max_tr	= true,
};
__init static int init_wakeup_tracer(void)
{
	int ret;

	ret = register_tracer(&wakeup_tracer);
	if (ret)
		return ret;

	ret = register_tracer(&wakeup_rt_tracer);
	if (ret)
		return ret;

	ret = register_tracer(&wakeup_dl_tracer);
	if (ret)
		return ret;

	return 0;
}
core_initcall(init_wakeup_tracer);