/*
 * trace task wakeup timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
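/*
 * Usage sketch (assuming the standard tracefs/debugfs mount point):
 *
 *   echo wakeup_rt > /sys/kernel/debug/tracing/current_tracer
 *   echo 1 > /sys/kernel/debug/tracing/tracing_on
 *   ... run an RT workload ...
 *   cat /sys/kernel/debug/tracing/trace
 */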
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <trace/events/sched.h>

#include "trace.h"
static struct trace_array	*wakeup_trace;
static int __read_mostly	tracer_enabled;

static struct task_struct	*wakeup_task;
static int			wakeup_cpu;
static int			wakeup_current_cpu;
static unsigned			wakeup_prio = -1;
static int			wakeup_rt;
static int			wakeup_dl;
static int			tracing_dl = 0;

static arch_spinlock_t wakeup_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
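/*
 * Note: wakeup_lock is a raw arch_spinlock_t rather than a normal
 * spinlock because it is taken from the tracer hot path with
 * interrupts already disabled; a regular spinlock could itself be
 * traced and recurse back into this code.
 */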
static void wakeup_reset(struct trace_array *tr);
static void __wakeup_reset(struct trace_array *tr);
static int wakeup_graph_entry(struct ftrace_graph_ent *trace);
static void wakeup_graph_return(struct ftrace_graph_ret *trace);

static int save_flags;
static bool function_enabled;
#define TRACE_DISPLAY_GRAPH	1

static struct tracer_opt trace_opts[] = {
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* display latency trace as call graph */
	{ TRACER_OPT(display-graph, TRACE_DISPLAY_GRAPH) },
#endif
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	.val  = 0,
	.opts = trace_opts,
};

#define is_graph() (tracer_flags.val & TRACE_DISPLAY_GRAPH)
#ifdef CONFIG_FUNCTION_TRACER

/*
 * Prologue for the wakeup function tracers.
 *
 * Returns 1 if it is OK to continue, and preemption
 * is disabled and data->disabled is incremented.
 * 0 if the trace is to be ignored, and preemption
 * is not disabled and data->disabled is
 * kept the same.
 *
 * Note, this function is also used outside this ifdef but
 * inside the #ifdef of the function graph tracer below.
 * This is OK, since the function graph tracer is
 * dependent on the function tracer.
 */
static int
func_prolog_preempt_disable(struct trace_array *tr,
			    struct trace_array_cpu **data,
			    int *pc)
{
	long disabled;
	int cpu;

	if (likely(!wakeup_task))
		return 0;

	*pc = preempt_count();
	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	if (cpu != wakeup_current_cpu)
		goto out_enable;

	*data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&(*data)->disabled);
	if (unlikely(disabled != 1))
		goto out;

	return 1;

out:
	atomic_dec(&(*data)->disabled);

out_enable:
	preempt_enable_notrace();
	return 0;
}
/*
 * wakeup uses its own tracer function to keep the overhead down:
 */
static void
wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
		   struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return;

	local_irq_save(flags);
	trace_function(tr, ip, parent_ip, flags, pc);
	local_irq_restore(flags);

	atomic_dec(&data->disabled);
	preempt_enable_notrace();
}
#endif /* CONFIG_FUNCTION_TRACER */
static int register_wakeup_function(struct trace_array *tr, int graph, int set)
{
	int ret;

	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
	if (function_enabled || (!set && !(trace_flags & TRACE_ITER_FUNCTION)))
		return 0;

	if (graph)
		ret = register_ftrace_graph(&wakeup_graph_return,
					    &wakeup_graph_entry);
	else
		ret = register_ftrace_function(tr->ops);

	if (!ret)
		function_enabled = true;

	return ret;
}
static void unregister_wakeup_function(struct trace_array *tr, int graph)
{
	if (!function_enabled)
		return;

	if (graph)
		unregister_ftrace_graph();
	else
		unregister_ftrace_function(tr->ops);

	function_enabled = false;
}
static void wakeup_function_set(struct trace_array *tr, int set)
{
	if (set)
		register_wakeup_function(tr, is_graph(), 1);
	else
		unregister_wakeup_function(tr, is_graph());
}
static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set)
{
	struct tracer *tracer = tr->current_trace;

	if (mask & TRACE_ITER_FUNCTION)
		wakeup_function_set(tr, set);

	return trace_keep_overwrite(tracer, mask, set);
}
static int start_func_tracer(struct trace_array *tr, int graph)
{
	int ret;

	ret = register_wakeup_function(tr, graph, 0);

	if (!ret && tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return ret;
}
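/*
 * tracer_enabled gates the tracepoint probes below: they bail out
 * early while it is zero, so start_func_tracer()/stop_func_tracer()
 * effectively arm and disarm the whole latency measurement.
 */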
static void stop_func_tracer(struct trace_array *tr, int graph)
{
	tracer_enabled = 0;

	unregister_wakeup_function(tr, graph);
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int
wakeup_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	if (!(bit & TRACE_DISPLAY_GRAPH))
		return -EINVAL;

	/* !(is_graph() ^ set): the option already has the requested value */
	if (!(is_graph() ^ set))
		return 0;

	stop_func_tracer(tr, !set);

	wakeup_reset(wakeup_trace);
	tr->max_latency = 0;

	return start_func_tracer(tr, set);
}
static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc, ret = 0;

	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return 0;

	local_save_flags(flags);
	ret = __trace_graph_entry(tr, trace, flags, pc);
	atomic_dec(&data->disabled);
	preempt_enable_notrace();

	return ret;
}
static void wakeup_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return;

	local_save_flags(flags);
	__trace_graph_return(tr, trace, flags, pc);
	atomic_dec(&data->disabled);

	preempt_enable_notrace();
}
static void wakeup_trace_open(struct trace_iterator *iter)
{
	if (is_graph())
		graph_trace_open(iter);
}

static void wakeup_trace_close(struct trace_iterator *iter)
{
	if (iter->private)
		graph_trace_close(iter);
}
#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC | \
			    TRACE_GRAPH_PRINT_ABS_TIME | \
			    TRACE_GRAPH_PRINT_DURATION)
static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
	/*
	 * In graph mode call the graph tracer output function,
	 * otherwise go with the TRACE_FN event handler
	 */
	if (is_graph())
		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

	return TRACE_TYPE_UNHANDLED;
}
static void wakeup_print_header(struct seq_file *s)
{
	if (is_graph())
		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
	else
		trace_default_header(s);
}
static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned long flags, int pc)
{
	if (is_graph())
		trace_graph_function(tr, ip, parent_ip, flags, pc);
	else
		trace_function(tr, ip, parent_ip, flags, pc);
}
#else
#define __trace_function trace_function
static int
wakeup_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return -EINVAL;
}

static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
{
	return -1;
}

static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

static void wakeup_graph_return(struct ftrace_graph_ret *trace) { }
static void wakeup_trace_open(struct trace_iterator *iter) { }
static void wakeup_trace_close(struct trace_iterator *iter) { }
#ifdef CONFIG_FUNCTION_TRACER
static void wakeup_print_header(struct seq_file *s)
{
	trace_default_header(s);
}
#else
static void wakeup_print_header(struct seq_file *s)
{
	trace_latency_header(s);
}
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
/*
 * Should this new latency be reported/recorded?
 */
static int report_latency(struct trace_array *tr, cycle_t delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return 0;
	} else {
		if (delta <= tr->max_latency)
			return 0;
	}
	return 1;
}
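/*
 * In other words: with a nonzero tracing_thresh, every latency at or
 * above the threshold is recorded; with the threshold unset, only a
 * latency that beats the current per-array maximum is recorded.
 */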
static void
probe_wakeup_migrate_task(void *ignore, struct task_struct *task, int cpu)
{
	if (task != wakeup_task)
		return;

	wakeup_current_cpu = cpu;
}
static void
tracing_sched_switch_trace(struct trace_array *tr,
			   struct task_struct *prev,
			   struct task_struct *next,
			   unsigned long flags, int pc)
{
	struct trace_event_call *call = &event_context_switch;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->prev_pid   = prev->pid;
	entry->prev_prio  = prev->prio;
	entry->prev_state = prev->state;
	entry->next_pid   = next->pid;
	entry->next_prio  = next->prio;
	entry->next_state = next->state;
	entry->next_cpu   = task_cpu(next);

	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(buffer, event, flags, pc);
}
static void
tracing_sched_wakeup_trace(struct trace_array *tr,
			   struct task_struct *wakee,
			   struct task_struct *curr,
			   unsigned long flags, int pc)
{
	struct trace_event_call *call = &event_wakeup;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;

	event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->prev_pid   = curr->pid;
	entry->prev_prio  = curr->prio;
	entry->prev_state = curr->state;
	entry->next_pid   = wakee->pid;
	entry->next_prio  = wakee->prio;
	entry->next_state = wakee->state;
	entry->next_cpu   = task_cpu(wakee);

	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(buffer, event, flags, pc);
}
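/*
 * Note that wakeup events reuse struct ctx_switch_entry: "prev" holds
 * the currently running task and "next" the wakee; only the TRACE_WAKE
 * event type distinguishes them from real context switches.
 */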
static void notrace
probe_wakeup_sched_switch(void *ignore,
			  struct task_struct *prev, struct task_struct *next)
{
	struct trace_array_cpu *data;
	cycle_t T0, T1, delta;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	tracing_record_cmdline(prev);

	if (unlikely(!tracer_enabled))
		return;

	/*
	 * When we start a new trace, we set wakeup_task to NULL
	 * and then set tracer_enabled = 1. We want to make sure
	 * that another CPU does not see the tracer_enabled = 1
	 * and the wakeup_task with an older task, that might
	 * actually be the same as next.
	 */
	smp_rmb();

	if (next != wakeup_task)
		return;

	pc = preempt_count();

	/* disable local data, not wakeup_cpu data */
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
	if (likely(disabled != 1))
		goto out;

	local_irq_save(flags);
	arch_spin_lock(&wakeup_lock);

	/* We could race with grabbing wakeup_lock */
	if (unlikely(!tracer_enabled || next != wakeup_task))
		goto out_unlock;

	/* The task we are waiting for is waking up */
	data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);

	__trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
	tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1 - T0;

	if (!report_latency(wakeup_trace, delta))
		goto out_unlock;

	if (likely(!is_tracing_stopped())) {
		wakeup_trace->max_latency = delta;
		update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu);
	}

out_unlock:
	__wakeup_reset(wakeup_trace);
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
out:
	atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
}
static void __wakeup_reset(struct trace_array *tr)
{
	wakeup_cpu = -1;
	wakeup_prio = -1;
	tracing_dl = 0;

	if (wakeup_task)
		put_task_struct(wakeup_task);

	wakeup_task = NULL;
}
static void wakeup_reset(struct trace_array *tr)
{
	unsigned long flags;

	tracing_reset_online_cpus(&tr->trace_buffer);

	local_irq_save(flags);
	arch_spin_lock(&wakeup_lock);
	__wakeup_reset(tr);
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
}
static void
probe_wakeup(void *ignore, struct task_struct *p, int success)
{
	struct trace_array_cpu *data;
	int cpu = smp_processor_id();
	unsigned long flags;
	long disabled;
	int pc;

	if (likely(!tracer_enabled))
		return;

	tracing_record_cmdline(p);
	tracing_record_cmdline(current);

	/*
	 * The semantics are:
	 *  - wakeup tracer handles all tasks in the system, independently
	 *    from their scheduling class;
	 *  - wakeup_rt tracer handles tasks belonging to sched_dl and
	 *    sched_rr class;
	 *  - wakeup_dl handles tasks belonging to sched_dl class only.
	 */
	if (tracing_dl || (wakeup_dl && !dl_task(p)) ||
	    (wakeup_rt && !dl_task(p) && !rt_task(p)) ||
	    (!dl_task(p) && (p->prio >= wakeup_prio || p->prio >= current->prio)))
		return;

	pc = preempt_count();
	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
	if (unlikely(disabled != 1))
		goto out;

	/* interrupts should be off from try_to_wake_up */
	arch_spin_lock(&wakeup_lock);

	/* check for races. */
	if (!tracer_enabled || tracing_dl ||
	    (!dl_task(p) && p->prio >= wakeup_prio))
		goto out_locked;

	/* reset the trace */
	__wakeup_reset(wakeup_trace);

	wakeup_cpu = task_cpu(p);
	wakeup_current_cpu = wakeup_cpu;
	wakeup_prio = p->prio;

	/*
	 * Once you start tracing a -deadline task, don't bother tracing
	 * another task until the first one wakes up.
	 */
	if (dl_task(p))
		tracing_dl = 1;
	else
		tracing_dl = 0;

	wakeup_task = p;
	get_task_struct(wakeup_task);

	local_save_flags(flags);

	data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);
	data->preempt_timestamp = ftrace_now(cpu);
	tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);

	/*
	 * We must be careful in using CALLER_ADDR2. But since wake_up
	 * is not called by an assembly function (whereas schedule is)
	 * it should be safe to use it here.
	 */
	__trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);

out_locked:
	arch_spin_unlock(&wakeup_lock);
out:
	atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
}
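/*
 * At this point the wakee is armed as wakeup_task;
 * probe_wakeup_sched_switch() computes the wakeup latency once the
 * scheduler actually switches to it.
 */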
static void start_wakeup_tracer(struct trace_array *tr)
{
	int ret;

	ret = register_trace_sched_wakeup(probe_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup\n");
		return;
	}

	ret = register_trace_sched_wakeup_new(probe_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = register_trace_sched_switch(probe_wakeup_sched_switch, NULL);
	if (ret) {
		pr_info("sched trace: Couldn't activate tracepoint"
			" probe to kernel_sched_switch\n");
		goto fail_deprobe_wake_new;
	}

	ret = register_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_migrate_task\n");
		return;
	}

	wakeup_reset(tr);

	/*
	 * Don't let the tracer_enabled = 1 show up before
	 * the wakeup_task is reset. This may be overkill since
	 * wakeup_reset does a spin_unlock after setting the
	 * wakeup_task to NULL, but I want to be safe.
	 * This is a slow path anyway.
	 */
	smp_wmb();

	if (start_func_tracer(tr, is_graph()))
		printk(KERN_ERR "failed to start wakeup tracer\n");

	return;
fail_deprobe_wake_new:
	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
fail_deprobe:
	unregister_trace_sched_wakeup(probe_wakeup, NULL);
}
static void stop_wakeup_tracer(struct trace_array *tr)
{
	tracer_enabled = 0;
	stop_func_tracer(tr, is_graph());
	unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
	unregister_trace_sched_wakeup(probe_wakeup, NULL);
	unregister_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
}
static bool wakeup_busy;
static int __wakeup_tracer_init(struct trace_array *tr)
{
	save_flags = trace_flags;

	/* non overwrite screws up the latency tracers */
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

	tr->max_latency = 0;
	wakeup_trace = tr;
	ftrace_init_array_ops(tr, wakeup_tracer_call);
	start_wakeup_tracer(tr);

	wakeup_busy = true;
	return 0;
}
static int wakeup_tracer_init(struct trace_array *tr)
{
	if (wakeup_busy)
		return -EBUSY;

	wakeup_dl = 0;
	wakeup_rt = 0;
	return __wakeup_tracer_init(tr);
}

static int wakeup_rt_tracer_init(struct trace_array *tr)
{
	if (wakeup_busy)
		return -EBUSY;

	wakeup_dl = 0;
	wakeup_rt = 1;
	return __wakeup_tracer_init(tr);
}

static int wakeup_dl_tracer_init(struct trace_array *tr)
{
	if (wakeup_busy)
		return -EBUSY;

	wakeup_dl = 1;
	wakeup_rt = 0;
	return __wakeup_tracer_init(tr);
}
static void wakeup_tracer_reset(struct trace_array *tr)
{
	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

	stop_wakeup_tracer(tr);
	/* make sure we put back any tasks we are tracing */
	wakeup_reset(tr);

	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
	ftrace_reset_array_ops(tr);
	wakeup_busy = false;
}
static void wakeup_tracer_start(struct trace_array *tr)
{
	wakeup_reset(tr);
	tracer_enabled = 1;
}

static void wakeup_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}
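/*
 * start()/stop() only toggle tracer_enabled while the tracer stays
 * current; the tracepoint probes themselves are registered in the
 * init path (start_wakeup_tracer) and removed in the reset path
 * (stop_wakeup_tracer).
 */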
static struct tracer wakeup_tracer __read_mostly =
{
	.name		= "wakeup",
	.init		= wakeup_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.print_max	= true,
	.print_header	= wakeup_print_header,
	.print_line	= wakeup_print_line,
	.flags		= &tracer_flags,
	.set_flag	= wakeup_set_flag,
	.flag_changed	= wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
	.open		= wakeup_trace_open,
	.close		= wakeup_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
static struct tracer wakeup_rt_tracer __read_mostly =
{
	.name		= "wakeup_rt",
	.init		= wakeup_rt_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.print_max	= true,
	.print_header	= wakeup_print_header,
	.print_line	= wakeup_print_line,
	.flags		= &tracer_flags,
	.set_flag	= wakeup_set_flag,
	.flag_changed	= wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
	.open		= wakeup_trace_open,
	.close		= wakeup_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
static struct tracer wakeup_dl_tracer __read_mostly =
{
	.name		= "wakeup_dl",
	.init		= wakeup_dl_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.print_max	= true,
	.print_header	= wakeup_print_header,
	.print_line	= wakeup_print_line,
	.flags		= &tracer_flags,
	.set_flag	= wakeup_set_flag,
	.flag_changed	= wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
	.open		= wakeup_trace_open,
	.close		= wakeup_trace_close,
	.use_max_tr	= true,
};
__init static int init_wakeup_tracer(void)
{
	int ret;

	ret = register_tracer(&wakeup_tracer);
	if (ret)
		return ret;

	ret = register_tracer(&wakeup_rt_tracer);
	if (ret)
		return ret;

	ret = register_tracer(&wakeup_dl_tracer);
	if (ret)
		return ret;

	return 0;
}
core_initcall(init_wakeup_tracer);
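/*
 * Registered as a core_initcall so the wakeup tracers are available
 * early in boot, before regular device initcalls run.
 */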