/*
 * trace task wakeup timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
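
/*
 * Overview: the wakeup tracers measure the latency between the wakeup
 * of the highest-priority task of interest and the moment that task is
 * actually scheduled in.  probe_wakeup() arms a measurement from the
 * sched_wakeup tracepoint; probe_wakeup_sched_switch() completes it on
 * sched_switch and records new maxima via update_max_tr().
 */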

#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <trace/events/sched.h>

#include "trace.h"

static struct trace_array	*wakeup_trace;
static int __read_mostly	tracer_enabled;

static struct task_struct	*wakeup_task;
static int			wakeup_cpu;
static int			wakeup_current_cpu;
static unsigned			wakeup_prio = -1;
static int			wakeup_rt;
static int			wakeup_dl;
static int			tracing_dl = 0;

/*
 * A raw arch_spinlock_t is used so that taking the lock from deep in
 * the scheduler path is itself never traced or lockdep-checked.
 */
static arch_spinlock_t wakeup_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static void wakeup_reset(struct trace_array *tr);
static void __wakeup_reset(struct trace_array *tr);

static int save_flags;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int wakeup_display_graph(struct trace_array *tr, int set);
# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#else
static inline int wakeup_display_graph(struct trace_array *tr, int set)
{
	return 0;
}
# define is_graph(tr) false
#endif

#ifdef CONFIG_FUNCTION_TRACER

static int wakeup_graph_entry(struct ftrace_graph_ent *trace);
static void wakeup_graph_return(struct ftrace_graph_ret *trace);

static bool function_enabled;

/*
 * Prologue for the wakeup function tracers.
 *
 * Returns 1 if it is OK to continue, and preemption
 * is disabled and data->disabled is incremented.
 * 0 if the trace is to be ignored, and preemption
 * is not disabled and data->disabled is
 * kept the same.
 *
 * Note, this function is also used outside this ifdef but
 *  inside the #ifdef of the function graph tracer below.
 *  This is OK, since the function graph tracer is
 *  dependent on the function tracer.
 */
static int
func_prolog_preempt_disable(struct trace_array *tr,
			    struct trace_array_cpu **data,
			    int *pc)
{
	long disabled;
	int cpu;

	if (likely(!wakeup_task))
		return 0;

	*pc = preempt_count();
	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	if (cpu != wakeup_current_cpu)
		goto out_enable;

	*data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&(*data)->disabled);
	if (unlikely(disabled != 1))
		goto out;

	return 1;

out:
	atomic_dec(&(*data)->disabled);

out_enable:
	preempt_enable_notrace();
	return 0;
}

/*
 * wakeup uses its own tracer function to keep the overhead down:
 */
static void
wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
		   struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return;

	local_irq_save(flags);
	trace_function(tr, ip, parent_ip, flags, pc);
	local_irq_restore(flags);

	atomic_dec(&data->disabled);
	preempt_enable_notrace();
}

static int register_wakeup_function(struct trace_array *tr, int graph, int set)
{
	int ret;

	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
	if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
		return 0;

	if (graph)
		ret = register_ftrace_graph(&wakeup_graph_return,
					    &wakeup_graph_entry);
	else
		ret = register_ftrace_function(tr->ops);

	if (!ret)
		function_enabled = true;

	return ret;
}

static void unregister_wakeup_function(struct trace_array *tr, int graph)
{
	if (!function_enabled)
		return;

	if (graph)
		unregister_ftrace_graph();
	else
		unregister_ftrace_function(tr->ops);

	function_enabled = false;
}

static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
{
	if (!(mask & TRACE_ITER_FUNCTION))
		return 0;

	if (set)
		register_wakeup_function(tr, is_graph(tr), 1);
	else
		unregister_wakeup_function(tr, is_graph(tr));
	return 1;
}
#else
static int register_wakeup_function(struct trace_array *tr, int graph, int set)
{
	return 0;
}
static void unregister_wakeup_function(struct trace_array *tr, int graph) { }
static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
{
	return 0;
}
#endif /* CONFIG_FUNCTION_TRACER */

static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set)
{
	struct tracer *tracer = tr->current_trace;

	if (wakeup_function_set(tr, mask, set))
		return 0;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (mask & TRACE_ITER_DISPLAY_GRAPH)
		return wakeup_display_graph(tr, set);
#endif

	return trace_keep_overwrite(tracer, mask, set);
}

static int start_func_tracer(struct trace_array *tr, int graph)
{
	int ret;

	ret = register_wakeup_function(tr, graph, 0);

	if (!ret && tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return ret;
}

static void stop_func_tracer(struct trace_array *tr, int graph)
{
	tracer_enabled = 0;

	unregister_wakeup_function(tr, graph);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int wakeup_display_graph(struct trace_array *tr, int set)
{
	if (!(is_graph(tr) ^ set))
		return 0;

	stop_func_tracer(tr, !set);

	wakeup_reset(wakeup_trace);
	tr->max_latency = 0;

	return start_func_tracer(tr, set);
}

static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc, ret = 0;

	if (ftrace_graph_ignore_func(trace))
		return 0;
	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions.  But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return 0;

	local_save_flags(flags);
	ret = __trace_graph_entry(tr, trace, flags, pc);
	atomic_dec(&data->disabled);
	preempt_enable_notrace();

	return ret;
}

static void wakeup_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return;

	local_save_flags(flags);
	__trace_graph_return(tr, trace, flags, pc);
	atomic_dec(&data->disabled);

	preempt_enable_notrace();
}

static void wakeup_trace_open(struct trace_iterator *iter)
{
	if (is_graph(iter->tr))
		graph_trace_open(iter);
}

static void wakeup_trace_close(struct trace_iterator *iter)
{
	if (iter->private)
		graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC | \
			    TRACE_GRAPH_PRINT_ABS_TIME | \
			    TRACE_GRAPH_PRINT_DURATION)
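
/*
 * In graph mode these flags add the process name, absolute timestamps
 * and per-function durations to the output.
 */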

static enum print_line_t
wakeup_print_line(struct trace_iterator *iter)
{
	/*
	 * In graph mode call the graph tracer output function,
	 * otherwise go with the TRACE_FN event handler
	 */
	if (is_graph(iter->tr))
		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

	return TRACE_TYPE_UNHANDLED;
}

static void wakeup_print_header(struct seq_file *s)
{
	if (is_graph(wakeup_trace))
		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
	else
		trace_default_header(s);
}

static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned long flags, int pc)
{
	if (is_graph(tr))
		trace_graph_function(tr, ip, parent_ip, flags, pc);
	else
		trace_function(tr, ip, parent_ip, flags, pc);
}
#else
#define __trace_function trace_function

static enum print_line_t
wakeup_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

static void wakeup_trace_open(struct trace_iterator *iter) { }
static void wakeup_trace_close(struct trace_iterator *iter) { }

#ifdef CONFIG_FUNCTION_TRACER
static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
{
	return -1;
}
static void wakeup_graph_return(struct ftrace_graph_ret *trace) { }
static void wakeup_print_header(struct seq_file *s)
{
	trace_default_header(s);
}
#else
static void wakeup_print_header(struct seq_file *s)
{
	trace_latency_header(s);
}
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static bool report_latency(struct trace_array *tr, u64 delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return false;
	} else {
		if (delta <= tr->max_latency)
			return false;
	}
	return true;
}
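
/*
 * Note: with the default tracing_thresh of zero, only a new maximum
 * latency is recorded; a non-zero threshold (set via the tracing_thresh
 * tracefs file) reports every latency that exceeds it instead.
 */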

static void
probe_wakeup_migrate_task(void *ignore, struct task_struct *task, int cpu)
{
	if (task != wakeup_task)
		return;

	wakeup_current_cpu = cpu;
}

static void
tracing_sched_switch_trace(struct trace_array *tr,
			   struct task_struct *prev,
			   struct task_struct *next,
			   unsigned long flags, int pc)
{
	struct trace_event_call *call = &event_context_switch;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->prev_pid			= prev->pid;
	entry->prev_prio		= prev->prio;
	entry->prev_state		= prev->state;
	entry->next_pid			= next->pid;
	entry->next_prio		= next->prio;
	entry->next_state		= next->state;
	entry->next_cpu			= task_cpu(next);

	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
}

static void
tracing_sched_wakeup_trace(struct trace_array *tr,
			   struct task_struct *wakee,
			   struct task_struct *curr,
			   unsigned long flags, int pc)
{
	struct trace_event_call *call = &event_wakeup;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;

	event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->prev_pid			= curr->pid;
	entry->prev_prio		= curr->prio;
	entry->prev_state		= curr->state;
	entry->next_pid			= wakee->pid;
	entry->next_prio		= wakee->prio;
	entry->next_state		= wakee->state;
	entry->next_cpu			= task_cpu(wakee);

	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
}

static void
probe_wakeup_sched_switch(void *ignore, bool preempt,
			  struct task_struct *prev, struct task_struct *next)
{
	struct trace_array_cpu *data;
	u64 T0, T1, delta;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	tracing_record_cmdline(prev);

	if (unlikely(!tracer_enabled))
		return;

	/*
	 * When we start a new trace, we set wakeup_task to NULL
	 * and then set tracer_enabled = 1. We want to make sure
	 * that another CPU does not see the tracer_enabled = 1
	 * and the wakeup_task with an older task, that might
	 * actually be the same as next.
	 */
	smp_rmb();

	if (next != wakeup_task)
		return;

	pc = preempt_count();

	/* disable local data, not wakeup_cpu data */
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
	if (likely(disabled != 1))
		goto out;

	local_irq_save(flags);
	arch_spin_lock(&wakeup_lock);

	/* We could race with grabbing wakeup_lock */
	if (unlikely(!tracer_enabled || next != wakeup_task))
		goto out_unlock;

	/* The task we are waiting for is waking up */
	data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);

	__trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
	tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1 - T0;

	if (!report_latency(wakeup_trace, delta))
		goto out_unlock;

	if (likely(!is_tracing_stopped())) {
		wakeup_trace->max_latency = delta;
		update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu);
	}

out_unlock:
	__wakeup_reset(wakeup_trace);
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
out:
	atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
}
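
/*
 * The reset helpers below tear down the current measurement.
 * __wakeup_reset() must be called with wakeup_lock held and interrupts
 * disabled; every caller here satisfies that.
 */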
static void __wakeup_reset(struct trace_array *tr)
{
	wakeup_cpu = -1;
	wakeup_prio = -1;
	tracing_dl = 0;
	if (wakeup_task)
		put_task_struct(wakeup_task);
	wakeup_task = NULL;
}

static void wakeup_reset(struct trace_array *tr)
{
	unsigned long flags;

	tracing_reset_online_cpus(&tr->trace_buffer);

	local_irq_save(flags);
	arch_spin_lock(&wakeup_lock);
	__wakeup_reset(tr);
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
}

static void
probe_wakeup(void *ignore, struct task_struct *p)
{
	struct trace_array_cpu *data;
	int cpu = smp_processor_id();
	unsigned long flags;
	long disabled;
	int pc;

	if (likely(!tracer_enabled))
		return;

	tracing_record_cmdline(p);
	tracing_record_cmdline(current);

	/*
	 * Semantic is like this:
	 *  - wakeup tracer handles all tasks in the system, independently
	 *    from their scheduling class;
	 *  - wakeup_rt tracer handles tasks belonging to sched_dl and
	 *    sched_rr/sched_fifo classes;
	 *  - wakeup_dl handles tasks belonging to sched_dl class only.
	 */
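	/*
	 * E.g. a woken SCHED_FIFO task passes the filter below under the
	 * wakeup and wakeup_rt tracers, but is rejected by wakeup_dl,
	 * which only arms on sched_dl tasks.
	 */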
	if (tracing_dl || (wakeup_dl && !dl_task(p)) ||
	    (wakeup_rt && !dl_task(p) && !rt_task(p)) ||
	    (!dl_task(p) && (p->prio >= wakeup_prio || p->prio >= current->prio)))
		return;

	pc = preempt_count();
	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
	if (unlikely(disabled != 1))
		goto out;

	/* interrupts should be off from try_to_wake_up */
	arch_spin_lock(&wakeup_lock);

	/* check for races. */
	if (!tracer_enabled || tracing_dl ||
	    (!dl_task(p) && p->prio >= wakeup_prio))
		goto out_locked;

	/* reset the trace */
	__wakeup_reset(wakeup_trace);

	wakeup_cpu = task_cpu(p);
	wakeup_current_cpu = wakeup_cpu;
	wakeup_prio = p->prio;

	/*
	 * Once you start tracing a -deadline task, don't bother tracing
	 * another task until the first one wakes up.
	 */
	if (dl_task(p))
		tracing_dl = 1;
	else
		tracing_dl = 0;

	wakeup_task = p;
	get_task_struct(wakeup_task);

	local_save_flags(flags);

	data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);
	data->preempt_timestamp = ftrace_now(cpu);
	tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);

	/*
	 * We must be careful in using CALLER_ADDR2. But since wake_up
	 * is not called by an assembly function (where as schedule is)
	 * it should be safe to use it here.
	 */
	__trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);

out_locked:
	arch_spin_unlock(&wakeup_lock);
out:
	atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
}

static void start_wakeup_tracer(struct trace_array *tr)
{
	int ret;

	ret = register_trace_sched_wakeup(probe_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup\n");
		return;
	}

	ret = register_trace_sched_wakeup_new(probe_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = register_trace_sched_switch(probe_wakeup_sched_switch, NULL);
	if (ret) {
		pr_info("sched trace: Couldn't activate tracepoint"
			" probe to kernel_sched_switch\n");
		goto fail_deprobe_wake_new;
	}

	ret = register_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_migrate_task\n");
		return;
	}

	wakeup_reset(tr);

	/*
	 * Don't let the tracer_enabled = 1 show up before
	 * the wakeup_task is reset. This may be overkill since
	 * wakeup_reset does a spin_unlock after setting the
	 * wakeup_task to NULL, but I want to be safe.
	 * This is a slow path anyway.
	 */
	smp_wmb();

	if (start_func_tracer(tr, is_graph(tr)))
		printk(KERN_ERR "failed to start wakeup tracer\n");

	return;
fail_deprobe_wake_new:
	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
fail_deprobe:
	unregister_trace_sched_wakeup(probe_wakeup, NULL);
}

static void stop_wakeup_tracer(struct trace_array *tr)
{
	tracer_enabled = 0;
	stop_func_tracer(tr, is_graph(tr));
	unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
	unregister_trace_sched_wakeup(probe_wakeup, NULL);
	unregister_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
}
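
/*
 * Only one wakeup tracer flavour (wakeup, wakeup_rt, wakeup_dl) can be
 * active at a time: the init functions below return -EBUSY while
 * wakeup_busy is set.
 */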
static bool wakeup_busy;

static int __wakeup_tracer_init(struct trace_array *tr)
{
	save_flags = tr->trace_flags;

	/* non overwrite screws up the latency tracers */
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

	tr->max_latency = 0;
	wakeup_trace = tr;
	ftrace_init_array_ops(tr, wakeup_tracer_call);
	start_wakeup_tracer(tr);

	wakeup_busy = true;
	return 0;
}

static int wakeup_tracer_init(struct trace_array *tr)
{
	if (wakeup_busy)
		return -EBUSY;

	wakeup_dl = 0;
	wakeup_rt = 0;
	return __wakeup_tracer_init(tr);
}

static int wakeup_rt_tracer_init(struct trace_array *tr)
{
	if (wakeup_busy)
		return -EBUSY;

	wakeup_dl = 0;
	wakeup_rt = 1;
	return __wakeup_tracer_init(tr);
}

static int wakeup_dl_tracer_init(struct trace_array *tr)
{
	if (wakeup_busy)
		return -EBUSY;

	wakeup_dl = 1;
	wakeup_rt = 0;
	return __wakeup_tracer_init(tr);
}

static void wakeup_tracer_reset(struct trace_array *tr)
{
	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

	stop_wakeup_tracer(tr);
	/* make sure we put back any tasks we are tracing */
	wakeup_reset(tr);

	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
	ftrace_reset_array_ops(tr);
	wakeup_busy = false;
}

static void wakeup_tracer_start(struct trace_array *tr)
{
	wakeup_reset(tr);
	tracer_enabled = 1;
}

static void wakeup_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}

static struct tracer wakeup_tracer __read_mostly =
{
	.name		= "wakeup",
	.init		= wakeup_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.print_max	= true,
	.print_header	= wakeup_print_header,
	.print_line	= wakeup_print_line,
	.flag_changed	= wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
	.open		= wakeup_trace_open,
	.close		= wakeup_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};

static struct tracer wakeup_rt_tracer __read_mostly =
{
	.name		= "wakeup_rt",
	.init		= wakeup_rt_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.print_max	= true,
	.print_header	= wakeup_print_header,
	.print_line	= wakeup_print_line,
	.flag_changed	= wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
	.open		= wakeup_trace_open,
	.close		= wakeup_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};

static struct tracer wakeup_dl_tracer __read_mostly =
{
	.name		= "wakeup_dl",
	.init		= wakeup_dl_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.print_max	= true,
	.print_header	= wakeup_print_header,
	.print_line	= wakeup_print_line,
	.flag_changed	= wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
	.open		= wakeup_trace_open,
	.close		= wakeup_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};

__init static int init_wakeup_tracer(void)
{
	int ret;

	ret = register_tracer(&wakeup_tracer);
	if (ret)
		return ret;

	ret = register_tracer(&wakeup_rt_tracer);
	if (ret)
		return ret;

	ret = register_tracer(&wakeup_dl_tracer);
	if (ret)
		return ret;

	return 0;
}
core_initcall(init_wakeup_tracer);