// SPDX-License-Identifier: GPL-2.0
/*
 * trace task wakeup timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <trace/events/sched.h>

#include "trace.h"
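/*
 * Usage sketch (assumes the usual tracefs mount point): the tracers
 * registered at the bottom of this file are driven entirely through
 * tracefs, e.g.:
 *
 *	echo wakeup_rt > /sys/kernel/tracing/current_tracer
 *	cat /sys/kernel/tracing/tracing_max_latency
 *
 * The trace of the worst-seen wakeup is then readable from the
 * "trace" file in the same directory.
 */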
static struct trace_array	*wakeup_trace;
static int __read_mostly	tracer_enabled;

static struct task_struct	*wakeup_task;
static int			wakeup_cpu;
static int			wakeup_current_cpu;
static unsigned			wakeup_prio = -1;
static int			wakeup_rt;
static int			wakeup_dl;
static int			tracing_dl = 0;
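/*
 * A raw arch_spinlock_t is used for wakeup_lock below rather than a
 * normal spinlock: these probes run from scheduler tracepoints, where
 * taking a lock that is itself instrumented (lockdep, preemption
 * hooks) could recurse back into the tracer.
 */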
static arch_spinlock_t wakeup_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
static void wakeup_reset(struct trace_array *tr);
static void __wakeup_reset(struct trace_array *tr);
static int start_func_tracer(struct trace_array *tr, int graph);
static void stop_func_tracer(struct trace_array *tr, int graph);

static int save_flags;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#else
# define is_graph(tr) false
#endif
#ifdef CONFIG_FUNCTION_TRACER

static bool function_enabled;
/*
 * Prologue for the wakeup function tracers.
 *
 * Returns 1 if it is OK to continue, and preemption
 * is disabled and data->disabled is incremented.
 * 0 if the trace is to be ignored, and preemption
 * is not disabled and data->disabled is
 * kept the same.
 *
 * Note, this function is also used outside this ifdef but
 *  inside the #ifdef of the function graph tracer below.
 *  This is OK, since the function graph tracer is
 *  dependent on the function tracer.
 */
static int
func_prolog_preempt_disable(struct trace_array *tr,
			    struct trace_array_cpu **data,
			    int *pc)
{
	long disabled;
	int cpu;

	if (likely(!wakeup_task))
		return 0;

	*pc = preempt_count();
	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	if (cpu != wakeup_current_cpu)
		goto out_enable;

	*data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&(*data)->disabled);
	if (unlikely(disabled != 1))
		goto out;

	return 1;

out:
	atomic_dec(&(*data)->disabled);

out_enable:
	preempt_enable_notrace();
	return 0;
}
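/*
 * Expected call pattern for the prologue above (mirrored by every
 * tracer callback below): on a non-zero return the caller records its
 * event and must then drop the references it was handed:
 *
 *	if (!func_prolog_preempt_disable(tr, &data, &pc))
 *		return;
 *	...record the event...
 *	atomic_dec(&data->disabled);
 *	preempt_enable_notrace();
 */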
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static int wakeup_display_graph(struct trace_array *tr, int set)
{
	if (!(is_graph(tr) ^ set))
		return 0;

	stop_func_tracer(tr, !set);

	wakeup_reset(wakeup_trace);
	tr->max_latency = 0;

	return start_func_tracer(tr, set);
}
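/*
 * Note that flipping the display-graph flag above throws away the
 * current trace and the recorded max latency: the function and
 * function-graph output formats cannot be mixed within one trace.
 */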
static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc, ret = 0;

	if (ftrace_graph_ignore_func(trace))
		return 0;
	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions.  But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return 0;

	local_save_flags(flags);
	ret = __trace_graph_entry(tr, trace, flags, pc);
	atomic_dec(&data->disabled);
	preempt_enable_notrace();

	return ret;
}
static void wakeup_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	ftrace_graph_addr_finish(trace);

	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return;

	local_save_flags(flags);
	__trace_graph_return(tr, trace, flags, pc);
	atomic_dec(&data->disabled);

	preempt_enable_notrace();
}
static struct fgraph_ops fgraph_wakeup_ops = {
	.entryfunc = &wakeup_graph_entry,
	.retfunc = &wakeup_graph_return,
};
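/*
 * fgraph_wakeup_ops bundles the two callbacks handed to
 * register_ftrace_graph()/unregister_ftrace_graph();
 * register_wakeup_function() below switches between this and plain
 * function tracing depending on whether graph mode is selected.
 */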
static void wakeup_trace_open(struct trace_iterator *iter)
{
	if (is_graph(iter->tr))
		graph_trace_open(iter);
}

static void wakeup_trace_close(struct trace_iterator *iter)
{
	if (iter->private)
		graph_trace_close(iter);
}
#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC | \
			    TRACE_GRAPH_PRINT_CPU |  \
			    TRACE_GRAPH_PRINT_REL_TIME | \
			    TRACE_GRAPH_PRINT_DURATION | \
			    TRACE_GRAPH_PRINT_OVERHEAD | \
			    TRACE_GRAPH_PRINT_IRQS)
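/*
 * Roughly, these flags select the extra columns in the graph output:
 * the owning task (PROC), the CPU number, timestamps relative to the
 * start of the trace (REL_TIME), per-function durations plus the
 * '+'/'!' overhead markers (DURATION, OVERHEAD), and annotations for
 * interrupt entry/exit (IRQS).
 */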
static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
	/*
	 * In graph mode call the graph tracer output function,
	 * otherwise go with the TRACE_FN event handler
	 */
	if (is_graph(iter->tr))
		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

	return TRACE_TYPE_UNHANDLED;
}
static void wakeup_print_header(struct seq_file *s)
{
	if (is_graph(wakeup_trace))
		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
	else
		trace_default_header(s);
}
#endif /* else CONFIG_FUNCTION_GRAPH_TRACER */
/*
 * wakeup uses its own tracer function to keep the overhead down:
 */
static void
wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
		   struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return;

	local_irq_save(flags);
	trace_function(tr, ip, parent_ip, flags, pc);
	local_irq_restore(flags);

	atomic_dec(&data->disabled);
	preempt_enable_notrace();
}
static int register_wakeup_function(struct trace_array *tr, int graph, int set)
{
	int ret;

	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
	if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
		return 0;

	if (graph)
		ret = register_ftrace_graph(&fgraph_wakeup_ops);
	else
		ret = register_ftrace_function(tr->ops);

	if (!ret)
		function_enabled = true;

	return ret;
}
static void unregister_wakeup_function(struct trace_array *tr, int graph)
{
	if (!function_enabled)
		return;

	if (graph)
		unregister_ftrace_graph(&fgraph_wakeup_ops);
	else
		unregister_ftrace_function(tr->ops);

	function_enabled = false;
}
static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
{
	if (!(mask & TRACE_ITER_FUNCTION))
		return 0;

	if (set)
		register_wakeup_function(tr, is_graph(tr), 1);
	else
		unregister_wakeup_function(tr, is_graph(tr));
	return 1;
}
#else /* CONFIG_FUNCTION_TRACER */
static int register_wakeup_function(struct trace_array *tr, int graph, int set)
{
	return 0;
}
static void unregister_wakeup_function(struct trace_array *tr, int graph) { }
static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
{
	return 0;
}
#endif /* else CONFIG_FUNCTION_TRACER */
#ifndef CONFIG_FUNCTION_GRAPH_TRACER
static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

static void wakeup_trace_open(struct trace_iterator *iter) { }
static void wakeup_trace_close(struct trace_iterator *iter) { }

static void wakeup_print_header(struct seq_file *s)
{
	trace_default_header(s);
}
#endif /* !CONFIG_FUNCTION_GRAPH_TRACER */
static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned long flags, int pc)
{
	if (is_graph(tr))
		trace_graph_function(tr, ip, parent_ip, flags, pc);
	else
		trace_function(tr, ip, parent_ip, flags, pc);
}
static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set)
{
	struct tracer *tracer = tr->current_trace;

	if (wakeup_function_set(tr, mask, set))
		return 0;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (mask & TRACE_ITER_DISPLAY_GRAPH)
		return wakeup_display_graph(tr, set);
#endif

	return trace_keep_overwrite(tracer, mask, set);
}
static int start_func_tracer(struct trace_array *tr, int graph)
{
	int ret;

	ret = register_wakeup_function(tr, graph, 0);

	if (!ret && tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return ret;
}
static void stop_func_tracer(struct trace_array *tr, int graph)
{
	tracer_enabled = 0;

	unregister_wakeup_function(tr, graph);
}
/*
 * Should this new latency be reported/recorded?
 */
static bool report_latency(struct trace_array *tr, u64 delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return false;
	} else {
		if (delta <= tr->max_latency)
			return false;
	}
	return true;
}
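/*
 * In other words: with tracing_thresh set, every wakeup latency at or
 * above the threshold is recorded; otherwise only a new maximum
 * (strictly greater than tr->max_latency) is.
 */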
static void
probe_wakeup_migrate_task(void *ignore, struct task_struct *task, int cpu)
{
	if (task != wakeup_task)
		return;

	wakeup_current_cpu = cpu;
}
static void
tracing_sched_switch_trace(struct trace_array *tr,
			   struct task_struct *prev,
			   struct task_struct *next,
			   unsigned long flags, int pc)
{
	struct trace_event_call *call = &event_context_switch;
	struct trace_buffer *buffer = tr->array_buffer.buffer;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->prev_pid			= prev->pid;
	entry->prev_prio		= prev->prio;
	entry->prev_state		= task_state_index(prev);
	entry->next_pid			= next->pid;
	entry->next_prio		= next->prio;
	entry->next_state		= task_state_index(next);
	entry->next_cpu			= task_cpu(next);

	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
}
static void
tracing_sched_wakeup_trace(struct trace_array *tr,
			   struct task_struct *wakee,
			   struct task_struct *curr,
			   unsigned long flags, int pc)
{
	struct trace_event_call *call = &event_wakeup;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;
	struct trace_buffer *buffer = tr->array_buffer.buffer;

	event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->prev_pid			= curr->pid;
	entry->prev_prio		= curr->prio;
	entry->prev_state		= task_state_index(curr);
	entry->next_pid			= wakee->pid;
	entry->next_prio		= wakee->prio;
	entry->next_state		= task_state_index(wakee);
	entry->next_cpu			= task_cpu(wakee);

	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
}
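/*
 * Both helpers above fill the same ctx_switch_entry layout; a wakeup
 * is recorded as if it were a switch from the currently running task
 * (prev_* = curr) to the task being woken (next_* = wakee).
 */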
static void
probe_wakeup_sched_switch(void *ignore, bool preempt,
			  struct task_struct *prev, struct task_struct *next)
{
	struct trace_array_cpu *data;
	u64 T0, T1, delta;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	tracing_record_cmdline(prev);

	if (unlikely(!tracer_enabled))
		return;

	/*
	 * When we start a new trace, we set wakeup_task to NULL
	 * and then set tracer_enabled = 1. We want to make sure
	 * that another CPU does not see the tracer_enabled = 1
	 * and the wakeup_task with an older task, that might
	 * actually be the same as next.
	 */
	smp_rmb();

	if (next != wakeup_task)
		return;

	pc = preempt_count();

	/* disable local data, not wakeup_cpu data */
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
	if (likely(disabled != 1))
		goto out;

	local_irq_save(flags);
	arch_spin_lock(&wakeup_lock);

	/* We could race with grabbing wakeup_lock */
	if (unlikely(!tracer_enabled || next != wakeup_task))
		goto out_unlock;

	/* The task we are waiting for is waking up */
	data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu);

	__trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
	tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);
	__trace_stack(wakeup_trace, flags, 0, pc);

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1 - T0;

	if (!report_latency(wakeup_trace, delta))
		goto out_unlock;

	if (likely(!is_tracing_stopped())) {
		wakeup_trace->max_latency = delta;
		update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu, NULL);
	}

out_unlock:
	__wakeup_reset(wakeup_trace);
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
out:
	atomic_dec(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
}
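/*
 * The probe above is where the latency is actually measured: T0 was
 * stamped in probe_wakeup() when the task was woken, T1 is taken here
 * when it is finally switched in, and delta = T1 - T0 is the wakeup
 * latency that report_latency() compares against.
 */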
static void __wakeup_reset(struct trace_array *tr)
{
	wakeup_cpu = -1;
	wakeup_prio = -1;
	tracing_dl = 0;

	if (wakeup_task)
		put_task_struct(wakeup_task);

	wakeup_task = NULL;
}
static void wakeup_reset(struct trace_array *tr)
{
	unsigned long flags;

	tracing_reset_online_cpus(&tr->array_buffer);

	local_irq_save(flags);
	arch_spin_lock(&wakeup_lock);
	__wakeup_reset(tr);
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
}
static void
probe_wakeup(void *ignore, struct task_struct *p)
{
	struct trace_array_cpu *data;
	int cpu = smp_processor_id();
	unsigned long flags;
	long disabled;
	int pc;

	if (likely(!tracer_enabled))
		return;

	tracing_record_cmdline(p);
	tracing_record_cmdline(current);

	/*
	 * Semantic is like this:
	 *  - wakeup tracer handles all tasks in the system, independently
	 *    from their scheduling class;
	 *  - wakeup_rt tracer handles tasks belonging to sched_dl and
	 *    sched_rt class;
	 *  - wakeup_dl handles tasks belonging to sched_dl class only.
	 */
	if (tracing_dl || (wakeup_dl && !dl_task(p)) ||
	    (wakeup_rt && !dl_task(p) && !rt_task(p)) ||
	    (!dl_task(p) && (p->prio >= wakeup_prio || p->prio >= current->prio)))
		return;

	pc = preempt_count();
	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
	if (unlikely(disabled != 1))
		goto out;

	/* interrupts should be off from try_to_wake_up */
	arch_spin_lock(&wakeup_lock);

	/* check for races. */
	if (!tracer_enabled || tracing_dl ||
	    (!dl_task(p) && p->prio >= wakeup_prio))
		goto out_locked;

	/* reset the trace */
	__wakeup_reset(wakeup_trace);

	wakeup_cpu = task_cpu(p);
	wakeup_current_cpu = wakeup_cpu;
	wakeup_prio = p->prio;

	/*
	 * Once you start tracing a -deadline task, don't bother tracing
	 * another task until the first one wakes up.
	 */
	if (dl_task(p))
		tracing_dl = 1;
	else
		tracing_dl = 0;

	wakeup_task = get_task_struct(p);

	local_save_flags(flags);

	data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu);
	data->preempt_timestamp = ftrace_now(cpu);
	tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);
	__trace_stack(wakeup_trace, flags, 0, pc);

	/*
	 * We must be careful in using CALLER_ADDR2. But since wake_up
	 * is not called by an assembly function (where as schedule is)
	 * it should be safe to use it here.
	 */
	__trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);

out_locked:
	arch_spin_unlock(&wakeup_lock);
out:
	atomic_dec(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
}
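/*
 * Reminder when reading the priority checks above: p->prio is the
 * kernel-internal priority, where a numerically lower value means a
 * higher priority, so "p->prio >= wakeup_prio" filters out wakeups
 * that are no more urgent than the one already being traced.
 */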
static void start_wakeup_tracer(struct trace_array *tr)
{
	int ret;

	ret = register_trace_sched_wakeup(probe_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup\n");
		return;
	}

	ret = register_trace_sched_wakeup_new(probe_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = register_trace_sched_switch(probe_wakeup_sched_switch, NULL);
	if (ret) {
		pr_info("sched trace: Couldn't activate tracepoint"
			" probe to kernel_sched_switch\n");
		goto fail_deprobe_wake_new;
	}

	ret = register_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_migrate_task\n");
		goto fail_deprobe_sched_switch;
	}

	wakeup_reset(tr);

	/*
	 * Don't let the tracer_enabled = 1 show up before
	 * the wakeup_task is reset. This may be overkill since
	 * wakeup_reset does a spin_unlock after setting the
	 * wakeup_task to NULL, but I want to be safe.
	 * This is a slow path anyway.
	 */
	smp_wmb();

	if (start_func_tracer(tr, is_graph(tr)))
		printk(KERN_ERR "failed to start wakeup tracer\n");

	return;
fail_deprobe_sched_switch:
	unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
fail_deprobe_wake_new:
	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
fail_deprobe:
	unregister_trace_sched_wakeup(probe_wakeup, NULL);
}
static void stop_wakeup_tracer(struct trace_array *tr)
{
	tracer_enabled = 0;
	stop_func_tracer(tr, is_graph(tr));
	unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
	unregister_trace_sched_wakeup(probe_wakeup, NULL);
	unregister_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
}
static bool wakeup_busy;
static int __wakeup_tracer_init(struct trace_array *tr)
{
	save_flags = tr->trace_flags;

	/* non overwrite screws up the latency tracers */
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

	tr->max_latency = 0;
	wakeup_trace = tr;
	ftrace_init_array_ops(tr, wakeup_tracer_call);
	start_wakeup_tracer(tr);

	wakeup_busy = true;
	return 0;
}
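/*
 * Only one flavor of the wakeup tracer can own the global state at a
 * time; the wakeup_busy flag set above is what makes the per-flavor
 * init functions below fail with -EBUSY while another instance runs.
 */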
static int wakeup_tracer_init(struct trace_array *tr)
{
	if (wakeup_busy)
		return -EBUSY;

	wakeup_dl = 0;
	wakeup_rt = 0;
	return __wakeup_tracer_init(tr);
}
static int wakeup_rt_tracer_init(struct trace_array *tr)
{
	if (wakeup_busy)
		return -EBUSY;

	wakeup_dl = 0;
	wakeup_rt = 1;
	return __wakeup_tracer_init(tr);
}
static int wakeup_dl_tracer_init(struct trace_array *tr)
{
	if (wakeup_busy)
		return -EBUSY;

	wakeup_dl = 1;
	wakeup_rt = 0;
	return __wakeup_tracer_init(tr);
}
static void wakeup_tracer_reset(struct trace_array *tr)
{
	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

	stop_wakeup_tracer(tr);
	/* make sure we put back any tasks we are tracing */
	wakeup_reset(tr);

	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
	ftrace_reset_array_ops(tr);
	wakeup_busy = false;
}
static void wakeup_tracer_start(struct trace_array *tr)
{
	wakeup_reset(tr);
	tracer_enabled = 1;
}

static void wakeup_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}
static struct tracer wakeup_tracer __read_mostly =
{
	.name		= "wakeup",
	.init		= wakeup_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.print_max	= true,
	.print_header	= wakeup_print_header,
	.print_line	= wakeup_print_line,
	.flag_changed	= wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
	.open		= wakeup_trace_open,
	.close		= wakeup_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
static struct tracer wakeup_rt_tracer __read_mostly =
{
	.name		= "wakeup_rt",
	.init		= wakeup_rt_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.print_max	= true,
	.print_header	= wakeup_print_header,
	.print_line	= wakeup_print_line,
	.flag_changed	= wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
	.open		= wakeup_trace_open,
	.close		= wakeup_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
static struct tracer wakeup_dl_tracer __read_mostly =
{
	.name		= "wakeup_dl",
	.init		= wakeup_dl_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.print_max	= true,
	.print_header	= wakeup_print_header,
	.print_line	= wakeup_print_line,
	.flag_changed	= wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
	.open		= wakeup_trace_open,
	.close		= wakeup_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
__init static int init_wakeup_tracer(void)
{
	int ret;

	ret = register_tracer(&wakeup_tracer);
	if (ret)
		return ret;

	ret = register_tracer(&wakeup_rt_tracer);
	if (ret)
		return ret;

	ret = register_tracer(&wakeup_dl_tracer);
	if (ret)
		return ret;

	return 0;
}
core_initcall(init_wakeup_tracer);
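/*
 * core_initcall() runs early in boot, so all three tracers should be
 * selectable via tracefs as soon as the tracing infrastructure itself
 * is up.
 */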