/*
 * trace context switch
 *
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <trace/events/sched.h>

#include "trace.h"

static struct trace_array	*ctx_trace;
static int __read_mostly	tracer_enabled;
static int			sched_ref;
static DEFINE_MUTEX(sched_register_mutex);
static int			sched_stopped;

void
tracing_sched_switch_trace(struct trace_array *tr,
			   struct task_struct *prev,
			   struct task_struct *next,
			   unsigned long flags, int pc)
{
	struct ftrace_event_call *call = &event_context_switch;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->prev_pid			= prev->pid;
	entry->prev_prio		= prev->prio;
	entry->prev_state		= prev->state;
	entry->next_pid			= next->pid;
	entry->next_prio		= next->prio;
	entry->next_state		= next->state;
	entry->next_cpu			= task_cpu(next);

	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(buffer, event, flags, pc);
}
static void
probe_sched_switch(void *ignore, struct task_struct *prev, struct task_struct *next)
{
	struct trace_array_cpu *data;
	unsigned long flags;
	int cpu;
	int pc;

	if (unlikely(!sched_ref))
		return;

	tracing_record_cmdline(prev);
	tracing_record_cmdline(next);

	if (!tracer_enabled || sched_stopped)
		return;

	pc = preempt_count();
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(ctx_trace->trace_buffer.data, cpu);

	if (likely(!atomic_read(&data->disabled)))
		tracing_sched_switch_trace(ctx_trace, prev, next, flags, pc);

	local_irq_restore(flags);
}
void
tracing_sched_wakeup_trace(struct trace_array *tr,
			   struct task_struct *wakee,
			   struct task_struct *curr,
			   unsigned long flags, int pc)
{
	struct ftrace_event_call *call = &event_wakeup;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;

	event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->prev_pid			= curr->pid;
	entry->prev_prio		= curr->prio;
	entry->prev_state		= curr->state;
	entry->next_pid			= wakee->pid;
	entry->next_prio		= wakee->prio;
	entry->next_state		= wakee->state;
	entry->next_cpu			= task_cpu(wakee);

	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(buffer, event, flags, pc);
}
static void
probe_sched_wakeup(void *ignore, struct task_struct *wakee, int success)
{
	struct trace_array_cpu *data;
	unsigned long flags;
	int cpu, pc;

	if (unlikely(!sched_ref))
		return;

	tracing_record_cmdline(current);

	if (!tracer_enabled || sched_stopped)
		return;

	pc = preempt_count();
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(ctx_trace->trace_buffer.data, cpu);

	if (likely(!atomic_read(&data->disabled)))
		tracing_sched_wakeup_trace(ctx_trace, wakee, current,
					   flags, pc);

	local_irq_restore(flags);
}

static int tracing_sched_register(void)
{
	int ret;

	ret = register_trace_sched_wakeup(probe_sched_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup\n");
		return ret;
	}

	ret = register_trace_sched_wakeup_new(probe_sched_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = register_trace_sched_switch(probe_sched_switch, NULL);
	if (ret) {
		pr_info("sched trace: Couldn't activate tracepoint"
			" probe to kernel_sched_switch\n");
		goto fail_deprobe_wake_new;
	}

	return ret;
fail_deprobe_wake_new:
	unregister_trace_sched_wakeup_new(probe_sched_wakeup, NULL);
fail_deprobe:
	unregister_trace_sched_wakeup(probe_sched_wakeup, NULL);
	return ret;
}

static void tracing_sched_unregister(void)
{
	unregister_trace_sched_switch(probe_sched_switch, NULL);
	unregister_trace_sched_wakeup_new(probe_sched_wakeup, NULL);
	unregister_trace_sched_wakeup(probe_sched_wakeup, NULL);
}
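
/*
 * sched_ref counts the users of these probes (cmdline recording
 * and context switch recording); the tracepoints are registered
 * on the first user and unregistered when the last one goes away.
 */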
static void tracing_start_sched_switch(void)
{
	mutex_lock(&sched_register_mutex);
	if (!(sched_ref++))
		tracing_sched_register();
	mutex_unlock(&sched_register_mutex);
}

static void tracing_stop_sched_switch(void)
{
	mutex_lock(&sched_register_mutex);
	if (!(--sched_ref))
		tracing_sched_unregister();
	mutex_unlock(&sched_register_mutex);
}

void tracing_start_cmdline_record(void)
{
	tracing_start_sched_switch();
}

void tracing_stop_cmdline_record(void)
{
	tracing_stop_sched_switch();
}

/**
 * tracing_start_sched_switch_record - start tracing context switches
 *
 * Turns on context switch tracing for a tracer.
 */
void tracing_start_sched_switch_record(void)
{
	if (unlikely(!ctx_trace)) {
		WARN_ON(1);
		return;
	}

	tracing_start_sched_switch();

	mutex_lock(&sched_register_mutex);
	tracer_enabled++;
	mutex_unlock(&sched_register_mutex);
}

/**
 * tracing_stop_sched_switch_record - stop tracing context switches
 *
 * Turns off context switch tracing for a tracer.
 */
void tracing_stop_sched_switch_record(void)
{
	mutex_lock(&sched_register_mutex);
	tracer_enabled--;
	WARN_ON(tracer_enabled < 0);
	mutex_unlock(&sched_register_mutex);

	tracing_stop_sched_switch();
}

/**
 * tracing_sched_switch_assign_trace - assign a trace array for ctx switch
 * @tr: trace array pointer to assign
 *
 * Some tracers might want to record the context switches in their
 * trace. This function lets those tracers assign the trace array
 * to use.
 */
void tracing_sched_switch_assign_trace(struct trace_array *tr)
{
	ctx_trace = tr;
}
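
/*
 * Typical usage from a tracer (a minimal sketch; my_tracer_init is a
 * hypothetical tracer callback, not part of this file):
 *
 *	static int my_tracer_init(struct trace_array *tr)
 *	{
 *		tracing_sched_switch_assign_trace(tr);
 *		tracing_start_sched_switch_record();
 *		return 0;
 *	}
 *
 * The matching reset path would call tracing_stop_sched_switch_record().
 */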