#undef TRACE_SYSTEM
#define TRACE_SYSTEM sched

#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SCHED_H

#include <linux/sched.h>
#include <linux/tracepoint.h>
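
/*
 * Each event below follows the standard TRACE_EVENT() anatomy:
 * TP_PROTO() declares the tracepoint's C prototype, TP_ARGS() names the
 * arguments, TP_STRUCT__entry() lays out the record stored in the ring
 * buffer, TP_fast_assign() fills that record at the trace site, and
 * TP_printk() describes how the record is rendered as text.  Because
 * TRACE_SYSTEM is "sched", the resulting events appear under the
 * events/sched/ directory of the tracing debugfs mount.
 */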

/*
 * Tracepoint for calling kthread_stop, performed to end a kthread:
 */
TRACE_EVENT(sched_kthread_stop,

        TP_PROTO(struct task_struct *t),

        TP_ARGS(t),

        TP_STRUCT__entry(
                __array( char,  comm,   TASK_COMM_LEN )
                __field( pid_t, pid )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
                __entry->pid = t->pid;
        ),

        TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);

/*
 * Tracepoint for the return value of the kthread stopping:
 */
TRACE_EVENT(sched_kthread_stop_ret,

        TP_PROTO(int ret),

        TP_ARGS(ret),

        TP_STRUCT__entry(
                __field( int, ret )
        ),

        TP_fast_assign(
                __entry->ret = ret;
        ),

        TP_printk("ret=%d", __entry->ret)
);

/*
 * Tracepoint for waiting on task to unschedule:
 */
TRACE_EVENT(sched_wait_task,

        TP_PROTO(struct task_struct *p),

        TP_ARGS(p),

        TP_STRUCT__entry(
                __array( char,  comm,   TASK_COMM_LEN )
                __field( pid_t, pid )
                __field( int,   prio )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
                __entry->pid = p->pid;
                __entry->prio = p->prio;
        ),

        TP_printk("comm=%s pid=%d prio=%d",
                  __entry->comm, __entry->pid, __entry->prio)
);

/*
 * Tracepoint for waking up a task:
 */
DECLARE_EVENT_CLASS(sched_wakeup_template,

        TP_PROTO(struct task_struct *p, int success),

        TP_ARGS(p, success),

        TP_STRUCT__entry(
                __array( char,  comm,   TASK_COMM_LEN )
                __field( pid_t, pid )
                __field( int,   prio )
                __field( int,   success )
                __field( int,   target_cpu )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
                __entry->pid = p->pid;
                __entry->prio = p->prio;
                __entry->success = success;
                __entry->target_cpu = task_cpu(p);
        ),

        TP_printk("comm=%s pid=%d prio=%d success=%d target_cpu=%03d",
                  __entry->comm, __entry->pid, __entry->prio,
                  __entry->success, __entry->target_cpu)
);
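
/*
 * DECLARE_EVENT_CLASS() above only defines the shared record layout and
 * print format; it does not create a tracepoint by itself.  Each
 * DEFINE_EVENT() below instantiates a named tracepoint that reuses the
 * class, which keeps the generated code for closely related events
 * (here: normal wakeups and wakeups of freshly forked tasks) small.
 */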

DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
             TP_PROTO(struct task_struct *p, int success),
             TP_ARGS(p, success));

/*
 * Tracepoint for waking up a new task:
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
             TP_PROTO(struct task_struct *p, int success),
             TP_ARGS(p, success));

#ifdef CREATE_TRACE_POINTS
static inline long __trace_sched_switch_state(struct task_struct *p)
{
        long state = p->state;

#ifdef CONFIG_PREEMPT
        /*
         * For all intents and purposes a preempted task is a running task.
         */
        if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)
                state = TASK_RUNNING;
#endif

        return state;
}
#endif /* CREATE_TRACE_POINTS */
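
/*
 * Note: the helper above is wrapped in CREATE_TRACE_POINTS so it is only
 * compiled in the translation unit that actually instantiates the
 * tracepoints, not in every file that merely includes this header.  One
 * consequence of the preemption check is that an involuntary context
 * switch reports prev_state as 0, which is printed as "R" below.
 */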

/*
 * Tracepoint for task switches, performed by the scheduler:
 */
TRACE_EVENT(sched_switch,

        TP_PROTO(struct task_struct *prev,
                 struct task_struct *next),

        TP_ARGS(prev, next),

        TP_STRUCT__entry(
                __array( char,  prev_comm,      TASK_COMM_LEN )
                __field( pid_t, prev_pid )
                __field( int,   prev_prio )
                __field( long,  prev_state )
                __array( char,  next_comm,      TASK_COMM_LEN )
                __field( pid_t, next_pid )
                __field( int,   next_prio )
        ),

        TP_fast_assign(
                memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
                __entry->prev_pid = prev->pid;
                __entry->prev_prio = prev->prio;
                __entry->prev_state = __trace_sched_switch_state(prev);
                memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
                __entry->next_pid = next->pid;
                __entry->next_prio = next->prio;
        ),

        TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s ==> next_comm=%s next_pid=%d next_prio=%d",
                __entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
                __entry->prev_state ?
                  __print_flags(__entry->prev_state, "|",
                                { 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
                                { 16, "Z" }, { 32, "X" }, { 64, "x" },
                                { 128, "W" }) : "R",
                __entry->next_comm, __entry->next_pid, __entry->next_prio)
);
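
/*
 * Illustrative sketch (not part of this header): a probe attached to
 * sched_switch must match the TP_PROTO() above.  The per-event
 * registration helpers are macro-generated and their exact signature
 * differs between kernel versions, so only a hypothetical probe body is
 * shown here:
 *
 *      static void my_sched_switch_probe(struct task_struct *prev,
 *                                        struct task_struct *next)
 *      {
 *              trace_printk("switch: %s -> %s\n", prev->comm, next->comm);
 *      }
 */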

/*
 * Tracepoint for a task being migrated:
 */
TRACE_EVENT(sched_migrate_task,

        TP_PROTO(struct task_struct *p, int dest_cpu),

        TP_ARGS(p, dest_cpu),

        TP_STRUCT__entry(
                __array( char,  comm,   TASK_COMM_LEN )
                __field( pid_t, pid )
                __field( int,   prio )
                __field( int,   orig_cpu )
                __field( int,   dest_cpu )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
                __entry->pid = p->pid;
                __entry->prio = p->prio;
                __entry->orig_cpu = task_cpu(p);
                __entry->dest_cpu = dest_cpu;
        ),

        TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d",
                  __entry->comm, __entry->pid, __entry->prio,
                  __entry->orig_cpu, __entry->dest_cpu)
);

DECLARE_EVENT_CLASS(sched_process_template,

        TP_PROTO(struct task_struct *p),

        TP_ARGS(p),

        TP_STRUCT__entry(
                __array( char,  comm,   TASK_COMM_LEN )
                __field( pid_t, pid )
                __field( int,   prio )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
                __entry->pid = p->pid;
                __entry->prio = p->prio;
        ),

        TP_printk("comm=%s pid=%d prio=%d",
                  __entry->comm, __entry->pid, __entry->prio)
);

/*
 * Tracepoint for freeing a task:
 */
DEFINE_EVENT(sched_process_template, sched_process_free,
             TP_PROTO(struct task_struct *p),
             TP_ARGS(p));

/*
 * Tracepoint for a task exiting:
 */
DEFINE_EVENT(sched_process_template, sched_process_exit,
             TP_PROTO(struct task_struct *p),
             TP_ARGS(p));

/*
 * Tracepoint for a waiting task:
 */
TRACE_EVENT(sched_process_wait,

        TP_PROTO(struct pid *pid),

        TP_ARGS(pid),

        TP_STRUCT__entry(
                __array( char,  comm,   TASK_COMM_LEN )
                __field( pid_t, pid )
                __field( int,   prio )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
                __entry->pid = pid_nr(pid);
                __entry->prio = current->prio;
        ),

        TP_printk("comm=%s pid=%d prio=%d",
                  __entry->comm, __entry->pid, __entry->prio)
);
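
/*
 * Note that in sched_process_wait the comm and prio fields describe the
 * waiting task (current), while the pid field is the pid being waited
 * for, resolved with pid_nr().
 */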

/*
 * Tracepoint for do_fork:
 */
TRACE_EVENT(sched_process_fork,

        TP_PROTO(struct task_struct *parent, struct task_struct *child),

        TP_ARGS(parent, child),

        TP_STRUCT__entry(
                __array( char,  parent_comm,    TASK_COMM_LEN )
                __field( pid_t, parent_pid )
                __array( char,  child_comm,     TASK_COMM_LEN )
                __field( pid_t, child_pid )
        ),

        TP_fast_assign(
                memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN);
                __entry->parent_pid = parent->pid;
                memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN);
                __entry->child_pid = child->pid;
        ),

        TP_printk("comm=%s pid=%d child_comm=%s child_pid=%d",
                __entry->parent_comm, __entry->parent_pid,
                __entry->child_comm, __entry->child_pid)
);

/*
 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE;
 *     adding sched_stat support to SCHED_FIFO/RR would be welcome.
 */
DECLARE_EVENT_CLASS(sched_stat_template,

        TP_PROTO(struct task_struct *tsk, u64 delay),

        TP_ARGS(tsk, delay),

        TP_STRUCT__entry(
                __array( char,  comm,   TASK_COMM_LEN )
                __field( pid_t, pid )
                __field( u64,   delay )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
                __entry->pid = tsk->pid;
                __entry->delay = delay;
        ),

        TP_printk("comm=%s pid=%d delay=%Lu [ns]",
                        __entry->comm, __entry->pid,
                        (unsigned long long)__entry->delay)
);
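
/*
 * The delay recorded by sched_stat_template is always in nanoseconds; the
 * wait, sleep and iowait events below only differ in which delay the
 * scheduler statistics code feeds into this shared template.
 */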

/*
 * Tracepoint for accounting wait time (time the task is runnable
 * but not actually running due to scheduler contention).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_wait,
             TP_PROTO(struct task_struct *tsk, u64 delay),
             TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting sleep time (time the task is not runnable,
 * including iowait, see below).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_sleep,
             TP_PROTO(struct task_struct *tsk, u64 delay),
             TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting iowait time (time the task is not runnable
 * due to waiting on IO to complete).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_iowait,
             TP_PROTO(struct task_struct *tsk, u64 delay),
             TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting runtime (time the task is executing
 * on a CPU).
 */
TRACE_EVENT(sched_stat_runtime,

        TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),

        TP_ARGS(tsk, runtime, vruntime),

        TP_STRUCT__entry(
                __array( char,  comm,   TASK_COMM_LEN )
                __field( pid_t, pid )
                __field( u64,   runtime )
                __field( u64,   vruntime )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
                __entry->pid = tsk->pid;
                __entry->runtime = runtime;
                __entry->vruntime = vruntime;
        )
        TP_perf_assign(
                __perf_count(runtime);
        ),

        TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
                        __entry->comm, __entry->pid,
                        (unsigned long long)__entry->runtime,
                        (unsigned long long)__entry->vruntime)
);
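
/*
 * runtime is the wall-clock execution time charged since the last update,
 * while vruntime is CFS's weighted virtual runtime for the task; both are
 * reported in nanoseconds.  The __perf_count() hook additionally uses
 * runtime as the event count when this tracepoint is consumed through
 * perf.
 */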

#endif /* _TRACE_SCHED_H */

/* This part must be outside protection */
#include <trace/define_trace.h>