#undef TRACE_SYSTEM
#define TRACE_SYSTEM sched

#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SCHED_H

#include <linux/sched.h>
#include <linux/tracepoint.h>
#include <linux/binfmts.h>
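/*
 * Each event below is built from the TRACE_EVENT()/DECLARE_EVENT_CLASS()
 * macros: TP_PROTO() gives the probe prototype, TP_ARGS() the argument
 * names, TP_STRUCT__entry() and TP_fast_assign() describe how the ring
 * buffer record is laid out and filled, and TP_printk() its text rendering.
 */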
/*
 * Tracepoint for calling kthread_stop, performed to end a kthread:
 */
TRACE_EVENT(sched_kthread_stop,

	TP_PROTO(struct task_struct *t),

	TP_ARGS(t),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
		__entry->pid	= t->pid;
	),

	TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);
/*
 * Tracepoint for the return value of the kthread stopping:
 */
TRACE_EVENT(sched_kthread_stop_ret,

	TP_PROTO(int ret),

	TP_ARGS(ret),

	TP_STRUCT__entry(
		__field(	int,	ret	)
	),

	TP_fast_assign(
		__entry->ret	= ret;
	),

	TP_printk("ret=%d", __entry->ret)
);
/*
 * Tracepoint for waking up a task:
 */
DECLARE_EVENT_CLASS(sched_wakeup_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(__perf_task(p)),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
		__field(	int,	success			)
		__field(	int,	target_cpu		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio;
		__entry->success	= 1; /* rudiment, kill when possible */
		__entry->target_cpu	= task_cpu(p);
	),

	TP_printk("comm=%s pid=%d prio=%d target_cpu=%03d",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->target_cpu)
);
/*
 * Tracepoint called when waking a task; this tracepoint is guaranteed to be
 * called from the waking context.
 */
DEFINE_EVENT(sched_wakeup_template, sched_waking,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint called when the task is actually woken; p->state == TASK_RUNNING.
 * It is not always called from the waking context.
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));
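/*
 * Because sched_wakeup can fire on the CPU that processes a queued remote
 * wakeup rather than in the waker's context, wakeup-latency tooling is
 * generally better anchored on sched_waking above.
 */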
/*
 * Tracepoint for waking up a new task:
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));
#ifdef CREATE_TRACE_POINTS
static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
{
#ifdef CONFIG_SCHED_DEBUG
	BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */

	/*
	 * Preemption ignores task state, therefore preempted tasks are always
	 * RUNNING (we will not have dequeued if state != RUNNING).
	 */
	return preempt ? TASK_RUNNING | TASK_STATE_MAX : p->state;
}
#endif /* CREATE_TRACE_POINTS */
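/*
 * TASK_STATE_MAX is a single bit above every valid task-state flag, so
 * OR-ing it in marks the switch as a preemption without clobbering the
 * real state; sched_switch's TP_printk renders that bit as a "+" suffix.
 */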
/*
 * Tracepoint for task switches, performed by the scheduler:
 */
TRACE_EVENT(sched_switch,

	TP_PROTO(bool preempt,
		 struct task_struct *prev,
		 struct task_struct *next),

	TP_ARGS(preempt, prev, next),

	TP_STRUCT__entry(
		__array(	char,	prev_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	prev_pid			)
		__field(	int,	prev_prio			)
		__field(	long,	prev_state			)
		__array(	char,	next_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	next_pid			)
		__field(	int,	next_prio			)
	),

	TP_fast_assign(
		memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
		__entry->prev_pid	= prev->pid;
		__entry->prev_prio	= prev->prio;
		__entry->prev_state	= __trace_sched_switch_state(preempt, prev);
		memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
		__entry->next_pid	= next->pid;
		__entry->next_prio	= next->prio;
	),

	TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_pid=%d next_prio=%d",
		__entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
		__entry->prev_state & (TASK_STATE_MAX-1) ?
		  __print_flags(__entry->prev_state & (TASK_STATE_MAX-1), "|",
				{ 1, "S" }, { 2, "D" }, { 4, "T" }, { 8, "t" },
				{ 16, "Z" }, { 32, "X" }, { 64, "x" },
				{ 128, "K" }, { 256, "W" }, { 512, "P" },
				{ 1024, "N" }) : "R",
		__entry->prev_state & TASK_STATE_MAX ? "+" : "",
		__entry->next_comm, __entry->next_pid, __entry->next_prio)
);
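/*
 * The single-letter prev_state codes follow the usual task-state convention:
 * S (interruptible sleep), D (uninterruptible sleep), T (stopped),
 * t (traced), Z (zombie), X/x (dead), K (wakekill), W (waking),
 * P (parked), N (noload); "R" is printed when no state bit is set.
 */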
/*
 * Tracepoint for a task being migrated:
 */
TRACE_EVENT(sched_migrate_task,

	TP_PROTO(struct task_struct *p, int dest_cpu),

	TP_ARGS(p, dest_cpu),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
		__field(	int,	orig_cpu		)
		__field(	int,	dest_cpu		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio;
		__entry->orig_cpu	= task_cpu(p);
		__entry->dest_cpu	= dest_cpu;
	),

	TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->orig_cpu, __entry->dest_cpu)
);
DECLARE_EVENT_CLASS(sched_process_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(p),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio;
	),

	TP_printk("comm=%s pid=%d prio=%d",
		  __entry->comm, __entry->pid, __entry->prio)
);
/*
 * Tracepoint for freeing a task:
 */
DEFINE_EVENT(sched_process_template, sched_process_free,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for a task exiting:
 */
DEFINE_EVENT(sched_process_template, sched_process_exit,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for waiting on task to unschedule:
 */
DEFINE_EVENT(sched_process_template, sched_wait_task,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));
/*
 * Tracepoint for a waiting task:
 */
TRACE_EVENT(sched_process_wait,

	TP_PROTO(struct pid *pid),

	TP_ARGS(pid),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
		__entry->pid		= pid_nr(pid);
		__entry->prio		= current->prio;
	),

	TP_printk("comm=%s pid=%d prio=%d",
		  __entry->comm, __entry->pid, __entry->prio)
);
/*
 * Tracepoint for do_fork:
 */
TRACE_EVENT(sched_process_fork,

	TP_PROTO(struct task_struct *parent, struct task_struct *child),

	TP_ARGS(parent, child),

	TP_STRUCT__entry(
		__array(	char,	parent_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	parent_pid			)
		__array(	char,	child_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	child_pid			)
	),

	TP_fast_assign(
		memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN);
		__entry->parent_pid	= parent->pid;
		memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN);
		__entry->child_pid	= child->pid;
	),

	TP_printk("comm=%s pid=%d child_comm=%s child_pid=%d",
		__entry->parent_comm, __entry->parent_pid,
		__entry->child_comm, __entry->child_pid)
);
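/*
 * The events defined here show up under tracefs once registered; e.g.
 * (assuming the usual debugfs mount point):
 *
 *	echo 1 > /sys/kernel/debug/tracing/events/sched/sched_process_fork/enable
 *	cat /sys/kernel/debug/tracing/trace_pipe
 */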
/*
 * Tracepoint for exec:
 */
TRACE_EVENT(sched_process_exec,

	TP_PROTO(struct task_struct *p, pid_t old_pid,
		 struct linux_binprm *bprm),

	TP_ARGS(p, old_pid, bprm),

	TP_STRUCT__entry(
		__string(	filename,	bprm->filename	)
		__field(	pid_t,		pid		)
		__field(	pid_t,		old_pid		)
	),

	TP_fast_assign(
		__assign_str(filename, bprm->filename);
		__entry->pid		= p->pid;
		__entry->old_pid	= old_pid;
	),

	TP_printk("filename=%s pid=%d old_pid=%d", __get_str(filename),
		  __entry->pid, __entry->old_pid)
);
/*
 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE;
 *     adding sched_stat support to SCHED_FIFO/RR would be welcome.
 */
DECLARE_EVENT_CLASS(sched_stat_template,

	TP_PROTO(struct task_struct *tsk, u64 delay),

	TP_ARGS(__perf_task(tsk), __perf_count(delay)),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
		__field( u64,	delay			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid	= tsk->pid;
		__entry->delay	= delay;
	),

	TP_printk("comm=%s pid=%d delay=%Lu [ns]",
			__entry->comm, __entry->pid,
			(unsigned long long)__entry->delay)
);
/*
 * Tracepoint for accounting wait time (time the task is runnable
 * but not actually running due to scheduler contention).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_wait,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting sleep time (time the task is not runnable,
 * including iowait, see below).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_sleep,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting iowait time (time the task is not runnable
 * due to waiting on IO to complete).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_iowait,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));
/*
 * Tracepoint for accounting blocked time (time the task is in
 * uninterruptible sleep).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_blocked,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));
/*
 * Tracepoint for accounting runtime (time the task is executing
 * on a CPU).
 */
DECLARE_EVENT_CLASS(sched_stat_runtime,

	TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),

	TP_ARGS(tsk, __perf_count(runtime), vruntime),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
		__field( u64,	runtime			)
		__field( u64,	vruntime		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid		= tsk->pid;
		__entry->runtime	= runtime;
		__entry->vruntime	= vruntime;
	),

	TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
			__entry->comm, __entry->pid,
			(unsigned long long)__entry->runtime,
			(unsigned long long)__entry->vruntime)
);

DEFINE_EVENT(sched_stat_runtime, sched_stat_runtime,
	     TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
	     TP_ARGS(tsk, runtime, vruntime));
/*
 * Tracepoint for showing priority inheritance modifying a task's
 * priority.
 */
TRACE_EVENT(sched_pi_setprio,

	TP_PROTO(struct task_struct *tsk, int newprio),

	TP_ARGS(tsk, newprio),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
		__field( int,	oldprio			)
		__field( int,	newprio			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid		= tsk->pid;
		__entry->oldprio	= tsk->prio;
		__entry->newprio	= newprio;
	),

	TP_printk("comm=%s pid=%d oldprio=%d newprio=%d",
			__entry->comm, __entry->pid,
			__entry->oldprio, __entry->newprio)
);
#ifdef CONFIG_DETECT_HUNG_TASK
TRACE_EVENT(sched_process_hang,
	TP_PROTO(struct task_struct *tsk),
	TP_ARGS(tsk),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid = tsk->pid;
	),

	TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);
#endif /* CONFIG_DETECT_HUNG_TASK */
DECLARE_EVENT_CLASS(sched_move_task_template,

	TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),

	TP_ARGS(tsk, src_cpu, dst_cpu),

	TP_STRUCT__entry(
		__field( pid_t,	pid			)
		__field( pid_t,	tgid			)
		__field( pid_t,	ngid			)
		__field( int,	src_cpu			)
		__field( int,	src_nid			)
		__field( int,	dst_cpu			)
		__field( int,	dst_nid			)
	),

	TP_fast_assign(
		__entry->pid		= task_pid_nr(tsk);
		__entry->tgid		= task_tgid_nr(tsk);
		__entry->ngid		= task_numa_group_id(tsk);
		__entry->src_cpu	= src_cpu;
		__entry->src_nid	= cpu_to_node(src_cpu);
		__entry->dst_cpu	= dst_cpu;
		__entry->dst_nid	= cpu_to_node(dst_cpu);
	),

	TP_printk("pid=%d tgid=%d ngid=%d src_cpu=%d src_nid=%d dst_cpu=%d dst_nid=%d",
			__entry->pid, __entry->tgid, __entry->ngid,
			__entry->src_cpu, __entry->src_nid,
			__entry->dst_cpu, __entry->dst_nid)
);
/*
 * Tracks migration of tasks from one runqueue to another. Can be used to
 * detect if automatic NUMA balancing is bouncing between nodes.
 */
DEFINE_EVENT(sched_move_task_template, sched_move_numa,
	TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),

	TP_ARGS(tsk, src_cpu, dst_cpu)
);

DEFINE_EVENT(sched_move_task_template, sched_stick_numa,
	TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),

	TP_ARGS(tsk, src_cpu, dst_cpu)
);
TRACE_EVENT(sched_swap_numa,

	TP_PROTO(struct task_struct *src_tsk, int src_cpu,
		 struct task_struct *dst_tsk, int dst_cpu),

	TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu),

	TP_STRUCT__entry(
		__field( pid_t,	src_pid			)
		__field( pid_t,	src_tgid		)
		__field( pid_t,	src_ngid		)
		__field( int,	src_cpu			)
		__field( int,	src_nid			)
		__field( pid_t,	dst_pid			)
		__field( pid_t,	dst_tgid		)
		__field( pid_t,	dst_ngid		)
		__field( int,	dst_cpu			)
		__field( int,	dst_nid			)
	),

	TP_fast_assign(
		__entry->src_pid	= task_pid_nr(src_tsk);
		__entry->src_tgid	= task_tgid_nr(src_tsk);
		__entry->src_ngid	= task_numa_group_id(src_tsk);
		__entry->src_cpu	= src_cpu;
		__entry->src_nid	= cpu_to_node(src_cpu);
		__entry->dst_pid	= task_pid_nr(dst_tsk);
		__entry->dst_tgid	= task_tgid_nr(dst_tsk);
		__entry->dst_ngid	= task_numa_group_id(dst_tsk);
		__entry->dst_cpu	= dst_cpu;
		__entry->dst_nid	= cpu_to_node(dst_cpu);
	),

	TP_printk("src_pid=%d src_tgid=%d src_ngid=%d src_cpu=%d src_nid=%d dst_pid=%d dst_tgid=%d dst_ngid=%d dst_cpu=%d dst_nid=%d",
			__entry->src_pid, __entry->src_tgid, __entry->src_ngid,
			__entry->src_cpu, __entry->src_nid,
			__entry->dst_pid, __entry->dst_tgid, __entry->dst_ngid,
			__entry->dst_cpu, __entry->dst_nid)
);
/*
 * Tracepoint for waking a polling cpu without an IPI.
 */
TRACE_EVENT(sched_wake_idle_without_ipi,

	TP_PROTO(int cpu),

	TP_ARGS(cpu),

	TP_STRUCT__entry(
		__field(	int,	cpu	)
	),

	TP_fast_assign(
		__entry->cpu	= cpu;
	),

	TP_printk("cpu=%d", __entry->cpu)
);
#endif /* _TRACE_SCHED_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
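/*
 * Illustrative usage from other kernel code: each event above generates
 * register_trace_<name>()/unregister_trace_<name>() helpers; a probe
 * receives a void *data cookie followed by the TP_PROTO arguments. A
 * minimal sketch for sched_switch:
 *
 *	static void probe_sched_switch(void *data, bool preempt,
 *				       struct task_struct *prev,
 *				       struct task_struct *next)
 *	{
 *		trace_printk("%s[%d] -> %s[%d]\n",
 *			     prev->comm, prev->pid, next->comm, next->pid);
 *	}
 *
 *	register_trace_sched_switch(probe_sched_switch, NULL);
 *	...
 *	unregister_trace_sched_switch(probe_sched_switch, NULL);
 */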