/*
 * kernel/sched/debug.c
 *
 * Print the CFS rbtree and other debugging details
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include "sched.h"
static DEFINE_SPINLOCK(sched_debug_lock);
/*
 * This allows printing both to /proc/sched_debug and
 * to the console
 */
#define SEQ_printf(m, x...)                     \
 do {                                           \
        if (m)                                  \
                seq_printf(m, x);               \
        else                                    \
                pr_cont(x);                     \
 } while (0)
/*
 * Ease the printing of nsec fields:
 */
static long long nsec_high(unsigned long long nsec)
{
        if ((long long)nsec < 0) {
                nsec = -nsec;
                do_div(nsec, 1000000);
                return -nsec;
        }
        do_div(nsec, 1000000);

        return nsec;
}
static unsigned long nsec_low(unsigned long long nsec)
{
        if ((long long)nsec < 0)
                nsec = -nsec;

        return do_div(nsec, 1000000);
}
#define SPLIT_NS(x) nsec_high(x), nsec_low(x)
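
/*
 * Worked example (added for clarity, not in the original): for
 * x = 3123456789 ns, nsec_high(x) == 3123 and nsec_low(x) == 456789,
 * so a format of "%Ld.%06ld" with SPLIT_NS(x) prints "3123.456789",
 * i.e. milliseconds with six fractional digits.
 */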
#define SCHED_FEAT(name, enabled)       \
        #name ,

static const char * const sched_feat_names[] = {
#include "features.h"
};

#undef SCHED_FEAT
static int sched_feat_show(struct seq_file *m, void *v)
{
        int i;

        for (i = 0; i < __SCHED_FEAT_NR; i++) {
                if (!(sysctl_sched_features & (1UL << i)))
                        seq_puts(m, "NO_");
                seq_printf(m, "%s ", sched_feat_names[i]);
        }
        seq_puts(m, "\n");

        return 0;
}
#ifdef HAVE_JUMP_LABEL

#define jump_label_key__true  STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE

#define SCHED_FEAT(name, enabled)       \
        jump_label_key__##enabled ,

struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};

#undef SCHED_FEAT

static void sched_feat_disable(int i)
{
        static_key_disable(&sched_feat_keys[i]);
}

static void sched_feat_enable(int i)
{
        static_key_enable(&sched_feat_keys[i]);
}
#else
static void sched_feat_disable(int i) { };
static void sched_feat_enable(int i) { };
#endif /* HAVE_JUMP_LABEL */
static int sched_feat_set(char *cmp)
{
        int i;
        int neg = 0;

        if (strncmp(cmp, "NO_", 3) == 0) {
                neg = 1;
                cmp += 3;
        }

        for (i = 0; i < __SCHED_FEAT_NR; i++) {
                if (strcmp(cmp, sched_feat_names[i]) == 0) {
                        if (neg) {
                                sysctl_sched_features &= ~(1UL << i);
                                sched_feat_disable(i);
                        } else {
                                sysctl_sched_features |= (1UL << i);
                                sched_feat_enable(i);
                        }
                        break;
                }
        }

        return i;
}
static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
                size_t cnt, loff_t *ppos)
{
        char buf[64];
        char *cmp;
        int i;
        struct inode *inode;

        if (cnt > 63)
                cnt = 63;

        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;

        buf[cnt] = 0;
        cmp = strstrip(buf);

        /* Ensure the static_key remains in a consistent state */
        inode = file_inode(filp);
        inode_lock(inode);
        i = sched_feat_set(cmp);
        inode_unlock(inode);
        if (i == __SCHED_FEAT_NR)
                return -EINVAL;

        *ppos += cnt;

        return cnt;
}
static int sched_feat_open(struct inode *inode, struct file *filp)
{
        return single_open(filp, sched_feat_show, NULL);
}
static const struct file_operations sched_feat_fops = {
        .open           = sched_feat_open,
        .write          = sched_feat_write,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
__read_mostly bool sched_debug_enabled;

static __init int sched_init_debug(void)
{
        debugfs_create_file("sched_features", 0644, NULL, NULL,
                        &sched_feat_fops);

        debugfs_create_bool("sched_debug", 0644, NULL,
                        &sched_debug_enabled);

        return 0;
}
late_initcall(sched_init_debug);
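
/*
 * Usage sketch (assumes debugfs is mounted at /sys/kernel/debug):
 *
 *   # cat /sys/kernel/debug/sched_features
 *   GENTLE_FAIR_SLEEPERS START_DEBIT ...
 *   # echo NO_GENTLE_FAIR_SLEEPERS > /sys/kernel/debug/sched_features
 *
 * Writing "NAME" sets a feature bit, "NO_NAME" clears it; unknown names
 * are rejected with -EINVAL by sched_feat_write() above.
 */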
#ifdef CONFIG_SMP
#ifdef CONFIG_SYSCTL

static struct ctl_table sd_ctl_dir[] = {
        {
                .procname       = "sched_domain",
                .mode           = 0555,
        },
        {}
};

static struct ctl_table sd_ctl_root[] = {
        {
                .procname       = "kernel",
                .mode           = 0555,
                .child          = sd_ctl_dir,
        },
        {}
};
static struct ctl_table *sd_alloc_ctl_entry(int n)
{
        struct ctl_table *entry =
                kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);

        return entry;
}
static void sd_free_ctl_entry(struct ctl_table **tablep)
{
        struct ctl_table *entry;

        /*
         * In the intermediate directories, both the child directory and
         * procname are dynamically allocated and could fail but the mode
         * will always be set. In the lowest directory the names are
         * static strings and all have proc handlers.
         */
        for (entry = *tablep; entry->mode; entry++) {
                if (entry->child)
                        sd_free_ctl_entry(&entry->child);
                if (entry->proc_handler == NULL)
                        kfree(entry->procname);
        }

        kfree(*tablep);
        *tablep = NULL;
}
static int min_load_idx = 0;
static int max_load_idx = CPU_LOAD_IDX_MAX-1;
static void
set_table_entry(struct ctl_table *entry,
                const char *procname, void *data, int maxlen,
                umode_t mode, proc_handler *proc_handler,
                bool load_idx)
{
        entry->procname = procname;
        entry->data = data;
        entry->maxlen = maxlen;
        entry->mode = mode;
        entry->proc_handler = proc_handler;

        if (load_idx) {
                entry->extra1 = &min_load_idx;
                entry->extra2 = &max_load_idx;
        }
}
static struct ctl_table *
sd_alloc_ctl_domain_table(struct sched_domain *sd)
{
        struct ctl_table *table = sd_alloc_ctl_entry(14);

        if (table == NULL)
                return NULL;

        set_table_entry(&table[0] , "min_interval",        &sd->min_interval,        sizeof(long), 0644, proc_doulongvec_minmax, false);
        set_table_entry(&table[1] , "max_interval",        &sd->max_interval,        sizeof(long), 0644, proc_doulongvec_minmax, false);
        set_table_entry(&table[2] , "busy_idx",            &sd->busy_idx,            sizeof(int) , 0644, proc_dointvec_minmax,   true );
        set_table_entry(&table[3] , "idle_idx",            &sd->idle_idx,            sizeof(int) , 0644, proc_dointvec_minmax,   true );
        set_table_entry(&table[4] , "newidle_idx",         &sd->newidle_idx,         sizeof(int) , 0644, proc_dointvec_minmax,   true );
        set_table_entry(&table[5] , "wake_idx",            &sd->wake_idx,            sizeof(int) , 0644, proc_dointvec_minmax,   true );
        set_table_entry(&table[6] , "forkexec_idx",        &sd->forkexec_idx,        sizeof(int) , 0644, proc_dointvec_minmax,   true );
        set_table_entry(&table[7] , "busy_factor",         &sd->busy_factor,         sizeof(int) , 0644, proc_dointvec_minmax,   false);
        set_table_entry(&table[8] , "imbalance_pct",       &sd->imbalance_pct,       sizeof(int) , 0644, proc_dointvec_minmax,   false);
        set_table_entry(&table[9] , "cache_nice_tries",    &sd->cache_nice_tries,    sizeof(int) , 0644, proc_dointvec_minmax,   false);
        set_table_entry(&table[10], "flags",               &sd->flags,               sizeof(int) , 0644, proc_dointvec_minmax,   false);
        set_table_entry(&table[11], "max_newidle_lb_cost", &sd->max_newidle_lb_cost, sizeof(long), 0644, proc_doulongvec_minmax, false);
        set_table_entry(&table[12], "name",                sd->name,            CORENAME_MAX_SIZE, 0444, proc_dostring,          false);
        /* &table[13] is terminator */

        return table;
}
static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
{
        struct ctl_table *entry, *table;
        struct sched_domain *sd;
        int domain_num = 0, i;
        char buf[32];

        for_each_domain(cpu, sd)
                domain_num++;
        entry = table = sd_alloc_ctl_entry(domain_num + 1);
        if (table == NULL)
                return NULL;

        i = 0;
        for_each_domain(cpu, sd) {
                snprintf(buf, 32, "domain%d", i);
                entry->procname = kstrdup(buf, GFP_KERNEL);
                entry->mode = 0555;
                entry->child = sd_alloc_ctl_domain_table(sd);
                entry++;
                i++;
        }
        return table;
}
static cpumask_var_t sd_sysctl_cpus;
static struct ctl_table_header *sd_sysctl_header;
void register_sched_domain_sysctl(void)
{
        static struct ctl_table *cpu_entries;
        static struct ctl_table **cpu_idx;
        char buf[32];
        int i;

        if (!cpu_entries) {
                cpu_entries = sd_alloc_ctl_entry(num_possible_cpus() + 1);
                if (!cpu_entries)
                        return;

                WARN_ON(sd_ctl_dir[0].child);
                sd_ctl_dir[0].child = cpu_entries;
        }

        if (!cpu_idx) {
                struct ctl_table *e = cpu_entries;

                cpu_idx = kcalloc(nr_cpu_ids, sizeof(struct ctl_table *), GFP_KERNEL);
                if (!cpu_idx)
                        return;

                /* deal with sparse possible map */
                for_each_possible_cpu(i) {
                        cpu_idx[i] = e;
                        e++;
                }
        }

        if (!cpumask_available(sd_sysctl_cpus)) {
                if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
                        return;

                /* init to possible to not have holes in @cpu_entries */
                cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
        }

        for_each_cpu(i, sd_sysctl_cpus) {
                struct ctl_table *e = cpu_idx[i];

                if (e->child)
                        sd_free_ctl_entry(&e->child);

                if (!e->procname) {
                        snprintf(buf, 32, "cpu%d", i);
                        e->procname = kstrdup(buf, GFP_KERNEL);
                }
                e->mode = 0555;
                e->child = sd_alloc_ctl_cpu_table(i);

                __cpumask_clear_cpu(i, sd_sysctl_cpus);
        }

        WARN_ON(sd_sysctl_header);
        sd_sysctl_header = register_sysctl_table(sd_ctl_root);
}
void dirty_sched_domain_sysctl(int cpu)
{
        if (cpumask_available(sd_sysctl_cpus))
                __cpumask_set_cpu(cpu, sd_sysctl_cpus);
}

/* may be called multiple times per register */
void unregister_sched_domain_sysctl(void)
{
        unregister_sysctl_table(sd_sysctl_header);
        sd_sysctl_header = NULL;
}
#endif /* CONFIG_SYSCTL */
#endif /* CONFIG_SMP */
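
/*
 * Resulting sysctl layout (illustrative, assuming two sched_domain
 * levels; not part of the original source):
 *
 *   /proc/sys/kernel/sched_domain/cpu0/domain0/min_interval
 *   /proc/sys/kernel/sched_domain/cpu0/domain0/flags
 *   /proc/sys/kernel/sched_domain/cpu0/domain1/name
 *   ...
 *
 * One "cpuN" directory per possible CPU, one "domainM" directory per
 * domain level, each populated by sd_alloc_ctl_domain_table().
 */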
#ifdef CONFIG_FAIR_GROUP_SCHED
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
        struct sched_entity *se = tg->se[cpu];

#define P(F)            SEQ_printf(m, "  .%-30s: %lld\n",       #F, (long long)F)
#define P_SCHEDSTAT(F)  SEQ_printf(m, "  .%-30s: %lld\n",       #F, (long long)schedstat_val(F))
#define PN(F)           SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN_SCHEDSTAT(F) SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(F)))

        if (!se)
                return;

        PN(se->exec_start);
        PN(se->vruntime);
        PN(se->sum_exec_runtime);

        if (schedstat_enabled()) {
                PN_SCHEDSTAT(se->statistics.wait_start);
                PN_SCHEDSTAT(se->statistics.sleep_start);
                PN_SCHEDSTAT(se->statistics.block_start);
                PN_SCHEDSTAT(se->statistics.sleep_max);
                PN_SCHEDSTAT(se->statistics.block_max);
                PN_SCHEDSTAT(se->statistics.exec_max);
                PN_SCHEDSTAT(se->statistics.slice_max);
                PN_SCHEDSTAT(se->statistics.wait_max);
                PN_SCHEDSTAT(se->statistics.wait_sum);
                P_SCHEDSTAT(se->statistics.wait_count);
        }

        P(se->load.weight);
        P(se->runnable_weight);
#ifdef CONFIG_SMP
        P(se->avg.load_avg);
        P(se->avg.util_avg);
        P(se->avg.runnable_load_avg);
#endif

#undef PN_SCHEDSTAT
#undef PN
#undef P_SCHEDSTAT
#undef P
}
#endif
#ifdef CONFIG_CGROUP_SCHED
static char group_path[PATH_MAX];

static char *task_group_path(struct task_group *tg)
{
        if (autogroup_path(tg, group_path, PATH_MAX))
                return group_path;

        cgroup_path(tg->css.cgroup, group_path, PATH_MAX);

        return group_path;
}
#endif
static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
        if (rq->curr == p)
                SEQ_printf(m, ">R");
        else
                SEQ_printf(m, " %c", task_state_to_char(p));

        SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ",
                p->comm, task_pid_nr(p),
                SPLIT_NS(p->se.vruntime),
                (long long)(p->nvcsw + p->nivcsw),
                p->prio);

        SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
                SPLIT_NS(schedstat_val_or_zero(p->se.statistics.wait_sum)),
                SPLIT_NS(p->se.sum_exec_runtime),
                SPLIT_NS(schedstat_val_or_zero(p->se.statistics.sum_sleep_runtime)));

#ifdef CONFIG_NUMA_BALANCING
        SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
#endif
#ifdef CONFIG_CGROUP_SCHED
        SEQ_printf(m, " %s", task_group_path(task_group(p)));
#endif

        SEQ_printf(m, "\n");
}
static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
        struct task_struct *g, *p;

        SEQ_printf(m, "\n");
        SEQ_printf(m, "runnable tasks:\n");
        SEQ_printf(m, " S           task   PID         tree-key  switches  prio"
                   "     wait-time             sum-exec        sum-sleep\n");
        SEQ_printf(m, "-------------------------------------------------------"
                   "----------------------------------------------------\n");

        rcu_read_lock();
        for_each_process_thread(g, p) {
                if (task_cpu(p) != rq_cpu)
                        continue;

                print_task(m, rq, p);
        }
        rcu_read_unlock();
}
void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
        s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
                spread, rq0_min_vruntime, spread0;
        struct rq *rq = cpu_rq(cpu);
        struct sched_entity *last;
        unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
        SEQ_printf(m, "\n");
        SEQ_printf(m, "cfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
#else
        SEQ_printf(m, "\n");
        SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
#endif
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
                        SPLIT_NS(cfs_rq->exec_clock));

        raw_spin_lock_irqsave(&rq->lock, flags);
        if (rb_first_cached(&cfs_rq->tasks_timeline))
                MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
        last = __pick_last_entity(cfs_rq);
        if (last)
                max_vruntime = last->vruntime;
        min_vruntime = cfs_rq->min_vruntime;
        rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
        raw_spin_unlock_irqrestore(&rq->lock, flags);
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
                        SPLIT_NS(MIN_vruntime));
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
                        SPLIT_NS(min_vruntime));
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
                        SPLIT_NS(max_vruntime));
        spread = max_vruntime - MIN_vruntime;
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
                        SPLIT_NS(spread));
        spread0 = min_vruntime - rq0_min_vruntime;
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
                        SPLIT_NS(spread0));
        SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
                        cfs_rq->nr_spread_over);
        SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
        SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_SMP
        SEQ_printf(m, "  .%-30s: %ld\n", "runnable_weight", cfs_rq->runnable_weight);
        SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
                        cfs_rq->avg.load_avg);
        SEQ_printf(m, "  .%-30s: %lu\n", "runnable_load_avg",
                        cfs_rq->avg.runnable_load_avg);
        SEQ_printf(m, "  .%-30s: %lu\n", "util_avg",
                        cfs_rq->avg.util_avg);
        SEQ_printf(m, "  .%-30s: %u\n", "util_est_enqueued",
                        cfs_rq->avg.util_est.enqueued);
        SEQ_printf(m, "  .%-30s: %ld\n", "removed.load_avg",
                        cfs_rq->removed.load_avg);
        SEQ_printf(m, "  .%-30s: %ld\n", "removed.util_avg",
                        cfs_rq->removed.util_avg);
        SEQ_printf(m, "  .%-30s: %ld\n", "removed.runnable_sum",
                        cfs_rq->removed.runnable_sum);
#ifdef CONFIG_FAIR_GROUP_SCHED
        SEQ_printf(m, "  .%-30s: %lu\n", "tg_load_avg_contrib",
                        cfs_rq->tg_load_avg_contrib);
        SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
                        atomic_long_read(&cfs_rq->tg->load_avg));
#endif
#endif
#ifdef CONFIG_CFS_BANDWIDTH
        SEQ_printf(m, "  .%-30s: %d\n", "throttled",
                        cfs_rq->throttled);
        SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
                        cfs_rq->throttle_count);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
        print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}
void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
        SEQ_printf(m, "\n");
        SEQ_printf(m, "rt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
#else
        SEQ_printf(m, "\n");
        SEQ_printf(m, "rt_rq[%d]:\n", cpu);
#endif

#define P(x) \
        SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PU(x) \
        SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(rt_rq->x))
#define PN(x) \
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

        PU(rt_nr_running);
#ifdef CONFIG_SMP
        PU(rt_nr_migratory);
#endif
        P(rt_throttled);
        PN(rt_time);
        PN(rt_runtime);

#undef PN
#undef PU
#undef P
}
void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
{
        struct dl_bw *dl_bw;

        SEQ_printf(m, "\n");
        SEQ_printf(m, "dl_rq[%d]:\n", cpu);

#define PU(x) \
        SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(dl_rq->x))

        PU(dl_nr_running);
#ifdef CONFIG_SMP
        PU(dl_nr_migratory);
        dl_bw = &cpu_rq(cpu)->rd->dl_bw;
#else
        dl_bw = &dl_rq->dl_bw;
#endif
        SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw);
        SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw);

#undef PU
}
extern __read_mostly int sched_clock_running;

static void print_cpu(struct seq_file *m, int cpu)
{
        struct rq *rq = cpu_rq(cpu);
        unsigned long flags;

#ifdef CONFIG_X86
        {
                unsigned int freq = cpu_khz ? : 1;

                SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
                           cpu, freq / 1000, (freq % 1000));
        }
#else
        SEQ_printf(m, "cpu#%d\n", cpu);
#endif

#define P(x)                                                            \
do {                                                                    \
        if (sizeof(rq->x) == 4)                                         \
                SEQ_printf(m, "  .%-30s: %ld\n", #x, (long)(rq->x));    \
        else                                                            \
                SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)

#define PN(x) \
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

        P(nr_running);
        SEQ_printf(m, "  .%-30s: %lu\n", "load",
                   rq->load.weight);
        P(nr_switches);
        P(nr_load_updates);
        P(nr_uninterruptible);
        PN(next_balance);
        SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
        PN(clock);
        PN(clock_task);
        P(cpu_load[0]);
        P(cpu_load[1]);
        P(cpu_load[2]);
        P(cpu_load[3]);
        P(cpu_load[4]);
#undef P
#undef PN

#ifdef CONFIG_SMP
#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);
        P64(avg_idle);
        P64(max_idle_balance_cost);
#undef P64
#endif

#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, schedstat_val(rq->n));
        if (schedstat_enabled()) {
                P(yld_count);
                P(sched_count);
                P(sched_goidle);
                P(ttwu_count);
                P(ttwu_local);
        }
#undef P

        spin_lock_irqsave(&sched_debug_lock, flags);
        print_cfs_stats(m, cpu);
        print_rt_stats(m, cpu);
        print_dl_stats(m, cpu);

        print_rq(m, rq, cpu);
        spin_unlock_irqrestore(&sched_debug_lock, flags);
        SEQ_printf(m, "\n");
}
static const char *sched_tunable_scaling_names[] = {
        "none",
        "logarithmic",
        "linear"
};
static void sched_debug_header(struct seq_file *m)
{
        u64 ktime, sched_clk, cpu_clk;
        unsigned long flags;

        local_irq_save(flags);
        ktime = ktime_to_ns(ktime_get());
        sched_clk = sched_clock();
        cpu_clk = local_clock();
        local_irq_restore(flags);

        SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
                init_utsname()->release,
                (int)strcspn(init_utsname()->version, " "),
                init_utsname()->version);

#define P(x) \
        SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
        SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
        PN(ktime);
        PN(sched_clk);
        PN(cpu_clk);
        P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
        P(sched_clock_stable());
#endif
#undef PN
#undef P

        SEQ_printf(m, "\n");
        SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
        SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
        SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
        PN(sysctl_sched_latency);
        PN(sysctl_sched_min_granularity);
        PN(sysctl_sched_wakeup_granularity);
        P(sysctl_sched_child_runs_first);
        P(sysctl_sched_features);
#undef PN
#undef P

        SEQ_printf(m, "  .%-40s: %d (%s)\n",
                "sysctl_sched_tunable_scaling",
                sysctl_sched_tunable_scaling,
                sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
        SEQ_printf(m, "\n");
}
static int sched_debug_show(struct seq_file *m, void *v)
{
        int cpu = (unsigned long)(v - 2);

        if (cpu != -1)
                print_cpu(m, cpu);
        else
                sched_debug_header(m);

        return 0;
}
void sysrq_sched_debug_show(void)
{
        int cpu;

        sched_debug_header(NULL);
        for_each_online_cpu(cpu)
                print_cpu(NULL, cpu);
}
/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is CPU 0.
 * In a hotplugged system some CPUs, including CPU 0, may be missing so we have
 * to use cpumask_* to iterate over the CPUs.
 */
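
/*
 * Worked example (added for clarity, not in the original): with CPUs
 * {0, 2} online, successive *offset values map as
 *   *offset == 0 -> header (returns (void *)1)
 *   *offset == 1 -> CPU 0  (returns (void *)2)
 *   *offset == 2 -> CPU 2, the next online CPU (returns (void *)4)
 * and sched_debug_show() recovers the CPU number as (v - 2).
 */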
static void *sched_debug_start(struct seq_file *file, loff_t *offset)
{
        unsigned long n = *offset;

        if (n == 0)
                return (void *) 1;

        n--;

        if (n > 0)
                n = cpumask_next(n - 1, cpu_online_mask);
        else
                n = cpumask_first(cpu_online_mask);

        *offset = n + 1;

        if (n < nr_cpu_ids)
                return (void *)(unsigned long)(n + 2);

        return NULL;
}
static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
{
        (*offset)++;
        return sched_debug_start(file, offset);
}

static void sched_debug_stop(struct seq_file *file, void *data)
{
}
static const struct seq_operations sched_debug_sops = {
        .start          = sched_debug_start,
        .next           = sched_debug_next,
        .stop           = sched_debug_stop,
        .show           = sched_debug_show,
};
static int sched_debug_release(struct inode *inode, struct file *file)
{
        seq_release(inode, file);

        return 0;
}

static int sched_debug_open(struct inode *inode, struct file *filp)
{
        int ret = 0;

        ret = seq_open(filp, &sched_debug_sops);

        return ret;
}
static const struct file_operations sched_debug_fops = {
        .open           = sched_debug_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = sched_debug_release,
};
static int __init init_sched_debug_procfs(void)
{
        struct proc_dir_entry *pe;

        pe = proc_create("sched_debug", 0444, NULL, &sched_debug_fops);
        if (!pe)
                return -ENOMEM;
        return 0;
}

__initcall(init_sched_debug_procfs);
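
/*
 * Illustrative /proc/sched_debug excerpt (field names come from the
 * format strings above; the values shown here are made up):
 *
 *   Sched Debug Version: v0.11, 4.17.0 #1
 *   cpu#0, 2400.000 MHz
 *     .nr_running                    : 1
 *     .load                          : 1024
 *   ...
 */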
#define __P(F)  SEQ_printf(m, "%-45s:%21Ld\n",       #F, (long long)F)
#define   P(F)  SEQ_printf(m, "%-45s:%21Ld\n",       #F, (long long)p->F)
#define __PN(F) SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define   PN(F) SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))
#ifdef CONFIG_NUMA_BALANCING
void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
                unsigned long tpf, unsigned long gsf, unsigned long gpf)
{
        SEQ_printf(m, "numa_faults node=%d ", node);
        SEQ_printf(m, "task_private=%lu task_shared=%lu ", tsf, tpf);
        SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gsf, gpf);
}
#endif
static void sched_show_numa(struct task_struct *p, struct seq_file *m)
{
#ifdef CONFIG_NUMA_BALANCING
        struct mempolicy *pol;

        if (p->mm)
                P(mm->numa_scan_seq);

        task_lock(p);
        pol = p->mempolicy;
        if (pol && !(pol->flags & MPOL_F_MORON))
                pol = NULL;
        mpol_get(pol);
        task_unlock(p);

        P(numa_pages_migrated);
        P(numa_preferred_nid);
        P(total_numa_faults);
        SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
                        task_node(p), task_numa_group_id(p));
        show_numa_stats(p, m);
        mpol_put(pol);
#endif
}
void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
                                                  struct seq_file *m)
{
        unsigned long nr_switches;

        SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
                                                get_nr_threads(p));
        SEQ_printf(m,
                "---------------------------------------------------------"
                "----------\n");
#define __P(F) \
        SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
#define P(F) \
        SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
#define P_SCHEDSTAT(F) \
        SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)schedstat_val(p->F))
#define __PN(F) \
        SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) \
        SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))
#define PN_SCHEDSTAT(F) \
        SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(p->F)))

        PN(se.exec_start);
        PN(se.vruntime);
        PN(se.sum_exec_runtime);

        nr_switches = p->nvcsw + p->nivcsw;

        P(se.nr_migrations);

        if (schedstat_enabled()) {
                u64 avg_atom, avg_per_cpu;

                PN_SCHEDSTAT(se.statistics.sum_sleep_runtime);
                PN_SCHEDSTAT(se.statistics.wait_start);
                PN_SCHEDSTAT(se.statistics.sleep_start);
                PN_SCHEDSTAT(se.statistics.block_start);
                PN_SCHEDSTAT(se.statistics.sleep_max);
                PN_SCHEDSTAT(se.statistics.block_max);
                PN_SCHEDSTAT(se.statistics.exec_max);
                PN_SCHEDSTAT(se.statistics.slice_max);
                PN_SCHEDSTAT(se.statistics.wait_max);
                PN_SCHEDSTAT(se.statistics.wait_sum);
                P_SCHEDSTAT(se.statistics.wait_count);
                PN_SCHEDSTAT(se.statistics.iowait_sum);
                P_SCHEDSTAT(se.statistics.iowait_count);
                P_SCHEDSTAT(se.statistics.nr_migrations_cold);
                P_SCHEDSTAT(se.statistics.nr_failed_migrations_affine);
                P_SCHEDSTAT(se.statistics.nr_failed_migrations_running);
                P_SCHEDSTAT(se.statistics.nr_failed_migrations_hot);
                P_SCHEDSTAT(se.statistics.nr_forced_migrations);
                P_SCHEDSTAT(se.statistics.nr_wakeups);
                P_SCHEDSTAT(se.statistics.nr_wakeups_sync);
                P_SCHEDSTAT(se.statistics.nr_wakeups_migrate);
                P_SCHEDSTAT(se.statistics.nr_wakeups_local);
                P_SCHEDSTAT(se.statistics.nr_wakeups_remote);
                P_SCHEDSTAT(se.statistics.nr_wakeups_affine);
                P_SCHEDSTAT(se.statistics.nr_wakeups_affine_attempts);
                P_SCHEDSTAT(se.statistics.nr_wakeups_passive);
                P_SCHEDSTAT(se.statistics.nr_wakeups_idle);

                avg_atom = p->se.sum_exec_runtime;
                if (nr_switches)
                        avg_atom = div64_ul(avg_atom, nr_switches);
                else
                        avg_atom = -1LL;

                avg_per_cpu = p->se.sum_exec_runtime;
                if (p->se.nr_migrations) {
                        avg_per_cpu = div64_u64(avg_per_cpu,
                                                p->se.nr_migrations);
                } else {
                        avg_per_cpu = -1LL;
                }

                __PN(avg_atom);
                __PN(avg_per_cpu);
        }

        __P(nr_switches);
        SEQ_printf(m, "%-45s:%21Ld\n",
                   "nr_voluntary_switches", (long long)p->nvcsw);
        SEQ_printf(m, "%-45s:%21Ld\n",
                   "nr_involuntary_switches", (long long)p->nivcsw);

        P(se.load.weight);
        P(se.runnable_weight);
#ifdef CONFIG_SMP
        P(se.avg.load_sum);
        P(se.avg.runnable_load_sum);
        P(se.avg.util_sum);
        P(se.avg.load_avg);
        P(se.avg.runnable_load_avg);
        P(se.avg.util_avg);
        P(se.avg.last_update_time);
        P(se.avg.util_est.ewma);
        P(se.avg.util_est.enqueued);
#endif
        P(policy);
        P(prio);
        if (p->policy == SCHED_DEADLINE) {
                P(dl.runtime);
                P(dl.deadline);
        }
#undef PN_SCHEDSTAT
#undef PN
#undef __PN
#undef P_SCHEDSTAT
#undef P
#undef __P

        {
                unsigned int this_cpu = raw_smp_processor_id();
                u64 t0, t1;

                t0 = cpu_clock(this_cpu);
                t1 = cpu_clock(this_cpu);
                SEQ_printf(m, "%-45s:%21Ld\n",
                           "clock-delta", (long long)(t1-t0));
        }

        sched_show_numa(p, m);
}
void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
        memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
}