/*
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/utsname.h>
#include <linux/mempolicy.h>
#include <linux/debugfs.h>
static DEFINE_SPINLOCK(sched_debug_lock);
/*
 * This allows printing both to /proc/sched_debug and
 * to the console:
 */
#define SEQ_printf(m, x...)			\
 do {						\
	if (m)					\
		seq_printf(m, x);		\
	else					\
		printk(x);			\
 } while (0)
/*
 * Ease the printing of nsec fields:
 */
static long long nsec_high(unsigned long long nsec)
{
	if ((long long)nsec < 0) {
		nsec = -nsec;
		do_div(nsec, 1000000);
		return -(long long)nsec;
	}
	do_div(nsec, 1000000);

	return (long long)nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		nsec = -nsec;

	return do_div(nsec, 1000000);
}
#define SPLIT_NS(x) nsec_high(x), nsec_low(x)
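
/*
 * For example, SEQ_printf(m, "%Ld.%06ld\n", SPLIT_NS(t)) renders
 * t = 3500000 ns as "3.500000": nsec_high() yields the signed
 * millisecond part, nsec_low() the six-digit remainder.
 */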
#define SCHED_FEAT(name, enabled)	\
	#name ,

static const char * const sched_feat_names[] = {
#include "features.h"
};

#undef SCHED_FEAT
static int sched_feat_show(struct seq_file *m, void *v)
{
	int i;

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (!(sysctl_sched_features & (1UL << i)))
			seq_puts(m, "NO_");
		seq_printf(m, "%s ", sched_feat_names[i]);
	}
	seq_puts(m, "\n");

	return 0;
}
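
/*
 * The resulting file is a single space-separated line of feature names;
 * disabled features carry a "NO_" prefix, e.g. (illustrative):
 *
 *   GENTLE_FAIR_SLEEPERS START_DEBIT NO_NEXT_BUDDY LAST_BUDDY ...
 */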
#ifdef HAVE_JUMP_LABEL

#define jump_label_key__true  STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE

#define SCHED_FEAT(name, enabled)	\
	jump_label_key__##enabled ,

struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};

#undef SCHED_FEAT
static void sched_feat_disable(int i)
{
	static_key_disable(&sched_feat_keys[i]);
}

static void sched_feat_enable(int i)
{
	static_key_enable(&sched_feat_keys[i]);
}
#else
static void sched_feat_disable(int i) { };
static void sched_feat_enable(int i) { };
#endif /* HAVE_JUMP_LABEL */
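
/*
 * With HAVE_JUMP_LABEL, sched_feat() tests compile to statically patched
 * branches keyed on sched_feat_keys[], so toggling a feature rewrites the
 * branch instead of testing sysctl_sched_features on every call.
 */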
static int sched_feat_set(char *cmp)
{
	int i;
	int neg = 0;

	if (strncmp(cmp, "NO_", 3) == 0) {
		neg = 1;
		cmp += 3;
	}

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (strcmp(cmp, sched_feat_names[i]) == 0) {
			if (neg) {
				sysctl_sched_features &= ~(1UL << i);
				sched_feat_disable(i);
			} else {
				sysctl_sched_features |= (1UL << i);
				sched_feat_enable(i);
			}
			break;
		}
	}

	return i;
}
static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp;
	int i;
	struct inode *inode;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	cmp = strstrip(buf);

	/* Ensure the static_key remains in a consistent state */
	inode = file_inode(filp);
	inode_lock(inode);
	i = sched_feat_set(cmp);
	inode_unlock(inode);
	if (i == __SCHED_FEAT_NR)
		return -EINVAL;

	*ppos += cnt;

	return cnt;
}
static int sched_feat_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_feat_show, NULL);
}
static const struct file_operations sched_feat_fops = {
	.open		= sched_feat_open,
	.write		= sched_feat_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static __init int sched_init_debug(void)
{
	debugfs_create_file("sched_features", 0644, NULL, NULL,
			&sched_feat_fops);

	return 0;
}
late_initcall(sched_init_debug);
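
/*
 * Typical usage, assuming debugfs is mounted at /sys/kernel/debug:
 *
 *   cat /sys/kernel/debug/sched_features
 *   echo NO_GENTLE_FAIR_SLEEPERS > /sys/kernel/debug/sched_features
 */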
#ifdef CONFIG_SMP
#ifdef CONFIG_SYSCTL

static struct ctl_table sd_ctl_dir[] = {
	{
		.procname	= "sched_domain",
		.mode		= 0555,
	},
	{}
};

static struct ctl_table sd_ctl_root[] = {
	{
		.procname	= "kernel",
		.mode		= 0555,
		.child		= sd_ctl_dir,
	},
	{}
};
static struct ctl_table *sd_alloc_ctl_entry(int n)
{
	struct ctl_table *entry =
		kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);

	return entry;
}
static void sd_free_ctl_entry(struct ctl_table **tablep)
{
	struct ctl_table *entry;

	/*
	 * In the intermediate directories, both the child directory and
	 * procname are dynamically allocated and could fail but the mode
	 * will always be set. In the lowest directory the names are
	 * static strings and all have proc handlers.
	 */
	for (entry = *tablep; entry->mode; entry++) {
		if (entry->child)
			sd_free_ctl_entry(&entry->child);
		if (entry->proc_handler == NULL)
			kfree(entry->procname);
	}

	kfree(*tablep);
	*tablep = NULL;
}
static int min_load_idx = 0;
static int max_load_idx = CPU_LOAD_IDX_MAX-1;
static void
set_table_entry(struct ctl_table *entry,
		const char *procname, void *data, int maxlen,
		umode_t mode, proc_handler *proc_handler,
		bool load_idx)
{
	entry->procname = procname;
	entry->data = data;
	entry->maxlen = maxlen;
	entry->mode = mode;
	entry->proc_handler = proc_handler;

	if (load_idx) {
		/* Clamp the *_idx entries to the valid cpu_load[] range. */
		entry->extra1 = &min_load_idx;
		entry->extra2 = &max_load_idx;
	}
}
static struct ctl_table *
sd_alloc_ctl_domain_table(struct sched_domain *sd)
{
	struct ctl_table *table = sd_alloc_ctl_entry(14);

	if (table == NULL)
		return NULL;

	set_table_entry(&table[0], "min_interval", &sd->min_interval,
		sizeof(long), 0644, proc_doulongvec_minmax, false);
	set_table_entry(&table[1], "max_interval", &sd->max_interval,
		sizeof(long), 0644, proc_doulongvec_minmax, false);
	set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
		sizeof(int), 0644, proc_dointvec_minmax, false);
	set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
		sizeof(int), 0644, proc_dointvec_minmax, false);
	set_table_entry(&table[9], "cache_nice_tries",
		&sd->cache_nice_tries,
		sizeof(int), 0644, proc_dointvec_minmax, false);
	set_table_entry(&table[10], "flags", &sd->flags,
		sizeof(int), 0644, proc_dointvec_minmax, false);
	set_table_entry(&table[11], "max_newidle_lb_cost",
		&sd->max_newidle_lb_cost,
		sizeof(long), 0644, proc_doulongvec_minmax, false);
	set_table_entry(&table[12], "name", sd->name,
		CORENAME_MAX_SIZE, 0444, proc_dostring, false);
	/* &table[13] is terminator */

	return table;
}
static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
{
	struct ctl_table *entry, *table;
	struct sched_domain *sd;
	int domain_num = 0, i;
	char buf[32];

	for_each_domain(cpu, sd)
		domain_num++;
	entry = table = sd_alloc_ctl_entry(domain_num + 1);
	if (table == NULL)
		return NULL;

	i = 0;
	for_each_domain(cpu, sd) {
		snprintf(buf, 32, "domain%d", i);
		entry->procname = kstrdup(buf, GFP_KERNEL);
		entry->mode = 0555;
		entry->child = sd_alloc_ctl_domain_table(sd);
		entry++;
		i++;
	}
	return table;
}
static struct ctl_table_header *sd_sysctl_header;
void register_sched_domain_sysctl(void)
{
	int i, cpu_num = num_possible_cpus();
	struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
	char buf[32];

	WARN_ON(sd_ctl_dir[0].child);
	sd_ctl_dir[0].child = entry;

	if (entry == NULL)
		return;

	for_each_possible_cpu(i) {
		snprintf(buf, 32, "cpu%d", i);
		entry->procname = kstrdup(buf, GFP_KERNEL);
		entry->mode = 0555;
		entry->child = sd_alloc_ctl_cpu_table(i);
		entry++;
	}

	WARN_ON(sd_sysctl_header);
	sd_sysctl_header = register_sysctl_table(sd_ctl_root);
}

/* may be called multiple times per register */
void unregister_sched_domain_sysctl(void)
{
	unregister_sysctl_table(sd_sysctl_header);
	sd_sysctl_header = NULL;
	if (sd_ctl_dir[0].child)
		sd_free_ctl_entry(&sd_ctl_dir[0].child);
}
#endif /* CONFIG_SYSCTL */
#endif /* CONFIG_SMP */
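
/*
 * The registered hierarchy appears under /proc/sys, one "cpuN" directory
 * per possible CPU and one "domainN" per sched_domain level, e.g.:
 *
 *   /proc/sys/kernel/sched_domain/cpu0/domain0/min_interval
 *   /proc/sys/kernel/sched_domain/cpu0/domain1/imbalance_pct
 */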
#ifdef CONFIG_FAIR_GROUP_SCHED
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
	struct sched_entity *se = tg->se[cpu];

#define P(F) \
	SEQ_printf(m, " .%-30s: %lld\n", #F, (long long)F)
#define PN(F) \
	SEQ_printf(m, " .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))

	if (!se)
		return;

	PN(se->sum_exec_runtime);
#ifdef CONFIG_SCHEDSTATS
	if (schedstat_enabled()) {
		PN(se->statistics.wait_start);
		PN(se->statistics.sleep_start);
		PN(se->statistics.block_start);
		PN(se->statistics.sleep_max);
		PN(se->statistics.block_max);
		PN(se->statistics.exec_max);
		PN(se->statistics.slice_max);
		PN(se->statistics.wait_max);
		PN(se->statistics.wait_sum);
		P(se->statistics.wait_count);
	}
#endif
#undef PN
#undef P
}
#endif
#ifdef CONFIG_CGROUP_SCHED
static char group_path[PATH_MAX];

static char *task_group_path(struct task_group *tg)
{
	if (autogroup_path(tg, group_path, PATH_MAX))
		return group_path;

	return cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
}
#endif
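
/*
 * Note: task_group_path() formats into the single static group_path[]
 * buffer; the /proc/sched_debug print path serializes its users via
 * sched_debug_lock.
 */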
static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
	if (rq->curr == p)
		SEQ_printf(m, "R");
	else
		SEQ_printf(m, " ");

	SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ",
		p->comm, task_pid_nr(p),
		SPLIT_NS(p->se.vruntime),
		(long long)(p->nvcsw + p->nivcsw),
		p->prio);
#ifdef CONFIG_SCHEDSTATS
	if (schedstat_enabled()) {
		SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
			SPLIT_NS(p->se.statistics.wait_sum),
			SPLIT_NS(p->se.sum_exec_runtime),
			SPLIT_NS(p->se.statistics.sum_sleep_runtime));
	}
#else
	SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
		0LL, 0L,
		SPLIT_NS(p->se.sum_exec_runtime),
		0LL, 0L);
#endif
#ifdef CONFIG_NUMA_BALANCING
	SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
#endif
#ifdef CONFIG_CGROUP_SCHED
	SEQ_printf(m, " %s", task_group_path(task_group(p)));
#endif

	SEQ_printf(m, "\n");
}
static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
	struct task_struct *g, *p;

	SEQ_printf(m,
	"\nrunnable tasks:\n"
	"            task   PID         tree-key  switches  prio"
	"     wait-time             sum-exec        sum-sleep\n"
	"------------------------------------------------------"
	"----------------------------------------------------\n");

	rcu_read_lock();
	for_each_process_thread(g, p) {
		if (task_cpu(p) != rq_cpu)
			continue;

		print_task(m, rq, p);
	}
	rcu_read_unlock();
}
void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
		spread, rq0_min_vruntime, spread0;
	struct rq *rq = cpu_rq(cpu);
	struct sched_entity *last;
	unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
#else
	SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu);
#endif
	SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "exec_clock",
			SPLIT_NS(cfs_rq->exec_clock));

	raw_spin_lock_irqsave(&rq->lock, flags);
	if (cfs_rq->rb_leftmost)
		MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
	last = __pick_last_entity(cfs_rq);
	if (last)
		max_vruntime = last->vruntime;
	min_vruntime = cfs_rq->min_vruntime;
	rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
	raw_spin_unlock_irqrestore(&rq->lock, flags);
	SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "MIN_vruntime",
			SPLIT_NS(MIN_vruntime));
	SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "min_vruntime",
			SPLIT_NS(min_vruntime));
	SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "max_vruntime",
			SPLIT_NS(max_vruntime));
	spread = max_vruntime - MIN_vruntime;
	SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "spread",
			SPLIT_NS(spread));
	spread0 = min_vruntime - rq0_min_vruntime;
	SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "spread0",
			SPLIT_NS(spread0));
	SEQ_printf(m, " .%-30s: %d\n", "nr_spread_over",
			cfs_rq->nr_spread_over);
	SEQ_printf(m, " .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
	SEQ_printf(m, " .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_SMP
	SEQ_printf(m, " .%-30s: %lu\n", "load_avg",
			cfs_rq->avg.load_avg);
	SEQ_printf(m, " .%-30s: %lu\n", "runnable_load_avg",
			cfs_rq->runnable_load_avg);
	SEQ_printf(m, " .%-30s: %lu\n", "util_avg",
			cfs_rq->avg.util_avg);
	SEQ_printf(m, " .%-30s: %ld\n", "removed_load_avg",
			atomic_long_read(&cfs_rq->removed_load_avg));
	SEQ_printf(m, " .%-30s: %ld\n", "removed_util_avg",
			atomic_long_read(&cfs_rq->removed_util_avg));
#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, " .%-30s: %lu\n", "tg_load_avg_contrib",
			cfs_rq->tg_load_avg_contrib);
	SEQ_printf(m, " .%-30s: %ld\n", "tg_load_avg",
			atomic_long_read(&cfs_rq->tg->load_avg));
#endif
#endif
#ifdef CONFIG_CFS_BANDWIDTH
	SEQ_printf(m, " .%-30s: %d\n", "throttled",
			cfs_rq->throttled);
	SEQ_printf(m, " .%-30s: %d\n", "throttle_count",
			cfs_rq->throttle_count);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}
void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
	SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
#else
	SEQ_printf(m, "\nrt_rq[%d]:\n", cpu);
#endif

#define P(x) \
	SEQ_printf(m, " .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PN(x) \
	SEQ_printf(m, " .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

	P(rt_nr_running);
	P(rt_throttled);
	PN(rt_time);
	PN(rt_runtime);

#undef PN
#undef P
}
void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
{
	struct dl_bw *dl_bw;

	SEQ_printf(m, "\ndl_rq[%d]:\n", cpu);
	SEQ_printf(m, " .%-30s: %ld\n", "dl_nr_running", dl_rq->dl_nr_running);
#ifdef CONFIG_SMP
	dl_bw = &cpu_rq(cpu)->rd->dl_bw;
#else
	dl_bw = &dl_rq->dl_bw;
#endif
	SEQ_printf(m, " .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw);
	SEQ_printf(m, " .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw);
}
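
/*
 * Together these helpers produce the per-CPU runqueue sections of
 * /proc/sched_debug; a fragment looks roughly like (values illustrative):
 *
 * cfs_rq[0]:/
 *  .exec_clock                    : 133978.883467
 *  .min_vruntime                  : 54091.985298
 *  .nr_running                    : 1
 *
 * rt_rq[0]:
 *  .rt_nr_running                 : 0
 */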
extern __read_mostly int sched_clock_running;
static void print_cpu(struct seq_file *m, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

#ifdef CONFIG_X86
	{
		unsigned int freq = cpu_khz ? : 1;

		SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
			   cpu, freq / 1000, (freq % 1000));
	}
#else
	SEQ_printf(m, "cpu#%d\n", cpu);
#endif

#define P(x)								\
do {									\
	if (sizeof(rq->x) == 4)						\
		SEQ_printf(m, " .%-30s: %ld\n", #x, (long)(rq->x));	\
	else								\
		SEQ_printf(m, " .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)

#define PN(x) \
	SEQ_printf(m, " .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

	SEQ_printf(m, " .%-30s: %lu\n", "load",
		   rq->load.weight);
	P(nr_uninterruptible);
	SEQ_printf(m, " .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
#undef P
#undef PN

#ifdef CONFIG_SCHEDSTATS
#define P(n) SEQ_printf(m, " .%-30s: %d\n", #n, rq->n);
#define P64(n) SEQ_printf(m, " .%-30s: %Ld\n", #n, rq->n);

#ifdef CONFIG_SMP
	P64(avg_idle);
	P64(max_idle_balance_cost);
#endif

	if (schedstat_enabled()) {
		P(yld_count);
		P(sched_count);
		P(sched_goidle);
		P(ttwu_count);
		P(ttwu_local);
	}

#undef P
#undef P64
#endif

	spin_lock_irqsave(&sched_debug_lock, flags);
	print_cfs_stats(m, cpu);
	print_rt_stats(m, cpu);
	print_dl_stats(m, cpu);

	print_rq(m, rq, cpu);
	spin_unlock_irqrestore(&sched_debug_lock, flags);
	SEQ_printf(m, "\n");
}
static const char *sched_tunable_scaling_names[] = {
	"none",
	"logarithmic",
	"linear"
};
static void sched_debug_header(struct seq_file *m)
{
	u64 ktime, sched_clk, cpu_clk;
	unsigned long flags;

	local_irq_save(flags);
	ktime = ktime_to_ns(ktime_get());
	sched_clk = sched_clock();
	cpu_clk = local_clock();
	local_irq_restore(flags);

	SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);

#define P(x) \
	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(ktime);
	PN(sched_clk);
	PN(cpu_clk);
	P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	P(sched_clock_stable());
#endif
#undef PN
#undef P

	SEQ_printf(m, "\n");
	SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
	SEQ_printf(m, " .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, " .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(sysctl_sched_latency);
	PN(sysctl_sched_min_granularity);
	PN(sysctl_sched_wakeup_granularity);
	P(sysctl_sched_child_runs_first);
	P(sysctl_sched_features);
#undef PN
#undef P

	SEQ_printf(m, " .%-40s: %d (%s)\n",
		"sysctl_sched_tunable_scaling",
		sysctl_sched_tunable_scaling,
		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
	SEQ_printf(m, "\n");
}
static int sched_debug_show(struct seq_file *m, void *v)
{
	int cpu = (unsigned long)(v - 2);

	if (cpu != -1)
		print_cpu(m, cpu);
	else
		sched_debug_header(m);

	return 0;
}

void sysrq_sched_debug_show(void)
{
	int cpu;

	sched_debug_header(NULL);
	for_each_online_cpu(cpu)
		print_cpu(NULL, cpu);
}
/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is cpu 0.
 * In a hotplugged system some CPUs, including cpu 0, may be missing so we have
 * to use cpumask_* to iterate over the CPUs.
 */
static void *sched_debug_start(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	if (n == 0)
		return (void *) 1;

	n--;

	if (n > 0)
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	*offset = n + 1;

	if (n < nr_cpu_ids)
		return (void *)(unsigned long)(n + 2);

	return NULL;
}
static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	return sched_debug_start(file, offset);
}

static void sched_debug_stop(struct seq_file *file, void *data)
{
}
static const struct seq_operations sched_debug_sops = {
	.start		= sched_debug_start,
	.next		= sched_debug_next,
	.stop		= sched_debug_stop,
	.show		= sched_debug_show,
};
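
/*
 * Position mapping, with every CPU online: *offset == 0 is the header
 * (v == (void *)1) and *offset == n + 1 is cpu n (v == (void *)(n + 2)),
 * matching the "v - 2" decode in sched_debug_show() above; holes in
 * cpu_online_mask are skipped via cpumask_next().
 */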
static int sched_debug_release(struct inode *inode, struct file *file)
{
	seq_release(inode, file);

	return 0;
}
static int sched_debug_open(struct inode *inode, struct file *filp)
{
	int ret = 0;

	ret = seq_open(filp, &sched_debug_sops);

	return ret;
}
static const struct file_operations sched_debug_fops = {
	.open		= sched_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= sched_debug_release,
};
static int __init init_sched_debug_procfs(void)
{
	struct proc_dir_entry *pe;

	pe = proc_create("sched_debug", 0444, NULL, &sched_debug_fops);
	if (!pe)
		return -ENOMEM;
	return 0;
}

__initcall(init_sched_debug_procfs);
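
/*
 * After this initcall the full dump is available via:
 *
 *   cat /proc/sched_debug
 */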
#define __P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
#define P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
#define __PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))
#ifdef CONFIG_NUMA_BALANCING
void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
		unsigned long tpf, unsigned long gsf, unsigned long gpf)
{
	SEQ_printf(m, "numa_faults node=%d ", node);
	SEQ_printf(m, "task_private=%lu task_shared=%lu ", tsf, tpf);
	SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gsf, gpf);
}
#endif
static void sched_show_numa(struct task_struct *p, struct seq_file *m)
{
#ifdef CONFIG_NUMA_BALANCING
	struct mempolicy *pol;

	if (p->mm)
		P(mm->numa_scan_seq);

	task_lock(p);
	pol = p->mempolicy;
	if (pol && !(pol->flags & MPOL_F_MORON))
		pol = NULL;
	mpol_get(pol);
	task_unlock(p);

	P(numa_pages_migrated);
	P(numa_preferred_nid);
	P(total_numa_faults);
	SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
			task_node(p), task_numa_group_id(p));
	show_numa_stats(p, m);
	mpol_put(pol);
#endif
}
void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
{
	unsigned long nr_switches;

	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr(p),
						get_nr_threads(p));
	SEQ_printf(m,
		"---------------------------------------------------------"
		"----------\n");
#define __P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
#define P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
#define __PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))
	PN(se.sum_exec_runtime);

	nr_switches = p->nvcsw + p->nivcsw;

#ifdef CONFIG_SCHEDSTATS
	P(se.nr_migrations);

	if (schedstat_enabled()) {
		u64 avg_atom, avg_per_cpu;

		PN(se.statistics.sum_sleep_runtime);
		PN(se.statistics.wait_start);
		PN(se.statistics.sleep_start);
		PN(se.statistics.block_start);
		PN(se.statistics.sleep_max);
		PN(se.statistics.block_max);
		PN(se.statistics.exec_max);
		PN(se.statistics.slice_max);
		PN(se.statistics.wait_max);
		PN(se.statistics.wait_sum);
		P(se.statistics.wait_count);
		PN(se.statistics.iowait_sum);
		P(se.statistics.iowait_count);
		P(se.statistics.nr_migrations_cold);
		P(se.statistics.nr_failed_migrations_affine);
		P(se.statistics.nr_failed_migrations_running);
		P(se.statistics.nr_failed_migrations_hot);
		P(se.statistics.nr_forced_migrations);
		P(se.statistics.nr_wakeups);
		P(se.statistics.nr_wakeups_sync);
		P(se.statistics.nr_wakeups_migrate);
		P(se.statistics.nr_wakeups_local);
		P(se.statistics.nr_wakeups_remote);
		P(se.statistics.nr_wakeups_affine);
		P(se.statistics.nr_wakeups_affine_attempts);
		P(se.statistics.nr_wakeups_passive);
		P(se.statistics.nr_wakeups_idle);
		avg_atom = p->se.sum_exec_runtime;
		if (nr_switches)
			avg_atom = div64_ul(avg_atom, nr_switches);
		else
			avg_atom = -1LL;

		avg_per_cpu = p->se.sum_exec_runtime;
		if (p->se.nr_migrations) {
			avg_per_cpu = div64_u64(avg_per_cpu,
						p->se.nr_migrations);
		} else {
			avg_per_cpu = -1;
		}

		__PN(avg_atom);
		__PN(avg_per_cpu);
	}
#endif
	SEQ_printf(m, "%-45s:%21Ld\n",
		   "nr_voluntary_switches", (long long)p->nvcsw);
	SEQ_printf(m, "%-45s:%21Ld\n",
		   "nr_involuntary_switches", (long long)p->nivcsw);
	P(se.avg.last_update_time);
	{
		unsigned int this_cpu = raw_smp_processor_id();
		u64 t0, t1;

		t0 = cpu_clock(this_cpu);
		t1 = cpu_clock(this_cpu);
		SEQ_printf(m, "%-45s:%21Ld\n",
			   "clock-delta", (long long)(t1-t0));
	}

	sched_show_numa(p, m);
}
void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
}
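
/*
 * proc_sched_set_task() is invoked from the /proc/<pid>/sched write
 * handler, so writing anything to that file clears the accumulated
 * scheduling statistics for the task.
 */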