// SPDX-License-Identifier: GPL-2.0-only
/*
 * Print the CFS rbtree and other debugging details
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 */
static DEFINE_SPINLOCK(sched_debug_lock);
/*
 * This allows printing both to /proc/sched_debug and
 * to the console
 */
#define SEQ_printf(m, x...)		\
do {					\
	if (m)				\
		seq_printf(m, x);	\
	else				\
		pr_cont(x);		\
} while (0)
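
/*
 * Illustrative usage (a sketch, not from the original file): the same call
 * site serves both sinks, so the printout helpers below never need to know
 * where their output goes:
 *
 *	SEQ_printf(m, "cpu#%d\n", cpu);		// m != NULL: /proc/sched_debug
 *	SEQ_printf(NULL, "cpu#%d\n", cpu);	// m == NULL: console (sysrq path)
 */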
/*
 * Ease the printing of nsec fields:
 */
static long long nsec_high(unsigned long long nsec)
{
	if ((long long)nsec < 0) {
		nsec = -nsec;
		do_div(nsec, 1000000);
		return -nsec;
	}
	do_div(nsec, 1000000);

	return nsec;
}
static unsigned long nsec_low(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		nsec = -nsec;

	return do_div(nsec, 1000000);
}

#define SPLIT_NS(x) nsec_high(x), nsec_low(x)
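
/*
 * Worked example (illustrative): SPLIT_NS() expands to *two* arguments, so
 * it must be paired with a two-conversion format such as "%Ld.%06ld". For
 * nsec = 1234567890, nsec_high() yields 1234 and nsec_low() yields 567890,
 * printing as "1234.567890" (nanoseconds rendered as msec.remainder).
 */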
#define SCHED_FEAT(name, enabled)	\
	#name ,

static const char * const sched_feat_names[] = {
#include "features.h"
};

#undef SCHED_FEAT
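
/*
 * Sketch of the X-macro trick above (assuming a features.h entry such as
 * SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true)): with SCHED_FEAT() defined to
 * "#name ,", the #include expands each entry to a string literal like
 * "GENTLE_FAIR_SLEEPERS", building one array slot per feature bit.
 */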
static int sched_feat_show(struct seq_file *m, void *v)
{
	int i;

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (!(sysctl_sched_features & (1UL << i)))
			seq_puts(m, "NO_");
		seq_printf(m, "%s ", sched_feat_names[i]);
	}
	seq_puts(m, "\n");

	return 0;
}
#ifdef CONFIG_JUMP_LABEL

#define jump_label_key__true  STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE

#define SCHED_FEAT(name, enabled)	\
	jump_label_key__##enabled ,

struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};

#undef SCHED_FEAT

static void sched_feat_disable(int i)
{
	static_key_disable_cpuslocked(&sched_feat_keys[i]);
}

static void sched_feat_enable(int i)
{
	static_key_enable_cpuslocked(&sched_feat_keys[i]);
}
#else
static void sched_feat_disable(int i) { };
static void sched_feat_enable(int i) { };
#endif /* CONFIG_JUMP_LABEL */
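
/*
 * Note: with CONFIG_JUMP_LABEL, sched_feat() tests in hot paths compile to
 * statically patched branches keyed off sched_feat_keys[]; the helpers above
 * rewrite those branch sites when a feature flips, while the !JUMP_LABEL
 * stubs fall back to testing the sysctl_sched_features bitmask at runtime.
 */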
static int sched_feat_set(char *cmp)
{
	int i;
	int neg = 0;

	if (strncmp(cmp, "NO_", 3) == 0) {
		neg = 1;
		cmp += 3;
	}

	i = match_string(sched_feat_names, __SCHED_FEAT_NR, cmp);
	if (i < 0)
		return i;

	if (neg) {
		sysctl_sched_features &= ~(1UL << i);
		sched_feat_disable(i);
	} else {
		sysctl_sched_features |= (1UL << i);
		sched_feat_enable(i);
	}

	return 0;
}
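
/*
 * Illustrative calls (a sketch, not from the original file):
 *
 *	sched_feat_set("UTIL_EST");	// set the bit and enable the key
 *	sched_feat_set("NO_UTIL_EST");	// clear the bit and disable the key
 *
 * match_string() returns -EINVAL for unknown names, which propagates back
 * to the writer.
 */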
static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
		 size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp;
	int ret;
	struct inode *inode;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	cmp = strstrip(buf);

	/* Ensure the static_key remains in a consistent state */
	inode = file_inode(filp);
	cpus_read_lock();
	inode_lock(inode);
	ret = sched_feat_set(cmp);
	inode_unlock(inode);
	cpus_read_unlock();
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}
static int sched_feat_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_feat_show, NULL);
}
static const struct file_operations sched_feat_fops = {
	.open		= sched_feat_open,
	.write		= sched_feat_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
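
/*
 * Typical use from userspace (illustrative, assuming debugfs is mounted at
 * /sys/kernel/debug):
 *
 *	# cat /sys/kernel/debug/sched_features
 *	# echo NO_GENTLE_FAIR_SLEEPERS > /sys/kernel/debug/sched_features
 */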
__read_mostly bool sched_debug_enabled;

static __init int sched_init_debug(void)
{
	debugfs_create_file("sched_features", 0644, NULL, NULL,
			&sched_feat_fops);

	debugfs_create_bool("sched_debug", 0644, NULL,
			&sched_debug_enabled);

	return 0;
}
late_initcall(sched_init_debug);
#ifdef CONFIG_SMP
#ifdef CONFIG_SYSCTL

static struct ctl_table sd_ctl_dir[] = {
	{
		.procname	= "sched_domain",
		.mode		= 0555,
	},
	{}
};

static struct ctl_table sd_ctl_root[] = {
	{
		.procname	= "kernel",
		.mode		= 0555,
		.child		= sd_ctl_dir,
	},
	{}
};
static struct ctl_table *sd_alloc_ctl_entry(int n)
{
	struct ctl_table *entry =
		kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);

	return entry;
}
static void sd_free_ctl_entry(struct ctl_table **tablep)
{
	struct ctl_table *entry;

	/*
	 * In the intermediate directories, both the child directory and
	 * procname are dynamically allocated and could fail but the mode
	 * will always be set. In the lowest directory the names are
	 * static strings and all have proc handlers.
	 */
	for (entry = *tablep; entry->mode; entry++) {
		if (entry->child)
			sd_free_ctl_entry(&entry->child);
		if (entry->proc_handler == NULL)
			kfree(entry->procname);
	}

	kfree(*tablep);
	*tablep = NULL;
}
static void
set_table_entry(struct ctl_table *entry,
		const char *procname, void *data, int maxlen,
		umode_t mode, proc_handler *proc_handler)
{
	entry->procname = procname;
	entry->data = data;
	entry->maxlen = maxlen;
	entry->mode = mode;
	entry->proc_handler = proc_handler;
}
static int sd_ctl_doflags(struct ctl_table *table, int write,
			  void *buffer, size_t *lenp, loff_t *ppos)
{
	unsigned long flags = *(unsigned long *)table->data;
	size_t data_size = 0;
	size_t len = 0;
	char *tmp, *buf;
	int idx;

	if (write)
		return 0;

	for_each_set_bit(idx, &flags, __SD_FLAG_CNT) {
		char *name = sd_flag_debug[idx].name;

		/* Name plus whitespace */
		data_size += strlen(name) + 1;
	}

	if (*ppos > data_size) {
		*lenp = 0;
		return 0;
	}

	buf = kcalloc(data_size + 1, sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for_each_set_bit(idx, &flags, __SD_FLAG_CNT) {
		char *name = sd_flag_debug[idx].name;

		len += snprintf(buf + len, strlen(name) + 2, "%s ", name);
	}

	tmp = buf + *ppos;
	len -= *ppos;

	if (len > *lenp)
		len = *lenp;
	if (len)
		memcpy(buffer, tmp, len);
	if (len < *lenp) {
		((char *)buffer)[len] = '\n';
		len++;
	}

	*lenp = len;
	*ppos += len;

	kfree(buf);

	return 0;
}
static struct ctl_table *
sd_alloc_ctl_domain_table(struct sched_domain *sd)
{
	struct ctl_table *table = sd_alloc_ctl_entry(9);

	if (table == NULL)
		return NULL;

	set_table_entry(&table[0], "min_interval",	  &sd->min_interval,	    sizeof(long), 0644, proc_doulongvec_minmax);
	set_table_entry(&table[1], "max_interval",	  &sd->max_interval,	    sizeof(long), 0644, proc_doulongvec_minmax);
	set_table_entry(&table[2], "busy_factor",	  &sd->busy_factor,	    sizeof(int),  0644, proc_dointvec_minmax);
	set_table_entry(&table[3], "imbalance_pct",	  &sd->imbalance_pct,	    sizeof(int),  0644, proc_dointvec_minmax);
	set_table_entry(&table[4], "cache_nice_tries",	  &sd->cache_nice_tries,    sizeof(int),  0644, proc_dointvec_minmax);
	set_table_entry(&table[5], "flags",		  &sd->flags,		    sizeof(int),  0444, sd_ctl_doflags);
	set_table_entry(&table[6], "max_newidle_lb_cost", &sd->max_newidle_lb_cost, sizeof(long), 0644, proc_doulongvec_minmax);
	set_table_entry(&table[7], "name",		  sd->name,		    CORENAME_MAX_SIZE, 0444, proc_dostring);
	/* &table[8] is terminator */

	return table;
}
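
/*
 * Illustrative result (not from the original file): one such table backs
 * each /proc/sys/kernel/sched_domain/cpu<N>/domain<M>/ directory built
 * below, e.g. .../cpu0/domain0/min_interval or .../cpu0/domain0/flags.
 */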
static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
{
	struct ctl_table *entry, *table;
	struct sched_domain *sd;
	int domain_num = 0, i;
	char buf[32];

	for_each_domain(cpu, sd)
		domain_num++;
	entry = table = sd_alloc_ctl_entry(domain_num + 1);
	if (table == NULL)
		return NULL;

	i = 0;
	for_each_domain(cpu, sd) {
		snprintf(buf, 32, "domain%d", i);
		entry->procname = kstrdup(buf, GFP_KERNEL);
		entry->mode = 0555;
		entry->child = sd_alloc_ctl_domain_table(sd);
		entry++;
		i++;
	}
	return table;
}
static cpumask_var_t		sd_sysctl_cpus;
static struct ctl_table_header	*sd_sysctl_header;
void register_sched_domain_sysctl(void)
{
	static struct ctl_table *cpu_entries;
	static struct ctl_table **cpu_idx;
	static bool init_done = false;
	char buf[32];
	int i;

	if (!cpu_entries) {
		cpu_entries = sd_alloc_ctl_entry(num_possible_cpus() + 1);
		if (!cpu_entries)
			return;

		WARN_ON(sd_ctl_dir[0].child);
		sd_ctl_dir[0].child = cpu_entries;
	}

	if (!cpu_idx) {
		struct ctl_table *e = cpu_entries;

		cpu_idx = kcalloc(nr_cpu_ids, sizeof(struct ctl_table *), GFP_KERNEL);
		if (!cpu_idx)
			return;

		/* deal with sparse possible map */
		for_each_possible_cpu(i) {
			cpu_idx[i] = e;
			e++;
		}
	}

	if (!cpumask_available(sd_sysctl_cpus)) {
		if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
			return;
	}

	if (!init_done) {
		init_done = true;
		/* init to possible to not have holes in @cpu_entries */
		cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
	}

	for_each_cpu(i, sd_sysctl_cpus) {
		struct ctl_table *e = cpu_idx[i];

		if (e->child)
			sd_free_ctl_entry(&e->child);

		if (!e->procname) {
			snprintf(buf, 32, "cpu%d", i);
			e->procname = kstrdup(buf, GFP_KERNEL);
		}
		e->mode = 0555;
		e->child = sd_alloc_ctl_cpu_table(i);

		__cpumask_clear_cpu(i, sd_sysctl_cpus);
	}

	WARN_ON(sd_sysctl_header);
	sd_sysctl_header = register_sysctl_table(sd_ctl_root);
}
void dirty_sched_domain_sysctl(int cpu)
{
	if (cpumask_available(sd_sysctl_cpus))
		__cpumask_set_cpu(cpu, sd_sysctl_cpus);
}
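
/*
 * Sketch of the update cycle (assuming the callers in the topology code):
 * when a CPU's domain hierarchy changes, dirty_sched_domain_sysctl() marks
 * the CPU in sd_sysctl_cpus; the next register_sched_domain_sysctl() call
 * then rebuilds only the marked per-CPU subtrees rather than the whole tree.
 */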
/* may be called multiple times per register */
void unregister_sched_domain_sysctl(void)
{
	unregister_sysctl_table(sd_sysctl_header);
	sd_sysctl_header = NULL;
}
#endif /* CONFIG_SYSCTL */
#endif /* CONFIG_SMP */
#ifdef CONFIG_FAIR_GROUP_SCHED
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
	struct sched_entity *se = tg->se[cpu];

#define P(F)		SEQ_printf(m, "  .%-30s: %lld\n",	#F, (long long)F)
#define P_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld\n",	#F, (long long)schedstat_val(F))
#define PN(F)		SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(F)))

	if (!se)
		return;

	PN(se->sum_exec_runtime);

	if (schedstat_enabled()) {
		PN_SCHEDSTAT(se->statistics.wait_start);
		PN_SCHEDSTAT(se->statistics.sleep_start);
		PN_SCHEDSTAT(se->statistics.block_start);
		PN_SCHEDSTAT(se->statistics.sleep_max);
		PN_SCHEDSTAT(se->statistics.block_max);
		PN_SCHEDSTAT(se->statistics.exec_max);
		PN_SCHEDSTAT(se->statistics.slice_max);
		PN_SCHEDSTAT(se->statistics.wait_max);
		PN_SCHEDSTAT(se->statistics.wait_sum);
		P_SCHEDSTAT(se->statistics.wait_count);
	}

#ifdef CONFIG_SMP
	P(se->avg.runnable_avg);
#endif

#undef PN_SCHEDSTAT
#undef PN
#undef P_SCHEDSTAT
#undef P
}
#endif
#ifdef CONFIG_CGROUP_SCHED
static char group_path[PATH_MAX];

static char *task_group_path(struct task_group *tg)
{
	if (autogroup_path(tg, group_path, PATH_MAX))
		return group_path;

	cgroup_path(tg->css.cgroup, group_path, PATH_MAX);

	return group_path;
}
#endif
static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
	if (rq->curr == p)
		SEQ_printf(m, ">R");
	else
		SEQ_printf(m, " %c", task_state_to_char(p));

	SEQ_printf(m, " %15s %5d %9Ld.%06ld %9Ld %5d ",
		p->comm, task_pid_nr(p),
		SPLIT_NS(p->se.vruntime),
		(long long)(p->nvcsw + p->nivcsw),
		p->prio);

	SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
		SPLIT_NS(schedstat_val_or_zero(p->se.statistics.wait_sum)),
		SPLIT_NS(p->se.sum_exec_runtime),
		SPLIT_NS(schedstat_val_or_zero(p->se.statistics.sum_sleep_runtime)));

#ifdef CONFIG_NUMA_BALANCING
	SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
#endif
#ifdef CONFIG_CGROUP_SCHED
	SEQ_printf(m, " %s", task_group_path(task_group(p)));
#endif

	SEQ_printf(m, "\n");
}
static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
	struct task_struct *g, *p;

	SEQ_printf(m, "\n");
	SEQ_printf(m, "runnable tasks:\n");
	SEQ_printf(m, " S            task   PID         tree-key  switches  prio"
		   "     wait-time             sum-exec        sum-sleep\n");
	SEQ_printf(m, "-------------------------------------------------------"
		   "------------------------------------------------------\n");

	rcu_read_lock();
	for_each_process_thread(g, p) {
		if (task_cpu(p) != rq_cpu)
			continue;

		print_task(m, rq, p);
	}
	rcu_read_unlock();
}
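
/*
 * Illustrative output row (values made up; layout per print_task() above):
 *
 *  S            task   PID         tree-key  switches  prio ...
 * >R            bash  1234     10245.123456       512   120 ...
 */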
void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
		spread, rq0_min_vruntime, spread0;
	struct rq *rq = cpu_rq(cpu);
	struct sched_entity *last;
	unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "\n");
	SEQ_printf(m, "cfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
#else
	SEQ_printf(m, "\n");
	SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
#endif
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
			SPLIT_NS(cfs_rq->exec_clock));

	raw_spin_lock_irqsave(&rq->lock, flags);
	if (rb_first_cached(&cfs_rq->tasks_timeline))
		MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
	last = __pick_last_entity(cfs_rq);
	if (last)
		max_vruntime = last->vruntime;
	min_vruntime = cfs_rq->min_vruntime;
	rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
	raw_spin_unlock_irqrestore(&rq->lock, flags);
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
			SPLIT_NS(MIN_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
			SPLIT_NS(min_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
			SPLIT_NS(max_vruntime));
	spread = max_vruntime - MIN_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
			SPLIT_NS(spread));
	spread0 = min_vruntime - rq0_min_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
			SPLIT_NS(spread0));
	SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
			cfs_rq->nr_spread_over);
	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_SMP
	SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
			cfs_rq->avg.load_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "runnable_avg",
			cfs_rq->avg.runnable_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "util_avg",
			cfs_rq->avg.util_avg);
	SEQ_printf(m, "  .%-30s: %u\n", "util_est_enqueued",
			cfs_rq->avg.util_est.enqueued);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.load_avg",
			cfs_rq->removed.load_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.util_avg",
			cfs_rq->removed.util_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.runnable_avg",
			cfs_rq->removed.runnable_avg);
#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "  .%-30s: %lu\n", "tg_load_avg_contrib",
			cfs_rq->tg_load_avg_contrib);
	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
			atomic_long_read(&cfs_rq->tg->load_avg));
#endif
#endif
#ifdef CONFIG_CFS_BANDWIDTH
	SEQ_printf(m, "  .%-30s: %d\n", "throttled",
			cfs_rq->throttled);
	SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
			cfs_rq->throttle_count);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}
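
/*
 * Illustrative /proc/sched_debug excerpt for one cfs_rq (values made up):
 *
 *	cfs_rq[0]:/
 *	  .exec_clock                    : 0.000000
 *	  .min_vruntime                  : 10245.123456
 *	  .nr_running                    : 2
 *	  .load_avg                      : 1024
 */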
void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
	SEQ_printf(m, "\n");
	SEQ_printf(m, "rt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
#else
	SEQ_printf(m, "\n");
	SEQ_printf(m, "rt_rq[%d]:\n", cpu);
#endif

#define P(x) \
	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(rt_rq->x))
#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

	PU(rt_nr_running);
#ifdef CONFIG_SMP
	PU(rt_nr_migratory);
#endif
	P(rt_throttled);
	PN(rt_time);
	PN(rt_runtime);

#undef PN
#undef PU
#undef P
}
void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
{
	struct dl_bw *dl_bw;

	SEQ_printf(m, "\n");
	SEQ_printf(m, "dl_rq[%d]:\n", cpu);

#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(dl_rq->x))

	PU(dl_nr_running);
#ifdef CONFIG_SMP
	PU(dl_nr_migratory);
	dl_bw = &cpu_rq(cpu)->rd->dl_bw;
#else
	dl_bw = &dl_rq->dl_bw;
#endif
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw);
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw);

#undef PU
}
static void print_cpu(struct seq_file *m, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

#ifdef CONFIG_X86
	{
		unsigned int freq = cpu_khz ? : 1;

		SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
			   cpu, freq / 1000, (freq % 1000));
	}
#else
	SEQ_printf(m, "cpu#%d\n", cpu);
#endif

#define P(x)								\
do {									\
	if (sizeof(rq->x) == 4)						\
		SEQ_printf(m, "  .%-30s: %ld\n", #x, (long)(rq->x));	\
	else								\
		SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)

#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

	P(nr_running);
	P(nr_switches);
	P(nr_uninterruptible);
	PN(next_balance);
	SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
	PN(clock);
	PN(clock_task);
#undef P
#undef PN

#ifdef CONFIG_SMP
#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);
	P64(avg_idle);
	P64(max_idle_balance_cost);
#undef P64
#endif

#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, schedstat_val(rq->n));
	if (schedstat_enabled()) {
		P(yld_count);
		P(sched_count);
		P(sched_goidle);
		P(ttwu_count);
		P(ttwu_local);
	}
#undef P

	spin_lock_irqsave(&sched_debug_lock, flags);
	print_cfs_stats(m, cpu);
	print_rt_stats(m, cpu);
	print_dl_stats(m, cpu);

	print_rq(m, rq, cpu);
	spin_unlock_irqrestore(&sched_debug_lock, flags);
	SEQ_printf(m, "\n");
}
static const char *sched_tunable_scaling_names[] = {
	"none",
	"logarithmic",
	"linear"
};
static void sched_debug_header(struct seq_file *m)
{
	unsigned long flags;
	u64 ktime, sched_clk, cpu_clk;

	local_irq_save(flags);
	ktime = ktime_to_ns(ktime_get());
	sched_clk = sched_clock();
	cpu_clk = local_clock();
	local_irq_restore(flags);

	SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);

#define P(x) \
	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(ktime);
	PN(sched_clk);
	PN(cpu_clk);
	P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	P(sched_clock_stable());
#endif
#undef PN
#undef P

	SEQ_printf(m, "\n");
	SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(sysctl_sched_latency);
	PN(sysctl_sched_min_granularity);
	PN(sysctl_sched_wakeup_granularity);
	P(sysctl_sched_child_runs_first);
	P(sysctl_sched_features);
#undef PN
#undef P

	SEQ_printf(m, "  .%-40s: %d (%s)\n",
		"sysctl_sched_tunable_scaling",
		sysctl_sched_tunable_scaling,
		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
	SEQ_printf(m, "\n");
}
static int sched_debug_show(struct seq_file *m, void *v)
{
	int cpu = (unsigned long)(v - 2);

	if (cpu != -1)
		print_cpu(m, cpu);
	else
		sched_debug_header(m);

	return 0;
}
void sysrq_sched_debug_show(void)
{
	int cpu;

	sched_debug_header(NULL);
	for_each_online_cpu(cpu) {
		/*
		 * Need to reset softlockup watchdogs on all CPUs, because
		 * another CPU might be blocked waiting for us to process
		 * an IPI or stop_machine.
		 */
		touch_nmi_watchdog();
		touch_all_softlockup_watchdogs();
		print_cpu(NULL, cpu);
	}
}
/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is CPU 0.
 * In a hotplugged system some CPUs, including CPU 0, may be missing so we have
 * to use cpumask_* to iterate over the CPUs.
 */
static void *sched_debug_start(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	if (n == 0)
		return (void *) 1;

	n--;

	if (n > 0)
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	*offset = n + 1;

	if (n < nr_cpu_ids)
		return (void *)(unsigned long)(n + 2);

	return NULL;
}
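
/*
 * The offset mapping used above, spelled out (illustrative): *offset == 0
 * yields the header token (1); *offset == n + 1 yields the token for CPU n
 * (n + 2), with cpumask_next()/cpumask_first() skipping offline CPUs.
 */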
static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	return sched_debug_start(file, offset);
}
static void sched_debug_stop(struct seq_file *file, void *data)
{
}
static const struct seq_operations sched_debug_sops = {
	.start		= sched_debug_start,
	.next		= sched_debug_next,
	.stop		= sched_debug_stop,
	.show		= sched_debug_show,
};
static int __init init_sched_debug_procfs(void)
{
	if (!proc_create_seq("sched_debug", 0444, NULL, &sched_debug_sops))
		return -ENOMEM;
	return 0;
}

__initcall(init_sched_debug_procfs);
#define __PS(S, F) SEQ_printf(m, "%-45s:%21Ld\n", S, (long long)(F))
#define __P(F) __PS(#F, F)
#define   P(F) __PS(#F, p->F)
#define __PSN(S, F) SEQ_printf(m, "%-45s:%14Ld.%06ld\n", S, SPLIT_NS((long long)(F)))
#define __PN(F) __PSN(#F, F)
#define   PN(F) __PSN(#F, p->F)
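
/*
 * Quick key to the macro family above (illustrative note): __P() prints a
 * plain expression, P() prefixes "p->", and the *N variants render a
 * nanosecond value as "msec.remainder" via SPLIT_NS(). So P(nvcsw) prints
 * the task's voluntary context-switch count under the label "nvcsw".
 */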
#ifdef CONFIG_NUMA_BALANCING
void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
		unsigned long tpf, unsigned long gsf, unsigned long gpf)
{
	SEQ_printf(m, "numa_faults node=%d ", node);
	SEQ_printf(m, "task_private=%lu task_shared=%lu ", tpf, tsf);
	SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gpf, gsf);
}
#endif
static void sched_show_numa(struct task_struct *p, struct seq_file *m)
{
#ifdef CONFIG_NUMA_BALANCING
	struct mempolicy *pol;

	if (p->mm)
		P(mm->numa_scan_seq);

	task_lock(p);
	pol = p->mempolicy;
	if (pol && !(pol->flags & MPOL_F_MORON))
		pol = NULL;
	mpol_get(pol);
	task_unlock(p);

	P(numa_pages_migrated);
	P(numa_preferred_nid);
	P(total_numa_faults);
	SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
			task_node(p), task_numa_group_id(p));
	show_numa_stats(p, m);
	mpol_put(pol);
#endif
}
void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
						  struct seq_file *m)
{
	unsigned long nr_switches;

	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
						get_nr_threads(p));
	SEQ_printf(m,
		"---------------------------------------------------------"
		"----------\n");

#define P_SCHEDSTAT(F)  __PS(#F, schedstat_val(p->F))
#define PN_SCHEDSTAT(F) __PSN(#F, schedstat_val(p->F))
	PN(se.sum_exec_runtime);

	nr_switches = p->nvcsw + p->nivcsw;
	if (schedstat_enabled()) {
		u64 avg_atom, avg_per_cpu;

		PN_SCHEDSTAT(se.statistics.sum_sleep_runtime);
		PN_SCHEDSTAT(se.statistics.wait_start);
		PN_SCHEDSTAT(se.statistics.sleep_start);
		PN_SCHEDSTAT(se.statistics.block_start);
		PN_SCHEDSTAT(se.statistics.sleep_max);
		PN_SCHEDSTAT(se.statistics.block_max);
		PN_SCHEDSTAT(se.statistics.exec_max);
		PN_SCHEDSTAT(se.statistics.slice_max);
		PN_SCHEDSTAT(se.statistics.wait_max);
		PN_SCHEDSTAT(se.statistics.wait_sum);
		P_SCHEDSTAT(se.statistics.wait_count);
		PN_SCHEDSTAT(se.statistics.iowait_sum);
		P_SCHEDSTAT(se.statistics.iowait_count);
		P_SCHEDSTAT(se.statistics.nr_migrations_cold);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_affine);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_running);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_hot);
		P_SCHEDSTAT(se.statistics.nr_forced_migrations);
		P_SCHEDSTAT(se.statistics.nr_wakeups);
		P_SCHEDSTAT(se.statistics.nr_wakeups_sync);
		P_SCHEDSTAT(se.statistics.nr_wakeups_migrate);
		P_SCHEDSTAT(se.statistics.nr_wakeups_local);
		P_SCHEDSTAT(se.statistics.nr_wakeups_remote);
		P_SCHEDSTAT(se.statistics.nr_wakeups_affine);
		P_SCHEDSTAT(se.statistics.nr_wakeups_affine_attempts);
		P_SCHEDSTAT(se.statistics.nr_wakeups_passive);
		P_SCHEDSTAT(se.statistics.nr_wakeups_idle);

		avg_atom = p->se.sum_exec_runtime;
		if (nr_switches)
			avg_atom = div64_ul(avg_atom, nr_switches);
		else
			avg_atom = -1LL;

		avg_per_cpu = p->se.sum_exec_runtime;
		if (p->se.nr_migrations) {
			avg_per_cpu = div64_u64(avg_per_cpu,
						p->se.nr_migrations);
		} else {
			avg_per_cpu = -1LL;
		}

		__PN(avg_atom);
		__PN(avg_per_cpu);
	}
	__PS("nr_voluntary_switches", p->nvcsw);
	__PS("nr_involuntary_switches", p->nivcsw);
#ifdef CONFIG_SMP
	P(se.avg.runnable_sum);
	P(se.avg.runnable_avg);
	P(se.avg.last_update_time);
	P(se.avg.util_est.ewma);
	P(se.avg.util_est.enqueued);
#endif
#ifdef CONFIG_UCLAMP_TASK
	__PS("uclamp.min", p->uclamp_req[UCLAMP_MIN].value);
	__PS("uclamp.max", p->uclamp_req[UCLAMP_MAX].value);
	__PS("effective uclamp.min", uclamp_eff_value(p, UCLAMP_MIN));
	__PS("effective uclamp.max", uclamp_eff_value(p, UCLAMP_MAX));
#endif
	if (task_has_dl_policy(p)) {
		P(dl.runtime);
		P(dl.deadline);
	}
#undef PN_SCHEDSTAT
#undef P_SCHEDSTAT

	{
		unsigned int this_cpu = raw_smp_processor_id();
		u64 t0, t1;

		t0 = cpu_clock(this_cpu);
		t1 = cpu_clock(this_cpu);
		__PS("clock-delta", t1-t0);
	}

	sched_show_numa(p, m);
}
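
/*
 * Note on the block above (illustrative): the two back-to-back cpu_clock()
 * reads measure the cost of reading the clock itself, so the printed
 * "clock-delta" gives a rough resolution bound for every other timestamp
 * in this file.
 */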
void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
}