// SPDX-License-Identifier: GPL-2.0
/*
 * /proc/schedstat implementation
 */
6 void __update_stats_wait_start(struct rq
*rq
, struct task_struct
*p
,
7 struct sched_statistics
*stats
)
9 u64 wait_start
, prev_wait_start
;
11 wait_start
= rq_clock(rq
);
12 prev_wait_start
= schedstat_val(stats
->wait_start
);
14 if (p
&& likely(wait_start
> prev_wait_start
))
15 wait_start
-= prev_wait_start
;
17 __schedstat_set(stats
->wait_start
, wait_start
);
20 void __update_stats_wait_end(struct rq
*rq
, struct task_struct
*p
,
21 struct sched_statistics
*stats
)
23 u64 delta
= rq_clock(rq
) - schedstat_val(stats
->wait_start
);
26 if (task_on_rq_migrating(p
)) {
28 * Preserve migrating task's wait time so wait_start
29 * time stamp can be adjusted to accumulate wait time
32 __schedstat_set(stats
->wait_start
, delta
);
37 trace_sched_stat_wait(p
, delta
);
40 __schedstat_set(stats
->wait_max
,
41 max(schedstat_val(stats
->wait_max
), delta
));
42 __schedstat_inc(stats
->wait_count
);
43 __schedstat_add(stats
->wait_sum
, delta
);
44 __schedstat_set(stats
->wait_start
, 0);
47 void __update_stats_enqueue_sleeper(struct rq
*rq
, struct task_struct
*p
,
48 struct sched_statistics
*stats
)
50 u64 sleep_start
, block_start
;
52 sleep_start
= schedstat_val(stats
->sleep_start
);
53 block_start
= schedstat_val(stats
->block_start
);
56 u64 delta
= rq_clock(rq
) - sleep_start
;
61 if (unlikely(delta
> schedstat_val(stats
->sleep_max
)))
62 __schedstat_set(stats
->sleep_max
, delta
);
64 __schedstat_set(stats
->sleep_start
, 0);
65 __schedstat_add(stats
->sum_sleep_runtime
, delta
);
68 account_scheduler_latency(p
, delta
>> 10, 1);
69 trace_sched_stat_sleep(p
, delta
);
74 u64 delta
= rq_clock(rq
) - block_start
;
79 if (unlikely(delta
> schedstat_val(stats
->block_max
)))
80 __schedstat_set(stats
->block_max
, delta
);
82 __schedstat_set(stats
->block_start
, 0);
83 __schedstat_add(stats
->sum_sleep_runtime
, delta
);
84 __schedstat_add(stats
->sum_block_runtime
, delta
);
88 __schedstat_add(stats
->iowait_sum
, delta
);
89 __schedstat_inc(stats
->iowait_count
);
90 trace_sched_stat_iowait(p
, delta
);
93 trace_sched_stat_blocked(p
, delta
);
95 account_scheduler_latency(p
, delta
>> 10, 0);
/*
 * Current schedstat API version.
 *
 * Bump this up when changing the output format or the meaning of an existing
 * format, so that tools can adapt (or abort).
 */
106 #define SCHEDSTAT_VERSION 16
108 static int show_schedstat(struct seq_file
*seq
, void *v
)
112 if (v
== (void *)1) {
113 seq_printf(seq
, "version %d\n", SCHEDSTAT_VERSION
);
114 seq_printf(seq
, "timestamp %lu\n", jiffies
);
118 struct sched_domain
*sd
;
121 cpu
= (unsigned long)(v
- 2);
124 /* runqueue-specific stats */
126 "cpu%d %u 0 %u %u %u %u %llu %llu %lu",
128 rq
->sched_count
, rq
->sched_goidle
,
129 rq
->ttwu_count
, rq
->ttwu_local
,
131 rq
->rq_sched_info
.run_delay
, rq
->rq_sched_info
.pcount
);
133 seq_printf(seq
, "\n");
136 /* domain-specific stats */
138 for_each_domain(cpu
, sd
) {
139 enum cpu_idle_type itype
;
141 seq_printf(seq
, "domain%d %*pb", dcount
++,
142 cpumask_pr_args(sched_domain_span(sd
)));
143 for (itype
= 0; itype
< CPU_MAX_IDLE_TYPES
; itype
++) {
144 seq_printf(seq
, " %u %u %u %u %u %u %u %u",
146 sd
->lb_balanced
[itype
],
147 sd
->lb_failed
[itype
],
148 sd
->lb_imbalance
[itype
],
149 sd
->lb_gained
[itype
],
150 sd
->lb_hot_gained
[itype
],
151 sd
->lb_nobusyq
[itype
],
152 sd
->lb_nobusyg
[itype
]);
155 " %u %u %u %u %u %u %u %u %u %u %u %u\n",
156 sd
->alb_count
, sd
->alb_failed
, sd
->alb_pushed
,
157 sd
->sbe_count
, sd
->sbe_balanced
, sd
->sbe_pushed
,
158 sd
->sbf_count
, sd
->sbf_balanced
, sd
->sbf_pushed
,
159 sd
->ttwu_wake_remote
, sd
->ttwu_move_affine
,
160 sd
->ttwu_move_balance
);
169 * This iterator needs some explanation.
170 * It returns 1 for the header position.
171 * This means 2 is cpu 0.
172 * In a hotplugged system some CPUs, including cpu 0, may be missing so we have
173 * to use cpumask_* to iterate over the CPUs.
175 static void *schedstat_start(struct seq_file
*file
, loff_t
*offset
)
177 unsigned long n
= *offset
;
185 n
= cpumask_next(n
- 1, cpu_online_mask
);
187 n
= cpumask_first(cpu_online_mask
);
192 return (void *)(unsigned long)(n
+ 2);
197 static void *schedstat_next(struct seq_file
*file
, void *data
, loff_t
*offset
)
201 return schedstat_start(file
, offset
);
/* seq_file ->stop() callback: nothing to tear down. */
static void schedstat_stop(struct seq_file *file, void *data)
{
}
208 static const struct seq_operations schedstat_sops
= {
209 .start
= schedstat_start
,
210 .next
= schedstat_next
,
211 .stop
= schedstat_stop
,
212 .show
= show_schedstat
,
215 static int __init
proc_schedstat_init(void)
217 proc_create_seq("schedstat", 0, NULL
, &schedstat_sops
);
220 subsys_initcall(proc_schedstat_init
);