// SPDX-License-Identifier: GPL-2.0
/*
 * kernel/sched/stats.c
 *
 * /proc/schedstat implementation
 */

void __update_stats_wait_start(struct rq *rq, struct task_struct *p,
			       struct sched_statistics *stats)
{
	u64 wait_start, prev_wait_start;

	wait_start = rq_clock(rq);
	prev_wait_start = schedstat_val(stats->wait_start);

	if (p && likely(wait_start > prev_wait_start))
		wait_start -= prev_wait_start;

	__schedstat_set(stats->wait_start, wait_start);
}
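
/*
 * Illustrative walk-through of the migration hand-off (values made up, not
 * from the source): a task waits 3ms on CPU0, is migrated while still
 * queued, then waits another 2ms on CPU1 before running.
 * __update_stats_wait_end() below stashes the 3ms already waited in
 * wait_start; __update_stats_wait_start() above then subtracts it from the
 * new rq clock, so the final wait_end on CPU1 computes a single 5ms delta
 * covering both runqueues.
 */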
void __update_stats_wait_end(struct rq *rq, struct task_struct *p,
			     struct sched_statistics *stats)
{
	u64 delta = rq_clock(rq) - schedstat_val(stats->wait_start);

	if (p) {
		if (task_on_rq_migrating(p)) {
			/*
			 * Preserve migrating task's wait time so wait_start
			 * time stamp can be adjusted to accumulate wait time
			 * prior to migration.
			 */
			__schedstat_set(stats->wait_start, delta);

			return;
		}

		trace_sched_stat_wait(p, delta);
	}

	__schedstat_set(stats->wait_max,
			max(schedstat_val(stats->wait_max), delta));
	__schedstat_inc(stats->wait_count);
	__schedstat_add(stats->wait_sum, delta);
	__schedstat_set(stats->wait_start, 0);
}
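
/*
 * Note: sleep_start/block_start are assumed to have been stamped when the
 * task went to (interruptible) sleep or blocked (uninterruptible); this
 * helper flushes both into the sums at enqueue time. The "delta >> 10"
 * passed to account_scheduler_latency() is a cheap ns-to-us conversion
 * (divide by 1024 rather than 1000).
 */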
void __update_stats_enqueue_sleeper(struct rq *rq, struct task_struct *p,
				    struct sched_statistics *stats)
{
	u64 sleep_start, block_start;

	sleep_start = schedstat_val(stats->sleep_start);
	block_start = schedstat_val(stats->block_start);

	if (sleep_start) {
		u64 delta = rq_clock(rq) - sleep_start;

		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > schedstat_val(stats->sleep_max)))
			__schedstat_set(stats->sleep_max, delta);

		__schedstat_set(stats->sleep_start, 0);
		__schedstat_add(stats->sum_sleep_runtime, delta);

		if (p) {
			account_scheduler_latency(p, delta >> 10, 1);
			trace_sched_stat_sleep(p, delta);
		}
	}

	if (block_start) {
		u64 delta = rq_clock(rq) - block_start;

		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > schedstat_val(stats->block_max)))
			__schedstat_set(stats->block_max, delta);

		__schedstat_set(stats->block_start, 0);
		__schedstat_add(stats->sum_sleep_runtime, delta);
		__schedstat_add(stats->sum_block_runtime, delta);

		if (p) {
			if (p->in_iowait) {
				__schedstat_add(stats->iowait_sum, delta);
				__schedstat_inc(stats->iowait_count);
				trace_sched_stat_iowait(p, delta);
			}

			trace_sched_stat_blocked(p, delta);

			account_scheduler_latency(p, delta >> 10, 0);
		}
	}
}

/*
 * Current schedstat API version.
 *
 * Bump this up when changing the output format or the meaning of an existing
 * format, so that tools can adapt (or abort).
 */
#define SCHEDSTAT_VERSION 16
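
/*
 * Illustrative output shape (field values made up):
 *
 *   version 16
 *   timestamp 4297326232
 *   cpu0 0 0 1234 567 8910 1112 131415161718 192021222324 2526
 *   domain0 f 0 0 0 0 0 0 0 0 ... (8 fields per idle type, then 12 more)
 *
 * One "cpu<N>" line per online CPU, followed (on SMP) by one "domain<N>"
 * line per sched domain attached to that CPU.
 */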
static int show_schedstat(struct seq_file *seq, void *v)
{
	int cpu;

	if (v == (void *)1) {
		seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
		seq_printf(seq, "timestamp %lu\n", jiffies);
	} else {
		struct rq *rq;
#ifdef CONFIG_SMP
		struct sched_domain *sd;
		int dcount = 0;
#endif
		cpu = (unsigned long)(v - 2);
		rq = cpu_rq(cpu);

		/* runqueue-specific stats */
		seq_printf(seq,
			   "cpu%d %u 0 %u %u %u %u %llu %llu %lu",
			   cpu, rq->yld_count,
			   rq->sched_count, rq->sched_goidle,
			   rq->ttwu_count, rq->ttwu_local,
			   rq->rq_cpu_time,
			   rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount);

		seq_printf(seq, "\n");

#ifdef CONFIG_SMP
		/* domain-specific stats */
		rcu_read_lock();
		for_each_domain(cpu, sd) {
			enum cpu_idle_type itype;

			seq_printf(seq, "domain%d %*pb", dcount++,
				   cpumask_pr_args(sched_domain_span(sd)));
			for (itype = 0; itype < CPU_MAX_IDLE_TYPES; itype++) {
				seq_printf(seq, " %u %u %u %u %u %u %u %u",
					   sd->lb_count[itype],
					   sd->lb_balanced[itype],
					   sd->lb_failed[itype],
					   sd->lb_imbalance[itype],
					   sd->lb_gained[itype],
					   sd->lb_hot_gained[itype],
					   sd->lb_nobusyq[itype],
					   sd->lb_nobusyg[itype]);
			}
			seq_printf(seq,
				   " %u %u %u %u %u %u %u %u %u %u %u %u\n",
				   sd->alb_count, sd->alb_failed, sd->alb_pushed,
				   sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed,
				   sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,
				   sd->ttwu_wake_remote, sd->ttwu_move_affine,
				   sd->ttwu_move_balance);
		}
		rcu_read_unlock();
#endif
	}
	return 0;
}

/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is cpu 0.
 * In a hotplugged system some CPUs, including cpu 0, may be missing so we have
 * to use cpumask_* to iterate over the CPUs.
 */
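/*
 * Worked example, assuming CPUs 0 and 2 are online (CPU 1 offline) and
 * nr_cpu_ids == 4:
 *   *offset 0 -> header                          -> returns (void *)1
 *   *offset 1 -> cpumask_first() == 0            -> returns (void *)2 (cpu0)
 *   *offset 2 -> cpumask_next(0) == 2            -> returns (void *)4 (cpu2)
 *   *offset 4 -> cpumask_next(2) == nr_cpu_ids   -> returns NULL (done)
 * Offset 3 is never seen: schedstat_start() stores n + 1 (here 3) back into
 * *offset, and schedstat_next() increments it to 4 before re-entering.
 */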
static void *schedstat_start(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	if (n == 0)
		return (void *) 1;

	n--;

	if (n > 0)
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	*offset = n + 1;

	if (n < nr_cpu_ids)
		return (void *)(unsigned long)(n + 2);

	return NULL;
}

static void *schedstat_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;

	return schedstat_start(file, offset);
}

static void schedstat_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations schedstat_sops = {
	.start = schedstat_start,
	.next  = schedstat_next,
	.stop  = schedstat_stop,
	.show  = show_schedstat,
};

static int __init proc_schedstat_init(void)
{
	proc_create_seq("schedstat", 0, NULL, &schedstat_sops);
	return 0;
}
subsys_initcall(proc_schedstat_init);
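
/*
 * Usage note: the file registered above is read with e.g. "cat
 * /proc/schedstat". The counters only advance while schedstats are enabled
 * (via the kernel.sched_schedstats sysctl or the "schedstats=enable" boot
 * parameter); the file itself exists either way.
 */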