/*
 * Pressure stall information for CPU, memory and IO
 *
 * Copyright (c) 2018 Facebook, Inc.
 * Author: Johannes Weiner <hannes@cmpxchg.org>
 *
 * When CPU, memory and IO are contended, tasks experience delays that
 * reduce throughput and introduce latencies into the workload. Memory
 * and IO contention, in addition, can cause a full loss of forward
 * progress in which the CPU goes idle.
 *
 * This code aggregates individual task delays into resource pressure
 * metrics that indicate problems with both workload health and
 * resource utilization.
 *
 *			Model
 *
 * The time in which a task can execute on a CPU is our baseline for
 * productivity. Pressure expresses the amount of time in which this
 * potential cannot be realized due to resource contention.
 *
 * This concept of productivity has two components: the workload and
 * the CPU. To measure the impact of pressure on both, we define two
 * contention states for a resource: SOME and FULL.
 *
 * In the SOME state of a given resource, one or more tasks are
 * delayed on that resource. This affects the workload's ability to
 * perform work, but the CPU may still be executing other tasks.
 *
 * In the FULL state of a given resource, all non-idle tasks are
 * delayed on that resource such that nobody is advancing and the CPU
 * goes idle. This leaves both workload and CPU unproductive.
 *
 * (Naturally, the FULL state doesn't exist for the CPU resource.)
 *
 *	SOME = nr_delayed_tasks != 0
 *	FULL = nr_delayed_tasks != 0 && nr_running_tasks == 0
 *
 * The percentage of wallclock time spent in those compound stall
 * states gives pressure numbers between 0 and 100 for each resource,
 * where the SOME percentage indicates workload slowdowns and the FULL
 * percentage indicates reduced CPU utilization:
 *
 *	%SOME = time(SOME) / period
 *	%FULL = time(FULL) / period
 *
 *			Multiple CPUs
 *
 * The more tasks and available CPUs there are, the more work can be
 * performed concurrently. This means that the potential that can go
 * unrealized due to resource contention *also* scales with non-idle
 * tasks and CPUs.
 *
 * Consider a scenario where 257 number crunching tasks are trying to
 * run concurrently on 256 CPUs. If we simply aggregated the task
 * states, we would have to conclude a CPU SOME pressure number of
 * 100%, since *somebody* is waiting on a runqueue at all
 * times. However, that is clearly not the amount of contention the
 * workload is experiencing: only one out of 256 possible execution
 * threads will be contended at any given time, or about 0.4%.
 *
 * Conversely, consider a scenario of 4 tasks and 4 CPUs where at any
 * given time *one* of the tasks is delayed due to a lack of memory.
 * Again, looking purely at the task state would yield a memory FULL
 * pressure number of 0%, since *somebody* is always making forward
 * progress. But again this wouldn't capture the amount of execution
 * potential lost, which is 1 out of 4 CPUs, or 25%.
 *
 * To calculate wasted potential (pressure) with multiple processors,
 * we have to base our calculation on the number of non-idle tasks in
 * conjunction with the number of available CPUs, which is the number
 * of potential execution threads. SOME becomes then the proportion of
 * delayed tasks to possible threads, and FULL is the share of possible
 * threads that are unproductive due to delays:
 *
 *	threads = min(nr_nonidle_tasks, nr_cpus)
 *	   SOME = min(nr_delayed_tasks / threads, 1)
 *	   FULL = (threads - min(nr_running_tasks, threads)) / threads
 *
 * For the 257 number crunchers on 256 CPUs, this yields:
 *
 *	threads = min(257, 256)
 *	   SOME = min(1 / 256, 1)             = 0.4%
 *	   FULL = (256 - min(257, 256)) / 256 = 0%
 *
 * For the 1 out of 4 memory-delayed tasks, this yields:
 *
 *	threads = min(4, 4)
 *	   SOME = min(1 / 4, 1)               = 25%
 *	   FULL = (4 - min(3, 4)) / 4         = 25%
 *
 * [ Substitute nr_cpus with 1, and you can see that it's a natural
 *   extension of the single-CPU model. ]
 *
 *			Implementation
 *
 * To assess the precise time spent in each such state, we would have
 * to freeze the system on task changes and start/stop the state
 * clocks accordingly. Obviously that doesn't scale in practice.
 *
 * Because the scheduler aims to distribute the compute load evenly
 * among the available CPUs, we can track task state locally to each
 * CPU and, at much lower frequency, extrapolate the global state for
 * the cumulative stall times and the running averages.
 *
 * For each runqueue, we track:
 *
 *	tSOME[cpu] = time(nr_delayed_tasks[cpu] != 0)
 *	tFULL[cpu] = time(nr_delayed_tasks[cpu] && !nr_running_tasks[cpu])
 *	tNONIDLE[cpu] = time(nr_nonidle_tasks[cpu] != 0)
 *
 * and then periodically aggregate:
 *
 *	tNONIDLE = sum(tNONIDLE[i])
 *
 *	tSOME    = sum(tSOME[i]  * tNONIDLE[i]) / tNONIDLE
 *	tFULL    = sum(tFULL[i]  * tNONIDLE[i]) / tNONIDLE
 *
 *	   %SOME = tSOME / period
 *	   %FULL = tFULL / period
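 *
 * [ A hypothetical worked example of this aggregation (the numbers
 *   are made up for illustration only): over a 2s period, CPU0 is
 *   non-idle for 1s, 0.6s of which had a delayed task, while CPU1
 *   is non-idle for 0.5s with no delays at all:
 *
 *	tNONIDLE = 1s + 0.5s                      = 1.5s
 *	tSOME    = (0.6s * 1s + 0s * 0.5s) / 1.5s = 0.4s
 *	   %SOME = 0.4s / 2s                      = 20% ]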
 *
 * This gives us an approximation of pressure that is practical
 * cost-wise, yet way more sensitive and accurate than periodic
 * sampling of the aggregate task states would be.
 */

#include <linux/sched/loadavg.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/seqlock.h>
#include <linux/cgroup.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/psi.h>
#include "sched.h"

static int psi_bug __read_mostly;

bool psi_disabled __read_mostly;
core_param(psi_disabled, psi_disabled, bool, 0644);

/* Running averages - we need to be higher-res than loadavg */
#define PSI_FREQ	(2*HZ+1)	/* 2 sec intervals */
#define EXP_10s		1677		/* 1/exp(2s/10s) as fixed-point */
#define EXP_60s		1981		/* 1/exp(2s/60s) */
#define EXP_300s	2034		/* 1/exp(2s/300s) */
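
/*
 * Note: these decay constants follow the loadavg fixed-point
 * convention (FIXED_1 == 1 << 11 == 2048 in <linux/sched/loadavg.h>),
 * i.e. each one is roughly exp(-2s/window) scaled by 2048; for
 * example, exp(-2/10) * 2048 ~= 1677.
 */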

/* Sampling frequency in nanoseconds */
static u64 psi_period __read_mostly;

/* System-level pressure and stall tracking */
static DEFINE_PER_CPU(struct psi_group_cpu, system_group_pcpu);
static struct psi_group psi_system = {
	.pcpu = &system_group_pcpu,
};

static void psi_update_work(struct work_struct *work);

static void group_init(struct psi_group *group)
{
	int cpu;

	for_each_possible_cpu(cpu)
		seqcount_init(&per_cpu_ptr(group->pcpu, cpu)->seq);
	group->next_update = sched_clock() + psi_period;
	INIT_DELAYED_WORK(&group->clock_work, psi_update_work);
	mutex_init(&group->stat_lock);
}

void __init psi_init(void)
{
	if (psi_disabled)
		return;

	psi_period = jiffies_to_nsecs(PSI_FREQ);
	group_init(&psi_system);
}
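
/*
 * Evaluate the SOME/FULL state definitions from the model comment
 * above against one CPU's task counts.
 */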
static bool test_state(unsigned int *tasks, enum psi_states state)
{
	switch (state) {
	case PSI_IO_SOME:
		return tasks[NR_IOWAIT];
	case PSI_IO_FULL:
		return tasks[NR_IOWAIT] && !tasks[NR_RUNNING];
	case PSI_MEM_SOME:
		return tasks[NR_MEMSTALL];
	case PSI_MEM_FULL:
		return tasks[NR_MEMSTALL] && !tasks[NR_RUNNING];
	case PSI_CPU_SOME:
		return tasks[NR_RUNNING] > 1;
	case PSI_NONIDLE:
		return tasks[NR_IOWAIT] || tasks[NR_MEMSTALL] ||
			tasks[NR_RUNNING];
	default:
		return false;
	}
}

static void get_recent_times(struct psi_group *group, int cpu, u32 *times)
{
	struct psi_group_cpu *groupc = per_cpu_ptr(group->pcpu, cpu);
	unsigned int tasks[NR_PSI_TASK_COUNTS];
	u64 now, state_start;
	unsigned int seq;
	int s;

	/* Snapshot a coherent view of the CPU state */
	do {
		seq = read_seqcount_begin(&groupc->seq);
		now = cpu_clock(cpu);
		memcpy(times, groupc->times, sizeof(groupc->times));
		memcpy(tasks, groupc->tasks, sizeof(groupc->tasks));
		state_start = groupc->state_start;
	} while (read_seqcount_retry(&groupc->seq, seq));

	/* Calculate state time deltas against the previous snapshot */
	for (s = 0; s < NR_PSI_STATES; s++) {
		u32 delta;
		/*
		 * In addition to already concluded states, we also
		 * incorporate currently active states on the CPU,
		 * since states may last for many sampling periods.
		 *
		 * This way we keep our delta sampling buckets small
		 * (u32) and our reported pressure close to what's
		 * actually happening.
		 */
		if (test_state(tasks, s))
			times[s] += now - state_start;

		delta = times[s] - groupc->times_prev[s];
		groupc->times_prev[s] = times[s];

		times[s] = delta;
	}
}

static void calc_avgs(unsigned long avg[3], int missed_periods,
		      u64 time, u64 period)
{
	unsigned long pct;

	/* Fill in zeroes for periods of no activity */
	if (missed_periods) {
		avg[0] = calc_load_n(avg[0], EXP_10s, 0, missed_periods);
		avg[1] = calc_load_n(avg[1], EXP_60s, 0, missed_periods);
		avg[2] = calc_load_n(avg[2], EXP_300s, 0, missed_periods);
	}

	/* Sample the most recent active period */
	pct = div_u64(time * 100, period);
	pct *= FIXED_1;
	avg[0] = calc_load(avg[0], EXP_10s, pct);
	avg[1] = calc_load(avg[1], EXP_60s, pct);
	avg[2] = calc_load(avg[2], EXP_300s, pct);
}
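
/*
 * For reference: calc_load(avg, exp, pct) from <linux/sched/loadavg.h>
 * computes roughly avg = (avg * exp + pct * (FIXED_1 - exp)) / FIXED_1,
 * an exponentially weighted moving average of the recent pressure
 * percentages; calc_load_n() applies several such decay steps at once.
 */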

static bool update_stats(struct psi_group *group)
{
	u64 deltas[NR_PSI_STATES - 1] = { 0, };
	unsigned long missed_periods = 0;
	unsigned long nonidle_total = 0;
	u64 now, expires, period;
	int cpu;
	int s;

	mutex_lock(&group->stat_lock);

	/*
	 * Collect the per-cpu time buckets and average them into a
	 * single time sample that is normalized to wallclock time.
	 *
	 * For averaging, each CPU is weighted by its non-idle time in
	 * the sampling period. This eliminates artifacts from uneven
	 * loading, or even entirely idle CPUs.
	 */
	for_each_possible_cpu(cpu) {
		u32 times[NR_PSI_STATES];
		u32 nonidle;

		get_recent_times(group, cpu, times);

		nonidle = nsecs_to_jiffies(times[PSI_NONIDLE]);
		nonidle_total += nonidle;

		for (s = 0; s < PSI_NONIDLE; s++)
			deltas[s] += (u64)times[s] * nonidle;
	}

	/*
	 * Integrate the sample into the running statistics that are
	 * reported to userspace: the cumulative stall times and the
	 * running averages.
	 *
	 * Pressure percentages are sampled at PSI_FREQ. We might be
	 * called more often when the user polls more frequently than
	 * that; we might be called less often when there is no task
	 * activity, thus no data, and clock ticks are sporadic. The
	 * below handles both.
	 */

	for (s = 0; s < NR_PSI_STATES - 1; s++)
		group->total[s] += div_u64(deltas[s], max(nonidle_total, 1UL));

	now = sched_clock();
	expires = group->next_update;
	if (now < expires)
		goto out;
	if (now - expires > psi_period)
		missed_periods = div_u64(now - expires, psi_period);

	/*
	 * The periodic clock tick can get delayed for various
	 * reasons, especially on loaded systems. To avoid clock
	 * drift, we schedule the clock in fixed psi_period intervals.
	 * But the deltas we sample out of the per-cpu buckets above
	 * are based on the actual time elapsing between clock ticks.
	 */
	group->next_update = expires + ((1 + missed_periods) * psi_period);
	period = now - (group->last_update + (missed_periods * psi_period));
	group->last_update = now;

	for (s = 0; s < NR_PSI_STATES - 1; s++) {
		u32 sample;

		sample = group->total[s] - group->total_prev[s];
		/*
		 * Due to the lockless sampling of the time buckets,
		 * recorded time deltas can slip into the next period,
		 * which under full pressure can result in samples in
		 * excess of the period length.
		 *
		 * We don't want to report non-sensical pressures in
		 * excess of 100%, nor do we want to drop such events
		 * on the floor. Instead we punt any overage into the
		 * future until pressure subsides. By doing this we
		 * don't underreport the occurring pressure curve, we
		 * just report it delayed by one period length.
		 *
		 * The error isn't cumulative. As soon as another
		 * delta slips from a period P to P+1, by definition
		 * it frees up its time T in P.
		 */
		if (sample > period)
			sample = period;
		group->total_prev[s] += sample;
		calc_avgs(group->avg[s], missed_periods, sample, period);
	}
out:
	mutex_unlock(&group->stat_lock);
	return nonidle_total;
}

static void psi_update_work(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct psi_group *group;
	bool nonidle;

	dwork = to_delayed_work(work);
	group = container_of(dwork, struct psi_group, clock_work);

	/*
	 * If there is task activity, periodically fold the per-cpu
	 * times and feed samples into the running averages. If things
	 * are idle and there is no data to process, stop the clock.
	 * Once restarted, we'll catch up the running averages in one
	 * go - see calc_avgs() and missed_periods.
	 */

	nonidle = update_stats(group);

	if (nonidle) {
		unsigned long delay = 0;
		u64 now;

		now = sched_clock();
		if (group->next_update > now)
			delay = nsecs_to_jiffies(group->next_update - now) + 1;
		schedule_delayed_work(dwork, delay);
	}
}
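
/*
 * record_times() folds the time since the last state change into the
 * per-cpu time buckets of whichever states are currently active. The
 * callers serialize these updates against the lockless readers in
 * get_recent_times() through groupc->seq.
 */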
static void record_times(struct psi_group_cpu *groupc, int cpu,
			 bool memstall_tick)
{
	u32 delta;
	u64 now;

	now = cpu_clock(cpu);
	delta = now - groupc->state_start;
	groupc->state_start = now;

	if (test_state(groupc->tasks, PSI_IO_SOME)) {
		groupc->times[PSI_IO_SOME] += delta;
		if (test_state(groupc->tasks, PSI_IO_FULL))
			groupc->times[PSI_IO_FULL] += delta;
	}

	if (test_state(groupc->tasks, PSI_MEM_SOME)) {
		groupc->times[PSI_MEM_SOME] += delta;
		if (test_state(groupc->tasks, PSI_MEM_FULL))
			groupc->times[PSI_MEM_FULL] += delta;
		else if (memstall_tick) {
			u32 sample;
			/*
			 * Since we care about lost potential, a
			 * memstall is FULL when there are no other
			 * working tasks, but also when the CPU is
			 * actively reclaiming and nothing productive
			 * could run even if it were runnable.
			 *
			 * When the timer tick sees a reclaiming CPU,
			 * regardless of runnable tasks, sample a FULL
			 * tick (or less if it hasn't been a full tick
			 * since the last state change).
			 */
			sample = min(delta, (u32)jiffies_to_nsecs(1));
			groupc->times[PSI_MEM_FULL] += sample;
		}
	}

	if (test_state(groupc->tasks, PSI_CPU_SOME))
		groupc->times[PSI_CPU_SOME] += delta;

	if (test_state(groupc->tasks, PSI_NONIDLE))
		groupc->times[PSI_NONIDLE] += delta;
}

static void psi_group_change(struct psi_group *group, int cpu,
			     unsigned int clear, unsigned int set)
{
	struct psi_group_cpu *groupc;
	unsigned int t, m;

	groupc = per_cpu_ptr(group->pcpu, cpu);

	/*
	 * First we assess the aggregate resource states this CPU's
	 * tasks have been in since the last change, and account any
	 * SOME and FULL time these may have resulted in.
	 *
	 * Then we update the task counts according to the state
	 * change requested through the @clear and @set bits.
	 */
	write_seqcount_begin(&groupc->seq);

	record_times(groupc, cpu, false);

	for (t = 0, m = clear; m; m &= ~(1 << t), t++) {
		if (!(m & (1 << t)))
			continue;
		if (groupc->tasks[t] == 0 && !psi_bug) {
			printk_deferred(KERN_ERR "psi: task underflow! cpu=%d t=%d tasks=[%u %u %u] clear=%x set=%x\n",
					cpu, t, groupc->tasks[0],
					groupc->tasks[1], groupc->tasks[2],
					clear, set);
			psi_bug = 1;
		}
		groupc->tasks[t]--;
	}

	for (t = 0; set; set &= ~(1 << t), t++)
		if (set & (1 << t))
			groupc->tasks[t]++;

	write_seqcount_end(&groupc->seq);

	if (!delayed_work_pending(&group->clock_work))
		schedule_delayed_work(&group->clock_work, PSI_FREQ);
}
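
/*
 * Walk the psi groups a task contributes to: the cgroups in its
 * default-hierarchy ancestry when CONFIG_CGROUPS is enabled, followed
 * by the system-wide psi_system group.
 */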
static struct psi_group *iterate_groups(struct task_struct *task, void **iter)
{
#ifdef CONFIG_CGROUPS
	struct cgroup *cgroup = NULL;

	if (!*iter)
		cgroup = task->cgroups->dfl_cgrp;
	else if (*iter == &psi_system)
		return NULL;
	else
		cgroup = cgroup_parent(*iter);

	if (cgroup && cgroup_parent(cgroup)) {
		*iter = cgroup;
		return cgroup_psi(cgroup);
	}
#else
	if (*iter)
		return NULL;
#endif
	*iter = &psi_system;
	return &psi_system;
}

void psi_task_change(struct task_struct *task, int clear, int set)
{
	int cpu = task_cpu(task);
	struct psi_group *group;
	void *iter = NULL;

	if (!task->pid)
		return;

	if (((task->psi_flags & set) ||
	     (task->psi_flags & clear) != clear) &&
	    !psi_bug) {
		printk_deferred(KERN_ERR "psi: inconsistent task state! task=%d:%s cpu=%d psi_flags=%x clear=%x set=%x\n",
				task->pid, task->comm, cpu,
				task->psi_flags, clear, set);
		psi_bug = 1;
	}

	task->psi_flags &= ~clear;
	task->psi_flags |= set;

	while ((group = iterate_groups(task, &iter)))
		psi_group_change(group, cpu, clear, set);
}

void psi_memstall_tick(struct task_struct *task, int cpu)
{
	struct psi_group *group;
	void *iter = NULL;

	while ((group = iterate_groups(task, &iter))) {
		struct psi_group_cpu *groupc;

		groupc = per_cpu_ptr(group->pcpu, cpu);
		write_seqcount_begin(&groupc->seq);
		record_times(groupc, cpu, true);
		write_seqcount_end(&groupc->seq);
	}
}

/**
 * psi_memstall_enter - mark the beginning of a memory stall section
 * @flags: flags to handle nested sections
 *
 * Marks the calling task as being stalled due to a lack of memory,
 * such as waiting for a refault or performing reclaim.
 */
void psi_memstall_enter(unsigned long *flags)
{
	struct rq_flags rf;
	struct rq *rq;

	if (psi_disabled)
		return;

	*flags = current->flags & PF_MEMSTALL;
	if (*flags)
		return;
	/*
	 * PF_MEMSTALL setting & accounting needs to be atomic wrt
	 * changes to the task's scheduling state, otherwise we can
	 * race with CPU migration.
	 */
	rq = this_rq_lock_irq(&rf);

	current->flags |= PF_MEMSTALL;
	psi_task_change(current, 0, TSK_MEMSTALL);

	rq_unlock_irq(rq, &rf);
}

/**
 * psi_memstall_leave - mark the end of a memory stall section
 * @flags: flags to handle nested memdelay sections
 *
 * Marks the calling task as no longer stalled due to lack of memory.
 */
void psi_memstall_leave(unsigned long *flags)
{
	struct rq_flags rf;
	struct rq *rq;

	if (psi_disabled)
		return;

	if (*flags)
		return;
	/*
	 * PF_MEMSTALL clearing & accounting needs to be atomic wrt
	 * changes to the task's scheduling state, otherwise we could
	 * race with CPU migration.
	 */
	rq = this_rq_lock_irq(&rf);

	current->flags &= ~PF_MEMSTALL;
	psi_task_change(current, TSK_MEMSTALL, 0);

	rq_unlock_irq(rq, &rf);
}
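
/*
 * A minimal usage sketch of the two helpers above (do_reclaim_work()
 * is a made-up placeholder, not a real kernel function):
 *
 *	unsigned long pflags;
 *
 *	psi_memstall_enter(&pflags);
 *	do_reclaim_work();	<- the delay here is accounted as a memstall
 *	psi_memstall_leave(&pflags);
 */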

#ifdef CONFIG_CGROUPS
int psi_cgroup_alloc(struct cgroup *cgroup)
{
	if (psi_disabled)
		return 0;

	cgroup->psi.pcpu = alloc_percpu(struct psi_group_cpu);
	if (!cgroup->psi.pcpu)
		return -ENOMEM;
	group_init(&cgroup->psi);
	return 0;
}

void psi_cgroup_free(struct cgroup *cgroup)
{
	if (psi_disabled)
		return;

	cancel_delayed_work_sync(&cgroup->psi.clock_work);
	free_percpu(cgroup->psi.pcpu);
}

/**
 * cgroup_move_task - move task to a different cgroup
 * @task: the task
 * @to: the target css_set
 *
 * Move task to a new cgroup and safely migrate its associated stall
 * state between the different groups.
 *
 * This function acquires the task's rq lock to lock out concurrent
 * changes to the task's scheduling state and - in case the task is
 * running - concurrent changes to its stall state.
 */
void cgroup_move_task(struct task_struct *task, struct css_set *to)
{
	bool move_psi = !psi_disabled;
	unsigned int task_flags = 0;
	struct rq_flags rf;
	struct rq *rq;

	if (move_psi) {
		rq = task_rq_lock(task, &rf);

		if (task_on_rq_queued(task))
			task_flags = TSK_RUNNING;
		else if (task->in_iowait)
			task_flags = TSK_IOWAIT;

		if (task->flags & PF_MEMSTALL)
			task_flags |= TSK_MEMSTALL;

		if (task_flags)
			psi_task_change(task, task_flags, 0);
	}

	/*
	 * Lame to do this here, but the scheduler cannot be locked
	 * from the outside, so we move cgroups from inside sched/.
	 */
	rcu_assign_pointer(task->cgroups, to);

	if (move_psi) {
		if (task_flags)
			psi_task_change(task, 0, task_flags);

		task_rq_unlock(rq, task, &rf);
	}
}
#endif /* CONFIG_CGROUPS */

int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res)
{
	int full;

	if (psi_disabled)
		return -EOPNOTSUPP;

	update_stats(group);

	for (full = 0; full < 2 - (res == PSI_CPU); full++) {
		unsigned long avg[3];
		u64 total;
		int w;

		for (w = 0; w < 3; w++)
			avg[w] = group->avg[res * 2 + full][w];
		total = div_u64(group->total[res * 2 + full], NSEC_PER_USEC);

		seq_printf(m, "%s avg10=%lu.%02lu avg60=%lu.%02lu avg300=%lu.%02lu total=%llu\n",
			   full ? "full" : "some",
			   LOAD_INT(avg[0]), LOAD_FRAC(avg[0]),
			   LOAD_INT(avg[1]), LOAD_FRAC(avg[1]),
			   LOAD_INT(avg[2]), LOAD_FRAC(avg[2]),
			   total);
	}

	return 0;
}
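
/*
 * The loop above emits one line per aggregate state ("some" and,
 * except for CPU, "full"), so e.g. /proc/pressure/memory reads like
 * this (the values are purely illustrative):
 *
 *	some avg10=0.22 avg60=0.17 avg300=1.11 total=927587
 *	full avg10=0.00 avg60=0.13 avg300=0.96 total=418851
 */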

static int psi_io_show(struct seq_file *m, void *v)
{
	return psi_show(m, &psi_system, PSI_IO);
}

static int psi_memory_show(struct seq_file *m, void *v)
{
	return psi_show(m, &psi_system, PSI_MEM);
}

static int psi_cpu_show(struct seq_file *m, void *v)
{
	return psi_show(m, &psi_system, PSI_CPU);
}

static int psi_io_open(struct inode *inode, struct file *file)
{
	return single_open(file, psi_io_show, NULL);
}

static int psi_memory_open(struct inode *inode, struct file *file)
{
	return single_open(file, psi_memory_show, NULL);
}

static int psi_cpu_open(struct inode *inode, struct file *file)
{
	return single_open(file, psi_cpu_show, NULL);
}

static const struct file_operations psi_io_fops = {
	.open           = psi_io_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = single_release,
};

static const struct file_operations psi_memory_fops = {
	.open           = psi_memory_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = single_release,
};

static const struct file_operations psi_cpu_fops = {
	.open           = psi_cpu_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = single_release,
};

static int __init psi_proc_init(void)
{
	proc_mkdir("pressure", NULL);
	proc_create("pressure/io", 0, NULL, &psi_io_fops);
	proc_create("pressure/memory", 0, NULL, &psi_memory_fops);
	proc_create("pressure/cpu", 0, NULL, &psi_cpu_fops);
	return 0;
}
module_init(psi_proc_init);