/*
 * kernel/sched/loadavg.c
 *
 * This file contains the magic bits required to compute the global loadavg
 * figure. It's a silly number but people think it's important. We go through
 * great pains to make it work on big machines and tickless kernels.
 */

#include <linux/export.h>
#include <linux/sched/loadavg.h>

#include "sched.h"

/*
 * Global load-average calculations
 *
 * We take a distributed and async approach to calculating the global load-avg
 * in order to minimize overhead.
 *
 * The global load average is an exponentially decaying average of nr_running +
 * nr_uninterruptible.
 *
 * Once every LOAD_FREQ:
 *
 *   nr_active = 0;
 *   for_each_possible_cpu(cpu)
 *       nr_active += cpu_of(cpu)->nr_running + cpu_of(cpu)->nr_uninterruptible;
 *
 *   avenrun[n] = avenrun[n] * exp_n + nr_active * (1 - exp_n)
 *
 * Due to a number of reasons the above turns into the mess below:
 *
 *  - for_each_possible_cpu() is prohibitively expensive on machines with a
 *    serious number of cpus, therefore we need to take a distributed approach
 *    to calculating nr_active.
 *
 *        \Sum_i x_i(t) = \Sum_i x_i(t) - x_i(t_0)  | x_i(t_0) := 0
 *                      = \Sum_i { \Sum_{j=1} x_i(t_j) - x_i(t_{j-1}) }
 *
 *    So assuming nr_active := 0 when we start out -- true per definition, we
 *    can simply take per-cpu deltas and fold those into a global accumulate
 *    to obtain the same result. See calc_load_fold_active().
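 *
 *    For example: between two samples CPU0's count goes 2 -> 5 (a delta of
 *    +3) while CPU1's goes 4 -> 1 (a delta of -3); folding both deltas into
 *    the global accumulator leaves it unchanged, exactly as the direct
 *    for_each_possible_cpu() sum would be.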
 *
 *    Furthermore, in order to avoid synchronizing all per-cpu delta folding
 *    across the machine, we assume 10 ticks is sufficient time for every
 *    cpu to have completed this task.
 *
 *    This places an upper-bound on the IRQ-off latency of the machine. Then
 *    again, being late doesn't lose the delta, just wrecks the sample.
 *
 *  - cpu_rq()->nr_uninterruptible isn't accurately tracked per-cpu because
 *    this would add another cross-cpu cacheline miss and atomic operation
 *    to the wakeup path. Instead we increment on whatever cpu the task ran
 *    when it went into uninterruptible state and decrement on whatever cpu
 *    did the wakeup. This means that only the sum of nr_uninterruptible over
 *    all cpus yields the correct result.
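 *
 *    For example: a task blocking on CPU0 bumps CPU0's nr_uninterruptible
 *    to +1; the wakeup from CPU1 drops CPU1's to -1. Either counter is
 *    meaningless on its own -- it may even go negative -- but their sum is
 *    0 again once the task is runnable.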
 *
 * This covers the NO_HZ=n code; for extra headaches, see the comment below.
 */

/* Variables and functions for calc_load */
atomic_long_t calc_load_tasks;
unsigned long calc_load_update;
unsigned long avenrun[3];
EXPORT_SYMBOL(avenrun); /* should be removed */

/**
 * get_avenrun - get the load average array
 * @loads:	pointer to dest load array
 * @offset:	offset to add
 * @shift:	shift count to shift the result left
 *
 * These values are estimates at best, so no need for locking.
 */
void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
{
	loads[0] = (avenrun[0] + offset) << shift;
	loads[1] = (avenrun[1] + offset) << shift;
	loads[2] = (avenrun[2] + offset) << shift;
}

long calc_load_fold_active(struct rq *this_rq, long adjust)
{
	long nr_active, delta = 0;

	nr_active = this_rq->nr_running - adjust;
	nr_active += (long)this_rq->nr_uninterruptible;

	if (nr_active != this_rq->calc_load_active) {
		delta = nr_active - this_rq->calc_load_active;
		this_rq->calc_load_active = nr_active;
	}

	return delta;
}

/*
 * a1 = a0 * e + a * (1 - e)
 */
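
/*
 * In fixed-point terms (FSHIFT = 11, so FIXED_1 = 2048, and EXP_1 = 1884
 * from <linux/sched/loadavg.h>): a 1-minute figure of 1.00 (2048) with no
 * active tasks decays to 2048 * 1884 / 2048 = 1884, i.e. ~0.92, after a
 * single LOAD_FREQ (5 sec) interval.
 */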
static unsigned long
calc_load(unsigned long load, unsigned long exp, unsigned long active)
{
	unsigned long newload;

	newload = load * exp + active * (FIXED_1 - exp);
	/*
	 * Round up when the load is rising, down when it is falling, so a
	 * fully idle machine can actually decay to 0.
	 */
	if (active >= load)
		newload += FIXED_1-1;

	return newload / FIXED_1;
}

#ifdef CONFIG_NO_HZ_COMMON

/*
 * Handle NO_HZ for the global load-average.
 *
 * Since the above described distributed algorithm to compute the global
 * load-average relies on per-cpu sampling from the tick, it is affected by
 * NO_HZ.
 *
 * The basic idea is to fold the nr_active delta into a global idle-delta upon
 * entering NO_HZ state such that we can include this as an 'extra' cpu delta
 * when we read the global state.
 *
 * Obviously reality has to ruin such a delightfully simple scheme:
 *
 *  - When we go NO_HZ idle during the window, we can negate our sample
 *    contribution, causing under-accounting.
 *
 *    We avoid this by keeping two idle-delta counters and flipping them
 *    when the window starts, thus separating old and new NO_HZ load.
 *
 *    The only trick is the slight shift in index flip for read vs write.
 *
 *        0s            5s            10s           15s
 *          +10           +10           +10           +10
 *        |-|-----------|-|-----------|-|-----------|-|
 *        r:0 0 1           1 0           0 1           1 0
 *        w:0 1 1           0 0           1 1           0 0
 *
 *    This ensures we'll fold the old idle contribution in this window while
 *    accumulating the new one.
 *
 *  - When we wake up from NO_HZ idle during the window, we push up our
 *    contribution, since we effectively move our sample point to a known
 *    busy state.
 *
 *    This is solved by pushing the window forward, and thus skipping the
 *    sample, for this cpu (effectively using the idle-delta for this cpu which
 *    was in effect at the time the window opened). This also solves the issue
 *    of having to deal with a cpu having been in NO_HZ idle for multiple
 *    LOAD_FREQ intervals.
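 *
 *    For example: a cpu that went NO_HZ idle at t = 2s folded its delta at
 *    that point. If it wakes at t = 5s + 5 ticks, inside the sample window,
 *    it advances its private calc_load_update to t = 10s and never samples
 *    the t = 5s window; its contribution to that window is the idle-delta
 *    folded at t = 2s.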
 *
 * When making the ILB scale, we should try to pull this in as well.
 */

static atomic_long_t calc_load_idle[2];
static int calc_load_idx;

static inline int calc_load_write_idx(void)
{
	int idx = calc_load_idx;

	/*
	 * See calc_global_nohz(), if we observe the new index, we also
	 * need to observe the new update time.
	 */
	smp_rmb();

	/*
	 * If the folding window started, make sure we start writing in the
	 * next idle-delta.
	 */
	if (!time_before(jiffies, READ_ONCE(calc_load_update)))
		idx++;

	return idx & 1;
}

static inline int calc_load_read_idx(void)
{
	return calc_load_idx & 1;
}

void calc_load_enter_idle(void)
{
	struct rq *this_rq = this_rq();
	long delta;

	/*
	 * We're going into NO_HZ mode, if there's any pending delta, fold it
	 * into the pending idle delta.
	 */
	delta = calc_load_fold_active(this_rq, 0);
	if (delta) {
		int idx = calc_load_write_idx();

		atomic_long_add(delta, &calc_load_idle[idx]);
	}
}

void calc_load_exit_idle(void)
{
	struct rq *this_rq = this_rq();

	/*
	 * If we're still before the pending sample window, we're done.
	 */
	this_rq->calc_load_update = READ_ONCE(calc_load_update);
	if (time_before(jiffies, this_rq->calc_load_update))
		return;

	/*
	 * We woke inside or after the sample window, this means we're already
	 * accounted through the nohz accounting, so skip the entire deal and
	 * sync up for the next window.
	 */
	if (time_before(jiffies, this_rq->calc_load_update + 10))
		this_rq->calc_load_update += LOAD_FREQ;
}

static long calc_load_fold_idle(void)
{
	int idx = calc_load_read_idx();
	long delta = 0;

	/*
	 * A read/xchg race with a concurrent writer is harmless: a delta
	 * that slips in between is simply folded on the next sample.
	 */
	if (atomic_long_read(&calc_load_idle[idx]))
		delta = atomic_long_xchg(&calc_load_idle[idx], 0);

	return delta;
}

/**
 * fixed_power_int - compute: x^n, in O(log n) time
 *
 * @x:         base of the power
 * @frac_bits: fractional bits of @x
 * @n:         power to raise @x to.
 *
 * By exploiting the relation between the definition of the natural power
 * function: x^n := x*x*...*x (x multiplied by itself for n times), and
 * the binary encoding of numbers used by computers: n := \Sum n_i * 2^i,
 * (where: n_i \elem {0, 1}, the binary vector representing n),
 * we find: x^n := x^(\Sum n_i * 2^i) := \Prod x^(n_i * 2^i), which is
 * of course trivially computable in O(log_2 n), the length of our binary
 * vector.
 */
static unsigned long
fixed_power_int(unsigned long x, unsigned int frac_bits, unsigned int n)
{
	unsigned long result = 1UL << frac_bits;	/* 1.0 in fixed-point */

	if (n) {
		for (;;) {
			if (n & 1) {	/* fold this bit's power of x in */
				result *= x;
				result += 1UL << (frac_bits - 1);	/* round */
				result >>= frac_bits;
			}
			n >>= 1;
			if (!n)
				break;
			x *= x;		/* x^(2^i) -> x^(2^(i+1)) */
			x += 1UL << (frac_bits - 1);	/* round */
			x >>= frac_bits;
		}
	}

	return result;
}
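
/*
 * E.g. n = 13 = 0b1101: x^13 = x^8 * x^4 * x^1, computed with three
 * squarings of x plus one multiply into the result per set bit, instead
 * of twelve naive multiplies.
 */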

/*
 * a1 = a0 * e + a * (1 - e)
 *
 * a2 = a1 * e + a * (1 - e)
 *    = (a0 * e + a * (1 - e)) * e + a * (1 - e)
 *    = a0 * e^2 + a * (1 - e) * (1 + e)
 *
 * a3 = a2 * e + a * (1 - e)
 *    = (a0 * e^2 + a * (1 - e) * (1 + e)) * e + a * (1 - e)
 *    = a0 * e^3 + a * (1 - e) * (1 + e + e^2)
 *
 *  ...
 *
 * an = a0 * e^n + a * (1 - e) * (1 + e + ... + e^(n-1))   [1]
 *    = a0 * e^n + a * (1 - e) * (1 - e^n)/(1 - e)
 *    = a0 * e^n + a * (1 - e^n)
 *
 * [1] application of the geometric series:
 *
 *              n         1 - x^(n+1)
 *     S_n := \Sum x^i = -------------
 *             i=0          1 - x
 */
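
/*
 * This is what lets calc_global_nohz() below catch up on n missed sample
 * windows with a single call: decay by e^n, computed in fixed-point by
 * fixed_power_int().
 */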
static unsigned long
calc_load_n(unsigned long load, unsigned long exp,
	    unsigned long active, unsigned int n)
{
	return calc_load(load, fixed_power_int(exp, FSHIFT, n), active);
}

/*
 * NO_HZ can leave us missing all per-cpu ticks calling
 * calc_global_load_tick(), but since an idle CPU folds its delta into
 * calc_load_idle per calc_load_enter_idle(), all we need to do is fold
 * in the pending idle delta if our idle period crossed a load cycle boundary.
 *
 * Once we've updated the global active value, we need to apply the exponential
 * weights adjusted to the number of cycles missed.
 */
static void calc_global_nohz(void)
{
	unsigned long sample_window;
	long delta, active, n;

	sample_window = READ_ONCE(calc_load_update);
	if (!time_before(jiffies, sample_window + 10)) {
		/*
		 * Catch-up, fold however many we are behind still
		 */
		delta = jiffies - sample_window - 10;
		n = 1 + (delta / LOAD_FREQ);

		active = atomic_long_read(&calc_load_tasks);
		active = active > 0 ? active * FIXED_1 : 0;

		avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
		avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
		avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);

		WRITE_ONCE(calc_load_update, sample_window + n * LOAD_FREQ);
	}

	/*
	 * Flip the idle index...
	 *
	 * Make sure we first write the new time then flip the index, so that
	 * calc_load_write_idx() will see the new time when it reads the new
	 * index; this avoids a double flip messing things up.
	 */
	smp_wmb();
	calc_load_idx++;
}

#else /* !CONFIG_NO_HZ_COMMON */

static inline long calc_load_fold_idle(void) { return 0; }
static inline void calc_global_nohz(void) { }

#endif /* CONFIG_NO_HZ_COMMON */

/*
 * calc_global_load - update the avenrun load estimates 10 ticks after the
 * CPUs have updated calc_load_tasks.
 *
 * Called from the global timer code.
 */
void calc_global_load(unsigned long ticks)
{
	unsigned long sample_window;
	long active, delta;

	sample_window = READ_ONCE(calc_load_update);
	if (time_before(jiffies, sample_window + 10))
		return;

	/*
	 * Fold the 'old' idle-delta to include all NO_HZ cpus.
	 */
	delta = calc_load_fold_idle();
	if (delta)
		atomic_long_add(delta, &calc_load_tasks);

	active = atomic_long_read(&calc_load_tasks);
	active = active > 0 ? active * FIXED_1 : 0;

	avenrun[0] = calc_load(avenrun[0], EXP_1, active);
	avenrun[1] = calc_load(avenrun[1], EXP_5, active);
	avenrun[2] = calc_load(avenrun[2], EXP_15, active);

	WRITE_ONCE(calc_load_update, sample_window + LOAD_FREQ);

	/*
	 * In case we idled for multiple LOAD_FREQ intervals, catch up in bulk.
	 */
	calc_global_nohz();
}

/*
 * Called from scheduler_tick() to periodically update this CPU's
 * active count.
 */
void calc_global_load_tick(struct rq *this_rq)
{
	long delta;

	if (time_before(jiffies, this_rq->calc_load_update))
		return;

	delta = calc_load_fold_active(this_rq, 0);
	if (delta)
		atomic_long_add(delta, &calc_load_tasks);

	this_rq->calc_load_update += LOAD_FREQ;
}