/*
 * menu.c - the menu idle governor
 *
 * Copyright (C) 2006-2007 Adam Belay <abelay@novell.com>
 * Copyright (C) 2009 Intel Corporation
 * Arjan van de Ven <arjan@linux.intel.com>
 *
 * This code is licensed under the GPL version 2 as described
 * in the COPYING file that accompanies the Linux Kernel.
 */
#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/sched.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/stat.h>
#include <linux/math64.h>
/*
 * Please note when changing the tuning values:
 * If (MAX_INTERESTING-1) * RESOLUTION > UINT_MAX, the result of
 * a scaling operation multiplication may overflow on 32 bit platforms.
 * In that case, #define RESOLUTION as ULL to get 64 bit result:
 * #define RESOLUTION 1024ULL
 *
 * The default values do not overflow.
 */
#define BUCKETS 12
#define INTERVAL_SHIFT 3
#define INTERVALS (1UL << INTERVAL_SHIFT)
#define RESOLUTION 1024
#define DECAY 8
#define MAX_INTERESTING 50000
/*
 * Concepts and ideas behind the menu governor
 *
 * For the menu governor, there are 3 decision factors for picking a C
 * state:
 * 1) Energy break even point
 * 2) Performance impact
 * 3) Latency tolerance (from pmqos infrastructure)
 * These three factors are treated independently.
 *
 * Energy break even point
 * -----------------------
 * C state entry and exit have an energy cost, and a certain amount of time in
 * the C state is required to actually break even on this cost. CPUIDLE
 * provides us this duration in the "target_residency" field. So all that we
 * need is a good prediction of how long we'll be idle. Like the traditional
 * menu governor, we start with the actual known "next timer event" time.
 *
 * Since there are other sources of wakeups (interrupts for example) than
 * the next timer event, this estimation is rather optimistic. To get a
 * more realistic estimate, a correction factor is applied to the estimate,
 * that is based on historic behavior. For example, if in the past the actual
 * duration always was 50% of the next timer tick, the correction factor will
 * be 0.5.
 *
 * menu uses a running average for this correction factor, however it uses a
 * set of factors, not just a single factor. This stems from the realization
 * that the ratio is dependent on the order of magnitude of the expected
 * duration; if we expect 500 milliseconds of idle time the likelihood of
 * getting an interrupt very early is much higher than if we expect 50 micro
 * seconds of idle time. A second independent factor that has a big impact on
 * the actual factor is whether there is (disk) IO outstanding or not.
 * (as a special twist, we consider every sleep longer than 50 milliseconds
 * as perfect; there are no power gains for sleeping longer than this)
 *
 * For these two reasons we keep an array of 12 independent factors, that gets
 * indexed based on the magnitude of the expected duration as well as the
 * "is IO outstanding" property.
 *
 * Repeatable-interval-detector
 * ----------------------------
 * There are some cases where "next timer" is a completely unusable predictor:
 * Those cases where the interval is fixed, for example due to hardware
 * interrupt mitigation, but also due to fixed transfer rate devices such as
 * those using USB.
 * For this, we use a different predictor: We track the duration of the last 8
 * intervals and if the standard deviation of these 8 intervals is below a
 * threshold value, we use the average of these intervals as prediction.
 *
 * Limiting Performance Impact
 * ---------------------------
 * C states, especially those with large exit latencies, can have a real
 * noticeable impact on workloads, which is not acceptable for most sysadmins,
 * and in addition, less performance has a power price of its own.
 *
 * As a general rule of thumb, menu assumes that the following heuristic
 * holds:
 *     The busier the system, the less impact of C states is acceptable
 *
 * This rule-of-thumb is implemented using a performance-multiplier:
 * If the exit latency times the performance multiplier is longer than
 * the predicted duration, the C state is not considered a candidate
 * for selection due to a too high performance impact. So the higher
 * this multiplier is, the longer we need to be idle to pick a deep C
 * state, and thus the less likely a busy CPU will hit such a deep
 * C state.
 *
 * Two factors are used in determining this multiplier:
 * a value of 10 is added for each point of "per cpu load average" we have.
 * a value of 5 points is added for each process that is waiting for
 * IO on this CPU.
 * (these values are experimentally determined)
 *
 * The load average factor gives a longer term (few seconds) input to the
 * decision, while the iowait value gives a cpu local instantaneous input.
 * The iowait factor may look low, but realize that this is also already
 * represented in the system load average.
 */
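/*
 * Illustrative sketch (not part of the original governor; the helper name is
 * hypothetical): how a learned correction factor scales the timer-based
 * estimate.  The factor is fixed point, in units of RESOLUTION * DECAY, so a
 * value of RESOLUTION * DECAY means "trust the next timer event as-is" and
 * half of that means "we historically slept for about 50% of the estimate".
 * The governor performs this computation inline in menu_select().
 */
static inline unsigned int example_scaled_prediction(unsigned int next_timer_us,
						     unsigned int factor)
{
	return DIV_ROUND_CLOSEST_ULL((uint64_t)next_timer_us * factor,
				     RESOLUTION * DECAY);
}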
struct menu_device {
	int		last_state_idx;
	int		needs_update;
	int		tick_wakeup;

	unsigned int	next_timer_us;
	unsigned int	predicted_us;
	unsigned int	bucket;
	unsigned int	correction_factor[BUCKETS];
	unsigned int	intervals[INTERVALS];
	int		interval_ptr;
};
#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
static inline int get_loadavg(unsigned long load)
{
	return LOAD_INT(load) * 10 + LOAD_FRAC(load) / 10;
}
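/*
 * Worked example (illustrative, not from the original source): FSHIFT is 11,
 * so a load average of 2.50 is stored as 2.5 * 2048 == 5120.  Then
 * get_loadavg(5120) == 2 * 10 + 50 / 10 == 25, i.e. the load average
 * expressed in tenths, which feeds the performance multiplier below.
 */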
static inline int which_bucket(unsigned int duration, unsigned long nr_iowaiters)
{
	int bucket = 0;

	/*
	 * We keep two groups of stats; one with IO pending, one
	 * without.
	 * This allows us to calculate
	 * E(duration)|iowait
	 */
	if (nr_iowaiters)
		bucket = BUCKETS/2;

	if (duration < 10)
		return bucket;
	if (duration < 100)
		return bucket + 1;
	if (duration < 1000)
		return bucket + 2;
	if (duration < 10000)
		return bucket + 3;
	if (duration < 100000)
		return bucket + 4;
	return bucket + 5;
}
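/*
 * Example (illustrative): with IO outstanding (nr_iowaiters > 0) and an
 * expected duration of 50000 us, which_bucket() returns BUCKETS/2 + 4,
 * i.e. the "tens of milliseconds, IO pending" correction-factor slot.
 */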
/*
 * Return a multiplier for the exit latency that is intended
 * to take performance requirements into account.
 * The more performance critical we estimate the system
 * to be, the higher this multiplier, and thus the higher
 * the barrier to go to an expensive C state.
 */
static inline int performance_multiplier(unsigned long nr_iowaiters, unsigned long load)
{
	int mult = 1;

	/* for higher loadavg, we are more reluctant */
	mult += 2 * get_loadavg(load);

	/* for IO wait tasks (per cpu!) we add 5x each */
	mult += 10 * nr_iowaiters;

	return mult;
}
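/*
 * Worked example (illustrative): with a per-cpu load average of 1.00
 * (get_loadavg() == 10) and two tasks waiting for IO on this CPU, the
 * multiplier is 1 + 2 * 10 + 10 * 2 == 41, so a C state is only a
 * candidate if its exit latency times 41 still fits within the
 * predicted idle duration.
 */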
static DEFINE_PER_CPU(struct menu_device, menu_devices);

static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);
/*
 * Try detecting repeating patterns by keeping track of the last 8
 * intervals, and checking if the standard deviation of that set
 * of points is below a threshold. If it is... then use the
 * average of these 8 points as the estimated value.
 */
static unsigned int get_typical_interval(struct menu_device *data)
{
	int i, divisor;
	unsigned int max, thresh, avg;
	uint64_t sum, variance;

	thresh = UINT_MAX; /* Discard outliers above this value */

again:

	/* First calculate the average of past intervals */
	max = 0;
	sum = 0;
	divisor = 0;
	for (i = 0; i < INTERVALS; i++) {
		unsigned int value = data->intervals[i];
		if (value <= thresh) {
			sum += value;
			divisor++;
			if (value > max)
				max = value;
		}
	}
	if (divisor == INTERVALS)
		avg = sum >> INTERVAL_SHIFT;
	else
		avg = div_u64(sum, divisor);

	/* Then try to determine variance */
	variance = 0;
	for (i = 0; i < INTERVALS; i++) {
		unsigned int value = data->intervals[i];
		if (value <= thresh) {
			int64_t diff = (int64_t)value - avg;
			variance += diff * diff;
		}
	}
	if (divisor == INTERVALS)
		variance >>= INTERVAL_SHIFT;
	else
		do_div(variance, divisor);

	/*
	 * The typical interval is obtained when standard deviation is
	 * small (stddev <= 20 us, variance <= 400 us^2) or standard
	 * deviation is small compared to the average interval (avg >
	 * 6*stddev, avg^2 > 36*variance). The average is smaller than
	 * UINT_MAX aka U32_MAX, so computing its square does not
	 * overflow a u64. We simply reject this candidate average if
	 * the standard deviation is greater than 715 s (which is
	 * rather unlikely).
	 *
	 * Use this result only if there is no timer to wake us up sooner.
	 */
	if (likely(variance <= U64_MAX/36)) {
		if ((((u64)avg*avg > variance*36) && (divisor * 4 >= INTERVALS * 3))
							|| variance <= 400) {
			return avg;
		}
	}

	/*
	 * If we have outliers to the upside in our distribution, discard
	 * those by setting the threshold to exclude these outliers, then
	 * calculate the average and standard deviation again. Once we get
	 * down to the bottom 3/4 of our samples, stop excluding samples.
	 *
	 * This can deal with workloads that have long pauses interspersed
	 * with sporadic activity with a bunch of short pauses.
	 */
	if ((divisor * 4) <= INTERVALS * 3)
		return UINT_MAX;

	thresh = max - 1;
	goto again;
}
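/*
 * Example (illustrative): with the last 8 intervals being
 * {98, 101, 99, 100, 102, 100, 97, 103} us, the average is 100 us and the
 * variance is well under 400 us^2, so get_typical_interval() predicts
 * 100 us of idle time regardless of how far away the next timer event is.
 */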
/**
 * menu_select - selects the next idle state to enter
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 * @stop_tick: indication on whether or not to stop the tick
 */
static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
		       bool *stop_tick)
{
	struct menu_device *data = this_cpu_ptr(&menu_devices);
	int latency_req = cpuidle_governor_latency_req(dev->cpu);
	int i;
	int first_idx;
	int idx;
	unsigned int interactivity_req;
	unsigned int expected_interval;
	unsigned long nr_iowaiters, cpu_load;
	ktime_t delta_next;

	if (data->needs_update) {
		menu_update(drv, dev);
		data->needs_update = 0;
	}

	/* Special case when user has set very strict latency requirement */
	if (unlikely(latency_req == 0)) {
		*stop_tick = false;
		return 0;
	}

	/* determine the expected residency time, round up */
	data->next_timer_us = ktime_to_us(tick_nohz_get_sleep_length(&delta_next));

	get_iowait_load(&nr_iowaiters, &cpu_load);
	data->bucket = which_bucket(data->next_timer_us, nr_iowaiters);

	/*
	 * Force the result of multiplication to be 64 bits even if both
	 * operands are 32 bits.
	 * Make sure to round up for half microseconds.
	 */
	data->predicted_us = DIV_ROUND_CLOSEST_ULL((uint64_t)data->next_timer_us *
					 data->correction_factor[data->bucket],
					 RESOLUTION * DECAY);

	expected_interval = get_typical_interval(data);
	expected_interval = min(expected_interval, data->next_timer_us);

	first_idx = 0;
	if (drv->states[0].flags & CPUIDLE_FLAG_POLLING) {
		struct cpuidle_state *s = &drv->states[1];
		unsigned int polling_threshold;

		/*
		 * Default to a physical idle state, not to busy polling, unless
		 * a timer is going to trigger really really soon.
		 */
		polling_threshold = max_t(unsigned int, 20, s->target_residency);
		if (data->next_timer_us > polling_threshold &&
		    latency_req > s->exit_latency && !s->disabled &&
		    !dev->states_usage[1].disable)
			first_idx = 1;
	}

	/*
	 * Use the lowest expected idle interval to pick the idle state.
	 */
	data->predicted_us = min(data->predicted_us, expected_interval);

	if (tick_nohz_tick_stopped()) {
		/*
		 * If the tick is already stopped, the cost of possible short
		 * idle duration misprediction is much higher, because the CPU
		 * may be stuck in a shallow idle state for a long time as a
		 * result of it. In that case say we might mispredict and use
		 * the known time till the closest timer event for the idle
		 * state selection.
		 */
		if (data->predicted_us < TICK_USEC)
			data->predicted_us = ktime_to_us(delta_next);
	} else {
		/*
		 * Use the performance multiplier and the user-configurable
		 * latency_req to determine the maximum exit latency.
		 */
		interactivity_req = data->predicted_us / performance_multiplier(nr_iowaiters, cpu_load);
		if (latency_req > interactivity_req)
			latency_req = interactivity_req;
	}

	expected_interval = data->predicted_us;
	/*
	 * Find the idle state with the lowest power while satisfying
	 * our constraints.
	 */
	idx = -1;
	for (i = first_idx; i < drv->state_count; i++) {
		struct cpuidle_state *s = &drv->states[i];
		struct cpuidle_state_usage *su = &dev->states_usage[i];

		if (s->disabled || su->disable)
			continue;

		if (idx == -1)
			idx = i; /* first enabled state */

		if (s->target_residency > data->predicted_us) {
			if (data->predicted_us < TICK_USEC)
				break;

			if (!tick_nohz_tick_stopped()) {
				/*
				 * If the state selected so far is shallow,
				 * waking up early won't hurt, so retain the
				 * tick in that case and let the governor run
				 * again in the next iteration of the loop.
				 */
				expected_interval = drv->states[idx].target_residency;
				break;
			}

			/*
			 * If the state selected so far is shallow and this
			 * state's target residency matches the time till the
			 * closest timer event, select this one to avoid getting
			 * stuck in the shallow one for too long.
			 */
			if (drv->states[idx].target_residency < TICK_USEC &&
			    s->target_residency <= ktime_to_us(delta_next))
				idx = i;

			goto out;
		}
		if (s->exit_latency > latency_req) {
			/*
			 * If we break out of the loop for latency reasons, use
			 * the target residency of the selected state as the
			 * expected idle duration so that the tick is retained
			 * as long as that target residency is low enough.
			 */
			expected_interval = drv->states[idx].target_residency;
			break;
		}
		idx = i;
	}

	if (idx == -1)
		idx = 0; /* No states enabled. Must use 0. */

	/*
	 * Don't stop the tick if the selected state is a polling one or if the
	 * expected idle duration is shorter than the tick period length.
	 */
	if (((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) ||
	     expected_interval < TICK_USEC) && !tick_nohz_tick_stopped()) {
		unsigned int delta_next_us = ktime_to_us(delta_next);

		*stop_tick = false;

		if (idx > 0 && drv->states[idx].target_residency > delta_next_us) {
			/*
			 * The tick is not going to be stopped and the target
			 * residency of the state to be returned is not within
			 * the time until the next timer event including the
			 * tick, so try to correct that.
			 */
			for (i = idx - 1; i >= 0; i--) {
				if (drv->states[i].disabled ||
				    dev->states_usage[i].disable)
					continue;

				idx = i;
				if (drv->states[i].target_residency <= delta_next_us)
					break;
			}
		}
	}

out:
	data->last_state_idx = idx;

	return data->last_state_idx;
}
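/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * the two per-state checks menu_select() applies when walking the state
 * table, shown in isolation.  A state is a candidate only if it is deep
 * enough to amortize its entry/exit cost within the predicted idle time and
 * shallow enough to respect the (possibly multiplier-reduced) latency limit.
 */
static inline bool example_state_is_candidate(const struct cpuidle_state *s,
					      unsigned int predicted_us,
					      int latency_req)
{
	return s->target_residency <= predicted_us &&
	       s->exit_latency <= latency_req;
}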
/**
 * menu_reflect - records that data structures need update
 * @dev: the CPU
 * @index: the index of actual entered state
 *
 * NOTE: it's important to be fast here because this operation will add to
 *       the overall exit latency.
 */
static void menu_reflect(struct cpuidle_device *dev, int index)
{
	struct menu_device *data = this_cpu_ptr(&menu_devices);

	data->last_state_idx = index;
	data->needs_update = 1;
	data->tick_wakeup = tick_nohz_idle_got_tick();
}
/**
 * menu_update - attempts to guess what happened after entry
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 */
static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	struct menu_device *data = this_cpu_ptr(&menu_devices);
	int last_idx = data->last_state_idx;
	struct cpuidle_state *target = &drv->states[last_idx];
	unsigned int measured_us;
	unsigned int new_factor;

	/*
	 * Try to figure out how much time passed between entry to low
	 * power state and occurrence of the wakeup event.
	 *
	 * If the entered idle state didn't support residency measurements,
	 * we use them anyway if they are short, and if long,
	 * truncate to the whole expected time.
	 *
	 * Any measured amount of time will include the exit latency.
	 * Since we are interested in when the wakeup begun, not when it
	 * was completed, we must subtract the exit latency. However, if
	 * the measured amount of time is less than the exit latency,
	 * assume the state was never reached and the exit latency is 0.
	 */

	if (data->tick_wakeup && data->next_timer_us > TICK_USEC) {
		/*
		 * The nohz code said that there wouldn't be any events within
		 * the tick boundary (if the tick was stopped), but the idle
		 * duration predictor had a differing opinion. Since the CPU
		 * was woken up by a tick (that wasn't stopped after all), the
		 * predictor was not quite right, so assume that the CPU could
		 * have been idle long (but not forever) to help the idle
		 * duration predictor do a better job next time.
		 */
		measured_us = 9 * MAX_INTERESTING / 10;
	} else if ((drv->states[last_idx].flags & CPUIDLE_FLAG_POLLING) &&
		   dev->poll_time_limit) {
		/*
		 * The CPU exited the "polling" state due to a time limit, so
		 * the idle duration prediction leading to the selection of that
		 * state was inaccurate. If a better prediction had been made,
		 * the CPU might have been woken up from idle by the next timer.
		 * Assume that to be the case.
		 */
		measured_us = data->next_timer_us;
	} else {
		/* measured value */
		measured_us = cpuidle_get_last_residency(dev);

		/* Deduct exit latency */
		if (measured_us > 2 * target->exit_latency)
			measured_us -= target->exit_latency;
		else
			measured_us /= 2;
	}

	/* Make sure our coefficients do not exceed unity */
	if (measured_us > data->next_timer_us)
		measured_us = data->next_timer_us;

	/* Update our correction ratio */
	new_factor = data->correction_factor[data->bucket];
	new_factor -= new_factor / DECAY;

	if (data->next_timer_us > 0 && measured_us < MAX_INTERESTING)
		new_factor += RESOLUTION * measured_us / data->next_timer_us;
	else
		/*
		 * we were idle so long that we count it as a perfect
		 * prediction
		 */
		new_factor += RESOLUTION;

	/*
	 * We don't want 0 as factor; we always want at least
	 * a tiny bit of estimated time. Fortunately, due to rounding,
	 * new_factor will stay nonzero regardless of measured_us values
	 * and the compiler can eliminate this test as long as DECAY > 1.
	 */
	if (DECAY == 1 && unlikely(new_factor == 0))
		new_factor = 1;

	data->correction_factor[data->bucket] = new_factor;

	/* update the repeating-pattern data */
	data->intervals[data->interval_ptr++] = measured_us;
	if (data->interval_ptr >= INTERVALS)
		data->interval_ptr = 0;
}
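/*
 * Worked example of the correction-factor update in menu_update()
 * (illustrative numbers): with DECAY == 8, starting from the unity factor
 * RESOLUTION * DECAY == 8192 and observing measured_us == 500 against
 * next_timer_us == 1000 gives
 * new_factor = 8192 - 8192/8 + 1024 * 500 / 1000 = 7680, so the next
 * prediction for this bucket will be roughly 94% of the timer estimate.
 */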
/**
 * menu_enable_device - scans a CPU's states and does setup
 * @drv: cpuidle driver
 * @dev: the CPU
 */
static int menu_enable_device(struct cpuidle_driver *drv,
				struct cpuidle_device *dev)
{
	struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
	int i;

	memset(data, 0, sizeof(struct menu_device));

	/*
	 * if the correction factor is 0 (eg first time init or cpu hotplug
	 * etc), we actually want to start out with a unity factor.
	 */
	for (i = 0; i < BUCKETS; i++)
		data->correction_factor[i] = RESOLUTION * DECAY;

	return 0;
}
static struct cpuidle_governor menu_governor = {
	.name =		"menu",
	.rating =	20,
	.enable =	menu_enable_device,
	.select =	menu_select,
	.reflect =	menu_reflect,
};
/**
 * init_menu - initializes the governor
 */
static int __init init_menu(void)
{
	return cpuidle_register_governor(&menu_governor);
}

postcore_initcall(init_menu);