/*
 * menu.c - the menu idle governor
 *
 * Copyright (C) 2006-2007 Adam Belay <abelay@novell.com>
 * Copyright (C) 2009 Intel Corporation
 * Author:
 *        Arjan van de Ven <arjan@linux.intel.com>
 *
 * This code is licensed under the GPL version 2 as described
 * in the COPYING file that accompanies the Linux Kernel.
 */

#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/pm_qos.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/sched.h>
#include <linux/math64.h>
#include <linux/module.h>

/*
 * Please note when changing the tuning values:
 * If (MAX_INTERESTING-1) * RESOLUTION > UINT_MAX, the result of
 * a scaling operation multiplication may overflow on 32 bit platforms.
 * In that case, #define RESOLUTION as ULL to get a 64 bit result:
 * #define RESOLUTION 1024ULL
 *
 * The default values do not overflow.
 */
#define BUCKETS 12
#define INTERVALS 8
#define RESOLUTION 1024
#define DECAY 8
#define MAX_INTERESTING 50000
#define STDDEV_THRESH 400

/*
 * Concepts and ideas behind the menu governor
 *
 * For the menu governor, there are 3 decision factors for picking a C
 * state:
 * 1) Energy break even point
 * 2) Performance impact
 * 3) Latency tolerance (from pmqos infrastructure)
 * These three factors are treated independently.
 *
 * Energy break even point
 * -----------------------
 * C state entry and exit have an energy cost, and a certain amount of time in
 * the C state is required to actually break even on this cost. CPUIDLE
 * provides us this duration in the "target_residency" field. So all that we
 * need is a good prediction of how long we'll be idle. Like the traditional
 * menu governor, we start with the actual known "next timer event" time.
 *
 * Since there are other sources of wakeups (interrupts for example) than
 * the next timer event, this estimation is rather optimistic. To get a
 * more realistic estimate, a correction factor is applied to the estimate,
 * based on historic behavior. For example, if in the past the actual
 * duration always was 50% of the next timer tick, the correction factor will
 * be 0.5.
 *
 * menu uses a running average for this correction factor; however, it uses a
 * set of factors, not just a single factor. This stems from the realization
 * that the ratio is dependent on the order of magnitude of the expected
 * duration; if we expect 500 milliseconds of idle time the likelihood of
 * getting an interrupt very early is much higher than if we expect 50 micro
 * seconds of idle time. A second independent factor that has a big impact on
 * the actual factor is whether there is (disk) IO outstanding or not.
 * (as a special twist, we consider every sleep longer than 50 milliseconds
 * as perfect; there are no power gains for sleeping longer than this)
 *
 * For these two reasons we keep an array of 12 independent factors, that gets
 * indexed based on the magnitude of the expected duration as well as the
 * "is IO outstanding" property.
 *
 * Repeatable-interval-detector
 * ----------------------------
 * There are some cases where "next timer" is a completely unusable predictor:
 * Those cases where the interval is fixed, for example due to hardware
 * interrupt mitigation, but also due to fixed transfer rate devices such as
 * mice.
 * For this, we use a different predictor: We track the duration of the last 8
 * intervals and if the standard deviation of these 8 intervals is below a
 * threshold value, we use the average of these intervals as prediction.
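 *
 * For instance (hypothetical numbers): a device interrupting at a steady
 * 1 kHz leaves eight recorded intervals near 1000 usec with almost no
 * spread, so this detector predicts 1000 usec of idle time even when the
 * next timer event is much further away.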
 *
 * Limiting Performance Impact
 * ---------------------------
 * C states, especially those with large exit latencies, can have a real
 * noticeable impact on workloads, which is not acceptable for most sysadmins,
 * and in addition, less performance has a power price of its own.
 *
 * As a general rule of thumb, menu assumes that the following heuristic
 * holds:
 *     The busier the system, the less impact of C states is acceptable
 *
 * This rule-of-thumb is implemented using a performance-multiplier:
 * If the exit latency times the performance multiplier is longer than
 * the predicted duration, the C state is not considered a candidate
 * for selection due to a too high performance impact. So the higher
 * this multiplier is, the longer we need to be idle to pick a deep C
 * state, and thus the less likely a busy CPU will hit such a deep
 * C state.
 *
 * Two factors are used in determining this multiplier:
 * a value of 10 is added for each point of "per cpu load average" we have;
 * a value of 5 points is added for each process that is waiting for
 * IO on this CPU.
 * (these values are experimentally determined)
 *
 * The load average factor gives a longer term (few seconds) input to the
 * decision, while the iowait value gives a cpu local instantaneous input.
 * The iowait factor may look low, but realize that this is also already
 * represented in the system load average.
 */

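/*
 * Worked example of the rule-of-thumb (hypothetical numbers): with a
 * performance multiplier of 10, a C state with a 100 usec exit latency is
 * only a candidate once the predicted idle duration exceeds 1000 usec,
 * while on a mostly idle CPU (multiplier near 1) the same state already
 * qualifies at a little over 100 usec.
 */
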
struct menu_device {
	int		last_state_idx;
	int		needs_update;

	unsigned int	expected_us;
	unsigned int	predicted_us;
	unsigned int	exit_us;
	unsigned int	bucket;
	unsigned int	correction_factor[BUCKETS];
	unsigned int	intervals[INTERVALS];
	int		interval_ptr;
};

#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)

static int get_loadavg(void)
{
	unsigned long this = this_cpu_load();

	return LOAD_INT(this) * 10 + LOAD_FRAC(this) / 10;
}

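/*
 * Example (hypothetical value): a per-cpu load average of 2.35 gives
 * LOAD_INT() == 2 and LOAD_FRAC() == 35, so get_loadavg() returns
 * 2 * 10 + 35 / 10 = 23.
 */
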
static inline int which_bucket(unsigned int duration)
{
	int bucket = 0;

	/*
	 * We keep two groups of stats; one with IO pending, one without.
	 * This allows us to calculate
	 * E(duration)|iowait
	 */
	if (nr_iowait_cpu(smp_processor_id()))
		bucket = BUCKETS/2;

	if (duration < 10)
		return bucket;
	if (duration < 100)
		return bucket + 1;
	if (duration < 1000)
		return bucket + 2;
	if (duration < 10000)
		return bucket + 3;
	if (duration < 100000)
		return bucket + 4;
	return bucket + 5;
}

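/*
 * Example (hypothetical input): an expected duration of 5000 usec falls in
 * the "< 10000" range and maps to bucket 3 when no IO is pending on this
 * CPU, or to BUCKETS/2 + 3 = 9 when at least one task is waiting for IO.
 */
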
/*
 * Return a multiplier for the exit latency that is intended
 * to take performance requirements into account.
 * The more performance critical we estimate the system
 * to be, the higher this multiplier, and thus the higher
 * the barrier to go to an expensive C state.
 */
static inline int performance_multiplier(void)
{
	int mult = 1;

	/* for higher loadavg, we are more reluctant */
	mult += 2 * get_loadavg();

	/* for IO wait tasks (per cpu!) we add 10x each */
	mult += 10 * nr_iowait_cpu(smp_processor_id());

	return mult;
}

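/*
 * Example (hypothetical numbers): with a per-cpu load average of 1.50,
 * get_loadavg() returns 15, and with one task in iowait on this CPU the
 * multiplier is 1 + 2 * 15 + 10 * 1 = 41; a C state then needs a
 * predicted idle duration of roughly 41 times its exit latency to be
 * chosen.
 */
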
static DEFINE_PER_CPU(struct menu_device, menu_devices);

static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);

/* This implements DIV_ROUND_CLOSEST but avoids 64 bit division */
static u64 div_round64(u64 dividend, u32 divisor)
{
	return div_u64(dividend + (divisor / 2), divisor);
}

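/*
 * Example: div_round64(10, 4) computes (10 + 2) / 4 = 3, matching
 * DIV_ROUND_CLOSEST(10, 4), whereas plain 64 bit division would
 * truncate to 2.
 */
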
/*
 * Try detecting repeating patterns by keeping track of the last 8
 * intervals, and checking if the standard deviation of that set
 * of points is below a threshold. If it is... then use the
 * average of these 8 points as the estimated value.
 */
static void get_typical_interval(struct menu_device *data)
{
	int i, divisor;
	unsigned int max, thresh;
	uint64_t avg, stddev;

	thresh = UINT_MAX; /* Discard outliers above this value */

again:

	/* First calculate the average of past intervals */
	max = 0;
	avg = 0;
	divisor = 0;
	for (i = 0; i < INTERVALS; i++) {
		unsigned int value = data->intervals[i];
		if (value <= thresh) {
			avg += value;
			divisor++;
			if (value > max)
				max = value;
		}
	}
	do_div(avg, divisor);

	/* Then try to determine standard deviation */
	stddev = 0;
	for (i = 0; i < INTERVALS; i++) {
		unsigned int value = data->intervals[i];
		if (value <= thresh) {
			int64_t diff = value - avg;
			stddev += diff * diff;
		}
	}
	do_div(stddev, divisor);

	/*
	 * The typical interval is obtained when the standard deviation is
	 * small, or when the standard deviation is small compared to the
	 * average interval.
	 *
	 * int_sqrt() formal parameter type is unsigned long. When the
	 * greatest difference to an outlier exceeds ~65 ms * sqrt(divisor)
	 * the resulting squared standard deviation exceeds the input domain
	 * of int_sqrt on platforms where unsigned long is 32 bits in size.
	 * In such case reject the candidate average.
	 *
	 * Use this result only if there is no timer to wake us up sooner.
	 */
	if (likely(stddev <= ULONG_MAX)) {
		stddev = int_sqrt(stddev);
		if (((avg > stddev * 6) && (divisor * 4 >= INTERVALS * 3))
							|| stddev <= 20) {
			if (data->expected_us > avg)
				data->predicted_us = avg;
			return;
		}
	}

	/*
	 * If we have outliers to the upside in our distribution, discard
	 * those by setting the threshold to exclude these outliers, then
	 * calculate the average and standard deviation again. Once we get
	 * down to the bottom 3/4 of our samples, stop excluding samples.
	 *
	 * This can deal with workloads that have long pauses interspersed
	 * with sporadic activity with a bunch of short pauses.
	 */
	if ((divisor * 4) <= INTERVALS * 3)
		return;

	thresh = max - 1;
	goto again;
}

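/*
 * Example (hypothetical samples): eight recorded intervals between 990 and
 * 1010 usec average out near 1000 usec with a standard deviation well
 * below the threshold, so the average is used as the prediction, provided
 * the next timer (expected_us) is further away than that.
 */
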
/**
 * menu_select - selects the next idle state to enter
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 */
static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	struct menu_device *data = &__get_cpu_var(menu_devices);
	int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
	int i;
	int multiplier;
	struct timespec t;

	if (data->needs_update) {
		menu_update(drv, dev);
		data->needs_update = 0;
	}

	data->last_state_idx = 0;
	data->exit_us = 0;

	/* Special case when user has set very strict latency requirement */
	if (unlikely(latency_req == 0))
		return 0;

	/* determine the expected residency time, round up */
	t = ktime_to_timespec(tick_nohz_get_sleep_length());
	data->expected_us =
		t.tv_sec * USEC_PER_SEC + t.tv_nsec / NSEC_PER_USEC;

	data->bucket = which_bucket(data->expected_us);

	multiplier = performance_multiplier();

	/*
	 * if the correction factor is 0 (eg first time init or cpu hotplug
	 * etc), we actually want to start out with a unity factor.
	 */
	if (data->correction_factor[data->bucket] == 0)
		data->correction_factor[data->bucket] = RESOLUTION * DECAY;

	/*
	 * Force the result of multiplication to be 64 bits even if both
	 * operands are 32 bits.
	 * Make sure to round up for half microseconds.
	 */
	data->predicted_us = div_round64((uint64_t)data->expected_us *
					 data->correction_factor[data->bucket],
					 RESOLUTION * DECAY);

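	/*
	 * E.g. (hypothetical numbers): expected_us == 1000 with a stored
	 * factor of RESOLUTION * DECAY / 2 == 4096 yields
	 * div_round64(1000 * 4096, 8192) == 500, i.e. a 0.5 correction.
	 */
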
	get_typical_interval(data);

	/*
	 * We want to default to C1 (hlt), not to busy polling
	 * unless the timer is happening really really soon.
	 */
	if (data->expected_us > 5 &&
	    !drv->states[CPUIDLE_DRIVER_STATE_START].disabled &&
	    dev->states_usage[CPUIDLE_DRIVER_STATE_START].disable == 0)
		data->last_state_idx = CPUIDLE_DRIVER_STATE_START;

	/*
	 * Find the idle state with the lowest power while satisfying
	 * our constraints.
	 */
	for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
		struct cpuidle_state *s = &drv->states[i];
		struct cpuidle_state_usage *su = &dev->states_usage[i];

		if (s->disabled || su->disable)
			continue;
		if (s->target_residency > data->predicted_us)
			continue;
		if (s->exit_latency > latency_req)
			continue;
		if (s->exit_latency * multiplier > data->predicted_us)
			continue;

		data->last_state_idx = i;
		data->exit_us = s->exit_latency;
	}

	return data->last_state_idx;
}

/**
 * menu_reflect - records that data structures need update
 * @dev: the CPU
 * @index: the index of actual entered state
 *
 * NOTE: it's important to be fast here because this operation will add to
 *       the overall exit latency.
 */
static void menu_reflect(struct cpuidle_device *dev, int index)
{
	struct menu_device *data = &__get_cpu_var(menu_devices);
	data->last_state_idx = index;
	if (index >= 0)
		data->needs_update = 1;
}

/**
 * menu_update - attempts to guess what happened after entry
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 */
static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	struct menu_device *data = &__get_cpu_var(menu_devices);
	int last_idx = data->last_state_idx;
	unsigned int last_idle_us = cpuidle_get_last_residency(dev);
	struct cpuidle_state *target = &drv->states[last_idx];
	unsigned int measured_us;
	unsigned int new_factor;

	/*
	 * Ugh, this idle state doesn't support residency measurements, so we
	 * are basically lost in the dark. As a compromise, assume we slept
	 * for the whole expected time.
	 */
	if (unlikely(!(target->flags & CPUIDLE_FLAG_TIME_VALID)))
		last_idle_us = data->expected_us;

	measured_us = last_idle_us;

	/*
	 * We correct for the exit latency; we are assuming here that the
	 * exit latency happens after the event that we're interested in.
	 */
	if (measured_us > data->exit_us)
		measured_us -= data->exit_us;

	/* Update our correction ratio */
	new_factor = data->correction_factor[data->bucket];
	new_factor -= new_factor / DECAY;

	if (data->expected_us > 0 && measured_us < MAX_INTERESTING)
		new_factor += RESOLUTION * measured_us / data->expected_us;
	else
		/*
		 * we were idle so long that we count it as a perfect
		 * prediction
		 */
		new_factor += RESOLUTION;

	/*
	 * We don't want 0 as factor; we always want at least
	 * a tiny bit of estimated time. Fortunately, due to rounding,
	 * new_factor will stay nonzero regardless of measured_us values
	 * and the compiler can eliminate this test as long as DECAY > 1.
	 */
	if (DECAY == 1 && unlikely(new_factor == 0))
		new_factor = 1;

	data->correction_factor[data->bucket] = new_factor;

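	/*
	 * E.g. (hypothetical numbers): a factor of 8192 with measured_us ==
	 * 500 and expected_us == 1000 decays to 8192 - 1024 + 512 = 7680;
	 * over many updates the factor converges towards
	 * RESOLUTION * DECAY * measured_us / expected_us.
	 */
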
	/* update the repeating-pattern data */
	data->intervals[data->interval_ptr++] = last_idle_us;
	if (data->interval_ptr >= INTERVALS)
		data->interval_ptr = 0;
}

/**
 * menu_enable_device - scans a CPU's states and does setup
 * @drv: cpuidle driver
 * @dev: the CPU
 */
static int menu_enable_device(struct cpuidle_driver *drv,
				struct cpuidle_device *dev)
{
	struct menu_device *data = &per_cpu(menu_devices, dev->cpu);

	memset(data, 0, sizeof(struct menu_device));

	return 0;
}

static struct cpuidle_governor menu_governor = {
	.name =		"menu",
	.rating =	20,
	.enable =	menu_enable_device,
	.select =	menu_select,
	.reflect =	menu_reflect,
	.owner =	THIS_MODULE,
};

/**
 * init_menu - initializes the governor
 */
static int __init init_menu(void)
{
	return cpuidle_register_governor(&menu_governor);
}

postcore_initcall(init_menu);