drivers/cpuidle/governors/ladder.c
/*
 * ladder.c - the residency ladder algorithm
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
 *
 * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *               Shaohua Li <shaohua.li@intel.com>
 *               Adam Belay <abelay@novell.com>
 *
 * This code is licenced under the GPL.
 */

#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/pm_qos.h>
#include <linux/module.h>
#include <linux/jiffies.h>

#include <asm/io.h>
#include <asm/uaccess.h>
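
/*
 * Number of consecutive wakeups that must point the same way before the
 * governor promotes to a deeper state or demotes to a shallower one.
 */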
#define PROMOTION_COUNT 4
#define DEMOTION_COUNT 1
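
/*
 * Per-state bookkeeping: the residency/latency thresholds that trigger a
 * promotion or demotion, plus running counts of how many consecutive
 * wakeups have argued for each.
 */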
struct ladder_device_state {
	struct {
		u32 promotion_count;
		u32 demotion_count;
		u32 promotion_time;
		u32 demotion_time;
	} threshold;
	struct {
		int promotion_count;
		int demotion_count;
	} stats;
};

struct ladder_device {
	struct ladder_device_state states[CPUIDLE_STATE_MAX];
	int last_state_idx;
};

static DEFINE_PER_CPU(struct ladder_device, ladder_devices);

/**
 * ladder_do_selection - prepares private data for a state change
 * @ldev: the ladder device
 * @old_idx: the current state index
 * @new_idx: the new target state index
 */
static inline void ladder_do_selection(struct ladder_device *ldev,
				       int old_idx, int new_idx)
{
	ldev->states[old_idx].stats.promotion_count = 0;
	ldev->states[old_idx].stats.demotion_count = 0;
	ldev->last_state_idx = new_idx;
}

/**
 * ladder_select_state - selects the next state to enter
 * @drv: cpuidle driver
 * @dev: the CPU
 */
static int ladder_select_state(struct cpuidle_driver *drv,
			       struct cpuidle_device *dev)
{
	struct ladder_device *ldev = &__get_cpu_var(ladder_devices);
	struct ladder_device_state *last_state;
	int last_residency, last_idx = ldev->last_state_idx;
	int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);

	/* Special case when user has set very strict latency requirement */
	if (unlikely(latency_req == 0)) {
		ladder_do_selection(ldev, last_idx, 0);
		return 0;
	}

	last_state = &ldev->states[last_idx];
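
	/*
	 * Effective residency is the measured time in the state minus its
	 * exit latency; states without valid residency data are treated as
	 * if they overslept the promotion threshold, so they remain
	 * promotion candidates.
	 */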
	if (drv->states[last_idx].flags & CPUIDLE_FLAG_TIME_VALID) {
		last_residency = cpuidle_get_last_residency(dev) - \
					 drv->states[last_idx].exit_latency;
	}
	else
		last_residency = last_state->threshold.promotion_time + 1;

	/* consider promotion */
	if (last_idx < drv->state_count - 1 &&
	    !drv->states[last_idx + 1].disabled &&
	    !dev->states_usage[last_idx + 1].disable &&
	    last_residency > last_state->threshold.promotion_time &&
	    drv->states[last_idx + 1].exit_latency <= latency_req) {
		last_state->stats.promotion_count++;
		last_state->stats.demotion_count = 0;
		if (last_state->stats.promotion_count >= last_state->threshold.promotion_count) {
			ladder_do_selection(ldev, last_idx, last_idx + 1);
			return last_idx + 1;
		}
	}

	/* consider demotion */
	if (last_idx > CPUIDLE_DRIVER_STATE_START &&
	    (drv->states[last_idx].disabled ||
	    dev->states_usage[last_idx].disable ||
	    drv->states[last_idx].exit_latency > latency_req)) {
		int i;

		for (i = last_idx - 1; i > CPUIDLE_DRIVER_STATE_START; i--) {
			if (drv->states[i].exit_latency <= latency_req)
				break;
		}
		ladder_do_selection(ldev, last_idx, i);
		return i;
	}
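
	/*
	 * Demote one rung if the effective residency fell short of this
	 * state's demotion threshold for DEMOTION_COUNT consecutive wakeups.
	 */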
	if (last_idx > CPUIDLE_DRIVER_STATE_START &&
	    last_residency < last_state->threshold.demotion_time) {
		last_state->stats.demotion_count++;
		last_state->stats.promotion_count = 0;
		if (last_state->stats.demotion_count >= last_state->threshold.demotion_count) {
			ladder_do_selection(ldev, last_idx, last_idx - 1);
			return last_idx - 1;
		}
	}

	/* otherwise remain at the current state */
	return last_idx;
}

/**
 * ladder_enable_device - setup for the governor
 * @drv: cpuidle driver
 * @dev: the CPU
 */
static int ladder_enable_device(struct cpuidle_driver *drv,
				struct cpuidle_device *dev)
{
	int i;
	struct ladder_device *ldev = &per_cpu(ladder_devices, dev->cpu);
	struct ladder_device_state *lstate;
	struct cpuidle_state *state;

	ldev->last_state_idx = CPUIDLE_DRIVER_STATE_START;

	for (i = 0; i < drv->state_count; i++) {
		state = &drv->states[i];
		lstate = &ldev->states[i];

		lstate->stats.promotion_count = 0;
		lstate->stats.demotion_count = 0;

		lstate->threshold.promotion_count = PROMOTION_COUNT;
		lstate->threshold.demotion_count = DEMOTION_COUNT;
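
		/*
		 * Time thresholds are seeded from the state's own exit
		 * latency; the deepest state gets no promotion threshold
		 * and the shallowest gets no demotion threshold.
		 */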
		if (i < drv->state_count - 1)
			lstate->threshold.promotion_time = state->exit_latency;
		if (i > 0)
			lstate->threshold.demotion_time = state->exit_latency;
	}

	return 0;
}

/**
 * ladder_reflect - update the correct last_state_idx
 * @dev: the CPU
 * @index: the index of actual state entered
 */
static void ladder_reflect(struct cpuidle_device *dev, int index)
{
	struct ladder_device *ldev = &__get_cpu_var(ladder_devices);

	if (index > 0)
		ldev->last_state_idx = index;
}
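
/*
 * The ladder governor registers with rating 10; the cpuidle core activates
 * the highest-rated registered governor, so the menu governor (rating 20)
 * takes precedence when both are built in.
 */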
static struct cpuidle_governor ladder_governor = {
	.name =		"ladder",
	.rating =	10,
	.enable =	ladder_enable_device,
	.select =	ladder_select_state,
	.reflect =	ladder_reflect,
	.owner =	THIS_MODULE,
};

/**
 * init_ladder - initializes the governor
 */
static int __init init_ladder(void)
{
	return cpuidle_register_governor(&ladder_governor);
}

/**
 * exit_ladder - exits the governor
 */
static void __exit exit_ladder(void)
{
	cpuidle_unregister_governor(&ladder_governor);
}

MODULE_LICENSE("GPL");
module_init(init_ladder);
module_exit(exit_ladder);