Merge commit 'refs/merge-requests/1' of git://gitorious.org/linux-on-wince-htc/linux_...
[htc-linux.git] / drivers / cpufreq / cpufreq_interactive.c
blob36859a786644ba1b49a25a87dc2b912d4301f1a0
1 /*
2 * drivers/cpufreq/cpufreq_interactive.c
4 * Copyright (C) 2010 Google, Inc.
6 * This software is licensed under the terms of the GNU General Public
7 * License version 2, as published by the Free Software Foundation, and
8 * may be copied, distributed, and modified under those terms.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * Author: Mike Chan (mike@android.com)
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

#include <asm/cputime.h>
30 static void (*pm_idle_old)(void);
31 static atomic_t active_count = ATOMIC_INIT(0);
33 static DEFINE_PER_CPU(struct timer_list, cpu_timer);
35 static DEFINE_PER_CPU(u64, time_in_idle);
36 static DEFINE_PER_CPU(u64, idle_exit_time);
38 static struct cpufreq_policy *policy;
39 static unsigned int target_freq;
41 /* Workqueues handle frequency scaling */
42 static struct workqueue_struct *up_wq;
43 static struct workqueue_struct *down_wq;
44 static struct work_struct freq_scale_work;
46 static u64 freq_change_time;
47 static u64 freq_change_time_in_idle;
49 static cpumask_t work_cpumask;
52 * The minimum ammount of time to spend at a frequency before we can ramp down,
53 * default is 50ms.
55 #define DEFAULT_MIN_SAMPLE_TIME 50000;
56 static unsigned long min_sample_time;
58 static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
59 unsigned int event);
61 #ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
62 static
63 #endif
64 struct cpufreq_governor cpufreq_gov_interactive = {
65 .name = "interactive",
66 .governor = cpufreq_governor_interactive,
67 .max_transition_latency = 10000000,
68 .owner = THIS_MODULE,
71 static void cpufreq_interactive_timer(unsigned long data)
73 u64 delta_idle;
74 u64 update_time;
75 u64 *cpu_time_in_idle;
76 u64 *cpu_idle_exit_time;
77 struct timer_list *t;
79 u64 now_idle = get_cpu_idle_time_us(data,
80 &update_time);
83 cpu_time_in_idle = &per_cpu(time_in_idle, data);
84 cpu_idle_exit_time = &per_cpu(idle_exit_time, data);
86 if (update_time == *cpu_idle_exit_time)
87 return;
89 delta_idle = cputime64_sub(now_idle, *cpu_time_in_idle);
91 /* Scale up if there were no idle cycles since coming out of idle */
92 if (delta_idle == 0) {
93 if (policy->cur == policy->max)
94 return;
96 if (nr_running() < 1)
97 return;
99 target_freq = policy->max;
100 cpumask_set_cpu(data, &work_cpumask);
101 queue_work(up_wq, &freq_scale_work);
102 return;
106 * There is a window where if the cpu utlization can go from low to high
107 * between the timer expiring, delta_idle will be > 0 and the cpu will
108 * be 100% busy, preventing idle from running, and this timer from
109 * firing. So setup another timer to fire to check cpu utlization.
110 * Do not setup the timer if there is no scheduled work.
112 t = &per_cpu(cpu_timer, data);
113 if (!timer_pending(t) && nr_running() > 0) {
114 *cpu_time_in_idle = get_cpu_idle_time_us(
115 data, cpu_idle_exit_time);
116 mod_timer(t, jiffies + 2);
119 if (policy->cur == policy->min)
120 return;
123 * Do not scale down unless we have been at this frequency for the
124 * minimum sample time.
126 if (cputime64_sub(update_time, freq_change_time) < min_sample_time)
127 return;
129 target_freq = policy->min;
130 cpumask_set_cpu(data, &work_cpumask);
131 queue_work(down_wq, &freq_scale_work);
134 static void cpufreq_idle(void)
136 struct timer_list *t;
137 u64 *cpu_time_in_idle;
138 u64 *cpu_idle_exit_time;
140 pm_idle_old();
142 if (!cpumask_test_cpu(smp_processor_id(), policy->cpus))
143 return;
145 /* Timer to fire in 1-2 ticks, jiffie aligned. */
146 t = &per_cpu(cpu_timer, smp_processor_id());
147 cpu_idle_exit_time = &per_cpu(idle_exit_time, smp_processor_id());
148 cpu_time_in_idle = &per_cpu(time_in_idle, smp_processor_id());
150 if (timer_pending(t) == 0) {
151 *cpu_time_in_idle = get_cpu_idle_time_us(
152 smp_processor_id(), cpu_idle_exit_time);
153 mod_timer(t, jiffies + 2);
158 * Choose the cpu frequency based off the load. For now choose the minimum
159 * frequency that will satisfy the load, which is not always the lower power.
161 static unsigned int cpufreq_interactive_calc_freq(unsigned int cpu)
163 unsigned int delta_time;
164 unsigned int idle_time;
165 unsigned int cpu_load;
166 u64 current_wall_time;
167 u64 current_idle_time;;
169 current_idle_time = get_cpu_idle_time_us(cpu, &current_wall_time);
171 idle_time = (unsigned int) current_idle_time - freq_change_time_in_idle;
172 delta_time = (unsigned int) current_wall_time - freq_change_time;
174 cpu_load = 100 * (delta_time - idle_time) / delta_time;
176 return policy->cur * cpu_load / 100;
180 /* We use the same work function to sale up and down */
181 static void cpufreq_interactive_freq_change_time_work(struct work_struct *work)
183 unsigned int cpu;
184 cpumask_t *tmp_mask = &work_cpumask;
185 for_each_cpu(cpu, tmp_mask) {
186 if (target_freq == policy->max) {
187 if (nr_running() == 1) {
188 cpumask_clear_cpu(cpu, &work_cpumask);
189 return;
192 __cpufreq_driver_target(policy, target_freq,
193 CPUFREQ_RELATION_H);
194 } else {
195 target_freq = cpufreq_interactive_calc_freq(cpu);
196 __cpufreq_driver_target(policy, target_freq,
197 CPUFREQ_RELATION_L);
199 freq_change_time_in_idle = get_cpu_idle_time_us(cpu,
200 &freq_change_time);
202 cpumask_clear_cpu(cpu, &work_cpumask);
208 static ssize_t show_min_sample_time(struct kobject *kobj,
209 struct attribute *attr, char *buf)
211 return sprintf(buf, "%lu\n", min_sample_time);
214 static ssize_t store_min_sample_time(struct kobject *kobj,
215 struct attribute *attr, const char *buf, size_t count)
217 return strict_strtoul(buf, 0, &min_sample_time);
220 static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
221 show_min_sample_time, store_min_sample_time);
223 static struct attribute *interactive_attributes[] = {
224 &min_sample_time_attr.attr,
225 NULL,
228 static struct attribute_group interactive_attr_group = {
229 .attrs = interactive_attributes,
230 .name = "interactive",
233 static int cpufreq_governor_interactive(struct cpufreq_policy *new_policy,
234 unsigned int event)
236 int rc;
237 switch (event) {
238 case CPUFREQ_GOV_START:
239 if (!cpu_online(new_policy->cpu))
240 return -EINVAL;
243 * Do not register the idle hook and create sysfs
244 * entries if we have already done so.
246 if (atomic_inc_return(&active_count) > 1)
247 return 0;
249 rc = sysfs_create_group(cpufreq_global_kobject,
250 &interactive_attr_group);
251 if (rc)
252 return rc;
254 pm_idle_old = pm_idle;
255 pm_idle = cpufreq_idle;
256 policy = new_policy;
257 break;
259 case CPUFREQ_GOV_STOP:
260 if (atomic_dec_return(&active_count) > 1)
261 return 0;
263 sysfs_remove_group(cpufreq_global_kobject,
264 &interactive_attr_group);
266 pm_idle = pm_idle_old;
267 del_timer(&per_cpu(cpu_timer, new_policy->cpu));
268 break;
270 case CPUFREQ_GOV_LIMITS:
271 if (new_policy->max < new_policy->cur)
272 __cpufreq_driver_target(new_policy,
273 new_policy->max, CPUFREQ_RELATION_H);
274 else if (new_policy->min > new_policy->cur)
275 __cpufreq_driver_target(new_policy,
276 new_policy->min, CPUFREQ_RELATION_L);
277 break;
279 return 0;
282 static int __init cpufreq_interactive_init(void)
284 unsigned int i;
285 struct timer_list *t;
286 min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
288 /* Initalize per-cpu timers */
289 for_each_possible_cpu(i) {
290 t = &per_cpu(cpu_timer, i);
291 init_timer_deferrable(t);
292 t->function = cpufreq_interactive_timer;
293 t->data = i;
296 /* Scale up is high priority */
297 up_wq = create_rt_workqueue("kinteractive_up");
298 down_wq = create_workqueue("knteractive_down");
300 INIT_WORK(&freq_scale_work, cpufreq_interactive_freq_change_time_work);
302 return cpufreq_register_governor(&cpufreq_gov_interactive);
/* Register early when built in as the default governor, else at module load. */
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
pure_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif
311 static void __exit cpufreq_interactive_exit(void)
313 cpufreq_unregister_governor(&cpufreq_gov_interactive);
314 destroy_workqueue(up_wq);
315 destroy_workqueue(down_wq);
318 module_exit(cpufreq_interactive_exit);
320 MODULE_AUTHOR("Mike Chan <mike@android.com>");
321 MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
322 "Latency sensitive workloads");
323 MODULE_LICENSE("GPL");