/*
 * drivers/cpufreq/cpufreq_conservative.c
 *
 * Copyright (C) 2001 Russell King
 *           (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                    Jun Nakajima <jun.nakajima@intel.com>
 *           (C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/slab.h>
#include "cpufreq_governor.h"
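
/*
 * Per-policy state private to the conservative governor: down_skip counts
 * sampling periods so that frequency decreases are only considered every
 * sampling_down_factor periods, and requested_freq is the frequency the
 * governor last asked for, tracked separately because the driver may round
 * or clamp the frequency it actually sets (policy->cur).
 */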
struct cs_policy_dbs_info {
	struct policy_dbs_info policy_dbs;
	unsigned int down_skip;
	unsigned int requested_freq;
};

static inline struct cs_policy_dbs_info *to_dbs_info(struct policy_dbs_info *policy_dbs)
{
	return container_of(policy_dbs, struct cs_policy_dbs_info, policy_dbs);
}
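
/*
 * Governor-specific tunables: down_threshold is the load (in percent) below
 * which the frequency is stepped down, and freq_step is the size of each
 * step, expressed as a percentage of the policy's maximum frequency.
 */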
struct cs_dbs_tuners {
	unsigned int down_threshold;
	unsigned int freq_step;
};

/* Conservative governor macros */
#define DEF_FREQUENCY_UP_THRESHOLD	(80)
#define DEF_FREQUENCY_DOWN_THRESHOLD	(20)
#define DEF_FREQUENCY_STEP		(5)
#define DEF_SAMPLING_DOWN_FACTOR	(1)
#define MAX_SAMPLING_DOWN_FACTOR	(10)
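
/*
 * Convert the freq_step percentage into an absolute frequency step.  cpufreq
 * frequencies are expressed in kHz, so policy->max is effectively always well
 * above 100 and the result is normally non-zero; the fallback below only
 * guards against pathological limits.
 */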
static inline unsigned int get_freq_step(struct cs_dbs_tuners *cs_tuners,
					 struct cpufreq_policy *policy)
{
	unsigned int freq_step = (cs_tuners->freq_step * policy->max) / 100;

	/* max freq cannot be less than 100. But who knows... */
	if (unlikely(freq_step == 0))
		freq_step = DEF_FREQUENCY_STEP;

	return freq_step;
}

/*
 * Every sampling_rate, we check whether the current idle time is less than
 * 20% (default); if it is, we try to increase the frequency.  Every
 * sampling_rate * sampling_down_factor, we check whether the current idle
 * time is more than 80% (default); if it is, we try to decrease the
 * frequency.
 *
 * Frequency updates happen in minimum steps of 5% (default) of the maximum
 * frequency.
 */
static unsigned int cs_dbs_update(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct cs_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
	unsigned int requested_freq = dbs_info->requested_freq;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int load = dbs_update(policy);
	unsigned int freq_step;

	/*
	 * break out if we 'cannot' reduce the speed as the user might
	 * want freq_step to be zero
	 */
	if (cs_tuners->freq_step == 0)
		goto out;

	/*
	 * If requested_freq is out of range, it is likely that the limits
	 * changed in the meantime, so fall back to current frequency in that
	 * case.
	 */
	if (requested_freq > policy->max || requested_freq < policy->min)
		requested_freq = policy->cur;

	freq_step = get_freq_step(cs_tuners, policy);

	/*
	 * Decrease requested_freq one freq_step for each idle period that
	 * we didn't update the frequency.
	 */
	if (policy_dbs->idle_periods < UINT_MAX) {
		unsigned int freq_steps = policy_dbs->idle_periods * freq_step;

		if (requested_freq > freq_steps)
			requested_freq -= freq_steps;
		else
			requested_freq = policy->min;

		policy_dbs->idle_periods = UINT_MAX;
	}

	/* Check for frequency increase */
	if (load > dbs_data->up_threshold) {
		dbs_info->down_skip = 0;

		/* if we are already at full speed then break out early */
		if (requested_freq == policy->max)
			goto out;

		requested_freq += freq_step;
		if (requested_freq > policy->max)
			requested_freq = policy->max;

		__cpufreq_driver_target(policy, requested_freq, CPUFREQ_RELATION_H);
		dbs_info->requested_freq = requested_freq;
		goto out;
	}

	/* if sampling_down_factor is active break out early */
	if (++dbs_info->down_skip < dbs_data->sampling_down_factor)
		goto out;

	dbs_info->down_skip = 0;

	/* Check for frequency decrease */
	if (load < cs_tuners->down_threshold) {
		/*
		 * if we cannot reduce the frequency anymore, break out early
		 */
		if (requested_freq == policy->min)
			goto out;

		if (requested_freq > freq_step)
			requested_freq -= freq_step;
		else
			requested_freq = policy->min;

		__cpufreq_driver_target(policy, requested_freq, CPUFREQ_RELATION_L);
		dbs_info->requested_freq = requested_freq;
	}

 out:
	/* Tell the core when to run this governor again (interval in usecs). */
	return dbs_data->sampling_rate;
}

/************************** sysfs interface ************************/
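
/*
 * Each store_*() helper below parses a value written to the corresponding
 * sysfs file and validates it before updating the tunable.  With the common
 * dbs governor code these files typically appear under
 * /sys/devices/system/cpu/cpufreq/conservative/ (or under the policy
 * directory when per-policy tunables are in use), e.g.:
 *
 *	echo 30 > /sys/devices/system/cpu/cpufreq/conservative/down_threshold
 */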

static ssize_t store_sampling_down_factor(struct gov_attr_set *attr_set,
					  const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
		return -EINVAL;

	dbs_data->sampling_down_factor = input;
	return count;
}

static ssize_t store_up_threshold(struct gov_attr_set *attr_set,
				  const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > 100 || input <= cs_tuners->down_threshold)
		return -EINVAL;

	dbs_data->up_threshold = input;
	return count;
}

static ssize_t store_down_threshold(struct gov_attr_set *attr_set,
				    const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);

	/* cannot be lower than 11 otherwise freq will not fall */
	if (ret != 1 || input < 11 || input > 100 ||
	    input >= dbs_data->up_threshold)
		return -EINVAL;

	cs_tuners->down_threshold = input;
	return count;
}

static ssize_t store_ignore_nice_load(struct gov_attr_set *attr_set,
				      const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	if (input == dbs_data->ignore_nice_load) /* nothing to do */
		return count;

	dbs_data->ignore_nice_load = input;

	/* we need to re-evaluate prev_cpu_idle */
	gov_update_cpu_data(dbs_data);

	return count;
}

static ssize_t store_freq_step(struct gov_attr_set *attr_set, const char *buf,
			       size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 100)
		input = 100;

	/*
	 * no need to test here if freq_step is zero as the user might actually
	 * want this, they would be crazy though :)
	 */
	cs_tuners->freq_step = input;
	return count;
}
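
/*
 * The show() side and the sysfs attribute objects are generated by helper
 * macros from cpufreq_governor.h: gov_show_one_common() reads a field that
 * lives in struct dbs_data, gov_show_one() reads a governor-specific tunable
 * (here from struct cs_dbs_tuners), and gov_attr_rw()/gov_attr_ro() define
 * the read-write or read-only attributes listed in cs_attributes[] below.
 */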

gov_show_one_common(sampling_rate);
gov_show_one_common(sampling_down_factor);
gov_show_one_common(up_threshold);
gov_show_one_common(ignore_nice_load);
gov_show_one_common(min_sampling_rate);
gov_show_one(cs, down_threshold);
gov_show_one(cs, freq_step);

gov_attr_rw(sampling_rate);
gov_attr_rw(sampling_down_factor);
gov_attr_rw(up_threshold);
gov_attr_rw(ignore_nice_load);
gov_attr_ro(min_sampling_rate);
gov_attr_rw(down_threshold);
gov_attr_rw(freq_step);

static struct attribute *cs_attributes[] = {
	&min_sampling_rate.attr,
	&sampling_rate.attr,
	&sampling_down_factor.attr,
	&up_threshold.attr,
	&down_threshold.attr,
	&ignore_nice_load.attr,
	&freq_step.attr,
	NULL
};

/************************** sysfs end ************************/

static struct policy_dbs_info *cs_alloc(void)
{
	struct cs_policy_dbs_info *dbs_info;

	dbs_info = kzalloc(sizeof(*dbs_info), GFP_KERNEL);
	return dbs_info ? &dbs_info->policy_dbs : NULL;
}

static void cs_free(struct policy_dbs_info *policy_dbs)
{
	kfree(to_dbs_info(policy_dbs));
}

static int cs_init(struct dbs_data *dbs_data)
{
	struct cs_dbs_tuners *tuners;

	tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
	if (!tuners)
		return -ENOMEM;

	tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD;
	tuners->freq_step = DEF_FREQUENCY_STEP;
	dbs_data->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
	dbs_data->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
	dbs_data->ignore_nice_load = 0;

	dbs_data->tuners = tuners;
	dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
		jiffies_to_usecs(10);

	return 0;
}

static void cs_exit(struct dbs_data *dbs_data)
{
	kfree(dbs_data->tuners);
}

static void cs_start(struct cpufreq_policy *policy)
{
	struct cs_policy_dbs_info *dbs_info = to_dbs_info(policy->governor_data);

	dbs_info->down_skip = 0;
	dbs_info->requested_freq = policy->cur;
}
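
/*
 * Glue for the common dbs governor framework: cs_alloc()/cs_free() manage the
 * per-policy cs_policy_dbs_info, cs_init()/cs_exit() manage the tunables,
 * cs_start() resets the governor state when it is attached to a policy, and
 * cs_dbs_update() is the periodic worker invoked every sampling interval.
 */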

static struct dbs_governor cs_governor = {
	.gov = CPUFREQ_DBS_GOVERNOR_INITIALIZER("conservative"),
	.kobj_type = { .default_attrs = cs_attributes },
	.gov_dbs_update = cs_dbs_update,
	.alloc = cs_alloc,
	.free = cs_free,
	.init = cs_init,
	.exit = cs_exit,
	.start = cs_start,
};

#define CPU_FREQ_GOV_CONSERVATIVE	(&cs_governor.gov)

static int __init cpufreq_gov_dbs_init(void)
{
	return cpufreq_register_governor(CPU_FREQ_GOV_CONSERVATIVE);
}

static void __exit cpufreq_gov_dbs_exit(void)
{
	cpufreq_unregister_governor(CPU_FREQ_GOV_CONSERVATIVE);
}

MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>");
MODULE_DESCRIPTION("'cpufreq_conservative' - A dynamic cpufreq governor for "
		"Low Latency Frequency Transition capable processors "
		"optimised for use in a battery environment");
MODULE_LICENSE("GPL");
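
/*
 * When this governor is built in as the system default, register it early via
 * fs_initcall() so it is available as soon as cpufreq drivers start probing;
 * otherwise it registers through the normal module_init() path.
 */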
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
struct cpufreq_governor *cpufreq_default_governor(void)
{
	return CPU_FREQ_GOV_CONSERVATIVE;
}

fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);