/*
 *  drivers/cpufreq/cpufreq_conservative.c
 *
 *  Copyright (C)  2001 Russell King
 *            (C)  2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                      Jun Nakajima <jun.nakajima@intel.com>
 *            (C)  2009 Alexander Clouter <alex@digriz.org.uk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/slab.h>
#include "cpufreq_governor.h"
struct cs_policy_dbs_info {
	struct policy_dbs_info policy_dbs;
	unsigned int down_skip;
	unsigned int requested_freq;
};
static inline struct cs_policy_dbs_info *to_dbs_info(struct policy_dbs_info *policy_dbs)
{
	return container_of(policy_dbs, struct cs_policy_dbs_info, policy_dbs);
}
struct cs_dbs_tuners {
	unsigned int down_threshold;
	unsigned int freq_step;
};
/* Conservative governor macros */
#define DEF_FREQUENCY_UP_THRESHOLD	(80)
#define DEF_FREQUENCY_DOWN_THRESHOLD	(20)
#define DEF_FREQUENCY_STEP		(5)
#define DEF_SAMPLING_DOWN_FACTOR	(1)
#define MAX_SAMPLING_DOWN_FACTOR	(10)
static inline unsigned int get_freq_step(struct cs_dbs_tuners *cs_tuners,
					 struct cpufreq_policy *policy)
{
	unsigned int freq_step = (cs_tuners->freq_step * policy->max) / 100;

	/* max freq cannot be less than 100. But who knows... */
	if (unlikely(freq_step == 0))
		freq_step = DEF_FREQUENCY_STEP;

	return freq_step;
}
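/*
 * Worked example (hypothetical numbers, not taken from this file): with the
 * default freq_step of 5 (percent) and a policy->max of 2000000 kHz,
 * get_freq_step() returns (5 * 2000000) / 100 = 100000 kHz, so each
 * adjustment made by cs_dbs_update() below moves requested_freq in
 * 100 MHz increments.
 */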
/*
 * Every sampling_rate we check whether the current idle time is less than
 * 20% (default); if so, we try to increase the frequency.  Every
 * sampling_rate * sampling_down_factor we check whether the current idle
 * time is more than 80% (default); if so, we try to decrease the frequency.
 *
 * Frequency updates happen at minimum steps of 5% (default) of maximum
 * frequency.
 */
static unsigned int cs_dbs_update(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct cs_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
	unsigned int requested_freq = dbs_info->requested_freq;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int load = dbs_update(policy);
	unsigned int freq_step;

	/*
	 * break out if we 'cannot' reduce the speed as the user might
	 * want freq_step to be zero
	 */
	if (cs_tuners->freq_step == 0)
		goto out;

	/*
	 * If requested_freq is out of range, it is likely that the limits
	 * changed in the meantime, so fall back to the current frequency in
	 * that case.
	 */
	if (requested_freq > policy->max || requested_freq < policy->min) {
		requested_freq = policy->cur;
		dbs_info->requested_freq = requested_freq;
	}

	freq_step = get_freq_step(cs_tuners, policy);

	/*
	 * Decrease requested_freq one freq_step for each idle period that
	 * we didn't update the frequency.
	 */
	if (policy_dbs->idle_periods < UINT_MAX) {
		unsigned int freq_steps = policy_dbs->idle_periods * freq_step;

		if (requested_freq > policy->min + freq_steps)
			requested_freq -= freq_steps;
		else
			requested_freq = policy->min;

		policy_dbs->idle_periods = UINT_MAX;
	}

	/* Check for frequency increase */
	if (load > dbs_data->up_threshold) {
		dbs_info->down_skip = 0;

		/* if we are already at full speed then break out early */
		if (requested_freq == policy->max)
			goto out;

		requested_freq += freq_step;
		if (requested_freq > policy->max)
			requested_freq = policy->max;

		__cpufreq_driver_target(policy, requested_freq,
					CPUFREQ_RELATION_H);
		dbs_info->requested_freq = requested_freq;
		goto out;
	}

	/* if sampling_down_factor is active break out early */
	if (++dbs_info->down_skip < dbs_data->sampling_down_factor)
		goto out;

	dbs_info->down_skip = 0;

	/* Check for frequency decrease */
	if (load < cs_tuners->down_threshold) {
		/*
		 * if we cannot reduce the frequency anymore, break out early
		 */
		if (requested_freq == policy->min)
			goto out;

		if (requested_freq > freq_step)
			requested_freq -= freq_step;
		else
			requested_freq = policy->min;

		__cpufreq_driver_target(policy, requested_freq,
					CPUFREQ_RELATION_L);
		dbs_info->requested_freq = requested_freq;
	}

 out:
	return dbs_data->sampling_rate;
}
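/*
 * Illustrative walk-through of cs_dbs_update() under the defaults above,
 * with hypothetical limits of policy->min = 800000 kHz and
 * policy->max = 2000000 kHz (so freq_step = 100000 kHz) and requested_freq
 * starting at 800000 kHz:
 *
 *  - one period with load = 90 (> up_threshold of 80): requested_freq is
 *    raised by one step to 900000 kHz and CPUFREQ_RELATION_H is requested;
 *  - a later period with load = 10 (< down_threshold of 20) and
 *    sampling_down_factor = 1: requested_freq is lowered by one step back
 *    to 800000 kHz and CPUFREQ_RELATION_L is requested.
 *
 * With sampling_down_factor > 1, down_skip delays the decrease check until
 * that many periods have elapsed since the last frequency increase.
 */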
/************************** sysfs interface ************************/

static ssize_t store_sampling_down_factor(struct gov_attr_set *attr_set,
					  const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
		return -EINVAL;

	dbs_data->sampling_down_factor = input;
	return count;
}
static ssize_t store_up_threshold(struct gov_attr_set *attr_set,
				  const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > 100 || input <= cs_tuners->down_threshold)
		return -EINVAL;

	dbs_data->up_threshold = input;
	return count;
}
static ssize_t store_down_threshold(struct gov_attr_set *attr_set,
				    const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);

	/* cannot be lower than 1 otherwise freq will not fall */
	if (ret != 1 || input < 1 || input > 100 ||
	    input >= dbs_data->up_threshold)
		return -EINVAL;

	cs_tuners->down_threshold = input;
	return count;
}
static ssize_t store_ignore_nice_load(struct gov_attr_set *attr_set,
				      const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	if (input == dbs_data->ignore_nice_load) /* nothing to do */
		return count;

	dbs_data->ignore_nice_load = input;

	/* we need to re-evaluate prev_cpu_idle */
	gov_update_cpu_data(dbs_data);

	return count;
}
static ssize_t store_freq_step(struct gov_attr_set *attr_set, const char *buf,
			       size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 100)
		input = 100;

	/*
	 * no need to test here if freq_step is zero as the user might actually
	 * want this, they would be crazy though :)
	 */
	cs_tuners->freq_step = input;
	return count;
}
gov_show_one_common(sampling_rate);
gov_show_one_common(sampling_down_factor);
gov_show_one_common(up_threshold);
gov_show_one_common(ignore_nice_load);
gov_show_one(cs, down_threshold);
gov_show_one(cs, freq_step);

gov_attr_rw(sampling_rate);
gov_attr_rw(sampling_down_factor);
gov_attr_rw(up_threshold);
gov_attr_rw(ignore_nice_load);
gov_attr_rw(down_threshold);
gov_attr_rw(freq_step);
static struct attribute *cs_attributes[] = {
	&sampling_rate.attr,
	&sampling_down_factor.attr,
	&up_threshold.attr,
	&down_threshold.attr,
	&ignore_nice_load.attr,
	&freq_step.attr,
	NULL
};

/************************** sysfs end ************************/
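/*
 * Example usage of the tunables declared above (assumed sysfs layout; the
 * exact path depends on the kernel configuration): they are normally
 * exposed under /sys/devices/system/cpu/cpufreq/conservative/ while this
 * governor is in use, e.g.:
 *
 *	echo 30 > /sys/devices/system/cpu/cpufreq/conservative/down_threshold
 *	echo 90 > /sys/devices/system/cpu/cpufreq/conservative/up_threshold
 *	echo 10 > /sys/devices/system/cpu/cpufreq/conservative/freq_step
 *
 * store_down_threshold() and store_up_threshold() reject values that would
 * leave down_threshold >= up_threshold.
 */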
static struct policy_dbs_info *cs_alloc(void)
{
	struct cs_policy_dbs_info *dbs_info;

	dbs_info = kzalloc(sizeof(*dbs_info), GFP_KERNEL);
	return dbs_info ? &dbs_info->policy_dbs : NULL;
}

static void cs_free(struct policy_dbs_info *policy_dbs)
{
	kfree(to_dbs_info(policy_dbs));
}
static int cs_init(struct dbs_data *dbs_data)
{
	struct cs_dbs_tuners *tuners;

	tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
	if (!tuners)
		return -ENOMEM;

	tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD;
	tuners->freq_step = DEF_FREQUENCY_STEP;
	dbs_data->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
	dbs_data->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
	dbs_data->ignore_nice_load = 0;
	dbs_data->tuners = tuners;

	return 0;
}
static void cs_exit(struct dbs_data *dbs_data)
{
	kfree(dbs_data->tuners);
}

static void cs_start(struct cpufreq_policy *policy)
{
	struct cs_policy_dbs_info *dbs_info = to_dbs_info(policy->governor_data);

	dbs_info->down_skip = 0;
	dbs_info->requested_freq = policy->cur;
}
static struct dbs_governor cs_governor = {
	.gov = CPUFREQ_DBS_GOVERNOR_INITIALIZER("conservative"),
	.kobj_type = { .default_attrs = cs_attributes },
	.gov_dbs_update = cs_dbs_update,
	.alloc = cs_alloc,
	.free = cs_free,
	.init = cs_init,
	.exit = cs_exit,
	.start = cs_start,
};

#define CPU_FREQ_GOV_CONSERVATIVE	(&cs_governor.gov)
static int __init cpufreq_gov_dbs_init(void)
{
	return cpufreq_register_governor(CPU_FREQ_GOV_CONSERVATIVE);
}

static void __exit cpufreq_gov_dbs_exit(void)
{
	cpufreq_unregister_governor(CPU_FREQ_GOV_CONSERVATIVE);
}

MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>");
MODULE_DESCRIPTION("'cpufreq_conservative' - A dynamic cpufreq governor for "
		"Low Latency Frequency Transition capable processors "
		"optimised for use in a battery environment");
MODULE_LICENSE("GPL");
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
struct cpufreq_governor *cpufreq_default_governor(void)
{
	return CPU_FREQ_GOV_CONSERVATIVE;
}

fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);