/*
 * drivers/cpufreq/cpufreq_conservative.c
 *
 * Copyright (C) 2001 Russell King
 *           (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                    Jun Nakajima <jun.nakajima@intel.com>
 *           (C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/slab.h>
#include "cpufreq_governor.h"

struct cs_policy_dbs_info {
	struct policy_dbs_info policy_dbs;
	unsigned int down_skip;
	unsigned int requested_freq;
};

static inline struct cs_policy_dbs_info *to_dbs_info(struct policy_dbs_info *policy_dbs)
{
	return container_of(policy_dbs, struct cs_policy_dbs_info, policy_dbs);
}

struct cs_dbs_tuners {
	unsigned int down_threshold;
	unsigned int freq_step;
};

/* Conservative governor macros */
#define DEF_FREQUENCY_UP_THRESHOLD	(80)
#define DEF_FREQUENCY_DOWN_THRESHOLD	(20)
#define DEF_FREQUENCY_STEP		(5)
#define DEF_SAMPLING_DOWN_FACTOR	(1)
#define MAX_SAMPLING_DOWN_FACTOR	(10)

static inline unsigned int get_freq_target(struct cs_dbs_tuners *cs_tuners,
					   struct cpufreq_policy *policy)
{
	unsigned int freq_target = (cs_tuners->freq_step * policy->max) / 100;

	/* max freq cannot be less than 100. But who knows... */
	if (unlikely(freq_target == 0))
		freq_target = DEF_FREQUENCY_STEP;

	return freq_target;
}

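/*
 * Illustrative arithmetic only (the frequency value below is assumed, not
 * taken from this file): with freq_step at its default of 5 and a
 * policy->max of 2000000 kHz, get_freq_target() returns
 * (5 * 2000000) / 100 = 100000 kHz, i.e. each step moves the requested
 * frequency by 5% of the maximum frequency.
 */
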
/*
 * Every sampling_rate, we check whether the current idle time is less than
 * 20% (default); if so, we try to increase the frequency. Every
 * sampling_rate * sampling_down_factor, we check whether the current idle
 * time is more than 80% (default); if so, we try to decrease the frequency.
 *
 * Any frequency increase takes it to the maximum frequency. Frequency
 * reduction happens in minimum steps of 5% (default) of the maximum
 * frequency.
 */
static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct cs_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int load = dbs_update(policy);

	/*
	 * break out if we 'cannot' reduce the speed as the user might
	 * want freq_step to be zero
	 */
	if (cs_tuners->freq_step == 0)
		goto out;

	/* Check for frequency increase */
	if (load > dbs_data->up_threshold) {
		dbs_info->down_skip = 0;

		/* if we are already at full speed then break out early */
		if (dbs_info->requested_freq == policy->max)
			goto out;

		dbs_info->requested_freq += get_freq_target(cs_tuners, policy);

		if (dbs_info->requested_freq > policy->max)
			dbs_info->requested_freq = policy->max;

		__cpufreq_driver_target(policy, dbs_info->requested_freq,
					CPUFREQ_RELATION_H);
		goto out;
	}

	/* if sampling_down_factor is active break out early */
	if (++dbs_info->down_skip < dbs_data->sampling_down_factor)
		goto out;

	dbs_info->down_skip = 0;

	/* Check for frequency decrease */
	if (load < cs_tuners->down_threshold) {
		unsigned int freq_target;

		/* if we cannot reduce the frequency anymore, break out early */
		if (policy->cur == policy->min)
			goto out;

		freq_target = get_freq_target(cs_tuners, policy);
		if (dbs_info->requested_freq > freq_target)
			dbs_info->requested_freq -= freq_target;
		else
			dbs_info->requested_freq = policy->min;

		__cpufreq_driver_target(policy, dbs_info->requested_freq,
					CPUFREQ_RELATION_L);
	}

 out:
	return dbs_data->sampling_rate;
}

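/*
 * Worked example with assumed tunable values: with sampling_rate = 20000 us
 * and sampling_down_factor = 10, cs_dbs_timer() may raise the frequency on
 * any 20 ms sample, but down_skip must reach 10 before a decrease is even
 * considered, so the frequency can fall at most once every ~200 ms.
 */
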
static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				void *data);

static struct notifier_block cs_cpufreq_notifier_block = {
	.notifier_call = dbs_cpufreq_notifier,
};

/************************** sysfs interface ************************/
static struct dbs_governor cs_dbs_gov;

static ssize_t store_sampling_down_factor(struct gov_attr_set *attr_set,
					  const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
		return -EINVAL;

	dbs_data->sampling_down_factor = input;
	return count;
}

static ssize_t store_up_threshold(struct gov_attr_set *attr_set,
				  const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > 100 || input <= cs_tuners->down_threshold)
		return -EINVAL;

	dbs_data->up_threshold = input;
	return count;
}

static ssize_t store_down_threshold(struct gov_attr_set *attr_set,
				    const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);

	/* cannot be lower than 11 otherwise freq will not fall */
	if (ret != 1 || input < 11 || input > 100 ||
	    input >= dbs_data->up_threshold)
		return -EINVAL;

	cs_tuners->down_threshold = input;
	return count;
}

static ssize_t store_ignore_nice_load(struct gov_attr_set *attr_set,
				      const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	if (input == dbs_data->ignore_nice_load) /* nothing to do */
		return count;

	dbs_data->ignore_nice_load = input;

	/* we need to re-evaluate prev_cpu_idle */
	gov_update_cpu_data(dbs_data);

	return count;
}

static ssize_t store_freq_step(struct gov_attr_set *attr_set, const char *buf,
			       size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 100)
		input = 100;

	/*
	 * no need to test here if freq_step is zero as the user might actually
	 * want this, they would be crazy though :)
	 */
	cs_tuners->freq_step = input;
	return count;
}

gov_show_one_common(sampling_rate);
gov_show_one_common(sampling_down_factor);
gov_show_one_common(up_threshold);
gov_show_one_common(ignore_nice_load);
gov_show_one_common(min_sampling_rate);
gov_show_one(cs, down_threshold);
gov_show_one(cs, freq_step);

gov_attr_rw(sampling_rate);
gov_attr_rw(sampling_down_factor);
gov_attr_rw(up_threshold);
gov_attr_rw(ignore_nice_load);
gov_attr_ro(min_sampling_rate);
gov_attr_rw(down_threshold);
gov_attr_rw(freq_step);

static struct attribute *cs_attributes[] = {
	&min_sampling_rate.attr,
	&sampling_rate.attr,
	&sampling_down_factor.attr,
	&up_threshold.attr,
	&down_threshold.attr,
	&ignore_nice_load.attr,
	&freq_step.attr,
	NULL
};

/************************** sysfs end ************************/
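
/*
 * For illustration only: when this governor is in use, the tunables above
 * are typically exercised from userspace via sysfs, e.g. (the exact path
 * depends on the kernel configuration, such as per-policy governor tunables):
 *
 *   echo conservative > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
 *   echo 30 > /sys/devices/system/cpu/cpufreq/conservative/down_threshold
 *   echo 10 > /sys/devices/system/cpu/cpufreq/conservative/freq_step
 */
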
static struct policy_dbs_info *cs_alloc(void)
{
	struct cs_policy_dbs_info *dbs_info;

	dbs_info = kzalloc(sizeof(*dbs_info), GFP_KERNEL);
	return dbs_info ? &dbs_info->policy_dbs : NULL;
}

static void cs_free(struct policy_dbs_info *policy_dbs)
{
	kfree(to_dbs_info(policy_dbs));
}

static int cs_init(struct dbs_data *dbs_data, bool notify)
{
	struct cs_dbs_tuners *tuners;

	tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
	if (!tuners) {
		pr_err("%s: kzalloc failed\n", __func__);
		return -ENOMEM;
	}

	tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD;
	tuners->freq_step = DEF_FREQUENCY_STEP;
	dbs_data->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
	dbs_data->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
	dbs_data->ignore_nice_load = 0;

	dbs_data->tuners = tuners;
	dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
		jiffies_to_usecs(10);

	if (notify)
		cpufreq_register_notifier(&cs_cpufreq_notifier_block,
					  CPUFREQ_TRANSITION_NOTIFIER);

	return 0;
}
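
/*
 * Illustrative arithmetic, under stated assumptions: assuming
 * MIN_SAMPLING_RATE_RATIO is 2 (its usual definition in cpufreq_governor.h)
 * and a HZ=250 kernel (so jiffies_to_usecs(10) = 40000 us), cs_init() sets
 * min_sampling_rate to 2 * 40000 = 80000 us, i.e. on that configuration the
 * governor cannot be asked to sample more often than every 80 ms.
 */
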
static void cs_exit(struct dbs_data *dbs_data, bool notify)
{
	if (notify)
		cpufreq_unregister_notifier(&cs_cpufreq_notifier_block,
					    CPUFREQ_TRANSITION_NOTIFIER);

	kfree(dbs_data->tuners);
}

static void cs_start(struct cpufreq_policy *policy)
{
	struct cs_policy_dbs_info *dbs_info = to_dbs_info(policy->governor_data);

	dbs_info->down_skip = 0;
	dbs_info->requested_freq = policy->cur;
}

static struct dbs_governor cs_dbs_gov = {
	.gov = {
		.name = "conservative",
		.governor = cpufreq_governor_dbs,
		.max_transition_latency = TRANSITION_LATENCY_LIMIT,
		.owner = THIS_MODULE,
	},
	.kobj_type = { .default_attrs = cs_attributes },
	.gov_dbs_timer = cs_dbs_timer,
	.alloc = cs_alloc,
	.free = cs_free,
	.init = cs_init,
	.exit = cs_exit,
	.start = cs_start,
};

#define CPU_FREQ_GOV_CONSERVATIVE	(&cs_dbs_gov.gov)

static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(freq->cpu);
	struct cs_policy_dbs_info *dbs_info;

	if (!policy)
		return 0;

	/* policy isn't governed by conservative governor */
	if (policy->governor != CPU_FREQ_GOV_CONSERVATIVE)
		return 0;

	dbs_info = to_dbs_info(policy->governor_data);
	/*
	 * We only care if our internally tracked freq moves outside the
	 * 'valid' range of frequencies available to us; otherwise we do not
	 * change it.
	 */
	if (dbs_info->requested_freq > policy->max
	    || dbs_info->requested_freq < policy->min)
		dbs_info->requested_freq = freq->new;

	return 0;
}
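
/*
 * Example scenario (assumed values): if the tracked requested_freq is
 * 1800000 kHz and userspace lowers the policy maximum to 1200000 kHz, the
 * next transition notification finds requested_freq above policy->max and
 * snaps it to freq->new, the frequency the CPU actually switched to.
 */
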
static int __init cpufreq_gov_dbs_init(void)
{
	return cpufreq_register_governor(CPU_FREQ_GOV_CONSERVATIVE);
}

static void __exit cpufreq_gov_dbs_exit(void)
{
	cpufreq_unregister_governor(CPU_FREQ_GOV_CONSERVATIVE);
}

MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>");
MODULE_DESCRIPTION("'cpufreq_conservative' - A dynamic cpufreq governor for "
		"Low Latency Frequency Transition capable processors "
		"optimised for use in a battery environment");
MODULE_LICENSE("GPL");

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
struct cpufreq_governor *cpufreq_default_governor(void)
{
	return CPU_FREQ_GOV_CONSERVATIVE;
}

fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);