/*
 * drivers/cpufreq/cpufreq_conservative.c
 *
 * Copyright (C) 2001 Russell King
 *           (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                    Jun Nakajima <jun.nakajima@intel.com>
 *           (C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/slab.h>
#include "cpufreq_governor.h"

/* Conservative governor macros */
#define DEF_FREQUENCY_UP_THRESHOLD      (80)
#define DEF_FREQUENCY_DOWN_THRESHOLD    (20)
#define DEF_FREQUENCY_STEP              (5)
#define DEF_SAMPLING_DOWN_FACTOR        (1)
#define MAX_SAMPLING_DOWN_FACTOR        (10)

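/*
 * The two thresholds are load percentages, freq_step is a percentage of
 * policy->max, and sampling_down_factor stretches the interval between
 * frequency-decrease evaluations.
 */
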
static DEFINE_PER_CPU(struct cs_cpu_dbs_info_s, cs_cpu_dbs_info);

static int cs_cpufreq_governor_dbs(struct cpufreq_policy *policy,
                                   unsigned int event);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
static
#endif
struct cpufreq_governor cpufreq_gov_conservative = {
        .name                   = "conservative",
        .governor               = cs_cpufreq_governor_dbs,
        .max_transition_latency = TRANSITION_LATENCY_LIMIT,
        .owner                  = THIS_MODULE,
};

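/* One frequency step is freq_step percent of policy->max (in kHz). */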
static inline unsigned int get_freq_target(struct cs_dbs_tuners *cs_tuners,
                                           struct cpufreq_policy *policy)
{
        unsigned int freq_target = (cs_tuners->freq_step * policy->max) / 100;

        /* max freq cannot be less than 100. But who knows... */
        if (unlikely(freq_target == 0))
                freq_target = DEF_FREQUENCY_STEP;

        return freq_target;
}

/*
 * Every sampling_rate, we check, if current idle time is less than 20%
 * (default), then we try to increase frequency. Every sampling_rate *
 * sampling_down_factor, we check, if current idle time is more than 80%
 * (default), then we try to decrease frequency.
 *
 * Frequency changes happen in steps of 5% (default) of the maximum frequency.
 */

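/*
 * For illustration, with the defaults and a 2,000,000 kHz policy->max each
 * step is 100,000 kHz: a sample with load above 80 raises requested_freq by
 * one step (capped at policy->max), and a sample with load below 20 lowers
 * it by one step (floored at policy->min) once every sampling_down_factor
 * samples.
 */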
static void cs_check_cpu(int cpu, unsigned int load)
{
        struct cs_cpu_dbs_info_s *dbs_info = &per_cpu(cs_cpu_dbs_info, cpu);
        struct cpufreq_policy *policy = dbs_info->cdbs.shared->policy;
        struct dbs_data *dbs_data = policy->governor_data;
        struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;

        /*
         * break out if we 'cannot' reduce the speed as the user might
         * want freq_step to be zero
         */
        if (cs_tuners->freq_step == 0)
                return;

        /* Check for frequency increase */
        if (load > cs_tuners->up_threshold) {
                dbs_info->down_skip = 0;

                /* if we are already at full speed then break out early */
                if (dbs_info->requested_freq == policy->max)
                        return;

                dbs_info->requested_freq += get_freq_target(cs_tuners, policy);

                if (dbs_info->requested_freq > policy->max)
                        dbs_info->requested_freq = policy->max;

                __cpufreq_driver_target(policy, dbs_info->requested_freq,
                                        CPUFREQ_RELATION_H);
                return;
        }

        /* if sampling_down_factor is active break out early */
        if (++dbs_info->down_skip < cs_tuners->sampling_down_factor)
                return;
        dbs_info->down_skip = 0;

        /* Check for frequency decrease */
        if (load < cs_tuners->down_threshold) {
                unsigned int freq_target;

                /*
                 * if we cannot reduce the frequency anymore, break out early
                 */
                if (policy->cur == policy->min)
                        return;

                freq_target = get_freq_target(cs_tuners, policy);
                if (dbs_info->requested_freq > freq_target)
                        dbs_info->requested_freq -= freq_target;
                else
                        dbs_info->requested_freq = policy->min;

                __cpufreq_driver_target(policy, dbs_info->requested_freq,
                                        CPUFREQ_RELATION_L);
        }
}

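/*
 * Timer callback: re-evaluate the load (dbs_check_cpu() ends up calling
 * cs_check_cpu() above) and report how long to wait before the next sample.
 */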
static unsigned int cs_dbs_timer(struct cpu_dbs_info *cdbs,
                                 struct dbs_data *dbs_data, bool modify_all)
{
        struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;

        if (modify_all)
                dbs_check_cpu(dbs_data, cdbs->shared->policy->cpu);

        return delay_for_sampling_rate(cs_tuners->sampling_rate);
}

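/*
 * Transition notifier: if our internally tracked requested_freq has drifted
 * outside the current policy limits, resync it to the newly set frequency.
 */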
static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
                                void *data)
{
        struct cpufreq_freqs *freq = data;
        struct cs_cpu_dbs_info_s *dbs_info =
                                        &per_cpu(cs_cpu_dbs_info, freq->cpu);
        struct cpufreq_policy *policy = cpufreq_cpu_get_raw(freq->cpu);

        if (!policy)
                return 0;

        /* policy isn't governed by conservative governor */
        if (policy->governor != &cpufreq_gov_conservative)
                return 0;

        /*
         * we only care if our internally tracked freq moves outside the 'valid'
         * ranges of frequency available to us otherwise we do not change it
         */
        if (dbs_info->requested_freq > policy->max
                        || dbs_info->requested_freq < policy->min)
                dbs_info->requested_freq = freq->new;

        return 0;
}

static struct notifier_block cs_cpufreq_notifier_block = {
        .notifier_call = dbs_cpufreq_notifier,
};

/************************** sysfs interface ************************/
static struct common_dbs_data cs_dbs_cdata;

static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
                const char *buf, size_t count)
{
        struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);

        if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
                return -EINVAL;

        cs_tuners->sampling_down_factor = input;
        return count;
}

static ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
                size_t count)
{
        struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);

        if (ret != 1)
                return -EINVAL;

        cs_tuners->sampling_rate = max(input, dbs_data->min_sampling_rate);
        return count;
}

static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
                size_t count)
{
        struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);

        if (ret != 1 || input > 100 || input <= cs_tuners->down_threshold)
                return -EINVAL;

        cs_tuners->up_threshold = input;
        return count;
}

static ssize_t store_down_threshold(struct dbs_data *dbs_data, const char *buf,
                size_t count)
{
        struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);

        /* cannot be lower than 1 otherwise freq will not fall */
        if (ret != 1 || input < 1 || input > 100 ||
                        input >= cs_tuners->up_threshold)
                return -EINVAL;

        cs_tuners->down_threshold = input;
        return count;
}

static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
                const char *buf, size_t count)
{
        struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
        unsigned int input, j;
        int ret;

        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;

        if (input > 1)
                input = 1;

        if (input == cs_tuners->ignore_nice_load) /* nothing to do */
                return count;

        cs_tuners->ignore_nice_load = input;

        /* we need to re-evaluate prev_cpu_idle */
        for_each_online_cpu(j) {
                struct cs_cpu_dbs_info_s *dbs_info;
                dbs_info = &per_cpu(cs_cpu_dbs_info, j);
                dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
                                        &dbs_info->cdbs.prev_cpu_wall, 0);
                if (cs_tuners->ignore_nice_load)
                        dbs_info->cdbs.prev_cpu_nice =
                                kcpustat_cpu(j).cpustat[CPUTIME_NICE];
        }
        return count;
}

static ssize_t store_freq_step(struct dbs_data *dbs_data, const char *buf,
                size_t count)
{
        struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);

        if (ret != 1)
                return -EINVAL;

        if (input > 100)
                input = 100;

        /*
         * no need to test here if freq_step is zero as the user might actually
         * want this, they would be crazy though :)
         */
        cs_tuners->freq_step = input;
        return count;
}

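/*
 * The macros below (from cpufreq_governor.h) generate the show()/store()
 * wrappers for each tunable and the matching sysfs attributes for both the
 * global (gov_sys) and per-policy (gov_pol) interfaces.
 */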
show_store_one(cs, sampling_rate);
show_store_one(cs, sampling_down_factor);
show_store_one(cs, up_threshold);
show_store_one(cs, down_threshold);
show_store_one(cs, ignore_nice_load);
show_store_one(cs, freq_step);
declare_show_sampling_rate_min(cs);

gov_sys_pol_attr_rw(sampling_rate);
gov_sys_pol_attr_rw(sampling_down_factor);
gov_sys_pol_attr_rw(up_threshold);
gov_sys_pol_attr_rw(down_threshold);
gov_sys_pol_attr_rw(ignore_nice_load);
gov_sys_pol_attr_rw(freq_step);
gov_sys_pol_attr_ro(sampling_rate_min);

static struct attribute *dbs_attributes_gov_sys[] = {
        &sampling_rate_min_gov_sys.attr,
        &sampling_rate_gov_sys.attr,
        &sampling_down_factor_gov_sys.attr,
        &up_threshold_gov_sys.attr,
        &down_threshold_gov_sys.attr,
        &ignore_nice_load_gov_sys.attr,
        &freq_step_gov_sys.attr,
        NULL
};

static struct attribute_group cs_attr_group_gov_sys = {
        .attrs = dbs_attributes_gov_sys,
        .name = "conservative",
};

static struct attribute *dbs_attributes_gov_pol[] = {
        &sampling_rate_min_gov_pol.attr,
        &sampling_rate_gov_pol.attr,
        &sampling_down_factor_gov_pol.attr,
        &up_threshold_gov_pol.attr,
        &down_threshold_gov_pol.attr,
        &ignore_nice_load_gov_pol.attr,
        &freq_step_gov_pol.attr,
        NULL
};

static struct attribute_group cs_attr_group_gov_pol = {
        .attrs = dbs_attributes_gov_pol,
        .name = "conservative",
};

/************************** sysfs end ************************/

static int cs_init(struct dbs_data *dbs_data, bool notify)
{
        struct cs_dbs_tuners *tuners;

        tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
        if (!tuners) {
                pr_err("%s: kzalloc failed\n", __func__);
                return -ENOMEM;
        }

        tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
        tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD;
        tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
        tuners->ignore_nice_load = 0;
        tuners->freq_step = DEF_FREQUENCY_STEP;

        dbs_data->tuners = tuners;
        dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
                jiffies_to_usecs(10);

        if (notify)
                cpufreq_register_notifier(&cs_cpufreq_notifier_block,
                                          CPUFREQ_TRANSITION_NOTIFIER);

        return 0;
}

static void cs_exit(struct dbs_data *dbs_data, bool notify)
{
        if (notify)
                cpufreq_unregister_notifier(&cs_cpufreq_notifier_block,
                                            CPUFREQ_TRANSITION_NOTIFIER);

        kfree(dbs_data->tuners);
}

define_get_cpu_dbs_routines(cs_cpu_dbs_info);

static struct common_dbs_data cs_dbs_cdata = {
        .governor = GOV_CONSERVATIVE,
        .attr_group_gov_sys = &cs_attr_group_gov_sys,
        .attr_group_gov_pol = &cs_attr_group_gov_pol,
        .get_cpu_cdbs = get_cpu_cdbs,
        .get_cpu_dbs_info_s = get_cpu_dbs_info_s,
        .gov_dbs_timer = cs_dbs_timer,
        .gov_check_cpu = cs_check_cpu,
        .init = cs_init,
        .exit = cs_exit,
        .mutex = __MUTEX_INITIALIZER(cs_dbs_cdata.mutex),
};

static int cs_cpufreq_governor_dbs(struct cpufreq_policy *policy,
                                   unsigned int event)
{
        return cpufreq_governor_dbs(policy, &cs_dbs_cdata, event);
}

static int __init cpufreq_gov_dbs_init(void)
{
        return cpufreq_register_governor(&cpufreq_gov_conservative);
}

static void __exit cpufreq_gov_dbs_exit(void)
{
        cpufreq_unregister_governor(&cpufreq_gov_conservative);
}

MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>");
MODULE_DESCRIPTION("'cpufreq_conservative' - A dynamic cpufreq governor for "
                "Low Latency Frequency Transition capable processors "
                "optimised for use in a battery environment");
MODULE_LICENSE("GPL");

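/*
 * When this is the default governor, register it early via fs_initcall() so
 * it is available before other initcalls; otherwise use a normal module init.
 */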
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);