/*
 * drivers/cpufreq/cpufreq_governor.c
 *
 * CPUFREQ governors common code
 *
 * Copyright	(C) 2001 Russell King
 *		(C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *		(C) 2003 Jun Nakajima <jun.nakajima@intel.com>
 *		(C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *		(c) 2012 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include "cpufreq_governor.h"

static DEFINE_PER_CPU(struct cpu_dbs_info, cpu_dbs);

static DEFINE_MUTEX(gov_dbs_data_mutex);

/* Common sysfs tunables */
/**
 * store_sampling_rate - update sampling rate effective immediately if needed.
 *
 * If the new rate is smaller than the old one, simply updating
 * dbs_data->sampling_rate might not be appropriate. For example, if the
 * original sampling_rate was 1 second and the requested new sampling rate is
 * 10 ms because the user needs an immediate reaction from the ondemand
 * governor, but is not sure whether a higher frequency will be required, then
 * the governor may change the sampling rate too late, up to 1 second later.
 * Thus, if we are reducing the sampling rate, we need to make the new value
 * effective immediately.
 *
 * This must be called with dbs_data->mutex held, otherwise traversing
 * policy_dbs_list isn't safe.
 */
ssize_t store_sampling_rate(struct gov_attr_set *attr_set, const char *buf,
			    size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	struct policy_dbs_info *policy_dbs;
	unsigned int rate;
	int ret;
	ret = sscanf(buf, "%u", &rate);
	if (ret != 1)
		return -EINVAL;

	dbs_data->sampling_rate = max(rate, dbs_data->min_sampling_rate);

	/*
	 * We are operating under dbs_data->mutex and so the list and its
	 * entries can't be freed concurrently.
	 */
	list_for_each_entry(policy_dbs, &attr_set->policy_list, list) {
		mutex_lock(&policy_dbs->timer_mutex);
		/*
		 * On 32-bit architectures this may race with the
		 * sample_delay_ns read in dbs_update_util_handler(), but that
		 * really doesn't matter. If the read returns a value that's
		 * too big, the sample will be skipped, but the next invocation
		 * of dbs_update_util_handler() (when the update has been
		 * completed) will take a sample.
		 *
		 * If this runs in parallel with dbs_work_handler(), we may end
		 * up overwriting the sample_delay_ns value that it has just
		 * written, but it will be corrected next time a sample is
		 * taken, so it shouldn't be significant.
		 */
		gov_update_sample_delay(policy_dbs, 0);
		mutex_unlock(&policy_dbs->timer_mutex);
	}

	return count;
}
EXPORT_SYMBOL_GPL(store_sampling_rate);

/**
 * gov_update_cpu_data - Update CPU load data.
 * @dbs_data: Top-level governor data pointer.
 *
 * Update CPU load data for all CPUs in the domain governed by @dbs_data
 * (that may be a single policy or a bunch of them if governor tunables are
 * system-wide).
 *
 * Call under the @dbs_data mutex.
 */
void gov_update_cpu_data(struct dbs_data *dbs_data)
{
	struct policy_dbs_info *policy_dbs;

	list_for_each_entry(policy_dbs, &dbs_data->attr_set.policy_list, list) {
		unsigned int j;

		for_each_cpu(j, policy_dbs->policy->cpus) {
			struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

			j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_update_time,
								  dbs_data->io_is_busy);
			if (dbs_data->ignore_nice_load)
				j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
		}
	}
}
EXPORT_SYMBOL_GPL(gov_update_cpu_data);
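
/*
 * dbs_update() evaluates the recent load of every CPU in @policy and returns
 * the maximum of the per-CPU load values as a percentage.  Governors built on
 * this common code (ondemand, conservative) use the returned value to pick
 * the next frequency.
 */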
unsigned int dbs_update(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	unsigned int ignore_nice = dbs_data->ignore_nice_load;
	unsigned int max_load = 0;
	unsigned int sampling_rate, io_busy, j;

	/*
	 * Sometimes governors may use an additional multiplier to increase
	 * sample delays temporarily. Apply that multiplier to sampling_rate
	 * so as to keep the wake-up-from-idle detection logic a bit
	 * conservative.
	 */
	sampling_rate = dbs_data->sampling_rate * policy_dbs->rate_mult;
	/*
	 * For the purpose of ondemand, waiting for disk IO is an indication
	 * that you're performance critical, and not that the system is actually
	 * idle, so do not add the iowait time to the CPU idle time then.
	 */
	io_busy = dbs_data->io_is_busy;

	/* Get Absolute Load */
	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
		u64 update_time, cur_idle_time;
		unsigned int idle_time, time_elapsed;
		unsigned int load;

		cur_idle_time = get_cpu_idle_time(j, &update_time, io_busy);

		time_elapsed = update_time - j_cdbs->prev_update_time;
		j_cdbs->prev_update_time = update_time;

		idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
		j_cdbs->prev_cpu_idle = cur_idle_time;

		if (ignore_nice) {
			u64 cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];

			idle_time += cputime_to_usecs(cur_nice - j_cdbs->prev_cpu_nice);
			j_cdbs->prev_cpu_nice = cur_nice;
		}

		if (unlikely(!time_elapsed)) {
			/*
			 * That can only happen when this function is called
			 * twice in a row with a very short interval between the
			 * calls, so the previous load value can be used then.
			 */
			load = j_cdbs->prev_load;
		} else if (unlikely(time_elapsed > 2 * sampling_rate &&
				    j_cdbs->prev_load)) {
			/*
			 * If the CPU had gone completely idle and a task has
			 * just woken up on this CPU now, it would be unfair to
			 * calculate 'load' the usual way for this elapsed
			 * time-window, because it would show near-zero load,
			 * irrespective of how CPU intensive that task actually
			 * was. This is undesirable for latency-sensitive bursty
			 * workloads.
			 *
			 * To avoid this, reuse the 'load' from the previous
			 * time-window and give this task a chance to start with
			 * a reasonably high CPU frequency. However, that
			 * shouldn't be over-done, lest we get stuck at a high
			 * load (high frequency) for too long, even when the
			 * current system load has actually dropped down, so
			 * clear prev_load to guarantee that the load will be
			 * computed again next time.
			 *
			 * Detecting this situation is easy: the governor's
			 * utilization update handler would not have run during
			 * CPU-idle periods. Hence, an unusually large
			 * 'time_elapsed' (as compared to the sampling rate)
			 * indicates this scenario.
			 */
			load = j_cdbs->prev_load;
			j_cdbs->prev_load = 0;
		} else {
			if (time_elapsed >= idle_time) {
				load = 100 * (time_elapsed - idle_time) / time_elapsed;
			} else {
				/*
				 * That can happen if idle_time is returned by
				 * get_cpu_idle_time_jiffy(). In that case
				 * idle_time is roughly equal to the difference
				 * between time_elapsed and "busy time" obtained
				 * from CPU statistics. Then, the "busy time"
				 * can end up being greater than time_elapsed
				 * (for example, if jiffies_64 and the CPU
				 * statistics are updated by different CPUs),
				 * so idle_time may in fact be negative. That
				 * means, though, that the CPU was busy all
				 * the time (on the rough average) during the
				 * last sampling interval and 100 can be
				 * returned as the load.
				 */
				load = (int)idle_time < 0 ? 100 : 0;
			}
			j_cdbs->prev_load = load;
		}

		if (load > max_load)
			max_load = load;
	}
	return max_load;
}
EXPORT_SYMBOL_GPL(dbs_update);
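
/*
 * dbs_work_handler() runs in process context: it lets the governor's
 * ->gov_dbs_timer() callback re-evaluate the frequency, stores the next
 * sample delay, and then allows the utilization update handler to queue up
 * more work.
 */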
static void dbs_work_handler(struct work_struct *work)
{
	struct policy_dbs_info *policy_dbs;
	struct cpufreq_policy *policy;
	struct dbs_governor *gov;

	policy_dbs = container_of(work, struct policy_dbs_info, work);
	policy = policy_dbs->policy;
	gov = dbs_governor_of(policy);

	/*
	 * Make sure cpufreq_governor_limits() isn't evaluating load or the
	 * ondemand governor isn't updating the sampling rate in parallel.
	 */
	mutex_lock(&policy_dbs->timer_mutex);
	gov_update_sample_delay(policy_dbs, gov->gov_dbs_timer(policy));
	mutex_unlock(&policy_dbs->timer_mutex);

	/* Allow the utilization update handler to queue up more work. */
	atomic_set(&policy_dbs->work_count, 0);
	/*
	 * If the update below is reordered with respect to the sample delay
	 * modification, the utilization update handler may end up using a stale
	 * sample delay value.
	 */
	smp_wmb();
	policy_dbs->work_in_progress = false;
}
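
/*
 * dbs_irq_work() runs in interrupt context and simply schedules
 * dbs_work_handler() on the local CPU, moving the frequency re-evaluation
 * out of the scheduler paths.
 */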
static void dbs_irq_work(struct irq_work *irq_work)
{
	struct policy_dbs_info *policy_dbs;

	policy_dbs = container_of(irq_work, struct policy_dbs_info, irq_work);
	schedule_work_on(smp_processor_id(), &policy_dbs->work);
}
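
/*
 * dbs_update_util_handler() is invoked from the scheduler's utilization
 * update hooks.  If no work is pending and enough time has passed since the
 * last sample, it queues the irq_work that leads to dbs_work_handler().
 */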
static void dbs_update_util_handler(struct update_util_data *data, u64 time,
				    unsigned int flags)
{
	struct cpu_dbs_info *cdbs = container_of(data, struct cpu_dbs_info, update_util);
	struct policy_dbs_info *policy_dbs = cdbs->policy_dbs;
	u64 delta_ns, lst;

	/*
	 * The work may not be allowed to be queued up right now.
	 * Possible reasons:
	 * - Work has already been queued up or is in progress.
	 * - It is too early (too little time from the previous sample).
	 */
	if (policy_dbs->work_in_progress)
		return;

	/*
	 * If the reads below are reordered before the check above, the value
	 * of sample_delay_ns used in the computation may be stale.
	 */
	smp_rmb();
	lst = READ_ONCE(policy_dbs->last_sample_time);
	delta_ns = time - lst;
	if ((s64)delta_ns < policy_dbs->sample_delay_ns)
		return;

	/*
	 * If the policy is not shared, the irq_work may be queued up right away
	 * at this point. Otherwise, we need to ensure that only one of the
	 * CPUs sharing the policy will do that.
	 */
	if (policy_dbs->is_shared) {
		if (!atomic_add_unless(&policy_dbs->work_count, 1, 1))
			return;

		/*
		 * If another CPU updated last_sample_time in the meantime, we
		 * shouldn't be here, so clear the work counter and bail out.
		 */
		if (unlikely(lst != READ_ONCE(policy_dbs->last_sample_time))) {
			atomic_set(&policy_dbs->work_count, 0);
			return;
		}
	}

	policy_dbs->last_sample_time = time;
	policy_dbs->work_in_progress = true;
	irq_work_queue(&policy_dbs->irq_work);
}
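
/*
 * gov_set_update_util() installs dbs_update_util_handler() as the scheduler
 * utilization update hook for every CPU in the policy and arms the initial
 * sample delay.
 */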
static void gov_set_update_util(struct policy_dbs_info *policy_dbs,
				unsigned int delay_us)
{
	struct cpufreq_policy *policy = policy_dbs->policy;
	int cpu;

	gov_update_sample_delay(policy_dbs, delay_us);
	policy_dbs->last_sample_time = 0;

	for_each_cpu(cpu, policy->cpus) {
		struct cpu_dbs_info *cdbs = &per_cpu(cpu_dbs, cpu);

		cpufreq_add_update_util_hook(cpu, &cdbs->update_util,
					     dbs_update_util_handler);
	}
}
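
/*
 * gov_clear_update_util() removes the utilization update hooks again and
 * waits for any handler invocation still in flight to complete.
 */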
static inline void gov_clear_update_util(struct cpufreq_policy *policy)
{
	int i;

	for_each_cpu(i, policy->cpus)
		cpufreq_remove_update_util_hook(i);

	synchronize_sched();
}
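
/*
 * alloc_policy_dbs_info() asks the governor to allocate its per-policy data,
 * initializes the embedded lock, irq_work and work item, and points every
 * related CPU's cpu_dbs_info at the new structure.
 */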
static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *policy,
						     struct dbs_governor *gov)
{
	struct policy_dbs_info *policy_dbs;
	int j;

	/* Allocate memory for per-policy governor data. */
	policy_dbs = gov->alloc();
	if (!policy_dbs)
		return NULL;

	policy_dbs->policy = policy;
	mutex_init(&policy_dbs->timer_mutex);
	atomic_set(&policy_dbs->work_count, 0);
	init_irq_work(&policy_dbs->irq_work, dbs_irq_work);
	INIT_WORK(&policy_dbs->work, dbs_work_handler);

	/* Set policy_dbs for all CPUs, online+offline */
	for_each_cpu(j, policy->related_cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

		j_cdbs->policy_dbs = policy_dbs;
	}
	return policy_dbs;
}
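
/*
 * free_policy_dbs_info() undoes alloc_policy_dbs_info(): it clears the
 * per-CPU back-pointers and update_util hooks and hands the structure back
 * to the governor for freeing.
 */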
static void free_policy_dbs_info(struct policy_dbs_info *policy_dbs,
				 struct dbs_governor *gov)
{
	int j;

	mutex_destroy(&policy_dbs->timer_mutex);

	for_each_cpu(j, policy_dbs->policy->related_cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

		j_cdbs->policy_dbs = NULL;
		j_cdbs->update_util.func = NULL;
	}
	gov->free(policy_dbs);
}
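
/*
 * cpufreq_dbs_governor_init() is the common ->init() entry point: it
 * allocates the per-policy data, attaches to an existing tunables set or
 * creates a new one, and exposes it via sysfs, rolling back on failure.
 */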
int cpufreq_dbs_governor_init(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct dbs_data *dbs_data;
	struct policy_dbs_info *policy_dbs;
	unsigned int latency;
	int ret = 0;

	/* State should be equivalent to EXIT */
	if (policy->governor_data)
		return -EBUSY;

	policy_dbs = alloc_policy_dbs_info(policy, gov);
	if (!policy_dbs)
		return -ENOMEM;

	/* Protect gov->gdbs_data against concurrent updates. */
	mutex_lock(&gov_dbs_data_mutex);

	dbs_data = gov->gdbs_data;
	if (dbs_data) {
		if (WARN_ON(have_governor_per_policy())) {
			ret = -EINVAL;
			goto free_policy_dbs_info;
		}
		policy_dbs->dbs_data = dbs_data;
		policy->governor_data = policy_dbs;

		gov_attr_set_get(&dbs_data->attr_set, &policy_dbs->list);
		goto out;
	}

	dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
	if (!dbs_data) {
		ret = -ENOMEM;
		goto free_policy_dbs_info;
	}

	gov_attr_set_init(&dbs_data->attr_set, &policy_dbs->list);

	ret = gov->init(dbs_data);
	if (ret)
		goto free_policy_dbs_info;

	/* policy latency is in ns. Convert it to us first */
	latency = policy->cpuinfo.transition_latency / 1000;
	if (latency == 0)
		latency = 1;

	/* Bring kernel and HW constraints together */
	dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
					  MIN_LATENCY_MULTIPLIER * latency);
	dbs_data->sampling_rate = max(dbs_data->min_sampling_rate,
				      LATENCY_MULTIPLIER * latency);

	if (!have_governor_per_policy())
		gov->gdbs_data = dbs_data;

	policy_dbs->dbs_data = dbs_data;
	policy->governor_data = policy_dbs;

	gov->kobj_type.sysfs_ops = &governor_sysfs_ops;
	ret = kobject_init_and_add(&dbs_data->attr_set.kobj, &gov->kobj_type,
				   get_governor_parent_kobj(policy),
				   "%s", gov->gov.name);
	if (!ret)
		goto out;

	/* Failure, so roll back. */
	pr_err("initialization failed (dbs_data kobject init error %d)\n", ret);

	policy->governor_data = NULL;

	if (!have_governor_per_policy())
		gov->gdbs_data = NULL;
	gov->exit(dbs_data);
	kfree(dbs_data);

free_policy_dbs_info:
	free_policy_dbs_info(policy_dbs, gov);

out:
	mutex_unlock(&gov_dbs_data_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_init);
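
/*
 * cpufreq_dbs_governor_exit() drops the policy's reference on the tunables
 * set, freeing it (via the governor's ->exit()) when the last user goes
 * away, and then releases the per-policy data.
 */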
void cpufreq_dbs_governor_exit(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	unsigned int count;

	/* Protect gov->gdbs_data against concurrent updates. */
	mutex_lock(&gov_dbs_data_mutex);

	count = gov_attr_set_put(&dbs_data->attr_set, &policy_dbs->list);

	policy->governor_data = NULL;

	if (!count) {
		if (!have_governor_per_policy())
			gov->gdbs_data = NULL;

		gov->exit(dbs_data);
		kfree(dbs_data);
	}

	free_policy_dbs_info(policy_dbs, gov);

	mutex_unlock(&gov_dbs_data_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_exit);
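
/*
 * cpufreq_dbs_governor_start() snapshots the idle statistics of each CPU in
 * the policy, lets the governor run its own ->start() setup, and installs
 * the utilization update hooks with the configured sampling rate.
 */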
int cpufreq_dbs_governor_start(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	unsigned int sampling_rate, ignore_nice, j;
	unsigned int io_busy;

	if (!policy->cur)
		return -EINVAL;

	policy_dbs->is_shared = policy_is_shared(policy);
	policy_dbs->rate_mult = 1;

	sampling_rate = dbs_data->sampling_rate;
	ignore_nice = dbs_data->ignore_nice_load;
	io_busy = dbs_data->io_is_busy;

	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

		j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_update_time, io_busy);
		/*
		 * Make the first invocation of dbs_update() compute the load.
		 */
		j_cdbs->prev_load = 0;

		if (ignore_nice)
			j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
	}

	gov->start(policy);

	gov_set_update_util(policy_dbs, sampling_rate);
	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_start);
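
/*
 * cpufreq_dbs_governor_stop() tears down what cpufreq_dbs_governor_start()
 * set up: it removes the update hooks and cancels any pending irq_work or
 * work item before resetting the bookkeeping.
 */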
void cpufreq_dbs_governor_stop(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;

	gov_clear_update_util(policy_dbs->policy);
	irq_work_sync(&policy_dbs->irq_work);
	cancel_work_sync(&policy_dbs->work);
	atomic_set(&policy_dbs->work_count, 0);
	policy_dbs->work_in_progress = false;
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_stop);
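
/*
 * cpufreq_dbs_governor_limits() re-applies the policy limits and forces an
 * immediate re-evaluation by zeroing the sample delay.
 */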
void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;

	mutex_lock(&policy_dbs->timer_mutex);
	cpufreq_policy_apply_limits(policy);
	gov_update_sample_delay(policy_dbs, 0);

	mutex_unlock(&policy_dbs->timer_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_limits);