// SPDX-License-Identifier: GPL-2.0
/*
 * CPUFreq governor based on scheduler-provided CPU utilization data.
 *
 * Copyright (C) 2016, Intel Corporation
 * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "sched.h"

#include <linux/sched/cpufreq.h>
#include <trace/events/power.h>

#define IOWAIT_BOOST_MIN        (SCHED_CAPACITY_SCALE / 8)
struct sugov_tunables {
        struct gov_attr_set     attr_set;
        unsigned int            rate_limit_us;
};
struct sugov_policy {
        struct cpufreq_policy   *policy;

        struct sugov_tunables   *tunables;
        struct list_head        tunables_hook;

        raw_spinlock_t          update_lock;    /* For shared policies */
        u64                     last_freq_update_time;
        s64                     freq_update_delay_ns;
        unsigned int            next_freq;
        unsigned int            cached_raw_freq;

        /* The next fields are only needed if fast switch cannot be used: */
        struct irq_work         irq_work;
        struct kthread_work     work;
        struct mutex            work_lock;
        struct kthread_worker   worker;
        struct task_struct      *thread;
        bool                    work_in_progress;

        bool                    limits_changed;
        bool                    need_freq_update;
};

struct sugov_cpu {
        struct update_util_data update_util;
        struct sugov_policy     *sg_policy;
        unsigned int            cpu;

        bool                    iowait_boost_pending;
        unsigned int            iowait_boost;
        u64                     last_update;

        unsigned long           bw_dl;
        unsigned long           max;
        unsigned long           util;

        /* The field below is for single-CPU policies only: */
#ifdef CONFIG_NO_HZ_COMMON
        unsigned long           saved_idle_calls;
#endif
};

static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);
/************************ Governor internals ***********************/

static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
{
        s64 delta_ns;

        /*
         * Since cpufreq_update_util() is called with rq->lock held for
         * the @target_cpu, our per-CPU data is fully serialized.
         *
         * However, drivers cannot in general deal with cross-CPU
         * requests, so while get_next_freq() will work, our
         * sugov_update_commit() call may not for the fast switching platforms.
         *
         * Hence stop here for remote requests if they aren't supported
         * by the hardware, as calculating the frequency is pointless if
         * we cannot in fact act on it.
         *
         * This is needed on the slow switching platforms too to prevent CPUs
         * going offline from leaving stale IRQ work items behind.
         */
        if (!cpufreq_this_cpu_can_update(sg_policy->policy))
                return false;

        if (unlikely(sg_policy->limits_changed)) {
                sg_policy->limits_changed = false;
                sg_policy->need_freq_update = true;
                return true;
        }

        delta_ns = time - sg_policy->last_freq_update_time;

        return delta_ns >= sg_policy->freq_update_delay_ns;
}
static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time,
                                   unsigned int next_freq)
{
        if (sg_policy->need_freq_update)
                sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);
        else if (sg_policy->next_freq == next_freq)
                return false;

        sg_policy->next_freq = next_freq;
        sg_policy->last_freq_update_time = time;

        return true;
}
static void sugov_fast_switch(struct sugov_policy *sg_policy, u64 time,
                              unsigned int next_freq)
{
        if (sugov_update_next_freq(sg_policy, time, next_freq))
                cpufreq_driver_fast_switch(sg_policy->policy, next_freq);
}
static void sugov_deferred_update(struct sugov_policy *sg_policy, u64 time,
                                  unsigned int next_freq)
{
        if (!sugov_update_next_freq(sg_policy, time, next_freq))
                return;

        if (!sg_policy->work_in_progress) {
                sg_policy->work_in_progress = true;
                irq_work_queue(&sg_policy->irq_work);
        }
}
/**
 * get_next_freq - Compute a new frequency for a given cpufreq policy.
 * @sg_policy: schedutil policy object to compute the new frequency for.
 * @util: Current CPU utilization.
 * @max: CPU capacity.
 *
 * If the utilization is frequency-invariant, choose the new frequency to be
 * proportional to it, that is
 *
 * next_freq = C * max_freq * util / max
 *
 * Otherwise, approximate the would-be frequency-invariant utilization by
 * util_raw * (curr_freq / max_freq) which leads to
 *
 * next_freq = C * curr_freq * util_raw / max
 *
 * Take C = 1.25 for the frequency tipping point at (util / max) = 0.8.
 *
 * The lowest driver-supported frequency which is equal or greater than the raw
 * next_freq (as calculated above) is returned, subject to policy min/max and
 * cpufreq driver limitations.
 */
static unsigned int get_next_freq(struct sugov_policy *sg_policy,
                                  unsigned long util, unsigned long max)
{
        struct cpufreq_policy *policy = sg_policy->policy;
        unsigned int freq = arch_scale_freq_invariant() ?
                                policy->cpuinfo.max_freq : policy->cur;

        freq = map_util_freq(util, freq, max);

        if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update)
                return sg_policy->next_freq;

        sg_policy->cached_raw_freq = freq;
        return cpufreq_driver_resolve_freq(policy, freq);
}
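
/*
 * Worked example of the formula above, with illustrative numbers: on a
 * frequency-invariant system with policy->cpuinfo.max_freq = 2000000 kHz
 * and util/max = 0.5, map_util_freq() yields 1.25 * 2000000 * 0.5 =
 * 1250000 kHz, which cpufreq_driver_resolve_freq() then maps to the lowest
 * driver-supported frequency at or above that value.
 */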
/*
 * This function computes an effective utilization for the given CPU, to be
 * used for frequency selection given the linear relation: f = u * f_max.
 *
 * The scheduler tracks the following metrics:
 *
 *   cpu_util_{cfs,rt,dl,irq}()
 *   cpu_bw_dl()
 *
 * Where the cfs,rt and dl util numbers are tracked with the same metric and
 * synchronized windows and are thus directly comparable.
 *
 * The cfs,rt,dl utilization are the running times measured with rq->clock_task
 * which excludes things like IRQ and steal-time. These latter are then accrued
 * in the irq utilization.
 *
 * The DL bandwidth number otoh is not a measured metric but a value computed
 * based on the task model parameters and gives the minimal utilization
 * required to meet deadlines.
 */
unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs,
                                 unsigned long max, enum schedutil_type type,
                                 struct task_struct *p)
{
        unsigned long dl_util, util, irq;
        struct rq *rq = cpu_rq(cpu);

        if (!uclamp_is_used() &&
            type == FREQUENCY_UTIL && rt_rq_is_runnable(&rq->rt)) {
                return max;
        }

        /*
         * Early check to see if IRQ/steal time saturates the CPU, can be
         * because of inaccuracies in how we track these -- see
         * update_irq_load_avg().
         */
        irq = cpu_util_irq(rq);
        if (unlikely(irq >= max))
                return max;

        /*
         * Because the time spent on RT/DL tasks is visible as 'lost' time to
         * CFS tasks and we use the same metric to track the effective
         * utilization (PELT windows are synchronized) we can directly add them
         * to obtain the CPU's actual utilization.
         *
         * CFS and RT utilization can be boosted or capped, depending on
         * utilization clamp constraints requested by currently RUNNABLE
         * tasks.
         * When there are no CFS RUNNABLE tasks, clamps are released and
         * frequency will be gracefully reduced with the utilization decay.
         */
        util = util_cfs + cpu_util_rt(rq);
        if (type == FREQUENCY_UTIL)
                util = uclamp_rq_util_with(rq, util, p);

        dl_util = cpu_util_dl(rq);

        /*
         * For frequency selection we do not make cpu_util_dl() a permanent part
         * of this sum because we want to use cpu_bw_dl() later on, but we need
         * to check if the CFS+RT+DL sum is saturated (ie. no idle time) such
         * that we select f_max when there is no idle time.
         *
         * NOTE: numerical errors or stop class might cause us to not quite hit
         * saturation when we should -- something for later.
         */
        if (util + dl_util >= max)
                return max;

        /*
         * OTOH, for energy computation we need the estimated running time, so
         * include util_dl and ignore dl_bw.
         */
        if (type == ENERGY_UTIL)
                util += dl_util;

        /*
         * There is still idle time; further improve the number by using the
         * irq metric. Because IRQ/steal time is hidden from the task clock we
         * need to scale the task numbers:
         *
         *              max - irq
         *   U' = irq + --------- * U
         *                 max
         */
        util = scale_irq_capacity(util, irq, max);
        util += irq;

        /*
         * Bandwidth required by DEADLINE must always be granted while, for
         * FAIR and RT, we use blocked utilization of IDLE CPUs as a mechanism
         * to gracefully reduce the frequency when no tasks show up for longer
         * periods of time.
         *
         * Ideally we would like to set bw_dl as min/guaranteed freq and util +
         * bw_dl as requested freq. However, cpufreq is not yet ready for such
         * an interface. So, we only do the latter for now.
         */
        if (type == FREQUENCY_UTIL)
                util += cpu_bw_dl(rq);

        return min(max, util);
}
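
/*
 * Worked example with illustrative values: max = 1024, util_cfs + rt = 300,
 * irq = 256 and a negligible dl_util. There is idle time (300 < 1024), so
 * the task-clock utilization is scaled by (max - irq) / max:
 * 300 * 768 / 1024 = 225, and the irq contribution is added back, giving
 * U' = 225 + 256 = 481 before cpu_bw_dl() is added for FREQUENCY_UTIL.
 */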
static void sugov_get_util(struct sugov_cpu *sg_cpu)
{
        struct rq *rq = cpu_rq(sg_cpu->cpu);
        unsigned long max = arch_scale_cpu_capacity(sg_cpu->cpu);

        sg_cpu->max = max;
        sg_cpu->bw_dl = cpu_bw_dl(rq);
        sg_cpu->util = schedutil_cpu_util(sg_cpu->cpu, cpu_util_cfs(rq), max,
                                          FREQUENCY_UTIL, NULL);
}
/**
 * sugov_iowait_reset() - Reset the IO boost status of a CPU.
 * @sg_cpu: the sugov data for the CPU to boost
 * @time: the update time from the caller
 * @set_iowait_boost: true if an IO boost has been requested
 *
 * The IO wait boost of a task is disabled after a tick since the last update
 * of a CPU. If a new IO wait boost is requested after more than a tick, then
 * we enable the boost starting from IOWAIT_BOOST_MIN, which improves energy
 * efficiency by ignoring sporadic wakeups from IO.
 */
static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time,
                               bool set_iowait_boost)
{
        s64 delta_ns = time - sg_cpu->last_update;

        /* Reset boost only if a tick has elapsed since last request */
        if (delta_ns <= TICK_NSEC)
                return false;

        sg_cpu->iowait_boost = set_iowait_boost ? IOWAIT_BOOST_MIN : 0;
        sg_cpu->iowait_boost_pending = set_iowait_boost;

        return true;
}
/**
 * sugov_iowait_boost() - Updates the IO boost status of a CPU.
 * @sg_cpu: the sugov data for the CPU to boost
 * @time: the update time from the caller
 * @flags: SCHED_CPUFREQ_IOWAIT if the task is waking up after an IO wait
 *
 * Each time a task wakes up after an IO operation, the CPU utilization can be
 * boosted to a certain utilization which doubles at each "frequent and
 * successive" wakeup from IO, ranging from IOWAIT_BOOST_MIN to the utilization
 * of the maximum OPP.
 *
 * To keep doubling, an IO boost has to be requested at least once per tick,
 * otherwise we restart from the utilization of the minimum OPP.
 */
static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
                               unsigned int flags)
{
        bool set_iowait_boost = flags & SCHED_CPUFREQ_IOWAIT;

        /* Reset boost if the CPU appears to have been idle enough */
        if (sg_cpu->iowait_boost &&
            sugov_iowait_reset(sg_cpu, time, set_iowait_boost))
                return;

        /* Boost only tasks waking up after IO */
        if (!set_iowait_boost)
                return;

        /* Ensure boost doubles only one time at each request */
        if (sg_cpu->iowait_boost_pending)
                return;
        sg_cpu->iowait_boost_pending = true;

        /* Double the boost at each request */
        if (sg_cpu->iowait_boost) {
                sg_cpu->iowait_boost =
                        min_t(unsigned int, sg_cpu->iowait_boost << 1, SCHED_CAPACITY_SCALE);
                return;
        }

        /* First wakeup after IO: start with minimum boost */
        sg_cpu->iowait_boost = IOWAIT_BOOST_MIN;
}
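
/*
 * For example, with SCHED_CAPACITY_SCALE = 1024, a task doing back-to-back
 * IO sees its boost grow as 128, 256, 512, 1024 across successive wakeups
 * arriving within a tick of each other, while a gap longer than a tick
 * restarts the sequence from IOWAIT_BOOST_MIN.
 */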
/**
 * sugov_iowait_apply() - Apply the IO boost to a CPU.
 * @sg_cpu: the sugov data for the cpu to boost
 * @time: the update time from the caller
 *
 * A CPU running a task which has woken up after an IO operation can have its
 * utilization boosted to speed up the completion of those IO operations.
 * The IO boost value is increased each time a task wakes up from IO, in
 * sugov_iowait_boost(), and it is instead decreased by this function,
 * each time an increase has not been requested (!iowait_boost_pending).
 *
 * A CPU which appears to have been idle for at least one tick also has its
 * IO boost utilization reset.
 *
 * This mechanism is designed to boost tasks which frequently wait on IO,
 * while being more conservative on tasks which do only sporadic IO
 * operations.
 */
static void sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time)
{
        unsigned long boost;

        /* No boost currently required */
        if (!sg_cpu->iowait_boost)
                return;

        /* Reset boost if the CPU appears to have been idle enough */
        if (sugov_iowait_reset(sg_cpu, time, false))
                return;

        if (!sg_cpu->iowait_boost_pending) {
                /*
                 * No boost pending; reduce the boost value.
                 */
                sg_cpu->iowait_boost >>= 1;
                if (sg_cpu->iowait_boost < IOWAIT_BOOST_MIN) {
                        sg_cpu->iowait_boost = 0;
                        return;
                }
        }

        sg_cpu->iowait_boost_pending = false;

        /*
         * sg_cpu->util is already in capacity scale; convert iowait_boost
         * into the same scale so we can compare.
         */
        boost = (sg_cpu->iowait_boost * sg_cpu->max) >> SCHED_CAPACITY_SHIFT;
        if (sg_cpu->util < boost)
                sg_cpu->util = boost;
}
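
/*
 * For example, on a CPU with an illustrative capacity of sg_cpu->max = 512,
 * an iowait_boost of 512 converts to (512 * 512) >> SCHED_CAPACITY_SHIFT =
 * 256 in capacity units, so sg_cpu->util is only raised if it is currently
 * below 256.
 */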
#ifdef CONFIG_NO_HZ_COMMON
static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
{
        unsigned long idle_calls = tick_nohz_get_idle_calls_cpu(sg_cpu->cpu);
        bool ret = idle_calls == sg_cpu->saved_idle_calls;

        sg_cpu->saved_idle_calls = idle_calls;
        return ret;
}
#else
static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
#endif /* CONFIG_NO_HZ_COMMON */
/*
 * Make sugov_should_update_freq() ignore the rate limit when DL
 * has increased the utilization.
 */
static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_policy *sg_policy)
{
        if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
                sg_policy->limits_changed = true;
}
static inline bool sugov_update_single_common(struct sugov_cpu *sg_cpu,
                                              u64 time, unsigned int flags)
{
        struct sugov_policy *sg_policy = sg_cpu->sg_policy;

        sugov_iowait_boost(sg_cpu, time, flags);
        sg_cpu->last_update = time;

        ignore_dl_rate_limit(sg_cpu, sg_policy);

        if (!sugov_should_update_freq(sg_policy, time))
                return false;

        sugov_get_util(sg_cpu);
        sugov_iowait_apply(sg_cpu, time);

        return true;
}
static void sugov_update_single_freq(struct update_util_data *hook, u64 time,
                                     unsigned int flags)
{
        struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
        struct sugov_policy *sg_policy = sg_cpu->sg_policy;
        unsigned int cached_freq = sg_policy->cached_raw_freq;
        unsigned int next_f;

        if (!sugov_update_single_common(sg_cpu, time, flags))
                return;

        next_f = get_next_freq(sg_policy, sg_cpu->util, sg_cpu->max);
        /*
         * Do not reduce the frequency if the CPU has not been idle
         * recently, as the reduction is likely to be premature then.
         */
        if (sugov_cpu_is_busy(sg_cpu) && next_f < sg_policy->next_freq) {
                next_f = sg_policy->next_freq;

                /* Restore cached freq as next_freq has changed */
                sg_policy->cached_raw_freq = cached_freq;
        }

        /*
         * This code runs under rq->lock for the target CPU, so it won't run
         * concurrently on two different CPUs for the same target and it is not
         * necessary to acquire the lock in the fast switch case.
         */
        if (sg_policy->policy->fast_switch_enabled) {
                sugov_fast_switch(sg_policy, time, next_f);
        } else {
                raw_spin_lock(&sg_policy->update_lock);
                sugov_deferred_update(sg_policy, time, next_f);
                raw_spin_unlock(&sg_policy->update_lock);
        }
}
static void sugov_update_single_perf(struct update_util_data *hook, u64 time,
                                     unsigned int flags)
{
        struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
        unsigned long prev_util = sg_cpu->util;

        /*
         * Fall back to the "frequency" path if frequency invariance is not
         * supported, because the direct mapping between the utilization and
         * the performance levels depends on the frequency invariance.
         */
        if (!arch_scale_freq_invariant()) {
                sugov_update_single_freq(hook, time, flags);
                return;
        }

        if (!sugov_update_single_common(sg_cpu, time, flags))
                return;

        /*
         * Do not reduce the target performance level if the CPU has not been
         * idle recently, as the reduction is likely to be premature then.
         */
        if (sugov_cpu_is_busy(sg_cpu) && sg_cpu->util < prev_util)
                sg_cpu->util = prev_util;

        cpufreq_driver_adjust_perf(sg_cpu->cpu, map_util_perf(sg_cpu->bw_dl),
                                   map_util_perf(sg_cpu->util), sg_cpu->max);

        sg_cpu->sg_policy->last_freq_update_time = time;
}
static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
{
        struct sugov_policy *sg_policy = sg_cpu->sg_policy;
        struct cpufreq_policy *policy = sg_policy->policy;
        unsigned long util = 0, max = 1;
        unsigned int j;

        for_each_cpu(j, policy->cpus) {
                struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
                unsigned long j_util, j_max;

                sugov_get_util(j_sg_cpu);
                sugov_iowait_apply(j_sg_cpu, time);
                j_util = j_sg_cpu->util;
                j_max = j_sg_cpu->max;

                if (j_util * max > j_max * util) {
                        util = j_util;
                        max = j_max;
                }
        }

        return get_next_freq(sg_policy, util, max);
}
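
/*
 * The cross-multiplication above compares utilization ratios without
 * division. With illustrative numbers, a CPU at j_util = 300 of j_max = 512
 * beats one at util = 400 of max = 1024 because 300 * 1024 > 512 * 400,
 * i.e. ~59% utilization wins over ~39%, so the policy follows its most
 * loaded CPU.
 */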
static void
sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags)
{
        struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
        struct sugov_policy *sg_policy = sg_cpu->sg_policy;
        unsigned int next_f;

        raw_spin_lock(&sg_policy->update_lock);

        sugov_iowait_boost(sg_cpu, time, flags);
        sg_cpu->last_update = time;

        ignore_dl_rate_limit(sg_cpu, sg_policy);

        if (sugov_should_update_freq(sg_policy, time)) {
                next_f = sugov_next_freq_shared(sg_cpu, time);

                if (sg_policy->policy->fast_switch_enabled)
                        sugov_fast_switch(sg_policy, time, next_f);
                else
                        sugov_deferred_update(sg_policy, time, next_f);
        }

        raw_spin_unlock(&sg_policy->update_lock);
}
static void sugov_work(struct kthread_work *work)
{
        struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);
        unsigned int freq;
        unsigned long flags;

        /*
         * Hold sg_policy->update_lock shortly to handle the case where
         * sg_policy->next_freq is read here and then updated by
         * sugov_deferred_update() just before work_in_progress is set to false
         * here; without the lock we may miss queueing the new update.
         *
         * Note: If work was queued after the update_lock is released,
         * sugov_work() will just be called again by the kthread_work code; and
         * the request will be processed before the sugov thread sleeps.
         */
        raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
        freq = sg_policy->next_freq;
        sg_policy->work_in_progress = false;
        raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);

        mutex_lock(&sg_policy->work_lock);
        __cpufreq_driver_target(sg_policy->policy, freq, CPUFREQ_RELATION_L);
        mutex_unlock(&sg_policy->work_lock);
}
static void sugov_irq_work(struct irq_work *irq_work)
{
        struct sugov_policy *sg_policy;

        sg_policy = container_of(irq_work, struct sugov_policy, irq_work);

        kthread_queue_work(&sg_policy->worker, &sg_policy->work);
}
/************************** sysfs interface ************************/

static struct sugov_tunables *global_tunables;
static DEFINE_MUTEX(global_tunables_lock);

static inline struct sugov_tunables *to_sugov_tunables(struct gov_attr_set *attr_set)
{
        return container_of(attr_set, struct sugov_tunables, attr_set);
}
static ssize_t rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
{
        struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

        return sprintf(buf, "%u\n", tunables->rate_limit_us);
}

static ssize_t
rate_limit_us_store(struct gov_attr_set *attr_set, const char *buf, size_t count)
{
        struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
        struct sugov_policy *sg_policy;
        unsigned int rate_limit_us;

        if (kstrtouint(buf, 10, &rate_limit_us))
                return -EINVAL;

        tunables->rate_limit_us = rate_limit_us;

        list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook)
                sg_policy->freq_update_delay_ns = rate_limit_us * NSEC_PER_USEC;

        return count;
}
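
/*
 * For example, writing an illustrative value of 2000 to the governor's
 * rate_limit_us attribute sets freq_update_delay_ns to
 * 2000 * NSEC_PER_USEC = 2000000 ns, so sugov_should_update_freq() spaces
 * frequency updates for the affected policies at least 2 ms apart.
 */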
static struct governor_attr rate_limit_us = __ATTR_RW(rate_limit_us);
static struct attribute *sugov_attrs[] = {
        &rate_limit_us.attr,
        NULL
};
ATTRIBUTE_GROUPS(sugov);

static struct kobj_type sugov_tunables_ktype = {
        .default_groups = sugov_groups,
        .sysfs_ops = &governor_sysfs_ops,
};
/********************** cpufreq governor interface *********************/

struct cpufreq_governor schedutil_gov;

static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
{
        struct sugov_policy *sg_policy;

        sg_policy = kzalloc(sizeof(*sg_policy), GFP_KERNEL);
        if (!sg_policy)
                return NULL;

        sg_policy->policy = policy;
        raw_spin_lock_init(&sg_policy->update_lock);
        return sg_policy;
}
static void sugov_policy_free(struct sugov_policy *sg_policy)
{
        kfree(sg_policy);
}
static int sugov_kthread_create(struct sugov_policy *sg_policy)
{
        struct task_struct *thread;
        struct sched_attr attr = {
                .size           = sizeof(struct sched_attr),
                .sched_policy   = SCHED_DEADLINE,
                .sched_flags    = SCHED_FLAG_SUGOV,
                .sched_nice     = 0,
                .sched_priority = 0,
                /*
                 * Fake (unused) bandwidth; workaround to "fix"
                 * priority inheritance.
                 */
                .sched_runtime  = 1000000,
                .sched_deadline = 10000000,
                .sched_period   = 10000000,
        };
        struct cpufreq_policy *policy = sg_policy->policy;
        int ret;

        /* kthread only required for slow path */
        if (policy->fast_switch_enabled)
                return 0;

        kthread_init_work(&sg_policy->work, sugov_work);
        kthread_init_worker(&sg_policy->worker);
        thread = kthread_create(kthread_worker_fn, &sg_policy->worker,
                                "sugov:%d",
                                cpumask_first(policy->related_cpus));
        if (IS_ERR(thread)) {
                pr_err("failed to create sugov thread: %ld\n", PTR_ERR(thread));
                return PTR_ERR(thread);
        }

        ret = sched_setattr_nocheck(thread, &attr);
        if (ret) {
                kthread_stop(thread);
                pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__);
                return ret;
        }

        sg_policy->thread = thread;
        kthread_bind_mask(thread, policy->related_cpus);
        init_irq_work(&sg_policy->irq_work, sugov_irq_work);
        mutex_init(&sg_policy->work_lock);

        wake_up_process(thread);

        return 0;
}
static void sugov_kthread_stop(struct sugov_policy *sg_policy)
{
        /* kthread only required for slow path */
        if (sg_policy->policy->fast_switch_enabled)
                return;

        kthread_flush_worker(&sg_policy->worker);
        kthread_stop(sg_policy->thread);
        mutex_destroy(&sg_policy->work_lock);
}
static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy)
{
        struct sugov_tunables *tunables;

        tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
        if (tunables) {
                gov_attr_set_init(&tunables->attr_set, &sg_policy->tunables_hook);
                if (!have_governor_per_policy())
                        global_tunables = tunables;
        }
        return tunables;
}

static void sugov_tunables_free(struct sugov_tunables *tunables)
{
        if (!have_governor_per_policy())
                global_tunables = NULL;

        kfree(tunables);
}
static int sugov_init(struct cpufreq_policy *policy)
{
        struct sugov_policy *sg_policy;
        struct sugov_tunables *tunables;
        int ret = 0;

        /* State should be equivalent to EXIT */
        if (policy->governor_data)
                return -EBUSY;

        cpufreq_enable_fast_switch(policy);

        sg_policy = sugov_policy_alloc(policy);
        if (!sg_policy) {
                ret = -ENOMEM;
                goto disable_fast_switch;
        }

        ret = sugov_kthread_create(sg_policy);
        if (ret)
                goto free_sg_policy;

        mutex_lock(&global_tunables_lock);

        if (global_tunables) {
                if (WARN_ON(have_governor_per_policy())) {
                        ret = -EINVAL;
                        goto stop_kthread;
                }
                policy->governor_data = sg_policy;
                sg_policy->tunables = global_tunables;

                gov_attr_set_get(&global_tunables->attr_set, &sg_policy->tunables_hook);
                goto out;
        }

        tunables = sugov_tunables_alloc(sg_policy);
        if (!tunables) {
                ret = -ENOMEM;
                goto stop_kthread;
        }

        tunables->rate_limit_us = cpufreq_policy_transition_delay_us(policy);

        policy->governor_data = sg_policy;
        sg_policy->tunables = tunables;

        ret = kobject_init_and_add(&tunables->attr_set.kobj, &sugov_tunables_ktype,
                                   get_governor_parent_kobj(policy), "%s",
                                   schedutil_gov.name);
        if (ret)
                goto fail;

out:
        mutex_unlock(&global_tunables_lock);
        return 0;

fail:
        kobject_put(&tunables->attr_set.kobj);
        policy->governor_data = NULL;
        sugov_tunables_free(tunables);

stop_kthread:
        sugov_kthread_stop(sg_policy);
        mutex_unlock(&global_tunables_lock);

free_sg_policy:
        sugov_policy_free(sg_policy);

disable_fast_switch:
        cpufreq_disable_fast_switch(policy);

        pr_err("initialization failed (error %d)\n", ret);
        return ret;
}
static void sugov_exit(struct cpufreq_policy *policy)
{
        struct sugov_policy *sg_policy = policy->governor_data;
        struct sugov_tunables *tunables = sg_policy->tunables;
        unsigned int count;

        mutex_lock(&global_tunables_lock);

        count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
        policy->governor_data = NULL;
        if (!count)
                sugov_tunables_free(tunables);

        mutex_unlock(&global_tunables_lock);

        sugov_kthread_stop(sg_policy);
        sugov_policy_free(sg_policy);
        cpufreq_disable_fast_switch(policy);
}
static int sugov_start(struct cpufreq_policy *policy)
{
        struct sugov_policy *sg_policy = policy->governor_data;
        void (*uu)(struct update_util_data *data, u64 time, unsigned int flags);
        unsigned int cpu;

        sg_policy->freq_update_delay_ns = sg_policy->tunables->rate_limit_us * NSEC_PER_USEC;
        sg_policy->last_freq_update_time = 0;
        sg_policy->next_freq = 0;
        sg_policy->work_in_progress = false;
        sg_policy->limits_changed = false;
        sg_policy->cached_raw_freq = 0;

        sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);

        for_each_cpu(cpu, policy->cpus) {
                struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

                memset(sg_cpu, 0, sizeof(*sg_cpu));
                sg_cpu->cpu = cpu;
                sg_cpu->sg_policy = sg_policy;
        }

        if (policy_is_shared(policy))
                uu = sugov_update_shared;
        else if (policy->fast_switch_enabled && cpufreq_driver_has_adjust_perf())
                uu = sugov_update_single_perf;
        else
                uu = sugov_update_single_freq;

        for_each_cpu(cpu, policy->cpus) {
                struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

                cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util, uu);
        }
        return 0;
}
static void sugov_stop(struct cpufreq_policy *policy)
{
        struct sugov_policy *sg_policy = policy->governor_data;
        unsigned int cpu;

        for_each_cpu(cpu, policy->cpus)
                cpufreq_remove_update_util_hook(cpu);

        synchronize_rcu();

        if (!policy->fast_switch_enabled) {
                irq_work_sync(&sg_policy->irq_work);
                kthread_cancel_work_sync(&sg_policy->work);
        }
}
static void sugov_limits(struct cpufreq_policy *policy)
{
        struct sugov_policy *sg_policy = policy->governor_data;

        if (!policy->fast_switch_enabled) {
                mutex_lock(&sg_policy->work_lock);
                cpufreq_policy_apply_limits(policy);
                mutex_unlock(&sg_policy->work_lock);
        }

        sg_policy->limits_changed = true;
}
struct cpufreq_governor schedutil_gov = {
        .name                   = "schedutil",
        .owner                  = THIS_MODULE,
        .flags                  = CPUFREQ_GOV_DYNAMIC_SWITCHING,
        .init                   = sugov_init,
        .exit                   = sugov_exit,
        .start                  = sugov_start,
        .stop                   = sugov_stop,
        .limits                 = sugov_limits,
};

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL
struct cpufreq_governor *cpufreq_default_governor(void)
{
        return &schedutil_gov;
}
#endif

cpufreq_governor_init(schedutil_gov);
#ifdef CONFIG_ENERGY_MODEL
static void rebuild_sd_workfn(struct work_struct *work)
{
        rebuild_sched_domains_energy();
}
static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);

/*
 * EAS shouldn't be attempted without sugov, so rebuild the sched_domains
 * on governor changes to make sure the scheduler knows about it.
 */
void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
                                   struct cpufreq_governor *old_gov)
{
        if (old_gov == &schedutil_gov || policy->governor == &schedutil_gov) {
                /*
                 * When called from the cpufreq_register_driver() path, the
                 * cpu_hotplug_lock is already held, so use a work item to
                 * avoid nested locking in rebuild_sched_domains().
                 */
                schedule_work(&rebuild_sd_work);
        }
}
#endif