/*
 * ARM big.LITTLE Platforms CPUFreq support
 *
 * Copyright (C) 2013 ARM Ltd.
 * Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
 *
 * Copyright (C) 2013 Linaro.
 * Viresh Kumar <viresh.kumar@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/cpu_cooling.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of_platform.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/topology.h>
#include <linux/types.h>

#include "arm_big_little.h"
/* Currently we support only two clusters */
#define A15_CLUSTER	0
#define A7_CLUSTER	1
#define MAX_CLUSTERS	2
#ifdef CONFIG_BL_SWITCHER
#include <asm/bL_switcher.h>
static bool bL_switching_enabled;
#define is_bL_switching_enabled()	bL_switching_enabled
#define set_switching_enabled(x)	(bL_switching_enabled = (x))
#else
#define is_bL_switching_enabled()	false
#define set_switching_enabled(x)	do { } while (0)
#define bL_switch_request(...)		do { } while (0)
#define bL_switcher_put_enabled()	do { } while (0)
#define bL_switcher_get_enabled()	do { } while (0)
#endif
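/*
 * When the in-kernel switcher (IKS) is enabled, the driver publishes a single
 * virtual frequency table in which A7 rates appear halved, so that big and
 * LITTLE operating points form one continuous range. ACTUAL_FREQ() converts a
 * virtual rate back to the physical A7 rate, and VIRT_FREQ() does the
 * opposite.
 */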
#define ACTUAL_FREQ(cluster, freq)	((cluster == A7_CLUSTER) ? freq << 1 : freq)
#define VIRT_FREQ(cluster, freq)	((cluster == A7_CLUSTER) ? freq >> 1 : freq)
static struct thermal_cooling_device *cdev[MAX_CLUSTERS];
static struct cpufreq_arm_bL_ops *arm_bL_ops;
static struct clk *clk[MAX_CLUSTERS];
static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS + 1];
static atomic_t cluster_usage[MAX_CLUSTERS + 1];

static unsigned int clk_big_min;	/* Minimum clock frequency (Big) */
static unsigned int clk_little_max;	/* Maximum clock frequency (Little) */

static DEFINE_PER_CPU(unsigned int, physical_cluster);
static DEFINE_PER_CPU(unsigned int, cpu_last_req_freq);

static struct mutex cluster_lock[MAX_CLUSTERS];
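/*
 * freq_table[] and cluster_usage[] carry one extra slot: index MAX_CLUSTERS
 * holds the merged "virtual" cluster that cpu_to_cluster() reports for every
 * CPU while the switcher is enabled.
 */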
static inline int raw_cpu_to_cluster(int cpu)
{
	return topology_physical_package_id(cpu);
}
static inline int cpu_to_cluster(int cpu)
{
	return is_bL_switching_enabled() ?
		MAX_CLUSTERS : raw_cpu_to_cluster(cpu);
}
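/*
 * With IKS, several CPUs may be resident on the same physical cluster, and
 * the shared cluster clock has to satisfy the fastest of their requests.
 * The helper below scans the last requested frequency of every online CPU
 * on the given cluster.
 */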
static unsigned int find_cluster_maxfreq(int cluster)
{
	int j;
	u32 max_freq = 0, cpu_freq;

	for_each_online_cpu(j) {
		cpu_freq = per_cpu(cpu_last_req_freq, j);

		if ((cluster == per_cpu(physical_cluster, j)) &&
				(max_freq < cpu_freq))
			max_freq = cpu_freq;
	}

	pr_debug("%s: cluster: %d, max freq: %d\n", __func__, cluster,
			max_freq);

	return max_freq;
}
static unsigned int clk_get_cpu_rate(unsigned int cpu)
{
	u32 cur_cluster = per_cpu(physical_cluster, cpu);
	u32 rate = clk_get_rate(clk[cur_cluster]) / 1000;

	/* For switcher we use virtual A7 clock rates */
	if (is_bL_switching_enabled())
		rate = VIRT_FREQ(cur_cluster, rate);

	pr_debug("%s: cpu: %d, cluster: %d, freq: %u\n", __func__, cpu,
			cur_cluster, rate);

	return rate;
}
static unsigned int bL_cpufreq_get_rate(unsigned int cpu)
{
	if (is_bL_switching_enabled()) {
		pr_debug("%s: freq: %d\n", __func__, per_cpu(cpu_last_req_freq,
					cpu));

		return per_cpu(cpu_last_req_freq, cpu);
	} else {
		return clk_get_cpu_rate(cpu);
	}
}
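/*
 * Program the new cluster to serve @cpu at @rate (a virtual rate under IKS):
 * record the request, raise the cluster clock to the maximum requested by
 * its resident CPUs, verify the rate by reading it back, and, when the
 * target cluster differs from the current one, ask the switcher to migrate
 * the CPU and then rebalance the cluster left behind.
 */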
static int
bL_cpufreq_set_rate(u32 cpu, u32 old_cluster, u32 new_cluster, u32 rate)
{
	u32 new_rate, prev_rate;
	int ret;
	bool bLs = is_bL_switching_enabled();

	mutex_lock(&cluster_lock[new_cluster]);

	if (bLs) {
		prev_rate = per_cpu(cpu_last_req_freq, cpu);
		per_cpu(cpu_last_req_freq, cpu) = rate;
		per_cpu(physical_cluster, cpu) = new_cluster;

		new_rate = find_cluster_maxfreq(new_cluster);
		new_rate = ACTUAL_FREQ(new_cluster, new_rate);
	} else {
		new_rate = rate;
	}

	pr_debug("%s: cpu: %d, old cluster: %d, new cluster: %d, freq: %d\n",
			__func__, cpu, old_cluster, new_cluster, new_rate);

	ret = clk_set_rate(clk[new_cluster], new_rate * 1000);
	if (!ret) {
		/*
		 * FIXME: clk_set_rate hasn't returned an error here however it
		 * may be that clk_change_rate failed due to hardware or
		 * firmware issues and wasn't able to report that due to the
		 * current design of the clk core layer. To work around this
		 * problem we will read back the clock rate and check it is
		 * correct. This needs to be removed once clk core is fixed.
		 */
		if (clk_get_rate(clk[new_cluster]) != new_rate * 1000)
			ret = -EIO;
	}

	if (WARN_ON(ret)) {
		pr_err("clk_set_rate failed: %d, new cluster: %d\n", ret,
				new_cluster);
		if (bLs) {
			per_cpu(cpu_last_req_freq, cpu) = prev_rate;
			per_cpu(physical_cluster, cpu) = old_cluster;
		}

		mutex_unlock(&cluster_lock[new_cluster]);

		return ret;
	}

	mutex_unlock(&cluster_lock[new_cluster]);

	/* Recalc freq for old cluster when switching clusters */
	if (old_cluster != new_cluster) {
		pr_debug("%s: cpu: %d, old cluster: %d, new cluster: %d\n",
				__func__, cpu, old_cluster, new_cluster);

		/* Switch cluster */
		bL_switch_request(cpu, new_cluster);

		mutex_lock(&cluster_lock[old_cluster]);

		/* Set freq of old cluster if there are cpus left on it */
		new_rate = find_cluster_maxfreq(old_cluster);
		new_rate = ACTUAL_FREQ(old_cluster, new_rate);

		if (new_rate) {
			pr_debug("%s: Updating rate of old cluster: %d, to freq: %d\n",
					__func__, old_cluster, new_rate);

			if (clk_set_rate(clk[old_cluster], new_rate * 1000))
				pr_err("%s: clk_set_rate failed: %d, old cluster: %d\n",
						__func__, ret, old_cluster);
		}
		mutex_unlock(&cluster_lock[old_cluster]);
	}

	return 0;
}
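/*
 * Under IKS a virtual frequency below clk_big_min cannot be delivered by the
 * big cluster, and one above clk_little_max cannot be delivered by the LITTLE
 * cluster, so the target callback below picks the cluster before handing the
 * request to bL_cpufreq_set_rate().
 */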
/* Set clock frequency */
static int bL_cpufreq_set_target(struct cpufreq_policy *policy,
		unsigned int index)
{
	u32 cpu = policy->cpu, cur_cluster, new_cluster, actual_cluster;
	unsigned int freqs_new;

	cur_cluster = cpu_to_cluster(cpu);
	new_cluster = actual_cluster = per_cpu(physical_cluster, cpu);

	freqs_new = freq_table[cur_cluster][index].frequency;

	if (is_bL_switching_enabled()) {
		if ((actual_cluster == A15_CLUSTER) &&
				(freqs_new < clk_big_min)) {
			new_cluster = A7_CLUSTER;
		} else if ((actual_cluster == A7_CLUSTER) &&
				(freqs_new > clk_little_max)) {
			new_cluster = A15_CLUSTER;
		}
	}

	return bL_cpufreq_set_rate(cpu, actual_cluster, new_cluster, freqs_new);
}
static inline u32 get_table_count(struct cpufreq_frequency_table *table)
{
	int count;

	for (count = 0; table[count].frequency != CPUFREQ_TABLE_END; count++)
		;

	return count;
}
/* get the minimum frequency in the cpufreq_frequency_table */
static inline u32 get_table_min(struct cpufreq_frequency_table *table)
{
	struct cpufreq_frequency_table *pos;
	uint32_t min_freq = ~0;

	cpufreq_for_each_entry(pos, table)
		if (pos->frequency < min_freq)
			min_freq = pos->frequency;

	return min_freq;
}
/* get the maximum frequency in the cpufreq_frequency_table */
static inline u32 get_table_max(struct cpufreq_frequency_table *table)
{
	struct cpufreq_frequency_table *pos;
	uint32_t max_freq = 0;

	cpufreq_for_each_entry(pos, table)
		if (pos->frequency > max_freq)
			max_freq = pos->frequency;

	return max_freq;
}
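/*
 * Build the merged table used for the virtual cluster: one array sized for
 * both per-cluster tables plus a terminating entry, filled starting with the
 * LITTLE (A7) table, whose virtual rates are halved, so that the result comes
 * out in increasing virtual-frequency order.
 */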
static int merge_cluster_tables(void)
{
	int i, j, k = 0, count = 1;
	struct cpufreq_frequency_table *table;

	for (i = 0; i < MAX_CLUSTERS; i++)
		count += get_table_count(freq_table[i]);

	table = kzalloc(sizeof(*table) * count, GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	freq_table[MAX_CLUSTERS] = table;

	/* Add in reverse order to get freqs in increasing order */
	for (i = MAX_CLUSTERS - 1; i >= 0; i--) {
		for (j = 0; freq_table[i][j].frequency != CPUFREQ_TABLE_END;
				j++) {
			table[k].frequency = VIRT_FREQ(i,
					freq_table[i][j].frequency);
			pr_debug("%s: index: %d, freq: %d\n", __func__, k,
					table[k].frequency);
			k++;
		}
	}

	table[k].driver_data = k;
	table[k].frequency = CPUFREQ_TABLE_END;

	pr_debug("%s: End, table: %p, count: %d\n", __func__, table, k);

	return 0;
}
static void _put_cluster_clk_and_freq_table(struct device *cpu_dev,
					    const struct cpumask *cpumask)
{
	u32 cluster = raw_cpu_to_cluster(cpu_dev->id);

	if (!freq_table[cluster])
		return;

	clk_put(clk[cluster]);
	dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
	if (arm_bL_ops->free_opp_table)
		arm_bL_ops->free_opp_table(cpumask);
	dev_dbg(cpu_dev, "%s: cluster: %d\n", __func__, cluster);
}
static void put_cluster_clk_and_freq_table(struct device *cpu_dev,
					   const struct cpumask *cpumask)
{
	u32 cluster = cpu_to_cluster(cpu_dev->id);
	int i;

	if (atomic_dec_return(&cluster_usage[cluster]))
		return;

	if (cluster < MAX_CLUSTERS)
		return _put_cluster_clk_and_freq_table(cpu_dev, cpumask);

	for_each_present_cpu(i) {
		struct device *cdev = get_cpu_device(i);

		if (!cdev) {
			pr_err("%s: failed to get cpu%d device\n", __func__, i);
			return;
		}

		_put_cluster_clk_and_freq_table(cdev, cpumask);
	}

	/* free virtual table */
	kfree(freq_table[cluster]);
}
static int _get_cluster_clk_and_freq_table(struct device *cpu_dev,
					   const struct cpumask *cpumask)
{
	u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
	int ret;

	if (freq_table[cluster])
		return 0;

	ret = arm_bL_ops->init_opp_table(cpumask);
	if (ret) {
		dev_err(cpu_dev, "%s: init_opp_table failed, cpu: %d, err: %d\n",
				__func__, cpu_dev->id, ret);
		goto out;
	}

	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table[cluster]);
	if (ret) {
		dev_err(cpu_dev, "%s: failed to init cpufreq table, cpu: %d, err: %d\n",
				__func__, cpu_dev->id, ret);
		goto free_opp_table;
	}

	clk[cluster] = clk_get(cpu_dev, NULL);
	if (!IS_ERR(clk[cluster])) {
		dev_dbg(cpu_dev, "%s: clk: %p & freq table: %p, cluster: %d\n",
				__func__, clk[cluster], freq_table[cluster],
				cluster);
		return 0;
	}

	dev_err(cpu_dev, "%s: Failed to get clk for cpu: %d, cluster: %d\n",
			__func__, cpu_dev->id, cluster);
	ret = PTR_ERR(clk[cluster]);
	dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);

free_opp_table:
	if (arm_bL_ops->free_opp_table)
		arm_bL_ops->free_opp_table(cpumask);
out:
	dev_err(cpu_dev, "%s: Failed to get data for cluster: %d\n", __func__,
			cluster);
	return ret;
}
static int get_cluster_clk_and_freq_table(struct device *cpu_dev,
					  const struct cpumask *cpumask)
{
	u32 cluster = cpu_to_cluster(cpu_dev->id);
	int i, ret;

	if (atomic_inc_return(&cluster_usage[cluster]) != 1)
		return 0;

	if (cluster < MAX_CLUSTERS) {
		ret = _get_cluster_clk_and_freq_table(cpu_dev, cpumask);
		if (ret)
			atomic_dec(&cluster_usage[cluster]);
		return ret;
	}

	/*
	 * Get data for all clusters and fill virtual cluster with a merge of
	 * both
	 */
	for_each_present_cpu(i) {
		struct device *cdev = get_cpu_device(i);

		if (!cdev) {
			pr_err("%s: failed to get cpu%d device\n", __func__, i);
			return -ENODEV;
		}

		ret = _get_cluster_clk_and_freq_table(cdev, cpumask);
		if (ret)
			goto put_clusters;
	}

	ret = merge_cluster_tables();
	if (ret)
		goto put_clusters;

	/* Assuming 2 cluster, set clk_big_min and clk_little_max */
	clk_big_min = get_table_min(freq_table[0]);
	clk_little_max = VIRT_FREQ(1, get_table_max(freq_table[1]));

	pr_debug("%s: cluster: %d, clk_big_min: %d, clk_little_max: %d\n",
			__func__, cluster, clk_big_min, clk_little_max);

	return 0;

put_clusters:
	for_each_present_cpu(i) {
		struct device *cdev = get_cpu_device(i);

		if (!cdev) {
			pr_err("%s: failed to get cpu%d device\n", __func__, i);
			return -ENODEV;
		}

		_put_cluster_clk_and_freq_table(cdev, cpumask);
	}

	atomic_dec(&cluster_usage[cluster]);

	return ret;
}
/* Per-CPU initialization */
static int bL_cpufreq_init(struct cpufreq_policy *policy)
{
	u32 cur_cluster = cpu_to_cluster(policy->cpu);
	struct device *cpu_dev;
	int ret;

	cpu_dev = get_cpu_device(policy->cpu);
	if (!cpu_dev) {
		pr_err("%s: failed to get cpu%d device\n", __func__,
				policy->cpu);
		return -ENODEV;
	}

	if (cur_cluster < MAX_CLUSTERS) {
		int cpu;

		cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));

		for_each_cpu(cpu, policy->cpus)
			per_cpu(physical_cluster, cpu) = cur_cluster;
	} else {
		/* Assumption: during init, we are always running on A15 */
		per_cpu(physical_cluster, policy->cpu) = A15_CLUSTER;
	}

	ret = get_cluster_clk_and_freq_table(cpu_dev, policy->cpus);
	if (ret)
		return ret;

	ret = cpufreq_table_validate_and_show(policy, freq_table[cur_cluster]);
	if (ret) {
		dev_err(cpu_dev, "CPU %d, cluster: %d invalid freq table\n",
				policy->cpu, cur_cluster);
		put_cluster_clk_and_freq_table(cpu_dev, policy->cpus);
		return ret;
	}

	if (arm_bL_ops->get_transition_latency)
		policy->cpuinfo.transition_latency =
			arm_bL_ops->get_transition_latency(cpu_dev);
	else
		policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;

	if (is_bL_switching_enabled())
		per_cpu(cpu_last_req_freq, policy->cpu) = clk_get_cpu_rate(policy->cpu);

	dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu);
	return 0;
}
static int bL_cpufreq_exit(struct cpufreq_policy *policy)
{
	struct device *cpu_dev;
	int cur_cluster = cpu_to_cluster(policy->cpu);

	if (cur_cluster < MAX_CLUSTERS) {
		cpufreq_cooling_unregister(cdev[cur_cluster]);
		cdev[cur_cluster] = NULL;
	}

	cpu_dev = get_cpu_device(policy->cpu);
	if (!cpu_dev) {
		pr_err("%s: failed to get cpu%d device\n", __func__,
				policy->cpu);
		return -ENODEV;
	}

	put_cluster_clk_and_freq_table(cpu_dev, policy->related_cpus);
	dev_dbg(cpu_dev, "%s: Exited, cpu: %d\n", __func__, policy->cpu);

	return 0;
}
static void bL_cpufreq_ready(struct cpufreq_policy *policy)
{
	struct device *cpu_dev = get_cpu_device(policy->cpu);
	int cur_cluster = cpu_to_cluster(policy->cpu);
	struct device_node *np;

	/* Do not register a cpu_cooling device if we are in IKS mode */
	if (cur_cluster >= MAX_CLUSTERS)
		return;

	np = of_node_get(cpu_dev->of_node);
	if (WARN_ON(!np))
		return;

	if (of_find_property(np, "#cooling-cells", NULL)) {
		u32 power_coefficient = 0;

		of_property_read_u32(np, "dynamic-power-coefficient",
				&power_coefficient);

		cdev[cur_cluster] = of_cpufreq_power_cooling_register(np,
				policy->related_cpus, power_coefficient, NULL);
		if (IS_ERR(cdev[cur_cluster])) {
			dev_err(cpu_dev,
				"running cpufreq without cooling device: %ld\n",
				PTR_ERR(cdev[cur_cluster]));
			cdev[cur_cluster] = NULL;
		}
	}
	of_node_put(np);
}
static struct cpufreq_driver bL_cpufreq_driver = {
	.name			= "arm-big-little",
	.flags			= CPUFREQ_STICKY |
					CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
					CPUFREQ_NEED_INITIAL_FREQ_CHECK,
	.verify			= cpufreq_generic_frequency_table_verify,
	.target_index		= bL_cpufreq_set_target,
	.get			= bL_cpufreq_get_rate,
	.init			= bL_cpufreq_init,
	.exit			= bL_cpufreq_exit,
	.ready			= bL_cpufreq_ready,
	.attr			= cpufreq_generic_attr,
};
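/*
 * Enabling or disabling the switcher at run time changes the frequency
 * tables the driver must expose, so the notifier below unregisters the
 * driver before the transition and registers it again afterwards with the
 * new switching state recorded.
 */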
#ifdef CONFIG_BL_SWITCHER
static int bL_cpufreq_switcher_notifier(struct notifier_block *nfb,
					unsigned long action, void *_arg)
{
	pr_debug("%s: action: %ld\n", __func__, action);

	switch (action) {
	case BL_NOTIFY_PRE_ENABLE:
	case BL_NOTIFY_PRE_DISABLE:
		cpufreq_unregister_driver(&bL_cpufreq_driver);
		break;

	case BL_NOTIFY_POST_ENABLE:
		set_switching_enabled(true);
		cpufreq_register_driver(&bL_cpufreq_driver);
		break;

	case BL_NOTIFY_POST_DISABLE:
		set_switching_enabled(false);
		cpufreq_register_driver(&bL_cpufreq_driver);
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static struct notifier_block bL_switcher_notifier = {
	.notifier_call = bL_cpufreq_switcher_notifier,
};

static int __bLs_register_notifier(void)
{
	return bL_switcher_register_notifier(&bL_switcher_notifier);
}

static int __bLs_unregister_notifier(void)
{
	return bL_switcher_unregister_notifier(&bL_switcher_notifier);
}
#else
static int __bLs_register_notifier(void) { return 0; }
static int __bLs_unregister_notifier(void) { return 0; }
#endif
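/*
 * Usage sketch (illustrative only): a platform glue driver supplies a
 * struct cpufreq_arm_bL_ops and registers it, roughly as arm_big_little_dt.c
 * does for device-tree platforms. The example_* names below are
 * hypothetical; only .name and .init_opp_table are mandatory, as checked in
 * bL_cpufreq_register():
 *
 *	static int example_init_opp_table(const struct cpumask *cpumask)
 *	{
 *		int cpu, ret;
 *
 *		// Populate OPPs for every CPU of the cluster from DT
 *		for_each_cpu(cpu, cpumask) {
 *			struct device *cpu_dev = get_cpu_device(cpu);
 *
 *			if (!cpu_dev)
 *				return -ENODEV;
 *			ret = dev_pm_opp_of_add_table(cpu_dev);
 *			if (ret)
 *				return ret;
 *		}
 *		return 0;
 *	}
 *
 *	static struct cpufreq_arm_bL_ops example_bL_ops = {
 *		.name		= "example-bl",
 *		.init_opp_table	= example_init_opp_table,
 *	};
 *
 *	ret = bL_cpufreq_register(&example_bL_ops);
 */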
int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops)
{
	int ret, i;

	if (arm_bL_ops) {
		pr_debug("%s: Already registered: %s, exiting\n", __func__,
				arm_bL_ops->name);
		return -EBUSY;
	}

	if (!ops || !strlen(ops->name) || !ops->init_opp_table) {
		pr_err("%s: Invalid arm_bL_ops, exiting\n", __func__);
		return -ENODEV;
	}

	arm_bL_ops = ops;

	set_switching_enabled(bL_switcher_get_enabled());

	for (i = 0; i < MAX_CLUSTERS; i++)
		mutex_init(&cluster_lock[i]);

	ret = cpufreq_register_driver(&bL_cpufreq_driver);
	if (ret) {
		pr_info("%s: Failed registering platform driver: %s, err: %d\n",
				__func__, ops->name, ret);
		arm_bL_ops = NULL;
	} else {
		ret = __bLs_register_notifier();
		if (ret) {
			cpufreq_unregister_driver(&bL_cpufreq_driver);
			arm_bL_ops = NULL;
		} else {
			pr_info("%s: Registered platform driver: %s\n",
					__func__, ops->name);
		}
	}

	bL_switcher_put_enabled();
	return ret;
}
EXPORT_SYMBOL_GPL(bL_cpufreq_register);
void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops)
{
	if (arm_bL_ops != ops) {
		pr_err("%s: Registered with: %s, can't unregister, exiting\n",
				__func__, arm_bL_ops->name);
		return;
	}

	bL_switcher_get_enabled();
	__bLs_unregister_notifier();
	cpufreq_unregister_driver(&bL_cpufreq_driver);
	bL_switcher_put_enabled();
	pr_info("%s: Un-registered platform driver: %s\n", __func__,
			ops->name);
	arm_bL_ops = NULL;
}
EXPORT_SYMBOL_GPL(bL_cpufreq_unregister);
MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
MODULE_DESCRIPTION("Generic ARM big LITTLE cpufreq driver");
MODULE_LICENSE("GPL v2");