/*
 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Amit Daniel Kachhap <amit.daniel@samsung.com>
 *
 * EXYNOS5440 - CPU frequency scaling support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/pm_opp.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
/* Register definitions */
#define XMU_DVFS_CTRL		0x0060
#define XMU_PMU_P0_7		0x0064
#define XMU_C0_3_PSTATE		0x0090
#define XMU_P_LIMIT		0x00a0
#define XMU_P_STATUS		0x00a4
#define XMU_PMUEVTEN		0x00d0
#define XMU_PMUIRQEN		0x00d4
#define XMU_PMUIRQ		0x00d8

/* PMU mask and shift definations */
#define P_VALUE_MASK		0x7

#define XMU_DVFS_CTRL_EN_SHIFT	0

#define P0_7_CPUCLKDEV_SHIFT	21
#define P0_7_CPUCLKDEV_MASK	0x7
#define P0_7_ATBCLKDEV_SHIFT	18
#define P0_7_ATBCLKDEV_MASK	0x7
#define P0_7_CSCLKDEV_SHIFT	15
#define P0_7_CSCLKDEV_MASK	0x7
#define P0_7_CPUEMA_SHIFT	28
#define P0_7_CPUEMA_MASK	0xf
#define P0_7_L2EMA_SHIFT	24
#define P0_7_L2EMA_MASK		0xf
#define P0_7_VDD_SHIFT		8
#define P0_7_VDD_MASK		0x7f
#define P0_7_FREQ_SHIFT		0
#define P0_7_FREQ_MASK		0xff

#define C0_3_PSTATE_VALID_SHIFT	8
#define C0_3_PSTATE_CURR_SHIFT	4
#define C0_3_PSTATE_NEW_SHIFT	0

#define PSTATE_CHANGED_EVTEN_SHIFT	0

#define PSTATE_CHANGED_IRQEN_SHIFT	0

#define PSTATE_CHANGED_SHIFT	0

/* some constant values for clock divider calculation */
#define CPU_DIV_FREQ_MAX	500
#define CPU_DBG_FREQ_MAX	375
#define CPU_ATB_FREQ_MAX	500

#define PMIC_LOW_VOLT		0x30
#define PMIC_HIGH_VOLT		0x28

#define CPUEMA_HIGH		0x2
#define CPUEMA_MID		0x4
#define CPUEMA_LOW		0x7

#define L2EMA_HIGH		0x1
#define L2EMA_MID		0x3
#define L2EMA_LOW		0x4

/* frequency unit is 20MHZ */
#define FREQ_UNIT		20
#define MAX_VOLTAGE		1550000 /* In microvolt */
#define VOLTAGE_STEP		12500	/* In microvolt */

#define CPUFREQ_NAME		"exynos5440_dvfs"
#define DEF_TRANS_LATENCY	100000
/*
 * P-state index levels; the XMU exposes eight P-states (P0..P7), matching
 * the XMU_PMU_P0_7 register bank programmed in init_div_table().
 */
enum cpufreq_level_index {
	L0, L1, L2, L3, L4, L5, L6, L7,
};

#define CPUFREQ_LEVEL_END	(L7 + 1)
98 struct exynos_dvfs_data
{
100 struct resource
*mem
;
103 unsigned int latency
;
104 struct cpufreq_frequency_table
*freq_table
;
105 unsigned int freq_count
;
108 struct work_struct irq_work
;
111 static struct exynos_dvfs_data
*dvfs_info
;
112 static DEFINE_MUTEX(cpufreq_lock
);
113 static struct cpufreq_freqs freqs
;
115 static int init_div_table(void)
117 struct cpufreq_frequency_table
*pos
, *freq_tbl
= dvfs_info
->freq_table
;
118 unsigned int tmp
, clk_div
, ema_div
, freq
, volt_id
, idx
;
119 struct dev_pm_opp
*opp
;
121 cpufreq_for_each_entry_idx(pos
, freq_tbl
, idx
) {
122 opp
= dev_pm_opp_find_freq_exact(dvfs_info
->dev
,
123 pos
->frequency
* 1000, true);
125 dev_err(dvfs_info
->dev
,
126 "failed to find valid OPP for %u KHZ\n",
131 freq
= pos
->frequency
/ 1000; /* In MHZ */
132 clk_div
= ((freq
/ CPU_DIV_FREQ_MAX
) & P0_7_CPUCLKDEV_MASK
)
133 << P0_7_CPUCLKDEV_SHIFT
;
134 clk_div
|= ((freq
/ CPU_ATB_FREQ_MAX
) & P0_7_ATBCLKDEV_MASK
)
135 << P0_7_ATBCLKDEV_SHIFT
;
136 clk_div
|= ((freq
/ CPU_DBG_FREQ_MAX
) & P0_7_CSCLKDEV_MASK
)
137 << P0_7_CSCLKDEV_SHIFT
;
140 volt_id
= dev_pm_opp_get_voltage(opp
);
142 volt_id
= (MAX_VOLTAGE
- volt_id
) / VOLTAGE_STEP
;
143 if (volt_id
< PMIC_HIGH_VOLT
) {
144 ema_div
= (CPUEMA_HIGH
<< P0_7_CPUEMA_SHIFT
) |
145 (L2EMA_HIGH
<< P0_7_L2EMA_SHIFT
);
146 } else if (volt_id
> PMIC_LOW_VOLT
) {
147 ema_div
= (CPUEMA_LOW
<< P0_7_CPUEMA_SHIFT
) |
148 (L2EMA_LOW
<< P0_7_L2EMA_SHIFT
);
150 ema_div
= (CPUEMA_MID
<< P0_7_CPUEMA_SHIFT
) |
151 (L2EMA_MID
<< P0_7_L2EMA_SHIFT
);
154 tmp
= (clk_div
| ema_div
| (volt_id
<< P0_7_VDD_SHIFT
)
155 | ((freq
/ FREQ_UNIT
) << P0_7_FREQ_SHIFT
));
157 __raw_writel(tmp
, dvfs_info
->base
+ XMU_PMU_P0_7
+ 4 * idx
);
164 static void exynos_enable_dvfs(unsigned int cur_frequency
)
166 unsigned int tmp
, cpu
;
167 struct cpufreq_frequency_table
*freq_table
= dvfs_info
->freq_table
;
168 struct cpufreq_frequency_table
*pos
;
170 __raw_writel(0, dvfs_info
->base
+ XMU_DVFS_CTRL
);
172 /* Enable PSTATE Change Event */
173 tmp
= __raw_readl(dvfs_info
->base
+ XMU_PMUEVTEN
);
174 tmp
|= (1 << PSTATE_CHANGED_EVTEN_SHIFT
);
175 __raw_writel(tmp
, dvfs_info
->base
+ XMU_PMUEVTEN
);
177 /* Enable PSTATE Change IRQ */
178 tmp
= __raw_readl(dvfs_info
->base
+ XMU_PMUIRQEN
);
179 tmp
|= (1 << PSTATE_CHANGED_IRQEN_SHIFT
);
180 __raw_writel(tmp
, dvfs_info
->base
+ XMU_PMUIRQEN
);
182 /* Set initial performance index */
183 cpufreq_for_each_entry(pos
, freq_table
)
184 if (pos
->frequency
== cur_frequency
)
187 if (pos
->frequency
== CPUFREQ_TABLE_END
) {
188 dev_crit(dvfs_info
->dev
, "Boot up frequency not supported\n");
189 /* Assign the highest frequency */
191 cur_frequency
= pos
->frequency
;
194 dev_info(dvfs_info
->dev
, "Setting dvfs initial frequency = %uKHZ",
197 for (cpu
= 0; cpu
< CONFIG_NR_CPUS
; cpu
++) {
198 tmp
= __raw_readl(dvfs_info
->base
+ XMU_C0_3_PSTATE
+ cpu
* 4);
199 tmp
&= ~(P_VALUE_MASK
<< C0_3_PSTATE_NEW_SHIFT
);
200 tmp
|= ((pos
- freq_table
) << C0_3_PSTATE_NEW_SHIFT
);
201 __raw_writel(tmp
, dvfs_info
->base
+ XMU_C0_3_PSTATE
+ cpu
* 4);
205 __raw_writel(1 << XMU_DVFS_CTRL_EN_SHIFT
,
206 dvfs_info
->base
+ XMU_DVFS_CTRL
);
209 static int exynos_target(struct cpufreq_policy
*policy
, unsigned int index
)
213 struct cpufreq_frequency_table
*freq_table
= dvfs_info
->freq_table
;
215 mutex_lock(&cpufreq_lock
);
217 freqs
.old
= policy
->cur
;
218 freqs
.new = freq_table
[index
].frequency
;
220 cpufreq_freq_transition_begin(policy
, &freqs
);
222 /* Set the target frequency in all C0_3_PSTATE register */
223 for_each_cpu(i
, policy
->cpus
) {
224 tmp
= __raw_readl(dvfs_info
->base
+ XMU_C0_3_PSTATE
+ i
* 4);
225 tmp
&= ~(P_VALUE_MASK
<< C0_3_PSTATE_NEW_SHIFT
);
226 tmp
|= (index
<< C0_3_PSTATE_NEW_SHIFT
);
228 __raw_writel(tmp
, dvfs_info
->base
+ XMU_C0_3_PSTATE
+ i
* 4);
230 mutex_unlock(&cpufreq_lock
);
234 static void exynos_cpufreq_work(struct work_struct
*work
)
236 unsigned int cur_pstate
, index
;
237 struct cpufreq_policy
*policy
= cpufreq_cpu_get(0); /* boot CPU */
238 struct cpufreq_frequency_table
*freq_table
= dvfs_info
->freq_table
;
240 /* Ensure we can access cpufreq structures */
241 if (unlikely(dvfs_info
->dvfs_enabled
== false))
244 mutex_lock(&cpufreq_lock
);
245 freqs
.old
= policy
->cur
;
247 cur_pstate
= __raw_readl(dvfs_info
->base
+ XMU_P_STATUS
);
248 if (cur_pstate
>> C0_3_PSTATE_VALID_SHIFT
& 0x1)
249 index
= (cur_pstate
>> C0_3_PSTATE_CURR_SHIFT
) & P_VALUE_MASK
;
251 index
= (cur_pstate
>> C0_3_PSTATE_NEW_SHIFT
) & P_VALUE_MASK
;
253 if (likely(index
< dvfs_info
->freq_count
)) {
254 freqs
.new = freq_table
[index
].frequency
;
256 dev_crit(dvfs_info
->dev
, "New frequency out of range\n");
257 freqs
.new = freqs
.old
;
259 cpufreq_freq_transition_end(policy
, &freqs
, 0);
261 cpufreq_cpu_put(policy
);
262 mutex_unlock(&cpufreq_lock
);
264 enable_irq(dvfs_info
->irq
);
267 static irqreturn_t
exynos_cpufreq_irq(int irq
, void *id
)
271 tmp
= __raw_readl(dvfs_info
->base
+ XMU_PMUIRQ
);
272 if (tmp
>> PSTATE_CHANGED_SHIFT
& 0x1) {
273 __raw_writel(tmp
, dvfs_info
->base
+ XMU_PMUIRQ
);
274 disable_irq_nosync(irq
);
275 schedule_work(&dvfs_info
->irq_work
);
280 static void exynos_sort_descend_freq_table(void)
282 struct cpufreq_frequency_table
*freq_tbl
= dvfs_info
->freq_table
;
284 unsigned int tmp_freq
;
286 * Exynos5440 clock controller state logic expects the cpufreq table to
287 * be in descending order. But the OPP library constructs the table in
288 * ascending order. So to make the table descending we just need to
289 * swap the i element with the N - i element.
291 for (i
= 0; i
< dvfs_info
->freq_count
/ 2; i
++) {
292 index
= dvfs_info
->freq_count
- i
- 1;
293 tmp_freq
= freq_tbl
[i
].frequency
;
294 freq_tbl
[i
].frequency
= freq_tbl
[index
].frequency
;
295 freq_tbl
[index
].frequency
= tmp_freq
;
299 static int exynos_cpufreq_cpu_init(struct cpufreq_policy
*policy
)
301 policy
->clk
= dvfs_info
->cpu_clk
;
302 return cpufreq_generic_init(policy
, dvfs_info
->freq_table
,
306 static struct cpufreq_driver exynos_driver
= {
307 .flags
= CPUFREQ_STICKY
| CPUFREQ_ASYNC_NOTIFICATION
|
308 CPUFREQ_NEED_INITIAL_FREQ_CHECK
,
309 .verify
= cpufreq_generic_frequency_table_verify
,
310 .target_index
= exynos_target
,
311 .get
= cpufreq_generic_get
,
312 .init
= exynos_cpufreq_cpu_init
,
313 .name
= CPUFREQ_NAME
,
314 .attr
= cpufreq_generic_attr
,
317 static const struct of_device_id exynos_cpufreq_match
[] = {
319 .compatible
= "samsung,exynos5440-cpufreq",
323 MODULE_DEVICE_TABLE(of
, exynos_cpufreq_match
);
325 static int exynos_cpufreq_probe(struct platform_device
*pdev
)
328 struct device_node
*np
;
330 unsigned int cur_frequency
;
332 np
= pdev
->dev
.of_node
;
336 dvfs_info
= devm_kzalloc(&pdev
->dev
, sizeof(*dvfs_info
), GFP_KERNEL
);
342 dvfs_info
->dev
= &pdev
->dev
;
344 ret
= of_address_to_resource(np
, 0, &res
);
348 dvfs_info
->base
= devm_ioremap_resource(dvfs_info
->dev
, &res
);
349 if (IS_ERR(dvfs_info
->base
)) {
350 ret
= PTR_ERR(dvfs_info
->base
);
354 dvfs_info
->irq
= irq_of_parse_and_map(np
, 0);
355 if (!dvfs_info
->irq
) {
356 dev_err(dvfs_info
->dev
, "No cpufreq irq found\n");
361 ret
= dev_pm_opp_of_add_table(dvfs_info
->dev
);
363 dev_err(dvfs_info
->dev
, "failed to init OPP table: %d\n", ret
);
367 ret
= dev_pm_opp_init_cpufreq_table(dvfs_info
->dev
,
368 &dvfs_info
->freq_table
);
370 dev_err(dvfs_info
->dev
,
371 "failed to init cpufreq table: %d\n", ret
);
374 dvfs_info
->freq_count
= dev_pm_opp_get_opp_count(dvfs_info
->dev
);
375 exynos_sort_descend_freq_table();
377 if (of_property_read_u32(np
, "clock-latency", &dvfs_info
->latency
))
378 dvfs_info
->latency
= DEF_TRANS_LATENCY
;
380 dvfs_info
->cpu_clk
= devm_clk_get(dvfs_info
->dev
, "armclk");
381 if (IS_ERR(dvfs_info
->cpu_clk
)) {
382 dev_err(dvfs_info
->dev
, "Failed to get cpu clock\n");
383 ret
= PTR_ERR(dvfs_info
->cpu_clk
);
387 cur_frequency
= clk_get_rate(dvfs_info
->cpu_clk
);
388 if (!cur_frequency
) {
389 dev_err(dvfs_info
->dev
, "Failed to get clock rate\n");
393 cur_frequency
/= 1000;
395 INIT_WORK(&dvfs_info
->irq_work
, exynos_cpufreq_work
);
396 ret
= devm_request_irq(dvfs_info
->dev
, dvfs_info
->irq
,
397 exynos_cpufreq_irq
, IRQF_TRIGGER_NONE
,
398 CPUFREQ_NAME
, dvfs_info
);
400 dev_err(dvfs_info
->dev
, "Failed to register IRQ\n");
404 ret
= init_div_table();
406 dev_err(dvfs_info
->dev
, "Failed to initialise div table\n");
410 exynos_enable_dvfs(cur_frequency
);
411 ret
= cpufreq_register_driver(&exynos_driver
);
413 dev_err(dvfs_info
->dev
,
414 "%s: failed to register cpufreq driver\n", __func__
);
419 dvfs_info
->dvfs_enabled
= true;
423 dev_pm_opp_free_cpufreq_table(dvfs_info
->dev
, &dvfs_info
->freq_table
);
425 dev_pm_opp_of_remove_table(dvfs_info
->dev
);
428 dev_err(&pdev
->dev
, "%s: failed initialization\n", __func__
);
432 static int exynos_cpufreq_remove(struct platform_device
*pdev
)
434 cpufreq_unregister_driver(&exynos_driver
);
435 dev_pm_opp_free_cpufreq_table(dvfs_info
->dev
, &dvfs_info
->freq_table
);
436 dev_pm_opp_of_remove_table(dvfs_info
->dev
);
440 static struct platform_driver exynos_cpufreq_platdrv
= {
442 .name
= "exynos5440-cpufreq",
443 .of_match_table
= exynos_cpufreq_match
,
445 .probe
= exynos_cpufreq_probe
,
446 .remove
= exynos_cpufreq_remove
,
448 module_platform_driver(exynos_cpufreq_platdrv
);
450 MODULE_AUTHOR("Amit Daniel Kachhap <amit.daniel@samsung.com>");
451 MODULE_DESCRIPTION("Exynos5440 cpufreq driver");
452 MODULE_LICENSE("GPL");