/*
 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Amit Daniel Kachhap <amit.daniel@samsung.com>
 *
 * EXYNOS5440 - CPU frequency scaling support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/pm_opp.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

/* Register definitions */
#define XMU_DVFS_CTRL		0x0060
#define XMU_PMU_P0_7		0x0064
#define XMU_C0_3_PSTATE		0x0090
#define XMU_P_LIMIT		0x00a0
#define XMU_P_STATUS		0x00a4
#define XMU_PMUEVTEN		0x00d0
#define XMU_PMUIRQEN		0x00d4
#define XMU_PMUIRQ		0x00d8

/* PMU mask and shift definitions */
#define P_VALUE_MASK		0x7

#define XMU_DVFS_CTRL_EN_SHIFT	0

#define P0_7_CPUCLKDEV_SHIFT	21
#define P0_7_CPUCLKDEV_MASK	0x7
#define P0_7_ATBCLKDEV_SHIFT	18
#define P0_7_ATBCLKDEV_MASK	0x7
#define P0_7_CSCLKDEV_SHIFT	15
#define P0_7_CSCLKDEV_MASK	0x7
#define P0_7_CPUEMA_SHIFT	28
#define P0_7_CPUEMA_MASK	0xf
#define P0_7_L2EMA_SHIFT	24
#define P0_7_L2EMA_MASK		0xf
#define P0_7_VDD_SHIFT		8
#define P0_7_VDD_MASK		0x7f
#define P0_7_FREQ_SHIFT		0
#define P0_7_FREQ_MASK		0xff

#define C0_3_PSTATE_VALID_SHIFT	8
#define C0_3_PSTATE_CURR_SHIFT	4
#define C0_3_PSTATE_NEW_SHIFT	0

#define PSTATE_CHANGED_EVTEN_SHIFT	0

#define PSTATE_CHANGED_IRQEN_SHIFT	0

#define PSTATE_CHANGED_SHIFT		0

/* some constant values for clock divider calculation */
#define CPU_DIV_FREQ_MAX	500
#define CPU_DBG_FREQ_MAX	375
#define CPU_ATB_FREQ_MAX	500

#define PMIC_LOW_VOLT		0x30
#define PMIC_HIGH_VOLT		0x28

#define CPUEMA_HIGH		0x2
#define CPUEMA_MID		0x4
#define CPUEMA_LOW		0x7

#define L2EMA_HIGH		0x1
#define L2EMA_MID		0x3
#define L2EMA_LOW		0x4

/* frequency unit is 20MHZ */
#define FREQ_UNIT		20
#define MAX_VOLTAGE		1550000	/* In microvolt */
#define VOLTAGE_STEP		12500	/* In microvolt */

#define CPUFREQ_NAME		"exynos5440_dvfs"
#define DEF_TRANS_LATENCY	100000
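
/*
 * The XMU exposes eight CPU P-states (P0-P7); the index into the cpufreq
 * frequency table below doubles as the P-state number programmed into the
 * C0_3_PSTATE registers.
 */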
enum cpufreq_level_index {
	L0, L1, L2, L3, L4,
	L5, L6, L7, L8, L9,
};
#define CPUFREQ_LEVEL_END	(L7 + 1)

struct exynos_dvfs_data {
	void __iomem *base;
	struct resource *mem;
	int irq;
	struct clk *cpu_clk;
	unsigned int latency;
	struct cpufreq_frequency_table *freq_table;
	unsigned int freq_count;
	struct device *dev;
	bool dvfs_enabled;
	struct work_struct irq_work;
};

static struct exynos_dvfs_data *dvfs_info;
static DEFINE_MUTEX(cpufreq_lock);
static struct cpufreq_freqs freqs;
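
/*
 * Build one XMU_PMU_P0_7 entry per OPP: clock divider fields, EMA settings
 * picked from the OPP voltage, the voltage step index for the PMIC and the
 * frequency expressed in FREQ_UNIT steps.
 */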
static int init_div_table(void)
{
	struct cpufreq_frequency_table *pos, *freq_tbl = dvfs_info->freq_table;
	unsigned int tmp, clk_div, ema_div, freq, volt_id;
	struct dev_pm_opp *opp;

	cpufreq_for_each_entry(pos, freq_tbl) {
		opp = dev_pm_opp_find_freq_exact(dvfs_info->dev,
					pos->frequency * 1000, true);
		if (IS_ERR(opp)) {
			dev_err(dvfs_info->dev,
				"failed to find valid OPP for %u KHZ\n",
				pos->frequency);
			return PTR_ERR(opp);
		}

		freq = pos->frequency / 1000; /* In MHZ */
		clk_div = ((freq / CPU_DIV_FREQ_MAX) & P0_7_CPUCLKDEV_MASK)
					<< P0_7_CPUCLKDEV_SHIFT;
		clk_div |= ((freq / CPU_ATB_FREQ_MAX) & P0_7_ATBCLKDEV_MASK)
					<< P0_7_ATBCLKDEV_SHIFT;
		clk_div |= ((freq / CPU_DBG_FREQ_MAX) & P0_7_CSCLKDEV_MASK)
					<< P0_7_CSCLKDEV_SHIFT;

		/* Calculate EMA */
		volt_id = dev_pm_opp_get_voltage(opp);
		dev_pm_opp_put(opp);
		volt_id = (MAX_VOLTAGE - volt_id) / VOLTAGE_STEP;
		if (volt_id < PMIC_HIGH_VOLT) {
			ema_div = (CPUEMA_HIGH << P0_7_CPUEMA_SHIFT) |
				(L2EMA_HIGH << P0_7_L2EMA_SHIFT);
		} else if (volt_id > PMIC_LOW_VOLT) {
			ema_div = (CPUEMA_LOW << P0_7_CPUEMA_SHIFT) |
				(L2EMA_LOW << P0_7_L2EMA_SHIFT);
		} else {
			ema_div = (CPUEMA_MID << P0_7_CPUEMA_SHIFT) |
				(L2EMA_MID << P0_7_L2EMA_SHIFT);
		}

		tmp = (clk_div | ema_div | (volt_id << P0_7_VDD_SHIFT)
			| ((freq / FREQ_UNIT) << P0_7_FREQ_SHIFT));

		__raw_writel(tmp, dvfs_info->base + XMU_PMU_P0_7 + 4 *
						(pos - freq_tbl));
	}

	return 0;
}
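
/*
 * Enable the PSTATE change event and interrupt, seed every CPU's
 * C0_3_PSTATE register with the index of the current boot frequency
 * (falling back to the fastest table entry if that frequency is not in
 * the table), then turn hardware DVFS on.
 */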
static void exynos_enable_dvfs(unsigned int cur_frequency)
{
	unsigned int tmp, cpu;
	struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table;
	struct cpufreq_frequency_table *pos;
	/* Disable DVFS */
	__raw_writel(0, dvfs_info->base + XMU_DVFS_CTRL);

	/* Enable PSTATE Change Event */
	tmp = __raw_readl(dvfs_info->base + XMU_PMUEVTEN);
	tmp |= (1 << PSTATE_CHANGED_EVTEN_SHIFT);
	__raw_writel(tmp, dvfs_info->base + XMU_PMUEVTEN);

	/* Enable PSTATE Change IRQ */
	tmp = __raw_readl(dvfs_info->base + XMU_PMUIRQEN);
	tmp |= (1 << PSTATE_CHANGED_IRQEN_SHIFT);
	__raw_writel(tmp, dvfs_info->base + XMU_PMUIRQEN);

	/* Set initial performance index */
	cpufreq_for_each_entry(pos, freq_table)
		if (pos->frequency == cur_frequency)
			break;

	if (pos->frequency == CPUFREQ_TABLE_END) {
		dev_crit(dvfs_info->dev, "Boot up frequency not supported\n");
		/* Assign the highest frequency */
		pos = freq_table;
		cur_frequency = pos->frequency;
	}

	dev_info(dvfs_info->dev, "Setting dvfs initial frequency = %uKHZ",
						cur_frequency);

	for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++) {
		tmp = __raw_readl(dvfs_info->base + XMU_C0_3_PSTATE + cpu * 4);
		tmp &= ~(P_VALUE_MASK << C0_3_PSTATE_NEW_SHIFT);
		tmp |= ((pos - freq_table) << C0_3_PSTATE_NEW_SHIFT);
		__raw_writel(tmp, dvfs_info->base + XMU_C0_3_PSTATE + cpu * 4);
	}

	/* Enable DVFS */
	__raw_writel(1 << XMU_DVFS_CTRL_EN_SHIFT,
				dvfs_info->base + XMU_DVFS_CTRL);
}
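
/*
 * cpufreq ->target_index callback: request P-state 'index' on every CPU in
 * the policy. The transition is completed asynchronously from the PSTATE
 * change interrupt (see exynos_cpufreq_work()).
 */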
static int exynos_target(struct cpufreq_policy *policy, unsigned int index)
{
	unsigned int tmp;
	int i;
	struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table;

	mutex_lock(&cpufreq_lock);

	freqs.old = policy->cur;
	freqs.new = freq_table[index].frequency;

	cpufreq_freq_transition_begin(policy, &freqs);

	/* Set the target frequency in all C0_3_PSTATE register */
	for_each_cpu(i, policy->cpus) {
		tmp = __raw_readl(dvfs_info->base + XMU_C0_3_PSTATE + i * 4);
		tmp &= ~(P_VALUE_MASK << C0_3_PSTATE_NEW_SHIFT);
		tmp |= (index << C0_3_PSTATE_NEW_SHIFT);

		__raw_writel(tmp, dvfs_info->base + XMU_C0_3_PSTATE + i * 4);
	}
	mutex_unlock(&cpufreq_lock);
	return 0;
}
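
/*
 * Bottom half of the PSTATE change interrupt: read back the P-state the
 * hardware actually selected and complete the frequency transition that
 * exynos_target() started.
 */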
static void exynos_cpufreq_work(struct work_struct *work)
{
	unsigned int cur_pstate, index;
	struct cpufreq_policy *policy = cpufreq_cpu_get(0); /* boot CPU */
	struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table;

	/* Ensure we can access cpufreq structures */
	if (unlikely(dvfs_info->dvfs_enabled == false))
		goto skip_work;

	mutex_lock(&cpufreq_lock);
	freqs.old = policy->cur;

	cur_pstate = __raw_readl(dvfs_info->base + XMU_P_STATUS);
	if (cur_pstate >> C0_3_PSTATE_VALID_SHIFT & 0x1)
		index = (cur_pstate >> C0_3_PSTATE_CURR_SHIFT) & P_VALUE_MASK;
	else
		index = (cur_pstate >> C0_3_PSTATE_NEW_SHIFT) & P_VALUE_MASK;

	if (likely(index < dvfs_info->freq_count)) {
		freqs.new = freq_table[index].frequency;
	} else {
		dev_crit(dvfs_info->dev, "New frequency out of range\n");
		freqs.new = freqs.old;
	}
	cpufreq_freq_transition_end(policy, &freqs, 0);

	cpufreq_cpu_put(policy);
	mutex_unlock(&cpufreq_lock);
skip_work:
	enable_irq(dvfs_info->irq);
}
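
/*
 * PMU interrupt handler: acknowledge the PSTATE changed bit and defer the
 * cpufreq bookkeeping to the workqueue.
 */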
static irqreturn_t exynos_cpufreq_irq(int irq, void *id)
{
	unsigned int tmp;

	tmp = __raw_readl(dvfs_info->base + XMU_PMUIRQ);
	if (tmp >> PSTATE_CHANGED_SHIFT & 0x1) {
		__raw_writel(tmp, dvfs_info->base + XMU_PMUIRQ);
		disable_irq_nosync(irq);
		schedule_work(&dvfs_info->irq_work);
	}
	return IRQ_HANDLED;
}

static void exynos_sort_descend_freq_table(void)
{
	struct cpufreq_frequency_table *freq_tbl = dvfs_info->freq_table;
	int i = 0, index;
	unsigned int tmp_freq;
	/*
	 * Exynos5440 clock controller state logic expects the cpufreq table to
	 * be in descending order. But the OPP library constructs the table in
	 * ascending order. So to make the table descending we just need to
	 * swap the i element with the N - i element.
	 */
	for (i = 0; i < dvfs_info->freq_count / 2; i++) {
		index = dvfs_info->freq_count - i - 1;
		tmp_freq = freq_tbl[i].frequency;
		freq_tbl[i].frequency = freq_tbl[index].frequency;
		freq_tbl[index].frequency = tmp_freq;
	}
}
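
/* Per-policy init: all CPUs share the same clock and frequency table. */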
static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	policy->clk = dvfs_info->cpu_clk;
	return cpufreq_generic_init(policy, dvfs_info->freq_table,
			dvfs_info->latency);
}

static struct cpufreq_driver exynos_driver = {
	.flags		= CPUFREQ_STICKY | CPUFREQ_ASYNC_NOTIFICATION |
				CPUFREQ_NEED_INITIAL_FREQ_CHECK,
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= exynos_target,
	.get		= cpufreq_generic_get,
	.init		= exynos_cpufreq_cpu_init,
	.name		= CPUFREQ_NAME,
	.attr		= cpufreq_generic_attr,
};

static const struct of_device_id exynos_cpufreq_match[] = {
	{
		.compatible = "samsung,exynos5440-cpufreq",
	},
	{},
};
MODULE_DEVICE_TABLE(of, exynos_cpufreq_match);
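
/*
 * Probe: map the XMU registers, build the frequency table from the OPPs in
 * the device tree, program the divider table, request the PMU interrupt and
 * register the cpufreq driver.
 */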
static int exynos_cpufreq_probe(struct platform_device *pdev)
{
	int ret = -EINVAL;
	struct device_node *np;
	struct resource res;
	unsigned int cur_frequency;

	np = pdev->dev.of_node;
	if (!np)
		return -ENODEV;

	dvfs_info = devm_kzalloc(&pdev->dev, sizeof(*dvfs_info), GFP_KERNEL);
	if (!dvfs_info) {
		ret = -ENOMEM;
		goto err_put_node;
	}

	dvfs_info->dev = &pdev->dev;

	ret = of_address_to_resource(np, 0, &res);
	if (ret)
		goto err_put_node;

	dvfs_info->base = devm_ioremap_resource(dvfs_info->dev, &res);
	if (IS_ERR(dvfs_info->base)) {
		ret = PTR_ERR(dvfs_info->base);
		goto err_put_node;
	}

	dvfs_info->irq = irq_of_parse_and_map(np, 0);
	if (!dvfs_info->irq) {
		dev_err(dvfs_info->dev, "No cpufreq irq found\n");
		ret = -ENODEV;
		goto err_put_node;
	}

	ret = dev_pm_opp_of_add_table(dvfs_info->dev);
	if (ret) {
		dev_err(dvfs_info->dev, "failed to init OPP table: %d\n", ret);
		goto err_put_node;
	}

	ret = dev_pm_opp_init_cpufreq_table(dvfs_info->dev,
					    &dvfs_info->freq_table);
	if (ret) {
		dev_err(dvfs_info->dev,
			"failed to init cpufreq table: %d\n", ret);
		goto err_free_opp;
	}
	dvfs_info->freq_count = dev_pm_opp_get_opp_count(dvfs_info->dev);
	exynos_sort_descend_freq_table();

	if (of_property_read_u32(np, "clock-latency", &dvfs_info->latency))
		dvfs_info->latency = DEF_TRANS_LATENCY;

	dvfs_info->cpu_clk = devm_clk_get(dvfs_info->dev, "armclk");
	if (IS_ERR(dvfs_info->cpu_clk)) {
		dev_err(dvfs_info->dev, "Failed to get cpu clock\n");
		ret = PTR_ERR(dvfs_info->cpu_clk);
		goto err_free_table;
	}

	cur_frequency = clk_get_rate(dvfs_info->cpu_clk);
	if (!cur_frequency) {
		dev_err(dvfs_info->dev, "Failed to get clock rate\n");
		ret = -EINVAL;
		goto err_free_table;
	}
	cur_frequency /= 1000;

	INIT_WORK(&dvfs_info->irq_work, exynos_cpufreq_work);
	ret = devm_request_irq(dvfs_info->dev, dvfs_info->irq,
				exynos_cpufreq_irq, IRQF_TRIGGER_NONE,
				CPUFREQ_NAME, dvfs_info);
	if (ret) {
		dev_err(dvfs_info->dev, "Failed to register IRQ\n");
		goto err_free_table;
	}

	ret = init_div_table();
	if (ret) {
		dev_err(dvfs_info->dev, "Failed to initialise div table\n");
		goto err_free_table;
	}

	exynos_enable_dvfs(cur_frequency);
	ret = cpufreq_register_driver(&exynos_driver);
	if (ret) {
		dev_err(dvfs_info->dev,
			"%s: failed to register cpufreq driver\n", __func__);
		goto err_free_table;
	}

	of_node_put(np);
	dvfs_info->dvfs_enabled = true;
	return 0;

err_free_table:
	dev_pm_opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
err_free_opp:
	dev_pm_opp_of_remove_table(dvfs_info->dev);
err_put_node:
	of_node_put(np);
	dev_err(&pdev->dev, "%s: failed initialization\n", __func__);
	return ret;
}

static int exynos_cpufreq_remove(struct platform_device *pdev)
{
	cpufreq_unregister_driver(&exynos_driver);
	dev_pm_opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
	dev_pm_opp_of_remove_table(dvfs_info->dev);
	return 0;
}

static struct platform_driver exynos_cpufreq_platdrv = {
	.driver = {
		.name	= "exynos5440-cpufreq",
		.of_match_table = exynos_cpufreq_match,
	},
	.probe		= exynos_cpufreq_probe,
	.remove		= exynos_cpufreq_remove,
};
module_platform_driver(exynos_cpufreq_platdrv);

MODULE_AUTHOR("Amit Daniel Kachhap <amit.daniel@samsung.com>");
MODULE_DESCRIPTION("Exynos5440 cpufreq driver");
MODULE_LICENSE("GPL");