/*
 * Marvell MVEBU CPU clock handling.
 *
 * Copyright (C) 2012 Marvell
 *
 * Gregory CLEMENT <gregory.clement@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2.  This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/delay.h>
#include <linux/mvebu-pmsu.h>
#include <asm/smp_plat.h>
/* System-control clock-divider registers (one 8-bit divider lane per CPU) */
#define SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET		0x0
#define   SYS_CTRL_CLK_DIVIDER_CTRL_RESET_ALL		0xff
#define   SYS_CTRL_CLK_DIVIDER_CTRL_RESET_SHIFT		8
#define SYS_CTRL_CLK_DIVIDER_CTRL2_OFFSET		0x8
#define   SYS_CTRL_CLK_DIVIDER_CTRL2_NBCLK_RATIO_SHIFT	16
#define SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET		0xC
#define SYS_CTRL_CLK_DIVIDER_MASK			0x3F

/* Ratio field inside each per-CPU PMU DFS register */
#define PMU_DFS_RATIO_SHIFT	16
#define PMU_DFS_RATIO_MASK	0x3F
39 const char *parent_name
;
40 void __iomem
*reg_base
;
41 void __iomem
*pmu_dfs
;
44 static struct clk
**clks
;
46 static struct clk_onecell_data clk_data
;
48 #define to_cpu_clk(p) container_of(p, struct cpu_clk, hw)
50 static unsigned long clk_cpu_recalc_rate(struct clk_hw
*hwclk
,
51 unsigned long parent_rate
)
53 struct cpu_clk
*cpuclk
= to_cpu_clk(hwclk
);
56 reg
= readl(cpuclk
->reg_base
+ SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET
);
57 div
= (reg
>> (cpuclk
->cpu
* 8)) & SYS_CTRL_CLK_DIVIDER_MASK
;
58 return parent_rate
/ div
;
/*
 * Round a requested rate to the closest achievable one.
 * Valid ratios are 1:1, 1:2 and 1:3, so the divider is clamped to
 * the range [1, 3]; without the clamp a request above the parent
 * rate would yield div == 0 and divide by zero below.
 */
static long clk_cpu_round_rate(struct clk_hw *hwclk, unsigned long rate,
			       unsigned long *parent_rate)
{
	unsigned long div;

	/* A zero request is treated as "as slow as possible" (1:3) */
	div = rate ? *parent_rate / rate : 3;
	if (div == 0)
		div = 1;
	else if (div > 3)
		div = 3;

	return *parent_rate / div;
}
76 static int clk_cpu_off_set_rate(struct clk_hw
*hwclk
, unsigned long rate
,
77 unsigned long parent_rate
)
80 struct cpu_clk
*cpuclk
= to_cpu_clk(hwclk
);
84 div
= parent_rate
/ rate
;
85 reg
= (readl(cpuclk
->reg_base
+ SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET
)
86 & (~(SYS_CTRL_CLK_DIVIDER_MASK
<< (cpuclk
->cpu
* 8))))
87 | (div
<< (cpuclk
->cpu
* 8));
88 writel(reg
, cpuclk
->reg_base
+ SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET
);
89 /* Set clock divider reload smooth bit mask */
90 reload_mask
= 1 << (20 + cpuclk
->cpu
);
92 reg
= readl(cpuclk
->reg_base
+ SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET
)
94 writel(reg
, cpuclk
->reg_base
+ SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET
);
96 /* Now trigger the clock update */
97 reg
= readl(cpuclk
->reg_base
+ SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET
)
99 writel(reg
, cpuclk
->reg_base
+ SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET
);
101 /* Wait for clocks to settle down then clear reload request */
103 reg
&= ~(reload_mask
| 1 << 24);
104 writel(reg
, cpuclk
->reg_base
+ SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET
);
110 static int clk_cpu_on_set_rate(struct clk_hw
*hwclk
, unsigned long rate
,
111 unsigned long parent_rate
)
114 unsigned long fabric_div
, target_div
, cur_rate
;
115 struct cpu_clk
*cpuclk
= to_cpu_clk(hwclk
);
118 * PMU DFS registers are not mapped, Device Tree does not
119 * describes them. We cannot change the frequency dynamically.
121 if (!cpuclk
->pmu_dfs
)
124 cur_rate
= clk_hw_get_rate(hwclk
);
126 reg
= readl(cpuclk
->reg_base
+ SYS_CTRL_CLK_DIVIDER_CTRL2_OFFSET
);
127 fabric_div
= (reg
>> SYS_CTRL_CLK_DIVIDER_CTRL2_NBCLK_RATIO_SHIFT
) &
128 SYS_CTRL_CLK_DIVIDER_MASK
;
130 /* Frequency is going up */
131 if (rate
== 2 * cur_rate
)
132 target_div
= fabric_div
/ 2;
133 /* Frequency is going down */
135 target_div
= fabric_div
;
140 reg
= readl(cpuclk
->pmu_dfs
);
141 reg
&= ~(PMU_DFS_RATIO_MASK
<< PMU_DFS_RATIO_SHIFT
);
142 reg
|= (target_div
<< PMU_DFS_RATIO_SHIFT
);
143 writel(reg
, cpuclk
->pmu_dfs
);
145 reg
= readl(cpuclk
->reg_base
+ SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET
);
146 reg
|= (SYS_CTRL_CLK_DIVIDER_CTRL_RESET_ALL
<<
147 SYS_CTRL_CLK_DIVIDER_CTRL_RESET_SHIFT
);
148 writel(reg
, cpuclk
->reg_base
+ SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET
);
150 return mvebu_pmsu_dfs_request(cpuclk
->cpu
);
153 static int clk_cpu_set_rate(struct clk_hw
*hwclk
, unsigned long rate
,
154 unsigned long parent_rate
)
156 if (__clk_is_enabled(hwclk
->clk
))
157 return clk_cpu_on_set_rate(hwclk
, rate
, parent_rate
);
159 return clk_cpu_off_set_rate(hwclk
, rate
, parent_rate
);
162 static const struct clk_ops cpu_ops
= {
163 .recalc_rate
= clk_cpu_recalc_rate
,
164 .round_rate
= clk_cpu_round_rate
,
165 .set_rate
= clk_cpu_set_rate
,
168 static void __init
of_cpu_clk_setup(struct device_node
*node
)
170 struct cpu_clk
*cpuclk
;
171 void __iomem
*clock_complex_base
= of_iomap(node
, 0);
172 void __iomem
*pmu_dfs_base
= of_iomap(node
, 1);
174 struct device_node
*dn
;
176 if (clock_complex_base
== NULL
) {
177 pr_err("%s: clock-complex base register not set\n",
182 if (pmu_dfs_base
== NULL
)
183 pr_warn("%s: pmu-dfs base register not set, dynamic frequency scaling not available\n",
186 for_each_node_by_type(dn
, "cpu")
189 cpuclk
= kzalloc(ncpus
* sizeof(*cpuclk
), GFP_KERNEL
);
190 if (WARN_ON(!cpuclk
))
193 clks
= kzalloc(ncpus
* sizeof(*clks
), GFP_KERNEL
);
197 for_each_node_by_type(dn
, "cpu") {
198 struct clk_init_data init
;
200 char *clk_name
= kzalloc(5, GFP_KERNEL
);
203 if (WARN_ON(!clk_name
))
206 err
= of_property_read_u32(dn
, "reg", &cpu
);
210 sprintf(clk_name
, "cpu%d", cpu
);
212 cpuclk
[cpu
].parent_name
= of_clk_get_parent_name(node
, 0);
213 cpuclk
[cpu
].clk_name
= clk_name
;
214 cpuclk
[cpu
].cpu
= cpu
;
215 cpuclk
[cpu
].reg_base
= clock_complex_base
;
217 cpuclk
[cpu
].pmu_dfs
= pmu_dfs_base
+ 4 * cpu
;
218 cpuclk
[cpu
].hw
.init
= &init
;
220 init
.name
= cpuclk
[cpu
].clk_name
;
223 init
.parent_names
= &cpuclk
[cpu
].parent_name
;
224 init
.num_parents
= 1;
226 clk
= clk_register(NULL
, &cpuclk
[cpu
].hw
);
227 if (WARN_ON(IS_ERR(clk
)))
231 clk_data
.clk_num
= MAX_CPU
;
232 clk_data
.clks
= clks
;
233 of_clk_add_provider(node
, of_clk_src_onecell_get
, &clk_data
);
239 kfree(cpuclk
[ncpus
].clk_name
);
243 iounmap(clock_complex_base
);
246 CLK_OF_DECLARE(armada_xp_cpu_clock
, "marvell,armada-xp-cpu-clock",