/*
 * Marvell MVEBU CPU clock handling.
 *
 * Copyright (C) 2012 Marvell
 *
 * Gregory CLEMENT <gregory.clement@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/mvebu-pmsu.h>
#include <asm/smp_plat.h>

#define SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET		0x0
#define   SYS_CTRL_CLK_DIVIDER_CTRL_RESET_ALL		0xff
#define   SYS_CTRL_CLK_DIVIDER_CTRL_RESET_SHIFT		8
#define SYS_CTRL_CLK_DIVIDER_CTRL2_OFFSET		0x8
#define   SYS_CTRL_CLK_DIVIDER_CTRL2_NBCLK_RATIO_SHIFT	16
#define SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET		0xC
#define SYS_CTRL_CLK_DIVIDER_MASK			0x3F

#define PMU_DFS_RATIO_SHIFT	16
#define PMU_DFS_RATIO_MASK	0x3F

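/*
 * Per-CPU clock bookkeeping: one cpu_clk entry per CPU, carrying its
 * clk_hw, its index (used to locate the per-CPU divider field) and the
 * mapped system-controller and PMU DFS register bases.
 */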
#define MAX_CPU		4
struct cpu_clk {
	struct clk_hw hw;
	int cpu;
	const char *clk_name;
	const char *parent_name;
	void __iomem *reg_base;
	void __iomem *pmu_dfs;
};

static struct clk **clks;

static struct clk_onecell_data clk_data;

#define to_cpu_clk(p) container_of(p, struct cpu_clk, hw)

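/*
 * Each CPU owns a 6-bit divider field in the DIVIDER_VALUE register,
 * at bit offset cpu * 8; the CPU rate is simply the parent rate
 * divided by that field.
 */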
static unsigned long clk_cpu_recalc_rate(struct clk_hw *hwclk,
					 unsigned long parent_rate)
{
	struct cpu_clk *cpuclk = to_cpu_clk(hwclk);
	u32 reg, div;

	reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET);
	div = (reg >> (cpuclk->cpu * 8)) & SYS_CTRL_CLK_DIVIDER_MASK;
	return parent_rate / div;
}

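/*
 * Round the requested rate to the nearest achievable one. Only ratios
 * 1:1, 1:2 and 1:3 of the parent clock are supported, so the computed
 * divider is clamped to the [1, 3] range.
 */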
static long clk_cpu_round_rate(struct clk_hw *hwclk, unsigned long rate,
			       unsigned long *parent_rate)
{
	/* Valid ratios are 1:1, 1:2 and 1:3 */
	u32 div;

	div = *parent_rate / rate;
	if (div == 0)
		div = 1;
	else if (div > 3)
		div = 3;

	return *parent_rate / div;
}

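/*
 * Set the rate while the clock is not enabled: program the new divider
 * directly through the system controller, set the per-CPU "smooth"
 * reload bit, trigger the update via bit 24, then wait for the clock
 * to settle before clearing the request bits.
 */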
static int clk_cpu_off_set_rate(struct clk_hw *hwclk, unsigned long rate,
				unsigned long parent_rate)
{
	struct cpu_clk *cpuclk = to_cpu_clk(hwclk);
	u32 reg, div;
	u32 reload_mask;

	div = parent_rate / rate;
	reg = (readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET)
		& (~(SYS_CTRL_CLK_DIVIDER_MASK << (cpuclk->cpu * 8))))
		| (div << (cpuclk->cpu * 8));
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET);

	/* Set clock divider reload smooth bit mask */
	reload_mask = 1 << (20 + cpuclk->cpu);

	reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET)
	    | reload_mask;
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);

	/* Now trigger the clock update */
	reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET)
	    | 1 << 24;
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);

	/* Wait for clocks to settle down, then clear the reload request */
	udelay(1000);
	reg &= ~(reload_mask | 1 << 24);
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);
	udelay(1000);

	return 0;
}

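/*
 * Set the rate while the clock is enabled: the divider cannot safely
 * be changed under a running CPU, so program the target ratio into the
 * PMU DFS register, re-arm the divider reset bits, and let the PMSU
 * carry out the actual transition.
 */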
static int clk_cpu_on_set_rate(struct clk_hw *hwclk, unsigned long rate,
			       unsigned long parent_rate)
{
	u32 reg;
	unsigned long fabric_div, target_div, cur_rate;
	struct cpu_clk *cpuclk = to_cpu_clk(hwclk);

	/*
	 * The PMU DFS registers are not mapped; the Device Tree does
	 * not describe them. We cannot change the frequency
	 * dynamically.
	 */
	if (!cpuclk->pmu_dfs)
		return -ENODEV;

	cur_rate = __clk_get_rate(hwclk->clk);

	reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL2_OFFSET);
	fabric_div = (reg >> SYS_CTRL_CLK_DIVIDER_CTRL2_NBCLK_RATIO_SHIFT) &
		SYS_CTRL_CLK_DIVIDER_MASK;

	/* Frequency is going up */
	if (rate == 2 * cur_rate)
		target_div = fabric_div / 2;
	/* Frequency is going down */
	else
		target_div = fabric_div;

	if (target_div == 0)
		target_div = 1;

	reg = readl(cpuclk->pmu_dfs);
	reg &= ~(PMU_DFS_RATIO_MASK << PMU_DFS_RATIO_SHIFT);
	reg |= (target_div << PMU_DFS_RATIO_SHIFT);
	writel(reg, cpuclk->pmu_dfs);

	reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);
	reg |= (SYS_CTRL_CLK_DIVIDER_CTRL_RESET_ALL <<
		SYS_CTRL_CLK_DIVIDER_CTRL_RESET_SHIFT);
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);

	return mvebu_pmsu_dfs_request(cpuclk->cpu);
}

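/*
 * Dispatch to the appropriate set_rate path depending on whether the
 * clock is currently enabled.
 */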
static int clk_cpu_set_rate(struct clk_hw *hwclk, unsigned long rate,
			    unsigned long parent_rate)
{
	if (__clk_is_enabled(hwclk->clk))
		return clk_cpu_on_set_rate(hwclk, rate, parent_rate);
	else
		return clk_cpu_off_set_rate(hwclk, rate, parent_rate);
}

static const struct clk_ops cpu_ops = {
	.recalc_rate = clk_cpu_recalc_rate,
	.round_rate = clk_cpu_round_rate,
	.set_rate = clk_cpu_set_rate,
};

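/*
 * Map the clock-complex (and, when present, PMU DFS) registers, then
 * register one clock per Device Tree node of device_type "cpu" and
 * expose them all through a onecell provider indexed by CPU number.
 */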
static void __init of_cpu_clk_setup(struct device_node *node)
{
	struct cpu_clk *cpuclk;
	void __iomem *clock_complex_base = of_iomap(node, 0);
	void __iomem *pmu_dfs_base = of_iomap(node, 1);
	int ncpus = 0;
	struct device_node *dn;

	if (clock_complex_base == NULL) {
		pr_err("%s: clock-complex base register not set\n",
		       __func__);
		return;
	}

	if (pmu_dfs_base == NULL)
		pr_warn("%s: pmu-dfs base register not set, dynamic frequency scaling not available\n",
			__func__);

	for_each_node_by_type(dn, "cpu")
		ncpus++;

	cpuclk = kzalloc(ncpus * sizeof(*cpuclk), GFP_KERNEL);
	if (WARN_ON(!cpuclk))
		goto cpuclk_out;

	clks = kzalloc(ncpus * sizeof(*clks), GFP_KERNEL);
	if (WARN_ON(!clks))
		goto clks_out;

	for_each_node_by_type(dn, "cpu") {
		struct clk_init_data init;
		struct clk *clk;
		struct clk *parent_clk;
		char *clk_name = kzalloc(5, GFP_KERNEL);
		int cpu, err;

		if (WARN_ON(!clk_name))
			goto bail_out;

		err = of_property_read_u32(dn, "reg", &cpu);
		if (WARN_ON(err))
			goto bail_out;

		sprintf(clk_name, "cpu%d", cpu);
		parent_clk = of_clk_get(node, 0);

		cpuclk[cpu].parent_name = __clk_get_name(parent_clk);
		cpuclk[cpu].clk_name = clk_name;
		cpuclk[cpu].cpu = cpu;
		cpuclk[cpu].reg_base = clock_complex_base;
		if (pmu_dfs_base)
			cpuclk[cpu].pmu_dfs = pmu_dfs_base + 4 * cpu;
		cpuclk[cpu].hw.init = &init;

		init.name = cpuclk[cpu].clk_name;
		init.ops = &cpu_ops;
		init.flags = 0;
		init.parent_names = &cpuclk[cpu].parent_name;
		init.num_parents = 1;

		clk = clk_register(NULL, &cpuclk[cpu].hw);
		if (WARN_ON(IS_ERR(clk)))
			goto bail_out;
		clks[cpu] = clk;
	}
	clk_data.clk_num = MAX_CPU;
	clk_data.clks = clks;
	of_clk_add_provider(node, of_clk_src_onecell_get, &clk_data);

	return;
bail_out:
	kfree(clks);
	while (ncpus--)
		kfree(cpuclk[ncpus].clk_name);
clks_out:
	kfree(cpuclk);
cpuclk_out:
	iounmap(clock_complex_base);
}

CLK_OF_DECLARE(armada_xp_cpu_clock, "marvell,armada-xp-cpu-clock",
	       of_cpu_clk_setup);
