// SPDX-License-Identifier: GPL-2.0
/*
 * Marvell MVEBU CPU clock handling.
 *
 * Copyright (C) 2012 Marvell
 *
 * Gregory CLEMENT <gregory.clement@free-electrons.com>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/delay.h>
#include <linux/mvebu-pmsu.h>
#include <asm/smp_plat.h>

#define SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET		0x0
#define   SYS_CTRL_CLK_DIVIDER_CTRL_RESET_ALL		0xff
#define   SYS_CTRL_CLK_DIVIDER_CTRL_RESET_SHIFT		8
#define SYS_CTRL_CLK_DIVIDER_CTRL2_OFFSET		0x8
#define   SYS_CTRL_CLK_DIVIDER_CTRL2_NBCLK_RATIO_SHIFT	16
#define SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET		0xC
#define SYS_CTRL_CLK_DIVIDER_MASK			0x3F

#define PMU_DFS_RATIO_SHIFT 16
#define PMU_DFS_RATIO_MASK  0x3F
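/*
 * The DFS ratio field occupies bits 21:16 (shift of 16, 6-bit mask) of
 * each per-CPU PMU DFS register; see clk_cpu_on_set_rate() below.
 */
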
#define MAX_CPU 4

struct cpu_clk {
	struct clk_hw hw;
	int cpu;
	const char *clk_name;
	const char *parent_name;
	void __iomem *reg_base;
	void __iomem *pmu_dfs;
};

static struct clk **clks;

static struct clk_onecell_data clk_data;

#define to_cpu_clk(p) container_of(p, struct cpu_clk, hw)

static unsigned long clk_cpu_recalc_rate(struct clk_hw *hwclk,
					 unsigned long parent_rate)
{
	struct cpu_clk *cpuclk = to_cpu_clk(hwclk);
	u32 reg, div;

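	/*
	 * Each CPU has its own byte in the divider value register; the
	 * low six bits (SYS_CTRL_CLK_DIVIDER_MASK) of that byte hold the
	 * divider applied to the parent clock.
	 */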
	reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET);
	div = (reg >> (cpuclk->cpu * 8)) & SYS_CTRL_CLK_DIVIDER_MASK;
	return parent_rate / div;
}

static long clk_cpu_round_rate(struct clk_hw *hwclk, unsigned long rate,
			       unsigned long *parent_rate)
{
	/* Valid ratios are 1:1, 1:2 and 1:3 */
	u32 div;

	div = *parent_rate / rate;
	if (div == 0)
		div = 1;
	else if (div > 3)
		div = 3;

	return *parent_rate / div;
}

static int clk_cpu_off_set_rate(struct clk_hw *hwclk, unsigned long rate,
				unsigned long parent_rate)
{
	struct cpu_clk *cpuclk = to_cpu_clk(hwclk);
	u32 reg, div;
	u32 reload_mask;

	div = parent_rate / rate;
	reg = (readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET)
		& (~(SYS_CTRL_CLK_DIVIDER_MASK << (cpuclk->cpu * 8))))
		| (div << (cpuclk->cpu * 8));
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET);
	/* Set clock divider reload smooth bit mask */
	reload_mask = 1 << (20 + cpuclk->cpu);

	reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET)
	    | reload_mask;
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);

	/* Now trigger the clock update */
	reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET)
	    | 1 << 24;
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);

	/* Wait for clocks to settle down, then clear the reload request */
	udelay(1000);
	reg &= ~(reload_mask | 1 << 24);
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);
	udelay(1000);

	return 0;
}

static int clk_cpu_on_set_rate(struct clk_hw *hwclk, unsigned long rate,
			       unsigned long parent_rate)
{
	u32 reg;
	unsigned long fabric_div, target_div, cur_rate;
	struct cpu_clk *cpuclk = to_cpu_clk(hwclk);

	/*
	 * The PMU DFS registers are not mapped: the Device Tree does not
	 * describe them, so the frequency cannot be changed dynamically.
	 */
	if (!cpuclk->pmu_dfs)
		return -ENODEV;

	cur_rate = clk_hw_get_rate(hwclk);

	reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL2_OFFSET);
	fabric_div = (reg >> SYS_CTRL_CLK_DIVIDER_CTRL2_NBCLK_RATIO_SHIFT) &
		SYS_CTRL_CLK_DIVIDER_MASK;

	/* Frequency is going up */
	if (rate == 2 * cur_rate)
		target_div = fabric_div / 2;
	/* Frequency is going down */
	else
		target_div = fabric_div;

	if (target_div == 0)
		target_div = 1;

	reg = readl(cpuclk->pmu_dfs);
	reg &= ~(PMU_DFS_RATIO_MASK << PMU_DFS_RATIO_SHIFT);
	reg |= (target_div << PMU_DFS_RATIO_SHIFT);
	writel(reg, cpuclk->pmu_dfs);

	reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);
	reg |= (SYS_CTRL_CLK_DIVIDER_CTRL_RESET_ALL <<
		SYS_CTRL_CLK_DIVIDER_CTRL_RESET_SHIFT);
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);

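	/*
	 * Hand over to the PMSU: mvebu_pmsu_dfs_request() triggers the
	 * DFS sequence for this CPU and waits for it to complete.
	 */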
	return mvebu_pmsu_dfs_request(cpuclk->cpu);
}

static int clk_cpu_set_rate(struct clk_hw *hwclk, unsigned long rate,
			    unsigned long parent_rate)
{
	if (__clk_is_enabled(hwclk->clk))
		return clk_cpu_on_set_rate(hwclk, rate, parent_rate);
	else
		return clk_cpu_off_set_rate(hwclk, rate, parent_rate);
}

static const struct clk_ops cpu_ops = {
	.recalc_rate = clk_cpu_recalc_rate,
	.round_rate = clk_cpu_round_rate,
	.set_rate = clk_cpu_set_rate,
};

static void __init of_cpu_clk_setup(struct device_node *node)
{
	struct cpu_clk *cpuclk;
	void __iomem *clock_complex_base = of_iomap(node, 0);
	void __iomem *pmu_dfs_base = of_iomap(node, 1);
	int ncpus = num_possible_cpus();
	int cpu;

	if (clock_complex_base == NULL) {
		pr_err("%s: clock-complex base register not set\n",
		       __func__);
		return;
	}

	if (pmu_dfs_base == NULL)
		pr_warn("%s: pmu-dfs base register not set, dynamic frequency scaling not available\n",
			__func__);

	cpuclk = kcalloc(ncpus, sizeof(*cpuclk), GFP_KERNEL);
	if (WARN_ON(!cpuclk))
		goto cpuclk_out;

	clks = kcalloc(ncpus, sizeof(*clks), GFP_KERNEL);
	if (WARN_ON(!clks))
		goto clks_out;

	for_each_possible_cpu(cpu) {
		struct clk_init_data init;
		struct clk *clk;
		char *clk_name = kzalloc(5, GFP_KERNEL);
		if (WARN_ON(!clk_name))
			goto bail_out;

		sprintf(clk_name, "cpu%d", cpu);

		cpuclk[cpu].parent_name = of_clk_get_parent_name(node, 0);
		cpuclk[cpu].clk_name = clk_name;
		cpuclk[cpu].cpu = cpu;
		cpuclk[cpu].reg_base = clock_complex_base;
		if (pmu_dfs_base)
			cpuclk[cpu].pmu_dfs = pmu_dfs_base + 4 * cpu;
		cpuclk[cpu].hw.init = &init;

		init.name = cpuclk[cpu].clk_name;
		init.ops = &cpu_ops;
		init.flags = 0;
		init.parent_names = &cpuclk[cpu].parent_name;
		init.num_parents = 1;

		clk = clk_register(NULL, &cpuclk[cpu].hw);
		if (WARN_ON(IS_ERR(clk)))
			goto bail_out;
		clks[cpu] = clk;
	}
	clk_data.clk_num = MAX_CPU;
	clk_data.clks = clks;
	of_clk_add_provider(node, of_clk_src_onecell_get, &clk_data);

	return;
bail_out:
	kfree(clks);
	while (ncpus--)
		kfree(cpuclk[ncpus].clk_name);
clks_out:
	kfree(cpuclk);
cpuclk_out:
	iounmap(clock_complex_base);
}

CLK_OF_DECLARE(armada_xp_cpu_clock, "marvell,armada-xp-cpu-clock",
	       of_cpu_clk_setup);

static void __init of_mv98dx3236_cpu_clk_setup(struct device_node *node)
{
	of_clk_add_provider(node, of_clk_src_simple_get, NULL);
}

CLK_OF_DECLARE(mv98dx3236_cpu_clock, "marvell,mv98dx3236-cpu-clock",
	       of_mv98dx3236_cpu_clk_setup);