// SPDX-License-Identifier: GPL-2.0+
/*
 * Marvell Armada AP CPU Clock Controller
 *
 * Copyright (C) 2018 Marvell
 *
 * Omri Itach <omrii@marvell.com>
 * Gregory Clement <gregory.clement@bootlin.com>
 */

#define pr_fmt(fmt) "ap-cpu-clk: " fmt
#include <linux/clk-provider.h>
#include <linux/clk.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include "armada_ap_cp_helper.h"
#define AP806_CPU_CLUSTER0		0
#define AP806_CPU_CLUSTER1		1
#define AP806_CPUS_PER_CLUSTER		2
#define APN806_CPU1_MASK		0x1

#define APN806_CLUSTER_NUM_OFFSET	8
#define APN806_CLUSTER_NUM_MASK		BIT(APN806_CLUSTER_NUM_OFFSET)

#define APN806_MAX_DIVIDER		32
/*
 * struct cpu_dfs_regs: CPU DFS register mapping
 * @divider_reg: full integer ratio from PLL frequency to CPU clock frequency
 * @force_reg: request to force new ratio regardless of relation to other clocks
 * @ratio_reg: central request to switch ratios
 */
struct cpu_dfs_regs {
	unsigned int divider_reg;
	unsigned int force_reg;
	unsigned int ratio_reg;
	unsigned int ratio_state_reg;
	unsigned int divider_mask;
	unsigned int cluster_offset;
	unsigned int force_mask;
	int divider_offset;
	int divider_ratio;
	int ratio_offset;
	int ratio_state_offset;
	int ratio_state_cluster_offset;
};
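/*
 * Worked example (illustration only, derived from the AP806 values below):
 * the per-cluster divider register is divider_reg + cluster * cluster_offset,
 * so cluster 1 on AP806 uses 0x278 + 0x14 = 0x28c, and the divider field is
 * isolated with divider_mask and shifted down by divider_offset.
 */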
/* AP806 CPU DFS register mapping */
#define AP806_CA72MP2_0_PLL_CR_0_REG_OFFSET		0x278
#define AP806_CA72MP2_0_PLL_CR_1_REG_OFFSET		0x280
#define AP806_CA72MP2_0_PLL_CR_2_REG_OFFSET		0x284
#define AP806_CA72MP2_0_PLL_SR_REG_OFFSET		0xC94

#define AP806_CA72MP2_0_PLL_CR_CLUSTER_OFFSET		0x14
#define AP806_PLL_CR_0_CPU_CLK_DIV_RATIO_OFFSET		0
#define AP806_PLL_CR_CPU_CLK_DIV_RATIO			0
#define AP806_PLL_CR_0_CPU_CLK_DIV_RATIO_MASK \
			(0x3f << AP806_PLL_CR_0_CPU_CLK_DIV_RATIO_OFFSET)
#define AP806_PLL_CR_0_CPU_CLK_RELOAD_FORCE_OFFSET	24
#define AP806_PLL_CR_0_CPU_CLK_RELOAD_FORCE_MASK \
			(0x1 << AP806_PLL_CR_0_CPU_CLK_RELOAD_FORCE_OFFSET)
#define AP806_PLL_CR_0_CPU_CLK_RELOAD_RATIO_OFFSET	16
#define AP806_CA72MP2_0_PLL_RATIO_STABLE_OFFSET		0
#define AP806_CA72MP2_0_PLL_RATIO_STATE			11

#define STATUS_POLL_PERIOD_US		1
#define STATUS_POLL_TIMEOUT_US		1000000

#define to_ap_cpu_clk(_hw) container_of(_hw, struct ap_cpu_clk, hw)
static const struct cpu_dfs_regs ap806_dfs_regs = {
	.divider_reg = AP806_CA72MP2_0_PLL_CR_0_REG_OFFSET,
	.force_reg = AP806_CA72MP2_0_PLL_CR_1_REG_OFFSET,
	.ratio_reg = AP806_CA72MP2_0_PLL_CR_2_REG_OFFSET,
	.ratio_state_reg = AP806_CA72MP2_0_PLL_SR_REG_OFFSET,
	.divider_mask = AP806_PLL_CR_0_CPU_CLK_DIV_RATIO_MASK,
	.cluster_offset = AP806_CA72MP2_0_PLL_CR_CLUSTER_OFFSET,
	.force_mask = AP806_PLL_CR_0_CPU_CLK_RELOAD_FORCE_MASK,
	.divider_offset = AP806_PLL_CR_0_CPU_CLK_DIV_RATIO_OFFSET,
	.divider_ratio = AP806_PLL_CR_CPU_CLK_DIV_RATIO,
	.ratio_offset = AP806_PLL_CR_0_CPU_CLK_RELOAD_RATIO_OFFSET,
	.ratio_state_offset = AP806_CA72MP2_0_PLL_RATIO_STABLE_OFFSET,
	.ratio_state_cluster_offset = AP806_CA72MP2_0_PLL_RATIO_STABLE_OFFSET,
};
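/*
 * Note: both AP806 ratio_state fields above use the same zero offset, so the
 * ratio-stable bit polled in ap_cpu_clk_set_rate() is BIT(0) for either
 * cluster on this SoC.
 */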
/* AP807 CPU DFS register mapping */
#define AP807_DEVICE_GENERAL_CONTROL_10_REG_OFFSET		0x278
#define AP807_DEVICE_GENERAL_CONTROL_11_REG_OFFSET		0x27c
#define AP807_DEVICE_GENERAL_STATUS_6_REG_OFFSET		0xc98
#define AP807_CA72MP2_0_PLL_CR_CLUSTER_OFFSET			0x8
#define AP807_PLL_CR_0_CPU_CLK_DIV_RATIO_OFFSET			18
#define AP807_PLL_CR_0_CPU_CLK_DIV_RATIO_MASK \
		(0x3f << AP807_PLL_CR_0_CPU_CLK_DIV_RATIO_OFFSET)
#define AP807_PLL_CR_1_CPU_CLK_DIV_RATIO_OFFSET			12
#define AP807_PLL_CR_1_CPU_CLK_DIV_RATIO_MASK \
		(0x3f << AP807_PLL_CR_1_CPU_CLK_DIV_RATIO_OFFSET)
#define AP807_PLL_CR_CPU_CLK_DIV_RATIO				3
#define AP807_PLL_CR_0_CPU_CLK_RELOAD_FORCE_OFFSET		0
#define AP807_PLL_CR_0_CPU_CLK_RELOAD_FORCE_MASK \
		(0x3 << AP807_PLL_CR_0_CPU_CLK_RELOAD_FORCE_OFFSET)
#define AP807_PLL_CR_0_CPU_CLK_RELOAD_RATIO_OFFSET		6
#define AP807_CA72MP2_0_PLL_CLKDIV_RATIO_STABLE_OFFSET		20
#define AP807_CA72MP2_0_PLL_CLKDIV_RATIO_STABLE_CLUSTER_OFFSET	3
static const struct cpu_dfs_regs ap807_dfs_regs = {
	.divider_reg = AP807_DEVICE_GENERAL_CONTROL_10_REG_OFFSET,
	.force_reg = AP807_DEVICE_GENERAL_CONTROL_11_REG_OFFSET,
	.ratio_reg = AP807_DEVICE_GENERAL_CONTROL_11_REG_OFFSET,
	.ratio_state_reg = AP807_DEVICE_GENERAL_STATUS_6_REG_OFFSET,
	.divider_mask = AP807_PLL_CR_0_CPU_CLK_DIV_RATIO_MASK,
	.cluster_offset = AP807_CA72MP2_0_PLL_CR_CLUSTER_OFFSET,
	.force_mask = AP807_PLL_CR_0_CPU_CLK_RELOAD_FORCE_MASK,
	.divider_offset = AP807_PLL_CR_0_CPU_CLK_DIV_RATIO_OFFSET,
	.divider_ratio = AP807_PLL_CR_CPU_CLK_DIV_RATIO,
	.ratio_offset = AP807_PLL_CR_0_CPU_CLK_RELOAD_RATIO_OFFSET,
	.ratio_state_offset = AP807_CA72MP2_0_PLL_CLKDIV_RATIO_STABLE_OFFSET,
	.ratio_state_cluster_offset =
		AP807_CA72MP2_0_PLL_CLKDIV_RATIO_STABLE_CLUSTER_OFFSET,
};
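/*
 * Worked example (illustration only): on AP807 the ratio-stable bit checked
 * in ap_cpu_clk_set_rate() is BIT(20 + cluster * 3), i.e. bit 20 for
 * cluster 0 and bit 23 for cluster 1.
 */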
/*
 * struct ap_cpu_clk: CPU cluster clock controller instance
 * @cluster: Cluster clock controller index
 * @clk_name: Cluster clock controller name
 * @dev: Cluster clock device
 * @hw: HW specific structure of Cluster clock controller
 * @pll_cr_base: CA72MP2 Register base (Device Sample at Reset register)
 * @pll_regs: CPU DFS register mapping for this SoC (AP806 or AP807)
 */
struct ap_cpu_clk {
	unsigned int cluster;
	const char *clk_name;
	struct device *dev;
	struct clk_hw hw;
	struct regmap *pll_cr_base;
	const struct cpu_dfs_regs *pll_regs;
};
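/*
 * The clk_hw member must stay embedded (not a pointer): the clk framework
 * hands back &ap_cpu_clk->hw, and to_ap_cpu_clk() recovers the wrapper
 * structure with container_of().
 */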
static unsigned long ap_cpu_clk_recalc_rate(struct clk_hw *hw,
					    unsigned long parent_rate)
{
	struct ap_cpu_clk *clk = to_ap_cpu_clk(hw);
	unsigned int cpu_clkdiv_reg;
	int cpu_clkdiv_ratio;

	cpu_clkdiv_reg = clk->pll_regs->divider_reg +
		(clk->cluster * clk->pll_regs->cluster_offset);
	regmap_read(clk->pll_cr_base, cpu_clkdiv_reg, &cpu_clkdiv_ratio);
	cpu_clkdiv_ratio &= clk->pll_regs->divider_mask;
	cpu_clkdiv_ratio >>= clk->pll_regs->divider_offset;

	return parent_rate / cpu_clkdiv_ratio;
}
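/*
 * Example (hypothetical numbers, for illustration only): with a 2 GHz PLL as
 * parent and a divider field of 2, recalc_rate() above reports 1 GHz.
 */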
static int ap_cpu_clk_set_rate(struct clk_hw *hw, unsigned long rate,
			       unsigned long parent_rate)
{
	struct ap_cpu_clk *clk = to_ap_cpu_clk(hw);
	int ret, reg, divider = parent_rate / rate;
	unsigned int cpu_clkdiv_reg, cpu_force_reg, cpu_ratio_reg, stable_bit;
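	/*
	 * DFS ratio change sequence, as implemented below: program the new
	 * divider, force a ratio reload, request the switch, poll the
	 * per-cluster "ratio stable" bit, then clear the request.
	 */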
	cpu_clkdiv_reg = clk->pll_regs->divider_reg +
		(clk->cluster * clk->pll_regs->cluster_offset);
	cpu_force_reg = clk->pll_regs->force_reg +
		(clk->cluster * clk->pll_regs->cluster_offset);
	cpu_ratio_reg = clk->pll_regs->ratio_reg +
		(clk->cluster * clk->pll_regs->cluster_offset);
	regmap_read(clk->pll_cr_base, cpu_clkdiv_reg, &reg);
	reg &= ~(clk->pll_regs->divider_mask);
	reg |= (divider << clk->pll_regs->divider_offset);
	/*
	 * The AP807 CPU divider has two channels with a 1:3 ratio, so its
	 * divider_ratio is 3 and the second channel must be programmed with
	 * divider * divider_ratio. For the AP806, divider_ratio is 0 and
	 * only the single channel above is used.
	 */
	if (clk->pll_regs->divider_ratio) {
		reg &= ~(AP807_PLL_CR_1_CPU_CLK_DIV_RATIO_MASK);
		reg |= ((divider * clk->pll_regs->divider_ratio) <<
				AP807_PLL_CR_1_CPU_CLK_DIV_RATIO_OFFSET);
	}

	regmap_write(clk->pll_cr_base, cpu_clkdiv_reg, reg);
	regmap_update_bits(clk->pll_cr_base, cpu_force_reg,
			   clk->pll_regs->force_mask,
			   clk->pll_regs->force_mask);
	regmap_update_bits(clk->pll_cr_base, cpu_ratio_reg,
			   BIT(clk->pll_regs->ratio_offset),
			   BIT(clk->pll_regs->ratio_offset));
	stable_bit = BIT(clk->pll_regs->ratio_state_offset +
			 clk->cluster *
			 clk->pll_regs->ratio_state_cluster_offset);
	ret = regmap_read_poll_timeout(clk->pll_cr_base,
				       clk->pll_regs->ratio_state_reg, reg,
				       reg & stable_bit, STATUS_POLL_PERIOD_US,
				       STATUS_POLL_TIMEOUT_US);
	if (ret)
		return ret;
	regmap_update_bits(clk->pll_cr_base, cpu_ratio_reg,
			   BIT(clk->pll_regs->ratio_offset), 0);

	return 0;
}
static long ap_cpu_clk_round_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long *parent_rate)
{
	int divider = *parent_rate / rate;

	divider = min(divider, APN806_MAX_DIVIDER);

	return *parent_rate / divider;
}
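/*
 * Example (hypothetical numbers, for illustration only): asking for 600 MHz
 * from a 2 GHz parent gives divider = 3, so the rate rounds to ~666 MHz; the
 * divider is also capped at APN806_MAX_DIVIDER (32).
 */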
static const struct clk_ops ap_cpu_clk_ops = {
	.recalc_rate = ap_cpu_clk_recalc_rate,
	.round_rate = ap_cpu_clk_round_rate,
	.set_rate = ap_cpu_clk_set_rate,
};
static int ap_cpu_clock_probe(struct platform_device *pdev)
{
	int ret, nclusters = 0, cluster_index = 0;
	struct device *dev = &pdev->dev;
	struct device_node *dn, *np = dev->of_node;
	struct clk_hw_onecell_data *ap_cpu_data;
	struct ap_cpu_clk *ap_cpu_clk;
	struct regmap *regmap;
	regmap = syscon_node_to_regmap(np->parent);
	if (IS_ERR(regmap)) {
		pr_err("cannot get pll_cr_base regmap\n");
		return PTR_ERR(regmap);
	}
	/*
	 * AP806 has 4 cpus and DFS for AP806 is controlled per
	 * cluster (2 CPUs per cluster), cpu0 and cpu1 are fixed to
	 * cluster0 while cpu2 and cpu3 are fixed to cluster1 whether
	 * they are enabled or not. Since cpu0 is the boot cpu, then
	 * cluster0 must exist. If cpu2 or cpu3 is enabled, cluster1
	 * will exist and the cluster number is 2; otherwise the
	 * cluster number is 1.
	 */
	nclusters = 1;
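	/*
	 * Illustration (typical AP806 hwids, not taken from this file): the
	 * CPUs report hardware IDs 0x000, 0x001, 0x100 and 0x101, so bit 8
	 * (APN806_CLUSTER_NUM_MASK) is set only for the cluster1 CPUs and the
	 * loop below bumps nclusters to 2 as soon as one of them is enabled.
	 */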
	for_each_of_cpu_node(dn) {
		u64 cpu;

		cpu = of_get_cpu_hwid(dn, 0);
		if (WARN_ON(cpu == OF_BAD_ADDR)) {
			of_node_put(dn);
			return -EINVAL;
		}

		/* If cpu2 or cpu3 is enabled */
		if (cpu & APN806_CLUSTER_NUM_MASK) {
			nclusters = 2;
			of_node_put(dn);
			break;
		}
	}
	/*
	 * DFS for AP806 is controlled per cluster (2 CPUs per cluster),
	 * so allocate structs per cluster
	 */
	ap_cpu_clk = devm_kcalloc(dev, nclusters, sizeof(*ap_cpu_clk),
				  GFP_KERNEL);
	if (!ap_cpu_clk)
		return -ENOMEM;

	ap_cpu_data = devm_kzalloc(dev, struct_size(ap_cpu_data, hws,
						    nclusters),
				   GFP_KERNEL);
	if (!ap_cpu_data)
		return -ENOMEM;
	for_each_of_cpu_node(dn) {
		/* Local copy so the trailing digit can be patched per cluster */
		char clk_name[] = "cpu-cluster-0";
		struct clk_init_data init;
		const char *parent_name;
		struct clk *parent;
		u64 cpu;

		cpu = of_get_cpu_hwid(dn, 0);
		if (WARN_ON(cpu == OF_BAD_ADDR)) {
			of_node_put(dn);
			return -EINVAL;
		}

		cluster_index = cpu & APN806_CLUSTER_NUM_MASK;
		cluster_index >>= APN806_CLUSTER_NUM_OFFSET;
		/* Initialize once for one cluster */
		if (ap_cpu_data->hws[cluster_index])
			continue;
		parent = of_clk_get(np, cluster_index);
		if (IS_ERR(parent)) {
			dev_err(dev, "Could not get the clock parent\n");
			of_node_put(dn);
			return -EINVAL;
		}
		parent_name = __clk_get_name(parent);
		/* "cpu-cluster-0"[12] is the cluster digit */
		clk_name[12] += cluster_index;
		ap_cpu_clk[cluster_index].clk_name =
			ap_cp_unique_name(dev, np->parent, clk_name);
		ap_cpu_clk[cluster_index].cluster = cluster_index;
		ap_cpu_clk[cluster_index].pll_cr_base = regmap;
		ap_cpu_clk[cluster_index].hw.init = &init;
		ap_cpu_clk[cluster_index].dev = dev;
		ap_cpu_clk[cluster_index].pll_regs = of_device_get_match_data(&pdev->dev);
		init.name = ap_cpu_clk[cluster_index].clk_name;
		init.ops = &ap_cpu_clk_ops;
		init.num_parents = 1;
		init.parent_names = &parent_name;
		ret = devm_clk_hw_register(dev, &ap_cpu_clk[cluster_index].hw);
		if (ret) {
			of_node_put(dn);
			return ret;
		}
		ap_cpu_data->hws[cluster_index] = &ap_cpu_clk[cluster_index].hw;
	}
	ap_cpu_data->num = cluster_index + 1;
	ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, ap_cpu_data);
	if (ret)
		dev_err(dev, "failed to register OF clock provider\n");

	return ret;
}
static const struct of_device_id ap_cpu_clock_of_match[] = {
	{
		.compatible = "marvell,ap806-cpu-clock",
		.data = &ap806_dfs_regs,
	},
	{
		.compatible = "marvell,ap807-cpu-clock",
		.data = &ap807_dfs_regs,
	},
	{ }
};
static struct platform_driver ap_cpu_clock_driver = {
	.probe = ap_cpu_clock_probe,
	.driver		= {
		.name = "marvell-ap-cpu-clock",
		.of_match_table = ap_cpu_clock_of_match,
		.suppress_bind_attrs = true,
	},
};
builtin_platform_driver(ap_cpu_clock_driver);