// SPDX-License-Identifier: GPL-2.0
/*
 * R-Car Gen4 Clock Pulse Generator
 *
 * Copyright (C) 2021 Renesas Electronics Corp.
 *
 * Based on rcar-gen3-cpg.c
 *
 * Copyright (C) 2015-2018 Glider bvba
 * Copyright (C) 2019 Renesas Electronics Corp.
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/slab.h>

#include "renesas-cpg-mssr.h"
#include "rcar-gen4-cpg.h"
#include "rcar-cpg-lib.h"

static const struct rcar_gen4_cpg_pll_config *cpg_pll_config __initdata;
static unsigned int cpg_clk_extalr __initdata;
static u32 cpg_mode __initdata;

#define CPG_PLLECR		0x0820	/* PLL Enable Control Register */

#define CPG_PLLECR_PLLST(n)	BIT(8 + ((n) < 3 ? (n) - 1 : \
					 (n) > 3 ? (n) + 1 : n)) /* PLLn Circuit Status */
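
/*
 * Note: the PLLST status bits are not contiguous; the macro above maps
 * PLL1 -> bit 8, PLL2 -> bit 9, PLL3 -> bit 11, PLL4 -> bit 13 and
 * PLL6 -> bit 15.
 */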

#define CPG_PLL1CR0		0x830	/* PLLn Control Registers */
#define CPG_PLL1CR1		0x8b0
#define CPG_PLL2CR0		0x834
#define CPG_PLL2CR1		0x8b8
#define CPG_PLL3CR0		0x83c
#define CPG_PLL3CR1		0x8c0
#define CPG_PLL4CR0		0x844
#define CPG_PLL4CR1		0x8c8
#define CPG_PLL6CR0		0x84c
#define CPG_PLL6CR1		0x8d8

#define CPG_PLLxCR0_KICK	BIT(31)
#define CPG_PLLxCR0_SSMODE	GENMASK(18, 16)	/* PLL mode */
#define CPG_PLLxCR0_SSMODE_FM	BIT(18)		/* Fractional Multiplication */
#define CPG_PLLxCR0_SSMODE_DITH	BIT(17)		/* Frequency Dithering */
#define CPG_PLLxCR0_SSMODE_CENT	BIT(16)		/* Center (vs. Down) Spread Dithering */
#define CPG_PLLxCR0_SSFREQ	GENMASK(14, 8)	/* SSCG Modulation Frequency */
#define CPG_PLLxCR0_SSDEPT	GENMASK(6, 0)	/* SSCG Modulation Depth */

/* Fractional 8.25 PLL */
#define CPG_PLLxCR0_NI8		GENMASK(27, 20)	/* Integer mult. factor */
#define CPG_PLLxCR1_NF25	GENMASK(24, 0)	/* Fractional mult. factor */

/* Fractional 9.24 PLL */
#define CPG_PLLxCR0_NI9		GENMASK(28, 20)	/* Integer mult. factor */
#define CPG_PLLxCR1_NF24	GENMASK(23, 0)	/* Fractional mult. factor */
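
/*
 * With fractional multiplication enabled (SSMODE_FM), the recalc callbacks
 * below compute the output rate as:
 *   8.25 PLL: rate = parent * 2 * (NI8 + 1) + parent * NF25 / 2^24
 *   9.24 PLL: rate = parent * (NI9 + 1) + parent * NF24 / 2^24
 */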

#define CPG_PLLxCR_STC		GENMASK(30, 24)	/* R-Car V3U PLLxCR */

#define CPG_RPCCKCR		0x874	/* RPC Clock Freq. Control Register */

#define CPG_SD0CKCR1		0x8a4	/* SD-IF0 Clock Freq. Control Reg. 1 */

#define CPG_SD0CKCR1_SDSRC_SEL	GENMASK(30, 29)	/* SDSRC clock freq. select */
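
/* The SDSRC divider is the SDSRC_SEL field value plus 4 (see CLK_TYPE_GEN4_SDSRC below) */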

struct cpg_pll_clk {
	struct clk_hw hw;
	void __iomem *pllcr0_reg;
	void __iomem *pllcr1_reg;
	void __iomem *pllecr_reg;
	u32 pllecr_pllst_mask;
};

#define to_pll_clk(_hw)	container_of(_hw, struct cpg_pll_clk, hw)

static unsigned long cpg_pll_8_25_clk_recalc_rate(struct clk_hw *hw,
						  unsigned long parent_rate)
{
	struct cpg_pll_clk *pll_clk = to_pll_clk(hw);
	u32 cr0 = readl(pll_clk->pllcr0_reg);
	unsigned int ni, nf;
	unsigned long rate;

	ni = (FIELD_GET(CPG_PLLxCR0_NI8, cr0) + 1) * 2;
	rate = parent_rate * ni;
	if (cr0 & CPG_PLLxCR0_SSMODE_FM) {
		nf = FIELD_GET(CPG_PLLxCR1_NF25, readl(pll_clk->pllcr1_reg));
		rate += mul_u64_u32_shr(parent_rate, nf, 24);
	}

	return rate;
}
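
/*
 * The integer multiplier NI8 is an 8-bit field programmed as (ni - 1), so
 * determine_rate/set_rate below clamp the requested multiplier to 1..256.
 */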

static int cpg_pll_8_25_clk_determine_rate(struct clk_hw *hw,
					   struct clk_rate_request *req)
{
	struct cpg_pll_clk *pll_clk = to_pll_clk(hw);
	unsigned int min_mult, max_mult, ni, nf;
	u32 cr0 = readl(pll_clk->pllcr0_reg);
	unsigned long prate;

	prate = req->best_parent_rate * 2;
	min_mult = max(div64_ul(req->min_rate, prate), 1ULL);
	max_mult = min(div64_ul(req->max_rate, prate), 256ULL);
	if (max_mult < min_mult)
		return -EINVAL;

	if (cr0 & CPG_PLLxCR0_SSMODE_FM) {
		ni = div64_ul(req->rate, prate);
		if (ni < min_mult) {
			ni = min_mult;
			nf = 0;
		} else {
			ni = min(ni, max_mult);
			nf = div64_ul((u64)(req->rate - prate * ni) << 24,
				      req->best_parent_rate);
		}
	} else {
		ni = DIV_ROUND_CLOSEST_ULL(req->rate, prate);
		ni = clamp(ni, min_mult, max_mult);
		nf = 0;
	}

	req->rate = prate * ni + mul_u64_u32_shr(req->best_parent_rate, nf, 24);

	return 0;
}

static int cpg_pll_8_25_clk_set_rate(struct clk_hw *hw, unsigned long rate,
				     unsigned long parent_rate)
{
	struct cpg_pll_clk *pll_clk = to_pll_clk(hw);
	unsigned long prate = parent_rate * 2;
	u32 cr0 = readl(pll_clk->pllcr0_reg);
	unsigned int ni, nf;
	u32 val;

	if (cr0 & CPG_PLLxCR0_SSMODE_FM) {
		ni = div64_ul(rate, prate);
		if (ni < 1) {
			ni = 1;
			nf = 0;
		} else {
			ni = min(ni, 256U);
			nf = div64_ul((u64)(rate - prate * ni) << 24,
				      parent_rate);
		}
	} else {
		ni = DIV_ROUND_CLOSEST_ULL(rate, prate);
		ni = clamp(ni, 1U, 256U);
	}

	if (readl(pll_clk->pllcr0_reg) & CPG_PLLxCR0_KICK)
		return -EBUSY;

	cpg_reg_modify(pll_clk->pllcr0_reg, CPG_PLLxCR0_NI8,
		       FIELD_PREP(CPG_PLLxCR0_NI8, ni - 1));
	if (cr0 & CPG_PLLxCR0_SSMODE_FM)
		cpg_reg_modify(pll_clk->pllcr1_reg, CPG_PLLxCR1_NF25,
			       FIELD_PREP(CPG_PLLxCR1_NF25, nf));

	/*
	 * Set KICK bit in PLLxCR0 to update hardware setting and wait for
	 * clock change completion.
	 */
	cpg_reg_modify(pll_clk->pllcr0_reg, 0, CPG_PLLxCR0_KICK);

	/*
	 * Note: There is no HW information about the worst case latency.
	 *
	 * Using experimental measurements, it seems that no more than
	 * ~45 µs are needed, independently of the CPU rate.
	 * Since this value might be dependent on external xtal rate, pll
	 * rate or even the other emulation clocks rate, use 1000 as a
	 * "super" safe value.
	 */
	return readl_poll_timeout(pll_clk->pllecr_reg, val,
				  val & pll_clk->pllecr_pllst_mask, 0, 1000);
}

static const struct clk_ops cpg_pll_f8_25_clk_ops = {
	.recalc_rate = cpg_pll_8_25_clk_recalc_rate,
};

static const struct clk_ops cpg_pll_v8_25_clk_ops = {
	.recalc_rate = cpg_pll_8_25_clk_recalc_rate,
	.determine_rate = cpg_pll_8_25_clk_determine_rate,
	.set_rate = cpg_pll_8_25_clk_set_rate,
};
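
/*
 * cpg_pll_f8_25_clk_ops only reports the current rate, while
 * cpg_pll_v8_25_clk_ops additionally allows the PLL to be reprogrammed
 * at runtime via determine_rate/set_rate.
 */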

static unsigned long cpg_pll_9_24_clk_recalc_rate(struct clk_hw *hw,
						  unsigned long parent_rate)
{
	struct cpg_pll_clk *pll_clk = to_pll_clk(hw);
	u32 cr0 = readl(pll_clk->pllcr0_reg);
	unsigned int ni, nf;
	unsigned long rate;

	ni = FIELD_GET(CPG_PLLxCR0_NI9, cr0) + 1;
	rate = parent_rate * ni;
	if (cr0 & CPG_PLLxCR0_SSMODE_FM) {
		nf = FIELD_GET(CPG_PLLxCR1_NF24, readl(pll_clk->pllcr1_reg));
		rate += mul_u64_u32_shr(parent_rate, nf, 24);
	} else {
		rate *= 2;
	}

	return rate;
}

static const struct clk_ops cpg_pll_f9_24_clk_ops = {
	.recalc_rate = cpg_pll_9_24_clk_recalc_rate,
};

static struct clk * __init cpg_pll_clk_register(const char *name,
						const char *parent_name,
						void __iomem *base,
						unsigned int index,
						const struct clk_ops *ops)
{
	static const struct { u16 cr0, cr1; } pll_cr_offsets[] __initconst = {
		[1 - 1] = { CPG_PLL1CR0, CPG_PLL1CR1 },
		[2 - 1] = { CPG_PLL2CR0, CPG_PLL2CR1 },
		[3 - 1] = { CPG_PLL3CR0, CPG_PLL3CR1 },
		[4 - 1] = { CPG_PLL4CR0, CPG_PLL4CR1 },
		[6 - 1] = { CPG_PLL6CR0, CPG_PLL6CR1 },
	};
	struct clk_init_data init = {};
	struct cpg_pll_clk *pll_clk;
	struct clk *clk;

	pll_clk = kzalloc(sizeof(*pll_clk), GFP_KERNEL);
	if (!pll_clk)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = ops;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	pll_clk->hw.init = &init;
	pll_clk->pllcr0_reg = base + pll_cr_offsets[index - 1].cr0;
	pll_clk->pllcr1_reg = base + pll_cr_offsets[index - 1].cr1;
	pll_clk->pllecr_reg = base + CPG_PLLECR;
	pll_clk->pllecr_pllst_mask = CPG_PLLECR_PLLST(index);

	clk = clk_register(NULL, &pll_clk->hw);
	if (IS_ERR(clk))
		kfree(pll_clk);

	return clk;
}

/*
 * Z0 Clock & Z1 Clock
 */
#define CPG_FRQCRB		0x00000804
#define CPG_FRQCRB_KICK		BIT(31)
#define CPG_FRQCRC0		0x00000808
#define CPG_FRQCRC1		0x000008e0

struct cpg_z_clk {
	struct clk_hw hw;
	void __iomem *reg;
	void __iomem *kick_reg;
	unsigned long max_rate;		/* Maximum rate for normal mode */
	unsigned int fixed_div;
	u32 mask;
};

#define to_z_clk(_hw)	container_of(_hw, struct cpg_z_clk, hw)
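
/*
 * Each Z clock uses a 5-bit field in FRQCRC0/FRQCRC1 holding (32 - mult),
 * so the resulting rate is parent * mult / 32 / fixed_div
 * (see cpg_z_clk_recalc_rate() below).
 */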

static unsigned long cpg_z_clk_recalc_rate(struct clk_hw *hw,
					   unsigned long parent_rate)
{
	struct cpg_z_clk *zclk = to_z_clk(hw);
	unsigned int mult;
	u32 val;

	val = readl(zclk->reg) & zclk->mask;
	mult = 32 - (val >> __ffs(zclk->mask));

	return DIV_ROUND_CLOSEST_ULL((u64)parent_rate * mult,
				     32 * zclk->fixed_div);
}

static int cpg_z_clk_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	struct cpg_z_clk *zclk = to_z_clk(hw);
	unsigned int min_mult, max_mult, mult;
	unsigned long rate, prate;

	rate = min(req->rate, req->max_rate);
	if (rate <= zclk->max_rate) {
		/* Set parent rate to initial value for normal modes */
		prate = zclk->max_rate;
	} else {
		/* Set increased parent rate for boost modes */
		prate = rate;
	}
	req->best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw),
						  prate * zclk->fixed_div);

	prate = req->best_parent_rate / zclk->fixed_div;
	min_mult = max(div64_ul(req->min_rate * 32ULL, prate), 1ULL);
	max_mult = min(div64_ul(req->max_rate * 32ULL, prate), 32ULL);
	if (max_mult < min_mult)
		return -EINVAL;

	mult = DIV_ROUND_CLOSEST_ULL(rate * 32ULL, prate);
	mult = clamp(mult, min_mult, max_mult);

	req->rate = DIV_ROUND_CLOSEST_ULL((u64)prate * mult, 32);

	return 0;
}

static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	struct cpg_z_clk *zclk = to_z_clk(hw);
	unsigned int mult;
	unsigned int i;

	mult = DIV64_U64_ROUND_CLOSEST(rate * 32ULL * zclk->fixed_div,
				       parent_rate);
	mult = clamp(mult, 1U, 32U);

	if (readl(zclk->kick_reg) & CPG_FRQCRB_KICK)
		return -EBUSY;

	cpg_reg_modify(zclk->reg, zclk->mask, (32 - mult) << __ffs(zclk->mask));

	/*
	 * Set KICK bit in FRQCRB to update hardware setting and wait for
	 * clock change completion.
	 */
	cpg_reg_modify(zclk->kick_reg, 0, CPG_FRQCRB_KICK);

	/*
	 * Note: There is no HW information about the worst case latency.
	 *
	 * Using experimental measurements, it seems that no more than
	 * ~10 iterations are needed, independently of the CPU rate.
	 * Since this value might be dependent on external xtal rate, pll1
	 * rate or even the other emulation clocks rate, use 1000 as a
	 * "super" safe value.
	 */
	for (i = 1000; i; i--) {
		if (!(readl(zclk->kick_reg) & CPG_FRQCRB_KICK))
			return 0;

		cpu_relax();
	}

	return -ETIMEDOUT;
}

static const struct clk_ops cpg_z_clk_ops = {
	.recalc_rate = cpg_z_clk_recalc_rate,
	.determine_rate = cpg_z_clk_determine_rate,
	.set_rate = cpg_z_clk_set_rate,
};
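
/*
 * Z clocks are registered with CLK_SET_RATE_PARENT (see cpg_z_clk_register()
 * below), so cpg_z_clk_determine_rate() can raise best_parent_rate when a
 * boost request exceeds max_rate.
 */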

static struct clk * __init cpg_z_clk_register(const char *name,
					      const char *parent_name,
					      void __iomem *reg,
					      unsigned int div,
					      unsigned int offset)
{
	struct clk_init_data init = {};
	struct cpg_z_clk *zclk;
	struct clk *clk;

	zclk = kzalloc(sizeof(*zclk), GFP_KERNEL);
	if (!zclk)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &cpg_z_clk_ops;
	init.flags = CLK_SET_RATE_PARENT;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	if (offset < 32) {
		zclk->reg = reg + CPG_FRQCRC0;
	} else {
		zclk->reg = reg + CPG_FRQCRC1;
		offset -= 32;
	}
	zclk->kick_reg = reg + CPG_FRQCRB;
	zclk->hw.init = &init;
	zclk->mask = GENMASK(offset + 4, offset);
	zclk->fixed_div = div;		/* PLLVCO x 1/div x SYS-CPU divider */

	clk = clk_register(NULL, &zclk->hw);
	if (IS_ERR(clk)) {
		kfree(zclk);
		return clk;
	}

	zclk->max_rate = clk_hw_get_rate(clk_hw_get_parent(&zclk->hw)) /
			 zclk->fixed_div;

	return clk;
}

static const struct clk_div_table cpg_rpcsrc_div_table[] = {
	{ 0, 4 }, { 1, 6 }, { 2, 5 }, { 3, 6 }, { 0, 0 },
};
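
/*
 * RPCSRC divider encoding: register value 0 -> /4, 1 -> /6, 2 -> /5, 3 -> /6;
 * the { 0, 0 } entry terminates the table for clk_register_divider_table().
 */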

struct clk * __init rcar_gen4_cpg_clk_register(struct device *dev,
	const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
	struct clk **clks, void __iomem *base,
	struct raw_notifier_head *notifiers)
{
	const struct clk *parent;
	unsigned int mult = 1;
	unsigned int div = 1;
	u32 value;

	parent = clks[core->parent & 0xffff];	/* some types use high bits */
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	switch (core->type) {
	case CLK_TYPE_GEN4_MAIN:
		div = cpg_pll_config->extal_div;
		break;

	case CLK_TYPE_GEN4_PLL1:
		mult = cpg_pll_config->pll1_mult;
		div = cpg_pll_config->pll1_div;
		break;

	case CLK_TYPE_GEN4_PLL5:
		mult = cpg_pll_config->pll5_mult;
		div = cpg_pll_config->pll5_div;
		break;

	case CLK_TYPE_GEN4_PLL2X_3X:
		value = readl(base + core->offset);
		mult = (FIELD_GET(CPG_PLLxCR_STC, value) + 1) * 2;
		break;

	case CLK_TYPE_GEN4_PLL_F8_25:
		return cpg_pll_clk_register(core->name, __clk_get_name(parent),
					    base, core->offset,
					    &cpg_pll_f8_25_clk_ops);

	case CLK_TYPE_GEN4_PLL_V8_25:
		return cpg_pll_clk_register(core->name, __clk_get_name(parent),
					    base, core->offset,
					    &cpg_pll_v8_25_clk_ops);

	case CLK_TYPE_GEN4_PLL_V9_24:
		/* Variable fractional 9.24 is not yet supported, using fixed */
		fallthrough;
	case CLK_TYPE_GEN4_PLL_F9_24:
		return cpg_pll_clk_register(core->name, __clk_get_name(parent),
					    base, core->offset,
					    &cpg_pll_f9_24_clk_ops);

	case CLK_TYPE_GEN4_Z:
		return cpg_z_clk_register(core->name, __clk_get_name(parent),
					  base, core->div, core->offset);

	case CLK_TYPE_GEN4_SDSRC:
		value = readl(base + CPG_SD0CKCR1);
		div = FIELD_GET(CPG_SD0CKCR1_SDSRC_SEL, value) + 4;
		break;

	case CLK_TYPE_GEN4_SDH:
		return cpg_sdh_clk_register(core->name, base + core->offset,
					    __clk_get_name(parent), notifiers);

	case CLK_TYPE_GEN4_SD:
		return cpg_sd_clk_register(core->name, base + core->offset,
					   __clk_get_name(parent));

	case CLK_TYPE_GEN4_MDSEL:
		/*
		 * Clock selectable between two parents and two fixed dividers
		 * using a mode pin
		 */
		if (cpg_mode & BIT(core->offset)) {
			div = core->div & 0xffff;
		} else {
			parent = clks[core->parent >> 16];
			if (IS_ERR(parent))
				return ERR_CAST(parent);
			div = core->div >> 16;
		}
		mult = 1;
		break;

	case CLK_TYPE_GEN4_OSC:
		/*
		 * Clock combining OSC EXTAL predivider and a fixed divider
		 */
		div = cpg_pll_config->osc_prediv * core->div;
		break;

	case CLK_TYPE_GEN4_RPCSRC:
		return clk_register_divider_table(NULL, core->name,
						  __clk_get_name(parent), 0,
						  base + CPG_RPCCKCR, 3, 2, 0,
						  cpg_rpcsrc_div_table,
						  &cpg_lock);

	case CLK_TYPE_GEN4_RPC:
		return cpg_rpc_clk_register(core->name, base + CPG_RPCCKCR,
					    __clk_get_name(parent), notifiers);

	case CLK_TYPE_GEN4_RPCD2:
		return cpg_rpcd2_clk_register(core->name, base + CPG_RPCCKCR,
					      __clk_get_name(parent));

	default:
		return ERR_PTR(-EINVAL);
	}

	return clk_register_fixed_factor(NULL, core->name,
					 __clk_get_name(parent), 0, mult, div);
}

int __init rcar_gen4_cpg_init(const struct rcar_gen4_cpg_pll_config *config,
			      unsigned int clk_extalr, u32 mode)
{
	cpg_pll_config = config;
	cpg_clk_extalr = clk_extalr;
	cpg_mode = mode;

	spin_lock_init(&cpg_lock);

	return 0;
}