// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
 *
 * Authors:
 *   Serge Semin <Sergey.Semin@baikalelectronics.ru>
 *   Dmitry Dunaev <dmitry.dunaev@baikalelectronics.ru>
 *
 * Baikal-T1 CCU PLL interface driver
 */

#define pr_fmt(fmt) "bt1-ccu-pll: " fmt

#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/limits.h>
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/slab.h>
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/spinlock.h>
#include <linux/regmap.h>
#include <linux/iopoll.h>
#include <linux/time64.h>
#include <linux/rational.h>
#include <linux/debugfs.h>

#include "ccu-pll.h"

#define CCU_PLL_CTL			0x000
#define CCU_PLL_CTL_EN			BIT(0)
#define CCU_PLL_CTL_RST			BIT(1)
#define CCU_PLL_CTL_CLKR_FLD		2
#define CCU_PLL_CTL_CLKR_MASK		GENMASK(7, CCU_PLL_CTL_CLKR_FLD)
#define CCU_PLL_CTL_CLKF_FLD		8
#define CCU_PLL_CTL_CLKF_MASK		GENMASK(20, CCU_PLL_CTL_CLKF_FLD)
#define CCU_PLL_CTL_CLKOD_FLD		21
#define CCU_PLL_CTL_CLKOD_MASK		GENMASK(24, CCU_PLL_CTL_CLKOD_FLD)
#define CCU_PLL_CTL_BYPASS		BIT(30)
#define CCU_PLL_CTL_LOCK		BIT(31)
#define CCU_PLL_CTL1			0x004
#define CCU_PLL_CTL1_BWADJ_FLD		3
#define CCU_PLL_CTL1_BWADJ_MASK		GENMASK(14, CCU_PLL_CTL1_BWADJ_FLD)

#define CCU_PLL_LOCK_CHECK_RETRIES	50

#define CCU_PLL_NR_MAX \
	((CCU_PLL_CTL_CLKR_MASK >> CCU_PLL_CTL_CLKR_FLD) + 1)
#define CCU_PLL_NF_MAX \
	((CCU_PLL_CTL_CLKF_MASK >> (CCU_PLL_CTL_CLKF_FLD + 1)) + 1)
#define CCU_PLL_OD_MAX \
	((CCU_PLL_CTL_CLKOD_MASK >> CCU_PLL_CTL_CLKOD_FLD) + 1)
#define CCU_PLL_NB_MAX \
	((CCU_PLL_CTL1_BWADJ_MASK >> CCU_PLL_CTL1_BWADJ_FLD) + 1)
#define CCU_PLL_FDIV_MIN		427000UL
#define CCU_PLL_FDIV_MAX		3500000000UL
#define CCU_PLL_FOUT_MIN		200000000UL
#define CCU_PLL_FOUT_MAX		2500000000UL
#define CCU_PLL_FVCO_MIN		700000000UL
#define CCU_PLL_FVCO_MAX		3500000000UL
#define CCU_PLL_CLKOD_FACTOR		2

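/*
 * Note: per the helpers below the PLL output is Fout = Fref / NR * NF / OD,
 * with the VCO running at Fvco = Fref / NR * NF, which the constants above
 * constrain to the 700MHz - 3.5GHz range.
 */
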
static inline unsigned long ccu_pll_lock_delay_us(unsigned long ref_clk,
						  unsigned long nr)
{
	u64 us = 500ULL * nr * USEC_PER_SEC;

	do_div(us, ref_clk);

	return us;
}

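/*
 * For instance (illustrative numbers only): with a 25 MHz reference and
 * NR = 1 the helper above gives 500 * 1 * 1000000 / 25000000 = 20 us per
 * lock-check poll interval.
 */
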
static inline unsigned long ccu_pll_calc_freq(unsigned long ref_clk,
					      unsigned long nr,
					      unsigned long nf,
					      unsigned long od)
{
	u64 tmp = ref_clk;

	do_div(tmp, nr);
	tmp *= nf;
	do_div(tmp, od);

	return tmp;
}

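/*
 * E.g. (illustrative only): Fref = 25 MHz, NR = 1, NF = 96, OD = 2 yields
 * 25000000 / 1 * 96 / 2 = 1.2 GHz.
 */
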
static int ccu_pll_reset(struct ccu_pll *pll, unsigned long ref_clk,
			 unsigned long nr)
{
	unsigned long ud, ut;
	u32 val;

	ud = ccu_pll_lock_delay_us(ref_clk, nr);
	ut = ud * CCU_PLL_LOCK_CHECK_RETRIES;

	regmap_update_bits(pll->sys_regs, pll->reg_ctl,
			   CCU_PLL_CTL_RST, CCU_PLL_CTL_RST);

	return regmap_read_poll_timeout_atomic(pll->sys_regs, pll->reg_ctl, val,
					       val & CCU_PLL_CTL_LOCK, ud, ut);
}

static int ccu_pll_enable(struct clk_hw *hw)
{
	struct clk_hw *parent_hw = clk_hw_get_parent(hw);
	struct ccu_pll *pll = to_ccu_pll(hw);
	unsigned long flags;
	u32 val = 0;
	int ret;

	if (!parent_hw) {
		pr_err("Can't enable '%s' with no parent", clk_hw_get_name(hw));
		return -EINVAL;
	}

	regmap_read(pll->sys_regs, pll->reg_ctl, &val);
	if (val & CCU_PLL_CTL_EN)
		return 0;

	spin_lock_irqsave(&pll->lock, flags);
	regmap_write(pll->sys_regs, pll->reg_ctl, val | CCU_PLL_CTL_EN);
	ret = ccu_pll_reset(pll, clk_hw_get_rate(parent_hw),
			    FIELD_GET(CCU_PLL_CTL_CLKR_MASK, val) + 1);
	spin_unlock_irqrestore(&pll->lock, flags);
	if (ret)
		pr_err("PLL '%s' reset timed out\n", clk_hw_get_name(hw));

	return ret;
}

static void ccu_pll_disable(struct clk_hw *hw)
{
	struct ccu_pll *pll = to_ccu_pll(hw);
	unsigned long flags;

	spin_lock_irqsave(&pll->lock, flags);
	regmap_update_bits(pll->sys_regs, pll->reg_ctl, CCU_PLL_CTL_EN, 0);
	spin_unlock_irqrestore(&pll->lock, flags);
}

static int ccu_pll_is_enabled(struct clk_hw *hw)
{
	struct ccu_pll *pll = to_ccu_pll(hw);
	u32 val = 0;

	regmap_read(pll->sys_regs, pll->reg_ctl, &val);

	return !!(val & CCU_PLL_CTL_EN);
}

static unsigned long ccu_pll_recalc_rate(struct clk_hw *hw,
					 unsigned long parent_rate)
{
	struct ccu_pll *pll = to_ccu_pll(hw);
	unsigned long nr, nf, od;
	u32 val = 0;

	regmap_read(pll->sys_regs, pll->reg_ctl, &val);
	nr = FIELD_GET(CCU_PLL_CTL_CLKR_MASK, val) + 1;
	nf = FIELD_GET(CCU_PLL_CTL_CLKF_MASK, val) + 1;
	od = FIELD_GET(CCU_PLL_CTL_CLKOD_MASK, val) + 1;

	return ccu_pll_calc_freq(parent_rate, nr, nf, od);
}

static void ccu_pll_calc_factors(unsigned long rate, unsigned long parent_rate,
				 unsigned long *nr, unsigned long *nf,
				 unsigned long *od)
{
	unsigned long err, freq, min_err = ULONG_MAX;
	unsigned long num, denom, n1, d1, nri;
	unsigned long nr_max, nf_max, od_max;

	/*
	 * Make sure the PLL is working with a valid input signal (Fdiv). If
	 * you want to speed the function up, just reduce CCU_PLL_NR_MAX.
	 * This will cause a worse approximation though.
	 */
	nri = (parent_rate / CCU_PLL_FDIV_MAX) + 1;
	nr_max = min(parent_rate / CCU_PLL_FDIV_MIN, CCU_PLL_NR_MAX);

	/*
	 * Find the closest [nr;nf;od] vector taking into account the
	 * following limitations: 1) 700MHz <= Fvco <= 3.5GHz, 2) PLL Od is
	 * either 1 or an even number within the acceptable range (alas, 1
	 * is also excluded by the next loop).
	 */
	for (; nri <= nr_max; ++nri) {
		/* Use the Od factor to fulfill limitation 2). */
		num = CCU_PLL_CLKOD_FACTOR * rate;
		denom = parent_rate / nri;

		/*
		 * Make sure Fvco is within the acceptable range to fulfill
		 * condition 1). Note that due to the CCU_PLL_CLKOD_FACTOR
		 * value the actual upper limit is also divided by that
		 * factor. That's not a big problem for us since practically
		 * there is no need for clocks at such a high frequency.
		 */
		nf_max = min(CCU_PLL_FVCO_MAX / denom, CCU_PLL_NF_MAX);
		od_max = CCU_PLL_OD_MAX / CCU_PLL_CLKOD_FACTOR;

		/*
		 * Bypass the out-of-bound values, which can't be properly
		 * handled by the rational fraction approximation algorithm.
		 */
		if (num / denom >= nf_max) {
			n1 = nf_max;
			d1 = 1;
		} else if (denom / num >= od_max) {
			n1 = 1;
			d1 = od_max;
		} else {
			rational_best_approximation(num, denom, nf_max, od_max,
						    &n1, &d1);
		}

		/* Select the best approximation of the target rate. */
		freq = ccu_pll_calc_freq(parent_rate, nri, n1, d1);
		err = abs((int64_t)freq - num);
		if (err < min_err) {
			min_err = err;

			*nr = nri;
			*nf = n1;
			*od = CCU_PLL_CLKOD_FACTOR * d1;
		}
	}
}

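/*
 * Illustrative example (not taken from the hardware docs): asking for
 * 1.2 GHz from a 25 MHz reference gives num = 2400000000 and
 * denom = 25000000 on the first iteration, which the rational
 * approximation reduces to 96/1, i.e. [nr;nf;od] = [1;96;2] and an exact
 * 25000000 / 1 * 96 / 2 = 1.2 GHz output.
 */
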
static long ccu_pll_round_rate(struct clk_hw *hw, unsigned long rate,
			       unsigned long *parent_rate)
{
	unsigned long nr = 1, nf = 1, od = 1;

	ccu_pll_calc_factors(rate, *parent_rate, &nr, &nf, &od);

	return ccu_pll_calc_freq(*parent_rate, nr, nf, od);
}

/*
 * This method is used for PLLs, which support the on-the-fly dividers
 * adjustment. So there is no need to gate such clocks.
 */
static int ccu_pll_set_rate_reset(struct clk_hw *hw, unsigned long rate,
				  unsigned long parent_rate)
{
	struct ccu_pll *pll = to_ccu_pll(hw);
	unsigned long nr, nf, od;
	unsigned long flags;
	u32 mask, val;
	int ret;

	ccu_pll_calc_factors(rate, parent_rate, &nr, &nf, &od);

	mask = CCU_PLL_CTL_CLKR_MASK | CCU_PLL_CTL_CLKF_MASK |
	       CCU_PLL_CTL_CLKOD_MASK;
	val = FIELD_PREP(CCU_PLL_CTL_CLKR_MASK, nr - 1) |
	      FIELD_PREP(CCU_PLL_CTL_CLKF_MASK, nf - 1) |
	      FIELD_PREP(CCU_PLL_CTL_CLKOD_MASK, od - 1);

	spin_lock_irqsave(&pll->lock, flags);
	regmap_update_bits(pll->sys_regs, pll->reg_ctl, mask, val);
	ret = ccu_pll_reset(pll, parent_rate, nr);
	spin_unlock_irqrestore(&pll->lock, flags);
	if (ret)
		pr_err("PLL '%s' reset timed out\n", clk_hw_get_name(hw));

	return ret;
}

/*
 * This method is used for PLLs, which don't support the on-the-fly dividers
 * adjustment. So the corresponding clocks are supposed to be gated first.
 */
static int ccu_pll_set_rate_norst(struct clk_hw *hw, unsigned long rate,
				  unsigned long parent_rate)
{
	struct ccu_pll *pll = to_ccu_pll(hw);
	unsigned long nr, nf, od;
	unsigned long flags;
	u32 mask, val;

	ccu_pll_calc_factors(rate, parent_rate, &nr, &nf, &od);

	/*
	 * Disable the PLL if it was enabled by default or left enabled by the
	 * system bootloader.
	 */
	mask = CCU_PLL_CTL_CLKR_MASK | CCU_PLL_CTL_CLKF_MASK |
	       CCU_PLL_CTL_CLKOD_MASK | CCU_PLL_CTL_EN;
	val = FIELD_PREP(CCU_PLL_CTL_CLKR_MASK, nr - 1) |
	      FIELD_PREP(CCU_PLL_CTL_CLKF_MASK, nf - 1) |
	      FIELD_PREP(CCU_PLL_CTL_CLKOD_MASK, od - 1);

	spin_lock_irqsave(&pll->lock, flags);
	regmap_update_bits(pll->sys_regs, pll->reg_ctl, mask, val);
	spin_unlock_irqrestore(&pll->lock, flags);

	return 0;
}

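/*
 * Note: the norst variant doesn't wait for the PLL to re-lock. Clocks using
 * it are registered with CLK_SET_RATE_GATE, so the framework gates them
 * before the rate change and the lock is awaited on the next
 * ccu_pll_enable() call.
 */
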
#ifdef CONFIG_DEBUG_FS

struct ccu_pll_dbgfs_bit {
	struct ccu_pll *pll;
	const char *name;
	unsigned int reg;
	u32 mask;
};

struct ccu_pll_dbgfs_fld {
	struct ccu_pll *pll;
	const char *name;
	unsigned int reg;
	unsigned int lsb;
	u32 mask;
	u32 min;
	u32 max;
};

#define CCU_PLL_DBGFS_BIT_ATTR(_name, _reg, _mask)	\
	{						\
		.name = _name,				\
		.reg = _reg,				\
		.mask = _mask				\
	}

#define CCU_PLL_DBGFS_FLD_ATTR(_name, _reg, _lsb, _mask, _min, _max)	\
	{								\
		.name = _name,						\
		.reg = _reg,						\
		.lsb = _lsb,						\
		.mask = _mask,						\
		.min = _min,						\
		.max = _max						\
	}

static const struct ccu_pll_dbgfs_bit ccu_pll_bits[] = {
	CCU_PLL_DBGFS_BIT_ATTR("pll_en", CCU_PLL_CTL, CCU_PLL_CTL_EN),
	CCU_PLL_DBGFS_BIT_ATTR("pll_rst", CCU_PLL_CTL, CCU_PLL_CTL_RST),
	CCU_PLL_DBGFS_BIT_ATTR("pll_bypass", CCU_PLL_CTL, CCU_PLL_CTL_BYPASS),
	CCU_PLL_DBGFS_BIT_ATTR("pll_lock", CCU_PLL_CTL, CCU_PLL_CTL_LOCK)
};

#define CCU_PLL_DBGFS_BIT_NUM	ARRAY_SIZE(ccu_pll_bits)

static const struct ccu_pll_dbgfs_fld ccu_pll_flds[] = {
	CCU_PLL_DBGFS_FLD_ATTR("pll_nr", CCU_PLL_CTL, CCU_PLL_CTL_CLKR_FLD,
			       CCU_PLL_CTL_CLKR_MASK, 1, CCU_PLL_NR_MAX),
	CCU_PLL_DBGFS_FLD_ATTR("pll_nf", CCU_PLL_CTL, CCU_PLL_CTL_CLKF_FLD,
			       CCU_PLL_CTL_CLKF_MASK, 1, CCU_PLL_NF_MAX),
	CCU_PLL_DBGFS_FLD_ATTR("pll_od", CCU_PLL_CTL, CCU_PLL_CTL_CLKOD_FLD,
			       CCU_PLL_CTL_CLKOD_MASK, 1, CCU_PLL_OD_MAX),
	CCU_PLL_DBGFS_FLD_ATTR("pll_nb", CCU_PLL_CTL1, CCU_PLL_CTL1_BWADJ_FLD,
			       CCU_PLL_CTL1_BWADJ_MASK, 1, CCU_PLL_NB_MAX)
};

#define CCU_PLL_DBGFS_FLD_NUM	ARRAY_SIZE(ccu_pll_flds)

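/*
 * The attributes above are exposed by ccu_pll_debug_init() as per-clock
 * debugfs files (pll_en, pll_rst, pll_bypass, pll_lock, pll_nr, pll_nf,
 * pll_od, pll_nb) under the common clock framework debugfs directory.
 */
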
/*
 * It can be dangerous to change the PLL settings behind the clock framework's
 * back, therefore we don't provide any kernel config based compile time
 * option for this feature to be enabled.
 */
#undef CCU_PLL_ALLOW_WRITE_DEBUGFS
#ifdef CCU_PLL_ALLOW_WRITE_DEBUGFS

static int ccu_pll_dbgfs_bit_set(void *priv, u64 val)
{
	const struct ccu_pll_dbgfs_bit *bit = priv;
	struct ccu_pll *pll = bit->pll;
	unsigned long flags;

	spin_lock_irqsave(&pll->lock, flags);
	regmap_update_bits(pll->sys_regs, pll->reg_ctl + bit->reg,
			   bit->mask, val ? bit->mask : 0);
	spin_unlock_irqrestore(&pll->lock, flags);

	return 0;
}

static int ccu_pll_dbgfs_fld_set(void *priv, u64 val)
{
	struct ccu_pll_dbgfs_fld *fld = priv;
	struct ccu_pll *pll = fld->pll;
	unsigned long flags;
	u32 data;

	val = clamp_t(u64, val, fld->min, fld->max);
	data = ((val - 1) << fld->lsb) & fld->mask;

	spin_lock_irqsave(&pll->lock, flags);
	regmap_update_bits(pll->sys_regs, pll->reg_ctl + fld->reg, fld->mask,
			   data);
	spin_unlock_irqrestore(&pll->lock, flags);

	return 0;
}

#define ccu_pll_dbgfs_mode	0644

#else /* !CCU_PLL_ALLOW_WRITE_DEBUGFS */

#define ccu_pll_dbgfs_bit_set	NULL
#define ccu_pll_dbgfs_fld_set	NULL
#define ccu_pll_dbgfs_mode	0444

#endif /* !CCU_PLL_ALLOW_WRITE_DEBUGFS */

static int ccu_pll_dbgfs_bit_get(void *priv, u64 *val)
{
	struct ccu_pll_dbgfs_bit *bit = priv;
	struct ccu_pll *pll = bit->pll;
	u32 data = 0;

	regmap_read(pll->sys_regs, pll->reg_ctl + bit->reg, &data);
	*val = !!(data & bit->mask);

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(ccu_pll_dbgfs_bit_fops,
	ccu_pll_dbgfs_bit_get, ccu_pll_dbgfs_bit_set, "%llu\n");

static int ccu_pll_dbgfs_fld_get(void *priv, u64 *val)
{
	struct ccu_pll_dbgfs_fld *fld = priv;
	struct ccu_pll *pll = fld->pll;
	u32 data = 0;

	regmap_read(pll->sys_regs, pll->reg_ctl + fld->reg, &data);
	*val = ((data & fld->mask) >> fld->lsb) + 1;

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(ccu_pll_dbgfs_fld_fops,
	ccu_pll_dbgfs_fld_get, ccu_pll_dbgfs_fld_set, "%llu\n");

static void ccu_pll_debug_init(struct clk_hw *hw, struct dentry *dentry)
{
	struct ccu_pll *pll = to_ccu_pll(hw);
	struct ccu_pll_dbgfs_bit *bits;
	struct ccu_pll_dbgfs_fld *flds;
	int idx;

	bits = kcalloc(CCU_PLL_DBGFS_BIT_NUM, sizeof(*bits), GFP_KERNEL);
	if (!bits)
		return;

	for (idx = 0; idx < CCU_PLL_DBGFS_BIT_NUM; ++idx) {
		bits[idx] = ccu_pll_bits[idx];
		bits[idx].pll = pll;

		debugfs_create_file_unsafe(bits[idx].name, ccu_pll_dbgfs_mode,
					   dentry, &bits[idx],
					   &ccu_pll_dbgfs_bit_fops);
	}

	flds = kcalloc(CCU_PLL_DBGFS_FLD_NUM, sizeof(*flds), GFP_KERNEL);
	if (!flds)
		return;

	for (idx = 0; idx < CCU_PLL_DBGFS_FLD_NUM; ++idx) {
		flds[idx] = ccu_pll_flds[idx];
		flds[idx].pll = pll;

		debugfs_create_file_unsafe(flds[idx].name, ccu_pll_dbgfs_mode,
					   dentry, &flds[idx],
					   &ccu_pll_dbgfs_fld_fops);
	}
}

#else /* !CONFIG_DEBUG_FS */

#define ccu_pll_debug_init NULL

#endif /* !CONFIG_DEBUG_FS */

static const struct clk_ops ccu_pll_gate_to_set_ops = {
	.enable = ccu_pll_enable,
	.disable = ccu_pll_disable,
	.is_enabled = ccu_pll_is_enabled,
	.recalc_rate = ccu_pll_recalc_rate,
	.round_rate = ccu_pll_round_rate,
	.set_rate = ccu_pll_set_rate_norst,
	.debug_init = ccu_pll_debug_init
};

static const struct clk_ops ccu_pll_straight_set_ops = {
	.enable = ccu_pll_enable,
	.disable = ccu_pll_disable,
	.is_enabled = ccu_pll_is_enabled,
	.recalc_rate = ccu_pll_recalc_rate,
	.round_rate = ccu_pll_round_rate,
	.set_rate = ccu_pll_set_rate_reset,
	.debug_init = ccu_pll_debug_init
};

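/*
 * The gate_to_set ops are picked for clocks registered with the
 * CLK_SET_RATE_GATE flag (dividers can only be changed while the PLL is
 * gated), while the straight_set ops serve PLLs that can be re-tuned on
 * the fly.
 */
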
struct ccu_pll *ccu_pll_hw_register(const struct ccu_pll_init_data *pll_init)
{
	struct clk_parent_data parent_data = { };
	struct clk_init_data hw_init = { };
	struct ccu_pll *pll;
	int ret;

	if (!pll_init)
		return ERR_PTR(-EINVAL);

	pll = kzalloc(sizeof(*pll), GFP_KERNEL);
	if (!pll)
		return ERR_PTR(-ENOMEM);

	/*
	 * Note since the Baikal-T1 System Controller registers are MMIO-backed
	 * we won't check the regmap IO operations return status, because it
	 * must be zero anyway.
	 */
	pll->hw.init = &hw_init;
	pll->reg_ctl = pll_init->base + CCU_PLL_CTL;
	pll->reg_ctl1 = pll_init->base + CCU_PLL_CTL1;
	pll->sys_regs = pll_init->sys_regs;
	pll->id = pll_init->id;
	spin_lock_init(&pll->lock);

	hw_init.name = pll_init->name;
	hw_init.flags = pll_init->flags;

	if (hw_init.flags & CLK_SET_RATE_GATE)
		hw_init.ops = &ccu_pll_gate_to_set_ops;
	else
		hw_init.ops = &ccu_pll_straight_set_ops;

	if (!pll_init->parent_name) {
		ret = -EINVAL;
		goto err_free_pll;
	}
	parent_data.fw_name = pll_init->parent_name;
	hw_init.parent_data = &parent_data;
	hw_init.num_parents = 1;

	ret = of_clk_hw_register(pll_init->np, &pll->hw);
	if (ret)
		goto err_free_pll;

	return pll;

err_free_pll:
	kfree(pll);

	return ERR_PTR(ret);
}

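/*
 * Hypothetical usage sketch (not part of this file): a parent CCU driver
 * would fill a ccu_pll_init_data descriptor and register the PLL roughly
 * like so:
 *
 *	struct ccu_pll_init_data init = {
 *		.np = np,			// CCU device-tree node
 *		.name = "cpu_pll",
 *		.parent_name = "ref_clk",	// fw_name of the reference clock
 *		.base = 0x000,			// PLL CTL registers offset
 *		.sys_regs = sys_regs,		// System Controller regmap
 *		.id = 0,
 *		.flags = CLK_SET_RATE_GATE,
 *	};
 *	struct ccu_pll *pll = ccu_pll_hw_register(&init);
 *
 * The field names match those dereferenced above; the concrete values are
 * made up for illustration only.
 */
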
void ccu_pll_hw_unregister(struct ccu_pll *pll)
{
	clk_hw_unregister(&pll->hw);

	kfree(pll);
}