// SPDX-License-Identifier: GPL-2.0
/*
 * Xilinx 'Clocking Wizard' driver
 *
 * Copyright (C) 2013 - 2021 Xilinx
 *
 * Sören Brinkmann <soren.brinkmann@xilinx.com>
 */

#include <linux/bitfield.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/slab.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/overflow.h>
#include <linux/err.h>
#include <linux/iopoll.h>

#define WZRD_NUM_OUTPUTS		7
#define WZRD_ACLK_MAX_FREQ		250000000UL

#define WZRD_CLK_CFG_REG(v, n)		(0x200 + 0x130 * (v) + 4 * (n))
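/*
 * Illustrative offset arithmetic (example values, not from the original
 * source): with v = 0 the configuration registers start at 0x200 and are
 * packed 4 bytes apart, so WZRD_CLK_CFG_REG(0, 2) = 0x208; with v = 1
 * (the Versal register layout) the block shifts by 0x130, so
 * WZRD_CLK_CFG_REG(1, 0) = 0x330.
 */
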
#define WZRD_CLKOUT0_FRAC_EN		BIT(18)
#define WZRD_CLKFBOUT_1			0
#define WZRD_CLKFBOUT_2			1
#define WZRD_CLKOUT0_1			2
#define WZRD_CLKOUT0_2			3
#define WZRD_DESKEW_2			20
#define WZRD_DIVCLK			21
#define WZRD_CLKFBOUT_4			51
#define WZRD_CLKFBOUT_3			48
#define WZRD_DUTY_CYCLE			2
#define WZRD_O_DIV			4
#define WZRD_CLKFBOUT_FRAC_EN		BIT(1)
#define WZRD_CLKFBOUT_PREDIV2		(BIT(11) | BIT(12) | BIT(9))
#define WZRD_MULT_PREDIV2		(BIT(10) | BIT(9) | BIT(12))
#define WZRD_CLKFBOUT_EDGE		BIT(8)
#define WZRD_P5EN			BIT(13)
#define WZRD_P5EN_SHIFT			13
#define WZRD_P5FEDGE			BIT(15)
#define WZRD_DIVCLK_EDGE		BIT(10)
#define WZRD_P5FEDGE_SHIFT		15
#define WZRD_CLKOUT0_PREDIV2		BIT(11)
#define WZRD_EDGE_SHIFT			8

#define WZRD_CLKFBOUT_MULT_SHIFT	8
#define WZRD_CLKFBOUT_MULT_MASK		(0xff << WZRD_CLKFBOUT_MULT_SHIFT)
#define WZRD_CLKFBOUT_L_SHIFT		0
#define WZRD_CLKFBOUT_H_SHIFT		8
#define WZRD_CLKFBOUT_L_MASK		GENMASK(7, 0)
#define WZRD_CLKFBOUT_H_MASK		GENMASK(15, 8)
#define WZRD_CLKFBOUT_FRAC_SHIFT	16
#define WZRD_CLKFBOUT_FRAC_MASK		(0x3ff << WZRD_CLKFBOUT_FRAC_SHIFT)
#define WZRD_VERSAL_FRAC_MASK		GENMASK(5, 0)
#define WZRD_DIVCLK_DIVIDE_SHIFT	0
#define WZRD_DIVCLK_DIVIDE_MASK		(0xff << WZRD_DIVCLK_DIVIDE_SHIFT)
#define WZRD_CLKOUT_DIVIDE_SHIFT	0
#define WZRD_CLKOUT_DIVIDE_WIDTH	8
#define WZRD_CLKOUT_DIVIDE_MASK		(0xff << WZRD_DIVCLK_DIVIDE_SHIFT)
#define WZRD_CLKOUT_FRAC_SHIFT		8
#define WZRD_CLKOUT_FRAC_MASK		0x3ff
#define WZRD_CLKOUT0_FRAC_MASK		GENMASK(17, 8)

#define WZRD_DR_MAX_INT_DIV_VALUE	255
#define WZRD_DR_STATUS_REG_OFFSET	0x04
#define WZRD_DR_LOCK_BIT_MASK		0x00000001
#define WZRD_DR_INIT_REG_OFFSET		0x25C
#define WZRD_DR_INIT_VERSAL_OFFSET	0x14
#define WZRD_DR_DIV_TO_PHASE_OFFSET	4
#define WZRD_DR_BEGIN_DYNA_RECONF	0x03
#define WZRD_DR_BEGIN_DYNA_RECONF_5_2	0x07
#define WZRD_DR_BEGIN_DYNA_RECONF1_5_2	0x02

#define WZRD_USEC_POLL			10
#define WZRD_TIMEOUT_POLL		1000
#define WZRD_FRAC_GRADIENT		64
#define PREDIV2_MULT			2
/* Divider limits, from UG572 Table 3-4 for Ultrascale+ */
#define WZRD_M_MIN			2
#define WZRD_M_MAX			128
#define WZRD_D_MIN			1
#define WZRD_D_MAX			106
#define WZRD_VCO_MIN			800000000
#define WZRD_VCO_MAX			1600000000
#define WZRD_O_MIN			1
#define WZRD_O_MAX			128
#define VER_WZRD_M_MIN			4
#define VER_WZRD_M_MAX			432
#define VER_WZRD_D_MIN			1
#define VER_WZRD_D_MAX			123
#define VER_WZRD_VCO_MIN		2160000000ULL
#define VER_WZRD_VCO_MAX		4320000000ULL
#define VER_WZRD_O_MIN			2
#define VER_WZRD_O_MAX			511
#define WZRD_MIN_ERR			20000
#define WZRD_FRAC_POINTS		1000
/* Get the mask from width */
#define div_mask(width)			((1 << (width)) - 1)

/* Extract divider instance from clock hardware instance */
#define to_clk_wzrd_divider(_hw)	container_of(_hw, struct clk_wzrd_divider, hw)
enum clk_wzrd_int_clks {
	wzrd_clk_mul,
	wzrd_clk_mul_div,
	wzrd_clk_int_max
};

/**
 * struct clk_wzrd - Clock wizard private data structure
 *
 * @nb:			Notifier block
 * @base:		Memory base
 * @clk_in1:		Handle to input clock 'clk_in1'
 * @axi_clk:		Handle to input clock 's_axi_aclk'
 * @clks_internal:	Internal clocks
 * @speed_grade:	Speed grade of the device
 * @suspended:		Flag indicating power state of the device
 * @clk_data:		Output clock data
 */
struct clk_wzrd {
	struct notifier_block nb;
	void __iomem *base;
	struct clk *clk_in1;
	struct clk *axi_clk;
	struct clk_hw *clks_internal[wzrd_clk_int_max];
	unsigned int speed_grade;
	bool suspended;
	struct clk_hw_onecell_data clk_data;
};
/**
 * struct clk_wzrd_divider - clock divider specific to clk_wzrd
 *
 * @hw:		handle between common and hardware-specific interfaces
 * @base:	base address of register containing the divider
 * @offset:	offset address of register containing the divider
 * @shift:	shift to the divider bit field
 * @width:	width of the divider bit field
 * @flags:	clk_wzrd divider flags
 * @table:	array of value/divider pairs, last entry should have div = 0
 * @m:		value of the multiplier
 * @d:		value of the common divider
 * @o:		value of the leaf divider
 * @lock:	register lock
 */
struct clk_wzrd_divider {
	struct clk_hw hw;
	void __iomem *base;
	u16 offset;
	u8 shift;
	u8 width;
	u8 flags;
	const struct clk_div_table *table;
	u32 m;
	u32 d;
	u32 o;
	spinlock_t *lock;	/* divider lock */
};

struct versal_clk_data {
	bool is_versal;
};

#define to_clk_wzrd(_nb) container_of(_nb, struct clk_wzrd, nb)
/* maximum frequencies for input/output clocks per speed grade */
static const unsigned long clk_wzrd_max_freq[] = {
	800000000UL,
	933000000UL,
	1066000000UL
};

/* spin lock variable for clk_wzrd */
static DEFINE_SPINLOCK(clkwzrd_lock);
static unsigned long clk_wzrd_recalc_rate_ver(struct clk_hw *hw,
					      unsigned long parent_rate)
{
	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
	void __iomem *div_addr = divider->base + divider->offset;
	u32 div, p5en, edge, prediv2, all;
	unsigned int vall, valh;

	edge = !!(readl(div_addr) & WZRD_CLKFBOUT_EDGE);
	p5en = !!(readl(div_addr) & WZRD_P5EN);
	prediv2 = !!(readl(div_addr) & WZRD_CLKOUT0_PREDIV2);
	vall = readl(div_addr + 4) & WZRD_CLKFBOUT_L_MASK;
	valh = readl(div_addr + 4) >> WZRD_CLKFBOUT_H_SHIFT;
	all = valh + vall + edge;
	if (!all)
		all = 1;

	if (prediv2)
		div = 2 * all + prediv2 * p5en;
	else
		div = all;

	return DIV_ROUND_UP_ULL((u64)parent_rate, div);
}

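/*
 * Worked example (numbers are illustrative only): if the CLKOUT0 register
 * pair reads back high time = 2, low time = 2 and no edge, with PREDIV2 and
 * P5EN set, then all = 4 and div = 2 * 4 + 1 = 9, so a 900 MHz parent rate
 * is reported as 100 MHz.
 */
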
static unsigned long clk_wzrd_recalc_rate(struct clk_hw *hw,
					  unsigned long parent_rate)
{
	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
	void __iomem *div_addr = divider->base + divider->offset;
	unsigned int val;

	val = readl(div_addr) >> divider->shift;
	val &= div_mask(divider->width);

	return divider_recalc_rate(hw, parent_rate, val, divider->table,
				   divider->flags, divider->width);
}

static int clk_wzrd_ver_dynamic_reconfig(struct clk_hw *hw, unsigned long rate,
					 unsigned long parent_rate)
{
	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
	void __iomem *div_addr = divider->base + divider->offset;
	u32 value, regh, edged, p5en, p5fedge, regval, regval1;
	unsigned long flags;
	int err;

	spin_lock_irqsave(divider->lock, flags);

	value = DIV_ROUND_CLOSEST(parent_rate, rate);

	regh = value / WZRD_O_DIV;
	regval1 = readl(div_addr);
	regval1 |= WZRD_CLKFBOUT_PREDIV2;
	regval1 = regval1 & ~(WZRD_CLKFBOUT_EDGE | WZRD_P5EN | WZRD_P5FEDGE);
	if (value % WZRD_O_DIV > 1) {
		edged = 1;
		regval1 |= (edged << WZRD_EDGE_SHIFT);
	}

	p5fedge = value % WZRD_DUTY_CYCLE;
	p5en = value % WZRD_DUTY_CYCLE;
	regval1 = regval1 | p5en << WZRD_P5EN_SHIFT | p5fedge << WZRD_P5FEDGE_SHIFT;
	writel(regval1, div_addr);

	regval = regh | regh << WZRD_CLKFBOUT_H_SHIFT;
	writel(regval, div_addr + 4);
	/* Check status register */
	err = readl_poll_timeout_atomic(divider->base + WZRD_DR_STATUS_REG_OFFSET,
					value, value & WZRD_DR_LOCK_BIT_MASK,
					WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
	if (err)
		goto err_reconfig;

	/* Initiate reconfiguration */
	writel(WZRD_DR_BEGIN_DYNA_RECONF,
	       divider->base + WZRD_DR_INIT_VERSAL_OFFSET);

	/* Check status register */
	err = readl_poll_timeout_atomic(divider->base + WZRD_DR_STATUS_REG_OFFSET,
					value, value & WZRD_DR_LOCK_BIT_MASK,
					WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
err_reconfig:
	spin_unlock_irqrestore(divider->lock, flags);
	return err;
}

static int clk_wzrd_dynamic_reconfig(struct clk_hw *hw, unsigned long rate,
				     unsigned long parent_rate)
{
	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
	void __iomem *div_addr = divider->base + divider->offset;
	unsigned long flags;
	u32 value;
	int err;

	spin_lock_irqsave(divider->lock, flags);

	value = DIV_ROUND_CLOSEST(parent_rate, rate);

	/* Cap the value to max */
	value = min_t(u32, value, WZRD_DR_MAX_INT_DIV_VALUE);

	/* Set divisor and clear phase offset */
	writel(value, div_addr);
	writel(0x00, div_addr + WZRD_DR_DIV_TO_PHASE_OFFSET);

	/* Check status register */
	err = readl_poll_timeout_atomic(divider->base + WZRD_DR_STATUS_REG_OFFSET,
					value, value & WZRD_DR_LOCK_BIT_MASK,
					WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
	if (err)
		goto err_reconfig;

	/* Initiate reconfiguration */
	writel(WZRD_DR_BEGIN_DYNA_RECONF_5_2,
	       divider->base + WZRD_DR_INIT_REG_OFFSET);
	writel(WZRD_DR_BEGIN_DYNA_RECONF1_5_2,
	       divider->base + WZRD_DR_INIT_REG_OFFSET);

	/* Check status register */
	err = readl_poll_timeout_atomic(divider->base + WZRD_DR_STATUS_REG_OFFSET,
					value, value & WZRD_DR_LOCK_BIT_MASK,
					WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
err_reconfig:
	spin_unlock_irqrestore(divider->lock, flags);
	return err;
}

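/*
 * Note on the sequence above: the integer divider is written first, the lock
 * bit in the status register is polled, the two trigger words
 * (WZRD_DR_BEGIN_DYNA_RECONF_5_2 followed by WZRD_DR_BEGIN_DYNA_RECONF1_5_2)
 * are written to the dynamic-reconfiguration init register, and the lock bit
 * is polled again before the new setting is considered applied.
 */
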
static long clk_wzrd_round_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long *prate)
{
	u8 div;

	/*
	 * since we don't change parent rate we just round rate to closest
	 * achievable
	 */
	div = DIV_ROUND_CLOSEST(*prate, rate);

	return *prate / div;
}

static int clk_wzrd_get_divisors_ver(struct clk_hw *hw, unsigned long rate,
				     unsigned long parent_rate)
{
	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
	u64 vco_freq, freq, diff, vcomin, vcomax;
	u32 m, d, o;
	u32 mmin, mmax, dmin, dmax, omin, omax;

	mmin = VER_WZRD_M_MIN;
	mmax = VER_WZRD_M_MAX;
	dmin = VER_WZRD_D_MIN;
	dmax = VER_WZRD_D_MAX;
	omin = VER_WZRD_O_MIN;
	omax = VER_WZRD_O_MAX;
	vcomin = VER_WZRD_VCO_MIN;
	vcomax = VER_WZRD_VCO_MAX;

	for (m = mmin; m <= mmax; m++) {
		for (d = dmin; d <= dmax; d++) {
			vco_freq = DIV_ROUND_CLOSEST((parent_rate * m), d);
			if (vco_freq >= vcomin && vco_freq <= vcomax) {
				for (o = omin; o <= omax; o++) {
					freq = DIV_ROUND_CLOSEST_ULL(vco_freq, o);
					diff = abs(freq - rate);

					if (diff < WZRD_MIN_ERR) {
						divider->m = m;
						divider->d = d;
						divider->o = o;
						return 0;
					}
				}
			}
		}
	}
	return -EBUSY;
}

static int clk_wzrd_get_divisors(struct clk_hw *hw, unsigned long rate,
				 unsigned long parent_rate)
{
	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
	u64 vco_freq, freq, diff, vcomin, vcomax;
	u32 m, d, o;
	u32 mmin, mmax, dmin, dmax, omin, omax;

	mmin = WZRD_M_MIN;
	mmax = WZRD_M_MAX;
	dmin = WZRD_D_MIN;
	dmax = WZRD_D_MAX;
	omin = WZRD_O_MIN;
	omax = WZRD_O_MAX;
	vcomin = WZRD_VCO_MIN;
	vcomax = WZRD_VCO_MAX;

	for (m = mmin; m <= mmax; m++) {
		for (d = dmin; d <= dmax; d++) {
			vco_freq = DIV_ROUND_CLOSEST((parent_rate * m), d);
			if (vco_freq >= vcomin && vco_freq <= vcomax) {
				for (o = omin; o <= omax; o++) {
					freq = DIV_ROUND_CLOSEST_ULL(vco_freq, o);
					diff = abs(freq - rate);

					if (diff < WZRD_MIN_ERR) {
						divider->m = m;
						divider->d = d;
						divider->o = o;
						return 0;
					}
				}
			}
		}
	}
	return -EBUSY;
}

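/*
 * Worked example (numbers are illustrative only): for a 100 MHz parent and a
 * 50 MHz request, m = 8 and d = 1 give vco_freq = 800 MHz, which is inside
 * the 800-1600 MHz window; o = 16 then yields freq = 50 MHz and diff = 0,
 * which is below WZRD_MIN_ERR, so the search stops with m/d/o = 8/1/16.
 */
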
static int clk_wzrd_reconfig(struct clk_wzrd_divider *divider, void __iomem *div_addr)
{
	u32 value;
	int err;

	/* Check status register */
	err = readl_poll_timeout_atomic(divider->base + WZRD_DR_STATUS_REG_OFFSET, value,
					value & WZRD_DR_LOCK_BIT_MASK,
					WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
	if (err)
		return -ETIMEDOUT;

	/* Initiate reconfiguration */
	writel(WZRD_DR_BEGIN_DYNA_RECONF, div_addr);
	/* Check status register */
	return readl_poll_timeout_atomic(divider->base + WZRD_DR_STATUS_REG_OFFSET, value,
					 value & WZRD_DR_LOCK_BIT_MASK,
					 WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
}

static int clk_wzrd_dynamic_ver_all_nolock(struct clk_hw *hw, unsigned long rate,
					   unsigned long parent_rate)
{
	u32 regh, edged, p5en, p5fedge, value2, m, regval, regval1, value;
	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
	void __iomem *div_addr;
	int err;

	err = clk_wzrd_get_divisors_ver(hw, rate, parent_rate);
	if (err)
		return err;

	writel(0, divider->base + WZRD_CLK_CFG_REG(1, WZRD_CLKFBOUT_4));

	m = divider->m;
	edged = m % WZRD_DUTY_CYCLE;
	regh = m / WZRD_DUTY_CYCLE;
	regval1 = readl(divider->base + WZRD_CLK_CFG_REG(1,
							 WZRD_CLKFBOUT_1));
	regval1 |= WZRD_MULT_PREDIV2;
	if (edged)
		regval1 = regval1 | WZRD_CLKFBOUT_EDGE;
	else
		regval1 = regval1 & ~WZRD_CLKFBOUT_EDGE;

	writel(regval1, divider->base + WZRD_CLK_CFG_REG(1,
							 WZRD_CLKFBOUT_1));
	regval1 = regh | regh << WZRD_CLKFBOUT_H_SHIFT;
	writel(regval1, divider->base + WZRD_CLK_CFG_REG(1,
							 WZRD_CLKFBOUT_2));

	value2 = divider->d;
	edged = value2 % WZRD_DUTY_CYCLE;
	regh = (value2 / WZRD_DUTY_CYCLE);
	regval1 = FIELD_PREP(WZRD_DIVCLK_EDGE, edged);
	writel(regval1, divider->base + WZRD_CLK_CFG_REG(1,
							 WZRD_DESKEW_2));
	regval1 = regh | regh << WZRD_CLKFBOUT_H_SHIFT;
	writel(regval1, divider->base + WZRD_CLK_CFG_REG(1, WZRD_DIVCLK));

	value = divider->o;
	regh = value / WZRD_O_DIV;
	regval1 = readl(divider->base + WZRD_CLK_CFG_REG(1,
							 WZRD_CLKOUT0_1));
	regval1 |= WZRD_CLKFBOUT_PREDIV2;
	regval1 = regval1 & ~(WZRD_CLKFBOUT_EDGE | WZRD_P5EN | WZRD_P5FEDGE);

	if (value % WZRD_O_DIV > 1) {
		edged = 1;
		regval1 |= edged << WZRD_CLKFBOUT_H_SHIFT;
	}

	p5fedge = value % WZRD_DUTY_CYCLE;
	p5en = value % WZRD_DUTY_CYCLE;

	regval1 = regval1 | FIELD_PREP(WZRD_P5EN, p5en) | FIELD_PREP(WZRD_P5FEDGE, p5fedge);
	writel(regval1, divider->base + WZRD_CLK_CFG_REG(1,
							 WZRD_CLKOUT0_1));
	regval = regh | regh << WZRD_CLKFBOUT_H_SHIFT;
	writel(regval, divider->base + WZRD_CLK_CFG_REG(1,
							WZRD_CLKOUT0_2));
	div_addr = divider->base + WZRD_DR_INIT_VERSAL_OFFSET;

	return clk_wzrd_reconfig(divider, div_addr);
}

static int clk_wzrd_dynamic_all_nolock(struct clk_hw *hw, unsigned long rate,
				       unsigned long parent_rate)
{
	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
	unsigned long vco_freq, rate_div, clockout0_div;
	void __iomem *div_addr;
	u32 reg, pre, f;
	int err;

	err = clk_wzrd_get_divisors(hw, rate, parent_rate);
	if (err)
		return err;

	vco_freq = DIV_ROUND_CLOSEST(parent_rate * divider->m, divider->d);
	rate_div = DIV_ROUND_CLOSEST_ULL((vco_freq * WZRD_FRAC_POINTS), rate);

	clockout0_div = div_u64(rate_div, WZRD_FRAC_POINTS);

	pre = DIV_ROUND_CLOSEST_ULL(vco_freq * WZRD_FRAC_POINTS, rate);
	f = (pre - (clockout0_div * WZRD_FRAC_POINTS));
	f &= WZRD_CLKOUT_FRAC_MASK;

	reg = FIELD_PREP(WZRD_CLKOUT_DIVIDE_MASK, clockout0_div) |
	      FIELD_PREP(WZRD_CLKOUT0_FRAC_MASK, f);

	writel(reg, divider->base + WZRD_CLK_CFG_REG(0, 2));
	/* Set divisor and clear phase offset */
	reg = FIELD_PREP(WZRD_CLKFBOUT_MULT_MASK, divider->m) |
	      FIELD_PREP(WZRD_DIVCLK_DIVIDE_MASK, divider->d);
	writel(reg, divider->base + WZRD_CLK_CFG_REG(0, 0));
	writel(divider->o, divider->base + WZRD_CLK_CFG_REG(0, 2));
	writel(0, divider->base + WZRD_CLK_CFG_REG(0, 3));
	div_addr = divider->base + WZRD_DR_INIT_REG_OFFSET;
	return clk_wzrd_reconfig(divider, div_addr);
}

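/*
 * Worked example for the fractional path above (numbers are illustrative
 * only): with m/d chosen so that vco_freq = 1000 MHz and a requested rate of
 * 150 MHz, rate_div = 6667, clockout0_div = 6 and f = 667, i.e. the output
 * divider is programmed as 6.667 in 1/1000 steps.
 */
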
static int clk_wzrd_dynamic_all(struct clk_hw *hw, unsigned long rate,
				unsigned long parent_rate)
{
	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(divider->lock, flags);

	ret = clk_wzrd_dynamic_all_nolock(hw, rate, parent_rate);

	spin_unlock_irqrestore(divider->lock, flags);

	return ret;
}

static int clk_wzrd_dynamic_all_ver(struct clk_hw *hw, unsigned long rate,
				    unsigned long parent_rate)
{
	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(divider->lock, flags);

	ret = clk_wzrd_dynamic_ver_all_nolock(hw, rate, parent_rate);

	spin_unlock_irqrestore(divider->lock, flags);

	return ret;
}

static unsigned long clk_wzrd_recalc_rate_all(struct clk_hw *hw,
					      unsigned long parent_rate)
{
	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
	u32 m, d, o, div, reg, f;

	reg = readl(divider->base + WZRD_CLK_CFG_REG(0, 0));
	d = FIELD_GET(WZRD_DIVCLK_DIVIDE_MASK, reg);
	m = FIELD_GET(WZRD_CLKFBOUT_MULT_MASK, reg);
	reg = readl(divider->base + WZRD_CLK_CFG_REG(0, 2));
	o = FIELD_GET(WZRD_DIVCLK_DIVIDE_MASK, reg);
	f = FIELD_GET(WZRD_CLKOUT0_FRAC_MASK, reg);

	div = DIV_ROUND_CLOSEST(d * (WZRD_FRAC_POINTS * o + f), WZRD_FRAC_POINTS);
	return divider_recalc_rate(hw, parent_rate * m, div, divider->table,
				   divider->flags, divider->width);
}

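/*
 * Worked example (numbers are illustrative only): with d = 1, m = 10, o = 6
 * and fractional part f = 667, div = round(1 * (1000 * 6 + 667) / 1000) = 7,
 * so the combined D and fractional O divide is treated as 7 while the
 * multiplier m scales the parent rate first.
 */
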
static unsigned long clk_wzrd_recalc_rate_all_ver(struct clk_hw *hw,
						  unsigned long parent_rate)
{
	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
	u32 edged, div2, p5en, edge, prediv2, all, regl, regh, mult;
	u32 div, reg;

	edge = !!(readl(divider->base + WZRD_CLK_CFG_REG(1, WZRD_CLKFBOUT_1)) &
		  WZRD_CLKFBOUT_EDGE);

	reg = readl(divider->base + WZRD_CLK_CFG_REG(1, WZRD_CLKFBOUT_2));
	regl = FIELD_GET(WZRD_CLKFBOUT_L_MASK, reg);
	regh = FIELD_GET(WZRD_CLKFBOUT_H_MASK, reg);

	mult = regl + regh + edge;
	if (!mult)
		mult = 1;

	regl = readl(divider->base + WZRD_CLK_CFG_REG(1, WZRD_CLKFBOUT_4)) &
	       WZRD_CLKFBOUT_FRAC_EN;
	if (regl) {
		regl = readl(divider->base + WZRD_CLK_CFG_REG(1, WZRD_CLKFBOUT_3))
			& WZRD_VERSAL_FRAC_MASK;
		mult = mult * WZRD_FRAC_GRADIENT + regl;
		parent_rate = DIV_ROUND_CLOSEST((parent_rate * mult), WZRD_FRAC_GRADIENT);
	} else {
		parent_rate = parent_rate * mult;
	}

	/* O calculation */
	reg = readl(divider->base + WZRD_CLK_CFG_REG(1, WZRD_CLKOUT0_1));
	edged = FIELD_GET(WZRD_CLKFBOUT_EDGE, reg);
	p5en = FIELD_GET(WZRD_P5EN, reg);
	prediv2 = FIELD_GET(WZRD_CLKOUT0_PREDIV2, reg);

	reg = readl(divider->base + WZRD_CLK_CFG_REG(1, WZRD_CLKOUT0_2));
	/* Low time */
	regl = FIELD_GET(WZRD_CLKFBOUT_L_MASK, reg);
	/* High time */
	regh = FIELD_GET(WZRD_CLKFBOUT_H_MASK, reg);
	all = regh + regl + edged;
	if (!all)
		all = 1;

	if (prediv2)
		div2 = PREDIV2_MULT * all + p5en;
	else
		div2 = all;

	/* D calculation */
	edged = !!(readl(divider->base + WZRD_CLK_CFG_REG(1, WZRD_DESKEW_2)) &
		   WZRD_DIVCLK_EDGE);

	reg = readl(divider->base + WZRD_CLK_CFG_REG(1, WZRD_DIVCLK));
	/* Low time */
	regl = FIELD_GET(WZRD_CLKFBOUT_L_MASK, reg);
	/* High time */
	regh = FIELD_GET(WZRD_CLKFBOUT_H_MASK, reg);
	div = regl + regh + edged;
	if (!div)
		div = 1;

	div = div2 * div;

	return divider_recalc_rate(hw, parent_rate, div, divider->table,
				   divider->flags, divider->width);
}

static long clk_wzrd_round_rate_all(struct clk_hw *hw, unsigned long rate,
				    unsigned long *prate)
{
	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
	unsigned long int_freq;
	u32 m, d, o, div, f;
	int err;

	err = clk_wzrd_get_divisors(hw, rate, *prate);
	if (err)
		return err;

	m = divider->m;
	d = divider->d;
	o = divider->o;

	div = d * o;
	int_freq = divider_recalc_rate(hw, *prate * m, div, divider->table,
				       divider->flags, divider->width);

	if (rate > int_freq) {
		f = DIV_ROUND_CLOSEST_ULL(rate * WZRD_FRAC_POINTS, int_freq);
		rate = DIV_ROUND_CLOSEST(int_freq * f, WZRD_FRAC_POINTS);
	}

	return rate;
}

static const struct clk_ops clk_wzrd_ver_divider_ops = {
	.round_rate = clk_wzrd_round_rate,
	.set_rate = clk_wzrd_ver_dynamic_reconfig,
	.recalc_rate = clk_wzrd_recalc_rate_ver,
};

static const struct clk_ops clk_wzrd_ver_div_all_ops = {
	.round_rate = clk_wzrd_round_rate_all,
	.set_rate = clk_wzrd_dynamic_all_ver,
	.recalc_rate = clk_wzrd_recalc_rate_all_ver,
};

static const struct clk_ops clk_wzrd_clk_divider_ops = {
	.round_rate = clk_wzrd_round_rate,
	.set_rate = clk_wzrd_dynamic_reconfig,
	.recalc_rate = clk_wzrd_recalc_rate,
};

static const struct clk_ops clk_wzrd_clk_div_all_ops = {
	.round_rate = clk_wzrd_round_rate_all,
	.set_rate = clk_wzrd_dynamic_all,
	.recalc_rate = clk_wzrd_recalc_rate_all,
};

static unsigned long clk_wzrd_recalc_ratef(struct clk_hw *hw,
					   unsigned long parent_rate)
{
	unsigned int val;
	u32 div, frac;
	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
	void __iomem *div_addr = divider->base + divider->offset;

	val = readl(div_addr);
	div = val & div_mask(divider->width);
	frac = (val >> WZRD_CLKOUT_FRAC_SHIFT) & WZRD_CLKOUT_FRAC_MASK;

	return mult_frac(parent_rate, 1000, (div * 1000) + frac);
}

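/*
 * Worked example (numbers are illustrative only): a register value with an
 * integer divide of 6 and a fractional field of 250 represents a divider of
 * 6.250, so a 1000 MHz parent reads back as 1000 * 1000 / 6250 = 160 MHz.
 */
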
static int clk_wzrd_dynamic_reconfig_f(struct clk_hw *hw, unsigned long rate,
				       unsigned long parent_rate)
{
	int err;
	u32 value, pre;
	unsigned long rate_div, f, clockout0_div;
	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
	void __iomem *div_addr = divider->base + divider->offset;

	rate_div = DIV_ROUND_DOWN_ULL(parent_rate * 1000, rate);
	clockout0_div = rate_div / 1000;

	pre = DIV_ROUND_CLOSEST((parent_rate * 1000), rate);
	f = (u32)(pre - (clockout0_div * 1000));
	f = f & WZRD_CLKOUT_FRAC_MASK;
	f = f << WZRD_CLKOUT_DIVIDE_WIDTH;

	value = (f | (clockout0_div & WZRD_CLKOUT_DIVIDE_MASK));

	/* Set divisor and clear phase offset */
	writel(value, div_addr);
	writel(0x0, div_addr + WZRD_DR_DIV_TO_PHASE_OFFSET);

	/* Check status register */
	err = readl_poll_timeout(divider->base + WZRD_DR_STATUS_REG_OFFSET, value,
				 value & WZRD_DR_LOCK_BIT_MASK,
				 WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
	if (err)
		return err;

	/* Initiate reconfiguration */
	writel(WZRD_DR_BEGIN_DYNA_RECONF_5_2,
	       divider->base + WZRD_DR_INIT_REG_OFFSET);
	writel(WZRD_DR_BEGIN_DYNA_RECONF1_5_2,
	       divider->base + WZRD_DR_INIT_REG_OFFSET);

	/* Check status register */
	return readl_poll_timeout(divider->base + WZRD_DR_STATUS_REG_OFFSET, value,
				  value & WZRD_DR_LOCK_BIT_MASK,
				  WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
}

static long clk_wzrd_round_rate_f(struct clk_hw *hw, unsigned long rate,
				  unsigned long *prate)
{
	return rate;
}

static const struct clk_ops clk_wzrd_clk_divider_ops_f = {
	.round_rate = clk_wzrd_round_rate_f,
	.set_rate = clk_wzrd_dynamic_reconfig_f,
	.recalc_rate = clk_wzrd_recalc_ratef,
};

static struct clk_hw *clk_wzrd_register_divf(struct device *dev,
					     const char *name,
					     const char *parent_name,
					     unsigned long flags,
					     void __iomem *base, u16 offset,
					     u8 shift, u8 width,
					     u8 clk_divider_flags,
					     u32 div_type,
					     spinlock_t *lock)
{
	struct clk_wzrd_divider *div;
	struct clk_hw *hw;
	struct clk_init_data init;
	int ret;

	div = devm_kzalloc(dev, sizeof(*div), GFP_KERNEL);
	if (!div)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &clk_wzrd_clk_divider_ops_f;
	init.flags = flags;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	div->base = base;
	div->offset = offset;
	div->shift = shift;
	div->width = width;
	div->flags = clk_divider_flags;
	div->lock = lock;
	div->hw.init = &init;

	hw = &div->hw;
	ret = devm_clk_hw_register(dev, hw);
	if (ret)
		return ERR_PTR(ret);

	return hw;
}

static struct clk_hw *clk_wzrd_ver_register_divider(struct device *dev,
						    const char *name,
						    const char *parent_name,
						    unsigned long flags,
						    void __iomem *base,
						    u16 offset,
						    u8 shift, u8 width,
						    u8 clk_divider_flags,
						    u32 div_type,
						    spinlock_t *lock)
{
	struct clk_wzrd_divider *div;
	struct clk_hw *hw;
	struct clk_init_data init;
	int ret;

	div = devm_kzalloc(dev, sizeof(*div), GFP_KERNEL);
	if (!div)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	if (clk_divider_flags & CLK_DIVIDER_READ_ONLY)
		init.ops = &clk_divider_ro_ops;
	else if (div_type == DIV_O)
		init.ops = &clk_wzrd_ver_divider_ops;
	else
		init.ops = &clk_wzrd_ver_div_all_ops;
	init.flags = flags;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	div->base = base;
	div->offset = offset;
	div->shift = shift;
	div->width = width;
	div->flags = clk_divider_flags;
	div->lock = lock;
	div->hw.init = &init;

	hw = &div->hw;
	ret = devm_clk_hw_register(dev, hw);
	if (ret)
		return ERR_PTR(ret);

	return hw;
}

static struct clk_hw *clk_wzrd_register_divider(struct device *dev,
						const char *name,
						const char *parent_name,
						unsigned long flags,
						void __iomem *base, u16 offset,
						u8 shift, u8 width,
						u8 clk_divider_flags,
						u32 div_type,
						spinlock_t *lock)
{
	struct clk_wzrd_divider *div;
	struct clk_hw *hw;
	struct clk_init_data init;
	int ret;

	div = devm_kzalloc(dev, sizeof(*div), GFP_KERNEL);
	if (!div)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	if (clk_divider_flags & CLK_DIVIDER_READ_ONLY)
		init.ops = &clk_divider_ro_ops;
	else if (div_type == DIV_O)
		init.ops = &clk_wzrd_clk_divider_ops;
	else
		init.ops = &clk_wzrd_clk_div_all_ops;
	init.flags = flags;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	div->base = base;
	div->offset = offset;
	div->shift = shift;
	div->width = width;
	div->flags = clk_divider_flags;
	div->lock = lock;
	div->hw.init = &init;

	hw = &div->hw;
	ret = devm_clk_hw_register(dev, hw);
	if (ret)
		return ERR_PTR(ret);

	return hw;
}

static int clk_wzrd_clk_notifier(struct notifier_block *nb, unsigned long event,
				 void *data)
{
	unsigned long max;
	struct clk_notifier_data *ndata = data;
	struct clk_wzrd *clk_wzrd = to_clk_wzrd(nb);

	if (clk_wzrd->suspended)
		return NOTIFY_OK;

	if (ndata->clk == clk_wzrd->clk_in1)
		max = clk_wzrd_max_freq[clk_wzrd->speed_grade - 1];
	else if (ndata->clk == clk_wzrd->axi_clk)
		max = WZRD_ACLK_MAX_FREQ;
	else
		return NOTIFY_DONE;	/* should never happen */

	switch (event) {
	case PRE_RATE_CHANGE:
		if (ndata->new_rate > max)
			return NOTIFY_BAD;
		return NOTIFY_OK;
	case POST_RATE_CHANGE:
	case ABORT_RATE_CHANGE:
	default:
		return NOTIFY_DONE;
	}
}

static int __maybe_unused clk_wzrd_suspend(struct device *dev)
{
	struct clk_wzrd *clk_wzrd = dev_get_drvdata(dev);

	clk_disable_unprepare(clk_wzrd->axi_clk);
	clk_wzrd->suspended = true;

	return 0;
}

static int __maybe_unused clk_wzrd_resume(struct device *dev)
{
	int ret;
	struct clk_wzrd *clk_wzrd = dev_get_drvdata(dev);

	ret = clk_prepare_enable(clk_wzrd->axi_clk);
	if (ret) {
		dev_err(dev, "unable to enable s_axi_aclk\n");
		return ret;
	}

	clk_wzrd->suspended = false;

	return 0;
}

static SIMPLE_DEV_PM_OPS(clk_wzrd_dev_pm_ops, clk_wzrd_suspend,
			 clk_wzrd_resume);

static const struct versal_clk_data versal_data = {
	.is_versal	= true,
};

static int clk_wzrd_register_output_clocks(struct device *dev, int nr_outputs)
{
	const char *clkout_name, *clk_name, *clk_mul_name;
	struct clk_wzrd *clk_wzrd = dev_get_drvdata(dev);
	u32 regl, regh, edge, regld, reghd, edged, div;
	const struct versal_clk_data *data;
	unsigned long flags = 0;
	bool is_versal = false;
	void __iomem *ctrl_reg;
	u32 reg, reg_f, mult;
	int i;

	data = device_get_match_data(dev);
	if (data)
		is_versal = data->is_versal;

	clkout_name = devm_kasprintf(dev, GFP_KERNEL, "%s_out0", dev_name(dev));
	if (!clkout_name)
		return -ENOMEM;

	if (is_versal) {
		if (nr_outputs == 1) {
			clk_wzrd->clk_data.hws[0] = clk_wzrd_ver_register_divider
				(dev, clkout_name,
				 __clk_get_name(clk_wzrd->clk_in1), 0,
				 clk_wzrd->base, WZRD_CLK_CFG_REG(is_versal, 3),
				 WZRD_CLKOUT_DIVIDE_SHIFT,
				 WZRD_CLKOUT_DIVIDE_WIDTH,
				 CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
				 DIV_ALL, &clkwzrd_lock);

			return 0;
		}
		/* register multiplier */
		edge = !!(readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 0)) &
			  WZRD_CLKFBOUT_EDGE);
		regl = (readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 1)) &
			WZRD_CLKFBOUT_L_MASK) >> WZRD_CLKFBOUT_L_SHIFT;
		regh = (readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 1)) &
			WZRD_CLKFBOUT_H_MASK) >> WZRD_CLKFBOUT_H_SHIFT;
		mult = regl + regh + edge;
		if (!mult)
			mult = 1;
		mult = mult * WZRD_FRAC_GRADIENT;

		regl = readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 51)) &
		       WZRD_CLKFBOUT_FRAC_EN;
		if (regl)
			regl = readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 48)) &
			       WZRD_VERSAL_FRAC_MASK;

		mult = mult + regl;
		mult = DIV_ROUND_CLOSEST(mult * 1000, WZRD_FRAC_GRADIENT);
	} else {
		if (nr_outputs == 1) {
			clk_wzrd->clk_data.hws[0] = clk_wzrd_register_divider
				(dev, clkout_name,
				 __clk_get_name(clk_wzrd->clk_in1), 0,
				 clk_wzrd->base, WZRD_CLK_CFG_REG(is_versal, 3),
				 WZRD_CLKOUT_DIVIDE_SHIFT,
				 WZRD_CLKOUT_DIVIDE_WIDTH,
				 CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
				 DIV_ALL, &clkwzrd_lock);

			return 0;
		}
		reg = readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 0));
		reg_f = reg & WZRD_CLKFBOUT_FRAC_MASK;
		reg_f = reg_f >> WZRD_CLKFBOUT_FRAC_SHIFT;

		reg = reg & WZRD_CLKFBOUT_MULT_MASK;
		reg = reg >> WZRD_CLKFBOUT_MULT_SHIFT;
		mult = (reg * 1000) + reg_f;
	}

	clk_name = devm_kasprintf(dev, GFP_KERNEL, "%s_mul", dev_name(dev));
	if (!clk_name)
		return -ENOMEM;
	clk_wzrd->clks_internal[wzrd_clk_mul] = devm_clk_hw_register_fixed_factor
			(dev, clk_name,
			 __clk_get_name(clk_wzrd->clk_in1),
			 0, mult, 1000);
	if (IS_ERR(clk_wzrd->clks_internal[wzrd_clk_mul])) {
		dev_err(dev, "unable to register fixed-factor clock\n");
		return PTR_ERR(clk_wzrd->clks_internal[wzrd_clk_mul]);
	}

	clk_name = devm_kasprintf(dev, GFP_KERNEL, "%s_mul_div", dev_name(dev));
	if (!clk_name)
		return -ENOMEM;

	if (is_versal) {
		edged = !!(readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 20)) &
			   WZRD_DIVCLK_EDGE);
		regld = (readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 21)) &
			 WZRD_CLKFBOUT_L_MASK) >> WZRD_CLKFBOUT_L_SHIFT;
		reghd = (readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 21)) &
			 WZRD_CLKFBOUT_H_MASK) >> WZRD_CLKFBOUT_H_SHIFT;
		div = (regld + reghd + edged);
		if (!div)
			div = 1;

		clk_mul_name = clk_hw_get_name(clk_wzrd->clks_internal[wzrd_clk_mul]);
		clk_wzrd->clks_internal[wzrd_clk_mul_div] =
			devm_clk_hw_register_fixed_factor(dev, clk_name, clk_mul_name, 0, 1, div);
	} else {
		ctrl_reg = clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 0);
		clk_wzrd->clks_internal[wzrd_clk_mul_div] = devm_clk_hw_register_divider
			(dev, clk_name,
			 clk_hw_get_name(clk_wzrd->clks_internal[wzrd_clk_mul]),
			 flags, ctrl_reg, 0, 8, CLK_DIVIDER_ONE_BASED |
			 CLK_DIVIDER_ALLOW_ZERO, &clkwzrd_lock);
	}
	if (IS_ERR(clk_wzrd->clks_internal[wzrd_clk_mul_div])) {
		dev_err(dev, "unable to register divider clock\n");
		return PTR_ERR(clk_wzrd->clks_internal[wzrd_clk_mul_div]);
	}

	/* register div per output */
	for (i = nr_outputs - 1; i >= 0 ; i--) {
		clkout_name = devm_kasprintf(dev, GFP_KERNEL, "%s_out%d", dev_name(dev), i);
		if (!clkout_name)
			return -ENOMEM;

		if (is_versal)
			clk_wzrd->clk_data.hws[i] = clk_wzrd_ver_register_divider
				(dev,
				 clkout_name, clk_name, 0,
				 clk_wzrd->base,
				 (WZRD_CLK_CFG_REG(is_versal, 3) + i * 8),
				 WZRD_CLKOUT_DIVIDE_SHIFT,
				 WZRD_CLKOUT_DIVIDE_WIDTH,
				 CLK_DIVIDER_ONE_BASED |
				 CLK_DIVIDER_ALLOW_ZERO,
				 DIV_O, &clkwzrd_lock);
		else if (!i)
			clk_wzrd->clk_data.hws[i] = clk_wzrd_register_divf
				(dev, clkout_name, clk_name, flags, clk_wzrd->base,
				 (WZRD_CLK_CFG_REG(is_versal, 2) + i * 12),
				 WZRD_CLKOUT_DIVIDE_SHIFT,
				 WZRD_CLKOUT_DIVIDE_WIDTH,
				 CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
				 DIV_O, &clkwzrd_lock);
		else
			clk_wzrd->clk_data.hws[i] = clk_wzrd_register_divider
				(dev, clkout_name, clk_name, 0, clk_wzrd->base,
				 (WZRD_CLK_CFG_REG(is_versal, 2) + i * 12),
				 WZRD_CLKOUT_DIVIDE_SHIFT,
				 WZRD_CLKOUT_DIVIDE_WIDTH,
				 CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
				 DIV_O, &clkwzrd_lock);

		if (IS_ERR(clk_wzrd->clk_data.hws[i])) {
			dev_err(dev, "unable to register divider clock\n");
			return PTR_ERR(clk_wzrd->clk_data.hws[i]);
		}
	}

	return 0;
}

static int clk_wzrd_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct clk_wzrd *clk_wzrd;
	unsigned long rate;
	u32 nr_outputs;
	int ret;

	ret = of_property_read_u32(np, "xlnx,nr-outputs", &nr_outputs);
	if (ret || nr_outputs > WZRD_NUM_OUTPUTS)
		return -EINVAL;

	clk_wzrd = devm_kzalloc(&pdev->dev, struct_size(clk_wzrd, clk_data.hws, nr_outputs),
				GFP_KERNEL);
	if (!clk_wzrd)
		return -ENOMEM;
	platform_set_drvdata(pdev, clk_wzrd);

	clk_wzrd->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(clk_wzrd->base))
		return PTR_ERR(clk_wzrd->base);

	clk_wzrd->axi_clk = devm_clk_get_enabled(&pdev->dev, "s_axi_aclk");
	if (IS_ERR(clk_wzrd->axi_clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(clk_wzrd->axi_clk),
				     "s_axi_aclk not found\n");
	rate = clk_get_rate(clk_wzrd->axi_clk);
	if (rate > WZRD_ACLK_MAX_FREQ) {
		dev_err(&pdev->dev, "s_axi_aclk frequency (%lu) too high\n", rate);
		return -EINVAL;
	}

	if (!of_property_present(np, "xlnx,static-config")) {
		ret = of_property_read_u32(np, "xlnx,speed-grade", &clk_wzrd->speed_grade);
		if (!ret) {
			if (clk_wzrd->speed_grade < 1 || clk_wzrd->speed_grade > 3) {
				dev_warn(&pdev->dev, "invalid speed grade '%d'\n",
					 clk_wzrd->speed_grade);
				clk_wzrd->speed_grade = 0;
			}
		}
	}

	clk_wzrd->clk_in1 = devm_clk_get(&pdev->dev, "clk_in1");
	if (IS_ERR(clk_wzrd->clk_in1))
		return dev_err_probe(&pdev->dev, PTR_ERR(clk_wzrd->clk_in1),
				     "clk_in1 not found\n");

	ret = clk_wzrd_register_output_clocks(&pdev->dev, nr_outputs);
	if (ret)
		return ret;

	clk_wzrd->clk_data.num = nr_outputs;
	ret = devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_onecell_get,
					  &clk_wzrd->clk_data);
	if (ret) {
		dev_err(&pdev->dev, "unable to register clock provider\n");
		return ret;
	}

	if (clk_wzrd->speed_grade) {
		clk_wzrd->nb.notifier_call = clk_wzrd_clk_notifier;

		ret = devm_clk_notifier_register(&pdev->dev, clk_wzrd->clk_in1,
						 &clk_wzrd->nb);
		if (ret)
			dev_warn(&pdev->dev,
				 "unable to register clock notifier\n");

		ret = devm_clk_notifier_register(&pdev->dev, clk_wzrd->axi_clk,
						 &clk_wzrd->nb);
		if (ret)
			dev_warn(&pdev->dev,
				 "unable to register clock notifier\n");
	}

	return 0;
}

static const struct of_device_id clk_wzrd_ids[] = {
	{ .compatible = "xlnx,versal-clk-wizard", .data = &versal_data },
	{ .compatible = "xlnx,clocking-wizard" },
	{ .compatible = "xlnx,clocking-wizard-v5.2" },
	{ .compatible = "xlnx,clocking-wizard-v6.0" },
	{ },
};
MODULE_DEVICE_TABLE(of, clk_wzrd_ids);

static struct platform_driver clk_wzrd_driver = {
	.driver = {
		.name = "clk-wizard",
		.of_match_table = clk_wzrd_ids,
		.pm = &clk_wzrd_dev_pm_ops,
	},
	.probe = clk_wzrd_probe,
};
module_platform_driver(clk_wzrd_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Soeren Brinkmann <soren.brinkmann@xilinx.com>");
MODULE_DESCRIPTION("Driver for the Xilinx Clocking Wizard IP core");