// SPDX-License-Identifier: GPL-2.0-only
// Copyright (C) 2014 Broadcom Corporation

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/clkdev.h>
#include <linux/of_address.h>
#include <linux/delay.h>

#include "clk-iproc.h"

#define PLL_VCO_HIGH_SHIFT 19
#define PLL_VCO_LOW_SHIFT  30

/*
 * PLL MACRO_SELECT modes 0 to 5 choose pre-calculated PLL output frequencies
 * from a look-up table. Mode 7 allows the user to manipulate the PLL clock
 * dividers directly.
 */
#define PLL_USER_MODE 7

/* number of delay loops waiting for PLL to lock */
#define LOCK_DELAY 100

/* number of VCO frequency bands */
#define NUM_FREQ_BANDS 8

#define NUM_KP_BANDS 3
enum kp_band {
        KP_BAND_MID = 0,
        KP_BAND_HIGH,
        KP_BAND_HIGH_HIGH
};

static const unsigned int kp_table[NUM_KP_BANDS][NUM_FREQ_BANDS] = {
        { 5, 6, 6, 7, 7, 8, 9, 10 },
        { 4, 4, 5, 5, 6, 7, 8, 9 },
        { 4, 5, 5, 6, 7, 8, 9, 10 },
};

static const unsigned long ref_freq_table[NUM_FREQ_BANDS][2] = {
        { 10000000, 12500000 },
        { 12500000, 15000000 },
        { 15000000, 20000000 },
        { 20000000, 25000000 },
        { 25000000, 50000000 },
        { 50000000, 75000000 },
        { 75000000, 100000000 },
        { 100000000, 125000000 },
};
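
/*
 * Each row of ref_freq_table is a [min, max) reference-frequency band in Hz;
 * the columns of kp_table are indexed by the same band number, so get_kp()
 * below maps a (Kp band, reference frequency) pair to a Kp value.
 */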

enum vco_freq_range {
        VCO_LOW       = 700000000U,
        VCO_MID       = 1200000000U,
        VCO_HIGH      = 2200000000U,
        VCO_HIGH_HIGH = 3100000000U,
        VCO_MAX       = 4000000000U,
};

struct iproc_pll {
        void __iomem *status_base;
        void __iomem *control_base;
        void __iomem *pwr_base;
        void __iomem *asiu_base;

        const struct iproc_pll_ctrl *ctrl;
        const struct iproc_pll_vco_param *vco_param;
        unsigned int num_vco_entries;
};

struct iproc_clk {
        struct clk_hw hw;
        struct iproc_pll *pll;
        const struct iproc_clk_ctrl *ctrl;
};

#define to_iproc_clk(hw) container_of(hw, struct iproc_clk, hw)

static int pll_calc_param(unsigned long target_rate,
                          unsigned long parent_rate,
                          struct iproc_pll_vco_param *vco_out)
{
        u64 ndiv_int, ndiv_frac, residual;

        ndiv_int = target_rate / parent_rate;

        if (!ndiv_int || (ndiv_int > 255))
                return -EINVAL;

        residual = target_rate - (ndiv_int * parent_rate);
        residual <<= 20;

        /*
         * Add half of the divisor so the result will be rounded to closest
         * instead of rounded down.
         */
        residual += (parent_rate / 2);
        ndiv_frac = div64_u64((u64)residual, (u64)parent_rate);

        vco_out->ndiv_int = ndiv_int;
        vco_out->ndiv_frac = ndiv_frac;
        vco_out->pdiv = 1;

        vco_out->rate = vco_out->ndiv_int * parent_rate;
        residual = (u64)vco_out->ndiv_frac * (u64)parent_rate;
        residual >>= 20;
        vco_out->rate += residual;

        return 0;
}
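
/*
 * Worked example (illustrative, not taken from a datasheet): with
 * parent_rate = 25 MHz and target_rate = 1405 MHz, ndiv_int = 56 and the
 * 5 MHz remainder becomes ndiv_frac = ((5000000 << 20) + 12500000) / 25000000
 * = 209715 (about 0.2 in 20-bit fixed point), so the computed VCO rate is
 * 1404999995 Hz, a few Hz below the target due to the 20-bit fraction.
 */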

/*
 * Based on the target frequency, find a match from the VCO frequency parameter
 * table and return its index
 */
static int pll_get_rate_index(struct iproc_pll *pll, unsigned int target_rate)
{
        int i;

        for (i = 0; i < pll->num_vco_entries; i++)
                if (target_rate == pll->vco_param[i].rate)
                        break;

        if (i >= pll->num_vco_entries)
                return -EINVAL;

        return i;
}

static int get_kp(unsigned long ref_freq, enum kp_band kp_index)
{
        int i;

        if (ref_freq < ref_freq_table[0][0])
                return -EINVAL;

        for (i = 0; i < NUM_FREQ_BANDS; i++) {
                if (ref_freq >= ref_freq_table[i][0] &&
                    ref_freq < ref_freq_table[i][1])
                        return kp_table[kp_index][i];
        }

        return -EINVAL;
}
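
/*
 * Illustrative example: a 25 MHz reference falls in the { 25000000, 50000000 }
 * band (index 4), so get_kp(25000000, KP_BAND_MID) returns kp_table[0][4] = 7.
 */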

static int pll_wait_for_lock(struct iproc_pll *pll)
{
        int i;
        const struct iproc_pll_ctrl *ctrl = pll->ctrl;

        for (i = 0; i < LOCK_DELAY; i++) {
                u32 val = readl(pll->status_base + ctrl->status.offset);

                if (val & (1 << ctrl->status.shift))
                        return 0;
                udelay(10);
        }

        return -EIO;
}
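
/*
 * With LOCK_DELAY iterations of udelay(10), the poll above waits at most
 * about 1 ms for the PLL to report lock before giving up with -EIO.
 */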

static void iproc_pll_write(const struct iproc_pll *pll, void __iomem *base,
                            const u32 offset, u32 val)
{
        const struct iproc_pll_ctrl *ctrl = pll->ctrl;

        writel(val, base + offset);

        if (unlikely(ctrl->flags & IPROC_CLK_NEEDS_READ_BACK &&
                     (base == pll->status_base || base == pll->control_base)))
                val = readl(base + offset);
}

static void __pll_disable(struct iproc_pll *pll)
{
        const struct iproc_pll_ctrl *ctrl = pll->ctrl;
        u32 val;

        if (ctrl->flags & IPROC_CLK_PLL_ASIU) {
                val = readl(pll->asiu_base + ctrl->asiu.offset);
                val &= ~(1 << ctrl->asiu.en_shift);
                iproc_pll_write(pll, pll->asiu_base, ctrl->asiu.offset, val);
        }

        if (ctrl->flags & IPROC_CLK_EMBED_PWRCTRL) {
                val = readl(pll->control_base + ctrl->aon.offset);
                val |= bit_mask(ctrl->aon.pwr_width) << ctrl->aon.pwr_shift;
                iproc_pll_write(pll, pll->control_base, ctrl->aon.offset, val);
        }

        if (pll->pwr_base) {
                /* latch input value so core power can be shut down */
                val = readl(pll->pwr_base + ctrl->aon.offset);
                val |= 1 << ctrl->aon.iso_shift;
                iproc_pll_write(pll, pll->pwr_base, ctrl->aon.offset, val);

                /* power down the core */
                val &= ~(bit_mask(ctrl->aon.pwr_width) << ctrl->aon.pwr_shift);
                iproc_pll_write(pll, pll->pwr_base, ctrl->aon.offset, val);
        }
}

static int __pll_enable(struct iproc_pll *pll)
{
        const struct iproc_pll_ctrl *ctrl = pll->ctrl;
        u32 val;

        if (ctrl->flags & IPROC_CLK_EMBED_PWRCTRL) {
                val = readl(pll->control_base + ctrl->aon.offset);
                val &= ~(bit_mask(ctrl->aon.pwr_width) << ctrl->aon.pwr_shift);
                iproc_pll_write(pll, pll->control_base, ctrl->aon.offset, val);
        }

        if (pll->pwr_base) {
                /* power up the PLL and make sure it's not latched */
                val = readl(pll->pwr_base + ctrl->aon.offset);
                val |= bit_mask(ctrl->aon.pwr_width) << ctrl->aon.pwr_shift;
                val &= ~(1 << ctrl->aon.iso_shift);
                iproc_pll_write(pll, pll->pwr_base, ctrl->aon.offset, val);
        }

        /* certain PLLs also need to be ungated from the ASIU top level */
        if (ctrl->flags & IPROC_CLK_PLL_ASIU) {
                val = readl(pll->asiu_base + ctrl->asiu.offset);
                val |= (1 << ctrl->asiu.en_shift);
                iproc_pll_write(pll, pll->asiu_base, ctrl->asiu.offset, val);
        }

        return 0;
}

static void __pll_put_in_reset(struct iproc_pll *pll)
{
        u32 val;
        const struct iproc_pll_ctrl *ctrl = pll->ctrl;
        const struct iproc_pll_reset_ctrl *reset = &ctrl->reset;

        val = readl(pll->control_base + reset->offset);
        if (ctrl->flags & IPROC_CLK_PLL_RESET_ACTIVE_LOW)
                val |= BIT(reset->reset_shift) | BIT(reset->p_reset_shift);
        else
                val &= ~(BIT(reset->reset_shift) | BIT(reset->p_reset_shift));
        iproc_pll_write(pll, pll->control_base, reset->offset, val);
}

static void __pll_bring_out_reset(struct iproc_pll *pll, unsigned int kp,
                                  unsigned int ka, unsigned int ki)
{
        u32 val;
        const struct iproc_pll_ctrl *ctrl = pll->ctrl;
        const struct iproc_pll_reset_ctrl *reset = &ctrl->reset;
        const struct iproc_pll_dig_filter_ctrl *dig_filter = &ctrl->dig_filter;

        val = readl(pll->control_base + dig_filter->offset);
        val &= ~(bit_mask(dig_filter->ki_width) << dig_filter->ki_shift |
                 bit_mask(dig_filter->kp_width) << dig_filter->kp_shift |
                 bit_mask(dig_filter->ka_width) << dig_filter->ka_shift);
        val |= ki << dig_filter->ki_shift | kp << dig_filter->kp_shift |
               ka << dig_filter->ka_shift;
        iproc_pll_write(pll, pll->control_base, dig_filter->offset, val);

        val = readl(pll->control_base + reset->offset);
        if (ctrl->flags & IPROC_CLK_PLL_RESET_ACTIVE_LOW)
                val &= ~(BIT(reset->reset_shift) | BIT(reset->p_reset_shift));
        else
                val |= BIT(reset->reset_shift) | BIT(reset->p_reset_shift);
        iproc_pll_write(pll, pll->control_base, reset->offset, val);
}

/*
 * Determines if the change to be applied to the PLL is minor (just an update
 * of the fractional divider). If so, then we can avoid going through a
 * disruptive reset and lock sequence.
 */
static bool pll_fractional_change_only(struct iproc_pll *pll,
                                       struct iproc_pll_vco_param *vco)
{
        const struct iproc_pll_ctrl *ctrl = pll->ctrl;
        u32 val;
        u32 ndiv_int;
        unsigned int pdiv;

        /* PLL needs to be locked */
        val = readl(pll->status_base + ctrl->status.offset);
        if ((val & (1 << ctrl->status.shift)) == 0)
                return false;

        val = readl(pll->control_base + ctrl->ndiv_int.offset);
        ndiv_int = (val >> ctrl->ndiv_int.shift) &
                   bit_mask(ctrl->ndiv_int.width);

        if (ndiv_int != vco->ndiv_int)
                return false;

        val = readl(pll->control_base + ctrl->pdiv.offset);
        pdiv = (val >> ctrl->pdiv.shift) & bit_mask(ctrl->pdiv.width);

        if (pdiv != vco->pdiv)
                return false;

        return true;
}
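
/*
 * pll_set_rate() below uses this check as a shortcut: when the PLL is already
 * locked and only ndiv_frac needs to change, it rewrites the fractional
 * register in place instead of running the full reset, divider programming
 * and re-lock sequence.
 */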

static int pll_set_rate(struct iproc_clk *clk, struct iproc_pll_vco_param *vco,
                        unsigned long parent_rate)
{
        struct iproc_pll *pll = clk->pll;
        const struct iproc_pll_ctrl *ctrl = pll->ctrl;
        int ka = 0, ki, kp, ret;
        unsigned long rate = vco->rate;
        u32 val;
        enum kp_band kp_index;
        unsigned long ref_freq;
        const char *clk_name = clk_hw_get_name(&clk->hw);

        /*
         * reference frequency = parent frequency / PDIV
         * If PDIV = 0, then it becomes a multiplier (x2)
         */
        if (vco->pdiv == 0)
                ref_freq = parent_rate * 2;
        else
                ref_freq = parent_rate / vco->pdiv;
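
        /*
         * For example, with a 25 MHz parent clock: pdiv = 1 keeps ref_freq at
         * 25 MHz, while pdiv = 0 doubles it to 50 MHz.
         */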

        /* determine Ki and Kp index based on target VCO frequency */
        if (rate >= VCO_LOW && rate < VCO_HIGH) {
                ki = 4;
                kp_index = KP_BAND_MID;
        } else if (rate >= VCO_HIGH && rate < VCO_HIGH_HIGH) {
                ki = 3;
                kp_index = KP_BAND_HIGH;
        } else if (rate >= VCO_HIGH_HIGH && rate < VCO_MAX) {
                ki = 3;
                kp_index = KP_BAND_HIGH_HIGH;
        } else {
                pr_err("%s: pll: %s has invalid rate: %lu\n", __func__,
                       clk_name, rate);
                return -EINVAL;
        }

        kp = get_kp(ref_freq, kp_index);
        if (kp < 0) {
                pr_err("%s: pll: %s has invalid kp\n", __func__, clk_name);
                return kp;
        }

        ret = __pll_enable(pll);
        if (ret) {
                pr_err("%s: pll: %s fails to enable\n", __func__, clk_name);
                return ret;
        }

        if (pll_fractional_change_only(clk->pll, vco)) {
                /* program fractional part of NDIV */
                if (ctrl->flags & IPROC_CLK_PLL_HAS_NDIV_FRAC) {
                        val = readl(pll->control_base + ctrl->ndiv_frac.offset);
                        val &= ~(bit_mask(ctrl->ndiv_frac.width) <<
                                 ctrl->ndiv_frac.shift);
                        val |= vco->ndiv_frac << ctrl->ndiv_frac.shift;
                        iproc_pll_write(pll, pll->control_base,
                                        ctrl->ndiv_frac.offset, val);
                        return 0;
                }
        }

        /* put PLL in reset */
        __pll_put_in_reset(pll);

        /* set PLL in user mode before modifying PLL controls */
        if (ctrl->flags & IPROC_CLK_PLL_USER_MODE_ON) {
                val = readl(pll->control_base + ctrl->macro_mode.offset);
                val &= ~(bit_mask(ctrl->macro_mode.width) <<
                         ctrl->macro_mode.shift);
                val |= PLL_USER_MODE << ctrl->macro_mode.shift;
                iproc_pll_write(pll, pll->control_base,
                                ctrl->macro_mode.offset, val);
        }

        iproc_pll_write(pll, pll->control_base, ctrl->vco_ctrl.u_offset, 0);

        val = readl(pll->control_base + ctrl->vco_ctrl.l_offset);

        if (rate >= VCO_LOW && rate < VCO_MID)
                val |= (1 << PLL_VCO_LOW_SHIFT);

        if (rate < VCO_HIGH)
                val &= ~(1 << PLL_VCO_HIGH_SHIFT);
        else
                val |= (1 << PLL_VCO_HIGH_SHIFT);

        iproc_pll_write(pll, pll->control_base, ctrl->vco_ctrl.l_offset, val);

        /* program integer part of NDIV */
        val = readl(pll->control_base + ctrl->ndiv_int.offset);
        val &= ~(bit_mask(ctrl->ndiv_int.width) << ctrl->ndiv_int.shift);
        val |= vco->ndiv_int << ctrl->ndiv_int.shift;
        iproc_pll_write(pll, pll->control_base, ctrl->ndiv_int.offset, val);

        /* program fractional part of NDIV */
        if (ctrl->flags & IPROC_CLK_PLL_HAS_NDIV_FRAC) {
                val = readl(pll->control_base + ctrl->ndiv_frac.offset);
                val &= ~(bit_mask(ctrl->ndiv_frac.width) <<
                         ctrl->ndiv_frac.shift);
                val |= vco->ndiv_frac << ctrl->ndiv_frac.shift;
                iproc_pll_write(pll, pll->control_base, ctrl->ndiv_frac.offset,
                                val);
        }

        /* program PDIV */
        val = readl(pll->control_base + ctrl->pdiv.offset);
        val &= ~(bit_mask(ctrl->pdiv.width) << ctrl->pdiv.shift);
        val |= vco->pdiv << ctrl->pdiv.shift;
        iproc_pll_write(pll, pll->control_base, ctrl->pdiv.offset, val);

        __pll_bring_out_reset(pll, kp, ka, ki);

        ret = pll_wait_for_lock(pll);
        if (ret < 0) {
                pr_err("%s: pll: %s failed to lock\n", __func__, clk_name);
                return ret;
        }

        return 0;
}
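
/*
 * The sequence above, in order: make sure the PLL is powered/ungated, take
 * the fractional-only shortcut when pll_fractional_change_only() allows it,
 * otherwise put the PLL in reset, optionally switch to user (MACRO_SELECT)
 * mode, set the VCO range bits, program NDIV_INT/NDIV_FRAC/PDIV, bring the
 * PLL out of reset with the chosen Ki/Kp/Ka values and wait for lock.
 */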

static int iproc_pll_enable(struct clk_hw *hw)
{
        struct iproc_clk *clk = to_iproc_clk(hw);
        struct iproc_pll *pll = clk->pll;

        return __pll_enable(pll);
}

static void iproc_pll_disable(struct clk_hw *hw)
{
        struct iproc_clk *clk = to_iproc_clk(hw);
        struct iproc_pll *pll = clk->pll;
        const struct iproc_pll_ctrl *ctrl = pll->ctrl;

        if (ctrl->flags & IPROC_CLK_AON)
                return;

        __pll_disable(pll);
}

static unsigned long iproc_pll_recalc_rate(struct clk_hw *hw,
                                           unsigned long parent_rate)
{
        struct iproc_clk *clk = to_iproc_clk(hw);
        struct iproc_pll *pll = clk->pll;
        const struct iproc_pll_ctrl *ctrl = pll->ctrl;
        u32 val;
        u64 ndiv, ndiv_int, ndiv_frac;
        unsigned int pdiv;
        unsigned long rate;

        if (parent_rate == 0)
                return 0;

        /* PLL needs to be locked */
        val = readl(pll->status_base + ctrl->status.offset);
        if ((val & (1 << ctrl->status.shift)) == 0)
                return 0;

        /*
         * PLL output frequency =
         * (ndiv_int + ndiv_frac / 2^20) * (parent clock rate / pdiv)
         */
        val = readl(pll->control_base + ctrl->ndiv_int.offset);
        ndiv_int = (val >> ctrl->ndiv_int.shift) &
                   bit_mask(ctrl->ndiv_int.width);
        ndiv = ndiv_int << 20;

        if (ctrl->flags & IPROC_CLK_PLL_HAS_NDIV_FRAC) {
                val = readl(pll->control_base + ctrl->ndiv_frac.offset);
                ndiv_frac = (val >> ctrl->ndiv_frac.shift) &
                            bit_mask(ctrl->ndiv_frac.width);
                ndiv += ndiv_frac;
        }

        val = readl(pll->control_base + ctrl->pdiv.offset);
        pdiv = (val >> ctrl->pdiv.shift) & bit_mask(ctrl->pdiv.width);

        rate = (ndiv * parent_rate) >> 20;

        if (pdiv == 0)
                rate *= 2;
        else
                rate /= pdiv;

        return rate;
}
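
/*
 * Illustrative example: ndiv_int = 56, ndiv_frac = 209715, pdiv = 1 and a
 * 25 MHz parent give ((56 << 20) + 209715) * 25000000 >> 20, i.e. roughly
 * 1.405 GHz, matching the pll_calc_param() example earlier in this file.
 */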

static int iproc_pll_determine_rate(struct clk_hw *hw,
                                    struct clk_rate_request *req)
{
        unsigned int i;
        struct iproc_clk *clk = to_iproc_clk(hw);
        struct iproc_pll *pll = clk->pll;
        const struct iproc_pll_ctrl *ctrl = pll->ctrl;
        unsigned long diff, best_diff;
        unsigned int best_idx = 0;
        int ret;

        if (req->rate == 0 || req->best_parent_rate == 0)
                return -EINVAL;

        if (ctrl->flags & IPROC_CLK_PLL_CALC_PARAM) {
                struct iproc_pll_vco_param vco_param;

                ret = pll_calc_param(req->rate, req->best_parent_rate,
                                     &vco_param);
                if (ret)
                        return ret;

                req->rate = vco_param.rate;

                return 0;
        }

        if (!pll->vco_param)
                return -EINVAL;

        best_diff = ULONG_MAX;
        for (i = 0; i < pll->num_vco_entries; i++) {
                diff = abs(req->rate - pll->vco_param[i].rate);
                if (diff <= best_diff) {
                        best_diff = diff;
                        best_idx = i;
                }
                /* break now if perfect match */
                if (diff == 0)
                        break;
        }

        req->rate = pll->vco_param[best_idx].rate;

        return 0;
}
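
/*
 * Illustrative example: if the VCO table holds 1350 MHz and 1400 MHz entries,
 * a request for 1390 MHz resolves to the closest entry, 1400 MHz. PLLs
 * flagged with IPROC_CLK_PLL_CALC_PARAM skip the table and compute the
 * dividers directly via pll_calc_param().
 */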

static int iproc_pll_set_rate(struct clk_hw *hw, unsigned long rate,
                              unsigned long parent_rate)
{
        struct iproc_clk *clk = to_iproc_clk(hw);
        struct iproc_pll *pll = clk->pll;
        const struct iproc_pll_ctrl *ctrl = pll->ctrl;
        struct iproc_pll_vco_param vco_param;
        int rate_index, ret;

        if (ctrl->flags & IPROC_CLK_PLL_CALC_PARAM) {
                ret = pll_calc_param(rate, parent_rate, &vco_param);
                if (ret)
                        return ret;
        } else {
                rate_index = pll_get_rate_index(pll, rate);
                if (rate_index < 0)
                        return rate_index;

                vco_param = pll->vco_param[rate_index];
        }

        ret = pll_set_rate(clk, &vco_param, parent_rate);
        return ret;
}

static const struct clk_ops iproc_pll_ops = {
        .enable = iproc_pll_enable,
        .disable = iproc_pll_disable,
        .recalc_rate = iproc_pll_recalc_rate,
        .determine_rate = iproc_pll_determine_rate,
        .set_rate = iproc_pll_set_rate,
};

static int iproc_clk_enable(struct clk_hw *hw)
{
        struct iproc_clk *clk = to_iproc_clk(hw);
        const struct iproc_clk_ctrl *ctrl = clk->ctrl;
        struct iproc_pll *pll = clk->pll;
        u32 val;

        /* channel enable is active low */
        val = readl(pll->control_base + ctrl->enable.offset);
        val &= ~(1 << ctrl->enable.enable_shift);
        iproc_pll_write(pll, pll->control_base, ctrl->enable.offset, val);

        /* also make sure channel is not held */
        val = readl(pll->control_base + ctrl->enable.offset);
        val &= ~(1 << ctrl->enable.hold_shift);
        iproc_pll_write(pll, pll->control_base, ctrl->enable.offset, val);

        return 0;
}

static void iproc_clk_disable(struct clk_hw *hw)
{
        struct iproc_clk *clk = to_iproc_clk(hw);
        const struct iproc_clk_ctrl *ctrl = clk->ctrl;
        struct iproc_pll *pll = clk->pll;
        u32 val;

        if (ctrl->flags & IPROC_CLK_AON)
                return;

        val = readl(pll->control_base + ctrl->enable.offset);
        val |= 1 << ctrl->enable.enable_shift;
        iproc_pll_write(pll, pll->control_base, ctrl->enable.offset, val);
}

static unsigned long iproc_clk_recalc_rate(struct clk_hw *hw,
                                           unsigned long parent_rate)
{
        struct iproc_clk *clk = to_iproc_clk(hw);
        const struct iproc_clk_ctrl *ctrl = clk->ctrl;
        struct iproc_pll *pll = clk->pll;
        u32 val;
        unsigned int mdiv;
        unsigned long rate;

        if (parent_rate == 0)
                return 0;

        val = readl(pll->control_base + ctrl->mdiv.offset);
        mdiv = (val >> ctrl->mdiv.shift) & bit_mask(ctrl->mdiv.width);
        if (mdiv == 0)
                mdiv = 256;

        if (ctrl->flags & IPROC_CLK_MCLK_DIV_BY_2)
                rate = parent_rate / (mdiv * 2);
        else
                rate = parent_rate / mdiv;

        return rate;
}
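
/*
 * Illustrative example: a 1.4 GHz PLL output with mdiv = 7 yields 200 MHz,
 * or 100 MHz when the channel carries the IPROC_CLK_MCLK_DIV_BY_2 flag.
 */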

static int iproc_clk_determine_rate(struct clk_hw *hw,
                                    struct clk_rate_request *req)
{
        unsigned int bestdiv;

        if (req->rate == 0 || req->best_parent_rate == 0)
                return -EINVAL;

        if (req->rate == req->best_parent_rate)
                return 0;

        bestdiv = DIV_ROUND_CLOSEST(req->best_parent_rate, req->rate);
        if (bestdiv < 2) {
                req->rate = req->best_parent_rate;
                return 0;
        }

        if (bestdiv > 256)
                bestdiv = 256;

        req->rate = req->best_parent_rate / bestdiv;

        return 0;
}

static int iproc_clk_set_rate(struct clk_hw *hw, unsigned long rate,
                              unsigned long parent_rate)
{
        struct iproc_clk *clk = to_iproc_clk(hw);
        const struct iproc_clk_ctrl *ctrl = clk->ctrl;
        struct iproc_pll *pll = clk->pll;
        u32 val;
        unsigned int div;

        if (rate == 0 || parent_rate == 0)
                return -EINVAL;

        div = DIV_ROUND_CLOSEST(parent_rate, rate);
        if (ctrl->flags & IPROC_CLK_MCLK_DIV_BY_2)
                div /= 2;

        if (div > 256)
                return -EINVAL;

        val = readl(pll->control_base + ctrl->mdiv.offset);
        if (div == 256) {
                val &= ~(bit_mask(ctrl->mdiv.width) << ctrl->mdiv.shift);
        } else {
                val &= ~(bit_mask(ctrl->mdiv.width) << ctrl->mdiv.shift);
                val |= div << ctrl->mdiv.shift;
        }
        iproc_pll_write(pll, pll->control_base, ctrl->mdiv.offset, val);

        return 0;
}
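
/*
 * Illustrative example: asking for 200 MHz from a 1.4 GHz PLL output gives
 * div = DIV_ROUND_CLOSEST(1400000000, 200000000) = 7, which is written to the
 * MDIV field; a divider of 256 is encoded as 0 in the register.
 */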

static const struct clk_ops iproc_clk_ops = {
        .enable = iproc_clk_enable,
        .disable = iproc_clk_disable,
        .recalc_rate = iproc_clk_recalc_rate,
        .determine_rate = iproc_clk_determine_rate,
        .set_rate = iproc_clk_set_rate,
};

/*
 * Some PLLs require the PLL SW override bit to be set before changes can be
 * applied to the PLL
 */
static void iproc_pll_sw_cfg(struct iproc_pll *pll)
{
        const struct iproc_pll_ctrl *ctrl = pll->ctrl;

        if (ctrl->flags & IPROC_CLK_PLL_NEEDS_SW_CFG) {
                u32 val;

                val = readl(pll->control_base + ctrl->sw_ctrl.offset);
                val |= BIT(ctrl->sw_ctrl.shift);
                iproc_pll_write(pll, pll->control_base, ctrl->sw_ctrl.offset,
                                val);
        }
}

void iproc_pll_clk_setup(struct device_node *node,
                         const struct iproc_pll_ctrl *pll_ctrl,
                         const struct iproc_pll_vco_param *vco,
                         unsigned int num_vco_entries,
                         const struct iproc_clk_ctrl *clk_ctrl,
                         unsigned int num_clks)
{
        int i, ret;
        struct iproc_pll *pll;
        struct iproc_clk *iclk;
        struct clk_init_data init;
        const char *parent_name;
        struct iproc_clk *iclk_array;
        struct clk_hw_onecell_data *clk_data;
        const char *clk_name;

        if (WARN_ON(!pll_ctrl) || WARN_ON(!clk_ctrl))
                return;

        pll = kzalloc(sizeof(*pll), GFP_KERNEL);
        if (WARN_ON(!pll))
                return;

        clk_data = kzalloc(struct_size(clk_data, hws, num_clks), GFP_KERNEL);
        if (WARN_ON(!clk_data))
                goto err_clk_data;
        clk_data->num = num_clks;

        iclk_array = kcalloc(num_clks, sizeof(struct iproc_clk), GFP_KERNEL);
        if (WARN_ON(!iclk_array))
                goto err_clks;

        pll->control_base = of_iomap(node, 0);
        if (WARN_ON(!pll->control_base))
                goto err_pll_iomap;

        /* Some SoCs do not require the pwr_base, thus failing is not fatal */
        pll->pwr_base = of_iomap(node, 1);

        /* some PLLs require gating control at the top ASIU level */
        if (pll_ctrl->flags & IPROC_CLK_PLL_ASIU) {
                pll->asiu_base = of_iomap(node, 2);
                if (WARN_ON(!pll->asiu_base))
                        goto err_asiu_iomap;
        }

        if (pll_ctrl->flags & IPROC_CLK_PLL_SPLIT_STAT_CTRL) {
                /* Some SoCs have a split status/control. If this does not
                 * exist, assume they are unified.
                 */
                pll->status_base = of_iomap(node, 2);
                if (!pll->status_base)
                        goto err_status_iomap;
        } else
                pll->status_base = pll->control_base;

        /* initialize and register the PLL itself */
        pll->ctrl = pll_ctrl;

        iclk = &iclk_array[0];
        iclk->pll = pll;

        ret = of_property_read_string_index(node, "clock-output-names",
                                            0, &clk_name);
        if (WARN_ON(ret))
                goto err_pll_register;

        init.name = clk_name;
        init.ops = &iproc_pll_ops;
        init.flags = 0;
        parent_name = of_clk_get_parent_name(node, 0);
        init.parent_names = (parent_name ? &parent_name : NULL);
        init.num_parents = (parent_name ? 1 : 0);
        iclk->hw.init = &init;

        if (vco) {
                pll->num_vco_entries = num_vco_entries;
                pll->vco_param = vco;
        }

        iproc_pll_sw_cfg(pll);

        ret = clk_hw_register(NULL, &iclk->hw);
        if (WARN_ON(ret))
                goto err_pll_register;

        clk_data->hws[0] = &iclk->hw;
        parent_name = clk_name;

        /* now initialize and register all leaf clocks */
        for (i = 1; i < num_clks; i++) {
                memset(&init, 0, sizeof(init));

                ret = of_property_read_string_index(node, "clock-output-names",
                                                    i, &clk_name);
                if (WARN_ON(ret))
                        goto err_clk_register;

                iclk = &iclk_array[i];
                iclk->pll = pll;
                iclk->ctrl = &clk_ctrl[i];

                init.name = clk_name;
                init.ops = &iproc_clk_ops;
                init.flags = 0;
                init.parent_names = (parent_name ? &parent_name : NULL);
                init.num_parents = (parent_name ? 1 : 0);
                iclk->hw.init = &init;

                ret = clk_hw_register(NULL, &iclk->hw);
                if (WARN_ON(ret))
                        goto err_clk_register;

                clk_data->hws[i] = &iclk->hw;
        }

        ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
        if (WARN_ON(ret))
                goto err_clk_register;

        return;

err_clk_register:
        while (--i >= 0)
                clk_hw_unregister(clk_data->hws[i]);

err_pll_register:
        if (pll->status_base != pll->control_base)
                iounmap(pll->status_base);

err_status_iomap:
        if (pll->asiu_base)
                iounmap(pll->asiu_base);

err_asiu_iomap:
        if (pll->pwr_base)
                iounmap(pll->pwr_base);

        iounmap(pll->control_base);

err_pll_iomap:
        kfree(iclk_array);

err_clks:
        kfree(clk_data);

err_clk_data:
        kfree(pll);
}