drivers/clk/bcm/clk-iproc-pll.c

/*
 * Copyright (C) 2014 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/clkdev.h>
#include <linux/of_address.h>
#include <linux/delay.h>

#include "clk-iproc.h"

#define PLL_VCO_HIGH_SHIFT 19
#define PLL_VCO_LOW_SHIFT  30

/*
 * PLL MACRO_SELECT modes 0 to 5 choose pre-calculated PLL output frequencies
 * from a look-up table. Mode 7 allows the user to manipulate the PLL clock
 * dividers directly.
 */
#define PLL_USER_MODE 7

/* number of delay loops waiting for the PLL to lock */
#define LOCK_DELAY 100

/* number of VCO frequency bands */
#define NUM_FREQ_BANDS 8

#define NUM_KP_BANDS 3

enum kp_band {
        KP_BAND_MID = 0,
        KP_BAND_HIGH,
        KP_BAND_HIGH_HIGH
};

static const unsigned int kp_table[NUM_KP_BANDS][NUM_FREQ_BANDS] = {
        { 5, 6, 6, 7, 7, 8, 9, 10 },
        { 4, 4, 5, 5, 6, 7, 8, 9 },
        { 4, 5, 5, 6, 7, 8, 9, 10 },
};

static const unsigned long ref_freq_table[NUM_FREQ_BANDS][2] = {
        { 10000000, 12500000 },
        { 12500000, 15000000 },
        { 15000000, 20000000 },
        { 20000000, 25000000 },
        { 25000000, 50000000 },
        { 50000000, 75000000 },
        { 75000000, 100000000 },
        { 100000000, 125000000 },
};

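/*
 * Example of how the two tables above combine: a 25 MHz reference clock falls
 * in frequency band 4 ([25 MHz, 50 MHz)), so get_kp(25000000, KP_BAND_MID)
 * returns kp_table[KP_BAND_MID][4] = 7.
 */
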
enum vco_freq_range {
        VCO_LOW       = 700000000U,
        VCO_MID       = 1200000000U,
        VCO_HIGH      = 2200000000U,
        VCO_HIGH_HIGH = 3100000000U,
        VCO_MAX       = 4000000000U,
};

struct iproc_pll {
        void __iomem *status_base;
        void __iomem *control_base;
        void __iomem *pwr_base;
        void __iomem *asiu_base;

        const struct iproc_pll_ctrl *ctrl;
        const struct iproc_pll_vco_param *vco_param;
        unsigned int num_vco_entries;
};

struct iproc_clk {
        struct clk_hw hw;
        struct iproc_pll *pll;
        const struct iproc_clk_ctrl *ctrl;
};

#define to_iproc_clk(hw) container_of(hw, struct iproc_clk, hw)

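/*
 * Compute the VCO parameters (ndiv_int, 20-bit ndiv_frac, pdiv fixed at 1)
 * needed to generate target_rate from parent_rate, and report the rate the
 * hardware will actually produce in vco_out->rate.
 */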
static int pll_calc_param(unsigned long target_rate,
                          unsigned long parent_rate,
                          struct iproc_pll_vco_param *vco_out)
{
        u64 ndiv_int, ndiv_frac, residual;

        ndiv_int = target_rate / parent_rate;

        if (!ndiv_int || (ndiv_int > 255))
                return -EINVAL;

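        /*
         * The fractional divider word is a 20-bit fixed-point value, i.e.
         * VCO rate = parent_rate * (ndiv_int + ndiv_frac / 2^20).
         * For example, a 1062.5 MHz target from a 25 MHz parent yields
         * ndiv_int = 42 and ndiv_frac = 0.5 * 2^20 = 524288.
         */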
        residual = target_rate - (ndiv_int * parent_rate);
        residual <<= 20;

        /*
         * Add half of the divisor so the result will be rounded to closest
         * instead of rounded down.
         */
        residual += (parent_rate / 2);
        ndiv_frac = div64_u64((u64)residual, (u64)parent_rate);

        vco_out->ndiv_int = ndiv_int;
        vco_out->ndiv_frac = ndiv_frac;
        vco_out->pdiv = 1;

        vco_out->rate = vco_out->ndiv_int * parent_rate;
        residual = (u64)vco_out->ndiv_frac * (u64)parent_rate;
        residual >>= 20;
        vco_out->rate += residual;

        return 0;
}

/*
 * Based on the target frequency, find a match from the VCO frequency parameter
 * table and return its index.
 */
static int pll_get_rate_index(struct iproc_pll *pll, unsigned int target_rate)
{
        int i;

        for (i = 0; i < pll->num_vco_entries; i++)
                if (target_rate == pll->vco_param[i].rate)
                        break;

        if (i >= pll->num_vco_entries)
                return -EINVAL;

        return i;
}

static int get_kp(unsigned long ref_freq, enum kp_band kp_index)
{
        int i;

        if (ref_freq < ref_freq_table[0][0])
                return -EINVAL;

        for (i = 0; i < NUM_FREQ_BANDS; i++) {
                if (ref_freq >= ref_freq_table[i][0] &&
                    ref_freq < ref_freq_table[i][1])
                        return kp_table[kp_index][i];
        }

        return -EINVAL;
}

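/*
 * Poll the PLL lock bit. Each iteration waits 10 us, so with LOCK_DELAY
 * iterations the worst-case wait is about 1 ms before giving up with -EIO.
 */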
static int pll_wait_for_lock(struct iproc_pll *pll)
{
        int i;
        const struct iproc_pll_ctrl *ctrl = pll->ctrl;

        for (i = 0; i < LOCK_DELAY; i++) {
                u32 val = readl(pll->status_base + ctrl->status.offset);

                if (val & (1 << ctrl->status.shift))
                        return 0;
                udelay(10);
        }

        return -EIO;
}

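/*
 * Write a PLL register. On controllers flagged with IPROC_CLK_NEEDS_READ_BACK,
 * writes to the status/control blocks are immediately read back so the write
 * has taken effect before the caller continues.
 */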
static void iproc_pll_write(const struct iproc_pll *pll, void __iomem *base,
                            const u32 offset, u32 val)
{
        const struct iproc_pll_ctrl *ctrl = pll->ctrl;

        writel(val, base + offset);

        if (unlikely(ctrl->flags & IPROC_CLK_NEEDS_READ_BACK &&
                     (base == pll->status_base || base == pll->control_base)))
                val = readl(base + offset);
}

static void __pll_disable(struct iproc_pll *pll)
{
        const struct iproc_pll_ctrl *ctrl = pll->ctrl;
        u32 val;

        if (ctrl->flags & IPROC_CLK_PLL_ASIU) {
                val = readl(pll->asiu_base + ctrl->asiu.offset);
                val &= ~(1 << ctrl->asiu.en_shift);
                iproc_pll_write(pll, pll->asiu_base, ctrl->asiu.offset, val);
        }

        if (ctrl->flags & IPROC_CLK_EMBED_PWRCTRL) {
                val = readl(pll->control_base + ctrl->aon.offset);
                val |= bit_mask(ctrl->aon.pwr_width) << ctrl->aon.pwr_shift;
                iproc_pll_write(pll, pll->control_base, ctrl->aon.offset, val);
        }

        if (pll->pwr_base) {
                /* latch input value so core power can be shut down */
                val = readl(pll->pwr_base + ctrl->aon.offset);
                val |= 1 << ctrl->aon.iso_shift;
                iproc_pll_write(pll, pll->pwr_base, ctrl->aon.offset, val);

                /* power down the core */
                val &= ~(bit_mask(ctrl->aon.pwr_width) << ctrl->aon.pwr_shift);
                iproc_pll_write(pll, pll->pwr_base, ctrl->aon.offset, val);
        }
}

static int __pll_enable(struct iproc_pll *pll)
{
        const struct iproc_pll_ctrl *ctrl = pll->ctrl;
        u32 val;

        if (ctrl->flags & IPROC_CLK_EMBED_PWRCTRL) {
                val = readl(pll->control_base + ctrl->aon.offset);
                val &= ~(bit_mask(ctrl->aon.pwr_width) << ctrl->aon.pwr_shift);
                iproc_pll_write(pll, pll->control_base, ctrl->aon.offset, val);
        }

        if (pll->pwr_base) {
                /* power up the PLL and make sure it's not latched */
                val = readl(pll->pwr_base + ctrl->aon.offset);
                val |= bit_mask(ctrl->aon.pwr_width) << ctrl->aon.pwr_shift;
                val &= ~(1 << ctrl->aon.iso_shift);
                iproc_pll_write(pll, pll->pwr_base, ctrl->aon.offset, val);
        }

        /* certain PLLs also need to be ungated from the ASIU top level */
        if (ctrl->flags & IPROC_CLK_PLL_ASIU) {
                val = readl(pll->asiu_base + ctrl->asiu.offset);
                val |= (1 << ctrl->asiu.en_shift);
                iproc_pll_write(pll, pll->asiu_base, ctrl->asiu.offset, val);
        }

        return 0;
}

static void __pll_put_in_reset(struct iproc_pll *pll)
{
        u32 val;
        const struct iproc_pll_ctrl *ctrl = pll->ctrl;
        const struct iproc_pll_reset_ctrl *reset = &ctrl->reset;

        val = readl(pll->control_base + reset->offset);
        if (ctrl->flags & IPROC_CLK_PLL_RESET_ACTIVE_LOW)
                val |= BIT(reset->reset_shift) | BIT(reset->p_reset_shift);
        else
                val &= ~(BIT(reset->reset_shift) | BIT(reset->p_reset_shift));
        iproc_pll_write(pll, pll->control_base, reset->offset, val);
}

static void __pll_bring_out_reset(struct iproc_pll *pll, unsigned int kp,
                                  unsigned int ka, unsigned int ki)
{
        u32 val;
        const struct iproc_pll_ctrl *ctrl = pll->ctrl;
        const struct iproc_pll_reset_ctrl *reset = &ctrl->reset;
        const struct iproc_pll_dig_filter_ctrl *dig_filter = &ctrl->dig_filter;

        val = readl(pll->control_base + dig_filter->offset);
        val &= ~(bit_mask(dig_filter->ki_width) << dig_filter->ki_shift |
                 bit_mask(dig_filter->kp_width) << dig_filter->kp_shift |
                 bit_mask(dig_filter->ka_width) << dig_filter->ka_shift);
        val |= ki << dig_filter->ki_shift | kp << dig_filter->kp_shift |
               ka << dig_filter->ka_shift;
        iproc_pll_write(pll, pll->control_base, dig_filter->offset, val);

        val = readl(pll->control_base + reset->offset);
        if (ctrl->flags & IPROC_CLK_PLL_RESET_ACTIVE_LOW)
                val &= ~(BIT(reset->reset_shift) | BIT(reset->p_reset_shift));
        else
                val |= BIT(reset->reset_shift) | BIT(reset->p_reset_shift);
        iproc_pll_write(pll, pll->control_base, reset->offset, val);
}

/*
 * Determines if the change to be applied to the PLL is minor (just an update
 * of the fractional divider). If so, then we can avoid going through a
 * disruptive reset and lock sequence.
 */
static bool pll_fractional_change_only(struct iproc_pll *pll,
                                       struct iproc_pll_vco_param *vco)
{
        const struct iproc_pll_ctrl *ctrl = pll->ctrl;
        u32 val;
        u32 ndiv_int;
        unsigned int pdiv;

        /* PLL needs to be locked */
        val = readl(pll->status_base + ctrl->status.offset);
        if ((val & (1 << ctrl->status.shift)) == 0)
                return false;

        val = readl(pll->control_base + ctrl->ndiv_int.offset);
        ndiv_int = (val >> ctrl->ndiv_int.shift) &
                bit_mask(ctrl->ndiv_int.width);

        if (ndiv_int != vco->ndiv_int)
                return false;

        val = readl(pll->control_base + ctrl->pdiv.offset);
        pdiv = (val >> ctrl->pdiv.shift) & bit_mask(ctrl->pdiv.width);

        if (pdiv != vco->pdiv)
                return false;

        return true;
}

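/*
 * Program the PLL to the given VCO parameters: power up the PLL and, unless
 * only the NDIV fractional word changes, run the full sequence of reset,
 * optional user mode select, VCO range bits, NDIV integer/fraction, PDIV and
 * loop-filter constants (Ki/Kp/Ka), then release reset and wait for lock.
 */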
static int pll_set_rate(struct iproc_clk *clk, struct iproc_pll_vco_param *vco,
                        unsigned long parent_rate)
{
        struct iproc_pll *pll = clk->pll;
        const struct iproc_pll_ctrl *ctrl = pll->ctrl;
        int ka = 0, ki, kp, ret;
        unsigned long rate = vco->rate;
        u32 val;
        enum kp_band kp_index;
        unsigned long ref_freq;
        const char *clk_name = clk_hw_get_name(&clk->hw);

        /*
         * reference frequency = parent frequency / PDIV
         * If PDIV = 0, then it becomes a multiplier (x2)
         */
        if (vco->pdiv == 0)
                ref_freq = parent_rate * 2;
        else
                ref_freq = parent_rate / vco->pdiv;

        /* determine Ki and Kp index based on target VCO frequency */
        if (rate >= VCO_LOW && rate < VCO_HIGH) {
                ki = 4;
                kp_index = KP_BAND_MID;
        } else if (rate >= VCO_HIGH && rate < VCO_HIGH_HIGH) {
                ki = 3;
                kp_index = KP_BAND_HIGH;
        } else if (rate >= VCO_HIGH_HIGH && rate < VCO_MAX) {
                ki = 3;
                kp_index = KP_BAND_HIGH_HIGH;
        } else {
                pr_err("%s: pll: %s has invalid rate: %lu\n", __func__,
                       clk_name, rate);
                return -EINVAL;
        }

        kp = get_kp(ref_freq, kp_index);
        if (kp < 0) {
                pr_err("%s: pll: %s has invalid kp\n", __func__, clk_name);
                return kp;
        }

        ret = __pll_enable(pll);
        if (ret) {
                pr_err("%s: pll: %s fails to enable\n", __func__, clk_name);
                return ret;
        }

        if (pll_fractional_change_only(clk->pll, vco)) {
                /* program fractional part of NDIV */
                if (ctrl->flags & IPROC_CLK_PLL_HAS_NDIV_FRAC) {
                        val = readl(pll->control_base + ctrl->ndiv_frac.offset);
                        val &= ~(bit_mask(ctrl->ndiv_frac.width) <<
                                 ctrl->ndiv_frac.shift);
                        val |= vco->ndiv_frac << ctrl->ndiv_frac.shift;
                        iproc_pll_write(pll, pll->control_base,
                                        ctrl->ndiv_frac.offset, val);
                        return 0;
                }
        }

        /* put PLL in reset */
        __pll_put_in_reset(pll);

        /* set PLL in user mode before modifying PLL controls */
        if (ctrl->flags & IPROC_CLK_PLL_USER_MODE_ON) {
                val = readl(pll->control_base + ctrl->macro_mode.offset);
                val &= ~(bit_mask(ctrl->macro_mode.width) <<
                         ctrl->macro_mode.shift);
                val |= PLL_USER_MODE << ctrl->macro_mode.shift;
                iproc_pll_write(pll, pll->control_base,
                                ctrl->macro_mode.offset, val);
        }

        iproc_pll_write(pll, pll->control_base, ctrl->vco_ctrl.u_offset, 0);

        val = readl(pll->control_base + ctrl->vco_ctrl.l_offset);

        if (rate >= VCO_LOW && rate < VCO_MID)
                val |= (1 << PLL_VCO_LOW_SHIFT);

        if (rate < VCO_HIGH)
                val &= ~(1 << PLL_VCO_HIGH_SHIFT);
        else
                val |= (1 << PLL_VCO_HIGH_SHIFT);

        iproc_pll_write(pll, pll->control_base, ctrl->vco_ctrl.l_offset, val);

        /* program integer part of NDIV */
        val = readl(pll->control_base + ctrl->ndiv_int.offset);
        val &= ~(bit_mask(ctrl->ndiv_int.width) << ctrl->ndiv_int.shift);
        val |= vco->ndiv_int << ctrl->ndiv_int.shift;
        iproc_pll_write(pll, pll->control_base, ctrl->ndiv_int.offset, val);

        /* program fractional part of NDIV */
        if (ctrl->flags & IPROC_CLK_PLL_HAS_NDIV_FRAC) {
                val = readl(pll->control_base + ctrl->ndiv_frac.offset);
                val &= ~(bit_mask(ctrl->ndiv_frac.width) <<
                         ctrl->ndiv_frac.shift);
                val |= vco->ndiv_frac << ctrl->ndiv_frac.shift;
                iproc_pll_write(pll, pll->control_base, ctrl->ndiv_frac.offset,
                                val);
        }

        /* program PDIV */
        val = readl(pll->control_base + ctrl->pdiv.offset);
        val &= ~(bit_mask(ctrl->pdiv.width) << ctrl->pdiv.shift);
        val |= vco->pdiv << ctrl->pdiv.shift;
        iproc_pll_write(pll, pll->control_base, ctrl->pdiv.offset, val);

        __pll_bring_out_reset(pll, kp, ka, ki);

        ret = pll_wait_for_lock(pll);
        if (ret < 0) {
                pr_err("%s: pll: %s failed to lock\n", __func__, clk_name);
                return ret;
        }

        return 0;
}

static int iproc_pll_enable(struct clk_hw *hw)
{
        struct iproc_clk *clk = to_iproc_clk(hw);
        struct iproc_pll *pll = clk->pll;

        return __pll_enable(pll);
}

static void iproc_pll_disable(struct clk_hw *hw)
{
        struct iproc_clk *clk = to_iproc_clk(hw);
        struct iproc_pll *pll = clk->pll;
        const struct iproc_pll_ctrl *ctrl = pll->ctrl;

        if (ctrl->flags & IPROC_CLK_AON)
                return;

        __pll_disable(pll);
}

static unsigned long iproc_pll_recalc_rate(struct clk_hw *hw,
                                           unsigned long parent_rate)
{
        struct iproc_clk *clk = to_iproc_clk(hw);
        struct iproc_pll *pll = clk->pll;
        const struct iproc_pll_ctrl *ctrl = pll->ctrl;
        u32 val;
        u64 ndiv, ndiv_int, ndiv_frac;
        unsigned int pdiv;
        unsigned long rate;

        if (parent_rate == 0)
                return 0;

        /* PLL needs to be locked */
        val = readl(pll->status_base + ctrl->status.offset);
        if ((val & (1 << ctrl->status.shift)) == 0)
                return 0;

        /*
         * PLL output frequency =
         *
         * (ndiv_int + ndiv_frac / 2^20) * (parent clock rate / pdiv)
         */
        val = readl(pll->control_base + ctrl->ndiv_int.offset);
        ndiv_int = (val >> ctrl->ndiv_int.shift) &
                bit_mask(ctrl->ndiv_int.width);
        ndiv = ndiv_int << 20;

        if (ctrl->flags & IPROC_CLK_PLL_HAS_NDIV_FRAC) {
                val = readl(pll->control_base + ctrl->ndiv_frac.offset);
                ndiv_frac = (val >> ctrl->ndiv_frac.shift) &
                        bit_mask(ctrl->ndiv_frac.width);
                ndiv += ndiv_frac;
        }

        val = readl(pll->control_base + ctrl->pdiv.offset);
        pdiv = (val >> ctrl->pdiv.shift) & bit_mask(ctrl->pdiv.width);

        /* ndiv is in 2^20 fixed point; shift the product back down */
        rate = (ndiv * parent_rate) >> 20;

        if (pdiv == 0)
                rate *= 2;
        else
                rate /= pdiv;

        return rate;
}

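/*
 * Pick the rate the PLL will actually provide: either computed on the fly
 * from the request (IPROC_CLK_PLL_CALC_PARAM) or snapped to the closest entry
 * in the VCO parameter table.
 */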
static int iproc_pll_determine_rate(struct clk_hw *hw,
                                    struct clk_rate_request *req)
{
        unsigned int i;
        struct iproc_clk *clk = to_iproc_clk(hw);
        struct iproc_pll *pll = clk->pll;
        const struct iproc_pll_ctrl *ctrl = pll->ctrl;
        unsigned long diff, best_diff;
        unsigned int best_idx = 0;
        int ret;

        if (req->rate == 0 || req->best_parent_rate == 0)
                return -EINVAL;

        if (ctrl->flags & IPROC_CLK_PLL_CALC_PARAM) {
                struct iproc_pll_vco_param vco_param;

                ret = pll_calc_param(req->rate, req->best_parent_rate,
                                     &vco_param);
                if (ret)
                        return ret;

                req->rate = vco_param.rate;
                return 0;
        }

        if (!pll->vco_param)
                return -EINVAL;

        best_diff = ULONG_MAX;
        for (i = 0; i < pll->num_vco_entries; i++) {
                diff = abs(req->rate - pll->vco_param[i].rate);
                if (diff <= best_diff) {
                        best_diff = diff;
                        best_idx = i;
                }
                /* break now if perfect match */
                if (diff == 0)
                        break;
        }

        req->rate = pll->vco_param[best_idx].rate;

        return 0;
}

static int iproc_pll_set_rate(struct clk_hw *hw, unsigned long rate,
                              unsigned long parent_rate)
{
        struct iproc_clk *clk = to_iproc_clk(hw);
        struct iproc_pll *pll = clk->pll;
        const struct iproc_pll_ctrl *ctrl = pll->ctrl;
        struct iproc_pll_vco_param vco_param;
        int rate_index, ret;

        if (ctrl->flags & IPROC_CLK_PLL_CALC_PARAM) {
                ret = pll_calc_param(rate, parent_rate, &vco_param);
                if (ret)
                        return ret;
        } else {
                rate_index = pll_get_rate_index(pll, rate);
                if (rate_index < 0)
                        return rate_index;

                vco_param = pll->vco_param[rate_index];
        }

        ret = pll_set_rate(clk, &vco_param, parent_rate);
        return ret;
}

static const struct clk_ops iproc_pll_ops = {
        .enable = iproc_pll_enable,
        .disable = iproc_pll_disable,
        .recalc_rate = iproc_pll_recalc_rate,
        .determine_rate = iproc_pll_determine_rate,
        .set_rate = iproc_pll_set_rate,
};

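/*
 * Leaf clock (channel) operations. Each output channel has an active-low
 * enable bit, a hold bit and an MDIV post-divider in the PLL control space.
 */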
static int iproc_clk_enable(struct clk_hw *hw)
{
        struct iproc_clk *clk = to_iproc_clk(hw);
        const struct iproc_clk_ctrl *ctrl = clk->ctrl;
        struct iproc_pll *pll = clk->pll;
        u32 val;

        /* channel enable is active low */
        val = readl(pll->control_base + ctrl->enable.offset);
        val &= ~(1 << ctrl->enable.enable_shift);
        iproc_pll_write(pll, pll->control_base, ctrl->enable.offset, val);

        /* also make sure channel is not held */
        val = readl(pll->control_base + ctrl->enable.offset);
        val &= ~(1 << ctrl->enable.hold_shift);
        iproc_pll_write(pll, pll->control_base, ctrl->enable.offset, val);

        return 0;
}

static void iproc_clk_disable(struct clk_hw *hw)
{
        struct iproc_clk *clk = to_iproc_clk(hw);
        const struct iproc_clk_ctrl *ctrl = clk->ctrl;
        struct iproc_pll *pll = clk->pll;
        u32 val;

        if (ctrl->flags & IPROC_CLK_AON)
                return;

        val = readl(pll->control_base + ctrl->enable.offset);
        val |= 1 << ctrl->enable.enable_shift;
        iproc_pll_write(pll, pll->control_base, ctrl->enable.offset, val);
}

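/*
 * The channel rate is parent_rate / MDIV, or parent_rate / (2 * MDIV) when
 * IPROC_CLK_MCLK_DIV_BY_2 is set. An MDIV register field of 0 encodes a
 * divisor of 256.
 */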
static unsigned long iproc_clk_recalc_rate(struct clk_hw *hw,
                                           unsigned long parent_rate)
{
        struct iproc_clk *clk = to_iproc_clk(hw);
        const struct iproc_clk_ctrl *ctrl = clk->ctrl;
        struct iproc_pll *pll = clk->pll;
        u32 val;
        unsigned int mdiv;
        unsigned long rate;

        if (parent_rate == 0)
                return 0;

        val = readl(pll->control_base + ctrl->mdiv.offset);
        mdiv = (val >> ctrl->mdiv.shift) & bit_mask(ctrl->mdiv.width);
        if (mdiv == 0)
                mdiv = 256;

        if (ctrl->flags & IPROC_CLK_MCLK_DIV_BY_2)
                rate = parent_rate / (mdiv * 2);
        else
                rate = parent_rate / mdiv;

        return rate;
}

static int iproc_clk_determine_rate(struct clk_hw *hw,
                                    struct clk_rate_request *req)
{
        unsigned int bestdiv;

        if (req->rate == 0)
                return -EINVAL;
        if (req->rate == req->best_parent_rate)
                return 0;

        bestdiv = DIV_ROUND_CLOSEST(req->best_parent_rate, req->rate);
        if (bestdiv < 2)
                req->rate = req->best_parent_rate;

        if (bestdiv > 256)
                bestdiv = 256;

        req->rate = req->best_parent_rate / bestdiv;

        return 0;
}

static int iproc_clk_set_rate(struct clk_hw *hw, unsigned long rate,
                              unsigned long parent_rate)
{
        struct iproc_clk *clk = to_iproc_clk(hw);
        const struct iproc_clk_ctrl *ctrl = clk->ctrl;
        struct iproc_pll *pll = clk->pll;
        u32 val;
        unsigned int div;

        if (rate == 0 || parent_rate == 0)
                return -EINVAL;

        div = DIV_ROUND_CLOSEST(parent_rate, rate);
        if (ctrl->flags & IPROC_CLK_MCLK_DIV_BY_2)
                div /= 2;

        if (div > 256)
                return -EINVAL;

        val = readl(pll->control_base + ctrl->mdiv.offset);
        if (div == 256) {
                val &= ~(bit_mask(ctrl->mdiv.width) << ctrl->mdiv.shift);
        } else {
                val &= ~(bit_mask(ctrl->mdiv.width) << ctrl->mdiv.shift);
                val |= div << ctrl->mdiv.shift;
        }
        iproc_pll_write(pll, pll->control_base, ctrl->mdiv.offset, val);

        return 0;
}

static const struct clk_ops iproc_clk_ops = {
        .enable = iproc_clk_enable,
        .disable = iproc_clk_disable,
        .recalc_rate = iproc_clk_recalc_rate,
        .determine_rate = iproc_clk_determine_rate,
        .set_rate = iproc_clk_set_rate,
};

/*
 * Some PLLs require the PLL SW override bit to be set before changes can be
 * applied to the PLL.
 */
static void iproc_pll_sw_cfg(struct iproc_pll *pll)
{
        const struct iproc_pll_ctrl *ctrl = pll->ctrl;

        if (ctrl->flags & IPROC_CLK_PLL_NEEDS_SW_CFG) {
                u32 val;

                val = readl(pll->control_base + ctrl->sw_ctrl.offset);
                val |= BIT(ctrl->sw_ctrl.shift);
                iproc_pll_write(pll, pll->control_base, ctrl->sw_ctrl.offset,
                                val);
        }
}

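/*
 * Register the PLL and its leaf clocks described by the given control tables.
 * Index 0 of the resulting clk_hw array is the PLL itself, named after the
 * device tree node; indices 1..num_clks-1 are leaf clocks named from the
 * "clock-output-names" property and parented to the PLL.
 */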
void iproc_pll_clk_setup(struct device_node *node,
                         const struct iproc_pll_ctrl *pll_ctrl,
                         const struct iproc_pll_vco_param *vco,
                         unsigned int num_vco_entries,
                         const struct iproc_clk_ctrl *clk_ctrl,
                         unsigned int num_clks)
{
        int i, ret;
        struct iproc_pll *pll;
        struct iproc_clk *iclk;
        struct clk_init_data init;
        const char *parent_name;
        struct iproc_clk *iclk_array;
        struct clk_hw_onecell_data *clk_data;

        if (WARN_ON(!pll_ctrl) || WARN_ON(!clk_ctrl))
                return;

        pll = kzalloc(sizeof(*pll), GFP_KERNEL);
        if (WARN_ON(!pll))
                return;

        clk_data = kzalloc(struct_size(clk_data, hws, num_clks), GFP_KERNEL);
        if (WARN_ON(!clk_data))
                goto err_clk_data;
        clk_data->num = num_clks;

        iclk_array = kcalloc(num_clks, sizeof(struct iproc_clk), GFP_KERNEL);
        if (WARN_ON(!iclk_array))
                goto err_clks;

        pll->control_base = of_iomap(node, 0);
        if (WARN_ON(!pll->control_base))
                goto err_pll_iomap;

        /* Some SoCs do not require the pwr_base, thus failing is not fatal */
        pll->pwr_base = of_iomap(node, 1);

        /* some PLLs require gating control at the top ASIU level */
        if (pll_ctrl->flags & IPROC_CLK_PLL_ASIU) {
                pll->asiu_base = of_iomap(node, 2);
                if (WARN_ON(!pll->asiu_base))
                        goto err_asiu_iomap;
        }

        if (pll_ctrl->flags & IPROC_CLK_PLL_SPLIT_STAT_CTRL) {
                /* Some SoCs have a split status/control. If this does not
                 * exist, assume they are unified.
                 */
                pll->status_base = of_iomap(node, 2);
                if (!pll->status_base)
                        goto err_status_iomap;
        } else
                pll->status_base = pll->control_base;

        /* initialize and register the PLL itself */
        pll->ctrl = pll_ctrl;

        iclk = &iclk_array[0];
        iclk->pll = pll;

        init.name = node->name;
        init.ops = &iproc_pll_ops;
        init.flags = 0;
        parent_name = of_clk_get_parent_name(node, 0);
        init.parent_names = (parent_name ? &parent_name : NULL);
        init.num_parents = (parent_name ? 1 : 0);
        iclk->hw.init = &init;

        if (vco) {
                pll->num_vco_entries = num_vco_entries;
                pll->vco_param = vco;
        }

        iproc_pll_sw_cfg(pll);

        ret = clk_hw_register(NULL, &iclk->hw);
        if (WARN_ON(ret))
                goto err_pll_register;

        clk_data->hws[0] = &iclk->hw;

        /* now initialize and register all leaf clocks */
        for (i = 1; i < num_clks; i++) {
                const char *clk_name;

                memset(&init, 0, sizeof(init));
                parent_name = node->name;

                ret = of_property_read_string_index(node, "clock-output-names",
                                                    i, &clk_name);
                if (WARN_ON(ret))
                        goto err_clk_register;

                iclk = &iclk_array[i];
                iclk->pll = pll;
                iclk->ctrl = &clk_ctrl[i];

                init.name = clk_name;
                init.ops = &iproc_clk_ops;
                init.flags = 0;
                init.parent_names = (parent_name ? &parent_name : NULL);
                init.num_parents = (parent_name ? 1 : 0);
                iclk->hw.init = &init;

                ret = clk_hw_register(NULL, &iclk->hw);
                if (WARN_ON(ret))
                        goto err_clk_register;

                clk_data->hws[i] = &iclk->hw;
        }

        ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
        if (WARN_ON(ret))
                goto err_clk_register;

        return;

err_clk_register:
        while (--i >= 0)
                clk_hw_unregister(clk_data->hws[i]);

err_pll_register:
        if (pll->status_base != pll->control_base)
                iounmap(pll->status_base);

err_status_iomap:
        if (pll->asiu_base)
                iounmap(pll->asiu_base);

err_asiu_iomap:
        if (pll->pwr_base)
                iounmap(pll->pwr_base);

        iounmap(pll->control_base);

err_pll_iomap:
        kfree(iclk_array);

err_clks:
        kfree(clk_data);

err_clk_data:
        kfree(pll);
}
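
/*
 * Usage sketch (illustrative only; the foo_* tables and the compatible string
 * are placeholders, not part of this file): the SoC-specific drivers in this
 * directory typically call iproc_pll_clk_setup() from an OF clock init hook,
 * along these lines:
 *
 *      static void __init foo_genpll_clk_init(struct device_node *node)
 *      {
 *              iproc_pll_clk_setup(node, &foo_genpll_ctrl, NULL, 0,
 *                                  foo_genpll_clk,
 *                                  ARRAY_SIZE(foo_genpll_clk));
 *      }
 *      CLK_OF_DECLARE(foo_genpll, "brcm,foo-genpll", foo_genpll_clk_init);
 */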