/*
 * Purna Chandra Mandal, <purna.mandal@microchip.com>
 * Copyright (C) 2015 Microchip Technology Inc. All rights reserved.
 *
 * This program is free software; you can distribute it and/or modify it
 * under the terms of the GNU General Public License (Version 2) as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 */
14 #include <linux/clk-provider.h>
15 #include <linux/delay.h>
16 #include <linux/device.h>
17 #include <linux/interrupt.h>
18 #include <linux/iopoll.h>
19 #include <asm/mach-pic32/pic32.h>
20 #include <asm/traps.h>
/* OSCCON Reg fields */
#define OSC_CUR_MASK        0x07
#define OSC_CUR_SHIFT       12
#define OSC_NEW_MASK        0x07
#define OSC_NEW_SHIFT       8
#define OSC_SWEN            BIT(0)

/* SPLLCON Reg fields */
#define PLL_RANGE_MASK      0x07
#define PLL_RANGE_SHIFT     0
#define PLL_ICLK_MASK       0x01
#define PLL_ICLK_SHIFT      7
#define PLL_IDIV_MASK       0x07
#define PLL_IDIV_SHIFT      8
#define PLL_ODIV_MASK       0x07
#define PLL_ODIV_SHIFT      24
#define PLL_MULT_MASK       0x7F
#define PLL_MULT_SHIFT      16
#define PLL_MULT_MAX        128
#define PLL_ODIV_MIN        1
#define PLL_ODIV_MAX        5

/* Peripheral Bus Clock Reg Fields */
#define PB_DIV_MASK         0x7f
#define PB_DIV_SHIFT        0
#define PB_DIV_READY        BIT(11)
#define PB_DIV_ENABLE       BIT(15)
#define PB_DIV_MAX          128
#define PB_DIV_MIN          0 /* NOTE(review): definition was lost in paste; used by pbclk_round_rate — confirm value */

/* Reference Oscillator Control Reg fields */
#define REFO_SEL_MASK       0x0f
#define REFO_SEL_SHIFT      0
#define REFO_ACTIVE         BIT(8)
#define REFO_DIVSW_EN       BIT(9)
#define REFO_OE             BIT(12)
#define REFO_ON             BIT(15)
#define REFO_DIV_SHIFT      16
#define REFO_DIV_MASK       0x7fff

/* Reference Oscillator Trim Register Fields */
#define REFO_TRIM_REG       0x10 /* Register offset w.r.t. REFO_CON_REG */
#define REFO_TRIM_MASK      0x1ff
#define REFO_TRIM_SHIFT     23
#define REFO_TRIM_MAX       511

/* Mux Slew Control Register fields */
#define SLEW_BUSY           BIT(0)
#define SLEW_DOWNEN         BIT(1)
#define SLEW_UPEN           BIT(2)
#define SLEW_DIV            0x07 /* NOTE(review): definition was lost in paste; used by sclk_init — confirm value */
#define SLEW_DIV_SHIFT      8
#define SLEW_SYSDIV         0x0f
#define SLEW_SYSDIV_SHIFT   20

/* Clock Poll Timeout */
#define LOCK_TIMEOUT_US     USEC_PER_MSEC
/* SoC specific clock needed during SPLL clock rate switch */
static struct clk_hw *pic32_sclk_hw;
/* add instruction pipeline delay while CPU clock is in-transition. */
/* NOTE(review): the #define line was lost in the paste; macro name reconstructed — confirm */
#define cpu_nop5()                      \
do {                                    \
    __asm__ __volatile__("nop");        \
    __asm__ __volatile__("nop");        \
    __asm__ __volatile__("nop");        \
    __asm__ __volatile__("nop");        \
    __asm__ __volatile__("nop");        \
} while (0)
95 /* Perpheral bus clocks */
96 struct pic32_periph_clk
{
98 void __iomem
*ctrl_reg
;
99 struct pic32_clk_common
*core
;
102 #define clkhw_to_pbclk(_hw) container_of(_hw, struct pic32_periph_clk, hw)
104 static int pbclk_is_enabled(struct clk_hw
*hw
)
106 struct pic32_periph_clk
*pb
= clkhw_to_pbclk(hw
);
108 return readl(pb
->ctrl_reg
) & PB_DIV_ENABLE
;
111 static int pbclk_enable(struct clk_hw
*hw
)
113 struct pic32_periph_clk
*pb
= clkhw_to_pbclk(hw
);
115 writel(PB_DIV_ENABLE
, PIC32_SET(pb
->ctrl_reg
));
119 static void pbclk_disable(struct clk_hw
*hw
)
121 struct pic32_periph_clk
*pb
= clkhw_to_pbclk(hw
);
123 writel(PB_DIV_ENABLE
, PIC32_CLR(pb
->ctrl_reg
));
126 static unsigned long calc_best_divided_rate(unsigned long rate
,
127 unsigned long parent_rate
,
131 unsigned long divided_rate
, divided_rate_down
, best_rate
;
132 unsigned long div
, div_up
;
134 /* eq. clk_rate = parent_rate / divider.
136 * Find best divider to produce closest of target divided rate.
138 div
= parent_rate
/ rate
;
139 div
= clamp_val(div
, divider_min
, divider_max
);
140 div_up
= clamp_val(div
+ 1, divider_min
, divider_max
);
142 divided_rate
= parent_rate
/ div
;
143 divided_rate_down
= parent_rate
/ div_up
;
144 if (abs(rate
- divided_rate_down
) < abs(rate
- divided_rate
))
145 best_rate
= divided_rate_down
;
147 best_rate
= divided_rate
;
152 static inline u32
pbclk_read_pbdiv(struct pic32_periph_clk
*pb
)
154 return ((readl(pb
->ctrl_reg
) >> PB_DIV_SHIFT
) & PB_DIV_MASK
) + 1;
/* Derive the PB clock rate from the parent rate and current divider. */
static unsigned long pbclk_recalc_rate(struct clk_hw *hw,
                       unsigned long parent_rate)
{
    struct pic32_periph_clk *pb = clkhw_to_pbclk(hw);

    return parent_rate / pbclk_read_pbdiv(pb);
}
165 static long pbclk_round_rate(struct clk_hw
*hw
, unsigned long rate
,
166 unsigned long *parent_rate
)
168 return calc_best_divided_rate(rate
, *parent_rate
,
169 PB_DIV_MAX
, PB_DIV_MIN
);
172 static int pbclk_set_rate(struct clk_hw
*hw
, unsigned long rate
,
173 unsigned long parent_rate
)
175 struct pic32_periph_clk
*pb
= clkhw_to_pbclk(hw
);
180 /* check & wait for DIV_READY */
181 err
= readl_poll_timeout(pb
->ctrl_reg
, v
, v
& PB_DIV_READY
,
186 /* calculate clkdiv and best rate */
187 div
= DIV_ROUND_CLOSEST(parent_rate
, rate
);
189 spin_lock_irqsave(&pb
->core
->reg_lock
, flags
);
192 v
= readl(pb
->ctrl_reg
);
196 pic32_syskey_unlock();
198 writel(v
, pb
->ctrl_reg
);
200 spin_unlock_irqrestore(&pb
->core
->reg_lock
, flags
);
202 /* wait again for DIV_READY */
203 err
= readl_poll_timeout(pb
->ctrl_reg
, v
, v
& PB_DIV_READY
,
208 /* confirm that new div is applied correctly */
209 return (pbclk_read_pbdiv(pb
) == div
) ? 0 : -EBUSY
;
212 const struct clk_ops pic32_pbclk_ops
= {
213 .enable
= pbclk_enable
,
214 .disable
= pbclk_disable
,
215 .is_enabled
= pbclk_is_enabled
,
216 .recalc_rate
= pbclk_recalc_rate
,
217 .round_rate
= pbclk_round_rate
,
218 .set_rate
= pbclk_set_rate
,
221 struct clk
*pic32_periph_clk_register(const struct pic32_periph_clk_data
*desc
,
222 struct pic32_clk_common
*core
)
224 struct pic32_periph_clk
*pbclk
;
227 pbclk
= devm_kzalloc(core
->dev
, sizeof(*pbclk
), GFP_KERNEL
);
229 return ERR_PTR(-ENOMEM
);
231 pbclk
->hw
.init
= &desc
->init_data
;
233 pbclk
->ctrl_reg
= desc
->ctrl_reg
+ core
->iobase
;
235 clk
= devm_clk_register(core
->dev
, &pbclk
->hw
);
237 dev_err(core
->dev
, "%s: clk_register() failed\n", __func__
);
238 devm_kfree(core
->dev
, pbclk
);
244 /* Reference oscillator operations */
245 struct pic32_ref_osc
{
247 void __iomem
*ctrl_reg
;
248 const u32
*parent_map
;
249 struct pic32_clk_common
*core
;
252 #define clkhw_to_refosc(_hw) container_of(_hw, struct pic32_ref_osc, hw)
254 static int roclk_is_enabled(struct clk_hw
*hw
)
256 struct pic32_ref_osc
*refo
= clkhw_to_refosc(hw
);
258 return readl(refo
->ctrl_reg
) & REFO_ON
;
261 static int roclk_enable(struct clk_hw
*hw
)
263 struct pic32_ref_osc
*refo
= clkhw_to_refosc(hw
);
265 writel(REFO_ON
| REFO_OE
, PIC32_SET(refo
->ctrl_reg
));
269 static void roclk_disable(struct clk_hw
*hw
)
271 struct pic32_ref_osc
*refo
= clkhw_to_refosc(hw
);
273 writel(REFO_ON
| REFO_OE
, PIC32_CLR(refo
->ctrl_reg
));
static void roclk_init(struct clk_hw *hw)
{
    /* initialize clock in disabled state */
    roclk_disable(hw);
}
282 static u8
roclk_get_parent(struct clk_hw
*hw
)
284 struct pic32_ref_osc
*refo
= clkhw_to_refosc(hw
);
287 v
= (readl(refo
->ctrl_reg
) >> REFO_SEL_SHIFT
) & REFO_SEL_MASK
;
289 if (!refo
->parent_map
)
292 for (i
= 0; i
< clk_hw_get_num_parents(hw
); i
++)
293 if (refo
->parent_map
[i
] == v
)
299 static unsigned long roclk_calc_rate(unsigned long parent_rate
,
300 u32 rodiv
, u32 rotrim
)
304 /* fout = fin / [2 * {div + (trim / 512)}]
305 * = fin * 512 / [1024 * div + 2 * trim]
306 * = fin * 256 / (512 * div + trim)
307 * = (fin << 8) / ((div << 9) + trim)
310 rodiv
= (rodiv
<< 9) + rotrim
;
311 rate64
= parent_rate
;
313 do_div(rate64
, rodiv
);
315 rate64
= parent_rate
/ (rodiv
<< 1);
317 rate64
= parent_rate
;
322 static void roclk_calc_div_trim(unsigned long rate
,
323 unsigned long parent_rate
,
324 u32
*rodiv_p
, u32
*rotrim_p
)
326 u32 div
, rotrim
, rodiv
;
329 /* Find integer approximation of floating-point arithmetic.
330 * fout = fin / [2 * {rodiv + (rotrim / 512)}] ... (1)
331 * i.e. fout = fin / 2 * DIV
332 * whereas DIV = rodiv + (rotrim / 512)
334 * Since kernel does not perform floating-point arithmatic so
335 * (rotrim/512) will be zero. And DIV & rodiv will result same.
337 * ie. fout = (fin * 256) / [(512 * rodiv) + rotrim] ... from (1)
338 * ie. rotrim = ((fin * 256) / fout) - (512 * DIV)
340 if (parent_rate
<= rate
) {
346 div
= parent_rate
/ (rate
<< 1);
350 frac
-= (u64
)(div
<< 9);
352 rodiv
= (div
> REFO_DIV_MASK
) ? REFO_DIV_MASK
: div
;
353 rotrim
= (frac
>= REFO_TRIM_MAX
) ? REFO_TRIM_MAX
: frac
;
363 static unsigned long roclk_recalc_rate(struct clk_hw
*hw
,
364 unsigned long parent_rate
)
366 struct pic32_ref_osc
*refo
= clkhw_to_refosc(hw
);
367 u32 v
, rodiv
, rotrim
;
370 v
= readl(refo
->ctrl_reg
);
371 rodiv
= (v
>> REFO_DIV_SHIFT
) & REFO_DIV_MASK
;
374 v
= readl(refo
->ctrl_reg
+ REFO_TRIM_REG
);
375 rotrim
= (v
>> REFO_TRIM_SHIFT
) & REFO_TRIM_MASK
;
377 return roclk_calc_rate(parent_rate
, rodiv
, rotrim
);
380 static long roclk_round_rate(struct clk_hw
*hw
, unsigned long rate
,
381 unsigned long *parent_rate
)
385 /* calculate dividers for new rate */
386 roclk_calc_div_trim(rate
, *parent_rate
, &rodiv
, &rotrim
);
388 /* caclulate new rate (rounding) based on new rodiv & rotrim */
389 return roclk_calc_rate(*parent_rate
, rodiv
, rotrim
);
392 static int roclk_determine_rate(struct clk_hw
*hw
,
393 struct clk_rate_request
*req
)
395 struct clk_hw
*parent_clk
, *best_parent_clk
= NULL
;
396 unsigned int i
, delta
, best_delta
= -1;
397 unsigned long parent_rate
, best_parent_rate
= 0;
398 unsigned long best
= 0, nearest_rate
;
400 /* find a parent which can generate nearest clkrate >= rate */
401 for (i
= 0; i
< clk_hw_get_num_parents(hw
); i
++) {
403 parent_clk
= clk_hw_get_parent_by_index(hw
, i
);
407 /* skip if parent runs slower than target rate */
408 parent_rate
= clk_hw_get_rate(parent_clk
);
409 if (req
->rate
> parent_rate
)
412 nearest_rate
= roclk_round_rate(hw
, req
->rate
, &parent_rate
);
413 delta
= abs(nearest_rate
- req
->rate
);
414 if ((nearest_rate
>= req
->rate
) && (delta
< best_delta
)) {
415 best_parent_clk
= parent_clk
;
416 best_parent_rate
= parent_rate
;
425 /* if no match found, retain old rate */
426 if (!best_parent_clk
) {
427 pr_err("%s:%s, no parent found for rate %lu.\n",
428 __func__
, clk_hw_get_name(hw
), req
->rate
);
429 return clk_hw_get_rate(hw
);
432 pr_debug("%s,rate %lu, best_parent(%s, %lu), best %lu, delta %d\n",
433 clk_hw_get_name(hw
), req
->rate
,
434 clk_hw_get_name(best_parent_clk
), best_parent_rate
,
437 if (req
->best_parent_rate
)
438 req
->best_parent_rate
= best_parent_rate
;
440 if (req
->best_parent_hw
)
441 req
->best_parent_hw
= best_parent_clk
;
446 static int roclk_set_parent(struct clk_hw
*hw
, u8 index
)
448 struct pic32_ref_osc
*refo
= clkhw_to_refosc(hw
);
453 if (refo
->parent_map
)
454 index
= refo
->parent_map
[index
];
456 /* wait until ACTIVE bit is zero or timeout */
457 err
= readl_poll_timeout(refo
->ctrl_reg
, v
, !(v
& REFO_ACTIVE
),
460 pr_err("%s: poll failed, clk active\n", clk_hw_get_name(hw
));
464 spin_lock_irqsave(&refo
->core
->reg_lock
, flags
);
466 pic32_syskey_unlock();
468 /* calculate & apply new */
469 v
= readl(refo
->ctrl_reg
);
470 v
&= ~(REFO_SEL_MASK
<< REFO_SEL_SHIFT
);
471 v
|= index
<< REFO_SEL_SHIFT
;
473 writel(v
, refo
->ctrl_reg
);
475 spin_unlock_irqrestore(&refo
->core
->reg_lock
, flags
);
480 static int roclk_set_rate_and_parent(struct clk_hw
*hw
,
482 unsigned long parent_rate
,
485 struct pic32_ref_osc
*refo
= clkhw_to_refosc(hw
);
490 /* calculate new rodiv & rotrim for new rate */
491 roclk_calc_div_trim(rate
, parent_rate
, &rodiv
, &trim
);
493 pr_debug("parent_rate = %lu, rate = %lu, div = %d, trim = %d\n",
494 parent_rate
, rate
, rodiv
, trim
);
496 /* wait till source change is active */
497 err
= readl_poll_timeout(refo
->ctrl_reg
, v
,
498 !(v
& (REFO_ACTIVE
| REFO_DIVSW_EN
)),
501 pr_err("%s: poll timedout, clock is still active\n", __func__
);
505 spin_lock_irqsave(&refo
->core
->reg_lock
, flags
);
506 v
= readl(refo
->ctrl_reg
);
508 pic32_syskey_unlock();
510 /* apply parent, if required */
511 if (refo
->parent_map
)
512 index
= refo
->parent_map
[index
];
514 v
&= ~(REFO_SEL_MASK
<< REFO_SEL_SHIFT
);
515 v
|= index
<< REFO_SEL_SHIFT
;
518 v
&= ~(REFO_DIV_MASK
<< REFO_DIV_SHIFT
);
519 v
|= rodiv
<< REFO_DIV_SHIFT
;
520 writel(v
, refo
->ctrl_reg
);
523 v
= readl(refo
->ctrl_reg
+ REFO_TRIM_REG
);
524 v
&= ~(REFO_TRIM_MASK
<< REFO_TRIM_SHIFT
);
525 v
|= trim
<< REFO_TRIM_SHIFT
;
526 writel(v
, refo
->ctrl_reg
+ REFO_TRIM_REG
);
528 /* enable & activate divider switching */
529 writel(REFO_ON
| REFO_DIVSW_EN
, PIC32_SET(refo
->ctrl_reg
));
531 /* wait till divswen is in-progress */
532 err
= readl_poll_timeout_atomic(refo
->ctrl_reg
, v
, !(v
& REFO_DIVSW_EN
),
534 /* leave the clk gated as it was */
535 writel(REFO_ON
, PIC32_CLR(refo
->ctrl_reg
));
537 spin_unlock_irqrestore(&refo
->core
->reg_lock
, flags
);
542 static int roclk_set_rate(struct clk_hw
*hw
, unsigned long rate
,
543 unsigned long parent_rate
)
545 u8 index
= roclk_get_parent(hw
);
547 return roclk_set_rate_and_parent(hw
, rate
, parent_rate
, index
);
550 const struct clk_ops pic32_roclk_ops
= {
551 .enable
= roclk_enable
,
552 .disable
= roclk_disable
,
553 .is_enabled
= roclk_is_enabled
,
554 .get_parent
= roclk_get_parent
,
555 .set_parent
= roclk_set_parent
,
556 .determine_rate
= roclk_determine_rate
,
557 .recalc_rate
= roclk_recalc_rate
,
558 .set_rate_and_parent
= roclk_set_rate_and_parent
,
559 .set_rate
= roclk_set_rate
,
563 struct clk
*pic32_refo_clk_register(const struct pic32_ref_osc_data
*data
,
564 struct pic32_clk_common
*core
)
566 struct pic32_ref_osc
*refo
;
569 refo
= devm_kzalloc(core
->dev
, sizeof(*refo
), GFP_KERNEL
);
571 return ERR_PTR(-ENOMEM
);
574 refo
->hw
.init
= &data
->init_data
;
575 refo
->ctrl_reg
= data
->ctrl_reg
+ core
->iobase
;
576 refo
->parent_map
= data
->parent_map
;
578 clk
= devm_clk_register(core
->dev
, &refo
->hw
);
580 dev_err(core
->dev
, "%s: clk_register() failed\n", __func__
);
585 struct pic32_sys_pll
{
587 void __iomem
*ctrl_reg
;
588 void __iomem
*status_reg
;
590 u32 idiv
; /* PLL iclk divider, treated fixed */
591 struct pic32_clk_common
*core
;
594 #define clkhw_to_spll(_hw) container_of(_hw, struct pic32_sys_pll, hw)
596 static inline u32
spll_odiv_to_divider(u32 odiv
)
598 odiv
= clamp_val(odiv
, PLL_ODIV_MIN
, PLL_ODIV_MAX
);
603 static unsigned long spll_calc_mult_div(struct pic32_sys_pll
*pll
,
605 unsigned long parent_rate
,
606 u32
*mult_p
, u32
*odiv_p
)
608 u32 mul
, div
, best_mul
= 1, best_div
= 1;
609 unsigned long new_rate
, best_rate
= rate
;
610 unsigned int best_delta
= -1, delta
, match_found
= 0;
613 parent_rate
/= pll
->idiv
;
615 for (mul
= 1; mul
<= PLL_MULT_MAX
; mul
++) {
616 for (div
= PLL_ODIV_MIN
; div
<= PLL_ODIV_MAX
; div
++) {
617 rate64
= parent_rate
;
619 do_div(rate64
, 1 << div
);
621 delta
= abs(rate
- new_rate
);
622 if ((new_rate
>= rate
) && (delta
< best_delta
)) {
624 best_rate
= new_rate
;
633 pr_warn("spll: no match found\n");
637 pr_debug("rate %lu, par_rate %lu/mult %u, div %u, best_rate %lu\n",
638 rate
, parent_rate
, best_mul
, best_div
, best_rate
);
641 *mult_p
= best_mul
- 1;
649 static unsigned long spll_clk_recalc_rate(struct clk_hw
*hw
,
650 unsigned long parent_rate
)
652 struct pic32_sys_pll
*pll
= clkhw_to_spll(hw
);
653 unsigned long pll_in_rate
;
654 u32 mult
, odiv
, div
, v
;
657 v
= readl(pll
->ctrl_reg
);
658 odiv
= ((v
>> PLL_ODIV_SHIFT
) & PLL_ODIV_MASK
);
659 mult
= ((v
>> PLL_MULT_SHIFT
) & PLL_MULT_MASK
) + 1;
660 div
= spll_odiv_to_divider(odiv
);
662 /* pll_in_rate = parent_rate / idiv
663 * pll_out_rate = pll_in_rate * mult / div;
665 pll_in_rate
= parent_rate
/ pll
->idiv
;
666 rate64
= pll_in_rate
;
673 static long spll_clk_round_rate(struct clk_hw
*hw
, unsigned long rate
,
674 unsigned long *parent_rate
)
676 struct pic32_sys_pll
*pll
= clkhw_to_spll(hw
);
678 return spll_calc_mult_div(pll
, rate
, *parent_rate
, NULL
, NULL
);
681 static int spll_clk_set_rate(struct clk_hw
*hw
, unsigned long rate
,
682 unsigned long parent_rate
)
684 struct pic32_sys_pll
*pll
= clkhw_to_spll(hw
);
685 unsigned long ret
, flags
;
689 ret
= spll_calc_mult_div(pll
, rate
, parent_rate
, &mult
, &odiv
);
694 * We can't change SPLL counters when it is in-active use
695 * by SYSCLK. So check before applying new counters/rate.
698 /* Is spll_clk active parent of sys_clk ? */
699 if (unlikely(clk_hw_get_parent(pic32_sclk_hw
) == hw
)) {
700 pr_err("%s: failed, clk in-use\n", __func__
);
704 spin_lock_irqsave(&pll
->core
->reg_lock
, flags
);
706 /* apply new multiplier & divisor */
707 v
= readl(pll
->ctrl_reg
);
708 v
&= ~(PLL_MULT_MASK
<< PLL_MULT_SHIFT
);
709 v
&= ~(PLL_ODIV_MASK
<< PLL_ODIV_SHIFT
);
710 v
|= (mult
<< PLL_MULT_SHIFT
) | (odiv
<< PLL_ODIV_SHIFT
);
712 /* sys unlock before write */
713 pic32_syskey_unlock();
715 writel(v
, pll
->ctrl_reg
);
718 /* insert few nops (5-stage) to ensure CPU does not hang */
722 /* Wait until PLL is locked (maximum 100 usecs). */
723 err
= readl_poll_timeout_atomic(pll
->status_reg
, v
,
724 v
& pll
->lock_mask
, 1, 100);
725 spin_unlock_irqrestore(&pll
->core
->reg_lock
, flags
);
730 /* SPLL clock operation */
731 const struct clk_ops pic32_spll_ops
= {
732 .recalc_rate
= spll_clk_recalc_rate
,
733 .round_rate
= spll_clk_round_rate
,
734 .set_rate
= spll_clk_set_rate
,
737 struct clk
*pic32_spll_clk_register(const struct pic32_sys_pll_data
*data
,
738 struct pic32_clk_common
*core
)
740 struct pic32_sys_pll
*spll
;
743 spll
= devm_kzalloc(core
->dev
, sizeof(*spll
), GFP_KERNEL
);
745 return ERR_PTR(-ENOMEM
);
748 spll
->hw
.init
= &data
->init_data
;
749 spll
->ctrl_reg
= data
->ctrl_reg
+ core
->iobase
;
750 spll
->status_reg
= data
->status_reg
+ core
->iobase
;
751 spll
->lock_mask
= data
->lock_mask
;
753 /* cache PLL idiv; PLL driver uses it as constant.*/
754 spll
->idiv
= (readl(spll
->ctrl_reg
) >> PLL_IDIV_SHIFT
) & PLL_IDIV_MASK
;
757 clk
= devm_clk_register(core
->dev
, &spll
->hw
);
759 dev_err(core
->dev
, "sys_pll: clk_register() failed\n");
764 /* System mux clock(aka SCLK) */
766 struct pic32_sys_clk
{
768 void __iomem
*mux_reg
;
769 void __iomem
*slew_reg
;
771 const u32
*parent_map
;
772 struct pic32_clk_common
*core
;
775 #define clkhw_to_sys_clk(_hw) container_of(_hw, struct pic32_sys_clk, hw)
777 static unsigned long sclk_get_rate(struct clk_hw
*hw
, unsigned long parent_rate
)
779 struct pic32_sys_clk
*sclk
= clkhw_to_sys_clk(hw
);
782 div
= (readl(sclk
->slew_reg
) >> SLEW_SYSDIV_SHIFT
) & SLEW_SYSDIV
;
783 div
+= 1; /* sys-div to divider */
785 return parent_rate
/ div
;
788 static long sclk_round_rate(struct clk_hw
*hw
, unsigned long rate
,
789 unsigned long *parent_rate
)
791 return calc_best_divided_rate(rate
, *parent_rate
, SLEW_SYSDIV
, 1);
794 static int sclk_set_rate(struct clk_hw
*hw
,
795 unsigned long rate
, unsigned long parent_rate
)
797 struct pic32_sys_clk
*sclk
= clkhw_to_sys_clk(hw
);
802 div
= parent_rate
/ rate
;
804 spin_lock_irqsave(&sclk
->core
->reg_lock
, flags
);
807 v
= readl(sclk
->slew_reg
);
808 v
&= ~(SLEW_SYSDIV
<< SLEW_SYSDIV_SHIFT
);
809 v
|= (div
- 1) << SLEW_SYSDIV_SHIFT
;
811 pic32_syskey_unlock();
813 writel(v
, sclk
->slew_reg
);
815 /* wait until BUSY is cleared */
816 err
= readl_poll_timeout_atomic(sclk
->slew_reg
, v
,
817 !(v
& SLEW_BUSY
), 1, LOCK_TIMEOUT_US
);
819 spin_unlock_irqrestore(&sclk
->core
->reg_lock
, flags
);
824 static u8
sclk_get_parent(struct clk_hw
*hw
)
826 struct pic32_sys_clk
*sclk
= clkhw_to_sys_clk(hw
);
829 v
= (readl(sclk
->mux_reg
) >> OSC_CUR_SHIFT
) & OSC_CUR_MASK
;
831 if (!sclk
->parent_map
)
834 for (i
= 0; i
< clk_hw_get_num_parents(hw
); i
++)
835 if (sclk
->parent_map
[i
] == v
)
840 static int sclk_set_parent(struct clk_hw
*hw
, u8 index
)
842 struct pic32_sys_clk
*sclk
= clkhw_to_sys_clk(hw
);
847 spin_lock_irqsave(&sclk
->core
->reg_lock
, flags
);
850 nosc
= sclk
->parent_map
? sclk
->parent_map
[index
] : index
;
853 v
= readl(sclk
->mux_reg
);
854 v
&= ~(OSC_NEW_MASK
<< OSC_NEW_SHIFT
);
855 v
|= nosc
<< OSC_NEW_SHIFT
;
857 pic32_syskey_unlock();
859 writel(v
, sclk
->mux_reg
);
862 writel(OSC_SWEN
, PIC32_SET(sclk
->mux_reg
));
865 /* add nop to flush pipeline (as cpu_clk is in-flux) */
868 /* wait for SWEN bit to clear */
869 err
= readl_poll_timeout_atomic(sclk
->slew_reg
, v
,
870 !(v
& OSC_SWEN
), 1, LOCK_TIMEOUT_US
);
872 spin_unlock_irqrestore(&sclk
->core
->reg_lock
, flags
);
875 * SCLK clock-switching logic might reject a clock switching request
876 * if pre-requisites (like new clk_src not present or unstable) are
878 * So confirm before claiming success.
880 cosc
= (readl(sclk
->mux_reg
) >> OSC_CUR_SHIFT
) & OSC_CUR_MASK
;
882 pr_err("%s: err, failed to set_parent() to %d, current %d\n",
883 clk_hw_get_name(hw
), nosc
, cosc
);
890 static void sclk_init(struct clk_hw
*hw
)
892 struct pic32_sys_clk
*sclk
= clkhw_to_sys_clk(hw
);
896 /* Maintain reference to this clk, required in spll_clk_set_rate() */
899 /* apply slew divider on both up and down scaling */
900 if (sclk
->slew_div
) {
901 spin_lock_irqsave(&sclk
->core
->reg_lock
, flags
);
902 v
= readl(sclk
->slew_reg
);
903 v
&= ~(SLEW_DIV
<< SLEW_DIV_SHIFT
);
904 v
|= sclk
->slew_div
<< SLEW_DIV_SHIFT
;
905 v
|= SLEW_DOWNEN
| SLEW_UPEN
;
906 writel(v
, sclk
->slew_reg
);
907 spin_unlock_irqrestore(&sclk
->core
->reg_lock
, flags
);
911 /* sclk with post-divider */
912 const struct clk_ops pic32_sclk_ops
= {
913 .get_parent
= sclk_get_parent
,
914 .set_parent
= sclk_set_parent
,
915 .round_rate
= sclk_round_rate
,
916 .set_rate
= sclk_set_rate
,
917 .recalc_rate
= sclk_get_rate
,
919 .determine_rate
= __clk_mux_determine_rate
,
922 /* sclk with no slew and no post-divider */
923 const struct clk_ops pic32_sclk_no_div_ops
= {
924 .get_parent
= sclk_get_parent
,
925 .set_parent
= sclk_set_parent
,
927 .determine_rate
= __clk_mux_determine_rate
,
930 struct clk
*pic32_sys_clk_register(const struct pic32_sys_clk_data
*data
,
931 struct pic32_clk_common
*core
)
933 struct pic32_sys_clk
*sclk
;
936 sclk
= devm_kzalloc(core
->dev
, sizeof(*sclk
), GFP_KERNEL
);
938 return ERR_PTR(-ENOMEM
);
941 sclk
->hw
.init
= &data
->init_data
;
942 sclk
->mux_reg
= data
->mux_reg
+ core
->iobase
;
943 sclk
->slew_reg
= data
->slew_reg
+ core
->iobase
;
944 sclk
->slew_div
= data
->slew_div
;
945 sclk
->parent_map
= data
->parent_map
;
947 clk
= devm_clk_register(core
->dev
, &sclk
->hw
);
949 dev_err(core
->dev
, "%s: clk register failed\n", __func__
);
954 /* secondary oscillator */
955 struct pic32_sec_osc
{
957 void __iomem
*enable_reg
;
958 void __iomem
*status_reg
;
961 unsigned long fixed_rate
;
962 struct pic32_clk_common
*core
;
965 #define clkhw_to_sosc(_hw) container_of(_hw, struct pic32_sec_osc, hw)
966 static int sosc_clk_enable(struct clk_hw
*hw
)
968 struct pic32_sec_osc
*sosc
= clkhw_to_sosc(hw
);
972 pic32_syskey_unlock();
973 writel(sosc
->enable_mask
, PIC32_SET(sosc
->enable_reg
));
975 /* wait till warm-up period expires or ready-status is updated */
976 return readl_poll_timeout_atomic(sosc
->status_reg
, v
,
977 v
& sosc
->status_mask
, 1, 100);
980 static void sosc_clk_disable(struct clk_hw
*hw
)
982 struct pic32_sec_osc
*sosc
= clkhw_to_sosc(hw
);
984 pic32_syskey_unlock();
985 writel(sosc
->enable_mask
, PIC32_CLR(sosc
->enable_reg
));
988 static int sosc_clk_is_enabled(struct clk_hw
*hw
)
990 struct pic32_sec_osc
*sosc
= clkhw_to_sosc(hw
);
993 /* check enabled and ready status */
994 enabled
= readl(sosc
->enable_reg
) & sosc
->enable_mask
;
995 ready
= readl(sosc
->status_reg
) & sosc
->status_mask
;
997 return enabled
&& ready
;
1000 static unsigned long sosc_clk_calc_rate(struct clk_hw
*hw
,
1001 unsigned long parent_rate
)
1003 return clkhw_to_sosc(hw
)->fixed_rate
;
1006 const struct clk_ops pic32_sosc_ops
= {
1007 .enable
= sosc_clk_enable
,
1008 .disable
= sosc_clk_disable
,
1009 .is_enabled
= sosc_clk_is_enabled
,
1010 .recalc_rate
= sosc_clk_calc_rate
,
1013 struct clk
*pic32_sosc_clk_register(const struct pic32_sec_osc_data
*data
,
1014 struct pic32_clk_common
*core
)
1016 struct pic32_sec_osc
*sosc
;
1018 sosc
= devm_kzalloc(core
->dev
, sizeof(*sosc
), GFP_KERNEL
);
1020 return ERR_PTR(-ENOMEM
);
1023 sosc
->hw
.init
= &data
->init_data
;
1024 sosc
->fixed_rate
= data
->fixed_rate
;
1025 sosc
->enable_mask
= data
->enable_mask
;
1026 sosc
->status_mask
= data
->status_mask
;
1027 sosc
->enable_reg
= data
->enable_reg
+ core
->iobase
;
1028 sosc
->status_reg
= data
->status_reg
+ core
->iobase
;
1030 return devm_clk_register(core
->dev
, &sosc
->hw
);