/*
 * Clock manipulation routines for Freescale STMP37XX/STMP378X
 *
 * Author: Vitaly Wool <vital@embeddedalley.com>
 *
 * Copyright 2008 Freescale Semiconductor, Inc. All Rights Reserved.
 * Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved.
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/clk.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/io.h>

#include <asm/mach-types.h>
#include <asm/clkdev.h>
#include <mach/platform.h>
#include <mach/regs-clkctrl.h>

#include "clock.h"
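/*
 * Note: "struct clk" and the flag bits used below (FIXED_RATE, ENABLED,
 * RATE_PROPAGATES, NEEDS_INITIALIZATION, NEEDS_SET_PARENT) come from the
 * port-local "clock.h", not from the common clock framework. The
 * scale/busy/bypass/enable register fields all point into the CLKCTRL
 * block, and rates throughout this file are in kHz (e.g. 24000 == 24 MHz).
 */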
static DEFINE_SPINLOCK(clocks_lock);

static struct clk osc_24M;
static struct clk pll_clk;
static struct clk cpu_clk;
static struct clk hclk;

static int propagate_rate(struct clk *);
static inline int clk_is_busy(struct clk *clk)
{
        return __raw_readl(clk->busy_reg) & (1 << clk->busy_bit);
}
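/*
 * The set_rate implementations below share one convention: after a divider
 * is written, they poll clk_is_busy() for up to 10000 reads and treat a
 * BUSY bit that never clears as -ETIMEDOUT.
 */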
static inline int clk_good(struct clk *clk)
{
        return clk && !IS_ERR(clk) && clk->ops;
}
static int std_clk_enable(struct clk *clk)
{
        if (clk->enable_reg) {
                u32 clk_reg = __raw_readl(clk->enable_reg);
                if (clk->enable_negate)
                        clk_reg &= ~(1 << clk->enable_shift);
                else
                        clk_reg |= (1 << clk->enable_shift);
                __raw_writel(clk_reg, clk->enable_reg);
                if (clk->enable_wait)
                        udelay(clk->enable_wait);
                return 0;
        } else
                return -EINVAL;
}
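/*
 * Usage note: most gates in CLKCTRL are CLKGATE bits, i.e. active-low with
 * respect to "enabled", so those clocks set enable_negate and enabling
 * *clears* the bit while disabling sets it.
 */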
static int std_clk_disable(struct clk *clk)
{
        if (clk->enable_reg) {
                u32 clk_reg = __raw_readl(clk->enable_reg);
                if (clk->enable_negate)
                        clk_reg |= (1 << clk->enable_shift);
                else
                        clk_reg &= ~(1 << clk->enable_shift);
                __raw_writel(clk_reg, clk->enable_reg);
                return 0;
        } else
                return -EINVAL;
}
static int io_set_rate(struct clk *clk, u32 rate)
{
        u32 reg_frac, clkctrl_frac;
        int i, ret = 0, mask = 0x1f;

        clkctrl_frac = (clk->parent->rate * 18 + rate - 1) / rate;

        if (clkctrl_frac < 18 || clkctrl_frac > 35) {
                ret = -EINVAL;
                goto out;
        }

        reg_frac = __raw_readl(clk->scale_reg);
        reg_frac &= ~(mask << clk->scale_shift);
        __raw_writel(reg_frac | (clkctrl_frac << clk->scale_shift),
                     clk->scale_reg);

        for (i = 10000; i; i--)
                if (!clk_is_busy(clk))
                        break;
        if (!i)
                ret = -ETIMEDOUT;
out:
        return ret;
}
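/*
 * Worked example (illustrative): with the 480000 kHz PLL as parent and a
 * requested rate of 320000 kHz, clkctrl_frac = ceil(480000*18/320000) = 27,
 * which lies inside the legal [18, 35] window and yields exactly
 * 480000*18/27 = 320000 kHz.
 */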
static long io_get_rate(struct clk *clk)
{
        long rate = clk->parent->rate * 18;
        int mask = 0x1f;

        rate /= (__raw_readl(clk->scale_reg) >> clk->scale_shift) & mask;
        clk->rate = rate;

        return rate;
}
static long per_get_rate(struct clk *clk)
{
        long rate = clk->parent->rate;
        long div;
        const int mask = 0xff;

        /* test the gate bit itself, not the raw shift count */
        if (clk->enable_reg &&
            !(__raw_readl(clk->enable_reg) & (1 << clk->enable_shift)))
                clk->rate = 0;
        else {
                div = (__raw_readl(clk->scale_reg) >> clk->scale_shift) & mask;
                if (div)
                        rate /= div;
                clk->rate = rate;
        }

        return clk->rate;
}
static int per_set_rate(struct clk *clk, u32 rate)
{
        int ret = -EINVAL;
        int div = (clk->parent->rate + rate - 1) / rate;
        u32 reg_frac;
        const int mask = 0xff;
        int i = -1;

        if (div == 0 || div > mask)
                goto out;

        reg_frac = __raw_readl(clk->scale_reg);
        reg_frac &= ~(mask << clk->scale_shift);
        __raw_writel(reg_frac | (div << clk->scale_shift),
                     clk->scale_reg);

        for (i = 10000; i; i--)
                if (!clk_is_busy(clk))
                        break;
        ret = i ? 0 : -ETIMEDOUT;
out:
        if (ret != 0)
                printk(KERN_ERR "%s: error %d\n", __func__, ret);
        return ret;
}
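/*
 * Worked example (illustrative): a peripheral divider is a plain integer
 * divide with div in [1, 255], so a 480000 kHz parent and a requested
 * 96000 kHz give div = ceil(480000/96000) = 5, i.e. exactly 96000 kHz.
 */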
static long lcdif_get_rate(struct clk *clk)
{
        long rate = clk->parent->rate;
        long div;
        const int mask = 0xff;

        div = (__raw_readl(clk->scale_reg) >> clk->scale_shift) & mask;
        if (div) {
                rate /= div;
                div = (__raw_readl(REGS_CLKCTRL_BASE + HW_CLKCTRL_FRAC) &
                       BM_CLKCTRL_FRAC_PIXFRAC) >> BP_CLKCTRL_FRAC_PIXFRAC;
                rate /= div;
        }
        clk->rate = rate;

        return rate;
}
static int lcdif_set_rate(struct clk *clk, u32 rate)
{
        int ret = 0;
        /*
         * On 3700, we can get most timings exact by modifying ref_pix
         * and the divider, but keeping the phase timings at 1 (2
         * phases per cycle).
         *
         * ref_pix can be between 480e6*18/35=246.9MHz and 480e6*18/18=480MHz,
         * which is between 18/(18*480e6)=2.084ns and 35/(18*480e6)=4.050ns.
         *
         * ns_cycle >= 2*18e3/(18*480) = 25/6
         * ns_cycle <= 2*35e3/(18*480) = 875/108
         *
         * Multiply the ns_cycle by 'div' to lengthen it until it fits the
         * bounds. This is the divider we'll use after ref_pix.
         *
         * 6 * ns_cycle >= 25 * div
         * 108 * ns_cycle <= 875 * div
         */
        u32 ns_cycle = 1000000 / rate;
        u32 div, reg_val;
        u32 lowest_result = (u32) -1;
        u32 lowest_div = 0, lowest_fracdiv = 0;
        int i;

        for (div = 1; div < 256; ++div) {
                u32 fracdiv, ps_result;
                int lower_bound = 6 * ns_cycle >= 25 * div;
                int upper_bound = 108 * ns_cycle <= 875 * div;

                if (!lower_bound)
                        break;
                if (!upper_bound)
                        continue;
                /*
                 * Found a matching div. Calculate fractional divider needed,
                 * rounded up.
                 */
                fracdiv = ((clk->parent->rate / 1000 * 18 / 2) *
                           ns_cycle + 1000 * div - 1) /
                           (1000 * div);
                if (fracdiv < 18 || fracdiv > 35) {
                        ret = -EINVAL;
                        goto out;
                }
                /* Calculate the actual cycle time this results in */
                ps_result = 6250 * div * fracdiv / 27;

                /* Use the fastest result that doesn't break ns_cycle */
                if (ps_result <= lowest_result) {
                        lowest_result = ps_result;
                        lowest_div = div;
                        lowest_fracdiv = fracdiv;
                }
        }

        if (div >= 256 || lowest_result == (u32) -1) {
                ret = -EINVAL;
                goto out;
        }

        pr_debug("Programming PFD=%u,DIV=%u ref_pix=%uMHz "
                 "PIXCLK=%uMHz cycle=%u.%03uns\n",
                 lowest_fracdiv, lowest_div,
                 480*18/lowest_fracdiv, 480*18/lowest_fracdiv/lowest_div,
                 lowest_result / 1000, lowest_result % 1000);

        /* Program ref_pix phase fractional divider */
        reg_val = __raw_readl(REGS_CLKCTRL_BASE + HW_CLKCTRL_FRAC);
        reg_val &= ~BM_CLKCTRL_FRAC_PIXFRAC;
        reg_val |= BF(lowest_fracdiv, CLKCTRL_FRAC_PIXFRAC);
        __raw_writel(reg_val, REGS_CLKCTRL_BASE + HW_CLKCTRL_FRAC);

        /* Ungate the pix fractional divider output */
        stmp3xxx_clearl(BM_CLKCTRL_FRAC_CLKGATEPIX,
                        REGS_CLKCTRL_BASE + HW_CLKCTRL_FRAC);

        /* Program pix divider */
        reg_val = __raw_readl(clk->scale_reg);
        reg_val &= ~(BM_CLKCTRL_PIX_DIV | BM_CLKCTRL_PIX_CLKGATE);
        reg_val |= BF(lowest_div, CLKCTRL_PIX_DIV);
        __raw_writel(reg_val, clk->scale_reg);

        /* Wait for divider update */
        for (i = 10000; i; i--)
                if (!clk_is_busy(clk))
                        break;
        if (!i) {
                ret = -ETIMEDOUT;
                goto out;
        }

        /* Switch to ref_pix source */
        reg_val = __raw_readl(REGS_CLKCTRL_BASE + HW_CLKCTRL_CLKSEQ);
        reg_val &= ~BM_CLKCTRL_CLKSEQ_BYPASS_PIX;
        __raw_writel(reg_val, REGS_CLKCTRL_BASE + HW_CLKCTRL_CLKSEQ);

out:
        return ret;
}
static int cpu_set_rate(struct clk *clk, u32 rate)
{
        int ret = -EINVAL;

        if (rate < 24000)
                goto out;
        else if (rate == 24000) {
                /* switch to the 24M source */
                clk_set_parent(clk, &osc_24M);
        } else {
                int i;
                u32 val, reg_val;
                u32 c = 1;
                u32 clkctrl_cpu = 1;
                u32 clkctrl_frac = 1;

                for ( ; c < 0x40; c++) {
                        u32 f = (pll_clk.rate*18/c + rate/2) / rate;
                        int s1, s2;

                        if (f < 18 || f > 35)
                                continue;
                        s1 = pll_clk.rate*18/clkctrl_frac/clkctrl_cpu - rate;
                        s2 = pll_clk.rate*18/c/f - rate;
                        pr_debug("%s: s1 %d, s2 %d\n", __func__, s1, s2);
                        if (abs(s1) > abs(s2)) {
                                clkctrl_cpu = c;
                                clkctrl_frac = f;
                        }
                        if (s2 == 0)
                                break;
                }
                pr_debug("%s: clkctrl_cpu %d, clkctrl_frac %d\n", __func__,
                         clkctrl_cpu, clkctrl_frac);
                if (c == 0x40) {
                        int d = pll_clk.rate*18/clkctrl_frac/clkctrl_cpu -
                                rate;
                        /* no exact match found: accept only a small error */
                        if (abs(d) > 100 ||
                            clkctrl_frac < 18 || clkctrl_frac > 35)
                                goto out;
                }

                /* program the fractional divider while running from 24M */
                val = __raw_readl(clk->scale_reg);
                val &= ~(0x3f << clk->scale_shift);
                val |= clkctrl_frac << clk->scale_shift;
                clk_set_parent(clk, &osc_24M);
                udelay(10);
                __raw_writel(val, clk->scale_reg);
                /* ungate (CLR register at +8) */
                __raw_writel(1<<7, clk->scale_reg + 8);
                /* write clkctrl_cpu */
                clk->saved_div = clkctrl_cpu;

                reg_val = __raw_readl(REGS_CLKCTRL_BASE + HW_CLKCTRL_CPU);
                reg_val &= ~0x3f;
                reg_val |= clkctrl_cpu;
                __raw_writel(reg_val, REGS_CLKCTRL_BASE + HW_CLKCTRL_CPU);

                for (i = 10000; i; i--)
                        if (!clk_is_busy(clk))
                                break;
                if (!i) {
                        printk(KERN_ERR "couldn't set up CPU divisor\n");
                        ret = -ETIMEDOUT;
                        goto out;
                }

                clk_set_parent(clk, &pll_clk);
        }
        ret = 0;
out:
        return ret;
}
static long cpu_get_rate(struct clk *clk)
{
        long rate = clk->parent->rate * 18;

        rate /= (__raw_readl(clk->scale_reg) >> clk->scale_shift) & 0x3f;
        rate /= __raw_readl(REGS_CLKCTRL_BASE + HW_CLKCTRL_CPU) & 0x3f;
        rate = ((rate + 9) / 10) * 10;
        clk->rate = rate;

        return rate;
}
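/*
 * Example of the rounding above (illustrative): PLL at 480000 kHz with
 * fractional divider 19 and CPU divider 1 gives 480000*18/19 = 454736 kHz,
 * which ((rate + 9) / 10) * 10 rounds up to 454740 kHz, matching the value
 * cpu_round_rate() below reports via the same formula.
 */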
static long cpu_round_rate(struct clk *clk, u32 rate)
{
        unsigned long r = 0;

        if (rate <= 24000)
                r = 24000;
        else {
                u32 clkctrl_cpu = 1;
                u32 clkctrl_frac;

                do {
                        clkctrl_frac =
                                (pll_clk.rate*18 / clkctrl_cpu + rate/2) / rate;
                        if (clkctrl_frac > 35)
                                continue;
                        if (pll_clk.rate*18 / clkctrl_frac / clkctrl_cpu/10 ==
                            rate/10)
                                break;
                } while (pll_clk.rate / 2 >= clkctrl_cpu++ * rate);
                if (pll_clk.rate / 2 < (clkctrl_cpu - 1) * rate)
                        clkctrl_cpu--;

                pr_debug("%s: clkctrl_cpu %d, clkctrl_frac %d\n", __func__,
                         clkctrl_cpu, clkctrl_frac);
                if (clkctrl_frac < 18)
                        clkctrl_frac = 18;
                if (clkctrl_frac > 35)
                        clkctrl_frac = 35;

                r = pll_clk.rate * 18;
                r /= clkctrl_frac;
                r /= clkctrl_cpu;
                r = 10 * ((r + 9) / 10);
        }
        return r;
}
static long emi_get_rate(struct clk *clk)
{
        long rate = clk->parent->rate * 18;

        rate /= (__raw_readl(clk->scale_reg) >> clk->scale_shift) & 0x3f;
        rate /= __raw_readl(REGS_CLKCTRL_BASE + HW_CLKCTRL_EMI) & 0x3f;
        clk->rate = rate;

        return rate;
}
static int clkseq_set_parent(struct clk *clk, struct clk *parent)
{
        int ret = -EINVAL;
        int shift = 8;

        /* one write selects the bypass (24M) path, the other the PLL path */
        if (parent == &osc_24M)
                shift = 4;

        if (clk->bypass_reg) {
#ifdef CONFIG_ARCH_STMP378X
                u32 hbus_val, cpu_val;

                if (clk == &cpu_clk && shift == 4) {
                        hbus_val = __raw_readl(REGS_CLKCTRL_BASE +
                                               HW_CLKCTRL_HBUS);
                        cpu_val = __raw_readl(REGS_CLKCTRL_BASE +
                                              HW_CLKCTRL_CPU);

                        hbus_val &= ~(BM_CLKCTRL_HBUS_DIV_FRAC_EN |
                                      BM_CLKCTRL_HBUS_DIV);
                        clk->saved_div = cpu_val & BM_CLKCTRL_CPU_DIV_CPU;
                        cpu_val &= ~BM_CLKCTRL_CPU_DIV_CPU;

                        if (machine_is_stmp378x()) {
                                __raw_writel(hbus_val,
                                             REGS_CLKCTRL_BASE + HW_CLKCTRL_HBUS);
                                __raw_writel(cpu_val,
                                             REGS_CLKCTRL_BASE + HW_CLKCTRL_CPU);
                                hclk.rate = 0;
                        }
                } else if (clk == &cpu_clk && shift == 8) {
                        hbus_val = __raw_readl(REGS_CLKCTRL_BASE +
                                               HW_CLKCTRL_HBUS);
                        cpu_val = __raw_readl(REGS_CLKCTRL_BASE +
                                              HW_CLKCTRL_CPU);

                        hbus_val &= ~(BM_CLKCTRL_HBUS_DIV_FRAC_EN |
                                      BM_CLKCTRL_HBUS_DIV);
                        cpu_val &= ~BM_CLKCTRL_CPU_DIV_CPU;
                        cpu_val |= clk->saved_div;

                        if (machine_is_stmp378x()) {
                                __raw_writel(hbus_val,
                                             REGS_CLKCTRL_BASE + HW_CLKCTRL_HBUS);
                                __raw_writel(cpu_val,
                                             REGS_CLKCTRL_BASE + HW_CLKCTRL_CPU);
                                hclk.rate = 0;
                        }
                }
#endif
                __raw_writel(1 << clk->bypass_shift,
                             clk->bypass_reg + shift);
                ret = 0;
        }

        return ret;
}
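/*
 * The CLKSEQ bypass bits are toggled through the block's SET/CLR shadow
 * registers (the usual STMP convention, assumed here: +4 sets bits, +8
 * clears them). Writing 1 << bypass_shift at bypass_reg + 4 sets the bit,
 * i.e. bypasses the PLL and runs from the 24 MHz oscillator; writing it at
 * bypass_reg + 8 clears the bit and selects the PLL-derived source.
 */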
static int hbus_set_rate(struct clk *clk, u32 rate)
{
        u8 div = 0;
        int is_frac = 0;
        u32 clkctrl_hbus;
        struct clk *parent = clk->parent;
        int i;

        pr_debug("%s: rate %d, parent rate %d\n", __func__, rate,
                 parent->rate);

        if (rate > parent->rate)
                return -EINVAL;

        if (((parent->rate + rate/2) / rate) * rate != parent->rate &&
            parent->rate / rate < 32) {
                pr_debug("%s: switching to fractional mode\n", __func__);
                is_frac = 1;
        }

        if (is_frac)
                div = (32 * rate + parent->rate / 2) / parent->rate;
        else
                div = (parent->rate + rate - 1) / rate;
        pr_debug("%s: div calculated is %d\n", __func__, div);
        if (!div || div > 0x1f)
                return -EINVAL;

        clk_set_parent(&cpu_clk, &osc_24M);
        udelay(10);
        clkctrl_hbus = __raw_readl(clk->scale_reg);
        clkctrl_hbus &= ~0x3f;
        clkctrl_hbus |= div;
        clkctrl_hbus |= (is_frac << 5);

        __raw_writel(clkctrl_hbus, clk->scale_reg);

        for (i = 10000; i; i--)
                if (!clk_is_busy(clk))
                        break;
        if (!i) {
                printk(KERN_ERR "couldn't set up HBUS divisor\n");
                return -ETIMEDOUT;
        }

        clk_set_parent(&cpu_clk, &pll_clk);
        udelay(10);
        __raw_writel(clkctrl_hbus, clk->scale_reg);

        return 0;
}
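/*
 * Worked example (illustrative): asking for 150000 kHz from a 480000 kHz
 * parent takes the fractional path (480000/150000 = 3.2 is not exact and
 * the ratio is below 32), giving div = (32*150000 + 240000)/480000 = 10
 * and thus HCLK = 480000 * 10/32 = 150000 kHz, where an integer divider
 * could only produce 480000/3 = 160000 or 480000/4 = 120000 kHz.
 */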
static long hbus_get_rate(struct clk *clk)
{
        long rate = clk->parent->rate;

        if (__raw_readl(clk->scale_reg) & 0x20) {
                /* fractional mode: HCLK = parent * (div/32) */
                rate *= __raw_readl(clk->scale_reg) & 0x1f;
                rate /= 32;
        } else
                rate /= __raw_readl(clk->scale_reg) & 0x1f;
        clk->rate = rate;

        return rate;
}
static int xbus_set_rate(struct clk *clk, u32 rate)
{
        u16 div = 0;
        u32 clkctrl_xbus;
        int i;

        pr_debug("%s: rate %d, parent rate %d\n", __func__, rate,
                 clk->parent->rate);

        div = (clk->parent->rate + rate - 1) / rate;
        pr_debug("%s: div calculated is %d\n", __func__, div);
        if (!div || div > 0x3ff)
                return -EINVAL;

        clkctrl_xbus = __raw_readl(clk->scale_reg);
        clkctrl_xbus &= ~0x3ff;
        clkctrl_xbus |= div;
        __raw_writel(clkctrl_xbus, clk->scale_reg);

        for (i = 10000; i; i--)
                if (!clk_is_busy(clk))
                        break;
        if (!i) {
                printk(KERN_ERR "couldn't set up xbus divisor\n");
                return -ETIMEDOUT;
        }

        return 0;
}
static long xbus_get_rate(struct clk *clk)
{
        long rate = clk->parent->rate;

        rate /= __raw_readl(clk->scale_reg) & 0x3ff;
        clk->rate = rate;

        return rate;
}
static struct clk_ops std_ops = {
        .enable         = std_clk_enable,
        .disable        = std_clk_disable,
        .get_rate       = per_get_rate,
        .set_rate       = per_set_rate,
        .set_parent     = clkseq_set_parent,
};

static struct clk_ops min_ops = {
        .enable         = std_clk_enable,
        .disable        = std_clk_disable,
};

static struct clk_ops cpu_ops = {
        .enable         = std_clk_enable,
        .disable        = std_clk_disable,
        .get_rate       = cpu_get_rate,
        .set_rate       = cpu_set_rate,
        .round_rate     = cpu_round_rate,
        .set_parent     = clkseq_set_parent,
};

static struct clk_ops io_ops = {
        .enable         = std_clk_enable,
        .disable        = std_clk_disable,
        .get_rate       = io_get_rate,
        .set_rate       = io_set_rate,
};

static struct clk_ops hbus_ops = {
        .get_rate       = hbus_get_rate,
        .set_rate       = hbus_set_rate,
};

static struct clk_ops xbus_ops = {
        .get_rate       = xbus_get_rate,
        .set_rate       = xbus_set_rate,
};

static struct clk_ops lcdif_ops = {
        .enable         = std_clk_enable,
        .disable        = std_clk_disable,
        .get_rate       = lcdif_get_rate,
        .set_rate       = lcdif_set_rate,
        .set_parent     = clkseq_set_parent,
};

static struct clk_ops emi_ops = {
        .get_rate       = emi_get_rate,
};
/* List of on-chip clocks */

static struct clk osc_24M = {
        .flags          = FIXED_RATE | ENABLED,
        .rate           = 24000,        /* kHz */
};

static struct clk pll_clk = {
        .enable_reg     = REGS_CLKCTRL_BASE + HW_CLKCTRL_PLLCTRL0,
        .flags          = FIXED_RATE | ENABLED,
        .rate           = 480000,       /* kHz */
};

static struct clk cpu_clk = {
        .scale_reg      = REGS_CLKCTRL_BASE + HW_CLKCTRL_FRAC,
        .bypass_reg     = REGS_CLKCTRL_BASE + HW_CLKCTRL_CLKSEQ,
        .busy_reg       = REGS_CLKCTRL_BASE + HW_CLKCTRL_CPU,
        .flags          = RATE_PROPAGATES | ENABLED,
};

static struct clk io_clk = {
        .enable_reg     = REGS_CLKCTRL_BASE + HW_CLKCTRL_FRAC,
        .scale_reg      = REGS_CLKCTRL_BASE + HW_CLKCTRL_FRAC,
        .flags          = RATE_PROPAGATES | ENABLED,
};

static struct clk hclk = {
        .scale_reg      = REGS_CLKCTRL_BASE + HW_CLKCTRL_HBUS,
        .bypass_reg     = REGS_CLKCTRL_BASE + HW_CLKCTRL_CLKSEQ,
        .busy_reg       = REGS_CLKCTRL_BASE + HW_CLKCTRL_HBUS,
        .flags          = RATE_PROPAGATES | ENABLED,
};

static struct clk xclk = {
        .scale_reg      = REGS_CLKCTRL_BASE + HW_CLKCTRL_XBUS,
        .busy_reg       = REGS_CLKCTRL_BASE + HW_CLKCTRL_XBUS,
        .flags          = RATE_PROPAGATES | ENABLED,
};

static struct clk uart_clk = {
        .enable_reg     = REGS_CLKCTRL_BASE + HW_CLKCTRL_XTAL,
};

static struct clk audio_clk = {
        .enable_reg     = REGS_CLKCTRL_BASE + HW_CLKCTRL_XTAL,
};

static struct clk pwm_clk = {
        .enable_reg     = REGS_CLKCTRL_BASE + HW_CLKCTRL_XTAL,
};

static struct clk dri_clk = {
        .enable_reg     = REGS_CLKCTRL_BASE + HW_CLKCTRL_XTAL,
};

static struct clk digctl_clk = {
        .enable_reg     = REGS_CLKCTRL_BASE + HW_CLKCTRL_XTAL,
};

static struct clk timer_clk = {
        .enable_reg     = REGS_CLKCTRL_BASE + HW_CLKCTRL_XTAL,
};

static struct clk lcdif_clk = {
        .scale_reg      = REGS_CLKCTRL_BASE + HW_CLKCTRL_PIX,
        .busy_reg       = REGS_CLKCTRL_BASE + HW_CLKCTRL_PIX,
        .enable_reg     = REGS_CLKCTRL_BASE + HW_CLKCTRL_PIX,
        .bypass_reg     = REGS_CLKCTRL_BASE + HW_CLKCTRL_CLKSEQ,
        .flags          = NEEDS_SET_PARENT,
};

static struct clk ssp_clk = {
        .scale_reg      = REGS_CLKCTRL_BASE + HW_CLKCTRL_SSP,
        .busy_reg       = REGS_CLKCTRL_BASE + HW_CLKCTRL_SSP,
        .enable_reg     = REGS_CLKCTRL_BASE + HW_CLKCTRL_SSP,
        .bypass_reg     = REGS_CLKCTRL_BASE + HW_CLKCTRL_CLKSEQ,
        .flags          = NEEDS_SET_PARENT,
};

static struct clk gpmi_clk = {
        .scale_reg      = REGS_CLKCTRL_BASE + HW_CLKCTRL_GPMI,
        .busy_reg       = REGS_CLKCTRL_BASE + HW_CLKCTRL_GPMI,
        .enable_reg     = REGS_CLKCTRL_BASE + HW_CLKCTRL_GPMI,
        .bypass_reg     = REGS_CLKCTRL_BASE + HW_CLKCTRL_CLKSEQ,
        .flags          = NEEDS_SET_PARENT,
};

static struct clk spdif_clk = {
        .enable_reg     = REGS_CLKCTRL_BASE + HW_CLKCTRL_SPDIF,
};

static struct clk emi_clk = {
        .enable_reg     = REGS_CLKCTRL_BASE + HW_CLKCTRL_EMI,
        .scale_reg      = REGS_CLKCTRL_BASE + HW_CLKCTRL_FRAC,
        .busy_reg       = REGS_CLKCTRL_BASE + HW_CLKCTRL_EMI,
        .bypass_reg     = REGS_CLKCTRL_BASE + HW_CLKCTRL_CLKSEQ,
};

static struct clk ir_clk = {
        .enable_reg     = REGS_CLKCTRL_BASE + HW_CLKCTRL_IR,
        .bypass_reg     = REGS_CLKCTRL_BASE + HW_CLKCTRL_CLKSEQ,
};

static struct clk saif_clk = {
        .scale_reg      = REGS_CLKCTRL_BASE + HW_CLKCTRL_SAIF,
        .busy_reg       = REGS_CLKCTRL_BASE + HW_CLKCTRL_SAIF,
        .enable_reg     = REGS_CLKCTRL_BASE + HW_CLKCTRL_SAIF,
        .bypass_reg     = REGS_CLKCTRL_BASE + HW_CLKCTRL_CLKSEQ,
};

static struct clk usb_clk = {
        .enable_reg     = REGS_CLKCTRL_BASE + HW_CLKCTRL_PLLCTRL0,
};
/* list of all the clocks */
static struct clk_lookup onchip_clks[] = {
        /* per-device clkdev entries binding dev/con ids to the clks above */
};
static int propagate_rate(struct clk *clk)
{
        struct clk_lookup *cl;

        for (cl = onchip_clks; cl < onchip_clks + ARRAY_SIZE(onchip_clks);
             cl++) {
                if (unlikely(!clk_good(cl->clk)))
                        continue;
                if (cl->clk->parent == clk && cl->clk->ops->get_rate) {
                        cl->clk->ops->get_rate(cl->clk);
                        if (cl->clk->flags & RATE_PROPAGATES)
                                propagate_rate(cl->clk);
                }
        }

        return 0;
}
unsigned long clk_get_rate(struct clk *clk)
{
        if (unlikely(!clk_good(clk)))
                return 0;

        if (clk->rate != 0)
                return clk->rate;

        if (clk->ops->get_rate != NULL)
                return clk->ops->get_rate(clk);

        return clk_get_rate(clk->parent);
}
EXPORT_SYMBOL(clk_get_rate);
long clk_round_rate(struct clk *clk, unsigned long rate)
{
        if (unlikely(!clk_good(clk)))
                return 0;

        if (clk->ops->round_rate)
                return clk->ops->round_rate(clk, rate);

        return 0;
}
EXPORT_SYMBOL(clk_round_rate);
static inline int close_enough(long rate1, long rate2)
{
        return rate1 && !((rate2 - rate1) * 1000 / rate1);
}
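/*
 * close_enough() treats rates within 0.1% (after integer truncation) as
 * equal, e.g. close_enough(100000, 100099) is true because
 * 99 * 1000 / 100000 == 0, so clk_set_rate() below skips reprogramming
 * for differences that small.
 */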
int clk_set_rate(struct clk *clk, unsigned long rate)
{
        int ret = -EINVAL;

        if (unlikely(!clk_good(clk)))
                goto out;

        if (clk->flags & FIXED_RATE || !clk->ops->set_rate)
                goto out;
        else if (!close_enough(clk->rate, rate)) {
                ret = clk->ops->set_rate(clk, rate);
                if (ret < 0)
                        goto out;
                clk->rate = rate;
        } else
                ret = 0;

        if (clk->flags & RATE_PROPAGATES)
                propagate_rate(clk);

out:
        return ret;
}
EXPORT_SYMBOL(clk_set_rate);
int clk_enable(struct clk *clk)
{
        unsigned long clocks_flags;

        if (unlikely(!clk_good(clk)))
                return -EINVAL;

        if (clk->parent)
                clk_enable(clk->parent);

        spin_lock_irqsave(&clocks_lock, clocks_flags);

        clk->usage++;
        if (clk->ops && clk->ops->enable)
                clk->ops->enable(clk);

        spin_unlock_irqrestore(&clocks_lock, clocks_flags);

        return 0;
}
EXPORT_SYMBOL(clk_enable);
static void local_clk_disable(struct clk *clk)
{
        if (unlikely(!clk_good(clk)))
                return;

        if (clk->usage == 0 && clk->ops->disable)
                clk->ops->disable(clk);

        if (clk->parent)
                local_clk_disable(clk->parent);
}
void clk_disable(struct clk *clk)
{
        unsigned long clocks_flags;

        if (unlikely(!clk_good(clk)))
                return;

        spin_lock_irqsave(&clocks_lock, clocks_flags);

        if ((--clk->usage) == 0 && clk->ops->disable)
                clk->ops->disable(clk);

        spin_unlock_irqrestore(&clocks_lock, clocks_flags);

        if (clk->parent)
                clk_disable(clk->parent);
}
EXPORT_SYMBOL(clk_disable);
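/*
 * clk_enable()/clk_disable() keep a per-clock usage count and recurse up
 * the parent chain, so a leaf clock pins its whole ancestry on.
 * local_clk_disable() is the unlocked helper: it gates a clock (and walks
 * its parents) only when the usage count is already zero, without touching
 * the count itself.
 */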
/* Some additional API */
int clk_set_parent(struct clk *clk, struct clk *parent)
{
        int ret = -EINVAL;
        unsigned long clocks_flags;

        if (unlikely(!clk_good(clk)))
                goto out;

        if (!clk->ops->set_parent)
                goto out;

        spin_lock_irqsave(&clocks_lock, clocks_flags);

        ret = clk->ops->set_parent(clk, parent);
        if (!ret) {
                /* disable if usage count is 0 */
                local_clk_disable(parent);

                /* move the usage count over to the new parent */
                parent->usage += clk->usage;
                clk->parent->usage -= clk->usage;

                /* disable if new usage count is 0 */
                local_clk_disable(clk->parent);

                clk->parent = parent;
        }

        spin_unlock_irqrestore(&clocks_lock, clocks_flags);

out:
        return ret;
}
EXPORT_SYMBOL(clk_set_parent);
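/*
 * Reparenting example (illustrative): if a clock with usage 2 is moved
 * from osc_24M to pll_clk, the two references migrate with it
 * (pll_clk.usage += 2, osc_24M.usage -= 2) and either side is gated off
 * via local_clk_disable() once its own count is zero.
 */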
struct clk *clk_get_parent(struct clk *clk)
{
        if (unlikely(!clk_good(clk)))
                return NULL;
        return clk->parent;
}
EXPORT_SYMBOL(clk_get_parent);
static int __init clk_init(void)
{
        struct clk_lookup *cl;
        struct clk_ops *ops;

        spin_lock_init(&clocks_lock);

        for (cl = onchip_clks; cl < onchip_clks + ARRAY_SIZE(onchip_clks);
             cl++) {
                if (cl->clk->flags & ENABLED)
                        clk_enable(cl->clk);
                else
                        local_clk_disable(cl->clk);

                ops = cl->clk->ops;

                if ((cl->clk->flags & NEEDS_INITIALIZATION) &&
                    ops && ops->set_rate)
                        ops->set_rate(cl->clk, cl->clk->rate);

                if (cl->clk->flags & FIXED_RATE) {
                        if (cl->clk->flags & RATE_PROPAGATES)
                                propagate_rate(cl->clk);
                } else {
                        if (ops && ops->get_rate)
                                ops->get_rate(cl->clk);
                }

                if (cl->clk->flags & NEEDS_SET_PARENT) {
                        if (ops && ops->set_parent)
                                ops->set_parent(cl->clk, cl->clk->parent);
                }

                clkdev_add(cl);
        }

        return 0;
}

arch_initcall(clk_init);