/*
 * Clock and PLL control for DaVinci devices
 *
 * Copyright (C) 2006-2007 Texas Instruments.
 * Copyright (C) 2008-2009 Deep Root Systems, LLC
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/io.h>
#include <linux/delay.h>

#include <mach/hardware.h>

#include <mach/clock.h>
#include <mach/psc.h>
#include <mach/cputype.h>
#include "clock.h"

static LIST_HEAD(clocks);
static DEFINE_MUTEX(clocks_mutex);
static DEFINE_SPINLOCK(clockfw_lock);

static unsigned psc_domain(struct clk *clk)
{
        return (clk->flags & PSC_DSP)
                ? DAVINCI_GPSC_DSPDOMAIN
                : DAVINCI_GPSC_ARMDOMAIN;
}

static void __clk_enable(struct clk *clk)
{
        if (clk->parent)
                __clk_enable(clk->parent);
        if (clk->usecount++ == 0 && (clk->flags & CLK_PSC))
                davinci_psc_config(psc_domain(clk), clk->gpsc, clk->lpsc,
                                true, clk->flags);
}

static void __clk_disable(struct clk *clk)
{
        if (WARN_ON(clk->usecount == 0))
                return;
        if (--clk->usecount == 0 && !(clk->flags & CLK_PLL) &&
            (clk->flags & CLK_PSC))
                davinci_psc_config(psc_domain(clk), clk->gpsc, clk->lpsc,
                                false, clk->flags);
        if (clk->parent)
                __clk_disable(clk->parent);
}

int clk_enable(struct clk *clk)
{
        unsigned long flags;

        if (clk == NULL || IS_ERR(clk))
                return -EINVAL;

        spin_lock_irqsave(&clockfw_lock, flags);
        __clk_enable(clk);
        spin_unlock_irqrestore(&clockfw_lock, flags);

        return 0;
}
EXPORT_SYMBOL(clk_enable);

void clk_disable(struct clk *clk)
{
        unsigned long flags;

        if (clk == NULL || IS_ERR(clk))
                return;

        spin_lock_irqsave(&clockfw_lock, flags);
        __clk_disable(clk);
        spin_unlock_irqrestore(&clockfw_lock, flags);
}
EXPORT_SYMBOL(clk_disable);

unsigned long clk_get_rate(struct clk *clk)
{
        if (clk == NULL || IS_ERR(clk))
                return -EINVAL;

        return clk->rate;
}
EXPORT_SYMBOL(clk_get_rate);

long clk_round_rate(struct clk *clk, unsigned long rate)
{
        if (clk == NULL || IS_ERR(clk))
                return -EINVAL;

        if (clk->round_rate)
                return clk->round_rate(clk, rate);

        return clk->rate;
}
EXPORT_SYMBOL(clk_round_rate);

/* Propagate rate to children */
static void propagate_rate(struct clk *root)
{
        struct clk *clk;

        list_for_each_entry(clk, &root->children, childnode) {
                if (clk->recalc)
                        clk->rate = clk->recalc(clk);
                propagate_rate(clk);
        }
}

int clk_set_rate(struct clk *clk, unsigned long rate)
{
        unsigned long flags;
        int ret = -EINVAL;

        if (clk == NULL || IS_ERR(clk))
                return ret;

        if (clk->set_rate)
                ret = clk->set_rate(clk, rate);

        spin_lock_irqsave(&clockfw_lock, flags);
        if (ret == 0) {
                if (clk->recalc)
                        clk->rate = clk->recalc(clk);
                propagate_rate(clk);
        }
        spin_unlock_irqrestore(&clockfw_lock, flags);

        return ret;
}
EXPORT_SYMBOL(clk_set_rate);

int clk_set_parent(struct clk *clk, struct clk *parent)
{
        unsigned long flags;

        if (clk == NULL || IS_ERR(clk))
                return -EINVAL;

        /* Cannot change parent on enabled clock */
        if (WARN_ON(clk->usecount))
                return -EINVAL;

        mutex_lock(&clocks_mutex);
        clk->parent = parent;
        list_del_init(&clk->childnode);
        list_add(&clk->childnode, &clk->parent->children);
        mutex_unlock(&clocks_mutex);

        spin_lock_irqsave(&clockfw_lock, flags);
        if (clk->recalc)
                clk->rate = clk->recalc(clk);
        propagate_rate(clk);
        spin_unlock_irqrestore(&clockfw_lock, flags);

        return 0;
}
EXPORT_SYMBOL(clk_set_parent);

int clk_register(struct clk *clk)
{
        if (clk == NULL || IS_ERR(clk))
                return -EINVAL;

        if (WARN(clk->parent && !clk->parent->rate,
                        "CLK: %s parent %s has no rate!\n",
                        clk->name, clk->parent->name))
                return -EINVAL;

        INIT_LIST_HEAD(&clk->children);

        mutex_lock(&clocks_mutex);
        list_add_tail(&clk->node, &clocks);
        if (clk->parent)
                list_add_tail(&clk->childnode, &clk->parent->children);
        mutex_unlock(&clocks_mutex);

        /* If rate is already set, use it */
        if (clk->rate)
                return 0;

        /* Else, see if there is a way to calculate it */
        if (clk->recalc)
                clk->rate = clk->recalc(clk);

        /* Otherwise, default to parent rate */
        else if (clk->parent)
                clk->rate = clk->parent->rate;

        return 0;
}
EXPORT_SYMBOL(clk_register);

void clk_unregister(struct clk *clk)
{
        if (clk == NULL || IS_ERR(clk))
                return;

        mutex_lock(&clocks_mutex);
        list_del(&clk->node);
        list_del(&clk->childnode);
        mutex_unlock(&clocks_mutex);
}
EXPORT_SYMBOL(clk_unregister);

#ifdef CONFIG_DAVINCI_RESET_CLOCKS
/*
 * Disable any unused clocks left on by the bootloader
 */
static int __init clk_disable_unused(void)
{
        struct clk *ck;

        spin_lock_irq(&clockfw_lock);
        list_for_each_entry(ck, &clocks, node) {
                if (ck->usecount > 0)
                        continue;
                if (!(ck->flags & CLK_PSC))
                        continue;

                /* ignore if in Disabled or SwRstDisable states */
                if (!davinci_psc_is_clk_active(ck->gpsc, ck->lpsc))
                        continue;

                pr_debug("Clocks: disable unused %s\n", ck->name);

                davinci_psc_config(psc_domain(ck), ck->gpsc, ck->lpsc,
                                false, ck->flags);
        }
        spin_unlock_irq(&clockfw_lock);

        return 0;
}
late_initcall(clk_disable_unused);
#endif

static unsigned long clk_sysclk_recalc(struct clk *clk)
{
        u32 v, plldiv;
        struct pll_data *pll;
        unsigned long rate = clk->rate;

        /* If this is the PLL base clock, no more calculations needed */
        if (clk->pll_data)
                return rate;

        if (WARN_ON(!clk->parent))
                return rate;

        rate = clk->parent->rate;

        /* Otherwise, the parent must be a PLL */
        if (WARN_ON(!clk->parent->pll_data))
                return rate;

        pll = clk->parent->pll_data;

        /* If pre-PLL, source clock is before the multiplier and divider(s) */
        if (clk->flags & PRE_PLL)
                rate = pll->input_rate;

        if (!clk->div_reg)
                return rate;

        v = __raw_readl(pll->base + clk->div_reg);
        if (v & PLLDIV_EN) {
                plldiv = (v & pll->div_ratio_mask) + 1;
                if (plldiv)
                        rate /= plldiv;
        }

        return rate;
}

int davinci_set_sysclk_rate(struct clk *clk, unsigned long rate)
{
        unsigned v;
        struct pll_data *pll;
        unsigned long input;
        unsigned ratio = 0;

        /* If this is the PLL base clock, wrong function to call */
        if (clk->pll_data)
                return -EINVAL;

        /* There must be a parent... */
        if (WARN_ON(!clk->parent))
                return -EINVAL;

        /* ... the parent must be a PLL... */
        if (WARN_ON(!clk->parent->pll_data))
                return -EINVAL;

        /* ... and this clock must have a divider. */
        if (WARN_ON(!clk->div_reg))
                return -EINVAL;

        pll = clk->parent->pll_data;

        input = clk->parent->rate;

        /* If pre-PLL, source clock is before the multiplier and divider(s) */
        if (clk->flags & PRE_PLL)
                input = pll->input_rate;

        if (input > rate) {
                /*
                 * Can afford to provide an output little higher than requested
                 * only if maximum rate supported by hardware on this sysclk
                 * is known.
                 */
                if (clk->maxrate) {
                        ratio = DIV_ROUND_CLOSEST(input, rate);
                        if (input / ratio > clk->maxrate)
                                ratio = 0;
                }

                if (ratio == 0)
                        ratio = DIV_ROUND_UP(input, rate);

                ratio--;
        }

        if (ratio > pll->div_ratio_mask)
                return -EINVAL;

        do {
                v = __raw_readl(pll->base + PLLSTAT);
        } while (v & PLLSTAT_GOSTAT);

        v = __raw_readl(pll->base + clk->div_reg);
        v &= ~pll->div_ratio_mask;
        v |= ratio | PLLDIV_EN;
        __raw_writel(v, pll->base + clk->div_reg);

        v = __raw_readl(pll->base + PLLCMD);
        v |= PLLCMD_GOSET;
        __raw_writel(v, pll->base + PLLCMD);

        do {
                v = __raw_readl(pll->base + PLLSTAT);
        } while (v & PLLSTAT_GOSTAT);

        return 0;
}
EXPORT_SYMBOL(davinci_set_sysclk_rate);

static unsigned long clk_leafclk_recalc(struct clk *clk)
{
        if (WARN_ON(!clk->parent))
                return clk->rate;

        return clk->parent->rate;
}

int davinci_simple_set_rate(struct clk *clk, unsigned long rate)
{
        clk->rate = rate;
        return 0;
}

static unsigned long clk_pllclk_recalc(struct clk *clk)
{
        u32 ctrl, mult = 1, prediv = 1, postdiv = 1;
        u8 bypass;
        struct pll_data *pll = clk->pll_data;
        unsigned long rate = clk->rate;

        ctrl = __raw_readl(pll->base + PLLCTL);
        rate = pll->input_rate = clk->parent->rate;

        if (ctrl & PLLCTL_PLLEN) {
                bypass = 0;
                mult = __raw_readl(pll->base + PLLM);
                if (cpu_is_davinci_dm365())
                        mult = 2 * (mult & PLLM_PLLM_MASK);
                else
                        mult = (mult & PLLM_PLLM_MASK) + 1;
        } else
                bypass = 1;

        if (pll->flags & PLL_HAS_PREDIV) {
                prediv = __raw_readl(pll->base + PREDIV);
                if (prediv & PLLDIV_EN)
                        prediv = (prediv & pll->div_ratio_mask) + 1;
                else
                        prediv = 1;
        }

        /* pre-divider is fixed, but (some?) chips won't report that */
        if (cpu_is_davinci_dm355() && pll->num == 1)
                prediv = 8;

        if (pll->flags & PLL_HAS_POSTDIV) {
                postdiv = __raw_readl(pll->base + POSTDIV);
                if (postdiv & PLLDIV_EN)
                        postdiv = (postdiv & pll->div_ratio_mask) + 1;
                else
                        postdiv = 1;
        }

        if (!bypass) {
                rate /= prediv;
                rate *= mult;
                rate /= postdiv;
        }

        pr_debug("PLL%d: input = %lu MHz [ ",
                 pll->num, clk->parent->rate / 1000000);
        if (bypass)
                pr_debug("bypass ");
        if (prediv > 1)
                pr_debug("/ %d ", prediv);
        if (mult > 1)
                pr_debug("* %d ", mult);
        if (postdiv > 1)
                pr_debug("/ %d ", postdiv);
        pr_debug("] --> %lu MHz output.\n", rate / 1000000);

        return rate;
}

/**
 * davinci_set_pllrate - set the output rate of a given PLL.
 *
 * Note: Currently tested to work with OMAP-L138 only.
 *
 * @pll: pll whose rate needs to be changed.
 * @prediv: The pre divider value. Passing 0 disables the pre-divider.
 * @mult: The multiplier value. Passing 0 leads to multiply-by-one.
 * @postdiv: The post divider value. Passing 0 disables the post-divider.
 */
int davinci_set_pllrate(struct pll_data *pll, unsigned int prediv,
                                        unsigned int mult, unsigned int postdiv)
{
        u32 ctrl;
        unsigned int locktime;
        unsigned long flags;

        if (pll->base == NULL)
                return -EINVAL;

        /*
         * PLL lock time required per OMAP-L138 datasheet is
         * (2000 * prediv)/sqrt(pllm) OSCIN cycles. We approximate sqrt(pllm)
         * as 4 and OSCIN cycle as 25 MHz.
         */
        if (prediv) {
                locktime = ((2000 * prediv) / 100);
                prediv = (prediv - 1) | PLLDIV_EN;
        } else {
                locktime = PLL_LOCK_TIME;
        }
        if (postdiv)
                postdiv = (postdiv - 1) | PLLDIV_EN;
        if (mult)
                mult = mult - 1;

        /* Protect against simultaneous calls to PLL setting sequence */
        spin_lock_irqsave(&clockfw_lock, flags);

        ctrl = __raw_readl(pll->base + PLLCTL);

        /* Switch the PLL to bypass mode */
        ctrl &= ~(PLLCTL_PLLENSRC | PLLCTL_PLLEN);
        __raw_writel(ctrl, pll->base + PLLCTL);

        udelay(PLL_BYPASS_TIME);

        /* Reset and enable PLL */
        ctrl &= ~(PLLCTL_PLLRST | PLLCTL_PLLDIS);
        __raw_writel(ctrl, pll->base + PLLCTL);

        if (pll->flags & PLL_HAS_PREDIV)
                __raw_writel(prediv, pll->base + PREDIV);

        __raw_writel(mult, pll->base + PLLM);

        if (pll->flags & PLL_HAS_POSTDIV)
                __raw_writel(postdiv, pll->base + POSTDIV);

        udelay(PLL_RESET_TIME);

        /* Bring PLL out of reset */
        ctrl |= PLLCTL_PLLRST;
        __raw_writel(ctrl, pll->base + PLLCTL);

        udelay(locktime);

        /* Remove PLL from bypass mode */
        ctrl |= PLLCTL_PLLEN;
        __raw_writel(ctrl, pll->base + PLLCTL);

        spin_unlock_irqrestore(&clockfw_lock, flags);

        return 0;
}
EXPORT_SYMBOL(davinci_set_pllrate);

/**
 * davinci_set_refclk_rate() - Set the reference clock rate
 * @rate:	The new rate.
 *
 * Sets the reference clock rate to a given value. This will most likely
 * result in the entire clock tree getting updated.
 *
 * This is used to support boards which use a reference clock different
 * than that used by default in <soc>.c file. The reference clock rate
 * should be updated early in the boot process; ideally soon after the
 * clock tree has been initialized once with the default reference clock
 * rate (davinci_common_init()).
 *
 * Returns 0 on success, error otherwise.
 */
int davinci_set_refclk_rate(unsigned long rate)
{
        struct clk *refclk;

        refclk = clk_get(NULL, "ref");
        if (IS_ERR(refclk)) {
                pr_err("%s: failed to get reference clock.\n", __func__);
                return PTR_ERR(refclk);
        }

        clk_set_rate(refclk, rate);

        clk_put(refclk);

        return 0;
}

int __init davinci_clk_init(struct clk_lookup *clocks)
{
        struct clk_lookup *c;
        struct clk *clk;
        size_t num_clocks = 0;

        for (c = clocks; c->clk; c++) {
                clk = c->clk;

                if (!clk->recalc) {

                        /* Check if clock is a PLL */
                        if (clk->pll_data)
                                clk->recalc = clk_pllclk_recalc;

                        /* Else, if it is a PLL-derived clock */
                        else if (clk->flags & CLK_PLL)
                                clk->recalc = clk_sysclk_recalc;

                        /* Otherwise, it is a leaf clock (PSC clock) */
                        else if (clk->parent)
                                clk->recalc = clk_leafclk_recalc;
                }

                if (clk->pll_data) {
                        struct pll_data *pll = clk->pll_data;

                        if (!pll->div_ratio_mask)
                                pll->div_ratio_mask = PLLDIV_RATIO_MASK;

                        if (pll->phys_base && !pll->base) {
                                pll->base = ioremap(pll->phys_base, SZ_4K);
                                WARN_ON(!pll->base);
                        }
                }

                if (clk->recalc)
                        clk->rate = clk->recalc(clk);

                if (clk->lpsc)
                        clk->flags |= CLK_PSC;

                clk_register(clk);
                num_clocks++;

                /* Turn on clocks that Linux doesn't otherwise manage */
                if (clk->flags & ALWAYS_ENABLED)
                        clk_enable(clk);
        }

        clkdev_add_table(clocks, num_clocks);

        return 0;
}

#ifdef CONFIG_DEBUG_FS

#include <linux/debugfs.h>
#include <linux/seq_file.h>

#define CLKNAME_MAX	10		/* longest clock name */
#define NEST_DELTA	2
#define NEST_MAX	4

static void
dump_clock(struct seq_file *s, unsigned nest, struct clk *parent)
{
        char            *state;
        char            buf[CLKNAME_MAX + NEST_DELTA * NEST_MAX];
        struct clk      *clk;
        unsigned        i;

        if (parent->flags & CLK_PLL)
                state = "pll";
        else if (parent->flags & CLK_PSC)
                state = "psc";
        else
                state = "";

        /* <nest spaces> name <pad to end> */
        memset(buf, ' ', sizeof(buf) - 1);
        buf[sizeof(buf) - 1] = 0;
        i = strlen(parent->name);
        memcpy(buf + nest, parent->name,
                        min(i, (unsigned)(sizeof(buf) - 1 - nest)));

        seq_printf(s, "%s users=%2d %-3s %9ld Hz\n",
                   buf, parent->usecount, state, clk_get_rate(parent));
        /* REVISIT show device associations too */

        /* cost is now small, but not linear... */
        list_for_each_entry(clk, &parent->children, childnode) {
                dump_clock(s, nest + NEST_DELTA, clk);
        }
}

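/*
 * The resulting davinci_clocks file (under the debugfs mount, typically
 * /sys/kernel/debug) indents each clock by NEST_DELTA spaces per tree
 * level; a line looks roughly like this (values are examples only):
 *
 *        pll1_sysclk2     users= 3 pll 148500000 Hz
 */
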
static int davinci_ck_show(struct seq_file *m, void *v)
{
        struct clk *clk;

        /*
         * Show clock tree; We trust nonzero usecounts equate to PSC enables...
         */
        mutex_lock(&clocks_mutex);
        list_for_each_entry(clk, &clocks, node)
                if (!clk->parent)
                        dump_clock(m, 0, clk);
        mutex_unlock(&clocks_mutex);

        return 0;
}

static int davinci_ck_open(struct inode *inode, struct file *file)
{
        return single_open(file, davinci_ck_show, NULL);
}

static const struct file_operations davinci_ck_operations = {
        .open           = davinci_ck_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int __init davinci_clk_debugfs_init(void)
{
        debugfs_create_file("davinci_clocks", S_IFREG | S_IRUGO, NULL, NULL,
                                                &davinci_ck_operations);

        return 0;
}
device_initcall(davinci_clk_debugfs_init);
#endif /* CONFIG_DEBUG_FS */