/*
 * Clock and PLL control for DaVinci devices
 *
 * Copyright (C) 2006-2007 Texas Instruments.
 * Copyright (C) 2008-2009 Deep Root Systems, LLC
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/io.h>
#include <linux/delay.h>

#include <mach/hardware.h>

#include <mach/clock.h>
#include <mach/psc.h>
#include <mach/cputype.h>
#include "clock.h"
static LIST_HEAD(clocks);
static DEFINE_MUTEX(clocks_mutex);
static DEFINE_SPINLOCK(clockfw_lock);
static void __clk_enable(struct clk *clk)
{
	if (clk->parent)
		__clk_enable(clk->parent);
	if (clk->usecount++ == 0) {
		if (clk->flags & CLK_PSC)
			davinci_psc_config(clk->domain, clk->gpsc, clk->lpsc,
					   true, clk->flags);
		else if (clk->clk_enable)
			clk->clk_enable(clk);
	}
}
static void __clk_disable(struct clk *clk)
{
	if (WARN_ON(clk->usecount == 0))
		return;
	if (--clk->usecount == 0) {
		if (!(clk->flags & CLK_PLL) && (clk->flags & CLK_PSC))
			davinci_psc_config(clk->domain, clk->gpsc, clk->lpsc,
					   false, clk->flags);
		else if (clk->clk_disable)
			clk->clk_disable(clk);
	}
	if (clk->parent)
		__clk_disable(clk->parent);
}
int davinci_clk_reset(struct clk *clk, bool reset)
{
	unsigned long flags;

	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	spin_lock_irqsave(&clockfw_lock, flags);
	if (clk->flags & CLK_PSC)
		davinci_psc_reset(clk->gpsc, clk->lpsc, reset);
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return 0;
}
EXPORT_SYMBOL(davinci_clk_reset);
int davinci_clk_reset_assert(struct clk *clk)
{
	if (clk == NULL || IS_ERR(clk) || !clk->reset)
		return -EINVAL;

	return clk->reset(clk, true);
}
EXPORT_SYMBOL(davinci_clk_reset_assert);
int davinci_clk_reset_deassert(struct clk *clk)
{
	if (clk == NULL || IS_ERR(clk) || !clk->reset)
		return -EINVAL;

	return clk->reset(clk, false);
}
EXPORT_SYMBOL(davinci_clk_reset_deassert);
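/*
 * Illustrative only: a driver that must hold its module in reset while
 * reprogramming it could pair the two helpers above; the clock and the
 * surrounding code are hypothetical:
 *
 *	davinci_clk_reset_assert(clk);
 *	... reload module firmware/state ...
 *	davinci_clk_reset_deassert(clk);
 *
 * Both helpers simply call clk->reset(), which davinci_clk_init() points at
 * davinci_clk_reset() for clocks flagged PSC_LRST.
 */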
int clk_enable(struct clk *clk)
{
	unsigned long flags;

	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	spin_lock_irqsave(&clockfw_lock, flags);
	__clk_enable(clk);
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return 0;
}
EXPORT_SYMBOL(clk_enable);
void clk_disable(struct clk *clk)
{
	unsigned long flags;

	if (clk == NULL || IS_ERR(clk))
		return;

	spin_lock_irqsave(&clockfw_lock, flags);
	__clk_disable(clk);
	spin_unlock_irqrestore(&clockfw_lock, flags);
}
EXPORT_SYMBOL(clk_disable);
unsigned long clk_get_rate(struct clk *clk)
{
	if (clk == NULL || IS_ERR(clk))
		return 0;

	return clk->rate;
}
EXPORT_SYMBOL(clk_get_rate);
long clk_round_rate(struct clk *clk, unsigned long rate)
{
	if (clk == NULL || IS_ERR(clk))
		return 0;

	if (clk->round_rate)
		return clk->round_rate(clk, rate);

	return clk->rate;
}
EXPORT_SYMBOL(clk_round_rate);
/* Propagate rate to children */
static void propagate_rate(struct clk *root)
{
	struct clk *clk;

	list_for_each_entry(clk, &root->children, childnode) {
		if (clk->recalc)
			clk->rate = clk->recalc(clk);
		propagate_rate(clk);
	}
}
int clk_set_rate(struct clk *clk, unsigned long rate)
{
	unsigned long flags;
	int ret = -EINVAL;

	if (clk == NULL || IS_ERR(clk))
		return ret;

	if (clk->set_rate)
		ret = clk->set_rate(clk, rate);

	spin_lock_irqsave(&clockfw_lock, flags);
	if (ret == 0) {
		if (clk->recalc)
			clk->rate = clk->recalc(clk);
		propagate_rate(clk);
	}
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_set_rate);
int clk_set_parent(struct clk *clk, struct clk *parent)
{
	unsigned long flags;

	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	/* Cannot change parent on enabled clock */
	if (WARN_ON(clk->usecount))
		return -EINVAL;

	mutex_lock(&clocks_mutex);
	clk->parent = parent;
	list_del_init(&clk->childnode);
	list_add(&clk->childnode, &clk->parent->children);
	mutex_unlock(&clocks_mutex);

	spin_lock_irqsave(&clockfw_lock, flags);
	if (clk->recalc)
		clk->rate = clk->recalc(clk);
	propagate_rate(clk);
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return 0;
}
EXPORT_SYMBOL(clk_set_parent);
int clk_register(struct clk *clk)
{
	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	if (WARN(clk->parent && !clk->parent->rate,
		 "CLK: %s parent %s has no rate!\n",
		 clk->name, clk->parent->name))
		return -EINVAL;

	INIT_LIST_HEAD(&clk->children);

	mutex_lock(&clocks_mutex);
	list_add_tail(&clk->node, &clocks);
	if (clk->parent)
		list_add_tail(&clk->childnode, &clk->parent->children);
	mutex_unlock(&clocks_mutex);

	/* If rate is already set, use it */
	if (clk->rate)
		return 0;

	/* Else, see if there is a way to calculate it */
	if (clk->recalc)
		clk->rate = clk->recalc(clk);

	/* Otherwise, default to parent rate */
	else if (clk->parent)
		clk->rate = clk->parent->rate;

	return 0;
}
EXPORT_SYMBOL(clk_register);
void clk_unregister(struct clk *clk)
{
	if (clk == NULL || IS_ERR(clk))
		return;

	mutex_lock(&clocks_mutex);
	list_del(&clk->node);
	list_del(&clk->childnode);
	mutex_unlock(&clocks_mutex);
}
EXPORT_SYMBOL(clk_unregister);
#ifdef CONFIG_DAVINCI_RESET_CLOCKS
/*
 * Disable any unused clocks left on by the bootloader
 */
int __init davinci_clk_disable_unused(void)
{
	struct clk *ck;

	spin_lock_irq(&clockfw_lock);
	list_for_each_entry(ck, &clocks, node) {
		if (ck->usecount > 0)
			continue;
		if (!(ck->flags & CLK_PSC))
			continue;

		/* ignore if in Disabled or SwRstDisable states */
		if (!davinci_psc_is_clk_active(ck->gpsc, ck->lpsc))
			continue;

		pr_debug("Clocks: disable unused %s\n", ck->name);

		davinci_psc_config(ck->domain, ck->gpsc, ck->lpsc,
				   false, ck->flags);
	}
	spin_unlock_irq(&clockfw_lock);

	return 0;
}
#endif
static unsigned long clk_sysclk_recalc(struct clk *clk)
{
	u32 v, plldiv;
	struct pll_data *pll;
	unsigned long rate = clk->rate;

	/* If this is the PLL base clock, no more calculations needed */
	if (clk->pll_data)
		return rate;

	if (WARN_ON(!clk->parent))
		return rate;

	rate = clk->parent->rate;

	/* Otherwise, the parent must be a PLL */
	if (WARN_ON(!clk->parent->pll_data))
		return rate;

	pll = clk->parent->pll_data;

	/* If pre-PLL, source clock is before the multiplier and divider(s) */
	if (clk->flags & PRE_PLL)
		rate = pll->input_rate;

	if (!clk->div_reg)
		return rate;

	v = __raw_readl(pll->base + clk->div_reg);
	if (v & PLLDIV_EN) {
		plldiv = (v & pll->div_ratio_mask) + 1;
		if (plldiv)
			rate /= plldiv;
	}

	return rate;
}
int davinci_set_sysclk_rate(struct clk *clk, unsigned long rate)
{
	unsigned v;
	struct pll_data *pll;
	unsigned long input;
	unsigned ratio = 0;

	/* If this is the PLL base clock, wrong function to call */
	if (clk->pll_data)
		return -EINVAL;

	/* There must be a parent... */
	if (WARN_ON(!clk->parent))
		return -EINVAL;

	/* ... the parent must be a PLL... */
	if (WARN_ON(!clk->parent->pll_data))
		return -EINVAL;

	/* ... and this clock must have a divider. */
	if (WARN_ON(!clk->div_reg))
		return -EINVAL;

	pll = clk->parent->pll_data;

	input = clk->parent->rate;

	/* If pre-PLL, source clock is before the multiplier and divider(s) */
	if (clk->flags & PRE_PLL)
		input = pll->input_rate;

	if (input > rate) {
		/*
		 * Can afford to provide an output little higher than requested
		 * only if maximum rate supported by hardware on this sysclk
		 * is known.
		 */
		if (clk->maxrate) {
			ratio = DIV_ROUND_CLOSEST(input, rate);
			if (input / ratio > clk->maxrate)
				ratio = 0;
		}

		if (ratio == 0)
			ratio = DIV_ROUND_UP(input, rate);

		ratio--;
	}

	if (ratio > pll->div_ratio_mask)
		return -EINVAL;

	do {
		v = __raw_readl(pll->base + PLLSTAT);
	} while (v & PLLSTAT_GOSTAT);

	v = __raw_readl(pll->base + clk->div_reg);
	v &= ~pll->div_ratio_mask;
	v |= ratio | PLLDIV_EN;
	__raw_writel(v, pll->base + clk->div_reg);

	v = __raw_readl(pll->base + PLLCMD);
	v |= PLLCMD_GOSET;
	__raw_writel(v, pll->base + PLLCMD);

	do {
		v = __raw_readl(pll->base + PLLSTAT);
	} while (v & PLLSTAT_GOSTAT);

	return 0;
}
EXPORT_SYMBOL(davinci_set_sysclk_rate);
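/*
 * Worked example (illustrative numbers): with a 300 MHz parent and a
 * requested rate of 100 MHz, DIV_ROUND_CLOSEST(300 MHz, 100 MHz) = 3, so the
 * divider register's RATIO field is programmed with (3 - 1) = 2 plus
 * PLLDIV_EN, giving an actual output of 300 / 3 = 100 MHz. If the rounded
 * result had exceeded clk->maxrate, DIV_ROUND_UP() would have been used
 * instead so the output stays at or below the requested rate.
 */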
static unsigned long clk_leafclk_recalc(struct clk *clk)
{
	if (WARN_ON(!clk->parent))
		return clk->rate;

	return clk->parent->rate;
}
int davinci_simple_set_rate(struct clk *clk, unsigned long rate)
{
	clk->rate = rate;
	return 0;
}
static unsigned long clk_pllclk_recalc(struct clk *clk)
{
	u32 ctrl, mult = 1, prediv = 1, postdiv = 1;
	u8 bypass;
	struct pll_data *pll = clk->pll_data;
	unsigned long rate = clk->rate;

	ctrl = __raw_readl(pll->base + PLLCTL);
	rate = pll->input_rate = clk->parent->rate;

	if (ctrl & PLLCTL_PLLEN) {
		bypass = 0;
		mult = __raw_readl(pll->base + PLLM);
		if (cpu_is_davinci_dm365())
			mult = 2 * (mult & PLLM_PLLM_MASK);
		else
			mult = (mult & PLLM_PLLM_MASK) + 1;
	} else
		bypass = 1;

	if (pll->flags & PLL_HAS_PREDIV) {
		prediv = __raw_readl(pll->base + PREDIV);
		if (prediv & PLLDIV_EN)
			prediv = (prediv & pll->div_ratio_mask) + 1;
		else
			prediv = 1;
	}

	/* pre-divider is fixed, but (some?) chips won't report that */
	if (cpu_is_davinci_dm355() && pll->num == 1)
		prediv = 8;

	if (pll->flags & PLL_HAS_POSTDIV) {
		postdiv = __raw_readl(pll->base + POSTDIV);
		if (postdiv & PLLDIV_EN)
			postdiv = (postdiv & pll->div_ratio_mask) + 1;
		else
			postdiv = 1;
	}

	if (!bypass) {
		rate /= prediv;
		rate *= mult;
		rate /= postdiv;
	}

	pr_debug("PLL%d: input = %lu MHz [ ",
		 pll->num, clk->parent->rate / 1000000);
	if (bypass)
		pr_debug("bypass ");
	if (prediv > 1)
		pr_debug("/ %d ", prediv);
	if (mult > 1)
		pr_debug("* %d ", mult);
	if (postdiv > 1)
		pr_debug("/ %d ", postdiv);
	pr_debug("] --> %lu MHz output.\n", rate / 1000000);

	return rate;
}
/**
 * davinci_set_pllrate - set the output rate of a given PLL.
 *
 * Note: Currently tested to work with OMAP-L138 only.
 *
 * @pll: pll whose rate needs to be changed.
 * @prediv: The pre divider value. Passing 0 disables the pre-divider.
 * @mult: The multiplier value. Passing 0 leads to multiply-by-one.
 * @postdiv: The post divider value. Passing 0 disables the post-divider.
 */
int davinci_set_pllrate(struct pll_data *pll, unsigned int prediv,
			unsigned int mult, unsigned int postdiv)
{
	u32 ctrl;
	unsigned int locktime;
	unsigned long flags;

	if (pll->base == NULL)
		return -EINVAL;

	/*
	 * PLL lock time required per OMAP-L138 datasheet is
	 * (2000 * prediv)/sqrt(pllm) OSCIN cycles. We approximate sqrt(pllm)
	 * as 4 and OSCIN cycle as 25 MHz.
	 */
	if (prediv) {
		locktime = ((2000 * prediv) / 100);
		prediv = (prediv - 1) | PLLDIV_EN;
	} else {
		locktime = PLL_LOCK_TIME;
	}
	if (postdiv)
		postdiv = (postdiv - 1) | PLLDIV_EN;
	if (mult)
		mult = mult - 1;

	/* Protect against simultaneous calls to PLL setting sequence */
	spin_lock_irqsave(&clockfw_lock, flags);

	ctrl = __raw_readl(pll->base + PLLCTL);

	/* Switch the PLL to bypass mode */
	ctrl &= ~(PLLCTL_PLLENSRC | PLLCTL_PLLEN);
	__raw_writel(ctrl, pll->base + PLLCTL);

	udelay(PLL_BYPASS_TIME);

	/* Reset and enable PLL */
	ctrl &= ~(PLLCTL_PLLRST | PLLCTL_PLLDIS);
	__raw_writel(ctrl, pll->base + PLLCTL);

	if (pll->flags & PLL_HAS_PREDIV)
		__raw_writel(prediv, pll->base + PREDIV);

	__raw_writel(mult, pll->base + PLLM);

	if (pll->flags & PLL_HAS_POSTDIV)
		__raw_writel(postdiv, pll->base + POSTDIV);

	udelay(PLL_RESET_TIME);

	/* Bring PLL out of reset */
	ctrl |= PLLCTL_PLLRST;
	__raw_writel(ctrl, pll->base + PLLCTL);

	udelay(locktime);

	/* Remove PLL from bypass mode */
	ctrl |= PLLCTL_PLLEN;
	__raw_writel(ctrl, pll->base + PLLCTL);

	spin_unlock_irqrestore(&clockfw_lock, flags);

	return 0;
}
EXPORT_SYMBOL(davinci_set_pllrate);
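/*
 * Illustrative only: with a 24 MHz OSCIN, davinci_set_pllrate(pll, 1, 25, 1)
 * would request 24 MHz / 1 * 25 / 1 = 600 MHz at the PLL output (hypothetical
 * numbers, not a validated operating point). For the lock-time estimate
 * above, prediv = 1 gives locktime = (2000 * 1) / 100 = 20 microseconds of
 * udelay() before the PLL is taken out of bypass.
 */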
/**
 * davinci_set_refclk_rate() - Set the reference clock rate
 * @rate:	The new rate.
 *
 * Sets the reference clock rate to a given value. This will most likely
 * result in the entire clock tree getting updated.
 *
 * This is used to support boards which use a reference clock different
 * than that used by default in <soc>.c file. The reference clock rate
 * should be updated early in the boot process; ideally soon after the
 * clock tree has been initialized once with the default reference clock
 * rate (davinci_common_init()).
 *
 * Returns 0 on success, error otherwise.
 */
int davinci_set_refclk_rate(unsigned long rate)
{
	struct clk *refclk;

	refclk = clk_get(NULL, "ref");
	if (IS_ERR(refclk)) {
		pr_err("%s: failed to get reference clock.\n", __func__);
		return PTR_ERR(refclk);
	}

	clk_set_rate(refclk, rate);

	clk_put(refclk);

	return 0;
}
int __init davinci_clk_init(struct clk_lookup *clocks)
{
	struct clk_lookup *c;
	struct clk *clk;
	size_t num_clocks = 0;

	for (c = clocks; c->clk; c++) {
		clk = c->clk;

		if (!clk->recalc) {

			/* Check if clock is a PLL */
			if (clk->pll_data)
				clk->recalc = clk_pllclk_recalc;

			/* Else, if it is a PLL-derived clock */
			else if (clk->flags & CLK_PLL)
				clk->recalc = clk_sysclk_recalc;

			/* Otherwise, it is a leaf clock (PSC clock) */
			else if (clk->parent)
				clk->recalc = clk_leafclk_recalc;
		}

		if (clk->pll_data) {
			struct pll_data *pll = clk->pll_data;

			if (!pll->div_ratio_mask)
				pll->div_ratio_mask = PLLDIV_RATIO_MASK;

			if (pll->phys_base && !pll->base) {
				pll->base = ioremap(pll->phys_base, SZ_4K);
				WARN_ON(!pll->base);
			}
		}

		if (clk->recalc)
			clk->rate = clk->recalc(clk);

		if (clk->lpsc)
			clk->flags |= CLK_PSC;

		if (clk->flags & PSC_LRST)
			clk->reset = davinci_clk_reset;

		clk_register(clk);
		num_clocks++;

		/* Turn on clocks that Linux doesn't otherwise manage */
		if (clk->flags & ALWAYS_ENABLED)
			clk_enable(clk);
	}

	clkdev_add_table(clocks, num_clocks);

	return 0;
}
#ifdef CONFIG_DEBUG_FS

#include <linux/debugfs.h>
#include <linux/seq_file.h>

#define CLKNAME_MAX	10		/* longest clock name */
#define NEST_DELTA	2
#define NEST_MAX	4
static void
dump_clock(struct seq_file *s, unsigned nest, struct clk *parent)
{
	char		*state;
	char		buf[CLKNAME_MAX + NEST_DELTA * NEST_MAX];
	struct clk	*clk;
	unsigned	i;

	if (parent->flags & CLK_PLL)
		state = "pll";
	else if (parent->flags & CLK_PSC)
		state = "psc";
	else
		state = "";

	/* <nest spaces> name <pad to end> */
	memset(buf, ' ', sizeof(buf) - 1);
	buf[sizeof(buf) - 1] = 0;
	i = strlen(parent->name);
	memcpy(buf + nest, parent->name,
	       min(i, (unsigned)(sizeof(buf) - 1 - nest)));

	seq_printf(s, "%s users=%2d %-3s %9ld Hz\n",
		   buf, parent->usecount, state, clk_get_rate(parent));
	/* REVISIT show device associations too */

	/* cost is now small, but not linear... */
	list_for_each_entry(clk, &parent->children, childnode) {
		dump_clock(s, nest + NEST_DELTA, clk);
	}
}
static int davinci_ck_show(struct seq_file *m, void *v)
{
	struct clk *clk;

	/*
	 * Show clock tree; We trust nonzero usecounts equate to PSC enables...
	 */
	mutex_lock(&clocks_mutex);
	list_for_each_entry(clk, &clocks, node)
		if (!clk->parent)
			dump_clock(m, 0, clk);
	mutex_unlock(&clocks_mutex);

	return 0;
}
static int davinci_ck_open(struct inode *inode, struct file *file)
{
	return single_open(file, davinci_ck_show, NULL);
}
static const struct file_operations davinci_ck_operations = {
	.open		= davinci_ck_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int __init davinci_clk_debugfs_init(void)
{
	debugfs_create_file("davinci_clocks", S_IFREG | S_IRUGO, NULL, NULL,
			    &davinci_ck_operations);
	return 0;
}
device_initcall(davinci_clk_debugfs_init);
#endif /* CONFIG_DEBUG_FS */