/*
 * Clock and PLL control for DaVinci devices
 *
 * Copyright (C) 2006-2007 Texas Instruments.
 * Copyright (C) 2008-2009 Deep Root Systems, LLC
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
13 #include <linux/module.h>
14 #include <linux/kernel.h>
15 #include <linux/init.h>
16 #include <linux/errno.h>
17 #include <linux/clk.h>
18 #include <linux/err.h>
19 #include <linux/mutex.h>
21 #include <linux/delay.h>
23 #include <mach/hardware.h>
25 #include <mach/clock.h>
27 #include <mach/cputype.h>
/* All registered clocks; guarded by clocks_mutex for tree changes. */
static LIST_HEAD(clocks);
static DEFINE_MUTEX(clocks_mutex);
/* Serializes hardware (PSC/PLL register) accesses in atomic context. */
static DEFINE_SPINLOCK(clockfw_lock);
34 static void __clk_enable(struct clk
*clk
)
37 __clk_enable(clk
->parent
);
38 if (clk
->usecount
++ == 0) {
39 if (clk
->flags
& CLK_PSC
)
40 davinci_psc_config(clk
->domain
, clk
->gpsc
, clk
->lpsc
,
42 else if (clk
->clk_enable
)
47 static void __clk_disable(struct clk
*clk
)
49 if (WARN_ON(clk
->usecount
== 0))
51 if (--clk
->usecount
== 0) {
52 if (!(clk
->flags
& CLK_PLL
) && (clk
->flags
& CLK_PSC
))
53 davinci_psc_config(clk
->domain
, clk
->gpsc
, clk
->lpsc
,
55 else if (clk
->clk_disable
)
56 clk
->clk_disable(clk
);
59 __clk_disable(clk
->parent
);
62 int davinci_clk_reset(struct clk
*clk
, bool reset
)
66 if (clk
== NULL
|| IS_ERR(clk
))
69 spin_lock_irqsave(&clockfw_lock
, flags
);
70 if (clk
->flags
& CLK_PSC
)
71 davinci_psc_reset(clk
->gpsc
, clk
->lpsc
, reset
);
72 spin_unlock_irqrestore(&clockfw_lock
, flags
);
76 EXPORT_SYMBOL(davinci_clk_reset
);
78 int davinci_clk_reset_assert(struct clk
*clk
)
80 if (clk
== NULL
|| IS_ERR(clk
) || !clk
->reset
)
83 return clk
->reset(clk
, true);
85 EXPORT_SYMBOL(davinci_clk_reset_assert
);
87 int davinci_clk_reset_deassert(struct clk
*clk
)
89 if (clk
== NULL
|| IS_ERR(clk
) || !clk
->reset
)
92 return clk
->reset(clk
, false);
94 EXPORT_SYMBOL(davinci_clk_reset_deassert
);
96 int clk_enable(struct clk
*clk
)
102 else if (IS_ERR(clk
))
105 spin_lock_irqsave(&clockfw_lock
, flags
);
107 spin_unlock_irqrestore(&clockfw_lock
, flags
);
111 EXPORT_SYMBOL(clk_enable
);
113 void clk_disable(struct clk
*clk
)
117 if (clk
== NULL
|| IS_ERR(clk
))
120 spin_lock_irqsave(&clockfw_lock
, flags
);
122 spin_unlock_irqrestore(&clockfw_lock
, flags
);
124 EXPORT_SYMBOL(clk_disable
);
126 unsigned long clk_get_rate(struct clk
*clk
)
128 if (clk
== NULL
|| IS_ERR(clk
))
133 EXPORT_SYMBOL(clk_get_rate
);
135 long clk_round_rate(struct clk
*clk
, unsigned long rate
)
137 if (clk
== NULL
|| IS_ERR(clk
))
141 return clk
->round_rate(clk
, rate
);
145 EXPORT_SYMBOL(clk_round_rate
);
147 /* Propagate rate to children */
148 static void propagate_rate(struct clk
*root
)
152 list_for_each_entry(clk
, &root
->children
, childnode
) {
154 clk
->rate
= clk
->recalc(clk
);
159 int clk_set_rate(struct clk
*clk
, unsigned long rate
)
166 else if (IS_ERR(clk
))
170 ret
= clk
->set_rate(clk
, rate
);
172 spin_lock_irqsave(&clockfw_lock
, flags
);
175 clk
->rate
= clk
->recalc(clk
);
178 spin_unlock_irqrestore(&clockfw_lock
, flags
);
182 EXPORT_SYMBOL(clk_set_rate
);
184 int clk_set_parent(struct clk
*clk
, struct clk
*parent
)
190 else if (IS_ERR(clk
))
193 /* Cannot change parent on enabled clock */
194 if (WARN_ON(clk
->usecount
))
197 mutex_lock(&clocks_mutex
);
198 clk
->parent
= parent
;
199 list_del_init(&clk
->childnode
);
200 list_add(&clk
->childnode
, &clk
->parent
->children
);
201 mutex_unlock(&clocks_mutex
);
203 spin_lock_irqsave(&clockfw_lock
, flags
);
205 clk
->rate
= clk
->recalc(clk
);
207 spin_unlock_irqrestore(&clockfw_lock
, flags
);
211 EXPORT_SYMBOL(clk_set_parent
);
213 int clk_register(struct clk
*clk
)
215 if (clk
== NULL
|| IS_ERR(clk
))
218 if (WARN(clk
->parent
&& !clk
->parent
->rate
,
219 "CLK: %s parent %s has no rate!\n",
220 clk
->name
, clk
->parent
->name
))
223 INIT_LIST_HEAD(&clk
->children
);
225 mutex_lock(&clocks_mutex
);
226 list_add_tail(&clk
->node
, &clocks
);
228 list_add_tail(&clk
->childnode
, &clk
->parent
->children
);
229 mutex_unlock(&clocks_mutex
);
231 /* If rate is already set, use it */
235 /* Else, see if there is a way to calculate it */
237 clk
->rate
= clk
->recalc(clk
);
239 /* Otherwise, default to parent rate */
240 else if (clk
->parent
)
241 clk
->rate
= clk
->parent
->rate
;
245 EXPORT_SYMBOL(clk_register
);
247 void clk_unregister(struct clk
*clk
)
249 if (clk
== NULL
|| IS_ERR(clk
))
252 mutex_lock(&clocks_mutex
);
253 list_del(&clk
->node
);
254 list_del(&clk
->childnode
);
255 mutex_unlock(&clocks_mutex
);
257 EXPORT_SYMBOL(clk_unregister
);
#ifdef CONFIG_DAVINCI_RESET_CLOCKS
/*
 * Disable any unused clocks left on by the bootloader: walk all
 * registered PSC clocks with a zero usecount and gate those the PSC
 * still reports as active.
 */
int __init davinci_clk_disable_unused(void)
{
	struct clk *ck;

	spin_lock_irq(&clockfw_lock);
	list_for_each_entry(ck, &clocks, node) {
		if (ck->usecount > 0)
			continue;
		if (!(ck->flags & CLK_PSC))
			continue;

		/* ignore if in Disabled or SwRstDisable states */
		if (!davinci_psc_is_clk_active(ck->gpsc, ck->lpsc))
			continue;

		pr_debug("Clocks: disable unused %s\n", ck->name);

		davinci_psc_config(ck->domain, ck->gpsc, ck->lpsc,
				   false, ck->flags);
	}
	spin_unlock_irq(&clockfw_lock);

	return 0;
}
#endif
289 static unsigned long clk_sysclk_recalc(struct clk
*clk
)
292 struct pll_data
*pll
;
293 unsigned long rate
= clk
->rate
;
295 /* If this is the PLL base clock, no more calculations needed */
299 if (WARN_ON(!clk
->parent
))
302 rate
= clk
->parent
->rate
;
304 /* Otherwise, the parent must be a PLL */
305 if (WARN_ON(!clk
->parent
->pll_data
))
308 pll
= clk
->parent
->pll_data
;
310 /* If pre-PLL, source clock is before the multiplier and divider(s) */
311 if (clk
->flags
& PRE_PLL
)
312 rate
= pll
->input_rate
;
317 v
= __raw_readl(pll
->base
+ clk
->div_reg
);
319 plldiv
= (v
& pll
->div_ratio_mask
) + 1;
327 int davinci_set_sysclk_rate(struct clk
*clk
, unsigned long rate
)
330 struct pll_data
*pll
;
334 /* If this is the PLL base clock, wrong function to call */
338 /* There must be a parent... */
339 if (WARN_ON(!clk
->parent
))
342 /* ... the parent must be a PLL... */
343 if (WARN_ON(!clk
->parent
->pll_data
))
346 /* ... and this clock must have a divider. */
347 if (WARN_ON(!clk
->div_reg
))
350 pll
= clk
->parent
->pll_data
;
352 input
= clk
->parent
->rate
;
354 /* If pre-PLL, source clock is before the multiplier and divider(s) */
355 if (clk
->flags
& PRE_PLL
)
356 input
= pll
->input_rate
;
360 * Can afford to provide an output little higher than requested
361 * only if maximum rate supported by hardware on this sysclk
365 ratio
= DIV_ROUND_CLOSEST(input
, rate
);
366 if (input
/ ratio
> clk
->maxrate
)
371 ratio
= DIV_ROUND_UP(input
, rate
);
376 if (ratio
> pll
->div_ratio_mask
)
380 v
= __raw_readl(pll
->base
+ PLLSTAT
);
381 } while (v
& PLLSTAT_GOSTAT
);
383 v
= __raw_readl(pll
->base
+ clk
->div_reg
);
384 v
&= ~pll
->div_ratio_mask
;
385 v
|= ratio
| PLLDIV_EN
;
386 __raw_writel(v
, pll
->base
+ clk
->div_reg
);
388 v
= __raw_readl(pll
->base
+ PLLCMD
);
390 __raw_writel(v
, pll
->base
+ PLLCMD
);
393 v
= __raw_readl(pll
->base
+ PLLSTAT
);
394 } while (v
& PLLSTAT_GOSTAT
);
398 EXPORT_SYMBOL(davinci_set_sysclk_rate
);
400 static unsigned long clk_leafclk_recalc(struct clk
*clk
)
402 if (WARN_ON(!clk
->parent
))
405 return clk
->parent
->rate
;
408 int davinci_simple_set_rate(struct clk
*clk
, unsigned long rate
)
414 static unsigned long clk_pllclk_recalc(struct clk
*clk
)
416 u32 ctrl
, mult
= 1, prediv
= 1, postdiv
= 1;
418 struct pll_data
*pll
= clk
->pll_data
;
419 unsigned long rate
= clk
->rate
;
421 ctrl
= __raw_readl(pll
->base
+ PLLCTL
);
422 rate
= pll
->input_rate
= clk
->parent
->rate
;
424 if (ctrl
& PLLCTL_PLLEN
) {
426 mult
= __raw_readl(pll
->base
+ PLLM
);
427 if (cpu_is_davinci_dm365())
428 mult
= 2 * (mult
& PLLM_PLLM_MASK
);
430 mult
= (mult
& PLLM_PLLM_MASK
) + 1;
434 if (pll
->flags
& PLL_HAS_PREDIV
) {
435 prediv
= __raw_readl(pll
->base
+ PREDIV
);
436 if (prediv
& PLLDIV_EN
)
437 prediv
= (prediv
& pll
->div_ratio_mask
) + 1;
442 /* pre-divider is fixed, but (some?) chips won't report that */
443 if (cpu_is_davinci_dm355() && pll
->num
== 1)
446 if (pll
->flags
& PLL_HAS_POSTDIV
) {
447 postdiv
= __raw_readl(pll
->base
+ POSTDIV
);
448 if (postdiv
& PLLDIV_EN
)
449 postdiv
= (postdiv
& pll
->div_ratio_mask
) + 1;
460 pr_debug("PLL%d: input = %lu MHz [ ",
461 pll
->num
, clk
->parent
->rate
/ 1000000);
465 pr_debug("/ %d ", prediv
);
467 pr_debug("* %d ", mult
);
469 pr_debug("/ %d ", postdiv
);
470 pr_debug("] --> %lu MHz output.\n", rate
/ 1000000);
476 * davinci_set_pllrate - set the output rate of a given PLL.
478 * Note: Currently tested to work with OMAP-L138 only.
480 * @pll: pll whose rate needs to be changed.
481 * @prediv: The pre divider value. Passing 0 disables the pre-divider.
482 * @pllm: The multiplier value. Passing 0 leads to multiply-by-one.
483 * @postdiv: The post divider value. Passing 0 disables the post-divider.
485 int davinci_set_pllrate(struct pll_data
*pll
, unsigned int prediv
,
486 unsigned int mult
, unsigned int postdiv
)
489 unsigned int locktime
;
492 if (pll
->base
== NULL
)
496 * PLL lock time required per OMAP-L138 datasheet is
497 * (2000 * prediv)/sqrt(pllm) OSCIN cycles. We approximate sqrt(pllm)
498 * as 4 and OSCIN cycle as 25 MHz.
501 locktime
= ((2000 * prediv
) / 100);
502 prediv
= (prediv
- 1) | PLLDIV_EN
;
504 locktime
= PLL_LOCK_TIME
;
507 postdiv
= (postdiv
- 1) | PLLDIV_EN
;
511 /* Protect against simultaneous calls to PLL setting seqeunce */
512 spin_lock_irqsave(&clockfw_lock
, flags
);
514 ctrl
= __raw_readl(pll
->base
+ PLLCTL
);
516 /* Switch the PLL to bypass mode */
517 ctrl
&= ~(PLLCTL_PLLENSRC
| PLLCTL_PLLEN
);
518 __raw_writel(ctrl
, pll
->base
+ PLLCTL
);
520 udelay(PLL_BYPASS_TIME
);
522 /* Reset and enable PLL */
523 ctrl
&= ~(PLLCTL_PLLRST
| PLLCTL_PLLDIS
);
524 __raw_writel(ctrl
, pll
->base
+ PLLCTL
);
526 if (pll
->flags
& PLL_HAS_PREDIV
)
527 __raw_writel(prediv
, pll
->base
+ PREDIV
);
529 __raw_writel(mult
, pll
->base
+ PLLM
);
531 if (pll
->flags
& PLL_HAS_POSTDIV
)
532 __raw_writel(postdiv
, pll
->base
+ POSTDIV
);
534 udelay(PLL_RESET_TIME
);
536 /* Bring PLL out of reset */
537 ctrl
|= PLLCTL_PLLRST
;
538 __raw_writel(ctrl
, pll
->base
+ PLLCTL
);
542 /* Remove PLL from bypass mode */
543 ctrl
|= PLLCTL_PLLEN
;
544 __raw_writel(ctrl
, pll
->base
+ PLLCTL
);
546 spin_unlock_irqrestore(&clockfw_lock
, flags
);
550 EXPORT_SYMBOL(davinci_set_pllrate
);
553 * davinci_set_refclk_rate() - Set the reference clock rate
554 * @rate: The new rate.
556 * Sets the reference clock rate to a given value. This will most likely
557 * result in the entire clock tree getting updated.
559 * This is used to support boards which use a reference clock different
560 * than that used by default in <soc>.c file. The reference clock rate
561 * should be updated early in the boot process; ideally soon after the
562 * clock tree has been initialized once with the default reference clock
563 * rate (davinci_common_init()).
565 * Returns 0 on success, error otherwise.
567 int davinci_set_refclk_rate(unsigned long rate
)
571 refclk
= clk_get(NULL
, "ref");
572 if (IS_ERR(refclk
)) {
573 pr_err("%s: failed to get reference clock\n", __func__
);
574 return PTR_ERR(refclk
);
577 clk_set_rate(refclk
, rate
);
584 int __init
davinci_clk_init(struct clk_lookup
*clocks
)
586 struct clk_lookup
*c
;
588 size_t num_clocks
= 0;
590 for (c
= clocks
; c
->clk
; c
++) {
595 /* Check if clock is a PLL */
597 clk
->recalc
= clk_pllclk_recalc
;
599 /* Else, if it is a PLL-derived clock */
600 else if (clk
->flags
& CLK_PLL
)
601 clk
->recalc
= clk_sysclk_recalc
;
603 /* Otherwise, it is a leaf clock (PSC clock) */
604 else if (clk
->parent
)
605 clk
->recalc
= clk_leafclk_recalc
;
609 struct pll_data
*pll
= clk
->pll_data
;
611 if (!pll
->div_ratio_mask
)
612 pll
->div_ratio_mask
= PLLDIV_RATIO_MASK
;
614 if (pll
->phys_base
&& !pll
->base
) {
615 pll
->base
= ioremap(pll
->phys_base
, SZ_4K
);
621 clk
->rate
= clk
->recalc(clk
);
624 clk
->flags
|= CLK_PSC
;
626 if (clk
->flags
& PSC_LRST
)
627 clk
->reset
= davinci_clk_reset
;
632 /* Turn on clocks that Linux doesn't otherwise manage */
633 if (clk
->flags
& ALWAYS_ENABLED
)
637 clkdev_add_table(clocks
, num_clocks
);
642 #ifdef CONFIG_DEBUG_FS
644 #include <linux/debugfs.h>
645 #include <linux/seq_file.h>
647 #define CLKNAME_MAX 10 /* longest clock name */
652 dump_clock(struct seq_file
*s
, unsigned nest
, struct clk
*parent
)
655 char buf
[CLKNAME_MAX
+ NEST_DELTA
* NEST_MAX
];
659 if (parent
->flags
& CLK_PLL
)
661 else if (parent
->flags
& CLK_PSC
)
666 /* <nest spaces> name <pad to end> */
667 memset(buf
, ' ', sizeof(buf
) - 1);
668 buf
[sizeof(buf
) - 1] = 0;
669 i
= strlen(parent
->name
);
670 memcpy(buf
+ nest
, parent
->name
,
671 min(i
, (unsigned)(sizeof(buf
) - 1 - nest
)));
673 seq_printf(s
, "%s users=%2d %-3s %9ld Hz\n",
674 buf
, parent
->usecount
, state
, clk_get_rate(parent
));
675 /* REVISIT show device associations too */
677 /* cost is now small, but not linear... */
678 list_for_each_entry(clk
, &parent
->children
, childnode
) {
679 dump_clock(s
, nest
+ NEST_DELTA
, clk
);
683 static int davinci_ck_show(struct seq_file
*m
, void *v
)
688 * Show clock tree; We trust nonzero usecounts equate to PSC enables...
690 mutex_lock(&clocks_mutex
);
691 list_for_each_entry(clk
, &clocks
, node
)
693 dump_clock(m
, 0, clk
);
694 mutex_unlock(&clocks_mutex
);
699 static int davinci_ck_open(struct inode
*inode
, struct file
*file
)
701 return single_open(file
, davinci_ck_show
, NULL
);
704 static const struct file_operations davinci_ck_operations
= {
705 .open
= davinci_ck_open
,
708 .release
= single_release
,
711 static int __init
davinci_clk_debugfs_init(void)
713 debugfs_create_file("davinci_clocks", S_IFREG
| S_IRUGO
, NULL
, NULL
,
714 &davinci_ck_operations
);
718 device_initcall(davinci_clk_debugfs_init
);
719 #endif /* CONFIG_DEBUG_FS */