/*
 * Clock and PLL control for DaVinci devices
 *
 * Copyright (C) 2006-2007 Texas Instruments.
 * Copyright (C) 2008-2009 Deep Root Systems, LLC
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/io.h>
#include <linux/delay.h>

#include <mach/hardware.h>

#include <mach/clock.h>
#include <mach/psc.h>
#include <mach/cputype.h>
#include "clock.h"
static LIST_HEAD(clocks);
static DEFINE_MUTEX(clocks_mutex);
static DEFINE_SPINLOCK(clockfw_lock);
static void __clk_enable(struct clk *clk)
{
	if (clk->parent)
		__clk_enable(clk->parent);
	if (clk->usecount++ == 0 && (clk->flags & CLK_PSC))
		davinci_psc_config(clk->domain, clk->gpsc, clk->lpsc,
				true, clk->flags);
}
static void __clk_disable(struct clk *clk)
{
	if (WARN_ON(clk->usecount == 0))
		return;
	if (--clk->usecount == 0 && !(clk->flags & CLK_PLL) &&
	    (clk->flags & CLK_PSC))
		davinci_psc_config(clk->domain, clk->gpsc, clk->lpsc,
				false, clk->flags);
	if (clk->parent)
		__clk_disable(clk->parent);
}
int clk_enable(struct clk *clk)
{
	unsigned long flags;

	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	spin_lock_irqsave(&clockfw_lock, flags);
	__clk_enable(clk);
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return 0;
}
EXPORT_SYMBOL(clk_enable);
void clk_disable(struct clk *clk)
{
	unsigned long flags;

	if (clk == NULL || IS_ERR(clk))
		return;

	spin_lock_irqsave(&clockfw_lock, flags);
	__clk_disable(clk);
	spin_unlock_irqrestore(&clockfw_lock, flags);
}
EXPORT_SYMBOL(clk_disable);
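/*
 * Usage note (illustrative sketch, not part of the original file): drivers
 * normally pair these calls with clk_get()/clk_put(); "pdev" below is a
 * hypothetical platform device.
 *
 *	struct clk *clk = clk_get(&pdev->dev, NULL);
 *
 *	if (!IS_ERR(clk)) {
 *		clk_enable(clk);
 *		...
 *		clk_disable(clk);
 *		clk_put(clk);
 *	}
 */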
unsigned long clk_get_rate(struct clk *clk)
{
	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	return clk->rate;
}
EXPORT_SYMBOL(clk_get_rate);
long clk_round_rate(struct clk *clk, unsigned long rate)
{
	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	if (clk->round_rate)
		return clk->round_rate(clk, rate);

	return clk->rate;
}
EXPORT_SYMBOL(clk_round_rate);
/* Propagate rate to children */
static void propagate_rate(struct clk *root)
{
	struct clk *clk;

	list_for_each_entry(clk, &root->children, childnode) {
		if (clk->recalc)
			clk->rate = clk->recalc(clk);
		propagate_rate(clk);
	}
}
int clk_set_rate(struct clk *clk, unsigned long rate)
{
	unsigned long flags;
	int ret = -EINVAL;

	if (clk == NULL || IS_ERR(clk))
		return ret;

	if (clk->set_rate)
		ret = clk->set_rate(clk, rate);

	spin_lock_irqsave(&clockfw_lock, flags);
	if (ret == 0) {
		if (clk->recalc)
			clk->rate = clk->recalc(clk);
		propagate_rate(clk);
	}
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_set_rate);
int clk_set_parent(struct clk *clk, struct clk *parent)
{
	unsigned long flags;

	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	/* Cannot change parent on enabled clock */
	if (WARN_ON(clk->usecount))
		return -EINVAL;

	mutex_lock(&clocks_mutex);
	clk->parent = parent;
	list_del_init(&clk->childnode);
	list_add(&clk->childnode, &clk->parent->children);
	mutex_unlock(&clocks_mutex);

	spin_lock_irqsave(&clockfw_lock, flags);
	if (clk->recalc)
		clk->rate = clk->recalc(clk);
	propagate_rate(clk);
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return 0;
}
EXPORT_SYMBOL(clk_set_parent);
int clk_register(struct clk *clk)
{
	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	if (WARN(clk->parent && !clk->parent->rate,
		 "CLK: %s parent %s has no rate!\n",
		 clk->name, clk->parent->name))
		return -EINVAL;

	INIT_LIST_HEAD(&clk->children);

	mutex_lock(&clocks_mutex);
	list_add_tail(&clk->node, &clocks);
	if (clk->parent)
		list_add_tail(&clk->childnode, &clk->parent->children);
	mutex_unlock(&clocks_mutex);

	/* If rate is already set, use it */
	if (clk->rate)
		return 0;

	/* Else, see if there is a way to calculate it */
	if (clk->recalc)
		clk->rate = clk->recalc(clk);

	/* Otherwise, default to parent rate */
	else if (clk->parent)
		clk->rate = clk->parent->rate;

	return 0;
}
EXPORT_SYMBOL(clk_register);
void clk_unregister(struct clk *clk)
{
	if (clk == NULL || IS_ERR(clk))
		return;

	mutex_lock(&clocks_mutex);
	list_del(&clk->node);
	list_del(&clk->childnode);
	mutex_unlock(&clocks_mutex);
}
EXPORT_SYMBOL(clk_unregister);
#ifdef CONFIG_DAVINCI_RESET_CLOCKS
/*
 * Disable any unused clocks left on by the bootloader
 */
static int __init clk_disable_unused(void)
{
	struct clk *ck;

	spin_lock_irq(&clockfw_lock);
	list_for_each_entry(ck, &clocks, node) {
		if (ck->usecount > 0)
			continue;
		if (!(ck->flags & CLK_PSC))
			continue;

		/* ignore if in Disabled or SwRstDisable states */
		if (!davinci_psc_is_clk_active(ck->gpsc, ck->lpsc))
			continue;

		pr_debug("Clocks: disable unused %s\n", ck->name);

		davinci_psc_config(ck->domain, ck->gpsc, ck->lpsc,
				false, ck->flags);
	}
	spin_unlock_irq(&clockfw_lock);

	return 0;
}
late_initcall(clk_disable_unused);
#endif
static unsigned long clk_sysclk_recalc(struct clk *clk)
{
	u32 v, plldiv;
	struct pll_data *pll;
	unsigned long rate = clk->rate;

	/* If this is the PLL base clock, no more calculations needed */
	if (clk->pll_data)
		return rate;

	if (WARN_ON(!clk->parent))
		return rate;

	rate = clk->parent->rate;

	/* Otherwise, the parent must be a PLL */
	if (WARN_ON(!clk->parent->pll_data))
		return rate;

	pll = clk->parent->pll_data;

	/* If pre-PLL, source clock is before the multiplier and divider(s) */
	if (clk->flags & PRE_PLL)
		rate = pll->input_rate;

	if (!clk->div_reg)
		return rate;

	v = __raw_readl(pll->base + clk->div_reg);
	if (v & PLLDIV_EN) {
		plldiv = (v & pll->div_ratio_mask) + 1;
		if (plldiv)
			rate /= plldiv;
	}

	return rate;
}
int davinci_set_sysclk_rate(struct clk *clk, unsigned long rate)
{
	unsigned v;
	struct pll_data *pll;
	unsigned long input;
	unsigned ratio = 0;

	/* If this is the PLL base clock, wrong function to call */
	if (clk->pll_data)
		return -EINVAL;

	/* There must be a parent... */
	if (WARN_ON(!clk->parent))
		return -EINVAL;

	/* ... the parent must be a PLL... */
	if (WARN_ON(!clk->parent->pll_data))
		return -EINVAL;

	/* ... and this clock must have a divider. */
	if (WARN_ON(!clk->div_reg))
		return -EINVAL;

	pll = clk->parent->pll_data;

	input = clk->parent->rate;

	/* If pre-PLL, source clock is before the multiplier and divider(s) */
	if (clk->flags & PRE_PLL)
		input = pll->input_rate;

	if (input > rate) {
		/*
		 * Can afford to provide an output little higher than requested
		 * only if maximum rate supported by hardware on this sysclk
		 * is known.
		 */
		if (clk->maxrate) {
			ratio = DIV_ROUND_CLOSEST(input, rate);
			if (input / ratio > clk->maxrate)
				ratio = 0;
		}

		if (ratio == 0)
			ratio = DIV_ROUND_UP(input, rate);

		ratio--;
	}
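	/*
	 * Worked example (editor's illustration, values hypothetical): with
	 * input = 200 MHz and a requested rate of 66 MHz, DIV_ROUND_CLOSEST()
	 * picks a divide ratio of 3, i.e. ~66.7 MHz output; that slight
	 * overshoot is kept only if it stays within clk->maxrate, otherwise
	 * the code falls back to DIV_ROUND_UP(), giving a ratio of 4 and a
	 * 50 MHz output.
	 */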
	if (ratio > pll->div_ratio_mask)
		return -EINVAL;

	do {
		v = __raw_readl(pll->base + PLLSTAT);
	} while (v & PLLSTAT_GOSTAT);

	v = __raw_readl(pll->base + clk->div_reg);
	v &= ~pll->div_ratio_mask;
	v |= ratio | PLLDIV_EN;
	__raw_writel(v, pll->base + clk->div_reg);

	v = __raw_readl(pll->base + PLLCMD);
	v |= PLLCMD_GOSET;
	__raw_writel(v, pll->base + PLLCMD);

	do {
		v = __raw_readl(pll->base + PLLSTAT);
	} while (v & PLLSTAT_GOSTAT);

	return 0;
}
EXPORT_SYMBOL(davinci_set_sysclk_rate);
static unsigned long clk_leafclk_recalc(struct clk *clk)
{
	if (WARN_ON(!clk->parent))
		return clk->rate;

	return clk->parent->rate;
}
int davinci_simple_set_rate(struct clk *clk, unsigned long rate)
{
	clk->rate = rate;
	return 0;
}
static unsigned long clk_pllclk_recalc(struct clk *clk)
{
	u32 ctrl, mult = 1, prediv = 1, postdiv = 1;
	u8 bypass;
	struct pll_data *pll = clk->pll_data;
	unsigned long rate = clk->rate;

	ctrl = __raw_readl(pll->base + PLLCTL);
	rate = pll->input_rate = clk->parent->rate;

	if (ctrl & PLLCTL_PLLEN) {
		bypass = 0;
		mult = __raw_readl(pll->base + PLLM);
		if (cpu_is_davinci_dm365())
			mult = 2 * (mult & PLLM_PLLM_MASK);
		else
			mult = (mult & PLLM_PLLM_MASK) + 1;
	} else
		bypass = 1;

	if (pll->flags & PLL_HAS_PREDIV) {
		prediv = __raw_readl(pll->base + PREDIV);
		if (prediv & PLLDIV_EN)
			prediv = (prediv & pll->div_ratio_mask) + 1;
		else
			prediv = 1;
	}

	/* pre-divider is fixed, but (some?) chips won't report that */
	if (cpu_is_davinci_dm355() && pll->num == 1)
		prediv = 8;

	if (pll->flags & PLL_HAS_POSTDIV) {
		postdiv = __raw_readl(pll->base + POSTDIV);
		if (postdiv & PLLDIV_EN)
			postdiv = (postdiv & pll->div_ratio_mask) + 1;
		else
			postdiv = 1;
	}

	if (!bypass) {
		rate /= prediv;
		rate *= mult;
		rate /= postdiv;
	}

	pr_debug("PLL%d: input = %lu MHz [ ",
		 pll->num, clk->parent->rate / 1000000);
	if (bypass)
		pr_debug("bypass ");
	if (prediv > 1)
		pr_debug("/ %d ", prediv);
	if (mult > 1)
		pr_debug("* %d ", mult);
	if (postdiv > 1)
		pr_debug("/ %d ", postdiv);
	pr_debug("] --> %lu MHz output.\n", rate / 1000000);

	return rate;
}
/**
 * davinci_set_pllrate - set the output rate of a given PLL.
 *
 * Note: Currently tested to work with OMAP-L138 only.
 *
 * @pll: pll whose rate needs to be changed.
 * @prediv: The pre divider value. Passing 0 disables the pre-divider.
 * @mult: The multiplier value. Passing 0 leads to multiply-by-one.
 * @postdiv: The post divider value. Passing 0 disables the post-divider.
 */
int davinci_set_pllrate(struct pll_data *pll, unsigned int prediv,
			unsigned int mult, unsigned int postdiv)
{
	u32 ctrl;
	unsigned int locktime;
	unsigned long flags;

	if (pll->base == NULL)
		return -EINVAL;

	/*
	 * PLL lock time required per OMAP-L138 datasheet is
	 * (2000 * prediv)/sqrt(pllm) OSCIN cycles. We approximate sqrt(pllm)
	 * as 4 and OSCIN cycle as 25 MHz.
	 */
	if (prediv) {
		locktime = ((2000 * prediv) / 100);
		prediv = (prediv - 1) | PLLDIV_EN;
	} else {
		locktime = PLL_LOCK_TIME;
	}
	if (postdiv)
		postdiv = (postdiv - 1) | PLLDIV_EN;
	if (mult)
		mult = mult - 1;

	/* Protect against simultaneous calls to PLL setting sequence */
	spin_lock_irqsave(&clockfw_lock, flags);

	ctrl = __raw_readl(pll->base + PLLCTL);

	/* Switch the PLL to bypass mode */
	ctrl &= ~(PLLCTL_PLLENSRC | PLLCTL_PLLEN);
	__raw_writel(ctrl, pll->base + PLLCTL);

	udelay(PLL_BYPASS_TIME);

	/* Reset and enable PLL */
	ctrl &= ~(PLLCTL_PLLRST | PLLCTL_PLLDIS);
	__raw_writel(ctrl, pll->base + PLLCTL);

	if (pll->flags & PLL_HAS_PREDIV)
		__raw_writel(prediv, pll->base + PREDIV);

	__raw_writel(mult, pll->base + PLLM);

	if (pll->flags & PLL_HAS_POSTDIV)
		__raw_writel(postdiv, pll->base + POSTDIV);

	udelay(PLL_RESET_TIME);

	/* Bring PLL out of reset */
	ctrl |= PLLCTL_PLLRST;
	__raw_writel(ctrl, pll->base + PLLCTL);

	udelay(locktime);

	/* Remove PLL from bypass mode */
	ctrl |= PLLCTL_PLLEN;
	__raw_writel(ctrl, pll->base + PLLCTL);

	spin_unlock_irqrestore(&clockfw_lock, flags);

	return 0;
}
EXPORT_SYMBOL(davinci_set_pllrate);
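/*
 * Usage sketch (editor's illustration, not taken from this file): board or
 * SoC setup code that needs to reprogram a PLL on OMAP-L138 might do
 * something like the following, where "soc_pll0_data" and the multiplier
 * value are hypothetical.  With a 24 MHz OSCIN, mult = 25 and no pre/post
 * divider yields a 600 MHz PLL output:
 *
 *	struct pll_data *pll = &soc_pll0_data;
 *
 *	if (davinci_set_pllrate(pll, 0, 25, 0))
 *		pr_warn("PLL0 rate change failed\n");
 */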
/**
 * davinci_set_refclk_rate() - Set the reference clock rate
 * @rate:	The new rate.
 *
 * Sets the reference clock rate to a given value. This will most likely
 * result in the entire clock tree getting updated.
 *
 * This is used to support boards which use a reference clock different
 * than that used by default in <soc>.c file. The reference clock rate
 * should be updated early in the boot process; ideally soon after the
 * clock tree has been initialized once with the default reference clock
 * rate (davinci_common_init()).
 *
 * Returns 0 on success, error otherwise.
 */
int davinci_set_refclk_rate(unsigned long rate)
{
	struct clk *refclk;

	refclk = clk_get(NULL, "ref");
	if (IS_ERR(refclk)) {
		pr_err("%s: failed to get reference clock.\n", __func__);
		return PTR_ERR(refclk);
	}

	clk_set_rate(refclk, rate);

	clk_put(refclk);

	return 0;
}
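/*
 * Usage sketch (editor's illustration): a board file whose oscillator
 * differs from the SoC default would call this early in boot, soon after
 * davinci_common_init(); "board_soc_info" and the 24 MHz value below are
 * hypothetical.
 *
 *	davinci_common_init(&board_soc_info);
 *	davinci_set_refclk_rate(24000000);
 */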
int __init davinci_clk_init(struct clk_lookup *clocks)
{
	struct clk_lookup *c;
	struct clk *clk;
	size_t num_clocks = 0;

	for (c = clocks; c->clk; c++) {
		clk = c->clk;

		if (!clk->recalc) {

			/* Check if clock is a PLL */
			if (clk->pll_data)
				clk->recalc = clk_pllclk_recalc;

			/* Else, if it is a PLL-derived clock */
			else if (clk->flags & CLK_PLL)
				clk->recalc = clk_sysclk_recalc;

			/* Otherwise, it is a leaf clock (PSC clock) */
			else if (clk->parent)
				clk->recalc = clk_leafclk_recalc;
		}

		if (clk->pll_data) {
			struct pll_data *pll = clk->pll_data;

			if (!pll->div_ratio_mask)
				pll->div_ratio_mask = PLLDIV_RATIO_MASK;

			if (pll->phys_base && !pll->base) {
				pll->base = ioremap(pll->phys_base, SZ_4K);
				WARN_ON(!pll->base);
			}
		}

		if (clk->recalc)
			clk->rate = clk->recalc(clk);

		if (clk->lpsc)
			clk->flags |= CLK_PSC;

		clk_register(clk);
		num_clocks++;

		/* Turn on clocks that Linux doesn't otherwise manage */
		if (clk->flags & ALWAYS_ENABLED)
			clk_enable(clk);
	}

	clkdev_add_table(clocks, num_clocks);

	return 0;
}
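/*
 * Usage sketch (editor's illustration): SoC files pass a NULL-terminated
 * clk_lookup table into davinci_clk_init(); the entries below are
 * hypothetical and assume the CLK() initializer helper from the
 * mach-davinci clock.h.
 *
 *	static struct clk_lookup example_clks[] = {
 *		CLK(NULL, "ref", &ref_clk),
 *		CLK(NULL, "pll1", &pll1_clk),
 *		CLK("davinci-uart.0", NULL, &uart0_clk),
 *		CLK(NULL, NULL, NULL),
 *	};
 *
 *	davinci_clk_init(example_clks);
 */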
#ifdef CONFIG_DEBUG_FS

#include <linux/debugfs.h>
#include <linux/seq_file.h>

#define CLKNAME_MAX	10		/* longest clock name */
#define NEST_DELTA	2
#define NEST_MAX	4
static void
dump_clock(struct seq_file *s, unsigned nest, struct clk *parent)
{
	char		*state;
	char		buf[CLKNAME_MAX + NEST_DELTA * NEST_MAX];
	struct clk	*clk;
	unsigned	i;

	if (parent->flags & CLK_PLL)
		state = "pll";
	else if (parent->flags & CLK_PSC)
		state = "psc";
	else
		state = "";

	/* <nest spaces> name <pad to end> */
	memset(buf, ' ', sizeof(buf) - 1);
	buf[sizeof(buf) - 1] = 0;
	i = strlen(parent->name);
	memcpy(buf + nest, parent->name,
			min(i, (unsigned)(sizeof(buf) - 1 - nest)));

	seq_printf(s, "%s users=%2d %-3s %9ld Hz\n",
		   buf, parent->usecount, state, clk_get_rate(parent));
	/* REVISIT show device associations too */

	/* cost is now small, but not linear... */
	list_for_each_entry(clk, &parent->children, childnode) {
		dump_clock(s, nest + NEST_DELTA, clk);
	}
}
static int davinci_ck_show(struct seq_file *m, void *v)
{
	struct clk *clk;

	/*
	 * Show clock tree; We trust nonzero usecounts equate to PSC enables...
	 */
	mutex_lock(&clocks_mutex);
	list_for_each_entry(clk, &clocks, node)
		if (!clk->parent)
			dump_clock(m, 0, clk);
	mutex_unlock(&clocks_mutex);

	return 0;
}
static int davinci_ck_open(struct inode *inode, struct file *file)
{
	return single_open(file, davinci_ck_show, NULL);
}
static const struct file_operations davinci_ck_operations = {
	.open		= davinci_ck_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int __init davinci_clk_debugfs_init(void)
{
	debugfs_create_file("davinci_clocks", S_IFREG | S_IRUGO, NULL, NULL,
			    &davinci_ck_operations);
	return 0;
}
device_initcall(davinci_clk_debugfs_init);
#endif /* CONFIG_DEBUG_FS */
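/*
 * Usage note (editor's addition): with debugfs mounted, the tree produced
 * by davinci_ck_show() can be read from userspace, e.g.:
 *
 *	# mount -t debugfs none /sys/kernel/debug
 *	# cat /sys/kernel/debug/davinci_clocks
 */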