/*
 *  linux/arch/arm/mach-omap1/clock.c
 *
 *  Copyright (C) 2004 - 2005, 2009-2010 Nokia Corporation
 *  Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
 *
 *  Modified to use omap shared clock framework by
 *  Tony Lindgren <tony@atomide.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/clkdev.h>

#include <asm/mach-types.h>

#include <mach/hardware.h>

#include "soc.h"
#include "iomap.h"
#include "clock.h"
#include "opp.h"
#include "sram.h"
__u32 arm_idlect1_mask;
struct clk *api_ck_p, *ck_dpll1_p, *ck_ref_p;

static LIST_HEAD(clocks);
static DEFINE_MUTEX(clocks_mutex);
static DEFINE_SPINLOCK(clockfw_lock);

/*
 * Omap1 specific clock functions
 */
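/*
 * omap1_uart_recalc - recalculate a UART functional clock rate
 *
 * The UART functional clock runs at 48 MHz when the clk->enable_bit mask
 * is set in the value read from clk->enable_reg, and at 12 MHz otherwise.
 */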
unsigned long omap1_uart_recalc(struct clk *clk)
{
	unsigned int val = __raw_readl(clk->enable_reg);
	return val & clk->enable_bit ? 48000000 : 12000000;
}

unsigned long omap1_sossi_recalc(struct clk *clk)
{
	u32 div = omap_readl(MOD_CONF_CTRL_1);

	div = (div >> 17) & 0x7;
	div++;

	return clk->parent->rate / div;
}
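/*
 * omap1_clk_allow_idle()/omap1_clk_deny_idle() keep a per-clock
 * no_idle_count and mirror the result into the global arm_idlect1_mask:
 * a clock's ARM_IDLECT1 bit is set in the mask (idle allowed) only while
 * no caller is denying idle for it.  Clocks without CLOCK_IDLE_CONTROL
 * are ignored.
 */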
static void omap1_clk_allow_idle(struct clk *clk)
{
	struct arm_idlect1_clk *iclk = (struct arm_idlect1_clk *)clk;

	if (!(clk->flags & CLOCK_IDLE_CONTROL))
		return;

	if (iclk->no_idle_count > 0 && !(--iclk->no_idle_count))
		arm_idlect1_mask |= 1 << iclk->idlect_shift;
}

static void omap1_clk_deny_idle(struct clk *clk)
{
	struct arm_idlect1_clk *iclk = (struct arm_idlect1_clk *)clk;

	if (!(clk->flags & CLOCK_IDLE_CONTROL))
		return;

	if (iclk->no_idle_count++ == 0)
		arm_idlect1_mask &= ~(1 << iclk->idlect_shift);
}
static __u16 verify_ckctl_value(__u16 newval)
{
	/* This function checks for the following limitations set
	 * by the hardware (all conditions must be true):
	 * DSPMMU_CK == DSP_CK  or  DSPMMU_CK == DSP_CK/2
	 * ARM_CK >= TC_CK
	 * DSP_CK >= TC_CK
	 * DSPMMU_CK >= TC_CK
	 *
	 * In addition, the following rules are enforced:
	 * LCD_CK <= TC_CK
	 * ARMPER_CK <= TC_CK
	 *
	 * However, maximum frequencies are not checked for!
	 */
	__u8 per_exp;
	__u8 lcd_exp;
	__u8 arm_exp;
	__u8 dsp_exp;
	__u8 tc_exp;
	__u8 dspmmu_exp;

	per_exp = (newval >> CKCTL_PERDIV_OFFSET) & 3;
	lcd_exp = (newval >> CKCTL_LCDDIV_OFFSET) & 3;
	arm_exp = (newval >> CKCTL_ARMDIV_OFFSET) & 3;
	dsp_exp = (newval >> CKCTL_DSPDIV_OFFSET) & 3;
	tc_exp = (newval >> CKCTL_TCDIV_OFFSET) & 3;
	dspmmu_exp = (newval >> CKCTL_DSPMMUDIV_OFFSET) & 3;

	if (dspmmu_exp < dsp_exp)
		dspmmu_exp = dsp_exp;
	if (dspmmu_exp > dsp_exp + 1)
		dspmmu_exp = dsp_exp + 1;
	if (tc_exp < arm_exp)
		tc_exp = arm_exp;
	if (tc_exp < dspmmu_exp)
		tc_exp = dspmmu_exp;
	if (tc_exp > lcd_exp)
		lcd_exp = tc_exp;
	if (tc_exp > per_exp)
		per_exp = tc_exp;

	newval &= 0xf000;
	newval |= per_exp << CKCTL_PERDIV_OFFSET;
	newval |= lcd_exp << CKCTL_LCDDIV_OFFSET;
	newval |= arm_exp << CKCTL_ARMDIV_OFFSET;
	newval |= dsp_exp << CKCTL_DSPDIV_OFFSET;
	newval |= tc_exp << CKCTL_TCDIV_OFFSET;
	newval |= dspmmu_exp << CKCTL_DSPMMUDIV_OFFSET;

	return newval;
}
static int calc_dsor_exp(struct clk *clk, unsigned long rate)
{
	/* Note: If the target frequency is too low, this function will
	 * return 4, which is an invalid value. The caller must check for
	 * this value and act accordingly.
	 *
	 * Note: This function does not check for the following limitations
	 * set by the hardware (all conditions must be true):
	 * DSPMMU_CK == DSP_CK  or  DSPMMU_CK == DSP_CK/2
	 * ARM_CK >= TC_CK
	 * DSP_CK >= TC_CK
	 * DSPMMU_CK >= TC_CK
	 */
	unsigned long realrate;
	struct clk *parent;
	unsigned dsor_exp;

	parent = clk->parent;
	if (unlikely(parent == NULL))
		return -EIO;

	realrate = parent->rate;
	for (dsor_exp = 0; dsor_exp < 4; dsor_exp++) {
		if (realrate <= rate)
			break;

		realrate /= 2;
	}

	return dsor_exp;
}
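/*
 * The CKCTL divider fields hold a 2-bit exponent: field value n selects
 * a divide-by-2^n.  For example, with a 216 MHz parent, requested rates
 * of 216, 108, 54 and 27 MHz map to exponents 0..3; anything lower makes
 * calc_dsor_exp() above return the invalid value 4.
 */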
unsigned long omap1_ckctl_recalc(struct clk *clk)
{
	/* Calculate divisor encoded as 2-bit exponent */
	int dsor = 1 << (3 & (omap_readw(ARM_CKCTL) >> clk->rate_offset));

	return clk->parent->rate / dsor;
}

unsigned long omap1_ckctl_recalc_dsp_domain(struct clk *clk)
{
	int dsor;

	/* Calculate divisor encoded as 2-bit exponent
	 *
	 * The clock control bits are in DSP domain,
	 * so api_ck is needed for access.
	 * Note that DSP_CKCTL virt addr = phys addr, so
	 * we must use __raw_readw() instead of omap_readw().
	 */
	omap1_clk_enable(api_ck_p);
	dsor = 1 << (3 & (__raw_readw(DSP_CKCTL) >> clk->rate_offset));
	omap1_clk_disable(api_ck_p);

	return clk->parent->rate / dsor;
}
/* MPU virtual clock functions */
int omap1_select_table_rate(struct clk *clk, unsigned long rate)
{
	/* Find the highest supported frequency <= rate and switch to it */
	struct mpu_rate *ptr;
	unsigned long ref_rate;

	ref_rate = ck_ref_p->rate;

	for (ptr = omap1_rate_table; ptr->rate; ptr++) {
		if (!(ptr->flags & cpu_mask))
			continue;

		if (ptr->xtal != ref_rate)
			continue;

		/* Can check only after xtal frequency check */
		if (ptr->rate <= rate)
			break;
	}

	if (!ptr->rate)
		return -EINVAL;

	/*
	 * In most cases we should not need to reprogram DPLL.
	 * Reprogramming the DPLL is tricky, it must be done from SRAM.
	 */
	omap_sram_reprogram_clock(ptr->dpllctl_val, ptr->ckctl_val);

	/* XXX Do we need to recalculate the tree below DPLL1 at this point? */
	ck_dpll1_p->rate = ptr->pll_rate;

	return 0;
}
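/*
 * omap1_clk_set_rate_dsp_domain - set the rate of a DSP domain CKCTL clock
 *
 * Picks the divider exponent with calc_dsor_exp() and writes it into the
 * 2-bit field at clk->rate_offset in DSP_CKCTL.  Rates lower than
 * parent/8 are rejected with -EINVAL.
 */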
int omap1_clk_set_rate_dsp_domain(struct clk *clk, unsigned long rate)
{
	int dsor_exp;
	u16 regval;

	dsor_exp = calc_dsor_exp(clk, rate);
	if (dsor_exp > 3)
		dsor_exp = -EINVAL;
	if (dsor_exp < 0)
		return dsor_exp;

	regval = __raw_readw(DSP_CKCTL);
	regval &= ~(3 << clk->rate_offset);
	regval |= dsor_exp << clk->rate_offset;
	__raw_writew(regval, DSP_CKCTL);
	clk->rate = clk->parent->rate / (1 << dsor_exp);

	return 0;
}

long omap1_clk_round_rate_ckctl_arm(struct clk *clk, unsigned long rate)
{
	int dsor_exp = calc_dsor_exp(clk, rate);

	if (dsor_exp < 0)
		return dsor_exp;
	if (dsor_exp > 3)
		dsor_exp = 3;
	return clk->parent->rate / (1 << dsor_exp);
}

int omap1_clk_set_rate_ckctl_arm(struct clk *clk, unsigned long rate)
{
	int dsor_exp;
	u16 regval;

	dsor_exp = calc_dsor_exp(clk, rate);
	if (dsor_exp > 3)
		dsor_exp = -EINVAL;
	if (dsor_exp < 0)
		return dsor_exp;

	regval = omap_readw(ARM_CKCTL);
	regval &= ~(3 << clk->rate_offset);
	regval |= dsor_exp << clk->rate_offset;
	regval = verify_ckctl_value(regval);
	omap_writew(regval, ARM_CKCTL);
	clk->rate = clk->parent->rate / (1 << dsor_exp);
	return 0;
}
long omap1_round_to_table_rate(struct clk *clk, unsigned long rate)
{
	/* Find the highest supported frequency <= rate */
	struct mpu_rate *ptr;
	long highest_rate;
	unsigned long ref_rate;

	ref_rate = ck_ref_p->rate;

	highest_rate = -EINVAL;

	for (ptr = omap1_rate_table; ptr->rate; ptr++) {
		if (!(ptr->flags & cpu_mask))
			continue;

		if (ptr->xtal != ref_rate)
			continue;

		highest_rate = ptr->rate;

		/* Can check only after xtal frequency check */
		if (ptr->rate <= rate)
			break;
	}

	return highest_rate;
}
static unsigned calc_ext_dsor(unsigned long rate)
{
	unsigned dsor;

	/* MCLK and BCLK divisor selection is not linear:
	 * freq = 96MHz / dsor
	 *
	 * RATIO_SEL range: dsor <-> RATIO_SEL
	 * 0..6: (RATIO_SEL+2) <-> (dsor-2)
	 * 6..48: (8+(RATIO_SEL-6)*2) <-> ((dsor-8)/2+6)
	 * Minimum dsor is 2 and maximum is 96. Odd divisors starting from 9
	 * cannot be used.
	 */
	for (dsor = 2; dsor < 96; ++dsor) {
		if ((dsor & 1) && dsor > 8)
			continue;
		if (rate >= 96000000 / dsor)
			break;
	}
	return dsor;
}
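/*
 * For example, asking for 24 MHz yields dsor = 4 (96 MHz / 4), which
 * omap1_set_ext_clk_rate() below encodes as RATIO_SEL = dsor - 2 = 2;
 * a divisor above 8, such as dsor = 12, is encoded as (12 - 8) / 2 + 6 = 8.
 */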
/* XXX Only needed on 1510 */
int omap1_set_uart_rate(struct clk *clk, unsigned long rate)
{
	unsigned int val;

	val = __raw_readl(clk->enable_reg);
	if (rate == 12000000)
		val &= ~(1 << clk->enable_bit);
	else if (rate == 48000000)
		val |= (1 << clk->enable_bit);
	else
		return -EINVAL;
	__raw_writel(val, clk->enable_reg);
	clk->rate = rate;

	return 0;
}
/* External clock (MCLK & BCLK) functions */
int omap1_set_ext_clk_rate(struct clk *clk, unsigned long rate)
{
	unsigned dsor;
	__u16 ratio_bits;

	dsor = calc_ext_dsor(rate);
	clk->rate = 96000000 / dsor;
	if (dsor > 8)
		ratio_bits = ((dsor - 8) / 2 + 6) << 2;
	else
		ratio_bits = (dsor - 2) << 2;

	ratio_bits |= __raw_readw(clk->enable_reg) & ~0xfd;
	__raw_writew(ratio_bits, clk->enable_reg);

	return 0;
}
int omap1_set_sossi_rate(struct clk *clk, unsigned long rate)
{
	u32 l;
	int div;
	unsigned long p_rate;

	p_rate = clk->parent->rate;
	/* Round towards slower frequency */
	div = (p_rate + rate - 1) / rate;
	div--;
	if (div < 0 || div > 7)
		return -EINVAL;

	l = omap_readl(MOD_CONF_CTRL_1);
	l &= ~(7 << 17);
	l |= div << 17;
	omap_writel(l, MOD_CONF_CTRL_1);

	clk->rate = p_rate / (div + 1);

	return 0;
}
long omap1_round_ext_clk_rate(struct clk *clk, unsigned long rate)
{
	return 96000000 / calc_ext_dsor(rate);
}

void omap1_init_ext_clk(struct clk *clk)
{
	unsigned dsor;
	__u16 ratio_bits;

	/* Determine current rate and ensure clock is based on 96MHz APLL */
	ratio_bits = __raw_readw(clk->enable_reg) & ~1;
	__raw_writew(ratio_bits, clk->enable_reg);

	ratio_bits = (ratio_bits & 0xfc) >> 2;
	if (ratio_bits > 6)
		dsor = (ratio_bits - 6) * 2 + 8;
	else
		dsor = ratio_bits + 2;

	clk->rate = 96000000 / dsor;
}
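/*
 * omap1_clk_enable()/omap1_clk_disable() are use-count based: a clock is
 * actually enabled only on the 0 -> 1 transition of clk->usecount, and
 * disabled again on the 1 -> 0 transition.  Parent clocks are enabled
 * recursively first, and CLOCK_NO_IDLE_PARENT additionally denies idle
 * on the parent while the child is in use.
 */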
int omap1_clk_enable(struct clk *clk)
{
	int ret = 0;

	if (clk->usecount++ == 0) {
		if (clk->parent) {
			ret = omap1_clk_enable(clk->parent);
			if (ret)
				goto err;

			if (clk->flags & CLOCK_NO_IDLE_PARENT)
				omap1_clk_deny_idle(clk->parent);
		}

		ret = clk->ops->enable(clk);
		if (ret) {
			if (clk->parent)
				omap1_clk_disable(clk->parent);
			goto err;
		}
	}
	return ret;

err:
	clk->usecount--;
	return ret;
}

void omap1_clk_disable(struct clk *clk)
{
	if (clk->usecount > 0 && !(--clk->usecount)) {
		clk->ops->disable(clk);
		if (likely(clk->parent)) {
			omap1_clk_disable(clk->parent);
			if (clk->flags & CLOCK_NO_IDLE_PARENT)
				omap1_clk_allow_idle(clk->parent);
		}
	}
}
static int omap1_clk_enable_generic(struct clk *clk)
{
	__u16 regval16;
	__u32 regval32;

	if (unlikely(clk->enable_reg == NULL)) {
		printk(KERN_ERR "clock.c: Enable for %s without enable code\n",
		       clk->name);
		return -EINVAL;
	}

	if (clk->flags & ENABLE_REG_32BIT) {
		regval32 = __raw_readl(clk->enable_reg);
		regval32 |= (1 << clk->enable_bit);
		__raw_writel(regval32, clk->enable_reg);
	} else {
		regval16 = __raw_readw(clk->enable_reg);
		regval16 |= (1 << clk->enable_bit);
		__raw_writew(regval16, clk->enable_reg);
	}

	return 0;
}

static void omap1_clk_disable_generic(struct clk *clk)
{
	__u16 regval16;
	__u32 regval32;

	if (clk->enable_reg == NULL)
		return;

	if (clk->flags & ENABLE_REG_32BIT) {
		regval32 = __raw_readl(clk->enable_reg);
		regval32 &= ~(1 << clk->enable_bit);
		__raw_writel(regval32, clk->enable_reg);
	} else {
		regval16 = __raw_readw(clk->enable_reg);
		regval16 &= ~(1 << clk->enable_bit);
		__raw_writew(regval16, clk->enable_reg);
	}
}

const struct clkops clkops_generic = {
	.enable		= omap1_clk_enable_generic,
	.disable	= omap1_clk_disable_generic,
};
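/*
 * DSP domain clocks: the enable/disable registers live in the DSP
 * address space, which is only reachable while api_ck is running, so
 * these ops bracket the generic register access with
 * omap1_clk_enable(api_ck_p)/omap1_clk_disable(api_ck_p).
 */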
static int omap1_clk_enable_dsp_domain(struct clk *clk)
{
	int retval;

	retval = omap1_clk_enable(api_ck_p);
	if (!retval) {
		retval = omap1_clk_enable_generic(clk);
		omap1_clk_disable(api_ck_p);
	}

	return retval;
}

static void omap1_clk_disable_dsp_domain(struct clk *clk)
{
	if (omap1_clk_enable(api_ck_p) == 0) {
		omap1_clk_disable_generic(clk);
		omap1_clk_disable(api_ck_p);
	}
}

const struct clkops clkops_dspck = {
	.enable		= omap1_clk_enable_dsp_domain,
	.disable	= omap1_clk_disable_dsp_domain,
};
/* XXX SYSC register handling does not belong in the clock framework */
static int omap1_clk_enable_uart_functional_16xx(struct clk *clk)
{
	int ret;
	struct uart_clk *uclk;

	ret = omap1_clk_enable_generic(clk);
	if (ret == 0) {
		/* Set smart idle acknowledgement mode */
		uclk = (struct uart_clk *)clk;
		omap_writeb((omap_readb(uclk->sysc_addr) & ~0x10) | 8,
			    uclk->sysc_addr);
	}

	return ret;
}

/* XXX SYSC register handling does not belong in the clock framework */
static void omap1_clk_disable_uart_functional_16xx(struct clk *clk)
{
	struct uart_clk *uclk;

	/* Set force idle acknowledgement mode */
	uclk = (struct uart_clk *)clk;
	omap_writeb((omap_readb(uclk->sysc_addr) & ~0x18), uclk->sysc_addr);

	omap1_clk_disable_generic(clk);
}

/* XXX SYSC register handling does not belong in the clock framework */
const struct clkops clkops_uart_16xx = {
	.enable		= omap1_clk_enable_uart_functional_16xx,
	.disable	= omap1_clk_disable_uart_functional_16xx,
};
long omap1_clk_round_rate(struct clk *clk, unsigned long rate)
{
	if (clk->round_rate != NULL)
		return clk->round_rate(clk, rate);

	return clk->rate;
}

int omap1_clk_set_rate(struct clk *clk, unsigned long rate)
{
	int ret = -EINVAL;

	if (clk->set_rate)
		ret = clk->set_rate(clk, rate);
	return ret;
}
/*
 * Omap1 clock reset and init functions
 */

#ifdef CONFIG_OMAP_RESET_CLOCKS

void omap1_clk_disable_unused(struct clk *clk)
{
	__u32 regval32;

	/* Clocks in the DSP domain need api_ck. Just assume bootloader
	 * has not enabled any DSP clocks */
	if (clk->enable_reg == DSP_IDLECT2) {
		pr_info("Skipping reset check for DSP domain clock \"%s\"\n",
			clk->name);
		return;
	}

	/* Is the clock already disabled? */
	if (clk->flags & ENABLE_REG_32BIT)
		regval32 = __raw_readl(clk->enable_reg);
	else
		regval32 = __raw_readw(clk->enable_reg);

	if ((regval32 & (1 << clk->enable_bit)) == 0)
		return;

	printk(KERN_INFO "Disabling unused clock \"%s\"... ", clk->name);
	clk->ops->disable(clk);
	printk(" done\n");
}

#endif
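/*
 * Standard clk API entry points for OMAP1.  These thin wrappers take
 * clockfw_lock to serialize access and then call the omap1_clk_*
 * implementations above.
 */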
int clk_enable(struct clk *clk)
{
	unsigned long flags;
	int ret;

	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	spin_lock_irqsave(&clockfw_lock, flags);
	ret = omap1_clk_enable(clk);
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_enable);

void clk_disable(struct clk *clk)
{
	unsigned long flags;

	if (clk == NULL || IS_ERR(clk))
		return;

	spin_lock_irqsave(&clockfw_lock, flags);
	if (clk->usecount == 0) {
		pr_err("Trying to disable clock %s with 0 usecount\n",
		       clk->name);
		WARN_ON(1);
		goto out;
	}

	omap1_clk_disable(clk);

out:
	spin_unlock_irqrestore(&clockfw_lock, flags);
}
EXPORT_SYMBOL(clk_disable);

unsigned long clk_get_rate(struct clk *clk)
{
	unsigned long flags;
	unsigned long ret;

	if (clk == NULL || IS_ERR(clk))
		return 0;

	spin_lock_irqsave(&clockfw_lock, flags);
	ret = clk->rate;
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_get_rate);
/*
 * Optional clock functions defined in include/linux/clk.h
 */

long clk_round_rate(struct clk *clk, unsigned long rate)
{
	unsigned long flags;
	long ret;

	if (clk == NULL || IS_ERR(clk))
		return 0;

	spin_lock_irqsave(&clockfw_lock, flags);
	ret = omap1_clk_round_rate(clk, rate);
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_round_rate);

int clk_set_rate(struct clk *clk, unsigned long rate)
{
	unsigned long flags;
	int ret = -EINVAL;

	if (clk == NULL || IS_ERR(clk))
		return ret;

	spin_lock_irqsave(&clockfw_lock, flags);
	ret = omap1_clk_set_rate(clk, rate);
	if (ret == 0)
		propagate_rate(clk);
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_set_rate);

int clk_set_parent(struct clk *clk, struct clk *parent)
{
	WARN_ONCE(1, "clk_set_parent() not implemented for OMAP1\n");

	return -EINVAL;
}
EXPORT_SYMBOL(clk_set_parent);

struct clk *clk_get_parent(struct clk *clk)
{
	return clk->parent;
}
EXPORT_SYMBOL(clk_get_parent);
/*
 * OMAP specific clock functions shared between omap1 and omap2
 */

int __initdata mpurate;

/*
 * By default we use the rate set by the bootloader.
 * You can override this with the mpurate= cmdline option.
 */
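/*
 * Values below 1000 are taken as MHz: "mpurate=216" and
 * "mpurate=216000000" both request 216 MHz (see omap_clk_setup() below).
 */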
static int __init omap_clk_setup(char *str)
{
	get_option(&str, &mpurate);

	if (!mpurate)
		return 1;

	if (mpurate < 1000)
		mpurate *= 1000000;

	return 1;
}
__setup("mpurate=", omap_clk_setup);

/* Used for clocks that always have the same value as the parent clock */
unsigned long followparent_recalc(struct clk *clk)
{
	return clk->parent->rate;
}

/*
 * Used for clocks that have the same value as the parent clock,
 * divided by some factor
 */
unsigned long omap_fixed_divisor_recalc(struct clk *clk)
{
	WARN_ON(!clk->fixed_div);

	return clk->parent->rate / clk->fixed_div;
}
void clk_reparent(struct clk *child, struct clk *parent)
{
	list_del_init(&child->sibling);
	if (parent)
		list_add(&child->sibling, &parent->children);
	child->parent = parent;

	/* now do the debugfs renaming to reattach the child
	   to the proper parent */
}

/* Propagate rate to children */
void propagate_rate(struct clk *tclk)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &tclk->children, sibling) {
		if (clkp->recalc)
			clkp->rate = clkp->recalc(clkp);
		propagate_rate(clkp);
	}
}

static LIST_HEAD(root_clks);
/**
 * recalculate_root_clocks - recalculate and propagate all root clocks
 *
 * Recalculates all root clocks (clocks with no parent), which if the
 * clock's .recalc is set correctly, should also propagate their rates.
 * Called at init.
 */
void recalculate_root_clocks(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &root_clks, sibling) {
		if (clkp->recalc)
			clkp->rate = clkp->recalc(clkp);
		propagate_rate(clkp);
	}
}

/**
 * clk_preinit - initialize any fields in the struct clk before clk init
 * @clk: struct clk * to initialize
 *
 * Initialize any struct clk fields needed before normal clk initialization
 * can run. No return value.
 */
void clk_preinit(struct clk *clk)
{
	INIT_LIST_HEAD(&clk->children);
}
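/*
 * clk_register() below adds the clock to its parent's children list (or
 * to root_clks when it has no parent) and to the global clocks list, then
 * runs the clock's optional ->init() hook.  Already-registered clocks are
 * detected via their list node and silently skipped.
 */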
int clk_register(struct clk *clk)
{
	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	/*
	 * trap out already registered clocks
	 */
	if (clk->node.next || clk->node.prev)
		return 0;

	mutex_lock(&clocks_mutex);
	if (clk->parent)
		list_add(&clk->sibling, &clk->parent->children);
	else
		list_add(&clk->sibling, &root_clks);

	list_add(&clk->node, &clocks);
	if (clk->init)
		clk->init(clk);
	mutex_unlock(&clocks_mutex);

	return 0;
}
EXPORT_SYMBOL(clk_register);

void clk_unregister(struct clk *clk)
{
	if (clk == NULL || IS_ERR(clk))
		return;

	mutex_lock(&clocks_mutex);
	list_del(&clk->sibling);
	list_del(&clk->node);
	mutex_unlock(&clocks_mutex);
}
EXPORT_SYMBOL(clk_unregister);

void clk_enable_init_clocks(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &clocks, node)
		if (clkp->flags & ENABLE_ON_INIT)
			clk_enable(clkp);
}
/**
 * omap_clk_get_by_name - locate OMAP struct clk by its name
 * @name: name of the struct clk to locate
 *
 * Locate an OMAP struct clk by its name.  Assumes that struct clk
 * names are unique.  Returns NULL if not found or a pointer to the
 * struct clk if found.
 */
struct clk *omap_clk_get_by_name(const char *name)
{
	struct clk *c;
	struct clk *ret = NULL;

	mutex_lock(&clocks_mutex);

	list_for_each_entry(c, &clocks, node) {
		if (!strcmp(c->name, name)) {
			ret = c;
			break;
		}
	}

	mutex_unlock(&clocks_mutex);

	return ret;
}
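/*
 * omap_clk_enable_autoidle_all()/omap_clk_disable_autoidle_all() walk the
 * whole clock list under clockfw_lock and invoke the per-clock
 * ->allow_idle()/->deny_idle() ops where they are implemented.
 */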
int omap_clk_enable_autoidle_all(void)
{
	struct clk *c;
	unsigned long flags;

	spin_lock_irqsave(&clockfw_lock, flags);

	list_for_each_entry(c, &clocks, node)
		if (c->ops->allow_idle)
			c->ops->allow_idle(c);

	spin_unlock_irqrestore(&clockfw_lock, flags);

	return 0;
}

int omap_clk_disable_autoidle_all(void)
{
	struct clk *c;
	unsigned long flags;

	spin_lock_irqsave(&clockfw_lock, flags);

	list_for_each_entry(c, &clocks, node)
		if (c->ops->deny_idle)
			c->ops->deny_idle(c);

	spin_unlock_irqrestore(&clockfw_lock, flags);

	return 0;
}

/*
 * Low level helpers
 */
static int clkll_enable_null(struct clk *clk)
{
	return 0;
}

static void clkll_disable_null(struct clk *clk)
{
}

const struct clkops clkops_null = {
	.enable		= clkll_enable_null,
	.disable	= clkll_disable_null,
};

/*
 * Dummy clock
 *
 * Used for clock aliases that are needed on some OMAPs, but not others
 */
struct clk dummy_ck = {
	.name	= "dummy",
	.ops	= &clkops_null,
};
#ifdef CONFIG_OMAP_RESET_CLOCKS
/*
 * Disable any unused clocks left on by the bootloader
 */
static int __init clk_disable_unused(void)
{
	struct clk *ck;
	unsigned long flags;

	pr_info("clock: disabling unused clocks to save power\n");

	spin_lock_irqsave(&clockfw_lock, flags);
	list_for_each_entry(ck, &clocks, node) {
		if (ck->ops == &clkops_null)
			continue;

		if (ck->usecount > 0 || !ck->enable_reg)
			continue;

		omap1_clk_disable_unused(ck);
	}
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return 0;
}
late_initcall(clk_disable_unused);
late_initcall(omap_clk_enable_autoidle_all);
#endif
#if defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS)
/*
 * debugfs support to trace clock tree hierarchy and attributes
 */

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *clk_debugfs_root;

static int clk_dbg_show_summary(struct seq_file *s, void *unused)
{
	struct clk *c;
	struct clk *pa;

	mutex_lock(&clocks_mutex);
	seq_printf(s, "%-30s %-30s %-10s %s\n",
		   "clock-name", "parent-name", "rate", "use-count");

	list_for_each_entry(c, &clocks, node) {
		pa = c->parent;
		seq_printf(s, "%-30s %-30s %-10lu %d\n",
			   c->name, pa ? pa->name : "none", c->rate,
			   c->usecount);
	}
	mutex_unlock(&clocks_mutex);

	return 0;
}

static int clk_dbg_open(struct inode *inode, struct file *file)
{
	return single_open(file, clk_dbg_show_summary, inode->i_private);
}

static const struct file_operations debug_clock_fops = {
	.open		= clk_dbg_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
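/*
 * Each registered clock gets a debugfs directory under its parent's
 * directory (or under the "clock" root for parentless clocks) exposing
 * read-only "usecount", "rate" and "flags" entries, plus a global
 * "clock/summary" file backed by debug_clock_fops above.
 */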
static int clk_debugfs_register_one(struct clk *c)
{
	int err;
	struct dentry *d;
	struct clk *pa = c->parent;

	d = debugfs_create_dir(c->name, pa ? pa->dent : clk_debugfs_root);
	if (!d)
		return -ENOMEM;
	c->dent = d;

	d = debugfs_create_u8("usecount", S_IRUGO, c->dent, (u8 *)&c->usecount);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	d = debugfs_create_u32("rate", S_IRUGO, c->dent, (u32 *)&c->rate);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	d = debugfs_create_x32("flags", S_IRUGO, c->dent, (u32 *)&c->flags);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	return 0;

err_out:
	debugfs_remove_recursive(c->dent);
	return err;
}

static int clk_debugfs_register(struct clk *c)
{
	int err;
	struct clk *pa = c->parent;

	if (pa && !pa->dent) {
		err = clk_debugfs_register(pa);
		if (err)
			return err;
	}

	if (!c->dent) {
		err = clk_debugfs_register_one(c);
		if (err)
			return err;
	}
	return 0;
}

static int __init clk_debugfs_init(void)
{
	struct clk *c;
	struct dentry *d;
	int err;

	d = debugfs_create_dir("clock", NULL);
	if (!d)
		return -ENOMEM;
	clk_debugfs_root = d;

	list_for_each_entry(c, &clocks, node) {
		err = clk_debugfs_register(c);
		if (err)
			goto err_out;
	}

	d = debugfs_create_file("summary", S_IRUGO,
		d, NULL, &debug_clock_fops);
	if (!d)
		return -ENOMEM;

	return 0;
err_out:
	debugfs_remove_recursive(clk_debugfs_root);
	return err;
}
late_initcall(clk_debugfs_init);

#endif /* defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS) */