/*
 * linux/arch/arm/mach-omap1/clock.c
 *
 * Copyright (C) 2004 - 2005, 2009-2010 Nokia Corporation
 * Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
 *
 * Modified to use omap shared clock framework by
 * Tony Lindgren <tony@atomide.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/io.h>		/* added: __raw_read*()/__raw_write*() accessors used below */
#include <linux/clk.h>
#include <linux/clkdev.h>

#include <asm/mach-types.h>

#include <mach/hardware.h>

#include "clock.h"		/* assumed local header: struct clk, clkops, CKCTL_* definitions */

__u32 arm_idlect1_mask;
struct clk *api_ck_p, *ck_dpll1_p, *ck_ref_p;

static LIST_HEAD(clocks);
static DEFINE_MUTEX(clocks_mutex);
static DEFINE_SPINLOCK(clockfw_lock);

/*
 * Omap1 specific clock functions
 */

unsigned long omap1_uart_recalc(struct clk *clk)
{
	unsigned int val = __raw_readl(clk->enable_reg);

	return val & clk->enable_bit ? 48000000 : 12000000;
}

unsigned long omap1_sossi_recalc(struct clk *clk)
{
	u32 div = omap_readl(MOD_CONF_CTRL_1);

	div = (div >> 17) & 0x7;
	div++;

	return clk->parent->rate / div;
}

static void omap1_clk_allow_idle(struct clk *clk)
{
	struct arm_idlect1_clk * iclk = (struct arm_idlect1_clk *)clk;

	if (!(clk->flags & CLOCK_IDLE_CONTROL))
		return;

	if (iclk->no_idle_count > 0 && !(--iclk->no_idle_count))
		arm_idlect1_mask |= 1 << iclk->idlect_shift;
}

static void omap1_clk_deny_idle(struct clk *clk)
{
	struct arm_idlect1_clk * iclk = (struct arm_idlect1_clk *)clk;

	if (!(clk->flags & CLOCK_IDLE_CONTROL))
		return;

	if (iclk->no_idle_count++ == 0)
		arm_idlect1_mask &= ~(1 << iclk->idlect_shift);
}

static __u16 verify_ckctl_value(__u16 newval)
{
	/* This function checks for following limitations set
	 * by the hardware (all conditions must be true):
	 * DSPMMU_CK == DSP_CK  or  DSPMMU_CK == DSP_CK/2
	 * ARM_CK >= TC_CK
	 * DSP_CK >= TC_CK
	 * DSPMMU_CK >= TC_CK
	 *
	 * In addition following rules are enforced:
	 * LCD_CK <= TC_CK
	 * ARMPER_CK <= TC_CK
	 *
	 * However, maximum frequencies are not checked for!
	 */
	__u8 per_exp;
	__u8 lcd_exp;
	__u8 arm_exp;
	__u8 dsp_exp;
	__u8 tc_exp;
	__u8 dspmmu_exp;

	per_exp = (newval >> CKCTL_PERDIV_OFFSET) & 3;
	lcd_exp = (newval >> CKCTL_LCDDIV_OFFSET) & 3;
	arm_exp = (newval >> CKCTL_ARMDIV_OFFSET) & 3;
	dsp_exp = (newval >> CKCTL_DSPDIV_OFFSET) & 3;
	tc_exp = (newval >> CKCTL_TCDIV_OFFSET) & 3;
	dspmmu_exp = (newval >> CKCTL_DSPMMUDIV_OFFSET) & 3;

	if (dspmmu_exp < dsp_exp)
		dspmmu_exp = dsp_exp;
	if (dspmmu_exp > dsp_exp + 1)
		dspmmu_exp = dsp_exp + 1;
	if (tc_exp < arm_exp)
		tc_exp = arm_exp;
	if (tc_exp < dspmmu_exp)
		tc_exp = dspmmu_exp;
	if (tc_exp > lcd_exp)
		lcd_exp = tc_exp;
	if (tc_exp > per_exp)
		per_exp = tc_exp;

	/* Clear the six divider fields and write back the clamped exponents */
	newval &= ~((3 << CKCTL_PERDIV_OFFSET) | (3 << CKCTL_LCDDIV_OFFSET) |
		    (3 << CKCTL_ARMDIV_OFFSET) | (3 << CKCTL_DSPDIV_OFFSET) |
		    (3 << CKCTL_TCDIV_OFFSET) | (3 << CKCTL_DSPMMUDIV_OFFSET));
	newval |= per_exp << CKCTL_PERDIV_OFFSET;
	newval |= lcd_exp << CKCTL_LCDDIV_OFFSET;
	newval |= arm_exp << CKCTL_ARMDIV_OFFSET;
	newval |= dsp_exp << CKCTL_DSPDIV_OFFSET;
	newval |= tc_exp << CKCTL_TCDIV_OFFSET;
	newval |= dspmmu_exp << CKCTL_DSPMMUDIV_OFFSET;

	return newval;
}

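/*
 * Illustrative example (added; the numbers are assumed, not from the
 * original source): with arm_exp = 3, dsp_exp = 0 and dspmmu_exp = 2,
 * the clamps above first pull dspmmu_exp down to dsp_exp + 1 = 1,
 * then raise tc_exp to at least arm_exp = 3, and finally lift lcd_exp
 * and per_exp up to tc_exp, so LCD_CK and ARMPER_CK never run faster
 * than TC_CK. A larger exponent means a larger divider and therefore
 * a slower clock.
 */
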
static int calc_dsor_exp(struct clk *clk, unsigned long rate)
{
	/* Note: If target frequency is too low, this function will return 4,
	 * which is invalid value. Caller must check for this value and act
	 * accordingly.
	 *
	 * Note: This function does not check for following limitations set
	 * by the hardware (all conditions must be true):
	 * DSPMMU_CK == DSP_CK  or  DSPMMU_CK == DSP_CK/2
	 * ARM_CK >= TC_CK
	 * DSP_CK >= TC_CK
	 * DSPMMU_CK >= TC_CK
	 */
	unsigned long realrate;
	struct clk *parent;
	unsigned dsor_exp;

	parent = clk->parent;
	if (unlikely(parent == NULL))
		return -EIO;

	realrate = parent->rate;
	for (dsor_exp = 0; dsor_exp < 4; dsor_exp++) {
		if (realrate <= rate)
			break;

		realrate /= 2;
	}

	return dsor_exp;
}

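/*
 * Worked example (added, assumed numbers): with the parent at 192 MHz
 * and a requested rate of 60 MHz, the loop halves realrate
 * 192 -> 96 -> 48 and stops at dsor_exp = 2, i.e. a divider of
 * 1 << 2 = 4 giving 48 MHz, the highest divided rate not above the
 * request. Any request below 192 MHz / 8 leaves the loop with
 * dsor_exp = 4, which callers must treat as "no valid divider".
 */
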
unsigned long omap1_ckctl_recalc(struct clk *clk)
{
	/* Calculate divisor encoded as 2-bit exponent */
	int dsor = 1 << (3 & (omap_readw(ARM_CKCTL) >> clk->rate_offset));

	return clk->parent->rate / dsor;
}

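/*
 * Example (added for illustration): a 2-bit field value of 0b10 in
 * ARM_CKCTL gives dsor = 1 << 2 = 4, so a 192 MHz parent is reported
 * as 48 MHz. The register stores the divider as a power-of-two
 * exponent, never the divider itself.
 */
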
unsigned long omap1_ckctl_recalc_dsp_domain(struct clk *clk)
{
	int dsor;

	/* Calculate divisor encoded as 2-bit exponent
	 *
	 * The clock control bits are in DSP domain,
	 * so api_ck is needed for access.
	 * Note that DSP_CKCTL virt addr = phys addr, so
	 * we must use __raw_readw() instead of omap_readw().
	 */
	omap1_clk_enable(api_ck_p);
	dsor = 1 << (3 & (__raw_readw(DSP_CKCTL) >> clk->rate_offset));
	omap1_clk_disable(api_ck_p);

	return clk->parent->rate / dsor;
}

/* MPU virtual clock functions */
int omap1_select_table_rate(struct clk *clk, unsigned long rate)
{
	/* Find the highest supported frequency <= rate and switch to it */
	struct mpu_rate * ptr;
	unsigned long ref_rate;

	ref_rate = ck_ref_p->rate;

	for (ptr = omap1_rate_table; ptr->rate; ptr++) {
		if (!(ptr->flags & cpu_mask))
			continue;

		if (ptr->xtal != ref_rate)
			continue;

		/* Can check only after xtal frequency check */
		if (ptr->rate <= rate)
			break;
	}

	if (!ptr->rate)
		return -EINVAL;

	/*
	 * In most cases we should not need to reprogram DPLL.
	 * Reprogramming the DPLL is tricky, it must be done from SRAM.
	 */
	omap_sram_reprogram_clock(ptr->dpllctl_val, ptr->ckctl_val);

	/* XXX Do we need to recalculate the tree below DPLL1 at this point? */
	ck_dpll1_p->rate = ptr->pll_rate;

	return 0;
}

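/*
 * Note (added): the walk above settles on the first (i.e. highest)
 * table entry whose rate is <= the requested rate, skipping entries
 * that do not match this SoC (cpu_mask) or the current reference
 * oscillator frequency. A request below every usable entry runs off
 * the table and is rejected with -EINVAL before the DPLL is touched.
 */
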
int omap1_clk_set_rate_dsp_domain(struct clk *clk, unsigned long rate)
{
	int dsor_exp;
	u16 regval;

	dsor_exp = calc_dsor_exp(clk, rate);
	if (dsor_exp > 3)
		dsor_exp = -EINVAL;
	if (dsor_exp < 0)
		return dsor_exp;

	regval = __raw_readw(DSP_CKCTL);
	regval &= ~(3 << clk->rate_offset);
	regval |= dsor_exp << clk->rate_offset;
	__raw_writew(regval, DSP_CKCTL);
	clk->rate = clk->parent->rate / (1 << dsor_exp);

	return 0;
}

long omap1_clk_round_rate_ckctl_arm(struct clk *clk, unsigned long rate)
{
	int dsor_exp = calc_dsor_exp(clk, rate);

	if (dsor_exp < 0)
		return dsor_exp;
	if (dsor_exp > 3)
		dsor_exp = 3;

	return clk->parent->rate / (1 << dsor_exp);
}

int omap1_clk_set_rate_ckctl_arm(struct clk *clk, unsigned long rate)
{
	int dsor_exp;
	u16 regval;

	dsor_exp = calc_dsor_exp(clk, rate);
	if (dsor_exp > 3)
		dsor_exp = -EINVAL;
	if (dsor_exp < 0)
		return dsor_exp;

	regval = omap_readw(ARM_CKCTL);
	regval &= ~(3 << clk->rate_offset);
	regval |= dsor_exp << clk->rate_offset;
	regval = verify_ckctl_value(regval);
	omap_writew(regval, ARM_CKCTL);
	clk->rate = clk->parent->rate / (1 << dsor_exp);

	return 0;
}

long omap1_round_to_table_rate(struct clk *clk, unsigned long rate)
{
	/* Find the highest supported frequency <= rate */
	struct mpu_rate * ptr;
	long highest_rate;
	unsigned long ref_rate;

	ref_rate = ck_ref_p->rate;

	highest_rate = -EINVAL;

	for (ptr = omap1_rate_table; ptr->rate; ptr++) {
		if (!(ptr->flags & cpu_mask))
			continue;

		if (ptr->xtal != ref_rate)
			continue;

		highest_rate = ptr->rate;

		/* Can check only after xtal frequency check */
		if (ptr->rate <= rate)
			break;
	}

	return highest_rate;
}

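/*
 * Note (added): unlike omap1_select_table_rate(), this helper only
 * reports the rate that would be selected. Because highest_rate
 * tracks the last usable entry, a request below the whole table
 * rounds to the lowest supported rate instead of failing; -EINVAL is
 * returned only when no entry matches the SoC and oscillator at all.
 */
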
static unsigned calc_ext_dsor(unsigned long rate)
{
	unsigned dsor;

	/* MCLK and BCLK divisor selection is not linear:
	 * freq = 96MHz / dsor
	 *
	 * RATIO_SEL range: dsor <-> RATIO_SEL
	 * 0..6: (RATIO_SEL+2) <-> (dsor-2)
	 * 6..48: (8+(RATIO_SEL-6)*2) <-> ((dsor-8)/2+6)
	 * Minimum dsor is 2 and maximum is 96. Odd divisors starting from 9
	 * can not be used.
	 */
	for (dsor = 2; dsor < 96; ++dsor) {
		if ((dsor & 1) && dsor > 8)
			continue;
		if (rate >= 96000000 / dsor)
			break;
	}

	return dsor;
}

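/*
 * Worked example (added, assumed rate): for rate = 10 MHz the loop
 * stops at dsor = 10, since 96 MHz / 10 = 9.6 MHz is the first
 * divided rate not above the request; odd divisors greater than 8
 * (9, 11, ...) are skipped because the RATIO_SEL encoding cannot
 * express them.
 */
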
/* XXX Only needed on 1510 */
int omap1_set_uart_rate(struct clk *clk, unsigned long rate)
{
	unsigned int val;

	val = __raw_readl(clk->enable_reg);
	if (rate == 12000000)
		val &= ~(1 << clk->enable_bit);
	else if (rate == 48000000)
		val |= (1 << clk->enable_bit);
	else
		return -EINVAL;
	__raw_writel(val, clk->enable_reg);
	clk->rate = rate;

	return 0;
}

/* External clock (MCLK & BCLK) functions */
int omap1_set_ext_clk_rate(struct clk *clk, unsigned long rate)
{
	unsigned dsor;
	__u16 ratio_bits;

	dsor = calc_ext_dsor(rate);
	clk->rate = 96000000 / dsor;
	if (dsor > 8)
		ratio_bits = ((dsor - 8) / 2 + 6) << 2;
	else
		ratio_bits = (dsor - 2) << 2;

	ratio_bits |= __raw_readw(clk->enable_reg) & ~0xfd;
	__raw_writew(ratio_bits, clk->enable_reg);

	return 0;
}

int omap1_set_sossi_rate(struct clk *clk, unsigned long rate)
{
	u32 l;
	int div;
	unsigned long p_rate;

	p_rate = clk->parent->rate;
	/* Round towards slower frequency */
	div = (p_rate + rate - 1) / rate;
	div--;
	if (div < 0 || div > 7)
		return -EINVAL;

	l = omap_readl(MOD_CONF_CTRL_1);
	l &= ~(7 << 17);
	l |= div << 17;
	omap_writel(l, MOD_CONF_CTRL_1);

	clk->rate = p_rate / (div + 1);

	return 0;
}

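/*
 * Example (added, assumed numbers): with a 96 MHz parent and a 20 MHz
 * request, the ceiling division gives div = 5, stored as 4 in the
 * 3-bit field, and the resulting rate is 96 MHz / 5 = 19.2 MHz --
 * always rounded towards the slower frequency, never above the
 * request.
 */
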
long omap1_round_ext_clk_rate(struct clk *clk, unsigned long rate)
{
	return 96000000 / calc_ext_dsor(rate);
}

void omap1_init_ext_clk(struct clk *clk)
{
	unsigned dsor;
	__u16 ratio_bits;

	/* Determine current rate and ensure clock is based on 96MHz APLL */
	ratio_bits = __raw_readw(clk->enable_reg) & ~1;
	__raw_writew(ratio_bits, clk->enable_reg);

	ratio_bits = (ratio_bits & 0xfc) >> 2;
	if (ratio_bits > 6)
		dsor = (ratio_bits - 6) * 2 + 8;
	else
		dsor = ratio_bits + 2;

	clk->rate = 96000000 / dsor;
}

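/*
 * Example (added): a RATIO_SEL field of 9 read back from the enable
 * register decodes to dsor = (9 - 6) * 2 + 8 = 14, i.e. the
 * bootloader left the clock at 96 MHz / 14; clearing bit 0 first
 * forces the source back to the 96 MHz APLL so this calculation
 * holds.
 */
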
int omap1_clk_enable(struct clk *clk)
{
	int ret = 0;

	if (clk->usecount++ == 0) {
		if (clk->parent) {
			ret = omap1_clk_enable(clk->parent);
			if (ret)
				goto err;

			if (clk->flags & CLOCK_NO_IDLE_PARENT)
				omap1_clk_deny_idle(clk->parent);
		}

		ret = clk->ops->enable(clk);
		if (ret) {
			if (clk->parent)
				omap1_clk_disable(clk->parent);
			goto err;
		}
	}
	return ret;

err:
	clk->usecount--;
	return ret;
}

void omap1_clk_disable(struct clk *clk)
{
	if (clk->usecount > 0 && !(--clk->usecount)) {
		clk->ops->disable(clk);
		if (likely(clk->parent)) {
			omap1_clk_disable(clk->parent);
			if (clk->flags & CLOCK_NO_IDLE_PARENT)
				omap1_clk_allow_idle(clk->parent);
		}
	}
}

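/*
 * Usage note (added): omap1_clk_enable()/omap1_clk_disable() are
 * strictly reference counted. The first enable also enables the
 * parent and, for CLOCK_NO_IDLE_PARENT, keeps the parent out of idle;
 * only the call that drops usecount back to zero undoes both, so
 * enable/disable calls must stay balanced.
 */
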
static int omap1_clk_enable_generic(struct clk *clk)
{
	__u16 regval16;
	__u32 regval32;

	if (unlikely(clk->enable_reg == NULL)) {
		printk(KERN_ERR "clock.c: Enable for %s without enable code\n",
		       clk->name);
		return -EINVAL;
	}

	if (clk->flags & ENABLE_REG_32BIT) {
		regval32 = __raw_readl(clk->enable_reg);
		regval32 |= (1 << clk->enable_bit);
		__raw_writel(regval32, clk->enable_reg);
	} else {
		regval16 = __raw_readw(clk->enable_reg);
		regval16 |= (1 << clk->enable_bit);
		__raw_writew(regval16, clk->enable_reg);
	}

	return 0;
}

static void omap1_clk_disable_generic(struct clk *clk)
{
	__u16 regval16;
	__u32 regval32;

	if (clk->enable_reg == NULL)
		return;

	if (clk->flags & ENABLE_REG_32BIT) {
		regval32 = __raw_readl(clk->enable_reg);
		regval32 &= ~(1 << clk->enable_bit);
		__raw_writel(regval32, clk->enable_reg);
	} else {
		regval16 = __raw_readw(clk->enable_reg);
		regval16 &= ~(1 << clk->enable_bit);
		__raw_writew(regval16, clk->enable_reg);
	}
}

const struct clkops clkops_generic = {
	.enable		= omap1_clk_enable_generic,
	.disable	= omap1_clk_disable_generic,
};

static int omap1_clk_enable_dsp_domain(struct clk *clk)
{
	int retval;

	retval = omap1_clk_enable(api_ck_p);
	if (!retval) {
		retval = omap1_clk_enable_generic(clk);
		omap1_clk_disable(api_ck_p);
	}

	return retval;
}

static void omap1_clk_disable_dsp_domain(struct clk *clk)
{
	if (omap1_clk_enable(api_ck_p) == 0) {
		omap1_clk_disable_generic(clk);
		omap1_clk_disable(api_ck_p);
	}
}

const struct clkops clkops_dspck = {
	.enable		= omap1_clk_enable_dsp_domain,
	.disable	= omap1_clk_disable_dsp_domain,
};

/* XXX SYSC register handling does not belong in the clock framework */
static int omap1_clk_enable_uart_functional_16xx(struct clk *clk)
{
	int ret;
	struct uart_clk *uclk;

	ret = omap1_clk_enable_generic(clk);
	if (ret == 0) {
		/* Set smart idle acknowledgement mode */
		uclk = (struct uart_clk *)clk;
		omap_writeb((omap_readb(uclk->sysc_addr) & ~0x10) | 8,
			    uclk->sysc_addr);
	}

	return ret;
}

/* XXX SYSC register handling does not belong in the clock framework */
static void omap1_clk_disable_uart_functional_16xx(struct clk *clk)
{
	struct uart_clk *uclk;

	/* Set force idle acknowledgement mode */
	uclk = (struct uart_clk *)clk;
	omap_writeb((omap_readb(uclk->sysc_addr) & ~0x18), uclk->sysc_addr);

	omap1_clk_disable_generic(clk);
}

/* XXX SYSC register handling does not belong in the clock framework */
const struct clkops clkops_uart_16xx = {
	.enable		= omap1_clk_enable_uart_functional_16xx,
	.disable	= omap1_clk_disable_uart_functional_16xx,
};

long omap1_clk_round_rate(struct clk *clk, unsigned long rate)
{
	if (clk->round_rate != NULL)
		return clk->round_rate(clk, rate);

	return clk->rate;
}

int omap1_clk_set_rate(struct clk *clk, unsigned long rate)
{
	int ret = -EINVAL;

	if (clk->set_rate)
		ret = clk->set_rate(clk, rate);

	return ret;
}

/*
 * Omap1 clock reset and init functions
 */

#ifdef CONFIG_OMAP_RESET_CLOCKS

void omap1_clk_disable_unused(struct clk *clk)
{
	__u32 regval32;

	/* Clocks in the DSP domain need api_ck. Just assume bootloader
	 * has not enabled any DSP clocks */
	if (clk->enable_reg == DSP_IDLECT2) {
		pr_info("Skipping reset check for DSP domain clock \"%s\"\n",
			clk->name);
		return;
	}

	/* Is the clock already disabled? */
	if (clk->flags & ENABLE_REG_32BIT)
		regval32 = __raw_readl(clk->enable_reg);
	else
		regval32 = __raw_readw(clk->enable_reg);

	if ((regval32 & (1 << clk->enable_bit)) == 0)
		return;

	printk(KERN_INFO "Disabling unused clock \"%s\"... ", clk->name);
	clk->ops->disable(clk);
	printk(" done\n");
}

#endif

int clk_enable(struct clk *clk)
{
	unsigned long flags;
	int ret;

	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	spin_lock_irqsave(&clockfw_lock, flags);
	ret = omap1_clk_enable(clk);
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_enable);

void clk_disable(struct clk *clk)
{
	unsigned long flags;

	if (clk == NULL || IS_ERR(clk))
		return;

	spin_lock_irqsave(&clockfw_lock, flags);
	if (clk->usecount == 0) {
		pr_err("Trying disable clock %s with 0 usecount\n",
		       clk->name);
		WARN_ON(1);
		goto out;
	}

	omap1_clk_disable(clk);

out:
	spin_unlock_irqrestore(&clockfw_lock, flags);
}
EXPORT_SYMBOL(clk_disable);

unsigned long clk_get_rate(struct clk *clk)
{
	unsigned long flags;
	unsigned long ret;

	if (clk == NULL || IS_ERR(clk))
		return 0;

	spin_lock_irqsave(&clockfw_lock, flags);
	ret = clk->rate;
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_get_rate);

/*
 * Optional clock functions defined in include/linux/clk.h
 */

long clk_round_rate(struct clk *clk, unsigned long rate)
{
	unsigned long flags;
	long ret;

	if (clk == NULL || IS_ERR(clk))
		return 0;

	spin_lock_irqsave(&clockfw_lock, flags);
	ret = omap1_clk_round_rate(clk, rate);
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_round_rate);

int clk_set_rate(struct clk *clk, unsigned long rate)
{
	unsigned long flags;
	int ret = -EINVAL;

	if (clk == NULL || IS_ERR(clk))
		return ret;

	spin_lock_irqsave(&clockfw_lock, flags);
	ret = omap1_clk_set_rate(clk, rate);
	if (ret == 0)
		propagate_rate(clk);
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_set_rate);

int clk_set_parent(struct clk *clk, struct clk *parent)
{
	WARN_ONCE(1, "clk_set_parent() not implemented for OMAP1\n");

	return -EINVAL;
}
EXPORT_SYMBOL(clk_set_parent);

struct clk *clk_get_parent(struct clk *clk)
{
	return clk->parent;
}
EXPORT_SYMBOL(clk_get_parent);

/*
 * OMAP specific clock functions shared between omap1 and omap2
 */

/* Used for clocks that always have same value as the parent clock */
unsigned long followparent_recalc(struct clk *clk)
{
	return clk->parent->rate;
}

/*
 * Used for clocks that have the same value as the parent clock,
 * divided by some factor
 */
unsigned long omap_fixed_divisor_recalc(struct clk *clk)
{
	WARN_ON(!clk->fixed_div);

	return clk->parent->rate / clk->fixed_div;
}

void clk_reparent(struct clk *child, struct clk *parent)
{
	list_del_init(&child->sibling);
	if (parent)
		list_add(&child->sibling, &parent->children);
	child->parent = parent;

	/* now do the debugfs renaming to reattach the child
	   to the proper parent */
}

/* Propagate rate to children */
void propagate_rate(struct clk *tclk)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &tclk->children, sibling) {
		if (clkp->recalc)
			clkp->rate = clkp->recalc(clkp);
		propagate_rate(clkp);
	}
}

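/*
 * Note (added): propagate_rate() walks clk->children depth-first, so
 * changing e.g. ck_dpll1's rate re-runs .recalc for every clock below
 * it; a clock without a .recalc hook keeps its cached rate while its
 * own children are still visited.
 */
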
static LIST_HEAD(root_clks);

/**
 * recalculate_root_clocks - recalculate and propagate all root clocks
 *
 * Recalculates all root clocks (clocks with no parent), which if the
 * clock's .recalc is set correctly, should also propagate their rates.
 * Called at init.
 */
void recalculate_root_clocks(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &root_clks, sibling) {
		if (clkp->recalc)
			clkp->rate = clkp->recalc(clkp);
		propagate_rate(clkp);
	}
}

/**
 * clk_preinit - initialize any fields in the struct clk before clk init
 * @clk: struct clk * to initialize
 *
 * Initialize any struct clk fields needed before normal clk initialization
 * can run.  No return value.
 */
void clk_preinit(struct clk *clk)
{
	INIT_LIST_HEAD(&clk->children);
}

int clk_register(struct clk *clk)
{
	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	/*
	 * trap out already registered clocks
	 */
	if (clk->node.next || clk->node.prev)
		return 0;

	mutex_lock(&clocks_mutex);
	if (clk->parent)
		list_add(&clk->sibling, &clk->parent->children);
	else
		list_add(&clk->sibling, &root_clks);

	list_add(&clk->node, &clocks);
	if (clk->init)
		clk->init(clk);
	mutex_unlock(&clocks_mutex);

	return 0;
}
EXPORT_SYMBOL(clk_register);

void clk_unregister(struct clk *clk)
{
	if (clk == NULL || IS_ERR(clk))
		return;

	mutex_lock(&clocks_mutex);
	list_del(&clk->sibling);
	list_del(&clk->node);
	mutex_unlock(&clocks_mutex);
}
EXPORT_SYMBOL(clk_unregister);

void clk_enable_init_clocks(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &clocks, node)
		if (clkp->flags & ENABLE_ON_INIT)
			clk_enable(clkp);
}

/**
 * omap_clk_get_by_name - locate OMAP struct clk by its name
 * @name: name of the struct clk to locate
 *
 * Locate an OMAP struct clk by its name.  Assumes that struct clk
 * names are unique.  Returns NULL if not found or a pointer to the
 * struct clk if found.
 */
struct clk *omap_clk_get_by_name(const char *name)
{
	struct clk *c;
	struct clk *ret = NULL;

	mutex_lock(&clocks_mutex);

	list_for_each_entry(c, &clocks, node) {
		if (!strcmp(c->name, name)) {
			ret = c;
			break;
		}
	}

	mutex_unlock(&clocks_mutex);

	return ret;
}

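/*
 * Usage sketch (added, hypothetical caller): board or PM code can
 * resolve a clock by name before clkdev aliases are set up, e.g.
 *
 *	struct clk *ck = omap_clk_get_by_name("tc_ck");
 *	if (ck)
 *		pr_debug("tc_ck runs at %lu Hz\n", clk_get_rate(ck));
 *
 * The lookup is a linear scan of the global clock list under
 * clocks_mutex, so it must not be used from atomic context.
 */
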
int omap_clk_enable_autoidle_all(void)
{
	struct clk *c;
	unsigned long flags;

	spin_lock_irqsave(&clockfw_lock, flags);

	list_for_each_entry(c, &clocks, node)
		if (c->ops->allow_idle)
			c->ops->allow_idle(c);

	spin_unlock_irqrestore(&clockfw_lock, flags);

	return 0;
}

int omap_clk_disable_autoidle_all(void)
{
	struct clk *c;
	unsigned long flags;

	spin_lock_irqsave(&clockfw_lock, flags);

	list_for_each_entry(c, &clocks, node)
		if (c->ops->deny_idle)
			c->ops->deny_idle(c);

	spin_unlock_irqrestore(&clockfw_lock, flags);

	return 0;
}

static int clkll_enable_null(struct clk *clk)
{
	return 0;
}

static void clkll_disable_null(struct clk *clk)
{
}

const struct clkops clkops_null = {
	.enable		= clkll_enable_null,
	.disable	= clkll_disable_null,
};

/*
 * Used for clock aliases that are needed on some OMAPs, but not others
 */
struct clk dummy_ck = {
	.name	= "dummy",
	.ops	= &clkops_null,
};

#ifdef CONFIG_OMAP_RESET_CLOCKS
/*
 * Disable any unused clocks left on by the bootloader
 */
static int __init clk_disable_unused(void)
{
	struct clk *ck;
	unsigned long flags;

	pr_info("clock: disabling unused clocks to save power\n");

	spin_lock_irqsave(&clockfw_lock, flags);
	list_for_each_entry(ck, &clocks, node) {
		if (ck->ops == &clkops_null)
			continue;

		if (ck->usecount > 0 || !ck->enable_reg)
			continue;

		omap1_clk_disable_unused(ck);
	}
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return 0;
}
late_initcall(clk_disable_unused);
late_initcall(omap_clk_enable_autoidle_all);
#endif

#if defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS)
/*
 * debugfs support to trace clock tree hierarchy and attributes
 */

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *clk_debugfs_root;

static int clk_dbg_show_summary(struct seq_file *s, void *unused)
{
	struct clk *c;
	struct clk *pa;

	mutex_lock(&clocks_mutex);
	seq_printf(s, "%-30s %-30s %-10s %s\n",
		   "clock-name", "parent-name", "rate", "use-count");

	list_for_each_entry(c, &clocks, node) {
		pa = c->parent;
		seq_printf(s, "%-30s %-30s %-10lu %d\n",
			   c->name, pa ? pa->name : "none", c->rate,
			   c->usecount);
	}
	mutex_unlock(&clocks_mutex);

	return 0;
}

static int clk_dbg_open(struct inode *inode, struct file *file)
{
	return single_open(file, clk_dbg_show_summary, inode->i_private);
}

static const struct file_operations debug_clock_fops = {
	.open		= clk_dbg_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int clk_debugfs_register_one(struct clk *c)
{
	int err;
	struct dentry *d;
	struct clk *pa = c->parent;

	d = debugfs_create_dir(c->name, pa ? pa->dent : clk_debugfs_root);
	if (!d)
		return -ENOMEM;
	c->dent = d;

	d = debugfs_create_u8("usecount", S_IRUGO, c->dent, (u8 *)&c->usecount);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	d = debugfs_create_u32("rate", S_IRUGO, c->dent, (u32 *)&c->rate);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	d = debugfs_create_x32("flags", S_IRUGO, c->dent, (u32 *)&c->flags);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	return 0;

err_out:
	debugfs_remove_recursive(c->dent);
	return err;
}

static int clk_debugfs_register(struct clk *c)
{
	int err;
	struct clk *pa = c->parent;

	if (pa && !pa->dent) {
		err = clk_debugfs_register(pa);
		if (err)
			return err;
	}

	if (!c->dent) {
		err = clk_debugfs_register_one(c);
		if (err)
			return err;
	}
	return 0;
}

static int __init clk_debugfs_init(void)
{
	struct clk *c;
	struct dentry *d;
	int err;

	d = debugfs_create_dir("clock", NULL);
	if (!d)
		return -ENOMEM;
	clk_debugfs_root = d;

	list_for_each_entry(c, &clocks, node) {
		err = clk_debugfs_register(c);
		if (err)
			goto err_out;
	}

	d = debugfs_create_file("summary", S_IRUGO,
		d, NULL, &debug_clock_fops);
	if (!d)
		return -ENOMEM;

	return 0;
err_out:
	debugfs_remove_recursive(clk_debugfs_root);
	return err;
}
late_initcall(clk_debugfs_init);

#endif /* defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS) */