/*
 *  linux/arch/arm/plat-omap/clock.c
 *
 *  Copyright (C) 2004 - 2008 Nokia corporation
 *  Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
 *
 *  Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
13 #include <linux/kernel.h>
14 #include <linux/init.h>
15 #include <linux/module.h>
16 #include <linux/list.h>
17 #include <linux/errno.h>
18 #include <linux/err.h>
19 #include <linux/string.h>
20 #include <linux/clk.h>
21 #include <linux/mutex.h>
22 #include <linux/platform_device.h>
23 #include <linux/cpufreq.h>
24 #include <linux/debugfs.h>
27 #include <plat/clock.h>
/* List of every registered clock; guarded by clocks_mutex. */
static LIST_HEAD(clocks);
static DEFINE_MUTEX(clocks_mutex);
/* Protects clock rate/enable state touched from atomic context. */
static DEFINE_SPINLOCK(clockfw_lock);

/* SoC-specific clock operations, installed once via clk_init(). */
static struct clk_functions *arch_clock;
/*-------------------------------------------------------------------------
 * Standard clock functions defined in include/linux/clk.h
 *-------------------------------------------------------------------------*/
39 int clk_enable(struct clk
*clk
)
44 if (clk
== NULL
|| IS_ERR(clk
))
47 spin_lock_irqsave(&clockfw_lock
, flags
);
48 if (arch_clock
->clk_enable
)
49 ret
= arch_clock
->clk_enable(clk
);
50 spin_unlock_irqrestore(&clockfw_lock
, flags
);
54 EXPORT_SYMBOL(clk_enable
);
56 void clk_disable(struct clk
*clk
)
60 if (clk
== NULL
|| IS_ERR(clk
))
63 spin_lock_irqsave(&clockfw_lock
, flags
);
64 if (clk
->usecount
== 0) {
65 printk(KERN_ERR
"Trying disable clock %s with 0 usecount\n",
71 if (arch_clock
->clk_disable
)
72 arch_clock
->clk_disable(clk
);
75 spin_unlock_irqrestore(&clockfw_lock
, flags
);
77 EXPORT_SYMBOL(clk_disable
);
79 unsigned long clk_get_rate(struct clk
*clk
)
82 unsigned long ret
= 0;
84 if (clk
== NULL
|| IS_ERR(clk
))
87 spin_lock_irqsave(&clockfw_lock
, flags
);
89 spin_unlock_irqrestore(&clockfw_lock
, flags
);
93 EXPORT_SYMBOL(clk_get_rate
);
/*-------------------------------------------------------------------------
 * Optional clock functions defined in include/linux/clk.h
 *-------------------------------------------------------------------------*/
99 long clk_round_rate(struct clk
*clk
, unsigned long rate
)
104 if (clk
== NULL
|| IS_ERR(clk
))
107 spin_lock_irqsave(&clockfw_lock
, flags
);
108 if (arch_clock
->clk_round_rate
)
109 ret
= arch_clock
->clk_round_rate(clk
, rate
);
110 spin_unlock_irqrestore(&clockfw_lock
, flags
);
114 EXPORT_SYMBOL(clk_round_rate
);
116 int clk_set_rate(struct clk
*clk
, unsigned long rate
)
121 if (clk
== NULL
|| IS_ERR(clk
))
124 spin_lock_irqsave(&clockfw_lock
, flags
);
125 if (arch_clock
->clk_set_rate
)
126 ret
= arch_clock
->clk_set_rate(clk
, rate
);
129 clk
->rate
= clk
->recalc(clk
);
132 spin_unlock_irqrestore(&clockfw_lock
, flags
);
136 EXPORT_SYMBOL(clk_set_rate
);
138 int clk_set_parent(struct clk
*clk
, struct clk
*parent
)
143 if (cpu_is_omap44xx())
144 /* OMAP4 clk framework not supported yet */
146 if (clk
== NULL
|| IS_ERR(clk
) || parent
== NULL
|| IS_ERR(parent
))
149 spin_lock_irqsave(&clockfw_lock
, flags
);
150 if (clk
->usecount
== 0) {
151 if (arch_clock
->clk_set_parent
)
152 ret
= arch_clock
->clk_set_parent(clk
, parent
);
155 clk
->rate
= clk
->recalc(clk
);
160 spin_unlock_irqrestore(&clockfw_lock
, flags
);
164 EXPORT_SYMBOL(clk_set_parent
);
166 struct clk
*clk_get_parent(struct clk
*clk
)
170 EXPORT_SYMBOL(clk_get_parent
);
/*-------------------------------------------------------------------------
 * OMAP specific clock functions shared between omap1 and omap2
 *-------------------------------------------------------------------------*/
176 unsigned int __initdata mpurate
;
179 * By default we use the rate set by the bootloader.
180 * You can override this with mpurate= cmdline option.
182 static int __init
omap_clk_setup(char *str
)
184 get_option(&str
, &mpurate
);
194 __setup("mpurate=", omap_clk_setup
);
196 /* Used for clocks that always have same value as the parent clock */
197 unsigned long followparent_recalc(struct clk
*clk
)
199 return clk
->parent
->rate
;
202 void clk_reparent(struct clk
*child
, struct clk
*parent
)
204 list_del_init(&child
->sibling
);
206 list_add(&child
->sibling
, &parent
->children
);
207 child
->parent
= parent
;
209 /* now do the debugfs renaming to reattach the child
210 to the proper parent */
213 /* Propagate rate to children */
214 void propagate_rate(struct clk
* tclk
)
218 list_for_each_entry(clkp
, &tclk
->children
, sibling
) {
220 clkp
->rate
= clkp
->recalc(clkp
);
221 propagate_rate(clkp
);
/* Clocks with no parent; roots of the propagation tree */
static LIST_HEAD(root_clks);
228 * recalculate_root_clocks - recalculate and propagate all root clocks
230 * Recalculates all root clocks (clocks with no parent), which if the
231 * clock's .recalc is set correctly, should also propagate their rates.
234 void recalculate_root_clocks(void)
238 list_for_each_entry(clkp
, &root_clks
, sibling
) {
240 clkp
->rate
= clkp
->recalc(clkp
);
241 propagate_rate(clkp
);
246 * clk_preinit - initialize any fields in the struct clk before clk init
247 * @clk: struct clk * to initialize
249 * Initialize any struct clk fields needed before normal clk initialization
250 * can run. No return value.
252 void clk_preinit(struct clk
*clk
)
254 INIT_LIST_HEAD(&clk
->children
);
257 int clk_register(struct clk
*clk
)
259 if (clk
== NULL
|| IS_ERR(clk
))
263 * trap out already registered clocks
265 if (clk
->node
.next
|| clk
->node
.prev
)
268 mutex_lock(&clocks_mutex
);
270 list_add(&clk
->sibling
, &clk
->parent
->children
);
272 list_add(&clk
->sibling
, &root_clks
);
274 list_add(&clk
->node
, &clocks
);
277 mutex_unlock(&clocks_mutex
);
281 EXPORT_SYMBOL(clk_register
);
283 void clk_unregister(struct clk
*clk
)
285 if (clk
== NULL
|| IS_ERR(clk
))
288 mutex_lock(&clocks_mutex
);
289 list_del(&clk
->sibling
);
290 list_del(&clk
->node
);
291 mutex_unlock(&clocks_mutex
);
293 EXPORT_SYMBOL(clk_unregister
);
295 void clk_enable_init_clocks(void)
299 list_for_each_entry(clkp
, &clocks
, node
) {
300 if (clkp
->flags
& ENABLE_ON_INIT
)
308 static int clkll_enable_null(struct clk
*clk
)
313 static void clkll_disable_null(struct clk
*clk
)
317 const struct clkops clkops_null
= {
318 .enable
= clkll_enable_null
,
319 .disable
= clkll_disable_null
,
#ifdef CONFIG_CPU_FREQ
/* Build the cpufreq frequency table via the SoC-specific hook. */
void clk_init_cpufreq_table(struct cpufreq_frequency_table **table)
{
	unsigned long flags;

	spin_lock_irqsave(&clockfw_lock, flags);
	if (arch_clock->clk_init_cpufreq_table)
		arch_clock->clk_init_cpufreq_table(table);
	spin_unlock_irqrestore(&clockfw_lock, flags);
}

/* Tear down the cpufreq frequency table via the SoC-specific hook. */
void clk_exit_cpufreq_table(struct cpufreq_frequency_table **table)
{
	unsigned long flags;

	spin_lock_irqsave(&clockfw_lock, flags);
	if (arch_clock->clk_exit_cpufreq_table)
		arch_clock->clk_exit_cpufreq_table(table);
	spin_unlock_irqrestore(&clockfw_lock, flags);
}
#endif
/*-------------------------------------------------------------------------*/
#ifdef CONFIG_OMAP_RESET_CLOCKS
/*
 * Disable any unused clocks left on by the bootloader
 */
static int __init clk_disable_unused(void)
{
	struct clk *ck;
	unsigned long flags;

	list_for_each_entry(ck, &clocks, node) {
		/* Clocks with no real gate control cannot be "left on" */
		if (ck->ops == &clkops_null)
			continue;

		if (ck->usecount > 0 || ck->enable_reg == 0)
			continue;

		spin_lock_irqsave(&clockfw_lock, flags);
		if (arch_clock->clk_disable_unused)
			arch_clock->clk_disable_unused(ck);
		spin_unlock_irqrestore(&clockfw_lock, flags);
	}

	return 0;
}
late_initcall(clk_disable_unused);
#endif
373 int __init
clk_init(struct clk_functions
* custom_clocks
)
375 if (!custom_clocks
) {
376 printk(KERN_ERR
"No custom clock functions registered\n");
380 arch_clock
= custom_clocks
;
385 #if defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS)
387 * debugfs support to trace clock tree hierarchy and attributes
389 static struct dentry
*clk_debugfs_root
;
391 static int clk_debugfs_register_one(struct clk
*c
)
394 struct dentry
*d
, *child
, *child_tmp
;
395 struct clk
*pa
= c
->parent
;
399 p
+= sprintf(p
, "%s", c
->name
);
401 sprintf(p
, ":%d", c
->id
);
402 d
= debugfs_create_dir(s
, pa
? pa
->dent
: clk_debugfs_root
);
407 d
= debugfs_create_u8("usecount", S_IRUGO
, c
->dent
, (u8
*)&c
->usecount
);
412 d
= debugfs_create_u32("rate", S_IRUGO
, c
->dent
, (u32
*)&c
->rate
);
417 d
= debugfs_create_x32("flags", S_IRUGO
, c
->dent
, (u32
*)&c
->flags
);
426 list_for_each_entry_safe(child
, child_tmp
, &d
->d_subdirs
, d_u
.d_child
)
427 debugfs_remove(child
);
428 debugfs_remove(c
->dent
);
432 static int clk_debugfs_register(struct clk
*c
)
435 struct clk
*pa
= c
->parent
;
437 if (pa
&& !pa
->dent
) {
438 err
= clk_debugfs_register(pa
);
444 err
= clk_debugfs_register_one(c
);
451 static int __init
clk_debugfs_init(void)
457 d
= debugfs_create_dir("clock", NULL
);
460 clk_debugfs_root
= d
;
462 list_for_each_entry(c
, &clocks
, node
) {
463 err
= clk_debugfs_register(c
);
469 debugfs_remove_recursive(clk_debugfs_root
);
472 late_initcall(clk_debugfs_init
);
474 #endif /* defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS) */