/*
 * arch/sh/kernel/cpu/clock.c - SuperH clock framework
 *
 *  Copyright (C) 2005 - 2009  Paul Mundt
 *
 * This clock framework is derived from the OMAP version by:
 *
 *	Copyright (C) 2004 - 2008 Nokia Corporation
 *	Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
 *
 *  Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
 *
 * With clkdev bits:
 *
 *	Copyright (C) 2008 Russell King.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/kobject.h>
#include <linux/sysdev.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/debugfs.h>
#include <linux/cpufreq.h>
#include <asm/clock.h>
#include <asm/machvec.h>
static LIST_HEAD(clock_list);
static DEFINE_SPINLOCK(clock_lock);
static DEFINE_MUTEX(clock_list_sem);
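/*
 * clk_rate_table_build - build a cpufreq frequency table for a clock
 *
 * Fills @freq_table with one entry per candidate divisor/multiplier pair
 * from @src_table, computed against the parent clock's rate. Entries with
 * a zero divisor/multiplier, or whose bit is clear in the optional
 * @bitmap, are marked CPUFREQ_ENTRY_INVALID. The table is terminated with
 * a CPUFREQ_TABLE_END entry, so @freq_table must have room for
 * nr_freqs + 1 entries.
 */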
void clk_rate_table_build(struct clk *clk,
			  struct cpufreq_frequency_table *freq_table,
			  int nr_freqs,
			  struct clk_div_mult_table *src_table,
			  unsigned long *bitmap)
{
	unsigned long mult, div;
	unsigned long freq;
	int i;

	for (i = 0; i < nr_freqs; i++) {
		div = 1;
		mult = 1;

		if (src_table->divisors && i < src_table->nr_divisors)
			div = src_table->divisors[i];

		if (src_table->multipliers && i < src_table->nr_multipliers)
			mult = src_table->multipliers[i];

		if (!div || !mult || (bitmap && !test_bit(i, bitmap)))
			freq = CPUFREQ_ENTRY_INVALID;
		else
			freq = clk->parent->rate * mult / div;

		freq_table[i].index = i;
		freq_table[i].frequency = freq;
	}

	/* Termination entry */
	freq_table[i].index = i;
	freq_table[i].frequency = CPUFREQ_TABLE_END;
}
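/*
 * clk_rate_table_round - find the table rate closest to a requested rate
 *
 * Scans @freq_table, skipping invalid entries, and tracks the valid
 * frequency with the smallest absolute error relative to @rate; requests
 * outside the range seen during the scan are clamped to the boundary
 * rates.
 */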
long clk_rate_table_round(struct clk *clk,
			  struct cpufreq_frequency_table *freq_table,
			  unsigned long rate)
{
	unsigned long rate_error, rate_error_prev = ~0UL;
	unsigned long rate_best_fit = rate;
	unsigned long highest, lowest;
	int i;

	highest = lowest = 0;

	for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
		unsigned long freq = freq_table[i].frequency;

		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;

		if (freq > highest)
			highest = freq;
		if (freq < lowest)
			lowest = freq;

		rate_error = abs(freq - rate);
		if (rate_error < rate_error_prev) {
			rate_best_fit = freq;
			rate_error_prev = rate_error;
		}

		if (rate_error == 0)
			break;
	}

	if (rate >= highest)
		rate_best_fit = highest;
	if (rate <= lowest)
		rate_best_fit = lowest;

	return rate_best_fit;
}
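/*
 * clk_rate_table_find - look up the table index of an exact rate
 *
 * Returns the index of the first valid entry matching @rate exactly,
 * or -ENOENT if no entry matches.
 */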
int clk_rate_table_find(struct clk *clk,
			struct cpufreq_frequency_table *freq_table,
			unsigned long rate)
{
	int i;

	for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
		unsigned long freq = freq_table[i].frequency;

		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;

		if (freq == rate)
			return i;
	}

	return -ENOENT;
}
/* Used for clocks that always have same value as the parent clock */
unsigned long followparent_recalc(struct clk *clk)
{
	return clk->parent ? clk->parent->rate : 0;
}
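/*
 * clk_reparent - move a clock to a new position in the clock tree
 *
 * Unlinks @child from its current sibling list and, when @parent is
 * non-NULL, links it onto the new parent's children list. The caller is
 * expected to provide the necessary locking (clk_set_parent() calls this
 * under clock_lock).
 */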
int clk_reparent(struct clk *child, struct clk *parent)
{
	list_del_init(&child->sibling);
	if (parent)
		list_add(&child->sibling, &parent->children);
	child->parent = parent;

	/*
	 * now do the debugfs renaming to reattach the child
	 * to the proper parent
	 */

	return 0;
}
/* Propagate rate to children */
void propagate_rate(struct clk *tclk)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &tclk->children, sibling) {
		if (clkp->ops && clkp->ops->recalc)
			clkp->rate = clkp->ops->recalc(clkp);

		propagate_rate(clkp);
	}
}
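/*
 * Clock enable/disable is refcounted: __clk_disable() only turns the
 * hardware off when the last user drops its reference, and then releases
 * the reference this clock holds on its parent. Callers must hold
 * clock_lock.
 */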
static void __clk_disable(struct clk *clk)
{
	if (clk->usecount == 0) {
		printk(KERN_ERR "Trying disable clock %s with 0 usecount\n",
		       clk->name);
		WARN_ON(1);
		return;
	}

	if (!(--clk->usecount)) {
		if (likely(clk->ops && clk->ops->disable))
			clk->ops->disable(clk);
		if (likely(clk->parent))
			__clk_disable(clk->parent);
	}
}
void clk_disable(struct clk *clk)
{
	unsigned long flags;

	if (!clk)
		return;

	spin_lock_irqsave(&clock_lock, flags);
	__clk_disable(clk);
	spin_unlock_irqrestore(&clock_lock, flags);
}
EXPORT_SYMBOL_GPL(clk_disable);
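/*
 * __clk_enable() takes a reference on the whole parent chain: on the
 * 0 -> 1 transition the parent is enabled first, then this clock's own
 * .enable op is invoked. If the op fails, the parent reference is dropped
 * again so the refcounts stay balanced. Callers must hold clock_lock.
 */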
static int __clk_enable(struct clk *clk)
{
	int ret = 0;

	if (clk->usecount++ == 0) {
		if (clk->parent) {
			ret = __clk_enable(clk->parent);
			if (unlikely(ret))
				goto err;
		}

		if (clk->ops && clk->ops->enable) {
			ret = clk->ops->enable(clk);
			if (ret) {
				if (clk->parent)
					__clk_disable(clk->parent);
				goto err;
			}
		}
	}

	return ret;
err:
	clk->usecount--;
	return ret;
}
int clk_enable(struct clk *clk)
{
	unsigned long flags;
	int ret;

	if (!clk)
		return -EINVAL;

	spin_lock_irqsave(&clock_lock, flags);
	ret = __clk_enable(clk);
	spin_unlock_irqrestore(&clock_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_enable);
static LIST_HEAD(root_clks);

/**
 * recalculate_root_clocks - recalculate and propagate all root clocks
 *
 * Recalculates all root clocks (clocks with no parent), which if the
 * clock's .recalc is set correctly, should also propagate their rates.
 * Called at init.
 */
void recalculate_root_clocks(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &root_clks, sibling) {
		if (clkp->ops && clkp->ops->recalc)
			clkp->rate = clkp->ops->recalc(clkp);
		propagate_rate(clkp);
	}
}
int clk_register(struct clk *clk)
{
	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	/*
	 * trap out already registered clocks
	 */
	if (clk->node.next || clk->node.prev)
		return 0;

	mutex_lock(&clock_list_sem);

	INIT_LIST_HEAD(&clk->children);
	clk->usecount = 0;

	if (clk->parent)
		list_add(&clk->sibling, &clk->parent->children);
	else
		list_add(&clk->sibling, &root_clks);

	list_add(&clk->node, &clock_list);
	if (clk->ops && clk->ops->init)
		clk->ops->init(clk);
	mutex_unlock(&clock_list_sem);

	return 0;
}
EXPORT_SYMBOL_GPL(clk_register);
void clk_unregister(struct clk *clk)
{
	mutex_lock(&clock_list_sem);
	list_del(&clk->sibling);
	list_del(&clk->node);
	mutex_unlock(&clock_list_sem);
}
EXPORT_SYMBOL_GPL(clk_unregister);
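/*
 * Enable every registered clock flagged CLK_ENABLE_ON_INIT. Called once
 * from clk_init() after the root clock rates have been recalculated.
 */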
static void clk_enable_init_clocks(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &clock_list, node)
		if (clkp->flags & CLK_ENABLE_ON_INIT)
			clk_enable(clkp);
}
unsigned long clk_get_rate(struct clk *clk)
{
	return clk->rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);
int clk_set_rate(struct clk *clk, unsigned long rate)
{
	return clk_set_rate_ex(clk, rate, 0);
}
EXPORT_SYMBOL_GPL(clk_set_rate);
int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id)
{
	int ret = -EOPNOTSUPP;
	unsigned long flags;

	spin_lock_irqsave(&clock_lock, flags);

	if (likely(clk->ops && clk->ops->set_rate)) {
		ret = clk->ops->set_rate(clk, rate, algo_id);
		if (ret != 0)
			goto out_unlock;
	} else {
		clk->rate = rate;
		ret = 0;
	}

	if (clk->ops && clk->ops->recalc)
		clk->rate = clk->ops->recalc(clk);

	propagate_rate(clk);

out_unlock:
	spin_unlock_irqrestore(&clock_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate_ex);
int clk_set_parent(struct clk *clk, struct clk *parent)
{
	unsigned long flags;
	int ret = -EINVAL;

	if (!parent || !clk)
		return ret;
	if (clk->parent == parent)
		return 0;

	spin_lock_irqsave(&clock_lock, flags);
	if (clk->usecount == 0) {
		if (clk->ops->set_parent)
			ret = clk->ops->set_parent(clk, parent);
		if (ret == 0)
			ret = clk_reparent(clk, parent);
		if (ret == 0) {
			pr_debug("clock: set parent of %s to %s (new rate %ld)\n",
				 clk->name, clk->parent->name, clk->rate);
			if (clk->ops->recalc)
				clk->rate = clk->ops->recalc(clk);
			propagate_rate(clk);
		}
	} else
		ret = -EBUSY;
	spin_unlock_irqrestore(&clock_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_parent);
struct clk *clk_get_parent(struct clk *clk)
{
	return clk->parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);
long clk_round_rate(struct clk *clk, unsigned long rate)
{
	if (likely(clk->ops && clk->ops->round_rate)) {
		unsigned long flags, rounded;

		spin_lock_irqsave(&clock_lock, flags);
		rounded = clk->ops->round_rate(clk, rate);
		spin_unlock_irqrestore(&clock_lock, flags);

		return rounded;
	}

	return clk_get_rate(clk);
}
EXPORT_SYMBOL_GPL(clk_round_rate);
/*
 * Find the correct struct clk for the device and connection ID.
 * We do slightly fuzzy matching here:
 *  An entry with a NULL ID is assumed to be a wildcard.
 *  If an entry has a device ID, it must match
 *  If an entry has a connection ID, it must match
 * Then we take the most specific entry - with the following
 * order of precedence: dev+con > dev only > con only.
 */
static struct clk *clk_find(const char *dev_id, const char *con_id)
{
	struct clk_lookup *p;
	struct clk *clk = NULL;
	int match, best = 0;

	list_for_each_entry(p, &clock_list, node) {
		match = 0;
		if (p->dev_id) {
			if (!dev_id || strcmp(p->dev_id, dev_id))
				continue;
			match += 2;
		}
		if (p->con_id) {
			if (!con_id || strcmp(p->con_id, con_id))
				continue;
			match += 1;
		}
		if (match == 0)
			continue;

		if (match > best) {
			clk = p->clk;
			best = match;
		}
	}
	return clk;
}
struct clk *clk_get_sys(const char *dev_id, const char *con_id)
{
	struct clk *clk;

	mutex_lock(&clock_list_sem);
	clk = clk_find(dev_id, con_id);
	mutex_unlock(&clock_list_sem);

	return clk ? clk : ERR_PTR(-ENOENT);
}
EXPORT_SYMBOL_GPL(clk_get_sys);
/*
 * Returns a clock. Note that we first try to use device id on the bus
 * and clock name. If this fails, we try to use clock name only.
 */
struct clk *clk_get(struct device *dev, const char *id)
{
	const char *dev_id = dev ? dev_name(dev) : NULL;
	struct clk *p, *clk = ERR_PTR(-ENOENT);
	int idno;

	clk = clk_get_sys(dev_id, id);
	if (clk && !IS_ERR(clk))
		return clk;

	if (dev == NULL || dev->bus != &platform_bus_type)
		idno = -1;
	else
		idno = to_platform_device(dev)->id;

	mutex_lock(&clock_list_sem);
	list_for_each_entry(p, &clock_list, node) {
		if (p->id == idno &&
		    strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
			clk = p;
			goto found;
		}
	}

	list_for_each_entry(p, &clock_list, node) {
		if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
			clk = p;
			break;
		}
	}

found:
	mutex_unlock(&clock_list_sem);

	return clk;
}
EXPORT_SYMBOL_GPL(clk_get);
void clk_put(struct clk *clk)
{
	if (clk && !IS_ERR(clk))
		module_put(clk->owner);
}
EXPORT_SYMBOL_GPL(clk_put);
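/*
 * Typical consumer usage of the API above, as an illustrative sketch
 * only ("module_clk" is a made-up clock name, not something this file
 * defines):
 *
 *	struct clk *clk;
 *
 *	clk = clk_get(&pdev->dev, "module_clk");
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 *
 *	clk_enable(clk);
 *	pr_info("running at %lu Hz\n", clk_get_rate(clk));
 *	...
 *	clk_disable(clk);
 *	clk_put(clk);
 */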
static int clks_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
	static pm_message_t prev_state;
	struct clk *clkp;

	switch (state.event) {
	case PM_EVENT_ON:
		/* Resuming from hibernation */
		if (prev_state.event != PM_EVENT_FREEZE)
			break;

		list_for_each_entry(clkp, &clock_list, node) {
			if (likely(clkp->ops)) {
				unsigned long rate = clkp->rate;

				if (likely(clkp->ops->set_parent))
					clkp->ops->set_parent(clkp,
						clkp->parent);
				if (likely(clkp->ops->set_rate))
					clkp->ops->set_rate(clkp,
						rate, NO_CHANGE);
				else if (likely(clkp->ops->recalc))
					clkp->rate = clkp->ops->recalc(clkp);
			}
		}
		break;
	case PM_EVENT_FREEZE:
		break;
	case PM_EVENT_SUSPEND:
		break;
	}

	prev_state = state;
	return 0;
}

static int clks_sysdev_resume(struct sys_device *dev)
{
	return clks_sysdev_suspend(dev, PMSG_ON);
}
static struct sysdev_class clks_sysdev_class = {
	.name = "clks",
};

static struct sysdev_driver clks_sysdev_driver = {
	.suspend = clks_sysdev_suspend,
	.resume = clks_sysdev_resume,
};

static struct sys_device clks_sysdev_dev = {
	.cls = &clks_sysdev_class,
};

static int __init clk_sysdev_init(void)
{
	sysdev_class_register(&clks_sysdev_class);
	sysdev_driver_register(&clks_sysdev_class, &clks_sysdev_driver);
	sysdev_register(&clks_sysdev_dev);

	return 0;
}
subsys_initcall(clk_sysdev_init);
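/*
 * clk_init - boot-time clock framework bring-up
 *
 * Registers the CPU clocks via arch_clk_init(), gives the board-specific
 * machine vector a chance to register its own clocks, then recalculates
 * all root clock rates and enables the CLK_ENABLE_ON_INIT clocks.
 */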
int __init clk_init(void)
{
	int ret;

	ret = arch_clk_init();
	if (unlikely(ret)) {
		pr_err("%s: CPU clock registration failed.\n", __func__);
		return ret;
	}

	if (sh_mv.mv_clk_init) {
		ret = sh_mv.mv_clk_init();
		if (unlikely(ret)) {
			pr_err("%s: machvec clock initialization failed.\n",
			       __func__);
			return ret;
		}
	}

	/* Kick the child clocks.. */
	recalculate_root_clocks();

	/* Enable the necessary init clocks */
	clk_enable_init_clocks();

	return ret;
}
/*
 * debugfs support to trace clock tree hierarchy and attributes
 */
static struct dentry *clk_debugfs_root;
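/*
 * Each clock gets a directory under its parent's directory (falling back
 * to the "clock" root for parentless clocks), containing read-only
 * "usecount", "rate" and "flags" attributes.
 */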
static int clk_debugfs_register_one(struct clk *c)
{
	int err;
	struct dentry *d, *child;
	struct clk *pa = c->parent;
	char s[255];
	char *p = s;

	p += sprintf(p, "%s", c->name);
	if (c->id >= 0)
		sprintf(p, ":%d", c->id);
	d = debugfs_create_dir(s, pa ? pa->dentry : clk_debugfs_root);
	if (!d)
		return -ENOMEM;
	c->dentry = d;

	d = debugfs_create_u8("usecount", S_IRUGO, c->dentry, (u8 *)&c->usecount);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	d = debugfs_create_u32("rate", S_IRUGO, c->dentry, (u32 *)&c->rate);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	d = debugfs_create_x32("flags", S_IRUGO, c->dentry, (u32 *)&c->flags);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	return 0;

err_out:
	d = c->dentry;
	list_for_each_entry(child, &d->d_subdirs, d_u.d_child)
		debugfs_remove(child);
	debugfs_remove(c->dentry);
	return err;
}
static int clk_debugfs_register(struct clk *c)
{
	int err;
	struct clk *pa = c->parent;

	if (pa && !pa->dentry) {
		err = clk_debugfs_register(pa);
		if (err)
			return err;
	}

	if (!c->dentry) {
		err = clk_debugfs_register_one(c);
		if (err)
			return err;
	}
	return 0;
}
static int __init clk_debugfs_init(void)
{
	struct clk *c;
	struct dentry *d;
	int err;

	d = debugfs_create_dir("clock", NULL);
	if (!d)
		return -ENOMEM;
	clk_debugfs_root = d;

	list_for_each_entry(c, &clock_list, node) {
		err = clk_debugfs_register(c);
		if (err)
			goto err_out;
	}
	return 0;
err_out:
	debugfs_remove(clk_debugfs_root); /* REVISIT: Cleanup correctly */
	return err;
}
late_initcall(clk_debugfs_init);