/*
 * SuperH clock framework
 *
 *  Copyright (C) 2005 - 2010  Paul Mundt
 *
 * This clock framework is derived from the OMAP version by:
 *
 *	Copyright (C) 2004 - 2008 Nokia Corporation
 *	Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
 *
 * Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#define pr_fmt(fmt) "clock: " fmt
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/sysdev.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/debugfs.h>
#include <linux/cpufreq.h>
#include <linux/clk.h>
#include <linux/sh_clk.h>
static LIST_HEAD(clock_list);
static DEFINE_SPINLOCK(clock_lock);
static DEFINE_MUTEX(clock_list_sem);
void clk_rate_table_build(struct clk *clk,
			  struct cpufreq_frequency_table *freq_table,
			  int nr_freqs,
			  struct clk_div_mult_table *src_table,
			  unsigned long *bitmap)
{
	unsigned long mult, div;
	unsigned long freq;
	int i;

	clk->nr_freqs = nr_freqs;

	for (i = 0; i < nr_freqs; i++) {
		div = 1;
		mult = 1;

		if (src_table->divisors && i < src_table->nr_divisors)
			div = src_table->divisors[i];

		if (src_table->multipliers && i < src_table->nr_multipliers)
			mult = src_table->multipliers[i];

		if (!div || !mult || (bitmap && !test_bit(i, bitmap)))
			freq = CPUFREQ_ENTRY_INVALID;
		else
			freq = clk->parent->rate * mult / div;

		freq_table[i].index = i;
		freq_table[i].frequency = freq;
	}

	/* Termination entry */
	freq_table[i].index = i;
	freq_table[i].frequency = CPUFREQ_TABLE_END;
}
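
/*
 * Illustrative sketch (hypothetical names, not from this file): a CPG
 * driver with a divisor table "div4_table" would typically build its
 * frequency table once at setup time:
 *
 *	static struct cpufreq_frequency_table freq_table[NR_DIVS + 1];
 *
 *	clk_rate_table_build(clk, freq_table, NR_DIVS,
 *			     &div4_table, div4_rate_bitmap);
 *
 * after which clk_rate_table_round() and clk_rate_table_find() can
 * operate on the table.
 */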
struct clk_rate_round_data;

struct clk_rate_round_data {
	unsigned long rate;
	unsigned int min, max;
	long (*func)(unsigned int, struct clk_rate_round_data *);
	void *arg;
};
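
/*
 * Walk the candidate frequencies generated by r->func() for every
 * position in [r->min, r->max]. The dangling if/else below swallows
 * invalid candidates (reported as 0) so the loop body only ever sees
 * usable frequencies.
 */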
#define for_each_frequency(pos, r, freq)			\
	for (pos = r->min, freq = r->func(pos, r);		\
	     pos <= r->max; pos++, freq = r->func(pos, r))	\
		if (unlikely(freq == 0))			\
			;					\
		else
static long clk_rate_round_helper(struct clk_rate_round_data *rounder)
{
	unsigned long rate_error, rate_error_prev = ~0UL;
	unsigned long highest, lowest, freq;
	long rate_best_fit = -ENOENT;
	int i;

	highest = 0;
	lowest = ~0UL;

	for_each_frequency(i, rounder, freq) {
		if (freq > highest)
			highest = freq;
		if (freq < lowest)
			lowest = freq;

		rate_error = abs(freq - rounder->rate);
		if (rate_error < rate_error_prev) {
			rate_best_fit = freq;
			rate_error_prev = rate_error;
		}

		if (rate_error == 0)
			break;
	}

	if (rounder->rate >= highest)
		rate_best_fit = highest;
	if (rounder->rate <= lowest)
		rate_best_fit = lowest;

	return rate_best_fit;
}
static long clk_rate_table_iter(unsigned int pos,
				struct clk_rate_round_data *rounder)
{
	struct cpufreq_frequency_table *freq_table = rounder->arg;
	unsigned long freq = freq_table[pos].frequency;

	if (freq == CPUFREQ_ENTRY_INVALID)
		freq = 0;

	return freq;
}
long clk_rate_table_round(struct clk *clk,
			  struct cpufreq_frequency_table *freq_table,
			  unsigned long rate)
{
	struct clk_rate_round_data table_round = {
		.min	= 0,
		.max	= clk->nr_freqs - 1,
		.func	= clk_rate_table_iter,
		.arg	= freq_table,
		.rate	= rate,
	};

	if (clk->nr_freqs < 1)
		return -ENOSYS;

	return clk_rate_round_helper(&table_round);
}
static long clk_rate_div_range_iter(unsigned int pos,
				    struct clk_rate_round_data *rounder)
{
	return clk_get_rate(rounder->arg) / pos;
}
long clk_rate_div_range_round(struct clk *clk, unsigned int div_min,
			      unsigned int div_max, unsigned long rate)
{
	struct clk_rate_round_data div_range_round = {
		.min	= div_min,
		.max	= div_max,
		.func	= clk_rate_div_range_iter,
		.arg	= clk_get_parent(clk),
		.rate	= rate,
	};

	return clk_rate_round_helper(&div_range_round);
}
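
/*
 * Illustrative sketch (assumed wiring, not from this file): a simple
 * divider clock whose divisor ranges from 1 to 64 could implement its
 * ->round_rate() op directly on top of this helper:
 *
 *	static long div_clk_round_rate(struct clk *clk, unsigned long rate)
 *	{
 *		return clk_rate_div_range_round(clk, 1, 64, rate);
 *	}
 */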
int clk_rate_table_find(struct clk *clk,
			struct cpufreq_frequency_table *freq_table,
			unsigned long rate)
{
	int i;

	for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
		unsigned long freq = freq_table[i].frequency;

		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;

		if (freq == rate)
			return i;
	}

	return -ENOENT;
}
/* Used for clocks that always have the same value as the parent clock */
unsigned long followparent_recalc(struct clk *clk)
{
	return clk->parent ? clk->parent->rate : 0;
}
int clk_reparent(struct clk *child, struct clk *parent)
{
	list_del_init(&child->sibling);
	if (parent)
		list_add(&child->sibling, &parent->children);
	child->parent = parent;

	/*
	 * now do the debugfs renaming to reattach the child
	 * to the proper parent
	 */

	return 0;
}
/* Propagate rate to children */
void propagate_rate(struct clk *tclk)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &tclk->children, sibling) {
		if (clkp->ops && clkp->ops->recalc)
			clkp->rate = clkp->ops->recalc(clkp);

		propagate_rate(clkp);
	}
}
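
/*
 * Note that this recurses: by the time a child's ->recalc() runs, its
 * parent's rate has already been refreshed, so one call on a subtree's
 * root brings the whole subtree up to date.
 */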
static void __clk_disable(struct clk *clk)
{
	if (WARN(!clk->usecount, "Trying to disable clock %p with 0 usecount\n",
		 clk))
		return;

	if (!(--clk->usecount)) {
		if (likely(clk->ops && clk->ops->disable))
			clk->ops->disable(clk);
		if (likely(clk->parent))
			__clk_disable(clk->parent);
	}
}
void clk_disable(struct clk *clk)
{
	unsigned long flags;

	if (!clk)
		return;

	spin_lock_irqsave(&clock_lock, flags);
	__clk_disable(clk);
	spin_unlock_irqrestore(&clock_lock, flags);
}
EXPORT_SYMBOL_GPL(clk_disable);
static int __clk_enable(struct clk *clk)
{
	int ret = 0;

	if (clk->usecount++ == 0) {
		if (clk->parent) {
			ret = __clk_enable(clk->parent);
			if (unlikely(ret))
				goto err;
		}

		if (clk->ops && clk->ops->enable) {
			ret = clk->ops->enable(clk);
			if (ret) {
				if (clk->parent)
					__clk_disable(clk->parent);
				goto err;
			}
		}
	}

	return ret;
err:
	clk->usecount--;
	return ret;
}
int clk_enable(struct clk *clk)
{
	unsigned long flags;
	int ret;

	if (!clk)
		return -EINVAL;

	spin_lock_irqsave(&clock_lock, flags);
	ret = __clk_enable(clk);
	spin_unlock_irqrestore(&clock_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_enable);
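
/*
 * Illustrative usage from a hypothetical consumer (not part of this
 * file): clk_enable() calls must be balanced with clk_disable(), as
 * the usecount propagates up through the parent chain:
 *
 *	struct clk *clk = clk_get(dev, "peripheral_clk");
 *	int ret;
 *
 *	ret = clk_enable(clk);
 *	if (ret)
 *		return ret;
 *	...
 *	clk_disable(clk);
 */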
static LIST_HEAD(root_clks);
/**
 * recalculate_root_clocks - recalculate and propagate all root clocks
 *
 * Recalculates all root clocks (clocks with no parent); provided each
 * clock's .recalc op is set correctly, the new rates also propagate
 * down to the children.
 */
void recalculate_root_clocks(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &root_clks, sibling) {
		if (clkp->ops && clkp->ops->recalc)
			clkp->rate = clkp->ops->recalc(clkp);
		propagate_rate(clkp);
	}
}
static struct clk_mapping dummy_mapping;
static struct clk *lookup_root_clock(struct clk *clk)
{
	while (clk->parent)
		clk = clk->parent;

	return clk;
}
static int clk_establish_mapping(struct clk *clk)
{
	struct clk_mapping *mapping = clk->mapping;

	/*
	 * Propagate mappings.
	 */
	if (!mapping) {
		struct clk *clkp;

		/*
		 * dummy mapping for root clocks with no specified ranges
		 */
		if (!clk->parent) {
			clk->mapping = &dummy_mapping;
			return 0;
		}

		/*
		 * If we're on a child clock and it provides no mapping of its
		 * own, inherit the mapping from its root clock.
		 */
		clkp = lookup_root_clock(clk);
		mapping = clkp->mapping;
		BUG_ON(!mapping);
	}

	/*
	 * Establish initial mapping.
	 */
	if (!mapping->base && mapping->phys) {
		kref_init(&mapping->ref);

		mapping->base = ioremap_nocache(mapping->phys, mapping->len);
		if (unlikely(!mapping->base))
			return -ENXIO;
	} else if (mapping->base) {
		/*
		 * Bump the refcount for an existing mapping
		 */
		kref_get(&mapping->ref);
	}

	clk->mapping = mapping;
	return 0;
}
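
/*
 * The net effect of the above: every registered clock ends up with a
 * usable ->mapping. Root clocks without an explicit window share
 * dummy_mapping, children inherit their root clock's window, and real
 * windows are ioremapped once and reference counted via mapping->ref.
 */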
static void clk_destroy_mapping(struct kref *kref)
{
	struct clk_mapping *mapping;

	mapping = container_of(kref, struct clk_mapping, ref);

	iounmap(mapping->base);
}
static void clk_teardown_mapping(struct clk *clk)
{
	struct clk_mapping *mapping = clk->mapping;

	/* Nothing to do with the dummy mapping */
	if (mapping == &dummy_mapping)
		return;

	kref_put(&mapping->ref, clk_destroy_mapping);
	clk->mapping = NULL;
}
int clk_register(struct clk *clk)
{
	int ret;

	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	/*
	 * trap out already registered clocks
	 */
	if (clk->node.next || clk->node.prev)
		return 0;

	mutex_lock(&clock_list_sem);

	INIT_LIST_HEAD(&clk->children);
	clk->usecount = 0;

	ret = clk_establish_mapping(clk);
	if (unlikely(ret))
		goto out_unlock;

	if (clk->parent)
		list_add(&clk->sibling, &clk->parent->children);
	else
		list_add(&clk->sibling, &root_clks);

	list_add(&clk->node, &clock_list);

#ifdef CONFIG_SH_CLK_CPG_LEGACY
	if (clk->ops && clk->ops->init)
		clk->ops->init(clk);
#endif

out_unlock:
	mutex_unlock(&clock_list_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_register);
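
/*
 * Illustrative sketch of registration (hypothetical clock and ops, not
 * from this file): platforms define their clocks statically and
 * register them once during setup:
 *
 *	static struct clk_ops my_clk_ops = {
 *		.recalc	= followparent_recalc,
 *	};
 *
 *	static struct clk my_clk = {
 *		.parent	= &bus_clk,
 *		.ops	= &my_clk_ops,
 *	};
 *
 *	ret = clk_register(&my_clk);
 *
 * Parentless clocks land on root_clks; everything else is linked under
 * its parent's children list.
 */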
void clk_unregister(struct clk *clk)
{
	mutex_lock(&clock_list_sem);
	list_del(&clk->sibling);
	list_del(&clk->node);
	clk_teardown_mapping(clk);
	mutex_unlock(&clock_list_sem);
}
EXPORT_SYMBOL_GPL(clk_unregister);
void clk_enable_init_clocks(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &clock_list, node)
		if (clkp->flags & CLK_ENABLE_ON_INIT)
			clk_enable(clkp);
}
unsigned long clk_get_rate(struct clk *clk)
{
	return clk->rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);
int clk_set_rate(struct clk *clk, unsigned long rate)
{
	int ret = -EOPNOTSUPP;
	unsigned long flags;

	spin_lock_irqsave(&clock_lock, flags);

	if (likely(clk->ops && clk->ops->set_rate)) {
		ret = clk->ops->set_rate(clk, rate);
		if (ret != 0)
			goto out_unlock;
	} else {
		clk->rate = rate;
		ret = 0;
	}

	if (clk->ops && clk->ops->recalc)
		clk->rate = clk->ops->recalc(clk);

	propagate_rate(clk);

out_unlock:
	spin_unlock_irqrestore(&clock_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate);
int clk_set_parent(struct clk *clk, struct clk *parent)
{
	unsigned long flags;
	int ret = -EINVAL;

	if (!parent || !clk)
		return ret;
	if (clk->parent == parent)
		return 0;

	spin_lock_irqsave(&clock_lock, flags);
	if (clk->usecount == 0) {
		if (clk->ops->set_parent)
			ret = clk->ops->set_parent(clk, parent);
		else
			ret = clk_reparent(clk, parent);

		if (ret == 0) {
			if (clk->ops->recalc)
				clk->rate = clk->ops->recalc(clk);
			pr_debug("set parent of %p to %p (new rate %ld)\n",
				 clk, clk->parent, clk->rate);
			propagate_rate(clk);
		}
	} else
		ret = -EBUSY;
	spin_unlock_irqrestore(&clock_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_parent);
struct clk *clk_get_parent(struct clk *clk)
{
	return clk->parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);
long clk_round_rate(struct clk *clk, unsigned long rate)
{
	if (likely(clk->ops && clk->ops->round_rate)) {
		unsigned long flags, rounded;

		spin_lock_irqsave(&clock_lock, flags);
		rounded = clk->ops->round_rate(clk, rate);
		spin_unlock_irqrestore(&clock_lock, flags);

		return rounded;
	}

	return clk_get_rate(clk);
}
EXPORT_SYMBOL_GPL(clk_round_rate);
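
/*
 * Typical consumer pattern (illustrative, assuming a 48 MHz target):
 * round first, then set, so the requested rate is one the clock can
 * actually generate:
 *
 *	long rounded = clk_round_rate(clk, 48000000);
 *	if (rounded > 0)
 *		clk_set_rate(clk, rounded);
 */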
long clk_round_parent(struct clk *clk, unsigned long target,
		      unsigned long *best_freq, unsigned long *parent_freq,
		      unsigned int div_min, unsigned int div_max)
{
	struct cpufreq_frequency_table *freq, *best = NULL;
	unsigned long error = ULONG_MAX, freq_high, freq_low, div;
	struct clk *parent = clk_get_parent(clk);

	if (!parent) {
		*parent_freq = 0;
		*best_freq = clk_round_rate(clk, target);
		return abs(target - *best_freq);
	}

	for (freq = parent->freq_table; freq->frequency != CPUFREQ_TABLE_END;
	     freq++) {
		if (freq->frequency == CPUFREQ_ENTRY_INVALID)
			continue;

		if (unlikely(freq->frequency / target <= div_min - 1)) {
			unsigned long freq_max;

			freq_max = (freq->frequency + div_min / 2) / div_min;
			if (error > target - freq_max) {
				error = target - freq_max;
				best = freq;
				if (best_freq)
					*best_freq = freq_max;
			}

			pr_debug("too low freq %u, error %lu\n",
				 freq->frequency, target - freq_max);

			if (freq_max == target)
				break;

			continue;
		}

		if (unlikely(freq->frequency / target >= div_max)) {
			unsigned long freq_min;

			freq_min = (freq->frequency + div_max / 2) / div_max;
			if (error > freq_min - target) {
				error = freq_min - target;
				best = freq;
				if (best_freq)
					*best_freq = freq_min;
			}

			pr_debug("too high freq %u, error %lu\n",
				 freq->frequency, freq_min - target);

			if (freq_min == target)
				break;

			continue;
		}

		div = freq->frequency / target;
		freq_high = freq->frequency / div;
		freq_low = freq->frequency / (div + 1);

		if (freq_high - target < error) {
			error = freq_high - target;
			best = freq;
			if (best_freq)
				*best_freq = freq_high;
		}

		if (target - freq_low < error) {
			error = target - freq_low;
			best = freq;
			if (best_freq)
				*best_freq = freq_low;
		}

		pr_debug("%u / %lu = %lu, / %lu = %lu, best %lu, parent %u\n",
			 freq->frequency, div, freq_high, div + 1, freq_low,
			 *best_freq, best->frequency);

		if (!error)
			break;
	}

	if (parent_freq)
		*parent_freq = best->frequency;

	return error;
}
EXPORT_SYMBOL_GPL(clk_round_parent);
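
/*
 * In short: for each valid parent frequency, the divisors bracketing
 * the target (div and div + 1, clamped to [div_min, div_max]) are
 * evaluated, and the combination with the smallest error is reported
 * through *best_freq and *parent_freq, with the error as the return
 * value.
 */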
#ifdef CONFIG_PM
static int clks_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
	static pm_message_t prev_state;
	struct clk *clkp;

	switch (state.event) {
	case PM_EVENT_ON:
		/* Resuming from hibernation */
		if (prev_state.event != PM_EVENT_FREEZE)
			break;

		list_for_each_entry(clkp, &clock_list, node) {
			if (likely(clkp->ops)) {
				unsigned long rate = clkp->rate;

				if (likely(clkp->ops->set_parent))
					clkp->ops->set_parent(clkp,
						clkp->parent);
				if (likely(clkp->ops->set_rate))
					clkp->ops->set_rate(clkp, rate);
				else if (likely(clkp->ops->recalc))
					clkp->rate = clkp->ops->recalc(clkp);
			}
		}
		break;
	case PM_EVENT_FREEZE:
		break;
	case PM_EVENT_SUSPEND:
		break;
	}

	prev_state = state;

	return 0;
}
static int clks_sysdev_resume(struct sys_device *dev)
{
	return clks_sysdev_suspend(dev, PMSG_ON);
}
static struct sysdev_class clks_sysdev_class = {
	.name = "clks",
};

static struct sysdev_driver clks_sysdev_driver = {
	.suspend = clks_sysdev_suspend,
	.resume = clks_sysdev_resume,
};

static struct sys_device clks_sysdev_dev = {
	.cls = &clks_sysdev_class,
};
static int __init clk_sysdev_init(void)
{
	sysdev_class_register(&clks_sysdev_class);
	sysdev_driver_register(&clks_sysdev_class, &clks_sysdev_driver);
	sysdev_register(&clks_sysdev_dev);

	return 0;
}
subsys_initcall(clk_sysdev_init);
#endif
/*
 * debugfs support to trace clock tree hierarchy and attributes
 */
static struct dentry *clk_debugfs_root;
static int clk_debugfs_register_one(struct clk *c)
{
	int err;
	struct dentry *d, *child, *child_tmp;
	struct clk *pa = c->parent;
	char s[255];
	char *p = s;

	p += sprintf(p, "%p", c);
	d = debugfs_create_dir(s, pa ? pa->dentry : clk_debugfs_root);
	if (!d)
		return -ENOMEM;
	c->dentry = d;

	d = debugfs_create_u8("usecount", S_IRUGO, c->dentry, (u8 *)&c->usecount);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	d = debugfs_create_u32("rate", S_IRUGO, c->dentry, (u32 *)&c->rate);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	d = debugfs_create_x32("flags", S_IRUGO, c->dentry, (u32 *)&c->flags);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	return 0;

err_out:
	d = c->dentry;
	list_for_each_entry_safe(child, child_tmp, &d->d_subdirs, d_u.d_child)
		debugfs_remove(child);
	debugfs_remove(c->dentry);
	return err;
}
static int clk_debugfs_register(struct clk *c)
{
	int err;
	struct clk *pa = c->parent;

	if (pa && !pa->dentry) {
		err = clk_debugfs_register(pa);
		if (err)
			return err;
	}

	if (!c->dentry) {
		err = clk_debugfs_register_one(c);
		if (err)
			return err;
	}

	return 0;
}
static int __init clk_debugfs_init(void)
{
	struct clk *c;
	struct dentry *d;
	int err;

	d = debugfs_create_dir("clock", NULL);
	if (!d)
		return -ENOMEM;
	clk_debugfs_root = d;

	list_for_each_entry(c, &clock_list, node) {
		err = clk_debugfs_register(c);
		if (err)
			goto err_out;
	}

	return 0;
err_out:
	debugfs_remove_recursive(clk_debugfs_root);
	return err;
}
late_initcall(clk_debugfs_init);