/*
 * Copyright (C) 2010 Google, Inc.
 *
 * Author:
 *	Colin Cross <ccross@google.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
19 #include <linux/kernel.h>
20 #include <linux/clk.h>
21 #include <linux/clkdev.h>
22 #include <linux/debugfs.h>
23 #include <linux/delay.h>
24 #include <linux/init.h>
25 #include <linux/list.h>
26 #include <linux/module.h>
27 #include <linux/sched.h>
28 #include <linux/seq_file.h>
29 #include <linux/slab.h>
/*
 * Locking:
 *
 * Each struct clk has a spinlock.
 *
 * To avoid AB-BA locking problems, locks must always be traversed from child
 * clock to parent clock.  For example, when enabling a clock, the clock's lock
 * is taken, and then clk_enable is called on the parent, which takes the
 * parent clock's lock.  There is one exception to this ordering: when dumping
 * the clock tree through debugfs.  In this case, clk_lock_all is called,
 * which attempts to iterate through the entire list of clocks and take every
 * clock lock.  If any call to spin_trylock fails, all locked clocks are
 * unlocked, and the process is retried.  When all the locks are held,
 * the only clock operation that can be called is clk_get_rate_all_locked.
 *
 * Within a single clock, no clock operation can call another clock operation
 * on itself, except for clk_get_rate_locked and clk_set_rate_locked.  Any
 * clock operation can call any other clock operation on any of its possible
 * parents.
 *
 * An additional mutex, clock_list_lock, is used to protect the list of all
 * clocks.
 *
 * The clock operations must lock internally to protect against
 * read-modify-write on registers that are shared by multiple clocks.
 */
/* Protects the global list of registered clocks (see locking notes above). */
static DEFINE_MUTEX(clock_list_lock);
static LIST_HEAD(clocks);
65 struct clk
*tegra_get_clock_by_name(const char *name
)
68 struct clk
*ret
= NULL
;
69 mutex_lock(&clock_list_lock
);
70 list_for_each_entry(c
, &clocks
, node
) {
71 if (strcmp(c
->name
, name
) == 0) {
76 mutex_unlock(&clock_list_lock
);
80 /* Must be called with c->spinlock held */
81 static unsigned long clk_predict_rate_from_parent(struct clk
*c
, struct clk
*p
)
85 rate
= clk_get_rate(p
);
87 if (c
->mul
!= 0 && c
->div
!= 0) {
89 rate
+= c
->div
- 1; /* round up */
96 /* Must be called with c->spinlock held */
97 unsigned long clk_get_rate_locked(struct clk
*c
)
102 rate
= clk_predict_rate_from_parent(c
, c
->parent
);
109 unsigned long clk_get_rate(struct clk
*c
)
114 spin_lock_irqsave(&c
->spinlock
, flags
);
116 rate
= clk_get_rate_locked(c
);
118 spin_unlock_irqrestore(&c
->spinlock
, flags
);
122 EXPORT_SYMBOL(clk_get_rate
);
124 int clk_reparent(struct clk
*c
, struct clk
*parent
)
130 void clk_init(struct clk
*c
)
132 spin_lock_init(&c
->spinlock
);
134 if (c
->ops
&& c
->ops
->init
)
137 if (!c
->ops
|| !c
->ops
->enable
) {
141 c
->state
= c
->parent
->state
;
146 mutex_lock(&clock_list_lock
);
147 list_add(&c
->node
, &clocks
);
148 mutex_unlock(&clock_list_lock
);
151 int clk_enable(struct clk
*c
)
156 spin_lock_irqsave(&c
->spinlock
, flags
);
158 if (c
->refcnt
== 0) {
160 ret
= clk_enable(c
->parent
);
165 if (c
->ops
&& c
->ops
->enable
) {
166 ret
= c
->ops
->enable(c
);
169 clk_disable(c
->parent
);
178 spin_unlock_irqrestore(&c
->spinlock
, flags
);
181 EXPORT_SYMBOL(clk_enable
);
183 void clk_disable(struct clk
*c
)
187 spin_lock_irqsave(&c
->spinlock
, flags
);
189 if (c
->refcnt
== 0) {
190 WARN(1, "Attempting to disable clock %s with refcnt 0", c
->name
);
191 spin_unlock_irqrestore(&c
->spinlock
, flags
);
194 if (c
->refcnt
== 1) {
195 if (c
->ops
&& c
->ops
->disable
)
199 clk_disable(c
->parent
);
205 spin_unlock_irqrestore(&c
->spinlock
, flags
);
207 EXPORT_SYMBOL(clk_disable
);
209 int clk_set_parent(struct clk
*c
, struct clk
*parent
)
213 unsigned long new_rate
;
214 unsigned long old_rate
;
216 spin_lock_irqsave(&c
->spinlock
, flags
);
218 if (!c
->ops
|| !c
->ops
->set_parent
) {
223 new_rate
= clk_predict_rate_from_parent(c
, parent
);
224 old_rate
= clk_get_rate_locked(c
);
226 ret
= c
->ops
->set_parent(c
, parent
);
231 spin_unlock_irqrestore(&c
->spinlock
, flags
);
234 EXPORT_SYMBOL(clk_set_parent
);
236 struct clk
*clk_get_parent(struct clk
*c
)
240 EXPORT_SYMBOL(clk_get_parent
);
242 int clk_set_rate_locked(struct clk
*c
, unsigned long rate
)
246 if (!c
->ops
|| !c
->ops
->set_rate
)
249 if (rate
> c
->max_rate
)
252 if (c
->ops
&& c
->ops
->round_rate
) {
253 new_rate
= c
->ops
->round_rate(c
, rate
);
261 return c
->ops
->set_rate(c
, rate
);
264 int clk_set_rate(struct clk
*c
, unsigned long rate
)
269 spin_lock_irqsave(&c
->spinlock
, flags
);
271 ret
= clk_set_rate_locked(c
, rate
);
273 spin_unlock_irqrestore(&c
->spinlock
, flags
);
277 EXPORT_SYMBOL(clk_set_rate
);
280 /* Must be called with clocks lock and all indvidual clock locks held */
281 unsigned long clk_get_rate_all_locked(struct clk
*c
)
290 if (c
->mul
!= 0 && c
->div
!= 0) {
304 long clk_round_rate(struct clk
*c
, unsigned long rate
)
309 spin_lock_irqsave(&c
->spinlock
, flags
);
311 if (!c
->ops
|| !c
->ops
->round_rate
) {
316 if (rate
> c
->max_rate
)
319 ret
= c
->ops
->round_rate(c
, rate
);
322 spin_unlock_irqrestore(&c
->spinlock
, flags
);
325 EXPORT_SYMBOL(clk_round_rate
);
327 static int tegra_clk_init_one_from_table(struct tegra_clk_init_table
*table
)
334 c
= tegra_get_clock_by_name(table
->name
);
337 pr_warning("Unable to initialize clock %s\n",
343 p
= tegra_get_clock_by_name(table
->parent
);
345 pr_warning("Unable to find parent %s of clock %s\n",
346 table
->parent
, table
->name
);
350 if (c
->parent
!= p
) {
351 ret
= clk_set_parent(c
, p
);
353 pr_warning("Unable to set parent %s of clock %s: %d\n",
354 table
->parent
, table
->name
, ret
);
360 if (table
->rate
&& table
->rate
!= clk_get_rate(c
)) {
361 ret
= clk_set_rate(c
, table
->rate
);
363 pr_warning("Unable to set clock %s to rate %lu: %d\n",
364 table
->name
, table
->rate
, ret
);
369 if (table
->enabled
) {
372 pr_warning("Unable to enable clock %s: %d\n",
381 void tegra_clk_init_from_table(struct tegra_clk_init_table
*table
)
383 for (; table
->name
; table
++)
384 tegra_clk_init_one_from_table(table
);
386 EXPORT_SYMBOL(tegra_clk_init_from_table
);
388 void tegra_periph_reset_deassert(struct clk
*c
)
390 BUG_ON(!c
->ops
->reset
);
391 c
->ops
->reset(c
, false);
393 EXPORT_SYMBOL(tegra_periph_reset_deassert
);
395 void tegra_periph_reset_assert(struct clk
*c
)
397 BUG_ON(!c
->ops
->reset
);
398 c
->ops
->reset(c
, true);
400 EXPORT_SYMBOL(tegra_periph_reset_assert
);
402 /* Several extended clock configuration bits (e.g., clock routing, clock
403 * phase control) are included in PLL and peripheral clock source
405 int tegra_clk_cfg_ex(struct clk
*c
, enum tegra_clk_ex_param p
, u32 setting
)
410 spin_lock_irqsave(&c
->spinlock
, flags
);
412 if (!c
->ops
|| !c
->ops
->clk_cfg_ex
) {
416 ret
= c
->ops
->clk_cfg_ex(c
, p
, setting
);
419 spin_unlock_irqrestore(&c
->spinlock
, flags
);
424 #ifdef CONFIG_DEBUG_FS
426 static int __clk_lock_all_spinlocks(void)
430 list_for_each_entry(c
, &clocks
, node
)
431 if (!spin_trylock(&c
->spinlock
))
432 goto unlock_spinlocks
;
437 list_for_each_entry_continue_reverse(c
, &clocks
, node
)
438 spin_unlock(&c
->spinlock
);
443 static void __clk_unlock_all_spinlocks(void)
447 list_for_each_entry_reverse(c
, &clocks
, node
)
448 spin_unlock(&c
->spinlock
);
/*
 * This function retries until it can take all locks, and may take
 * an arbitrarily long time to complete.
 * Must be called with irqs enabled, returns with irqs disabled
 * Must be called with clock_list_lock held
 */
static void clk_lock_all(void)
{
	int ret;
retry:
	local_irq_disable();

	ret = __clk_lock_all_spinlocks();
	if (ret)
		goto failed_spinlocks;

	/* All locks taken successfully, return */
	return;

failed_spinlocks:
	/* Back off with irqs enabled so other CPUs can make progress. */
	local_irq_enable();
	yield();
	goto retry;
}
/*
 * Unlocks all clocks after a clk_lock_all
 * Must be called with irqs disabled, returns with irqs enabled
 * Must be called with clock_list_lock held
 */
static void clk_unlock_all(void)
{
	__clk_unlock_all_spinlocks();

	local_irq_enable();
}
/* Root directory of the clock debugfs hierarchy ("/sys/kernel/debug/clock"). */
static struct dentry *clk_debugfs_root;
491 static void clock_tree_show_one(struct seq_file
*s
, struct clk
*c
, int level
)
494 const char *state
= "uninit";
499 else if (c
->state
== OFF
)
502 if (c
->mul
!= 0 && c
->div
!= 0) {
503 if (c
->mul
> c
->div
) {
504 int mul
= c
->mul
/ c
->div
;
505 int mul2
= (c
->mul
* 10 / c
->div
) % 10;
506 int mul3
= (c
->mul
* 10) % c
->div
;
507 if (mul2
== 0 && mul3
== 0)
508 snprintf(div
, sizeof(div
), "x%d", mul
);
510 snprintf(div
, sizeof(div
), "x%d.%d", mul
, mul2
);
512 snprintf(div
, sizeof(div
), "x%d.%d..", mul
, mul2
);
514 snprintf(div
, sizeof(div
), "%d%s", c
->div
/ c
->mul
,
515 (c
->div
% c
->mul
) ? ".5" : "");
519 seq_printf(s
, "%*s%c%c%-*s %-6s %-3d %-8s %-10lu\n",
521 c
->rate
> c
->max_rate
? '!' : ' ',
523 30 - level
* 3, c
->name
,
524 state
, c
->refcnt
, div
, clk_get_rate_all_locked(c
));
526 list_for_each_entry(child
, &clocks
, node
) {
527 if (child
->parent
!= c
)
530 clock_tree_show_one(s
, child
, level
+ 1);
534 static int clock_tree_show(struct seq_file
*s
, void *data
)
537 seq_printf(s
, " clock state ref div rate\n");
538 seq_printf(s
, "--------------------------------------------------------------\n");
540 mutex_lock(&clock_list_lock
);
544 list_for_each_entry(c
, &clocks
, node
)
545 if (c
->parent
== NULL
)
546 clock_tree_show_one(s
, c
, 0);
550 mutex_unlock(&clock_list_lock
);
554 static int clock_tree_open(struct inode
*inode
, struct file
*file
)
556 return single_open(file
, clock_tree_show
, inode
->i_private
);
559 static const struct file_operations clock_tree_fops
= {
560 .open
= clock_tree_open
,
563 .release
= single_release
,
566 static int possible_parents_show(struct seq_file
*s
, void *data
)
568 struct clk
*c
= s
->private;
571 for (i
= 0; c
->inputs
[i
].input
; i
++) {
572 char *first
= (i
== 0) ? "" : " ";
573 seq_printf(s
, "%s%s", first
, c
->inputs
[i
].input
->name
);
579 static int possible_parents_open(struct inode
*inode
, struct file
*file
)
581 return single_open(file
, possible_parents_show
, inode
->i_private
);
584 static const struct file_operations possible_parents_fops
= {
585 .open
= possible_parents_open
,
588 .release
= single_release
,
591 static int clk_debugfs_register_one(struct clk
*c
)
595 d
= debugfs_create_dir(c
->name
, clk_debugfs_root
);
600 d
= debugfs_create_u8("refcnt", S_IRUGO
, c
->dent
, (u8
*)&c
->refcnt
);
604 d
= debugfs_create_u32("rate", S_IRUGO
, c
->dent
, (u32
*)&c
->rate
);
608 d
= debugfs_create_x32("flags", S_IRUGO
, c
->dent
, (u32
*)&c
->flags
);
613 d
= debugfs_create_file("possible_parents", S_IRUGO
, c
->dent
,
614 c
, &possible_parents_fops
);
622 debugfs_remove_recursive(c
->dent
);
626 static int clk_debugfs_register(struct clk
*c
)
629 struct clk
*pa
= c
->parent
;
631 if (pa
&& !pa
->dent
) {
632 err
= clk_debugfs_register(pa
);
638 err
= clk_debugfs_register_one(c
);
645 int __init
tegra_clk_debugfs_init(void)
651 d
= debugfs_create_dir("clock", NULL
);
654 clk_debugfs_root
= d
;
656 d
= debugfs_create_file("clock_tree", S_IRUGO
, clk_debugfs_root
, NULL
,
661 list_for_each_entry(c
, &clocks
, node
) {
662 err
= clk_debugfs_register(c
);
668 debugfs_remove_recursive(clk_debugfs_root
);