PM / sleep: Asynchronous threads for suspend_noirq
[linux/fpc-iii.git] / drivers / clk / mvebu / clk-corediv.c
blob7162615bcdcdda802e782fb631051c75b0921261
1 /*
2 * MVEBU Core divider clock
4 * Copyright (C) 2013 Marvell
6 * Ezequiel Garcia <ezequiel.garcia@free-electrons.com>
8 * This file is licensed under the terms of the GNU General Public
9 * License version 2. This program is licensed "as is" without any
10 * warranty of any kind, whether express or implied.
13 #include <linux/kernel.h>
14 #include <linux/clk-provider.h>
15 #include <linux/of_address.h>
16 #include <linux/slab.h>
17 #include <linux/delay.h>
18 #include "common.h"
/*
 * Control-register layout (as used by the code below):
 *  - bits 0..7:  per-clock "force ratio" bits (covered by
 *                CORE_CLK_DIV_RATIO_MASK)
 *  - bit 8:      reload request that latches the new ratios
 *  - bits 24..:  per-clock enable bits (one per desc->fieldbit)
 * The divider ratio fields themselves live in a second register at
 * CORE_CLK_DIV_RATIO_OFFSET from the mapped base.
 */
#define CORE_CLK_DIV_RATIO_MASK		0xff
#define CORE_CLK_DIV_RATIO_RELOAD	BIT(8)
#define CORE_CLK_DIV_ENABLE_OFFSET	24
#define CORE_CLK_DIV_RATIO_OFFSET	0x8
/*
 * struct clk_corediv_desc - static description of one core divider clock
 * @mask:     width mask of the divider ratio field in the ratio register
 * @offset:   bit offset of the ratio field in the ratio register
 * @fieldbit: index of this clock's enable / reload-force bit
 */
struct clk_corediv_desc {
	unsigned int mask;
	unsigned int offset;
	unsigned int fieldbit;
};
/*
 * struct clk_corediv - runtime state of one core divider clock
 * @hw:   handle registered with the common clock framework
 * @reg:  base of the core divider control registers
 * @desc: copy of the static descriptor for this clock
 * @lock: serializes the read-modify-write register sequences
 */
struct clk_corediv {
	struct clk_hw hw;
	void __iomem *reg;
	struct clk_corediv_desc desc;
	spinlock_t lock;
};
/* Provider data handed to of_clk_add_provider() at init time. */
static struct clk_onecell_data clk_data;
/* One entry per core divider clock exposed by this controller. */
static const struct clk_corediv_desc mvebu_corediv_desc[] __initconst = {
	{ .mask = 0x3f, .offset = 8, .fieldbit = 1 }, /* NAND clock */
};
/* Retrieve the clk_corediv embedding the given clk_hw. */
#define to_corediv_clk(p) container_of(p, struct clk_corediv, hw)
46 static int clk_corediv_is_enabled(struct clk_hw *hwclk)
48 struct clk_corediv *corediv = to_corediv_clk(hwclk);
49 struct clk_corediv_desc *desc = &corediv->desc;
50 u32 enable_mask = BIT(desc->fieldbit) << CORE_CLK_DIV_ENABLE_OFFSET;
52 return !!(readl(corediv->reg) & enable_mask);
55 static int clk_corediv_enable(struct clk_hw *hwclk)
57 struct clk_corediv *corediv = to_corediv_clk(hwclk);
58 struct clk_corediv_desc *desc = &corediv->desc;
59 unsigned long flags = 0;
60 u32 reg;
62 spin_lock_irqsave(&corediv->lock, flags);
64 reg = readl(corediv->reg);
65 reg |= (BIT(desc->fieldbit) << CORE_CLK_DIV_ENABLE_OFFSET);
66 writel(reg, corediv->reg);
68 spin_unlock_irqrestore(&corediv->lock, flags);
70 return 0;
73 static void clk_corediv_disable(struct clk_hw *hwclk)
75 struct clk_corediv *corediv = to_corediv_clk(hwclk);
76 struct clk_corediv_desc *desc = &corediv->desc;
77 unsigned long flags = 0;
78 u32 reg;
80 spin_lock_irqsave(&corediv->lock, flags);
82 reg = readl(corediv->reg);
83 reg &= ~(BIT(desc->fieldbit) << CORE_CLK_DIV_ENABLE_OFFSET);
84 writel(reg, corediv->reg);
86 spin_unlock_irqrestore(&corediv->lock, flags);
89 static unsigned long clk_corediv_recalc_rate(struct clk_hw *hwclk,
90 unsigned long parent_rate)
92 struct clk_corediv *corediv = to_corediv_clk(hwclk);
93 struct clk_corediv_desc *desc = &corediv->desc;
94 u32 reg, div;
96 reg = readl(corediv->reg + CORE_CLK_DIV_RATIO_OFFSET);
97 div = (reg >> desc->offset) & desc->mask;
98 return parent_rate / div;
101 static long clk_corediv_round_rate(struct clk_hw *hwclk, unsigned long rate,
102 unsigned long *parent_rate)
104 /* Valid ratio are 1:4, 1:5, 1:6 and 1:8 */
105 u32 div;
107 div = *parent_rate / rate;
108 if (div < 4)
109 div = 4;
110 else if (div > 6)
111 div = 8;
113 return *parent_rate / div;
116 static int clk_corediv_set_rate(struct clk_hw *hwclk, unsigned long rate,
117 unsigned long parent_rate)
119 struct clk_corediv *corediv = to_corediv_clk(hwclk);
120 struct clk_corediv_desc *desc = &corediv->desc;
121 unsigned long flags = 0;
122 u32 reg, div;
124 div = parent_rate / rate;
126 spin_lock_irqsave(&corediv->lock, flags);
128 /* Write new divider to the divider ratio register */
129 reg = readl(corediv->reg + CORE_CLK_DIV_RATIO_OFFSET);
130 reg &= ~(desc->mask << desc->offset);
131 reg |= (div & desc->mask) << desc->offset;
132 writel(reg, corediv->reg + CORE_CLK_DIV_RATIO_OFFSET);
134 /* Set reload-force for this clock */
135 reg = readl(corediv->reg) | BIT(desc->fieldbit);
136 writel(reg, corediv->reg);
138 /* Now trigger the clock update */
139 reg = readl(corediv->reg) | CORE_CLK_DIV_RATIO_RELOAD;
140 writel(reg, corediv->reg);
143 * Wait for clocks to settle down, and then clear all the
144 * ratios request and the reload request.
146 udelay(1000);
147 reg &= ~(CORE_CLK_DIV_RATIO_MASK | CORE_CLK_DIV_RATIO_RELOAD);
148 writel(reg, corediv->reg);
149 udelay(1000);
151 spin_unlock_irqrestore(&corediv->lock, flags);
153 return 0;
/* clk_ops implemented by the core divider clocks. */
static const struct clk_ops corediv_ops = {
	.enable = clk_corediv_enable,
	.disable = clk_corediv_disable,
	.is_enabled = clk_corediv_is_enabled,
	.recalc_rate = clk_corediv_recalc_rate,
	.round_rate = clk_corediv_round_rate,
	.set_rate = clk_corediv_set_rate,
};
165 static void __init mvebu_corediv_clk_init(struct device_node *node)
167 struct clk_init_data init;
168 struct clk_corediv *corediv;
169 struct clk **clks;
170 void __iomem *base;
171 const char *parent_name;
172 const char *clk_name;
173 int i;
175 base = of_iomap(node, 0);
176 if (WARN_ON(!base))
177 return;
179 parent_name = of_clk_get_parent_name(node, 0);
181 clk_data.clk_num = ARRAY_SIZE(mvebu_corediv_desc);
183 /* clks holds the clock array */
184 clks = kcalloc(clk_data.clk_num, sizeof(struct clk *),
185 GFP_KERNEL);
186 if (WARN_ON(!clks))
187 goto err_unmap;
188 /* corediv holds the clock specific array */
189 corediv = kcalloc(clk_data.clk_num, sizeof(struct clk_corediv),
190 GFP_KERNEL);
191 if (WARN_ON(!corediv))
192 goto err_free_clks;
194 spin_lock_init(&corediv->lock);
196 for (i = 0; i < clk_data.clk_num; i++) {
197 of_property_read_string_index(node, "clock-output-names",
198 i, &clk_name);
199 init.num_parents = 1;
200 init.parent_names = &parent_name;
201 init.name = clk_name;
202 init.ops = &corediv_ops;
203 init.flags = 0;
205 corediv[i].desc = mvebu_corediv_desc[i];
206 corediv[i].reg = base;
207 corediv[i].hw.init = &init;
209 clks[i] = clk_register(NULL, &corediv[i].hw);
210 WARN_ON(IS_ERR(clks[i]));
213 clk_data.clks = clks;
214 of_clk_add_provider(node, of_clk_src_onecell_get, &clk_data);
215 return;
217 err_free_clks:
218 kfree(clks);
219 err_unmap:
220 iounmap(base);
222 CLK_OF_DECLARE(mvebu_corediv_clk, "marvell,armada-370-corediv-clock",
223 mvebu_corediv_clk_init);