drivers/clk/mvebu/clk-corediv.c
/*
 * MVEBU Core divider clock
 *
 * Copyright (C) 2013 Marvell
 *
 * Ezequiel Garcia <ezequiel.garcia@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <linux/kernel.h>
#include <linux/clk-provider.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include "common.h"

#define CORE_CLK_DIV_RATIO_MASK	0xff
/*
 * This structure describes the hardware details (bit offset and mask)
 * to configure one particular core divider clock. Those hardware
 * details may differ from one SoC to another. This structure is
 * therefore typically instantiated statically to describe the
 * hardware details.
 */
struct clk_corediv_desc {
	unsigned int mask;
	unsigned int offset;
	unsigned int fieldbit;
};
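/*
 * For instance, the NAND descriptor defined below uses .mask = 0x3f and
 * .offset = 8, i.e. the divider value occupies bits [13:8] of the ratio
 * register, while .fieldbit selects the per-clock reload/enable bit.
 */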
/*
 * This structure describes the hardware details to configure the core
 * divider clocks on a given SoC. Amongst others, it points to the
 * array of core divider clock descriptors for this SoC, as well as
 * the corresponding operations to manipulate them.
 */
struct clk_corediv_soc_desc {
	const struct clk_corediv_desc *descs;
	unsigned int ndescs;
	const struct clk_ops ops;
	u32 ratio_reload;
	u32 enable_bit_offset;
	u32 ratio_offset;
};
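/*
 * As an illustration of how these fields combine: on Armada 370 below,
 * enable_bit_offset is 24 and the NAND descriptor has fieldbit = 1, so
 * the enable/disable/is_enabled helpers operate on BIT(1) << 24, i.e.
 * bit 25 of the control register, while ratio_offset = 0x8 places the
 * divider field in a separate ratio register.
 */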
/*
 * This structure represents one core divider clock for the clock
 * framework, and is dynamically allocated for each core divider clock
 * existing in the current SoC.
 */
struct clk_corediv {
	struct clk_hw hw;
	void __iomem *reg;
	const struct clk_corediv_desc *desc;
	const struct clk_corediv_soc_desc *soc_desc;
	spinlock_t lock;
};
static struct clk_onecell_data clk_data;
/*
 * Description of the core divider clocks available. For now, we
 * support only NAND, and it is available at the same register
 * locations regardless of the SoC.
 */
static const struct clk_corediv_desc mvebu_corediv_desc[] = {
	{ .mask = 0x3f, .offset = 8, .fieldbit = 1 }, /* NAND clock */
};

static const struct clk_corediv_desc mv98dx3236_corediv_desc[] = {
	{ .mask = 0x0f, .offset = 6, .fieldbit = 26 }, /* NAND clock */
};
#define to_corediv_clk(p) container_of(p, struct clk_corediv, hw)
static int clk_corediv_is_enabled(struct clk_hw *hwclk)
{
	struct clk_corediv *corediv = to_corediv_clk(hwclk);
	const struct clk_corediv_soc_desc *soc_desc = corediv->soc_desc;
	const struct clk_corediv_desc *desc = corediv->desc;
	u32 enable_mask = BIT(desc->fieldbit) << soc_desc->enable_bit_offset;

	return !!(readl(corediv->reg) & enable_mask);
}
static int clk_corediv_enable(struct clk_hw *hwclk)
{
	struct clk_corediv *corediv = to_corediv_clk(hwclk);
	const struct clk_corediv_soc_desc *soc_desc = corediv->soc_desc;
	const struct clk_corediv_desc *desc = corediv->desc;
	unsigned long flags = 0;
	u32 reg;

	spin_lock_irqsave(&corediv->lock, flags);

	reg = readl(corediv->reg);
	reg |= (BIT(desc->fieldbit) << soc_desc->enable_bit_offset);
	writel(reg, corediv->reg);

	spin_unlock_irqrestore(&corediv->lock, flags);

	return 0;
}
static void clk_corediv_disable(struct clk_hw *hwclk)
{
	struct clk_corediv *corediv = to_corediv_clk(hwclk);
	const struct clk_corediv_soc_desc *soc_desc = corediv->soc_desc;
	const struct clk_corediv_desc *desc = corediv->desc;
	unsigned long flags = 0;
	u32 reg;

	spin_lock_irqsave(&corediv->lock, flags);

	reg = readl(corediv->reg);
	reg &= ~(BIT(desc->fieldbit) << soc_desc->enable_bit_offset);
	writel(reg, corediv->reg);

	spin_unlock_irqrestore(&corediv->lock, flags);
}
static unsigned long clk_corediv_recalc_rate(struct clk_hw *hwclk,
					     unsigned long parent_rate)
{
	struct clk_corediv *corediv = to_corediv_clk(hwclk);
	const struct clk_corediv_soc_desc *soc_desc = corediv->soc_desc;
	const struct clk_corediv_desc *desc = corediv->desc;
	u32 reg, div;

	reg = readl(corediv->reg + soc_desc->ratio_offset);
	div = (reg >> desc->offset) & desc->mask;
	return parent_rate / div;
}
static long clk_corediv_round_rate(struct clk_hw *hwclk, unsigned long rate,
				   unsigned long *parent_rate)
{
	/* Valid ratios are 1:4, 1:5, 1:6 and 1:8 */
	u32 div;

	div = *parent_rate / rate;
	if (div < 4)
		div = 4;
	else if (div > 6)
		div = 8;

	return *parent_rate / div;
}
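/*
 * Worked example of the rounding above, assuming a 2 GHz parent clock
 * (purely illustrative): a 300 MHz request gives div = 6, which is kept
 * and yields ~333 MHz; a 200 MHz request gives div = 10, which is
 * clamped to 8 and yields 250 MHz; a 1 GHz request gives div = 2, which
 * is raised to the minimum of 4 and yields 500 MHz.
 */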
static int clk_corediv_set_rate(struct clk_hw *hwclk, unsigned long rate,
				unsigned long parent_rate)
{
	struct clk_corediv *corediv = to_corediv_clk(hwclk);
	const struct clk_corediv_soc_desc *soc_desc = corediv->soc_desc;
	const struct clk_corediv_desc *desc = corediv->desc;
	unsigned long flags = 0;
	u32 reg, div;

	div = parent_rate / rate;

	spin_lock_irqsave(&corediv->lock, flags);

	/* Write the new divider to the divider ratio register */
	reg = readl(corediv->reg + soc_desc->ratio_offset);
	reg &= ~(desc->mask << desc->offset);
	reg |= (div & desc->mask) << desc->offset;
	writel(reg, corediv->reg + soc_desc->ratio_offset);

	/* Set reload-force for this clock */
	reg = readl(corediv->reg) | BIT(desc->fieldbit);
	writel(reg, corediv->reg);

	/* Now trigger the clock update */
	reg = readl(corediv->reg) | soc_desc->ratio_reload;
	writel(reg, corediv->reg);

	/*
	 * Wait for the clocks to settle down, and then clear all the
	 * ratio requests and the reload request.
	 */
	udelay(1000);
	reg &= ~(CORE_CLK_DIV_RATIO_MASK | soc_desc->ratio_reload);
	writel(reg, corediv->reg);
	udelay(1000);

	spin_unlock_irqrestore(&corediv->lock, flags);

	return 0;
}
static const struct clk_corediv_soc_desc armada370_corediv_soc = {
	.descs = mvebu_corediv_desc,
	.ndescs = ARRAY_SIZE(mvebu_corediv_desc),
	.ops = {
		.enable = clk_corediv_enable,
		.disable = clk_corediv_disable,
		.is_enabled = clk_corediv_is_enabled,
		.recalc_rate = clk_corediv_recalc_rate,
		.round_rate = clk_corediv_round_rate,
		.set_rate = clk_corediv_set_rate,
	},
	.ratio_reload = BIT(8),
	.enable_bit_offset = 24,
	.ratio_offset = 0x8,
};
static const struct clk_corediv_soc_desc armada380_corediv_soc = {
	.descs = mvebu_corediv_desc,
	.ndescs = ARRAY_SIZE(mvebu_corediv_desc),
	.ops = {
		.enable = clk_corediv_enable,
		.disable = clk_corediv_disable,
		.is_enabled = clk_corediv_is_enabled,
		.recalc_rate = clk_corediv_recalc_rate,
		.round_rate = clk_corediv_round_rate,
		.set_rate = clk_corediv_set_rate,
	},
	.ratio_reload = BIT(8),
	.enable_bit_offset = 16,
	.ratio_offset = 0x4,
};
static const struct clk_corediv_soc_desc armada375_corediv_soc = {
	.descs = mvebu_corediv_desc,
	.ndescs = ARRAY_SIZE(mvebu_corediv_desc),
	.ops = {
		.recalc_rate = clk_corediv_recalc_rate,
		.round_rate = clk_corediv_round_rate,
		.set_rate = clk_corediv_set_rate,
	},
	.ratio_reload = BIT(8),
	.ratio_offset = 0x4,
};
static const struct clk_corediv_soc_desc mv98dx3236_corediv_soc = {
	.descs = mv98dx3236_corediv_desc,
	.ndescs = ARRAY_SIZE(mv98dx3236_corediv_desc),
	.ops = {
		.recalc_rate = clk_corediv_recalc_rate,
		.round_rate = clk_corediv_round_rate,
		.set_rate = clk_corediv_set_rate,
	},
	.ratio_reload = BIT(10),
	.ratio_offset = 0x8,
};
static void __init
mvebu_corediv_clk_init(struct device_node *node,
		       const struct clk_corediv_soc_desc *soc_desc)
{
	struct clk_init_data init;
	struct clk_corediv *corediv;
	struct clk **clks;
	void __iomem *base;
	const char *parent_name;
	const char *clk_name;
	int i;

	base = of_iomap(node, 0);
	if (WARN_ON(!base))
		return;

	parent_name = of_clk_get_parent_name(node, 0);

	clk_data.clk_num = soc_desc->ndescs;

	/* clks holds the clock array */
	clks = kcalloc(clk_data.clk_num, sizeof(struct clk *),
		       GFP_KERNEL);
	if (WARN_ON(!clks))
		goto err_unmap;
	/* corediv holds the clock specific array */
	corediv = kcalloc(clk_data.clk_num, sizeof(struct clk_corediv),
			  GFP_KERNEL);
	if (WARN_ON(!corediv))
		goto err_free_clks;

	spin_lock_init(&corediv->lock);

	for (i = 0; i < clk_data.clk_num; i++) {
		of_property_read_string_index(node, "clock-output-names",
					      i, &clk_name);
		init.num_parents = 1;
		init.parent_names = &parent_name;
		init.name = clk_name;
		init.ops = &soc_desc->ops;
		init.flags = 0;

		corediv[i].soc_desc = soc_desc;
		corediv[i].desc = soc_desc->descs + i;
		corediv[i].reg = base;
		corediv[i].hw.init = &init;

		clks[i] = clk_register(NULL, &corediv[i].hw);
		WARN_ON(IS_ERR(clks[i]));
	}

	clk_data.clks = clks;
	of_clk_add_provider(node, of_clk_src_onecell_get, &clk_data);
	return;

err_free_clks:
	kfree(clks);
err_unmap:
	iounmap(base);
}
static void __init armada370_corediv_clk_init(struct device_node *node)
{
	return mvebu_corediv_clk_init(node, &armada370_corediv_soc);
}
CLK_OF_DECLARE(armada370_corediv_clk, "marvell,armada-370-corediv-clock",
	       armada370_corediv_clk_init);

static void __init armada375_corediv_clk_init(struct device_node *node)
{
	return mvebu_corediv_clk_init(node, &armada375_corediv_soc);
}
CLK_OF_DECLARE(armada375_corediv_clk, "marvell,armada-375-corediv-clock",
	       armada375_corediv_clk_init);

static void __init armada380_corediv_clk_init(struct device_node *node)
{
	return mvebu_corediv_clk_init(node, &armada380_corediv_soc);
}
CLK_OF_DECLARE(armada380_corediv_clk, "marvell,armada-380-corediv-clock",
	       armada380_corediv_clk_init);

static void __init mv98dx3236_corediv_clk_init(struct device_node *node)
{
	return mvebu_corediv_clk_init(node, &mv98dx3236_corediv_soc);
}
CLK_OF_DECLARE(mv98dx3236_corediv_clk, "marvell,mv98dx3236-corediv-clock",
	       mv98dx3236_corediv_clk_init);
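/*
 * Illustrative device-tree usage for one of the compatibles above. The
 * node name, register offset and size are examples only and depend on
 * the SoC/board; the binding documentation has the exact values. The
 * driver reads "reg", "clocks" (the parent), and "clock-output-names",
 * and registers a onecell provider, hence #clock-cells = <1>:
 *
 *	nand_clk: corediv-clock@18740 {
 *		compatible = "marvell,armada-370-corediv-clock";
 *		reg = <0x18740 0xc>;
 *		#clock-cells = <1>;
 *		clocks = <&pll>;
 *		clock-output-names = "nand";
 *	};
 */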