Revert "NFS: Make close(2) asynchronous when closing NFS O_DIRECT files"
[linux/fpc-iii.git] / drivers / clk / mvebu / clk-corediv.c
blobd1e5863d337525ba5138c1505bca994e1d2b3268
1 /*
2 * MVEBU Core divider clock
4 * Copyright (C) 2013 Marvell
6 * Ezequiel Garcia <ezequiel.garcia@free-electrons.com>
8 * This file is licensed under the terms of the GNU General Public
9 * License version 2. This program is licensed "as is" without any
10 * warranty of any kind, whether express or implied.
13 #include <linux/kernel.h>
14 #include <linux/clk-provider.h>
15 #include <linux/of_address.h>
16 #include <linux/slab.h>
17 #include <linux/delay.h>
18 #include "common.h"
/* Mask covering all per-clock divider ratio request bits in the control register. */
#define CORE_CLK_DIV_RATIO_MASK 0xff
/*
 * This structure describes the hardware details (bit offset and mask)
 * to configure one particular core divider clock. Those hardware
 * details may differ from one SoC to another. This structure is
 * therefore typically instantiated statically to describe the
 * hardware details.
 */
struct clk_corediv_desc {
	unsigned int mask;	/* mask applied to the divider value in the ratio register */
	unsigned int offset;	/* bit offset of the divider field in the ratio register */
	unsigned int fieldbit;	/* per-clock bit index, used for enable and reload-force bits */
};
/*
 * This structure describes the hardware details to configure the core
 * divider clocks on a given SoC. Amongst others, it points to the
 * array of core divider clock descriptors for this SoC, as well as
 * the corresponding operations to manipulate them.
 */
struct clk_corediv_soc_desc {
	const struct clk_corediv_desc *descs;	/* per-clock hardware descriptors */
	unsigned int ndescs;			/* number of entries in descs */
	const struct clk_ops ops;		/* clk framework callbacks for this SoC */
	u32 ratio_reload;			/* control-register bit that triggers a ratio reload */
	u32 enable_bit_offset;			/* shift applied to desc->fieldbit for the enable bit */
	u32 ratio_offset;			/* byte offset of the ratio register from the base */
};
/*
 * This structure represents one core divider clock for the clock
 * framework, and is dynamically allocated for each core divider clock
 * existing in the current SoC.
 */
struct clk_corediv {
	struct clk_hw hw;				/* clk framework handle */
	void __iomem *reg;				/* base of the mapped control registers */
	const struct clk_corediv_desc *desc;		/* hardware bits for this clock */
	const struct clk_corediv_soc_desc *soc_desc;	/* SoC-wide description */
	spinlock_t lock;				/* serializes read-modify-write of reg */
};
/* Clock array handed to the of_clk onecell provider at init time. */
static struct clk_onecell_data clk_data;
/*
 * Description of the core divider clocks available. For now, we
 * support only NAND, and it is available at the same register
 * locations regardless of the SoC.
 */
static const struct clk_corediv_desc mvebu_corediv_desc[] = {
	{ .mask = 0x3f, .offset = 8, .fieldbit = 1 }, /* NAND clock */
};
/* Convert a clk_hw pointer back to its enclosing clk_corediv instance. */
#define to_corediv_clk(p) container_of(p, struct clk_corediv, hw)
76 static int clk_corediv_is_enabled(struct clk_hw *hwclk)
78 struct clk_corediv *corediv = to_corediv_clk(hwclk);
79 const struct clk_corediv_soc_desc *soc_desc = corediv->soc_desc;
80 const struct clk_corediv_desc *desc = corediv->desc;
81 u32 enable_mask = BIT(desc->fieldbit) << soc_desc->enable_bit_offset;
83 return !!(readl(corediv->reg) & enable_mask);
86 static int clk_corediv_enable(struct clk_hw *hwclk)
88 struct clk_corediv *corediv = to_corediv_clk(hwclk);
89 const struct clk_corediv_soc_desc *soc_desc = corediv->soc_desc;
90 const struct clk_corediv_desc *desc = corediv->desc;
91 unsigned long flags = 0;
92 u32 reg;
94 spin_lock_irqsave(&corediv->lock, flags);
96 reg = readl(corediv->reg);
97 reg |= (BIT(desc->fieldbit) << soc_desc->enable_bit_offset);
98 writel(reg, corediv->reg);
100 spin_unlock_irqrestore(&corediv->lock, flags);
102 return 0;
105 static void clk_corediv_disable(struct clk_hw *hwclk)
107 struct clk_corediv *corediv = to_corediv_clk(hwclk);
108 const struct clk_corediv_soc_desc *soc_desc = corediv->soc_desc;
109 const struct clk_corediv_desc *desc = corediv->desc;
110 unsigned long flags = 0;
111 u32 reg;
113 spin_lock_irqsave(&corediv->lock, flags);
115 reg = readl(corediv->reg);
116 reg &= ~(BIT(desc->fieldbit) << soc_desc->enable_bit_offset);
117 writel(reg, corediv->reg);
119 spin_unlock_irqrestore(&corediv->lock, flags);
122 static unsigned long clk_corediv_recalc_rate(struct clk_hw *hwclk,
123 unsigned long parent_rate)
125 struct clk_corediv *corediv = to_corediv_clk(hwclk);
126 const struct clk_corediv_soc_desc *soc_desc = corediv->soc_desc;
127 const struct clk_corediv_desc *desc = corediv->desc;
128 u32 reg, div;
130 reg = readl(corediv->reg + soc_desc->ratio_offset);
131 div = (reg >> desc->offset) & desc->mask;
132 return parent_rate / div;
/*
 * Clamp the requested rate to a divider the hardware supports.  Only
 * the ratios 1:4, 1:5, 1:6 and 1:8 are valid, so a computed divider
 * below 4 is raised to 4 and anything above 6 is rounded up to 8.
 */
static long clk_corediv_round_rate(struct clk_hw *hwclk, unsigned long rate,
				   unsigned long *parent_rate)
{
	unsigned int div = *parent_rate / rate;

	if (div < 4)
		div = 4;
	else if (div > 6)
		div = 8;

	return *parent_rate / div;
}
/*
 * Program a new divider value and run the hardware reload sequence:
 * write the divider, set the per-clock reload-force bit, then pulse the
 * SoC-wide ratio-reload trigger.  The whole sequence runs under the
 * per-clock lock since it read-modify-writes the shared control
 * register.
 *
 * NOTE(review): assumes rate is non-zero — a zero rate would divide by
 * zero below.  The clk core normally only passes rates previously
 * validated by clk_corediv_round_rate(); confirm for any direct caller.
 */
static int clk_corediv_set_rate(struct clk_hw *hwclk, unsigned long rate,
				unsigned long parent_rate)
{
	struct clk_corediv *corediv = to_corediv_clk(hwclk);
	const struct clk_corediv_soc_desc *soc_desc = corediv->soc_desc;
	const struct clk_corediv_desc *desc = corediv->desc;
	unsigned long flags = 0;
	u32 reg, div;

	div = parent_rate / rate;

	spin_lock_irqsave(&corediv->lock, flags);

	/* Write new divider to the divider ratio register */
	reg = readl(corediv->reg + soc_desc->ratio_offset);
	reg &= ~(desc->mask << desc->offset);
	reg |= (div & desc->mask) << desc->offset;
	writel(reg, corediv->reg + soc_desc->ratio_offset);

	/* Set reload-force for this clock */
	reg = readl(corediv->reg) | BIT(desc->fieldbit);
	writel(reg, corediv->reg);

	/* Now trigger the clock update */
	reg = readl(corediv->reg) | soc_desc->ratio_reload;
	writel(reg, corediv->reg);

	/*
	 * Wait for clocks to settle down, and then clear all the
	 * ratios request and the reload request.
	 */
	udelay(1000);
	reg &= ~(CORE_CLK_DIV_RATIO_MASK | soc_desc->ratio_reload);
	writel(reg, corediv->reg);
	udelay(1000);

	spin_unlock_irqrestore(&corediv->lock, flags);

	return 0;
}
/*
 * Armada 370: gateable divider — enable bits start at bit 24 of the
 * control register and the ratio register sits at offset 0x8.
 */
static const struct clk_corediv_soc_desc armada370_corediv_soc = {
	.descs = mvebu_corediv_desc,
	.ndescs = ARRAY_SIZE(mvebu_corediv_desc),
	.ops = {
		.enable = clk_corediv_enable,
		.disable = clk_corediv_disable,
		.is_enabled = clk_corediv_is_enabled,
		.recalc_rate = clk_corediv_recalc_rate,
		.round_rate = clk_corediv_round_rate,
		.set_rate = clk_corediv_set_rate,
	},
	.ratio_reload = BIT(8),
	.enable_bit_offset = 24,
	.ratio_offset = 0x8,
};
/*
 * Armada 38x: same callbacks as Armada 370 but the enable bits start
 * at bit 16 and the ratio register sits at offset 0x4.
 */
static const struct clk_corediv_soc_desc armada380_corediv_soc = {
	.descs = mvebu_corediv_desc,
	.ndescs = ARRAY_SIZE(mvebu_corediv_desc),
	.ops = {
		.enable = clk_corediv_enable,
		.disable = clk_corediv_disable,
		.is_enabled = clk_corediv_is_enabled,
		.recalc_rate = clk_corediv_recalc_rate,
		.round_rate = clk_corediv_round_rate,
		.set_rate = clk_corediv_set_rate,
	},
	.ratio_reload = BIT(8),
	.enable_bit_offset = 16,
	.ratio_offset = 0x4,
};
/*
 * Armada 375: only rate operations are provided — no
 * enable/disable/is_enabled callbacks (presumably this divider cannot
 * be gated on that SoC; confirm against the datasheet).
 */
static const struct clk_corediv_soc_desc armada375_corediv_soc = {
	.descs = mvebu_corediv_desc,
	.ndescs = ARRAY_SIZE(mvebu_corediv_desc),
	.ops = {
		.recalc_rate = clk_corediv_recalc_rate,
		.round_rate = clk_corediv_round_rate,
		.set_rate = clk_corediv_set_rate,
	},
	.ratio_reload = BIT(8),
	.ratio_offset = 0x4,
};
235 static void __init
236 mvebu_corediv_clk_init(struct device_node *node,
237 const struct clk_corediv_soc_desc *soc_desc)
239 struct clk_init_data init;
240 struct clk_corediv *corediv;
241 struct clk **clks;
242 void __iomem *base;
243 const char *parent_name;
244 const char *clk_name;
245 int i;
247 base = of_iomap(node, 0);
248 if (WARN_ON(!base))
249 return;
251 parent_name = of_clk_get_parent_name(node, 0);
253 clk_data.clk_num = soc_desc->ndescs;
255 /* clks holds the clock array */
256 clks = kcalloc(clk_data.clk_num, sizeof(struct clk *),
257 GFP_KERNEL);
258 if (WARN_ON(!clks))
259 goto err_unmap;
260 /* corediv holds the clock specific array */
261 corediv = kcalloc(clk_data.clk_num, sizeof(struct clk_corediv),
262 GFP_KERNEL);
263 if (WARN_ON(!corediv))
264 goto err_free_clks;
266 spin_lock_init(&corediv->lock);
268 for (i = 0; i < clk_data.clk_num; i++) {
269 of_property_read_string_index(node, "clock-output-names",
270 i, &clk_name);
271 init.num_parents = 1;
272 init.parent_names = &parent_name;
273 init.name = clk_name;
274 init.ops = &soc_desc->ops;
275 init.flags = 0;
277 corediv[i].soc_desc = soc_desc;
278 corediv[i].desc = soc_desc->descs + i;
279 corediv[i].reg = base;
280 corediv[i].hw.init = &init;
282 clks[i] = clk_register(NULL, &corediv[i].hw);
283 WARN_ON(IS_ERR(clks[i]));
286 clk_data.clks = clks;
287 of_clk_add_provider(node, of_clk_src_onecell_get, &clk_data);
288 return;
290 err_free_clks:
291 kfree(clks);
292 err_unmap:
293 iounmap(base);
296 static void __init armada370_corediv_clk_init(struct device_node *node)
298 return mvebu_corediv_clk_init(node, &armada370_corediv_soc);
300 CLK_OF_DECLARE(armada370_corediv_clk, "marvell,armada-370-corediv-clock",
301 armada370_corediv_clk_init);
303 static void __init armada375_corediv_clk_init(struct device_node *node)
305 return mvebu_corediv_clk_init(node, &armada375_corediv_soc);
307 CLK_OF_DECLARE(armada375_corediv_clk, "marvell,armada-375-corediv-clock",
308 armada375_corediv_clk_init);
310 static void __init armada380_corediv_clk_init(struct device_node *node)
312 return mvebu_corediv_clk_init(node, &armada380_corediv_soc);
314 CLK_OF_DECLARE(armada380_corediv_clk, "marvell,armada-380-corediv-clock",
315 armada380_corediv_clk_init);