drivers/sh/clk/cpg.c

/*
 * Helper routines for SuperH Clock Pulse Generator blocks (CPG).
 *
 * Copyright (C) 2010 Magnus Damm
 * Copyright (C) 2010 - 2012 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/sh_clk.h>

#define CPG_CKSTP_BIT	BIT(8)

static unsigned int sh_clk_read(struct clk *clk)
{
	if (clk->flags & CLK_ENABLE_REG_8BIT)
		return ioread8(clk->mapped_reg);
	else if (clk->flags & CLK_ENABLE_REG_16BIT)
		return ioread16(clk->mapped_reg);

	return ioread32(clk->mapped_reg);
}

static void sh_clk_write(int value, struct clk *clk)
{
	if (clk->flags & CLK_ENABLE_REG_8BIT)
		iowrite8(value, clk->mapped_reg);
	else if (clk->flags & CLK_ENABLE_REG_16BIT)
		iowrite16(value, clk->mapped_reg);
	else
		iowrite32(value, clk->mapped_reg);
}

static int sh_clk_mstp_enable(struct clk *clk)
{
	sh_clk_write(sh_clk_read(clk) & ~(1 << clk->enable_bit), clk);
	return 0;
}

static void sh_clk_mstp_disable(struct clk *clk)
{
	sh_clk_write(sh_clk_read(clk) | (1 << clk->enable_bit), clk);
}

static struct sh_clk_ops sh_clk_mstp_clk_ops = {
	.enable		= sh_clk_mstp_enable,
	.disable	= sh_clk_mstp_disable,
	.recalc		= followparent_recalc,
};

int __init sh_clk_mstp_register(struct clk *clks, int nr)
{
	struct clk *clkp;
	int ret = 0;
	int k;

	for (k = 0; !ret && (k < nr); k++) {
		clkp = clks + k;
		clkp->ops = &sh_clk_mstp_clk_ops;
		ret |= clk_register(clkp);
	}

	return ret;
}

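/*
 * Usage sketch (editor's note, not part of the original file): SoC or board
 * clock code describes each MSTP gate by its control register and bit and
 * then hands the whole array to sh_clk_mstp_register(). The parent pointer,
 * register address and bit number below are hypothetical placeholders;
 * real platforms normally fill these fields via the SH_CLK_MSTP*() helper
 * macros from <linux/sh_clk.h> rather than open-coding them.
 *
 *	static struct clk example_mstp_clks[] = {
 *		{
 *			.parent     = &example_peripheral_clk,
 *			.enable_reg = (void __iomem *)0xe6150130,
 *			.enable_bit = 24,
 *		},
 *	};
 *
 *	sh_clk_mstp_register(example_mstp_clks, ARRAY_SIZE(example_mstp_clks));
 */
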
/*
 * Div/mult table lookup helpers
 */
static inline struct clk_div_table *clk_to_div_table(struct clk *clk)
{
	return clk->priv;
}

static inline struct clk_div_mult_table *clk_to_div_mult_table(struct clk *clk)
{
	return clk_to_div_table(clk)->div_mult_table;
}

/*
 * Common div ops
 */
static long sh_clk_div_round_rate(struct clk *clk, unsigned long rate)
{
	return clk_rate_table_round(clk, clk->freq_table, rate);
}

static unsigned long sh_clk_div_recalc(struct clk *clk)
{
	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
	unsigned int idx;

	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, clk->arch_flags ? &clk->arch_flags : NULL);

	idx = (sh_clk_read(clk) >> clk->enable_bit) & clk->div_mask;

	return clk->freq_table[idx].frequency;
}

static int sh_clk_div_set_rate(struct clk *clk, unsigned long rate)
{
	struct clk_div_table *dt = clk_to_div_table(clk);
	unsigned long value;
	int idx;

	idx = clk_rate_table_find(clk, clk->freq_table, rate);
	if (idx < 0)
		return idx;

	value = sh_clk_read(clk);
	value &= ~(clk->div_mask << clk->enable_bit);
	value |= (idx << clk->enable_bit);
	sh_clk_write(value, clk);

	/* XXX: Should use a post-change notifier */
	if (dt->kick)
		dt->kick(clk);

	return 0;
}

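/*
 * Worked example (editor's note): sh_clk_div_set_rate() looks the target
 * rate up in the frequency table that sh_clk_div_recalc() builds from the
 * divisor table, then writes the matching index into the register field
 * selected by clk->enable_bit and clk->div_mask. Assuming a div6 clock
 * using the 1..64 divisor table below and a 48 MHz parent, a request for
 * 12 MHz resolves to index 3 (divide-by-4), so the divisor field is
 * rewritten to 3 before the optional ->kick() callback runs.
 */
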
static int sh_clk_div_enable(struct clk *clk)
{
	if (clk->div_mask == SH_CLK_DIV6_MSK) {
		int ret = sh_clk_div_set_rate(clk, clk->rate);
		if (ret < 0)
			return ret;
	}

	sh_clk_write(sh_clk_read(clk) & ~CPG_CKSTP_BIT, clk);
	return 0;
}

static void sh_clk_div_disable(struct clk *clk)
{
	unsigned int val;

	val = sh_clk_read(clk);
	val |= CPG_CKSTP_BIT;

	/*
	 * div6 clocks require the divisor field to be non-zero or the
	 * above CKSTP toggle silently fails. Ensure that the divisor
	 * array is reset to its initial state on disable.
	 */
	if (clk->flags & CLK_MASK_DIV_ON_DISABLE)
		val |= clk->div_mask;

	sh_clk_write(val, clk);
}

static struct sh_clk_ops sh_clk_div_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.set_rate	= sh_clk_div_set_rate,
	.round_rate	= sh_clk_div_round_rate,
};

static struct sh_clk_ops sh_clk_div_enable_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.set_rate	= sh_clk_div_set_rate,
	.round_rate	= sh_clk_div_round_rate,
	.enable		= sh_clk_div_enable,
	.disable	= sh_clk_div_disable,
};

static int __init sh_clk_init_parent(struct clk *clk)
{
	u32 val;

	if (clk->parent)
		return 0;

	if (!clk->parent_table || !clk->parent_num)
		return 0;

	if (!clk->src_width) {
		pr_err("sh_clk_init_parent: cannot select parent clock\n");
		return -EINVAL;
	}

	val  = (sh_clk_read(clk) >> clk->src_shift);
	val &= (1 << clk->src_width) - 1;

	if (val >= clk->parent_num) {
		pr_err("sh_clk_init_parent: parent table size failed\n");
		return -EINVAL;
	}

	clk_reparent(clk, clk->parent_table[val]);
	if (!clk->parent) {
		pr_err("sh_clk_init_parent: unable to set parent\n");
		return -EINVAL;
	}

	return 0;
}

static int __init sh_clk_div_register_ops(struct clk *clks, int nr,
			struct clk_div_table *table, struct sh_clk_ops *ops)
{
	struct clk *clkp;
	void *freq_table;
	int nr_divs = table->div_mult_table->nr_divisors;
	int freq_table_size = sizeof(struct cpufreq_frequency_table);
	int ret = 0;
	int k;

	freq_table_size *= (nr_divs + 1);
	freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
	if (!freq_table) {
		pr_err("%s: unable to alloc memory\n", __func__);
		return -ENOMEM;
	}

	for (k = 0; !ret && (k < nr); k++) {
		clkp = clks + k;

		clkp->ops = ops;
		clkp->priv = table;

		clkp->freq_table = freq_table + (k * freq_table_size);
		clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;

		ret = clk_register(clkp);
		if (ret == 0)
			ret = sh_clk_init_parent(clkp);
	}

	return ret;
}

/*
 * div6 support
 */
static int sh_clk_div6_divisors[64] = {
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
	17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
	33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
	49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
};

static struct clk_div_mult_table div6_div_mult_table = {
	.divisors = sh_clk_div6_divisors,
	.nr_divisors = ARRAY_SIZE(sh_clk_div6_divisors),
};

static struct clk_div_table sh_clk_div6_table = {
	.div_mult_table = &div6_div_mult_table,
};

static int sh_clk_div6_set_parent(struct clk *clk, struct clk *parent)
{
	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
	u32 value;
	int ret, i;

	if (!clk->parent_table || !clk->parent_num)
		return -EINVAL;

	/* Search the parent */
	for (i = 0; i < clk->parent_num; i++)
		if (clk->parent_table[i] == parent)
			break;

	if (i == clk->parent_num)
		return -ENODEV;

	ret = clk_reparent(clk, parent);
	if (ret < 0)
		return ret;

	value = sh_clk_read(clk) &
		~(((1 << clk->src_width) - 1) << clk->src_shift);

	sh_clk_write(value | (i << clk->src_shift), clk);

	/* Rebuild the frequency table */
	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, NULL);

	return 0;
}

static struct sh_clk_ops sh_clk_div6_reparent_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.round_rate	= sh_clk_div_round_rate,
	.set_rate	= sh_clk_div_set_rate,
	.enable		= sh_clk_div_enable,
	.disable	= sh_clk_div_disable,
	.set_parent	= sh_clk_div6_set_parent,
};

int __init sh_clk_div6_register(struct clk *clks, int nr)
{
	return sh_clk_div_register_ops(clks, nr, &sh_clk_div6_table,
				       &sh_clk_div_enable_clk_ops);
}

int __init sh_clk_div6_reparent_register(struct clk *clks, int nr)
{
	return sh_clk_div_register_ops(clks, nr, &sh_clk_div6_table,
				       &sh_clk_div6_reparent_clk_ops);
}

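/*
 * Usage sketch (editor's note, not part of the original file): div6 clocks
 * are normally declared with the SH_CLK_DIV6() family of helpers in
 * <linux/sh_clk.h>; the reparent-capable variant additionally needs a parent
 * table plus the position of the source-select field so sh_clk_init_parent()
 * and sh_clk_div6_set_parent() can map the register value to a parent clock.
 * All names, the register address and the field layout below are
 * hypothetical placeholders.
 *
 *	static struct clk *example_vclk_parents[] = {
 *		&example_pll_div2_clk,
 *		&example_extal_clk,
 *	};
 *
 *	static struct clk example_vclk = {
 *		.enable_reg   = (void __iomem *)0xe6150008,
 *		.div_mask     = SH_CLK_DIV6_MSK,
 *		.flags        = CLK_MASK_DIV_ON_DISABLE,
 *		.parent_table = example_vclk_parents,
 *		.parent_num   = ARRAY_SIZE(example_vclk_parents),
 *		.src_shift    = 6,
 *		.src_width    = 1,
 *	};
 *
 *	sh_clk_div6_reparent_register(&example_vclk, 1);
 */
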
/*
 * div4 support
 */
static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
{
	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
	u32 value;
	int ret;

	/* we really need a better way to determine parent index, but for
	 * now assume internal parent comes with CLK_ENABLE_ON_INIT set,
	 * no CLK_ENABLE_ON_INIT means external clock...
	 */

	if (parent->flags & CLK_ENABLE_ON_INIT)
		value = sh_clk_read(clk) & ~(1 << 7);
	else
		value = sh_clk_read(clk) | (1 << 7);

	ret = clk_reparent(clk, parent);
	if (ret < 0)
		return ret;

	sh_clk_write(value, clk);

	/* Rebuild the frequency table */
	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, &clk->arch_flags);

	return 0;
}

static struct sh_clk_ops sh_clk_div4_reparent_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.set_rate	= sh_clk_div_set_rate,
	.round_rate	= sh_clk_div_round_rate,
	.enable		= sh_clk_div_enable,
	.disable	= sh_clk_div_disable,
	.set_parent	= sh_clk_div4_set_parent,
};

int __init sh_clk_div4_register(struct clk *clks, int nr,
				struct clk_div4_table *table)
{
	return sh_clk_div_register_ops(clks, nr, table, &sh_clk_div_clk_ops);
}

int __init sh_clk_div4_enable_register(struct clk *clks, int nr,
				struct clk_div4_table *table)
{
	return sh_clk_div_register_ops(clks, nr, table,
				       &sh_clk_div_enable_clk_ops);
}

int __init sh_clk_div4_reparent_register(struct clk *clks, int nr,
				struct clk_div4_table *table)
{
	return sh_clk_div_register_ops(clks, nr, table,
				       &sh_clk_div4_reparent_clk_ops);
}

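/*
 * Usage sketch (editor's note, not part of the original file): div4 clocks
 * share one clk_div4_table describing the legal divisors (plus an optional
 * ->kick() callback that latches newly written divisors, e.g. by poking a
 * FRQCR "kick" bit). Each clock records where its 4-bit divisor field sits
 * via enable_bit/div_mask, and arch_flags carries the per-clock bitmap of
 * valid table entries that sh_clk_div_recalc() hands to
 * clk_rate_table_build(). All names and numbers below are hypothetical
 * placeholders; platforms usually use the SH_CLK_DIV4() helper from
 * <linux/sh_clk.h> instead of open-coding the fields.
 *
 *	static int example_div4_divisors[] = { 2, 3, 4, 6, 8, 12, 16, 18 };
 *
 *	static struct clk_div_mult_table example_div4_div_mult_table = {
 *		.divisors	= example_div4_divisors,
 *		.nr_divisors	= ARRAY_SIZE(example_div4_divisors),
 *	};
 *
 *	static struct clk_div4_table example_div4_table = {
 *		.div_mult_table	= &example_div4_div_mult_table,
 *	};
 *
 *	static struct clk example_div4_clks[] = {
 *		{
 *			.parent     = &example_pll_clk,
 *			.enable_reg = (void __iomem *)0xe6150000,
 *			.enable_bit = 20,
 *			.div_mask   = 0xf,
 *			.arch_flags = 0x00ff,
 *		},
 *	};
 *
 *	sh_clk_div4_register(example_div4_clks, ARRAY_SIZE(example_div4_clks),
 *			     &example_div4_table);
 */
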
/* FSI-DIV */
static unsigned long fsidiv_recalc(struct clk *clk)
{
	u32 value;

	value = __raw_readl(clk->mapping->base);

	value >>= 16;
	if (value < 2)
		return clk->parent->rate;

	return clk->parent->rate / value;
}

static long fsidiv_round_rate(struct clk *clk, unsigned long rate)
{
	return clk_rate_div_range_round(clk, 1, 0xffff, rate);
}

static void fsidiv_disable(struct clk *clk)
{
	__raw_writel(0, clk->mapping->base);
}

static int fsidiv_enable(struct clk *clk)
{
	u32 value;

	value = __raw_readl(clk->mapping->base) >> 16;
	if (value < 2)
		return 0;

	__raw_writel((value << 16) | 0x3, clk->mapping->base);

	return 0;
}

static int fsidiv_set_rate(struct clk *clk, unsigned long rate)
{
	int idx;

	idx = (clk->parent->rate / rate) & 0xffff;
	if (idx < 2)
		__raw_writel(0, clk->mapping->base);
	else
		__raw_writel(idx << 16, clk->mapping->base);

	return 0;
}

static struct sh_clk_ops fsidiv_clk_ops = {
	.recalc		= fsidiv_recalc,
	.round_rate	= fsidiv_round_rate,
	.set_rate	= fsidiv_set_rate,
	.enable		= fsidiv_enable,
	.disable	= fsidiv_disable,
};

int __init sh_clk_fsidiv_register(struct clk *clks, int nr)
{
	struct clk_mapping *map;
	int i;

	for (i = 0; i < nr; i++) {

		map = kzalloc(sizeof(struct clk_mapping), GFP_KERNEL);
		if (!map) {
			pr_err("%s: unable to alloc memory\n", __func__);
			return -ENOMEM;
		}

		/* clks[i].enable_reg came from SH_CLK_FSIDIV() */
		map->phys	= (phys_addr_t)clks[i].enable_reg;
		map->len	= 8;

		clks[i].enable_reg	= 0; /* remove .enable_reg */
		clks[i].ops		= &fsidiv_clk_ops;
		clks[i].mapping		= map;

		clk_register(&clks[i]);
	}

	return 0;
}
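
/*
 * Usage sketch (editor's note, not part of the original file): as the comment
 * in sh_clk_fsidiv_register() notes, each FSI-DIV clock arrives with
 * .enable_reg holding the physical address of its divider register (normally
 * filled in by the SH_CLK_FSIDIV() helper); registration converts that into
 * a clk_mapping and attaches fsidiv_clk_ops. The address and parent below
 * are hypothetical placeholders.
 *
 *	static struct clk example_fsidiv_clk = {
 *		.enable_reg = (void __iomem *)0xf3400004,
 *		.parent     = &example_fsia_clk,
 *	};
 *
 *	sh_clk_fsidiv_register(&example_fsidiv_clk, 1);
 */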