/*
 * Helper routines for SuperH Clock Pulse Generator blocks (CPG).
 *
 *  Copyright (C) 2010  Magnus Damm
 *  Copyright (C) 2010 - 2012  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/io.h>
#include <linux/sh_clk.h>
#include <linux/slab.h>
17 #define CPG_CKSTP_BIT BIT(8)
19 static unsigned int sh_clk_read(struct clk
*clk
)
21 if (clk
->flags
& CLK_ENABLE_REG_8BIT
)
22 return ioread8(clk
->mapped_reg
);
23 else if (clk
->flags
& CLK_ENABLE_REG_16BIT
)
24 return ioread16(clk
->mapped_reg
);
26 return ioread32(clk
->mapped_reg
);
29 static void sh_clk_write(int value
, struct clk
*clk
)
31 if (clk
->flags
& CLK_ENABLE_REG_8BIT
)
32 iowrite8(value
, clk
->mapped_reg
);
33 else if (clk
->flags
& CLK_ENABLE_REG_16BIT
)
34 iowrite16(value
, clk
->mapped_reg
);
36 iowrite32(value
, clk
->mapped_reg
);
39 static int sh_clk_mstp_enable(struct clk
*clk
)
41 sh_clk_write(sh_clk_read(clk
) & ~(1 << clk
->enable_bit
), clk
);
42 if (clk
->status_reg
) {
43 unsigned int (*read
)(const void __iomem
*addr
);
45 void __iomem
*mapped_status
= (phys_addr_t
)clk
->status_reg
-
46 (phys_addr_t
)clk
->enable_reg
+ clk
->mapped_reg
;
48 if (clk
->flags
& CLK_ENABLE_REG_8BIT
)
50 else if (clk
->flags
& CLK_ENABLE_REG_16BIT
)
56 (read(mapped_status
) & (1 << clk
->enable_bit
)) && i
;
60 pr_err("cpg: failed to enable %p[%d]\n",
61 clk
->enable_reg
, clk
->enable_bit
);
68 static void sh_clk_mstp_disable(struct clk
*clk
)
70 sh_clk_write(sh_clk_read(clk
) | (1 << clk
->enable_bit
), clk
);
73 static struct sh_clk_ops sh_clk_mstp_clk_ops
= {
74 .enable
= sh_clk_mstp_enable
,
75 .disable
= sh_clk_mstp_disable
,
76 .recalc
= followparent_recalc
,
79 int __init
sh_clk_mstp_register(struct clk
*clks
, int nr
)
85 for (k
= 0; !ret
&& (k
< nr
); k
++) {
87 clkp
->ops
= &sh_clk_mstp_clk_ops
;
88 ret
|= clk_register(clkp
);
/*
 * Div/mult table lookup helpers
 */
97 static inline struct clk_div_table
*clk_to_div_table(struct clk
*clk
)
102 static inline struct clk_div_mult_table
*clk_to_div_mult_table(struct clk
*clk
)
104 return clk_to_div_table(clk
)->div_mult_table
;
110 static long sh_clk_div_round_rate(struct clk
*clk
, unsigned long rate
)
112 return clk_rate_table_round(clk
, clk
->freq_table
, rate
);
115 static unsigned long sh_clk_div_recalc(struct clk
*clk
)
117 struct clk_div_mult_table
*table
= clk_to_div_mult_table(clk
);
120 clk_rate_table_build(clk
, clk
->freq_table
, table
->nr_divisors
,
121 table
, clk
->arch_flags
? &clk
->arch_flags
: NULL
);
123 idx
= (sh_clk_read(clk
) >> clk
->enable_bit
) & clk
->div_mask
;
125 return clk
->freq_table
[idx
].frequency
;
128 static int sh_clk_div_set_rate(struct clk
*clk
, unsigned long rate
)
130 struct clk_div_table
*dt
= clk_to_div_table(clk
);
134 idx
= clk_rate_table_find(clk
, clk
->freq_table
, rate
);
138 value
= sh_clk_read(clk
);
139 value
&= ~(clk
->div_mask
<< clk
->enable_bit
);
140 value
|= (idx
<< clk
->enable_bit
);
141 sh_clk_write(value
, clk
);
143 /* XXX: Should use a post-change notifier */
150 static int sh_clk_div_enable(struct clk
*clk
)
152 if (clk
->div_mask
== SH_CLK_DIV6_MSK
) {
153 int ret
= sh_clk_div_set_rate(clk
, clk
->rate
);
158 sh_clk_write(sh_clk_read(clk
) & ~CPG_CKSTP_BIT
, clk
);
162 static void sh_clk_div_disable(struct clk
*clk
)
166 val
= sh_clk_read(clk
);
167 val
|= CPG_CKSTP_BIT
;
170 * div6 clocks require the divisor field to be non-zero or the
171 * above CKSTP toggle silently fails. Ensure that the divisor
172 * array is reset to its initial state on disable.
174 if (clk
->flags
& CLK_MASK_DIV_ON_DISABLE
)
175 val
|= clk
->div_mask
;
177 sh_clk_write(val
, clk
);
180 static struct sh_clk_ops sh_clk_div_clk_ops
= {
181 .recalc
= sh_clk_div_recalc
,
182 .set_rate
= sh_clk_div_set_rate
,
183 .round_rate
= sh_clk_div_round_rate
,
186 static struct sh_clk_ops sh_clk_div_enable_clk_ops
= {
187 .recalc
= sh_clk_div_recalc
,
188 .set_rate
= sh_clk_div_set_rate
,
189 .round_rate
= sh_clk_div_round_rate
,
190 .enable
= sh_clk_div_enable
,
191 .disable
= sh_clk_div_disable
,
194 static int __init
sh_clk_init_parent(struct clk
*clk
)
201 if (!clk
->parent_table
|| !clk
->parent_num
)
204 if (!clk
->src_width
) {
205 pr_err("sh_clk_init_parent: cannot select parent clock\n");
209 val
= (sh_clk_read(clk
) >> clk
->src_shift
);
210 val
&= (1 << clk
->src_width
) - 1;
212 if (val
>= clk
->parent_num
) {
213 pr_err("sh_clk_init_parent: parent table size failed\n");
217 clk_reparent(clk
, clk
->parent_table
[val
]);
219 pr_err("sh_clk_init_parent: unable to set parent");
226 static int __init
sh_clk_div_register_ops(struct clk
*clks
, int nr
,
227 struct clk_div_table
*table
, struct sh_clk_ops
*ops
)
231 int nr_divs
= table
->div_mult_table
->nr_divisors
;
232 int freq_table_size
= sizeof(struct cpufreq_frequency_table
);
236 freq_table_size
*= (nr_divs
+ 1);
237 freq_table
= kcalloc(nr
, freq_table_size
, GFP_KERNEL
);
239 pr_err("%s: unable to alloc memory\n", __func__
);
243 for (k
= 0; !ret
&& (k
< nr
); k
++) {
249 clkp
->freq_table
= freq_table
+ (k
* freq_table_size
);
250 clkp
->freq_table
[nr_divs
].frequency
= CPUFREQ_TABLE_END
;
252 ret
= clk_register(clkp
);
254 ret
= sh_clk_init_parent(clkp
);
/* The div6 divisor field is 6 bits wide: ratios 1..64 in register
 * encoding order. */
static int sh_clk_div6_divisors[64] = {
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
	17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
	33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
	49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64,
};
270 static struct clk_div_mult_table div6_div_mult_table
= {
271 .divisors
= sh_clk_div6_divisors
,
272 .nr_divisors
= ARRAY_SIZE(sh_clk_div6_divisors
),
275 static struct clk_div_table sh_clk_div6_table
= {
276 .div_mult_table
= &div6_div_mult_table
,
279 static int sh_clk_div6_set_parent(struct clk
*clk
, struct clk
*parent
)
281 struct clk_div_mult_table
*table
= clk_to_div_mult_table(clk
);
285 if (!clk
->parent_table
|| !clk
->parent_num
)
288 /* Search the parent */
289 for (i
= 0; i
< clk
->parent_num
; i
++)
290 if (clk
->parent_table
[i
] == parent
)
293 if (i
== clk
->parent_num
)
296 ret
= clk_reparent(clk
, parent
);
300 value
= sh_clk_read(clk
) &
301 ~(((1 << clk
->src_width
) - 1) << clk
->src_shift
);
303 sh_clk_write(value
| (i
<< clk
->src_shift
), clk
);
305 /* Rebuild the frequency table */
306 clk_rate_table_build(clk
, clk
->freq_table
, table
->nr_divisors
,
312 static struct sh_clk_ops sh_clk_div6_reparent_clk_ops
= {
313 .recalc
= sh_clk_div_recalc
,
314 .round_rate
= sh_clk_div_round_rate
,
315 .set_rate
= sh_clk_div_set_rate
,
316 .enable
= sh_clk_div_enable
,
317 .disable
= sh_clk_div_disable
,
318 .set_parent
= sh_clk_div6_set_parent
,
321 int __init
sh_clk_div6_register(struct clk
*clks
, int nr
)
323 return sh_clk_div_register_ops(clks
, nr
, &sh_clk_div6_table
,
324 &sh_clk_div_enable_clk_ops
);
327 int __init
sh_clk_div6_reparent_register(struct clk
*clks
, int nr
)
329 return sh_clk_div_register_ops(clks
, nr
, &sh_clk_div6_table
,
330 &sh_clk_div6_reparent_clk_ops
);
336 static int sh_clk_div4_set_parent(struct clk
*clk
, struct clk
*parent
)
338 struct clk_div_mult_table
*table
= clk_to_div_mult_table(clk
);
342 /* we really need a better way to determine parent index, but for
343 * now assume internal parent comes with CLK_ENABLE_ON_INIT set,
344 * no CLK_ENABLE_ON_INIT means external clock...
347 if (parent
->flags
& CLK_ENABLE_ON_INIT
)
348 value
= sh_clk_read(clk
) & ~(1 << 7);
350 value
= sh_clk_read(clk
) | (1 << 7);
352 ret
= clk_reparent(clk
, parent
);
356 sh_clk_write(value
, clk
);
358 /* Rebiuld the frequency table */
359 clk_rate_table_build(clk
, clk
->freq_table
, table
->nr_divisors
,
360 table
, &clk
->arch_flags
);
365 static struct sh_clk_ops sh_clk_div4_reparent_clk_ops
= {
366 .recalc
= sh_clk_div_recalc
,
367 .set_rate
= sh_clk_div_set_rate
,
368 .round_rate
= sh_clk_div_round_rate
,
369 .enable
= sh_clk_div_enable
,
370 .disable
= sh_clk_div_disable
,
371 .set_parent
= sh_clk_div4_set_parent
,
374 int __init
sh_clk_div4_register(struct clk
*clks
, int nr
,
375 struct clk_div4_table
*table
)
377 return sh_clk_div_register_ops(clks
, nr
, table
, &sh_clk_div_clk_ops
);
380 int __init
sh_clk_div4_enable_register(struct clk
*clks
, int nr
,
381 struct clk_div4_table
*table
)
383 return sh_clk_div_register_ops(clks
, nr
, table
,
384 &sh_clk_div_enable_clk_ops
);
387 int __init
sh_clk_div4_reparent_register(struct clk
*clks
, int nr
,
388 struct clk_div4_table
*table
)
390 return sh_clk_div_register_ops(clks
, nr
, table
,
391 &sh_clk_div4_reparent_clk_ops
);
395 static unsigned long fsidiv_recalc(struct clk
*clk
)
399 value
= __raw_readl(clk
->mapping
->base
);
403 return clk
->parent
->rate
;
405 return clk
->parent
->rate
/ value
;
/* Clamp @rate to a divisor in the 16-bit range 1..0xffff. */
static long fsidiv_round_rate(struct clk *clk, unsigned long rate)
{
	return clk_rate_div_range_round(clk, 1, 0xffff, rate);
}
413 static void fsidiv_disable(struct clk
*clk
)
415 __raw_writel(0, clk
->mapping
->base
);
418 static int fsidiv_enable(struct clk
*clk
)
422 value
= __raw_readl(clk
->mapping
->base
) >> 16;
426 __raw_writel((value
<< 16) | 0x3, clk
->mapping
->base
);
431 static int fsidiv_set_rate(struct clk
*clk
, unsigned long rate
)
435 idx
= (clk
->parent
->rate
/ rate
) & 0xffff;
437 __raw_writel(0, clk
->mapping
->base
);
439 __raw_writel(idx
<< 16, clk
->mapping
->base
);
444 static struct sh_clk_ops fsidiv_clk_ops
= {
445 .recalc
= fsidiv_recalc
,
446 .round_rate
= fsidiv_round_rate
,
447 .set_rate
= fsidiv_set_rate
,
448 .enable
= fsidiv_enable
,
449 .disable
= fsidiv_disable
,
452 int __init
sh_clk_fsidiv_register(struct clk
*clks
, int nr
)
454 struct clk_mapping
*map
;
457 for (i
= 0; i
< nr
; i
++) {
459 map
= kzalloc(sizeof(struct clk_mapping
), GFP_KERNEL
);
461 pr_err("%s: unable to alloc memory\n", __func__
);
465 /* clks[i].enable_reg came from SH_CLK_FSIDIV() */
466 map
->phys
= (phys_addr_t
)clks
[i
].enable_reg
;
469 clks
[i
].enable_reg
= 0; /* remove .enable_reg */
470 clks
[i
].ops
= &fsidiv_clk_ops
;
471 clks
[i
].mapping
= map
;
473 clk_register(&clks
[i
]);