/*
 * Ingenic SoC CGU driver
 *
 * Copyright (c) 2013-2015 Imagination Technologies
 * Author: Paul Burton <paul.burton@mips.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/delay.h>
#include <linux/math64.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "cgu.h"

#define MHZ (1000 * 1000)
/**
 * ingenic_cgu_gate_get() - get the value of clock gate register bit
 * @cgu: reference to the CGU whose registers should be read
 * @info: info struct describing the gate bit
 *
 * Retrieves the state of the clock gate bit described by info. The
 * caller must hold cgu->lock.
 *
 * Return: true if the clock is gated, else false.
 */
static inline bool
ingenic_cgu_gate_get(struct ingenic_cgu *cgu,
		     const struct ingenic_cgu_gate_info *info)
{
	return !!(readl(cgu->base + info->reg) & BIT(info->bit))
		^ info->clear_to_gate;
}
/**
 * ingenic_cgu_gate_set() - set the value of clock gate register bit
 * @cgu: reference to the CGU whose registers should be modified
 * @info: info struct describing the gate bit
 * @val: non-zero to gate a clock, otherwise zero
 *
 * Sets the given gate bit in order to gate or ungate a clock.
 *
 * The caller must hold cgu->lock.
 */
static inline void
ingenic_cgu_gate_set(struct ingenic_cgu *cgu,
		     const struct ingenic_cgu_gate_info *info, bool val)
{
	u32 clkgr = readl(cgu->base + info->reg);

	if (val ^ info->clear_to_gate)
		clkgr |= BIT(info->bit);
	else
		clkgr &= ~BIT(info->bit);

	writel(clkgr, cgu->base + info->reg);
}
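/*
 * Illustrative sketch only: the register name and bit number below are
 * hypothetical, but the fields match those used by the helpers above.
 * A SoC clock table might describe a gate as:
 *
 *	.gate = {
 *		.reg = CGU_REG_CLKGR,
 *		.bit = 5,
 *		.clear_to_gate = false,
 *	},
 *
 * With clear_to_gate = false, gating the clock sets the bit; with
 * clear_to_gate = true the hardware sense is inverted and gating clears
 * the bit, which is why both helpers XOR with info->clear_to_gate.
 */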
/*
 * PLL operations
 */

static unsigned long
ingenic_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info;
	const struct ingenic_cgu_pll_info *pll_info;
	unsigned m, n, od_enc, od;
	bool bypass, enable;
	unsigned long flags;
	u32 ctl;

	clk_info = &cgu->clock_info[ingenic_clk->idx];
	BUG_ON(clk_info->type != CGU_CLK_PLL);
	pll_info = &clk_info->pll;

	spin_lock_irqsave(&cgu->lock, flags);
	ctl = readl(cgu->base + pll_info->reg);
	spin_unlock_irqrestore(&cgu->lock, flags);

	/* decode the multiplier (m), divider (n) and output divider (od) */
	m = (ctl >> pll_info->m_shift) & GENMASK(pll_info->m_bits - 1, 0);
	m += pll_info->m_offset;
	n = (ctl >> pll_info->n_shift) & GENMASK(pll_info->n_bits - 1, 0);
	n += pll_info->n_offset;
	od_enc = ctl >> pll_info->od_shift;
	od_enc &= GENMASK(pll_info->od_bits - 1, 0);
	bypass = !pll_info->no_bypass_bit &&
		 !!(ctl & BIT(pll_info->bypass_bit));
	enable = !!(ctl & BIT(pll_info->enable_bit));

	if (bypass)
		return parent_rate;

	if (!enable)
		return 0;

	/* look up the output divider from its hardware encoding */
	for (od = 0; od < pll_info->od_max; od++) {
		if (pll_info->od_encoding[od] == od_enc)
			break;
	}
	BUG_ON(od == pll_info->od_max);
	od++;

	return div_u64((u64)parent_rate * m, n * od);
}
static unsigned long
ingenic_pll_calc(const struct ingenic_cgu_clk_info *clk_info,
		 unsigned long rate, unsigned long parent_rate,
		 unsigned *pm, unsigned *pn, unsigned *pod)
{
	const struct ingenic_cgu_pll_info *pll_info;
	unsigned m, n, od = 1;

	pll_info = &clk_info->pll;

	/*
	 * The frequency after the input divider must be between 10 and 50 MHz.
	 * The highest divider yields the best resolution.
	 */
	n = parent_rate / (10 * MHZ);
	n = min_t(unsigned, n, 1 << clk_info->pll.n_bits);
	n = max_t(unsigned, n, pll_info->n_offset);

	m = (rate / MHZ) * od * n / (parent_rate / MHZ);
	m = min_t(unsigned, m, 1 << clk_info->pll.m_bits);
	m = max_t(unsigned, m, pll_info->m_offset);

	if (pm)
		*pm = m;
	if (pn)
		*pn = n;
	if (pod)
		*pod = od;

	return div_u64((u64)parent_rate * m, n * od);
}
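/*
 * Worked example of the calculation above (the numbers are illustrative,
 * not taken from any particular SoC): with a 48 MHz parent and a requested
 * rate of 1008 MHz, n = 48 MHz / 10 MHz = 4, which keeps the phase
 * comparator input at 12 MHz (inside the 10-50 MHz window), od = 1, and
 * m = 1008 * 1 * 4 / 48 = 84, giving 48 MHz * 84 / (4 * 1) = 1008 MHz.
 */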
static inline const struct ingenic_cgu_clk_info *to_clk_info(
		struct ingenic_clk *ingenic_clk)
{
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info;

	clk_info = &cgu->clock_info[ingenic_clk->idx];
	BUG_ON(clk_info->type != CGU_CLK_PLL);

	return clk_info;
}
static long
ingenic_pll_round_rate(struct clk_hw *hw, unsigned long req_rate,
		       unsigned long *prate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);

	return ingenic_pll_calc(clk_info, req_rate, *prate, NULL, NULL, NULL);
}
static int
ingenic_pll_set_rate(struct clk_hw *hw, unsigned long req_rate,
		     unsigned long parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
	unsigned long rate, flags;
	unsigned int m, n, od;
	u32 ctl;

	rate = ingenic_pll_calc(clk_info, req_rate, parent_rate,
				&m, &n, &od);
	if (rate != req_rate)
		pr_info("ingenic-cgu: request '%s' rate %luHz, actual %luHz\n",
			clk_info->name, req_rate, rate);

	spin_lock_irqsave(&cgu->lock, flags);
	ctl = readl(cgu->base + pll_info->reg);

	ctl &= ~(GENMASK(pll_info->m_bits - 1, 0) << pll_info->m_shift);
	ctl |= (m - pll_info->m_offset) << pll_info->m_shift;

	ctl &= ~(GENMASK(pll_info->n_bits - 1, 0) << pll_info->n_shift);
	ctl |= (n - pll_info->n_offset) << pll_info->n_shift;

	ctl &= ~(GENMASK(pll_info->od_bits - 1, 0) << pll_info->od_shift);
	ctl |= pll_info->od_encoding[od - 1] << pll_info->od_shift;

	writel(ctl, cgu->base + pll_info->reg);
	spin_unlock_irqrestore(&cgu->lock, flags);

	return 0;
}
static int ingenic_pll_enable(struct clk_hw *hw)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
	const unsigned int timeout = 100;
	unsigned long flags;
	unsigned int i;
	u32 ctl;

	spin_lock_irqsave(&cgu->lock, flags);
	ctl = readl(cgu->base + pll_info->reg);

	ctl &= ~BIT(pll_info->bypass_bit);
	ctl |= BIT(pll_info->enable_bit);

	writel(ctl, cgu->base + pll_info->reg);

	/* wait for the PLL to stabilise */
	for (i = 0; i < timeout; i++) {
		ctl = readl(cgu->base + pll_info->reg);
		if (ctl & BIT(pll_info->stable_bit))
			break;
		mdelay(1);
	}

	spin_unlock_irqrestore(&cgu->lock, flags);

	if (i == timeout)
		return -EBUSY;

	return 0;
}
static void ingenic_pll_disable(struct clk_hw *hw)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
	unsigned long flags;
	u32 ctl;

	spin_lock_irqsave(&cgu->lock, flags);
	ctl = readl(cgu->base + pll_info->reg);

	ctl &= ~BIT(pll_info->enable_bit);

	writel(ctl, cgu->base + pll_info->reg);
	spin_unlock_irqrestore(&cgu->lock, flags);
}
static int ingenic_pll_is_enabled(struct clk_hw *hw)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
	unsigned long flags;
	u32 ctl;

	spin_lock_irqsave(&cgu->lock, flags);
	ctl = readl(cgu->base + pll_info->reg);
	spin_unlock_irqrestore(&cgu->lock, flags);

	return !!(ctl & BIT(pll_info->enable_bit));
}
static const struct clk_ops ingenic_pll_ops = {
	.recalc_rate = ingenic_pll_recalc_rate,
	.round_rate = ingenic_pll_round_rate,
	.set_rate = ingenic_pll_set_rate,

	.enable = ingenic_pll_enable,
	.disable = ingenic_pll_disable,
	.is_enabled = ingenic_pll_is_enabled,
};
/*
 * Operations for all non-PLL clocks
 */

static u8 ingenic_clk_get_parent(struct clk_hw *hw)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info;
	u32 reg;
	u8 i, hw_idx, idx = 0;

	clk_info = &cgu->clock_info[ingenic_clk->idx];

	if (clk_info->type & CGU_CLK_MUX) {
		reg = readl(cgu->base + clk_info->mux.reg);
		hw_idx = (reg >> clk_info->mux.shift) &
			 GENMASK(clk_info->mux.bits - 1, 0);

		/*
		 * Convert the hardware index to the parent index by skipping
		 * over any -1's in the parents array.
		 */
		for (i = 0; i < hw_idx; i++) {
			if (clk_info->parents[i] != -1)
				idx++;
		}
	}

	return idx;
}
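/*
 * Example of the index conversion above (the parents array and its
 * EXT/PLL entries are hypothetical placeholders): with
 * clk_info->parents = { EXT, -1, PLL, -1 }, a hardware mux value of 2
 * skips the -1 at position 1 and maps to parent index 1, i.e. the PLL.
 */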
static int ingenic_clk_set_parent(struct clk_hw *hw, u8 idx)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info;
	unsigned long flags;
	u8 curr_idx, hw_idx, num_poss;
	u32 reg, mask;

	clk_info = &cgu->clock_info[ingenic_clk->idx];

	if (clk_info->type & CGU_CLK_MUX) {
		/*
		 * Convert the parent index to the hardware index by adding
		 * 1 for any -1 in the parents array preceding the given
		 * index. That is, we want the index of the idx'th entry in
		 * clk_info->parents which does not equal -1.
		 */
		hw_idx = curr_idx = 0;
		num_poss = 1 << clk_info->mux.bits;
		for (; hw_idx < num_poss; hw_idx++) {
			if (clk_info->parents[hw_idx] == -1)
				continue;
			if (curr_idx == idx)
				break;
			curr_idx++;
		}

		/* idx should always be a valid parent */
		BUG_ON(curr_idx != idx);

		mask = GENMASK(clk_info->mux.bits - 1, 0);
		mask <<= clk_info->mux.shift;

		spin_lock_irqsave(&cgu->lock, flags);

		/* write the register */
		reg = readl(cgu->base + clk_info->mux.reg);
		reg &= ~mask;
		reg |= hw_idx << clk_info->mux.shift;
		writel(reg, cgu->base + clk_info->mux.reg);

		spin_unlock_irqrestore(&cgu->lock, flags);

		return 0;
	}

	return idx ? -EINVAL : 0;
}
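/*
 * Example of the reverse conversion above (same hypothetical parents
 * array as the ingenic_clk_get_parent() example): selecting parent
 * index 1 with clk_info->parents = { EXT, -1, PLL, -1 } walks past the
 * -1 at hardware position 1 and programs hw_idx = 2 into the mux field.
 */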
static unsigned long
ingenic_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info;
	unsigned long rate = parent_rate;
	u32 div_reg, div;

	clk_info = &cgu->clock_info[ingenic_clk->idx];

	if (clk_info->type & CGU_CLK_DIV) {
		div_reg = readl(cgu->base + clk_info->div.reg);
		div = (div_reg >> clk_info->div.shift) &
		      GENMASK(clk_info->div.bits - 1, 0);

		/* the register field stores the divider minus one */
		div += 1;
		div *= clk_info->div.div;

		rate /= div;
	} else if (clk_info->type & CGU_CLK_FIXDIV) {
		rate /= clk_info->fixdiv.div;
	}

	return rate;
}
static unsigned
ingenic_clk_calc_div(const struct ingenic_cgu_clk_info *clk_info,
		     unsigned long parent_rate, unsigned long req_rate)
{
	unsigned div;

	/* calculate the divide */
	div = DIV_ROUND_UP(parent_rate, req_rate);

	/* and impose hardware constraints */
	div = min_t(unsigned, div, 1 << clk_info->div.bits);
	div = max_t(unsigned, div, 1);

	/*
	 * If the divider value itself must be divided before being written to
	 * the divider register, we must ensure we don't have any bits set that
	 * would be lost as a result of doing so.
	 */
	div /= clk_info->div.div;
	div *= clk_info->div.div;

	return div;
}
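/*
 * Worked example of the rounding above (the values are illustrative):
 * with clk_info->div.div = 4, parent_rate = 200 MHz and
 * req_rate = 30 MHz, DIV_ROUND_UP() gives 7, which is then rounded down
 * to the nearest multiple of 4, i.e. 4, so that the value written to the
 * register field ((div / 4) - 1 = 0) loses no bits.
 */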
static long
ingenic_clk_round_rate(struct clk_hw *hw, unsigned long req_rate,
		       unsigned long *parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info;
	unsigned int div = 1;

	clk_info = &cgu->clock_info[ingenic_clk->idx];

	if (clk_info->type & CGU_CLK_DIV)
		div = ingenic_clk_calc_div(clk_info, *parent_rate, req_rate);
	else if (clk_info->type & CGU_CLK_FIXDIV)
		div = clk_info->fixdiv.div;

	return DIV_ROUND_UP(*parent_rate, div);
}
static int
ingenic_clk_set_rate(struct clk_hw *hw, unsigned long req_rate,
		     unsigned long parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info;
	const unsigned timeout = 100;
	unsigned long rate, flags;
	unsigned div, i;
	u32 reg, mask;
	int ret = 0;

	clk_info = &cgu->clock_info[ingenic_clk->idx];

	if (clk_info->type & CGU_CLK_DIV) {
		div = ingenic_clk_calc_div(clk_info, parent_rate, req_rate);
		rate = DIV_ROUND_UP(parent_rate, div);

		if (rate != req_rate)
			return -EINVAL;

		spin_lock_irqsave(&cgu->lock, flags);
		reg = readl(cgu->base + clk_info->div.reg);

		/* update the divide */
		mask = GENMASK(clk_info->div.bits - 1, 0);
		reg &= ~(mask << clk_info->div.shift);
		reg |= ((div / clk_info->div.div) - 1) << clk_info->div.shift;

		/* clear the stop bit */
		if (clk_info->div.stop_bit != -1)
			reg &= ~BIT(clk_info->div.stop_bit);

		/* set the change enable bit */
		if (clk_info->div.ce_bit != -1)
			reg |= BIT(clk_info->div.ce_bit);

		/* update the hardware */
		writel(reg, cgu->base + clk_info->div.reg);

		/* wait for the change to take effect */
		if (clk_info->div.busy_bit != -1) {
			for (i = 0; i < timeout; i++) {
				reg = readl(cgu->base + clk_info->div.reg);
				if (!(reg & BIT(clk_info->div.busy_bit)))
					break;
				mdelay(1);
			}
			if (i == timeout)
				ret = -EBUSY;
		}

		spin_unlock_irqrestore(&cgu->lock, flags);
		return ret;
	}

	return -EINVAL;
}
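/*
 * Example of the divider encoding used above (illustrative values): for a
 * divider of 6 with clk_info->div.div = 1, the field written to the
 * register is (6 / 1) - 1 = 5; ingenic_clk_recalc_rate() reverses this by
 * adding 1 back and multiplying by clk_info->div.div.
 */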
static int ingenic_clk_enable(struct clk_hw *hw)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info;
	unsigned long flags;

	clk_info = &cgu->clock_info[ingenic_clk->idx];

	if (clk_info->type & CGU_CLK_GATE) {
		/* ungate the clock */
		spin_lock_irqsave(&cgu->lock, flags);
		ingenic_cgu_gate_set(cgu, &clk_info->gate, false);
		spin_unlock_irqrestore(&cgu->lock, flags);

		if (clk_info->gate.delay_us)
			udelay(clk_info->gate.delay_us);
	}

	return 0;
}
static void ingenic_clk_disable(struct clk_hw *hw)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info;
	unsigned long flags;

	clk_info = &cgu->clock_info[ingenic_clk->idx];

	if (clk_info->type & CGU_CLK_GATE) {
		/* gate the clock */
		spin_lock_irqsave(&cgu->lock, flags);
		ingenic_cgu_gate_set(cgu, &clk_info->gate, true);
		spin_unlock_irqrestore(&cgu->lock, flags);
	}
}
static int ingenic_clk_is_enabled(struct clk_hw *hw)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info;
	unsigned long flags;
	int enabled = 1;

	clk_info = &cgu->clock_info[ingenic_clk->idx];

	if (clk_info->type & CGU_CLK_GATE) {
		spin_lock_irqsave(&cgu->lock, flags);
		enabled = !ingenic_cgu_gate_get(cgu, &clk_info->gate);
		spin_unlock_irqrestore(&cgu->lock, flags);
	}

	return enabled;
}
static const struct clk_ops ingenic_clk_ops = {
	.get_parent = ingenic_clk_get_parent,
	.set_parent = ingenic_clk_set_parent,

	.recalc_rate = ingenic_clk_recalc_rate,
	.round_rate = ingenic_clk_round_rate,
	.set_rate = ingenic_clk_set_rate,

	.enable = ingenic_clk_enable,
	.disable = ingenic_clk_disable,
	.is_enabled = ingenic_clk_is_enabled,
};
/*
 * Setup functions.
 */

static int ingenic_register_clock(struct ingenic_cgu *cgu, unsigned idx)
{
	const struct ingenic_cgu_clk_info *clk_info = &cgu->clock_info[idx];
	struct clk_init_data clk_init;
	struct ingenic_clk *ingenic_clk = NULL;
	struct clk *clk, *parent;
	const char *parent_names[4];
	unsigned caps, i, num_possible;
	int err = -EINVAL;

	BUILD_BUG_ON(ARRAY_SIZE(clk_info->parents) > ARRAY_SIZE(parent_names));

	if (clk_info->type == CGU_CLK_EXT) {
		clk = of_clk_get_by_name(cgu->np, clk_info->name);
		if (IS_ERR(clk)) {
			pr_err("%s: no external clock '%s' provided\n",
			       __func__, clk_info->name);
			err = -ENODEV;
			goto out;
		}
		err = clk_register_clkdev(clk, clk_info->name, NULL);
		if (err) {
			clk_put(clk);
			goto out;
		}
		cgu->clocks.clks[idx] = clk;
		return 0;
	}

	if (!clk_info->type) {
		pr_err("%s: no clock type specified for '%s'\n", __func__,
		       clk_info->name);
		goto out;
	}

	ingenic_clk = kzalloc(sizeof(*ingenic_clk), GFP_KERNEL);
	if (!ingenic_clk) {
		err = -ENOMEM;
		goto out;
	}

	ingenic_clk->hw.init = &clk_init;
	ingenic_clk->cgu = cgu;
	ingenic_clk->idx = idx;

	clk_init.name = clk_info->name;
	clk_init.flags = 0;
	clk_init.parent_names = parent_names;

	caps = clk_info->type;

	if (caps & (CGU_CLK_MUX | CGU_CLK_CUSTOM)) {
		clk_init.num_parents = 0;

		if (caps & CGU_CLK_MUX)
			num_possible = 1 << clk_info->mux.bits;
		else
			num_possible = ARRAY_SIZE(clk_info->parents);

		for (i = 0; i < num_possible; i++) {
			if (clk_info->parents[i] == -1)
				continue;

			parent = cgu->clocks.clks[clk_info->parents[i]];
			parent_names[clk_init.num_parents] =
				__clk_get_name(parent);
			clk_init.num_parents++;
		}

		BUG_ON(!clk_init.num_parents);
		BUG_ON(clk_init.num_parents > ARRAY_SIZE(parent_names));
	} else {
		BUG_ON(clk_info->parents[0] == -1);
		clk_init.num_parents = 1;
		parent = cgu->clocks.clks[clk_info->parents[0]];
		parent_names[0] = __clk_get_name(parent);
	}

	if (caps & CGU_CLK_CUSTOM) {
		clk_init.ops = clk_info->custom.clk_ops;

		caps &= ~CGU_CLK_CUSTOM;

		if (caps) {
			pr_err("%s: custom clock may not be combined with type 0x%x\n",
			       __func__, caps);
			goto out;
		}
	} else if (caps & CGU_CLK_PLL) {
		clk_init.ops = &ingenic_pll_ops;
		clk_init.flags |= CLK_SET_RATE_GATE;

		caps &= ~CGU_CLK_PLL;

		if (caps) {
			pr_err("%s: PLL may not be combined with type 0x%x\n",
			       __func__, caps);
			goto out;
		}
	} else {
		clk_init.ops = &ingenic_clk_ops;
	}

	/* nothing to do for gates or fixed dividers */
	caps &= ~(CGU_CLK_GATE | CGU_CLK_FIXDIV);

	if (caps & CGU_CLK_MUX) {
		if (!(caps & CGU_CLK_MUX_GLITCHFREE))
			clk_init.flags |= CLK_SET_PARENT_GATE;

		caps &= ~(CGU_CLK_MUX | CGU_CLK_MUX_GLITCHFREE);
	}

	if (caps & CGU_CLK_DIV) {
		caps &= ~CGU_CLK_DIV;
	} else {
		/* pass rate changes to the parent clock */
		clk_init.flags |= CLK_SET_RATE_PARENT;
	}

	if (caps) {
		pr_err("%s: unknown clock type 0x%x\n", __func__, caps);
		goto out;
	}

	clk = clk_register(NULL, &ingenic_clk->hw);
	if (IS_ERR(clk)) {
		pr_err("%s: failed to register clock '%s'\n", __func__,
		       clk_info->name);
		err = PTR_ERR(clk);
		goto out;
	}

	err = clk_register_clkdev(clk, clk_info->name, NULL);
	if (err)
		goto out;

	cgu->clocks.clks[idx] = clk;
out:
	if (err)
		kfree(ingenic_clk);
	return err;
}
struct ingenic_cgu *
ingenic_cgu_new(const struct ingenic_cgu_clk_info *clock_info,
		unsigned num_clocks, struct device_node *np)
{
	struct ingenic_cgu *cgu;

	cgu = kzalloc(sizeof(*cgu), GFP_KERNEL);
	if (!cgu)
		goto err_out;

	cgu->base = of_iomap(np, 0);
	if (!cgu->base) {
		pr_err("%s: failed to map CGU registers\n", __func__);
		goto err_out_free;
	}

	cgu->np = np;
	cgu->clock_info = clock_info;
	cgu->clocks.clk_num = num_clocks;

	spin_lock_init(&cgu->lock);

	return cgu;

err_out_free:
	kfree(cgu);
err_out:
	return NULL;
}
int ingenic_cgu_register_clocks(struct ingenic_cgu *cgu)
{
	unsigned i;
	int err;

	cgu->clocks.clks = kcalloc(cgu->clocks.clk_num, sizeof(struct clk *),
				   GFP_KERNEL);
	if (!cgu->clocks.clks) {
		err = -ENOMEM;
		goto err_out;
	}

	for (i = 0; i < cgu->clocks.clk_num; i++) {
		err = ingenic_register_clock(cgu, i);
		if (err)
			goto err_out_unregister;
	}

	err = of_clk_add_provider(cgu->np, of_clk_src_onecell_get,
				  &cgu->clocks);
	if (err)
		goto err_out_unregister;

	return 0;

err_out_unregister:
	for (i = 0; i < cgu->clocks.clk_num; i++) {
		if (!cgu->clocks.clks[i])
			continue;
		if (cgu->clock_info[i].type & CGU_CLK_EXT)
			clk_put(cgu->clocks.clks[i]);
		else
			clk_unregister(cgu->clocks.clks[i]);
	}
	kfree(cgu->clocks.clks);
err_out:
	return err;
}
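/*
 * A SoC-specific CGU driver might use the two functions above roughly as
 * in the sketch below. This is illustrative only: the clock table
 * "example_cgu_clocks", the "example,cgu" compatible string and the init
 * function name are hypothetical.
 *
 *	static void __init example_cgu_init(struct device_node *np)
 *	{
 *		struct ingenic_cgu *cgu;
 *		int retval;
 *
 *		cgu = ingenic_cgu_new(example_cgu_clocks,
 *				      ARRAY_SIZE(example_cgu_clocks), np);
 *		if (!cgu) {
 *			pr_err("example-cgu: failed to initialise CGU\n");
 *			return;
 *		}
 *
 *		retval = ingenic_cgu_register_clocks(cgu);
 *		if (retval)
 *			pr_err("example-cgu: failed to register CGU clocks\n");
 *	}
 *	CLK_OF_DECLARE(example_cgu, "example,cgu", example_cgu_init);
 */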