/*
 * Ingenic SoC CGU driver
 *
 * Copyright (c) 2013-2015 Imagination Technologies
 * Author: Paul Burton <paul.burton@mips.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/delay.h>
#include <linux/math64.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "cgu.h"

#define MHZ (1000 * 1000)

/**
 * ingenic_cgu_gate_get() - get the value of clock gate register bit
 * @cgu: reference to the CGU whose registers should be read
 * @info: info struct describing the gate bit
 *
 * Retrieves the state of the clock gate bit described by info. The
 * caller must hold cgu->lock.
 *
 * Return: true if the gate bit is set, else false.
 */
static inline bool
ingenic_cgu_gate_get(struct ingenic_cgu *cgu,
		     const struct ingenic_cgu_gate_info *info)
{
	return readl(cgu->base + info->reg) & BIT(info->bit);
}

/**
 * ingenic_cgu_gate_set() - set the value of clock gate register bit
 * @cgu: reference to the CGU whose registers should be modified
 * @info: info struct describing the gate bit
 * @val: non-zero to gate a clock, otherwise zero
 *
 * Sets the given gate bit in order to gate or ungate a clock.
 *
 * The caller must hold cgu->lock.
 */
static inline void
ingenic_cgu_gate_set(struct ingenic_cgu *cgu,
		     const struct ingenic_cgu_gate_info *info, bool val)
{
	u32 clkgr = readl(cgu->base + info->reg);

	if (val)
		clkgr |= BIT(info->bit);
	else
		clkgr &= ~BIT(info->bit);

	writel(clkgr, cgu->base + info->reg);
}
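
/*
 * Illustrative usage (not part of the driver logic): callers are expected
 * to serialise register access themselves, e.g.
 *
 *	spin_lock_irqsave(&cgu->lock, flags);
 *	ingenic_cgu_gate_set(cgu, &clk_info->gate, true);
 *	spin_unlock_irqrestore(&cgu->lock, flags);
 *
 * as ingenic_clk_enable()/ingenic_clk_disable() do below.
 */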

/*
 * PLL operations
 */

static unsigned long
ingenic_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info;
	const struct ingenic_cgu_pll_info *pll_info;
	unsigned m, n, od_enc, od;
	bool bypass, enable;
	unsigned long flags;
	u32 ctl;

	clk_info = &cgu->clock_info[ingenic_clk->idx];
	BUG_ON(clk_info->type != CGU_CLK_PLL);
	pll_info = &clk_info->pll;

	spin_lock_irqsave(&cgu->lock, flags);
	ctl = readl(cgu->base + pll_info->reg);
	spin_unlock_irqrestore(&cgu->lock, flags);

	m = (ctl >> pll_info->m_shift) & GENMASK(pll_info->m_bits - 1, 0);
	m += pll_info->m_offset;
	n = (ctl >> pll_info->n_shift) & GENMASK(pll_info->n_bits - 1, 0);
	n += pll_info->n_offset;
	od_enc = ctl >> pll_info->od_shift;
	od_enc &= GENMASK(pll_info->od_bits - 1, 0);
	bypass = !pll_info->no_bypass_bit &&
		 !!(ctl & BIT(pll_info->bypass_bit));
	enable = !!(ctl & BIT(pll_info->enable_bit));

	if (bypass)
		return parent_rate;

	if (!enable)
		return 0;

	/* find which output divider (od) the encoded value corresponds to */
	for (od = 0; od < pll_info->od_max; od++) {
		if (pll_info->od_encoding[od] == od_enc)
			break;
	}
	BUG_ON(od == pll_info->od_max);
	od++;

	return div_u64((u64)parent_rate * m, n * od);
}
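
/*
 * Worked example of the rate calculation above (illustrative numbers only):
 * with a 12 MHz parent, m = 50, n = 1 and od = 1, the PLL output is
 * 12000000 * 50 / (1 * 1) = 600 MHz.
 */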

static unsigned long
ingenic_pll_calc(const struct ingenic_cgu_clk_info *clk_info,
		 unsigned long rate, unsigned long parent_rate,
		 unsigned *pm, unsigned *pn, unsigned *pod)
{
	const struct ingenic_cgu_pll_info *pll_info;
	unsigned m, n, od = 1;

	pll_info = &clk_info->pll;

	/*
	 * The frequency after the input divider must be between 10 and 50 MHz.
	 * The highest divider yields the best resolution.
	 */
	n = parent_rate / (10 * MHZ);
	n = min_t(unsigned, n, 1 << clk_info->pll.n_bits);
	n = max_t(unsigned, n, pll_info->n_offset);

	m = (rate / MHZ) * od * n / (parent_rate / MHZ);
	m = min_t(unsigned, m, 1 << clk_info->pll.m_bits);
	m = max_t(unsigned, m, pll_info->m_offset);

	if (pm)
		*pm = m;
	if (pn)
		*pn = n;
	if (pod)
		*pod = od;

	return div_u64((u64)parent_rate * m, n * od);
}
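
/*
 * Example of the above (illustrative numbers only): for a 48 MHz parent and
 * a requested 600 MHz, n = 48000000 / 10000000 = 4 (so the post-divider
 * frequency is 12 MHz, within the 10-50 MHz window), od = 1, and
 * m = (600 * 1 * 4) / 48 = 50, giving 48 MHz * 50 / (4 * 1) = 600 MHz.
 */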

static inline const struct ingenic_cgu_clk_info *to_clk_info(
		struct ingenic_clk *ingenic_clk)
{
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info;

	clk_info = &cgu->clock_info[ingenic_clk->idx];
	BUG_ON(clk_info->type != CGU_CLK_PLL);

	return clk_info;
}

static long
ingenic_pll_round_rate(struct clk_hw *hw, unsigned long req_rate,
		       unsigned long *prate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);

	return ingenic_pll_calc(clk_info, req_rate, *prate, NULL, NULL, NULL);
}

static int
ingenic_pll_set_rate(struct clk_hw *hw, unsigned long req_rate,
		     unsigned long parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
	unsigned long rate, flags;
	unsigned int m, n, od;
	u32 ctl;

	rate = ingenic_pll_calc(clk_info, req_rate, parent_rate,
				&m, &n, &od);
	if (rate != req_rate)
		pr_info("ingenic-cgu: request '%s' rate %luHz, actual %luHz\n",
			clk_info->name, req_rate, rate);

	spin_lock_irqsave(&cgu->lock, flags);
	ctl = readl(cgu->base + pll_info->reg);

	ctl &= ~(GENMASK(pll_info->m_bits - 1, 0) << pll_info->m_shift);
	ctl |= (m - pll_info->m_offset) << pll_info->m_shift;

	ctl &= ~(GENMASK(pll_info->n_bits - 1, 0) << pll_info->n_shift);
	ctl |= (n - pll_info->n_offset) << pll_info->n_shift;

	ctl &= ~(GENMASK(pll_info->od_bits - 1, 0) << pll_info->od_shift);
	ctl |= pll_info->od_encoding[od - 1] << pll_info->od_shift;

	writel(ctl, cgu->base + pll_info->reg);
	spin_unlock_irqrestore(&cgu->lock, flags);

	return 0;
}

static int ingenic_pll_enable(struct clk_hw *hw)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
	const unsigned int timeout = 100;
	unsigned long flags;
	unsigned int i;
	u32 ctl;

	spin_lock_irqsave(&cgu->lock, flags);
	ctl = readl(cgu->base + pll_info->reg);

	ctl &= ~BIT(pll_info->bypass_bit);
	ctl |= BIT(pll_info->enable_bit);

	writel(ctl, cgu->base + pll_info->reg);

	/* wait for the PLL to stabilise */
	for (i = 0; i < timeout; i++) {
		ctl = readl(cgu->base + pll_info->reg);
		if (ctl & BIT(pll_info->stable_bit))
			break;
		mdelay(1);
	}

	spin_unlock_irqrestore(&cgu->lock, flags);

	if (i == timeout)
		return -EBUSY;

	return 0;
}

static void ingenic_pll_disable(struct clk_hw *hw)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
	unsigned long flags;
	u32 ctl;

	spin_lock_irqsave(&cgu->lock, flags);
	ctl = readl(cgu->base + pll_info->reg);

	ctl &= ~BIT(pll_info->enable_bit);

	writel(ctl, cgu->base + pll_info->reg);
	spin_unlock_irqrestore(&cgu->lock, flags);
}

static int ingenic_pll_is_enabled(struct clk_hw *hw)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
	unsigned long flags;
	u32 ctl;

	spin_lock_irqsave(&cgu->lock, flags);
	ctl = readl(cgu->base + pll_info->reg);
	spin_unlock_irqrestore(&cgu->lock, flags);

	return !!(ctl & BIT(pll_info->enable_bit));
}

static const struct clk_ops ingenic_pll_ops = {
	.recalc_rate = ingenic_pll_recalc_rate,
	.round_rate = ingenic_pll_round_rate,
	.set_rate = ingenic_pll_set_rate,

	.enable = ingenic_pll_enable,
	.disable = ingenic_pll_disable,
	.is_enabled = ingenic_pll_is_enabled,
};

/*
 * Operations for all non-PLL clocks
 */

static u8 ingenic_clk_get_parent(struct clk_hw *hw)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info;
	u32 reg;
	u8 i, hw_idx, idx = 0;

	clk_info = &cgu->clock_info[ingenic_clk->idx];

	if (clk_info->type & CGU_CLK_MUX) {
		reg = readl(cgu->base + clk_info->mux.reg);
		hw_idx = (reg >> clk_info->mux.shift) &
			 GENMASK(clk_info->mux.bits - 1, 0);

		/*
		 * Convert the hardware index to the parent index by skipping
		 * over any -1's in the parents array.
		 */
		for (i = 0; i < hw_idx; i++) {
			if (clk_info->parents[i] != -1)
				idx++;
		}
	}

	return idx;
}
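
/*
 * Illustrative example (not from the driver): if a hypothetical mux had
 * clk_info->parents = { CLK_A, -1, CLK_B, -1 }, then hardware index 2
 * (CLK_B) maps to parent index 1, because the single -1 before it is
 * skipped. ingenic_clk_set_parent() below performs the inverse mapping.
 */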

static int ingenic_clk_set_parent(struct clk_hw *hw, u8 idx)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info;
	unsigned long flags;
	u8 curr_idx, hw_idx, num_poss;
	u32 reg, mask;

	clk_info = &cgu->clock_info[ingenic_clk->idx];

	if (clk_info->type & CGU_CLK_MUX) {
		/*
		 * Convert the parent index to the hardware index by adding
		 * 1 for any -1 in the parents array preceding the given
		 * index. That is, we want the index of idx'th entry in
		 * clk_info->parents which does not equal -1.
		 */
		hw_idx = curr_idx = 0;
		num_poss = 1 << clk_info->mux.bits;
		for (; hw_idx < num_poss; hw_idx++) {
			if (clk_info->parents[hw_idx] == -1)
				continue;
			if (curr_idx == idx)
				break;
			curr_idx++;
		}

		/* idx should always be a valid parent */
		BUG_ON(curr_idx != idx);

		mask = GENMASK(clk_info->mux.bits - 1, 0);
		mask <<= clk_info->mux.shift;

		spin_lock_irqsave(&cgu->lock, flags);

		/* write the register */
		reg = readl(cgu->base + clk_info->mux.reg);
		reg &= ~mask;
		reg |= hw_idx << clk_info->mux.shift;
		writel(reg, cgu->base + clk_info->mux.reg);

		spin_unlock_irqrestore(&cgu->lock, flags);
		return 0;
	}

	return idx ? -EINVAL : 0;
}

static unsigned long
ingenic_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info;
	unsigned long rate = parent_rate;
	u32 div_reg, div;

	clk_info = &cgu->clock_info[ingenic_clk->idx];

	if (clk_info->type & CGU_CLK_DIV) {
		div_reg = readl(cgu->base + clk_info->div.reg);
		div = (div_reg >> clk_info->div.shift) &
		      GENMASK(clk_info->div.bits - 1, 0);

		/* the register field holds (div / clk_info->div.div) - 1 */
		div += 1;
		div *= clk_info->div.div;

		rate /= div;
	} else if (clk_info->type & CGU_CLK_FIXDIV) {
		rate /= clk_info->fixdiv.div;
	}

	return rate;
}

static unsigned
ingenic_clk_calc_div(const struct ingenic_cgu_clk_info *clk_info,
		     unsigned long parent_rate, unsigned long req_rate)
{
	unsigned div;

	/* calculate the divide */
	div = DIV_ROUND_UP(parent_rate, req_rate);

	/* and impose hardware constraints */
	div = min_t(unsigned, div, 1 << clk_info->div.bits);
	div = max_t(unsigned, div, 1);

	/*
	 * If the divider value itself must be divided before being written to
	 * the divider register, we must ensure we don't have any bits set that
	 * would be lost as a result of doing so.
	 */
	div /= clk_info->div.div;
	div *= clk_info->div.div;

	return div;
}
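
/*
 * Worked example (illustrative numbers only): parent_rate = 48 MHz,
 * req_rate = 10 MHz, div.bits = 4, div.div = 2. DIV_ROUND_UP gives 5,
 * which is clamped to [1, 16] and then rounded down to a multiple of
 * div.div, yielding div = 4 and an actual rate of 12 MHz.
 */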

static long
ingenic_clk_round_rate(struct clk_hw *hw, unsigned long req_rate,
		       unsigned long *parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info;
	long rate = *parent_rate;

	clk_info = &cgu->clock_info[ingenic_clk->idx];

	if (clk_info->type & CGU_CLK_DIV)
		rate /= ingenic_clk_calc_div(clk_info, *parent_rate, req_rate);
	else if (clk_info->type & CGU_CLK_FIXDIV)
		rate /= clk_info->fixdiv.div;

	return rate;
}

static int
ingenic_clk_set_rate(struct clk_hw *hw, unsigned long req_rate,
		     unsigned long parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info;
	const unsigned timeout = 100;
	unsigned long rate, flags;
	unsigned div, i;
	u32 reg, mask;
	int ret = 0;

	clk_info = &cgu->clock_info[ingenic_clk->idx];

	if (clk_info->type & CGU_CLK_DIV) {
		div = ingenic_clk_calc_div(clk_info, parent_rate, req_rate);
		rate = parent_rate / div;

		if (rate != req_rate)
			return -EINVAL;

		spin_lock_irqsave(&cgu->lock, flags);
		reg = readl(cgu->base + clk_info->div.reg);

		/* update the divide */
		mask = GENMASK(clk_info->div.bits - 1, 0);
		reg &= ~(mask << clk_info->div.shift);
		reg |= ((div / clk_info->div.div) - 1) << clk_info->div.shift;

		/* clear the stop bit */
		if (clk_info->div.stop_bit != -1)
			reg &= ~BIT(clk_info->div.stop_bit);

		/* set the change enable bit */
		if (clk_info->div.ce_bit != -1)
			reg |= BIT(clk_info->div.ce_bit);

		/* update the hardware */
		writel(reg, cgu->base + clk_info->div.reg);

		/* wait for the change to take effect */
		if (clk_info->div.busy_bit != -1) {
			for (i = 0; i < timeout; i++) {
				reg = readl(cgu->base + clk_info->div.reg);
				if (!(reg & BIT(clk_info->div.busy_bit)))
					break;
				mdelay(1);
			}
			if (i == timeout)
				ret = -EBUSY;
		}

		spin_unlock_irqrestore(&cgu->lock, flags);
		return ret;
	}

	return -EINVAL;
}
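
/*
 * Note on the divider encoding above (hypothetical values): with
 * div.div = 2, a computed divider of 4 is written to the register field
 * as (4 / 2) - 1 = 1, matching the decode in ingenic_clk_recalc_rate().
 */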

static int ingenic_clk_enable(struct clk_hw *hw)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info;
	unsigned long flags;

	clk_info = &cgu->clock_info[ingenic_clk->idx];

	if (clk_info->type & CGU_CLK_GATE) {
		/* ungate the clock */
		spin_lock_irqsave(&cgu->lock, flags);
		ingenic_cgu_gate_set(cgu, &clk_info->gate, false);
		spin_unlock_irqrestore(&cgu->lock, flags);
	}

	return 0;
}

static void ingenic_clk_disable(struct clk_hw *hw)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info;
	unsigned long flags;

	clk_info = &cgu->clock_info[ingenic_clk->idx];

	if (clk_info->type & CGU_CLK_GATE) {
		/* gate the clock */
		spin_lock_irqsave(&cgu->lock, flags);
		ingenic_cgu_gate_set(cgu, &clk_info->gate, true);
		spin_unlock_irqrestore(&cgu->lock, flags);
	}
}

static int ingenic_clk_is_enabled(struct clk_hw *hw)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info;
	unsigned long flags;
	int enabled = 1;

	clk_info = &cgu->clock_info[ingenic_clk->idx];

	if (clk_info->type & CGU_CLK_GATE) {
		spin_lock_irqsave(&cgu->lock, flags);
		enabled = !ingenic_cgu_gate_get(cgu, &clk_info->gate);
		spin_unlock_irqrestore(&cgu->lock, flags);
	}

	return enabled;
}

static const struct clk_ops ingenic_clk_ops = {
	.get_parent = ingenic_clk_get_parent,
	.set_parent = ingenic_clk_set_parent,

	.recalc_rate = ingenic_clk_recalc_rate,
	.round_rate = ingenic_clk_round_rate,
	.set_rate = ingenic_clk_set_rate,

	.enable = ingenic_clk_enable,
	.disable = ingenic_clk_disable,
	.is_enabled = ingenic_clk_is_enabled,
};

/*
 * Setup functions.
 */

static int ingenic_register_clock(struct ingenic_cgu *cgu, unsigned idx)
{
	const struct ingenic_cgu_clk_info *clk_info = &cgu->clock_info[idx];
	struct clk_init_data clk_init;
	struct ingenic_clk *ingenic_clk = NULL;
	struct clk *clk, *parent;
	const char *parent_names[4];
	unsigned caps, i, num_possible;
	int err = -EINVAL;

	BUILD_BUG_ON(ARRAY_SIZE(clk_info->parents) > ARRAY_SIZE(parent_names));

	if (clk_info->type == CGU_CLK_EXT) {
		clk = of_clk_get_by_name(cgu->np, clk_info->name);
		if (IS_ERR(clk)) {
			pr_err("%s: no external clock '%s' provided\n",
			       __func__, clk_info->name);
			err = -ENODEV;
			goto out;
		}

		err = clk_register_clkdev(clk, clk_info->name, NULL);
		if (err) {
			clk_put(clk);
			goto out;
		}

		cgu->clocks.clks[idx] = clk;
		return 0;
	}

	if (!clk_info->type) {
		pr_err("%s: no clock type specified for '%s'\n", __func__,
		       clk_info->name);
		goto out;
	}

	ingenic_clk = kzalloc(sizeof(*ingenic_clk), GFP_KERNEL);
	if (!ingenic_clk) {
		err = -ENOMEM;
		goto out;
	}

	ingenic_clk->hw.init = &clk_init;
	ingenic_clk->cgu = cgu;
	ingenic_clk->idx = idx;

	clk_init.name = clk_info->name;
	clk_init.flags = 0;
	clk_init.parent_names = parent_names;

	caps = clk_info->type;

	if (caps & (CGU_CLK_MUX | CGU_CLK_CUSTOM)) {
		clk_init.num_parents = 0;

		if (caps & CGU_CLK_MUX)
			num_possible = 1 << clk_info->mux.bits;
		else
			num_possible = ARRAY_SIZE(clk_info->parents);

		for (i = 0; i < num_possible; i++) {
			if (clk_info->parents[i] == -1)
				continue;

			parent = cgu->clocks.clks[clk_info->parents[i]];
			parent_names[clk_init.num_parents] =
				__clk_get_name(parent);
			clk_init.num_parents++;
		}

		BUG_ON(!clk_init.num_parents);
		BUG_ON(clk_init.num_parents > ARRAY_SIZE(parent_names));
	} else {
		BUG_ON(clk_info->parents[0] == -1);
		clk_init.num_parents = 1;
		parent = cgu->clocks.clks[clk_info->parents[0]];
		parent_names[0] = __clk_get_name(parent);
	}

	if (caps & CGU_CLK_CUSTOM) {
		clk_init.ops = clk_info->custom.clk_ops;

		caps &= ~CGU_CLK_CUSTOM;

		if (caps) {
			pr_err("%s: custom clock may not be combined with type 0x%x\n",
			       __func__, caps);
			goto out;
		}
	} else if (caps & CGU_CLK_PLL) {
		clk_init.ops = &ingenic_pll_ops;
		clk_init.flags |= CLK_SET_RATE_GATE;

		caps &= ~CGU_CLK_PLL;

		if (caps) {
			pr_err("%s: PLL may not be combined with type 0x%x\n",
			       __func__, caps);
			goto out;
		}
	} else {
		clk_init.ops = &ingenic_clk_ops;
	}

	/* nothing to do for gates or fixed dividers */
	caps &= ~(CGU_CLK_GATE | CGU_CLK_FIXDIV);

	if (caps & CGU_CLK_MUX) {
		if (!(caps & CGU_CLK_MUX_GLITCHFREE))
			clk_init.flags |= CLK_SET_PARENT_GATE;

		caps &= ~(CGU_CLK_MUX | CGU_CLK_MUX_GLITCHFREE);
	}

	if (caps & CGU_CLK_DIV) {
		caps &= ~CGU_CLK_DIV;
	} else {
		/* pass rate changes to the parent clock */
		clk_init.flags |= CLK_SET_RATE_PARENT;
	}

	if (caps) {
		pr_err("%s: unknown clock type 0x%x\n", __func__, caps);
		goto out;
	}

	clk = clk_register(NULL, &ingenic_clk->hw);
	if (IS_ERR(clk)) {
		pr_err("%s: failed to register clock '%s'\n", __func__,
		       clk_info->name);
		err = PTR_ERR(clk);
		goto out;
	}

	err = clk_register_clkdev(clk, clk_info->name, NULL);
	if (err)
		goto out;

	cgu->clocks.clks[idx] = clk;
out:
	if (err)
		kfree(ingenic_clk);
	return err;
}

struct ingenic_cgu *
ingenic_cgu_new(const struct ingenic_cgu_clk_info *clock_info,
		unsigned num_clocks, struct device_node *np)
{
	struct ingenic_cgu *cgu;

	cgu = kzalloc(sizeof(*cgu), GFP_KERNEL);
	if (!cgu)
		goto err_out;

	cgu->base = of_iomap(np, 0);
	if (!cgu->base) {
		pr_err("%s: failed to map CGU registers\n", __func__);
		goto err_out_free;
	}

	cgu->np = np;
	cgu->clock_info = clock_info;
	cgu->clocks.clk_num = num_clocks;

	spin_lock_init(&cgu->lock);

	return cgu;

err_out_free:
	kfree(cgu);
err_out:
	return NULL;
}

int ingenic_cgu_register_clocks(struct ingenic_cgu *cgu)
{
	unsigned i;
	int err;

	cgu->clocks.clks = kcalloc(cgu->clocks.clk_num, sizeof(struct clk *),
				   GFP_KERNEL);
	if (!cgu->clocks.clks) {
		err = -ENOMEM;
		goto err_out;
	}

	for (i = 0; i < cgu->clocks.clk_num; i++) {
		err = ingenic_register_clock(cgu, i);
		if (err)
			goto err_out_unregister;
	}

	err = of_clk_add_provider(cgu->np, of_clk_src_onecell_get,
				  &cgu->clocks);
	if (err)
		goto err_out_unregister;

	return 0;

err_out_unregister:
	for (i = 0; i < cgu->clocks.clk_num; i++) {
		if (!cgu->clocks.clks[i])
			continue;

		if (cgu->clock_info[i].type & CGU_CLK_EXT)
			clk_put(cgu->clocks.clks[i]);
		else
			clk_unregister(cgu->clocks.clks[i]);
	}
	kfree(cgu->clocks.clks);
err_out:
	return err;
}
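
/*
 * Usage sketch (illustrative only, not part of this file): a SoC-specific
 * CGU driver is expected to build an ingenic_cgu_clk_info table and call
 * the two entry points above from a CLK_OF_DECLARE init callback. The
 * names example_cgu_clocks, example_cgu_init and the compatible string
 * below are hypothetical.
 *
 *	static void __init example_cgu_init(struct device_node *np)
 *	{
 *		struct ingenic_cgu *cgu;
 *		int retval;
 *
 *		cgu = ingenic_cgu_new(example_cgu_clocks,
 *				      ARRAY_SIZE(example_cgu_clocks), np);
 *		if (!cgu)
 *			return;
 *
 *		retval = ingenic_cgu_register_clocks(cgu);
 *		if (retval)
 *			pr_err("%s: failed to register CGU clocks\n", __func__);
 *	}
 *	CLK_OF_DECLARE(example_cgu, "example,example-cgu", example_cgu_init);
 */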